Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/aac/aac.c | 21
-rw-r--r--  sys/dev/aacraid/aacraid.c | 15
-rw-r--r--  sys/dev/aacraid/aacraid_debug.c | 3
-rw-r--r--  sys/dev/acpi_support/acpi_asus.c | 22
-rw-r--r--  sys/dev/acpi_support/acpi_asus_wmi.c | 431
-rw-r--r--  sys/dev/acpi_support/acpi_fujitsu.c | 4
-rw-r--r--  sys/dev/acpi_support/acpi_hp.c | 4
-rw-r--r--  sys/dev/acpi_support/acpi_ibm.c | 43
-rw-r--r--  sys/dev/acpi_support/acpi_sbl_wmi.c | 193
-rw-r--r--  sys/dev/acpi_support/acpi_wmi.c | 14
-rw-r--r--  sys/dev/acpica/acpi.c | 590
-rw-r--r--  sys/dev/acpica/acpi_apei.c | 2
-rw-r--r--  sys/dev/acpica/acpi_cmbat.c | 13
-rw-r--r--  sys/dev/acpica/acpi_container.c | 15
-rw-r--r--  sys/dev/acpica/acpi_cpu.c | 13
-rw-r--r--  sys/dev/acpica/acpi_ec.c | 8
-rw-r--r--  sys/dev/acpica/acpi_ged.c | 5
-rw-r--r--  sys/dev/acpica/acpi_lid.c | 4
-rw-r--r--  sys/dev/acpica/acpi_pci.c | 45
-rw-r--r--  sys/dev/acpica/acpi_pci_link.c | 9
-rw-r--r--  sys/dev/acpica/acpi_pcib.c | 59
-rw-r--r--  sys/dev/acpica/acpi_pcib_acpi.c | 123
-rw-r--r--  sys/dev/acpica/acpi_pcibvar.h | 1
-rw-r--r--  sys/dev/acpica/acpi_perf.c | 2
-rw-r--r--  sys/dev/acpica/acpi_powerres.c | 58
-rw-r--r--  sys/dev/acpica/acpi_resource.c | 53
-rw-r--r--  sys/dev/acpica/acpi_throttle.c | 6
-rw-r--r--  sys/dev/acpica/acpi_timer.c | 155
-rw-r--r--  sys/dev/acpica/acpi_video.c | 4
-rw-r--r--  sys/dev/acpica/acpivar.h | 64
-rw-r--r--  sys/dev/adb/adb_bus.c | 12
-rw-r--r--  sys/dev/ae/if_ae.c | 31
-rw-r--r--  sys/dev/age/if_age.c | 31
-rw-r--r--  sys/dev/age/if_agevar.h | 1
-rw-r--r--  sys/dev/agp/agp.c | 14
-rw-r--r--  sys/dev/agp/agp_i810.c | 6
-rw-r--r--  sys/dev/ahci/ahci.c | 20
-rw-r--r--  sys/dev/ahci/ahci_generic.c | 4
-rw-r--r--  sys/dev/ahci/ahci_pci.c | 64
-rw-r--r--  sys/dev/ahci/ahciem.c | 4
-rw-r--r--  sys/dev/aic7xxx/ahc_pci.c | 12
-rw-r--r--  sys/dev/aic7xxx/aic79xx.c | 7
-rw-r--r--  sys/dev/aic7xxx/aic7xxx.c | 4
-rw-r--r--  sys/dev/al_eth/al_eth.c | 24
-rw-r--r--  sys/dev/alc/if_alc.c | 48
-rw-r--r--  sys/dev/alc/if_alcvar.h | 1
-rw-r--r--  sys/dev/ale/if_ale.c | 44
-rw-r--r--  sys/dev/ale/if_alevar.h | 1
-rw-r--r--  sys/dev/alpm/alpm.c | 13
-rw-r--r--  sys/dev/altera/atse/if_atse.c | 1597
-rw-r--r--  sys/dev/altera/atse/if_atse_fdt.c | 144
-rw-r--r--  sys/dev/altera/atse/if_atse_nexus.c | 158
-rw-r--r--  sys/dev/altera/atse/if_atsereg.h | 464
-rw-r--r--  sys/dev/altera/avgen/altera_avgen.c | 551
-rw-r--r--  sys/dev/altera/avgen/altera_avgen.h | 96
-rw-r--r--  sys/dev/altera/avgen/altera_avgen_fdt.c | 159
-rw-r--r--  sys/dev/altera/avgen/altera_avgen_nexus.c | 141
-rw-r--r--  sys/dev/altera/jtag_uart/altera_jtag_uart.h | 197
-rw-r--r--  sys/dev/altera/jtag_uart/altera_jtag_uart_cons.c | 331
-rw-r--r--  sys/dev/altera/jtag_uart/altera_jtag_uart_fdt.c | 148
-rw-r--r--  sys/dev/altera/jtag_uart/altera_jtag_uart_nexus.c | 139
-rw-r--r--  sys/dev/altera/jtag_uart/altera_jtag_uart_tty.c | 561
-rw-r--r--  sys/dev/altera/msgdma/msgdma.c | 642
-rw-r--r--  sys/dev/altera/msgdma/msgdma.h | 148
-rw-r--r--  sys/dev/altera/pio/pio.c | 208
-rw-r--r--  sys/dev/altera/pio/pio_if.m | 64
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard.c | 412
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard.h | 247
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard_disk.c | 184
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard_fdt.c | 121
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard_io.c | 446
-rw-r--r--  sys/dev/altera/sdcard/altera_sdcard_nexus.c | 112
-rw-r--r--  sys/dev/altera/softdma/a_api.h | 98
-rw-r--r--  sys/dev/altera/softdma/softdma.c | 882
-rw-r--r--  sys/dev/amdgpio/amdgpio.c | 139
-rw-r--r--  sys/dev/amdgpio/amdgpio.h | 9
-rw-r--r--  sys/dev/amdpm/amdpm.c | 12
-rw-r--r--  sys/dev/amdsbwd/amd_chipset.h | 2
-rw-r--r--  sys/dev/amdsbwd/amdsbwd.c | 9
-rw-r--r--  sys/dev/amdsmb/amdsmb.c | 12
-rw-r--r--  sys/dev/amdsmn/amdsmn.c | 51
-rw-r--r--  sys/dev/amdsmu/amdsmu.c | 466
-rw-r--r--  sys/dev/amdsmu/amdsmu.h | 95
-rw-r--r--  sys/dev/amdsmu/amdsmu_reg.h | 84
-rw-r--r--  sys/dev/amdtemp/amdtemp.c | 88
-rw-r--r--  sys/dev/arcmsr/arcmsr.c | 6
-rw-r--r--  sys/dev/asmc/asmc.c | 24
-rw-r--r--  sys/dev/asmc/asmcvar.h | 96
-rw-r--r--  sys/dev/ata/ata-all.c | 2
-rw-r--r--  sys/dev/ata/ata-isa.c | 2
-rw-r--r--  sys/dev/ata/ata-pci.c | 20
-rw-r--r--  sys/dev/ata/ata-sata.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-acard.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-acerlabs.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-amd.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-ati.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-cenatek.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-cypress.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-cyrix.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-highpoint.c | 17
-rw-r--r--  sys/dev/ata/chipsets/ata-intel.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-ite.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-jmicron.c | 10
-rw-r--r--  sys/dev/ata/chipsets/ata-marvell.c | 6
-rw-r--r--  sys/dev/ata/chipsets/ata-micron.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-national.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-netcell.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-nvidia.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-promise.c | 16
-rw-r--r--  sys/dev/ata/chipsets/ata-serverworks.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-siliconimage.c | 2
-rw-r--r--  sys/dev/ata/chipsets/ata-sis.c | 15
-rw-r--r--  sys/dev/ata/chipsets/ata-via.c | 2
-rw-r--r--  sys/dev/ath/ah_osdep.c | 3
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_rfgain.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar9002/ar9285_btcoex.c | 2
-rw-r--r--  sys/dev/ath/ath_rate/amrr/amrr.c | 7
-rw-r--r--  sys/dev/ath/ath_rate/onoe/onoe.c | 8
-rw-r--r--  sys/dev/ath/ath_rate/sample/sample.c | 27
-rw-r--r--  sys/dev/ath/if_ath.c | 5
-rw-r--r--  sys/dev/ath/if_ath_keycache.c | 3
-rw-r--r--  sys/dev/ath/if_ath_lna_div.c | 6
-rw-r--r--  sys/dev/ath/if_ath_pci.c | 5
-rw-r--r--  sys/dev/ath/if_ath_spectral.c | 7
-rw-r--r--  sys/dev/ath/if_ath_tx.c | 52
-rw-r--r--  sys/dev/ath/if_ath_tx_ht.c | 44
-rw-r--r--  sys/dev/atkbdc/atkbd.c | 14
-rw-r--r--  sys/dev/atkbdc/atkbdc_isa.c | 4
-rw-r--r--  sys/dev/atkbdc/psm.c | 46
-rw-r--r--  sys/dev/atopcase/atopcase.c | 7
-rw-r--r--  sys/dev/axgbe/if_axgbe.c | 5
-rw-r--r--  sys/dev/axgbe/if_axgbe_pci.c | 34
-rw-r--r--  sys/dev/axgbe/xgbe-dev.c | 9
-rw-r--r--  sys/dev/axgbe/xgbe-phy-v2.c | 6
-rw-r--r--  sys/dev/axgbe/xgbe-sysctl.c | 20
-rw-r--r--  sys/dev/axgbe/xgbe_osdep.h | 73
-rw-r--r--  sys/dev/bce/if_bce.c | 39
-rw-r--r--  sys/dev/beri/beri_mem.c | 181
-rw-r--r--  sys/dev/beri/beri_ring.c | 524
-rw-r--r--  sys/dev/beri/virtio/network/if_vtbe.c | 648
-rw-r--r--  sys/dev/beri/virtio/virtio.c | 256
-rw-r--r--  sys/dev/beri/virtio/virtio.h | 69
-rw-r--r--  sys/dev/beri/virtio/virtio_block.c | 553
-rw-r--r--  sys/dev/beri/virtio/virtio_mmio_platform.c | 307
-rw-r--r--  sys/dev/bfe/if_bfe.c | 7
-rw-r--r--  sys/dev/bge/if_bge.c | 16
-rw-r--r--  sys/dev/bhnd/bcma/bcma.c | 3
-rw-r--r--  sys/dev/bhnd/bhnd_subr.c | 22
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb.c | 23
-rw-r--r--  sys/dev/bhnd/bhndb/bhndb_pci.c | 3
-rw-r--r--  sys/dev/bhnd/cores/chipc/bhnd_pmu_chipc.c | 4
-rw-r--r--  sys/dev/bhnd/cores/chipc/chipc.c | 26
-rw-r--r--  sys/dev/bhnd/cores/chipc/chipc_gpio.c | 4
-rw-r--r--  sys/dev/bhnd/cores/chipc/chipc_spi.c | 9
-rw-r--r--  sys/dev/bhnd/cores/pci/bhnd_pci.c | 10
-rw-r--r--  sys/dev/bhnd/cores/pcie2/bhnd_pcie2.c | 10
-rw-r--r--  sys/dev/bhnd/cores/usb/bhnd_ehci.c | 258
-rw-r--r--  sys/dev/bhnd/cores/usb/bhnd_ohci.c | 220
-rw-r--r--  sys/dev/bhnd/cores/usb/bhnd_usb.c | 549
-rw-r--r--  sys/dev/bhnd/nvram/bhnd_nvram_private.h | 3
-rw-r--r--  sys/dev/bhnd/nvram/bhnd_nvram_value.h | 2
-rw-r--r--  sys/dev/bhnd/siba/siba.c | 10
-rw-r--r--  sys/dev/bnxt/bnxt.h | 848
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt.h | 1390
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.c | 194
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.h | 76
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_dcb.c | 864
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_dcb.h | 131
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_hwrm.c (renamed from sys/dev/bnxt/bnxt_hwrm.c) | 1305
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_hwrm.h (renamed from sys/dev/bnxt/bnxt_hwrm.h) | 24
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_ioctl.h (renamed from sys/dev/bnxt/bnxt_ioctl.h) | 0
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_mgmt.c (renamed from sys/dev/bnxt/bnxt_mgmt.c) | 88
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_mgmt.h (renamed from sys/dev/bnxt/bnxt_mgmt.h) | 31
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_sysctl.c (renamed from sys/dev/bnxt/bnxt_sysctl.c) | 558
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_sysctl.h (renamed from sys/dev/bnxt/bnxt_sysctl.h) | 2
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_txrx.c (renamed from sys/dev/bnxt/bnxt_txrx.c) | 55
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_ulp.c | 526
-rw-r--r--  sys/dev/bnxt/bnxt_en/bnxt_ulp.h | 161
-rwxr-xr-x  sys/dev/bnxt/bnxt_en/convert_hsi.pl (renamed from sys/dev/bnxt/convert_hsi.pl) | 0
-rw-r--r--  sys/dev/bnxt/bnxt_en/hsi_struct_def.h (renamed from sys/dev/bnxt/hsi_struct_def.h) | 22735
-rw-r--r--  sys/dev/bnxt/bnxt_en/if_bnxt.c (renamed from sys/dev/bnxt/if_bnxt.c) | 2351
-rw-r--r--  sys/dev/bnxt/bnxt_re/bnxt_re-abi.h | 177
-rw-r--r--  sys/dev/bnxt/bnxt_re/bnxt_re.h | 1077
-rw-r--r--  sys/dev/bnxt/bnxt_re/ib_verbs.c | 5498
-rw-r--r--  sys/dev/bnxt/bnxt_re/ib_verbs.h | 632
-rw-r--r--  sys/dev/bnxt/bnxt_re/main.c | 4467
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_fp.c | 3544
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_fp.h | 638
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_rcfw.c | 1338
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_rcfw.h | 354
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_res.c | 1226
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_res.h | 840
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_sp.c | 1234
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_sp.h | 432
-rw-r--r--  sys/dev/bnxt/bnxt_re/qplib_tlv.h | 187
-rw-r--r--  sys/dev/bnxt/bnxt_re/stats.c | 773
-rw-r--r--  sys/dev/bnxt/bnxt_re/stats.h | 353
-rw-r--r--  sys/dev/bwi/if_bwi.c | 19
-rw-r--r--  sys/dev/bwn/if_bwn.c | 28
-rw-r--r--  sys/dev/bwn/if_bwn_pci.c | 15
-rw-r--r--  sys/dev/bxe/bxe.c | 29
-rw-r--r--  sys/dev/bxe/bxe.h | 10
-rw-r--r--  sys/dev/bxe/ecore_sp.h | 2
-rw-r--r--  sys/dev/cadence/if_cgem.c | 12
-rw-r--r--  sys/dev/cardbus/cardbus.c | 15
-rw-r--r--  sys/dev/cardbus/cardbusvar.h | 2
-rw-r--r--  sys/dev/cas/if_cas.c | 10
-rw-r--r--  sys/dev/cesa/cesa.c | 20
-rw-r--r--  sys/dev/cfe/cfe_resource.c | 2
-rw-r--r--  sys/dev/cfi/cfi_core.c | 9
-rw-r--r--  sys/dev/chromebook_platform/chromebook_platform.c | 4
-rw-r--r--  sys/dev/ciss/ciss.c | 104
-rw-r--r--  sys/dev/ciss/cissvar.h | 3
-rw-r--r--  sys/dev/clk/allwinner/aw_ccu.c | 3
-rw-r--r--  sys/dev/clk/allwinner/ccu_d1.c | 1062
-rw-r--r--  sys/dev/clk/clk_bus.c | 3
-rw-r--r--  sys/dev/clk/clk_fixed.c | 3
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk.c | 277
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk.h | 72
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk_aon.c | 168
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk_pll.c | 386
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk_pll.h | 211
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk_stg.c | 204
-rw-r--r--  sys/dev/clk/starfive/jh7110_clk_sys.c | 268
-rw-r--r--  sys/dev/coretemp/coretemp.c | 2
-rw-r--r--  sys/dev/cpuctl/cpuctl.c | 24
-rw-r--r--  sys/dev/cpufreq/cpufreq_dt.c | 4
-rw-r--r--  sys/dev/cpufreq/ichss.c | 8
-rw-r--r--  sys/dev/cxgb/cxgb_main.c | 101
-rw-r--r--  sys/dev/cxgb/cxgb_sge.c | 20
-rw-r--r--  sys/dev/cxgbe/adapter.h | 121
-rw-r--r--  sys/dev/cxgbe/common/common.h | 212
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c | 1974
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.h | 135
-rw-r--r--  sys/dev/cxgbe/common/t4_msg.h | 3011
-rw-r--r--  sys/dev/cxgbe/common/t4_regs.h | 27273
-rw-r--r--  sys/dev/cxgbe/common/t4_regs_values.h | 24
-rw-r--r--  sys/dev/cxgbe/common/t4_tcb.h | 182
-rw-r--r--  sys/dev/cxgbe/common/t4vf_hw.c | 32
-rw-r--r--  sys/dev/cxgbe/crypto/t4_crypto.c | 58
-rw-r--r--  sys/dev/cxgbe/crypto/t4_crypto.h | 1
-rw-r--r--  sys/dev/cxgbe/crypto/t4_keyctx.c | 30
-rw-r--r--  sys/dev/cxgbe/crypto/t6_kern_tls.c | 31
-rw-r--r--  sys/dev/cxgbe/crypto/t7_kern_tls.c | 2196
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_flash_utils.c | 90
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_lib.c | 11
-rw-r--r--  sys/dev/cxgbe/cudbg/cudbg_lib_common.h | 7
-rw-r--r--  sys/dev/cxgbe/cxgbei/cxgbei.c | 12
-rw-r--r--  sys/dev/cxgbe/cxgbei/icl_cxgbei.c | 98
-rw-r--r--  sys/dev/cxgbe/firmware/t4fw_interface.h | 1322
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg.txt | 644
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt | 530
-rw-r--r--  sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt | 644
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/cm.c | 18
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/cq.c | 4
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/device.c | 60
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h | 15
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/mem.c | 108
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/provider.c | 2
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/qp.c | 9
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/resource.c | 38
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/t4.h | 1
-rw-r--r--  sys/dev/cxgbe/offload.h | 41
-rw-r--r--  sys/dev/cxgbe/osdep.h | 8
-rw-r--r--  sys/dev/cxgbe/t4_clip.c | 2
-rw-r--r--  sys/dev/cxgbe/t4_filter.c | 527
-rw-r--r--  sys/dev/cxgbe/t4_ioctl.h | 17
-rw-r--r--  sys/dev/cxgbe/t4_iov.c | 110
-rw-r--r--  sys/dev/cxgbe/t4_l2t.c | 71
-rw-r--r--  sys/dev/cxgbe/t4_l2t.h | 14
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 2782
-rw-r--r--  sys/dev/cxgbe/t4_mp_ring.c | 81
-rw-r--r--  sys/dev/cxgbe/t4_mp_ring.h | 1
-rw-r--r--  sys/dev/cxgbe/t4_netmap.c | 48
-rw-r--r--  sys/dev/cxgbe/t4_sched.c | 8
-rw-r--r--  sys/dev/cxgbe/t4_sge.c | 256
-rw-r--r--  sys/dev/cxgbe/t4_tpt.c | 193
-rw-r--r--  sys/dev/cxgbe/t4_tracer.c | 10
-rw-r--r--  sys/dev/cxgbe/t4_vf.c | 86
-rw-r--r--  sys/dev/cxgbe/tom/t4_connect.c | 70
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c | 168
-rw-r--r--  sys/dev/cxgbe/tom/t4_ddp.c | 100
-rw-r--r--  sys/dev/cxgbe/tom/t4_listen.c | 330
-rw-r--r--  sys/dev/cxgbe/tom/t4_tls.c | 399
-rw-r--r--  sys/dev/cxgbe/tom/t4_tls.h | 1
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c | 649
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.h | 47
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom_l2t.c | 64
-rw-r--r--  sys/dev/cyapa/cyapa.c | 97
-rw-r--r--  sys/dev/dc/if_dc.c | 7
-rw-r--r--  sys/dev/dcons/dcons_os.c | 2
-rw-r--r--  sys/dev/dpaa/bman_portals.c | 3
-rw-r--r--  sys/dev/dpaa/fman.c | 3
-rw-r--r--  sys/dev/dpaa/if_dtsec.c | 6
-rw-r--r--  sys/dev/dpaa/qman_portals.c | 3
-rw-r--r--  sys/dev/dpaa2/dpaa2_channel.c | 7
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc.c | 18
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc_acpi.c | 6
-rw-r--r--  sys/dev/dpaa2/dpaa2_mc_fdt.c | 5
-rw-r--r--  sys/dev/dpaa2/dpaa2_ni.c | 33
-rw-r--r--  sys/dev/dpaa2/dpaa2_rc.c | 31
-rw-r--r--  sys/dev/dpaa2/memac_mdio_acpi.c | 6
-rw-r--r--  sys/dev/dpaa2/memac_mdio_fdt.c | 2
-rw-r--r--  sys/dev/drm2/drm_buffer.c | 30
-rw-r--r--  sys/dev/drm2/drm_crtc.c | 45
-rw-r--r--  sys/dev/drm2/drm_dp_iic_helper.c | 4
-rw-r--r--  sys/dev/drm2/drm_fb_helper.c | 2
-rw-r--r--  sys/dev/drm2/drm_os_freebsd.h | 28
-rw-r--r--  sys/dev/drm2/ttm/ttm_bo_vm.c | 33
-rw-r--r--  sys/dev/drm2/ttm/ttm_object.c | 5
-rw-r--r--  sys/dev/drm2/ttm/ttm_page_alloc.c | 2
-rw-r--r--  sys/dev/dwc/if_dwc.c | 6
-rw-r--r--  sys/dev/dwc/if_dwc_cvitek.c | 89
-rw-r--r--  sys/dev/dwwdt/dwwdt.c | 10
-rw-r--r--  sys/dev/e1000/e1000_82575.c | 42
-rw-r--r--  sys/dev/e1000/e1000_api.c | 4
-rw-r--r--  sys/dev/e1000/e1000_base.c | 5
-rw-r--r--  sys/dev/e1000/e1000_defines.h | 1
-rw-r--r--  sys/dev/e1000/e1000_hw.h | 4
-rw-r--r--  sys/dev/e1000/e1000_i210.c | 14
-rw-r--r--  sys/dev/e1000/e1000_osdep.c | 10
-rw-r--r--  sys/dev/e1000/e1000_osdep.h | 6
-rw-r--r--  sys/dev/e1000/e1000_phy.c | 2
-rw-r--r--  sys/dev/e1000/em_txrx.c | 103
-rw-r--r--  sys/dev/e1000/if_em.c | 1816
-rw-r--r--  sys/dev/e1000/if_em.h | 48
-rw-r--r--  sys/dev/e1000/igb_txrx.c | 78
-rw-r--r--  sys/dev/efidev/efidev.c | 19
-rw-r--r--  sys/dev/efidev/efirt.c | 122
-rw-r--r--  sys/dev/efidev/efirtc.c | 4
-rw-r--r--  sys/dev/ena/ena.c | 306
-rw-r--r--  sys/dev/ena/ena.h | 78
-rw-r--r--  sys/dev/ena/ena_datapath.c | 60
-rw-r--r--  sys/dev/ena/ena_datapath.h | 2
-rw-r--r--  sys/dev/ena/ena_netmap.c | 52
-rw-r--r--  sys/dev/ena/ena_netmap.h | 2
-rw-r--r--  sys/dev/ena/ena_rss.c | 7
-rw-r--r--  sys/dev/ena/ena_rss.h | 2
-rw-r--r--  sys/dev/ena/ena_sysctl.c | 51
-rw-r--r--  sys/dev/ena/ena_sysctl.h | 4
-rw-r--r--  sys/dev/enetc/enetc_hw.h | 1
-rw-r--r--  sys/dev/enetc/if_enetc.c | 12
-rw-r--r--  sys/dev/enic/cq_desc.h | 15
-rw-r--r--  sys/dev/enic/enic.h | 78
-rw-r--r--  sys/dev/enic/enic_res.c | 4
-rw-r--r--  sys/dev/enic/enic_res.h | 2
-rw-r--r--  sys/dev/enic/enic_txrx.c | 39
-rw-r--r--  sys/dev/enic/if_enic.c | 192
-rw-r--r--  sys/dev/enic/vnic_cq.c | 2
-rw-r--r--  sys/dev/enic/vnic_cq.h | 9
-rw-r--r--  sys/dev/enic/vnic_dev.c | 244
-rw-r--r--  sys/dev/enic/vnic_dev.h | 9
-rw-r--r--  sys/dev/enic/vnic_intr.c | 2
-rw-r--r--  sys/dev/enic/vnic_intr.h | 2
-rw-r--r--  sys/dev/enic/vnic_resource.h | 1
-rw-r--r--  sys/dev/enic/vnic_rq.c | 7
-rw-r--r--  sys/dev/enic/vnic_rq.h | 1
-rw-r--r--  sys/dev/enic/vnic_rss.h | 5
-rw-r--r--  sys/dev/enic/vnic_wq.c | 106
-rw-r--r--  sys/dev/enic/vnic_wq.h | 18
-rw-r--r--  sys/dev/eqos/if_eqos.c | 170
-rw-r--r--  sys/dev/eqos/if_eqos_reg.h | 1
-rw-r--r--  sys/dev/eqos/if_eqos_starfive.c | 219
-rw-r--r--  sys/dev/eqos/if_eqos_var.h | 7
-rw-r--r--  sys/dev/et/if_et.c | 7
-rw-r--r--  sys/dev/etherswitch/ar40xx/ar40xx_main.c | 13
-rw-r--r--  sys/dev/etherswitch/ar40xx/ar40xx_phy.c | 16
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch.c | 35
-rw-r--r--  sys/dev/etherswitch/e6000sw/e6000sw.c | 94
-rw-r--r--  sys/dev/etherswitch/e6000sw/e6000swreg.h | 9
-rw-r--r--  sys/dev/etherswitch/e6000sw/e6060sw.c | 33
-rw-r--r--  sys/dev/etherswitch/etherswitch.c | 4
-rw-r--r--  sys/dev/etherswitch/felix/felix.c | 14
-rw-r--r--  sys/dev/etherswitch/infineon/adm6996fc.c | 53
-rw-r--r--  sys/dev/etherswitch/ip17x/ip17x.c | 31
-rw-r--r--  sys/dev/etherswitch/micrel/ksz8995ma.c | 53
-rw-r--r--  sys/dev/etherswitch/miiproxy.c | 20
-rw-r--r--  sys/dev/etherswitch/mtkswitch/mtkswitch.c | 32
-rw-r--r--  sys/dev/etherswitch/rtl8366/rtl8366rb.c | 25
-rw-r--r--  sys/dev/etherswitch/ukswitch/ukswitch.c | 27
-rw-r--r--  sys/dev/evdev/cdev.c | 2
-rw-r--r--  sys/dev/evdev/evdev.c | 2
-rw-r--r--  sys/dev/evdev/evdev.h | 2
-rw-r--r--  sys/dev/evdev/uinput.c | 4
-rw-r--r--  sys/dev/exca/exca.c | 2
-rw-r--r--  sys/dev/fdc/fdc.c | 7
-rw-r--r--  sys/dev/fdc/fdc_acpi.c | 3
-rw-r--r--  sys/dev/fdt/fdt_common.c | 162
-rw-r--r--  sys/dev/fdt/fdt_common.h | 25
-rw-r--r--  sys/dev/fdt/fdt_slicer.c | 16
-rw-r--r--  sys/dev/fdt/simple_mfd.c | 3
-rw-r--r--  sys/dev/fdt/simplebus.c | 13
-rw-r--r--  sys/dev/ffec/if_ffec.c | 5
-rw-r--r--  sys/dev/filemon/filemon_wrapper.c | 3
-rw-r--r--  sys/dev/firewire/firewire.c | 6
-rw-r--r--  sys/dev/firewire/firewirereg.h | 2
-rw-r--r--  sys/dev/firewire/fwohci_pci.c | 9
-rw-r--r--  sys/dev/firewire/if_fwe.c | 6
-rw-r--r--  sys/dev/firewire/if_fwip.c | 10
-rw-r--r--  sys/dev/firewire/sbp.c | 4
-rw-r--r--  sys/dev/firmware/arm/scmi.c | 336
-rw-r--r--  sys/dev/firmware/arm/scmi.h | 23
-rw-r--r--  sys/dev/firmware/arm/scmi_mailbox.c | 12
-rw-r--r--  sys/dev/firmware/arm/scmi_shmem.c | 20
-rw-r--r--  sys/dev/firmware/arm/scmi_shmem.h | 6
-rw-r--r--  sys/dev/firmware/arm/scmi_smc.c | 6
-rw-r--r--  sys/dev/firmware/arm/scmi_virtio.c | 17
-rw-r--r--  sys/dev/firmware/xilinx/zynqmp_firmware.c | 5
-rw-r--r--  sys/dev/flash/cqspi.c | 2
-rw-r--r--  sys/dev/flash/flexspi/flex_spi.c | 12
-rw-r--r--  sys/dev/flash/w25n.c | 603
-rw-r--r--  sys/dev/flash/w25nreg.h | 85
-rw-r--r--  sys/dev/ftgpio/ftgpio.c | 3
-rw-r--r--  sys/dev/fxp/if_fxp.c | 41
-rw-r--r--  sys/dev/gem/if_gem.c | 4
-rw-r--r--  sys/dev/glxiic/glxiic.c | 16
-rw-r--r--  sys/dev/gpio/acpi_gpiobus.c | 449
-rw-r--r--  sys/dev/gpio/acpi_gpiobusvar.h | 48
-rw-r--r--  sys/dev/gpio/bytgpio.c | 9
-rw-r--r--  sys/dev/gpio/chvgpio.c | 3
-rw-r--r--  sys/dev/gpio/dwgpio/dwgpio.c | 3
-rw-r--r--  sys/dev/gpio/dwgpio/dwgpio_bus.c | 5
-rw-r--r--  sys/dev/gpio/gpio_if.m | 26
-rw-r--r--  sys/dev/gpio/gpioaei.c | 259
-rw-r--r--  sys/dev/gpio/gpiobus.c | 268
-rw-r--r--  sys/dev/gpio/gpiobus_if.m | 30
-rw-r--r--  sys/dev/gpio/gpiobus_internal.h (renamed from sys/dev/altera/pio/pio.h) | 40
-rw-r--r--  sys/dev/gpio/gpiobusvar.h | 15
-rw-r--r--  sys/dev/gpio/gpioc.c | 211
-rw-r--r--  sys/dev/gpio/gpioiic.c | 7
-rw-r--r--  sys/dev/gpio/gpioled.c | 106
-rw-r--r--  sys/dev/gpio/gpiomdio.c | 1
-rw-r--r--  sys/dev/gpio/gpiopps.c | 2
-rw-r--r--  sys/dev/gpio/gpiospi.c | 9
-rw-r--r--  sys/dev/gpio/ofw_gpiobus.c | 25
-rw-r--r--  sys/dev/gpio/pl061.c | 21
-rw-r--r--  sys/dev/gpio/pl061.h | 1
-rw-r--r--  sys/dev/gpio/pl061_acpi.c | 15
-rw-r--r--  sys/dev/gpio/pl061_fdt.c | 15
-rw-r--r--  sys/dev/gpio/qoriq_gpio.c | 14
-rw-r--r--  sys/dev/gve/gve.h | 400
-rw-r--r--  sys/dev/gve/gve_adminq.c | 215
-rw-r--r--  sys/dev/gve/gve_adminq.h | 84
-rw-r--r--  sys/dev/gve/gve_desc.h | 4
-rw-r--r--  sys/dev/gve/gve_dqo.h | 337
-rw-r--r--  sys/dev/gve/gve_main.c | 381
-rw-r--r--  sys/dev/gve/gve_plat.h | 3
-rw-r--r--  sys/dev/gve/gve_qpl.c | 187
-rw-r--r--  sys/dev/gve/gve_rx.c | 163
-rw-r--r--  sys/dev/gve/gve_rx_dqo.c | 1035
-rw-r--r--  sys/dev/gve/gve_sysctl.c | 252
-rw-r--r--  sys/dev/gve/gve_tx.c | 269
-rw-r--r--  sys/dev/gve/gve_tx_dqo.c | 1149
-rw-r--r--  sys/dev/gve/gve_utils.c | 95
-rw-r--r--  sys/dev/hid/hid.c | 91
-rw-r--r--  sys/dev/hid/hid.h | 36
-rw-r--r--  sys/dev/hid/hidbus.c | 68
-rw-r--r--  sys/dev/hid/hidquirk.h | 1
-rw-r--r--  sys/dev/hid/hidraw.c | 79
-rw-r--r--  sys/dev/hid/hidraw.h | 4
-rw-r--r--  sys/dev/hid/hkbd.c | 19
-rw-r--r--  sys/dev/hid/hms.c | 32
-rw-r--r--  sys/dev/hid/ietp.c | 86
-rw-r--r--  sys/dev/hid/ps4dshock.c | 16
-rw-r--r--  sys/dev/hid/u2f.c | 603
-rw-r--r--  sys/dev/hifn/hifn7751.c | 2
-rw-r--r--  sys/dev/hpt27xx/hpt27xx_osm_bsd.c | 29
-rw-r--r--  sys/dev/hpt27xx/hptintf.h | 6
-rw-r--r--  sys/dev/hpt27xx/os_bsd.h | 2
-rw-r--r--  sys/dev/hptiop/hptiop.c | 8
-rw-r--r--  sys/dev/hptmv/entry.c | 49
-rw-r--r--  sys/dev/hptmv/gui_lib.c | 12
-rw-r--r--  sys/dev/hptmv/hptproc.c | 4
-rw-r--r--  sys/dev/hptnr/hptnr_osm_bsd.c | 24
-rw-r--r--  sys/dev/hptnr/os_bsd.h | 2
-rw-r--r--  sys/dev/hptrr/hptrr_osm_bsd.c | 27
-rw-r--r--  sys/dev/hptrr/os_bsd.h | 2
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64.c | 72
-rw-r--r--  sys/dev/hwpmc/hwpmc_arm64.h | 2
-rw-r--r--  sys/dev/hwpmc/hwpmc_core.c | 4
-rw-r--r--  sys/dev/hwpmc/hwpmc_logging.c | 137
-rw-r--r--  sys/dev/hwpmc/hwpmc_mod.c | 21
-rw-r--r--  sys/dev/hwpmc/hwpmc_x86.c | 14
-rw-r--r--  sys/dev/hwpmc/pmu_dmc620.c | 2
-rw-r--r--  sys/dev/hwt/hwt.c | 242
-rw-r--r--  sys/dev/hwt/hwt_backend.c | 289
-rw-r--r--  sys/dev/hwt/hwt_backend.h | 87
-rw-r--r--  sys/dev/hwt/hwt_config.c | 108
-rw-r--r--  sys/dev/hwt/hwt_config.h | 36
-rw-r--r--  sys/dev/hwt/hwt_context.c | 201
-rw-r--r--  sys/dev/hwt/hwt_context.h | 86
-rw-r--r--  sys/dev/hwt/hwt_contexthash.c | 134
-rw-r--r--  sys/dev/hwt/hwt_contexthash.h | 42
-rw-r--r--  sys/dev/hwt/hwt_cpu.c | 115
-rw-r--r--  sys/dev/hwt/hwt_cpu.h | 45
-rw-r--r--  sys/dev/hwt/hwt_hook.c | 323
-rw-r--r--  sys/dev/hwt/hwt_hook.h | 56
-rw-r--r--  sys/dev/hwt/hwt_intr.h | 33
-rw-r--r--  sys/dev/hwt/hwt_ioctl.c | 444
-rw-r--r--  sys/dev/hwt/hwt_ioctl.h (renamed from sys/dev/beri/virtio/virtio_mmio_platform.h) | 20
-rw-r--r--  sys/dev/hwt/hwt_owner.c | 157
-rw-r--r--  sys/dev/hwt/hwt_owner.h | 45
-rw-r--r--  sys/dev/hwt/hwt_ownerhash.c | 141
-rw-r--r--  sys/dev/hwt/hwt_ownerhash.h | 42
-rw-r--r--  sys/dev/hwt/hwt_record.c | 302
-rw-r--r--  sys/dev/hwt/hwt_record.h | 47
-rw-r--r--  sys/dev/hwt/hwt_thread.c | 162
-rw-r--r--  sys/dev/hwt/hwt_thread.h (renamed from sys/dev/sound/midi/sequencer.h) | 95
-rw-r--r--  sys/dev/hwt/hwt_vm.c | 503
-rw-r--r--  sys/dev/hwt/hwt_vm.h | 47
-rw-r--r--  sys/dev/hyperv/hvsock/hv_sock.c | 2
-rw-r--r--  sys/dev/hyperv/input/hv_hid.c | 8
-rw-r--r--  sys/dev/hyperv/netvsc/if_hn.c | 39
-rw-r--r--  sys/dev/hyperv/pcib/vmbus_pcib.c | 7
-rw-r--r--  sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c | 9
-rw-r--r--  sys/dev/hyperv/vmbus/aarch64/hyperv_machdep.c | 7
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv.c | 63
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_mmu.c | 308
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_mmu.h | 57
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_var.h | 11
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus.c | 136
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_chan.c | 18
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_chanvar.h | 1
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_et.c | 4
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_reg.h | 10
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_var.h | 57
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_xact.c | 2
-rw-r--r--  sys/dev/iavf/iavf_iflib.h | 2
-rw-r--r--  sys/dev/iavf/iavf_lib.c | 28
-rw-r--r--  sys/dev/iavf/iavf_lib.h | 3
-rw-r--r--  sys/dev/iavf/iavf_osdep.c | 2
-rw-r--r--  sys/dev/iavf/if_iavf_iflib.c | 7
-rw-r--r--  sys/dev/ice/ice_adminq_cmd.h | 191
-rw-r--r--  sys/dev/ice/ice_bitops.h | 8
-rw-r--r--  sys/dev/ice/ice_common.c | 976
-rw-r--r--  sys/dev/ice/ice_common.h | 189
-rw-r--r--  sys/dev/ice/ice_common_txrx.h | 2
-rw-r--r--  sys/dev/ice/ice_controlq.c | 166
-rw-r--r--  sys/dev/ice/ice_controlq.h | 24
-rw-r--r--  sys/dev/ice/ice_dcb.c | 90
-rw-r--r--  sys/dev/ice/ice_dcb.h | 42
-rw-r--r--  sys/dev/ice/ice_ddp_common.c | 152
-rw-r--r--  sys/dev/ice/ice_ddp_common.h | 24
-rw-r--r--  sys/dev/ice/ice_devids.h | 56
-rw-r--r--  sys/dev/ice/ice_drv_info.h | 83
-rw-r--r--  sys/dev/ice/ice_features.h | 5
-rw-r--r--  sys/dev/ice/ice_flex_pipe.c | 266
-rw-r--r--  sys/dev/ice/ice_flex_pipe.h | 38
-rw-r--r--  sys/dev/ice/ice_flow.c | 100
-rw-r--r--  sys/dev/ice/ice_flow.h | 14
-rw-r--r--  sys/dev/ice/ice_fw_logging.c | 75
-rw-r--r--  sys/dev/ice/ice_fwlog.c | 39
-rw-r--r--  sys/dev/ice/ice_fwlog.h | 12
-rw-r--r--  sys/dev/ice/ice_hw_autogen.h | 2571
-rw-r--r--  sys/dev/ice/ice_iflib.h | 17
-rw-r--r--  sys/dev/ice/ice_iov.c | 1856
-rw-r--r--  sys/dev/ice/ice_iov.h | 125
-rw-r--r--  sys/dev/ice/ice_lan_tx_rx.h | 10
-rw-r--r--  sys/dev/ice/ice_lib.c | 1158
-rw-r--r--  sys/dev/ice/ice_lib.h | 130
-rw-r--r--  sys/dev/ice/ice_nvm.c | 398
-rw-r--r--  sys/dev/ice/ice_nvm.h | 56
-rw-r--r--  sys/dev/ice/ice_osdep.c | 2
-rw-r--r--  sys/dev/ice/ice_protocol_type.h | 2
-rw-r--r--  sys/dev/ice/ice_rdma.c | 2
-rw-r--r--  sys/dev/ice/ice_rss.h | 16
-rw-r--r--  sys/dev/ice/ice_sbq_cmd.h | 120
-rw-r--r--  sys/dev/ice/ice_sched.c | 487
-rw-r--r--  sys/dev/ice/ice_sched.h | 102
-rw-r--r--  sys/dev/ice/ice_strings.c | 6
-rw-r--r--  sys/dev/ice/ice_switch.c | 306
-rw-r--r--  sys/dev/ice/ice_switch.h | 93
-rw-r--r--  sys/dev/ice/ice_type.h | 36
-rw-r--r--  sys/dev/ice/ice_vf_mbx.c | 471
-rw-r--r--  sys/dev/ice/ice_vf_mbx.h | 67
-rw-r--r--  sys/dev/ice/ice_vlan_mode.c | 22
-rw-r--r--  sys/dev/ice/ice_vlan_mode.h | 2
-rw-r--r--  sys/dev/ice/if_ice_iflib.c | 267
-rw-r--r--  sys/dev/ice/virtchnl.h | 28
-rw-r--r--  sys/dev/ichiic/ig4_acpi.c | 1
-rw-r--r--  sys/dev/ichiic/ig4_iic.c | 23
-rw-r--r--  sys/dev/ichiic/ig4_pci.c | 48
-rw-r--r--  sys/dev/ichiic/ig4_var.h | 1
-rw-r--r--  sys/dev/ichsmb/ichsmb.c | 7
-rw-r--r--  sys/dev/ichsmb/ichsmb_pci.c | 12
-rw-r--r--  sys/dev/ichwd/i6300esbwd.c | 245
-rw-r--r--  sys/dev/ichwd/i6300esbwd.h | 46
-rw-r--r--  sys/dev/ichwd/ichwd.c | 2
-rw-r--r--  sys/dev/ichwd/ichwd.h | 3
-rw-r--r--  sys/dev/ida/ida.c | 7
-rw-r--r--  sys/dev/igc/if_igc.c | 1857
-rw-r--r--  sys/dev/igc/if_igc.h | 118
-rw-r--r--  sys/dev/igc/igc_api.c | 2
-rw-r--r--  sys/dev/igc/igc_defines.h | 29
-rw-r--r--  sys/dev/igc/igc_nvm.c | 81
-rw-r--r--  sys/dev/igc/igc_nvm.h | 18
-rw-r--r--  sys/dev/igc/igc_regs.h | 7
-rw-r--r--  sys/dev/igc/igc_txrx.c | 113
-rw-r--r--  sys/dev/iicbus/acpi_iicbus.c | 4
-rw-r--r--  sys/dev/iicbus/adc/ads111x.c | 4
-rw-r--r--  sys/dev/iicbus/controller/cadence/cdnc_i2c.c | 14
-rw-r--r--  sys/dev/iicbus/controller/opencores/iicoc_fdt.c | 4
-rw-r--r--  sys/dev/iicbus/controller/opencores/iicoc_pci.c | 5
-rw-r--r--  sys/dev/iicbus/controller/qcom/geni_iic.c | 608
-rw-r--r--  sys/dev/iicbus/controller/qcom/geni_iic_acpi.c | 189
-rw-r--r--  sys/dev/iicbus/controller/qcom/geni_iic_var.h | 80
-rw-r--r--  sys/dev/iicbus/controller/rockchip/rk_i2c.c | 44
-rw-r--r--  sys/dev/iicbus/controller/twsi/twsi.c | 9
-rw-r--r--  sys/dev/iicbus/controller/vybrid/vf_i2c.c | 20
-rw-r--r--  sys/dev/iicbus/gpio/pcf8574.c | 7
-rw-r--r--  sys/dev/iicbus/gpio/tca64xx.c | 10
-rw-r--r--  sys/dev/iicbus/if_ic.c | 2
-rw-r--r--  sys/dev/iicbus/iic.c | 20
-rw-r--r--  sys/dev/iicbus/iicbb.c | 24
-rw-r--r--  sys/dev/iicbus/iicbus.c | 19
-rw-r--r--  sys/dev/iicbus/iichid.c | 186
-rw-r--r--  sys/dev/iicbus/iiconf.h | 2
-rw-r--r--  sys/dev/iicbus/iicsmb.c | 9
-rw-r--r--  sys/dev/iicbus/mux/iic_gpiomux.c | 2
-rw-r--r--  sys/dev/iicbus/mux/iicmux.c | 4
-rw-r--r--  sys/dev/iicbus/mux/ltc430x.c | 2
-rw-r--r--  sys/dev/iicbus/mux/pca9547.c | 2
-rw-r--r--  sys/dev/iicbus/mux/pca954x.c | 2
-rw-r--r--  sys/dev/iicbus/ofw_iicbus.c | 7
-rw-r--r--  sys/dev/iicbus/pmic/act8846.c | 10
-rw-r--r--  sys/dev/iicbus/rtc/ds1307.c | 6
-rw-r--r--  sys/dev/iicbus/rtc/ds3231.c | 2
-rw-r--r--  sys/dev/imcsmb/imcsmb.c | 34
-rw-r--r--  sys/dev/imcsmb/imcsmb_pci.c | 35
-rw-r--r--  sys/dev/intel/spi.c | 12
-rw-r--r--  sys/dev/intpm/intpm.c | 51
-rw-r--r--  sys/dev/ioat/ioat.c | 2
-rw-r--r--  sys/dev/ioat/ioat_test.c | 10
-rw-r--r--  sys/dev/iommu/busdma_iommu.c | 80
-rw-r--r--  sys/dev/iommu/iommu.h | 20
-rw-r--r--  sys/dev/iommu/iommu_gas.c | 33
-rw-r--r--  sys/dev/ipmi/ipmi.c | 21
-rw-r--r--  sys/dev/ipmi/ipmi_bt.c | 6
-rw-r--r--  sys/dev/ipmi/ipmi_isa.c | 4
-rw-r--r--  sys/dev/ipmi/ipmi_kcs.c | 8
-rw-r--r--  sys/dev/ipmi/ipmi_opal.c | 6
-rw-r--r--  sys/dev/ipmi/ipmi_smbios.c | 9
-rw-r--r--  sys/dev/ipmi/ipmi_smbus.c | 4
-rw-r--r--  sys/dev/ipmi/ipmi_smic.c | 2
-rw-r--r--  sys/dev/ipmi/ipmi_ssif.c | 5
-rw-r--r--  sys/dev/ipmi/ipmivars.h | 5
-rw-r--r--  sys/dev/ips/ips.c | 18
-rw-r--r--  sys/dev/ipw/if_ipw.c | 9
-rw-r--r--  sys/dev/irdma/irdma_cm.c | 34
-rw-r--r--  sys/dev/irdma/irdma_ctrl.c | 16
-rw-r--r--  sys/dev/irdma/irdma_utils.c | 4
-rw-r--r--  sys/dev/isci/isci.c | 2
-rw-r--r--  sys/dev/isci/isci_logger.c | 2
-rw-r--r--  sys/dev/isci/scil/intel_sata.h | 2
-rw-r--r--  sys/dev/isci/scil/sati_util.c | 2
-rw-r--r--  sys/dev/isci/scil/sci_abstract_list.c | 7
-rw-r--r--  sys/dev/isci/scil/scif_sas_smp_remote_device.c | 2
-rw-r--r--  sys/dev/iscsi/icl_soft.c | 2
-rw-r--r--  sys/dev/iser/iser_verbs.c | 11
-rw-r--r--  sys/dev/isl/isl.c | 4
-rw-r--r--  sys/dev/ismt/ismt.c | 11
-rw-r--r--  sys/dev/isp/isp.c | 29
-rw-r--r--  sys/dev/isp/isp_freebsd.c | 141
-rw-r--r--  sys/dev/isp/isp_freebsd.h | 10
-rw-r--r--  sys/dev/isp/isp_pci.c | 9
-rw-r--r--  sys/dev/isp/isp_target.c | 11
-rw-r--r--  sys/dev/isp/ispmbox.h | 6
-rw-r--r--  sys/dev/isp/ispvar.h | 1
-rw-r--r--  sys/dev/iwi/if_iwi.c | 8
-rw-r--r--  sys/dev/iwm/if_iwm.c | 112
-rw-r--r--  sys/dev/iwm/if_iwmreg.h | 24
-rw-r--r--  sys/dev/iwm/if_iwmvar.h | 1
-rw-r--r--  sys/dev/iwn/if_iwn.c | 39
-rw-r--r--  sys/dev/iwx/if_iwx.c | 11065
-rw-r--r--  sys/dev/iwx/if_iwx_debug.c | 370
-rw-r--r--  sys/dev/iwx/if_iwx_debug.h | 59
-rw-r--r--  sys/dev/iwx/if_iwxreg.h | 7926
-rw-r--r--  sys/dev/iwx/if_iwxvar.h | 924
-rw-r--r--  sys/dev/ixgbe/if_bypass.c | 134
-rw-r--r--  sys/dev/ixgbe/if_fdir.c | 24
-rw-r--r--  sys/dev/ixgbe/if_ix.c | 1838
-rw-r--r--  sys/dev/ixgbe/if_ixv.c | 281
-rw-r--r--  sys/dev/ixgbe/if_sriov.c | 140
-rw-r--r--  sys/dev/ixgbe/ix_txrx.c | 106
-rw-r--r--  sys/dev/ixgbe/ixgbe.h | 71
-rw-r--r--  sys/dev/ixgbe/ixgbe_82599.c | 29
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.c | 37
-rw-r--r--  sys/dev/ixgbe/ixgbe_api.h | 3
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.c | 241
-rw-r--r--  sys/dev/ixgbe/ixgbe_common.h | 5
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.c | 2
-rw-r--r--  sys/dev/ixgbe/ixgbe_dcb.h | 4
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.c | 5533
-rw-r--r--  sys/dev/ixgbe/ixgbe_e610.h | 224
-rw-r--r--  sys/dev/ixgbe/ixgbe_features.h | 2
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.c | 909
-rw-r--r--  sys/dev/ixgbe/ixgbe_mbx.h | 84
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.c | 32
-rw-r--r--  sys/dev/ixgbe/ixgbe_osdep.h | 32
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.c | 43
-rw-r--r--  sys/dev/ixgbe/ixgbe_phy.h | 2
-rw-r--r--  sys/dev/ixgbe/ixgbe_rss.h | 1
-rw-r--r--  sys/dev/ixgbe/ixgbe_type.h | 185
-rw-r--r--  sys/dev/ixgbe/ixgbe_type_e610.h | 2278
-rw-r--r--  sys/dev/ixgbe/ixgbe_vf.c | 77
-rw-r--r--  sys/dev/ixgbe/ixgbe_x540.c | 4
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.c | 150
-rw-r--r--  sys/dev/ixgbe/ixgbe_x550.h | 5
-rw-r--r--  sys/dev/ixl/i40e_register.h | 12
-rw-r--r--  sys/dev/ixl/i40e_type.h | 1
-rw-r--r--  sys/dev/ixl/if_ixl.c | 56
-rw-r--r--  sys/dev/ixl/ixl.h | 3
-rw-r--r--  sys/dev/ixl/ixl_pf.h | 4
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c | 242
-rw-r--r--  sys/dev/jedec_dimm/jedec_dimm.c | 17
-rw-r--r--  sys/dev/jme/if_jme.c | 42
-rw-r--r--  sys/dev/jme/if_jmevar.h | 1
-rw-r--r--  sys/dev/kvm_clock/kvm_clock.c | 2
-rw-r--r--  sys/dev/le/lance.c | 5
-rw-r--r--  sys/dev/lge/if_lge.c | 13
-rw-r--r--  sys/dev/liquidio/base/lio_request_manager.c | 9
-rw-r--r--  sys/dev/liquidio/base/lio_response_manager.c | 4
-rw-r--r--  sys/dev/liquidio/lio_ioctl.c | 2
-rw-r--r--  sys/dev/liquidio/lio_main.c | 15
-rw-r--r--  sys/dev/liquidio/lio_sysctl.c | 3
-rw-r--r--  sys/dev/malo/if_malo.c | 18
-rw-r--r--  sys/dev/mana/gdma_main.c | 34
-rw-r--r--  sys/dev/mana/gdma_util.h | 21
-rw-r--r--  sys/dev/mana/hw_channel.c | 17
-rw-r--r--  sys/dev/mana/mana.h | 33
-rw-r--r--  sys/dev/mana/mana_en.c | 276
-rw-r--r--  sys/dev/mana/mana_sysctl.c | 23
-rw-r--r--  sys/dev/md/embedfs.S | 6
-rw-r--r--  sys/dev/md/md.c | 361
-rw-r--r--  sys/dev/mdio/mdio.c | 17
-rw-r--r--  sys/dev/mem/memutil.c | 19
-rw-r--r--  sys/dev/mfi/mfi.c | 21
-rw-r--r--  sys/dev/mfi/mfi_cam.c | 2
-rw-r--r--  sys/dev/mfi/mfi_pci.c | 11
-rw-r--r--  sys/dev/mfi/mfireg.h | 2
-rw-r--r--  sys/dev/mgb/if_mgb.c | 7
-rw-r--r--  sys/dev/mgb/if_mgb.h | 2
-rw-r--r--  sys/dev/mge/if_mge.c | 47
-rw-r--r--  sys/dev/mii/e1000phy.c | 7
-rw-r--r--  sys/dev/mii/mcommphy.c | 276
-rw-r--r--  sys/dev/mii/mii.c | 21
-rw-r--r--  sys/dev/mii/mii_fdt.c | 2
-rw-r--r--  sys/dev/mii/miidevs | 3
-rw-r--r--  sys/dev/mii/mv88e151x.c | 8
-rw-r--r--  sys/dev/mii/rgephy.c | 6
-rw-r--r--  sys/dev/mii/rgephyreg.h | 1
-rw-r--r--  sys/dev/mlx/mlx.c | 25
-rw-r--r--  sys/dev/mlx4/mlx4_core/mlx4_alloc.c | 2
-rw-r--r--  sys/dev/mlx4/mlx4_core/mlx4_cmd.c | 8
-rw-r--r--  sys/dev/mlx4/mlx4_en/en.h | 2
-rw-r--r--  sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c | 5
-rw-r--r--  sys/dev/mlx4/mlx4_en/mlx4_en_tx.c | 10
-rw-r--r--  sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c | 4
-rw-r--r--  sys/dev/mlx5/cq.h | 6
-rw-r--r--  sys/dev/mlx5/crypto.h | 36
-rw-r--r--  sys/dev/mlx5/device.h | 112
-rw-r--r--  sys/dev/mlx5/doorbell.h | 6
-rw-r--r--  sys/dev/mlx5/driver.h | 10
-rw-r--r--  sys/dev/mlx5/fs.h | 232
-rw-r--r--  sys/dev/mlx5/mlx5_accel/ipsec.h | 349
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec.c | 821
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c | 2289
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c | 486
-rw-r--r--  sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c | 87
-rw-r--r--  sys/dev/mlx5/mlx5_core/eswitch.h | 8
-rw-r--r--  sys/dev/mlx5/mlx5_core/fs_chains.h | 71
-rw-r--r--  sys/dev/mlx5/mlx5_core/fs_cmd.h | 120
-rw-r--r--  sys/dev/mlx5/mlx5_core/fs_core.h | 490
-rw-r--r--  sys/dev/mlx5/mlx5_core/fs_ft_pool.h | 23
-rw-r--r--  sys/dev/mlx5/mlx5_core/fs_tcp.h | 6
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_cmd.c | 8
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_core.h | 10
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_crypto.c | 94
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c | 2
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_eq.c | 21
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_eswitch.c | 154
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c | 102
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h | 54
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_chains.c | 664
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_cmd.c | 1239
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_core.c | 3522
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c | 2
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_ft_pool.c | 86
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_tcp.c | 50
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c | 2874
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_fw.c | 10
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_health.c | 5
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_main.c | 55
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c | 4
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_tls.c | 60
-rw-r--r--  sys/dev/mlx5/mlx5_core/mlx5_transobj.c | 12
-rw-r--r--  sys/dev/mlx5/mlx5_core/transobj.h | 1
-rw-r--r--  sys/dev/mlx5/mlx5_core/wq.h | 17
-rw-r--r--  sys/dev/mlx5/mlx5_en/en.h | 31
-rw-r--r--  sys/dev/mlx5/mlx5_en/en_hw_tls.h | 2
-rw-r--r--  sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h | 2
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c | 24
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c | 355
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c | 205
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c | 107
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 376
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_rx.c | 102
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_tx.c | 5
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib.h | 2
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c | 8
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c | 39
-rw-r--r--  sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c | 4
-rw-r--r--  sys/dev/mlx5/mlx5_ifc.h | 725
-rw-r--r--  sys/dev/mlx5/mlx5_lib/aso.h | 92
-rw-r--r--  sys/dev/mlx5/mlx5_lib/mlx5_aso.c | 428
-rw-r--r--  sys/dev/mlx5/port.h | 13
-rw-r--r--  sys/dev/mlx5/qp.h | 13
-rw-r--r--  sys/dev/mlx5/tls.h | 3
-rw-r--r--  sys/dev/mmc/bridge.h | 5
-rw-r--r--  sys/dev/mmc/host/dwmmc.c | 109
-rw-r--r--  sys/dev/mmc/host/dwmmc_rockchip.c | 12
-rw-r--r--  sys/dev/mmc/host/dwmmc_starfive.c | 114
-rw-r--r--  sys/dev/mmc/mmc.c | 12
-rw-r--r--  sys/dev/mmc/mmc_fdt_helpers.c | 10
-rw-r--r--  sys/dev/mmc/mmc_subr.c | 2
-rw-r--r--  sys/dev/mmc/mmcbrvar.h | 12
-rw-r--r--  sys/dev/mmc/mmcreg.h | 1
-rw-r--r--  sys/dev/mmc/mmcsd.c | 2
-rw-r--r--  sys/dev/mmc/mmcspi.c | 2378
-rw-r--r--  sys/dev/mmcnull/mmcnull.c | 2
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_api.h | 4
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_cnfg.h | 368
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_image.h | 63
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_init.h | 17
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_ioc.h | 108
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_pci.h | 6
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_raid.h | 4
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_sas.h | 5
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_targ.h | 17
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_tool.h | 11
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_transport.h | 104
-rw-r--r--  sys/dev/mpi3mr/mpi/mpi30_type.h | 4
-rw-r--r--  sys/dev/mpi3mr/mpi3mr.c | 742
-rw-r--r--  sys/dev/mpi3mr/mpi3mr.h | 44
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_app.c | 33
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_app.h | 2
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_cam.c | 115
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_cam.h | 3
-rw-r--r--  sys/dev/mpi3mr/mpi3mr_pci.c | 45
-rw-r--r--  sys/dev/mpr/mpr.c | 33
-rw-r--r--  sys/dev/mpr/mpr_mapping.c | 18
-rw-r--r--  sys/dev/mpr/mpr_sas.c | 3
-rw-r--r--  sys/dev/mpr/mpr_sas_lsi.c | 3
-rw-r--r--  sys/dev/mpr/mprvar.h | 1
-rw-r--r--  sys/dev/mps/mps.c | 38
-rw-r--r--  sys/dev/mps/mps_pci.c | 4
-rw-r--r--  sys/dev/mps/mps_sas.c | 7
-rw-r--r--  sys/dev/mps/mps_sas_lsi.c | 3
-rw-r--r--  sys/dev/mpt/mpt.c | 4
-rw-r--r--  sys/dev/mpt/mpt_cam.c | 15
-rw-r--r--  sys/dev/mpt/mpt_debug.c | 2
-rw-r--r--  sys/dev/mpt/mpt_raid.c | 7
-rw-r--r--  sys/dev/mrsas/mrsas_ioctl.c | 13
-rw-r--r--  sys/dev/msk/if_msk.c | 52
-rw-r--r--  sys/dev/mvs/mvs.c | 6
-rw-r--r--  sys/dev/mvs/mvs_pci.c | 15
-rw-r--r--  sys/dev/mvs/mvs_soc.c | 22
-rw-r--r--  sys/dev/mwl/if_mwl.c | 29
-rw-r--r--  sys/dev/mxge/if_mxge.c | 11
-rw-r--r--  sys/dev/my/if_my.c | 6
-rw-r--r--  sys/dev/nctgpio/nctgpio.c | 3
-rw-r--r--  sys/dev/neta/if_mvneta.c | 21
-rw-r--r--  sys/dev/netmap/if_ptnet.c | 14
-rw-r--r--  sys/dev/netmap/netmap.c | 4
-rw-r--r--  sys/dev/netmap/netmap_freebsd.c | 23
-rw-r--r--  sys/dev/netmap/netmap_kern.h | 27
-rw-r--r--  sys/dev/netmap/netmap_mem2.c | 69
-rw-r--r--  sys/dev/netmap/netmap_mem2.h | 2
-rw-r--r--  sys/dev/nfe/if_nfe.c | 22
-rw-r--r--  sys/dev/nfsmb/nfsmb.c | 31
-rw-r--r--  sys/dev/nge/if_nge.c | 32
-rw-r--r--  sys/dev/ntb/if_ntb/if_ntb.c | 4
-rw-r--r--  sys/dev/ntb/ntb.c | 4
-rw-r--r--  sys/dev/ntb/ntb_transport.c | 4
-rw-r--r--  sys/dev/null/null.c | 48
-rw-r--r--  sys/dev/nvdimm/nvdimm_acpi.c | 27
-rw-r--r--  sys/dev/nvdimm/nvdimm_e820.c | 10
-rw-r--r--  sys/dev/nvme/nvme.c | 5
-rw-r--r--  sys/dev/nvme/nvme.h | 63
-rw-r--r--  sys/dev/nvme/nvme_ahci.c | 1
-rw-r--r--  sys/dev/nvme/nvme_ctrlr.c | 586
-rw-r--r--  sys/dev/nvme/nvme_ctrlr_cmd.c | 32
-rw-r--r--  sys/dev/nvme/nvme_linux.h | 58
-rw-r--r--  sys/dev/nvme/nvme_ns.c | 32
-rw-r--r--  sys/dev/nvme/nvme_ns_cmd.c | 24
-rw-r--r--  sys/dev/nvme/nvme_pci.c | 1
-rw-r--r--  sys/dev/nvme/nvme_private.h | 50
-rw-r--r--  sys/dev/nvme/nvme_qpair.c | 600
-rw-r--r--  sys/dev/nvme/nvme_sim.c | 31
-rw-r--r--  sys/dev/nvme/nvme_sysctl.c | 83
-rw-r--r--  sys/dev/nvme/nvme_util.c | 238
-rw-r--r--  sys/dev/nvmf/controller/ctl_frontend_nvmf.c | 196
-rw-r--r--  sys/dev/nvmf/controller/nvmft_controller.c | 78
-rw-r--r--  sys/dev/nvmf/controller/nvmft_qpair.c | 72
-rw-r--r--  sys/dev/nvmf/controller/nvmft_subr.c | 40
-rw-r--r--  sys/dev/nvmf/controller/nvmft_var.h | 19
-rw-r--r--  sys/dev/nvmf/host/nvmf.c | 640
-rw-r--r--  sys/dev/nvmf/host/nvmf_aer.c | 2
-rw-r--r--  sys/dev/nvmf/host/nvmf_ctldev.c | 15
-rw-r--r--  sys/dev/nvmf/host/nvmf_ns.c | 66
-rw-r--r--  sys/dev/nvmf/host/nvmf_qpair.c | 88
-rw-r--r--  sys/dev/nvmf/host/nvmf_sim.c | 29
-rw-r--r--  sys/dev/nvmf/host/nvmf_var.h | 46
-rw-r--r--  sys/dev/nvmf/nvmf.h | 131
-rw-r--r--  sys/dev/nvmf/nvmf_proto.h | 6
-rw-r--r--  sys/dev/nvmf/nvmf_tcp.c | 95
-rw-r--r--  sys/dev/nvmf/nvmf_tcp.h | 27
-rw-r--r--  sys/dev/nvmf/nvmf_transport.c | 102
-rw-r--r--  sys/dev/nvmf/nvmf_transport.h | 25
-rw-r--r--  sys/dev/nvmf/nvmf_transport_internal.h | 3
-rw-r--r--  sys/dev/oce/oce_if.c | 24
-rw-r--r--  sys/dev/ocs_fc/ocs_cam.c | 4
-rw-r--r--  sys/dev/ocs_fc/ocs_hw.c | 4
-rw-r--r--  sys/dev/ocs_fc/ocs_mgmt.c | 14
-rw-r--r--  sys/dev/ocs_fc/ocs_os.h | 2
-rw-r--r--  sys/dev/ofw/ofw_bus_subr.c | 107
-rw-r--r--  sys/dev/ofw/ofw_cpu.c | 91
-rw-r--r--  sys/dev/ofw/ofw_cpu.h | 2
-rw-r--r--  sys/dev/ofw/ofw_fdt.c | 3
-rw-r--r--  sys/dev/ofw/ofw_firmware.c | 3
-rw-r--r--  sys/dev/ofw/ofw_pcib.c | 25
-rw-r--r--  sys/dev/ofw/ofw_standard.c | 3
-rw-r--r--  sys/dev/ofw/ofwbus.c | 5
-rw-r--r--  sys/dev/ofw/openfirm.c | 32
-rw-r--r--  sys/dev/ofw/openfirm.h | 5
-rw-r--r--  sys/dev/otus/if_otus.c | 23
-rw-r--r--  sys/dev/ow/ow.c | 28
-rw-r--r--  sys/dev/ow/owc_gpiobus.c | 7
-rw-r--r--  sys/dev/p2sb/lewisburg_gpio.c | 3
-rw-r--r--  sys/dev/p2sb/lewisburg_gpiocm.c | 19
-rw-r--r--  sys/dev/p2sb/p2sb.c | 9
-rw-r--r--  sys/dev/pccbb/pccbb.c | 32
-rw-r--r--  sys/dev/pccbb/pccbb_pci.c | 48
-rw-r--r--  sys/dev/pcf/pcf_isa.c | 7
-rw-r--r--  sys/dev/pci/controller/pci_n1sdp.c | 5
-rw-r--r--  sys/dev/pci/fixup_pci.c | 1
-rw-r--r--  sys/dev/pci/hostb_pci.c | 6
-rw-r--r--  sys/dev/pci/ignore_pci.c | 9
-rw-r--r--  sys/dev/pci/isa_pci.c | 2
-rw-r--r--  sys/dev/pci/pci.c | 507
-rw-r--r--  sys/dev/pci/pci_dw.c | 3
-rw-r--r--  sys/dev/pci/pci_dw_mv.c | 4
-rw-r--r--  sys/dev/pci/pci_host_generic.c | 91
-rw-r--r--  sys/dev/pci/pci_host_generic.h | 2
-rw-r--r--  sys/dev/pci/pci_host_generic_acpi.c | 60
-rw-r--r--  sys/dev/pci/pci_host_generic_acpi.h | 1
-rw-r--r--  sys/dev/pci/pci_host_generic_den0115.c | 19
-rw-r--r--  sys/dev/pci/pci_host_generic_fdt.c | 8
-rw-r--r--  sys/dev/pci/pci_iov.c | 179
-rw-r--r--  sys/dev/pci/pci_iov_private.h | 2
-rw-r--r--  sys/dev/pci/pci_iov_schema.c | 64
-rw-r--r--  sys/dev/pci/pci_pci.c | 509
-rw-r--r--  sys/dev/pci/pci_private.h | 173
-rw-r--r--  sys/dev/pci/pci_subr.c | 5
-rw-r--r--  sys/dev/pci/pci_user.c | 333
-rw-r--r--  sys/dev/pci/pcib_private.h | 19
-rw-r--r--  sys/dev/pci/pcireg.h | 3
-rw-r--r--  sys/dev/pci/pcivar.h | 55
-rw-r--r--  sys/dev/pci/vga_pci.c | 37
-rw-r--r--  sys/dev/pms/RefTisa/sallsdk/spc/sampirsp.c | 2
-rw-r--r--  sys/dev/pms/freebsd/driver/ini/src/agtiapi.c | 39
-rw-r--r--  sys/dev/ppbus/if_plip.c | 8
-rw-r--r--  sys/dev/ppbus/lpbb.c | 6
-rw-r--r--  sys/dev/ppbus/lpt.c | 17
-rw-r--r--  sys/dev/ppbus/pcfclock.c | 4
-rw-r--r--  sys/dev/ppbus/ppb_msq.c | 2
-rw-r--r--  sys/dev/ppbus/ppbconf.c | 21
-rw-r--r--  sys/dev/ppbus/ppbconf.h | 2
-rw-r--r--  sys/dev/ppbus/ppi.c | 4
-rw-r--r--  sys/dev/ppbus/pps.c | 4
-rw-r--r--  sys/dev/ppc/ppc.c | 9
-rw-r--r--  sys/dev/proto/proto_bus_isa.c | 8
-rw-r--r--  sys/dev/proto/proto_bus_pci.c | 12
-rw-r--r--  sys/dev/proto/proto_core.c | 2
-rw-r--r--  sys/dev/psci/psci.c | 26
-rw-r--r--  sys/dev/psci/smccc.c | 55
-rw-r--r--  sys/dev/psci/smccc.h | 33
-rw-r--r--  sys/dev/psci/smccc_arm64.S | 7
-rw-r--r--  sys/dev/psci/smccc_errata.c | 139
-rw-r--r--  sys/dev/psci/smccc_trng.c | 143
-rw-r--r--  sys/dev/pst/pst-iop.c | 14
-rw-r--r--  sys/dev/pst/pst-pci.c | 5
-rw-r--r--  sys/dev/pst/pst-raid.c | 10
-rw-r--r--  sys/dev/puc/puc.c | 23
-rw-r--r--  sys/dev/puc/pucdata.c | 465
-rw-r--r--  sys/dev/pwm/controller/allwinner/aw_pwm.c | 8
-rw-r--r--  sys/dev/pwm/controller/rockchip/rk_pwm.c | 5
-rw-r--r--  sys/dev/pwm/ofw_pwmbus.c | 11
-rw-r--r--  sys/dev/pwm/pwmbus.c | 18
-rw-r--r--  sys/dev/qat/include/adf_cfg_device.h | 4
-rw-r--r--  sys/dev/qat/include/adf_dbgfs.h | 11
-rw-r--r--  sys/dev/qat/include/adf_gen4vf_hw_csr_data.h | 4
-rw-r--r--  sys/dev/qat/include/adf_heartbeat.h | 6
-rw-r--r--  sys/dev/qat/include/adf_pfvf_vf_msg.h | 3
-rw-r--r--  sys/dev/qat/include/common/adf_accel_devices.h | 18
-rw-r--r--  sys/dev/qat/include/common/adf_cfg_common.h | 4
-rw-r--r--  sys/dev/qat/include/common/adf_common_drv.h | 3
-rw-r--r--  sys/dev/qat/include/common/adf_gen4_hw_data.h | 6
-rw-r--r--  sys/dev/qat/include/common/adf_pfvf_msg.h | 8
-rw-r--r--  sys/dev/qat/include/common/adf_uio_cleanup.h | 3
-rw-r--r--  sys/dev/qat/include/common/adf_uio_control.h | 3
-rw-r--r--  sys/dev/qat/include/icp_qat_fw_init_admin.h | 9
-rw-r--r--  sys/dev/qat/qat/qat_ocf.c | 15
-rw-r--r--  sys/dev/qat/qat_api/common/compression/dc_datapath.c | 23
-rw-r--r--  sys/dev/qat/qat_api/common/compression/dc_session.c | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h | 26
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym.h | 12
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_alg_chain.h | 7
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cb.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cipher.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash.h | 13
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_defs.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_precomputes.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_key.h | 21
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_partial.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_cipher.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash_defs_lookup.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_stats.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c | 30
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c | 10
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c | 14
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_auth_enc.c | 3
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c | 15
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c | 7
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_compile_check.c | 7
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c | 7
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c | 14
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash_sw_precomputes.c | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c | 4
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c | 6
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c | 58
-rw-r--r--  sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c | 8
-rw-r--r--  sys/dev/qat/qat_api/common/ctrl/sal_compression.c | 8
-rw-r--r--  sys/dev/qat/qat_api/common/ctrl/sal_crypto.c | 231
-rw-r--r--  sys/dev/qat/qat_api/common/ctrl/sal_get_instances.c | 177
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_common.h | 3
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_hooks.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_mem.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_mem_pools.h | 14
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_sal.h | 10
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_sal_types.h | 6
-rw-r--r--  sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h | 23
-rw-r--r--  sys/dev/qat/qat_api/common/include/sal_qat_cmn_msg.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/include/sal_types_compression.h | 4
-rw-r--r--  sys/dev/qat/qat_api/common/qat_comms/sal_qat_cmn_msg.c | 4
-rw-r--r--  sys/dev/qat/qat_api/common/utils/lac_buffer_desc.c | 6
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h | 7
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h | 15
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw_la.h | 10
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp.h | 14
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp_ids.h | 5
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_fw_pke.h | 4
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h | 6
-rw-r--r--  sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h | 3
-rw-r--r--  sys/dev/qat/qat_api/include/cpa.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/cpa_dev.h | 41
-rw-r--r--  sys/dev/qat/qat_api/include/cpa_types.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/dc/cpa_dc.h | 46
-rw-r--r--  sys/dev/qat/qat_api/include/dc/cpa_dc_bp.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/dc/cpa_dc_chain.h | 89
-rw-r--r--  sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/icp_buffer_desc.h | 9
-rw-r--r--  sys/dev/qat/qat_api/include/icp_sal_user.h | 6
-rw-r--r--  sys/dev/qat/qat_api/include/icp_sal_versions.h | 4
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_common.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_dh.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_dsa.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_ec.h | 45
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_ecdh.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_ecdsa.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_ecsm2.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_im.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_key.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_kpt.h | 55
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_ln.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_prime.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_rsa.h | 37
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_sym.h | 87
-rw-r--r--  sys/dev/qat/qat_api/include/lac/cpa_cy_sym_dp.h | 41
-rw-r--r--  sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h | 4
-rw-r--r--  sys/dev/qat/qat_api/qat_direct/include/icp_adf_init.h | 4
-rw-r--r--  sys/dev/qat/qat_api/qat_kernel/src/lac_adf_interface_freebsd.c | 6
-rw-r--r--  sys/dev/qat/qat_api/qat_utils/include/qat_utils.h | 7
-rw-r--r--  sys/dev/qat/qat_api/qat_utils/src/QatUtilsServices.c | 8
-rw-r--r--  sys/dev/qat/qat_common/adf_aer.c | 19
-rw-r--r--  sys/dev/qat/qat_common/adf_cfg.c | 42
-rw-r--r--  sys/dev/qat/qat_common/adf_cfg_device.c | 14
-rw-r--r--  sys/dev/qat/qat_common/adf_cfg_sysctl.c | 15
-rw-r--r--  sys/dev/qat/qat_common/adf_clock.c | 34
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c | 6
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c | 60
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_dbgfs.c | 68
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_dev_processes.c | 26
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c | 150
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c | 6
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_transport_debug.c | 9
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_uio.c | 8
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c | 3
-rw-r--r--  sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c | 108
-rw-r--r--  sys/dev/qat/qat_common/adf_fw_counters.c | 46
-rw-r--r--  sys/dev/qat/qat_common/adf_gen2_hw_data.c | 6
-rw-r--r--  sys/dev/qat/qat_common/adf_gen4_pfvf.c | 3
-rw-r--r--  sys/dev/qat/qat_common/adf_gen4_timer.c | 2
-rw-r--r--  sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c | 8
-rw-r--r--  sys/dev/qat/qat_common/adf_init.c | 84
-rw-r--r--  sys/dev/qat/qat_common/adf_pfvf_vf_msg.c | 20
-rw-r--r--  sys/dev/qat/qat_common/adf_pfvf_vf_proto.c | 17
-rw-r--r--  sys/dev/qat/qat_common/adf_vf_isr.c | 15
-rw-r--r--  sys/dev/qat/qat_common/qat_hal.c | 17
-rw-r--r--  sys/dev/qat/qat_common/qat_uclo.c | 14
-rw-r--r--  sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h | 2
-rw-r--r--  sys/dev/qat/qat_hw/qat_200xx/adf_drv.c | 31
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c | 163
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h | 10
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c | 126
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c | 92
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.h | 14
-rw-r--r--  sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c | 48
-rw-r--r--  sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h | 2
-rw-r--r--  sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c | 31
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c | 7
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c | 6
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c | 6
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c | 6
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c | 15
-rw-r--r--  sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c | 31
-rw-r--r--  sys/dev/qat/qat_hw/qat_c62x/adf_drv.c | 31
-rw-r--r--  sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c | 32
-rw-r--r--  sys/dev/qcom_dwc3/qcom_dwc3.c | 13
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma.c | 985
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h | 52
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c | 351
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h | 63
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c | 462
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h | 46
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c | 752
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h | 86
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h | 429
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c | 514
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h | 51
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c | 454
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h (renamed from sys/dev/bhnd/cores/usb/bhnd_usbvar.h) | 50
-rw-r--r--  sys/dev/qcom_ess_edma/qcom_ess_edma_var.h | 258
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_clock.c | 98
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_ipq4018.h (renamed from sys/dev/sound/pcm/ac97_patch.h) | 22
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c | 84
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c | 20
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_main.c (renamed from sys/dev/qcom_gcc/qcom_gcc_ipq4018.c) | 124
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_reset.c | 64
-rw-r--r--  sys/dev/qcom_gcc/qcom_gcc_var.h (renamed from sys/dev/qcom_gcc/qcom_gcc_ipq4018_var.h) | 41
-rw-r--r--  sys/dev/qcom_qup/qcom_spi.c | 7
-rw-r--r--  sys/dev/qcom_rnd/qcom_rnd.c | 2
-rw-r--r--  sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c | 3
-rw-r--r--  sys/dev/qlnx/qlnxe/bcm_osal.h | 26
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore.h | 1
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore_dev.c | 6
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore_mcp.c | 50
-rw-r--r--  sys/dev/qlnx/qlnxe/ecore_mcp.h | 6
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_def.h | 18
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_os.c | 132
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_os.h | 4
-rw-r--r--  sys/dev/qlnx/qlnxe/qlnx_rdma.c | 3
-rw-r--r--  sys/dev/qlxgb/qla_os.c | 3
-rw-r--r--  sys/dev/qlxgbe/ql_ioctl.c | 4
-rw-r--r--  sys/dev/qlxgbe/ql_isr.c | 2
-rw-r--r--  sys/dev/qlxgbe/ql_os.c | 14
-rw-r--r--  sys/dev/qlxge/qls_os.c | 28
-rw-r--r--  sys/dev/quicc/quicc_core.c | 2
-rw-r--r--  sys/dev/ral/rt2560.c | 13
-rw-r--r--  sys/dev/ral/rt2661.c | 12
-rw-r--r--  sys/dev/ral/rt2860.c | 13
-rw-r--r--  sys/dev/random/armv8rng.c | 2
-rw-r--r--  sys/dev/random/darn.c | 2
-rw-r--r--  sys/dev/random/fenestrasX/fx_pool.c | 3
-rw-r--r--  sys/dev/random/fenestrasX/fx_rng.c | 2
-rw-r--r--  sys/dev/random/fortuna.c | 9
-rw-r--r--  sys/dev/random/fortuna.h | 4
-rw-r--r--  sys/dev/random/ivy.c | 2
-rw-r--r--  sys/dev/random/nehemiah.c | 2
-rw-r--r--  sys/dev/random/random_harvestq.c | 498
-rw-r--r--  sys/dev/random/random_harvestq.h | 13
-rw-r--r--  sys/dev/random/randomdev.c | 8
-rw-r--r--  sys/dev/random/randomdev.h | 7
-rw-r--r--  sys/dev/rccgpio/rccgpio.c | 3
-rw-r--r--  sys/dev/re/if_re.c | 26
-rw-r--r--  sys/dev/regulator/regulator_bus.c | 3
-rw-r--r--  sys/dev/regulator/regulator_fixed.c | 17
-rw-r--r--  sys/dev/rl/if_rl.c | 34
-rw-r--r--  sys/dev/rndtest/rndtest.c | 2
-rw-r--r--  sys/dev/rtsx/rtsx.c | 25
-rw-r--r--  sys/dev/rtwn/if_rtwn.c | 148
-rw-r--r--  sys/dev/rtwn/if_rtwn_cam.c | 6
-rw-r--r--  sys/dev/rtwn/if_rtwn_fw.c | 3
-rw-r--r--  sys/dev/rtwn/if_rtwn_nop.h | 6
-rw-r--r--  sys/dev/rtwn/if_rtwn_ridx.h | 80
-rw-r--r--  sys/dev/rtwn/if_rtwn_rx.c | 113
-rw-r--r--  sys/dev/rtwn/if_rtwn_rx.h | 4
-rw-r--r--  sys/dev/rtwn/if_rtwn_tx.c | 74
-rw-r--r--  sys/dev/rtwn/if_rtwnreg.h | 3
-rw-r--r--  sys/dev/rtwn/if_rtwnvar.h | 30
-rw-r--r--  sys/dev/rtwn/pci/rtwn_pci_rx.c | 30
-rw-r--r--  sys/dev/rtwn/rtl8188e/pci/r88ee_attach.c | 3
-rw-r--r--  sys/dev/rtwn/rtl8188e/r88e.h | 2
-rw-r--r--  sys/dev/rtwn/rtl8188e/r88e_beacon.c | 12
-rw-r--r--  sys/dev/rtwn/rtl8188e/r88e_chan.c | 36
-rw-r--r--  sys/dev/rtwn/rtl8188e/r88e_rx.c | 96
-rw-r--r--  sys/dev/rtwn/rtl8188e/r88e_rx_desc.h | 14
-rw-r--r--  sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c | 5
-rw-r--r--  sys/dev/rtwn/rtl8188e/usb/r88eu_init.c | 15
-rw-r--r--  sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c | 3
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c.h | 5
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_beacon.c | 21
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_chan.c | 102
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_fw.c | 65
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_fw_cmd.h | 1
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_reg.h | 33
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_rx.c | 8
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_tx.c | 251
-rw-r--r--  sys/dev/rtwn/rtl8192c/r92c_tx_desc.h | 3
-rw-r--r--  sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c | 3
-rw-r--r--  sys/dev/rtwn/rtl8192c/usb/r92cu_init.c | 8
-rw-r--r--  sys/dev/rtwn/rtl8192e/r92e.h | 1
-rw-r--r--  sys/dev/rtwn/rtl8192e/r92e_chan.c | 83
-rw-r--r--  sys/dev/rtwn/rtl8192e/r92e_init.c | 4
-rw-r--r--  sys/dev/rtwn/rtl8192e/r92e_rf.c | 3
-rw-r--r--  sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c | 3
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a.h | 3
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_beacon.c | 9
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_chan.c | 276
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_fw.c | 6
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_reg.h | 59
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_rx.c | 37
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_tx.c | 150
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_tx_desc.h | 6
-rw-r--r--  sys/dev/rtwn/rtl8812a/r12a_var.h | 1
-rw-r--r--  sys/dev/rtwn/rtl8812a/usb/r12au_attach.c | 26
-rw-r--r--  sys/dev/rtwn/rtl8812a/usb/r12au_init.c | 12
-rw-r--r--  sys/dev/rtwn/rtl8821a/usb/r21au_attach.c | 24
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_attach.c | 26
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_attach.h | 1
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_ep.c | 23
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_rx.c | 32
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_tx.c | 100
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_tx.h | 5
-rw-r--r--  sys/dev/rtwn/usb/rtwn_usb_var.h | 14
-rw-r--r--  sys/dev/safe/safe.c | 1
-rw-r--r--  sys/dev/sbni/if_sbni.c | 5
-rw-r--r--  sys/dev/sbni/if_sbni_isa.c | 7
-rw-r--r--  sys/dev/sbni/if_sbni_pci.c | 17
-rw-r--r--  sys/dev/sbni/if_sbnivar.h | 2
-rw-r--r--  sys/dev/scc/scc_core.c | 2
-rw-r--r--  sys/dev/sdhci/fsl_sdhci.c | 4
-rw-r--r--  sys/dev/sdhci/sdhci.c | 25
-rw-r--r--  sys/dev/sdhci/sdhci_acpi.c | 1
-rw-r--r--  sys/dev/sdhci/sdhci_fdt.c | 275
-rw-r--r--  sys/dev/sdhci/sdhci_fdt.h | 66
-rw-r--r--  sys/dev/sdhci/sdhci_fdt_cvitek.c | 144
-rw-r--r--  sys/dev/sdhci/sdhci_fdt_rockchip.c | 282
-rw-r--r--  sys/dev/sdhci/sdhci_fdt_xilinx.c | 115
-rw-r--r--  sys/dev/sdhci/sdhci_fsl_fdt.c | 3
-rw-r--r--  sys/dev/sdhci/sdhci_xenon.c | 2
-rw-r--r--  sys/dev/sdhci/sdhci_xenon_acpi.c | 2
-rw-r--r--  sys/dev/sdio/sdiob.c | 11
-rw-r--r--  sys/dev/sdio/sdiodevs | 32
-rw-r--r--  sys/dev/sff/sfp_fdt.c | 1
-rw-r--r--  sys/dev/sfxge/sfxge.c | 17
-rw-r--r--  sys/dev/sfxge/sfxge_rx.c | 6
-rw-r--r--  sys/dev/sfxge/sfxge_tx.c | 14
-rw-r--r--  sys/dev/sge/if_sge.c | 7
-rw-r--r--  sys/dev/siis/siis.c | 19
-rw-r--r--  sys/dev/sis/if_sis.c | 22
-rw-r--r--  sys/dev/sk/if_sk.c | 44
-rw-r--r--  sys/dev/smartpqi/smartpqi_event.c | 6
-rw-r--r--  sys/dev/smartpqi/smartpqi_queue.c | 4
-rw-r--r--  sys/dev/smartpqi/smartpqi_request.c | 2
-rw-r--r--  sys/dev/smbios/smbios.c | 170
-rw-r--r--  sys/dev/smbios/smbios.h | 8
-rw-r--r--  sys/dev/smbus/smb.c | 4
-rw-r--r--  sys/dev/smbus/smbconf.h | 2
-rw-r--r--  sys/dev/smbus/smbus.c | 12
-rw-r--r--  sys/dev/smc/if_smc.c | 9
-rw-r--r--  sys/dev/sound/driver.c | 9
-rw-r--r--  sys/dev/sound/dummy.c | 385
-rw-r--r--  sys/dev/sound/fdt/audio_soc.c | 12
-rw-r--r--  sys/dev/sound/macio/aoa.c | 6
-rw-r--r--  sys/dev/sound/macio/i2s.c | 6
-rw-r--r--  sys/dev/sound/macio/onyx.c | 1
-rw-r--r--  sys/dev/sound/midi/midi.c | 711
-rw-r--r--  sys/dev/sound/midi/midi.h | 15
-rw-r--r--  sys/dev/sound/midi/mpu401.c | 46
-rw-r--r--  sys/dev/sound/midi/mpu_if.m | 11
-rw-r--r--  sys/dev/sound/midi/sequencer.c | 2102
-rw-r--r--  sys/dev/sound/midi/synth_if.m | 312
-rw-r--r--  sys/dev/sound/pci/als4000.c | 11
-rw-r--r--  sys/dev/sound/pci/atiixp.c | 10
-rw-r--r--  sys/dev/sound/pci/cmi.c | 6
-rw-r--r--  sys/dev/sound/pci/cs4281.c | 6
-rw-r--r--  sys/dev/sound/pci/csa.c | 98
-rw-r--r--  sys/dev/sound/pci/csamidi.c | 1
-rw-r--r--  sys/dev/sound/pci/csapcm.c | 9
-rw-r--r--  sys/dev/sound/pci/csareg.h | 2
-rw-r--r--  sys/dev/sound/pci/emu10k1.c | 5
-rw-r--r--  sys/dev/sound/pci/emu10kx-midi.c | 1
-rw-r--r--  sys/dev/sound/pci/emu10kx-pcm.c | 9
-rw-r--r--  sys/dev/sound/pci/emu10kx.c | 185
-rw-r--r--  sys/dev/sound/pci/envy24.c | 7
-rw-r--r--  sys/dev/sound/pci/envy24ht.c | 8
-rw-r--r--  sys/dev/sound/pci/es137x.c | 6
-rw-r--r--  sys/dev/sound/pci/fm801.c | 15
-rw-r--r--  sys/dev/sound/pci/hda/hdaa.c | 105
-rw-r--r--  sys/dev/sound/pci/hda/hdaa_patches.c | 64
-rw-r--r--  sys/dev/sound/pci/hda/hdac.c | 64
-rw-r--r--  sys/dev/sound/pci/hda/hdac.h | 25
-rw-r--r--  sys/dev/sound/pci/hda/hdacc.c | 10
-rw-r--r--  sys/dev/sound/pci/hda/pin_patch_realtek.h | 15
-rw-r--r--  sys/dev/sound/pci/hdsp-pcm.c | 1136
-rw-r--r--  sys/dev/sound/pci/hdsp.c | 1022
-rw-r--r--  sys/dev/sound/pci/hdsp.h | 266
-rw-r--r--  sys/dev/sound/pci/hdspe-pcm.c | 83
-rw-r--r--  sys/dev/sound/pci/hdspe.c | 239
-rw-r--r--  sys/dev/sound/pci/hdspe.h | 78
-rw-r--r--  sys/dev/sound/pci/ich.c | 9
-rw-r--r--  sys/dev/sound/pci/maestro3.c | 15
-rw-r--r--  sys/dev/sound/pci/neomagic.c | 5
-rw-r--r--  sys/dev/sound/pci/solo.c | 7
-rw-r--r--  sys/dev/sound/pci/spicds.c | 2
-rw-r--r--  sys/dev/sound/pci/t4dwave.c | 6
-rw-r--r--  sys/dev/sound/pci/via8233.c | 6
-rw-r--r--  sys/dev/sound/pci/via82c686.c | 5
-rw-r--r--  sys/dev/sound/pci/vibes.c | 11
-rw-r--r--  sys/dev/sound/pcm/ac97.c | 119
-rw-r--r--  sys/dev/sound/pcm/ac97_patch.c | 117
-rw-r--r--  sys/dev/sound/pcm/buffer.c | 31
-rw-r--r--  sys/dev/sound/pcm/buffer.h | 2
-rw-r--r--  sys/dev/sound/pcm/channel.c | 471
-rw-r--r--  sys/dev/sound/pcm/channel.h | 73
-rw-r--r--  sys/dev/sound/pcm/dsp.c | 1132
-rw-r--r--  sys/dev/sound/pcm/dsp.h | 10
-rw-r--r--  sys/dev/sound/pcm/feeder.c | 139
-rw-r--r--  sys/dev/sound/pcm/feeder.h | 32
-rw-r--r--  sys/dev/sound/pcm/feeder_chain.c | 30
-rw-r--r--  sys/dev/sound/pcm/feeder_eq.c | 238
-rw-r--r--  sys/dev/sound/pcm/feeder_format.c | 130
-rw-r--r--  sys/dev/sound/pcm/feeder_matrix.c | 229
-rw-r--r--  sys/dev/sound/pcm/feeder_mixer.c | 176
-rw-r--r--  sys/dev/sound/pcm/feeder_rate.c | 53
-rw-r--r--  sys/dev/sound/pcm/feeder_volume.c | 17
-rw-r--r--  sys/dev/sound/pcm/g711.h | 44
-rw-r--r--  sys/dev/sound/pcm/intpcm.h | 135
-rw-r--r--  sys/dev/sound/pcm/matrix.h | 640
-rw-r--r--  sys/dev/sound/pcm/matrix_map.h | 672
-rw-r--r--  sys/dev/sound/pcm/mixer.c | 241
-rw-r--r--  sys/dev/sound/pcm/mixer.h | 2
-rw-r--r--  sys/dev/sound/pcm/pcm.h | 664
-rw-r--r--  sys/dev/sound/pcm/sndstat.c | 363
-rw-r--r--  sys/dev/sound/pcm/sound.c | 879
-rw-r--r--  sys/dev/sound/pcm/sound.h | 317
-rw-r--r--  sys/dev/sound/pcm/vchan.c | 595
-rw-r--r--  sys/dev/sound/pcm/vchan.h | 8
-rw-r--r--  sys/dev/sound/usb/uaudio.c | 81
-rw-r--r--  sys/dev/sound/usb/uaudio.h | 4
-rw-r--r--  sys/dev/sound/usb/uaudio_pcm.c | 1
-rw-r--r--  sys/dev/spibus/acpi_spibus.c | 5
-rw-r--r--  sys/dev/spibus/controller/allwinner/aw_spi.c | 7
-rw-r--r--  sys/dev/spibus/controller/rockchip/rk_spi.c | 7
-rw-r--r--  sys/dev/spibus/ofw_spibus.c | 14
-rw-r--r--  sys/dev/spibus/spibus.c | 45
-rw-r--r--  sys/dev/spibus/spibusvar.h | 2
-rw-r--r--  sys/dev/sram/mmio_sram.c | 5
-rw-r--r--  sys/dev/ste/if_ste.c | 32
-rw-r--r--  sys/dev/stge/if_stge.c | 10
-rw-r--r--  sys/dev/sume/if_sume.c | 28
-rw-r--r--  sys/dev/superio/superio.c | 29
-rw-r--r--  sys/dev/sym/sym_hipd.c | 12
-rw-r--r--  sys/dev/syscon/syscon_generic.c | 3
-rw-r--r--  sys/dev/syscons/syscons.c | 6
-rw-r--r--  sys/dev/syscons/sysmouse.c | 14
-rw-r--r--  sys/dev/thunderbolt/hcm.c | 223
-rw-r--r--  sys/dev/thunderbolt/hcm_var.h (renamed from sys/dev/sound/chip.h) | 38
-rw-r--r--  sys/dev/thunderbolt/nhi.c | 1170
-rw-r--r--  sys/dev/thunderbolt/nhi_pci.c | 529
-rw-r--r--  sys/dev/thunderbolt/nhi_reg.h | 332
-rw-r--r--  sys/dev/thunderbolt/nhi_var.h | 277
-rw-r--r--  sys/dev/thunderbolt/nhi_wmi.c | 198
-rw-r--r--  sys/dev/thunderbolt/router.c | 939
-rw-r--r--  sys/dev/thunderbolt/router_var.h | 242
-rw-r--r--  sys/dev/thunderbolt/tb_acpi_pcib.c | 181
-rw-r--r--  sys/dev/thunderbolt/tb_debug.c | 334
-rw-r--r--  sys/dev/thunderbolt/tb_debug.h | 93
-rw-r--r--  sys/dev/thunderbolt/tb_dev.c | 331
-rw-r--r--  sys/dev/thunderbolt/tb_dev.h (renamed from sys/dev/sound/version.h) | 27
-rw-r--r--  sys/dev/thunderbolt/tb_if.m (renamed from sys/dev/virtio/mmio/virtio_mmio_if.m) | 112
-rw-r--r--  sys/dev/thunderbolt/tb_ioctl.h | 52
-rw-r--r--  sys/dev/thunderbolt/tb_pcib.c | 614
-rw-r--r--  sys/dev/thunderbolt/tb_pcib.h | 93
-rw-r--r--  sys/dev/thunderbolt/tb_reg.h | 52
-rw-r--r--  sys/dev/thunderbolt/tb_var.h | 54
-rw-r--r--  sys/dev/thunderbolt/tbcfg_reg.h | 363
-rw-r--r--  sys/dev/ti/if_ti.c | 5
-rw-r--r--  sys/dev/tpm/tpm20.c | 21
-rw-r--r--sys/dev/tpm/tpm20.h1
-rw-r--r--sys/dev/tpm/tpm_bus.c2
-rw-r--r--sys/dev/tpm/tpm_crb.c1
-rw-r--r--sys/dev/tpm/tpm_if.m12
-rw-r--r--sys/dev/tpm/tpm_tis_acpi.c2
-rw-r--r--sys/dev/tpm/tpm_tis_core.c8
-rw-r--r--sys/dev/tsec/if_tsec.c6
-rw-r--r--sys/dev/tws/tws.c13
-rw-r--r--sys/dev/tws/tws_services.c2
-rw-r--r--sys/dev/uart/uart.h2
-rw-r--r--sys/dev/uart/uart_bus.h1
-rw-r--r--sys/dev/uart/uart_bus_fdt.c3
-rw-r--r--sys/dev/uart/uart_bus_pci.c16
-rw-r--r--sys/dev/uart/uart_core.c19
-rw-r--r--sys/dev/uart/uart_cpu_acpi.c169
-rw-r--r--sys/dev/uart/uart_cpu_acpi.h17
-rw-r--r--sys/dev/uart/uart_cpu_fdt.c2
-rw-r--r--sys/dev/uart/uart_dev_ns8250.c124
-rw-r--r--sys/dev/uart/uart_dev_ns8250.h1
-rw-r--r--sys/dev/uart/uart_dev_pl011.c58
-rw-r--r--sys/dev/uart/uart_dev_quicc.c4
-rw-r--r--sys/dev/uart/uart_dev_z8530.c4
-rw-r--r--sys/dev/uart/uart_subr.c17
-rw-r--r--sys/dev/uart/uart_tty.c2
-rw-r--r--sys/dev/ufshci/ufshci.c76
-rw-r--r--sys/dev/ufshci/ufshci.h1086
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr.c612
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr_cmd.c79
-rw-r--r--sys/dev/ufshci/ufshci_dev.c776
-rw-r--r--sys/dev/ufshci/ufshci_pci.c262
-rw-r--r--sys/dev/ufshci/ufshci_private.h570
-rw-r--r--sys/dev/ufshci/ufshci_reg.h469
-rw-r--r--sys/dev/ufshci/ufshci_req_queue.c799
-rw-r--r--sys/dev/ufshci/ufshci_req_sdb.c562
-rw-r--r--sys/dev/ufshci/ufshci_sim.c371
-rw-r--r--sys/dev/ufshci/ufshci_sysctl.c253
-rw-r--r--sys/dev/ufshci/ufshci_uic_cmd.c241
-rw-r--r--sys/dev/usb/controller/dwc3/aw_dwc3.c3
-rw-r--r--sys/dev/usb/controller/dwc3/dwc3.c3
-rw-r--r--sys/dev/usb/controller/dwc3/rk_dwc3.c3
-rw-r--r--sys/dev/usb/controller/dwc_otg.c2
-rw-r--r--sys/dev/usb/controller/dwc_otg_acpi.c6
-rw-r--r--sys/dev/usb/controller/dwc_otg_fdt.c9
-rw-r--r--sys/dev/usb/controller/ehci_fsl.c17
-rw-r--r--sys/dev/usb/controller/ehci_imx.c5
-rw-r--r--sys/dev/usb/controller/ehci_msm.c13
-rw-r--r--sys/dev/usb/controller/ehci_mv.c7
-rw-r--r--sys/dev/usb/controller/ehci_pci.c8
-rw-r--r--sys/dev/usb/controller/generic_ehci.c7
-rw-r--r--sys/dev/usb/controller/generic_ehci_acpi.c1
-rw-r--r--sys/dev/usb/controller/generic_ehci_fdt.c1
-rw-r--r--sys/dev/usb/controller/generic_ohci.c6
-rw-r--r--sys/dev/usb/controller/generic_xhci.c7
-rw-r--r--sys/dev/usb/controller/generic_xhci_acpi.c1
-rw-r--r--sys/dev/usb/controller/generic_xhci_fdt.c1
-rw-r--r--sys/dev/usb/controller/musb_otg_allwinner.c20
-rw-r--r--sys/dev/usb/controller/ohci_pci.c8
-rw-r--r--sys/dev/usb/controller/uhci_pci.c8
-rw-r--r--sys/dev/usb/controller/usb_controller.c9
-rw-r--r--sys/dev/usb/controller/xhci.c85
-rw-r--r--sys/dev/usb/controller/xhci_pci.c20
-rw-r--r--sys/dev/usb/controller/xhcireg.h5
-rw-r--r--sys/dev/usb/controller/xlnx_dwc3.c5
-rw-r--r--sys/dev/usb/input/atp.c1
-rw-r--r--sys/dev/usb/input/uhid.c11
-rw-r--r--sys/dev/usb/input/ukbd.c73
-rw-r--r--sys/dev/usb/input/ums.c6
-rw-r--r--sys/dev/usb/input/usbhid.c26
-rw-r--r--sys/dev/usb/input/wmt.c1
-rw-r--r--sys/dev/usb/input/wsp.c234
-rw-r--r--sys/dev/usb/misc/cp2112.c31
-rw-r--r--sys/dev/usb/misc/i2ctinyusb.c5
-rw-r--r--sys/dev/usb/misc/udbp.c1
-rw-r--r--sys/dev/usb/misc/ugold.c1
-rw-r--r--sys/dev/usb/misc/uled.c1
-rw-r--r--sys/dev/usb/net/if_aue.c1
-rw-r--r--sys/dev/usb/net/if_axe.c1
-rw-r--r--sys/dev/usb/net/if_axge.c1
-rw-r--r--sys/dev/usb/net/if_cdce.c2
-rw-r--r--sys/dev/usb/net/if_cdceem.c1
-rw-r--r--sys/dev/usb/net/if_cue.c1
-rw-r--r--sys/dev/usb/net/if_ipheth.c219
-rw-r--r--sys/dev/usb/net/if_iphethvar.h21
-rw-r--r--sys/dev/usb/net/if_kue.c1
-rw-r--r--sys/dev/usb/net/if_mos.c5
-rw-r--r--sys/dev/usb/net/if_muge.c1
-rw-r--r--sys/dev/usb/net/if_rue.c1
-rw-r--r--sys/dev/usb/net/if_smsc.c5
-rw-r--r--sys/dev/usb/net/if_udav.c1
-rw-r--r--sys/dev/usb/net/if_umb.c2928
-rw-r--r--sys/dev/usb/net/if_umbreg.h443
-rw-r--r--sys/dev/usb/net/if_ure.c12
-rw-r--r--sys/dev/usb/net/if_urndis.c23
-rw-r--r--sys/dev/usb/net/if_usie.c9
-rw-r--r--sys/dev/usb/net/mbim.h727
-rw-r--r--sys/dev/usb/net/ruephy.c1
-rw-r--r--sys/dev/usb/net/uhso.c4
-rw-r--r--sys/dev/usb/net/usb_ethernet.c22
-rw-r--r--sys/dev/usb/quirk/usb_quirk.c847
-rw-r--r--sys/dev/usb/serial/u3g.c12
-rw-r--r--sys/dev/usb/serial/ubsa.c1
-rw-r--r--sys/dev/usb/serial/ubser.c1
-rw-r--r--sys/dev/usb/serial/uchcom.c354
-rw-r--r--sys/dev/usb/serial/ucycom.c1
-rw-r--r--sys/dev/usb/serial/udbc.c404
-rw-r--r--sys/dev/usb/serial/ufoma.c1
-rw-r--r--sys/dev/usb/serial/uftdi.c19
-rw-r--r--sys/dev/usb/serial/uipaq.c1
-rw-r--r--sys/dev/usb/serial/ulpt.c1
-rw-r--r--sys/dev/usb/serial/umcs.c1
-rw-r--r--sys/dev/usb/serial/umct.c1
-rw-r--r--sys/dev/usb/serial/umodem.c1
-rw-r--r--sys/dev/usb/serial/uplcom.c1
-rw-r--r--sys/dev/usb/serial/usb_serial.c127
-rw-r--r--sys/dev/usb/serial/uslcom.c1
-rw-r--r--sys/dev/usb/serial/uvscom.c1
-rw-r--r--sys/dev/usb/storage/umass.c304
-rw-r--r--sys/dev/usb/storage/urio.c1
-rw-r--r--sys/dev/usb/template/usb_template_multi.c1
-rw-r--r--sys/dev/usb/template/usb_template_serialnet.c1
-rw-r--r--sys/dev/usb/usb.h2
-rw-r--r--sys/dev/usb/usb_bus.h2
-rw-r--r--sys/dev/usb/usb_dev.c7
-rw-r--r--sys/dev/usb/usb_dev.h2
-rw-r--r--sys/dev/usb/usb_device.c70
-rw-r--r--sys/dev/usb/usb_device.h2
-rw-r--r--sys/dev/usb/usb_freebsd.h1
-rw-r--r--sys/dev/usb/usb_freebsd_loader.h1
-rw-r--r--sys/dev/usb/usb_generic.c37
-rw-r--r--sys/dev/usb/usb_hub.c3
-rw-r--r--sys/dev/usb/usb_ioctl.h2
-rw-r--r--sys/dev/usb/usb_msctest.c13
-rw-r--r--sys/dev/usb/usb_pf.c6
-rw-r--r--sys/dev/usb/usb_process.c69
-rw-r--r--sys/dev/usb/usb_process.h2
-rw-r--r--sys/dev/usb/usbdevs103
-rw-r--r--sys/dev/usb/usbdi.h5
-rw-r--r--sys/dev/usb/video/udl.c7
-rw-r--r--sys/dev/usb/wlan/if_mtw.c4690
-rw-r--r--sys/dev/usb/wlan/if_mtwreg.h1439
-rw-r--r--sys/dev/usb/wlan/if_mtwvar.h387
-rw-r--r--sys/dev/usb/wlan/if_rsu.c78
-rw-r--r--sys/dev/usb/wlan/if_rsureg.h9
-rw-r--r--sys/dev/usb/wlan/if_rum.c17
-rw-r--r--sys/dev/usb/wlan/if_run.c27
-rw-r--r--sys/dev/usb/wlan/if_uath.c29
-rw-r--r--sys/dev/usb/wlan/if_upgt.c8
-rw-r--r--sys/dev/usb/wlan/if_ural.c14
-rw-r--r--sys/dev/usb/wlan/if_urtw.c14
-rw-r--r--sys/dev/usb/wlan/if_zyd.c11
-rw-r--r--sys/dev/vge/if_vge.c31
-rw-r--r--sys/dev/vge/if_vgevar.h1
-rw-r--r--sys/dev/viapm/viapm.c14
-rw-r--r--sys/dev/viawd/viawd.c2
-rw-r--r--sys/dev/virtio/block/virtio_blk.c74
-rw-r--r--sys/dev/virtio/console/virtio_console.c4
-rw-r--r--sys/dev/virtio/gpu/virtio_gpu.c16
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio.c60
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio.h1
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio_fdt.c47
-rw-r--r--sys/dev/virtio/network/if_vtnet.c620
-rw-r--r--sys/dev/virtio/network/if_vtnetvar.h12
-rw-r--r--sys/dev/virtio/network/virtio_net.h2
-rw-r--r--sys/dev/virtio/p9fs/virtio_p9fs.c494
-rw-r--r--sys/dev/virtio/p9fs/virtio_p9fs.h39
-rw-r--r--sys/dev/virtio/pci/virtio_pci.c14
-rw-r--r--sys/dev/virtio/pci/virtio_pci_legacy.c4
-rw-r--r--sys/dev/virtio/pci/virtio_pci_modern.c4
-rw-r--r--sys/dev/virtio/random/virtio_random.c2
-rw-r--r--sys/dev/virtio/scsi/virtio_scsi.c3
-rw-r--r--sys/dev/virtio/virtio_bus_if.m4
-rw-r--r--sys/dev/virtio/virtqueue.c4
-rw-r--r--sys/dev/vkbd/vkbd.c14
-rw-r--r--sys/dev/vmd/vmd.c18
-rw-r--r--sys/dev/vmgenc/vmgenc_acpi.c8
-rw-r--r--sys/dev/vmm/vmm_dev.c1209
-rw-r--r--sys/dev/vmm/vmm_dev.h70
-rw-r--r--sys/dev/vmm/vmm_ktr.h69
-rw-r--r--sys/dev/vmm/vmm_mem.c485
-rw-r--r--sys/dev/vmm/vmm_mem.h107
-rw-r--r--sys/dev/vmm/vmm_stat.c151
-rw-r--r--sys/dev/vmm/vmm_stat.h135
-rw-r--r--sys/dev/vmware/pvscsi/pvscsi.c4
-rw-r--r--sys/dev/vmware/vmci/vmci_kernel_if.c4
-rw-r--r--sys/dev/vmware/vmxnet3/if_vmx.c10
-rw-r--r--sys/dev/vnic/mrml_bridge.c5
-rw-r--r--sys/dev/vnic/nicvf_main.c17
-rw-r--r--sys/dev/vnic/thunder_mdio.c5
-rw-r--r--sys/dev/vnic/thunder_mdio_fdt.c5
-rw-r--r--sys/dev/vr/if_vr.c23
-rw-r--r--sys/dev/vt/hw/efifb/efifb.c22
-rw-r--r--sys/dev/vt/hw/fb/vt_early_fb.c1
-rw-r--r--sys/dev/vt/hw/fb/vt_fb.c84
-rw-r--r--sys/dev/vt/hw/fb/vt_fb.h1
-rw-r--r--sys/dev/vt/hw/ofwfb/ofwfb.c12
-rw-r--r--sys/dev/vt/hw/simplefb/simplefb.c12
-rw-r--r--sys/dev/vt/hw/vbefb/vbefb.c13
-rw-r--r--sys/dev/vt/hw/vga/vt_vga.c14
-rw-r--r--sys/dev/vt/vt.h5
-rw-r--r--sys/dev/vt/vt_core.c66
-rw-r--r--sys/dev/vt/vt_sysmouse.c14
-rw-r--r--sys/dev/vte/if_vte.c10
-rw-r--r--sys/dev/watchdog/watchdog.c214
-rw-r--r--sys/dev/wbwd/wbwd.c4
-rw-r--r--sys/dev/wg/if_wg.c203
-rw-r--r--sys/dev/wg/if_wg.h6
-rw-r--r--sys/dev/wpi/if_wpi.c13
-rw-r--r--sys/dev/wtap/if_wtap.c10
-rw-r--r--sys/dev/wtap/if_wtap_module.c35
-rw-r--r--sys/dev/wtap/if_wtapioctl.h2
-rw-r--r--sys/dev/wtap/if_wtapvar.h28
-rw-r--r--sys/dev/wtap/plugins/visibility.c40
-rw-r--r--sys/dev/wtap/plugins/visibility_ioctl.h2
-rw-r--r--sys/dev/xdma/xdma.c2
-rw-r--r--sys/dev/xen/blkback/blkback.c22
-rw-r--r--sys/dev/xen/blkfront/blkfront.c51
-rw-r--r--sys/dev/xen/bus/xen_intr.c8
-rw-r--r--sys/dev/xen/bus/xenpv.c84
-rw-r--r--sys/dev/xen/console/xen_console.c3
-rw-r--r--sys/dev/xen/control/control.c67
-rw-r--r--sys/dev/xen/debug/debug.c10
-rw-r--r--sys/dev/xen/efi/pvefi.c6
-rw-r--r--sys/dev/xen/gntdev/gntdev.c13
-rw-r--r--sys/dev/xen/netfront/netfront.c38
-rw-r--r--sys/dev/xen/pcifront/pcifront.c690
-rw-r--r--sys/dev/xen/privcmd/privcmd.c14
-rw-r--r--sys/dev/xen/xenpci/xenpci.c6
-rw-r--r--sys/dev/xen/xenstore/xenstore.c11
-rw-r--r--sys/dev/xilinx/axi_quad_spi.c34
-rw-r--r--sys/dev/xilinx/axidma.c9
-rw-r--r--sys/dev/xilinx/if_xae.c30
-rw-r--r--sys/dev/xl/if_xl.c23
-rw-r--r--sys/dev/xl/if_xlreg.h1
1645 files changed, 223104 insertions, 53581 deletions
diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c
index 34dbb56a66f7..2519c66e81d4 100644
--- a/sys/dev/aac/aac.c
+++ b/sys/dev/aac/aac.c
@@ -441,8 +441,7 @@ aac_startup(void *arg)
sc->aac_state &= ~AAC_STATE_SUSPEND;
/* poke the bus to actually attach the child devices */
- if (bus_generic_attach(sc->aac_dev))
- device_printf(sc->aac_dev, "bus_generic_attach failed\n");
+ bus_attach_children(sc->aac_dev);
/* disconnect ourselves from the intrhook chain */
config_intrhook_disestablish(&sc->aac_ich);
@@ -474,7 +473,7 @@ aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f)
mir->MntTable[0].FileSystemName,
mir->MntTable[0].Capacity, mir->MntTable[0].VolType);
- if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL)
+ if ((child = device_add_child(sc->aac_dev, "aacd", DEVICE_UNIT_ANY)) == NULL)
device_printf(sc->aac_dev, "device_add_child failed\n");
else
device_set_ivars(child, co);
@@ -670,6 +669,10 @@ aac_detach(device_t dev)
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
callout_drain(&sc->aac_daemontime);
mtx_lock(&sc->aac_io_lock);
@@ -684,9 +687,6 @@ aac_detach(device_t dev)
/* Remove the child containers */
while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
- error = device_delete_child(dev, co->co_disk);
- if (error)
- return (error);
TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
free(co, M_AACBUF);
}
@@ -694,9 +694,6 @@ aac_detach(device_t dev)
/* Remove the CAM SIMs */
while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
- error = device_delete_child(dev, sim->sim_dev);
- if (error)
- return (error);
free(sim, M_AACBUF);
}
@@ -3324,7 +3321,7 @@ aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib)
if (added) {
mtx_unlock(&sc->aac_io_lock);
bus_topo_lock();
- bus_generic_attach(sc->aac_dev);
+ bus_attach_children(sc->aac_dev);
bus_topo_unlock();
mtx_lock(&sc->aac_io_lock);
}
@@ -3785,7 +3782,7 @@ aac_get_bus_info(struct aac_softc *sc)
break;
}
- child = device_add_child(sc->aac_dev, "aacp", -1);
+ child = device_add_child(sc->aac_dev, "aacp", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->aac_dev,
"device_add_child failed for passthrough bus %d\n",
@@ -3808,5 +3805,5 @@ aac_get_bus_info(struct aac_softc *sc)
}
if (found)
- bus_generic_attach(sc->aac_dev);
+ bus_attach_children(sc->aac_dev);
}
diff --git a/sys/dev/aacraid/aacraid.c b/sys/dev/aacraid/aacraid.c
index 031a13ef021b..90a073d10039 100644
--- a/sys/dev/aacraid/aacraid.c
+++ b/sys/dev/aacraid/aacraid.c
@@ -335,7 +335,7 @@ aacraid_attach(struct aac_softc *sc)
aac_get_bus_info(sc);
/* poke the bus to actually attach the child devices */
- bus_generic_attach(sc->aac_dev);
+ bus_attach_children(sc->aac_dev);
/* mark the controller up */
sc->aac_state &= ~AAC_STATE_SUSPEND;
@@ -739,6 +739,10 @@ aacraid_detach(device_t dev)
sc = device_get_softc(dev);
fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
callout_drain(&sc->aac_daemontime);
/* Remove the child containers */
while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
@@ -749,9 +753,6 @@ aacraid_detach(device_t dev)
/* Remove the CAM SIMs */
while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
- error = device_delete_child(dev, sim->sim_dev);
- if (error)
- return (error);
free(sim, M_AACRAIDBUF);
}
@@ -3608,7 +3609,7 @@ aac_container_bus(struct aac_softc *sc)
"No memory to add container bus\n");
panic("Out of memory?!");
}
- child = device_add_child(sc->aac_dev, "aacraidp", -1);
+ child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->aac_dev,
"device_add_child failed for container bus\n");
@@ -3631,7 +3632,7 @@ aac_container_bus(struct aac_softc *sc)
device_set_desc(child, aac_describe_code(aac_container_types,
mir->MntTable[0].VolType));
*/
- bus_generic_attach(sc->aac_dev);
+ bus_attach_children(sc->aac_dev);
}
static void
@@ -3725,7 +3726,7 @@ aac_get_bus_info(struct aac_softc *sc)
break;
}
- child = device_add_child(sc->aac_dev, "aacraidp", -1);
+ child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->aac_dev,
"device_add_child failed for passthrough bus %d\n",
diff --git a/sys/dev/aacraid/aacraid_debug.c b/sys/dev/aacraid/aacraid_debug.c
index 0000e7d070a8..ca008cc082fc 100644
--- a/sys/dev/aacraid/aacraid_debug.c
+++ b/sys/dev/aacraid/aacraid_debug.c
@@ -51,13 +51,12 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/conf.h>
-
#include <sys/bus.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <dev/aacraid/aacraid_debug.h>
diff --git a/sys/dev/acpi_support/acpi_asus.c b/sys/dev/acpi_support/acpi_asus.c
index 6e63d8fabab1..b9ba6650d2b7 100644
--- a/sys/dev/acpi_support/acpi_asus.c
+++ b/sys/dev/acpi_support/acpi_asus.c
@@ -43,7 +43,6 @@
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
-#include <sys/sbuf.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
@@ -535,7 +534,6 @@ acpi_asus_probe(device_t dev)
{
struct acpi_asus_model *model;
struct acpi_asus_softc *sc;
- struct sbuf *sb;
ACPI_BUFFER Buf;
ACPI_OBJECT Arg, *Obj;
ACPI_OBJECT_LIST Args;
@@ -599,24 +597,17 @@ acpi_asus_probe(device_t dev)
}
}
- sb = sbuf_new_auto();
- if (sb == NULL)
- return (ENOMEM);
-
/*
* Asus laptops are simply identified by name, easy!
*/
for (model = acpi_asus_models; model->name != NULL; model++) {
if (strncmp(Obj->String.Pointer, model->name, 3) == 0) {
good:
- sbuf_printf(sb, "Asus %s Laptop Extras",
- Obj->String.Pointer);
- sbuf_finish(sb);
-
sc->model = model;
- device_set_desc_copy(dev, sbuf_data(sb));
- sbuf_delete(sb);
+ device_set_descf(dev, "Asus %s Laptop Extras",
+ Obj->String.Pointer);
+
AcpiOsFree(Buf.Pointer);
return (rv);
}
@@ -695,12 +686,9 @@ good:
}
}
- sbuf_printf(sb, "Unsupported Asus laptop: %s\n", Obj->String.Pointer);
- sbuf_finish(sb);
-
- device_printf(dev, "%s", sbuf_data(sb));
+ device_printf(dev, "Unsupported Asus laptop: %s\n",
+ Obj->String.Pointer);
- sbuf_delete(sb);
AcpiOsFree(Buf.Pointer);
return (ENXIO);
diff --git a/sys/dev/acpi_support/acpi_asus_wmi.c b/sys/dev/acpi_support/acpi_asus_wmi.c
index 853696e67f84..0198ccada3ed 100644
--- a/sys/dev/acpi_support/acpi_asus_wmi.c
+++ b/sys/dev/acpi_support/acpi_asus_wmi.c
@@ -26,6 +26,7 @@
#include <sys/cdefs.h>
#include "opt_acpi.h"
+#include "opt_evdev.h"
#include <sys/param.h>
#include <sys/conf.h>
#include <sys/uio.h>
@@ -41,6 +42,15 @@
#include <dev/acpica/acpivar.h>
#include "acpi_wmi_if.h"
+#include <dev/backlight/backlight.h>
+#include "backlight_if.h"
+
+#ifdef EVDEV_SUPPORT
+#include <dev/evdev/input.h>
+#include <dev/evdev/evdev.h>
+#define NO_KEY KEY_RESERVED
+#endif
+
#define _COMPONENT ACPI_OEM
ACPI_MODULE_NAME("ASUS-WMI")
@@ -89,9 +99,11 @@ ACPI_MODULE_NAME("ASUS-WMI")
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
#define ASUS_WMI_DEVID_TOUCHPAD 0x00100011
#define ASUS_WMI_DEVID_TOUCHPAD_LED 0x00100012
+#define ASUS_WMI_DEVID_TUF_RGB_MODE 0x00100056
#define ASUS_WMI_DEVID_THERMAL_CTRL 0x00110011
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
/* DSTS masks */
#define ASUS_WMI_DSTS_STATUS_BIT 0x00000001
@@ -102,6 +114,12 @@ ACPI_MODULE_NAME("ASUS-WMI")
#define ASUS_WMI_DSTS_BRIGHTNESS_MASK 0x000000FF
#define ASUS_WMI_DSTS_MAX_BRIGTH_MASK 0x0000FF00
+/* Events */
+#define ASUS_WMI_EVENT_QUEUE_SIZE 0x10
+#define ASUS_WMI_EVENT_QUEUE_END 0x1
+#define ASUS_WMI_EVENT_MASK 0xFFFF
+#define ASUS_WMI_EVENT_VALUE_ATK 0xFF
+
struct acpi_asus_wmi_softc {
device_t dev;
device_t wmi_dev;
@@ -110,6 +128,14 @@ struct acpi_asus_wmi_softc {
struct sysctl_oid *sysctl_tree;
int dsts_id;
int handle_keys;
+ bool event_queue;
+ struct cdev *kbd_bkl;
+ uint32_t kbd_bkl_level;
+ uint32_t tuf_rgb_mode;
+ uint32_t ttp_mode;
+#ifdef EVDEV_SUPPORT
+ struct evdev_dev *evdev;
+#endif
};
static struct {
@@ -250,33 +276,133 @@ static struct {
.dev_id = ASUS_WMI_DEVID_PROCESSOR_STATE,
.flag_rdonly = 1
},
+ {
+ .name = "throttle_thermal_policy",
+ .dev_id = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ .description = "Throttle Thermal Policy "
+ "(0 - default, 1 - overboost, 2 - silent)",
+ },
{ NULL, 0, NULL, 0 }
};
+#ifdef EVDEV_SUPPORT
+static const struct {
+ UINT32 notify;
+ uint16_t key;
+} acpi_asus_wmi_evdev_map[] = {
+ { 0x20, KEY_BRIGHTNESSDOWN },
+ { 0x2f, KEY_BRIGHTNESSUP },
+ { 0x30, KEY_VOLUMEUP },
+ { 0x31, KEY_VOLUMEDOWN },
+ { 0x32, KEY_MUTE },
+ { 0x35, KEY_SCREENLOCK },
+ { 0x38, KEY_PROG3 }, /* Armoury Crate */
+ { 0x40, KEY_PREVIOUSSONG },
+ { 0x41, KEY_NEXTSONG },
+ { 0x43, KEY_STOPCD }, /* Stop/Eject */
+ { 0x45, KEY_PLAYPAUSE },
+ { 0x4f, KEY_LEFTMETA }, /* Fn-locked "Windows" Key */
+ { 0x4c, KEY_MEDIA }, /* WMP Key */
+ { 0x50, KEY_EMAIL },
+ { 0x51, KEY_WWW },
+ { 0x55, KEY_CALC },
+ { 0x57, NO_KEY }, /* Battery mode */
+ { 0x58, NO_KEY }, /* AC mode */
+ { 0x5C, KEY_F15 }, /* Power Gear key */
+ { 0x5D, KEY_WLAN }, /* Wireless console Toggle */
+ { 0x5E, KEY_WLAN }, /* Wireless console Enable */
+ { 0x5F, KEY_WLAN }, /* Wireless console Disable */
+ { 0x60, KEY_TOUCHPAD_ON },
+ { 0x61, KEY_SWITCHVIDEOMODE }, /* SDSP LCD only */
+ { 0x62, KEY_SWITCHVIDEOMODE }, /* SDSP CRT only */
+ { 0x63, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT */
+ { 0x64, KEY_SWITCHVIDEOMODE }, /* SDSP TV */
+ { 0x65, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + TV */
+ { 0x66, KEY_SWITCHVIDEOMODE }, /* SDSP CRT + TV */
+ { 0x67, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT + TV */
+ { 0x6B, KEY_TOUCHPAD_TOGGLE },
+ { 0x6E, NO_KEY }, /* Low Battery notification */
+ { 0x71, KEY_F13 }, /* General-purpose button */
+	{ 0x79, NO_KEY },	/* Charger type detection notification */
+ { 0x7a, KEY_ALS_TOGGLE }, /* Ambient Light Sensor Toggle */
+ { 0x7c, KEY_MICMUTE },
+ { 0x7D, KEY_BLUETOOTH }, /* Bluetooth Enable */
+ { 0x7E, KEY_BLUETOOTH }, /* Bluetooth Disable */
+ { 0x82, KEY_CAMERA },
+ { 0x86, KEY_PROG1 }, /* MyASUS Key */
+ { 0x88, KEY_RFKILL }, /* Radio Toggle Key */
+ { 0x8A, KEY_PROG1 }, /* Color enhancement mode */
+ { 0x8C, KEY_SWITCHVIDEOMODE }, /* SDSP DVI only */
+ { 0x8D, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + DVI */
+ { 0x8E, KEY_SWITCHVIDEOMODE }, /* SDSP CRT + DVI */
+ { 0x8F, KEY_SWITCHVIDEOMODE }, /* SDSP TV + DVI */
+ { 0x90, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT + DVI */
+ { 0x91, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + TV + DVI */
+ { 0x92, KEY_SWITCHVIDEOMODE }, /* SDSP CRT + TV + DVI */
+ { 0x93, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT + TV + DVI */
+ { 0x95, KEY_MEDIA },
+ { 0x99, KEY_PHONE }, /* Conflicts with fan mode switch */
+ { 0xA0, KEY_SWITCHVIDEOMODE }, /* SDSP HDMI only */
+ { 0xA1, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + HDMI */
+ { 0xA2, KEY_SWITCHVIDEOMODE }, /* SDSP CRT + HDMI */
+ { 0xA3, KEY_SWITCHVIDEOMODE }, /* SDSP TV + HDMI */
+ { 0xA4, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT + HDMI */
+ { 0xA5, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + TV + HDMI */
+ { 0xA6, KEY_SWITCHVIDEOMODE }, /* SDSP CRT + TV + HDMI */
+ { 0xA7, KEY_SWITCHVIDEOMODE }, /* SDSP LCD + CRT + TV + HDMI */
+ { 0xAE, KEY_FN_F5 }, /* Fn+F5 fan mode on 2020+ */
+ { 0xB3, KEY_PROG4 }, /* AURA */
+ { 0xB5, KEY_CALC },
+ { 0xC4, KEY_KBDILLUMUP },
+ { 0xC5, KEY_KBDILLUMDOWN },
+ { 0xC6, NO_KEY }, /* Ambient Light Sensor notification */
+ { 0xFA, KEY_PROG2 }, /* Lid flip action */
+ { 0xBD, KEY_PROG2 }, /* Lid flip action on ROG xflow laptops */
+};
+#endif
+
ACPI_SERIAL_DECL(asus_wmi, "ASUS WMI device");
static void acpi_asus_wmi_identify(driver_t *driver, device_t parent);
static int acpi_asus_wmi_probe(device_t dev);
static int acpi_asus_wmi_attach(device_t dev);
static int acpi_asus_wmi_detach(device_t dev);
+static int acpi_asus_wmi_suspend(device_t dev);
+static int acpi_asus_wmi_resume(device_t dev);
static int acpi_asus_wmi_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_asus_wmi_sysctl_set(struct acpi_asus_wmi_softc *sc, int dev_id,
int arg, int oldarg);
static int acpi_asus_wmi_sysctl_get(struct acpi_asus_wmi_softc *sc, int dev_id);
static int acpi_asus_wmi_evaluate_method(device_t wmi_dev, int method,
- UINT32 arg0, UINT32 arg1, UINT32 *retval);
+ UINT32 arg0, UINT32 arg1, UINT32 arg2, UINT32 *retval);
static int acpi_wpi_asus_get_devstate(struct acpi_asus_wmi_softc *sc,
UINT32 dev_id, UINT32 *retval);
static int acpi_wpi_asus_set_devstate(struct acpi_asus_wmi_softc *sc,
UINT32 dev_id, UINT32 ctrl_param, UINT32 *retval);
+static int acpi_asus_wmi_get_event_code(device_t wmi_dev, UINT32 notify,
+ int *code);
static void acpi_asus_wmi_notify(ACPI_HANDLE h, UINT32 notify, void *context);
+static int acpi_asus_wmi_backlight_update_status(device_t dev,
+ struct backlight_props *props);
+static int acpi_asus_wmi_backlight_get_status(device_t dev,
+ struct backlight_props *props);
+static int acpi_asus_wmi_backlight_get_info(device_t dev,
+ struct backlight_info *info);
static device_method_t acpi_asus_wmi_methods[] = {
+ /* Device interface */
DEVMETHOD(device_identify, acpi_asus_wmi_identify),
DEVMETHOD(device_probe, acpi_asus_wmi_probe),
DEVMETHOD(device_attach, acpi_asus_wmi_attach),
DEVMETHOD(device_detach, acpi_asus_wmi_detach),
+ DEVMETHOD(device_suspend, acpi_asus_wmi_suspend),
+ DEVMETHOD(device_resume, acpi_asus_wmi_resume),
+
+ /* Backlight interface */
+ DEVMETHOD(backlight_update_status, acpi_asus_wmi_backlight_update_status),
+ DEVMETHOD(backlight_get_status, acpi_asus_wmi_backlight_get_status),
+ DEVMETHOD(backlight_get_info, acpi_asus_wmi_backlight_get_info),
DEVMETHOD_END
};
@@ -290,6 +416,34 @@ static driver_t acpi_asus_wmi_driver = {
DRIVER_MODULE(acpi_asus_wmi, acpi_wmi, acpi_asus_wmi_driver, 0, 0);
MODULE_DEPEND(acpi_asus_wmi, acpi_wmi, 1, 1, 1);
MODULE_DEPEND(acpi_asus_wmi, acpi, 1, 1, 1);
+MODULE_DEPEND(acpi_asus_wmi, backlight, 1, 1, 1);
+#ifdef EVDEV_SUPPORT
+MODULE_DEPEND(acpi_asus_wmi, evdev, 1, 1, 1);
+#endif
+
+static const uint32_t acpi_asus_wmi_backlight_levels[] = { 0, 33, 66, 100 };
+
+static inline uint32_t
+devstate_to_kbd_bkl_level(UINT32 val)
+{
+ return (acpi_asus_wmi_backlight_levels[val & 0x3]);
+}
+
+static inline UINT32
+kbd_bkl_level_to_devstate(uint32_t bkl)
+{
+ UINT32 val;
+ int i;
+
+ for (i = 0; i < nitems(acpi_asus_wmi_backlight_levels); i++) {
+ if (bkl < acpi_asus_wmi_backlight_levels[i])
+ break;
+ }
+ val = (i - 1) & 0x3;
+ if (val != 0)
+ val |= 0x80;
+	return (val);
+}
static void
acpi_asus_wmi_identify(driver_t *driver, device_t parent)
@@ -300,7 +454,7 @@ acpi_asus_wmi_identify(driver_t *driver, device_t parent)
return;
/* Add only a single device instance. */
- if (device_find_child(parent, "acpi_asus_wmi", -1) != NULL)
+ if (device_find_child(parent, "acpi_asus_wmi", DEVICE_UNIT_ANY) != NULL)
return;
/* Check management GUID to see whether system is compatible. */
@@ -308,7 +462,7 @@ acpi_asus_wmi_identify(driver_t *driver, device_t parent)
ACPI_ASUS_WMI_MGMT_GUID))
return;
- if (BUS_ADD_CHILD(parent, 0, "acpi_asus_wmi", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 0, "acpi_asus_wmi", DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "add acpi_asus_wmi child failed\n");
}
@@ -328,7 +482,8 @@ acpi_asus_wmi_attach(device_t dev)
{
struct acpi_asus_wmi_softc *sc;
UINT32 val;
- int dev_id, i;
+ int dev_id, i, code;
+ bool have_kbd_bkl = false;
ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
@@ -383,14 +538,14 @@ next:
/* Initialize. */
if (!acpi_asus_wmi_evaluate_method(sc->wmi_dev,
- ASUS_WMI_METHODID_INIT, 0, 0, &val) && bootverbose)
+ ASUS_WMI_METHODID_INIT, 0, 0, 0, &val) && bootverbose)
device_printf(dev, "Initialization: %#x\n", val);
if (!acpi_asus_wmi_evaluate_method(sc->wmi_dev,
- ASUS_WMI_METHODID_SPEC, 0, 0x9, &val) && bootverbose)
+ ASUS_WMI_METHODID_SPEC, 0, 0x9, 0, &val) && bootverbose)
device_printf(dev, "WMI BIOS version: %d.%d\n",
val >> 16, val & 0xFF);
if (!acpi_asus_wmi_evaluate_method(sc->wmi_dev,
- ASUS_WMI_METHODID_SFUN, 0, 0, &val) && bootverbose)
+ ASUS_WMI_METHODID_SFUN, 0, 0, 0, &val) && bootverbose)
device_printf(dev, "SFUN value: %#x\n", val);
ACPI_SERIAL_BEGIN(asus_wmi);
@@ -413,6 +568,10 @@ next:
if (val == 0)
continue;
break;
+ case ASUS_WMI_DEVID_KBD_BACKLIGHT:
+ sc->kbd_bkl_level = devstate_to_kbd_bkl_level(val);
+ have_kbd_bkl = true;
+ /* FALLTHROUGH */
default:
if ((val & ASUS_WMI_DSTS_PRESENCE_BIT) == 0)
continue;
@@ -437,6 +596,54 @@ next:
}
ACPI_SERIAL_END(asus_wmi);
+ /* Detect and flush event queue */
+ if (sc->dsts_id == ASUS_WMI_METHODID_DSTS2) {
+ for (i = 0; i <= ASUS_WMI_EVENT_QUEUE_SIZE; i++) {
+ if (acpi_asus_wmi_get_event_code(sc->wmi_dev,
+ ASUS_WMI_EVENT_VALUE_ATK, &code) != 0) {
+ device_printf(dev,
+ "Can not flush event queue\n");
+ break;
+ }
+ if (code == ASUS_WMI_EVENT_QUEUE_END ||
+ code == ASUS_WMI_EVENT_MASK) {
+ sc->event_queue = true;
+ break;
+ }
+ }
+ }
+
+#ifdef EVDEV_SUPPORT
+ if (sc->notify_guid != NULL) {
+ sc->evdev = evdev_alloc();
+ evdev_set_name(sc->evdev, device_get_desc(dev));
+ evdev_set_phys(sc->evdev, device_get_nameunit(dev));
+ evdev_set_id(sc->evdev, BUS_HOST, 0, 0, 1);
+ evdev_support_event(sc->evdev, EV_SYN);
+ evdev_support_event(sc->evdev, EV_KEY);
+ for (i = 0; i < nitems(acpi_asus_wmi_evdev_map); i++) {
+ if (acpi_asus_wmi_evdev_map[i].key != NO_KEY)
+ evdev_support_key(sc->evdev,
+ acpi_asus_wmi_evdev_map[i].key);
+ }
+
+ if (evdev_register(sc->evdev) != 0) {
+ device_printf(dev, "Can not register evdev\n");
+ acpi_asus_wmi_detach(dev);
+ return (ENXIO);
+ }
+ }
+#endif
+
+ if (have_kbd_bkl) {
+ sc->kbd_bkl = backlight_register("acpi_asus_wmi", dev);
+ if (sc->kbd_bkl == NULL) {
+ device_printf(dev, "Can not register backlight\n");
+ acpi_asus_wmi_detach(dev);
+ return (ENXIO);
+ }
+ }
+
return (0);
}
@@ -447,8 +654,43 @@ acpi_asus_wmi_detach(device_t dev)
ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
- if (sc->notify_guid)
+ if (sc->kbd_bkl != NULL)
+ backlight_destroy(sc->kbd_bkl);
+
+ if (sc->notify_guid) {
ACPI_WMI_REMOVE_EVENT_HANDLER(dev, sc->notify_guid);
+#ifdef EVDEV_SUPPORT
+ evdev_free(sc->evdev);
+#endif
+ }
+
+ return (0);
+}
+
+static int
+acpi_asus_wmi_suspend(device_t dev)
+{
+ struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
+
+ if (sc->kbd_bkl != NULL) {
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+ acpi_wpi_asus_set_devstate(sc,
+ ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, NULL);
+ }
+
+ return (0);
+}
+
+static int
+acpi_asus_wmi_resume(device_t dev)
+{
+ struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
+
+ if (sc->kbd_bkl != NULL) {
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t) __func__);
+ acpi_wpi_asus_set_devstate(sc, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ kbd_bkl_level_to_devstate(sc->kbd_bkl_level), NULL);
+ }
return (0);
}
@@ -488,6 +730,13 @@ acpi_asus_wmi_sysctl_get(struct acpi_asus_wmi_softc *sc, int dev_id)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
ACPI_SERIAL_ASSERT(asus_wmi);
+ switch(dev_id) {
+ case ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY:
+ return (sc->ttp_mode);
+ default:
+ break;
+ }
+
acpi_wpi_asus_get_devstate(sc, dev_id, &val);
switch(dev_id) {
@@ -501,7 +750,7 @@ acpi_asus_wmi_sysctl_get(struct acpi_asus_wmi_softc *sc, int dev_id)
val &= ASUS_WMI_DSTS_BRIGHTNESS_MASK;
break;
case ASUS_WMI_DEVID_KBD_BACKLIGHT:
- val &= 0x7;
+ val &= 0x3;
break;
default:
if (val & ASUS_WMI_DSTS_UNKNOWN_BIT)
@@ -522,10 +771,14 @@ acpi_asus_wmi_sysctl_set(struct acpi_asus_wmi_softc *sc, int dev_id, int arg, in
switch(dev_id) {
case ASUS_WMI_DEVID_KBD_BACKLIGHT:
- arg = min(0x7, arg);
+ arg = min(0x3, arg);
if (arg != 0)
arg |= 0x80;
break;
+ case ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY:
+ arg = min(0x2, arg);
+ sc->ttp_mode = arg;
+ break;
}
acpi_wpi_asus_set_devstate(sc, dev_id, arg, NULL);
@@ -540,32 +793,65 @@ acpi_asus_wmi_free_buffer(ACPI_BUFFER* buf) {
}
}
-static void
-acpi_asus_wmi_notify(ACPI_HANDLE h, UINT32 notify, void *context)
+static int
+acpi_asus_wmi_get_event_code(device_t wmi_dev, UINT32 notify, int *code)
{
- device_t dev = context;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify);
- UINT32 val;
- int code = 0;
-
- struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
ACPI_BUFFER response = { ACPI_ALLOCATE_BUFFER, NULL };
ACPI_OBJECT *obj;
- ACPI_WMI_GET_EVENT_DATA(sc->wmi_dev, notify, &response);
+ int error = 0;
+
+ if (ACPI_FAILURE(ACPI_WMI_GET_EVENT_DATA(wmi_dev, notify, &response)))
+ return (EIO);
obj = (ACPI_OBJECT*) response.Pointer;
- if (obj && obj->Type == ACPI_TYPE_INTEGER) {
- code = obj->Integer.Value;
+ if (obj && obj->Type == ACPI_TYPE_INTEGER)
+ *code = obj->Integer.Value & ASUS_WMI_EVENT_MASK;
+ else
+ error = EINVAL;
+ acpi_asus_wmi_free_buffer(&response);
+ return (error);
+}
+
+#ifdef EVDEV_SUPPORT
+static void
+acpi_asus_wmi_push_evdev_event(struct evdev_dev *evdev, UINT32 notify)
+{
+ int i;
+ uint16_t key;
+
+ for (i = 0; i < nitems(acpi_asus_wmi_evdev_map); i++) {
+ if (acpi_asus_wmi_evdev_map[i].notify == notify &&
+ acpi_asus_wmi_evdev_map[i].key != NO_KEY) {
+ key = acpi_asus_wmi_evdev_map[i].key;
+ evdev_push_key(evdev, key, 1);
+ evdev_sync(evdev);
+ evdev_push_key(evdev, key, 0);
+ evdev_sync(evdev);
+ break;
+ }
+ }
+}
+#endif
+
+static void
+acpi_asus_wmi_handle_event(struct acpi_asus_wmi_softc *sc, int code)
+{
+ UINT32 val;
+
+ if (code != 0) {
acpi_UserNotify("ASUS", ACPI_ROOT_OBJECT,
code);
+#ifdef EVDEV_SUPPORT
+ acpi_asus_wmi_push_evdev_event(sc->evdev, code);
+#endif
}
if (code && sc->handle_keys) {
/* Keyboard backlight control. */
if (code == 0xc4 || code == 0xc5) {
acpi_wpi_asus_get_devstate(sc,
ASUS_WMI_DEVID_KBD_BACKLIGHT, &val);
- val &= 0x7;
+ val &= 0x3;
if (code == 0xc4) {
- if (val < 0x7)
+ if (val < 0x3)
val++;
} else if (val > 0)
val--;
@@ -573,6 +859,7 @@ acpi_asus_wmi_notify(ACPI_HANDLE h, UINT32 notify, void *context)
val |= 0x80;
acpi_wpi_asus_set_devstate(sc,
ASUS_WMI_DEVID_KBD_BACKLIGHT, val, NULL);
+ sc->kbd_bkl_level = devstate_to_kbd_bkl_level(val);
}
/* Touchpad control. */
if (code == 0x6b) {
@@ -582,15 +869,68 @@ acpi_asus_wmi_notify(ACPI_HANDLE h, UINT32 notify, void *context)
acpi_wpi_asus_set_devstate(sc,
ASUS_WMI_DEVID_TOUCHPAD, val, NULL);
}
+ /* Throttle thermal policy control. */
+ if (code == 0xae) {
+ sc->ttp_mode++;
+ if (sc->ttp_mode > 2)
+ sc->ttp_mode = 0;
+ acpi_wpi_asus_set_devstate(sc,
+ ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY,
+ sc->ttp_mode, NULL);
+ }
+ /* TUF laptop RGB mode control. */
+ if (code == 0xb3) {
+ const uint32_t cmd = 0xb4; /* Save to BIOS */
+ const uint32_t r = 0xff, g = 0xff, b = 0xff;
+ const uint32_t speed = 0xeb; /* Medium */
+ if (sc->tuf_rgb_mode < 2)
+ sc->tuf_rgb_mode++;
+ else if (sc->tuf_rgb_mode == 2)
+ sc->tuf_rgb_mode = 10;
+ else sc->tuf_rgb_mode = 0;
+ acpi_asus_wmi_evaluate_method(sc->wmi_dev,
+ ASUS_WMI_METHODID_DEVS,
+ ASUS_WMI_DEVID_TUF_RGB_MODE,
+ cmd | (sc->tuf_rgb_mode << 8) | (r << 16) | (g << 24),
+ b | (speed << 8), NULL);
+ }
}
- acpi_asus_wmi_free_buffer(&response);
+}
+
+static void
+acpi_asus_wmi_notify(ACPI_HANDLE h, UINT32 notify, void *context)
+{
+ device_t dev = context;
+ struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
+ int code = 0, i = 1;
+
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, notify);
+
+ if (sc->event_queue)
+ i += ASUS_WMI_EVENT_QUEUE_SIZE;
+ do {
+ if (acpi_asus_wmi_get_event_code(sc->wmi_dev, notify, &code)
+ != 0) {
+ device_printf(dev, "Failed to get event code\n");
+ return;
+ }
+ if (code == ASUS_WMI_EVENT_QUEUE_END ||
+ code == ASUS_WMI_EVENT_MASK)
+ return;
+ acpi_asus_wmi_handle_event(sc, code);
+ if (notify != ASUS_WMI_EVENT_VALUE_ATK)
+ return;
+ } while (--i != 0);
+ if (sc->event_queue)
+ device_printf(dev, "Can not read event queue, "
+ "last code: 0x%x\n", code);
}
static int
acpi_asus_wmi_evaluate_method(device_t wmi_dev, int method,
- UINT32 arg0, UINT32 arg1, UINT32 *retval)
+ UINT32 arg0, UINT32 arg1, UINT32 arg2, UINT32 *retval)
{
- UINT32 params[2] = { arg0, arg1 };
+ UINT32 params[3] = { arg0, arg1, arg2 };
UINT32 result;
ACPI_OBJECT *obj;
ACPI_BUFFER in = { sizeof(params), &params };
@@ -618,7 +958,7 @@ acpi_wpi_asus_get_devstate(struct acpi_asus_wmi_softc *sc,
{
return (acpi_asus_wmi_evaluate_method(sc->wmi_dev,
- sc->dsts_id, dev_id, 0, retval));
+ sc->dsts_id, dev_id, 0, 0, retval));
}
static int
@@ -627,5 +967,40 @@ acpi_wpi_asus_set_devstate(struct acpi_asus_wmi_softc *sc,
{
return (acpi_asus_wmi_evaluate_method(sc->wmi_dev,
- ASUS_WMI_METHODID_DEVS, dev_id, ctrl_param, retval));
+ ASUS_WMI_METHODID_DEVS, dev_id, ctrl_param, 0, retval));
+}
+
+static int
+acpi_asus_wmi_backlight_update_status(device_t dev,
+    struct backlight_props *props)
+{
+ struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
+
+ acpi_wpi_asus_set_devstate(sc, ASUS_WMI_DEVID_KBD_BACKLIGHT,
+ kbd_bkl_level_to_devstate(props->brightness), NULL);
+ sc->kbd_bkl_level = props->brightness;
+
+ return (0);
+}
+
+static int
+acpi_asus_wmi_backlight_get_status(device_t dev, struct backlight_props *props)
+{
+ struct acpi_asus_wmi_softc *sc = device_get_softc(dev);
+
+ props->brightness = sc->kbd_bkl_level;
+ props->nlevels = nitems(acpi_asus_wmi_backlight_levels);
+ memcpy(props->levels, acpi_asus_wmi_backlight_levels,
+ sizeof(acpi_asus_wmi_backlight_levels));
+
+ return (0);
+}
+
+static int
+acpi_asus_wmi_backlight_get_info(device_t dev, struct backlight_info *info)
+{
+ info->type = BACKLIGHT_TYPE_KEYBOARD;
+ strlcpy(info->name, "ASUS Keyboard", BACKLIGHTMAXNAMELENGTH);
+
+ return (0);
}
diff --git a/sys/dev/acpi_support/acpi_fujitsu.c b/sys/dev/acpi_support/acpi_fujitsu.c
index 2d3c6f17dfe2..1649f74d873c 100644
--- a/sys/dev/acpi_support/acpi_fujitsu.c
+++ b/sys/dev/acpi_support/acpi_fujitsu.c
@@ -222,14 +222,12 @@ static int
acpi_fujitsu_probe(device_t dev)
{
char *name;
- char buffer[64];
int rv;
rv = ACPI_ID_PROBE(device_get_parent(dev), dev, fujitsu_ids, &name);
if (acpi_disabled("fujitsu") || rv > 0 || device_get_unit(dev) > 1)
return (ENXIO);
- sprintf(buffer, "Fujitsu Function Hotkeys %s", name);
- device_set_desc_copy(dev, buffer);
+ device_set_descf(dev, "Fujitsu Function Hotkeys %s", name);
return (rv);
}
diff --git a/sys/dev/acpi_support/acpi_hp.c b/sys/dev/acpi_support/acpi_hp.c
index 088e46af2ce3..5523b8768d41 100644
--- a/sys/dev/acpi_support/acpi_hp.c
+++ b/sys/dev/acpi_support/acpi_hp.c
@@ -465,7 +465,7 @@ acpi_hp_identify(driver_t *driver, device_t parent)
return;
/* Add only a single device instance. */
- if (device_find_child(parent, "acpi_hp", -1) != NULL)
+ if (device_find_child(parent, "acpi_hp", DEVICE_UNIT_ANY) != NULL)
return;
/* Check BIOS GUID to see whether system is compatible. */
@@ -473,7 +473,7 @@ acpi_hp_identify(driver_t *driver, device_t parent)
ACPI_HP_WMI_BIOS_GUID))
return;
- if (BUS_ADD_CHILD(parent, 0, "acpi_hp", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 0, "acpi_hp", DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "add acpi_hp child failed\n");
}
diff --git a/sys/dev/acpi_support/acpi_ibm.c b/sys/dev/acpi_support/acpi_ibm.c
index a617088d4246..c1302508b8a2 100644
--- a/sys/dev/acpi_support/acpi_ibm.c
+++ b/sys/dev/acpi_support/acpi_ibm.c
@@ -37,6 +37,7 @@
*/
#include "opt_acpi.h"
+#include "opt_evdev.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -55,6 +56,11 @@
#include <sys/sysctl.h>
#include <isa/rtc.h>
+#ifdef EVDEV_SUPPORT
+#include <dev/evdev/input.h>
+#include <dev/evdev/evdev.h>
+#endif
+
#define _COMPONENT ACPI_OEM
ACPI_MODULE_NAME("IBM")
@@ -198,6 +204,9 @@ struct acpi_ibm_softc {
struct sysctl_ctx_list *sysctl_ctx;
struct sysctl_oid *sysctl_tree;
+#ifdef EVDEV_SUPPORT
+ struct evdev_dev *evdev;
+#endif
};
static struct {
@@ -363,6 +372,9 @@ static driver_t acpi_ibm_driver = {
DRIVER_MODULE(acpi_ibm, acpi, acpi_ibm_driver, 0, 0);
MODULE_DEPEND(acpi_ibm, acpi, 1, 1, 1);
+#ifdef EVDEV_SUPPORT
+MODULE_DEPEND(acpi_ibm, evdev, 1, 1, 1);
+#endif
static char *ibm_ids[] = {"IBM0068", "LEN0068", "LEN0268", NULL};
static int
@@ -482,6 +494,20 @@ acpi_ibm_attach(device_t dev)
}
sc->ec_handle = acpi_get_handle(sc->ec_dev);
+#ifdef EVDEV_SUPPORT
+ sc->evdev = evdev_alloc();
+ evdev_set_name(sc->evdev, device_get_desc(dev));
+ evdev_set_phys(sc->evdev, device_get_nameunit(dev));
+ evdev_set_id(sc->evdev, BUS_HOST, 0, 0, 1);
+ evdev_support_event(sc->evdev, EV_SYN);
+ evdev_support_event(sc->evdev, EV_KEY);
+ evdev_support_key(sc->evdev, KEY_BRIGHTNESSUP);
+ evdev_support_key(sc->evdev, KEY_BRIGHTNESSDOWN);
+
+ if (evdev_register(sc->evdev) != 0)
+ return (ENXIO);
+#endif
+
/* Get the sysctl tree */
sc->sysctl_ctx = device_get_sysctl_ctx(dev);
sc->sysctl_tree = device_get_sysctl_tree(dev);
@@ -627,6 +653,10 @@ acpi_ibm_detach(device_t dev)
if (sc->led_dev != NULL)
led_destroy(sc->led_dev);
+#ifdef EVDEV_SUPPORT
+ evdev_free(sc->evdev);
+#endif
+
return (0);
}
@@ -1499,6 +1529,19 @@ acpi_ibm_notify(ACPI_HANDLE h, UINT32 notify, void *context)
/* Execute event handler */
if (sc->handler_events & (1 << (arg - 1)))
acpi_ibm_eventhandler(sc, (arg & 0xff));
+#ifdef EVDEV_SUPPORT
+ else if ((arg & 0xff) == IBM_EVENT_BRIGHTNESS_UP ||
+ (arg & 0xff) == IBM_EVENT_BRIGHTNESS_DOWN) {
+ uint16_t key;
+
+ key = arg == IBM_EVENT_BRIGHTNESS_UP ?
+ KEY_BRIGHTNESSUP : KEY_BRIGHTNESSDOWN;
+ evdev_push_key(sc->evdev, key, 1);
+ evdev_sync(sc->evdev);
+ evdev_push_key(sc->evdev, key, 0);
+ evdev_sync(sc->evdev);
+ }
+#endif
/* Notify devd(8) */
acpi_UserNotify("IBM", h, (arg & 0xff));
diff --git a/sys/dev/acpi_support/acpi_sbl_wmi.c b/sys/dev/acpi_support/acpi_sbl_wmi.c
new file mode 100644
index 000000000000..8abee8c94e26
--- /dev/null
+++ b/sys/dev/acpi_support/acpi_sbl_wmi.c
@@ -0,0 +1,193 @@
+/*-
+ * Copyright (c) 2024 Rubicon Communications, LLC (Netgate)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include "opt_acpi.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include "acpi_wmi_if.h"
+
+#define _COMPONENT ACPI_OEM
+ACPI_MODULE_NAME("SBL-FW-UPDATE-WMI")
+ACPI_SERIAL_DECL(sbl_wmi, "SBL WMI device");
+
+#define ACPI_SBL_FW_UPDATE_WMI_GUID "44FADEB1-B204-40F2-8581-394BBDC1B651"
+
+struct acpi_sbl_wmi_softc {
+ device_t dev;
+ device_t wmi_dev;
+};
+
+static void
+acpi_sbl_wmi_identify(driver_t *driver, device_t parent)
+{
+ /* Don't do anything if driver is disabled. */
+ if (acpi_disabled("sbl_wmi"))
+ return;
+
+ /* Add only a single device instance. */
+ if (device_find_child(parent, "acpi_sbl_wmi", DEVICE_UNIT_ANY) != NULL)
+ return;
+
+ /* Check management GUID to see whether system is compatible. */
+ if (!ACPI_WMI_PROVIDES_GUID_STRING(parent,
+ ACPI_SBL_FW_UPDATE_WMI_GUID))
+ return;
+
+ if (BUS_ADD_CHILD(parent, 0, "acpi_sbl_wmi", DEVICE_UNIT_ANY) == NULL)
+ device_printf(parent, "add acpi_sbl_wmi child failed\n");
+}
+
+static int
+acpi_sbl_wmi_probe(device_t dev)
+{
+ if (!ACPI_WMI_PROVIDES_GUID_STRING(device_get_parent(dev),
+ ACPI_SBL_FW_UPDATE_WMI_GUID))
+ return (EINVAL);
+ device_set_desc(dev, "SBL Firmware Update WMI device");
+ return (0);
+}
+
+static int
+acpi_sbl_wmi_sysctl_get(struct acpi_sbl_wmi_softc *sc, int *val)
+{
+ ACPI_OBJECT *obj;
+ ACPI_BUFFER out = { ACPI_ALLOCATE_BUFFER, NULL };
+ int error = 0;
+
+ if (ACPI_FAILURE(ACPI_WMI_GET_BLOCK(sc->wmi_dev,
+ ACPI_SBL_FW_UPDATE_WMI_GUID, 0, &out))) {
+ error = EINVAL;
+ goto out;
+ }
+
+ obj = out.Pointer;
+ if (obj->Type != ACPI_TYPE_INTEGER) {
+ error = EINVAL;
+ goto out;
+ }
+
+ *val = obj->Integer.Value;
+
+out:
+ if (out.Pointer)
+ AcpiOsFree(out.Pointer);
+
+ return (error);
+}
+
+static int
+acpi_sbl_wmi_sysctl_set(struct acpi_sbl_wmi_softc *sc, int in)
+{
+ ACPI_BUFFER input = { ACPI_ALLOCATE_BUFFER, NULL };
+ uint32_t val;
+
+ val = in;
+ input.Length = sizeof(val);
+ input.Pointer = &val;
+
+ if (ACPI_FAILURE(ACPI_WMI_SET_BLOCK(sc->wmi_dev,
+ ACPI_SBL_FW_UPDATE_WMI_GUID, 0, &input)))
+ return (ENODEV);
+
+ return (0);
+}
+
+static int
+acpi_sbl_wmi_fw_upgrade_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct acpi_sbl_wmi_softc *sc;
+ int arg;
+ int error = 0;
+
+ ACPI_SERIAL_BEGIN(sbl_wmi);
+
+ sc = (struct acpi_sbl_wmi_softc *)oidp->oid_arg1;
+ error = acpi_sbl_wmi_sysctl_get(sc, &arg);
+ if (error != 0)
+ goto out;
+
+ error = sysctl_handle_int(oidp, &arg, 0, req);
+ if (! error && req->newptr != NULL)
+ error = acpi_sbl_wmi_sysctl_set(sc, arg);
+
+out:
+ ACPI_SERIAL_END(sbl_wmi);
+
+ return (error);
+}
+
+static int
+acpi_sbl_wmi_attach(device_t dev)
+{
+ struct acpi_sbl_wmi_softc *sc;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->wmi_dev = device_get_parent(dev);
+
+ sysctl_ctx = device_get_sysctl_ctx(dev);
+ sysctl_tree = device_get_sysctl_tree(dev);
+
+ SYSCTL_ADD_PROC(sysctl_ctx,
+ SYSCTL_CHILDREN(sysctl_tree), OID_AUTO,
+ "firmware_update_request",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, acpi_sbl_wmi_fw_upgrade_sysctl, "I",
+ "Signal SBL that a firmware update is available");
+
+ return (0);
+}
+
+static device_method_t acpi_sbl_wmi_methods[] = {
+ DEVMETHOD(device_identify, acpi_sbl_wmi_identify),
+ DEVMETHOD(device_probe, acpi_sbl_wmi_probe),
+ DEVMETHOD(device_attach, acpi_sbl_wmi_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t acpi_sbl_wmi_driver = {
+ "acpi_sbl_wmi",
+ acpi_sbl_wmi_methods,
+ sizeof(struct acpi_sbl_wmi_softc),
+};
+
+DRIVER_MODULE(acpi_sbl_wmi, acpi_wmi, acpi_sbl_wmi_driver, 0, 0);
+MODULE_DEPEND(acpi_sbl_wmi, acpi_wmi, 1, 1, 1);
+MODULE_DEPEND(acpi_sbl_wmi, acpi, 1, 1, 1);
diff --git a/sys/dev/acpi_support/acpi_wmi.c b/sys/dev/acpi_support/acpi_wmi.c
index 6601db4317cb..e973b287dbb4 100644
--- a/sys/dev/acpi_support/acpi_wmi.c
+++ b/sys/dev/acpi_support/acpi_wmi.c
@@ -296,8 +296,8 @@ acpi_wmi_attach(device_t dev)
}
if (ret == 0) {
- bus_generic_probe(dev);
- ret = bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
}
return (ret);
@@ -580,6 +580,16 @@ acpi_wmi_get_block_method(device_t dev, const char *guid_string, UINT8 instance,
}
wq_method[2] = winfo->ginfo.oid[0];
wq_method[3] = winfo->ginfo.oid[1];
+ {
+ ACPI_HANDLE wq_handle;
+ ACPI_OBJECT_TYPE at;
+
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->wmi_handle, wq_method, &wq_handle)) &&
+ ACPI_SUCCESS(AcpiGetType(wq_handle, &at)) &&
+ at != ACPI_TYPE_METHOD) {
+ wq_input.Count = 0;
+ }
+ }
status = AcpiEvaluateObject(sc->wmi_handle, wq_method,
&wq_input, out);
if ((winfo->ginfo.flags & ACPI_WMI_REGFLAG_EXPENSIVE)
diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c
index ad1af9373fb7..3f0a7b40245d 100644
--- a/sys/dev/acpica/acpi.c
+++ b/sys/dev/acpica/acpi.c
@@ -4,6 +4,10 @@
* Copyright (c) 2000, 2001 Michael Smith
* Copyright (c) 2000 BSDi
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -55,6 +59,8 @@
#if defined(__i386__) || defined(__amd64__)
#include <machine/clock.h>
#include <machine/pci_cfgreg.h>
+#include <x86/cputypes.h>
+#include <x86/x86_var.h>
#endif
#include <machine/resource.h>
#include <machine/bus.h>
@@ -96,6 +102,11 @@ struct acpi_interface {
int num;
};
+struct acpi_wake_prep_context {
+ struct acpi_softc *sc;
+ enum power_stype stype;
+};
+
static char *sysres_ids[] = { "PNP0C01", "PNP0C02", NULL };
/* Global mutex for locking access to the ACPI subsystem. */
@@ -105,8 +116,9 @@ struct callout acpi_sleep_timer;
/* Bitmap of device quirks. */
int acpi_quirks;
-/* Supported sleep states. */
-static BOOLEAN acpi_sleep_states[ACPI_S_STATE_COUNT];
+/* Supported sleep states and types. */
+static bool acpi_supported_stypes[POWER_STYPE_COUNT];
+static bool acpi_supported_sstates[ACPI_S_STATE_COUNT];
static void acpi_lookup(void *arg, const char *name, device_t *dev);
static int acpi_modevent(struct module *mod, int event, void *junk);
@@ -140,6 +152,7 @@ static bus_child_location_t acpi_child_location_method;
static bus_hint_device_unit_t acpi_hint_device_unit;
static bus_get_property_t acpi_bus_get_prop;
static bus_get_device_path_t acpi_get_device_path;
+static bus_get_domain_t acpi_get_domain_method;
static acpi_id_probe_t acpi_device_id_probe;
static acpi_evaluate_object_t acpi_device_eval_obj;
@@ -162,23 +175,32 @@ static ACPI_STATUS acpi_probe_child(ACPI_HANDLE handle, UINT32 level,
void *context, void **status);
static void acpi_sleep_enable(void *arg);
static ACPI_STATUS acpi_sleep_disable(struct acpi_softc *sc);
-static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc, int state);
+static ACPI_STATUS acpi_EnterSleepState(struct acpi_softc *sc,
+ enum power_stype stype);
static void acpi_shutdown_final(void *arg, int howto);
static void acpi_enable_fixed_events(struct acpi_softc *sc);
static void acpi_resync_clock(struct acpi_softc *sc);
-static int acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_run_prep(ACPI_HANDLE handle, int sstate);
-static int acpi_wake_prep_walk(int sstate);
+static int acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype);
+static int acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype);
static int acpi_wake_sysctl_walk(device_t dev);
static int acpi_wake_set_sysctl(SYSCTL_HANDLER_ARGS);
-static void acpi_system_eventhandler_sleep(void *arg, int state);
-static void acpi_system_eventhandler_wakeup(void *arg, int state);
-static int acpi_sname2sstate(const char *sname);
-static const char *acpi_sstate2sname(int sstate);
static int acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static void acpi_system_eventhandler_sleep(void *arg,
+ enum power_stype stype);
+static void acpi_system_eventhandler_wakeup(void *arg,
+ enum power_stype stype);
+static enum power_stype acpi_sstate_to_stype(int sstate);
+static int acpi_sname_to_sstate(const char *sname);
+static const char *acpi_sstate_to_sname(int sstate);
+static int acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS);
+static int acpi_stype_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_debug_objects_sysctl(SYSCTL_HANDLER_ARGS);
-static int acpi_pm_func(u_long cmd, void *arg, ...);
+static int acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype);
+static int acpi_pm_func(u_long cmd, void *arg, enum power_stype stype);
static void acpi_enable_pcie(void);
static void acpi_reset_interfaces(device_t dev);
@@ -217,7 +239,7 @@ static device_method_t acpi_methods[] = {
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_hint_device_unit, acpi_hint_device_unit),
DEVMETHOD(bus_get_cpus, acpi_get_cpus),
- DEVMETHOD(bus_get_domain, acpi_get_domain),
+ DEVMETHOD(bus_get_domain, acpi_get_domain_method),
DEVMETHOD(bus_get_property, acpi_bus_get_prop),
DEVMETHOD(bus_get_device_path, acpi_get_device_path),
@@ -297,6 +319,10 @@ int acpi_susp_bounce;
SYSCTL_INT(_debug_acpi, OID_AUTO, suspend_bounce, CTLFLAG_RW,
&acpi_susp_bounce, 0, "Don't actually suspend, just test devices.");
+#if defined(__amd64__) || defined(__i386__)
+int acpi_override_isa_irq_polarity;
+#endif
+
/*
* ACPI standard UUID for Device Specific Data Package
* "Device Properties UUID for _DSD" Rev. 2.0
@@ -465,6 +491,7 @@ acpi_attach(device_t dev)
UINT32 flags;
UINT8 TypeA, TypeB;
char *env;
+ enum power_stype stype;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -577,31 +604,35 @@ acpi_attach(device_t dev)
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "power_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_power_button_stype, 0, acpi_stype_sysctl, "A",
"Power button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_button_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A",
+ &sc->acpi_sleep_button_stype, 0, acpi_stype_sysctl, "A",
"Sleep button ACPI sleep state.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "lid_switch_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A",
- "Lid ACPI sleep state. Set to S3 if you want to suspend your laptop when close the Lid.");
+ &sc->acpi_lid_switch_stype, 0, acpi_stype_sysctl, "A",
+ "Lid ACPI sleep state. Set to s2idle or s2mem if you want to suspend "
+ "your laptop when close the lid.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "standby_state",
- CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ OID_AUTO, "suspend_state", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ NULL, 0, acpi_suspend_state_sysctl, "A",
+ "Current ACPI suspend state. This sysctl is deprecated; you probably "
+ "want to use kern.power.suspend instead.");
SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "suspend_state",
+ OID_AUTO, "standby_state",
CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
- &sc->acpi_suspend_sx, 0, acpi_sleep_state_sysctl, "A", "");
+ &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A",
+ "ACPI Sx state to use when going standby (S1 or S2).");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "sleep_delay", CTLFLAG_RW, &sc->acpi_sleep_delay, 0,
"sleep delay in seconds");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
- OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0, "S4BIOS mode");
+ OID_AUTO, "s4bios", CTLFLAG_RW, &sc->acpi_s4bios, 0,
+ "Use S4BIOS when hibernating.");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
OID_AUTO, "verbose", CTLFLAG_RW, &sc->acpi_verbose, 0, "verbose mode");
SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
@@ -611,6 +642,19 @@ acpi_attach(device_t dev)
OID_AUTO, "handle_reboot", CTLFLAG_RW,
&sc->acpi_handle_reboot, 0, "Use ACPI Reset Register to reboot");
+#if defined(__amd64__) || defined(__i386__)
+ /*
+ * Enable workaround for incorrect ISA IRQ polarity by default on
+ * systems with Intel CPUs.
+ */
+ if (cpu_vendor_id == CPU_VENDOR_INTEL)
+ acpi_override_isa_irq_polarity = 1;
+ SYSCTL_ADD_INT(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree),
+ OID_AUTO, "override_isa_irq_polarity", CTLFLAG_RDTUN,
+ &acpi_override_isa_irq_polarity, 0,
+ "Force active-hi polarity for edge-triggered ISA IRQs");
+#endif
+
/*
* Default to 1 second before sleeping to give some machines time to
* stabilize.
@@ -634,31 +678,38 @@ acpi_attach(device_t dev)
sc->acpi_s4bios = 1;
#endif
- /* Probe all supported sleep states. */
- acpi_sleep_states[ACPI_STATE_S0] = TRUE;
- for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
+ /*
+ * Probe all supported ACPI sleep states. Awake (S0) is always supported.
+ */
+ acpi_supported_sstates[ACPI_STATE_S0] = TRUE;
+ acpi_supported_stypes[POWER_STYPE_AWAKE] = true;
+ for (state = ACPI_STATE_S1; state <= ACPI_STATE_S5; state++)
if (ACPI_SUCCESS(AcpiEvaluateObject(ACPI_ROOT_OBJECT,
__DECONST(char *, AcpiGbl_SleepStateNames[state]), NULL, NULL)) &&
- ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB)))
- acpi_sleep_states[state] = TRUE;
+ ACPI_SUCCESS(AcpiGetSleepTypeData(state, &TypeA, &TypeB))) {
+ acpi_supported_sstates[state] = TRUE;
+ acpi_supported_stypes[acpi_sstate_to_stype(state)] = true;
+ }
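+ /*
+ * Illustrative example (assumed firmware, not part of this change): a
+ * BIOS advertising only _S3, _S4 and _S5 would leave
+ *
+ * acpi_supported_sstates = { S0, S3, S4, S5 }
+ * acpi_supported_stypes = { awake, s2mem, hibernate, poweroff }
+ *
+ * so standby and suspend-to-idle requests would later fail with
+ * EOPNOTSUPP in acpi_ReqSleepState().
+ */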
/*
- * Dispatch the default sleep state to devices. The lid switch is set
+ * Dispatch the default sleep type to devices. The lid switch is set
* to UNKNOWN by default to avoid surprising users.
*/
- sc->acpi_power_button_sx = acpi_sleep_states[ACPI_STATE_S5] ?
- ACPI_STATE_S5 : ACPI_STATE_UNKNOWN;
- sc->acpi_lid_switch_sx = ACPI_STATE_UNKNOWN;
- sc->acpi_standby_sx = acpi_sleep_states[ACPI_STATE_S1] ?
- ACPI_STATE_S1 : ACPI_STATE_UNKNOWN;
- sc->acpi_suspend_sx = acpi_sleep_states[ACPI_STATE_S3] ?
- ACPI_STATE_S3 : ACPI_STATE_UNKNOWN;
-
- /* Pick the first valid sleep state for the sleep button default. */
- sc->acpi_sleep_button_sx = ACPI_STATE_UNKNOWN;
- for (state = ACPI_STATE_S1; state <= ACPI_STATE_S4; state++)
- if (acpi_sleep_states[state]) {
- sc->acpi_sleep_button_sx = state;
+ sc->acpi_power_button_stype = acpi_supported_stypes[POWER_STYPE_POWEROFF] ?
+ POWER_STYPE_POWEROFF : POWER_STYPE_UNKNOWN;
+ sc->acpi_lid_switch_stype = POWER_STYPE_UNKNOWN;
+
+ sc->acpi_standby_sx = ACPI_STATE_UNKNOWN;
+ if (acpi_supported_sstates[ACPI_STATE_S1])
+ sc->acpi_standby_sx = ACPI_STATE_S1;
+ else if (acpi_supported_sstates[ACPI_STATE_S2])
+ sc->acpi_standby_sx = ACPI_STATE_S2;
+
+ /* Pick the first valid sleep type for the sleep button default. */
+ sc->acpi_sleep_button_stype = POWER_STYPE_UNKNOWN;
+ for (stype = POWER_STYPE_STANDBY; stype <= POWER_STYPE_HIBERNATE; stype++)
+ if (acpi_supported_stypes[stype]) {
+ sc->acpi_sleep_button_stype = stype;
break;
}
@@ -683,7 +734,7 @@ acpi_attach(device_t dev)
/* Flag our initial states. */
sc->acpi_enabled = TRUE;
- sc->acpi_sstate = ACPI_STATE_S0;
+ sc->acpi_stype = POWER_STYPE_AWAKE;
sc->acpi_sleep_disabled = TRUE;
/* Create the control device */
@@ -695,7 +746,8 @@ acpi_attach(device_t dev)
goto out;
/* Register ACPI again to pass the correct argument of pm_func. */
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, sc,
+ acpi_supported_stypes);
acpi_platform_osc(dev);
@@ -721,6 +773,58 @@ acpi_attach(device_t dev)
return_VALUE (error);
}
+static int
+acpi_stype_to_sstate(struct acpi_softc *sc, enum power_stype stype)
+{
+ switch (stype) {
+ case POWER_STYPE_AWAKE:
+ return (ACPI_STATE_S0);
+ case POWER_STYPE_STANDBY:
+ return (sc->acpi_standby_sx);
+ case POWER_STYPE_SUSPEND_TO_MEM:
+ return (ACPI_STATE_S3);
+ case POWER_STYPE_HIBERNATE:
+ return (ACPI_STATE_S4);
+ case POWER_STYPE_POWEROFF:
+ return (ACPI_STATE_S5);
+ case POWER_STYPE_SUSPEND_TO_IDLE:
+ case POWER_STYPE_COUNT:
+ case POWER_STYPE_UNKNOWN:
+ return (ACPI_STATE_UNKNOWN);
+ }
+ return (ACPI_STATE_UNKNOWN);
+}
+
+/*
+ * XXX It would be nice not to need this function, but that would require
+ * acpi_EnterSleepState and acpi_ReqSleepState to take actual ACPI S-states,
+ * which is not possible at the moment because suspend-to-idle (which is not
+ * an ACPI S-state and does not map to one) will be implemented here.
+ *
+ * In the future, much of the logic in these functions should be made generic
+ * so that suspend-to-idle also works on non-ACPI builds, after which
+ * acpi_EnterSleepState and acpi_ReqSleepState can truly take ACPI S-states
+ * again.
+ */
+static enum power_stype
+acpi_sstate_to_stype(int sstate)
+{
+ switch (sstate) {
+ case ACPI_STATE_S0:
+ return (POWER_STYPE_AWAKE);
+ case ACPI_STATE_S1:
+ case ACPI_STATE_S2:
+ return (POWER_STYPE_STANDBY);
+ case ACPI_STATE_S3:
+ return (POWER_STYPE_SUSPEND_TO_MEM);
+ case ACPI_STATE_S4:
+ return (POWER_STYPE_HIBERNATE);
+ case ACPI_STATE_S5:
+ return (POWER_STYPE_POWEROFF);
+ }
+ return (POWER_STYPE_UNKNOWN);
+}
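+
+/*
+ * Note that the two mappings are not inverses: S1 and S2 both collapse
+ * into POWER_STYPE_STANDBY, which is why acpi_stype_to_sstate() needs the
+ * softc to recover the choice recorded in sc->acpi_standby_sx. A sketch
+ * of the (lossy) round trip:
+ *
+ * stype = acpi_sstate_to_stype(ACPI_STATE_S2);
+ * // stype == POWER_STYPE_STANDBY
+ * sstate = acpi_stype_to_sstate(sc, stype);
+ * // sstate == sc->acpi_standby_sx, i.e. S1 or S2, not necessarily S2
+ */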
+
static void
acpi_set_power_children(device_t dev, int state)
{
@@ -773,6 +877,7 @@ acpi_resume(device_t dev)
static int
acpi_shutdown(device_t dev)
{
+ struct acpi_softc *sc = device_get_softc(dev);
bus_topo_assert();
@@ -783,7 +888,7 @@ acpi_shutdown(device_t dev)
* Enable any GPEs that are able to power-on the system (i.e., RTC).
* Also, disable any that are not valid for this state (most).
*/
- acpi_wake_prep_walk(ACPI_STATE_S5);
+ acpi_wake_prep_walk(sc, POWER_STYPE_POWEROFF);
return (0);
}
@@ -800,6 +905,7 @@ acpi_add_child(device_t bus, u_int order, const char *name, int unit)
if ((ad = malloc(sizeof(*ad), M_ACPIDEV, M_NOWAIT | M_ZERO)) == NULL)
return (NULL);
+ ad->ad_domain = ACPI_DEV_DOMAIN_UNKNOWN;
resource_list_init(&ad->ad_rl);
child = device_add_child_ordered(bus, order, name, unit);
@@ -1036,6 +1142,9 @@ acpi_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
case ACPI_IVAR_FLAGS:
*(int *)result = ad->ad_flags;
break;
+ case ACPI_IVAR_DOMAIN:
+ *(int *)result = ad->ad_domain;
+ break;
case ISA_IVAR_VENDORID:
case ISA_IVAR_SERIAL:
case ISA_IVAR_COMPATID:
@@ -1080,6 +1189,9 @@ acpi_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
case ACPI_IVAR_FLAGS:
ad->ad_flags = (int)value;
break;
+ case ACPI_IVAR_DOMAIN:
+ ad->ad_domain = (int)value;
+ break;
default:
panic("bad ivar write request (%d)", index);
return (ENOENT);
@@ -1227,8 +1339,8 @@ acpi_hint_device_unit(device_t acdev, device_t child, const char *name,
* _PXM to a NUMA domain. If the device does not have a _PXM method,
* -2 is returned. If any other error occurs, -1 is returned.
*/
-static int
-acpi_parse_pxm(device_t dev)
+int
+acpi_pxm_parse(device_t dev)
{
#ifdef NUMA
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
@@ -1255,7 +1367,7 @@ acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
{
int d, error;
- d = acpi_parse_pxm(child);
+ d = acpi_pxm_parse(child);
if (d < 0)
return (bus_generic_get_cpus(dev, child, op, setsize, cpuset));
@@ -1278,29 +1390,16 @@ acpi_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize,
}
}
-/*
- * Fetch the NUMA domain for the given device 'dev'.
- *
- * If a device has a _PXM method, map that to a NUMA domain.
- * Otherwise, pass the request up to the parent.
- * If there's no matching domain or the domain cannot be
- * determined, return ENOENT.
- */
-int
-acpi_get_domain(device_t dev, device_t child, int *domain)
+static int
+acpi_get_domain_method(device_t dev, device_t child, int *domain)
{
- int d;
+ int error;
- d = acpi_parse_pxm(child);
- if (d >= 0) {
- *domain = d;
+ error = acpi_read_ivar(dev, child, ACPI_IVAR_DOMAIN,
+ (uintptr_t *)domain);
+ if (error == 0 && *domain != ACPI_DEV_DOMAIN_UNKNOWN)
return (0);
- }
- if (d == -1)
- return (ENOENT);
-
- /* No _PXM node; go up a level */
- return (bus_generic_get_domain(dev, child, domain));
+ return (ENOENT);
}
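+
+/*
+ * A child driver consumes the cached domain through the ordinary bus
+ * interface; a minimal sketch (hypothetical consumer, not part of this
+ * change):
+ *
+ * int domain;
+ *
+ * if (bus_get_domain(dev, &domain) == 0)
+ * device_printf(dev, "NUMA domain %d\n", domain);
+ * else
+ * device_printf(dev, "no NUMA affinity\n");
+ *
+ * ENOENT now simply means acpi_probe_child() found no _PXM for the node
+ * and ad_domain was left at ACPI_DEV_DOMAIN_UNKNOWN.
+ */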
static struct rman *
@@ -1651,19 +1750,22 @@ acpi_map_resource(device_t bus, device_t child, struct resource *r,
args.offset = start - rman_get_start(sysres);
args.length = length;
- return (bus_generic_map_resource(bus, child, sysres, &args, map));
+ return (bus_map_resource(bus, sysres, &args, map));
}
static int
acpi_unmap_resource(device_t bus, device_t child, struct resource *r,
struct resource_map *map)
{
- if (acpi_is_resource_managed(bus, r)) {
- r = acpi_managed_resource(bus, r);
- if (r == NULL)
- return (ENOENT);
- }
- return (bus_generic_unmap_resource(bus, child, r, map));
+ struct resource *sysres;
+
+ if (!acpi_is_resource_managed(bus, r))
+ return (bus_generic_unmap_resource(bus, child, r, map));
+
+ sysres = acpi_managed_resource(bus, r);
+ if (sysres == NULL)
+ return (ENOENT);
+ return (bus_unmap_resource(bus, sysres, map));
}
/* Allocate an IO port or memory resource, given its GAS. */
@@ -2019,7 +2121,7 @@ acpi_device_pwr_for_sleep(device_t bus, device_t dev, int *dstate)
* Note illegal _S0D is evaluated because some systems expect this.
*/
sc = device_get_softc(bus);
- snprintf(sxd, sizeof(sxd), "_S%dD", sc->acpi_sstate);
+ snprintf(sxd, sizeof(sxd), "_S%dD", acpi_stype_to_sstate(sc, sc->acpi_stype));
status = acpi_GetInteger(handle, sxd, dstate);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
device_printf(dev, "failed to get %s on %s: %s\n", sxd,
@@ -2122,12 +2224,13 @@ acpi_set_powerstate(device_t child, int state)
status = acpi_pwr_switch_consumer(h, state);
if (ACPI_SUCCESS(status)) {
if (bootverbose)
- device_printf(child, "set ACPI power state D%d on %s\n",
- state, acpi_name(h));
+ device_printf(child, "set ACPI power state %s on %s\n",
+ acpi_d_state_to_str(state), acpi_name(h));
} else if (status != AE_NOT_FOUND)
device_printf(child,
- "failed to set ACPI power state D%d on %s: %s\n", state,
- acpi_name(h), AcpiFormatException(status));
+ "failed to set ACPI power state %s on %s: %s\n",
+ acpi_d_state_to_str(state), acpi_name(h),
+ AcpiFormatException(status));
return (0);
}
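+
+/*
+ * acpi_d_state_to_str() is introduced elsewhere in this change (outside
+ * the hunks shown); judging from its uses it presumably maps D-states to
+ * names along these lines:
+ *
+ * static const char *dnames[] = {"D0", "D1", "D2", "D3hot", "D3cold"};
+ *
+ * if (state >= ACPI_STATE_D0 && state <= ACPI_STATE_D3_COLD)
+ * return (dnames[state - ACPI_STATE_D0]);
+ * return ("unknown");
+ */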
@@ -2262,11 +2365,11 @@ acpi_probe_children(device_t bus)
/* Create any static children by calling device identify methods. */
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "device identify routines\n"));
- bus_generic_probe(bus);
+ bus_identify_children(bus);
/* Probe/attach all children, created statically and from the namespace. */
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_generic_attach\n"));
- bus_generic_attach(bus);
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "acpi bus_attach_children\n"));
+ bus_attach_children(bus);
/*
* Reserve resources allocated to children but not yet allocated
@@ -2326,7 +2429,7 @@ acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
ACPI_HANDLE h;
device_t bus, child;
char *handle_str;
- int order;
+ int d, order;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -2374,7 +2477,7 @@ acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "scanning '%s'\n", handle_str));
order = level * 10 + ACPI_DEV_BASE_ORDER;
acpi_probe_order(handle, &order);
- child = BUS_ADD_CHILD(bus, order, NULL, -1);
+ child = BUS_ADD_CHILD(bus, order, NULL, DEVICE_UNIT_ANY);
if (child == NULL)
break;
@@ -2434,6 +2537,10 @@ acpi_probe_child(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
}
AcpiOsFree(devinfo);
}
+
+ d = acpi_pxm_parse(child);
+ if (d >= 0)
+ ad->ad_domain = d;
break;
}
}
@@ -3112,9 +3219,9 @@ acpi_sleep_force_task(void *context)
{
struct acpi_softc *sc = (struct acpi_softc *)context;
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
- device_printf(sc->acpi_dev, "force sleep state S%d failed\n",
- sc->acpi_next_sstate);
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
+ device_printf(sc->acpi_dev, "force sleep state %s failed\n",
+ power_stype_to_name(sc->acpi_next_stype));
}
static void
@@ -3141,24 +3248,24 @@ acpi_sleep_force(void *arg)
* acks are in.
*/
int
-acpi_ReqSleepState(struct acpi_softc *sc, int state)
+acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype)
{
#if defined(__amd64__) || defined(__i386__)
struct apm_clone_data *clone;
ACPI_STATUS status;
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+ if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_stypes[stype])
return (EOPNOTSUPP);
/*
* If a reboot/shutdown/suspend request is already in progress or
* suspend is blocked due to an upcoming shutdown, just return.
*/
- if (rebooting || sc->acpi_next_sstate != 0 || suspend_blocked) {
+ if (rebooting || sc->acpi_next_stype != POWER_STYPE_AWAKE ||
+ suspend_blocked)
return (0);
- }
/* Wait until sleep is enabled. */
while (sc->acpi_sleep_disabled) {
@@ -3167,12 +3274,12 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_LOCK(acpi);
- sc->acpi_next_sstate = state;
+ sc->acpi_next_stype = stype;
/* S5 (soft-off) should be entered directly with no waiting. */
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3188,7 +3295,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
/* If devd(8) is not running, immediately enter the sleep state. */
if (!devctl_process_running()) {
ACPI_UNLOCK(acpi);
- status = acpi_EnterSleepState(sc, state);
+ status = acpi_EnterSleepState(sc, stype);
return (ACPI_SUCCESS(status) ? 0 : ENXIO);
}
@@ -3203,7 +3310,7 @@ acpi_ReqSleepState(struct acpi_softc *sc, int state)
ACPI_UNLOCK(acpi);
/* Now notify devd(8) also. */
- acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Suspend", ACPI_ROOT_OBJECT, stype);
return (0);
#else
@@ -3226,17 +3333,17 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
struct acpi_softc *sc;
int ret, sleeping;
- /* If no pending sleep state, return an error. */
+ /* If no pending sleep type, return an error. */
ACPI_LOCK(acpi);
sc = clone->acpi_sc;
- if (sc->acpi_next_sstate == 0) {
+ if (sc->acpi_next_stype == POWER_STYPE_AWAKE) {
ACPI_UNLOCK(acpi);
return (ENXIO);
}
/* Caller wants to abort suspend process. */
if (error) {
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
callout_stop(&sc->susp_force_to);
device_printf(sc->acpi_dev,
"listener on %s cancelled the pending suspend\n",
@@ -3266,7 +3373,7 @@ acpi_AckSleepState(struct apm_clone_data *clone, int error)
ACPI_UNLOCK(acpi);
ret = 0;
if (sleeping) {
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_sstate)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, sc->acpi_next_stype)))
ret = ENODEV;
}
return (ret);
@@ -3323,24 +3430,27 @@ enum acpi_sleep_state {
* Currently we support S1-S5 but S4 is only S4BIOS
*/
static ACPI_STATUS
-acpi_EnterSleepState(struct acpi_softc *sc, int state)
+acpi_EnterSleepState(struct acpi_softc *sc, enum power_stype stype)
{
register_t intr;
ACPI_STATUS status;
ACPI_EVENT_STATUS power_button_status;
enum acpi_sleep_state slp_state;
+ int acpi_sstate;
int sleep_result;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
- if (state < ACPI_STATE_S1 || state > ACPI_S_STATES_MAX)
+ if (stype <= POWER_STYPE_AWAKE || stype >= POWER_STYPE_COUNT)
return_ACPI_STATUS (AE_BAD_PARAMETER);
- if (!acpi_sleep_states[state]) {
- device_printf(sc->acpi_dev, "Sleep state S%d not supported by BIOS\n",
- state);
+ if (!acpi_supported_stypes[stype]) {
+ device_printf(sc->acpi_dev, "Sleep type %s not supported on this "
+ "platform\n", power_stype_to_name(stype));
return (AE_SUPPORT);
}
+ acpi_sstate = acpi_stype_to_sstate(sc, stype);
+
/* Re-entry once we're suspending is not allowed. */
status = acpi_sleep_disable(sc);
if (ACPI_FAILURE(status)) {
@@ -3349,7 +3459,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
return (status);
}
- if (state == ACPI_STATE_S5) {
+ if (stype == POWER_STYPE_POWEROFF) {
/*
* Shut down cleanly and power off. This will call us back through the
* shutdown handlers.
@@ -3358,10 +3468,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
return_ACPI_STATUS (AE_OK);
}
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, stype);
stop_all_proc();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, stype);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -3377,16 +3487,16 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
#endif
/*
- * Be sure to hold Giant across DEVICE_SUSPEND/RESUME
+ * Be sure to hold the bus topology lock across DEVICE_SUSPEND/RESUME.
*/
bus_topo_lock();
slp_state = ACPI_SS_NONE;
- sc->acpi_sstate = state;
+ sc->acpi_stype = stype;
/* Enable any GPEs as appropriate and requested by the user. */
- acpi_wake_prep_walk(state);
+ acpi_wake_prep_walk(sc, stype);
slp_state = ACPI_SS_GPE_SET;
/*
@@ -3403,7 +3513,7 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
}
slp_state = ACPI_SS_DEV_SUSPEND;
- status = AcpiEnterSleepStatePrep(state);
+ status = AcpiEnterSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepStatePrep failed - %s\n",
AcpiFormatException(status));
@@ -3416,9 +3526,9 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
suspendclock();
intr = intr_disable();
- if (state != ACPI_STATE_S1) {
- sleep_result = acpi_sleep_machdep(sc, state);
- acpi_wakeup_machdep(sc, state, sleep_result, 0);
+ if (stype != POWER_STYPE_STANDBY) {
+ sleep_result = acpi_sleep_machdep(sc, acpi_sstate);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 0);
/*
* XXX According to ACPI specification SCI_EN bit should be restored
@@ -3429,10 +3539,10 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
* This hack is picked up from Linux, which claims that it follows
* Windows behavior.
*/
- if (sleep_result == 1 && state != ACPI_STATE_S4)
+ if (sleep_result == 1 && stype != POWER_STYPE_HIBERNATE)
AcpiWriteBitRegister(ACPI_BITREG_SCI_ENABLE, ACPI_ENABLE_EVENT);
- if (sleep_result == 1 && state == ACPI_STATE_S3) {
+ if (sleep_result == 1 && stype == POWER_STYPE_SUSPEND_TO_MEM) {
/*
* Prevent mis-interpretation of the wakeup by power button
* as a request for power off.
@@ -3458,20 +3568,20 @@ acpi_EnterSleepState(struct acpi_softc *sc, int state)
intr_restore(intr);
/* call acpi_wakeup_machdep() again with interrupt enabled */
- acpi_wakeup_machdep(sc, state, sleep_result, 1);
+ acpi_wakeup_machdep(sc, acpi_sstate, sleep_result, 1);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (sleep_result == -1)
goto backout;
- /* Re-enable ACPI hardware on wakeup from sleep state 4. */
- if (state == ACPI_STATE_S4)
+ /* Re-enable ACPI hardware on wakeup from hibernate. */
+ if (stype == POWER_STYPE_HIBERNATE)
AcpiEnable();
} else {
- status = AcpiEnterSleepState(state);
+ status = AcpiEnterSleepState(acpi_sstate);
intr_restore(intr);
- AcpiLeaveSleepStatePrep(state);
+ AcpiLeaveSleepStatePrep(acpi_sstate);
if (ACPI_FAILURE(status)) {
device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n",
AcpiFormatException(status));
@@ -3488,13 +3598,13 @@ backout:
if (slp_state >= ACPI_SS_SLP_PREP)
resumeclock();
if (slp_state >= ACPI_SS_GPE_SET) {
- acpi_wake_prep_walk(state);
- sc->acpi_sstate = ACPI_STATE_S0;
+ acpi_wake_prep_walk(sc, stype);
+ sc->acpi_stype = POWER_STYPE_AWAKE;
}
if (slp_state >= ACPI_SS_DEV_SUSPEND)
DEVICE_RESUME(root_bus);
if (slp_state >= ACPI_SS_SLP_PREP)
- AcpiLeaveSleepState(state);
+ AcpiLeaveSleepState(acpi_sstate);
if (slp_state >= ACPI_SS_SLEPT) {
#if defined(__i386__) || defined(__amd64__)
/* NB: we are still using ACPI timecounter at this point. */
@@ -3503,7 +3613,7 @@ backout:
acpi_resync_clock(sc);
acpi_enable_fixed_events(sc);
}
- sc->acpi_next_sstate = 0;
+ sc->acpi_next_stype = POWER_STYPE_AWAKE;
bus_topo_unlock();
@@ -3522,14 +3632,14 @@ backout:
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, stype);
/* Allow another sleep request after a while. */
callout_schedule(&acpi_sleep_timer, hz * ACPI_MINIMUM_AWAKETIME);
/* Run /etc/rc.resume after we are back. */
if (devctl_process_running())
- acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, state);
+ acpi_UserNotify("Resume", ACPI_ROOT_OBJECT, stype);
return_ACPI_STATUS (status);
}
@@ -3580,8 +3690,10 @@ acpi_wake_set_enable(device_t dev, int enable)
}
static int
-acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_sleep_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3590,6 +3702,8 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
return (ENXIO);
dev = acpi_get_device(handle);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* The destination sleep state must be less than (i.e., higher power)
* or equal to the value specified by _PRW. If this GPE cannot be
@@ -3600,22 +3714,24 @@ acpi_wake_sleep_prep(ACPI_HANDLE handle, int sstate)
if (sstate > prw.lowest_wake) {
AcpiSetGpeWakeMask(prw.gpe_handle, prw.gpe_bit, ACPI_GPE_DISABLE);
if (bootverbose)
- device_printf(dev, "wake_prep disabled wake for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep disabled wake for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
} else if (dev && (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) != 0) {
acpi_pwr_wake_enable(handle, 1);
acpi_SetInteger(handle, "_PSW", 1);
if (bootverbose)
- device_printf(dev, "wake_prep enabled for %s (S%d)\n",
- acpi_name(handle), sstate);
+ device_printf(dev, "wake_prep enabled for %s (%s)\n",
+ acpi_name(handle), power_stype_to_name(stype));
}
return (0);
}
static int
-acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
+acpi_wake_run_prep(struct acpi_softc *sc, ACPI_HANDLE handle,
+ enum power_stype stype)
{
+ int sstate;
struct acpi_prw_data prw;
device_t dev;
@@ -3629,6 +3745,8 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
if (dev == NULL || (acpi_get_flags(dev) & ACPI_FLAG_WAKE_ENABLED) == 0)
return (0);
+ sstate = acpi_stype_to_sstate(sc, stype);
+
/*
* If this GPE couldn't be enabled for the previous sleep state, it was
* disabled before going to sleep so re-enable it. If it was enabled,
@@ -3652,26 +3770,29 @@ acpi_wake_run_prep(ACPI_HANDLE handle, int sstate)
static ACPI_STATUS
acpi_wake_prep(ACPI_HANDLE handle, UINT32 level, void *context, void **status)
{
- int sstate;
+ struct acpi_wake_prep_context *ctx = context;
/* If suspending, run the sleep prep function, otherwise wake. */
- sstate = *(int *)context;
if (AcpiGbl_SystemAwakeAndRunning)
- acpi_wake_sleep_prep(handle, sstate);
+ acpi_wake_sleep_prep(ctx->sc, handle, ctx->stype);
else
- acpi_wake_run_prep(handle, sstate);
+ acpi_wake_run_prep(ctx->sc, handle, ctx->stype);
return (AE_OK);
}
/* Walk the tree rooted at acpi0 to prep devices for suspend/resume. */
static int
-acpi_wake_prep_walk(int sstate)
+acpi_wake_prep_walk(struct acpi_softc *sc, enum power_stype stype)
{
ACPI_HANDLE sb_handle;
+ struct acpi_wake_prep_context ctx = {
+ .sc = sc,
+ .stype = stype,
+ };
if (ACPI_SUCCESS(AcpiGetHandle(ACPI_ROOT_OBJECT, "\\_SB_", &sb_handle)))
AcpiWalkNamespace(ACPI_TYPE_DEVICE, sb_handle, 100,
- acpi_wake_prep, NULL, &sstate, NULL);
+ acpi_wake_prep, NULL, &ctx, NULL);
return (0);
}
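+
+/*
+ * The walk context is declared elsewhere in this file (outside the hunks
+ * shown); the initializer above implies it looks roughly like:
+ *
+ * struct acpi_wake_prep_context {
+ * struct acpi_softc *sc;
+ * enum power_stype stype;
+ * };
+ */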
@@ -3830,31 +3951,35 @@ out:
/* System Event Handlers (registered by EVENTHANDLER_REGISTER) */
static void
-acpi_system_eventhandler_sleep(void *arg, int state)
+acpi_system_eventhandler_sleep(void *arg, enum power_stype stype)
{
struct acpi_softc *sc = (struct acpi_softc *)arg;
int ret;
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Check if button action is disabled or unknown. */
- if (state == ACPI_STATE_UNKNOWN)
+ if (stype == POWER_STYPE_UNKNOWN)
return;
- /* Request that the system prepare to enter the given suspend state. */
- ret = acpi_ReqSleepState(sc, state);
+ /* Request that the system prepare to enter the given sleep type. */
+ ret = acpi_ReqSleepState(sc, stype);
if (ret != 0)
device_printf(sc->acpi_dev,
- "request to enter state S%d failed (err %d)\n", state, ret);
+ "request to enter state %s failed (err %d)\n",
+ power_stype_to_name(stype), ret);
return_VOID;
}
static void
-acpi_system_eventhandler_wakeup(void *arg, int state)
+acpi_system_eventhandler_wakeup(void *arg, enum power_stype stype)
{
- ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, state);
+ ACPI_FUNCTION_TRACE_U32((char *)(uintptr_t)__func__, stype);
/* Currently, nothing to do for wakeup. */
@@ -3868,26 +3993,35 @@ static void
acpi_invoke_sleep_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_sleep_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, *(enum power_stype *)context);
}
static void
acpi_invoke_wake_eventhandler(void *context)
{
- EVENTHANDLER_INVOKE(acpi_wakeup_event, *(int *)context);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, *(enum power_stype *)context);
}
UINT32
acpi_event_power_button_sleep(void *context)
{
+#if defined(__amd64__) || defined(__i386__)
struct acpi_softc *sc = (struct acpi_softc *)context;
+#else
+ (void)context;
+#endif
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+#if defined(__amd64__) || defined(__i386__)
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
+#else
+ shutdown_nice(RB_POWEROFF);
+#endif
+
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3899,7 +4033,7 @@ acpi_event_power_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_power_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_power_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3912,7 +4046,7 @@ acpi_event_sleep_button_sleep(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_sleep_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -3925,7 +4059,7 @@ acpi_event_sleep_button_wake(void *context)
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
if (ACPI_FAILURE(AcpiOsExecute(OSL_NOTIFY_HANDLER,
- acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_sx)))
+ acpi_invoke_wake_eventhandler, &sc->acpi_sleep_button_stype)))
return_VALUE (ACPI_INTERRUPT_NOT_HANDLED);
return_VALUE (ACPI_INTERRUPT_HANDLED);
}
@@ -4121,7 +4255,8 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
{
struct acpi_softc *sc;
struct acpi_ioctl_hook *hp;
- int error, state;
+ int error;
+ int sstate;
error = 0;
hp = NULL;
@@ -4151,9 +4286,9 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
/* Core system ioctls. */
switch (cmd) {
case ACPIIO_REQSLPSTATE:
- state = *(int *)addr;
- if (state != ACPI_STATE_S5)
- return (acpi_ReqSleepState(sc, state));
+ sstate = *(int *)addr;
+ if (sstate != ACPI_STATE_S5)
+ return (acpi_ReqSleepState(sc, acpi_sstate_to_stype(sstate)));
device_printf(sc->acpi_dev, "power off via acpi ioctl not supported\n");
error = EOPNOTSUPP;
break;
@@ -4162,12 +4297,12 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
error = acpi_AckSleepState(sc->acpi_clone, error);
break;
case ACPIIO_SETSLPSTATE: /* DEPRECATED */
- state = *(int *)addr;
- if (state < ACPI_STATE_S0 || state > ACPI_S_STATES_MAX)
+ sstate = *(int *)addr;
+ if (sstate < ACPI_STATE_S0 || sstate > ACPI_STATE_S5)
return (EINVAL);
- if (!acpi_sleep_states[state])
+ if (!acpi_supported_sstates[sstate])
return (EOPNOTSUPP);
- if (ACPI_FAILURE(acpi_SetSleepState(sc, state)))
+ if (ACPI_FAILURE(acpi_SetSleepState(sc, acpi_sstate_to_stype(sstate))))
error = ENXIO;
break;
default:
@@ -4179,7 +4314,7 @@ acpiioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag, struct thread *t
}
static int
-acpi_sname2sstate(const char *sname)
+acpi_sname_to_sstate(const char *sname)
{
int sstate;
@@ -4194,14 +4329,15 @@ acpi_sname2sstate(const char *sname)
}
static const char *
-acpi_sstate2sname(int sstate)
+acpi_sstate_to_sname(int state)
{
- static const char *snames[] = { "S0", "S1", "S2", "S3", "S4", "S5" };
+ static const char *snames[ACPI_S_STATE_COUNT] = {"S0", "S1", "S2", "S3",
+ "S4", "S5"};
- if (sstate >= ACPI_STATE_S0 && sstate <= ACPI_STATE_S5)
- return (snames[sstate]);
- else if (sstate == ACPI_STATE_UNKNOWN)
+ if (state == ACPI_STATE_UNKNOWN)
return ("NONE");
+ if (state >= ACPI_STATE_S0 && state < ACPI_S_STATE_COUNT)
+ return (snames[state]);
return (NULL);
}
@@ -4214,8 +4350,8 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
for (state = ACPI_STATE_S1; state < ACPI_S_STATE_COUNT; state++)
- if (acpi_sleep_states[state])
- sbuf_printf(&sb, "%s ", acpi_sstate2sname(state));
+ if (acpi_supported_sstates[state])
+ sbuf_printf(&sb, "%s ", acpi_sstate_to_sname(state));
sbuf_trim(&sb);
sbuf_finish(&sb);
error = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
@@ -4224,26 +4360,89 @@ acpi_supported_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
}
static int
+acpi_suspend_state_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ struct acpi_softc *sc = oidp->oid_arg1;
+ enum power_stype new_stype;
+ enum power_stype old_stype = power_suspend_stype;
+ int old_sstate = acpi_stype_to_sstate(sc, old_stype);
+ int new_sstate;
+
+ strlcpy(name, acpi_sstate_to_sname(old_sstate), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_sstate = acpi_sname_to_sstate(name);
+ if (new_sstate < 0)
+ return (EINVAL);
+ new_stype = acpi_sstate_to_stype(new_sstate);
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ power_suspend_stype = new_stype;
+ return (err);
+}
+
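+/*
+ * Compatibility example (assuming power_suspend_stype is the variable
+ * behind the new kern.power.suspend knob): writing the legacy name "S3"
+ * to this sysctl still works because it is translated through the
+ * helpers defined above:
+ *
+ * new_stype = acpi_sstate_to_stype(acpi_sname_to_sstate("S3"));
+ * // new_stype == POWER_STYPE_SUSPEND_TO_MEM
+ */
+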
+static int
acpi_sleep_state_sysctl(SYSCTL_HANDLER_ARGS)
{
char sleep_state[10];
- int error, new_state, old_state;
+ int error;
+ int new_sstate, old_sstate;
- old_state = *(int *)oidp->oid_arg1;
- strlcpy(sleep_state, acpi_sstate2sname(old_state), sizeof(sleep_state));
+ old_sstate = *(int *)oidp->oid_arg1;
+ strlcpy(sleep_state, acpi_sstate_to_sname(old_sstate), sizeof(sleep_state));
error = sysctl_handle_string(oidp, sleep_state, sizeof(sleep_state), req);
if (error == 0 && req->newptr != NULL) {
- new_state = acpi_sname2sstate(sleep_state);
- if (new_state < ACPI_STATE_S1)
+ new_sstate = acpi_sname_to_sstate(sleep_state);
+ if (new_sstate < 0)
return (EINVAL);
- if (new_state < ACPI_S_STATE_COUNT && !acpi_sleep_states[new_state])
+ if (new_sstate < ACPI_S_STATE_COUNT &&
+ !acpi_supported_sstates[new_sstate])
return (EOPNOTSUPP);
- if (new_state != old_state)
- *(int *)oidp->oid_arg1 = new_state;
+ if (new_sstate != old_sstate)
+ *(int *)oidp->oid_arg1 = new_sstate;
}
return (error);
}
+static int
+acpi_stype_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ char name[10];
+ int err;
+ int sstate;
+ enum power_stype new_stype, old_stype;
+
+ old_stype = *(enum power_stype *)oidp->oid_arg1;
+ strlcpy(name, power_stype_to_name(old_stype), sizeof(name));
+ err = sysctl_handle_string(oidp, name, sizeof(name), req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ new_stype = power_name_to_stype(name);
+ if (new_stype == POWER_STYPE_UNKNOWN) {
+ sstate = acpi_sname_to_sstate(name);
+ if (sstate < 0)
+ return (EINVAL);
+ printf("warning: this sysctl expects a sleep type, but an ACPI S-state has "
+ "been passed to it. This functionality is deprecated; see acpi(4).\n");
+ MPASS(sstate < ACPI_S_STATE_COUNT);
+ if (acpi_supported_sstates[sstate] == false)
+ return (EOPNOTSUPP);
+ new_stype = acpi_sstate_to_stype(sstate);
+ }
+
+ if (acpi_supported_stypes[new_stype] == false)
+ return (EOPNOTSUPP);
+ if (new_stype != old_stype)
+ *(enum power_stype *)oidp->oid_arg1 = new_stype;
+ return (0);
+}
+
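+/*
+ * power_stype_to_name() and power_name_to_stype() live in the generic
+ * power code rather than in this file. Only "s2idle" and "s2mem" are
+ * confirmed by the help strings above; a plausible sketch of the shared
+ * name table would be:
+ *
+ * [POWER_STYPE_AWAKE] = "awake",
+ * [POWER_STYPE_STANDBY] = "standby",
+ * [POWER_STYPE_SUSPEND_TO_IDLE] = "s2idle",
+ * [POWER_STYPE_SUSPEND_TO_MEM] = "s2mem",
+ * [POWER_STYPE_HIBERNATE] = "hibernate",
+ * [POWER_STYPE_POWEROFF] = "poweroff",
+ */
+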
/* Inform devctl(4) when we receive a Notify. */
void
acpi_UserNotify(const char *subsystem, ACPI_HANDLE h, uint8_t notify)
@@ -4590,12 +4789,10 @@ acpi_reset_interfaces(device_t dev)
}
static int
-acpi_pm_func(u_long cmd, void *arg, ...)
+acpi_pm_func(u_long cmd, void *arg, enum power_stype stype)
{
- int state, acpi_state;
int error;
struct acpi_softc *sc;
- va_list ap;
error = 0;
switch (cmd) {
@@ -4605,27 +4802,7 @@ acpi_pm_func(u_long cmd, void *arg, ...)
error = EINVAL;
goto out;
}
-
- va_start(ap, arg);
- state = va_arg(ap, int);
- va_end(ap);
-
- switch (state) {
- case POWER_SLEEP_STATE_STANDBY:
- acpi_state = sc->acpi_standby_sx;
- break;
- case POWER_SLEEP_STATE_SUSPEND:
- acpi_state = sc->acpi_suspend_sx;
- break;
- case POWER_SLEEP_STATE_HIBERNATE:
- acpi_state = ACPI_STATE_S4;
- break;
- default:
- error = EINVAL;
- goto out;
- }
-
- if (ACPI_FAILURE(acpi_EnterSleepState(sc, acpi_state)))
+ if (ACPI_FAILURE(acpi_EnterSleepState(sc, stype)))
error = ENXIO;
break;
default:
@@ -4643,7 +4820,8 @@ acpi_pm_register(void *arg)
if (!cold || resource_disabled("acpi", 0))
return;
- power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL);
+ power_pm_register(POWER_PM_TYPE_ACPI, acpi_pm_func, NULL,
+ acpi_supported_stypes);
}
SYSINIT(power, SI_SUB_KLD, SI_ORDER_ANY, acpi_pm_register, NULL);
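+
+/*
+ * The power_pm_register() call sites above imply updated MI declarations
+ * in <sys/power.h> (not part of this diff); presumably something like:
+ *
+ * typedef int power_pm_fn_t(u_long cmd, void *arg, enum power_stype stype);
+ * int power_pm_register(u_int pm_type, power_pm_fn_t *pm_fn, void *pm_arg,
+ *     const bool *supported_stypes);
+ *
+ * where supported_stypes points at a POWER_STYPE_COUNT-sized array such
+ * as acpi_supported_stypes.
+ */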
diff --git a/sys/dev/acpica/acpi_apei.c b/sys/dev/acpica/acpi_apei.c
index 9cfd46c97430..624c81ad1b4f 100644
--- a/sys/dev/acpica/acpi_apei.c
+++ b/sys/dev/acpica/acpi_apei.c
@@ -754,7 +754,7 @@ apei_detach(device_t dev)
apei_nmi = NULL;
apei_nmi_nges = NULL;
if (sc->nges.swi_ih != NULL) {
- swi_remove(&sc->nges.swi_ih);
+ swi_remove(sc->nges.swi_ih);
sc->nges.swi_ih = NULL;
}
if (acpi_get_handle(dev) != NULL) {
diff --git a/sys/dev/acpica/acpi_cmbat.c b/sys/dev/acpica/acpi_cmbat.c
index aeda34c3acff..35032244af19 100644
--- a/sys/dev/acpica/acpi_cmbat.c
+++ b/sys/dev/acpica/acpi_cmbat.c
@@ -398,8 +398,17 @@ acpi_cmbat_get_bix(void *arg)
sc->bix.rev != ACPI_BIX_REV_1)
ACPI_BIX_REV_MISMATCH_ERR(sc->bix.rev, ACPI_BIX_REV_1);
} else if (ACPI_PKG_VALID_EQ(res, 20)) {/* ACPI 4.0 _BIX */
- if (sc->bix.rev != ACPI_BIX_REV_0)
- ACPI_BIX_REV_MISMATCH_ERR(sc->bix.rev, ACPI_BIX_REV_0);
+ /*
+ * Some models claim to be rev.1, but have a _BIX with only 20
+ * members. Be lenient and treat this as a valid rev.0 _BIX.
+ */
+ if (sc->bix.rev != ACPI_BIX_REV_0) {
+ ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
+ "_BIX containing too few objects for revision %u. "
+ "Treating as revision %u instead.\n",
+ sc->bix.rev, ACPI_BIX_REV_0);
+ sc->bix.rev = ACPI_BIX_REV_0;
+ }
} else if (ACPI_PKG_VALID(res, 22)) {
/* _BIX with 22 or more members. */
if (ACPI_BIX_REV_MIN_CHECK(sc->bix.rev, ACPI_BIX_REV_1 + 1)) {
diff --git a/sys/dev/acpica/acpi_container.c b/sys/dev/acpica/acpi_container.c
index 0f23f6b25fe1..f9903e2b2bba 100644
--- a/sys/dev/acpica/acpi_container.c
+++ b/sys/dev/acpica/acpi_container.c
@@ -38,7 +38,6 @@ ACPI_MODULE_NAME("CONTAINER")
static int acpi_syscont_probe(device_t);
static int acpi_syscont_attach(device_t);
-static int acpi_syscont_detach(device_t);
static int acpi_syscont_alloc_msi(device_t, device_t,
int count, int maxcount, int *irqs);
static int acpi_syscont_release_msi(device_t bus, device_t dev,
@@ -54,7 +53,7 @@ static device_method_t acpi_syscont_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_syscont_probe),
DEVMETHOD(device_attach, acpi_syscont_attach),
- DEVMETHOD(device_detach, acpi_syscont_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
/* Bus interface */
DEVMETHOD(bus_add_child, bus_generic_add_child),
@@ -104,15 +103,9 @@ static int
acpi_syscont_attach(device_t dev)
{
- bus_generic_probe(dev);
- return (bus_generic_attach(dev));
-}
-
-static int
-acpi_syscont_detach(device_t dev)
-{
-
- return (bus_generic_detach(dev));
+ bus_identify_children(dev);
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/acpica/acpi_cpu.c b/sys/dev/acpica/acpi_cpu.c
index 80855cf168e9..f9b9a386c0c5 100644
--- a/sys/dev/acpica/acpi_cpu.c
+++ b/sys/dev/acpica/acpi_cpu.c
@@ -131,6 +131,7 @@ struct acpi_cpu_device {
#define PIIX4_PCNTRL_BST_EN (1<<10)
#define CST_FFH_VENDOR_INTEL 1
+#define CST_FFH_VENDOR_AMD 2
#define CST_FFH_INTEL_CL_C1IO 1
#define CST_FFH_INTEL_CL_MWAIT 2
#define CST_FFH_MWAIT_HW_COORD 0x0001
@@ -466,11 +467,11 @@ acpi_cpu_postattach(void *unused __unused)
bus_topo_lock();
CPU_FOREACH(i) {
if ((sc = cpu_softc[i]) != NULL)
- bus_generic_probe(sc->cpu_dev);
+ bus_identify_children(sc->cpu_dev);
}
CPU_FOREACH(i) {
if ((sc = cpu_softc[i]) != NULL) {
- bus_generic_attach(sc->cpu_dev);
+ bus_attach_children(sc->cpu_dev);
attached = 1;
}
}
@@ -855,7 +856,8 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
acpi_cpu_cx_cst_free_plvlx(sc->cpu_dev, cx_ptr);
#if defined(__i386__) || defined(__amd64__)
if (acpi_PkgFFH_IntelCpu(pkg, 0, &vendor, &class, &address,
- &accsize) == 0 && vendor == CST_FFH_VENDOR_INTEL) {
+ &accsize) == 0 &&
+ (vendor == CST_FFH_VENDOR_INTEL || vendor == CST_FFH_VENDOR_AMD)) {
if (class == CST_FFH_INTEL_CL_C1IO) {
/* C1 I/O then Halt */
cx_ptr->res_rid = sc->cpu_cx_count;
@@ -872,7 +874,9 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
"degrading to C1 Halt", (int)address);
}
} else if (class == CST_FFH_INTEL_CL_MWAIT) {
- acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
+ if (vendor == CST_FFH_VENDOR_INTEL ||
+ (vendor == CST_FFH_VENDOR_AMD && cpu_mon_mwait_edx != 0))
+ acpi_cpu_cx_cst_mwait(cx_ptr, address, accsize);
}
}
#endif
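+
+/*
+ * cpu_mon_mwait_edx is assumed to cache CPUID leaf 5 EDX (the MWAIT
+ * C-state sub-state enumeration); a zero value means MWAIT cannot be
+ * used to enter C-states, so the AMD FFH MWAIT entry is simply not used.
+ */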
@@ -922,6 +926,7 @@ acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type,
&cx_ptr->res_rid, &cx_ptr->p_lvlx, RF_SHAREABLE);
if (cx_ptr->p_lvlx) {
+ cx_ptr->do_mwait = false;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"acpi_cpu%d: Got C%d - %d latency\n",
device_get_unit(sc->cpu_dev), cx_ptr->type,
diff --git a/sys/dev/acpica/acpi_ec.c b/sys/dev/acpica/acpi_ec.c
index d27b7caee9e6..8ee7bc54f304 100644
--- a/sys/dev/acpica/acpi_ec.c
+++ b/sys/dev/acpica/acpi_ec.c
@@ -339,7 +339,6 @@ acpi_ec_probe(device_t dev)
ACPI_OBJECT *obj;
ACPI_STATUS status;
device_t peer;
- char desc[64];
int ecdt;
int ret, rc;
struct acpi_ec_params *params;
@@ -444,10 +443,9 @@ acpi_ec_probe(device_t dev)
ret = rc;
out:
if (ret <= 0) {
- snprintf(desc, sizeof(desc), "Embedded Controller: GPE %#x%s%s",
- params->gpe_bit, (params->glk) ? ", GLK" : "",
- ecdt ? ", ECDT" : "");
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "Embedded Controller: GPE %#x%s%s",
+ params->gpe_bit, (params->glk) ? ", GLK" : "",
+ ecdt ? ", ECDT" : "");
} else
free(params, M_TEMP);
diff --git a/sys/dev/acpica/acpi_ged.c b/sys/dev/acpica/acpi_ged.c
index 23e125f277c5..caf911758ed2 100644
--- a/sys/dev/acpica/acpi_ged.c
+++ b/sys/dev/acpica/acpi_ged.c
@@ -244,8 +244,9 @@ acpi_ged_attach(device_t dev)
}
if (bus_setup_intr(dev, sc->evts[i].r,
- INTR_TYPE_MISC | INTR_MPSAFE, NULL, acpi_ged_intr,
- &sc->evts[i], &sc->evts[i].cookie) != 0) {
+ INTR_TYPE_MISC | INTR_MPSAFE | INTR_SLEEPABLE |
+ INTR_EXCL, NULL, acpi_ged_intr, &sc->evts[i],
+ &sc->evts[i].cookie) != 0) {
device_printf(dev, "Failed to setup intr %d\n", i);
}
}
diff --git a/sys/dev/acpica/acpi_lid.c b/sys/dev/acpica/acpi_lid.c
index 142791f7282a..fb8755d9f0fe 100644
--- a/sys/dev/acpica/acpi_lid.c
+++ b/sys/dev/acpica/acpi_lid.c
@@ -235,9 +235,9 @@ acpi_lid_notify_status_changed(void *arg)
sc->lid_status ? "opened" : "closed");
if (sc->lid_status == 0)
- EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_sleep_event, acpi_sc->acpi_lid_switch_stype);
else
- EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_sx);
+ EVENTHANDLER_INVOKE(acpi_wakeup_event, acpi_sc->acpi_lid_switch_stype);
out:
ACPI_SERIAL_END(lid);
diff --git a/sys/dev/acpica/acpi_pci.c b/sys/dev/acpica/acpi_pci.c
index a1ac3499662d..1912350bbc48 100644
--- a/sys/dev/acpica/acpi_pci.c
+++ b/sys/dev/acpica/acpi_pci.c
@@ -53,9 +53,6 @@
#include <dev/iommu/iommu.h>
-#include "pcib_if.h"
-#include "pci_if.h"
-
/* Hooks for the ACPI CA debugging infrastructure. */
#define _COMPONENT ACPI_BUS
ACPI_MODULE_NAME("PCI")
@@ -93,6 +90,7 @@ static int acpi_pci_set_powerstate_method(device_t dev, device_t child,
int state);
static void acpi_pci_update_device(ACPI_HANDLE handle, device_t pci_child);
static bus_dma_tag_t acpi_pci_get_dma_tag(device_t bus, device_t child);
+static int acpi_pci_get_domain(device_t dev, device_t child, int *domain);
static device_method_t acpi_pci_methods[] = {
/* Device interface */
@@ -108,7 +106,7 @@ static device_method_t acpi_pci_methods[] = {
DEVMETHOD(bus_get_device_path, acpi_pci_get_device_path),
DEVMETHOD(bus_get_cpus, acpi_get_cpus),
DEVMETHOD(bus_get_dma_tag, acpi_pci_get_dma_tag),
- DEVMETHOD(bus_get_domain, acpi_get_domain),
+ DEVMETHOD(bus_get_domain, acpi_pci_get_domain),
/* PCI interface */
DEVMETHOD(pci_alloc_devinfo, acpi_pci_alloc_devinfo),
@@ -207,6 +205,31 @@ acpi_pci_get_device_path(device_t bus, device_t child, const char *locator, stru
}
/*
+ * Fetch the NUMA domain for the given device 'dev'.
+ *
+ * If a device has a _PXM method, map that to a NUMA domain.
+ * Otherwise, pass the request up to the parent.
+ * If there's no matching domain or the domain cannot be
+ * determined, return ENOENT.
+ */
+static int
+acpi_pci_get_domain(device_t dev, device_t child, int *domain)
+{
+ int d;
+
+ d = acpi_pxm_parse(child);
+ if (d >= 0) {
+ *domain = d;
+ return (0);
+ }
+ if (d == -1)
+ return (ENOENT);
+
+ /* No _PXM node; go up a level */
+ return (bus_generic_get_domain(dev, child, domain));
+}
+
+/*
 * PCI power management
*/
static int
@@ -240,12 +263,13 @@ acpi_pci_set_powerstate_method(device_t dev, device_t child, int state)
status = acpi_pwr_switch_consumer(h, state);
if (ACPI_SUCCESS(status)) {
if (bootverbose)
- device_printf(dev, "set ACPI power state D%d on %s\n",
- state, acpi_name(h));
+ device_printf(dev, "set ACPI power state %s on %s\n",
+ acpi_d_state_to_str(state), acpi_name(h));
} else if (status != AE_NOT_FOUND)
device_printf(dev,
- "failed to set ACPI power state D%d on %s: %s\n",
- state, acpi_name(h), AcpiFormatException(status));
+ "failed to set ACPI power state %s on %s: %s\n",
+ acpi_d_state_to_str(state), acpi_name(h),
+ AcpiFormatException(status));
if (old_state > state && pci_do_power_resume)
error = pci_set_powerstate_method(dev, child, state);
@@ -390,6 +414,9 @@ acpi_pci_device_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
device_get_nameunit(child), error);
return;
}
+ if ((acpi_quirks & ACPI_Q_CLEAR_PME_ON_DETACH) &&
+ pci_has_pm(child))
+ pci_clear_pme(child);
status = acpi_SetInteger(h, "_EJ0", 1);
if (ACPI_FAILURE(status)) {
bus_topo_unlock();
@@ -397,6 +424,8 @@ acpi_pci_device_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
acpi_name(h), AcpiFormatException(status));
return;
}
+ if (acpi_quirks & ACPI_Q_DELAY_BEFORE_EJECT_RESCAN)
+ DELAY(10 * 1000);
BUS_RESCAN(dev);
bus_topo_unlock();
break;
diff --git a/sys/dev/acpica/acpi_pci_link.c b/sys/dev/acpica/acpi_pci_link.c
index dac07a07ae7d..d9807876c443 100644
--- a/sys/dev/acpica/acpi_pci_link.c
+++ b/sys/dev/acpica/acpi_pci_link.c
@@ -143,7 +143,7 @@ acpi_short_name(ACPI_HANDLE handle, char *buffer, size_t buflen)
static int
acpi_pci_link_probe(device_t dev)
{
- char descr[28], name[12];
+ char name[12];
int rv;
/*
@@ -157,10 +157,9 @@ acpi_pci_link_probe(device_t dev)
return (rv);
if (ACPI_SUCCESS(acpi_short_name(acpi_get_handle(dev), name,
- sizeof(name)))) {
- snprintf(descr, sizeof(descr), "ACPI PCI Link %s", name);
- device_set_desc_copy(dev, descr);
- } else
+ sizeof(name))))
+ device_set_descf(dev, "ACPI PCI Link %s", name);
+ else
device_set_desc(dev, "ACPI PCI Link");
device_quiet(dev);
return (rv);
diff --git a/sys/dev/acpica/acpi_pcib.c b/sys/dev/acpica/acpi_pcib.c
index b16457ec853d..dfb4f143d5c4 100644
--- a/sys/dev/acpica/acpi_pcib.c
+++ b/sys/dev/acpica/acpi_pcib.c
@@ -38,6 +38,7 @@
#include <dev/acpica/acpivar.h>
#include <dev/acpica/acpi_pcibvar.h>
+#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include "pcib_if.h"
@@ -277,3 +278,61 @@ acpi_pcib_get_cpus(device_t pcib, device_t dev, enum cpu_sets op,
return (bus_get_cpus(pcib, op, setsize, cpuset));
}
+
+int
+acpi_pcib_osc(device_t pcib, uint32_t *ap_osc_ctl, uint32_t osc_ctl)
+{
+ ACPI_STATUS status;
+ ACPI_HANDLE handle;
+ uint32_t cap_set[3];
+
+ static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = {
+ 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40,
+ 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66
+ };
+
+ /*
+ * Don't invoke _OSC if a control is already granted.
+ * However, always invoke _OSC during attach when 0 is passed.
+ */
+ if (osc_ctl != 0 && (*ap_osc_ctl & osc_ctl) == osc_ctl)
+ return (0);
+
+ /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */
+ cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF |
+ PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI;
+ /* Active State Power Management, Clock Power Management Capability */
+ if (pci_enable_aspm)
+ cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM |
+ PCIM_OSC_SUPPORT_CPMC;
+
+ /* Control Field */
+ cap_set[PCI_OSC_CTL] = *ap_osc_ctl | osc_ctl;
+
+ handle = acpi_get_handle(pcib);
+ status = acpi_EvaluateOSC(handle, pci_host_bridge_uuid, 1,
+ nitems(cap_set), cap_set, cap_set, false);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ *ap_osc_ctl |= osc_ctl;
+ return (0);
+ }
+ device_printf(pcib, "_OSC failed: %s\n",
+ AcpiFormatException(status));
+ return (EIO);
+ }
+
+ /*
+ * _OSC may return an error in the status word, but will
+ * update the control mask always. _OSC should not revoke
+ * previously-granted controls.
+ */
+ if ((cap_set[PCI_OSC_CTL] & *ap_osc_ctl) != *ap_osc_ctl)
+ device_printf(pcib, "_OSC revoked %#x\n",
+ (cap_set[PCI_OSC_CTL] & *ap_osc_ctl) ^ *ap_osc_ctl);
+ *ap_osc_ctl = cap_set[PCI_OSC_CTL];
+ if ((*ap_osc_ctl & osc_ctl) != osc_ctl)
+ return (EIO);
+
+ return (0);
+}
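+
+/*
+ * Both call sites in this patch follow the same pattern: evaluate _OSC
+ * once at attach time with no controls requested, and again on demand
+ * when a driver asks for a specific feature:
+ *
+ * // at attach time
+ * acpi_pcib_osc(dev, &sc->ap_osc_ctl, 0);
+ *
+ * // later, e.g. from acpi_pcib_request_feature()
+ * error = acpi_pcib_osc(pcib, &sc->ap_osc_ctl, osc_ctl);
+ * if (error != 0)
+ *     return (error);
+ */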
diff --git a/sys/dev/acpica/acpi_pcib_acpi.c b/sys/dev/acpica/acpi_pcib_acpi.c
index 4c3d62a66d58..3913ec612f79 100644
--- a/sys/dev/acpica/acpi_pcib_acpi.c
+++ b/sys/dev/acpica/acpi_pcib_acpi.c
@@ -67,9 +67,7 @@ struct acpi_hpcib_softc {
int ap_addr; /* device/func of PCI-Host bridge */
ACPI_BUFFER ap_prt; /* interrupt routing table */
-#ifdef NEW_PCIB
struct pcib_host_resources ap_host_res;
-#endif
};
static int acpi_pcib_acpi_probe(device_t bus);
@@ -95,19 +93,15 @@ static struct resource *acpi_pcib_acpi_alloc_resource(device_t dev,
device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count,
u_int flags);
-#ifdef NEW_PCIB
static int acpi_pcib_acpi_adjust_resource(device_t dev,
device_t child, struct resource *r,
rman_res_t start, rman_res_t end);
-#ifdef PCI_RES_BUS
static int acpi_pcib_acpi_release_resource(device_t dev,
device_t child, struct resource *r);
static int acpi_pcib_acpi_activate_resource(device_t dev,
device_t child, struct resource *r);
static int acpi_pcib_acpi_deactivate_resource(device_t dev,
device_t child, struct resource *r);
-#endif
-#endif
static int acpi_pcib_request_feature(device_t pcib, device_t dev,
enum pci_feature feature);
static bus_dma_tag_t acpi_pcib_get_dma_tag(device_t bus, device_t child);
@@ -124,20 +118,10 @@ static device_method_t acpi_pcib_acpi_methods[] = {
DEVMETHOD(bus_read_ivar, acpi_pcib_read_ivar),
DEVMETHOD(bus_write_ivar, acpi_pcib_write_ivar),
DEVMETHOD(bus_alloc_resource, acpi_pcib_acpi_alloc_resource),
-#ifdef NEW_PCIB
DEVMETHOD(bus_adjust_resource, acpi_pcib_acpi_adjust_resource),
-#else
- DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
-#endif
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
DEVMETHOD(bus_release_resource, acpi_pcib_acpi_release_resource),
DEVMETHOD(bus_activate_resource, acpi_pcib_acpi_activate_resource),
DEVMETHOD(bus_deactivate_resource, acpi_pcib_acpi_deactivate_resource),
-#else
- DEVMETHOD(bus_release_resource, bus_generic_release_resource),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
-#endif
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_get_cpus, acpi_pcib_get_cpus),
@@ -183,7 +167,6 @@ acpi_pcib_acpi_probe(device_t dev)
return (0);
}
-#ifdef NEW_PCIB
static ACPI_STATUS
acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context)
{
@@ -252,11 +235,9 @@ acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context)
case ACPI_IO_RANGE:
type = SYS_RES_IOPORT;
break;
-#ifdef PCI_RES_BUS
case ACPI_BUS_NUMBER_RANGE:
type = PCI_RES_BUS;
break;
-#endif
default:
return (AE_OK);
}
@@ -290,9 +271,7 @@ acpi_pcib_producer_handler(ACPI_RESOURCE *res, void *context)
}
return (AE_OK);
}
-#endif
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static bool
get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp,
rman_res_t *endp)
@@ -306,63 +285,6 @@ get_decoded_bus_range(struct acpi_hpcib_softc *sc, rman_res_t *startp,
*endp = rle->end;
return (true);
}
-#endif
-
-static int
-acpi_pcib_osc(struct acpi_hpcib_softc *sc, uint32_t osc_ctl)
-{
- ACPI_STATUS status;
- uint32_t cap_set[3];
-
- static uint8_t pci_host_bridge_uuid[ACPI_UUID_LENGTH] = {
- 0x5b, 0x4d, 0xdb, 0x33, 0xf7, 0x1f, 0x1c, 0x40,
- 0x96, 0x57, 0x74, 0x41, 0xc0, 0x3d, 0xd7, 0x66
- };
-
- /*
- * Don't invoke _OSC if a control is already granted.
- * However, always invoke _OSC during attach when 0 is passed.
- */
- if (osc_ctl != 0 && (sc->ap_osc_ctl & osc_ctl) == osc_ctl)
- return (0);
-
- /* Support Field: Extended PCI Config Space, PCI Segment Groups, MSI */
- cap_set[PCI_OSC_SUPPORT] = PCIM_OSC_SUPPORT_EXT_PCI_CONF |
- PCIM_OSC_SUPPORT_SEG_GROUP | PCIM_OSC_SUPPORT_MSI;
- /* Active State Power Management, Clock Power Management Capability */
- if (pci_enable_aspm)
- cap_set[PCI_OSC_SUPPORT] |= PCIM_OSC_SUPPORT_ASPM |
- PCIM_OSC_SUPPORT_CPMC;
-
- /* Control Field */
- cap_set[PCI_OSC_CTL] = sc->ap_osc_ctl | osc_ctl;
-
- status = acpi_EvaluateOSC(sc->ap_handle, pci_host_bridge_uuid, 1,
- nitems(cap_set), cap_set, cap_set, false);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- sc->ap_osc_ctl |= osc_ctl;
- return (0);
- }
- device_printf(sc->ap_dev, "_OSC failed: %s\n",
- AcpiFormatException(status));
- return (EIO);
- }
-
- /*
- * _OSC may return an error in the status word, but will
- * update the control mask always. _OSC should not revoke
- * previously-granted controls.
- */
- if ((cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) != sc->ap_osc_ctl)
- device_printf(sc->ap_dev, "_OSC revoked %#x\n",
- (cap_set[PCI_OSC_CTL] & sc->ap_osc_ctl) ^ sc->ap_osc_ctl);
- sc->ap_osc_ctl = cap_set[PCI_OSC_CTL];
- if ((sc->ap_osc_ctl & osc_ctl) != osc_ctl)
- return (EIO);
-
- return (0);
-}
static int
acpi_pcib_acpi_attach(device_t dev)
@@ -371,11 +293,9 @@ acpi_pcib_acpi_attach(device_t dev)
ACPI_STATUS status;
static int bus0_seen = 0;
u_int slot, func, busok;
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct resource *bus_res;
rman_res_t end, start;
int rid;
-#endif
int error, domain;
uint8_t busno;
@@ -391,7 +311,7 @@ acpi_pcib_acpi_attach(device_t dev)
if (!acpi_DeviceIsPresent(dev))
return (ENXIO);
- acpi_pcib_osc(sc, 0);
+ acpi_pcib_osc(dev, &sc->ap_osc_ctl, 0);
/*
* Get our segment number by evaluating _SEG.
@@ -421,7 +341,6 @@ acpi_pcib_acpi_attach(device_t dev)
sc->ap_addr = -1;
}
-#ifdef NEW_PCIB
/*
* Determine which address ranges this bridge decodes and setup
* resource managers for those ranges.
@@ -435,7 +354,6 @@ acpi_pcib_acpi_attach(device_t dev)
device_printf(sc->ap_dev, "failed to parse resources: %s\n",
AcpiFormatException(status));
}
-#endif
/*
* Get our base bus number by evaluating _BBN.
@@ -491,7 +409,6 @@ acpi_pcib_acpi_attach(device_t dev)
}
}
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
* If nothing else worked, hope that ACPI at least lays out the
* Host-PCI bridges in order and that as a result the next free
@@ -536,18 +453,6 @@ acpi_pcib_acpi_attach(device_t dev)
}
}
}
-#else
- /*
- * If nothing else worked, hope that ACPI at least lays out the
- * host-PCI bridges in order and that as a result our unit number
- * is actually our bus number. There are several reasons this
- * might not be true.
- */
- if (busok == 0) {
- sc->ap_bus = device_get_unit(dev);
- device_printf(dev, "trying bus number %d\n", sc->ap_bus);
- }
-#endif
/* If this is bus 0 on segment 0, note that it has been seen already. */
if (sc->ap_segment == 0 && sc->ap_bus == 0)
@@ -567,20 +472,19 @@ acpi_pcib_acpi_attach(device_t dev)
/* Don't fail to attach if the domain can't be queried or set. */
error = 0;
- bus_generic_probe(dev);
- if (device_add_child(dev, "pci", -1) == NULL) {
+ bus_identify_children(dev);
+ if (device_add_child(dev, "pci", DEVICE_UNIT_ANY) == NULL) {
bus_dma_tag_destroy(sc->ap_dma_tag);
sc->ap_dma_tag = NULL;
error = ENXIO;
goto errout;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
errout:
device_printf(device_get_parent(dev), "couldn't attach pci bus\n");
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pcib_host_res_free(dev, &sc->ap_host_res);
-#endif
return (error);
}
@@ -704,22 +608,17 @@ struct resource *
acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
-#ifdef NEW_PCIB
struct acpi_hpcib_softc *sc;
struct resource *res;
-#endif
#if defined(__i386__) || defined(__amd64__)
start = hostb_alloc_start(type, start, end, count);
#endif
-#ifdef NEW_PCIB
sc = device_get_softc(dev);
-#ifdef PCI_RES_BUS
if (type == PCI_RES_BUS)
return (pci_domain_alloc_bus(sc->ap_segment, child, rid, start, end,
count, flags));
-#endif
res = pcib_host_res_alloc(&sc->ap_host_res, child, type, rid, start, end,
count, flags);
@@ -734,13 +633,8 @@ acpi_pcib_acpi_alloc_resource(device_t dev, device_t child, int type, int *rid,
res = bus_generic_alloc_resource(dev, child, type, rid, start, end,
count, flags);
return (res);
-#else
- return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
- count, flags));
-#endif
}
-#ifdef NEW_PCIB
int
acpi_pcib_acpi_adjust_resource(device_t dev, device_t child,
struct resource *r, rman_res_t start, rman_res_t end)
@@ -748,15 +642,12 @@ acpi_pcib_acpi_adjust_resource(device_t dev, device_t child,
struct acpi_hpcib_softc *sc;
sc = device_get_softc(dev);
-#ifdef PCI_RES_BUS
if (rman_get_type(r) == PCI_RES_BUS)
return (pci_domain_adjust_bus(sc->ap_segment, child, r, start,
end));
-#endif
return (pcib_host_res_adjust(&sc->ap_host_res, child, r, start, end));
}
-#ifdef PCI_RES_BUS
int
acpi_pcib_acpi_release_resource(device_t dev, device_t child,
struct resource *r)
@@ -792,8 +683,6 @@ acpi_pcib_acpi_deactivate_resource(device_t dev, device_t child,
return (pci_domain_deactivate_bus(sc->ap_segment, child, r));
return (bus_generic_deactivate_resource(dev, child, r));
}
-#endif
-#endif
static int
acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature)
@@ -814,7 +703,7 @@ acpi_pcib_request_feature(device_t pcib, device_t dev, enum pci_feature feature)
return (EINVAL);
}
- return (acpi_pcib_osc(sc, osc_ctl));
+ return (acpi_pcib_osc(pcib, &sc->ap_osc_ctl, osc_ctl));
}
static bus_dma_tag_t
diff --git a/sys/dev/acpica/acpi_pcibvar.h b/sys/dev/acpica/acpi_pcibvar.h
index 2b75c276f9d1..c763eeea1941 100644
--- a/sys/dev/acpica/acpi_pcibvar.h
+++ b/sys/dev/acpica/acpi_pcibvar.h
@@ -40,6 +40,7 @@ int acpi_pcib_route_interrupt(device_t pcib, device_t dev, int pin,
ACPI_BUFFER *prtbuf);
int acpi_pcib_power_for_sleep(device_t pcib, device_t dev,
int *pstate);
+int acpi_pcib_osc(device_t pcib, uint32_t *ap_osc_ctl, uint32_t osc_ctl);
#endif /* _KERNEL */
diff --git a/sys/dev/acpica/acpi_perf.c b/sys/dev/acpica/acpi_perf.c
index 0013d2a94552..ee7a4355f32a 100644
--- a/sys/dev/acpica/acpi_perf.c
+++ b/sys/dev/acpica/acpi_perf.c
@@ -142,7 +142,7 @@ acpi_perf_identify(driver_t *driver, device_t parent)
device_t dev;
/* Make sure we're not being doubly invoked. */
- if (device_find_child(parent, "acpi_perf", -1) != NULL)
+ if (device_find_child(parent, "acpi_perf", DEVICE_UNIT_ANY) != NULL)
return;
/* Get the handle for the Processor object and check for perf states. */
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 0f2a25b1d02b..29d1690f1bdd 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -299,7 +299,7 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_BUFFER reslist_buffer;
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
- char *method_name, *reslist_name;
+ char *method_name, *reslist_name = NULL;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -318,9 +318,26 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
panic("acpi added power consumer but can't find it");
}
- /* Check for valid transitions. We can only go to D0 from D3. */
+ /* Stop here if we're already at the target D-state. */
+ if (pc->ac_state == state) {
+ status = AE_OK;
+ goto out;
+ }
+
+ /*
+ * Check for valid transitions. From D3hot or D3cold, we can only go to D0.
+ * The one exception is moving between D3hot and D3cold in either
+ * direction: both use _PS3, so the only difference between those
+ * transitions is whether the _PR3 power resources are on for devices that
+ * support D3cold, and turning those power resources on/off is always
+ * permitted (ACPI 7.3.11).
+ */
status = AE_BAD_PARAMETER;
- if (pc->ac_state == ACPI_STATE_D3 && state != ACPI_STATE_D0)
+ if (pc->ac_state == ACPI_STATE_D3_HOT && state != ACPI_STATE_D0 &&
+ state != ACPI_STATE_D3_COLD)
+ goto out;
+ if (pc->ac_state == ACPI_STATE_D3_COLD && state != ACPI_STATE_D0 &&
+ state != ACPI_STATE_D3_HOT)
goto out;
/* Find transition mechanism(s) */
@@ -337,15 +354,20 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
method_name = "_PS2";
reslist_name = "_PR2";
break;
- case ACPI_STATE_D3:
+ case ACPI_STATE_D3_HOT:
method_name = "_PS3";
reslist_name = "_PR3";
break;
+ case ACPI_STATE_D3_COLD:
+ method_name = "_PS3";
+ reslist_name = NULL;
+ break;
default:
goto out;
}
- ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s D%d -> D%d\n",
- acpi_name(consumer), pc->ac_state, state));
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "setup to switch %s %s -> %s\n",
+ acpi_name(consumer), acpi_d_state_to_str(pc->ac_state),
+ acpi_d_state_to_str(state)));
/*
* Verify that this state is supported, ie. one of method or
@@ -359,7 +381,8 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
*/
if (ACPI_FAILURE(AcpiGetHandle(consumer, method_name, &method_handle)))
method_handle = NULL;
- if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
+ if (reslist_name == NULL ||
+ ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
reslist_handle = NULL;
if (reslist_handle == NULL && method_handle == NULL) {
if (state == ACPI_STATE_D0) {
@@ -367,9 +390,12 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
status = AE_OK;
goto out;
}
- if (state != ACPI_STATE_D3) {
+ if (state == ACPI_STATE_D3_COLD)
+ state = ACPI_STATE_D3_HOT;
+ if (state != ACPI_STATE_D3_HOT) {
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "attempt to set unsupported state D%d\n", state));
+ "attempt to set unsupported state %s\n",
+ acpi_d_state_to_str(state)));
goto out;
}
@@ -380,21 +406,23 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
if (ACPI_FAILURE(AcpiGetHandle(consumer, "_PR0", &pr0_handle))) {
status = AE_NOT_FOUND;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "device missing _PR0 (desired state was D%d)\n", state));
+ "device missing _PR0 (desired state was %s)\n",
+ acpi_d_state_to_str(state)));
goto out;
}
reslist_buffer.Length = ACPI_ALLOCATE_BUFFER;
status = AcpiEvaluateObject(pr0_handle, NULL, NULL, &reslist_buffer);
if (ACPI_FAILURE(status)) {
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "can't evaluate _PR0 for device %s, state D%d\n",
- acpi_name(consumer), state));
+ "can't evaluate _PR0 for device %s, state %s\n",
+ acpi_name(consumer), acpi_d_state_to_str(state)));
goto out;
}
reslist_object = (ACPI_OBJECT *)reslist_buffer.Pointer;
if (!ACPI_PKG_VALID(reslist_object, 1)) {
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "invalid package object for state D%d\n", state));
+ "invalid package object for state %s\n",
+ acpi_d_state_to_str(state)));
status = AE_TYPE;
goto out;
}
@@ -450,8 +478,8 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
*/
if (ACPI_FAILURE(status = acpi_pwr_switch_power())) {
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
- "failed to switch resources from %s to D%d\n",
- acpi_name(consumer), state));
+ "failed to switch resources from %s to %s\n",
+ acpi_name(consumer), acpi_d_state_to_str(state)));
/* XXX is this appropriate? Should we return to previous state? */
goto out;
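
The rule the new checks implement reduces to a small predicate: from D0-D2
any transition may be attempted, while from D3hot or D3cold the only legal
targets are D0 and the sibling D3 state, since both D3 variants share _PS3.
A sketch of that rule (not the committed code):

	static bool
	dstate_transition_ok(int from, int to)
	{
		if (from == ACPI_STATE_D3_HOT)
			return (to == ACPI_STATE_D0 ||
			    to == ACPI_STATE_D3_COLD);
		if (from == ACPI_STATE_D3_COLD)
			return (to == ACPI_STATE_D0 ||
			    to == ACPI_STATE_D3_HOT);
		return (true);	/* D0, D1 and D2 may go anywhere. */
	}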
diff --git a/sys/dev/acpica/acpi_resource.c b/sys/dev/acpica/acpi_resource.c
index 87b82a574beb..8ced9b32cb22 100644
--- a/sys/dev/acpica/acpi_resource.c
+++ b/sys/dev/acpica/acpi_resource.c
@@ -67,24 +67,48 @@ struct lookup_irq_request {
static char *pcilink_ids[] = { "PNP0C0F", NULL };
+/*
+ * Devices with invalid memory resources
+ */
+static char *bad_memresource_ids[] = {
+ /* PRCx on Radxa Orion O6 conflicts with the PCI resource range */
+ "CIXH2020",
+ NULL
+};
+
static ACPI_STATUS
acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
{
struct lookup_irq_request *req;
size_t len;
- u_int irqnum, irq, trig, pol;
+ u_int irqnum, trig, pol;
+ bool found;
+
+ found = false;
+ req = (struct lookup_irq_request *)context;
switch (res->Type) {
case ACPI_RESOURCE_TYPE_IRQ:
irqnum = res->Data.Irq.InterruptCount;
- irq = res->Data.Irq.Interrupts[0];
+ for (int i = 0; i < irqnum; i++) {
+ if (res->Data.Irq.Interrupts[i] == req->irq) {
+ found = true;
+ break;
+ }
+ }
len = ACPI_RS_SIZE(ACPI_RESOURCE_IRQ);
trig = res->Data.Irq.Triggering;
pol = res->Data.Irq.Polarity;
break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
irqnum = res->Data.ExtendedIrq.InterruptCount;
- irq = res->Data.ExtendedIrq.Interrupts[0];
+ for (int i = 0; i < irqnum; i++) {
+ if (res->Data.ExtendedIrq.Interrupts[i] == req->irq) {
+ found = true;
+ break;
+ }
+ }
len = ACPI_RS_SIZE(ACPI_RESOURCE_EXTENDED_IRQ);
trig = res->Data.ExtendedIrq.Triggering;
pol = res->Data.ExtendedIrq.Polarity;
@@ -92,18 +116,13 @@ acpi_lookup_irq_handler(ACPI_RESOURCE *res, void *context)
default:
return (AE_OK);
}
- if (irqnum != 1)
+ if (!found)
return (AE_OK);
- req = (struct lookup_irq_request *)context;
if (req->checkrid) {
if (req->counter != req->rid) {
req->counter++;
return (AE_OK);
}
- KASSERT(irq == req->irq, ("IRQ resources do not match"));
- } else {
- if (req->irq != irq)
- return (AE_OK);
}
req->found = 1;
req->pol = pol;
@@ -159,14 +178,11 @@ acpi_config_intr(device_t dev, ACPI_RESOURCE *res)
}
#if defined(__amd64__) || defined(__i386__)
- /*
- * XXX: Certain BIOSes have buggy AML that specify an IRQ that is
- * edge-sensitive and active-lo. However, edge-sensitive IRQs
- * should be active-hi. Force IRQs with an ISA IRQ value to be
- * active-hi instead.
- */
- if (irq < 16 && trig == ACPI_EDGE_SENSITIVE && pol == ACPI_ACTIVE_LOW)
+ if (irq < 16 && trig == ACPI_EDGE_SENSITIVE && pol == ACPI_ACTIVE_LOW &&
+ acpi_override_isa_irq_polarity) {
+ device_printf(dev, "forcing active-hi polarity for IRQ %u\n", irq);
pol = ACPI_ACTIVE_HIGH;
+ }
#endif
BUS_CONFIG_INTR(dev, irq, (trig == ACPI_EDGE_SENSITIVE) ?
INTR_TRIGGER_EDGE : INTR_TRIGGER_LEVEL, (pol == ACPI_ACTIVE_HIGH) ?
@@ -614,6 +630,11 @@ acpi_res_ignore(device_t dev, int type, rman_res_t start, rman_res_t count)
* access.
*/
if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
+ if (type == SYS_RES_MEMORY &&
+ ACPI_ID_PROBE(device_get_parent(dev), dev, bad_memresource_ids,
+ NULL) <= 0)
+ return (true);
+
if (ACPI_SUCCESS(AcpiGetObjectInfo(ad->ad_handle, &devinfo))) {
if ((devinfo->Flags & ACPI_PCI_ROOT_BRIDGE) != 0) {
#if defined(__i386__) || defined(__amd64__)
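
acpi_lookup_irq_handler() previously looked only at the first interrupt of a
resource and required exactly one entry; it now matches the requested IRQ
anywhere in the (possibly multi-entry) list. The matching loop, reduced to
its essence for the extended-IRQ case (a hypothetical helper using the same
ACPICA fields as the code above):

	static bool
	ext_irq_matches(const ACPI_RESOURCE_EXTENDED_IRQ *r, u_int irq)
	{
		for (u_int i = 0; i < r->InterruptCount; i++)
			if (r->Interrupts[i] == irq)
				return (true);
		return (false);
	}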
diff --git a/sys/dev/acpica/acpi_throttle.c b/sys/dev/acpica/acpi_throttle.c
index 5cdca63c27c6..8b2919c71073 100644
--- a/sys/dev/acpica/acpi_throttle.c
+++ b/sys/dev/acpica/acpi_throttle.c
@@ -131,7 +131,7 @@ acpi_throttle_identify(driver_t *driver, device_t parent)
ACPI_OBJECT *obj;
/* Make sure we're not being doubly invoked. */
- if (device_find_child(parent, "acpi_throttle", -1))
+ if (device_find_child(parent, "acpi_throttle", DEVICE_UNIT_ANY))
return;
/* Check for a valid duty width and parent CPU type. */
@@ -173,8 +173,8 @@ acpi_throttle_probe(device_t dev)
* Since p4tcc uses the same mechanism (but internal to the CPU),
* we disable acpi_throttle when p4tcc is also present.
*/
- if (device_find_child(device_get_parent(dev), "p4tcc", -1) &&
- !resource_disabled("p4tcc", 0))
+ if (device_find_child(device_get_parent(dev), "p4tcc", DEVICE_UNIT_ANY)
+ && !resource_disabled("p4tcc", 0))
return (ENXIO);
device_set_desc(dev, "ACPI CPU Throttling");
diff --git a/sys/dev/acpica/acpi_timer.c b/sys/dev/acpica/acpi_timer.c
index 8b362f52047a..b20912e2f5fb 100644
--- a/sys/dev/acpica/acpi_timer.c
+++ b/sys/dev/acpica/acpi_timer.c
@@ -34,6 +34,7 @@
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -69,16 +70,13 @@ bool acpi_timer_disabled = false;
static void acpi_timer_identify(driver_t *driver, device_t parent);
static int acpi_timer_probe(device_t dev);
static int acpi_timer_attach(device_t dev);
-static void acpi_timer_resume_handler(struct timecounter *);
-static void acpi_timer_suspend_handler(struct timecounter *);
+static void acpi_timer_resume_handler(struct timecounter *,
+ enum power_stype);
+static void acpi_timer_suspend_handler(struct timecounter *,
+ enum power_stype);
static u_int acpi_timer_get_timecount(struct timecounter *tc);
static u_int acpi_timer_get_timecount_safe(struct timecounter *tc);
static int acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS);
-static void acpi_timer_boot_test(void);
-
-static int acpi_timer_test(void);
-static int acpi_timer_test_enabled = 0;
-TUNABLE_INT("hw.acpi.timer_test_enabled", &acpi_timer_test_enabled);
static device_method_t acpi_timer_methods[] = {
DEVMETHOD(device_identify, acpi_timer_identify),
@@ -159,8 +157,7 @@ acpi_timer_identify(driver_t *driver, device_t parent)
static int
acpi_timer_probe(device_t dev)
{
- char desc[40];
- int i, j, rid, rtype;
+ int rid, rtype;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -193,36 +190,15 @@ acpi_timer_probe(device_t dev)
acpi_timer_timecounter.tc_counter_mask = 0x00ffffff;
acpi_timer_timecounter.tc_frequency = acpi_timer_frequency;
acpi_timer_timecounter.tc_flags = TC_FLAGS_SUSPEND_SAFE;
- if (testenv("debug.acpi.timer_test"))
- acpi_timer_boot_test();
-
- /*
- * If all tests of the counter succeed, use the ACPI-fast method. If
- * at least one failed, default to using the safe routine, which reads
- * the timer multiple times to get a consistent value before returning.
- */
- j = 0;
- if (bootverbose)
- printf("ACPI timer:");
- for (i = 0; i < 10; i++)
- j += acpi_timer_test();
- if (bootverbose)
- printf(" -> %d\n", j);
- if (j == 10) {
- acpi_timer_timecounter.tc_name = "ACPI-fast";
- acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount;
- acpi_timer_timecounter.tc_quality = 900;
- } else {
- acpi_timer_timecounter.tc_name = "ACPI-safe";
- acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount_safe;
- acpi_timer_timecounter.tc_quality = 850;
- }
+
+ acpi_timer_timecounter.tc_name = "ACPI-fast";
+ acpi_timer_timecounter.tc_get_timecount = acpi_timer_get_timecount;
+ acpi_timer_timecounter.tc_quality = 900;
tc_init(&acpi_timer_timecounter);
- sprintf(desc, "%d-bit timer at %u.%06uMHz",
+ device_set_descf(dev, "%d-bit timer at %u.%06uMHz",
(AcpiGbl_FADT.Flags & ACPI_FADT_32BIT_TIMER) != 0 ? 32 : 24,
acpi_timer_frequency / 1000000, acpi_timer_frequency % 1000000);
- device_set_desc_copy(dev, desc);
/* Release the resource, we'll allocate it again during attach. */
bus_release_resource(dev, rtype, rid, acpi_timer_reg);
@@ -262,7 +238,7 @@ acpi_timer_attach(device_t dev)
}
static void
-acpi_timer_resume_handler(struct timecounter *newtc)
+acpi_timer_resume_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
@@ -278,7 +254,7 @@ acpi_timer_resume_handler(struct timecounter *newtc)
}
static void
-acpi_timer_suspend_handler(struct timecounter *newtc)
+acpi_timer_suspend_handler(struct timecounter *newtc, enum power_stype stype)
{
struct timecounter *tc;
@@ -370,108 +346,3 @@ SYSCTL_PROC(_machdep, OID_AUTO, acpi_timer_freq,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
acpi_timer_sysctl_freq, "I",
"ACPI timer frequency");
-
-/*
- * Some ACPI timers are known or believed to suffer from implementation
- * problems which can lead to erroneous values being read. This function
- * tests for consistent results from the timer and returns 1 if it believes
- * the timer is consistent, otherwise it returns 0.
- *
- * It appears the cause is that the counter is not latched to the PCI bus
- * clock when read:
- *
- * ] 20. ACPI Timer Errata
- * ]
- * ] Problem: The power management timer may return improper result when
- * ] read. Although the timer value settles properly after incrementing,
- * ] while incrementing there is a 3nS window every 69.8nS where the
- * ] timer value is indeterminate (a 4.2% chance that the data will be
- * ] incorrect when read). As a result, the ACPI free running count up
- * ] timer specification is violated due to erroneous reads. Implication:
- * ] System hangs due to the "inaccuracy" of the timer when used by
- * ] software for time critical events and delays.
- * ]
- * ] Workaround: Read the register twice and compare.
- * ] Status: This will not be fixed in the PIIX4 or PIIX4E, it is fixed
- * ] in the PIIX4M.
- */
-#define N 2000
-static int
-acpi_timer_test(void)
-{
- uint32_t last, this;
- int delta, max, max2, min, n;
- register_t s;
-
- /* Skip the test based on the hw.acpi.timer_test_enabled tunable. */
- if (!acpi_timer_test_enabled)
- return (1);
-
- TSENTER();
-
- min = INT32_MAX;
- max = max2 = 0;
-
- /* Test the timer with interrupts disabled to get accurate results. */
- s = intr_disable();
- last = acpi_timer_read();
- for (n = 0; n < N; n++) {
- this = acpi_timer_read();
- delta = acpi_TimerDelta(this, last);
- if (delta > max) {
- max2 = max;
- max = delta;
- } else if (delta > max2)
- max2 = delta;
- if (delta < min)
- min = delta;
- last = this;
- }
- intr_restore(s);
-
- delta = max2 - min;
- if ((max - min > 8 || delta > 3) && vm_guest == VM_GUEST_NO)
- n = 0;
- else if (min < 0 || max == 0 || max2 == 0)
- n = 0;
- else
- n = 1;
- if (bootverbose)
- printf(" %d/%d", n, delta);
-
- TSEXIT();
-
- return (n);
-}
-#undef N
-
-/*
- * Test harness for verifying ACPI timer behaviour.
- * Boot with debug.acpi.timer_test set to invoke this.
- */
-static void
-acpi_timer_boot_test(void)
-{
- uint32_t u1, u2, u3;
-
- u1 = acpi_timer_read();
- u2 = acpi_timer_read();
- u3 = acpi_timer_read();
-
- device_printf(acpi_timer_dev, "timer test in progress, reboot to quit.\n");
- for (;;) {
- /*
- * The failure case is where u3 > u1, but u2 does not fall between
- * the two, ie. it contains garbage.
- */
- if (u3 > u1) {
- if (u2 < u1 || u2 > u3)
- device_printf(acpi_timer_dev,
- "timer is not monotonic: 0x%08x,0x%08x,0x%08x\n",
- u1, u2, u3);
- }
- u1 = u2;
- u2 = u3;
- u3 = acpi_timer_read();
- }
-}
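
With the PIIX4-era consistency test gone, probe also drops its stack buffer:
device_set_descf() formats and stores the description in one call, replacing
the sprintf()/device_set_desc_copy() pair. A sketch, where the parameters
stand for values the driver derives from the FADT:

	static void
	set_timer_desc(device_t dev, int width, u_int mhz, u_int frac)
	{
		/* Formats and stores the description; no local buffer. */
		device_set_descf(dev, "%d-bit timer at %u.%06uMHz",
		    width, mhz, frac);
	}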
diff --git a/sys/dev/acpica/acpi_video.c b/sys/dev/acpica/acpi_video.c
index da7c2a4d19cb..7a22c9dc0994 100644
--- a/sys/dev/acpica/acpi_video.c
+++ b/sys/dev/acpica/acpi_video.c
@@ -274,8 +274,8 @@ static void
acpi_video_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "acpi_video", -1) == NULL)
- device_add_child(parent, "acpi_video", -1);
+ if (device_find_child(parent, "acpi_video", DEVICE_UNIT_ANY) == NULL)
+ device_add_child(parent, "acpi_video", DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 2322ab96014b..71d8e46ab310 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -40,6 +40,7 @@
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/power.h>
#include <sys/selinfo.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
@@ -53,20 +54,19 @@ struct acpi_softc {
struct cdev *acpi_dev_t;
int acpi_enabled;
- int acpi_sstate;
+ enum power_stype acpi_stype;
int acpi_sleep_disabled;
struct sysctl_ctx_list acpi_sysctl_ctx;
struct sysctl_oid *acpi_sysctl_tree;
- int acpi_power_button_sx;
- int acpi_sleep_button_sx;
- int acpi_lid_switch_sx;
+ enum power_stype acpi_power_button_stype;
+ enum power_stype acpi_sleep_button_stype;
+ enum power_stype acpi_lid_switch_stype;
int acpi_standby_sx;
- int acpi_suspend_sx;
+ int acpi_s4bios;
int acpi_sleep_delay;
- int acpi_s4bios;
int acpi_do_disable;
int acpi_verbose;
int acpi_handle_reboot;
@@ -74,7 +74,7 @@ struct acpi_softc {
vm_offset_t acpi_wakeaddr;
vm_paddr_t acpi_wakephys;
- int acpi_next_sstate; /* Next suspend Sx state. */
+ enum power_stype acpi_next_stype; /* Next suspend sleep type. */
struct apm_clone_data *acpi_clone; /* Pseudo-dev for devd(8). */
STAILQ_HEAD(,apm_clone_data) apm_cdevs; /* All apm/apmctl/acpi cdevs. */
struct callout susp_force_to; /* Force suspend if no acks. */
@@ -89,6 +89,7 @@ struct acpi_device {
void *ad_private;
int ad_flags;
int ad_cls_class;
+ int ad_domain;
ACPI_BUFFER dsd; /* Device Specific Data */
const ACPI_OBJECT *dsd_pkg;
@@ -227,12 +228,33 @@ extern struct mtx acpi_mutex;
* ACPI_Q_MADT_IRQ0: Specifies that ISA IRQ 0 is wired up to pin 0 of the
* first APIC and that the MADT should force that by ignoring the PC-AT
* compatible flag and ignoring overrides that redirect IRQ 0 to pin 2.
+ * ACPI_Q_AEI_NOPULL: Specifies that _AEI objects incorrectly designate pins
+ * as "PullUp" and they should be treated as "NoPull" instead.
+ * ACPI_Q_CLEAR_PME_ON_DETACH: Specifies that PCIM_PSTAT_PME should be set
+ * and PCIM_PSTAT_PMEENABLE cleared in the power status register as part
+ * of ACPI Eject.
+ * ACPI_Q_DELAY_BEFORE_EJECT_RESCAN: Specifies that we need a short (10ms)
+ * delay after _EJ0 returns before rescanning the PCI bus.
*/
extern int acpi_quirks;
#define ACPI_Q_OK 0
#define ACPI_Q_BROKEN (1 << 0)
#define ACPI_Q_TIMER (1 << 1)
#define ACPI_Q_MADT_IRQ0 (1 << 2)
+#define ACPI_Q_AEI_NOPULL (1 << 3)
+#define ACPI_Q_CLEAR_PME_ON_DETACH (1 << 4)
+#define ACPI_Q_DELAY_BEFORE_EJECT_RESCAN (1 << 5)
+
+#if defined(__amd64__) || defined(__i386__)
+/*
+ * Certain Intel BIOSes have buggy AML that specify an IRQ that is
+ * edge-sensitive and active-lo. Normally, edge-sensitive IRQs should
+ * be active-hi. If this value is non-zero, edge-sensitive ISA IRQs
+ * are forced to be active-hi instead. At least some AMD systems use
+ * active-lo edge-sensitive ISA IRQs, so this setting is only enabled
+ * by default on systems with Intel CPUs.
+ */
+extern int acpi_override_isa_irq_polarity;
+#endif
/*
* Plug and play information for device matching. Matching table format
@@ -257,6 +279,12 @@ extern int acpi_quirks;
#define ACPI_IVAR_UNUSED 0x101 /* Unused/reserved. */
#define ACPI_IVAR_PRIVATE 0x102
#define ACPI_IVAR_FLAGS 0x103
+#define ACPI_IVAR_DOMAIN 0x104
+
+/*
+ * Special ad_domain value denoting an unknown NUMA domain.
+ */
+#define ACPI_DEV_DOMAIN_UNKNOWN (-1)
/*
* Accessor functions for our ivars. Default value for BUS_READ_IVAR is
@@ -282,6 +310,7 @@ static __inline void varp ## _set_ ## var(device_t dev, type t) \
__ACPI_BUS_ACCESSOR(acpi, handle, ACPI, HANDLE, ACPI_HANDLE)
__ACPI_BUS_ACCESSOR(acpi, private, ACPI, PRIVATE, void *)
__ACPI_BUS_ACCESSOR(acpi, flags, ACPI, FLAGS, int)
+__ACPI_BUS_ACCESSOR(acpi, domain, ACPI, DOMAIN, int)
void acpi_fake_objhandler(ACPI_HANDLE h, void *data);
static __inline device_t
@@ -382,7 +411,7 @@ ACPI_STATUS acpi_EvaluateOSC(ACPI_HANDLE handle, uint8_t *uuid,
uint32_t *caps_out, bool query);
ACPI_STATUS acpi_OverrideInterruptLevel(UINT32 InterruptNumber);
ACPI_STATUS acpi_SetIntrModel(int model);
-int acpi_ReqSleepState(struct acpi_softc *sc, int state);
+int acpi_ReqSleepState(struct acpi_softc *sc, enum power_stype stype);
int acpi_AckSleepState(struct apm_clone_data *clone, int error);
ACPI_STATUS acpi_SetSleepState(struct acpi_softc *sc, int state);
int acpi_wake_set_enable(device_t dev, int enable);
@@ -488,6 +517,16 @@ acpi_get_verbose(struct acpi_softc *sc)
return (0);
}
+static __inline const char *
+acpi_d_state_to_str(int state)
+{
+ const char *strs[ACPI_D_STATE_COUNT] = {"D0", "D1", "D2", "D3hot",
+ "D3cold"};
+
+ MPASS(state >= ACPI_STATE_D0 && state <= ACPI_D_STATES_MAX);
+ return (strs[state]);
+}
+
char *acpi_name(ACPI_HANDLE handle);
int acpi_avoid(ACPI_HANDLE handle);
int acpi_disabled(char *subsys);
@@ -570,6 +609,7 @@ void acpi_pxm_parse_tables(void);
void acpi_pxm_set_mem_locality(void);
void acpi_pxm_set_cpu_locality(void);
int acpi_pxm_get_cpu_locality(int apic_id);
+int acpi_pxm_parse(device_t dev);
/*
* Map a PXM to a VM domain.
@@ -578,19 +618,19 @@ int acpi_pxm_get_cpu_locality(int apic_id);
*/
int acpi_map_pxm_to_vm_domainid(int pxm);
bus_get_cpus_t acpi_get_cpus;
-bus_get_domain_t acpi_get_domain;
#ifdef __aarch64__
/*
* ARM specific ACPI interfaces, relating to IORT table.
*/
int acpi_iort_map_pci_msi(u_int seg, u_int rid, u_int *xref, u_int *devid);
-int acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, u_int *xref, u_int *devid);
+int acpi_iort_map_pci_smmuv3(u_int seg, u_int rid, uint64_t *xref,
+ u_int *devid);
int acpi_iort_its_lookup(u_int its_id, u_int *xref, int *pxm);
int acpi_iort_map_named_msi(const char *devname, u_int rid, u_int *xref,
u_int *devid);
-int acpi_iort_map_named_smmuv3(const char *devname, u_int rid, u_int *xref,
- u_int *devid);
+int acpi_iort_map_named_smmuv3(const char *devname, u_int rid,
+ uint64_t *xref, u_int *devid);
#endif
#endif /* _KERNEL */
#endif /* !_ACPIVAR_H_ */
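
acpi_d_state_to_str() is the helper the acpi_powerres.c hunks above rely on
for readable debug output; now that D3hot and D3cold are distinct values,
printing "D%d" would be ambiguous. A usage sketch:

	/* Sketch: log a transition by name rather than raw number. */
	static void
	log_dstate_switch(device_t dev, int from, int to)
	{
		device_printf(dev, "switching %s -> %s\n",
		    acpi_d_state_to_str(from), acpi_d_state_to_str(to));
	}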
diff --git a/sys/dev/adb/adb_bus.c b/sys/dev/adb/adb_bus.c
index 395ed555fec5..7f4bd8676fcf 100644
--- a/sys/dev/adb/adb_bus.c
+++ b/sys/dev/adb/adb_bus.c
@@ -42,7 +42,6 @@
static int adb_bus_probe(device_t dev);
static int adb_bus_attach(device_t dev);
-static int adb_bus_detach(device_t dev);
static void adb_bus_enumerate(void *xdev);
static void adb_probe_nomatch(device_t dev, device_t child);
static int adb_print_child(device_t dev, device_t child);
@@ -57,7 +56,7 @@ static device_method_t adb_bus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, adb_bus_probe),
DEVMETHOD(device_attach, adb_bus_attach),
- DEVMETHOD(device_detach, adb_bus_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
@@ -178,21 +177,16 @@ adb_bus_enumerate(void *xdev)
for (i = 0; i < 16; i++) {
if (sc->devinfo[i].default_address) {
- sc->children[i] = device_add_child(dev, NULL, -1);
+ sc->children[i] = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
device_set_ivars(sc->children[i], &sc->devinfo[i]);
}
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
config_intrhook_disestablish(&sc->enum_hook);
}
-static int adb_bus_detach(device_t dev)
-{
- return (bus_generic_detach(dev));
-}
-
static void
adb_probe_nomatch(device_t dev, device_t child)
{
diff --git a/sys/dev/ae/if_ae.c b/sys/dev/ae/if_ae.c
index 62c1a8a30cc8..87de885701ae 100644
--- a/sys/dev/ae/if_ae.c
+++ b/sys/dev/ae/if_ae.c
@@ -238,7 +238,7 @@ ae_attach(device_t dev)
if_t ifp;
uint8_t chiprev;
uint32_t pcirev;
- int nmsi, pmc;
+ int nmsi;
int error;
sc = device_get_softc(dev); /* Automatically allocated and zeroed
@@ -326,12 +326,6 @@ ae_attach(device_t dev)
goto fail;
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "could not allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -342,7 +336,7 @@ ae_attach(device_t dev)
if_sethwassist(ifp, 0);
if_setsendqlen(ifp, ifqmaxlen);
if_setsendqready(ifp);
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
+ if (pci_has_pm(dev)) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
sc->flags |= AE_FLAG_PMG;
}
@@ -368,12 +362,6 @@ ae_attach(device_t dev)
*/
sc->tq = taskqueue_create_fast("ae_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
- if (sc->tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->dev));
@@ -773,10 +761,6 @@ ae_detach(device_t dev)
taskqueue_free(sc->tq);
sc->tq = NULL;
}
- if (sc->miibus != NULL) {
- device_delete_child(dev, sc->miibus);
- sc->miibus = NULL;
- }
bus_generic_detach(sc->dev);
ae_dma_free(sc);
if (sc->intrhand != NULL) {
@@ -1318,9 +1302,7 @@ ae_pm_init(ae_softc_t *sc)
{
if_t ifp;
uint32_t val;
- uint16_t pmstat;
struct mii_data *mii;
- int pmc;
AE_LOCK_ASSERT(sc);
@@ -1379,13 +1361,8 @@ ae_pm_init(ae_softc_t *sc)
/*
* Configure PME.
*/
- if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
- pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
- if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
+ if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
+ pci_enable_pme(sc->dev);
}
static int
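
The same power-management conversion repeats in age(4), alc(4) and ale(4)
below: rather than locating the capability with pci_find_cap(dev, PCIY_PMG,
&pmc) and read-modify-writing PCIR_POWER_STATUS by hand, drivers ask
pci_has_pm() and let pci_enable_pme() set PCIM_PSTAT_PME |
PCIM_PSTAT_PMEENABLE. The resulting idiom, sketched:

	static void
	arm_wol(device_t dev, if_t ifp)
	{
		if (!pci_has_pm(dev))
			return;			/* no PM capability */
		if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
			pci_enable_pme(dev);	/* sets PME + PMEENABLE */
	}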
diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c
index afed720b6e56..46d92ab11f53 100644
--- a/sys/dev/age/if_age.c
+++ b/sys/dev/age/if_age.c
@@ -460,7 +460,7 @@ age_attach(device_t dev)
struct age_softc *sc;
if_t ifp;
uint16_t burst;
- int error, i, msic, msixc, pmc;
+ int error, i, msic, msixc;
error = 0;
sc = device_get_softc(dev);
@@ -590,12 +590,6 @@ age_attach(device_t dev)
age_get_macaddr(sc);
ifp = sc->age_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -606,8 +600,7 @@ age_attach(device_t dev)
if_setsendqready(ifp);
if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
if_sethwassist(ifp, AGE_CSUM_FEATURES | CSUM_TSO);
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
- sc->age_flags |= AGE_FLAG_PMCAP;
+ if (pci_has_pm(dev)) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
}
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -634,12 +627,6 @@ age_attach(device_t dev)
/* Create local taskq. */
sc->age_tq = taskqueue_create_fast("age_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->age_tq);
- if (sc->age_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->age_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->age_dev));
@@ -698,10 +685,6 @@ age_detach(device_t dev)
sc->age_tq = NULL;
}
- if (sc->age_miibus != NULL) {
- device_delete_child(dev, sc->age_miibus);
- sc->age_miibus = NULL;
- }
bus_generic_detach(dev);
age_dma_free(sc);
@@ -1319,12 +1302,11 @@ age_setwol(struct age_softc *sc)
if_t ifp;
struct mii_data *mii;
uint32_t reg, pmcs;
- uint16_t pmstat;
- int aneg, i, pmc;
+ int aneg, i;
AGE_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->age_dev, PCIY_PMG, &pmc) != 0) {
+ if (!pci_has_pm(sc->age_dev)) {
CSR_WRITE_4(sc, AGE_WOL_CFG, 0);
/*
* No PME capability, PHY power down.
@@ -1430,11 +1412,8 @@ got_link:
}
/* Request PME. */
- pmstat = pci_read_config(sc->age_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->age_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->age_dev);
#ifdef notyet
/* See above for powering down PHY issues. */
if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
diff --git a/sys/dev/age/if_agevar.h b/sys/dev/age/if_agevar.h
index 72073654d5d2..54e26fed8416 100644
--- a/sys/dev/age/if_agevar.h
+++ b/sys/dev/age/if_agevar.h
@@ -210,7 +210,6 @@ struct age_softc {
#define AGE_FLAG_PCIX 0x0002
#define AGE_FLAG_MSI 0x0004
#define AGE_FLAG_MSIX 0x0008
-#define AGE_FLAG_PMCAP 0x0010
#define AGE_FLAG_DETACH 0x4000
#define AGE_FLAG_LINK 0x8000
diff --git a/sys/dev/agp/agp.c b/sys/dev/agp/agp.c
index f7eb906fc57c..ddf735a4130e 100644
--- a/sys/dev/agp/agp.c
+++ b/sys/dev/agp/agp.c
@@ -56,6 +56,7 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
#include <vm/pmap.h>
#include <machine/bus.h>
@@ -254,7 +255,7 @@ agp_generic_attach(device_t dev)
mdargs.mda_uid = UID_ROOT;
mdargs.mda_gid = GID_WHEEL;
mdargs.mda_mode = 0600;
- mdargs.mda_si_drv1 = sc;
+ mdargs.mda_si_drv1 = dev;
mdargs.mda_si_drv2 = NULL;
unit = device_get_unit(dev);
@@ -539,6 +540,7 @@ int
agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
vm_offset_t offset)
{
+ struct pctrie_iter pages;
struct agp_softc *sc = device_get_softc(dev);
vm_offset_t i, j, k;
vm_page_t m;
@@ -571,7 +573,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
AGP_DPF("found page pa=%#jx\n", (uintmax_t)VM_PAGE_TO_PHYS(m));
}
VM_OBJECT_WUNLOCK(mem->am_obj);
-
+ vm_page_iter_init(&pages, mem->am_obj);
mtx_lock(&sc->as_lock);
if (mem->am_is_bound) {
@@ -588,7 +590,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
*/
VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
- m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(i));
+ m = vm_radix_iter_lookup(&pages, OFF_TO_IDX(i));
/*
* Install entries in the GATT, making sure that if
@@ -631,7 +633,7 @@ bad:
mtx_unlock(&sc->as_lock);
VM_OBJECT_ASSERT_WLOCKED(mem->am_obj);
for (k = 0; k < mem->am_size; k += PAGE_SIZE) {
- m = vm_page_lookup(mem->am_obj, OFF_TO_IDX(k));
+ m = vm_radix_iter_lookup(&pages, OFF_TO_IDX(k));
if (k >= i)
vm_page_xunbusy(m);
vm_page_unwire(m, PQ_INACTIVE);
@@ -644,6 +646,7 @@ bad:
int
agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
{
+ struct pctrie_iter pages;
struct agp_softc *sc = device_get_softc(dev);
vm_page_t m;
int i;
@@ -665,9 +668,10 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
AGP_FLUSH_TLB(dev);
+ vm_page_iter_init(&pages, mem->am_obj);
VM_OBJECT_WLOCK(mem->am_obj);
for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
- m = vm_page_lookup(mem->am_obj, atop(i));
+ m = vm_radix_iter_lookup(&pages, atop(i));
vm_page_unwire(m, PQ_INACTIVE);
}
VM_OBJECT_WUNLOCK(mem->am_obj);
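
Both loops above switch from per-page vm_page_lookup() calls, which descend
the object's radix trie from the root every time, to a pctrie iterator that
resumes from its previous position. A sketch of the iteration pattern,
assuming the object lock is handled as in the driver:

	static void
	unwire_pages(vm_object_t obj, vm_offset_t size)
	{
		struct pctrie_iter pages;
		vm_offset_t off;
		vm_page_t m;

		vm_page_iter_init(&pages, obj);
		VM_OBJECT_WLOCK(obj);
		for (off = 0; off < size; off += PAGE_SIZE) {
			/* The iterator resumes from its last position. */
			m = vm_radix_iter_lookup(&pages, OFF_TO_IDX(off));
			vm_page_unwire(m, PQ_INACTIVE);
		}
		VM_OBJECT_WUNLOCK(obj);
	}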
diff --git a/sys/dev/agp/agp_i810.c b/sys/dev/agp/agp_i810.c
index df977889a9c0..9d955745f673 100644
--- a/sys/dev/agp/agp_i810.c
+++ b/sys/dev/agp/agp_i810.c
@@ -680,9 +680,9 @@ static void
agp_i810_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "agp", -1) == NULL &&
+ if (device_find_child(parent, "agp", DEVICE_UNIT_ANY) == NULL &&
agp_i810_match(parent))
- device_add_child(parent, "agp", -1);
+ device_add_child(parent, "agp", DEVICE_UNIT_ANY);
}
static int
@@ -1796,7 +1796,7 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
vm_page_unwire(m, PQ_INACTIVE);
VM_OBJECT_WUNLOCK(mem->am_obj);
} else {
- contigfree(sc->argb_cursor, mem->am_size, M_AGP);
+ free(sc->argb_cursor, M_AGP);
sc->argb_cursor = NULL;
}
}
diff --git a/sys/dev/ahci/ahci.c b/sys/dev/ahci/ahci.c
index b1f9c85141bb..4ef3259dd10f 100644
--- a/sys/dev/ahci/ahci.c
+++ b/sys/dev/ahci/ahci.c
@@ -37,8 +37,8 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -354,7 +354,7 @@ ahci_attach(device_t dev)
}
/* Attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
- child = device_add_child(dev, "ahcich", -1);
+ child = device_add_child(dev, "ahcich", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "failed to add channel device\n");
continue;
@@ -365,7 +365,7 @@ ahci_attach(device_t dev)
}
/* Attach any remapped NVME device */
for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) {
- child = device_add_child(dev, "nvme", -1);
+ child = device_add_child(dev, "nvme", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "failed to add remapped NVMe device");
continue;
@@ -377,13 +377,13 @@ ahci_attach(device_t dev)
resource_int_value(device_get_name(dev), device_get_unit(dev),
"em", &em);
if (em) {
- child = device_add_child(dev, "ahciem", -1);
+ child = device_add_child(dev, "ahciem", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(dev, "failed to add enclosure device\n");
else
device_set_ivars(child, (void *)(intptr_t)AHCI_EM_UNIT);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -391,10 +391,12 @@ int
ahci_detach(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
- int i;
+ int error, i;
/* Detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
/* Free interrupts. */
for (i = 0; i < ctlr->numirqs; i++) {
@@ -765,7 +767,7 @@ static int
ahci_ch_probe(device_t dev)
{
- device_set_desc_copy(dev, "AHCI channel");
+ device_set_desc(dev, "AHCI channel");
return (BUS_PROBE_DEFAULT);
}
@@ -2175,7 +2177,7 @@ completeall:
}
xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path,
ch->hold[i]->ccb_h.pinfo.priority);
- if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+ if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) {
/* READ LOG */
ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
ccb->ccb_h.func_code = XPT_ATA_IO;
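
ahci_detach() now fails gracefully: bus_generic_detach() both detaches and
deletes the children, and its error is propagated instead of the children
being removed unconditionally with device_delete_children(). A sketch of the
ordering this implies:

	static int
	ctlr_detach(device_t dev)
	{
		int error;

		/* Children first; abort if any child refuses to detach. */
		error = bus_generic_detach(dev);
		if (error != 0)
			return (error);
		/* Only now release interrupts, BARs, and so on. */
		return (0);
	}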
diff --git a/sys/dev/ahci/ahci_generic.c b/sys/dev/ahci/ahci_generic.c
index 90487b4622eb..2d4babb17645 100644
--- a/sys/dev/ahci/ahci_generic.c
+++ b/sys/dev/ahci/ahci_generic.c
@@ -76,7 +76,7 @@ ahci_fdt_probe(device_t dev)
if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
return (ENXIO);
- device_set_desc_copy(dev, "AHCI SATA controller");
+ device_set_desc(dev, "AHCI SATA controller");
node = ofw_bus_get_node(dev);
ctlr->dma_coherent = OF_hasprop(node, "dma-coherent");
return (BUS_PROBE_DEFAULT);
@@ -107,7 +107,7 @@ ahci_acpi_probe(device_t dev)
if (pci_get_class(dev) == PCIC_STORAGE &&
pci_get_subclass(dev) == PCIS_STORAGE_SATA &&
pci_get_progif(dev) == PCIP_STORAGE_SATA_AHCI_1_0) {
- device_set_desc_copy(dev, "AHCI SATA controller");
+ device_set_desc(dev, "AHCI SATA controller");
if (ACPI_FAILURE(acpi_GetInteger(h, "_CCA",
&ctlr->dma_coherent)))
ctlr->dma_coherent = 0;
diff --git a/sys/dev/ahci/ahci_pci.c b/sys/dev/ahci/ahci_pci.c
index 7cc3ce18c8ae..2b4cb37275a6 100644
--- a/sys/dev/ahci/ahci_pci.c
+++ b/sys/dev/ahci/ahci_pci.c
@@ -34,10 +34,10 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/tree.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -72,6 +72,9 @@ static const struct {
{0x43b61022, 0x00, "AMD X399", 0},
{0x43b51022, 0x00, "AMD 300 Series", 0}, /* X370 */
{0x43b71022, 0x00, "AMD 300 Series", 0}, /* B350 */
+ {0x43c81022, 0x00, "AMD 400 Series", 0}, /* B450 */
+ {0x43eb1022, 0x00, "AMD 500 Series", 0},
+ {0x43f61022, 0x00, "AMD 600 Series", 0}, /* X670 */
{0x78001022, 0x00, "AMD Hudson-2", 0},
{0x78011022, 0x00, "AMD Hudson-2", 0},
{0x78021022, 0x00, "AMD Hudson-2", 0},
@@ -192,6 +195,7 @@ static const struct {
{0x1f3f8086, 0x00, "Intel Avoton (RAID)", 0},
{0x23a38086, 0x00, "Intel Coleto Creek", 0},
{0x31e38086, 0x00, "Intel Gemini Lake", 0},
+ {0x4b638086, 0x00, "Intel Elkhart Lake", 0},
{0x5ae38086, 0x00, "Intel Apollo Lake", 0},
{0x7ae28086, 0x00, "Intel Alder Lake", 0},
{0x8c028086, 0x00, "Intel Lynx Point", 0},
@@ -399,7 +403,6 @@ ahci_pci_ctlr_reset(device_t dev)
static int
ahci_probe(device_t dev)
{
- char buf[64];
int i, valid = 0;
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
@@ -430,22 +433,20 @@ ahci_probe(device_t dev)
(ahci_ids[i].quirks & AHCI_Q_NOFORCE) &&
(pci_read_config(dev, 0xdf, 1) & 0x40) == 0)
return (ENXIO);
- snprintf(buf, sizeof(buf), "%s AHCI SATA controller",
+ device_set_descf(dev, "%s AHCI SATA controller",
ahci_ids[i].name);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
}
if (valid != 1)
return (ENXIO);
- device_set_desc_copy(dev, "AHCI SATA controller");
+ device_set_desc(dev, "AHCI SATA controller");
return (BUS_PROBE_DEFAULT);
}
static int
ahci_ata_probe(device_t dev)
{
- char buf[64];
int i;
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
@@ -456,39 +457,16 @@ ahci_ata_probe(device_t dev)
for (i = 0; ahci_ids[i].id != 0; i++) {
if (ahci_ids[i].id == devid &&
ahci_ids[i].rev <= revid) {
- snprintf(buf, sizeof(buf), "%s AHCI SATA controller",
+ device_set_descf(dev, "%s AHCI SATA controller",
ahci_ids[i].name);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
}
- device_set_desc_copy(dev, "AHCI SATA controller");
+ device_set_desc(dev, "AHCI SATA controller");
return (BUS_PROBE_DEFAULT);
}
static int
-ahci_pci_read_msix_bars(device_t dev, uint8_t *table_bar, uint8_t *pba_bar)
-{
- int cap_offset = 0, ret;
- uint32_t val;
-
- if ((table_bar == NULL) || (pba_bar == NULL))
- return (EINVAL);
-
- ret = pci_find_cap(dev, PCIY_MSIX, &cap_offset);
- if (ret != 0)
- return (EINVAL);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_TABLE, 4);
- *table_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- val = pci_read_config(dev, cap_offset + PCIR_MSIX_PBA, 4);
- *pba_bar = PCIR_BAR(val & PCIM_MSIX_BIR_MASK);
-
- return (0);
-}
-
-static int
ahci_pci_attach(device_t dev)
{
struct ahci_controller *ctlr = device_get_softc(dev);
@@ -496,7 +474,6 @@ ahci_pci_attach(device_t dev)
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
int msi_count, msix_count;
- uint8_t table_bar = 0, pba_bar = 0;
uint32_t caps, pi;
msi_count = pci_msi_count(dev);
@@ -584,20 +561,11 @@ ahci_pci_attach(device_t dev)
if (ctlr->quirks & AHCI_Q_NOMSIX)
msix_count = 0;
- /* Read MSI-x BAR IDs if supported */
- if (msix_count > 0) {
- error = ahci_pci_read_msix_bars(dev, &table_bar, &pba_bar);
- if (error == 0) {
- ctlr->r_msix_tab_rid = table_bar;
- ctlr->r_msix_pba_rid = pba_bar;
- } else {
- /* Failed to read BARs, disable MSI-x */
- msix_count = 0;
- }
- }
-
/* Allocate resources for MSI-x table and PBA */
if (msix_count > 0) {
+ ctlr->r_msix_tab_rid = pci_msix_table_bar(dev);
+ ctlr->r_msix_pba_rid = pci_msix_pba_bar(dev);
+
/*
* Allocate new MSI-x table only if not
* allocated before.
@@ -608,8 +576,8 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_table = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_tab_rid, RF_ACTIVE);
if (ctlr->r_msix_table == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
+ goto no_msix;
}
}
@@ -624,12 +592,12 @@ ahci_pci_attach(device_t dev)
ctlr->r_msix_pba = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&ctlr->r_msix_pba_rid, RF_ACTIVE);
if (ctlr->r_msix_pba == NULL) {
- ahci_free_mem(dev);
- return (ENXIO);
+ msix_count = 0;
}
}
}
+no_msix:
pci_enable_busmaster(dev);
/* Reset controller */
if ((error = ahci_pci_ctlr_reset(dev)) != 0) {
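
The driver-local MSI-X BAR parser is dropped in favor of the generic
accessors, and a failed MSI-X resource allocation now falls back to INTx/MSI
(the no_msix label) instead of failing the attach. The accessors, sketched
here, return the PCIR_BAR() rid of the table and PBA respectively:

	static void
	get_msix_rids(device_t dev, int *tab_rid, int *pba_rid)
	{
		*tab_rid = pci_msix_table_bar(dev);
		*pba_rid = pci_msix_pba_bar(dev);
	}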
diff --git a/sys/dev/ahci/ahciem.c b/sys/dev/ahci/ahciem.c
index 8b941a73a4e0..c9e6c35f4233 100644
--- a/sys/dev/ahci/ahciem.c
+++ b/sys/dev/ahci/ahciem.c
@@ -36,7 +36,7 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -63,7 +63,7 @@ static int
ahci_em_probe(device_t dev)
{
- device_set_desc_copy(dev, "AHCI enclosure management bridge");
+ device_set_desc(dev, "AHCI enclosure management bridge");
return (BUS_PROBE_DEFAULT);
}
diff --git a/sys/dev/aic7xxx/ahc_pci.c b/sys/dev/aic7xxx/ahc_pci.c
index 1f9cd33f9e00..8109a6714814 100644
--- a/sys/dev/aic7xxx/ahc_pci.c
+++ b/sys/dev/aic7xxx/ahc_pci.c
@@ -138,7 +138,7 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
struct resource *regs;
int regs_type;
int regs_id;
- int allow_memio;
+ int allow_memio = 1;
regs = NULL;
regs_type = 0;
@@ -150,14 +150,14 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
"allow_memio", &allow_memio) != 0) {
if (bootverbose)
device_printf(ahc->dev_softc, "Defaulting to MEMIO ");
-#ifdef AHC_ALLOW_MEMIO
- if (bootverbose)
- printf("on\n");
- allow_memio = 1;
-#else
+#if defined(AHC_ALLOW_MEMIO) && (AHC_ALLOW_MEMIO == 0)
if (bootverbose)
printf("off\n");
allow_memio = 0;
+#else
+ if (bootverbose)
+ printf("on\n");
+ allow_memio = 1;
#endif
}
diff --git a/sys/dev/aic7xxx/aic79xx.c b/sys/dev/aic7xxx/aic79xx.c
index 0c4b615c5b24..cee45fa5cc8a 100644
--- a/sys/dev/aic7xxx/aic79xx.c
+++ b/sys/dev/aic7xxx/aic79xx.c
@@ -7788,8 +7788,8 @@ ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
}
if (role != ROLE_TARGET) {
- for (;i < maxtarget; i++) {
- for (j = minlun;j < maxlun; j++) {
+ for (; i < maxtarget; i++) {
+ for (j = minlun; j < maxlun; j++) {
u_int scbid;
u_int tcl;
@@ -8593,8 +8593,7 @@ ahd_loadseq(struct ahd_softc *ahd)
if (sg_prefetch_align == 0)
sg_prefetch_align = 8;
/* Round down to the nearest power of 2. */
- while (powerof2(sg_prefetch_align) == 0)
- sg_prefetch_align--;
+ sg_prefetch_align = rounddown_pow_of_two(sg_prefetch_align);
cacheline_mask = sg_prefetch_align - 1;
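
rounddown_pow_of_two() computes in one step what the old decrement loop
found by linear search: the largest power of two not exceeding its argument
(12 -> 8, 8 -> 8). A sketch of the derived mask, assuming a nonzero
argument as the surrounding code guarantees:

	static u_int
	cacheline_mask_for(u_int sg_prefetch_align)
	{
		/* e.g. 12 -> 8, mask 0x7; 8 -> 8, mask 0x7. */
		return (rounddown_pow_of_two(sg_prefetch_align) - 1);
	}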
diff --git a/sys/dev/aic7xxx/aic7xxx.c b/sys/dev/aic7xxx/aic7xxx.c
index c09876e9f589..18f68b806948 100644
--- a/sys/dev/aic7xxx/aic7xxx.c
+++ b/sys/dev/aic7xxx/aic7xxx.c
@@ -5903,8 +5903,8 @@ ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
}
if (role != ROLE_TARGET) {
- for (;i < maxtarget; i++) {
- for (j = minlun;j < maxlun; j++) {
+ for (; i < maxtarget; i++) {
+ for (j = minlun; j < maxlun; j++) {
u_int scbid;
u_int tcl;
diff --git a/sys/dev/al_eth/al_eth.c b/sys/dev/al_eth/al_eth.c
index b8dd95e7ca58..f4fec7c6aa94 100644
--- a/sys/dev/al_eth/al_eth.c
+++ b/sys/dev/al_eth/al_eth.c
@@ -1580,7 +1580,6 @@ al_eth_rx_recv_work(void *arg, int pending)
{
struct al_eth_ring *rx_ring = arg;
struct mbuf *mbuf;
- struct lro_entry *queued;
unsigned int qid = rx_ring->ring_id;
struct al_eth_pkt *hal_pkt = &rx_ring->hal_pkt;
uint16_t next_to_clean = rx_ring->next_to_clean;
@@ -1671,10 +1670,7 @@ al_eth_rx_recv_work(void *arg, int pending)
"%s: not filling rx queue %d\n", __func__, qid);
}
- while (((queued = LIST_FIRST(&rx_ring->lro.lro_active)) != NULL)) {
- LIST_REMOVE(queued, next);
- tcp_lro_flush(&rx_ring->lro, queued);
- }
+ tcp_lro_flush_all(&rx_ring->lro);
if (napi != 0) {
rx_ring->enqueue_is_running = 0;
@@ -2004,14 +2000,6 @@ al_eth_enable_msix(struct al_eth_adapter *adapter)
adapter->msix_entries = malloc(msix_vecs*sizeof(*adapter->msix_entries),
M_IFAL, M_ZERO | M_WAITOK);
-
- if (adapter->msix_entries == NULL) {
- device_printf_dbg(adapter->dev, "failed to allocate"
- " msix_entries %d\n", msix_vecs);
- rc = ENOMEM;
- goto exit;
- }
-
/* management vector (GROUP_A) @2*/
adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].entry = 2;
adapter->msix_entries[AL_ETH_MGMT_IRQ_IDX].vector = 0;
@@ -2299,9 +2287,6 @@ al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
size = sizeof(struct al_eth_tx_buffer) * tx_ring->sw_count;
tx_ring->tx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
- if (tx_ring->tx_buffer_info == NULL)
- return (ENOMEM);
-
tx_ring->descs_size = tx_ring->hw_count * sizeof(union al_udma_desc);
q_params->size = tx_ring->hw_count;
@@ -2324,10 +2309,6 @@ al_eth_setup_tx_resources(struct al_eth_adapter *adapter, int qid)
mtx_init(&tx_ring->br_mtx, "AlRingMtx", NULL, MTX_DEF);
tx_ring->br = buf_ring_alloc(AL_BR_SIZE, M_DEVBUF, M_WAITOK,
&tx_ring->br_mtx);
- if (tx_ring->br == NULL) {
- device_printf(dev, "Critical Failure setting up buf ring\n");
- return (ENOMEM);
- }
/* Allocate taskqueues */
TASK_INIT(&tx_ring->enqueue_task, 0, al_eth_start_xmit, tx_ring);
@@ -2476,9 +2457,6 @@ al_eth_setup_rx_resources(struct al_eth_adapter *adapter, unsigned int qid)
size += 1;
rx_ring->rx_buffer_info = malloc(size, M_IFAL, M_ZERO | M_WAITOK);
- if (rx_ring->rx_buffer_info == NULL)
- return (ENOMEM);
-
rx_ring->descs_size = rx_ring->hw_count * sizeof(union al_udma_desc);
q_params->size = rx_ring->hw_count;
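
Two simplifications recur in this driver: the hand-rolled drain of
lro_active becomes a single tcp_lro_flush_all() call, and NULL checks after
malloc(..., M_WAITOK) are deleted because M_WAITOK allocations sleep until
they succeed and cannot return NULL. The LRO flush, sketched:

	static void
	rx_flush_lro(struct lro_ctrl *lro)
	{
		/* Flushes every queued entry on lro->lro_active. */
		tcp_lro_flush_all(lro);
	}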
diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c
index 86ae705667de..7d47054414d6 100644
--- a/sys/dev/alc/if_alc.c
+++ b/sys/dev/alc/if_alc.c
@@ -1584,12 +1584,6 @@ alc_attach(device_t dev)
alc_get_macaddr(sc);
ifp = sc->alc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1600,10 +1594,9 @@ alc_attach(device_t dev)
if_setsendqready(ifp);
if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_TSO4);
if_sethwassist(ifp, ALC_CSUM_FEATURES | CSUM_TSO);
- if (pci_find_cap(dev, PCIY_PMG, &base) == 0) {
+ if (pci_has_pm(dev)) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
sc->alc_flags |= ALC_FLAG_PM;
- sc->alc_pmcap = base;
}
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -1645,12 +1638,6 @@ alc_attach(device_t dev)
/* Create local taskq. */
sc->alc_tq = taskqueue_create_fast("alc_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->alc_tq);
- if (sc->alc_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->alc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->alc_dev));
@@ -1711,10 +1698,6 @@ alc_detach(device_t dev)
sc->alc_tq = NULL;
}
- if (sc->alc_miibus != NULL) {
- device_delete_child(dev, sc->alc_miibus);
- sc->alc_miibus = NULL;
- }
bus_generic_detach(dev);
alc_dma_free(sc);
@@ -2546,7 +2529,6 @@ alc_setwol_813x(struct alc_softc *sc)
{
if_t ifp;
uint32_t reg, pmcs;
- uint16_t pmstat;
ALC_LOCK_ASSERT(sc);
@@ -2595,13 +2577,8 @@ alc_setwol_813x(struct alc_softc *sc)
CSR_READ_4(sc, ALC_MASTER_CFG) | MASTER_CLK_SEL_DIS);
}
/* Request PME. */
- pmstat = pci_read_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->alc_dev);
}
static void
@@ -2609,7 +2586,6 @@ alc_setwol_816x(struct alc_softc *sc)
{
if_t ifp;
uint32_t gphy, mac, master, pmcs, reg;
- uint16_t pmstat;
ALC_LOCK_ASSERT(sc);
@@ -2660,13 +2636,8 @@ alc_setwol_816x(struct alc_softc *sc)
if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
/* Request PME. */
- pmstat = pci_read_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->alc_dev);
}
}
@@ -2690,22 +2661,11 @@ alc_resume(device_t dev)
{
struct alc_softc *sc;
if_t ifp;
- uint16_t pmstat;
sc = device_get_softc(dev);
- ALC_LOCK(sc);
- if ((sc->alc_flags & ALC_FLAG_PM) != 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->alc_dev,
- sc->alc_pmcap + PCIR_POWER_STATUS, pmstat, 2);
- }
- }
/* Reset PHY. */
+ ALC_LOCK(sc);
alc_phy_reset(sc);
ifp = sc->alc_ifp;
if ((if_getflags(ifp) & IFF_UP) != 0) {
diff --git a/sys/dev/alc/if_alcvar.h b/sys/dev/alc/if_alcvar.h
index f68c22146868..c3073c6f0a2e 100644
--- a/sys/dev/alc/if_alcvar.h
+++ b/sys/dev/alc/if_alcvar.h
@@ -219,7 +219,6 @@ struct alc_softc {
uint32_t alc_dma_wr_burst;
uint32_t alc_rcb;
int alc_expcap;
- int alc_pmcap;
int alc_flags;
#define ALC_FLAG_PCIE 0x0001
#define ALC_FLAG_PCIX 0x0002
diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c
index dcab9d10dfa4..fa2306f1525e 100644
--- a/sys/dev/ale/if_ale.c
+++ b/sys/dev/ale/if_ale.c
@@ -452,7 +452,7 @@ ale_attach(device_t dev)
struct ale_softc *sc;
if_t ifp;
uint16_t burst;
- int error, i, msic, msixc, pmc;
+ int error, i, msic, msixc;
uint32_t rxf_len, txf_len;
error = 0;
@@ -609,12 +609,6 @@ ale_attach(device_t dev)
ale_get_macaddr(sc);
ifp = sc->ale_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -625,8 +619,7 @@ ale_attach(device_t dev)
if_setsendqready(ifp);
if_setcapabilities(ifp, IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4);
if_sethwassist(ifp, ALE_CSUM_FEATURES | CSUM_TSO);
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
- sc->ale_flags |= ALE_FLAG_PMCAP;
+ if (pci_has_pm(dev)) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC | IFCAP_WOL_MCAST, 0);
}
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -661,12 +654,6 @@ ale_attach(device_t dev)
/* Create local taskq. */
sc->ale_tq = taskqueue_create_fast("ale_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->ale_tq);
- if (sc->ale_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->ale_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->ale_dev));
@@ -723,10 +710,6 @@ ale_detach(device_t dev)
sc->ale_tq = NULL;
}
- if (sc->ale_miibus != NULL) {
- device_delete_child(dev, sc->ale_miibus);
- sc->ale_miibus = NULL;
- }
bus_generic_detach(dev);
ale_dma_free(sc);
@@ -1483,12 +1466,10 @@ ale_setwol(struct ale_softc *sc)
{
if_t ifp;
uint32_t reg, pmcs;
- uint16_t pmstat;
- int pmc;
ALE_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) != 0) {
+ if (!pci_has_pm(sc->ale_dev)) {
/* Disable WOL. */
CSR_WRITE_4(sc, ALE_WOL_CFG, 0);
reg = CSR_READ_4(sc, ALE_PCIE_PHYMISC);
@@ -1534,11 +1515,8 @@ ale_setwol(struct ale_softc *sc)
GPHY_CTRL_PWDOWN_HW);
}
/* Request PME. */
- pmstat = pci_read_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->ale_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->ale_dev);
}
static int
@@ -1561,23 +1539,11 @@ ale_resume(device_t dev)
{
struct ale_softc *sc;
if_t ifp;
- int pmc;
- uint16_t pmstat;
sc = device_get_softc(dev);
- ALE_LOCK(sc);
- if (pci_find_cap(sc->ale_dev, PCIY_PMG, &pmc) == 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->ale_dev,
- pmc + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->ale_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
- }
/* Reset PHY. */
+ ALE_LOCK(sc);
ale_phy_reset(sc);
ifp = sc->ale_ifp;
if ((if_getflags(ifp) & IFF_UP) != 0) {
diff --git a/sys/dev/ale/if_alevar.h b/sys/dev/ale/if_alevar.h
index 2baff5106b81..74ed9edb0ff3 100644
--- a/sys/dev/ale/if_alevar.h
+++ b/sys/dev/ale/if_alevar.h
@@ -200,7 +200,6 @@ struct ale_softc {
#define ALE_FLAG_PCIX 0x0002
#define ALE_FLAG_MSI 0x0004
#define ALE_FLAG_MSIX 0x0008
-#define ALE_FLAG_PMCAP 0x0010
#define ALE_FLAG_FASTETHER 0x0020
#define ALE_FLAG_JUMBO 0x0040
#define ALE_FLAG_RXCSUM_BUG 0x0080
diff --git a/sys/dev/alpm/alpm.c b/sys/dev/alpm/alpm.c
index 6bd84c96d282..d7c3d3657d3b 100644
--- a/sys/dev/alpm/alpm.c
+++ b/sys/dev/alpm/alpm.c
@@ -218,12 +218,12 @@ alpm_attach(device_t dev)
mtx_init(&alpm->lock, device_get_nameunit(dev), "alpm", MTX_DEF);
/* attach the smbus */
- alpm->smbus = device_add_child(dev, "smbus", -1);
+ alpm->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (alpm->smbus == NULL) {
alpm_detach(dev);
return (EINVAL);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -232,11 +232,12 @@ static int
alpm_detach(device_t dev)
{
struct alpm_softc *alpm = device_get_softc(dev);
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
- if (alpm->smbus) {
- device_delete_child(dev, alpm->smbus);
- alpm->smbus = NULL;
- }
mtx_destroy(&alpm->lock);
if (alpm->res)
diff --git a/sys/dev/altera/atse/if_atse.c b/sys/dev/altera/atse/if_atse.c
deleted file mode 100644
index 923292484207..000000000000
--- a/sys/dev/altera/atse/if_atse.c
+++ /dev/null
@@ -1,1597 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012, 2013 Bjoern A. Zeeb
- * Copyright (c) 2014 Robert N. M. Watson
- * Copyright (c) 2016-2017 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
- * ("MRC2"), as part of the DARPA MRC research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * Altera Triple-Speed Ethernet MegaCore, Function User Guide
- * UG-01008-3.0, Software Version: 12.0, June 2012.
- * Available at the time of writing at:
- * http://www.altera.com/literature/ug/ug_ethernet.pdf
- *
- * We are using an Marvell E1111 (Alaska) PHY on the DE4. See mii/e1000phy.c.
- */
-/*
- * XXX-BZ NOTES:
- * - ifOutBroadcastPkts are only counted if both ether dst and src are all-1s;
- * seems an IP core bug, they count ether broadcasts as multicast. Is this
- * still the case?
- * - figure out why the TX FIFO fill status and intr did not work as expected.
- * - test 100Mbit/s and 10Mbit/s
- * - blacklist the one special factory programmed ethernet address (for now
- * hardcoded, later from loader?)
- * - resolve all XXX, left as reminders to shake out details later
- * - Jumbo frame support
- */
-
-#include <sys/cdefs.h>
-#include "opt_device_polling.h"
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/bus.h>
-#include <sys/endian.h>
-#include <sys/jail.h>
-#include <sys/lock.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/proc.h>
-#include <sys/socket.h>
-#include <sys/sockio.h>
-#include <sys/types.h>
-
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <net/if_var.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-#include <net/if_types.h>
-#include <net/if_vlan_var.h>
-
-#include <net/bpf.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/rman.h>
-
-#include <dev/mii/mii.h>
-#include <dev/mii/miivar.h>
-
-#include <dev/altera/atse/if_atsereg.h>
-#include <dev/xdma/xdma.h>
-
-#define RX_QUEUE_SIZE 4096
-#define TX_QUEUE_SIZE 4096
-#define NUM_RX_MBUF 512
-#define BUFRING_SIZE 8192
-
-#include <machine/cache.h>
-
-/* XXX once we'd do parallel attach, we need a global lock for this. */
-#define ATSE_ETHERNET_OPTION_BITS_UNDEF 0
-#define ATSE_ETHERNET_OPTION_BITS_READ 1
-static int atse_ethernet_option_bits_flag = ATSE_ETHERNET_OPTION_BITS_UNDEF;
-static uint8_t atse_ethernet_option_bits[ALTERA_ETHERNET_OPTION_BITS_LEN];
-
-/*
- * Softc and critical resource locking.
- */
-#define ATSE_LOCK(_sc) mtx_lock(&(_sc)->atse_mtx)
-#define ATSE_UNLOCK(_sc) mtx_unlock(&(_sc)->atse_mtx)
-#define ATSE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->atse_mtx, MA_OWNED)
-
-#define ATSE_DEBUG
-#undef ATSE_DEBUG
-
-#ifdef ATSE_DEBUG
-#define DPRINTF(format, ...) printf(format, __VA_ARGS__)
-#else
-#define DPRINTF(format, ...)
-#endif
-
-/*
- * Register space access macros.
- */
-static inline void
-csr_write_4(struct atse_softc *sc, uint32_t reg, uint32_t val4,
- const char *f, const int l)
-{
-
- val4 = htole32(val4);
- DPRINTF("[%s:%d] CSR W %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
- "atse_mem_res", reg, reg * 4, val4);
- bus_write_4(sc->atse_mem_res, reg * 4, val4);
-}
-
-static inline uint32_t
-csr_read_4(struct atse_softc *sc, uint32_t reg, const char *f, const int l)
-{
- uint32_t val4;
-
- val4 = le32toh(bus_read_4(sc->atse_mem_res, reg * 4));
- DPRINTF("[%s:%d] CSR R %s 0x%08x (0x%08x) = 0x%08x\n", f, l,
- "atse_mem_res", reg, reg * 4, val4);
-
- return (val4);
-}
-
-/*
- * See page 5-2: these are all dword offsets, and the most significant 16 bits
- * must be zero on write and are ignored on read.
- */
-static inline void
-pxx_write_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, uint16_t val,
- const char *f, const int l, const char *s)
-{
- uint32_t val4;
-
- val4 = htole32(val & 0x0000ffff);
- DPRINTF("[%s:%d] %s W %s 0x%08x (0x%08jx) = 0x%08x\n", f, l, s,
- "atse_mem_res", reg, (bmcr + reg) * 4, val4);
- bus_write_4(sc->atse_mem_res, (bmcr + reg) * 4, val4);
-}
-
-static inline uint16_t
-pxx_read_2(struct atse_softc *sc, bus_addr_t bmcr, uint32_t reg, const char *f,
- const int l, const char *s)
-{
- uint32_t val4;
- uint16_t val;
-
- val4 = bus_read_4(sc->atse_mem_res, (bmcr + reg) * 4);
- val = le32toh(val4) & 0x0000ffff;
- DPRINTF("[%s:%d] %s R %s 0x%08x (0x%08jx) = 0x%04x\n", f, l, s,
- "atse_mem_res", reg, (bmcr + reg) * 4, val);
-
- return (val);
-}
-
-#define CSR_WRITE_4(sc, reg, val) \
- csr_write_4((sc), (reg), (val), __func__, __LINE__)
-#define CSR_READ_4(sc, reg) \
- csr_read_4((sc), (reg), __func__, __LINE__)
-#define PCS_WRITE_2(sc, reg, val) \
- pxx_write_2((sc), sc->atse_bmcr0, (reg), (val), __func__, __LINE__, \
- "PCS")
-#define PCS_READ_2(sc, reg) \
- pxx_read_2((sc), sc->atse_bmcr0, (reg), __func__, __LINE__, "PCS")
-#define PHY_WRITE_2(sc, reg, val) \
- pxx_write_2((sc), sc->atse_bmcr1, (reg), (val), __func__, __LINE__, \
- "PHY")
-#define PHY_READ_2(sc, reg) \
- pxx_read_2((sc), sc->atse_bmcr1, (reg), __func__, __LINE__, "PHY")
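-/*
- * Worked example of the offset arithmetic above (illustrative only; the
- * dword offsets come from the register definitions in if_atsereg.h):
- * CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG) reads dword 0x02, i.e. byte
- * offset 0x08, while PCS_READ_2(sc, PCS_CONTROL), assuming atse_bmcr0 is
- * MDIO_0_START (0x80), reads dword 0x80 + 0x00, i.e. byte offset 0x200.
- */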
-
-static void atse_tick(void *);
-static int atse_detach(device_t);
-
-static int
-atse_rx_enqueue(struct atse_softc *sc, uint32_t n)
-{
- struct mbuf *m;
- int i;
-
- for (i = 0; i < n; i++) {
- m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- if (m == NULL) {
- device_printf(sc->dev,
- "%s: Can't alloc rx mbuf\n", __func__);
- return (-1);
- }
-
- m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
- xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM);
- }
-
- return (0);
-}
-
-static int
-atse_xdma_tx_intr(void *arg, xdma_transfer_status_t *status)
-{
- xdma_transfer_status_t st;
- struct atse_softc *sc;
- if_t ifp;
- struct mbuf *m;
- int err;
-
- sc = arg;
-
- ATSE_LOCK(sc);
-
- ifp = sc->atse_ifp;
-
- for (;;) {
- err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st);
- if (err != 0) {
- break;
- }
-
- if (st.error != 0) {
- if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
- }
-
- m_freem(m);
- sc->txcount--;
- }
-
- if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
-
- ATSE_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-atse_xdma_rx_intr(void *arg, xdma_transfer_status_t *status)
-{
- xdma_transfer_status_t st;
- struct atse_softc *sc;
- if_t ifp;
- struct mbuf *m;
- int err;
- uint32_t cnt_processed;
-
- sc = arg;
-
- ATSE_LOCK(sc);
-
- ifp = sc->atse_ifp;
-
- cnt_processed = 0;
- for (;;) {
- err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st);
- if (err != 0) {
- break;
- }
- cnt_processed++;
-
- if (st.error != 0) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
- m_freem(m);
- continue;
- }
-
- m->m_pkthdr.len = m->m_len = st.transferred;
- m->m_pkthdr.rcvif = ifp;
- m_adj(m, ETHER_ALIGN);
- ATSE_UNLOCK(sc);
- if_input(ifp, m);
- ATSE_LOCK(sc);
- }
-
- atse_rx_enqueue(sc, cnt_processed);
-
- ATSE_UNLOCK(sc);
-
- return (0);
-}
-
-static int
-atse_transmit_locked(if_t ifp)
-{
- struct atse_softc *sc;
- struct mbuf *m;
- struct buf_ring *br;
- int error;
- int enq;
-
- sc = if_getsoftc(ifp);
- br = sc->br;
-
- enq = 0;
-
- while ((m = drbr_peek(ifp, br)) != NULL) {
- error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV);
- if (error != 0) {
- /* No space in request queue available yet. */
- drbr_putback(ifp, br, m);
- break;
- }
-
- drbr_advance(ifp, br);
-
- sc->txcount++;
- enq++;
-
- /* If anyone is interested give them a copy. */
- ETHER_BPF_MTAP(ifp, m);
- }
-
- if (enq > 0)
- xdma_queue_submit(sc->xchan_tx);
-
- return (0);
-}
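-/*
- * A note on the drbr dance above (our reading of the code): drbr_peek()
- * looks at the head of the ring without consuming it, so that when
- * xdma_enqueue_mbuf() fails we can drbr_putback() the mbuf -- which the
- * enqueue attempt may have replaced via *m -- and retry on the next
- * transmit; only on success does drbr_advance() consume the entry.
- */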
-
-static int
-atse_transmit(if_t ifp, struct mbuf *m)
-{
- struct atse_softc *sc;
- struct buf_ring *br;
- int error;
-
- sc = if_getsoftc(ifp);
- br = sc->br;
-
- ATSE_LOCK(sc);
-
- mtx_lock(&sc->br_mtx);
-
- if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
- error = drbr_enqueue(ifp, sc->br, m);
- mtx_unlock(&sc->br_mtx);
- ATSE_UNLOCK(sc);
- return (error);
- }
-
- if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
- error = drbr_enqueue(ifp, sc->br, m);
- mtx_unlock(&sc->br_mtx);
- ATSE_UNLOCK(sc);
- return (error);
- }
-
- error = drbr_enqueue(ifp, br, m);
- if (error) {
- mtx_unlock(&sc->br_mtx);
- ATSE_UNLOCK(sc);
- return (error);
- }
- error = atse_transmit_locked(ifp);
-
- mtx_unlock(&sc->br_mtx);
- ATSE_UNLOCK(sc);
-
- return (error);
-}
-
-static void
-atse_qflush(if_t ifp)
-{
- struct atse_softc *sc;
-
- sc = if_getsoftc(ifp);
-
- printf("%s\n", __func__);
-}
-
-static int
-atse_stop_locked(struct atse_softc *sc)
-{
- uint32_t mask, val4;
- if_t ifp;
- int i;
-
- ATSE_LOCK_ASSERT(sc);
-
- callout_stop(&sc->atse_tick);
-
- ifp = sc->atse_ifp;
- if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
-
- /* Disable MAC transmit and receive datapath. */
- mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- val4 &= ~mask;
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
-
- /* Wait for bits to be cleared; i=100 is excessive. */
- for (i = 0; i < 100; i++) {
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- if ((val4 & mask) == 0) {
- break;
- }
- DELAY(10);
- }
-
- if ((val4 & mask) != 0) {
- device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
- /* Punt. */
- }
-
- sc->atse_flags &= ~ATSE_FLAGS_LINK;
-
- return (0);
-}
-
-static u_int
-atse_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
-{
- uint64_t *h = arg;
- uint8_t *addr, x, y;
- int i, j;
-
- addr = LLADDR(sdl);
- x = 0;
- for (i = 0; i < ETHER_ADDR_LEN; i++) {
- y = addr[i] & 0x01;
- for (j = 1; j < 8; j++)
- y ^= (addr[i] >> j) & 0x01;
- x |= (y << i);
- }
- *h |= (1ULL << x); /* x can exceed 31; use a 64-bit shift. */
-
- return (1);
-}
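-/*
- * Worked example for the parity hash above (computed by hand, not taken
- * from the datasheet): 01:00:5e:00:00:01 (the IPv4 all-hosts multicast
- * MAC) has per-byte parities 1,0,1,0,0,1, so x = 0b100101 = 0x25, bit
- * 0x25 of the 64-bit map is set, and atse_rxfilter_locked() programs
- * dword MHASH_START + 0x25. The broadcast address ff:ff:ff:ff:ff:ff has
- * even parity in every byte and hashes to slot 0.
- */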
-
-static int
-atse_rxfilter_locked(struct atse_softc *sc)
-{
- if_t ifp;
- uint32_t val4;
- int i;
-
- /* XXX-BZ can we find out if we have the MHASH synthesized? */
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- /* For simplicity always hash full 48 bits of addresses. */
- if ((val4 & BASE_CFG_COMMAND_CONFIG_MHASH_SEL) != 0)
- val4 &= ~BASE_CFG_COMMAND_CONFIG_MHASH_SEL;
-
- ifp = sc->atse_ifp;
- if (if_getflags(ifp) & IFF_PROMISC) {
- val4 |= BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
- } else {
- val4 &= ~BASE_CFG_COMMAND_CONFIG_PROMIS_EN;
- }
-
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
-
- if (if_getflags(ifp) & IFF_ALLMULTI) {
- /* Accept all multicast addresses. */
- for (i = 0; i <= MHASH_LEN; i++)
- CSR_WRITE_4(sc, MHASH_START + i, 0x1);
- } else {
- /*
- * Can hold MHASH_LEN entries.
- * XXX-BZ bitstring.h would be more general.
- */
- uint64_t h;
-
- /*
- * Re-build and re-program hash table. First build the
- * bit-field "yes" or "no" for each slot per address, then
- * do all the programming afterwards.
- */
- h = 0;
- (void)if_foreach_llmaddr(ifp, atse_hash_maddr, &h);
- for (i = 0; i <= MHASH_LEN; i++) {
- CSR_WRITE_4(sc, MHASH_START + i,
- (h & (1 << i)) ? 0x01 : 0x00);
- }
- }
-
- return (0);
-}
-
-static int
-atse_ethernet_option_bits_read_fdt(device_t dev)
-{
- struct resource *res;
- device_t fdev;
- int i, rid;
-
- if (atse_ethernet_option_bits_flag & ATSE_ETHERNET_OPTION_BITS_READ) {
- return (0);
- }
-
- fdev = device_find_child(device_get_parent(dev), "cfi", 0);
- if (fdev == NULL) {
- return (ENOENT);
- }
-
- rid = 0;
- res = bus_alloc_resource_any(fdev, SYS_RES_MEMORY, &rid,
- RF_ACTIVE | RF_SHAREABLE);
- if (res == NULL) {
- return (ENXIO);
- }
-
- for (i = 0; i < ALTERA_ETHERNET_OPTION_BITS_LEN; i++) {
- atse_ethernet_option_bits[i] = bus_read_1(res,
- ALTERA_ETHERNET_OPTION_BITS_OFF + i);
- }
-
- bus_release_resource(fdev, SYS_RES_MEMORY, rid, res);
- atse_ethernet_option_bits_flag |= ATSE_ETHERNET_OPTION_BITS_READ;
-
- return (0);
-}
-
-static int
-atse_ethernet_option_bits_read(device_t dev)
-{
- int error;
-
- error = atse_ethernet_option_bits_read_fdt(dev);
- if (error == 0)
- return (0);
-
- device_printf(dev, "Cannot read Ethernet addresses from flash.\n");
-
- return (error);
-}
-
-static int
-atse_get_eth_address(struct atse_softc *sc)
-{
- unsigned long hostid;
- uint32_t val4;
- int unit;
-
- /*
- * Make sure to only ever do this once. Otherwise a reset would
- * possibly change our ethernet address, which is not good at all.
- */
- if (sc->atse_eth_addr[0] != 0x00 || sc->atse_eth_addr[1] != 0x00 ||
- sc->atse_eth_addr[2] != 0x00) {
- return (0);
- }
-
- if ((atse_ethernet_option_bits_flag &
- ATSE_ETHERNET_OPTION_BITS_READ) == 0) {
- goto get_random;
- }
-
- val4 = atse_ethernet_option_bits[0] << 24;
- val4 |= atse_ethernet_option_bits[1] << 16;
- val4 |= atse_ethernet_option_bits[2] << 8;
- val4 |= atse_ethernet_option_bits[3];
- /* They chose "safe". */
- if (val4 != le32toh(0x00005afe)) {
- device_printf(sc->atse_dev, "Magic '5afe' is not safe: 0x%08x. "
- "Falling back to random numbers for hardware address.\n",
- val4);
- goto get_random;
- }
-
- sc->atse_eth_addr[0] = atse_ethernet_option_bits[4];
- sc->atse_eth_addr[1] = atse_ethernet_option_bits[5];
- sc->atse_eth_addr[2] = atse_ethernet_option_bits[6];
- sc->atse_eth_addr[3] = atse_ethernet_option_bits[7];
- sc->atse_eth_addr[4] = atse_ethernet_option_bits[8];
- sc->atse_eth_addr[5] = atse_ethernet_option_bits[9];
-
- /* Handle factory default ethernet address: 00:07:ed:ff:ed:15 */
- if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x07 &&
- sc->atse_eth_addr[2] == 0xed && sc->atse_eth_addr[3] == 0xff &&
- sc->atse_eth_addr[4] == 0xed && sc->atse_eth_addr[5] == 0x15) {
- device_printf(sc->atse_dev, "Factory programmed Ethernet "
- "hardware address blacklisted. Falling back to random "
- "address to avoid collisions.\n");
- device_printf(sc->atse_dev, "Please re-program your flash.\n");
- goto get_random;
- }
-
- if (sc->atse_eth_addr[0] == 0x00 && sc->atse_eth_addr[1] == 0x00 &&
- sc->atse_eth_addr[2] == 0x00 && sc->atse_eth_addr[3] == 0x00 &&
- sc->atse_eth_addr[4] == 0x00 && sc->atse_eth_addr[5] == 0x00) {
- device_printf(sc->atse_dev, "All zero's Ethernet hardware "
- "address blacklisted. Falling back to random address.\n");
- device_printf(sc->atse_dev, "Please re-program your flash.\n");
- goto get_random;
- }
-
- if (ETHER_IS_MULTICAST(sc->atse_eth_addr)) {
- device_printf(sc->atse_dev, "Multicast Ethernet hardware "
- "address blacklisted. Falling back to random address.\n");
- device_printf(sc->atse_dev, "Please re-program your flash.\n");
- goto get_random;
- }
-
- /*
- * If we find an Altera-prefixed address ending in 0x0, adjust it
- * by the device unit. Otherwise, if this is not the first
- * Ethernet device, fall back to a random address.
- */
- unit = device_get_unit(sc->atse_dev);
- if (unit == 0x00) {
- return (0);
- }
-
- if (unit > 0x0f) {
- device_printf(sc->atse_dev, "We do not support Ethernet "
- "addresses for more than 16 MACs. Falling back to "
- "random hadware address.\n");
- goto get_random;
- }
- if ((sc->atse_eth_addr[0] & ~0x2) != 0 ||
- sc->atse_eth_addr[1] != 0x07 || sc->atse_eth_addr[2] != 0xed ||
- (sc->atse_eth_addr[5] & 0x0f) != 0x0) {
- device_printf(sc->atse_dev, "Ethernet address not meeting our "
- "multi-MAC standards. Falling back to random hadware "
- "address.\n");
- goto get_random;
- }
- sc->atse_eth_addr[5] |= (unit & 0x0f);
-
- return (0);
-
-get_random:
- /*
- * Fall back to random code we also use on bridge(4).
- */
- getcredhostid(curthread->td_ucred, &hostid);
- if (hostid == 0) {
- arc4rand(sc->atse_eth_addr, ETHER_ADDR_LEN, 1);
- sc->atse_eth_addr[0] &= ~1;/* clear multicast bit */
- sc->atse_eth_addr[0] |= 2; /* set the LAA bit */
- } else {
- sc->atse_eth_addr[0] = 0x2;
- sc->atse_eth_addr[1] = (hostid >> 24) & 0xff;
- sc->atse_eth_addr[2] = (hostid >> 16) & 0xff;
- sc->atse_eth_addr[3] = (hostid >> 8 ) & 0xff;
- sc->atse_eth_addr[4] = hostid & 0xff;
- sc->atse_eth_addr[5] = sc->atse_unit & 0xff;
- }
-
- return (0);
-}
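-/*
- * Illustrative example of the hostid fallback above (hypothetical value):
- * with hostid 0x11223344 and unit 0, the address becomes
- * 02:11:22:33:44:00 -- a locally administered, unicast address.
- */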
-
-static int
-atse_set_eth_address(struct atse_softc *sc, int n)
-{
- uint32_t v0, v1;
-
- v0 = (sc->atse_eth_addr[3] << 24) | (sc->atse_eth_addr[2] << 16) |
- (sc->atse_eth_addr[1] << 8) | sc->atse_eth_addr[0];
- v1 = (sc->atse_eth_addr[5] << 8) | sc->atse_eth_addr[4];
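-
- /*
- * For example (worked by hand): for 00:07:ed:ff:ed:15 the packing
- * above yields v0 = 0xffed0700 and v1 = 0x000015ed.
- */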
-
- if (n & ATSE_ETH_ADDR_DEF) {
- CSR_WRITE_4(sc, BASE_CFG_MAC_0, v0);
- CSR_WRITE_4(sc, BASE_CFG_MAC_1, v1);
- }
- if (n & ATSE_ETH_ADDR_SUPP1) {
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_0, v0);
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_0_1, v1);
- }
- if (n & ATSE_ETH_ADDR_SUPP2) {
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_0, v0);
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_1_1, v1);
- }
- if (n & ATSE_ETH_ADDR_SUPP3) {
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_0, v0);
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_2_1, v1);
- }
- if (n & ATSE_ETH_ADDR_SUPP4) {
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_0, v0);
- CSR_WRITE_4(sc, SUPPL_ADDR_SMAC_3_1, v1);
- }
-
- return (0);
-}
-
-static int
-atse_reset(struct atse_softc *sc)
-{
- uint32_t val4, mask;
- uint16_t val;
- int i;
-
- /* 1. External PHY Initialization using MDIO. */
- /*
- * We select the right MDIO space in atse_attach() and let MII do
- * anything else.
- */
-
- /* 2. PCS Configuration Register Initialization. */
- /* a. Set auto negotiation link timer to 1.6ms for SGMII. */
- PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_0, 0x0D40);
- PCS_WRITE_2(sc, PCS_EXT_LINK_TIMER_1, 0x0003);
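- /*
- * Illustrative arithmetic (our reading of the values above, not
- * from the code): the two writes form the value 0x30D40 = 200,000;
- * assuming the PCS link timer counts in 8 ns units (125 MHz clock),
- * 200,000 * 8 ns = 1.6 ms, matching the comment above.
- */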
-
- /* b. Configure SGMII. */
- val = PCS_EXT_IF_MODE_SGMII_ENA|PCS_EXT_IF_MODE_USE_SGMII_AN;
- PCS_WRITE_2(sc, PCS_EXT_IF_MODE, val);
-
- /* c. Enable auto negotiation. */
- /* Ignore Bits 6,8,13; should be set,set,unset. */
- val = PCS_READ_2(sc, PCS_CONTROL);
- val &= ~(PCS_CONTROL_ISOLATE|PCS_CONTROL_POWERDOWN);
- val &= ~PCS_CONTROL_LOOPBACK; /* Make this a -link1 option? */
- val |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE;
- PCS_WRITE_2(sc, PCS_CONTROL, val);
-
- /* d. PCS reset. */
- val = PCS_READ_2(sc, PCS_CONTROL);
- val |= PCS_CONTROL_RESET;
- PCS_WRITE_2(sc, PCS_CONTROL, val);
-
- /* Wait for reset bit to clear; i=100 is excessive. */
- for (i = 0; i < 100; i++) {
- val = PCS_READ_2(sc, PCS_CONTROL);
- if ((val & PCS_CONTROL_RESET) == 0) {
- break;
- }
- DELAY(10);
- }
-
- if ((val & PCS_CONTROL_RESET) != 0) {
- device_printf(sc->atse_dev, "PCS reset timed out.\n");
- return (ENXIO);
- }
-
- /* 3. MAC Configuration Register Initialization. */
- /* a. Disable MAC transmit and receive datapath. */
- mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- val4 &= ~mask;
- /* Samples in the manual do have the SW_RESET bit set here, why? */
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
- /* Wait for bits to be cleared; i=100 is excessive. */
- for (i = 0; i < 100; i++) {
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- if ((val4 & mask) == 0) {
- break;
- }
- DELAY(10);
- }
- if ((val4 & mask) != 0) {
- device_printf(sc->atse_dev, "Disabling MAC TX/RX timed out.\n");
- return (ENXIO);
- }
- /* b. MAC FIFO configuration. */
- CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_EMPTY, FIFO_DEPTH_TX - 16);
- CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_FULL, 3);
- CSR_WRITE_4(sc, BASE_CFG_TX_ALMOST_EMPTY, 8);
- CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_EMPTY, FIFO_DEPTH_RX - 16);
- CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_FULL, 8);
- CSR_WRITE_4(sc, BASE_CFG_RX_ALMOST_EMPTY, 8);
-#if 0
- CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 16);
- CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 16);
-#else
- /* For store-and-forward mode, set this threshold to 0. */
- CSR_WRITE_4(sc, BASE_CFG_TX_SECTION_FULL, 0);
- CSR_WRITE_4(sc, BASE_CFG_RX_SECTION_FULL, 0);
-#endif
- /* c. MAC address configuration. */
- /* Also initialize supplementary addresses to our primary one. */
- /* XXX-BZ FreeBSD really needs to grow an API for using these. */
- atse_get_eth_address(sc);
- atse_set_eth_address(sc, ATSE_ETH_ADDR_ALL);
-
- /* d. MAC function configuration. */
- CSR_WRITE_4(sc, BASE_CFG_FRM_LENGTH, 1518); /* Default. */
- CSR_WRITE_4(sc, BASE_CFG_TX_IPG_LENGTH, 12);
- CSR_WRITE_4(sc, BASE_CFG_PAUSE_QUANT, 0xFFFF);
-
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- /*
- * If 1000BASE-X/SGMII PCS is initialized, set the ETH_SPEED (bit 3)
- * and ENA_10 (bit 25) in command_config register to 0. If half duplex
- * is reported in the PHY/PCS status register, set the HD_ENA (bit 10)
- * to 1 in command_config register.
- * BZ: We shoot for 1000 instead.
- */
-#if 0
- val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
-#else
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
-#endif
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
-#if 0
- /*
- * We do not want to set this, otherwise, we could not even send
- * random raw ethernet frames for various other research. By default
- * FreeBSD will use the right ether source address.
- */
- val4 |= BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS;
-#endif
- val4 |= BASE_CFG_COMMAND_CONFIG_PAD_EN;
- val4 &= ~BASE_CFG_COMMAND_CONFIG_CRC_FWD;
-#if 0
- val4 |= BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA;
-#endif
-#if 1
- val4 |= BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC;
-#endif
- val &= ~BASE_CFG_COMMAND_CONFIG_LOOP_ENA; /* link0? */
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
-
- /*
- * Make sure we do not enable 32bit alignment; FreeBSD cannot
- * cope with the additional padding (though we should!?).
- * Also make sure we get the CRC appended.
- */
- val4 = CSR_READ_4(sc, TX_CMD_STAT);
- val4 &= ~(TX_CMD_STAT_OMIT_CRC|TX_CMD_STAT_TX_SHIFT16);
- CSR_WRITE_4(sc, TX_CMD_STAT, val4);
-
- val4 = CSR_READ_4(sc, RX_CMD_STAT);
- val4 &= ~RX_CMD_STAT_RX_SHIFT16;
- val4 |= RX_CMD_STAT_RX_SHIFT16;
- CSR_WRITE_4(sc, RX_CMD_STAT, val4);
-
- /* e. Reset MAC. */
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- val4 |= BASE_CFG_COMMAND_CONFIG_SW_RESET;
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
- /* Wait for bits to be cleared; i=100 is excessive. */
- for (i = 0; i < 100; i++) {
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) == 0) {
- break;
- }
- DELAY(10);
- }
- if ((val4 & BASE_CFG_COMMAND_CONFIG_SW_RESET) != 0) {
- device_printf(sc->atse_dev, "MAC reset timed out.\n");
- return (ENXIO);
- }
-
- /* f. Enable MAC transmit and receive datapath. */
- mask = BASE_CFG_COMMAND_CONFIG_TX_ENA|BASE_CFG_COMMAND_CONFIG_RX_ENA;
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- val4 |= mask;
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
- /* Wait for bits to be cleared; i=100 is excessive. */
- for (i = 0; i < 100; i++) {
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
- if ((val4 & mask) == mask) {
- break;
- }
- DELAY(10);
- }
- if ((val4 & mask) != mask) {
- device_printf(sc->atse_dev, "Enabling MAC TX/RX timed out.\n");
- return (ENXIO);
- }
-
- return (0);
-}
-
-static void
-atse_init_locked(struct atse_softc *sc)
-{
- if_t ifp;
- struct mii_data *mii;
- uint8_t *eaddr;
-
- ATSE_LOCK_ASSERT(sc);
- ifp = sc->atse_ifp;
-
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
- return;
- }
-
- /*
- * Must update the ether address if it changed. Given we do not
- * handle this in atse_ioctl() and it is handled by the general
- * framework, just always do it here before atse_reset().
- */
- eaddr = if_getlladdr(sc->atse_ifp);
- bcopy(eaddr, &sc->atse_eth_addr, ETHER_ADDR_LEN);
-
- /* Bring things to a friendly halt, clean up, ... */
- atse_stop_locked(sc);
-
- atse_reset(sc);
-
- /* ... and fire up the engine again. */
- atse_rxfilter_locked(sc);
-
- sc->atse_flags &= ATSE_FLAGS_LINK; /* Preserve. */
-
- mii = device_get_softc(sc->atse_miibus);
-
- sc->atse_flags &= ~ATSE_FLAGS_LINK;
- mii_mediachg(mii);
-
- if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
- if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
-
- callout_reset(&sc->atse_tick, hz, atse_tick, sc);
-}
-
-static void
-atse_init(void *xsc)
-{
- struct atse_softc *sc;
-
- /*
- * XXXRW: There is some argument that we should immediately do RX
- * processing after enabling interrupts, or one may not fire if there
- * are buffered packets.
- */
- sc = (struct atse_softc *)xsc;
- ATSE_LOCK(sc);
- atse_init_locked(sc);
- ATSE_UNLOCK(sc);
-}
-
-static int
-atse_ioctl(if_t ifp, u_long command, caddr_t data)
-{
- struct atse_softc *sc;
- struct ifreq *ifr;
- int error, mask;
-
- error = 0;
- sc = if_getsoftc(ifp);
- ifr = (struct ifreq *)data;
-
- switch (command) {
- case SIOCSIFFLAGS:
- ATSE_LOCK(sc);
- if (if_getflags(ifp) & IFF_UP) {
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
- ((if_getflags(ifp) ^ sc->atse_if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) != 0)
- atse_rxfilter_locked(sc);
- else
- atse_init_locked(sc);
- } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
- atse_stop_locked(sc);
- sc->atse_if_flags = if_getflags(ifp);
- ATSE_UNLOCK(sc);
- break;
- case SIOCSIFCAP:
- ATSE_LOCK(sc);
- mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
- ATSE_UNLOCK(sc);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- ATSE_LOCK(sc);
- atse_rxfilter_locked(sc);
- ATSE_UNLOCK(sc);
- break;
- case SIOCGIFMEDIA:
- case SIOCSIFMEDIA:
- {
- struct mii_data *mii;
- struct ifreq *ifr;
-
- mii = device_get_softc(sc->atse_miibus);
- ifr = (struct ifreq *)data;
- error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
- break;
- }
- default:
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-static void
-atse_tick(void *xsc)
-{
- struct atse_softc *sc;
- struct mii_data *mii;
- if_t ifp;
-
- sc = (struct atse_softc *)xsc;
- ATSE_LOCK_ASSERT(sc);
- ifp = sc->atse_ifp;
-
- mii = device_get_softc(sc->atse_miibus);
- mii_tick(mii);
- if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
- atse_miibus_statchg(sc->atse_dev);
- }
-
- callout_reset(&sc->atse_tick, hz, atse_tick, sc);
-}
-
-/*
- * Set media options.
- */
-static int
-atse_ifmedia_upd(if_t ifp)
-{
- struct atse_softc *sc;
- struct mii_data *mii;
- struct mii_softc *miisc;
- int error;
-
- sc = if_getsoftc(ifp);
-
- ATSE_LOCK(sc);
- mii = device_get_softc(sc->atse_miibus);
- LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
- PHY_RESET(miisc);
- }
- error = mii_mediachg(mii);
- ATSE_UNLOCK(sc);
-
- return (error);
-}
-
-/*
- * Report current media status.
- */
-static void
-atse_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
-{
- struct atse_softc *sc;
- struct mii_data *mii;
-
- sc = if_getsoftc(ifp);
-
- ATSE_LOCK(sc);
- mii = device_get_softc(sc->atse_miibus);
- mii_pollstat(mii);
- ifmr->ifm_active = mii->mii_media_active;
- ifmr->ifm_status = mii->mii_media_status;
- ATSE_UNLOCK(sc);
-}
-
-static struct atse_mac_stats_regs {
- const char *name;
- const char *descr; /* Mostly copied from Altera datasheet. */
-} atse_mac_stats_regs[] = {
- [0x1a] =
- { "aFramesTransmittedOK",
- "The number of frames that are successfully transmitted including "
- "the pause frames." },
- { "aFramesReceivedOK",
- "The number of frames that are successfully received including the "
- "pause frames." },
- { "aFrameCheckSequenceErrors",
- "The number of receive frames with CRC error." },
- { "aAlignmentErrors",
- "The number of receive frames with alignment error." },
- { "aOctetsTransmittedOK",
- "The lower 32 bits of the number of data and padding octets that "
- "are successfully transmitted." },
- { "aOctetsReceivedOK",
- "The lower 32 bits of the number of data and padding octets that "
- " are successfully received." },
- { "aTxPAUSEMACCtrlFrames",
- "The number of pause frames transmitted." },
- { "aRxPAUSEMACCtrlFrames",
- "The number received pause frames received." },
- { "ifInErrors",
- "The number of errored frames received." },
- { "ifOutErrors",
- "The number of transmit frames with either a FIFO overflow error, "
- "a FIFO underflow error, or a error defined by the user "
- "application." },
- { "ifInUcastPkts",
- "The number of valid unicast frames received." },
- { "ifInMulticastPkts",
- "The number of valid multicast frames received. The count does "
- "not include pause frames." },
- { "ifInBroadcastPkts",
- "The number of valid broadcast frames received." },
- { "ifOutDiscards",
- "This statistics counter is not in use. The MAC function does not "
- "discard frames that are written to the FIFO buffer by the user "
- "application." },
- { "ifOutUcastPkts",
- "The number of valid unicast frames transmitted." },
- { "ifOutMulticastPkts",
- "The number of valid multicast frames transmitted, excluding pause "
- "frames." },
- { "ifOutBroadcastPkts",
- "The number of valid broadcast frames transmitted." },
- { "etherStatsDropEvents",
- "The number of frames that are dropped due to MAC internal errors "
- "when FIFO buffer overflow persists." },
- { "etherStatsOctets",
- "The lower 32 bits of the total number of octets received. This "
- "count includes both good and errored frames." },
- { "etherStatsPkts",
- "The total number of good and errored frames received." },
- { "etherStatsUndersizePkts",
- "The number of frames received with length less than 64 bytes. "
- "This count does not include errored frames." },
- { "etherStatsOversizePkts",
- "The number of frames received that are longer than the value "
- "configured in the frm_length register. This count does not "
- "include errored frames." },
- { "etherStatsPkts64Octets",
- "The number of 64-byte frames received. This count includes good "
- "and errored frames." },
- { "etherStatsPkts65to127Octets",
- "The number of received good and errored frames between the length "
- "of 65 and 127 bytes." },
- { "etherStatsPkts128to255Octets",
- "The number of received good and errored frames between the length "
- "of 128 and 255 bytes." },
- { "etherStatsPkts256to511Octets",
- "The number of received good and errored frames between the length "
- "of 256 and 511 bytes." },
- { "etherStatsPkts512to1023Octets",
- "The number of received good and errored frames between the length "
- "of 512 and 1023 bytes." },
- { "etherStatsPkts1024to1518Octets",
- "The number of received good and errored frames between the length "
- "of 1024 and 1518 bytes." },
- { "etherStatsPkts1519toXOctets",
- "The number of received good and errored frames between the length "
- "of 1519 and the maximum frame length configured in the frm_length "
- "register." },
- { "etherStatsJabbers",
- "Too long frames with CRC error." },
- { "etherStatsFragments",
- "Too short frames with CRC error." },
- /* 0x39 unused, 0x3a/b non-stats. */
- [0x3c] =
- /* Extended Statistics Counters */
- { "msb_aOctetsTransmittedOK",
- "Upper 32 bits of the number of data and padding octets that are "
- "successfully transmitted." },
- { "msb_aOctetsReceivedOK",
- "Upper 32 bits of the number of data and padding octets that are "
- "successfully received." },
- { "msb_etherStatsOctets",
- "Upper 32 bits of the total number of octets received. This count "
- "includes both good and errored frames." }
-};
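-
-/*
- * Note on the table above (our reading of the code, not a documented
- * contract): the designated initializer [0x1a] aligns each array index
- * with the dword offset of the corresponding statistics register
- * (STATS_A_FRAMES_TX_OK is 0x1A), which is why the sysctl handler below
- * can pass the array index straight to CSR_READ_4().
- */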
-
-static int
-sysctl_atse_mac_stats_proc(SYSCTL_HANDLER_ARGS)
-{
- struct atse_softc *sc;
- int error, offset, s;
-
- sc = arg1;
- offset = arg2;
-
- s = CSR_READ_4(sc, offset);
- error = sysctl_handle_int(oidp, &s, 0, req);
- if (error || !req->newptr) {
- return (error);
- }
-
- return (0);
-}
-
-static struct atse_rx_err_stats_regs {
- const char *name;
- const char *descr;
-} atse_rx_err_stats_regs[] = {
-#define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
-#define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
-#define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
-#define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
-#define ATSE_RX_ERR_4 4 /* ? */
-#define ATSE_RX_ERR_5 5 /* / */
-
- { "rx_err_fifo_thres_eop",
- "FIFO threshold reached, reported on EOP." },
- { "rx_err_fifo_elen",
- "Frame or payload length not valid." },
- { "rx_err_fifo_crc32",
- "CRC-32 error." },
- { "rx_err_fifo_thres_trunc",
- "FIFO threshold reached, truncated frame" },
- { "rx_err_4",
- "?" },
- { "rx_err_5",
- "?" },
-};
-
-static int
-sysctl_atse_rx_err_stats_proc(SYSCTL_HANDLER_ARGS)
-{
- struct atse_softc *sc;
- int error, offset, s;
-
- sc = arg1;
- offset = arg2;
-
- s = sc->atse_rx_err[offset];
- error = sysctl_handle_int(oidp, &s, 0, req);
- if (error || !req->newptr) {
- return (error);
- }
-
- return (0);
-}
-
-static void
-atse_sysctl_stats_attach(device_t dev)
-{
- struct sysctl_ctx_list *sctx;
- struct sysctl_oid *soid;
- struct atse_softc *sc;
- int i;
-
- sc = device_get_softc(dev);
- sctx = device_get_sysctl_ctx(dev);
- soid = device_get_sysctl_tree(dev);
-
- /* MAC statistics. */
- for (i = 0; i < nitems(atse_mac_stats_regs); i++) {
- if (atse_mac_stats_regs[i].name == NULL ||
- atse_mac_stats_regs[i].descr == NULL) {
- continue;
- }
-
- SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
- atse_mac_stats_regs[i].name,
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
- sc, i, sysctl_atse_mac_stats_proc, "IU",
- atse_mac_stats_regs[i].descr);
- }
-
- /* rx_err[]. */
- for (i = 0; i < ATSE_RX_ERR_MAX; i++) {
- if (atse_rx_err_stats_regs[i].name == NULL ||
- atse_rx_err_stats_regs[i].descr == NULL) {
- continue;
- }
-
- SYSCTL_ADD_PROC(sctx, SYSCTL_CHILDREN(soid), OID_AUTO,
- atse_rx_err_stats_regs[i].name,
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
- sc, i, sysctl_atse_rx_err_stats_proc, "IU",
- atse_rx_err_stats_regs[i].descr);
- }
-}
-
-/*
- * Generic device handling routines.
- */
-int
-atse_attach(device_t dev)
-{
- struct atse_softc *sc;
- if_t ifp;
- uint32_t caps;
- int error;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- /* Get xDMA controller */
- sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
- if (sc->xdma_tx == NULL) {
- device_printf(dev, "Can't find DMA controller.\n");
- return (ENXIO);
- }
-
- /*
- * Only final (EOP) write can be less than "symbols per beat" value
- * so we have to defrag mbuf chain.
- * Chapter 15. On-Chip FIFO Memory Core.
- * Embedded Peripherals IP User Guide.
- */
- caps = XCHAN_CAP_NOSEG;
-
- /* Alloc xDMA virtual channel. */
- sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, caps);
- if (sc->xchan_tx == NULL) {
- device_printf(dev, "Can't alloc virtual DMA channel.\n");
- return (ENXIO);
- }
-
- /* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_tx, 0,
- atse_xdma_tx_intr, sc, &sc->ih_tx);
- if (error) {
- device_printf(sc->dev,
- "Can't setup xDMA interrupt handler.\n");
- return (ENXIO);
- }
-
- xdma_prep_sg(sc->xchan_tx,
- TX_QUEUE_SIZE, /* xchan requests queue size */
- MCLBYTES, /* maxsegsize */
- 8, /* maxnsegs */
- 16, /* alignment */
- 0, /* boundary */
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR);
-
- /* Get RX xDMA controller */
- sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
- if (sc->xdma_rx == NULL) {
- device_printf(dev, "Can't find DMA controller.\n");
- return (ENXIO);
- }
-
- /* Alloc xDMA virtual channel. */
- sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, caps);
- if (sc->xchan_rx == NULL) {
- device_printf(dev, "Can't alloc virtual DMA channel.\n");
- return (ENXIO);
- }
-
- /* Setup interrupt handler. */
- error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
- atse_xdma_rx_intr, sc, &sc->ih_rx);
- if (error) {
- device_printf(sc->dev,
- "Can't setup xDMA interrupt handler.\n");
- return (ENXIO);
- }
-
- xdma_prep_sg(sc->xchan_rx,
- RX_QUEUE_SIZE, /* xchan requests queue size */
- MCLBYTES, /* maxsegsize */
- 1, /* maxnsegs */
- 16, /* alignment */
- 0, /* boundary */
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR);
-
- mtx_init(&sc->br_mtx, "buf ring mtx", NULL, MTX_DEF);
- sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
- M_NOWAIT, &sc->br_mtx);
- if (sc->br == NULL) {
- return (ENOMEM);
- }
-
- atse_ethernet_option_bits_read(dev);
-
- mtx_init(&sc->atse_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
- MTX_DEF);
-
- callout_init_mtx(&sc->atse_tick, &sc->atse_mtx, 0);
-
- /*
- * We are only doing single-PHY with this driver currently. The
- * defaults would be right so that BASE_CFG_MDIO_ADDR0 points to the
- * 1st PHY address (0) apart from the fact that BMCR0 is always
- * the PCS mapping, so we always use BMCR1. See Table 5-1 0xA0-0xBF.
- */
-#if 0 /* Always PCS. */
- sc->atse_bmcr0 = MDIO_0_START;
- CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR0, 0x00);
-#endif
- /* Always use matching PHY for atse[0..]. */
- sc->atse_phy_addr = device_get_unit(dev);
- sc->atse_bmcr1 = MDIO_1_START;
- CSR_WRITE_4(sc, BASE_CFG_MDIO_ADDR1, sc->atse_phy_addr);
-
- /* Reset the adapter. */
- atse_reset(sc);
-
- /* Setup interface. */
- ifp = sc->atse_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "if_alloc() failed\n");
- error = ENOSPC;
- goto err;
- }
- if_setsoftc(ifp, sc);
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
- if_setioctlfn(ifp, atse_ioctl);
- if_settransmitfn(ifp, atse_transmit);
- if_setqflushfn(ifp, atse_qflush);
- if_setinitfn(ifp, atse_init);
- if_setsendqlen(ifp, ATSE_TX_LIST_CNT - 1);
- if_setsendqready(ifp);
-
- /* MII setup. */
- error = mii_attach(dev, &sc->atse_miibus, ifp, atse_ifmedia_upd,
- atse_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
- if (error != 0) {
- device_printf(dev, "attaching PHY failed: %d\n", error);
- goto err;
- }
-
- /* Call media-independent attach routine. */
- ether_ifattach(ifp, sc->atse_eth_addr);
-
- /* Tell the upper layer(s) about vlan mtu support. */
- if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
- if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
- if_setcapenable(ifp, if_getcapabilities(ifp));
-
-err:
- if (error != 0) {
- atse_detach(dev);
- return (error);
- }
-
- atse_sysctl_stats_attach(dev);
-
- atse_rx_enqueue(sc, NUM_RX_MBUF);
- xdma_queue_submit(sc->xchan_rx);
-
- return (error);
-}
-
-static int
-atse_detach(device_t dev)
-{
- struct atse_softc *sc;
- if_t ifp;
-
- sc = device_get_softc(dev);
- KASSERT(mtx_initialized(&sc->atse_mtx), ("%s: mutex not initialized",
- device_get_nameunit(dev)));
- ifp = sc->atse_ifp;
-
- /* Only cleanup if attach succeeded. */
- if (device_is_attached(dev)) {
- ATSE_LOCK(sc);
- atse_stop_locked(sc);
- ATSE_UNLOCK(sc);
- callout_drain(&sc->atse_tick);
- ether_ifdetach(ifp);
- }
- if (sc->atse_miibus != NULL) {
- device_delete_child(dev, sc->atse_miibus);
- }
-
- if (ifp != NULL) {
- if_free(ifp);
- }
-
- mtx_destroy(&sc->atse_mtx);
-
- xdma_channel_free(sc->xchan_tx);
- xdma_channel_free(sc->xchan_rx);
- xdma_put(sc->xdma_tx);
- xdma_put(sc->xdma_rx);
-
- return (0);
-}
-
-/* Shared between nexus and fdt implementation. */
-void
-atse_detach_resources(device_t dev)
-{
- struct atse_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (sc->atse_mem_res != NULL) {
- bus_release_resource(dev, SYS_RES_MEMORY, sc->atse_mem_rid,
- sc->atse_mem_res);
- sc->atse_mem_res = NULL;
- }
-}
-
-int
-atse_detach_dev(device_t dev)
-{
- int error;
-
- error = atse_detach(dev);
- if (error) {
- /* We are basically in undefined state now. */
- device_printf(dev, "atse_detach() failed: %d\n", error);
- return (error);
- }
-
- atse_detach_resources(dev);
-
- return (0);
-}
-
-int
-atse_miibus_readreg(device_t dev, int phy, int reg)
-{
- struct atse_softc *sc;
- int val;
-
- sc = device_get_softc(dev);
-
- /*
- * We currently do not support re-mapping of MDIO space on-the-fly
- * but de facto hard-code the PHY number.
- */
- if (phy != sc->atse_phy_addr) {
- return (0);
- }
-
- val = PHY_READ_2(sc, reg);
-
- return (val);
-}
-
-int
-atse_miibus_writereg(device_t dev, int phy, int reg, int data)
-{
- struct atse_softc *sc;
-
- sc = device_get_softc(dev);
-
- /*
- * We currently do not support re-mapping of MDIO space on-the-fly
- * but de facto hard-code the PHY number.
- */
- if (phy != sc->atse_phy_addr) {
- return (0);
- }
-
- PHY_WRITE_2(sc, reg, data);
- return (0);
-}
-
-void
-atse_miibus_statchg(device_t dev)
-{
- struct atse_softc *sc;
- struct mii_data *mii;
- if_t ifp;
- uint32_t val4;
-
- sc = device_get_softc(dev);
- ATSE_LOCK_ASSERT(sc);
-
- mii = device_get_softc(sc->atse_miibus);
- ifp = sc->atse_ifp;
- if (mii == NULL || ifp == NULL ||
- (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
- return;
- }
-
- val4 = CSR_READ_4(sc, BASE_CFG_COMMAND_CONFIG);
-
- /* Assume no link. */
- sc->atse_flags &= ~ATSE_FLAGS_LINK;
-
- if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) ==
- (IFM_ACTIVE | IFM_AVALID)) {
- switch (IFM_SUBTYPE(mii->mii_media_active)) {
- case IFM_10_T:
- val4 |= BASE_CFG_COMMAND_CONFIG_ENA_10;
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
- sc->atse_flags |= ATSE_FLAGS_LINK;
- break;
- case IFM_100_TX:
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
- sc->atse_flags |= ATSE_FLAGS_LINK;
- break;
- case IFM_1000_T:
- val4 &= ~BASE_CFG_COMMAND_CONFIG_ENA_10;
- val4 |= BASE_CFG_COMMAND_CONFIG_ETH_SPEED;
- sc->atse_flags |= ATSE_FLAGS_LINK;
- break;
- default:
- break;
- }
- }
-
- if ((sc->atse_flags & ATSE_FLAGS_LINK) == 0) {
- /* Need to stop the MAC? */
- return;
- }
-
- if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) {
- val4 &= ~BASE_CFG_COMMAND_CONFIG_HD_ENA;
- } else {
- val4 |= BASE_CFG_COMMAND_CONFIG_HD_ENA;
- }
-
- /* flow control? */
-
- /* Make sure the MAC is activated. */
- val4 |= BASE_CFG_COMMAND_CONFIG_TX_ENA;
- val4 |= BASE_CFG_COMMAND_CONFIG_RX_ENA;
-
- CSR_WRITE_4(sc, BASE_CFG_COMMAND_CONFIG, val4);
-}
-
-MODULE_DEPEND(atse, ether, 1, 1, 1);
-MODULE_DEPEND(atse, miibus, 1, 1, 1);
diff --git a/sys/dev/altera/atse/if_atse_fdt.c b/sys/dev/altera/atse/if_atse_fdt.c
deleted file mode 100644
index 77fa930ee945..000000000000
--- a/sys/dev/altera/atse/if_atse_fdt.c
+++ /dev/null
@@ -1,144 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2013 Bjoern A. Zeeb
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
- * ("MRC2"), as part of the DARPA MRC research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/rman.h>
-#include <sys/socket.h>
-#include <sys/systm.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <net/if_media.h>
-#include <net/if_var.h>
-
-#include <dev/mii/mii.h>
-#include <dev/mii/miivar.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <dev/altera/atse/if_atsereg.h>
-
-/* "device miibus" required. See GENERIC if you get errors here. */
-#include "miibus_if.h"
-
-static int
-atse_probe_fdt(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "altera,atse")) {
- return (ENXIO);
- }
-
- device_set_desc(dev, "Altera Triple-Speed Ethernet MegaCore");
-
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-atse_attach_fdt(device_t dev)
-{
- struct atse_softc *sc;
- int error;
-
- sc = device_get_softc(dev);
- sc->atse_dev = dev;
- sc->atse_unit = device_get_unit(dev);
-
- /*
- * FDT has the list of our resources. Given we are using multiple
- * memory regions and possibly multiple interrupts, we need to attach
- * them in the order specified in .dts:
- * MAC, RX and RXC FIFO, TX and TXC FIFO; RX INTR, TX INTR.
- */
-
- /* MAC: Avalon-MM, atse management register region. */
- sc->atse_mem_rid = 0;
- sc->atse_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->atse_mem_rid, RF_ACTIVE);
- if (sc->atse_mem_res == NULL) {
- device_printf(dev, "failed to map memory for ctrl region\n");
- /* Cleanup. */
- atse_detach_resources(dev);
-
- return (ENXIO);
- }
- if (bootverbose)
- device_printf(sc->atse_dev, "MAC ctrl region at mem %p-%p\n",
- (void *)rman_get_start(sc->atse_mem_res),
- (void *)(rman_get_start(sc->atse_mem_res) +
- rman_get_size(sc->atse_mem_res)));
-
- error = atse_attach(dev);
- if (error) {
- /* Cleanup. */
- atse_detach_resources(dev);
-
- return (error);
- }
-
- return (0);
-}
-
-static device_method_t atse_methods_fdt[] = {
- /* Device interface */
- DEVMETHOD(device_probe, atse_probe_fdt),
- DEVMETHOD(device_attach, atse_attach_fdt),
- DEVMETHOD(device_detach, atse_detach_dev),
-
- /* MII interface */
- DEVMETHOD(miibus_readreg, atse_miibus_readreg),
- DEVMETHOD(miibus_writereg, atse_miibus_writereg),
- DEVMETHOD(miibus_statchg, atse_miibus_statchg),
-
- DEVMETHOD_END
-};
-
-static driver_t atse_driver_fdt = {
- "atse",
- atse_methods_fdt,
- sizeof(struct atse_softc)
-};
-
-DRIVER_MODULE(atse, simplebus, atse_driver_fdt, 0, 0);
-DRIVER_MODULE(miibus, atse, miibus_driver, 0, 0);
diff --git a/sys/dev/altera/atse/if_atse_nexus.c b/sys/dev/altera/atse/if_atse_nexus.c
deleted file mode 100644
index 22a66dd305d4..000000000000
--- a/sys/dev/altera/atse/if_atse_nexus.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012,2013 Bjoern A. Zeeb
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
- * ("MRC2"), as part of the DARPA MRC research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-#include "opt_device_polling.h"
-
-#include <sys/param.h>
-#include <sys/kernel.h>
-#include <sys/bus.h>
-#include <sys/module.h>
-#include <sys/rman.h>
-#include <sys/socket.h>
-#include <sys/types.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <net/if_media.h>
-#include <net/if_var.h>
-
-#include <dev/mii/mii.h>
-#include <dev/mii/miivar.h>
-
-#include <dev/altera/atse/if_atsereg.h>
-
-/* "device miibus" required. See GENERIC if you get errors here. */
-#include "miibus_if.h"
-
-MODULE_DEPEND(atse, ether, 1, 1, 1);
-MODULE_DEPEND(atse, miibus, 1, 1, 1);
-
-/*
- * Device routines for interacting with nexus (probe, attach, detach) & helpers.
- * XXX We should add suspend/resume later.
- */
-static int __unused
-atse_resource_int(device_t dev, const char *resname, int *v)
-{
- int error;
-
- error = resource_int_value(device_get_name(dev), device_get_unit(dev),
- resname, v);
- if (error != 0) {
- /* If it does not exist, we fail, so we do not ignore ENOENT. */
- device_printf(dev, "could not fetch '%s' hint\n", resname);
- return (error);
- }
-
- return (0);
-}
-
-static int __unused
-atse_resource_long(device_t dev, const char *resname, long *v)
-{
- int error;
-
- error = resource_long_value(device_get_name(dev), device_get_unit(dev),
- resname, v);
- if (error != 0) {
- /* If it does not exist, we fail, so we do not ignore ENOENT. */
- device_printf(dev, "could not fetch '%s' hint\n", resname);
- return (error);
- }
-
- return (0);
-}
-
-static int
-atse_probe_nexus(device_t dev)
-{
-
- device_set_desc(dev, "Altera Triple-Speed Ethernet MegaCore");
-
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-atse_attach_nexus(device_t dev)
-{
- struct atse_softc *sc;
- int error;
-
- sc = device_get_softc(dev);
- sc->atse_dev = dev;
- sc->atse_unit = device_get_unit(dev);
-
- /* Avalon-MM, atse management register region. */
- sc->atse_mem_rid = 0;
- sc->atse_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->atse_mem_rid, RF_ACTIVE);
- if (sc->atse_mem_res == NULL) {
- device_printf(dev, "failed to map memory for ctrl region\n");
- return (ENXIO);
- }
-
- error = atse_attach(dev);
- if (error) {
- /* Cleanup. */
- atse_detach_resources(dev);
- return (error);
- }
-
- return (0);
-}
-
-static device_method_t atse_methods_nexus[] = {
- /* Device interface */
- DEVMETHOD(device_probe, atse_probe_nexus),
- DEVMETHOD(device_attach, atse_attach_nexus),
- DEVMETHOD(device_detach, atse_detach_dev),
-
- /* MII interface */
- DEVMETHOD(miibus_readreg, atse_miibus_readreg),
- DEVMETHOD(miibus_writereg, atse_miibus_writereg),
- DEVMETHOD(miibus_statchg, atse_miibus_statchg),
-
- DEVMETHOD_END
-};
-
-static driver_t atse_driver_nexus = {
- "atse",
- atse_methods_nexus,
- sizeof(struct atse_softc)
-};
-
-DRIVER_MODULE(atse, nexus, atse_driver_nexus, 0, 0);
-DRIVER_MODULE(miibus, atse, miibus_driver, 0, 0);
diff --git a/sys/dev/altera/atse/if_atsereg.h b/sys/dev/altera/atse/if_atsereg.h
deleted file mode 100644
index 2f7643f27094..000000000000
--- a/sys/dev/altera/atse/if_atsereg.h
+++ /dev/null
@@ -1,464 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Bjoern A. Zeeb
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
- * ("MRC2"), as part of the DARPA MRC research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _DEV_IF_ATSEREG_H
-#define _DEV_IF_ATSEREG_H
-
-#include <dev/xdma/xdma.h>
-
-#define ATSE_VENDOR 0x6af7
-#define ATSE_DEVICE 0x00bd
-
-/* See hints file/fdt for ctrl port and Avalon FIFO addresses. */
-
-/* Section 3. Parameter Settings. */
-/*
- * This is a lot of options that affect the way things are synthesized.
- * We cannot really make them all hints and most of them might be stale.
- */
-
-/* 3-1 Core Configuration */
-#if 0
-static const char *atse_core_core_variation[] = {
- [0] = "10/100/1000 Mbps Ethernet MAC only",
- [1] = "10/100/1000 Mbps Ethernet MAC with 1000BASE-X/SGMII PCS",
- [2] = "1000BASE-X/SGMII PCS only",
- [3] = "1000 Mbps Small MAC",
- [4] = "10/100 Mbps Small MAC",
- NULL
-};
-static const char *atse_core_interface[] = {
- [0] = "MII", /* Core variation 4. */
- [1] = "GMII", /* Core variation 3. */
- [2] = "RGMII", /* Core variation 0,1,3. */
- [3] = "MII/GMII", /* Core variation 0,1. */
- NULL
-};
-#endif
-#define CORE_CORE_VARIATION 1 /* atse_core_core_variation[] */
-#define CORE_INTERFACE 3 /* atse_core_interface[] */
-#define CORE_USE_INTERNAL_FIFO 1
-#define CORE_NUMBER_OF_PORTS 1 /* Internal FIFO count. */
-#define CORE_USE_TRANSCEIVER_BLOCK 1 /* SGMII PCS transceiver:
- * LVDS I/O. */
-
-/* 3-2 MAC Options. */
-/* Ethernet MAC Options. */
-#define MAC_ENABLE_10_100_HDX_SUPPORT 0
-#define MAC_ENABLE_RG_G_MII_LOOPBACK 0
-#define MAC_ENABLE_SUPL_MAC_UCAST_ADDR 0 /* Supplementary MAC unicast. */
-#define MAC_INCLUDE_STATISTICS_COUNTERS 0
-#define MAC_STATISTICS_COUNTERS_64BIT 0
-#define MAC_INCLUDE_MC_HASHTABLE 0 /* Multicast. */
-#define MAC_ALIGN_PKTHDR_32BIT 1
-#define MAC_ENABLE_FDX_FLOW_CTRL 0
-#define MAC_ENABLE_VLAN_DETECTION 0 /* VLAN and stacked VLANs. */
-#define MAC_ENABLE_MAGIC_PKT_DETECTION 0
-/* MDIO Module. */
-#define MAC_MDIO_INCLUDE_MDIO_MODULE 1
-#define MAC_MDIO_HOST_CLOCK_DIVISOR 40 /* Not just On/Off. */
-
-/* 3-4 FIFO Options. */
-/* Width and Memory Type. */
-#if 0
-static char *fifo_memory_block[] = {
- [0] = "M4K",
- [1] = "M9K",
- [2] = "M144K",
- [3] = "MRAM",
- [4] = "AUTO",
- NULL
-};
-#endif
-#define FIFO_MEMORY_BLOCK 4
-#define FIFO_WIDTH 32 /* Other: 8 bits. */
-/* Depth. */
-#define FIFO_DEPTH_TX 2048 /* 64 .. 64k, 2048x32bits. */
-#define FIFO_DEPTH_RX 2048 /* 64 .. 64k, 2048x32bits. */
-
-#define ATSE_TX_LIST_CNT 5 /* Certainly not bufferbloat. */
-
-/* 3-4 PCS/Transceiver Options */
-/* PCS Options. */
-#define PCS_TXRX_PHY_ID 0x00000000 /* 32 bits */
-#define PCS_TXRX_ENABLE_SGMII_BRIDGE 0
-/* Transceiver Options. */
-#define PCS_TXRX_EXP_POWER_DOWN_SIGNAL 0 /* Export power down signal. */
-#define PCS_TXRX_ENABLE_DYNAMIC_RECONF 0 /* Dynamic trans. reconfig. */
-#define PCS_TXRX_STARTING_CHANNEL 0 /* 0..284. */
-
-/* -------------------------------------------------------------------------- */
-
-/* XXX more values based on the bitmaps provided. Cleanup. */
-/* See regs above. */
-#define AVALON_FIFO_TX_BLOCK_DIAGRAM 0
-#define AVALON_FIFO_TX_BLOCK_DIAGRAM_SHOW_SIGNALS 0
-#define AVALON_FIFO_TX_PARAM_SINGLE_RESET_MODE 0
-#define AVALON_FIFO_TX_BASIC_OPTS_DEPTH 16
-#define AVALON_FIFO_TX_BASIC_OPTS_ALLOW_BACKPRESSURE 1
-#define AVALON_FIFO_TX_BASIC_OPTS_CLOCK_SETTING "Single Clock Mode"
-#define AVALON_FIFO_TX_BASIC_OPTS_FIFO_IMPL "Construct FIFO from embedded memory blocks"
-#define AVALON_FIFO_TX_STATUS_PORT_CREATE_STATUS_INT_FOR_INPUT 1
-#define AVALON_FIFO_TX_STATUS_PORT_CREATE_STATUS_INT_FOR_OUTPUT 0
-#define AVALON_FIFO_TX_STATUS_PORT_ENABLE_IRQ_FOR_STATUS_PORT 1
-#define AVALON_FIFO_TX_INPUT_TYPE "AVALONMM_WRITE"
-#define AVALON_FIFO_TX_OUTPUT_TYPE "AVALONST_SOURCE"
-#define AVALON_FIFO_TX_AVALON_MM_PORT_SETTINGS_DATA_WIDTH ""
-#define AVALON_FIFO_TX_AVALON_ST_PORT_SETTINGS_BITS_PER_SYMBOL 8
-#define AVALON_FIFO_TX_AVALON_ST_PORT_SETTINGS_SYM_PER_BEAT 4
-#define AVALON_FIFO_TX_AVALON_ST_PORT_SETTINGS_ERROR_WIDTH 1
-#define AVALON_FIFO_TX_AVALON_ST_PORT_SETTINGS_CHANNEL_WIDTH 0
-#define AVALON_FIFO_TX_AVALON_ST_PORT_SETTINGS_ENABLE_PACKET_DATA 1
-
-#define AVALON_FIFO_RX_BLOCK_DIAGRAM 0
-#define AVALON_FIFO_RX_BLOCK_DIAGRAM_SHOW_SIGNALS 0
-#define AVALON_FIFO_RX_PARAM_SINGLE_RESET_MODE 0
-#define AVALON_FIFO_RX_BASIC_OPTS_DEPTH 16
-#define AVALON_FIFO_RX_BASIC_OPTS_ALLOW_BACKPRESSURE 1
-#define AVALON_FIFO_RX_BASIC_OPTS_CLOCK_SETTING "Single Clock Mode"
-#define AVALON_FIFO_RX_BASIC_OPTS_FIFO_IMPL "Construct FIFO from embedded memory blocks"
-#define AVALON_FIFO_RX_STATUS_PORT_CREATE_STATUS_INT_FOR_INPUT 1
-#define AVALON_FIFO_RX_STATUS_PORT_CREATE_STATUS_INT_FOR_OUTPUT 0
-#define AVALON_FIFO_RX_STATUS_PORT_ENABLE_IRQ_FOR_STATUS_PORT 1
-#define AVALON_FIFO_RX_INPUT_TYPE "AVALONST_SINK"
-#define AVALON_FIFO_RX_OUTPUT_TYPE "AVALONMM_READ"
-#define AVALON_FIFO_RX_AVALON_MM_PORT_SETTINGS_DATA_WIDTH ""
-#define AVALON_FIFO_RX_AVALON_ST_PORT_SETTINGS_BITS_PER_SYMBOL 8
-#define AVALON_FIFO_RX_AVALON_ST_PORT_SETTINGS_SYM_PER_BEAT 4
-#define AVALON_FIFO_RX_AVALON_ST_PORT_SETTINGS_ERROR_WIDTH 6
-#define AVALON_FIFO_RX_AVALON_ST_PORT_SETTINGS_CHANNEL_WIDTH 0
-#define AVALON_FIFO_RX_AVALON_ST_PORT_SETTINGS_ENABLE_PACKET_DATA 1
-
-/* -------------------------------------------------------------------------- */
-
-/* 5. Configuration Register Space. */
-
-/* 5-1, MAC Configuration Register Space; Dword offsets. */
-/* 0x00 - 0x17, Base Configuration. */
-#define BASE_CONFIG_REV 0x00 /* ro, IP Core ver. */
-#define BASE_CFG_REV_VER_MASK 0x0000FFFF
-#define BASE_CFG_REV_CUST_VERSION__MASK 0xFFFF0000
-
-#define BASE_CFG_SCRATCH 0x01 /* rw, 0 */
-
-#define BASE_CFG_COMMAND_CONFIG 0x02 /* rw, 0 */
-#define BASE_CFG_COMMAND_CONFIG_TX_ENA (1<<0) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_RX_ENA (1<<1) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_XON_GEN (1<<2) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_ETH_SPEED (1<<3) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_PROMIS_EN (1<<4) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_PAD_EN (1<<5) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_CRC_FWD (1<<6) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_PAUSE_FWD (1<<7) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_PAUSE_IGNORE (1<<8) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_TX_ADDR_INS (1<<9) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_HD_ENA (1<<10) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_EXCESS_COL (1<<11) /* ro */
-#define BASE_CFG_COMMAND_CONFIG_LATE_COL (1<<12) /* ro */
-#define BASE_CFG_COMMAND_CONFIG_SW_RESET (1<<13) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_MHASH_SEL (1<<14) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_LOOP_ENA (1<<15) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_TX_ADDR_SEL (1<<16|1<<17|1<<18) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_MAGIC_ENA (1<<19) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_SLEEP (1<<20) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_WAKEUP (1<<21) /* ro */
-#define BASE_CFG_COMMAND_CONFIG_XOFF_GEN (1<<22) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_CNTL_FRM_ENA (1<<23) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_NO_LGTH_CHECK (1<<24) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_ENA_10 (1<<25) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_RX_ERR_DISC (1<<26) /* rw */
-#define BASE_CFG_COMMAND_CONFIG_DISABLE_READ_TIMEOUT (1<<27) /* rw */
- /* 28-30 Reserved. */ /* - */
-#define BASE_CFG_COMMAND_CONFIG_CNT_RESET (1<<31) /* rw */
-
-#define BASE_CFG_MAC_0 0x03 /* rw, 0 */
-#define BASE_CFG_MAC_1 0x04 /* rw, 0 */
-#define BASE_CFG_FRM_LENGTH 0x05 /* rw/ro, 1518 */
-#define BASE_CFG_PAUSE_QUANT 0x06 /* rw, 0 */
-#define BASE_CFG_RX_SECTION_EMPTY 0x07 /* rw/ro, 0 */
-#define BASE_CFG_RX_SECTION_FULL 0x08 /* rw/ro, 0 */
-#define BASE_CFG_TX_SECTION_EMPTY 0x09 /* rw/ro, 0 */
-#define BASE_CFG_TX_SECTION_FULL 0x0A /* rw/ro, 0 */
-#define BASE_CFG_RX_ALMOST_EMPTY 0x0B /* rw/ro, 0 */
-#define BASE_CFG_RX_ALMOST_FULL 0x0C /* rw/ro, 0 */
-#define BASE_CFG_TX_ALMOST_EMPTY 0x0D /* rw/ro, 0 */
-#define BASE_CFG_TX_ALMOST_FULL 0x0E /* rw/ro, 0 */
-#define BASE_CFG_MDIO_ADDR0 0x0F /* rw, 0 */
-#define BASE_CFG_MDIO_ADDR1 0x10 /* rw, 1 */
-#define BASE_CFG_HOLDOFF_QUANT 0x11 /* rw, 0xFFFF */
-/* 0x12-0x16 Reserved. */ /* -, 0 */
-#define BASE_CFG_TX_IPG_LENGTH 0x17 /* rw, 0 */
-
-/* 0x18 - 0x38, Statistics Counters. */
-#define STATS_A_MAC_ID_0 0x18 /* ro */
-#define STATS_A_MAC_ID_1 0x19 /* ro */
-#define STATS_A_FRAMES_TX_OK 0x1A /* ro */
-#define STATS_A_FRAMES_RX_OK 0x1B /* ro */
-#define STATS_A_FCS_ERRORS 0x1C /* ro */
-#define STATS_A_ALIGNMENT_ERRORS 0x1D /* ro */
-#define STATS_A_OCTETS_TX_OK 0x1E /* ro */
-#define STATS_A_OCTETS_RX_OK 0x1F /* ro */
-#define STATS_A_TX_PAUSE_MAX_CTRL_FRAME 0x20 /* ro */
-#define STATS_A_RX_PAUSE_MAX_CTRL_FRAME 0x21 /* ro */
-#define STATS_IF_IN_ERRORS 0x22 /* ro */
-#define STATS_IF_OUT_ERRORS 0x23 /* ro */
-#define STATS_IF_IN_UCAST_PKTS 0x24 /* ro */
-#define STATS_IF_IN_MULTICAST_PKTS 0x25 /* ro */
-#define STATS_IF_IN_BROADCAST_PKTS 0x26 /* ro */
-#define STATS_IF_OUT_DISCARDS 0x27 /* ro */
-#define STATS_IF_OUT_UCAST_PKTS 0x28 /* ro */
-#define STATS_IF_OUT_MULTICAST_PKTS 0x29 /* ro */
-#define STATS_IF_OUT_BROADCAST_PKTS 0x2A /* ro */
-#define STATS_ETHER_STATS_DROP_EVENT 0x2B /* ro */
-#define STATS_ETHER_STATS_OCTETS 0x2C /* ro */
-#define STATS_ETHER_STATS_PKTS 0x2D /* ro */
-#define STATS_ETHER_STATS_USIZE_PKTS 0x2E /* ro */
-#define STATS_ETHER_STATS_OSIZE_PKTS 0x2F /* ro */
-#define STATS_ETHER_STATS_PKTS_64_OCTETS 0x30 /* ro */
-#define STATS_ETHER_STATS_PKTS_65_TO_127_OCTETS 0x31 /* ro */
-#define STATS_ETHER_STATS_PKTS_128_TO_255_OCTETS 0x32 /* ro */
-#define STATS_ETHER_STATS_PKTS_256_TO_511_OCTETS 0x33 /* ro */
-#define STATS_ETHER_STATS_PKTS_512_TO_1023_OCTETS 0x34 /* ro */
-#define STATS_ETHER_STATS_PKTS_1024_TO_1518_OCTETS 0x35 /* ro */
-#define STATS_ETHER_STATS_PKTS_1519_TO_X_OCTETS 0x36 /* ro */
-#define STATS_ETHER_STATS_JABBERS 0x37 /* ro */
-#define STATS_ETHER_STATS_FRAGMENTS 0x38 /* ro */
- /* 0x39, Reserved. */ /* - */
-
-/* 0x3A, Transmit Command. */
-#define TX_CMD_STAT 0x3A /* rw */
-#define TX_CMD_STAT_OMIT_CRC (1<<17)
-#define TX_CMD_STAT_TX_SHIFT16 (1<<18)
-
-/* 0x3B, Receive Command. */
-#define RX_CMD_STAT 0x3B /* rw */
-#define RX_CMD_STAT_RX_SHIFT16 (1<<25)
-
-/* 0x3C - 0x3E, Extended Statistics Counters. */
-#define ESTATS_MSB_A_OCTETS_TX_OK 0x3C /* ro */
-#define ESTATS_MSB_A_OCTETS_RX_OK 0x3D /* ro */
-#define ESTATS_MSB_ETHER_STATS_OCTETS 0x3E /* ro */
-
-/* 0x3F, Reserved. */
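
Each MSB register above supplies the upper 32 bits of its matching statistics counter, so a full 64-bit value is assembled from two reads. A sketch, again using a hypothetical csr_read_4() accessor; a real harvester would also guard against the LSB wrapping between the two reads:

	/*
	 * Sketch: read the 64-bit transmitted-octets counter.  csr_read_4()
	 * is a hypothetical accessor addressing the CSRs by dword offset.
	 */
	static uint64_t
	mac_octets_tx(struct atse_softc *sc)
	{
		uint64_t hi, lo;

		lo = csr_read_4(sc, STATS_A_OCTETS_TX_OK);
		hi = csr_read_4(sc, ESTATS_MSB_A_OCTETS_TX_OK);
		return ((hi << 32) | lo);
	}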
-
-/* 0x40 - 0x7F, Multicast Hash Table. */
-#define MHASH_START 0x40
-#define MHASH_LEN 0x3F
-
-/* 0x80 - 0x9F, MDIO Space 0 or PCS Function Configuration. */
-#define MDIO_0_START 0x80
-
-/* The following are offsets to the first PCS register at 0x80. */
-/* See sys/dev/mii/mii.h. */
-#define PCS_CONTROL 0x00 /* rw */
- /* Bits 0:4, Reserved. */ /* - */
-#define PCS_CONTROL_UNIDIRECTIONAL_ENABLE (1<<5) /* rw */
-#define PCS_CONTROL_SPEED_SELECTION (1<<6|1<<13) /* ro */
-#define PCS_CONTROL_COLLISION_TEST (1<<7) /* ro */
-#define PCS_CONTROL_DUPLEX_MODE (1<<8) /* ro */
-#define PCS_CONTROL_RESTART_AUTO_NEGOTIATION (1<<9) /* rw */
-#define PCS_CONTROL_ISOLATE (1<<10) /* rw */
-#define PCS_CONTROL_POWERDOWN (1<<11) /* rw */
-#define PCS_CONTROL_AUTO_NEGOTIATION_ENABLE (1<<12) /* rw */
- /* See bit 6 above. */ /* ro */
-#define PCS_CONTROL_LOOPBACK (1<<14) /* rw */
-#define PCS_CONTROL_RESET (1<<15) /* rw */
-
-#define PCS_STATUS 0x01 /* ro */
-#define PCS_STATUS_EXTENDED_CAPABILITY (1<<0) /* ro */
-#define PCS_STATUS_JABBER_DETECT (1<<1) /* -, 0 */
-#define PCS_STATUS_LINK_STATUS (1<<2) /* ro */
-#define PCS_STATUS_AUTO_NEGOTIATION_ABILITY (1<<3) /* ro */
-#define PCS_STATUS_REMOTE_FAULT (1<<4) /* -, 0 */
-#define PCS_STATUS_AUTO_NEGOTIATION_COMPLETE (1<<5) /* ro */
-#define PCS_STATUS_MF_PREAMBLE_SUPPRESSION (1<<6) /* -, 0 */
-#define PCS_STATUS_UNIDIRECTIONAL_ABILITY (1<<7) /* ro */
-#define PCS_STATUS_EXTENDED_STATUS (1<<8) /* -, 0 */
-#define PCS_STATUS_100BASET2_HALF_DUPLEX (1<<9) /* ro */
-#define PCS_STATUS_100BASET2_FULL_DUPLEX (1<<10) /* ro */
-#define PCS_STATUS_10MBPS_HALF_DUPLEX (1<<11) /* ro */
-#define PCS_STATUS_10MBPS_FULL_DUPLEX (1<<12) /* ro */
-#define PCS_STATUS_100BASE_X_HALF_DUPLEX (1<<13) /* ro */
-#define PCS_STATUS_100BASE_X_FULL_DUPLEX (1<<14) /* ro */
-#define PCS_STATUS_100BASE_T4 (1<<15) /* ro */
-
-#define PCS_PHY_IDENTIFIER_0 0x02 /* ro */
-#define PCS_PHY_IDENTIFIER_1 0x03 /* ro */
-
-#define PCS_DEV_ABILITY 0x04 /* rw */
- /* 1000BASE-X */
- /* Bits 0:4, Reserved. */ /* - */
-#define PCS_DEV_ABILITY_1000BASE_X_FD (1<<5) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_HD (1<<6) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_PS1 (1<<7) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_PS2 (1<<8) /* rw */
- /* Bits 9:11, Reserved. */ /* - */
-#define PCS_DEV_ABILITY_1000BASE_X_RF1 (1<<12) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_RF2 (1<<13) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_ACK (1<<14) /* rw */
-#define PCS_DEV_ABILITY_1000BASE_X_NP (1<<15) /* rw */
-
-#define PCS_PARTNER_ABILITY 0x05 /* ro */
- /* 1000BASE-X */
- /* Bits 0:4, Reserved. */ /* - */
-#define PCS_PARTNER_ABILITY_1000BASE_X_FD (1<<5) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_HD (1<<6) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_PS1 (1<<7) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_PS2 (1<<8) /* ro */
- /* Bits 9:11, Reserved. */ /* - */
-#define PCS_PARTNER_ABILITY_1000BASE_X_RF1 (1<<12) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_RF2 (1<<13) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_ACK (1<<14) /* ro */
-#define PCS_PARTNER_ABILITY_1000BASE_X_NP (1<<15) /* ro */
- /* SGMII */
- /* Bits 0:9, Reserved. */ /* - */
-#define PCS_PARTNER_ABILITY_SGMII_COPPER_SPEED0 (1<<10) /* ro */
-#define PCS_PARTNER_ABILITY_SGMII_COPPER_SPEED1 (1<<11) /* ro */
-#define PCS_PARTNER_ABILITY_SGMII_COPPER_DUPLEX_STATUS (1<<12) /* ro */
- /* Bit 13, Reserved. */ /* - */
-#define PCS_PARTNER_ABILITY_SGMII_ACK (1<<14) /* ro */
-#define PCS_PARTNER_ABILITY_SGMII_COPPER_LINK_STATUS (1<<15) /* ro */
-
-#define PCS_AN_EXPANSION 0x06 /* ro */
-#define PCS_AN_EXPANSION_LINK_PARTNER_AUTO_NEGOTIATION_ABLE (1<<0) /* ro */
-#define PCS_AN_EXPANSION_PAGE_RECEIVE (1<<1) /* ro */
-#define PCS_AN_EXPANSION_NEXT_PAGE_ABLE (1<<2) /* -, 0 */
- /* Bits 3:15, Reserved. */ /* - */
-
-#define PCS_DEVICE_NEXT_PAGE 0x07 /* ro */
-#define PCS_PARTNER_NEXT_PAGE 0x08 /* ro */
-#define PCS_MASTER_SLAVE_CNTL 0x09 /* ro */
-#define PCS_MASTER_SLAVE_STAT 0x0A /* ro */
- /* 0x0B - 0x0E, Reserved */ /* - */
-#define PCS_EXTENDED_STATUS 0x0F /* ro */
-/* Specific Extended Registers. */
-#define PCS_EXT_SCRATCH 0x10 /* rw */
-#define PCS_EXT_REV 0x11 /* ro */
-#define PCS_EXT_LINK_TIMER_0 0x12 /* rw */
-#define PCS_EXT_LINK_TIMER_1 0x13 /* rw */
-#define PCS_EXT_IF_MODE 0x14 /* rw */
-#define PCS_EXT_IF_MODE_SGMII_ENA (1<<0) /* rw */
-#define PCS_EXT_IF_MODE_USE_SGMII_AN (1<<1) /* rw */
-#define PCS_EXT_IF_MODE_SGMII_SPEED1 (1<<2) /* rw */
-#define PCS_EXT_IF_MODE_SGMII_SPEED0 (1<<3) /* rw */
-#define PCS_EXT_IF_MODE_SGMII_DUPLEX (1<<4) /* rw */
- /* Bits 5:15, Reserved. */ /* - */
-
-#define PCS_EXT_DISABLE_READ_TIMEOUT 0x15 /* rw */
-#define PCS_EXT_READ_TIMEOUT 0x16 /* ro */
- /* 0x17-0x1F, Reserved. */
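
When the PCS is used for SGMII, if_mode is typically programmed before (re)starting autonegotiation through PCS_CONTROL. A sketch, assuming hypothetical pcs_read()/pcs_write() helpers that address these registers relative to MDIO_0_START:

	/*
	 * Sketch: select SGMII with in-band autonegotiation.  pcs_read()
	 * and pcs_write() are hypothetical helpers addressing the PCS
	 * registers relative to MDIO_0_START.
	 */
	static void
	pcs_sgmii_autoneg(struct atse_softc *sc)
	{
		uint16_t v;

		v = pcs_read(sc, PCS_EXT_IF_MODE);
		v |= PCS_EXT_IF_MODE_SGMII_ENA | PCS_EXT_IF_MODE_USE_SGMII_AN;
		pcs_write(sc, PCS_EXT_IF_MODE, v);

		v = pcs_read(sc, PCS_CONTROL);
		v |= PCS_CONTROL_AUTO_NEGOTIATION_ENABLE |
		    PCS_CONTROL_RESTART_AUTO_NEGOTIATION;
		pcs_write(sc, PCS_CONTROL, v);
	}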
-
-/* 0xA0 - 0xBF, MDIO Space 1. */
-#define MDIO_1_START 0xA0
-#define ATSE_BMCR MDIO_1_START
-
-/* 0xC0 - 0xC7, Supplementary Address. */
-#define SUPPL_ADDR_SMAC_0_0 0xC0 /* rw */
-#define SUPPL_ADDR_SMAC_0_1 0xC1 /* rw */
-#define SUPPL_ADDR_SMAC_1_0 0xC2 /* rw */
-#define SUPPL_ADDR_SMAC_1_1 0xC3 /* rw */
-#define SUPPL_ADDR_SMAC_2_0 0xC4 /* rw */
-#define SUPPL_ADDR_SMAC_2_1 0xC5 /* rw */
-#define SUPPL_ADDR_SMAC_3_0 0xC6 /* rw */
-#define SUPPL_ADDR_SMAC_3_1 0xC7 /* rw */
-
-/* 0xC8 - 0xCF, Reserved; set to zero, ignore on read. */
-/* 0xD0 - 0xFF, Reserved; set to zero, ignore on read. */
-
-/* -------------------------------------------------------------------------- */
-
-/* DE4 Intel Strata Flash Ethernet Option Bits area. */
-/* XXX-BZ this is something a loader will have to handle for us. */
-#define ALTERA_ETHERNET_OPTION_BITS_OFF 0x00008000
-#define ALTERA_ETHERNET_OPTION_BITS_LEN 0x00007fff
-
-/* -------------------------------------------------------------------------- */
-
-struct atse_softc {
- if_t atse_ifp;
- struct resource *atse_mem_res;
- device_t atse_miibus;
- device_t atse_dev;
- int atse_unit;
- int atse_mem_rid;
- int atse_phy_addr;
- int atse_if_flags;
- bus_addr_t atse_bmcr0;
- bus_addr_t atse_bmcr1;
- uint32_t atse_flags;
-#define ATSE_FLAGS_LINK 0x00000001
-#define ATSE_FLAGS_ERROR 0x00000002
-#define ATSE_FLAGS_SOP_SEEN 0x00000004
- uint8_t atse_eth_addr[ETHER_ADDR_LEN];
-#define ATSE_ETH_ADDR_DEF 0x01
-#define ATSE_ETH_ADDR_SUPP1 0x02
-#define ATSE_ETH_ADDR_SUPP2 0x04
-#define ATSE_ETH_ADDR_SUPP3 0x08
-#define ATSE_ETH_ADDR_SUPP4 0x10
-#define ATSE_ETH_ADDR_ALL 0x1f
- int16_t atse_rx_cycles; /* POLLING */
-#define RX_CYCLES_IN_INTR 5
- uint32_t atse_rx_err[6];
-#define ATSE_RX_ERR_FIFO_THRES_EOP 0 /* FIFO threshold reached, on EOP. */
-#define ATSE_RX_ERR_ELEN 1 /* Frame/payload length not valid. */
-#define ATSE_RX_ERR_CRC32 2 /* CRC-32 error. */
-#define ATSE_RX_ERR_FIFO_THRES_TRUNC 3 /* FIFO thresh., truncated frame. */
-#define ATSE_RX_ERR_4 4 /* ? */
-#define ATSE_RX_ERR_5 5 /* / */
-#define ATSE_RX_ERR_MAX 6
- struct callout atse_tick;
- struct mtx atse_mtx;
- device_t dev;
-
- /* xDMA */
- xdma_controller_t *xdma_tx;
- xdma_channel_t *xchan_tx;
- void *ih_tx;
- int txcount;
-
- xdma_controller_t *xdma_rx;
- xdma_channel_t *xchan_rx;
- void *ih_rx;
-
- struct buf_ring *br;
- struct mtx br_mtx;
-};
-
-int atse_attach(device_t);
-int atse_detach_dev(device_t);
-void atse_detach_resources(device_t);
-
-int atse_miibus_readreg(device_t, int, int);
-int atse_miibus_writereg(device_t, int, int, int);
-void atse_miibus_statchg(device_t);
-
-#endif /* _DEV_IF_ATSEREG_H */
diff --git a/sys/dev/altera/avgen/altera_avgen.c b/sys/dev/altera/avgen/altera_avgen.c
deleted file mode 100644
index 846167c649f8..000000000000
--- a/sys/dev/altera/avgen/altera_avgen.c
+++ /dev/null
@@ -1,551 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012-2013, 2016 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bio.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/stat.h>
-#include <sys/systm.h>
-#include <sys/uio.h>
-
-#include <geom/geom_disk.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <vm/vm.h>
-
-#include <dev/altera/avgen/altera_avgen.h>
-
-/*
- * Generic device driver for allowing read(), write(), and mmap() on
- * memory-mapped, Avalon-attached devices. There is no actual dependence on
- * Avalon, so conceivably this should just be soc_dev or similar, since many
- * system-on-chip bus environments would work fine with the same code.
- */
-
-static d_mmap_t altera_avgen_mmap;
-static d_read_t altera_avgen_read;
-static d_write_t altera_avgen_write;
-
-#define ALTERA_AVGEN_DEVNAME "altera_avgen"
-#define ALTERA_AVGEN_DEVNAME_FMT (ALTERA_AVGEN_DEVNAME "%d")
-
-static struct cdevsw avg_cdevsw = {
- .d_version = D_VERSION,
- .d_mmap = altera_avgen_mmap,
- .d_read = altera_avgen_read,
- .d_write = altera_avgen_write,
- .d_name = ALTERA_AVGEN_DEVNAME,
-};
-
-#define ALTERA_AVGEN_SECTORSIZE 512 /* Not configurable at this time. */
-
-static int
-altera_avgen_read(struct cdev *dev, struct uio *uio, int flag)
-{
- struct altera_avgen_softc *sc;
- u_long offset, size;
-#ifdef NOTYET
- uint64_t v8;
-#endif
- uint32_t v4;
- uint16_t v2;
- uint8_t v1;
- u_int width;
- int error;
-
- sc = dev->si_drv1;
- if ((sc->avg_flags & ALTERA_AVALON_FLAG_READ) == 0)
- return (EACCES);
- width = sc->avg_width;
- if (uio->uio_offset < 0 || uio->uio_offset % width != 0 ||
- uio->uio_resid % width != 0)
- return (ENODEV);
- size = rman_get_size(sc->avg_res);
- if ((uio->uio_offset + uio->uio_resid < 0) ||
- (uio->uio_offset + uio->uio_resid > size))
- return (ENODEV);
- while (uio->uio_resid > 0) {
- offset = uio->uio_offset;
- if (offset + width > size)
- return (ENODEV);
- switch (width) {
- case 1:
- v1 = bus_read_1(sc->avg_res, offset);
- error = uiomove(&v1, sizeof(v1), uio);
- break;
-
- case 2:
- v2 = bus_read_2(sc->avg_res, offset);
- error = uiomove(&v2, sizeof(v2), uio);
- break;
-
- case 4:
- v4 = bus_read_4(sc->avg_res, offset);
- error = uiomove(&v4, sizeof(v4), uio);
- break;
-
-#ifdef NOTYET
- case 8:
- v8 = bus_read_8(sc->avg_res, offset);
- error = uiomove(&v8, sizeof(v8), uio);
- break;
-
-#endif
-
- default:
- panic("%s: unexpected width %u", __func__, width);
- }
- if (error)
- return (error);
- }
- return (0);
-}
-
-static int
-altera_avgen_write(struct cdev *dev, struct uio *uio, int flag)
-{
- struct altera_avgen_softc *sc;
- u_long offset, size;
-#ifdef NOTYET
- uint64_t v8;
-#endif
- uint32_t v4;
- uint16_t v2;
- uint8_t v1;
- u_int width;
- int error;
-
- sc = dev->si_drv1;
- if ((sc->avg_flags & ALTERA_AVALON_FLAG_WRITE) == 0)
- return (EACCES);
- width = sc->avg_width;
- if (uio->uio_offset < 0 || uio->uio_offset % width != 0 ||
- uio->uio_resid % width != 0)
- return (ENODEV);
- size = rman_get_size(sc->avg_res);
- while (uio->uio_resid > 0) {
- offset = uio->uio_offset;
- if (offset + width > size)
- return (ENODEV);
- switch (width) {
- case 1:
- error = uiomove(&v1, sizeof(v1), uio);
- if (error)
- return (error);
- bus_write_1(sc->avg_res, offset, v1);
- break;
-
- case 2:
- error = uiomove(&v2, sizeof(v2), uio);
- if (error)
- return (error);
- bus_write_2(sc->avg_res, offset, v2);
- break;
-
- case 4:
- error = uiomove(&v4, sizeof(v4), uio);
- if (error)
- return (error);
- bus_write_4(sc->avg_res, offset, v4);
- break;
-
-#ifdef NOTYET
- case 8:
- error = uiomove(&v8, sizeof(v8), uio);
- if (error)
- return (error);
- bus_write_8(sc->avg_res, offset, v8);
- break;
-#endif
-
- default:
- panic("%s: unexpected width %u", __func__, width);
- }
- }
- return (0);
-}
-
-static int
-altera_avgen_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr,
- int nprot, vm_memattr_t *memattr)
-{
- struct altera_avgen_softc *sc;
-
- sc = dev->si_drv1;
- if (nprot & VM_PROT_READ) {
- if ((sc->avg_flags & ALTERA_AVALON_FLAG_MMAP_READ) == 0)
- return (EACCES);
- }
- if (nprot & VM_PROT_WRITE) {
- if ((sc->avg_flags & ALTERA_AVALON_FLAG_MMAP_WRITE) == 0)
- return (EACCES);
- }
- if (nprot & VM_PROT_EXECUTE) {
- if ((sc->avg_flags & ALTERA_AVALON_FLAG_MMAP_EXEC) == 0)
- return (EACCES);
- }
- if (trunc_page(offset) == offset &&
- offset + PAGE_SIZE > offset &&
- rman_get_size(sc->avg_res) >= offset + PAGE_SIZE) {
- *paddr = rman_get_start(sc->avg_res) + offset;
- *memattr = VM_MEMATTR_UNCACHEABLE;
- } else
- return (ENODEV);
- return (0);
-}
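
Given the checks above, userspace can map one page of the device uncached once the mmapio property grants the matching protections. A hypothetical consumer follows; the device path is illustrative, since the real node name depends on the devname/devunit configuration:

	/* Hypothetical userspace consumer of the avgen mmap() interface. */
	#include <sys/mman.h>
	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int
	map_avgen(void)
	{
		volatile uint32_t *regs;
		int fd;

		fd = open("/dev/altera_avgen0", O_RDWR); /* Illustrative path. */
		if (fd == -1)
			return (-1);
		regs = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
		if (regs == MAP_FAILED) {
			close(fd);
			return (-1);
		}
		regs[0] = 0x1;	/* 4-byte store straight to the device. */
		return (fd);
	}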
-
-/*
- * NB: We serialise block reads and writes in case the OS is generating
- * concurrent I/O against the same block, in which case we want one I/O (or
- * another) to win. This is not sufficient to provide atomicity for the
- * sector in the presence of a fail stop -- however, we're just writing this
- * to non-persistent DRAM .. right?
- */
-static void
-altera_avgen_disk_strategy(struct bio *bp)
-{
- struct altera_avgen_softc *sc;
- void *data;
- long bcount;
- daddr_t pblkno;
- int error;
-
- sc = bp->bio_disk->d_drv1;
- data = bp->bio_data;
- bcount = bp->bio_bcount;
- pblkno = bp->bio_pblkno;
- error = 0;
-
- /*
- * Serialize block reads / writes.
- */
- mtx_lock(&sc->avg_disk_mtx);
- switch (bp->bio_cmd) {
- case BIO_READ:
- if (!(sc->avg_flags & ALTERA_AVALON_FLAG_GEOM_READ)) {
- error = EROFS;
- break;
- }
- switch (sc->avg_width) {
- case 1:
- bus_read_region_1(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint8_t *)data, bcount);
- break;
-
- case 2:
- bus_read_region_2(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint16_t *)data, bcount / 2);
- break;
-
- case 4:
- bus_read_region_4(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint32_t *)data, bcount / 4);
- break;
-
- default:
- panic("%s: unexpected width %u", __func__,
- sc->avg_width);
- }
- break;
-
- case BIO_WRITE:
- if (!(sc->avg_flags & ALTERA_AVALON_FLAG_GEOM_WRITE)) {
- error = EROFS;
- break;
- }
- switch (sc->avg_width) {
- case 1:
- bus_write_region_1(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint8_t *)data, bcount);
- break;
-
- case 2:
- bus_write_region_2(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint16_t *)data, bcount / 2);
- break;
-
- case 4:
- bus_write_region_4(sc->avg_res,
- bp->bio_pblkno * ALTERA_AVGEN_SECTORSIZE,
- (uint32_t *)data, bcount / 4);
- break;
-
- default:
- panic("%s: unexpected width %u", __func__,
- sc->avg_width);
- }
- break;
-
- default:
- error = EOPNOTSUPP;
- break;
- }
- mtx_unlock(&sc->avg_disk_mtx);
- biofinish(bp, NULL, error);
-}
-
-static int
-altera_avgen_process_options(struct altera_avgen_softc *sc,
- const char *str_fileio, const char *str_geomio, const char *str_mmapio,
- const char *str_devname, int devunit)
-{
- const char *cp;
- device_t dev = sc->avg_dev;
-
- /*
- * Check for valid combinations of options.
- */
- if (str_fileio == NULL && str_geomio == NULL && str_mmapio == NULL) {
- device_printf(dev,
- "at least one of %s, %s, or %s must be specified\n",
- ALTERA_AVALON_STR_FILEIO, ALTERA_AVALON_STR_GEOMIO,
- ALTERA_AVALON_STR_MMAPIO);
- return (ENXIO);
- }
-
- /*
- * Validity check: a device can either be a GEOM device (in which case
- * we use GEOM to register the device node), or a special device --
- * but not both as that causes a collision in /dev.
- */
- if (str_geomio != NULL && (str_fileio != NULL || str_mmapio != NULL)) {
- device_printf(dev,
- "at most one of %s and (%s or %s) may be specified\n",
- ALTERA_AVALON_STR_GEOMIO, ALTERA_AVALON_STR_FILEIO,
- ALTERA_AVALON_STR_MMAPIO);
- return (ENXIO);
- }
-
- /*
- * Ensure that a unit is specified if a name is also specified.
- */
- if (str_devname == NULL && devunit != -1) {
- device_printf(dev, "%s requires %s be specified\n",
- ALTERA_AVALON_STR_DEVUNIT, ALTERA_AVALON_STR_DEVNAME);
- return (ENXIO);
- }
-
- /*
- * Extract, digest, and save values.
- */
- switch (sc->avg_width) {
- case 1:
- case 2:
- case 4:
-#ifdef NOTYET
- case 8:
-#endif
- break;
-
- default:
- device_printf(dev, "%s unsupported value %u\n",
- ALTERA_AVALON_STR_WIDTH, sc->avg_width);
- return (ENXIO);
- }
- sc->avg_flags = 0;
- if (str_fileio != NULL) {
- for (cp = str_fileio; *cp != '\0'; cp++) {
- switch (*cp) {
- case ALTERA_AVALON_CHAR_READ:
- sc->avg_flags |= ALTERA_AVALON_FLAG_READ;
- break;
-
- case ALTERA_AVALON_CHAR_WRITE:
- sc->avg_flags |= ALTERA_AVALON_FLAG_WRITE;
- break;
-
- default:
- device_printf(dev,
- "invalid %s character %c\n",
- ALTERA_AVALON_STR_FILEIO, *cp);
- return (ENXIO);
- }
- }
- }
- if (str_geomio != NULL) {
- for (cp = str_geomio; *cp != '\0'; cp++){
- switch (*cp) {
- case ALTERA_AVALON_CHAR_READ:
- sc->avg_flags |= ALTERA_AVALON_FLAG_GEOM_READ;
- break;
-
- case ALTERA_AVALON_CHAR_WRITE:
- sc->avg_flags |= ALTERA_AVALON_FLAG_GEOM_WRITE;
- break;
-
- default:
- device_printf(dev,
- "invalid %s character %c\n",
- ALTERA_AVALON_STR_GEOMIO, *cp);
- return (ENXIO);
- }
- }
- }
- if (str_mmapio != NULL) {
- for (cp = str_mmapio; *cp != '\0'; cp++) {
- switch (*cp) {
- case ALTERA_AVALON_CHAR_READ:
- sc->avg_flags |= ALTERA_AVALON_FLAG_MMAP_READ;
- break;
-
- case ALTERA_AVALON_CHAR_WRITE:
- sc->avg_flags |=
- ALTERA_AVALON_FLAG_MMAP_WRITE;
- break;
-
- case ALTERA_AVALON_CHAR_EXEC:
- sc->avg_flags |= ALTERA_AVALON_FLAG_MMAP_EXEC;
- break;
-
- default:
- device_printf(dev,
- "invalid %s character %c\n",
- ALTERA_AVALON_STR_MMAPIO, *cp);
- return (ENXIO);
- }
- }
- }
- return (0);
-}
-
-int
-altera_avgen_attach(struct altera_avgen_softc *sc, const char *str_fileio,
- const char *str_geomio, const char *str_mmapio, const char *str_devname,
- int devunit)
-{
- device_t dev = sc->avg_dev;
- int error;
-
- error = altera_avgen_process_options(sc, str_fileio, str_geomio,
- str_mmapio, str_devname, devunit);
- if (error)
- return (error);
-
- if (rman_get_size(sc->avg_res) >= PAGE_SIZE || str_mmapio != NULL) {
- if (rman_get_size(sc->avg_res) % PAGE_SIZE != 0) {
- device_printf(dev,
- "memory region not even multiple of page size\n");
- return (ENXIO);
- }
- if (rman_get_start(sc->avg_res) % PAGE_SIZE != 0) {
- device_printf(dev, "memory region not page-aligned\n");
- return (ENXIO);
- }
- }
-
- /*
- * If a GEOM permission is requested, then create the device via GEOM.
- * Otherwise, create a special device. We checked during options
- * processing that both weren't requested at once.
- */
- if (str_devname != NULL) {
- sc->avg_name = strdup(str_devname, M_TEMP);
- devunit = sc->avg_unit;
- } else
- sc->avg_name = strdup(ALTERA_AVGEN_DEVNAME, M_TEMP);
- if (sc->avg_flags & (ALTERA_AVALON_FLAG_GEOM_READ |
- ALTERA_AVALON_FLAG_GEOM_WRITE)) {
- mtx_init(&sc->avg_disk_mtx, "altera_avgen_disk", NULL,
- MTX_DEF);
- sc->avg_disk = disk_alloc();
- sc->avg_disk->d_drv1 = sc;
- sc->avg_disk->d_strategy = altera_avgen_disk_strategy;
- if (devunit == -1)
- devunit = 0;
- sc->avg_disk->d_name = sc->avg_name;
- sc->avg_disk->d_unit = devunit;
-
- /*
- * NB: As avg_res is a multiple of PAGE_SIZE, it is also a
- * multiple of ALTERA_AVGEN_SECTORSIZE.
- */
- sc->avg_disk->d_sectorsize = ALTERA_AVGEN_SECTORSIZE;
- sc->avg_disk->d_mediasize = rman_get_size(sc->avg_res);
- sc->avg_disk->d_maxsize = ALTERA_AVGEN_SECTORSIZE;
- disk_create(sc->avg_disk, DISK_VERSION);
- } else {
- /* Device node allocation. */
- if (str_devname == NULL) {
- str_devname = ALTERA_AVGEN_DEVNAME;
- devunit = sc->avg_unit;
- }
- if (devunit != -1)
- sc->avg_cdev = make_dev(&avg_cdevsw, sc->avg_unit,
- UID_ROOT, GID_WHEEL, S_IRUSR | S_IWUSR, "%s%d",
- str_devname, devunit);
- else
- sc->avg_cdev = make_dev(&avg_cdevsw, sc->avg_unit,
- UID_ROOT, GID_WHEEL, S_IRUSR | S_IWUSR,
- "%s", str_devname);
- if (sc->avg_cdev == NULL) {
- device_printf(sc->avg_dev, "%s: make_dev failed\n",
- __func__);
- return (ENXIO);
- }
-
- /* XXXRW: Slight race between make_dev(9) and here. */
- sc->avg_cdev->si_drv1 = sc;
- }
- return (0);
-}
-
-void
-altera_avgen_detach(struct altera_avgen_softc *sc)
-{
-
- KASSERT((sc->avg_disk != NULL) || (sc->avg_cdev != NULL),
- ("%s: neither GEOM nor special device", __func__));
-
- if (sc->avg_disk != NULL) {
- disk_gone(sc->avg_disk);
- disk_destroy(sc->avg_disk);
- free(sc->avg_name, M_TEMP);
- mtx_destroy(&sc->avg_disk_mtx);
- } else {
- destroy_dev(sc->avg_cdev);
- }
-}
diff --git a/sys/dev/altera/avgen/altera_avgen.h b/sys/dev/altera/avgen/altera_avgen.h
deleted file mode 100644
index ffa813b8ec65..000000000000
--- a/sys/dev/altera/avgen/altera_avgen.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012, 2016 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _DEV_ALTERA_AVALON_H_
-#define _DEV_ALTERA_AVALON_H_
-
-struct altera_avgen_softc {
- /*
- * Bus-related fields.
- */
- device_t avg_dev;
- int avg_unit;
- char *avg_name;
-
- /*
- * The device node and memory-mapped I/O region.
- */
- struct cdev *avg_cdev;
- struct resource *avg_res;
- int avg_rid;
-
- /*
- * Access properties configured by device.hints.
- */
- u_int avg_flags;
- u_int avg_width;
- u_int avg_sectorsize;
-
- /*
- * disk(9) state, if required for this device.
- */
- struct disk *avg_disk;
- struct mtx avg_disk_mtx;
-};
-
-/*
- * Various flags extracted from device.hints to configure operations on the
- * device.
- */
-#define ALTERA_AVALON_FLAG_READ 0x01
-#define ALTERA_AVALON_FLAG_WRITE 0x02
-#define ALTERA_AVALON_FLAG_MMAP_READ 0x04
-#define ALTERA_AVALON_FLAG_MMAP_WRITE 0x08
-#define ALTERA_AVALON_FLAG_MMAP_EXEC 0x10
-#define ALTERA_AVALON_FLAG_GEOM_READ 0x20
-#define ALTERA_AVALON_FLAG_GEOM_WRITE 0x40
-
-#define ALTERA_AVALON_CHAR_READ 'r'
-#define ALTERA_AVALON_CHAR_WRITE 'w'
-#define ALTERA_AVALON_CHAR_EXEC 'x'
-
-#define ALTERA_AVALON_STR_WIDTH "width"
-#define ALTERA_AVALON_STR_FILEIO "fileio"
-#define ALTERA_AVALON_STR_GEOMIO "geomio"
-#define ALTERA_AVALON_STR_MMAPIO "mmapio"
-#define ALTERA_AVALON_STR_DEVNAME "devname"
-#define ALTERA_AVALON_STR_DEVUNIT "devunit"
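
For the nexus attachment these knobs arrive via device.hints(5). A hypothetical entry exposing a 4-byte-wide window for read/write file I/O might look like the following; the address, size, and the memory-resource hint names (at/maddr/msize) follow the usual hinted-bus convention and are illustrative only:

	hint.altera_avgen.0.at="nexus0"
	hint.altera_avgen.0.maddr=0x7f00a000	# illustrative base address
	hint.altera_avgen.0.msize=0x1000	# illustrative window size
	hint.altera_avgen.0.width=4
	hint.altera_avgen.0.fileio="rw"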
-
-/*
- * Driver setup routines from the bus attachment/teardown.
- */
-int altera_avgen_attach(struct altera_avgen_softc *sc,
- const char *str_fileio, const char *str_geomio,
- const char *str_mmapio, const char *str_devname, int devunit);
-void altera_avgen_detach(struct altera_avgen_softc *sc);
-
-#endif /* _DEV_ALTERA_AVALON_H_ */
diff --git a/sys/dev/altera/avgen/altera_avgen_fdt.c b/sys/dev/altera/avgen/altera_avgen_fdt.c
deleted file mode 100644
index ad12fc9df265..000000000000
--- a/sys/dev/altera/avgen/altera_avgen_fdt.c
+++ /dev/null
@@ -1,159 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012-2013, 2016 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/stat.h>
-#include <sys/systm.h>
-#include <sys/uio.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <vm/vm.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <dev/altera/avgen/altera_avgen.h>
-
-static int
-altera_avgen_fdt_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (ofw_bus_is_compatible(dev, "sri-cambridge,avgen")) {
- device_set_desc(dev, "Generic Altera Avalon device attachment");
- return (BUS_PROBE_DEFAULT);
- }
- return (ENXIO);
-}
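
A matching device-tree node binds through the sri-cambridge,avgen compatible string and carries the same properties queried in the attach routine below. A hypothetical node, with address, size, and values chosen purely for illustration:

	avgen@7f00a000 {
		compatible = "sri-cambridge,avgen";
		reg = <0x7f00a000 0x1000>;	/* illustrative address/size */
		sri-cambridge,width = <4>;
		sri-cambridge,fileio = "rw";
		sri-cambridge,devname = "berirom";	/* optional */
	};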
-
-static int
-altera_avgen_fdt_attach(device_t dev)
-{
- struct altera_avgen_softc *sc;
- char *str_fileio, *str_geomio, *str_mmapio;
- char *str_devname;
- phandle_t node;
- pcell_t cell;
- int devunit, error;
-
- sc = device_get_softc(dev);
- sc->avg_dev = dev;
- sc->avg_unit = device_get_unit(dev);
-
- /*
- * Query driver-specific OpenFirmware properties to determine how to
- * expose the device via /dev.
- */
- str_fileio = NULL;
- str_geomio = NULL;
- str_mmapio = NULL;
- str_devname = NULL;
- devunit = -1;
- sc->avg_width = 1;
- node = ofw_bus_get_node(dev);
- if (OF_getprop(node, "sri-cambridge,width", &cell, sizeof(cell)) > 0)
- sc->avg_width = cell;
- (void)OF_getprop_alloc(node, "sri-cambridge,fileio",
- (void **)&str_fileio);
- (void)OF_getprop_alloc(node, "sri-cambridge,geomio",
- (void **)&str_geomio);
- (void)OF_getprop_alloc(node, "sri-cambridge,mmapio",
- (void **)&str_mmapio);
- (void)OF_getprop_alloc(node, "sri-cambridge,devname",
- (void **)&str_devname);
- if (OF_getprop(node, "sri-cambridge,devunit", &cell, sizeof(cell)) > 0)
- devunit = cell;
-
- /* Memory allocation and checking. */
- sc->avg_rid = 0;
- sc->avg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->avg_rid, RF_ACTIVE);
- if (sc->avg_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- return (ENXIO);
- }
- error = altera_avgen_attach(sc, str_fileio, str_geomio, str_mmapio,
- str_devname, devunit);
- if (error != 0)
- bus_release_resource(dev, SYS_RES_MEMORY, sc->avg_rid,
- sc->avg_res);
- if (str_fileio != NULL)
- OF_prop_free(str_fileio);
- if (str_geomio != NULL)
- OF_prop_free(str_geomio);
- if (str_mmapio != NULL)
- OF_prop_free(str_mmapio);
- if (str_devname != NULL)
- OF_prop_free(str_devname);
- return (error);
-}
-
-static int
-altera_avgen_fdt_detach(device_t dev)
-{
- struct altera_avgen_softc *sc;
-
- sc = device_get_softc(dev);
- altera_avgen_detach(sc);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->avg_rid, sc->avg_res);
- return (0);
-}
-
-static device_method_t altera_avgen_fdt_methods[] = {
- DEVMETHOD(device_probe, altera_avgen_fdt_probe),
- DEVMETHOD(device_attach, altera_avgen_fdt_attach),
- DEVMETHOD(device_detach, altera_avgen_fdt_detach),
- { 0, 0 }
-};
-
-static driver_t altera_avgen_fdt_driver = {
- "altera_avgen",
- altera_avgen_fdt_methods,
- sizeof(struct altera_avgen_softc),
-};
-
-DRIVER_MODULE(avgen, simplebus, altera_avgen_fdt_driver, 0, 0);
diff --git a/sys/dev/altera/avgen/altera_avgen_nexus.c b/sys/dev/altera/avgen/altera_avgen_nexus.c
deleted file mode 100644
index 67448bc83f9c..000000000000
--- a/sys/dev/altera/avgen/altera_avgen_nexus.c
+++ /dev/null
@@ -1,141 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012-2013, 2016 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/stat.h>
-#include <sys/systm.h>
-#include <sys/uio.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <vm/vm.h>
-
-#include <dev/altera/avgen/altera_avgen.h>
-
-static int
-altera_avgen_nexus_probe(device_t dev)
-{
-
- device_set_desc(dev, "Generic Altera Avalon device attachment");
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-altera_avgen_nexus_attach(device_t dev)
-{
- struct altera_avgen_softc *sc;
- const char *str_fileio, *str_geomio, *str_mmapio;
- const char *str_devname;
- int devunit, error;
-
- sc = device_get_softc(dev);
- sc->avg_dev = dev;
- sc->avg_unit = device_get_unit(dev);
-
- /*
- * Query non-standard hints to find out what operations are permitted
- * on the device, and whether it is cached.
- */
- str_fileio = NULL;
- str_geomio = NULL;
- str_mmapio = NULL;
- str_devname = NULL;
- devunit = -1;
- sc->avg_width = 1;
- error = resource_int_value(device_get_name(dev), device_get_unit(dev),
- ALTERA_AVALON_STR_WIDTH, &sc->avg_width);
- if (error != 0 && error != ENOENT) {
- device_printf(dev, "invalid %s\n", ALTERA_AVALON_STR_WIDTH);
- return (error);
- }
- (void)resource_string_value(device_get_name(dev),
- device_get_unit(dev), ALTERA_AVALON_STR_FILEIO, &str_fileio);
- (void)resource_string_value(device_get_name(dev),
- device_get_unit(dev), ALTERA_AVALON_STR_GEOMIO, &str_geomio);
- (void)resource_string_value(device_get_name(dev),
- device_get_unit(dev), ALTERA_AVALON_STR_MMAPIO, &str_mmapio);
- (void)resource_string_value(device_get_name(dev),
- device_get_unit(dev), ALTERA_AVALON_STR_DEVNAME, &str_devname);
- (void)resource_int_value(device_get_name(dev), device_get_unit(dev),
- ALTERA_AVALON_STR_DEVUNIT, &devunit);
-
- /* Memory allocation and checking. */
- sc->avg_rid = 0;
- sc->avg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->avg_rid, RF_ACTIVE);
- if (sc->avg_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- return (ENXIO);
- }
- error = altera_avgen_attach(sc, str_fileio, str_geomio, str_mmapio,
- str_devname, devunit);
- if (error != 0)
- bus_release_resource(dev, SYS_RES_MEMORY, sc->avg_rid,
- sc->avg_res);
- return (error);
-}
-
-static int
-altera_avgen_nexus_detach(device_t dev)
-{
- struct altera_avgen_softc *sc;
-
- sc = device_get_softc(dev);
- altera_avgen_detach(sc);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->avg_rid, sc->avg_res);
- return (0);
-}
-
-static device_method_t altera_avgen_nexus_methods[] = {
- DEVMETHOD(device_probe, altera_avgen_nexus_probe),
- DEVMETHOD(device_attach, altera_avgen_nexus_attach),
- DEVMETHOD(device_detach, altera_avgen_nexus_detach),
- { 0, 0 }
-};
-
-static driver_t altera_avgen_nexus_driver = {
- "altera_avgen",
- altera_avgen_nexus_methods,
- sizeof(struct altera_avgen_softc),
-};
-
-DRIVER_MODULE(avgen, nexus, altera_avgen_nexus_driver, 0, 0);
diff --git a/sys/dev/altera/jtag_uart/altera_jtag_uart.h b/sys/dev/altera/jtag_uart/altera_jtag_uart.h
deleted file mode 100644
index 1f64184f963f..000000000000
--- a/sys/dev/altera/jtag_uart/altera_jtag_uart.h
+++ /dev/null
@@ -1,197 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2011-2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _DEV_ALTERA_JTAG_UART_H_
-#define _DEV_ALTERA_JTAG_UART_H_
-
-struct altera_jtag_uart_softc {
- device_t ajus_dev;
- int ajus_unit;
-
- /*
- * Hardware resources.
- */
- struct resource *ajus_irq_res;
- int ajus_irq_rid;
- void *ajus_irq_cookie;
- struct resource *ajus_mem_res;
- int ajus_mem_rid;
-
- /*
- * TTY resources.
- */
- struct tty *ajus_ttyp;
- int ajus_alt_break_state;
-
- /*
- * Driver resources.
- */
- u_int ajus_flags;
- struct mtx *ajus_lockp;
- struct mtx ajus_lock;
- struct callout ajus_io_callout;
- struct callout ajus_ac_callout;
-
- /*
- * One-character buffer required because it's not possible to peek at
- * the input FIFO without reading it.
- */
- int ajus_buffer_valid;
- int *ajus_buffer_validp;
- uint8_t ajus_buffer_data;
- uint8_t *ajus_buffer_datap;
- int ajus_jtag_present;
- int *ajus_jtag_presentp;
- u_int ajus_jtag_missed;
- u_int *ajus_jtag_missedp;
-};
-
-#define AJU_TTYNAME "ttyj"
-
-/*
- * Flag values for ajus_flags.
- */
-#define ALTERA_JTAG_UART_FLAG_CONSOLE 0x00000001 /* Is console. */
-
-/*
- * Because tty-level use of the I/O ports competes with low-level console
- * use, spinlocks must be employed here.
- */
-#define AJU_CONSOLE_LOCK_INIT() do { \
- mtx_init(&aju_cons_lock, "aju_cons_lock", NULL, MTX_SPIN); \
-} while (0)
-
-#define AJU_CONSOLE_LOCK() do { \
- if (!kdb_active) \
- mtx_lock_spin(&aju_cons_lock); \
-} while (0)
-
-#define AJU_CONSOLE_LOCK_ASSERT() do { \
- if (!kdb_active) \
- mtx_assert(&aju_cons_lock, MA_OWNED); \
-} while (0)
-
-#define AJU_CONSOLE_UNLOCK() do { \
- if (!kdb_active) \
- mtx_unlock_spin(&aju_cons_lock); \
-} while (0)
-
-#define AJU_LOCK_INIT(sc) do { \
- mtx_init(&(sc)->ajus_lock, "aju_lock", NULL, MTX_SPIN); \
-} while (0)
-
-#define AJU_LOCK_DESTROY(sc) do { \
- mtx_destroy(&(sc)->ajus_lock); \
-} while (0)
-
-#define AJU_LOCK(sc) do { \
- mtx_lock_spin((sc)->ajus_lockp); \
-} while (0)
-
-#define AJU_LOCK_ASSERT(sc) do { \
- mtx_assert((sc)->ajus_lockp, MA_OWNED); \
-} while (0)
-
-#define AJU_UNLOCK(sc) do { \
- mtx_unlock_spin((sc)->ajus_lockp); \
-} while (0)
-
-/*
- * When a TTY-level Altera JTAG UART instance is also the low-level console,
- * the TTY layer borrows the console-layer lock and buffer rather than using
- * its own.
- */
-extern struct mtx aju_cons_lock;
-extern char aju_cons_buffer_data;
-extern int aju_cons_buffer_valid;
-extern int aju_cons_jtag_present;
-extern u_int aju_cons_jtag_missed;
-
-/*
- * Base physical address of the JTAG UART in BERI.
- */
-#define BERI_UART_BASE 0x7f000000 /* JTAG UART */
-
-/*-
- * Routines for interacting with the BERI console JTAG UART. Programming
- * details from the June 2011 "Embedded Peripherals User Guide" by Altera
- * Corporation, tables 6-2 (JTAG UART Core Register Map), 6-3 (Data Register
- * Bits), and 6-4 (Control Register Bits).
- *
- * Offsets of data and control registers relative to the base. Altera
- * conventions are maintained in BERI.
- */
-#define ALTERA_JTAG_UART_DATA_OFF 0x00000000
-#define ALTERA_JTAG_UART_CONTROL_OFF 0x00000004
-
-/*
- * Offset 0: 'data' register -- bits 31-16 (RAVAIL), 15 (RVALID),
- * 14-8 (Reserved), 7-0 (DATA).
- *
- * DATA - One byte read or written.
- * RAVAIL - Bytes available to read (excluding the current byte).
- * RVALID - Whether the byte in DATA is valid.
- */
-#define ALTERA_JTAG_UART_DATA_DATA 0x000000ff
-#define ALTERA_JTAG_UART_DATA_RESERVED 0x00007f00
-#define ALTERA_JTAG_UART_DATA_RVALID 0x00008000
-#define ALTERA_JTAG_UART_DATA_RAVAIL 0xffff0000
-#define ALTERA_JTAG_UART_DATA_RAVAIL_SHIFT 16
-
-/*-
- * Offset 1: 'control' register -- bits 31-16 (WSPACE), 15-11 (Reserved),
- * 10 (AC), 9 (WI), 8 (RI), 7-2 (Reserved), 1 (WE), 0 (RE).
- *
- * RE - Enable read interrupts.
- * WE - Enable write interrupts.
- * RI - Read interrupt pending.
- * WI - Write interrupt pending.
- * AC - Activity bit; write '1' to clear it to '0'.
- * WSPACE - Space available in the write FIFO.
- */
-#define ALTERA_JTAG_UART_CONTROL_RE 0x00000001
-#define ALTERA_JTAG_UART_CONTROL_WE 0x00000002
-#define ALTERA_JTAG_UART_CONTROL_RESERVED0 0x000000fc
-#define ALTERA_JTAG_UART_CONTROL_RI 0x00000100
-#define ALTERA_JTAG_UART_CONTROL_WI 0x00000200
-#define ALTERA_JTAG_UART_CONTROL_AC 0x00000400
-#define ALTERA_JTAG_UART_CONTROL_RESERVED1 0x0000f800
-#define ALTERA_JTAG_UART_CONTROL_WSPACE 0xffff0000
-#define ALTERA_JTAG_UART_CONTROL_WSPACE_SHIFT 16
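
Together these masks support simple polled I/O: RVALID gates consumption of DATA, and WSPACE throttles output. A sketch of the extraction logic, assuming a hypothetical reg_read() helper that returns the 32-bit register at a given offset:

	/*
	 * Sketch: polled RX/TX primitives built from the register bits
	 * above.  reg_read() is a hypothetical 32-bit accessor for the
	 * registers at ALTERA_JTAG_UART_{DATA,CONTROL}_OFF.
	 */
	static int
	aju_rx_poll(uint8_t *chp)
	{
		uint32_t v;

		v = reg_read(ALTERA_JTAG_UART_DATA_OFF);
		if ((v & ALTERA_JTAG_UART_DATA_RVALID) == 0)
			return (0);		/* Nothing buffered. */
		*chp = v & ALTERA_JTAG_UART_DATA_DATA;
		return (1);
	}

	static u_int
	aju_tx_space(void)
	{
		uint32_t v;

		v = reg_read(ALTERA_JTAG_UART_CONTROL_OFF);
		return ((v & ALTERA_JTAG_UART_CONTROL_WSPACE) >>
		    ALTERA_JTAG_UART_CONTROL_WSPACE_SHIFT);
	}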
-
-/*
- * Driver attachment functions for Nexus.
- */
-int altera_jtag_uart_attach(struct altera_jtag_uart_softc *sc);
-void altera_jtag_uart_detach(struct altera_jtag_uart_softc *sc);
-
-#endif /* _DEV_ALTERA_JTAG_UART_H_ */
diff --git a/sys/dev/altera/jtag_uart/altera_jtag_uart_cons.c b/sys/dev/altera/jtag_uart/altera_jtag_uart_cons.c
deleted file mode 100644
index dd708bd68f22..000000000000
--- a/sys/dev/altera/jtag_uart/altera_jtag_uart_cons.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2011-2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/cons.h>
-#include <sys/endian.h>
-#include <sys/kdb.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/reboot.h>
-#include <sys/sysctl.h>
-#include <sys/systm.h>
-#include <sys/tty.h>
-
-#include <ddb/ddb.h>
-
-#include <dev/altera/jtag_uart/altera_jtag_uart.h>
-
-static SYSCTL_NODE(_hw, OID_AUTO, altera_jtag_uart,
- CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
- "Altera JTAG UART configuration knobs");
-
-/*
- * One-byte buffer as we can't check whether the UART is readable without
- * actually reading from it, synchronised by a spinlock; this lock also
- * synchronises access to the I/O ports for non-atomic sequences. These
- * symbols are public so that the TTY layer can use them when working on an
- * instance of the UART that is also a low-level console.
- */
-char aju_cons_buffer_data;
-int aju_cons_buffer_valid;
-int aju_cons_jtag_present;
-u_int aju_cons_jtag_missed;
-struct mtx aju_cons_lock;
-
-/*
- * Low-level console driver functions.
- */
-static cn_probe_t aju_cnprobe;
-static cn_init_t aju_cninit;
-static cn_term_t aju_cnterm;
-static cn_getc_t aju_cngetc;
-static cn_putc_t aju_cnputc;
-static cn_grab_t aju_cngrab;
-static cn_ungrab_t aju_cnungrab;
-
-/*
- * JTAG sets the ALTERA_JTAG_UART_CONTROL_AC bit whenever it accesses the
- * FIFO. This allows us to (sort of) tell when JTAG is present, so that we
- * can adopt lossy, rather than blocking, behaviour when JTAG isn't there.
- * When it is present, we do full flow control. This delay is how long we
- * wait to see if JTAG has really disappeared when finding a full buffer and
- * no AC bit set.
- */
-#define ALTERA_JTAG_UART_AC_POLL_DELAY 10000
-static u_int altera_jtag_uart_ac_poll_delay =
- ALTERA_JTAG_UART_AC_POLL_DELAY;
-SYSCTL_UINT(_hw_altera_jtag_uart, OID_AUTO, ac_poll_delay,
- CTLFLAG_RW, &altera_jtag_uart_ac_poll_delay, 0,
- "Maximum delay waiting for JTAG present flag when buffer is full");
-
-/*
- * I/O routines lifted from Deimos. This is not only MIPS-specific, but also
- * BERI-specific, as we're hard coding the address at which we expect to
- * find the Altera JTAG UART and using it unconditionally. We use these
- * low-level routines so that we can perform console I/O long before newbus
- * has initialised and devices have attached. The TTY layer of the driver
- * knows about this, and uses the console-layer spinlock instead of the
- * TTY-layer lock to avoid confusion between layers for the console UART.
- *
- * XXXRW: The only place this inter-layer behaviour breaks down is if the
- * low-level console is used for polled read while the TTY driver is also
- * looking for input. Probably we should also share buffers between layers.
- */
-#define MIPS_XKPHYS_UNCACHED_BASE 0x9000000000000000
-
-typedef uint64_t paddr_t;
-typedef uint64_t vaddr_t;
-
-static inline vaddr_t
-mips_phys_to_uncached(paddr_t phys)
-{
-
- return (phys | MIPS_XKPHYS_UNCACHED_BASE);
-}
-
-static inline uint32_t
-mips_ioread_uint32(vaddr_t vaddr)
-{
- uint32_t v;
-
- __asm__ __volatile__ ("lw %0, 0(%1)" : "=r" (v) : "r" (vaddr));
- return (v);
-}
-
-static inline void
-mips_iowrite_uint32(vaddr_t vaddr, uint32_t v)
-{
-
- __asm__ __volatile__ ("sw %0, 0(%1)" : : "r" (v), "r" (vaddr));
-}
-
-/*
- * Little-endian versions of 32-bit I/O routines.
- */
-static inline uint32_t
-mips_ioread_uint32le(vaddr_t vaddr)
-{
-
- return (le32toh(mips_ioread_uint32(vaddr)));
-}
-
-static inline void
-mips_iowrite_uint32le(vaddr_t vaddr, uint32_t v)
-{
-
- mips_iowrite_uint32(vaddr, htole32(v));
-}
-
-/*
- * Low-level read and write register routines; the Altera UART is little
- * endian, so we byte swap 32-bit reads and writes.
- */
-static inline uint32_t
-aju_cons_data_read(void)
-{
-
- return (mips_ioread_uint32le(mips_phys_to_uncached(BERI_UART_BASE +
- ALTERA_JTAG_UART_DATA_OFF)));
-}
-
-static inline void
-aju_cons_data_write(uint32_t v)
-{
-
- mips_iowrite_uint32le(mips_phys_to_uncached(BERI_UART_BASE +
- ALTERA_JTAG_UART_DATA_OFF), v);
-}
-
-static inline uint32_t
-aju_cons_control_read(void)
-{
-
- return (mips_ioread_uint32le(mips_phys_to_uncached(BERI_UART_BASE +
- ALTERA_JTAG_UART_CONTROL_OFF)));
-}
-
-static inline void
-aju_cons_control_write(uint32_t v)
-{
-
- mips_iowrite_uint32le(mips_phys_to_uncached(BERI_UART_BASE +
- ALTERA_JTAG_UART_CONTROL_OFF), v);
-}
-
-/*
- * Slightly higher-level routines aware of buffering and flow control.
- */
-static int
-aju_cons_readable(void)
-{
- uint32_t v;
-
- AJU_CONSOLE_LOCK_ASSERT();
-
- if (aju_cons_buffer_valid)
- return (1);
- v = aju_cons_data_read();
- if ((v & ALTERA_JTAG_UART_DATA_RVALID) != 0) {
- aju_cons_buffer_valid = 1;
- aju_cons_buffer_data = (v & ALTERA_JTAG_UART_DATA_DATA);
- return (1);
- }
- return (0);
-}
-
-static void
-aju_cons_write(char ch)
-{
- uint32_t v;
-
- AJU_CONSOLE_LOCK_ASSERT();
-
- /*
- * The flow control logic here is somewhat subtle: we want to wait for
- * write buffer space only while JTAG is present. However, we can't
- * directly ask if JTAG is present -- just whether it's been seen
- * since we last cleared the ALTERA_JTAG_UART_CONTROL_AC bit. As
- * such, implement a polling loop in which we both wait for space and
- * try to decide whether JTAG has disappeared on us. We will have to
- * wait one complete polling delay to detect that JTAG has gone away,
- * but otherwise shouldn't wait any further once it has gone. And we
- * had to wait for buffer space anyway, if it was there.
- *
- * If JTAG is spotted, reset the TTY-layer miss counter so console-
- * layer clearing of the bit doesn't trigger a TTY-layer
- * disconnection.
- *
- * XXXRW: Notice the inherent race with hardware: in clearing the
- * bit, we may race with hardware setting the same bit. This can
- * cause real-world reliability problems due to lost output on the
- * console.
- */
- v = aju_cons_control_read();
- if (v & ALTERA_JTAG_UART_CONTROL_AC) {
- aju_cons_jtag_present = 1;
- aju_cons_jtag_missed = 0;
- v &= ~ALTERA_JTAG_UART_CONTROL_AC;
- aju_cons_control_write(v);
- }
- while ((v & ALTERA_JTAG_UART_CONTROL_WSPACE) == 0) {
- if (!aju_cons_jtag_present)
- return;
- DELAY(altera_jtag_uart_ac_poll_delay);
- v = aju_cons_control_read();
- if (v & ALTERA_JTAG_UART_CONTROL_AC) {
- aju_cons_jtag_present = 1;
- v &= ~ALTERA_JTAG_UART_CONTROL_AC;
- aju_cons_control_write(v);
- } else
- aju_cons_jtag_present = 0;
- }
- aju_cons_data_write(ch);
-}
-
-static char
-aju_cons_read(void)
-{
-
- AJU_CONSOLE_LOCK_ASSERT();
-
- while (!aju_cons_readable());
- aju_cons_buffer_valid = 0;
- return (aju_cons_buffer_data);
-}
-
-/*
- * Implementation of a FreeBSD low-level, polled console driver.
- */
-static void
-aju_cnprobe(struct consdev *cp)
-{
-
- sprintf(cp->cn_name, "%s%d", AJU_TTYNAME, 0);
- cp->cn_pri = (boothowto & RB_SERIAL) ? CN_REMOTE : CN_NORMAL;
-}
-
-static void
-aju_cninit(struct consdev *cp)
-{
- uint32_t v;
-
- AJU_CONSOLE_LOCK_INIT();
-
- AJU_CONSOLE_LOCK();
- v = aju_cons_control_read();
- v &= ~ALTERA_JTAG_UART_CONTROL_AC;
- aju_cons_control_write(v);
- AJU_CONSOLE_UNLOCK();
-}
-
-static void
-aju_cnterm(struct consdev *cp)
-{
-
-}
-
-static int
-aju_cngetc(struct consdev *cp)
-{
- int ret;
-
- AJU_CONSOLE_LOCK();
- ret = aju_cons_read();
- AJU_CONSOLE_UNLOCK();
- return (ret);
-}
-
-static void
-aju_cnputc(struct consdev *cp, int c)
-{
-
- AJU_CONSOLE_LOCK();
- aju_cons_write(c);
- AJU_CONSOLE_UNLOCK();
-}
-
-static void
-aju_cngrab(struct consdev *cp)
-{
-
-}
-
-static void
-aju_cnungrab(struct consdev *cp)
-{
-
-}
-
-CONSOLE_DRIVER(aju);
diff --git a/sys/dev/altera/jtag_uart/altera_jtag_uart_fdt.c b/sys/dev/altera/jtag_uart/altera_jtag_uart_fdt.c
deleted file mode 100644
index 6290238d784b..000000000000
--- a/sys/dev/altera/jtag_uart/altera_jtag_uart_fdt.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/jtag_uart/altera_jtag_uart.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-/*
- * FDT bus attachment for Altera JTAG UARTs.
- */
-static int
-altera_jtag_uart_fdt_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (ofw_bus_is_compatible(dev, "altera,jtag_uart-11_0")) {
- device_set_desc(dev, "Altera JTAG UART");
- return (BUS_PROBE_DEFAULT);
- }
- return (ENXIO);
-}
-
-static int
-altera_jtag_uart_fdt_attach(device_t dev)
-{
- struct altera_jtag_uart_softc *sc;
- int error;
-
- error = 0;
- sc = device_get_softc(dev);
- sc->ajus_dev = dev;
- sc->ajus_unit = device_get_unit(dev);
- sc->ajus_mem_rid = 0;
- sc->ajus_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->ajus_mem_rid, RF_ACTIVE);
- if (sc->ajus_mem_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- error = ENXIO;
- goto out;
- }
-
- /*
- * Interrupt support is optional -- if we can't allocate an IRQ, then
- * we fall back on polling.
- */
- sc->ajus_irq_rid = 0;
- sc->ajus_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &sc->ajus_irq_rid, RF_ACTIVE | RF_SHAREABLE);
- if (sc->ajus_irq_res == NULL)
- device_printf(dev,
- "IRQ unavailable; selecting polled operation\n");
- error = altera_jtag_uart_attach(sc);
-out:
- if (error) {
- if (sc->ajus_irq_res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ,
- sc->ajus_irq_rid, sc->ajus_irq_res);
- if (sc->ajus_mem_res != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- sc->ajus_mem_rid, sc->ajus_mem_res);
- }
- return (error);
-}
-
-static int
-altera_jtag_uart_fdt_detach(device_t dev)
-{
- struct altera_jtag_uart_softc *sc;
-
- sc = device_get_softc(dev);
- KASSERT(sc->ajus_mem_res != NULL, ("%s: resources not allocated",
- __func__));
-
- altera_jtag_uart_detach(sc);
- bus_release_resource(dev, SYS_RES_IRQ, sc->ajus_irq_rid,
- sc->ajus_irq_res);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->ajus_mem_rid,
- sc->ajus_mem_res);
- return (0);
-}
-
-static device_method_t altera_jtag_uart_fdt_methods[] = {
- DEVMETHOD(device_probe, altera_jtag_uart_fdt_probe),
- DEVMETHOD(device_attach, altera_jtag_uart_fdt_attach),
- DEVMETHOD(device_detach, altera_jtag_uart_fdt_detach),
- { 0, 0 }
-};
-
-static driver_t altera_jtag_uart_fdt_driver = {
- "altera_jtag_uart",
- altera_jtag_uart_fdt_methods,
- sizeof(struct altera_jtag_uart_softc),
-};
-
-DRIVER_MODULE(altera_jtag_uart, simplebus, altera_jtag_uart_fdt_driver, 0, 0);
diff --git a/sys/dev/altera/jtag_uart/altera_jtag_uart_nexus.c b/sys/dev/altera/jtag_uart/altera_jtag_uart_nexus.c
deleted file mode 100644
index 141518e85cb5..000000000000
--- a/sys/dev/altera/jtag_uart/altera_jtag_uart_nexus.c
+++ /dev/null
@@ -1,139 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/jtag_uart/altera_jtag_uart.h>
-
-/*
- * Nexus bus attachment for Altera JTAG UARTs. Appropriate for most Altera
- * FPGA SoC-style configurations in which the IP core will be exposed to the
- * processor via a memory-mapped Avalon bus.
- */
-static int
-altera_jtag_uart_nexus_probe(device_t dev)
-{
-
- device_set_desc(dev, "Altera JTAG UART");
- return (BUS_PROBE_NOWILDCARD);
-}
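A note on the probe return values: this nexus attachment (below) returns BUS_PROBE_NOWILDCARD so that it matches only devices explicitly hinted on the nexus, whereas the FDT attachment above can safely return BUS_PROBE_DEFAULT because it has already checked the "altera,jtag_uart-11_0" compatible string.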
-
-static int
-altera_jtag_uart_nexus_attach(device_t dev)
-{
- struct altera_jtag_uart_softc *sc;
- int error;
-
- error = 0;
- sc = device_get_softc(dev);
- sc->ajus_dev = dev;
- sc->ajus_unit = device_get_unit(dev);
- sc->ajus_mem_rid = 0;
- sc->ajus_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->ajus_mem_rid, RF_ACTIVE);
- if (sc->ajus_mem_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- error = ENXIO;
- goto out;
- }
-
- /*
- * Interrupt support is optional -- if we can't allocate an IRQ, then
- * we fall back on polling.
- */
- sc->ajus_irq_rid = 0;
- sc->ajus_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &sc->ajus_irq_rid, RF_ACTIVE | RF_SHAREABLE);
- if (sc->ajus_irq_res == NULL)
- device_printf(dev,
- "IRQ unavailable; selecting polled operation\n");
- error = altera_jtag_uart_attach(sc);
-out:
- if (error) {
- if (sc->ajus_irq_res != NULL)
- bus_release_resource(dev, SYS_RES_IRQ,
- sc->ajus_irq_rid, sc->ajus_irq_res);
- if (sc->ajus_mem_res != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- sc->ajus_mem_rid, sc->ajus_mem_res);
- }
- return (error);
-}
-
-static int
-altera_jtag_uart_nexus_detach(device_t dev)
-{
- struct altera_jtag_uart_softc *sc;
-
- sc = device_get_softc(dev);
- KASSERT(sc->ajus_mem_res != NULL, ("%s: resources not allocated",
- __func__));
-
- altera_jtag_uart_detach(sc);
- bus_release_resource(dev, SYS_RES_IRQ, sc->ajus_irq_rid,
- sc->ajus_irq_res);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->ajus_mem_rid,
- sc->ajus_mem_res);
- return (0);
-}
-
-static device_method_t altera_jtag_uart_nexus_methods[] = {
- DEVMETHOD(device_probe, altera_jtag_uart_nexus_probe),
- DEVMETHOD(device_attach, altera_jtag_uart_nexus_attach),
- DEVMETHOD(device_detach, altera_jtag_uart_nexus_detach),
- { 0, 0 }
-};
-
-static driver_t altera_jtag_uart_nexus_driver = {
- "altera_jtag_uart",
- altera_jtag_uart_nexus_methods,
- sizeof(struct altera_jtag_uart_softc),
-};
-
-DRIVER_MODULE(altera_jtag_uart, nexus, altera_jtag_uart_nexus_driver, 0, 0);
diff --git a/sys/dev/altera/jtag_uart/altera_jtag_uart_tty.c b/sys/dev/altera/jtag_uart/altera_jtag_uart_tty.c
deleted file mode 100644
index 3a299d80caa1..000000000000
--- a/sys/dev/altera/jtag_uart/altera_jtag_uart_tty.c
+++ /dev/null
@@ -1,561 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2011-2012, 2016 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/cons.h>
-#include <sys/endian.h>
-#include <sys/kdb.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/reboot.h>
-#include <sys/sysctl.h>
-#include <sys/tty.h>
-
-#include <ddb/ddb.h>
-
-#include <machine/atomic.h>
-#include <machine/bus.h>
-
-#include <dev/altera/jtag_uart/altera_jtag_uart.h>
-
-/*
- * If one of the Altera JTAG UARTs is currently the system console, register
- * it here.
- */
-static struct altera_jtag_uart_softc *aju_cons_sc;
-
-static tsw_outwakeup_t aju_outwakeup;
-static void aju_ac_callout(void *);
-static void aju_io_callout(void *);
-
-static struct ttydevsw aju_ttydevsw = {
- .tsw_flags = TF_NOPREFIX,
- .tsw_outwakeup = aju_outwakeup,
-};
-
-/*
- * When polling for the AC bit, the number of consecutive misses we allow
- * before assuming JTAG has disappeared on us.  By default, four seconds.
- */
-#define AJU_JTAG_MAXMISS 20
-
-/*
- * Polling intervals for input/output and JTAG connection events.
- */
-#define AJU_IO_POLLINTERVAL (hz/100)
-#define AJU_AC_POLLINTERVAL (hz/5)
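The "four seconds" figure follows directly from the two constants: the AC callout fires every AJU_AC_POLLINTERVAL = hz/5 ticks, i.e. five polls per second, so AJU_JTAG_MAXMISS = 20 consecutive misses corresponds to 20 / 5 = 4 seconds without observing the AC bit.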
-
-/*
- * Statistics on JTAG removal events when sending, for debugging purposes
- * only.
- */
-static u_int aju_jtag_vanished;
-SYSCTL_UINT(_debug, OID_AUTO, aju_jtag_vanished, CTLFLAG_RW,
- &aju_jtag_vanished, 0, "Number of times JTAG has vanished");
-
-static u_int aju_jtag_appeared;
-SYSCTL_UINT(_debug, OID_AUTO, aju_jtag_appeared, CTLFLAG_RW,
- &aju_jtag_appeared, 0, "Number of times JTAG has appeared");
-
-SYSCTL_INT(_debug, OID_AUTO, aju_cons_jtag_present, CTLFLAG_RW,
- &aju_cons_jtag_present, 0, "JTAG console present flag");
-
-SYSCTL_UINT(_debug, OID_AUTO, aju_cons_jtag_missed, CTLFLAG_RW,
- &aju_cons_jtag_missed, 0, "JTAG console missed counter");
-
-/*
- * Interrupt-related statistics.
- */
-static u_int aju_intr_readable_enabled;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_readable_enabled, CTLFLAG_RW,
- &aju_intr_readable_enabled, 0, "Number of times read interrupt enabled");
-
-static u_int aju_intr_writable_disabled;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_writable_disabled, CTLFLAG_RW,
- &aju_intr_writable_disabled, 0,
- "Number of times write interrupt disabled");
-
-static u_int aju_intr_writable_enabled;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_writable_enabled, CTLFLAG_RW,
- &aju_intr_writable_enabled, 0,
- "Number of times write interrupt enabled");
-
-static u_int aju_intr_disabled;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_disabled, CTLFLAG_RW,
- &aju_intr_disabled, 0, "Number of times write interrupt disabled");
-
-static u_int aju_intr_read_count;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_read_count, CTLFLAG_RW,
- &aju_intr_read_count, 0, "Number of times read interrupt fired");
-
-static u_int aju_intr_write_count;
-SYSCTL_UINT(_debug, OID_AUTO, aju_intr_write_count, CTLFLAG_RW,
- &aju_intr_write_count, 0, "Number of times write interrupt fired");
-
-/*
- * Low-level read and write register routines; the Altera UART is little
- * endian, so we byte swap 32-bit reads and writes.
- */
-static inline uint32_t
-aju_data_read(struct altera_jtag_uart_softc *sc)
-{
-
- return (le32toh(bus_read_4(sc->ajus_mem_res,
- ALTERA_JTAG_UART_DATA_OFF)));
-}
-
-static inline void
-aju_data_write(struct altera_jtag_uart_softc *sc, uint32_t v)
-{
-
- bus_write_4(sc->ajus_mem_res, ALTERA_JTAG_UART_DATA_OFF, htole32(v));
-}
-
-static inline uint32_t
-aju_control_read(struct altera_jtag_uart_softc *sc)
-{
-
- return (le32toh(bus_read_4(sc->ajus_mem_res,
- ALTERA_JTAG_UART_CONTROL_OFF)));
-}
-
-static inline void
-aju_control_write(struct altera_jtag_uart_softc *sc, uint32_t v)
-{
-
- bus_write_4(sc->ajus_mem_res, ALTERA_JTAG_UART_CONTROL_OFF,
- htole32(v));
-}
-
-/*
- * Slightly higher-level routines aware of buffering and flow control.
- */
-static inline int
-aju_writable(struct altera_jtag_uart_softc *sc)
-{
-
- return ((aju_control_read(sc) &
- ALTERA_JTAG_UART_CONTROL_WSPACE) != 0);
-}
-
-static inline int
-aju_readable(struct altera_jtag_uart_softc *sc)
-{
- uint32_t v;
-
- AJU_LOCK_ASSERT(sc);
-
- if (*sc->ajus_buffer_validp)
- return (1);
- v = aju_data_read(sc);
- if ((v & ALTERA_JTAG_UART_DATA_RVALID) != 0) {
- *sc->ajus_buffer_validp = 1;
- *sc->ajus_buffer_datap = (v & ALTERA_JTAG_UART_DATA_DATA);
- return (1);
- }
- return (0);
-}
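The one-character buffer consulted above exists because reading the DATA register is destructive: a "peek" that finds RVALID set has already consumed the byte, so it must be stashed via ajus_buffer_datap until a caller such as aju_read() wants it. Routing these pointers through the softc also lets the tty layer share the stash with the low-level console code when this instance is the system console (see altera_jtag_uart_attach() below).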
-
-static char
-aju_read(struct altera_jtag_uart_softc *sc)
-{
-
- AJU_LOCK_ASSERT(sc);
-
- while (!aju_readable(sc));
- *sc->ajus_buffer_validp = 0;
- return (*sc->ajus_buffer_datap);
-}
-
-/*
- * Routines for enabling and disabling interrupts for read and write.
- */
-static void
-aju_intr_readable_enable(struct altera_jtag_uart_softc *sc)
-{
- uint32_t v;
-
- AJU_LOCK_ASSERT(sc);
-
- atomic_add_int(&aju_intr_readable_enabled, 1);
- v = aju_control_read(sc);
- v |= ALTERA_JTAG_UART_CONTROL_RE;
- aju_control_write(sc, v);
-}
-
-static void
-aju_intr_writable_enable(struct altera_jtag_uart_softc *sc)
-{
- uint32_t v;
-
- AJU_LOCK_ASSERT(sc);
-
- atomic_add_int(&aju_intr_writable_enabled, 1);
- v = aju_control_read(sc);
- v |= ALTERA_JTAG_UART_CONTROL_WE;
- aju_control_write(sc, v);
-}
-
-static void
-aju_intr_writable_disable(struct altera_jtag_uart_softc *sc)
-{
- uint32_t v;
-
- AJU_LOCK_ASSERT(sc);
-
- atomic_add_int(&aju_intr_writable_disabled, 1);
- v = aju_control_read(sc);
- v &= ~ALTERA_JTAG_UART_CONTROL_WE;
- aju_control_write(sc, v);
-}
-
-static void
-aju_intr_disable(struct altera_jtag_uart_softc *sc)
-{
- uint32_t v;
-
- AJU_LOCK_ASSERT(sc);
-
- atomic_add_int(&aju_intr_disabled, 1);
- v = aju_control_read(sc);
- v &= ~(ALTERA_JTAG_UART_CONTROL_RE | ALTERA_JTAG_UART_CONTROL_WE);
- aju_control_write(sc, v);
-}
-
-/*
- * The actual work of checking for, and handling, available reads. This is
- * used in both polled and interrupt-driven modes, as JTAG UARTs may be hooked
- * up with, or without, IRQs allocated.
- */
-static void
-aju_handle_input(struct altera_jtag_uart_softc *sc, struct tty *tp)
-{
- int c;
-
- tty_assert_locked(tp);
- AJU_LOCK_ASSERT(sc);
-
- while (aju_readable(sc)) {
- c = aju_read(sc);
- AJU_UNLOCK(sc);
-#ifdef KDB
- if (sc->ajus_flags & ALTERA_JTAG_UART_FLAG_CONSOLE)
- kdb_alt_break(c, &sc->ajus_alt_break_state);
-#endif
- ttydisc_rint(tp, c, 0);
- AJU_LOCK(sc);
- }
- AJU_UNLOCK(sc);
- ttydisc_rint_done(tp);
- AJU_LOCK(sc);
-}
-
-/*
- * Send output to the UART until either there's none left to send, or we run
- * out of room and need to await an interrupt so that we can start sending
- * again.
- *
- * XXXRW: It would be nice to query WSPACE at the beginning and write to the
- * FIFO in bigger chunks.
- */
-static void
-aju_handle_output(struct altera_jtag_uart_softc *sc, struct tty *tp)
-{
- uint32_t v;
- uint8_t ch;
-
- tty_assert_locked(tp);
- AJU_LOCK_ASSERT(sc);
-
- AJU_UNLOCK(sc);
- while (ttydisc_getc_poll(tp) != 0) {
- AJU_LOCK(sc);
- if (*sc->ajus_jtag_presentp == 0) {
- /*
- * If JTAG is not present, then we will drop this
- * character instead of perhaps scheduling an
- * interrupt to let us know when there is buffer
- * space. Otherwise we might get a write interrupt
- * later even though we aren't interested in sending
- * anymore. Loop to drain TTY-layer buffer.
- */
- AJU_UNLOCK(sc);
- if (ttydisc_getc(tp, &ch, sizeof(ch)) !=
- sizeof(ch))
- panic("%s: ttydisc_getc", __func__);
- continue;
- }
- v = aju_control_read(sc);
- if ((v & ALTERA_JTAG_UART_CONTROL_WSPACE) == 0) {
- if (sc->ajus_irq_res != NULL &&
- (v & ALTERA_JTAG_UART_CONTROL_WE) == 0)
- aju_intr_writable_enable(sc);
- return;
- }
- AJU_UNLOCK(sc);
- if (ttydisc_getc(tp, &ch, sizeof(ch)) != sizeof(ch))
- panic("%s: ttydisc_getc 2", __func__);
- AJU_LOCK(sc);
-
- /*
- * XXXRW: There is a slight race here in which we test for
- * writability, drop the lock, get the character from the tty
- * layer, re-acquire the lock, and then write. It's possible
- * for other code -- specifically, the low-level console -- to
-	 * have written in the meantime, which might mean that there
- * is no longer space. The BERI memory bus will cause this
- * write to block, wedging the processor until space is
- * available -- which could be a while if JTAG is not
- * attached!
- *
- * The 'easy' fix is to drop the character if WSPACE has
- * become unset. Not sure what the 'hard' fix is.
- */
- aju_data_write(sc, ch);
- AJU_UNLOCK(sc);
- }
- AJU_LOCK(sc);
-
- /*
- * If interrupts are configured, and there's no data to write, but we
- * had previously enabled write interrupts, disable them now.
- */
- v = aju_control_read(sc);
- if (sc->ajus_irq_res != NULL && (v & ALTERA_JTAG_UART_CONTROL_WE) != 0)
- aju_intr_writable_disable(sc);
-}
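One way to act on the XXXRW above: the WSPACE field is a count of free FIFO slots, so the loop could query it once and write that many characters before touching the control register again. A minimal sketch follows, assuming the count occupies the upper 16 bits of the control register (AJU_WSPACE_SHIFT is an assumed constant, not from the original header); it omits the JTAG-present check and the write-interrupt management that the real routine performs.

#define	AJU_WSPACE_SHIFT	16	/* Assumed position of WSPACE field. */

static void
aju_handle_output_batched(struct altera_jtag_uart_softc *sc, struct tty *tp)
{
	uint32_t space;
	uint8_t ch;

	AJU_LOCK_ASSERT(sc);

	/* Query the free-slot count once, then burst up to that many bytes. */
	space = (aju_control_read(sc) & ALTERA_JTAG_UART_CONTROL_WSPACE) >>
	    AJU_WSPACE_SHIFT;
	while (space > 0) {
		AJU_UNLOCK(sc);
		if (ttydisc_getc(tp, &ch, sizeof(ch)) != sizeof(ch)) {
			AJU_LOCK(sc);
			return;		/* tty layer drained. */
		}
		AJU_LOCK(sc);
		aju_data_write(sc, ch);
		space--;
	}
}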
-
-static void
-aju_outwakeup(struct tty *tp)
-{
- struct altera_jtag_uart_softc *sc = tty_softc(tp);
-
- tty_assert_locked(tp);
-
- AJU_LOCK(sc);
- aju_handle_output(sc, tp);
- AJU_UNLOCK(sc);
-}
-
-static void
-aju_io_callout(void *arg)
-{
- struct altera_jtag_uart_softc *sc = arg;
- struct tty *tp = sc->ajus_ttyp;
-
- tty_lock(tp);
- AJU_LOCK(sc);
-
- /*
- * It would be convenient if we could share code with aju_intr() here
- * by testing the control register for ALTERA_JTAG_UART_CONTROL_RI and
- * ALTERA_JTAG_UART_CONTROL_WI. Unfortunately, it's not clear that
- * this is supported, so do all the work to poll for both input and
- * output.
- */
- aju_handle_input(sc, tp);
- aju_handle_output(sc, tp);
-
-	/*
-	 * Reschedule the next poll attempt.  There is an argument for
-	 * adaptive polling based on the expectation of I/O -- e.g., whether
-	 * output is pending or input has recently arrived -- but we don't
-	 * do that here.
-	 */
- callout_reset(&sc->ajus_io_callout, AJU_IO_POLLINTERVAL,
- aju_io_callout, sc);
- AJU_UNLOCK(sc);
- tty_unlock(tp);
-}
-
-static void
-aju_ac_callout(void *arg)
-{
- struct altera_jtag_uart_softc *sc = arg;
- struct tty *tp = sc->ajus_ttyp;
- uint32_t v;
-
- tty_lock(tp);
- AJU_LOCK(sc);
- v = aju_control_read(sc);
- if (v & ALTERA_JTAG_UART_CONTROL_AC) {
- v &= ~ALTERA_JTAG_UART_CONTROL_AC;
- aju_control_write(sc, v);
- if (*sc->ajus_jtag_presentp == 0) {
- *sc->ajus_jtag_presentp = 1;
- atomic_add_int(&aju_jtag_appeared, 1);
- aju_handle_output(sc, tp);
- }
-
- /* Any hit eliminates all recent misses. */
- *sc->ajus_jtag_missedp = 0;
- } else if (*sc->ajus_jtag_presentp != 0) {
- /*
- * If we've exceeded our tolerance for misses, mark JTAG as
- * disconnected and drain output. Otherwise, bump the miss
- * counter.
- */
- if (*sc->ajus_jtag_missedp > AJU_JTAG_MAXMISS) {
- *sc->ajus_jtag_presentp = 0;
- atomic_add_int(&aju_jtag_vanished, 1);
- aju_handle_output(sc, tp);
- } else
- (*sc->ajus_jtag_missedp)++;
- }
- callout_reset(&sc->ajus_ac_callout, AJU_AC_POLLINTERVAL,
- aju_ac_callout, sc);
- AJU_UNLOCK(sc);
- tty_unlock(tp);
-}
-
-static void
-aju_intr(void *arg)
-{
- struct altera_jtag_uart_softc *sc = arg;
- struct tty *tp = sc->ajus_ttyp;
- uint32_t v;
-
- tty_lock(tp);
- AJU_LOCK(sc);
- v = aju_control_read(sc);
- if (v & ALTERA_JTAG_UART_CONTROL_RI) {
- atomic_add_int(&aju_intr_read_count, 1);
- aju_handle_input(sc, tp);
- }
- if (v & ALTERA_JTAG_UART_CONTROL_WI) {
- atomic_add_int(&aju_intr_write_count, 1);
- aju_handle_output(sc, tp);
- }
- AJU_UNLOCK(sc);
- tty_unlock(tp);
-}
-
-int
-altera_jtag_uart_attach(struct altera_jtag_uart_softc *sc)
-{
- struct tty *tp;
- int error;
-
- AJU_LOCK_INIT(sc);
-
- /*
- * XXXRW: Currently, we detect the console solely based on it using a
- * reserved address, and borrow console-level locks and buffer if so.
- * Is there a better way?
- */
- if (rman_get_start(sc->ajus_mem_res) == BERI_UART_BASE) {
- sc->ajus_lockp = &aju_cons_lock;
- sc->ajus_buffer_validp = &aju_cons_buffer_valid;
- sc->ajus_buffer_datap = &aju_cons_buffer_data;
- sc->ajus_jtag_presentp = &aju_cons_jtag_present;
- sc->ajus_jtag_missedp = &aju_cons_jtag_missed;
- sc->ajus_flags |= ALTERA_JTAG_UART_FLAG_CONSOLE;
- } else {
- sc->ajus_lockp = &sc->ajus_lock;
- sc->ajus_buffer_validp = &sc->ajus_buffer_valid;
- sc->ajus_buffer_datap = &sc->ajus_buffer_data;
- sc->ajus_jtag_presentp = &sc->ajus_jtag_present;
- sc->ajus_jtag_missedp = &sc->ajus_jtag_missed;
- }
-
- /*
- * Disable interrupts regardless of whether or not we plan to use
- * them. We will register an interrupt handler now if they will be
-	 * used, but not re-enable them until later, once the remainder of the tty
- * layer is properly initialised, as we're not ready for input yet.
- */
- AJU_LOCK(sc);
- aju_intr_disable(sc);
- AJU_UNLOCK(sc);
- if (sc->ajus_irq_res != NULL) {
- error = bus_setup_intr(sc->ajus_dev, sc->ajus_irq_res,
- INTR_ENTROPY | INTR_TYPE_TTY | INTR_MPSAFE, NULL,
- aju_intr, sc, &sc->ajus_irq_cookie);
- if (error) {
- device_printf(sc->ajus_dev,
- "could not activate interrupt\n");
- AJU_LOCK_DESTROY(sc);
- return (error);
- }
- }
- tp = sc->ajus_ttyp = tty_alloc(&aju_ttydevsw, sc);
- if (sc->ajus_flags & ALTERA_JTAG_UART_FLAG_CONSOLE) {
- aju_cons_sc = sc;
- tty_init_console(tp, 0);
- }
- tty_makedev(tp, NULL, "%s%d", AJU_TTYNAME, sc->ajus_unit);
-
- /*
- * If we will be using interrupts, enable them now; otherwise, start
- * polling. From this point onwards, input can arrive.
- */
- if (sc->ajus_irq_res != NULL) {
- AJU_LOCK(sc);
- aju_intr_readable_enable(sc);
- AJU_UNLOCK(sc);
- } else {
- callout_init(&sc->ajus_io_callout, 1);
- callout_reset(&sc->ajus_io_callout, AJU_IO_POLLINTERVAL,
- aju_io_callout, sc);
- }
- callout_init(&sc->ajus_ac_callout, 1);
- callout_reset(&sc->ajus_ac_callout, AJU_AC_POLLINTERVAL,
- aju_ac_callout, sc);
- return (0);
-}
-
-void
-altera_jtag_uart_detach(struct altera_jtag_uart_softc *sc)
-{
- struct tty *tp = sc->ajus_ttyp;
-
- /*
- * If we're using interrupts, disable and release the interrupt
- * handler now. Otherwise drain the polling timeout.
- */
- if (sc->ajus_irq_res != NULL) {
- AJU_LOCK(sc);
- aju_intr_disable(sc);
- AJU_UNLOCK(sc);
- bus_teardown_intr(sc->ajus_dev, sc->ajus_irq_res,
- sc->ajus_irq_cookie);
- } else
- callout_drain(&sc->ajus_io_callout);
- callout_drain(&sc->ajus_ac_callout);
- if (sc->ajus_flags & ALTERA_JTAG_UART_FLAG_CONSOLE)
- aju_cons_sc = NULL;
- tty_lock(tp);
- tty_rel_gone(tp);
- AJU_LOCK_DESTROY(sc);
-}
diff --git a/sys/dev/altera/msgdma/msgdma.c b/sys/dev/altera/msgdma/msgdma.c
deleted file mode 100644
index bb35d7315b6c..000000000000
--- a/sys/dev/altera/msgdma/msgdma.c
+++ /dev/null
@@ -1,642 +0,0 @@
-/*-
- * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/* Altera mSGDMA driver. */
-
-#include <sys/cdefs.h>
-#include "opt_platform.h"
-#include <sys/param.h>
-#include <sys/endian.h>
-#include <sys/systm.h>
-#include <sys/conf.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/kthread.h>
-#include <sys/sglist.h>
-#include <sys/module.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/resource.h>
-#include <sys/rman.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cache.h>
-
-#ifdef FDT
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-#endif
-
-#include <dev/xdma/xdma.h>
-#include "xdma_if.h"
-#include "opt_altera_msgdma.h"
-
-#include <dev/altera/msgdma/msgdma.h>
-
-#define MSGDMA_DEBUG
-#undef MSGDMA_DEBUG
-
-#ifdef MSGDMA_DEBUG
-#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
-#else
-#define dprintf(fmt, ...)
-#endif
-
-#define MSGDMA_NCHANNELS 1
-
-struct msgdma_channel {
- struct msgdma_softc *sc;
- struct mtx mtx;
- xdma_channel_t *xchan;
- struct proc *p;
- int used;
- int index;
- int idx_head;
- int idx_tail;
-
- struct msgdma_desc **descs;
- bus_dma_segment_t *descs_phys;
- uint32_t descs_num;
- bus_dma_tag_t dma_tag;
- bus_dmamap_t *dma_map;
- uint32_t map_descr;
- uint8_t map_err;
- uint32_t descs_used_count;
-};
-
-struct msgdma_softc {
- device_t dev;
- struct resource *res[3];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- bus_space_tag_t bst_d;
- bus_space_handle_t bsh_d;
- void *ih;
- struct msgdma_desc desc;
- struct msgdma_channel channels[MSGDMA_NCHANNELS];
-};
-
-static struct resource_spec msgdma_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { SYS_RES_MEMORY, 1, RF_ACTIVE },
- { SYS_RES_IRQ, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-#define HWTYPE_NONE 0
-#define HWTYPE_STD 1
-
-static struct ofw_compat_data compat_data[] = {
- { "altr,msgdma-16.0", HWTYPE_STD },
- { "altr,msgdma-1.0", HWTYPE_STD },
- { NULL, HWTYPE_NONE },
-};
-
-static int msgdma_probe(device_t dev);
-static int msgdma_attach(device_t dev);
-static int msgdma_detach(device_t dev);
-
-static inline uint32_t
-msgdma_next_desc(struct msgdma_channel *chan, uint32_t curidx)
-{
-
- return ((curidx + 1) % chan->descs_num);
-}
-
-static void
-msgdma_intr(void *arg)
-{
- xdma_transfer_status_t status;
- struct xdma_transfer_status st;
- struct msgdma_desc *desc;
- struct msgdma_channel *chan;
- struct xdma_channel *xchan;
- struct msgdma_softc *sc;
- uint32_t tot_copied;
-
- sc = arg;
- chan = &sc->channels[0];
- xchan = chan->xchan;
-
- dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
- __func__, device_get_unit(sc->dev),
- READ4_DESC(sc, PF_STATUS),
- READ4_DESC(sc, PF_NEXT_LO),
- READ4_DESC(sc, PF_CONTROL));
-
- tot_copied = 0;
-
- while (chan->idx_tail != chan->idx_head) {
- dprintf("%s: idx_tail %d idx_head %d\n", __func__,
- chan->idx_tail, chan->idx_head);
- bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
-
- desc = chan->descs[chan->idx_tail];
- if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
- break;
- }
-
- tot_copied += le32toh(desc->transferred);
- st.error = 0;
- st.transferred = le32toh(desc->transferred);
- xchan_seg_done(xchan, &st);
-
- chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
- atomic_subtract_int(&chan->descs_used_count, 1);
- }
-
- WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);
-
- /* Finish operation */
- status.error = 0;
- status.transferred = tot_copied;
- xdma_callback(chan->xchan, &status);
-}
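The CONTROL_OWN test above is the completion handshake implied by the submit path below: software sets CONTROL_OWN | CONTROL_GO when handing a descriptor to the prefetcher, and the reaping loop stops at the first descriptor on which the hardware has evidently not yet cleared OWN, i.e. the first transfer still in flight.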
-
-static int
-msgdma_reset(struct msgdma_softc *sc)
-{
- int timeout;
-
- dprintf("%s: read status: %x\n", __func__, READ4(sc, 0x00));
- dprintf("%s: read control: %x\n", __func__, READ4(sc, 0x04));
- dprintf("%s: read 1: %x\n", __func__, READ4(sc, 0x08));
- dprintf("%s: read 2: %x\n", __func__, READ4(sc, 0x0C));
-
- WRITE4(sc, DMA_CONTROL, CONTROL_RESET);
-
- timeout = 100;
- do {
- if ((READ4(sc, DMA_STATUS) & STATUS_RESETTING) == 0)
- break;
- } while (timeout--);
-
- dprintf("timeout %d\n", timeout);
-
-	if (timeout < 0)
- return (-1);
-
- dprintf("%s: read control after reset: %x\n",
- __func__, READ4(sc, DMA_CONTROL));
-
- return (0);
-}
-
-static int
-msgdma_probe(device_t dev)
-{
- int hwtype;
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- hwtype = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
- if (hwtype == HWTYPE_NONE)
- return (ENXIO);
-
- device_set_desc(dev, "Altera mSGDMA");
-
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-msgdma_attach(device_t dev)
-{
- struct msgdma_softc *sc;
- phandle_t xref, node;
- int err;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, msgdma_spec, sc->res)) {
- device_printf(dev, "could not allocate resources for device\n");
- return (ENXIO);
- }
-
- /* CSR memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- /* Descriptor memory interface */
- sc->bst_d = rman_get_bustag(sc->res[1]);
- sc->bsh_d = rman_get_bushandle(sc->res[1]);
-
- /* Setup interrupt handler */
- err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, msgdma_intr, sc, &sc->ih);
- if (err) {
- device_printf(dev, "Unable to alloc interrupt resource.\n");
- return (ENXIO);
- }
-
- node = ofw_bus_get_node(dev);
- xref = OF_xref_from_node(node);
- OF_device_register_xref(xref, dev);
-
- if (msgdma_reset(sc) != 0)
- return (-1);
-
- WRITE4(sc, DMA_CONTROL, CONTROL_GIEM);
-
- return (0);
-}
-
-static int
-msgdma_detach(device_t dev)
-{
- struct msgdma_softc *sc;
-
-	sc = device_get_softc(dev);
-
-	/* Release the resources acquired by msgdma_attach(). */
-	bus_teardown_intr(dev, sc->res[2], sc->ih);
-	bus_release_resources(dev, msgdma_spec, sc->res);
-
-	return (0);
-}
-
-static void
-msgdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
-{
- struct msgdma_channel *chan;
-
- chan = (struct msgdma_channel *)arg;
- KASSERT(chan != NULL, ("xchan is NULL"));
-
- if (err) {
- chan->map_err = 1;
- return;
- }
-
- chan->descs_phys[chan->map_descr].ds_addr = segs[0].ds_addr;
- chan->descs_phys[chan->map_descr].ds_len = segs[0].ds_len;
-
- dprintf("map desc %d: descs phys %lx len %ld\n",
- chan->map_descr, segs[0].ds_addr, segs[0].ds_len);
-}
-
-static int
-msgdma_desc_free(struct msgdma_softc *sc, struct msgdma_channel *chan)
-{
- struct msgdma_desc *desc;
- int nsegments;
- int i;
-
- nsegments = chan->descs_num;
-
- for (i = 0; i < nsegments; i++) {
- desc = chan->descs[i];
- bus_dmamap_unload(chan->dma_tag, chan->dma_map[i]);
- bus_dmamem_free(chan->dma_tag, desc, chan->dma_map[i]);
- }
-
- bus_dma_tag_destroy(chan->dma_tag);
- free(chan->descs, M_DEVBUF);
- free(chan->dma_map, M_DEVBUF);
- free(chan->descs_phys, M_DEVBUF);
-
- return (0);
-}
-
-static int
-msgdma_desc_alloc(struct msgdma_softc *sc, struct msgdma_channel *chan,
- uint32_t desc_size, uint32_t align)
-{
- int nsegments;
- int err;
- int i;
-
- nsegments = chan->descs_num;
-
- dprintf("%s: nseg %d\n", __func__, nsegments);
-
- err = bus_dma_tag_create(
- bus_get_dma_tag(sc->dev),
- align, 0, /* alignment, boundary */
- BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
- BUS_SPACE_MAXADDR, /* highaddr */
- NULL, NULL, /* filter, filterarg */
-	    desc_size, 1,		/* maxsize, nsegments */
- desc_size, 0, /* maxsegsize, flags */
- NULL, NULL, /* lockfunc, lockarg */
- &chan->dma_tag);
- if (err) {
- device_printf(sc->dev,
- "%s: Can't create bus_dma tag.\n", __func__);
- return (-1);
- }
-
- /* Descriptors. */
- chan->descs = malloc(nsegments * sizeof(struct msgdma_desc *),
- M_DEVBUF, (M_WAITOK | M_ZERO));
- if (chan->descs == NULL) {
- device_printf(sc->dev,
- "%s: Can't allocate memory.\n", __func__);
- return (-1);
- }
- chan->dma_map = malloc(nsegments * sizeof(bus_dmamap_t),
- M_DEVBUF, (M_WAITOK | M_ZERO));
- chan->descs_phys = malloc(nsegments * sizeof(bus_dma_segment_t),
- M_DEVBUF, (M_WAITOK | M_ZERO));
-
- /* Allocate bus_dma memory for each descriptor. */
- for (i = 0; i < nsegments; i++) {
- err = bus_dmamem_alloc(chan->dma_tag, (void **)&chan->descs[i],
- BUS_DMA_WAITOK | BUS_DMA_ZERO, &chan->dma_map[i]);
- if (err) {
- device_printf(sc->dev,
- "%s: Can't allocate memory for descriptors.\n",
- __func__);
- return (-1);
- }
-
- chan->map_err = 0;
- chan->map_descr = i;
- err = bus_dmamap_load(chan->dma_tag, chan->dma_map[i], chan->descs[i],
- desc_size, msgdma_dmamap_cb, chan, BUS_DMA_WAITOK);
- if (err) {
- device_printf(sc->dev,
- "%s: Can't load DMA map.\n", __func__);
- return (-1);
- }
-
- if (chan->map_err != 0) {
- device_printf(sc->dev,
- "%s: Can't load DMA map.\n", __func__);
- return (-1);
- }
- }
-
- return (0);
-}
-
-static int
-msgdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
-{
- struct msgdma_channel *chan;
- struct msgdma_softc *sc;
- int i;
-
- sc = device_get_softc(dev);
-
- for (i = 0; i < MSGDMA_NCHANNELS; i++) {
- chan = &sc->channels[i];
- if (chan->used == 0) {
- chan->xchan = xchan;
- xchan->chan = (void *)chan;
- if ((xchan->caps & XCHAN_CAP_IOMMU) == 0)
- xchan->caps |= XCHAN_CAP_BUSDMA;
- chan->index = i;
- chan->sc = sc;
- chan->used = 1;
- chan->idx_head = 0;
- chan->idx_tail = 0;
- chan->descs_used_count = 0;
- chan->descs_num = 1024;
-
- return (0);
- }
- }
-
- return (-1);
-}
-
-static int
-msgdma_channel_free(device_t dev, struct xdma_channel *xchan)
-{
- struct msgdma_channel *chan;
- struct msgdma_softc *sc;
-
- sc = device_get_softc(dev);
-
- chan = (struct msgdma_channel *)xchan->chan;
-
- msgdma_desc_free(sc, chan);
-
- chan->used = 0;
-
- return (0);
-}
-
-static int
-msgdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
- uint32_t *capacity)
-{
- struct msgdma_channel *chan;
- uint32_t c;
-
- chan = (struct msgdma_channel *)xchan->chan;
-
- /* At least one descriptor must be left empty. */
- c = (chan->descs_num - chan->descs_used_count - 1);
-
- *capacity = c;
-
- return (0);
-}
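The "- 1" reflects the classic ring-buffer reservation: since idx_head == idx_tail is taken to mean "empty" (see the reaping loop in msgdma_intr()), a completely full ring would look identical to an empty one, so one slot is never handed out. A minimal sketch of the invariant, with illustrative names:

/* One slot is always left unused so the two states stay distinct. */
static inline int
ring_empty(uint32_t head, uint32_t tail)
{

	return (head == tail);
}

static inline int
ring_full(uint32_t head, uint32_t tail, uint32_t nslots)
{

	return (((head + 1) % nslots) == tail);
}

With descs_num = 1024, at most 1023 descriptors can therefore be outstanding, which is exactly the capacity reported above.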
-
-static int
-msgdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
- struct xdma_sglist *sg, uint32_t sg_n)
-{
- struct msgdma_channel *chan;
- struct msgdma_desc *desc;
- struct msgdma_softc *sc;
- bus_addr_t src_addr_lo;
- bus_addr_t dst_addr_lo;
- uint32_t len;
- uint32_t tmp;
- int i;
-
- sc = device_get_softc(dev);
-
- chan = (struct msgdma_channel *)xchan->chan;
-
- for (i = 0; i < sg_n; i++) {
- src_addr_lo = sg[i].src_addr;
- dst_addr_lo = sg[i].dst_addr;
- len = (uint32_t)sg[i].len;
-
- dprintf("%s: src %x dst %x len %d\n", __func__,
- src_addr_lo, dst_addr_lo, len);
-
- desc = chan->descs[chan->idx_head];
-#if defined(ALTERA_MSGDMA_DESC_EXT) || defined(ALTERA_MSGDMA_DESC_PF_EXT)
- desc->read_hi = htole32(src_addr_lo >> 32);
- desc->write_hi = htole32(dst_addr_lo >> 32);
-#endif
- desc->read_lo = htole32(src_addr_lo);
- desc->write_lo = htole32(dst_addr_lo);
- desc->length = htole32(len);
- desc->transferred = 0;
- desc->status = 0;
- desc->reserved = 0;
- desc->control = 0;
-
- if (sg[i].direction == XDMA_MEM_TO_DEV) {
- if (sg[i].first == 1) {
- desc->control |= htole32(CONTROL_GEN_SOP);
- }
-
- if (sg[i].last == 1) {
- desc->control |= htole32(CONTROL_GEN_EOP);
- desc->control |= htole32(CONTROL_TC_IRQ_EN |
- CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
- }
- } else {
- desc->control |= htole32(CONTROL_END_ON_EOP | (1 << 13));
- desc->control |= htole32(CONTROL_TC_IRQ_EN |
- CONTROL_ET_IRQ_EN | CONTROL_ERR_M);
- }
-
- tmp = chan->idx_head;
-
- atomic_add_int(&chan->descs_used_count, 1);
- chan->idx_head = msgdma_next_desc(chan, chan->idx_head);
-
- desc->control |= htole32(CONTROL_OWN | CONTROL_GO);
-
- bus_dmamap_sync(chan->dma_tag, chan->dma_map[tmp],
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- }
-
- return (0);
-}
-
-static int
-msgdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
-{
- struct msgdma_channel *chan;
- struct msgdma_desc *desc;
- struct msgdma_softc *sc;
- uint32_t addr;
- uint32_t reg;
- int ret;
- int i;
-
- sc = device_get_softc(dev);
-
- dprintf("%s(%d)\n", __func__, device_get_unit(dev));
-
- chan = (struct msgdma_channel *)xchan->chan;
-
- ret = msgdma_desc_alloc(sc, chan, sizeof(struct msgdma_desc), 16);
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
- for (i = 0; i < chan->descs_num; i++) {
- desc = chan->descs[i];
-
- if (i == (chan->descs_num - 1)) {
- desc->next = htole32(chan->descs_phys[0].ds_addr);
- } else {
- desc->next = htole32(chan->descs_phys[i+1].ds_addr);
- }
-
- dprintf("%s(%d): desc %d vaddr %lx next paddr %x\n", __func__,
- device_get_unit(dev), i, (uint64_t)desc, le32toh(desc->next));
- }
-
- addr = chan->descs_phys[0].ds_addr;
- WRITE4_DESC(sc, PF_NEXT_LO, addr);
- WRITE4_DESC(sc, PF_NEXT_HI, 0);
- WRITE4_DESC(sc, PF_POLL_FREQ, 1000);
-
- reg = (PF_CONTROL_GIEM | PF_CONTROL_DESC_POLL_EN);
- reg |= PF_CONTROL_RUN;
- WRITE4_DESC(sc, PF_CONTROL, reg);
-
- return (0);
-}
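Note that the chain constructed above is circular: the final descriptor's next pointer is aimed back at descs_phys[0], so the prefetcher, once started with PF_CONTROL_RUN and a poll frequency of 1000, loops over the ring indefinitely and picks up work whenever software marks a descriptor with CONTROL_OWN | CONTROL_GO.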
-
-static int
-msgdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
-{
- struct msgdma_channel *chan;
- struct msgdma_softc *sc;
-
- sc = device_get_softc(dev);
-
- chan = (struct msgdma_channel *)xchan->chan;
-
- switch (cmd) {
- case XDMA_CMD_BEGIN:
- case XDMA_CMD_TERMINATE:
- case XDMA_CMD_PAUSE:
- /* TODO: implement me */
- return (-1);
- }
-
- return (0);
-}
-
-#ifdef FDT
-static int
-msgdma_ofw_md_data(device_t dev, pcell_t *cells, int ncells, void **ptr)
-{
-
- return (0);
-}
-#endif
-
-static device_method_t msgdma_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, msgdma_probe),
- DEVMETHOD(device_attach, msgdma_attach),
- DEVMETHOD(device_detach, msgdma_detach),
-
- /* xDMA Interface */
- DEVMETHOD(xdma_channel_alloc, msgdma_channel_alloc),
- DEVMETHOD(xdma_channel_free, msgdma_channel_free),
- DEVMETHOD(xdma_channel_control, msgdma_channel_control),
-
- /* xDMA SG Interface */
- DEVMETHOD(xdma_channel_capacity, msgdma_channel_capacity),
- DEVMETHOD(xdma_channel_prep_sg, msgdma_channel_prep_sg),
- DEVMETHOD(xdma_channel_submit_sg, msgdma_channel_submit_sg),
-
-#ifdef FDT
- DEVMETHOD(xdma_ofw_md_data, msgdma_ofw_md_data),
-#endif
-
- DEVMETHOD_END
-};
-
-static driver_t msgdma_driver = {
- "msgdma",
- msgdma_methods,
- sizeof(struct msgdma_softc),
-};
-
-EARLY_DRIVER_MODULE(msgdma, simplebus, msgdma_driver, 0, 0,
- BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
diff --git a/sys/dev/altera/msgdma/msgdma.h b/sys/dev/altera/msgdma/msgdma.h
deleted file mode 100644
index 1e72d26b7c6e..000000000000
--- a/sys/dev/altera/msgdma/msgdma.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*-
- * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "opt_altera_msgdma.h"
-
-/* Altera mSGDMA registers. */
-#define DMA_STATUS 0x00
-#define STATUS_RESETTING (1 << 6)
-#define DMA_CONTROL 0x04
-#define CONTROL_GIEM (1 << 4) /* Global Interrupt Enable Mask */
-#define CONTROL_RESET (1 << 1) /* Reset Dispatcher */
-
-/* Descriptor fields. */
-#define CONTROL_GO (1 << 31) /* Commit all the descriptor info */
-#define CONTROL_OWN (1 << 30) /* Owned by hardware (prefetcher-enabled only) */
-#define CONTROL_EDE (1 << 24) /* Early done enable */
-#define CONTROL_ERR_S 16 /* Transmit Error, Error IRQ Enable */
-#define CONTROL_ERR_M (0xff << CONTROL_ERR_S)
-#define CONTROL_ET_IRQ_EN (1 << 15) /* Early Termination IRQ Enable */
-#define CONTROL_TC_IRQ_EN (1 << 14) /* Transfer Complete IRQ Enable */
-#define CONTROL_END_ON_EOP (1 << 12) /* End on EOP */
-#define CONTROL_PARK_WR (1 << 11) /* Park Writes */
-#define CONTROL_PARK_RD (1 << 10) /* Park Reads */
-#define CONTROL_GEN_EOP (1 << 9) /* Generate EOP */
-#define CONTROL_GEN_SOP (1 << 8) /* Generate SOP */
-#define CONTROL_TX_CHANNEL_S 0 /* Transmit Channel */
-#define	CONTROL_TX_CHANNEL_M	(0xff << CONTROL_TX_CHANNEL_S)
-
-/* Prefetcher */
-#define PF_CONTROL 0x00
-#define PF_CONTROL_GIEM (1 << 3)
-#define PF_CONTROL_RESET (1 << 2)
-#define PF_CONTROL_DESC_POLL_EN (1 << 1)
-#define PF_CONTROL_RUN (1 << 0)
-#define PF_NEXT_LO 0x04
-#define PF_NEXT_HI 0x08
-#define PF_POLL_FREQ 0x0C
-#define PF_STATUS 0x10
-#define PF_STATUS_IRQ (1 << 0)
-
-#define READ4(_sc, _reg) \
- le32toh(bus_space_read_4(_sc->bst, _sc->bsh, _reg))
-#define WRITE4(_sc, _reg, _val) \
- bus_space_write_4(_sc->bst, _sc->bsh, _reg, htole32(_val))
-
-#define READ4_DESC(_sc, _reg) \
- le32toh(bus_space_read_4(_sc->bst_d, _sc->bsh_d, _reg))
-#define WRITE4_DESC(_sc, _reg, _val) \
- bus_space_write_4(_sc->bst_d, _sc->bsh_d, _reg, htole32(_val))
-
-#if defined(ALTERA_MSGDMA_DESC_STD)
-
-/* Standard descriptor format with prefetcher disabled. */
-struct msgdma_desc {
- uint32_t read_lo;
- uint32_t write_lo;
- uint32_t length;
- uint32_t control;
-};
-
-#elif defined(ALTERA_MSGDMA_DESC_EXT)
-
-/* Extended descriptor format with prefetcher disabled. */
-struct msgdma_desc {
- uint32_t read_lo;
- uint32_t write_lo;
- uint32_t length;
- uint8_t write_burst;
- uint8_t read_burst;
- uint16_t seq_num;
- uint16_t write_stride;
- uint16_t read_stride;
- uint32_t read_hi;
- uint32_t write_hi;
- uint32_t control;
-};
-
-#elif defined(ALTERA_MSGDMA_DESC_PF_STD)
-
-/* Standard descriptor format with prefetcher enabled. */
-struct msgdma_desc {
- uint32_t read_lo;
- uint32_t write_lo;
- uint32_t length;
- uint32_t next;
- uint32_t transferred;
- uint32_t status;
- uint32_t reserved;
- uint32_t control;
-};
-
-#elif defined(ALTERA_MSGDMA_DESC_PF_EXT)
-
-/* Extended descriptor format with prefetcher enabled. */
-struct msgdma_desc {
- uint32_t read_lo;
- uint32_t write_lo;
- uint32_t length;
- uint32_t next;
- uint32_t transferred;
- uint32_t status;
- uint32_t reserved;
- uint8_t write_burst;
- uint8_t read_burst;
- uint16_t seq_num;
- uint16_t write_stride;
- uint16_t read_stride;
- uint32_t read_hi;
- uint32_t write_hi;
- uint32_t next_hi;
- uint32_t reserved1;
- uint32_t reserved2;
- uint32_t reserved3;
- uint32_t control;
-};
-
-#else
-
-#error "mSGDMA descriptor format (kernel option) is not set."
-
-#endif
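Since msgdma.c dereferences desc->next, desc->transferred, and desc->status, only the two prefetcher-enabled layouts could actually have been compiled against it; the selection was made in the kernel configuration, e.g. (illustrative):

	options 	ALTERA_MSGDMA_DESC_PF_STD

which is the mechanism that populates the opt_altera_msgdma.h header included at the top of this file.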
diff --git a/sys/dev/altera/pio/pio.c b/sys/dev/altera/pio/pio.c
deleted file mode 100644
index e002fc347f28..000000000000
--- a/sys/dev/altera/pio/pio.c
+++ /dev/null
@@ -1,208 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * Altera PIO (Parallel IO) device driver
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-
-#include <dev/altera/pio/pio.h>
-#include "pio_if.h"
-
-#define READ4(_sc, _reg) bus_read_4((_sc)->res[0], _reg)
-#define READ2(_sc, _reg) bus_read_2((_sc)->res[0], _reg)
-#define READ1(_sc, _reg) bus_read_1((_sc)->res[0], _reg)
-#define WRITE4(_sc, _reg, _val) bus_write_4((_sc)->res[0], _reg, _val)
-#define WRITE2(_sc, _reg, _val) bus_write_2((_sc)->res[0], _reg, _val)
-#define WRITE1(_sc, _reg, _val) bus_write_1((_sc)->res[0], _reg, _val)
-
-struct pio_softc {
- struct resource *res[2];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- device_t dev;
- void *ih;
-};
-
-static struct resource_spec pio_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { SYS_RES_IRQ, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-static int
-pio_setup_irq(device_t dev, void *intr_handler, void *ih_user)
-{
- struct pio_softc *sc;
-
- sc = device_get_softc(dev);
-
- /* Setup interrupt handlers */
- if (bus_setup_intr(sc->dev, sc->res[1], INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, intr_handler, ih_user, &sc->ih)) {
- device_printf(sc->dev, "Unable to setup intr\n");
- return (1);
- }
-
- return (0);
-}
-
-static int
-pio_teardown_irq(device_t dev)
-{
- struct pio_softc *sc;
-
- sc = device_get_softc(dev);
-
- bus_teardown_intr(sc->dev, sc->res[1], sc->ih);
-
- return (0);
-}
-
-static int
-pio_read(device_t dev)
-{
- struct pio_softc *sc;
-
- sc = device_get_softc(dev);
-
- return (READ4(sc, PIO_DATA));
-}
-
-static int
-pio_set(device_t dev, int bit, int enable)
-{
- struct pio_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (enable)
- WRITE4(sc, PIO_OUTSET, bit);
- else
- WRITE4(sc, PIO_OUTCLR, bit);
-
- return (0);
-}
-
-static int
-pio_configure(device_t dev, int dir, int mask)
-{
- struct pio_softc *sc;
-
- sc = device_get_softc(dev);
-
- WRITE4(sc, PIO_INT_MASK, mask);
- WRITE4(sc, PIO_DIR, dir);
-
- return (0);
-}
-
-static int
-pio_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "altr,pio"))
- return (ENXIO);
-
- device_set_desc(dev, "Altera PIO");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-pio_attach(device_t dev)
-{
- struct pio_softc *sc;
- struct fdt_ic *fic;
- phandle_t node;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, pio_spec, sc->res)) {
- device_printf(dev, "could not allocate resources\n");
- return (ENXIO);
- }
-
- /* Memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- if ((node = ofw_bus_get_node(sc->dev)) == -1)
- return (ENXIO);
-
- fic = malloc(sizeof(*fic), M_DEVBUF, M_WAITOK|M_ZERO);
- fic->iph = node;
- fic->dev = dev;
- SLIST_INSERT_HEAD(&fdt_ic_list_head, fic, fdt_ics);
-
- return (0);
-}
-
-static device_method_t pio_methods[] = {
- DEVMETHOD(device_probe, pio_probe),
- DEVMETHOD(device_attach, pio_attach),
-
- /* pio_if.m */
- DEVMETHOD(pio_read, pio_read),
- DEVMETHOD(pio_configure, pio_configure),
- DEVMETHOD(pio_set, pio_set),
- DEVMETHOD(pio_setup_irq, pio_setup_irq),
- DEVMETHOD(pio_teardown_irq, pio_teardown_irq),
- DEVMETHOD_END
-};
-
-static driver_t pio_driver = {
- "altera_pio",
- pio_methods,
- sizeof(struct pio_softc),
-};
-
-DRIVER_MODULE(altera_pio, simplebus, pio_driver, 0, 0);
diff --git a/sys/dev/altera/pio/pio_if.m b/sys/dev/altera/pio/pio_if.m
deleted file mode 100644
index 068963816cc5..000000000000
--- a/sys/dev/altera/pio/pio_if.m
+++ /dev/null
@@ -1,64 +0,0 @@
-#-
-# Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
-# All rights reserved.
-#
-# This software was developed by SRI International and the University of
-# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
-# ("CTSRD"), as part of the DARPA CRASH research programme.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-#
-
-#include <sys/types.h>
-
-INTERFACE pio;
-
-#
-# PIO device methods
-#
-
-METHOD int read {
- device_t dev;
-};
-
-METHOD int setup_irq {
- device_t dev;
- void *handler;
- void *ih_user;
-};
-
-METHOD int teardown_irq {
- device_t dev;
-};
-
-METHOD int set {
- device_t dev;
- int bit;
- int enable;
-};
-
-METHOD int configure {
- device_t dev;
- int dir;
- int mask;
-}
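For context: each METHOD above is expanded by the kobj build machinery into an upper-case wrapper in the generated pio_if.h, which is how consumers reach the DEVMETHODs registered in pio.c. A minimal sketch of a hypothetical consumer (the direction convention -- 1 for output -- is an assumption about the PIO core, not stated in this file):

#include <sys/param.h>
#include <sys/bus.h>

#include "pio_if.h"	/* Generated from pio_if.m at build time. */

/* Hypothetical caller: all lines as outputs, interrupts masked. */
static void
pio_example(device_t pio_dev)
{
	int v;

	PIO_CONFIGURE(pio_dev, ~0, 0);	/* dir = all out, int mask = 0. */
	PIO_SET(pio_dev, 1 << 3, 1);	/* Drive line 3 high. */
	v = PIO_READ(pio_dev);		/* Sample the data register. */
	(void)v;
}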
diff --git a/sys/dev/altera/sdcard/altera_sdcard.c b/sys/dev/altera/sdcard/altera_sdcard.c
deleted file mode 100644
index 05caeead0b58..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard.c
+++ /dev/null
@@ -1,412 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-#include "opt_altera_sdcard.h"
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/sdcard/altera_sdcard.h>
-
-/*
- * Device driver for the Altera University Program Secure Data Card IP Core,
- * as described in the similarly named SOPC Builder IP Core specification.
- * This soft core is not a full SD host controller interface (SDHCI) but
- * instead provides a set of memory mapped registers and memory buffer that
- * mildly abstract the SD Card protocol, but without providing DMA or
- * interrupts. However, it does hide the details of voltage and
- * communications negotiation. This driver implements disk(9), but due to the
- * lack of interrupt support, must rely on timer-driven polling to determine
- * when I/Os have completed.
- *
- * TODO:
- *
- * 1. Implement DISKFLAG_CANDELETE / SD Card sector erase support.
- * 2. Implement d_ident from SD Card CID serial number field.
- * 3. Handle read-only SD Cards.
- * 4. Tune timeouts based on real-world SD Card speeds.
- */
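In summary, the timer-driven task below implements a five-state machine. Reconstructed from the handlers that follow:

	NOCARD  -> IDLE       card present and CSD read successfully
	NOCARD  -> BADCARD    card present but CSD unusable
	BADCARD -> NOCARD     card removed
	IDLE    -> IO         next bio started via altera_sdcard_nextio()
	IO      -> IDLE       I/O complete and bioq empty
	IO      -> NOCARD     card removed mid-I/O
	any     -> DETACHED   ALTERA_SDCARD_FLAG_DETACHREQ observed

with the one caveat, visible in altera_sdcard_task_io(), that a detach request is deferred while a command is still in progress.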
-
-void
-altera_sdcard_attach(struct altera_sdcard_softc *sc)
-{
-
- ALTERA_SDCARD_LOCK_INIT(sc);
- ALTERA_SDCARD_CONDVAR_INIT(sc);
- sc->as_disk = NULL;
- bioq_init(&sc->as_bioq);
- sc->as_currentbio = NULL;
- sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
- sc->as_taskqueue = taskqueue_create("altera_sdcardc taskq", M_WAITOK,
- taskqueue_thread_enqueue, &sc->as_taskqueue);
- taskqueue_start_threads(&sc->as_taskqueue, 1, PI_DISK,
- "altera_sdcardc%d taskqueue", sc->as_unit);
- TIMEOUT_TASK_INIT(sc->as_taskqueue, &sc->as_task, 0,
- altera_sdcard_task, sc);
-
- /*
- * Kick off timer-driven processing with a manual poll so that we
- * synchronously detect an already-inserted SD Card during the boot or
- * other driver attach point.
- */
- altera_sdcard_task(sc, 1);
-}
-
-void
-altera_sdcard_detach(struct altera_sdcard_softc *sc)
-{
-
- KASSERT(sc->as_taskqueue != NULL, ("%s: taskqueue not present",
- __func__));
-
- /*
- * Winding down the driver on detach is a bit complex. Update the
- * flags to indicate that a detach has been requested, and then wait
- * for in-progress I/O to wind down before continuing.
- */
- ALTERA_SDCARD_LOCK(sc);
- sc->as_flags |= ALTERA_SDCARD_FLAG_DETACHREQ;
- while (sc->as_state != ALTERA_SDCARD_STATE_DETACHED)
- ALTERA_SDCARD_CONDVAR_WAIT(sc);
- ALTERA_SDCARD_UNLOCK(sc);
-
- /*
- * Now wait for the possibly still executing taskqueue to drain. In
- * principle no more events will be scheduled as we've transitioned to
- * a detached state, but there might still be a request in execution.
- */
- while (taskqueue_cancel_timeout(sc->as_taskqueue, &sc->as_task, NULL))
- taskqueue_drain_timeout(sc->as_taskqueue, &sc->as_task);
-
- /*
- * Simulate a disk removal if one is present to deal with any pending
- * or queued I/O.
- */
- if (sc->as_disk != NULL)
- altera_sdcard_disk_remove(sc);
- KASSERT(bioq_first(&sc->as_bioq) == NULL,
- ("%s: non-empty bioq", __func__));
-
- /*
- * Free any remaining allocated resources.
- */
- taskqueue_free(sc->as_taskqueue);
- sc->as_taskqueue = NULL;
- ALTERA_SDCARD_CONDVAR_DESTROY(sc);
- ALTERA_SDCARD_LOCK_DESTROY(sc);
-}
-
-/*
- * Set up and start the next I/O. Transition to the I/O state, but allow the
- * caller to schedule the next timeout, as this may be called either from an
- * initial attach context, or from the task queue, which requires different
- * behaviour.
- */
-static void
-altera_sdcard_nextio(struct altera_sdcard_softc *sc)
-{
- struct bio *bp;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
- KASSERT(sc->as_currentbio == NULL,
- ("%s: bio already active", __func__));
-
- bp = bioq_takefirst(&sc->as_bioq);
- if (bp == NULL)
- panic("%s: bioq empty", __func__);
- altera_sdcard_io_start(sc, bp);
- sc->as_state = ALTERA_SDCARD_STATE_IO;
-}
-
-static void
-altera_sdcard_task_nocard(struct altera_sdcard_softc *sc)
-{
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*
- * Handle device driver detach.
- */
- if (sc->as_flags & ALTERA_SDCARD_FLAG_DETACHREQ) {
- sc->as_state = ALTERA_SDCARD_STATE_DETACHED;
- return;
- }
-
- /*
- * If there is no card insertion, remain in NOCARD.
- */
- if (!(altera_sdcard_read_asr(sc) & ALTERA_SDCARD_ASR_CARDPRESENT))
- return;
-
- /*
- * Read the CSD -- it may contain values that the driver can't handle,
- * either because of an unsupported version/feature, or because the
- * card is misbehaving. This triggers a transition to
- * ALTERA_SDCARD_STATE_BADCARD. We rely on the CSD read to print a
- * banner about how the card is problematic, since it has more
-	 * information. The bad card state allows us to print that banner
-	 * once, rather than every time we notice that the card is present
-	 * but still bad.
- */
- if (altera_sdcard_read_csd(sc) != 0) {
- sc->as_state = ALTERA_SDCARD_STATE_BADCARD;
- return;
- }
-
- /*
- * Process card insertion and upgrade to the IDLE state.
- */
- altera_sdcard_disk_insert(sc);
- sc->as_state = ALTERA_SDCARD_STATE_IDLE;
-}
-
-static void
-altera_sdcard_task_badcard(struct altera_sdcard_softc *sc)
-{
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*
- * Handle device driver detach.
- */
- if (sc->as_flags & ALTERA_SDCARD_FLAG_DETACHREQ) {
- sc->as_state = ALTERA_SDCARD_STATE_DETACHED;
- return;
- }
-
- /*
- * Handle safe card removal -- no teardown is required, just a state
- * transition.
- */
- if (!(altera_sdcard_read_asr(sc) & ALTERA_SDCARD_ASR_CARDPRESENT))
- sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
-}
-
-static void
-altera_sdcard_task_idle(struct altera_sdcard_softc *sc)
-{
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*
- * Handle device driver detach.
- */
- if (sc->as_flags & ALTERA_SDCARD_FLAG_DETACHREQ) {
- sc->as_state = ALTERA_SDCARD_STATE_DETACHED;
- return;
- }
-
- /*
- * Handle safe card removal.
- */
- if (!(altera_sdcard_read_asr(sc) & ALTERA_SDCARD_ASR_CARDPRESENT)) {
- altera_sdcard_disk_remove(sc);
- sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
- }
-}
-
-static void
-altera_sdcard_task_io(struct altera_sdcard_softc *sc)
-{
- uint16_t asr;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
- KASSERT(sc->as_currentbio != NULL, ("%s: no current I/O", __func__));
-
-#ifdef ALTERA_SDCARD_FAST_SIM
-recheck:
-#endif
- asr = altera_sdcard_read_asr(sc);
-
- /*
- * Check for unexpected card removal during an I/O.
- */
- if (!(asr & ALTERA_SDCARD_ASR_CARDPRESENT)) {
- altera_sdcard_disk_remove(sc);
- if (sc->as_flags & ALTERA_SDCARD_FLAG_DETACHREQ)
- sc->as_state = ALTERA_SDCARD_STATE_DETACHED;
- else
- sc->as_state = ALTERA_SDCARD_STATE_NOCARD;
- return;
- }
-
- /*
- * If the I/O isn't complete, remain in the IO state without further
- * action, even if DETACHREQ is in flight.
- */
- if (asr & ALTERA_SDCARD_ASR_CMDINPROGRESS)
- return;
-
- /*
- * Handle various forms of I/O completion, successful and otherwise.
- * The I/O layer may restart the transaction if an error occurred, in
- * which case remain in the IO state and reschedule.
- */
- if (!altera_sdcard_io_complete(sc, asr))
- return;
-
- /*
- * Now that I/O is complete, process detach requests in preference to
- * starting new I/O.
- */
- if (sc->as_flags & ALTERA_SDCARD_FLAG_DETACHREQ) {
- sc->as_state = ALTERA_SDCARD_STATE_DETACHED;
- return;
- }
-
- /*
- * Finally, either start the next I/O or transition to the IDLE state.
- */
- if (bioq_first(&sc->as_bioq) != NULL) {
- altera_sdcard_nextio(sc);
-#ifdef ALTERA_SDCARD_FAST_SIM
- goto recheck;
-#endif
- } else
- sc->as_state = ALTERA_SDCARD_STATE_IDLE;
-}
-
-static void
-altera_sdcard_task_reschedule(struct altera_sdcard_softc *sc)
-{
- int interval;
-
- /*
- * Reschedule based on new state. Or not, if detaching the device
- * driver. Treat a bad card as though it were no card at all.
- */
- switch (sc->as_state) {
- case ALTERA_SDCARD_STATE_NOCARD:
- case ALTERA_SDCARD_STATE_BADCARD:
- interval = ALTERA_SDCARD_TIMEOUT_NOCARD;
- break;
-
- case ALTERA_SDCARD_STATE_IDLE:
- interval = ALTERA_SDCARD_TIMEOUT_IDLE;
- break;
-
- case ALTERA_SDCARD_STATE_IO:
- if (sc->as_flags & ALTERA_SDCARD_FLAG_IOERROR)
- interval = ALTERA_SDCARD_TIMEOUT_IOERROR;
- else
- interval = ALTERA_SDCARD_TIMEOUT_IO;
- break;
-
- default:
- panic("%s: invalid exit state %d", __func__, sc->as_state);
- }
- taskqueue_enqueue_timeout(sc->as_taskqueue, &sc->as_task, interval);
-}
-
-/*
- * Because the Altera SD Card IP Core doesn't support interrupts, we do all
- * asynchronous work from a timeout. Poll at two different rates -- an
- * infrequent check for card insertion status changes, and a frequent one for
- * I/O completion. The task should never start in DETACHED, as that would
- * imply that a previous instance failed to cancel rather than reschedule.
- */
-void
-altera_sdcard_task(void *arg, int pending)
-{
- struct altera_sdcard_softc *sc;
-
- sc = arg;
- KASSERT(sc->as_state != ALTERA_SDCARD_STATE_DETACHED,
- ("%s: already in detached", __func__));
-
- ALTERA_SDCARD_LOCK(sc);
- switch (sc->as_state) {
- case ALTERA_SDCARD_STATE_NOCARD:
- altera_sdcard_task_nocard(sc);
- break;
-
- case ALTERA_SDCARD_STATE_BADCARD:
- altera_sdcard_task_badcard(sc);
- break;
-
- case ALTERA_SDCARD_STATE_IDLE:
- altera_sdcard_task_idle(sc);
- break;
-
- case ALTERA_SDCARD_STATE_IO:
- altera_sdcard_task_io(sc);
- break;
-
- default:
- panic("%s: invalid enter state %d", __func__, sc->as_state);
- }
-
- /*
- * If we have transitioned to DETACHED, signal the detach thread and
- * cancel the timeout-driven task. Otherwise reschedule on an
- * appropriate timeout.
- */
- if (sc->as_state == ALTERA_SDCARD_STATE_DETACHED)
- ALTERA_SDCARD_CONDVAR_SIGNAL(sc);
- else
-		altera_sdcard_task_reschedule(sc);
- ALTERA_SDCARD_UNLOCK(sc);
-}
-
-void
-altera_sdcard_start(struct altera_sdcard_softc *sc)
-{
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- KASSERT(sc->as_state == ALTERA_SDCARD_STATE_IDLE,
- ("%s: starting when not IDLE", __func__));
-
- taskqueue_cancel_timeout(sc->as_taskqueue, &sc->as_task, NULL);
- altera_sdcard_nextio(sc);
-#ifdef ALTERA_SDCARD_FAST_SIM
- altera_sdcard_task_io(sc);
-#endif
-	altera_sdcard_task_reschedule(sc);
-}
diff --git a/sys/dev/altera/sdcard/altera_sdcard.h b/sys/dev/altera/sdcard/altera_sdcard.h
deleted file mode 100644
index 15a8cdad0be2..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _DEV_ALTERA_SDCARD_H_
-#define _DEV_ALTERA_SDCARD_H_
-
-#define ALTERA_SDCARD_CSD_SIZE 16
-struct altera_sdcard_csd {
- uint8_t csd_data[ALTERA_SDCARD_CSD_SIZE];
-} __aligned(2); /* CSD is read in 16-bit chunks, so align to match. */
-
-struct altera_sdcard_softc {
- device_t as_dev;
- int as_unit;
- struct resource *as_res;
- int as_rid;
- struct mtx as_lock;
- struct cv as_condvar;
- int as_state;
- int as_flags;
- struct disk *as_disk;
- struct taskqueue *as_taskqueue;
- struct timeout_task as_task;
-
- /*
- * Fields relating to in-progress and pending I/O, if any.
- */
- struct bio_queue_head as_bioq;
- struct bio *as_currentbio;
- u_int as_retriesleft;
-
- /*
- * Infrequently changing fields cached from the SD Card IP Core.
- */
- struct altera_sdcard_csd as_csd;
- uint8_t as_csd_structure; /* CSD version. */
- uint64_t as_mediasize;
-};
-
-#define ALTERA_SDCARD_LOCK(sc) mtx_lock(&(sc)->as_lock)
-#define ALTERA_SDCARD_LOCK_ASSERT(sc) mtx_assert(&(sc)->as_lock, MA_OWNED)
-#define ALTERA_SDCARD_LOCK_DESTROY(sc) mtx_destroy(&(sc)->as_lock)
-#define ALTERA_SDCARD_LOCK_INIT(sc) mtx_init(&(sc)->as_lock, \
- "altera_sdcard", NULL, MTX_DEF)
-#define ALTERA_SDCARD_UNLOCK(sc) mtx_unlock(&(sc)->as_lock)
-
-#define ALTERA_SDCARD_CONDVAR_DESTROY(sc) cv_destroy(&(sc)->as_condvar)
-#define ALTERA_SDCARD_CONDVAR_INIT(sc) cv_init(&(sc)->as_condvar, \
- "altera_sdcard_detach_wait")
-#define	ALTERA_SDCARD_CONDVAR_SIGNAL(sc)	cv_signal(&(sc)->as_condvar)
-#define ALTERA_SDCARD_CONDVAR_WAIT(sc) cv_wait(&(sc)->as_condvar, \
- &(sc)->as_lock)
-
-/*
- * States an instance can be in at any given moment.
- */
-#define ALTERA_SDCARD_STATE_NOCARD 1 /* No card inserted. */
-#define ALTERA_SDCARD_STATE_BADCARD 2 /* Card bad/not supported. */
-#define ALTERA_SDCARD_STATE_IDLE 3 /* Card present but idle. */
-#define ALTERA_SDCARD_STATE_IO 4 /* Card in I/O currently. */
-#define ALTERA_SDCARD_STATE_DETACHED 5 /* Driver is detaching. */
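For orientation, the transitions among these states, as implemented by the
altera_sdcard_task_*() handlers in altera_sdcard.c, are: NOCARD -> IDLE on
insertion of a card with a usable CSD; NOCARD -> BADCARD when the CSD is
unsupported or unreadable; IDLE <-> IO as I/Os are started and completed;
IDLE, BADCARD, or IO -> NOCARD on card removal; and any state -> DETACHED once
ALTERA_SDCARD_FLAG_DETACHREQ is observed (from IO, only after the in-flight
request completes or the card disappears).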
-
-/*
- * Different timeout intervals based on state. When just looking for a card
- * status change, check twice a second. When we're actively waiting on I/O
- * completion, check every millisecond.
- */
-#define ALTERA_SDCARD_TIMEOUT_NOCARD (hz/2)
-#define ALTERA_SDCARD_TIMEOUT_IDLE (hz/2)
-#define ALTERA_SDCARD_TIMEOUT_IO (1)
-#define ALTERA_SDCARD_TIMEOUT_IOERROR (hz/5)
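These intervals are in scheduler ticks, the unit taskqueue_enqueue_timeout()
expects, so wall-clock rates depend on hz: at the common hz=1000, the
NOCARD/IDLE polls run every 500 ms, the I/O completion poll every tick (1 ms),
and the post-error retry poll every 200 ms.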
-
-/*
- * Maximum number of retries on an I/O.
- */
-#define ALTERA_SDCARD_RETRY_LIMIT 10
-
-/*
- * Driver status flags.
- */
-#define ALTERA_SDCARD_FLAG_DETACHREQ 0x00000001 /* Detach requested. */
-#define ALTERA_SDCARD_FLAG_IOERROR 0x00000002 /* Error in progress. */
-
-/*
- * Functions for performing low-level register and memory I/O to/from the SD
- * Card IP Core. In general, only code in altera_sdcard_io.c is aware of the
- * hardware interface.
- */
-uint16_t altera_sdcard_read_asr(struct altera_sdcard_softc *sc);
-int altera_sdcard_read_csd(struct altera_sdcard_softc *sc);
-
-int altera_sdcard_io_complete(struct altera_sdcard_softc *sc,
- uint16_t asr);
-void altera_sdcard_io_start(struct altera_sdcard_softc *sc,
- struct bio *bp);
-
-/*
- * Constants for interpreting the SD Card Card Specific Data (CSD) register.
- */
-#define ALTERA_SDCARD_CSD_STRUCTURE_BYTE 15
-#define ALTERA_SDCARD_CSD_STRUCTURE_MASK 0xc0 /* 2 bits */
-#define ALTERA_SDCARD_CSD_STRUCTURE_RSHIFT 6
-
-#define ALTERA_SDCARD_CSD_READ_BL_LEN_BYTE 10
-#define ALTERA_SDCARD_CSD_READ_BL_LEN_MASK 0x0f /* 4 bits */
-
-/*
- * C_SIZE is a 12-bit field helpfully split over three different bytes of CSD
- * data. Software ease of use was not a design consideration.
- */
-#define ALTERA_SDCARD_CSD_C_SIZE_BYTE0 7
-#define ALTERA_SDCARD_CSD_C_SIZE_MASK0 0xc0 /* top 2 bits */
-#define ALTERA_SDCARD_CSD_C_SIZE_RSHIFT0 6
-
-#define ALTERA_SDCARD_CSD_C_SIZE_BYTE1 8
-#define ALTERA_SDCARD_CSD_C_SIZE_MASK1 0xff /* 8 bits */
-#define ALTERA_SDCARD_CSD_C_SIZE_LSHIFT1 2
-
-#define ALTERA_SDCARD_CSD_C_SIZE_BYTE2 9
-#define ALTERA_SDCARD_CSD_C_SIZE_MASK2 0x03 /* bottom 2 bits */
-#define ALTERA_SDCARD_CSD_C_SIZE_LSHIFT2 10
-
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_BYTE0 5
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_MASK0 0x80 /* top 1 bit */
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_RSHIFT0 7
-
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_BYTE1 6
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_MASK1 0x03 /* bottom 2 bits */
-#define ALTERA_SDCARD_CSD_C_SIZE_MULT_LSHIFT1 1
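Putting the three pieces together, the 12-bit C_SIZE is reassembled as follows
-- a sketch mirroring the extraction in altera_sdcard_process_csd0() in
altera_sdcard_io.c:

	c_size = ((csd_data[7] & 0xc0) >> 6) |	/* bits 1..0 */
	    (csd_data[8] << 2) |		/* bits 9..2 */
	    ((csd_data[9] & 0x03) << 10);	/* bits 11..10 */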
-
-/*
- * I/O register/buffer offsets, from Table 4.1.1 in the Altera University
- * Program SD Card IP Core specification.
- */
-#define ALTERA_SDCARD_OFF_RXTX_BUFFER 0 /* 512-byte I/O buffer */
-#define ALTERA_SDCARD_OFF_CID 512 /* 16-byte Card ID number */
-#define ALTERA_SDCARD_OFF_CSD 528 /* 16-byte Card Specific Data */
-#define ALTERA_SDCARD_OFF_OCR 544 /* Operating Conditions Reg */
-#define ALTERA_SDCARD_OFF_SR 548 /* SD Card Status Register */
-#define ALTERA_SDCARD_OFF_RCA 552 /* Relative Card Address Reg */
-#define ALTERA_SDCARD_OFF_CMD_ARG 556 /* Command Argument Register */
-#define ALTERA_SDCARD_OFF_CMD 560 /* Command Register */
-#define ALTERA_SDCARD_OFF_ASR 564 /* Auxiliary Status Register */
-#define ALTERA_SDCARD_OFF_RR1 568 /* Response R1 */
-
-/*
- * The Altera IP Core provides a 16-bit "Additional Status Register" (ASR)
- * beyond those described in the SD Card specification that captures IP Core
- * transaction state, such as whether the last command is in progress, the
- * card has been removed, etc.
- */
-#define ALTERA_SDCARD_ASR_CMDVALID 0x0001
-#define ALTERA_SDCARD_ASR_CARDPRESENT 0x0002
-#define ALTERA_SDCARD_ASR_CMDINPROGRESS 0x0004
-#define ALTERA_SDCARD_ASR_SRVALID 0x0008
-#define ALTERA_SDCARD_ASR_CMDTIMEOUT 0x0010
-#define ALTERA_SDCARD_ASR_CMDDATAERROR 0x0020
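A minimal sketch of the polling idiom these bits support, as used by the
timer-driven task in altera_sdcard.c:

	asr = altera_sdcard_read_asr(sc);
	if (!(asr & ALTERA_SDCARD_ASR_CARDPRESENT)) {
		/* Card removed -- fail any outstanding I/O. */
	} else if (asr & ALTERA_SDCARD_ASR_CMDINPROGRESS) {
		/* Command still running -- poll again on the next timeout. */
	} else if (asr & (ALTERA_SDCARD_ASR_CMDTIMEOUT |
	    ALTERA_SDCARD_ASR_CMDDATAERROR)) {
		/* Completed with an error -- consult RR1 and maybe retry. */
	}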
-
-/*
- * The Altera IP Core claims to provide a 16-bit "Response R1" register (RR1)
- * to provide more detailed error reporting when a read or write fails.
- *
- * XXXRW: The specification claims that this field is 16-bit, but then
- * proceeds to define values as though it is 32-bit. In practice, 16-bit
- * seems more likely as the register is not 32-bit aligned.
- */
-#define ALTERA_SDCARD_RR1_INITPROCRUNNING 0x0100
-#define ALTERA_SDCARD_RR1_ERASEINTERRUPTED 0x0200
-#define ALTERA_SDCARD_RR1_ILLEGALCOMMAND 0x0400
-#define ALTERA_SDCARD_RR1_COMMANDCRCFAILED 0x0800
-#define ALTERA_SDCARD_RR1_ADDRESSMISALIGNED 0x1000
-#define ALTERA_SDCARD_RR1_ADDRBLOCKRANGE 0x2000
-
-/*
- * Not all RR1 values are "errors" per se -- check only for the ones that are
- * when performing error handling.
- */
-#define ALTERA_SDCARD_RR1_ERRORMASK \
- (ALTERA_SDCARD_RR1_ERASEINTERRUPTED | ALTERA_SDCARD_RR1_ILLEGALCOMMAND | \
- ALTERA_SDCARD_RR1_COMMANDCRCFAILED | ALTERA_SDCARD_RR1_ADDRESSMISALIGNED |\
- ALTERA_SDCARD_RR1_ADDRBLOCKRANGE)
-
-/*
- * Although SD Cards may have various sector sizes, the Altera IP Core
- * requires that I/O be done in 512-byte chunks.
- */
-#define ALTERA_SDCARD_SECTORSIZE 512
-
-/*
- * SD Card commands used in this driver.
- */
-#define ALTERA_SDCARD_CMD_SEND_RCA 0x03 /* Retrieve card RCA. */
-#define ALTERA_SDCARD_CMD_SEND_CSD 0x09 /* Retrieve CSD register. */
-#define ALTERA_SDCARD_CMD_SEND_CID 0x0A /* Retrieve CID register. */
-#define ALTERA_SDCARD_CMD_READ_BLOCK 0x11 /* Read block from disk. */
-#define ALTERA_SDCARD_CMD_WRITE_BLOCK 0x18 /* Write block to disk. */
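Issuing one of these commands is just two register writes followed by ASR
polling; a sketch of a single-block read mirroring
altera_sdcard_io_start_internal() in altera_sdcard_io.c, where `off' is a
hypothetical byte offset:

	altera_sdcard_write_cmd_arg(sc, off);
	altera_sdcard_write_cmd(sc, ALTERA_SDCARD_CMD_READ_BLOCK);
	/* Completion is then detected by polling ASR for !CMDINPROGRESS. */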
-
-/*
- * Functions exposed by the device driver core to newbus(9) bus attachment
- * implementations.
- */
-void altera_sdcard_attach(struct altera_sdcard_softc *sc);
-void altera_sdcard_detach(struct altera_sdcard_softc *sc);
-void altera_sdcard_task(void *arg, int pending);
-
-/*
- * Functions exposed by the device driver core to the disk(9) front-end.
- */
-void altera_sdcard_start(struct altera_sdcard_softc *sc);
-
-/*
- * Functions relating to the implementation of disk(9) KPIs for the SD Card
- * driver.
- */
-void altera_sdcard_disk_insert(struct altera_sdcard_softc *sc);
-void altera_sdcard_disk_remove(struct altera_sdcard_softc *sc);
-
-#endif /* _DEV_ALTERA_SDCARD_H_ */
diff --git a/sys/dev/altera/sdcard/altera_sdcard_disk.c b/sys/dev/altera/sdcard/altera_sdcard_disk.c
deleted file mode 100644
index 87fdfb2a6475..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard_disk.c
+++ /dev/null
@@ -1,184 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/sdcard/altera_sdcard.h>
-
-static int
-altera_sdcard_disk_dump(void *arg, void *virtual, vm_offset_t physical,
- off_t offset, size_t length)
-{
-
- panic("%s: not yet", __func__);
-}
-
-static int
-altera_sdcard_disk_ioctl(struct disk *disk, u_long cmd, void *data, int fflag,
- struct thread *td)
-{
-
- /* XXXRW: more here? */
- return (EINVAL);
-}
-
-static void
-altera_sdcard_disk_strategy(struct bio *bp)
-{
- struct altera_sdcard_softc *sc;
-
- /*
- * Although the SD Card doesn't need sorting, we don't want to
- * introduce barriers, so use bioq_disksort().
- */
- sc = bp->bio_disk->d_drv1;
- ALTERA_SDCARD_LOCK(sc);
- switch (sc->as_state) {
- case ALTERA_SDCARD_STATE_NOCARD:
-		device_printf(sc->as_dev, "%s: unexpected I/O on NOCARD\n",
-		    __func__);
-		biofinish(bp, NULL, ENXIO);
-		break;
-
-	case ALTERA_SDCARD_STATE_BADCARD:
-		device_printf(sc->as_dev, "%s: unexpected I/O on BADCARD\n",
-		    __func__);
-		biofinish(bp, NULL, ENXIO);
-		break;
-
-	case ALTERA_SDCARD_STATE_DETACHED:
-		device_printf(sc->as_dev, "%s: unexpected I/O on DETACHED\n",
-		    __func__);
-		biofinish(bp, NULL, ENXIO);
-		break;
-
- case ALTERA_SDCARD_STATE_IDLE:
- bioq_disksort(&sc->as_bioq, bp);
- altera_sdcard_start(sc);
- break;
-
- case ALTERA_SDCARD_STATE_IO:
- bioq_disksort(&sc->as_bioq, bp);
- break;
-
- default:
- panic("%s: invalid state %d", __func__, sc->as_state);
- }
- ALTERA_SDCARD_UNLOCK(sc);
-}
-
-void
-altera_sdcard_disk_insert(struct altera_sdcard_softc *sc)
-{
- struct disk *disk;
- uint64_t size;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*
- * Because the disk insertion routine occupies the driver instance's
- * task queue thread, and the disk(9) instance isn't hooked up yet by
- * definition, the only other source of events of concern is a thread
- * initiating driver detach. That thread has to issue a detach
- * request and await an ACK from the taskqueue thread. It is
- * therefore safe to drop the lock here.
- */
- ALTERA_SDCARD_UNLOCK(sc);
- disk = disk_alloc();
- disk->d_drv1 = sc;
- disk->d_name = "altera_sdcard";
- disk->d_unit = sc->as_unit;
- disk->d_strategy = altera_sdcard_disk_strategy;
- disk->d_dump = altera_sdcard_disk_dump;
- disk->d_ioctl = altera_sdcard_disk_ioctl;
- disk->d_sectorsize = ALTERA_SDCARD_SECTORSIZE;
- disk->d_mediasize = sc->as_mediasize;
- disk->d_maxsize = ALTERA_SDCARD_SECTORSIZE;
- sc->as_disk = disk;
- disk_create(disk, DISK_VERSION);
- ALTERA_SDCARD_LOCK(sc);
-
- /*
- * Print a pretty-ish card insertion string. We could stand to
- * decorate this further, e.g., with card vendor information.
- */
- size = sc->as_mediasize / (1000 * 1000);
- device_printf(sc->as_dev, "%juM SD Card inserted\n", (uintmax_t)size);
-}
-
-void
-altera_sdcard_disk_remove(struct altera_sdcard_softc *sc)
-{
- struct disk *disk;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
- KASSERT(sc->as_disk != NULL, ("%s: as_disk NULL", __func__));
-
- /*
- * sc->as_state will be updated by the caller.
- *
- * XXXRW: Is it OK to call disk_destroy() under the mutex, or should
- * we be deferring that to the calling context once it is released?
- */
- disk = sc->as_disk;
- disk_gone(disk);
- disk_destroy(disk);
- sc->as_disk = NULL;
-
- /*
- * Cancel all outstanding I/O on the SD Card.
- */
- if (sc->as_currentbio != NULL) {
-		device_printf(sc->as_dev, "%s: SD Card removed during I/O\n",
-		    __func__);
- biofinish(sc->as_currentbio, NULL, ENXIO);
- sc->as_currentbio = NULL;
- }
- bioq_flush(&sc->as_bioq, NULL, ENXIO);
- device_printf(sc->as_dev, "SD Card removed\n");
-}
diff --git a/sys/dev/altera/sdcard/altera_sdcard_fdt.c b/sys/dev/altera/sdcard/altera_sdcard_fdt.c
deleted file mode 100644
index c62b480eecc3..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard_fdt.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/sdcard/altera_sdcard.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-/*
- * FDT bus attachment for the Altera SD Card IP core.
- */
-static int
-altera_sdcard_fdt_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (ofw_bus_is_compatible(dev, "altera,sdcard_11_2011")) {
- device_set_desc(dev, "Altera Secure Data Card IP Core");
- return (BUS_PROBE_DEFAULT);
- }
- return (ENXIO);
-}
-
-static int
-altera_sdcard_fdt_attach(device_t dev)
-{
- struct altera_sdcard_softc *sc;
-
- sc = device_get_softc(dev);
- sc->as_dev = dev;
- sc->as_unit = device_get_unit(dev);
- sc->as_rid = 0;
- sc->as_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->as_rid, RF_ACTIVE);
- if (sc->as_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- return (ENXIO);
- }
- altera_sdcard_attach(sc);
- return (0);
-}
-
-static int
-altera_sdcard_fdt_detach(device_t dev)
-{
- struct altera_sdcard_softc *sc;
-
- sc = device_get_softc(dev);
- KASSERT(sc->as_res != NULL, ("%s: resources not allocated",
- __func__));
- altera_sdcard_detach(sc);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->as_rid, sc->as_res);
- return (0);
-}
-
-static device_method_t altera_sdcard_fdt_methods[] = {
- DEVMETHOD(device_probe, altera_sdcard_fdt_probe),
- DEVMETHOD(device_attach, altera_sdcard_fdt_attach),
- DEVMETHOD(device_detach, altera_sdcard_fdt_detach),
- { 0, 0 }
-};
-
-static driver_t altera_sdcard_fdt_driver = {
- "altera_sdcardc",
- altera_sdcard_fdt_methods,
- sizeof(struct altera_sdcard_softc),
-};
-
-DRIVER_MODULE(altera_sdcard, simplebus, altera_sdcard_fdt_driver, 0, 0);
diff --git a/sys/dev/altera/sdcard/altera_sdcard_io.c b/sys/dev/altera/sdcard/altera_sdcard_io.c
deleted file mode 100644
index 5c3347960c75..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard_io.c
+++ /dev/null
@@ -1,446 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/endian.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/sdcard/altera_sdcard.h>
-
-int altera_sdcard_ignore_crc_errors = 1;
-int altera_sdcard_verify_rxtx_writes = 1;
-
-/*
- * Low-level I/O routines for the Altera SD Card University IP Core driver.
- *
- * XXXRW: Throughout, it is assumed that the IP Core handles multibyte
- * registers as little endian, as is the case for other Altera IP cores.
- * However, the specification makes no reference to endianness, so this
- * assumption might not always be correct.
- */
-uint16_t
-altera_sdcard_read_asr(struct altera_sdcard_softc *sc)
-{
-
- return (le16toh(bus_read_2(sc->as_res, ALTERA_SDCARD_OFF_ASR)));
-}
-
-static int
-altera_sdcard_process_csd0(struct altera_sdcard_softc *sc)
-{
- uint64_t c_size, c_size_mult, read_bl_len;
- uint8_t byte0, byte1, byte2;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*-
- * Compute card capacity per SD Card interface description as follows:
- *
- * Memory capacity = BLOCKNR * BLOCK_LEN
- *
- * Where:
- *
- * BLOCKNR = (C_SIZE + 1) * MULT
- * MULT = 2^(C_SIZE_MULT+2)
- * BLOCK_LEN = 2^READ_BL_LEN
- */
- read_bl_len = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_READ_BL_LEN_BYTE];
- read_bl_len &= ALTERA_SDCARD_CSD_READ_BL_LEN_MASK;
-
- byte0 = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_C_SIZE_BYTE0];
- byte0 &= ALTERA_SDCARD_CSD_C_SIZE_MASK0;
- byte1 = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_C_SIZE_BYTE1];
- byte2 = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_C_SIZE_BYTE2];
- byte2 &= ALTERA_SDCARD_CSD_C_SIZE_MASK2;
- c_size = (byte0 >> ALTERA_SDCARD_CSD_C_SIZE_RSHIFT0) |
- (byte1 << ALTERA_SDCARD_CSD_C_SIZE_LSHIFT1) |
- (byte2 << ALTERA_SDCARD_CSD_C_SIZE_LSHIFT2);
-
- byte0 = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_C_SIZE_MULT_BYTE0];
- byte0 &= ALTERA_SDCARD_CSD_C_SIZE_MULT_MASK0;
- byte1 = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_C_SIZE_MULT_BYTE1];
- byte1 &= ALTERA_SDCARD_CSD_C_SIZE_MULT_MASK1;
- c_size_mult = (byte0 >> ALTERA_SDCARD_CSD_C_SIZE_MULT_RSHIFT0) |
- (byte1 << ALTERA_SDCARD_CSD_C_SIZE_MULT_LSHIFT1);
-
- /*
- * If we're just getting back zero's, mark the card as bad, even
- * though it could just mean a Very Small Disk Indeed.
- */
- if (c_size == 0 && c_size_mult == 0 && read_bl_len == 0) {
- device_printf(sc->as_dev, "Ignored zero-size card\n");
- return (ENXIO);
- }
- sc->as_mediasize = (c_size + 1) * (1 << (c_size_mult + 2)) *
- (1 << read_bl_len);
- return (0);
-}
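As a worked example of the computation above: a card reporting READ_BL_LEN = 9,
C_SIZE = 4095, and C_SIZE_MULT = 7 gives MULT = 2^(7+2) = 512,
BLOCKNR = (4095 + 1) * 512 = 2097152, and BLOCK_LEN = 2^9 = 512, so
as_mediasize = 2097152 * 512 bytes = 1 GiB.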
-
-int
-altera_sdcard_read_csd(struct altera_sdcard_softc *sc)
-{
- uint8_t csd_structure;
- int error;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
-
- /*
- * XXXRW: Assume for now that when the SD Card IP Core negotiates
- * voltage/speed/etc, it must use the CSD register, and therefore
- * populates the SD Card IP Core's cache of the register value. This
- * means that we can read it without issuing further SD Card commands.
- * If this assumption proves false, we will (a) get back garbage and
- * (b) need to add additional states in the driver state machine in
- * order to query card properties before I/O can start.
- *
- * XXXRW: Treating this as an array of bytes, so no byte swapping --
- * is that a safe assumption?
- */
- KASSERT(((uintptr_t)&sc->as_csd.csd_data) % 2 == 0,
- ("%s: CSD buffer unaligned", __func__));
- bus_read_region_2(sc->as_res, ALTERA_SDCARD_OFF_CSD,
- (uint16_t *)sc->as_csd.csd_data, sizeof(sc->as_csd) / 2);
-
- /*
- * Interpret the loaded CSD, extracting certain fields and copying
- * them into the softc for easy software access.
- *
- * Currently, we support only CSD Version 1.0. If we detect a newer
- * version, suppress card detection.
- */
- csd_structure = sc->as_csd.csd_data[ALTERA_SDCARD_CSD_STRUCTURE_BYTE];
- csd_structure &= ALTERA_SDCARD_CSD_STRUCTURE_MASK;
- csd_structure >>= ALTERA_SDCARD_CSD_STRUCTURE_RSHIFT;
- sc->as_csd_structure = csd_structure;
-
- /*
- * Interpret the CSD field based on its version. Extract fields,
- * especially mediasize.
- *
- * XXXRW: Desirable to support further CSD versions here.
- */
- switch (sc->as_csd_structure) {
- case 0:
- error = altera_sdcard_process_csd0(sc);
- if (error)
- return (error);
- break;
-
- default:
- device_printf(sc->as_dev,
- "Ignored disk with unsupported CSD structure (%d)\n",
- sc->as_csd_structure);
- return (ENXIO);
- }
- return (0);
-}
-
-/*
- * XXXRW: The Altera IP Core specification indicates that RR1 is a 16-bit
- * register, but all bits it identifies are >16 bit. Most likely, RR1 is a
- * 32-bit register?
- */
-static uint16_t
-altera_sdcard_read_rr1(struct altera_sdcard_softc *sc)
-{
-
- return (le16toh(bus_read_2(sc->as_res, ALTERA_SDCARD_OFF_RR1)));
-}
-
-static void
-altera_sdcard_write_cmd_arg(struct altera_sdcard_softc *sc, uint32_t cmd_arg)
-{
-
- bus_write_4(sc->as_res, ALTERA_SDCARD_OFF_CMD_ARG, htole32(cmd_arg));
-}
-
-static void
-altera_sdcard_write_cmd(struct altera_sdcard_softc *sc, uint16_t cmd)
-{
-
- bus_write_2(sc->as_res, ALTERA_SDCARD_OFF_CMD, htole16(cmd));
-}
-
-static void
-altera_sdcard_read_rxtx_buffer(struct altera_sdcard_softc *sc, void *data,
- size_t len)
-{
-
- KASSERT((uintptr_t)data % 2 == 0,
- ("%s: unaligned data %p", __func__, data));
-	KASSERT((len <= ALTERA_SDCARD_SECTORSIZE) && (len % 2 == 0),
-	    ("%s: invalid length %zu", __func__, len));
-
- bus_read_region_2(sc->as_res, ALTERA_SDCARD_OFF_RXTX_BUFFER,
- (uint16_t *)data, len / 2);
-}
-
-static void
-altera_sdcard_write_rxtx_buffer(struct altera_sdcard_softc *sc, void *data,
- size_t len)
-{
- u_int corrections, differences, i, retry_counter;
- uint16_t d, v;
-
- KASSERT((uintptr_t)data % 2 == 0,
- ("%s: unaligned data %p", __func__, data));
-	KASSERT((len <= ALTERA_SDCARD_SECTORSIZE) && (len % 2 == 0),
-	    ("%s: invalid length %zu", __func__, len));
-
- retry_counter = 0;
- do {
- bus_write_region_2(sc->as_res, ALTERA_SDCARD_OFF_RXTX_BUFFER,
- (uint16_t *)data, len / 2);
-
- /*
- * XXXRW: Due to a possible hardware bug, the above call to
- * bus_write_region_2() might not succeed. If the workaround
- * is enabled, verify each write and retry until it succeeds.
- *
- * XXXRW: Do we want a limit counter for retries here?
- */
-recheck:
- corrections = 0;
- differences = 0;
- if (altera_sdcard_verify_rxtx_writes) {
- for (i = 0; i < ALTERA_SDCARD_SECTORSIZE; i += 2) {
- v = bus_read_2(sc->as_res,
- ALTERA_SDCARD_OFF_RXTX_BUFFER + i);
- d = *(uint16_t *)((uint8_t *)data + i);
- if (v != d) {
- if (retry_counter == 0) {
- bus_write_2(sc->as_res,
- ALTERA_SDCARD_OFF_RXTX_BUFFER + i,
- d);
- v = bus_read_2(sc->as_res,
- ALTERA_SDCARD_OFF_RXTX_BUFFER + i);
- if (v == d) {
- corrections++;
- device_printf(sc->as_dev,
- "%s: single word rewrite worked"
- " at offset %u\n",
- __func__, i);
- continue;
- }
- }
- differences++;
- device_printf(sc->as_dev,
- "%s: retrying write -- difference"
- " %u at offset %u, retry %u\n",
- __func__, differences, i,
- retry_counter);
- }
- }
- if (differences != 0) {
- retry_counter++;
- if (retry_counter == 1 &&
- corrections == differences)
- goto recheck;
- }
- }
- } while (differences != 0);
- if (retry_counter)
- device_printf(sc->as_dev, "%s: succeeded after %u retries\n",
- __func__, retry_counter);
-}
-
-static void
-altera_sdcard_io_start_internal(struct altera_sdcard_softc *sc,
- struct bio **bpp)
-{
- struct bio *bp;
-
- bp = *bpp;
-
- switch (bp->bio_cmd) {
- case BIO_READ:
- altera_sdcard_write_cmd_arg(sc, bp->bio_pblkno *
- ALTERA_SDCARD_SECTORSIZE);
- altera_sdcard_write_cmd(sc, ALTERA_SDCARD_CMD_READ_BLOCK);
- break;
-
- case BIO_WRITE:
- altera_sdcard_write_rxtx_buffer(sc, bp->bio_data,
- bp->bio_bcount);
- altera_sdcard_write_cmd_arg(sc, bp->bio_pblkno *
- ALTERA_SDCARD_SECTORSIZE);
- altera_sdcard_write_cmd(sc, ALTERA_SDCARD_CMD_WRITE_BLOCK);
- break;
-
- default:
- biofinish(bp, NULL, EOPNOTSUPP);
- *bpp = NULL;
- }
-}
-
-void
-altera_sdcard_io_start(struct altera_sdcard_softc *sc, struct bio *bp)
-{
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
- KASSERT(sc->as_currentbio == NULL,
- ("%s: bio already started", __func__));
-
- /*
-	 * We advertise the SD Card IP Core sector size up the stack as both
-	 * the block size and the maximum I/O size. Catch any attempts not to
-	 * follow the rules.
- */
- KASSERT(bp->bio_bcount == ALTERA_SDCARD_SECTORSIZE,
- ("%s: I/O size not %d", __func__, ALTERA_SDCARD_SECTORSIZE));
- altera_sdcard_io_start_internal(sc, &bp);
- sc->as_currentbio = bp;
- sc->as_retriesleft = ALTERA_SDCARD_RETRY_LIMIT;
-}
-
-/*
- * Handle completed I/O. ASR is passed in to avoid reading it more than once.
- * Return 1 if the I/O is actually complete (success, or retry limit
- * exceeded), or 0 if not.
- */
-int
-altera_sdcard_io_complete(struct altera_sdcard_softc *sc, uint16_t asr)
-{
- struct bio *bp;
- uint16_t rr1, mask;
- int error;
-
- ALTERA_SDCARD_LOCK_ASSERT(sc);
- KASSERT(!(asr & ALTERA_SDCARD_ASR_CMDINPROGRESS),
- ("%s: still in progress", __func__));
- KASSERT(asr & ALTERA_SDCARD_ASR_CARDPRESENT,
- ("%s: card removed", __func__));
-
- bp = sc->as_currentbio;
-
- /*-
- * Handle I/O retries if an error is returned by the device. Various
- * quirks handled in the process:
- *
- * 1. ALTERA_SDCARD_ASR_CMDDATAERROR is ignored for BIO_WRITE.
- * 2. ALTERA_SDCARD_RR1_COMMANDCRCFAILED is optionally ignored for
- * BIO_READ.
- */
- error = 0;
- rr1 = altera_sdcard_read_rr1(sc);
- switch (bp->bio_cmd) {
- case BIO_READ:
- mask = ALTERA_SDCARD_RR1_ERRORMASK;
- if (altera_sdcard_ignore_crc_errors)
- mask &= ~ALTERA_SDCARD_RR1_COMMANDCRCFAILED;
- if (asr & ALTERA_SDCARD_ASR_CMDTIMEOUT)
- error = EIO;
- else if ((asr & ALTERA_SDCARD_ASR_CMDDATAERROR) &&
- (rr1 & mask))
- error = EIO;
- else
- error = 0;
- break;
-
- case BIO_WRITE:
- if (asr & ALTERA_SDCARD_ASR_CMDTIMEOUT)
- error = EIO;
- else
- error = 0;
- break;
-
- default:
- break;
- }
- if (error) {
- sc->as_retriesleft--;
- if (sc->as_retriesleft == 0 || bootverbose)
- device_printf(sc->as_dev, "%s: %s operation block %ju "
- "length %ju failed; asr 0x%08x (rr1: 0x%04x)%s\n",
- __func__, bp->bio_cmd == BIO_READ ? "BIO_READ" :
- (bp->bio_cmd == BIO_WRITE ? "BIO_WRITE" :
- "unknown"),
-		    (uintmax_t)bp->bio_pblkno, (uintmax_t)bp->bio_bcount, asr, rr1,
- sc->as_retriesleft != 0 ? " retrying" : "");
- /*
- * This attempt experienced an error; possibly retry.
- */
- if (sc->as_retriesleft != 0) {
- sc->as_flags |= ALTERA_SDCARD_FLAG_IOERROR;
- altera_sdcard_io_start_internal(sc, &bp);
- return (0);
- }
- sc->as_flags &= ~ALTERA_SDCARD_FLAG_IOERROR;
- } else {
- /*
- * Successful I/O completion path.
- */
- if (sc->as_flags & ALTERA_SDCARD_FLAG_IOERROR) {
- device_printf(sc->as_dev, "%s: %s operation block %ju"
- " length %ju succeeded after %d retries\n",
- __func__, bp->bio_cmd == BIO_READ ? "BIO_READ" :
-			    (bp->bio_cmd == BIO_WRITE ? "BIO_WRITE" :
-			    "unknown"),
-			    (uintmax_t)bp->bio_pblkno, (uintmax_t)bp->bio_bcount,
- ALTERA_SDCARD_RETRY_LIMIT - sc->as_retriesleft);
- sc->as_flags &= ~ALTERA_SDCARD_FLAG_IOERROR;
- }
- switch (bp->bio_cmd) {
- case BIO_READ:
- altera_sdcard_read_rxtx_buffer(sc, bp->bio_data,
- bp->bio_bcount);
- break;
-
- case BIO_WRITE:
- break;
-
- default:
- panic("%s: unsupported I/O operation %d", __func__,
- bp->bio_cmd);
- }
- bp->bio_resid = 0;
- error = 0;
- }
- biofinish(bp, NULL, error);
- sc->as_currentbio = NULL;
- return (1);
-}
diff --git a/sys/dev/altera/sdcard/altera_sdcard_nexus.c b/sys/dev/altera/sdcard/altera_sdcard_nexus.c
deleted file mode 100644
index 9e42f0b01ba2..000000000000
--- a/sys/dev/altera/sdcard/altera_sdcard_nexus.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Robert N. M. Watson
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/bus.h>
-#include <sys/condvar.h>
-#include <sys/conf.h>
-#include <sys/bio.h>
-#include <sys/kernel.h>
-#include <sys/lock.h>
-#include <sys/malloc.h>
-#include <sys/module.h>
-#include <sys/mutex.h>
-#include <sys/rman.h>
-#include <sys/systm.h>
-#include <sys/taskqueue.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <geom/geom_disk.h>
-
-#include <dev/altera/sdcard/altera_sdcard.h>
-
-/*
- * Nexus bus attachment for the Altera SD Card IP core. Appropriate for most
- * Altera FPGA SoC-style configurations in which the IP core will be exposed
- * to the processor via a memory-mapped Avalon bus.
- */
-static int
-altera_sdcard_nexus_probe(device_t dev)
-{
-
- device_set_desc(dev, "Altera Secure Data Card IP Core");
- return (BUS_PROBE_NOWILDCARD);
-}
-
-static int
-altera_sdcard_nexus_attach(device_t dev)
-{
- struct altera_sdcard_softc *sc;
-
- sc = device_get_softc(dev);
- sc->as_dev = dev;
- sc->as_unit = device_get_unit(dev);
- sc->as_rid = 0;
- sc->as_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &sc->as_rid, RF_ACTIVE);
- if (sc->as_res == NULL) {
- device_printf(dev, "couldn't map memory\n");
- return (ENXIO);
- }
- altera_sdcard_attach(sc);
- return (0);
-}
-
-static int
-altera_sdcard_nexus_detach(device_t dev)
-{
- struct altera_sdcard_softc *sc;
-
- sc = device_get_softc(dev);
- KASSERT(sc->as_res != NULL, ("%s: resources not allocated",
- __func__));
- altera_sdcard_detach(sc);
- bus_release_resource(dev, SYS_RES_MEMORY, sc->as_rid, sc->as_res);
- return (0);
-}
-
-static device_method_t altera_sdcard_nexus_methods[] = {
- DEVMETHOD(device_probe, altera_sdcard_nexus_probe),
- DEVMETHOD(device_attach, altera_sdcard_nexus_attach),
- DEVMETHOD(device_detach, altera_sdcard_nexus_detach),
- { 0, 0 }
-};
-
-static driver_t altera_sdcard_nexus_driver = {
- "altera_sdcardc",
- altera_sdcard_nexus_methods,
- sizeof(struct altera_sdcard_softc),
-};
-
-DRIVER_MODULE(altera_sdcard, nexus, altera_sdcard_nexus_driver, 0, 0);
diff --git a/sys/dev/altera/softdma/a_api.h b/sys/dev/altera/softdma/a_api.h
deleted file mode 100644
index 524d1bf2898a..000000000000
--- a/sys/dev/altera/softdma/a_api.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2012 Bjoern A. Zeeb
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-11-C-0249)
- * ("MRC2"), as part of the DARPA MRC research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * Altera, Embedded Peripherals IP, User Guide, v. 11.0, June 2011.
- * UG-01085-11.0.
- */
-
-#ifndef _A_API_H
-#define _A_API_H
-
-/* Table 16-1. Memory Map. */
-#define A_ONCHIP_FIFO_MEM_CORE_DATA 0x00
-#define A_ONCHIP_FIFO_MEM_CORE_METADATA 0x04
-
-#define A_ONCHIP_FIFO_MEM_CORE_SOP (1<<0)
-#define A_ONCHIP_FIFO_MEM_CORE_EOP (1<<1)
-#define A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK 0x000000f7
-#define A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT 2
- /* Reserved (1<<7) */
-#define A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK 0x0000ff00
-#define A_ONCHIP_FIFO_MEM_CORE_CHANNEL_SHIFT 8
-#define A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK 0x00ff0000
-#define A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT 16
- /* Reserved 0xff000000 */
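A minimal sketch of decoding one received metadata word with these fields,
mirroring how softdma_process_rx() in softdma.c consumes them:

	meta = softdma_mem_read(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA);
	sop = (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) != 0;
	eop = (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) != 0;
	/* On EOP, the number of invalid bytes in the final 32-bit word. */
	empty = (meta & A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
	    A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
	error = (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >>
	    A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT;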
-
-/* Table 16-3. FIFO Status Register Memory Map. */
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL 0x00
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_I_STATUS 0x04
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT 0x08
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE 0x0c
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_ALMOSTFULL 0x10
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_ALMOSTEMPTY 0x14
-
-/* Table 16-5. Status Bit Field Descriptions. */
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_FULL (1<<0)
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_EMPTY (1<<1)
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_ALMOSTFULL (1<<2)
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_ALMOSTEMPTY (1<<3)
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_OVERFLOW (1<<4)
-#define A_ONCHIP_FIFO_MEM_CORE_STATUS_UNDERFLOW (1<<5)
-
-/* Table 16-6. Event Bit Field Descriptions. */
-/* XXX Datasheet has incorrect bit fields. Validate. */
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_FULL (1<<0)
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_EMPTY (1<<1)
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_ALMOSTFULL (1<<2)
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_ALMOSTEMPTY (1<<3)
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW (1<<4)
-#define A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW (1<<5)
-
-/* Table 16-7. InterruptEnable Bit Field Descriptions. */
-/* XXX Datasheet has incorrect bit fields. Validate. */
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_FULL (1<<0)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY (1<<1)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_ALMOSTFULL (1<<2)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_ALMOSTEMPTY (1<<3)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW (1<<4)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW (1<<5)
-#define A_ONCHIP_FIFO_MEM_CORE_INTR_ALL \
- (A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY| \
- A_ONCHIP_FIFO_MEM_CORE_INTR_FULL| \
- A_ONCHIP_FIFO_MEM_CORE_INTR_ALMOSTEMPTY| \
- A_ONCHIP_FIFO_MEM_CORE_INTR_ALMOSTFULL| \
- A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW| \
- A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
-
-#endif /* _A_API_H */
-
-/* end */
diff --git a/sys/dev/altera/softdma/softdma.c b/sys/dev/altera/softdma/softdma.c
deleted file mode 100644
index 4bea1315ce42..000000000000
--- a/sys/dev/altera/softdma/softdma.c
+++ /dev/null
@@ -1,882 +0,0 @@
-/*-
- * Copyright (c) 2017-2018 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/* This is driver for SoftDMA device built using Altera FIFO component. */
-
-#include <sys/cdefs.h>
-#include "opt_platform.h"
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/conf.h>
-#include <sys/bus.h>
-#include <sys/endian.h>
-#include <sys/kernel.h>
-#include <sys/kthread.h>
-#include <sys/module.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/resource.h>
-#include <sys/rman.h>
-
-#include <machine/bus.h>
-
-#ifdef FDT
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-#endif
-
-#include <dev/altera/softdma/a_api.h>
-
-#include <dev/xdma/xdma.h>
-#include "xdma_if.h"
-
-#define SOFTDMA_DEBUG
-#undef SOFTDMA_DEBUG
-
-#ifdef SOFTDMA_DEBUG
-#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
-#else
-#define dprintf(fmt, ...)
-#endif
-
-#define AVALON_FIFO_TX_BASIC_OPTS_DEPTH 16
-#define SOFTDMA_NCHANNELS 1
-#define CONTROL_GEN_SOP (1 << 0)
-#define CONTROL_GEN_EOP (1 << 1)
-#define CONTROL_OWN (1 << 31)
-
-#define SOFTDMA_RX_EVENTS \
- (A_ONCHIP_FIFO_MEM_CORE_INTR_FULL | \
- A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
- A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
-#define SOFTDMA_TX_EVENTS \
- (A_ONCHIP_FIFO_MEM_CORE_INTR_EMPTY | \
- A_ONCHIP_FIFO_MEM_CORE_INTR_OVERFLOW | \
- A_ONCHIP_FIFO_MEM_CORE_INTR_UNDERFLOW)
-
-struct softdma_channel {
- struct softdma_softc *sc;
- struct mtx mtx;
- xdma_channel_t *xchan;
- struct proc *p;
- int used;
- int index;
- int run;
- uint32_t idx_tail;
- uint32_t idx_head;
- struct softdma_desc *descs;
-
- uint32_t descs_num;
- uint32_t descs_used_count;
-};
-
-struct softdma_desc {
- uint64_t src_addr;
- uint64_t dst_addr;
- uint32_t len;
- uint32_t access_width;
- uint32_t count;
- uint16_t src_incr;
- uint16_t dst_incr;
- uint32_t direction;
- struct softdma_desc *next;
- uint32_t transfered;
- uint32_t status;
- uint32_t reserved;
- uint32_t control;
-};
-
-struct softdma_softc {
- device_t dev;
- struct resource *res[3];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- bus_space_tag_t bst_c;
- bus_space_handle_t bsh_c;
- void *ih;
- struct softdma_channel channels[SOFTDMA_NCHANNELS];
-};
-
-static struct resource_spec softdma_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* fifo */
- { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* core */
- { SYS_RES_IRQ, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-static int softdma_probe(device_t dev);
-static int softdma_attach(device_t dev);
-static int softdma_detach(device_t dev);
-
-static inline uint32_t
-softdma_next_desc(struct softdma_channel *chan, uint32_t curidx)
-{
-
- return ((curidx + 1) % chan->descs_num);
-}
-
-static void
-softdma_mem_write(struct softdma_softc *sc, uint32_t reg, uint32_t val)
-{
-
- bus_write_4(sc->res[0], reg, htole32(val));
-}
-
-static uint32_t
-softdma_mem_read(struct softdma_softc *sc, uint32_t reg)
-{
- uint32_t val;
-
- val = bus_read_4(sc->res[0], reg);
-
- return (le32toh(val));
-}
-
-static void
-softdma_memc_write(struct softdma_softc *sc, uint32_t reg, uint32_t val)
-{
-
- bus_write_4(sc->res[1], reg, htole32(val));
-}
-
-static uint32_t
-softdma_memc_read(struct softdma_softc *sc, uint32_t reg)
-{
- uint32_t val;
-
- val = bus_read_4(sc->res[1], reg);
-
- return (le32toh(val));
-}
-
-static uint32_t
-softdma_fill_level(struct softdma_softc *sc)
-{
- uint32_t val;
-
- val = softdma_memc_read(sc,
- A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_FILL_LEVEL);
-
- return (val);
-}
-
-static uint32_t
-fifo_fill_level_wait(struct softdma_softc *sc)
-{
- uint32_t val;
-
-	do {
-		val = softdma_fill_level(sc);
-	} while (val == AVALON_FIFO_TX_BASIC_OPTS_DEPTH);
-
- return (val);
-}
-
-static void
-softdma_intr(void *arg)
-{
- struct softdma_channel *chan;
- struct softdma_softc *sc;
- int reg;
- int err;
-
- sc = arg;
-
- chan = &sc->channels[0];
-
- reg = softdma_memc_read(sc, A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT);
-
- if (reg & (A_ONCHIP_FIFO_MEM_CORE_EVENT_OVERFLOW |
- A_ONCHIP_FIFO_MEM_CORE_EVENT_UNDERFLOW)) {
- /* Errors */
- err = (((reg & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) >> \
- A_ONCHIP_FIFO_MEM_CORE_ERROR_SHIFT) & 0xff);
- }
-
- if (reg != 0) {
- softdma_memc_write(sc,
- A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_EVENT, reg);
- chan->run = 1;
- wakeup(chan);
- }
-}
-
-static int
-softdma_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "altr,softdma"))
- return (ENXIO);
-
- device_set_desc(dev, "SoftDMA");
-
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-softdma_attach(device_t dev)
-{
- struct softdma_softc *sc;
- phandle_t xref, node;
- int err;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, softdma_spec, sc->res)) {
- device_printf(dev,
- "could not allocate resources for device\n");
- return (ENXIO);
- }
-
- /* FIFO memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- /* FIFO control memory interface */
- sc->bst_c = rman_get_bustag(sc->res[1]);
- sc->bsh_c = rman_get_bushandle(sc->res[1]);
-
- /* Setup interrupt handler */
- err = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, softdma_intr, sc, &sc->ih);
- if (err) {
- device_printf(dev, "Unable to alloc interrupt resource.\n");
- return (ENXIO);
- }
-
- node = ofw_bus_get_node(dev);
- xref = OF_xref_from_node(node);
- OF_device_register_xref(xref, dev);
-
- return (0);
-}
-
-static int
-softdma_detach(device_t dev)
-{
- struct softdma_softc *sc;
-
- sc = device_get_softc(dev);
-
- return (0);
-}
-
-static int
-softdma_process_tx(struct softdma_channel *chan, struct softdma_desc *desc)
-{
- struct softdma_softc *sc;
- uint64_t addr;
- uint64_t buf;
- uint32_t word;
- uint32_t missing;
- uint32_t reg;
- int got_bits;
- int len;
-
- sc = chan->sc;
-
- fifo_fill_level_wait(sc);
-
- /* Set start of packet. */
- if (desc->control & CONTROL_GEN_SOP)
- softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
- A_ONCHIP_FIFO_MEM_CORE_SOP);
-
- got_bits = 0;
- buf = 0;
-
- addr = desc->src_addr;
- len = desc->len;
-
- if (addr & 1) {
- buf = (buf << 8) | *(uint8_t *)addr;
- got_bits += 8;
- addr += 1;
- len -= 1;
- }
-
- if (len >= 2 && addr & 2) {
- buf = (buf << 16) | *(uint16_t *)addr;
- got_bits += 16;
- addr += 2;
- len -= 2;
- }
-
- while (len >= 4) {
- buf = (buf << 32) | (uint64_t)*(uint32_t *)addr;
- addr += 4;
- len -= 4;
- word = (uint32_t)((buf >> got_bits) & 0xffffffff);
-
- fifo_fill_level_wait(sc);
- if (len == 0 && got_bits == 0 &&
- (desc->control & CONTROL_GEN_EOP) != 0)
- softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
- A_ONCHIP_FIFO_MEM_CORE_EOP);
- bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
- }
-
- if (len & 2) {
- buf = (buf << 16) | *(uint16_t *)addr;
- got_bits += 16;
- addr += 2;
- len -= 2;
- }
-
- if (len & 1) {
- buf = (buf << 8) | *(uint8_t *)addr;
- got_bits += 8;
- addr += 1;
- len -= 1;
- }
-
- if (got_bits >= 32) {
- got_bits -= 32;
- word = (uint32_t)((buf >> got_bits) & 0xffffffff);
-
- fifo_fill_level_wait(sc);
- if (len == 0 && got_bits == 0 &&
- (desc->control & CONTROL_GEN_EOP) != 0)
- softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA,
- A_ONCHIP_FIFO_MEM_CORE_EOP);
- bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
- }
-
- if (got_bits) {
- missing = 32 - got_bits;
- got_bits /= 8;
-
- fifo_fill_level_wait(sc);
- reg = A_ONCHIP_FIFO_MEM_CORE_EOP |
- ((4 - got_bits) << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT);
- softdma_mem_write(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA, reg);
- word = (uint32_t)((buf << missing) & 0xffffffff);
- bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, word);
- }
-
- return (desc->len);
-}
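To make the word-packing above concrete: an aligned 7-byte packet with
CONTROL_GEN_SOP and CONTROL_GEN_EOP set produces one full 32-bit data word for
bytes 0-3; the len & 2 and len & 1 branches then accumulate the remaining
three bytes (got_bits = 24); and the final block emits a second word shifted
left by the missing 8 bits, with metadata A_ONCHIP_FIFO_MEM_CORE_EOP |
(1 << A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT), i.e. one invalid byte in the last
word.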
-
-static int
-softdma_process_rx(struct softdma_channel *chan, struct softdma_desc *desc)
-{
- uint32_t src_offs, dst_offs;
- struct softdma_softc *sc;
- uint32_t fill_level;
- uint32_t empty;
- uint32_t meta;
- uint32_t data;
- int sop_rcvd;
- int timeout;
- size_t len;
- int error;
-
- sc = chan->sc;
- empty = 0;
- src_offs = dst_offs = 0;
- error = 0;
-
- fill_level = softdma_fill_level(sc);
- if (fill_level == 0) {
- /* Nothing to receive. */
- return (0);
- }
-
- len = desc->len;
-
- sop_rcvd = 0;
- while (fill_level) {
- empty = 0;
- data = bus_read_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA);
- meta = softdma_mem_read(sc, A_ONCHIP_FIFO_MEM_CORE_METADATA);
-
- if (meta & A_ONCHIP_FIFO_MEM_CORE_ERROR_MASK) {
- error = 1;
- break;
- }
-
- if ((meta & A_ONCHIP_FIFO_MEM_CORE_CHANNEL_MASK) != 0) {
- error = 1;
- break;
- }
-
- if (meta & A_ONCHIP_FIFO_MEM_CORE_SOP) {
- sop_rcvd = 1;
- }
-
- if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP) {
- empty = (meta & A_ONCHIP_FIFO_MEM_CORE_EMPTY_MASK) >>
- A_ONCHIP_FIFO_MEM_CORE_EMPTY_SHIFT;
- }
-
- if (sop_rcvd == 0) {
- error = 1;
- break;
- }
-
- if (empty == 0) {
- *(uint32_t *)(desc->dst_addr + dst_offs) = data;
- dst_offs += 4;
- } else if (empty == 1) {
- *(uint16_t *)(desc->dst_addr + dst_offs) =
- ((data >> 16) & 0xffff);
- dst_offs += 2;
-
- *(uint8_t *)(desc->dst_addr + dst_offs) =
- ((data >> 8) & 0xff);
- dst_offs += 1;
- } else {
- panic("empty %d\n", empty);
- }
-
- if (meta & A_ONCHIP_FIFO_MEM_CORE_EOP)
- break;
-
- fill_level = softdma_fill_level(sc);
- timeout = 100;
- while (fill_level == 0 && timeout--)
- fill_level = softdma_fill_level(sc);
- if (timeout == 0) {
- /* No EOP received. Broken packet. */
- error = 1;
- break;
- }
- }
-
- if (error) {
- return (-1);
- }
-
- return (dst_offs);
-}
-
-static uint32_t
-softdma_process_descriptors(struct softdma_channel *chan,
- xdma_transfer_status_t *status)
-{
- struct xdma_channel *xchan;
- struct softdma_desc *desc;
- struct softdma_softc *sc;
- xdma_transfer_status_t st;
- int ret;
-
- sc = chan->sc;
-
- xchan = chan->xchan;
-
- desc = &chan->descs[chan->idx_tail];
-
- while (desc != NULL) {
- if ((desc->control & CONTROL_OWN) == 0) {
- break;
- }
-
- if (desc->direction == XDMA_MEM_TO_DEV) {
- ret = softdma_process_tx(chan, desc);
- } else {
- ret = softdma_process_rx(chan, desc);
- if (ret == 0) {
- /* No new data available. */
- break;
- }
- }
-
- /* Descriptor processed. */
- desc->control = 0;
-
- if (ret >= 0) {
- st.error = 0;
- st.transferred = ret;
- } else {
- st.error = ret;
- st.transferred = 0;
- }
-
- xchan_seg_done(xchan, &st);
- atomic_subtract_int(&chan->descs_used_count, 1);
-
- if (ret >= 0) {
- status->transferred += ret;
- } else {
- status->error = 1;
- break;
- }
-
- chan->idx_tail = softdma_next_desc(chan, chan->idx_tail);
-
- /* Process next descriptor, if any. */
- desc = desc->next;
- }
-
- return (0);
-}
-
-static void
-softdma_worker(void *arg)
-{
- xdma_transfer_status_t status;
- struct softdma_channel *chan;
- struct softdma_softc *sc;
-
- chan = arg;
-
- sc = chan->sc;
-
- while (1) {
- mtx_lock(&chan->mtx);
-
- do {
- mtx_sleep(chan, &chan->mtx, 0, "softdma_wait", hz / 2);
- } while (chan->run == 0);
-
- status.error = 0;
- status.transferred = 0;
-
- softdma_process_descriptors(chan, &status);
-
- /* Finish operation */
- chan->run = 0;
- xdma_callback(chan->xchan, &status);
-
- mtx_unlock(&chan->mtx);
- }
-
-}
-
-static int
-softdma_proc_create(struct softdma_channel *chan)
-{
- struct softdma_softc *sc;
-
- sc = chan->sc;
-
- if (chan->p != NULL) {
- /* Already created */
- return (0);
- }
-
- mtx_init(&chan->mtx, "SoftDMA", NULL, MTX_DEF);
-
- if (kproc_create(softdma_worker, (void *)chan, &chan->p, 0, 0,
- "softdma_worker") != 0) {
- device_printf(sc->dev,
- "%s: Failed to create worker thread.\n", __func__);
- return (-1);
- }
-
- return (0);
-}
-
-static int
-softdma_channel_alloc(device_t dev, struct xdma_channel *xchan)
-{
- struct softdma_channel *chan;
- struct softdma_softc *sc;
- int i;
-
- sc = device_get_softc(dev);
-
- for (i = 0; i < SOFTDMA_NCHANNELS; i++) {
- chan = &sc->channels[i];
- if (chan->used == 0) {
- chan->xchan = xchan;
- xchan->chan = (void *)chan;
- xchan->caps |= XCHAN_CAP_NOSEG;
- chan->index = i;
- chan->idx_head = 0;
- chan->idx_tail = 0;
- chan->descs_used_count = 0;
- chan->descs_num = 1024;
- chan->sc = sc;
-
- if (softdma_proc_create(chan) != 0) {
- return (-1);
- }
-
- chan->used = 1;
-
- return (0);
- }
- }
-
- return (-1);
-}
-
-static int
-softdma_channel_free(device_t dev, struct xdma_channel *xchan)
-{
- struct softdma_channel *chan;
- struct softdma_softc *sc;
-
- sc = device_get_softc(dev);
-
- chan = (struct softdma_channel *)xchan->chan;
-
- if (chan->descs != NULL) {
- free(chan->descs, M_DEVBUF);
- }
-
- chan->used = 0;
-
- return (0);
-}
-
-static int
-softdma_desc_alloc(struct xdma_channel *xchan)
-{
- struct softdma_channel *chan;
- uint32_t nsegments;
-
- chan = (struct softdma_channel *)xchan->chan;
-
- nsegments = chan->descs_num;
-
- chan->descs = malloc(nsegments * sizeof(struct softdma_desc),
- M_DEVBUF, (M_WAITOK | M_ZERO));
-
- return (0);
-}
-
-static int
-softdma_channel_prep_sg(device_t dev, struct xdma_channel *xchan)
-{
- struct softdma_channel *chan;
- struct softdma_desc *desc;
- struct softdma_softc *sc;
- int ret;
- int i;
-
- sc = device_get_softc(dev);
-
- chan = (struct softdma_channel *)xchan->chan;
-
- ret = softdma_desc_alloc(xchan);
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
- for (i = 0; i < chan->descs_num; i++) {
- desc = &chan->descs[i];
-
- if (i == (chan->descs_num - 1)) {
- desc->next = &chan->descs[0];
- } else {
- desc->next = &chan->descs[i+1];
- }
- }
-
- return (0);
-}
-
-static int
-softdma_channel_capacity(device_t dev, xdma_channel_t *xchan,
- uint32_t *capacity)
-{
- struct softdma_channel *chan;
- uint32_t c;
-
- chan = (struct softdma_channel *)xchan->chan;
-
- /* At least one descriptor must be left empty. */
- c = (chan->descs_num - chan->descs_used_count - 1);
-
- *capacity = c;
-
- return (0);
-}
-
-static int
-softdma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
- struct xdma_sglist *sg, uint32_t sg_n)
-{
- struct softdma_channel *chan;
- struct softdma_desc *desc;
- struct softdma_softc *sc;
- uint32_t enqueued;
- uint32_t saved_dir;
- uint32_t tmp;
- uint32_t len;
- int i;
-
- sc = device_get_softc(dev);
-
- chan = (struct softdma_channel *)xchan->chan;
-
- enqueued = 0;
-
- for (i = 0; i < sg_n; i++) {
- len = (uint32_t)sg[i].len;
-
- desc = &chan->descs[chan->idx_head];
- desc->src_addr = sg[i].src_addr;
- desc->dst_addr = sg[i].dst_addr;
- if (sg[i].direction == XDMA_MEM_TO_DEV) {
- desc->src_incr = 1;
- desc->dst_incr = 0;
- } else {
- desc->src_incr = 0;
- desc->dst_incr = 1;
- }
- desc->direction = sg[i].direction;
- saved_dir = sg[i].direction;
- desc->len = len;
- desc->transfered = 0;
- desc->status = 0;
- desc->reserved = 0;
- desc->control = 0;
-
- if (sg[i].first == 1)
- desc->control |= CONTROL_GEN_SOP;
- if (sg[i].last == 1)
- desc->control |= CONTROL_GEN_EOP;
-
- tmp = chan->idx_head;
- chan->idx_head = softdma_next_desc(chan, chan->idx_head);
- atomic_add_int(&chan->descs_used_count, 1);
- desc->control |= CONTROL_OWN;
- enqueued += 1;
- }
-
- if (enqueued == 0)
- return (0);
-
- if (saved_dir == XDMA_MEM_TO_DEV) {
- chan->run = 1;
- wakeup(chan);
- } else
- softdma_memc_write(sc,
- A_ONCHIP_FIFO_MEM_CORE_STATUS_REG_INT_ENABLE,
- SOFTDMA_RX_EVENTS);
-
- return (0);
-}
-
-static int
-softdma_channel_request(device_t dev, struct xdma_channel *xchan,
- struct xdma_request *req)
-{
- struct softdma_channel *chan;
- struct softdma_desc *desc;
- struct softdma_softc *sc;
- int ret;
-
- sc = device_get_softc(dev);
-
- chan = (struct softdma_channel *)xchan->chan;
-
- ret = softdma_desc_alloc(xchan);
- if (ret != 0) {
- device_printf(sc->dev,
- "%s: Can't allocate descriptors.\n", __func__);
- return (-1);
- }
-
- desc = &chan->descs[0];
-
- desc->src_addr = req->src_addr;
- desc->dst_addr = req->dst_addr;
- desc->len = req->block_len;
- desc->src_incr = 1;
- desc->dst_incr = 1;
- desc->next = NULL;
-
- return (0);
-}
-
-static int
-softdma_channel_control(device_t dev, xdma_channel_t *xchan, int cmd)
-{
- struct softdma_channel *chan;
- struct softdma_softc *sc;
-
- sc = device_get_softc(dev);
-
- chan = (struct softdma_channel *)xchan->chan;
-
- switch (cmd) {
- case XDMA_CMD_BEGIN:
- case XDMA_CMD_TERMINATE:
- case XDMA_CMD_PAUSE:
- /* TODO: implement me */
- return (-1);
- }
-
- return (0);
-}
-
-#ifdef FDT
-static int
-softdma_ofw_md_data(device_t dev, pcell_t *cells,
- int ncells, void **ptr)
-{
-
- return (0);
-}
-#endif
-
-static device_method_t softdma_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, softdma_probe),
- DEVMETHOD(device_attach, softdma_attach),
- DEVMETHOD(device_detach, softdma_detach),
-
- /* xDMA Interface */
- DEVMETHOD(xdma_channel_alloc, softdma_channel_alloc),
- DEVMETHOD(xdma_channel_free, softdma_channel_free),
- DEVMETHOD(xdma_channel_request, softdma_channel_request),
- DEVMETHOD(xdma_channel_control, softdma_channel_control),
-
- /* xDMA SG Interface */
- DEVMETHOD(xdma_channel_prep_sg, softdma_channel_prep_sg),
- DEVMETHOD(xdma_channel_submit_sg, softdma_channel_submit_sg),
- DEVMETHOD(xdma_channel_capacity, softdma_channel_capacity),
-
-#ifdef FDT
- DEVMETHOD(xdma_ofw_md_data, softdma_ofw_md_data),
-#endif
-
- DEVMETHOD_END
-};
-
-static driver_t softdma_driver = {
- "softdma",
- softdma_methods,
- sizeof(struct softdma_softc),
-};
-
-EARLY_DRIVER_MODULE(softdma, simplebus, softdma_driver, 0, 0,
- BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LATE);
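The removed softdma_process_tx() above streams an arbitrarily aligned buffer into the Avalon FIFO's 32-bit data register by accumulating bytes in a 64-bit shift buffer (buf/got_bits) and flushing whole words, left-aligning any trailing partial word before tagging it EOP. A minimal userspace sketch of just that packing scheme, with printf standing in for bus_write_4() (illustrative only, not the driver's code):

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for bus_write_4(sc->res[0], A_ONCHIP_FIFO_MEM_CORE_DATA, w). */
static void
emit_word(uint32_t word)
{
	printf("FIFO <- 0x%08x\n", word);
}

/* Accumulate bytes MSB-first; flush one 32-bit word at a time. */
static void
pack_and_emit(const uint8_t *src, size_t len)
{
	uint64_t buf = 0;
	int got_bits = 0;

	while (len-- > 0) {
		buf = (buf << 8) | *src++;
		got_bits += 8;
		if (got_bits == 32) {
			emit_word((uint32_t)buf);
			got_bits = 0;
		}
	}
	/* Left-align a trailing partial word, as the EOP path above does. */
	if (got_bits != 0)
		emit_word((uint32_t)(buf << (32 - got_bits)));
}

int
main(void)
{
	const uint8_t pkt[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };

	pack_and_emit(pkt, sizeof(pkt));	/* 0x11223344, then 0x55660000 */
	return (0);
}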
diff --git a/sys/dev/amdgpio/amdgpio.c b/sys/dev/amdgpio/amdgpio.c
index f39006d95805..20589ff71b0b 100644
--- a/sys/dev/amdgpio/amdgpio.c
+++ b/sys/dev/amdgpio/amdgpio.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2018 Advanced Micro Devices
* All rights reserved.
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Aymeric Wibo
+ * <obiwac@freebsd.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -51,11 +55,11 @@
#include <dev/acpica/acpivar.h>
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "amdgpio.h"
static struct resource_spec amdgpio_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE },
{ -1, 0, 0 }
};
@@ -196,7 +200,7 @@ static int
amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
{
struct amdgpio_softc *sc;
- uint32_t reg, val, allowed;
+ uint32_t reg, val;
sc = device_get_softc(dev);
@@ -204,18 +208,19 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
if (!amdgpio_valid_pin(sc, pin))
return (EINVAL);
- allowed = GPIO_PIN_INPUT | GPIO_PIN_OUTPUT;
+ if ((flags & ~AMDGPIO_DEFAULT_CAPS) != 0) {
+ device_printf(dev, "attempt to set disallowed flags 0x%x "
+ "(allowed: 0x%x)\n", flags, AMDGPIO_DEFAULT_CAPS);
+ return (EINVAL);
+ }
- /*
- * Only directtion flag allowed
- */
- if (flags & ~allowed)
+ /* Either input or output must be selected. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) == 0)
return (EINVAL);
- /*
- * Not both directions simultaneously
- */
- if ((flags & allowed) == allowed)
+ /* Not both directions simultaneously. */
+ if ((flags & (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)) ==
+ (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT))
return (EINVAL);
/* Set the GPIO mode and state */
@@ -224,16 +229,21 @@ amdgpio_pin_setflags(device_t dev, uint32_t pin, uint32_t flags)
reg = AMDGPIO_PIN_REGISTER(pin);
val = amdgpio_read_4(sc, reg);
- if (flags & GPIO_PIN_INPUT) {
+ if ((flags & GPIO_PIN_INPUT) != 0)
val &= ~BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_INPUT;
- } else {
+ else
val |= BIT(OUTPUT_ENABLE_OFF);
- sc->sc_gpio_pins[pin].gp_flags = GPIO_PIN_OUTPUT;
- }
+
+ val &= ~(BIT(PULL_DOWN_ENABLE_OFF) | BIT(PULL_UP_ENABLE_OFF));
+
+ if ((flags & GPIO_PIN_PULLDOWN) != 0)
+ val |= BIT(PULL_DOWN_ENABLE_OFF);
+ if ((flags & GPIO_PIN_PULLUP) != 0)
+ val |= BIT(PULL_UP_ENABLE_OFF);
amdgpio_write_4(sc, reg, val);
+ sc->sc_gpio_pins[pin].gp_flags = flags;
dprintf("pin %d flags 0x%x val 0x%x gp_flags 0x%x\n",
pin, flags, val, sc->sc_gpio_pins[pin].gp_flags);
@@ -359,11 +369,73 @@ amdgpio_probe(device_t dev)
return (rv);
}
+static void
+amdgpio_eoi_locked(struct amdgpio_softc *sc)
+{
+ uint32_t master_reg;
+
+ AMDGPIO_ASSERT_LOCKED(sc);
+ master_reg = amdgpio_read_4(sc, WAKE_INT_MASTER_REG) | EOI_MASK;
+ amdgpio_write_4(sc, WAKE_INT_MASTER_REG, master_reg);
+}
+
+static void
+amdgpio_eoi(struct amdgpio_softc *sc)
+{
+ AMDGPIO_LOCK(sc);
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+}
+
+static int
+amdgpio_intr_filter(void *arg)
+{
+ struct amdgpio_softc *sc = arg;
+ int off, rv = FILTER_STRAY;
+ uint32_t reg;
+
+ /* We can lock in the filter routine as it is MTX_SPIN. */
+ AMDGPIO_LOCK(sc);
+
+ /*
+ * TODO Instead of just reading the registers of all pins, we should
+ * read WAKE_INT_STATUS_REG0/1. A bit set in here denotes a group of
+ * 4 pins where at least one has an interrupt for us. Then we can just
+ * iterate over those 4 pins.
+ *
+ * See GPIO_Interrupt_Status_Index_0 in BKDG.
+ */
+ for (size_t pin = 0; pin < AMD_GPIO_PINS_EXPOSED; pin++) {
+ off = AMDGPIO_PIN_REGISTER(pin);
+ reg = amdgpio_read_4(sc, off);
+ if ((reg & UNSERVICED_INTERRUPT_MASK) == 0)
+ continue;
+ /*
+ * Must write 1's to wake/interrupt status bits to clear them.
+ * We can do this simply by writing back to the register.
+ */
+ amdgpio_write_4(sc, off, reg);
+ }
+
+ amdgpio_eoi_locked(sc);
+ AMDGPIO_UNLOCK(sc);
+
+ rv = FILTER_HANDLED;
+ return (rv);
+}
+
+static void
+amdgpio_intr_handler(void *arg)
+{
+ /* TODO */
+}
+
static int
amdgpio_attach(device_t dev)
{
struct amdgpio_softc *sc;
- int i, pin, bank;
+ int i, pin, bank, reg;
+ uint32_t flags;
sc = device_get_softc(dev);
sc->sc_dev = dev;
@@ -386,6 +458,14 @@ amdgpio_attach(device_t dev)
sc->sc_bst = rman_get_bustag(sc->sc_res[0]);
sc->sc_bsh = rman_get_bushandle(sc->sc_res[0]);
+ /* Set up interrupt handler. */
+ if (bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_MISC | INTR_MPSAFE,
+ amdgpio_intr_filter, amdgpio_intr_handler, sc, &sc->sc_intr_handle)
+ != 0) {
+ device_printf(dev, "couldn't set up interrupt\n");
+ goto err_intr;
+ }
+
/* Initialize all possible pins to be Invalid */
for (i = 0; i < AMD_GPIO_PINS_MAX ; i++) {
snprintf(sc->sc_gpio_pins[i].gp_name, GPIOMAXNAME,
@@ -395,7 +475,12 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[i].gp_flags = 0;
}
- /* Initialize only driver exposed pins with appropriate capabilities */
+ /*
+ * Initialize only driver exposed pins with appropriate capabilities.
+ *
+ * XXX Also mask and disable interrupts on all pins, since we don't
+ * support them at the moment.
+ */
for (i = 0; i < AMD_GPIO_PINS_EXPOSED ; i++) {
pin = kernzp_pins[i].pin_num;
bank = pin/AMD_GPIO_PINS_PER_BANK;
@@ -406,19 +491,28 @@ amdgpio_attach(device_t dev)
sc->sc_gpio_pins[pin].gp_flags =
amdgpio_is_pin_output(sc, pin) ?
GPIO_PIN_OUTPUT : GPIO_PIN_INPUT;
+
+ reg = AMDGPIO_PIN_REGISTER(pin);
+ flags = amdgpio_read_4(sc, reg);
+ flags &= ~(1 << INTERRUPT_ENABLE_OFF);
+ flags &= ~(1 << INTERRUPT_MASK_OFF);
+ amdgpio_write_4(sc, reg, flags);
}
+ amdgpio_eoi(sc);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "could not attach gpiobus\n");
goto err_bus;
}
+ bus_attach_children(dev);
return (0);
err_bus:
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
+err_intr:
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
-
err_rsrc:
AMDGPIO_LOCK_DESTROY(sc);
@@ -433,7 +527,8 @@ amdgpio_detach(device_t dev)
if (sc->sc_busdev)
gpiobus_detach_bus(dev);
-
+ if (sc->sc_intr_handle)
+ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_intr_handle);
bus_release_resources(dev, amdgpio_spec, sc->sc_res);
AMDGPIO_LOCK_DESTROY(sc);
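The new filter relies on the controller's write-1-to-clear status bits: INTERRUPT_STS and WAKE_STS stay latched until software writes a 1 back to them, which is why writing the just-read register value back acknowledges the pin without disturbing its configuration. A standalone sketch of that idiom with the register modeled in memory (illustrative; the real accessors are amdgpio_read_4()/amdgpio_write_4()):

#include <stdint.h>
#include <stdio.h>

/* Mirrors INTERRUPT_STS_OFF (28) and WAKE_STS_OFF (29). */
#define STS_MASK	((1u << 28) | (1u << 29))

/* In-memory model of one pin register with W1C status bits. */
static uint32_t pin_reg = (1u << 28) | 0x5;	/* pending irq + config */

static uint32_t
reg_read(void)
{
	return (pin_reg);
}

static void
reg_write(uint32_t val)
{
	/* Hardware clears exactly the status bits written as 1. */
	pin_reg &= ~(val & STS_MASK);
}

int
main(void)
{
	uint32_t val = reg_read();

	if ((val & STS_MASK) != 0)
		reg_write(val);	/* write back what was read: ack */
	printf("after ack: 0x%08x\n", reg_read());	/* -> 0x00000005 */
	return (0);
}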
diff --git a/sys/dev/amdgpio/amdgpio.h b/sys/dev/amdgpio/amdgpio.h
index aca3039bfc98..3743eba23e17 100644
--- a/sys/dev/amdgpio/amdgpio.h
+++ b/sys/dev/amdgpio/amdgpio.h
@@ -50,7 +50,8 @@
AMD_GPIO_PINS_BANK1 + \
AMD_GPIO_PINS_BANK2 + \
AMD_GPIO_PINS_BANK3)
-#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT)
+#define AMDGPIO_DEFAULT_CAPS (GPIO_PIN_INPUT | GPIO_PIN_OUTPUT | \
+ GPIO_PIN_PULLDOWN | GPIO_PIN_PULLUP)
/* Register related macros */
#define AMDGPIO_PIN_REGISTER(pin) (pin * 4)
@@ -84,6 +85,9 @@
#define INTERRUPT_STS_OFF 28
#define WAKE_STS_OFF 29
+#define UNSERVICED_INTERRUPT_MASK \
+ ((1 << INTERRUPT_STS_OFF) | (1 << WAKE_STS_OFF))
+
#define DB_TMR_OUT_MASK 0xFUL
#define DB_CNTRL_MASK 0x3UL
#define ACTIVE_LEVEL_MASK 0x3UL
@@ -316,12 +320,13 @@ struct amdgpio_softc {
int sc_npins;
int sc_ngroups;
struct mtx sc_mtx;
- struct resource *sc_res[AMD_GPIO_NUM_PIN_BANK + 1];
+ struct resource *sc_res[2];
bus_space_tag_t sc_bst;
bus_space_handle_t sc_bsh;
struct gpio_pin sc_gpio_pins[AMD_GPIO_PINS_MAX];
const struct pin_info *sc_pin_info;
const struct amd_pingroup *sc_groups;
+ void *sc_intr_handle;
};
struct amdgpio_sysctl {
diff --git a/sys/dev/amdpm/amdpm.c b/sys/dev/amdpm/amdpm.c
index f479c0f5bcdd..d744c0aa5d4b 100644
--- a/sys/dev/amdpm/amdpm.c
+++ b/sys/dev/amdpm/amdpm.c
@@ -214,13 +214,13 @@ amdpm_attach(device_t dev)
mtx_init(&amdpm_sc->lock, device_get_nameunit(dev), "amdpm", MTX_DEF);
/* Allocate a new smbus device */
- amdpm_sc->smbus = device_add_child(dev, "smbus", -1);
+ amdpm_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (!amdpm_sc->smbus) {
amdpm_detach(dev);
return (EINVAL);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -229,11 +229,11 @@ static int
amdpm_detach(device_t dev)
{
struct amdpm_softc *amdpm_sc = device_get_softc(dev);
+ int error;
- if (amdpm_sc->smbus) {
- device_delete_child(dev, amdpm_sc->smbus);
- amdpm_sc->smbus = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mtx_destroy(&amdpm_sc->lock);
if (amdpm_sc->res)
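amdpm, and amdsmb below, move from deleting the smbus child by hand to the generic newbus pattern: bus_generic_detach() detaches and deletes every child, failing early if any child refuses, and only then is driver-local state torn down. A schematic version with a hypothetical example_softc (a sketch of the ordering, not a drop-in driver):

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct example_softc {
	struct mtx	lock;		/* hypothetical driver state */
};

static int
example_detach(device_t dev)
{
	struct example_softc *sc = device_get_softc(dev);
	int error;

	/* Children first: detach and delete them, or bail out. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	/* Only then release driver-local state. */
	mtx_destroy(&sc->lock);
	return (0);
}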
diff --git a/sys/dev/amdsbwd/amd_chipset.h b/sys/dev/amdsbwd/amd_chipset.h
index 1bc637a30845..44db7b3168d0 100644
--- a/sys/dev/amdsbwd/amd_chipset.h
+++ b/sys/dev/amdsbwd/amd_chipset.h
@@ -123,6 +123,7 @@
*/
#define AMDFCH41_WDT_FIXED_ADDR 0xfeb00000u
#define AMDFCH41_MMIO_ADDR 0xfed80000u
+#define AMDFCH41_MMIO_PM_OFF 0x0300
#define AMDFCH41_MMIO_SMBUS_OFF 0x0a00
#define AMDFCH41_MMIO_WDT_OFF 0x0b00
@@ -143,5 +144,6 @@
#define AMDFCH41_SMBUS_REVID 0x41
#define AMDCZ_SMBUS_DEVID 0x790b1022
#define AMDCZ49_SMBUS_REVID 0x49
+#define AMDCZ51_SMBUS_REVID 0x51
#define HYGONCZ_SMBUS_DEVID 0x790b1d94
diff --git a/sys/dev/amdsbwd/amdsbwd.c b/sys/dev/amdsbwd/amdsbwd.c
index b04aa8564ccd..d817a7b1364e 100644
--- a/sys/dev/amdsbwd/amdsbwd.c
+++ b/sys/dev/amdsbwd/amdsbwd.c
@@ -255,7 +255,7 @@ amdsbwd_identify(driver_t *driver, device_t parent)
if (resource_disabled("amdsbwd", 0))
return;
- if (device_find_child(parent, "amdsbwd", -1) != NULL)
+ if (device_find_child(parent, "amdsbwd", DEVICE_UNIT_ANY) != NULL)
return;
/*
@@ -271,7 +271,8 @@ amdsbwd_identify(driver_t *driver, device_t parent)
pci_get_devid(smb_dev) != HYGONCZ_SMBUS_DEVID)
return;
- child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd", -1);
+ child = BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "amdsbwd",
+ DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(parent, "add amdsbwd child failed\n");
}
@@ -376,7 +377,6 @@ static void
amdsbwd_probe_fch41(device_t dev, struct resource *pmres, uint32_t *addr)
{
uint8_t val;
- char buf[36];
/*
* Enable decoding of watchdog MMIO address.
@@ -414,9 +414,8 @@ amdsbwd_probe_fch41(device_t dev, struct resource *pmres, uint32_t *addr)
amdsbwd_verbose_printf(dev, "AMDFCH41_PM_DECODE_EN3 value = %#04x\n",
val);
#endif
- snprintf(buf, sizeof(buf), "%s FCH Rev 41h+ Watchdog Timer",
+ device_set_descf(dev, "%s FCH Rev 41h+ Watchdog Timer",
cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD");
- device_set_desc_copy(dev, buf);
}
static int
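This commit repeatedly swaps the snprintf() plus device_set_desc_copy() pair for device_set_descf(), a printf-like setter that formats the description and stores its own copy (the same conversion appears in amdsmn, arcmsr, and ata-pci below). A minimal sketch, with a hypothetical vendor argument:

#include <sys/param.h>
#include <sys/bus.h>

/*
 * Sketch only; "vendor" is a made-up parameter.  device_set_descf()
 * removes the need for a stack buffer and a separate copy call.
 */
static void
example_set_desc(device_t dev, const char *vendor)
{
	device_set_descf(dev, "%s FCH Rev 41h+ Watchdog Timer", vendor);
}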
diff --git a/sys/dev/amdsmb/amdsmb.c b/sys/dev/amdsmb/amdsmb.c
index 565e73cf1f64..3d2e7a5e0c19 100644
--- a/sys/dev/amdsmb/amdsmb.c
+++ b/sys/dev/amdsmb/amdsmb.c
@@ -159,13 +159,13 @@ amdsmb_attach(device_t dev)
mtx_init(&amdsmb_sc->lock, device_get_nameunit(dev), "amdsmb", MTX_DEF);
/* Allocate a new smbus device */
- amdsmb_sc->smbus = device_add_child(dev, "smbus", -1);
+ amdsmb_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (!amdsmb_sc->smbus) {
amdsmb_detach(dev);
return (EINVAL);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -174,11 +174,11 @@ static int
amdsmb_detach(device_t dev)
{
struct amdsmb_softc *amdsmb_sc = device_get_softc(dev);
+ int error;
- if (amdsmb_sc->smbus) {
- device_delete_child(dev, amdsmb_sc->smbus);
- amdsmb_sc->smbus = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mtx_destroy(&amdsmb_sc->lock);
if (amdsmb_sc->res)
diff --git a/sys/dev/amdsmn/amdsmn.c b/sys/dev/amdsmn/amdsmn.c
index ddb5be4c2c3c..d19103738ec6 100644
--- a/sys/dev/amdsmn/amdsmn.c
+++ b/sys/dev/amdsmn/amdsmn.c
@@ -25,7 +25,7 @@
*/
/*
- * Driver for the AMD Family 15h and 17h CPU System Management Network.
+ * Driver for the AMD Family 15h, 17h, 19h, 1Ah CPU System Management Network.
*/
#include <sys/param.h>
@@ -57,9 +57,15 @@
#define PCI_DEVICE_ID_AMD_17H_ROOT 0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480 /* Also M70H, F19H M00H/M20H */
-#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
+#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630 /* Also F19H M50H */
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT 0x14a4
-#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8
+#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
+#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8 /* Also F1AH M40H */
+#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
+#define PCI_DEVICE_ID_AMD_1AH_M00H_ROOT 0x153a
+#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT 0x1507
+#define PCI_DEVICE_ID_AMD_1AH_M60H_ROOT 0x1122
+
struct pciid;
struct amdsmn_softc {
@@ -111,10 +117,40 @@ static const struct pciid {
},
{
.amdsmn_vendorid = CPU_VENDOR_AMD,
+ .amdsmn_deviceid = PCI_DEVICE_ID_AMD_19H_M40H_ROOT,
+ .amdsmn_addr_reg = F17H_SMN_ADDR_REG,
+ .amdsmn_data_reg = F17H_SMN_DATA_REG,
+ },
+ {
+ .amdsmn_vendorid = CPU_VENDOR_AMD,
.amdsmn_deviceid = PCI_DEVICE_ID_AMD_19H_M60H_ROOT,
.amdsmn_addr_reg = F17H_SMN_ADDR_REG,
.amdsmn_data_reg = F17H_SMN_DATA_REG,
},
+ {
+ .amdsmn_vendorid = CPU_VENDOR_AMD,
+ .amdsmn_deviceid = PCI_DEVICE_ID_AMD_19H_M70H_ROOT,
+ .amdsmn_addr_reg = F17H_SMN_ADDR_REG,
+ .amdsmn_data_reg = F17H_SMN_DATA_REG,
+ },
+ {
+ .amdsmn_vendorid = CPU_VENDOR_AMD,
+ .amdsmn_deviceid = PCI_DEVICE_ID_AMD_1AH_M00H_ROOT,
+ .amdsmn_addr_reg = F17H_SMN_ADDR_REG,
+ .amdsmn_data_reg = F17H_SMN_DATA_REG,
+ },
+ {
+ .amdsmn_vendorid = CPU_VENDOR_AMD,
+ .amdsmn_deviceid = PCI_DEVICE_ID_AMD_1AH_M20H_ROOT,
+ .amdsmn_addr_reg = F17H_SMN_ADDR_REG,
+ .amdsmn_data_reg = F17H_SMN_DATA_REG,
+ },
+ {
+ .amdsmn_vendorid = CPU_VENDOR_AMD,
+ .amdsmn_deviceid = PCI_DEVICE_ID_AMD_1AH_M60H_ROOT,
+ .amdsmn_addr_reg = F17H_SMN_ADDR_REG,
+ .amdsmn_data_reg = F17H_SMN_DATA_REG,
+ },
};
/*
@@ -171,12 +207,12 @@ amdsmn_identify(driver_t *driver, device_t parent)
device_t child;
/* Make sure we're not being doubly invoked. */
- if (device_find_child(parent, "amdsmn", -1) != NULL)
+ if (device_find_child(parent, "amdsmn", DEVICE_UNIT_ANY) != NULL)
return;
if (!amdsmn_match(parent, NULL))
return;
- child = device_add_child(parent, "amdsmn", -1);
+ child = device_add_child(parent, "amdsmn", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(parent, "add amdsmn child failed\n");
}
@@ -185,7 +221,6 @@ static int
amdsmn_probe(device_t dev)
{
uint32_t family;
- char buf[64];
if (resource_disabled("amdsmn", 0))
return (ENXIO);
@@ -198,13 +233,13 @@ amdsmn_probe(device_t dev)
case 0x15:
case 0x17:
case 0x19:
+ case 0x1a:
break;
default:
return (ENXIO);
}
- snprintf(buf, sizeof(buf), "AMD Family %xh System Management Network",
+ device_set_descf(dev, "AMD Family %02Xh System Management Network",
family);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_GENERIC);
}
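Every new table entry reuses the same F17H_SMN_ADDR_REG/F17H_SMN_DATA_REG pair because SMN registers are reached indirectly: software writes the SMN address into one PCI config register and reads the value back through the other. A hedged sketch of that idiom (not the driver's actual amdsmn_read(), which also serializes the two accesses with a mutex):

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/* addr_reg/data_reg would come from the matched pciid table entry. */
static uint32_t
smn_read_sketch(device_t dev, int addr_reg, int data_reg, uint32_t smn_addr)
{
	pci_write_config(dev, addr_reg, smn_addr, 4);	/* select address */
	return (pci_read_config(dev, data_reg, 4));	/* fetch the value */
}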
diff --git a/sys/dev/amdsmu/amdsmu.c b/sys/dev/amdsmu/amdsmu.c
new file mode 100644
index 000000000000..416f875c6176
--- /dev/null
+++ b/sys/dev/amdsmu/amdsmu.c
@@ -0,0 +1,466 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * This software was developed by Aymeric Wibo <obiwac@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+
+#include <dev/pci/pcivar.h>
+#include <dev/amdsmu/amdsmu.h>
+
+static bool
+amdsmu_match(device_t dev, const struct amdsmu_product **product_out)
+{
+ const uint16_t vendorid = pci_get_vendor(dev);
+ const uint16_t deviceid = pci_get_device(dev);
+
+ for (size_t i = 0; i < nitems(amdsmu_products); i++) {
+ const struct amdsmu_product *prod = &amdsmu_products[i];
+
+ if (vendorid == prod->amdsmu_vendorid &&
+ deviceid == prod->amdsmu_deviceid) {
+ if (product_out != NULL)
+ *product_out = prod;
+ return (true);
+ }
+ }
+ return (false);
+}
+
+static void
+amdsmu_identify(driver_t *driver, device_t parent)
+{
+ if (device_find_child(parent, "amdsmu", DEVICE_UNIT_ANY) != NULL)
+ return;
+
+ if (amdsmu_match(parent, NULL)) {
+ if (device_add_child(parent, "amdsmu", DEVICE_UNIT_ANY) == NULL)
+ device_printf(parent, "add amdsmu child failed\n");
+ }
+}
+
+static int
+amdsmu_probe(device_t dev)
+{
+ if (resource_disabled("amdsmu", 0))
+ return (ENXIO);
+ if (!amdsmu_match(device_get_parent(dev), NULL))
+ return (ENXIO);
+ device_set_descf(dev, "AMD System Management Unit");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static enum amdsmu_res
+amdsmu_wait_res(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ enum amdsmu_res res;
+
+ /*
+ * The SMU has a response ready for us when the response register is
+ * set. Otherwise, we must wait.
+ */
+ for (size_t i = 0; i < SMU_RES_READ_MAX; i++) {
+ res = amdsmu_read4(sc, SMU_REG_RESPONSE);
+ if (res != SMU_RES_WAIT)
+ return (res);
+ pause_sbt("amdsmu", ustosbt(SMU_RES_READ_PERIOD_US), 0,
+ C_HARDCLOCK);
+ }
+ device_printf(dev, "timed out waiting for response from SMU\n");
+ return (SMU_RES_WAIT);
+}
+
+static int
+amdsmu_cmd(device_t dev, enum amdsmu_msg msg, uint32_t arg, uint32_t *ret)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ enum amdsmu_res res;
+
+ /* Wait for SMU to be ready. */
+ if (amdsmu_wait_res(dev) == SMU_RES_WAIT)
+ return (ETIMEDOUT);
+
+ /* Clear previous response. */
+ amdsmu_write4(sc, SMU_REG_RESPONSE, SMU_RES_WAIT);
+
+ /* Write out command to registers. */
+ amdsmu_write4(sc, SMU_REG_MESSAGE, msg);
+ amdsmu_write4(sc, SMU_REG_ARGUMENT, arg);
+
+ /* Wait for SMU response and handle it. */
+ res = amdsmu_wait_res(dev);
+
+ switch (res) {
+ case SMU_RES_WAIT:
+ return (ETIMEDOUT);
+ case SMU_RES_OK:
+ if (ret != NULL)
+ *ret = amdsmu_read4(sc, SMU_REG_ARGUMENT);
+ return (0);
+ case SMU_RES_REJECT_BUSY:
+ device_printf(dev, "SMU is busy\n");
+ return (EBUSY);
+ case SMU_RES_REJECT_PREREQ:
+ case SMU_RES_UNKNOWN:
+ case SMU_RES_FAILED:
+ device_printf(dev, "SMU error: %02x\n", res);
+ return (EIO);
+ }
+
+ return (EINVAL);
+}
+
+static int
+amdsmu_get_vers(device_t dev)
+{
+ int err;
+ uint32_t smu_vers;
+ struct amdsmu_softc *sc = device_get_softc(dev);
+
+ err = amdsmu_cmd(dev, SMU_MSG_GETSMUVERSION, 0, &smu_vers);
+ if (err != 0) {
+ device_printf(dev, "failed to get SMU version\n");
+ return (err);
+ }
+ sc->smu_program = (smu_vers >> 24) & 0xFF;
+ sc->smu_maj = (smu_vers >> 16) & 0xFF;
+ sc->smu_min = (smu_vers >> 8) & 0xFF;
+ sc->smu_rev = smu_vers & 0xFF;
+ device_printf(dev, "SMU version: %d.%d.%d (program %d)\n",
+ sc->smu_maj, sc->smu_min, sc->smu_rev, sc->smu_program);
+
+ return (0);
+}
+
+static int
+amdsmu_get_ip_blocks(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ const uint16_t deviceid = pci_get_device(dev);
+ int err;
+ struct amdsmu_metrics *m = &sc->metrics;
+ bool active;
+ char sysctl_descr[32];
+
+ /* Get IP block count. */
+ switch (deviceid) {
+ case PCI_DEVICEID_AMD_REMBRANDT_ROOT:
+ sc->ip_block_count = 12;
+ break;
+ case PCI_DEVICEID_AMD_PHOENIX_ROOT:
+ sc->ip_block_count = 21;
+ break;
+ /* TODO How many IP blocks does Strix Point (and the others) have? */
+ case PCI_DEVICEID_AMD_STRIX_POINT_ROOT:
+ default:
+ sc->ip_block_count = nitems(amdsmu_ip_blocks_names);
+ }
+ KASSERT(sc->ip_block_count <= nitems(amdsmu_ip_blocks_names),
+ ("too many IP blocks for array"));
+
+ /* Get and print out IP blocks. */
+ err = amdsmu_cmd(dev, SMU_MSG_GET_SUP_CONSTRAINTS, 0,
+ &sc->active_ip_blocks);
+ if (err != 0) {
+ device_printf(dev, "failed to get IP blocks\n");
+ return (err);
+ }
+ device_printf(dev, "Active IP blocks: ");
+ for (size_t i = 0; i < sc->ip_block_count; i++) {
+ active = (sc->active_ip_blocks & (1 << i)) != 0;
+ sc->ip_blocks_active[i] = active;
+ if (!active)
+ continue;
+ printf("%s%s", amdsmu_ip_blocks_names[i],
+ i + 1 < sc->ip_block_count ? " " : "\n");
+ }
+
+ /* Create a sysctl node for IP blocks. */
+ sc->ip_blocks_sysctlnode = SYSCTL_ADD_NODE(sc->sysctlctx,
+ SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO, "ip_blocks",
+ CTLFLAG_RD, NULL, "SMU metrics");
+ if (sc->ip_blocks_sysctlnode == NULL) {
+ device_printf(dev, "could not add sysctl node for IP blocks\n");
+ return (ENOMEM);
+ }
+
+ /* Create a sysctl node for each IP block. */
+ for (size_t i = 0; i < sc->ip_block_count; i++) {
+ /* Create the sysctl node itself for the IP block. */
+ snprintf(sysctl_descr, sizeof sysctl_descr,
+ "Metrics about the %s AMD IP block",
+ amdsmu_ip_blocks_names[i]);
+ sc->ip_block_sysctlnodes[i] = SYSCTL_ADD_NODE(sc->sysctlctx,
+ SYSCTL_CHILDREN(sc->ip_blocks_sysctlnode), OID_AUTO,
+ amdsmu_ip_blocks_names[i], CTLFLAG_RD, NULL, sysctl_descr);
+ if (sc->ip_block_sysctlnodes[i] == NULL) {
+ device_printf(dev,
+ "could not add sysctl node for \"%s\"\n", sysctl_descr);
+ continue;
+ }
+ /*
+ * Create sysctls for if the IP block is currently active, last
+ * active time, and total active time.
+ */
+ SYSCTL_ADD_BOOL(sc->sysctlctx,
+ SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO,
+ "active", CTLFLAG_RD, &sc->ip_blocks_active[i], 0,
+ "IP block is currently active");
+ SYSCTL_ADD_U64(sc->sysctlctx,
+ SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO,
+ "last_time", CTLFLAG_RD, &m->ip_block_last_active_time[i],
+ 0, "How long the IP block was active for during the last"
+ " sleep (us)");
+#ifdef IP_BLOCK_TOTAL_ACTIVE_TIME
+ SYSCTL_ADD_U64(sc->sysctlctx,
+ SYSCTL_CHILDREN(sc->ip_block_sysctlnodes[i]), OID_AUTO,
+ "total_time", CTLFLAG_RD, &m->ip_block_total_active_time[i],
+ 0, "How long the IP block was active for during sleep in"
+ " total (us)");
+#endif
+ }
+ return (0);
+}
+
+static int
+amdsmu_init_metrics(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ int err;
+ uint32_t metrics_addr_lo, metrics_addr_hi;
+ uint64_t metrics_addr;
+
+ /* Get physical address of logging buffer. */
+ err = amdsmu_cmd(dev, SMU_MSG_LOG_GETDRAM_ADDR_LO, 0, &metrics_addr_lo);
+ if (err != 0)
+ return (err);
+ err = amdsmu_cmd(dev, SMU_MSG_LOG_GETDRAM_ADDR_HI, 0, &metrics_addr_hi);
+ if (err != 0)
+ return (err);
+ metrics_addr = ((uint64_t) metrics_addr_hi << 32) | metrics_addr_lo;
+
+ /* Map memory of logging buffer. */
+ err = bus_space_map(sc->bus_tag, metrics_addr,
+ sizeof(struct amdsmu_metrics), 0, &sc->metrics_space);
+ if (err != 0) {
+ device_printf(dev, "could not map bus space for SMU metrics\n");
+ return (err);
+ }
+
+ /* Start logging for metrics. */
+ amdsmu_cmd(dev, SMU_MSG_LOG_RESET, 0, NULL);
+ amdsmu_cmd(dev, SMU_MSG_LOG_START, 0, NULL);
+ return (0);
+}
+
+static int
+amdsmu_dump_metrics(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ int err;
+
+ err = amdsmu_cmd(dev, SMU_MSG_LOG_DUMP_DATA, 0, NULL);
+ if (err != 0) {
+ device_printf(dev, "failed to dump metrics\n");
+ return (err);
+ }
+ bus_space_read_region_4(sc->bus_tag, sc->metrics_space, 0,
+ (uint32_t *)&sc->metrics, sizeof(sc->metrics) / sizeof(uint32_t));
+
+ return (0);
+}
+
+static void
+amdsmu_fetch_idlemask(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+
+ sc->idlemask = amdsmu_read4(sc, SMU_REG_IDLEMASK);
+}
+
+static int
+amdsmu_attach(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ int err;
+ uint32_t physbase_addr_lo, physbase_addr_hi;
+ uint64_t physbase_addr;
+ int rid = 0;
+ struct sysctl_oid *node;
+
+ /*
+ * Find physical base address for SMU.
+ * XXX I am a little confused about the masks here. I'm just copying
+ * what Linux does in the amd-pmc driver to get the base address.
+ */
+ pci_write_config(dev, SMU_INDEX_ADDRESS, SMU_PHYSBASE_ADDR_LO, 4);
+ physbase_addr_lo = pci_read_config(dev, SMU_INDEX_DATA, 4) & 0xFFF00000;
+
+ pci_write_config(dev, SMU_INDEX_ADDRESS, SMU_PHYSBASE_ADDR_HI, 4);
+ physbase_addr_hi = pci_read_config(dev, SMU_INDEX_DATA, 4) & 0x0000FFFF;
+
+ physbase_addr = (uint64_t)physbase_addr_hi << 32 | physbase_addr_lo;
+
+ /* Map memory for SMU and its registers. */
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "could not allocate resource\n");
+ return (ENXIO);
+ }
+
+ sc->bus_tag = rman_get_bustag(sc->res);
+
+ if (bus_space_map(sc->bus_tag, physbase_addr,
+ SMU_MEM_SIZE, 0, &sc->smu_space) != 0) {
+ device_printf(dev, "could not map bus space for SMU\n");
+ err = ENXIO;
+ goto err_smu_space;
+ }
+ if (bus_space_map(sc->bus_tag, physbase_addr + SMU_REG_SPACE_OFF,
+ SMU_MEM_SIZE, 0, &sc->reg_space) != 0) {
+ device_printf(dev, "could not map bus space for SMU regs\n");
+ err = ENXIO;
+ goto err_reg_space;
+ }
+
+ /* sysctl stuff. */
+ sc->sysctlctx = device_get_sysctl_ctx(dev);
+ sc->sysctlnode = device_get_sysctl_tree(dev);
+
+ /* Get version & add sysctls. */
+ if ((err = amdsmu_get_vers(dev)) != 0)
+ goto err_dump;
+
+ SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO,
+ "program", CTLFLAG_RD, &sc->smu_program, 0, "SMU program number");
+ SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO,
+ "version_major", CTLFLAG_RD, &sc->smu_maj, 0,
+ "SMU firmware major version number");
+ SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO,
+ "version_minor", CTLFLAG_RD, &sc->smu_min, 0,
+ "SMU firmware minor version number");
+ SYSCTL_ADD_U8(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO,
+ "version_revision", CTLFLAG_RD, &sc->smu_rev, 0,
+ "SMU firmware revision number");
+
+ /* Set up for getting metrics & add sysctls. */
+ if ((err = amdsmu_init_metrics(dev)) != 0)
+ goto err_dump;
+ if ((err = amdsmu_dump_metrics(dev)) != 0)
+ goto err_dump;
+
+ node = SYSCTL_ADD_NODE(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode),
+ OID_AUTO, "metrics", CTLFLAG_RD, NULL, "SMU metrics");
+ if (node == NULL) {
+ device_printf(dev, "could not add sysctl node for metrics\n");
+ err = ENOMEM;
+ goto err_dump;
+ }
+
+ SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "table_version", CTLFLAG_RD, &sc->metrics.table_version, 0,
+ "SMU metrics table version");
+ SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "hint_count", CTLFLAG_RD, &sc->metrics.hint_count, 0,
+ "How many times the sleep hint was set");
+ SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "s0i3_last_entry_status", CTLFLAG_RD,
+ &sc->metrics.s0i3_last_entry_status, 0,
+ "1 if last S0i3 entry was successful");
+ SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "time_last_in_s0i2", CTLFLAG_RD, &sc->metrics.time_last_in_s0i2, 0,
+ "Time spent in S0i2 during last sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "time_last_entering_s0i3", CTLFLAG_RD,
+ &sc->metrics.time_last_entering_s0i3, 0,
+ "Time spent entering S0i3 during last sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "total_time_entering_s0i3", CTLFLAG_RD,
+ &sc->metrics.total_time_entering_s0i3, 0,
+ "Total time spent entering S0i3 (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "time_last_resuming", CTLFLAG_RD, &sc->metrics.time_last_resuming,
+ 0, "Time spent resuming from last sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "total_time_resuming", CTLFLAG_RD, &sc->metrics.total_time_resuming,
+ 0, "Total time spent resuming from sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "time_last_in_s0i3", CTLFLAG_RD, &sc->metrics.time_last_in_s0i3, 0,
+ "Time spent in S0i3 during last sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "total_time_in_s0i3", CTLFLAG_RD, &sc->metrics.total_time_in_s0i3,
+ 0, "Total time spent in S0i3 (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "time_last_in_sw_drips", CTLFLAG_RD,
+ &sc->metrics.time_last_in_sw_drips, 0,
+ "Time spent in awake during last sleep (us)");
+ SYSCTL_ADD_U64(sc->sysctlctx, SYSCTL_CHILDREN(node), OID_AUTO,
+ "total_time_in_sw_drips", CTLFLAG_RD,
+ &sc->metrics.total_time_in_sw_drips, 0,
+ "Total time spent awake (us)");
+
+ /* Get IP blocks & add sysctls. */
+ err = amdsmu_get_ip_blocks(dev);
+ if (err != 0)
+ goto err_dump;
+
+ /* Get idlemask & add sysctl. */
+ amdsmu_fetch_idlemask(dev);
+ SYSCTL_ADD_U32(sc->sysctlctx, SYSCTL_CHILDREN(sc->sysctlnode), OID_AUTO,
+ "idlemask", CTLFLAG_RD, &sc->idlemask, 0, "SMU idlemask. This "
+ "value is not documented - only used to help AMD internally debug "
+ "issues");
+
+ return (0);
+err_dump:
+ bus_space_unmap(sc->bus_tag, sc->reg_space, SMU_MEM_SIZE);
+err_reg_space:
+ bus_space_unmap(sc->bus_tag, sc->smu_space, SMU_MEM_SIZE);
+err_smu_space:
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res);
+ return (err);
+}
+
+static int
+amdsmu_detach(device_t dev)
+{
+ struct amdsmu_softc *sc = device_get_softc(dev);
+ int rid = 0;
+
+ bus_space_unmap(sc->bus_tag, sc->smu_space, SMU_MEM_SIZE);
+ bus_space_unmap(sc->bus_tag, sc->reg_space, SMU_MEM_SIZE);
+
+ bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res);
+ return (0);
+}
+
+static device_method_t amdsmu_methods[] = {
+ DEVMETHOD(device_identify, amdsmu_identify),
+ DEVMETHOD(device_probe, amdsmu_probe),
+ DEVMETHOD(device_attach, amdsmu_attach),
+ DEVMETHOD(device_detach, amdsmu_detach),
+ DEVMETHOD_END
+};
+
+static driver_t amdsmu_driver = {
+ "amdsmu",
+ amdsmu_methods,
+ sizeof(struct amdsmu_softc),
+};
+
+DRIVER_MODULE(amdsmu, hostb, amdsmu_driver, NULL, NULL);
+MODULE_VERSION(amdsmu, 1);
+MODULE_DEPEND(amdsmu, amdsmn, 1, 1, 1);
+MODULE_PNP_INFO("U16:vendor;U16:device", pci, amdsmu, amdsmu_products,
+ nitems(amdsmu_products));
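Everything the driver learns ends up under its sysctl tree, so a userland consumer only needs sysctlbyname(3). A sketch reading one of the metrics nodes created above (unit 0 is assumed; the node name matches what amdsmu_attach() registers):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t us;
	size_t len = sizeof(us);

	if (sysctlbyname("dev.amdsmu.0.metrics.time_last_in_s0i3",
	    &us, &len, NULL, 0) == -1) {
		perror("sysctlbyname");
		return (1);
	}
	printf("last S0i3 residency: %ju us\n", (uintmax_t)us);
	return (0);
}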
diff --git a/sys/dev/amdsmu/amdsmu.h b/sys/dev/amdsmu/amdsmu.h
new file mode 100644
index 000000000000..025887f7fe5a
--- /dev/null
+++ b/sys/dev/amdsmu/amdsmu.h
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * This software was developed by Aymeric Wibo <obiwac@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+#ifndef _AMDSMU_H_
+#define _AMDSMU_H_
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <machine/bus.h>
+#include <x86/cputypes.h>
+
+#include <dev/amdsmu/amdsmu_reg.h>
+
+#define SMU_RES_READ_PERIOD_US 50
+#define SMU_RES_READ_MAX 20000
+
+static const struct amdsmu_product {
+ uint16_t amdsmu_vendorid;
+ uint16_t amdsmu_deviceid;
+} amdsmu_products[] = {
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_REMBRANDT_ROOT },
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_PHOENIX_ROOT },
+ { CPU_VENDOR_AMD, PCI_DEVICEID_AMD_STRIX_POINT_ROOT },
+};
+
+static const char *const amdsmu_ip_blocks_names[] = {
+ "DISPLAY",
+ "CPU",
+ "GFX",
+ "VDD",
+ "ACP",
+ "VCN",
+ "ISP",
+ "NBIO",
+ "DF",
+ "USB3_0",
+ "USB3_1",
+ "LAPIC",
+ "USB3_2",
+ "USB3_3",
+ "USB3_4",
+ "USB4_0",
+ "USB4_1",
+ "MPM",
+ "JPEG",
+ "IPU",
+ "UMSCH",
+ "VPE",
+};
+
+CTASSERT(nitems(amdsmu_ip_blocks_names) <= 32);
+
+struct amdsmu_softc {
+ struct sysctl_ctx_list *sysctlctx;
+ struct sysctl_oid *sysctlnode;
+
+ struct resource *res;
+ bus_space_tag_t bus_tag;
+
+ bus_space_handle_t smu_space;
+ bus_space_handle_t reg_space;
+
+ uint8_t smu_program;
+ uint8_t smu_maj, smu_min, smu_rev;
+
+ uint32_t active_ip_blocks;
+ struct sysctl_oid *ip_blocks_sysctlnode;
+ size_t ip_block_count;
+ struct sysctl_oid *ip_block_sysctlnodes[nitems(amdsmu_ip_blocks_names)];
+ bool ip_blocks_active[nitems(amdsmu_ip_blocks_names)];
+
+ bus_space_handle_t metrics_space;
+ struct amdsmu_metrics metrics;
+ uint32_t idlemask;
+};
+
+static inline uint32_t
+amdsmu_read4(const struct amdsmu_softc *sc, bus_size_t reg)
+{
+ return (bus_space_read_4(sc->bus_tag, sc->reg_space, reg));
+}
+
+static inline void
+amdsmu_write4(const struct amdsmu_softc *sc, bus_size_t reg, uint32_t val)
+{
+ bus_space_write_4(sc->bus_tag, sc->reg_space, reg, val);
+}
+
+#endif /* _AMDSMU_H_ */
diff --git a/sys/dev/amdsmu/amdsmu_reg.h b/sys/dev/amdsmu/amdsmu_reg.h
new file mode 100644
index 000000000000..e685b34e6883
--- /dev/null
+++ b/sys/dev/amdsmu/amdsmu_reg.h
@@ -0,0 +1,84 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * This software was developed by Aymeric Wibo <obiwac@freebsd.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+#ifndef _AMDSMU_REG_H_
+#define _AMDSMU_REG_H_
+
+#include <sys/types.h>
+
+/*
+ * TODO These are in common with amdtemp; should we find a way to factor these
+ * out? Also, there are way more of these. I couldn't find a centralized place
+ * which lists them though.
+ */
+#define PCI_DEVICEID_AMD_REMBRANDT_ROOT 0x14B5
+#define PCI_DEVICEID_AMD_PHOENIX_ROOT 0x14E8
+#define PCI_DEVICEID_AMD_STRIX_POINT_ROOT 0x14A4
+
+#define SMU_INDEX_ADDRESS 0xB8
+#define SMU_INDEX_DATA 0xBC
+
+#define SMU_PHYSBASE_ADDR_LO 0x13B102E8
+#define SMU_PHYSBASE_ADDR_HI 0x13B102EC
+
+#define SMU_MEM_SIZE 0x1000
+#define SMU_REG_SPACE_OFF 0x10000
+
+#define SMU_REG_MESSAGE 0x538
+#define SMU_REG_RESPONSE 0x980
+#define SMU_REG_ARGUMENT 0x9BC
+#define SMU_REG_IDLEMASK 0xD14
+
+enum amdsmu_res {
+ SMU_RES_WAIT = 0x00,
+ SMU_RES_OK = 0x01,
+ SMU_RES_REJECT_BUSY = 0xFC,
+ SMU_RES_REJECT_PREREQ = 0xFD,
+ SMU_RES_UNKNOWN = 0xFE,
+ SMU_RES_FAILED = 0xFF,
+};
+
+enum amdsmu_msg {
+ SMU_MSG_GETSMUVERSION = 0x02,
+ SMU_MSG_LOG_GETDRAM_ADDR_HI = 0x04,
+ SMU_MSG_LOG_GETDRAM_ADDR_LO = 0x05,
+ SMU_MSG_LOG_START = 0x06,
+ SMU_MSG_LOG_RESET = 0x07,
+ SMU_MSG_LOG_DUMP_DATA = 0x08,
+ SMU_MSG_GET_SUP_CONSTRAINTS = 0x09,
+};
+
+/* XXX Copied from Linux struct smu_metrics. */
+struct amdsmu_metrics {
+ uint32_t table_version;
+ uint32_t hint_count;
+ uint32_t s0i3_last_entry_status;
+ uint32_t time_last_in_s0i2;
+ uint64_t time_last_entering_s0i3;
+ uint64_t total_time_entering_s0i3;
+ uint64_t time_last_resuming;
+ uint64_t total_time_resuming;
+ uint64_t time_last_in_s0i3;
+ uint64_t total_time_in_s0i3;
+ uint64_t time_last_in_sw_drips;
+ uint64_t total_time_in_sw_drips;
+ /*
+ * This is how long each IP block was active for (us), i.e., blocking
+ * entry to S0i3. In Linux, these are called "timecondition_notmet_*".
+ *
+ * XXX Total active time for IP blocks seems to be buggy and reporting
+ * garbage (at least on Phoenix), so it's disabled for now. The last
+ * active time for the USB4_0 IP block also seems to be buggy.
+ */
+ uint64_t ip_block_last_active_time[32];
+#ifdef IP_BLOCK_TOTAL_ACTIVE_TIME
+ uint64_t ip_block_total_active_time[32];
+#endif
+} __attribute__((packed));
+
+#endif /* _AMDSMU_REG_H_ */
diff --git a/sys/dev/amdtemp/amdtemp.c b/sys/dev/amdtemp/amdtemp.c
index 56772432773e..79ccdc8c79fb 100644
--- a/sys/dev/amdtemp/amdtemp.c
+++ b/sys/dev/amdtemp/amdtemp.c
@@ -112,9 +112,14 @@ struct amdtemp_softc {
#define DEVICEID_AMD_HOSTB17H_ROOT 0x1450
#define DEVICEID_AMD_HOSTB17H_M10H_ROOT 0x15d0
#define DEVICEID_AMD_HOSTB17H_M30H_ROOT 0x1480 /* Also M70H, F19H M00H/M20H */
-#define DEVICEID_AMD_HOSTB17H_M60H_ROOT 0x1630
+#define DEVICEID_AMD_HOSTB17H_M60H_ROOT 0x1630 /* Also F19H M50H */
#define DEVICEID_AMD_HOSTB19H_M10H_ROOT 0x14a4
-#define DEVICEID_AMD_HOSTB19H_M60H_ROOT 0x14d8
+#define DEVICEID_AMD_HOSTB19H_M40H_ROOT 0x14b5
+#define DEVICEID_AMD_HOSTB19H_M60H_ROOT 0x14d8 /* Also F1AH M40H */
+#define DEVICEID_AMD_HOSTB19H_M70H_ROOT 0x14e8
+#define DEVICEID_AMD_HOSTB1AH_M00H_ROOT 0x153a
+#define DEVICEID_AMD_HOSTB1AH_M20H_ROOT 0x1507
+#define DEVICEID_AMD_HOSTB1AH_M60H_ROOT 0x1122
static const struct amdtemp_product {
uint16_t amdtemp_vendorid;
@@ -140,7 +145,12 @@ static const struct amdtemp_product {
{ VENDORID_AMD, DEVICEID_AMD_HOSTB17H_M30H_ROOT, false },
{ VENDORID_AMD, DEVICEID_AMD_HOSTB17H_M60H_ROOT, false },
{ VENDORID_AMD, DEVICEID_AMD_HOSTB19H_M10H_ROOT, false },
+ { VENDORID_AMD, DEVICEID_AMD_HOSTB19H_M40H_ROOT, false },
{ VENDORID_AMD, DEVICEID_AMD_HOSTB19H_M60H_ROOT, false },
+ { VENDORID_AMD, DEVICEID_AMD_HOSTB19H_M70H_ROOT, false },
+ { VENDORID_AMD, DEVICEID_AMD_HOSTB1AH_M00H_ROOT, false },
+ { VENDORID_AMD, DEVICEID_AMD_HOSTB1AH_M20H_ROOT, false },
+ { VENDORID_AMD, DEVICEID_AMD_HOSTB1AH_M60H_ROOT, false },
};
/*
@@ -162,7 +172,7 @@ static const struct amdtemp_product {
#define AMDTEMP_15H_M60H_REPTMP_CTRL 0xd8200ca4
/*
- * Reported Temperature, Family 17h
+ * Reported Temperature, Family 17h - 1Ah
*
* According to AMD OSRR for 17H, section 4.2.1, bits 31-21 of this register
* provide the current temp. bit 19, when clear, means the temp is reported in
@@ -226,6 +236,7 @@ static int32_t amdtemp_gettemp15hm60h(device_t dev, amdsensor_t sensor);
static int32_t amdtemp_gettemp17h(device_t dev, amdsensor_t sensor);
static void amdtemp_probe_ccd_sensors17h(device_t dev, uint32_t model);
static void amdtemp_probe_ccd_sensors19h(device_t dev, uint32_t model);
+static void amdtemp_probe_ccd_sensors1ah(device_t dev, uint32_t model);
static int amdtemp_sysctl(SYSCTL_HANDLER_ARGS);
static device_method_t amdtemp_methods[] = {
@@ -276,11 +287,11 @@ amdtemp_identify(driver_t *driver, device_t parent)
device_t child;
/* Make sure we're not being doubly invoked. */
- if (device_find_child(parent, "amdtemp", -1) != NULL)
+ if (device_find_child(parent, "amdtemp", DEVICE_UNIT_ANY) != NULL)
return;
if (amdtemp_match(parent, NULL)) {
- child = device_add_child(parent, "amdtemp", -1);
+ child = device_add_child(parent, "amdtemp", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(parent, "add amdtemp child failed\n");
}
@@ -289,21 +300,33 @@ amdtemp_identify(driver_t *driver, device_t parent)
static int
amdtemp_probe(device_t dev)
{
- uint32_t family, model;
+ uint32_t family, model, stepping;
- if (resource_disabled("amdtemp", 0))
+ if (resource_disabled("amdtemp", 0)) {
+ if (bootverbose)
+ device_printf(dev, "Resource disabled\n");
return (ENXIO);
- if (!amdtemp_match(device_get_parent(dev), NULL))
+ }
+ if (!amdtemp_match(device_get_parent(dev), NULL)) {
+ if (bootverbose)
+ device_printf(dev, "amdtemp_match() failed\n");
return (ENXIO);
+ }
family = CPUID_TO_FAMILY(cpu_id);
model = CPUID_TO_MODEL(cpu_id);
+ stepping = CPUID_TO_STEPPING(cpu_id);
switch (family) {
case 0x0f:
- if ((model == 0x04 && (cpu_id & CPUID_STEPPING) == 0) ||
- (model == 0x05 && (cpu_id & CPUID_STEPPING) <= 1))
+ if ((model == 0x04 && stepping == 0) ||
+ (model == 0x05 && stepping <= 1)) {
+ if (bootverbose)
+ device_printf(dev,
+ "Unsupported (Family=%02Xh, Model=%02Xh, Stepping=%02Xh)\n",
+ family, model, stepping);
return (ENXIO);
+ }
break;
case 0x10:
case 0x11:
@@ -313,11 +336,13 @@ amdtemp_probe(device_t dev)
case 0x16:
case 0x17:
case 0x19:
+ case 0x1a:
break;
default:
return (ENXIO);
}
- device_set_desc(dev, "AMD CPU On-Die Thermal Sensors");
+ device_set_descf(dev, "AMD Family %02Xh CPU On-Die Thermal Sensors",
+ family);
return (BUS_PROBE_GENERIC);
}
@@ -472,12 +497,13 @@ amdtemp_attach(device_t dev)
break;
case 0x17:
case 0x19:
+ case 0x1a:
sc->sc_ntemps = 1;
sc->sc_gettemp = amdtemp_gettemp17h;
needsmn = true;
break;
default:
- device_printf(dev, "Bogus family 0x%x\n", family);
+ device_printf(dev, "Bogus family %02Xh\n", family);
return (ENXIO);
}
@@ -486,7 +512,7 @@ amdtemp_attach(device_t dev)
device_get_parent(dev), "amdsmn", -1);
if (sc->sc_smn == NULL) {
if (bootverbose)
- device_printf(dev, "No SMN device found\n");
+ device_printf(dev, "No amdsmn(4) device found\n");
return (ENXIO);
}
}
@@ -502,7 +528,7 @@ amdtemp_attach(device_t dev)
device_printf(dev,
"Erratum 319: temperature measurement may be inaccurate\n");
if (bootverbose)
- device_printf(dev, "Found %d cores and %d sensors.\n",
+ device_printf(dev, "Found %d cores and %d sensors\n",
sc->sc_ncores,
sc->sc_ntemps > 1 ? sc->sc_ntemps * sc->sc_ncores : 1);
@@ -535,6 +561,8 @@ amdtemp_attach(device_t dev)
amdtemp_probe_ccd_sensors17h(dev, model);
else if (family == 0x19)
amdtemp_probe_ccd_sensors19h(dev, model);
+ else if (family == 0x1a)
+ amdtemp_probe_ccd_sensors1ah(dev, model);
else if (sc->sc_ntemps > 1) {
SYSCTL_ADD_PROC(sysctlctx,
SYSCTL_CHILDREN(sysctlnode),
@@ -848,7 +876,7 @@ amdtemp_probe_ccd_sensors17h(device_t dev, uint32_t model)
break;
default:
device_printf(dev,
- "Unrecognized Family 17h Model: %02xh\n", model);
+ "Unrecognized Family 17h Model: %02Xh\n", model);
return;
}
@@ -864,22 +892,48 @@ amdtemp_probe_ccd_sensors19h(device_t dev, uint32_t model)
switch (model) {
case 0x00 ... 0x0f: /* Zen3 EPYC "Milan" */
case 0x20 ... 0x2f: /* Zen3 Ryzen "Vermeer" */
+ case 0x50 ... 0x5f: /* Zen3 Ryzen "Cezanne" */
maxreg = 8;
_Static_assert((int)NUM_CCDS >= 8, "");
break;
- case 0x10 ... 0x1f:
+ case 0x10 ... 0x1f: /* Zen4 EPYC "Genoa" */
sc->sc_temp_base = AMDTEMP_ZEN4_10H_CCD_TMP_BASE;
maxreg = 12;
_Static_assert((int)NUM_CCDS >= 12, "");
break;
+ case 0x40 ... 0x4f: /* Zen3+ Ryzen "Rembrandt" */
case 0x60 ... 0x6f: /* Zen4 Ryzen "Raphael" */
+ case 0x70 ... 0x7f: /* Zen4 Ryzen "Phoenix" */
+ sc->sc_temp_base = AMDTEMP_ZEN4_CCD_TMP_BASE;
+ maxreg = 8;
+ _Static_assert((int)NUM_CCDS >= 8, "");
+ break;
+ default:
+ device_printf(dev,
+ "Unrecognized Family 19h Model: %02Xh\n", model);
+ return;
+ }
+
+ amdtemp_probe_ccd_sensors(dev, maxreg);
+}
+
+static void
+amdtemp_probe_ccd_sensors1ah(device_t dev, uint32_t model)
+{
+ struct amdtemp_softc *sc = device_get_softc(dev);
+ uint32_t maxreg;
+
+ switch (model) {
+ case 0x00 ... 0x2f: /* Zen5 EPYC "Turin" */
+ case 0x40 ... 0x4f: /* Zen5 Ryzen "Granite Ridge" */
+ case 0x60 ... 0x7f: /* ??? */
sc->sc_temp_base = AMDTEMP_ZEN4_CCD_TMP_BASE;
maxreg = 8;
_Static_assert((int)NUM_CCDS >= 8, "");
break;
default:
device_printf(dev,
- "Unrecognized Family 19h Model: %02xh\n", model);
+ "Unrecognized Family 1Ah Model: %02Xh\n", model);
return;
}
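The probe now keys off CPUID_TO_STEPPING() as well as family and model; all three come from the CPUID leaf-1 EAX word, where the extended family/model fields only apply to certain base values. An illustrative decode, equivalent in spirit to the CPUID_TO_*() macros (the sample cpu_id value is invented):

#include <stdint.h>
#include <stdio.h>

static unsigned
cpuid_family(uint32_t id)
{
	unsigned base = (id >> 8) & 0xf;

	/* Extended family is added only when the base family is 0xf. */
	return (base == 0xf ? base + ((id >> 20) & 0xff) : base);
}

static unsigned
cpuid_model(uint32_t id)
{
	unsigned base = (id >> 4) & 0xf;
	unsigned fam = (id >> 8) & 0xf;

	/* Extended model extends families 0x6 and 0xf. */
	return ((fam == 0x6 || fam == 0xf) ?
	    ((((id >> 16) & 0xf) << 4) | base) : base);
}

int
main(void)
{
	uint32_t id = 0x00b40f40;	/* invented: family 1Ah, model 44h */

	printf("family %02Xh model %02Xh stepping %Xh\n",
	    cpuid_family(id), cpuid_model(id), id & 0xf);
	return (0);
}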
diff --git a/sys/dev/arcmsr/arcmsr.c b/sys/dev/arcmsr/arcmsr.c
index ea37da30db93..6de4372a67ea 100644
--- a/sys/dev/arcmsr/arcmsr.c
+++ b/sys/dev/arcmsr/arcmsr.c
@@ -5238,7 +5238,6 @@ static int arcmsr_probe(device_t dev)
{
u_int32_t id;
u_int16_t sub_device_id;
- static char buf[256];
char x_type[]={"unknown"};
char *type;
int raid6 = 1;
@@ -5313,9 +5312,8 @@ static int arcmsr_probe(device_t dev)
}
if(type == x_type)
return(ENXIO);
- sprintf(buf, "Areca %s Host Adapter RAID Controller %s\n%s\n",
- type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "Areca %s Host Adapter RAID Controller %s\n%s\n",
+ type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
return (BUS_PROBE_DEFAULT);
}
/*
diff --git a/sys/dev/asmc/asmc.c b/sys/dev/asmc/asmc.c
index ab0a614aa50f..d99c1d56e67c 100644
--- a/sys/dev/asmc/asmc.c
+++ b/sys/dev/asmc/asmc.c
@@ -278,6 +278,12 @@ static const struct asmc_model asmc_models[] = {
ASMC_MBP113_TEMPS, ASMC_MBP113_TEMPNAMES, ASMC_MBP113_TEMPDESCS
},
+ {
+ "MacBookPro11,4", "Apple SMC MacBook Pro Retina Core i7 (mid 2015, 15-inch)",
+ ASMC_SMS_FUNCS_DISABLED, ASMC_FAN_FUNCS, ASMC_LIGHT_FUNCS,
+ ASMC_MBP114_TEMPS, ASMC_MBP114_TEMPNAMES, ASMC_MBP114_TEMPDESCS
+ },
+
/* The Mac Mini has no SMS */
{
"Macmini1,1", "Apple SMC Mac Mini",
@@ -343,6 +349,24 @@ static const struct asmc_model asmc_models[] = {
ASMC_MM52_TEMPS, ASMC_MM52_TEMPNAMES, ASMC_MM52_TEMPDESCS
},
+ /* The Mac Mini 6,1 has no SMS */
+ {
+ "Macmini6,1", "Apple SMC Mac Mini 6,1",
+ NULL, NULL, NULL,
+ ASMC_FAN_FUNCS2,
+ NULL, NULL, NULL,
+ ASMC_MM61_TEMPS, ASMC_MM61_TEMPNAMES, ASMC_MM61_TEMPDESCS
+ },
+
+ /* The Mac Mini 6,2 has no SMS */
+ {
+ "Macmini6,2", "Apple SMC Mac Mini 6,2",
+ NULL, NULL, NULL,
+ ASMC_FAN_FUNCS2,
+ NULL, NULL, NULL,
+ ASMC_MM62_TEMPS, ASMC_MM62_TEMPNAMES, ASMC_MM62_TEMPDESCS
+ },
+
/* The Mac Mini 7,1 has no SMS */
{
"Macmini7,1", "Apple SMC Mac Mini 7,1",
diff --git a/sys/dev/asmc/asmcvar.h b/sys/dev/asmc/asmcvar.h
index 42e3120ff4ff..d40dc1e7c8ff 100644
--- a/sys/dev/asmc/asmcvar.h
+++ b/sys/dev/asmc/asmcvar.h
@@ -432,6 +432,40 @@ struct asmc_softc {
"TM0S", "TP0P", "TPCD", "TW0P", "Ta0P", \
"TaSP", "Th1H", "Th2H", "Ts0P", "Ts0S", \
"Ts1S" }
+
+#define ASMC_MBP114_TEMPS { "IC0C", "ID0R", "IHDC", "IPBR", "IC0R", \
+ "IO3R", "IO5R", "IM0C", "IC1C", "IC2C", \
+ "IC3C", "ILDC", "IBLC", "IAPC", "IHSC", \
+ "ICMC", "TC0P", "TP0P", "TM0P", \
+ "Ta0P", "Th2H", "Th1H", "TW0P", "Ts0P", \
+ "Ts1P", "TB0T", "TB1T", "TB2T", "TH0A", "TH0B", \
+ "TC1C", "TC2C", "TC3C", "TC4C", "TCXC", \
+ "TCGC", "TPCD", "TCSA", "VC0C", "VD0R", \
+ "VP0R", "ALSL", "F0Ac", "F1Ac", "PCPC", \
+ "PCPG", "PCPT", "PSTR", "PDTR", NULL }
+
+#define ASMC_MBP114_TEMPNAMES { "IC0C", "ID0R", "IHDC", "IPBR", "IC0R", \
+ "IO3R", "IO5R", "IM0C", "IC1C", "IC2C", \
+ "IC3C", "ILDC", "IBLC", "IAPC", "IHSC", \
+ "ICMC", "TC0P", "TP0P", "TM0P", \
+ "Ta0P", "Th2H", "Th1H", "TW0P", "Ts0P", \
+ "Ts1P", "TB0T", "TB1T", "TB2T", "TH0A", "TH0B", \
+ "TC1C", "TC2C", "TC3C", "TC4C", "TCXC", \
+ "TCGC", "TPCD", "TCSA", "VC0C", "VD0R", \
+ "VP0R", "ALSL", "F0Ac", "F1Ac", "PCPC", \
+ "PCPG", "PCPT", "PSTR", "PDTR" }
+
+#define ASMC_MBP114_TEMPDESCS { "CPU High (CPU, I/O)", "DC In", "SSD", "Charger (BMON)", "CPU", \
+ "Other 3.3V", "Other 5V", "Memory", "Platform Controller Hub Core", "CPU Load Current Monitor", \
+ "CPU DDR", "LCD Panel", "LCD Backlight", "Airport", "Thunderbolt", \
+ "S2", "CPU Proximity", "Platform Controller Hub", "Memory Proximity", "Air Flow Proximity", \
+ "Left Fin Stack", "Right Fin Stack", "Airport Proximity", "Palm Rest", "Palm Rest Actuator", \
+ "Battery Max", "Battery Sensor 1", "Battery Sensor 2", "SSD A", "SSD B", \
+ "CPU Core 1", "CPU Core 2", "CPU Core 3", "CPU Core 4", "CPU PECI Die", \
+ "Intel GPU", "Platform Controller Hub PECI", "CPU System Agent Core", "CPU VCore", "DC In", \
+ "Pbus", "Ambient Light", "Leftside", "Rightside", "CPU Package Core", \
+ "CPU Package GPU", "CPU Package Total", "System Total", "DC In" }
+
#define ASMC_MM_TEMPS { "TN0P", "TN1P", NULL }
#define ASMC_MM_TEMPNAMES { "northbridge1", "northbridge2" }
#define ASMC_MM_TEMPDESCS { "Northbridge Point 1", \
@@ -530,6 +564,68 @@ struct asmc_softc {
"Power Supply Temperature", \
"Wireless Module Proximity Temperature", }
+#define ASMC_MM61_TEMPS { "TA0P", "TA1P", \
+ "TC0D", "TC0G", "TC0P", "TCPG", \
+ "TI0P", \
+ "TM0S", "TMBS", "TM0P", \
+ "TP0P", "TPCD", \
+ "Tp0C", \
+ "TW0P", NULL }
+
+#define ASMC_MM61_TEMPNAMES { "ambient_air_proximity", "ambient_cpu_pch_wireless_dimm", \
+ "cpu_die", "TC0G", "cpu_proximity", "TCPG", \
+ "thunderbolt_proximity", \
+ "memory_slot1", "memory_slot2", "memory_proximity", \
+ "pch_controller_proximity", "pch_controller_die", \
+ "pwr_supply", \
+ "wireless_proximity", NULL }
+
+#define ASMC_MM61_TEMPDESCS { "Ambient Air Proximity Temperature", \
+ "Combo Ambient CPU PCH Wireless DIMM Temperature", \
+ "CPU Die Temperature", \
+ NULL, \
+ "CPU Proximity Temperature", \
+ NULL, \
+ "Thunderbolt Proximity Temperature", \
+ "Memory Slot 1 Temperature", \
+ "Memory Slot 2 Temperature", \
+ "Memory Slots Proximity Temperature", \
+ "Platform Controller Hub Proximity Temperature", \
+ "Platform Controller Hub Die Temperature", \
+ "Power Supply Temperature", \
+ "Wireless Module Proximity Temperature", NULL }
+
+#define ASMC_MM62_TEMPS { "TA0P", "TA1P", \
+ "TC0D", "TC0G", "TC0P", "TCPG", \
+ "TI0P", \
+ "TM0S", "TMBS", "TM0P", \
+ "TP0P", "TPCD", \
+ "Tp0C", \
+ "TW0P", NULL }
+
+#define ASMC_MM62_TEMPNAMES { "ambient_air_proximity", "ambient_cpu_pch_wireless_dimm", \
+ "cpu_die", "TC0G", "cpu_proximity", "TCPG", \
+ "thunderbolt_proximity", \
+ "memory_slot1", "memory_slot2", "memory_proximity", \
+ "pch_controller_proximity", "pch_controller_die", \
+ "pwr_supply", \
+ "wireless_proximity", NULL }
+
+#define ASMC_MM62_TEMPDESCS { "Ambient Air Proximity Temperature", \
+ "Combo Ambient CPU PCH Wireless DIMM Temperature", \
+ "CPU Die Temperature", \
+ NULL, \
+ "CPU Proximity Temperature", \
+ NULL, \
+ "Thunderbolt Proximity Temperature", \
+ "Memory Slot 1 Temperature", \
+ "Memory Slot 2 Temperature", \
+ "Memory Slots Proximity Temperature", \
+ "Platform Controller Hub Proximity Temperature", \
+ "Platform Controller Hub Die Temperature", \
+ "Power Supply Temperature", \
+ "Wireless Module Proximity Temperature", NULL }
+
#define ASMC_MM71_TEMPS { "TA0p", "TA1p", \
"TA2p", "TC0c", \
"TC0p", "TC1c", \
diff --git a/sys/dev/ata/ata-all.c b/sys/dev/ata/ata-all.c
index 5bfa1ce3a629..2e77c0f6478e 100644
--- a/sys/dev/ata/ata-all.c
+++ b/sys/dev/ata/ata-all.c
@@ -37,11 +37,11 @@
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/ata-isa.c b/sys/dev/ata/ata-isa.c
index 0b76c00e9567..0255141b4d15 100644
--- a/sys/dev/ata/ata-isa.c
+++ b/sys/dev/ata/ata-isa.c
@@ -34,9 +34,9 @@
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/ata-pci.c b/sys/dev/ata/ata-pci.c
index 9cc815150665..cb44f98c406d 100644
--- a/sys/dev/ata/ata-pci.c
+++ b/sys/dev/ata/ata-pci.c
@@ -36,12 +36,15 @@
#include <sys/malloc.h>
#include <sys/sbuf.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
+
#include <vm/uma.h>
-#include <machine/stdarg.h>
+
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
+
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/ata/ata-all.h>
@@ -60,7 +63,6 @@ int
ata_pci_probe(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
- char buffer[64];
/* is this a storage class device ? */
if (pci_get_class(dev) != PCIC_STORAGE)
@@ -70,8 +72,7 @@ ata_pci_probe(device_t dev)
if (pci_get_subclass(dev) != PCIS_STORAGE_IDE)
return (ENXIO);
- sprintf(buffer, "%s ATA controller", ata_pcivendor2str(dev));
- device_set_desc_copy(dev, buffer);
+ device_set_descf(dev, "%s ATA controller", ata_pcivendor2str(dev));
ctlr->chipinit = ata_generic_chipinit;
/* we are a low priority handler */
@@ -128,7 +129,7 @@ ata_pci_attach(device_t dev)
else
device_set_ivars(child, (void *)(intptr_t)unit);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return 0;
}
@@ -136,9 +137,12 @@ int
ata_pci_detach(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
+ int error;
/* detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (ctlr->r_irq) {
bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
@@ -831,12 +835,10 @@ void
ata_set_desc(device_t dev)
{
struct ata_pci_controller *ctlr = device_get_softc(dev);
- char buffer[128];
- sprintf(buffer, "%s %s %s controller",
+ device_set_descf(dev, "%s %s %s controller",
ata_pcivendor2str(dev), ctlr->chip->text,
ata_mode2str(ctlr->chip->max_dma));
- device_set_desc_copy(dev, buffer);
}
const struct ata_chip_id *
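
[Editorial aside: the probe/description hunks above all follow one pattern:
a sprintf() into a fixed-size stack buffer plus device_set_desc_copy() is
collapsed into a single device_set_descf() call. A sketch of the pattern,
where "mychip_name" and "mychip_probe" are hypothetical stand-ins for
helpers like ata_pcivendor2str():]

	#include <sys/param.h>
	#include <sys/bus.h>

	/* Hypothetical helper, standing in for ata_pcivendor2str() etc. */
	static const char *
	mychip_name(device_t dev __unused)
	{
		return ("Example");
	}

	/*
	 * device_set_descf() formats the description directly, removing
	 * the stack buffer and the separate device_set_desc_copy() call.
	 */
	static int
	mychip_probe(device_t dev)
	{
		device_set_descf(dev, "%s ATA controller", mychip_name(dev));
		return (BUS_PROBE_DEFAULT);
	}
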
diff --git a/sys/dev/ata/ata-sata.c b/sys/dev/ata/ata-sata.c
index 57fea391825b..b3155a5625c1 100644
--- a/sys/dev/ata/ata-sata.c
+++ b/sys/dev/ata/ata-sata.c
@@ -36,9 +36,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-acard.c b/sys/dev/ata/chipsets/ata-acard.c
index 806088797232..d1fc5ac028b7 100644
--- a/sys/dev/ata/chipsets/ata-acard.c
+++ b/sys/dev/ata/chipsets/ata-acard.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-acerlabs.c b/sys/dev/ata/chipsets/ata-acerlabs.c
index c34affcabc35..7d602eda6f63 100644
--- a/sys/dev/ata/chipsets/ata-acerlabs.c
+++ b/sys/dev/ata/chipsets/ata-acerlabs.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-amd.c b/sys/dev/ata/chipsets/ata-amd.c
index 19fce249fc08..31776028ee35 100644
--- a/sys/dev/ata/chipsets/ata-amd.c
+++ b/sys/dev/ata/chipsets/ata-amd.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-ati.c b/sys/dev/ata/chipsets/ata-ati.c
index 27eaefa1dc91..154e55428af5 100644
--- a/sys/dev/ata/chipsets/ata-ati.c
+++ b/sys/dev/ata/chipsets/ata-ati.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-cenatek.c b/sys/dev/ata/chipsets/ata-cenatek.c
index 96bb48d1c047..8f5012ed6ef0 100644
--- a/sys/dev/ata/chipsets/ata-cenatek.c
+++ b/sys/dev/ata/chipsets/ata-cenatek.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-cypress.c b/sys/dev/ata/chipsets/ata-cypress.c
index d956f79b1089..b43ea58f9272 100644
--- a/sys/dev/ata/chipsets/ata-cypress.c
+++ b/sys/dev/ata/chipsets/ata-cypress.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-cyrix.c b/sys/dev/ata/chipsets/ata-cyrix.c
index e0921b2c334d..12bfa3eec7ce 100644
--- a/sys/dev/ata/chipsets/ata-cyrix.c
+++ b/sys/dev/ata/chipsets/ata-cyrix.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-highpoint.c b/sys/dev/ata/chipsets/ata-highpoint.c
index bf9dfd22e6ad..9d6137829f6e 100644
--- a/sys/dev/ata/chipsets/ata-highpoint.c
+++ b/sys/dev/ata/chipsets/ata-highpoint.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -81,7 +81,7 @@ ata_highpoint_probe(device_t dev)
{ ATA_HPT366, 0x00, HPT_366, HPT_OLD, ATA_UDMA4, "HPT366" },
{ ATA_HPT302, 0x01, HPT_372, 0, ATA_UDMA6, "HPT302" },
{ 0, 0, 0, 0, 0, 0}};
- char buffer[64];
+ const char *channel;
if (pci_get_vendor(dev) != ATA_HIGHPOINT_ID)
return ENXIO;
@@ -89,16 +89,15 @@ ata_highpoint_probe(device_t dev)
if (!(idx = ata_match_chip(dev, ids)))
return ENXIO;
- strcpy(buffer, "HighPoint ");
- strcat(buffer, idx->text);
+ channel = "";
if (idx->cfg1 == HPT_374) {
if (pci_get_function(dev) == 0)
- strcat(buffer, " (channel 0+1)");
- if (pci_get_function(dev) == 1)
- strcat(buffer, " (channel 2+3)");
+ channel = " (channel 0+1)";
+ else if (pci_get_function(dev) == 1)
+ channel = " (channel 2+3)";
}
- sprintf(buffer, "%s %s controller", buffer, ata_mode2str(idx->max_dma));
- device_set_desc_copy(dev, buffer);
+ device_set_descf(dev, "HighPoint %s%s %s controller",
+ idx->text, channel, ata_mode2str(idx->max_dma));
ctlr->chip = idx;
ctlr->chipinit = ata_highpoint_chipinit;
return (BUS_PROBE_LOW_PRIORITY);
diff --git a/sys/dev/ata/chipsets/ata-intel.c b/sys/dev/ata/chipsets/ata-intel.c
index 866ac03133ab..f6ea4f8519f9 100644
--- a/sys/dev/ata/chipsets/ata-intel.c
+++ b/sys/dev/ata/chipsets/ata-intel.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-ite.c b/sys/dev/ata/chipsets/ata-ite.c
index bbcaeee194ae..3af1c3764396 100644
--- a/sys/dev/ata/chipsets/ata-ite.c
+++ b/sys/dev/ata/chipsets/ata-ite.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-jmicron.c b/sys/dev/ata/chipsets/ata-jmicron.c
index a24eb18f59da..b1671c5aa264 100644
--- a/sys/dev/ata/chipsets/ata-jmicron.c
+++ b/sys/dev/ata/chipsets/ata-jmicron.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -72,7 +72,6 @@ ata_jmicron_probe(device_t dev)
{ ATA_JMB368, 0, 0, 1, ATA_UDMA6, "JMB368" },
{ ATA_JMB368_2, 0, 0, 1, ATA_UDMA6, "JMB368" },
{ 0, 0, 0, 0, 0, 0}};
- char buffer[64];
if (pci_get_vendor(dev) != ATA_JMICRON_ID)
return ENXIO;
@@ -80,9 +79,8 @@ ata_jmicron_probe(device_t dev)
if (!(idx = ata_match_chip(dev, ids)))
return ENXIO;
- sprintf(buffer, "JMicron %s %s controller",
+ device_set_descf(dev, "JMicron %s %s controller",
idx->text, ata_mode2str(idx->max_dma));
- device_set_desc_copy(dev, buffer);
ctlr->chip = idx;
ctlr->chipinit = ata_jmicron_chipinit;
return (BUS_PROBE_LOW_PRIORITY);
@@ -112,10 +110,10 @@ ata_jmicron_chipinit(device_t dev)
pci_write_config(dev, 0x80, 0x01200000, 4);
/* Create AHCI subdevice if AHCI part present. */
if (ctlr->chip->cfg1) {
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child != NULL) {
device_set_ivars(child, (void *)(intptr_t)-1);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
}
}
ctlr->ch_attach = ata_jmicron_ch_attach;
diff --git a/sys/dev/ata/chipsets/ata-marvell.c b/sys/dev/ata/chipsets/ata-marvell.c
index f1feacd3f085..c0c9bb71e5c7 100644
--- a/sys/dev/ata/chipsets/ata-marvell.c
+++ b/sys/dev/ata/chipsets/ata-marvell.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -125,10 +125,10 @@ ata_marvell_chipinit(device_t dev)
return ENXIO;
/* Create AHCI subdevice if AHCI part present. */
if (ctlr->chip->cfg1) {
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child != NULL) {
device_set_ivars(child, (void *)(intptr_t)-1);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
}
}
ctlr->ch_attach = ata_marvell_ch_attach;
diff --git a/sys/dev/ata/chipsets/ata-micron.c b/sys/dev/ata/chipsets/ata-micron.c
index 092453865513..c85fbd580ccc 100644
--- a/sys/dev/ata/chipsets/ata-micron.c
+++ b/sys/dev/ata/chipsets/ata-micron.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-national.c b/sys/dev/ata/chipsets/ata-national.c
index eb5da805bc3c..75f24610d910 100644
--- a/sys/dev/ata/chipsets/ata-national.c
+++ b/sys/dev/ata/chipsets/ata-national.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-netcell.c b/sys/dev/ata/chipsets/ata-netcell.c
index f112634f7ea5..1bdff26f99e5 100644
--- a/sys/dev/ata/chipsets/ata-netcell.c
+++ b/sys/dev/ata/chipsets/ata-netcell.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-nvidia.c b/sys/dev/ata/chipsets/ata-nvidia.c
index 6e1769375f64..5078ed35214d 100644
--- a/sys/dev/ata/chipsets/ata-nvidia.c
+++ b/sys/dev/ata/chipsets/ata-nvidia.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-promise.c b/sys/dev/ata/chipsets/ata-promise.c
index 86cb0c4fe6c4..23b4d6d64b0b 100644
--- a/sys/dev/ata/chipsets/ata-promise.c
+++ b/sys/dev/ata/chipsets/ata-promise.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -166,7 +166,7 @@ ata_promise_probe(device_t dev)
{ ATA_PDC40719, 0, PR_MIO, PR_SATA2, ATA_SA300, "PDC40719" },
{ ATA_PDC40779, 0, PR_MIO, PR_SATA2, ATA_SA300, "PDC40779" },
{ 0, 0, 0, 0, 0, 0}};
- char buffer[64];
+ const char *channel;
uintptr_t devid = 0;
if (pci_get_vendor(dev) != ATA_PROMISE_ID)
@@ -182,10 +182,8 @@ ata_promise_probe(device_t dev)
devid == ATA_I960RM)
return ENXIO;
- strcpy(buffer, "Promise ");
- strcat(buffer, idx->text);
-
/* if we are on a FastTrak TX4, adjust the interrupt resource */
+ channel = NULL;
if ((idx->cfg2 & PR_TX4) && pci_get_class(GRANDPARENT(dev))==PCIC_BRIDGE &&
!BUS_READ_IVAR(device_get_parent(GRANDPARENT(dev)),
GRANDPARENT(dev), PCI_IVAR_DEVID, &devid) &&
@@ -194,18 +192,18 @@ ata_promise_probe(device_t dev)
if (pci_get_slot(dev) == 1) {
bus_get_resource(dev, SYS_RES_IRQ, 0, &start, &end);
- strcat(buffer, " (channel 0+1)");
+ channel = " (channel 0+1)";
}
else if (pci_get_slot(dev) == 2 && start && end) {
bus_set_resource(dev, SYS_RES_IRQ, 0, start, end);
- strcat(buffer, " (channel 2+3)");
+ channel = " (channel 2+3)";
}
else {
start = end = 0;
}
}
- sprintf(buffer, "%s %s controller", buffer, ata_mode2str(idx->max_dma));
- device_set_desc_copy(dev, buffer);
+ device_set_descf(dev, "Promise %s%s %s controller", idx->text,
+ channel == NULL ? "" : channel, ata_mode2str(idx->max_dma));
ctlr->chip = idx;
ctlr->chipinit = ata_promise_chipinit;
return (BUS_PROBE_LOW_PRIORITY);
diff --git a/sys/dev/ata/chipsets/ata-serverworks.c b/sys/dev/ata/chipsets/ata-serverworks.c
index c6a6a26959ea..66487b4ff564 100644
--- a/sys/dev/ata/chipsets/ata-serverworks.c
+++ b/sys/dev/ata/chipsets/ata-serverworks.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-siliconimage.c b/sys/dev/ata/chipsets/ata-siliconimage.c
index 75c7961caebc..d5ecb8f3cf59 100644
--- a/sys/dev/ata/chipsets/ata-siliconimage.c
+++ b/sys/dev/ata/chipsets/ata-siliconimage.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ata/chipsets/ata-sis.c b/sys/dev/ata/chipsets/ata-sis.c
index bf4c5c744289..b4861e694cb6 100644
--- a/sys/dev/ata/chipsets/ata-sis.c
+++ b/sys/dev/ata/chipsets/ata-sis.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -101,7 +101,6 @@ ata_sis_probe(device_t dev)
{ 0, 0, 0, 0, 0, 0 }};
static struct ata_chip_id id[] =
{{ ATA_SISSOUTH, 0x10, 0, 0, 0, "" }, { 0, 0, 0, 0, 0, 0 }};
- char buffer[64];
int found = 0;
if (pci_get_class(dev) != PCIC_STORAGE)
@@ -122,8 +121,8 @@ ata_sis_probe(device_t dev)
memcpy(&id[0], idx, sizeof(id[0]));
id[0].cfg1 = SIS_133NEW;
id[0].max_dma = ATA_UDMA6;
- sprintf(buffer, "SiS 962/963 %s controller",
- ata_mode2str(idx->max_dma));
+ device_set_descf(dev, "SiS 962/963 %s controller",
+ ata_mode2str(idx->max_dma));
}
pci_write_config(dev, 0x57, reg57, 1);
}
@@ -140,17 +139,17 @@ ata_sis_probe(device_t dev)
id[0].cfg1 = SIS_100NEW;
id[0].max_dma = ATA_UDMA5;
}
- sprintf(buffer, "SiS 961 %s controller",ata_mode2str(idx->max_dma));
+ device_set_descf(dev, "SiS 961 %s controller",
+ ata_mode2str(idx->max_dma));
}
pci_write_config(dev, 0x4a, reg4a, 1);
}
if (!found)
- sprintf(buffer,"SiS %s %s controller",
- idx->text, ata_mode2str(idx->max_dma));
+ device_set_descf(dev, "SiS %s %s controller",
+ idx->text, ata_mode2str(idx->max_dma));
else
idx = &id[0];
- device_set_desc_copy(dev, buffer);
ctlr->chip = idx;
ctlr->chipinit = ata_sis_chipinit;
return (BUS_PROBE_LOW_PRIORITY);
diff --git a/sys/dev/ata/chipsets/ata-via.c b/sys/dev/ata/chipsets/ata-via.c
index 4b7dd0f08554..7f23a5d6bf9a 100644
--- a/sys/dev/ata/chipsets/ata-via.c
+++ b/sys/dev/ata/chipsets/ata-via.c
@@ -37,9 +37,9 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
diff --git a/sys/dev/ath/ah_osdep.c b/sys/dev/ath/ah_osdep.c
index b9098121629d..6278d6dbdb8f 100644
--- a/sys/dev/ath/ah_osdep.c
+++ b/sys/dev/ath/ah_osdep.c
@@ -42,8 +42,7 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <net/ethernet.h> /* XXX for ether_sprintf */
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_rfgain.c b/sys/dev/ath/ath_hal/ar5212/ar5212_rfgain.c
index 9352c6decd82..04b75dd09662 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_rfgain.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_rfgain.c
@@ -324,7 +324,7 @@ ar5212GetRfgain(struct ath_hal *ah)
ahp->ah_rfgainState = HAL_RFGAIN_NEED_CHANGE;
/* for ap51 */
ahp->ah_cwCalRequire = AH_TRUE;
- /* Request IQ recalibration for temperature chang */
+ /* Request IQ recalibration for temperature change */
ahp->ah_bIQCalibration = IQ_CAL_INACTIVE;
}
}
diff --git a/sys/dev/ath/ath_hal/ar9002/ar9285_btcoex.c b/sys/dev/ath/ath_hal/ar9002/ar9285_btcoex.c
index 01a224cbbfe9..fb2700771ffa 100644
--- a/sys/dev/ath/ath_hal/ar9002/ar9285_btcoex.c
+++ b/sys/dev/ath/ath_hal/ar9002/ar9285_btcoex.c
@@ -54,7 +54,7 @@ ar9285BTCoexAntennaDiversity(struct ath_hal *ah)
!! (ahp->ah_btCoexFlag & HAL_BT_COEX_FLAG_ANT_DIV_ENABLE));
if ((ahp->ah_btCoexFlag & HAL_BT_COEX_FLAG_ANT_DIV_ALLOW) ||
- (AH5212(ah)->ah_diversity != HAL_ANT_VARIABLE)) {
+ (AH5212(ah)->ah_diversity == AH_TRUE)) {
if ((ahp->ah_btCoexFlag & HAL_BT_COEX_FLAG_ANT_DIV_ENABLE) &&
(AH5212(ah)->ah_antControl == HAL_ANT_VARIABLE)) {
/* Enable antenna diversity */
diff --git a/sys/dev/ath/ath_rate/amrr/amrr.c b/sys/dev/ath/ath_rate/amrr/amrr.c
index 81bd03484de5..8f696eba23eb 100644
--- a/sys/dev/ath/ath_rate/amrr/amrr.c
+++ b/sys/dev/ath/ath_rate/amrr/amrr.c
@@ -245,8 +245,11 @@ ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
* lowest hardware rate.
*/
if (ni->ni_rates.rs_nrates > 0) {
- ni->ni_txrate = ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL;
- amn->amn_tx_rix0 = sc->sc_rixmap[ni->ni_txrate];
+ uint8_t dot11rate;
+
+ dot11rate = ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL;
+ amn->amn_tx_rix0 = sc->sc_rixmap[dot11rate];
+ ieee80211_node_set_txrate_dot11rate(ni, dot11rate);
amn->amn_tx_rate0 = rt->info[amn->amn_tx_rix0].rateCode;
amn->amn_tx_rate0sp = amn->amn_tx_rate0 |
rt->info[amn->amn_tx_rix0].shortPreamble;
diff --git a/sys/dev/ath/ath_rate/onoe/onoe.c b/sys/dev/ath/ath_rate/onoe/onoe.c
index 8d86f13ef703..035d54b00b62 100644
--- a/sys/dev/ath/ath_rate/onoe/onoe.c
+++ b/sys/dev/ath/ath_rate/onoe/onoe.c
@@ -203,6 +203,7 @@ ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
struct ieee80211vap *vap = ni->ni_vap;
const HAL_RATE_TABLE *rt = sc->sc_currates;
u_int8_t rix;
+ uint8_t dot11rate;
KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode));
@@ -221,8 +222,9 @@ ath_rate_update(struct ath_softc *sc, struct ieee80211_node *ni, int rate)
if (ni->ni_rates.rs_nrates == 0)
goto done;
on->on_rix = rate;
- ni->ni_txrate = ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL;
- on->on_tx_rix0 = sc->sc_rixmap[ni->ni_txrate];
+ dot11rate = ni->ni_rates.rs_rates[rate] & IEEE80211_RATE_VAL;
+ ieee80211_node_set_txrate_dot11rate(ni, dot11rate);
+ on->on_tx_rix0 = sc->sc_rixmap[dot11rate];
on->on_tx_rate0 = rt->info[on->on_tx_rix0].rateCode;
on->on_tx_rate0sp = on->on_tx_rate0 |
@@ -389,7 +391,7 @@ ath_rate_ctl(void *arg, struct ieee80211_node *ni)
if (nrate != on->on_rix) {
IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_RATECTL, ni,
"%s: %dM -> %dM (%d ok, %d err, %d retr)", __func__,
- ni->ni_txrate / 2,
+ ieee80211_node_get_txrate_kbit(ni) / 1000,
(rs->rs_rates[nrate] & IEEE80211_RATE_VAL) / 2,
on->on_tx_ok, on->on_tx_err, on->on_tx_retr);
ath_rate_update(sc, ni, nrate);
diff --git a/sys/dev/ath/ath_rate/sample/sample.c b/sys/dev/ath/ath_rate/sample/sample.c
index 8e70699f708d..79bf08678249 100644
--- a/sys/dev/ath/ath_rate/sample/sample.c
+++ b/sys/dev/ath/ath_rate/sample/sample.c
@@ -179,7 +179,7 @@ ath_rate_sample_find_min_pktlength(struct ath_softc *sc,
const struct txschedule *sched = &sn->sched[rix0];
int max_pkt_length = 65530; // ATH_AGGR_MAXSIZE
// Note: this may not be true in all cases; need to check?
- int is_ht40 = (an->an_node.ni_chw == 40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
// Note: not great, but good enough..
int idx = is_ht40 ? MCS_HT40 : MCS_HT20;
@@ -397,7 +397,8 @@ pick_best_rate(struct ath_node *an, const HAL_RATE_TABLE *rt,
* be abstracted out and properly handled.
*/
if (an->an_node.ni_flags & IEEE80211_NODE_HT) {
- if (best_rate_tt == 0 || ((tt * 10) <= (best_rate_tt * 10))) {
+ if (best_rate_tt == 0 ||
+ ((tt * 9) <= (best_rate_tt * 10))) {
best_rate_tt = tt;
best_rate_rix = rix;
best_rate_pct = pct;
@@ -854,9 +855,14 @@ ath_rate_findrate(struct ath_softc *sc, struct ath_node *an,
/*
* Set the visible txrate for this node.
*/
- an->an_node.ni_txrate =
- (rt->info[best_rix].phy == IEEE80211_T_HT) ?
- MCS(best_rix) : DOT11RATE(best_rix);
+ if (rt->info[best_rix].phy == IEEE80211_T_HT)
+ ieee80211_node_set_txrate_ht_mcsrate(
+ &an->an_node,
+ MCS(best_rix) & IEEE80211_RATE_VAL);
+ else
+ ieee80211_node_set_txrate_dot11rate(
+ &an->an_node,
+ DOT11RATE(best_rix));
}
rix = sn->current_rix[size_bin];
sn->packets_since_switch[size_bin]++;
@@ -973,7 +979,7 @@ update_stats(struct ath_softc *sc, struct ath_node *an,
const int size_bin = size_to_bin(frame_size);
const int size = bin_to_size(size_bin);
int tt;
- int is_ht40 = (an->an_node.ni_chw == 40);
+ int is_ht40 = (an->an_node.ni_chw == NET80211_STA_RX_BW_40);
int pct;
if (!IS_RATE_DEFINED(sn, rix0))
@@ -1359,7 +1365,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
continue;
printf(" %d %s/%d", dot11rate(rt, rix), dot11rate_label(rt, rix),
calc_usecs_unicast_packet(sc, 1600, rix, 0,0,
- (ni->ni_chw == 40)));
+ (ni->ni_chw == NET80211_STA_RX_BW_40)));
}
printf("\n");
}
@@ -1390,7 +1396,7 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
sn->stats[y][rix].perfect_tx_time =
calc_usecs_unicast_packet(sc, size, rix, 0, 0,
- (ni->ni_chw == 40));
+ (ni->ni_chw == NET80211_STA_RX_BW_40));
sn->stats[y][rix].average_tx_time =
sn->stats[y][rix].perfect_tx_time;
}
@@ -1408,9 +1414,10 @@ ath_rate_ctl_reset(struct ath_softc *sc, struct ieee80211_node *ni)
#endif
/* set the visible bit-rate */
if (sn->static_rix != -1)
- ni->ni_txrate = DOT11RATE(sn->static_rix);
+ ieee80211_node_set_txrate_dot11rate(ni,
+ DOT11RATE(sn->static_rix));
else
- ni->ni_txrate = RATE(0);
+ ieee80211_node_set_txrate_dot11rate(ni, RATE(0));
#undef RATE
#undef DOT11RATE
}
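
[Editorial aside: the onoe and sample hunks above replace direct ni_txrate
writes and reads with net80211 accessors; the underlying unit is unchanged.
A dot11rate is expressed in 500 kbit/s steps, which is why the old code
divided by 2 to print Mb/s while the new ieee80211_node_get_txrate_kbit()
path divides by 1000. A standalone illustration of the arithmetic, not
driver code:]

	#include <stdio.h>

	/* A dot11rate counts in 500 kbit/s units. */
	static int
	dot11rate_to_kbit(int dot11rate)
	{
		return (dot11rate * 500);
	}

	int
	main(void)
	{
		int rate = 108;		/* 54 Mb/s in 500 kbit/s units */

		printf("%d Mb/s\n", dot11rate_to_kbit(rate) / 1000);
		return (0);
	}
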
diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 094517da5689..1304b597c545 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -924,6 +924,9 @@ ath_attach(u_int16_t devid, struct ath_softc *sc)
| IEEE80211_C_PMGT /* Station side power mgmt */
| IEEE80211_C_SWSLEEP
;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Query the hal to figure out h/w crypto support.
*/
@@ -2446,7 +2449,7 @@ ath_bmiss_vap(struct ieee80211vap *vap)
* against the next beacon.
*
* This handles three common beacon miss cases in STA powersave mode -
- * (a) the beacon TBTT isnt a multiple of bintval;
+ * (a) the beacon TBTT isn't a multiple of bintval;
* (b) the beacon was missed; and
* (c) the beacons are being delayed because the AP is busy and
* isn't reliably able to meet its TBTT.
diff --git a/sys/dev/ath/if_ath_keycache.c b/sys/dev/ath/if_ath_keycache.c
index bc94273bf5ce..a58625ad2803 100644
--- a/sys/dev/ath/if_ath_keycache.c
+++ b/sys/dev/ath/if_ath_keycache.c
@@ -434,8 +434,7 @@ ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
/*
* Only global keys should have key index assigned.
*/
- if (!(&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ if (!ieee80211_is_key_global(vap, k)) {
/* should not happen */
DPRINTF(sc, ATH_DEBUG_KEYCACHE,
"%s: bogus group key\n", __func__);
diff --git a/sys/dev/ath/if_ath_lna_div.c b/sys/dev/ath/if_ath_lna_div.c
index 1b20591fc64e..0755bb667716 100644
--- a/sys/dev/ath/if_ath_lna_div.c
+++ b/sys/dev/ath/if_ath_lna_div.c
@@ -96,12 +96,6 @@ ath_lna_div_attach(struct ath_softc *sc)
ss = malloc(sizeof(struct if_ath_ant_comb_state),
M_TEMP, M_WAITOK | M_ZERO);
- if (ss == NULL) {
- device_printf(sc->sc_dev, "%s: failed to allocate\n",
- __func__);
- /* Don't fail at this point */
- return (0);
- }
/* Fetch the hardware configuration */
OS_MEMZERO(&div_ant_conf, sizeof(div_ant_conf));
diff --git a/sys/dev/ath/if_ath_pci.c b/sys/dev/ath/if_ath_pci.c
index 72f0a802aa5f..a242eab7a694 100644
--- a/sys/dev/ath/if_ath_pci.c
+++ b/sys/dev/ath/if_ath_pci.c
@@ -269,11 +269,6 @@ ath_pci_attach(device_t dev)
__func__, fw->data);
sc->sc_eepromdata =
malloc(fw->datasize, M_TEMP, M_WAITOK | M_ZERO);
- if (! sc->sc_eepromdata) {
- device_printf(dev, "%s: can't malloc eepromdata\n",
- __func__);
- goto bad4;
- }
memcpy(sc->sc_eepromdata, fw->data, fw->datasize);
firmware_put(fw, 0);
}
diff --git a/sys/dev/ath/if_ath_spectral.c b/sys/dev/ath/if_ath_spectral.c
index 58f21b526e93..951d66605981 100644
--- a/sys/dev/ath/if_ath_spectral.c
+++ b/sys/dev/ath/if_ath_spectral.c
@@ -112,13 +112,6 @@ ath_spectral_attach(struct ath_softc *sc)
ss = malloc(sizeof(struct ath_spectral_state),
M_TEMP, M_WAITOK | M_ZERO);
-
- if (ss == NULL) {
- device_printf(sc->sc_dev, "%s: failed to alloc memory\n",
- __func__);
- return (-ENOMEM);
- }
-
sc->sc_spectral = ss;
(void) ath_hal_spectral_get_config(sc->sc_ah, &ss->spectral_state);
diff --git a/sys/dev/ath/if_ath_tx.c b/sys/dev/ath/if_ath_tx.c
index 69d0b5c00848..9ac591c14943 100644
--- a/sys/dev/ath/if_ath_tx.c
+++ b/sys/dev/ath/if_ath_tx.c
@@ -971,6 +971,12 @@ ath_legacy_xmit_handoff(struct ath_softc *sc, struct ath_txq *txq,
ath_tx_handoff_hw(sc, txq, bf);
}
+/*
+ * Set up a frame for encryption.
+ *
+ * If this fails, zero is returned and the mbuf must be
+ * freed by the caller.
+ */
static int
ath_tx_tag_crypto(struct ath_softc *sc, struct ieee80211_node *ni,
struct mbuf *m0, int iswep, int isfrag, int *hdrlen, int *pktlen,
@@ -1133,8 +1139,7 @@ ath_tx_calc_duration(struct ath_softc *sc, struct ath_buf *bf)
* Calculate duration. This logically belongs in the 802.11
* layer but it lacks sufficient information to calculate it.
*/
- if ((flags & HAL_TXDESC_NOACK) == 0 &&
- (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) {
+ if ((flags & HAL_TXDESC_NOACK) == 0 && !IEEE80211_IS_CTL(wh)) {
u_int16_t dur;
if (shortPreamble)
dur = rt->info[rix].spAckDuration;
@@ -1548,6 +1553,10 @@ ath_tx_xmit_normal(struct ath_softc *sc, struct ath_txq *txq,
*
* Note that this may cause the mbuf to be reallocated, so
* m0 may not be valid.
+ *
+ * If there's a problem, the mbuf is freed and an error is
+ * returned; the ath_buf must then be freed by the
+ * caller.
*/
static int
ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
@@ -1589,6 +1598,10 @@ ath_tx_normal_setup(struct ath_softc *sc, struct ieee80211_node *ni,
*/
pktlen = m0->m_pkthdr.len - (hdrlen & 3);
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni, m0, iswep, isfrag, &hdrlen,
&pktlen, &keyix)) {
@@ -2051,7 +2064,7 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
*/
if (IEEE80211_QOS_HAS_SEQ(wh) &&
(! IEEE80211_IS_MULTICAST(wh->i_addr1)) &&
- (subtype != IEEE80211_FC0_SUBTYPE_QOS_NULL)) {
+ (! IEEE80211_IS_QOS_NULL(wh))) {
bf->bf_state.bfs_dobaw = 1;
}
}
@@ -2070,9 +2083,8 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
/* This also sets up the DMA map; crypto; frame parameters, etc */
r = ath_tx_normal_setup(sc, ni, bf, m0, txq);
-
if (r != 0)
- goto done;
+ return (r);
/* At this point m0 could have changed! */
m0 = bf->bf_m;
@@ -2129,7 +2141,6 @@ ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni,
ath_tx_leak_count_update(sc, tid, bf);
ath_tx_xmit_normal(sc, txq, bf);
#endif
-done:
return 0;
}
@@ -2202,6 +2213,10 @@ ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni,
* for QoS frames.
*/
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m0->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/* Handle encryption twiddling if needed */
if (! ath_tx_tag_crypto(sc, ni,
m0, params->ibp_flags & IEEE80211_BPF_CRYPTO, 0,
@@ -2578,25 +2593,6 @@ badbad:
*/
/*
- * XXX doesn't belong here!
- */
-static int
-ieee80211_is_action(struct ieee80211_frame *wh)
-{
- /* Type: Management frame? */
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
- IEEE80211_FC0_TYPE_MGT)
- return 0;
-
- /* Subtype: Action frame? */
- if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) !=
- IEEE80211_FC0_SUBTYPE_ACTION)
- return 0;
-
- return 1;
-}
-
-/*
* Return an alternate TID for ADDBA request frames.
*
* Yes, this likely should be done in the net80211 layer.
@@ -2612,7 +2608,7 @@ ath_tx_action_frame_override_queue(struct ath_softc *sc,
uint16_t baparamset;
/* Not action frame? Bail */
- if (! ieee80211_is_action(wh))
+ if (! IEEE80211_IS_MGMT_ACTION(wh))
return 0;
/* XXX Not needed for frames we send? */
@@ -3001,6 +2997,8 @@ ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
ATH_TX_LOCK_ASSERT(sc);
+ /* TODO: can this use ieee80211_output_seqno_assign() now? */
+
/*
* Is it a QOS NULL Data frame? Give it a sequence number from
* the default TID (IEEE80211_NONQOS_TID.)
@@ -3011,7 +3009,7 @@ ath_tx_tid_seqno_assign(struct ath_softc *sc, struct ieee80211_node *ni,
* RX side.
*/
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
- if (subtype == IEEE80211_FC0_SUBTYPE_QOS_NULL) {
+ if (IEEE80211_IS_QOS_NULL(wh)) {
/* XXX no locking for this TID? This is a bit of a problem. */
seqno = ni->ni_txseqs[IEEE80211_NONQOS_TID];
INCR(ni->ni_txseqs[IEEE80211_NONQOS_TID], IEEE80211_SEQ_RANGE);
diff --git a/sys/dev/ath/if_ath_tx_ht.c b/sys/dev/ath/if_ath_tx_ht.c
index ca69d4558d76..f42058bacb0d 100644
--- a/sys/dev/ath/if_ath_tx_ht.c
+++ b/sys/dev/ath/if_ath_tx_ht.c
@@ -222,7 +222,6 @@ void
ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
{
struct ieee80211_node *ni = bf->bf_node;
- struct ieee80211vap *vap = ni->ni_vap;
struct ieee80211com *ic = ni->ni_ic;
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct ath_rc_series *rc = bf->bf_state.bfs_rc;
@@ -284,7 +283,7 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
if (IS_HT_RATE(rate)) {
rc[i].flags |= ATH_RC_HT_FLAG;
- if (ni->ni_chw == 40)
+ if (ni->ni_chw == NET80211_STA_RX_BW_40)
rc[i].flags |= ATH_RC_CW40_FLAG;
/*
@@ -296,18 +295,14 @@ ath_tx_rate_fill_rcflags(struct ath_softc *sc, struct ath_buf *bf)
* and doesn't return the fractional part, so
* we are always "out" by some amount.
*/
- if (ni->ni_chw == 40 &&
- ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI40 &&
- ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 &&
- vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_40 &&
+ ieee80211_ht_check_tx_shortgi_40(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
}
- if (ni->ni_chw == 20 &&
- ic->ic_htcaps & IEEE80211_HTCAP_SHORTGI20 &&
- ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 &&
- vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20 &&
+ if (ni->ni_chw == NET80211_STA_RX_BW_20 &&
+ ieee80211_ht_check_tx_shortgi_20(ni) &&
(bf->bf_flags & ATH_BUF_TOA_PROBE) == 0) {
rc[i].flags |= ATH_RC_SGI_FLAG;
}
@@ -406,7 +401,6 @@ ath_compute_num_delims(struct ath_softc *sc, struct ath_buf *first_bf,
{
const HAL_RATE_TABLE *rt = sc->sc_currates;
struct ieee80211_node *ni = first_bf->bf_node;
- struct ieee80211vap *vap = ni->ni_vap;
int ndelim, mindelim = 0;
int mpdudensity; /* in 1/100'th of a microsecond */
int peer_mpdudensity; /* net80211 value */
@@ -418,17 +412,7 @@ ath_compute_num_delims(struct ath_softc *sc, struct ath_buf *first_bf,
/*
* Get the advertised density from the node.
*/
- peer_mpdudensity =
- _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY);
-
- /*
- * vap->iv_ampdu_density is a net80211 value, rather than the actual
- * density. Larger values are longer A-MPDU density spacing values,
- * and we want to obey larger configured / negotiated density values
- * per station if we get it.
- */
- if (vap->iv_ampdu_density > peer_mpdudensity)
- peer_mpdudensity = vap->iv_ampdu_density;
+ peer_mpdudensity = ieee80211_ht_get_node_ampdu_density(ni);
/*
* Convert the A-MPDU density net80211 value to a 1/100 microsecond
@@ -563,8 +547,6 @@ static int
ath_get_aggr_limit(struct ath_softc *sc, struct ieee80211_node *ni,
struct ath_buf *bf)
{
- struct ieee80211vap *vap = ni->ni_vap;
-
int amin = ATH_AGGR_MAXSIZE;
int i;
@@ -572,15 +554,9 @@ ath_get_aggr_limit(struct ath_softc *sc, struct ieee80211_node *ni,
if (sc->sc_aggr_limit > 0 && sc->sc_aggr_limit < ATH_AGGR_MAXSIZE)
amin = sc->sc_aggr_limit;
- /* Check the vap configured transmit limit */
- amin = MIN(amin, ath_rx_ampdu_to_byte(vap->iv_ampdu_limit));
-
- /*
- * Check the HTCAP field for the maximum size the node has
- * negotiated. If it's smaller than what we have, cap it there.
- */
- amin = MIN(amin, ath_rx_ampdu_to_byte(
- _IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU)));
+ /* Check the vap and node configured transmit limit */
+ amin = MIN(amin,
+ ath_rx_ampdu_to_byte(ieee80211_ht_get_node_ampdu_limit(ni)));
for (i = 0; i < ATH_RC_NUM; i++) {
if (bf->bf_state.bfs_rc[i].tries == 0)
@@ -593,7 +569,7 @@ ath_get_aggr_limit(struct ath_softc *sc, struct ieee80211_node *ni,
"peer maxrxampdu=%d, max frame len=%d\n",
__func__,
sc->sc_aggr_limit,
- vap->iv_ampdu_limit,
+ ni->ni_vap->iv_ampdu_limit,
_IEEE80211_MASKSHIFT(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU),
amin);
diff --git a/sys/dev/atkbdc/atkbd.c b/sys/dev/atkbdc/atkbd.c
index 403afcd6c2e2..e046b59803b0 100644
--- a/sys/dev/atkbdc/atkbd.c
+++ b/sys/dev/atkbdc/atkbd.c
@@ -1249,6 +1249,7 @@ setup_kbd_port(KBDC kbdc, int port, int intr)
static int
get_kbd_echo(KBDC kbdc)
{
+ int data;
/* enable the keyboard port, but disable the keyboard intr. */
if (setup_kbd_port(kbdc, TRUE, FALSE))
/* CONTROLLER ERROR: there is very little we can do... */
@@ -1256,7 +1257,18 @@ get_kbd_echo(KBDC kbdc)
/* see if something is present */
write_kbd_command(kbdc, KBDC_ECHO);
- if (read_kbd_data(kbdc) != KBD_ECHO) {
+ data = read_kbd_data(kbdc);
+
+ /*
+ * Some i8042 controllers falsely return KBD_ACK for the ECHO command.
+ * Though this is not correct behavior for an AT keyboard, we accept
+ * and consume it to prevent resetting the whole keyboard after the
+ * first interrupt.
+ */
+ if (data == KBD_ACK)
+ data = read_kbd_data(kbdc);
+
+ if (data != KBD_ECHO) {
empty_both_buffers(kbdc, 10);
test_controller(kbdc);
test_kbd_port(kbdc);
diff --git a/sys/dev/atkbdc/atkbdc_isa.c b/sys/dev/atkbdc/atkbdc_isa.c
index 2f7b9eceda94..057ebbc7ec0d 100644
--- a/sys/dev/atkbdc/atkbdc_isa.c
+++ b/sys/dev/atkbdc/atkbdc_isa.c
@@ -238,8 +238,8 @@ atkbdc_isa_attach(device_t dev)
}
*(atkbdc_softc_t **)device_get_softc(dev) = sc;
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
return 0;
}
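
[Editorial aside: the atkbdc hunk above shows the renamed newbus idiom that
recurs in this diff: bus_identify_children() and bus_attach_children()
replace bus_generic_probe() and bus_generic_attach(). A sketch, with
"mybus_attach" hypothetical; note bus_attach_children() returns void, so
the attach routine reports its own status:]

	#include <sys/param.h>
	#include <sys/bus.h>

	static int
	mybus_attach(device_t dev)
	{
		bus_identify_children(dev);	/* was bus_generic_probe(dev) */
		bus_attach_children(dev);	/* was bus_generic_attach(dev) */
		return (0);
	}
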
diff --git a/sys/dev/atkbdc/psm.c b/sys/dev/atkbdc/psm.c
index 1a02dbcec020..8563b5f93aa2 100644
--- a/sys/dev/atkbdc/psm.c
+++ b/sys/dev/atkbdc/psm.c
@@ -66,6 +66,7 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
+#include <sys/lock.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
@@ -607,6 +608,7 @@ static d_read_t psmread;
static d_write_t psmwrite;
static d_ioctl_t psmioctl;
static d_poll_t psmpoll;
+static d_kqfilter_t psmkqfilter;
static int psmopen(struct psm_softc *);
static int psmclose(struct psm_softc *);
@@ -759,6 +761,7 @@ static struct cdevsw psm_cdevsw = {
.d_write = psmwrite,
.d_ioctl = psmioctl,
.d_poll = psmpoll,
+ .d_kqfilter = psmkqfilter,
.d_name = PSM_DRIVER_NAME,
};
@@ -1955,6 +1958,7 @@ psmattach(device_t dev)
sc->state = PSM_VALID;
callout_init(&sc->callout, 0);
callout_init(&sc->softcallout, 0);
+ knlist_init_mtx(&sc->rsel.si_note, &Giant);
/* Setup our interrupt handler */
rid = KBDC_RID_AUX;
@@ -2066,6 +2070,8 @@ psmdetach(device_t dev)
destroy_dev(sc->cdev);
destroy_dev(sc->bdev);
+ knlist_clear(&sc->rsel.si_note, 1);
+ knlist_destroy(&sc->rsel.si_note);
callout_drain(&sc->callout);
callout_drain(&sc->softcallout);
@@ -5221,6 +5227,7 @@ next:
wakeup(sc);
}
selwakeuppri(&sc->rsel, PZERO);
+ KNOTE_LOCKED(&sc->rsel.si_note, 0);
if (sc->async != NULL) {
pgsigio(&sc->async, SIGIO, 0);
}
@@ -5258,6 +5265,45 @@ psmpoll(struct cdev *dev, int events, struct thread *td)
return (revents);
}
+static void
+psmfilter_detach(struct knote *kn)
+{
+ struct psm_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->rsel.si_note, kn, 0);
+}
+
+static int
+psmfilter(struct knote *kn, long hint)
+{
+ struct psm_softc *sc = kn->kn_hook;
+
+ GIANT_REQUIRED;
+
+ return (sc->queue.count != 0 ? 1 : 0);
+}
+
+static const struct filterops psmfiltops = {
+ .f_isfd = 1,
+ .f_detach = psmfilter_detach,
+ .f_event = psmfilter,
+};
+
+static int
+psmkqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct psm_softc *sc = dev->si_drv1;
+
+ if (kn->kn_filter != EVFILT_READ)
+ return (EOPNOTSUPP);
+
+ kn->kn_fop = &psmfiltops;
+ kn->kn_hook = sc;
+ knlist_add(&sc->rsel.si_note, kn, 1);
+
+ return (0);
+}
+
/* vendor/model specific routines */
static int mouse_id_proc1(KBDC kbdc, int res, int scale, int *status)
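
[Editorial aside: the psm hunks above add a d_kqfilter handler so the mouse
device can drive kevent(2). A minimal userland consumer might look like the
sketch below; the device path /dev/psm0 is assumed:]

	#include <sys/types.h>
	#include <sys/event.h>
	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int
	main(void)
	{
		struct kevent ev;
		char buf[64];
		ssize_t n;
		int fd, kq;

		fd = open("/dev/psm0", O_RDONLY);	/* device path assumed */
		if (fd < 0)
			err(1, "open");
		kq = kqueue();
		if (kq < 0)
			err(1, "kqueue");
		EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
		if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0)
			err(1, "kevent");
		/* Wait for mouse data to become readable, then drain it. */
		if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1) {
			n = read(fd, buf, sizeof(buf));
			printf("read %zd bytes of mouse data\n", n);
		}
		close(kq);
		close(fd);
		return (0);
	}
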
diff --git a/sys/dev/atopcase/atopcase.c b/sys/dev/atopcase/atopcase.c
index db1258b77a69..8dc81046e47e 100644
--- a/sys/dev/atopcase/atopcase.c
+++ b/sys/dev/atopcase/atopcase.c
@@ -476,7 +476,7 @@ atopcase_add_child(struct atopcase_softc *sc, struct atopcase_child *ac,
goto exit;
}
- hidbus = device_add_child(sc->sc_dev, "hidbus", -1);
+ hidbus = device_add_child(sc->sc_dev, "hidbus", DEVICE_UNIT_ANY);
if (hidbus == NULL) {
device_printf(sc->sc_dev, "can't add child\n");
err = ENOMEM;
@@ -533,7 +533,8 @@ atopcase_init(struct atopcase_softc *sc)
if (sc->sc_tq != NULL)
taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_task, hz / 120);
- return (bus_generic_attach(sc->sc_dev));
+ bus_attach_children(sc->sc_dev);
+ return (0);
err:
return (err);
@@ -544,7 +545,7 @@ atopcase_destroy(struct atopcase_softc *sc)
{
int err;
- err = device_delete_children(sc->sc_dev);
+ err = bus_generic_detach(sc->sc_dev);
if (err)
return (err);
diff --git a/sys/dev/axgbe/if_axgbe.c b/sys/dev/axgbe/if_axgbe.c
index 65a546f8dae2..b35beca9c886 100644
--- a/sys/dev/axgbe/if_axgbe.c
+++ b/sys/dev/axgbe/if_axgbe.c
@@ -388,11 +388,6 @@ axgbe_attach(device_t dev)
OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);
sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Cannot alloc ifnet\n");
- return (ENXIO);
- }
-
sc->prv.dev = dev;
sc->prv.dmat = bus_get_dma_tag(dev);
sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c
index 3e68525067fb..6bc4bd33e162 100644
--- a/sys/dev/axgbe/if_axgbe_pci.c
+++ b/sys/dev/axgbe/if_axgbe_pci.c
@@ -36,6 +36,7 @@
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
+#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -560,11 +561,6 @@ axgbe_if_attach_pre(if_ctx_t ctx)
/* create the workqueue */
pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
taskqueue_thread_enqueue, &pdata->dev_workqueue);
- if (pdata->dev_workqueue == NULL) {
- axgbe_error("Unable to allocate workqueue\n");
- ret = ENOMEM;
- goto free_channels;
- }
ret = taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET,
"axgbe dev taskq");
if (ret) {
@@ -580,8 +576,6 @@ axgbe_if_attach_pre(if_ctx_t ctx)
free_task_queue:
taskqueue_free(pdata->dev_workqueue);
-
-free_channels:
axgbe_free_channels(sc);
release_bus_resource:
@@ -610,8 +604,6 @@ axgbe_set_counts(if_ctx_t ctx)
struct axgbe_if_softc *sc = iflib_get_softc(ctx);
struct xgbe_prv_data *pdata = &sc->pdata;
cpuset_t lcpus;
- int cpu_count, err;
- size_t len;
/* Set all function pointers */
xgbe_init_all_fptrs(pdata);
@@ -638,21 +630,12 @@ axgbe_set_counts(if_ctx_t ctx)
* number of Rx queues or maximum allowed
*/
- /* Get cpu count from sysctl */
- len = sizeof(cpu_count);
- err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL,
- 0, NULL, 0);
- if (err) {
- axgbe_error("Unable to fetch number of cpus\n");
- cpu_count = 1;
- }
-
if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) {
axgbe_error("Unable to fetch CPU list\n");
/* TODO - handle CPU_COPY(&all_cpus, &lcpus); */
}
- DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus));
+ DBGPR("ncpu %d intrcpu %d\n", mp_ncpus, CPU_COUNT(&lcpus));
pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt);
pdata->tx_ring_count = min(pdata->tx_ring_count,
@@ -2380,11 +2363,11 @@ axgbe_if_promisc_set(if_ctx_t ctx, int flags)
axgbe_printf(1, "%s: MAC_PFR 0x%x drv_flags 0x%x if_flags 0x%x\n",
__func__, XGMAC_IOREAD(pdata, MAC_PFR), if_getdrvflags(ifp),
- if_getflags(ifp));
+ flags);
- if (if_getflags(ifp) & IFF_PPROMISC) {
+ if (flags & IFF_PROMISC) {
- axgbe_printf(1, "User requested to enter promisc mode\n");
+ axgbe_printf(1, "Requested to enter promisc mode\n");
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 1) {
axgbe_printf(1, "Already in promisc mode\n");
@@ -2393,10 +2376,11 @@ axgbe_if_promisc_set(if_ctx_t ctx, int flags)
axgbe_printf(1, "Entering promisc mode\n");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+ /* Disable VLAN filtering */
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
} else {
- axgbe_printf(1, "User requested to leave promisc mode\n");
+ axgbe_printf(1, "Requested to leave promisc mode\n");
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 0) {
axgbe_printf(1, "Already not in promisc mode\n");
@@ -2405,6 +2389,7 @@ axgbe_if_promisc_set(if_ctx_t ctx, int flags)
axgbe_printf(1, "Leaving promisc mode\n");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+ /* Enable VLAN filtering */
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
}
@@ -2430,7 +2415,8 @@ axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (pstats->txframecount_gb);
case IFCOUNTER_OERRORS:
- return (pstats->txframecount_gb - pstats->txframecount_g);
+ return (if_get_counter_default(ifp, cnt) +
+ pstats->txframecount_gb - pstats->txframecount_g);
case IFCOUNTER_IBYTES:
return (pstats->rxoctetcount_gb);
case IFCOUNTER_OBYTES:
diff --git a/sys/dev/axgbe/xgbe-dev.c b/sys/dev/axgbe/xgbe-dev.c
index 39d0dab144a2..48f3f2b587f0 100644
--- a/sys/dev/axgbe/xgbe-dev.c
+++ b/sys/dev/axgbe/xgbe-dev.c
@@ -826,7 +826,7 @@ static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
uint32_t crc;
- uint16_t vid;
+ size_t vid;
uint16_t vlan_hash_table = 0;
__le16 vid_le = 0;
@@ -834,14 +834,13 @@ xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
XGMAC_IOREAD(pdata, MAC_VLANHTR));
/* Generate the VLAN Hash Table value */
- for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {
-
+ bit_foreach(pdata->active_vlans, VLAN_NVID, vid) {
/* Get the CRC32 value of the VLAN ID */
vid_le = cpu_to_le16(vid);
crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
vlan_hash_table |= (1 << crc);
- axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
+ axgbe_printf(1, "%s: vid 0x%lx vid_le 0x%x crc 0x%x "
"vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
vlan_hash_table);
}
@@ -971,7 +970,7 @@ xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
unsigned int pr_mode, am_mode;
- pr_mode = ((if_getflags(pdata->netdev) & IFF_PPROMISC) != 0);
+ pr_mode = ((if_getflags(pdata->netdev) & IFF_PROMISC) != 0);
am_mode = ((if_getflags(pdata->netdev) & IFF_ALLMULTI) != 0);
xgbe_set_promiscuous_mode(pdata, pr_mode);
diff --git a/sys/dev/axgbe/xgbe-phy-v2.c b/sys/dev/axgbe/xgbe-phy-v2.c
index d08ed118a8a8..8c6069f83076 100644
--- a/sys/dev/axgbe/xgbe-phy-v2.c
+++ b/sys/dev/axgbe/xgbe-phy-v2.c
@@ -500,7 +500,7 @@ xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target, void *val,
retry = 1;
again:
- /* Write the specfied register */
+ /* Write the specified register */
i2c_op.cmd = XGBE_I2C_CMD_WRITE;
i2c_op.target = target;
i2c_op.len = val_len;
@@ -539,7 +539,7 @@ again1:
retry = 1;
again2:
- /* Read the specfied register */
+ /* Read the specified register */
i2c_op.cmd = XGBE_I2C_CMD_READ;
i2c_op.target = target;
i2c_op.len = val_len;
@@ -3771,8 +3771,6 @@ xgbe_phy_init(struct xgbe_prv_data *pdata)
return (ret);
phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO);
- if (!phy_data)
- return (-ENOMEM);
pdata->phy_data = phy_data;
phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE);
diff --git a/sys/dev/axgbe/xgbe-sysctl.c b/sys/dev/axgbe/xgbe-sysctl.c
index 16523381e1a3..df81b1d7f0ae 100644
--- a/sys/dev/axgbe/xgbe-sysctl.c
+++ b/sys/dev/axgbe/xgbe-sysctl.c
@@ -244,22 +244,6 @@ exit_bad_op(void)
return(-EINVAL);
}
-static inline unsigned
-fls_long(unsigned long l)
-{
-
- if (sizeof(l) == 4)
- return (fls(l));
- return (fls64(l));
-}
-
-static inline __attribute__((const))
-unsigned long __rounddown_pow_of_two(unsigned long n)
-{
-
- return (1UL << (fls_long(n) - 1));
-}
-
static inline int
get_ubuf(struct sysctl_req *req, char *ubuf)
{
@@ -1049,12 +1033,12 @@ sysctl_ringparam_handler(SYSCTL_HANDLER_ARGS)
return (-EINVAL);
}
- rx = __rounddown_pow_of_two(sys_op->rx_pending);
+ rx = rounddown_pow_of_two(sys_op->rx_pending);
if (rx != sys_op->rx_pending)
axgbe_printf(1, "rx ring param rounded to power of 2: %u\n",
rx);
- tx = __rounddown_pow_of_two(sys_op->tx_pending);
+ tx = rounddown_pow_of_two(sys_op->tx_pending);
if (tx != sys_op->tx_pending)
axgbe_printf(1, "tx ring param rounded to power of 2: %u\n",
tx);
diff --git a/sys/dev/axgbe/xgbe_osdep.h b/sys/dev/axgbe/xgbe_osdep.h
index cea8b4797bf9..40c1607b20e3 100644
--- a/sys/dev/axgbe/xgbe_osdep.h
+++ b/sys/dev/axgbe/xgbe_osdep.h
@@ -56,11 +56,6 @@ typedef uint32_t __le32;
#define le32_to_cpu(x) htole32(x)
#define cpu_to_le16(x) htole16(x)
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
typedef struct mtx spinlock_t;
static inline void
@@ -233,13 +228,6 @@ __ffsl(long mask)
}
static inline int
-fls64(uint64_t mask)
-{
-
- return (flsll(mask));
-}
-
-static inline int
get_bitmask_order(unsigned int count)
{
int order;
@@ -248,65 +236,4 @@ get_bitmask_order(unsigned int count)
return (order); /* We could be slightly more clever with -1 here... */
}
-static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
- long mask;
- int offs;
- int bit;
- int pos;
-
- if (offset >= size)
- return (size);
- pos = offset / BITS_PER_LONG;
- offs = offset % BITS_PER_LONG;
- bit = BITS_PER_LONG * pos;
- addr += pos;
- if (offs) {
- mask = (*addr) & ~BITMAP_LAST_WORD_MASK(offs);
- if (mask)
- return (bit + __ffsl(mask));
- if (size - bit <= BITS_PER_LONG)
- return (size);
- bit += BITS_PER_LONG;
- addr++;
- }
- for (size -= bit; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
- if (*addr == 0)
- continue;
- return (bit + __ffsl(*addr));
- }
- if (size) {
- mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
- if (mask)
- bit += __ffsl(mask);
- else
- bit += size;
- }
- return (bit);
-}
-
-static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
-{
- long mask;
- int bit;
-
- for (bit = 0; size >= BITS_PER_LONG;
- size -= BITS_PER_LONG, bit += BITS_PER_LONG, addr++) {
- if (*addr == 0)
- continue;
- return (bit + __ffsl(*addr));
- }
- if (size) {
- mask = (*addr) & BITMAP_LAST_WORD_MASK(size);
- if (mask)
- bit += __ffsl(mask);
- else
- bit += size;
- }
- return (bit);
-}
-
#endif /* _XGBE_OSDEP_H_ */
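
[Editorial aside: the Linux-style find_first_bit()/find_next_bit() shims
removed above are superseded by bit_foreach() from bitstring(3), as used in
the xgbe-dev.c VLAN-hash hunk. A standalone sketch of the API; sizes and
values are illustrative:]

	#include <sys/types.h>
	#include <bitstring.h>
	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		bitstr_t *vlans;
		int vid;

		vlans = bit_alloc(4096);	/* zero-filled, like active_vlans */
		bit_set(vlans, 1);
		bit_set(vlans, 100);

		/* Visit every set bit, as the VLAN-hash loop now does. */
		bit_foreach(vlans, 4096, vid)
			printf("vlan %d is configured\n", vid);

		free(vlans);
		return (0);
	}
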
diff --git a/sys/dev/bce/if_bce.c b/sys/dev/bce/if_bce.c
index 33158b75486c..6cf39e035ea6 100644
--- a/sys/dev/bce/if_bce.c
+++ b/sys/dev/bce/if_bce.c
@@ -671,7 +671,6 @@ bce_probe(device_t dev)
{
const struct bce_type *t;
struct bce_softc *sc;
- char *descbuf;
u16 vid = 0, did = 0, svid = 0, sdid = 0;
t = bce_devs;
@@ -695,19 +694,10 @@ bce_probe(device_t dev)
if ((vid == t->bce_vid) && (did == t->bce_did) &&
((svid == t->bce_svid) || (t->bce_svid == PCI_ANY_ID)) &&
((sdid == t->bce_sdid) || (t->bce_sdid == PCI_ANY_ID))) {
- descbuf = malloc(BCE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
-
- if (descbuf == NULL)
- return(ENOMEM);
-
- /* Print out the device identity. */
- snprintf(descbuf, BCE_DEVDESC_MAX, "%s (%c%d)",
+ device_set_descf(dev, "%s (%c%d)",
t->bce_name, (((pci_read_config(dev,
PCIR_REVID, 4) & 0xf0) >> 4) + 'A'),
(pci_read_config(dev, PCIR_REVID, 4) & 0xf));
-
- device_set_desc_copy(dev, descbuf);
- free(descbuf, M_TEMP);
return(BUS_PROBE_DEFAULT);
}
t++;
@@ -1231,7 +1221,7 @@ bce_attach(device_t dev)
sc->bce_bc_ver[j++] = '.';
}
- /* Check if any management firwmare is enabled. */
+ /* Check if any management firmware is enabled. */
val = bce_shmem_rd(sc, BCE_PORT_FEATURE);
if (val & BCE_PORT_FEATURE_ASF_ENABLED) {
sc->bce_flags |= BCE_MFW_ENABLE_FLAG;
@@ -1361,12 +1351,6 @@ bce_attach(device_t dev)
/* Allocate an ifnet structure. */
ifp = sc->bce_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- BCE_PRINTF("%s(%d): Interface allocation failed!\n",
- __FILE__, __LINE__);
- rc = ENXIO;
- goto bce_attach_fail;
- }
/* Initialize the ifnet interface. */
if_setsoftc(ifp, sc);
@@ -1560,7 +1544,6 @@ bce_detach(device_t dev)
ifmedia_removeall(&sc->bce_ifmedia);
else {
bus_generic_detach(dev);
- device_delete_child(dev, sc->bce_miibus);
}
/* Release all remaining resources. */
@@ -1611,7 +1594,7 @@ bce_shutdown(device_t dev)
static u32
bce_reg_rd(struct bce_softc *sc, u32 offset)
{
- u32 val = REG_RD(sc, offset);
+ u32 val = bus_space_read_4(sc->bce_btag, sc->bce_bhandle, offset);
DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
__FUNCTION__, offset, val);
return val;
@@ -1628,7 +1611,7 @@ bce_reg_wr16(struct bce_softc *sc, u32 offset, u16 val)
{
DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%04X\n",
__FUNCTION__, offset, val);
- REG_WR16(sc, offset, val);
+ bus_space_write_2(sc->bce_btag, sc->bce_bhandle, offset, val);
}
/****************************************************************************/
@@ -1642,7 +1625,7 @@ bce_reg_wr(struct bce_softc *sc, u32 offset, u32 val)
{
DBPRINT(sc, BCE_INSANE_REG, "%s(); offset = 0x%08X, val = 0x%08X\n",
__FUNCTION__, offset, val);
- REG_WR(sc, offset, val);
+ bus_space_write_4(sc->bce_btag, sc->bce_bhandle, offset, val);
}
#endif
@@ -5797,7 +5780,7 @@ bce_fill_rx_chain(struct bce_softc *sc)
/* We should never end up pointing to a next page pointer. */
DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
BCE_PRINTF("%s(): Invalid rx_prod value: 0x%04X\n",
- __FUNCTION__, rx_prod));
+ __FUNCTION__, sc->rx_prod));
/* Write the mailbox and tell the chip about the waiting rx_bd's. */
REG_WR16(sc, MB_GET_CID_ADDR(RX_CID) + BCE_L2MQ_RX_HOST_BDIDX, prod);
@@ -5961,7 +5944,7 @@ bce_fill_pg_chain(struct bce_softc *sc)
DBRUNIF(((prod & USABLE_RX_BD_PER_PAGE) == USABLE_RX_BD_PER_PAGE),
BCE_PRINTF("%s(): Invalid pg_prod value: 0x%04X\n",
- __FUNCTION__, pg_prod));
+ __FUNCTION__, sc->pg_prod));
/*
* Write the mailbox and tell the chip about
@@ -9047,7 +9030,7 @@ bce_add_sysctls(struct bce_softc *sc)
CTLFLAG_RW, &bootcode_running_failure_sim_control,
0, "Debug control to force bootcode running failures");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_U16(ctx, children, OID_AUTO,
"rx_low_watermark",
CTLFLAG_RD, &sc->rx_low_watermark,
0, "Lowest level of free rx_bd's");
@@ -9057,7 +9040,7 @@ bce_add_sysctls(struct bce_softc *sc)
CTLFLAG_RD, &sc->rx_empty_count,
"Number of times the RX chain was empty");
- SYSCTL_ADD_INT(ctx, children, OID_AUTO,
+ SYSCTL_ADD_U16(ctx, children, OID_AUTO,
"tx_hi_watermark",
CTLFLAG_RD, &sc->tx_hi_watermark,
0, "Highest level of used tx_bd's");
@@ -11099,7 +11082,7 @@ bce_dump_rxp_state(struct bce_softc *sc, int regs)
for (int i = BCE_RXP_CPU_MODE; i < 0xe8fff; i += 0x10) {
/* Skip the big blank spaces */
- if (i < 0xc5400 && i > 0xdffff)
+ if (i < 0xc5400 || i > 0xdffff)
BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
"0x%08X 0x%08X\n", i,
REG_RD_IND(sc, i),
@@ -11217,7 +11200,7 @@ bce_dump_cp_state(struct bce_softc *sc, int regs)
for (int i = BCE_CP_CPU_MODE; i < 0x1aa000; i += 0x10) {
/* Skip the big blank spaces */
- if (i < 0x185400 && i > 0x19ffff)
+ if (i < 0x185400 || i > 0x19ffff)
BCE_PRINTF("0x%04X: 0x%08X 0x%08X "
"0x%08X 0x%08X\n", i,
REG_RD_IND(sc, i),
diff --git a/sys/dev/beri/beri_mem.c b/sys/dev/beri/beri_mem.c
deleted file mode 100644
index cc9dba34b494..000000000000
--- a/sys/dev/beri/beri_mem.c
+++ /dev/null
@@ -1,181 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * BERI memory interface.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-#include <sys/conf.h>
-#include <sys/uio.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/intr.h>
-
-struct beri_mem_softc {
- struct resource *res[1];
- struct cdev *mem_cdev;
- device_t dev;
- int mem_size;
- int mem_start;
-};
-
-static struct resource_spec beri_mem_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-static int
-mem_open(struct cdev *dev, int flags __unused,
- int fmt __unused, struct thread *td __unused)
-{
- struct beri_mem_softc *sc;
-
- sc = dev->si_drv1;
-
- return (0);
-}
-
-static int
-mem_close(struct cdev *dev, int flags __unused,
- int fmt __unused, struct thread *td __unused)
-{
- struct beri_mem_softc *sc;
-
- sc = dev->si_drv1;
-
- return (0);
-}
-
-static int
-mem_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
- struct thread *td)
-{
-
- return (0);
-}
-
-static int
-mem_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot,
- vm_memattr_t *memattr)
-{
- struct beri_mem_softc *sc;
-
- sc = dev->si_drv1;
-
- if (offset < sc->mem_size) {
- *paddr = sc->mem_start + offset;
- return (0);
- }
-
- return (EINVAL);
-}
-
-static struct cdevsw mem_cdevsw = {
- .d_version = D_VERSION,
- .d_open = mem_open,
- .d_close = mem_close,
- .d_ioctl = mem_ioctl,
- .d_mmap = mem_mmap,
- .d_name = "BERI memory",
-};
-
-static int
-beri_mem_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "sri-cambridge,beri-mem"))
- return (ENXIO);
-
- device_set_desc(dev, "BERI memory");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-beri_mem_attach(device_t dev)
-{
- struct beri_mem_softc *sc;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, beri_mem_spec, sc->res)) {
- device_printf(dev, "could not allocate resources\n");
- return (ENXIO);
- }
-
- /* Memory info */
- sc->mem_size = rman_get_size(sc->res[0]);
- sc->mem_start = rman_get_start(sc->res[0]);
-
- sc->mem_cdev = make_dev(&mem_cdevsw, 0, UID_ROOT, GID_WHEEL,
- 0600, "beri_mem");
-
- if (sc->mem_cdev == NULL) {
- device_printf(dev, "Failed to create character device.\n");
- return (ENXIO);
- }
-
- sc->mem_cdev->si_drv1 = sc;
-
- return (0);
-}
-
-static device_method_t beri_mem_methods[] = {
- DEVMETHOD(device_probe, beri_mem_probe),
- DEVMETHOD(device_attach, beri_mem_attach),
- { 0, 0 }
-};
-
-static driver_t beri_mem_driver = {
- "beri_mem",
- beri_mem_methods,
- sizeof(struct beri_mem_softc),
-};
-
-DRIVER_MODULE(beri_mem, simplebus, beri_mem_driver, 0, 0);
diff --git a/sys/dev/beri/beri_ring.c b/sys/dev/beri/beri_ring.c
deleted file mode 100644
index 5ff0a74d8b07..000000000000
--- a/sys/dev/beri/beri_ring.c
+++ /dev/null
@@ -1,524 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * SRI-Cambridge BERI soft processor <-> ARM core ring buffer.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-#include <sys/conf.h>
-#include <sys/uio.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/event.h>
-#include <sys/selinfo.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/intr.h>
-
-#define READ4(_sc, _reg) \
- bus_read_4((_sc)->res[0], _reg)
-#define WRITE4(_sc, _reg, _val) \
- bus_write_4((_sc)->res[0], _reg, _val)
-
-#define CDES_INT_EN (1 << 15)
-#define CDES_CAUSE_MASK 0x3
-#define CDES_CAUSE_SHIFT 13
-#define DEVNAME_MAXLEN 256
-
-typedef struct
-{
- uint16_t cdes;
- uint16_t interrupt_level;
- uint16_t in;
- uint16_t out;
-} control_reg_t;
-
-struct beri_softc {
- struct resource *res[3];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- struct cdev *cdev;
- device_t dev;
- void *read_ih;
- void *write_ih;
- struct selinfo beri_rsel;
- struct mtx beri_mtx;
- int opened;
-
- char devname[DEVNAME_MAXLEN];
- int control_read;
- int control_write;
- int data_read;
- int data_write;
- int data_size;
-};
-
-static struct resource_spec beri_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { SYS_RES_IRQ, 0, RF_ACTIVE },
- { SYS_RES_IRQ, 1, RF_ACTIVE },
- { -1, 0 }
-};
-
-static control_reg_t
-get_control_reg(struct beri_softc *sc, int dir)
-{
- uint32_t offset;
- uint16_t dst[4];
- control_reg_t c;
- uint16_t *cp;
- int i;
-
- cp = (uint16_t *)&c;
-
- offset = dir ? sc->control_write : sc->control_read;
- ((uint32_t *)dst)[0] = READ4(sc, offset);
- ((uint32_t *)dst)[1] = READ4(sc, offset + 4);
-
- for (i = 0; i < 4; i++)
- cp[i] = dst[3 - i];
-
- return (c);
-}
-
-static void
-set_control_reg(struct beri_softc *sc, int dir, control_reg_t *c)
-{
- uint32_t offset;
- uint16_t src[4];
- uint16_t *cp;
- int i;
-
- cp = (uint16_t *)c;
-
- for (i = 0; i < 4; i++)
- src[3 - i] = cp[i];
-
- offset = dir ? sc->control_write : sc->control_read;
- WRITE4(sc, offset + 0, ((uint32_t *)src)[0]);
- WRITE4(sc, offset + 4, ((uint32_t *)src)[1]);
-}
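The byte juggling in get_control_reg() and set_control_reg() above reduces
to one rule: the four 16-bit fields of the 64-bit control word are stored in
the opposite order from the host-side struct, so host index i maps to wire
index 3 - i. A standalone sketch of the reversal (values illustrative, not
driver code):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t wire[4] = { 0x000d, 0x000c, 0x000b, 0x000a };
	uint16_t host[4];
	int i;

	for (i = 0; i < 4; i++)
		host[i] = wire[3 - i];

	/*
	 * host[] is now { 0x000a, 0x000b, 0x000c, 0x000d }, i.e.
	 * cdes, interrupt_level, in, out in struct order.
	 */
	printf("cdes=%#x in=%#x out=%#x\n", host[0], host[2], host[3]);
	return (0);
}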
-
-static int
-get_stock(struct beri_softc *sc, int dir, control_reg_t *c)
-{
- uint32_t fill;
-
- fill = (c->in - c->out + sc->data_size) % sc->data_size;
-
- if (dir)
- return (sc->data_size - fill - 1);
- else
- return (fill);
-}
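get_stock() is the classic circular-buffer occupancy computation:
fill = (in - out + size) % size bytes are readable, size - fill - 1 are
writable, and one slot is kept unused so that in == out always means
"empty" rather than "full". A worked standalone example (not driver code):

#include <stdio.h>

int
main(void)
{
	int size = 16, in = 3, out = 12;
	int fill = (in - out + size) % size;	/* (3 - 12 + 16) % 16 = 7 */

	printf("readable: %d bytes\n", fill);		/* 7 */
	printf("writable: %d bytes\n", size - fill - 1);/* 8 */
	return (0);
}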
-
-static void
-beri_intr_write(void *arg)
-{
- struct beri_softc *sc;
- control_reg_t c;
-
- sc = arg;
-
- c = get_control_reg(sc, 1);
- if (c.cdes & CDES_INT_EN) {
- c.cdes &= ~(CDES_INT_EN);
- set_control_reg(sc, 1, &c);
- }
-
- mtx_lock(&sc->beri_mtx);
- selwakeuppri(&sc->beri_rsel, PZERO + 1);
- KNOTE_LOCKED(&sc->beri_rsel.si_note, 0);
- mtx_unlock(&sc->beri_mtx);
-}
-
-static void
-beri_intr_read(void *arg)
-{
- struct beri_softc *sc;
- control_reg_t c;
-
- sc = arg;
-
- c = get_control_reg(sc, 0);
- if (c.cdes & CDES_INT_EN) {
- c.cdes &= ~(CDES_INT_EN);
- set_control_reg(sc, 0, &c);
- }
-
- mtx_lock(&sc->beri_mtx);
- selwakeuppri(&sc->beri_rsel, PZERO + 1);
- KNOTE_LOCKED(&sc->beri_rsel.si_note, 0);
- mtx_unlock(&sc->beri_mtx);
-}
-
-static int
-beri_open(struct cdev *dev, int flags __unused,
- int fmt __unused, struct thread *td __unused)
-{
- struct beri_softc *sc;
- control_reg_t c;
-
- sc = dev->si_drv1;
-
- if (sc->opened)
- return (1);
-
- /* Setup interrupt handlers */
- if (bus_setup_intr(sc->dev, sc->res[1], INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, beri_intr_read, sc, &sc->read_ih)) {
- device_printf(sc->dev, "Unable to setup read intr\n");
- return (1);
- }
- if (bus_setup_intr(sc->dev, sc->res[2], INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, beri_intr_write, sc, &sc->write_ih)) {
- device_printf(sc->dev, "Unable to setup write intr\n");
- return (1);
- }
-
- sc->opened = 1;
-
- /* Clear write buffer */
- c = get_control_reg(sc, 1);
- c.in = c.out;
- c.cdes = 0;
- set_control_reg(sc, 1, &c);
-
- /* Clear read buffer */
- c = get_control_reg(sc, 0);
- c.out = c.in;
- c.cdes = 0;
- set_control_reg(sc, 0, &c);
-
- return (0);
-}
-
-static int
-beri_close(struct cdev *dev, int flags __unused,
- int fmt __unused, struct thread *td __unused)
-{
- struct beri_softc *sc;
-
- sc = dev->si_drv1;
-
- if (sc->opened) {
- sc->opened = 0;
-
- /* Unsetup interrupt handlers */
- bus_teardown_intr(sc->dev, sc->res[1], sc->read_ih);
- bus_teardown_intr(sc->dev, sc->res[2], sc->write_ih);
- }
-
- return (0);
-}
-
-static int
-beri_rdwr(struct cdev *dev, struct uio *uio, int ioflag)
-{
- struct beri_softc *sc;
- uint32_t offset;
- control_reg_t c;
- uint16_t *ptr;
- uint8_t *dst;
- int stock;
- int dir;
- int amount;
- int count;
-
- sc = dev->si_drv1;
-
- dir = uio->uio_rw ? 1 : 0;
-
- c = get_control_reg(sc, dir);
- stock = get_stock(sc, dir, &c);
- if (stock < uio->uio_resid) {
- device_printf(sc->dev, "Err: no data/space available\n");
- return (1);
- }
-
- amount = uio->uio_resid;
- ptr = dir ? &c.in : &c.out;
- count = (sc->data_size - *ptr);
-
- offset = dir ? sc->data_write : sc->data_read;
- dst = (uint8_t *)(sc->bsh + offset);
-
- if (amount <= count) {
- uiomove(dst + *ptr, amount, uio);
- } else {
- uiomove(dst + *ptr, count, uio);
- uiomove(dst, (amount - count), uio);
- }
-
- *ptr = (*ptr + amount) % sc->data_size;
- set_control_reg(sc, dir, &c);
-
- return (0);
-}
-
-static int
-beri_kqread(struct knote *kn, long hint)
-{
- struct beri_softc *sc;
- control_reg_t c;
- int stock;
-
- sc = kn->kn_hook;
-
- c = get_control_reg(sc, 0);
- stock = get_stock(sc, 0, &c);
- if (stock) {
- kn->kn_data = stock;
- return (1);
- }
-
- kn->kn_data = 0;
-
- /* Wait for at least one new byte in the buffer */
- c.interrupt_level = 1;
-
- /* Enable interrupts */
- c.cdes |= (CDES_INT_EN);
- set_control_reg(sc, 0, &c);
-
- return (0);
-}
-
-static int
-beri_kqwrite(struct knote *kn, long hint)
-{
- struct beri_softc *sc;
- control_reg_t c;
- int stock;
-
- sc = kn->kn_hook;
-
- c = get_control_reg(sc, 1);
- stock = get_stock(sc, 1, &c);
- if (stock) {
- kn->kn_data = stock;
- return (1);
- }
-
- kn->kn_data = 0;
-
- /* Wait for at least one free position in the buffer */
- c.interrupt_level = sc->data_size - 2;
-
- /* Enable interrupts */
- c.cdes |= (CDES_INT_EN);
- set_control_reg(sc, 1, &c);
-
- return (0);
-}
-
-static void
-beri_kqdetach(struct knote *kn)
-{
- struct beri_softc *sc;
-
- sc = kn->kn_hook;
-
- knlist_remove(&sc->beri_rsel.si_note, kn, 0);
-}
-
-static struct filterops beri_read_filterops = {
- .f_isfd = 1,
- .f_attach = NULL,
- .f_detach = beri_kqdetach,
- .f_event = beri_kqread,
-};
-
-static struct filterops beri_write_filterops = {
- .f_isfd = 1,
- .f_attach = NULL,
- .f_detach = beri_kqdetach,
- .f_event = beri_kqwrite,
-};
-
-static int
-beri_kqfilter(struct cdev *dev, struct knote *kn)
-{
- struct beri_softc *sc;
-
- sc = dev->si_drv1;
-
- switch(kn->kn_filter) {
- case EVFILT_READ:
- kn->kn_fop = &beri_read_filterops;
- break;
- case EVFILT_WRITE:
- kn->kn_fop = &beri_write_filterops;
- break;
- default:
- return(EINVAL);
- }
-
- kn->kn_hook = sc;
- knlist_add(&sc->beri_rsel.si_note, kn, 0);
-
- return (0);
-}
-
-static struct cdevsw beri_cdevsw = {
- .d_version = D_VERSION,
- .d_open = beri_open,
- .d_close = beri_close,
- .d_write = beri_rdwr,
- .d_read = beri_rdwr,
- .d_kqfilter = beri_kqfilter,
- .d_name = "beri ring buffer",
-};
-
-static int
-parse_fdt(struct beri_softc *sc)
-{
- pcell_t dts_value[2];
- phandle_t node;
- int len;
-
- if ((node = ofw_bus_get_node(sc->dev)) == -1)
- return (ENXIO);
-
- /* get device name */
- if (OF_getprop(ofw_bus_get_node(sc->dev), "device_name",
- &sc->devname, sizeof(sc->devname)) <= 0) {
- device_printf(sc->dev, "Can't get device_name\n");
- return (ENXIO);
- }
-
- if ((len = OF_getproplen(node, "data_size")) <= 0)
- return (ENXIO);
- OF_getencprop(node, "data_size", dts_value, len);
- sc->data_size = dts_value[0];
-
- if ((len = OF_getproplen(node, "data_read")) <= 0)
- return (ENXIO);
- OF_getencprop(node, "data_read", dts_value, len);
- sc->data_read = dts_value[0];
-
- if ((len = OF_getproplen(node, "data_write")) <= 0)
- return (ENXIO);
- OF_getencprop(node, "data_write", dts_value, len);
- sc->data_write = dts_value[0];
-
- if ((len = OF_getproplen(node, "control_read")) <= 0)
- return (ENXIO);
- OF_getencprop(node, "control_read", dts_value, len);
- sc->control_read = dts_value[0];
-
- if ((len = OF_getproplen(node, "control_write")) <= 0)
- return (ENXIO);
- OF_getencprop(node, "control_write", dts_value, len);
- sc->control_write = dts_value[0];
-
- return (0);
-}
-
-static int
-beri_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "sri-cambridge,beri-ring"))
- return (ENXIO);
-
- device_set_desc(dev, "SRI-Cambridge BERI ring buffer");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-beri_attach(device_t dev)
-{
- struct beri_softc *sc;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, beri_spec, sc->res)) {
- device_printf(dev, "could not allocate resources\n");
- return (ENXIO);
- }
-
- /* Memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- if (parse_fdt(sc)) {
- device_printf(sc->dev, "Can't get FDT values\n");
- return (ENXIO);
- }
-
- sc->cdev = make_dev(&beri_cdevsw, 0, UID_ROOT, GID_WHEEL,
- S_IRWXU, "%s", sc->devname);
- if (sc->cdev == NULL) {
- device_printf(dev, "Failed to create character device.\n");
- return (ENXIO);
- }
-
- sc->cdev->si_drv1 = sc;
-
- mtx_init(&sc->beri_mtx, "beri_mtx", NULL, MTX_DEF);
- knlist_init_mtx(&sc->beri_rsel.si_note, &sc->beri_mtx);
-
- return (0);
-}
-
-static device_method_t beri_methods[] = {
- DEVMETHOD(device_probe, beri_probe),
- DEVMETHOD(device_attach, beri_attach),
- { 0, 0 }
-};
-
-static driver_t beri_driver = {
- "beri_ring",
- beri_methods,
- sizeof(struct beri_softc),
-};
-
-DRIVER_MODULE(beri_ring, simplebus, beri_driver, 0, 0);
diff --git a/sys/dev/beri/virtio/network/if_vtbe.c b/sys/dev/beri/virtio/network/if_vtbe.c
deleted file mode 100644
index de1a8ebaac97..000000000000
--- a/sys/dev/beri/virtio/network/if_vtbe.c
+++ /dev/null
@@ -1,648 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * BERI Virtio Networking Frontend
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-#include <sys/endian.h>
-#include <sys/lock.h>
-#include <sys/mbuf.h>
-#include <sys/mutex.h>
-#include <sys/socket.h>
-#include <sys/sockio.h>
-#include <sys/sysctl.h>
-#include <sys/mdioctl.h>
-#include <sys/conf.h>
-#include <sys/stat.h>
-#include <sys/uio.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <net/bpf.h>
-#include <net/if.h>
-#include <net/ethernet.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-#include <net/if_types.h>
-#include <net/if_var.h>
-#include <net/if_vlan_var.h>
-
-#include <netinet/in.h>
-#include <netinet/udp.h>
-#include <netinet/tcp.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/intr.h>
-
-#include <dev/beri/virtio/virtio.h>
-#include <dev/beri/virtio/virtio_mmio_platform.h>
-
-#include <dev/altera/pio/pio.h>
-
-#include <dev/virtio/mmio/virtio_mmio.h>
-#include <dev/virtio/network/virtio_net.h>
-#include <dev/virtio/virtio_ids.h>
-#include <dev/virtio/virtio_config.h>
-#include <dev/virtio/virtio_ring.h>
-
-#include "pio_if.h"
-
-#define DPRINTF(fmt, args...) printf(fmt, ##args)
-
-#define READ4(_sc, _reg) \
- bus_read_4((_sc)->res[0], _reg)
-#define WRITE4(_sc, _reg, _val) \
- bus_write_4((_sc)->res[0], _reg, _val)
-
-#define VTBE_LOCK(sc) mtx_lock(&(sc)->mtx)
-#define VTBE_UNLOCK(sc) mtx_unlock(&(sc)->mtx)
-#define VTBE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED);
-#define VTBE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED);
-
-/*
- * Driver data and defines.
- */
-#define DESC_COUNT 256
-
-struct vtbe_softc {
- struct resource *res[2];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- device_t dev;
- if_t ifp;
- int if_flags;
- struct mtx mtx;
- boolean_t is_attached;
-
- int beri_mem_offset;
- device_t pio_send;
- device_t pio_recv;
- int opened;
-
- struct vqueue_info vs_queues[2];
- int vs_curq;
- int hdrsize;
-};
-
-static struct resource_spec vtbe_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-static void vtbe_txfinish_locked(struct vtbe_softc *sc);
-static void vtbe_rxfinish_locked(struct vtbe_softc *sc);
-static void vtbe_stop_locked(struct vtbe_softc *sc);
-static int pio_enable_irq(struct vtbe_softc *sc, int enable);
-
-static void
-vtbe_txstart_locked(struct vtbe_softc *sc)
-{
- struct iovec iov[DESC_COUNT];
- struct virtio_net_hdr *vnh;
- struct vqueue_info *vq;
- struct iovec *tiov;
- if_t ifp;
- struct mbuf *m;
- struct uio uio;
- int enqueued;
- int iolen;
- int error;
- int reg;
- int len;
- int n;
-
- VTBE_ASSERT_LOCKED(sc);
-
- /* RX queue */
- vq = &sc->vs_queues[0];
- if (!vq_has_descs(vq)) {
- return;
- }
-
- ifp = sc->ifp;
- if (if_getdrvflags(ifp) & IFF_DRV_OACTIVE) {
- return;
- }
-
- enqueued = 0;
-
- if (!vq_ring_ready(vq))
- return;
-
- vq->vq_save_used = be16toh(vq->vq_used->idx);
-
- for (;;) {
- if (!vq_has_descs(vq)) {
- if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
- break;
- }
-
- m = if_dequeue(ifp);
- if (m == NULL) {
- break;
- }
-
- n = vq_getchain(sc->beri_mem_offset, vq, iov,
- DESC_COUNT, NULL);
- KASSERT(n == 2,
- ("Unexpected amount of descriptors (%d)", n));
-
- tiov = getcopy(iov, n);
- vnh = iov[0].iov_base;
- memset(vnh, 0, sc->hdrsize);
-
- len = iov[1].iov_len;
- uio.uio_resid = len;
- uio.uio_iov = &tiov[1];
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_iovcnt = 1;
- uio.uio_offset = 0;
- uio.uio_rw = UIO_READ;
-
- error = m_mbuftouio(&uio, m, 0);
- if (error)
- panic("m_mbuftouio failed\n");
-
- iolen = (len - uio.uio_resid + sc->hdrsize);
-
- free(tiov, M_DEVBUF);
- vq_relchain(vq, iov, n, iolen);
-
- if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
-
- BPF_MTAP(ifp, m);
- m_freem(m);
-
- ++enqueued;
- }
-
- if (enqueued != 0) {
- reg = htobe32(VIRTIO_MMIO_INT_VRING);
- WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);
-
- PIO_SET(sc->pio_send, Q_INTR, 1);
- }
-}
-
-static void
-vtbe_txstart(if_t ifp)
-{
- struct vtbe_softc *sc = if_getsoftc(ifp);
-
- VTBE_LOCK(sc);
- vtbe_txstart_locked(sc);
- VTBE_UNLOCK(sc);
-}
-
-static void
-vtbe_stop_locked(struct vtbe_softc *sc)
-{
- if_t ifp;
-
- VTBE_ASSERT_LOCKED(sc);
-
- ifp = sc->ifp;
- if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
-}
-
-static void
-vtbe_init_locked(struct vtbe_softc *sc)
-{
- if_t ifp = sc->ifp;
-
- VTBE_ASSERT_LOCKED(sc);
-
- if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
- return;
-
- if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
-}
-
-static void
-vtbe_init(void *if_softc)
-{
- struct vtbe_softc *sc = if_softc;
-
- VTBE_LOCK(sc);
- vtbe_init_locked(sc);
- VTBE_UNLOCK(sc);
-}
-
-static int
-vtbe_ioctl(if_t ifp, u_long cmd, caddr_t data)
-{
- struct ifmediareq *ifmr;
- struct vtbe_softc *sc;
- struct ifreq *ifr;
- int mask, error;
-
- sc = if_getsoftc(ifp);
- ifr = (struct ifreq *)data;
-
- error = 0;
- switch (cmd) {
- case SIOCSIFFLAGS:
- VTBE_LOCK(sc);
- if (if_getflags(ifp) & IFF_UP) {
- pio_enable_irq(sc, 1);
-
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
- vtbe_init_locked(sc);
- }
- } else {
- pio_enable_irq(sc, 0);
-
- if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
- vtbe_stop_locked(sc);
- }
- }
- sc->if_flags = if_getflags(ifp);
- VTBE_UNLOCK(sc);
- break;
- case SIOCADDMULTI:
- case SIOCDELMULTI:
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
- ifmr = (struct ifmediareq *)data;
- ifmr->ifm_count = 1;
- ifmr->ifm_status = (IFM_AVALID | IFM_ACTIVE);
- ifmr->ifm_active = (IFM_ETHER | IFM_10G_T | IFM_FDX);
- ifmr->ifm_current = ifmr->ifm_active;
- break;
- case SIOCSIFCAP:
- mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap;
- if (mask & IFCAP_VLAN_MTU) {
- if_togglecapenable(ifp, IFCAP_VLAN_MTU);
- }
- break;
-
- case SIOCSIFADDR:
- pio_enable_irq(sc, 1);
- default:
- error = ether_ioctl(ifp, cmd, data);
- break;
- }
-
- return (error);
-}
-
-static void
-vtbe_txfinish_locked(struct vtbe_softc *sc)
-{
- if_t ifp;
-
- VTBE_ASSERT_LOCKED(sc);
-
- ifp = sc->ifp;
-}
-
-static int
-vq_init(struct vtbe_softc *sc)
-{
- struct vqueue_info *vq;
- uint8_t *base;
- int size;
- int reg;
- int pfn;
-
- vq = &sc->vs_queues[sc->vs_curq];
- vq->vq_qsize = DESC_COUNT;
-
- reg = READ4(sc, VIRTIO_MMIO_QUEUE_PFN);
- pfn = be32toh(reg);
- vq->vq_pfn = pfn;
-
- size = vring_size(vq->vq_qsize, VRING_ALIGN);
- base = paddr_map(sc->beri_mem_offset,
- (pfn << PAGE_SHIFT), size);
-
- /* First pages are descriptors */
- vq->vq_desc = (struct vring_desc *)base;
- base += vq->vq_qsize * sizeof(struct vring_desc);
-
- /* Then avail ring */
- vq->vq_avail = (struct vring_avail *)base;
- base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);
-
- /* Then it's rounded up to the next page */
- base = (uint8_t *)roundup2((uintptr_t)base, VRING_ALIGN);
-
- /* And the last pages are the used ring */
- vq->vq_used = (struct vring_used *)base;
-
- /* Mark queue as allocated, and start at 0 when we use it. */
- vq->vq_flags = VQ_ALLOC;
- vq->vq_last_avail = 0;
-
- return (0);
-}
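vq_init() lays the ring out per the legacy (pre-1.0) virtio convention: the
descriptor table first, then the avail ring (flags, idx, ring[] and
used_event, hence the (2 + qsize + 1) uint16_t's), then padding up to the
alignment boundary, then the used ring. A standalone sketch of the offsets
for the 256-entry queue used here, assuming the legacy 16-byte descriptors
and 4 KiB alignment:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int
main(void)
{
	size_t qsz = 256, align = 4096;
	size_t desc_sz = qsz * 16;		/* sizeof(struct vring_desc) */
	size_t avail_sz = (2 + qsz + 1) * 2;	/* flags, idx, ring[], used_event */
	size_t used_off = ROUNDUP2(desc_sz + avail_sz, align);

	printf("desc:  0x%zx\n", (size_t)0);	/* 0x0 */
	printf("avail: 0x%zx\n", desc_sz);	/* 0x1000 */
	printf("used:  0x%zx\n", used_off);	/* 0x2000 */
	return (0);
}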
-
-static void
-vtbe_proc_rx(struct vtbe_softc *sc, struct vqueue_info *vq)
-{
- struct iovec iov[DESC_COUNT];
- struct iovec *tiov;
- if_t ifp;
- struct uio uio;
- struct mbuf *m;
- int iolen;
- int i;
- int n;
-
- ifp = sc->ifp;
-
- n = vq_getchain(sc->beri_mem_offset, vq, iov,
- DESC_COUNT, NULL);
-
- KASSERT(n >= 1 && n <= DESC_COUNT,
- ("wrong n %d", n));
-
- tiov = getcopy(iov, n);
-
- iolen = 0;
- for (i = 1; i < n; i++) {
- iolen += iov[i].iov_len;
- }
-
- uio.uio_resid = iolen;
- uio.uio_iov = &tiov[1];
- uio.uio_segflg = UIO_SYSSPACE;
- uio.uio_iovcnt = (n - 1);
- uio.uio_rw = UIO_WRITE;
-
- if ((m = m_uiotombuf(&uio, M_NOWAIT, 0, ETHER_ALIGN,
- M_PKTHDR)) == NULL) {
- if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
- goto done;
- }
-
- m->m_pkthdr.rcvif = ifp;
-
- if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
-
- CURVNET_SET(if_getvnet(ifp));
- VTBE_UNLOCK(sc);
- if_input(ifp, m);
- VTBE_LOCK(sc);
- CURVNET_RESTORE();
-
-done:
- free(tiov, M_DEVBUF);
- vq_relchain(vq, iov, n, iolen + sc->hdrsize);
-}
-
-static void
-vtbe_rxfinish_locked(struct vtbe_softc *sc)
-{
- struct vqueue_info *vq;
- int reg;
-
- /* TX queue */
- vq = &sc->vs_queues[1];
- if (!vq_ring_ready(vq))
- return;
-
- /* Process new descriptors */
- vq->vq_save_used = be16toh(vq->vq_used->idx);
-
- while (vq_has_descs(vq)) {
- vtbe_proc_rx(sc, vq);
- }
-
- /* Interrupt the other side */
- reg = htobe32(VIRTIO_MMIO_INT_VRING);
- WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);
-
- PIO_SET(sc->pio_send, Q_INTR, 1);
-}
-
-static void
-vtbe_intr(void *arg)
-{
- struct vtbe_softc *sc;
- int pending;
- uint32_t reg;
-
- sc = arg;
-
- VTBE_LOCK(sc);
-
- reg = PIO_READ(sc->pio_recv);
-
- /* Ack */
- PIO_SET(sc->pio_recv, reg, 0);
-
- pending = htobe32(reg);
- if (pending & Q_SEL) {
- reg = READ4(sc, VIRTIO_MMIO_QUEUE_SEL);
- sc->vs_curq = be32toh(reg);
- }
-
- if (pending & Q_PFN) {
- vq_init(sc);
- }
-
- if (pending & Q_NOTIFY) {
- /* beri rx / arm tx notify */
- vtbe_txfinish_locked(sc);
- }
-
- if (pending & Q_NOTIFY1) {
- vtbe_rxfinish_locked(sc);
- }
-
- VTBE_UNLOCK(sc);
-}
-
-static int
-vtbe_get_hwaddr(struct vtbe_softc *sc, uint8_t *hwaddr)
-{
- int rnd;
-
- /*
- * Generate a MAC address: 'bsd' plus 24 random low-order bits.
- */
-
- rnd = arc4random() & 0x00ffffff;
-
- hwaddr[0] = 'b';
- hwaddr[1] = 's';
- hwaddr[2] = 'd';
- hwaddr[3] = rnd >> 16;
- hwaddr[4] = rnd >> 8;
- hwaddr[5] = rnd >> 0;
-
- return (0);
-}
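The result is 62:73:64:xx:xx:xx, the ASCII codes of "bsd" followed by the
random bits. Conveniently, 0x62 ('b') has the locally-administered bit set
and the multicast bit clear, so the generated address is a valid unicast
locally-administered MAC; a trivial standalone check:

#include <stdio.h>

int
main(void)
{
	unsigned char b = 'b';	/* 0x62, first octet of the address */

	printf("locally administered: %d\n", (b & 0x02) != 0);	/* 1 */
	printf("multicast:            %d\n", (b & 0x01) != 0);	/* 0 */
	return (0);
}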
-
-static int
-pio_enable_irq(struct vtbe_softc *sc, int enable)
-{
-
- /*
- * IRQ lines should be disabled while reprogramming the FPGA core.
- */
-
- if (enable) {
- if (sc->opened == 0) {
- sc->opened = 1;
- PIO_SETUP_IRQ(sc->pio_recv, vtbe_intr, sc);
- }
- } else {
- if (sc->opened == 1) {
- PIO_TEARDOWN_IRQ(sc->pio_recv);
- sc->opened = 0;
- }
- }
-
- return (0);
-}
-
-static int
-vtbe_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "sri-cambridge,beri-vtnet"))
- return (ENXIO);
-
- device_set_desc(dev, "Virtio BERI Ethernet Controller");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-vtbe_attach(device_t dev)
-{
- uint8_t macaddr[ETHER_ADDR_LEN];
- struct vtbe_softc *sc;
- if_t ifp;
- int reg;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- sc->hdrsize = sizeof(struct virtio_net_hdr);
-
- if (bus_alloc_resources(dev, vtbe_spec, sc->res)) {
- device_printf(dev, "could not allocate resources\n");
- return (ENXIO);
- }
-
- /* Memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
- MTX_NETWORK_LOCK, MTX_DEF);
-
- if (setup_offset(dev, &sc->beri_mem_offset) != 0)
- return (ENXIO);
- if (setup_pio(dev, "pio-send", &sc->pio_send) != 0)
- return (ENXIO);
- if (setup_pio(dev, "pio-recv", &sc->pio_recv) != 0)
- return (ENXIO);
-
- /* Setup MMIO */
-
- /* Specify that we provide a network device */
- reg = htobe32(VIRTIO_ID_NETWORK);
- WRITE4(sc, VIRTIO_MMIO_DEVICE_ID, reg);
-
- /* The number of desc we support */
- reg = htobe32(DESC_COUNT);
- WRITE4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX, reg);
-
- /* Our features */
- reg = htobe32(VIRTIO_NET_F_MAC |
- VIRTIO_F_NOTIFY_ON_EMPTY);
- WRITE4(sc, VIRTIO_MMIO_HOST_FEATURES, reg);
-
- /* Get MAC */
- if (vtbe_get_hwaddr(sc, macaddr)) {
- device_printf(sc->dev, "can't get mac\n");
- return (ENXIO);
- }
-
- /* Set up the ethernet interface. */
- sc->ifp = ifp = if_alloc(IFT_ETHER);
- if_setbaudrate(ifp, IF_Gbps(10));
- if_setsoftc(ifp, sc);
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX |
- IFF_MULTICAST | IFF_PROMISC);
- if_setcapabilities(ifp, IFCAP_VLAN_MTU);
- if_setcapenable(ifp, if_getcapabilities(ifp));
- if_setstartfn(ifp, vtbe_txstart);
- if_setioctlfn(ifp, vtbe_ioctl);
- if_setinitfn(ifp, vtbe_init);
- if_setsendqlen(ifp, DESC_COUNT - 1);
- if_setsendqready(ifp);
- if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
-
- /* All ready to run, attach the ethernet interface. */
- ether_ifattach(ifp, macaddr);
-
- sc->is_attached = true;
-
- return (0);
-}
-
-static device_method_t vtbe_methods[] = {
- DEVMETHOD(device_probe, vtbe_probe),
- DEVMETHOD(device_attach, vtbe_attach),
- { 0, 0 }
-};
-
-static driver_t vtbe_driver = {
- "vtbe",
- vtbe_methods,
- sizeof(struct vtbe_softc),
-};
-
-DRIVER_MODULE(vtbe, simplebus, vtbe_driver, 0, 0);
-MODULE_DEPEND(vtbe, ether, 1, 1, 1);
diff --git a/sys/dev/beri/virtio/virtio.c b/sys/dev/beri/virtio/virtio.c
deleted file mode 100644
index 60239b335020..000000000000
--- a/sys/dev/beri/virtio/virtio.c
+++ /dev/null
@@ -1,256 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * BERI virtio mmio backend common methods
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/cdefs.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-#include <sys/conf.h>
-#include <sys/uio.h>
-#include <sys/stat.h>
-#include <sys/time.h>
-#include <sys/event.h>
-#include <sys/selinfo.h>
-#include <sys/endian.h>
-#include <sys/rwlock.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/intr.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <dev/beri/virtio/virtio.h>
-#include <dev/virtio/virtqueue.h>
-#include <dev/virtio/virtio_ring.h>
-#include <dev/altera/pio/pio.h>
-
-#include "pio_if.h"
-
-int
-vq_ring_ready(struct vqueue_info *vq)
-{
-
- return (vq->vq_flags & VQ_ALLOC);
-}
-
-int
-vq_has_descs(struct vqueue_info *vq)
-{
-
- return (vq_ring_ready(vq) && vq->vq_last_avail !=
- be16toh(vq->vq_avail->idx));
-}
-
-void *
-paddr_map(uint32_t offset, uint32_t phys, uint32_t size)
-{
- bus_space_handle_t bsh;
-
- if (bus_space_map(fdtbus_bs_tag, (phys + offset),
- size, 0, &bsh) != 0) {
- panic("Couldn't map 0x%08x\n", (phys + offset));
- }
-
- return (void *)(bsh);
-}
-
-void
-paddr_unmap(void *phys, uint32_t size)
-{
-
- bus_space_unmap(fdtbus_bs_tag, (bus_space_handle_t)phys, size);
-}
-
-static inline void
-_vq_record(uint32_t offs, int i, volatile struct vring_desc *vd,
- struct iovec *iov, int n_iov, uint16_t *flags) {
- if (i >= n_iov)
- return;
-
- iov[i].iov_base = paddr_map(offs, be64toh(vd->addr),
- be32toh(vd->len));
- iov[i].iov_len = be32toh(vd->len);
- if (flags != NULL)
- flags[i] = be16toh(vd->flags);
-}
-
-int
-vq_getchain(uint32_t offs, struct vqueue_info *vq,
- struct iovec *iov, int n_iov, uint16_t *flags)
-{
- volatile struct vring_desc *vdir, *vindir, *vp;
- int idx, ndesc, n_indir;
- int head, next;
- int i;
-
- idx = vq->vq_last_avail;
- ndesc = (be16toh(vq->vq_avail->idx) - idx);
- if (ndesc == 0)
- return (0);
-
- head = be16toh(vq->vq_avail->ring[idx & (vq->vq_qsize - 1)]);
- next = head;
-
- for (i = 0; i < VQ_MAX_DESCRIPTORS; next = be16toh(vdir->next)) {
- vdir = &vq->vq_desc[next];
- if ((be16toh(vdir->flags) & VRING_DESC_F_INDIRECT) == 0) {
- _vq_record(offs, i, vdir, iov, n_iov, flags);
- i++;
- } else {
- n_indir = be32toh(vdir->len) / 16;
- vindir = paddr_map(offs, be64toh(vdir->addr),
- be32toh(vdir->len));
- next = 0;
- for (;;) {
- vp = &vindir[next];
- _vq_record(offs, i, vp, iov, n_iov, flags);
- i+=1;
- if ((be16toh(vp->flags) & \
- VRING_DESC_F_NEXT) == 0)
- break;
- next = be16toh(vp->next);
- }
- paddr_unmap(__DEVOLATILE(void *, vindir), be32toh(vdir->len));
- }
-
- if ((be16toh(vdir->flags) & VRING_DESC_F_NEXT) == 0)
- return (i);
- }
-
- return (i);
-}
-
-void
-vq_relchain(struct vqueue_info *vq, struct iovec *iov, int n, uint32_t iolen)
-{
- volatile struct vring_used_elem *vue;
- volatile struct vring_used *vu;
- uint16_t head, uidx, mask;
- int i;
-
- mask = vq->vq_qsize - 1;
- vu = vq->vq_used;
- head = be16toh(vq->vq_avail->ring[vq->vq_last_avail++ & mask]);
-
- uidx = be16toh(vu->idx);
- vue = &vu->ring[uidx++ & mask];
- vue->id = htobe32(head);
-
- vue->len = htobe32(iolen);
- vu->idx = htobe16(uidx);
-
- /* Clean up */
- for (i = 0; i < n; i++) {
- paddr_unmap((void *)iov[i].iov_base, iov[i].iov_len);
- }
-}
-
-int
-setup_pio(device_t dev, char *name, device_t *pio_dev)
-{
- phandle_t pio_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, name, &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- pio_node = OF_node_from_xref(xref);
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == pio_node) {
- *pio_dev = ic->dev;
- return (0);
- }
- }
-
- return (ENXIO);
-}
-
-int
-setup_offset(device_t dev, uint32_t *offset)
-{
- pcell_t dts_value[2];
- phandle_t mem_node;
- phandle_t xref;
- phandle_t node;
- int len;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, "beri-mem", &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- mem_node = OF_node_from_xref(xref);
- if ((len = OF_getproplen(mem_node, "reg")) <= 0)
- return (ENXIO);
- OF_getencprop(mem_node, "reg", dts_value, len);
- *offset = dts_value[0];
-
- return (0);
-}
-
-struct iovec *
-getcopy(struct iovec *iov, int n)
-{
- struct iovec *tiov;
- int i;
-
- tiov = malloc(n * sizeof(struct iovec), M_DEVBUF, M_NOWAIT);
- for (i = 0; i < n; i++) {
- tiov[i].iov_base = iov[i].iov_base;
- tiov[i].iov_len = iov[i].iov_len;
- }
-
- return (tiov);
-}
diff --git a/sys/dev/beri/virtio/virtio.h b/sys/dev/beri/virtio/virtio.h
deleted file mode 100644
index fe142c1f25fd..000000000000
--- a/sys/dev/beri/virtio/virtio.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#define READ2(_sc, _reg) \
- bus_read_2((_sc)->res[0], _reg)
-#define READ4(_sc, _reg) \
- bus_read_4((_sc)->res[0], _reg)
-#define WRITE2(_sc, _reg, _val) \
- bus_write_2((_sc)->res[0], _reg, _val)
-#define WRITE4(_sc, _reg, _val) \
- bus_write_4((_sc)->res[0], _reg, _val)
-
-#define PAGE_SHIFT 12
-#define VRING_ALIGN 4096
-
-#define VQ_ALLOC 0x01 /* set once we have a pfn */
-#define VQ_MAX_DESCRIPTORS 512
-
-struct vqueue_info {
- uint16_t vq_qsize; /* size of this queue (a power of 2) */
- uint16_t vq_num;
- uint16_t vq_flags;
- uint16_t vq_last_avail; /* a recent value of vq_avail->va_idx */
- uint16_t vq_save_used; /* saved vq_used->vu_idx; see vq_endchains */
- uint32_t vq_pfn; /* PFN of virt queue (not shifted!) */
-
- volatile struct vring_desc *vq_desc; /* descriptor array */
- volatile struct vring_avail *vq_avail; /* the "avail" ring */
- volatile struct vring_used *vq_used; /* the "used" ring */
-};
-
-int vq_ring_ready(struct vqueue_info *vq);
-int vq_has_descs(struct vqueue_info *vq);
-void * paddr_map(uint32_t offset, uint32_t phys, uint32_t size);
-void paddr_unmap(void *phys, uint32_t size);
-int vq_getchain(uint32_t beri_mem_offset, struct vqueue_info *vq,
- struct iovec *iov, int n_iov, uint16_t *flags);
-void vq_relchain(struct vqueue_info *vq, struct iovec *iov, int n, uint32_t iolen);
-struct iovec * getcopy(struct iovec *iov, int n);
-
-int setup_pio(device_t dev, char *name, device_t *pio_dev);
-int setup_offset(device_t dev, uint32_t *offset);
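The vq_pfn field above follows the legacy QUEUE_PFN convention used by all
of these backends: the guest writes a page frame number, and the backend
recovers the ring's physical address by shifting left by PAGE_SHIFT (12),
hence the "not shifted!" comment. A one-line standalone sketch
(illustrative PFN value):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t pfn = 0x12345;			/* as written to QUEUE_PFN */
	uint64_t pa = (uint64_t)pfn << 12;	/* PAGE_SHIFT */

	printf("ring at 0x%jx\n", (uintmax_t)pa);	/* 0x12345000 */
	return (0);
}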
diff --git a/sys/dev/beri/virtio/virtio_block.c b/sys/dev/beri/virtio/virtio_block.c
deleted file mode 100644
index 0d4a37ea9772..000000000000
--- a/sys/dev/beri/virtio/virtio_block.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * BERI virtio block backend driver
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/rman.h>
-#include <sys/conf.h>
-#include <sys/stat.h>
-#include <sys/endian.h>
-#include <sys/disk.h>
-#include <sys/vnode.h>
-#include <sys/fcntl.h>
-#include <sys/kthread.h>
-#include <sys/buf.h>
-#include <sys/mdioctl.h>
-#include <sys/namei.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/intr.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <dev/beri/virtio/virtio.h>
-#include <dev/beri/virtio/virtio_mmio_platform.h>
-#include <dev/altera/pio/pio.h>
-#include <dev/virtio/mmio/virtio_mmio.h>
-#include <dev/virtio/block/virtio_blk.h>
-#include <dev/virtio/virtio_ids.h>
-#include <dev/virtio/virtio_config.h>
-#include <dev/virtio/virtio_ring.h>
-
-#include "pio_if.h"
-
-#define DPRINTF(fmt, ...)
-
-/* We use indirect descriptors */
-#define NUM_DESCS 1
-#define NUM_QUEUES 1
-
-#define VTBLK_BLK_ID_BYTES 20
-#define VTBLK_MAXSEGS 256
-
-struct beri_vtblk_softc {
- struct resource *res[1];
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- struct cdev *cdev;
- device_t dev;
- int opened;
- device_t pio_recv;
- device_t pio_send;
- struct vqueue_info vs_queues[NUM_QUEUES];
- char ident[VTBLK_BLK_ID_BYTES];
- struct ucred *cred;
- struct vnode *vnode;
- struct thread *vtblk_ktd;
- struct sx sc_mtx;
- int beri_mem_offset;
- struct md_ioctl *mdio;
- struct virtio_blk_config *cfg;
-};
-
-static struct resource_spec beri_spec[] = {
- { SYS_RES_MEMORY, 0, RF_ACTIVE },
- { -1, 0 }
-};
-
-static int
-vtblk_rdwr(struct beri_vtblk_softc *sc, struct iovec *iov,
- int cnt, int offset, int operation, int iolen)
-{
- struct vnode *vp;
- struct mount *mp;
- struct uio auio;
- int error;
-
- bzero(&auio, sizeof(auio));
-
- vp = sc->vnode;
-
- KASSERT(vp != NULL, ("file not opened"));
-
- auio.uio_iov = iov;
- auio.uio_iovcnt = cnt;
- auio.uio_offset = offset;
- auio.uio_segflg = UIO_SYSSPACE;
- auio.uio_rw = operation;
- auio.uio_resid = iolen;
- auio.uio_td = curthread;
-
- if (operation == 0) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- error = VOP_READ(vp, &auio, IO_DIRECT, sc->cred);
- VOP_UNLOCK(vp);
- } else {
- (void) vn_start_write(vp, &mp, V_WAIT);
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
- error = VOP_WRITE(vp, &auio, IO_SYNC, sc->cred);
- VOP_UNLOCK(vp);
- vn_finished_write(mp);
- }
-
- return (error);
-}
-
-static void
-vtblk_proc(struct beri_vtblk_softc *sc, struct vqueue_info *vq)
-{
- struct iovec iov[VTBLK_MAXSEGS + 2];
- uint16_t flags[VTBLK_MAXSEGS + 2];
- struct virtio_blk_outhdr *vbh;
- struct iovec *tiov;
- uint8_t *status;
- off_t offset;
- int iolen;
- int type;
- int i, n;
- int err;
-
- n = vq_getchain(sc->beri_mem_offset, vq, iov,
- VTBLK_MAXSEGS + 2, flags);
- KASSERT(n >= 2 && n <= VTBLK_MAXSEGS + 2,
- ("wrong n value %d", n));
-
- tiov = getcopy(iov, n);
- vbh = iov[0].iov_base;
-
- status = iov[n-1].iov_base;
- KASSERT(iov[n-1].iov_len == 1,
- ("iov_len == %d", iov[n-1].iov_len));
-
- type = be32toh(vbh->type) & ~VIRTIO_BLK_T_BARRIER;
- offset = be64toh(vbh->sector) * DEV_BSIZE;
-
- iolen = 0;
- for (i = 1; i < (n-1); i++) {
- iolen += iov[i].iov_len;
- }
-
- switch (type) {
- case VIRTIO_BLK_T_OUT:
- case VIRTIO_BLK_T_IN:
- err = vtblk_rdwr(sc, tiov + 1, i - 1,
- offset, type, iolen);
- break;
- case VIRTIO_BLK_T_GET_ID:
- /* Assume a single buffer */
- strncpy(iov[1].iov_base, sc->ident,
- MIN(iov[1].iov_len, sizeof(sc->ident)));
- err = 0;
- break;
- case VIRTIO_BLK_T_FLUSH:
- /* Possible? */
- default:
- err = -ENOSYS;
- break;
- }
-
- if (err < 0) {
- if (err == -ENOSYS) {
- *status = VIRTIO_BLK_S_UNSUPP;
- } else
- *status = VIRTIO_BLK_S_IOERR;
- } else
- *status = VIRTIO_BLK_S_OK;
-
- free(tiov, M_DEVBUF);
- vq_relchain(vq, iov, n, 1);
-}
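vtblk_proc() above relies on the standard virtio-blk request framing:
iov[0] is a 16-byte device-readable header, iov[1]..iov[n-2] carry the
data, and iov[n-1] is a single device-writable status byte, which is why
n >= 2 is asserted and the data length sums indices 1..n-2. A sketch of
that shape (layout per the virtio-blk spec; the struct name here is
illustrative, the driver uses struct virtio_blk_outhdr):

#include <stdint.h>

struct blk_outhdr {		/* iov[0], device-readable */
	uint32_t type;		/* VIRTIO_BLK_T_IN / _OUT / _GET_ID / ... */
	uint32_t ioprio;
	uint64_t sector;	/* offset in 512-byte units */
};
/* iov[1] .. iov[n-2]: data buffers (device-writable for reads) */
/* iov[n-1]: uint8_t status: VIRTIO_BLK_S_OK / _IOERR / _UNSUPP */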
-
-static int
-close_file(struct beri_vtblk_softc *sc, struct thread *td)
-{
- int error;
-
- if (sc->vnode != NULL) {
- vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
- sc->vnode->v_vflag &= ~VV_MD;
- VOP_UNLOCK(sc->vnode);
- error = vn_close(sc->vnode, (FREAD|FWRITE),
- sc->cred, td);
- if (error != 0)
- return (error);
- sc->vnode = NULL;
- }
-
- if (sc->cred != NULL)
- crfree(sc->cred);
-
- return (0);
-}
-
-static int
-open_file(struct beri_vtblk_softc *sc, struct thread *td)
-{
- struct nameidata nd;
- struct vattr vattr;
- int error;
- int flags;
-
- flags = (FREAD | FWRITE);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->mdio->md_file);
- error = vn_open(&nd, &flags, 0, NULL);
- if (error != 0)
- return (error);
- NDFREE_PNBUF(&nd);
-
- if (nd.ni_vp->v_type != VREG) {
- return (EINVAL);
- }
-
- error = VOP_GETATTR(nd.ni_vp, &vattr, td->td_ucred);
- if (error != 0)
- return (error);
-
- if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) {
- vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY);
- if (VN_IS_DOOMED(nd.ni_vp)) {
- return (1);
- }
- }
- nd.ni_vp->v_vflag |= VV_MD;
- VOP_UNLOCK(nd.ni_vp);
-
- sc->vnode = nd.ni_vp;
- sc->cred = crhold(td->td_ucred);
-
- return (0);
-}
-
-static int
-vtblk_notify(struct beri_vtblk_softc *sc)
-{
- struct vqueue_info *vq;
- int queue;
- int reg;
-
- vq = &sc->vs_queues[0];
- if (!vq_ring_ready(vq))
- return (0);
-
- if (!sc->opened)
- return (0);
-
- reg = READ2(sc, VIRTIO_MMIO_QUEUE_NOTIFY);
- queue = be16toh(reg);
-
- KASSERT(queue == 0, ("we support single queue only"));
-
- /* Process new descriptors */
- vq = &sc->vs_queues[queue];
- vq->vq_save_used = be16toh(vq->vq_used->idx);
- while (vq_has_descs(vq))
- vtblk_proc(sc, vq);
-
- /* Interrupt the other side */
- if ((be16toh(vq->vq_avail->flags) & VRING_AVAIL_F_NO_INTERRUPT) == 0) {
- reg = htobe32(VIRTIO_MMIO_INT_VRING);
- WRITE4(sc, VIRTIO_MMIO_INTERRUPT_STATUS, reg);
- PIO_SET(sc->pio_send, Q_INTR, 1);
- }
-
- return (0);
-}
-
-static int
-vq_init(struct beri_vtblk_softc *sc)
-{
- struct vqueue_info *vq;
- uint8_t *base;
- int size;
- int reg;
- int pfn;
-
- vq = &sc->vs_queues[0];
- vq->vq_qsize = NUM_DESCS;
-
- reg = READ4(sc, VIRTIO_MMIO_QUEUE_PFN);
- pfn = be32toh(reg);
- vq->vq_pfn = pfn;
-
- size = vring_size(vq->vq_qsize, VRING_ALIGN);
- base = paddr_map(sc->beri_mem_offset,
- (pfn << PAGE_SHIFT), size);
-
- /* First pages are descriptors */
- vq->vq_desc = (struct vring_desc *)base;
- base += vq->vq_qsize * sizeof(struct vring_desc);
-
- /* Then avail ring */
- vq->vq_avail = (struct vring_avail *)base;
- base += (2 + vq->vq_qsize + 1) * sizeof(uint16_t);
-
- /* Then it's rounded up to the next page */
- base = (uint8_t *)roundup2((uintptr_t)base, VRING_ALIGN);
-
- /* And the last pages are the used ring */
- vq->vq_used = (struct vring_used *)base;
-
- /* Mark queue as allocated, and start at 0 when we use it. */
- vq->vq_flags = VQ_ALLOC;
- vq->vq_last_avail = 0;
-
- return (0);
-}
-
-static void
-vtblk_thread(void *arg)
-{
- struct beri_vtblk_softc *sc;
- int err;
-
- sc = arg;
-
- sx_xlock(&sc->sc_mtx);
- for (;;) {
- err = msleep(sc, &sc->sc_mtx, PCATCH | PZERO, "prd", hz);
- vtblk_notify(sc);
- }
- sx_xunlock(&sc->sc_mtx);
-
- kthread_exit();
-}
-
-static int
-backend_info(struct beri_vtblk_softc *sc)
-{
- struct virtio_blk_config *cfg;
- uint32_t *s;
- int reg;
- int i;
-
- /* Specify that we provide a block device */
- reg = htobe32(VIRTIO_ID_BLOCK);
- WRITE4(sc, VIRTIO_MMIO_DEVICE_ID, reg);
-
- /* Queue size */
- reg = htobe32(NUM_DESCS);
- WRITE4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX, reg);
-
- /* Our features */
- reg = htobe32(VIRTIO_RING_F_INDIRECT_DESC
- | VIRTIO_BLK_F_BLK_SIZE
- | VIRTIO_BLK_F_SEG_MAX);
- WRITE4(sc, VIRTIO_MMIO_HOST_FEATURES, reg);
-
- cfg = sc->cfg;
- cfg->capacity = htobe64(sc->mdio->md_mediasize / DEV_BSIZE);
- cfg->size_max = 0; /* not negotiated */
- cfg->seg_max = htobe32(VTBLK_MAXSEGS);
- cfg->blk_size = htobe32(DEV_BSIZE);
-
- s = (uint32_t *)cfg;
-
- for (i = 0; i < sizeof(struct virtio_blk_config); i+=4) {
- WRITE4(sc, VIRTIO_MMIO_CONFIG + i, *s);
- s+=1;
- }
-
- strncpy(sc->ident, "Virtio block backend", sizeof(sc->ident));
-
- return (0);
-}
-
-static void
-vtblk_intr(void *arg)
-{
- struct beri_vtblk_softc *sc;
- int pending;
- int reg;
-
- sc = arg;
-
- reg = PIO_READ(sc->pio_recv);
-
- /* Ack */
- PIO_SET(sc->pio_recv, reg, 0);
-
- pending = htobe32(reg);
-
- if (pending & Q_PFN) {
- vq_init(sc);
- }
-
- if (pending & Q_NOTIFY) {
- wakeup(sc);
- }
-}
-
-static int
-beri_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
- int flags, struct thread *td)
-{
- struct beri_vtblk_softc *sc;
- int err;
-
- sc = dev->si_drv1;
-
- switch (cmd) {
- case MDIOCATTACH:
- /* take file as argument */
- if (sc->vnode != NULL) {
- /* Already opened */
- return (1);
- }
- sc->mdio = (struct md_ioctl *)addr;
- backend_info(sc);
- DPRINTF("opening file, td 0x%08x\n", (int)td);
- err = open_file(sc, td);
- if (err)
- return (err);
- PIO_SETUP_IRQ(sc->pio_recv, vtblk_intr, sc);
- sc->opened = 1;
- break;
- case MDIOCDETACH:
- if (sc->vnode == NULL) {
- /* File not opened */
- return (1);
- }
- sc->opened = 0;
- DPRINTF("closing file, td 0x%08x\n", (int)td);
- err = close_file(sc, td);
- if (err)
- return (err);
- PIO_TEARDOWN_IRQ(sc->pio_recv);
- break;
- default:
- break;
- }
-
- return (0);
-}
-
-static struct cdevsw beri_cdevsw = {
- .d_version = D_VERSION,
- .d_ioctl = beri_ioctl,
- .d_name = "virtio block backend",
-};
-
-static int
-beri_vtblk_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "sri-cambridge,beri-vtblk"))
- return (ENXIO);
-
- device_set_desc(dev, "SRI-Cambridge BERI block");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-beri_vtblk_attach(device_t dev)
-{
- struct beri_vtblk_softc *sc;
- int error;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
-
- if (bus_alloc_resources(dev, beri_spec, sc->res)) {
- device_printf(dev, "could not allocate resources\n");
- return (ENXIO);
- }
-
- /* Memory interface */
- sc->bst = rman_get_bustag(sc->res[0]);
- sc->bsh = rman_get_bushandle(sc->res[0]);
-
- sc->cfg = malloc(sizeof(struct virtio_blk_config),
- M_DEVBUF, M_NOWAIT|M_ZERO);
-
- sx_init(&sc->sc_mtx, device_get_nameunit(sc->dev));
-
- error = kthread_add(vtblk_thread, sc, NULL, &sc->vtblk_ktd,
- 0, 0, "beri_virtio_block");
- if (error) {
- device_printf(dev, "cannot create kthread\n");
- return (ENXIO);
- }
-
- if (setup_offset(dev, &sc->beri_mem_offset) != 0)
- return (ENXIO);
- if (setup_pio(dev, "pio-send", &sc->pio_send) != 0)
- return (ENXIO);
- if (setup_pio(dev, "pio-recv", &sc->pio_recv) != 0)
- return (ENXIO);
-
- sc->cdev = make_dev(&beri_cdevsw, 0, UID_ROOT, GID_WHEEL,
- S_IRWXU, "beri_vtblk");
- if (sc->cdev == NULL) {
- device_printf(dev, "Failed to create character device.\n");
- return (ENXIO);
- }
-
- sc->cdev->si_drv1 = sc;
- return (0);
-}
-
-static device_method_t beri_vtblk_methods[] = {
- DEVMETHOD(device_probe, beri_vtblk_probe),
- DEVMETHOD(device_attach, beri_vtblk_attach),
- { 0, 0 }
-};
-
-static driver_t beri_vtblk_driver = {
- "beri_vtblk",
- beri_vtblk_methods,
- sizeof(struct beri_vtblk_softc),
-};
-
-DRIVER_MODULE(beri_vtblk, simplebus, beri_vtblk_driver, 0, 0);
diff --git a/sys/dev/beri/virtio/virtio_mmio_platform.c b/sys/dev/beri/virtio/virtio_mmio_platform.c
deleted file mode 100644
index b3ca26df28ee..000000000000
--- a/sys/dev/beri/virtio/virtio_mmio_platform.c
+++ /dev/null
@@ -1,307 +0,0 @@
-/*-
- * Copyright (c) 2014-2015 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
- *
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * BERI interface for Virtio MMIO bus.
- *
- * This driver provides the interrupt engine for a
- * software-implemented Virtio MMIO backend.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/bus.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/malloc.h>
-#include <sys/rman.h>
-#include <sys/timeet.h>
-#include <sys/timetc.h>
-#include <sys/watchdog.h>
-
-#include <machine/bus.h>
-#include <machine/fdt.h>
-#include <machine/cpu.h>
-#include <machine/cache.h>
-
-#include <dev/fdt/fdt_common.h>
-#include <dev/ofw/openfirm.h>
-#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-
-#include <dev/beri/virtio/virtio_mmio_platform.h>
-#include <dev/virtio/mmio/virtio_mmio.h>
-#include <dev/altera/pio/pio.h>
-
-#include "virtio_mmio_if.h"
-#include "pio_if.h"
-
-static void platform_intr(void *arg);
-
-struct virtio_mmio_platform_softc {
- struct resource *res[1];
- void *ih;
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- device_t dev;
- void (*intr_handler)(void *);
- void *ih_user;
- device_t pio_recv;
- device_t pio_send;
- int use_pio;
-};
-
-static int
-setup_pio(struct virtio_mmio_platform_softc *sc, char *name, device_t *dev)
-{
- phandle_t pio_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- if ((node = ofw_bus_get_node(sc->dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, name, &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- pio_node = OF_node_from_xref(xref);
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == pio_node) {
- *dev = ic->dev;
- PIO_CONFIGURE(*dev, PIO_OUT_ALL,
- PIO_UNMASK_ALL);
- return (0);
- }
- }
-
- return (ENXIO);
-}
-
-static int
-virtio_mmio_platform_probe(device_t dev)
-{
-
- if (!ofw_bus_status_okay(dev))
- return (ENXIO);
-
- if (!ofw_bus_is_compatible(dev, "beri,virtio_mmio_platform"))
- return (ENXIO);
-
- device_set_desc(dev, "Virtio MMIO platform");
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-virtio_mmio_platform_attach(device_t dev)
-{
- struct virtio_mmio_platform_softc *sc;
- struct fdt_ic *fic;
- phandle_t node;
-
- sc = device_get_softc(dev);
- sc->dev = dev;
- sc->use_pio = 1;
-
- if ((setup_pio(sc, "pio-send", &sc->pio_send) != 0) ||
- (setup_pio(sc, "pio-recv", &sc->pio_recv) != 0))
- sc->use_pio = 0;
-
- if ((node = ofw_bus_get_node(sc->dev)) == -1)
- return (ENXIO);
-
- fic = malloc(sizeof(*fic), M_DEVBUF, M_WAITOK|M_ZERO);
- fic->iph = node;
- fic->dev = dev;
- SLIST_INSERT_HEAD(&fdt_ic_list_head, fic, fdt_ics);
-
- return (0);
-}
-
-static int
-platform_prewrite(device_t dev, size_t offset, int val)
-{
- struct virtio_mmio_platform_softc *sc;
-
- sc = device_get_softc(dev);
-
- switch (offset) {
- case (VIRTIO_MMIO_QUEUE_NOTIFY):
- mips_dcache_wbinv_all();
- break;
- default:
- break;
- }
-
- return (0);
-}
-
-static int
-platform_note(device_t dev, size_t offset, int val)
-{
- struct virtio_mmio_platform_softc *sc;
- int note;
- int i;
-
- sc = device_get_softc(dev);
-
- switch (offset) {
- case (VIRTIO_MMIO_QUEUE_NOTIFY):
- if (val == 0)
- note = Q_NOTIFY;
- else if (val == 1)
- note = Q_NOTIFY1;
- else
- note = 0;
- break;
- case (VIRTIO_MMIO_QUEUE_PFN):
- note = Q_PFN;
- break;
- case (VIRTIO_MMIO_QUEUE_SEL):
- note = Q_SEL;
- break;
- default:
- note = 0;
- }
-
- if (note) {
- mips_dcache_wbinv_all();
-
- if (!sc->use_pio)
- return (0);
-
- PIO_SET(sc->pio_send, note, 1);
-
- /*
- * Wait until the host acks the request; this usually
- * completes within a few cycles.
- * TODO: convert this raw spin loop into a timed wait.
- */
-
- for (i = 100; i > 0; i--) {
- if (PIO_READ(sc->pio_send) == 0)
- break;
- }
-
- if (i == 0)
- device_printf(sc->dev, "Warning: host busy\n");
- }
-
- return (0);
-}
-
-static void
-platform_intr(void *arg)
-{
- struct virtio_mmio_platform_softc *sc;
- int reg;
-
- sc = arg;
-
- if (sc->use_pio) {
- /* Read pending */
- reg = PIO_READ(sc->pio_recv);
-
- /* Ack */
- PIO_SET(sc->pio_recv, reg, 0);
- }
-
- /* Writeback, invalidate cache */
- mips_dcache_wbinv_all();
-
- if (sc->intr_handler != NULL)
- sc->intr_handler(sc->ih_user);
-}
-
-static int
-platform_setup_intr(device_t dev, device_t mmio_dev,
- void *intr_handler, void *ih_user)
-{
- struct virtio_mmio_platform_softc *sc;
- int rid;
-
- sc = device_get_softc(dev);
-
- sc->intr_handler = intr_handler;
- sc->ih_user = ih_user;
-
- if (sc->use_pio) {
- PIO_SETUP_IRQ(sc->pio_recv, platform_intr, sc);
- return (0);
- }
-
- rid = 0;
- sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_ACTIVE);
- if (!sc->res[0]) {
- device_printf(dev, "Can't allocate interrupt\n");
- return (ENXIO);
- }
-
- if (bus_setup_intr(dev, sc->res[0], INTR_TYPE_MISC | INTR_MPSAFE,
- NULL, platform_intr, sc, &sc->ih)) {
- device_printf(dev, "Can't setup the interrupt\n");
- return (ENXIO);
- }
-
- return (0);
-}
-
-static int
-platform_poll(device_t dev)
-{
-
- mips_dcache_wbinv_all();
-
- return (0);
-}
-
-static device_method_t virtio_mmio_platform_methods[] = {
- DEVMETHOD(device_probe, virtio_mmio_platform_probe),
- DEVMETHOD(device_attach, virtio_mmio_platform_attach),
-
- /* virtio_mmio_if.h */
- DEVMETHOD(virtio_mmio_prewrite, platform_prewrite),
- DEVMETHOD(virtio_mmio_note, platform_note),
- DEVMETHOD(virtio_mmio_poll, platform_poll),
- DEVMETHOD(virtio_mmio_setup_intr, platform_setup_intr),
- DEVMETHOD_END
-};
-
-static driver_t virtio_mmio_platform_driver = {
- "virtio_mmio_platform",
- virtio_mmio_platform_methods,
- sizeof(struct virtio_mmio_platform_softc),
-};
-
-DRIVER_MODULE(virtio_mmio_platform, simplebus, virtio_mmio_platform_driver,
- 0, 0);
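The deleted notify path above spins on PIO_READ() for up to 100 iterations with no delay between polls, which its own comment flags as a TODO. A minimal sketch of a bounded, timed variant, assuming the same PIO_SET()/PIO_READ() kobj interface from the deleted pio_if.m and a host that clears the doorbell register on acknowledgement (the helper name is hypothetical, not from the tree):

    /*
     * Hypothetical sketch only: a timed version of the deleted driver's
     * doorbell handshake against the same softc and PIO interface.
     */
    static int
    platform_note_wait(struct virtio_mmio_platform_softc *sc, int note)
    {
            int timeout;

            PIO_SET(sc->pio_send, note, 1);

            /* Poll for roughly 1 ms instead of a raw 100-iteration spin. */
            for (timeout = 100; timeout > 0; timeout--) {
                    if (PIO_READ(sc->pio_send) == 0)
                            return (0);
                    DELAY(10);
            }

            device_printf(sc->dev, "host did not ack doorbell\n");
            return (ETIMEDOUT);
    }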
diff --git a/sys/dev/bfe/if_bfe.c b/sys/dev/bfe/if_bfe.c
index 962dd24ae85c..2fb6938fbdc5 100644
--- a/sys/dev/bfe/if_bfe.c
+++ b/sys/dev/bfe/if_bfe.c
@@ -477,11 +477,6 @@ bfe_attach(device_t dev)
/* Set up ifnet structure */
ifp = sc->bfe_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "failed to if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -556,8 +551,6 @@ bfe_detach(device_t dev)
BFE_UNLOCK(sc);
bus_generic_detach(dev);
- if (sc->bfe_miibus != NULL)
- device_delete_child(dev, sc->bfe_miibus);
bfe_release_resources(sc);
bfe_dma_free(sc);
diff --git a/sys/dev/bge/if_bge.c b/sys/dev/bge/if_bge.c
index 15cd3b0df8e9..cf3084f9b768 100644
--- a/sys/dev/bge/if_bge.c
+++ b/sys/dev/bge/if_bge.c
@@ -2699,7 +2699,6 @@ bge_chipid(device_t dev)
static int
bge_probe(device_t dev)
{
- char buf[96];
char model[64];
const struct bge_revision *br;
const char *pname;
@@ -2727,9 +2726,8 @@ bge_probe(device_t dev)
br != NULL ? br->br_name :
"NetXtreme/NetLink Ethernet Controller");
}
- snprintf(buf, sizeof(buf), "%s, %sASIC rev. %#08x",
+ device_set_descf(dev, "%s, %sASIC rev. %#08x",
model, br != NULL ? "" : "unknown ", id);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
t++;
@@ -3714,11 +3712,6 @@ bge_attach(device_t dev)
/* Set up ifnet structure */
ifp = sc->bge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->bge_dev, "failed to if_alloc()\n");
- error = ENXIO;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -3897,12 +3890,6 @@ again:
~BGE_MSIMODE_ONE_SHOT_DISABLE);
sc->bge_tq = taskqueue_create_fast("bge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->bge_tq);
- if (sc->bge_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENOMEM;
- goto fail;
- }
error = taskqueue_start_threads(&sc->bge_tq, 1, PI_NET,
"%s taskq", device_get_nameunit(sc->bge_dev));
if (error != 0) {
@@ -3962,7 +3949,6 @@ bge_detach(device_t dev)
ifmedia_removeall(&sc->bge_ifmedia);
else if (sc->bge_miibus != NULL) {
bus_generic_detach(dev);
- device_delete_child(dev, sc->bge_miibus);
}
bge_release_resources(sc);
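The bge_probe() hunk above is one instance of a pattern repeated throughout this diff: a local snprintf(9) buffer plus device_set_desc_copy() collapses into a single device_set_descf() call, which formats and copies the description internally. A minimal sketch of the resulting shape; the driver name, description text, and revision source are placeholders rather than anything taken from the diff:

    static int
    mydev_probe(device_t dev)
    {
            uint32_t rev;

            rev = pci_get_revid(dev);       /* placeholder revision source */
            /* One call formats and sets the description; no local buffer. */
            device_set_descf(dev, "Example Controller, ASIC rev. %#08x", rev);
            return (BUS_PROBE_DEFAULT);
    }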
diff --git a/sys/dev/bhnd/bcma/bcma.c b/sys/dev/bhnd/bcma/bcma.c
index c9df03cfb504..a83873b321c2 100644
--- a/sys/dev/bhnd/bcma/bcma.c
+++ b/sys/dev/bhnd/bcma/bcma.c
@@ -81,7 +81,6 @@ bcma_attach(device_t dev)
/* Enumerate children */
if ((error = bcma_add_children(dev))) {
- device_delete_children(dev);
return (error);
}
@@ -678,7 +677,7 @@ bcma_add_children(device_t bus)
bcma_erom = (struct bcma_erom *)erom;
while ((error = bcma_erom_next_corecfg(bcma_erom, &corecfg)) == 0) {
/* Add the child device */
- child = BUS_ADD_CHILD(bus, 0, NULL, -1);
+ child = BUS_ADD_CHILD(bus, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
error = ENXIO;
goto cleanup;
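Here and in the bhnd, chipc, and spibus hunks below, the literal unit number -1 passed to BUS_ADD_CHILD(), device_add_child(), and device_find_child() becomes the named constant DEVICE_UNIT_ANY, which spells out the "pick any free unit" request. The call shape is otherwise unchanged, e.g.:

    /* Was: child = BUS_ADD_CHILD(bus, 0, NULL, -1); */
    child = BUS_ADD_CHILD(bus, 0, NULL, DEVICE_UNIT_ANY);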
diff --git a/sys/dev/bhnd/bhnd_subr.c b/sys/dev/bhnd/bhnd_subr.c
index 0d38c1ca8a24..4818fffd5659 100644
--- a/sys/dev/bhnd/bhnd_subr.c
+++ b/sys/dev/bhnd/bhnd_subr.c
@@ -1765,18 +1765,10 @@ void
bhnd_set_custom_core_desc(device_t dev, const char *dev_name)
{
const char *vendor_name;
- char *desc;
vendor_name = bhnd_get_vendor_name(dev);
- asprintf(&desc, M_BHND, "%s %s, rev %hhu", vendor_name, dev_name,
+ device_set_descf(dev, "%s %s, rev %hhu", vendor_name, dev_name,
bhnd_get_hwrev(dev));
-
- if (desc != NULL) {
- device_set_desc_copy(dev, desc);
- free(desc, M_BHND);
- } else {
- device_set_desc(dev, dev_name);
- }
}
/**
@@ -1802,7 +1794,6 @@ void
bhnd_set_default_bus_desc(device_t dev, const struct bhnd_chipid *chip_id)
{
const char *bus_name;
- char *desc;
char chip_name[BHND_CHIPID_MAX_NAMELEN];
/* Determine chip type's bus name */
@@ -1827,14 +1818,7 @@ bhnd_set_default_bus_desc(device_t dev, const struct bhnd_chipid *chip_id)
chip_id->chip_id);
/* Format and set device description */
- asprintf(&desc, M_BHND, "%s %s", chip_name, bus_name);
- if (desc != NULL) {
- device_set_desc_copy(dev, desc);
- free(desc, M_BHND);
- } else {
- device_set_desc(dev, bus_name);
- }
-
+ device_set_descf(dev, "%s %s", chip_name, bus_name);
}
/**
@@ -2186,7 +2170,7 @@ bhnd_bus_generic_get_nvram_var(device_t dev, device_t child, const char *name,
bus_topo_assert();
/* Look for a directly-attached NVRAM child */
- if ((nvram = device_find_child(dev, "bhnd_nvram", -1)) != NULL)
+ if ((nvram = device_find_child(dev, "bhnd_nvram", DEVICE_UNIT_ANY)) != NULL)
return BHND_NVRAM_GETVAR(nvram, name, buf, size, type);
/* Try to delegate to parent */
diff --git a/sys/dev/bhnd/bhndb/bhndb.c b/sys/dev/bhnd/bhndb/bhndb.c
index af62057690ac..f9d56a9b9226 100644
--- a/sys/dev/bhnd/bhndb/bhndb.c
+++ b/sys/dev/bhnd/bhndb/bhndb.c
@@ -558,7 +558,7 @@ bhndb_attach(device_t dev, struct bhnd_chipid *cid,
}
/* Add our bridged bus device */
- sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", -1);
+ sc->bus_dev = BUS_ADD_CHILD(dev, BHND_PROBE_BUS, "bhnd", DEVICE_UNIT_ANY);
if (sc->bus_dev == NULL) {
error = ENXIO;
goto failed;
@@ -624,10 +624,6 @@ bhndb_generic_detach(device_t dev)
if ((error = bus_generic_detach(dev)))
return (error);
- /* Delete children */
- if ((error = device_delete_children(dev)))
- return (error);
-
/* Clean up our service registry */
if ((error = bhnd_service_registry_fini(&sc->services)))
return (error);
@@ -1037,7 +1033,7 @@ static int
bhndb_release_resource(device_t dev, device_t child, struct resource *r)
{
struct bhndb_softc *sc;
- struct resource_list_entry *rle;
+ struct resource_list_entry *rle = NULL;
bool passthrough;
int error;
@@ -1058,16 +1054,17 @@ bhndb_release_resource(device_t dev, device_t child, struct resource *r)
return (error);
}
+ /* Check for resource list entry */
+ if (!passthrough)
+ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
+ rman_get_type(r), rman_get_rid(r));
+
if ((error = rman_release_resource(r)))
return (error);
- if (!passthrough) {
- /* Clean resource list entry */
- rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
- rman_get_type(r), rman_get_rid(r));
- if (rle != NULL)
- rle->res = NULL;
- }
+ /* Clean resource list entry */
+ if (rle != NULL)
+ rle->res = NULL;
return (0);
}
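The reordering in bhndb_release_resource() above matters because rman_get_type() and rman_get_rid() may not be called on a resource that has already been handed back to rman. A condensed sketch of the ordering the hunk establishes; error handling and the passthrough test mirror the patched function:

    struct resource_list_entry *rle = NULL;

    /* Look up the list entry while 'r' is still valid... */
    if (!passthrough)
            rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
                rman_get_type(r), rman_get_rid(r));

    /* ...because after this call, rman_get_*() on 'r' is no longer legal. */
    if ((error = rman_release_resource(r)))
            return (error);

    if (rle != NULL)
            rle->res = NULL;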
diff --git a/sys/dev/bhnd/bhndb/bhndb_pci.c b/sys/dev/bhnd/bhndb/bhndb_pci.c
index 4e205614ce8a..0cdcba1daefb 100644
--- a/sys/dev/bhnd/bhndb/bhndb_pci.c
+++ b/sys/dev/bhnd/bhndb/bhndb_pci.c
@@ -458,8 +458,7 @@ bhndb_pci_attach(device_t dev)
goto cleanup;
/* Probe and attach our children */
- if ((error = bus_generic_attach(dev)))
- goto cleanup;
+ bus_attach_children(dev);
bhndb_pci_probe_free_core_table(cores);
diff --git a/sys/dev/bhnd/cores/chipc/bhnd_pmu_chipc.c b/sys/dev/bhnd/cores/chipc/bhnd_pmu_chipc.c
index 95b19d973e26..ce50fcd0ee1f 100644
--- a/sys/dev/bhnd/cores/chipc/bhnd_pmu_chipc.c
+++ b/sys/dev/bhnd/cores/chipc/bhnd_pmu_chipc.c
@@ -62,7 +62,6 @@ bhnd_pmu_chipc_probe(device_t dev)
struct chipc_caps *ccaps;
struct chipc_softc *chipc_sc;
device_t chipc;
- char desc[34];
int error;
uint32_t pcaps;
uint8_t rev;
@@ -87,8 +86,7 @@ bhnd_pmu_chipc_probe(device_t dev)
/* Set description */
rev = BHND_PMU_GET_BITS(pcaps, BHND_PMU_CAP_REV);
- snprintf(desc, sizeof(desc), "Broadcom ChipCommon PMU, rev %hhu", rev);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "Broadcom ChipCommon PMU, rev %hhu", rev);
return (BUS_PROBE_NOWILDCARD);
}
diff --git a/sys/dev/bhnd/cores/chipc/chipc.c b/sys/dev/bhnd/cores/chipc/chipc.c
index 60cb04400cb0..24697a8f0b17 100644
--- a/sys/dev/bhnd/cores/chipc/chipc.c
+++ b/sys/dev/bhnd/cores/chipc/chipc.c
@@ -211,13 +211,12 @@ chipc_attach(device_t dev)
* response to ChipCommon API requests.
*
* Since our children may need access to ChipCommon, this must be done
- * before attaching our children below (via bus_generic_attach).
+ * before attaching our children below (via bus_attach_children).
*/
if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC)))
goto failed;
- if ((error = bus_generic_attach(dev)))
- goto failed;
+ bus_attach_children(dev);
return (0);
@@ -245,9 +244,6 @@ chipc_detach(device_t dev)
if ((error = bus_generic_detach(dev)))
return (error);
- if ((error = device_delete_children(dev)))
- return (error);
-
if ((error = bhnd_deregister_provider(dev, BHND_SERVICE_ANY)))
return (error);
@@ -270,7 +266,7 @@ chipc_add_children(struct chipc_softc *sc)
if (sc->caps.nvram_src == BHND_NVRAM_SRC_SPROM ||
sc->caps.nvram_src == BHND_NVRAM_SRC_OTP)
{
- child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_nvram", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add nvram device\n");
return (ENXIO);
@@ -293,13 +289,13 @@ chipc_add_children(struct chipc_softc *sc)
* attached directly to the bhnd(4) bus -- not chipc.
*/
if (sc->caps.pmu && !sc->caps.aob) {
- child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pmu", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add pmu\n");
return (ENXIO);
}
} else if (sc->caps.pwr_ctrl) {
- child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, "bhnd_pwrctl", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add pwrctl\n");
return (ENXIO);
@@ -307,7 +303,7 @@ chipc_add_children(struct chipc_softc *sc)
}
/* GPIO */
- child = BUS_ADD_CHILD(sc->dev, 0, "gpio", -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, "gpio", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add gpio\n");
return (ENXIO);
@@ -331,7 +327,7 @@ chipc_add_children(struct chipc_softc *sc)
irq_rid = 0;
mem_rid = 0;
- child = BUS_ADD_CHILD(sc->dev, 0, "uart", -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, "uart", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add uart%u\n", i);
return (ENXIO);
@@ -360,7 +356,7 @@ chipc_add_children(struct chipc_softc *sc)
if (flash_bus != NULL) {
int rid;
- child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, -1);
+ child = BUS_ADD_CHILD(sc->dev, 0, flash_bus, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add %s device\n",
flash_bus);
@@ -894,6 +890,10 @@ chipc_release_resource(device_t dev, device_t child, struct resource *r)
if (cr == NULL)
return (EINVAL);
+ /* Cache rle */
+ rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
+ rman_get_type(r), rman_get_rid(r));
+
/* Deactivate resources */
error = bus_generic_rman_release_resource(dev, child, r);
if (error != 0)
@@ -903,8 +903,6 @@ chipc_release_resource(device_t dev, device_t child, struct resource *r)
chipc_release_region(sc, cr, RF_ALLOCATED);
/* Clear reference from the resource list entry if exists */
- rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
- rman_get_type(r), rman_get_rid(r));
if (rle != NULL)
rle->res = NULL;
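The chipc_attach() hunk above also shows why so many error paths disappear in this diff: bus_generic_attach() returned an error that had to be checked and unwound, while its replacement bus_attach_children() returns void. A sketch of the attach tail using the names from the hunk:

    if ((error = bhnd_register_provider(dev, BHND_SERVICE_CHIPC)))
            goto failed;            /* children may need this service */

    bus_attach_children(dev);       /* void: no error path to unwind */
    return (0);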
diff --git a/sys/dev/bhnd/cores/chipc/chipc_gpio.c b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
index a110bdda5fa7..429de0fc1fd8 100644
--- a/sys/dev/bhnd/cores/chipc/chipc_gpio.c
+++ b/sys/dev/bhnd/cores/chipc/chipc_gpio.c
@@ -173,11 +173,13 @@ chipc_gpio_attach(device_t dev)
if (CC_GPIO_QUIRK(sc, NO_GPIOC)) {
sc->gpiobus = NULL;
} else {
- if ((sc->gpiobus = gpiobus_attach_bus(dev)) == NULL) {
+ if ((sc->gpiobus = gpiobus_add_bus(dev)) == NULL) {
device_printf(dev, "failed to attach gpiobus\n");
error = ENXIO;
goto failed;
}
+
+ bus_attach_children(dev);
}
/* Register as the bus GPIO provider */
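The chipc_gpio hunk replaces the single-call gpiobus_attach_bus() with gpiobus_add_bus() followed by an explicit bus_attach_children(), separating child creation from attachment. A sketch of the attach tail, using the names from the hunk:

    if ((sc->gpiobus = gpiobus_add_bus(dev)) == NULL)
            return (ENXIO);         /* child bus device created... */

    bus_attach_children(dev);       /* ...then probed and attached */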
diff --git a/sys/dev/bhnd/cores/chipc/chipc_spi.c b/sys/dev/bhnd/cores/chipc/chipc_spi.c
index 75e4b5cb7bc4..290933e5ef25 100644
--- a/sys/dev/bhnd/cores/chipc/chipc_spi.c
+++ b/sys/dev/bhnd/cores/chipc/chipc_spi.c
@@ -107,7 +107,8 @@ chipc_spi_attach(device_t dev)
* XXX: This should be replaced with a DEVICE_IDENTIFY implementation
* in chipc-specific subclasses of the mx25l and at45d drivers.
*/
- if ((spibus = device_add_child(dev, "spibus", -1)) == NULL) {
+ if ((spibus = device_add_child(dev, "spibus",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "failed to add spibus\n");
error = ENXIO;
goto failed;
@@ -115,14 +116,14 @@ chipc_spi_attach(device_t dev)
/* Let spibus perform full attach before we try to call
* BUS_ADD_CHILD() */
- if ((error = bus_generic_attach(dev)))
- goto failed;
+ bus_attach_children(dev);
/* Determine flash type and add the flash child */
ccaps = BHND_CHIPC_GET_CAPS(device_get_parent(dev));
flash_name = chipc_sflash_device_name(ccaps->flash_type);
if (flash_name != NULL) {
- flash_dev = BUS_ADD_CHILD(spibus, 0, flash_name, -1);
+ flash_dev = BUS_ADD_CHILD(spibus, 0, flash_name,
+ DEVICE_UNIT_ANY);
if (flash_dev == NULL) {
device_printf(dev, "failed to add %s\n", flash_name);
error = ENXIO;
diff --git a/sys/dev/bhnd/cores/pci/bhnd_pci.c b/sys/dev/bhnd/cores/pci/bhnd_pci.c
index 7fd104f3460c..45775916e5cf 100644
--- a/sys/dev/bhnd/cores/pci/bhnd_pci.c
+++ b/sys/dev/bhnd/cores/pci/bhnd_pci.c
@@ -125,7 +125,6 @@ int
bhnd_pci_generic_attach(device_t dev)
{
struct bhnd_pci_softc *sc;
- int error;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -141,16 +140,9 @@ bhnd_pci_generic_attach(device_t dev)
BHND_PCI_LOCK_INIT(sc);
/* Probe and attach children */
- if ((error = bus_generic_attach(dev)))
- goto cleanup;
+ bus_attach_children(dev);
return (0);
-
-cleanup:
- bhnd_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
- BHND_PCI_LOCK_DESTROY(sc);
-
- return (error);
}
int
diff --git a/sys/dev/bhnd/cores/pcie2/bhnd_pcie2.c b/sys/dev/bhnd/cores/pcie2/bhnd_pcie2.c
index 4f880ed4dee9..15a93f9e0b93 100644
--- a/sys/dev/bhnd/cores/pcie2/bhnd_pcie2.c
+++ b/sys/dev/bhnd/cores/pcie2/bhnd_pcie2.c
@@ -89,7 +89,6 @@ int
bhnd_pcie2_generic_attach(device_t dev)
{
struct bhnd_pcie2_softc *sc;
- int error;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -105,16 +104,9 @@ bhnd_pcie2_generic_attach(device_t dev)
BHND_PCIE2_LOCK_INIT(sc);
/* Probe and attach children */
- if ((error = bus_generic_attach(dev)))
- goto cleanup;
+ bus_attach_children(dev);
return (0);
-
-cleanup:
- bhnd_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
- BHND_PCIE2_LOCK_DESTROY(sc);
-
- return (error);
}
int
diff --git a/sys/dev/bhnd/cores/usb/bhnd_ehci.c b/sys/dev/bhnd/cores/usb/bhnd_ehci.c
deleted file mode 100644
index 765d7eaaa144..000000000000
--- a/sys/dev/bhnd/cores/usb/bhnd_ehci.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD.
- * Copyright (c) 2010, Aleksandr Rybalko <ray@ddteam.net>
- * All rights reserved.
- *
- * Developed by Semihalf.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of MARVELL nor the names of contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-/*
- * BHND attachment driver for the USB Enhanced Host Controller.
- * Ported from ZRouter with minor adaptations for FreeBSD 11.
- */
-
-#include "opt_bus.h"
-
-#include <sys/stdint.h>
-#include <sys/stddef.h>
-#include <sys/param.h>
-#include <sys/queue.h>
-#include <sys/rman.h>
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/bus.h>
-#include <sys/linker_set.h>
-#include <sys/module.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/condvar.h>
-#include <sys/sysctl.h>
-#include <sys/sx.h>
-#include <sys/unistd.h>
-#include <sys/callout.h>
-#include <sys/malloc.h>
-#include <sys/priv.h>
-
-#include <dev/usb/usb.h>
-#include <dev/usb/usbdi.h>
-
-#include <dev/usb/usb_core.h>
-#include <dev/usb/usb_busdma.h>
-#include <dev/usb/usb_process.h>
-#include <dev/usb/usb_util.h>
-
-#include <dev/usb/usb_controller.h>
-#include <dev/usb/usb_bus.h>
-#include <dev/usb/controller/ehci.h>
-#include <dev/usb/controller/ehcireg.h>
-
-#include <dev/bhnd/bhnd.h>
-
-#define EHCI_HC_DEVSTR "Broadcom EHCI"
-
-#define USB_BRIDGE_INTR_CAUSE 0x210
-#define USB_BRIDGE_INTR_MASK 0x214
-
-static device_attach_t bhnd_ehci_attach;
-static device_detach_t bhnd_ehci_detach;
-
-static int bhnd_ehci_probe(device_t self);
-static void bhnd_ehci_post_reset(struct ehci_softc *ehci_softc);
-
-static int
-bhnd_ehci_probe(device_t self)
-{
-
- device_set_desc(self, EHCI_HC_DEVSTR);
-
- return (BUS_PROBE_DEFAULT);
-}
-
-static void
-bhnd_ehci_post_reset(struct ehci_softc *ehci_softc)
-{
- uint32_t usbmode;
-
- /* Force HOST mode */
- usbmode = EOREAD4(ehci_softc, EHCI_USBMODE_NOLPM);
- usbmode &= ~EHCI_UM_CM;
- usbmode |= EHCI_UM_CM_HOST;
- EOWRITE4(ehci_softc, EHCI_USBMODE_NOLPM, usbmode);
-}
-
-static int
-bhnd_ehci_attach(device_t self)
-{
- ehci_softc_t *sc;
- int err;
- int rid;
-
- sc = device_get_softc(self);
- /* initialise some bus fields */
- sc->sc_bus.parent = self;
- sc->sc_bus.devices = sc->sc_devices;
- sc->sc_bus.devices_max = EHCI_MAX_DEVICES;
- sc->sc_bus.usbrev = USB_REV_2_0;
- sc->sc_bus.dma_bits = 32;
-
- /* get all DMA memory */
- if ((err = usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(self),
- &ehci_iterate_hw_softc)) != 0) {
- BHND_ERROR_DEV(self, "can't allocate DMA memory: %d", err);
- return (ENOMEM);
- }
-
- rid = 0;
- sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (!sc->sc_io_res) {
- BHND_ERROR_DEV(self, "Could not map memory");
- goto error;
- }
- sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
- sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
- sc->sc_io_size = rman_get_size(sc->sc_io_res);
-
- rid = 0;
- sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
-
- if (sc->sc_irq_res == NULL) {
- BHND_ERROR_DEV(self, "Could not allocate error irq");
- bhnd_ehci_detach(self);
- return (ENXIO);
- }
-
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
- if (!sc->sc_bus.bdev) {
- BHND_ERROR_DEV(self, "Could not add USB device");
- goto error;
- }
- device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
- device_set_desc(sc->sc_bus.bdev, EHCI_HC_DEVSTR);
-
- strlcpy(sc->sc_vendor, "Broadcom", sizeof(sc->sc_vendor));
-
- err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
- if (err) {
- BHND_ERROR_DEV(self, "Could not setup irq, %d", err);
- sc->sc_intr_hdl = NULL;
- goto error;
- }
-
- sc->sc_flags |= EHCI_SCFLG_LOSTINTRBUG;
- sc->sc_vendor_post_reset = bhnd_ehci_post_reset;
-
- err = ehci_init(sc);
- if (!err) {
- err = device_probe_and_attach(sc->sc_bus.bdev);
- }
- if (err) {
- BHND_ERROR_DEV(self, "USB init failed err=%d", err);
- goto error;
- }
- return (0);
-
-error:
- bhnd_ehci_detach(self);
- return (ENXIO);
-}
-
-static int
-bhnd_ehci_detach(device_t self)
-{
- ehci_softc_t *sc;
- int err;
-
- sc = device_get_softc(self);
-
- /* during module unload there are lots of children leftover */
- device_delete_children(self);
-
- /*
- * disable interrupts that might have been switched on in ehci_init
- */
-#ifdef notyet
- if (sc->sc_io_res) {
- EWRITE4(sc, EHCI_USBINTR, 0);
- EWRITE4(sc, USB_BRIDGE_INTR_MASK, 0);
- }
-#endif
- if (sc->sc_irq_res && sc->sc_intr_hdl) {
- /*
- * only call ehci_detach() after ehci_init()
- */
- ehci_detach(sc);
-
- err = bus_teardown_intr(self, sc->sc_irq_res, sc->sc_intr_hdl);
-
- if (err)
- /* XXX or should we panic? */
- BHND_ERROR_DEV(self, "Could not tear down irq, %d", err);
-
- sc->sc_intr_hdl = NULL;
- }
- if (sc->sc_irq_res) {
- bus_release_resource(self, SYS_RES_IRQ, 0, sc->sc_irq_res);
- sc->sc_irq_res = NULL;
- }
- if (sc->sc_io_res) {
- bus_release_resource(self, SYS_RES_MEMORY, 0, sc->sc_io_res);
- sc->sc_io_res = NULL;
- }
- usb_bus_mem_free_all(&sc->sc_bus, &ehci_iterate_hw_softc);
-
- return (0);
-}
-
-static device_method_t ehci_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, bhnd_ehci_probe),
- DEVMETHOD(device_attach, bhnd_ehci_attach),
- DEVMETHOD(device_detach, bhnd_ehci_detach),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
-
- /* Bus interface */
- DEVMETHOD(bus_print_child, bus_generic_print_child),
- {0, 0}
-};
-
-static driver_t ehci_driver = {
- "ehci",
- ehci_methods,
- sizeof(ehci_softc_t),
-};
-
-DRIVER_MODULE(ehci, bhnd_usb, ehci_driver, 0, 0);
-MODULE_DEPEND(ehci, usb, 1, 1, 1);
diff --git a/sys/dev/bhnd/cores/usb/bhnd_ohci.c b/sys/dev/bhnd/cores/usb/bhnd_ohci.c
deleted file mode 100644
index c72c50be4675..000000000000
--- a/sys/dev/bhnd/cores/usb/bhnd_ohci.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 1998 The NetBSD Foundation, Inc.
- * Copyright (c) 2010, Aleksandr Rybalko <ray@ddteam.net>
- * All rights reserved.
- *
- * This code is derived from software contributed to The NetBSD Foundation
- * by Lennart Augustsson (augustss@carlstedt.se) at
- * Carlstedt Research & Technology.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-/*
- * USB Open Host Controller driver.
- *
- * OHCI spec: http://www.intel.com/design/usb/ohci11d.pdf
- */
-
-/* The low-level controller code for OHCI has been split into
- * SIBA probes and OHCI-specific code.  This was done to facilitate
- * sharing code between the *BSDs.
- */
-
-#include <sys/stdint.h>
-#include <sys/stddef.h>
-#include <sys/param.h>
-#include <sys/queue.h>
-#include <sys/rman.h>
-#include <sys/types.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/bus.h>
-#include <sys/linker_set.h>
-#include <sys/module.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/condvar.h>
-#include <sys/sysctl.h>
-#include <sys/sx.h>
-#include <sys/unistd.h>
-#include <sys/callout.h>
-#include <sys/malloc.h>
-#include <sys/priv.h>
-
-#include <dev/usb/usb.h>
-#include <dev/usb/usbdi.h>
-
-#include <dev/usb/usb_core.h>
-#include <dev/usb/usb_busdma.h>
-#include <dev/usb/usb_process.h>
-#include <dev/usb/usb_util.h>
-
-#include <dev/usb/usb_controller.h>
-#include <dev/usb/usb_bus.h>
-#include <dev/usb/controller/ohci.h>
-#include <dev/usb/controller/ohcireg.h>
-
-static device_probe_t bhnd_ohci_probe;
-static device_attach_t bhnd_ohci_attach;
-static device_detach_t bhnd_ohci_detach;
-
-static int
-bhnd_ohci_probe(device_t self)
-{
- device_set_desc(self, "Broadcom OHCI");
- return (0);
-}
-
-static int
-bhnd_ohci_attach(device_t self)
-{
- ohci_softc_t *sc;
- int rid;
- int err;
-
- sc = device_get_softc(self);
- /* initialise some bus fields */
- sc->sc_bus.parent = self;
- sc->sc_bus.devices = sc->sc_devices;
- sc->sc_bus.devices_max = OHCI_MAX_DEVICES;
- sc->sc_bus.dma_bits = 32;
-
- /* get all DMA memory */
- if (usb_bus_mem_alloc_all(&sc->sc_bus, USB_GET_DMA_TAG(self),
- &ohci_iterate_hw_softc)) {
- return (ENOMEM);
- }
- sc->sc_dev = self;
-
- rid = 0;
- sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
- RF_ACTIVE);
- if (!sc->sc_io_res) {
- device_printf(self, "Could not map memory\n");
- goto error;
- }
- sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
- sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
- sc->sc_io_size = rman_get_size(sc->sc_io_res);
-
- rid = 0;
- sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (sc->sc_irq_res == NULL) {
- device_printf(self, "Could not allocate irq\n");
- goto error;
- }
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
- if (!sc->sc_bus.bdev) {
- device_printf(self, "Could not add USB device\n");
- goto error;
- }
- device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
-
- strlcpy(sc->sc_vendor, "Broadcom", sizeof(sc->sc_vendor));
-
- err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
- NULL, (driver_intr_t *)ohci_interrupt, sc, &sc->sc_intr_hdl);
-
- if (err) {
- device_printf(self, "Could not setup irq, %d\n", err);
- sc->sc_intr_hdl = NULL;
- goto error;
- }
- err = ohci_init(sc);
- if (!err) {
- err = device_probe_and_attach(sc->sc_bus.bdev);
- }
- if (err) {
- device_printf(self, "USB init failed\n");
- goto error;
- }
- return (0);
-
-error:
- bhnd_ohci_detach(self);
- return (ENXIO);
-}
-
-static int
-bhnd_ohci_detach(device_t self)
-{
- ohci_softc_t *sc;
-
- sc = device_get_softc(self);
-
- /* during module unload there are lots of children leftover */
- device_delete_children(self);
-
- if (sc->sc_irq_res && sc->sc_intr_hdl) {
- /*
- * only call ohci_detach() after ohci_init()
- */
- ohci_detach(sc);
-
- int err = bus_teardown_intr(self, sc->sc_irq_res, sc->sc_intr_hdl);
-
- if (err) {
- /* XXX or should we panic? */
- device_printf(self, "Could not tear down irq, %d\n",
- err);
- }
- sc->sc_intr_hdl = NULL;
- }
- if (sc->sc_irq_res) {
- bus_release_resource(self, SYS_RES_IRQ, 0, sc->sc_irq_res);
- sc->sc_irq_res = NULL;
- }
- if (sc->sc_io_res) {
- bus_release_resource(self, SYS_RES_MEMORY, 0,
- sc->sc_io_res);
- sc->sc_io_res = NULL;
- }
- usb_bus_mem_free_all(&sc->sc_bus, &ohci_iterate_hw_softc);
-
- return (0);
-}
-
-static device_method_t bhnd_ohci_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, bhnd_ohci_probe),
- DEVMETHOD(device_attach, bhnd_ohci_attach),
- DEVMETHOD(device_detach, bhnd_ohci_detach),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
-
- DEVMETHOD_END
-};
-
-static driver_t ohci_driver = {
- .name = "ohci",
- .methods = bhnd_ohci_methods,
- .size = sizeof(struct ohci_softc),
-};
-
-DRIVER_MODULE(ohci, bhnd_usb, ohci_driver, 0, 0);
-MODULE_DEPEND(ohci, usb, 1, 1, 1);
diff --git a/sys/dev/bhnd/cores/usb/bhnd_usb.c b/sys/dev/bhnd/cores/usb/bhnd_usb.c
deleted file mode 100644
index 7a86db79731f..000000000000
--- a/sys/dev/bhnd/cores/usb/bhnd_usb.c
+++ /dev/null
@@ -1,549 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2010, Aleksandr Rybalko <ray@ddteam.net>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-/*
- * Ported version of the Broadcom USB core driver from the ZRouter project.
- */
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/errno.h>
-#include <sys/bus.h>
-#include <sys/rman.h>
-#include <sys/malloc.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-
-#include <dev/bhnd/bhnd.h>
-
-#include <dev/bhnd/cores/pmu/bhnd_pmureg.h>
-
-#include "bhnd_usbvar.h"
-
-/****************************** Variables ************************************/
-static const struct bhnd_device bhnd_usb_devs[] = {
- BHND_DEVICE(BCM, USB20H, "USB2.0 Host core", NULL),
- BHND_DEVICE_END
-};
-
-/****************************** Prototypes ***********************************/
-
-static int bhnd_usb_attach(device_t);
-static int bhnd_usb_probe(device_t);
-static device_t bhnd_usb_add_child(device_t dev, u_int order, const char *name,
- int unit);
-static int bhnd_usb_print_all_resources(device_t dev);
-static int bhnd_usb_print_child(device_t bus, device_t child);
-
-static struct resource * bhnd_usb_alloc_resource(device_t bus,
- device_t child, int type, int *rid,
- rman_res_t start, rman_res_t end,
- rman_res_t count, u_int flags);
-static int bhnd_usb_release_resource(device_t dev,
- device_t child, struct resource *r);
-
-static struct resource_list * bhnd_usb_get_reslist(device_t dev,
- device_t child);
-
-static int
-bhnd_usb_probe(device_t dev)
-{
- const struct bhnd_device *id;
-
- id = bhnd_device_lookup(dev, bhnd_usb_devs, sizeof(bhnd_usb_devs[0]));
- if (id == NULL)
- return (ENXIO);
-
- device_set_desc(dev, id->desc);
- return (BUS_PROBE_DEFAULT);
-}
-
-static int
-bhnd_usb_attach(device_t dev)
-{
- struct bhnd_usb_softc *sc;
- int rid;
- uint32_t tmp;
- int tries, err;
-
- sc = device_get_softc(dev);
-
- bhnd_reset_hw(dev, 0, 0);
-
- /*
- * Allocate the resources which the parent bus has already
- * determined for us.
- * XXX: There are a few windows (usually 2); the RID should be chip-specific.
- */
- rid = 0;
- sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (sc->sc_mem == NULL) {
- BHND_ERROR_DEV(dev, "unable to allocate memory");
- return (ENXIO);
- }
-
- sc->sc_bt = rman_get_bustag(sc->sc_mem);
- sc->sc_bh = rman_get_bushandle(sc->sc_mem);
- sc->sc_maddr = rman_get_start(sc->sc_mem);
- sc->sc_msize = rman_get_size(sc->sc_mem);
-
- rid = 0;
- sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (sc->sc_irq == NULL) {
- BHND_ERROR_DEV(dev, "unable to allocate IRQ");
- return (ENXIO);
- }
-
- sc->sc_irqn = rman_get_start(sc->sc_irq);
-
- sc->mem_rman.rm_start = sc->sc_maddr;
- sc->mem_rman.rm_end = sc->sc_maddr + sc->sc_msize - 1;
- sc->mem_rman.rm_type = RMAN_ARRAY;
- sc->mem_rman.rm_descr = "BHND USB core I/O memory addresses";
- if (rman_init(&sc->mem_rman) != 0 ||
- rman_manage_region(&sc->mem_rman, sc->mem_rman.rm_start,
- sc->mem_rman.rm_end) != 0) {
- panic("%s: sc->mem_rman", __func__);
- }
-
- /* TODO: macros for registers */
- bus_write_4(sc->sc_mem, 0x200, 0x7ff);
- DELAY(100);
-
-#define OHCI_CONTROL 0x04
- bus_write_4(sc->sc_mem, OHCI_CONTROL, 0);
-
- if (bhnd_get_device(dev) == BHND_COREID_USB20H) {
- uint32_t rev = bhnd_get_hwrev(dev);
- BHND_INFO_DEV(dev, "USB HOST 2.0 setup for rev %d", rev);
- if (rev == 1/* ? == 2 */) {
- /* SiBa code */
-
- /* Change Flush control reg */
- tmp = bus_read_4(sc->sc_mem, 0x400) & ~0x8;
- bus_write_4(sc->sc_mem, 0x400, tmp);
- tmp = bus_read_4(sc->sc_mem, 0x400);
- BHND_DEBUG_DEV(dev, "USB20H fcr: 0x%x", tmp);
-
- /* Change Shim control reg */
- tmp = bus_read_4(sc->sc_mem, 0x304) & ~0x100;
- bus_write_4(sc->sc_mem, 0x304, tmp);
- tmp = bus_read_4(sc->sc_mem, 0x304);
- BHND_DEBUG_DEV(dev, "USB20H shim: 0x%x", tmp);
- } else if (rev >= 5) {
- /* BCMA code */
- err = bhnd_alloc_pmu(dev);
- if (err) {
- BHND_ERROR_DEV(dev, "can't alloc pmu: %d", err);
- return (err);
- }
-
- err = bhnd_request_ext_rsrc(dev, 1);
- if (err) {
- BHND_ERROR_DEV(dev, "can't req ext: %d", err);
- return (err);
- }
- /* Take out of resets */
- bus_write_4(sc->sc_mem, 0x200, 0x4ff);
- DELAY(25);
- bus_write_4(sc->sc_mem, 0x200, 0x6ff);
- DELAY(25);
-
- /* Make sure digital and AFE are locked in USB PHY */
- bus_write_4(sc->sc_mem, 0x524, 0x6b);
- DELAY(50);
- bus_read_4(sc->sc_mem, 0x524);
- DELAY(50);
- bus_write_4(sc->sc_mem, 0x524, 0xab);
- DELAY(50);
- bus_read_4(sc->sc_mem, 0x524);
- DELAY(50);
- bus_write_4(sc->sc_mem, 0x524, 0x2b);
- DELAY(50);
- bus_read_4(sc->sc_mem, 0x524);
- DELAY(50);
- bus_write_4(sc->sc_mem, 0x524, 0x10ab);
- DELAY(50);
- bus_read_4(sc->sc_mem, 0x524);
-
- tries = 10000;
- for (;;) {
- DELAY(10);
- tmp = bus_read_4(sc->sc_mem, 0x528);
- if (tmp & 0xc000)
- break;
- if (--tries != 0)
- continue;
-
- tmp = bus_read_4(sc->sc_mem, 0x528);
- BHND_ERROR_DEV(dev, "USB20H mdio_rddata 0x%08x", tmp);
- }
-
- /* XXX: Puzzle code */
- bus_write_4(sc->sc_mem, 0x528, 0x80000000);
- bus_read_4(sc->sc_mem, 0x314);
- DELAY(265);
- bus_write_4(sc->sc_mem, 0x200, 0x7ff);
- DELAY(10);
-
- /* Take USB and HSIC out of non-driving modes */
- bus_write_4(sc->sc_mem, 0x510, 0);
- }
- }
-
- bus_generic_probe(dev);
-
- if (bhnd_get_device(dev) == BHND_COREID_USB20H &&
-     bhnd_get_hwrev(dev) > 0)
- bhnd_usb_add_child(dev, 0, "ehci", -1);
- bhnd_usb_add_child(dev, 1, "ohci", -1);
-
- bus_generic_attach(dev);
-
- return (0);
-}
-
-static struct rman *
-bhnd_usb_get_rman(device_t bus, int type, u_int flags)
-{
- struct bhnd_usb_softc *sc = device_get_softc(bus);
-
- switch (type) {
- case SYS_RES_MEMORY:
- return (&sc->sc_mem_rman);
- default:
- return (NULL);
- }
-}
-
-static struct resource *
-bhnd_usb_alloc_resource(device_t bus, device_t child, int type, int *rid,
- rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
-{
- struct resource *rv;
- struct resource_list *rl;
- struct resource_list_entry *rle;
- int passthrough, isdefault;
-
- isdefault = RMAN_IS_DEFAULT_RANGE(start,end);
- passthrough = (device_get_parent(child) != bus);
- rle = NULL;
-
- if (!passthrough && isdefault) {
- BHND_INFO_DEV(bus, "trying allocate def %d - %d for %s", type,
- *rid, device_get_nameunit(child) );
-
- rl = BUS_GET_RESOURCE_LIST(bus, child);
- rle = resource_list_find(rl, type, *rid);
- if (rle == NULL)
- return (NULL);
- if (rle->res != NULL)
- panic("%s: resource entry is busy", __func__);
- start = rle->start;
- end = rle->end;
- count = rle->count;
- } else {
- BHND_INFO_DEV(bus, "trying allocate %d - %d (%jx-%jx) for %s", type,
- *rid, start, end, device_get_nameunit(child) );
- }
-
- /*
- * If the request is for a resource which we manage,
- * attempt to satisfy the allocation ourselves.
- */
- if (type == SYS_RES_MEMORY) {
- rv = bus_generic_rman_alloc_resource(bus, child, type, rid,
- start, end, count, flags);
- if (rv == NULL) {
- BHND_ERROR_DEV(bus, "could not allocate resource");
- return (NULL);
- }
-
- return (rv);
- }
-
- /*
- * Pass the request to the parent.
- */
- return (bus_generic_rl_alloc_resource(bus, child, type, rid, start, end,
- count, flags));
-}
-
-static struct resource_list *
-bhnd_usb_get_reslist(device_t dev, device_t child)
-{
- struct bhnd_usb_devinfo *sdi;
-
- sdi = device_get_ivars(child);
-
- return (&sdi->sdi_rl);
-}
-
-static int
-bhnd_usb_release_resource(device_t dev, device_t child,
- struct resource *r)
-{
- struct bhnd_usb_softc *sc;
- struct resource_list_entry *rle;
- bool passthrough;
- int error;
-
- sc = device_get_softc(dev);
- passthrough = (device_get_parent(child) != dev);
-
- /* Delegate to our parent device's bus if the requested resource type
- * isn't handled locally. */
- if (rman_get_type(r) != SYS_RES_MEMORY) {
- return (bus_generic_rl_release_resource(dev, child, r));
- }
-
- error = bus_generic_rman_release_resource(dev, child, r);
- if (error != 0)
- return (error);
-
- if (!passthrough) {
- /* Clean resource list entry */
- rle = resource_list_find(BUS_GET_RESOURCE_LIST(dev, child),
- rman_get_type(r), rman_get_rid(r));
- if (rle != NULL)
- rle->res = NULL;
- }
-
- return (0);
-}
-
-static int
-bhnd_usb_activate_resource(device_t dev, device_t child, struct resource *r)
-{
- if (rman_get_type(r) != SYS_RES_MEMORY)
- return (bus_generic_activate_resource(dev, child, r));
- return (bus_generic_rman_activate_resource(dev, child, r));
-}
-
-static int
-bhnd_usb_deactivate_resource(device_t dev, device_t child, struct resource *r)
-{
- if (rman_get_type(r) != SYS_RES_MEMORY)
- return (bus_generic_deactivate_resource(dev, child, r));
- return (bus_generic_rman_deactivate_resource(dev, child, r));
-}
-
-static int
-bhnd_usb_map_resource(device_t dev, device_t child, int type,
- struct resource *r, struct resource_map_request *argsp,
- struct resource_map *map)
-{
- struct bhnd_usb_softc *sc = device_get_softc(dev);
- struct resource_map_request args;
- rman_res_t length, start;
- int error;
-
- if (type != SYS_RES_MEMORY)
- return (bus_generic_map_resource(dev, child, type, r, argsp,
- map));
-
- /* Resources must be active to be mapped. */
- if (!(rman_get_flags(r) & RF_ACTIVE))
- return (ENXIO);
-
- resource_init_map_request(&args);
- error = resource_validate_map_request(r, argsp, &args, &start, &length);
- if (error)
- return (error);
-
- args.offset = start - rman_get_start(sc->sc_mem);
- args.length = length;
- return (bus_generic_map_resource(dev, child, type, sc->sc_mem, &args,
- map));
-}
-
-static int
-bhnd_usb_unmap_resource(device_t dev, device_t child, int type,
- struct resource *r, struct resource_map *map)
-{
- struct bhnd_usb_softc *sc = device_get_softc(dev);
-
- if (type == SYS_RES_MEMORY)
- r = sc->sc_mem;
- return (bus_generic_unmap_resource(dev, child, type, r, map));
-}
-
-static int
-bhnd_usb_print_all_resources(device_t dev)
-{
- struct bhnd_usb_devinfo *sdi;
- struct resource_list *rl;
- int retval;
-
- retval = 0;
- sdi = device_get_ivars(dev);
- rl = &sdi->sdi_rl;
-
- if (STAILQ_FIRST(rl))
- retval += printf(" at");
-
- retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%jx");
- retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
-
- return (retval);
-}
-
-static int
-bhnd_usb_print_child(device_t bus, device_t child)
-{
- int retval = 0;
-
- retval += bus_print_child_header(bus, child);
- retval += bhnd_usb_print_all_resources(child);
- if (device_get_flags(child))
- retval += printf(" flags %#x", device_get_flags(child));
- retval += printf(" on %s\n", device_get_nameunit(bus));
-
- return (retval);
-}
-
-static device_t
-bhnd_usb_add_child(device_t dev, u_int order, const char *name, int unit)
-{
- struct bhnd_usb_softc *sc;
- struct bhnd_usb_devinfo *sdi;
- device_t child;
- int error;
-
- sc = device_get_softc(dev);
-
- sdi = malloc(sizeof(struct bhnd_usb_devinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
- if (sdi == NULL)
- return (NULL);
-
- resource_list_init(&sdi->sdi_rl);
- sdi->sdi_irq_mapped = false;
-
- if (strncmp(name, "ohci", 4) == 0)
- {
- sdi->sdi_maddr = sc->sc_maddr + 0x000;
- sdi->sdi_msize = 0x200;
- }
- else if (strncmp(name, "ehci", 4) == 0)
- {
- sdi->sdi_maddr = sc->sc_maddr + 0x000;
- sdi->sdi_msize = 0x1000;
- }
- else
- {
- panic("Unknown subdevice");
- }
-
- /* Map the child's IRQ */
- if ((error = bhnd_map_intr(dev, 0, &sdi->sdi_irq))) {
- BHND_ERROR_DEV(dev, "could not map %s interrupt: %d", name,
- error);
- goto failed;
- }
- sdi->sdi_irq_mapped = true;
-
- BHND_INFO_DEV(dev, "%s: irq=%ju maddr=0x%jx", name, sdi->sdi_irq,
- sdi->sdi_maddr);
-
- /*
- * Add memory window and irq to child's resource list.
- */
- resource_list_add(&sdi->sdi_rl, SYS_RES_MEMORY, 0, sdi->sdi_maddr,
- sdi->sdi_maddr + sdi->sdi_msize - 1, sdi->sdi_msize);
-
- resource_list_add(&sdi->sdi_rl, SYS_RES_IRQ, 0, sdi->sdi_irq,
- sdi->sdi_irq, 1);
-
- child = device_add_child_ordered(dev, order, name, unit);
- if (child == NULL) {
- BHND_ERROR_DEV(dev, "could not add %s", name);
- goto failed;
- }
-
- device_set_ivars(child, sdi);
- return (child);
-
-failed:
- if (sdi->sdi_irq_mapped)
- bhnd_unmap_intr(dev, sdi->sdi_irq);
-
- resource_list_free(&sdi->sdi_rl);
-
- free(sdi, M_DEVBUF);
- return (NULL);
-}
-
-static void
-bhnd_usb_child_deleted(device_t dev, device_t child)
-{
- struct bhnd_usb_devinfo *dinfo;
-
- if ((dinfo = device_get_ivars(child)) == NULL)
- return;
-
- if (dinfo->sdi_irq_mapped)
- bhnd_unmap_intr(dev, dinfo->sdi_irq);
-
- resource_list_free(&dinfo->sdi_rl);
- free(dinfo, M_DEVBUF);
-}
-
-static device_method_t bhnd_usb_methods[] = {
- /* Device interface */
- DEVMETHOD(device_attach, bhnd_usb_attach),
- DEVMETHOD(device_probe, bhnd_usb_probe),
-
- /* Bus interface */
- DEVMETHOD(bus_add_child, bhnd_usb_add_child),
- DEVMETHOD(bus_child_deleted, bhnd_usb_child_deleted),
- DEVMETHOD(bus_alloc_resource, bhnd_usb_alloc_resource),
- DEVMETHOD(bus_get_resource_list, bhnd_usb_get_reslist),
- DEVMETHOD(bus_get_rman, bhnd_usb_get_rman),
- DEVMETHOD(bus_print_child, bhnd_usb_print_child),
- DEVMETHOD(bus_release_resource, bhnd_usb_release_resource),
- DEVMETHOD(bus_activate_resource, bhnd_usb_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bhnd_usb_deactivate_resource),
- DEVMETHOD(bus_map_resource, bhnd_usb_map_resource),
- DEVMETHOD(bus_unmap_resource, bhnd_usb_unmap_resource),
- /* Bus interface: generic part */
- DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
-
- DEVMETHOD_END
-};
-
-DEFINE_CLASS_0(bhnd_usb, bhnd_usb_driver, bhnd_usb_methods,
- sizeof(struct bhnd_usb_softc));
-DRIVER_MODULE(bhnd_usb, bhnd, bhnd_usb_driver, 0, 0);
-
-MODULE_VERSION(bhnd_usb, 1);
diff --git a/sys/dev/bhnd/nvram/bhnd_nvram_private.h b/sys/dev/bhnd/nvram/bhnd_nvram_private.h
index 8513039c2c7c..bbf646ff1806 100644
--- a/sys/dev/bhnd/nvram/bhnd_nvram_private.h
+++ b/sys/dev/bhnd/nvram/bhnd_nvram_private.h
@@ -39,8 +39,7 @@
#ifdef _KERNEL
#include <sys/malloc.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#else
#include <stdarg.h>
#include <stdbool.h>
diff --git a/sys/dev/bhnd/nvram/bhnd_nvram_value.h b/sys/dev/bhnd/nvram/bhnd_nvram_value.h
index 3e7e94d9c81d..f5c4e1e70b90 100644
--- a/sys/dev/bhnd/nvram/bhnd_nvram_value.h
+++ b/sys/dev/bhnd/nvram/bhnd_nvram_value.h
@@ -34,7 +34,7 @@
#include <sys/refcount.h>
#ifdef _KERNEL
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#else /* !_KERNEL */
#include <stdarg.h>
#endif /* _KERNEL */
diff --git a/sys/dev/bhnd/siba/siba.c b/sys/dev/bhnd/siba/siba.c
index 2688f3415aa5..a8e83f843506 100644
--- a/sys/dev/bhnd/siba/siba.c
+++ b/sys/dev/bhnd/siba/siba.c
@@ -85,7 +85,6 @@ siba_attach(device_t dev)
/* Enumerate children */
if ((error = siba_add_children(dev))) {
- device_delete_children(dev);
SIBA_LOCK_DESTROY(sc);
return (error);
}
@@ -1318,7 +1317,7 @@ siba_add_children(device_t dev)
goto failed;
/* Add the child device */
- child = BUS_ADD_CHILD(dev, 0, NULL, -1);
+ child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
error = ENXIO;
goto failed;
@@ -1373,12 +1372,7 @@ siba_add_children(device_t dev)
return (0);
failed:
- for (u_int i = 0; i < cid->ncores; i++) {
- if (children[i] == NULL)
- continue;
-
- device_delete_child(dev, children[i]);
- }
+ device_delete_children(dev);
free(cores, M_BHND);
free(children, M_BHND);
diff --git a/sys/dev/bnxt/bnxt.h b/sys/dev/bnxt/bnxt.h
deleted file mode 100644
index 0547bae91e09..000000000000
--- a/sys/dev/bnxt/bnxt.h
+++ /dev/null
@@ -1,848 +0,0 @@
-/*-
- * Broadcom NetXtreme-C/E network driver.
- *
- * Copyright (c) 2016 Broadcom, All Rights Reserved.
- * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-#ifndef _BNXT_H
-#define _BNXT_H
-
-#include <sys/param.h>
-#include <sys/socket.h>
-#include <sys/sysctl.h>
-#include <sys/taskqueue.h>
-#include <sys/bitstring.h>
-
-#include <machine/bus.h>
-
-#include <net/ethernet.h>
-#include <net/if.h>
-#include <net/if_var.h>
-#include <net/iflib.h>
-
-#include "hsi_struct_def.h"
-
-/* PCI IDs */
-#define BROADCOM_VENDOR_ID 0x14E4
-
-#define BCM57301 0x16c8
-#define BCM57302 0x16c9
-#define BCM57304 0x16ca
-#define BCM57311 0x16ce
-#define BCM57312 0x16cf
-#define BCM57314 0x16df
-#define BCM57402 0x16d0
-#define BCM57402_NPAR 0x16d4
-#define BCM57404 0x16d1
-#define BCM57404_NPAR 0x16e7
-#define BCM57406 0x16d2
-#define BCM57406_NPAR 0x16e8
-#define BCM57407 0x16d5
-#define BCM57407_NPAR 0x16ea
-#define BCM57407_SFP 0x16e9
-#define BCM57412 0x16d6
-#define BCM57412_NPAR1 0x16de
-#define BCM57412_NPAR2 0x16eb
-#define BCM57414 0x16d7
-#define BCM57414_NPAR1 0x16ec
-#define BCM57414_NPAR2 0x16ed
-#define BCM57416 0x16d8
-#define BCM57416_NPAR1 0x16ee
-#define BCM57416_NPAR2 0x16ef
-#define BCM57416_SFP 0x16e3
-#define BCM57417 0x16d9
-#define BCM57417_NPAR1 0x16c0
-#define BCM57417_NPAR2 0x16cc
-#define BCM57417_SFP 0x16e2
-#define BCM57454 0x1614
-#define BCM58700 0x16cd
-#define BCM57508 0x1750
-#define BCM57504 0x1751
-#define BCM57502 0x1752
-#define NETXTREME_C_VF1 0x16cb
-#define NETXTREME_C_VF2 0x16e1
-#define NETXTREME_C_VF3 0x16e5
-#define NETXTREME_E_VF1 0x16c1
-#define NETXTREME_E_VF2 0x16d3
-#define NETXTREME_E_VF3 0x16dc
-
-/* Maximum numbers of RX and TX descriptors. iflib requires this to be a power
- * of two. The hardware has no particular limitation. */
-#define BNXT_MAX_RXD ((INT32_MAX >> 1) + 1)
-#define BNXT_MAX_TXD ((INT32_MAX >> 1) + 1)
-
-#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
- CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
- CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
-
-#define BNXT_MAX_MTU 9600
-
-#define BNXT_RSS_HASH_TYPE_TCPV4 0
-#define BNXT_RSS_HASH_TYPE_UDPV4 1
-#define BNXT_RSS_HASH_TYPE_IPV4 2
-#define BNXT_RSS_HASH_TYPE_TCPV6 3
-#define BNXT_RSS_HASH_TYPE_UDPV6 4
-#define BNXT_RSS_HASH_TYPE_IPV6 5
-#define BNXT_GET_RSS_PROFILE_ID(rss_hash_type) ((rss_hash_type >> 1) & 0x1F)
-
-#define BNXT_NO_MORE_WOL_FILTERS 0xFFFF
-#define bnxt_wol_supported(softc) (!((softc)->flags & BNXT_FLAG_VF) && \
- ((softc)->flags & BNXT_FLAG_WOL_CAP ))
-
-/* 64-bit doorbell */
-#define DBR_INDEX_MASK 0x0000000000ffffffULL
-#define DBR_PI_LO_MASK 0xff000000UL
-#define DBR_PI_LO_SFT 24
-#define DBR_XID_MASK 0x000fffff00000000ULL
-#define DBR_XID_SFT 32
-#define DBR_PI_HI_MASK 0xf0000000000000ULL
-#define DBR_PI_HI_SFT 52
-#define DBR_PATH_L2 (0x1ULL << 56)
-#define DBR_VALID (0x1ULL << 58)
-#define DBR_TYPE_SQ (0x0ULL << 60)
-#define DBR_TYPE_RQ (0x1ULL << 60)
-#define DBR_TYPE_SRQ (0x2ULL << 60)
-#define DBR_TYPE_SRQ_ARM (0x3ULL << 60)
-#define DBR_TYPE_CQ (0x4ULL << 60)
-#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60)
-#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
-#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60)
-#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60)
-#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
-#define DBR_TYPE_NQ (0xaULL << 60)
-#define DBR_TYPE_NQ_ARM (0xbULL << 60)
-#define DBR_TYPE_PUSH_START (0xcULL << 60)
-#define DBR_TYPE_PUSH_END (0xdULL << 60)
-#define DBR_TYPE_NULL (0xfULL << 60)
-
-#define BNXT_MAX_NUM_QUEUES 32
-
-/* Completion related defines */
-#define CMP_VALID(cmp, v_bit) \
- ((!!(((struct cmpl_base *)(cmp))->info3_v & htole32(CMPL_BASE_V))) == !!(v_bit) )
-
-/* Chip class phase 5 */
-#define BNXT_CHIP_P5(sc) (((sc)->flags & BNXT_FLAG_CHIP_P5))
-
-#define DB_PF_OFFSET_P5 0x10000
-#define NQ_VALID(cmp, v_bit) \
- ((!!(((nq_cn_t *)(cmp))->v & htole32(NQ_CN_V))) == !!(v_bit) )
-
-#ifndef DIV_ROUND_UP
-#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
-#endif
-#ifndef roundup
-#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
-#endif
-
-#define NEXT_CP_CONS_V(ring, cons, v_bit) do { \
- if (__predict_false(++(cons) == (ring)->ring_size)) \
- ((cons) = 0, (v_bit) = !v_bit); \
-} while (0)
-
-#define RING_NEXT(ring, idx) (__predict_false(idx + 1 == (ring)->ring_size) ? \
- 0 : idx + 1)
-
-#define CMPL_PREFETCH_NEXT(cpr, idx) \
- __builtin_prefetch(&((struct cmpl_base *)(cpr)->ring.vaddr)[((idx) +\
- (CACHE_LINE_SIZE / sizeof(struct cmpl_base))) & \
- ((cpr)->ring.ring_size - 1)])
-
-/* Lock macros */
-#define BNXT_HWRM_LOCK_INIT(_softc, _name) \
- mtx_init(&(_softc)->hwrm_lock, _name, "BNXT HWRM Lock", MTX_DEF)
-#define BNXT_HWRM_LOCK(_softc) mtx_lock(&(_softc)->hwrm_lock)
-#define BNXT_HWRM_UNLOCK(_softc) mtx_unlock(&(_softc)->hwrm_lock)
-#define BNXT_HWRM_LOCK_DESTROY(_softc) mtx_destroy(&(_softc)->hwrm_lock)
-#define BNXT_HWRM_LOCK_ASSERT(_softc) mtx_assert(&(_softc)->hwrm_lock, \
- MA_OWNED)
-#define BNXT_IS_FLOW_CTRL_CHANGED(link_info) \
- ((link_info->last_flow_ctrl.tx != link_info->flow_ctrl.tx) || \
- (link_info->last_flow_ctrl.rx != link_info->flow_ctrl.rx) || \
- (link_info->last_flow_ctrl.autoneg != link_info->flow_ctrl.autoneg))
-
-/* Chip info */
-#define BNXT_TSO_SIZE UINT16_MAX
-
-#define min_t(type, x, y) ({ \
- type __min1 = (x); \
- type __min2 = (y); \
- __min1 < __min2 ? __min1 : __min2; })
-
-#define max_t(type, x, y) ({ \
- type __max1 = (x); \
- type __max2 = (y); \
- __max1 > __max2 ? __max1 : __max2; })
-
-#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
-
-#define BNXT_IFMEDIA_ADD(supported, fw_speed, ifm_speed) do { \
- if ((supported) & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_ ## fw_speed) \
- ifmedia_add(softc->media, IFM_ETHER | (ifm_speed), 0, NULL); \
-} while(0)
-
-#define BNXT_MIN_FRAME_SIZE 52 /* Frames must be padded to this size for some A0 chips */
-
-extern const char bnxt_driver_version[];
-typedef void (*bnxt_doorbell_tx)(void *, uint16_t idx);
-typedef void (*bnxt_doorbell_rx)(void *, uint16_t idx);
-typedef void (*bnxt_doorbell_rx_cq)(void *, bool);
-typedef void (*bnxt_doorbell_tx_cq)(void *, bool);
-typedef void (*bnxt_doorbell_nq)(void *, bool);
-
-typedef struct bnxt_doorbell_ops {
- bnxt_doorbell_tx bnxt_db_tx;
- bnxt_doorbell_rx bnxt_db_rx;
- bnxt_doorbell_rx_cq bnxt_db_rx_cq;
- bnxt_doorbell_tx_cq bnxt_db_tx_cq;
- bnxt_doorbell_nq bnxt_db_nq;
-} bnxt_doorbell_ops_t;
-/* NVRAM access */
-enum bnxt_nvm_directory_type {
- BNX_DIR_TYPE_UNUSED = 0,
- BNX_DIR_TYPE_PKG_LOG = 1,
- BNX_DIR_TYPE_UPDATE = 2,
- BNX_DIR_TYPE_CHIMP_PATCH = 3,
- BNX_DIR_TYPE_BOOTCODE = 4,
- BNX_DIR_TYPE_VPD = 5,
- BNX_DIR_TYPE_EXP_ROM_MBA = 6,
- BNX_DIR_TYPE_AVS = 7,
- BNX_DIR_TYPE_PCIE = 8,
- BNX_DIR_TYPE_PORT_MACRO = 9,
- BNX_DIR_TYPE_APE_FW = 10,
- BNX_DIR_TYPE_APE_PATCH = 11,
- BNX_DIR_TYPE_KONG_FW = 12,
- BNX_DIR_TYPE_KONG_PATCH = 13,
- BNX_DIR_TYPE_BONO_FW = 14,
- BNX_DIR_TYPE_BONO_PATCH = 15,
- BNX_DIR_TYPE_TANG_FW = 16,
- BNX_DIR_TYPE_TANG_PATCH = 17,
- BNX_DIR_TYPE_BOOTCODE_2 = 18,
- BNX_DIR_TYPE_CCM = 19,
- BNX_DIR_TYPE_PCI_CFG = 20,
- BNX_DIR_TYPE_TSCF_UCODE = 21,
- BNX_DIR_TYPE_ISCSI_BOOT = 22,
- BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
- BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
- BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
- BNX_DIR_TYPE_EXT_PHY = 27,
- BNX_DIR_TYPE_SHARED_CFG = 40,
- BNX_DIR_TYPE_PORT_CFG = 41,
- BNX_DIR_TYPE_FUNC_CFG = 42,
- BNX_DIR_TYPE_MGMT_CFG = 48,
- BNX_DIR_TYPE_MGMT_DATA = 49,
- BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
- BNX_DIR_TYPE_MGMT_WEB_META = 51,
- BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
- BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
-};
-
-enum bnxnvm_pkglog_field_index {
- BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
- BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
- BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
- BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
- BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
- BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
- BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
-};
-
-#define BNX_DIR_ORDINAL_FIRST 0
-#define BNX_DIR_EXT_NONE 0
-
-struct bnxt_bar_info {
- struct resource *res;
- bus_space_tag_t tag;
- bus_space_handle_t handle;
- bus_size_t size;
- int rid;
-};
-
-struct bnxt_flow_ctrl {
- bool rx;
- bool tx;
- bool autoneg;
-};
-
-struct bnxt_link_info {
- uint8_t media_type;
- uint8_t transceiver;
- uint8_t phy_addr;
- uint8_t phy_link_status;
- uint8_t wire_speed;
- uint8_t loop_back;
- uint8_t link_up;
- uint8_t last_link_up;
- uint8_t duplex;
- uint8_t last_duplex;
- uint8_t last_phy_type;
- struct bnxt_flow_ctrl flow_ctrl;
- struct bnxt_flow_ctrl last_flow_ctrl;
- uint8_t duplex_setting;
- uint8_t auto_mode;
-#define PHY_VER_LEN 3
- uint8_t phy_ver[PHY_VER_LEN];
- uint8_t phy_type;
-#define BNXT_PHY_STATE_ENABLED 0
-#define BNXT_PHY_STATE_DISABLED 1
- uint8_t phy_state;
-
- uint16_t link_speed;
- uint16_t support_speeds;
- uint16_t support_pam4_speeds;
- uint16_t auto_link_speeds;
- uint16_t auto_pam4_link_speeds;
- uint16_t force_link_speed;
- uint16_t force_pam4_link_speed;
- bool force_pam4_speed_set_by_user;
-
- uint16_t advertising;
- uint16_t advertising_pam4;
-
- uint32_t preemphasis;
- uint16_t support_auto_speeds;
- uint16_t support_force_speeds;
- uint16_t support_pam4_auto_speeds;
- uint16_t support_pam4_force_speeds;
-#define BNXT_SIG_MODE_NRZ HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ
-#define BNXT_SIG_MODE_PAM4 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
- uint8_t req_signal_mode;
-
- uint8_t active_fec_sig_mode;
- uint8_t sig_mode;
-
- /* copy of requested setting */
- uint8_t autoneg;
-#define BNXT_AUTONEG_SPEED 1
-#define BNXT_AUTONEG_FLOW_CTRL 2
- uint8_t req_duplex;
- uint16_t req_link_speed;
- uint8_t module_status;
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
-};
-
-enum bnxt_phy_type {
- BNXT_MEDIA_CR = 0,
- BNXT_MEDIA_LR,
- BNXT_MEDIA_SR,
- BNXT_MEDIA_KR,
- BNXT_MEDIA_END
-};
-
-enum bnxt_cp_type {
- BNXT_DEFAULT,
- BNXT_TX,
- BNXT_RX,
- BNXT_SHARED
-};
-
-struct bnxt_cos_queue {
- uint8_t id;
- uint8_t profile;
-};
-
-struct bnxt_func_info {
- uint32_t fw_fid;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctxs;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_hw_ring_grps;
- uint16_t max_irqs;
- uint16_t max_l2_ctxs;
- uint16_t max_vnics;
- uint16_t max_stat_ctxs;
-};
-
-struct bnxt_pf_info {
-#define BNXT_FIRST_PF_FID 1
-#define BNXT_FIRST_VF_FID 128
- uint8_t port_id;
- uint32_t first_vf_id;
- uint16_t active_vfs;
- uint16_t max_vfs;
- uint32_t max_encap_records;
- uint32_t max_decap_records;
- uint32_t max_tx_em_flows;
- uint32_t max_tx_wm_flows;
- uint32_t max_rx_em_flows;
- uint32_t max_rx_wm_flows;
- unsigned long *vf_event_bmap;
- uint16_t hwrm_cmd_req_pages;
- void *hwrm_cmd_req_addr[4];
- bus_addr_t hwrm_cmd_req_dma_addr[4];
-};
-
-struct bnxt_vf_info {
- uint16_t fw_fid;
- uint8_t mac_addr[ETHER_ADDR_LEN];
- uint16_t max_rsscos_ctxs;
- uint16_t max_cp_rings;
- uint16_t max_tx_rings;
- uint16_t max_rx_rings;
- uint16_t max_hw_ring_grps;
- uint16_t max_l2_ctxs;
- uint16_t max_irqs;
- uint16_t max_vnics;
- uint16_t max_stat_ctxs;
- uint32_t vlan;
-#define BNXT_VF_QOS 0x1
-#define BNXT_VF_SPOOFCHK 0x2
-#define BNXT_VF_LINK_FORCED 0x4
-#define BNXT_VF_LINK_UP 0x8
- uint32_t flags;
- uint32_t func_flags; /* func cfg flags */
- uint32_t min_tx_rate;
- uint32_t max_tx_rate;
- void *hwrm_cmd_req_addr;
- bus_addr_t hwrm_cmd_req_dma_addr;
-};
-
-#define BNXT_PF(softc) (!((softc)->flags & BNXT_FLAG_VF))
-#define BNXT_VF(softc) ((softc)->flags & BNXT_FLAG_VF)
-
-struct bnxt_vlan_tag {
- SLIST_ENTRY(bnxt_vlan_tag) next;
- uint64_t filter_id;
- uint16_t tag;
-};
-
-struct bnxt_vnic_info {
- uint16_t id;
- uint16_t def_ring_grp;
- uint16_t cos_rule;
- uint16_t lb_rule;
- uint16_t mru;
-
- uint32_t rx_mask;
- struct iflib_dma_info mc_list;
- int mc_list_count;
-#define BNXT_MAX_MC_ADDRS 16
-
- uint32_t flags;
-#define BNXT_VNIC_FLAG_DEFAULT 0x01
-#define BNXT_VNIC_FLAG_BD_STALL 0x02
-#define BNXT_VNIC_FLAG_VLAN_STRIP 0x04
-
- uint64_t filter_id;
-
- uint16_t rss_id;
- uint32_t rss_hash_type;
- uint8_t rss_hash_key[HW_HASH_KEY_SIZE];
- struct iflib_dma_info rss_hash_key_tbl;
- struct iflib_dma_info rss_grp_tbl;
- SLIST_HEAD(vlan_head, bnxt_vlan_tag) vlan_tags;
- struct iflib_dma_info vlan_tag_list;
-};
-
-struct bnxt_grp_info {
- uint16_t stats_ctx;
- uint16_t grp_id;
- uint16_t rx_ring_id;
- uint16_t cp_ring_id;
- uint16_t ag_ring_id;
-};
-
-struct bnxt_ring {
- uint64_t paddr;
- vm_offset_t doorbell;
- caddr_t vaddr;
- struct bnxt_softc *softc;
- uint32_t ring_size; /* Must be a power of two */
- uint16_t id; /* Logical ID */
- uint16_t phys_id;
- uint16_t idx;
- struct bnxt_full_tpa_start *tpa_start;
-};
-
-struct bnxt_cp_ring {
- struct bnxt_ring ring;
- struct if_irq irq;
- uint32_t cons;
- bool v_bit; /* Value of valid bit */
- struct ctx_hw_stats *stats;
- uint32_t stats_ctx_id;
-	uint32_t		last_idx;	/* Used by RX rings only;
-						 * set to the last read pidx
-						 */
- uint64_t int_count;
-};
-
-struct bnxt_full_tpa_start {
- struct rx_tpa_start_cmpl low;
- struct rx_tpa_start_cmpl_hi high;
-};
-
-/* All the version information for the part */
-#define BNXT_VERSTR_SIZE	(3*3+2+1)	/* i.e., "255.255.255\0" */
-#define BNXT_NAME_SIZE 17
-#define FW_VER_STR_LEN 32
-#define BC_HWRM_STR_LEN 21
-struct bnxt_ver_info {
- uint8_t hwrm_if_major;
- uint8_t hwrm_if_minor;
- uint8_t hwrm_if_update;
- char hwrm_if_ver[BNXT_VERSTR_SIZE];
- char driver_hwrm_if_ver[BNXT_VERSTR_SIZE];
- char hwrm_fw_ver[BNXT_VERSTR_SIZE];
- char mgmt_fw_ver[BNXT_VERSTR_SIZE];
- char netctrl_fw_ver[BNXT_VERSTR_SIZE];
- char roce_fw_ver[BNXT_VERSTR_SIZE];
- char fw_ver_str[FW_VER_STR_LEN];
- char phy_ver[BNXT_VERSTR_SIZE];
- char pkg_ver[64];
-
- char hwrm_fw_name[BNXT_NAME_SIZE];
- char mgmt_fw_name[BNXT_NAME_SIZE];
- char netctrl_fw_name[BNXT_NAME_SIZE];
- char roce_fw_name[BNXT_NAME_SIZE];
- char phy_vendor[BNXT_NAME_SIZE];
- char phy_partnumber[BNXT_NAME_SIZE];
-
- uint16_t chip_num;
- uint8_t chip_rev;
- uint8_t chip_metal;
- uint8_t chip_bond_id;
- uint8_t chip_type;
-
- uint8_t hwrm_min_major;
- uint8_t hwrm_min_minor;
- uint8_t hwrm_min_update;
-
- struct sysctl_ctx_list ver_ctx;
- struct sysctl_oid *ver_oid;
-};
-
-struct bnxt_nvram_info {
- uint16_t mfg_id;
- uint16_t device_id;
- uint32_t sector_size;
- uint32_t size;
- uint32_t reserved_size;
- uint32_t available_size;
-
- struct sysctl_ctx_list nvm_ctx;
- struct sysctl_oid *nvm_oid;
-};
-
-struct bnxt_func_qcfg {
- uint16_t alloc_completion_rings;
- uint16_t alloc_tx_rings;
- uint16_t alloc_rx_rings;
- uint16_t alloc_vnics;
-};
-
-struct bnxt_hw_lro {
- uint16_t enable;
- uint16_t is_mode_gro;
- uint16_t max_agg_segs;
- uint16_t max_aggs;
- uint32_t min_agg_len;
-};
-
-/* The hardware supports certain page sizes. Use the supported page sizes
- * to allocate the rings.
- */
-#if (PAGE_SHIFT < 12)
-#define BNXT_PAGE_SHIFT 12
-#elif (PAGE_SHIFT <= 13)
-#define BNXT_PAGE_SHIFT PAGE_SHIFT
-#elif (PAGE_SHIFT < 16)
-#define BNXT_PAGE_SHIFT 13
-#else
-#define BNXT_PAGE_SHIFT 16
-#endif
-
-#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
-
-#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
-#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
-struct bnxt_ring_mem_info {
- int nr_pages;
- int page_size;
- uint16_t flags;
-#define BNXT_RMEM_VALID_PTE_FLAG 1
-#define BNXT_RMEM_RING_PTE_FLAG 2
-#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
- uint16_t depth;
- uint8_t init_val;
- struct iflib_dma_info *pg_arr;
- struct iflib_dma_info pg_tbl;
- int vmem_size;
- void **vmem;
-};
-
-struct bnxt_ctx_pg_info {
- uint32_t entries;
- uint32_t nr_pages;
- struct iflib_dma_info ctx_arr[MAX_CTX_PAGES];
- struct bnxt_ring_mem_info ring_mem;
- struct bnxt_ctx_pg_info **ctx_pg_tbl;
-};
-
-struct bnxt_ctx_mem_info {
- uint32_t qp_max_entries;
- uint16_t qp_min_qp1_entries;
- uint16_t qp_max_l2_entries;
- uint16_t qp_entry_size;
- uint16_t srq_max_l2_entries;
- uint32_t srq_max_entries;
- uint16_t srq_entry_size;
- uint16_t cq_max_l2_entries;
- uint32_t cq_max_entries;
- uint16_t cq_entry_size;
- uint16_t vnic_max_vnic_entries;
- uint16_t vnic_max_ring_table_entries;
- uint16_t vnic_entry_size;
- uint32_t stat_max_entries;
- uint16_t stat_entry_size;
- uint16_t tqm_entry_size;
- uint32_t tqm_min_entries_per_ring;
- uint32_t tqm_max_entries_per_ring;
- uint32_t mrav_max_entries;
- uint16_t mrav_entry_size;
- uint16_t tim_entry_size;
- uint32_t tim_max_entries;
- uint8_t tqm_entries_multiple;
- uint8_t ctx_kind_initializer;
-
- uint32_t flags;
- #define BNXT_CTX_FLAG_INITED 0x01
-
- struct bnxt_ctx_pg_info qp_mem;
- struct bnxt_ctx_pg_info srq_mem;
- struct bnxt_ctx_pg_info cq_mem;
- struct bnxt_ctx_pg_info vnic_mem;
- struct bnxt_ctx_pg_info stat_mem;
- struct bnxt_ctx_pg_info mrav_mem;
- struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
-};
-
-struct bnxt_hw_resc {
- uint16_t min_rsscos_ctxs;
- uint16_t max_rsscos_ctxs;
- uint16_t min_cp_rings;
- uint16_t max_cp_rings;
- uint16_t resv_cp_rings;
- uint16_t min_tx_rings;
- uint16_t max_tx_rings;
- uint16_t resv_tx_rings;
- uint16_t max_tx_sch_inputs;
- uint16_t min_rx_rings;
- uint16_t max_rx_rings;
- uint16_t resv_rx_rings;
- uint16_t min_hw_ring_grps;
- uint16_t max_hw_ring_grps;
- uint16_t resv_hw_ring_grps;
- uint16_t min_l2_ctxs;
- uint16_t max_l2_ctxs;
- uint16_t min_vnics;
- uint16_t max_vnics;
- uint16_t resv_vnics;
- uint16_t min_stat_ctxs;
- uint16_t max_stat_ctxs;
- uint16_t resv_stat_ctxs;
- uint16_t max_nqs;
- uint16_t max_irqs;
- uint16_t resv_irqs;
-};
-
-#define BNXT_LLQ(q_profile) \
- ((q_profile) == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE)
-#define BNXT_CNPQ(q_profile) \
- ((q_profile) == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
-
-#define BNXT_HWRM_MAX_REQ_LEN (softc->hwrm_max_req_len)
-
-struct bnxt_softc_list {
- SLIST_ENTRY(bnxt_softc_list) next;
- struct bnxt_softc *softc;
-};
-
-struct bnxt_softc {
- device_t dev;
- if_ctx_t ctx;
- if_softc_ctx_t scctx;
- if_shared_ctx_t sctx;
- uint32_t domain;
- uint32_t bus;
- uint32_t slot;
- uint32_t function;
- uint32_t dev_fn;
- struct ifmedia *media;
- struct bnxt_ctx_mem_info *ctx_mem;
- struct bnxt_hw_resc hw_resc;
- struct bnxt_softc_list list;
-
- struct bnxt_bar_info hwrm_bar;
- struct bnxt_bar_info doorbell_bar;
- struct bnxt_link_info link_info;
-#define BNXT_FLAG_VF 0x0001
-#define BNXT_FLAG_NPAR 0x0002
-#define BNXT_FLAG_WOL_CAP 0x0004
-#define BNXT_FLAG_SHORT_CMD 0x0008
-#define BNXT_FLAG_FW_CAP_NEW_RM 0x0010
-#define BNXT_FLAG_CHIP_P5 0x0020
-#define BNXT_FLAG_TPA 0x0040
-#define BNXT_FLAG_FW_CAP_EXT_STATS 0x0080
- uint32_t flags;
-#define BNXT_STATE_LINK_CHANGE (0)
-#define BNXT_STATE_MAX (BNXT_STATE_LINK_CHANGE + 1)
- bitstr_t *state_bv;
- uint32_t total_msix;
-
- struct bnxt_func_info func;
- struct bnxt_func_qcfg fn_qcfg;
- struct bnxt_pf_info pf;
- struct bnxt_vf_info vf;
-
- uint16_t hwrm_cmd_seq;
- uint32_t hwrm_cmd_timeo; /* milliseconds */
- struct iflib_dma_info hwrm_cmd_resp;
- struct iflib_dma_info hwrm_short_cmd_req_addr;
- /* Interrupt info for HWRM */
- struct if_irq irq;
- struct mtx hwrm_lock;
- uint16_t hwrm_max_req_len;
- uint16_t hwrm_max_ext_req_len;
- uint32_t hwrm_spec_code;
-
-#define BNXT_MAX_COS_QUEUE 8
- uint8_t max_tc;
- uint8_t max_lltc; /* lossless TCs */
- struct bnxt_cos_queue q_info[BNXT_MAX_COS_QUEUE];
- uint8_t tc_to_qidx[BNXT_MAX_COS_QUEUE];
- uint8_t q_ids[BNXT_MAX_COS_QUEUE];
- uint8_t max_q;
-
- uint64_t admin_ticks;
- struct iflib_dma_info hw_rx_port_stats;
- struct iflib_dma_info hw_tx_port_stats;
- struct rx_port_stats *rx_port_stats;
- struct tx_port_stats *tx_port_stats;
-
- struct iflib_dma_info hw_tx_port_stats_ext;
- struct iflib_dma_info hw_rx_port_stats_ext;
- struct tx_port_stats_ext *tx_port_stats_ext;
- struct rx_port_stats_ext *rx_port_stats_ext;
-
- int num_cp_rings;
-
- struct bnxt_cp_ring *nq_rings;
-
- struct bnxt_ring *tx_rings;
- struct bnxt_cp_ring *tx_cp_rings;
- struct iflib_dma_info tx_stats[BNXT_MAX_NUM_QUEUES];
- int ntxqsets;
-
- struct bnxt_vnic_info vnic_info;
- struct bnxt_ring *ag_rings;
- struct bnxt_ring *rx_rings;
- struct bnxt_cp_ring *rx_cp_rings;
- struct bnxt_grp_info *grp_info;
- struct iflib_dma_info rx_stats[BNXT_MAX_NUM_QUEUES];
- int nrxqsets;
- uint16_t rx_buf_size;
-
- struct bnxt_cp_ring def_cp_ring;
- struct bnxt_cp_ring def_nq_ring;
- struct iflib_dma_info def_cp_ring_mem;
- struct iflib_dma_info def_nq_ring_mem;
- struct grouptask def_cp_task;
- struct bnxt_doorbell_ops db_ops;
-
- struct sysctl_ctx_list hw_stats;
- struct sysctl_oid *hw_stats_oid;
- struct sysctl_ctx_list hw_lro_ctx;
- struct sysctl_oid *hw_lro_oid;
- struct sysctl_ctx_list flow_ctrl_ctx;
- struct sysctl_oid *flow_ctrl_oid;
-
- struct bnxt_ver_info *ver_info;
- struct bnxt_nvram_info *nvm_info;
- bool wol;
- bool is_dev_init;
- struct bnxt_hw_lro hw_lro;
- uint8_t wol_filter_id;
- uint16_t rx_coal_usecs;
- uint16_t rx_coal_usecs_irq;
- uint16_t rx_coal_frames;
- uint16_t rx_coal_frames_irq;
- uint16_t tx_coal_usecs;
- uint16_t tx_coal_usecs_irq;
- uint16_t tx_coal_frames;
- uint16_t tx_coal_frames_irq;
-
-#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
-#define BNXT_DEF_STATS_COAL_TICKS 1000000
-#define BNXT_MIN_STATS_COAL_TICKS 250000
-#define BNXT_MAX_STATS_COAL_TICKS 1000000
-
-};
-
-struct bnxt_filter_info {
- STAILQ_ENTRY(bnxt_filter_info) next;
- uint64_t fw_l2_filter_id;
-#define INVALID_MAC_INDEX ((uint16_t)-1)
- uint16_t mac_index;
-
- /* Filter Characteristics */
- uint32_t flags;
- uint32_t enables;
- uint8_t l2_addr[ETHER_ADDR_LEN];
- uint8_t l2_addr_mask[ETHER_ADDR_LEN];
- uint16_t l2_ovlan;
- uint16_t l2_ovlan_mask;
- uint16_t l2_ivlan;
- uint16_t l2_ivlan_mask;
- uint8_t t_l2_addr[ETHER_ADDR_LEN];
- uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
- uint16_t t_l2_ovlan;
- uint16_t t_l2_ovlan_mask;
- uint16_t t_l2_ivlan;
- uint16_t t_l2_ivlan_mask;
- uint8_t tunnel_type;
- uint16_t mirror_vnic_id;
- uint32_t vni;
- uint8_t pri_hint;
- uint64_t l2_filter_id_hint;
-};
-
-#define I2C_DEV_ADDR_A0 0xa0
-#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
-
-/* Function declarations */
-void bnxt_report_link(struct bnxt_softc *softc);
-bool bnxt_check_hwrm_version(struct bnxt_softc *softc);
-struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *name);
-int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *bp, uint16_t i2c_addr,
- uint16_t page_number, uint8_t bank, bool bank_sel_en, uint16_t start_addr,
- uint16_t data_length, uint8_t *buf);
-uint8_t get_phy_type(struct bnxt_softc *softc);
-
-#endif /* _BNXT_H */
diff --git a/sys/dev/bnxt/bnxt_en/bnxt.h b/sys/dev/bnxt/bnxt_en/bnxt.h
new file mode 100644
index 000000000000..0ba7b5723b91
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt.h
@@ -0,0 +1,1390 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2016 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#ifndef _BNXT_H
+#define _BNXT_H
+
+#include <sys/param.h>
+#include <sys/socket.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/bitstring.h>
+
+#include <machine/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/iflib.h>
+#include <linux/types.h>
+
+#include "hsi_struct_def.h"
+#include "bnxt_dcb.h"
+#include "bnxt_auxbus_compat.h"
+
+#define DFLT_HWRM_CMD_TIMEOUT 500
+
+/* PCI IDs */
+#define BROADCOM_VENDOR_ID 0x14E4
+
+#define BCM57301 0x16c8
+#define BCM57302 0x16c9
+#define BCM57304 0x16ca
+#define BCM57311 0x16ce
+#define BCM57312 0x16cf
+#define BCM57314 0x16df
+#define BCM57402 0x16d0
+#define BCM57402_NPAR 0x16d4
+#define BCM57404 0x16d1
+#define BCM57404_NPAR 0x16e7
+#define BCM57406 0x16d2
+#define BCM57406_NPAR 0x16e8
+#define BCM57407 0x16d5
+#define BCM57407_NPAR 0x16ea
+#define BCM57407_SFP 0x16e9
+#define BCM57412 0x16d6
+#define BCM57412_NPAR1 0x16de
+#define BCM57412_NPAR2 0x16eb
+#define BCM57414 0x16d7
+#define BCM57414_NPAR1 0x16ec
+#define BCM57414_NPAR2 0x16ed
+#define BCM57416 0x16d8
+#define BCM57416_NPAR1 0x16ee
+#define BCM57416_NPAR2 0x16ef
+#define BCM57416_SFP 0x16e3
+#define BCM57417 0x16d9
+#define BCM57417_NPAR1 0x16c0
+#define BCM57417_NPAR2 0x16cc
+#define BCM57417_SFP 0x16e2
+#define BCM57454 0x1614
+#define BCM58700 0x16cd
+#define BCM57508 0x1750
+#define BCM57504 0x1751
+#define BCM57504_NPAR 0x1801
+#define BCM57502 0x1752
+#define BCM57608 0x1760
+#define BCM57604 0x1761
+#define BCM57602 0x1762
+#define BCM57601 0x1763
+#define NETXTREME_C_VF1 0x16cb
+#define NETXTREME_C_VF2 0x16e1
+#define NETXTREME_C_VF3 0x16e5
+#define NETXTREME_E_VF1 0x16c1
+#define NETXTREME_E_VF2 0x16d3
+#define NETXTREME_E_VF3 0x16dc
+
+#define EVENT_DATA1_RESET_NOTIFY_FATAL(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+
+#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >> \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT)
+
+#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \
+ (((data2) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >> \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT)
+
+#define BNXT_EVENT_DBR_EPOCH(data) \
+ (((data) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_MASK) >> \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_EVENT_DATA1_EPOCH_SFT)
+
+#define BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2) \
+ (((data2) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_MASK) >> \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_THRESHOLD_TEMP_SFT)
+
+#define EVENT_DATA2_NVM_ERR_ADDR(data2) \
+ (((data2) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_MASK) >> \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA2_ERR_ADDR_SFT)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR) == \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING)
+
+#define EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_WRITE)
+
+#define EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_MASK) == \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_NVM_EVENT_DATA1_NVM_ERR_TYPE_ERASE)
+
+#define EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1) \
+ ((data1) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_MASK)
+
+#define BNXT_EVENT_THERMAL_CURRENT_TEMP(data2) \
+ ((data2) & HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK)
+
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ HWRM_ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
+#define EVENT_DATA1_RECOVERY_ENABLED(data1) \
+ !!((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED)
+
+#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
+ !!((data1) & \
+ HWRM_ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
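
A minimal sketch of how these accessors decode an async completion; the
handler shape and the data1/data2 plumbing are illustrative, not the
driver's actual event path:

	/* Hypothetical: classify a RESET_NOTIFY async event. */
	static void
	bnxt_handle_reset_notify(struct bnxt_softc *softc, uint32_t data1,
	    uint32_t data2)
	{
		if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1))
			device_printf(softc->dev,
			    "fatal firmware reset signaled\n");
		else if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1))
			device_printf(softc->dev,
			    "firmware activation reset, fw status 0x%x\n",
			    EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2));
	}
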
+
+#define INVALID_STATS_CTX_ID -1
+
+/* Maximum numbers of RX and TX descriptors. iflib requires this to be a power
+ * of two. The hardware has no particular limitation. */
+#define BNXT_MAX_RXD ((INT32_MAX >> 1) + 1)
+#define BNXT_MAX_TXD ((INT32_MAX >> 1) + 1)
+
+#define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
+ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
+ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+
+#define BNXT_MAX_MTU 9600
+
+#define BNXT_RSS_HASH_TYPE_TCPV4 0
+#define BNXT_RSS_HASH_TYPE_UDPV4 1
+#define BNXT_RSS_HASH_TYPE_IPV4 2
+#define BNXT_RSS_HASH_TYPE_TCPV6 3
+#define BNXT_RSS_HASH_TYPE_UDPV6 4
+#define BNXT_RSS_HASH_TYPE_IPV6 5
+#define BNXT_GET_RSS_PROFILE_ID(rss_hash_type) (((rss_hash_type) >> 1) & 0x1F)
+
+#define BNXT_NO_MORE_WOL_FILTERS 0xFFFF
+#define bnxt_wol_supported(softc) (!((softc)->flags & BNXT_FLAG_VF) && \
+				    ((softc)->flags & BNXT_FLAG_WOL_CAP))
+/* 64-bit doorbell */
+#define DBR_INDEX_MASK 0x0000000000ffffffULL
+#define DBR_PI_LO_MASK 0xff000000UL
+#define DBR_PI_LO_SFT 24
+#define DBR_EPOCH_MASK 0x01000000UL
+#define DBR_EPOCH_SFT 24
+#define DBR_TOGGLE_MASK 0x06000000UL
+#define DBR_TOGGLE_SFT 25
+#define DBR_XID_MASK 0x000fffff00000000ULL
+#define DBR_XID_SFT 32
+#define DBR_PI_HI_MASK 0xf0000000000000ULL
+#define DBR_PI_HI_SFT 52
+#define DBR_PATH_L2 (0x1ULL << 56)
+#define DBR_VALID (0x1ULL << 58)
+#define DBR_TYPE_SQ (0x0ULL << 60)
+#define DBR_TYPE_RQ (0x1ULL << 60)
+#define DBR_TYPE_SRQ (0x2ULL << 60)
+#define DBR_TYPE_SRQ_ARM (0x3ULL << 60)
+#define DBR_TYPE_CQ (0x4ULL << 60)
+#define DBR_TYPE_CQ_ARMSE (0x5ULL << 60)
+#define DBR_TYPE_CQ_ARMALL (0x6ULL << 60)
+#define DBR_TYPE_CQ_ARMENA (0x7ULL << 60)
+#define DBR_TYPE_SRQ_ARMENA (0x8ULL << 60)
+#define DBR_TYPE_CQ_CUTOFF_ACK (0x9ULL << 60)
+#define DBR_TYPE_NQ (0xaULL << 60)
+#define DBR_TYPE_NQ_ARM (0xbULL << 60)
+#define DBR_TYPE_PUSH_START (0xcULL << 60)
+#define DBR_TYPE_PUSH_END (0xdULL << 60)
+#define DBR_TYPE_NQ_MASK (0xeULL << 60)
+#define DBR_TYPE_NULL (0xfULL << 60)
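
The fields above tile a single 64-bit doorbell word: a 24-bit producer
index at the bottom, a 20-bit xid at bit 32, and path/valid/type bits
at the top. A sketch of composing an L2 send-queue doorbell from them
(illustrative only; the driver's doorbell writers precompute most of
this into db_key64):

	static inline uint64_t
	bnxt_mk_tx_db(uint32_t xid, uint32_t prod_idx)
	{
		return (DBR_VALID | DBR_PATH_L2 | DBR_TYPE_SQ |
		    (((uint64_t)xid << DBR_XID_SFT) & DBR_XID_MASK) |
		    ((uint64_t)prod_idx & DBR_INDEX_MASK));
	}
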
+
+#define BNXT_MAX_L2_QUEUES 128
+#define BNXT_ROCE_IRQ_COUNT 9
+
+#define BNXT_MAX_NUM_QUEUES (BNXT_MAX_L2_QUEUES + BNXT_ROCE_IRQ_COUNT)
+
+/* Completion related defines */
+#define CMP_VALID(cmp, v_bit) \
+	((!!(((struct cmpl_base *)(cmp))->info3_v & htole32(CMPL_BASE_V))) == !!(v_bit))
+
+/* Chip class phase 5 */
+#define BNXT_CHIP_P5(sc) ((sc->flags & BNXT_FLAG_CHIP_P5))
+
+/* Chip class phase 7 */
+#define BNXT_CHIP_P7(sc) ((sc->flags & BNXT_FLAG_CHIP_P7))
+
+/* Chip class phase 5 plus */
+#define BNXT_CHIP_P5_PLUS(sc) \
+ (BNXT_CHIP_P5(sc) || BNXT_CHIP_P7(sc))
+
+#define DB_PF_OFFSET_P5 0x10000
+#define DB_VF_OFFSET_P5 0x4000
+#define NQ_VALID(cmp, v_bit) \
+	((!!(((nq_cn_t *)(cmp))->v & htole32(NQ_CN_V))) == !!(v_bit))
+
+#ifndef DIV_ROUND_UP
+#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
+#endif
+#ifndef roundup
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#endif
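
Both helpers are plain integer arithmetic, e.g.:

	_Static_assert(DIV_ROUND_UP(1000, 256) == 4, "ceiling division");
	_Static_assert(roundup(1000, 256) == 1024, "round up to a multiple");
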
+
+#define NEXT_CP_CONS_V(ring, cons, v_bit) do { \
+	if (__predict_false(++(cons) == (ring)->ring_size)) \
+		((cons) = 0, (v_bit) = !(v_bit)); \
+} while (0)
+
+#define RING_NEXT(ring, idx) (__predict_false((idx) + 1 == (ring)->ring_size) ? \
+				0 : (idx) + 1)
+
+#define CMPL_PREFETCH_NEXT(cpr, idx) \
+ __builtin_prefetch(&((struct cmpl_base *)(cpr)->ring.vaddr)[((idx) +\
+ (CACHE_LINE_SIZE / sizeof(struct cmpl_base))) & \
+ ((cpr)->ring.ring_size - 1)])
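
Taken together these macros encode the usual valid-bit completion-ring
walk: an entry belongs to the host while its V bit matches the ring's
current v_bit, and v_bit flips every time the consumer index wraps. A
hypothetical poll loop (memory barriers and real completion handling
elided for brevity):

	static void
	bnxt_poll_cq(struct bnxt_cp_ring *cpr, int budget)
	{
		struct cmpl_base *base = (struct cmpl_base *)cpr->ring.vaddr;

		while (budget-- > 0) {
			if (!CMP_VALID(&base[cpr->cons], cpr->v_bit))
				break;	/* hardware hasn't written it yet */
			CMPL_PREFETCH_NEXT(cpr, cpr->cons);
			/* ... process base[cpr->cons] here ... */
			NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
		}
	}
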
+
+/* Lock macros */
+#define BNXT_HWRM_LOCK_INIT(_softc, _name) \
+ mtx_init(&(_softc)->hwrm_lock, _name, "BNXT HWRM Lock", MTX_DEF)
+#define BNXT_HWRM_LOCK(_softc) mtx_lock(&(_softc)->hwrm_lock)
+#define BNXT_HWRM_UNLOCK(_softc) mtx_unlock(&(_softc)->hwrm_lock)
+#define BNXT_HWRM_LOCK_DESTROY(_softc) mtx_destroy(&(_softc)->hwrm_lock)
+#define BNXT_HWRM_LOCK_ASSERT(_softc) mtx_assert(&(_softc)->hwrm_lock, \
+ MA_OWNED)
+#define BNXT_IS_FLOW_CTRL_CHANGED(link_info) \
+ ((link_info->last_flow_ctrl.tx != link_info->flow_ctrl.tx) || \
+ (link_info->last_flow_ctrl.rx != link_info->flow_ctrl.rx) || \
+ (link_info->last_flow_ctrl.autoneg != link_info->flow_ctrl.autoneg))
+
+/* Chip info */
+#define BNXT_TSO_SIZE UINT16_MAX
+
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1 : __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1 : __max2; })
+
+#define clamp_t(type, _x, min, max) min_t(type, max_t(type, _x, min), max)
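
These mirror the Linux kernel helpers; forcing one explicit type keeps
mixed-sign comparisons predictable. For a hypothetical user-supplied
value:

	uint16_t frames = clamp_t(uint16_t, user_frames, 1, 511);
	/* == min_t(uint16_t, max_t(uint16_t, user_frames, 1), 511) */
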
+
+#define BNXT_IFMEDIA_ADD(supported, fw_speed, ifm_speed) do { \
+ if ((supported) & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_ ## fw_speed) \
+ ifmedia_add(softc->media, IFM_ETHER | (ifm_speed), 0, NULL); \
+} while(0)
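
The macro pastes fw_speed onto the HWRM support-mask prefix, so call
sites read like a table. It references softc->media internally, so it
must run where softc is in scope; the speed/media pairing below is an
illustrative example:

	BNXT_IFMEDIA_ADD(softc->link_info.support_speeds, SPEEDS_25GB,
	    IFM_25G_CR);
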
+
+#define BNXT_MIN_FRAME_SIZE 52 /* Frames must be padded to this size for some A0 chips */
+
+#define BNXT_RX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct rx_port_stats_ext, counter) / 8)
+
+#define BNXT_RX_STATS_EXT_NUM_LEGACY \
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
+
+#define BNXT_TX_STATS_EXT_OFFSET(counter) \
+ (offsetof(struct tx_port_stats_ext, counter) / 8)
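
Both extended-stats blocks are laid out as arrays of 64-bit counters,
so offsetof()/8 is the counter's index in a u64 view of the block. A
sketch, assuming the DMA block has already been populated:

	uint64_t *rx64 = (uint64_t *)softc->rx_port_stats_ext;
	uint64_t fec_ok =
	    rx64[BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)];
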
+
+extern const char bnxt_driver_version[];
+typedef void (*bnxt_doorbell_tx)(void *, uint16_t idx);
+typedef void (*bnxt_doorbell_rx)(void *, uint16_t idx);
+typedef void (*bnxt_doorbell_rx_cq)(void *, bool);
+typedef void (*bnxt_doorbell_tx_cq)(void *, bool);
+typedef void (*bnxt_doorbell_nq)(void *, bool);
+
+typedef struct bnxt_doorbell_ops {
+ bnxt_doorbell_tx bnxt_db_tx;
+ bnxt_doorbell_rx bnxt_db_rx;
+ bnxt_doorbell_rx_cq bnxt_db_rx_cq;
+ bnxt_doorbell_tx_cq bnxt_db_tx_cq;
+ bnxt_doorbell_nq bnxt_db_nq;
+} bnxt_doorbell_ops_t;
+/* NVRAM access */
+enum bnxt_nvm_directory_type {
+ BNX_DIR_TYPE_UNUSED = 0,
+ BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
+ BNX_DIR_TYPE_CHIMP_PATCH = 3,
+ BNX_DIR_TYPE_BOOTCODE = 4,
+ BNX_DIR_TYPE_VPD = 5,
+ BNX_DIR_TYPE_EXP_ROM_MBA = 6,
+ BNX_DIR_TYPE_AVS = 7,
+ BNX_DIR_TYPE_PCIE = 8,
+ BNX_DIR_TYPE_PORT_MACRO = 9,
+ BNX_DIR_TYPE_APE_FW = 10,
+ BNX_DIR_TYPE_APE_PATCH = 11,
+ BNX_DIR_TYPE_KONG_FW = 12,
+ BNX_DIR_TYPE_KONG_PATCH = 13,
+ BNX_DIR_TYPE_BONO_FW = 14,
+ BNX_DIR_TYPE_BONO_PATCH = 15,
+ BNX_DIR_TYPE_TANG_FW = 16,
+ BNX_DIR_TYPE_TANG_PATCH = 17,
+ BNX_DIR_TYPE_BOOTCODE_2 = 18,
+ BNX_DIR_TYPE_CCM = 19,
+ BNX_DIR_TYPE_PCI_CFG = 20,
+ BNX_DIR_TYPE_TSCF_UCODE = 21,
+ BNX_DIR_TYPE_ISCSI_BOOT = 22,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV6 = 24,
+ BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6 = 25,
+ BNX_DIR_TYPE_ISCSI_BOOT_CFG6 = 26,
+ BNX_DIR_TYPE_EXT_PHY = 27,
+ BNX_DIR_TYPE_SHARED_CFG = 40,
+ BNX_DIR_TYPE_PORT_CFG = 41,
+ BNX_DIR_TYPE_FUNC_CFG = 42,
+ BNX_DIR_TYPE_MGMT_CFG = 48,
+ BNX_DIR_TYPE_MGMT_DATA = 49,
+ BNX_DIR_TYPE_MGMT_WEB_DATA = 50,
+ BNX_DIR_TYPE_MGMT_WEB_META = 51,
+ BNX_DIR_TYPE_MGMT_EVENT_LOG = 52,
+ BNX_DIR_TYPE_MGMT_AUDIT_LOG = 53
+};
+
+enum bnxnvm_pkglog_field_index {
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0,
+ BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1,
+ BNX_PKG_LOG_FIELD_IDX_PKG_VERSION = 2,
+ BNX_PKG_LOG_FIELD_IDX_PKG_TIMESTAMP = 3,
+ BNX_PKG_LOG_FIELD_IDX_PKG_CHECKSUM = 4,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_ITEMS = 5,
+ BNX_PKG_LOG_FIELD_IDX_INSTALLED_MASK = 6
+};
+
+#define BNX_DIR_ORDINAL_FIRST 0
+#define BNX_DIR_EXT_NONE 0
+
+struct bnxt_bar_info {
+ struct resource *res;
+ bus_space_tag_t tag;
+ bus_space_handle_t handle;
+ bus_size_t size;
+ int rid;
+};
+
+struct bnxt_flow_ctrl {
+ bool rx;
+ bool tx;
+ bool autoneg;
+};
+
+struct bnxt_link_info {
+ uint8_t media_type;
+ uint8_t transceiver;
+ uint8_t phy_addr;
+ uint8_t phy_link_status;
+ uint8_t wire_speed;
+ uint8_t loop_back;
+ uint8_t link_up;
+ uint8_t last_link_up;
+ uint8_t duplex;
+ uint8_t last_duplex;
+ uint8_t last_phy_type;
+ struct bnxt_flow_ctrl flow_ctrl;
+ struct bnxt_flow_ctrl last_flow_ctrl;
+ uint8_t duplex_setting;
+ uint8_t auto_mode;
+#define PHY_VER_LEN 3
+ uint8_t phy_ver[PHY_VER_LEN];
+ uint8_t phy_type;
+#define BNXT_PHY_STATE_ENABLED 0
+#define BNXT_PHY_STATE_DISABLED 1
+ uint8_t phy_state;
+
+ uint16_t link_speed;
+ uint16_t support_speeds;
+ uint16_t support_speeds2;
+ uint16_t support_pam4_speeds;
+ uint16_t auto_link_speeds;
+ uint16_t auto_link_speeds2;
+ uint16_t auto_pam4_link_speeds;
+ uint16_t force_link_speed;
+ uint16_t force_link_speeds2;
+ uint16_t force_pam4_link_speed;
+
+ bool force_pam4_speed;
+ bool force_speed2_nrz;
+ bool force_pam4_56_speed2;
+ bool force_pam4_112_speed2;
+
+ uint16_t advertising;
+ uint16_t advertising_pam4;
+
+ uint32_t preemphasis;
+ uint16_t support_auto_speeds;
+ uint16_t support_force_speeds;
+ uint16_t support_pam4_auto_speeds;
+ uint16_t support_pam4_force_speeds;
+ uint16_t support_auto_speeds2;
+ uint16_t support_force_speeds2;
+#define BNXT_SIG_MODE_NRZ HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ
+#define BNXT_SIG_MODE_PAM4 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
+#define BNXT_SIG_MODE_PAM4_112 HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4_112
+ uint8_t req_signal_mode;
+
+ uint8_t active_fec_sig_mode;
+ uint8_t sig_mode;
+
+ /* copy of requested setting */
+ uint8_t autoneg;
+#define BNXT_AUTONEG_SPEED 1
+#define BNXT_AUTONEG_FLOW_CTRL 2
+ uint8_t req_duplex;
+ uint16_t req_link_speed;
+ uint8_t module_status;
+ struct hwrm_port_phy_qcfg_output phy_qcfg_resp;
+};
+
+enum bnxt_phy_type {
+ BNXT_MEDIA_CR = 0,
+ BNXT_MEDIA_LR,
+ BNXT_MEDIA_SR,
+ BNXT_MEDIA_ER,
+ BNXT_MEDIA_KR,
+ BNXT_MEDIA_AC,
+ BNXT_MEDIA_BASECX,
+ BNXT_MEDIA_BASET,
+ BNXT_MEDIA_BASEKX,
+ BNXT_MEDIA_BASESGMII,
+ BNXT_MEDIA_END
+};
+
+enum bnxt_cp_type {
+ BNXT_DEFAULT,
+ BNXT_TX,
+ BNXT_RX,
+ BNXT_SHARED
+};
+
+struct bnxt_queue_info {
+ uint8_t queue_id;
+ uint8_t queue_profile;
+};
+
+struct bnxt_func_info {
+ uint32_t fw_fid;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctxs;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_hw_ring_grps;
+ uint16_t max_irqs;
+ uint16_t max_l2_ctxs;
+ uint16_t max_vnics;
+ uint16_t max_stat_ctxs;
+};
+
+struct bnxt_pf_info {
+#define BNXT_FIRST_PF_FID 1
+#define BNXT_FIRST_VF_FID 128
+ uint8_t port_id;
+ uint32_t first_vf_id;
+ uint16_t active_vfs;
+ uint16_t max_vfs;
+ uint32_t max_encap_records;
+ uint32_t max_decap_records;
+ uint32_t max_tx_em_flows;
+ uint32_t max_tx_wm_flows;
+ uint32_t max_rx_em_flows;
+ uint32_t max_rx_wm_flows;
+ unsigned long *vf_event_bmap;
+ uint16_t hwrm_cmd_req_pages;
+ void *hwrm_cmd_req_addr[4];
+ bus_addr_t hwrm_cmd_req_dma_addr[4];
+};
+
+struct bnxt_vf_info {
+ uint16_t fw_fid;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint16_t max_rsscos_ctxs;
+ uint16_t max_cp_rings;
+ uint16_t max_tx_rings;
+ uint16_t max_rx_rings;
+ uint16_t max_hw_ring_grps;
+ uint16_t max_l2_ctxs;
+ uint16_t max_irqs;
+ uint16_t max_vnics;
+ uint16_t max_stat_ctxs;
+ uint32_t vlan;
+#define BNXT_VF_QOS 0x1
+#define BNXT_VF_SPOOFCHK 0x2
+#define BNXT_VF_LINK_FORCED 0x4
+#define BNXT_VF_LINK_UP 0x8
+ uint32_t flags;
+ uint32_t func_flags; /* func cfg flags */
+ uint32_t min_tx_rate;
+ uint32_t max_tx_rate;
+ void *hwrm_cmd_req_addr;
+ bus_addr_t hwrm_cmd_req_dma_addr;
+};
+
+#define BNXT_PF(softc) (!((softc)->flags & BNXT_FLAG_VF))
+#define BNXT_VF(softc) ((softc)->flags & BNXT_FLAG_VF)
+
+struct bnxt_vlan_tag {
+ SLIST_ENTRY(bnxt_vlan_tag) next;
+ uint64_t filter_id;
+ uint16_t tag;
+};
+
+struct bnxt_vnic_info {
+ uint16_t id;
+ uint16_t def_ring_grp;
+ uint16_t cos_rule;
+ uint16_t lb_rule;
+ uint16_t mru;
+
+ uint32_t rx_mask;
+ struct iflib_dma_info mc_list;
+ int mc_list_count;
+#define BNXT_MAX_MC_ADDRS 16
+
+ uint32_t flags;
+#define BNXT_VNIC_FLAG_DEFAULT 0x01
+#define BNXT_VNIC_FLAG_BD_STALL 0x02
+#define BNXT_VNIC_FLAG_VLAN_STRIP 0x04
+
+ uint64_t filter_id;
+
+ uint16_t rss_id;
+ uint32_t rss_hash_type;
+ uint8_t rss_hash_key[HW_HASH_KEY_SIZE];
+ struct iflib_dma_info rss_hash_key_tbl;
+ struct iflib_dma_info rss_grp_tbl;
+ SLIST_HEAD(vlan_head, bnxt_vlan_tag) vlan_tags;
+ struct iflib_dma_info vlan_tag_list;
+};
+
+struct bnxt_grp_info {
+ uint16_t stats_ctx;
+ uint16_t grp_id;
+ uint16_t rx_ring_id;
+ uint16_t cp_ring_id;
+ uint16_t ag_ring_id;
+};
+
+#define EPOCH_ARR_SZ 4096
+
+struct bnxt_ring {
+ uint64_t paddr;
+ vm_offset_t doorbell;
+ caddr_t vaddr;
+ struct bnxt_softc *softc;
+ uint32_t ring_size; /* Must be a power of two */
+ uint16_t id; /* Logical ID */
+ uint16_t phys_id;
+ uint16_t idx;
+ struct bnxt_full_tpa_start *tpa_start;
+ union {
+ u64 db_key64;
+ u32 db_key32;
+ };
+ uint32_t db_ring_mask;
+ uint32_t db_epoch_mask;
+ uint8_t db_epoch_shift;
+
+ uint64_t epoch_arr[EPOCH_ARR_SZ];
+ bool epoch_bit;
+
+};
+
+struct bnxt_cp_ring {
+ struct bnxt_ring ring;
+ struct if_irq irq;
+ uint32_t cons;
+ uint32_t raw_cons;
+ bool v_bit; /* Value of valid bit */
+ struct ctx_hw_stats *stats;
+ uint32_t stats_ctx_id;
+	uint32_t		last_idx;	/* Used by RX rings only;
+						 * set to the last read pidx
+						 */
+ uint64_t int_count;
+ uint8_t toggle;
+ uint8_t type;
+#define Q_TYPE_TX 1
+#define Q_TYPE_RX 2
+};
+
+struct bnxt_full_tpa_start {
+ struct rx_tpa_start_cmpl low;
+ struct rx_tpa_start_cmpl_hi high;
+};
+
+/* All the version information for the part */
+#define BNXT_VERSTR_SIZE	(3*3+2+1)	/* i.e., "255.255.255\0" */
+#define BNXT_NAME_SIZE 17
+#define FW_VER_STR_LEN 32
+#define BC_HWRM_STR_LEN 21
+struct bnxt_ver_info {
+ uint8_t hwrm_if_major;
+ uint8_t hwrm_if_minor;
+ uint8_t hwrm_if_update;
+ char hwrm_if_ver[BNXT_VERSTR_SIZE];
+ char driver_hwrm_if_ver[BNXT_VERSTR_SIZE];
+ char mgmt_fw_ver[FW_VER_STR_LEN];
+ char netctrl_fw_ver[FW_VER_STR_LEN];
+ char roce_fw_ver[FW_VER_STR_LEN];
+ char fw_ver_str[FW_VER_STR_LEN];
+ char phy_ver[BNXT_VERSTR_SIZE];
+ char pkg_ver[64];
+
+ char hwrm_fw_name[BNXT_NAME_SIZE];
+ char mgmt_fw_name[BNXT_NAME_SIZE];
+ char netctrl_fw_name[BNXT_NAME_SIZE];
+ char roce_fw_name[BNXT_NAME_SIZE];
+ char phy_vendor[BNXT_NAME_SIZE];
+ char phy_partnumber[BNXT_NAME_SIZE];
+
+ uint16_t chip_num;
+ uint8_t chip_rev;
+ uint8_t chip_metal;
+ uint8_t chip_bond_id;
+ uint8_t chip_type;
+
+ uint8_t hwrm_min_major;
+ uint8_t hwrm_min_minor;
+ uint8_t hwrm_min_update;
+ uint64_t fw_ver_code;
+#define BNXT_FW_VER_CODE(maj, min, bld, rsv) \
+ ((uint64_t)(maj) << 48 | (uint64_t)(min) << 32 | (uint64_t)(bld) << 16 | (rsv))
+#define BNXT_FW_MAJ(softc) ((softc)->ver_info->fw_ver_code >> 48)
+#define BNXT_FW_MIN(softc) (((softc)->ver_info->fw_ver_code >> 32) & 0xffff)
+#define BNXT_FW_BLD(softc) (((softc)->ver_info->fw_ver_code >> 16) & 0xffff)
+#define BNXT_FW_RSV(softc) (((softc)->ver_info->fw_ver_code) & 0xffff)
+
+ struct sysctl_ctx_list ver_ctx;
+ struct sysctl_oid *ver_oid;
+};
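
fw_ver_code packs major/minor/build/reserved from the top bits down, so
a minimum-release check is a single unsigned compare; the version
numbers here are only examples:

	uint64_t code = BNXT_FW_VER_CODE(230, 2, 31, 0);	/* 230.2.31.0 */
	/* (code >> 48) == 230, i.e. what BNXT_FW_MAJ() returns once the
	 * value is stored in ver_info->fw_ver_code */
	bool new_enough = code >= BNXT_FW_VER_CODE(218, 18, 0, 0); /* true */
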
+
+struct bnxt_nvram_info {
+ uint16_t mfg_id;
+ uint16_t device_id;
+ uint32_t sector_size;
+ uint32_t size;
+ uint32_t reserved_size;
+ uint32_t available_size;
+
+ struct sysctl_ctx_list nvm_ctx;
+ struct sysctl_oid *nvm_oid;
+};
+
+struct bnxt_func_qcfg {
+ uint16_t alloc_completion_rings;
+ uint16_t alloc_tx_rings;
+ uint16_t alloc_rx_rings;
+ uint16_t alloc_vnics;
+};
+
+struct bnxt_hw_lro {
+ uint16_t enable;
+ uint16_t is_mode_gro;
+ uint16_t max_agg_segs;
+ uint16_t max_aggs;
+ uint32_t min_agg_len;
+};
+
+/* The hardware supports certain page sizes. Use the supported page sizes
+ * to allocate the rings.
+ */
+#if (PAGE_SHIFT < 12)
+#define BNXT_PAGE_SHIFT 12
+#elif (PAGE_SHIFT <= 13)
+#define BNXT_PAGE_SHIFT PAGE_SHIFT
+#elif (PAGE_SHIFT < 16)
+#define BNXT_PAGE_SHIFT 13
+#else
+#define BNXT_PAGE_SHIFT 16
+#endif
+
+#define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
+
+#define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8)
+#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES)
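
Each table page holds BNXT_PAGE_SIZE/8 64-bit PTEs, so two levels of
indirection reach MAX_CTX_PAGES squared pages (512 * 512 with 4 KiB
pages). A sketch of the sizing arithmetic, not the driver's allocator:

	/* pages needed to back 'entries' objects of 'entry_size' bytes */
	static inline uint32_t
	bnxt_ctx_pages_needed(uint32_t entries, uint16_t entry_size)
	{
		return (DIV_ROUND_UP((uint64_t)entries * entry_size,
		    BNXT_PAGE_SIZE));
	}
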
+
+struct bnxt_ring_mem_info {
+ int nr_pages;
+ int page_size;
+ uint16_t flags;
+#define BNXT_RMEM_VALID_PTE_FLAG 1
+#define BNXT_RMEM_RING_PTE_FLAG 2
+#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4
+ uint16_t depth;
+ struct bnxt_ctx_mem_type *ctx_mem;
+
+ struct iflib_dma_info *pg_arr;
+ struct iflib_dma_info pg_tbl;
+
+ int vmem_size;
+ void **vmem;
+};
+
+struct bnxt_ctx_pg_info {
+ uint32_t entries;
+ uint32_t nr_pages;
+ struct iflib_dma_info ctx_arr[MAX_CTX_PAGES];
+ struct bnxt_ring_mem_info ring_mem;
+ struct bnxt_ctx_pg_info **ctx_pg_tbl;
+};
+
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_LEGACY_RINGS 8
+#define BNXT_MAX_TQM_FP_RINGS 9
+#define BNXT_MAX_TQM_LEGACY_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_LEGACY_RINGS)
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
+#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define BNXT_BACKING_STORE_CFG_LEN \
+ sizeof(struct hwrm_func_backing_store_cfg_input)
+
+#define BNXT_SET_CTX_PAGE_ATTR(attr) \
+do { \
+ if (BNXT_PAGE_SIZE == 0x2000) \
+ attr = HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_8K; \
+ else if (BNXT_PAGE_SIZE == 0x10000) \
+ attr = HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_64K; \
+ else \
+ attr = HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_PG_SIZE_PG_4K; \
+} while (0)
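
The macro resolves at compile time to the page-size encoding matching
BNXT_PAGE_SHIFT. A usage sketch, with a local variable standing in for
the actual request field:

	uint8_t pg_attr;

	BNXT_SET_CTX_PAGE_ATTR(pg_attr);
	/* pg_attr now carries the *_PG_SIZE_PG_{4K,8K,64K} encoding and
	 * can be merged into a backing-store configuration request. */
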
+
+struct bnxt_ctx_mem_type {
+ u16 type;
+ u16 entry_size;
+ u32 flags;
+#define BNXT_CTX_MEM_TYPE_VALID HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID
+ u32 instance_bmap;
+ u8 init_value;
+ u8 entry_multiple;
+ u16 init_offset;
+#define BNXT_CTX_INIT_INVALID_OFFSET 0xffff
+ u32 max_entries;
+ u32 min_entries;
+ u8 last:1;
+ u8 mem_valid:1;
+ u8 split_entry_cnt;
+#define BNXT_MAX_SPLIT_ENTRY 4
+ union {
+ struct {
+ u32 qp_l2_entries;
+ u32 qp_qp1_entries;
+ };
+ u32 srq_l2_entries;
+ u32 cq_l2_entries;
+ u32 vnic_entries;
+ struct {
+ u32 mrav_av_entries;
+ u32 mrav_num_entries_units;
+ };
+ u32 split[BNXT_MAX_SPLIT_ENTRY];
+ };
+ struct bnxt_ctx_pg_info *pg_info;
+};
+
+#define BNXT_CTX_QP HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QP
+#define BNXT_CTX_SRQ HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ
+#define BNXT_CTX_CQ HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ
+#define BNXT_CTX_VNIC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_VNIC
+#define BNXT_CTX_STAT HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_STAT
+#define BNXT_CTX_STQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SP_TQM_RING
+#define BNXT_CTX_FTQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_FP_TQM_RING
+#define BNXT_CTX_MRAV HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV
+#define BNXT_CTX_TIM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM
+#define BNXT_CTX_TKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TKC
+#define BNXT_CTX_RKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RKC
+#define BNXT_CTX_MTQM HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MP_TQM_RING
+#define BNXT_CTX_SQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SQ_DB_SHADOW
+#define BNXT_CTX_RQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RQ_DB_SHADOW
+#define BNXT_CTX_SRQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ_DB_SHADOW
+#define BNXT_CTX_CQDBS HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ_DB_SHADOW
+#define BNXT_CTX_QTKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_TKC
+#define BNXT_CTX_QRKC HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_RKC
+#define BNXT_CTX_MAX (BNXT_CTX_TIM + 1)
+#define BNXT_CTX_L2_MAX (BNXT_CTX_FTQM + 1)
+
+#define BNXT_CTX_V2_MAX (HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE + 1)
+#define BNXT_CTX_SRT_TRACE HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRT_TRACE
+#define BNXT_CTX_ROCE_HWRM_TRACE HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE
+#define BNXT_CTX_INV ((u16)-1)
+
+struct bnxt_ctx_mem_info {
+ u8 tqm_fp_rings_count;
+
+ u32 flags;
+ #define BNXT_CTX_FLAG_INITED 0x01
+ struct bnxt_ctx_mem_type ctx_arr[BNXT_CTX_V2_MAX];
+};
+
+struct bnxt_hw_resc {
+ uint16_t min_rsscos_ctxs;
+ uint16_t max_rsscos_ctxs;
+ uint16_t min_cp_rings;
+ uint16_t max_cp_rings;
+ uint16_t resv_cp_rings;
+ uint16_t min_tx_rings;
+ uint16_t max_tx_rings;
+ uint16_t resv_tx_rings;
+ uint16_t max_tx_sch_inputs;
+ uint16_t min_rx_rings;
+ uint16_t max_rx_rings;
+ uint16_t resv_rx_rings;
+ uint16_t min_hw_ring_grps;
+ uint16_t max_hw_ring_grps;
+ uint16_t resv_hw_ring_grps;
+ uint16_t min_l2_ctxs;
+ uint16_t max_l2_ctxs;
+ uint16_t min_vnics;
+ uint16_t max_vnics;
+ uint16_t resv_vnics;
+ uint16_t min_stat_ctxs;
+ uint16_t max_stat_ctxs;
+ uint16_t resv_stat_ctxs;
+ uint16_t max_nqs;
+ uint16_t max_irqs;
+ uint16_t resv_irqs;
+};
+
+enum bnxt_type_ets {
+ BNXT_TYPE_ETS_TSA = 0,
+ BNXT_TYPE_ETS_PRI2TC,
+ BNXT_TYPE_ETS_TCBW,
+ BNXT_TYPE_ETS_MAX
+};
+
+static const char *const BNXT_ETS_TYPE_STR[] = {
+ "tsa",
+ "pri2tc",
+ "tcbw",
+};
+
+static const char *const BNXT_ETS_HELP_STR[] = {
+	"X is 1 (strict) or 0 (ets)",
+	"TC values for pri 0 to 7",
+	"TC BW values for pri 0 to 7; sum should be 100",
+};
+
+#define BNXT_HWRM_MAX_REQ_LEN (softc->hwrm_max_req_len)
+
+struct bnxt_softc_list {
+ SLIST_ENTRY(bnxt_softc_list) next;
+ struct bnxt_softc *softc;
+};
+
+#ifndef BIT_ULL
+#define BIT_ULL(nr) (1ULL << (nr))
+#endif
+
+struct bnxt_aux_dev {
+ struct auxiliary_device aux_dev;
+ struct bnxt_en_dev *edev;
+ int id;
+};
+
+struct bnxt_msix_tbl {
+ uint32_t entry;
+ uint32_t vector;
+};
+
+enum bnxt_health_severity {
+ SEVERITY_NORMAL = 0,
+ SEVERITY_WARNING,
+ SEVERITY_RECOVERABLE,
+ SEVERITY_FATAL,
+};
+
+enum bnxt_health_remedy {
+ REMEDY_DEVLINK_RECOVER,
+ REMEDY_POWER_CYCLE_DEVICE,
+ REMEDY_POWER_CYCLE_HOST,
+ REMEDY_FW_UPDATE,
+ REMEDY_HW_REPLACE,
+};
+
+struct bnxt_fw_health {
+ u32 flags;
+ u32 polling_dsecs;
+ u32 master_func_wait_dsecs;
+ u32 normal_func_wait_dsecs;
+ u32 post_reset_wait_dsecs;
+ u32 post_reset_max_wait_dsecs;
+ u32 regs[4];
+ u32 mapped_regs[4];
+#define BNXT_FW_HEALTH_REG 0
+#define BNXT_FW_HEARTBEAT_REG 1
+#define BNXT_FW_RESET_CNT_REG 2
+#define BNXT_FW_RESET_INPROG_REG 3
+ u32 fw_reset_inprog_reg_mask;
+ u32 last_fw_heartbeat;
+ u32 last_fw_reset_cnt;
+ u8 enabled:1;
+ u8 primary:1;
+ u8 status_reliable:1;
+ u8 resets_reliable:1;
+ u8 tmr_multiplier;
+ u8 tmr_counter;
+ u8 fw_reset_seq_cnt;
+ u32 fw_reset_seq_regs[16];
+ u32 fw_reset_seq_vals[16];
+ u32 fw_reset_seq_delay_msec[16];
+ u32 echo_req_data1;
+ u32 echo_req_data2;
+ struct devlink_health_reporter *fw_reporter;
+ struct mutex lock;
+ enum bnxt_health_severity severity;
+ enum bnxt_health_remedy remedy;
+ u32 arrests;
+ u32 discoveries;
+ u32 survivals;
+ u32 fatalities;
+ u32 diagnoses;
+};
+
+#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
+#define BNXT_FW_HEALTH_REG_TYPE_CFG 0
+#define BNXT_FW_HEALTH_REG_TYPE_GRC 1
+#define BNXT_FW_HEALTH_REG_TYPE_BAR0 2
+#define BNXT_FW_HEALTH_REG_TYPE_BAR1 3
+
+#define BNXT_FW_HEALTH_REG_TYPE(reg) ((reg) & BNXT_FW_HEALTH_REG_TYPE_MASK)
+#define BNXT_FW_HEALTH_REG_OFF(reg) ((reg) & ~BNXT_FW_HEALTH_REG_TYPE_MASK)
+
+#define BNXT_FW_HEALTH_WIN_BASE 0x3000
+#define BNXT_FW_HEALTH_WIN_MAP_OFF 8
+
+#define BNXT_FW_HEALTH_WIN_OFF(reg) (BNXT_FW_HEALTH_WIN_BASE + \
+ ((reg) & BNXT_GRC_OFFSET_MASK))
+
+#define BNXT_FW_STATUS_HEALTH_MSK 0xffff
+#define BNXT_FW_STATUS_HEALTHY 0x8000
+#define BNXT_FW_STATUS_SHUTDOWN 0x100000
+#define BNXT_FW_STATUS_RECOVERING 0x400000
+
+#define BNXT_FW_IS_HEALTHY(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) ==\
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_BOOTING(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) < \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_ERR(sts) (((sts) & BNXT_FW_STATUS_HEALTH_MSK) > \
+ BNXT_FW_STATUS_HEALTHY)
+
+#define BNXT_FW_IS_RECOVERING(sts) (BNXT_FW_IS_ERR(sts) && \
+ ((sts) & BNXT_FW_STATUS_RECOVERING))
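
The low 16 bits of the status word count boot progress up to HEALTHY;
values above it indicate errors. That ordering lets plain comparisons
classify the firmware state, as in this hypothetical helper built on
the predicates above:

	static const char *
	bnxt_fw_status_str(uint32_t sts)
	{
		if (BNXT_FW_IS_HEALTHY(sts))
			return ("healthy");
		if (BNXT_FW_IS_BOOTING(sts))
			return ("booting");
		if (BNXT_FW_IS_RECOVERING(sts))
			return ("recovering");
		return ("error");
	}
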
+
+#define BNXT_FW_RETRY 5
+#define BNXT_FW_IF_RETRY 10
+#define BNXT_FW_SLOT_RESET_RETRY 4
+
+#define BNXT_GRCPF_REG_CHIMP_COMM 0x0
+#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100
+#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
+#define BNXT_GRCPF_REG_SYNC_TIME 0x480
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ 0x488
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_MSK 0xffffffUL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_PER_SFT 0
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_MSK 0x1f000000UL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_VAL_SFT 24
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_MSK 0x20000000UL
+#define BNXT_GRCPF_REG_SYNC_TIME_ADJ_SIGN_SFT 29
+
+#define BNXT_GRC_REG_STATUS_P5 0x520
+
+#define BNXT_GRCPF_REG_KONG_COMM 0xA00
+#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00
+
+#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
+#define BNXT_CAG_REG_BASE 0x300000
+
+#define BNXT_GRC_REG_CHIP_NUM 0x48
+#define BNXT_GRC_REG_BASE 0x260000
+
+#define BNXT_TS_REG_TIMESYNC_TS0_LOWER 0x640180c
+#define BNXT_TS_REG_TIMESYNC_TS0_UPPER 0x6401810
+
+#define BNXT_GRC_BASE_MASK 0xfffff000
+#define BNXT_GRC_OFFSET_MASK 0x00000ffc
+
+#define NQE_CN_TYPE(type) ((type) & NQ_CN_TYPE_MASK)
+#define NQE_CN_TOGGLE(type) (((type) & NQ_CN_TOGGLE_MASK) >> \
+ NQ_CN_TOGGLE_SFT)
+
+#define DB_EPOCH(ring, idx) (((idx) & (ring)->db_epoch_mask) << \
+ ((ring)->db_epoch_shift))
+
+#define DB_TOGGLE(tgl) ((tgl) << DBR_TOGGLE_SFT)
+
+#define DB_RING_IDX_CMP(ring, idx) (((idx) & (ring)->db_ring_mask) | \
+ DB_EPOCH(ring, idx))
+
+#define DB_RING_IDX(ring, idx, bit) (((idx) & (ring)->db_ring_mask) | \
+ ((bit) << (24)))
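
On newer parts the doorbell index carries an epoch bit at bit 24 so the
hardware can distinguish a full ring from an empty one after a wrap. A
sketch of ringing a TX doorbell with it folded in; the real writers go
through the db_ops table and bus-space accessors, so the direct store
here is only for illustration:

	static inline void
	bnxt_db_tx_sketch(struct bnxt_ring *ring, uint32_t prod, bool epoch)
	{
		uint64_t db = ring->db_key64 | DB_RING_IDX(ring, prod, epoch);

		*(volatile uint64_t *)ring->doorbell = htole64(db);
	}
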
+
+struct bnxt_softc {
+ device_t dev;
+ if_ctx_t ctx;
+ if_softc_ctx_t scctx;
+ if_shared_ctx_t sctx;
+ if_t ifp;
+ uint32_t domain;
+ uint32_t bus;
+ uint32_t slot;
+ uint32_t function;
+ uint32_t dev_fn;
+ struct ifmedia *media;
+ struct bnxt_ctx_mem_info *ctx_mem;
+ struct bnxt_hw_resc hw_resc;
+ struct bnxt_softc_list list;
+
+ struct bnxt_bar_info hwrm_bar;
+ struct bnxt_bar_info doorbell_bar;
+ struct bnxt_link_info link_info;
+#define BNXT_FLAG_VF 0x0001
+#define BNXT_FLAG_NPAR 0x0002
+#define BNXT_FLAG_WOL_CAP 0x0004
+#define BNXT_FLAG_SHORT_CMD 0x0008
+#define BNXT_FLAG_FW_CAP_NEW_RM 0x0010
+#define BNXT_FLAG_CHIP_P5 0x0020
+#define BNXT_FLAG_TPA 0x0040
+#define BNXT_FLAG_FW_CAP_EXT_STATS 0x0080
+#define BNXT_FLAG_MULTI_HOST 0x0100
+#define BNXT_FLAG_MULTI_ROOT 0x0200
+#define BNXT_FLAG_ROCEV1_CAP 0x0400
+#define BNXT_FLAG_ROCEV2_CAP 0x0800
+#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | BNXT_FLAG_ROCEV2_CAP)
+#define BNXT_FLAG_CHIP_P7 0x1000
+ uint32_t flags;
+#define BNXT_STATE_LINK_CHANGE (0)
+#define BNXT_STATE_MAX (BNXT_STATE_LINK_CHANGE + 1)
+ bitstr_t *state_bv;
+
+ uint32_t total_irqs;
+ struct bnxt_msix_tbl *irq_tbl;
+
+ struct bnxt_func_info func;
+ struct bnxt_func_qcfg fn_qcfg;
+ struct bnxt_pf_info pf;
+ struct bnxt_vf_info vf;
+
+ uint16_t hwrm_cmd_seq;
+ uint32_t hwrm_cmd_timeo; /* milliseconds */
+ struct iflib_dma_info hwrm_cmd_resp;
+ struct iflib_dma_info hwrm_short_cmd_req_addr;
+ /* Interrupt info for HWRM */
+ struct if_irq irq;
+ struct mtx hwrm_lock;
+ uint16_t hwrm_max_req_len;
+ uint16_t hwrm_max_ext_req_len;
+ uint32_t hwrm_spec_code;
+
+#define BNXT_MAX_QUEUE 8
+ uint8_t max_tc;
+ uint8_t max_lltc;
+ struct bnxt_queue_info tx_q_info[BNXT_MAX_QUEUE];
+ struct bnxt_queue_info rx_q_info[BNXT_MAX_QUEUE];
+ uint8_t tc_to_qidx[BNXT_MAX_QUEUE];
+ uint8_t tx_q_ids[BNXT_MAX_QUEUE];
+ uint8_t rx_q_ids[BNXT_MAX_QUEUE];
+ uint8_t tx_max_q;
+ uint8_t rx_max_q;
+ uint8_t is_asym_q;
+
+ struct bnxt_ieee_ets *ieee_ets;
+ struct bnxt_ieee_pfc *ieee_pfc;
+ uint8_t dcbx_cap;
+ uint8_t default_pri;
+ uint8_t max_dscp_value;
+
+ uint64_t admin_ticks;
+ struct iflib_dma_info hw_rx_port_stats;
+ struct iflib_dma_info hw_tx_port_stats;
+ struct rx_port_stats *rx_port_stats;
+ struct tx_port_stats *tx_port_stats;
+
+ struct iflib_dma_info hw_tx_port_stats_ext;
+ struct iflib_dma_info hw_rx_port_stats_ext;
+ struct tx_port_stats_ext *tx_port_stats_ext;
+ struct rx_port_stats_ext *rx_port_stats_ext;
+
+ uint16_t fw_rx_stats_ext_size;
+ uint16_t fw_tx_stats_ext_size;
+ uint16_t hw_ring_stats_size;
+
+ uint8_t tx_pri2cos_idx[8];
+ uint8_t rx_pri2cos_idx[8];
+ bool pri2cos_valid;
+
+ uint64_t tx_bytes_pri[8];
+ uint64_t tx_packets_pri[8];
+ uint64_t rx_bytes_pri[8];
+ uint64_t rx_packets_pri[8];
+
+ uint8_t port_count;
+ int num_cp_rings;
+
+ struct bnxt_cp_ring *nq_rings;
+
+ struct bnxt_ring *tx_rings;
+ struct bnxt_cp_ring *tx_cp_rings;
+ struct iflib_dma_info tx_stats[BNXT_MAX_NUM_QUEUES];
+ int ntxqsets;
+
+ struct bnxt_vnic_info vnic_info;
+ struct bnxt_ring *ag_rings;
+ struct bnxt_ring *rx_rings;
+ struct bnxt_cp_ring *rx_cp_rings;
+ struct bnxt_grp_info *grp_info;
+ struct iflib_dma_info rx_stats[BNXT_MAX_NUM_QUEUES];
+ int nrxqsets;
+ uint16_t rx_buf_size;
+
+ struct bnxt_cp_ring def_cp_ring;
+ struct bnxt_cp_ring def_nq_ring;
+ struct iflib_dma_info def_cp_ring_mem;
+ struct iflib_dma_info def_nq_ring_mem;
+ struct task def_cp_task;
+ int db_size;
+ int legacy_db_size;
+ struct bnxt_doorbell_ops db_ops;
+
+ struct sysctl_ctx_list hw_stats;
+ struct sysctl_oid *hw_stats_oid;
+ struct sysctl_ctx_list hw_lro_ctx;
+ struct sysctl_oid *hw_lro_oid;
+ struct sysctl_ctx_list flow_ctrl_ctx;
+ struct sysctl_oid *flow_ctrl_oid;
+ struct sysctl_ctx_list dcb_ctx;
+ struct sysctl_oid *dcb_oid;
+
+ struct bnxt_ver_info *ver_info;
+ struct bnxt_nvram_info *nvm_info;
+ bool wol;
+ bool is_dev_init;
+ struct bnxt_hw_lro hw_lro;
+ uint8_t wol_filter_id;
+ uint16_t rx_coal_usecs;
+ uint16_t rx_coal_usecs_irq;
+ uint16_t rx_coal_frames;
+ uint16_t rx_coal_frames_irq;
+ uint16_t tx_coal_usecs;
+ uint16_t tx_coal_usecs_irq;
+ uint16_t tx_coal_frames;
+ uint16_t tx_coal_frames_irq;
+
+#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+#define BNXT_DEF_STATS_COAL_TICKS 1000000
+#define BNXT_MIN_STATS_COAL_TICKS 250000
+#define BNXT_MAX_STATS_COAL_TICKS 1000000
+
+ uint64_t fw_cap;
+ #define BNXT_FW_CAP_SHORT_CMD BIT_ULL(0)
+ #define BNXT_FW_CAP_LLDP_AGENT BIT_ULL(1)
+ #define BNXT_FW_CAP_DCBX_AGENT BIT_ULL(2)
+ #define BNXT_FW_CAP_NEW_RM BIT_ULL(3)
+ #define BNXT_FW_CAP_IF_CHANGE BIT_ULL(4)
+ #define BNXT_FW_CAP_LINK_ADMIN BIT_ULL(5)
+ #define BNXT_FW_CAP_VF_RES_MIN_GUARANTEED BIT_ULL(6)
+ #define BNXT_FW_CAP_KONG_MB_CHNL BIT_ULL(7)
+ #define BNXT_FW_CAP_ADMIN_MTU BIT_ULL(8)
+ #define BNXT_FW_CAP_ADMIN_PF BIT_ULL(9)
+ #define BNXT_FW_CAP_OVS_64BIT_HANDLE BIT_ULL(10)
+ #define BNXT_FW_CAP_TRUSTED_VF BIT_ULL(11)
+ #define BNXT_FW_CAP_VF_VNIC_NOTIFY BIT_ULL(12)
+ #define BNXT_FW_CAP_ERROR_RECOVERY BIT_ULL(13)
+ #define BNXT_FW_CAP_PKG_VER BIT_ULL(14)
+ #define BNXT_FW_CAP_CFA_ADV_FLOW BIT_ULL(15)
+ #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
+ #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
+ #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_SECURE_MODE BIT_ULL(19)
+ #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
+ #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
+ #define BNXT_FW_CAP_CRASHDUMP BIT_ULL(23)
+ #define BNXT_FW_CAP_VLAN_RX_STRIP BIT_ULL(24)
+ #define BNXT_FW_CAP_VLAN_TX_INSERT BIT_ULL(25)
+ #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED BIT_ULL(26)
+ #define BNXT_FW_CAP_CFA_EEM BIT_ULL(27)
+ #define BNXT_FW_CAP_DBG_QCAPS BIT_ULL(29)
+ #define BNXT_FW_CAP_RING_MONITOR BIT_ULL(30)
+ #define BNXT_FW_CAP_ECN_STATS BIT_ULL(31)
+ #define BNXT_FW_CAP_TRUFLOW BIT_ULL(32)
+ #define BNXT_FW_CAP_VF_CFG_FOR_PF BIT_ULL(33)
+ #define BNXT_FW_CAP_PTP_PPS BIT_ULL(34)
+ #define BNXT_FW_CAP_HOT_RESET_IF BIT_ULL(35)
+ #define BNXT_FW_CAP_LIVEPATCH BIT_ULL(36)
+ #define BNXT_FW_CAP_NPAR_1_2 BIT_ULL(37)
+ #define BNXT_FW_CAP_RSS_HASH_TYPE_DELTA BIT_ULL(38)
+ #define BNXT_FW_CAP_PTP_RTC BIT_ULL(39)
+ #define BNXT_FW_CAP_TRUFLOW_EN BIT_ULL(40)
+ #define BNXT_TRUFLOW_EN(bp) ((bp)->fw_cap & BNXT_FW_CAP_TRUFLOW_EN)
+ #define BNXT_FW_CAP_RX_ALL_PKT_TS BIT_ULL(41)
+ #define BNXT_FW_CAP_BACKING_STORE_V2 BIT_ULL(42)
+ #define BNXT_FW_CAP_DBR_SUPPORTED BIT_ULL(43)
+ #define BNXT_FW_CAP_GENERIC_STATS BIT_ULL(44)
+ #define BNXT_FW_CAP_DBR_PACING_SUPPORTED BIT_ULL(45)
+ #define BNXT_FW_CAP_PTP_PTM BIT_ULL(46)
+ #define BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO BIT_ULL(47)
+ #define BNXT_FW_CAP_ENABLE_RDMA_SRIOV BIT_ULL(48)
+ #define BNXT_FW_CAP_RSS_TCAM BIT_ULL(49)
+ uint32_t lpi_tmr_lo;
+ uint32_t lpi_tmr_hi;
+ /* copied from flags and flags2 in hwrm_port_phy_qcaps_output */
+ uint16_t phy_flags;
+#define BNXT_PHY_FL_EEE_CAP HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED
+#define BNXT_PHY_FL_EXT_LPBK HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EXTERNAL_LPBK_SUPPORTED
+#define BNXT_PHY_FL_AN_PHY_LPBK HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_AUTONEG_LPBK_SUPPORTED
+#define BNXT_PHY_FL_SHARED_PORT_CFG HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_SHARED_PHY_CFG_SUPPORTED
+#define BNXT_PHY_FL_PORT_STATS_NO_RESET HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_CUMULATIVE_COUNTERS_ON_RESET
+#define BNXT_PHY_FL_NO_PHY_LPBK HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_LOCAL_LPBK_NOT_SUPPORTED
+#define BNXT_PHY_FL_FW_MANAGED_LKDN HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_FW_MANAGED_LINK_DOWN
+#define BNXT_PHY_FL_NO_FCS HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_NO_FCS
+#define BNXT_PHY_FL_NO_PAUSE (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PAUSE_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_NO_PFC (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PFC_UNSUPPORTED << 8)
+#define BNXT_PHY_FL_BANK_SEL (HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_BANK_ADDR_SUPPORTED << 8)
+ struct bnxt_aux_dev *aux_dev;
+ struct net_device *net_dev;
+ struct mtx en_ops_lock;
+ uint8_t port_partition_type;
+ struct bnxt_en_dev *edev;
+ unsigned long state;
+#define BNXT_STATE_OPEN 0
+#define BNXT_STATE_IN_SP_TASK 1
+#define BNXT_STATE_READ_STATS 2
+#define BNXT_STATE_FW_RESET_DET 3
+#define BNXT_STATE_IN_FW_RESET 4
+#define BNXT_STATE_ABORT_ERR 5
+#define BNXT_STATE_FW_FATAL_COND 6
+#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
+#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_L2_FILTER_RETRY 10
+#define BNXT_STATE_FW_ACTIVATE 11
+#define BNXT_STATE_RECOVER 12
+#define BNXT_STATE_FW_NON_FATAL_COND 13
+#define BNXT_STATE_FW_ACTIVATE_RESET 14
+#define BNXT_STATE_HALF_OPEN 15
+#define BNXT_NO_FW_ACCESS(bp) \
+ test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state)
+ struct pci_dev *pdev;
+
+ struct work_struct sp_task;
+ unsigned long sp_event;
+#define BNXT_RX_MASK_SP_EVENT 0
+#define BNXT_RX_NTP_FLTR_SP_EVENT 1
+#define BNXT_LINK_CHNG_SP_EVENT 2
+#define BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT 3
+#define BNXT_VXLAN_ADD_PORT_SP_EVENT 4
+#define BNXT_VXLAN_DEL_PORT_SP_EVENT 5
+#define BNXT_RESET_TASK_SP_EVENT 6
+#define BNXT_RST_RING_SP_EVENT 7
+#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
+#define BNXT_PERIODIC_STATS_SP_EVENT 9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
+#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
+#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
+#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
+#define BNXT_LINK_SPEED_CHNG_SP_EVENT 14
+#define BNXT_FLOW_STATS_SP_EVENT 15
+#define BNXT_UPDATE_PHY_SP_EVENT 16
+#define BNXT_RING_COAL_NOW_SP_EVENT 17
+#define BNXT_FW_RESET_NOTIFY_SP_EVENT 18
+#define BNXT_FW_EXCEPTION_SP_EVENT 19
+#define BNXT_VF_VNIC_CHANGE_SP_EVENT 20
+#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
+#define BNXT_PTP_CURRENT_TIME_EVENT 22
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_VF_CFG_CHNG_SP_EVENT 24
+
+ struct delayed_work fw_reset_task;
+ int fw_reset_state;
+#define BNXT_FW_RESET_STATE_POLL_VF 1
+#define BNXT_FW_RESET_STATE_RESET_FW 2
+#define BNXT_FW_RESET_STATE_ENABLE_DEV 3
+#define BNXT_FW_RESET_STATE_POLL_FW 4
+#define BNXT_FW_RESET_STATE_OPENING 5
+#define BNXT_FW_RESET_STATE_POLL_FW_DOWN 6
+ u16 fw_reset_min_dsecs;
+#define BNXT_DFLT_FW_RST_MIN_DSECS 20
+ u16 fw_reset_max_dsecs;
+#define BNXT_DFLT_FW_RST_MAX_DSECS 60
+ unsigned long fw_reset_timestamp;
+
+ struct bnxt_fw_health *fw_health;
+};
+
+struct bnxt_filter_info {
+ STAILQ_ENTRY(bnxt_filter_info) next;
+ uint64_t fw_l2_filter_id;
+#define INVALID_MAC_INDEX ((uint16_t)-1)
+ uint16_t mac_index;
+
+ /* Filter Characteristics */
+ uint32_t flags;
+ uint32_t enables;
+ uint8_t l2_addr[ETHER_ADDR_LEN];
+ uint8_t l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t l2_ovlan;
+ uint16_t l2_ovlan_mask;
+ uint16_t l2_ivlan;
+ uint16_t l2_ivlan_mask;
+ uint8_t t_l2_addr[ETHER_ADDR_LEN];
+ uint8_t t_l2_addr_mask[ETHER_ADDR_LEN];
+ uint16_t t_l2_ovlan;
+ uint16_t t_l2_ovlan_mask;
+ uint16_t t_l2_ivlan;
+ uint16_t t_l2_ivlan_mask;
+ uint8_t tunnel_type;
+ uint16_t mirror_vnic_id;
+ uint32_t vni;
+ uint8_t pri_hint;
+ uint64_t l2_filter_id_hint;
+};
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+
+/* Function declarations */
+void bnxt_report_link(struct bnxt_softc *softc);
+bool bnxt_check_hwrm_version(struct bnxt_softc *softc);
+struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *name);
+int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *bp, uint16_t i2c_addr,
+ uint16_t page_number, uint8_t bank, bool bank_sel_en, uint16_t start_addr,
+ uint16_t data_length, uint8_t *buf);
+void bnxt_dcb_init(struct bnxt_softc *softc);
+void bnxt_dcb_free(struct bnxt_softc *softc);
+uint8_t bnxt_dcb_setdcbx(struct bnxt_softc *softc, uint8_t mode);
+uint8_t bnxt_dcb_getdcbx(struct bnxt_softc *softc);
+int bnxt_dcb_ieee_getets(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets);
+int bnxt_dcb_ieee_setets(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets);
+uint8_t get_phy_type(struct bnxt_softc *softc);
+int bnxt_dcb_ieee_getpfc(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc);
+int bnxt_dcb_ieee_setpfc(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc);
+int bnxt_dcb_ieee_setapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app);
+int bnxt_dcb_ieee_delapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app);
+int bnxt_dcb_ieee_listapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ size_t nitems, int *num_inputs);
+
+#endif /* _BNXT_H */
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.c b/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.c
new file mode 100644
index 000000000000..1014d360015a
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.c
@@ -0,0 +1,194 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+
+#include "bnxt_auxbus_compat.h"
+
+static struct list_head bnxt_aux_bus_dev_list = LINUX_LIST_HEAD_INIT(bnxt_aux_bus_dev_list);
+static struct list_head bnxt_aux_bus_drv_list = LINUX_LIST_HEAD_INIT(bnxt_aux_bus_drv_list);
+static DEFINE_MUTEX(bnxt_auxbus_lock);
+
+static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
+ const struct auxiliary_device *auxdev)
+{
+ for (; id->name[0]; id++) {
+ const char *p = strrchr(dev_name(&auxdev->dev), '.');
+ int match_size;
+
+ if (!p)
+ continue;
+ match_size = p - dev_name(&auxdev->dev);
+
+ if (strlen(id->name) == match_size &&
+ !strncmp(dev_name(&auxdev->dev), id->name, match_size))
+ return id;
+ }
+ return NULL;
+}
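+
+/*
+ * Matching example (illustrative): auxiliary_device_init() below names
+ * devices "<module>.<name>.<id>", e.g. "if_bnxt.rdma.0", where "rdma" is
+ * a hypothetical device name.  auxiliary_match_id() strips the trailing
+ * ".<id>" instance suffix before comparing, so an id_table entry of
+ * "if_bnxt.rdma" matches every instance of that device.
+ */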
+
+int auxiliary_device_init(struct auxiliary_device *auxdev)
+{
+ struct device *dev = &auxdev->dev;
+ char *modname = KBUILD_MODNAME;
+ int ret;
+
+ if (!dev->parent) {
+ pr_err("auxiliary_device has a NULL dev->parent\n");
+ return -EINVAL;
+ }
+
+ if (!auxdev->name) {
+ pr_err("auxiliary_device has a NULL name\n");
+ return -EINVAL;
+ }
+
+ ret = dev_set_name(dev, "%s.%s.%d", modname, auxdev->name, auxdev->id);
+ if (ret) {
+ dev_err(dev, "auxiliary device dev_set_name failed: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int auxiliary_device_add(struct auxiliary_device *auxdev)
+{
+ const struct auxiliary_device_id *id;
+ struct auxiliary_driver *auxdrv = NULL;
+ bool found = true;
+ int ret = 0;
+
+ mutex_lock(&bnxt_auxbus_lock);
+ list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
+ if (auxdrv) {
+ msleep(2 * 1000);
+
+ id = auxiliary_match_id(auxdrv->id_table, auxdev);
+ if (id) {
+ ret = auxdrv->probe(auxdev, id);
+ if (!ret)
+ auxdev->dev.driver = &auxdrv->driver;
+ else
+ found = false;
+ break;
+ }
+ }
+ }
+
+ if (found)
+ list_add_tail(&auxdev->list, &bnxt_aux_bus_dev_list);
+ mutex_unlock(&bnxt_auxbus_lock);
+
+ return ret;
+}
+
+void auxiliary_device_uninit(struct auxiliary_device *auxdev)
+{
+ return;
+}
+
+void auxiliary_device_delete(struct auxiliary_device *auxdev)
+{
+ struct auxiliary_driver *auxdrv;
+
+ mutex_lock(&bnxt_auxbus_lock);
+ list_for_each_entry(auxdrv, &bnxt_aux_bus_drv_list, list) {
+ if (auxdev->dev.driver != &auxdrv->driver)
+ continue;
+ if (auxdrv->remove)
+ auxdrv->remove(auxdev);
+ auxdev->dev.driver = NULL;
+ }
+ list_del(&auxdev->list);
+ mutex_unlock(&bnxt_auxbus_lock);
+}
+
+int auxiliary_driver_register(struct auxiliary_driver *auxdrv)
+{
+ const struct auxiliary_device_id *id;
+ struct auxiliary_device *auxdev;
+ int ret = 0;
+
+ if (WARN_ON(!auxdrv->probe) || WARN_ON(!auxdrv->id_table))
+ return -EINVAL;
+
+ if (auxdrv->name)
+ auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s.%s", KBUILD_MODNAME,
+ auxdrv->name);
+ else
+ auxdrv->driver.name = kasprintf(GFP_KERNEL, "%s", KBUILD_MODNAME);
+ if (!auxdrv->driver.name)
+ return -ENOMEM;
+
+ mutex_lock(&bnxt_auxbus_lock);
+ list_for_each_entry(auxdev, &bnxt_aux_bus_dev_list, list) {
+ if (auxdev->dev.driver)
+ continue;
+
+ id = auxiliary_match_id(auxdrv->id_table, auxdev);
+ if (id) {
+ ret = auxdrv->probe(auxdev, id);
+ if (ret)
+ continue;
+ auxdev->dev.driver = &auxdrv->driver;
+ }
+ }
+ list_add_tail(&auxdrv->list, &bnxt_aux_bus_drv_list);
+ mutex_unlock(&bnxt_auxbus_lock);
+ return 0;
+}
+EXPORT_SYMBOL(auxiliary_driver_register);
+
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv)
+{
+ struct auxiliary_device *auxdev;
+
+	/* PF auxiliary devices are added to the list first, followed by VF
+	 * devices.  Detaching the driver from a PF device before its VF
+	 * devices causes failures during VF removal, so walk the device
+	 * list backwards and detach the VF devices first.
+	 */
+ mutex_lock(&bnxt_auxbus_lock);
+ list_for_each_entry_reverse(auxdev, &bnxt_aux_bus_dev_list, list) {
+ if (auxdev->dev.driver != &auxdrv->driver)
+ continue;
+ if (auxdrv->remove)
+ auxdrv->remove(auxdev);
+ auxdev->dev.driver = NULL;
+ }
+ kfree(auxdrv->driver.name);
+ list_del(&auxdrv->list);
+ mutex_unlock(&bnxt_auxbus_lock);
+}
+EXPORT_SYMBOL(auxiliary_driver_unregister);
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.h b/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.h
new file mode 100644
index 000000000000..c4c9e789cf3e
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_auxbus_compat.h
@@ -0,0 +1,76 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_AUXILIARY_COMPAT_H_
+#define _BNXT_AUXILIARY_COMPAT_H_
+
+#include <linux/device.h>
+#include <linux/idr.h>
+
+#define KBUILD_MODNAME "if_bnxt"
+#define AUXILIARY_NAME_SIZE 32
+
+struct auxiliary_device_id {
+ char name[AUXILIARY_NAME_SIZE];
+ uint64_t driver_data;
+};
+#define MODULE_DEVICE_TABLE_BUS_auxiliary(_bus, _table)
+
+struct auxiliary_device {
+ struct device dev;
+ const char *name;
+ uint32_t id;
+ struct list_head list;
+};
+
+struct auxiliary_driver {
+ int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
+ void (*remove)(struct auxiliary_device *auxdev);
+ const char *name;
+ struct device_driver driver;
+ const struct auxiliary_device_id *id_table;
+ struct list_head list;
+};
+
+int auxiliary_device_init(struct auxiliary_device *auxdev);
+int auxiliary_device_add(struct auxiliary_device *auxdev);
+void auxiliary_device_uninit(struct auxiliary_device *auxdev);
+void auxiliary_device_delete(struct auxiliary_device *auxdev);
+int auxiliary_driver_register(struct auxiliary_driver *auxdrv);
+void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
+
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
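+
+/*
+ * Consumer sketch (illustrative; every name below is hypothetical and only
+ * demonstrates the API declared above):
+ *
+ *	static int
+ *	bnxt_re_probe(struct auxiliary_device *adev,
+ *	    const struct auxiliary_device_id *id)
+ *	{
+ *		return (0);
+ *	}
+ *
+ *	static const struct auxiliary_device_id bnxt_re_id_table[] = {
+ *		{ .name = "if_bnxt.rdma" },
+ *		{}
+ *	};
+ *
+ *	static struct auxiliary_driver bnxt_re_driver = {
+ *		.name		= "rdma",
+ *		.probe		= bnxt_re_probe,
+ *		.id_table	= bnxt_re_id_table,
+ *	};
+ *
+ * The driver is then attached with auxiliary_driver_register(&bnxt_re_driver)
+ * at load time and detached with auxiliary_driver_unregister().
+ */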
+#endif /* _BNXT_AUXILIARY_COMPAT_H_ */
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_dcb.c b/sys/dev/bnxt/bnxt_en/bnxt_dcb.c
new file mode 100644
index 000000000000..e0643f200021
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_dcb.c
@@ -0,0 +1,864 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/endian.h>
+#include <linux/errno.h>
+#include <linux/bitops.h>
+
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_dcb.h"
+#include "hsi_struct_def.h"
+
+static int
+bnxt_tx_queue_to_tc(struct bnxt_softc *softc, uint8_t queue_id)
+{
+ int i, j;
+
+ for (i = 0; i < softc->max_tc; i++) {
+ if (softc->tx_q_info[i].queue_id == queue_id) {
+ for (j = 0; j < softc->max_tc; j++) {
+ if (softc->tc_to_qidx[j] == i)
+ return j;
+ }
+ }
+ }
+ return -EINVAL;
+}
+
+static int
+bnxt_hwrm_queue_pri2cos_cfg(struct bnxt_softc *softc,
+ struct bnxt_ieee_ets *ets,
+ uint32_t path_dir)
+{
+ struct hwrm_queue_pri2cos_cfg_input req = {0};
+ struct bnxt_queue_info *q_info;
+ uint8_t *pri2cos;
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_CFG);
+
+ req.flags = htole32(path_dir | HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_IVLAN);
+ if (path_dir == HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR ||
+ path_dir == HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_TX)
+ q_info = softc->tx_q_info;
+ else
+ q_info = softc->rx_q_info;
+ pri2cos = &req.pri0_cos_queue_id;
+ for (i = 0; i < BNXT_IEEE_8021QAZ_MAX_TCS; i++) {
+ uint8_t qidx;
+
+ req.enables |= htole32(HWRM_QUEUE_PRI2COS_CFG_INPUT_ENABLES_PRI0_COS_QUEUE_ID << i);
+
+ qidx = softc->tc_to_qidx[ets->prio_tc[i]];
+ pri2cos[i] = q_info[qidx].queue_id;
+ }
+ return _hwrm_send_message(softc, &req, sizeof(req));
+}
+
+static int
+bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets)
+{
+ struct hwrm_queue_pri2cos_qcfg_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_queue_pri2cos_qcfg_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_QCFG);
+
+ req.flags = htole32(HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (!rc) {
+ uint8_t *pri2cos = &resp->pri0_cos_queue_id;
+ int i;
+
+ for (i = 0; i < BNXT_IEEE_8021QAZ_MAX_TCS; i++) {
+ uint8_t queue_id = pri2cos[i];
+ int tc;
+
+ tc = bnxt_tx_queue_to_tc(softc, queue_id);
+ if (tc >= 0)
+ ets->prio_tc[i] = tc;
+ }
+ }
+ return rc;
+}
+
+static int
+bnxt_hwrm_queue_cos2bw_cfg(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets,
+ uint8_t max_tc)
+{
+ struct hwrm_queue_cos2bw_cfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ void *data;
+ int i;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_COS2BW_CFG);
+
+ for (i = 0; i < max_tc; i++) {
+ uint8_t qidx = softc->tc_to_qidx[i];
+
+ req.enables |=
+ htole32(HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID << qidx);
+
+ memset(&cos2bw, 0, sizeof(cos2bw));
+ cos2bw.queue_id = softc->tx_q_info[qidx].queue_id;
+ if (ets->tc_tsa[i] == BNXT_IEEE_8021QAZ_TSA_STRICT) {
+ cos2bw.tsa =
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP;
+ cos2bw.pri_lvl = i;
+ } else {
+ cos2bw.tsa =
+ HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_ETS;
+ cos2bw.bw_weight = ets->tc_tx_bw[i];
+			/* Older firmware requires min_bw to be set to the
+			 * same weight value, expressed in percent.
+			 */
+ if (BNXT_FW_MAJ(softc) < 218) {
+ cos2bw.min_bw =
+ htole32((ets->tc_tx_bw[i] * 100) |
+ BW_VALUE_UNIT_PERCENT1_100);
+ }
+ }
+ data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4);
+ memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
+ if (qidx == 0) {
+ req.queue_id0 = cos2bw.queue_id;
+ req.unused_0 = 0;
+ }
+ }
+ return _hwrm_send_message(softc, &req, sizeof(req));
+}
+
+static int
+bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets)
+{
+ struct hwrm_queue_cos2bw_qcfg_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_queue_cos2bw_qcfg_input req = {0};
+ struct bnxt_cos2bw_cfg cos2bw;
+ uint8_t *data;
+ int rc, i;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_COS2BW_QCFG);
+
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (rc) {
+ return rc;
+ }
+
+ data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
+ for (i = 0; i < softc->max_tc; i++, data += sizeof(cos2bw.cfg)) {
+ int tc;
+
+ memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
+ if (i == 0)
+ cos2bw.queue_id = resp->queue_id0;
+
+ tc = bnxt_tx_queue_to_tc(softc, cos2bw.queue_id);
+ if (tc < 0)
+ continue;
+
+ if (cos2bw.tsa == HWRM_QUEUE_COS2BW_QCFG_OUTPUT_QUEUE_ID0_TSA_ASSIGN_SP) {
+ ets->tc_tsa[tc] = BNXT_IEEE_8021QAZ_TSA_STRICT;
+ } else {
+ ets->tc_tsa[tc] = BNXT_IEEE_8021QAZ_TSA_ETS;
+ ets->tc_tx_bw[tc] = cos2bw.bw_weight;
+ }
+ }
+ return 0;
+}
+
+static int
+bnxt_queue_remap(struct bnxt_softc *softc, unsigned int lltc_mask)
+{
+ unsigned long qmap = 0;
+ int max = softc->max_tc;
+ int i, j, rc;
+
+ /* Assign lossless TCs first */
+ for (i = 0, j = 0; i < max; ) {
+ if (lltc_mask & (1 << i)) {
+ if (BNXT_LLQ(softc->rx_q_info[j].queue_profile)) {
+ softc->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ i++;
+ }
+ j++;
+ continue;
+ }
+ i++;
+ }
+
+ for (i = 0, j = 0; i < max; i++) {
+ if (lltc_mask & (1 << i))
+ continue;
+ j = find_next_zero_bit(&qmap, max, j);
+ softc->tc_to_qidx[i] = j;
+ __set_bit(j, &qmap);
+ j++;
+ }
+
+ if (softc->ieee_ets) {
+ rc = bnxt_hwrm_queue_cos2bw_cfg(softc, softc->ieee_ets, softc->max_tc);
+ if (rc) {
+ device_printf(softc->dev, "failed to config BW, rc = %d\n", rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_queue_pri2cos_cfg(softc, softc->ieee_ets,
+ HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR);
+ if (rc) {
+ device_printf(softc->dev, "failed to config prio, rc = %d\n", rc);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+static int
+bnxt_hwrm_queue_pfc_cfg(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_cfg_input req = {0};
+ struct bnxt_ieee_ets *my_ets = softc->ieee_ets;
+ unsigned int tc_mask = 0, pri_mask = 0;
+ uint8_t i, pri, lltc_count = 0;
+ bool need_q_remap = false;
+
+ if (!my_ets)
+ return -EINVAL;
+
+ for (i = 0; i < softc->max_tc; i++) {
+ for (pri = 0; pri < BNXT_IEEE_8021QAZ_MAX_TCS; pri++) {
+ if ((pfc->pfc_en & (1 << pri)) &&
+ (my_ets->prio_tc[pri] == i)) {
+ pri_mask |= 1 << pri;
+ tc_mask |= 1 << i;
+ }
+ }
+ if (tc_mask & (1 << i))
+ lltc_count++;
+ }
+
+ if (lltc_count > softc->max_lltc) {
+ device_printf(softc->dev,
+ "Hardware doesn't support %d lossless queues "
+ "to configure PFC (cap %d)\n", lltc_count, softc->max_lltc);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < softc->max_tc; i++) {
+ if (tc_mask & (1 << i)) {
+ uint8_t qidx = softc->tc_to_qidx[i];
+
+ if (!BNXT_LLQ(softc->rx_q_info[qidx].queue_profile)) {
+ need_q_remap = true;
+ break;
+ }
+ }
+ }
+
+ if (need_q_remap)
+ bnxt_queue_remap(softc, tc_mask);
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PFCENABLE_CFG);
+
+ req.flags = htole32(pri_mask);
+ return _hwrm_send_message(softc, &req, sizeof(req));
+}
+
+static int
+bnxt_hwrm_queue_pfc_qcfg(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc)
+{
+ struct hwrm_queue_pfcenable_qcfg_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_queue_pfcenable_qcfg_input req = {0};
+ uint8_t pri_mask;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PFCENABLE_QCFG);
+
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (rc) {
+ return rc;
+ }
+
+ pri_mask = le32toh(resp->flags);
+ pfc->pfc_en = pri_mask;
+ return 0;
+}
+
+static int
+bnxt_hwrm_get_dcbx_app(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ size_t nitems, int *num_inputs)
+{
+ struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_struct_data_dcbx_app *fw_app;
+ struct hwrm_struct_hdr *data;
+ struct iflib_dma_info dma_data;
+ size_t data_len;
+ int rc, n, i;
+
+ if (softc->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &get, HWRM_FW_GET_STRUCTURED_DATA);
+
+ n = BNXT_IEEE_8021QAZ_MAX_TCS;
+ data_len = sizeof(*data) + sizeof(*fw_app) * n;
+ rc = iflib_dma_alloc(softc->ctx, data_len, &dma_data,
+ BUS_DMA_NOWAIT);
+ if (rc)
+ return ENOMEM;
+ get.dest_data_addr = htole64(dma_data.idi_paddr);
+ get.structure_id = htole16(HWRM_STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get.subtype = htole16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get.count = 0;
+ rc = _hwrm_send_message(softc, &get, sizeof(get));
+ if (rc)
+ goto set_app_exit;
+
+ data = (void *)dma_data.idi_vaddr;
+ fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+ if (data->struct_id != htole16(HWRM_STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+ rc = -ENODEV;
+ goto set_app_exit;
+ }
+
+ n = data->count;
+ for (i = 0; i < n && *num_inputs < nitems; i++, fw_app++) {
+ app[*num_inputs].priority = fw_app->priority;
+ app[*num_inputs].protocol = htobe16(fw_app->protocol_id);
+ app[*num_inputs].selector = fw_app->protocol_selector;
+ (*num_inputs)++;
+ }
+
+set_app_exit:
+ iflib_dma_free(&dma_data);
+ return rc;
+}
+
+static int
+bnxt_hwrm_set_dcbx_app(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ bool add)
+{
+ struct hwrm_fw_set_structured_data_input set = {0};
+ struct hwrm_fw_get_structured_data_input get = {0};
+ struct hwrm_struct_data_dcbx_app *fw_app;
+ struct hwrm_struct_hdr *data;
+ struct iflib_dma_info dma_data;
+ size_t data_len;
+ int rc, n, i;
+
+ if (softc->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &get, HWRM_FW_GET_STRUCTURED_DATA);
+
+ n = BNXT_IEEE_8021QAZ_MAX_TCS;
+ data_len = sizeof(*data) + sizeof(*fw_app) * n;
+ rc = iflib_dma_alloc(softc->ctx, data_len, &dma_data,
+ BUS_DMA_NOWAIT);
+ if (rc)
+ return ENOMEM;
+ get.dest_data_addr = htole64(dma_data.idi_paddr);
+ get.structure_id = htole16(HWRM_STRUCT_HDR_STRUCT_ID_DCBX_APP);
+ get.subtype = htole16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+ get.count = 0;
+ rc = _hwrm_send_message(softc, &get, sizeof(get));
+ if (rc)
+ goto set_app_exit;
+
+ data = (void *)dma_data.idi_vaddr;
+ fw_app = (struct hwrm_struct_data_dcbx_app *)(data + 1);
+
+ if (data->struct_id != htole16(HWRM_STRUCT_HDR_STRUCT_ID_DCBX_APP)) {
+ rc = -ENODEV;
+ goto set_app_exit;
+ }
+
+ n = data->count;
+ for (i = 0; i < n; i++, fw_app++) {
+ if (fw_app->protocol_id == htobe16(app->protocol) &&
+ fw_app->protocol_selector == app->selector &&
+ fw_app->priority == app->priority) {
+ if (add)
+ goto set_app_exit;
+ else
+ break;
+ }
+ }
+ if (add) {
+ /* append */
+ n++;
+ fw_app->protocol_id = htobe16(app->protocol);
+ fw_app->protocol_selector = app->selector;
+ fw_app->priority = app->priority;
+ fw_app->valid = 1;
+ } else {
+ size_t len = 0;
+
+ /* not found, nothing to delete */
+ if (n == i)
+ goto set_app_exit;
+
+ len = (n - 1 - i) * sizeof(*fw_app);
+ if (len)
+ memmove(fw_app, fw_app + 1, len);
+ n--;
+ memset(fw_app + n, 0, sizeof(*fw_app));
+ }
+ data->count = n;
+ data->len = htole16(sizeof(*fw_app) * n);
+ data->subtype = htole16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL);
+
+ bnxt_hwrm_cmd_hdr_init(softc, &set, HWRM_FW_SET_STRUCTURED_DATA);
+
+ set.src_data_addr = htole64(dma_data.idi_paddr);
+ set.data_len = htole16(sizeof(*data) + sizeof(*fw_app) * n);
+ set.hdr_cnt = 1;
+ rc = _hwrm_send_message(softc, &set, sizeof(set));
+
+set_app_exit:
+ iflib_dma_free(&dma_data);
+ return rc;
+}
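+
+/*
+ * Note: bnxt_hwrm_set_dcbx_app() above is a read-modify-write of the
+ * firmware DCBX app table: the current entries are fetched with
+ * HWRM_FW_GET_STRUCTURED_DATA, edited in the host DMA buffer (append on
+ * add, compact on delete), and written back in a single
+ * HWRM_FW_SET_STRUCTURED_DATA command.
+ */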
+
+static int
+bnxt_hwrm_queue_dscp_qcaps(struct bnxt_softc *softc)
+{
+ struct hwrm_queue_dscp_qcaps_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_queue_dscp_qcaps_input req = {0};
+ int rc;
+
+ softc->max_dscp_value = 0;
+ if (softc->hwrm_spec_code < 0x10800 || BNXT_VF(softc))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_DSCP_QCAPS);
+
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (!rc) {
+ softc->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (softc->max_dscp_value < 0x3f)
+ softc->max_dscp_value = 0;
+ }
+ return rc;
+}
+
+static int
+bnxt_hwrm_queue_dscp2pri_qcfg(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ size_t nitems, int *num_inputs)
+{
+ struct hwrm_queue_dscp2pri_qcfg_input req = {0};
+ struct hwrm_queue_dscp2pri_qcfg_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ struct iflib_dma_info dma_data;
+ int rc, entry_cnt;
+ int i;
+
+ if (softc->hwrm_spec_code < 0x10800)
+ return 0;
+
+ rc = iflib_dma_alloc(softc->ctx, sizeof(*dscp2pri) * 128, &dma_data,
+ BUS_DMA_NOWAIT);
+ if (rc)
+ return ENOMEM;
+
+ dscp2pri = (void *)dma_data.idi_vaddr;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_DSCP2PRI_QCFG);
+
+ req.dest_data_addr = htole64(dma_data.idi_paddr);
+ req.dest_data_buffer_size = htole16(sizeof(*dscp2pri) * 64);
+ req.port_id = htole16(softc->pf.port_id);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+
+ if (rc)
+ goto end;
+
+ entry_cnt = le16toh(resp->entry_cnt);
+ for (i = 0; i < entry_cnt && *num_inputs < nitems; i++) {
+ app[*num_inputs].priority = dscp2pri[i].pri;
+ app[*num_inputs].protocol = dscp2pri[i].dscp;
+ app[*num_inputs].selector = BNXT_IEEE_8021QAZ_APP_SEL_DSCP;
+ (*num_inputs)++;
+ }
+
+end:
+ iflib_dma_free(&dma_data);
+ return rc;
+}
+
+static int
+bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ bool add)
+{
+ struct hwrm_queue_dscp2pri_cfg_input req = {0};
+ struct bnxt_dscp2pri_entry *dscp2pri;
+ struct iflib_dma_info dma_data;
+ int rc;
+
+ if (softc->hwrm_spec_code < 0x10800)
+ return 0;
+
+ rc = iflib_dma_alloc(softc->ctx, sizeof(*dscp2pri), &dma_data,
+ BUS_DMA_NOWAIT);
+ if (rc)
+ return ENOMEM;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_DSCP2PRI_CFG);
+
+ req.src_data_addr = htole64(dma_data.idi_paddr);
+ dscp2pri = (void *)dma_data.idi_vaddr;
+ dscp2pri->dscp = app->protocol;
+ if (add)
+ dscp2pri->mask = 0x3f;
+ else
+ dscp2pri->mask = 0;
+ dscp2pri->pri = app->priority;
+ req.entry_cnt = htole16(1);
+ req.port_id = htole16(softc->pf.port_id);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+
+ iflib_dma_free(&dma_data);
+ return rc;
+}
+
+static int
+bnxt_ets_validate(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets, uint8_t *tc)
+{
+ int total_ets_bw = 0;
+ bool zero = false;
+ uint8_t max_tc = 0;
+ int i;
+
+ for (i = 0; i < BNXT_IEEE_8021QAZ_MAX_TCS; i++) {
+ if (ets->prio_tc[i] > softc->max_tc) {
+ device_printf(softc->dev, "priority to TC mapping exceeds TC count %d\n",
+ ets->prio_tc[i]);
+ return -EINVAL;
+ }
+ if (ets->prio_tc[i] > max_tc)
+ max_tc = ets->prio_tc[i];
+
+ if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > softc->max_tc)
+ return -EINVAL;
+
+ switch (ets->tc_tsa[i]) {
+ case BNXT_IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case BNXT_IEEE_8021QAZ_TSA_ETS:
+ total_ets_bw += ets->tc_tx_bw[i];
+ zero = zero || !ets->tc_tx_bw[i];
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+ }
+ if (total_ets_bw > 100) {
+ device_printf(softc->dev, "rejecting ETS config exceeding available bandwidth\n");
+ return -EINVAL;
+ }
+ if (zero && total_ets_bw == 100) {
+ device_printf(softc->dev, "rejecting ETS config starving a TC\n");
+ return -EINVAL;
+ }
+
+ if (max_tc >= softc->max_tc)
+ *tc = softc->max_tc;
+ else
+ *tc = max_tc + 1;
+ return 0;
+}
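+
+/*
+ * Worked example (illustrative): two ETS TCs with tc_tx_bw = { 60, 40 }
+ * sum to 100 and are accepted; { 70, 50 } exceeds 100 and is rejected;
+ * { 100, 0 } sums to 100 but starves the second ETS TC, so it is also
+ * rejected.  *tc is derived from the highest TC referenced in prio_tc[].
+ */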
+
+int
+bnxt_dcb_ieee_getets(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets)
+{
+ struct bnxt_ieee_ets *my_ets = softc->ieee_ets;
+ int rc;
+
+ if (!my_ets)
+ return 0;
+
+ rc = bnxt_hwrm_queue_cos2bw_qcfg(softc, my_ets);
+ if (rc)
+ goto error;
+ rc = bnxt_hwrm_queue_pri2cos_qcfg(softc, my_ets);
+ if (rc)
+ goto error;
+
+ if (ets) {
+ ets->cbs = my_ets->cbs;
+ ets->ets_cap = softc->max_tc;
+ memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw));
+ memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
+ }
+ return 0;
+error:
+ return rc;
+}
+
+int
+bnxt_dcb_ieee_setets(struct bnxt_softc *softc, struct bnxt_ieee_ets *ets)
+{
+ uint8_t max_tc = 0;
+ int rc;
+
+ if (!(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_VER_IEEE) ||
+ !(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = bnxt_ets_validate(softc, ets, &max_tc);
+ if (rc)
+ return rc;
+
+ rc = bnxt_hwrm_queue_cos2bw_cfg(softc, ets, max_tc);
+ if (rc)
+ goto error;
+
+ if (!softc->is_asym_q) {
+ rc = bnxt_hwrm_queue_pri2cos_cfg(softc, ets,
+ HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_BIDIR);
+ if (rc)
+ goto error;
+ } else {
+ rc = bnxt_hwrm_queue_pri2cos_cfg(softc, ets,
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX);
+ if (rc)
+ goto error;
+
+ rc = bnxt_hwrm_queue_pri2cos_cfg(softc, ets,
+ HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX);
+ if (rc)
+ goto error;
+ }
+
+ memcpy(softc->ieee_ets, ets, sizeof(*ets));
+ return 0;
+error:
+ return rc;
+}
+
+int
+bnxt_dcb_ieee_getpfc(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc)
+{
+ struct bnxt_ieee_pfc *my_pfc = softc->ieee_pfc;
+ int rc;
+
+ if (!my_pfc)
+ return -1;
+
+ pfc->pfc_cap = softc->max_lltc;
+
+ rc = bnxt_hwrm_queue_pfc_qcfg(softc, my_pfc);
+ if (rc)
+ return 0;
+
+ pfc->pfc_en = my_pfc->pfc_en;
+ pfc->mbc = my_pfc->mbc;
+ pfc->delay = my_pfc->delay;
+
+ return 0;
+}
+
+int
+bnxt_dcb_ieee_setpfc(struct bnxt_softc *softc, struct bnxt_ieee_pfc *pfc)
+{
+ struct bnxt_ieee_pfc *my_pfc = softc->ieee_pfc;
+ int rc;
+
+ if (!my_pfc)
+ return -1;
+
+ if (!(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_VER_IEEE) ||
+ !(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_HOST) ||
+ (softc->phy_flags & BNXT_PHY_FL_NO_PAUSE))
+ return -EINVAL;
+
+ rc = bnxt_hwrm_queue_pfc_cfg(softc, pfc);
+ if (!rc)
+ memcpy(my_pfc, pfc, sizeof(*my_pfc));
+
+ return rc;
+}
+
+static int
+bnxt_dcb_ieee_dscp_app_prep(struct bnxt_softc *softc, struct bnxt_dcb_app *app)
+{
+ if (app->selector == BNXT_IEEE_8021QAZ_APP_SEL_DSCP) {
+ if (!softc->max_dscp_value)
+ return -ENOTSUPP;
+ if (app->protocol > softc->max_dscp_value)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int
+bnxt_dcb_ieee_setapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app)
+{
+ int rc;
+
+ if (!(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_VER_IEEE) ||
+ !(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = bnxt_dcb_ieee_dscp_app_prep(softc, app);
+ if (rc)
+ return rc;
+
+ if ((app->selector == BNXT_IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_ROCE) ||
+ (app->selector == BNXT_IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(softc, app, true);
+
+ if (app->selector == BNXT_IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(softc, app, true);
+
+ return rc;
+}
+
+int
+bnxt_dcb_ieee_delapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app)
+{
+ int rc;
+
+ if (!(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_VER_IEEE) ||
+ !(softc->dcbx_cap & BNXT_DCB_CAP_DCBX_HOST))
+ return -EINVAL;
+
+ rc = bnxt_dcb_ieee_dscp_app_prep(softc, app);
+ if (rc)
+ return rc;
+
+ if ((app->selector == BNXT_IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+ app->protocol == ETH_P_ROCE) ||
+ (app->selector == BNXT_IEEE_8021QAZ_APP_SEL_DGRAM &&
+ app->protocol == ROCE_V2_UDP_DPORT))
+ rc = bnxt_hwrm_set_dcbx_app(softc, app, false);
+
+ if (app->selector == BNXT_IEEE_8021QAZ_APP_SEL_DSCP)
+ rc = bnxt_hwrm_queue_dscp2pri_cfg(softc, app, false);
+
+ return rc;
+}
+
+int
+bnxt_dcb_ieee_listapp(struct bnxt_softc *softc, struct bnxt_dcb_app *app,
+ size_t nitems, int *num_inputs)
+{
+ bnxt_hwrm_get_dcbx_app(softc, app, nitems, num_inputs);
+ bnxt_hwrm_queue_dscp2pri_qcfg(softc, app, nitems, num_inputs);
+
+ return 0;
+}
+
+uint8_t
+bnxt_dcb_getdcbx(struct bnxt_softc *softc)
+{
+ return softc->dcbx_cap;
+}
+
+uint8_t
+bnxt_dcb_setdcbx(struct bnxt_softc *softc, uint8_t mode)
+{
+ /* All firmware DCBX settings are set in NVRAM */
+ if (softc->dcbx_cap & BNXT_DCB_CAP_DCBX_LLD_MANAGED)
+ return 1;
+
+ /*
+	 * Don't allow editing CAP_DCBX_LLD_MANAGED since it is driven
+ * based on FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED
+ */
+ if ((softc->dcbx_cap & BNXT_DCB_CAP_DCBX_LLD_MANAGED) !=
+ (mode & BNXT_DCB_CAP_DCBX_LLD_MANAGED))
+ return 1;
+
+ if (mode & BNXT_DCB_CAP_DCBX_HOST) {
+ if (BNXT_VF(softc) || (softc->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
+ return 1;
+
+ /* only support BNXT_IEEE */
+ if ((mode & BNXT_DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & BNXT_DCB_CAP_DCBX_VER_IEEE))
+ return 1;
+ }
+
+ if (mode == softc->dcbx_cap)
+ return 0;
+
+ softc->dcbx_cap = mode;
+ return 0;
+}
+
+void
+bnxt_dcb_init(struct bnxt_softc *softc)
+{
+ struct bnxt_ieee_ets ets = {0};
+ struct bnxt_ieee_pfc pfc = {0};
+
+ softc->dcbx_cap = 0;
+
+ if (softc->hwrm_spec_code < 0x10501)
+ return;
+
+ softc->ieee_ets = malloc(sizeof(struct bnxt_ieee_ets), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!softc->ieee_ets)
+ return;
+
+ softc->ieee_pfc = malloc(sizeof(struct bnxt_ieee_pfc), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!softc->ieee_pfc)
+ return;
+
+ bnxt_hwrm_queue_dscp_qcaps(softc);
+ softc->dcbx_cap = BNXT_DCB_CAP_DCBX_VER_IEEE;
+ if (BNXT_PF(softc) && !(softc->fw_cap & BNXT_FW_CAP_LLDP_AGENT))
+ softc->dcbx_cap |= BNXT_DCB_CAP_DCBX_HOST;
+ else if (softc->fw_cap & BNXT_FW_CAP_DCBX_AGENT)
+ softc->dcbx_cap |= BNXT_DCB_CAP_DCBX_LLD_MANAGED;
+
+ bnxt_dcb_ieee_setets(softc, &ets);
+ bnxt_dcb_ieee_setpfc(softc, &pfc);
+}
+
+void
+bnxt_dcb_free(struct bnxt_softc *softc)
+{
+ free(softc->ieee_ets, M_DEVBUF);
+ softc->ieee_ets = NULL;
+ free(softc->ieee_pfc, M_DEVBUF);
+ softc->ieee_pfc = NULL;
+}
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_dcb.h b/sys/dev/bnxt/bnxt_en/bnxt_dcb.h
new file mode 100644
index 000000000000..fa68401583ca
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_dcb.h
@@ -0,0 +1,131 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _BNXT_DCB_H
+#define _BNXT_DCB_H
+
+#define BNXT_IEEE_8021QAZ_MAX_TCS 8
+#define BNXT_IEEE_8021QAZ_TSA_STRICT 0
+#define BNXT_IEEE_8021QAZ_TSA_ETS 2
+#define BNXT_IEEE_8021QAZ_TSA_VENDOR 255
+
+#define BNXT_DCB_CAP_DCBX_HOST 0x01
+#define BNXT_DCB_CAP_DCBX_LLD_MANAGED 0x02
+#define BNXT_DCB_CAP_DCBX_VER_CEE 0x04
+#define BNXT_DCB_CAP_DCBX_VER_IEEE 0x08
+#define BNXT_DCB_CAP_DCBX_STATIC 0x10
+
+#ifndef __struct_group
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ }
+#endif
+#ifndef struct_group_attr
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+#endif
+
+struct bnxt_cos2bw_cfg {
+ uint8_t pad[3];
+ struct_group_attr(cfg, __packed,
+ uint8_t queue_id;
+ uint32_t min_bw;
+ uint32_t max_bw;
+#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
+ uint8_t tsa;
+ uint8_t pri_lvl;
+ uint8_t bw_weight;
+ );
+ uint8_t unused;
+};
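+
+/*
+ * The struct_group_attr() wrapper lets bnxt_dcb.c copy the firmware-visible
+ * portion of this structure as one unit, e.g. (sketch, matching the qcfg
+ * path above):
+ *
+ *	memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
+ *
+ * while keeping queue_id and friends addressable as plain members.
+ */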
+
+struct bnxt_dscp2pri_entry {
+ uint8_t dscp;
+ uint8_t mask;
+ uint8_t pri;
+};
+
+struct bnxt_ieee_ets {
+ uint8_t willing;
+ uint8_t ets_cap;
+ uint8_t cbs;
+ uint8_t tc_tx_bw[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t tc_rx_bw[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t tc_tsa[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t prio_tc[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t tc_reco_bw[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t tc_reco_tsa[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint8_t reco_prio_tc[BNXT_IEEE_8021QAZ_MAX_TCS];
+} __attribute__ ((__packed__));
+
+struct bnxt_ieee_pfc {
+ uint8_t pfc_cap;
+ uint8_t pfc_en;
+ uint8_t mbc;
+ uint16_t delay;
+ uint64_t requests[BNXT_IEEE_8021QAZ_MAX_TCS];
+ uint64_t indications[BNXT_IEEE_8021QAZ_MAX_TCS];
+} __attribute__ ((__packed__));
+
+struct bnxt_dcb_app {
+ uint8_t selector;
+ uint8_t priority;
+ uint16_t protocol;
+} __attribute__ ((__packed__));
+
+struct bnxt_eee {
+ uint32_t cmd;
+ uint32_t supported;
+ uint32_t advertised;
+ uint32_t lp_advertised;
+ uint32_t eee_active;
+ uint32_t eee_enabled;
+ uint32_t tx_lpi_enabled;
+ uint32_t tx_lpi_timer;
+ uint32_t reserved[2];
+} __attribute__ ((__packed__));
+
+#define BNXT_IEEE_8021QAZ_APP_SEL_ETHERTYPE 1
+#define BNXT_IEEE_8021QAZ_APP_SEL_STREAM 2
+#define BNXT_IEEE_8021QAZ_APP_SEL_DGRAM 3
+#define BNXT_IEEE_8021QAZ_APP_SEL_ANY 4
+#define BNXT_IEEE_8021QAZ_APP_SEL_DSCP 5
+#define ETH_P_ROCE 0x8915
+#define ROCE_V2_UDP_DPORT 4791
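+/*
+ * 0x8915 is the RoCE v1 ethertype and 4791 the IANA-assigned RoCE v2 UDP
+ * destination port; bnxt_dcb_ieee_setapp() and _delapp() use these to
+ * recognize RoCE app-table entries and forward them to firmware.
+ */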
+
+#define BNXT_LLQ(q_profile) \
+ ((q_profile) == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE || \
+ (q_profile) == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_NIC)
+#define BNXT_CNPQ(q_profile) \
+ ((q_profile) == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP)
+
+#define HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL 0x0300
+
+#endif /* _BNXT_DCB_H */
diff --git a/sys/dev/bnxt/bnxt_hwrm.c b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
index 37238b857ef5..9e7f4614d9f9 100644
--- a/sys/dev/bnxt/bnxt_hwrm.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.c
@@ -28,6 +28,7 @@
#include <sys/cdefs.h>
#include <sys/endian.h>
+#include <linux/pci.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
@@ -42,13 +43,42 @@ static void bnxt_hwrm_set_pause_common(struct bnxt_softc *softc,
struct hwrm_port_phy_cfg_input *req);
static void bnxt_hwrm_set_eee(struct bnxt_softc *softc,
struct hwrm_port_phy_cfg_input *req);
-static int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
-static int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
-static void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
/* NVRam stuff has a five minute timeout */
#define BNXT_NVM_TIMEO (5 * 60 * 1000)
+#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
+ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0)
+
+#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
+ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0)
+
+#define BNXT_RX_STATS_PRI_ENTRIES(counter) \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_RX_STATS_PRI_ENTRY(counter, 7)
+
+#define BNXT_TX_STATS_PRI_ENTRIES(counter) \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
+ BNXT_TX_STATS_PRI_ENTRY(counter, 7)
+
+long bnxt_rx_bytes_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_bytes)};
+long bnxt_rx_pkts_pri_arr_base_off[] = {BNXT_RX_STATS_PRI_ENTRIES(rx_packets)};
+long bnxt_tx_bytes_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_bytes)};
+long bnxt_tx_pkts_pri_arr_base_off[] = {BNXT_TX_STATS_PRI_ENTRIES(tx_packets)};
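+
+/*
+ * Each table holds the extended-statistics offset of a counter family's
+ * _cos0 entry (hence "_base_off"); the "n" argument of the PRI_ENTRY
+ * macros is unused.  The per-priority counter is presumably resolved at
+ * runtime by applying the priority-to-CoS queue mapping to the base.
+ */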
+
static int
bnxt_hwrm_err_map(uint16_t err)
{
@@ -97,7 +127,7 @@ bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc)
return;
}
-static void
+void
bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
uint16_t req_type)
{
@@ -109,7 +139,7 @@ bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *softc, void *request,
req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
}
-static int
+int
_hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
struct input *req = msg;
@@ -125,6 +155,10 @@ _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
req->seq_id = htole16(softc->hwrm_cmd_seq++);
memset(resp, 0, PAGE_SIZE);
+ if (BNXT_NO_FW_ACCESS(softc) &&
+ (req->req_type != HWRM_FUNC_RESET && req->req_type != HWRM_VER_GET))
+ return -EINVAL;
+
if ((softc->flags & BNXT_FLAG_SHORT_CMD) ||
msg_len > BNXT_HWRM_MAX_REQ_LEN) {
void *short_cmd_req = softc->hwrm_short_cmd_req_addr.idi_vaddr;
@@ -222,7 +256,7 @@ _hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
return 0;
}
-static int
+int
hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
{
int rc;
@@ -234,13 +268,16 @@ hwrm_send_message(struct bnxt_softc *softc, void *msg, uint32_t msg_len)
}
int
-bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
+bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir)
{
int rc = 0;
struct hwrm_queue_qportcfg_input req = {0};
struct hwrm_queue_qportcfg_output *resp =
(void *)softc->hwrm_cmd_resp.idi_vaddr;
- uint8_t i, j, *qptr;
+ uint8_t max_tc, max_lltc, *max_q;
+ uint8_t queue_profile, queue_id;
+ struct bnxt_queue_info *q_info;
+ uint8_t i, j, *qptr, *q_ids;
bool no_rdma;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_QPORTCFG);
@@ -254,29 +291,56 @@ bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc)
rc = -EINVAL;
goto qportcfg_exit;
}
- softc->max_tc = resp->max_configurable_queues;
- softc->max_lltc = resp->max_configurable_lossless_queues;
- if (softc->max_tc > BNXT_MAX_COS_QUEUE)
- softc->max_tc = BNXT_MAX_COS_QUEUE;
- /* Currently no RDMA support */
- no_rdma = true;
+ if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG) {
+ softc->is_asym_q = true;
+ /* bnxt_init_cosq_names(softc, path_dir); */
+ } else {
+ softc->is_asym_q = false;
+ /* bnxt_free_stats_cosqnames_mem(softc); */
+ }
+
+ max_tc = min_t(uint8_t, resp->max_configurable_queues, BNXT_MAX_QUEUE);
+ max_lltc = resp->max_configurable_lossless_queues;
+ /*
+ * No RDMA support yet.
+ * no_rdma = !(softc->flags & BNXT_FLAG_ROCE_CAP);
+ */
+ no_rdma = true;
qptr = &resp->queue_id0;
- for (i = 0, j = 0; i < softc->max_tc; i++) {
- softc->q_info[j].id = *qptr;
- softc->q_ids[i] = *qptr++;
- softc->q_info[j].profile = *qptr++;
+
+ if (path_dir == HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX) {
+ q_info = softc->tx_q_info;
+ q_ids = softc->tx_q_ids;
+ max_q = &softc->tx_max_q;
+ } else {
+ q_info = softc->rx_q_info;
+ q_ids = softc->rx_q_ids;
+ max_q = &softc->rx_max_q;
+ }
+
+ for (i = 0, j = 0; i < max_tc; i++) {
+ queue_id = *qptr;
+ qptr++;
+
+ queue_profile = *qptr;
+ qptr++;
+
+ q_info[j].queue_id = queue_id;
+ q_info[j].queue_profile = queue_profile;
+ q_ids[i] = queue_id;
+
softc->tc_to_qidx[j] = j;
- if (!BNXT_CNPQ(softc->q_info[j].profile) ||
- (no_rdma && BNXT_PF(softc)))
+
+ if (!BNXT_CNPQ(q_info[j].queue_profile) ||
+ (no_rdma && BNXT_PF(softc)))
j++;
}
- softc->max_q = softc->max_tc;
- softc->max_tc = max_t(uint32_t, j, 1);
-
- if (resp->queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_ASYM_CFG)
- softc->max_tc = 1;
+ *max_q = max_tc;
+ max_tc = max_t(uint8_t, j, 1);
+ softc->max_tc = softc->max_tc ? min(softc->max_tc, max_tc) : max_tc;
+ softc->max_lltc = softc->max_lltc ? min(softc->max_lltc, max_lltc) : max_lltc;
if (softc->max_lltc > softc->max_tc)
softc->max_lltc = softc->max_tc;
@@ -286,6 +350,102 @@ qportcfg_exit:
return rc;
}
+static int bnxt_alloc_all_ctx_pg_info(struct bnxt_softc *softc, int ctx_max)
+{
+ struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
+ u16 type;
+
+ for (type = 0; type < ctx_max; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ int n = 1;
+
+ if (!ctxm->max_entries || ctxm->pg_info)
+ continue;
+
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ ctxm->pg_info = kcalloc(n, sizeof(*ctxm->pg_info), GFP_ATOMIC);
+ if (!ctxm->pg_info)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_type *ctxm,
+ u8 init_val, u8 init_offset,
+ bool init_mask_set)
+{
+ ctxm->init_value = init_val;
+ ctxm->init_offset = BNXT_CTX_INIT_INVALID_OFFSET;
+ if (init_mask_set)
+ ctxm->init_offset = init_offset * 4;
+ else
+ ctxm->init_value = 0;
+}
+
+#define BNXT_CTX_INIT_VALID(flags) \
+ (!!((flags) & \
+ HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT))
+
+static int
+bnxt_hwrm_func_backing_store_qcaps_v2(struct bnxt_softc *softc)
+{
+ struct hwrm_func_backing_store_qcaps_v2_input req = {0};
+ struct hwrm_func_backing_store_qcaps_v2_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct bnxt_ctx_mem_info *ctx = NULL;
+ u16 type;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS_V2);
+
+ ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!ctx)
+ return -ENOMEM;
+
+ softc->ctx_mem = ctx;
+
+ BNXT_HWRM_LOCK(softc);
+ for (type = 0; type < BNXT_CTX_V2_MAX; ) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ u8 init_val, init_off, i;
+ __le32 *p;
+ u32 flags;
+
+ req.type = cpu_to_le16(type);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (rc)
+ goto ctx_done;
+ flags = le32_to_cpu(resp->flags);
+ type = le16_to_cpu(resp->next_valid_type);
+ if (!(flags & HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID))
+ continue;
+
+ ctxm->type = le16_to_cpu(resp->type);
+ ctxm->flags = flags;
+
+ ctxm->entry_size = le16_to_cpu(resp->entry_size);
+ ctxm->instance_bmap = le32_to_cpu(resp->instance_bit_map);
+ ctxm->entry_multiple = resp->entry_multiple;
+ ctxm->max_entries = le32_to_cpu(resp->max_num_entries);
+ ctxm->min_entries = le32_to_cpu(resp->min_num_entries);
+ init_val = resp->ctx_init_value;
+ init_off = resp->ctx_init_offset;
+ bnxt_init_ctx_initializer(ctxm, init_val, init_off,
+ BNXT_CTX_INIT_VALID(flags));
+ ctxm->split_entry_cnt = min_t(u8, resp->subtype_valid_cnt,
+ BNXT_MAX_SPLIT_ENTRY);
+ for (i = 0, p = &resp->split_entry_0; i < ctxm->split_entry_cnt;
+ i++, p++)
+ ctxm->split[i] = le32_to_cpu(*p);
+ }
+ rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_V2_MAX);
+
+ctx_done:
+ BNXT_HWRM_UNLOCK(softc);
+ return rc;
+}
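+
+/*
+ * Note that the loop above advances "type" from resp->next_valid_type, so
+ * firmware enumerates only the context types it implements; entries
+ * lacking the TYPE_VALID flag are skipped without being recorded.
+ */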
+
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
{
struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -293,63 +453,115 @@ int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc)
(void *)softc->hwrm_cmd_resp.idi_vaddr;
int rc;
- if (softc->hwrm_spec_code < 0x10902 || BNXT_VF(softc) || softc->ctx_mem)
+ if (softc->hwrm_spec_code < 0x10902 || softc->ctx_mem)
+ return 0;
+
+ if (BNXT_CHIP_P7(softc)) {
+ if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ return bnxt_hwrm_func_backing_store_qcaps_v2(softc);
+ }
+
+ if (BNXT_VF(softc))
return 0;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_QCAPS);
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (!rc) {
- struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- int i;
+ u8 init_val, init_idx = 0;
+ u16 init_mask;
- ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
+ ctx = softc->ctx_mem;
if (!ctx) {
- rc = -ENOMEM;
- goto ctx_err;
+ ctx = malloc(sizeof(*ctx), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto ctx_err;
+ }
+ softc->ctx_mem = ctx;
}
- ctx_pg = malloc(sizeof(*ctx_pg) * (softc->max_q + 1),
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!ctx_pg) {
- free(ctx, M_DEVBUF);
- rc = -ENOMEM;
- goto ctx_err;
+ init_val = resp->ctx_kind_initializer;
+ init_mask = le16_to_cpu(resp->ctx_init_mask);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctxm->max_entries = le32_to_cpu(resp->qp_max_entries);
+ ctxm->qp_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
+ ctxm->qp_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
+ ctxm->entry_size = le16_to_cpu(resp->qp_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->qp_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctxm->srq_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->srq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->srq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->srq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctxm->cq_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
+ ctxm->max_entries = le32_to_cpu(resp->cq_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->cq_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val, resp->cq_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctxm->vnic_entries = le32_to_cpu(resp->vnic_max_vnic_entries);
+ ctxm->max_entries = ctxm->vnic_entries +
+ le16_to_cpu(resp->vnic_max_ring_table_entries);
+ ctxm->entry_size = le16_to_cpu(resp->vnic_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->vnic_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctxm->max_entries = le32_to_cpu(resp->stat_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->stat_entry_size);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->stat_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ ctxm->entry_size = le16_to_cpu(resp->tqm_entry_size);
+ ctxm->min_entries = le32_to_cpu(resp->tqm_min_entries_per_ring);
+ ctxm->max_entries = le32_to_cpu(resp->tqm_max_entries_per_ring);
+ ctxm->entry_multiple = resp->tqm_entries_multiple;
+ if (!ctxm->entry_multiple)
+ ctxm->entry_multiple = 1;
+
+ memcpy(&ctx->ctx_arr[BNXT_CTX_FTQM], ctxm, sizeof(*ctxm));
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctxm->max_entries = le32_to_cpu(resp->mrav_max_entries);
+ ctxm->entry_size = le16_to_cpu(resp->mrav_entry_size);
+ ctxm->mrav_num_entries_units =
+ le16_to_cpu(resp->mrav_num_entries_units);
+ bnxt_init_ctx_initializer(ctxm, init_val,
+ resp->mrav_init_offset,
+ (init_mask & (1 << init_idx++)) != 0);
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctxm->entry_size = le16_to_cpu(resp->tim_entry_size);
+ ctxm->max_entries = le32_to_cpu(resp->tim_max_entries);
+
+ ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
+ if (!ctx->tqm_fp_rings_count)
+ ctx->tqm_fp_rings_count = softc->tx_max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_LEGACY_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_LEGACY_RINGS;
+ if (ctx->tqm_fp_rings_count == BNXT_MAX_TQM_FP_LEGACY_RINGS &&
+ softc->hwrm_max_ext_req_len >= BNXT_BACKING_STORE_CFG_LEN) {
+ ctx->tqm_fp_rings_count += resp->tqm_fp_rings_count_ext;
+ if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
}
- for (i = 0; i < softc->max_q + 1; i++, ctx_pg++)
- ctx->tqm_mem[i] = ctx_pg;
-
- softc->ctx_mem = ctx;
- ctx->qp_max_entries = le32toh(resp->qp_max_entries);
- ctx->qp_min_qp1_entries = le16toh(resp->qp_min_qp1_entries);
- ctx->qp_max_l2_entries = le16toh(resp->qp_max_l2_entries);
- ctx->qp_entry_size = le16toh(resp->qp_entry_size);
- ctx->srq_max_l2_entries = le16toh(resp->srq_max_l2_entries);
- ctx->srq_max_entries = le32toh(resp->srq_max_entries);
- ctx->srq_entry_size = le16toh(resp->srq_entry_size);
- ctx->cq_max_l2_entries = le16toh(resp->cq_max_l2_entries);
- ctx->cq_max_entries = le32toh(resp->cq_max_entries);
- ctx->cq_entry_size = le16toh(resp->cq_entry_size);
- ctx->vnic_max_vnic_entries =
- le16toh(resp->vnic_max_vnic_entries);
- ctx->vnic_max_ring_table_entries =
- le16toh(resp->vnic_max_ring_table_entries);
- ctx->vnic_entry_size = le16toh(resp->vnic_entry_size);
- ctx->stat_max_entries = le32toh(resp->stat_max_entries);
- ctx->stat_entry_size = le16toh(resp->stat_entry_size);
- ctx->tqm_entry_size = le16toh(resp->tqm_entry_size);
- ctx->tqm_min_entries_per_ring =
- le32toh(resp->tqm_min_entries_per_ring);
- ctx->tqm_max_entries_per_ring =
- le32toh(resp->tqm_max_entries_per_ring);
- ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
- if (!ctx->tqm_entries_multiple)
- ctx->tqm_entries_multiple = 1;
- ctx->mrav_max_entries = le32toh(resp->mrav_max_entries);
- ctx->mrav_entry_size = le16toh(resp->mrav_entry_size);
- ctx->tim_entry_size = le16toh(resp->tim_entry_size);
- ctx->tim_max_entries = le32toh(resp->tim_max_entries);
- ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ memcpy(ctxm, &ctx->ctx_arr[BNXT_CTX_STQM], sizeof(*ctxm));
+ ctxm->instance_bmap = (1 << ctx->tqm_fp_rings_count) - 1;
+
+ rc = bnxt_alloc_all_ctx_pg_info(softc, BNXT_CTX_MAX);
} else {
rc = 0;
}
@@ -368,14 +580,11 @@ ctx_err:
static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, uint8_t *pg_attr,
uint64_t *pg_dir)
{
- uint8_t pg_size = 0;
+ if (!rmem->nr_pages)
+ return;
- if (BNXT_PAGE_SHIFT == 13)
- pg_size = 1 << 4;
- else if (BNXT_PAGE_SIZE == 16)
- pg_size = 2 << 4;
+ BNXT_SET_CTX_PAGE_ATTR(*pg_attr);
- *pg_attr = pg_size;
if (rmem->depth >= 1) {
if (rmem->depth == 2)
*pg_attr |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2;
@@ -392,150 +601,216 @@ int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t enables)
struct hwrm_func_backing_store_cfg_input req = {0};
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
struct bnxt_ctx_pg_info *ctx_pg;
- uint32_t *num_entries, req_len = sizeof(req);
- uint64_t *pg_dir;
- uint8_t *pg_attr;
- int i, rc;
- uint32_t ena;
+ struct bnxt_ctx_mem_type *ctxm;
+ u32 req_len = sizeof(req);
+ __le32 *num_entries;
+ u32 ena, flags = 0;
+ __le64 *pg_dir;
+ u8 *pg_attr;
+ int i;
if (!ctx)
return 0;
+ if (req_len > softc->hwrm_max_ext_req_len)
+ req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
+
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG);
req.enables = htole32(enables);
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP) {
- ctx_pg = &ctx->qp_mem;
- req.qp_num_entries = htole32(ctx_pg->entries);
- req.qp_num_qp1_entries = htole16(ctx->qp_min_qp1_entries);
- req.qp_num_l2_entries = htole16(ctx->qp_max_l2_entries);
- req.qp_entry_size = htole16(ctx->qp_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ ctx_pg = ctxm->pg_info;
+ req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.qp_num_qp1_entries = cpu_to_le16(ctxm->qp_qp1_entries);
+ req.qp_num_l2_entries = cpu_to_le16(ctxm->qp_l2_entries);
+ req.qp_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.qpc_pg_size_qpc_lvl,
&req.qpc_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ) {
- ctx_pg = &ctx->srq_mem;
- req.srq_num_entries = htole32(ctx_pg->entries);
- req.srq_num_l2_entries = htole16(ctx->srq_max_l2_entries);
- req.srq_entry_size = htole16(ctx->srq_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ ctx_pg = ctxm->pg_info;
+ req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.srq_num_l2_entries = cpu_to_le16(ctxm->srq_l2_entries);
+ req.srq_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.srq_pg_size_srq_lvl,
&req.srq_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ) {
- ctx_pg = &ctx->cq_mem;
- req.cq_num_entries = htole32(ctx_pg->entries);
- req.cq_num_l2_entries = htole16(ctx->cq_max_l2_entries);
- req.cq_entry_size = htole16(ctx->cq_entry_size);
- bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ ctx_pg = ctxm->pg_info;
+ req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.cq_num_l2_entries = cpu_to_le16(ctxm->cq_l2_entries);
+ req.cq_entry_size = cpu_to_le16(ctxm->entry_size);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.cq_pg_size_cq_lvl,
&req.cq_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV) {
- ctx_pg = &ctx->mrav_mem;
- req.mrav_num_entries = htole32(ctx_pg->entries);
- req.mrav_entry_size = htole16(ctx->mrav_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
+ req.mrav_num_entries = cpu_to_le32(ctx_pg->entries);
+ if (ctxm->mrav_num_entries_units)
+ flags |=
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_FLAGS_MRAV_RESERVATION_SPLIT;
+ req.mrav_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.mrav_pg_size_mrav_lvl,
&req.mrav_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM) {
- ctx_pg = &ctx->tim_mem;
- req.tim_num_entries = htole32(ctx_pg->entries);
- req.tim_entry_size = htole16(ctx->tim_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ ctx_pg = ctxm->pg_info;
+ req.tim_num_entries = cpu_to_le32(ctx_pg->entries);
+ req.tim_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.tim_pg_size_tim_lvl,
&req.tim_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC) {
- ctx_pg = &ctx->vnic_mem;
- req.vnic_num_vnic_entries =
- htole16(ctx->vnic_max_vnic_entries);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ ctx_pg = ctxm->pg_info;
+ req.vnic_num_vnic_entries = cpu_to_le16(ctxm->vnic_entries);
req.vnic_num_ring_table_entries =
- htole16(ctx->vnic_max_ring_table_entries);
- req.vnic_entry_size = htole16(ctx->vnic_entry_size);
+ cpu_to_le16(ctxm->max_entries - ctxm->vnic_entries);
+ req.vnic_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.vnic_pg_size_vnic_lvl,
&req.vnic_page_dir);
}
if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT) {
- ctx_pg = &ctx->stat_mem;
- req.stat_num_entries = htole32(ctx->stat_max_entries);
- req.stat_entry_size = htole16(ctx->stat_entry_size);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ ctx_pg = ctxm->pg_info;
+ req.stat_num_entries = cpu_to_le32(ctxm->max_entries);
+ req.stat_entry_size = cpu_to_le16(ctxm->entry_size);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
&req.stat_pg_size_stat_lvl,
&req.stat_page_dir);
}
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
for (i = 0, num_entries = &req.tqm_sp_num_entries,
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
- ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP,
+ ctx_pg = ctxm->pg_info;
+ i < BNXT_MAX_TQM_LEGACY_RINGS;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[i],
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
- req.tqm_entry_size = htole16(ctx->tqm_entry_size);
- ctx_pg = ctx->tqm_mem[i];
- *num_entries = htole32(ctx_pg->entries);
+ req.tqm_entry_size = cpu_to_le16(ctxm->entry_size);
+ *num_entries = cpu_to_le32(ctx_pg->entries);
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
}
-
- if (req_len > softc->hwrm_max_ext_req_len)
- req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN;
-
- rc = hwrm_send_message(softc, &req, req_len);
- if (rc)
- rc = -EIO;
- return rc;
+ if (enables & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8) {
+ pg_attr = &req.tqm_ring8_pg_size_tqm_ring_lvl;
+ pg_dir = &req.tqm_ring8_page_dir;
+ ctx_pg = &ctx->ctx_arr[BNXT_CTX_FTQM].pg_info[8];
+ req.tqm_ring8_num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
+ }
+ req.flags = cpu_to_le32(flags);
+ return hwrm_send_message(softc, &req, req_len);
}
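A note on the request-length fallback at the top of this function: if the firmware's advertised maximum extended request length is smaller than the full request, the driver trims to the 256-byte legacy layout before sending. A minimal user-space sketch of that guard, assuming only the legacy length constant and a hypothetical 512-byte extended request size:

/*
 * Hedged sketch, not driver code: models only the length fallback above.
 * The 512-byte extended size is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define BNXT_BACKING_STORE_CFG_LEGACY_LEN	256

static uint32_t
trim_req_len(uint32_t req_len, uint32_t fw_max_ext_len)
{
	/* Fall back to the legacy layout if the extended request won't fit. */
	if (req_len > fw_max_ext_len)
		return (BNXT_BACKING_STORE_CFG_LEGACY_LEN);
	return (req_len);
}

int
main(void)
{
	printf("%u\n", trim_req_len(512, 320));		/* prints 256 */
	printf("%u\n", trim_req_len(512, 1024));	/* prints 512 */
	return (0);
}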
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all)
{
- struct hwrm_func_resource_qcaps_output *resp =
+ struct hwrm_func_resource_qcaps_output *resp =
(void *)softc->hwrm_cmd_resp.idi_vaddr;
- struct hwrm_func_resource_qcaps_input req = {0};
- struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
- int rc;
+ struct hwrm_func_resource_qcaps_input req = {0};
+ struct bnxt_hw_resc *hw_resc = &softc->hw_resc;
+ int rc;
- bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
- req.fid = htole16(0xffff);
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_RESOURCE_QCAPS);
+ req.fid = htole16(0xffff);
BNXT_HWRM_LOCK(softc);
- rc = _hwrm_send_message(softc, &req, sizeof(req));
- if (rc) {
- rc = -EIO;
- goto hwrm_func_resc_qcaps_exit;
- }
-
- hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
- if (!all)
- goto hwrm_func_resc_qcaps_exit;
-
- hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
- hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
- hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
- hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
- hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
- hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
- hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
- hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
- hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
- hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
- hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
- hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
- hw_resc->min_vnics = le16toh(resp->min_vnics);
- hw_resc->max_vnics = le16toh(resp->max_vnics);
- hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
- hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ if (rc) {
+ rc = -EIO;
+ goto hwrm_func_resc_qcaps_exit;
+ }
- if (BNXT_CHIP_P5(softc)) {
- hw_resc->max_nqs = le16toh(resp->max_msix);
- hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
- }
+ hw_resc->max_tx_sch_inputs = le16toh(resp->max_tx_scheduler_inputs);
+ if (!all)
+ goto hwrm_func_resc_qcaps_exit;
+
+ hw_resc->min_rsscos_ctxs = le16toh(resp->min_rsscos_ctx);
+ hw_resc->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
+ hw_resc->min_cp_rings = le16toh(resp->min_cmpl_rings);
+ hw_resc->max_cp_rings = le16toh(resp->max_cmpl_rings);
+ hw_resc->min_tx_rings = le16toh(resp->min_tx_rings);
+ hw_resc->max_tx_rings = le16toh(resp->max_tx_rings);
+ hw_resc->min_rx_rings = le16toh(resp->min_rx_rings);
+ hw_resc->max_rx_rings = le16toh(resp->max_rx_rings);
+ hw_resc->min_hw_ring_grps = le16toh(resp->min_hw_ring_grps);
+ hw_resc->max_hw_ring_grps = le16toh(resp->max_hw_ring_grps);
+ hw_resc->min_l2_ctxs = le16toh(resp->min_l2_ctxs);
+ hw_resc->max_l2_ctxs = le16toh(resp->max_l2_ctxs);
+ hw_resc->min_vnics = le16toh(resp->min_vnics);
+ hw_resc->max_vnics = le16toh(resp->max_vnics);
+ hw_resc->min_stat_ctxs = le16toh(resp->min_stat_ctx);
+ hw_resc->max_stat_ctxs = le16toh(resp->max_stat_ctx);
+
+ if (BNXT_CHIP_P5_PLUS(softc)) {
+ hw_resc->max_nqs = le16toh(resp->max_msix);
+ hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
+ }
hwrm_func_resc_qcaps_exit:
BNXT_HWRM_UNLOCK(softc);
- return rc;
+ return rc;
+}
+
+int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last)
+{
+ struct hwrm_func_backing_store_cfg_v2_input req = {0};
+ u32 instance_bmap = ctxm->instance_bmap;
+ int i, j, rc = 0, n = 1;
+ __le32 *p;
+
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID) || !ctxm->pg_info)
+ return 0;
+
+ if (instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ else
+ instance_bmap = 1;
+
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_BACKING_STORE_CFG_V2);
+
+ BNXT_HWRM_LOCK(softc);
+ req.type = cpu_to_le16(ctxm->type);
+ req.entry_size = cpu_to_le16(ctxm->entry_size);
+ req.subtype_valid_cnt = ctxm->split_entry_cnt;
+ for (i = 0, p = &req.split_entry_0; i < ctxm->split_entry_cnt; i++)
+ p[i] = cpu_to_le32(ctxm->split[i]);
+ for (i = 0, j = 0; j < n && !rc; i++) {
+ struct bnxt_ctx_pg_info *ctx_pg;
+
+ if (!(instance_bmap & (1 << i)))
+ continue;
+ req.instance = cpu_to_le16(i);
+ ctx_pg = &ctxm->pg_info[j++];
+ if (!ctx_pg->entries)
+ continue;
+ req.num_entries = cpu_to_le32(ctx_pg->entries);
+ bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
+ &req.page_size_pbl_level,
+ &req.page_dir);
+ if (last && j == n)
+ req.flags =
+ cpu_to_le32(HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_FLAGS_BS_CFG_ALL_DONE);
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
+ }
+ BNXT_HWRM_UNLOCK(softc);
+ return rc;
}
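The _cfg_v2 loop above walks a sparse instance bitmap: bit i set in instance_bmap selects firmware instance i, and the j-th set bit consumes page-info slot j. A self-contained sketch of that walk, with hweight32() modelled by the GCC/Clang __builtin_popcount (an assumption, not the driver's helper):

/*
 * Hedged sketch of the instance-bitmap walk in the _cfg_v2 path.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t instance_bmap = 0x15;	/* instances 0, 2 and 4 */
	int n = __builtin_popcount(instance_bmap);
	int i, j;

	for (i = 0, j = 0; j < n; i++) {
		if (!(instance_bmap & (1u << i)))
			continue;
		/* instance i is configured from page-info slot j */
		printf("instance %d -> pg_info[%d]\n", i, j);
		j++;
	}
	return (0);
}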
int
@@ -599,8 +874,6 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
softc->ver_info->hwrm_if_major = resp->hwrm_intf_maj_8b;
softc->ver_info->hwrm_if_minor = resp->hwrm_intf_min_8b;
softc->ver_info->hwrm_if_update = resp->hwrm_intf_upd_8b;
- snprintf(softc->ver_info->hwrm_fw_ver, BNXT_VERSTR_SIZE, "%d.%d.%d",
- resp->hwrm_fw_major, resp->hwrm_fw_minor, resp->hwrm_fw_build);
strlcpy(softc->ver_info->driver_hwrm_if_ver, HWRM_VERSION_STR,
BNXT_VERSTR_SIZE);
strlcpy(softc->ver_info->hwrm_fw_name, resp->hwrm_fw_name,
@@ -622,9 +895,9 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
strlcpy(softc->ver_info->mgmt_fw_name, nastr, BNXT_NAME_SIZE);
}
else {
- snprintf(softc->ver_info->mgmt_fw_ver, BNXT_VERSTR_SIZE,
- "%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
- resp->mgmt_fw_build);
+ snprintf(softc->ver_info->mgmt_fw_ver, FW_VER_STR_LEN,
+ "%d.%d.%d.%d", resp->mgmt_fw_major, resp->mgmt_fw_minor,
+ resp->mgmt_fw_build, resp->mgmt_fw_patch);
strlcpy(softc->ver_info->mgmt_fw_name, resp->mgmt_fw_name,
BNXT_NAME_SIZE);
}
@@ -636,9 +909,9 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
BNXT_NAME_SIZE);
}
else {
- snprintf(softc->ver_info->netctrl_fw_ver, BNXT_VERSTR_SIZE,
- "%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
- resp->netctrl_fw_build);
+ snprintf(softc->ver_info->netctrl_fw_ver, FW_VER_STR_LEN,
+ "%d.%d.%d.%d", resp->netctrl_fw_major, resp->netctrl_fw_minor,
+ resp->netctrl_fw_build, resp->netctrl_fw_patch);
strlcpy(softc->ver_info->netctrl_fw_name, resp->netctrl_fw_name,
BNXT_NAME_SIZE);
}
@@ -649,8 +922,8 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
}
else {
snprintf(softc->ver_info->roce_fw_ver, BNXT_VERSTR_SIZE,
- "%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
- resp->roce_fw_build);
+ "%d.%d.%d.%d", resp->roce_fw_major, resp->roce_fw_minor,
+ resp->roce_fw_build, resp->roce_fw_patch);
strlcpy(softc->ver_info->roce_fw_name, resp->roce_fw_name,
BNXT_NAME_SIZE);
}
@@ -669,6 +942,7 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
len = BC_HWRM_STR_LEN;
}
+ softc->ver_info->fw_ver_code = BNXT_FW_VER_CODE(fw_maj, fw_min, fw_bld, fw_rsv);
snprintf (softc->ver_info->fw_ver_str, len, "%d.%d.%d.%d",
fw_maj, fw_min, fw_bld, fw_rsv);
@@ -678,6 +952,7 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
snprintf(softc->ver_info->fw_ver_str + fw_ver_len,
FW_VER_STR_LEN - fw_ver_len - 1, "/pkg %s",
resp->active_pkg_name);
+ softc->fw_cap |= BNXT_FW_CAP_PKG_VER;
}
softc->ver_info->chip_num = le16toh(resp->chip_num);
@@ -690,38 +965,135 @@ bnxt_hwrm_ver_get(struct bnxt_softc *softc)
softc->hwrm_max_req_len = le16toh(resp->max_req_win_len);
softc->hwrm_max_ext_req_len = le16toh(resp->max_ext_req_len);
}
-#define DFLT_HWRM_CMD_TIMEOUT 500
softc->hwrm_cmd_timeo = le16toh(resp->def_req_timeout);
if (!softc->hwrm_cmd_timeo)
softc->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
-
dev_caps_cfg = le32toh(resp->dev_caps_cfg);
if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
(dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
softc->flags |= BNXT_FLAG_SHORT_CMD;
+ if ((dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
+ (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
+ softc->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_TRUSTED_VF;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_CFA_EEM;
+
+ if (dev_caps_cfg & HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_TRUFLOW_EN;
+
fail:
BNXT_HWRM_UNLOCK(softc);
return rc;
}
-int
-bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc)
-{
+static const u16 bnxt_async_events_arr[] = {
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PHC_UPDATE,
+};
+
+int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *bp, unsigned long *bmap, int bmap_size,
+ bool async_only)
+{
+ DECLARE_BITMAP(async_events_bmap, 256);
+ u32 *events = (u32 *)async_events_bmap;
+ struct hwrm_func_drv_rgtr_output *resp =
+ (void *)bp->hwrm_cmd_resp.idi_vaddr;
struct hwrm_func_drv_rgtr_input req = {0};
+ u32 flags = 0;
+ int rc;
+ int i;
- bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
-
- req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
- HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE);
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR);
+ req.ver_maj = HWRM_VERSION_MAJOR;
+ req.ver_min = HWRM_VERSION_MINOR;
+ req.ver_upd = HWRM_VERSION_UPDATE;
+
+ req.enables = htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
+ HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
+
+ if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_HOT_RESET_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT |
+ HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT;
+ if (bp->fw_cap & BNXT_FW_CAP_NPAR_1_2)
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_NPAR_1_2_SUPPORT;
+ flags |= HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT;
+ req.flags = htole32(flags);
req.os_type = htole16(HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD);
- req.ver_maj = __FreeBSD_version / 100000;
- req.ver_min = (__FreeBSD_version / 1000) % 100;
- req.ver_upd = (__FreeBSD_version / 100) % 10;
+ if (BNXT_PF(bp)) {
+ req.enables |=
+ htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
+ }
+
+ if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
+ req.flags |= cpu_to_le32(HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE);
- return hwrm_send_message(softc, &req, sizeof(req));
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+ u16 event_id = bnxt_async_events_arr[i];
+
+ if (event_id == HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+ continue;
+ }
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+ }
+ if (bmap && bmap_size) {
+ for (i = 0; i < bmap_size; i++) {
+ if (test_bit(i, bmap))
+ __set_bit(i, async_events_bmap);
+ }
+ }
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= htole32(events[i]);
+
+ if (async_only)
+ req.enables =
+ htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
+
+ rc = hwrm_send_message(bp, &req, sizeof(req));
+
+ if (!rc) {
+ if (resp->flags &
+ le32toh(HWRM_FUNC_DRV_RGTR_OUTPUT_FLAGS_IF_CHANGE_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
+ }
+
+ return rc;
}
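The registration above packs a 256-bit async-event bitmap into the eight 32-bit words of req.async_event_fwd[], so an event ID indexes a bit directly (ID 0x21 lands in word 1, bit 1). A hedged user-space sketch of that packing, standing in for the kernel's bitmap API:

/*
 * Hedged sketch of the async-event bitmap packing; not the driver's
 * DECLARE_BITMAP/__set_bit machinery.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint32_t events[8];
	uint16_t id = 0x21;	/* example event ID */

	memset(events, 0, sizeof(events));
	events[id / 32] |= 1u << (id % 32);

	printf("word %u = 0x%08x\n", (unsigned)(id / 32), events[id / 32]);
	return (0);
}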
int
@@ -766,6 +1138,7 @@ bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
struct hwrm_func_qcaps_output *resp =
(void *)softc->hwrm_cmd_resp.idi_vaddr;
struct bnxt_func_info *func = &softc->func;
+ uint32_t flags, flags_ext, flags_ext2;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCAPS);
req.fid = htole16(0xffff);
@@ -775,13 +1148,78 @@ bnxt_hwrm_func_qcaps(struct bnxt_softc *softc)
if (rc)
goto fail;
- if (resp->flags &
- htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED))
+ flags = le32toh(resp->flags);
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WOL_MAGICPKT_SUPPORTED)
softc->flags |= BNXT_FLAG_WOL_CAP;
- if (resp->flags &
- htole32(HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED))
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
softc->flags |= BNXT_FLAG_FW_CAP_EXT_STATS;
+ /* Enable RoCE only on Thor (P5) and newer devices */
+ if (BNXT_CHIP_P5_PLUS(softc)) {
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V1_SUPPORTED)
+ softc->flags |= BNXT_FLAG_ROCEV1_CAP;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ROCE_V2_SUPPORTED)
+ softc->flags |= BNXT_FLAG_ROCEV2_CAP;
+ }
+
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_LINK_ADMIN_STATUS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_LINK_ADMIN;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_ADMIN_PF;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_HOT_RESET_CAPABLE)
+ softc->fw_cap |= BNXT_FW_CAP_HOT_RESET;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE)
+ softc->fw_cap |= BNXT_FW_CAP_ERROR_RECOVERY;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PCIE_STATS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_PCIE_STATS_SUPPORTED;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_STATS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_EXT_STATS_SUPPORTED;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ERR_RECOVER_RELOAD)
+ softc->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_NOTIFY_VF_DEF_VNIC_CHNG_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_VF_VNIC_NOTIFY;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_CRASHDUMP_CMD_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_CRASHDUMP;
+ if (!(flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
+ softc->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
+
+ flags_ext = le32toh(resp->flags_ext);
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
+ if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_ECN_STATS_SUPPORTED))
+ softc->fw_cap |= BNXT_FW_CAP_ECN_STATS;
+
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PPS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_PTM_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_PTP_PTM;
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PTP_64BIT_RTC_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_PTP_RTC;
+ if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
+ softc->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
+ if (BNXT_PF(softc) && (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
+ softc->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_NPAR_1_2_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_NPAR_1_2;
+ if (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_BS_V2_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_BACKING_STORE_V2;
+ if (BNXT_PF(softc) &&
+ (flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_VF_CFG_ASYNC_FOR_PF_SUPPORTED))
+ softc->fw_cap |= BNXT_FW_CAP_VF_CFG_FOR_PF;
+
+ flags_ext2 = le32toh(resp->flags_ext2);
+ if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_RX_ALL_PKTS_TIMESTAMPS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
+ if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_DBR_SUPPORTED;
+ if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
+ flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_DBR_PACING_SUPPORTED;
+
+ if (flags_ext2 & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_GENERIC_STATS_SUPPORTED)
+ softc->fw_cap |= BNXT_FW_CAP_GENERIC_STATS;
func->fw_fid = le16toh(resp->fid);
memcpy(func->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
func->max_rsscos_ctxs = le16toh(resp->max_rsscos_ctx);
@@ -820,26 +1258,75 @@ fail:
int
bnxt_hwrm_func_qcfg(struct bnxt_softc *softc)
{
- struct hwrm_func_qcfg_input req = {0};
- struct hwrm_func_qcfg_output *resp =
- (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp =
+ (void *)softc->hwrm_cmd_resp.idi_vaddr;
struct bnxt_func_qcfg *fn_qcfg = &softc->fn_qcfg;
- int rc;
+ uint32_t min_db_offset = 0;
+ uint16_t flags;
+ int rc;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_QCFG);
- req.fid = htole16(0xffff);
+ req.fid = htole16(0xffff);
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
- if (rc)
- goto fail;
+ if (rc)
+ goto end;
fn_qcfg->alloc_completion_rings = le16toh(resp->alloc_cmpl_rings);
fn_qcfg->alloc_tx_rings = le16toh(resp->alloc_tx_rings);
fn_qcfg->alloc_rx_rings = le16toh(resp->alloc_rx_rings);
fn_qcfg->alloc_vnics = le16toh(resp->alloc_vnics);
-fail:
+
+ switch (resp->port_partition_type) {
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_2:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
+ case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
+ softc->port_partition_type = resp->port_partition_type;
+ break;
+ }
+
+ flags = le16toh(resp->flags);
+ if (flags & (HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED |
+ HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_LLDP_AGENT_ENABLED)) {
+ softc->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
+ if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_FW_DCBX_AGENT_ENABLED)
+ softc->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
+ }
+ if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
+ softc->flags |= BNXT_FLAG_MULTI_HOST;
+ if (BNXT_PF(softc) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_ROOT))
+ softc->flags |= BNXT_FLAG_MULTI_ROOT;
+ if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED)
+ softc->fw_cap |= BNXT_FW_CAP_SECURE_MODE;
+ if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_RING_MONITOR_ENABLED)
+ softc->fw_cap |= BNXT_FW_CAP_RING_MONITOR;
+ if (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV)
+ softc->fw_cap |= BNXT_FW_CAP_ENABLE_RDMA_SRIOV;
+
+ if (softc->db_size)
+ goto end;
+
+ softc->legacy_db_size = le16_to_cpu(resp->legacy_l2_db_size_kb) * 1024;
+
+ if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_PF(softc))
+ min_db_offset = DB_PF_OFFSET_P5;
+ else
+ min_db_offset = DB_VF_OFFSET_P5;
+ softc->legacy_db_size = min_db_offset;
+ }
+
+ softc->db_size = roundup2(le16_to_cpu(resp->l2_doorbell_bar_size_kb) *
+ 1024, PAGE_SIZE);
+ if (!softc->db_size || softc->db_size > pci_resource_len(softc->pdev, 2) ||
+ softc->db_size <= min_db_offset)
+ softc->db_size = pci_resource_len(softc->pdev, 2);
+
+ end:
BNXT_HWRM_UNLOCK(softc);
- return rc;
+ return rc;
}
int
@@ -882,15 +1369,24 @@ bnxt_hwrm_set_link_common(struct bnxt_softc *softc,
req->flags |=
htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG);
} else {
- req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
- if (link_info->force_pam4_speed_set_by_user) {
+ if (link_info->force_speed2_nrz ||
+ link_info->force_pam4_56_speed2 ||
+ link_info->force_pam4_112_speed2) {
+ req->force_link_speeds2 = htole16(fw_link_speed);
+ req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2);
+ link_info->force_speed2_nrz = false;
+ link_info->force_pam4_56_speed2 = false;
+ link_info->force_pam4_112_speed2 = false;
+ } else if (link_info->force_pam4_speed) {
req->force_pam4_link_speed = htole16(fw_link_speed);
req->enables |= htole32(HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAM4_LINK_SPEED);
- link_info->force_pam4_speed_set_by_user = false;
+ link_info->force_pam4_speed = false;
} else {
req->force_link_speed = htole16(fw_link_speed);
}
+
+ req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE);
}
/* tell chimp that the setting takes effect immediately */
@@ -1000,21 +1496,14 @@ bnxt_hwrm_set_link_setting(struct bnxt_softc *softc, bool set_pause,
int
bnxt_hwrm_vnic_set_hds(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
{
- struct hwrm_vnic_plcmodes_cfg_input req = {0};
+ struct hwrm_vnic_plcmodes_cfg_input req = {0};
- if (!BNXT_CHIP_P5(softc))
+ if (!BNXT_CHIP_P5_PLUS(softc))
return 0;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_PLCMODES_CFG);
- /*
- * TBD -- Explore these flags
- * 1. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4
- * 2. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6
- * 3. req.jumbo_thresh
- * 4. req.hds_threshold
- */
- req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
+ req.flags = htole32(HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);
req.vnic_id = htole16(vnic->id);
return hwrm_send_message(softc, &req, sizeof(req));
@@ -1033,7 +1522,7 @@ bnxt_hwrm_vnic_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic)
req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
if (vnic->flags & BNXT_VNIC_FLAG_VLAN_STRIP)
req.flags |= htole32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
- if (BNXT_CHIP_P5 (softc)) {
+ if (BNXT_CHIP_P5_PLUS (softc)) {
req.default_rx_ring_id =
htole16(softc->rx_rings[0].phys_id);
req.default_cmpl_ring_id =
@@ -1173,7 +1662,7 @@ bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
return EDOOFUS;
}
- if (BNXT_CHIP_P5 (softc))
+ if (BNXT_CHIP_P5_PLUS (softc))
return 0;
resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
@@ -1204,7 +1693,7 @@ bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *grp)
if (grp->grp_id == (uint16_t)HWRM_NA_SIGNATURE)
return 0;
- if (BNXT_CHIP_P5 (softc))
+ if (BNXT_CHIP_P5_PLUS (softc))
return 0;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_RING_GRP_FREE);
@@ -1282,48 +1771,48 @@ bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
req.length = htole32(ring->ring_size);
switch (type) {
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
cp_ring = &softc->tx_cp_rings[idx];
- req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
+ req.cmpl_ring_id = htole16(cp_ring->ring.phys_id);
/* queue_id - what CoS queue the TX ring is associated with */
- req.queue_id = htole16(softc->q_info[0].id);
+ req.queue_id = htole16(softc->tx_q_info[0].queue_id);
- req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
+ req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
req.enables |= htole32(
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
- break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
- if (!BNXT_CHIP_P5(softc))
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
+ if (!BNXT_CHIP_P5_PLUS(softc))
break;
cp_ring = &softc->rx_cp_rings[idx];
- req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
+ req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
req.rx_buf_size = htole16(softc->rx_buf_size);
- req.enables |= htole32(
+ req.enables |= htole32(
HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
- break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
- if (!BNXT_CHIP_P5(softc)) {
- req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG:
+ if (!BNXT_CHIP_P5_PLUS(softc)) {
+ req.ring_type = HWRM_RING_ALLOC_INPUT_RING_TYPE_RX;
break;
- }
+ }
cp_ring = &softc->rx_cp_rings[idx];
- req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
+ req.rx_ring_id = htole16(softc->rx_rings[idx].phys_id);
req.stat_ctx_id = htole32(cp_ring->stats_ctx_id);
req.rx_buf_size = htole16(softc->rx_buf_size);
- req.enables |= htole32(
- HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
- HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
- HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
- break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
- if (!BNXT_CHIP_P5(softc)) {
- req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ req.enables |= htole32(
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_RING_ID_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_RX_BUF_SIZE_VALID |
+ HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
+ if (!BNXT_CHIP_P5_PLUS(softc)) {
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
break;
}
@@ -1331,14 +1820,15 @@ bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
req.nq_ring_id = htole16(softc->nq_rings[idx].ring.phys_id);
req.enables |= htole32(
HWRM_RING_ALLOC_INPUT_ENABLES_NQ_RING_ID_VALID);
- break;
- case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
- req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
- break;
- default:
- printf("hwrm alloc invalid ring type %d\n", type);
- return -1;
- }
+ break;
+ case HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ:
+ req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ break;
+ default:
+ device_printf(softc->dev,
+ "hwrm alloc invalid ring type %d\n", type);
+ return -1;
+ }
BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
@@ -1395,7 +1885,10 @@ bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
req.update_period_ms = htole32(1000);
req.stats_dma_addr = htole64(paddr);
- if (BNXT_CHIP_P5(softc))
+
+ if (BNXT_CHIP_P7(softc))
+ req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext));
+ else if (BNXT_CHIP_P5(softc))
req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats_ext) - 8);
else
req.stats_dma_length = htole16(sizeof(struct ctx_hw_stats));
@@ -1431,25 +1924,142 @@ bnxt_hwrm_port_qstats(struct bnxt_softc *softc)
return rc;
}
+static int bnxt_hwrm_pri2cos_idx(struct bnxt_softc *softc, uint32_t path_dir)
+{
+ struct hwrm_queue_pri2cos_qcfg_input req = {0};
+ struct hwrm_queue_pri2cos_qcfg_output *resp;
+ uint8_t *pri2cos_idx, *q_ids, max_q;
+ int rc, i, j;
+ uint8_t *pri2cos;
-void
+ bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_QUEUE_PRI2COS_QCFG);
+ resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
+
+ req.flags = htole32(HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_IVLAN |
+ path_dir);
+ rc = hwrm_send_message(softc, &req, sizeof(req));
+
+ if (rc)
+ return rc;
+
+ if (path_dir == HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX) {
+ pri2cos_idx = softc->tx_pri2cos_idx;
+ q_ids = softc->tx_q_ids;
+ max_q = softc->tx_max_q;
+ } else {
+ pri2cos_idx = softc->rx_pri2cos_idx;
+ q_ids = softc->rx_q_ids;
+ max_q = softc->rx_max_q;
+ }
+
+ pri2cos = &resp->pri0_cos_queue_id;
+
+ for (i = 0; i < BNXT_MAX_QUEUE; i++) {
+ uint8_t queue_id = pri2cos[i];
+ uint8_t queue_idx;
+
+ /* Per-port queue IDs start at 0, 10, 20, etc. */
+ queue_idx = queue_id % 10;
+ if (queue_idx > BNXT_MAX_QUEUE) {
+ softc->pri2cos_valid = false;
+ rc = -EINVAL;
+ return rc;
+ }
+
+ for (j = 0; j < max_q; j++) {
+ if (q_ids[j] == queue_id)
+ pri2cos_idx[i] = queue_idx;
+ }
+ }
+
+ softc->pri2cos_valid = true;
+
+ return rc;
+}
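bnxt_hwrm_pri2cos_idx() above relies on per-port hardware queue IDs being allocated in runs of ten, so queue_id % 10 recovers the CoS index within a port. A small stand-alone illustration of that mapping:

/*
 * Hedged illustration of the per-port queue-ID decoding; not driver code.
 */
#include <stdio.h>

int
main(void)
{
	unsigned queue_ids[] = { 0, 3, 10, 14, 27 };
	unsigned i;

	for (i = 0; i < sizeof(queue_ids) / sizeof(queue_ids[0]); i++)
		printf("queue_id %2u -> cos index %u\n",
		    queue_ids[i], queue_ids[i] % 10);
	return (0);
}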
+
+int
bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc)
{
struct hwrm_port_qstats_ext_input req = {0};
+ struct hwrm_port_qstats_ext_output *resp;
+ int rc = 0, i;
+ uint32_t tx_stat_size;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_QSTATS_EXT);
+ resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ tx_stat_size = sizeof(struct tx_port_stats_ext);
req.port_id = htole16(softc->pf.port_id);
- req.tx_stat_size = htole16(sizeof(struct tx_port_stats_ext));
+ req.tx_stat_size = htole16(tx_stat_size);
req.rx_stat_size = htole16(sizeof(struct rx_port_stats_ext));
req.rx_stat_host_addr = htole64(softc->hw_rx_port_stats_ext.idi_paddr);
req.tx_stat_host_addr = htole64(softc->hw_tx_port_stats_ext.idi_paddr);
- BNXT_HWRM_LOCK(softc);
- _hwrm_send_message(softc, &req, sizeof(req));
- BNXT_HWRM_UNLOCK(softc);
+ rc = hwrm_send_message(softc, &req, sizeof(req));
- return;
+ if (!rc) {
+ softc->fw_rx_stats_ext_size =
+ le16toh(resp->rx_stat_size) / 8;
+ if (BNXT_FW_MAJ(softc) < 220 && !BNXT_CHIP_P7(softc) &&
+ softc->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
+ softc->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
+
+ softc->fw_tx_stats_ext_size = tx_stat_size ?
+ le16toh(resp->tx_stat_size) / 8 : 0;
+ } else {
+ softc->fw_rx_stats_ext_size = 0;
+ softc->fw_tx_stats_ext_size = 0;
+ }
+
+ if (softc->fw_tx_stats_ext_size <=
+ offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) {
+ softc->pri2cos_valid = false;
+ return rc;
+ }
+
+ rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_TX);
+ if (rc)
+ return rc;
+
+ if (softc->is_asym_q) {
+ rc = bnxt_hwrm_pri2cos_idx(softc, HWRM_QUEUE_PRI2COS_QCFG_INPUT_FLAGS_PATH_RX);
+ if (rc)
+ return rc;
+ } else {
+ memcpy(softc->rx_pri2cos_idx, softc->tx_pri2cos_idx, sizeof(softc->rx_pri2cos_idx));
+ }
+
+ u64 *rx_port_stats_ext = (u64 *)softc->hw_rx_port_stats_ext.idi_vaddr;
+ u64 *tx_port_stats_ext = (u64 *)softc->hw_tx_port_stats_ext.idi_vaddr;
+
+ if (softc->pri2cos_valid) {
+ for (i = 0; i < 8; i++) {
+ long n = bnxt_rx_bytes_pri_arr_base_off[i] +
+ softc->rx_pri2cos_idx[i];
+
+ softc->rx_bytes_pri[i] = *(rx_port_stats_ext + n);
+ }
+ for (i = 0; i < 8; i++) {
+ long n = bnxt_rx_pkts_pri_arr_base_off[i] +
+ softc->rx_pri2cos_idx[i];
+
+ softc->rx_packets_pri[i] = *(rx_port_stats_ext + n);
+ }
+ for (i = 0; i < 8; i++) {
+ long n = bnxt_tx_bytes_pri_arr_base_off[i] +
+ softc->tx_pri2cos_idx[i];
+
+ softc->tx_bytes_pri[i] = *(tx_port_stats_ext + n);
+ }
+ for (i = 0; i < 8; i++) {
+ long n = bnxt_tx_pkts_pri_arr_base_off[i] +
+ softc->tx_pri2cos_idx[i];
+
+ softc->tx_packets_pri[i] = *(tx_port_stats_ext + n);
+ }
+ }
+
+ return rc;
}
int
@@ -1594,12 +2204,15 @@ bnxt_hwrm_rss_cfg(struct bnxt_softc *softc, struct bnxt_vnic_info *vnic,
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_VNIC_RSS_CFG);
+ if (BNXT_CHIP_P7(softc))
+ req.flags |= htole32(HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT);
+
req.hash_type = htole32(hash_type);
req.ring_grp_tbl_addr = htole64(vnic->rss_grp_tbl.idi_paddr);
req.hash_key_tbl_addr = htole64(vnic->rss_hash_key_tbl.idi_paddr);
req.rss_ctx_idx = htole16(vnic->rss_id);
req.hash_mode_flags = HWRM_FUNC_SPD_CFG_INPUT_HASH_MODE_FLAGS_DEFAULT;
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
req.vnic_id = htole16(vnic->id);
req.ring_table_pair_index = 0x0;
}
@@ -1646,7 +2259,7 @@ bnxt_cfg_async_cr(struct bnxt_softc *softc)
req.fid = htole16(0xffff);
req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
req.async_event_cr = htole16(softc->nq_rings[0].ring.phys_id);
else
req.async_event_cr = htole16(softc->def_cp_ring.ring.phys_id);
@@ -2231,7 +2844,7 @@ int bnxt_read_sfp_module_eeprom_info(struct bnxt_softc *softc, uint16_t i2c_addr
HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_PAGE_OFFSET : 0) |
(bank_sel_en ?
HWRM_PORT_PHY_I2C_READ_INPUT_ENABLES_BANK_NUMBER : 0));
- rc = hwrm_send_message(softc, &req, sizeof(req));
+ rc = _hwrm_send_message(softc, &req, sizeof(req));
if (!rc)
memcpy(buf + byte_offset, output->data, xfer_size);
byte_offset += xfer_size;
@@ -2251,9 +2864,9 @@ bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
(void *)softc->hwrm_cmd_resp.idi_vaddr;
int rc = 0;
- BNXT_HWRM_LOCK(softc);
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_PORT_PHY_QCFG);
+ BNXT_HWRM_LOCK(softc);
rc = _hwrm_send_message(softc, &req, sizeof(req));
if (rc)
goto exit;
@@ -2331,6 +2944,10 @@ bnxt_hwrm_port_phy_qcfg(struct bnxt_softc *softc)
if (softc->hwrm_spec_code >= 0x10504)
link_info->active_fec_sig_mode = resp->active_fec_signal_mode;
+ link_info->support_speeds2 = le16toh(resp->support_speeds2);
+ link_info->auto_link_speeds2 = le16toh(resp->auto_link_speeds2);
+ link_info->force_link_speeds2 = le16toh(resp->force_link_speeds2);
+
exit:
BNXT_HWRM_UNLOCK(softc);
return rc;
@@ -2342,7 +2959,9 @@ bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp)
if (!resp->supported_speeds_auto_mode &&
!resp->supported_speeds_force_mode &&
!resp->supported_pam4_speeds_auto_mode &&
- !resp->supported_pam4_speeds_force_mode)
+ !resp->supported_pam4_speeds_force_mode &&
+ !resp->supported_speeds2_auto_mode &&
+ !resp->supported_speeds2_force_mode)
return true;
return false;
@@ -2366,6 +2985,15 @@ int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
if (rc)
goto exit;
+ softc->phy_flags = resp->flags | (resp->flags2 << 8);
+ if (resp->flags & HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_EEE_SUPPORTED) {
+ softc->lpi_tmr_lo = le32toh(resp->tx_lpi_timer_low) &
+ HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_LOW_MASK;
+ softc->lpi_tmr_hi = le32toh(resp->valid_tx_lpi_timer_high) &
+ HWRM_PORT_PHY_QCAPS_OUTPUT_TX_LPI_TIMER_HIGH_MASK;
+ }
+
if (softc->hwrm_spec_code >= 0x10a01) {
if (bnxt_phy_qcaps_no_speed(resp)) {
link_info->phy_state = BNXT_PHY_STATE_DISABLED;
@@ -2376,6 +3004,7 @@ int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
/* Phy re-enabled, reprobe the speeds */
link_info->support_auto_speeds = 0;
link_info->support_pam4_auto_speeds = 0;
+ link_info->support_auto_speeds2 = 0;
}
}
if (resp->supported_speeds_auto_mode)
@@ -2391,6 +3020,14 @@ int bnxt_hwrm_phy_qcaps(struct bnxt_softc *softc)
link_info->support_pam4_force_speeds =
le16toh(resp->supported_pam4_speeds_force_mode);
+ if (resp->supported_speeds2_auto_mode)
+ link_info->support_auto_speeds2 =
+ le16toh(resp->supported_speeds2_auto_mode);
+
+ if (resp->supported_speeds2_force_mode)
+ link_info->support_force_speeds2 =
+ le16toh(resp->supported_speeds2_force_mode);
+
exit:
BNXT_HWRM_UNLOCK(softc);
return rc;
@@ -2473,120 +3110,72 @@ static void bnxt_hwrm_set_coal_params(struct bnxt_softc *softc, uint32_t max_fra
int bnxt_hwrm_set_coal(struct bnxt_softc *softc)
{
- int i, rc = 0;
- struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
- req_tx = {0}, *req;
- uint16_t max_buf, max_buf_irq;
- uint16_t buf_tmr, buf_tmr_irq;
- uint32_t flags;
+ int i, rc = 0;
+ struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
+ req_tx = {0}, *req;
+ uint16_t max_buf, max_buf_irq;
+ uint16_t buf_tmr, buf_tmr_irq;
+ uint32_t flags;
- bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
- HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
- bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
- HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ bnxt_hwrm_cmd_hdr_init(softc, &req_rx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
+ bnxt_hwrm_cmd_hdr_init(softc, &req_tx,
+ HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS);
- /* Each rx completion (2 records) should be DMAed immediately.
- * DMA 1/4 of the completion buffers at a time.
- */
- max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
- /* max_buf must not be zero */
- max_buf = clamp_t(uint16_t, max_buf, 1, 63);
- max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
- buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
- /* buf timer set to 1/4 of interrupt timer */
- buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
- buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
- buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
-
- flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
-
- /* RING_IDLE generates more IRQs for lower latency. Enable it only
- * if coal_usecs is less than 25 us.
- */
- if (softc->rx_coal_usecs < 25)
- flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
-
- bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
- buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
-
- /* max_buf must not be zero */
- max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
- max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
- buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
- /* buf timer set to 1/4 of interrupt timer */
- buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
- buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
- buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
- flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
- bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
- buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
-
- for (i = 0; i < softc->nrxqsets; i++) {
+ /* Each rx completion (2 records) should be DMAed immediately.
+ * DMA 1/4 of the completion buffers at a time.
+ */
+ max_buf = min_t(uint16_t, softc->rx_coal_frames / 4, 2);
+ /* max_buf must not be zero */
+ max_buf = clamp_t(uint16_t, max_buf, 1, 63);
+ max_buf_irq = clamp_t(uint16_t, softc->rx_coal_frames_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->rx_coal_usecs_irq);
+ buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
+
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
+
+ /* RING_IDLE generates more IRQs for lower latency. Enable it only
+ * if coal_usecs is less than 25 us.
+ */
+ if (softc->rx_coal_usecs < 25)
+ flags |= HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_RING_IDLE;
+
+ bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_rx);
+
+ /* max_buf must not be zero */
+ max_buf = clamp_t(uint16_t, softc->tx_coal_frames, 1, 63);
+ max_buf_irq = clamp_t(uint16_t, softc->tx_coal_frames_irq, 1, 63);
+ buf_tmr = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs);
+ /* buf timer set to 1/4 of interrupt timer */
+ buf_tmr = max_t(uint16_t, buf_tmr / 4, 1);
+ buf_tmr_irq = BNXT_USEC_TO_COAL_TIMER(softc->tx_coal_usecs_irq);
+ buf_tmr_irq = max_t(uint16_t, buf_tmr_irq, 1);
+ flags = HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_FLAGS_TIMER_RESET;
+ bnxt_hwrm_set_coal_params(softc, max_buf_irq << 16 | max_buf,
+ buf_tmr_irq << 16 | buf_tmr, flags, &req_tx);
+
+ for (i = 0; i < softc->nrxqsets; i++) {
req = &req_rx;
- /*
- * TBD:
- * Check if Tx also needs to be done
- * So far, Tx processing has been done in softirq contest
- *
- * req = &req_tx;
- */
req->ring_id = htole16(softc->grp_info[i].cp_ring_id);
- rc = hwrm_send_message(softc, req, sizeof(*req));
- if (rc)
- break;
- }
- return rc;
-}
-
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
- int bmap_size)
-{
- struct hwrm_func_drv_rgtr_input req = {0};
- bitstr_t *async_events_bmap;
- uint32_t *events;
- int i;
-
-#define BNXT_MAX_NUM_ASYNC_EVENTS 256
- async_events_bmap = bit_alloc(BNXT_MAX_NUM_ASYNC_EVENTS, M_DEVBUF,
- M_WAITOK|M_ZERO);
- events = (uint32_t *)async_events_bmap;
-
- bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_FUNC_DRV_RGTR);
-
- req.enables =
- htole32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
-
- memset(async_events_bmap, 0, sizeof(BNXT_MAX_NUM_ASYNC_EVENTS / 8));
-
- bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE);
- bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
- bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED);
- bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE);
- bit_set(async_events_bmap, HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
-
- if (bmap && bmap_size) {
- for (i = 0; i < bmap_size; i++) {
- if (bit_test(bmap, i))
- bit_set(async_events_bmap, i);
- }
+ rc = hwrm_send_message(softc, req, sizeof(*req));
+ if (rc)
+ break;
}
-
- for (i = 0; i < 8; i++)
- req.async_event_fwd[i] |= htole32(events[i]);
-
- free(async_events_bmap, M_DEVBUF);
-
- return hwrm_send_message(softc, &req, sizeof(req));
+ return rc;
}
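The coalescing setup above derives the rx DMA buffer count and timers from the user-visible settings: one quarter of the frame budget, capped at 2 and clamped into the firmware's 1..63 range, with the buffer timer at a quarter of the interrupt timer. A user-space sketch of that arithmetic, with clamp_t/max_t modelled by plain helpers and the usec-to-ticks conversion stubbed 1:1 (both assumptions):

/*
 * Hedged sketch of the rx coalescing arithmetic above.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t
clamp_u16(uint16_t v, uint16_t lo, uint16_t hi)
{
	return (v < lo ? lo : (v > hi ? hi : v));
}

int
main(void)
{
	uint16_t rx_coal_frames = 12;	/* example user setting */
	uint16_t rx_coal_usecs = 64;	/* example user setting */
	uint16_t max_buf, buf_tmr;

	max_buf = rx_coal_frames / 4;
	if (max_buf > 2)
		max_buf = 2;			/* min_t(frames / 4, 2) */
	max_buf = clamp_u16(max_buf, 1, 63);	/* must not be zero */

	buf_tmr = rx_coal_usecs;		/* usec-to-ticks stubbed 1:1 */
	if (buf_tmr / 4 > 1)
		buf_tmr = buf_tmr / 4;		/* 1/4 of interrupt timer */
	else
		buf_tmr = 1;

	printf("max_buf = %u, buf_tmr = %u\n", max_buf, buf_tmr); /* 2, 16 */
	return (0);
}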
void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
- uint32_t ring_id, uint32_t *prod, uint32_t *cons)
+ uint32_t ring_id, uint32_t *prod, uint32_t *cons)
{
- hwrm_dbg_ring_info_get_input_t req = {0};
- hwrm_dbg_ring_info_get_output_t *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
- int rc = 0;
+ hwrm_dbg_ring_info_get_input_t req = {0};
+ hwrm_dbg_ring_info_get_output_t *resp = (void *)softc->hwrm_cmd_resp.idi_vaddr;
+ int rc = 0;
*prod = *cons = 0xffffffff;
bnxt_hwrm_cmd_hdr_init(softc, &req, HWRM_DBG_RING_INFO_GET);
diff --git a/sys/dev/bnxt/bnxt_hwrm.h b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.h
index 930ff424ecb8..126cad977c82 100644
--- a/sys/dev/bnxt/bnxt_hwrm.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_hwrm.h
@@ -32,12 +32,15 @@
#define BNXT_PAUSE_TX (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX)
#define BNXT_PAUSE_RX (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX)
-#define BNXT_AUTO_PAUSE_AUTONEG_PAUSE \
- (HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE)
+#define BNXT_AUTO_PAUSE_AUTONEG_PAUSE \
+ (HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE)
#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input)
#define BNXT_BACKING_STORE_CFG_LEGACY_LEN 256
+#define SHORT_HWRM_CMD_TIMEOUT 500
/* HWRM Function Prototypes */
int bnxt_alloc_hwrm_dma_mem(struct bnxt_softc *softc);
void bnxt_free_hwrm_dma_mem(struct bnxt_softc *softc);
int bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
@@ -45,8 +48,9 @@ int bnxt_hwrm_ring_alloc(struct bnxt_softc *softc, uint8_t type,
int bnxt_hwrm_ring_free(struct bnxt_softc *softc, uint32_t type,
struct bnxt_ring *ring, int cmpl_ring_id);
int bnxt_hwrm_ver_get(struct bnxt_softc *softc);
-int bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc);
-int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *softc);
+int bnxt_hwrm_queue_qportcfg(struct bnxt_softc *softc, uint32_t path_dir);
+int bnxt_hwrm_func_drv_rgtr(struct bnxt_softc *bp, unsigned long *bmap, int bmap_size,
+ bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt_softc *softc, bool shutdown);
int bnxt_hwrm_func_qcaps(struct bnxt_softc *softc);
int bnxt_hwrm_func_qcfg(struct bnxt_softc *softc);
@@ -64,7 +68,7 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr,
uint64_t paddr);
int bnxt_hwrm_stat_ctx_free(struct bnxt_softc *softc, struct bnxt_cp_ring *cpr);
int bnxt_hwrm_port_qstats(struct bnxt_softc *softc);
-void bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc);
+int bnxt_hwrm_port_qstats_ext(struct bnxt_softc *softc);
int bnxt_hwrm_ring_grp_alloc(struct bnxt_softc *softc,
struct bnxt_grp_info *grp);
int bnxt_hwrm_ring_grp_free(struct bnxt_softc *softc, struct bnxt_grp_info *gr);
@@ -122,13 +126,19 @@ int bnxt_hwrm_alloc_wol_fltr(struct bnxt_softc *softc);
int bnxt_hwrm_free_wol_fltr(struct bnxt_softc *softc);
int bnxt_hwrm_set_coal(struct bnxt_softc *softc);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt_softc *softc, unsigned long *bmap,
- int bmap_size);
+ int bmap_size);
int bnxt_hwrm_func_backing_store_qcaps(struct bnxt_softc *softc);
int bnxt_hwrm_func_backing_store_cfg(struct bnxt_softc *softc, uint32_t);
+int bnxt_hwrm_func_backing_store_cfg_v2(struct bnxt_softc *softc,
+ struct bnxt_ctx_mem_type *ctxm,
+ bool last);
int bnxt_hwrm_func_resc_qcaps(struct bnxt_softc *softc, bool all);
int bnxt_hwrm_reserve_pf_rings (struct bnxt_softc *softc);
void bnxt_hwrm_ring_info_get(struct bnxt_softc *softc, uint8_t ring_type,
- uint32_t ring_id, uint32_t *prod, uint32_t *);
+ uint32_t ring_id, uint32_t *prod, uint32_t *);
int bnxt_hwrm_passthrough(struct bnxt_softc *softc, void *req, uint32_t req_len,
void *resp, uint32_t resp_len, uint32_t timeout);
+int _hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
+int hwrm_send_message(struct bnxt_softc *, void *, uint32_t);
+void bnxt_hwrm_cmd_hdr_init(struct bnxt_softc *, void *, uint16_t);
#endif
diff --git a/sys/dev/bnxt/bnxt_ioctl.h b/sys/dev/bnxt/bnxt_en/bnxt_ioctl.h
index 370cf89f3c96..370cf89f3c96 100644
--- a/sys/dev/bnxt/bnxt_ioctl.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_ioctl.h
diff --git a/sys/dev/bnxt/bnxt_mgmt.c b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
index 7185fa2f5c0a..bbc12b96d8c6 100644
--- a/sys/dev/bnxt/bnxt_mgmt.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.c
@@ -96,6 +96,71 @@ bnxt_mgmt_loader(struct module *m, int what, void *arg)
}
static int
+bnxt_mgmt_process_dcb(struct cdev *dev, u_long cmd, caddr_t data,
+ int flag, struct thread *td)
+{
+ struct bnxt_softc *softc = NULL;
+ struct bnxt_mgmt_dcb mgmt_dcb = {};
+ void *user_ptr;
+ int ret = 0;
+
+ memcpy(&user_ptr, data, sizeof(user_ptr));
+ if (copyin(user_ptr, &mgmt_dcb, sizeof(mgmt_dcb))) {
+ printf("%s: %s:%d Failed to copy data from user\n",
+ DRIVER_NAME, __FUNCTION__, __LINE__);
+ return -EFAULT;
+ }
+ softc = bnxt_find_dev(mgmt_dcb.hdr.domain, mgmt_dcb.hdr.bus,
+ mgmt_dcb.hdr.devfn, NULL);
+ if (!softc) {
+ printf("%s: %s:%d unable to find softc reference\n",
+ DRIVER_NAME, __FUNCTION__, __LINE__);
+ return -ENODEV;
+ }
+
+ switch (mgmt_dcb.op) {
+ case BNXT_MGMT_DCB_GET_ETS:
+ bnxt_dcb_ieee_getets(softc, &mgmt_dcb.req.ets);
+ break;
+ case BNXT_MGMT_DCB_SET_ETS:
+ bnxt_dcb_ieee_setets(softc, &mgmt_dcb.req.ets);
+ break;
+ case BNXT_MGMT_DCB_GET_PFC:
+ bnxt_dcb_ieee_getpfc(softc, &mgmt_dcb.req.pfc);
+ break;
+ case BNXT_MGMT_DCB_SET_PFC:
+ bnxt_dcb_ieee_setpfc(softc, &mgmt_dcb.req.pfc);
+ break;
+ case BNXT_MGMT_DCB_SET_APP:
+ bnxt_dcb_ieee_setapp(softc, &mgmt_dcb.req.app_tlv.app[0]);
+ break;
+ case BNXT_MGMT_DCB_DEL_APP:
+ bnxt_dcb_ieee_delapp(softc, &mgmt_dcb.req.app_tlv.app[0]);
+ break;
+ case BNXT_MGMT_DCB_LIST_APP:
+ bnxt_dcb_ieee_listapp(softc, &mgmt_dcb.req.app_tlv.app[0],
+ nitems(mgmt_dcb.req.app_tlv.app),
+ &mgmt_dcb.req.app_tlv.num_app);
+ break;
+ default:
+ device_printf(softc->dev, "%s:%d Invalid op 0x%x\n",
+ __FUNCTION__, __LINE__, mgmt_dcb.op);
+ ret = -EFAULT;
+ goto end;
+ }
+
+ if (copyout(&mgmt_dcb, user_ptr, sizeof(mgmt_dcb))) {
+ device_printf(softc->dev, "%s:%d Failed to copy response to user\n",
+ __FUNCTION__, __LINE__);
+ ret = -EFAULT;
+ goto end;
+ }
+
+end:
+ return ret;
+}
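bnxt_mgmt_process_dcb() above follows the usual passthrough-ioctl marshalling: the payload carries a user pointer, the request struct is copyin()'d, dispatched on op, and copyout()'d back. A self-contained user-space stand-in, with copyin/copyout replaced by memcpy and an illustrative struct and op value (both hypothetical):

/*
 * Hedged stand-in for the ioctl marshalling pattern; not the driver's types.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_dcb {
	uint32_t op;		/* e.g. a GET_ETS-style opcode */
	uint32_t result;
};

static int
demo_ioctl(void *user_ptr)
{
	struct demo_dcb dcb;

	memcpy(&dcb, user_ptr, sizeof(dcb));	/* stands in for copyin() */
	switch (dcb.op) {
	case 1:
		dcb.result = 100;		/* fill in the reply */
		break;
	default:
		return (-1);			/* unknown op */
	}
	memcpy(user_ptr, &dcb, sizeof(dcb));	/* stands in for copyout() */
	return (0);
}

int
main(void)
{
	struct demo_dcb req = { .op = 1 };

	if (demo_ioctl(&req) == 0)
		printf("result = %u\n", req.result);
	return (0);
}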
+
+static int
bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data,
int flag, struct thread *td)
{
@@ -141,19 +206,7 @@ bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data,
}
req = malloc(msg_temp.len_req, M_BNXT, M_WAITOK | M_ZERO);
- if(!req) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- return -ENOMEM;
- }
-
resp = malloc(msg_temp.len_resp, M_BNXT, M_WAITOK | M_ZERO);
- if(!resp) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- ret = -ENOMEM;
- goto end;
- }
if (copyin((void *)msg_temp.usr_req, req, msg_temp.len_req)) {
device_printf(softc->dev, "%s:%d Failed to copy data from user\n",
@@ -173,12 +226,6 @@ bnxt_mgmt_process_hwrm(struct cdev *dev, u_long cmd, caddr_t data,
(num_ind * sizeof(struct dma_info));
msg2 = malloc(size, M_BNXT, M_WAITOK | M_ZERO);
- if(!msg2) {
- device_printf(softc->dev, "%s:%d Memory allocation failed",
- __FUNCTION__, __LINE__);
- ret = -ENOMEM;
- goto end;
- }
if (copyin((void *)mgmt_req.req.hreq, msg2, size)) {
device_printf(softc->dev, "%s:%d Failed to copy"
@@ -345,9 +392,12 @@ bnxt_mgmt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
break;
case BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM:
mtx_lock(&mgmt_lock);
- ret = bnxt_mgmt_process_hwrm(dev, cmd, data, flag, td);
+ ret = bnxt_mgmt_process_hwrm(dev, cmd, data, flag, td);
mtx_unlock(&mgmt_lock);
break;
+ case BNXT_MGMT_OPCODE_DCB_OPS:
+ ret = bnxt_mgmt_process_dcb(dev, cmd, data, flag, td);
+ break;
default:
printf("%s: Unknown command 0x%lx\n", DRIVER_NAME, cmd);
ret = -EINVAL;
diff --git a/sys/dev/bnxt/bnxt_mgmt.h b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h
index f130ad386809..8489a223adef 100644
--- a/sys/dev/bnxt/bnxt_mgmt.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_mgmt.h
@@ -41,6 +41,7 @@
#define BNXT_MGMT_OPCODE_GET_DEV_INFO 0x80000000
#define BNXT_MGMT_OPCODE_PASSTHROUGH_HWRM 0x80000001
+#define BNXT_MGMT_OPCODE_DCB_OPS 0x80000002
#define BNXT_MGMT_MAX_HWRM_REQ_LENGTH HWRM_MAX_REQ_LEN
#define BNXT_MGMT_MAX_HWRM_RESP_LENGTH (512)
@@ -118,9 +119,31 @@ struct bnxt_mgmt_req_hdr {
};
struct bnxt_mgmt_req {
- struct bnxt_mgmt_req_hdr hdr;
- union {
- uint64_t hreq;
- } req;
+ struct bnxt_mgmt_req_hdr hdr;
+ union {
+ uint64_t hreq;
+ } req;
};
+struct bnxt_mgmt_app_tlv {
+ uint32_t num_app;
+ struct bnxt_dcb_app app[128];
+} __attribute__ ((__packed__));
+
+struct bnxt_mgmt_dcb {
+ struct bnxt_mgmt_req_hdr hdr;
+#define BNXT_MGMT_DCB_GET_ETS 0x1
+#define BNXT_MGMT_DCB_SET_ETS 0x2
+#define BNXT_MGMT_DCB_GET_PFC 0x3
+#define BNXT_MGMT_DCB_SET_PFC 0x4
+#define BNXT_MGMT_DCB_SET_APP 0x5
+#define BNXT_MGMT_DCB_DEL_APP 0x6
+#define BNXT_MGMT_DCB_LIST_APP 0x7
+#define BNXT_MGMT_DCB_MAX BNXT_MGMT_DCB_LIST_APP
+ uint32_t op;
+ union {
+ struct bnxt_ieee_ets ets;
+ struct bnxt_ieee_pfc pfc;
+ struct bnxt_mgmt_app_tlv app_tlv;
+ } req;
+} __attribute__ ((__packed__));
diff --git a/sys/dev/bnxt/bnxt_sysctl.c b/sys/dev/bnxt/bnxt_en/bnxt_sysctl.c
index 09e79a72f68f..51438e657546 100644
--- a/sys/dev/bnxt/bnxt_sysctl.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_sysctl.c
@@ -29,11 +29,17 @@
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/ctype.h>
+#include <linux/delay.h>
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_sysctl.h"
+DEFINE_MUTEX(tmp_mutex); /* mutex lock for driver */
+extern void bnxt_fw_reset(struct bnxt_softc *bp);
+extern void bnxt_queue_sp_work(struct bnxt_softc *bp);
+extern void process_nq(struct bnxt_softc *softc, uint16_t nqid);
/*
* We want to create:
* dev.bnxt.0.hwstats.txq0
@@ -105,6 +111,16 @@ bnxt_init_sysctl_ctx(struct bnxt_softc *softc)
return ENOMEM;
}
+ sysctl_ctx_init(&softc->dcb_ctx);
+ ctx = device_get_sysctl_ctx(softc->dev);
+ softc->dcb_oid = SYSCTL_ADD_NODE(ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(softc->dev)), OID_AUTO,
+ "dcb", CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "Data Center Bridging");
+ if (!softc->dcb_oid) {
+ sysctl_ctx_free(&softc->dcb_ctx);
+ return ENOMEM;
+ }
+
return 0;
}
@@ -151,6 +167,14 @@ bnxt_free_sysctl_ctx(struct bnxt_softc *softc)
softc->flow_ctrl_oid = NULL;
}
+ if (softc->dcb_oid != NULL) {
+ orc = sysctl_ctx_free(&softc->dcb_ctx);
+ if (orc)
+ rc = orc;
+ else
+ softc->dcb_oid = NULL;
+ }
+
return rc;
}
@@ -529,7 +553,7 @@ bnxt_create_port_stats_sysctls(struct bnxt_softc *softc)
"rx_stat_err", CTLFLAG_RD,
&softc->rx_port_stats->rx_stat_err, "Received stat err");
- if (BNXT_CHIP_P5(softc) &&
+ if (BNXT_CHIP_P5_PLUS(softc) &&
(softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS)) {
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
"tx_bytes_cos0", CTLFLAG_RD,
@@ -581,6 +605,55 @@ bnxt_create_port_stats_sysctls(struct bnxt_softc *softc)
&softc->tx_port_stats_ext->tx_packets_cos7, "Transmitted packets count cos7");
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri0", CTLFLAG_RD,
+ &softc->tx_bytes_pri[0], "Transmitted bytes count pri0");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri0", CTLFLAG_RD,
+ &softc->tx_packets_pri[0], "Transmitted packets count pri0");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri1", CTLFLAG_RD,
+ &softc->tx_bytes_pri[1], "Transmitted bytes count pri1");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri1", CTLFLAG_RD,
+ &softc->tx_packets_pri[1], "Transmitted packets count pri1");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri2", CTLFLAG_RD,
+ &softc->tx_bytes_pri[2], "Transmitted bytes count pri2");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri2", CTLFLAG_RD,
+ &softc->tx_packets_pri[2], "Transmitted packets count pri2");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri3", CTLFLAG_RD,
+ &softc->tx_bytes_pri[3], "Transmitted bytes count pri3");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri3", CTLFLAG_RD,
+ &softc->tx_packets_pri[3], "Transmitted packets count pri3");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri4", CTLFLAG_RD,
+ &softc->tx_bytes_pri[4], "Transmitted bytes count pri4");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri4", CTLFLAG_RD,
+ &softc->tx_packets_pri[4], "Transmitted packets count pri4");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri5", CTLFLAG_RD,
+ &softc->tx_bytes_pri[5], "Transmitted bytes count pri5");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri5", CTLFLAG_RD,
+ &softc->tx_packets_pri[5], "Transmitted packets count pri5");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri6", CTLFLAG_RD,
+ &softc->tx_bytes_pri[6], "Transmitted bytes count pri6");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri6", CTLFLAG_RD,
+ &softc->tx_packets_pri[6], "Transmitted packets count pri6");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_bytes_pri7", CTLFLAG_RD,
+ &softc->tx_bytes_pri[7], "Transmitted bytes count pri7");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "tx_packets_pri7", CTLFLAG_RD,
+ &softc->tx_packets_pri[7], "Transmitted packets count pri7");
+
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
"pfc_pri0_tx_duration_us", CTLFLAG_RD,
&softc->tx_port_stats_ext->pfc_pri0_tx_duration_us, "Time duration between "
"XON to XOFF and XOFF to XON for pri0");
@@ -714,6 +787,55 @@ bnxt_create_port_stats_sysctls(struct bnxt_softc *softc)
&softc->rx_port_stats_ext->rx_packets_cos7, "Received packets count cos7");
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri0", CTLFLAG_RD,
+ &softc->rx_bytes_pri[0], "Received bytes count pri0");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri0", CTLFLAG_RD,
+ &softc->rx_packets_pri[0], "Received packets count pri0");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri1", CTLFLAG_RD,
+ &softc->rx_bytes_pri[1], "Received bytes count pri1");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri1", CTLFLAG_RD,
+ &softc->rx_packets_pri[1], "Received packets count pri1");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri2", CTLFLAG_RD,
+ &softc->rx_bytes_pri[2], "Received bytes count pri2");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri2", CTLFLAG_RD,
+ &softc->rx_packets_pri[2], "Received packets count pri2");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri3", CTLFLAG_RD,
+ &softc->rx_bytes_pri[3], "Received bytes count pri3");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri3", CTLFLAG_RD,
+ &softc->rx_packets_pri[3], "Received packets count pri3");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri4", CTLFLAG_RD,
+ &softc->rx_bytes_pri[4], "Received bytes count pri4");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri4", CTLFLAG_RD,
+ &softc->rx_packets_pri[4], "Received packets count pri4");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri5", CTLFLAG_RD,
+ &softc->rx_bytes_pri[5], "Received bytes count pri5");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri5", CTLFLAG_RD,
+ &softc->rx_packets_pri[5], "Received packets count pri5");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri6", CTLFLAG_RD,
+ &softc->rx_bytes_pri[6], "Received bytes count pri6");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri6", CTLFLAG_RD,
+ &softc->rx_packets_pri[6], "Received packets count pri6");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_bytes_pri7", CTLFLAG_RD,
+ &softc->rx_bytes_pri[7], "Received bytes count pri7");
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "rx_packets_pri7", CTLFLAG_RD,
+ &softc->rx_packets_pri[7], "Received packets count pri7");
+
+ SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
"pfc_pri0_rx_duration_us", CTLFLAG_RD,
	    &softc->rx_port_stats_ext->pfc_pri0_rx_duration_us, "Time duration in receiving "
	    "between XON to XOFF and XOFF to XON for pri0");
@@ -880,7 +1002,7 @@ bnxt_create_rx_sysctls(struct bnxt_softc *softc, int rxr)
if (!oid)
return ENOMEM;
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
SYSCTL_ADD_QUAD(&softc->hw_stats, SYSCTL_CHILDREN(oid), OID_AUTO,
"nq_num_ints", CTLFLAG_RD, &softc->nq_rings[rxr].int_count,
"Num Interrupts");
@@ -1052,9 +1174,6 @@ bnxt_create_ver_sysctls(struct bnxt_softc *softc)
"driver_hwrm_if", CTLFLAG_RD, vi->driver_hwrm_if_ver, 0,
"HWRM firmware version");
SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
- "hwrm_fw", CTLFLAG_RD, vi->hwrm_fw_ver, 0,
- "HWRM firmware version");
- SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
"mgmt_fw", CTLFLAG_RD, vi->mgmt_fw_ver, 0,
"management firmware version");
SYSCTL_ADD_STRING(&vi->ver_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
@@ -1436,6 +1555,46 @@ bnxt_set_coal_tx_frames_irq(SYSCTL_HANDLER_ARGS) {
return rc;
}
+static void
+simulate_reset(struct bnxt_softc *bp, char *fwcli_string)
+{
+ struct hwrm_dbg_fw_cli_input req = {0};
+ int rc = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_FW_CLI);
+ req.cmpl_ring = -1;
+ req.target_id = -1;
+ req.cli_cmd_len = strlen(fwcli_string);
+ req.host_buf_len = 64 * 1024;
+ strcpy((char *)req.cli_cmd, fwcli_string);
+
+ BNXT_HWRM_LOCK(bp);
+ rc = _hwrm_send_message(bp, &req, sizeof(req));
+	if (rc)
+		device_printf(bp->dev, "Manual FW fault failed, rc: %x\n", rc);
+ BNXT_HWRM_UNLOCK(bp);
+}
+
+static int
+bnxt_reset_ctrl(SYSCTL_HANDLER_ARGS) {
+ struct bnxt_softc *softc = arg1;
+ int rc = 0;
+ char buf[50] = {0};
+
+ if (softc == NULL)
+ return EBUSY;
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ if (BNXT_CHIP_P5_PLUS(softc))
+ simulate_reset(softc, buf);
+
+ return rc;
+}
+
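(A minimal userspace sketch of driving the reset_ctrl node registered below,
assuming the device attached as unit 0 so the node resolves to
"dev.bnxt.0.reset_ctrl"; the path and the CLI string are illustrative, not
part of this patch.)

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <string.h>

    /*
     * Push a firmware CLI string; bnxt_reset_ctrl() hands it to
     * simulate_reset(), which issues HWRM_DBG_FW_CLI.
     */
    int
    trigger_fw_fault(const char *cmd)
    {
            return (sysctlbyname("dev.bnxt.0.reset_ctrl", NULL, NULL,
                cmd, strlen(cmd) + 1));
    }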
int
bnxt_create_config_sysctls_pre(struct bnxt_softc *softc)
{
@@ -1460,35 +1619,43 @@ bnxt_create_config_sysctls_pre(struct bnxt_softc *softc)
SYSCTL_ADD_CONST_STRING(ctx, children, OID_AUTO, "if_name", CTLFLAG_RD,
if_name(iflib_get_ifp(softc->ctx)), "interface name");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_rx_usecs, "I", "interrupt coalescing Rx Usecs");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_rx_frames, "I", "interrupt coalescing Rx Frames");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs_irq",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_usecs_irq",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_rx_usecs_irq, "I",
"interrupt coalescing Rx Usecs IRQ");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames_irq",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_rx_frames_irq",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_rx_frames_irq, "I",
"interrupt coalescing Rx Frames IRQ");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
	    bnxt_set_coal_tx_usecs, "I", "interrupt coalescing Tx Usecs");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
- bnxt_set_coal_tx_frames, "I", "interrupt coalescing Tx Frames");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs_irq",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ bnxt_set_coal_tx_frames, "I", "interrupt coalescing Tx Frames");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_usecs_irq",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_tx_usecs_irq, "I",
- "interrupt coalescing Tx Usecs IRQ");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames_irq",
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
+ "interrupt coalescing Tx Usecs IRQ");
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_tx_frames_irq",
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, softc, 0,
bnxt_set_coal_tx_frames_irq, "I",
"interrupt coalescing Tx Frames IRQ");
-
+ SYSCTL_ADD_U32(ctx, children, OID_AUTO, "flags", CTLFLAG_RD,
+ &softc->flags, 0, "flags");
+ SYSCTL_ADD_U64(ctx, children, OID_AUTO, "fw_cap", CTLFLAG_RD,
+ &softc->fw_cap, 0, "FW caps");
+
+ SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
+ "reset_ctrl", CTLTYPE_STRING | CTLFLAG_RWTUN, softc,
+ 0, bnxt_reset_ctrl, "A",
+	    "Trigger a simulated firmware fault/reset via an HWRM_DBG_FW_CLI string");
return 0;
}
@@ -1618,6 +1785,349 @@ bnxt_create_hw_lro_sysctls(struct bnxt_softc *softc)
return 0;
}
+static int
+bnxt_dcb_dcbx_cap(SYSCTL_HANDLER_ARGS)
+{
+ struct bnxt_softc *softc = arg1;
+ int val;
+ int rc;
+
+ val = bnxt_dcb_getdcbx(softc);
+ rc = sysctl_handle_int(oidp, &val, 0, req);
+ if (rc || !req->newptr)
+ return rc;
+
+ bnxt_dcb_setdcbx(softc, val);
+
+ return rc;
+}
+
+static char
+bnxt_ets_tsa_to_str(struct bnxt_softc *softc, uint32_t tc)
+{
+ switch (softc->ieee_ets->tc_tsa[tc]) {
+ case BNXT_IEEE_8021QAZ_TSA_STRICT:
+ return 's';
+ case BNXT_IEEE_8021QAZ_TSA_ETS:
+ return 'e';
+ default:
+ return 'X';
+	}
+}
+
+static uint32_t
+bnxt_ets_str_to_tsa(char tsa_str)
+{
+ switch (tsa_str) {
+ case 's':
+ return BNXT_IEEE_8021QAZ_TSA_STRICT;
+ case 'e':
+ return BNXT_IEEE_8021QAZ_TSA_ETS;
+ default:
+ return -1;
+ }
+}
+
+static int
+bnxt_ets_get_val(struct bnxt_softc *softc, uint32_t type, uint32_t tc)
+{
+ switch (type) {
+ case BNXT_TYPE_ETS_TSA:
+ if (softc->ieee_ets)
+ return softc->ieee_ets->tc_tsa[tc];
+ break;
+ case BNXT_TYPE_ETS_PRI2TC:
+ if (softc->ieee_ets)
+ return softc->ieee_ets->prio_tc[tc];
+ break;
+ case BNXT_TYPE_ETS_TCBW:
+ if (softc->ieee_ets)
+ return softc->ieee_ets->tc_tx_bw[tc];
+ break;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
+static void
+bnxt_pfc_get_string(struct bnxt_softc *softc, char *buf, struct bnxt_ieee_pfc *pfc)
+{
+ uint32_t i;
+ bool found = false;
+
+ for (i = 0; i < BNXT_IEEE_8021QAZ_MAX_TCS; i++) {
+ if (pfc->pfc_en & (1 << i)) {
+ if (found)
+ buf += sprintf(buf, ", ");
+ buf += sprintf(buf, "%d", i);
+ found = true;
+ }
+ }
+
+ if (!found)
+ buf += sprintf(buf, "none");
+}
+
+static const char *bnxt_get_tlv_selector_str(uint8_t selector)
+{
+ switch (selector) {
+ case BNXT_IEEE_8021QAZ_APP_SEL_ETHERTYPE:
+ return "Ethertype";
+ case BNXT_IEEE_8021QAZ_APP_SEL_DGRAM:
+ return "UDP or DCCP";
+ case BNXT_IEEE_8021QAZ_APP_SEL_DSCP:
+ return "DSCP";
+ default:
+ return "Unknown";
+ }
+}
+
+static void
+bnxt_app_tlv_get_string(struct sbuf *sb, struct bnxt_dcb_app *app, int num)
+{
+ int i;
+
+ if (num == 0) {
+ sbuf_printf(sb, " None");
+ return;
+ }
+
+ sbuf_putc(sb, '\n');
+ for (i = 0; i < num; i++) {
+		sbuf_printf(sb, "\tAPP#%d:\tpri: %d,\tSel: %d,\t%s: %d\n",
+ i,
+ app[i].priority,
+ app[i].selector,
+ bnxt_get_tlv_selector_str(app[i].selector),
+ app[i].protocol);
+ }
+}
+
+static void
+bnxt_ets_get_string(struct bnxt_softc *softc, char *buf)
+{
+ uint32_t type, i;
+
+	for (type = 0; type < BNXT_TYPE_ETS_MAX; type++) {
+ for (i = 0; i < BNXT_IEEE_8021QAZ_MAX_TCS; i++) {
+ if (i == 0)
+ buf += sprintf(buf, "%s:", BNXT_ETS_TYPE_STR[type]);
+
+ if (!softc->ieee_ets)
+ buf += sprintf(buf, "x");
+ else if (type == BNXT_TYPE_ETS_TSA)
+ buf += sprintf(buf, "%c", bnxt_ets_tsa_to_str(softc, i));
+ else
+ buf += sprintf(buf, "%d", bnxt_ets_get_val(softc, type, i));
+
+ if (i != BNXT_IEEE_8021QAZ_MAX_TCS - 1)
+ buf += sprintf(buf, ",");
+ }
+ if (type != BNXT_TYPE_ETS_MAX - 1)
+ buf += sprintf(buf, "#");
+ }
+}
+
+static int
+bnxt_dcb_list_app(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf sb;
+ struct bnxt_dcb_app app[128] = {0};
+ struct bnxt_softc *softc = arg1;
+ int rc, num_inputs = 0;
+
+ sbuf_new_for_sysctl(&sb, NULL, 128, req);
+ bnxt_dcb_ieee_listapp(softc, app, nitems(app), &num_inputs);
+ bnxt_app_tlv_get_string(&sb, app, num_inputs);
+ rc = sbuf_finish(&sb);
+ sbuf_delete(&sb);
+ return rc;
+}
+
+static int
+bnxt_dcb_del_app(SYSCTL_HANDLER_ARGS)
+{
+ struct bnxt_softc *softc = arg1;
+ struct bnxt_dcb_app app = {0};
+ char buf[256] = {0};
+ int rc, num_inputs;
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ num_inputs = sscanf(buf, "%hhu,%hhu,%hd", &app.priority, &app.selector, &app.protocol);
+
+ if (num_inputs != 3) {
+ device_printf(softc->dev,
+ "Invalid app tlv syntax, inputs = %d\n", num_inputs);
+ return EINVAL;
+ }
+
+ bnxt_dcb_ieee_delapp(softc, &app);
+
+ return rc;
+}
+
+static int
+bnxt_dcb_set_app(SYSCTL_HANDLER_ARGS)
+{
+ struct bnxt_softc *softc = arg1;
+ struct bnxt_dcb_app app = {0};
+ char buf[256] = {0};
+ int rc, num_inputs;
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ num_inputs = sscanf(buf, "%hhu,%hhu,%hd", &app.priority, &app.selector, &app.protocol);
+
+ if (num_inputs != 3) {
+ device_printf(softc->dev,
+ "Invalid app tlv syntax, inputs = %d\n", num_inputs);
+ return EINVAL;
+ }
+
+ bnxt_dcb_ieee_setapp(softc, &app);
+
+ return rc;
+}
+
+static int
+bnxt_dcb_pfc(SYSCTL_HANDLER_ARGS)
+{
+ struct bnxt_softc *softc = arg1;
+ struct bnxt_ieee_pfc pfc = {0};
+ int rc, i, num_inputs;
+ char buf[256] = {0};
+ int pri_mask = 0;
+ char pri[8];
+
+ rc = bnxt_dcb_ieee_getpfc(softc, &pfc);
+ if (!rc)
+ bnxt_pfc_get_string(softc, buf, &pfc);
+ else
+ sprintf(buf, "## getpfc failed with error %d ##", rc);
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ /* Check for 'none' string first */
+	if (strncmp(buf, "none", 8) == 0)
+		goto configure;
+ num_inputs = sscanf(buf, "%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu",
+ &pri[0], &pri[1], &pri[2], &pri[3], &pri[4],
+ &pri[5], &pri[6], &pri[7]);
+
+ if (num_inputs < 1 || num_inputs > 8) {
+ device_printf(softc->dev,
+ "Invalid pfc syntax, inputs = %d\n", num_inputs);
+ return EINVAL;
+ }
+
+ for (i = 0; i < num_inputs; i++) {
+ if (pri[i] > 7 || pri[i] < 0) {
+ device_printf(softc->dev,
+			    "Invalid priority %d. Valid priorities are "
+ "from 0 to 7 and string \"none\".\n", pri[i]);
+ return EINVAL;
+ }
+
+ pri_mask |= (1 << pri[i]) & 0xFF;
+ }
+
+configure:
+ pfc.pfc_en = pri_mask;
+ rc = bnxt_dcb_ieee_setpfc(softc, &pfc);
+ if (rc)
+ device_printf(softc->dev,
+ "setpfc failed with status %d\n", rc);
+ return rc;
+}
+
+static int
+bnxt_dcb_ets(SYSCTL_HANDLER_ARGS)
+{
+ struct bnxt_softc *softc = arg1;
+ struct bnxt_ieee_ets ets = {0};
+ int rc = 0, i, num_inputs;
+ char buf[256] = {0};
+ char tsa[8];
+
+ rc = bnxt_dcb_ieee_getets(softc, &ets);
+ if (!rc)
+ bnxt_ets_get_string(softc, buf);
+ else
+ sprintf(buf, "## getets failed with error %d ##", rc);
+
+ rc = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (rc || req->newptr == NULL)
+ return rc;
+
+ num_inputs = sscanf(buf, "tsa:%c,%c,%c,%c,%c,%c,%c,%c#"
+ "pri2tc:%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu#"
+ "tcbw:%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu,%hhu",
+ &tsa[0], &tsa[1], &tsa[2], &tsa[3], &tsa[4], &tsa[5], &tsa[6], &tsa[7],
+ &ets.prio_tc[0], &ets.prio_tc[1], &ets.prio_tc[2], &ets.prio_tc[3],
+ &ets.prio_tc[4], &ets.prio_tc[5], &ets.prio_tc[6], &ets.prio_tc[7],
+ &ets.tc_tx_bw[0], &ets.tc_tx_bw[1], &ets.tc_tx_bw[2], &ets.tc_tx_bw[3],
+ &ets.tc_tx_bw[4], &ets.tc_tx_bw[5], &ets.tc_tx_bw[6], &ets.tc_tx_bw[7]);
+
+ if (num_inputs != 24)
+ return EINVAL;
+
+	for (i = 0; i < 8; i++)
+ ets.tc_tsa[i] = bnxt_ets_str_to_tsa(tsa[i]);
+
+ rc = bnxt_dcb_ieee_setets(softc, &ets);
+
+ return rc;
+}
+
+int
+bnxt_create_dcb_sysctls(struct bnxt_softc *softc)
+{
+ struct sysctl_oid *oid = softc->dcb_oid;
+
+ if (!oid)
+ return ENOMEM;
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
+ "dcbx_cap", CTLTYPE_INT | CTLFLAG_RWTUN, softc,
+	    0, bnxt_dcb_dcbx_cap, "I",
+	    "DCB Capability Exchange Protocol (DCBX) capability flags");
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "ets",
+ CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0,
+ bnxt_dcb_ets, "A", "Enhanced Transmission Selection (ETS)");
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "pfc",
+ CTLTYPE_STRING | CTLFLAG_RWTUN, softc, 0,
+	    bnxt_dcb_pfc, "A", "Priority Flow Control (PFC)");
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "set_apptlv",
+ CTLTYPE_STRING | CTLFLAG_WR, softc, 0,
+ bnxt_dcb_set_app, "A", "Set App TLV");
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "del_apptlv",
+ CTLTYPE_STRING | CTLFLAG_WR, softc, 0,
+ bnxt_dcb_del_app, "A", "Delete App TLV");
+
+ SYSCTL_ADD_PROC(&softc->dcb_ctx, SYSCTL_CHILDREN(oid), OID_AUTO, "list_apptlv",
+ CTLTYPE_STRING | CTLFLAG_RD, softc, 0,
+ bnxt_dcb_list_app, "A", "List all App TLVs");
+
+ return 0;
+}
+
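(A sketch of the string formats the DCB nodes above parse, written against the
sscanf() patterns in bnxt_dcb_ets(), bnxt_dcb_pfc() and bnxt_dcb_set_app();
the "dev.bnxt.0.dcb.*" paths assume unit 0 and are illustrative only.)

    #include <sys/types.h>
    #include <sys/sysctl.h>
    #include <string.h>

    static int
    set_dcb_str(const char *node, const char *val)
    {
            /* String sysctls take the new value through newp/newlen. */
            return (sysctlbyname(node, NULL, NULL, val, strlen(val) + 1));
    }

    int
    configure_dcb_example(void)
    {
            /*
             * 24 fields: 8 TSA chars ('s'trict or 'e'ts), 8 prio->TC
             * mappings, 8 TC bandwidth percentages.
             */
            const char *ets = "tsa:s,s,e,e,e,e,e,e#"
                "pri2tc:0,0,1,1,2,2,3,3#tcbw:0,0,25,25,25,25,0,0";

            if (set_dcb_str("dev.bnxt.0.dcb.ets", ets) != 0)
                    return (-1);
            /* Comma-separated priority list, or "none" to disable PFC. */
            if (set_dcb_str("dev.bnxt.0.dcb.pfc", "3,4") != 0)
                    return (-1);
            /* "priority,selector,protocol": Ethertype 0x8906 at pri 1. */
            return (set_dcb_str("dev.bnxt.0.dcb.set_apptlv", "1,1,35078"));
    }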
int
bnxt_create_config_sysctls_post(struct bnxt_softc *softc)
{
diff --git a/sys/dev/bnxt/bnxt_sysctl.h b/sys/dev/bnxt/bnxt_en/bnxt_sysctl.h
index a2ca3a8a559f..d4c9e908b9c1 100644
--- a/sys/dev/bnxt/bnxt_sysctl.h
+++ b/sys/dev/bnxt/bnxt_en/bnxt_sysctl.h
@@ -40,3 +40,5 @@ int bnxt_create_config_sysctls_pre(struct bnxt_softc *softc);
int bnxt_create_config_sysctls_post(struct bnxt_softc *softc);
int bnxt_create_hw_lro_sysctls(struct bnxt_softc *softc);
int bnxt_create_pause_fc_sysctls(struct bnxt_softc *softc);
+int bnxt_create_dcb_sysctls(struct bnxt_softc *softc);
+int bnxt_create_dcb_ets_sysctls(struct bnxt_softc *softc);
diff --git a/sys/dev/bnxt/bnxt_txrx.c b/sys/dev/bnxt/bnxt_en/bnxt_txrx.c
index 98575234c515..2e10de6f0174 100644
--- a/sys/dev/bnxt/bnxt_txrx.c
+++ b/sys/dev/bnxt/bnxt_en/bnxt_txrx.c
@@ -97,6 +97,7 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
uint16_t lflags;
uint32_t cfa_meta;
int seg = 0;
+ uint8_t wrap = 0;
/* If we have offloads enabled, we need to use two BDs. */
if ((pi->ipi_csum_flags & (CSUM_OFFLOAD | CSUM_TSO | CSUM_IP)) ||
@@ -123,7 +124,18 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
if (need_hi) {
flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
+ /* Handle wrapping */
+ if (pi->ipi_new_pidx == txr->ring_size - 1)
+ wrap = 1;
+
pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
+
+ /* Toggle epoch bit on wrap */
+ if (wrap && pi->ipi_new_pidx == 0)
+ txr->epoch_bit = !txr->epoch_bit;
+ if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
+ txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;
+
tbdh = &((struct tx_bd_long_hi *)txr->vaddr)[pi->ipi_new_pidx];
tbdh->kid_or_ts_high_mss = htole16(pi->ipi_tso_segsz);
tbdh->kid_or_ts_low_hdr_size = htole16((pi->ipi_ehdrlen + pi->ipi_ip_hlen +
@@ -157,7 +169,15 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
for (; seg < pi->ipi_nsegs; seg++) {
tbd->flags_type = htole16(flags_type);
+
+ if (pi->ipi_new_pidx == txr->ring_size - 1)
+ wrap = 1;
pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
+ if (wrap && pi->ipi_new_pidx == 0)
+ txr->epoch_bit = !txr->epoch_bit;
+ if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
+ txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;
+
tbd = &((struct tx_bd_long *)txr->vaddr)[pi->ipi_new_pidx];
tbd->len = htole16(pi->ipi_segs[seg].ds_len);
tbd->addr = htole64(pi->ipi_segs[seg].ds_addr);
@@ -165,7 +185,13 @@ bnxt_isc_txd_encap(void *sc, if_pkt_info_t pi)
}
flags_type |= TX_BD_SHORT_FLAGS_PACKET_END;
tbd->flags_type = htole16(flags_type);
+ if (pi->ipi_new_pidx == txr->ring_size - 1)
+ wrap = 1;
pi->ipi_new_pidx = RING_NEXT(txr, pi->ipi_new_pidx);
+ if (wrap && pi->ipi_new_pidx == 0)
+ txr->epoch_bit = !txr->epoch_bit;
+ if (pi->ipi_new_pidx < EPOCH_ARR_SZ)
+ txr->epoch_arr[pi->ipi_new_pidx] = txr->epoch_bit;
return 0;
}
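(The wrap/epoch sequence above is deliberately inlined three times in the hot
path; for illustration only, the same logic as a helper, assuming txr is the
driver's ring structure with the ring_size, epoch_bit and epoch_arr fields
used in this hunk.)

    static inline uint16_t
    bnxt_tx_ring_advance(struct bnxt_ring *txr, uint16_t pidx)
    {
            bool wrap = (pidx == txr->ring_size - 1);

            pidx = RING_NEXT(txr, pidx);
            /*
             * The epoch bit flips each time the producer index wraps to 0,
             * letting the hardware tell old descriptors from new ones.
             */
            if (wrap && pidx == 0)
                    txr->epoch_bit = !txr->epoch_bit;
            if (pidx < EPOCH_ARR_SZ)
                    txr->epoch_arr[pidx] = txr->epoch_bit;
            return (pidx);
    }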
@@ -189,16 +215,21 @@ bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
struct tx_cmpl *cmpl = (struct tx_cmpl *)cpr->ring.vaddr;
int avail = 0;
uint32_t cons = cpr->cons;
+ uint32_t raw_cons = cpr->raw_cons;
bool v_bit = cpr->v_bit;
bool last_v_bit;
uint32_t last_cons;
+ uint32_t last_raw_cons;
uint16_t type;
uint16_t err;
for (;;) {
last_cons = cons;
+ last_raw_cons = raw_cons;
last_v_bit = v_bit;
+
NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
+ raw_cons++;
CMPL_PREFETCH_NEXT(cpr, cons);
if (!CMP_VALID(&cmpl[cons], v_bit))
@@ -226,8 +257,10 @@ bnxt_isc_txd_credits_update(void *sc, uint16_t txqid, bool clear)
default:
if (type & 1) {
NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
- if (!CMP_VALID(&cmpl[cons], v_bit))
+ raw_cons++;
+ if (!CMP_VALID(&cmpl[cons], v_bit)) {
goto done;
+ }
}
device_printf(softc->dev,
"Unhandled TX completion type %u\n", type);
@@ -238,6 +271,7 @@ done:
if (clear && avail) {
cpr->cons = last_cons;
+ cpr->raw_cons = last_raw_cons;
cpr->v_bit = last_v_bit;
softc->db_ops.bnxt_db_tx_cq(cpr, 0);
}
@@ -284,9 +318,16 @@ bnxt_isc_rxd_refill(void *sc, if_rxd_update_t iru)
rxbd[pidx].opaque = (((rxqid & 0xff) << 24) | (flid << 16)
| (frag_idxs[i]));
rxbd[pidx].addr = htole64(paddrs[i]);
- if (++pidx == rx_ring->ring_size)
+
+ /* Increment pidx and handle wrap-around */
+ if (++pidx == rx_ring->ring_size) {
pidx = 0;
+ rx_ring->epoch_bit = !rx_ring->epoch_bit;
+ }
+ if (pidx < EPOCH_ARR_SZ)
+ rx_ring->epoch_arr[pidx] = rx_ring->epoch_bit;
}
+
return;
}
@@ -337,6 +378,7 @@ bnxt_isc_rxd_available(void *sc, uint16_t rxqid, qidx_t idx, qidx_t budget)
type = le16toh(cmp[cons].type) & CMPL_BASE_TYPE_MASK;
switch (type) {
case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V3:
rcp = (void *)&cmp[cons];
ags = (rcp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK) >>
RX_PKT_CMPL_AGG_BUFS_SFT;
@@ -470,6 +512,7 @@ bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Now the second 16-byte BD */
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
rcph = &((struct rx_pkt_cmpl_hi *)cpr->ring.vaddr)[cpr->cons];
@@ -501,6 +544,7 @@ bnxt_pkt_get_l2(struct bnxt_softc *softc, if_rxd_info_t ri,
/* And finally the ag ring stuff. */
for (i=1; i < ri->iri_nfrags; i++) {
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
@@ -551,6 +595,7 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Now the second 16-byte BD */
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
flags2 = le32toh(tpas->high.flags2);
@@ -576,6 +621,7 @@ bnxt_pkt_get_tpa(struct bnxt_softc *softc, if_rxd_info_t ri,
/* Now the ag ring stuff. */
for (i=1; i < ri->iri_nfrags; i++) {
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
acp = &((struct rx_abuf_cmpl *)cpr->ring.vaddr)[cpr->cons];
@@ -612,6 +658,7 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
for (;;) {
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
CMPL_PREFETCH_NEXT(cpr, cpr->cons);
cmp = &((struct cmpl_base *)cpr->ring.vaddr)[cpr->cons];
@@ -621,10 +668,12 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
switch (type) {
case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V3:
return bnxt_pkt_get_l2(softc, ri, cpr, flags_type);
case CMPL_BASE_TYPE_RX_TPA_END:
return bnxt_pkt_get_tpa(softc, ri, cpr, flags_type);
case CMPL_BASE_TYPE_RX_TPA_START:
+ case CMPL_BASE_TYPE_RX_TPA_START_V3:
rtpa = (void *)&cmp_q[cpr->cons];
agg_id = (rtpa->agg_id &
RX_TPA_START_CMPL_AGG_ID_MASK) >>
@@ -632,6 +681,7 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
softc->rx_rings[ri->iri_qsidx].tpa_start[agg_id].low = *rtpa;
NEXT_CP_CONS_V(&cpr->ring, cpr->cons, cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring, ri->iri_cidx);
CMPL_PREFETCH_NEXT(cpr, cpr->cons);
@@ -645,6 +695,7 @@ bnxt_isc_rxd_pkt_get(void *sc, if_rxd_info_t ri)
if (type & 1) {
NEXT_CP_CONS_V(&cpr->ring, cpr->cons,
cpr->v_bit);
+ cpr->raw_cons++;
ri->iri_cidx = RING_NEXT(&cpr->ring,
ri->iri_cidx);
CMPL_PREFETCH_NEXT(cpr, cpr->cons);
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_ulp.c b/sys/dev/bnxt/bnxt_en/bnxt_ulp.c
new file mode 100644
index 000000000000..3c1f62cb4da3
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_ulp.c
@@ -0,0 +1,526 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/bitmap.h>
+#include <linux/rcupdate.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_var.h>
+#include <net/ethernet.h>
+#include <net/iflib.h>
+
+#include "hsi_struct_def.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_ulp.h"
+
+void bnxt_destroy_irq(struct bnxt_softc *softc);
+
+static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_ulp_ops *ulp_ops, void *handle)
+{
+ struct bnxt_softc *bp = edev->softc;
+ struct bnxt_ulp *ulp;
+ int rc = 0;
+
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ mtx_lock(&bp->en_ops_lock);
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (rcu_access_pointer(ulp->ulp_ops)) {
+ device_printf(bp->dev, "ulp id %d already registered\n", ulp_id);
+ rc = -EBUSY;
+ goto exit;
+ }
+
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ atomic_set(&ulp->ref_count, 0);
+ ulp->handle = handle;
+ rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
+
+ if (ulp_id == BNXT_ROCE_ULP) {
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) && bp->is_dev_init)
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info);
+ }
+
+exit:
+ mtx_unlock(&bp->en_ops_lock);
+ return rc;
+}
+
+static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct bnxt_softc *bp = edev->softc;
+ struct bnxt_ulp *ulp;
+ int i = 0;
+
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ ulp = &edev->ulp_tbl[ulp_id];
+ if (!rcu_access_pointer(ulp->ulp_ops)) {
+ device_printf(bp->dev, "ulp id %d not registered\n", ulp_id);
+ return -EINVAL;
+ }
+ if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested)
+ edev->en_ops->bnxt_free_msix(edev, ulp_id);
+
+ mtx_lock(&bp->en_ops_lock);
+ RCU_INIT_POINTER(ulp->ulp_ops, NULL);
+ synchronize_rcu();
+ ulp->max_async_event_id = 0;
+ ulp->async_events_bmap = NULL;
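+	/* Give in-flight ULP calls up to ~1 second to drop their references. */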
+ while (atomic_read(&ulp->ref_count) != 0 && i < 10) {
+ msleep(100);
+ i++;
+ }
+ mtx_unlock(&bp->en_ops_lock);
+ return 0;
+}
+
+static void bnxt_fill_msix_vecs(struct bnxt_softc *bp, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ int num_msix, idx, i;
+
+ num_msix = edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+ idx = edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ for (i = 0; i < num_msix; i++) {
+ ent[i].vector = bp->irq_tbl[idx + i].vector;
+ ent[i].ring_idx = idx + i;
+ if (BNXT_CHIP_P5_PLUS(bp))
+ ent[i].db_offset = DB_PF_OFFSET_P5;
+ else
+ ent[i].db_offset = (idx + i) * 0x80;
+	}
+}
+
+static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_msix_entry *ent, int num_msix)
+{
+ struct bnxt_softc *bp = edev->softc;
+ int avail_msix, idx;
+
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ if (edev->ulp_tbl[ulp_id].msix_requested)
+ return -EAGAIN;
+
+ idx = bp->total_irqs - BNXT_ROCE_IRQ_COUNT;
+ avail_msix = BNXT_ROCE_IRQ_COUNT;
+
+ mtx_lock(&bp->en_ops_lock);
+ edev->ulp_tbl[ulp_id].msix_base = idx;
+ edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
+
+ bnxt_fill_msix_vecs(bp, ent);
+ edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
+ mtx_unlock(&bp->en_ops_lock);
+ return avail_msix;
+}
+
+static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
+{
+ struct bnxt_softc *bp = edev->softc;
+
+ if (ulp_id != BNXT_ROCE_ULP)
+ return -EINVAL;
+
+ if (!(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return 0;
+
+ mtx_lock(&bp->en_ops_lock);
+ edev->ulp_tbl[ulp_id].msix_requested = 0;
+ edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
+ goto stopped;
+
+stopped:
+ mtx_unlock(&bp->en_ops_lock);
+
+ return 0;
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt_softc *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested;
+ }
+ return 0;
+}
+
+int bnxt_get_ulp_msix_base(struct bnxt_softc *bp)
+{
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_en_dev *edev = bp->edev;
+
+ if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
+ return edev->ulp_tbl[BNXT_ROCE_ULP].msix_base;
+ }
+ return 0;
+}
+
+static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id,
+ struct bnxt_fw_msg *fw_msg)
+{
+ struct bnxt_softc *softc = edev->softc;
+ int rc;
+
+ if ((ulp_id != BNXT_ROCE_ULP) && softc->fw_reset_state)
+ return -EBUSY;
+
+ rc = bnxt_hwrm_passthrough(softc, fw_msg->msg, fw_msg->msg_len, fw_msg->resp,
+ fw_msg->resp_max_len, fw_msg->timeout);
+ return rc;
+}
+
+static void bnxt_ulp_get(struct bnxt_ulp *ulp)
+{
+ atomic_inc(&ulp->ref_count);
+}
+
+static void bnxt_ulp_put(struct bnxt_ulp *ulp)
+{
+ atomic_dec(&ulp->ref_count);
+}
+
+void bnxt_ulp_stop(struct bnxt_softc *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
+ edev->en_state = bp->state;
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = ulp->ulp_ops;
+ if (!ops || !ops->ulp_stop)
+ continue;
+ ops->ulp_stop(ulp->handle);
+ }
+}
+
+void bnxt_ulp_start(struct bnxt_softc *bp, int err)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ edev->flags &= ~BNXT_EN_FLAG_ULP_STOPPED;
+ edev->en_state = bp->state;
+
+ if (err)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = ulp->ulp_ops;
+ if (!ops || !ops->ulp_start)
+ continue;
+ ops->ulp_start(ulp->handle);
+ }
+}
+
+void bnxt_ulp_sriov_cfg(struct bnxt_softc *bp, int num_vfs)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ rcu_read_lock();
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_sriov_config) {
+ rcu_read_unlock();
+ continue;
+ }
+ bnxt_ulp_get(ulp);
+ rcu_read_unlock();
+ ops->ulp_sriov_config(ulp->handle, num_vfs);
+ bnxt_ulp_put(ulp);
+ }
+}
+
+void bnxt_ulp_shutdown(struct bnxt_softc *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = ulp->ulp_ops;
+ if (!ops || !ops->ulp_shutdown)
+ continue;
+ ops->ulp_shutdown(ulp->handle);
+ }
+}
+
+void bnxt_ulp_irq_stop(struct bnxt_softc *bp)
+{
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+
+ if (!edev || !(edev->flags & BNXT_EN_FLAG_MSIX_REQUESTED))
+ return;
+
+ if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[BNXT_ROCE_ULP];
+
+ if (!ulp->msix_requested)
+ return;
+
+ ops = ulp->ulp_ops;
+ if (!ops || !ops->ulp_irq_stop)
+ return;
+ ops->ulp_irq_stop(ulp->handle);
+ }
+}
+
+void bnxt_ulp_async_events(struct bnxt_softc *bp, struct hwrm_async_event_cmpl *cmpl)
+{
+ u16 event_id = le16_to_cpu(cmpl->event_id);
+ struct bnxt_en_dev *edev = bp->edev;
+ struct bnxt_ulp_ops *ops;
+ int i;
+
+ if (!edev)
+ return;
+
+ rcu_read_lock();
+ for (i = 0; i < BNXT_MAX_ULP; i++) {
+ struct bnxt_ulp *ulp = &edev->ulp_tbl[i];
+
+ ops = rcu_dereference(ulp->ulp_ops);
+ if (!ops || !ops->ulp_async_notifier)
+ continue;
+ if (!ulp->async_events_bmap ||
+ event_id > ulp->max_async_event_id)
+ continue;
+
+ /* Read max_async_event_id first before testing the bitmap. */
+ rmb();
+ if (edev->flags & BNXT_EN_FLAG_ULP_STOPPED)
+ continue;
+
+ if (test_bit(event_id, ulp->async_events_bmap))
+ ops->ulp_async_notifier(ulp->handle, cmpl);
+ }
+ rcu_read_unlock();
+}
+
+static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
+ unsigned long *events_bmap, u16 max_id)
+{
+ struct bnxt_softc *bp = edev->softc;
+ struct bnxt_ulp *ulp;
+
+ if (ulp_id >= BNXT_MAX_ULP)
+ return -EINVAL;
+
+ mtx_lock(&bp->en_ops_lock);
+ ulp = &edev->ulp_tbl[ulp_id];
+ ulp->async_events_bmap = events_bmap;
+ wmb();
+ ulp->max_async_event_id = max_id;
+ bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
+ mtx_unlock(&bp->en_ops_lock);
+ return 0;
+}
+
+void bnxt_destroy_irq(struct bnxt_softc *softc)
+{
+ kfree(softc->irq_tbl);
+}
+
+static int bnxt_populate_irq(struct bnxt_softc *softc)
+{
+ struct resource_list *rl = NULL;
+ struct resource_list_entry *rle = NULL;
+ struct bnxt_msix_tbl *irq_tbl = NULL;
+ struct pci_devinfo *dinfo = NULL;
+ int i;
+
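+	/*
+	 * Reserve BNXT_ROCE_IRQ_COUNT vectors beyond the L2 RX queue
+	 * vectors for the RoCE ULP.
+	 */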
+ softc->total_irqs = softc->scctx->isc_nrxqsets + BNXT_ROCE_IRQ_COUNT;
+ irq_tbl = kzalloc(softc->total_irqs * sizeof(*softc->irq_tbl), GFP_KERNEL);
+
+ if (!irq_tbl) {
+ device_printf(softc->dev, "Failed to allocate IRQ table\n");
+ return -1;
+ }
+ dinfo = device_get_ivars(softc->pdev->dev.bsddev);
+ rl = &dinfo->resources;
+ rle = resource_list_find(rl, SYS_RES_IRQ, 1);
+ softc->pdev->dev.irq_start = rle->start;
+ softc->pdev->dev.irq_end = rle->start + softc->total_irqs;
+
+ for (i = 0; i < softc->total_irqs; i++) {
+ irq_tbl[i].entry = i;
+ irq_tbl[i].vector = softc->pdev->dev.irq_start + i;
+ }
+
+ softc->irq_tbl = irq_tbl;
+
+ return 0;
+}
+
+static const struct bnxt_en_ops bnxt_en_ops_tbl = {
+ .bnxt_register_device = bnxt_register_dev,
+ .bnxt_unregister_device = bnxt_unregister_dev,
+ .bnxt_request_msix = bnxt_req_msix_vecs,
+ .bnxt_free_msix = bnxt_free_msix_vecs,
+ .bnxt_send_fw_msg = bnxt_send_msg,
+ .bnxt_register_fw_async_events = bnxt_register_async_events,
+};
+
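(A sketch of how a ULP would consume the ops table above; the bnxt_re_* names
are hypothetical and stand in for the RoCE driver.)

    static void
    bnxt_re_stop(void *handle)
    {
            /* Quiesce the RoCE device before the L2 driver resets. */
    }

    static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
            .ulp_stop = bnxt_re_stop,
    };

    static int
    bnxt_re_attach(struct bnxt_en_dev *edev, void *handle)
    {
            struct bnxt_msix_entry ent[BNXT_ROCE_IRQ_COUNT];
            int rc;

            rc = edev->en_ops->bnxt_register_device(edev, BNXT_ROCE_ULP,
                &bnxt_re_ulp_ops, handle);
            if (rc != 0)
                    return (rc);
            /* A negative return is an error; positive is the vector count. */
            rc = edev->en_ops->bnxt_request_msix(edev, BNXT_ROCE_ULP, ent,
                nitems(ent));
            return (rc < 0 ? rc : 0);
    }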
+void bnxt_aux_dev_release(struct device *dev)
+{
+ struct bnxt_aux_dev *bnxt_adev =
+ container_of(dev, struct bnxt_aux_dev, aux_dev.dev);
+ struct bnxt_softc *bp = bnxt_adev->edev->softc;
+
+ kfree(bnxt_adev->edev);
+ bnxt_adev->edev = NULL;
+ bp->edev = NULL;
+}
+
+static inline void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt_softc *bp)
+{
+ edev->en_ops = &bnxt_en_ops_tbl;
+ edev->net = bp->ifp;
+ edev->pdev = bp->pdev;
+ edev->softc = bp;
+ edev->l2_db_size = bp->db_size;
+ mtx_init(&bp->en_ops_lock, "Ethernet ops lock", NULL, MTX_DEF);
+
+ if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
+ if (bp->flags & BNXT_FLAG_ROCEV2_CAP)
+ edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP;
+ if (bp->is_asym_q)
+ edev->flags |= BNXT_EN_FLAG_ASYM_Q;
+ edev->hwrm_bar = bp->hwrm_bar;
+ edev->port_partition_type = bp->port_partition_type;
+ edev->ulp_version = BNXT_ULP_VERSION;
+}
+
+int bnxt_rdma_aux_device_del(struct bnxt_softc *softc)
+{
+ struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
+ struct auxiliary_device *adev;
+
+ adev = &bnxt_adev->aux_dev;
+ auxiliary_device_delete(adev);
+ auxiliary_device_uninit(adev);
+ bnxt_destroy_irq(softc);
+
+ return 0;
+}
+
+int bnxt_rdma_aux_device_add(struct bnxt_softc *bp)
+{
+ struct bnxt_aux_dev *bnxt_adev = bp->aux_dev;
+ struct bnxt_en_dev *edev = bnxt_adev->edev;
+ struct auxiliary_device *aux_dev;
+ int ret = -1;
+
+ if (bnxt_populate_irq(bp))
+ return ret;
+
+ device_printf(bp->dev, "V:D:SV:SD %x:%x:%x:%x, irq 0x%x, "
+ "devfn 0x%x, cla 0x%x, rev 0x%x, msi_en 0x%x\n",
+ bp->pdev->vendor, bp->pdev->device, bp->pdev->subsystem_vendor,
+ bp->pdev->subsystem_device, bp->pdev->irq, bp->pdev->devfn,
+ bp->pdev->class, bp->pdev->revision, bp->pdev->msi_enabled);
+
+ aux_dev = &bnxt_adev->aux_dev;
+ aux_dev->id = bnxt_adev->id;
+ aux_dev->name = "rdma";
+ aux_dev->dev.parent = &bp->pdev->dev;
+ aux_dev->dev.release = bnxt_aux_dev_release;
+
+ if (!edev) {
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+ }
+
+ bnxt_set_edev_info(edev, bp);
+ bnxt_adev->edev = edev;
+ bp->edev = edev;
+
+ ret = auxiliary_device_init(aux_dev);
+ if (ret)
+ goto err_free_edev;
+
+ ret = auxiliary_device_add(aux_dev);
+ if (ret)
+ goto err_dev_uninit;
+
+ return 0;
+err_dev_uninit:
+ auxiliary_device_uninit(aux_dev);
+err_free_edev:
+ kfree(edev);
+ bnxt_adev->edev = NULL;
+ bp->edev = NULL;
+ return ret;
+}
diff --git a/sys/dev/bnxt/bnxt_en/bnxt_ulp.h b/sys/dev/bnxt/bnxt_en/bnxt_ulp.h
new file mode 100644
index 000000000000..0108293046d7
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_en/bnxt_ulp.h
@@ -0,0 +1,161 @@
+/*-
+ * Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2024 Broadcom, All Rights Reserved.
+ * The term Broadcom refers to Broadcom Limited and/or its subsidiaries
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BNXT_ULP_H
+#define BNXT_ULP_H
+
+#include <linux/rcupdate.h>
+#include "bnxt.h"
+
+#define BNXT_ROCE_ULP 0
+#define BNXT_OTHER_ULP 1
+#define BNXT_MAX_ULP 2
+
+#define BNXT_MIN_ROCE_CP_RINGS 2
+#define BNXT_MIN_ROCE_STAT_CTXS 1
+
+struct hwrm_async_event_cmpl;
+struct bnxt_softc;
+struct bnxt_bar_info;
+
+struct bnxt_msix_entry {
+ uint32_t vector;
+ uint32_t ring_idx;
+ uint32_t db_offset;
+};
+
+struct bnxt_ulp_ops {
+ void (*ulp_async_notifier)(void *, struct hwrm_async_event_cmpl *);
+ void (*ulp_stop)(void *);
+ void (*ulp_start)(void *);
+ void (*ulp_sriov_config)(void *, int);
+ void (*ulp_shutdown)(void *);
+ void (*ulp_irq_stop)(void *);
+ void (*ulp_irq_restart)(void *, struct bnxt_msix_entry *);
+};
+
+struct bnxt_fw_msg {
+ void *msg;
+ int msg_len;
+ void *resp;
+ int resp_max_len;
+ int timeout;
+};
+
+struct bnxt_ulp {
+ void *handle;
+ struct bnxt_ulp_ops __rcu *ulp_ops;
+ unsigned long *async_events_bmap;
+ u16 max_async_event_id;
+ u16 msix_requested;
+ u16 msix_base;
+ atomic_t ref_count;
+};
+
+struct bnxt_en_dev {
+ struct ifnet *net;
+ struct pci_dev *pdev;
+ struct bnxt_softc *softc;
+ u32 flags;
+ #define BNXT_EN_FLAG_ROCEV1_CAP 0x1
+ #define BNXT_EN_FLAG_ROCEV2_CAP 0x2
+ #define BNXT_EN_FLAG_ROCE_CAP (BNXT_EN_FLAG_ROCEV1_CAP | \
+ BNXT_EN_FLAG_ROCEV2_CAP)
+ #define BNXT_EN_FLAG_MSIX_REQUESTED 0x4
+ #define BNXT_EN_FLAG_ULP_STOPPED 0x8
+ #define BNXT_EN_FLAG_ASYM_Q 0x10
+ #define BNXT_EN_FLAG_MULTI_HOST 0x20
+#define BNXT_EN_ASYM_Q(edev) ((edev)->flags & BNXT_EN_FLAG_ASYM_Q)
+#define BNXT_EN_MH(edev) ((edev)->flags & BNXT_EN_FLAG_MULTI_HOST)
+ const struct bnxt_en_ops *en_ops;
+ struct bnxt_ulp ulp_tbl[BNXT_MAX_ULP];
+ int l2_db_size; /* Doorbell BAR size in
+ * bytes mapped by L2
+ * driver.
+ */
+ int l2_db_size_nc; /* Doorbell BAR size in
+ * bytes mapped as non-
+ * cacheable.
+ */
+ u32 ulp_version; /* bnxt_re checks the
+ * ulp_version is correct
+ * to ensure compatibility
+ * with bnxt_en.
+ */
+ #define BNXT_ULP_VERSION 0x695a0008 /* Change this when any interface
+ * structure or API changes
+ * between bnxt_en and bnxt_re.
+ */
+ unsigned long en_state;
+ void __iomem *bar0;
+ u16 hw_ring_stats_size;
+ u16 pf_port_id;
+ u8 port_partition_type;
+#define BNXT_EN_NPAR(edev) ((edev)->port_partition_type)
+ u8 port_count;
+ struct bnxt_dbr *en_dbr;
+ struct bnxt_bar_info hwrm_bar;
+ u32 espeed;
+};
+
+struct bnxt_en_ops {
+ int (*bnxt_register_device)(struct bnxt_en_dev *, int,
+ struct bnxt_ulp_ops *, void *);
+ int (*bnxt_unregister_device)(struct bnxt_en_dev *, int);
+ int (*bnxt_request_msix)(struct bnxt_en_dev *, int,
+ struct bnxt_msix_entry *, int);
+ int (*bnxt_free_msix)(struct bnxt_en_dev *, int);
+ int (*bnxt_send_fw_msg)(struct bnxt_en_dev *, int,
+ struct bnxt_fw_msg *);
+ int (*bnxt_register_fw_async_events)(struct bnxt_en_dev *, int,
+ unsigned long *, u16);
+ int (*bnxt_dbr_complete)(struct bnxt_en_dev *, int, u32);
+};
+
+static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id)
+{
+ if (edev && rcu_access_pointer(edev->ulp_tbl[ulp_id].ulp_ops))
+ return true;
+ return false;
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt_softc *bp);
+int bnxt_get_ulp_msix_base(struct bnxt_softc *bp);
+int bnxt_get_ulp_stat_ctxs(struct bnxt_softc *bp);
+void bnxt_ulp_stop(struct bnxt_softc *bp);
+void bnxt_ulp_start(struct bnxt_softc *bp, int err);
+void bnxt_ulp_sriov_cfg(struct bnxt_softc *bp, int num_vfs);
+void bnxt_ulp_shutdown(struct bnxt_softc *bp);
+void bnxt_ulp_irq_stop(struct bnxt_softc *bp);
+void bnxt_ulp_irq_restart(struct bnxt_softc *bp, int err);
+void bnxt_ulp_async_events(struct bnxt_softc *bp, struct hwrm_async_event_cmpl *cmpl);
+struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev);
+void bnxt_aux_dev_release(struct device *dev);
+int bnxt_rdma_aux_device_add(struct bnxt_softc *bp);
+int bnxt_rdma_aux_device_del(struct bnxt_softc *bp);
+#endif
diff --git a/sys/dev/bnxt/convert_hsi.pl b/sys/dev/bnxt/bnxt_en/convert_hsi.pl
index 19f5d2d3aea1..19f5d2d3aea1 100755
--- a/sys/dev/bnxt/convert_hsi.pl
+++ b/sys/dev/bnxt/bnxt_en/convert_hsi.pl
diff --git a/sys/dev/bnxt/hsi_struct_def.h b/sys/dev/bnxt/bnxt_en/hsi_struct_def.h
index 12b112c42ca7..5914c70ce671 100644
--- a/sys/dev/bnxt/hsi_struct_def.h
+++ b/sys/dev/bnxt/bnxt_en/hsi_struct_def.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright (c) 2016 Broadcom, All Rights Reserved.
+ * Copyright (c) 2025 Broadcom, All Rights Reserved.
* The term Broadcom refers to Broadcom Limited and/or its subsidiaries
*
* Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@
__FBSDID("$FreeBSD$");
/*
- * Copyright(c) 2001-2023, Broadcom. All rights reserved. The
+ * Copyright(c) 2001-2025, Broadcom. All rights reserved. The
* term Broadcom refers to Broadcom Inc. and/or its subsidiaries.
* Proprietary and Confidential Information.
*
@@ -45,6 +45,10 @@ __FBSDID("$FreeBSD$");
#ifndef _HSI_STRUCT_DEF_H_
#define _HSI_STRUCT_DEF_H_
+#if defined(HAVE_STDINT_H)
+#include <stdint.h>
+#endif
+
/* This is the HWRM command header. */
/* hwrm_cmd_hdr (size:128b/16B) */
@@ -111,6 +115,10 @@ typedef struct hwrm_resp_hdr {
#define TLV_TYPE_QUERY_ROCE_CC_GEN1 UINT32_C(0x4)
/* RoCE slow path command to modify CC Gen1 support. */
#define TLV_TYPE_MODIFY_ROCE_CC_GEN1 UINT32_C(0x5)
+/* RoCE slow path command to query CC Gen2 support. */
+#define TLV_TYPE_QUERY_ROCE_CC_GEN2 UINT32_C(0x6)
+/* RoCE slow path command to modify CC Gen2 support. */
+#define TLV_TYPE_MODIFY_ROCE_CC_GEN2 UINT32_C(0x7)
/* Engine CKV - The Alias key EC curve and ECC public key information. */
#define TLV_TYPE_ENGINE_CKV_ALIAS_ECC_PUBLIC_KEY UINT32_C(0x8001)
/* Engine CKV - Initialization vector. */
@@ -193,14 +201,14 @@ typedef struct tlv {
typedef struct input {
/*
- * This value indicates what type of request this is. The format
+ * This value indicates what type of request this is. The format
* for the rest of the command is determined by this field.
*/
uint16_t req_type;
/*
* This value indicates the what completion ring the request will
- * be optionally completed on. If the value is -1, then no
- * CR completion will be generated. Any other value must be a
+ * be optionally completed on. If the value is -1, then no
+ * CR completion will be generated. Any other value must be a
* valid CR ring_id value for this function.
*/
uint16_t cmpl_ring;
@@ -216,7 +224,7 @@ typedef struct input {
uint16_t target_id;
/*
* This is the host address where the response will be written
- * when the request is complete. This area must be 16B aligned
+ * when the request is complete. This area must be 16B aligned
* and must be cleared to zero before the request is made.
*/
uint64_t resp_addr;
@@ -238,7 +246,7 @@ typedef struct output {
/* This field provides original sequence number of the command. */
uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The
+ * This field is the length of the response in bytes. The
* last byte of the response is a valid flag that will read
* as '1' when the command has been completely written to
* memory.
@@ -374,6 +382,14 @@ typedef struct hwrm_short_input {
((x) == 0x85 ? "HWRM_QUEUE_VLANPRI2PRI_CFG": \
((x) == 0x86 ? "HWRM_QUEUE_GLOBAL_CFG": \
((x) == 0x87 ? "HWRM_QUEUE_GLOBAL_QCFG": \
+ ((x) == 0x88 ? "HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG": \
+ ((x) == 0x89 ? "HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG": \
+ ((x) == 0x8a ? "HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG": \
+ ((x) == 0x8b ? "HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG": \
+ ((x) == 0x8c ? "HWRM_QUEUE_QCAPS": \
+ ((x) == 0x8d ? "HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG": \
+ ((x) == 0x8e ? "HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG": \
+ ((x) == 0x8f ? "HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG": \
((x) == 0x90 ? "HWRM_CFA_L2_FILTER_ALLOC": \
((x) == 0x91 ? "HWRM_CFA_L2_FILTER_FREE": \
((x) == 0x92 ? "HWRM_CFA_L2_FILTER_CFG": \
@@ -392,6 +408,7 @@ typedef struct hwrm_short_input {
((x) == 0xa0 ? "HWRM_TUNNEL_DST_PORT_QUERY": \
((x) == 0xa1 ? "HWRM_TUNNEL_DST_PORT_ALLOC": \
((x) == 0xa2 ? "HWRM_TUNNEL_DST_PORT_FREE": \
+ ((x) == 0xa3 ? "HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG": \
((x) == 0xaf ? "HWRM_STAT_CTX_ENG_QUERY": \
((x) == 0xb0 ? "HWRM_STAT_CTX_ALLOC": \
((x) == 0xb1 ? "HWRM_STAT_CTX_FREE": \
@@ -439,6 +456,7 @@ typedef struct hwrm_short_input {
((x) == 0xdb ? "HWRM_PORT_EP_TX_CFG": \
((x) == 0xdc ? "HWRM_PORT_CFG": \
((x) == 0xdd ? "HWRM_PORT_QCFG": \
+ ((x) == 0xdf ? "HWRM_PORT_MAC_QCAPS": \
((x) == 0xe0 ? "HWRM_TEMP_MONITOR_QUERY": \
((x) == 0xe1 ? "HWRM_REG_POWER_QUERY": \
((x) == 0xe2 ? "HWRM_CORE_FREQUENCY_QUERY": \
@@ -456,7 +474,7 @@ typedef struct hwrm_short_input {
((x) == 0xfa ? "HWRM_CFA_METER_INSTANCE_CFG": \
((x) == 0xfd ? "HWRM_CFA_VFR_ALLOC": \
((x) == 0xfe ? "HWRM_CFA_VFR_FREE": \
- "Unknown decode" )))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) : \
+ "Unknown decode" )))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) : \
(((x) < 0x180) ? \
((x) == 0x100 ? "HWRM_CFA_VF_PAIR_ALLOC": \
((x) == 0x101 ? "HWRM_CFA_VF_PAIR_FREE": \
@@ -500,6 +518,7 @@ typedef struct hwrm_short_input {
((x) == 0x127 ? "HWRM_CFA_LAG_GROUP_MEMBER_UNRGTR": \
((x) == 0x128 ? "HWRM_CFA_TLS_FILTER_ALLOC": \
((x) == 0x129 ? "HWRM_CFA_TLS_FILTER_FREE": \
+ ((x) == 0x12a ? "HWRM_CFA_RELEASE_AFM_FUNC": \
((x) == 0x12e ? "HWRM_ENGINE_CKV_STATUS": \
((x) == 0x12f ? "HWRM_ENGINE_CKV_CKEK_ADD": \
((x) == 0x130 ? "HWRM_ENGINE_CKV_CKEK_DELETE": \
@@ -539,7 +558,7 @@ typedef struct hwrm_short_input {
((x) == 0x163 ? "HWRM_ENGINE_NQ_FREE": \
((x) == 0x164 ? "HWRM_ENGINE_ON_DIE_RQE_CREDITS": \
((x) == 0x165 ? "HWRM_ENGINE_FUNC_QCFG": \
- "Unknown decode" ))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) : \
+ "Unknown decode" )))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))) : \
(((x) < 0x200) ? \
((x) == 0x190 ? "HWRM_FUNC_RESOURCE_QCAPS": \
((x) == 0x191 ? "HWRM_FUNC_VF_RESOURCE_CFG": \
@@ -570,7 +589,17 @@ typedef struct hwrm_short_input {
((x) == 0x1aa ? "HWRM_FUNC_DBR_RECOVERY_COMPLETED": \
((x) == 0x1ab ? "HWRM_FUNC_SYNCE_CFG": \
((x) == 0x1ac ? "HWRM_FUNC_SYNCE_QCFG": \
- "Unknown decode" ))))))))))))))))))))))))))))) : \
+ ((x) == 0x1ad ? "HWRM_FUNC_KEY_CTX_FREE": \
+ ((x) == 0x1ae ? "HWRM_FUNC_LAG_MODE_CFG": \
+ ((x) == 0x1af ? "HWRM_FUNC_LAG_MODE_QCFG": \
+ ((x) == 0x1b0 ? "HWRM_FUNC_LAG_CREATE": \
+ ((x) == 0x1b1 ? "HWRM_FUNC_LAG_UPDATE": \
+ ((x) == 0x1b2 ? "HWRM_FUNC_LAG_FREE": \
+ ((x) == 0x1b3 ? "HWRM_FUNC_LAG_QCFG": \
+ ((x) == 0x1c2 ? "HWRM_FUNC_TIMEDTX_PACING_RATE_ADD": \
+ ((x) == 0x1c3 ? "HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE": \
+ ((x) == 0x1c4 ? "HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY": \
+ "Unknown decode" ))))))))))))))))))))))))))))))))))))))) : \
(((x) < 0x280) ? \
((x) == 0x200 ? "HWRM_SELFTEST_QLIST": \
((x) == 0x201 ? "HWRM_SELFTEST_EXEC": \
@@ -586,9 +615,9 @@ typedef struct hwrm_short_input {
((x) == 0x20b ? "HWRM_MFG_FRU_EEPROM_READ": \
((x) == 0x20c ? "HWRM_MFG_SOC_IMAGE": \
((x) == 0x20d ? "HWRM_MFG_SOC_QSTATUS": \
- ((x) == 0x20e ? "HWRM_MFG_PARAM_SEEPROM_SYNC": \
- ((x) == 0x20f ? "HWRM_MFG_PARAM_SEEPROM_READ": \
- ((x) == 0x210 ? "HWRM_MFG_PARAM_SEEPROM_HEALTH": \
+ ((x) == 0x20e ? "HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE": \
+ ((x) == 0x20f ? "HWRM_MFG_PARAM_CRITICAL_DATA_READ": \
+ ((x) == 0x210 ? "HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH": \
((x) == 0x211 ? "HWRM_MFG_PRVSN_EXPORT_CSR": \
((x) == 0x212 ? "HWRM_MFG_PRVSN_IMPORT_CERT": \
((x) == 0x213 ? "HWRM_MFG_PRVSN_GET_STATE": \
@@ -597,12 +626,27 @@ typedef struct hwrm_short_input {
((x) == 0x216 ? "HWRM_MFG_SELFTEST_QLIST": \
((x) == 0x217 ? "HWRM_MFG_SELFTEST_EXEC": \
((x) == 0x218 ? "HWRM_STAT_GENERIC_QSTATS": \
- "Unknown decode" ))))))))))))))))))))))))) : \
+ ((x) == 0x219 ? "HWRM_MFG_PRVSN_EXPORT_CERT": \
+ ((x) == 0x21a ? "HWRM_STAT_DB_ERROR_QSTATS": \
+ ((x) == 0x230 ? "HWRM_PORT_POE_CFG": \
+ ((x) == 0x231 ? "HWRM_PORT_POE_QCFG": \
+ ((x) == 0x258 ? "HWRM_UDCC_QCAPS": \
+ ((x) == 0x259 ? "HWRM_UDCC_CFG": \
+ ((x) == 0x25a ? "HWRM_UDCC_QCFG": \
+ ((x) == 0x25b ? "HWRM_UDCC_SESSION_CFG": \
+ ((x) == 0x25c ? "HWRM_UDCC_SESSION_QCFG": \
+ ((x) == 0x25d ? "HWRM_UDCC_SESSION_QUERY": \
+ ((x) == 0x25e ? "HWRM_UDCC_COMP_CFG": \
+ ((x) == 0x25f ? "HWRM_UDCC_COMP_QCFG": \
+ ((x) == 0x260 ? "HWRM_UDCC_COMP_QUERY": \
+ ((x) == 0x261 ? "HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS": \
+ ((x) == 0x262 ? "HWRM_QUEUE_PFCWD_TIMEOUT_CFG": \
+ ((x) == 0x263 ? "HWRM_QUEUE_PFCWD_TIMEOUT_QCFG": \
+ "Unknown decode" ))))))))))))))))))))))))))))))))))))))))) : \
(((x) < 0x300) ? \
((x) == 0x2bc ? "HWRM_TF": \
((x) == 0x2bd ? "HWRM_TF_VERSION_GET": \
((x) == 0x2c6 ? "HWRM_TF_SESSION_OPEN": \
- ((x) == 0x2c7 ? "HWRM_TF_SESSION_ATTACH": \
((x) == 0x2c8 ? "HWRM_TF_SESSION_REGISTER": \
((x) == 0x2c9 ? "HWRM_TF_SESSION_UNREGISTER": \
((x) == 0x2ca ? "HWRM_TF_SESSION_CLOSE": \
@@ -617,14 +661,6 @@ typedef struct hwrm_short_input {
((x) == 0x2da ? "HWRM_TF_TBL_TYPE_GET": \
((x) == 0x2db ? "HWRM_TF_TBL_TYPE_SET": \
((x) == 0x2dc ? "HWRM_TF_TBL_TYPE_BULK_GET": \
- ((x) == 0x2e2 ? "HWRM_TF_CTXT_MEM_ALLOC": \
- ((x) == 0x2e3 ? "HWRM_TF_CTXT_MEM_FREE": \
- ((x) == 0x2e4 ? "HWRM_TF_CTXT_MEM_RGTR": \
- ((x) == 0x2e5 ? "HWRM_TF_CTXT_MEM_UNRGTR": \
- ((x) == 0x2e6 ? "HWRM_TF_EXT_EM_QCAPS": \
- ((x) == 0x2e7 ? "HWRM_TF_EXT_EM_OP": \
- ((x) == 0x2e8 ? "HWRM_TF_EXT_EM_CFG": \
- ((x) == 0x2e9 ? "HWRM_TF_EXT_EM_QCFG": \
((x) == 0x2ea ? "HWRM_TF_EM_INSERT": \
((x) == 0x2eb ? "HWRM_TF_EM_DELETE": \
((x) == 0x2ec ? "HWRM_TF_EM_HASH_INSERT": \
@@ -637,7 +673,13 @@ typedef struct hwrm_short_input {
((x) == 0x2fd ? "HWRM_TF_GLOBAL_CFG_GET": \
((x) == 0x2fe ? "HWRM_TF_IF_TBL_SET": \
((x) == 0x2ff ? "HWRM_TF_IF_TBL_GET": \
- "Unknown decode" )))))))))))))))))))))))))))))))))))))) : \
+ "Unknown decode" ))))))))))))))))))))))))))))) : \
+ (((x) < 0x380) ? \
+ ((x) == 0x300 ? "HWRM_TF_RESC_USAGE_SET": \
+ ((x) == 0x301 ? "HWRM_TF_RESC_USAGE_QUERY": \
+ ((x) == 0x302 ? "HWRM_TF_TBL_TYPE_ALLOC": \
+ ((x) == 0x303 ? "HWRM_TF_TBL_TYPE_FREE": \
+ "Unknown decode" )))) : \
(((x) < 0x400) ? \
((x) == 0x380 ? "HWRM_TFC_TBL_SCOPE_QCAPS": \
((x) == 0x381 ? "HWRM_TFC_TBL_SCOPE_ID_ALLOC": \
@@ -663,11 +705,17 @@ typedef struct hwrm_short_input {
((x) == 0x395 ? "HWRM_TFC_TCAM_ALLOC": \
((x) == 0x396 ? "HWRM_TFC_TCAM_ALLOC_SET": \
((x) == 0x397 ? "HWRM_TFC_TCAM_FREE": \
- "Unknown decode" )))))))))))))))))))))))) : \
+ ((x) == 0x398 ? "HWRM_TFC_IF_TBL_SET": \
+ ((x) == 0x399 ? "HWRM_TFC_IF_TBL_GET": \
+ ((x) == 0x39a ? "HWRM_TFC_TBL_SCOPE_CONFIG_GET": \
+ ((x) == 0x39b ? "HWRM_TFC_RESC_USAGE_QUERY": \
+ "Unknown decode" )))))))))))))))))))))))))))) : \
(((x) < 0x480) ? \
((x) == 0x400 ? "HWRM_SV": \
"Unknown decode" ) : \
(((x) < 0xff80) ? \
+ ((x) == 0xff0e ? "HWRM_DBG_SERDES_TEST": \
+ ((x) == 0xff0f ? "HWRM_DBG_LOG_BUFFER_FLUSH": \
((x) == 0xff10 ? "HWRM_DBG_READ_DIRECT": \
((x) == 0xff11 ? "HWRM_DBG_READ_INDIRECT": \
((x) == 0xff12 ? "HWRM_DBG_WRITE_DIRECT": \
@@ -696,8 +744,13 @@ typedef struct hwrm_short_input {
((x) == 0xff29 ? "HWRM_DBG_USEQ_RUN": \
((x) == 0xff2a ? "HWRM_DBG_USEQ_DELIVERY_REQ": \
((x) == 0xff2b ? "HWRM_DBG_USEQ_RESP_HDR": \
- "Unknown decode" )))))))))))))))))))))))))))) : \
- (((x) <= 0xffff) ? \
+ ((x) == 0xff2c ? "HWRM_DBG_COREDUMP_CAPTURE": \
+ ((x) == 0xff2d ? "HWRM_DBG_PTRACE": \
+ ((x) == 0xff2e ? "HWRM_DBG_SIM_CABLE_STATE": \
+ "Unknown decode" ))))))))))))))))))))))))))))))))) : \
+ (((x) <= UINT16_MAX) ? \
+ ((x) == 0xffea ? "HWRM_NVM_GET_VPD_FIELD_INFO": \
+ ((x) == 0xffeb ? "HWRM_NVM_SET_VPD_FIELD_INFO": \
((x) == 0xffec ? "HWRM_NVM_DEFRAG": \
((x) == 0xffed ? "HWRM_NVM_REQ_ARBITRATION": \
((x) == 0xffee ? "HWRM_NVM_FACTORY_DEFAULTS": \
@@ -718,8 +771,8 @@ typedef struct hwrm_short_input {
((x) == 0xfffd ? "HWRM_NVM_READ": \
((x) == 0xfffe ? "HWRM_NVM_WRITE": \
((x) == 0xffff ? "HWRM_NVM_RAW_WRITE_BLK": \
- "Unknown decode" )))))))))))))))))))) : \
- "Unknown decode" ))))))))))
+ "Unknown decode" )))))))))))))))))))))) : \
+ "Unknown decode" )))))))))))
/*
@@ -800,7 +853,7 @@ typedef struct cmd_nums {
#define HWRM_FUNC_VLAN_QCFG UINT32_C(0x34)
#define HWRM_QUEUE_PFCENABLE_QCFG UINT32_C(0x35)
#define HWRM_QUEUE_PFCENABLE_CFG UINT32_C(0x36)
- #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37)
+ #define HWRM_QUEUE_PRI2COS_QCFG UINT32_C(0x37)
#define HWRM_QUEUE_PRI2COS_CFG UINT32_C(0x38)
#define HWRM_QUEUE_COS2BW_QCFG UINT32_C(0x39)
#define HWRM_QUEUE_COS2BW_CFG UINT32_C(0x3a)
@@ -848,6 +901,14 @@ typedef struct cmd_nums {
#define HWRM_QUEUE_VLANPRI2PRI_CFG UINT32_C(0x85)
#define HWRM_QUEUE_GLOBAL_CFG UINT32_C(0x86)
#define HWRM_QUEUE_GLOBAL_QCFG UINT32_C(0x87)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG UINT32_C(0x88)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG UINT32_C(0x89)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG UINT32_C(0x8a)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG UINT32_C(0x8b)
+ #define HWRM_QUEUE_QCAPS UINT32_C(0x8c)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_QCFG UINT32_C(0x8d)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG UINT32_C(0x8e)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG UINT32_C(0x8f)
#define HWRM_CFA_L2_FILTER_ALLOC UINT32_C(0x90)
#define HWRM_CFA_L2_FILTER_FREE UINT32_C(0x91)
#define HWRM_CFA_L2_FILTER_CFG UINT32_C(0x92)
@@ -871,6 +932,7 @@ typedef struct cmd_nums {
#define HWRM_TUNNEL_DST_PORT_QUERY UINT32_C(0xa0)
#define HWRM_TUNNEL_DST_PORT_ALLOC UINT32_C(0xa1)
#define HWRM_TUNNEL_DST_PORT_FREE UINT32_C(0xa2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG UINT32_C(0xa3)
#define HWRM_STAT_CTX_ENG_QUERY UINT32_C(0xaf)
#define HWRM_STAT_CTX_ALLOC UINT32_C(0xb0)
#define HWRM_STAT_CTX_FREE UINT32_C(0xb1)
@@ -926,6 +988,8 @@ typedef struct cmd_nums {
#define HWRM_PORT_EP_TX_CFG UINT32_C(0xdb)
#define HWRM_PORT_CFG UINT32_C(0xdc)
#define HWRM_PORT_QCFG UINT32_C(0xdd)
+ /* Queries MAC capabilities for the specified port */
+ #define HWRM_PORT_MAC_QCAPS UINT32_C(0xdf)
#define HWRM_TEMP_MONITOR_QUERY UINT32_C(0xe0)
#define HWRM_REG_POWER_QUERY UINT32_C(0xe1)
#define HWRM_CORE_FREQUENCY_QUERY UINT32_C(0xe2)
@@ -1029,7 +1093,12 @@ typedef struct cmd_nums {
#define HWRM_CFA_TLS_FILTER_ALLOC UINT32_C(0x128)
/* Experimental */
#define HWRM_CFA_TLS_FILTER_FREE UINT32_C(0x129)
- /* Engine CKV - Get the current allocation status of keys provisioned in the key vault. */
+ /* Release an AFM function for TF control */
+ #define HWRM_CFA_RELEASE_AFM_FUNC UINT32_C(0x12a)
+ /*
+ * Engine CKV - Get the current allocation status of keys provisioned in
+ * the key vault.
+ */
#define HWRM_ENGINE_CKV_STATUS UINT32_C(0x12e)
/* Engine CKV - Add a new CKEK used to encrypt keys. */
#define HWRM_ENGINE_CKV_CKEK_ADD UINT32_C(0x12f)
@@ -1089,7 +1158,10 @@ typedef struct cmd_nums {
#define HWRM_ENGINE_STATS_CLEAR UINT32_C(0x156)
/* Engine - Query the statistics accumulator for an Engine. */
#define HWRM_ENGINE_STATS_QUERY UINT32_C(0x157)
- /* Engine - Query statistics counters for continuous errors from all CDDIP Engines. */
+ /*
+ * Engine - Query statistics counters for continuous errors from all CDDIP
+ * Engines.
+ */
#define HWRM_ENGINE_STATS_QUERY_CONTINUOUS_ERROR UINT32_C(0x158)
/* Engine - Allocate an Engine RQ. */
#define HWRM_ENGINE_RQ_ALLOC UINT32_C(0x15e)
@@ -1171,13 +1243,39 @@ typedef struct cmd_nums {
#define HWRM_FUNC_SYNCE_CFG UINT32_C(0x1ab)
/* Queries SyncE configurations. */
#define HWRM_FUNC_SYNCE_QCFG UINT32_C(0x1ac)
+ /* The command is used to deallocate KTLS or QUIC key contexts. */
+ #define HWRM_FUNC_KEY_CTX_FREE UINT32_C(0x1ad)
+ /* The command is used to configure link aggr group mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG UINT32_C(0x1ae)
+ /* The command is used to query link aggr group mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG UINT32_C(0x1af)
+ /* The command is used to create a link aggr group. */
+ #define HWRM_FUNC_LAG_CREATE UINT32_C(0x1b0)
+ /* The command is used to update a link aggr group. */
+ #define HWRM_FUNC_LAG_UPDATE UINT32_C(0x1b1)
+ /* The command is used to free a link aggr group. */
+ #define HWRM_FUNC_LAG_FREE UINT32_C(0x1b2)
+ /* The command is used to query a link aggr group. */
+ #define HWRM_FUNC_LAG_QCFG UINT32_C(0x1b3)
+ /* This command is used to add a TimeTX packet pacing rate. */
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_ADD UINT32_C(0x1c2)
+ /*
+ * This command is used to delete a TimeTX packet pacing rate
+ * from the rate table.
+ */
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_DELETE UINT32_C(0x1c3)
+ /*
+ * This command is used to retrieve all the TimeTX pacing rates
+ * from the rate table that have been added for the function.
+ */
+ #define HWRM_FUNC_TIMEDTX_PACING_RATE_QUERY UINT32_C(0x1c4)
/* Experimental */
#define HWRM_SELFTEST_QLIST UINT32_C(0x200)
/* Experimental */
#define HWRM_SELFTEST_EXEC UINT32_C(0x201)
/* Experimental */
#define HWRM_SELFTEST_IRQ UINT32_C(0x202)
- /* Experimental */
+ /* Experimental (deprecated) */
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA UINT32_C(0x203)
/* Experimental */
#define HWRM_PCIE_QSTATS UINT32_C(0x204)
@@ -1202,12 +1300,12 @@ typedef struct cmd_nums {
#define HWRM_MFG_SOC_IMAGE UINT32_C(0x20c)
/* Retrieves the SoC status and image provisioning information */
#define HWRM_MFG_SOC_QSTATUS UINT32_C(0x20d)
- /* Tells the fw to program the seeprom memory */
- #define HWRM_MFG_PARAM_SEEPROM_SYNC UINT32_C(0x20e)
- /* Tells the fw to read the seeprom memory */
- #define HWRM_MFG_PARAM_SEEPROM_READ UINT32_C(0x20f)
- /* Tells the fw to get the health of seeprom data */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH UINT32_C(0x210)
+ /* Tells the fw to finalize the critical data (store and lock it) */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE UINT32_C(0x20e)
+ /* Tells the fw to read the critical data */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_READ UINT32_C(0x20f)
+ /* Tells the fw to get the health of critical data */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH UINT32_C(0x210)
/*
* The command is used for certificate provisioning to export a
* Certificate Signing Request (CSR) from the device.
@@ -1242,6 +1340,59 @@ typedef struct cmd_nums {
#define HWRM_MFG_SELFTEST_EXEC UINT32_C(0x217)
/* Queries the generic stats */
#define HWRM_STAT_GENERIC_QSTATS UINT32_C(0x218)
+ /*
+ * The command is used for certificate provisioning to export a
+ * certificate chain from the device.
+ */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT UINT32_C(0x219)
+ /* Query the statistics for doorbell drops due to various error conditions. */
+ #define HWRM_STAT_DB_ERROR_QSTATS UINT32_C(0x21a)
+ /*
+ * The command is used to enable/disable the power on ethernet for
+ * a particular I/O expander port.
+ */
+ #define HWRM_PORT_POE_CFG UINT32_C(0x230)
+ /*
+ * The command is used to query whether the power on ethernet
+ * is enabled/disabled for a particular I/O expander port.
+ */
+ #define HWRM_PORT_POE_QCFG UINT32_C(0x231)
+ /*
+ * This command returns the capabilities related to User Defined
+ * Congestion Control on a function.
+ */
+ #define HWRM_UDCC_QCAPS UINT32_C(0x258)
+ /* This command configures User Defined Congestion Control on a function. */
+ #define HWRM_UDCC_CFG UINT32_C(0x259)
+ /*
+ * This command queries the configuration of User Defined Congestion
+ * Control on a function.
+ */
+ #define HWRM_UDCC_QCFG UINT32_C(0x25a)
+ /* This command configures an existing UDCC session. */
+ #define HWRM_UDCC_SESSION_CFG UINT32_C(0x25b)
+ /* This command queries the configuration of a UDCC session. */
+ #define HWRM_UDCC_SESSION_QCFG UINT32_C(0x25c)
+ /* This command queries the UDCC session. */
+ #define HWRM_UDCC_SESSION_QUERY UINT32_C(0x25d)
+ /* This command configures the computation unit. */
+ #define HWRM_UDCC_COMP_CFG UINT32_C(0x25e)
+ /* This command queries the configuration of the computation unit. */
+ #define HWRM_UDCC_COMP_QCFG UINT32_C(0x25f)
+ /* This command queries the status and statistics of the computation unit. */
+ #define HWRM_UDCC_COMP_QUERY UINT32_C(0x260)
+ /*
+ * This command is used to query the PFC watchdog's maximum
+ * configurable timeout value.
+ */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS UINT32_C(0x261)
+ /* This command is used to set the PFC watchdog timeout value. */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG UINT32_C(0x262)
+ /*
+ * This command is used to query the currently configured PFC watchdog
+ * timeout value.
+ */
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG UINT32_C(0x263)
/* Experimental */
#define HWRM_TF UINT32_C(0x2bc)
/* Experimental */
@@ -1249,8 +1400,6 @@ typedef struct cmd_nums {
/* Experimental */
#define HWRM_TF_SESSION_OPEN UINT32_C(0x2c6)
/* Experimental */
- #define HWRM_TF_SESSION_ATTACH UINT32_C(0x2c7)
- /* Experimental */
#define HWRM_TF_SESSION_REGISTER UINT32_C(0x2c8)
/* Experimental */
#define HWRM_TF_SESSION_UNREGISTER UINT32_C(0x2c9)
@@ -1279,22 +1428,6 @@ typedef struct cmd_nums {
/* Experimental */
#define HWRM_TF_TBL_TYPE_BULK_GET UINT32_C(0x2dc)
/* Experimental */
- #define HWRM_TF_CTXT_MEM_ALLOC UINT32_C(0x2e2)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_FREE UINT32_C(0x2e3)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_RGTR UINT32_C(0x2e4)
- /* Experimental */
- #define HWRM_TF_CTXT_MEM_UNRGTR UINT32_C(0x2e5)
- /* Experimental */
- #define HWRM_TF_EXT_EM_QCAPS UINT32_C(0x2e6)
- /* Experimental */
- #define HWRM_TF_EXT_EM_OP UINT32_C(0x2e7)
- /* Experimental */
- #define HWRM_TF_EXT_EM_CFG UINT32_C(0x2e8)
- /* Experimental */
- #define HWRM_TF_EXT_EM_QCFG UINT32_C(0x2e9)
- /* Experimental */
#define HWRM_TF_EM_INSERT UINT32_C(0x2ea)
/* Experimental */
#define HWRM_TF_EM_DELETE UINT32_C(0x2eb)
@@ -1318,6 +1451,14 @@ typedef struct cmd_nums {
#define HWRM_TF_IF_TBL_SET UINT32_C(0x2fe)
/* Experimental */
#define HWRM_TF_IF_TBL_GET UINT32_C(0x2ff)
+ /* Experimental */
+ #define HWRM_TF_RESC_USAGE_SET UINT32_C(0x300)
+ /* Experimental */
+ #define HWRM_TF_RESC_USAGE_QUERY UINT32_C(0x301)
+ /* TruFlow command to allocate a table */
+ #define HWRM_TF_TBL_TYPE_ALLOC UINT32_C(0x302)
+ /* TruFlow command to free a table */
+ #define HWRM_TF_TBL_TYPE_FREE UINT32_C(0x303)
/* TruFlow command to check firmware table scope capabilities. */
#define HWRM_TFC_TBL_SCOPE_QCAPS UINT32_C(0x380)
/* TruFlow command to allocate a table scope ID and create the pools. */
@@ -1330,9 +1471,9 @@ typedef struct cmd_nums {
#define HWRM_TFC_TBL_SCOPE_FID_ADD UINT32_C(0x384)
/* TruFlow command to remove a FID from a table scope. */
#define HWRM_TFC_TBL_SCOPE_FID_REM UINT32_C(0x385)
- /* TruFlow command to allocate a table scope pool. */
+ /* DEPRECATED */
#define HWRM_TFC_TBL_SCOPE_POOL_ALLOC UINT32_C(0x386)
- /* TruFlow command to free a table scope pool. */
+ /* DEPRECATED */
#define HWRM_TFC_TBL_SCOPE_POOL_FREE UINT32_C(0x387)
/* Experimental */
#define HWRM_TFC_SESSION_ID_ALLOC UINT32_C(0x388)
@@ -1366,8 +1507,20 @@ typedef struct cmd_nums {
#define HWRM_TFC_TCAM_ALLOC_SET UINT32_C(0x396)
/* TruFlow command to free a TCAM entry. */
#define HWRM_TFC_TCAM_FREE UINT32_C(0x397)
+ /* TruFlow command to set an interface table entry */
+ #define HWRM_TFC_IF_TBL_SET UINT32_C(0x398)
+ /* TruFlow command to get an interface table entry */
+ #define HWRM_TFC_IF_TBL_GET UINT32_C(0x399)
+ /* TruFlow command to get configured info about a table scope. */
+ #define HWRM_TFC_TBL_SCOPE_CONFIG_GET UINT32_C(0x39a)
+ /* TruFlow command to query the resource usage state. */
+ #define HWRM_TFC_RESC_USAGE_QUERY UINT32_C(0x39b)
/* Experimental */
#define HWRM_SV UINT32_C(0x400)
+ /* Run a PCIe or Ethernet serdes test and retrieve test data. */
+ #define HWRM_DBG_SERDES_TEST UINT32_C(0xff0e)
+ /* Flush any trace buffer data that has not been sent to the host. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH UINT32_C(0xff0f)
/* Experimental */
#define HWRM_DBG_READ_DIRECT UINT32_C(0xff10)
/* Experimental */
@@ -1423,6 +1576,20 @@ typedef struct cmd_nums {
#define HWRM_DBG_USEQ_DELIVERY_REQ UINT32_C(0xff2a)
/* Experimental */
#define HWRM_DBG_USEQ_RESP_HDR UINT32_C(0xff2b)
+ /*
+ * This command is used to request the firmware to store a coredump
+ * into host memory previously specified with the
+ * HWRM_DBG_CRASHDUMP_MEDIUM_CFG API.
+ */
+ #define HWRM_DBG_COREDUMP_CAPTURE UINT32_C(0xff2c)
+ #define HWRM_DBG_PTRACE UINT32_C(0xff2d)
+ /*
+ * This command is used to request the firmware to simulate cable
+ * insertion or removal.
+ */
+ #define HWRM_DBG_SIM_CABLE_STATE UINT32_C(0xff2e)
+ #define HWRM_NVM_GET_VPD_FIELD_INFO UINT32_C(0xffea)
+ #define HWRM_NVM_SET_VPD_FIELD_INFO UINT32_C(0xffeb)
#define HWRM_NVM_DEFRAG UINT32_C(0xffec)
#define HWRM_NVM_REQ_ARBITRATION UINT32_C(0xffed)
/* Experimental */
@@ -1518,14 +1685,14 @@ typedef struct ret_codes {
#define HWRM_ERR_CODE_NO_FLOW_COUNTER_DURING_ALLOC UINT32_C(0xc)
/*
* This error code is only reported by firmware when the registered
- * driver instances requested to offloaded a flow but was unable to because
- * the requested key's hash collides with the installed keys.
+ * driver instances requested to offloaded a flow but was unable to
+ * because the requested key's hash collides with the installed keys.
*/
#define HWRM_ERR_CODE_KEY_HASH_COLLISION UINT32_C(0xd)
/*
* This error code is only reported by firmware when the registered
- * driver instances requested to offloaded a flow but was unable to because
- * the same key has already been installed.
+ * driver instances requested to offloaded a flow but was unable to
+ * because the same key has already been installed.
*/
#define HWRM_ERR_CODE_KEY_ALREADY_EXISTS UINT32_C(0xe)
/*
@@ -1534,8 +1701,8 @@ typedef struct ret_codes {
*/
#define HWRM_ERR_CODE_HWRM_ERROR UINT32_C(0xf)
/*
- * Firmware is unable to service the request at the present time. Caller
- * may try again later.
+ * Firmware is unable to service the request at the present time.
+ * Caller may try again later.
*/
#define HWRM_ERR_CODE_BUSY UINT32_C(0x10)
/*
@@ -1551,6 +1718,11 @@ typedef struct ret_codes {
*/
#define HWRM_ERR_CODE_PF_UNAVAILABLE UINT32_C(0x12)
/*
+ * This error code is reported by Firmware when the specific entity
+ * requested by the host is not present or does not exist.
+ */
+ #define HWRM_ERR_CODE_ENTITY_NOT_PRESENT UINT32_C(0x13)
+ /*
* This value indicates that the HWRM response is in TLV format and
* should be interpreted as one or more TLVs starting with the
* hwrm_resp_hdr TLV. This value is not an indication of any error
@@ -1587,11 +1759,12 @@ typedef struct ret_codes {
((x) == 0x10 ? "BUSY": \
((x) == 0x11 ? "RESOURCE_LOCKED": \
((x) == 0x12 ? "PF_UNAVAILABLE": \
- "Unknown decode" ))))))))))))))))))) : \
+ ((x) == 0x13 ? "ENTITY_NOT_PRESENT": \
+ "Unknown decode" )))))))))))))))))))) : \
(((x) < 0x8080) ? \
((x) == 0x8000 ? "TLV_ENCAPSULATED_RESPONSE": \
"Unknown decode" ) : \
- (((x) <= 0xffff) ? \
+ (((x) <= UINT16_MAX) ? \
((x) == 0xfffe ? "UNKNOWN_ERR": \
((x) == 0xffff ? "CMD_NOT_SUPPORTED": \
"Unknown decode" )) : \
@@ -1614,7 +1787,7 @@ typedef struct hwrm_err_output {
/* This field provides original sequence number of the command. */
uint16_t seq_id;
/*
- * This field is the length of the response in bytes. The
+ * This field is the length of the response in bytes. The
* last byte of the response is a valid flag that will read
* as '1' when the command has been completely written to
* memory.
@@ -1631,9 +1804,9 @@ typedef struct hwrm_err_output {
uint8_t cmd_err;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
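The valid-byte handshake described above maps onto a simple host-side polling loop. The sketch below is illustrative only, not the driver's actual code: it assumes the response buffer is mapped cache-coherently, and a real driver would delay between reads and bound the wait with the timeout advertised by the firmware.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: spin until the last byte of an HWRM response
 * (the 'valid' flag) reads as 1, i.e. the firmware has completely
 * written the response to host memory. 'resp' is the DMA response
 * buffer, 'resp_len' the length reported in the response header.
 */
static bool
hwrm_wait_valid(const volatile uint8_t *resp, uint16_t resp_len, int spins)
{
	const volatile uint8_t *valid = resp + resp_len - 1;

	while (spins-- > 0) {
		if (*valid == 1)
			return (true);
		/* A real driver would DELAY() here. */
	}
	return (false);
}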
@@ -1644,7 +1817,12 @@ typedef struct hwrm_err_output {
* applicable (All F's). Need to cast it the size of the field if needed.
*/
#define HWRM_NA_SIGNATURE ((uint32_t)(-1))
-/* hwrm_func_buf_rgtr */
+/*
+ * This is reflecting the size of the PF mailbox and not the maximum
+ * command size for any of the HWRM command structures. To determine
+ * the maximum size of an HWRM command supported by the firmware, see
+ * the max_ext_req_len field in the response of the HWRM_VER_GET command.
+ */
#define HWRM_MAX_REQ_LEN 128
/* hwrm_cfa_flow_info */
#define HWRM_MAX_RESP_LEN 704
@@ -1668,10 +1846,10 @@ typedef struct hwrm_err_output {
#define HWRM_TARGET_ID_TOOLS 0xFFFD
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
-#define HWRM_VERSION_UPDATE 2
+#define HWRM_VERSION_UPDATE 3
/* non-zero means beta version */
-#define HWRM_VERSION_RSVD 136
-#define HWRM_VERSION_STR "1.10.2.136"
+#define HWRM_VERSION_RSVD 61
+#define HWRM_VERSION_STR "1.10.3.61"
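Since HWRM_MAX_REQ_LEN only reflects the PF mailbox size, a driver that wants to issue larger commands must consult max_ext_req_len from the HWRM_VER_GET response, as the comment on HWRM_MAX_REQ_LEN above notes. A minimal sketch of that check, assuming this header is included and the usual hwrm_ver_get_output_t typedef:

#include <sys/endian.h>

/*
 * Sketch: choose the largest request size the firmware accepts.
 * 'resp' is assumed to be a hwrm_ver_get_output_t filled in by a
 * successful HWRM_VER_GET exchange.
 */
static uint16_t
hwrm_max_req_size(const hwrm_ver_get_output_t *resp)
{
	uint16_t ext = le16toh(resp->max_ext_req_len);

	/* Older firmware reports 0 here; fall back to the mailbox size. */
	return (ext >= HWRM_MAX_REQ_LEN ? ext : HWRM_MAX_REQ_LEN);
}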
/****************
* hwrm_ver_get *
@@ -1887,47 +2065,52 @@ typedef struct hwrm_ver_get_output {
/*
* If set to 1, then the KONG host mailbox channel is supported.
* If set to 0, then the KONG host mailbox channel is not supported.
- * By default, this flag should be 0 for older version of core firmware.
+ * By default, this flag should be 0 for older versions of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED UINT32_C(0x10)
/*
- * If set to 1, then the 64bit flow handle is supported in addition to the
- * legacy 16bit flow handle. If set to 0, then the 64bit flow handle is not
- * supported. By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, then the 64bit flow handle is supported in addition
+ * to the legacy 16bit flow handle. If set to 0, then the 64bit flow
+ * handle is not supported. By default, this flag should be 0 for
+ * older versions of core firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED UINT32_C(0x20)
/*
- * If set to 1, then filter type can be provided in filter_alloc or filter_cfg
- * filter types like L2 for l2 traffic and ROCE for roce & l2 traffic.
- * If set to 0, then filter types not supported.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, then a filter type can be provided in filter_alloc
+ * or filter_cfg: filter types like L2 for l2 traffic and ROCE for
+ * roce & l2 traffic. If set to 0, then filter types are not
+ * supported. By default, this flag should be 0 for older versions
+ * of core firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED UINT32_C(0x40)
/*
- * If set to 1, firmware is capable to support virtio vSwitch offload model.
- * If set to 0, firmware can't supported virtio vSwitch offload model.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, firmware is capable of supporting the virtio vSwitch
+ * offload model. If set to 0, firmware can't support the virtio
+ * vSwitch offload model.
+ * By default, this flag should be 0 for older versions of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED UINT32_C(0x80)
/*
* If set to 1, firmware is capable to support trusted VF.
* If set to 0, firmware is not capable to support trusted VF.
- * By default, this flag should be 0 for older version of core firmware.
+ * By default, this flag should be 0 for older versions of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED UINT32_C(0x100)
/*
* If set to 1, firmware is capable to support flow aging.
* If set to 0, firmware is not capable to support flow aging.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * By default, this flag should be 0 for older versions of core
+ * firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED UINT32_C(0x200)
/*
- * If set to 1, firmware is capable to support advanced flow counters like,
- * Meter drop counters and EEM counters.
- * If set to 0, firmware is not capable to support advanced flow counters.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * If set to 1, firmware is capable of supporting advanced flow
+ * counters like Meter drop counters and EEM counters.
+ * If set to 0, firmware is not capable of supporting advanced flow
+ * counters. By default, this flag should be 0 for older versions of
+ * core firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED UINT32_C(0x400)
/*
@@ -1935,28 +2118,30 @@ typedef struct hwrm_ver_get_output {
* Extended Exact Match(EEM) feature.
* If set to 0, firmware is not capable to support the use of the
* CFA EEM feature.
- * By default, this flag should be 0 for older version of core firmware.
- * (deprecated)
+ * By default, this flag should be 0 for older versions of core
+ * firmware. (deprecated)
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_EEM_SUPPORTED UINT32_C(0x800)
/*
- * If set to 1, the firmware is able to support advance CFA flow management
- * features reported in the HWRM_CFA_FLOW_MGNT_QCAPS.
- * If set to 0, then the firmware doesn’t support the advance CFA flow management
- * features.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 1, the firmware is able to support advanced CFA flow
+ * management features reported in the HWRM_CFA_FLOW_MGNT_QCAPS.
+ * If set to 0, then the firmware doesn't support the advanced CFA
+ * flow management features.
+ * By default, this flag should be 0 for older versions of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED UINT32_C(0x1000)
/*
* Deprecated and replaced with cfa_truflow_supported.
* If set to 1, the firmware is able to support TFLIB features.
- * If set to 0, then the firmware doesn’t support TFLIB features.
- * By default, this flag should be 0 for older version of core firmware.
+ * If set to 0, then the firmware doesn't support TFLIB features.
+ * By default, this flag should be 0 for older versions of core
+ * firmware.
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED UINT32_C(0x2000)
/*
* If set to 1, the firmware is able to support TruFlow features.
- * If set to 0, then the firmware doesn’t support TruFlow features.
+ * If set to 0, then the firmware doesn't support TruFlow features.
* By default, this flag should be 0 for older version of
* core firmware.
*/
@@ -1967,6 +2152,13 @@ typedef struct hwrm_ver_get_output {
*/
#define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE UINT32_C(0x8000)
/*
+ * If set to 1, then firmware is able to support the secure solution
+ * feature.
+ * If set to 0, then firmware does not support the secure solution
+ * feature.
+ */
+ #define HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SECURE_SOC_CAPABLE UINT32_C(0x10000)
+ /*
* This field represents the major version of RoCE firmware.
* A change in major version represents a major release.
*/
@@ -2018,7 +2210,10 @@ typedef struct hwrm_ver_get_output {
uint8_t chip_metal;
/* This field returns the bond id of the chip. */
uint8_t chip_bond_id;
- /* This value indicates the type of platform used for chip implementation. */
+ /*
+ * This value indicates the type of platform used for chip
+ * implementation.
+ */
uint8_t chip_platform_type;
/* ASIC */
#define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC UINT32_C(0x0)
@@ -2071,8 +2266,8 @@ typedef struct hwrm_ver_get_output {
* host drivers that it has not completed resource initialization
* required for data path operations. Host drivers should not send
* any HWRM command that requires data path resources. Firmware will
- * fail those commands with HWRM_ERR_CODE_BUSY. Host drivers can retry
- * those commands once both the flags are cleared.
+ * fail those commands with HWRM_ERR_CODE_BUSY. Host drivers can
+ * retry those commands once both the flags are cleared.
* If this flag and dev_not_rdy flag are set to 0, device is ready
* to accept all HWRM commands.
*/
@@ -2232,9 +2427,9 @@ typedef struct hwrm_ver_get_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -2801,11 +2996,11 @@ typedef struct crypto_presync_bd_cmd {
* Typically, presync BDs are used for packet retransmissions. Source
* port sends all the packets in order over the network to destination
* port and packets get dropped in the network. The destination port
- * will request retranmission of dropped packets and source port driver
- * will send presync BD to setup the transmitter appropriately. It will
- * provide the start and end TCP sequence number of the data to be
- * transmitted. HW keeps two sets of context variable, one for in order
- * traffic and one for retransmission traffic. HW is designed to
+ * will request retransmission of dropped packets and source port
+ * driver will send a presync BD to set up the transmitter appropriately.
+ * It will provide the start and end TCP sequence numbers of the data to
+ * be transmitted. HW keeps two sets of context variables, one for in
+ * order traffic and one for retransmission traffic. HW is designed to
* transmit everything posted in the presync BD and return to in order
* mode after that. No inorder context variables are updated in the
* process. There is a special case where packets can be dropped
@@ -2955,22 +3150,22 @@ typedef struct ce_bds_quic_add_data_msg {
* exchanged as part of sessions setup between the two end
* points for QUIC operations.
*/
- uint64_t quic_iv_lo;
+ uint8_t quic_iv_lo[8];
/*
* Most-significant 32 bits (of 96) of additional IV that is
* exchanged as part of sessions setup between the two end
* points for QUIC operations.
*/
- uint32_t quic_iv_hi;
+ uint8_t quic_iv_hi[4];
uint32_t unused_1;
/*
* Key used for encrypting or decrypting records. The Key is exchanged
* as part of sessions setup between the two end points through this
* mid-path BD.
*/
- uint32_t session_key[8];
+ uint8_t session_key[32];
/* Header protection key. */
- uint32_t hp_key[8];
+ uint8_t hp_key[32];
/* Packet number associated with the QUIC connection. */
uint64_t pkt_number;
} ce_bds_quic_add_data_msg_t, *pce_bds_quic_add_data_msg_t;
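Carrying the IV and keys as byte arrays, rather than the previous uint64_t/uint32_t fields, lets the driver copy key material verbatim with no host-endianness conversion. A minimal sketch of populating the message; the split of a 12-byte IV between quic_iv_lo and quic_iv_hi shown here is an assumption for illustration, not something this header specifies:

#include <string.h>

static void
quic_fill_key_material(ce_bds_quic_add_data_msg_t *msg,
    const uint8_t iv[12], const uint8_t key[32], const uint8_t hp[32])
{
	/* Assumed layout: first 8 IV bytes are the least-significant 64b. */
	memcpy(msg->quic_iv_lo, iv, sizeof(msg->quic_iv_lo));
	memcpy(msg->quic_iv_hi, iv + 8, sizeof(msg->quic_iv_hi));
	memcpy(msg->session_key, key, sizeof(msg->session_key));
	memcpy(msg->hp_key, hp, sizeof(msg->hp_key));
}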
@@ -3359,7 +3554,7 @@ typedef struct tx_bd_long_hi {
* 0xffff.
*
* If set to one when LSO is '1', then the IPID will be treated
- * as a 15b number and will be wrapped if it exceeds a value 0f
+ * as a 15b number and will be wrapped if it exceeds a value of
* 0x7fff.
*/
#define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40)
@@ -3418,7 +3613,7 @@ typedef struct tx_bd_long_hi {
* will be the following behavior for all cases independent of
* settings of inner LSO and checksum offload BD flags.
* If outer UDP checksum is 0, then do not update it.
- * If outer UDP checksum is non zero, then the hardware should
+ * If outer UDP checksum is non zero, then the hardware should
* compute and update it.
*/
#define TX_BD_LONG_LFLAGS_OT_IP_CHKSUM UINT32_C(0x2000)
@@ -3554,7 +3749,7 @@ typedef struct tx_bd_long_hi {
* - Wh+/SR - this option is not supported.
* - Thor - cfa_meta[15:0] is used for metadata output if en_bd_meta
* is set in the Lookup Table.
- * - SR2 - {4’d0, cfa_meta[27:0]} is used for metadata output if
+ * - SR2 - {4'd0, cfa_meta[27:0]} is used for metadata output if
* en_bd_meta is set in the Lookup Table.
*/
#define TX_BD_LONG_CFA_META_KEY_METADATA_TRANSFER (UINT32_C(0x2) << 28)
@@ -3859,7 +4054,7 @@ typedef struct tx_bd_long_inline {
* - Wh+/SR - this option is not supported.
* - Thor - cfa_meta[15:0] is used for metadata output if en_bd_meta
* is set in the Lookup Table.
- * - SR2 - {4’d0, cfa_meta[27:0]} is used for metadata output if
+ * - SR2 - {4'd0, cfa_meta[27:0]} is used for metadata output if
* en_bd_meta is set in the Lookup Table.
*/
#define TX_BD_LONG_INLINE_CFA_META_KEY_METADATA_TRANSFER (UINT32_C(0x2) << 28)
@@ -3987,6 +4182,95 @@ typedef struct tx_bd_presync_cmd {
uint32_t unused_1;
} tx_bd_presync_cmd_t, *ptx_bd_presync_cmd_t;
+/*
+ * This structure is used to send additional information for transmitting
+ * packets using timed transmit scheduling. It must only to be applied as
+ * the second BD of a BD chain that represents a packet. Any subsequent
+ * BDs will follow the timed transmit BD.
+ */
+/* tx_bd_timedtx (size:128b/16B) */
+
+typedef struct tx_bd_timedtx {
+ uint16_t flags_type;
+ /* This value identifies the type of buffer descriptor. */
+ #define TX_BD_TIMEDTX_TYPE_MASK UINT32_C(0x3f)
+ #define TX_BD_TIMEDTX_TYPE_SFT 0
+ /*
+ * Indicates a timed transmit BD. This is a 16b BD that is inserted
+ * into a packet BD chain immediately after the first BD. It is used
+ * to control the flow in a timed transmit operation.
+ */
+ #define TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX UINT32_C(0xa)
+ #define TX_BD_TIMEDTX_TYPE_LAST TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX
+ /* Unless otherwise stated, sub-fields of this field are always valid. */
+ #define TX_BD_TIMEDTX_FLAGS_MASK UINT32_C(0xffc0)
+ #define TX_BD_TIMEDTX_FLAGS_SFT 6
+ /*
+ * This value identifies the kind of buffer timed transmit mode that
+ * is to be enabled for the packet.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_MASK UINT32_C(0x1c0)
+ #define TX_BD_TIMEDTX_FLAGS_KIND_SFT 6
+ /*
+ * This timed transmit mode indicates that the packet will be
+ * scheduled and sent immediately (or as soon as possible), once
+ * it is scheduled in the transmitter.
+ * Note: This mode is similar to regular (non-timed transmit)
+ * operation. Its main purpose is to cancel pace mode timed
+ * transmit.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_ASAP (UINT32_C(0x0) << 6)
+ /*
+ * This timed transmit mode is used to schedule transmission of
+ * the packet no earlier than the time given in the tx_time
+ * field of the BD.
+ * Note: In case subsequent packets don't include a timed transmit
+ * BD, they will be scheduled subsequently for transmission
+ * without any timed transmit constraint.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_SO_TXTIME (UINT32_C(0x1) << 6)
+ /*
+ * This timed transmit mode is used to enable rate control for the
+ * flow (QP) at a rate as defined by the rate field of this BD.
+ * Note: In case subsequent, adjacent packets on the same flow
+ * don't include a timed transmit BD, they will continue to be
+ * paced by the transmitter at the same rate as given in this BD.
+ */
+ #define TX_BD_TIMEDTX_FLAGS_KIND_PACE (UINT32_C(0x2) << 6)
+ #define TX_BD_TIMEDTX_FLAGS_KIND_LAST TX_BD_TIMEDTX_FLAGS_KIND_PACE
+ /*
+ * This field exists in all Tx BDs. It doesn't apply to this particular
+ * BD type since the BD never represents an SGL or inline data; i.e. it
+ * is only a command. This field must be zero.
+ *
+ * Note that if this field is not zero, a fatal length error will be
+ * generated as it will be included in the aggregate of SGE lengths for
+ * the packet.
+ */
+ uint16_t len;
+ /*
+ * This field represents the rate of the flow (QP) in terms of KB/s.
+ * This applies to pace mode timed transmit.
+ */
+ uint32_t rate;
+ /*
+ * Applying this rate to a QP will result in this and all subsequent
+ * packets of the flow being paced at the given rate, until such time
+ * that the timed transmit mode is either changed or the rate is
+ * updated in a future packet on the flow.
+ * This field is applicable only if flags.kind is pace.
+ */
+ #define TX_BD_TIMEDTX_RATE_VAL_MASK UINT32_C(0x1ffffff)
+ #define TX_BD_TIMEDTX_RATE_VAL_SFT 0
+ /*
+ * This field represents the nano-second time to transmit the
+ * corresponding packet using SO_TXTIME mode of timed transmit.
+ * This field is applicable only if flags.kind is so_txtime.
+ */
+ uint64_t tx_time;
+} tx_bd_timedtx_t, *ptx_bd_timedtx_t;
+
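To make the layout concrete, here is a hedged sketch that fills in a pace-mode timed transmit BD using only the definitions above. It assumes this header is included, the BD has been zeroed and linked as the second BD of the packet's chain, and that the device expects little-endian fields as usual for this interface:

#include <sys/endian.h>

static void
tx_fill_pace_bd(tx_bd_timedtx_t *bd, uint32_t rate_kbps)
{
	/* Timed transmit BD type plus the pace-mode 'kind' flag. */
	bd->flags_type = htole16(TX_BD_TIMEDTX_TYPE_TX_BD_TIMEDTX |
	    TX_BD_TIMEDTX_FLAGS_KIND_PACE);
	/* This BD carries no SGE data, so len must be zero. */
	bd->len = 0;
	/* Rate in KB/s, confined to the 25-bit value field. */
	bd->rate = htole32((rate_kbps << TX_BD_TIMEDTX_RATE_VAL_SFT) &
	    TX_BD_TIMEDTX_RATE_VAL_MASK);
	/* tx_time is only meaningful in so_txtime mode. */
	bd->tx_time = 0;
}

Subsequent packets on the flow keep this pace rate until a later timed transmit BD changes the kind or updates the rate, per the field descriptions above.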
/* rx_prod_pkt_bd (size:128b/16B) */
typedef struct rx_prod_pkt_bd {
@@ -6349,7 +6633,19 @@ typedef struct rx_pkt_v3_cmpl {
* is not applicable.
*/
#define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_11 (UINT32_C(0xb) << 7)
- #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_LAST RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_11
+ /* The RSS hash was computed over tunnel context and tunnel ID field. */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_12 (UINT32_C(0xc) << 7)
+ /*
+ * The RSS hash was computed over tunnel source IP address, tunnel
+ * destination IP address, and tunnel ID field.
+ */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_13 (UINT32_C(0xd) << 7)
+ /*
+ * The RSS hash was computed over tunnel source IP address, tunnel
+ * destination IP address, tunnel context, and tunnel ID field.
+ */
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14 (UINT32_C(0xe) << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_LAST RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14
uint16_t metadata1_payload_offset;
/*
* If truncation placement is not used, this value indicates the offset
@@ -6619,16 +6915,12 @@ typedef struct rx_pkt_v3_cmpl_hi {
*/
#define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x5) << 9)
/*
- * Indicates that the IP checksum failed its check in the tunnel
- * header.
- */
- #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR (UINT32_C(0x6) << 9)
- /*
- * Indicates that the L4 checksum failed its check in the tunnel
+ * Indicates that the physical packet is shorter than that claimed
+ * by the tunnel header length. Valid for GTPv1-U packets.
- * header.
*/
- #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR (UINT32_C(0x7) << 9)
- #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR (UINT32_C(0x6) << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR
/*
* This indicates that there was an error in the inner
* portion of the packet when this
@@ -6683,17 +6975,7 @@ typedef struct rx_pkt_v3_cmpl_hi {
* for TCP.
*/
#define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (UINT32_C(0x8) << 12)
- /*
- * Indicates that the IP checksum failed its check in the
- * inner header.
- */
- #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR (UINT32_C(0x9) << 12)
- /*
- * Indicates that the L4 checksum failed its check in the
- * inner header.
- */
- #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR (UINT32_C(0xa) << 12)
- #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
/*
* This is data from the CFA block as indicated by the meta_format
* field.
@@ -7623,7 +7905,7 @@ typedef struct rx_tpa_start_v2_cmpl_hi {
#define RX_TPA_START_V2_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100)
/*
* This indicates that the complete 1's complement checksum was
- * calculated for the packet in the affregation.
+ * calculated for the packet in the aggregation.
*/
#define RX_TPA_START_V2_CMPL_FLAGS2_COMPLETE_CHECKSUM_CALC UINT32_C(0x200)
/*
@@ -8602,7 +8884,7 @@ typedef struct rx_tpa_v2_start_cmpl_hi {
/* This value indicates what format the metadata field is. */
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_SFT 4
- /* No metadata informtaion. Value is zero. */
+ /* No metadata information. Value is zero. */
#define RX_TPA_V2_START_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
/*
* The metadata field contains the VLAN tag and TPID value.
@@ -8619,7 +8901,7 @@ typedef struct rx_tpa_v2_start_cmpl_hi {
* - VXLAN = VNI[23:0] -> VXLAN Network ID
* - Geneve (NGE) = VNI[23:0] a-> Virtual Network Identifier.
* - NVGRE = TNI[23:0] -> Tenant Network ID
- * - GRE = KEY[31:0 -> key fieled with bit mask. zero if K = 0
+ * - GRE = KEY[31:0] -> key field with bit mask. Zero if K = 0
* - IPV4 = 0 (not populated)
* - IPV6 = Flow Label[19:0]
* - PPPoE = sessionID[15:0]
@@ -9511,7 +9793,7 @@ typedef struct hwrm_async_event_cmpl {
*/
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD UINT32_C(0x46)
/*
- * An event from firmware indicating that the RSS capabilites have
+ * An event from firmware indicating that the RSS capabilities have
* changed.
*/
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RSS_CHANGE UINT32_C(0x47)
@@ -9529,8 +9811,47 @@ typedef struct hwrm_async_event_cmpl {
* doorbell copy region.
*/
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR UINT32_C(0x49)
+ /*
+ * An event from firmware indicating that the XID partition could not
+ * be allocated or freed by the FW for the request that is
+ * encapsulated in the HWRM_EXEC_FWD_RESP by the PF driver for a VF.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR UINT32_C(0x4a)
+ /*
+ * A UDCC session has been modified in the FW. The session_id can be
+ * used by the driver to retrieve information related to the UDCC
+ * session.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE UINT32_C(0x4b)
+ /*
+ * Used to notify the host that the firmware has DMA-ed additional
+ * debug data to the host buffer. This is effectively a producer index
+ * update. The host driver can utilize this information to determine
+ * how much of its host buffer has been populated by the firmware.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER UINT32_C(0x4c)
+ /*
+ * Memory mapping between GPA and HPA has been configured for
+ * a peer device. Inform driver to pick up the new mapping.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE UINT32_C(0x4d)
+ /*
+ * Used to notify the representor endpoint in the driver about pair
+ * creation in the firmware.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_REPRESENTOR_PAIR_CHANGE UINT32_C(0x4e)
+ /*
+ * VF statistics context change. Informs PF driver that a VF
+ * statistics context has either been allocated or freed.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_STAT_CHANGE UINT32_C(0x4f)
+ /*
+ * Coredump collection into a host DMA address. Informs the PF driver
+ * that the coredump has been captured.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HOST_COREDUMP UINT32_C(0x50)
/* Maximum Registrable event id. */
- #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID UINT32_C(0x4a)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID UINT32_C(0x51)
/*
* A trace log message. This contains firmware trace logs string
* embedded in the asynchronous message. This is an experimental
@@ -9604,8 +9925,15 @@ typedef struct hwrm_async_event_cmpl {
((x) == 0x47 ? "RSS_CHANGE": \
((x) == 0x48 ? "DOORBELL_PACING_NQ_UPDATE": \
((x) == 0x49 ? "HW_DOORBELL_RECOVERY_READ_ERROR": \
- ((x) == 0x4a ? "MAX_RGTR_EVENT_ID": \
- "Unknown decode" ))))))))))))))))))))))))))))))))))))))))))) : \
+ ((x) == 0x4a ? "CTX_ERROR": \
+ ((x) == 0x4b ? "UDCC_SESSION_CHANGE": \
+ ((x) == 0x4c ? "DBG_BUF_PRODUCER": \
+ ((x) == 0x4d ? "PEER_MMAP_CHANGE": \
+ ((x) == 0x4e ? "REPRESENTOR_PAIR_CHANGE": \
+ ((x) == 0x4f ? "VF_STAT_CHANGE": \
+ ((x) == 0x50 ? "HOST_COREDUMP": \
+ ((x) == 0x51 ? "MAX_RGTR_EVENT_ID": \
+ "Unknown decode" )))))))))))))))))))))))))))))))))))))))))))))))))) : \
(((x) < 0x100) ? \
((x) == 0xfe ? "FW_TRACE_MSG": \
((x) == 0xff ? "HWRM_ERROR": \
@@ -10042,6 +10370,30 @@ typedef struct hwrm_async_event_cmpl_port_phy_cfg_change {
#define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_ID_PORT_PHY_CFG_CHANGE
/* Event specific data */
uint32_t event_data2;
+ /*
+ * This value indicates the current status of the optics module on
+ * this port. The same information can be found in the module_status
+ * field of the HWRM_PORT_PHY_QCFG response.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_MASK UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_SFT 0
+ /* Module is inserted and accepted */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_NONE UINT32_C(0x0)
+ /* Module is rejected and the transmit side laser is disabled. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_DISABLETX UINT32_C(0x1)
+ /* Module mismatch warning. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_MISMATCH UINT32_C(0x2)
+ /* Module is rejected and powered down. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_PWRDOWN UINT32_C(0x3)
+ /* Module is not inserted. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_NOTINSERTED UINT32_C(0x4)
+ /* Module is powered down because of an overcurrent fault. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_CURRENTFAULT UINT32_C(0x5)
+ /* Module is overheated. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_OVERHEATED UINT32_C(0x6)
+ /* Module status is not applicable. */
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_NOTAPPLICABLE UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_LAST HWRM_ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA2_MODULE_STATUS_NOTAPPLICABLE
uint8_t opaque_v;
/*
* This value is written by the NIC such that it will be different
@@ -10136,7 +10488,7 @@ typedef struct hwrm_async_event_cmpl_reset_notify {
* 16-lsb timestamp (100-msec resolution)
* The Maximum Firmware Reset bail out value in the order of 100
* milliseconds. The driver instances will use this value to reinitiate
+ * the registration process if the core firmware didn't set the
+ * the registration process again if the core firmware didn't set the
* state bit.
*/
uint16_t timestamp_hi;
@@ -10748,6 +11100,13 @@ typedef struct hwrm_async_event_cmpl_vf_cfg_change {
* If set to 0, then this bit should be ignored.
*/
#define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE UINT32_C(0x10)
+ /*
+ * If this bit is set to 1, then the control of VF was relinquished
+ * back to the firmware flow manager following the function takeover
+ * by TruFlow.
+ * If set to 0, then this bit should be ignored.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TF_OWNERSHIP_RELEASE UINT32_C(0x20)
} hwrm_async_event_cmpl_vf_cfg_change_t, *phwrm_async_event_cmpl_vf_cfg_change_t;
/* hwrm_async_event_cmpl_llfc_pfc_change (size:128b/16B) */
@@ -11241,8 +11600,8 @@ typedef struct hwrm_async_event_cmpl_quiesce_done {
#define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_OPAQUE_SFT 8
/*
* Additional information about internal hardware state related to
- * idle/quiesce state. QUIESCE may succeed per quiesce_status
- * regardless of idle_state_flags. If QUIESCE fails, the host may
+ * idle/quiesce state. QUIESCE may succeed per quiesce_status
+ * regardless of idle_state_flags. If QUIESCE fails, the host may
* inspect idle_state_flags to determine whether a retry is warranted.
*/
#define HWRM_ASYNC_EVENT_CMPL_QUIESCE_DONE_EVENT_DATA2_IDLE_STATE_FLAGS_MASK UINT32_C(0xff0000)
@@ -11854,6 +12213,316 @@ typedef struct hwrm_async_event_cmpl_hw_doorbell_recovery_read_error {
#define HWRM_ASYNC_EVENT_CMPL_HW_DOORBELL_RECOVERY_READ_ERROR_EVENT_DATA1_READ_ERROR_FLAGS_CQ_ERR UINT32_C(0x8)
} hwrm_async_event_cmpl_hw_doorbell_recovery_read_error_t, *phwrm_async_event_cmpl_hw_doorbell_recovery_read_error_t;
+/* hwrm_async_event_cmpl_ctx_error (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_ctx_error {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the PF driver
+ * that firmware failed to allocate/free the requested contexts. This
+ * message is only valid in the XID partition scheme. Given the start
+ * xid and the number of contexts in error, the PF driver will figure
+ * out the corresponding XID partition(s) in error.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR UINT32_C(0x4a)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_ID_CTX_ERROR
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Context operation code */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE UINT32_C(0x1)
+ /* Context alloc failure */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_ALLOC UINT32_C(0x0)
+ /* Context free failure */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_LAST HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE
+ /* Number of contexts in error */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_MASK UINT32_C(0xfffe)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_SFT 1
+ /* Function ID which the XID partitions are associated with */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_MASK UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_SFT 16
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Starting XID that has error */
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_MASK UINT32_C(0xffffffff)
+ #define HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA1_START_XID_SFT 0
+} hwrm_async_event_cmpl_ctx_error_t, *phwrm_async_event_cmpl_ctx_error_t;
+
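The packed event_data fields above decode with the usual mask-and-shift pattern. A hedged sketch, assuming this header is included (printf stands in for whatever logging the driver actually uses):

#include <stdio.h>
#include <sys/endian.h>

static void
decode_ctx_error(const hwrm_async_event_cmpl_ctx_error_t *ev)
{
	uint32_t data2 = le32toh(ev->event_data2);
	uint32_t op, nctx, fid;

	op = data2 & HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE;
	nctx = (data2 & HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_NUM_CTXS_SFT;
	fid = (data2 & HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_FID_SFT;

	printf("ctx %s failure: fid %u, %u ctxs starting at xid 0x%x\n",
	    op == HWRM_ASYNC_EVENT_CMPL_CTX_ERROR_EVENT_DATA2_CTX_OP_CODE_FREE ?
	    "free" : "alloc", fid, nctx, le32toh(ev->event_data1));
}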
+/* hwrm_async_event_udcc_session_change (size:128b/16B) */
+
+typedef struct hwrm_async_event_udcc_session_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_LAST HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the PF driver
+ * that firmware has modified a UDCC session.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE UINT32_C(0x4b)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_LAST HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_ID_UDCC_SESSION_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* UDCC Session id operation code */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_MASK UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_SFT 0
+ /* session_id has been created */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_CREATED UINT32_C(0x0)
+ /* session_id has been freed */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_LAST HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA2_SESSION_ID_OP_CODE_FREED
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* UDCC session id which was modified */
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_MASK UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_UDCC_SESSION_CHANGE_EVENT_DATA1_UDCC_SESSION_ID_SFT 0
+} hwrm_async_event_udcc_session_change_t, *phwrm_async_event_udcc_session_change_t;
+
+/* hwrm_async_event_representor_pair_change (size:128b/16B) */
+
+typedef struct hwrm_async_event_representor_pair_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_TYPE_LAST HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the driver
+ * that firmware has modified a representor pair.
+ */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_ID_REPRESENTOR_PAIR_CHANGE UINT32_C(0x4e)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_ID_LAST HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_ID_REPRESENTOR_PAIR_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /* Representor pair operation code */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_MASK UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_SFT 0
+ /* pair has been created */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_CREATED UINT32_C(0x0)
+ /* pair has been deleted */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_DELETED UINT32_C(0x1)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_LAST HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_PAIR_OP_CODE_DELETED
+ /* DSCP insert operation code */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_MASK UINT32_C(0xff00)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_SFT 8
+ /* allow dscp modification */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_MODIFY (UINT32_C(0x0) << 8)
+ /* skip dscp modification */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_IGNORE (UINT32_C(0x1) << 8)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_LAST HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA2_DSCP_OP_CODE_IGNORE
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Representor endpoint fid which was modified */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA1_PAIR_EP_FID_MASK UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA1_PAIR_EP_FID_SFT 0
+ /* Representor uplink fid which was modified */
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA1_PAIR_REP_FID_MASK UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_REPRESENTOR_PAIR_CHANGE_EVENT_DATA1_PAIR_REP_FID_SFT 16
+} hwrm_async_event_representor_pair_change_t, *phwrm_async_event_representor_pair_change_t;
+
+/* hwrm_async_event_cmpl_dbg_buf_producer (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_dbg_buf_producer {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * Used to notify the host that the firmware has DMA-ed additional
+ * debug data to the host buffer. This is effectively a producer index
+ * update. The host driver can utilize this information to determine
+ * how much of its host buffer has been populated by the firmware.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER UINT32_C(0x4c)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_ID_DBG_BUF_PRODUCER
+ /* Event specific data */
+ uint32_t event_data2;
+ /*
+ * Specifies the current host buffer offset. Data up to this offset
+ * has been populated by the firmware. For example, if the firmware
+ * has DMA-ed 8192 bytes to the host buffer, then this field has a
+ * value of 8192. This field rolls over to zero once the firmware
+ * writes the last page of the host buffer.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_MASK UINT32_C(0xffffffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA2_CURRENT_BUFFER_OFFSET_SFT 0
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_OPAQUE_SFT 1
+ /* 8-lsb timestamp from POR (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp from POR (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Type of trace buffer that has been updated. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_MASK UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SFT 0
+ /* SRT trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT_TRACE UINT32_C(0x0)
+ /* SRT2 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_SRT2_TRACE UINT32_C(0x1)
+ /* CRT trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT_TRACE UINT32_C(0x2)
+ /* CRT2 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CRT2_TRACE UINT32_C(0x3)
+ /* RIGP0 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP0_TRACE UINT32_C(0x4)
+ /* L2 HWRM trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_L2_HWRM_TRACE UINT32_C(0x5)
+ /* RoCE HWRM trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_ROCE_HWRM_TRACE UINT32_C(0x6)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA0_TRACE UINT32_C(0x7)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA1_TRACE UINT32_C(0x8)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_CA2_TRACE UINT32_C(0x9)
+ /* RIGP1 trace. */
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE UINT32_C(0xa)
+ #define HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_DBG_BUF_PRODUCER_EVENT_DATA1_TYPE_RIGP1_TRACE
+} hwrm_async_event_cmpl_dbg_buf_producer_t, *phwrm_async_event_cmpl_dbg_buf_producer_t;
+
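Because the reported offset is an absolute producer index that rolls over to zero after the last page of the host buffer, the consumer needs a wrap-aware delta. A minimal sketch, assuming buf_size is the size of the host buffer the driver registered for this trace type:

#include <stdint.h>

static uint32_t
dbg_buf_new_bytes(uint32_t prev_off, uint32_t cur_off, uint32_t buf_size)
{
	if (cur_off >= prev_off)
		return (cur_off - prev_off);
	/* The producer wrapped past the end of the host buffer. */
	return (buf_size - prev_off + cur_off);
}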
+/* hwrm_async_event_cmpl_peer_mmap_change (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_peer_mmap_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform the driver
+ * that the memory mapping for a peer device is set. The driver
+ * will need to query using get_structured_data.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_PEER_MMAP_CHANGE UINT32_C(0x4d)
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_EVENT_ID_PEER_MMAP_CHANGE
+ /* Event specific data. */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+} hwrm_async_event_cmpl_peer_mmap_change_t, *phwrm_async_event_cmpl_peer_mmap_change_t;
+
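All of these completion records share the alternating valid-bit scheme described for opaque_v: the NIC writes 1 on even passes through the completion queue and 0 on odd passes. A hedged sketch of the consumer-side check, assuming this header is included and with expect_v flipped by the caller each time its consumer index wraps:

#include <stdbool.h>

static bool
async_event_is_valid(const hwrm_async_event_cmpl_peer_mmap_change_t *ev,
    bool expect_v)
{
	bool v = (ev->opaque_v &
	    HWRM_ASYNC_EVENT_CMPL_PEER_MMAP_CHANGE_V) != 0;

	return (v == expect_v);
}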
/* hwrm_async_event_cmpl_fw_trace_msg (size:128b/16B) */
typedef struct hwrm_async_event_cmpl_fw_trace_msg {
@@ -12036,10 +12705,10 @@ typedef struct hwrm_async_event_cmpl_error_report_base {
/* Event specific data */
uint32_t event_data1;
/* Indicates the type of error being reported. */
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK UINT32_C(0xff)
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
/* Reserved */
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED UINT32_C(0x0)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED UINT32_C(0x0)
/*
* The NIC was subjected to an extended pause storm which caused it
* to disable flow control in order to avoid stalling the Tx path.
@@ -12052,7 +12721,7 @@ typedef struct hwrm_async_event_cmpl_error_report_base {
* it. The pin number on which this signal was received is stored
* in event_data2 as pin_id.
*/
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL UINT32_C(0x2)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL UINT32_C(0x2)
/*
* There was a low level error with an NVM write or erase.
* See nvm_err_type for more details.
@@ -12063,13 +12732,18 @@ typedef struct hwrm_async_event_cmpl_error_report_base {
* threshold is crossed, it indicates one or more doorbells for
* the function were dropped by hardware.
*/
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD UINT32_C(0x4)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD UINT32_C(0x4)
/*
* Indicates the NIC's temperature has crossed one of the thermal
* thresholds.
*/
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD UINT32_C(0x5)
- #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD UINT32_C(0x5)
+ /*
+ * Speed change not supported with dual rate transceivers
+ * on this board.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
} hwrm_async_event_cmpl_error_report_base_t, *phwrm_async_event_cmpl_error_report_base_t;
#define GET_ERROR_REPORT_TYPE(x) \
@@ -12080,7 +12754,8 @@ typedef struct hwrm_async_event_cmpl_error_report_base {
((x) == 0x3 ? "NVM": \
((x) == 0x4 ? "DOORBELL_DROP_THRESHOLD": \
((x) == 0x5 ? "THERMAL_THRESHOLD": \
- "Unknown decode" )))))) : \
+ ((x) == 0x6 ? "DUAL_DATA_RATE_NOT_SUPPORTED": \
+ "Unknown decode" ))))))) : \
"Unknown decode" )
@@ -12359,7 +13034,7 @@ typedef struct hwrm_async_event_cmpl_error_report_thermal {
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_ID_ERROR_REPORT
/* Event specific data. */
uint32_t event_data2;
- /* Current temperature. In Celsius */
+ /* Current temperature. In Celsius */
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_MASK UINT32_C(0xff)
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA2_CURRENT_TEMP_SFT 0
/*
@@ -12425,6 +13100,168 @@ typedef struct hwrm_async_event_cmpl_error_report_thermal {
#define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
} hwrm_async_event_cmpl_error_report_thermal_t, *phwrm_async_event_cmpl_error_report_thermal_t;
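A hedged usage sketch for the GET_ERROR_REPORT_TYPE() decode macro above (the surrounding handler, dev, and the cmpl pointer to a hwrm_async_event_cmpl_error_report_base_t are hypothetical; HWRM fields are little-endian on the wire):

	uint32_t data1 = le32toh(cmpl->event_data1);
	uint32_t err_type = (data1 &
	    HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT;

	device_printf(dev, "firmware error report: %s\n",
	    GET_ERROR_REPORT_TYPE(err_type));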
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * This async notification message is used to inform
+ * the driver that an error has occurred which may need
+ * the attention of the administrator.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT UINT32_C(0x45)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ /* Event specific data. */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* Indicates the type of error being reported. */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK UINT32_C(0xff)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ /*
+ * Speed change not supported with dual rate transceivers
+ * on this board.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED UINT32_C(0x6)
+ #define HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+} hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported_t, *phwrm_async_event_cmpl_error_report_dual_data_rate_not_supported_t;
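All of the 16B async completions above carry the same split timestamp: timestamp_lo holds the 8 LSBs and timestamp_hi the next 16 bits, both in 100-msec units. A minimal sketch of reassembling it, assuming that reading of the field comments:

	/* 24-bit event timestamp in 100 ms units, per the field comments. */
	uint32_t ts_100ms = (uint32_t)cmpl->timestamp_lo |
	    ((uint32_t)le16toh(cmpl->timestamp_hi) << 8);
	uint64_t ts_msec = (uint64_t)ts_100ms * 100;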
+
+/* hwrm_async_event_cmpl_vf_stat_change (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_vf_stat_change {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
+ * VF statistics context change. Informs PF driver that a VF
+ * statistics context has either been allocated or freed.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_ID_VF_STAT_CHANGE UINT32_C(0x4f)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_ID_VF_STAT_CHANGE
+ /* Event specific data */
+ uint32_t event_data2;
+ /*
+ * VF ID that allocated the stats context. This is zero-based and
+ * relative to each PF.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_VF_ID_MASK UINT32_C(0xffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_VF_ID_SFT 0
+ /*
+ * A value of zero signals to the PF driver that it can free the host
+ * buffer associated with the statistics context.
 + * A non-zero value signals to the PF driver that it should allocate
+ * a host buffer for the statistics context and inform the firmware
+ * via HWRM_STAT_CTX_ALLOC. The PF driver must provide the sequence id
+ * in the corresponding HWRM_STAT_CTX_ALLOC request so that firmware
+ * can correlate it to the VF statistics context.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_ACTION_SEQUENCE_ID_MASK UINT32_C(0xffff0000)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_ACTION_SEQUENCE_ID_SFT 16
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+ /* VF statistics context identifier */
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA1_STAT_CTX_ID_MASK UINT32_C(0xffffffff)
+ #define HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA1_STAT_CTX_ID_SFT 0
+} hwrm_async_event_cmpl_vf_stat_change_t, *phwrm_async_event_cmpl_vf_stat_change_t;
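A sketch of the PF-side handling implied by the comments above; softc and the bnxt_vf_stat_buf_* helpers are hypothetical. A zero sequence id means the PF driver may free the host buffer; a non-zero one asks it to allocate a buffer and echo the id in HWRM_STAT_CTX_ALLOC:

	uint32_t data2 = le32toh(cmpl->event_data2);
	uint16_t vf_id = (data2 &
	    HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_VF_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_VF_ID_SFT;
	uint16_t seq_id = (data2 &
	    HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_ACTION_SEQUENCE_ID_MASK) >>
	    HWRM_ASYNC_EVENT_CMPL_VF_STAT_CHANGE_EVENT_DATA2_ACTION_SEQUENCE_ID_SFT;
	uint32_t stat_ctx_id = le32toh(cmpl->event_data1);

	if (seq_id == 0)
		bnxt_vf_stat_buf_free(softc, vf_id, stat_ctx_id);
	else
		bnxt_vf_stat_buf_alloc(softc, vf_id, stat_ctx_id, seq_id);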
+
+/* hwrm_async_event_cmpl_host_coredump (size:128b/16B) */
+
+typedef struct hwrm_async_event_cmpl_host_coredump {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_TYPE_MASK UINT32_C(0x3f)
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_TYPE_SFT 0
+ /* HWRM Asynchronous Event Information */
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_TYPE_HWRM_ASYNC_EVENT UINT32_C(0x2e)
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_TYPE_LAST HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_TYPE_HWRM_ASYNC_EVENT
+ /* Identifiers of events. */
+ uint16_t event_id;
+ /*
 + * Coredump collection into a host DMA address. Informs the PF
 + * driver that the coredump has been captured.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_EVENT_ID_HOST_COREDUMP UINT32_C(0x50)
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_EVENT_ID_LAST HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_EVENT_ID_HOST_COREDUMP
+ /* Event specific data */
+ uint32_t event_data2;
+ uint8_t opaque_v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_V UINT32_C(0x1)
+ /* opaque is 7 b */
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_OPAQUE_MASK UINT32_C(0xfe)
+ #define HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_OPAQUE_SFT 1
+ /* 8-lsb timestamp (100-msec resolution) */
+ uint8_t timestamp_lo;
+ /* 16-lsb timestamp (100-msec resolution) */
+ uint16_t timestamp_hi;
+ /* Event specific data */
+ uint32_t event_data1;
+} hwrm_async_event_cmpl_host_coredump_t, *phwrm_async_event_cmpl_host_coredump_t;
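A sketch of where this lands in a driver's async-event dispatch (the switch context and bnxt_coredump_ready() are hypothetical); the event only signals that firmware finished writing into the PF-registered host DMA buffer:

	switch (le16toh(cmpl->event_id)) {
	case HWRM_ASYNC_EVENT_CMPL_HOST_COREDUMP_EVENT_ID_HOST_COREDUMP:
		bnxt_coredump_ready(softc);
		break;
	}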
+
/* metadata_base_msg (size:64b/8B) */
typedef struct metadata_base_msg {
@@ -12879,7 +13716,7 @@ typedef struct tx_doorbell {
uint32_t key_idx;
/*
* BD Index of next BD that will be used to transmit data
- * on the TX ring mapped to this door bell. NIC may
+ * on the TX ring mapped to this door bell. NIC may
* read and process all BDs up to, but not including this
* index.
*/
@@ -12887,7 +13724,7 @@ typedef struct tx_doorbell {
#define TX_DOORBELL_IDX_SFT 0
/*
* This value indicates the type of door bell operation
- * that is begin requested. This value is '0' for TX
 + * that is being requested. This value is '0' for TX
* door bell operations.
*/
#define TX_DOORBELL_KEY_MASK UINT32_C(0xf0000000)
@@ -12903,7 +13740,7 @@ typedef struct rx_doorbell {
uint32_t key_idx;
/*
* BD Index of next BD that will be used for an empty receive
- * buffer on the RX ring mapped to this door bell. NIC may
+ * buffer on the RX ring mapped to this door bell. NIC may
* read and process all BDs up to, but not including this
* index.
*/
@@ -12911,7 +13748,7 @@ typedef struct rx_doorbell {
#define RX_DOORBELL_IDX_SFT 0
/*
* This value indicates the type of door bell operation
- * that is begin requested. This value is '1' for RX
 + * that is being requested. This value is '1' for RX
* door bell operations.
*/
#define RX_DOORBELL_KEY_MASK UINT32_C(0xf0000000)
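Illustrative only: composing an RX doorbell word from the comments above, with the key value 1 in the top nibble and the producer index in the low bits. prod_idx, db_res, and db_off are hypothetical, and RX_DOORBELL_IDX_MASK is defined on a line outside this hunk:

	uint32_t db = ((UINT32_C(0x1) << 28) & RX_DOORBELL_KEY_MASK) |
	    ((uint32_t)prod_idx << RX_DOORBELL_IDX_SFT);
	bus_write_4(db_res, db_off, db);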
@@ -12935,20 +13772,20 @@ typedef struct cmpl_doorbell {
#define CMPL_DOORBELL_IDX_SFT 0
/*
* This indicates if the BDIDX value is valid for this
- * update when it is '1'. When it is '0', the BDIDX
+ * update when it is '1'. When it is '0', the BDIDX
* value should be ignored.
*/
#define CMPL_DOORBELL_IDX_VALID UINT32_C(0x4000000)
/*
* This bit indicates the new interrupt mask state for the
- * interrupt associated with the BDIDX. A '1', means the
- * interrupt is to be masked. A '0' indicates the interrupt
+ * interrupt associated with the BDIDX. A '1', means the
+ * interrupt is to be masked. A '0' indicates the interrupt
* is to be unmasked.
*/
#define CMPL_DOORBELL_MASK UINT32_C(0x8000000)
/*
* This value indicates the type of door bell operation
- * that is begin requested. This value is '2' for CMP
 + * that is being requested. This value is '2' for CMP
* door bell operations.
*/
#define CMPL_DOORBELL_KEY_MASK UINT32_C(0xf0000000)
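Illustrative only: acknowledging completions up to a consumer index while leaving the interrupt unmasked (CMPL_DOORBELL_MASK left clear); cons_idx is hypothetical, and the key value 2 follows the comment above:

	uint32_t db = ((UINT32_C(0x2) << 28) & CMPL_DOORBELL_KEY_MASK) |
	    CMPL_DOORBELL_IDX_VALID |
	    ((uint32_t)cons_idx << CMPL_DOORBELL_IDX_SFT);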
@@ -12975,7 +13812,7 @@ typedef struct status_doorbell {
#define STATUS_DOORBELL_IDX_SFT 0
/*
* This value indicates the type of door bell operation
- * that is begin requested. This value is '3' for Status
 + * that is being requested. This value is '3' for Status
* door bell operations.
*/
#define STATUS_DOORBELL_KEY_MASK UINT32_C(0xf0000000)
@@ -13002,14 +13839,14 @@ typedef struct push32_doorbell {
* A value of 1 is invalid since backup must start with a
* long 32B BE.
* A value of 2 indicates just the first 32B BE.
- * A value of 3 indicates 32B+16B BD. etc.
+ * A value of 3 indicates 32B+16B BD. etc.
* A value of 0 indicates 16x16B BD spaces are consumed.
*/
#define PUSH32_DOORBELL_SZ_MASK UINT32_C(0xf000000)
#define PUSH32_DOORBELL_SZ_SFT 24
/*
* This value indicates the type of door bell operation
- * that is begin requested. This value is 4 for push
 + * that is being requested. This value is 4 for push
* door bell operations.
*/
#define PUSH32_DOORBELL_KEY_MASK UINT32_C(0xf0000000)
@@ -13036,7 +13873,7 @@ typedef struct push32_doorbell {
#define PUSH32_DOORBELL_FLAGS_SFT 6
/*
* If set to 1, the packet ends with the data in the buffer
- * pointed to by this descriptor. This flag must be
+ * pointed to by this descriptor. This flag must be
* valid on every BD.
*
* This bit must be set on all push doorbells.
@@ -13137,9 +13974,9 @@ typedef struct push32_doorbell {
*
* This bit must be valid on the first BD of a packet.
*
- * Packet must be 64B or longer when this flag is set. It is not
+ * Packet must be 64B or longer when this flag is set. It is not
* useful to use this bit with any form of TX offload such as
- * CSO or LSO. The intent is that the packet from the host already
+ * CSO or LSO. The intent is that the packet from the host already
* has a valid Ethernet CRC on the packet.
*/
#define PUSH32_DOORBELL_LFLAGS_NOCRC UINT32_C(0x4)
@@ -13156,21 +13993,21 @@ typedef struct push32_doorbell {
* of the packet associated with this descriptor.
*
* For outer UDP checksum, global outer UDP checksum TE_NIC register
- * needs to be enabled. If the global outer UDP checksum TE_NIC register
- * bit is set, outer UDP checksum will be calculated for the following
- * cases:
- * 1. Packets with tcp_udp_chksum flag set to offload checksum for inner
- * packet AND the inner packet is TCP/UDP. If the inner packet is ICMP for
- * example (non-TCP/UDP), even if the tcp_udp_chksum is set, the outer UDP
- * checksum will not be calculated.
- * 2. Packets with lso flag set which implies inner TCP checksum calculation
- * as part of LSO operation.
+ * needs to be enabled. If the global outer UDP checksum TE_NIC
+ * register bit is set, outer UDP checksum will be calculated for the
+ * following cases:
+ * 1. Packets with tcp_udp_chksum flag set to offload checksum for
+ * inner packet AND the inner packet is TCP/UDP. If the inner packet is
+ * ICMP for example (non-TCP/UDP), even if the tcp_udp_chksum is set,
+ * the outer UDP checksum will not be calculated.
+ * 2. Packets with lso flag set which implies inner TCP checksum
+ * calculation as part of LSO operation.
*/
#define PUSH32_DOORBELL_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
/*
* If set to 1, the device will treat this packet with LSO(Large
* Send Offload) processing for both normal or encapsulated
- * packets, which is a form of TCP segmentation. When this bit
+ * packets, which is a form of TCP segmentation. When this bit
* is 1, the hdr_size and mss fields must be valid. The driver
* doesn't need to set t_ip_chksum, ip_chksum, and tcp_udp_chksum
* flags since the controller will replace the appropriate
@@ -13185,7 +14022,7 @@ typedef struct push32_doorbell {
* 0xffff.
*
* If set to one when LSO is '1', then the IPID will be treated
- * as a 15b number and will be wrapped if it exceeds a value 0f
+ * as a 15b number and will be wrapped if it exceeds a value of
* 0x7fff.
*/
#define PUSH32_DOORBELL_LFLAGS_IPID_FMT UINT32_C(0x40)
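A worked instance of the wrap rule above, assuming a hypothetical cur_ipid counter kept by the driver:

	/* LSO with IPID_FMT set: the IP ID behaves as a 15-bit counter. */
	uint16_t next_ipid = (uint16_t)((cur_ipid + 1) & 0x7fff);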
@@ -13203,12 +14040,12 @@ typedef struct push32_doorbell {
#define PUSH32_DOORBELL_LFLAGS_T_IPID UINT32_C(0x80)
/*
* If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * packet. Packet must be a valid RoCE format packet.
*/
#define PUSH32_DOORBELL_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
* If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * packet. Packet must be a valid FCoE format packet.
*/
#define PUSH32_DOORBELL_LFLAGS_FCOE_CRC UINT32_C(0x200)
uint16_t hdr_size;
@@ -13291,7 +14128,7 @@ typedef struct push32_doorbell {
#define PUSH32_DOORBELL_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
#define PUSH32_DOORBELL_CFA_META_KEY_LAST PUSH32_DOORBELL_CFA_META_KEY_VLAN_TAG
/*
- * This is the data for the push packet. If the packet
+ * This is the data for the push packet. If the packet
* data does not fit in the first pass, data writing
* can continue at offset 4 of the doorbell for up to 4 additional
* passes for a total data size of 512B maximum.
@@ -13345,8 +14182,8 @@ typedef struct hwrm_func_reset_input {
* The ID of the VF that this PF is trying to reset.
* Only the parent PF shall be allowed to reset a child VF.
*
- * A parent PF driver shall use this field only when a specific child VF
- * is requested to be reset.
+ * A parent PF driver shall use this field only when a specific child
+ * VF is requested to be reset.
*/
uint16_t vf_id;
/* This value indicates the level of a function reset. */
@@ -13392,9 +14229,9 @@ typedef struct hwrm_func_reset_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13464,16 +14301,16 @@ typedef struct hwrm_func_getfid_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13546,9 +14383,9 @@ typedef struct hwrm_func_vf_alloc_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13622,9 +14459,9 @@ typedef struct hwrm_func_vf_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13635,7 +14472,7 @@ typedef struct hwrm_func_vf_free_output {
********************/
-/* hwrm_func_vf_cfg_input (size:512b/64B) */
+/* hwrm_func_vf_cfg_input (size:576b/72B) */
typedef struct hwrm_func_vf_cfg_input {
/* The HWRM command request type. */
@@ -13671,12 +14508,12 @@ typedef struct hwrm_func_vf_cfg_input {
* This bit must be '1' for the mtu field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_MTU UINT32_C(0x1)
/*
* This bit must be '1' for the guest_vlan field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_GUEST_VLAN UINT32_C(0x2)
/*
* This bit must be '1' for the async_event_cr field to be
* configured.
@@ -13686,7 +14523,7 @@ typedef struct hwrm_func_vf_cfg_input {
* This bit must be '1' for the dflt_mac_addr field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_DFLT_MAC_ADDR UINT32_C(0x8)
/*
* This bit must be '1' for the num_rsscos_ctxs field to be
* configured.
@@ -13701,17 +14538,17 @@ typedef struct hwrm_func_vf_cfg_input {
* This bit must be '1' for the num_tx_rings field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x40)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_RINGS UINT32_C(0x40)
/*
* This bit must be '1' for the num_rx_rings field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x80)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_RINGS UINT32_C(0x80)
/*
* This bit must be '1' for the num_l2_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x100)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_L2_CTXS UINT32_C(0x100)
/*
* This bit must be '1' for the num_vnics field to be
* configured.
@@ -13721,22 +14558,32 @@ typedef struct hwrm_func_vf_cfg_input {
* This bit must be '1' for the num_stat_ctxs field to be
* configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x400)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_STAT_CTXS UINT32_C(0x400)
/*
* This bit must be '1' for the num_hw_ring_grps field to be
* configured.
*/
#define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS UINT32_C(0x800)
/*
- * This bit must be '1' for the num_tx_key_ctxs field to be
- * configured.
+ * This bit must be '1' for the num_ktls_tx_key_ctxs field to
+ * be configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_TX_KEY_CTXS UINT32_C(0x1000)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_TX_KEY_CTXS UINT32_C(0x1000)
/*
- * This bit must be '1' for the num_rx_key_ctxs field to be
- * configured.
+ * This bit must be '1' for the num_ktls_rx_key_ctxs field to
+ * be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_RX_KEY_CTXS UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the num_quic_tx_key_ctxs field to
+ * be configured.
*/
- #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_RX_KEY_CTXS UINT32_C(0x2000)
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_TX_KEY_CTXS UINT32_C(0x4000)
+ /*
+ * This bit must be '1' for the num_quic_rx_key_ctxs field to
+ * be configured.
+ */
+ #define HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_RX_KEY_CTXS UINT32_C(0x8000)
/*
* The maximum transmission unit requested on the function.
* The HWRM should make sure that the mtu of
@@ -13799,10 +14646,10 @@ typedef struct hwrm_func_vf_cfg_input {
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RX_ASSETS_TEST UINT32_C(0x2)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of CMPL rings) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of CMPL rings) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST UINT32_C(0x4)
/*
@@ -13815,10 +14662,10 @@ typedef struct hwrm_func_vf_cfg_input {
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST UINT32_C(0x8)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of ring groups) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of ring groups) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_VF_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST UINT32_C(0x10)
/*
@@ -13876,11 +14723,17 @@ typedef struct hwrm_func_vf_cfg_input {
uint16_t num_stat_ctxs;
/* The number of HW ring groups requested for the VF. */
uint16_t num_hw_ring_grps;
- /* Number of Tx Key Contexts requested. */
- uint32_t num_tx_key_ctxs;
- /* Number of Rx Key Contexts requested. */
- uint32_t num_rx_key_ctxs;
- uint8_t unused[4];
+ /* Number of KTLS Tx Key Contexts requested. */
+ uint32_t num_ktls_tx_key_ctxs;
+ /* Number of KTLS Rx Key Contexts requested. */
+ uint32_t num_ktls_rx_key_ctxs;
+ /* The number of MSI-X vectors requested for the VF. */
+ uint16_t num_msix;
+ uint8_t unused[2];
+ /* Number of QUIC Tx Key Contexts requested. */
+ uint32_t num_quic_tx_key_ctxs;
+ /* Number of QUIC Rx Key Contexts requested. */
+ uint32_t num_quic_rx_key_ctxs;
} hwrm_func_vf_cfg_input_t, *phwrm_func_vf_cfg_input_t;
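A hedged sketch of a VF driver requesting the new key-context resources (the HWRM request-header fill and the bnxt_hwrm_send() helper are hypothetical; HWRM fields are little-endian on the wire):

	hwrm_func_vf_cfg_input_t req = {0};

	req.enables = htole32(
	    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_TX_KEY_CTXS |
	    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_KTLS_RX_KEY_CTXS |
	    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_TX_KEY_CTXS |
	    HWRM_FUNC_VF_CFG_INPUT_ENABLES_NUM_QUIC_RX_KEY_CTXS);
	req.num_ktls_tx_key_ctxs = htole32(128);
	req.num_ktls_rx_key_ctxs = htole32(128);
	req.num_quic_tx_key_ctxs = htole32(64);
	req.num_quic_rx_key_ctxs = htole32(64);
	/* fill the standard HWRM header, then e.g. bnxt_hwrm_send(softc, &req) */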
/* hwrm_func_vf_cfg_output (size:128b/16B) */
@@ -13897,9 +14750,9 @@ typedef struct hwrm_func_vf_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -13952,7 +14805,7 @@ typedef struct hwrm_func_qcaps_input {
uint8_t unused_0[6];
} hwrm_func_qcaps_input_t, *phwrm_func_qcaps_input_t;
-/* hwrm_func_qcaps_output (size:768b/96B) */
+/* hwrm_func_qcaps_output (size:1152b/144B) */
typedef struct hwrm_func_qcaps_output {
/* The specific error status for the command. */
@@ -13964,7 +14817,7 @@ typedef struct hwrm_func_qcaps_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
@@ -14095,7 +14948,8 @@ typedef struct hwrm_func_qcaps_output {
/*
* If the query is for a VF, then this flag shall be ignored,
* If this query is for a PF and this flag is set to 1,
- * then the PF has the administrative privilege to configure another PF
+ * then the PF has the administrative privilege to configure another
+ * PF.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_ADMIN_PF_SUPPORTED UINT32_C(0x40000)
/*
@@ -14507,7 +15361,7 @@ typedef struct hwrm_func_qcaps_output {
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_QUIC_SUPPORTED UINT32_C(0x2)
/*
* When this bit is '1', it indicates that KDNet mode is
- * supported on the port for this function. This bit is
+ * supported on the port for this function. This bit is
* never set for a VF.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_KDNET_SUPPORTED UINT32_C(0x4)
@@ -14562,12 +15416,151 @@ typedef struct hwrm_func_qcaps_output {
/*
* When this bit is '1', it indicates that the hardware based
* link aggregation group (L2 and RoCE) feature is supported.
+ * This LAG feature is only supported on the THOR2 or newer NIC
+ * with multiple ports.
*/
#define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_HW_LAG_SUPPORTED UINT32_C(0x400)
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED UINT32_C(0x800)
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_STEERING_TAG_SUPPORTED UINT32_C(0x1000)
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED UINT32_C(0x2000)
- #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED UINT32_C(0x4000)
+ /*
+ * When this bit is '1', it indicates all contexts can be stored
+ * on chip instead of using host based backing store memory.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ON_CHIP_CTX_SUPPORTED UINT32_C(0x800)
+ /*
+ * When this bit is '1', it indicates that the HW supports
+ * using a steering tag in the memory transactions targeting
+ * L2 or RoCE ring resources.
+ * Steering Tags are system-specific values that must follow the
+ * encoding requirements of the hardware platform. On devices that
+ * support steering to multiple address domains, a value of 0 in
+ * bit 0 of the steering tag specifies the address is associated
+ * with the SOC address space, and a value of 1 indicates the
+ * address is associated with the host address space.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_STEERING_TAG_SUPPORTED UINT32_C(0x1000)
+ /*
+ * When this bit is '1', it indicates that driver can enable
+ * support for an enhanced VF scale.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ENHANCED_VF_SCALE_SUPPORTED UINT32_C(0x2000)
+ /*
+ * When this bit is '1', it indicates that FW is capable of
+ * supporting partition based XID management for KTLS/QUIC
+ * Tx/Rx Key Context types.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_KEY_XID_PARTITION_SUPPORTED UINT32_C(0x4000)
+ /*
+ * This bit is only valid on the condition that both
+ * 'ktls_supported' and 'quic_supported' flags are set. When this
 + * bit is valid, it conveys the information below:
+ * 1. If it is set to '1', it indicates that the firmware allows the
+ * driver to run KTLS and QUIC concurrently;
+ * 2. If it is cleared to '0', it indicates that the driver has to
+ * make sure all crypto connections on all functions are of the
+ * same type, i.e., either KTLS or QUIC.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_CONCURRENT_KTLS_QUIC_SUPPORTED UINT32_C(0x8000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * setting a cross TC cap on a scheduler queue.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SCHQ_CROSS_TC_CAP_SUPPORTED UINT32_C(0x10000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * setting a per TC cap on a scheduler queue.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SCHQ_PER_TC_CAP_SUPPORTED UINT32_C(0x20000)
+ /*
+ * When this bit is '1', it indicates that the device supports
 + * setting a per TC reservation on a scheduler queue.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SCHQ_PER_TC_RESERVATION_SUPPORTED UINT32_C(0x40000)
+ /*
+ * When this bit is '1', it indicates that firmware supports query
+ * for statistics related to invalid doorbell errors and drops.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DB_ERROR_STATS_SUPPORTED UINT32_C(0x80000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * VF RoCE resource management.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED UINT32_C(0x100000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * UDCC management.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_UDCC_SUPPORTED UINT32_C(0x200000)
+ /*
+ * When this bit is '1', it indicates that the device supports Timed
+ * Transmit TxTime scheduling; this is applicable to L2 flows only.
+ * It is expected that host software assigns each packet a transmit
+ * time and posts packets for transmit in time order. NIC hardware
+ * transmits the packet at time assigned by software.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED UINT32_C(0x400000)
+ /*
+ * This bit indicates the method used for the advertisement of the
+ * max resource limit for the PF and its VFs.
+ * When this bit is '1', it indicates that the maximum resource
+ * limits for both RoCE and L2 are software defined. These limits
+ * are queried using the HWRM backing store qcaps v1
+ * and v2(max_num_entries). For RoCE, the resource limits are
+ * derived from nvm options. For L2, the resources will continue
+ * to use FW enforced SW limits based on chip config and per PF
+ * function NVM resource parameters.
 + * If this bit is '0', the FW will use the legacy behavior.
+ * For RoCE, the maximum resource values supported by the chip will
+ * be returned. For L2, the maximum resource values returned will
+ * be the FW enforced SW limits based on chip config and per PF
+ * function NVM resource parameters.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED UINT32_C(0x800000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * migrating ingress NIC flows to Truflow.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED UINT32_C(0x1000000)
+ /*
+ * When this bit is '1', it indicates that the Firmware supports
+ * query and clear of the port loopback statistics.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_LPBK_STATS_SUPPORTED UINT32_C(0x2000000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * migrating egress NIC flows to Truflow.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED UINT32_C(0x4000000)
+ /*
+ * When this bit is '1', it indicates that the device supports
+ * multiple lossless CoS queues.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED UINT32_C(0x8000000)
+ /*
+ * When this bit is '1', it indicates that the firmware supports
+ * peer memory map storing feature.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_PEER_MMAP_SUPPORTED UINT32_C(0x10000000)
+ /*
+ * When this bit is '1', it indicates that the device supports Timed
+ * Transmit packet pacing; this is applicable to L2 flows only.
+ * Host software passes the transmit rate of an L2 flow to the
+ * hardware and hardware uses this rate to derive the transmit time
+ * for scheduling packet transmission of the flow.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_TIMED_TX_PACING_SUPPORTED UINT32_C(0x20000000)
+ /*
+ * When this bit is '1', it indicates that the device supports VF
+ * statistics ejection. Firmware is capable of copying VF statistics
+ * to two host buffers - one buffer allocated by VF driver and
+ * another buffer allocated by the parent PF driver. This bit is
+ * only set on a PF.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_VF_STAT_EJECTION_SUPPORTED UINT32_C(0x40000000)
+ /*
+ * When this bit is '1', it indicates that the parent PF allocated
 + * the host DMA buffer to capture the coredump, so that any VF
 + * driver instance can issue the HWRM_DBG_COREDUMP_CAPTURE command.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_HOST_COREDUMP_SUPPORTED UINT32_C(0x80000000)
uint16_t tunnel_disable_flag;
/*
* When this bit is '1', it indicates that the VXLAN parsing
@@ -14609,10 +15602,96 @@ typedef struct hwrm_func_qcaps_output {
* is disabled in hardware
*/
#define HWRM_FUNC_QCAPS_OUTPUT_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE UINT32_C(0x80)
- uint8_t unused_1;
+ uint16_t xid_partition_cap;
+ /*
+ * When this bit is '1', it indicates that FW is capable of
+ * supporting partition based XID management for Tx crypto
+ * key contexts.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_TX_CK UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates that FW is capable of
+ * supporting partition based XID management for Rx crypto
+ * key contexts.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_XID_PARTITION_CAP_RX_CK UINT32_C(0x2)
+ /*
+ * This value uniquely identifies the hardware NIC used by the
+ * function. The value returned will be the same for all functions.
+ * A value of 00-00-00-00-00-00-00-00 indicates no device serial number
 + * is currently configured. This is the same value that is returned
 + * by the PCIe Device Serial Number capability.
+ */
+ uint8_t device_serial_number[8];
+ /*
+ * This field is only valid in the XID partition mode. It indicates
+ * the number contexts per partition.
+ */
+ uint16_t ctxs_per_partition;
+ /*
 + * The maximum number of TSO segments that the NIC can handle during
 + * large segmentation offload.
 + * If this field is zero, there is no limit on the number of TSO
 + * segments.
+ * Note that this field will be zero for older firmware that
+ * doesn't report the max TSO segment limit.
+ */
+ uint16_t max_tso_segs;
+ /*
+ * The maximum number of address vectors that may be allocated across
+ * all VFs for the function. This is valid only on the PF with VF RoCE
+ * (SR-IOV) enabled. Returns zero if this command is called on a PF
+ * with VF RoCE (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_av;
+ /*
+ * The maximum number of completion queues that may be allocated across
+ * all VFs for the function. This is valid only on the PF with VF RoCE
+ * (SR-IOV) enabled. Returns zero if this command is called on a PF
+ * with VF RoCE (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_cq;
+ /*
+ * The maximum number of memory regions plus memory windows that may be
+ * allocated across all VFs for the function. This is valid only on the
+ * PF with VF RoCE (SR-IOV) enabled. Returns zero if this command is
+ * called on a PF with VF RoCE (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_mrw;
+ /*
+ * The maximum number of queue pairs that may be allocated across
+ * all VFs for the function. This is valid only on the PF with VF RoCE
+ * (SR-IOV) enabled. Returns zero if this command is called on a PF
+ * with VF RoCE (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_qp;
+ /*
+ * The maximum number of shared receive queues that may be allocated
+ * across all VFs for the function. This is valid only on the PF with
+ * VF RoCE (SR-IOV) enabled. Returns zero if this command is called on
+ * a PF with VF RoCE (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_srq;
+ /*
+ * The maximum number of GIDs that may be allocated across all VFs for
+ * the function. This is valid only on the PF with VF RoCE (SR-IOV)
+ * enabled. Returns zero if this command is called on a PF with VF RoCE
+ * (SR-IOV) disabled or on a VF.
+ */
+ uint32_t roce_vf_max_gid;
+ uint32_t flags_ext3;
+ /*
+ * When this bit is '1', firmware supports the driver using
+ * FUNC_CFG (or FUNC_VF_CFG) to decrease resource reservations
+ * while some resources are still allocated. An error is returned
+ * if the driver tries to set the reservation to be less than the
+ * number of allocated resources.
+ */
+ #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT3_RM_RSV_WHILE_ALLOC_CAP UINT32_C(0x1)
+ uint8_t unused_3[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -14668,7 +15747,7 @@ typedef struct hwrm_func_qcfg_input {
uint8_t unused_0[6];
} hwrm_func_qcfg_input_t, *phwrm_func_qcfg_input_t;
-/* hwrm_func_qcfg_output (size:1024b/128B) */
+/* hwrm_func_qcfg_output (size:1280b/160B) */
typedef struct hwrm_func_qcfg_output {
/* The specific error status for the command. */
@@ -14680,7 +15759,7 @@ typedef struct hwrm_func_qcfg_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
@@ -14747,14 +15826,14 @@ typedef struct hwrm_func_qcfg_output {
* If the function that is being queried is a PF, then the HWRM shall
* set this field to 0 and the HWRM client shall ignore this field.
* If the function that is being queried is a VF, then the HWRM shall
- * set this field to 1 if the queried VF is trusted, otherwise the HWRM
- * shall set this field to 0.
+ * set this field to 1 if the queried VF is trusted, otherwise the
+ * HWRM shall set this field to 0.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_TRUSTED_VF UINT32_C(0x40)
/*
- * If set to 1, then secure mode is enabled for this function or device.
- * If set to 0, then secure mode is disabled (or normal mode) for this
- * function or device.
+ * If set to 1, then secure mode is enabled for this function or
+ * device. If set to 0, then secure mode is disabled (or normal mode)
+ * for this function or device.
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_SECURE_MODE_ENABLED UINT32_C(0x80)
/*
@@ -14809,6 +15888,12 @@ typedef struct hwrm_func_qcfg_output {
*/
#define HWRM_FUNC_QCFG_OUTPUT_FLAGS_ENABLE_RDMA_SRIOV UINT32_C(0x4000)
/*
+ * When set to 1, indicates the field roce_vnic_id in the structure
+ * is valid. If this bit is 0, the driver should not use the
+ * 'roce_vnic_id' field.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS_ROCE_VNIC_ID_VALID UINT32_C(0x8000)
+ /*
* This value is current MAC address configured for this
* function. A value of 00-00-00-00-00-00 indicates no
* MAC address is currently configured.
@@ -14889,10 +15974,10 @@ typedef struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN UINT32_C(0xff)
#define HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_LAST HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_UNKNOWN
/*
- * This field will indicate number of physical functions on this port_partition.
- * HWRM shall return unavail (i.e. value of 0) for this field
- * when this command is used to query VF's configuration or
- * from older firmware that doesn't support this field.
+ * This field will indicate number of physical functions on this
+ * port_partition. HWRM shall return unavail (i.e. value of 0) for this
+ * field when this command is used to query VF's configuration or from
+ * older firmware that doesn't support this field.
*/
uint8_t port_pf_cnt;
/* number of PFs is not available */
@@ -15000,7 +16085,10 @@ typedef struct hwrm_func_qcfg_output {
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (UINT32_C(0x0) << 2)
/* Admin link state is in forced up mode. */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (UINT32_C(0x1) << 2)
- /* Admin link state is in auto mode - follows the physical link state. */
+ /*
+ * Admin link state is in auto mode - follows the physical link
+ * state.
+ */
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO (UINT32_C(0x2) << 2)
#define HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_LAST HWRM_FUNC_QCFG_OUTPUT_OPTIONS_LINK_ADMIN_STATE_AUTO
/* Reserved for future. */
@@ -15042,7 +16130,7 @@ typedef struct hwrm_func_qcfg_output {
*/
uint16_t alloc_msix;
/*
- * The number of registered VF’s associated with the PF. This field
 + * The number of registered VFs associated with the PF. This field
* should be ignored when the request received on the VF interface.
* This field will be updated on the PF interface to initiate
* the unregister request on PF in the HOT Reset Process.
@@ -15050,14 +16138,22 @@ typedef struct hwrm_func_qcfg_output {
uint16_t registered_vfs;
/*
* The size of the doorbell BAR in KBytes reserved for L2 including
- * any area that is shared between L2 and RoCE. The L2 driver
- * should only map the L2 portion of the doorbell BAR. Any rounding
+ * any area that is shared between L2 and RoCE. The L2 driver
+ * should only map the L2 portion of the doorbell BAR. Any rounding
* of the BAR size to the native CPU page size should be performed
- * by the driver. If the value is zero, no special partitioning
+ * by the driver. If the value is zero, no special partitioning
* of the doorbell BAR between L2 and RoCE is required.
*/
uint16_t l2_doorbell_bar_size_kb;
- uint8_t unused_1;
+ /*
+ * A bitmask indicating the active endpoints. Each bit represents a
+ * specific endpoint, with bit 0 indicating EP 0 and bit 3 indicating
+ * EP 3. For example:
+ * - a single root system would return 0x1
+ * - a 2x8 system (where EPs 0 and 2 are active) would return 0x5
+ * - a 4x4 system (where EPs 0-3 are active) would return 0xF
+ */
+ uint8_t active_endpoints;
/*
* For backward compatibility this field must be set to 1.
* Older drivers might look for this field to be 1 before
@@ -15065,21 +16161,22 @@ typedef struct hwrm_func_qcfg_output {
*/
uint8_t always_1;
/*
- * This GRC address location is used by the Host driver interfaces to poll
- * the adapter ready state to re-initiate the registration process again
- * after receiving the RESET Notify event.
+ * This GRC address location is used by the Host driver interfaces to
+ * poll the adapter ready state to re-initiate the registration process
+ * again after receiving the RESET Notify event.
*/
uint32_t reset_addr_poll;
/*
- * This field specifies legacy L2 doorbell size in KBytes. Drivers should use
- * this value to find out the doorbell page offset from the BAR.
+ * This field specifies legacy L2 doorbell size in KBytes. Drivers
+ * should use this value to find out the doorbell page offset from the
+ * BAR.
*/
uint16_t legacy_l2_db_size_kb;
uint16_t svif_info;
/*
- * This field specifies the source virtual interface of the function being
- * queried. Drivers can use this to program svif field in the L2 context
- * table
+ * This field specifies the source virtual interface of the function
+ * being queried. Drivers can use this to program svif field in the
+ * L2 context table
*/
#define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_MASK UINT32_C(0x7fff)
#define HWRM_FUNC_QCFG_OUTPUT_SVIF_INFO_SVIF_SFT 0
@@ -15141,7 +16238,11 @@ typedef struct hwrm_func_qcfg_output {
/* DB page size is 4MB. */
#define HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_4MB UINT32_C(0xa)
#define HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_LAST HWRM_FUNC_QCFG_OUTPUT_DB_PAGE_SIZE_4MB
- uint8_t unused_2[2];
+ /*
+ * RoCE VNIC ID for the function. If the function does not have a valid
+ * RoCE vnic id, then the roce_vnic_id_valid bit in flags is set to 0.
+ */
+ uint16_t roce_vnic_id;
/*
* Minimum guaranteed bandwidth for the network partition made up
* of the caller physical function and all its child virtual
@@ -15208,10 +16309,15 @@ typedef struct hwrm_func_qcfg_output {
* value is used if ring MTU is not specified.
*/
uint16_t host_mtu;
- uint8_t unused_3[2];
+ uint16_t flags2;
+ /*
+ * If set to 1, then VF drivers are requested to insert a DSCP
+ * value into all outgoing L2 packets such that DSCP=VF ID modulo 64
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_FLAGS2_SRIOV_DSCP_INSERT_ENABLED UINT32_C(0x1)
uint8_t unused_4[2];
/*
- * KDNet mode for the port for this function. If a VF, KDNet
+ * KDNet mode for the port for this function. If a VF, KDNet
* mode is always disabled.
*/
uint8_t port_kdnet_mode;
@@ -15226,22 +16332,92 @@ typedef struct hwrm_func_qcfg_output {
*/
uint8_t kdnet_pcie_function;
/*
- * Function ID of the KDNET function on this port. If the
+ * Function ID of the KDNET function on this port. If the
* KDNET partition does not exist and the FW supports this
* feature, 0xffff will be returned.
*/
uint16_t port_kdnet_fid;
uint8_t unused_5[2];
- /* Number of Tx Key Contexts allocated. */
- uint32_t alloc_tx_key_ctxs;
- /* Number of Rx Key Contexts allocated. */
- uint32_t alloc_rx_key_ctxs;
- uint8_t unused_6[7];
+ /* Number of KTLS Tx Key Contexts allocated. */
+ uint32_t num_ktls_tx_key_ctxs;
+ /* Number of KTLS Rx Key Contexts allocated. */
+ uint32_t num_ktls_rx_key_ctxs;
+ /*
+ * The LAG idx of this function. The lag_id is per port and the
 + * valid lag_id is from 0 to 7; if there is no valid lag_id,
+ * 0xff will be returned.
+ * This HW lag id is used for Truflow programming only.
+ */
+ uint8_t lag_id;
+ /* Partition interface for this function. */
+ uint8_t parif;
+ /*
+ * The LAG ID of a hardware link aggregation group (LAG) whose
+ * member ports include the port of this function. The LAG was
+ * previously created using HWRM_FUNC_LAG_CREATE. If the port of this
+ * function is not a member of any LAG, the fw_lag_id will be 0xff.
+ */
+ uint8_t fw_lag_id;
+ uint8_t unused_6;
+ /* Number of QUIC Tx Key Contexts allocated. */
+ uint32_t num_quic_tx_key_ctxs;
+ /* Number of QUIC Rx Key Contexts allocated. */
+ uint32_t num_quic_rx_key_ctxs;
+ /*
+ * Number of AVs per VF. Only valid for PF. This field is ignored
+ * when the flag, l2_vf_resource_mgmt, is not set in RoCE
+ * initialize_fw.
+ */
+ uint32_t roce_max_av_per_vf;
+ /*
+ * Number of CQs per VF. Only valid for PF. This field is ignored when
+ * the flag, l2_vf_resource_mgmt, is not set in RoCE initialize_fw.
+ */
+ uint32_t roce_max_cq_per_vf;
+ /*
+ * Number of MR/MWs per VF. Only valid for PF. This field is ignored
+ * when the flag, l2_vf_resource_mgmt, is not set in RoCE
+ * initialize_fw.
+ */
+ uint32_t roce_max_mrw_per_vf;
+ /*
+ * Number of QPs per VF. Only valid for PF. This field is ignored when
+ * the flag, l2_vf_resource_mgmt, is not set in RoCE initialize_fw.
+ */
+ uint32_t roce_max_qp_per_vf;
+ /*
+ * Number of SRQs per VF. Only valid for PF. This field is ignored
+ * when the flag, l2_vf_resource_mgmt, is not set in RoCE
+ * initialize_fw.
+ */
+ uint32_t roce_max_srq_per_vf;
+ /*
+ * Number of GIDs per VF. Only valid for PF. This field is ignored
+ * when the flag, l2_vf_resource_mgmt, is not set in RoCE
+ * initialize_fw.
+ */
+ uint32_t roce_max_gid_per_vf;
+ /*
+ * Bitmap of context types that have XID partition enabled.
+ * Only valid for PF.
+ */
+ uint16_t xid_partition_cfg;
+ /*
+ * When this bit is '1', it indicates that driver enables XID
+ * partition on Tx crypto key contexts.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_XID_PARTITION_CFG_TX_CK UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates that driver enables XID
+ * partition on Rx crypto key contexts.
+ */
+ #define HWRM_FUNC_QCFG_OUTPUT_XID_PARTITION_CFG_RX_CK UINT32_C(0x2)
+ uint8_t unused_7;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -15252,7 +16428,7 @@ typedef struct hwrm_func_qcfg_output {
*****************/
-/* hwrm_func_cfg_input (size:1024b/128B) */
+/* hwrm_func_cfg_input (size:1280b/160B) */
typedef struct hwrm_func_cfg_input {
/* The HWRM command request type. */
@@ -15345,9 +16521,10 @@ typedef struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_FLAGS_VIRT_MAC_PERSIST UINT32_C(0x800)
/*
* This bit only applies to the VF. If this bit is set, the statistic
- * context counters will not be cleared when the statistic context is freed
- * or a function reset is called on VF. This bit will be cleared when the PF
- * is unloaded or a function reset is called on the PF.
+ * context counters will not be cleared when the statistic context is
+ * freed or a function reset is called on VF. This bit will be
+ * cleared when the PF is unloaded or a function reset is called on
+ * the PF.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_NO_AUTOCLEAR_STATISTIC UINT32_C(0x1000)
/*
@@ -15368,10 +16545,10 @@ typedef struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_FLAGS_RX_ASSETS_TEST UINT32_C(0x4000)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of CMPL rings) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of CMPL rings) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_CMPL_ASSETS_TEST UINT32_C(0x8000)
/*
@@ -15384,10 +16561,10 @@ typedef struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_FLAGS_RSSCOS_CTX_ASSETS_TEST UINT32_C(0x10000)
/*
* This bit requests that the firmware test to see if all the assets
- * requested in this command (i.e. number of ring groups) are available.
- * The firmware will return an error if the requested assets are
- * not available. The firmware will NOT reserve the assets if they
- * are available.
+ * requested in this command (i.e. number of ring groups) are
+ * available. The firmware will return an error if the requested
+ * assets are not available. The firmware will NOT reserve the assets
+ * if they are available.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_RING_GRP_ASSETS_TEST UINT32_C(0x20000)
/*
@@ -15492,14 +16669,6 @@ typedef struct hwrm_func_cfg_input {
* on this request if the TX_METADATA is enabled for this function.
*/
#define HWRM_FUNC_CFG_INPUT_FLAGS_BD_METADATA_DISABLE UINT32_C(0x40000000)
- /*
- * If this bit is set to 1, the driver is requesting FW to see if
- * all the assets requested in this command (i.e. number of KTLS/
- * QUIC key contexts) are available. The firmware will return an
- * error if the requested assets are not available. The firmware
- * will NOT reserve the assets if they are available.
- */
- #define HWRM_FUNC_CFG_INPUT_FLAGS_KEY_CTX_ASSETS_TEST UINT32_C(0x80000000)
uint32_t enables;
/*
* This bit must be '1' for the admin_mtu field to be
@@ -15653,15 +16822,15 @@ typedef struct hwrm_func_cfg_input {
*/
#define HWRM_FUNC_CFG_INPUT_ENABLES_HOST_MTU UINT32_C(0x20000000)
/*
- * This bit must be '1' for the number of Tx Key Contexts
- * field to be configured.
+ * This bit must be '1' for the num_ktls_tx_key_ctxs field to be
+ * configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_TX_KEY_CTXS UINT32_C(0x40000000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_KTLS_TX_KEY_CTXS UINT32_C(0x40000000)
/*
- * This bit must be '1' for the number of Rx Key Contexts
- * field to be configured.
+ * This bit must be '1' for the num_ktls_rx_key_ctxs field to be
+ * configured.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES_RX_KEY_CTXS UINT32_C(0x80000000)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES_KTLS_RX_KEY_CTXS UINT32_C(0x80000000)
/*
* This field can be used by the admin PF to configure
* mtu of foster PFs.
@@ -15853,7 +17022,7 @@ typedef struct hwrm_func_cfg_input {
* to configure the EVB mode, it sets the evb_mode_cfg_not_supported
* flag in HWRM_FUNC_QCAPS command response for the function.
* The HWRM takes into account the switching of EVB mode from one to
- * another and reconfigure hardware resources as reqiured. The
+ * another and reconfigure hardware resources as required. The
* switching from VEB to VEPA mode requires the disabling of the
* loopback traffic. Additionally, source knockouts are handled
* differently in VEB and VEPA modes.
@@ -15886,7 +17055,10 @@ typedef struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_DOWN (UINT32_C(0x0) << 2)
/* Admin state is forced up. */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_FORCED_UP (UINT32_C(0x1) << 2)
- /* Admin state is in auto mode - is to follow the physical link state. */
+ /*
+ * Admin state is in auto mode - is to follow the physical link
+ * state.
+ */
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO (UINT32_C(0x2) << 2)
#define HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_LAST HWRM_FUNC_CFG_INPUT_OPTIONS_LINK_ADMIN_STATE_AUTO
/* Reserved for future. */
@@ -15903,61 +17075,61 @@ typedef struct hwrm_func_cfg_input {
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the TX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_ENABLE UINT32_C(0x1)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the TX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_DISABLE UINT32_C(0x2)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the RX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_ENABLE UINT32_C(0x4)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the RX crypto engine block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_DISABLE UINT32_C(0x8)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the TX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_ENABLE UINT32_C(0x10)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the TX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TE_CFA_DISABLE UINT32_C(0x20)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the RX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_ENABLE UINT32_C(0x40)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the RX configurable flow processing
- * block. When this bit is ‘0’, this flag has no effect.
+ * block. When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RE_CFA_DISABLE UINT32_C(0x80)
/*
* When this bit is '1', the caller requests to enable a MPC
* channel with destination to the primate processor block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_ENABLE UINT32_C(0x100)
/*
* When this bit is '1', the caller requests to disable a MPC
* channel with destination to the primate processor block.
- * When this bit is ‘0’, this flag has no effect.
+ * When this bit is '0', this flag has no effect.
*/
#define HWRM_FUNC_CFG_INPUT_MPC_CHNLS_PRIMATE_DISABLE UINT32_C(0x200)
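Taken together, these bits form an enable/disable pair per MPC destination. A short sketch of requesting the two crypto-engine channels; note that only the bit definitions appear above, so the mpc_chnls field name used here is an assumption:

	/* Request TX and RX crypto-engine MPC channels (sketch; the
	 * mpc_chnls field name is assumed from context). */
	req.mpc_chnls = htole16(HWRM_FUNC_CFG_INPUT_MPC_CHNLS_TCE_ENABLE |
	    HWRM_FUNC_CFG_INPUT_MPC_CHNLS_RCE_ENABLE);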
/*
@@ -16045,7 +17217,23 @@ typedef struct hwrm_func_cfg_input {
* ring that is assigned to a function has a valid mtu.
*/
uint16_t host_mtu;
- uint8_t unused_0[4];
+ uint32_t flags2;
+ /*
+ * If this bit is set to 1, the driver is requesting the firmware
+ * to see if the assets (i.e., the number of KTLS key contexts)
+ * requested in this command are available. The firmware will return
+ * an error if the requested assets are not available. The firmware
+ * will NOT reserve the assets if they are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, the driver is requesting the firmware
+ * to see if the assets (i.e., the number of QUIC key contexts)
+ * requested in this command are available. The firmware will return
+ * an error if the requested assets are not available. The firmware
+ * will NOT reserve the assets if they are available.
+ */
+ #define HWRM_FUNC_CFG_INPUT_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST UINT32_C(0x2)
uint32_t enables2;
/*
* This bit must be '1' for the kdnet_mode field to be
@@ -16057,10 +17245,55 @@ typedef struct hwrm_func_cfg_input {
* configured. Legacy controller core FW may silently ignore
* the db_page_size programming request through this command.
*/
- #define HWRM_FUNC_CFG_INPUT_ENABLES2_DB_PAGE_SIZE UINT32_C(0x2)
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_DB_PAGE_SIZE UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the num_quic_tx_key_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_QUIC_TX_KEY_CTXS UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the num_quic_rx_key_ctxs field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_QUIC_RX_KEY_CTXS UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the roce_max_av_per_vf field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_AV_PER_VF UINT32_C(0x10)
+ /*
+ * This bit must be '1' for the roce_max_cq_per_vf field to be
+ * configured. Only valid for PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_CQ_PER_VF UINT32_C(0x20)
+ /*
+ * This bit must be '1' for the roce_max_mrw_per_vf field to be
+ * configured. Only valid for PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_MRW_PER_VF UINT32_C(0x40)
+ /*
+ * This bit must be '1' for the roce_max_qp_per_vf field to be
+ * configured.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_QP_PER_VF UINT32_C(0x80)
+ /*
+ * This bit must be '1' for the roce_max_srq_per_vf field to be
+ * configured. Only valid for PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_SRQ_PER_VF UINT32_C(0x100)
+ /*
+ * This bit must be '1' for the roce_max_gid_per_vf field to be
+ * configured. Only valid for PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_ROCE_MAX_GID_PER_VF UINT32_C(0x200)
/*
- * KDNet mode for the port for this function. If NPAR is
- * also configured on this port, it takes precedence. KDNet
+ * This bit must be '1' for the xid_partition_cfg field to be
+ * configured. Only valid for PF.
+ */
+ #define HWRM_FUNC_CFG_INPUT_ENABLES2_XID_PARTITION_CFG UINT32_C(0x400)
+ /*
+ * KDNet mode for the port for this function. If NPAR is
+ * also configured on this port, it takes precedence. KDNet
* mode is ignored for a VF.
*/
uint8_t port_kdnet_mode;
@@ -16103,11 +17336,42 @@ typedef struct hwrm_func_cfg_input {
#define HWRM_FUNC_CFG_INPUT_DB_PAGE_SIZE_4MB UINT32_C(0xa)
#define HWRM_FUNC_CFG_INPUT_DB_PAGE_SIZE_LAST HWRM_FUNC_CFG_INPUT_DB_PAGE_SIZE_4MB
uint8_t unused_1[2];
- /* Number of Tx Key Contexts requested. */
- uint32_t num_tx_key_ctxs;
- /* Number of Rx Key Contexts requested. */
- uint32_t num_rx_key_ctxs;
- uint8_t unused_2[4];
+ /* Number of KTLS Tx Key Contexts requested. */
+ uint32_t num_ktls_tx_key_ctxs;
+ /* Number of KTLS Rx Key Contexts requested. */
+ uint32_t num_ktls_rx_key_ctxs;
+ /* Number of QUIC Tx Key Contexts requested. */
+ uint32_t num_quic_tx_key_ctxs;
+ /* Number of QUIC Rx Key Contexts requested. */
+ uint32_t num_quic_rx_key_ctxs;
+ /* Number of AVs per VF. Only valid for PF. */
+ uint32_t roce_max_av_per_vf;
+ /* Number of CQs per VF. Only valid for PF. */
+ uint32_t roce_max_cq_per_vf;
+ /* Number of MR/MWs per VF. Only valid for PF. */
+ uint32_t roce_max_mrw_per_vf;
+ /* Number of QPs per VF. Only valid for PF. */
+ uint32_t roce_max_qp_per_vf;
+ /* Number of SRQs per VF. Only valid for PF. */
+ uint32_t roce_max_srq_per_vf;
+ /* Number of GIDs per VF. Only valid for PF. */
+ uint32_t roce_max_gid_per_vf;
+ /*
+ * Bitmap of context types that have XID partition enabled.
+ * Only valid for PF.
+ */
+ uint16_t xid_partition_cfg;
+ /*
+	 * When this bit is '1', it indicates that the driver enables XID
+ * partition on Tx crypto key contexts.
+ */
+ #define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_TX_CK UINT32_C(0x1)
+ /*
+	 * When this bit is '1', it indicates that the driver enables XID
+ * partition on Rx crypto key contexts.
+ */
+ #define HWRM_FUNC_CFG_INPUT_XID_PARTITION_CFG_RX_CK UINT32_C(0x2)
+ uint16_t unused_2;
} hwrm_func_cfg_input_t, *phwrm_func_cfg_input_t;
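As a worked example, a driver asking for KTLS and QUIC key contexts in one FUNC_CFG call could fill the structure as below; this is a sketch that elides request-header setup and the send path, using only fields and macros defined above:

	hwrm_func_cfg_input_t req = {0};

	req.enables = htole32(HWRM_FUNC_CFG_INPUT_ENABLES_KTLS_TX_KEY_CTXS |
	    HWRM_FUNC_CFG_INPUT_ENABLES_KTLS_RX_KEY_CTXS);
	req.enables2 = htole32(HWRM_FUNC_CFG_INPUT_ENABLES2_QUIC_TX_KEY_CTXS |
	    HWRM_FUNC_CFG_INPUT_ENABLES2_QUIC_RX_KEY_CTXS);
	/* Probe only: error if unavailable, no reservation if available. */
	req.flags2 = htole32(HWRM_FUNC_CFG_INPUT_FLAGS2_KTLS_KEY_CTX_ASSETS_TEST |
	    HWRM_FUNC_CFG_INPUT_FLAGS2_QUIC_KEY_CTX_ASSETS_TEST);
	req.num_ktls_tx_key_ctxs = htole32(128);	/* illustrative counts */
	req.num_ktls_rx_key_ctxs = htole32(128);
	req.num_quic_tx_key_ctxs = htole32(64);
	req.num_quic_rx_key_ctxs = htole32(64);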
/* hwrm_func_cfg_output (size:128b/16B) */
@@ -16124,9 +17388,9 @@ typedef struct hwrm_func_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
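Because firmware writes valid last, a consumer polls it before reading any other response field. A minimal sketch, assuming FreeBSD's DELAY() and rmb() primitives and an illustrative 10 ms budget:

	static int
	bnxt_wait_resp_valid(volatile hwrm_func_cfg_output_t *resp)
	{
		int i;

		for (i = 0; i < 10000; i++) {
			if (resp->valid == 1) {
				rmb();	/* order 'valid' before the payload reads */
				return (0);
			}
			DELAY(1);	/* poll every 1 us */
		}
		return (ETIMEDOUT);
	}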
@@ -16240,7 +17504,7 @@ typedef struct hwrm_func_qstats_output {
uint64_t tx_bcast_pkts;
/*
* Number of transmitted packets that were discarded due to
- * internal NIC resource problems. For transmit, this
+ * internal NIC resource problems. For transmit, this
* can only happen if TMP is configured to allow dropping
* in HOL blocking conditions, which is not a normal
* configuration.
@@ -16267,7 +17531,7 @@ typedef struct hwrm_func_qstats_output {
uint64_t rx_bcast_pkts;
/*
* Number of received packets that were discarded on the function
- * due to resource limitations. This can happen for 3 reasons.
+ * due to resource limitations. This can happen for 3 reasons.
* # The BD used for the packet has a bad format.
* # There were no BDs available in the ring for the packet.
* # There were no BDs available on-chip for the packet.
@@ -16298,7 +17562,7 @@ typedef struct hwrm_func_qstats_output {
* cleared. Firmware starts the sequence from zero. It increments
* the sequence number every time the statistics of the function
* are cleared, which can be triggered by a clear statistics request
- * or by freeing all statistics contexts of the function. If an user
+ * or by freeing all statistics contexts of the function. If a user
* is interested in knowing if the statistics have been cleared
* since the last query, it can keep track of this sequence number
* between queries.
@@ -16307,9 +17571,9 @@ typedef struct hwrm_func_qstats_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -16448,9 +17712,9 @@ typedef struct hwrm_func_qstats_ext_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -16515,9 +17779,9 @@ typedef struct hwrm_func_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -16581,9 +17845,9 @@ typedef struct hwrm_func_vf_resc_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -16655,14 +17919,15 @@ typedef struct hwrm_func_drv_rgtr_input {
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_16BIT_VER_MODE UINT32_C(0x4)
/*
* When this bit is '1', the function is indicating support of
- * 64bit flow handle. The firmware that only supports 64bit flow
+ * 64bit flow handle. The firmware that only supports 64bit flow
* handle should check this bit before allowing processing of
- * HWRM_CFA_FLOW_XXX commands from the requesting function as firmware
- * with 64bit flow handle support can only be compatible with drivers
- * that support 64bit flow handle. The legacy drivers that don't support
- * 64bit flow handle won't be able to use HWRM_CFA_FLOW_XXX commands when
- * running with new firmware that only supports 64bit flow handle. The new
- * firmware support 64bit flow handle returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED
+ * HWRM_CFA_FLOW_XXX commands from the requesting function as
+ * firmware with 64bit flow handle support can only be compatible
+ * with drivers that support 64bit flow handle. The legacy drivers
+ * that don't support 64bit flow handle won't be able to use
+ * HWRM_CFA_FLOW_XXX commands when running with new firmware that
+ * only supports 64bit flow handle. The new firmware support 64bit
+	 * only supports 64bit flow handle. New firmware that supports
+	 * 64bit flow handle returns HWRM_ERR_CODE_CMD_NOT_SUPPORTED
	 * status to the legacy driver when it encounters these commands.
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FLOW_HANDLE_64BIT_MODE UINT32_C(0x8)
@@ -16690,11 +17955,12 @@ typedef struct hwrm_func_drv_rgtr_input {
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ERROR_RECOVERY_SUPPORT UINT32_C(0x20)
/*
* When this bit is 1, the function is indicating the support of the
- * Master capability. The Firmware will use this capability to select the
- * Master function. The master function will be used to initiate
- * designated functionality like error recovery etc… If none of the
- * registered PF’s or trusted VF’s indicate this support, then
- * firmware will select the 1st registered PF as Master capable instance.
+ * Master capability. The Firmware will use this capability to select
+ * the Master function. The master function will be used to initiate
+ * designated functionality like error recovery etc. If none of the
+	 * registered PFs or trusted VFs indicate this support, then
+	 * firmware will select the first registered PF as Master capable
+ * instance.
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_MASTER_SUPPORT UINT32_C(0x40)
/*
@@ -16730,6 +17996,22 @@ typedef struct hwrm_func_drv_rgtr_input {
* corresponding queue configuration on the RX side
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_ASYM_QUEUE_CFG_SUPPORT UINT32_C(0x400)
+ /*
+ * When this bit is 1, the function's driver is indicating to the
+ * firmware that the Ingress NIC flows will be programmed by the
+ * TruFlow application and the firmware flow manager should reject
+	 * flow-create commands that program ingress lookup flows for this
+ * function.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_TF_INGRESS_NIC_FLOW_MODE UINT32_C(0x800)
+ /*
+ * When this bit is 1, the function's driver is indicating to the
+ * firmware that the Egress NIC flows will be programmed by the
+ * TruFlow application and the firmware flow manager should reject
+	 * flow-create commands that program Egress lookup flows for this
+ * function.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_TF_EGRESS_NIC_FLOW_MODE UINT32_C(0x1000)
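A TruFlow-managed function would typically set both bits at registration so that the firmware flow manager rejects competing flow-create commands in either direction; a sketch, eliding header setup:

	hwrm_func_drv_rgtr_input_t req = {0};

	req.flags = htole32(
	    HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_TF_INGRESS_NIC_FLOW_MODE |
	    HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_TF_EGRESS_NIC_FLOW_MODE);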
uint32_t enables;
/*
* This bit must be '1' for the os_type field to be
@@ -16756,7 +18038,10 @@ typedef struct hwrm_func_drv_rgtr_input {
* configured.
*/
#define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD UINT32_C(0x10)
- /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
uint16_t os_type;
/* Unknown */
#define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
@@ -16854,9 +18139,9 @@ typedef struct hwrm_func_drv_rgtr_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -16921,9 +18206,9 @@ typedef struct hwrm_func_drv_unrgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17057,9 +18342,9 @@ typedef struct hwrm_func_buf_rgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17129,9 +18414,9 @@ typedef struct hwrm_func_buf_unrgtr_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17181,7 +18466,17 @@ typedef struct hwrm_func_drv_qver_input {
* function.
*/
uint16_t fid;
- uint8_t unused_0[2];
+ /*
+	 * This field is used to indicate the driver type (L2 or RoCE).
+ */
+ uint8_t driver_type;
+ /* L2 driver version */
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_L2 UINT32_C(0x0)
+ /* RoCE driver version */
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_ROCE UINT32_C(0x1)
+ #define HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_LAST HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_ROCE
+ uint8_t unused_0;
} hwrm_func_drv_qver_input_t, *phwrm_func_drv_qver_input_t;
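With the new selector, querying the RoCE driver version for a function is a one-field change from the L2 case; a sketch that elides request-header setup:

	hwrm_func_drv_qver_input_t req = {0};

	req.fid = htole16(fid);
	req.driver_type = HWRM_FUNC_DRV_QVER_INPUT_DRIVER_TYPE_ROCE;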
/* hwrm_func_drv_qver_output (size:256b/32B) */
@@ -17195,7 +18490,10 @@ typedef struct hwrm_func_drv_qver_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* This value indicates the type of OS. The values are based on CIM_OperatingSystem.mof file as published by the DMTF. */
+ /*
+ * This value indicates the type of OS. The values are based on
+ * CIM_OperatingSystem.mof file as published by the DMTF.
+ */
uint16_t os_type;
/* Unknown */
#define HWRM_FUNC_DRV_QVER_OUTPUT_OS_TYPE_UNKNOWN UINT32_C(0x0)
@@ -17238,9 +18536,9 @@ typedef struct hwrm_func_drv_qver_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17291,7 +18589,7 @@ typedef struct hwrm_func_resource_qcaps_input {
uint8_t unused_0[6];
} hwrm_func_resource_qcaps_input_t, *phwrm_func_resource_qcaps_input_t;
-/* hwrm_func_resource_qcaps_output (size:576b/72B) */
+/* hwrm_func_resource_qcaps_output (size:704b/88B) */
typedef struct hwrm_func_resource_qcaps_output {
/* The specific error status for the command. */
@@ -17302,13 +18600,22 @@ typedef struct hwrm_func_resource_qcaps_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Maximum guaranteed number of VFs supported by PF. Not applicable for VFs. */
+ /*
+ * Maximum guaranteed number of VFs supported by PF. Not applicable for
+ * VFs.
+ */
uint16_t max_vfs;
- /* Maximum guaranteed number of MSI-X vectors supported by function */
+ /* Maximum guaranteed number of MSI-X vectors supported by function. */
uint16_t max_msix;
- /* Hint of strategy to be used by PF driver to reserve resources for its VF */
+ /*
+ * Hint of strategy to be used by PF driver to reserve resources for
+ * its VF.
+ */
uint16_t vf_reservation_strategy;
- /* The PF driver should evenly divide its remaining resources among all VFs. */
+ /*
+ * The PF driver should evenly divide its remaining resources among
+ * all VFs.
+ */
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL UINT32_C(0x0)
/* The PF driver should only reserve minimal resources for each VF. */
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL UINT32_C(0x1)
@@ -17318,7 +18625,7 @@ typedef struct hwrm_func_resource_qcaps_output {
*/
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC UINT32_C(0x2)
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_LAST HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC
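A PF driver can branch on this hint when carving up its leftover resources; a sketch, where resp points at the DMAed response:

	switch (le16toh(resp->vf_reservation_strategy)) {
	case HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MAXIMAL:
		/* Split the remaining resources evenly across all VFs. */
		break;
	case HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL:
		/* Reserve only each VF's guaranteed minimum. */
		break;
	case HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_VF_RESERVATION_STRATEGY_MINIMAL_STATIC:
		/* Static variant of the minimal strategy (see above). */
		break;
	}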
- /* Minimum guaranteed number of RSS/COS contexts */
+ /* Minimum guaranteed number of RSS/COS contexts. */
uint16_t min_rsscos_ctx;
/* Maximum non-guaranteed number of RSS/COS contexts */
uint16_t max_rsscos_ctx;
@@ -17351,32 +18658,42 @@ typedef struct hwrm_func_resource_qcaps_output {
/* Maximum non-guaranteed number of ring groups */
uint16_t max_hw_ring_grps;
/*
- * Maximum number of inputs into the transmit scheduler for this function.
- * The number of TX rings assigned to the function cannot exceed this value.
+ * Maximum number of inputs into the transmit scheduler for this
+ * function. The number of TX rings assigned to the function cannot
+ * exceed this value.
*/
uint16_t max_tx_scheduler_inputs;
uint16_t flags;
/*
* When this bit is '1', it indicates that VF_RESOURCE_CFG supports
- * feature to reserve all minimum resources when minimum >= 1, otherwise
- * returns an error.
+ * feature to reserve all minimum resources when minimum >= 1,
+ * otherwise returns an error.
*/
#define HWRM_FUNC_RESOURCE_QCAPS_OUTPUT_FLAGS_MIN_GUARANTEED UINT32_C(0x1)
- uint8_t unused_0[2];
- /* Minimum guaranteed number of Tx Key Contexts */
- uint32_t min_tx_key_ctxs;
- /* Maximum non-guaranteed number of Tx Key Contexts */
- uint32_t max_tx_key_ctxs;
- /* Minimum guaranteed number of Rx Key Contexts */
- uint32_t min_rx_key_ctxs;
- /* Maximum non-guaranteed number of Rx Key Contexts */
- uint32_t max_rx_key_ctxs;
- uint8_t unused_1[3];
+ /* Minimum guaranteed number of MSI-X vectors supported by function */
+ uint16_t min_msix;
+ /* Minimum guaranteed number of KTLS Tx Key Contexts */
+ uint32_t min_ktls_tx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Tx Key Contexts */
+ uint32_t max_ktls_tx_key_ctxs;
+ /* Minimum guaranteed number of KTLS Rx Key Contexts */
+ uint32_t min_ktls_rx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Rx Key Contexts */
+ uint32_t max_ktls_rx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Tx Key Contexts */
+ uint32_t min_quic_tx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Tx Key Contexts */
+ uint32_t max_quic_tx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Rx Key Contexts */
+ uint32_t min_quic_rx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Rx Key Contexts */
+ uint32_t max_quic_rx_key_ctxs;
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17387,7 +18704,7 @@ typedef struct hwrm_func_resource_qcaps_output {
*****************************/
-/* hwrm_func_vf_resource_cfg_input (size:576b/72B) */
+/* hwrm_func_vf_resource_cfg_input (size:704b/88B) */
typedef struct hwrm_func_vf_resource_cfg_input {
/* The HWRM command request type. */
@@ -17461,18 +18778,27 @@ typedef struct hwrm_func_vf_resource_cfg_input {
* error, keep all existing reservations before the call.
*/
#define HWRM_FUNC_VF_RESOURCE_CFG_INPUT_FLAGS_MIN_GUARANTEED UINT32_C(0x1)
- uint8_t unused_0[2];
- /* Minimum guaranteed number of Tx Key Contexts */
- uint32_t min_tx_key_ctxs;
- /* Maximum non-guaranteed number of Tx Key Contexts */
- uint32_t max_tx_key_ctxs;
- /* Minimum guaranteed number of Rx Key Contexts */
- uint32_t min_rx_key_ctxs;
- /* Maximum non-guaranteed number of Rx Key Contexts */
- uint32_t max_rx_key_ctxs;
+ /* Minimum guaranteed number of MSI-X vectors for the function */
+ uint16_t min_msix;
+ /* Minimum guaranteed number of KTLS Tx Key Contexts */
+ uint32_t min_ktls_tx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Tx Key Contexts */
+ uint32_t max_ktls_tx_key_ctxs;
+ /* Minimum guaranteed number of KTLS Rx Key Contexts */
+ uint32_t min_ktls_rx_key_ctxs;
+ /* Maximum non-guaranteed number of KTLS Rx Key Contexts */
+ uint32_t max_ktls_rx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Tx Key Contexts */
+ uint32_t min_quic_tx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Tx Key Contexts */
+ uint32_t max_quic_tx_key_ctxs;
+ /* Minimum guaranteed number of QUIC Rx Key Contexts */
+ uint32_t min_quic_rx_key_ctxs;
+ /* Maximum non-guaranteed number of QUIC Rx Key Contexts */
+ uint32_t max_quic_rx_key_ctxs;
} hwrm_func_vf_resource_cfg_input_t, *phwrm_func_vf_resource_cfg_input_t;
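For example, a PF pinning both a floor and a ceiling of key contexts for one VF could fill the tail of this request as follows (illustrative counts; vf_id and the other ring/context fields are elided):

	hwrm_func_vf_resource_cfg_input_t req = {0};

	req.min_ktls_tx_key_ctxs = htole32(16);
	req.max_ktls_tx_key_ctxs = htole32(64);
	req.min_ktls_rx_key_ctxs = htole32(16);
	req.max_ktls_rx_key_ctxs = htole32(64);
	req.min_quic_tx_key_ctxs = htole32(8);
	req.max_quic_tx_key_ctxs = htole32(32);
	req.min_quic_rx_key_ctxs = htole32(8);
	req.max_quic_rx_key_ctxs = htole32(32);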
-/* hwrm_func_vf_resource_cfg_output (size:320b/40B) */
+/* hwrm_func_vf_resource_cfg_output (size:384b/48B) */
typedef struct hwrm_func_vf_resource_cfg_output {
/* The specific error status for the command. */
@@ -17499,16 +18825,20 @@ typedef struct hwrm_func_vf_resource_cfg_output {
uint16_t reserved_stat_ctx;
/* Reserved number of ring groups */
uint16_t reserved_hw_ring_grps;
- /* Actual number of Tx Key Contexts reserved */
- uint32_t reserved_tx_key_ctxs;
- /* Actual number of Rx Key Contexts reserved */
- uint32_t reserved_rx_key_ctxs;
+ /* Actual number of KTLS Tx Key Contexts reserved */
+ uint32_t reserved_ktls_tx_key_ctxs;
+ /* Actual number of KTLS Rx Key Contexts reserved */
+ uint32_t reserved_ktls_rx_key_ctxs;
+ /* Actual number of QUIC Tx Key Contexts reserved */
+ uint32_t reserved_quic_tx_key_ctxs;
+ /* Actual number of QUIC Rx Key Contexts reserved */
+ uint32_t reserved_quic_rx_key_ctxs;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17592,11 +18922,17 @@ typedef struct hwrm_func_backing_store_qcaps_output {
uint16_t cq_entry_size;
/* Maximum number of VNIC context entries supported for this function. */
uint16_t vnic_max_vnic_entries;
- /* Maximum number of Ring table context entries supported for this function. */
+ /*
+ * Maximum number of Ring table context entries supported for this
+ * function.
+ */
uint16_t vnic_max_ring_table_entries;
/* Number of bytes that must be allocated for each context entry. */
uint16_t vnic_entry_size;
- /* Maximum number of statistic context entries supported for this function. */
+ /*
+ * Maximum number of statistic context entries supported for this
+ * function.
+ */
uint32_t stat_max_entries;
/* Number of bytes that must be allocated for each context entry. */
uint16_t stat_entry_size;
@@ -17618,7 +18954,8 @@ typedef struct hwrm_func_backing_store_qcaps_output {
* num_entries = num_vnics + num_l2_tx_rings + 2 * num_roce_qps + tqm_min_size
*
* Where:
- * num_vnics is the number of VNICs allocated in the VNIC backing store
+ * num_vnics is the number of VNICs allocated in the VNIC backing
+ * store
* num_l2_tx_rings is the number of L2 rings in the QP backing store
* num_roce_qps is the number of RoCE QPs in the QP backing store
* tqm_min_size is tqm_min_entries_per_ring reported by
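Expressed as code, the sizing rule above reads as follows (a sketch; the inputs are the quantities named in the comment):

	static uint32_t
	bnxt_tqm_ring_entries(uint32_t num_vnics, uint32_t num_l2_tx_rings,
	    uint32_t num_roce_qps, uint32_t tqm_min_size)
	{
		/* tqm_min_size is tqm_min_entries_per_ring as reported by
		 * HWRM_FUNC_BACKING_STORE_QCAPS. */
		return (num_vnics + num_l2_tx_rings + 2 * num_roce_qps +
		    tqm_min_size);
	}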
@@ -17783,13 +19120,18 @@ typedef struct hwrm_func_backing_store_qcaps_output {
* function.
*/
uint32_t rkc_max_entries;
+ /*
+ * Additional number of RoCE QP context entries required for this
+ * function to support fast QP destroy feature.
+ */
+ uint16_t fast_qpmd_qp_num_entries;
/* Reserved for future. */
- uint8_t rsvd1[7];
+ uint8_t rsvd1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -17905,12 +19247,12 @@ typedef struct hwrm_func_backing_store_cfg_input {
* This bit must be '1' for the vnic fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC UINT32_C(0x8)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC UINT32_C(0x8)
/*
* This bit must be '1' for the stat fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT UINT32_C(0x10)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT UINT32_C(0x10)
/*
* This bit must be '1' for the tqm_sp fields to be
* configured.
@@ -17960,7 +19302,7 @@ typedef struct hwrm_func_backing_store_cfg_input {
* This bit must be '1' for the mrav fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV UINT32_C(0x4000)
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV UINT32_C(0x4000)
/*
* This bit must be '1' for the tim fields to be
* configured.
@@ -17991,6 +19333,11 @@ typedef struct hwrm_func_backing_store_cfg_input {
* fields to be configured.
*/
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_RKC UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the number of QPs reserved for fast
+ * qp modify destroy feature to be configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP_FAST_QPMD UINT32_C(0x200000)
/* QPC page size and level. */
uint8_t qpc_pg_size_qpc_lvl;
/* QPC PBL indirect levels. */
@@ -18000,7 +19347,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_QPC_LVL_LVL_2
/* QPC page size. */
@@ -18028,7 +19378,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_SRQ_LVL_LVL_2
/* SRQ page size. */
@@ -18056,7 +19409,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_CQ_LVL_LVL_2
/* CQ page size. */
@@ -18084,7 +19440,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_VNIC_LVL_LVL_2
/* VNIC page size. */
@@ -18112,7 +19471,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_STAT_LVL_LVL_2
/* Stat page size. */
@@ -18140,7 +19502,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_SP_LVL_LVL_2
/* TQM slow path page size. */
@@ -18168,7 +19533,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING0_LVL_LVL_2
/* TQM ring 0 page size. */
@@ -18196,7 +19564,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING1_LVL_LVL_2
/* TQM ring 1 page size. */
@@ -18224,7 +19595,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING2_LVL_LVL_2
/* TQM ring 2 page size. */
@@ -18252,7 +19626,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING3_LVL_LVL_2
/* TQM ring 3 page size. */
@@ -18280,7 +19657,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING4_LVL_LVL_2
/* TQM ring 4 page size. */
@@ -18308,7 +19688,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING5_LVL_LVL_2
/* TQM ring 5 page size. */
@@ -18336,7 +19719,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING6_LVL_LVL_2
/* TQM ring 6 page size. */
@@ -18364,7 +19750,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TQM_RING7_LVL_LVL_2
/* TQM ring 7 page size. */
@@ -18392,7 +19781,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_MRAV_LVL_LVL_2
/* MR/AV page size. */
@@ -18420,7 +19812,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TIM_LVL_LVL_2
/* Timer page size. */
@@ -18487,11 +19882,11 @@ typedef struct hwrm_func_backing_store_cfg_input {
* num_entries = num_vnics + num_l2_tx_rings + 2 * num_roce_qps + tqm_min_size
*
* Where:
- * num_vnics is the number of VNICs allocated in the VNIC backing store
- * num_l2_tx_rings is the number of L2 rings in the QP backing store
- * num_roce_qps is the number of RoCE QPs in the QP backing store
- * tqm_min_size is tqm_min_entries_per_ring reported by
- * HWRM_FUNC_BACKING_STORE_QCAPS
+	 * num_vnics is the number of VNICs allocated in the VNIC backing
+	 * store
+	 * num_l2_tx_rings is the number of L2 rings in the QP backing store
+	 * num_roce_qps is the number of RoCE QPs in the QP backing store
+	 * tqm_min_size is tqm_min_entries_per_ring reported by
+	 * HWRM_FUNC_BACKING_STORE_QCAPS
*
* Note that TQM ring sizes cannot be extended while the system is
* operational. If a PF driver needs to extend a TQM ring, it needs
@@ -18777,7 +20172,10 @@ typedef struct hwrm_func_backing_store_cfg_input {
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_TKC_LVL_LVL_2
/* Tx KTLS context page size. */
@@ -18827,8 +20225,11 @@ typedef struct hwrm_func_backing_store_cfg_input {
/* 1GB. */
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_RKC_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_RKC_PG_SIZE_LAST HWRM_FUNC_BACKING_STORE_CFG_INPUT_RKC_PG_SIZE_PG_1G
- /* Reserved for future. */
- uint8_t rsvd[2];
+ /*
+ * Number of RoCE QP context entries reserved for this
+ * function to support fast QP modify destroy feature.
+ */
+ uint16_t qp_num_fast_qpmd_entries;
} hwrm_func_backing_store_cfg_input_t, *phwrm_func_backing_store_cfg_input_t;
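To opt into the fast QP modify destroy carve-out, a driver copies the count reported by HWRM_FUNC_BACKING_STORE_QCAPS into this request; a sketch showing only the QPMD-specific pieces (the regular QP backing-store fields and their enable bit must still be programmed as usual):

	hwrm_func_backing_store_cfg_input_t req = {0};

	req.enables =
	    htole32(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP_FAST_QPMD);
	/* fast_qpmd_qp_num_entries comes from the QCAPS response. */
	req.qp_num_fast_qpmd_entries = htole16(qcaps.fast_qpmd_qp_num_entries);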
/* hwrm_func_backing_store_cfg_output (size:128b/16B) */
@@ -18845,9 +20246,9 @@ typedef struct hwrm_func_backing_store_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -18935,12 +20336,12 @@ typedef struct hwrm_func_backing_store_qcfg_output {
* This bit must be '1' for the vnic fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_VNIC UINT32_C(0x8)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_VNIC UINT32_C(0x8)
/*
* This bit must be '1' for the stat fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_STAT UINT32_C(0x10)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_STAT UINT32_C(0x10)
/*
* This bit must be '1' for the tqm_sp fields to be
* configured.
@@ -18990,7 +20391,7 @@ typedef struct hwrm_func_backing_store_qcfg_output {
* This bit must be '1' for the mrav fields to be
* configured.
*/
- #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_MRAV UINT32_C(0x4000)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_MRAV UINT32_C(0x4000)
/*
* This bit must be '1' for the tim fields to be
* configured.
@@ -19021,6 +20422,11 @@ typedef struct hwrm_func_backing_store_qcfg_output {
* fields to be configured.
*/
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_RKC UINT32_C(0x100000)
+ /*
+ * This bit must be '1' for the number of QPs reserved for fast
+ * qp modify destroy feature to be configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_ENABLES_QP_FAST_QPMD UINT32_C(0x200000)
/* QPC page size and level. */
uint8_t qpc_pg_size_qpc_lvl;
/* QPC PBL indirect levels. */
@@ -19030,7 +20436,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_QPC_LVL_LVL_2
/* QPC page size. */
@@ -19058,7 +20467,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_SRQ_LVL_LVL_2
/* SRQ page size. */
@@ -19086,7 +20498,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_CQ_LVL_LVL_2
/* CQ page size. */
@@ -19114,7 +20529,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_VNIC_LVL_LVL_2
/* VNIC page size. */
@@ -19142,7 +20560,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_STAT_LVL_LVL_2
/* Stat page size. */
@@ -19170,7 +20591,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_SP_LVL_LVL_2
/* TQM slow path page size. */
@@ -19198,7 +20622,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING0_LVL_LVL_2
/* TQM ring 0 page size. */
@@ -19226,7 +20653,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING1_LVL_LVL_2
/* TQM ring 1 page size. */
@@ -19254,7 +20684,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING2_LVL_LVL_2
/* TQM ring 2 page size. */
@@ -19282,7 +20715,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING3_LVL_LVL_2
/* TQM ring 3 page size. */
@@ -19310,7 +20746,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING4_LVL_LVL_2
/* TQM ring 4 page size. */
@@ -19338,7 +20777,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING5_LVL_LVL_2
/* TQM ring 5 page size. */
@@ -19366,7 +20808,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING6_LVL_LVL_2
/* TQM ring 6 page size. */
@@ -19394,7 +20839,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TQM_RING7_LVL_LVL_2
/* TQM ring 7 page size. */
@@ -19422,7 +20870,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_MRAV_LVL_LVL_2
/* MR/AV page size. */
@@ -19450,7 +20901,10 @@ typedef struct hwrm_func_backing_store_qcfg_output {
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2 UINT32_C(0x2)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_TIM_LVL_LVL_2
/* Timer page size. */
@@ -19734,10 +21188,15 @@ typedef struct hwrm_func_backing_store_qcfg_output {
/* 1GB. */
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_RKC_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
#define HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_RKC_PG_SIZE_LAST HWRM_FUNC_BACKING_STORE_QCFG_OUTPUT_RKC_PG_SIZE_PG_1G
- uint8_t unused_1[5];
+ /*
+ * Number of RoCE QP context entries required for this
+	 * function to support the fast QP modify/destroy feature.
+ */
+ uint16_t qp_num_fast_qpmd_entries;
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as 1
+ * is completely written to RAM. This field should be read as 1
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field
@@ -20101,7 +21560,7 @@ typedef struct hwrm_error_recovery_qcfg_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field
@@ -20164,9 +21623,9 @@ typedef struct hwrm_func_echo_response_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20312,9 +21771,9 @@ typedef struct hwrm_func_ptp_pin_qcfg_output {
uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20502,9 +21961,9 @@ typedef struct hwrm_func_ptp_pin_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20632,7 +22091,9 @@ typedef struct hwrm_func_ptp_cfg_input {
#define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_8K UINT32_C(0x2)
 	/* 10MHz sync in frequency. */
#define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_10M UINT32_C(0x3)
- #define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_LAST HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_10M
+	/* 25MHz sync in frequency. */
+ #define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_25M UINT32_C(0x4)
+ #define HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_LAST HWRM_FUNC_PTP_CFG_INPUT_PTP_FREQ_ADJ_DLL_PHASE_25M
uint8_t unused_0[3];
/*
* Period in nanoseconds (ns) for external signal
@@ -20684,9 +22145,9 @@ typedef struct hwrm_func_ptp_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20771,9 +22232,9 @@ typedef struct hwrm_func_ptp_ts_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -20915,9 +22376,9 @@ typedef struct hwrm_func_ptp_ext_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -21012,20 +22473,229 @@ typedef struct hwrm_func_ptp_ext_qcfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} hwrm_func_ptp_ext_qcfg_output_t, *phwrm_func_ptp_ext_qcfg_output_t;
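The 'valid' byte described repeatedly above is the completion sentinel for every HWRM response DMA'd to host memory: firmware writes it last, so a consumer must observe it as '1' before trusting any other response field. A minimal polling sketch, assuming a coherently mapped response buffer; a production driver would add a read memory barrier and real timeout handling:

/*
 * Poll the response 'valid' byte, per the contract described above.
 * 'spins' bounds the wait; callers supply their own delay between
 * polls. This is a sketch, not the driver's actual wait logic.
 */
static int
hwrm_resp_ready(const volatile uint8_t *valid, int spins)
{
	while (spins-- > 0) {
		if (*valid == 1)
			return (1);	/* response is completely written */
	}
	return (0);		/* timed out; response must not be used */
}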
+/*************************************
+ * hwrm_func_timedtx_pacing_rate_add *
+ *************************************/
+
+
+/* hwrm_func_timedtx_pacing_rate_add_input (size:192b/24B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_add_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * This field indicates the TimedTx pacing rate in kbps.
+	 * The driver needs to add the rate into the hardware rate table
+	 * before requesting the pacing rate for a flow in a TimedTX BD;
+	 * this addition is done once per function rather than per
+	 * flow/QP within the function.
+ */
+ uint32_t rate;
+ uint8_t unused_0[4];
+} hwrm_func_timedtx_pacing_rate_add_input_t, *phwrm_func_timedtx_pacing_rate_add_input_t;
+
+/* hwrm_func_timedtx_pacing_rate_add_output (size:128b/16B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_add_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field indicates the logical rate ID that is assigned to the
+ * rate in the rate table. The driver should use this ID for future
+ * reference to this rate.
+ */
+ uint16_t rate_id;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_timedtx_pacing_rate_add_output_t, *phwrm_func_timedtx_pacing_rate_add_output_t;
+
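Putting the two structures together, the add operation takes a rate in kbps and hands back a logical rate_id for use in TimedTX BDs. A sketch of how a driver might build the request and capture the ID; hwrm_send_message() and the HWRM_FUNC_TIMEDTX_PACING_RATE_ADD request-type constant are assumptions standing in for the driver's own HWRM transport:

/*
 * Sketch: register a TimedTx pacing rate and return its rate_id.
 * hwrm_send_message() is a hypothetical transport helper.
 */
static int
timedtx_rate_add(void *hwrm, uint32_t rate_kbps, uint16_t *rate_id)
{
	hwrm_func_timedtx_pacing_rate_add_input_t req = {0};
	hwrm_func_timedtx_pacing_rate_add_output_t resp = {0};
	int rc;

	req.req_type = HWRM_FUNC_TIMEDTX_PACING_RATE_ADD; /* assumed constant */
	req.rate = rate_kbps;	/* added once per function, not per flow/QP */

	rc = hwrm_send_message(hwrm, &req, sizeof(req), &resp, sizeof(resp));
	if (rc != 0)
		return (rc);
	*rate_id = resp.rate_id;	/* quote this ID in TimedTX BDs */
	return (0);
}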
+/****************************************
+ * hwrm_func_timedtx_pacing_rate_delete *
+ ****************************************/
+
+
+/* hwrm_func_timedtx_pacing_rate_delete_input (size:192b/24B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_delete_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * The logical rate ID that is returned in the TimedTX pacing rate
+ * add operation.
+ */
+ uint16_t rate_id;
+ uint8_t unused_0[6];
+} hwrm_func_timedtx_pacing_rate_delete_input_t, *phwrm_func_timedtx_pacing_rate_delete_input_t;
+
+/* hwrm_func_timedtx_pacing_rate_delete_output (size:128b/16B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_delete_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_timedtx_pacing_rate_delete_output_t, *phwrm_func_timedtx_pacing_rate_delete_output_t;
+
+/***************************************
+ * hwrm_func_timedtx_pacing_rate_query *
+ ***************************************/
+
+
+/* hwrm_func_timedtx_pacing_rate_query_input (size:192b/24B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t unused_0[8];
+} hwrm_func_timedtx_pacing_rate_query_input_t, *phwrm_func_timedtx_pacing_rate_query_input_t;
+
+/* hwrm_func_timedtx_pacing_rate_query_output (size:4224b/528B) */
+
+typedef struct hwrm_func_timedtx_pacing_rate_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field indicates the rates that the function has added into
+ * the hardware rate table. This is an array of 128 entries. Starting
+ * with index 0, registered rates are populated in the initial entries
+	 * of the array; the remaining entries are filled with 0.
+ */
+ uint32_t rates[128];
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_timedtx_pacing_rate_query_output_t, *phwrm_func_timedtx_pacing_rate_query_output_t;
+
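Because registered rates occupy a zero-terminated prefix of the 128-entry array, the number of rates in use can be recovered with a simple scan (a rate of 0 kbps is never a valid registration):

/* Sketch: count the rates this function has registered. */
static int
timedtx_rate_count(const hwrm_func_timedtx_pacing_rate_query_output_t *resp)
{
	int i;

	for (i = 0; i < 128; i++)
		if (resp->rates[i] == 0)
			break;	/* zero marks the end of the populated prefix */
	return (i);
}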
/***************************
* hwrm_func_key_ctx_alloc *
***************************/
-/* hwrm_func_key_ctx_alloc_input (size:320b/40B) */
+/* hwrm_func_key_ctx_alloc_input (size:384b/48B) */
typedef struct hwrm_func_key_ctx_alloc_input {
/* The HWRM command request type. */
@@ -21058,9 +22728,26 @@ typedef struct hwrm_func_key_ctx_alloc_input {
uint64_t resp_addr;
/* Function ID. */
uint16_t fid;
- /* Number of Key Contexts to be allocated. */
+ /*
+ * Number of Key Contexts to be allocated.
+ * When running in the XID partition mode, if the call is made by
+ * a VF driver, this field specifies the number of XIDs requested
+ * by the VF driver. The XID partitions are managed by the PF
+ * driver in XID partition mode and the VF command will be
+ * redirected to the PF driver. The PF driver may reduce this
+ * number if it cannot allocate a big enough block of XID
+ * partitions to satisfy the request.
+ * This field must not exceed the maximum batch size specified in
+ * the max_key_ctxs_alloc field of the HWRM_FUNC_QCAPS response,
+	 * must not be zero, and must be an integer multiple of the
+ * partition size specified in the ctxs_per_partition field of
+ * the HWRM_FUNC_QCAPS response.
+ */
uint16_t num_key_ctxs;
- /* DMA buffer size in bytes. */
+ /*
+	 * DMA buffer size in bytes. This field is invalid in the XID
+ * partition mode.
+ */
uint32_t dma_bufr_size_bytes;
/* Key Context type. */
uint8_t key_ctx_type;
@@ -21074,11 +22761,24 @@ typedef struct hwrm_func_key_ctx_alloc_input {
#define HWRM_FUNC_KEY_CTX_ALLOC_INPUT_KEY_CTX_TYPE_QUIC_RX UINT32_C(0x3)
#define HWRM_FUNC_KEY_CTX_ALLOC_INPUT_KEY_CTX_TYPE_LAST HWRM_FUNC_KEY_CTX_ALLOC_INPUT_KEY_CTX_TYPE_QUIC_RX
uint8_t unused_0[7];
- /* Host DMA address to send back KTLS context IDs. */
+ /*
+ * Host DMA address to send back KTLS context IDs. This field is
+ * invalid in the XID partition mode.
+ */
uint64_t host_dma_addr;
+ /*
+ * This field is only used by the PF driver that manages the XID
+ * partitions. This field specifies the starting XID of one or
+ * more contiguous XID partitions allocated by the PF driver.
+ * This field is not used by the VF driver.
+ * If the call is successful, this starting XID value will be
+ * returned in the partition_start_xid field of the response.
+ */
+ uint32_t partition_start_xid;
+ uint8_t unused_1[4];
} hwrm_func_key_ctx_alloc_input_t, *phwrm_func_key_ctx_alloc_input_t;
-/* hwrm_func_key_ctx_alloc_output (size:128b/16B) */
+/* hwrm_func_key_ctx_alloc_output (size:192b/24B) */
typedef struct hwrm_func_key_ctx_alloc_output {
/* The specific error status for the command. */
@@ -21089,7 +22789,7 @@ typedef struct hwrm_func_key_ctx_alloc_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Actual number of Key Contexts allocated. */
+ /* Number of Key Contexts that have been allocated. */
uint16_t num_key_ctxs_allocated;
/* Control flags. */
uint8_t flags;
@@ -21097,21 +22797,116 @@ typedef struct hwrm_func_key_ctx_alloc_output {
* When set, it indicates that all key contexts allocated by this
* command are contiguous. As a result, the driver has to read the
* start context ID from the first entry of the DMA data buffer
- * and figures out the end context ID by “start context ID +
- * num_key_ctxs_allocated - 1”.
+	 * and figure out the end context ID by 'start context ID +
+ * num_key_ctxs_allocated - 1'. In XID partition mode,
+ * this bit should always be set.
*/
#define HWRM_FUNC_KEY_CTX_ALLOC_OUTPUT_FLAGS_KEY_CTXS_CONTIGUOUS UINT32_C(0x1)
- uint8_t unused_0[4];
+ uint8_t unused_0;
+ /*
+ * This field is only valid in the XID partition mode. It indicates
+ * the starting XID that has been allocated.
+ */
+ uint32_t partition_start_xid;
+ uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} hwrm_func_key_ctx_alloc_output_t, *phwrm_func_key_ctx_alloc_output_t;
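The num_key_ctxs constraints above come from two HWRM_FUNC_QCAPS fields. A sketch of the pre-flight check a driver might run before issuing the allocation; max_key_ctxs_alloc and ctxs_per_partition are assumed to have been cached from the QCAPS response:

/*
 * Sketch: validate a requested XID count against the limits
 * described in the num_key_ctxs comment above.
 */
static int
key_ctx_count_ok(uint16_t num_key_ctxs, uint16_t max_key_ctxs_alloc,
    uint16_t ctxs_per_partition)
{
	if (num_key_ctxs == 0 || num_key_ctxs > max_key_ctxs_alloc)
		return (0);
	if (ctxs_per_partition == 0)
		return (0);	/* defensive; QCAPS should never report 0 */
	/* Must be a whole number of partitions in XID partition mode. */
	return (num_key_ctxs % ctxs_per_partition == 0);
}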
+/**************************
+ * hwrm_func_key_ctx_free *
+ **************************/
+
+
+/* hwrm_func_key_ctx_free_input (size:256b/32B) */
+
+typedef struct hwrm_func_key_ctx_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer to a host buffer into which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Function ID. */
+ uint16_t fid;
+ /* Key Context type. */
+ uint8_t key_ctx_type;
+ /* KTLS Tx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_TX UINT32_C(0x0)
+ /* KTLS Rx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_RX UINT32_C(0x1)
+ /* QUIC Tx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_TX UINT32_C(0x2)
+ /* QUIC Rx Key Context type. */
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_RX UINT32_C(0x3)
+ #define HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_LAST HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_QUIC_RX
+ uint8_t unused_0;
+ /* Starting XID of the partition that needs to be freed. */
+ uint32_t partition_start_xid;
+ /*
+ * Number of entries to be freed.
+ * When running in the XID partition mode, this field is only
+ * used by the PF driver that manages the XID partitions.
+ * The PF driver specifies the number of XIDs to be freed and
+ * this number is always equal to the number of XIDs previously
+ * allocated successfully using HWRM_FUNC_KEY_CTX_ALLOC.
+ * This field is not used by the VF driver.
+ */
+ uint16_t num_entries;
+ uint8_t unused_1[6];
+} hwrm_func_key_ctx_free_input_t, *phwrm_func_key_ctx_free_input_t;
+
+/* hwrm_func_key_ctx_free_output (size:128b/16B) */
+
+typedef struct hwrm_func_key_ctx_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t rsvd0[7];
+ /*
+ * This field is used in Output records to indicate that the
+ * output is completely written to RAM. This field should be
+ * read as '1' to indicate that the output has been completely
+ * written. When writing a command completion or response to
+ * an internal processor, the order of writes has to be such
+ * that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_key_ctx_free_output_t, *phwrm_func_key_ctx_free_output_t;
+
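Since num_entries on free must equal what the matching alloc returned, a PF driver managing XID partitions would keep the (partition_start_xid, count, type) triple from each alloc response and replay it verbatim. A sketch, where the record type is a hypothetical driver-side bookkeeping structure:

/* Hypothetical per-partition record kept by a managing PF driver. */
struct xid_partition_rec {
	uint32_t start_xid;	/* alloc response partition_start_xid */
	uint16_t num_entries;	/* alloc response num_key_ctxs_allocated */
	uint8_t	key_ctx_type;	/* HWRM_FUNC_KEY_CTX_FREE_INPUT_KEY_CTX_TYPE_* */
};

/* Sketch: fill a free request from the saved record. */
static void
key_ctx_free_fill(hwrm_func_key_ctx_free_input_t *req, uint16_t fid,
    const struct xid_partition_rec *rec)
{
	req->fid = fid;
	req->key_ctx_type = rec->key_ctx_type;
	req->partition_start_xid = rec->start_xid;
	req->num_entries = rec->num_entries;	/* must equal the alloc count */
}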
/**********************************
* hwrm_func_backing_store_cfg_v2 *
**********************************/
@@ -21153,49 +22948,79 @@ typedef struct hwrm_func_backing_store_cfg_v2_input {
/* Queue pair. */
#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QP UINT32_C(0x0)
/* Shared receive queue. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
/* Completion queue. */
#define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ UINT32_C(0x2)
/* Virtual NIC. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
/* Statistic context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_STAT UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_STAT UINT32_C(0x4)
/* Slow-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
/* Fast-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
/* Memory Region and Memory Address Vector Context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
/* TIM. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TKC UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RKC UINT32_C(0x14)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TIM UINT32_C(0xf)
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TX_CK UINT32_C(0x13)
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RX_CK UINT32_C(0x14)
/* Mid-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
/* SQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
/* RQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
/* SRQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
/* CQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_TKC UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_QUIC_RKC UINT32_C(0x1b)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TBL_SCOPE UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_XID_PARTITION UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRT_TRACE UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_SRT2_TRACE UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CRT_TRACE UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CRT2_TRACE UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RIGP0_TRACE UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x24)
+ /* TimedTx pacing TQM ring. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_TTX_PACING_TQM_RING UINT32_C(0x25)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CA0_TRACE UINT32_C(0x26)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CA1_TRACE UINT32_C(0x27)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_CA2_TRACE UINT32_C(0x28)
+ /* RIGP1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_RIGP1_TRACE UINT32_C(0x29)
/* Invalid type. */
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
- #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_INVALID
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_TYPE_INVALID
/*
* Instance of the backing store type. It is zero-based,
* which means "0" indicates the first instance. For backing
 * stores with a single instance only, leave this field as 0.
* 1. If the backing store type is MPC TQM ring, use the following
- * instance value to MPC client mapping:
+ * instance value to map to MPC clients:
* TCE (0), RCE (1), TE_CFA(2), RE_CFA (3), PRIMATE(4)
+ * 2. If the backing store type is TBL_SCOPE, use the following
+ * instance value to map to table scope regions:
+ * RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
+ * 3. If the backing store type is XID partition, use the following
+ * instance value to map to context types:
+ * TX_CK (0), RX_CK (1)
*/
uint16_t instance;
/* Control flags. */
@@ -21222,10 +23047,10 @@ typedef struct hwrm_func_backing_store_cfg_v2_input {
* The size specified in the command will be the new size to be
* configured. The operation is only valid when the specific backing
* store has been configured before. Otherwise, the firmware will
- * return an error. The driver needs to zero out the “entry_size”,
- * “flags”, “page_dir”, and “page_size_pbl_level” fields, and the
+ * return an error. The driver needs to zero out the 'entry_size',
+ * 'flags', 'page_dir', and 'page_size_pbl_level' fields, and the
* firmware will ignore these inputs. Further, the firmware expects
- * the “num_entries” and any valid split entries to be no less than
+ * the 'num_entries' and any valid split entries to be no less than
* the initial value that has been configured. If not, it will
* return an error code.
*/
@@ -21290,7 +23115,9 @@ typedef struct hwrm_func_backing_store_cfg_v2_input {
* | SRQ | srq_split_entries |
* | CQ | cq_split_entries |
* | VINC | vnic_split_entries |
- * | MRAV | marv_split_entries |
+ * | MRAV | mrav_split_entries |
+ * | TS | ts_split_entries |
+ * | CK | ck_split_entries |
*/
uint32_t split_entry_0;
/* Split entry #1. */
@@ -21299,6 +23126,20 @@ typedef struct hwrm_func_backing_store_cfg_v2_input {
uint32_t split_entry_2;
/* Split entry #3. */
uint32_t split_entry_3;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the next_bs_offset field to be
+ * configured.
+ */
+ #define HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_ENABLES_NEXT_BS_OFFSET UINT32_C(0x1)
+ /*
+ * This field specifies the next byte offset of the backing store
+ * for the firmware to use. The driver can use this field to
+	 * direct the firmware to resume logging to the host buffer at
+	 * the offset where the firmware last wrote before it restarted,
+	 * e.g. due to an error recovery.
+ */
+ uint32_t next_bs_offset;
} hwrm_func_backing_store_cfg_v2_input_t, *phwrm_func_backing_store_cfg_v2_input_t;
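The new enables/next_bs_offset pair follows the usual HWRM convention that an optional field is honored only when its enable bit is set. A sketch of restoring a saved logging offset after firmware recovery; where the offset is persisted is up to the driver:

/*
 * Sketch: ask firmware to resume trace logging at a saved byte
 * offset within an already-configured backing store region.
 */
static void
backing_store_set_resume_offset(hwrm_func_backing_store_cfg_v2_input_t *req,
    uint32_t saved_offset)
{
	req->enables |=
	    HWRM_FUNC_BACKING_STORE_CFG_V2_INPUT_ENABLES_NEXT_BS_OFFSET;
	req->next_bs_offset = saved_offset; /* ignored unless the bit is set */
}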
/* hwrm_func_backing_store_cfg_v2_output (size:128b/16B) */
@@ -21315,7 +23156,7 @@ typedef struct hwrm_func_backing_store_cfg_v2_output {
uint8_t rsvd0[7];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -21365,46 +23206,79 @@ typedef struct hwrm_func_backing_store_qcfg_v2_input {
/* Queue pair. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_QP UINT32_C(0x0)
/* Shared receive queue. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
/* Completion queue. */
#define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CQ UINT32_C(0x2)
/* Virtual NIC. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
/* Statistic context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_STAT UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_STAT UINT32_C(0x4)
/* Slow-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
/* Fast-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
/* Memory Region and Memory Address Vector Context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
/* TIM. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TIM UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TKC UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RKC UINT32_C(0x14)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TIM UINT32_C(0xf)
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TX_CK UINT32_C(0x13)
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RX_CK UINT32_C(0x14)
/* Mid-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
/* SQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
/* RQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
/* SRQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
/* CQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_QUIC_TKC UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_QUIC_RKC UINT32_C(0x1b)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TBL_SCOPE UINT32_C(0x1c)
+ /* VF XID partition in-use table. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_XID_PARTITION_TABLE UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRT_TRACE UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_SRT2_TRACE UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CRT_TRACE UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CRT2_TRACE UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RIGP0_TRACE UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x24)
+ /* TimedTx pacing TQM ring. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_TTX_PACING_TQM_RING UINT32_C(0x25)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CA0_TRACE UINT32_C(0x26)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CA1_TRACE UINT32_C(0x27)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_CA2_TRACE UINT32_C(0x28)
+ /* RIGP1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_RIGP1_TRACE UINT32_C(0x29)
/* Invalid type. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_INVALID
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCFG_V2_INPUT_TYPE_INVALID
/*
* Instance of the backing store type. It is zero-based,
* which means "0" indicates the first instance. For backing
 * stores with a single instance only, leave this field as 0.
+ * 1. If the backing store type is MPC TQM ring, use the following
+ * instance value to map to MPC clients:
+ * TCE (0), RCE (1), TE_CFA(2), RE_CFA (3), PRIMATE(4)
+ * 2. If the backing store type is TBL_SCOPE, use the following
+ * instance value to map to table scope regions:
+ * RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
+ * 3. If the backing store type is XID partition, use the following
+ * instance value to map to context types:
+ * TX_CK (0), RX_CK (1)
*/
uint16_t instance;
uint8_t rsvd[4];
@@ -21424,40 +23298,73 @@ typedef struct hwrm_func_backing_store_qcfg_v2_output {
/* Type of backing store to be configured. */
uint16_t type;
/* Queue pair. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QP UINT32_C(0x0)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QP UINT32_C(0x0)
/* Shared receive queue. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRQ UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRQ UINT32_C(0x1)
/* Completion queue. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CQ UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CQ UINT32_C(0x2)
/* Virtual NIC. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_VNIC UINT32_C(0x3)
/* Statistic context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_STAT UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_STAT UINT32_C(0x4)
/* Slow-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
/* Fast-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
/* Memory Region and Memory Address Vector Context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_MRAV UINT32_C(0xe)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_MRAV UINT32_C(0xe)
/* TIM. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TIM UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TKC UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RKC UINT32_C(0x14)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TIM UINT32_C(0xf)
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TX_CK UINT32_C(0x13)
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RX_CK UINT32_C(0x14)
/* Mid-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QUIC_TKC UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_QUIC_RKC UINT32_C(0x1b)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TBL_SCOPE UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_XID_PARTITION UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRT_TRACE UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_SRT2_TRACE UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CRT_TRACE UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CRT2_TRACE UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RIGP0_TRACE UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x24)
+ /* TimedTx pacing TQM ring. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_TTX_PACING_TQM_RING UINT32_C(0x25)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CA0_TRACE UINT32_C(0x26)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CA1_TRACE UINT32_C(0x27)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_CA2_TRACE UINT32_C(0x28)
+ /* RIGP1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_RIGP1_TRACE UINT32_C(0x29)
/* Invalid type. */
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_INVALID UINT32_C(0xffff)
- #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_INVALID
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_INVALID UINT32_C(0xffff)
+ #define HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCFG_V2_OUTPUT_TYPE_INVALID
/*
* Instance of the backing store type. It is zero-based,
* which means "0" indicates the first instance. For backing
 * stores with a single instance only, leave this field as 0.
+ * 1. If the backing store type is MPC TQM ring, use the following
+ * instance value to map to MPC clients:
+ * TCE (0), RCE (1), TE_CFA(2), RE_CFA (3), PRIMATE(4)
+ * 2. If the backing store type is TBL_SCOPE, use the following
+ * instance value to map to table scope regions:
+ * RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP(2), TE_CFA_ACT (3)
+ * 3. If the backing store type is XID partition, use the following
+ * instance value to map to context types:
+ * TX_CK (0), RX_CK (1)
*/
uint16_t instance;
/* Control flags. */
@@ -21521,7 +23428,9 @@ typedef struct hwrm_func_backing_store_qcfg_v2_output {
* | SRQ | srq_split_entries |
* | CQ | cq_split_entries |
* | VINC | vnic_split_entries |
- * | MRAV | marv_split_entries |
+ * | MRAV | mrav_split_entries |
+ * | TS | ts_split_entries |
+ * | CK | ck_split_entries |
*/
uint32_t split_entry_0;
/* Split entry #1. */
@@ -21533,7 +23442,7 @@ typedef struct hwrm_func_backing_store_qcfg_v2_output {
uint8_t rsvd2[7];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -21550,7 +23459,12 @@ typedef struct qpc_split_entries {
uint32_t qp_num_l2_entries;
/* Number of QP1 entries. */
uint32_t qp_num_qp1_entries;
- uint64_t rsvd;
+ /*
+ * Number of RoCE QP context entries required for this
+	 * function to support the fast QP modify/destroy feature.
+ */
+ uint32_t qp_num_fast_qpmd_entries;
+ uint32_t rsvd;
} qpc_split_entries_t, *pqpc_split_entries_t;
/* Common structure to cast SRQ split entries. This casting is required in the following HWRM command inputs/outputs if the backing store type is SRQ. 1. hwrm_func_backing_store_cfg_v2_input 2. hwrm_func_backing_store_qcfg_v2_output 3. hwrm_func_backing_store_qcaps_v2_output */
@@ -21593,6 +23507,37 @@ typedef struct mrav_split_entries {
uint64_t rsvd2;
} mrav_split_entries_t, *pmrav_split_entries_t;
+/* Common structure to cast TBL_SCOPE split entries. This casting is required in the following HWRM command inputs/outputs if the backing store type is TBL_SCOPE. 1. hwrm_func_backing_store_cfg_v2_input 2. hwrm_func_backing_store_qcfg_v2_output 3. hwrm_func_backing_store_qcaps_v2_output */
+/* ts_split_entries (size:128b/16B) */
+
+typedef struct ts_split_entries {
+ /* Max number of TBL_SCOPE region entries (QCAPS). */
+ uint32_t region_num_entries;
+ /* tsid to configure (CFG). */
+ uint8_t tsid;
+ /*
+ * Lkup static bucket count (power of 2).
+	 * The array is indexed by enum cfa_dir.
+ */
+ uint8_t lkup_static_bkt_cnt_exp[2];
+ uint8_t rsvd;
+ uint64_t rsvd2;
+} ts_split_entries_t, *pts_split_entries_t;
+
+/* Common structure to cast crypto key split entries. This casting is required in the following HWRM command inputs/outputs if the backing store type is TX_CK or RX_CK. 1. hwrm_func_backing_store_cfg_v2_input 2. hwrm_func_backing_store_qcfg_v2_output 3. hwrm_func_backing_store_qcaps_v2_output */
+/* ck_split_entries (size:128b/16B) */
+
+typedef struct ck_split_entries {
+ /*
+	 * Number of QUIC backing store entries; the number of KTLS
+	 * backing store entries is then the total number of crypto key
+	 * entries minus this number.
+ */
+ uint32_t num_quic_entries;
+ uint32_t rsvd;
+ uint64_t rsvd2;
+} ck_split_entries_t, *pck_split_entries_t;
+
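All of the *_split_entries structures above are overlays on the same four 32-bit words, split_entry_0 through split_entry_3, and the comments require casting that group before access. A sketch for the crypto-key case; the same pattern applies to qpc_split_entries and ts_split_entries, and it assumes the generated structures keep those four words contiguous:

/*
 * Sketch: view the generic split-entry words of a QCFG_V2 response
 * as crypto-key split entries (backing store type TX_CK or RX_CK).
 */
static uint32_t
ck_num_quic_entries(const hwrm_func_backing_store_qcfg_v2_output_t *resp)
{
	const ck_split_entries_t *ck;

	/* split_entry_0..3 are contiguous; overlay the ck view on them. */
	ck = (const void *)&resp->split_entry_0;
	return (ck->num_quic_entries);
}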
/************************************
* hwrm_func_backing_store_qcaps_v2 *
************************************/
@@ -21634,42 +23579,66 @@ typedef struct hwrm_func_backing_store_qcaps_v2_input {
/* Queue pair. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_QP UINT32_C(0x0)
/* Shared receive queue. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRQ UINT32_C(0x1)
/* Completion queue. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CQ UINT32_C(0x2)
/* Virtual NIC. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_VNIC UINT32_C(0x3)
/* Statistic context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_STAT UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_STAT UINT32_C(0x4)
/* Slow-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
/* Fast-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
/* Memory Region and Memory Address Vector Context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_MRAV UINT32_C(0xe)
/* TIM. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TIM UINT32_C(0xf)
- /* Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TKC UINT32_C(0x13)
- /* Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RKC UINT32_C(0x14)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TIM UINT32_C(0xf)
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TX_CK UINT32_C(0x13)
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RX_CK UINT32_C(0x14)
/* Mid-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
/* SQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
/* RQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
/* SRQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
/* CQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_QUIC_TKC UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_QUIC_RKC UINT32_C(0x1b)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TBL_SCOPE UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_XID_PARTITION UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRT_TRACE UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_SRT2_TRACE UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CRT_TRACE UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CRT2_TRACE UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RIGP0_TRACE UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x24)
+ /* TimedTx pacing TQM ring. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_TTX_PACING_TQM_RING UINT32_C(0x25)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CA0_TRACE UINT32_C(0x26)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CA1_TRACE UINT32_C(0x27)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_CA2_TRACE UINT32_C(0x28)
+ /* RIGP1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_RIGP1_TRACE UINT32_C(0x29)
/* Invalid type. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_INVALID
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_INVALID UINT32_C(0xffff)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCAPS_V2_INPUT_TYPE_INVALID
uint8_t rsvd[6];
} hwrm_func_backing_store_qcaps_v2_input_t, *phwrm_func_backing_store_qcaps_v2_input_t;
@@ -21689,53 +23658,77 @@ typedef struct hwrm_func_backing_store_qcaps_v2_output {
/* Queue pair. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_QP UINT32_C(0x0)
/* Shared receive queue. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ UINT32_C(0x1)
/* Completion queue. */
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ UINT32_C(0x2)
/* Virtual NIC. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_VNIC UINT32_C(0x3)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_VNIC UINT32_C(0x3)
/* Statistic context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_STAT UINT32_C(0x4)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_STAT UINT32_C(0x4)
/* Slow-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SP_TQM_RING UINT32_C(0x5)
/* Fast-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_FP_TQM_RING UINT32_C(0x6)
/* Memory Region and Memory Address Vector Context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_MRAV UINT32_C(0xe)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_MRAV UINT32_C(0xe)
/* TIM. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TIM UINT32_C(0xf)
- /* KTLS Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TKC UINT32_C(0x13)
- /* KTLS Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RKC UINT32_C(0x14)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TIM UINT32_C(0xf)
+ /* Tx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TX_CK UINT32_C(0x13)
+ /* Rx crypto key. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RX_CK UINT32_C(0x14)
/* Mid-path TQM ring. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_MP_TQM_RING UINT32_C(0x15)
/* SQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SQ_DB_SHADOW UINT32_C(0x16)
/* RQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RQ_DB_SHADOW UINT32_C(0x17)
/* SRQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRQ_DB_SHADOW UINT32_C(0x18)
/* CQ Doorbell shadow region. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
- /* QUIC Tx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_QUIC_TKC UINT32_C(0x1a)
- /* QUIC Rx key context. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_QUIC_RKC UINT32_C(0x1b)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CQ_DB_SHADOW UINT32_C(0x19)
+ /* CFA table scope context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TBL_SCOPE UINT32_C(0x1c)
+ /* XID partition context. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_XID_PARTITION UINT32_C(0x1d)
+ /* SRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRT_TRACE UINT32_C(0x1e)
+ /* SRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_SRT2_TRACE UINT32_C(0x1f)
+ /* CRT trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CRT_TRACE UINT32_C(0x20)
+ /* CRT2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CRT2_TRACE UINT32_C(0x21)
+ /* RIGP0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RIGP0_TRACE UINT32_C(0x22)
+ /* L2 HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x23)
+ /* RoCE HWRM trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x24)
+ /* TimedTx pacing TQM ring. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_TTX_PACING_TQM_RING UINT32_C(0x25)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CA0_TRACE UINT32_C(0x26)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CA1_TRACE UINT32_C(0x27)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_CA2_TRACE UINT32_C(0x28)
+ /* RIGP1 trace. */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_RIGP1_TRACE UINT32_C(0x29)
/* Invalid type. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID UINT32_C(0xffff)
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID UINT32_C(0xffff)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_LAST HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_TYPE_INVALID
/* Number of bytes per backing store entry. */
uint16_t entry_size;
/* Control flags. */
uint32_t flags;
/*
* When set, it indicates the context type should be initialized
- * with the “ctx_init_value” at the specified offset.
+ * with the 'ctx_init_value' at the specified offset.
*/
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT UINT32_C(0x1)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ENABLE_CTX_KIND_INIT UINT32_C(0x1)
/* When set, it indicates the context type is valid. */
- #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID UINT32_C(0x2)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_TYPE_VALID UINT32_C(0x2)
/*
* When set, it indicates the region for this type is not a regular
* context memory but a driver managed memory that is created,
@@ -21743,11 +23736,39 @@ typedef struct hwrm_func_backing_store_qcaps_v2_output {
*/
#define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_DRIVER_MANAGED_MEMORY UINT32_C(0x4)
/*
+	 * When set, it indicates support for the following capabilities,
+	 * which are specific to the QP type:
+	 * - For 2-port adapters, the ability to extend the RoCE QP
+	 *   entries configured on a PF during some network events such as
+	 *   Link Down. The count of these additional entries is included
+	 *   in the advertised 'max_num_entries'.
+ * - The count of RoCE QP entries, derived from 'max_num_entries'
+ * (max_num_entries - qp_num_qp1_entries - qp_num_l2_entries -
+ * qp_num_fast_qpmd_entries, note qp_num_fast_qpmd_entries is
+ * always zero when QPs are pseudo-statically allocated), includes
+ * the count of QPs that can be migrated from the other PF (e.g.,
+ * during network link down). Therefore, during normal operation
+ * when both PFs are active, the supported number of RoCE QPs for
+ * each of the PF is half of the advertised value.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_ROCE_QP_PSEUDO_STATIC_ALLOC UINT32_C(0x8)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_FW_DBG_TRACE UINT32_C(0x10)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_FW_BIN_DBG_TRACE UINT32_C(0x20)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_FLAGS_NEXT_BS_OFFSET UINT32_C(0x40)
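+ /*
+ * Illustrative arithmetic only (not part of the generated interface):
+ * with ROCE_QP_PSEUDO_STATIC_ALLOC set on a 2-port adapter, a driver
+ * could derive its per-PF RoCE QP budget from the advertised values:
+ *
+ *	uint32_t roce_qps = max_num_entries - qp_num_qp1_entries -
+ *	    qp_num_l2_entries;	// qp_num_fast_qpmd_entries is zero here
+ *	uint32_t per_pf_qps = roce_qps / 2;	// both PFs active
+ */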
+ /*
* Bit map of the valid instances associated with the
* backing store type.
* 1. If the backing store type is MPC TQM ring, use the following
- * bit to MPC client mapping:
+ * bits to map to MPC clients:
* TCE (0), RCE (1), TE_CFA (2), RE_CFA (3), PRIMATE (4)
+ * 2. If the backing store type is TBL_SCOPE, use the following
+ * bits to map to table scope regions:
+ * RE_CFA_LKUP (0), RE_CFA_ACT (1), TE_CFA_LKUP (2), TE_CFA_ACT (3)
+ * 3. If the backing store type is VF XID partition in-use table, use
+ * the following bits to map to context types:
+ * TX_CK (0), RX_CK (1)
*/
uint32_t instance_bit_map;
/*
@@ -21794,7 +23815,37 @@ typedef struct hwrm_func_backing_store_qcaps_v2_output {
* | 4 | All four split entries have valid data. |
*/
uint8_t subtype_valid_cnt;
- uint8_t rsvd2;
+ /*
+ * Bitmap that indicates whether each 'split_entry' denotes an
+ * exact count (i.e., min = max). When the exact count bit is set,
+ * the exact number of entries as advertised has to be configured.
+ * A 'split_entry' flagged as exact by this bitmap must be a valid
+ * split entry as specified by 'subtype_valid_cnt'.
+ */
+ uint8_t exact_cnt_bit_map;
+ /*
+ * When this bit is '1', it indicates 'split_entry_0' contains
+ * an exact count.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT UINT32_C(0x1)
+ /*
+ * When this bit is '1', it indicates 'split_entry_1' contains
+ * an exact count.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_1_EXACT UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates 'split_entry_2' contains
+ * an exact count.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_2_EXACT UINT32_C(0x4)
+ /*
+ * When this bit is '1', it indicates 'split_entry_3' contains
+ * an exact count.
+ */
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_3_EXACT UINT32_C(0x8)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_UNUSED_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_UNUSED_SFT 4
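+ /*
+ * Sketch (illustrative, assuming 'resp' points at this qcaps_v2
+ * response): checking whether split entry 0 must be configured with
+ * the exact advertised count:
+ *
+ *	if (resp->exact_cnt_bit_map &
+ *	    HWRM_FUNC_BACKING_STORE_QCAPS_V2_OUTPUT_EXACT_CNT_BIT_MAP_SPLIT_ENTRY_0_EXACT)
+ *		;	// split_entry_0 min equals max; configure exactly
+ */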
/*
* Split entry #0. Note that the four split entries (as a group)
* must be cast to a type-specific data structure first before
@@ -21806,7 +23857,8 @@ typedef struct hwrm_func_backing_store_qcaps_v2_output {
* | SRQ | srq_split_entries |
* | CQ | cq_split_entries |
* | VINC | vnic_split_entries |
- * | MRAV | marv_split_entries |
+ * | MRAV | mrav_split_entries |
+ * | TS | ts_split_entries |
*/
uint32_t split_entry_0;
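+ /*
+ * Sketch (illustrative, assuming 'resp' points at this response and
+ * that the ts_split_entries structure is defined elsewhere in this
+ * header): for a TS type, the four split entries are cast as a group
+ * before use:
+ *
+ *	ts_split_entries_t *ts = (ts_split_entries_t *)&resp->split_entry_0;
+ */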
/* Split entry #1. */
@@ -21818,7 +23870,7 @@ typedef struct hwrm_func_backing_store_qcaps_v2_output {
uint8_t rsvd3[3];
/*
* This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
+ * output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been completely
* written. When writing a command completion or response to
* an internal processor, the order of writes has to be such
@@ -21885,7 +23937,7 @@ typedef struct hwrm_func_dbr_pacing_cfg_input {
*/
#define HWRM_FUNC_DBR_PACING_CFG_INPUT_ENABLES_PACING_THRESHOLD_VALID UINT32_C(0x2)
/*
- * Specify primary function’s NQ ID to receive the doorbell pacing
+ * Specify primary function's NQ ID to receive the doorbell pacing
* threshold crossing events.
*/
uint32_t primary_nq_id;
@@ -21911,7 +23963,7 @@ typedef struct hwrm_func_dbr_pacing_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -22083,9 +24135,10 @@ typedef struct hwrm_func_dbr_pacing_qcfg_output {
*/
uint8_t dbr_throttling_aeq_arm_reg_val;
uint8_t unused_3[3];
- uint32_t dbr_stat_db_max_fifo_depth;
+ /* This field indicates the maximum depth of the doorbell FIFO. */
+ uint32_t dbr_stat_db_max_fifo_depth;
/*
- * Specifies primary function’s NQ ID.
+ * Specifies primary function's NQ ID.
* A value of 0xFFFF FFFF indicates NQ ID is invalid.
*/
uint32_t primary_nq_id;
@@ -22097,7 +24150,7 @@ typedef struct hwrm_func_dbr_pacing_qcfg_output {
uint8_t unused_4[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -22158,7 +24211,7 @@ typedef struct hwrm_func_dbr_pacing_broadcast_event_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -22253,9 +24306,9 @@ typedef struct hwrm_func_dbr_pacing_nqlist_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22323,7 +24376,7 @@ typedef struct hwrm_func_dbr_recovery_completed_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -22428,9 +24481,9 @@ typedef struct hwrm_func_synce_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22510,14 +24563,817 @@ typedef struct hwrm_func_synce_qcfg_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} hwrm_func_synce_qcfg_output_t, *phwrm_func_synce_qcfg_output_t;
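+
+/*
+ * Illustrative only: the 'valid' byte is the last byte the HWRM writes,
+ * so a driver must poll it before trusting any other field of a
+ * response. A minimal sketch, assuming 'resp' points at the DMA-able
+ * response buffer:
+ *
+ *	volatile uint8_t *valid =
+ *	    &((hwrm_func_synce_qcfg_output_t *)resp)->valid;
+ *	while (*valid == 0)
+ *		;	// spin (or sleep) until firmware completes the write
+ */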
+/************************
+ * hwrm_func_lag_create *
+ ************************/
+
+
+/* hwrm_func_lag_create_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_create_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t enables;
+ /*
+ * This bit must be '1' for the active_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_ACTIVE_PORT_MAP UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the member_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_MEMBER_PORT_MAP UINT32_C(0x2)
+ /* This bit must be '1' for the aggr_mode field to be configured. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_AGGR_MODE UINT32_C(0x4)
+ /* rsvd1 is 5 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_RSVD1_MASK UINT32_C(0xf8)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_RSVD1_SFT 3
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 member ports, and none of them
+ * may be a member of another LAG. Note that on a 4-port device, a
+ * LAG can have either 2 or 4 member ports; using 3 member ports is
+ * not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_RSVD4_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* Active-active mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* Active-backup mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_LAST HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[4];
+} hwrm_func_lag_create_input_t, *phwrm_func_lag_create_input_t;
+
+/* hwrm_func_lag_create_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_create_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * LAG ID of the created LAG. This LAG ID will also be returned
+ * in the HWRM_FUNC_QCFG response of all member ports.
+ */
+ uint8_t fw_lag_id;
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_create_output_t, *phwrm_func_lag_create_output_t;
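+
+/*
+ * Usage sketch (illustrative; bnxt_hwrm_send() is a hypothetical
+ * transport helper, and HWRM_FUNC_LAG_CREATE is the matching request
+ * type defined elsewhere in this header): create an active-backup LAG
+ * over ports 0 and 1 with port 0 active, then save fw_lag_id from the
+ * response for later update/free calls.
+ *
+ *	hwrm_func_lag_create_input_t req = {0};
+ *	req.req_type = HWRM_FUNC_LAG_CREATE;
+ *	req.enables = HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_ACTIVE_PORT_MAP |
+ *	    HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_MEMBER_PORT_MAP |
+ *	    HWRM_FUNC_LAG_CREATE_INPUT_ENABLES_AGGR_MODE;
+ *	req.active_port_map = HWRM_FUNC_LAG_CREATE_INPUT_ACTIVE_PORT_MAP_PORT_0;
+ *	req.member_port_map = HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_0 |
+ *	    HWRM_FUNC_LAG_CREATE_INPUT_MEMBER_PORT_MAP_PORT_1;
+ *	req.link_aggr_mode = HWRM_FUNC_LAG_CREATE_INPUT_AGGR_MODE_ACTIVE_BACKUP;
+ *	bnxt_hwrm_send(softc, &req, sizeof(req));
+ */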
+
+/************************
+ * hwrm_func_lag_update *
+ ************************/
+
+
+/* hwrm_func_lag_update_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_update_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be updated. */
+ uint8_t fw_lag_id;
+ uint8_t enables;
+ /*
+ * This bit must be '1' for the active_port_map field to be
+ * updated.
+ */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_ACTIVE_PORT_MAP UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the member_port_map field to be
+ * updated.
+ */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_MEMBER_PORT_MAP UINT32_C(0x2)
+ /* This bit must be '1' for the aggr_mode field to be updated. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_AGGR_MODE UINT32_C(0x4)
+ /* rsvd1 is 5 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_RSVD1_MASK UINT32_C(0xf8)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_RSVD1_SFT 3
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 member ports, and none of them
+ * may be a member of another LAG. Note that on a 4-port device, a
+ * LAG can have either 2 or 4 member ports; using 3 member ports is
+ * not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_RSVD4_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* Active-active mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* Active-backup mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_LAST HWRM_FUNC_LAG_UPDATE_INPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[3];
+} hwrm_func_lag_update_input_t, *phwrm_func_lag_update_input_t;
+
+/* hwrm_func_lag_update_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_update_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_update_output_t, *phwrm_func_lag_update_output_t;
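+
+/*
+ * Failover sketch (illustrative; assumes an active-backup LAG and the
+ * fw_lag_id saved from the create response): make port 1 the new
+ * active port after port 0 fails. Only the new active port's bit is
+ * set, per the active_port_map description above.
+ *
+ *	hwrm_func_lag_update_input_t req = {0};
+ *	req.fw_lag_id = fw_lag_id;
+ *	req.enables = HWRM_FUNC_LAG_UPDATE_INPUT_ENABLES_ACTIVE_PORT_MAP;
+ *	req.active_port_map = HWRM_FUNC_LAG_UPDATE_INPUT_ACTIVE_PORT_MAP_PORT_1;
+ */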
+
+/**********************
+ * hwrm_func_lag_free *
+ **********************/
+
+
+/* hwrm_func_lag_free_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_free_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be freed. */
+ uint8_t fw_lag_id;
+ uint8_t unused_0[7];
+} hwrm_func_lag_free_input_t, *phwrm_func_lag_free_input_t;
+
+/* hwrm_func_lag_free_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_free_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_free_output_t, *phwrm_func_lag_free_output_t;
+
+/**********************
+ * hwrm_func_lag_qcfg *
+ **********************/
+
+
+/* hwrm_func_lag_qcfg_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Link aggregation group ID of the LAG to be queried. */
+ uint8_t fw_lag_id;
+ uint8_t unused_0[7];
+} hwrm_func_lag_qcfg_input_t, *phwrm_func_lag_qcfg_input_t;
+
+/* hwrm_func_lag_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. The active_port_map must always be a subset of the
+ * member_port_map. An active port is eligible to send and receive
+ * traffic.
+ *
+ * If the LAG mode is active-backup, only one port can be an active
+ * port at a given time. All other ports in the member_port_map that
+ * are not the active port are backup ports. When the active port
+ * fails, another member port takes over to become the active port.
+ * The driver should use HWRM_FUNC_LAG_UPDATE to update
+ * the active_port_map by only setting the port bit of the new active
+ * port.
+ *
+ * In active-active, balance_xor or 802_3_ad mode, all member ports
+ * can be active ports. If the driver determines that an active
+ * port is down or unable to function, it should use
+ * HWRM_FUNC_LAG_UPDATE to update the active_port_map by clearing
+ * the port bit that has failed.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD3_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device. Ports are numbered
+ * from 0 to n - 1 on a device with n ports. The number of front panel
+ * ports is specified in the port_cnt field of the HWRM_PORT_PHY_QCAPS
+ * response. There must be at least 2 member ports, and none of them
+ * may be a member of another LAG. Note that on a 4-port device, a
+ * LAG can have either 2 or 4 member ports; using 3 member ports is
+ * not supported.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD4_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* Active-active mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* Active-backup mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_LAST HWRM_FUNC_LAG_QCFG_OUTPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[4];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_qcfg_output_t, *phwrm_func_lag_qcfg_output_t;
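+
+/*
+ * Decode sketch (illustrative, assuming 'resp' points at the qcfg
+ * response buffer): walk the member bitmap one front panel port at a
+ * time:
+ *
+ *	for (int port = 0; port < 4; port++)
+ *		if (resp->member_port_map & (1U << port))
+ *			;	// this port belongs to the LAG
+ */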
+
+/**************************
+ * hwrm_func_lag_mode_cfg *
+ **************************/
+
+
+/* hwrm_func_lag_mode_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_mode_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t enables;
+ /*
+ * This bit must be '1' for the link aggregation enable or
+ * disable flags to be configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_FLAGS UINT32_C(0x1)
+ /*
+ * This bit must be '1' for the active_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_ACTIVE_PORT_MAP UINT32_C(0x2)
+ /*
+ * This bit must be '1' for the member_port_map field to be
+ * configured.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_MEMBER_PORT_MAP UINT32_C(0x4)
+ /* This bit must be '1' for the aggr_mode field to be configured. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_AGGR_MODE UINT32_C(0x8)
+ /* This bit must be '1' for the lag_id field to be configured. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_LAG_ID UINT32_C(0x10)
+ /* rsvd1 is 3 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_RSVD1_MASK UINT32_C(0xe0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_RSVD1_SFT 5
+ uint8_t flags;
+ /*
+ * If this bit is set to 1, the driver is requesting the FW to
+ * disable the link aggregation feature at run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_DISABLE UINT32_C(0x1)
+ /*
+ * If this bit is set to 1, the driver is requesting the FW to
+ * enable the link aggregation feature at run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_ENABLE UINT32_C(0x2)
+ /* rsvd2 is 6 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_RSVD2_MASK UINT32_C(0xfc)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_RSVD2_SFT 2
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "active port" is one of member ports which is eligible to
+ * send or receive the traffic.
+ * In the active-backup mode, only one member port is active port at
+ * any given time. If the active port fails, another member port
+ * automatically takes over the active role to ensure continuous
+ * network connectivity.
+ * In the active-active, balance_xor or 802_3_ad mode, all member ports
+ * could be active port, if link status on one port is down, driver
+ * needs to send the NIC a new active-port bitmap with marking this
+ * port as not active port.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_RSVD3_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_ACTIVE_PORT_MAP_RSVD3_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "member port" refers to a front panel port that is added to
+ * the bond group as a slave device. These member ports are combined to
+ * create a logical bond interface.
+ * For a 4-port NIC, the LAG member port combination can consist of
+ * either two or four ports; three ports in the same LAG group is
+ * not supported.
+ * The PORT_2 and PORT_3 bits are only valid if the NIC has four
+ * front panel ports. Multiple LAG groups may be used; for example,
+ * if the NIC has four front panel ports, the LAG feature can use
+ * up to two LAG groups, with two ports assigned to each group.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd4 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_RSVD4_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_MEMBER_PORT_MAP_RSVD4_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* Active-active mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* Active-backup mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_LAST HWRM_FUNC_LAG_MODE_CFG_INPUT_AGGR_MODE_802_3_AD
+ /* Link aggregation group idx being used. */
+ uint8_t lag_id;
+ uint8_t unused_0[2];
+} hwrm_func_lag_mode_cfg_input_t, *phwrm_func_lag_mode_cfg_input_t;
+
+/* hwrm_func_lag_mode_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_mode_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Link aggregation group idx being used. */
+ uint8_t lag_id;
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_mode_cfg_output_t, *phwrm_func_lag_mode_cfg_output_t;
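+
+/*
+ * Sketch (illustrative): enable the link aggregation feature at run
+ * time through the flags field; the firmware returns the lag_id it
+ * assigned in the response.
+ *
+ *	hwrm_func_lag_mode_cfg_input_t req = {0};
+ *	req.enables = HWRM_FUNC_LAG_MODE_CFG_INPUT_ENABLES_FLAGS;
+ *	req.flags = HWRM_FUNC_LAG_MODE_CFG_INPUT_FLAGS_AGGR_ENABLE;
+ */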
+
+/***************************
+ * hwrm_func_lag_mode_qcfg *
+ ***************************/
+
+
+/* hwrm_func_lag_mode_qcfg_input (size:192b/24B) */
+
+typedef struct hwrm_func_lag_mode_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint8_t unused_0[8];
+} hwrm_func_lag_mode_qcfg_input_t, *phwrm_func_lag_mode_qcfg_input_t;
+
+/* hwrm_func_lag_mode_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_func_lag_mode_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t aggr_enabled;
+ /*
+ * This flag indicates whether link aggregation is enabled or
+ * disabled at run time.
+ */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_ENABLED UINT32_C(0x1)
+ /* rsvd1 is 7 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_RSVD1_MASK UINT32_C(0xfe)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_RSVD1_SFT 1
+ /*
+ * This is the bitmap of all active ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "active port" is one of member ports which is eligible to
+ * send or receive the traffic.
+ * In the active-backup mode, only one member port is active port at
+ * any given time. If the active port fails, another member port
+ * automatically takes over the active role to ensure continuous
+ * network connectivity.
+ * In the active-active, balance_xor or 802_3_ad mode, all member ports
+ * could be active port, if link status on one port is down, driver
+ * needs to send the NIC a new active-port bitmap with marking this
+ * port as not active port.
+ * The PORT_2 and PORT_3 are only valid if the NIC has four front
+ * panel ports.
+ */
+ uint8_t active_port_map;
+ /* If this bit is set to '1', the port0 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag active port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd2 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD2_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_ACTIVE_PORT_MAP_RSVD2_SFT 4
+ /*
+ * This is the bitmap of all member ports in the LAG. Each bit
+ * represents a front panel port of the device starting from port 0.
+ * The number of front panel ports is specified in the port_cnt field
+ * of the HWRM_PORT_PHY_QCAPS response.
+ * The term "member port" refers to a front panel port that is added to
+ * the bond group as a slave device. These member ports are combined to
+ * create a logical bond interface.
+ * For a 4-port NIC, the LAG member port combination can consist of
+ * either two or four ports; three ports in the same LAG group is
+ * not supported.
+ * The PORT_2 and PORT_3 bits are only valid if the NIC has four
+ * front panel ports. Multiple LAG groups may be used; for example,
+ * if the NIC has four front panel ports, the LAG feature can use
+ * up to two LAG groups, with two ports assigned to each group.
+ */
+ uint8_t member_port_map;
+ /* If this bit is set to '1', the port0 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_0 UINT32_C(0x1)
+ /* If this bit is set to '1', the port1 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_1 UINT32_C(0x2)
+ /* If this bit is set to '1', the port2 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_2 UINT32_C(0x4)
+ /* If this bit is set to '1', the port3 is a lag member port. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_PORT_3 UINT32_C(0x8)
+ /* rsvd3 is 4 b */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD3_MASK UINT32_C(0xf0)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_MEMBER_PORT_MAP_RSVD3_SFT 4
+ /* Link aggregation mode being used. */
+ uint8_t link_aggr_mode;
+ /* Active-active mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_ACTIVE_ACTIVE UINT32_C(0x1)
+ /* Active-backup mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_ACTIVE_BACKUP UINT32_C(0x2)
+ /* Balance XOR mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_BALANCE_XOR UINT32_C(0x3)
+ /* 802.3AD mode. */
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_802_3_AD UINT32_C(0x4)
+ #define HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_LAST HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_MODE_802_3_AD
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_func_lag_mode_qcfg_output_t, *phwrm_func_lag_mode_qcfg_output_t;
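+
+/*
+ * Sketch (illustrative, assuming 'resp' points at this response):
+ * checking whether aggregation is currently enabled:
+ *
+ *	int on = (resp->aggr_enabled &
+ *	    HWRM_FUNC_LAG_MODE_QCFG_OUTPUT_AGGR_ENABLED) != 0;
+ */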
+
/***********************
* hwrm_func_vlan_qcfg *
***********************/
@@ -22582,8 +25438,8 @@ typedef struct hwrm_func_vlan_qcfg_output {
uint8_t stag_pcp;
uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
*/
uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
@@ -22592,8 +25448,8 @@ typedef struct hwrm_func_vlan_qcfg_output {
uint8_t ctag_pcp;
uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
*/
uint16_t ctag_tpid;
/* Future use. */
@@ -22603,9 +25459,9 @@ typedef struct hwrm_func_vlan_qcfg_output {
uint8_t unused_3[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22692,8 +25548,8 @@ typedef struct hwrm_func_vlan_cfg_input {
uint8_t stag_pcp;
uint8_t unused_1;
/*
- * S-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * S-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
*/
uint16_t stag_tpid;
/* C-TAG VLAN identifier configured for the function. */
@@ -22702,8 +25558,8 @@ typedef struct hwrm_func_vlan_cfg_input {
uint8_t ctag_pcp;
uint8_t unused_2;
/*
- * C-TAG TPID value configured for the function. This field is specified in
- * network byte order.
+ * C-TAG TPID value configured for the function. This field is
+ * specified in network byte order.
*/
uint16_t ctag_tpid;
/* Future use. */
@@ -22727,9 +25583,9 @@ typedef struct hwrm_func_vlan_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22803,9 +25659,9 @@ typedef struct hwrm_func_vf_vnic_ids_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22913,9 +25769,9 @@ typedef struct hwrm_func_vf_bw_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -22984,8 +25840,8 @@ typedef struct hwrm_func_vf_bw_qcfg_output {
uint16_t resp_len;
/*
* The number of VF functions that are being queried.
- * The inline response space allows the host to query up to 50 VFs' rate
- * scale percentage
+ * The inline response space allows the host to query up to 50 VFs'
+ * rate scale percentage.
*/
uint16_t num_vfs;
uint16_t unused[3];
@@ -23036,9 +25892,9 @@ typedef struct hwrm_func_vf_bw_qcfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -23083,11 +25939,11 @@ typedef struct hwrm_func_drv_if_change_input {
uint32_t flags;
/*
* When this bit is '1', the function driver is indicating
- * that the IF state is changing to UP state. The call should
+ * that the IF state is changing to UP state. The call should
* be made at the beginning of the driver's open call before
- * resources are allocated. After making the call, the driver
+ * resources are allocated. After making the call, the driver
* should check the response to see if any resources may have
- * changed (see the response below). If the driver fails
+ * changed (see the response below). If the driver fails
* the open call, the driver should make this call again with
* this bit cleared to indicate that the IF state is not UP.
* During the driver's close call when the IF state is changing
@@ -23112,22 +25968,32 @@ typedef struct hwrm_func_drv_if_change_output {
uint32_t flags;
/*
* When this bit is '1', it indicates that the resources reserved
- * for this function may have changed. The driver should check
+ * for this function may have changed. The driver should check
* resource capabilities and reserve resources again before
* allocating resources.
*/
#define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE UINT32_C(0x1)
/*
- * When this bit is '1', it indicates that the firmware got changed / reset.
- * The driver should do complete re-initialization when that bit is set.
+ * When this bit is '1', it indicates that the firmware got changed /
+ * reset. The driver should do complete re-initialization when that
+ * bit is set.
*/
#define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates that capabilities
+ * for this function may have changed. The driver should
+ * query for changes to capabilities.
+ * The CAPS_CHANGE bit will only be set when it is safe for the
+ * driver to completely re-initialize all resources for the function
+ * including any children VFs.
+ */
+ #define HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_CAPS_CHANGE UINT32_C(0x4)
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
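+ /*
+ * Open-path sketch (illustrative, assuming 'resp' points at this
+ * response): after sending FUNC_DRV_IF_CHANGE with the UP flag set,
+ * the driver acts on the returned flags:
+ *
+ *	if (resp->flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_HOT_FW_RESET_DONE)
+ *		;	// firmware was reset: do a complete re-initialization
+ *	else if (resp->flags & HWRM_FUNC_DRV_IF_CHANGE_OUTPUT_FLAGS_RESC_CHANGE)
+ *		;	// re-query capabilities and re-reserve resources
+ */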
@@ -23316,9 +26182,9 @@ typedef struct hwrm_func_host_pf_ids_query_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -23513,9 +26379,9 @@ typedef struct hwrm_func_spd_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -23673,9 +26539,9 @@ typedef struct hwrm_func_spd_qcfg_output {
uint8_t unused_2[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -23686,7 +26552,7 @@ typedef struct hwrm_func_spd_qcfg_output {
*********************/
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
typedef struct hwrm_port_phy_cfg_input {
/* The HWRM command request type. */
@@ -23733,7 +26599,7 @@ typedef struct hwrm_port_phy_cfg_input {
* settings specified in this command.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1)
- /* deprecated bit. Do not use!!! */
+ /* deprecated bit. Do not use!!! */
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_DEPRECATED UINT32_C(0x2)
/*
* When this bit is set to '1', and the force_pam4_link_speed
@@ -23793,33 +26659,33 @@ typedef struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI_DISABLE UINT32_C(0x80)
/*
- * When set to 1, then the HWRM shall enable FEC autonegotitation
- * on this port if supported. When enabled, at least one of the
+ * When set to 1, then the HWRM shall enable FEC autonegotiation
+ * on this port if supported. When enabled, at least one of the
* FEC modes must be advertised by enabling the fec_clause_74_enable,
* fec_clause_91_enable, fec_rs544_1xn_enable, fec_rs544_ieee_enable,
- * fec_rs272_1xn_enable, or fec_rs272_ieee_enable flag. If none
+ * fec_rs272_1xn_enable, or fec_rs272_ieee_enable flag. If none
* of the FEC mode is currently enabled, the HWRM shall choose
* a default advertisement setting.
* The default advertisement setting can be queried by calling
- * hwrm_port_phy_qcfg. Note that the link speed must be
+ * hwrm_port_phy_qcfg. Note that the link speed must be
* in autonegotiation mode for FEC autonegotiation to take effect.
* When set to 0, then this flag shall be ignored.
- * If FEC autonegotiation is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC autonegotiation is not supported, then the HWRM shall
+ * ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_ENABLE UINT32_C(0x100)
/*
* When set to 1, then the HWRM shall disable FEC autonegotiation
- * on this port and use forced FEC mode. In forced FEC mode, one
+ * on this port and use forced FEC mode. In forced FEC mode, one
* or more FEC forced settings under the same clause can be set.
* When set to 0, then this flag shall be ignored.
- * If FEC autonegotiation is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC autonegotiation is not supported, then the HWRM shall
+ * ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE UINT32_C(0x200)
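+ /*
+ * Sketch (illustrative; assumes the fec_rs544_ieee_enable flag defined
+ * further below): forcing RS(544,514) FEC at a PAM4 speed combines the
+ * autonegotiation-disable flag with one forced RS544 mode:
+ *
+ *	req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_AUTONEG_DISABLE |
+ *	    HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_RS544_IEEE_ENABLE;
+ */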
/*
- * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire Code)
- * on this port if supported, by advertising FEC CLAUSE 74 if
+ * When set to 1, then the HWRM shall enable FEC CLAUSE 74 (Fire
+ * Code) on this port if supported, by advertising FEC CLAUSE 74 if
* FEC autonegotiation is enabled or force enabled otherwise.
* When set to 0, then this flag shall be ignored.
* If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
@@ -23827,9 +26693,9 @@ typedef struct hwrm_port_phy_cfg_input {
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE74_ENABLE UINT32_C(0x400)
/*
- * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire Code)
- * on this port if supported, by not advertising FEC CLAUSE 74 if
- * FEC autonegotiation is enabled or force disabled otherwise.
+ * When set to 1, then the HWRM shall disable FEC CLAUSE 74 (Fire
+ * Code) on this port if supported, by not advertising FEC CLAUSE 74
+ * if FEC autonegotiation is enabled or force disabled otherwise.
* When set to 0, then this flag shall be ignored.
* If FEC CLAUSE 74 is not supported, then the HWRM shall ignore this
* flag.
@@ -23839,8 +26705,8 @@ typedef struct hwrm_port_phy_cfg_input {
* When set to 1, then the HWRM shall enable FEC CLAUSE 91
* (Reed Solomon RS(528,514) for NRZ) on this port if supported,
* by advertising FEC RS(528,514) if FEC autonegotiation is enabled
- * or force enabled otherwise. In forced FEC mode, this flag
- * will only take effect if the speed is NRZ. Additional
+ * or force enabled otherwise. In forced FEC mode, this flag
+ * will only take effect if the speed is NRZ. Additional
* RS544 or RS272 flags (also under clause 91) may be set for PAM4
* in forced FEC mode.
* When set to 0, then this flag shall be ignored.
@@ -23852,8 +26718,8 @@ typedef struct hwrm_port_phy_cfg_input {
* When set to 1, then the HWRM shall disable FEC CLAUSE 91
* (Reed Solomon RS(528,514) for NRZ) on this port if supported, by
* not advertising RS(528,514) if FEC autonegotiation is enabled or
- * force disabled otherwise. When set to 0, then this flag shall be
- * ignored. If FEC RS(528,514) is not supported, then the HWRM
+ * force disabled otherwise. When set to 0, then this flag shall be
+ * ignored. If FEC RS(528,514) is not supported, then the HWRM
* shall ignore this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_CLAUSE91_DISABLE UINT32_C(0x2000)
@@ -23881,7 +26747,7 @@ typedef struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS544_1XN if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs544_ieee_enable are set, the
+ * PAM4. If this flag and fec_rs544_ieee_enable are set, the
* HWRM shall choose one of the RS544 modes.
* When set to 0, then this flag shall be ignored.
* If FEC RS544_1XN is not supported, then the HWRM shall ignore this
@@ -23893,8 +26759,8 @@ typedef struct hwrm_port_phy_cfg_input {
* on this port if supported, by not advertising FEC RS544_1XN if
* FEC autonegotiation is enabled or force disabled otherwise.
* When set to 0, then this flag shall be ignored.
- * If FEC RS544_1XN is not supported, then the HWRM shall ignore this
- * flag.
+ * If FEC RS544_1XN is not supported, then the HWRM shall ignore
+ * this flag.
*/
#define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FEC_RS544_1XN_DISABLE UINT32_C(0x10000)
/*
@@ -23902,7 +26768,7 @@ typedef struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS(544,514) if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs544_1xn_enable are set, the
+ * PAM4. If this flag and fec_rs544_1xn_enable are set, the
* HWRM shall choose one of the RS544 modes.
* When set to 0, then this flag shall be ignored.
* If FEC RS(544,514) is not supported, then the HWRM shall ignore
@@ -23923,8 +26789,8 @@ typedef struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS272_1XN if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs272_ieee_enable are set, the
- * HWRM shall choose one of the RS272 modes. Note that RS272
+ * PAM4. If this flag and fec_rs272_ieee_enable are set, the
+ * HWRM shall choose one of the RS272 modes. Note that RS272
* and RS544 modes cannot be set at the same time in forced FEC mode.
* When set to 0, then this flag shall be ignored.
* If FEC RS272_1XN is not supported, then the HWRM shall ignore this
@@ -23945,8 +26811,8 @@ typedef struct hwrm_port_phy_cfg_input {
* on this port if supported, by advertising FEC RS(272,257) if
* FEC autonegotiation is enabled or force enabled otherwise.
* In forced mode, this flag will only take effect if the speed is
- * PAM4. If this flag and fec_rs272_1xn_enable are set, the
- * HWRM shall choose one of the RS272 modes. Note that RS272
+ * PAM4. If this flag and fec_rs272_1xn_enable are set, the
+ * HWRM shall choose one of the RS272 modes. Note that RS272
* and RS544 modes cannot be set at the same time in forced FEC mode.
* When set to 0, then this flag shall be ignored.
* If FEC RS(272,257) is not supported, then the HWRM shall ignore
@@ -24028,11 +26894,21 @@ typedef struct hwrm_port_phy_cfg_input {
* be configured.
*/
#define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAM4_LINK_SPEED_MASK UINT32_C(0x1000)
+ /*
+ * This bit must be '1' for the force_link_speeds2 field to be
+ * configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2 UINT32_C(0x2000)
+ /*
+ * This bit must be '1' for the auto_link_speeds2_mask field to
+ * be configured.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEEDS2_MASK UINT32_C(0x4000)
/* Port ID of port that is to be configured. */
uint16_t port_id;
/*
* This is the speed that will be used if the force
- * bit is '1'. If unsupported speed is selected, an error
+ * bit is '1'. If an unsupported speed is selected, an error
* will be generated.
*/
uint16_t force_link_speed;
@@ -24069,18 +26945,19 @@ typedef struct hwrm_port_phy_cfg_input {
/* Select all possible speeds for autoneg mode. */
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
- * Select the speeds based on the corresponding link speed mask values
- * that are provided. The included speeds are specified in the
+ * Select the speeds based on the corresponding link speed mask
+ * values that are provided. The included speeds are specified in the
* auto_link_speed and auto_pam4_link_speed fields.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK UINT32_C(0x4)
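
Of the modes above, only ALL_SPEEDS and SPEED_MASK remain in normal use, and SPEED_MASK additionally requires the matching mask fields. A hedged sketch of filling the request (the AUTO_MODE/AUTO_LINK_SPEED_MASK enables bits and the 25GB/100GB mask bits are defined earlier in this header, outside this hunk; htole* are the usual byte-order helpers):

/* Sketch: autonegotiate only 25G and 100G NRZ via mask mode. */
static void
phy_cfg_autoneg_mask(hwrm_port_phy_cfg_input_t *req)
{
	req->enables |= htole32(
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE |
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK);
	req->auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
	req->auto_link_speed_mask = htole16(
	    HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB |
	    HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB);
}
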
@@ -24128,10 +27005,29 @@ typedef struct hwrm_port_phy_cfg_input {
* 1, auto_pause bits should be ignored and should be set to 0.
*/
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4)
- uint8_t unused_0;
+ /*
+ * This field is only used by management firmware to communicate with
+ * core firmware regarding phy_port_cfg.
+ * It is mainly used to notify core firmware whether management
+ * firmware is using the port for NCSI over RMII communication.
+ */
+ uint8_t mgmt_flag;
+ /*
+ * Bit denoting if management firmware is using the link for
+ * NCSI over RMII communication.
+ * When set to 1, management firmware is no longer using the given
+ * port.
+ * When set to 0, management firmware is using the given port.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_MGMT_FLAG_LINK_RELEASE UINT32_C(0x1)
+ /*
+ * Validity bit, set to 1 to indicate other bits in mgmt_flags are
+ * valid.
+ */
+ #define HWRM_PORT_PHY_CFG_INPUT_MGMT_FLAG_MGMT_VALID UINT32_C(0x80)
/*
* This is the speed that will be used if the autoneg_mode
- * is "one_speed" or "one_or_below". If an unsupported speed
+ * is "one_speed" or "one_or_below". If an unsupported speed
* is selected, an error will be generated.
*/
uint16_t auto_link_speed;
@@ -24160,7 +27056,7 @@ typedef struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_LAST HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB
/*
* This is a mask of link speeds that will be used if
- * autoneg_mode is "mask". If unsupported speed is enabled
+ * autoneg_mode is "mask". If unsupported speed is enabled
* an error will be generated.
*/
uint16_t auto_link_speed_mask;
@@ -24201,7 +27097,7 @@ typedef struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_LAST HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON
/* This value controls the loopback setting for the PHY. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -24217,7 +27113,8 @@ typedef struct hwrm_port_phy_cfg_input {
/*
* The HW will be configured with external loopback such that
* host data is sent on the transmitter and based on the external
- * loopback connection the data will be received without modification.
+ * loopback connection the data will be received without
+ * modification.
*/
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL UINT32_C(0x3)
#define HWRM_PORT_PHY_CFG_INPUT_LPBK_LAST HWRM_PORT_PHY_CFG_INPUT_LPBK_EXTERNAL
@@ -24239,7 +27136,7 @@ typedef struct hwrm_port_phy_cfg_input {
uint8_t unused_1;
/*
* This value controls the pre-emphasis to be used for the
- * link. Driver should not set this value (use
+ * link. Driver should not set this value (use
* enable.preemphasis = 0) unless the driver is sure of the setting.
* Normally HWRM FW will determine proper pre-emphasis.
*/
@@ -24270,7 +27167,7 @@ typedef struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB UINT32_C(0x40)
/*
* This is the speed that will be used if the force and force_pam4
- * bits are '1'. If unsupported speed is selected, an error
+ * bits are '1'. If an unsupported speed is selected, an error
* will be generated.
*/
uint16_t force_pam4_link_speed;
@@ -24294,7 +27191,76 @@ typedef struct hwrm_port_phy_cfg_input {
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_PAM4_SPEED_MASK_50G UINT32_C(0x1)
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_PAM4_SPEED_MASK_100G UINT32_C(0x2)
#define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_PAM4_SPEED_MASK_200G UINT32_C(0x4)
- uint8_t unused_2[2];
+ /*
+ * This is the speed that will be used if the force_link_speeds2
+ * bit is '1'. If an unsupported speed is selected, an error
+ * will be generated.
+ */
+ uint16_t force_link_speeds2;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB UINT32_C(0xa)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB UINT32_C(0x64)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB UINT32_C(0xfa)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB UINT32_C(0x190)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB UINT32_C(0x1f4)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB UINT32_C(0x3e8)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56 UINT32_C(0x1f5)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56 UINT32_C(0x3e9)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56 UINT32_C(0x7d1)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56 UINT32_C(0xfa1)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112 UINT32_C(0x3ea)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112 UINT32_C(0x7d2)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112 UINT32_C(0xfa2)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112 UINT32_C(0x1f42)
+ #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_LAST HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ /*
+ * This is a mask of link speeds that will be used if the
+ * auto_link_speeds2_mask bit in the "enables" field is '1'.
+ * If an unsupported speed is enabled, an error will be generated.
+ */
+ uint16_t auto_link_speeds2_mask;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_1GB UINT32_C(0x1)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_10GB UINT32_C(0x2)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_25GB UINT32_C(0x4)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_40GB UINT32_C(0x8)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_50GB UINT32_C(0x10)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB UINT32_C(0x20)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 UINT32_C(0x40)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 UINT32_C(0x80)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 UINT32_C(0x100)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 UINT32_C(0x200)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 UINT32_C(0x400)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 UINT32_C(0x800)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 UINT32_C(0x2000)
+ uint8_t unused_2[6];
} hwrm_port_phy_cfg_input_t, *phwrm_port_phy_cfg_input_t;
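
The new force_link_speeds2 machinery follows the existing pattern: set the enables bit, store one of the codes above, send the command. A hedged sketch (the send path is elided and driver-specific):

/* Sketch: force the link to 400G, PAM4-112, 4 lanes. */
static void
phy_cfg_force_400g(hwrm_port_phy_cfg_input_t *req)
{
	req->enables |= htole32(
	    HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_LINK_SPEEDS2);
	req->force_link_speeds2 = htole16(
	    HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112);
	/* Optionally reset the PHY so the forced speed takes effect. */
	req->flags |= htole32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY);
}
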
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -24311,9 +27277,9 @@ typedef struct hwrm_port_phy_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -24339,7 +27305,7 @@ typedef struct hwrm_port_phy_cfg_cmd_err {
* but if a 0 is returned at any time then this should
* be treated as an unrecoverable failure.
*
- * retry interval in milli seconds is returned in opaque_1.
+ * The retry interval in milliseconds is returned in opaque_1.
* This specifies the time that the user should wait before
* issuing the next port_phy_cfg command.
*/
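
In other words, a retryable PHY error is not fatal: the driver sleeps for the hinted interval and reissues the command. A rough sketch, assuming the generic hwrm_err_output layout (cmd_err, opaque_1) defined elsewhere in this header and hypothetical bnxt_hwrm_send()/bnxt_msleep() helpers:

extern int bnxt_hwrm_send(void *req, hwrm_err_output_t *err);
extern void bnxt_msleep(int ms);

static int
phy_cfg_with_retry(hwrm_port_phy_cfg_input_t *req, uint8_t retry_code,
    int max_tries)
{
	hwrm_err_output_t err;
	int rc = -1;

	while (max_tries-- > 0) {
		rc = bnxt_hwrm_send(req, &err);
		if (rc == 0 || err.cmd_err != retry_code)
			break;
		/* Firmware reports the wait hint, in ms, in opaque_1. */
		bnxt_msleep(le16toh(err.opaque_1));
	}
	return (rc);
}
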
@@ -24418,9 +27384,11 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_SFT 0
/* NRZ signaling */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_NRZ UINT32_C(0x0)
- /* PAM4 signaling */
+ /* PAM4-56 signaling */
#define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4 UINT32_C(0x1)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4
+ /* PAM4-112 signaling */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4_112 UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4_112
/* This value indicates the current active FEC mode. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_MASK UINT32_C(0xf0)
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_SFT 4
@@ -24428,15 +27396,15 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_NONE_ACTIVE (UINT32_C(0x0) << 4)
/* FEC CLAUSE 74 (Fire Code) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (UINT32_C(0x1) << 4)
- /* FEC CLAUSE 91 RS(528,514) active, autonegoatiated or forced. */
+ /* FEC CLAUSE 91 RS(528,514) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (UINT32_C(0x2) << 4)
- /* FEC RS544_1XN active, autonegoatiated or forced. */
+ /* FEC RS544_1XN active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (UINT32_C(0x3) << 4)
- /* FEC RS(544,528) active, autonegoatiated or forced. */
+ /* FEC RS(544,528) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (UINT32_C(0x4) << 4)
/* FEC RS272_1XN active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (UINT32_C(0x5) << 4)
- /* FEC RS(272,257) active, autonegoatiated or forced. */
+ /* FEC RS(272,257) active, autonegotiated or forced. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (UINT32_C(0x6) << 4)
#define HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_LAST HWRM_PORT_PHY_QCFG_OUTPUT_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
/*
@@ -24467,6 +27435,10 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB UINT32_C(0x3e8)
/* 200Gb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB UINT32_C(0x7d0)
+ /* 400Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB UINT32_C(0xfa0)
+ /* 800Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_800GB UINT32_C(0x1f40)
/* 10Mb link speed */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB UINT32_C(0xffff)
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_LAST HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB
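
Apart from the 10 Mb sentinel, these codes are simply the link speed in 100 Mb/s units (0x3e8 = 1000 = 100 Gb), so decoding is one multiply:

/* Convert a link_speed code to Mb/s; 0xffff is the 10 Mb sentinel. */
static uint32_t
hwrm_link_speed_to_mbps(uint16_t code)
{
	if (code == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB)
		return (10);
	return ((uint32_t)code * 100);
}
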
@@ -24566,13 +27538,14 @@ typedef struct hwrm_port_phy_qcfg_output {
/* Select all possible speeds for autoneg mode. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
@@ -24684,7 +27657,7 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_LAST HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON
/* Current setting for loopback. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -24700,7 +27673,8 @@ typedef struct hwrm_port_phy_qcfg_output {
/*
* The HW will be configured with external loopback such that
* host data is sent on the transmitter and based on the external
- * loopback connection the data will be received without modification.
+ * loopback connection the data will be received without
+ * modification.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL UINT32_C(0x3)
#define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LAST HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_EXTERNAL
@@ -24737,6 +27711,8 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED UINT32_C(0x4)
/* Module is powered down because of over current fault. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_CURRENTFAULT UINT32_C(0x5)
+ /* Module is overheated. */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_OVERHEATED UINT32_C(0x6)
/* Module status is not applicable. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE UINT32_C(0xff)
#define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_LAST HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE
@@ -24830,7 +27806,51 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2 UINT32_C(0x26)
/* 100G_BASEER2 */
#define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2 UINT32_C(0x27)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2
+ /* 100G_BASECR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR UINT32_C(0x28)
+ /* 100G_BASESR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR UINT32_C(0x29)
+ /* 100G_BASELR */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR UINT32_C(0x2a)
+ /* 100G_BASEER */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER UINT32_C(0x2b)
+ /* 200G_BASECR2 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2 UINT32_C(0x2c)
+ /* 200G_BASESR2 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2 UINT32_C(0x2d)
+ /* 200G_BASELR2 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2 UINT32_C(0x2e)
+ /* 200G_BASEER2 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2 UINT32_C(0x2f)
+ /* 400G_BASECR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8 UINT32_C(0x30)
+ /* 400G_BASESR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8 UINT32_C(0x31)
+ /* 400G_BASELR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8 UINT32_C(0x32)
+ /* 400G_BASEER8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8 UINT32_C(0x33)
+ /* 400G_BASECR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4 UINT32_C(0x34)
+ /* 400G_BASESR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4 UINT32_C(0x35)
+ /* 400G_BASELR4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4 UINT32_C(0x36)
+ /* 400G_BASEER4 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4 UINT32_C(0x37)
+ /* 800G_BASECR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASECR8 UINT32_C(0x38)
+ /* 800G_BASESR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASESR8 UINT32_C(0x39)
+ /* 800G_BASELR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASELR8 UINT32_C(0x3a)
+ /* 800G_BASEER8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEER8 UINT32_C(0x3b)
+ /* 800G_BASEFR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEFR8 UINT32_C(0x3c)
+ /* 800G_BASEDR8 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEDR8 UINT32_C(0x3d)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_800G_BASEDR8
/* This value represents a media type. */
uint8_t media_type;
/* Unknown */
@@ -24862,9 +27882,9 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK UINT32_C(0xe0)
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5
/*
- * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled.
- * Speeds for autoneg with EEE mode enabled
- * are based on eee_link_speed_mask.
+ * When set to 1, Energy Efficient Ethernet (EEE) mode is
+ * enabled. Speeds for autoneg with EEE mode enabled are based on
+ * eee_link_speed_mask.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED UINT32_C(0x20)
/*
@@ -24954,13 +27974,14 @@ typedef struct hwrm_port_phy_qcfg_output {
/* Select all possible speeds for autoneg mode. */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS UINT32_C(0x1)
/*
- * Select only the auto_link_speed speed for autoneg mode. This mode has
- * been DEPRECATED. An HWRM client should not use this mode.
+ * Select only the auto_link_speed speed for autoneg mode. This mode
+ * has been DEPRECATED. An HWRM client should not use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED UINT32_C(0x2)
/*
- * Select the auto_link_speed or any speed below that speed for autoneg.
- * This mode has been DEPRECATED. An HWRM client should not use this mode.
+ * Select the auto_link_speed or any speed below that speed for
+ * autoneg. This mode has been DEPRECATED. An HWRM client should not
+ * use this mode.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW UINT32_C(0x3)
/*
@@ -25042,43 +28063,55 @@ typedef struct hwrm_port_phy_qcfg_output {
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP (UINT32_C(0xc) << 24)
/* QSFP+ */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS (UINT32_C(0xd) << 24)
- /* QSFP28 */
+ /* QSFP28/QSFP56 or later */
#define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 (UINT32_C(0x11) << 24)
- #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28
+ /* QSFP-DD */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPDD (UINT32_C(0x18) << 24)
+ /* QSFP112 */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP112 (UINT32_C(0x1e) << 24)
+ /* SFP-DD CMIS */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFPDD (UINT32_C(0x1f) << 24)
+ /* SFP CMIS */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_CSFP (UINT32_C(0x20) << 24)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_LAST HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_CSFP
/*
* This value represents the current configuration of
* Forward Error Correction (FEC) on the port.
*/
uint16_t fec_cfg;
/*
- * When set to 1, then FEC is not supported on this port. If this flag
- * is set to 1, then all other FEC configuration flags shall be ignored.
- * When set to 0, then FEC is supported as indicated by other
- * configuration flags.
+ * When set to 1, then FEC is not supported on this port. If this
+ * flag is set to 1, then all other FEC configuration flags shall be
+ * ignored. When set to 0, then FEC is supported as indicated by
+ * other configuration flags.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_NONE_SUPPORTED UINT32_C(0x1)
/*
* When set to 1, then FEC autonegotiation is supported on this port.
- * When set to 0, then FEC autonegotiation is not supported on this port.
+ * When set to 0, then FEC autonegotiation is not supported on this
+ * port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_SUPPORTED UINT32_C(0x2)
/*
* When set to 1, then FEC autonegotiation is enabled on this port.
* When set to 0, then FEC autonegotiation is disabled if supported.
- * This flag should be ignored if FEC autonegotiation is not supported on this port.
+ * This flag should be ignored if FEC autonegotiation is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_AUTONEG_ENABLED UINT32_C(0x4)
/*
- * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this port.
- * When set to 0, then FEC CLAUSE 74 (Fire Code) is not supported on this port.
+ * When set to 1, then FEC CLAUSE 74 (Fire Code) is supported on this
+ * port. When set to 0, then FEC CLAUSE 74 (Fire Code) is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_SUPPORTED UINT32_C(0x8)
/*
* When set to 1, then FEC CLAUSE 74 (Fire Code) is enabled on this
* port. This means that FEC CLAUSE 74 is either advertised if
* FEC autonegotiation is enabled or FEC CLAUSE 74 is force enabled.
- * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if supported.
- * This flag should be ignored if FEC CLAUSE 74 is not supported on this port.
+ * When set to 0, then FEC CLAUSE 74 (Fire Code) is disabled if
+ * supported. This flag should be ignored if FEC CLAUSE 74 is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE74_ENABLED UINT32_C(0x10)
/*
@@ -25091,9 +28124,10 @@ typedef struct hwrm_port_phy_qcfg_output {
* When set to 1, then FEC CLAUSE 91 (Reed Solomon RS(528,514) for
* NRZ) is enabled on this port. This means that FEC RS(528,514) is
* either advertised if FEC autonegotiation is enabled or FEC
- * RS(528,514) is force enabled. When set to 0, then FEC RS(528,514)
+ * RS(528,514) is force enabled. When set to 0, then FEC RS(528,514)
* is disabled if supported.
- * This flag should be ignored if FEC CLAUSE 91 is not supported on this port.
+ * This flag should be ignored if FEC CLAUSE 91 is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_CLAUSE91_ENABLED UINT32_C(0x40)
/*
@@ -25106,7 +28140,8 @@ typedef struct hwrm_port_phy_qcfg_output {
* port. This means that FEC RS544_1XN is either advertised if
* FEC autonegotiation is enabled or FEC RS544_1XN is force enabled.
* When set to 0, then FEC RS544_1XN is disabled if supported.
- * This flag should be ignored if FEC RS544_1XN is not supported on this port.
+ * This flag should be ignored if FEC RS544_1XN is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS544_1XN_ENABLED UINT32_C(0x100)
/*
@@ -25118,8 +28153,9 @@ typedef struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS(544,514) is enabled on this
* port. This means that FEC RS(544,514) is either advertised if
* FEC autonegotiation is enabled or FEC RS(544,514) is force
- * enabled. When set to 0, then FEC RS(544,514) is disabled if supported.
- * This flag should be ignored if FEC RS(544,514) is not supported on this port.
+ * enabled. When set to 0, then FEC RS(544,514) is disabled if
+ * supported. This flag should be ignored if FEC RS(544,514) is not
+ * supported on this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS544_IEEE_ENABLED UINT32_C(0x400)
/*
@@ -25131,8 +28167,10 @@ typedef struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS272_1XN is enabled on this
* port. This means that FEC RS272_1XN is either advertised if
* FEC autonegotiation is enabled or FEC RS272_1XN is force
- * enabled. When set to 0, then FEC RS272_1XN is disabled if supported.
- * This flag should be ignored if FEC RS272_1XN is not supported on this port.
+ * enabled. When set to 0, then FEC RS272_1XN is disabled if
+ * supported.
+ * This flag should be ignored if FEC RS272_1XN is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS272_1XN_ENABLED UINT32_C(0x1000)
/*
@@ -25144,8 +28182,10 @@ typedef struct hwrm_port_phy_qcfg_output {
* When set to 1, then RS(272,257) is enabled on this
* port. This means that FEC RS(272,257) is either advertised if
* FEC autonegotiation is enabled or FEC RS(272,257) is force
- * enabled. When set to 0, then FEC RS(272,257) is disabled if supported.
- * This flag should be ignored if FEC RS(272,257) is not supported on this port.
+ * enabled. When set to 0, then FEC RS(272,257) is disabled if
+ * supported.
+ * This flag should be ignored if FEC RS(272,257) is not supported on
+ * this port.
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_FEC_CFG_FEC_RS272_IEEE_ENABLED UINT32_C(0x4000)
/*
@@ -25168,6 +28208,11 @@ typedef struct hwrm_port_phy_qcfg_output {
*/
#define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SIGNAL_MODE_KNOWN UINT32_C(0x2)
/*
+ * When this bit is '1', speeds2 fields are used to get
+ * speed details.
+ */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SPEEDS2_SUPPORTED UINT32_C(0x4)
+ /*
* Up to 16 bytes of null padded ASCII string representing
* PHY vendor.
* If the string is set to null, then the vendor name is not
@@ -25232,12 +28277,125 @@ typedef struct hwrm_port_phy_qcfg_output {
uint8_t link_down_reason;
/* Remote fault */
#define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_DOWN_REASON_RF UINT32_C(0x1)
- uint8_t unused_0[7];
+ /*
+ * The supported speeds for the port. This is a bit mask.
+ * For each speed that is supported, the corresponding
+ * bit will be set to '1'. This is valid only if speeds2_supported
+ * is set in option_flags.
+ */
+ uint16_t support_speeds2;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB UINT32_C(0x1)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_10GB UINT32_C(0x2)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_25GB UINT32_C(0x4)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_40GB UINT32_C(0x8)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB UINT32_C(0x10)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB UINT32_C(0x20)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56 UINT32_C(0x40)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56 UINT32_C(0x80)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56 UINT32_C(0x100)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56 UINT32_C(0x200)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112 UINT32_C(0x400)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112 UINT32_C(0x800)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112 UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_800GB_PAM4_112 UINT32_C(0x2000)
+ /*
+ * Current setting of forced link speed. When the link speed is not
+ * being forced, this value shall be set to 0.
+ * This field is valid only if speeds2_supported is set in
+ * option_flags.
+ */
+ uint16_t force_link_speeds2;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_1GB UINT32_C(0xa)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_10GB UINT32_C(0x64)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_25GB UINT32_C(0xfa)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_40GB UINT32_C(0x190)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_50GB UINT32_C(0x1f4)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB UINT32_C(0x3e8)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56 UINT32_C(0x1f5)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56 UINT32_C(0x3e9)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56 UINT32_C(0x7d1)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56 UINT32_C(0xfa1)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112 UINT32_C(0x3ea)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112 UINT32_C(0x7d2)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112 UINT32_C(0xfa2)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112 UINT32_C(0x1f42)
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_LAST HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ /*
+ * Current setting of the auto_link_speeds2 mask that is used to
+ * advertise speeds during autonegotiation.
+ * This field is only valid when auto_mode is set to "mask"
+ * and speeds2_supported is set in option_flags.
+ * The speeds specified in this field shall be a subset of
+ * supported speeds on this port.
+ */
+ uint16_t auto_link_speeds2;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_1GB UINT32_C(0x1)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_10GB UINT32_C(0x2)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_25GB UINT32_C(0x4)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_40GB UINT32_C(0x8)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_50GB UINT32_C(0x10)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB UINT32_C(0x20)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_50GB_PAM4_56 UINT32_C(0x40)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB_PAM4_56 UINT32_C(0x80)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_200GB_PAM4_56 UINT32_C(0x100)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_400GB_PAM4_56 UINT32_C(0x200)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_100GB_PAM4_112 UINT32_C(0x400)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_200GB_PAM4_112 UINT32_C(0x800)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_400GB_PAM4_112 UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEEDS2_800GB_PAM4_112 UINT32_C(0x2000)
+ /*
+ * This field indicates the number of lanes used to transfer
+ * data. If the link is down, the value is zero.
+ * This is valid only if speeds2_supported is set in option_flags.
+ */
+ uint8_t active_lanes;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
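
A consumer should gate every speeds2 field in this response on the option_flags bit introduced above. A hedged sketch, assuming resp points at a completed (valid == 1) hwrm_port_phy_qcfg_output:

/* Sketch: report 800G capability only when speeds2 fields are valid. */
static int
port_supports_800g(const hwrm_port_phy_qcfg_output_t *resp)
{
	if (!(resp->option_flags &
	    HWRM_PORT_PHY_QCFG_OUTPUT_OPTION_FLAGS_SPEEDS2_SUPPORTED))
		return (0);
	return ((le16toh(resp->support_speeds2) &
	    HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_800GB_PAM4_112) != 0);
}
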
@@ -25420,13 +28578,13 @@ typedef struct hwrm_port_mac_cfg_input {
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_DSCP2COS_MAP_PRI UINT32_C(0x20)
/*
- * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to be
- * configured.
+ * This bit must be '1' for the rx_ts_capture_ptp_msg_type field to
+ * be configured.
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE UINT32_C(0x40)
/*
- * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to be
- * configured.
+ * This bit must be '1' for the tx_ts_capture_ptp_msg_type field to
+ * be configured.
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE UINT32_C(0x80)
/*
@@ -25444,6 +28602,11 @@ typedef struct hwrm_port_mac_cfg_input {
* configured.
*/
#define HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_ADJ_PHASE UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the ptp_load_control field to
+ * be configured.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_LOAD_CONTROL UINT32_C(0x800)
/* Port ID of port that is to be configured. */
uint16_t port_id;
/*
@@ -25453,7 +28616,7 @@ typedef struct hwrm_port_mac_cfg_input {
uint8_t ipg;
/* This value controls the loopback setting for the MAC. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_CFG_INPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -25618,7 +28781,25 @@ typedef struct hwrm_port_mac_cfg_input {
* of sync timer updates (measured in parts per billion).
*/
int32_t ptp_freq_adj_ppb;
- uint8_t unused_1[4];
+ uint8_t unused_1[3];
+ /*
+ * This value controls how PTP configuration values such as
+ * freq_adj and phase are loaded in the hardware block.
+ */
+ uint8_t ptp_load_control;
+ /* PTP configuration is not loaded in hardware. */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_NONE UINT32_C(0x0)
+ /*
+ * PTP configuration will be loaded immediately in the hardware
+ * block. By default, it will always be immediate.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_IMMEDIATE UINT32_C(0x1)
+ /*
+ * PTP configuration will be loaded at the next Pulse per Second (PPS)
+ * event in the hardware block.
+ */
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT UINT32_C(0x2)
+ #define HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_LAST HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT
/*
* This unsigned field specifies the phase offset to be applied
* to the PHC (PTP Hardware Clock). This field is specified in
@@ -25656,7 +28837,7 @@ typedef struct hwrm_port_mac_cfg_output {
uint8_t ipg;
/* Current value of the loopback value. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_CFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -25673,9 +28854,9 @@ typedef struct hwrm_port_mac_cfg_output {
uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -25754,7 +28935,7 @@ typedef struct hwrm_port_mac_qcfg_output {
uint8_t ipg;
/* The loopback setting for the MAC. */
uint8_t lpbk;
- /* No loopback is selected. Normal operation. */
+ /* No loopback is selected. Normal operation. */
#define HWRM_PORT_MAC_QCFG_OUTPUT_LPBK_NONE UINT32_C(0x0)
/*
* The HW will be configured with local loopback such that
@@ -25942,20 +29123,35 @@ typedef struct hwrm_port_mac_qcfg_output {
uint8_t unused_1;
uint16_t port_svif_info;
/*
- * This field specifies the source virtual interface of the port being
- * queried. Drivers can use this to program port svif field in the
- * L2 context table
+ * This field specifies the source virtual interface of the port
+ * being queried. Drivers can use this to program port svif field in
+ * the L2 context table.
*/
#define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_MASK UINT32_C(0x7fff)
#define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_SFT 0
/* This field specifies whether port_svif is valid or not */
#define HWRM_PORT_MAC_QCFG_OUTPUT_PORT_SVIF_INFO_PORT_SVIF_VALID UINT32_C(0x8000)
- uint8_t unused_2[5];
+ /*
+ * This field indicates the configured load control for the PTP
+ * time of day (TOD) block.
+ */
+ uint8_t ptp_load_control;
+ /* Indicates the current load control is none. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_NONE UINT32_C(0x0)
+ /* Indicates the current load control is immediate. */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_IMMEDIATE UINT32_C(0x1)
+ /*
+ * Indicates the current load control is at the next Pulse per
+ * Second (PPS) event.
+ */
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_PPS_EVENT UINT32_C(0x2)
+ #define HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_LAST HWRM_PORT_MAC_QCFG_OUTPUT_PTP_LOAD_CONTROL_PPS_EVENT
+ uint8_t unused_2[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
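
The load-control knob is set through hwrm_port_mac_cfg together with its enables bit. A hedged sketch that defers a frequency adjustment to the next PPS edge (the PTP_FREQ_ADJ_PPB enables bit is defined earlier in the input struct, outside this hunk):

/* Sketch: apply ptp_freq_adj_ppb at the next PPS event. */
static void
mac_cfg_ptp_pps_load(hwrm_port_mac_cfg_input_t *req, int32_t ppb)
{
	req->enables |= htole32(
	    HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_FREQ_ADJ_PPB |
	    HWRM_PORT_MAC_CFG_INPUT_ENABLES_PTP_LOAD_CONTROL);
	req->ptp_freq_adj_ppb = (int32_t)htole32((uint32_t)ppb);
	req->ptp_load_control =
	    HWRM_PORT_MAC_CFG_INPUT_PTP_LOAD_CONTROL_PPS_EVENT;
}
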
@@ -26044,6 +29240,11 @@ typedef struct hwrm_port_mac_ptp_qcfg_output {
* configured 64bit RTC.
*/
#define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_RTC_CONFIGURED UINT32_C(0x20)
+ /*
+ * When this bit is set to '1', it indicates that the current time
+ * exposed to the driver is 64bit.
+ */
+ #define HWRM_PORT_MAC_PTP_QCFG_OUTPUT_FLAGS_64B_PHC_TIME UINT32_C(0x40)
uint8_t unused_0[3];
/*
* Offset of the PTP register for the lower 32 bits of timestamp
@@ -26094,9 +29295,9 @@ typedef struct hwrm_port_mac_ptp_qcfg_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -26536,9 +29737,9 @@ typedef struct hwrm_port_qstats_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -26580,42 +29781,90 @@ typedef struct tx_port_stats_ext {
uint64_t tx_packets_cos6;
/* Total number of tx packets count on cos queue 7 */
uint64_t tx_packets_cos7;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 0
+ */
uint64_t pfc_pri0_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 0
+ */
uint64_t pfc_pri0_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 1
+ */
uint64_t pfc_pri1_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 1
+ */
uint64_t pfc_pri1_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 2
+ */
uint64_t pfc_pri2_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 2
+ */
uint64_t pfc_pri2_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 3
+ */
uint64_t pfc_pri3_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 3
+ */
uint64_t pfc_pri3_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 4
+ */
uint64_t pfc_pri4_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 4
+ */
uint64_t pfc_pri4_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 5
+ */
uint64_t pfc_pri5_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 5
+ */
uint64_t pfc_pri5_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 6
+ */
uint64_t pfc_pri6_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 6
+ */
uint64_t pfc_pri6_tx_transitions;
- /* time duration between transmitting a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ /*
+ * Time duration between transmitting an XON -> XOFF and a subsequent XOFF
+ * -> XON for priority 7
+ */
uint64_t pfc_pri7_tx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 7
+ */
uint64_t pfc_pri7_tx_transitions;
} tx_port_stats_ext_t, *ptx_port_stats_ext_t;
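
Because each priority contributes one consecutive duration/transition pair of uint64_t counters, a consumer can walk them as an array instead of naming all eight. A sketch, assuming the counters are little-endian as DMA'ed by firmware:

/* Sketch: sum PFC XOFF/XON transitions across all eight priorities. */
static uint64_t
tx_pfc_total_transitions(const tx_port_stats_ext_t *st)
{
	const uint64_t *pair = &st->pfc_pri0_tx_duration_us;
	uint64_t total = 0;
	int pri;

	for (pri = 0; pri < 8; pri++)
		total += le64toh(pair[pri * 2 + 1]);	/* transitions */
	return (total);
}
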
/* Port Rx Statistics extended Format */
-/* rx_port_stats_ext (size:3776b/472B) */
+/* rx_port_stats_ext (size:3904b/488B) */
typedef struct rx_port_stats_ext {
/* Number of times link state changed to down */
@@ -26624,7 +29873,10 @@ typedef struct rx_port_stats_ext {
uint64_t continuous_pause_events;
/* Number of times the active rings pause bit resumed back */
uint64_t resume_pause_events;
- /* Number of times, the ROCE cos queue PFC is disabled to avoid pause flood/burst */
+ /*
+ * Number of times the ROCE cos queue PFC is disabled to avoid pause
+ * flood/burst
+ */
uint64_t continuous_roce_pause_events;
/* Number of times the ROCE cos queue PFC is enabled back */
uint64_t resume_roce_pause_events;
@@ -26660,45 +29912,94 @@ typedef struct rx_port_stats_ext {
uint64_t rx_packets_cos6;
/* Total number of rx packets count on cos queue 7 */
uint64_t rx_packets_cos7;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 0 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 0
+ */
uint64_t pfc_pri0_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 0 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 0
+ */
uint64_t pfc_pri0_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 1 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 1
+ */
uint64_t pfc_pri1_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 1 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 1
+ */
uint64_t pfc_pri1_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 2 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 2
+ */
uint64_t pfc_pri2_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 2 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 2
+ */
uint64_t pfc_pri2_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 3 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 3
+ */
uint64_t pfc_pri3_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 3 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 3
+ */
uint64_t pfc_pri3_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 4 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 4
+ */
uint64_t pfc_pri4_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 4 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 4
+ */
uint64_t pfc_pri4_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 5 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 5
+ */
uint64_t pfc_pri5_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 5 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 5
+ */
uint64_t pfc_pri5_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 6 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 6
+ */
uint64_t pfc_pri6_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 6 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 6
+ */
uint64_t pfc_pri6_rx_transitions;
- /* time duration receiving a XON -> XOFF and a subsequent XOFF -> XON for priority 7 */
+ /*
+ * Time duration receiving an XON -> XOFF and a subsequent XOFF -> XON for
+ * priority 7
+ */
uint64_t pfc_pri7_rx_duration_us;
- /* Number of times, a XON -> XOFF and XOFF -> XON transitions occur for priority 7 */
+ /*
+ * Number of times XON -> XOFF and XOFF -> XON transitions occur for
+ * priority 7
+ */
uint64_t pfc_pri7_rx_transitions;
/* Total number of received bits */
uint64_t rx_bits;
/* The number of events where the port receive buffer was over 85% full */
uint64_t rx_buffer_passed_threshold;
/*
- * The number of symbol errors that wasn't corrected by FEC correction
- * algorithm
+ * This counter represents uncorrected symbol errors post-FEC and may not
+ * be populated in all cases. Each uncorrected FEC block may result in
+ * one or more symbol errors.
*/
uint64_t rx_pcs_symbol_err;
/* The number of corrected bits on the port according to active FEC */
@@ -26742,6 +30043,21 @@ typedef struct rx_port_stats_ext {
* FEC function in the PHY
*/
uint64_t rx_fec_uncorrectable_blocks;
+ /*
+ * Total number of packets that are dropped due to not matching
+ * any RX filter rules. This value is zero on unsupported
+ * controllers. This counter is per controller; firmware reports the
+ * same value on active ports. This counter does not include the
+ * packet discards because of no available buffers.
+ */
+ uint64_t rx_filter_miss;
+ /*
+	 * This field represents the number of FEC symbol errors, counting
+	 * once for each 10-bit symbol corrected by the FEC block.
+	 * rx_fec_corrected_blocks will be incremented if all symbol errors
+	 * in a codeword get corrected.
+ */
+ uint64_t rx_fec_symbol_err;
} rx_port_stats_ext_t, *prx_port_stats_ext_t;
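
/*
 * Illustrative sketch, not part of the generated interface: one way a
 * consumer could relate the two FEC symbol counters documented above.
 * The 100:1 corrected-to-uncorrected threshold is an arbitrary example
 * value, not a vendor recommendation.
 */
static inline int
rx_port_stats_ext_fec_degraded(const rx_port_stats_ext_t *st)
{
	/* No uncorrected symbols: the link is healthy by this metric. */
	if (st->rx_pcs_symbol_err == 0)
		return (0);
	/* Flag the link when uncorrected errors are relatively frequent. */
	return (st->rx_fec_symbol_err / st->rx_pcs_symbol_err < 100);
}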
/*
@@ -27256,9 +30572,9 @@ typedef struct hwrm_port_qstats_ext_output {
#define HWRM_PORT_QSTATS_EXT_OUTPUT_FLAGS_CLEAR_ROCE_COUNTERS_SUPPORTED UINT32_C(0x1)
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27331,16 +30647,15 @@ typedef struct hwrm_port_qstats_ext_pfc_wd_output {
* statistics block in bytes.
*/
uint16_t pfc_wd_stat_size;
- uint8_t flags;
+ uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
- uint8_t unused_0[4];
} hwrm_port_qstats_ext_pfc_wd_output_t, *phwrm_port_qstats_ext_pfc_wd_output_t;
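
/*
 * Illustrative sketch, not part of the generated interface: the 'valid'
 * byte documented in every output record above is the last byte the
 * firmware writes, so a driver polls it before trusting the rest of the
 * response. The iteration count and the commented-out delay are
 * assumptions for the example.
 */
static inline int
hwrm_wait_resp_valid(const volatile uint8_t *valid, int tries)
{
	while (tries-- > 0) {
		if (*valid == 1)
			return (0);	/* response completely written */
		/* DELAY(1); platform busy-wait goes here (assumed) */
	}
	return (-1);			/* firmware never set valid */
}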
/*************************
@@ -27348,7 +30663,7 @@ typedef struct hwrm_port_qstats_ext_pfc_wd_output {
*************************/
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
typedef struct hwrm_port_lpbk_qstats_input {
/* The HWRM command request type. */
@@ -27379,9 +30694,30 @@ typedef struct hwrm_port_lpbk_qstats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+	 * The size of the loopback statistics buffer passed in
+	 * lpbk_stat_host_addr, in bytes.
+ * Firmware will not exceed this size when it DMAs the
+ * statistics structure to the host. The actual DMA size
+ * will be returned in the response.
+ */
+ uint16_t lpbk_stat_size;
+ uint8_t flags;
+ /*
+ * This bit is set to 1 when request is for a counter mask,
+ * representing the width of each of the stats counters, rather
+ * than counters themselves.
+ */
+ #define HWRM_PORT_LPBK_QSTATS_INPUT_FLAGS_COUNTER_MASK UINT32_C(0x1)
+ uint8_t unused_0[5];
+ /*
+	 * This is the host address where the
+	 * loopback statistics will be stored.
+ */
+ uint64_t lpbk_stat_host_addr;
} hwrm_port_lpbk_qstats_input_t, *phwrm_port_lpbk_qstats_input_t;
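
/*
 * Illustrative sketch, not part of the generated interface: populating
 * the fields added to this request. The 80-byte size comes from the
 * port_lpbk_stats (size:640b/80B) record defined below; the DMA address
 * argument is assumed to be set up by the caller.
 */
static inline void
port_lpbk_qstats_req_fill(hwrm_port_lpbk_qstats_input_t *req,
    uint64_t stats_dma_addr)
{
	/* Firmware will never DMA more than this many bytes. */
	req->lpbk_stat_size = 80;
	/* Clear COUNTER_MASK: ask for counters, not their widths. */
	req->flags &= ~HWRM_PORT_LPBK_QSTATS_INPUT_FLAGS_COUNTER_MASK;
	/* Host buffer that receives the statistics block. */
	req->lpbk_stat_host_addr = stats_dma_addr;
}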
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
typedef struct hwrm_port_lpbk_qstats_output {
/* The specific error status for the command. */
@@ -27392,6 +30728,29 @@ typedef struct hwrm_port_lpbk_qstats_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
+ /*
+ * The size of the loopback statistics block in bytes DMA'ed by the
+ * firmware. Note that this size will never exceed the lpbk_stat_size
+ * field passed in by the driver in the hwrm_port_lpbk_qstats_input
+ * structure.
+ */
+ uint16_t lpbk_stat_size;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} hwrm_port_lpbk_qstats_output_t, *phwrm_port_lpbk_qstats_output_t;
+
+/* Loopback Port Statistic Format */
+/* port_lpbk_stats (size:640b/80B) */
+
+typedef struct port_lpbk_stats {
/* Number of transmitted unicast frames */
uint64_t lpbk_ucast_frames;
/* Number of transmitted multicast frames */
@@ -27404,24 +30763,15 @@ typedef struct hwrm_port_lpbk_qstats_output {
uint64_t lpbk_mcast_bytes;
/* Number of transmitted bytes for broadcast traffic */
uint64_t lpbk_bcast_bytes;
- /* Total Tx Drops for loopback traffic reported by STATS block */
- uint64_t tx_stat_discard;
- /* Total Tx Error Drops for loopback traffic reported by STATS block */
- uint64_t tx_stat_error;
- /* Total Rx Drops for loopback traffic reported by STATS block */
- uint64_t rx_stat_discard;
- /* Total Rx Error Drops for loopback traffic reported by STATS block */
- uint64_t rx_stat_error;
- uint8_t unused_0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
- * the order of writes has to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_port_lpbk_qstats_output_t, *phwrm_port_lpbk_qstats_output_t;
+ /* Number of dropped tx packets */
+ uint64_t lpbk_tx_discards;
+ /* Number of error dropped tx packets */
+ uint64_t lpbk_tx_errors;
+ /* Number of dropped rx packets */
+ uint64_t lpbk_rx_discards;
+ /* Number of error dropped rx packets */
+ uint64_t lpbk_rx_errors;
+} port_lpbk_stats_t, *pport_lpbk_stats_t;
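
/*
 * Illustrative sketch, not part of the generated interface: totalling
 * the loopback drop counters that moved into this record from the old
 * hwrm_port_lpbk_qstats_output layout.
 */
static inline uint64_t
port_lpbk_stats_total_drops(const port_lpbk_stats_t *st)
{
	/* Discards plus error drops, in both directions. */
	return (st->lpbk_tx_discards + st->lpbk_tx_errors +
	    st->lpbk_rx_discards + st->lpbk_rx_errors);
}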
/************************
* hwrm_port_ecn_qstats *
@@ -27505,9 +30855,9 @@ typedef struct hwrm_port_ecn_qstats_output {
uint8_t unused_0[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27612,7 +30962,8 @@ typedef struct hwrm_port_clr_stats_input {
* RoCE associated TX/RX cos counters
* CNP associated TX/RX cos counters
* RoCE/CNP specific TX/RX flow counters
- * Firmware will determine the RoCE/CNP cos queue based on qos profile.
+ * Firmware will determine the RoCE/CNP cos queue based on qos
+ * profile.
* This flag is honored only when RoCE is enabled on that port.
*/
#define HWRM_PORT_CLR_STATS_INPUT_FLAGS_ROCE_COUNTERS UINT32_C(0x1)
@@ -27633,9 +30984,9 @@ typedef struct hwrm_port_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27646,7 +30997,7 @@ typedef struct hwrm_port_clr_stats_output {
****************************/
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
typedef struct hwrm_port_lpbk_clr_stats_input {
/* The HWRM command request type. */
@@ -27677,6 +31028,9 @@ typedef struct hwrm_port_lpbk_clr_stats_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Port ID of port that is to be queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
} hwrm_port_lpbk_clr_stats_input_t, *phwrm_port_lpbk_clr_stats_input_t;
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -27693,9 +31047,9 @@ typedef struct hwrm_port_lpbk_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27819,9 +31173,9 @@ typedef struct hwrm_port_ts_query_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -27868,7 +31222,7 @@ typedef struct hwrm_port_phy_qcaps_input {
uint8_t unused_0[6];
} hwrm_port_phy_qcaps_input_t, *phwrm_port_phy_qcaps_input_t;
-/* hwrm_port_phy_qcaps_output (size:256b/32B) */
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
typedef struct hwrm_port_phy_qcaps_output {
/* The specific error status for the command. */
@@ -27897,9 +31251,9 @@ typedef struct hwrm_port_phy_qcaps_output {
*/
#define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS_AUTONEG_LPBK_SUPPORTED UINT32_C(0x4)
/*
- * Indicates if the configuration of shared PHY settings is supported.
- * In cases where a physical port is shared by multiple functions
- * (e.g. NPAR, multihost, etc), the configuration of PHY
+ * Indicates if the configuration of shared PHY settings is
+ * supported. In cases where a physical port is shared by multiple
+ * functions (e.g. NPAR, multihost, etc), the configuration of PHY
* settings may not be allowed. Callers to HWRM_PORT_PHY_CFG will
* get an HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED error in this case.
*/
@@ -27907,7 +31261,7 @@ typedef struct hwrm_port_phy_qcaps_output {
/*
* If set to 1, it indicates that the port counters and extended
* port counters will not reset when the firmware shuts down or
- * resets the PHY. These counters will only be reset during power
+ * resets the PHY. These counters will only be reset during power
* cycle or by calling HWRM_PORT_CLR_STATS.
* If set to 0, the state of the counters is unspecified when
* firmware shuts down or resets the PHY.
@@ -28096,13 +31450,24 @@ typedef struct hwrm_port_phy_qcaps_output {
* If set to 1, then this field indicates that
* priority-based flow control is not supported.
*/
- #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PFC_UNSUPPORTED UINT32_C(0x2)
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_PFC_UNSUPPORTED UINT32_C(0x2)
/*
* If set to 1, then this field indicates that
* bank based addressing is supported in firmware.
*/
#define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_BANK_ADDR_SUPPORTED UINT32_C(0x4)
/*
+	 * If set to 1, then this field indicates that the
+	 * supported_speeds2 fields are to be used in lieu of all
+	 * supported_speeds variants.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_SPEEDS2_SUPPORTED UINT32_C(0x8)
+ /*
+ * If set to 1, then this field indicates that
+ * the device does not support remote loopback.
+ */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_REMOTE_LPBK_UNSUPPORTED UINT32_C(0x10)
+ /*
* Number of internal ports for this device. This field allows the FW
* to advertise how many internal ports are present. Manufacturing
* tools uses this to determine how many internal ports should have
@@ -28110,11 +31475,85 @@ typedef struct hwrm_port_phy_qcaps_output {
* option "HPTN_MODE" is set to 1.
*/
uint8_t internal_port_cnt;
+ uint8_t unused_0;
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * as forced speeds on this link.
+ * For each speed that can be forced on this link, the
+ * corresponding mask bit shall be set to '1'.
+	 * This field is valid only if the speeds2_supported bit is set in
+	 * flags2.
+ */
+ uint16_t supported_speeds2_force_mode;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_1GB UINT32_C(0x1)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_10GB UINT32_C(0x2)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_25GB UINT32_C(0x4)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_40GB UINT32_C(0x8)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_50GB UINT32_C(0x10)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB UINT32_C(0x20)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 UINT32_C(0x40)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 UINT32_C(0x80)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 UINT32_C(0x100)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 UINT32_C(0x200)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 UINT32_C(0x400)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 UINT32_C(0x800)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 UINT32_C(0x2000)
+ /*
+ * This is a bit mask to indicate what speeds are supported
+ * for autonegotiation on this link.
+ * For each speed that can be autonegotiated on this link, the
+ * corresponding mask bit shall be set to '1'.
+	 * This field is valid only if the speeds2_supported bit is set in
+	 * flags2.
+ */
+ uint16_t supported_speeds2_auto_mode;
+ /* 1Gb link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_1GB UINT32_C(0x1)
+ /* 10Gb (NRZ: 10G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_10GB UINT32_C(0x2)
+ /* 25Gb (NRZ: 25G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_25GB UINT32_C(0x4)
+ /* 40Gb (NRZ: 10G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_40GB UINT32_C(0x8)
+ /* 50Gb (NRZ: 25G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_50GB UINT32_C(0x10)
+ /* 100Gb (NRZ: 25G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB UINT32_C(0x20)
+ /* 50Gb (PAM4-56: 50G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 UINT32_C(0x40)
+ /* 100Gb (PAM4-56: 50G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 UINT32_C(0x80)
+ /* 200Gb (PAM4-56: 50G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 UINT32_C(0x100)
+ /* 400Gb (PAM4-56: 50G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 UINT32_C(0x200)
+ /* 100Gb (PAM4-112: 100G per lane, 1 lane) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 UINT32_C(0x400)
+ /* 200Gb (PAM4-112: 100G per lane, 2 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 UINT32_C(0x800)
+ /* 400Gb (PAM4-112: 100G per lane, 4 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 UINT32_C(0x1000)
+ /* 800Gb (PAM4-112: 100G per lane, 8 lanes) link speed */
+ #define HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 UINT32_C(0x2000)
+ uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
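
/*
 * Illustrative sketch, not part of the generated interface: the
 * supported_speeds2_* masks only apply when SPEEDS2_SUPPORTED is set in
 * flags2. The flags2 field itself is assumed from the FLAGS2_* defines
 * above; it is elided from this hunk.
 */
static inline int
phy_qcaps_can_force_800g(const hwrm_port_phy_qcaps_output_t *resp)
{
	if (!(resp->flags2 &
	    HWRM_PORT_PHY_QCAPS_OUTPUT_FLAGS2_SPEEDS2_SUPPORTED))
		return (0);	/* fall back to the legacy speed fields */
	return ((resp->supported_speeds2_force_mode &
	    HWRM_PORT_PHY_QCAPS_OUTPUT_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112) != 0);
}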
@@ -28203,9 +31642,9 @@ typedef struct hwrm_port_phy_i2c_write_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -28294,9 +31733,9 @@ typedef struct hwrm_port_phy_i2c_read_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -28373,9 +31812,9 @@ typedef struct hwrm_port_phy_mdio_write_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -28452,9 +31891,9 @@ typedef struct hwrm_port_phy_mdio_read_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -28846,9 +32285,9 @@ typedef struct hwrm_port_led_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29142,9 +32581,9 @@ typedef struct hwrm_port_led_qcfg_output {
uint8_t unused_4[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29460,9 +32899,9 @@ typedef struct hwrm_port_led_qcaps_output {
uint8_t unused_4[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29508,7 +32947,8 @@ typedef struct hwrm_port_prbs_test_input {
uint64_t resp_data_addr;
/*
* Size of the buffer pointed to by resp_data_addr. The firmware may
- * use this entire buffer or less than the entire buffer, but never more.
+ * use this entire buffer or less than the entire buffer, but never
+ * more.
*/
uint16_t data_len;
uint16_t flags;
@@ -29577,6 +33017,8 @@ typedef struct hwrm_port_prbs_test_input {
* If fec_stat_t0_t7 is set, fec_stat_t8_t15 field will be ignored.
*/
#define HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_FEC_STAT_T8_T15 UINT32_C(0x10)
+	/* If set, the PRBS test will run the t-code project as well. */
+ #define HWRM_PORT_PRBS_TEST_INPUT_PRBS_CONFIG_T_CODE UINT32_C(0x20)
/* Duration in seconds to run the PRBS test. */
uint16_t timeout;
/*
@@ -29617,9 +33059,9 @@ typedef struct hwrm_port_prbs_test_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -29664,13 +33106,20 @@ typedef struct hwrm_port_dsc_dump_input {
/* Host address where response diagnostic data is returned. */
uint64_t resp_data_addr;
/*
- * Size of the buffer pointed to by resp_data_addr. The firmware
+ * Size of the host buffer pointed to by resp_data_addr. The firmware
* may use this entire buffer or less than the entire buffer, but
* never more.
*/
uint16_t data_len;
uint16_t unused_0;
- uint32_t unused_1;
+ /*
+ * Ignored by the start command.
+ * In legacy buffer mode, this is ignored. The transfer starts
+ * at buffer offset zero and must be transferred in one command.
+ * In big buffer mode, this is the offset into the NIC buffer for
+ * the current retrieve command to start.
+ */
+ uint32_t data_offset;
/* Port ID of port where dsc dump to be collected. */
uint16_t port_id;
/* Diag level specified by the user */
@@ -29703,20 +33152,48 @@ typedef struct hwrm_port_dsc_dump_input {
#define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP UINT32_C(0xc)
#define HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_LAST HWRM_PORT_DSC_DUMP_INPUT_DIAG_LEVEL_SRDS_DIAG_TIMESTAMP
/*
- * This field is a lane number
- * on which to collect the dsc dump
+ * This field is the lane number on which to collect the dsc dump.
+ * If this is 0xFFFF, the dsc dump will be collected for all lanes,
+	 * provided the hardware and firmware support this feature.
*/
uint16_t lane_number;
- /*
- * Configuration bits.
- * Use enable bit to start dsc dump or retrieve dump
- */
+ /* Configuration bits. */
uint16_t dsc_dump_config;
/*
* Set 0 to retrieve the dsc dump
* Set 1 to start the dsc dump
+	 * Some configuration parameters for the dscdump report are
+	 * set by the start request and cannot be modified until the
+	 * retrieve operation is complete, on the next start.
*/
#define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE UINT32_C(0x1)
+ /*
+ * Set 0 to limit the report size to 65535 bytes.
+ * Set 1 to allow a larger buffer size.
+ * This can only be set 1 in the start operation.
+ * If this is set 0 in the start operation, the firmware will
+ * assume it needs to only expose up to 65535 bytes of the report,
+ * and only allow a single retrieve operation to retrieve the
+ * entire report. This mode will truncate longer reports.
+ * If this is set 1 in the start operation, the firmware will
+ * report the full size of the report (up to the firmware's limit),
+ * permit retrieve operations to hold the buffer using the config
+ * defer_close, and honour the data_offset value so later data
+ * in the report can be retrieved.
+ */
+ #define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_BIG_BUFFER UINT32_C(0x2)
+ /*
+ * Set 0 on the last 'retrieve' to release the firmware buffer
+ * Set 1 on the other 'retrieve' to hold the firmware buffer
+ * This only affects retrieve operations.
+ * In big_buffer mode, this allows the driver or tool to tell
+	 * the firmware to keep the report around, as it intends to read
+	 * in more of it. The final read must set this to zero, to tell
+ * the firmware the report buffer can be released.
+ * This only works if the start request specified big_buffer as
+ * one; it is ignored otherwise.
+ */
+ #define HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_DEFER_CLOSE UINT32_C(0x4)
} hwrm_port_dsc_dump_input_t, *phwrm_port_dsc_dump_input_t;
/* hwrm_port_dsc_dump_output (size:128b/16B) */
@@ -29730,15 +33207,49 @@ typedef struct hwrm_port_dsc_dump_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Total length of stored data. */
+ /*
+ * Total length of stored data; if big_buffer is one, this
+ * only contains the lower 16 bits of the total length.
+ * In legacy buffer mode, this is zero in the 'start' response.
+ * In big buffer mode, this has the size of the report even
+ * in the 'start' response.
+ * In both modes, this contains the number of bytes written
+ * to the host in 'retrieve' responses.
+ */
uint16_t total_data_len;
- uint16_t unused_0;
- uint8_t unused_1[3];
+ /*
+ * The upper 16 bits of the total length of stored data.
+ * In legacy buffer mode, this will always be zero.
+ * In big buffer mode, this will be populated even in the
+ * 'start' response.
+ * This is always zero for 'retrieve' responses.
+ */
+ uint16_t total_data_len_high;
+ uint8_t unused_1[2];
+ /* Result information bits. */
+ uint8_t flags;
+ /*
+ * Set according to the start request's input big_buffer.
+ * If this is zero, it indicates the function is acting per
+ * legacy behaviour -- it will report a buffer size up to almost
+ * 64KiB, and allow only one retrieval request before releasing
+ * the firmware buffer containing the report (total_data_len_high
+ * will be zero). The request's data_offset field and defer_close
+ * and use_offset config flags are ignored.
+	 * If this is one, it indicates support for (and a request for)
+	 * larger reports. The full 32b report size (up to the
+ * firmware buffer limit) is provided by the start response in
+ * total_data_len (low 16b) and total_data_len_high (high 16b),
+ * and retrieve requests may keep the buffer using the defer_close
+ * flag, and retrieve the later parts of the report using the
+ * data_offset field.
+ */
+ #define HWRM_PORT_DSC_DUMP_OUTPUT_FLAGS_BIG_BUFFER UINT32_C(0x1)
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
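
/*
 * Illustrative sketch, not part of the generated interface: starting a
 * dscdump in big-buffer mode and recovering the full 32-bit report size
 * from the split length fields above. hwrm_send() stands in for the
 * driver's real request/response transport (an assumption), and error
 * handling is elided. Subsequent 'retrieve' requests would then walk
 * data_offset, keeping DEFER_CLOSE set until the final read.
 */
static inline uint32_t
port_dsc_dump_start_big(hwrm_port_dsc_dump_input_t *req,
    hwrm_port_dsc_dump_output_t *resp, uint16_t port_id)
{
	req->port_id = port_id;
	req->dsc_dump_config =
	    HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_START_RETRIEVE |
	    HWRM_PORT_DSC_DUMP_INPUT_DSC_DUMP_CONFIG_BIG_BUFFER;
	/* hwrm_send(req, resp); assumed transport call */
	/* Low 16 bits in total_data_len, high 16 in total_data_len_high. */
	return ((uint32_t)resp->total_data_len |
	    ((uint32_t)resp->total_data_len_high << 16));
}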
@@ -29865,7 +33376,7 @@ typedef struct hwrm_port_sfp_sideband_cfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written. When
* writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
@@ -29980,7 +33491,7 @@ typedef struct hwrm_port_sfp_sideband_qcfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written. When
* writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
@@ -30035,7 +33546,7 @@ typedef struct hwrm_port_phy_mdio_bus_acquire_input {
*/
uint16_t client_id;
/*
- * Timeout in milli seconds, MDIO BUS will be released automatically
+	 * Timeout in milliseconds; the MDIO bus will be released automatically
* after this time, if another mdio acquire command is not received
* within the timeout window from the same client.
* A 0xFFFF will hold the bus until this bus is released.
@@ -30064,9 +33575,9 @@ typedef struct hwrm_port_phy_mdio_bus_acquire_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30135,9 +33646,9 @@ typedef struct hwrm_port_phy_mdio_bus_release_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30182,10 +33693,20 @@ typedef struct hwrm_port_tx_fir_cfg_input {
/* Modulation types of TX FIR: NRZ, PAM4. */
uint8_t mod_type;
/* For NRZ */
- #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
/* For PAM4 */
- #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
- #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LAST HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ /* For Optical NRZ */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_NRZ UINT32_C(0x2)
+ /* For Optical PAM4 */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_PAM4 UINT32_C(0x3)
+ /* For DAC PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_PAM4_112 UINT32_C(0x4)
+ /* For Optical PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_C2M_PAM4_112G UINT32_C(0x5)
+ /* For LPO PAM4 112G */
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LPO_PAM4_112G UINT32_C(0x6)
+ #define HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LAST HWRM_PORT_TX_FIR_CFG_INPUT_MOD_TYPE_LPO_PAM4_112G
/* The lane mask of the lane TX FIR will be configured. */
uint8_t lane_mask;
uint8_t unused_0[2];
@@ -30214,9 +33735,9 @@ typedef struct hwrm_port_tx_fir_cfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30261,10 +33782,20 @@ typedef struct hwrm_port_tx_fir_qcfg_input {
/* Modulation types of TX FIR: NRZ, PAM4. */
uint8_t mod_type;
/* For NRZ */
- #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
- /* For PAM4 */
- #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
- #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LAST HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_NRZ UINT32_C(0x0)
+ /* For PAM4 56G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4 UINT32_C(0x1)
+ /* For Optical NRZ */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_NRZ UINT32_C(0x2)
+ /* For Optical PAM4 56G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_PAM4 UINT32_C(0x3)
+ /* For DAC PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_PAM4_112 UINT32_C(0x4)
+ /* For Optical PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_C2M_PAM4_112 UINT32_C(0x5)
+ /* For LPO PAM4 112G */
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LPO_PAM4_112 UINT32_C(0x6)
+ #define HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LAST HWRM_PORT_TX_FIR_QCFG_INPUT_MOD_TYPE_LPO_PAM4_112
/* The ID of the lane TX FIR will be queried. */
uint8_t lane_id;
uint8_t unused[6];
@@ -30292,9 +33823,9 @@ typedef struct hwrm_port_tx_fir_qcfg_output {
uint8_t unused[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30398,9 +33929,9 @@ typedef struct hwrm_port_ep_tx_cfg_input {
*/
uint8_t ep2_min_bw;
/*
- * Specifies the maximum portion of the port's bandwidth that the set of
- * PFs and VFs on PCIe endpoint 2 may use. The value is a percentage of
- * the link bandwidth, from 0 to 100. A value of 0 indicates no
+ * Specifies the maximum portion of the port's bandwidth that the set
+ * of PFs and VFs on PCIe endpoint 2 may use. The value is a percentage
+ * of the link bandwidth, from 0 to 100. A value of 0 indicates no
* maximum rate.
*/
uint8_t ep2_max_bw;
@@ -30675,9 +34206,9 @@ typedef struct hwrm_port_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -30756,15 +34287,224 @@ typedef struct hwrm_port_qcfg_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} hwrm_port_qcfg_output_t, *phwrm_port_qcfg_output_t;
/***********************
+ * hwrm_port_mac_qcaps *
+ ***********************/
+
+
+/* hwrm_port_mac_qcaps_input (size:192b/24B) */
+
+typedef struct hwrm_port_mac_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Port ID of port that is being queried. */
+ uint16_t port_id;
+ uint8_t unused_0[6];
+} hwrm_port_mac_qcaps_input_t, *phwrm_port_mac_qcaps_input_t;
+
+/* hwrm_port_mac_qcaps_output (size:128b/16B) */
+
+typedef struct hwrm_port_mac_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* MAC capability flags */
+ uint8_t flags;
+ /*
+ * If set to 1, then this field indicates that the
+ * MAC does not support local loopback.
+ */
+ #define HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_LOCAL_LPBK_NOT_SUPPORTED UINT32_C(0x1)
+ /*
+ * If set to 1, then this field indicates that the
+ * MAC is capable of supporting remote loopback.
+ */
+ #define HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_REMOTE_LPBK_SUPPORTED UINT32_C(0x2)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_port_mac_qcaps_output_t, *phwrm_port_mac_qcaps_output_t;
+
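
/*
 * Illustrative sketch, not part of the generated interface: note the
 * opposite polarity of the two capability flags above; local loopback
 * is assumed available unless LOCAL_LPBK_NOT_SUPPORTED is set, while
 * remote loopback must be positively advertised.
 */
static inline int
mac_qcaps_local_lpbk_ok(const hwrm_port_mac_qcaps_output_t *resp)
{
	return ((resp->flags &
	    HWRM_PORT_MAC_QCAPS_OUTPUT_FLAGS_LOCAL_LPBK_NOT_SUPPORTED) == 0);
}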
+/*********************
+ * hwrm_port_poe_cfg *
+ *********************/
+
+
+/* hwrm_port_poe_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_port_poe_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Expander port index for which PoE has to be enabled/disabled */
+ uint8_t exp_port_idx;
+ /* PoE enable/disable flag */
+ uint8_t flags;
+ /* This field indicates that the PoE has to be enabled. */
+ #define HWRM_PORT_POE_CFG_INPUT_FLAGS_ENABLE_POE UINT32_C(0x1)
+ uint8_t unused_0[6];
+} hwrm_port_poe_cfg_input_t, *phwrm_port_poe_cfg_input_t;
+
+/* hwrm_port_poe_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_port_poe_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_port_poe_cfg_output_t, *phwrm_port_poe_cfg_output_t;
+
+/**********************
+ * hwrm_port_poe_qcfg *
+ **********************/
+
+
+/* hwrm_port_poe_qcfg_input (size:192b/24B) */
+
+typedef struct hwrm_port_poe_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Expander port which is queried */
+ uint8_t exp_port_idx;
+ uint8_t unused_0[7];
+} hwrm_port_poe_qcfg_input_t, *phwrm_port_poe_qcfg_input_t;
+
+/* hwrm_port_poe_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_port_poe_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+	/* This field indicates whether PoE is enabled or disabled */
+ uint8_t status;
+ /* This field indicates that the PoE is enabled. */
+ #define HWRM_PORT_POE_QCFG_OUTPUT_STATUS_POE_ENABLED UINT32_C(0x1)
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_port_poe_qcfg_output_t, *phwrm_port_poe_qcfg_output_t;
+
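
/*
 * Illustrative sketch, not part of the generated interface: pairing the
 * PoE cfg and qcfg messages defined above. hwrm_send() stands in for
 * the driver's real transport and is an assumption here.
 */
static inline void
port_poe_enable_req_fill(hwrm_port_poe_cfg_input_t *req, uint8_t exp_port)
{
	req->exp_port_idx = exp_port;
	req->flags |= HWRM_PORT_POE_CFG_INPUT_FLAGS_ENABLE_POE;
	/* hwrm_send(req, resp); assumed transport call */
}

static inline int
port_poe_is_enabled(const hwrm_port_poe_qcfg_output_t *resp)
{
	return ((resp->status &
	    HWRM_PORT_POE_QCFG_OUTPUT_STATUS_POE_ENABLED) != 0);
}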
+/***********************
* hwrm_queue_qportcfg *
***********************/
@@ -31299,8 +35039,8 @@ typedef struct hwrm_queue_qportcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31396,8 +35136,8 @@ typedef struct hwrm_queue_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31441,9 +35181,9 @@ typedef struct hwrm_queue_cfg_input {
uint64_t resp_addr;
uint32_t flags;
/*
- * Enumeration denoting the RX, TX, or both directions applicable to the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
+ * Enumeration denoting the RX, TX, or both directions applicable to
+ * the resource. This enumeration is used for resources that are
+ * similar for both TX and RX paths of the chip.
*/
#define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
#define HWRM_QUEUE_CFG_INPUT_FLAGS_PATH_SFT 0
@@ -31500,8 +35240,8 @@ typedef struct hwrm_queue_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31600,8 +35340,8 @@ typedef struct hwrm_queue_pfcenable_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31700,8 +35440,8 @@ typedef struct hwrm_queue_pfcenable_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31851,8 +35591,8 @@ typedef struct hwrm_queue_pri2cos_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -31896,9 +35636,9 @@ typedef struct hwrm_queue_pri2cos_cfg_input {
uint64_t resp_addr;
uint32_t flags;
/*
- * Enumeration denoting the RX, TX, or both directions applicable to the resource.
- * This enumeration is used for resources that are similar for both
- * TX and RX paths of the chip.
+ * Enumeration denoting the RX, TX, or both directions applicable to
+ * the resource. This enumeration is used for resources that are
+ * similar for both TX and RX paths of the chip.
*/
#define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_MASK UINT32_C(0x3)
#define HWRM_QUEUE_PRI2COS_CFG_INPUT_FLAGS_PATH_SFT 0
@@ -31974,7 +35714,7 @@ typedef struct hwrm_queue_pri2cos_cfg_input {
*/
uint8_t pri1_cos_queue_id;
/*
- * CoS Queue assigned to priority 2 This value can only
+ * CoS Queue assigned to priority 2. This value can only
* be changed before traffic has started.
*/
uint8_t pri2_cos_queue_id;
@@ -32021,8 +35761,8 @@ typedef struct hwrm_queue_pri2cos_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -32794,8 +36534,8 @@ typedef struct hwrm_queue_cos2bw_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33608,8 +37348,8 @@ typedef struct hwrm_queue_cos2bw_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33680,8 +37420,8 @@ typedef struct hwrm_queue_dscp_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33765,8 +37505,8 @@ typedef struct hwrm_queue_dscp2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33860,8 +37600,8 @@ typedef struct hwrm_queue_dscp2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -33945,8 +37685,8 @@ typedef struct hwrm_queue_mpls_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34068,8 +37808,8 @@ typedef struct hwrm_queue_mplstc2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34170,7 +37910,7 @@ typedef struct hwrm_queue_mplstc2pri_cfg_input {
*/
uint8_t tc1_pri_queue_id;
/*
- * pri assigned to MPLS TC(EXP) 2 This value can only
+ * pri assigned to MPLS TC(EXP) 2. This value can only
* be changed before traffic has started.
*/
uint8_t tc2_pri_queue_id;
@@ -34216,8 +37956,8 @@ typedef struct hwrm_queue_mplstc2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34288,8 +38028,8 @@ typedef struct hwrm_queue_vlanpri_qcaps_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34403,8 +38143,8 @@ typedef struct hwrm_queue_vlanpri2pri_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34551,8 +38291,8 @@ typedef struct hwrm_queue_vlanpri2pri_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34683,8 +38423,8 @@ typedef struct hwrm_queue_global_cfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34826,13 +38566,1657 @@ typedef struct hwrm_queue_global_qcfg_output {
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
} hwrm_queue_global_qcfg_output_t, *phwrm_queue_global_qcfg_output_t;
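+
+/*
+ * Editor's sketch (not part of the HWRM definitions): the `valid'
+ * convention documented above implies a simple completion protocol.
+ * The driver zeroes the response buffer, issues the command, and polls
+ * the final byte until firmware sets it, because firmware writes that
+ * byte last. hwrm_poll_valid() is a hypothetical helper; DELAY() and
+ * atomic_thread_fence_acq() are the usual FreeBSD kernel primitives.
+ *
+ *	static int
+ *	hwrm_poll_valid(volatile uint8_t *valid, int timeout_us)
+ *	{
+ *		while (timeout_us-- > 0) {
+ *			if (*valid == 1) {
+ *				// Order later response-field reads after
+ *				// the read that observed valid == 1.
+ *				atomic_thread_fence_acq();
+ *				return (0);
+ *			}
+ *			DELAY(1);
+ *		}
+ *		return (ETIMEDOUT);
+ *	}
+ */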
+/****************************************
+ * hwrm_queue_adptv_qos_rx_feature_qcfg *
+ ****************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_feature_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_feature_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_adptv_qos_rx_feature_qcfg_input_t, *phwrm_queue_adptv_qos_rx_feature_qcfg_input_t;
+
+/* hwrm_queue_adptv_qos_rx_feature_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_feature_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Bitmask indicating which RX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ /*
+ * Bitmask indicating which CoS queues are lossy or lossless.
+	 * This setting is kept the same across the Rx and Tx directions, even
+	 * though the name mentions only Rx. Each bit represents a specific queue
+ * where bit 0 represents queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is lossy.
+ * A value of 1 indicates that the queue is lossless.
+ */
+ uint8_t queue_mode;
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE UINT32_C(0x1)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSY UINT32_C(0x0)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSLESS UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID0_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE UINT32_C(0x2)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSY (UINT32_C(0x0) << 1)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSLESS (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID1_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE UINT32_C(0x4)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSY (UINT32_C(0x0) << 2)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSLESS (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID2_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE UINT32_C(0x8)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSY (UINT32_C(0x0) << 3)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSLESS (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID3_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE UINT32_C(0x10)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSY (UINT32_C(0x0) << 4)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSLESS (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID4_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE UINT32_C(0x20)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSY (UINT32_C(0x0) << 5)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSLESS (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID5_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE UINT32_C(0x40)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSY (UINT32_C(0x0) << 6)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSLESS (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID6_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE UINT32_C(0x80)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSY (UINT32_C(0x0) << 7)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSLESS (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_QCFG_OUTPUT_QUEUE_MODE_QID7_MODE_LOSSLESS
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_rx_feature_qcfg_output_t, *phwrm_queue_adptv_qos_rx_feature_qcfg_output_t;
+
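+/*
+ * Editor's sketch (not part of the HWRM definitions): queue_enable and
+ * queue_mode pack one bit per CoS queue (bit 0 = queue 0 ... bit 7 =
+ * queue 7), so a caller can decode a qcfg response with plain shifts
+ * instead of the per-queue QIDn macros:
+ *
+ *	// resp points at a completed, DMA'd qcfg response buffer.
+ *	const hwrm_queue_adptv_qos_rx_feature_qcfg_output_t *resp;
+ *	for (int qid = 0; qid < 8; qid++) {
+ *		int enabled  = (resp->queue_enable >> qid) & 1;
+ *		int lossless = (resp->queue_mode >> qid) & 1;
+ *		printf("queue %d: %s, %s\n", qid,
+ *		    enabled ? "enabled" : "disabled",
+ *		    lossless ? "lossless" : "lossy");
+ *	}
+ */
+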
+/***************************************
+ * hwrm_queue_adptv_qos_rx_feature_cfg *
+ ***************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_feature_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_feature_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the queue_enable field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE UINT32_C(0x1)
+ /* This bit must be '1' for the queue_mode field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_MODE UINT32_C(0x2)
+ /*
+ * Bitmask indicating which RX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ /*
+ * Bitmask indicating which CoS queues are lossy or lossless.
+	 * This setting is kept the same across the Tx and Rx directions.
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is lossy.
+ * A value of 1 indicates that the queue is lossless.
+ */
+ uint8_t queue_mode;
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE UINT32_C(0x1)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSY UINT32_C(0x0)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSLESS UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID0_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE UINT32_C(0x2)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSY (UINT32_C(0x0) << 1)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSLESS (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID1_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE UINT32_C(0x4)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSY (UINT32_C(0x0) << 2)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSLESS (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID2_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE UINT32_C(0x8)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSY (UINT32_C(0x0) << 3)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSLESS (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID3_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE UINT32_C(0x10)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSY (UINT32_C(0x0) << 4)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSLESS (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID4_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE UINT32_C(0x20)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSY (UINT32_C(0x0) << 5)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSLESS (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID5_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE UINT32_C(0x40)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSY (UINT32_C(0x0) << 6)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSLESS (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID6_MODE_LOSSLESS
+ /* If set to 0, then the queue is lossy, else lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE UINT32_C(0x80)
+ /* Lossy (best-effort). */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSY (UINT32_C(0x0) << 7)
+ /* Lossless. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSLESS (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LAST HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_QUEUE_MODE_QID7_MODE_LOSSLESS
+ uint8_t unused_0[2];
+} hwrm_queue_adptv_qos_rx_feature_cfg_input_t, *phwrm_queue_adptv_qos_rx_feature_cfg_input_t;
+
+/* hwrm_queue_adptv_qos_rx_feature_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_feature_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_rx_feature_cfg_output_t, *phwrm_queue_adptv_qos_rx_feature_cfg_output_t;
+
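+/*
+ * Editor's sketch (not part of the HWRM definitions): the enables word
+ * selects which input fields firmware should act on, so a caller that
+ * only wants to change queue_mode sets just that enable bit and leaves
+ * queue_enable alone. hwrm_send_message() and sc are hypothetical
+ * driver-side names; HWRM fields are assumed little-endian on the
+ * wire, hence htole32().
+ *
+ *	hwrm_queue_adptv_qos_rx_feature_cfg_input_t req = { 0 };
+ *	req.enables = htole32(
+ *	    HWRM_QUEUE_ADPTV_QOS_RX_FEATURE_CFG_INPUT_ENABLES_QUEUE_MODE);
+ *	req.queue_mode = 0x0f;	// queues 0-3 lossless, 4-7 lossy
+ *	rc = hwrm_send_message(sc, &req, sizeof(req));
+ */
+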
+/****************************************
+ * hwrm_queue_adptv_qos_tx_feature_qcfg *
+ ****************************************/
+
+
+/* hwrm_queue_adptv_qos_tx_feature_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_feature_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_adptv_qos_tx_feature_qcfg_input_t, *phwrm_queue_adptv_qos_tx_feature_qcfg_input_t;
+
+/* hwrm_queue_adptv_qos_tx_feature_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_feature_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Bitmask indicating which TX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_QCFG_OUTPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ uint8_t unused_0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_tx_feature_qcfg_output_t, *phwrm_queue_adptv_qos_tx_feature_qcfg_output_t;
+
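+/*
+ * Editor's sketch (not part of the HWRM definitions): the TX variant
+ * reports only queue_enable; the lossy/lossless mode lives solely in
+ * the RX feature commands and is documented as shared across both
+ * directions. A cheap consistency check is therefore a mask compare:
+ *
+ *	// rx_resp and tx_resp are responses from the two qcfg commands.
+ *	if (rx_resp->queue_enable != tx_resp->queue_enable)
+ *		printf("warning: RX/TX CoS enable masks differ\n");
+ */
+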
+/***************************************
+ * hwrm_queue_adptv_qos_tx_feature_cfg *
+ ***************************************/
+
+
+/* hwrm_queue_adptv_qos_tx_feature_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_feature_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the queue_enable field to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE UINT32_C(0x1)
+ /*
+ * Bitmask indicating which TX CoS queues are enabled or disabled.
+ *
+ * Each bit represents a specific queue where bit 0 represents
+ * queue 0 and bit 7 represents queue 7.
+ * A value of 0 indicates that the queue is not enabled.
+ * A value of 1 indicates that the queue is enabled.
+ */
+ uint8_t queue_enable;
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE UINT32_C(0x1)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_DISABLED UINT32_C(0x0)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED UINT32_C(0x1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE UINT32_C(0x2)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_DISABLED (UINT32_C(0x0) << 1)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED (UINT32_C(0x1) << 1)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE UINT32_C(0x4)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_DISABLED (UINT32_C(0x0) << 2)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED (UINT32_C(0x1) << 2)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID2_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE UINT32_C(0x8)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_DISABLED (UINT32_C(0x0) << 3)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED (UINT32_C(0x1) << 3)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID3_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE UINT32_C(0x10)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_DISABLED (UINT32_C(0x0) << 4)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED (UINT32_C(0x1) << 4)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID4_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE UINT32_C(0x20)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_DISABLED (UINT32_C(0x0) << 5)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED (UINT32_C(0x1) << 5)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID5_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE UINT32_C(0x40)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_DISABLED (UINT32_C(0x0) << 6)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED (UINT32_C(0x1) << 6)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID6_ENABLE_ENABLED
+ /* If set to 1, then the queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE UINT32_C(0x80)
+ /* Queue is disabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_DISABLED (UINT32_C(0x0) << 7)
+ /* Queue is enabled. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED (UINT32_C(0x1) << 7)
+ #define HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_LAST HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID7_ENABLE_ENABLED
+ uint8_t unused_0[3];
+} hwrm_queue_adptv_qos_tx_feature_cfg_input_t, *phwrm_queue_adptv_qos_tx_feature_cfg_input_t;
+
+/* hwrm_queue_adptv_qos_tx_feature_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_feature_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_tx_feature_cfg_output_t, *phwrm_queue_adptv_qos_tx_feature_cfg_output_t;
+
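+/*
+ * Editor's sketch (not part of the HWRM definitions): since the TX cfg
+ * command exposes only the queue_enable knob, a minimal request built
+ * from the QIDn macros (equivalent to setting bits 0 and 1 directly)
+ * looks like:
+ *
+ *	hwrm_queue_adptv_qos_tx_feature_cfg_input_t req = { 0 };
+ *	req.enables = htole32(
+ *	    HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_ENABLES_QUEUE_ENABLE);
+ *	req.queue_enable =
+ *	    HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID0_ENABLE |
+ *	    HWRM_QUEUE_ADPTV_QOS_TX_FEATURE_CFG_INPUT_QUEUE_ENABLE_QID1_ENABLE;
+ */
+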
+/********************
+ * hwrm_queue_qcaps *
+ ********************/
+
+
+/* hwrm_queue_qcaps_input (size:128b/16B) */
+
+typedef struct hwrm_queue_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_qcaps_input_t, *phwrm_queue_qcaps_input_t;
+
+/* hwrm_queue_qcaps_output (size:256b/32B) */
+
+typedef struct hwrm_queue_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Adaptive QoS RX feature parameter capability flags. */
+ uint32_t rx_feature_params;
+ /*
+ * When this bit is '1' the capability to configure queue_enable
+ * is supported.
+ * If set to '0', then the capability to configure queue_enable
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_FEATURE_PARAMS_QUEUE_ENABLE_CAP UINT32_C(0x1)
+ /*
+ * When this bit is '1' the capability to configure queue_mode
+ * is supported.
+ * If set to '0', then the capability to configure queue_mode
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_FEATURE_PARAMS_QUEUE_MODE_CAP UINT32_C(0x2)
+ /* Adaptive QoS TX feature parameter capability flags. */
+ uint32_t tx_feature_params;
+ /*
+ * When this bit is '1' the capability to configure queue_enable
+ * is supported.
+ * If set to '0', then the capability to configure queue_enable
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_FEATURE_PARAMS_QUEUE_ENABLE_CAP UINT32_C(0x1)
+ /*
+ * The maximum number of queues that can be configured on this device.
+ * Valid values range from 1 through 8.
+ */
+ uint8_t max_configurable_queues;
+ uint8_t unused_0[3];
+ /* Adaptive QoS RX tuning parameter capability flags. */
+ uint32_t rx_tuning_params;
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_WFQ_COST_CAP UINT32_C(0x1)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP UINT32_C(0x2)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP UINT32_C(0x4)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_PCIE_BW_EFF_CAP UINT32_C(0x8)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_XOFF_HEADROOM_FACTOR_CAP UINT32_C(0x10)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_MIN_LATENCY_CAP UINT32_C(0x20)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_MAX_LATENCY_CAP UINT32_C(0x40)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP UINT32_C(0x80)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP UINT32_C(0x100)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_L2_PIPE_COS_LATENCY_CAP UINT32_C(0x200)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_ROCE_PIPE_COS_LATENCY_CAP UINT32_C(0x400)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_COS_SHARED_MIN_RATIO_CAP UINT32_C(0x800)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP UINT32_C(0x1000)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_RX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP UINT32_C(0x2000)
+ /* Adaptive QoS TX tuning parameter capability flags. */
+ uint32_t tx_tuning_params;
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_WFQ_COST_CAP UINT32_C(0x1)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_WFQ_UPPER_FACTOR_CAP UINT32_C(0x2)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_HYST_WINDOW_SIZE_FACTOR_CAP UINT32_C(0x4)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_RSVD_CELLS_LIMIT_RATIO_CAP UINT32_C(0x8)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_L2_MIN_LATENCY_CAP UINT32_C(0x10)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_L2_MAX_LATENCY_CAP UINT32_C(0x20)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_ROCE_MIN_LATENCY_CAP UINT32_C(0x40)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_ROCE_MAX_LATENCY_CAP UINT32_C(0x80)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_MAX_TBM_CELLS_PRERESERVED_CAP UINT32_C(0x100)
+ /*
+ * When this bit is '1' the capability to configure the option
+ * is supported.
+ * If set to '0', then the capability to configure the option
+ * is not supported.
+ */
+ #define HWRM_QUEUE_QCAPS_OUTPUT_TX_TUNING_PARAMS_SHAPER_REFILL_TIMER_CAP UINT32_C(0x200)
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_qcaps_output_t, *phwrm_queue_qcaps_output_t;
+
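+/*
+ * Editor's sketch (not part of the HWRM definitions): the qcaps
+ * response gates the feature and tuning cfg commands; a driver should
+ * test the matching *_CAP bit before setting the corresponding enables
+ * bit. le32toh() assumes the usual little-endian wire format; caps
+ * points at a completed qcaps response.
+ *
+ *	uint32_t rx_params = le32toh(caps->rx_feature_params);
+ *	if (rx_params &
+ *	    HWRM_QUEUE_QCAPS_OUTPUT_RX_FEATURE_PARAMS_QUEUE_MODE_CAP) {
+ *		// Safe to set ..._ENABLES_QUEUE_MODE in the RX cfg input.
+ *	}
+ */
+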
+/***************************************
+ * hwrm_queue_adptv_qos_rx_tuning_qcfg *
+ ***************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_tuning_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_tuning_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_adptv_qos_rx_tuning_qcfg_input_t, *phwrm_queue_adptv_qos_rx_tuning_qcfg_input_t;
+
+/* hwrm_queue_adptv_qos_rx_tuning_qcfg_output (size:576b/72B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_tuning_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
+ /*
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
+ */
+ uint32_t wfq_upper_factor;
+ /*
+ * The algorithm multiplies this factor by the MRU size to compute the
+	 * hysteresis window size, which in turn is used in deassert
+ * threshold calculations.
+ */
+ uint32_t hyst_window_size_factor;
+ /*
+ * Specifies PCIe BW efficiency in the range of 0-100%. System
+ * characterization determines the value of this parameter. A value of
+ * less than 100% accounts for internal PCIe over-subscription. The
+ * algorithm uses this parameter to determine the PCIe BW available
+ * for transferring received packets to the host.
+ */
+ uint32_t pcie_bw_eff;
+ /* Scales the number of cells for xoff. */
+ uint32_t xoff_headroom_factor;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_max_latency;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_max_latency;
+ /*
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
+ */
+ uint32_t l2_pipe_cos_latency;
+ /*
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
+ */
+ uint32_t roce_pipe_cos_latency;
+ /* Sets the minimum number of shared cells each cos queue can have. */
+ uint32_t cos_shared_min_ratio;
+ /*
+	 * This parameter limits the total reserved cells. If the computed
+	 * total of reserved cells exceeds rsvd_cells_limit_ratio x
+	 * port_cells_avail, the reserved cells are clamped to that limit.
+	 * Its range of values is 0-50%.
+ */
+ uint32_t rsvd_cells_limit_ratio;
+ /*
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all RX cos queues.
+ */
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_rx_tuning_qcfg_output_t, *phwrm_queue_adptv_qos_rx_tuning_qcfg_output_t;
+
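+/*
+ * Editor's sketch (not part of the HWRM definitions): two of these
+ * tuning parameters carry documented ranges (pcie_bw_eff: 0-100%,
+ * rsvd_cells_limit_ratio: 0-50%), so a caller may want to validate
+ * them before building the cfg request below:
+ *
+ *	if (pcie_bw_eff > 100 || rsvd_cells_limit_ratio > 50)
+ *		return (EINVAL);
+ */
+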
+/**************************************
+ * hwrm_queue_adptv_qos_rx_tuning_cfg *
+ **************************************/
+
+
+/* hwrm_queue_adptv_qos_rx_tuning_cfg_input (size:640b/80B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_tuning_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_WFQ_COST UINT32_C(0x1)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_WFQ_UPPER_FACTOR UINT32_C(0x2)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_HYST_WINDOW_SIZE_FACTOR UINT32_C(0x4)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_PCIE_BW_EFF UINT32_C(0x8)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_XOFF_HEADROOM_FACTOR UINT32_C(0x10)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_MIN_LATENCY UINT32_C(0x20)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_MAX_LATENCY UINT32_C(0x40)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_MIN_LATENCY UINT32_C(0x80)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_MAX_LATENCY UINT32_C(0x100)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_L2_PIPE_COS_LATENCY UINT32_C(0x200)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_ROCE_PIPE_COS_LATENCY UINT32_C(0x400)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_COS_SHARED_MIN_RATIO UINT32_C(0x800)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO UINT32_C(0x1000)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_SHAPER_REFILL_TIMER UINT32_C(0x2000)
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
+ /*
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
+ */
+ uint32_t wfq_upper_factor;
+ /*
+ * The algorithm multiplies this factor by the MRU size to compute the
+	 * hysteresis window size, which in turn is used in deassert
+ * threshold calculations.
+ */
+ uint32_t hyst_window_size_factor;
+ /*
+ * Specifies PCIe BW efficiency in the range of 0-100%. System
+ * characterization determines the value of this parameter. A value of
+ * less than 100% accounts for internal PCIe over-subscription. The
+ * algorithm uses this parameter to determine the PCIe BW available
+ * for transferring received packets to the host.
+ */
+ uint32_t pcie_bw_eff;
+ /* Scales the number of cells for xoff. */
+ uint32_t xoff_headroom_factor;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_max_latency;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_max_latency;
+ /*
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
+ */
+ uint32_t l2_pipe_cos_latency;
+ /*
+ * The algorithm uses this parameter to calculate the number of cells
+ * to be excluded from the total buffer pool to account for the
+ * latency of pipeline post RE_DEC to PCIe block. Its value is derived
+ * from system characterization.
+ */
+ uint32_t roce_pipe_cos_latency;
+ /* Sets the minimum number of shared cells each cos queue can have. */
+ uint32_t cos_shared_min_ratio;
+ /*
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
+ */
+ uint32_t rsvd_cells_limit_ratio;
+ /*
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all RX cos queues.
+ */
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[4];
+} hwrm_queue_adptv_qos_rx_tuning_cfg_input_t, *phwrm_queue_adptv_qos_rx_tuning_cfg_input_t;
+
+/* hwrm_queue_adptv_qos_rx_tuning_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_rx_tuning_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_rx_tuning_cfg_output_t, *phwrm_queue_adptv_qos_rx_tuning_cfg_output_t;
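Because firmware applies only the fields whose ENABLES bits are set, a driver can adjust a subset of these RX tuning knobs without disturbing the others. The sketch below is illustrative only: hwrm_send_message(), struct softc, and the chosen values are assumptions, not definitions from this header.

/*
 * Sketch: configure PCIe BW efficiency and the reserved-cells limit,
 * leaving every other RX tuning parameter at its current value.
 * hwrm_send_message() stands in for the driver's HWRM transport.
 */
static int
set_rx_tuning(struct softc *sc)
{
	hwrm_queue_adptv_qos_rx_tuning_cfg_input_t req = {0};

	req.req_type = htole16(HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG);
	req.enables = htole32(
	    HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_PCIE_BW_EFF |
	    HWRM_QUEUE_ADPTV_QOS_RX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO);
	req.pcie_bw_eff = htole32(90);			/* 90% effective PCIe BW */
	req.rsvd_cells_limit_ratio = htole32(40);	/* cap reserved cells at 40% */

	return (hwrm_send_message(sc, &req, sizeof(req)));
}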
+
+/***************************************
+ * hwrm_queue_adptv_qos_tx_tuning_qcfg *
+ ***************************************/
+
+
+/* hwrm_queue_adptv_qos_tx_tuning_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_tuning_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_adptv_qos_tx_tuning_qcfg_input_t, *phwrm_queue_adptv_qos_tx_tuning_qcfg_input_t;
+
+/* hwrm_queue_adptv_qos_tx_tuning_qcfg_output (size:448b/56B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_tuning_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
+ /*
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
+ */
+ uint32_t wfq_upper_factor;
+ /*
+ * The algorithm multiplies this factor by the MRU size to compute the
+	 * hysteresis window size, which in turn is used in deassert
+ * threshold calculations.
+ */
+ uint32_t hyst_window_size_factor;
+ /*
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
+ */
+ uint32_t rsvd_cells_limit_ratio;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_max_latency;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_max_latency;
+ /* Specifies the number of reserved cells TRP requires per cos queue. */
+ uint32_t max_tbm_cells_prereserved;
+ /*
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all TX cos queues.
+ */
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_tx_tuning_qcfg_output_t, *phwrm_queue_adptv_qos_tx_tuning_qcfg_output_t;
+
+/**************************************
+ * hwrm_queue_adptv_qos_tx_tuning_cfg *
+ **************************************/
+
+
+/* hwrm_queue_adptv_qos_tx_tuning_cfg_input (size:512b/64B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_tuning_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_WFQ_COST UINT32_C(0x1)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_WFQ_UPPER_FACTOR UINT32_C(0x2)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_HYST_WINDOW_SIZE_FACTOR UINT32_C(0x4)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_RSVD_CELLS_LIMIT_RATIO UINT32_C(0x8)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MIN_LATENCY UINT32_C(0x10)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_L2_MAX_LATENCY UINT32_C(0x20)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_ROCE_MIN_LATENCY UINT32_C(0x40)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_ROCE_MAX_LATENCY UINT32_C(0x80)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_MAX_TBM_CELLS_PRERESERVED UINT32_C(0x100)
+ /* This bit must be '1' for the option to be configured. */
+ #define HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_SHAPER_REFILL_TIMER UINT32_C(0x200)
+ /* Indicates max credit as required by hardware. */
+ uint32_t wfq_cost;
+ /*
+ * Specifies a factor that determines the upper bound for each
+ * cos_wfq_credit_weight.
+ */
+ uint32_t wfq_upper_factor;
+ /*
+ * The algorithm multiplies this factor by the MRU size to compute the
+	 * hysteresis window size, which in turn is used in deassert
+ * threshold calculations.
+ */
+ uint32_t hyst_window_size_factor;
+ /*
+ * The parameter limits the total reserved cells. If the computed
+ * total reserved cells becomes larger than rsvd_cells_limit_ratio x
+ * port_cells_avail, then the reserved cells are set to the limit
+ * value. Its range of values is 0-50%.
+ */
+ uint32_t rsvd_cells_limit_ratio;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for L2. Its value is derived from system
+ * characterization.
+ */
+ uint32_t l2_max_latency;
+ /*
+ * It is used to calculate the number of reserved cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_min_latency;
+ /*
+ * It is used to calculate the number of shared cells for cos queues
+ * configured for RoCE. Its value is derived from system
+ * characterization.
+ */
+ uint32_t roce_max_latency;
+ /* Specifies the number of reserved cells TRP requires per cos queue. */
+ uint32_t max_tbm_cells_prereserved;
+ /*
+ * This parameter is used to compute the time interval for
+ * replenishing the shaper credit buckets for all TX cos queues.
+ */
+ uint32_t shaper_refill_timer;
+ uint8_t unused_0[4];
+} hwrm_queue_adptv_qos_tx_tuning_cfg_input_t, *phwrm_queue_adptv_qos_tx_tuning_cfg_input_t;
+
+/* hwrm_queue_adptv_qos_tx_tuning_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_adptv_qos_tx_tuning_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_adptv_qos_tx_tuning_cfg_output_t, *phwrm_queue_adptv_qos_tx_tuning_cfg_output_t;
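The TX tuning cfg pairs naturally with its qcfg counterpart in a read-modify-write pattern: query the current values, change one, and write it back with only that field's enable bit set. A sketch under the same assumed helpers as above (hwrm_send_message_with_resp() is likewise hypothetical):

/*
 * Sketch: double the TX shaper refill interval while leaving all
 * other TX tuning parameters untouched.
 */
static int
bump_tx_shaper_refill(struct softc *sc)
{
	hwrm_queue_adptv_qos_tx_tuning_qcfg_input_t qreq = {0};
	hwrm_queue_adptv_qos_tx_tuning_qcfg_output_t qresp = {0};
	hwrm_queue_adptv_qos_tx_tuning_cfg_input_t creq = {0};
	int rc;

	qreq.req_type = htole16(HWRM_QUEUE_ADPTV_QOS_TX_TUNING_QCFG);
	rc = hwrm_send_message_with_resp(sc, &qreq, sizeof(qreq),
	    &qresp, sizeof(qresp));
	if (rc != 0)
		return (rc);

	creq.req_type = htole16(HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG);
	creq.enables = htole32(
	    HWRM_QUEUE_ADPTV_QOS_TX_TUNING_CFG_INPUT_ENABLES_SHAPER_REFILL_TIMER);
	creq.shaper_refill_timer = htole32(le32toh(qresp.shaper_refill_timer) * 2);
	return (hwrm_send_message(sc, &creq, sizeof(creq)));
}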
+
+/**********************************
+ * hwrm_queue_pfcwd_timeout_qcaps *
+ **********************************/
+
+
+/* hwrm_queue_pfcwd_timeout_qcaps_input (size:128b/16B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_pfcwd_timeout_qcaps_input_t, *phwrm_queue_pfcwd_timeout_qcaps_input_t;
+
+/* hwrm_queue_pfcwd_timeout_qcaps_output (size:128b/16B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+	/* Maximum configurable PFC watchdog timeout value in msec. */
+ uint16_t max_pfcwd_timeout;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_pfcwd_timeout_qcaps_output_t, *phwrm_queue_pfcwd_timeout_qcaps_output_t;
+
+/********************************
+ * hwrm_queue_pfcwd_timeout_cfg *
+ ********************************/
+
+
+/* hwrm_queue_pfcwd_timeout_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * PFC watchdog timeout value in msec.
+	 * A value of 0 means firmware will disable the PFC watchdog.
+	 * A value of 0xffff means firmware will reset the timeout
+	 * value to hardware defaults. Any value between 0 and 0xffff
+	 * is a valid timeout value, depending on the hardware
+	 * capability.
+ */
+ uint16_t pfcwd_timeout_value;
+ uint8_t unused_0[6];
+} hwrm_queue_pfcwd_timeout_cfg_input_t, *phwrm_queue_pfcwd_timeout_cfg_input_t;
+
+/* hwrm_queue_pfcwd_timeout_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_pfcwd_timeout_cfg_output_t, *phwrm_queue_pfcwd_timeout_cfg_output_t;
+
+/*********************************
+ * hwrm_queue_pfcwd_timeout_qcfg *
+ *********************************/
+
+
+/* hwrm_queue_pfcwd_timeout_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_queue_pfcwd_timeout_qcfg_input_t, *phwrm_queue_pfcwd_timeout_qcfg_input_t;
+
+/* hwrm_queue_pfcwd_timeout_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_queue_pfcwd_timeout_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+	 * Currently configured PFC watchdog timeout value in msec.
+ * A value of 0 means PFC watchdog functionality is disabled.
+ */
+ uint16_t pfcwd_timeout_value;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_queue_pfcwd_timeout_qcfg_output_t, *phwrm_queue_pfcwd_timeout_qcfg_output_t;
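Together, the three PFC watchdog commands form a query-capabilities, configure, read-back sequence: qcaps bounds the timeout, cfg programs it (0 disables the watchdog, 0xffff restores hardware defaults), and qcfg verifies the active value. A sketch, reusing the assumed helpers from the earlier examples:

/*
 * Sketch: clamp a requested PFC watchdog timeout to the maximum the
 * device reports, then program it.  A real driver would pass 0 or
 * 0xffff through without clamping, since those carry special meaning.
 */
static int
set_pfcwd_timeout(struct softc *sc, uint16_t want_msec)
{
	hwrm_queue_pfcwd_timeout_qcaps_input_t qreq = {0};
	hwrm_queue_pfcwd_timeout_qcaps_output_t qresp = {0};
	hwrm_queue_pfcwd_timeout_cfg_input_t creq = {0};
	uint16_t max;
	int rc;

	qreq.req_type = htole16(HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS);
	rc = hwrm_send_message_with_resp(sc, &qreq, sizeof(qreq),
	    &qresp, sizeof(qresp));
	if (rc != 0)
		return (rc);

	max = le16toh(qresp.max_pfcwd_timeout);
	if (want_msec > max)
		want_msec = max;	/* stay within device capability */

	creq.req_type = htole16(HWRM_QUEUE_PFCWD_TIMEOUT_CFG);
	creq.pfcwd_timeout_value = htole16(want_msec);
	return (hwrm_send_message(sc, &creq, sizeof(creq)));
}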
+
/*******************
* hwrm_vnic_alloc *
*******************/
@@ -34906,9 +40290,9 @@ typedef struct hwrm_vnic_alloc_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -34980,23 +40364,18 @@ typedef struct hwrm_vnic_update_input {
#define HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_LAST HWRM_VNIC_UPDATE_INPUT_VNIC_STATE_DROP
/*
* The metadata format type used in all the RX packet completions
- * going through this VNIC.
+ * going through this VNIC. This value is product specific. Refer to
+ * the L2 HSI completion ring structures for the detailed
+ * descriptions. For Thor and Thor2, it corresponds to 'meta_format'
+ * in 'rx_pkt_cmpl_hi' and 'rx_pkt_v3_cmpl_hi', respectively.
*/
uint8_t metadata_format_type;
- /* No metadata information. */
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_NONE UINT32_C(0x0)
- /*
- * Action record pointer (table_scope[4:0], act_rec_ptr[25:0],
- * vtag[19:0]).
- */
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_ACT_RECORD_PTR UINT32_C(0x1)
- /* Tunnel ID (tunnel_id[31:0], vtag[19:0]) */
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_TUNNEL_ID UINT32_C(0x2)
- /* Custom header data (updated_chdr_data[31:0], vtag[19:0]) */
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_CUSTOM_HDR_DATA UINT32_C(0x3)
- /* Header offsets (hdr_offsets[31:0], vtag[19:0]) */
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_HDR_OFFSETS UINT32_C(0x4)
- #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_LAST HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_HDR_OFFSETS
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
+ #define HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_LAST HWRM_VNIC_UPDATE_INPUT_METADATA_FORMAT_TYPE_4
/*
* The maximum receive unit of the vnic.
* Each vnic is associated with a function.
@@ -35023,7 +40402,7 @@ typedef struct hwrm_vnic_update_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -35087,9 +40466,9 @@ typedef struct hwrm_vnic_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -35237,25 +40616,28 @@ typedef struct hwrm_vnic_cfg_input {
#define HWRM_VNIC_CFG_INPUT_ENABLES_DEFAULT_CMPL_RING_ID UINT32_C(0x40)
/* This bit must be '1' for the queue_id field to be configured. */
#define HWRM_VNIC_CFG_INPUT_ENABLES_QUEUE_ID UINT32_C(0x80)
- /* This bit must be '1' for the rx_csum_v2_mode field to be configured. */
+ /*
+ * This bit must be '1' for the rx_csum_v2_mode field to be
+ * configured.
+ */
#define HWRM_VNIC_CFG_INPUT_ENABLES_RX_CSUM_V2_MODE UINT32_C(0x100)
/* This bit must be '1' for the l2_cqe_mode field to be configured. */
#define HWRM_VNIC_CFG_INPUT_ENABLES_L2_CQE_MODE UINT32_C(0x200)
/* Logical vnic ID */
uint16_t vnic_id;
/*
- * Default Completion ring for the VNIC. This ring will
+ * Default Completion ring for the VNIC. This ring will
* be chosen if packet does not match any RSS rules and if
* there is no COS rule.
*/
uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
uint16_t cos_rule;
@@ -35274,7 +40656,7 @@ typedef struct hwrm_vnic_cfg_input {
*/
uint16_t mru;
/*
- * Default Rx ring for the VNIC. This ring will
+ * Default Rx ring for the VNIC. This ring will
* be chosen if packet does not match any RSS rules.
* The aggregation ring associated with the Rx ring is
* implied based on the Rx ring specified when the
@@ -35282,16 +40664,17 @@ typedef struct hwrm_vnic_cfg_input {
*/
uint16_t default_rx_ring_id;
/*
- * Default completion ring for the VNIC. This ring will
+ * Default completion ring for the VNIC. This ring will
* be chosen if packet does not match any RSS rules.
*/
uint16_t default_cmpl_ring_id;
/*
- * When specified, only incoming packets classified to the specified CoS
- * queue ID will be arriving on this VNIC. Packet priority to CoS mapping
- * rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this mode,
- * ntuple filters with VNIC destination specified are invalid since they
- * conflict with the CoS to VNIC steering rules in this mode.
+ * When specified, only incoming packets classified to the specified
+ * CoS queue ID will be arriving on this VNIC. Packet priority to CoS
+ * mapping rules can be specified using HWRM_QUEUE_PRI2COS_CFG. In this
+ * mode, ntuple filters with VNIC destination specified are invalid
+ * since they conflict with the CoS to VNIC steering rules in this
+ * mode.
*
* If this field is not specified, packet to VNIC steering will be
* subject to the standard L2 filter rules and any additional ntuple
@@ -35311,7 +40694,7 @@ typedef struct hwrm_vnic_cfg_input {
* the number of header groups in the delivered packet with a valid
* L4 checksum are reported. Valid checksums are counted from the
* outermost header group to the innermost header group, stopping at
- * the first error. This is the default checksum mode supported if
+ * the first error. This is the default checksum mode supported if
* the driver doesn't explicitly configure the RX checksum mode.
*/
#define HWRM_VNIC_CFG_INPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
@@ -35377,9 +40760,9 @@ typedef struct hwrm_vnic_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -35448,12 +40831,12 @@ typedef struct hwrm_vnic_qcfg_output {
/* Default Completion ring for the VNIC. */
uint16_t dflt_ring_grp;
/*
- * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if
* there is no RSS rule.
*/
uint16_t rss_rule;
/*
- * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
+ * RSS ID for COS rule/table structure. 0xFF... (All Fs) if
* there is no COS rule.
*/
uint16_t cos_rule;
@@ -35533,9 +40916,9 @@ typedef struct hwrm_vnic_qcfg_output {
/* When this bit is '1' it indicates port cos_mapping_mode enabled. */
#define HWRM_VNIC_QCFG_OUTPUT_FLAGS_PORTCOS_MAPPING_MODE UINT32_C(0x100)
/*
- * When returned with a valid CoS Queue id, the CoS Queue/VNIC association
- * is valid. Otherwise it will return 0xFFFF to indicate no VNIC/CoS
- * queue association.
+ * When returned with a valid CoS Queue id, the CoS Queue/VNIC
+ * association is valid. Otherwise it will return 0xFFFF to indicate no
+ * VNIC/CoS queue association.
*/
uint16_t queue_id;
/*
@@ -35553,7 +40936,7 @@ typedef struct hwrm_vnic_qcfg_output {
#define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_DEFAULT UINT32_C(0x0)
/*
* This value indicates that the VNIC is configured to use the RX
- * checksum ‘all_ok’ mode for all the rings associated with this
+ * checksum 'all_ok' mode for all the rings associated with this
* VNIC.
*/
#define HWRM_VNIC_QCFG_OUTPUT_RX_CSUM_V2_MODE_ALL_OK UINT32_C(0x1)
@@ -35586,12 +40969,33 @@ typedef struct hwrm_vnic_qcfg_output {
*/
#define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED UINT32_C(0x2)
#define HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_LAST HWRM_VNIC_QCFG_OUTPUT_L2_CQE_MODE_MIXED
- uint8_t unused_1[3];
+ /*
+ * This field conveys the metadata format type that has been
+ * configured. This value is product specific. Refer to the L2 HSI
+ * completion ring structures for the detailed descriptions. For Thor
+ * and Thor2, it corresponds to 'meta_format' in 'rx_pkt_cmpl_hi' and
+ * 'rx_pkt_v3_cmpl_hi', respectively.
+ */
+ uint8_t metadata_format_type;
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_0 UINT32_C(0x0)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_1 UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_2 UINT32_C(0x2)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_3 UINT32_C(0x3)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4 UINT32_C(0x4)
+ #define HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_LAST HWRM_VNIC_QCFG_OUTPUT_METADATA_FORMAT_TYPE_4
+ /* This field conveys the VNIC operation state. */
+ uint8_t vnic_state;
+ /* Normal operation state. */
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_NORMAL UINT32_C(0x0)
+ /* Drop all packets. */
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP UINT32_C(0x1)
+ #define HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_LAST HWRM_VNIC_QCFG_OUTPUT_VNIC_STATE_DROP
+ uint8_t unused_1;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -35710,10 +41114,10 @@ typedef struct hwrm_vnic_qcaps_output {
/*
* When this bit is '1', it indicates that firmware supports the
* ability to steer incoming packets from one CoS queue to one
- * VNIC. This optional feature can then be enabled
- * using HWRM_VNIC_CFG on any VNIC. This feature is only
- * available when NVM option “enable_cos_classification” is set
- * to 1. If set to '0', firmware does not support this feature.
+ * VNIC. This optional feature can then be enabled
+ * using HWRM_VNIC_CFG on any VNIC. This feature is only
+ * available when NVM option 'enable_cos_classification' is set
+ * to 1. If set to '0', firmware does not support this feature.
*/
#define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_COS_ASSIGNMENT_CAP UINT32_C(0x100)
/*
@@ -35771,8 +41175,8 @@ typedef struct hwrm_vnic_qcaps_output {
#define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RING_SELECT_MODE_TOEPLITZ_CAP UINT32_C(0x8000)
/*
* When this bit is '1', it indicates that HW is capable of using
- * XOR algorithm. This mode uses XOR algorithm to hash the packets
- * according to the configured hash type and hash mode. The XOR
+	 * XOR algorithm. This mode uses the XOR algorithm to hash the packets
+ * according to the configured hash type and hash mode. The XOR
* hash results and the provided XOR RSS indirection table are
* used to determine the RSS rings. Host drivers provided hash key
* is not honored in this mode.
@@ -35785,7 +41189,7 @@ typedef struct hwrm_vnic_qcaps_output {
* algorithm to calculate the hash to convey it in the RX
* completions. Host drivers should provide Toeplitz hash key.
* As HW uses innermost packets checksum to distribute the packets
- * across the rings, host drivers can't convey hash mode to choose
+ * across the rings, host drivers can't convey hash mode to choose
* outer headers to calculate Toeplitz hash. FW will fail such
* configuration.
*/
@@ -35848,6 +41252,8 @@ typedef struct hwrm_vnic_qcaps_output {
#define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_RSS_PROF_TCAM_MODE_ENABLED UINT32_C(0x8000000)
/* When this bit is '1' FW supports VNIC hash mode. */
#define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_VNIC_RSS_HASH_MODE_CAP UINT32_C(0x10000000)
+ /* When this bit is set to '1', hardware supports tunnel TPA. */
+ #define HWRM_VNIC_QCAPS_OUTPUT_FLAGS_HW_TUNNEL_TPA_CAP UINT32_C(0x20000000)
/*
* This field advertises the maximum concurrent TPA aggregations
* supported by the VNIC on new devices that support TPA v2 or v3.
@@ -35857,9 +41263,9 @@ typedef struct hwrm_vnic_qcaps_output {
uint8_t unused_1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -35870,7 +41276,7 @@ typedef struct hwrm_vnic_qcaps_output {
*********************/
-/* hwrm_vnic_tpa_cfg_input (size:320b/40B) */
+/* hwrm_vnic_tpa_cfg_input (size:384b/48B) */
typedef struct hwrm_vnic_tpa_cfg_input {
/* The HWRM command request type. */
@@ -35982,8 +41388,13 @@ typedef struct hwrm_vnic_tpa_cfg_input {
* configured.
*/
#define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_TIMER UINT32_C(0x4)
- /* deprecated bit. Do not use!!! */
+ /* deprecated bit. Do not use!!! */
#define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the tnl_tpa_en_bitmap field to be
+ * configured.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN UINT32_C(0x10)
/* Logical vnic ID */
uint16_t vnic_id;
/*
@@ -36034,10 +41445,104 @@ typedef struct hwrm_vnic_tpa_cfg_input {
/*
* This is the minimum amount of payload length required to
* start an aggregation context. This field is deprecated and
- * should be set to 0. The minimum length is set by firmware
+ * should be set to 0. The minimum length is set by firmware
* and can be queried using hwrm_vnic_tpa_qcfg.
*/
uint32_t min_agg_len;
+ /*
+ * If the device supports hardware tunnel TPA feature, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field is used to configure the
+ * tunnel types to be enabled. Each bit corresponds to a specific
+ * tunnel type. If a bit is set to '1', then the associated tunnel
+ * type is enabled; otherwise, it is disabled.
+ */
+ uint32_t tnl_tpa_en_bitmap;
+ /*
+ * When this bit is '1', enable VXLAN encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', enable GENEVE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', enable NVGRE encapsulated packets
+ * for aggregation..
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_NVGRE UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', enable GRE encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', enable IPV4 encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV4 UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', enable IPV6 encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_IPV6 UINT32_C(0x20)
+ /*
+ * When this bit is '1', enable VXLAN_GPE encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE UINT32_C(0x40)
+ /*
+ * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 UINT32_C(0x80)
+ /*
+ * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 UINT32_C(0x100)
+ /*
+ * When this bit is '1', enable UPAR1 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR1 UINT32_C(0x200)
+ /*
+ * When this bit is '1', enable UPAR2 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR2 UINT32_C(0x400)
+ /*
+ * When this bit is '1', enable UPAR3 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR3 UINT32_C(0x800)
+ /*
+ * When this bit is '1', enable UPAR4 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR4 UINT32_C(0x1000)
+ /*
+ * When this bit is '1', enable UPAR5 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR5 UINT32_C(0x2000)
+ /*
+ * When this bit is '1', enable UPAR6 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR6 UINT32_C(0x4000)
+ /*
+ * When this bit is '1', enable UPAR7 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR7 UINT32_C(0x8000)
+ /*
+ * When this bit is '1', enable UPAR8 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_UPAR8 UINT32_C(0x10000)
+ uint8_t unused_1[4];
} hwrm_vnic_tpa_cfg_input_t, *phwrm_vnic_tpa_cfg_input_t;
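The tnl_tpa_en_bitmap is honored only when the TNL_TPA_EN enables bit is set, and only on devices whose HWRM_VNIC_QCAPS response advertises HW_TUNNEL_TPA_CAP. A fragment showing just the tunnel-TPA portion of a hwrm_vnic_tpa_cfg request (a complete request also sets the TPA flags and aggregation limits; the helper is assumed as before):

/*
 * Sketch: enable TPA aggregation of VXLAN and GENEVE encapsulated
 * traffic on one VNIC.
 */
static int
enable_tunnel_tpa(struct softc *sc, uint16_t vnic_id)
{
	hwrm_vnic_tpa_cfg_input_t req = {0};

	req.req_type = htole16(HWRM_VNIC_TPA_CFG);
	req.vnic_id = htole16(vnic_id);
	req.enables = htole32(HWRM_VNIC_TPA_CFG_INPUT_ENABLES_TNL_TPA_EN);
	req.tnl_tpa_en_bitmap = htole32(
	    HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_VXLAN |
	    HWRM_VNIC_TPA_CFG_INPUT_TNL_TPA_EN_BITMAP_GENEVE);
	return (hwrm_send_message(sc, &req, sizeof(req)));
}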
/* hwrm_vnic_tpa_cfg_output (size:128b/16B) */
@@ -36054,9 +41559,9 @@ typedef struct hwrm_vnic_tpa_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36215,12 +41720,105 @@ typedef struct hwrm_vnic_tpa_qcfg_output {
* start an aggregation context.
*/
uint32_t min_agg_len;
- uint8_t unused_0[7];
+ /*
+ * If the device supports hardware tunnel TPA feature, as indicated by
+ * the HWRM_VNIC_QCAPS command, this field conveys the bitmap of the
+ * tunnel types that have been configured. Each bit corresponds to a
+ * specific tunnel type. If a bit is set to '1', then the associated
+ * tunnel type is enabled; otherwise, it is disabled.
+ */
+ uint32_t tnl_tpa_en_bitmap;
+ /*
+ * When this bit is '1', enable VXLAN encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', enable GENEVE encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GENEVE UINT32_C(0x2)
+ /*
+ * When this bit is set to '1', enable NVGRE encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_NVGRE UINT32_C(0x4)
+ /*
+ * When this bit is set to '1', enable GRE encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE UINT32_C(0x8)
+ /*
+ * When this bit is set to '1', enable IPV4 encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV4 UINT32_C(0x10)
+ /*
+ * When this bit is set to '1', enable IPV6 encapsulated packets
+	 * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_IPV6 UINT32_C(0x20)
+ /*
+ * When this bit is '1', enable VXLAN_GPE encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_GPE UINT32_C(0x40)
+ /*
+ * When this bit is '1', enable VXLAN_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN_CUST1 UINT32_C(0x80)
+ /*
+ * When this bit is '1', enable GRE_CUSTOMER1 encapsulated packets
+ * for aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_GRE_CUST1 UINT32_C(0x100)
+ /*
+ * When this bit is '1', enable UPAR1 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR1 UINT32_C(0x200)
+ /*
+ * When this bit is '1', enable UPAR2 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR2 UINT32_C(0x400)
+ /*
+ * When this bit is '1', enable UPAR3 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR3 UINT32_C(0x800)
+ /*
+ * When this bit is '1', enable UPAR4 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR4 UINT32_C(0x1000)
+ /*
+ * When this bit is '1', enable UPAR5 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR5 UINT32_C(0x2000)
+ /*
+ * When this bit is '1', enable UPAR6 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR6 UINT32_C(0x4000)
+ /*
+ * When this bit is '1', enable UPAR7 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR7 UINT32_C(0x8000)
+ /*
+ * When this bit is '1', enable UPAR8 encapsulated packets for
+ * aggregation.
+ */
+ #define HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_UPAR8 UINT32_C(0x10000)
+ uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
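The query side returns the same bitmap layout, so a driver can test individual tunnel types from the qcfg response; for example (sketch, assuming the response has already been fetched and its 'valid' byte checked):

/* Sketch: decode one tunnel type from the queried TPA bitmap. */
static int
vxlan_tpa_enabled(const hwrm_vnic_tpa_qcfg_output_t *resp)
{
	uint32_t bm = le32toh(resp->tnl_tpa_en_bitmap);

	return ((bm & HWRM_VNIC_TPA_QCFG_OUTPUT_TNL_TPA_EN_BITMAP_VXLAN) != 0);
}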
@@ -36413,7 +42011,7 @@ typedef struct hwrm_vnic_rss_cfg_input {
* specified headers. It is an error to set this flag concurrently
* with hash_type_exclude.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_INCLUDE UINT32_C(0x1)
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_INCLUDE UINT32_C(0x1)
/*
* When this bit is '1', it indicates that the hash_type field is
* interpreted as a change relative the current configuration. Each
@@ -36425,7 +42023,12 @@ typedef struct hwrm_vnic_rss_cfg_input {
* remove the specified headers. It is an error to set this flag
* concurrently with hash_type_include.
*/
- #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_EXCLUDE UINT32_C(0x2)
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_HASH_TYPE_EXCLUDE UINT32_C(0x2)
+ /*
+ * When this bit is '1', it indicates that the support of setting
+ * ipsec hash_types by the host drivers.
+ */
+ #define HWRM_VNIC_RSS_CFG_INPUT_FLAGS_IPSEC_HASH_TYPE_CFG_SUPPORT UINT32_C(0x4)
uint8_t ring_select_mode;
/*
* In this mode, HW uses Toeplitz algorithm and provided Toeplitz
@@ -36471,9 +42074,9 @@ typedef struct hwrm_vnic_rss_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36714,9 +42317,9 @@ typedef struct hwrm_vnic_rss_qcfg_output {
uint8_t unused_1[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -36896,7 +42499,7 @@ typedef struct hwrm_vnic_plcmodes_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -37041,7 +42644,7 @@ typedef struct hwrm_vnic_plcmodes_qcfg_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -37104,9 +42707,9 @@ typedef struct hwrm_vnic_rss_cos_lb_ctx_alloc_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37167,9 +42770,9 @@ typedef struct hwrm_vnic_rss_cos_lb_ctx_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37252,6 +42855,11 @@ typedef struct hwrm_ring_alloc_input {
* configured.
*/
#define HWRM_RING_ALLOC_INPUT_ENABLES_MPC_CHNLS_TYPE UINT32_C(0x400)
+ /*
+ * This bit must be '1' for the steering_tag field to be
+ * configured.
+ */
+ #define HWRM_RING_ALLOC_INPUT_ENABLES_STEERING_TAG_VALID UINT32_C(0x800)
/* Ring Type. */
uint8_t ring_type;
/* L2 Completion Ring (CR) */
@@ -37388,7 +42996,7 @@ typedef struct hwrm_ring_alloc_input {
/* Used by a PF driver to associate a SCHQ with one of its TX rings. */
uint16_t schq_id;
/*
- * Number of 16B units in the ring. Minimum size for
+ * Number of 16B units in the ring. Minimum size for
* a ring is 16 16B entries.
*/
uint32_t length;
@@ -37465,7 +43073,8 @@ typedef struct hwrm_ring_alloc_input {
*/
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_MASK UINT32_C(0xff00)
#define HWRM_RING_ALLOC_INPUT_RING_ARB_CFG_ARB_POLICY_PARAM_SFT 8
- uint16_t unused_3;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
/*
* This field is reserved for the future use.
* It shall be set to 0.
@@ -37601,9 +43210,9 @@ typedef struct hwrm_ring_alloc_output {
uint8_t unused_0[2];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
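The steering_tag added to hwrm_ring_alloc_input follows the same convention as other optional fields: firmware honors it only when the STEERING_TAG_VALID enables bit is set. A fragment showing only the tag-related assignments ('my_steering_tag' and the rest of the allocation request are assumed):

/*
 * Sketch fragment: attach a PCIe steering tag to a ring allocation.
 * A real HWRM_RING_ALLOC request also sets ring_type, page_tbl_addr,
 * length, and the other mandatory fields.
 */
hwrm_ring_alloc_input_t req = {0};

req.req_type = htole16(HWRM_RING_ALLOC);
req.enables |= htole32(HWRM_RING_ALLOC_INPUT_ENABLES_STEERING_TAG_VALID);
req.steering_tag = htole16(my_steering_tag);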
@@ -37706,9 +43315,9 @@ typedef struct hwrm_ring_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37761,7 +43370,7 @@ typedef struct hwrm_ring_reset_input {
/* RoCE Notification Completion Ring (ROCE_CR) */
#define HWRM_RING_RESET_INPUT_RING_TYPE_ROCE_CMPL UINT32_C(0x3)
/*
- * Rx Ring Group. This is to reset rx and aggregation in an atomic
+ * Rx Ring Group. This is to reset rx and aggregation in an atomic
* operation. Completion ring associated with this ring group is
* not reset.
*/
@@ -37802,9 +43411,9 @@ typedef struct hwrm_ring_reset_output {
uint8_t consumer_idx[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -37957,7 +43566,7 @@ typedef struct hwrm_ring_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -38097,7 +43706,7 @@ typedef struct hwrm_ring_qcfg_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -38182,8 +43791,8 @@ typedef struct hwrm_ring_aggint_qcaps_output {
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR UINT32_C(0x10)
/*
- * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be configured
- * on completion rings.
+ * When this bit is set to '1', num_cmpl_dma_aggr_during_int can be
+ * configured on completion rings.
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT UINT32_C(0x20)
/*
@@ -38192,8 +43801,8 @@ typedef struct hwrm_ring_aggint_qcaps_output {
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR UINT32_C(0x40)
/*
- * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be configured
- * on completion rings.
+ * When this bit is set to '1', cmpl_aggr_dma_tmr_during_int can be
+ * configured on completion rings.
*/
#define HWRM_RING_AGGINT_QCAPS_OUTPUT_CMPL_PARAMS_CMPL_AGGR_DMA_TMR_DURING_INT UINT32_C(0x80)
/*
@@ -38240,9 +43849,9 @@ typedef struct hwrm_ring_aggint_qcaps_output {
uint8_t unused_0[1];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -38356,9 +43965,9 @@ typedef struct hwrm_ring_cmpl_ring_qaggint_params_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -38465,8 +44074,8 @@ typedef struct hwrm_ring_cmpl_ring_cfg_aggint_params_input {
*/
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR UINT32_C(0x1)
/*
- * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to be
- * configured.
+ * This bit must be '1' for the num_cmpl_dma_aggr_during_int field to
+ * be configured.
*/
#define HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS_INPUT_ENABLES_NUM_CMPL_DMA_AGGR_DURING_INT UINT32_C(0x2)
/*
@@ -38506,9 +44115,9 @@ typedef struct hwrm_ring_cmpl_ring_cfg_aggint_params_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -38562,7 +44171,7 @@ typedef struct hwrm_ring_grp_alloc_input {
uint16_t rr;
/*
* This value identifies the aggregation RR associated with
- * the ring group. If this value is 0xFF... (All Fs), then no
+ * the ring group. If this value is 0xFF... (All Fs), then no
* Aggregation ring will be set.
*/
uint16_t ar;
@@ -38585,7 +44194,7 @@ typedef struct hwrm_ring_grp_alloc_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * This is the ring group ID value. Use this value to program
+ * This is the ring group ID value. Use this value to program
* the default ring group for the VNIC or as table entries
* in an RSS/COS context.
*/
@@ -38593,9 +44202,9 @@ typedef struct hwrm_ring_grp_alloc_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -38656,9 +44265,9 @@ typedef struct hwrm_ring_grp_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -39128,9 +44737,9 @@ typedef struct hwrm_ring_schq_alloc_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -39187,8 +44796,8 @@ typedef struct hwrm_ring_schq_cfg_input {
uint32_t flags;
/* The tc_max_bw array and the max_bw parameters are valid */
#define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_MAX_BW_ENABLED UINT32_C(0x1)
- /* The tc_min_bw array is valid */
- #define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_MIN_BW_ENABLED UINT32_C(0x2)
+ /* The tc_bw_reservation array is valid */
+ #define HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_RESERVATION_ENABLED UINT32_C(0x2)
/* Maximum bandwidth of the traffic class, specified in Mbps. */
uint32_t max_bw_tc0;
/* Maximum bandwidth of the traffic class, specified in Mbps. */
@@ -39206,61 +44815,61 @@ typedef struct hwrm_ring_schq_cfg_input {
/* Maximum bandwidth of the traffic class, specified in Mbps. */
uint32_t max_bw_tc7;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc0;
+ uint32_t tc_bw_reservation0;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc1;
+ uint32_t tc_bw_reservation1;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc2;
+ uint32_t tc_bw_reservation2;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc3;
+ uint32_t tc_bw_reservation3;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc4;
+ uint32_t tc_bw_reservation4;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc5;
+ uint32_t tc_bw_reservation5;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc6;
+ uint32_t tc_bw_reservation6;
/*
- * Bandwidth reservation for the traffic class, specified in Mbps.
+ * Bandwidth reservation for the traffic class, specified in percent.
* A value of zero signifies that traffic belonging to this class
* shares the bandwidth reservation for the same traffic class of
* the default SCHQ.
*/
- uint32_t min_bw_tc7;
+ uint32_t tc_bw_reservation7;
/*
* Indicates the max bandwidth for all enabled traffic classes in
* this SCHQ, specified in Mbps.
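Since the reservation fields now carry percentages rather than Mbps, a driver migrating from the old min_bw_tcN layout must rescale its values as well as rename the fields. A hedged sketch of filling the renamed fields (the request variable and the 50/30/20 split are illustrative):

	req.flags = htole32(HWRM_RING_SCHQ_CFG_INPUT_FLAGS_TC_RESERVATION_ENABLED);
	req.tc_bw_reservation0 = htole32(50);	/* percent, no longer Mbps */
	req.tc_bw_reservation1 = htole32(30);
	req.tc_bw_reservation2 = htole32(20);
	/* TCs left at zero share the default SCHQ's reservation */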
@@ -39283,9 +44892,9 @@ typedef struct hwrm_ring_schq_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -39346,9 +44955,9 @@ typedef struct hwrm_ring_schq_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -39874,7 +45483,7 @@ typedef struct hwrm_cfa_l2_filter_free_output {
**************************/
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
typedef struct hwrm_cfa_l2_filter_cfg_input {
/* The HWRM command request type. */
@@ -39916,7 +45525,7 @@ typedef struct hwrm_cfa_l2_filter_cfg_input {
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_TX UINT32_C(0x0)
/* rx path */
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX UINT32_C(0x1)
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_LAST HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX
/*
* Setting of this flag indicates drop action. If this flag is not
* set, then it should be considered accept action.
@@ -39929,12 +45538,30 @@ typedef struct hwrm_cfa_l2_filter_cfg_input {
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_MASK UINT32_C(0xc)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_SFT 2
/* To support old drivers */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 (UINT32_C(0x0) << 2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_NO_ROCE_L2 (UINT32_C(0x0) << 2)
/* Only L2 traffic */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_L2 (UINT32_C(0x1) << 2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_L2 (UINT32_C(0x1) << 2)
/* Roce & L2 traffic */
- #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE (UINT32_C(0x2) << 2)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE (UINT32_C(0x2) << 2)
#define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_LAST HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_TRAFFIC_ROCE
+ /*
+ * Enumeration denoting how the L2 Context TCAM remap operation is
+ * updated.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_MASK UINT32_C(0x30)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_SFT 4
+ /* No change to remap opcode */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_NO_UPDATE (UINT32_C(0x0) << 4)
+ /* Bypass CFA Lookup */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_BYPASS_LKUP (UINT32_C(0x1) << 4)
+ /* Enable CFA Lookup */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_ENABLE_LKUP (UINT32_C(0x2) << 4)
+ /*
+ * Restore the remap opcode originally programmed by firmware flow
+ * manager
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_RESTORE_FW_OP (UINT32_C(0x3) << 4)
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_LAST HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_RESTORE_FW_OP
uint32_t enables;
/*
* This bit must be '1' for the dst_id field to be
@@ -39947,6 +45574,16 @@ typedef struct hwrm_cfa_l2_filter_cfg_input {
*/
#define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_NEW_MIRROR_VNIC_ID UINT32_C(0x2)
/*
+ * This bit must be '1' for the prof_func field to be configured in
+ * the remap entry.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_PROF_FUNC UINT32_C(0x4)
+ /*
+ * This bit must be '1' for the l2_context_id field to be configured
+ * in the remap entry.
+ */
+ #define HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_L2_CONTEXT_ID UINT32_C(0x8)
+ /*
* This value identifies a set of CFA data structures used for an L2
* context.
*/
@@ -39963,6 +45600,22 @@ typedef struct hwrm_cfa_l2_filter_cfg_input {
* mirrored.
*/
uint32_t new_mirror_vnic_id;
+ /*
+ * Profile function value to be programmed into the L2 context entry's
+ * remap. This will be used by the host application to program the CFA
+ * Profile TCAM entry for further classification. A value of 0xFFFFFFFF
+ * indicates that the profile function should be restored to the value
+ * originally programmed by the firmware flow manager.
+ */
+ uint32_t prof_func;
+ /*
+ * L2 context ID value to be programmed into the L2 context entry's
+ * remap. This will be used by the host application to program the CFA
+ * Lookup entry for further classification. A value of 0xFFFFFFFF
+ * indicates that the L2 context ID should be restored to the value
+ * originally programmed by the firmware flow manager.
+ */
+ uint32_t l2_context_id;
} hwrm_cfa_l2_filter_cfg_input_t, *phwrm_cfa_l2_filter_cfg_input_t;
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
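Taken together, the new REMAP_OP flags and the PROF_FUNC/L2_CONTEXT_ID enables let a host application rewrite an existing filter's L2 context remap entry. A minimal sketch, assuming req is a zeroed hwrm_cfa_l2_filter_cfg_input with the filter handle already filled in:

	/* Bypass the CFA lookup and install a caller-chosen profile function. */
	req.flags = htole32(HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_PATH_RX |
	    HWRM_CFA_L2_FILTER_CFG_INPUT_FLAGS_REMAP_OP_BYPASS_LKUP);
	req.enables = htole32(HWRM_CFA_L2_FILTER_CFG_INPUT_ENABLES_PROF_FUNC);
	req.prof_func = htole32(prof_func);	/* 0xFFFFFFFF restores the FW value */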
@@ -41358,13 +47011,16 @@ typedef struct hwrm_cfa_ntuple_filter_alloc_input {
/*
* Setting of this flag indicates that the dst_id field contains RFS
* ring table index. If this is not set it indicates dst_id is VNIC
- * or VPORT or function ID. Note dest_fid and dest_rfs_ring_idx
- * can’t be set at the same time.
+ * or VPORT or function ID. Note dest_fid and dest_rfs_ring_idx
+ * can't be set at the same time. Updated drivers should pass ring
+ * idx in the rfs_ring_tbl_idx field if the firmware indicates
+ * support for the new field in the HWRM_CFA_ADV_FLOW_MGMT_QCAPS
+ * response.
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX UINT32_C(0x20)
/*
* Setting of this flag indicates that when the ntuple filter is
- * created, the L2 context should not be used in the filter. This
+ * created, the L2 context should not be used in the filter. This
* allows packet from different L2 contexts to match and be directed
* to the same destination.
*/
@@ -41455,17 +47111,17 @@ typedef struct hwrm_cfa_ntuple_filter_alloc_input {
* configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID UINT32_C(0x10000)
- /*
- * This bit must be '1' for the mirror_vnic_id field to be
- * configured.
- */
+ /* This flag is deprecated. */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x20000)
/*
* This bit must be '1' for the dst_macaddr field to be
* configured.
*/
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR UINT32_C(0x40000)
- /* This flag is deprecated. */
+ /*
+ * This bit must be '1' for the rfs_ring_tbl_idx field to
+ * be configured.
+ */
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_RFS_RING_TBL_IDX UINT32_C(0x80000)
/*
* This value identifies a set of CFA data structures used for an L2
@@ -41494,7 +47150,7 @@ typedef struct hwrm_cfa_ntuple_filter_alloc_input {
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
#define HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -41524,10 +47180,12 @@ typedef struct hwrm_cfa_ntuple_filter_alloc_input {
*/
uint16_t dst_id;
/*
- * Logical VNIC ID of the VNIC where traffic is
- * mirrored.
+ * If set, this value shall represent the ring table
+ * index for receive flow steering. Note that this offset
+ * was formerly used for the mirror_vnic_id field, which
+ * is no longer supported.
*/
- uint16_t mirror_vnic_id;
+ uint16_t rfs_ring_tbl_idx;
/*
* This value indicates the tunnel type for this filter.
* If this field is not specified, then the filter shall
@@ -41837,13 +47495,13 @@ typedef struct hwrm_cfa_ntuple_filter_cfg_input {
/*
* Setting of this flag indicates that the new_dst_id field contains
* RFS ring table index. If this is not set it indicates new_dst_id
- * is VNIC or VPORT or function ID. Note dest_fid and
- * dest_rfs_ring_idx can’t be set at the same time.
+ * is VNIC or VPORT or function ID. Note dest_fid and
+ * dest_rfs_ring_idx can't be set at the same time.
*/
#define HWRM_CFA_NTUPLE_FILTER_CFG_INPUT_FLAGS_DEST_RFS_RING_IDX UINT32_C(0x2)
/*
* Setting of this flag indicates that when the ntuple filter is
- * created, the L2 context should not be used in the filter. This
+ * created, the L2 context should not be used in the filter. This
* allows packet from different L2 contexts to match and be directed
* to the same destination.
*/
@@ -42168,7 +47826,7 @@ typedef struct hwrm_cfa_em_flow_alloc_input {
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
#define HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_LAST HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -43548,7 +49206,7 @@ typedef struct hwrm_cfa_decap_filter_alloc_input {
#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
#define HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST HWRM_CFA_DECAP_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -44749,7 +50407,7 @@ typedef struct hwrm_cfa_flow_stats_output {
* If a flow has been hit, the bit representing the flow will be 1.
* Likewise, if a flow has not, the bit representing the flow
* will be 0. Mapping will match flow numbers where bitX is for flowX
- * (ex: bit 0 is flow0). This only applies for NIC flows. Upon
+ * (ex: bit 0 is flow0). This only applies for NIC flows. Upon
* reading of the flow, the bit will be cleared for the flow and only
* set again when traffic is received by the flow.
*/
@@ -46954,7 +52612,7 @@ typedef struct hwrm_cfa_eem_qcaps_output {
/*
* When set to 1, indicates the FW supports the Centralized
* Memory Model. The concept designates one entity for the
- * memory allocation while all others ‘subscribe’ to it.
+ * memory allocation while all others 'subscribe' to it.
*/
#define HWRM_CFA_EEM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED UINT32_C(0x4)
/*
@@ -47490,10 +53148,16 @@ typedef struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NIC_FLOW_STATS_SUPPORTED UINT32_C(0x80000)
/*
* If set to 1, firmware is capable of supporting these additional
- * ip_protoccols: ICMP, ICMPV6, RSVD for ntuple rules. By default,
+ * ip_protocols: ICMP, ICMPV6, RSVD for ntuple rules. By default,
* this flag should be 0 for older version of firmware.
*/
#define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_NTUPLE_FLOW_RX_EXT_IP_PROTO_SUPPORTED UINT32_C(0x100000)
+ /*
+ * Value of 1 to indicate that firmware supports setting of
+ * rfs_ring_tbl_idx (new offset) in HWRM_CFA_NTUPLE_ALLOC command.
+ * Value of 0 indicates ring tbl idx should be passed using dst_id.
+ */
+ #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED UINT32_C(0x200000)
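This capability bit pairs with the repurposed rfs_ring_tbl_idx offset in the ntuple filter allocation request: new firmware takes the ring index in the dedicated field, while older firmware still expects it in dst_id. A hedged sketch of the negotiation (the qcaps and req variables are illustrative):

	if (le32toh(qcaps->flags) &
	    HWRM_CFA_ADV_FLOW_MGNT_QCAPS_OUTPUT_FLAGS_RFS_RING_TBL_IDX_V3_SUPPORTED) {
		req.rfs_ring_tbl_idx = htole16(ring_idx);
		req.enables |= htole32(
		    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_RFS_RING_TBL_IDX);
	} else {
		/* Legacy firmware: the ring index travels in dst_id. */
		req.dst_id = htole16(ring_idx);
		req.flags |= htole32(
		    HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DEST_RFS_RING_IDX);
	}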
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
@@ -47629,7 +53293,7 @@ typedef struct hwrm_cfa_lag_group_member_rgtr_input {
/*
* Transmit based on packet header ntuple hash. Packet with only
* layer 2 headers will hash using the destination MAC, source MAC
- * and Ethertype fields. Packets with layer 3 (IP) headers will
+ * and Ethertype fields. Packets with layer 3 (IP) headers will
* hash using the destination MAC, source MAC, IP protocol/next
* header, source IP address and destination IP address. Packets
* with layer 4 (TCP/UDP) headers will hash using the destination
@@ -47744,7 +53408,7 @@ typedef struct hwrm_cfa_lag_group_member_unrgtr_output {
*****************************/
-/* hwrm_cfa_tls_filter_alloc_input (size:704b/88B) */
+/* hwrm_cfa_tls_filter_alloc_input (size:768b/96B) */
typedef struct hwrm_cfa_tls_filter_alloc_input {
/* The HWRM command request type. */
@@ -47781,47 +53445,47 @@ typedef struct hwrm_cfa_tls_filter_alloc_input {
* This bit must be '1' for the l2_filter_id field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID UINT32_C(0x1)
/*
* This bit must be '1' for the ethertype field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x2)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE UINT32_C(0x2)
/*
* This bit must be '1' for the ipaddr_type field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x4)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE UINT32_C(0x4)
/*
* This bit must be '1' for the src_ipaddr field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x8)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR UINT32_C(0x8)
/*
* This bit must be '1' for the dst_ipaddr field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x10)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR UINT32_C(0x10)
/*
* This bit must be '1' for the ip_protocol field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x20)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL UINT32_C(0x20)
/*
* This bit must be '1' for the src_port field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x40)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT UINT32_C(0x40)
/*
* This bit must be '1' for the dst_port field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x80)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_DST_PORT UINT32_C(0x80)
/*
* This bit must be '1' for the kid field to be
* configured.
*/
- #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID UINT32_C(0x100)
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID UINT32_C(0x100)
/*
* This bit must be '1' for the dst_id field to be
* configured.
@@ -47833,6 +53497,11 @@ typedef struct hwrm_cfa_tls_filter_alloc_input {
*/
#define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID UINT32_C(0x400)
/*
+ * This bit must be '1' for the quic_dst_connect_id field to be
+ * configured.
+ */
+ #define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_QUIC_DST_CONNECT_ID UINT32_C(0x800)
+ /*
* This value identifies a set of CFA data structures used for an L2
* context.
*/
@@ -47855,7 +53524,7 @@ typedef struct hwrm_cfa_tls_filter_alloc_input {
#define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 UINT32_C(0x6)
#define HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_LAST HWRM_CFA_TLS_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV6
/*
- * The value of protocol filed in IP header.
+ * The value of protocol field in IP header.
* Applies to UDP and TCP traffic.
* 6 - TCP
* 17 - UDP
@@ -47902,10 +53571,12 @@ typedef struct hwrm_cfa_tls_filter_alloc_input {
*/
uint16_t dst_port;
/*
- * The Key Context Identifier (KID) for use with KTLS.
+ * The Key Context Identifier (KID) for use with KTLS or QUIC.
* KID is limited to 20-bits.
*/
uint32_t kid;
+ /* The Destination Connection ID of QUIC. */
+ uint64_t quic_dst_connect_id;
} hwrm_cfa_tls_filter_alloc_input_t, *phwrm_cfa_tls_filter_alloc_input_t;
/* hwrm_cfa_tls_filter_alloc_output (size:192b/24B) */
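The widened request carries QUIC state alongside the KTLS key context, and the new enable bit must be set before firmware will read quic_dst_connect_id. A short sketch (kid and dcid are illustrative caller values):

	req.enables |= htole32(HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_KID |
	    HWRM_CFA_TLS_FILTER_ALLOC_INPUT_ENABLES_QUIC_DST_CONNECT_ID);
	req.kid = htole32(kid & 0xfffff);	/* KID is limited to 20 bits */
	req.quic_dst_connect_id = htole64(dcid);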
@@ -48027,6 +53698,93 @@ typedef struct hwrm_cfa_tls_filter_free_output {
uint8_t valid;
} hwrm_cfa_tls_filter_free_output_t, *phwrm_cfa_tls_filter_free_output_t;
+/*****************************
+ * hwrm_cfa_release_afm_func *
+ *****************************/
+
+
+/* hwrm_cfa_release_afm_func_input (size:256b/32B) */
+
+typedef struct hwrm_cfa_release_afm_func_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Function identifier; may be of type efid, rfid or dfid. */
+ uint16_t fid;
+ /* Representor function identifier. */
+ uint16_t rfid;
+ /* Fid type. */
+ uint8_t type;
+ /* Endpoint fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_EFID UINT32_C(0x1)
+ /* Representor fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_RFID UINT32_C(0x2)
+ /* Redirect fid. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID UINT32_C(0x3)
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_LAST HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID
+ uint8_t unused_0[3];
+ /*
+ * Flags used to control AFM's actions when releasing the function.
+ * Only used when type is dfid.
+ */
+ uint32_t flags;
+ /* Remove broadcast. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_BC_REM UINT32_C(0x1)
+ /* Remove multicast. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_MC_REM UINT32_C(0x2)
+ /* Remove promiscuous. */
+ #define HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_PROMISC_REM UINT32_C(0x4)
+ uint32_t unused_1;
+} hwrm_cfa_release_afm_func_input_t, *phwrm_cfa_release_afm_func_input_t;
+
+/* hwrm_cfa_release_afm_func_output (size:128b/16B) */
+
+typedef struct hwrm_cfa_release_afm_func_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} hwrm_cfa_release_afm_func_output_t, *phwrm_cfa_release_afm_func_output_t;
+
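A caller releasing a dfid can combine the removal flags to strip broadcast and multicast handling in the same request. A minimal sketch, assuming a zeroed request and a hypothetical hwrm_send_message() transport helper:

	hwrm_cfa_release_afm_func_input_t req = {0};

	req.fid = htole16(dfid);
	req.type = HWRM_CFA_RELEASE_AFM_FUNC_INPUT_TYPE_DFID;
	req.flags = htole32(HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_BC_REM |
	    HWRM_CFA_RELEASE_AFM_FUNC_INPUT_FLAGS_MC_REM);
	/* hwrm_send_message() and sc are illustrative, not from this header. */
	int rc = hwrm_send_message(sc, &req, sizeof(req));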
/***********
* hwrm_tf *
***********/
@@ -48098,7 +53856,7 @@ typedef struct hwrm_tf_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -48273,91 +54031,6 @@ typedef struct hwrm_tf_session_open_output {
uint8_t valid;
} hwrm_tf_session_open_output_t, *phwrm_tf_session_open_output_t;
-/**************************
- * hwrm_tf_session_attach *
- **************************/
-
-
-/* hwrm_tf_session_attach_input (size:704b/88B) */
-
-typedef struct hwrm_tf_session_attach_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * Unique session identifier for the session that the attach
- * request want to attach to. This value originates from the
- * shared session memory that the attach request opened by
- * way of the 'attach name' that was passed in to the core
- * attach API.
- * The fw_session_id of the attach session includes PCIe bus
- * info to distinguish the PF and session info to identify
- * the associated TruFlow session.
- */
- uint32_t attach_fw_session_id;
- /* unused. */
- uint32_t unused0;
- /* Name of the session it self. */
- uint8_t session_name[64];
-} hwrm_tf_session_attach_input_t, *phwrm_tf_session_attach_input_t;
-
-/* hwrm_tf_session_attach_output (size:128b/16B) */
-
-typedef struct hwrm_tf_session_attach_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /*
- * Unique session identifier for the session created by the
- * firmware. It includes PCIe bus info to distinguish the PF
- * and session info to identify the associated TruFlow
- * session. This fw_session_id is unique to the attach
- * request.
- */
- uint32_t fw_session_id;
- /* unused. */
- uint8_t unused0[3];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
- */
- uint8_t valid;
-} hwrm_tf_session_attach_output_t, *phwrm_tf_session_attach_output_t;
-
/****************************
* hwrm_tf_session_register *
****************************/
@@ -48898,91 +54571,6 @@ typedef struct hwrm_tf_session_resc_alloc_output {
uint8_t valid;
} hwrm_tf_session_resc_alloc_output_t, *phwrm_tf_session_resc_alloc_output_t;
-/*****************************
- * hwrm_tf_session_resc_free *
- *****************************/
-
-
-/* hwrm_tf_session_resc_free_input (size:256b/32B) */
-
-typedef struct hwrm_tf_session_resc_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Control flags. */
- uint16_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_LAST HWRM_TF_SESSION_RESC_FREE_INPUT_FLAGS_DIR_TX
- /*
- * Defines the size, in bytes, of the provided free_addr
- * buffer.
- */
- uint16_t free_size;
- /*
- * This is the DMA address for the free input data array
- * buffer. Array is of tf_rm_resc_entry type. Size of the
- * buffer is provided by the 'free_size' field of this
- * message.
- */
- uint64_t free_addr;
-} hwrm_tf_session_resc_free_input_t, *phwrm_tf_session_resc_free_input_t;
-
-/* hwrm_tf_session_resc_free_output (size:128b/16B) */
-
-typedef struct hwrm_tf_session_resc_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
- /*
- * This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal
- * processor, the order of writes has to be such that this field is
- * written last.
- */
- uint8_t valid;
-} hwrm_tf_session_resc_free_output_t, *phwrm_tf_session_resc_free_output_t;
-
/******************************
* hwrm_tf_session_resc_flush *
******************************/
@@ -49037,7 +54625,7 @@ typedef struct hwrm_tf_session_resc_flush_input {
uint16_t flush_size;
/*
* This is the DMA address for the flush input data array
- * buffer. Array of tf_rm_resc_entry type. Size of the
+ * buffer. Array of tf_rm_resc_entry type. Size of the
* buffer is provided by the 'flush_size' field in this
* message.
*/
@@ -49192,14 +54780,14 @@ typedef struct tf_rm_resc_entry {
uint16_t stride;
} tf_rm_resc_entry_t, *ptf_rm_resc_entry_t;
-/************************
- * hwrm_tf_tbl_type_get *
- ************************/
+/**************************
+ * hwrm_tf_tbl_type_alloc *
+ **************************/
-/* hwrm_tf_tbl_type_get_input (size:256b/32B) */
+/* hwrm_tf_tbl_type_alloc_input (size:192b/24B) */
-typedef struct hwrm_tf_tbl_type_get_input {
+typedef struct hwrm_tf_tbl_type_alloc_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49233,31 +54821,34 @@ typedef struct hwrm_tf_tbl_type_get_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX
- /*
- * When set use the special access register access to clear
- * the table entry on read.
- */
- #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_CLEAR_ON_READ UINT32_C(0x2)
- /* unused. */
- uint8_t unused0[2];
- /*
- * Type of the resource, defined globally in the
- * hwrm_tf_resc_type enum.
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_TX
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_LAST HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /*
+ * This field is blktype specific. For any of the UPAR types, a
+ * non-zero value indicates a re-alloc and specifies the tunnel
+ * type of the dynamic UPAR tunnel.
*/
- uint32_t type;
- /* Index of the type to retrieve. */
- uint32_t index;
-} hwrm_tf_tbl_type_get_input_t, *phwrm_tf_tbl_type_get_input_t;
+ uint8_t type;
+} hwrm_tf_tbl_type_alloc_input_t, *phwrm_tf_tbl_type_alloc_input_t;
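The alloc request inverts the old get-by-index shape: the caller names a parser block and the firmware hands back an idx_tbl_id in the response. A hedged sketch of an allocation in the RE gparse block (fw_session_id is assumed to be a field of this request, as in the other tf commands):

	hwrm_tf_tbl_type_alloc_input_t req = {0};

	req.fw_session_id = htole32(fw_session_id);
	req.flags = htole16(HWRM_TF_TBL_TYPE_ALLOC_INPUT_FLAGS_DIR_RX);
	req.blktype = HWRM_TF_TBL_TYPE_ALLOC_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE;
	req.type = 0;	/* non-zero only for a dynamic-UPAR re-alloc */
	/* on success, the output's idx_tbl_id identifies the new entry */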
-/* hwrm_tf_tbl_type_get_output (size:2240b/280B) */
+/* hwrm_tf_tbl_type_alloc_output (size:128b/16B) */
-typedef struct hwrm_tf_tbl_type_get_output {
+typedef struct hwrm_tf_tbl_type_alloc_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49268,14 +54859,13 @@ typedef struct hwrm_tf_tbl_type_get_output {
uint16_t resp_len;
/* Response code. */
uint32_t resp_code;
- /* Response size. */
- uint16_t size;
- /* unused */
- uint16_t unused0;
- /* Response data. */
- uint8_t data[256];
+ /*
+ * Table entry allocated by the firmware using the
+ * parameters above.
+ */
+ uint16_t idx_tbl_id;
/* unused */
- uint8_t unused1[7];
+ uint8_t unused0;
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49285,16 +54875,16 @@ typedef struct hwrm_tf_tbl_type_get_output {
* is written last.
*/
uint8_t valid;
-} hwrm_tf_tbl_type_get_output_t, *phwrm_tf_tbl_type_get_output_t;
+} hwrm_tf_tbl_type_alloc_output_t, *phwrm_tf_tbl_type_alloc_output_t;
/************************
- * hwrm_tf_tbl_type_set *
+ * hwrm_tf_tbl_type_get *
************************/
-/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */
+/* hwrm_tf_tbl_type_get_input (size:256b/32B) */
-typedef struct hwrm_tf_tbl_type_set_input {
+typedef struct hwrm_tf_tbl_type_get_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49328,16 +54918,30 @@ typedef struct hwrm_tf_tbl_type_set_input {
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
/* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX
- /* Indicate table data is being sent via DMA. */
- #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_DIR_TX
+ /*
+ * When set use the special access register access to clear
+ * the table entry on read.
+ */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_FLAGS_CLEAR_ON_READ UINT32_C(0x2)
+ /* Specifies which block this idx table get request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_LAST HWRM_TF_TBL_TYPE_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* unused. */
- uint8_t unused0[2];
+ uint8_t unused0;
/*
* Type of the resource, defined globally in the
* hwrm_tf_resc_type enum.
@@ -49345,17 +54949,11 @@ typedef struct hwrm_tf_tbl_type_set_input {
uint32_t type;
/* Index of the type to retrieve. */
uint32_t index;
- /* Size of the data to set. */
- uint16_t size;
- /* unused */
- uint8_t unused1[6];
- /* Data to be set. */
- uint8_t data[88];
-} hwrm_tf_tbl_type_set_input_t, *phwrm_tf_tbl_type_set_input_t;
+} hwrm_tf_tbl_type_get_input_t, *phwrm_tf_tbl_type_get_input_t;
-/* hwrm_tf_tbl_type_set_output (size:128b/16B) */
+/* hwrm_tf_tbl_type_get_output (size:2240b/280B) */
-typedef struct hwrm_tf_tbl_type_set_output {
+typedef struct hwrm_tf_tbl_type_get_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -49364,8 +54962,16 @@ typedef struct hwrm_tf_tbl_type_set_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
+ uint16_t size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[256];
+ /* unused */
+ uint8_t unused1[7];
/*
* This field is used in Output records to indicate that the output
* is completely written to RAM. This field should be read as '1'
@@ -49375,413 +54981,16 @@ typedef struct hwrm_tf_tbl_type_set_output {
* is written last.
*/
uint8_t valid;
-} hwrm_tf_tbl_type_set_output_t, *phwrm_tf_tbl_type_set_output_t;
-
-/**************************
- * hwrm_tf_ctxt_mem_alloc *
- **************************/
-
-
-/* hwrm_tf_ctxt_mem_alloc_input (size:192b/24B) */
-
-typedef struct hwrm_tf_ctxt_mem_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Size in KB of memory to be allocated. */
- uint32_t mem_size;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
-} hwrm_tf_ctxt_mem_alloc_input_t, *phwrm_tf_ctxt_mem_alloc_input_t;
-
-/* hwrm_tf_ctxt_mem_alloc_output (size:192b/24B) */
-
-typedef struct hwrm_tf_ctxt_mem_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
- /* Size of memory allocated. */
- uint32_t mem_size;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
- /*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
- */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LAST HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_LAST HWRM_TF_CTXT_MEM_ALLOC_OUTPUT_PAGE_SIZE_1G
- /* unused. */
- uint8_t unused0;
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tf_ctxt_mem_alloc_output_t, *phwrm_tf_ctxt_mem_alloc_output_t;
-
-/*************************
- * hwrm_tf_ctxt_mem_free *
- *************************/
-
-
-/* hwrm_tf_ctxt_mem_free_input (size:320b/40B) */
-
-typedef struct hwrm_tf_ctxt_mem_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
- /*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
- */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LAST HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_LAST HWRM_TF_CTXT_MEM_FREE_INPUT_PAGE_SIZE_1G
- /* unused. */
- uint8_t unused0[2];
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
- /* Size of memory allocated. */
- uint32_t mem_size;
- /* unused. */
- uint8_t unused1[4];
-} hwrm_tf_ctxt_mem_free_input_t, *phwrm_tf_ctxt_mem_free_input_t;
-
-/* hwrm_tf_ctxt_mem_free_output (size:128b/16B) */
-
-typedef struct hwrm_tf_ctxt_mem_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tf_ctxt_mem_free_output_t, *phwrm_tf_ctxt_mem_free_output_t;
-
-/*************************
- * hwrm_tf_ctxt_mem_rgtr *
- *************************/
-
-
-/* hwrm_tf_ctxt_mem_rgtr_input (size:256b/32B) */
-
-typedef struct hwrm_tf_ctxt_mem_rgtr_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Control flags. */
- uint16_t flags;
- /* Counter PBL indirect levels. */
- uint8_t page_level;
- /* PBL pointer is physical start address. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_0 UINT32_C(0x0)
- /* PBL pointer points to PTE table. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_1 UINT32_C(0x1)
- /*
- * PBL pointer points to PDE table with each entry pointing
- * to PTE tables.
- */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2 UINT32_C(0x2)
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LAST HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_LEVEL_LVL_2
- /* Page size. */
- uint8_t page_size;
- /* 4KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4K UINT32_C(0x0)
- /* 8KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8K UINT32_C(0x1)
- /* 64KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_64K UINT32_C(0x4)
- /* 128KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_128K UINT32_C(0x5)
- /* 256KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_256K UINT32_C(0x6)
- /* 512KB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_512K UINT32_C(0x7)
- /* 1MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1M UINT32_C(0x8)
- /* 2MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_2M UINT32_C(0x9)
- /* 4MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_4M UINT32_C(0xa)
- /* 8MB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_8M UINT32_C(0xb)
- /* 1GB page size. */
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G UINT32_C(0x12)
- #define HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_LAST HWRM_TF_CTXT_MEM_RGTR_INPUT_PAGE_SIZE_1G
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* Pointer to the PBL, or PDL depending on number of levels */
- uint64_t page_dir;
-} hwrm_tf_ctxt_mem_rgtr_input_t, *phwrm_tf_ctxt_mem_rgtr_input_t;
-
-/* hwrm_tf_ctxt_mem_rgtr_output (size:128b/16B) */
-
-typedef struct hwrm_tf_ctxt_mem_rgtr_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /*
- * Id/Handle to the recently register context memory. This
- * handle is passed to the TF session.
- */
- uint16_t ctx_id;
- /* unused. */
- uint8_t unused0[5];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tf_ctxt_mem_rgtr_output_t, *phwrm_tf_ctxt_mem_rgtr_output_t;
-
-/***************************
- * hwrm_tf_ctxt_mem_unrgtr *
- ***************************/
-
-
-/* hwrm_tf_ctxt_mem_unrgtr_input (size:192b/24B) */
-
-typedef struct hwrm_tf_ctxt_mem_unrgtr_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /*
- * Id/Handle to the recently register context memory. This
- * handle is passed to the TF session.
- */
- uint16_t ctx_id;
- /* unused. */
- uint8_t unused0[2];
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
-} hwrm_tf_ctxt_mem_unrgtr_input_t, *phwrm_tf_ctxt_mem_unrgtr_input_t;
-
-/* hwrm_tf_ctxt_mem_unrgtr_output (size:128b/16B) */
-
-typedef struct hwrm_tf_ctxt_mem_unrgtr_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tf_ctxt_mem_unrgtr_output_t, *phwrm_tf_ctxt_mem_unrgtr_output_t;
+} hwrm_tf_tbl_type_get_output_t, *phwrm_tf_tbl_type_get_output_t;
/************************
- * hwrm_tf_ext_em_qcaps *
+ * hwrm_tf_tbl_type_set *
************************/
-/* hwrm_tf_ext_em_qcaps_input (size:192b/24B) */
+/* hwrm_tf_tbl_type_set_input (size:1024b/128B) */
-typedef struct hwrm_tf_ext_em_qcaps_input {
+typedef struct hwrm_tf_tbl_type_set_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -49810,221 +55019,50 @@ typedef struct hwrm_tf_ext_em_qcaps_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_LAST HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_QCAPS_INPUT_FLAGS_PREFERRED_OFFLOAD UINT32_C(0x2)
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint32_t fw_session_id;
-} hwrm_tf_ext_em_qcaps_input_t, *phwrm_tf_ext_em_qcaps_input_t;
-
-/* hwrm_tf_ext_em_qcaps_output (size:384b/48B) */
-
-typedef struct hwrm_tf_ext_em_qcaps_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- uint32_t flags;
- /*
- * When set to 1, indicates the FW supports the Centralized
- * Memory Model. The concept designates one entity for the
- * memory allocation while all others ‘subscribe’ to it.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_CENTRALIZED_MEMORY_MODEL_SUPPORTED UINT32_C(0x1)
- /*
- * When set to 1, indicates the FW supports the Detached
- * Centralized Memory Model. The memory is allocated and managed
- * as a separate entity. All PFs and VFs will be granted direct
- * or semi-direct access to the allocated memory while none of
- * which can interfere with the management of the memory.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_DETACHED_CENTRALIZED_MEMORY_MODEL_SUPPORTED UINT32_C(0x2)
- /* When set to 1, indicates FW support for host based EEM memory. */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_HOST_MEMORY_SUPPORTED UINT32_C(0x4)
- /* When set to 1, indicates FW support for on-chip based EEM memory. */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_FLAGS_FW_MEMORY_SUPPORTED UINT32_C(0x8)
- /* unused. */
- uint32_t unused0;
- /* Support flags. */
- uint32_t supported;
- /*
- * If set to 1, then EXT EM KEY0 table is supported using
- * crc32 hash.
- * If set to 0, EXT EM KEY0 table is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY0_TABLE UINT32_C(0x1)
- /*
- * If set to 1, then EXT EM KEY1 table is supported using
- * lookup3 hash.
- * If set to 0, EXT EM KEY1 table is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_KEY1_TABLE UINT32_C(0x2)
- /*
- * If set to 1, then EXT EM External Record table is supported.
- * If set to 0, EXT EM External Record table is not
- * supported. (This table includes action record, EFC
- * pointers, encap pointers)
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_RECORD_TABLE UINT32_C(0x4)
- /*
- * If set to 1, then EXT EM External Flow Counters table is
- * supported.
- * If set to 0, EXT EM External Flow Counters table is not
- * supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE UINT32_C(0x8)
- /*
- * If set to 1, then FID table used for implicit flow flush
- * is supported.
- * If set to 0, then FID table used for implicit flow flush
- * is not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_FID_TABLE UINT32_C(0x10)
- /*
- * If set to 1, then table scopes are supported.
- * If set to 0, then table scopes are not supported.
- */
- #define HWRM_TF_EXT_EM_QCAPS_OUTPUT_SUPPORTED_TBL_SCOPES UINT32_C(0x20)
- /*
- * The maximum number of entries supported by EXT EM. When
- * configuring the host memory, the supported numbers of
- * entries are -
- * 32k, 64k, 128k, 256k, 512k, 1M, 2M, 4M, 8M, 32M, 64M,
- * 128M entries.
- * For any other value, the FW will round down to the
- * closest supported number of entries.
- */
- uint32_t max_entries_supported;
- /*
- * The entry size in bytes of each entry in the EXT EM
- * KEY0/KEY1 tables.
- */
- uint16_t key_entry_size;
- /*
- * The entry size in bytes of each entry in the EXT EM RECORD
- * tables.
- */
- uint16_t record_entry_size;
- /* The entry size in bytes of each entry in the EXT EM EFC tables. */
- uint16_t efc_entry_size;
- /* The FID size in bytes of each entry in the EXT EM FID tables. */
- uint16_t fid_entry_size;
- /* Maximum number of ctxt mem allocations allowed. */
- uint32_t max_ctxt_mem_allocs;
- /*
- * Maximum number of static buckets that can be assigned to lookup
- * table scopes.
- */
- uint32_t max_static_buckets;
- /* unused. */
- uint8_t unused1[7];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tf_ext_em_qcaps_output_t, *phwrm_tf_ext_em_qcaps_output_t;
-
-/*********************
- * hwrm_tf_ext_em_op *
- *********************/
-
-
-/* hwrm_tf_ext_em_op_input (size:256b/32B) */
-
-typedef struct hwrm_tf_ext_em_op_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
/* Control flags. */
uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
 /* If this bit is set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_LAST HWRM_TF_EXT_EM_OP_INPUT_FLAGS_DIR_TX
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate table data is being sent via DMA. */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /* Specifies which block this table type set request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_LAST HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* unused. */
- uint16_t unused0;
- /* The number of EXT EM key table entries to be configured. */
- uint16_t op;
- /* This value is reserved and should not be used. */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_RESERVED UINT32_C(0x0)
- /*
- * To properly stop EXT EM and ensure there are no DMA's,
- * the caller must disable EXT EM for the given PF, using
- * this call. This will safely disable EXT EM and ensure
- * that all DMA'ed to the keys/records/efc have been
- * completed.
- */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_DISABLE UINT32_C(0x1)
- /*
- * Once the EXT EM host memory has been configured, EXT EM
- * options have been configured. Then the caller should
- * enable EXT EM for the given PF. Note once this call has
- * been made, then the EXT EM mechanism will be active and
- * DMA's will occur as packets are processed.
- */
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_ENABLE UINT32_C(0x2)
+ uint8_t unused0;
/*
- * Clear EXT EM settings for the given PF so that the
- * register values are reset back to their initial state.
+ * Type of the resource, defined globally in the
+ * hwrm_tf_resc_type enum.
*/
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP UINT32_C(0x3)
- #define HWRM_TF_EXT_EM_OP_INPUT_OP_LAST HWRM_TF_EXT_EM_OP_INPUT_OP_EXT_EM_CLEANUP
- /* unused. */
- uint16_t unused1;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* unused. */
- uint32_t unused2;
-} hwrm_tf_ext_em_op_input_t, *phwrm_tf_ext_em_op_input_t;
+ uint32_t type;
+ /* Index of the type to set. */
+ uint32_t index;
+ /* Size of the data to set. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused1[6];
+ /* Data to be set. */
+ uint8_t data[88];
+} hwrm_tf_tbl_type_set_input_t, *phwrm_tf_tbl_type_set_input_t;
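
A minimal host-side sketch of filling in this request. It assumes a hypothetical bnxt_hwrm_send() wrapper that stamps the common header fields (req_type, cmpl_ring, seq_id, resp_addr) before handing the message to firmware; only the TF-specific fields defined above are shown, and HWRM fields are little-endian, hence the htole*() conversions.

#include <sys/endian.h>	/* htole16()/htole32() */
#include <errno.h>
#include <string.h>

/* Assumed wrapper; the real driver plumbing is elided. */
int bnxt_hwrm_send(void *softc, void *req, size_t len);

static int
tf_tbl_type_set_sketch(void *softc, uint32_t fw_session_id,
    uint32_t type, uint32_t index, const uint8_t *buf, uint16_t len)
{
	hwrm_tf_tbl_type_set_input_t req = {0};

	/* Payloads larger than data[] must use FLAGS_DMA instead. */
	if (len > sizeof(req.data))
		return (EINVAL);

	req.fw_session_id = htole32(fw_session_id);
	/* DIR bit clear selects the rx table; set DIR_TX for tx. */
	req.flags = htole16(HWRM_TF_TBL_TYPE_SET_INPUT_FLAGS_DIR_RX);
	req.blktype = HWRM_TF_TBL_TYPE_SET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.type = htole32(type);	/* hwrm_tf_resc_type value */
	req.index = htole32(index);
	req.size = htole16(len);
	memcpy(req.data, buf, len);

	return (bnxt_hwrm_send(softc, &req, sizeof(req)));
}
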
-/* hwrm_tf_ext_em_op_output (size:128b/16B) */
+/* hwrm_tf_tbl_type_set_output (size:128b/16B) */
-typedef struct hwrm_tf_ext_em_op_output {
+typedef struct hwrm_tf_tbl_type_set_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50036,24 +55074,24 @@ typedef struct hwrm_tf_ext_em_op_output {
/* unused. */
uint8_t unused0[7];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
-} hwrm_tf_ext_em_op_output_t, *phwrm_tf_ext_em_op_output_t;
+} hwrm_tf_tbl_type_set_output_t, *phwrm_tf_tbl_type_set_output_t;
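
The 'valid' handshake in the comment above is common to every output record in this file; a sketch of honoring it on the host side (the timeout policy and any read barrier the platform needs are driver specifics assumed away here):

#include <stdbool.h>
#include <stdint.h>

/*
 * Firmware writes 'valid' last, so no other response field may be
 * trusted until this byte reads as 1.
 */
static bool
hwrm_output_valid(const volatile hwrm_tf_tbl_type_set_output_t *resp)
{
	return (resp->valid == 1);
}
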
-/**********************
- * hwrm_tf_ext_em_cfg *
- **********************/
+/*************************
+ * hwrm_tf_tbl_type_free *
+ *************************/
-/* hwrm_tf_ext_em_cfg_input (size:512b/64B) */
+/* hwrm_tf_tbl_type_free_input (size:256b/32B) */
-typedef struct hwrm_tf_ext_em_cfg_input {
+typedef struct hwrm_tf_tbl_type_free_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -50082,212 +55120,42 @@ typedef struct hwrm_tf_ext_em_cfg_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
/* Control flags. */
- uint32_t flags;
+ uint16_t flags;
/* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
 /* If this bit is set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
/* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_LAST HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_PREFERRED_OFFLOAD UINT32_C(0x2)
- /* When set to 1, secondary, 0 means primary. */
- #define HWRM_TF_EXT_EM_CFG_INPUT_FLAGS_SECONDARY_PF UINT32_C(0x4)
- /*
- * Group_id which used by Firmware to identify memory pools belonging
- * to certain group.
- */
- uint16_t group_id;
- /*
- * Dynamically reconfigure EEM pending cache every 1/10th of second.
- * If set to 0 it will disable the EEM HW flush of the pending cache.
- */
- uint8_t flush_interval;
- /* unused. */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_LAST HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_TX
+ /* Specifies which block this table type free request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_LAST HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* Unused */
uint8_t unused0;
/*
- * Configured EXT EM with the given number of entries. All
- * the EXT EM tables KEY0, KEY1, RECORD, EFC all have the
- * same number of entries and all tables will be configured
- * using this value. Current minimum value is 32k. Current
- * maximum value is 128M.
- */
- uint32_t num_entries;
- uint32_t enables;
- /*
- * This bit must be '1' for the group_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_GROUP_ID UINT32_C(0x1)
- /*
- * This bit must be '1' for the flush_interval field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_FLUSH_INTERVAL UINT32_C(0x2)
- /*
- * This bit must be '1' for the num_entries field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_NUM_ENTRIES UINT32_C(0x4)
- /*
- * This bit must be '1' for the key0_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_KEY0_CTX_ID UINT32_C(0x8)
- /*
- * This bit must be '1' for the key1_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_KEY1_CTX_ID UINT32_C(0x10)
- /*
- * This bit must be '1' for the record_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_RECORD_CTX_ID UINT32_C(0x20)
- /*
- * This bit must be '1' for the efc_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_EFC_CTX_ID UINT32_C(0x40)
- /*
- * This bit must be '1' for the fid_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_FID_CTX_ID UINT32_C(0x80)
- /*
- * This bit must be '1' for the action_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_CTX_ID UINT32_C(0x100)
- /*
- * This bit must be '1' for the action_tbl_scope field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_ACTION_TBL_SCOPE UINT32_C(0x200)
- /*
- * This bit must be '1' for the lkup_ctx_id field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_CTX_ID UINT32_C(0x400)
- /*
- * This bit must be '1' for the lkup_tbl_scope field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_TBL_SCOPE UINT32_C(0x800)
- /*
- * This bit must be '1' for the lkup_static_buckets field to be
- * configured.
- */
- #define HWRM_TF_EXT_EM_CFG_INPUT_ENABLES_LKUP_STATIC_BUCKETS UINT32_C(0x1000)
- /* Configured EXT EM with the given context id for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EXT EM with the given context id for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EXT EM with the given context id for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EXT EM with the given context id for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EXT EM with the given context id for FID table. */
- uint16_t fid_ctx_id;
- /* Context id of action table scope. */
- uint16_t action_ctx_id;
- /* Table scope id used for action record entries. */
- uint16_t action_tbl_scope;
- /* Context id of lookup table scope. */
- uint16_t lkup_ctx_id;
- /* Table scope id used for EM lookup entries. */
- uint16_t lkup_tbl_scope;
- /* unused. */
- uint16_t unused1;
- /*
- * Number of 32B static buckets to be allocated at the beginning
- * of table scope.
- */
- uint32_t lkup_static_buckets;
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
- /* unused. */
- uint32_t unused2;
-} hwrm_tf_ext_em_cfg_input_t, *phwrm_tf_ext_em_cfg_input_t;
-
-/* hwrm_tf_ext_em_cfg_output (size:128b/16B) */
-
-typedef struct hwrm_tf_ext_em_cfg_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* unused. */
- uint8_t unused0[7];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * Table entry to be freed by the firmware using the parameters
+ * above.
*/
- uint8_t valid;
-} hwrm_tf_ext_em_cfg_output_t, *phwrm_tf_ext_em_cfg_output_t;
-
-/***********************
- * hwrm_tf_ext_em_qcfg *
- ***********************/
-
-
-/* hwrm_tf_ext_em_qcfg_input (size:192b/24B) */
-
-typedef struct hwrm_tf_ext_em_qcfg_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_LAST HWRM_TF_EXT_EM_QCFG_INPUT_FLAGS_DIR_TX
- /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
- uint32_t fw_session_id;
-} hwrm_tf_ext_em_qcfg_input_t, *phwrm_tf_ext_em_qcfg_input_t;
+ uint16_t idx_tbl_id;
+ /* Unused */
+ uint8_t unused1[6];
+} hwrm_tf_tbl_type_free_input_t, *phwrm_tf_tbl_type_free_input_t;
-/* hwrm_tf_ext_em_qcfg_output (size:448b/56B) */
+/* hwrm_tf_tbl_type_free_output (size:128b/16B) */
-typedef struct hwrm_tf_ext_em_qcfg_output {
+typedef struct hwrm_tf_tbl_type_free_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -50296,92 +55164,20 @@ typedef struct hwrm_tf_ext_em_qcfg_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Control flags. */
- uint32_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_LAST HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_DIR_TX
- /* When set to 1, all offloaded flows will be sent to EXT EM. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_FLAGS_PREFERRED_OFFLOAD UINT32_C(0x2)
- /* The number of entries the FW has configured for EXT EM. */
- uint32_t num_entries;
- /* Configured EXT EM with the given context id for KEY0 table. */
- uint16_t key0_ctx_id;
- /* Configured EXT EM with the given context id for KEY1 table. */
- uint16_t key1_ctx_id;
- /* Configured EXT EM with the given context id for RECORD table. */
- uint16_t record_ctx_id;
- /* Configured EXT EM with the given context id for EFC table. */
- uint16_t efc_ctx_id;
- /* Configured EXT EM with the given context id for FID table. */
- uint16_t fid_ctx_id;
- /* unused. */
- uint16_t unused0;
- uint32_t supported;
- /* This bit must be '1' for the group_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_GROUP_ID UINT32_C(0x1)
- /* This bit must be '1' for the flush_interval field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_FLUSH_INTERVAL UINT32_C(0x2)
- /* This bit must be '1' for the num_entries field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_NUM_ENTRIES UINT32_C(0x4)
- /* This bit must be '1' for the key0_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_KEY0_CTX_ID UINT32_C(0x8)
- /* This bit must be '1' for the key1_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_KEY1_CTX_ID UINT32_C(0x10)
- /* This bit must be '1' for the record_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_RECORD_CTX_ID UINT32_C(0x20)
- /* This bit must be '1' for the efc_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_EFC_CTX_ID UINT32_C(0x40)
- /* This bit must be '1' for the fid_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_FID_CTX_ID UINT32_C(0x80)
- /* This bit must be '1' for the action_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_ACTION_CTX_ID UINT32_C(0x100)
- /* This bit must be '1' for the action_tbl_scope field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_ACTION_TBL_SCOPE UINT32_C(0x200)
- /* This bit must be '1' for the lkup_ctx_id field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_CTX_ID UINT32_C(0x400)
- /* This bit must be '1' for the lkup_tbl_scope field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_TBL_SCOPE UINT32_C(0x800)
- /* This bit must be '1' for the lkup_static_buckets field is set. */
- #define HWRM_TF_EXT_EM_QCFG_OUTPUT_SUPPORTED_LKUP_STATIC_BUCKETS UINT32_C(0x1000)
- /*
- * Group id is used by firmware to identify memory pools belonging
- * to certain group.
- */
- uint16_t group_id;
- /* EEM pending cache flush interval in 1/10th of second. */
- uint8_t flush_interval;
- /* unused. */
- uint8_t unused1;
- /* Context id of action table scope. */
- uint16_t action_ctx_id;
- /* Table scope id used for action record entries. */
- uint16_t action_tbl_scope;
- /* Context id of lookup table scope. */
- uint16_t lkup_ctx_id;
- /* Table scope id used for EM lookup entries. */
- uint16_t lkup_tbl_scope;
- /*
- * Number of 32B static buckets to be allocated at the beginning
- * of table scope.
- */
- uint32_t lkup_static_buckets;
- /* unused. */
- uint8_t unused2[7];
+ /* Response code. */
+ uint32_t resp_code;
+ /* unused */
+ uint8_t unused0[3];
/*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
*/
uint8_t valid;
-} hwrm_tf_ext_em_qcfg_output_t, *phwrm_tf_ext_em_qcfg_output_t;
+} hwrm_tf_tbl_type_free_output_t, *phwrm_tf_tbl_type_free_output_t;
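
A matching sketch for the free path, reusing the assumed bnxt_hwrm_send() wrapper and includes from the earlier set example; idx_tbl_id is the handle returned when the entry was allocated, and a caller would still check resp_code in the output above.

static int
tf_tbl_type_free_sketch(void *softc, uint32_t fw_session_id,
    uint16_t idx_tbl_id, bool tx)
{
	hwrm_tf_tbl_type_free_input_t req = {0};

	req.fw_session_id = htole32(fw_session_id);
	req.flags = htole16(tx ? HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_TX :
	    HWRM_TF_TBL_TYPE_FREE_INPUT_FLAGS_DIR_RX);
	req.blktype = HWRM_TF_TBL_TYPE_FREE_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.idx_tbl_id = htole16(idx_tbl_id);

	return (bnxt_hwrm_send(softc, &req, sizeof(req)));
}
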
/*********************
* hwrm_tf_em_insert *
@@ -50462,7 +55258,16 @@ typedef struct hwrm_tf_em_insert_output {
/* Number of word entries consumed by the key. */
uint8_t num_of_entries;
/* unused. */
- uint32_t unused0;
+ uint8_t unused0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} hwrm_tf_em_insert_output_t, *phwrm_tf_em_insert_output_t;
/**************************
@@ -50512,6 +55317,8 @@ typedef struct hwrm_tf_em_hash_insert_input {
/* If this bit is set to 1, then it indicates tx flow. */
#define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
#define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_LAST HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DIR_TX
+ /* Indicates table data is being sent via DMA. */
+ #define HWRM_TF_EM_HASH_INSERT_INPUT_FLAGS_DMA UINT32_C(0x2)
/* Number of bits in the EM record. */
uint16_t em_record_size_bits;
/* CRC32 hash of key. */
@@ -50544,7 +55351,16 @@ typedef struct hwrm_tf_em_hash_insert_output {
/* Number of word entries consumed by the key. */
uint8_t num_of_entries;
/* unused. */
- uint32_t unused0;
+ uint8_t unused0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} hwrm_tf_em_hash_insert_output_t, *phwrm_tf_em_hash_insert_output_t;
/*********************
@@ -50620,7 +55436,16 @@ typedef struct hwrm_tf_em_delete_output {
/* Original stack allocation index. */
uint16_t em_index;
/* unused. */
- uint16_t unused0[3];
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} hwrm_tf_em_delete_output_t, *phwrm_tf_em_delete_output_t;
/*******************
@@ -50694,7 +55519,16 @@ typedef struct hwrm_tf_em_move_output {
/* Index of old entry. */
uint16_t em_index;
/* unused. */
- uint16_t unused0[3];
+ uint8_t unused0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
} hwrm_tf_em_move_output_t, *phwrm_tf_em_move_output_t;
/********************
@@ -50793,7 +55627,7 @@ typedef struct hwrm_tf_tcam_set_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -50890,7 +55724,7 @@ typedef struct hwrm_tf_tcam_get_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -50974,7 +55808,7 @@ typedef struct hwrm_tf_tcam_move_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -51058,7 +55892,7 @@ typedef struct hwrm_tf_tcam_free_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -51112,6 +55946,8 @@ typedef struct hwrm_tf_global_cfg_set_input {
/* If this bit is set to 1, then it indicates tx flow. */
#define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
#define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_LAST HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate device data is being sent via DMA. */
+ #define HWRM_TF_GLOBAL_CFG_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
/* Global Cfg type */
uint32_t type;
/* Offset of the type */
@@ -51143,7 +55979,7 @@ typedef struct hwrm_tf_global_cfg_set_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -51207,7 +56043,7 @@ typedef struct hwrm_tf_global_cfg_get_input {
uint8_t unused0[6];
} hwrm_tf_global_cfg_get_input_t, *phwrm_tf_global_cfg_get_input_t;
-/* hwrm_tf_global_cfg_get_output (size:256b/32B) */
+/* hwrm_tf_global_cfg_get_output (size:2240b/280B) */
typedef struct hwrm_tf_global_cfg_get_output {
/* The specific error status for the command. */
@@ -51223,7 +56059,18 @@ typedef struct hwrm_tf_global_cfg_get_output {
/* unused. */
uint8_t unused0[6];
/* Data to set */
- uint8_t data[16];
+ uint8_t data[256];
+ /* unused. */
+ uint8_t unused1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
} hwrm_tf_global_cfg_get_output_t, *phwrm_tf_global_cfg_get_output_t;
/**********************
@@ -51655,6 +56502,208 @@ typedef struct hwrm_tf_session_hotup_state_get_output {
uint8_t valid;
} hwrm_tf_session_hotup_state_get_output_t, *phwrm_tf_session_hotup_state_get_output_t;
+/**************************
+ * hwrm_tf_resc_usage_set *
+ **************************/
+
+
+/* hwrm_tf_resc_usage_set_input (size:1024b/128B) */
+
+typedef struct hwrm_tf_resc_usage_set_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer where the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_LAST HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DIR_TX
+ /* Indicate table data is being sent via DMA. */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
+ /* Types of the resource to set their usage state. */
+ uint16_t types;
+ /* WC TCAM Pool */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_WC_TCAM UINT32_C(0x1)
+ /* EM Internal Memory Pool */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_EM UINT32_C(0x2)
+ /* Meter Instance */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_METER UINT32_C(0x4)
+ /* Counter Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_COUNTER UINT32_C(0x8)
+ /* Action Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACTION UINT32_C(0x10)
+ /* ACT MODIFY/ENCAP Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ACT_MOD_ENCAP UINT32_C(0x20)
+ /* Source Property SMAC Record Table */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_SP_SMAC UINT32_C(0x40)
+ /* All Resource Types */
+ #define HWRM_TF_RESC_USAGE_SET_INPUT_TYPES_ALL UINT32_C(0x80)
+ /* Size of the data to set. */
+ uint16_t size;
+ /* unused */
+ uint8_t unused1[6];
+ /* Data to be set. */
+ uint8_t data[96];
+} hwrm_tf_resc_usage_set_input_t, *phwrm_tf_resc_usage_set_input_t;
+
+/* hwrm_tf_resc_usage_set_output (size:128b/16B) */
+
+typedef struct hwrm_tf_resc_usage_set_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* unused. */
+ uint8_t unused0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tf_resc_usage_set_output_t, *phwrm_tf_resc_usage_set_output_t;
+
+/****************************
+ * hwrm_tf_resc_usage_query *
+ ****************************/
+
+
+/* hwrm_tf_resc_usage_query_input (size:256b/32B) */
+
+typedef struct hwrm_tf_resc_usage_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointer pointing to a host buffer that the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
+ uint32_t fw_session_id;
+ /* Control flags. */
+ uint16_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR UINT32_C(0x1)
+ /* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
+ /* unused. */
+ uint8_t unused0[2];
+ /* Types of the resource to retrieve their usage state. */
+ uint16_t types;
+ /* WC TCAM Pool */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_WC_TCAM UINT32_C(0x1)
+ /* EM Internal Memory Pool */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_EM UINT32_C(0x2)
+ /* Meter Instance */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_METER UINT32_C(0x4)
+ /* Counter Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_COUNTER UINT32_C(0x8)
+ /* Action Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACTION UINT32_C(0x10)
+ /* ACT MODIFY/ENCAP Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ACT_MOD_ENCAP UINT32_C(0x20)
+ /* Source Property SMAC Record Table */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_SP_SMAC UINT32_C(0x40)
+ /* All Resource Types */
+ #define HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_ALL UINT32_C(0x80)
+ /* unused */
+ uint8_t unused1[6];
+} hwrm_tf_resc_usage_query_input_t, *phwrm_tf_resc_usage_query_input_t;
+
+/* hwrm_tf_resc_usage_query_output (size:960b/120B) */
+
+typedef struct hwrm_tf_resc_usage_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Response code. */
+ uint32_t resp_code;
+ /* Response size. */
+ uint16_t size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[96];
+ /* unused */
+ uint8_t unused1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tf_resc_usage_query_output_t, *phwrm_tf_resc_usage_query_output_t;
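
To show how the 'types' bitmask composes and how the returned size bounds the copy-out, a sketch that queries several pools at once. bnxt_hwrm_send_resp() is an assumed wrapper that transmits the request, then waits for the response's 'valid' byte before returning.

/* Assumed wrapper: send request, wait for 'valid', copy response. */
int bnxt_hwrm_send_resp(void *softc, void *req, size_t reqlen,
    void *resp, size_t resplen);

static int
tf_resc_usage_query_sketch(void *softc, uint32_t fw_session_id,
    uint8_t *out, uint16_t outlen)
{
	hwrm_tf_resc_usage_query_input_t req = {0};
	hwrm_tf_resc_usage_query_output_t resp = {0};
	uint16_t size;
	int rc;

	req.fw_session_id = htole32(fw_session_id);
	req.flags = htole16(HWRM_TF_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX);
	/* OR together the pools of interest, or use TYPES_ALL. */
	req.types = htole16(HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_WC_TCAM |
	    HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_EM |
	    HWRM_TF_RESC_USAGE_QUERY_INPUT_TYPES_COUNTER);

	rc = bnxt_hwrm_send_resp(softc, &req, sizeof(req), &resp, sizeof(resp));
	if (rc != 0)
		return (rc);

	/* Never copy more than the firmware says it wrote. */
	size = le16toh(resp.size);
	if (size > sizeof(resp.data) || size > outlen)
		return (EINVAL);
	memcpy(out, resp.data, size);
	return (0);
}
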
+
/****************************
* hwrm_tfc_tbl_scope_qcaps *
****************************/
@@ -51722,7 +56771,7 @@ typedef struct hwrm_tfc_tbl_scope_qcaps_output {
uint8_t tbl_scope_capable;
/*
* log2 of the number of lookup static buckets that a table scope can
- * support. This field is only valid if tbl_scope_capable is not zero.
+ * support. This field is only valid if tbl_scope_capable is not zero.
*/
uint8_t max_lkup_static_buckets_exp;
/* unused. */
@@ -51752,7 +56801,7 @@ typedef struct hwrm_tfc_tbl_scope_qcaps_output {
* a fid_cnt of 0 that also means that the table scope ID has
* been freed.
*/
-/* hwrm_tfc_tbl_scope_id_alloc_input (size:192b/24B) */
+/* hwrm_tfc_tbl_scope_id_alloc_input (size:256b/32B) */
typedef struct hwrm_tfc_tbl_scope_id_alloc_input {
/* The HWRM command request type. */
@@ -51783,26 +56832,37 @@ typedef struct hwrm_tfc_tbl_scope_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
/* The maximum number of pools for this table scope. */
uint16_t max_pools;
/* Non-zero if this table scope is shared. */
uint8_t shared;
/*
* The size of the lookup pools per direction expressed as
- * log2(max_records/max_pools). That is, size=2^exp.
+ * log2(max_records/max_pools). That is, size=2^exp.
*
* Array is indexed by enum cfa_dir.
*/
uint8_t lkup_pool_sz_exp[2];
/*
* The size of the action pools per direction expressed as
- * log2(max_records/max_pools). That is, size=2^exp.
+ * log2(max_records/max_pools). That is, size=2^exp.
*
* Array is indexed by enum cfa_dir.
*/
uint8_t act_pool_sz_exp[2];
+ /* Application type. 0 (AFM), 1 (TF) */
+ uint8_t app_type;
/* unused. */
- uint8_t unused0;
+ uint8_t unused0[6];
} hwrm_tfc_tbl_scope_id_alloc_input_t, *phwrm_tfc_tbl_scope_id_alloc_input_t;
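
The pool-size fields above carry an exponent rather than a count. A standalone sketch of deriving it, assuming max_records and max_pools are powers of two so the 2^exp encoding is exact:

#include <stdint.h>

/* exp = log2(max_records / max_pools), i.e. pool size = 2^exp records. */
static uint8_t
tbl_scope_pool_sz_exp(uint32_t max_records, uint16_t max_pools)
{
	uint32_t per_pool = max_records / max_pools;
	uint8_t exp = 0;

	while ((per_pool >>= 1) != 0)
		exp++;
	return (exp);
}
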
/* hwrm_tfc_tbl_scope_id_alloc_output (size:128b/16B) */
@@ -51886,7 +56946,7 @@ typedef struct hwrm_tfc_tbl_scope_config_input {
/*
* The number of minimum sized lkup records per direction.
* In this usage, records are the minimum lookup memory
- * allocation unit in a table scope. This value is the total
+ * allocation unit in a table scope. This value is the total
* memory required for buckets and entries.
*
* Array is indexed by enum cfa_dir.
@@ -52051,10 +57111,19 @@ typedef struct hwrm_tfc_tbl_scope_fid_add_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
/* The table scope ID. */
uint8_t tsid;
/* unused. */
- uint8_t unused0[7];
+ uint8_t unused0[5];
} hwrm_tfc_tbl_scope_fid_add_input_t, *phwrm_tfc_tbl_scope_fid_add_input_t;
/* hwrm_tfc_tbl_scope_fid_add_output (size:128b/16B) */
@@ -52120,10 +57189,19 @@ typedef struct hwrm_tfc_tbl_scope_fid_rem_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
/* The table scope ID. */
uint8_t tsid;
/* unused. */
- uint8_t unused0[7];
+ uint8_t unused0[5];
} hwrm_tfc_tbl_scope_fid_rem_input_t, *phwrm_tfc_tbl_scope_fid_rem_input_t;
/* hwrm_tfc_tbl_scope_fid_rem_output (size:128b/16B) */
@@ -52152,176 +57230,6 @@ typedef struct hwrm_tfc_tbl_scope_fid_rem_output {
uint8_t valid;
} hwrm_tfc_tbl_scope_fid_rem_output_t, *phwrm_tfc_tbl_scope_fid_rem_output_t;
-/*********************************
- * hwrm_tfc_tbl_scope_pool_alloc *
- *********************************/
-
-
-/* hwrm_tfc_tbl_scope_pool_alloc_input (size:192b/24B) */
-
-typedef struct hwrm_tfc_tbl_scope_pool_alloc_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Table Scope ID */
- uint8_t tsid;
- /* Control flags. Direction and type. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_LAST HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_DIR_TX
- /* Indicates the table type. */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE UINT32_C(0x2)
- /* Lookup table */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_LOOKUP (UINT32_C(0x0) << 1)
- /* Action table */
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_ACTION (UINT32_C(0x1) << 1)
- #define HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_LAST HWRM_TFC_TBL_SCOPE_POOL_ALLOC_INPUT_FLAGS_TYPE_ACTION
- /* Unused */
- uint8_t unused[6];
-} hwrm_tfc_tbl_scope_pool_alloc_input_t, *phwrm_tfc_tbl_scope_pool_alloc_input_t;
-
-/* hwrm_tfc_tbl_scope_pool_alloc_output (size:128b/16B) */
-
-typedef struct hwrm_tfc_tbl_scope_pool_alloc_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* Pool ID */
- uint16_t pool_id;
- /* Pool size exponent. An exponent of 0 indicates a failure. */
- uint8_t pool_sz_exp;
- /* unused. */
- uint8_t unused1[4];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tfc_tbl_scope_pool_alloc_output_t, *phwrm_tfc_tbl_scope_pool_alloc_output_t;
-
-/********************************
- * hwrm_tfc_tbl_scope_pool_free *
- ********************************/
-
-
-/* hwrm_tfc_tbl_scope_pool_free_input (size:192b/24B) */
-
-typedef struct hwrm_tfc_tbl_scope_pool_free_input {
- /* The HWRM command request type. */
- uint16_t req_type;
- /*
- * The completion ring to send the completion event on. This should
- * be the NQ ID returned from the `nq_alloc` HWRM command.
- */
- uint16_t cmpl_ring;
- /*
- * The sequence ID is used by the driver for tracking multiple
- * commands. This ID is treated as opaque data by the firmware and
- * the value is returned in the `hwrm_resp_hdr` upon completion.
- */
- uint16_t seq_id;
- /*
- * The target ID of the command:
- * * 0x0-0xFFF8 - The function ID
- * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
- * * 0xFFFD - Reserved for user-space HWRM interface
- * * 0xFFFF - HWRM
- */
- uint16_t target_id;
- /*
- * A physical address pointer pointing to a host buffer that the
- * command's response data will be written. This can be either a host
- * physical address (HPA) or a guest physical address (GPA) and must
- * point to a physically contiguous block of memory.
- */
- uint64_t resp_addr;
- /* Table Scope ID */
- uint8_t tsid;
- /* Control flags. Direction and type. */
- uint8_t flags;
- /* Indicates the flow direction. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR UINT32_C(0x1)
- /* If this bit set to 0, then it indicates rx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
- /* If this bit is set to 1, then it indicates tx flow. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_LAST HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_DIR_TX
- /* Indicates the table type. */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE UINT32_C(0x2)
- /* Lookup table */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_LOOKUP (UINT32_C(0x0) << 1)
- /* Action table */
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_ACTION (UINT32_C(0x1) << 1)
- #define HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_LAST HWRM_TFC_TBL_SCOPE_POOL_FREE_INPUT_FLAGS_TYPE_ACTION
- /* Pool ID */
- uint16_t pool_id;
- /* Unused */
- uint8_t unused[4];
-} hwrm_tfc_tbl_scope_pool_free_input_t, *phwrm_tfc_tbl_scope_pool_free_input_t;
-
-/* hwrm_tfc_tbl_scope_pool_free_output (size:128b/16B) */
-
-typedef struct hwrm_tfc_tbl_scope_pool_free_output {
- /* The specific error status for the command. */
- uint16_t error_code;
- /* The HWRM command request type. */
- uint16_t req_type;
- /* The sequence ID from the original command. */
- uint16_t seq_id;
- /* The length of the response data in number of bytes. */
- uint16_t resp_len;
- /* unused. */
- uint8_t unused1[7];
- /*
- * This field is used in Output records to indicate that the
- * output is completely written to RAM. This field should be
- * read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
- * response to an internal processor, the order of writes has
- * to be such that this field is written last.
- */
- uint8_t valid;
-} hwrm_tfc_tbl_scope_pool_free_output_t, *phwrm_tfc_tbl_scope_pool_free_output_t;
-
/*****************************
* hwrm_tfc_session_id_alloc *
*****************************/
@@ -52330,11 +57238,11 @@ typedef struct hwrm_tfc_tbl_scope_pool_free_output {
/*
* Allocate a TFC session. Requests the firmware to allocate a TFC
* session identifier and associate a forwarding function with the
- * session. Though there's not an explicit matching free for a session
+ * session. Though there's not an explicit matching free for a session
 * id alloc, disassociating the last fid from a session id (fid_cnt goes
 * to 0) will result in this session id being freed automatically.
*/
-/* hwrm_tfc_session_id_alloc_input (size:128b/16B) */
+/* hwrm_tfc_session_id_alloc_input (size:192b/24B) */
typedef struct hwrm_tfc_session_id_alloc_input {
/* The HWRM command request type. */
@@ -52365,6 +57273,17 @@ typedef struct hwrm_tfc_session_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /* Unused field */
+ uint8_t unused0[6];
} hwrm_tfc_session_id_alloc_input_t, *phwrm_tfc_session_id_alloc_input_t;
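
The fid convention introduced across these requests is easiest to see in miniature: 0xffff targets the calling function itself, while a trusted PF/VF may instead name one of its non-trusted VFs. TF_FID_SELF is an invented name for the 0xffff sentinel, used here only for readability.

#include <stdbool.h>
#include <stdint.h>

#define TF_FID_SELF	0xffffU	/* hypothetical alias for 0xffff */

/* Fill fid: the caller itself, or a specific non-trusted VF of this PF. */
static void
tfc_set_fid(hwrm_tfc_session_id_alloc_input_t *req, bool self, uint16_t vf_fid)
{
	req->fid = htole16(self ? TF_FID_SELF : vf_fid);
}
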
/* hwrm_tfc_session_id_alloc_output (size:128b/16B) */
@@ -52437,12 +57356,21 @@ typedef struct hwrm_tfc_session_fid_add_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session identifier for the session created by the
* firmware.
*/
uint16_t sid;
/* Unused field */
- uint8_t unused0[6];
+ uint8_t unused0[4];
} hwrm_tfc_session_fid_add_input_t, *phwrm_tfc_session_fid_add_input_t;
/* hwrm_tfc_session_fid_add_output (size:128b/16B) */
@@ -52515,12 +57443,21 @@ typedef struct hwrm_tfc_session_fid_rem_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session identifier for the session created by the
* firmware.
*/
uint16_t sid;
/* Unused field */
- uint8_t unused0[6];
+ uint8_t unused0[4];
} hwrm_tfc_session_fid_rem_input_t, *phwrm_tfc_session_fid_rem_input_t;
/* hwrm_tfc_session_fid_rem_output (size:128b/16B) */
@@ -52593,6 +57530,15 @@ typedef struct hwrm_tfc_ident_alloc_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session identifier for the session created by the
* firmware. Will be used to track this identifier.
*/
@@ -52621,7 +57567,7 @@ typedef struct hwrm_tfc_ident_alloc_input {
#define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
#define HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_LAST HWRM_TFC_IDENT_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
/* Unused field */
- uint8_t unused0[3];
+ uint8_t unused0;
} hwrm_tfc_ident_alloc_input_t, *phwrm_tfc_ident_alloc_input_t;
/* hwrm_tfc_ident_alloc_output (size:128b/16B) */
@@ -52696,6 +57642,15 @@ typedef struct hwrm_tfc_ident_free_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session identifier for the session created by the
* firmware. Will be used to validate this request.
*/
@@ -52716,8 +57671,6 @@ typedef struct hwrm_tfc_ident_free_input {
#define HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_LAST HWRM_TFC_IDENT_FREE_INPUT_FLAGS_DIR_TX
/* The resource identifier to be freed */
uint16_t ident_id;
- /* Reserved */
- uint8_t unused0[2];
} hwrm_tfc_ident_free_input_t, *phwrm_tfc_ident_free_input_t;
/* hwrm_tfc_ident_free_output (size:128b/16B) */
@@ -52781,6 +57734,15 @@ typedef struct hwrm_tfc_idx_tbl_alloc_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session id for the session created by the
* firmware. Will be used to track this index table entry
* only if track type is track_type_sid.
@@ -52796,8 +57758,13 @@ typedef struct hwrm_tfc_idx_tbl_alloc_input {
#define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
#define HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_LAST HWRM_TFC_IDX_TBL_ALLOC_INPUT_FLAGS_DIR_TX
/*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * This field is blktype specific.
+ * For blktype CFA - CFA resource subtype. For definitions,
+ * please see cfa_v3/include/cfa_resources.h.
+ * For blktype rxp, re_gparse, te_gparse -
+ * Tunnel Type. A value of zero (or unknown) means alloc. A known
+ * value (previously allocated dynamic UPAR for tunnel_type) means
+ * realloc. Will fail if the realloc is for a previously allocated FID.
*/
uint8_t subtype;
/* Describes the type of tracking id to be used */
@@ -52809,8 +57776,17 @@ typedef struct hwrm_tfc_idx_tbl_alloc_input {
/* Tracked by function id */
#define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
#define HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_LAST HWRM_TFC_IDX_TBL_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Reserved */
- uint8_t unused0[3];
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_LAST HWRM_TFC_IDX_TBL_ALLOC_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
} hwrm_tfc_idx_tbl_alloc_input_t, *phwrm_tfc_idx_tbl_alloc_input_t;
/* hwrm_tfc_idx_tbl_alloc_output (size:128b/16B) */
@@ -52879,6 +57855,15 @@ typedef struct hwrm_tfc_idx_tbl_alloc_set_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session id for the session created by the
* firmware. Will be used to track this index table entry
* only if track type is track_type_sid.
@@ -52899,8 +57884,13 @@ typedef struct hwrm_tfc_idx_tbl_alloc_set_input {
*/
#define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_FLAGS_DMA UINT32_C(0x2)
/*
- * CFA resource subtype. For definitions, please see
- * cfa_v3/include/cfa_resources.h.
+ * This field is blktype-specific.
+ * For blktype CFA - CFA resource subtype. For definitions,
+ * please see cfa_v3/include/cfa_resources.h.
+ * For blktype rxp, re_gparse, te_gparse -
+ * Tunnel Type. A value of zero (or unknown) means alloc. A known
+ * value (a previously allocated dynamic UPAR for the tunnel_type)
+ * means realloc. The request will fail if the realloc is for a
+ * previously allocated FID.
*/
uint8_t subtype;
/* Describes the type of tracking id to be used */
@@ -52912,17 +57902,28 @@ typedef struct hwrm_tfc_idx_tbl_alloc_set_input {
/* Tracked by function id */
#define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
#define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_LAST HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
- /* Reserved */
- uint8_t unused0;
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_LAST HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
/* The size of the index table entry in bytes. */
uint16_t data_size;
+ /* Reserved */
+ uint8_t unused1[6];
/* The location of the dma buffer */
uint64_t dma_addr;
/*
- * Index table data located at offset 0. If dma bit is set,
+ * Index table data located at offset 0. If dma bit is set,
* then this field contains the DMA buffer pointer.
*/
- uint8_t dev_data[104];
+ uint8_t dev_data[96];
} hwrm_tfc_idx_tbl_alloc_set_input_t, *phwrm_tfc_idx_tbl_alloc_set_input_t;
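With the new fid and unused1 fields, the inline payload shrinks from 104 to 96 bytes, so callers that do not set the DMA flag must bound data_size accordingly. A hedged sketch, reusing the helpers above:

/* Set an entry inline; the payload is now capped at 96 bytes. */
static int
tfc_idx_tbl_alloc_set_inline(uint16_t sid, const uint8_t *data, uint16_t len)
{
	hwrm_tfc_idx_tbl_alloc_set_input_t req;

	if (len > sizeof(req.dev_data))
		return (EINVAL);
	memset(&req, 0, sizeof(req));
	req.fid = htole16(0xffff);
	req.sid = htole16(sid);
	/* FLAGS_DMA left clear: the entry travels inline in dev_data[]. */
	req.blktype = HWRM_TFC_IDX_TBL_ALLOC_SET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.data_size = htole16(len);
	memcpy(req.dev_data, data, len);
	return (bnxt_hwrm_send(HWRM_TFC_IDX_TBL_ALLOC_SET, &req, sizeof(req)));
}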
/* hwrm_tfc_idx_tbl_alloc_set_output (size:128b/16B) */
@@ -53010,6 +58011,15 @@ typedef struct hwrm_tfc_idx_tbl_set_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
@@ -53021,13 +58031,26 @@ typedef struct hwrm_tfc_idx_tbl_set_input {
uint16_t idx_tbl_id;
/* The size of the index table entry in bytes. */
uint16_t data_size;
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_LAST HWRM_TFC_IDX_TBL_SET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* unused. */
+ uint8_t unused0[5];
/* The location of the dma buffer */
uint64_t dma_addr;
/*
- * Index table data located at offset 0. If dma bit is set,
+ * Index table data located at offset 0. If dma bit is set,
* then this field contains the DMA buffer pointer.
*/
- uint8_t dev_data[104];
+ uint8_t dev_data[96];
} hwrm_tfc_idx_tbl_set_input_t, *phwrm_tfc_idx_tbl_set_input_t;
/* hwrm_tfc_idx_tbl_set_output (size:128b/16B) */
@@ -53059,7 +58082,7 @@ typedef struct hwrm_tfc_idx_tbl_set_output {
************************/
-/* hwrm_tfc_idx_tbl_get_input (size:256b/32B) */
+/* hwrm_tfc_idx_tbl_get_input (size:320b/40B) */
typedef struct hwrm_tfc_idx_tbl_get_input {
/* The HWRM command request type. */
@@ -53110,6 +58133,15 @@ typedef struct hwrm_tfc_idx_tbl_get_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
@@ -53121,6 +58153,19 @@ typedef struct hwrm_tfc_idx_tbl_get_input {
uint16_t idx_tbl_id;
/* The size of the index table entry buffer in bytes. */
uint16_t buffer_size;
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE gparse block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE gparse block type */
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_LAST HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* unused. */
+ uint8_t unused0[5];
/* The location of the response dma buffer */
uint64_t dma_addr;
} hwrm_tfc_idx_tbl_get_input_t, *phwrm_tfc_idx_tbl_get_input_t;
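The get request now carries blktype and returns the entry through a response DMA buffer sized by buffer_size. A sketch (buf_pa is assumed to be a physically contiguous bus address obtained elsewhere):

/* Read back an entry; firmware DMAs it into a caller-supplied buffer. */
static int
tfc_idx_tbl_get(uint16_t sid, uint16_t idx_tbl_id, uint64_t buf_pa,
    uint16_t buf_len)
{
	hwrm_tfc_idx_tbl_get_input_t req;

	memset(&req, 0, sizeof(req));
	req.fid = htole16(0xffff);
	req.sid = htole16(sid);
	req.idx_tbl_id = htole16(idx_tbl_id);
	req.buffer_size = htole16(buf_len);
	req.blktype = HWRM_TFC_IDX_TBL_GET_INPUT_BLKTYPE_BLKTYPE_CFA;
	req.dma_addr = htole64(buf_pa);	/* physically contiguous buffer */
	return (bnxt_hwrm_send(HWRM_TFC_IDX_TBL_GET, &req, sizeof(req)));
}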
@@ -53156,7 +58201,7 @@ typedef struct hwrm_tfc_idx_tbl_get_output {
*************************/
-/* hwrm_tfc_idx_tbl_free_input (size:192b/24B) */
+/* hwrm_tfc_idx_tbl_free_input (size:256b/32B) */
typedef struct hwrm_tfc_idx_tbl_free_input {
/* The HWRM command request type. */
@@ -53202,14 +58247,34 @@ typedef struct hwrm_tfc_idx_tbl_free_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
uint16_t sid;
/* Index table id to be freed by the firmware. */
uint16_t idx_tbl_id;
- /* Reserved */
- uint8_t unused0[2];
+ /* Specifies which block this idx table alloc request is for */
+ uint8_t blktype;
+ /* CFA block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_CFA UINT32_C(0x0)
+ /* RXP block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_RXP UINT32_C(0x1)
+ /* RE parse block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_RE_GPARSE UINT32_C(0x2)
+ /* TE parse block type */
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE UINT32_C(0x3)
+ #define HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_LAST HWRM_TFC_IDX_TBL_FREE_INPUT_BLKTYPE_BLKTYPE_TE_GPARSE
+ /* unused. */
+ uint8_t unused0[7];
} hwrm_tfc_idx_tbl_free_input_t, *phwrm_tfc_idx_tbl_free_input_t;
/* hwrm_tfc_idx_tbl_free_output (size:128b/16B) */
@@ -53300,6 +58365,15 @@ typedef struct hwrm_tfc_global_id_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ /*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
/* Firmware session id returned when HWRM_TF_SESSION_OPEN is sent. */
uint16_t sid;
/* Global domain id. */
@@ -53310,8 +58384,6 @@ typedef struct hwrm_tfc_global_id_alloc_input {
* request entries.
*/
uint16_t req_cnt;
- /* unused. */
- uint8_t unused0[2];
/*
* This is the DMA address for the request input data array
* buffer. Array is of tfc_global_id_hwrm_req type. Size of the
@@ -53398,6 +58470,15 @@ typedef struct hwrm_tfc_tcam_set_input {
*/
uint64_t resp_addr;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
@@ -53425,7 +58506,7 @@ typedef struct hwrm_tfc_tcam_set_input {
*/
uint8_t subtype;
/* unused. */
- uint8_t unused0[6];
+ uint8_t unused0[4];
/* The location of the response dma buffer */
uint64_t dma_addr;
/*
@@ -53452,7 +58533,7 @@ typedef struct hwrm_tfc_tcam_set_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -53510,14 +58591,21 @@ typedef struct hwrm_tfc_tcam_get_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
uint16_t sid;
/* Logical TCAM ID. */
uint16_t tcam_id;
- /* unused. */
- uint8_t unused0[2];
} hwrm_tfc_tcam_get_input_t, *phwrm_tfc_tcam_get_input_t;
/* hwrm_tfc_tcam_get_output (size:2368b/296B) */
@@ -53548,7 +58636,7 @@ typedef struct hwrm_tfc_tcam_get_output {
* This field is used in Output records to indicate that the
* output is completely written to RAM. This field should be
* read as '1' to indicate that the output has been
- * completely written. When writing a command completion or
+ * completely written. When writing a command completion or
* response to an internal processor, the order of writes has
* to be such that this field is written last.
*/
@@ -53606,6 +58694,15 @@ typedef struct hwrm_tfc_tcam_alloc_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session id for the session created by the
* firmware. Will be used to track this index table entry
* only if track type is track_type_sid.
@@ -53625,7 +58722,7 @@ typedef struct hwrm_tfc_tcam_alloc_input {
#define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
#define HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_LAST HWRM_TFC_TCAM_ALLOC_INPUT_TRACK_TYPE_TRACK_TYPE_FID
/* Unused. */
- uint8_t unused0[7];
+ uint8_t unused0[5];
} hwrm_tfc_tcam_alloc_input_t, *phwrm_tfc_tcam_alloc_input_t;
/* hwrm_tfc_tcam_alloc_output (size:128b/16B) */
@@ -53710,6 +58807,15 @@ typedef struct hwrm_tfc_tcam_alloc_set_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Unique session id for the session created by the
* firmware. Will be used to track this index table entry
* only if track type is track_type_sid.
@@ -53731,11 +58837,11 @@ typedef struct hwrm_tfc_tcam_alloc_set_input {
#define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
#define HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_LAST HWRM_TFC_TCAM_ALLOC_SET_INPUT_TRACK_TYPE_TRACK_TYPE_FID
/* Unused */
- uint8_t unused[5];
+ uint8_t unused[3];
/* The location of the response dma buffer */
uint64_t dma_addr;
/*
- * Index table data located at offset 0. If dma bit is set,
+ * Index table data located at offset 0. If dma bit is set,
* then this field contains the DMA buffer pointer.
*/
uint8_t dev_data[96];
@@ -53818,14 +58924,21 @@ typedef struct hwrm_tfc_tcam_free_input {
*/
uint8_t subtype;
/*
+ * Function ID.
+ * If running on a trusted VF or PF, the fid field can be used to
+ * specify that the function is a non-trusted VF of the parent PF.
+ * If this command is used for the target_id itself, this field is
+ * set to 0xffff. A non-trusted VF cannot specify a valid FID in this
+ * field.
+ */
+ uint16_t fid;
+ /*
* Session id associated with the firmware. Will be used
* for validation if the track type matches.
*/
uint16_t sid;
/* Logical TCAM ID. */
uint16_t tcam_id;
- /* Reserved */
- uint8_t unused0[2];
} hwrm_tfc_tcam_free_input_t, *phwrm_tfc_tcam_free_input_t;
/* hwrm_tfc_tcam_free_output (size:128b/16B) */
@@ -53852,6 +58965,371 @@ typedef struct hwrm_tfc_tcam_free_output {
uint8_t valid;
} hwrm_tfc_tcam_free_output_t, *phwrm_tfc_tcam_free_output_t;
+/***********************
+ * hwrm_tfc_if_tbl_set *
+ ***********************/
+
+
+/* hwrm_tfc_if_tbl_set_input (size:960b/120B) */
+
+typedef struct hwrm_tfc_if_tbl_set_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR UINT32_C(0x1)
+	/* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_LAST HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
+ uint8_t unused0[7];
+ /* Table data. */
+ uint8_t data[88];
+} hwrm_tfc_if_tbl_set_input_t, *phwrm_tfc_if_tbl_set_input_t;
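A sketch of populating the new IF table set request; here the table data travels inline in data[88], and the helpers above are reused:

/* Program one IF table entry for the tx direction. */
static int
tfc_if_tbl_set(uint16_t sid, uint16_t fid, uint8_t subtype, uint16_t index,
    const uint8_t *data, uint8_t len)
{
	hwrm_tfc_if_tbl_set_input_t req;

	if (len > sizeof(req.data))
		return (EINVAL);
	memset(&req, 0, sizeof(req));
	req.sid = htole16(sid);
	req.fid = htole16(fid);
	req.subtype = subtype;
	req.flags = HWRM_TFC_IF_TBL_SET_INPUT_FLAGS_DIR_TX;
	req.index = htole16(index);
	req.data_size = len;
	memcpy(req.data, data, len);
	return (bnxt_hwrm_send(HWRM_TFC_IF_TBL_SET, &req, sizeof(req)));
}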
+
+/* hwrm_tfc_if_tbl_set_output (size:128b/16B) */
+
+typedef struct hwrm_tfc_if_tbl_set_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Reserved */
+ uint8_t unused0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tfc_if_tbl_set_output_t, *phwrm_tfc_if_tbl_set_output_t;
+
+/***********************
+ * hwrm_tfc_if_tbl_get *
+ ***********************/
+
+
+/* hwrm_tfc_if_tbl_get_input (size:256b/32B) */
+
+typedef struct hwrm_tfc_if_tbl_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR UINT32_C(0x1)
+	/* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_LAST HWRM_TFC_IF_TBL_GET_INPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
+ uint8_t unused0[7];
+} hwrm_tfc_if_tbl_get_input_t, *phwrm_tfc_if_tbl_get_input_t;
+
+/* hwrm_tfc_if_tbl_get_output (size:960b/120B) */
+
+typedef struct hwrm_tfc_if_tbl_get_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /*
+ * Subtype identifying IF table type. See
+ * cfa_v3/include/cfa_resources.h.
+ */
+ uint8_t subtype;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR UINT32_C(0x1)
+	/* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_LAST HWRM_TFC_IF_TBL_GET_OUTPUT_FLAGS_DIR_TX
+ /* Table entry index. */
+ uint16_t index;
+ /* Size of data in data field. */
+ uint8_t data_size;
+ /* Reserved */
+ uint8_t unused0[7];
+ /* Table data. */
+ uint8_t data[88];
+ /* Reserved */
+ uint8_t unused1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tfc_if_tbl_get_output_t, *phwrm_tfc_if_tbl_get_output_t;
+
+/*********************************
+ * hwrm_tfc_tbl_scope_config_get *
+ *********************************/
+
+
+/* TruFlow command to return whether the table scope is fully configured. */
+/* hwrm_tfc_tbl_scope_config_get_input (size:192b/24B) */
+
+typedef struct hwrm_tfc_tbl_scope_config_get_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* The table scope ID. */
+ uint8_t tsid;
+ /* unused. */
+ uint8_t unused0[7];
+} hwrm_tfc_tbl_scope_config_get_input_t, *phwrm_tfc_tbl_scope_config_get_input_t;
+
+/* hwrm_tfc_tbl_scope_config_get_output (size:128b/16B) */
+
+typedef struct hwrm_tfc_tbl_scope_config_get_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* If set to 1, the table scope is configured. */
+ uint8_t configured;
+ /* unused. */
+ uint8_t unused0[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tfc_tbl_scope_config_get_output_t, *phwrm_tfc_tbl_scope_config_get_output_t;
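A caller would typically poll this command until configured reads 1. A sketch using the hypothetical request/response helper introduced earlier:

/* Returns 0 with *configured set, or a transport error. */
static int
tfc_tbl_scope_configured(uint8_t tsid, int *configured)
{
	hwrm_tfc_tbl_scope_config_get_input_t req;
	hwrm_tfc_tbl_scope_config_get_output_t resp;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.tsid = tsid;
	rc = bnxt_hwrm_xact(HWRM_TFC_TBL_SCOPE_CONFIG_GET, &req, sizeof(req),
	    &resp, sizeof(resp));
	if (rc == 0)
		*configured = (resp.configured == 1);
	return (rc);
}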
+
+/*****************************
+ * hwrm_tfc_resc_usage_query *
+ *****************************/
+
+
+/* hwrm_tfc_resc_usage_query_input (size:256b/32B) */
+
+typedef struct hwrm_tfc_resc_usage_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Session identifier. */
+ uint16_t sid;
+ /* Function identifier. */
+ uint16_t fid;
+ /* Control flags. */
+ uint8_t flags;
+ /* Indicates the flow direction. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR UINT32_C(0x1)
+	/* If this bit is set to 0, then it indicates rx flow. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX UINT32_C(0x0)
+ /* If this bit is set to 1, then it indicates tx flow. */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX UINT32_C(0x1)
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_LAST HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_TX
+ /* Describes the type of tracking id to be used */
+ uint8_t track_type;
+ /* Invalid track type */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_INVALID UINT32_C(0x0)
+ /* Tracked by session id */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_SID UINT32_C(0x1)
+ /* Tracked by function id */
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_FID UINT32_C(0x2)
+ #define HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_LAST HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_FID
+ /* Size of data in data field. */
+ uint16_t data_size;
+ /* unused */
+ uint8_t unused1[8];
+} hwrm_tfc_resc_usage_query_input_t, *phwrm_tfc_resc_usage_query_input_t;
+
+/* hwrm_tfc_resc_usage_query_output (size:960b/120B) */
+
+typedef struct hwrm_tfc_resc_usage_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Response code. */
+ uint32_t resp_code;
+ /* Size of data in data field. */
+ uint16_t data_size;
+ /* unused */
+ uint16_t unused0;
+ /* Response data. */
+ uint8_t data[96];
+ /* unused */
+ uint8_t unused1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field
+ * is written last.
+ */
+ uint8_t valid;
+} hwrm_tfc_resc_usage_query_output_t, *phwrm_tfc_resc_usage_query_output_t;
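A sketch of issuing the usage query and copying out the payload, which is bounded by the data_size echoed in the response; helpers and assumptions as above:

/* Query per-direction resource usage for a tracked function. */
static int
tfc_resc_usage_query(uint16_t sid, uint16_t fid, uint8_t *buf, uint16_t buflen)
{
	hwrm_tfc_resc_usage_query_input_t req;
	hwrm_tfc_resc_usage_query_output_t resp;
	uint16_t n;
	int rc;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.sid = htole16(sid);
	req.fid = htole16(fid);
	req.flags = HWRM_TFC_RESC_USAGE_QUERY_INPUT_FLAGS_DIR_RX;
	req.track_type = HWRM_TFC_RESC_USAGE_QUERY_INPUT_TRACK_TYPE_TRACK_TYPE_FID;
	req.data_size = htole16(buflen);
	rc = bnxt_hwrm_xact(HWRM_TFC_RESC_USAGE_QUERY, &req, sizeof(req),
	    &resp, sizeof(resp));
	if (rc != 0)
		return (rc);
	/* Clamp to both the caller's buffer and the 96-byte data field. */
	n = le16toh(resp.data_size);
	if (n > sizeof(resp.data))
		n = sizeof(resp.data);
	if (n > buflen)
		n = buflen;
	memcpy(buf, resp.data, n);
	return (0);
}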
+
/******************************
* hwrm_tunnel_dst_port_query *
******************************/
@@ -53891,27 +59369,56 @@ typedef struct hwrm_tunnel_dst_port_query_input {
/* Tunnel Type. */
uint8_t tunnel_type;
/* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
/* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
/* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ /*
+	 * Enhanced Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
+ */
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_IPGRE_V1 UINT32_C(0xa)
/* Use fixed layer 2 ether type of 0xFFFF */
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_L2_ETYPE UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
+ /*
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
+ */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
/* Custom GRE uses UPAR to parse customized GRE packets */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
/* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
/* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
/* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
#define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE UINT32_C(0x10)
- #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_VXLAN_GPE
- uint8_t unused_0[7];
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_GRE UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_QUERY_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ /*
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
+ */
+ uint8_t tunnel_next_proto;
+ uint8_t unused_0[6];
} hwrm_tunnel_dst_port_query_input_t, *phwrm_tunnel_dst_port_query_input_t;
/* hwrm_tunnel_dst_port_query_output (size:128b/16B) */
@@ -53966,12 +59473,24 @@ typedef struct hwrm_tunnel_dst_port_query_output {
#define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR6 UINT32_C(0x40)
/* This bit will be '1' when UPAR7 is IN_USE */
#define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_UPAR_IN_USE_UPAR7 UINT32_C(0x80)
- uint8_t unused_0[2];
+ /*
+	 * This field is used to convey the status of non-UDP-port-based
+	 * tunnel parsing at the chip level and at the function level.
+ */
+ uint8_t status;
+ /* This bit will be '1' when tunnel parsing is enabled globally. */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_CHIP_LEVEL UINT32_C(0x1)
+ /*
+ * This bit will be '1' when tunnel parsing is enabled
+ * on the corresponding function.
+ */
+ #define HWRM_TUNNEL_DST_PORT_QUERY_OUTPUT_STATUS_FUNC_LEVEL UINT32_C(0x2)
+ uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54016,27 +59535,58 @@ typedef struct hwrm_tunnel_dst_port_alloc_input {
/* Tunnel Type. */
uint8_t tunnel_type;
/* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
/* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
/* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ /*
+	 * Enhanced Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
+ */
#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_IPGRE_V1 UINT32_C(0xa)
/* Use fixed layer 2 ether type of 0xFFFF */
#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_L2_ETYPE UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
- /* Custom GRE uses UPAR to parse customized GRE packets. This is not supported. */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
+ /*
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
+ */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
+ /*
+ * Custom GRE uses UPAR to parse customized GRE packets. This is not
+ * supported.
+ */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
/* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
/* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
/* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
#define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE UINT32_C(0x10)
- #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN_GPE
- uint8_t unused_0;
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GRE UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ /*
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
+ */
+ uint8_t tunnel_next_proto;
/*
* This field represents the value of L4 destination port used
* for the given tunnel type. This field is valid for
@@ -54048,7 +59598,7 @@ typedef struct hwrm_tunnel_dst_port_alloc_input {
* A value of 0 shall fail the command.
*/
uint16_t tunnel_dst_port_val;
- uint8_t unused_1[4];
+ uint8_t unused_0[4];
} hwrm_tunnel_dst_port_alloc_input_t, *phwrm_tunnel_dst_port_alloc_input_t;
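A sketch of allocating a VXLAN destination port under the widened tunnel-type enum. The byte order used for tunnel_dst_port_val is an assumption here; the field note in the full header governs:

/* Allocate a VXLAN L4 destination port. */
static int
tunnel_dst_port_alloc_vxlan(uint16_t udp_port)
{
	hwrm_tunnel_dst_port_alloc_input_t req;

	memset(&req, 0, sizeof(req));
	req.tunnel_type = HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN;
	req.tunnel_next_proto = 0;	/* not a GRE-style tunnel */
	/* Byte order assumed to be network order; a value of 0 fails. */
	req.tunnel_dst_port_val = htobe16(udp_port);
	return (bnxt_hwrm_send(HWRM_TUNNEL_DST_PORT_ALLOC, &req, sizeof(req)));
}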
/* hwrm_tunnel_dst_port_alloc_output (size:128b/16B) */
@@ -54063,8 +59613,8 @@ typedef struct hwrm_tunnel_dst_port_alloc_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * Identifier of a tunnel L4 destination port value. Only applies to tunnel
- * types that has l4 destination port parameters.
+ * Identifier of a tunnel L4 destination port value. Only applies to
+	 * tunnel types that have L4 destination port parameters.
*/
uint16_t tunnel_dst_port_id;
/* Error information */
@@ -54075,7 +59625,9 @@ typedef struct hwrm_tunnel_dst_port_alloc_output {
#define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ALLOCATED UINT32_C(0x1)
/* Out of resources error */
#define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_NO_RESOURCE UINT32_C(0x2)
- #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_LAST HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_NO_RESOURCE
+ /* Tunnel type is already enabled */
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED UINT32_C(0x3)
+ #define HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_LAST HWRM_TUNNEL_DST_PORT_ALLOC_OUTPUT_ERROR_INFO_ERR_ENABLED
/*
* This field represents the UPAR usage status.
* Available UPARs on wh+ are UPAR0 and UPAR1
@@ -54102,9 +59654,9 @@ typedef struct hwrm_tunnel_dst_port_alloc_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54149,33 +59701,64 @@ typedef struct hwrm_tunnel_dst_port_free_input {
/* Tunnel Type. */
uint8_t tunnel_type;
/* Virtual eXtensible Local Area Network (VXLAN) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN UINT32_C(0x1)
/* Generic Network Virtualization Encapsulation (Geneve) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE UINT32_C(0x5)
/* IPV4 over virtual eXtensible Local Area Network (IPV4oVXLAN) */
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_V4 UINT32_C(0x9)
- /* Enhance Generic Routing Encapsulation (GRE version 1) inside IP datagram payload */
+ /*
+	 * Enhanced Generic Routing Encapsulation (GRE version 1) inside IP
+ * datagram payload
+ */
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_IPGRE_V1 UINT32_C(0xa)
/* Use fixed layer 2 ether type of 0xFFFF */
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_L2_ETYPE UINT32_C(0xb)
- /* IPV6 over virtual eXtensible Local Area Network with GPE header (IPV6oVXLANGPE) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
- /* Custom GRE uses UPAR to parse customized GRE packets. This is not supported. */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
+ /*
+ * IPV6 over virtual eXtensible Local Area Network with GPE header
+ * (IPV6oVXLANGPE)
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE_V6 UINT32_C(0xc)
+ /*
+ * Custom GRE uses UPAR to parse customized GRE packets. This is not
+ * supported.
+ */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_CUSTOM_GRE UINT32_C(0xd)
/* Enhanced Common Packet Radio Interface (eCPRI) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ECPRI UINT32_C(0xe)
/* IPv6 Segment Routing (SRv6) */
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_SRV6 UINT32_C(0xf)
/* Generic Protocol Extension for VXLAN (VXLAN-GPE) */
#define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE UINT32_C(0x10)
- #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN_GPE
- uint8_t unused_0;
- /*
- * Identifier of a tunnel L4 destination port value. Only applies to tunnel
- * types that has l4 destination port parameters.
+ /* Generic Routing Encapsulation */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GRE UINT32_C(0x11)
+ /* ULP Dynamic UPAR tunnel */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR UINT32_C(0x12)
+ /* ULP Dynamic UPAR tunnel reserved 1 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 UINT32_C(0x13)
+ /* ULP Dynamic UPAR tunnel reserved 2 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 UINT32_C(0x14)
+ /* ULP Dynamic UPAR tunnel reserved 3 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 UINT32_C(0x15)
+ /* ULP Dynamic UPAR tunnel reserved 4 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 UINT32_C(0x16)
+ /* ULP Dynamic UPAR tunnel reserved 5 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 UINT32_C(0x17)
+ /* ULP Dynamic UPAR tunnel reserved 6 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 UINT32_C(0x18)
+ /* ULP Dynamic UPAR tunnel reserved 7 */
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 UINT32_C(0x19)
+ #define HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_LAST HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
+ /*
+ * This field is used to specify the next protocol value defined in the
+ * corresponding RFC spec for the applicable tunnel type.
+ */
+ uint8_t tunnel_next_proto;
+ /*
+ * Identifier of a tunnel L4 destination port value. Only applies to
+	 * tunnel types that have L4 destination port parameters.
*/
uint16_t tunnel_dst_port_id;
- uint8_t unused_1[4];
+ uint8_t unused_0[4];
} hwrm_tunnel_dst_port_free_input_t, *phwrm_tunnel_dst_port_free_input_t;
/* hwrm_tunnel_dst_port_free_output (size:128b/16B) */
@@ -54201,9 +59784,9 @@ typedef struct hwrm_tunnel_dst_port_free_output {
uint8_t unused_1[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54361,7 +59944,7 @@ typedef struct ctx_eng_stats {
***********************/
-/* hwrm_stat_ctx_alloc_input (size:256b/32B) */
+/* hwrm_stat_ctx_alloc_input (size:384b/48B) */
typedef struct hwrm_stat_ctx_alloc_input {
/* The HWRM command request type. */
@@ -54425,7 +60008,19 @@ typedef struct hwrm_stat_ctx_alloc_input {
* When this bit is set to '0', the statistics context shall be
* used for network traffic or engine traffic.
*/
- #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE UINT32_C(0x1)
+ /*
+ * When this bit is set to '1', the PF is requesting a duplicate
+ * host buffer used for VF statistics. The stat_ctx_id and
+ * alloc_seq_id fields must be populated when this bit is set to
+ * '1'. The stat_ctx_id indicates the VF statistics context that
+ * should be copied to this host buffer. The stat_ctx_id and
+ * alloc_seq_id should be copied from the vf_stat_change event
+ * received by the PF. This bit can only be set for a PF. An error
+ * is returned if a VF sets this bit. This bit is only supported if
+ * vf_stat_ejection_supported is '1' in func_qcaps.
+ */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_DUP_HOST_BUF UINT32_C(0x2)
uint8_t unused_0;
/*
* This is the size of the structure (ctx_hw_stats or
@@ -54433,6 +60028,27 @@ typedef struct hwrm_stat_ctx_alloc_input {
* for the periodic DMA updates.
*/
uint16_t stats_dma_length;
+ uint16_t flags;
+ /* This stats context uses the steering tag specified in the command. */
+ #define HWRM_STAT_CTX_ALLOC_INPUT_FLAGS_STEERING_TAG_VALID UINT32_C(0x1)
+ /*
+ * Steering tag to use for memory transactions from the periodic DMA
+ * updates. 'steering_tag_valid' should be set and 'steering_tag'
+ * should be specified, when the 'steering_tag_supported' bit is set
+ * under the 'flags_ext2' field of the hwrm_func_qcaps_output.
+ */
+ uint16_t steering_tag;
+ /*
+ * Only valid when dup_host_buf is '1'. This value should be copied
+ * from the vf_stat_change event.
+ */
+ uint32_t stat_ctx_id;
+ /*
+ * Only valid when dup_host_buf is '1'. This value should be copied
+ * from the vf_stat_change event.
+ */
+ uint16_t alloc_seq_id;
+ uint8_t unused_1[6];
} hwrm_stat_ctx_alloc_input_t, *phwrm_stat_ctx_alloc_input_t;
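A PF-side sketch of the new dup_host_buf flow combined with a steering tag. Note that stats_dma_addr and stat_ctx_flags name fields outside this excerpt and are assumptions, as is the availability of the feature bits in func_qcaps:

/* PF-only: mirror a VF statistics context into a duplicate host buffer. */
static int
stat_ctx_alloc_dup(uint64_t stats_pa, uint16_t dma_len,
    uint32_t vf_stat_ctx_id, uint16_t seq, uint16_t stag)
{
	hwrm_stat_ctx_alloc_input_t req;

	memset(&req, 0, sizeof(req));
	req.stats_dma_addr = htole64(stats_pa);	/* assumed field name */
	req.stats_dma_length = htole16(dma_len);
	req.stat_ctx_flags = HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_DUP_HOST_BUF;
	/* Both IDs are copied verbatim from the vf_stat_change event. */
	req.stat_ctx_id = htole32(vf_stat_ctx_id);
	req.alloc_seq_id = htole16(seq);
	req.flags = htole16(HWRM_STAT_CTX_ALLOC_INPUT_FLAGS_STEERING_TAG_VALID);
	req.steering_tag = htole16(stag);
	return (bnxt_hwrm_send(HWRM_STAT_CTX_ALLOC, &req, sizeof(req)));
}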
/* hwrm_stat_ctx_alloc_output (size:128b/16B) */
@@ -54451,9 +60067,9 @@ typedef struct hwrm_stat_ctx_alloc_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54516,9 +60132,9 @@ typedef struct hwrm_stat_ctx_free_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54626,9 +60242,9 @@ typedef struct hwrm_stat_ctx_query_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54740,9 +60356,9 @@ typedef struct hwrm_stat_ext_ctx_query_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54844,9 +60460,9 @@ typedef struct hwrm_stat_ctx_eng_query_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54907,9 +60523,9 @@ typedef struct hwrm_stat_ctx_clr_stats_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -54981,9 +60597,9 @@ typedef struct hwrm_pcie_qstats_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -55066,7 +60682,7 @@ typedef struct hwrm_stat_generic_qstats_input {
* The size of the generic statistics buffer passed in the
* generic_stat_host_addr in bytes.
* Firmware will not exceed this size when it DMAs the
- * statistics structure to the host. The actual DMA size
+ * statistics structure to the host. The actual DMA size
* will be returned in the response.
*/
uint16_t generic_stat_size;
@@ -55101,7 +60717,7 @@ typedef struct hwrm_stat_generic_qstats_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -55111,7 +60727,7 @@ typedef struct hwrm_stat_generic_qstats_output {
} hwrm_stat_generic_qstats_output_t, *phwrm_stat_generic_qstats_output_t;
/* Generic Statistic Format */
-/* generic_sw_hw_stats (size:1408b/176B) */
+/* generic_sw_hw_stats (size:1472b/184B) */
typedef struct generic_sw_hw_stats {
/*
@@ -55156,34 +60772,34 @@ typedef struct generic_sw_hw_stats {
/* Available completion flow control data credits. */
uint64_t pcie_credit_fc_cmpl_data_posted;
/*
- * Displays Time information of the longest completon time from any of
- * the 4 tags for the caller PF. The unit of time recorded is in
+	 * Displays time information of the longest completion time from any of
+ * the 4 tags for the caller PF. The unit of time recorded is in
* microseconds.
*/
uint64_t pcie_cmpl_longest;
/*
- * Displays Time information of the shortest completon time from any of
- * the 4 tags for the caller PF. The unit of time recorded is in
+	 * Displays time information of the shortest completion time from any
+ * of the 4 tags for the caller PF. The unit of time recorded is in
* microseconds.
*/
uint64_t pcie_cmpl_shortest;
/*
- * This field containts the total number of CFCQ 'misses' observed for
+ * This field contains the total number of CFCQ 'misses' observed for
* all the PF's.
*/
uint64_t cache_miss_count_cfcq;
/*
- * This field containts the total number of CFCS 'misses' observed for
+ * This field contains the total number of CFCS 'misses' observed for
* all the PF's.
*/
uint64_t cache_miss_count_cfcs;
/*
- * This field containts the total number of CFCC 'misses' observed for
+ * This field contains the total number of CFCC 'misses' observed for
* all the PF's.
*/
uint64_t cache_miss_count_cfcc;
/*
- * This field containts the total number of CFCM 'misses' observed
+ * This field contains the total number of CFCM 'misses' observed
* for all the PF's.
*/
uint64_t cache_miss_count_cfcm;
@@ -55205,8 +60821,107 @@ typedef struct generic_sw_hw_stats {
* the hardware based doorbell drop recovery feature.
*/
uint64_t hw_db_recov_dbs_recovered;
+ /*
+ * Total number of out of order doorbell messages dropped.
+ * This counter is only applicable for devices that support
+ * the hardware based doorbell drop recovery feature.
+ */
+ uint64_t hw_db_recov_oo_drop_count;
} generic_sw_hw_stats_t, *pgeneric_sw_hw_stats_t;
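A trivial accessor for the new counter, assuming the buffer was DMA'd little-endian as with the other generic stats:

/* Counters arrive little-endian in the DMA'd stats buffer. */
static uint64_t
db_recov_oo_drops(const generic_sw_hw_stats_t *st)
{
	return (le64toh(st->hw_db_recov_oo_drop_count));
}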
+/*****************************
+ * hwrm_stat_db_error_qstats *
+ *****************************/
+
+
+/* hwrm_stat_db_error_qstats_input (size:128b/16B) */
+
+typedef struct hwrm_stat_db_error_qstats_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+ * A physical address pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_stat_db_error_qstats_input_t, *phwrm_stat_db_error_qstats_input_t;
+
+/* hwrm_stat_db_error_qstats_output (size:320b/40B) */
+
+typedef struct hwrm_stat_db_error_qstats_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Specifies count of doorbells dropped due to RoCE SQs or L2
+	 * Tx Rings being in an invalid state.
+ */
+ uint32_t tx_db_drop_invalid_qp_state;
+ /*
+ * Specifies count of doorbells dropped due to RoCE RQs/SRQs or
+	 * L2 Rx Rings being used in an invalid state.
+ */
+ uint32_t rx_db_drop_invalid_rq_state;
+ /*
+ * Specifies count of doorbells dropped for any doorbell type
+ * due to formatting errors such as illegal doorbell message
+	 * type, index out of range, etc.
+ */
+ uint32_t tx_db_drop_format_error;
+ /*
+ * Specifies count of express mode doorbells dropped for any
+	 * doorbell type due to error conditions such as a DPI check
+	 * failure, a context load error, etc.
+ */
+ uint32_t express_db_dropped_misc_error;
+ /*
+ * Specifies count of express mode doorbells dropped due to
+ * RoCE SQ overflow.
+ */
+ uint32_t express_db_dropped_sq_overflow;
+ /*
+ * Specifies count of express mode doorbells dropped due to
+ * RoCE RQ overflow.
+ */
+ uint32_t express_db_dropped_rq_overflow;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} hwrm_stat_db_error_qstats_output_t, *phwrm_stat_db_error_qstats_output_t;
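A sketch of querying and logging two of the new doorbell-drop counters; the matching request-type constant is assumed to be defined alongside the struct, and the helpers above are reused:

/* Fetch and print the new doorbell-drop error counters. */
static void
log_db_drop_counters(void)
{
	hwrm_stat_db_error_qstats_input_t req;
	hwrm_stat_db_error_qstats_output_t resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	if (bnxt_hwrm_xact(HWRM_STAT_DB_ERROR_QSTATS, &req, sizeof(req),
	    &resp, sizeof(resp)) != 0)
		return;
	printf("tx drops (invalid QP state) %u, rx drops (invalid RQ state) %u\n",
	    le32toh(resp.tx_db_drop_invalid_qp_state),
	    le32toh(resp.rx_db_drop_invalid_rq_state));
}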
+
/*****************
* hwrm_fw_reset *
*****************/
@@ -55254,8 +60969,8 @@ typedef struct hwrm_fw_reset_input {
/* RoCE control processor */
#define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_ROCE UINT32_C(0x3)
/*
- * Host (in multi-host environment): This is only valid if requester is IPC.
- * Reinit host hardware resources and PCIe.
+ * Host (in multi-host environment): This is only valid if requester
+ * is IPC. Reinit host hardware resources and PCIe.
*/
#define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_HOST UINT32_C(0x4)
/*
@@ -55266,17 +60981,17 @@ typedef struct hwrm_fw_reset_input {
/* Reset all blocks of the chip (including all processors) */
#define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP UINT32_C(0x6)
/*
- * Host (in multi-host environment): This is only valid if requester is IPC.
- * Reinit host hardware resources.
+ * Host (in multi-host environment): This is only valid if requester
+ * is IPC. Reinit host hardware resources.
*/
#define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_HOST_RESOURCE_REINIT UINT32_C(0x7)
/*
- * Activate firmware that has been programmed to NVM. The
+ * Activate firmware that has been programmed to NVM. The
* activation is done in an impactless manner as part of the scheme
* where hwrm_fw_state_backup precedes the call, and
- * hwrm_fw_state_restore follows it. Before this call returns, FW
+ * hwrm_fw_state_restore follows it. Before this call returns, FW
* status is set to a non-0x8000 value to disambiguate reset pending
- * from reset complete. The reset process begins after this call
+ * from reset complete. The reset process begins after this call
* returns to ensure this HWRM has completed before reset begins.
*/
#define HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION UINT32_C(0x8)
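/*
 * A minimal sketch of the impactless-activation scheme described
 * above: backup, reset with IMPACTLESS_ACTIVATION, then restore. The
 * issue_hwrm() helper is hypothetical and the backup/restore requests
 * are elided to their request types (they also carry PBL data in
 * practice); status polling between steps is omitted.
 */
extern int issue_hwrm(uint16_t req_type, void *req, size_t req_len);

static int
impactless_activate_sketch(void)
{
	hwrm_fw_reset_input_t reset = {0};

	issue_hwrm(HWRM_FW_STATE_BACKUP, NULL, 0);	/* 1. save state */
	reset.req_type = htole16(HWRM_FW_RESET);
	reset.embedded_proc_type =
	    HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_IMPACTLESS_ACTIVATION;
	issue_hwrm(HWRM_FW_RESET, &reset, sizeof(reset)); /* 2. activate */
	/* 3. Poll FW status until ready again, then restore state. */
	return (issue_hwrm(HWRM_FW_STATE_RESTORE, NULL, 0));
}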
@@ -55301,8 +61016,9 @@ typedef struct hwrm_fw_reset_input {
uint8_t flags;
/*
* When this bit is '1', then the core firmware initiates
- * the reset only after graceful shut down of all registered instances.
- * If not, the device will continue with the existing firmware.
+ * the reset only after graceful shut down of all registered
+ * instances. If not, the device will continue with the existing
+ * firmware.
*/
#define HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL UINT32_C(0x1)
/*
@@ -55340,9 +61056,9 @@ typedef struct hwrm_fw_reset_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -55394,9 +61110,15 @@ typedef struct hwrm_fw_qstatus_input {
#define HWRM_FW_QSTATUS_INPUT_EMBEDDED_PROC_TYPE_NETCTRL UINT32_C(0x2)
/* RoCE control processor */
#define HWRM_FW_QSTATUS_INPUT_EMBEDDED_PROC_TYPE_ROCE UINT32_C(0x3)
- /* Host (in multi-host environment): This is only valid if requester is IPC */
+ /*
+ * Host (in multi-host environment): This is only valid if requester
+ * is IPC
+ */
#define HWRM_FW_QSTATUS_INPUT_EMBEDDED_PROC_TYPE_HOST UINT32_C(0x4)
- /* AP processor complex (in multi-host environment). Use host_idx to control which core is reset */
+ /*
+ * AP processor complex (in multi-host environment). Use host_idx to
+ * control which core is reset
+ */
#define HWRM_FW_QSTATUS_INPUT_EMBEDDED_PROC_TYPE_AP UINT32_C(0x5)
/* Reset all blocks of the chip (including all processors) */
#define HWRM_FW_QSTATUS_INPUT_EMBEDDED_PROC_TYPE_CHIP UINT32_C(0x6)
@@ -55444,9 +61166,9 @@ typedef struct hwrm_fw_qstatus_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -55530,9 +61252,9 @@ typedef struct hwrm_fw_set_time_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -55615,9 +61337,9 @@ typedef struct hwrm_fw_get_time_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -55638,12 +61360,20 @@ typedef struct hwrm_struct_hdr {
#define HWRM_STRUCT_HDR_STRUCT_ID_DCBX_APP UINT32_C(0x421)
/* DCBX state configuration structured data ID for all DCBX features. */
#define HWRM_STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE UINT32_C(0x422)
- /* LLDP generic structured data ID. This is used with GET_STRUCTURED_DATA only. */
+ /*
+ * LLDP generic structured data ID. This is used with
+ * GET_STRUCTURED_DATA only.
+ */
#define HWRM_STRUCT_HDR_STRUCT_ID_LLDP_GENERIC UINT32_C(0x424)
- /* LLDP device structured data ID. This is used with GET_STRUCTURED_DATA only. */
+ /*
+ * LLDP device structured data ID. This is used with
+ * GET_STRUCTURED_DATA only.
+ */
#define HWRM_STRUCT_HDR_STRUCT_ID_LLDP_DEVICE UINT32_C(0x426)
/* Power Backup info */
#define HWRM_STRUCT_HDR_STRUCT_ID_POWER_BKUP UINT32_C(0x427)
+ /* Guest physical address to Host physical address mapping */
+ #define HWRM_STRUCT_HDR_STRUCT_ID_PEER_MMAP UINT32_C(0x429)
/* reserved for AFM usage. */
#define HWRM_STRUCT_HDR_STRUCT_ID_AFM_OPAQUE UINT32_C(0x1)
/* Port description. */
@@ -55662,9 +61392,9 @@ typedef struct hwrm_struct_hdr {
/* This value indicates the subtype. */
uint16_t subtype;
/*
- * This value indicates the count of 64-bit values that point to the next header.
- * A value of 0 means that this is the last element. The value is a count of 64-bit
- * words from the beginning of the current header.
+ * This value indicates the count of 64-bit values that point to the next
+ * header. A value of 0 means that this is the last element. The value is
+ * a count of 64-bit words from the beginning of the current header.
*/
uint16_t next_offset;
/* This value indicates this is the last element */
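/*
 * Since next_offset counts 64-bit words from the start of the current
 * header, a response buffer can be walked as in this sketch (assumes
 * the buffer was already populated via HWRM_FW_GET_STRUCTURED_DATA):
 */
static hwrm_struct_hdr_t *
hwrm_struct_next_sketch(hwrm_struct_hdr_t *hdr)
{
	uint16_t off = le16toh(hdr->next_offset);

	if (off == 0)		/* 0 marks the last element */
		return (NULL);
	return ((hwrm_struct_hdr_t *)((uint64_t *)hdr + off));
}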
@@ -55676,8 +61406,9 @@ typedef struct hwrm_struct_hdr {
typedef struct hwrm_struct_data_dcbx_ets {
/*
- * This field indicates if this configuration is ETS recommendation or ETS configuration.
- * A value 1 means it is ETS configuration, A value of 2 means it is a ETS recommendation.
+ * This field indicates if this configuration is an ETS recommendation
+ * or an ETS configuration. A value of 1 means it is an ETS
+ * configuration; a value of 2 means it is an ETS recommendation.
*/
uint8_t destination;
/* ETS configuration */
@@ -55753,8 +61484,9 @@ typedef struct hwrm_struct_data_dcbx_ets {
typedef struct hwrm_struct_data_dcbx_pfc {
/*
- * This field indicates PFC priority bit map. A value of '0' indicates PFC
- * is disabled. A value of '1' indicates PFC is enabled on that priority.
+ * This field indicates PFC priority bit map. A value of '0' indicates
+ * PFC is disabled. A value of '1' indicates PFC is enabled on that
+ * priority.
*/
uint8_t pfc_priority_bitmap;
/*
@@ -55764,7 +61496,8 @@ typedef struct hwrm_struct_data_dcbx_pfc {
uint8_t max_pfc_tcs;
/*
* This field indicates if MACSec bypass capability is enabled. A value
- * of '1' indicates MBC is enabled. A value of '0' indicates MBC is disabled.
+ * of '1' indicates MBC is enabled. A value of '0' indicates MBC is
+ * disabled.
*/
uint8_t mbc;
uint8_t unused_0[5];
@@ -55826,7 +61559,10 @@ typedef struct hwrm_struct_data_dcbx_feature_state {
#define HWRM_STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_LAST HWRM_STRUCT_DATA_DCBX_FEATURE_STATE_APP_STATE_ADVERTISE_BIT_POS
/* unused. */
uint8_t unused[3];
- /* This field is used to reset the DCBX configuration to factory defaults. */
+ /*
+ * This field is used to reset the DCBX configuration to factory
+ * defaults.
+ */
uint8_t resets;
/* reset ETS configuration. */
#define HWRM_STRUCT_DATA_DCBX_FEATURE_STATE_RESETS_RESET_ETS UINT32_C(0x1)
@@ -55956,8 +61692,8 @@ typedef struct hwrm_struct_data_lldp_device {
typedef struct hwrm_struct_data_port_description {
/*
- * Port #. Port number starts at 0 and anything greater than number of ports
- * minus 1 is an error.
+ * Port #. Port number starts at 0 and anything greater than number of
+ * ports minus 1 is an error.
*/
uint8_t port_id;
uint8_t unused_0[7];
@@ -56021,9 +61757,9 @@ typedef struct hwrm_struct_data_power_information {
uint32_t bkup_power_info_ver;
/* Platform backup power count */
uint32_t platform_bkup_power_count;
- /* Load in milli Watt */
+ /* Load in milliwatts */
uint32_t load_milli_watt;
- /* Backup time in milli seconds */
+ /* Backup time in milliseconds */
uint32_t bkup_time_milli_seconds;
/* Backup power status */
uint32_t bkup_power_status;
@@ -56031,6 +61767,77 @@ typedef struct hwrm_struct_data_power_information {
uint32_t bkup_power_charge_time;
} hwrm_struct_data_power_information_t, *phwrm_struct_data_power_information_t;
+/*
+ * All mappings (up to 8) for a function will be sent down
+ * at the same time. If entries are sent down for the same
+ * function again, the existing saved entries will be
+ * overwritten.
+ */
+/* hwrm_struct_data_peer_mmap (size:1600b/200B) */
+
+typedef struct hwrm_struct_data_peer_mmap {
+ /*
+ * Target function ID for the mappings. The fid should
+ * be 0xffff for the current PF, or a valid VF fid for a
+ * child VF of the current PF.
+ */
+ uint16_t fid;
+ /*
+ * Number of mappings for this function. The count has to
+ * be > 0 and <= 8. A maximum of 8 mappings is supported.
+ */
+ uint16_t count;
+ uint32_t unused_0;
+ /* Host Physical Address for mapping 0. */
+ uint64_t hpa_0;
+ /* Guest Physical Address for mapping 0. */
+ uint64_t gpa_0;
+ /* Size in Kilobytes for mapping 0. */
+ uint64_t size_0;
+ /* Host Physical Address for mapping 1. */
+ uint64_t hpa_1;
+ /* Guest Physical Address for mapping 1. */
+ uint64_t gpa_1;
+ /* Size in Kilobytes for mapping 1. */
+ uint64_t size_1;
+ /* Host Physical Address for mapping 2. */
+ uint64_t hpa_2;
+ /* Guest Physical Address for mapping 2. */
+ uint64_t gpa_2;
+ /* Size in Kilobytes for mapping 2. */
+ uint64_t size_2;
+ /* Host Physical Address for mapping 3. */
+ uint64_t hpa_3;
+ /* Guest Physical Address for mapping 3. */
+ uint64_t gpa_3;
+ /* Size in Kilobytes for mapping 3. */
+ uint64_t size_3;
+ /* Host Physical Address for mapping 4. */
+ uint64_t hpa_4;
+ /* Guest Physical Address for mapping 4. */
+ uint64_t gpa_4;
+ /* Size in Kilobytes for mapping 4. */
+ uint64_t size_4;
+ /* Host Physical Address for mapping 5. */
+ uint64_t hpa_5;
+ /* Guest Physical Address for mapping 5. */
+ uint64_t gpa_5;
+ /* Size in Kilobytes for mapping 5. */
+ uint64_t size_5;
+ /* Host Physical Address for mapping 6. */
+ uint64_t hpa_6;
+ /* Guest Physical Address for mapping 6. */
+ uint64_t gpa_6;
+ /* Size in Kilobytes for mapping 6. */
+ uint64_t size_6;
+ /* Host Physical Address for mapping 7. */
+ uint64_t hpa_7;
+ /* Guest Physical Address for mapping 7. */
+ uint64_t gpa_7;
+ /* Size in Kilobytes for mapping 7. */
+ uint64_t size_7;
+} hwrm_struct_data_peer_mmap_t, *phwrm_struct_data_peer_mmap_t;
+
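/*
 * Minimal sketch of filling one peer_mmap entry for the current PF;
 * the addresses are illustrative only. The populated struct would
 * then be pushed with HWRM_FW_SET_STRUCTURED_DATA using the
 * HWRM_STRUCT_HDR_STRUCT_ID_PEER_MMAP struct_id defined above.
 */
static void
peer_mmap_fill_sketch(hwrm_struct_data_peer_mmap_t *m)
{
	memset(m, 0, sizeof(*m));
	m->fid = htole16(0xffff);	/* 0xffff selects the current PF */
	m->count = htole16(1);		/* one mapping; up to 8 allowed */
	m->gpa_0 = htole64(0x0ULL);	/* guest physical base */
	m->hpa_0 = htole64(0x100000000ULL);	/* host physical base */
	m->size_0 = htole64(4ULL * 1024 * 1024);	/* 4 GiB, in KB */
}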
/* hwrm_struct_data_msix_per_vf (size:320b/40B) */
typedef struct hwrm_struct_data_msix_per_vf {
@@ -56138,9 +61945,9 @@ typedef struct hwrm_fw_set_structured_data_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56210,21 +62017,23 @@ typedef struct hwrm_fw_get_structured_data_input {
/* size of data in bytes */
uint16_t data_len;
/*
- * Structure_id is the id of the structure data requesting and count is a
- * requested number of instances of this data requested. The actual number
- * will be returned in count_of_headers
+ * Structure_id is the ID of the structured data being requested, and
+ * count is the requested number of instances of this data. The actual
+ * number will be returned in count_of_headers
*/
uint16_t structure_id;
/*
- * Subtype is an optional field used to specify additional information of the data
- * being retrieved. For example, if data can be categorized as "live" vs "saved"
- * then this field can be used to provide an indication of "saved" vs "live" data.
- * Not all structured data supports subtypes and if they are supported then the
- * structured data will specify the valid values. If structured data is requested
- * that supports subtypes but no subtype is given then it is implementation specific
- * what will be returned. Some structure data can support a subtype of "All" which
- * would cause a list of structures to be returned for all supported subtypes. "All"
- * is only used on the hwrm_get_structured_data command.
+ * Subtype is an optional field used to specify additional information
+ * of the data being retrieved. For example, if data can be categorized
+ * as "live" vs "saved" then this field can be used to provide an
+ * indication of "saved" vs "live" data. Not all structured data
+ * supports subtypes and if they are supported then the structured data
+ * will specify the valid values. If structured data is requested that
+ * supports subtypes but no subtype is given then it is implementation
+ * specific what will be returned. Some structure data can support a
+ * subtype of "All" which would cause a list of structures to be
+ * returned for all supported subtypes. "All" is only used on the
+ * hwrm_get_structured_data command.
*/
uint16_t subtype;
#define HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_UNUSED UINT32_C(0x0)
@@ -56237,7 +62046,7 @@ typedef struct hwrm_fw_get_structured_data_input {
#define HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_NON_TPMR_OPERATIONAL UINT32_C(0x202)
#define HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_HOST_OPERATIONAL UINT32_C(0x300)
#define HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_LAST HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_HOST_OPERATIONAL
- /* Number of elements. This allows support of arrayed data */
+ /* Number of elements. This allows support of arrayed data */
uint8_t count;
uint8_t unused_0;
} hwrm_fw_get_structured_data_input_t, *phwrm_fw_get_structured_data_input_t;
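/*
 * Sketch: requesting one instance of the LLDP generic structured data
 * (a GET-only struct_id, per the note above). buf_pa/buf_len describe
 * a DMA-able host buffer; dest_data_addr is the destination-address
 * field of this input struct, defined outside the hunk shown here.
 */
static void
get_lldp_generic_sketch(hwrm_fw_get_structured_data_input_t *req,
    uint64_t buf_pa, uint16_t buf_len)
{
	memset(req, 0, sizeof(*req));
	req->req_type = htole16(HWRM_FW_GET_STRUCTURED_DATA);
	req->dest_data_addr = htole64(buf_pa);
	req->data_len = htole16(buf_len);
	req->structure_id = htole16(HWRM_STRUCT_HDR_STRUCT_ID_LLDP_GENERIC);
	req->subtype =
	    htole16(HWRM_FW_GET_STRUCTURED_DATA_INPUT_SUBTYPE_HOST_OPERATIONAL);
	req->count = 1;		/* one instance of this structure */
}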
@@ -56261,9 +62070,9 @@ typedef struct hwrm_fw_get_structured_data_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56345,10 +62154,12 @@ typedef struct hwrm_fw_ipc_msg_input {
/* Command ID */
uint16_t command_id;
/* RoCE LAG message */
- #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_ROCE_LAG UINT32_C(0x1)
+ #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_ROCE_LAG UINT32_C(0x1)
/* Query information on PF mapping for x86 and MAIA. */
- #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_MHB_HOST UINT32_C(0x2)
- #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_LAST HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_MHB_HOST
+ #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_MHB_HOST UINT32_C(0x2)
+ /* RoCE driver version details to be sent to chimp */
+ #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_ROCE_DRVR_VERSION UINT32_C(0x3)
+ #define HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_LAST HWRM_FW_IPC_MSG_INPUT_COMMAND_ID_ROCE_DRVR_VERSION
/* Source processor for this command. */
uint8_t src_processor;
/* Chimp processor */
@@ -56387,9 +62198,9 @@ typedef struct hwrm_fw_ipc_msg_output {
uint8_t reserved48[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56459,9 +62270,9 @@ typedef struct hwrm_fw_ipc_mailbox_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56549,9 +62360,9 @@ typedef struct hwrm_fw_ecn_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56612,9 +62423,9 @@ typedef struct hwrm_fw_ecn_qcfg_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56737,12 +62548,42 @@ typedef struct hwrm_fw_health_check_output {
* or '1' if they do not match.
*/
#define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_FRU_MISMATCH UINT32_C(0x1000)
+ /*
+ * This bit is '0' if the primary CRT2 was used this boot,
+ * or '1' if the secondary CRT2 was used.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_CRT2_BOOTED UINT32_C(0x2000)
+ /*
+ * This bit is '0' if the primary and secondary CRT2 images
+ * match, or '1' if they do not match.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_CRT2_MISMATCH UINT32_C(0x4000)
+ /*
+ * This bit is '0' if the primary GXRT was used this boot,
+ * or '1' if the secondary GXRT was used.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_GXRT_BOOTED UINT32_C(0x8000)
+ /*
+ * This bit is '0' if the primary and secondary GXRT images
+ * match, or '1' if they do not match.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_GXRT_MISMATCH UINT32_C(0x10000)
+ /*
+ * This bit is '0' if the primary SRT2 was used this boot,
+ * or '1' if the secondary SRT2 was used.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_SRT2_BOOTED UINT32_C(0x20000)
+ /*
+ * This bit is '0' if the primary and secondary SRT2 images
+ * match, or '1' if they do not match.
+ */
+ #define HWRM_FW_HEALTH_CHECK_OUTPUT_FW_STATUS_SRT2_MISMATCH UINT32_C(0x40000)
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56807,12 +62648,12 @@ typedef struct hwrm_fw_livepatch_query_output {
uint16_t resp_len;
/*
* This field represents the patch version string of the NVM installed
- * livepatch. (ASCII chars with NULL at the end).
+ * livepatch. (ASCII chars with NULL at the end).
*/
char install_ver[32];
/*
* This field represents the patch version string of the active
- * livepatch. (ASCII chars with NULL at the end).
+ * livepatch. (ASCII chars with NULL at the end).
*/
char active_ver[32];
uint16_t status_flags;
@@ -56823,9 +62664,9 @@ typedef struct hwrm_fw_livepatch_query_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56871,7 +62712,7 @@ typedef struct hwrm_fw_livepatch_input {
uint8_t opcode;
/*
* Activate a livepatch that is NVM installed or via direct load
- * from host memory. Activate will authenticate a signed patch,
+ * from host memory. Activate will authenticate a signed patch,
* verify the patch version for compatibility and apply the
* livepatch to existing firmware at run-time.
*/
@@ -56894,7 +62735,7 @@ typedef struct hwrm_fw_livepatch_input {
/* Load a livepatch currently installed on NVM. */
#define HWRM_FW_LIVEPATCH_INPUT_LOADTYPE_NVM_INSTALL UINT32_C(0x1)
/*
- * Load a livepatch directly from host memory. The livepatch image
+ * Load a livepatch directly from host memory. The livepatch image
* is available at host_addr.
*/
#define HWRM_FW_LIVEPATCH_INPUT_LOADTYPE_MEMORY_DIRECT UINT32_C(0x2)
@@ -56921,9 +62762,9 @@ typedef struct hwrm_fw_livepatch_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -56947,16 +62788,16 @@ typedef struct hwrm_fw_livepatch_cmd_err {
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED UINT32_C(0x3)
/* Livepatch image is not installed in NVRAM. */
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED UINT32_C(0x4)
- /* Deactivate failed. Firmware is not currently patched. */
+ /* Deactivate failed. Firmware is not currently patched. */
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED UINT32_C(0x5)
/* Authentication of a signed livepatch failed. */
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL UINT32_C(0x6)
- /* Livepatch header check failed. Patch incompatible. */
+ /* Livepatch header check failed. Patch incompatible. */
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER UINT32_C(0x7)
/* Livepatch size incompatible. */
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE UINT32_C(0x8)
/*
- * Activate failed. Firmware has already been patched. Deactivate
+ * Activate failed. Firmware has already been patched. Deactivate
* existing livepatch before proceeding.
*/
#define HWRM_FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED UINT32_C(0x9)
@@ -57047,11 +62888,32 @@ typedef struct hwrm_fw_sync_input {
*/
#define HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_FRU UINT32_C(0x40)
/*
+ * If action is '1' (sync) and this bit is set, the CRT2
+ * images will be synchronized, copying from the instance
+ * used for boot to the other instance, if they currently
+ * do not match.
+ */
+ #define HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_CRT2 UINT32_C(0x80)
+ /*
+ * If action is '1' (sync) and this bit is set, the GXRT
+ * images will be synchronized, copying from the instance
+ * used for boot to the other instance, if they currently
+ * do not match.
+ */
+ #define HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_GXRT UINT32_C(0x100)
+ /*
+ * If action is '1' (sync) and this bit is set, the SRT2
+ * images will be synchronized, copying from the instance
+ * used for boot to the other instance, if they currently
+ * do not match.
+ */
+ #define HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_SRT2 UINT32_C(0x200)
+ /*
* A value of '1' instructs the firmware to perform an image
* synchronization of the firmware types denoted by the
- * sync_sbi, sync_srt, sync_crt bits. A value of '0' just
- * requests the status for the previously requested sync
- * operation.
+	 * sync_sbi, sync_srt, sync_crt, sync_crt2, sync_gxrt and
+	 * sync_srt2 bits. A value of '0' just requests the status for
+	 * the previously requested sync operation.
*/
#define HWRM_FW_SYNC_INPUT_SYNC_ACTION_ACTION UINT32_C(0x80000000)
uint8_t unused_0[4];
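/*
 * Sketch: kick off a sync of the CRT2 and SRT2 images. The sync_action
 * field name follows the SYNC_ACTION defines above; send plumbing is
 * omitted, and polling is done by re-issuing the command with the
 * action bit clear and checking the response's completion/error bits.
 */
static void
fw_sync_start_sketch(hwrm_fw_sync_input_t *req)
{
	memset(req, 0, sizeof(*req));
	req->req_type = htole16(HWRM_FW_SYNC);
	req->sync_action = htole32(
	    HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_CRT2 |
	    HWRM_FW_SYNC_INPUT_SYNC_ACTION_SYNC_SRT2 |
	    HWRM_FW_SYNC_INPUT_SYNC_ACTION_ACTION);
}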
@@ -57085,7 +62947,7 @@ typedef struct hwrm_fw_sync_output {
#define HWRM_FW_SYNC_OUTPUT_SYNC_STATUS_ERR_CODE_GENERAL UINT32_C(0x3)
#define HWRM_FW_SYNC_OUTPUT_SYNC_STATUS_ERR_CODE_LAST HWRM_FW_SYNC_OUTPUT_SYNC_STATUS_ERR_CODE_GENERAL
/*
- * This bit is '1' if the syncronization request has completed
+ * This bit is '1' if the synchronization request has completed
* with an error; the 'err_code' field can be used to obtain
* information about error type.
*/
@@ -57093,7 +62955,7 @@ typedef struct hwrm_fw_sync_output {
/*
* This bit is '0' if the previously requested synchronization
* command is still in progress, or '1' if the previously
- * requested sync command has completed. If '1', the 'sync_err'
+ * requested sync command has completed. If '1', the 'sync_err'
* field will indicate if it completed successfully or with
* an error.
*/
@@ -57101,9 +62963,9 @@ typedef struct hwrm_fw_sync_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57160,7 +63022,7 @@ typedef struct hwrm_fw_state_qcaps_output {
uint16_t resp_len;
/*
* This field indicates the size in bytes required by host backup
- * memory. Host software should allocate memory according to this
+ * memory. Host software should allocate memory according to this
* size requirement and pass the allocated memory to the
* HWRM_FW_STATE_BACKUP and HWRM_FW_STATE_RESTORE commands in the form
* of PBL data as specified in those commands.
@@ -57193,7 +63055,7 @@ typedef struct hwrm_fw_state_qcaps_output {
uint32_t fw_status_blackout;
/*
* This field indicates a max time for firmware to poll for status
- * 0x8000 before assuming a reset failure occurred. This time does
+ * 0x8000 before assuming a reset failure occurred. This time does
* not include fw_status_blackout time which would immediately precede
* this wait.
*/
@@ -57202,9 +63064,9 @@ typedef struct hwrm_fw_state_qcaps_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57350,9 +63212,9 @@ typedef struct hwrm_fw_state_unquiesce_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57459,7 +63321,7 @@ typedef struct hwrm_fw_state_backup_output {
* This bit is '0' if the backout was done in a way that firmware
* may continue running normally after the backup, for example if
* the host elects to skip the subsequent reset and restore for any
- * reason. A value of '1' indicates the act of backing up has left
+ * reason. A value of '1' indicates the act of backing up has left
* the firmware/device in a state where subsequent reset is
* required, for example if probing the state of a queue changes
* state in a way that is detectable by users.
@@ -57471,9 +63333,9 @@ typedef struct hwrm_fw_state_backup_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57581,7 +63443,7 @@ typedef struct hwrm_fw_state_restore_output {
/*
* If a failure occurs (complete is 0), restore attempts to
* completely roll back any state applied so that the failure
- * results in no state change. This flag indicates whether that
+ * results in no state change. This flag indicates whether that
* rollback completed successfully and thoroughly.
*/
#define HWRM_FW_STATE_RESTORE_OUTPUT_RESTORE_STATUS_FAILURE_ROLLBACK_COMPLETED UINT32_C(0x40000000)
@@ -57590,9 +63452,9 @@ typedef struct hwrm_fw_state_restore_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57717,7 +63579,7 @@ typedef struct hwrm_fw_secure_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -57794,9 +63656,9 @@ typedef struct hwrm_exec_fwd_resp_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57870,9 +63732,9 @@ typedef struct hwrm_reject_fwd_resp_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -57924,7 +63786,7 @@ typedef struct hwrm_fwd_resp_input {
uint16_t encap_resp_target_id;
/*
* This value indicates the completion ring the encapsulated
- * response will be optionally completed on. If the value is
+ * response will be optionally completed on. If the value is
* -1, then no CR completion shall be generated for the
* encapsulated response. Any other value must be a
* valid CR ring_id value. If a valid encap_resp_cmpl_ring
@@ -57961,9 +63823,9 @@ typedef struct hwrm_fwd_resp_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58033,9 +63895,9 @@ typedef struct hwrm_fwd_async_event_cmpl_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58079,7 +63941,7 @@ typedef struct hwrm_temp_monitor_query_input {
uint64_t resp_addr;
} hwrm_temp_monitor_query_input_t, *phwrm_temp_monitor_query_input_t;
-/* hwrm_temp_monitor_query_output (size:128b/16B) */
+/* hwrm_temp_monitor_query_output (size:192b/24B) */
typedef struct hwrm_temp_monitor_query_output {
/* The specific error status for the command. */
@@ -58131,6 +63993,11 @@ typedef struct hwrm_temp_monitor_query_output {
*/
#define HWRM_TEMP_MONITOR_QUERY_OUTPUT_FLAGS_EXT_TEMP_FIELDS_AVAILABLE UINT32_C(0x10)
/*
+ * "1" in this bit indicates the thermal threshold values are
+ * available.
+ */
+ #define HWRM_TEMP_MONITOR_QUERY_OUTPUT_FLAGS_THRESHOLD_VALUES_AVAILABLE UINT32_C(0x20)
+ /*
* This field encodes the current device temperature in Celsius.
* This field is unsigned and the value range of 0 to 255 is used to
* indicate a temperature range from -64 to +191. The actual
@@ -58158,10 +64025,33 @@ typedef struct hwrm_temp_monitor_query_output {
*/
uint8_t om_temp2;
/*
+ * This field reports the device's threshold value for reporting
+ * a warning indication. The temperature is reported in Celsius.
+ */
+ uint8_t warn_threshold;
+ /*
+ * This field reports the device's threshold value for reporting
+ * a critical indication. The temperature is reported in Celsius.
+ */
+ uint8_t critical_threshold;
+ /*
+ * This field reports the device's threshold value for reporting
+ * a fatal indication. The temperature is reported in Celsius.
+ */
+ uint8_t fatal_threshold;
+ /*
+ * This field reports the threshold value at which the device will
+ * perform a self shutdown. The temperature is reported in Celsius.
+ * If the value is zero, then that indicates self shutdown is not
+ * configured.
+ */
+ uint8_t shutdown_threshold;
+ uint8_t unused_0[4];
+ /*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
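/*
 * The temp/om_temp fields above are biased by 64 (0..255 encodes -64
 * to +191 Celsius), so a reading decodes as in this sketch. The
 * threshold fields are described simply as Celsius; whether they carry
 * the same bias is not stated here.
 */
static int
hwrm_temp_decode_sketch(uint8_t raw)
{
	return ((int)raw - 64);	/* e.g. raw 89 decodes to 25 C */
}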
@@ -58235,9 +64125,9 @@ typedef struct hwrm_reg_power_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58297,9 +64187,9 @@ typedef struct hwrm_core_frequency_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58413,9 +64303,9 @@ typedef struct hwrm_reg_power_histogram_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58625,9 +64515,9 @@ typedef struct hwrm_wol_filter_alloc_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58708,9 +64598,9 @@ typedef struct hwrm_wol_filter_free_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58869,9 +64759,9 @@ typedef struct hwrm_wol_filter_qcfg_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -58963,9 +64853,9 @@ typedef struct hwrm_wol_reason_qcfg_output {
uint8_t unused_0[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59009,7 +64899,7 @@ typedef struct hwrm_dbg_read_direct_input {
uint64_t resp_addr;
/*
* host address where the data content will be written
- * when the request is complete. This area must be 16B aligned.
+ * when the request is complete. This area must be 16B aligned.
*/
uint64_t host_dest_addr;
/* address(in ChiMP view) to start reading */
@@ -59038,9 +64928,9 @@ typedef struct hwrm_dbg_read_direct_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59104,9 +64994,9 @@ typedef struct hwrm_dbg_write_direct_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59150,7 +65040,7 @@ typedef struct hwrm_dbg_read_indirect_input {
uint64_t resp_addr;
/*
* host address where the data content will be written
- * when the request is complete. This area must be 16B aligned.
+ * when the request is complete. This area must be 16B aligned.
*/
uint64_t host_dest_addr;
/* Length of host buffer used for transferring debug data. */
@@ -59241,9 +65131,9 @@ typedef struct hwrm_dbg_read_indirect_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59362,9 +65252,9 @@ typedef struct hwrm_dbg_write_indirect_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59450,9 +65340,9 @@ typedef struct hwrm_dbg_dump_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59496,7 +65386,15 @@ typedef struct hwrm_dbg_erase_nvm_input {
uint64_t resp_addr;
uint16_t flags;
/* If set to 1, then erase all locations in persistent storage. */
- #define HWRM_DBG_ERASE_NVM_INPUT_FLAGS_ERASE_ALL UINT32_C(0x1)
+ #define HWRM_DBG_ERASE_NVM_INPUT_FLAGS_ERASE_ALL UINT32_C(0x1)
+ /*
+	 * This bit is only used when an external secure SoC is used
+	 * for secure boot. It differentiates between an erase of the
+	 * NIC non-volatile storage and an erase of the security SoC
+	 * non-volatile storage on the device. If this bit is set, all
+	 * locations in the persistent storage of the secure SoC
+	 * non-volatile storage device are erased.
+ */
+ #define HWRM_DBG_ERASE_NVM_INPUT_FLAGS_SECURITY_SOC_NVM UINT32_C(0x2)
uint8_t unused_0[6];
} hwrm_dbg_erase_nvm_input_t, *phwrm_dbg_erase_nvm_input_t;
@@ -59514,9 +65412,9 @@ typedef struct hwrm_dbg_erase_nvm_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59588,7 +65486,7 @@ typedef struct hwrm_dbg_cfg_input {
/*
* If set to 1, firmware is allowed to be unresponsive to heartbeat
* health checks, allowing for JTAG debugging scenarios where the
- * debugger has the firmware processes stopped indefinitely. This
+ * debugger has the firmware processes stopped indefinitely. This
* flag has effect only on debug builds of firmware.
*/
#define HWRM_DBG_CFG_INPUT_FLAGS_JTAG_DEBUG UINT32_C(0x20)
@@ -59615,9 +65513,9 @@ typedef struct hwrm_dbg_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59760,9 +65658,9 @@ typedef struct hwrm_dbg_crashdump_header_output {
uint8_t unused_2[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59838,9 +65736,9 @@ typedef struct hwrm_dbg_crashdump_erase_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -59903,7 +65801,7 @@ typedef struct hwrm_dbg_qcaps_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
@@ -59920,19 +65818,39 @@ typedef struct hwrm_dbg_qcaps_output {
#define HWRM_DBG_QCAPS_OUTPUT_COREDUMP_COMPONENT_DISABLE_CAPS_NVRAM UINT32_C(0x1)
uint32_t flags;
/* If 1, FW supports writing a crashdump to NVM. */
- #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_NVM UINT32_C(0x1)
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_NVM UINT32_C(0x1)
/* If 1, FW supports writing a crashdump to host ddr. */
#define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_HOST_DDR UINT32_C(0x2)
/* If 1, FW supports writing a crashdump to soc ddr. */
#define HWRM_DBG_QCAPS_OUTPUT_FLAGS_CRASHDUMP_SOC_DDR UINT32_C(0x4)
/* If 1, FW supports USEQ operations */
- #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_USEQ UINT32_C(0x8)
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_USEQ UINT32_C(0x8)
+ /*
+	 * If 1, FW supports writing a coredump to host ddr.
+	 * The driver instance can allocate host memory to
+	 * capture the coredump.
+ */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_COREDUMP_HOST_DDR UINT32_C(0x10)
+ /*
+	 * If 1, FW supports the HWRM_DBG_COREDUMP_CAPTURE command to
+	 * collect the coredump into host memory. The driver instance
+	 * can invoke the command to collect a coredump upon any fatal
+	 * event; a Tx timeout is an example scenario.
+ */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_COREDUMP_HOST_CAPTURE UINT32_C(0x20)
+ /*
+	 * If 1, FW supports the PTrace capability. PTrace (PEX Trace
+	 * Capture) provides a means for capturing and buffering PCIe
+	 * TLPs, DLLPs and ordered sets flowing in both directions
+	 * through a PEX station. This capability is advertised only
+	 * on PFs.
+ */
+ #define HWRM_DBG_QCAPS_OUTPUT_FLAGS_PTRACE UINT32_C(0x40)
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
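/*
 * Sketch: gating host-memory coredump collection on the capabilities
 * advertised above (response retrieval and buffer setup omitted).
 */
static bool
coredump_to_host_supported_sketch(const hwrm_dbg_qcaps_output_t *resp)
{
	uint32_t flags = le32toh(resp->flags);

	return ((flags & HWRM_DBG_QCAPS_OUTPUT_FLAGS_COREDUMP_HOST_DDR) != 0 &&
	    (flags & HWRM_DBG_QCAPS_OUTPUT_FLAGS_COREDUMP_HOST_CAPTURE) != 0);
}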
@@ -60018,13 +65936,13 @@ typedef struct hwrm_dbg_qcfg_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/*
- * FID value. This value is used to identify operations on the PCI
+ * FID value. This value is used to identify operations on the PCI
* bus as belonging to a particular PCI function.
*/
uint16_t fid;
uint8_t unused_0[2];
/*
- * Size in bytes of a coredump file created by the FW. This takes into
+ * Size in bytes of a coredump file created by the FW. This takes into
* consideration any components selected in the
* coredump_component_disable_flags field from hwrm_dbg_qcfg_input.
*/
@@ -60046,7 +65964,7 @@ typedef struct hwrm_dbg_qcfg_output {
*/
#define HWRM_DBG_QCFG_OUTPUT_FLAGS_FW_TRACE UINT32_C(0x4)
/*
- * If set to 1, then completion ring logging is enabled for the
+ * If set to 1, then completion ring logging is enabled for the
* secondary firmware. Disabled otherwise.
*/
#define HWRM_DBG_QCFG_OUTPUT_FLAGS_FW_TRACE_SECONDARY UINT32_C(0x8)
@@ -60058,7 +65976,7 @@ typedef struct hwrm_dbg_qcfg_output {
/*
* If set to 1, firmware is allowed to be unresponsive to heartbeat
* health checks, allowing for JTAG debugging scenarios where the
- * debugger has the firmware processes stopped indefinitely. This
+ * debugger has the firmware processes stopped indefinitely. This
* flag has effect only on debug builds of firmware.
*/
#define HWRM_DBG_QCFG_OUTPUT_FLAGS_JTAG_DEBUG UINT32_C(0x20)
@@ -60070,16 +65988,16 @@ typedef struct hwrm_dbg_qcfg_output {
uint16_t async_cmpl_ring;
uint8_t unused_2[2];
/*
- * Size in bytes of a crashdump file created by the FW. Uses input
+ * Size in bytes of a crashdump file created by the FW. Uses input
* flags to determine medium destination and corresponding size.
*/
uint32_t crashdump_size;
uint8_t unused_3[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60188,9 +66106,9 @@ typedef struct hwrm_dbg_crashdump_medium_cfg_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60267,7 +66185,7 @@ typedef struct hwrm_dbg_coredump_list_input {
uint64_t resp_addr;
/*
* host address where the data content will be written
- * when the request is complete. This area must be 16B aligned.
+ * when the request is complete. This area must be 16B aligned.
*/
uint64_t host_dest_addr;
/* Length of host buffer used for transferring debug data. */
@@ -60309,9 +66227,9 @@ typedef struct hwrm_dbg_coredump_list_output {
uint8_t unused_1;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60364,8 +66282,18 @@ typedef struct hwrm_dbg_coredump_initiate_input {
/*
* bit 0: live data
* bit 1: crashed data
+ * bit 2: collect context l1 cache
*/
uint8_t seg_flags;
+	/* Not used. */
+	#define HWRM_DBG_COREDUMP_INITIATE_INPUT_SEG_FLAGS_LIVE_DATA UINT32_C(0x1)
+	/* Not used. */
+	#define HWRM_DBG_COREDUMP_INITIATE_INPUT_SEG_FLAGS_CRASH_DATA UINT32_C(0x2)
+ /*
+	 * If this bit is set, firmware is forced to collect the CFCx L1
+	 * cache.
+ */
+ #define HWRM_DBG_COREDUMP_INITIATE_INPUT_SEG_FLAGS_COLLECT_CTX_L1_CACHE UINT32_C(0x4)
/* Not used. */
uint8_t unused_1[7];
} hwrm_dbg_coredump_initiate_input_t, *phwrm_dbg_coredump_initiate_input_t;
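
/*
 * Illustrative request setup (a sketch; only the field assignment is
 * defined by this header, and queuing/sending the message is
 * driver-specific): requesting live data plus the CFCx L1 cache
 * collection introduced above.
 */
static inline void
dbg_coredump_initiate_fill(hwrm_dbg_coredump_initiate_input_t *req)
{
	req->seg_flags =
	    HWRM_DBG_COREDUMP_INITIATE_INPUT_SEG_FLAGS_LIVE_DATA |
	    HWRM_DBG_COREDUMP_INITIATE_INPUT_SEG_FLAGS_COLLECT_CTX_L1_CACHE;
}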
@@ -60384,9 +66312,9 @@ typedef struct hwrm_dbg_coredump_initiate_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60452,7 +66380,7 @@ typedef struct hwrm_dbg_coredump_retrieve_input {
uint64_t resp_addr;
/*
* host address where the data content will be written
- * when the request is complete. This area must be 16B aligned.
+ * when the request is complete. This area must be 16B aligned.
*/
uint64_t host_dest_addr;
/* Length of host buffer used for transferring debug data. */
@@ -60504,9 +66432,9 @@ typedef struct hwrm_dbg_coredump_retrieve_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60550,13 +66478,14 @@ typedef struct hwrm_dbg_i2c_cmd_input {
uint64_t resp_addr;
/*
* host address where the data content will be read or written.
- * For master write, data content will be read from host memory and write
- * to i2c slave. (size defined by write_size)
- * For master read, data content will be read from i2c slave and write to
- * the host memory. (size defined by read_size)
- * For master write/read, data content will be first read from host memory
- * and write to i2c slave. (size defined by write_size) then data read from
- * i2c slave will be written back to the same host memory. (size defined by read_size)
+ * For master write, data content will be read from host memory and
+ * write to i2c slave. (size defined by write_size)
+ * For master read, data content will be read from i2c slave and write
+ * to the host memory. (size defined by read_size)
+ * For master write/read, data content will be first read from host
+ * memory and write to i2c slave. (size defined by write_size) then
+ * data read from i2c slave will be written back to the same host
+ * memory. (size defined by read_size)
*/
uint64_t host_dest_addr;
/* read size in bytes, valid only for master read and write/read */
@@ -60564,7 +66493,8 @@ typedef struct hwrm_dbg_i2c_cmd_input {
/* write size in bytes, valid only for master write and write/read */
uint16_t write_size;
/*
- * instance of i2c channel for this operation. Valid if multiple instances
+ * instance of i2c channel for this operation. Valid if multiple
+ * instances
* of i2c channels are connected to external i2c devices.
*/
uint8_t chnl_id;
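
/*
 * Illustrative buffer sizing (a sketch): for a master write/read the
 * same host region is used for both phases, so it must be able to hold
 * max(write_size, read_size) bytes.
 */
static inline uint16_t
dbg_i2c_host_buf_len(uint16_t write_size, uint16_t read_size)
{
	return (write_size > read_size ? write_size : read_size);
}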
@@ -60607,9 +66537,9 @@ typedef struct hwrm_dbg_i2c_cmd_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60681,9 +66611,9 @@ typedef struct hwrm_dbg_fw_cli_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60761,12 +66691,18 @@ typedef struct hwrm_dbg_ring_info_get_output {
* Not valid for other ring types.
*/
uint32_t cag_vector_ctrl;
- uint8_t unused_0[3];
+ /*
+ * Steering Tag. The current value of the steering tag for the ring.
+ * The steering tag is only valid if it is advertised by Firmware in
+ * flags_ext2.steering_tag_supported of hwrm_func_qcaps response.
+	 * flags_ext2.steering_tag_supported of the hwrm_func_qcaps response.
+ uint16_t st_tag;
+ uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60845,9 +66781,9 @@ typedef struct hwrm_dbg_drv_trace_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -60889,9 +66825,15 @@ typedef struct hwrm_dbg_useq_alloc_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- /* Number size of the allocation, in bytes, for the USEQ in the code words array */
+ /*
+	 * Size of the allocation, in bytes, for the USEQ in the code
+	 * words array.
+ */
uint32_t size;
- /* Number of bytes executing the USEQ will produce. Must be a multiple of 4 */
+ /*
+ * Number of bytes executing the USEQ will produce. Must be a multiple
+ * of 4
+ */
uint16_t output_bytes;
/* This field is reserved */
uint16_t unused_0;
@@ -60931,9 +66873,9 @@ typedef struct hwrm_dbg_useq_alloc_output {
uint16_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
@@ -61013,9 +66955,9 @@ typedef struct hwrm_dbg_useq_free_output {
uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
@@ -61100,9 +67042,9 @@ typedef struct hwrm_dbg_useq_flush_output {
uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
@@ -61149,9 +67091,9 @@ typedef struct hwrm_dbg_useq_cw_cfg_input {
/*
* The code words given in this message will be placed
* at this offset from the starting code word for this
- * usid. NOTE: when offset is zero, the first 6 32-bit
+ * usid. NOTE: when offset is zero, the first 6 32-bit
* words may contain values for F0-F7 as well as the
- * main code word index. This is determined by checking
+ * main code word index. This is determined by checking
* the usid_ctrl_present flag.
*/
uint16_t offset;
@@ -61167,14 +67109,14 @@ typedef struct hwrm_dbg_useq_cw_cfg_input {
uint16_t flags;
/*
* When set, the opaque data begins with a block of control
- * information to be associated with the usid. This includes
+ * information to be associated with the usid. This includes
* F0-F7 code word indexes as well as the code word index for
* main.
*/
#define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_USID_CTRL_PRESENT UINT32_C(0x1)
/*
* When set, opaque contains a 64b host address used to DMA
- * the entire code word sequence. The offset within the
+ * the entire code word sequence. The offset within the
* opaque data depends on the state of other flags.
*/
#define HWRM_DBG_USEQ_CW_CFG_INPUT_FLAGS_USE_DMA UINT32_C(0x2)
@@ -61296,9 +67238,9 @@ typedef struct hwrm_dbg_useq_qcaps_output {
uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
@@ -61345,20 +67287,21 @@ typedef struct hwrm_dbg_useq_sched_cfg_input {
/* This value will leave the global scheduler in its current state */
#define HWRM_DBG_USEQ_SCHED_CFG_INPUT_NO_CHANGE UINT32_C(0x0)
/*
- * This value disables the global scheduler. This mode must be used
+ * This value disables the global scheduler. This mode must be used
* when the RUN command is being used to run individual sequences.
*/
#define HWRM_DBG_USEQ_SCHED_CFG_INPUT_DISABLE UINT32_C(0x1)
/*
- * This value enables the global scheduler. When enabled, USEQs will
+ * This value enables the global scheduler. When enabled, USEQs will
* be scheduled based on their polling intervals
*/
#define HWRM_DBG_USEQ_SCHED_CFG_INPUT_ENABLE UINT32_C(0x2)
#define HWRM_DBG_USEQ_SCHED_CFG_INPUT_LAST HWRM_DBG_USEQ_SCHED_CFG_INPUT_ENABLE
/*
- * The given polling interval will be associated with this USID. A value
- * of -1 indicates that the USID is invalid. The invalid USID is used when
- * using this message only for global scheduler configuration.
+ * The given polling interval will be associated with this USID. A
+ * value of -1 indicates that the USID is invalid. The invalid USID is
+ * used when using this message only for global scheduler
+ * configuration.
*/
uint16_t usid;
/* This value represents microseconds between runs of the USEQ */
@@ -61397,9 +67340,9 @@ typedef struct hwrm_dbg_useq_sched_cfg_output {
uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
@@ -61448,34 +67391,40 @@ typedef struct hwrm_dbg_useq_run_input {
/* This run type will execute the requested USEQ only a single time */
#define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_SINGLE UINT32_C(0x0)
/*
- * This run type will execute the requested USEQ a number of times given
- * by run_cnt with a run interval given by the run_interval parameter.
+ * This run type will execute the requested USEQ a number of times
+ * given by run_cnt with a run interval given by the run_interval
+ * parameter.
*/
#define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_CNT UINT32_C(0x1)
/*
- * This run type will execute the requested USEQ as many times as it needs
- * to fill an entire buffer to return to the host. The runs will occur
- * with a run interval given by the run_interval parameter.
+ * This run type will execute the requested USEQ as many times as it
+ * needs to fill an entire buffer to return to the host. The runs
+ * will occur with a run interval given by the run_interval
+ * parameter.
*/
#define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_FILL_BUF UINT32_C(0x2)
#define HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_LAST HWRM_DBG_USEQ_RUN_INPUT_RUN_TYPE_FILL_BUF
/*
- * If indicated by flags, this represents the number of times to run the USEQ.
- * Note that runs are stopped if the buffer fills prior regardless of the
- * number of runs. For example, if a run_cnt of 10 is specified and 3 runs
- * results in the buffer being full then only 3 runs are executed.
+ * If indicated by flags, this represents the number of times to run
+	 * the USEQ. Note that runs are stopped if the buffer fills first,
+	 * regardless of the number of runs. For example, if a run_cnt of 10 is
+	 * specified and 3 runs result in the buffer being full, then only 3
+	 * runs are executed.
*/
uint8_t run_cnt;
/*
- * This value represents microseconds between runs of the USEQ when running
- * multiple times as indicated by flags.
+ * This value represents microseconds between runs of the USEQ when
+ * running multiple times as indicated by flags.
*/
uint32_t run_interval;
- /* Address of the host buffer where collected USEQ output data will be placed */
+ /*
+ * Address of the host buffer where collected USEQ output data will be
+ * placed
+ */
uint64_t host_dest_addr;
/*
- * Size, in bytes, of the memory associated with host_dest_addr. It is expected
- * that this is >= 4096
+ * Size, in bytes, of the memory associated with host_dest_addr. It is
+ * expected that this is >= 4096
*/
uint32_t host_dest_len;
/* This field is reserved */
@@ -61511,16 +67460,16 @@ typedef struct hwrm_dbg_useq_run_output {
/* Reserved */
uint8_t useq_resp_unused_0[3];
/*
- * The length, in bytes, of the amount of data placed in the corresponding
- * host_dest_addr given in the input message. This will always be a multiple
- * of 4096
+ * The length, in bytes, of the amount of data placed in the
+ * corresponding host_dest_addr given in the input message. This will
+ * always be a multiple of 4096
*/
uint32_t host_dest_filled_len;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
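
/*
 * Illustrative run accounting (a sketch, assuming each run emits the
 * nonzero output_bytes configured at USEQ allocation time): the number
 * of runs that actually execute is capped by whichever comes first,
 * run_cnt or a full host buffer.
 */
static inline uint8_t
useq_effective_runs(uint8_t run_cnt, uint32_t host_dest_len,
    uint16_t output_bytes)
{
	uint32_t fit = host_dest_len / output_bytes;	/* runs that fit */

	return (fit < run_cnt ? (uint8_t)fit : run_cnt);
}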
@@ -61563,14 +67512,15 @@ typedef struct hwrm_dbg_useq_delivery_req_input {
*/
uint64_t resp_addr;
/*
- * Eight destination addresses provide host memory space for FW to deliver
- * USEQ output details. A value of 0x0 for the address can be used to
- * inform FW that the buffer is not available.
+ * Eight destination addresses provide host memory space for FW to
+ * deliver USEQ output details. A value of 0x0 for the address can be
+ * used to inform FW that the buffer is not available.
*/
uint64_t host_dest_addrs[8];
/*
- * The length, in bytes, of the corresponding host_dest_addrs array entry. Each
- * valid hist_dest_addrs entry must have a len of at least 4096 bytes
+ * The length, in bytes, of the corresponding host_dest_addrs array
+	 * entry. Each valid host_dest_addrs entry must have a len of at least
+ * 4096 bytes.
*/
uint32_t host_dest_len[8];
} hwrm_dbg_useq_delivery_req_input_t, *phwrm_dbg_useq_delivery_req_input_t;
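
/*
 * Illustrative slot setup (a sketch; DMA buffer allocation is
 * driver-specific and not shown): unused slots carry address 0x0 so
 * firmware treats them as unavailable, and every valid slot is at
 * least 4096 bytes long.
 */
static inline void
useq_delivery_slots_init(hwrm_dbg_useq_delivery_req_input_t *req,
    uint64_t paddr, uint32_t len)
{
	int i;

	req->host_dest_addrs[0] = paddr;	/* one valid buffer */
	req->host_dest_len[0] = len;		/* len >= 4096 */
	for (i = 1; i < 8; i++) {
		req->host_dest_addrs[i] = 0;	/* not available */
		req->host_dest_len[i] = 0;
	}
}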
@@ -61604,29 +67554,578 @@ typedef struct hwrm_dbg_useq_delivery_req_output {
/* Reserved */
uint8_t useq_resp_unused_0[3];
/*
- * The length, in bytes, of the amount of data placed in the corresponding
- * host_dest_addrs entry given in the input message. This will always be a
- * multiple of 4096
+ * The length, in bytes, of the amount of data placed in the
+ * corresponding host_dest_addrs entry given in the input message. This
+ * will always be a multiple of 4096.
*/
uint32_t host_dest_filled_len[8];
/* This field is reserved */
uint32_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint32_t valid;
} hwrm_dbg_useq_delivery_req_output_t, *phwrm_dbg_useq_delivery_req_output_t;
+/*****************************
+ * hwrm_dbg_log_buffer_flush *
+ *****************************/
+
+
+/* hwrm_dbg_log_buffer_flush_input (size:192b/24B) */
+
+typedef struct hwrm_dbg_log_buffer_flush_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* Type of trace buffer to flush. */
+ uint16_t type;
+ /* SRT trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_SRT_TRACE UINT32_C(0x0)
+ /* SRT2 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_SRT2_TRACE UINT32_C(0x1)
+ /* CRT trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CRT_TRACE UINT32_C(0x2)
+ /* CRT2 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CRT2_TRACE UINT32_C(0x3)
+ /* RIGP0 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_RIGP0_TRACE UINT32_C(0x4)
+ /* L2 HWRM trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_L2_HWRM_TRACE UINT32_C(0x5)
+ /* RoCE HWRM trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_ROCE_HWRM_TRACE UINT32_C(0x6)
+ /* Context Accelerator CPU 0 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CA0_TRACE UINT32_C(0x7)
+ /* Context Accelerator CPU 1 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CA1_TRACE UINT32_C(0x8)
+ /* Context Accelerator CPU 2 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_CA2_TRACE UINT32_C(0x9)
+ /* RIGP1 trace. */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_RIGP1_TRACE UINT32_C(0xa)
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_LAST HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_TYPE_RIGP1_TRACE
+ uint8_t unused_1[2];
+ /* Control flags. */
+ uint32_t flags;
+ /*
+ * When set, it indicates that all buffers should be flushed.
+ * The type will be ignored.
+ */
+ #define HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_FLAGS_FLUSH_ALL_BUFFERS UINT32_C(0x1)
+} hwrm_dbg_log_buffer_flush_input_t, *phwrm_dbg_log_buffer_flush_input_t;
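+
+/*
+ * Illustrative flush-all request (a sketch): with FLUSH_ALL_BUFFERS
+ * set, firmware ignores the 'type' field and flushes every trace
+ * buffer.
+ */
+static inline void
+dbg_log_buffer_flush_all(hwrm_dbg_log_buffer_flush_input_t *req)
+{
+	req->flags = HWRM_DBG_LOG_BUFFER_FLUSH_INPUT_FLAGS_FLUSH_ALL_BUFFERS;
+	req->type = 0;		/* ignored when flushing all buffers */
+}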
+
+/* hwrm_dbg_log_buffer_flush_output (size:128b/16B) */
+
+typedef struct hwrm_dbg_log_buffer_flush_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * Specifies the current host buffer offset. Data up to this offset
+ * has been populated by the firmware. For example, if the firmware
+ * has DMA-ed 8192 bytes to the host buffer, then this field has a
+ * value of 8192. This field rolls over to zero once the firmware
+ * writes the last page of the host buffer
+ */
+ uint32_t current_buffer_offset;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_dbg_log_buffer_flush_output_t, *phwrm_dbg_log_buffer_flush_output_t;
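+
+/*
+ * Illustrative offset accounting (a sketch; 'buf_size' is the size of
+ * the host log buffer the driver registered elsewhere): because
+ * current_buffer_offset rolls over to zero after the last page, the
+ * amount of newly written data is computed modulo the buffer size.
+ */
+static inline uint32_t
+dbg_log_new_bytes(uint32_t prev_off, uint32_t cur_off, uint32_t buf_size)
+{
+	if (cur_off >= prev_off)
+		return (cur_off - prev_off);
+	return (buf_size - prev_off + cur_off);	/* offset wrapped */
+}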
+
+/************************
+ * hwrm_dbg_serdes_test *
+ ************************/
+
+
+/* hwrm_dbg_serdes_test_input (size:320b/40B) */
+
+typedef struct hwrm_dbg_serdes_test_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+	/* Host address the data is to be DMA'd to. */
+ uint64_t resp_data_addr;
+ /*
+	 * This field contains the offset into the captured data at which to
+	 * begin copying data to the host. This should be set to 0 on the
+ * initial call to this command.
+ */
+ uint32_t resp_data_offset;
+ /*
+ * Size of the buffer pointed to by resp_data_addr. The firmware may
+ * use this entire buffer or less than the entire buffer, but never
+ * more.
+ */
+ uint16_t data_len;
+ /*
+ * This field allows this command to request the individual serdes
+ * tests to be run using this command.
+ */
+ uint8_t flags;
+ /* Unused. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_FLAGS_UNUSED_TEST_MASK UINT32_C(0x7)
+ #define HWRM_DBG_SERDES_TEST_INPUT_FLAGS_UNUSED_TEST_SFT 0
+ /* Display eye_projection */
+ #define HWRM_DBG_SERDES_TEST_INPUT_FLAGS_EYE_PROJECTION UINT32_C(0x8)
+ /* Run the PCIe serdes test. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_FLAGS_PCIE_SERDES_TEST UINT32_C(0x10)
+ /* Run the Ethernet serdes test. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_FLAGS_ETHERNET_SERDES_TEST UINT32_C(0x20)
+ uint8_t options;
+ /*
+	 * This field represents the lane number on which tools want to
+	 * retrieve the eye plot. This field is valid only when the
+	 * pcie_serdes_test or ethernet_serdes_test flag is set. For
+	 * pcie_serdes_test, the maximum value is the device pcie lane width
+	 * minus 1. For ethernet_serdes_test, the maximum value is the total
+	 * lanes of the network port minus 1. Valid values range from 0 to 16.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_LANE_NO_MASK UINT32_C(0xf)
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_LANE_NO_SFT 0
+	/* This value indicates the horizontal or vertical plot direction. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_DIRECTION UINT32_C(0x10)
+ /* Value 0 indicates Horizontal plot request. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_DIRECTION_HORIZONTAL (UINT32_C(0x0) << 4)
+ /* Value 1 indicates vertical plot request. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_DIRECTION_VERTICAL (UINT32_C(0x1) << 4)
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_DIRECTION_LAST HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_DIRECTION_VERTICAL
+	/* This value indicates the eye projection type. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_PROJ_TYPE UINT32_C(0x20)
+ /*
+	 * Value 0 indicates left/top projection in horizontal/vertical.
+	 * This value is valid only when the eye_projection flag is set.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_PROJ_TYPE_LEFT_TOP (UINT32_C(0x0) << 5)
+ /*
+	 * Value 1 indicates right/bottom projection in
+	 * horizontal/vertical. This value is valid only when the
+	 * eye_projection flag is set.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (UINT32_C(0x1) << 5)
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_PROJ_TYPE_LAST HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM
+ /* Reserved for future. */
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_RSVD_MASK UINT32_C(0xc0)
+ #define HWRM_DBG_SERDES_TEST_INPUT_OPTIONS_RSVD_SFT 6
+ /*
+ * This field allows this command to request a specific targetBER
+ * to be run using this command.
+ */
+ uint8_t targetBER;
+ /* When collecting an eyescope, measure with a target BER of 1e-8 */
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E8 UINT32_C(0x0)
+ /* When collecting an eyescope, measure with a target BER of 1e-9 */
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E9 UINT32_C(0x1)
+ /* When collecting an eyescope, measure with a target BER of 1e-10 */
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E10 UINT32_C(0x2)
+ /* When collecting an eyescope, measure with a target BER of 1e-11 */
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E11 UINT32_C(0x3)
+ /* When collecting an eyescope, measure with a target BER of 1e-12 */
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E12 UINT32_C(0x4)
+ #define HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_LAST HWRM_DBG_SERDES_TEST_INPUT_TARGETBER_BER_1E12
+ /*
+ * This field allows this command to specify the action to take when
+ * collecting an eyescope.
+ */
+ uint8_t action;
+ /*
+ * Value 0 indicates that collection of the eyescope should be
+ * returned synchronously in the output. This only applies to
+ * a targetBER of 1e-8.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_ACTION_SYNCHRONOUS UINT32_C(0x0)
+ /*
+ * Value 1 indicates to the firmware to start the collection of the
+ * eyescope.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_ACTION_START UINT32_C(0x1)
+ /*
+ * Value 2 indicates to the firmware to respond with a progress
+ * percentage of the current eyescope collection from 0.0 to 100.0.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_ACTION_PROGRESS UINT32_C(0x2)
+ /*
+	 * Value 3 indicates to stop the eyescope. If the progress
+ * percentage is 100.0, the data will be DMAed back to
+ * resp_data_addr.
+ */
+ #define HWRM_DBG_SERDES_TEST_INPUT_ACTION_STOP UINT32_C(0x3)
+ #define HWRM_DBG_SERDES_TEST_INPUT_ACTION_LAST HWRM_DBG_SERDES_TEST_INPUT_ACTION_STOP
+ uint8_t unused[6];
+} hwrm_dbg_serdes_test_input_t, *phwrm_dbg_serdes_test_input_t;
+
+/* hwrm_dbg_serdes_test_output (size:192b/24B) */
+
+typedef struct hwrm_dbg_serdes_test_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Total length of stored data. */
+ uint16_t total_data_len;
+ /*
+ * Amount of data DMA'd to host by this call. The driver can use this
+ * field along with the total_data_len field above to determine the
+ * value to write to the resp_data_offset field in the next call
+ * if more than one call to these commands is required to retrieve all
+ * the stored data.
+ */
+ uint16_t copied_data_len;
+ /*
+ * Percentage of completion of collection of BER values from the
+	 * current eyescope operation in tenths of a percent. 0 (0.0) to
+ * 1000 (100.0).
+ */
+ uint16_t progress_percent;
+	/* Timeout, in seconds, for an individual BER point. */
+ uint16_t timeout;
+ uint8_t flags;
+ /*
+ * This value indicates the structure of data returned by the
+ * firmware when DMA'ed to resp_data_addr.
+ */
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_BIT_COUNT_TYPE UINT32_C(0x1)
+ /*
+ * Value 0 indicates that bit_count value is a raw total
+ * such that BER = error_count / bit_count.
+ */
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_TOTAL UINT32_C(0x0)
+ /*
+	 * Value 1 indicates that bit_count is the power of
+	 * 2 that the count is normalized to. A value of 42 indicates
+	 * that BER = error_count / 2^42.
+ */
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 UINT32_C(0x1)
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_BIT_COUNT_TYPE_LAST HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2
+ /* Reserved for future. */
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_RSVD_MASK UINT32_C(0xfe)
+ #define HWRM_DBG_SERDES_TEST_OUTPUT_FLAGS_RSVD_SFT 1
+ uint8_t unused_0;
+ /*
+ * Size of header prepended to the bit_count and error_count array.
+ * Use this value to skip forward to the bit_count and error_count
+ * array.
+ */
+ uint16_t hdr_size;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_dbg_serdes_test_output_t, *phwrm_dbg_serdes_test_output_t;
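+
+/*
+ * Illustrative retrieval loop (a sketch; hwrm_serdes_send() is a
+ * hypothetical stand-in for the driver's HWRM transport, not an API
+ * defined here): the captured data may exceed the response buffer, so
+ * the command is reissued with resp_data_offset advanced by
+ * copied_data_len until total_data_len bytes have been gathered.
+ */
+extern int hwrm_serdes_send(hwrm_dbg_serdes_test_input_t *,
+    hwrm_dbg_serdes_test_output_t *);
+
+static int
+dbg_serdes_fetch_all(hwrm_dbg_serdes_test_input_t *req,
+    hwrm_dbg_serdes_test_output_t *resp)
+{
+	uint32_t got = 0;
+	int rc;
+
+	req->resp_data_offset = 0;
+	do {
+		rc = hwrm_serdes_send(req, resp);	/* hypothetical */
+		if (rc != 0)
+			return (rc);
+		got += resp->copied_data_len;
+		req->resp_data_offset = got;
+	} while (got < resp->total_data_len);
+	return (0);
+}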
+
+/*****************************
+ * hwrm_dbg_coredump_capture *
+ *****************************/
+
+
+/* hwrm_dbg_coredump_capture_input (size:128b/16B) */
+
+typedef struct hwrm_dbg_coredump_capture_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_dbg_coredump_capture_input_t, *phwrm_dbg_coredump_capture_input_t;
+
+/* hwrm_dbg_coredump_capture_output (size:128b/16B) */
+
+typedef struct hwrm_dbg_coredump_capture_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_dbg_coredump_capture_output_t, *phwrm_dbg_coredump_capture_output_t;
+
+/****************************
+ * hwrm_dbg_sim_cable_state *
+ ****************************/
+
+
+/* hwrm_dbg_sim_cable_state_input (size:192b/24B) */
+
+typedef struct hwrm_dbg_sim_cable_state_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* This field allows this command to specify the action to take. */
+ uint8_t action;
+ /* Value 0 indicates to the firmware to insert the cable. */
+ #define HWRM_DBG_SIM_CABLE_STATE_INPUT_ACTION_INSERT UINT32_C(0x0)
+ /* Value 1 indicates to the firmware to remove the cable. */
+ #define HWRM_DBG_SIM_CABLE_STATE_INPUT_ACTION_REMOVE UINT32_C(0x1)
+ #define HWRM_DBG_SIM_CABLE_STATE_INPUT_ACTION_LAST HWRM_DBG_SIM_CABLE_STATE_INPUT_ACTION_REMOVE
+ uint8_t unused_0[7];
+} hwrm_dbg_sim_cable_state_input_t, *phwrm_dbg_sim_cable_state_input_t;
+
+/* hwrm_dbg_sim_cable_state_output (size:128b/16B) */
+
+typedef struct hwrm_dbg_sim_cable_state_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_dbg_sim_cable_state_output_t, *phwrm_dbg_sim_cable_state_output_t;
+
+/*******************
+ * hwrm_dbg_ptrace *
+ *******************/
+
+
+/* hwrm_dbg_ptrace_input (size:320b/40B) */
+
+typedef struct hwrm_dbg_ptrace_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * Physical address pointer pointing to a host buffer holding the PDI
+	 * command's input request. This can be either a host physical address
+ * (HPA) or a guest physical address (GPA) and must point to a
+ * physically contiguous block of memory.
+ */
+ uint64_t pdi_cmd_buf_addr;
+ /*
+	 * Physical address pointer pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t pdi_resp_buf_addr;
+ /* Host PDI request buffer length. */
+ uint32_t pdi_req_buf_len;
+ uint16_t seq_no;
+ uint16_t flags;
+ /*
+	 * When this flag is set, register access will be enabled for the
+ * ICAP Tx/Egress block.
+ */
+ #define HWRM_DBG_PTRACE_INPUT_FLAGS_SELECT_IN UINT32_C(0x1)
+ /*
+	 * When this flag is set, register access will be enabled for the
+ * ICAP Rx/Ingress block.
+ */
+ #define HWRM_DBG_PTRACE_INPUT_FLAGS_SELECT_OUT UINT32_C(0x2)
+ /*
+	 * When this flag is set, capture will be started for both Tx and
+ * Rx directions simultaneously.
+ */
+ #define HWRM_DBG_PTRACE_INPUT_FLAGS_GLOBAL_START UINT32_C(0x4)
+ /*
+	 * When this flag is set, capture will be stopped for both Tx and
+ * Rx directions simultaneously.
+ */
+ #define HWRM_DBG_PTRACE_INPUT_FLAGS_GLOBAL_STOP UINT32_C(0x8)
+} hwrm_dbg_ptrace_input_t, *phwrm_dbg_ptrace_input_t;
+
+/* hwrm_dbg_ptrace_output (size:128b/16B) */
+
+typedef struct hwrm_dbg_ptrace_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint16_t flags;
+ /*
+ * When this flag is set, it indicates that there is more data
+ * available.
+ * Issue the request again with the next sequence number.
+ */
+ #define HWRM_DBG_PTRACE_OUTPUT_FLAGS_MORE UINT32_C(0x1)
+ uint16_t data_len;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing
+ * a command completion or response to an internal processor, the order
+ * of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_dbg_ptrace_output_t, *phwrm_dbg_ptrace_output_t;
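+
+/*
+ * Illustrative paging loop (a sketch; hwrm_ptrace_send() is a
+ * hypothetical stand-in for the driver's HWRM transport): while the
+ * response carries FLAGS_MORE, the request is reissued with the next
+ * sequence number to drain the remaining capture data.
+ */
+extern int hwrm_ptrace_send(hwrm_dbg_ptrace_input_t *,
+    hwrm_dbg_ptrace_output_t *);
+
+static int
+dbg_ptrace_drain(hwrm_dbg_ptrace_input_t *req, hwrm_dbg_ptrace_output_t *resp)
+{
+	int rc;
+
+	req->seq_no = 0;
+	do {
+		rc = hwrm_ptrace_send(req, resp);	/* hypothetical */
+		if (rc != 0)
+			return (rc);
+		req->seq_no++;
+	} while (resp->flags & HWRM_DBG_PTRACE_OUTPUT_FLAGS_MORE);
+	return (0);
+}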
+
/**************************
* hwrm_nvm_raw_write_blk *
**************************/
-/* hwrm_nvm_raw_write_blk_input (size:256b/32B) */
+/* hwrm_nvm_raw_write_blk_input (size:320b/40B) */
typedef struct hwrm_nvm_raw_write_blk_input {
/* The HWRM command request type. */
@@ -61664,11 +68163,22 @@ typedef struct hwrm_nvm_raw_write_blk_input {
uint64_t host_src_addr;
/*
* 32-bit Destination Address.
- * This is the NVRAM byte-offset where the source data will be written to.
+ * This is the NVRAM byte-offset where the source data will be written
+ * to.
*/
uint32_t dest_addr;
/* Length of data to be written, in bytes. */
uint32_t len;
+ uint8_t flags;
+ /*
+	 * This bit is only used when an external secure SoC is used for
+	 * secure boot. It differentiates between writes to the NIC and
+	 * writes to the Security SoC non-volatile storage on the device.
+	 * If this bit is set, then this write is for the Security SoC
+	 * non-volatile storage on the device.
+ */
+ #define HWRM_NVM_RAW_WRITE_BLK_INPUT_FLAGS_SECURITY_SOC_NVM UINT32_C(0x1)
+ uint8_t unused_0[7];
} hwrm_nvm_raw_write_blk_input_t, *phwrm_nvm_raw_write_blk_input_t;
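
/*
 * Illustrative flag use (a sketch): directing the raw block write at
 * the Security SoC non-volatile storage instead of the NIC's own
 * NVRAM.
 */
static inline void
nvm_raw_write_blk_target_soc(hwrm_nvm_raw_write_blk_input_t *req)
{
	req->flags = HWRM_NVM_RAW_WRITE_BLK_INPUT_FLAGS_SECURITY_SOC_NVM;
}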
/* hwrm_nvm_raw_write_blk_output (size:128b/16B) */
@@ -61685,9 +68195,9 @@ typedef struct hwrm_nvm_raw_write_blk_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -61758,9 +68268,9 @@ typedef struct hwrm_nvm_read_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -61771,7 +68281,7 @@ typedef struct hwrm_nvm_read_output {
*********************/
-/* hwrm_nvm_raw_dump_input (size:256b/32B) */
+/* hwrm_nvm_raw_dump_input (size:320b/40B) */
typedef struct hwrm_nvm_raw_dump_input {
/* The HWRM command request type. */
@@ -61811,6 +68321,16 @@ typedef struct hwrm_nvm_raw_dump_input {
uint32_t offset;
/* Total length of NVRAM contents to be read, in bytes. */
uint32_t len;
+ uint8_t flags;
+ /*
+	 * This bit is only used when an external secure SoC is used for
+	 * secure boot. It differentiates between reads from the NIC and
+	 * reads from the Security SoC non-volatile storage on the device.
+	 * If this bit is set, then this read is for the Security SoC
+	 * non-volatile storage on the device.
+ */
+ #define HWRM_NVM_RAW_DUMP_INPUT_FLAGS_SECURITY_SOC_NVM UINT32_C(0x1)
+ uint8_t unused_0[7];
} hwrm_nvm_raw_dump_input_t, *phwrm_nvm_raw_dump_input_t;
/* hwrm_nvm_raw_dump_output (size:128b/16B) */
@@ -61827,9 +68347,9 @@ typedef struct hwrm_nvm_raw_dump_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -61892,9 +68412,9 @@ typedef struct hwrm_nvm_get_dir_entries_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -61956,9 +68476,9 @@ typedef struct hwrm_nvm_get_dir_info_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62012,7 +68532,8 @@ typedef struct hwrm_nvm_write_input {
uint16_t dir_type;
/*
* Directory ordinal.
- * The 0-based instance of the combined Directory Entry Type and Extension.
+ * The 0-based instance of the combined Directory Entry Type and
+ * Extension.
*/
uint16_t dir_ordinal;
/*
@@ -62060,13 +68581,14 @@ typedef struct hwrm_nvm_write_input {
* The requested length of the allocated NVM for the item, in bytes.
* This value may be greater than or equal to the specified data
* length (dir_data_length).
- * If this value is less than the specified data length, it will be ignored.
- * The response will contain the actual allocated item length,
+ * If this value is less than the specified data length, it will be
+ * ignored. The response will contain the actual allocated item length,
* which may be greater than the requested item length.
* The purpose for allocating more than the required number of bytes
* for an item's data is to pre-allocate extra storage (padding) to
* accommodate the potential future growth of an item (e.g. upgraded
- * firmware with a size increase, log growth, expanded configuration data).
+ * firmware with a size increase, log growth, expanded configuration
+ * data).
*/
uint32_t dir_item_length;
/*
@@ -62106,9 +68628,9 @@ typedef struct hwrm_nvm_write_output {
uint8_t unused_0;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62216,9 +68738,9 @@ typedef struct hwrm_nvm_modify_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62320,9 +68842,9 @@ typedef struct hwrm_nvm_find_dir_entry_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62383,9 +68905,9 @@ typedef struct hwrm_nvm_erase_dir_entry_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62396,7 +68918,7 @@ typedef struct hwrm_nvm_erase_dir_entry_output {
*************************/
-/* hwrm_nvm_get_dev_info_input (size:128b/16B) */
+/* hwrm_nvm_get_dev_info_input (size:192b/24B) */
typedef struct hwrm_nvm_get_dev_info_input {
/* The HWRM command request type. */
@@ -62427,9 +68949,20 @@ typedef struct hwrm_nvm_get_dev_info_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint8_t flags;
+ /*
+	 * This bit is only used when an external secure SoC is used for
+	 * secure boot. It differentiates between device information for
+	 * the NIC and for the Security SoC non-volatile storage on the
+	 * device. If this bit is set, then device information is
+	 * returned for the Security SoC non-volatile storage on the
+	 * device.
+ */
+ #define HWRM_NVM_GET_DEV_INFO_INPUT_FLAGS_SECURITY_SOC_NVM UINT32_C(0x1)
+ uint8_t unused_0[7];
} hwrm_nvm_get_dev_info_input_t, *phwrm_nvm_get_dev_info_input_t;
-/* hwrm_nvm_get_dev_info_output (size:640b/80B) */
+/* hwrm_nvm_get_dev_info_output (size:768b/96B) */
typedef struct hwrm_nvm_get_dev_info_output {
/* The specific error status for the command. */
@@ -62450,7 +68983,7 @@ typedef struct hwrm_nvm_get_dev_info_output {
uint32_t nvram_size;
uint32_t reserved_size;
/*
- * Available size that can be used, in bytes. Available size is the
+ * Available size that can be used, in bytes. Available size is the
	 * NVRAM size minus the used size and reserved size.
*/
uint32_t available_size;
@@ -62534,12 +69067,54 @@ typedef struct hwrm_nvm_get_dev_info_output {
* of the roce firmware.
*/
uint16_t roce_fw_patch;
+ /*
+ * This field represents the major version of network control firmware,
+ * stored in the flash.
+ */
+ uint16_t netctrl_fw_major;
+ /*
+ * This field represents the minor version of network control firmware,
+ * stored in the flash.
+ */
+ uint16_t netctrl_fw_minor;
+ /*
+ * This field represents the build version of network control firmware,
+ * stored in the flash.
+ */
+ uint16_t netctrl_fw_build;
+ /*
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the network control firmware.
+ */
+ uint16_t netctrl_fw_patch;
+ /*
+ * This field represents the major version of SRT2 firmware, stored in
+ * the flash.
+ */
+ uint16_t srt2_fw_major;
+ /*
+ * This field represents the minor version of SRT2 firmware, stored in
+ * the flash.
+ */
+ uint16_t srt2_fw_minor;
+ /*
+ * This field represents the build version of SRT2 firmware, stored in
+ * the flash.
+ */
+ uint16_t srt2_fw_build;
+ /*
+ * This field can be used to represent firmware branches or customer
+ * specific releases tied to a specific (major, minor, build) version
+ * of the SRT2 firmware.
+ */
+ uint16_t srt2_fw_patch;
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
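
/*
 * Illustrative version formatting (a sketch; snprintf as available in
 * the kernel via sys/systm.h): the four-part firmware versions above
 * are conventionally rendered as "major.minor.build.patch".
 */
static inline void
nvm_fmt_srt2_ver(const hwrm_nvm_get_dev_info_output_t *out,
    char *buf, size_t len)
{
	snprintf(buf, len, "%u.%u.%u.%u", out->srt2_fw_major,
	    out->srt2_fw_minor, out->srt2_fw_build, out->srt2_fw_patch);
}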
@@ -62625,9 +69200,9 @@ typedef struct hwrm_nvm_mod_dir_entry_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62703,9 +69278,9 @@ typedef struct hwrm_nvm_verify_update_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62810,8 +69385,9 @@ typedef struct hwrm_nvm_install_update_output {
uint16_t resp_len;
/*
* Bit-mask of successfully installed items.
- * Bit-0 corresponding to the first packaged item, Bit-1 for the second item, etc.
- * A value of 0 indicates that no items were successfully installed.
+	 * Bit-0 corresponds to the first packaged item, Bit-1 to the second
+	 * item, etc. A value of 0 indicates that no items were successfully
+	 * installed.
*/
uint64_t installed_items;
	/* result is 8 bits, corresponding to BCMRETVAL error codes */
@@ -62905,9 +69481,9 @@ typedef struct hwrm_nvm_install_update_output {
uint8_t unused_0[4];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -62987,9 +69563,9 @@ typedef struct hwrm_nvm_flush_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63079,8 +69655,8 @@ typedef struct hwrm_nvm_get_variable_input {
uint16_t index_3;
uint8_t flags;
/*
- * When this bit is set to 1, the factory default value will be returned,
- * 0 returns the operational value.
+	 * When this bit is set to 1, the factory default value will be
+	 * returned; 0 returns the operational value.
*/
#define HWRM_NVM_GET_VARIABLE_INPUT_FLAGS_FACTORY_DFLT UINT32_C(0x1)
uint8_t unused_0;
@@ -63117,9 +69693,9 @@ typedef struct hwrm_nvm_get_variable_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63250,9 +69826,9 @@ typedef struct hwrm_nvm_set_variable_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63371,9 +69947,9 @@ typedef struct hwrm_nvm_validate_option_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63482,9 +70058,9 @@ typedef struct hwrm_nvm_factory_defaults_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63572,9 +70148,9 @@ typedef struct hwrm_nvm_req_arbitration_output {
uint8_t unused_0[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63636,9 +70212,9 @@ typedef struct hwrm_nvm_defrag_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -63660,13 +70236,163 @@ typedef struct hwrm_nvm_defrag_cmd_err {
uint8_t unused_0[7];
} hwrm_nvm_defrag_cmd_err_t, *phwrm_nvm_defrag_cmd_err_t;
+/*******************************
+ * hwrm_nvm_get_vpd_field_info *
+ *******************************/
+
+
+/* hwrm_nvm_get_vpd_field_info_input (size:192b/24B) */
+
+typedef struct hwrm_nvm_get_vpd_field_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * Tag ID of the requested field. To request the Product Name,
+ * a value of [0x00, 0x82] should be used. All other fields
+ * would use the two byte hexadecimal value of the ASCII
+ * characters. The first letter of the ASCII keyword is recorded
+ * in tag_id[0] and the next letter in tag_id[1].
+ */
+ uint8_t tag_id[2];
+ uint8_t unused_0[6];
+} hwrm_nvm_get_vpd_field_info_input_t, *phwrm_nvm_get_vpd_field_info_input_t;
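A minimal sketch of the tag_id encoding described above (the request variable is hypothetical, not part of this header):

	hwrm_nvm_get_vpd_field_info_input_t req = {0};

	/* Product Name uses the fixed pair [0x00, 0x82]. */
	req.tag_id[0] = 0x00;
	req.tag_id[1] = 0x82;

	/* Keyword fields use their ASCII bytes, e.g. the "SN" keyword: */
	req.tag_id[0] = 'S';
	req.tag_id[1] = 'N';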
+
+/* hwrm_nvm_get_vpd_field_info_output (size:2176b/272B) */
+
+typedef struct hwrm_nvm_get_vpd_field_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Data retrieved from VPD field */
+ uint8_t data[256];
+ /* size of data retrieved in bytes */
+ uint16_t data_len;
+ uint8_t unused_0[5];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_nvm_get_vpd_field_info_output_t, *phwrm_nvm_get_vpd_field_info_output_t;
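The write-ordering rule on `valid` is what response poll loops rely on; a hedged sketch (no timeout shown, and `resp_buf` is a placeholder for the DMA response buffer):

	volatile hwrm_nvm_get_vpd_field_info_output_t *resp = resp_buf;

	/* Firmware writes `valid` last, so reading 1 means the rest is stable. */
	while (resp->valid != 1)
		;	/* a real driver would bound this loop with a timeout */
	uint16_t len = le16toh(resp->data_len);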
+
+/*******************************
+ * hwrm_nvm_set_vpd_field_info *
+ *******************************/
+
+
+/* hwrm_nvm_set_vpd_field_info_input (size:256b/32B) */
+
+typedef struct hwrm_nvm_set_vpd_field_info_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which
+	 * the command's response data will be written. This can be either
+	 * a host physical address (HPA) or a guest physical address (GPA)
+	 * and must point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+	 * This is the host address from which the
+	 * VPD data value will be copied.
+ */
+ uint64_t host_src_addr;
+ /*
+	 * Tag ID of the requested field. To request the Product Name,
+ * a value of [0x00, 0x82] should be used. All other fields
+ * would use the two byte hexadecimal value of the ASCII
+ * characters. The first letter of the ASCII keyword is recorded
+ * in tag_id[0] and the next letter in tag_id[1].
+ */
+ uint8_t tag_id[2];
+ /* size of data in bytes */
+ uint16_t data_len;
+ uint8_t unused_0[4];
+} hwrm_nvm_set_vpd_field_info_input_t, *phwrm_nvm_set_vpd_field_info_input_t;
+
+/* hwrm_nvm_set_vpd_field_info_output (size:128b/16B) */
+
+typedef struct hwrm_nvm_set_vpd_field_info_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_nvm_set_vpd_field_info_output_t, *phwrm_nvm_set_vpd_field_info_output_t;
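A sketch of the corresponding write path, assuming a DMA-safe source buffer whose bus address is `buf_paddr` and a value length `len` (both placeholders):

	hwrm_nvm_set_vpd_field_info_input_t req = {0};

	req.host_src_addr = htole64(buf_paddr);	/* where the new value is read from */
	req.tag_id[0] = 'S';			/* ASCII keyword bytes, as for the get */
	req.tag_id[1] = 'N';
	req.data_len = htole16(len);		/* size of the value in bytes */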
+
#define ROCE_SP_HSI_VERSION_MAJOR 1
#define ROCE_SP_HSI_VERSION_MINOR 8
#define ROCE_SP_HSI_VERSION_UPDATE 4
#define ROCE_SP_HSI_VERSION_STR "1.8.4"
/*
- * Following is the signature for ROCE_SP_HSI message field that indicates not
- * applicable (All F's). Need to cast it the size of the field if needed.
+ * Following is the signature for a ROCE_SP_HSI message field that
+ * indicates not applicable (All F's). Cast it to the size of the
+ * field if needed.
*/
#define ROCE_SP_HSI_NA_SIGNATURE ((uint32_t)(-1))
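The cast mentioned above is mechanical; for narrower fields it is simply (illustrative only):

	uint16_t pkey = (uint16_t)ROCE_SP_HSI_NA_SIGNATURE;	/* 0xffff */
	uint8_t tclass = (uint8_t)ROCE_SP_HSI_NA_SIGNATURE;	/* 0xff */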
@@ -63708,107 +70434,161 @@ typedef struct cmdq_base {
* Create QP command allocates QP context with the specified
* SQ, RQ/SRQ, CQ and other parameters.
*/
- #define CMDQ_BASE_OPCODE_CREATE_QP UINT32_C(0x1)
+ #define CMDQ_BASE_OPCODE_CREATE_QP UINT32_C(0x1)
/*
* Destroy QP command deletes the QP context and ceases
* any further reference.
*/
- #define CMDQ_BASE_OPCODE_DESTROY_QP UINT32_C(0x2)
+ #define CMDQ_BASE_OPCODE_DESTROY_QP UINT32_C(0x2)
/*
* Modify QP command changes QP states and other QP specific
* parameters.
*/
- #define CMDQ_BASE_OPCODE_MODIFY_QP UINT32_C(0x3)
+ #define CMDQ_BASE_OPCODE_MODIFY_QP UINT32_C(0x3)
/* Query QP command retrieves info about the specified QP. */
- #define CMDQ_BASE_OPCODE_QUERY_QP UINT32_C(0x4)
+ #define CMDQ_BASE_OPCODE_QUERY_QP UINT32_C(0x4)
/* Create SRQ command allocates a SRQ with the specified parameters. */
- #define CMDQ_BASE_OPCODE_CREATE_SRQ UINT32_C(0x5)
+ #define CMDQ_BASE_OPCODE_CREATE_SRQ UINT32_C(0x5)
/* Destroy SRQ command deletes and flushes the specified SRQ. */
- #define CMDQ_BASE_OPCODE_DESTROY_SRQ UINT32_C(0x6)
+ #define CMDQ_BASE_OPCODE_DESTROY_SRQ UINT32_C(0x6)
	/* Query SRQ command retrieves info about the specified SRQ. */
- #define CMDQ_BASE_OPCODE_QUERY_SRQ UINT32_C(0x8)
+ #define CMDQ_BASE_OPCODE_QUERY_SRQ UINT32_C(0x8)
/* Create CQ command allocates a CQ with the specified parameters. */
- #define CMDQ_BASE_OPCODE_CREATE_CQ UINT32_C(0x9)
+ #define CMDQ_BASE_OPCODE_CREATE_CQ UINT32_C(0x9)
/* Destroy CQ command deletes and flushes the specified CQ. */
- #define CMDQ_BASE_OPCODE_DESTROY_CQ UINT32_C(0xa)
+ #define CMDQ_BASE_OPCODE_DESTROY_CQ UINT32_C(0xa)
/* Resize CQ command resizes the specified CQ. */
- #define CMDQ_BASE_OPCODE_RESIZE_CQ UINT32_C(0xc)
+ #define CMDQ_BASE_OPCODE_RESIZE_CQ UINT32_C(0xc)
/*
* Allocate MRW command allocates a MR/MW with the specified parameters
* and returns the region's L_KEY/R_KEY
*/
- #define CMDQ_BASE_OPCODE_ALLOCATE_MRW UINT32_C(0xd)
- /* De-allocate key command frees a MR/MW entry associated with the specified key. */
- #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY UINT32_C(0xe)
+ #define CMDQ_BASE_OPCODE_ALLOCATE_MRW UINT32_C(0xd)
+ /*
+ * De-allocate key command frees a MR/MW entry associated with the
+ * specified key.
+ */
+ #define CMDQ_BASE_OPCODE_DEALLOCATE_KEY UINT32_C(0xe)
/* Register MR command registers memory to the specified MR. */
- #define CMDQ_BASE_OPCODE_REGISTER_MR UINT32_C(0xf)
+ #define CMDQ_BASE_OPCODE_REGISTER_MR UINT32_C(0xf)
/* Deregister MR command de-registers memory from the specified MR. */
- #define CMDQ_BASE_OPCODE_DEREGISTER_MR UINT32_C(0x10)
+ #define CMDQ_BASE_OPCODE_DEREGISTER_MR UINT32_C(0x10)
/* Add GID command adds a GID to the local address table. */
- #define CMDQ_BASE_OPCODE_ADD_GID UINT32_C(0x11)
+ #define CMDQ_BASE_OPCODE_ADD_GID UINT32_C(0x11)
/* Delete GID command deletes a GID from the local address table. */
- #define CMDQ_BASE_OPCODE_DELETE_GID UINT32_C(0x12)
+ #define CMDQ_BASE_OPCODE_DELETE_GID UINT32_C(0x12)
/* Modify GID command modifies a GID in the local address table. */
- #define CMDQ_BASE_OPCODE_MODIFY_GID UINT32_C(0x17)
+ #define CMDQ_BASE_OPCODE_MODIFY_GID UINT32_C(0x17)
/* Query GID command queries a GID in the local address table. */
- #define CMDQ_BASE_OPCODE_QUERY_GID UINT32_C(0x18)
+ #define CMDQ_BASE_OPCODE_QUERY_GID UINT32_C(0x18)
/* Create QP1 command allocates a QP1 only. */
- #define CMDQ_BASE_OPCODE_CREATE_QP1 UINT32_C(0x13)
+ #define CMDQ_BASE_OPCODE_CREATE_QP1 UINT32_C(0x13)
/* Destroy QP1 command deletes and flushes the specified QP1. */
- #define CMDQ_BASE_OPCODE_DESTROY_QP1 UINT32_C(0x14)
+ #define CMDQ_BASE_OPCODE_DESTROY_QP1 UINT32_C(0x14)
/* Create AH command allocates an AH with the specified parameters. */
- #define CMDQ_BASE_OPCODE_CREATE_AH UINT32_C(0x15)
+ #define CMDQ_BASE_OPCODE_CREATE_AH UINT32_C(0x15)
/* Destroy AH command deletes the specified AH. */
- #define CMDQ_BASE_OPCODE_DESTROY_AH UINT32_C(0x16)
+ #define CMDQ_BASE_OPCODE_DESTROY_AH UINT32_C(0x16)
/*
* Initialize firmware command initializes the firmware with
* the specified parameters.
*/
- #define CMDQ_BASE_OPCODE_INITIALIZE_FW UINT32_C(0x80)
+ #define CMDQ_BASE_OPCODE_INITIALIZE_FW UINT32_C(0x80)
/* De-initialize firmware command deinitializes the firmware. */
- #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW UINT32_C(0x81)
+ #define CMDQ_BASE_OPCODE_DEINITIALIZE_FW UINT32_C(0x81)
/* Stop the function */
- #define CMDQ_BASE_OPCODE_STOP_FUNC UINT32_C(0x82)
+ #define CMDQ_BASE_OPCODE_STOP_FUNC UINT32_C(0x82)
/* Query the HW capabilities for the function. */
- #define CMDQ_BASE_OPCODE_QUERY_FUNC UINT32_C(0x83)
+ #define CMDQ_BASE_OPCODE_QUERY_FUNC UINT32_C(0x83)
/*
* Set the following resources for the function:
* - Max QP, CQ, MR+MW, SRQ per PF
* - Max QP, CQ, MR+MW, SRQ per VF
*/
- #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES UINT32_C(0x84)
- /* Read the current state of any internal resource context. Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_READ_CONTEXT UINT32_C(0x85)
- /* Send a request from VF to pass a command to the PF. VF HSI is suspended until the PF returns the response */
- #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST UINT32_C(0x86)
- /* Read VF memory (primarily to get the backchannel request blob). Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_READ_VF_MEMORY UINT32_C(0x87)
- /* Write VF memory (primarily to put the backchannel response blob), and reenable VF HSI (post a CAG completion to it). Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST UINT32_C(0x88)
- /* Extend resource (QPC, MRW, CQ, SRQ) array, after the host allocates more. Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRRAY UINT32_C(0x89)
+ #define CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES UINT32_C(0x84)
+ /*
+ * Read the current state of any internal resource context. Can only be
+ * issued from a PF.
+ */
+ #define CMDQ_BASE_OPCODE_READ_CONTEXT UINT32_C(0x85)
+ /*
+ * Send a request from VF to pass a command to the PF. VF HSI is
+	 * suspended until the PF returns the response.
+ */
+ #define CMDQ_BASE_OPCODE_VF_BACKCHANNEL_REQUEST UINT32_C(0x86)
+ /*
+ * Read VF memory (primarily to get the backchannel request blob). Can
+ * only be issued from a PF.
+ */
+ #define CMDQ_BASE_OPCODE_READ_VF_MEMORY UINT32_C(0x87)
+ /*
+ * Write VF memory (primarily to put the backchannel response blob),
+ * and reenable VF HSI (post a CAG completion to it). Can only be
+ * issued from a PF.
+ */
+ #define CMDQ_BASE_OPCODE_COMPLETE_VF_REQUEST UINT32_C(0x88)
+ /*
+ * Deprecated.
+ * Extend resource (QPC, MRW, CQ, SRQ) array, after the host allocates
+ * more. Can only be issued from a PF.
+ */
+ #define CMDQ_BASE_OPCODE_EXTEND_CONTEXT_ARRAY_DEPRECATED UINT32_C(0x89)
/* Map TC to COS. Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS UINT32_C(0x8a)
+ #define CMDQ_BASE_OPCODE_MAP_TC_TO_COS UINT32_C(0x8a)
/* Query version. */
- #define CMDQ_BASE_OPCODE_QUERY_VERSION UINT32_C(0x8b)
+ #define CMDQ_BASE_OPCODE_QUERY_VERSION UINT32_C(0x8b)
/* Modify congestion control. Can only be issued from a PF. */
- #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC UINT32_C(0x8c)
+ #define CMDQ_BASE_OPCODE_MODIFY_ROCE_CC UINT32_C(0x8c)
/* Query congestion control. */
- #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC UINT32_C(0x8d)
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_CC UINT32_C(0x8d)
/* Query RoCE statistics. */
- #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS UINT32_C(0x8e)
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS UINT32_C(0x8e)
/* Set LAG mode. */
- #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE UINT32_C(0x8f)
+ #define CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE UINT32_C(0x8f)
/* Modify CQ */
- #define CMDQ_BASE_OPCODE_MODIFY_CQ UINT32_C(0x90)
+ #define CMDQ_BASE_OPCODE_MODIFY_CQ UINT32_C(0x90)
/*
- * Query QP for a PF other than the requesting PF. Also can query for more
- * than one QP.
+ * Query QP for a PF other than the requesting PF. Also can query for
+ * more than one QP.
*/
- #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND UINT32_C(0x91)
+ #define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND UINT32_C(0x91)
/* Query extended RoCE statistics. */
- #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT UINT32_C(0x92)
- #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT UINT32_C(0x92)
+ /*
+ * This command updates the QP context id ranges on the PF,
+ * to orchestrate QP context id range migration.
+ * This command is valid for devices that
+ * support the pseudo-static QP allocation feature.
+ */
+ #define CMDQ_BASE_OPCODE_ORCHESTRATE_QID_MIGRATION UINT32_C(0x93)
+ /*
+ * This command allocates a batch of the requested count of QPs
+ * in a sequential range.
+ */
+ #define CMDQ_BASE_OPCODE_CREATE_QP_BATCH UINT32_C(0x94)
+ /*
+ * This command deletes a batch of the requested count of QPs.
+ * The starting QP ID can be specified to request a batch deletion
+ * of a sequential range.
+ */
+ #define CMDQ_BASE_OPCODE_DESTROY_QP_BATCH UINT32_C(0x95)
+ /*
+ * This command allocates an extended RoCE statistics context
+ * that supports periodic DMA to a host address. The extended
+ * statistics context id can be assigned by the driver
+ * via `create_qp`, `create_qp_batch`, or `modify_qp` to a specific QP,
+ * a subset of QPs or to all QPs of a specific function.
+ * These statistics can be queried via `query_roce_stats_ext_v2`.
+ */
+ #define CMDQ_BASE_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x96)
+ /* This command deallocates an extended RoCE statistics context. */
+ #define CMDQ_BASE_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x97)
+ /*
+ * This command queries extended RoCE statistics for context
+ * allocated via `allocate_roce_stats_ext_ctx`.
+ */
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT_V2 UINT32_C(0x98)
+ #define CMDQ_BASE_OPCODE_LAST CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT_V2
/* Size of the command in 16-byte units. */
uint8_t cmd_size;
/* Flags and attribs of the command. */
@@ -63853,6 +70633,390 @@ typedef struct creq_base {
uint8_t reserved48[6];
} creq_base_t, *pcreq_base_t;
+/* creq_resp_sb_hdr (size:64b/8B) */
+
+typedef struct creq_resp_sb_hdr {
+ /* Command opcode. */
+ uint8_t opcode;
+ /* Query QP command response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_QP UINT32_C(0x4)
+ /* Query SRQ command response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_SRQ UINT32_C(0x8)
+ /* Query GID command response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_GID UINT32_C(0x18)
+ /* Query info PF command response */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_FUNC UINT32_C(0x83)
+ /* Query version response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_VERSION UINT32_C(0x8b)
+ /* Query congestion control response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_ROCE_CC UINT32_C(0x8d)
+ /* Query RoCE statistics response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_ROCE_STATS UINT32_C(0x8e)
+ /* Query QP extended response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_QP_EXTEND UINT32_C(0x91)
+ /* Query extended RoCE statistics response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_ROCE_STATS_EXT UINT32_C(0x92)
+ /* Query extended RoCE statistics v2 response. */
+ #define CREQ_RESP_SB_HDR_OPCODE_QUERY_ROCE_STATS_EXT_V2 UINT32_C(0x98)
+ #define CREQ_RESP_SB_HDR_OPCODE_LAST CREQ_RESP_SB_HDR_OPCODE_QUERY_ROCE_STATS_EXT_V2
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+} creq_resp_sb_hdr_t, *pcreq_resp_sb_hdr_t;
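A consumer might sanity-check this header before parsing the side buffer; the sketch below assumes kernel errno values and that a status of 0 denotes success:

	static int
	creq_resp_sb_check(const creq_resp_sb_hdr_t *sb, uint8_t opcode,
	    uint16_t cookie)
	{
		if (sb->opcode != opcode)		/* response matches command */
			return (EINVAL);
		if (le16toh(sb->cookie) != cookie)	/* driver-supplied handle */
			return (EINVAL);
		return (sb->status == 0 ? 0 : EIO);
	}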
+
+/*
+ * Structure to be used for the qp_params array of
+ * the `create_qp_batch` command.
+ */
+/* create_qp_batch_data (size:768b/96B) */
+
+typedef struct create_qp_batch_data {
+ /* QP handle. */
+ uint64_t qp_handle;
+ /* Create QP flags. */
+ uint32_t qp_flags;
+ /*
+ * SRQ is used.
+ * This flag is not supported on express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_SRQ_USED UINT32_C(0x1)
+ /* post CQE for all SQ WQEs. */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_FORCE_COMPLETION UINT32_C(0x2)
+ /* This QP can use reserved L_Key */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_RESERVED_LKEY_ENABLE UINT32_C(0x4)
+ /* This QP can fast register physical memory */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_FR_PMR_ENABLED UINT32_C(0x8)
+ /* This QP can send variable sized WQEs. */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED UINT32_C(0x10)
+ /*
+ * WQEs with inline data sent on this QP are able to flow
+ * through an optimized transmit path to lower latency. This
+ * transmit path is opportunistic and not guaranteed to always
+ * occur.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED UINT32_C(0x20)
+ /*
+ * For UD QPs the default responder CQE format is `cq_res_ud`.
+ * This flag specifies the `cq_res_ud_cfa` format to be used
+ * instead.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_RESPONDER_UD_CQE_WITH_CFA UINT32_C(0x40)
+ /*
+ * This QP must be included in the extended RoCE statistics
+ * that can be queried via `query_roce_stats_ext`.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXT_STATS_ENABLED UINT32_C(0x80)
+ /* This QP uses express mode. */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXPRESS_MODE_ENABLED UINT32_C(0x100)
+ /* This QP uses the steering tag specified in the command. */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_STEERING_TAG_VALID UINT32_C(0x200)
+ /*
+ * This QP can be used for RDMA Read or Atomic operations.
+ * This value is used to optimize metadata memory allocation
+	 * when the device supports the `internal_queue_memory` feature.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED UINT32_C(0x400)
+ /*
+ * This QP must be included in the extended RoCE statistics context
+	 * specified in the field `ext_stats_ctx_id`.
+ */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_EXT_STATS_CTX_VALID UINT32_C(0x800)
+ /* The schq_id field passed in by the caller is valid. */
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_SCHQ_ID_VALID UINT32_C(0x1000)
+ #define CREATE_QP_BATCH_DATA_QP_FLAGS_LAST CREATE_QP_BATCH_DATA_QP_FLAGS_SCHQ_ID_VALID
+ /* Supported QP types. */
+ uint8_t type;
+ /* Reliable Connection. */
+ #define CREATE_QP_BATCH_DATA_TYPE_RC UINT32_C(0x2)
+ /* Unreliable Datagram. */
+ #define CREATE_QP_BATCH_DATA_TYPE_UD UINT32_C(0x4)
+ /* Raw Ethertype. */
+ #define CREATE_QP_BATCH_DATA_TYPE_RAW_ETHERTYPE UINT32_C(0x6)
+ /* General Services Interface on QP1 over UD. */
+ #define CREATE_QP_BATCH_DATA_TYPE_GSI UINT32_C(0x7)
+ #define CREATE_QP_BATCH_DATA_TYPE_LAST CREATE_QP_BATCH_DATA_TYPE_GSI
+ uint8_t sq_pg_size_sq_lvl;
+ /*
+ * SQ PBL indirect levels.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_MASK UINT32_C(0xf)
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_LVL_0 UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_LVL_1 UINT32_C(0x1)
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_LVL_2 UINT32_C(0x2)
+ #define CREATE_QP_BATCH_DATA_SQ_LVL_LAST CREATE_QP_BATCH_DATA_SQ_LVL_LVL_2
+ /*
+ * SQ page size.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_MASK UINT32_C(0xf0)
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_4K (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_8K (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_64K (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_2M (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_8M (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
+ #define CREATE_QP_BATCH_DATA_SQ_PG_SIZE_LAST CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_1G
+ uint8_t rq_pg_size_rq_lvl;
+ /*
+ * RQ PBL indirect levels.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_MASK UINT32_C(0xf)
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_SFT 0
+ /* PBL pointer is physical start address. */
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_0 UINT32_C(0x0)
+ /* PBL pointer points to PTE table. */
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_1 UINT32_C(0x1)
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_LVL_2 UINT32_C(0x2)
+ #define CREATE_QP_BATCH_DATA_RQ_LVL_LAST CREATE_QP_BATCH_DATA_RQ_LVL_LVL_2
+ /*
+ * RQ page size.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_MASK UINT32_C(0xf0)
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_SFT 4
+ /* 4KB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_4K (UINT32_C(0x0) << 4)
+ /* 8KB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_8K (UINT32_C(0x1) << 4)
+ /* 64KB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_64K (UINT32_C(0x2) << 4)
+ /* 2MB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_2M (UINT32_C(0x3) << 4)
+ /* 8MB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_8M (UINT32_C(0x4) << 4)
+ /* 1GB. */
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
+ #define CREATE_QP_BATCH_DATA_RQ_PG_SIZE_LAST CREATE_QP_BATCH_DATA_RQ_PG_SIZE_PG_1G
+ uint8_t unused_0;
+ /* Doorbell page index. */
+ uint32_t dpi;
+ /*
+ * When the SQ is configured to use variable-size WQE, 'sq_size'
+ * denotes the SQ size with a unit of 16B. When the SQ is configured
+ * to use fixed-size WQE, 'sq_size' denotes the max number of SQ WQEs.
+ */
+ uint32_t sq_size;
+ /* Max number of RQ wqes. */
+ uint32_t rq_size;
+ uint16_t sq_fwo_sq_sge;
+ /*
+ * Max send SGEs per SWQE. This is only applicable to fixed-size
+ * WQE support. On variable-size WQE, this is ignored.
+ */
+ #define CREATE_QP_BATCH_DATA_SQ_SGE_MASK UINT32_C(0xf)
+ #define CREATE_QP_BATCH_DATA_SQ_SGE_SFT 0
+ /*
+ * Offset of First WQE in the first SQ page, in 128 byte units.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_SQ_FWO_MASK UINT32_C(0xfff0)
+ #define CREATE_QP_BATCH_DATA_SQ_FWO_SFT 4
+ uint16_t rq_fwo_rq_sge;
+ /*
+ * Max recv SGEs per RWQE.
+ * On chips with variable-size WQE support, a value of zero implies
+ * 30 SGEs.
+ */
+ #define CREATE_QP_BATCH_DATA_RQ_SGE_MASK UINT32_C(0xf)
+ #define CREATE_QP_BATCH_DATA_RQ_SGE_SFT 0
+ /*
+ * Offset of First WQE in the first RQ page, in 128 byte units.
+ * This field is ignored for express mode QPs.
+ */
+ #define CREATE_QP_BATCH_DATA_RQ_FWO_MASK UINT32_C(0xfff0)
+ #define CREATE_QP_BATCH_DATA_RQ_FWO_SFT 4
+ /* Send CQ context id. */
+ uint32_t scq_cid;
+ /* Receive CQ context id. */
+ uint32_t rcq_cid;
+ /* SRQ context id. */
+ uint32_t srq_cid;
+ /* Protection domain id. */
+ uint32_t pd_id;
+ /*
+ * SQ PBL physical address.
+ * This field is ignored for express mode QPs.
+ */
+ uint64_t sq_pbl;
+ /*
+ * RQ PBL physical address.
+ * This field is ignored for express mode QPs.
+ */
+ uint64_t rq_pbl;
+ /*
+ * IRRQ address. This field is ignored on devices that
+ * support the `internal_queue_memory` feature.
+ */
+ uint64_t irrq_addr;
+ /*
+ * ORRQ address. This field is ignored on devices that
+ * support the `internal_queue_memory` feature.
+ */
+ uint64_t orrq_addr;
+ /*
+ * xid to use for the non-QP1 QP.
+ * The requested xid must be within the valid range
+ * of the predetermined assignment scheme of the
+ * pseudo static QP allocation feature. The valid range
+	 * for the data QPs is determined by the start_qid and
+	 * max_qp fields of the query_func response. When the value is
+	 * zero, firmware will automatically choose an xid from its free
+	 * pool. QP1 allocation, indicated by specifying the `type` field
+	 * as gsi, must specify a request_xid of zero.
+ * This field is ignored on devices that do not support
+ * the pseudo static QP allocation feature.
+ */
+ uint32_t request_xid;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ /*
+ * This value is used to optimize metadata memory allocation when
+	 * the device supports the `internal_queue_memory` feature.
+	 * When the SQ is configured to use variable-size WQEs, the SQ size
+	 * is only specified in units of 16 bytes. This value hints at the
+	 * max number of WQEs that would ever be present on the SQ.
+ */
+ uint16_t sq_max_num_wqes;
+ /* Extended RoCE statistics context id. */
+ uint32_t ext_stats_ctx_id;
+ /*
+ * Identifies the new scheduling queue to associate with
+ * the RoCE QP. A value of zero indicates that the QP is being
+ * created with the default scheduling queue. Can only be specified
+ * by the PF driver. VFs get assigned a scheduling queue based on PF
+ * configuration (via HWRM_FUNC_CFG). Specified scheduling queue id is
+ * allocated by firmware (via HWRM_SCHQ_ALLOC) when the device supports
+ * the `scheduling queue` feature.
+ */
+ uint16_t schq_id;
+ uint16_t reserved16;
+} create_qp_batch_data_t, *pcreate_qp_batch_data_t;
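The *_MASK/*_SFT pairs above pack two subfields into one byte; a minimal sketch for a level-1 SQ PBL of 8KB pages (note the PG_* values are already shifted by SFT 4):

	create_qp_batch_data_t qd = {0};

	qd.sq_pg_size_sq_lvl =
	    (CREATE_QP_BATCH_DATA_SQ_LVL_LVL_1 & CREATE_QP_BATCH_DATA_SQ_LVL_MASK) |
	    CREATE_QP_BATCH_DATA_SQ_PG_SIZE_PG_8K;	/* 0x01 | 0x10 == 0x11 */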
+
+/* Periodic extended RoCE statistics context DMA to host. */
+/* roce_stats_ext_ctx (size:1856b/232B) */
+
+typedef struct roce_stats_ext_ctx {
+ /* Number of transmitted Atomic request packets without errors. */
+ uint64_t tx_atomic_req_pkts;
+ /* Number of transmitted Read request packets without errors. */
+ uint64_t tx_read_req_pkts;
+ /* Number of transmitted Read response packets without errors. */
+ uint64_t tx_read_res_pkts;
+ /* Number of transmitted Write request packets without errors. */
+ uint64_t tx_write_req_pkts;
+ /* Number of transmitted RC Send packets without errors. */
+ uint64_t tx_rc_send_req_pkts;
+ /*
+ * Number of transmitted UD Send (including QP1) packets
+ * without errors.
+ */
+ uint64_t tx_ud_send_req_pkts;
+ /* Number of transmitted CNPs. Includes DCN_CNPs. */
+ uint64_t tx_cnp_pkts;
+ /*
+ * Number of transmitted RoCE packets.
+	 * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t tx_roce_pkts;
+ /*
+ * Number of transmitted RoCE header and payload bytes.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t tx_roce_bytes;
+ /*
+	 * Number of drops that occurred due to lack of buffers.
+ * This count includes RC sends, RC writes with immediate,
+ * UD sends, RawEth, and QP1 packets dropped due to lack of buffers.
+ */
+ uint64_t rx_out_of_buffer_pkts;
+ /* Number of packets that were received out of sequence. */
+ uint64_t rx_out_of_sequence_pkts;
+ /*
+ * Number of duplicate read/atomic requests resulting in responder
+ * hardware retransmission.
+ */
+ uint64_t dup_req;
+ /*
+ * Number of missing response packets resulting in hardware
+ * retransmission.
+ */
+ uint64_t missing_resp;
+ /*
+ * Number of sequence error NAKs received resulting in hardware
+ * retransmission.
+ */
+ uint64_t seq_err_naks_rcvd;
+ /* Number of RNR NAKs received resulting in hardware retransmission. */
+ uint64_t rnr_naks_rcvd;
+ /* Number of timeouts resulting in hardware retransmission. */
+ uint64_t to_retransmits;
+ /* Number of received Atomic request packets without errors. */
+ uint64_t rx_atomic_req_pkts;
+ /* Number of received Read request packets without errors. */
+ uint64_t rx_read_req_pkts;
+ /* Number of received Read response packets without errors. */
+ uint64_t rx_read_res_pkts;
+ /* Number of received Write request packets without errors. */
+ uint64_t rx_write_req_pkts;
+ /* Number of received RC Send packets without errors. */
+ uint64_t rx_rc_send_pkts;
+ /* Number of received UD Send packets without errors. */
+ uint64_t rx_ud_send_pkts;
+ /* Number of received DCN payload cut packets. */
+ uint64_t rx_dcn_payload_cut;
+ /* Number of received ECN-marked packets. */
+ uint64_t rx_ecn_marked_pkts;
+ /* Number of received CNP packets. Includes DCN_CNPs. */
+ uint64_t rx_cnp_pkts;
+ /*
+ * Number of received RoCE packets including RoCE packets with errors.
+	 * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_pkts;
+ /*
+ * Number of received RoCE header and payload bytes including RoCE
+ * packets with errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_bytes;
+ /*
+ * Number of received RoCE packets without errors.
+	 * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_good_pkts;
+ /*
+ * Number of received RoCE header and payload bytes without errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_good_bytes;
+} roce_stats_ext_ctx_t, *proce_stats_ext_ctx_t;
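Four of the counters above each feed requester-side hardware retransmission, so a monitoring sketch (illustrative only; `stats_buf` is a placeholder for a DMAed copy) could aggregate them:

	const roce_stats_ext_ctx_t *ctx = stats_buf;
	uint64_t retx = le64toh(ctx->missing_resp) +
	    le64toh(ctx->seq_err_naks_rcvd) +
	    le64toh(ctx->rnr_naks_rcvd) +
	    le64toh(ctx->to_retransmits);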
+
/*****************
* query_version *
*****************/
@@ -63935,7 +71099,7 @@ typedef struct creq_query_version_resp {
*****************/
-/* cmdq_initialize_fw (size:896b/112B) */
+/* cmdq_initialize_fw (size:1024b/128B) */
typedef struct cmdq_initialize_fw {
/* Command opcode. */
@@ -63960,6 +71124,15 @@ typedef struct cmdq_initialize_fw {
* feature is supported.
*/
#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED UINT32_C(0x2)
+ /* When set, the driver version is provided. */
+ #define CMDQ_INITIALIZE_FW_FLAGS_DRV_VERSION UINT32_C(0x4)
+ /* When set, driver supports optimizing Modify QP operation. */
+ #define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED UINT32_C(0x8)
+ /*
+ * When set, the VF RoCE resources will be managed by the L2
+ * driver via func_cfg.
+ */
+ #define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT UINT32_C(0x10)
/* Driver supplied handle to associate the command and the response. */
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
@@ -63975,7 +71148,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
#define CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_QPC_LVL_LAST CMDQ_INITIALIZE_FW_QPC_LVL_LVL_2
/* QPC page size. */
@@ -64002,7 +71178,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_MRW_LVL_LAST CMDQ_INITIALIZE_FW_MRW_LVL_LVL_2
/* MRW page size. */
@@ -64029,7 +71208,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_SRQ_LVL_LAST CMDQ_INITIALIZE_FW_SRQ_LVL_LVL_2
/* SRQ page size. */
@@ -64056,7 +71238,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_CQ_LVL_LAST CMDQ_INITIALIZE_FW_CQ_LVL_LVL_2
/* CQ page size. */
@@ -64083,7 +71268,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_TQM_LVL_LAST CMDQ_INITIALIZE_FW_TQM_LVL_LVL_2
/* TQM page size. */
@@ -64110,7 +71298,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_INITIALIZE_FW_TIM_LVL_LAST CMDQ_INITIALIZE_FW_TIM_LVL_LVL_2
/* TIM page size. */
@@ -64130,7 +71321,10 @@ typedef struct cmdq_initialize_fw {
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
#define CMDQ_INITIALIZE_FW_TIM_PG_SIZE_LAST CMDQ_INITIALIZE_FW_TIM_PG_SIZE_PG_1G
uint16_t log2_dbr_pg_size;
- /* Log base 2 of DBR page size - 12. 0 for 4KB. HW supported values are enumerated below. */
+ /*
+ * Log base 2 of DBR page size - 12. 0 for 4KB. HW supported values
+ * are enumerated below.
+ */
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_MASK UINT32_C(0xf)
#define CMDQ_INITIALIZE_FW_LOG2_DBR_PG_SIZE_SFT 0
/* 4KB. */
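As a worked example of this encoding: a 4KB doorbell page encodes as log2(4096) - 12 = 0, and a 64KB page as log2(65536) - 12 = 16 - 12 = 4.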
@@ -64181,15 +71375,30 @@ typedef struct cmdq_initialize_fw {
uint64_t tqm_page_dir;
/* TIM page directory. */
uint64_t tim_page_dir;
- /* Number of QPs. */
+ /*
+ * Number of QPs. This field is ignored when the backing store HWRM's
+ * are used.
+ */
uint32_t number_of_qp;
- /* Number of MRWs. */
+ /*
+ * Number of MRWs. This field is ignored when the backing store HWRM's
+ * are used.
+ */
uint32_t number_of_mrw;
- /* Number of SRQs. */
+ /*
+ * Number of SRQs. This field is ignored when the backing store HWRM's
+ * are used.
+ */
uint32_t number_of_srq;
- /* Number of CQs. */
+ /*
+ * Number of CQs. This field is ignored when the backing store HWRM's
+ * are used.
+ */
uint32_t number_of_cq;
- /* Number of QPs per VF. */
+ /*
+ * Number of QPs per VF. This field must be set to zero when the flag,
+ * l2_vf_resource_mgmt, is set and RoCE SRIOV is enabled.
+ */
uint32_t max_qp_per_vf;
/*
* If the MR/AV split reservation flag is not set, then this field
@@ -64204,16 +71413,44 @@ typedef struct cmdq_initialize_fw {
* `max_av_per_vf`. The granularity of these values is defined by
* the `mrav_num_entries_unit` field returned by the
* `backing_store_qcaps` command.
+ *
+ * This field must be set to zero when the flag, l2_vf_resource_mgmt,
+ * is set and RoCE SRIOV is enabled.
*/
uint32_t max_mrw_per_vf;
- /* Number of SRQs per VF. */
+ /*
+ * Number of SRQs per VF. This field must be set to zero when the flag,
+ * l2_vf_resource_mgmt, is set and RoCE SRIOV is enabled.
+ */
uint32_t max_srq_per_vf;
- /* Number of CQs per VF. */
+ /*
+ * Number of CQs per VF. This field must be set to zero when the flag,
+ * l2_vf_resource_mgmt, is set and RoCE SRIOV is enabled.
+ */
uint32_t max_cq_per_vf;
- /* Number of GIDs per VF. */
+ /*
+ * Number of GIDs per VF. This field must be set to zero when the flag,
+ * l2_vf_resource_mgmt, is set and RoCE SRIOV is enabled.
+ */
uint32_t max_gid_per_vf;
/* Statistics context index for this function. */
uint32_t stat_ctx_id;
+ /* The driver HSI major version number. */
+ uint8_t drv_hsi_ver_maj;
+ /* The driver HSI minor version number. */
+ uint8_t drv_hsi_ver_min;
+ /* The driver HSI update version number. */
+ uint8_t drv_hsi_ver_upd;
+	/* These 40 bits are unused. */
+ uint8_t unused40[5];
+ /* The driver build major version number. */
+ uint16_t drv_build_ver_maj;
+ /* The driver build minor version number. */
+ uint16_t drv_build_ver_min;
+ /* The driver build update version number. */
+ uint16_t drv_build_ver_upd;
+ /* The driver build patch version number. */
+ uint16_t drv_build_ver_patch;
} cmdq_initialize_fw_t, *pcmdq_initialize_fw_t;
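A sketch of opting into the new driver-version reporting (the version numbers are placeholders; multi-byte fields follow the interface's little-endian convention):

	cmdq_initialize_fw_t req = {0};

	req.flags = htole16(CMDQ_INITIALIZE_FW_FLAGS_DRV_VERSION);
	req.drv_hsi_ver_maj = 1;		/* placeholder HSI version */
	req.drv_hsi_ver_min = 8;
	req.drv_hsi_ver_upd = 4;
	req.drv_build_ver_maj = htole16(14);	/* placeholder build version */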
/* creq_initialize_fw_resp (size:128b/16B) */
@@ -64319,7 +71556,7 @@ typedef struct creq_deinitialize_fw_resp {
*************/
-/* cmdq_create_qp (size:768b/96B) */
+/* cmdq_create_qp (size:896b/112B) */
typedef struct cmdq_create_qp {
/* Command opcode. */
@@ -64345,7 +71582,10 @@ typedef struct cmdq_create_qp {
uint64_t qp_handle;
/* Create QP flags. */
uint32_t qp_flags;
- /* SRQ is used. */
+ /*
+ * SRQ is used.
+ * This flag is not supported on express mode QPs.
+ */
#define CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED UINT32_C(0x1)
/* post CQE for all SQ WQEs. */
#define CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION UINT32_C(0x2)
@@ -64373,7 +71613,24 @@ typedef struct cmdq_create_qp {
* that can be queried via `query_roce_stats_ext`.
*/
#define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED UINT32_C(0x80)
- #define CMDQ_CREATE_QP_QP_FLAGS_LAST CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+ /* This QP uses express mode. */
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXPRESS_MODE_ENABLED UINT32_C(0x100)
+ /* This QP uses the steering tag specified in the command. */
+ #define CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID UINT32_C(0x200)
+ /*
+ * This QP can be used for RDMA Read or Atomic operations.
+ * This value is used to optimize metadata memory allocation
+	 * when the device supports the `internal_queue_memory` feature.
+ */
+ #define CMDQ_CREATE_QP_QP_FLAGS_RDMA_READ_OR_ATOMICS_USED UINT32_C(0x400)
+ /*
+ * This QP must be included in the extended RoCE statistics context
+	 * specified in the field `ext_stats_ctx_id`.
+ */
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_CTX_VALID UINT32_C(0x800)
+ /* The schq_id field passed in by the caller is valid. */
+ #define CMDQ_CREATE_QP_QP_FLAGS_SCHQ_ID_VALID UINT32_C(0x1000)
+ #define CMDQ_CREATE_QP_QP_FLAGS_LAST CMDQ_CREATE_QP_QP_FLAGS_SCHQ_ID_VALID
/* Supported QP types. */
uint8_t type;
/* Reliable Connection. */
@@ -64386,17 +71643,26 @@ typedef struct cmdq_create_qp {
#define CMDQ_CREATE_QP_TYPE_GSI UINT32_C(0x7)
#define CMDQ_CREATE_QP_TYPE_LAST CMDQ_CREATE_QP_TYPE_GSI
uint8_t sq_pg_size_sq_lvl;
- /* SQ PBL indirect levels. */
+ /*
+ * SQ PBL indirect levels.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_SQ_LVL_MASK UINT32_C(0xf)
#define CMDQ_CREATE_QP_SQ_LVL_SFT 0
/* PBL pointer is physical start address. */
#define CMDQ_CREATE_QP_SQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_QP_SQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
#define CMDQ_CREATE_QP_SQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_QP_SQ_LVL_LAST CMDQ_CREATE_QP_SQ_LVL_LVL_2
- /* SQ page size. */
+ /*
+ * SQ page size.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_SQ_PG_SIZE_MASK UINT32_C(0xf0)
#define CMDQ_CREATE_QP_SQ_PG_SIZE_SFT 4
/* 4KB. */
@@ -64413,17 +71679,26 @@ typedef struct cmdq_create_qp {
#define CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G (UINT32_C(0x5) << 4)
#define CMDQ_CREATE_QP_SQ_PG_SIZE_LAST CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G
uint8_t rq_pg_size_rq_lvl;
- /* RQ PBL indirect levels. */
+ /*
+ * RQ PBL indirect levels.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_RQ_LVL_MASK UINT32_C(0xf)
#define CMDQ_CREATE_QP_RQ_LVL_SFT 0
/* PBL pointer is physical start address. */
#define CMDQ_CREATE_QP_RQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_QP_RQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to
+ * PTE tables.
+ */
#define CMDQ_CREATE_QP_RQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_QP_RQ_LVL_LAST CMDQ_CREATE_QP_RQ_LVL_LVL_2
- /* RQ page size. */
+ /*
+ * RQ page size.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_RQ_PG_SIZE_MASK UINT32_C(0xf0)
#define CMDQ_CREATE_QP_RQ_PG_SIZE_SFT 4
/* 4KB. */
@@ -64442,25 +71717,39 @@ typedef struct cmdq_create_qp {
uint8_t unused_0;
/* Doorbell page index. */
uint32_t dpi;
- /* Max number of SQ wqes. */
+ /*
+ * When the SQ is configured to use variable-size WQE, 'sq_size'
+ * denotes the SQ size with a unit of 16B. When the SQ is configured
+ * to use fixed-size WQE, 'sq_size' denotes the max number of SQ WQEs.
+ */
uint32_t sq_size;
/* Max number of RQ wqes. */
uint32_t rq_size;
uint16_t sq_fwo_sq_sge;
- /* Max send SGEs per SWQE. */
+ /*
+ * Max send SGEs per SWQE. This is only applicable to fixed-size
+ * WQE support. On variable-size WQE, this is ignored.
+ */
#define CMDQ_CREATE_QP_SQ_SGE_MASK UINT32_C(0xf)
#define CMDQ_CREATE_QP_SQ_SGE_SFT 0
- /* Offset of First WQE in the first SQ page, in 128 byte units */
+ /*
+ * Offset of First WQE in the first SQ page, in 128 byte units.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_SQ_FWO_MASK UINT32_C(0xfff0)
#define CMDQ_CREATE_QP_SQ_FWO_SFT 4
uint16_t rq_fwo_rq_sge;
/*
* Max recv SGEs per RWQE.
- * On chips with variable-size WQE support, a value of zero implies 30 SGEs.
+ * On chips with variable-size WQE support, a value of zero implies
+ * 30 SGEs.
*/
#define CMDQ_CREATE_QP_RQ_SGE_MASK UINT32_C(0xf)
#define CMDQ_CREATE_QP_RQ_SGE_SFT 0
- /* Offset of First WQE in the first RQ page, in 128 byte units */
+ /*
+ * Offset of First WQE in the first RQ page, in 128 byte units.
+ * This field is ignored for express mode QPs.
+ */
#define CMDQ_CREATE_QP_RQ_FWO_MASK UINT32_C(0xfff0)
#define CMDQ_CREATE_QP_RQ_FWO_SFT 4
/* Send CQ context id. */
@@ -64471,14 +71760,63 @@ typedef struct cmdq_create_qp {
uint32_t srq_cid;
/* Protection domain id. */
uint32_t pd_id;
- /* SQ PBL physical address. */
+ /*
+ * SQ PBL physical address.
+ * This field is ignored for express mode QPs.
+ */
uint64_t sq_pbl;
- /* RQ PBL physical address. */
+ /*
+ * RQ PBL physical address.
+ * This field is ignored for express mode QPs.
+ */
uint64_t rq_pbl;
- /* IRRQ address. */
+ /*
+ * IRRQ address. This field is ignored on devices that
+ * support the `internal_queue_memory` feature.
+ */
uint64_t irrq_addr;
- /* ORRQ address. */
+ /*
+ * ORRQ address. This field is ignored on devices that
+ * support the `internal_queue_memory` feature.
+ */
uint64_t orrq_addr;
+ /*
+ * xid to use for the non-QP1 QP.
+ * The requested xid must be within the valid range
+ * of the predetermined assignment scheme of the
+ * pseudo static QP allocation feature. The valid range
+	 * for the data QPs is determined by the start_qid and
+	 * max_qp fields of the query_func response. When the value is
+	 * zero, firmware will automatically choose an xid from its free
+	 * pool. QP1 allocation, indicated by specifying the `type` field
+	 * as gsi, must specify a request_xid of zero.
+ * This field is ignored on devices that do not support
+ * the pseudo static QP allocation feature.
+ */
+ uint32_t request_xid;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ /*
+ * This value is used to optimize metadata memory allocation when
+	 * the device supports the `internal_queue_memory` feature.
+	 * When the SQ is configured to use variable-size WQEs, the SQ size
+	 * is only specified in units of 16 bytes. This value hints at the
+	 * max number of WQEs that would ever be present on the SQ.
+ */
+ uint16_t sq_max_num_wqes;
+ /* Extended RoCE statistics context id. */
+ uint32_t ext_stats_ctx_id;
+ /*
+ * Identifies the new scheduling queue to associate with
+ * the RoCE QP. A value of zero indicates that the QP is being
+ * created with the default scheduling queue. Can only be specified
+ * by the PF driver. VFs get assigned a scheduling queue based on PF
+ * configuration (via HWRM_FUNC_CFG). Specified scheduling queue id is
+ * allocated by firmware (via HWRM_SCHQ_ALLOC) when the device supports
+ * the `scheduling queue` feature.
+ */
+ uint16_t schq_id;
+ uint16_t reserved16;
} cmdq_create_qp_t, *pcmdq_create_qp_t;
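A hedged sketch of the new trailing fields in use, letting firmware pick the xid while supplying a steering tag (the tag value is a placeholder):

	cmdq_create_qp_t req = {0};

	req.qp_flags = htole32(CMDQ_CREATE_QP_QP_FLAGS_STEERING_TAG_VALID);
	req.steering_tag = htole16(0x12);	/* placeholder tag */
	req.request_xid = htole32(0);		/* 0: firmware picks from its free pool */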
/* creq_create_qp_resp (size:128b/16B) */
@@ -64597,7 +71935,7 @@ typedef struct creq_destroy_qp_resp {
*************/
-/* cmdq_modify_qp (size:1024b/128B) */
+/* cmdq_modify_qp (size:1152b/144B) */
typedef struct cmdq_modify_qp {
/* Command opcode. */
@@ -64612,11 +71950,37 @@ typedef struct cmdq_modify_qp {
uint8_t cmd_size;
/* Flags and attribs of the command. */
uint16_t flags;
+ /*
+ * This field, used by firmware for optimizing Modify QP operation,
+ * must be set when the driver has indicated support for the
+ * optimize_modify_qp_supported feature in cmdq_initialize_fw and
+ * when QP Type RC is configured to use SRQ.
+ */
+ #define CMDQ_MODIFY_QP_FLAGS_SRQ_USED UINT32_C(0x1)
+ /*
+ * This field must be set when the driver has indicated that the
+ * qp should be excluded from udcc sessions.
+	 * QP should be excluded from UDCC sessions.
+ #define CMDQ_MODIFY_QP_FLAGS_EXCLUDE_QP_UDCC UINT32_C(0x2)
/* Driver supplied handle to associate the command and the response. */
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
uint8_t resp_size;
- uint8_t reserved8;
+ /*
+ * This field, used by firmware for optimizing Modify QP operation,
+ * must be set when the driver has indicated support for the
+ * optimize_modify_qp_supported feature in cmdq_initialize_fw.
+ */
+ uint8_t qp_type;
+ /* Reliable Connection. */
+ #define CMDQ_MODIFY_QP_QP_TYPE_RC UINT32_C(0x2)
+ /* Unreliable Datagram. */
+ #define CMDQ_MODIFY_QP_QP_TYPE_UD UINT32_C(0x4)
+ /* Raw Ethertype. */
+ #define CMDQ_MODIFY_QP_QP_TYPE_RAW_ETHERTYPE UINT32_C(0x6)
+ /* General Services Interface on QP1 over UD. */
+ #define CMDQ_MODIFY_QP_QP_TYPE_GSI UINT32_C(0x7)
+ #define CMDQ_MODIFY_QP_QP_TYPE_LAST CMDQ_MODIFY_QP_QP_TYPE_GSI
/* Host address of the response. */
uint64_t resp_addr;
/* Modify mask signifies the field that is requesting the change. */
@@ -64730,7 +72094,10 @@ typedef struct cmdq_modify_qp {
#define CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE UINT32_C(0x2)
/* Remote read access. */
#define CMDQ_MODIFY_QP_ACCESS_REMOTE_READ UINT32_C(0x4)
- /* Remote atomic access. */
+ /*
+ * Remote atomic access. Applicable to devices that support
+ * Atomic operations.
+ */
#define CMDQ_MODIFY_QP_ACCESS_REMOTE_ATOMIC UINT32_C(0x8)
/* P_KEY. */
uint16_t pkey;
@@ -64811,7 +72178,7 @@ typedef struct cmdq_modify_qp {
uint16_t sq_sge;
/* Max recv SGEs per RWQE. */
uint16_t rq_sge;
- /* Max inline data length (upto 120 bytes). */
+ /* Max inline data length (up to 120 bytes). */
uint32_t max_inline_data;
/* Destination QP id. */
uint32_t dest_qp_id;
@@ -64832,6 +72199,29 @@ typedef struct cmdq_modify_qp {
uint64_t irrq_addr;
/* ORRQ address. */
uint64_t orrq_addr;
+ /*
+ * Extended Modify mask signifies the field that is requesting the
+ * change.
+ */
+ uint32_t ext_modify_mask;
+ /* Extended RoCE statistics context id change */
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX UINT32_C(0x1)
+ /* The schq_id field is valid */
+ #define CMDQ_MODIFY_QP_EXT_MODIFY_MASK_SCHQ_ID_VALID UINT32_C(0x2)
+ /* Extended RoCE statistics context id. */
+ uint32_t ext_stats_ctx_id;
+ /*
+ * Identifies the new scheduling queue to associate to the RoCE QP.
+ * A value of zero indicates that the QP is being modified to use
+ * the default scheduling queue. Specified scheduling queue id is
+ * allocated by firmware (via HWRM_SCHQ_ALLOC) when the device supports
+ * the `scheduling queue` feature.
+ */
+ uint16_t schq_id;
+ /* unused_0 is 16 b */
+ uint16_t unused_0;
+ /* reserved32 is 32 b */
+ uint32_t reserved32;
} cmdq_modify_qp_t, *pcmdq_modify_qp_t;
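A minimal sketch of attaching a QP to an extended statistics context via the new extended mask (`stats_ctx_id` is assumed to come from allocate_roce_stats_ext_ctx):

	cmdq_modify_qp_t req = {0};

	req.ext_modify_mask = htole32(CMDQ_MODIFY_QP_EXT_MODIFY_MASK_EXT_STATS_CTX);
	req.ext_stats_ctx_id = htole32(stats_ctx_id);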
/* creq_modify_qp_resp (size:128b/16B) */
@@ -65016,7 +72406,12 @@ typedef struct creq_query_qp_resp_sb {
uint16_t pkey;
/* Q_KEY. */
uint32_t qkey;
- uint32_t reserved32;
+ /*
+ * UDP source port used in RoCEv2 packets. Valid only when
+ * change_udp_src_port_wqe_supported feature is advertised.
+ */
+ uint16_t udp_src_port;
+ uint16_t reserved16;
/* Destination GID. */
uint32_t dgid[4];
/* Flow label. */
@@ -65083,7 +72478,7 @@ typedef struct creq_query_qp_resp_sb {
uint16_t sq_sge;
/* Max recv SGEs per RWQE (NOT SUPPORTED BY HARDWARE). */
uint16_t rq_sge;
- /* Max inline data length (upto 120 bytes). */
+ /* Max inline data length (up to 120 bytes). */
uint32_t max_inline_data;
/* Destination QP id. */
uint32_t dest_qp_id;
@@ -65116,8 +72511,8 @@ typedef struct cmdq_query_qp_extend {
/* Command opcode. */
uint8_t opcode;
/*
- * Query QP extend command retrieves info about multiple QPs associated
- * with a specific PF.
+ * Query QP extend command retrieves info about multiple QPs
+ * associated with a specific PF.
*/
#define CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND UINT32_C(0x91)
#define CMDQ_QUERY_QP_EXTEND_OPCODE_LAST CMDQ_QUERY_QP_EXTEND_OPCODE_QUERY_QP_EXTEND
@@ -65129,7 +72524,10 @@ typedef struct cmdq_query_qp_extend {
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
uint8_t resp_size;
- /* Number of QPs for which FW needs to query and provide info back to host. */
+ /*
+ * Number of QPs for which FW needs to query and provide info back to
+ * host.
+ */
uint8_t num_qps;
/* Host address of the response. */
uint64_t resp_addr;
@@ -65144,8 +72542,8 @@ typedef struct cmdq_query_qp_extend {
/* When set the vf_num is valid. */
#define CMDQ_QUERY_QP_EXTEND_VF_VALID UINT32_C(0x1000000)
/*
- * This is the current index where firmware left off for query qp. Driver
- * will pass this back in the next query_qp_extend command.
+ * This is the current index where firmware left off for query qp.
+ * Driver will pass this back in the next query_qp_extend command.
*/
uint32_t current_index;
} cmdq_query_qp_extend_t, *pcmdq_query_qp_extend_t;
@@ -65186,8 +72584,8 @@ typedef struct creq_query_qp_extend_resp {
#define CREQ_QUERY_QP_EXTEND_RESP_EVENT_LAST CREQ_QUERY_QP_EXTEND_RESP_EVENT_QUERY_QP_EXTEND
uint16_t reserved16;
/*
- * This is the current index where firmware left off for query qp. Driver
- * will pass this back in the next query_qp_extend command.
+ * This is the current index where firmware left off for query qp.
+ * Driver will pass this back in the next query_qp_extend command.
*/
uint32_t current_index;
} creq_query_qp_extend_resp_t, *pcreq_query_qp_extend_resp_t;
@@ -65257,7 +72655,7 @@ typedef struct creq_query_qp_extend_resp_sb {
uint32_t dest_qp_id;
/* Statistic collection ID allocated for this QP. */
uint8_t stat_collection_id;
- uint8_t reservred_8;
+ uint8_t reserved2_8;
uint16_t reserved_16;
} creq_query_qp_extend_resp_sb_t, *pcreq_query_qp_extend_resp_sb_t;
@@ -65386,7 +72784,7 @@ typedef struct creq_query_qp_extend_resp_sb_tlv {
uint32_t dest_qp_id;
/* Statistic collection ID allocated for this QP. */
uint8_t stat_collection_id;
- uint8_t reservred_8;
+ uint8_t reserved2_8;
uint16_t reserved_16;
} creq_query_qp_extend_resp_sb_tlv_t, *pcreq_query_qp_extend_resp_sb_tlv_t;
@@ -65395,7 +72793,7 @@ typedef struct creq_query_qp_extend_resp_sb_tlv {
**************/
-/* cmdq_create_srq (size:384b/48B) */
+/* cmdq_create_srq (size:512b/64B) */
typedef struct cmdq_create_srq {
/* Command opcode. */
@@ -65407,6 +72805,8 @@ typedef struct cmdq_create_srq {
uint8_t cmd_size;
/* Flags and attribs of the command. */
uint16_t flags;
+ /* This SRQ uses the steering tag specified in the command. */
+ #define CMDQ_CREATE_SRQ_FLAGS_STEERING_TAG_VALID UINT32_C(0x1)
/* Driver supplied handle to associate the command and the response. */
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
@@ -65424,7 +72824,10 @@ typedef struct cmdq_create_srq {
#define CMDQ_CREATE_SRQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_SRQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_CREATE_SRQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_SRQ_LVL_LAST CMDQ_CREATE_SRQ_LVL_LVL_2
/* page size. */
@@ -65455,14 +72858,27 @@ typedef struct cmdq_create_srq {
#define CMDQ_CREATE_SRQ_UNUSED4_SFT 12
/* Max number of SRQ wqes. */
uint16_t srq_size;
- /* Offset of first WQE in the first page of SRQ, in 128 byte units */
uint16_t srq_fwo;
+ /* Offset of first WQE in the first page of SRQ, in 128 byte units */
+ #define CMDQ_CREATE_SRQ_SRQ_FWO_MASK UINT32_C(0xfff)
+ #define CMDQ_CREATE_SRQ_SRQ_FWO_SFT 0
+ /*
+ * Max SGEs per SRQ WQE. This field is enabled if the
+ * create_srq_sge_supported flag is set in the query_func response.
+ */
+ #define CMDQ_CREATE_SRQ_SRQ_SGE_MASK UINT32_C(0xf000)
+ #define CMDQ_CREATE_SRQ_SRQ_SGE_SFT 12
/* Doorbell page index. */
uint32_t dpi;
/* Protection domain id. */
uint32_t pd_id;
/* RQ PBL physical address. */
uint64_t pbl;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ uint8_t reserved48[6];
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
} cmdq_create_srq_t, *pcmdq_create_srq_t;
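With this change srq_fwo carries two subfields: the first-WQE offset in bits 11:0 and, on devices that advertise create_srq_sge_supported, the per-WQE SGE count in bits 15:12. A minimal sketch of how a caller might pack the combined field using the masks above; the helper name is illustrative, not part of the driver:

static inline uint16_t
bnxt_srq_pack_fwo_sge(uint16_t fwo_128b, uint16_t sge)
{
	/* First-WQE offset in 128-byte units, bits 11:0. */
	uint16_t val = (fwo_128b << CMDQ_CREATE_SRQ_SRQ_FWO_SFT) &
	    CMDQ_CREATE_SRQ_SRQ_FWO_MASK;

	/*
	 * Max SGEs per WQE, bits 15:12; only meaningful when the
	 * query_func response sets create_srq_sge_supported.
	 */
	val |= (sge << CMDQ_CREATE_SRQ_SRQ_SGE_SFT) &
	    CMDQ_CREATE_SRQ_SRQ_SGE_MASK;
	return (val);
}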
/* creq_create_srq_resp (size:128b/16B) */
@@ -65568,7 +72984,10 @@ typedef struct creq_destroy_srq_resp {
uint16_t enable_for_arm[3];
#define CREQ_DESTROY_SRQ_RESP_UNUSED0_MASK UINT32_C(0xffff)
#define CREQ_DESTROY_SRQ_RESP_UNUSED0_SFT 0
- /* Set to 1 if this SRQ is allowed to be armed for threshold async event */
+ /*
+ * Set to 1 if this SRQ is allowed to be armed for threshold async
+ * event
+ */
#define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_MASK UINT32_C(0x30000)
#define CREQ_DESTROY_SRQ_RESP_ENABLE_FOR_ARM_SFT 16
} creq_destroy_srq_resp_t, *pcreq_destroy_srq_resp_t;
@@ -65671,7 +73090,7 @@ typedef struct creq_query_srq_resp_sb {
*************/
-/* cmdq_create_cq (size:384b/48B) */
+/* cmdq_create_cq (size:512b/64B) */
typedef struct cmdq_create_cq {
/* Command opcode. */
@@ -65694,6 +73113,25 @@ typedef struct cmdq_create_cq {
* by the driver when HW based doorbell recovery is enabled.
*/
#define CMDQ_CREATE_CQ_FLAGS_DISABLE_CQ_OVERFLOW_DETECTION UINT32_C(0x1)
+ /* This CQ uses the steering tag specified in the command. */
+ #define CMDQ_CREATE_CQ_FLAGS_STEERING_TAG_VALID UINT32_C(0x2)
+ /*
+ * This CQ uses the infinite CQ mode.
+ * In the infinite CQ mode, all CQEs are written to the same
+ * address. Note that this mode implies a HW client is
+ * handling each entry instantly and avoiding overwrites.
+ * The following limitations apply when this mode is enabled:
+ * - cq_size field must be 1.
+ * - disable_cq_overflow_detection flag must be true.
+ * - the CQ will never be armed.
+ * - the consumer index of the CQ will never be changed.
+ */
+ #define CMDQ_CREATE_CQ_FLAGS_INFINITE_CQ_MODE UINT32_C(0x4)
+ /*
+ * This CQ uses coalescing data specified in the command.
+ * This feature is not supported if infinite_cq_mode is also enabled.
+ */
+ #define CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID UINT32_C(0x8)
/* Driver supplied handle to associate the command and the response. */
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
@@ -65711,7 +73149,10 @@ typedef struct cmdq_create_cq {
#define CMDQ_CREATE_CQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_CQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_CREATE_CQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_CQ_LVL_LAST CMDQ_CREATE_CQ_LVL_LVL_2
/* page size. */
@@ -65746,6 +73187,45 @@ typedef struct cmdq_create_cq {
uint32_t cq_size;
/* CQ PBL physical address. */
uint64_t pbl;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ uint8_t reserved16[2];
+ uint32_t coalescing;
+ /*
+ * Max time before flushing buffers (units of 1 us). This
+ * specifies the maximum time before completion buffers are
+ * flushed out to host memory even if the number of coalesced
+ * buffers is less than the threshold. buf_maxtime is 9 bits.
+ */
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_MASK UINT32_C(0x1ff)
+ #define CMDQ_CREATE_CQ_BUF_MAXTIME_SFT 0
+ /*
+ * This specifies the number of buffers coalesced before sending
+ * to memory during normal operation. Buffer unit is 16B
+ * completions. normal_maxbuf is 5 bits.
+ */
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK UINT32_C(0x3e00)
+ #define CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT 9
+ /*
+ * This specifies the number of buffers coalesced before sending
+ * to memory when the interrupt is masked. Buffer unit is 16B
+ * completions. during_maxbuf is 5 bits.
+ */
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_MASK UINT32_C(0x7c000)
+ #define CMDQ_CREATE_CQ_DURING_MAXBUF_SFT 14
+ /*
+ * This field is used to enable ring for global idle mode interrupt
+ * generation. When armed, this mode generates a notification
+ * (interrupt) after a single completion if the chip is globally
+ * idle, as determined by the device.
+ * enable_ring_idle_mode is 1 bit.
+ */
+ #define CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE UINT32_C(0x80000)
+ /* unused12 is 12 b */
+ #define CMDQ_CREATE_CQ_UNUSED12_MASK UINT32_C(0xfff00000)
+ #define CMDQ_CREATE_CQ_UNUSED12_SFT 20
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
} cmdq_create_cq_t, *pcmdq_create_cq_t;
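The four coalescing subfields share one 32-bit word, so a driver opting into CMDQ_CREATE_CQ_FLAGS_COALESCING_VALID has to compose them with the masks above. A hedged sketch, assuming kernel bool/stdbool is available; the helper name and the absence of range checks are illustrative:

static inline uint32_t
bnxt_cq_pack_coalescing(uint32_t buf_maxtime, uint32_t normal_maxbuf,
    uint32_t during_maxbuf, bool ring_idle_mode)
{
	uint32_t val;

	/* Flush timeout in 1 us units (9 bits). */
	val = (buf_maxtime << CMDQ_CREATE_CQ_BUF_MAXTIME_SFT) &
	    CMDQ_CREATE_CQ_BUF_MAXTIME_MASK;
	/* Coalescing thresholds in 16B completion units (5 bits each). */
	val |= (normal_maxbuf << CMDQ_CREATE_CQ_NORMAL_MAXBUF_SFT) &
	    CMDQ_CREATE_CQ_NORMAL_MAXBUF_MASK;
	val |= (during_maxbuf << CMDQ_CREATE_CQ_DURING_MAXBUF_SFT) &
	    CMDQ_CREATE_CQ_DURING_MAXBUF_MASK;
	if (ring_idle_mode)
		val |= CMDQ_CREATE_CQ_ENABLE_RING_IDLE_MODE;
	return (val);
}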
/* creq_create_cq_resp (size:128b/16B) */
@@ -65858,8 +73338,9 @@ typedef struct creq_destroy_cq_resp {
#define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_MASK UINT32_C(0x3)
#define CREQ_DESTROY_CQ_RESP_CQ_ARM_LVL_SFT 0
/*
- * The total number of CNQ events for the CQ, incremented on each CNQ event for the CQ
- * (including firmware-generated CQ error notification).
+ * The total number of CNQ events for the CQ, incremented on each CNQ
+ * event for the CQ (including firmware-generated CQ error
+ * notification).
*/
uint16_t total_cnq_events;
uint16_t reserved16;
@@ -65899,7 +73380,10 @@ typedef struct cmdq_resize_cq {
#define CMDQ_RESIZE_CQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_RESIZE_CQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_RESIZE_CQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_RESIZE_CQ_LVL_LAST CMDQ_RESIZE_CQ_LVL_LVL_2
/* page size. */
@@ -66085,8 +73569,8 @@ typedef struct cmdq_allocate_mrw {
/* Command opcode. */
uint8_t opcode;
/*
- * Allocate MRW command allocates a MR/MW with the specified parameters
- * and returns the region's L_KEY/R_KEY
+ * Allocate MRW command allocates an MR/MW with the specified
+ * parameters and returns the region's L_KEY/R_KEY
*/
#define CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW UINT32_C(0xd)
#define CMDQ_ALLOCATE_MRW_OPCODE_LAST CMDQ_ALLOCATE_MRW_OPCODE_ALLOCATE_MRW
@@ -66108,25 +73592,30 @@ typedef struct cmdq_allocate_mrw {
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MASK UINT32_C(0xf)
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_SFT 0
/* Allocate Memory Region */
- #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR UINT32_C(0x0)
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR UINT32_C(0x0)
/* Allocate Physical Memory Region */
- #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR UINT32_C(0x1)
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR UINT32_C(0x1)
/* Allocate Memory Window (type 1) */
- #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 UINT32_C(0x2)
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 UINT32_C(0x2)
/* Allocate Memory Window (type 2A) */
- #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A UINT32_C(0x3)
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A UINT32_C(0x3)
/* Allocate Memory Window (type 2B) */
- #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B UINT32_C(0x4)
+ #define CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B UINT32_C(0x4)
#define CMDQ_ALLOCATE_MRW_MRW_FLAGS_LAST CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B
- /* unused4 is 4 b */
- #define CMDQ_ALLOCATE_MRW_UNUSED4_MASK UINT32_C(0xf0)
- #define CMDQ_ALLOCATE_MRW_UNUSED4_SFT 4
+ /*
+ * This Memory Region / Memory Window uses the
+ * steering tag specified in the command.
+ */
+ #define CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID UINT32_C(0x10)
+ /* unused3 is 3 b */
+ #define CMDQ_ALLOCATE_MRW_UNUSED3_MASK UINT32_C(0xe0)
+ #define CMDQ_ALLOCATE_MRW_UNUSED3_SFT 5
/* Access flags. */
uint8_t access;
/* Consumer owns the key */
#define CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY UINT32_C(0x20)
- /* unused16 is 16 b */
- uint16_t unused16;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
/* Protection domain id. */
uint32_t pd_id;
} cmdq_allocate_mrw_t, *pcmdq_allocate_mrw_t;
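Bit 4 of mrw_flags now gates the steering_tag field that replaces the old unused16 pad. A sketch of a request that allocates a type-1 memory window with a tag; `cmd`, `tag` and `pd` are illustrative locals, and the field name mrw_flags is inferred from the mask names above:

/* Request a type-1 memory window that tags its memory transactions. */
cmd.mrw_flags = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 |
    CMDQ_ALLOCATE_MRW_STEERING_TAG_VALID;
cmd.steering_tag = tag;	/* consumed only when STEERING_TAG_VALID is set */
cmd.pd_id = pd;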
@@ -66178,7 +73667,10 @@ typedef struct creq_allocate_mrw_resp {
typedef struct cmdq_deallocate_key {
/* Command opcode. */
uint8_t opcode;
- /* De-allocate key command frees a MR/MW entry associated with the specified key. */
+ /*
+ * De-allocate key command frees a MR/MW entry associated with the
+ * specified key.
+ */
#define CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY UINT32_C(0xe)
#define CMDQ_DEALLOCATE_KEY_OPCODE_LAST CMDQ_DEALLOCATE_KEY_OPCODE_DEALLOCATE_KEY
/* Size of the command in 16-byte units. */
@@ -66252,12 +73744,16 @@ typedef struct creq_deallocate_key_resp {
#define CREQ_DEALLOCATE_KEY_RESP_EVENT_LAST CREQ_DEALLOCATE_KEY_RESP_EVENT_DEALLOCATE_KEY
uint16_t reserved16;
/*
- * This is advisory data to facilitate eventual destruction of lingering memory regions in Windows.
- * For memory window, it contains non-zero HWID of a region this window was bound to (without the 8-bit key portion).
- * The host may check if the region is lingering in destroyed state and try to destroy it now.
- * For memory region, if deallocation fails because there are windows bound to this region, this field will contain
- * approximate number of those windows. This number is read from the context right before the
- * deregistration is attempted and can potentially be slightly different from the current number.
+ * This is advisory data to facilitate eventual destruction of
+ * lingering memory regions in Windows. For a memory window, it
+ * contains the non-zero HWID of the region the window was bound to
+ * (without the 8-bit key portion). The host may check whether the
+ * region is lingering in the destroyed state and try to destroy it
+ * now. For a memory region, if deallocation fails because there are
+ * windows bound to this region, this field will contain the
+ * approximate number of those windows. This number is read from the
+ * context right before the deregistration is attempted and can
+ * potentially be slightly different from the current number.
*/
uint32_t bound_window_info;
} creq_deallocate_key_resp_t, *pcreq_deallocate_key_resp_t;
@@ -66267,7 +73763,7 @@ typedef struct creq_deallocate_key_resp {
***************/
-/* cmdq_register_mr (size:384b/48B) */
+/* cmdq_register_mr (size:512b/64B) */
typedef struct cmdq_register_mr {
/* Command opcode. */
@@ -66285,7 +73781,15 @@ typedef struct cmdq_register_mr {
* the `key` field doesn't hold a valid L_KEY and is instead
* overloaded to hold the Protection Domain ID `pd_id`.
*/
- #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR UINT32_C(0x1)
+ #define CMDQ_REGISTER_MR_FLAGS_ALLOC_MR UINT32_C(0x1)
+ /*
+ * This MR uses the steering tag specified in the command.
+ * This flag can only be enabled when the command is used
+ * to allocate a new MR first.
+ */
+ #define CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID UINT32_C(0x2)
+ /* When set, enable per MR relaxed ordering support. */
+ #define CMDQ_REGISTER_MR_FLAGS_ENABLE_RO UINT32_C(0x4)
/* Driver supplied handle to associate the command and the response. */
uint16_t cookie;
/* Size of the response buffer in 16-byte units. */
@@ -66301,10 +73805,16 @@ typedef struct cmdq_register_mr {
#define CMDQ_REGISTER_MR_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_REGISTER_MR_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_REGISTER_MR_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_REGISTER_MR_LVL_LAST CMDQ_REGISTER_MR_LVL_LVL_2
- /* Log base 2 of page size; 12 is the minimum for 4KB. HW supported values are enumerated below. */
+ /*
+ * Log base 2 of page size; 12 is the minimum for 4KB. HW supported
+ * values are enumerated below.
+ */
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK UINT32_C(0x7c)
#define CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT 2
/* 4KB. */
@@ -66341,7 +73851,10 @@ typedef struct cmdq_register_mr {
/* Indicate Zero Based Virtual Address (ZBVA). */
#define CMDQ_REGISTER_MR_ACCESS_ZERO_BASED UINT32_C(0x20)
uint16_t log2_pbl_pg_size;
- /* Log base 2 of PBL page size; 12 is the minimum for 4KB. HW supported values are enumerated below */
+ /*
+ * Log base 2 of PBL page size; 12 is the minimum for 4KB. HW
+ * supported values are enumerated below
+ */
#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK UINT32_C(0x1f)
#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT 0
/* 4KB. */
@@ -66377,6 +73890,11 @@ typedef struct cmdq_register_mr {
uint64_t va;
/* Size of the MR. */
uint64_t mr_size;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ uint8_t reserved48[6];
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
} cmdq_register_mr_t, *pcmdq_register_mr_t;
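Because STEERING_TAG_VALID is only legal when the command also allocates the MR, a combined allocate-and-register request would set all three flags at once. A sketch under that assumption; `cmd`, `tag` and `pd` are illustrative:

cmd.flags = CMDQ_REGISTER_MR_FLAGS_ALLOC_MR |
    CMDQ_REGISTER_MR_FLAGS_STEERING_TAG_VALID |
    CMDQ_REGISTER_MR_FLAGS_ENABLE_RO;	/* per-MR relaxed ordering */
cmd.steering_tag = tag;
/* With ALLOC_MR set, the key field is overloaded to carry pd_id. */
cmd.key = pd;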
/* creq_register_mr_resp (size:128b/16B) */
@@ -66481,9 +73999,11 @@ typedef struct creq_deregister_mr_resp {
#define CREQ_DEREGISTER_MR_RESP_EVENT_LAST CREQ_DEREGISTER_MR_RESP_EVENT_DEREGISTER_MR
uint16_t reserved16;
/*
- * If deregister fails because there are windows bound to this region, this field will contain
- * approximate number of those windows. This number is read from the context right before the
- * deregistration is attempted and can potentially be slightly different from the current number.
+ * If deregistration fails because there are windows bound to this
+ * region, this field will contain the approximate number of those
+ * windows. This number is read from the context right before the
+ * deregistration is attempted and can potentially be slightly
+ * different from the current number.
*/
uint32_t bound_windows;
} creq_deregister_mr_resp_t, *pcreq_deregister_mr_resp_t;
@@ -66543,7 +74063,10 @@ typedef struct cmdq_add_gid {
/* TPID = Configurable 3. */
#define CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3 (UINT32_C(0x7) << 12)
#define CMDQ_ADD_GID_VLAN_TPID_LAST CMDQ_ADD_GID_VLAN_TPID_TPID_CFG3
- /* Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE header. */
+ /*
+ * Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE
+ * header.
+ */
#define CMDQ_ADD_GID_VLAN_VLAN_EN UINT32_C(0x8000)
/* Identifier field in the IP header. */
uint16_t ipid;
@@ -66554,7 +74077,10 @@ typedef struct cmdq_add_gid {
/* stats_ctx_id is 15 b */
#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_MASK UINT32_C(0x7fff)
#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_ID_SFT 0
- /* Setting this bit to 1 enables use of own stats context ID instead of per-function */
+ /*
+ * Setting this bit to 1 enables use of own stats context ID
+ * instead of per-function.
+ */
#define CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID UINT32_C(0x8000)
uint32_t unused_0;
} cmdq_add_gid_t, *pcmdq_add_gid_t;
@@ -66715,7 +74241,10 @@ typedef struct cmdq_modify_gid {
/* TPID = Configurable 3. */
#define CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3 (UINT32_C(0x7) << 12)
#define CMDQ_MODIFY_GID_VLAN_TPID_LAST CMDQ_MODIFY_GID_VLAN_TPID_TPID_CFG3
- /* Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE header. */
+ /*
+ * Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE
+ * header.
+ */
#define CMDQ_MODIFY_GID_VLAN_VLAN_EN UINT32_C(0x8000)
/* Identifier field in the IP header. */
uint16_t ipid;
@@ -66726,7 +74255,10 @@ typedef struct cmdq_modify_gid {
/* stats_ctx_id is 15 b */
#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_MASK UINT32_C(0x7fff)
#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_ID_SFT 0
- /* Setting this bit to 1 enables use of own stats context ID instead of per-function */
+ /*
+ * Setting this bit to 1 enables use of own stats context ID
+ * instead of per-function.
+ */
#define CMDQ_MODIFY_GID_STATS_CTX_STATS_CTX_VALID UINT32_C(0x8000)
uint16_t unused_0;
} cmdq_modify_gid_t, *pcmdq_modify_gid_t;
@@ -66884,7 +74416,10 @@ typedef struct creq_query_gid_resp_sb {
/* TPID = Configurable 3. */
#define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3 (UINT32_C(0x7) << 12)
#define CREQ_QUERY_GID_RESP_SB_VLAN_TPID_LAST CREQ_QUERY_GID_RESP_SB_VLAN_TPID_TPID_CFG3
- /* Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE header. */
+ /*
+ * Setting this bit to 1 enables insertion of a VLAN Tag to a RoCE
+ * header.
+ */
#define CREQ_QUERY_GID_RESP_SB_VLAN_VLAN_EN UINT32_C(0x8000)
/* Identifier field in the IP header. */
uint16_t ipid;
@@ -66941,7 +74476,10 @@ typedef struct cmdq_create_qp1 {
#define CMDQ_CREATE_QP1_SQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_QP1_SQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_CREATE_QP1_SQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_QP1_SQ_LVL_LAST CMDQ_CREATE_QP1_SQ_LVL_LVL_2
/* SQ page size. */
@@ -66968,7 +74506,10 @@ typedef struct cmdq_create_qp1 {
#define CMDQ_CREATE_QP1_RQ_LVL_LVL_0 UINT32_C(0x0)
/* PBL pointer points to PTE table. */
#define CMDQ_CREATE_QP1_RQ_LVL_LVL_1 UINT32_C(0x1)
- /* PBL pointer points to PDE table with each entry pointing to PTE tables. */
+ /*
+ * PBL pointer points to PDE table with each entry pointing to PTE
+ * tables.
+ */
#define CMDQ_CREATE_QP1_RQ_LVL_LVL_2 UINT32_C(0x2)
#define CMDQ_CREATE_QP1_RQ_LVL_LAST CMDQ_CREATE_QP1_RQ_LVL_LVL_2
/* RQ page size. */
@@ -67306,16 +74847,17 @@ typedef struct cmdq_query_roce_stats {
/* Flags and attribs of the command. */
uint16_t flags;
/*
- * When this bit is set FW will use the collection_id to extract RoCE statistics.
- * If function_id is also specified the FW will return stats corresponding to the
- * collection for the function_id specified.
+ * When this bit is set, FW will use the collection_id to extract
+ * RoCE statistics. If function_id is also specified, the FW will
+ * return stats corresponding to the collection for the specified
+ * function_id.
*/
#define CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID UINT32_C(0x1)
/*
- * When this bit is set FW will use the function_id to extract RoCE statistics.
- * When collection is specified then FW will return the specific collection
- * stats and if the collection is not specified then FW will return the default
- * stats which will be for all QPs.
+ * When this bit is set, FW will use the function_id to extract RoCE
+ * statistics. When a collection is specified, FW will return the
+ * specific collection stats; if no collection is specified, FW will
+ * return the default stats, which cover all QPs.
*/
#define CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID UINT32_C(0x2)
/* Driver supplied handle to associate the command and the response. */
@@ -67377,7 +74919,7 @@ typedef struct creq_query_roce_stats_resp {
} creq_query_roce_stats_resp_t, *pcreq_query_roce_stats_resp_t;
/* Query RoCE Stats command response side buffer structure. */
-/* creq_query_roce_stats_resp_sb (size:2944b/368B) */
+/* creq_query_roce_stats_resp_sb (size:3072b/384B) */
typedef struct creq_query_roce_stats_resp_sb {
/* Command opcode. */
@@ -67482,6 +75024,10 @@ typedef struct creq_query_roce_stats_resp_sb {
uint64_t active_qp_count_p2;
/* active_qp_count_p3 is 64 b */
uint64_t active_qp_count_p3;
+ /* Express mode SQ doorbell overflow error 64b counter. */
+ uint64_t xp_sq_overflow_err;
+ /* Express mode RQ doorbell overflow error 64b counter. */
+ uint64_t xp_rq_overflow_error;
} creq_query_roce_stats_resp_sb_t, *pcreq_query_roce_stats_resp_sb_t;
/************************
@@ -67502,16 +75048,17 @@ typedef struct cmdq_query_roce_stats_ext {
/* Flags and attribs of the command. */
uint16_t flags;
/*
- * When this bit is set FW will use the collection_id to extract RoCE statistics.
- * If function_id is also specified the FW will return stats corresponding to the
- * collection for the function_id specified.
+ * When this bit is set, FW will use the collection_id to extract
+ * RoCE statistics. If function_id is also specified, the FW will
+ * return stats corresponding to the collection for the specified
+ * function_id.
*/
#define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID UINT32_C(0x1)
/*
- * When this bit is set FW will use the function_id to extract RoCE statistics.
- * When collection is specified then FW will return the specific collection
- * stats and if the collection is not specified then FW will return the default
- * stats which will be for all QPs.
+ * When this bit is set, FW will use the function_id to extract RoCE
+ * statistics. When a collection is specified, FW will return the
+ * specific collection stats; if no collection is specified, FW will
+ * return the default stats, which cover all QPs.
*/
#define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID UINT32_C(0x2)
/* Driver supplied handle to associate the command and the response. */
@@ -67573,7 +75120,7 @@ typedef struct creq_query_roce_stats_ext_resp {
} creq_query_roce_stats_ext_resp_t, *pcreq_query_roce_stats_ext_resp_t;
/* Query extended RoCE Stats command response side buffer structure. */
-/* creq_query_roce_stats_ext_resp_sb (size:1984b/248B) */
+/* creq_query_roce_stats_ext_resp_sb (size:2304b/288B) */
typedef struct creq_query_roce_stats_ext_resp_sb {
/* Command opcode. */
@@ -67689,7 +75236,8 @@ typedef struct creq_query_roce_stats_ext_resp_sb {
*/
uint64_t to_retransmit;
/*
- * Number of duplicate read requests resulting in HW retransmission.
+ * Number of duplicate read/atomic requests resulting in HW
+ * retransmission.
* This counter is only applicable for devices that support
* hardware based retransmission.
*/
@@ -67702,6 +75250,39 @@ typedef struct creq_query_roce_stats_ext_resp_sb {
uint64_t rx_dcn_payload_cut;
/* Number of transmitted packets that bypassed the transmit engine. */
uint64_t te_bypassed;
+ /*
+ * Number of transmitted DCN CNP packets.
+ * This counter is only applicable for devices that support
+ * the DCN Payload Cut feature.
+ */
+ uint64_t tx_dcn_cnp;
+ /*
+ * Number of received DCN CNP packets.
+ * This counter is only applicable for devices that support
+ * the DCN Payload Cut feature.
+ */
+ uint64_t rx_dcn_cnp;
+ /*
+ * Number of received DCN payload cut packets.
+ * This counter is only applicable for devices that support
+ * the DCN Payload Cut feature.
+ */
+ uint64_t rx_payload_cut;
+ /*
+ * Number of received DCN payload cut packets that are ignored
+ * because they failed the PSN checks.
+ * This counter is only applicable for devices that support
+ * the DCN Payload Cut feature.
+ */
+ uint64_t rx_payload_cut_ignored;
+ /*
+ * Number of received DCN CNP packets that are ignored either
+ * because the ECN is not enabled on the QP or the ECN is enabled
+ * but the CNP packets do not pass the packet validation checks.
+ * This counter is only applicable for devices that support
+ * the DCN Payload Cut feature.
+ */
+ uint64_t rx_dcn_cnp_ignored;
} creq_query_roce_stats_ext_resp_sb_t, *pcreq_query_roce_stats_ext_resp_sb_t;
/**************
@@ -67768,7 +75349,7 @@ typedef struct creq_query_func_resp {
} creq_query_func_resp_t, *pcreq_query_func_resp_t;
/* Query function command response side buffer structure. */
-/* creq_query_func_resp_sb (size:1088b/136B) */
+/* creq_query_func_resp_sb (size:1280b/160B) */
typedef struct creq_query_func_resp_sb {
/* Command opcode. */
@@ -67787,17 +75368,26 @@ typedef struct creq_query_func_resp_sb {
uint8_t reserved8;
/* Max MR size supported. */
uint64_t max_mr_size;
- /* Max QP supported. */
+ /*
+ * Max QP supported.
+ * For devices that support the pseudo static allocation scheme,
+ * this count:
+ * - excludes the QP1 count.
+ * - includes the count of QPs that can be migrated from the other PF.
+ * Therefore, during normal operation when both PFs are active,
+ * the supported number of RoCE QPs for each PF is half of the
+ * advertised value.
+ */
uint32_t max_qp;
/* Max WQEs per QP. */
uint16_t max_qp_wr;
/* Device capability flags. */
uint16_t dev_cap_flags;
/* Allow QP resizing. */
- #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP UINT32_C(0x1)
+ #define CREQ_QUERY_FUNC_RESP_SB_RESIZE_QP UINT32_C(0x1)
/* Specifies Congestion Control (CC) generation. */
#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_MASK UINT32_C(0xe)
- #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_SFT 1
/*
* Includes support for DCTCP and TCP CC algorithms,
* enabling operation in networks where PFC is enabled.
@@ -67808,7 +75398,8 @@ typedef struct creq_query_func_resp_sb {
* enabling fast ramp up and convergence,
* as well as operation in networks where PFC is not enabled.
* Includes a number of parameters that are different from cc_gen0
- * chips as well as new parameters. TCP CC algorithm is not supported.
+ * chips as well as new parameters. TCP CC algorithm is not
+ * supported.
*/
#define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1 (UINT32_C(0x1) << 1)
/*
@@ -67816,21 +75407,29 @@ typedef struct creq_query_func_resp_sb {
* reduce_init_en, reduce_init_cong_free_rtts_th, random_no_red_en,
* actual_cr_shift_correction_en, quota_period_adjust_en
*/
- #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (UINT32_C(0x2) << 1)
- #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN1_EXT (UINT32_C(0x2) << 1)
+ /*
+ * Enhances cc_gen1_ext support to include support for DCN/SARA.
+ * Enables query and modification of Queue level table attributes,
+ * which are used by the hardware to determine the QP's flow rate
+ * based on congestion level, thereby reducing RoCE packet drops
+ * due to network congestion.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2 (UINT32_C(0x3) << 1)
+ #define CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_LAST CREQ_QUERY_FUNC_RESP_SB_CC_GENERATION_CC_GEN2
/*
* Support for the extended RoCE statistics is available. These
* statistics are queried via the `query_roce_stats_ext` command
* and are enabled on a per-QP basis via `create_qp`.
*/
- #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS UINT32_C(0x10)
+ #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS UINT32_C(0x10)
/*
* Support for both allocating and registering a new MR via the
* `register_mr` command is available. With this feature the
* `allocate_mrw` command does not have to be called before
* registering.
*/
- #define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC UINT32_C(0x20)
+ #define CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC UINT32_C(0x20)
/*
* Support for optimized transmit path to lower latency for WQEs
* with inline data.
@@ -67841,13 +75440,40 @@ typedef struct creq_query_func_resp_sb {
* the following CQE types:
* RES_UD, RES_RAWETH_QP1, RES_UD_CFA
*/
- #define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 UINT32_C(0x80)
+ #define CREQ_QUERY_FUNC_RESP_SB_CQE_V2 UINT32_C(0x80)
/* Support for ping pong push mode is available. */
#define CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE UINT32_C(0x100)
/* Support for hardware requester retransmission is enabled. */
- #define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED UINT32_C(0x200)
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED UINT32_C(0x200)
/* Support for hardware responder retransmission is enabled. */
- #define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED UINT32_C(0x400)
+ #define CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED UINT32_C(0x400)
+ /* Support for link aggregation is enabled. */
+ #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED UINT32_C(0x800)
+ /* link_aggr_supported is valid. */
+ #define CREQ_QUERY_FUNC_RESP_SB_LINK_AGGR_SUPPORTED_VALID UINT32_C(0x1000)
+ /*
+ * Support for pseudo static QP allocation is enabled.
+ * This feature enables the following capabilities:
+ * - QP context ID space is pseudo-statically partitioned across PFs.
+ * - An application can use a predetermined QP context ID
+ * assignment scheme for specific operations.
+ * - For 2-port adapters, the application can migrate the QP context
+ * ID range across PFs using the `orchestrate_qid_migration` HWRM
+ * during network events such as Link Down.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_PSEUDO_STATIC_QP_ALLOC_SUPPORTED UINT32_C(0x2000)
+ /*
+ * Support for Express Mode is enabled.
+ * For Express mode, the QP resources (SQ/RQ) are allocated in
+ * on-chip queue memory. The host driver should not allocate memory
+ * for these queue structures.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_EXPRESS_MODE_SUPPORTED UINT32_C(0x4000)
+ /*
+ * IRRQ/ORRQ and MSN Table structures are allocated in internal
+ * queue memory.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_INTERNAL_QUEUE_MEMORY UINT32_C(0x8000)
/* Max CQs supported. */
uint32_t max_cq;
/* Max CQEs per CQ supported. */
@@ -67855,15 +75481,19 @@ typedef struct creq_query_func_resp_sb {
/* Max PDs supported. */
uint32_t max_pd;
/*
- * Max SGEs per QP WQE supported. On chips with variable-size WQE support,
- * this field is applicable only for the backward compatible mode.
+ * Max SGEs per QP WQE supported. On chips with variable-size WQE
+ * support, this field is applicable only for the backward compatible
+ * mode.
*/
uint8_t max_sge;
/* Max SGEs per SRQ WQE supported. */
uint8_t max_srq_sge;
/* Max outstanding RDMA read & atomic supported. */
uint8_t max_qp_rd_atom;
- /* Max outstanding RDMA read & atomic that can be sent from an initiator. */
+ /*
+ * Max outstanding RDMA read & atomic that can be sent from an
+ * initiator.
+ */
uint8_t max_qp_init_rd_atom;
/* Max MRs supported. */
uint32_t max_mr;
@@ -67893,21 +75523,117 @@ typedef struct creq_query_func_resp_sb {
/* Max GIDs supported. */
uint32_t max_gid;
/*
- * An array of 48 8-bit values to specify allocation multiplier for TQM host buffer regions.
- * Each region occupies 16 MB of TQM PBL address space: 0x00000000, 0x01000000, 0x02000000, etc.
- * The host needs to allocate (<Number of QPs>*multiplier, rounded up to page size) of physical memory for non-zero slots
- * and map the pages to the corresponding 16MB regions.
- * Typically there are total 3 non-zero values in this array, their values are 16, 16, 12.
- * Cu+ will only populate up to index 11. SR may populate up to index 47.
+ * An array of 48 8-bit values specifying the allocation multiplier
+ * for TQM host buffer regions. Each region occupies 16 MB of TQM PBL
+ * address space: 0x00000000, 0x01000000, 0x02000000, etc.
+ * The host needs to allocate (<Number of QPs>*multiplier, rounded up
+ * to page size) of physical memory for non-zero slots and map the
+ * pages to the corresponding 16 MB regions. Typically there are a
+ * total of 3 non-zero values in this array; their values are
+ * 16, 16, 12. Cu+ will only populate up to index 11. SR may populate
+ * up to index 47.
*/
uint32_t tqm_alloc_reqs[12];
/* Max Doorbell page indices supported. */
uint32_t max_dpi;
/* Max SGEs per QP WQE supported in the variable-size WQE mode. */
uint8_t max_sge_var_wqe;
- uint8_t reserved_8;
+ /* Device capability extended flags. */
+ uint8_t dev_cap_ext_flags;
+ /* RDMA Atomic operations are not supported. */
+ #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED UINT32_C(0x1)
+ /* Support driver version registration. */
+ #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED UINT32_C(0x2)
+ /* Support for batch allocation of QPs is enabled. */
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED UINT32_C(0x4)
+ /* Support for batch deletion of QPs is enabled. */
+ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED UINT32_C(0x8)
+ /*
+ * Support for extended RoCE statistics context
+ * with periodic DMA is enabled. The statistics contexts
+ * are allocated via `allocate_roce_stats_ext_ctx`
+ * and deallocated via `deallocate_roce_stats_ext_ctx`.
+ * These contexts are assigned on a per-QP, per-group of QPs
+ * or per-function basis via `create_qp`, `create_qp_batch`
+ * or `modify_qp` command.
+ * In addition to periodic DMA to a host address,
+ * these statistics can be queried via `query_roce_stats_ext_v2`.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED UINT32_C(0x10)
+ /*
+ * Support for the srq_sge field in the create_srq command is
+ * enabled.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED UINT32_C(0x20)
+ /* Support for fixed size SQ wqe (128B) is disabled. */
+ #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED UINT32_C(0x40)
+ /* Support for DCN (Drop Congestion Notification) is enabled. */
+ #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED UINT32_C(0x80)
/* Max inline data supported in the variable-size WQE mode. */
uint16_t max_inline_data_var_wqe;
+ /*
+ * Starting xid of the predetermined assignment scheme supported
+ * by the pseudo static allocation feature. Note that for a PF,
+ * the start_qid is itself pseudo-static, and can change when the QP
+ * context id range is migrated by the driver using the
+ * cmdq_orchestrate_qid_migration command. The supported QP count is
+ * available in the `max_qp` field of the `query_func` response.
+ */
+ uint32_t start_qid;
+ /*
+ * Max number of MSN table entries supported for devices that support
+ * the `internal_queue_memory` feature.
+ */
+ uint8_t max_msn_table_size;
+ /* reserved8_1 is 8 b */
+ uint8_t reserved8_1;
+ /* Device capability extended flags_2 */
+ uint16_t dev_cap_ext_flags_2;
+ /* Firmware support for optimizing Modify QP operation */
+ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED UINT32_C(0x1)
+ /*
+ * Device supports changing UDP source port of RoCEv2 packets using
+ * WQE.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED UINT32_C(0x2)
+ /* Device supports CQ Coalescing. */
+ #define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED UINT32_C(0x4)
+ /*
+ * Device allows a memory region to be designated as
+ * relaxed-ordering enabled or disabled.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED UINT32_C(0x8)
+ /* The type of lookup table used for requester retransmission. */
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK UINT32_C(0x30)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
+ /* Requester Retransmission uses a PSN table in host memory. */
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (UINT32_C(0x0) << 4)
+ /* Requester Retransmission uses an MSN table in host memory. */
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (UINT32_C(0x1) << 4)
+ /*
+ * Requester Retransmission uses an MSN table in Device Internal
+ * Queue Memory.
+ */
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (UINT32_C(0x2) << 4)
+ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
+ /*
+ * Max number of 16B IQM memory slots supported by SQ or RQ
+ * when the QP is in express mode.
+ * This field is only valid for express mode QPs.
+ */
+ uint16_t max_xp_qp_size;
+ /*
+ * Max number of QPs that can be created in one `create_qp_batch`
+ * command.
+ */
+ uint16_t create_qp_batch_size;
+ /*
+ * Max number of QPs that can be destroyed in one `destroy_qp_batch`
+ * command.
+ */
+ uint16_t destroy_qp_batch_size;
+ uint16_t reserved16;
+ uint64_t reserved64;
} creq_query_func_resp_sb_t, *pcreq_query_func_resp_sb_t;
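dev_cap_ext_flags_2 mixes single-bit capabilities with a two-bit requester-retransmission mode, so the mode must be masked out before comparison. A hedged decoding sketch; the function name is illustrative:

static const char *
bnxt_req_retx_mode(const creq_query_func_resp_sb_t *sb)
{
	/* Isolate bits 5:4 before comparing against the enumerated values. */
	switch (sb->dev_cap_ext_flags_2 &
	    CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) {
	case CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE:
		return ("PSN table in host memory");
	case CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE:
		return ("MSN table in host memory");
	case CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE:
		return ("MSN table in internal queue memory");
	default:
		return ("unknown");
	}
}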
/**********************
@@ -67943,15 +75669,34 @@ typedef struct cmdq_set_func_resources {
uint8_t reserved8;
/* Host address of the response. */
uint64_t resp_addr;
- /* Number of QPs. It is the responsibility of the host to first extend the existing PBL with new addresses to pages to handle the adjustment. Must be greater or equal to current. */
+ /*
+ * Number of QPs. It is the responsibility of the host to first
+ * extend the existing PBL with addresses of new pages to handle the
+ * adjustment. Must be greater than or equal to the current value.
+ */
uint32_t number_of_qp;
- /* Number of MRWs. It is the responsibility of the host to first extend the existing PBL with new addresses to pages to handle the adjustment. Must be greater or equal to current. */
+ /*
+ * Number of MRWs. It is the responsibility of the host to first
+ * extend the existing PBL with addresses of new pages to handle the
+ * adjustment. Must be greater than or equal to the current value.
+ */
uint32_t number_of_mrw;
- /* Number of SRQs. It is the responsibility of the host to first extend the existing PBL with new addresses to pages to handle the adjustment. Must be greater or equal to current. */
+ /*
+ * Number of SRQs. It is the responsibility of the host to first
+ * extend the existing PBL with addresses of new pages to handle the
+ * adjustment. Must be greater than or equal to the current value.
+ */
uint32_t number_of_srq;
- /* Number of CQs. It is the responsibility of the host to first extend the existing PBL with new addresses to pages to handle the adjustment. Must be greater or equal to current. */
+ /*
+ * Number of CQs. It is the responsibility of the host to first
+ * extend the existing PBL with addresses of new pages to handle the
+ * adjustment. Must be greater than or equal to the current value.
+ */
uint32_t number_of_cq;
- /* Number of QPs per VF. */
+ /*
+ * Number of QPs per VF. This field must be set to zero when the
+ * l2_vf_resource_mgmt flag is set and RoCE SRIOV is enabled.
+ */
uint32_t max_qp_per_vf;
/*
* If the MR/AV split reservation flag is not set, then this field
@@ -67966,13 +75711,25 @@ typedef struct cmdq_set_func_resources {
* `max_av_per_vf`. The granularity of these values is defined by
* the `mrav_num_entries_unit` field returned by the
* `backing_store_qcaps` command.
+ *
+ * This field must be set to zero when the l2_vf_resource_mgmt flag
+ * is set and RoCE SRIOV is enabled.
*/
uint32_t max_mrw_per_vf;
- /* Number of SRQs per VF. */
+ /*
+ * Number of SRQs per VF. This field must be set to zero when the
+ * l2_vf_resource_mgmt flag is set and RoCE SRIOV is enabled.
+ */
uint32_t max_srq_per_vf;
- /* Number of CQs per VF. */
+ /*
+ * Number of CQs per VF. This field must be set to zero when the
+ * l2_vf_resource_mgmt flag is set and RoCE SRIOV is enabled.
+ */
uint32_t max_cq_per_vf;
- /* Number of GIDs per VF. */
+ /*
+ * Number of GIDs per VF. This field must be set to zero when the
+ * l2_vf_resource_mgmt flag is set and RoCE SRIOV is enabled.
+ */
uint32_t max_gid_per_vf;
/* Statistics context index for this function. */
uint32_t stat_ctx_id;
@@ -68086,7 +75843,10 @@ typedef struct creq_stop_func_resp {
typedef struct cmdq_read_context {
/* Command opcode. */
uint8_t opcode;
- /* Read the current state of any internal resource context. Can only be issued from a PF. */
+ /*
+ * Read the current state of any internal resource context. Can only
+ * be issued from a PF.
+ */
#define CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT UINT32_C(0x85)
#define CMDQ_READ_CONTEXT_OPCODE_LAST CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT
/* Size of the command in 16-byte units. */
@@ -68100,23 +75860,33 @@ typedef struct cmdq_read_context {
uint8_t reserved8;
/* Host address of the response. */
uint64_t resp_addr;
- uint32_t type_xid;
/* Context ID */
- #define CMDQ_READ_CONTEXT_XID_MASK UINT32_C(0xffffff)
- #define CMDQ_READ_CONTEXT_XID_SFT 0
+ uint32_t xid;
/* Context type */
- #define CMDQ_READ_CONTEXT_TYPE_MASK UINT32_C(0xff000000)
- #define CMDQ_READ_CONTEXT_TYPE_SFT 24
- /* Read QPC. The context (448 bytes) goes to resp_addr (as is, without a header), and resp_size should be set to 28 (448/16) */
- #define CMDQ_READ_CONTEXT_TYPE_QPC (UINT32_C(0x0) << 24)
- /* Read CQ. The context (64 bytes) goes to resp_addr (as is, without a header), and resp_size should be set to 4 (64/16) */
- #define CMDQ_READ_CONTEXT_TYPE_CQ (UINT32_C(0x1) << 24)
- /* Read MRW. The context (128 bytes) goes to resp_addr (as is, without a header), and resp_size should be set to 8 (128/16) */
- #define CMDQ_READ_CONTEXT_TYPE_MRW (UINT32_C(0x2) << 24)
- /* Read SRQ. The context (64 bytes) goes to resp_addr (as is, without a header), and resp_size should be set to 4 (64/16) */
- #define CMDQ_READ_CONTEXT_TYPE_SRQ (UINT32_C(0x3) << 24)
- #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ
- uint32_t unused_0;
+ uint8_t type;
+ /*
+ * Read QPC. The context (448 bytes) goes to resp_addr (as is,
+ * without a header), and resp_size should be set to 28
+ * (448/16).
+ */
+ #define CMDQ_READ_CONTEXT_TYPE_QPC UINT32_C(0x0)
+ /*
+ * Read CQ. The context (64 bytes) goes to resp_addr (as is,
+ * without a header), and resp_size should be set to 4 (64/16)
+ */
+ #define CMDQ_READ_CONTEXT_TYPE_CQ UINT32_C(0x1)
+ /*
+ * Read MRW. The context (128 bytes) goes to resp_addr (as is,
+ * without a header), and resp_size should be set to 8 (128/16)
+ */
+ #define CMDQ_READ_CONTEXT_TYPE_MRW UINT32_C(0x2)
+ /*
+ * Read SRQ. The context (64 bytes) goes to resp_addr (as is,
+ * without a header), and resp_size should be set to 4 (64/16)
+ */
+ #define CMDQ_READ_CONTEXT_TYPE_SRQ UINT32_C(0x3)
+ #define CMDQ_READ_CONTEXT_TYPE_LAST CMDQ_READ_CONTEXT_TYPE_SRQ
+ uint8_t unused_0[3];
} cmdq_read_context_t, *pcmdq_read_context_t;
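Splitting the packed type_xid word into a plain xid plus an 8-bit type simplifies command construction. A sketch for reading one QPC, assuming the usual command header fields referenced in the comments (resp_size, resp_addr) and eliding submission and DMA mapping:

#include <string.h>

static void
bnxt_build_read_qpc(cmdq_read_context_t *cmd, uint32_t qp_id,
    uint64_t resp_dma)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = CMDQ_READ_CONTEXT_OPCODE_READ_CONTEXT;
	cmd->resp_size = 448 / 16;	/* QPC is 448B, per the comment above */
	cmd->resp_addr = resp_dma;	/* host buffer receiving the raw context */
	cmd->xid = qp_id;
	cmd->type = CMDQ_READ_CONTEXT_TYPE_QPC;
}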
/* creq_read_context (size:128b/16B) */
@@ -68149,7 +75919,10 @@ typedef struct creq_read_context {
#define CREQ_READ_CONTEXT_V UINT32_C(0x1)
/* Event or command opcode. */
uint8_t event;
- /* Read the current state of any internal resource context. Can only be issued from a PF. */
+ /*
+ * Read the current state of any internal resource context. Can only
+ * be issued from a PF.
+ */
#define CREQ_READ_CONTEXT_EVENT_READ_CONTEXT UINT32_C(0x85)
#define CREQ_READ_CONTEXT_EVENT_LAST CREQ_READ_CONTEXT_EVENT_READ_CONTEXT
uint16_t reserved16;
@@ -68322,7 +76095,10 @@ typedef struct creq_query_roce_cc_resp_sb {
/* IP TOS ECN. */
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK UINT32_C(0x3)
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT 0
- /* IP TOS DSCP. */
+ /*
+ * IP TOS DSCP. When multi-lossless queue feature is enabled,
+ * query applies only to the default traffic class (1).
+ */
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK UINT32_C(0xfc)
#define CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT 2
/* Congestion Probability averaging factor. */
@@ -68470,7 +76246,10 @@ typedef struct creq_query_roce_cc_resp_sb_tlv {
/* IP TOS ECN. */
#define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_MASK UINT32_C(0x3)
#define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_ECN_SFT 0
- /* IP TOS DSCP. */
+ /*
+ * IP TOS DSCP. When multi-lossless queue feature is enabled,
+ * query applies only to the default traffic class (1).
+ */
#define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_MASK UINT32_C(0xfc)
#define CREQ_QUERY_ROCE_CC_RESP_SB_TLV_TOS_DSCP_SFT 2
/* Congestion Probability averaging factor. */
@@ -68586,9 +76365,15 @@ typedef struct creq_query_roce_cc_gen1_resp_sb_tlv {
uint64_t reserved64;
/* High order bits of inactivity threshold. */
uint16_t inactivity_th_hi;
- /* The number of uS between generation of CNPs when cc_mode is probabilistic marking. */
+ /*
+ * The number of uS between generation of CNPs when cc_mode is
+ * probabilistic marking.
+ */
uint16_t min_time_between_cnps;
- /* The starting value of congestion probability. Input range is 0 - 1023. */
+ /*
+ * The starting value of congestion probability. Input range
+ * is 0 - 1023.
+ */
uint16_t init_cp;
/*
* In tr_update_mode 0, Target Rate (TR) is updated to
@@ -68692,8 +76477,9 @@ typedef struct creq_query_roce_cc_gen1_resp_sb_tlv {
uint8_t tr_prob_factor;
/*
* Threshold to ensure fairness between requester and responder.
- * If CR is less than the fairness threshold and a quota period has passed
- * priority will be given to the path that did not last transfer data.
+ * If CR is less than the fairness threshold and a quota period has
+ * passed priority will be given to the path that did not last
+ * transfer data.
*/
uint16_t fairness_cr_th;
/* Log based rate reduction divider. */
@@ -68714,9 +76500,15 @@ typedef struct creq_query_roce_cc_gen1_resp_sb_tlv {
* updating CP to track CR.
*/
uint16_t cp_exp_update_th;
- /* The threshold on congestion free RTTs above which AI can increase to 16. */
+ /*
+ * The threshold on congestion free RTTs above which AI can increase
+ * to 16.
+ */
uint16_t high_exp_ai_rtts_th1;
- /* The threshold on congestion free RTTs above which AI can increase to 32. */
+ /*
+ * The threshold on congestion free RTTs above which AI can increase
+ * to 32.
+ */
uint16_t high_exp_ai_rtts_th2;
/*
* The number of congestion free RTTs above which
@@ -68751,19 +76543,101 @@ typedef struct creq_query_roce_cc_gen1_resp_sb_tlv {
*/
uint8_t reduce_init_en;
/*
- * Minimum threshold value for number of congestion free RTTs before reducing
- * to init values for CR, TR, and CP when reduce_init_en is enabled.
+ * Minimum threshold value for number of congestion free RTTs before
+ * reducing to init values for CR, TR, and CP when reduce_init_en is
+ * enabled.
*/
uint16_t reduce_init_cong_free_rtts_th;
/* Enables random no reduction of CR. */
uint8_t random_no_red_en;
- /* Enables coarse correction to actual CR when actual RTT is longer than nominal. */
+ /*
+ * Enables coarse correction to actual CR when actual RTT is longer
+ * than nominal.
+ */
uint8_t actual_cr_shift_correction_en;
/* Enables adjustment to refill quota. */
uint8_t quota_period_adjust_en;
uint8_t reserved[5];
} creq_query_roce_cc_gen1_resp_sb_tlv_t, *pcreq_query_roce_cc_gen1_resp_sb_tlv_t;
+/* creq_query_roce_cc_gen2_resp_sb_tlv (size:512b/64B) */
+
+typedef struct creq_query_roce_cc_gen2_resp_sb_tlv {
+ /*
+ * The command discriminator is used to differentiate between various
+ * types of HWRM messages. This includes legacy HWRM and RoCE slowpath
+ * command messages as well as newer TLV encapsulated HWRM commands.
+ *
+ * For TLV encapsulated messages this field must be 0x8000.
+ */
+ uint16_t cmd_discr;
+ uint8_t reserved_8b;
+ uint8_t tlv_flags;
+ /*
+ * Indicates the presence of additional TLV encapsulated data
+ * follows this TLV.
+ */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE UINT32_C(0x1)
+ /* Last TLV in a sequence of TLVs. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_LAST UINT32_C(0x0)
+ /* More TLVs follow this TLV. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1)
+ /*
+ * When an HWRM receiver detects a TLV type that it does not
+ * support with the TLV required flag set, the receiver must
+ * reject the HWRM message with an error code indicating an
+ * unsupported TLV type.
+ */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED UINT32_C(0x2)
+ /* No */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1)
+ /* Yes */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1)
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_LAST CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_TLV_FLAGS_REQUIRED_YES
+ /*
+ * This field defines the TLV type value which is divided into
+ * two ranges to differentiate between global and local TLV types.
+ * Global TLV types must be unique across all defined TLV types.
+ * Local TLV types are valid only for extensions to a given
+ * HWRM message and may be repeated across different HWRM message
+ * types. There is a direct correlation of each HWRM message type
+ * to a single global TLV type value.
+ *
+ * Global TLV range: `0 - (63k-1)`
+ *
+ * Local TLV range: `63k - (64k-1)`
+ */
+ uint16_t tlv_type;
+ /*
+ * Length of the message data encapsulated by this TLV in bytes.
+ * This length does not include the size of the TLV header itself
+ * and it must be an integer multiple of 8B.
+ */
+ uint16_t length;
+ uint64_t reserved64;
+ /*
+ * DCN queue level threshold values associated with DCN queue
+ * level table indices 0 to 7.
+ */
+ uint16_t dcn_qlevel_tbl_thr[8];
+ /*
+ * DCN queue level table action values.
+ * Returns CR, INC_CNP, UPD_IMM & TR fields associated with
+ * DCN queue level table indices 0 to 7.
+ */
+ uint32_t dcn_qlevel_tbl_act[8];
+ /* DCN queue level current rate. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK UINT32_C(0x3fff)
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0
+ /* DCN queue level increment CNP count. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP UINT32_C(0x4000)
+ /* DCN queue level update CR and TR immediately. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM UINT32_C(0x8000)
+ /* DCN queue level target rate. */
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK UINT32_C(0x3fff0000)
+ #define CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16
+} creq_query_roce_cc_gen2_resp_sb_tlv_t, *pcreq_query_roce_cc_gen2_resp_sb_tlv_t;
+
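Each dcn_qlevel_tbl_act word packs CR (bits 13:0), INC_CNP (bit 14), UPD_IMM (bit 15) and TR (bits 29:16). A decoding sketch for one table entry, assuming stdbool or the kernel equivalent; the function name is illustrative:

static void
bnxt_decode_dcn_act(uint32_t act, uint16_t *cr, uint16_t *tr,
    bool *inc_cnp, bool *upd_imm)
{
	*cr = (act & CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK) >>
	    CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT;
	*inc_cnp = (act &
	    CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP) != 0;
	*upd_imm = (act &
	    CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM) != 0;
	*tr = (act & CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK) >>
	    CREQ_QUERY_ROCE_CC_GEN2_RESP_SB_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT;
}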
/***********************
* cmdq_modify_roce_cc *
***********************/
@@ -68848,7 +76722,10 @@ typedef struct cmdq_modify_roce_cc {
/* IP TOS ECN. Valid values are 1 or 2 when ECN is enabled. */
#define CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK UINT32_C(0x3)
#define CMDQ_MODIFY_ROCE_CC_TOS_ECN_SFT 0
- /* IP TOS DSCP. */
+ /*
+ * IP TOS DSCP. When multi-lossless queue feature is enabled,
+ * update applies only to the default traffic class (1).
+ */
#define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_MASK UINT32_C(0xfc)
#define CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT 2
uint8_t alt_vlan_pcp;
@@ -68967,7 +76844,10 @@ typedef struct cmdq_modify_roce_cc_tlv {
* and it must be an integer multiple of 8B.
*/
uint16_t length;
- /* Size of the tlv encapsulated command, including all tlvs and extension data in 16-byte units. */
+ /*
+ * Size of the tlv encapsulated command, including all tlvs and
+ * extension data in 16-byte units.
+ */
uint8_t total_size;
uint8_t reserved56[7];
/* Command opcode. */
@@ -69046,7 +76926,10 @@ typedef struct cmdq_modify_roce_cc_tlv {
/* IP TOS ECN. Valid values are 1 or 2 when ECN is enabled. */
#define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_MASK UINT32_C(0x3)
#define CMDQ_MODIFY_ROCE_CC_TLV_TOS_ECN_SFT 0
- /* IP TOS DSCP. */
+ /*
+ * IP TOS DSCP. When multi-lossless queue feature is enabled,
+ * update applies only to the default traffic class (1).
+ */
#define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_MASK UINT32_C(0xfc)
#define CMDQ_MODIFY_ROCE_CC_TLV_TOS_DSCP_SFT 2
uint8_t alt_vlan_pcp;
@@ -69165,7 +77048,10 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
uint64_t reserved64;
/* Modify mask signifies the field that is requesting the change. */
uint64_t modify_mask;
- /* Update the number of uS between generation of CNPs for probabilistic marking mode. */
+ /*
+ * Update the number of uS between generation of CNPs for
+ * probabilistic marking mode.
+ */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_MIN_TIME_BETWEEN_CNPS UINT32_C(0x1)
/*
* Update starting value of Congestion Probability (CP).
@@ -69211,7 +77097,10 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RTT_JITTER_EN UINT32_C(0x8000)
/* Update number of bytes per usec. */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_LINK_BYTES_PER_USEC UINT32_C(0x10000)
- /* Update threshold used to reset QPC CC state to its initial state. */
+ /*
+ * Update threshold used to reset QPC CC state to its initial
+ * state.
+ */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RESET_CC_CR_TH UINT32_C(0x20000)
/* Update number of valid lsbits in CR and TR */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CR_WIDTH UINT32_C(0x40000)
@@ -69299,7 +77188,10 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_CC_ACK_BYTES UINT64_C(0x4000000000)
/* Update enable of reduction of CR, TR, and CP to init values. */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_EN UINT64_C(0x8000000000)
- /* Update threshold used for reduction of CR, TR, and CP to init values. */
+ /*
+ * Update threshold used for reduction of CR, TR, and CP to init
+ * values.
+ */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_REDUCE_INIT_CONG_FREE_RTTS_TH UINT64_C(0x10000000000)
/* Update enable of random no reduction of CR. */
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_RANDOM_NO_RED_EN UINT64_C(0x20000000000)
@@ -69309,9 +77201,15 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
#define CMDQ_MODIFY_ROCE_CC_GEN1_TLV_MODIFY_MASK_QUOTA_PERIOD_ADJUST_EN UINT64_C(0x80000000000)
/* High order bits of inactivity threshold. */
uint16_t inactivity_th_hi;
- /* The number of uS between generation of CNPs when cc_mode is probabilistic marking. */
+ /*
+ * The number of uS between generation of CNPs when cc_mode is
+ * probabilistic marking.
+ */
uint16_t min_time_between_cnps;
- /* The starting value of congestion probability. Input range is 0 - 1023. */
+ /*
+ * The starting value of congestion probability. Input range
+ * is 0 - 1023.
+ */
uint16_t init_cp;
/*
* In tr_update_mode 0, Target Rate (TR) is updated to
@@ -69415,8 +77313,9 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
uint8_t tr_prob_factor;
/*
* Threshold to ensure fairness between requester and responder.
- * If CR is less than the fairness threshold and a quota period has passed
- * priority will be given to the path that did not last transfer data.
+ * If CR is less than the fairness threshold and a quota period has
+ * passed priority will be given to the path that did not last
+ * transfer data.
*/
uint16_t fairness_cr_th;
/* Log based rate reduction divider. */
@@ -69437,9 +77336,15 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
* updating CP to track CR.
*/
uint16_t cp_exp_update_th;
- /* The threshold on congestion free RTTs above which AI can increase to 16. */
+ /*
+ * The threshold on congestion free RTTs above which AI can increase
+ * to 16.
+ */
uint16_t high_exp_ai_rtts_th1;
- /* The threshold on congestion free RTTs above which AI can increase to 32. */
+ /*
+ * The threshold on congestion free RTTs above which AI can increase
+ * to 32.
+ */
uint16_t high_exp_ai_rtts_th2;
/*
* The number of congestion free RTTs above which
@@ -69474,19 +77379,122 @@ typedef struct cmdq_modify_roce_cc_gen1_tlv {
*/
uint8_t reduce_init_en;
/*
- * Minimum threshold value for number of congestion free RTTs before reducing
- * to init values for CR, TR, and CP when reduce_init_en is enabled.
+ * Minimum threshold value for number of congestion free RTTs before
+ * reducing to init values for CR, TR, and CP when reduce_init_en is
+ * enabled.
*/
uint16_t reduce_init_cong_free_rtts_th;
/* Enables random no reduction of CR. */
uint8_t random_no_red_en;
- /* Enables coarse correction to actual CR when actual RTT is longer than nominal. */
+ /*
+ * Enables coarse correction to actual CR when actual RTT is longer
+ * than nominal.
+ */
uint8_t actual_cr_shift_correction_en;
/* Enables adjustment to refill quota. */
uint8_t quota_period_adjust_en;
uint8_t reserved[5];
} cmdq_modify_roce_cc_gen1_tlv_t, *pcmdq_modify_roce_cc_gen1_tlv_t;
+/* cmdq_modify_roce_cc_gen2_tlv (size:256b/32B) */
+
+typedef struct cmdq_modify_roce_cc_gen2_tlv {
+ /*
+ * The command discriminator is used to differentiate between various
+ * types of HWRM messages. This includes legacy HWRM and RoCE slowpath
+ * command messages as well as newer TLV encapsulated HWRM commands.
+ *
+ * For TLV encapsulated messages this field must be 0x8000.
+ */
+ uint16_t cmd_discr;
+ uint8_t reserved_8b;
+ uint8_t tlv_flags;
+ /*
+ * Indicates the presence of additional TLV encapsulated data
+ * follows this TLV.
+ */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE UINT32_C(0x1)
+ /* Last TLV in a sequence of TLVs. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_LAST UINT32_C(0x0)
+ /* More TLVs follow this TLV. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_MORE_NOT_LAST UINT32_C(0x1)
+ /*
+ * When an HWRM receiver detects a TLV type that it does not
+ * support with the TLV required flag set, the receiver must
+ * reject the HWRM message with an error code indicating an
+ * unsupported TLV type.
+ */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED UINT32_C(0x2)
+ /* No */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_NO (UINT32_C(0x0) << 1)
+ /* Yes */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES (UINT32_C(0x1) << 1)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_LAST CMDQ_MODIFY_ROCE_CC_GEN2_TLV_TLV_FLAGS_REQUIRED_YES
+ /*
+ * This field defines the TLV type value which is divided into
+ * two ranges to differentiate between global and local TLV types.
+ * Global TLV types must be unique across all defined TLV types.
+ * Local TLV types are valid only for extensions to a given
+ * HWRM message and may be repeated across different HWRM message
+ * types. There is a direct correlation of each HWRM message type
+ * to a single global TLV type value.
+ *
+ * Global TLV range: `0 - (63k-1)`
+ *
+ * Local TLV range: `63k - (64k-1)`
+ */
+ uint16_t tlv_type;
+ /*
+ * Length of the message data encapsulated by this TLV in bytes.
+ * This length does not include the size of the TLV header itself
+ * and it must be an integer multiple of 8B.
+ */
+ uint16_t length;
+ uint64_t reserved64;
+ /* Modify mask signifies the field that is requesting the change. */
+ uint64_t modify_mask;
+ /*
+ * Modify the specific DCN queue level table index data.
+ * This must be set to select the table index that needs an
+ * update.
+ */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_IDX UINT32_C(0x1)
+ /* Modify the DCN queue level threshold. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_THR UINT32_C(0x2)
+ /* Modify DCN queue level current rate. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_CR UINT32_C(0x4)
+ /* Modify DCN queue level increment CNP count. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_INC_CNP UINT32_C(0x8)
+ /* Modify DCN queue level update current & target rate immediately. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_UPD_IMM UINT32_C(0x10)
+ /* Modify DCN queue level target rate. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_TR UINT32_C(0x20)
+ /* DCN queue level table index. Valid values are from 0 to 7. */
+ uint8_t dcn_qlevel_tbl_idx;
+ uint8_t reserved8;
+ /*
+ * DCN queue level threshold value associated with a DCN queue
+ * level table index.
+ */
+ uint16_t dcn_qlevel_tbl_thr;
+ /*
+ * DCN queue level table action.
+ * Updates CR, INC_CNP, UPD_IMM & TR fields associated with the
+ * DCN queue level table index.
+ */
+ uint32_t dcn_qlevel_tbl_act;
+ /* DCN queue level current rate. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK UINT32_C(0x3fff)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT 0
+ /* DCN queue level increment CNP count. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_INC_CNP UINT32_C(0x4000)
+ /* DCN queue level update CR and TR immediately. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_UPD_IMM UINT32_C(0x8000)
+ /* DCN queue level target rate. */
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK UINT32_C(0x3fff0000)
+ #define CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT 16
+} cmdq_modify_roce_cc_gen2_tlv_t, *pcmdq_modify_roce_cc_gen2_tlv_t;
+
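+/*
+ * Illustrative sketch, not part of the generated HSI definitions: one
+ * plausible way to pack the gen2 TLV for a single DCN queue level
+ * table update. The helper name is hypothetical, and tlv_type, length,
+ * and endianness conversion (htole*) are omitted for brevity.
+ */
+static inline void
+example_pack_dcn_qlevel_update(cmdq_modify_roce_cc_gen2_tlv_t *tlv,
+    uint8_t idx, uint16_t thr, uint32_t cr, uint32_t tr)
+{
+	*tlv = (cmdq_modify_roce_cc_gen2_tlv_t){0};
+	/* TLV encapsulated messages carry the fixed 0x8000 discriminator. */
+	tlv->cmd_discr = 0x8000;
+	tlv->modify_mask =
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_IDX |
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_THR |
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_CR |
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_MODIFY_MASK_DCN_QLEVEL_TBL_TR;
+	tlv->dcn_qlevel_tbl_idx = idx;	/* valid values are 0 to 7 */
+	tlv->dcn_qlevel_tbl_thr = thr;
+	/* CR occupies bits [13:0] and TR bits [29:16] of the action word. */
+	tlv->dcn_qlevel_tbl_act =
+	    ((cr << CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_SFT) &
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_CR_MASK) |
+	    ((tr << CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_SFT) &
+	    CMDQ_MODIFY_ROCE_CC_GEN2_TLV_DCN_QLEVEL_TBL_ACT_TR_MASK);
+}
+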
/* creq_modify_roce_cc_resp (size:128b/16B) */
typedef struct creq_modify_roce_cc_resp {
@@ -69632,7 +77640,10 @@ typedef struct creq_set_link_aggr_mode_resources_resp {
typedef struct cmdq_vf_backchannel_request {
/* Command opcode. */
uint8_t opcode;
- /* Send a request from VF to pass a command to the PF. VF HSI is suspended until the PF returns the response */
+ /*
+ * Send a request from VF to pass a command to the PF. VF HSI is
+ * suspended until the PF returns the response.
+ */
#define CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_VF_BACKCHANNEL_REQUEST UINT32_C(0x86)
#define CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_LAST CMDQ_VF_BACKCHANNEL_REQUEST_OPCODE_VF_BACKCHANNEL_REQUEST
/* Size of the command in 16-byte units. */
@@ -69648,7 +77659,10 @@ typedef struct cmdq_vf_backchannel_request {
uint64_t resp_addr;
/* Address of command request structure in VF space */
uint64_t command_addr;
- /* Command request length (up to 4K). An optional address of the extended response buffer should be provided in the request */
+ /*
+ * Command request length (up to 4K). An optional address of the extended
+ * response buffer should be provided in the request.
+ */
uint16_t command_length;
uint8_t unused_0[6];
} cmdq_vf_backchannel_request_t, *pcmdq_vf_backchannel_request_t;
@@ -69659,7 +77673,10 @@ typedef struct cmdq_vf_backchannel_request {
typedef struct cmdq_read_vf_memory {
/* Command opcode. */
uint8_t opcode;
- /* Read VF memory (primarily to get the backchannel request blob). Can only be issued from a PF. */
+ /*
+ * Read VF memory (primarily to get the backchannel request blob). Can
+ * only be issued from a PF.
+ */
#define CMDQ_READ_VF_MEMORY_OPCODE_READ_VF_MEMORY UINT32_C(0x87)
#define CMDQ_READ_VF_MEMORY_OPCODE_LAST CMDQ_READ_VF_MEMORY_OPCODE_READ_VF_MEMORY
/* Size of the command in 16-byte units. */
@@ -69688,7 +77705,11 @@ typedef struct cmdq_read_vf_memory {
typedef struct cmdq_complete_vf_request {
/* Command opcode. */
uint8_t opcode;
- /* Write VF memory (primarily to put the backchannel response blob), and reenable VF HSI (post a CAG completion to it). Can only be issued from a PF. */
+ /*
+ * Write VF memory (primarily to put the backchannel response blob),
+ * and reenable VF HSI (post a CAG completion to it). Can only be
+ * issued from a PF.
+ */
#define CMDQ_COMPLETE_VF_REQUEST_OPCODE_COMPLETE_VF_REQUEST UINT32_C(0x88)
#define CMDQ_COMPLETE_VF_REQUEST_OPCODE_LAST CMDQ_COMPLETE_VF_REQUEST_OPCODE_COMPLETE_VF_REQUEST
/* Size of the command in 16-byte units. */
@@ -69702,7 +77723,10 @@ typedef struct cmdq_complete_vf_request {
uint8_t reserved8;
/* Host address of the response. */
uint64_t resp_addr;
- /* Optional address of extended response in VF space to write. Length is in resp_size in 16 byte units. */
+ /*
+ * Optional address of extended response in VF space to write. Length is
+ * in resp_size in 16 byte units.
+ */
uint64_t addr;
/* Completion misc field to VF CREQ */
uint32_t vf_misc;
@@ -69716,6 +77740,587 @@ typedef struct cmdq_complete_vf_request {
uint32_t unused_1;
} cmdq_complete_vf_request_t, *pcmdq_complete_vf_request_t;
+/*****************************
+ * orchestrate_qid_migration *
+ *****************************/
+
+
+/* cmdq_orchestrate_qid_migration (size:256b/32B) */
+
+typedef struct cmdq_orchestrate_qid_migration {
+ /* Command opcode. */
+ uint8_t opcode;
+ /*
+ * This command updates the QP context id ranges on the PF,
+ * to orchestrate QP context id range migration for devices that
+ * support the pseudo-static QP allocation feature.
+ */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_OPCODE_ORCHESTRATE_QID_MIGRATION UINT32_C(0x93)
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_OPCODE_LAST CMDQ_ORCHESTRATE_QID_MIGRATION_OPCODE_ORCHESTRATE_QID_MIGRATION
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ uint8_t qid_migration_flags;
+ /* Flags to orchestrate QP context ID range migration amongst PFs. */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_MASK UINT32_C(0xf)
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_SFT 0
+ /* Enable the PF's native QP context ID range. */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_ENABLE_NATIVE_QID_RANGE UINT32_C(0x0)
+ /* Enable the PF's extended QP context ID range. */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_ENABLE_EXTENDED_QID_RANGE UINT32_C(0x1)
+ /* Disable the PF's native QP context ID range. */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_NATIVE_QID_RANGE UINT32_C(0x2)
+ /* Disable the PF's extended QP context ID range. */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_EXTENDED_QID_RANGE UINT32_C(0x3)
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_LAST CMDQ_ORCHESTRATE_QID_MIGRATION_QID_MIGRATION_FLAGS_DISABLE_EXTENDED_QID_RANGE
+ /* unused4 is 4 b */
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_UNUSED4_MASK UINT32_C(0xf0)
+ #define CMDQ_ORCHESTRATE_QID_MIGRATION_UNUSED4_SFT 4
+ uint8_t reserved56[7];
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
+} cmdq_orchestrate_qid_migration_t, *pcmdq_orchestrate_qid_migration_t;
+
+/* creq_orchestrate_qid_migration_resp (size:128b/16B) */
+
+typedef struct creq_orchestrate_qid_migration_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_TYPE_LAST CREQ_ORCHESTRATE_QID_MIGRATION_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ uint32_t reserved32;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Orchestrate QPID migration command response. */
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_EVENT_ORCHESTRATE_QID_MIGRATION UINT32_C(0x93)
+ #define CREQ_ORCHESTRATE_QID_MIGRATION_RESP_EVENT_LAST CREQ_ORCHESTRATE_QID_MIGRATION_RESP_EVENT_ORCHESTRATE_QID_MIGRATION
+ uint8_t reserved48[6];
+} creq_orchestrate_qid_migration_resp_t, *pcreq_orchestrate_qid_migration_resp_t;
+
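+/*
+ * Illustrative sketch, not part of the generated HSI definitions: the
+ * consumption pattern implied by the `v` bit and the type LSB shared
+ * by the creq_*_resp structures in this file. The host tracks an
+ * expected phase that flips on every wrap of the completion queue; an
+ * entry is valid only once its `v` bit matches that phase. Helper
+ * names are hypothetical.
+ */
+static inline int
+example_creq_entry_valid(const creq_orchestrate_qid_migration_resp_t *resp,
+    uint8_t expected_phase)
+{
+	/* Even passes through the queue write v=1, odd passes write v=0. */
+	return ((resp->v & CREQ_ORCHESTRATE_QID_MIGRATION_RESP_V) ==
+	    expected_phase);
+}
+
+static inline unsigned int
+example_creq_record_bytes(uint8_t type)
+{
+	/* Even type values indicate 16B records, odd values 32B records. */
+	return ((type & 1) != 0 ? 32 : 16);
+}
+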
+/*******************
+ * create_qp_batch *
+ *******************/
+
+
+/* cmdq_create_qp_batch (size:384b/48B) */
+
+typedef struct cmdq_create_qp_batch {
+ /* Command opcode. */
+ uint8_t opcode;
+ /* This command allocates a batch of QPs in a sequential range. */
+ #define CMDQ_CREATE_QP_BATCH_OPCODE_CREATE_QP_BATCH UINT32_C(0x94)
+ #define CMDQ_CREATE_QP_BATCH_OPCODE_LAST CMDQ_CREATE_QP_BATCH_OPCODE_CREATE_QP_BATCH
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ /* Starting QP context id to be used for the sequential range. */
+ uint32_t start_xid;
+ /* Count of QPs to be allocated. */
+ uint32_t count;
+ /* Size of an individual element of the qp_params_array. */
+ uint32_t per_qp_param_size;
+ uint32_t reserved32;
+ /*
+ * Host DMA address of the array of per-QP parameters.
+ * Per-QP parameters are identical to those of the
+ * `create_qp` command and specified by the
+ * `create_qp_batch_data` structure.
+ */
+ uint64_t qp_params_array;
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
+} cmdq_create_qp_batch_t, *pcmdq_create_qp_batch_t;
+
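+/*
+ * Illustrative sketch, not part of the generated HSI definitions: the
+ * host buffer behind qp_params_array is a packed array of `count`
+ * entries of `per_qp_param_size` bytes each, so the DMA allocation
+ * for a batch is sized as below. The helper name is hypothetical.
+ */
+static inline uint64_t
+example_qp_params_array_bytes(const cmdq_create_qp_batch_t *req)
+{
+	/* One create_qp_batch_data element per QP in the batch. */
+	return ((uint64_t)req->count * req->per_qp_param_size);
+}
+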
+/* creq_create_qp_batch_resp (size:128b/16B) */
+
+typedef struct creq_create_qp_batch_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_CREATE_QP_BATCH_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_CREATE_QP_BATCH_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_CREATE_QP_BATCH_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_CREATE_QP_BATCH_RESP_TYPE_LAST CREQ_CREATE_QP_BATCH_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ uint32_t reserved32;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_CREATE_QP_BATCH_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Create batch QPs command response. */
+ #define CREQ_CREATE_QP_BATCH_RESP_EVENT_CREATE_QP_BATCH UINT32_C(0x94)
+ #define CREQ_CREATE_QP_BATCH_RESP_EVENT_LAST CREQ_CREATE_QP_BATCH_RESP_EVENT_CREATE_QP_BATCH
+ uint16_t reserved16;
+ /* Count of QPs successfully created. */
+ uint32_t count;
+} creq_create_qp_batch_resp_t, *pcreq_create_qp_batch_resp_t;
+
+/********************
+ * destroy_qp_batch *
+ ********************/
+
+
+/* cmdq_destroy_qp_batch (size:256b/32B) */
+
+typedef struct cmdq_destroy_qp_batch {
+ /* Command opcode. */
+ uint8_t opcode;
+ /*
+ * This command deletes a batch of the requested count of QPs.
+ * The starting QP ID can be specified to request a batch deletion
+ * of a sequential range.
+ */
+ #define CMDQ_DESTROY_QP_BATCH_OPCODE_DESTROY_QP_BATCH UINT32_C(0x95)
+ #define CMDQ_DESTROY_QP_BATCH_OPCODE_LAST CMDQ_DESTROY_QP_BATCH_OPCODE_DESTROY_QP_BATCH
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ /* Starting QP context id to be used for the sequential range. */
+ uint32_t start_xid;
+ /*
+ * Count of QPs to be deleted. A value of zero implies all QPs
+ * are to be deleted.
+ */
+ uint32_t count;
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
+} cmdq_destroy_qp_batch_t, *pcmdq_destroy_qp_batch_t;
+
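+/*
+ * Illustrative sketch, not part of the generated HSI definitions: a
+ * ranged teardown and a full teardown differ only in start_xid and
+ * count, since a count of zero requests deletion of all QPs. The
+ * helper name is hypothetical; cookie, resp_addr and the other
+ * bookkeeping fields are omitted for brevity.
+ */
+static inline void
+example_fill_destroy_qp_batch(cmdq_destroy_qp_batch_t *req,
+    uint32_t start_xid, uint32_t count)
+{
+	*req = (cmdq_destroy_qp_batch_t){0};
+	req->opcode = CMDQ_DESTROY_QP_BATCH_OPCODE_DESTROY_QP_BATCH;
+	req->start_xid = start_xid;
+	req->count = count;	/* 0 implies all QPs are to be deleted */
+}
+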
+/* creq_destroy_qp_batch_resp (size:128b/16B) */
+
+typedef struct creq_destroy_qp_batch_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_DESTROY_QP_BATCH_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_DESTROY_QP_BATCH_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_DESTROY_QP_BATCH_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_DESTROY_QP_BATCH_RESP_TYPE_LAST CREQ_DESTROY_QP_BATCH_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ uint32_t reserved32;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_DESTROY_QP_BATCH_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Destroy batch QPs command response. */
+ #define CREQ_DESTROY_QP_BATCH_RESP_EVENT_DESTROY_QP_BATCH UINT32_C(0x95)
+ #define CREQ_DESTROY_QP_BATCH_RESP_EVENT_LAST CREQ_DESTROY_QP_BATCH_RESP_EVENT_DESTROY_QP_BATCH
+ uint16_t reserved16;
+ /* Count of QPs successfully destroyed. */
+ uint32_t count;
+} creq_destroy_qp_batch_resp_t, *pcreq_destroy_qp_batch_resp_t;
+
+/*******************************
+ * allocate_roce_stats_ext_ctx *
+ *******************************/
+
+
+/* cmdq_allocate_roce_stats_ext_ctx (size:256b/32B) */
+
+typedef struct cmdq_allocate_roce_stats_ext_ctx {
+ /* Command opcode. */
+ uint8_t opcode;
+ /*
+ * This command allocates an extended RoCE statistics context
+ * that supports periodic DMA to a host address. The extended
+ * statistics context id can be assigned by the driver,
+ * via `create_qp`, `create_qp_batch` or `modify_qp` to a
+ * specific QP, a subset of QPs or to all QPs of a specific function.
+ * These statistics can be queried via `query_roce_stats_ext_v2`.
+ */
+ #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x96)
+ #define CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ /*
+ * This is the address to be programmed in the statistic block
+ * by the firmware to support periodic DMA of the statistics.
+ */
+ uint64_t stats_dma_addr;
+ /*
+ * The statistic block update period in ms.
+ * e.g. 250ms, 500ms, 750ms, 1000ms.
+ * If update_period_ms is 0, then the stats update
+ * shall never be done and the DMA address shall not be used.
+ * In this case, the statistics can only be read by
+ * `query_roce_stats_ext_v2` command.
+ */
+ uint32_t update_period_ms;
+ /* Steering tag to use for memory transactions. */
+ uint16_t steering_tag;
+ uint16_t reserved16;
+} cmdq_allocate_roce_stats_ext_ctx_t, *pcmdq_allocate_roce_stats_ext_ctx_t;
+
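+/*
+ * Illustrative sketch, not part of the generated HSI definitions: the
+ * two usage models of the extended statistics context. A non-zero
+ * update period enables periodic DMA to stats_dma_addr; a zero period
+ * selects query-only mode, in which the DMA address is unused and the
+ * counters are read via query_roce_stats_ext_v2. The helper name is
+ * hypothetical; cookie, resp_addr and the other bookkeeping fields
+ * are omitted for brevity.
+ */
+static inline void
+example_fill_stats_ctx_alloc(cmdq_allocate_roce_stats_ext_ctx_t *req,
+    uint64_t dma_addr, uint32_t period_ms)
+{
+	*req = (cmdq_allocate_roce_stats_ext_ctx_t){0};
+	req->opcode =
+	    CMDQ_ALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_ALLOCATE_ROCE_STATS_EXT_CTX;
+	/* e.g. 250, 500, 750 or 1000; 0 disables the periodic DMA. */
+	req->update_period_ms = period_ms;
+	req->stats_dma_addr = (period_ms != 0) ? dma_addr : 0;
+}
+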
+/* creq_allocate_roce_stats_ext_ctx_resp (size:128b/16B) */
+
+typedef struct creq_allocate_roce_stats_ext_ctx_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Extended RoCE statistics context id. */
+ uint32_t roce_stats_ext_xid;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Allocate extended RoCE statistics context command response. */
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x96)
+ #define CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_ALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_ALLOCATE_ROCE_STATS_EXT_CTX
+ uint8_t reserved48[6];
+} creq_allocate_roce_stats_ext_ctx_resp_t, *pcreq_allocate_roce_stats_ext_ctx_resp_t;
+
+/*********************************
+ * deallocate_roce_stats_ext_ctx *
+ *********************************/
+
+
+/* cmdq_deallocate_roce_stats_ext_ctx (size:256b/32B) */
+
+typedef struct cmdq_deallocate_roce_stats_ext_ctx {
+ /* Command opcode. */
+ uint8_t opcode;
+ /* This command deallocates an extended RoCE statistics context. */
+ #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x97)
+ #define CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_LAST CMDQ_DEALLOCATE_ROCE_STATS_EXT_CTX_OPCODE_DEALLOCATE_ROCE_STATS_EXT_CTX
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ /* Extended RoCE statistics context id. */
+ uint32_t roce_stats_ext_xid;
+ uint32_t reserved32;
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
+} cmdq_deallocate_roce_stats_ext_ctx_t, *pcmdq_deallocate_roce_stats_ext_ctx_t;
+
+/* creq_deallocate_roce_stats_ext_ctx_resp (size:128b/16B) */
+
+typedef struct creq_deallocate_roce_stats_ext_ctx_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Extended RoCE statistics context id. */
+ uint32_t roce_stats_ext_xid;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Deallocate extended RoCE statistics context command response. */
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX UINT32_C(0x97)
+ #define CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_LAST CREQ_DEALLOCATE_ROCE_STATS_EXT_CTX_RESP_EVENT_DEALLOCATE_ROCE_STATS_EXT_CTX
+ uint8_t reserved48[6];
+} creq_deallocate_roce_stats_ext_ctx_resp_t, *pcreq_deallocate_roce_stats_ext_ctx_resp_t;
+
+/***************************
+ * query_roce_stats_ext_v2 *
+ ***************************/
+
+
+/* cmdq_query_roce_stats_ext_v2 (size:256b/32B) */
+
+typedef struct cmdq_query_roce_stats_ext_v2 {
+ /* Command opcode. */
+ uint8_t opcode;
+ /*
+ * Query extended RoCE statistics for devices that support
+ * the `roce_stats_ext_ctx_supported` feature.
+ */
+ #define CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2 UINT32_C(0x98)
+ #define CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_LAST CMDQ_QUERY_ROCE_STATS_EXT_V2_OPCODE_QUERY_ROCE_STATS_EXT_V2
+ /* Size of the command in 16-byte units. */
+ uint8_t cmd_size;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t reserved8;
+ /* Host address of the response. */
+ uint64_t resp_addr;
+ /* Extended RoCE statistics context id. */
+ uint32_t roce_stats_ext_xid;
+ uint32_t reserved32;
+ /* reserved64 is 64 b */
+ uint64_t reserved64;
+} cmdq_query_roce_stats_ext_v2_t, *pcmdq_query_roce_stats_ext_v2_t;
+
+/* creq_query_roce_stats_ext_v2_resp (size:128b/16B) */
+
+typedef struct creq_query_roce_stats_ext_v2_resp {
+ uint8_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
+ * records.
+ */
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_MASK UINT32_C(0x3f)
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_SFT 0
+ /* QP Async Notification */
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT UINT32_C(0x38)
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_TYPE_QP_EVENT
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Side buffer size in 16-byte units. */
+ uint32_t size;
+ uint8_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_V UINT32_C(0x1)
+ /* Event or command opcode. */
+ uint8_t event;
+ /* Query extended RoCE statistics v2. */
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2 UINT32_C(0x98)
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_EVENT_QUERY_ROCE_STATS_EXT_V2
+ uint8_t reserved48[6];
+} creq_query_roce_stats_ext_v2_resp_t, *pcreq_query_roce_stats_ext_v2_resp_t;
+
+/* Query extended RoCE Stats command response side buffer structure. */
+/* creq_query_roce_stats_ext_v2_resp_sb (size:1920b/240B) */
+
+typedef struct creq_query_roce_stats_ext_v2_resp_sb {
+ /* Command opcode. */
+ uint8_t opcode;
+ /* Query extended RoCE statistics v2. */
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2 UINT32_C(0x98)
+ #define CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_LAST CREQ_QUERY_ROCE_STATS_EXT_V2_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT_V2
+ /* Status of the response. */
+ uint8_t status;
+ /* Driver supplied handle to associate the command and the response. */
+ uint16_t cookie;
+ /* Flags and attribs of the command. */
+ uint16_t flags;
+ /* Size of the response buffer in 16-byte units. */
+ uint8_t resp_size;
+ uint8_t rsvd;
+ /* Number of transmitted Atomic request packets without errors. */
+ uint64_t tx_atomic_req_pkts;
+ /* Number of transmitted Read request packets without errors. */
+ uint64_t tx_read_req_pkts;
+ /* Number of transmitted Read response packets without errors. */
+ uint64_t tx_read_res_pkts;
+ /* Number of transmitted Write request packets without errors. */
+ uint64_t tx_write_req_pkts;
+ /* Number of transmitted RC Send packets without errors. */
+ uint64_t tx_rc_send_req_pkts;
+ /*
+ * Number of transmitted UD Send (including QP1) packets
+ * without errors.
+ */
+ uint64_t tx_ud_send_req_pkts;
+ /* Number of transmitted CNPs. Includes DCN_CNPs. */
+ uint64_t tx_cnp_pkts;
+ /*
+ * Number of transmitted RoCE packets.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t tx_roce_pkts;
+ /*
+ * Number of transmitted RoCE header and payload bytes.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t tx_roce_bytes;
+ /*
+ * Number of drops that occurred due to a lack of buffers.
+ * This count includes RC sends, RC writes with immediate,
+ * UD sends, RawEth, and QP1 packets dropped due to lack of buffers.
+ */
+ uint64_t rx_out_of_buffer_pkts;
+ /* Number of packets that were received out of sequence. */
+ uint64_t rx_out_of_sequence_pkts;
+ /*
+ * Number of duplicate read/atomic requests resulting in responder
+ * hardware retransmission.
+ */
+ uint64_t dup_req;
+ /*
+ * Number of missing response packets resulting in hardware
+ * retransmission.
+ */
+ uint64_t missing_resp;
+ /*
+ * Number of sequence error NAKs received resulting in hardware
+ * retransmission.
+ */
+ uint64_t seq_err_naks_rcvd;
+ /* Number of RNR NAKs received resulting in hardware retransmission. */
+ uint64_t rnr_naks_rcvd;
+ /* Number of timeouts resulting in hardware retransmission. */
+ uint64_t to_retransmits;
+ /* Number of received Atomic request packets without errors. */
+ uint64_t rx_atomic_req_pkts;
+ /* Number of received Read request packets without errors. */
+ uint64_t rx_read_req_pkts;
+ /* Number of received Read response packets without errors. */
+ uint64_t rx_read_res_pkts;
+ /* Number of received Write request packets without errors. */
+ uint64_t rx_write_req_pkts;
+ /* Number of received RC Send packets without errors. */
+ uint64_t rx_rc_send_pkts;
+ /* Number of received UD Send packets without errors. */
+ uint64_t rx_ud_send_pkts;
+ /* Number of received DCN payload cut packets. */
+ uint64_t rx_dcn_payload_cut;
+ /* Number of received ECN-marked packets. */
+ uint64_t rx_ecn_marked_pkts;
+ /* Number of received CNP packets. Includes DCN_CNPs. */
+ uint64_t rx_cnp_pkts;
+ /*
+ * Number of received RoCE packets including RoCE packets with errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_pkts;
+ /*
+ * Number of received RoCE header and payload bytes including RoCE
+ * packets with errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_bytes;
+ /*
+ * Number of received RoCE packets without errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_good_pkts;
+ /*
+ * Number of received RoCE header and payload bytes without errors.
+ * This includes RC, UD, RawEth, and QP1 packets.
+ */
+ uint64_t rx_roce_good_bytes;
+} creq_query_roce_stats_ext_v2_resp_sb_t, *pcreq_query_roce_stats_ext_v2_resp_sb_t;
+
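+/*
+ * Illustrative sketch, not part of the generated HSI definitions:
+ * validating the side buffer returned by query_roce_stats_ext_v2
+ * before reading a counter. The `size` field of the CREQ response is
+ * in 16-byte units, so it is scaled before being compared with the
+ * side buffer structure. The helper name is hypothetical.
+ */
+static inline uint64_t
+example_read_rx_roce_good_pkts(
+    const creq_query_roce_stats_ext_v2_resp_t *resp,
+    const creq_query_roce_stats_ext_v2_resp_sb_t *sb)
+{
+	/* Scale the 16B-unit size before comparing against the struct. */
+	if ((uint64_t)resp->size * 16 < sizeof(*sb))
+		return (0);
+	return (sb->rx_roce_good_pkts);
+}
+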
/* RoCE function async event notifications. */
/* creq_func_event (size:128b/16B) */
@@ -69786,7 +78391,10 @@ typedef struct creq_func_event {
#define CREQ_FUNC_EVENT_EVENT_TIM_ERROR UINT32_C(0xb)
/* A VF sent a backchannel command request */
#define CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST UINT32_C(0x80)
- /* Communication resource (QPC, CQ, SRQ, MRW) exhausted, and resource array extension is enabled */
+ /*
+ * Communication resource (QPC, CQ, SRQ, MRW) exhausted, and resource
+ * array extension is enabled.
+ */
#define CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED UINT32_C(0x81)
#define CREQ_FUNC_EVENT_EVENT_LAST CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED
uint8_t reserved48[6];
@@ -69894,7 +78502,10 @@ typedef struct creq_qp_event {
#define CREQ_QP_EVENT_EVENT_QUERY_FUNC UINT32_C(0x83)
/* Set function resources command response. */
#define CREQ_QP_EVENT_EVENT_SET_FUNC_RESOURCES UINT32_C(0x84)
- /* Read the current state of any internal resource context. Can only be issued from a PF. */
+ /*
+ * Read the current state of any internal resource context. Can only be
+ * issued from a PF.
+ */
#define CREQ_QP_EVENT_EVENT_READ_CONTEXT UINT32_C(0x85)
/* Map TC to COS response. */
#define CREQ_QP_EVENT_EVENT_MAP_TC_TO_COS UINT32_C(0x8a)
@@ -69909,8 +78520,8 @@ typedef struct creq_qp_event {
/* Set LAG mode. */
#define CREQ_QP_EVENT_EVENT_SET_LINK_AGGR_MODE UINT32_C(0x8f)
/*
- * Query QP for a PF other than the requesting PF. Also can query for more
- * than one QP.
+ * Query QP for a PF other than the requesting PF. Also can query for
+ * more than one QP.
*/
#define CREQ_QP_EVENT_EVENT_QUERY_QP_EXTEND UINT32_C(0x91)
/* QP error notification event. */
@@ -69944,6 +78555,236 @@ typedef struct creq_qp_error_notification {
uint8_t req_slow_path_state;
/* requestor error reason */
uint8_t req_err_state_reason;
+ /* No error. */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_NO_ERROR UINT32_C(0x0)
+ /*
+ * Requester detected opcode error.
+ * * First, only, middle, last for incoming RDMA read
+ * responses are improperly ordered with respect to previous
+ * (PSN) packet.
+ * * First or middle packet is not full MTU size.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_OPCODE_ERROR UINT32_C(0x1)
+ /*
+ * Transport timeout retry limit exceeded.
+ * The requestor retried the same unacked PSN request packet
+ * too many times.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TIMEOUT_RETRY_LIMIT UINT32_C(0x2)
+ /*
+ * RNR NAK retry limit exceeded.
+ * The requestor received an RNR NAK with the same NAK PSN
+ * too many times.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RNR_TIMEOUT_RETRY_LIMIT UINT32_C(0x3)
+ /*
+ * NAK arrival. When NAK code is 1, Invalid Request.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_1 UINT32_C(0x4)
+ /*
+ * NAK arrival. When NAK code is 2, Remote Access Error.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_2 UINT32_C(0x5)
+ /*
+ * NAK arrival. When NAK code is 3, Remote Operational Error.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_3 UINT32_C(0x6)
+ /*
+ * NAK arrival. When NAK code is 4, Invalid RD Request.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_NAK_ARRIVAL_4 UINT32_C(0x7)
+ /*
+ * Local memory error.
+ * An SGE described an inaccessible memory.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_MEMORY_ERROR UINT32_C(0x8)
+ /*
+ * Local memory error.
+ * An SGE described an inaccessible memory.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_MEMORY_ERROR UINT32_C(0x9)
+ /*
+ * Read response length error.
+ * The read response payload size does not match the read
+ * length of the request.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_READ_RESP_LENGTH UINT32_C(0xa)
+ /*
+ * Invalid read response.
+ * A read response arrived and had a PSN that was not in the
+ * reply range of any outstanding read request on the ORRQ.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_READ_RESP UINT32_C(0xb)
+ /*
+ * Illegal bind.
+ * * No MW with the specified R_Key exists.
+ * * No MR with the specified L_Key exists.
+ * * A bind request was performed on a window that was already
+ * bound.
+ * * A bind request was performed for an underlying MR that
+ * is not registered.
+ * * A bind request was performed for a memory area that exceeds
+ * the range of the underlying MR.
+ * * A bind request was performed with a set of permissions
+ * that are looser than the permissions of the underlying MR.
+ * * Domain error MW - When QP's PD does not match MW PD.
+ * * Domain error MR - When QP's PD does not match parent MR's
+ * PD.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_BIND UINT32_C(0xc)
+ /*
+ * Illegal fast register.
+ * * No MR with the specified L_Key exists.
+ * * A fast register request was performed on a non-
+ * physical MR.
+ * * A fast register request was performed on a physical MR
+ * that is already registered.
+ * * A fast register request was performed on a physical MR
+ * that does not have a page list allocated (has not been
+ * initialized).
+ * * The number of pages being registered exceeds the capacity
+ * of the physical MR.
+ * * The length of the registration is not possible with the
+ * actual number of pages provided.
+ * * Domain error - when QP's PD does not match PMR PD.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_FAST_REG UINT32_C(0xd)
+ /*
+ * Illegal invalidate.
+ * * No MR with the specified L_Key exists.
+ * * No MW with the specified R_Key exists.
+ * * An invalidate was performed against a non-physical MR.
+ * * An invalidate was performed against a physical MR that
+ * is not registered.
+ * * An invalidate was performed against a MW that is not
+ * bound.
+ * * The PD of the MR/MW being invalidated does not match the PD
+ * of the QP.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ILLEGAL_INVALIDATE UINT32_C(0xe)
+ /*
+ * Completion Error.
+ * No CQE space available on queue, or CQ not in VALID state.
+ * This is a Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CMP_ERROR UINT32_C(0xf)
+ /*
+ * Local memory error while retransmitting WQE.
+ * An SQ SGE described an inaccessible memory.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETRAN_LOCAL_ERROR UINT32_C(0x10)
+ /*
+ * Problem found in the format of a WQE in the SQ.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_WQE_FORMAT_ERROR UINT32_C(0x11)
+ /*
+ * Problem was found in the format of an ORRQ entry.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_ORRQ_FORMAT_ERROR UINT32_C(0x12)
+ /*
+ * A UD send attempted to use an invalid AVID.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_AVID_ERROR UINT32_C(0x13)
+ /*
+ * A UD send attempted to use an AVID that is outside of its
+ * QP's protection domain.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_AV_DOMAIN_ERROR UINT32_C(0x14)
+ /*
+ * A load error occurred on an attempt to load the CQ Context.
+ * This is a Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_CQ_LOAD_ERROR UINT32_C(0x15)
+ /*
+ * There was an attempt to process a WQE from the SQ that
+ * corresponds to an operation that is unsupported for the
+ * corresponding QP.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SERV_TYPE_ERROR UINT32_C(0x16)
+ /*
+ * There was an attempt to process a WQE from the SQ that
+ * corresponds to an operation that is unsupported for the
+ * corresponding QP, according to the supported_operations QPC
+ * field.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_INVALID_OP_ERROR UINT32_C(0x17)
+ /*
+ * A fatal error was detected on an attempt to read from
+ * or write to PCIe on the transmit side. This error is
+ * detected by the TX side (or CAGR), but has the priority
+ * of a Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_TX_PCI_ERROR UINT32_C(0x18)
+ /*
+ * A fatal error was detected on an attempt to read from
+ * or write to PCIe on the receive side. This error is detected
+ * by the RX side (or CAGR), but has the priority of a
+ * Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RX_PCI_ERROR UINT32_C(0x19)
+ /*
+ * When processing a WQE from the SQ, TWE detected an error
+ * such that the wqe_size given in the header is larger than
+ * the delta between sq_work_idx and sq_prod_idx. This error
+ * has priority over the non-error case that occurs when TWE
+ * detects that it simply doesn't have enough slots fetched
+ * to execute the WQE during the current residency.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PROD_WQE_MSMTCH_ERROR UINT32_C(0x1a)
+ /*
+ * When reading the MSN table to initiate HW retransmit, RWE
+ * found that to_retransmit_psn was not within the range defined
+ * by start_psn and next_psn in the corresponding MSN table
+ * entry. To_retransmit_psn must be greater than or equal to
+ * start_psn and less than next_psn in order for the range check
+ * to succeed.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_PSN_RANGE_CHECK_ERROR UINT32_C(0x1b)
+ /*
+ * While retransmitting, TWE detected one of several possible
+ * error detection scenarios related to the improper setup of
+ * retransmission. These include a category of errors known as
+ * retx_end_error where the retransmission end does not line up
+ * sequentially with the WQE index and PSN upon continuing on
+ * with the regular transmission that follows the
+ * retransmission. It also includes the error condition in which
+ * the retransmission Work Request has gen_dup_read_request set
+ * and the WQE fetched by TWE is not an RDMA Read or Atomic WQE.
+ * Please see TWE requirements for a full list of the various
+ * possible retransmit setup error cases. These error cases
+ * apply to H/W and F/W retransmission alike.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_RETX_SETUP_ERROR UINT32_C(0x1c)
+ /*
+ * An express doorbell was posted that overflowed the SQ. The
+ * doorbell is dropped, along with all subsequent doorbells for
+ * this SQ. This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SQ_OVERFLOW UINT32_C(0x1d)
+ #define CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_REQ_ERR_STATE_REASON_REQ_SQ_OVERFLOW
/* QP context id */
uint32_t xid;
uint8_t v;
@@ -69961,6 +78802,211 @@ typedef struct creq_qp_error_notification {
/* responder slow path state */
uint8_t res_slow_path_state;
uint8_t res_err_state_reason;
+ /* No error. */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_NO_ERROR UINT32_C(0x0)
+ /*
+ * Incoming Send, RDMA write, or RDMA read exceeds the maximum
+ * transfer length. Detected on RX first and only packets for
+ * write. Detected on RX request for read.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEED_MAX UINT32_C(0x1)
+ /*
+ * RDMA write payload size does not match write length. Detected
+ * when total write payload is not equal to the RDMA write
+ * length that was given in the first or only packet of the
+ * request.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH UINT32_C(0x2)
+ /*
+ * Send payload exceeds RQ/SRQ WQE buffer capacity. The total
+ * send payload that arrived is more than the size of the WQE
+ * buffer that was fetched from the RQ/SRQ.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE UINT32_C(0x3)
+ /*
+ * Responder detected opcode error.
+ * * First, only, middle, last for incoming requests are
+ * improperly ordered with respect to previous (PSN) packet.
+ * * First or middle packet is not full MTU size.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_OPCODE_ERROR UINT32_C(0x4)
+ /*
+ * PSN sequence error retry limit exceeded.
+ * The responder encountered a PSN sequence error for the
+ * same PSN too many times. This can occur via implicit or
+ * explicit NAK.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT UINT32_C(0x5)
+ /*
+ * Invalid R_Key.
+ * An incoming request contained an R_Key that did not reference
+ * a valid MR/MW. This error may be detected by the RX engine
+ * for RDMA write or by the TX engine for RDMA read
+ * (detected while servicing IRRQ).
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY UINT32_C(0x6)
+ /*
+ * Domain error.
+ * An incoming request specified an R_Key which
+ * referenced an MR/MW that was not in the same PD as the QP on
+ * which the request arrived.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR UINT32_C(0x7)
+ /*
+ * No permission.
+ * An incoming request contained an R_Key that referenced an
+ * MR/MW which did not have the access permission needed for
+ * the operation.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION UINT32_C(0x8)
+ /*
+ * Range error.
+ * An incoming request had a combination of R_Key, VA, and
+ * length that was out of bounds of the associated MR/MW.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR UINT32_C(0x9)
+ /*
+ * Invalid R_Key.
+ * An incoming request contained an R_Key that did not
+ * reference a valid MR/MW. This error may be detected
+ * by the RX engine for RDMA write or by the TX engine
+ * for RDMA read (detected while servicing IRRQ).
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY UINT32_C(0xa)
+ /*
+ * Domain error.
+ * An incoming request specified an R_Key which referenced
+ * an MR/MW that was not in the same PD as the QP on
+ * which the request arrived.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR UINT32_C(0xb)
+ /*
+ * No permission.
+ * An incoming request contained an R_Key that referenced an
+ * MR/MW which did not have the access permission needed for
+ * the operation.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION UINT32_C(0xc)
+ /*
+ * Range error.
+ * An incoming request had a combination of R_Key, VA, and
+ * length that was out of bounds of the associated MR/MW.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR UINT32_C(0xd)
+ /*
+ * IRRQ overflow.
+ * The peer sent us more RDMA read or atomic requests than
+ * the negotiated maximum.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW UINT32_C(0xe)
+ /*
+ * Unsupported opcode.
+ * The peer sent us a request with an opcode for a request
+ * type that is not supported on this QP.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE UINT32_C(0xf)
+ /*
+ * Unaligned atomic operation. The VA of an atomic request
+ * is on a memory boundary that prevents atomic execution.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC UINT32_C(0x10)
+ /*
+ * Remote invalidate error.
+ * A send with invalidate request arrived in which the
+ * R_Key to invalidate did not describe an MR/MW which could
+ * be invalidated. RQ WQE completes with error status.
+ * This error is only reported if the send operation did
+ * not fail. If the send operation failed then the remote
+ * invalidate error is not reported.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_REM_INVALIDATE UINT32_C(0x11)
+ /*
+ * Local memory error. An RQ/SRQ SGE described an inaccessible
+ * memory.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_MEMORY_ERROR UINT32_C(0x12)
+ /*
+ * SRQ in error. The QP is moving to the error state because
+ * the SRQ it uses is in error.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_ERROR UINT32_C(0x13)
+ /*
+ * Completion error. No CQE space available on queue or CQ not
+ * in VALID state.
+ * This is a Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CMP_ERROR UINT32_C(0x14)
+ /*
+ * Invalid R_Key while resending responses to a duplicate request.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_INVALID_DUP_RKEY UINT32_C(0x15)
+ /*
+ * Problem was found in the format of a WQE in the RQ/SRQ.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR UINT32_C(0x16)
+ /*
+ * Problem was found in the format of an IRRQ entry.
+ * This is a TX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_IRRQ_FORMAT_ERROR UINT32_C(0x17)
+ /*
+ * A load error occurred on an attempt to load the CQ Context.
+ * This is a Completion Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR UINT32_C(0x18)
+ /*
+ * A load error occurred on an attempt to load the SRQ Context.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR UINT32_C(0x19)
+ /*
+ * A fatal error was detected on an attempt to read from or
+ * write to PCIe on the transmit side. This error is detected
+ * by the TX side, but has the priority of a Completion
+ * Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR UINT32_C(0x1b)
+ /*
+ * A fatal error was detected on an attempt to read from or
+ * write to PCIe on the receive side. This error is detected
+ * by the RX side (or CAGR), but has the priority of a Completion
+ * Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR UINT32_C(0x1c)
+ /*
+ * When searching the IRRQ to respond to a duplicate request,
+ * RWE could not find the duplicate request in the entire IRRQ.
+ * This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_PSN_NOT_FOUND UINT32_C(0x1d)
+ /*
+ * An express doorbell was posted that overflowed the RQ. The
+ * doorbell is dropped, along with all subsequent doorbells for
+ * this RQ. This is an RX Detected Error.
+ */
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RQ_OVERFLOW UINT32_C(0x1e)
+ #define CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_LAST CREQ_QP_ERROR_NOTIFICATION_RES_ERR_STATE_REASON_RES_RQ_OVERFLOW
/*
* Final SQ Consumer Index value. Any additional SQ WQEs will
* have to be completed by the user provider.
@@ -70031,78 +79077,165 @@ typedef struct sq_base {
/* This field defines the type of SQ WQE. */
uint8_t wqe_type;
/* Send */
- #define SQ_BASE_WQE_TYPE_SEND UINT32_C(0x0)
+ #define SQ_BASE_WQE_TYPE_SEND UINT32_C(0x0)
/*
* Send with Immediate
*
* Allowed only on reliable connection (RC) and
* unreliable datagram (UD) SQs.
*/
- #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD UINT32_C(0x1)
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMEAD UINT32_C(0x1)
/*
* Send with Invalidate.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_SEND_W_INVALID UINT32_C(0x2)
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID UINT32_C(0x2)
/*
* RDMA Write.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_WRITE_WQE UINT32_C(0x4)
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE UINT32_C(0x4)
/*
* RDMA Write with Immediate.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD UINT32_C(0x5)
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMEAD UINT32_C(0x5)
/*
* RDMA Read.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_READ_WQE UINT32_C(0x6)
+ #define SQ_BASE_WQE_TYPE_READ_WQE UINT32_C(0x6)
/*
* Atomic Compare/Swap.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_ATOMIC_CS UINT32_C(0x8)
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS UINT32_C(0x8)
/*
* Atomic Fetch/Add.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_ATOMIC_FA UINT32_C(0xb)
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA UINT32_C(0xb)
/*
* Local Invalidate.
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_LOCAL_INVALID UINT32_C(0xc)
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID UINT32_C(0xc)
/*
* FR-PMR (Fast Register Physical Memory Region)
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_FR_PMR UINT32_C(0xd)
+ #define SQ_BASE_WQE_TYPE_FR_PMR UINT32_C(0xd)
/*
* Memory Bind
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_BIND UINT32_C(0xe)
+ #define SQ_BASE_WQE_TYPE_BIND UINT32_C(0xe)
/*
* FR-PPMR (Fast Register Proxy Physical Memory Region)
*
* Allowed only on reliable connection (RC) SQs.
*/
- #define SQ_BASE_WQE_TYPE_FR_PPMR UINT32_C(0xf)
- #define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_FR_PPMR
+ #define SQ_BASE_WQE_TYPE_FR_PPMR UINT32_C(0xf)
+ /* Send V3 */
+ #define SQ_BASE_WQE_TYPE_SEND_V3 UINT32_C(0x10)
+ /*
+ * Send with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_SEND_W_IMMED_V3 UINT32_C(0x11)
+ /*
+ * Send with Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_SEND_W_INVALID_V3 UINT32_C(0x12)
+ /*
+ * UD Send V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_UDSEND_V3 UINT32_C(0x13)
+ /*
+ * UD Send with Immediate V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_UDSEND_W_IMMED_V3 UINT32_C(0x14)
+ /*
+ * RDMA Write V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_WRITE_WQE_V3 UINT32_C(0x15)
+ /*
+ * RDMA Write with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_WRITE_W_IMMED_V3 UINT32_C(0x16)
+ /*
+ * RDMA Read V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_READ_WQE_V3 UINT32_C(0x17)
+ /*
+ * Atomic Compare/Swap V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_ATOMIC_CS_V3 UINT32_C(0x18)
+ /*
+ * Atomic Fetch/Add V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_ATOMIC_FA_V3 UINT32_C(0x19)
+ /*
+ * Local Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_LOCAL_INVALID_V3 UINT32_C(0x1a)
+ /*
+ * FR-PMR (Fast Register Physical Memory Region) V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_FR_PMR_V3 UINT32_C(0x1b)
+ /*
+ * Memory Bind V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BASE_WQE_TYPE_BIND_V3 UINT32_C(0x1c)
+ /* RawEth/QP1 Send V3 */
+ #define SQ_BASE_WQE_TYPE_RAWQP1SEND_V3 UINT32_C(0x1d)
+ /* Change UDP Source Port V3 */
+ #define SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3 UINT32_C(0x1e)
+ #define SQ_BASE_WQE_TYPE_LAST SQ_BASE_WQE_TYPE_CHANGE_UDPSRCPORT_V3
uint8_t unused_0[7];
} sq_base_t, *psq_base_t;
+/*
+ * Most SQ WQEs contain SGEs used to define the SGL used to map payload
+ * data in host memory. The number of SGE structures is defined by the
+ * wqe_size field. SGE structures are aligned to 16B boundaries.
+ *
+ * In backward-compatible modes there can be 2, 4 or 6 SGEs (based on
+ * the mode). In variable-sized WQE mode there can be 0-30 SGE
+ * structures.
+ */
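+
+/*
+ * Illustrative sketch, not part of the generated HSI definitions:
+ * deriving the SGE count of a variable-sized send WQE from wqe_size.
+ * This assumes wqe_size counts every 16B slot of the WQE and that the
+ * send header occupies the first two slots, which is what yields the
+ * documented 0-30 SGE range; other WQE types may use a different
+ * header size. The helper name is hypothetical.
+ */
+static inline unsigned int
+example_sq_send_num_sges(uint8_t wqe_size)
+{
+	/* Each sq_sge is 16B and therefore consumes exactly one slot. */
+	return (wqe_size > 2 ? (unsigned int)(wqe_size - 2) : 0);
+}
+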
/* sq_sge (size:128b/16B) */
typedef struct sq_sge {
@@ -70276,7 +79409,7 @@ typedef struct sq_send {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size, the actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -70292,7 +79425,7 @@ typedef struct sq_send {
uint32_t length;
/*
* When in the SQ of a UD QP, indicates the q_key to be used in
- * the transmitted packet. However, if the most significant bit
+ * the transmitted packet. However, if the most significant bit
* of this field is set, then the q_key will be taken from QP
* context, rather than from this field.
*
@@ -70331,7 +79464,7 @@ typedef struct sq_send {
* SGEs based on the wqe_size field.
*
* When inline=1, this area is filled with payload data for the
- * send based on the length_or_AVID field. Bits [7:0] of word 0
+ * send based on the length_or_AVID field. Bits [7:0] of word 0
* hold the first byte to go out on the wire.
*/
uint32_t data[24];
@@ -70412,7 +79545,7 @@ typedef struct sq_send_hdr {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size, the actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -70428,7 +79561,7 @@ typedef struct sq_send_hdr {
uint32_t length;
/*
* When in the SQ of a UD QP, indicates the q_key to be used in
- * the transmitted packet. However, if the most significant bit
+ * the transmitted packet. However, if the most significant bit
* of this field is set, then the q_key will be taken from QP
* context, rather than from this field.
*
@@ -70511,7 +79644,7 @@ typedef struct sq_send_raweth_qp1 {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size, the actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -70532,7 +79665,7 @@ typedef struct sq_send_raweth_qp1 {
*/
#define SQ_SEND_RAWETH_QP1_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
/*
- * If set to 1, the controller replaces the IP checksum of the
+ * If set to 1, the controller replaces the IP checksum of the
* normal packets, or the inner IP checksum of the encapsulated
* packets with the hardware calculated IP checksum for the
* packet associated with this descriptor.
@@ -70546,9 +79679,9 @@ typedef struct sq_send_raweth_qp1 {
*
* This bit must be valid on the first BD of a packet.
*
- * Packet must be 64B or longer when this flag is set. It is not
- * usefull to use this bit with any form of TX offload such as
- * CSO or LSO. The intent is that the packet from the host already
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
* has a valid Ethernet CRC on the packet.
*/
#define SQ_SEND_RAWETH_QP1_LFLAGS_NOCRC UINT32_C(0x4)
@@ -70562,7 +79695,7 @@ typedef struct sq_send_raweth_qp1 {
/*
* If set to 1, The controller replaces the tunnel IP checksum
* field with hardware calculated IP checksum for the IP header
- * of the packet associated with this descriptor. In case of
+ * of the packet associated with this descriptor. In case of
* VXLAN, the controller also replaces the outer header UDP
* checksum with hardware calculated UDP checksum for the packet
* associated with this descriptor.
@@ -70570,12 +79703,12 @@ typedef struct sq_send_raweth_qp1 {
#define SQ_SEND_RAWETH_QP1_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
/*
* If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * packet. Packet must be a valid RoCE format packet.
*/
#define SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
* If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * packet. Packet must be a valid FCoE format packet.
*/
#define SQ_SEND_RAWETH_QP1_LFLAGS_FCOE_CRC UINT32_C(0x200)
/*
@@ -70655,7 +79788,7 @@ typedef struct sq_send_raweth_qp1 {
* SGEs based on the wqe_size field.
*
* When inline=1, this area is filled with payload data for the
- * send based on the length_or_AVID field. Bits [7:0] of word 0
+ * send based on the length_or_AVID field. Bits [7:0] of word 0
* hold the first byte to go out on the wire.
*/
uint32_t data[24];
@@ -70708,7 +79841,7 @@ typedef struct sq_send_raweth_qp1_hdr {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size. The actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -70729,7 +79862,7 @@ typedef struct sq_send_raweth_qp1_hdr {
*/
#define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
/*
- * If set to 1, the controller replaces the IP checksum of the
+ * If set to 1, the controller replaces the IP checksum of the
* normal packets, or the inner IP checksum of the encapsulated
* packets with the hardware calculated IP checksum for the
* packet associated with this descriptor.
@@ -70743,9 +79876,9 @@ typedef struct sq_send_raweth_qp1_hdr {
*
* This bit must be valid on the first BD of a packet.
*
- * Packet must be 64B or longer when this flag is set. It is not
- * usefull to use this bit with any form of TX offload such as
- * CSO or LSO. The intent is that the packet from the host already
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
* has a valid Ethernet CRC on the packet.
*/
#define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_NOCRC UINT32_C(0x4)
@@ -70759,7 +79892,7 @@ typedef struct sq_send_raweth_qp1_hdr {
/*
* If set to 1, The controller replaces the tunnel IP checksum
* field with hardware calculated IP checksum for the IP header
- * of the packet associated with this descriptor. In case of
+ * of the packet associated with this descriptor. In case of
* VXLAN, the controller also replaces the outer header UDP
* checksum with hardware calculated UDP checksum for the packet
* associated with this descriptor.
@@ -70767,12 +79900,12 @@ typedef struct sq_send_raweth_qp1_hdr {
#define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
/*
* If set to '1', then the RoCE ICRC will be appended to the
- * packet. Packet must be a valid RoCE format packet.
+ * packet. Packet must be a valid RoCE format packet.
*/
#define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_ROCE_CRC UINT32_C(0x100)
/*
* If set to '1', then the FCoE CRC will be appended to the
- * packet. Packet must be a valid FCoE format packet.
+ * packet. Packet must be a valid FCoE format packet.
*/
#define SQ_SEND_RAWETH_QP1_HDR_LFLAGS_FCOE_CRC UINT32_C(0x200)
/*
@@ -70904,7 +80037,7 @@ typedef struct sq_rdma {
#define SQ_RDMA_FLAGS_SE UINT32_C(0x8)
/*
* Indicate that inline data is posted to the SQ following
- * this WQE. This bit may be 1 only for write operations.
+ * this WQE. This bit may be 1 only for write operations.
*/
#define SQ_RDMA_FLAGS_INLINE UINT32_C(0x10)
/*
@@ -70923,7 +80056,7 @@ typedef struct sq_rdma {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size. The actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -70957,7 +80090,7 @@ typedef struct sq_rdma {
* SGEs based on the wqe_size field.
*
* When inline=1, this area is filled with payload data for the
- * write based on the length field. Bits [7:0] of word 0
+ * write based on the length field. Bits [7:0] of word 0
* hold the first byte to go out on the wire.
*/
uint32_t data[24];
@@ -71018,7 +80151,7 @@ typedef struct sq_rdma_hdr {
#define SQ_RDMA_HDR_FLAGS_SE UINT32_C(0x8)
/*
* Indicate that inline data is posted to the SQ following
- * this WQE. This bit may be 1 only for write operations.
+ * this WQE. This bit may be 1 only for write operations.
*/
#define SQ_RDMA_HDR_FLAGS_INLINE UINT32_C(0x10)
/*
@@ -71037,7 +80170,7 @@ typedef struct sq_rdma_hdr {
* valid 16 bytes units other than the WQE structure can be
* SGEs (Scatter Gather Elements) OR inline data.
*
- * While this field defines the valid WQE size. The actual
+ * While this field defines the valid WQE size. The actual
* total WQE size is always 128B.
*/
uint8_t wqe_size;
@@ -71146,7 +80279,7 @@ typedef struct sq_atomic {
/*
* The first 16B of the data field must be filled with a single
* SGE. This will be used to store the return value from the
- * Atomic Ack response. The size of the single SGE must be 8B.
+ * Atomic Ack response. The size of the single SGE must be 8B.
*/
uint32_t data[24];
} sq_atomic_t, *psq_atomic_t;
@@ -71405,8 +80538,8 @@ typedef struct sq_fr_pmr {
*/
#define SQ_FR_PMR_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -71575,20 +80708,21 @@ typedef struct sq_fr_pmr {
#define SQ_FR_PMR_NUMLEVELS_SFT 6
/*
* A zero level PBL means that the VA is the physical address used
- * for the operation. No translation is done by the PTU.
+ * for the operation. No translation is done by the PTU.
*/
#define SQ_FR_PMR_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 6)
/*
* A one layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PBE values that point to actual pg_size physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical pages.
*/
#define SQ_FR_PMR_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 6)
/*
* A two layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PDE values that in turn point to pbl_pg_size physical pages that contain
- * PBE values that point to actual physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size physical
+ * pages that contain PBE values that point to actual physical
+ * pages.
*/
#define SQ_FR_PMR_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 6)
#define SQ_FR_PMR_NUMLEVELS_LAST SQ_FR_PMR_NUMLEVELS_LAYER2
@@ -71647,8 +80781,8 @@ typedef struct sq_fr_pmr_hdr {
*/
#define SQ_FR_PMR_HDR_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -71817,20 +80951,21 @@ typedef struct sq_fr_pmr_hdr {
#define SQ_FR_PMR_HDR_NUMLEVELS_SFT 6
/*
* A zero level PBL means that the VA is the physical address used
- * for the operation. No translation is done by the PTU.
+ * for the operation. No translation is done by the PTU.
*/
#define SQ_FR_PMR_HDR_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 6)
/*
* A one layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PBE values that point to actual pg_size physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical pages.
*/
#define SQ_FR_PMR_HDR_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 6)
/*
* A two layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PDE values that in turn point to pbl_pg_size physical pages that contain
- * PBE values that point to actual physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size physical
+ * pages that contain PBE values that point to actual physical
+ * pages.
*/
#define SQ_FR_PMR_HDR_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 6)
#define SQ_FR_PMR_HDR_NUMLEVELS_LAST SQ_FR_PMR_HDR_NUMLEVELS_LAYER2
@@ -71887,8 +81022,8 @@ typedef struct sq_fr_ppmr {
*/
#define SQ_FR_PPMR_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -72061,20 +81196,21 @@ typedef struct sq_fr_ppmr {
#define SQ_FR_PPMR_NUMLEVELS_SFT 6
/*
* A zero level PBL means that the VA is the physical address used
- * for the operation. No translation is done by the PTU.
+ * for the operation. No translation is done by the PTU.
*/
#define SQ_FR_PPMR_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 6)
/*
* A one layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PBE values that point to actual pg_size physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical pages.
*/
#define SQ_FR_PPMR_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 6)
/*
* A two layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PDE values that in turn point to pbl_pg_size physical pages that contain
- * PBE values that point to actual physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size physical
+ * pages that contain PBE values that point to actual physical
+ * pages.
*/
#define SQ_FR_PPMR_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 6)
#define SQ_FR_PPMR_NUMLEVELS_LAST SQ_FR_PPMR_NUMLEVELS_LAYER2
@@ -72133,8 +81269,8 @@ typedef struct sq_fr_ppmr_hdr {
*/
#define SQ_FR_PPMR_HDR_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -72307,20 +81443,21 @@ typedef struct sq_fr_ppmr_hdr {
#define SQ_FR_PPMR_HDR_NUMLEVELS_SFT 6
/*
* A zero level PBL means that the VA is the physical address used
- * for the operation. No translation is done by the PTU.
+ * for the operation. No translation is done by the PTU.
*/
#define SQ_FR_PPMR_HDR_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 6)
/*
* A one layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PBE values that point to actual pg_size physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical pages.
*/
#define SQ_FR_PPMR_HDR_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 6)
/*
* A two layer translation is provided between the logical and
- * physical address. The PBL points to a physical page that contains
- * PDE values that in turn point to pbl_pg_size physical pages that contain
- * PBE values that point to actual physical pages.
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size physical
+ * pages that contain PBE values that point to actual physical
+ * pages.
*/
#define SQ_FR_PPMR_HDR_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 6)
#define SQ_FR_PPMR_HDR_NUMLEVELS_LAST SQ_FR_PPMR_HDR_NUMLEVELS_LAYER2
@@ -72383,8 +81520,8 @@ typedef struct sq_bind {
*/
#define SQ_BIND_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -72394,7 +81531,7 @@ typedef struct sq_bind {
* Local Write Access.
*
* Local accesses are never allowed for memory windows, so this
- * bit must always be zero in a bind WQE. If this bit is ever
+ * bit must always be zero in a bind WQE. If this bit is ever
* set, the bind will fail with an errored completion.
*/
#define SQ_BIND_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
@@ -72404,23 +81541,25 @@ typedef struct sq_bind {
* Remote Write Access.
*
* Note that, if this bit is set, then the parent region to which
- * the window is being bound must allow local writes. If this is not
- * the case, then the bind will fail with an errored completion.
+ * the window is being bound must allow local writes. If this is
+ * not the case, then the bind will fail with an errored
+ * completion.
*/
#define SQ_BIND_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
/*
* Remote Atomic Access.
*
* Note that, if this bit is set, then the parent region to which
- * the window is being bound must allow local writes. If this is not
- * the case, then the bind will fail with an errored completion.
+ * the window is being bound must allow local writes. If this is
+ * not the case, then the bind will fail with an errored
+ * completion.
*/
#define SQ_BIND_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
/*
* Window Binding Allowed.
*
* It is never allowed to bind windows to windows, so this bit
- * must always be zero in a bind WQE. If this bit is ever set,
+ * must always be zero in a bind WQE. If this bit is ever set,
* the bind will fail with an errored completion.
*/
#define SQ_BIND_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
@@ -72429,27 +81568,28 @@ typedef struct sq_bind {
uint8_t mw_type_zero_based;
/*
* If this bit is set, then the newly-bound memory window will be
- * zero-based. If clear, then the newly-bound memory window will be
+ * zero-based. If clear, then the newly-bound memory window will be
* non-zero-based.
*/
#define SQ_BIND_ZERO_BASED UINT32_C(0x1)
/*
- * If type1 is specified, then this WQE performs a "bind memory window"
- * operation on a type1 window. If type2 is specified, then this WQE
- * performs a "post send bind memory window" operation on a type2
- * window.
+ * If type1 is specified, then this WQE performs a "bind memory
+ * window" operation on a type1 window. If type2 is specified, then
+ * this WQE performs a "post send bind memory window" operation on a
+ * type2 window.
*
- * Note that the bind WQE cannot change the type of the memory window.
+ * Note that the bind WQE cannot change the type of the memory
+ * window.
*
- * If a "bind memory window" operation is attempted on a memory window
- * that was allocated as type2, then the bind will fail with an errored
- * completion, as "bind memory window" is allowed only on type1 memory
- * windows.
+ * If a "bind memory window" operation is attempted on a memory
+ * window that was allocated as type2, then the bind will fail with
+ * an errored completion, as "bind memory window" is allowed only on
+ * type1 memory windows.
*
- * Similarly, if a "post send bind memory window" operation is attempted
- * on a memory window that was allocated as type1, then the bind will fail
- * with an errored completions, as "post send bind memory window" is allowed
- * only on type2 memory windows.
+ * Similarly, if a "post send bind memory window" operation is
+ * attempted on a memory window that was allocated as type1, then the
+ * bind will fail with an errored completions, as "post send bind
+ * memory window" is allowed only on type2 memory windows.
*/
#define SQ_BIND_MW_TYPE UINT32_C(0x2)
/* Type 1 Bind Memory Window */
@@ -72536,8 +81676,8 @@ typedef struct sq_bind_hdr {
*/
#define SQ_BIND_HDR_FLAGS_DEBUG_TRACE UINT32_C(0x40)
/*
- * This is the new access control for the MR. '1' means
- * the operation is allowed. '0' means operation is
+ * This is the new access control for the MR. '1' means
+ * the operation is allowed. '0' means operation is
* not allowed.
*/
uint8_t access_cntl;
@@ -72547,7 +81687,7 @@ typedef struct sq_bind_hdr {
* Local Write Access.
*
* Local accesses are never allowed for memory windows, so this
- * bit must always be zero in a bind WQE. If this bit is ever
+ * bit must always be zero in a bind WQE. If this bit is ever
* set, the bind will fail with an errored completion.
*/
#define SQ_BIND_HDR_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
@@ -72557,23 +81697,25 @@ typedef struct sq_bind_hdr {
* Remote Write Access.
*
* Note that, if this bit is set, then the parent region to which
- * the window is being bound must allow local writes. If this is not
- * the case, then the bind will fail with an errored completion.
+ * the window is being bound must allow local writes. If this is
+ * not the case, then the bind will fail with an errored
+ * completion.
*/
#define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
/*
* Remote Atomic Access.
*
* Note that, if this bit is set, then the parent region to which
- * the window is being bound must allow local writes. If this is not
- * the case, then the bind will fail with an errored completion.
+ * the window is being bound must allow local writes. If this is
+ * not the case, then the bind will fail with an errored
+ * completion.
*/
#define SQ_BIND_HDR_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
/*
* Window Binding Allowed.
*
* It is never allowed to bind windows to windows, so this bit
- * must always be zero in a bind WQE. If this bit is ever set,
+ * must always be zero in a bind WQE. If this bit is ever set,
* the bind will fail with an errored completion.
*/
#define SQ_BIND_HDR_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
@@ -72582,27 +81724,28 @@ typedef struct sq_bind_hdr {
uint8_t mw_type_zero_based;
/*
* If this bit is set, then the newly-bound memory window will be
- * zero-based. If clear, then the newly-bound memory window will be
+ * zero-based. If clear, then the newly-bound memory window will be
* non-zero-based.
*/
#define SQ_BIND_HDR_ZERO_BASED UINT32_C(0x1)
/*
- * If type1 is specified, then this WQE performs a "bind memory window"
- * operation on a type1 window. If type2 is specified, then this WQE
- * performs a "post send bind memory window" operation on a type2
- * window.
+ * If type1 is specified, then this WQE performs a "bind memory
+ * window" operation on a type1 window. If type2 is specified, then
+ * this WQE performs a "post send bind memory window" operation on a
+ * type2 window.
*
- * Note that the bind WQE cannot change the type of the memory window.
+ * Note that the bind WQE cannot change the type of the memory
+ * window.
*
- * If a "bind memory window" operation is attempted on a memory window
- * that was allocated as type2, then the bind will fail with an errored
- * completion, as "bind memory window" is allowed only on type1 memory
- * windows.
+ * If a "bind memory window" operation is attempted on a memory
+ * window that was allocated as type2, then the bind will fail with
+ * an errored completion, as "bind memory window" is allowed only on
+ * type1 memory windows.
*
- * Similarly, if a "post send bind memory window" operation is attempted
- * on a memory window that was allocated as type1, then the bind will fail
- * with an errored completions, as "post send bind memory window" is allowed
- * only on type2 memory windows.
+ * Similarly, if a "post send bind memory window" operation is
+ * attempted on a memory window that was allocated as type1, then the
+ * bind will fail with an errored completions, as "post send bind
+ * memory window" is allowed only on type2 memory windows.
*/
#define SQ_BIND_HDR_MW_TYPE UINT32_C(0x2)
/* Type 1 Bind Memory Window */
@@ -72634,6 +81777,2736 @@ typedef struct sq_bind_hdr {
uint8_t reserved24[3];
} sq_bind_hdr_t, *psq_bind_hdr_t;
+/*
+ * This V3 version of the structure is not accessible from host
+ * software, but is documented here (in the SW section) anyway.
+ *
+ * This is the MSN Table (located in IQM). The table is written by the
+ * RoCE transmitter when sending wire operation WQEs. It is used to
+ * provide the RoCE receiver with information about the SQ WQEs, in
+ * order to generate requester completions and to perform requester HW
+ * retransmission. The number of entries in the table is configured in
+ * the QPC and must be equal to the maximum number of WQEs that can be
+ * present in the SQ at one time, rounded up to the nearest power of
+ * two.
+ */
+/* sq_msn_search_v3 (size:128b/16B) */
+
+typedef struct sq_msn_search_v3 {
+ uint64_t idx_psn;
+ /* Start PSN of the WQE. */
+ #define SQ_MSN_SEARCH_V3_START_PSN_MASK UINT32_C(0xffffff)
+ #define SQ_MSN_SEARCH_V3_START_PSN_SFT 0
+ /* Next PSN. Equal to the start PSN of the next WQE. */
+ #define SQ_MSN_SEARCH_V3_NEXT_PSN_MASK UINT64_C(0xffffff000000)
+ #define SQ_MSN_SEARCH_V3_NEXT_PSN_SFT 24
+ /*
+ * Start index. For variable-size WQEs, this field indicates the
+ * starting slot index that corresponds to the WQE. In
+ * backward-compatible mode, this is the starting WQE index.
+ */
+ #define SQ_MSN_SEARCH_V3_START_IDX_MASK UINT64_C(0xffff000000000000)
+ #define SQ_MSN_SEARCH_V3_START_IDX_SFT 48
+ /*
+ * This value will be returned in the completion if the completion
+ * is signaled.
+ */
+ uint32_t wqe_opaque;
+ /* The size of the WQE in units of 16B chunks. */
+ uint8_t wqe_size;
+ uint8_t signal;
+ /* Set if completion signaling is requested. */
+ #define SQ_MSN_SEARCH_V3_SGNLD UINT32_C(0x1)
+ /*
+ * Set if at least one signaled local memory operation WQE is
+ * present in the SQ between the previous wire-operation WQE
+ * and this WQE.
+ */
+ #define SQ_MSN_SEARCH_V3_PREV_SGNLD_LOCAL_MEM_WQE UINT32_C(0x2)
+ uint16_t reserved;
+} sq_msn_search_v3_t, *psq_msn_search_v3_t;
+
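+/*
+ * A minimal decoding sketch, not part of the generated HSI
+ * definitions: one plausible way a driver could unpack the idx_psn
+ * word using the masks and shifts above. The helper names are
+ * hypothetical; le64toh() from sys/endian.h is assumed for the
+ * little-endian table layout.
+ */
+static inline uint32_t
+sq_msn_search_v3_start_psn(const sq_msn_search_v3_t *ent)
+{
+ uint64_t v = le64toh(ent->idx_psn);
+
+ /* Bits 23:0 hold the start PSN of the WQE. */
+ return ((uint32_t)((v & SQ_MSN_SEARCH_V3_START_PSN_MASK) >>
+     SQ_MSN_SEARCH_V3_START_PSN_SFT));
+}
+
+static inline uint16_t
+sq_msn_search_v3_start_idx(const sq_msn_search_v3_t *ent)
+{
+ uint64_t v = le64toh(ent->idx_psn);
+
+ /* Bits 63:48 hold the starting slot (or WQE) index. */
+ return ((uint16_t)((v & SQ_MSN_SEARCH_V3_START_IDX_MASK) >>
+     SQ_MSN_SEARCH_V3_START_IDX_SFT));
+}
+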
+/* SQ Send WQE V3 for RC SQs. */
+/* sq_send_v3 (size:1024b/128B) */
+
+typedef struct sq_send_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* Send V3 */
+ #define SQ_SEND_V3_WQE_TYPE_SEND_V3 UINT32_C(0x10)
+ /*
+ * Send with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_SEND_V3_WQE_TYPE_SEND_W_IMMED_V3 UINT32_C(0x11)
+ /*
+ * Send with Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3 UINT32_C(0x12)
+ #define SQ_SEND_V3_WQE_TYPE_LAST SQ_SEND_V3_WQE_TYPE_SEND_W_INVALID_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support unsignaled completion,
+ * the controller should not generate a CQE unless there was
+ * an error. This refers to the CQE on the sender side. (The se
+ * flag refers to the receiver side).
+ */
+ #define SQ_SEND_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_SEND_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ */
+ #define SQ_SEND_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_SEND_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_SEND_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_SEND_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The number of 16 bytes chunks of data including this first
+ * word of the request that are a valid part of the request. The
+ * valid 16 bytes units other than the WQE structure can be
+ * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * Note: Since the WQE header consumes only one slot (16 bytes)
+ * for this type of WQE, and the maximum number of SGEs supported
+ * by the device is 30, this field must never exceed 31.
+ */
+ #define SQ_SEND_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_SEND_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+ * When inline flag is '1', this field determines the number of
+ * bytes that are valid in the last 16B unit of the inline WQE.
+ * Zero means all 16 bytes are valid. One means only bits 7:0 of
+ * the last 16B unit are valid. This means the total size of the
+ * inline data is determined by a combination of the wqe_size field
+ * and this inline_length field.
+ *
+ * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+ * ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_SEND_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_SEND_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Either the invalidate key (R_Key of the remote host) that will
+ * be sent with the IETH (Invalidate ETH) if wqe_type is Send
+ * with Invalidate, or the immediate value that will be sent with
+ * the ImmDt header if wqe_type is Send with Immediate.
+ */
+ uint32_t inv_key_or_imm_data;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_SEND_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_SEND_V3_TIMESTAMP_SFT 0
+ /*
+ * When inline=0, then this area is filled with 1 to 30 SGEs
+ * based on the wqe_size field.
+ *
+ * When inline=1, this area is filled with payload data for the
+ * send. Length of data is described in the inline_length field.
+ * Bits [7:0] of word 0 hold the first byte to go out on the wire.
+ */
+ uint32_t data[28];
+} sq_send_v3_t, *psq_send_v3_t;
+
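+/*
+ * Worked example of the inline_size formula above; an illustrative
+ * sketch only, with a hypothetical helper name. For this WQE type the
+ * fixed header occupies the first 16B slot, so data_offset_in_bytes
+ * is 16.
+ */
+static inline uint32_t
+sq_send_v3_inline_size(uint8_t wqe_size, uint8_t inline_length)
+{
+ /*
+ * (wqe_size - 2) full 16B data slots, plus the valid bytes of
+ * the last slot (inline_length == 0 means all 16 are valid).
+ */
+ return ((((uint32_t)wqe_size - 1) * 16) - 16 +
+     ((inline_length == 0) ? 16 : inline_length));
+}
+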
+/* Send SQ WQE V3 header. */
+/* sq_send_hdr_v3 (size:128b/16B) */
+
+typedef struct sq_send_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* Send V3 */
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_V3 UINT32_C(0x10)
+ /*
+ * Send with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_IMMED_V3 UINT32_C(0x11)
+ /*
+ * Send with Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3 UINT32_C(0x12)
+ #define SQ_SEND_HDR_V3_WQE_TYPE_LAST SQ_SEND_HDR_V3_WQE_TYPE_SEND_W_INVALID_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support unsignaled completion,
+ * the controller should not generate a CQE unless there was
+ * an error. This refers to the CQE on the sender side. (The se
+ * flag refers to the receiver side).
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_SEND_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The number of 16 bytes chunks of data including this first
+ * word of the request that are a valid part of the request. The
+ * valid 16 bytes units other than the WQE structure can be
+ * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * Note: Since the WQE header consumes only one slot (16 bytes)
+ * for this type of WQE, and the maximum number of SGEs supported
+ * by the device is 30, this field must never exceed 31.
+ */
+ #define SQ_SEND_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_SEND_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+ * When inline flag is '1', this field determines the number of
+ * bytes that are valid in the last 16B unit of the inline WQE.
+ * Zero means all 16 bytes are valid. One means only bits 7:0 of
+ * the last 16B unit are valid. This means the total size of the
+ * inline data is determined by a combination of the wqe_size field
+ * and this inline_length field.
+ *
+ * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+ * ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_SEND_HDR_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_SEND_HDR_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Either the invalidate key (R_Key of the remote host) that will
+ * be sent with the IETH (Invalidate ETH) if wqe_type is Send
+ * with Invalidate, or the immediate value that will be sent with
+ * the ImmDt header if wqe_type is Send with Immediate.
+ */
+ uint32_t inv_key_or_imm_data;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_SEND_HDR_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_SEND_HDR_V3_TIMESTAMP_SFT 0
+} sq_send_hdr_v3_t, *psq_send_hdr_v3_t;
+
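+/*
+ * Example use of the header definitions above; an illustrative sketch
+ * only. The helper name and the two-SGE sizing are hypothetical, not
+ * part of the HSI definition.
+ */
+static inline void
+sq_send_hdr_v3_init(sq_send_hdr_v3_t *hdr, uint32_t opaque)
+{
+ hdr->wqe_type = SQ_SEND_HDR_V3_WQE_TYPE_SEND_V3;
+ hdr->flags = SQ_SEND_HDR_V3_FLAGS_SIGNAL_COMP;
+ /* One 16B header slot plus two 16B SGE slots. */
+ hdr->wqe_size = 3;
+ hdr->inline_length = 0;
+ hdr->opaque = opaque; /* echoed back in the signaled completion */
+ hdr->inv_key_or_imm_data = 0;
+ hdr->timestamp = 0;
+}
+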
+/* SQ WQE V3 for Raw Ethernet and QP1 */
+/* sq_rawqp1send_v3 (size:1024b/128B) */
+
+typedef struct sq_rawqp1send_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* RawEth/QP1 Send V3 */
+ #define SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3 UINT32_C(0x1d)
+ #define SQ_RAWQP1SEND_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_V3_WQE_TYPE_RAWQP1SEND_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support unsignaled completion,
+ * the controller should not generate a CQE unless there was
+ * an error. This refers to the CQE on the sender side. (The se
+ * flag refers to the receiver side).
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_RAWQP1SEND_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The number of 16 bytes chunks of data including this first
+ * word of the request that are a valid part of the request. The
+ * valid 16 bytes units other than the WQE structure can be
+ * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_RAWQP1SEND_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_RAWQP1SEND_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+ * When inline flag is '1', this field determines the number of
+ * bytes that are valid in the last 16B unit of the inline WQE.
+ * Zero means all 16 bytes are valid. One means only bits 7:0 of
+ * the last 16B unit are valid. This means the total size of the
+ * inline data is determined by a combination of the wqe_size field
+ * and this inline_length field.
+ *
+ * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+ * ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_RAWQP1SEND_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
+ */
+ uint16_t lflags;
+ /*
+ * If set to 1, the controller replaces the TCP/UDP checksum
+ * fields of normal TCP/UDP packets, or the inner TCP/UDP
+ * checksum field of the encapsulated TCP/UDP packets, with the
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ /*
+ * If set to 1, the controller replaces the IP checksum of the
+ * normal packets, or the inner IP checksum of the encapsulated
+ * packets with the hardware calculated IP checksum for the
+ * packet associated with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC
+ * to the end of the frame.
+ *
+ * This bit must be valid on the first BD of a packet.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, The controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor. In case of
+ * VXLAN, the controller also replaces the outer header UDP
+ * checksum with hardware calculated UDP checksum for the packet
+ * associated with this descriptor.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, The controller replaces the Outer-tunnel IP
+ * checksum field with hardware calculated IP checksum for the IP
+ * header of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, it will be the following behavior for
+ * all cases independent of settings of inner LSO and checksum
+ * offload BD flags:
+ *
+ * - If outer UDP checksum is 0, then do not update it.
+ * - If outer UDP checksum is non-zero, then the hardware should
+ * compute and update it.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_OT_IP_CHKSUM UINT32_C(0x20)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define SQ_RAWQP1SEND_V3_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action_high;
+ /*
+ * This value selects bits 25:16 of the CFA action to perform on
+ * the packet. See the cfa_action field for more information.
+ */
+ #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_MASK UINT32_C(0x3ff)
+ #define SQ_RAWQP1SEND_V3_CFA_ACTION_HIGH_SFT 0
+ uint16_t reserved_2;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key=1, this is the VLAN tag VID value. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, this is the VLAN tag DE value. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, this is the VLAN tag PRI value. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, this is the VLAN tag TPID select value. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPIDCFG
+ /* When key=1, these bits are reserved. */
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define SQ_RAWQP1SEND_V3_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
+ */
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define SQ_RAWQP1SEND_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_RAWQP1SEND_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_RAWQP1SEND_V3_TIMESTAMP_SFT 0
+ uint64_t reserved_3;
+ /*
+ * When inline=0, then this area is filled with 1 to 6 SGEs
+ * based on the wqe_size field.
+ *
+ * When inline=1, this area is filled with payload data for the
+ * send. Length of data is described in the inline_length field.
+ * Bits [7:0] of word 0 hold the first byte to go out on the wire.
+ */
+ uint32_t data[24];
+} sq_rawqp1send_v3_t, *psq_rawqp1send_v3_t;
+
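+/*
+ * Example cfa_meta encoding; an illustrative sketch only, with a
+ * hypothetical helper name. It builds a VLAN-tag edit value with TPID
+ * select 0x8100 from a VID and PRI, using the key/TPID/PRI/VID fields
+ * defined above. Byte-order conversion is left to the caller.
+ */
+static inline uint32_t
+sq_rawqp1send_v3_vlan_meta(uint16_t vid, uint8_t pri)
+{
+ return (SQ_RAWQP1SEND_V3_CFA_META_KEY_VLAN_TAG |
+     SQ_RAWQP1SEND_V3_CFA_META_VLAN_TPID_TPID8100 |
+     (((uint32_t)pri << SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_SFT) &
+      SQ_RAWQP1SEND_V3_CFA_META_VLAN_PRI_MASK) |
+     ((uint32_t)vid & SQ_RAWQP1SEND_V3_CFA_META_VLAN_VID_MASK));
+}
+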
+/* SQ WQE V3 structure for Raw Ethernet and QP1 SQs. */
+/* sq_rawqp1send_hdr_v3 (size:256b/32B) */
+
+typedef struct sq_rawqp1send_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* RawEth/QP1 Send V3 */
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3 UINT32_C(0x1d)
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_LAST SQ_RAWQP1SEND_HDR_V3_WQE_TYPE_RAWQP1SEND_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support unsignaled completion,
+ * the controller should not generate a CQE unless there was
+ * an error. This refers to the CQE on the sender side. (The se
+ * flag refers to the receiver side).
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ *
+ * This flag must be zero for a QP1 send.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The number of 16 bytes chunks of data including this first
+ * word of the request that are a valid part of the request. The
+ * valid 16 bytes units other than the WQE structure can be
+ * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_RAWQP1SEND_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+ * When inline flag is '1', this field determines the number of
+ * bytes that are valid in the last 16B unit of the inline WQE.
+ * Zero means all 16 bytes are valid. One means only bits 7:0 of
+ * the last 16B unit are valid. This means the total size of the
+ * inline data is determined by a combination of the wqe_size field
+ * and this inline_length field.
+ *
+ * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+ * ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_RAWQP1SEND_HDR_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * All bits in this field must be valid on the first BD of a packet.
+ * Their value on other BDs of the packet will be ignored.
+ */
+ uint16_t lflags;
+ /*
+ * If set to 1, the controller replaces the TCP/UDP checksum
+ * fields of normal TCP/UDP packets, or the inner TCP/UDP
+ * checksum field of the encapsulated TCP/UDP packets, with the
+ * hardware calculated TCP/UDP checksum for the packet associated
+ * with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1)
+ /*
+ * If set to 1, the controller replaces the IP checksum of the
+ * normal packets, or the inner IP checksum of the encapsulated
+ * packets with the hardware calculated IP checksum for the
+ * packet associated with this descriptor.
+ *
+ * This bit must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_IP_CHKSUM UINT32_C(0x2)
+ /*
+ * If set to 1, the controller will not append an Ethernet CRC
+ * to the end of the frame.
+ *
+ * This bit must be valid on the first BD of a packet.
+ *
+ * Packet must be 64B or longer when this flag is set. It is not
+ * useful to use this bit with any form of TX offload such as
+ * CSO or LSO. The intent is that the packet from the host already
+ * has a valid Ethernet CRC on the packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_NOCRC UINT32_C(0x4)
+ /*
+ * If set to 1, The controller replaces the tunnel IP checksum
+ * field with hardware calculated IP checksum for the IP header
+ * of the packet associated with this descriptor. In case of
+ * VXLAN, the controller also replaces the outer header UDP
+ * checksum with hardware calculated UDP checksum for the packet
+ * associated with this descriptor.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_T_IP_CHKSUM UINT32_C(0x10)
+ /*
+ * If set to 1, The controller replaces the Outer-tunnel IP
+ * checksum field with hardware calculated IP checksum for the IP
+ * header of the packet associated with this descriptor.
+ *
+ * For outer UDP checksum, it will be the following behavior for
+ * all cases independent of settings of inner LSO and checksum
+ * offload BD flags:
+ *
+ * - If outer UDP checksum is 0, then do not update it.
+ * - If outer UDP checksum is non-zero, then the hardware should
+ * compute and update it.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_OT_IP_CHKSUM UINT32_C(0x20)
+ /*
+ * If set to '1', then the RoCE ICRC will be appended to the
+ * packet. Packet must be a valid RoCE format packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_ROCE_CRC UINT32_C(0x100)
+ /*
+ * If set to '1', then the FCoE CRC will be appended to the
+ * packet. Packet must be a valid FCoE format packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_LFLAGS_FCOE_CRC UINT32_C(0x200)
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action;
+ /*
+ * This value selects a CFA action to perform on the packet.
+ * Set this value to zero if no CFA action is desired.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ uint16_t cfa_action_high;
+ /*
+ * This value selects bits 25:16 of the CFA action to perform on
+ * the packet. See the cfa_action field for more information.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_MASK UINT32_C(0x3ff)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_ACTION_HIGH_SFT 0
+ uint16_t reserved_2;
+ /*
+ * This value is action meta-data that defines CFA edit operations
+ * that are done in addition to any action editing.
+ */
+ uint32_t cfa_meta;
+ /* When key=1, this is the VLAN tag VID value. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_MASK UINT32_C(0xfff)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_VID_SFT 0
+ /* When key=1, this is the VLAN tag DE value. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_DE UINT32_C(0x1000)
+ /* When key=1, this is the VLAN tag PRI value. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_PRI_SFT 13
+ /* When key=1, this is the VLAN tag TPID select value. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_SFT 16
+ /* 0x88a8 */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16)
+ /* 0x8100 */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16)
+ /* 0x9100 */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16)
+ /* 0x9200 */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16)
+ /* 0x9300 */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16)
+ /* Value programmed in CFA VLANTPID register. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_TPID_TPIDCFG
+ /* When key=1, these bits are reserved. */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_VLAN_RESERVED_SFT 19
+ /*
+ * This field identifies the type of edit to be performed
+ * on the packet.
+ *
+ * This value must be valid on the first BD of a packet.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_MASK UINT32_C(0xf0000000)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_SFT 28
+ /* No editing */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_NONE (UINT32_C(0x0) << 28)
+ /*
+ * - meta[17:16] - TPID select value (0 = 0x8100).
+ * - meta[15:12] - PRI/DE value.
+ * - meta[11:0] - VID value.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28)
+ #define SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_LAST SQ_RAWQP1SEND_HDR_V3_CFA_META_KEY_VLAN_TAG
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_RAWQP1SEND_HDR_V3_TIMESTAMP_SFT 0
+ uint64_t reserved_3;
+} sq_rawqp1send_hdr_v3_t, *psq_rawqp1send_hdr_v3_t;
+
+/* SQ Send WQE V3 for UD SQs. */
+/* sq_udsend_v3 (size:1024b/128B) */
+
+typedef struct sq_udsend_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * UD Send V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_V3 UINT32_C(0x13)
+ /*
+ * UD Send with Immediate V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3 UINT32_C(0x14)
+ #define SQ_UDSEND_V3_WQE_TYPE_LAST SQ_UDSEND_V3_WQE_TYPE_UDSEND_W_IMMED_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support unsignaled completion,
+ * the controller should not generate a CQE unless there was
+ * an error. This refers to the CQE on the sender side. (The se
+ * flag refers to the receiver side).
+ */
+ #define SQ_UDSEND_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_UDSEND_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_UDSEND_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ */
+ #define SQ_UDSEND_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_UDSEND_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_UDSEND_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_UDSEND_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The number of 16 bytes chunks of data including this first
+ * word of the request that are a valid part of the request. The
+ * valid 16 bytes units other than the WQE structure can be
+ * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_UDSEND_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_UDSEND_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+ * When inline flag is '1', this field determines the number of
+ * bytes that are valid in the last 16B unit of the inline WQE.
+ * Zero means all 16 bytes are valid. One means only bits 7:0 of
+ * the last 16B unit are valid. This means the total size of the
+ * inline data is determined by a combination of the wqe_size field
+ * and this inline_length field.
+ *
+ * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+ * ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_UDSEND_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_UDSEND_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Immediate value that will be sent with ImmDt header if wqe_type is
+ * UD Send with Immediate.
+ */
+ uint32_t imm_data;
+ /*
+ * When in the SQ of a UD QP, indicates the q_key to be used in
+ * the transmitted packet. However, if the most significant bit
+ * of this field is set, then the q_key will be taken from QP
+ * context, rather than from this field.
+ *
+ * When in the SQ of a non-UD QP, this field is reserved and
+ * should be filled with zeros.
+ */
+ uint32_t q_key;
+ /*
+ * When in the SQ of a UD QP, indicates the destination QP to be
+ * used in the transmitted packet.
+ *
+ * When in the SQ of a non-UD QP, this field is reserved and
+ * should be filled with zeros.
+ */
+ uint32_t dst_qp;
+ #define SQ_UDSEND_V3_DST_QP_MASK UINT32_C(0xffffff)
+ #define SQ_UDSEND_V3_DST_QP_SFT 0
+ uint32_t avid;
+ /*
+ * If the serv_type is 'UD', then this field supplies the AVID
+ * (Address Vector ID).
+ */
+ #define SQ_UDSEND_V3_AVID_MASK UINT32_C(0x3ff)
+ #define SQ_UDSEND_V3_AVID_SFT 0
+ uint32_t reserved2;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_UDSEND_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_UDSEND_V3_TIMESTAMP_SFT 0
+ /*
+ * When inline=0, then this area is filled with 1 to 30 SGEs
+ * based on the wqe_size field.
+ *
+ * When inline=1, this area is filled with payload data for the
+ * send. Length of data is described in the inline_length field.
+ * Bits [7:0] of word 0 hold the first byte to go out on the wire.
+ */
+ uint32_t data[24];
+} sq_udsend_v3_t, *psq_udsend_v3_t;
+
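+/*
+ * Example use of the UD addressing fields above; an illustrative
+ * sketch only, with a hypothetical helper name. It fills dst_qp,
+ * q_key, and avid with the field masks applied. Byte-order conversion
+ * is left to the caller.
+ */
+static inline void
+sq_udsend_v3_set_dest(sq_udsend_v3_t *wqe, uint32_t dqpn, uint32_t qkey,
+    uint16_t avid)
+{
+ wqe->dst_qp = (dqpn & SQ_UDSEND_V3_DST_QP_MASK);
+ wqe->q_key = qkey; /* MSB set selects the q_key from QP context */
+ wqe->avid = ((uint32_t)avid & SQ_UDSEND_V3_AVID_MASK);
+}
+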
+/* SQ WQE V3 header for UD SQs. */
+/* sq_udsend_hdr_v3 (size:256b/32B) */
+
+typedef struct sq_udsend_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * UD Send V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_V3 UINT32_C(0x13)
+ /*
+ * UD Send with Immediate V3
+ *
+ * Allowed only on unreliable datagram (UD) SQs.
+ */
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3 UINT32_C(0x14)
+ #define SQ_UDSEND_HDR_V3_WQE_TYPE_LAST SQ_UDSEND_HDR_V3_WQE_TYPE_UDSEND_W_IMMED_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled completion,
+	 * the controller should not generate a CQE unless there was
+	 * an error. This refers to the CQE on the sender side. (The se
+	 * flag refers to the receiver side.)
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic WQEs
+ * on the SQ before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all
+ * previous SQ's WQEs before executing this WQE.
+ *
+ * This flag must be zero for a UD send.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event flag. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification, i.e.
+ * CNQE.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ in the data
+ * area of this WQE.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_UDSEND_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+	 * The number of 16-byte chunks of data, including this first
+	 * word of the request, that are a valid part of the request.
+	 * The valid 16-byte units other than the WQE structure can be
+	 * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_UDSEND_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_UDSEND_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+	 * When the inline flag is '1', this field determines the
+	 * number of bytes that are valid in the last 16B unit of the
+	 * inline WQE. Zero means all 16 bytes are valid. One means
+	 * only bits 7:0 of the last 16B unit are valid. This means the
+	 * total size of the inline data is determined by a combination
+	 * of the wqe_size field and this inline_length field.
+	 *
+	 * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+	 *               ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_UDSEND_HDR_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Immediate value that will be sent with ImmDt header if wqe_type is
+ * UD Send with Immediate.
+ */
+ uint32_t imm_data;
+ /*
+ * When in the SQ of a UD QP, indicates the q_key to be used in
+ * the transmitted packet. However, if the most significant bit
+ * of this field is set, then the q_key will be taken from QP
+ * context, rather than from this field.
+ *
+ * When in the SQ of a non-UD QP, this field is reserved and
+ * should be filled with zeros.
+ */
+ uint32_t q_key;
+ /*
+ * When in the SQ of a UD QP, indicates the destination QP to be
+ * used in the transmitted packet.
+ *
+ * When in the SQ of a non-UD QP, this field is reserved and
+ * should be filled with zeros.
+ */
+ uint32_t dst_qp;
+ #define SQ_UDSEND_HDR_V3_DST_QP_MASK UINT32_C(0xffffff)
+ #define SQ_UDSEND_HDR_V3_DST_QP_SFT 0
+ uint32_t avid;
+ /*
+ * If the serv_type is 'UD', then this field supplies the AVID
+ * (Address Vector ID).
+ */
+ #define SQ_UDSEND_HDR_V3_AVID_MASK UINT32_C(0x3ff)
+ #define SQ_UDSEND_HDR_V3_AVID_SFT 0
+ uint32_t reserved2;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_UDSEND_HDR_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_UDSEND_HDR_V3_TIMESTAMP_SFT 0
+} sq_udsend_hdr_v3_t, *psq_udsend_hdr_v3_t;
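+
+/*
+ * A minimal usage sketch (an assumption, not vendor-provided code):
+ * populating the UD addressing fields of a sq_udsend_hdr_v3 with the
+ * _MASK/_SFT pairs defined above. Byte-order conversion and doorbell
+ * handling are omitted.
+ */
+static inline void
+sq_udsend_hdr_v3_set_dst(sq_udsend_hdr_v3_t *wqe, uint32_t dst_qp,
+    uint32_t avid)
+{
+	/* Only the low 24 bits of the destination QP are carried. */
+	wqe->dst_qp = (dst_qp << SQ_UDSEND_HDR_V3_DST_QP_SFT) &
+	    SQ_UDSEND_HDR_V3_DST_QP_MASK;
+	/* avid is a 10-bit Address Vector ID. */
+	wqe->avid = (avid << SQ_UDSEND_HDR_V3_AVID_SFT) &
+	    SQ_UDSEND_HDR_V3_AVID_MASK;
+	/* MSB set: take the q_key from QP context, not from this WQE. */
+	wqe->q_key = UINT32_C(1) << 31;
+}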
+
+/* SQ RDMA WQE V3 for RC SQs. */
+/* sq_rdma_v3 (size:1024b/128B) */
+
+typedef struct sq_rdma_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * RDMA Write V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_V3_WQE_TYPE_WRITE_WQE_V3 UINT32_C(0x15)
+ /*
+ * RDMA Write with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_V3_WQE_TYPE_WRITE_W_IMMED_V3 UINT32_C(0x16)
+ /*
+ * RDMA Read V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3 UINT32_C(0x17)
+ #define SQ_RDMA_V3_WQE_TYPE_LAST SQ_RDMA_V3_WQE_TYPE_READ_WQE_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_RDMA_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_RDMA_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_RDMA_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification,
+ * i.e. CNQE.
+ */
+ #define SQ_RDMA_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ following
+ * this WQE. This bit may be 1 only for write operations.
+ */
+ #define SQ_RDMA_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_RDMA_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_RDMA_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+	 * The number of 16-byte chunks of data, including this first
+	 * word of the request, that are a valid part of the request.
+	 * The valid 16-byte units other than the WQE structure can be
+	 * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_RDMA_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_RDMA_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+	 * When the inline flag is '1', this field determines the
+	 * number of bytes that are valid in the last 16B unit of the
+	 * inline WQE. Zero means all 16 bytes are valid. One means
+	 * only bits 7:0 of the last 16B unit are valid. This means the
+	 * total size of the inline data is determined by a combination
+	 * of the wqe_size field and this inline_length field.
+	 *
+	 * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+	 *               ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_RDMA_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_RDMA_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+	 * Immediate data. Valid for RDMA Write with Immediate; causes
+	 * the controller to add an ImmDt header with this value.
+ */
+ uint32_t imm_data;
+ uint32_t reserved2;
+ /* Remote VA sent to the destination QP */
+ uint64_t remote_va;
+ /*
+	 * R_Key provided by the remote node when the connection was
+	 * established and placed in the RETH header. It identifies
+	 * the MRW on the remote host.
+ */
+ uint32_t remote_key;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_RDMA_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_RDMA_V3_TIMESTAMP_SFT 0
+ /*
+	 * When inline=0, this area is filled with 1 to 30 SGEs
+ * based on the wqe_size field.
+ *
+ * When inline=1, this area is filled with payload data for the send.
+ * Length of data is described in the inline_length field. Bits [7:0]
+ * of word 0 hold the first byte to go out on the wire.
+ */
+ uint32_t data[24];
+} sq_rdma_v3_t, *psq_rdma_v3_t;
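+
+/*
+ * A minimal sketch (assumed helper, not vendor-provided): preparing the
+ * fixed fields of an RDMA Write V3 WQE. SGE setup in data[], byte-order
+ * conversion, and queue accounting are omitted.
+ */
+static inline void
+sq_rdma_v3_prep_write(sq_rdma_v3_t *wqe, uint64_t remote_va,
+    uint32_t remote_key, uint8_t num_16b_units)
+{
+	wqe->wqe_type = SQ_RDMA_V3_WQE_TYPE_WRITE_WQE_V3;
+	/* Request a sender-side CQE for this WQE. */
+	wqe->flags = SQ_RDMA_V3_FLAGS_SIGNAL_COMP;
+	/* Total WQE size in 16B slots: the 32B header plus the SGEs. */
+	wqe->wqe_size = num_16b_units & SQ_RDMA_V3_WQE_SIZE_MASK;
+	/* RETH fields identifying the target buffer on the responder. */
+	wqe->remote_va = remote_va;
+	wqe->remote_key = remote_key;
+}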
+
+/* SQ RDMA WQE V3 header for RC SQs. */
+/* sq_rdma_hdr_v3 (size:256b/32B) */
+
+typedef struct sq_rdma_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * RDMA Write V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_WQE_V3 UINT32_C(0x15)
+ /*
+ * RDMA Write with Immediate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_WRITE_W_IMMED_V3 UINT32_C(0x16)
+ /*
+ * RDMA Read V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3 UINT32_C(0x17)
+ #define SQ_RDMA_HDR_V3_WQE_TYPE_LAST SQ_RDMA_HDR_V3_WQE_TYPE_READ_WQE_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification,
+ * i.e. CNQE.
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * Indicate that inline data is posted to the SQ following
+ * this WQE. This bit may be 1 only for write operations.
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * If set to 1, then the timestamp from the WQE is used. If
+ * cleared to 0, then TWE provides the timestamp.
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_RDMA_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+	 * The number of 16-byte chunks of data, including this first
+	 * word of the request, that are a valid part of the request.
+	 * The valid 16-byte units other than the WQE structure can be
+	 * SGEs (Scatter Gather Elements) OR inline data.
+ *
+ * This field shall never exceed 32 for WQEs of this type.
+ */
+ #define SQ_RDMA_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_RDMA_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t inline_length;
+ /*
+	 * When the inline flag is '1', this field determines the
+	 * number of bytes that are valid in the last 16B unit of the
+	 * inline WQE. Zero means all 16 bytes are valid. One means
+	 * only bits 7:0 of the last 16B unit are valid. This means the
+	 * total size of the inline data is determined by a combination
+	 * of the wqe_size field and this inline_length field.
+	 *
+	 * inline_size = ((wqe_size - 1) * 16) - data_offset_in_bytes +
+	 *               ((inline_length == 0) ? 16 : inline_length)
+ *
+ * Where data_offset_in_bytes is the offset within the WQE where
+ * the data field starts.
+ *
+ * Note that this field is not applicable for zero-length inline
+ * WQEs.
+ */
+ #define SQ_RDMA_HDR_V3_INLINE_LENGTH_MASK UINT32_C(0xf)
+ #define SQ_RDMA_HDR_V3_INLINE_LENGTH_SFT 0
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+	 * Immediate data. Valid for RDMA Write with Immediate; causes
+	 * the controller to add an ImmDt header with this value.
+ */
+ uint32_t imm_data;
+ uint32_t reserved2;
+ /* Remote VA sent to the destination QP */
+ uint64_t remote_va;
+ /*
+	 * R_Key provided by the remote node when the connection was
+	 * established and placed in the RETH header. It identifies
+	 * the MRW on the remote host.
+ */
+ uint32_t remote_key;
+ uint32_t timestamp;
+ /*
+ * This field specifies a 24-bit timestamp that can be passed
+ * down the TX path and optionally logged in the TXP timestamp
+ * histogram.
+ */
+ #define SQ_RDMA_HDR_V3_TIMESTAMP_MASK UINT32_C(0xffffff)
+ #define SQ_RDMA_HDR_V3_TIMESTAMP_SFT 0
+} sq_rdma_hdr_v3_t, *psq_rdma_hdr_v3_t;
+
+/* SQ Atomic V3 WQE for RC SQs. */
+/* sq_atomic_v3 (size:448b/56B) */
+
+typedef struct sq_atomic_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Atomic Compare/Swap V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_CS_V3 UINT32_C(0x18)
+ /*
+ * Atomic Fetch/Add V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3 UINT32_C(0x19)
+ #define SQ_ATOMIC_V3_WQE_TYPE_LAST SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_FA_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_ATOMIC_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_ATOMIC_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_ATOMIC_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification,
+ * i.e. CNQE.
+ */
+ #define SQ_ATOMIC_V3_FLAGS_SE UINT32_C(0x8)
+ /* NA for this WQE */
+ #define SQ_ATOMIC_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * The atomic WQE does not have a timestamp field, so this field is
+ * ignored and should be zero.
+ */
+ #define SQ_ATOMIC_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_ATOMIC_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Atomic WQE, this field will always have a value of 4.
+ */
+ #define SQ_ATOMIC_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_ATOMIC_V3_WQE_SIZE_SFT 0
+ uint8_t reserved1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+	 * R_Key provided by the remote node when the connection was
+	 * established and placed in the AtomicETH header. It identifies the
+ * MRW on the remote host.
+ */
+ uint32_t remote_key;
+ uint32_t reserved2;
+ /* Remote VA sent to the destination QP */
+ uint64_t remote_va;
+ /*
+ * For compare/swap, this is the data value to be placed in the
+ * remote host at the specified remote_VA if the comparison succeeds.
+ *
+ * For fetch/add, this is the value to be added to the data in the
+ * remote host at the specified remote_VA.
+ */
+ uint64_t swap_data;
+ /*
+ * For compare/swap, this is the data value to be compared with the
+ * value in the remote host at the specified remote_VA.
+ *
+ * This field is not used for fetch/add.
+ */
+ uint64_t cmp_data;
+ /*
+	 * The virtual address in local memory, or a physical address
+	 * when the l_key value is the reserved value indicating a
+	 * physical address. The driver configures this reserved value
+	 * in the chip; the chip compares the l_key in SGEs with it
+	 * and, if equal, accesses the physical address specified. The
+	 * chip, however, MUST verify that the QP allows use of the
+	 * reserved key.
+ */
+ uint64_t va_or_pa;
+ /*
+	 * Local Key associated with this registered MR; the 24 msb of
+	 * the key are used to index the MRW Table and the 8 lsb are
+	 * compared with the 8-bit key part stored in the MRWC. The
+	 * PBL in the MRW Context is used to translate the above VA to
+	 * a physical address.
+ */
+ uint32_t l_key;
+ /*
+	 * Size of the SGE in bytes. Based on the page size of the
+	 * system, the chip knows how many entries are in the PBL.
+ *
+ * This field must have a value of 8 for an Atomic WQE.
+ */
+ uint32_t size;
+} sq_atomic_v3_t, *psq_atomic_v3_t;
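+
+/*
+ * A minimal sketch (assumed helper, not vendor-provided): preparing an
+ * Atomic Compare/Swap V3 WQE. Per the comments above, wqe_size is
+ * always 4 and the SGE size field must be 8; the local SGE (va_or_pa,
+ * l_key) receives the original value read back from the responder.
+ */
+static inline void
+sq_atomic_v3_prep_cs(sq_atomic_v3_t *wqe, uint64_t remote_va,
+    uint32_t remote_key, uint64_t cmp, uint64_t swap)
+{
+	wqe->wqe_type = SQ_ATOMIC_V3_WQE_TYPE_ATOMIC_CS_V3;
+	wqe->flags = SQ_ATOMIC_V3_FLAGS_SIGNAL_COMP;
+	wqe->wqe_size = 4;		/* Fixed for atomic WQEs. */
+	wqe->remote_key = remote_key;
+	wqe->remote_va = remote_va;
+	wqe->swap_data = swap;		/* Stored if *remote_va == cmp. */
+	wqe->cmp_data = cmp;
+	wqe->size = 8;			/* Atomic operand is 8 bytes. */
+}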
+
+/* SQ Atomic WQE V3 header for RC SQs. */
+/* sq_atomic_hdr_v3 (size:320b/40B) */
+
+typedef struct sq_atomic_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Atomic Compare/Swap V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_CS_V3 UINT32_C(0x18)
+ /*
+ * Atomic Fetch/Add V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3 UINT32_C(0x19)
+ #define SQ_ATOMIC_HDR_V3_WQE_TYPE_LAST SQ_ATOMIC_HDR_V3_WQE_TYPE_ATOMIC_FA_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * Solicit event. Indication sent in BTH header to the
+ * receiver to generate a Completion Event Notification,
+ * i.e. CNQE.
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /* NA for this WQE */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * The atomic WQE does not have a timestamp field, so this field is
+ * ignored and should be zero.
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_ATOMIC_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Atomic WQE, this field will always have a value of 4.
+ */
+ #define SQ_ATOMIC_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_ATOMIC_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t reserved1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+	 * R_Key provided by the remote node when the connection was
+	 * established and placed in the AtomicETH header. It identifies the
+ * MRW on the remote host.
+ */
+ uint32_t remote_key;
+ uint32_t reserved2;
+ /* Remote VA sent to the destination QP */
+ uint64_t remote_va;
+ /*
+ * For compare/swap, this is the data value to be placed in the
+ * remote host at the specified remote_VA if the comparison succeeds.
+ *
+ * For fetch/add, this is the value to be added to the data in the
+ * remote host at the specified remote_VA.
+ */
+ uint64_t swap_data;
+ /*
+ * For compare/swap, this is the data value to be compared with the
+ * value in the remote host at the specified remote_VA.
+ *
+ * This field is not used for fetch/add.
+ */
+ uint64_t cmp_data;
+} sq_atomic_hdr_v3_t, *psq_atomic_hdr_v3_t;
+
+/* SQ Local Invalidate WQE V3 for RC SQs. */
+/* sq_localinvalidate_v3 (size:128b/16B) */
+
+typedef struct sq_localinvalidate_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Local Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3 UINT32_C(0x1a)
+ #define SQ_LOCALINVALIDATE_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_LOCALINVALIDATE_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Local Invalidate WQE, this field will always have
+ * a value of 1.
+ */
+ #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_LOCALINVALIDATE_V3_WQE_SIZE_SFT 0
+ uint8_t reserved1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * The local key for the MR/W to invalidate; 24 msb of the key
+ * are used to index the MRW table, 8 lsb are compared with the
+	 * 8-bit key in the MRWC.
+ */
+ uint32_t inv_l_key;
+ uint32_t reserved2;
+} sq_localinvalidate_v3_t, *psq_localinvalidate_v3_t;
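+
+/*
+ * A minimal sketch (assumed helper, not vendor-provided): a Local
+ * Invalidate V3 WQE needs only the type, the fixed one-slot size, and
+ * the key to invalidate.
+ */
+static inline void
+sq_localinvalidate_v3_prep(sq_localinvalidate_v3_t *wqe, uint32_t l_key)
+{
+	wqe->wqe_type = SQ_LOCALINVALIDATE_V3_WQE_TYPE_LOCAL_INVALID_V3;
+	wqe->flags = SQ_LOCALINVALIDATE_V3_FLAGS_SIGNAL_COMP;
+	wqe->wqe_size = 1;	/* Always one 16B slot. */
+	/* 24 msb index the MRW table; 8 lsb are checked against MRWC. */
+	wqe->inv_l_key = l_key;
+}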
+
+/* SQ Local Invalidate WQE V3 header for RC SQs. */
+/* sq_localinvalidate_hdr_v3 (size:128b/16B) */
+
+typedef struct sq_localinvalidate_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Local Invalidate V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3 UINT32_C(0x1a)
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LAST SQ_LOCALINVALIDATE_HDR_V3_WQE_TYPE_LOCAL_INVALID_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Local Invalidate WQE, this field will always have
+ * a value of 1.
+ */
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_LOCALINVALIDATE_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t reserved1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * The local key for the MR/W to invalidate; 24 msb of the key
+ * are used to index the MRW table, 8 lsb are compared with the
+	 * 8-bit key in the MRWC.
+ */
+ uint32_t inv_l_key;
+ uint32_t reserved2;
+} sq_localinvalidate_hdr_v3_t, *psq_localinvalidate_hdr_v3_t;
+
+/*
+ * SQ FR-PMR WQE V3 for RC SQs.
+ *
+ * The FR-PMR WQE must be padded to 3 slots (48 bytes) in the SQ, even
+ * though the final 8 bytes are not shown here.
+ */
+/* sq_fr_pmr_v3 (size:320b/40B) */
+
+typedef struct sq_fr_pmr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * FR-PMR (Fast Register Physical Memory Region) V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3 UINT32_C(0x1b)
+ #define SQ_FR_PMR_V3_WQE_TYPE_LAST SQ_FR_PMR_V3_WQE_TYPE_FR_PMR_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_FR_PMR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_FR_PMR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_FR_PMR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_FR_PMR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size_zero_based;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the FR-PMR WQE, this field will always have a value of 3.
+ */
+ #define SQ_FR_PMR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_FR_PMR_V3_WQE_SIZE_SFT 0
+ /*
+ * If this is set, the PMR will be zero-based. If clear, the PMR
+ * will be non-zero-based.
+ */
+ #define SQ_FR_PMR_V3_ZERO_BASED UINT32_C(0x40)
+ /*
+ * This is the new access control for the MR. '1' means
+	 * the operation is allowed. '0' means the operation is
+ * not allowed.
+ */
+ uint8_t access_cntl;
+ /* Local Write Access */
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
+ /* Remote Read Access */
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_READ UINT32_C(0x2)
+ /* Remote Write Access */
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
+ /* Remote Atomic Access */
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
+ /* Window Binding Allowed */
+ #define SQ_FR_PMR_V3_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Local Key; 24 msb of the key are used to index the MRW
+	 * table, 8 lsb are assigned to the 8-bit key_lsb field in
+ * the MRWC.
+ */
+ uint32_t l_key;
+ uint16_t page_size_log;
+ /*
+ * This value controls the page size for leaf memory pages in
+	 * a PBL. While many page sizes are supported, only the
+	 * following should be tested: 4k, 8k, 64k, 256k, 1m, 2m, 4m, 1g.
+ */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_MASK UINT32_C(0x1f)
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_SFT 0
+ /* Page size is 4KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4K UINT32_C(0x0)
+ /* Page size is 8KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8K UINT32_C(0x1)
+ /* Page size is 16KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16K UINT32_C(0x2)
+ /* Page size is 32KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32K UINT32_C(0x3)
+ /* Page size is 64KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64K UINT32_C(0x4)
+ /* Page size is 128KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128K UINT32_C(0x5)
+ /* Page size is 256KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256K UINT32_C(0x6)
+ /* Page size is 512KB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512K UINT32_C(0x7)
+ /* Page size is 1MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1M UINT32_C(0x8)
+ /* Page size is 2MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2M UINT32_C(0x9)
+ /* Page size is 4MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4M UINT32_C(0xa)
+ /* Page size is 8MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8M UINT32_C(0xb)
+ /* Page size is 16MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16M UINT32_C(0xc)
+ /* Page size is 32MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32M UINT32_C(0xd)
+ /* Page size is 64MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64M UINT32_C(0xe)
+ /* Page size is 128MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128M UINT32_C(0xf)
+ /* Page size is 256MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256M UINT32_C(0x10)
+ /* Page size is 512MB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512M UINT32_C(0x11)
+ /* Page size is 1GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1G UINT32_C(0x12)
+ /* Page size is 2GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2G UINT32_C(0x13)
+ /* Page size is 4GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4G UINT32_C(0x14)
+ /* Page size is 8GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8G UINT32_C(0x15)
+ /* Page size is 16GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_16G UINT32_C(0x16)
+ /* Page size is 32GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_32G UINT32_C(0x17)
+ /* Page size is 64GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_64G UINT32_C(0x18)
+ /* Page size is 128GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_128G UINT32_C(0x19)
+ /* Page size is 256GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_256G UINT32_C(0x1a)
+ /* Page size is 512GB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_512G UINT32_C(0x1b)
+ /* Page size is 1TB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_1T UINT32_C(0x1c)
+ /* Page size is 2TB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_2T UINT32_C(0x1d)
+ /* Page size is 4TB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_4T UINT32_C(0x1e)
+ /* Page size is 8TB. */
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T UINT32_C(0x1f)
+ #define SQ_FR_PMR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PAGE_SIZE_LOG_PGSZ_8T
+ /*
+ * This value controls the page size for page table elements
+	 * within a PBL. While many page sizes are supported, only the
+	 * following should be tested: 4k, 8k, 64k, 256k, 1m, 2m, 4m, 1g.
+ */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_MASK UINT32_C(0x3e0)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_SFT 5
+ /* Page size is 4KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (UINT32_C(0x0) << 5)
+ /* Page size is 8KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (UINT32_C(0x1) << 5)
+ /* Page size is 16KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (UINT32_C(0x2) << 5)
+ /* Page size is 32KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (UINT32_C(0x3) << 5)
+ /* Page size is 64KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (UINT32_C(0x4) << 5)
+ /* Page size is 128KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (UINT32_C(0x5) << 5)
+ /* Page size is 256KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (UINT32_C(0x6) << 5)
+ /* Page size is 512KB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (UINT32_C(0x7) << 5)
+ /* Page size is 1MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (UINT32_C(0x8) << 5)
+ /* Page size is 2MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (UINT32_C(0x9) << 5)
+ /* Page size is 4MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (UINT32_C(0xa) << 5)
+ /* Page size is 8MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (UINT32_C(0xb) << 5)
+ /* Page size is 16MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (UINT32_C(0xc) << 5)
+ /* Page size is 32MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (UINT32_C(0xd) << 5)
+ /* Page size is 64MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (UINT32_C(0xe) << 5)
+ /* Page size is 128MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (UINT32_C(0xf) << 5)
+ /* Page size is 256MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (UINT32_C(0x10) << 5)
+ /* Page size is 512MB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (UINT32_C(0x11) << 5)
+ /* Page size is 1GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (UINT32_C(0x12) << 5)
+ /* Page size is 2GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (UINT32_C(0x13) << 5)
+ /* Page size is 4GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (UINT32_C(0x14) << 5)
+ /* Page size is 8GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (UINT32_C(0x15) << 5)
+ /* Page size is 16GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (UINT32_C(0x16) << 5)
+ /* Page size is 32GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (UINT32_C(0x17) << 5)
+ /* Page size is 64GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (UINT32_C(0x18) << 5)
+ /* Page size is 128GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (UINT32_C(0x19) << 5)
+ /* Page size is 256GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (UINT32_C(0x1a) << 5)
+ /* Page size is 512GB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (UINT32_C(0x1b) << 5)
+ /* Page size is 1TB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (UINT32_C(0x1c) << 5)
+ /* Page size is 2TB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (UINT32_C(0x1d) << 5)
+ /* Page size is 4TB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (UINT32_C(0x1e) << 5)
+ /* Page size is 8TB. */
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (UINT32_C(0x1f) << 5)
+ #define SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ /* Number of levels of PBL for translation */
+ #define SQ_FR_PMR_V3_NUMLEVELS_MASK UINT32_C(0xc00)
+ #define SQ_FR_PMR_V3_NUMLEVELS_SFT 10
+ /*
+ * A zero level PBL means that the VA is the physical address
+ * used for the operation. No translation is done by the PTU.
+ */
+ #define SQ_FR_PMR_V3_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 10)
+ /*
+ * A one layer translation is provided between the logical and
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical
+ * pages.
+ */
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 10)
+ /*
+ * A two layer translation is provided between the logical and
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size
+ * physical pages that contain PBE values that point to actual
+ * physical pages.
+ */
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 10)
+ #define SQ_FR_PMR_V3_NUMLEVELS_LAST SQ_FR_PMR_V3_NUMLEVELS_LAYER2
+ uint16_t reserved;
+ /* Local Virtual Address */
+ uint64_t va;
+ /* Length in bytes of registered MR */
+ uint64_t length;
+	/* Pointer to the PBL or PDL, depending on the number of levels. */
+ uint64_t pbl_ptr;
+} sq_fr_pmr_v3_t, *psq_fr_pmr_v3_t;
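+
+/*
+ * A sketch of the PGSZ_* encoding above (an observation, not a vendor
+ * helper): the field value is log2(page_size) - 12, so 4KB encodes as
+ * 0 and 8TB as 0x1f. flsll() is FreeBSD's find-last-set helper from
+ * <sys/libkern.h>; the page size is assumed to be a power of two of
+ * at least 4KB.
+ */
+static inline uint16_t
+sq_fr_pmr_v3_pgsz_log(uint64_t page_size)
+{
+	/* flsll(4096) == 13, so 4KB yields (13 - 1) - 12 == 0. */
+	return (((uint16_t)(flsll(page_size) - 1 - 12) <<
+	    SQ_FR_PMR_V3_PAGE_SIZE_LOG_SFT) &
+	    SQ_FR_PMR_V3_PAGE_SIZE_LOG_MASK);
+}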
+
+/* SQ FR-PMR WQE V3 header for RC SQs. */
+/* sq_fr_pmr_hdr_v3 (size:320b/40B) */
+
+typedef struct sq_fr_pmr_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * FR-PMR (Fast Register Physical Memory Region) V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3 UINT32_C(0x1b)
+ #define SQ_FR_PMR_HDR_V3_WQE_TYPE_LAST SQ_FR_PMR_HDR_V3_WQE_TYPE_FR_PMR_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_FR_PMR_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ /* */
+ uint8_t wqe_size_zero_based;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the FR-PMR WQE, this field will always have a value of 3.
+ */
+ #define SQ_FR_PMR_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_FR_PMR_HDR_V3_WQE_SIZE_SFT 0
+ /*
+ * If this is set, the PMR will be zero-based. If clear, the PMR
+ * will be non-zero-based.
+ */
+ #define SQ_FR_PMR_HDR_V3_ZERO_BASED UINT32_C(0x40)
+ /*
+ * This is the new access control for the MR. '1' means
+	 * the operation is allowed. '0' means the operation is
+ * not allowed.
+ */
+ uint8_t access_cntl;
+ /* Local Write Access */
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
+ /* Remote Read Access */
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_READ UINT32_C(0x2)
+ /* Remote Write Access */
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
+ /* Remote Atomic Access */
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
+ /* Window Binding Allowed */
+ #define SQ_FR_PMR_HDR_V3_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * Local Key; 24 msb of the key are used to index the MRW
+	 * table, 8 lsb are assigned to the 8-bit key_lsb field in
+ * the MRWC.
+ */
+ uint32_t l_key;
+ uint16_t page_size_log;
+ /*
+ * This value controls the page size for leaf memory pages in
+	 * a PBL. While many page sizes are supported, only the
+	 * following should be tested: 4k, 8k, 64k, 256k, 1m, 2m, 4m, 1g.
+ */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_MASK UINT32_C(0x1f)
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_SFT 0
+ /* Page size is 4KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4K UINT32_C(0x0)
+ /* Page size is 8KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8K UINT32_C(0x1)
+ /* Page size is 16KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16K UINT32_C(0x2)
+ /* Page size is 32KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32K UINT32_C(0x3)
+ /* Page size is 64KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64K UINT32_C(0x4)
+ /* Page size is 128KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128K UINT32_C(0x5)
+ /* Page size is 256KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256K UINT32_C(0x6)
+ /* Page size is 512KB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512K UINT32_C(0x7)
+ /* Page size is 1MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1M UINT32_C(0x8)
+ /* Page size is 2MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2M UINT32_C(0x9)
+ /* Page size is 4MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4M UINT32_C(0xa)
+ /* Page size is 8MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8M UINT32_C(0xb)
+ /* Page size is 16MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16M UINT32_C(0xc)
+ /* Page size is 32MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32M UINT32_C(0xd)
+ /* Page size is 64MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64M UINT32_C(0xe)
+ /* Page size is 128MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128M UINT32_C(0xf)
+ /* Page size is 256MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256M UINT32_C(0x10)
+ /* Page size is 512MB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512M UINT32_C(0x11)
+ /* Page size is 1GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1G UINT32_C(0x12)
+ /* Page size is 2GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2G UINT32_C(0x13)
+ /* Page size is 4GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4G UINT32_C(0x14)
+ /* Page size is 8GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8G UINT32_C(0x15)
+ /* Page size is 16GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_16G UINT32_C(0x16)
+ /* Page size is 32GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_32G UINT32_C(0x17)
+ /* Page size is 64GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_64G UINT32_C(0x18)
+ /* Page size is 128GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_128G UINT32_C(0x19)
+ /* Page size is 256GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_256G UINT32_C(0x1a)
+ /* Page size is 512GB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_512G UINT32_C(0x1b)
+ /* Page size is 1TB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_1T UINT32_C(0x1c)
+ /* Page size is 2TB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_2T UINT32_C(0x1d)
+ /* Page size is 4TB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_4T UINT32_C(0x1e)
+ /* Page size is 8TB. */
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T UINT32_C(0x1f)
+ #define SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PAGE_SIZE_LOG_PGSZ_8T
+ /*
+ * This value controls the page size for page table elements
+	 * within a PBL. While many page sizes are supported, only the
+	 * following should be tested: 4k, 8k, 64k, 256k, 1m, 2m, 4m, 1g.
+ */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_MASK UINT32_C(0x3e0)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_SFT 5
+ /* Page size is 4KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4K (UINT32_C(0x0) << 5)
+ /* Page size is 8KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8K (UINT32_C(0x1) << 5)
+ /* Page size is 16KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16K (UINT32_C(0x2) << 5)
+ /* Page size is 32KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32K (UINT32_C(0x3) << 5)
+ /* Page size is 64KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64K (UINT32_C(0x4) << 5)
+ /* Page size is 128KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128K (UINT32_C(0x5) << 5)
+ /* Page size is 256KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256K (UINT32_C(0x6) << 5)
+ /* Page size is 512KB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512K (UINT32_C(0x7) << 5)
+ /* Page size is 1MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1M (UINT32_C(0x8) << 5)
+ /* Page size is 2MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2M (UINT32_C(0x9) << 5)
+ /* Page size is 4MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4M (UINT32_C(0xa) << 5)
+ /* Page size is 8MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8M (UINT32_C(0xb) << 5)
+ /* Page size is 16MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16M (UINT32_C(0xc) << 5)
+ /* Page size is 32MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32M (UINT32_C(0xd) << 5)
+ /* Page size is 64MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64M (UINT32_C(0xe) << 5)
+ /* Page size is 128MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128M (UINT32_C(0xf) << 5)
+ /* Page size is 256MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256M (UINT32_C(0x10) << 5)
+ /* Page size is 512MB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512M (UINT32_C(0x11) << 5)
+ /* Page size is 1GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1G (UINT32_C(0x12) << 5)
+ /* Page size is 2GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2G (UINT32_C(0x13) << 5)
+ /* Page size is 4GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4G (UINT32_C(0x14) << 5)
+ /* Page size is 8GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8G (UINT32_C(0x15) << 5)
+ /* Page size is 16GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_16G (UINT32_C(0x16) << 5)
+ /* Page size is 32GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_32G (UINT32_C(0x17) << 5)
+ /* Page size is 64GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_64G (UINT32_C(0x18) << 5)
+ /* Page size is 128GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_128G (UINT32_C(0x19) << 5)
+ /* Page size is 256GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_256G (UINT32_C(0x1a) << 5)
+ /* Page size is 512GB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_512G (UINT32_C(0x1b) << 5)
+ /* Page size is 1TB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_1T (UINT32_C(0x1c) << 5)
+ /* Page size is 2TB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_2T (UINT32_C(0x1d) << 5)
+ /* Page size is 4TB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_4T (UINT32_C(0x1e) << 5)
+ /* Page size is 8TB. */
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T (UINT32_C(0x1f) << 5)
+ #define SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_LAST SQ_FR_PMR_HDR_V3_PBL_PAGE_SIZE_LOG_PGSZ_8T
+ /* Number of levels of PBL for translation */
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_MASK UINT32_C(0xc00)
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_SFT 10
+ /*
+ * A zero level PBL means that the VA is the physical address
+ * used for the operation. No translation is done by the PTU.
+ */
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_PHYSICAL (UINT32_C(0x0) << 10)
+ /*
+ * A one layer translation is provided between the logical and
+ * physical address. The PBL points to a physical page that
+ * contains PBE values that point to actual pg_size physical
+ * pages.
+ */
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER1 (UINT32_C(0x1) << 10)
+ /*
+ * A two layer translation is provided between the logical and
+ * physical address. The PBL points to a physical page that
+ * contains PDE values that in turn point to pbl_pg_size
+ * physical pages that contain PBE values that point to actual
+ * physical pages.
+ */
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2 (UINT32_C(0x2) << 10)
+ #define SQ_FR_PMR_HDR_V3_NUMLEVELS_LAST SQ_FR_PMR_HDR_V3_NUMLEVELS_LAYER2
+ uint16_t reserved;
+ /* Local Virtual Address */
+ uint64_t va;
+ /* Length in bytes of registered MR */
+ uint64_t length;
+	/* Pointer to the PBL or PDL, depending on the number of levels. */
+ uint64_t pbl_ptr;
+} sq_fr_pmr_hdr_v3_t, *psq_fr_pmr_hdr_v3_t;
+
+/*
+ * SQ Bind WQE V3. This WQE can perform either:
+ * * type1 "bind memory window", if mw_type==Type1
+ * * type2 "post send bind memory window", if mw_type==Type2
+ */
+/* sq_bind_v3 (size:256b/32B) */
+
+typedef struct sq_bind_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Memory Bind V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BIND_V3_WQE_TYPE_BIND_V3 UINT32_C(0x1c)
+ #define SQ_BIND_V3_WQE_TYPE_LAST SQ_BIND_V3_WQE_TYPE_BIND_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_BIND_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_BIND_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_BIND_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_BIND_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ uint8_t wqe_size_zero_based_mw_type;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Bind WQE, this field will always have a value of 2.
+ */
+ #define SQ_BIND_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_BIND_V3_WQE_SIZE_SFT 0
+ /*
+ * If this bit is set, then the newly-bound memory window will be
+ * zero-based. If clear, then the newly-bound memory window will be
+ * non-zero-based.
+ */
+ #define SQ_BIND_V3_ZERO_BASED UINT32_C(0x40)
+ /*
+ * If type1 is specified, then this WQE performs a "bind memory
+ * window" operation on a type1 window. If type2 is specified, then
+ * this WQE performs a "post send bind memory window" operation on a
+ * type2 window.
+ *
+ * Note that the bind WQE cannot change the type of the memory
+ * window.
+ *
+ * If a "bind memory window" operation is attempted on a memory
+ * window that was allocated as type2, then the bind will fail with
+ * an errored completion, as "bind memory window" is allowed only on
+ * type1 memory windows.
+ *
+ * Similarly, if a "post send bind memory window" operation is
+ * attempted on a memory window that was allocated as type1, then the
+	 * bind will fail with an errored completion, as "post send bind
+ * memory window" is allowed only on type2 memory windows.
+ */
+ #define SQ_BIND_V3_MW_TYPE UINT32_C(0x80)
+ /* Type 1 Bind Memory Window */
+ #define SQ_BIND_V3__TYPE1 (UINT32_C(0x0) << 7)
+ /* Type 2 Post Send Bind Memory Window */
+ #define SQ_BIND_V3__TYPE2 (UINT32_C(0x1) << 7)
+ #define SQ_BIND_V3__LAST SQ_BIND_V3__TYPE2
+ /*
+ * This is the new access control for the MR. '1' means
+	 * the operation is allowed. '0' means the operation is
+ * not allowed.
+ */
+ uint8_t access_cntl;
+ /*
+ * Local Write Access.
+ *
+ * Local accesses are never allowed for memory windows, so this
+ * bit must always be zero in a bind WQE. If this bit is ever
+ * set, the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_V3_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
+ /* Remote Read Access */
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_READ UINT32_C(0x2)
+ /*
+ * Remote Write Access.
+ *
+ * Note that, if this bit is set, then the parent region to which
+ * the window is being bound must allow local writes. If this is not
+ * the case, then the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
+ /*
+ * Remote Atomic Access.
+ *
+ * Note that, if this bit is set, then the parent region to which
+ * the window is being bound must allow local writes. If this is not
+ * the case, then the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_V3_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
+ /*
+ * Window Binding Allowed.
+ *
+ * It is never allowed to bind windows to windows, so this bit
+ * must always be zero in a bind WQE. If this bit is ever set,
+ * the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_V3_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * The L_Key of the parent MR; 24 msb of the key are used to
+ * index the MRW table, 8 lsb are compared with the 8 bit key
+ * in the MRWC.
+ */
+ uint32_t parent_l_key;
+ /*
+ * Local Key; 24 msb of the key are used to index the memory
+	 * window being bound in the MRW table, 8 lsb are assigned to the
+ * 8 bit key_lsb field in the MRWC.
+ */
+ uint32_t l_key;
+ /* Local Virtual Address */
+ uint64_t va;
+ /*
+ * Length in bytes of registered MW; 40 bits as this is the max
+ * size of an MR/W
+ */
+ uint64_t length;
+} sq_bind_v3_t, *psq_bind_v3_t;
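+
+/*
+ * A minimal sketch (assumed helper, not vendor-provided): binding a
+ * type2 memory window. Only remote access bits are legal for windows,
+ * and remote write/atomic require that the parent MR allows local
+ * write, as noted above.
+ */
+static inline void
+sq_bind_v3_prep_type2(sq_bind_v3_t *wqe, uint32_t parent_l_key,
+    uint32_t l_key, uint64_t va, uint64_t length)
+{
+	wqe->wqe_type = SQ_BIND_V3_WQE_TYPE_BIND_V3;
+	wqe->flags = SQ_BIND_V3_FLAGS_SIGNAL_COMP;
+	/* Two 16B slots, non-zero-based, type2 window. */
+	wqe->wqe_size_zero_based_mw_type = 2 | SQ_BIND_V3__TYPE2;
+	wqe->access_cntl = SQ_BIND_V3_ACCESS_CNTL_REMOTE_READ |
+	    SQ_BIND_V3_ACCESS_CNTL_REMOTE_WRITE;
+	wqe->parent_l_key = parent_l_key;
+	wqe->l_key = l_key;
+	wqe->va = va;
+	wqe->length = length;	/* At most 40 bits. */
+}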
+
+/*
+ * SQ Bind WQE V3 header. This WQE can perform either:
+ * * type1 "bind memory window", if mw_type==Type1
+ * * type2 "post send bind memory window", if mw_type==Type2
+ */
+/* sq_bind_hdr_v3 (size:256b/32B) */
+
+typedef struct sq_bind_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /*
+ * Memory Bind V3
+ *
+ * Allowed only on reliable connection (RC) SQs.
+ */
+ #define SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3 UINT32_C(0x1c)
+ #define SQ_BIND_HDR_V3_WQE_TYPE_LAST SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3
+ uint8_t flags;
+ /*
+	 * Set if completion signaling is requested. If this bit is
+	 * 0, and the SQ is configured to support Unsignaled
+	 * completion, the controller should not generate a CQE
+	 * unless there was an error. This refers to the CQE on the
+	 * sender side. (The se flag refers to the receiver side.)
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a local memory
+ * operation WQE.
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_BIND_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ uint8_t wqe_size_zero_based_mw_type;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Bind WQE, this field will always have a value of 2.
+ */
+ #define SQ_BIND_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_BIND_HDR_V3_WQE_SIZE_SFT 0
+ /*
+ * If this bit is set, then the newly-bound memory window will be
+ * zero-based. If clear, then the newly-bound memory window will be
+ * non-zero-based.
+ */
+ #define SQ_BIND_HDR_V3_ZERO_BASED UINT32_C(0x40)
+ /*
+ * If type1 is specified, then this WQE performs a "bind memory
+ * window" operation on a type1 window. If type2 is specified, then
+ * this WQE performs a "post send bind memory window" operation on a
+ * type2 window.
+ *
+ * Note that the bind WQE cannot change the type of the memory
+ * window.
+ *
+ * If a "bind memory window" operation is attempted on a memory
+ * window that was allocated as type2, then the bind will fail with
+ * an errored completion, as "bind memory window" is allowed only on
+ * type1 memory windows.
+ *
+ * Similarly, if a "post send bind memory window" operation is
+ * attempted on a memory window that was allocated as type1, then the
+	 * bind will fail with an errored completion, as "post send bind
+ * memory window" is allowed only on type2 memory windows.
+ */
+ #define SQ_BIND_HDR_V3_MW_TYPE UINT32_C(0x80)
+ /* Type 1 Bind Memory Window */
+ #define SQ_BIND_HDR_V3__TYPE1 (UINT32_C(0x0) << 7)
+ /* Type 2 Post Send Bind Memory Window */
+ #define SQ_BIND_HDR_V3__TYPE2 (UINT32_C(0x1) << 7)
+ #define SQ_BIND_HDR_V3__LAST SQ_BIND_HDR_V3__TYPE2
+ /*
+ * This is the new access control for the MR. '1' means
+	 * the operation is allowed. '0' means the operation is
+ * not allowed.
+ */
+ uint8_t access_cntl;
+ /*
+ * Local Write Access.
+ *
+ * Local accesses are never allowed for memory windows, so this
+ * bit must always be zero in a bind WQE. If this bit is ever
+ * set, the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_LOCAL_WRITE UINT32_C(0x1)
+ /* Remote Read Access */
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_READ UINT32_C(0x2)
+ /*
+ * Remote Write Access.
+ *
+ * Note that, if this bit is set, then the parent region to which
+ * the window is being bound must allow local writes. If this is not
+ * the case, then the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_WRITE UINT32_C(0x4)
+ /*
+ * Remote Atomic Access.
+ *
+ * Note that, if this bit is set, then the parent region to which
+ * the window is being bound must allow local writes. If this is not
+ * the case, then the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_ATOMIC UINT32_C(0x8)
+ /*
+ * Window Binding Allowed.
+ *
+ * It is never allowed to bind windows to windows, so this bit
+ * must always be zero in a bind WQE. If this bit is ever set,
+ * the bind will fail with an errored completion.
+ */
+ #define SQ_BIND_HDR_V3_ACCESS_CNTL_WINDOW_BIND UINT32_C(0x10)
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /*
+ * The L_Key of the parent MR; 24 msb of the key are used to
+ * index the MRW table, 8 lsb are compared with the 8 bit key
+ * in the MRWC.
+ */
+ uint32_t parent_l_key;
+ /*
+ * Local Key; 24 msb of the key are used to index the memory
+ * window being bound in the MRW table, 8 lsb are assigned to the
+ * 8 bit key_lsb field in the MRWC.
+ */
+ uint32_t l_key;
+ /* Local Virtual Address */
+ uint64_t va;
+ /*
+ * Length in bytes of registered MW; 40 bits as this is the max
+ * size of an MR/W
+ */
+ uint64_t length;
+} sq_bind_hdr_v3_t, *psq_bind_hdr_v3_t;
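+
+/*
+ * Editor's sketch (not part of the generated header): one way a
+ * driver might fill the bind WQE described above, here binding a
+ * type2 window with remote read/write access. The helper name and
+ * its arguments are hypothetical; assumes <stdint.h> and <string.h>,
+ * and omits any endianness conversion the hardware may require.
+ */
+static inline void
+example_fill_bind_wqe_v3(sq_bind_hdr_v3_t *wqe, uint32_t parent_l_key,
+    uint32_t l_key, uint64_t va, uint64_t len, uint32_t opaque)
+{
+	memset(wqe, 0, sizeof(*wqe));
+	wqe->wqe_type = SQ_BIND_HDR_V3_WQE_TYPE_BIND_V3;
+	/* Request a CQE so the bind can be confirmed. */
+	wqe->flags = SQ_BIND_HDR_V3_FLAGS_SIGNAL_COMP;
+	/* The bind WQE is always two 16B chunks; bind a type2 window. */
+	wqe->wqe_size_zero_based_mw_type =
+	    (2 << SQ_BIND_HDR_V3_WQE_SIZE_SFT) | SQ_BIND_HDR_V3__TYPE2;
+	/*
+	 * Remote write requires the parent MR to allow local writes;
+	 * LOCAL_WRITE and WINDOW_BIND must stay zero here.
+	 */
+	wqe->access_cntl = SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_READ |
+	    SQ_BIND_HDR_V3_ACCESS_CNTL_REMOTE_WRITE;
+	wqe->opaque = opaque;
+	wqe->parent_l_key = parent_l_key;
+	wqe->l_key = l_key;
+	wqe->va = va;
+	wqe->length = len;
+}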
+
+/*
+ * This is the Change UDP Source Port WQE V3 structure. It is supported
+ * for both RC and UD QPs.
+ *
+ * It is recommended to set the uc_fence flag for this WQE, so that the
+ * source port does not change while there are unacknowledged packets.
+ */
+/* sq_change_udpsrcport_v3 (size:128b/16B) */
+
+typedef struct sq_change_udpsrcport_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* Change UDP Source Port V3 */
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 UINT32_C(0x1e)
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support Unsignaled
+ * completion, the controller should not generate a CQE
+ * unless there was an error. This refers to the CQE on the
+ * sender side (the se flag refers to the receiver side).
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ *
+ * It is recommended to set this flag for Change UDP Source Port
+ * WQEs.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Change UDP Source Port WQE, this field will always have
+ * a value of 1.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_CHANGE_UDPSRCPORT_V3_WQE_SIZE_SFT 0
+ uint8_t reserved_1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /* The new value for the QP's UDP source port. */
+ uint16_t udp_src_port;
+ uint16_t reserved_2;
+ uint32_t reserved_3;
+} sq_change_udpsrcport_v3_t, *psq_change_udpsrcport_v3_t;
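+
+/*
+ * Editor's sketch (not part of the generated header): building the
+ * Change UDP Source Port WQE with the unconditional fence that the
+ * comment above recommends. The helper name is hypothetical; assumes
+ * <string.h> and omits any required endianness conversion.
+ */
+static inline void
+example_fill_change_udpsrcport_v3(sq_change_udpsrcport_v3_t *wqe,
+    uint16_t new_port, uint32_t opaque)
+{
+	memset(wqe, 0, sizeof(*wqe));
+	wqe->wqe_type = SQ_CHANGE_UDPSRCPORT_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3;
+	/*
+	 * UC_FENCE keeps the source port stable while earlier WQEs may
+	 * still have unacknowledged packets in flight.
+	 */
+	wqe->flags = SQ_CHANGE_UDPSRCPORT_V3_FLAGS_UC_FENCE |
+	    SQ_CHANGE_UDPSRCPORT_V3_FLAGS_SIGNAL_COMP;
+	wqe->wqe_size = 1;	/* always one 16B chunk for this WQE */
+	wqe->opaque = opaque;
+	wqe->udp_src_port = new_port;
+}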
+
+/* SQ Change UDP Source Port WQE V3 header */
+/* sq_change_udpsrcport_hdr_v3 (size:128b/16B) */
+
+typedef struct sq_change_udpsrcport_hdr_v3 {
+ /* This field defines the type of SQ WQE. */
+ uint8_t wqe_type;
+ /* Change UDP Source Port V3 */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3 UINT32_C(0x1e)
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_LAST SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_TYPE_CHANGE_UDPSRCPORT_V3
+ uint8_t flags;
+ /*
+ * Set if completion signaling is requested. If this bit is
+ * 0, and the SQ is configured to support Unsignaled
+ * completion, the controller should not generate a CQE
+ * unless there was an error. This refers to the CQE on the
+ * sender side (the se flag refers to the receiver side).
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SIGNAL_COMP UINT32_C(0x1)
+ /*
+ * Indication to complete all previous RDMA Read or Atomic
+ * WQEs on the SQ before executing this WQE
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_RD_OR_ATOMIC_FENCE UINT32_C(0x2)
+ /*
+ * Unconditional fence. Indication to complete all previous
+ * SQ's WQEs before executing this WQE.
+ *
+ * It is recommended to set this flag for Change UDP Source Port
+ * WQEs.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_UC_FENCE UINT32_C(0x4)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_SE UINT32_C(0x8)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_INLINE UINT32_C(0x10)
+ /*
+ * This flag is not applicable and should be 0 for a Change UDP
+ * Source Port WQE.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_WQE_TS_EN UINT32_C(0x20)
+ /*
+ * When set to '1', this operation will cause a trace capture in
+ * each block it passes through.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_FLAGS_DEBUG_TRACE UINT32_C(0x40)
+ uint8_t wqe_size;
+ /*
+ * The size of the WQE in units of 16B chunks.
+ *
+ * For the Change UDP Source Port WQE, this field will always have
+ * a value of 1.
+ */
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_MASK UINT32_C(0x3f)
+ #define SQ_CHANGE_UDPSRCPORT_HDR_V3_WQE_SIZE_SFT 0
+ uint8_t reserved_1;
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+ /* The new value for the QP's UDP source port. */
+ uint16_t udp_src_port;
+ uint16_t reserved_2;
+ uint32_t reserved_3;
+} sq_change_udpsrcport_hdr_v3_t, *psq_change_udpsrcport_hdr_v3_t;
+
/* RQ/SRQ WQE */
/* rq_wqe (size:1024b/128B) */
@@ -72641,7 +84514,7 @@ typedef struct rq_wqe {
/* wqe_type is 8 b */
uint8_t wqe_type;
/*
- * RQ/SRQ WQE. This WQE is used for posting buffers on
+ * RQ/SRQ WQE. This WQE is used for posting buffers on
* an RQ or SRQ.
*/
#define RQ_WQE_WQE_TYPE_RCV UINT32_C(0x80)
@@ -72650,7 +84523,7 @@ typedef struct rq_wqe {
uint8_t flags;
/*
* Specify the total number 16B chunks that make up the valid
- * portion of the WQE. This includes the first chunk that is the
+ * portion of the WQE. This includes the first chunk that is the
* WQE structure and up to 6 SGE structures.
*
* While the valid area is defined by the wqe_size field, the
@@ -72682,7 +84555,7 @@ typedef struct rq_wqe_hdr {
/* wqe_type is 8 b */
uint8_t wqe_type;
/*
- * RQ/SRQ WQE. This WQE is used for posting buffers on
+ * RQ/SRQ WQE. This WQE is used for posting buffers on
* an RQ or SRQ.
*/
#define RQ_WQE_HDR_WQE_TYPE_RCV UINT32_C(0x80)
@@ -72691,7 +84564,7 @@ typedef struct rq_wqe_hdr {
uint8_t flags;
/*
* Specify the total number 16B chunks that make up the valid
- * portion of the WQE. This includes the first chunk that is the
+ * portion of the WQE. This includes the first chunk that is the
* WQE structure and up to 6 SGE structures.
*
* While the valid area is defined by the wqe_size field, the
@@ -72711,6 +84584,65 @@ typedef struct rq_wqe_hdr {
uint8_t reserved128[16];
} rq_wqe_hdr_t, *prq_wqe_hdr_t;
+/* RQ/SRQ WQE V3 */
+/* rq_wqe_v3 (size:4096b/512B) */
+
+typedef struct rq_wqe_v3 {
+ /* wqe_type is 8 b */
+ uint8_t wqe_type;
+ /*
+ * RQ/SRQ WQE V3. This WQE is used for posting buffers on
+ * an RQ or SRQ.
+ */
+ #define RQ_WQE_V3_WQE_TYPE_RCV_V3 UINT32_C(0x90)
+ #define RQ_WQE_V3_WQE_TYPE_LAST RQ_WQE_V3_WQE_TYPE_RCV_V3
+ /* No flags supported for this WQE type. */
+ uint8_t flags;
+ /*
+ * Specify the total number of 16B chunks that make up the valid portion
+ * of the WQE. This includes the first chunk that is the WQE
+ * structure and up to 30 SGE structures. The maximum value for this
+ * field is 32, representing a maximum-sized WQE of 512B.
+ */
+ uint8_t wqe_size;
+ uint8_t reserved1;
+ /* This value will be returned in the completion. */
+ uint32_t opaque;
+ uint64_t reserved2;
+ /*
+ * The data field for an RQ WQE is filled with 1 to 30 SGE
+ * structures as defined by the wqe_size field.
+ */
+ uint32_t data[124];
+} rq_wqe_v3_t, *prq_wqe_v3_t;
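+
+/*
+ * Editor's sketch (not part of the generated header): initializing an
+ * RQ/SRQ V3 WQE header. It assumes each SGE structure occupies one
+ * 16B chunk, which is what the 30-SGE/512B arithmetic above implies;
+ * the helper name is hypothetical and endianness handling is omitted.
+ */
+static inline void
+example_fill_rq_wqe_v3(rq_wqe_v3_t *wqe, unsigned int num_sges,
+    uint32_t opaque)
+{
+	memset(wqe, 0, sizeof(*wqe));
+	wqe->wqe_type = RQ_WQE_V3_WQE_TYPE_RCV_V3;
+	wqe->flags = 0;		/* no flags defined for this WQE type */
+	/* One 16B header chunk plus one chunk per SGE (1 to 30). */
+	wqe->wqe_size = (uint8_t)(1 + num_sges);
+	wqe->opaque = opaque;
+	/* The caller then copies num_sges SGE structures into data[]. */
+}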
+
+/* RQ/SRQ WQE V3 header. */
+/* rq_wqe_hdr_v3 (size:128b/16B) */
+
+typedef struct rq_wqe_hdr_v3 {
+ /* wqe_type is 8 b */
+ uint8_t wqe_type;
+ /*
+ * RQ/SRQ WQE V3. This WQE is used for posting buffers on
+ * an RQ or SRQ.
+ */
+ #define RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3 UINT32_C(0x90)
+ #define RQ_WQE_HDR_V3_WQE_TYPE_LAST RQ_WQE_HDR_V3_WQE_TYPE_RCV_V3
+ /* No flags supported for this WQE type. */
+ uint8_t flags;
+ /*
+ * Specify the total number of 16B chunks that make up the valid portion
+ * of the WQE. This includes the first chunk that is the WQE
+ * structure and up to 30 SGE structures. The maximum value for this
+ * field is 32, representing a maximum-sized WQE of 512B.
+ */
+ uint8_t wqe_size;
+ uint8_t reserved1;
+ /* This value will be returned in the completion. */
+ uint32_t opaque;
+ uint64_t reserved2;
+} rq_wqe_hdr_v3_t, *prq_wqe_hdr_v3_t;
+
/* cq_base (size:256b/32B) */
typedef struct cq_base {
@@ -72719,14 +84651,14 @@ typedef struct cq_base {
uint64_t reserved64_3;
uint8_t cqe_type_toggle;
/*
- * Indicate valid completion - written by the chip. Cumulus
+ * Indicate valid completion - written by the chip. The NIC
* toggle this bit each time it finished consuming all PBL
- * entries
+ * entries.
*/
- #define CQ_BASE_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
- #define CQ_BASE_CQE_TYPE_MASK UINT32_C(0x1e)
- #define CQ_BASE_CQE_TYPE_SFT 1
+ #define CQ_BASE_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_BASE_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_BASE_CQE_TYPE_SFT 1
/*
* Requester completion - This is used for both RC and UD SQ
* completions.
@@ -72736,17 +84668,17 @@ typedef struct cq_base {
* Responder RC Completion - This is used for both RQ and SRQ
* completions for RC service QPs.
*/
- #define CQ_BASE_CQE_TYPE_RES_RC (UINT32_C(0x1) << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RC (UINT32_C(0x1) << 1)
/*
* Responder UD Completion - This is used for both RQ and SRQ
* completion for UD service QPs.
*/
- #define CQ_BASE_CQE_TYPE_RES_UD (UINT32_C(0x2) << 1)
+ #define CQ_BASE_CQE_TYPE_RES_UD (UINT32_C(0x2) << 1)
/*
* Responder RawEth and QP1 Completion - This is used for RQ
* completion for RawEth service and QP1 service QPs.
*/
- #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (UINT32_C(0x3) << 1)
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1 (UINT32_C(0x3) << 1)
/*
* Responder UD completion with CFA. This is used for both RQ
* and SQ completion for UD service QPs. It includes cfa fields
@@ -72754,22 +84686,188 @@ typedef struct cq_base {
*/
#define CQ_BASE_CQE_TYPE_RES_UD_CFA (UINT32_C(0x4) << 1)
/*
+ * Requester completion V3 - This is used for both RC and UD SQ
+ * completions.
+ */
+ #define CQ_BASE_CQE_TYPE_REQ_V3 (UINT32_C(0x8) << 1)
+ /*
+ * Responder RC Completion V3 - This is used for both RQ and SRQ
+ * completions for RC service QPs.
+ */
+ #define CQ_BASE_CQE_TYPE_RES_RC_V3 (UINT32_C(0x9) << 1)
+ /*
+ * Responder UD Completion V3 - This is used for both RQ and SRQ
+ * completion for UD service QPs. It is also used for QP1 QPs
+ * that are treated as UD.
+ */
+ #define CQ_BASE_CQE_TYPE_RES_UD_V3 (UINT32_C(0xa) << 1)
+ /*
+ * Responder RawEth and QP1 Completion V3 - This is used for RQ and
+ * SRQ completion for RawEth service. It is also used for QP1 QPs
+ * that are treated as RawEth.
+ */
+ #define CQ_BASE_CQE_TYPE_RES_RAWETH_QP1_V3 (UINT32_C(0xb) << 1)
+ /*
+ * Responder UD Completion with CFA V3 - This is used for both RQ
+ * and SRQ completion for UD service QPs. It includes CFA fields
+ * (some of which carry VLAN information), in place of the QP
+ * handle. It is also used for QP1 QPs that are treated as UD.
+ */
+ #define CQ_BASE_CQE_TYPE_RES_UD_CFA_V3 (UINT32_C(0xc) << 1)
+ /*
* NO_OP completion - This is used to indicate that no
- * operation completion.
+ * operation completed.
*/
- #define CQ_BASE_CQE_TYPE_NO_OP (UINT32_C(0xd) << 1)
+ #define CQ_BASE_CQE_TYPE_NO_OP (UINT32_C(0xd) << 1)
/*
* Terminal completion - This is used to indicate that no
* further completions will be made for this QP on this CQ.
*/
#define CQ_BASE_CQE_TYPE_TERMINAL (UINT32_C(0xe) << 1)
- /* Cut off CQE; for CQ resize see CQ and SRQ Resize */
- #define CQ_BASE_CQE_TYPE_CUT_OFF (UINT32_C(0xf) << 1)
- #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
+ /*
+ * Cut off CQE; for CQ resize. This CQE is written to the "old"
+ * CQ as the last CQE written. SW may use this to know when the
+ * "old" CQ can be destroyed.
+ */
+ #define CQ_BASE_CQE_TYPE_CUT_OFF (UINT32_C(0xf) << 1)
+ #define CQ_BASE_CQE_TYPE_LAST CQ_BASE_CQE_TYPE_CUT_OFF
/* This field indicates the status for the CQE. */
uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_BASE_STATUS_OK UINT32_C(0x0)
+ /*
+ * An unexpected BTH opcode or a First/Middle packet that is not
+ * the full MTU size was returned by the responder.
+ *
+ * This is a fatal error detected by the requester Rx.
+ */
+ #define CQ_BASE_STATUS_BAD_RESPONSE_ERR UINT32_C(0x1)
+ /*
+ * Generated for a WQE posted to the local SQ when the sum of the
+ * lengths of the SGEs in the WQE exceeds the maximum message
+ * length of 2^31 bytes.
+ *
+ * Generated for a WQE posted to the local RQ/SRQ when the sum of
+ * the lengths of the SGEs in the WQE is too small to receive the
+ * (valid) incoming message or the length of the incoming message
+ * is greater than the maximum message size supported.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_BASE_STATUS_LOCAL_LENGTH_ERR UINT32_C(0x2)
+ /*
+ * This indicates that the packet was too long for the WQE provided
+ * on the SRQ/RQ.
+ *
+ * This is not a fatal error. All the fields in the CQE are valid.
+ */
+ #define CQ_BASE_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x3)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_BASE_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_BASE_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * A protection error occurred on a local data buffer during the
+ * processing of an RDMA Write with Immediate Data operation sent
+ * from the remote node.
+ *
+ * This is a fatal error detected by the responder Rx. Only the
+ * opaque field in the CQE is valid.
+ */
+ #define CQ_BASE_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x6)
+ /*
+ * The SSC detected an error on a local memory operation from the
+ * SQ (fast-register, local invalidate, or bind).
+ *
+ * This is a fatal error detected by the requester Tx.
+ */
+ #define CQ_BASE_STATUS_MEMORY_MGT_OPERATION_ERR UINT32_C(0x7)
+ /*
+ * An invalid message was received by the responder. This could be
+ * an operation that is not supported by this QP, an IRRQ overflow
+ * error, or the length in an RDMA operation is greater than the
+ * maximum message size (2^31 bytes).
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Invalid Request. For responder
+ * CQEs, only the opaque field is valid.
+ */
+ #define CQ_BASE_STATUS_REMOTE_INVALID_REQUEST_ERR UINT32_C(0x8)
+ /*
+ * A protection error occurred on a remote data buffer to be read
+ * by an RDMA Read, written by an RDMA Write or accessed by an
+ * atomic operation. This error is reported only on RDMA operations
+ * or atomic operations.
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Remote Access Violation.
+ */
+ #define CQ_BASE_STATUS_REMOTE_ACCESS_ERR UINT32_C(0x9)
+ /*
+ * The operation could not be completed successfully by the
+ * responder. Possible causes include an RQ/SRQ WQE format error,
+ * an SSC error when validating an SGE from an RQ/SRQ WQE, or the
+ * message received was too long for the RQ/SRQ WQE.
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Remote Operation Error.
+ */
+ #define CQ_BASE_STATUS_REMOTE_OPERATION_ERR UINT32_C(0xa)
+ /*
+ * The RNR NAK retry count was exceeded while trying to send this
+ * message.
+ *
+ * This is a fatal error detected by the requester.
+ */
+ #define CQ_BASE_STATUS_RNR_NAK_RETRY_CNT_ERR UINT32_C(0xb)
+ /*
+ * The local transport timeout retry counter was exceeded while
+ * trying to send this message.
+ *
+ * This is a fatal error detected by the requester.
+ */
+ #define CQ_BASE_STATUS_TRANSPORT_RETRY_CNT_ERR UINT32_C(0xc)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_BASE_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE had already been taken off the RQ/SRQ when a fatal error
+ * was detected on responder Rx. Only the opaque field in the CQE
+ * is valid.
+ */
+ #define CQ_BASE_STATUS_HW_FLUSH_ERR UINT32_C(0xe)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_BASE_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_BASE_STATUS_LAST CQ_BASE_STATUS_OVERFLOW_ERR
uint16_t reserved16;
- uint32_t reserved32;
+ /*
+ * This value is from the WQE that is being completed. This field is
+ * only applicable to V3 CQEs.
+ */
+ uint32_t opaque;
} cq_base_t, *pcq_base_t;
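
/*
 * Editor's sketch (not part of the generated header): minimal helpers
 * for consuming CQEs through the cq_base view above. expected_toggle
 * is the consumer's phase bit, flipped each time it wraps the CQ ring;
 * a real poller must also order the toggle check before reading the
 * rest of the CQE (read barrier), which is omitted here.
 */
static inline int
example_cqe_valid(const cq_base_t *cqe, uint8_t expected_toggle)
{
	return ((cqe->cqe_type_toggle & CQ_BASE_TOGGLE) == expected_toggle);
}

static inline uint8_t
example_cqe_type(const cq_base_t *cqe)
{
	/*
	 * The CQ_BASE_CQE_TYPE_* values are already shifted, so the
	 * masked byte compares directly (e.g. CQ_BASE_CQE_TYPE_REQ_V3).
	 */
	return (cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK);
}
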
/* Requester CQ CQE */
@@ -72783,7 +84881,7 @@ typedef struct cq_req {
uint64_t qp_handle;
/*
* SQ Consumer Index - points to the entry just past the last WQE
- * that has been completed by the chip. Wraps around at
+ * that has been completed by the chip. Wraps around at
* QPC.sq_size (i.e. the valid range of the SQ Consumer Index is 0
* to (QPC.sq_size - 1)).
*/
@@ -72798,7 +84896,7 @@ typedef struct cq_req {
* entries
*/
#define CQ_REQ_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_REQ_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_REQ_CQE_TYPE_SFT 1
/*
@@ -72883,7 +84981,7 @@ typedef struct cq_res_rc {
* entries
*/
#define CQ_RES_RC_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_RC_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_RC_CQE_TYPE_SFT 1
/*
@@ -72989,7 +85087,7 @@ typedef struct cq_res_ud {
* entries
*/
#define CQ_RES_UD_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_UD_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_UD_CQE_TYPE_SFT 1
/*
@@ -73009,7 +85107,7 @@ typedef struct cq_res_ud {
* This indicates that write access was not allowed for
* at least one of the SGEs in the WQE.
*
- * This is a fatal error. Only the srq_or_rq_wr_id is field
+ * This is a fatal error. Only the srq_or_rq_wr_id field
* is valid.
*/
#define CQ_RES_UD_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x1)
@@ -73017,7 +85115,7 @@ typedef struct cq_res_ud {
* This indicates that the packet was too long for the WQE
* provided on the SRQ/RQ.
*
- * This is not a fatal error. All the fields are valid.
+ * This is not a fatal error. All the fields are valid.
*/
#define CQ_RES_UD_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x2)
/* LOCAL_PROTECTION_ERR is 3 */
@@ -73061,7 +85159,7 @@ typedef struct cq_res_ud {
*/
#define CQ_RES_UD_FLAGS_META_FORMAT_MASK UINT32_C(0x3c0)
#define CQ_RES_UD_FLAGS_META_FORMAT_SFT 6
- /* No metadata information. Value is zero. */
+ /* No metadata information. Value is zero. */
#define CQ_RES_UD_FLAGS_META_FORMAT_NONE (UINT32_C(0x0) << 6)
/*
* The metadata field contains the VLAN tag and TPID value.
@@ -73159,7 +85257,7 @@ typedef struct cq_res_ud_v2 {
* entries
*/
#define CQ_RES_UD_V2_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_UD_V2_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_UD_V2_CQE_TYPE_SFT 1
/*
@@ -73179,7 +85277,7 @@ typedef struct cq_res_ud_v2 {
* This indicates that write access was not allowed for
* at least one of the SGEs in the WQE.
*
- * This is a fatal error. Only the srq_or_rq_wr_id is field
+ * This is a fatal error. Only the srq_or_rq_wr_id field
* is valid.
*/
#define CQ_RES_UD_V2_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x1)
@@ -73187,7 +85285,7 @@ typedef struct cq_res_ud_v2 {
* This indicates that the packet was too long for the WQE
* provided on the SRQ/RQ.
*
- * This is not a fatal error. All the fields are valid.
+ * This is not a fatal error. All the fields are valid.
*/
#define CQ_RES_UD_V2_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x2)
/* LOCAL_PROTECTION_ERR is 3 */
@@ -73228,7 +85326,7 @@ typedef struct cq_res_ud_v2 {
/* The field indicates what format the metadata field is. */
#define CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK UINT32_C(0x3c0)
#define CQ_RES_UD_V2_FLAGS_META_FORMAT_SFT 6
- /* No metadata information. Value is zero. */
+ /* No metadata information. Value is zero. */
#define CQ_RES_UD_V2_FLAGS_META_FORMAT_NONE (UINT32_C(0x0) << 6)
/*
* The {metadata1, metadata0} fields contain the vtag
@@ -73326,7 +85424,7 @@ typedef struct cq_res_ud_cfa {
uint32_t qid;
/*
* This value indicates the QPID associated with this operation.
- * The driver will use the qid from thie CQE to map a QP handle
+ * The driver will use the qid from this CQE to map a QP handle
* in the completion record returned to the application.
*/
#define CQ_RES_UD_CFA_QID_MASK UINT32_C(0xfffff)
@@ -73361,7 +85459,7 @@ typedef struct cq_res_ud_cfa {
* entries
*/
#define CQ_RES_UD_CFA_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_UD_CFA_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_UD_CFA_CQE_TYPE_SFT 1
/*
@@ -73524,7 +85622,7 @@ typedef struct cq_res_ud_cfa_v2 {
uint32_t qid;
/*
* This value indicates the QPID associated with this operation.
- * The driver will use the qid from thie CQE to map a QP handle
+ * The driver will use the qid from this CQE to map a QP handle
* in the completion record returned to the application.
*/
#define CQ_RES_UD_CFA_V2_QID_MASK UINT32_C(0xfffff)
@@ -73556,7 +85654,7 @@ typedef struct cq_res_ud_cfa_v2 {
* entries
*/
#define CQ_RES_UD_CFA_V2_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_UD_CFA_V2_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_UD_CFA_V2_CQE_TYPE_SFT 1
/*
@@ -73627,7 +85725,7 @@ typedef struct cq_res_ud_cfa_v2 {
/* The field indicates what format the metadata field is. */
#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_MASK UINT32_C(0x3c0)
#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_SFT 6
- /* No metadata information. Value is zero. */
+ /* No metadata information. Value is zero. */
#define CQ_RES_UD_CFA_V2_FLAGS_META_FORMAT_NONE (UINT32_C(0x0) << 6)
/*
* The {metadata1, metadata0} fields contain the vtag
@@ -73720,7 +85818,7 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_SFT 0
/*
* When this bit is '1', it indicates a packet that has an
- * error of some type. Type of error is indicated in
+ * error of some type. Type of error is indicated in
* raweth_qp1_errors.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ERROR UINT32_C(0x1)
@@ -73744,31 +85842,36 @@ typedef struct cq_res_raweth_qp1 {
/*
* TCP Packet:
* Indicates that the packet was IP and TCP.
- * This indicates that the raweth_qp1_payload_offset field is valid.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 6)
/*
* UDP Packet:
* Indicates that the packet was IP and UDP.
- * This indicates that the raweth_qp1_payload_offset field is valid.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 6)
/*
* FCoE Packet:
* Indicates that the packet was recognized as a FCoE.
- * This also indicates that the raweth_qp1_payload_offset field is valid.
+ * This also indicates that the raweth_qp1_payload_offset field
+ * is valid.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 6)
/*
* RoCE Packet:
* Indicates that the packet was recognized as a RoCE.
- * This also indicates that the raweth_qp1_payload_offset field is valid.
+ * This also indicates that the raweth_qp1_payload_offset field
+ * is valid.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 6)
/*
* ICMP Packet:
* Indicates that the packet was recognized as ICMP.
- * This indicates that the raweth_qp1_payload_offset field is valid.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 6)
/*
@@ -73807,7 +85910,7 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
/*
* This indicates that there was a CRC error on either an FCoE
- * or RoCE packet. The itype indicates the packet type.
+ * or RoCE packet. The itype indicates the packet type.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_ERRORS_CRC_ERROR UINT32_C(0x100)
/*
@@ -73939,23 +86042,27 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC UINT32_C(0x2)
/*
* This indicates that the ip checksum was calculated for the
- * tunnel header and that the t_ip_cs_error field indicates if there
- * was an error.
+ * tunnel header and that the t_ip_cs_error field indicates if
+ * there was an error.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
/*
* This indicates that the UDP checksum was
- * calculated for the tunnel packet and that the t_l4_cs_error field
- * indicates if there was an error.
+ * calculated for the tunnel packet and that the t_l4_cs_error
+ * field indicates if there was an error.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
- /* This value indicates what format the raweth_qp1_metadata field is. */
+ /*
+ * This value indicates what format the raweth_qp1_metadata field
+ * is.
+ */
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
- /* No metadata information. Value is zero. */
+ /* No metadata information. Value is zero. */
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
/*
- * The raweth_qp1_metadata field contains the VLAN tag and TPID value.
+ * The raweth_qp1_metadata field contains the VLAN tag and TPID
+ * value.
* - raweth_qp1_metadata[11:0] contains the vlan VID value.
* - raweth_qp1_metadata[12] contains the vlan DE value.
* - raweth_qp1_metadata[15:13] contains the vlan PRI value.
@@ -73992,7 +86099,7 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
/*
* This field indicates the IP type for the inner-most IP header.
- * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
* This value is only valid if itype indicates a packet
* with an IP header.
*/
@@ -74009,9 +86116,9 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_MASK UINT32_C(0xc00)
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_EXT_META_FORMAT_SFT 10
/*
- * This value is the complete 1's complement checksum calculated from
- * the start of the outer L3 header to the end of the packet (not
- * including the ethernet crc). It is valid when the
+ * This value is the complete 1's complement checksum calculated
+ * from the start of the outer L3 header to the end of the packet
+ * (not including the ethernet crc). It is valid when the
* 'complete_checksum_calc' flag is set.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK UINT32_C(0xffff0000)
@@ -74041,7 +86148,7 @@ typedef struct cq_res_raweth_qp1 {
* entries
*/
#define CQ_RES_RAWETH_QP1_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_RAWETH_QP1_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_RAWETH_QP1_CQE_TYPE_SFT 1
/*
@@ -74061,7 +86168,7 @@ typedef struct cq_res_raweth_qp1 {
* This indicates that write access was not allowed for
* at least one of the SGEs in the WQE.
*
- * This is a fatal error. Only the srq_or_rq_wr_id is field
+ * This is a fatal error. Only the srq_or_rq_wr_id field
* is valid.
*/
#define CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x1)
@@ -74069,7 +86176,7 @@ typedef struct cq_res_raweth_qp1 {
* This indicates that the packet was too long for the WQE
* provided on the RQ.
*
- * This is not a fatal error. All the fields are valid.
+ * This is not a fatal error. All the fields are valid.
*/
#define CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x2)
/* LOCAL_PROTECTION_ERR is 3 */
@@ -74103,10 +86210,9 @@ typedef struct cq_res_raweth_qp1 {
#define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK UINT32_C(0xfffff)
#define CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_SFT 0
/*
- * This value indicates the offset in bytes from the beginning of the packet
- * where the inner payload starts. This value is valid for TCP, UDP,
- * FCoE, and RoCE packets.
- *
+ * This value indicates the offset in bytes from the beginning of the
+ * packet where the inner payload starts. This value is valid for
+ * TCP, UDP, FCoE, and RoCE packets.
* A value of zero indicates an offset of 256 bytes.
*/
#define CQ_RES_RAWETH_QP1_RAWETH_QP1_PAYLOAD_OFFSET_MASK UINT32_C(0xff000000)
@@ -74129,7 +86235,7 @@ typedef struct cq_res_raweth_qp1_v2 {
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_SFT 0
/*
* When this bit is '1', it indicates a packet that has an
- * error of some type. Type of error is indicated in
+ * error of some type. Type of error is indicated in
* raweth_qp1_errors.
*/
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS_ERROR UINT32_C(0x1)
@@ -74221,7 +86327,7 @@ typedef struct cq_res_raweth_qp1_v2 {
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
/*
* This indicates that there was a CRC error on either an FCoE
- * or RoCE packet. The itype indicates the packet type.
+ * or RoCE packet. The itype indicates the packet type.
*/
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_ERRORS_CRC_ERROR UINT32_C(0x100)
/*
@@ -74415,7 +86521,7 @@ typedef struct cq_res_raweth_qp1_v2 {
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
/*
* This field indicates the IP type for the inner-most IP header.
- * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
* This value is only valid if itype indicates a packet
* with an IP header.
*/
@@ -74460,7 +86566,7 @@ typedef struct cq_res_raweth_qp1_v2 {
* entries
*/
#define CQ_RES_RAWETH_QP1_V2_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_RES_RAWETH_QP1_V2_CQE_TYPE_SFT 1
/*
@@ -74480,7 +86586,7 @@ typedef struct cq_res_raweth_qp1_v2 {
* This indicates that write access was not allowed for
* at least one of the SGEs in the WQE.
*
- * This is a fatal error. Only the srq_or_rq_wr_id is field
+ * This is a fatal error. Only the srq_or_rq_wr_id field
* is valid.
*/
#define CQ_RES_RAWETH_QP1_V2_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x1)
@@ -74488,7 +86594,7 @@ typedef struct cq_res_raweth_qp1_v2 {
* This indicates that the packet was too long for the WQE
* provided on the RQ.
*
- * This is not a fatal error. All the fields are valid.
+ * This is not a fatal error. All the fields are valid.
*/
#define CQ_RES_RAWETH_QP1_V2_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x2)
/* LOCAL_PROTECTION_ERR is 3 */
@@ -74543,7 +86649,7 @@ typedef struct cq_res_raweth_qp1_v2 {
#define CQ_RES_RAWETH_QP1_V2_CFA_METADATA1_VALID UINT32_C(0x800000)
/*
* This value indicates the offset in bytes from the beginning of
- * the packet where the inner payload starts. This value is valid
+ * the packet where the inner payload starts. This value is valid
* for TCP, UDP, FCoE, and RoCE packets.
*
* A value of zero indicates an offset of 256 bytes.
@@ -74552,7 +86658,10 @@ typedef struct cq_res_raweth_qp1_v2 {
#define CQ_RES_RAWETH_QP1_V2_RAWETH_QP1_PAYLOAD_OFFSET_SFT 24
} cq_res_raweth_qp1_v2_t, *pcq_res_raweth_qp1_v2_t;
-/* Terminal CQE */
+/*
+ * This is the terminal CQE structure. This CQE is generated to
+ * indicate that no further completions will be generated for this QP.
+ */
/* cq_terminal (size:256b/32B) */
typedef struct cq_terminal {
@@ -74562,12 +86671,12 @@ typedef struct cq_terminal {
*/
uint64_t qp_handle;
/*
- * Final SQ Consumer Index value. Any additional SQ WQEs will
+ * Final SQ Consumer Index value. Any additional SQ WQEs will
* have to be completed by the user provider.
*/
uint16_t sq_cons_idx;
/*
- * Final RQ Consumer Index value. Any additional RQ WQEs will
+ * Final RQ Consumer Index value. Any additional RQ WQEs will
* have to be completed by the user provider.
*/
uint16_t rq_cons_idx;
@@ -74580,7 +86689,7 @@ typedef struct cq_terminal {
* entries
*/
#define CQ_TERMINAL_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_TERMINAL_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_TERMINAL_CQE_TYPE_SFT 1
/*
@@ -74591,7 +86700,7 @@ typedef struct cq_terminal {
#define CQ_TERMINAL_CQE_TYPE_LAST CQ_TERMINAL_CQE_TYPE_TERMINAL
/* This field indicates the status for the CQE. */
uint8_t status;
- /* OK is 0 */
+ /* The operation completed successfully. */
#define CQ_TERMINAL_STATUS_OK UINT32_C(0x0)
#define CQ_TERMINAL_STATUS_LAST CQ_TERMINAL_STATUS_OK
uint16_t reserved16;
@@ -74607,12 +86716,12 @@ typedef struct cq_cutoff {
uint64_t reserved64_3;
uint8_t cqe_type_toggle;
/*
- * Indicate valid completion - written by the chip. Cumulus
- * toggle this bit each time it finished consuming all PBL
+ * Indicate valid completion - written by the chip. The NIC
+ * toggles this bit each time it has finished consuming all PBL
* entries
*/
#define CQ_CUTOFF_TOGGLE UINT32_C(0x1)
- /* This field defines the type of SQ WQE. */
+ /* This field defines the type of CQE. */
#define CQ_CUTOFF_CQE_TYPE_MASK UINT32_C(0x1e)
#define CQ_CUTOFF_CQE_TYPE_SFT 1
/* Cut off CQE; for CQ resize see CQ and SRQ Resize */
@@ -74623,7 +86732,9 @@ typedef struct cq_cutoff {
* acknowledge this CQ resize operation. When this CQE is
* processed, the driver should send a CQ_CUTOFF_ACK doorbell
* to the chip to let the chip know that the resize operation
- * is complete. This value is used by HW to detect old and
+ * is complete.
+ *
+ * This value is used by HW to detect old and
* stale CQ_CUTOFF_ACK doorbells that are caused by having
* a backup doorbell location or by PCI or other reordering
* problems. Only doorbells with the latest value will be honored.
@@ -74634,13 +86745,1227 @@ typedef struct cq_cutoff {
#define CQ_CUTOFF_RESIZE_TOGGLE_SFT 5
/* This field indicates the status for the CQE. */
uint8_t status;
- /* OK is 0 */
+ /* The operation completed successfully. */
#define CQ_CUTOFF_STATUS_OK UINT32_C(0x0)
#define CQ_CUTOFF_STATUS_LAST CQ_CUTOFF_STATUS_OK
uint16_t reserved16;
uint32_t reserved32;
} cq_cutoff_t, *pcq_cutoff_t;
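
/*
 * Editor's sketch (not part of the generated header): extracting the
 * resize toggle that must accompany the CQ_CUTOFF_ACK doorbell. Only
 * CQ_CUTOFF_RESIZE_TOGGLE_SFT is visible in this hunk, so the 2-bit
 * field width is an assumption; the doorbell write itself is not shown
 * since no doorbell API is defined here.
 */
static inline uint8_t
example_cq_cutoff_resize_toggle(const cq_cutoff_t *cqe)
{
	/* Bits [6:5] of cqe_type_toggle carry the resize toggle. */
	return ((cqe->cqe_type_toggle >> CQ_CUTOFF_RESIZE_TOGGLE_SFT) & 0x3);
}
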
+/* No-Op CQE */
+/* cq_no_op (size:256b/32B) */
+
+typedef struct cq_no_op {
+ uint64_t reserved64_1;
+ uint64_t reserved64_2;
+ uint64_t reserved64_3;
+ uint8_t cqe_type_toggle;
+ /*
+ * Indicate valid completion - written by the chip. The NIC
+ * toggles this bit each time it has finished consuming all PBL
+ * entries.
+ */
+ #define CQ_NO_OP_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_NO_OP_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_NO_OP_CQE_TYPE_SFT 1
+ /*
+ * NO-OP completion - This is used to indicate that no operation
+ * completed.
+ */
+ #define CQ_NO_OP_CQE_TYPE_NO_OP (UINT32_C(0xd) << 1)
+ #define CQ_NO_OP_CQE_TYPE_LAST CQ_NO_OP_CQE_TYPE_NO_OP
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_NO_OP_STATUS_OK UINT32_C(0x0)
+ #define CQ_NO_OP_STATUS_LAST CQ_NO_OP_STATUS_OK
+ uint16_t reserved16;
+ uint32_t reserved32;
+} cq_no_op_t, *pcq_no_op_t;
+
+/*
+ * This is the Requester CQE V3 structure. This is used to complete each
+ * signaled SQ WQE. The sq_cons_idx and opaque fields are used to indicate
+ * which WQE has been completed. When a WQE is completed, it indicates
+ * that all WQEs before it in the SQ are also completed without error.
+ * Space freed by completed WQEs can be used for new WQEs.
+ */
+/* cq_req_v3 (size:256b/32B) */
+
+typedef struct cq_req_v3 {
+ /*
+ * This is an application level ID used to identify the
+ * QP and its SQ and RQ.
+ */
+ uint64_t qp_handle;
+ /*
+ * SQ Consumer Index - points to the entry just past the last WQE
+ * that has been completed by the chip. Wraps around at QPC.sq_size
+ * (i.e. the valid range of the SQ Consumer Index is 0 to
+ * (QPC.sq_size - 1)). The sq_cons_idx is in 16B units (as is
+ * QPC.sq_size).
+ *
+ * User can determine available space in the SQ by comparing
+ * sq_cons_idx to a sq_prod_idx maintained by the user. When the two
+ * values are equal, the SQ is empty. When
+ * (sq_prod_idx+1)%QPC.sq_size==sq_cons_idx, the queue is full.
+ */
+ uint16_t sq_cons_idx;
+ uint16_t reserved1;
+ uint32_t reserved2;
+ uint64_t reserved3;
+ uint8_t cqe_type_toggle;
+ /*
+ * Indicate valid completion - written by the chip. The NIC
+ * toggles this bit each time it has finished consuming all PBL
+ * entries.
+ */
+ #define CQ_REQ_V3_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_REQ_V3_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_REQ_V3_CQE_TYPE_SFT 1
+ /*
+ * Requester completion V3 - This is used for both RC and UD SQ
+ * completions.
+ */
+ #define CQ_REQ_V3_CQE_TYPE_REQ_V3 (UINT32_C(0x8) << 1)
+ #define CQ_REQ_V3_CQE_TYPE_LAST CQ_REQ_V3_CQE_TYPE_REQ_V3
+ /*
+ * When this bit is '1', it indicates that the completed packet
+ * was transmitted using the push accelerated data provided by
+ * the driver. When this bit is '0', it indicates that the packet
+ * had no push acceleration data written or was executed as a
+ * normal packet even though push data was provided.
+ *
+ * Note: This field is intended to be used for driver-generated push
+ * statistics. As such, it is not applicable for RC since not all
+ * RC packets return a CQE.
+ */
+ #define CQ_REQ_V3_PUSH UINT32_C(0x20)
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_REQ_V3_STATUS_OK UINT32_C(0x0)
+ /*
+ * An unexpected BTH opcode or a First/Middle packet that is not
+ * the full MTU size was returned by the responder.
+ *
+ * This is a fatal error detected by the requester Rx.
+ */
+ #define CQ_REQ_V3_STATUS_BAD_RESPONSE_ERR UINT32_C(0x1)
+ /*
+ * Generated for a WQE posted to the local SQ when the sum of the
+ * lengths of the SGEs in the WQE exceeds the maximum message
+ * length of 2^31 bytes.
+ *
+ * Generated for a WQE posted to the local RQ/SRQ when the sum of
+ * the lengths of the SGEs in the WQE is too small to receive the
+ * (valid) incoming message or the length of the incoming message
+ * is greater than the maximum message size supported.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_REQ_V3_STATUS_LOCAL_LENGTH_ERR UINT32_C(0x2)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_REQ_V3_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_REQ_V3_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * The SSC detected an error on a local memory operation from the
+ * SQ (fast-register, local invalidate, or bind).
+ *
+ * This is a fatal error detected by the requester Tx.
+ */
+ #define CQ_REQ_V3_STATUS_MEMORY_MGT_OPERATION_ERR UINT32_C(0x7)
+ /*
+ * An invalid message was received by the responder. This could be
+ * an operation that is not supported by this QP, an IRRQ overflow
+ * error, or the length in an RDMA operation is greater than the
+ * maximum message size (2^31 bytes).
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Invalid Request. For responder
+ * CQEs, only the opaque field is valid.
+ */
+ #define CQ_REQ_V3_STATUS_REMOTE_INVALID_REQUEST_ERR UINT32_C(0x8)
+ /*
+ * A protection error occurred on a remote data buffer to be read
+ * by an RDMA Read, written by an RDMA Write or accessed by an
+ * atomic operation. This error is reported only on RDMA operations
+ * or atomic operations.
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Remote Access Violation.
+ */
+ #define CQ_REQ_V3_STATUS_REMOTE_ACCESS_ERR UINT32_C(0x9)
+ /*
+ * The operation could not be completed successfully by the
+ * responder. Possible causes include an RQ/SRQ WQE format error,
+ * an SSC error when validating an SGE from an RQ/SRQ WQE, or the
+ * message received was too long for the RQ/SRQ WQE.
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Remote Operation Error.
+ */
+ #define CQ_REQ_V3_STATUS_REMOTE_OPERATION_ERR UINT32_C(0xa)
+ /*
+ * The RNR NAK retry count was exceeded while trying to send this
+ * message.
+ *
+ * This is a fatal error detected by the requester.
+ */
+ #define CQ_REQ_V3_STATUS_RNR_NAK_RETRY_CNT_ERR UINT32_C(0xb)
+ /*
+ * The local transport timeout retry counter was exceeded while
+ * trying to send this message.
+ *
+ * This is a fatal error detected by the requester.
+ */
+ #define CQ_REQ_V3_STATUS_TRANSPORT_RETRY_CNT_ERR UINT32_C(0xc)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_REQ_V3_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_REQ_V3_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_REQ_V3_STATUS_LAST CQ_REQ_V3_STATUS_OVERFLOW_ERR
+ uint16_t reserved4;
+ /* This value is from the WQE that is being completed. */
+ uint32_t opaque;
+} cq_req_v3_t, *pcq_req_v3_t;
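+
+/*
+ * Editor's sketch (not part of the generated header): the occupancy
+ * rule quoted in the sq_cons_idx comment above, as a helper returning
+ * free SQ slots (in 16B units, like sq_cons_idx and QPC.sq_size).
+ * sq_prod_idx is maintained by the user; the names are hypothetical.
+ * The same empty/full rule applies to the RQ/SRQ indices described
+ * for the responder CQEs below.
+ */
+static inline uint16_t
+example_sq_free_slots(uint16_t sq_prod_idx, uint16_t sq_cons_idx,
+    uint16_t sq_size)
+{
+	/*
+	 * prod == cons means empty; (prod + 1) % size == cons means
+	 * full, so one slot always stays unused.
+	 */
+	return ((uint16_t)(sq_cons_idx + sq_size - sq_prod_idx - 1) %
+	    sq_size);
+}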
+
+/*
+ * This is the Responder RQ/SRQ CQE V3 structure for RC QPs. This is
+ * used to complete each RQ/SRQ WQE. When the WQE is completed, it
+ * indicates that there is room for one more WQE on the corresponding
+ * RQ/SRQ.
+ *
+ * User can determine available space in the RQ/SRQ by comparing
+ * a rq_cons_idx to a rq_prod_idx, both maintained by the user. The
+ * range for rq_prod/cons_idx is from 0 to QPC.rq_size-1. The
+ * rq_prod_idx value increments by one for each WQE that is added to
+ * the RQ/SRQ by the user. Value must be wrapped at rq_size. The
+ * rq_cons_idx value increments by one for each WQE that is completed
+ * from that particular RQ/SRQ. The qp_handle can be used by the user
+ * to determine what RQ/SRQ to increment. Value must also be wrapped at
+ * rq_size. When the two values are equal, the RQ/SRQ is empty. When
+ * (rq_prod_idx+1)%QPC.rq_size==rq_cons_idx, the queue is full.
+ */
+/* cq_res_rc_v3 (size:256b/32B) */
+
+typedef struct cq_res_rc_v3 {
+ /*
+ * The length of the message's payload in bytes, stored in
+ * the SGEs
+ */
+ uint32_t length;
+ /*
+ * Immediate data in case the imm_flag is set, or the R_Key to be
+ * invalidated in case the inv_flag is set.
+ */
+ uint32_t imm_data_or_inv_r_key;
+ /*
+ * This is an application level ID used to identify the
+ * QP and its SQ and RQ.
+ */
+ uint64_t qp_handle;
+ /*
+ * Opaque value - valid when inv_flag is set. Used by the driver
+ * to reference the buffer used to store the PBL when the MR was
+ * fast registered. The driver can reclaim this buffer after
+ * the MR was remotely invalidated. The controller takes that
+ * value from the MR referenced by the R_Key.
+ */
+ uint64_t mr_handle;
+ uint8_t cqe_type_toggle;
+ /*
+ * Indicate valid completion - written by the chip. The NIC
+ * toggles this bit each time it has finished consuming all PBL
+ * entries.
+ */
+ #define CQ_RES_RC_V3_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_RES_RC_V3_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_RES_RC_V3_CQE_TYPE_SFT 1
+ /*
+ * Responder RC Completion - This is used for both RQ and SRQ
+ * completions for RC service QPs.
+ */
+ #define CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3 (UINT32_C(0x9) << 1)
+ #define CQ_RES_RC_V3_CQE_TYPE_LAST CQ_RES_RC_V3_CQE_TYPE_RES_RC_V3
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_RES_RC_V3_STATUS_OK UINT32_C(0x0)
+ /*
+ * Generated for a WQE posted to the local SQ when the sum of the
+ * lengths of the SGEs in the WQE exceeds the maximum message
+ * length of 2^31 bytes.
+ *
+ * Generated for a WQE posted to the local RQ/SRQ when the sum of
+ * the lengths of the SGEs in the WQE is too small to receive the
+ * (valid) incoming message or the length of the incoming message
+ * is greater than the maximum message size supported.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_LOCAL_LENGTH_ERR UINT32_C(0x2)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * A protection error occurred on a local data buffer during the
+ * processing of an RDMA Write with Immediate Data operation sent
+ * from the remote node.
+ *
+ * This is a fatal error detected by the responder Rx. Only the
+ * opaque field in the CQE is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_LOCAL_ACCESS_ERROR UINT32_C(0x6)
+ /*
+ * An invalid message was received by the responder. This could be
+ * an operation that is not supported by this QP, an IRRQ overflow
+ * error, or the length in an RDMA operation is greater than the
+ * maximum message size (2^31 bytes).
+ *
+ * This is a fatal error detected by the responder and communicated
+ * back to the requester using a NAK-Invalid Request. For responder
+ * CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_REMOTE_INVALID_REQUEST_ERR UINT32_C(0x8)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_RES_RC_V3_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE had already been taken off the RQ/SRQ when a fatal error
+ * was detected on responder Rx. Only the opaque field in the CQE
+ * is valid.
+ */
+ #define CQ_RES_RC_V3_STATUS_HW_FLUSH_ERR UINT32_C(0xe)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_RES_RC_V3_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_RES_RC_V3_STATUS_LAST CQ_RES_RC_V3_STATUS_OVERFLOW_ERR
+ uint16_t flags;
+ /*
+ * This flag indicates that the completion is for an SRQ entry
+ * rather than for an RQ entry.
+ */
+ #define CQ_RES_RC_V3_FLAGS_SRQ UINT32_C(0x1)
+ /* CQE relates to RQ WQE. */
+ #define CQ_RES_RC_V3_FLAGS_SRQ_RQ UINT32_C(0x0)
+ /* CQE relates to SRQ WQE. */
+ #define CQ_RES_RC_V3_FLAGS_SRQ_SRQ UINT32_C(0x1)
+ #define CQ_RES_RC_V3_FLAGS_SRQ_LAST CQ_RES_RC_V3_FLAGS_SRQ_SRQ
+ /* Immediate data indicator */
+ #define CQ_RES_RC_V3_FLAGS_IMM UINT32_C(0x2)
+ /* R_Key invalidate indicator */
+ #define CQ_RES_RC_V3_FLAGS_INV UINT32_C(0x4)
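+ /* Indicates whether the incoming operation was a Send or an RDMA Write. */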
+ #define CQ_RES_RC_V3_FLAGS_RDMA UINT32_C(0x8)
+ /* CQE relates to an incoming Send request */
+ #define CQ_RES_RC_V3_FLAGS_RDMA_SEND (UINT32_C(0x0) << 3)
+ /* CQE relates to an incoming RDMA Write request */
+ #define CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE (UINT32_C(0x1) << 3)
+ #define CQ_RES_RC_V3_FLAGS_RDMA_LAST CQ_RES_RC_V3_FLAGS_RDMA_RDMA_WRITE
+ /* This value is from the WQE that is being completed. */
+ uint32_t opaque;
+} cq_res_rc_v3_t, *pcq_res_rc_v3_t;
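+
+/*
+ * Editor's sketch (not part of the generated header): decoding the
+ * flags of a responder RC V3 CQE with the definitions above. The
+ * function name and out-parameters are hypothetical.
+ */
+static inline void
+example_parse_res_rc_v3(const cq_res_rc_v3_t *cqe, int *is_srq,
+    uint32_t *imm_data, uint32_t *inv_r_key)
+{
+	*is_srq = ((cqe->flags & CQ_RES_RC_V3_FLAGS_SRQ) ==
+	    CQ_RES_RC_V3_FLAGS_SRQ_SRQ);
+	/* imm_data_or_inv_r_key is qualified by the IMM or INV flag. */
+	if (cqe->flags & CQ_RES_RC_V3_FLAGS_IMM)
+		*imm_data = cqe->imm_data_or_inv_r_key;
+	else if (cqe->flags & CQ_RES_RC_V3_FLAGS_INV)
+		*inv_r_key = cqe->imm_data_or_inv_r_key;
+}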
+
+/*
+ * This is the Responder RQ/SRQ CQE V3 structure for UD QPs and QP1 QPs
+ * that are treated as UD. This is used to complete RQ/SRQ WQE's. When
+ * the WQE is completed, it indicates that there is room for one more
+ * WQE on the corresponding RQ/SRQ.
+ *
+ * User can determine available space in the RQ/SRQ by comparing
+ * a rq_cons_idx to a rq_prod_idx, both maintained by the user. The
+ * range for rq_prod/cons_idx is from 0 to QPC.rq_size-1. The
+ * rq_prod_idx value increments by one for each WQE that is added to
+ * the RQ/SRQ by the user. Value must be wrapped at rq_size. The
+ * rq_cons_idx value increments by one for each WQE that is completed
+ * from that particular RQ/SRQ. The qp_handle can be used by the user
+ * to determine what RQ/SRQ to increment. Value must also be wrapped at
+ * rq_size. When the two values are equal, the RQ/SRQ is empty. When
+ * (rq_prod_idx+1)%QPC.rq_size==rq_cons_idx, the queue is full.
+ */
+/* cq_res_ud_v3 (size:256b/32B) */
+
+typedef struct cq_res_ud_v3 {
+ uint16_t length;
+ /*
+ * The length of the message's payload in bytes, stored in
+ * the SGEs
+ */
+ #define CQ_RES_UD_V3_LENGTH_MASK UINT32_C(0x3fff)
+ #define CQ_RES_UD_V3_LENGTH_SFT 0
+ uint8_t reserved1;
+ /* Upper 8b of the Source QP value from the DETH header. */
+ uint8_t src_qp_high;
+ /* Immediate data in case the imm_flag is set. */
+ uint32_t imm_data;
+ /*
+ * This is an application level ID used to identify the
+ * QP and its SQ and RQ.
+ */
+ uint64_t qp_handle;
+ /*
+ * Source MAC address for the UD message placed in the WQE
+ * that is completed by this CQE.
+ */
+ uint16_t src_mac[3];
+ /* Lower 16b of the Source QP value from the DETH header. */
+ uint16_t src_qp_low;
+ uint8_t cqe_type_toggle;
+ /*
+ * Indicate valid completion - written by the chip. The NIC
+ * toggles this bit each time it has finished consuming all PBL
+ * entries.
+ */
+ #define CQ_RES_UD_V3_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_RES_UD_V3_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_RES_UD_V3_CQE_TYPE_SFT 1
+ /*
+ * Responder UD Completion - This is used for both RQ and SRQ
+ * completion for UD service QPs. It is also used for QP1 QPs
+ * that are treated as UD.
+ */
+ #define CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3 (UINT32_C(0xa) << 1)
+ #define CQ_RES_UD_V3_CQE_TYPE_LAST CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_RES_UD_V3_STATUS_OK UINT32_C(0x0)
+ /*
+ * This indicates that the packet was too long for the WQE provided
+ * on the SRQ/RQ.
+ *
+ * This is not a fatal error. All the fields in the CQE are valid.
+ */
+ #define CQ_RES_UD_V3_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x3)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_UD_V3_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_UD_V3_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_RES_UD_V3_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE had already been taken off the RQ/SRQ when a fatal error
+ * was detected on responder Rx. Only the opaque field in the CQE
+ * is valid.
+ */
+ #define CQ_RES_UD_V3_STATUS_HW_FLUSH_ERR UINT32_C(0xe)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_RES_UD_V3_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_RES_UD_V3_STATUS_LAST CQ_RES_UD_V3_STATUS_OVERFLOW_ERR
+ uint16_t flags;
+ /*
+ * This flag indicates that the completion is for an SRQ entry
+ * rather than for an RQ entry.
+ */
+ #define CQ_RES_UD_V3_FLAGS_SRQ UINT32_C(0x1)
+ /* CQE relates to RQ WQE. */
+ #define CQ_RES_UD_V3_FLAGS_SRQ_RQ UINT32_C(0x0)
+ /* CQE relates to SRQ WQE. */
+ #define CQ_RES_UD_V3_FLAGS_SRQ_SRQ UINT32_C(0x1)
+ #define CQ_RES_UD_V3_FLAGS_SRQ_LAST CQ_RES_UD_V3_FLAGS_SRQ_SRQ
+ /* Immediate data indicator */
+ #define CQ_RES_UD_V3_FLAGS_IMM UINT32_C(0x2)
+ #define CQ_RES_UD_V3_FLAGS_UNUSED_MASK UINT32_C(0xc)
+ #define CQ_RES_UD_V3_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_MASK UINT32_C(0x30)
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_SFT 4
+ /* RoCEv1 Message */
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V1 (UINT32_C(0x0) << 4)
+ /* RoCEv2 IPv4 Message */
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV4 (UINT32_C(0x2) << 4)
+ /* RoCEv2 IPv6 Message */
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6 (UINT32_C(0x3) << 4)
+ #define CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_V3_FLAGS_ROCE_IP_VER_V2IPV6
+ /* This value is from the WQE that is being completed. */
+ uint32_t opaque;
+} cq_res_ud_v3_t, *pcq_res_ud_v3_t;
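+
+/*
+ * A minimal decode sketch (the helper names are illustrative, not part
+ * of the vendor interface): the CQE_TYPE_* encodings above are already
+ * shifted into place, so a consumer can compare the masked field
+ * against them directly.
+ */
+static inline int
+cq_res_ud_v3_is_ud(const cq_res_ud_v3_t *cqe)
+{
+	/* True when this CQE is a Responder UD V3 completion. */
+	return ((cqe->cqe_type_toggle & CQ_RES_UD_V3_CQE_TYPE_MASK) ==
+	    CQ_RES_UD_V3_CQE_TYPE_RES_UD_V3);
+}
+
+static inline int
+cq_res_ud_v3_is_ok(const cq_res_ud_v3_t *cqe)
+{
+	/* True when the operation completed successfully. */
+	return (cqe->status == CQ_RES_UD_V3_STATUS_OK);
+}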
+
+/*
+ * This is the Responder RQ/SRQ CQE V3 structure for RawEth. This is
+ * used to complete RQ/SRQ WQE's. When the WQE is completed, it
+ * indicates that there is room for one more WQE on the corresponding
+ * RQ/SRQ.
+ *
+ * The user can determine the available space in the RQ/SRQ by comparing
+ * a rq_cons_idx to a rq_prod_idx, both maintained by the user. The
+ * range for rq_prod/cons_idx is from 0 to QPC.rq_size-1. The
+ * rq_prod_idx value increments by one for each WQE that is added to
+ * the RQ/SRQ by the user. Value must be wrapped at rq_size. The
+ * rq_cons_idx value increments by one for each WQE that is completed
+ * from that particular RQ/SRQ. The qp_handle can be used by the user
+ * to determine what RQ/SRQ to increment. Value must also be wrapped at
+ * rq_size. When the two values are equal, the RQ/SRQ is empty. When
+ * (rq_prod_idx+1)%QPC.rq_size==rq_cons_idx, the queue is full.
+ */
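+
+/*
+ * A minimal sketch of the bookkeeping described above; the helper and
+ * its parameters are illustrative driver-side values, not fields of
+ * the structure below. Equal indices mean empty and
+ * (rq_prod_idx + 1) % rq_size == rq_cons_idx means full, so one slot
+ * always separates the two states.
+ */
+static inline uint32_t
+bnxt_rq_free_slots(uint32_t rq_prod_idx, uint32_t rq_cons_idx,
+    uint32_t rq_size)
+{
+	/* Number of WQEs that may still be posted to the RQ/SRQ. */
+	return ((rq_cons_idx + rq_size - rq_prod_idx - 1) % rq_size);
+}
+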
+/* cq_res_raweth_qp1_v3 (size:256b/32B) */
+
+typedef struct cq_res_raweth_qp1_v3 {
+ uint16_t length;
+ /*
+ * The length of the message's payload in bytes, stored in
+	 * the SGEs.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_LENGTH_MASK UINT32_C(0x3fff)
+ #define CQ_RES_RAWETH_QP1_V3_LENGTH_SFT 0
+ uint16_t raweth_qp1_flags_cfa_metadata1;
+ /*
+ * When this bit is '1', it indicates a packet that has an
+ * error of some type. Type of error is indicated in
+ * raweth_qp1_errors.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ERROR UINT32_C(0x1)
+ /*
+	 * This value indicates what the inner packet type was determined
+	 * to be.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_MASK UINT32_C(0x3c0)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_SFT 6
+ /*
+ * Not Known:
+ * Indicates that the packet type was not known.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 6)
+ /*
+ * IP Packet:
+ * Indicates that the packet was an IP packet, but further
+ * classification was not possible.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_IP (UINT32_C(0x1) << 6)
+ /*
+ * TCP Packet:
+ * Indicates that the packet was IP and TCP.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_TCP (UINT32_C(0x2) << 6)
+ /*
+ * UDP Packet:
+ * Indicates that the packet was IP and UDP.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_UDP (UINT32_C(0x3) << 6)
+ /*
+ * FCoE Packet:
+ * Indicates that the packet was recognized as a FCoE.
+ * This also indicates that the raweth_qp1_payload_offset field is
+ * valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_FCOE (UINT32_C(0x4) << 6)
+ /*
+ * RoCE Packet:
+ * Indicates that the packet was recognized as a RoCE.
+ * This also indicates that the raweth_qp1_payload_offset field is
+ * valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_ROCE (UINT32_C(0x5) << 6)
+ /*
+ * ICMP Packet:
+ * Indicates that the packet was recognized as ICMP.
+ * This indicates that the raweth_qp1_payload_offset field is
+ * valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_ICMP (UINT32_C(0x7) << 6)
+ /*
+ * PtP packet wo/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_WO_TIMESTAMP (UINT32_C(0x8) << 6)
+ /*
+ * PtP packet w/timestamp:
+ * Indicates that the packet was recognized as a PtP
+ * packet and that a timestamp was taken for the packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 6)
+ #define CQ_RES_RAWETH_QP1_V3_ITYPE_LAST CQ_RES_RAWETH_QP1_V3_ITYPE_PTP_W_TIMESTAMP
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_MASK UINT32_C(0xf000)
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_SFT 12
+ /* When meta_format != 0, this value is the VLAN TPID_SEL. */
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_MASK UINT32_C(0x7000)
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_TPID_SEL_SFT 12
+ /* When meta_format != 0, this value is the VLAN valid. */
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA1_VALID UINT32_C(0x8000)
+ uint16_t raweth_qp1_errors;
+ /*
+ * This indicates that there was an error in the IP header
+ * checksum.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_IP_CS_ERROR UINT32_C(0x10)
+ /*
+ * This indicates that there was an error in the TCP, UDP
+ * or ICMP checksum.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_L4_CS_ERROR UINT32_C(0x20)
+ /*
+ * This indicates that there was an error in the tunnel
+ * IP header checksum.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_IP_CS_ERROR UINT32_C(0x40)
+ /*
+ * This indicates that there was an error in the tunnel
+ * UDP checksum.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_L4_CS_ERROR UINT32_C(0x80)
+ /*
+ * This indicates that there was a CRC error on either an FCoE
+ * or RoCE packet. The itype indicates the packet type.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_CRC_ERROR UINT32_C(0x100)
+ /*
+	 * When this field is non-zero, it indicates that there was an
+	 * error in the tunnel portion of the packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_SFT 9
+ /*
+	 * No additional error occurred on the tunnel portion
+	 * of the packet, or the packet does not have a tunnel.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9)
+ /*
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6
+ * in the tunnel header.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (UINT32_C(0x1) << 9)
+ /*
+	 * Indicates that the header length is out of range in the
+	 * tunnel header. Valid for IPv4.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (UINT32_C(0x2) << 9)
+ /*
+	 * Indicates that the physical packet is shorter than that claimed
+	 * by the tunnel L3 header length. Valid for IPv4 or IPv6
+	 * tunnel packets.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (UINT32_C(0x3) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel UDP header length for a tunnel
+ * UDP packet that is not fragmented.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (UINT32_C(0x4) << 9)
+ /*
+	 * Indicates that the IPv4 TTL or IPv6 hop limit check
+	 * has failed (e.g. TTL = 0) in the tunnel header. Valid
+	 * for IPv4 and IPv6.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (UINT32_C(0x5) << 9)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the tunnel header length. Valid for GTPv1-U
+ * packets.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR (UINT32_C(0x6) << 9)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR
+ /*
+	 * When this field is non-zero, it indicates that there was an
+	 * error in the inner portion of the packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_SFT 12
+ /*
+	 * No additional error occurred on the inner portion
+	 * of the packet, or the packet does not have a tunnel.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12)
+ /*
+ * Indicates that IP header version does not match
+ * expectation from L2 Ethertype for IPv4 and IPv6 or that
+ * option other than VFT was parsed on
+ * FCoE packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_VERSION (UINT32_C(0x1) << 12)
+ /*
+	 * Indicates that the header length is out of range. Valid for
+	 * IPv4 and RoCE.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (UINT32_C(0x2) << 12)
+ /*
+	 * Indicates that the IPv4 TTL or IPv6 hop limit check
+	 * has failed (e.g. TTL = 0). Valid for IPv4 and IPv6.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12)
+ /*
+	 * Indicates that the physical packet is shorter than that
+	 * claimed by the L3 header length. Valid for IPv4,
+	 * IPv6, or RoCE packets.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (UINT32_C(0x4) << 12)
+ /*
+ * Indicates that the physical packet is shorter than that
+ * claimed by the UDP header length for a UDP packet that is
+ * not fragmented.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (UINT32_C(0x5) << 12)
+ /*
+ * Indicates that TCP header length > IP payload. Valid for
+ * TCP packets only.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (UINT32_C(0x6) << 12)
+ /* Indicates that TCP header length < 5. Valid for TCP. */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (UINT32_C(0x7) << 12)
+ /*
+ * Indicates that TCP option headers result in a TCP header
+ * size that does not match data offset in TCP header. Valid
+ * for TCP.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (UINT32_C(0x8) << 12)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ /* This is data from the CFA as indicated by the meta_format field. */
+ uint16_t cfa_metadata0;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_MASK UINT32_C(0xfff)
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_MASK UINT32_C(0xe000)
+ #define CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_SFT 13
+ /*
+ * This is an application level ID used to identify the
+ * QP and its SQ and RQ.
+ */
+ uint64_t qp_handle;
+ uint32_t raweth_qp1_flags2;
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * inner packet and that the ip_cs_error field indicates if there
+ * was an error.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_CS_CALC UINT32_C(0x1)
+ /*
+ * This indicates that the TCP, UDP or ICMP checksum was
+ * calculated for the inner packet and that the l4_cs_error field
+ * indicates if there was an error.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_L4_CS_CALC UINT32_C(0x2)
+ /*
+ * This indicates that the ip checksum was calculated for the
+ * tunnel header and that the t_ip_cs_error field indicates if
+ * there was an error.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_CS_CALC UINT32_C(0x4)
+ /*
+ * This indicates that the UDP checksum was
+ * calculated for the tunnel packet and that the t_l4_cs_error
+ * field indicates if there was an error.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_L4_CS_CALC UINT32_C(0x8)
+	/* This field indicates the format of the metadata field. */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_SFT 4
+ /* No metadata information. Values are zero. */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the table scope
+ * and action record pointer.
+ *
+ * - metadata2[25:0] contains the action record pointer.
+ * - metadata2[31:26] contains the table scope.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_ACT_REC_PTR (UINT32_C(0x1) << 4)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the Tunnel ID value, justified
+ * to LSB.
+ *
+ * - VXLAN = VNI[23:0] -> VXLAN Network ID
+	 * - Geneve (NGE) = VNI[23:0] -> Virtual Network Identifier
+	 * - NVGRE = TNI[23:0] -> Tenant Network ID
+	 * - GRE = KEY[31:0] -> key field with bit mask; zero if K=0
+	 * - IPv4 = 0 (not populated)
+	 * - IPv6 = Flow Label[19:0]
+	 * - PPPoE = sessionID[15:0]
+	 * - MPLS = Outer label[19:0]
+ * - UPAR = Selected[31:0] with bit mask
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_TUNNEL_ID (UINT32_C(0x2) << 4)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+	 * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the 32b metadata from the
+ * prepended header (chdr_data).
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_CHDR_DATA (UINT32_C(0x3) << 4)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the outer_l3_offset,
+ * inner_l2_offset, inner_l3_offset, and inner_l4_size.
+ *
+ * - metadata2[8:0] contains the outer_l3_offset.
+ * - metadata2[17:9] contains the inner_l2_offset.
+ * - metadata2[26:18] contains the inner_l3_offset.
+ * - metadata2[31:27] contains the inner_l4_size.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET (UINT32_C(0x4) << 4)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_META_FORMAT_HDR_OFFSET
+ /*
+ * This field indicates the IP type for the inner-most IP header.
+ * A value of '0' indicates IPv4. A value of '1' indicates IPv6.
+ * This value is only valid if itype indicates a packet
+ * with an IP header.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_IP_TYPE UINT32_C(0x100)
+ /*
+ * This indicates that the complete 1's complement checksum was
+ * calculated for the packet.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_CALC UINT32_C(0x200)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE UINT32_C(0x400)
+ /* Indicates that the Tunnel IP type was IPv4. */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV4 (UINT32_C(0x0) << 10)
+ /* Indicates that the Tunnel IP type was IPv6. */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6 (UINT32_C(0x1) << 10)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_LAST CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_T_IP_TYPE_IPV6
+ /*
+ * This value is the complete 1's complement checksum calculated
+ * from the start of the outer L3 header to the end of the packet
+	 * (not including the Ethernet CRC). It is valid when the
+ * 'complete_checksum_calc' flag is set.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_MASK UINT32_C(0xffff0000)
+ #define CQ_RES_RAWETH_QP1_V3_RAWETH_QP1_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ *
+ * - meta_format 0 - none - metadata2 = 0 - not valid/not stripped
+ * - meta_format 1 - act_rec_ptr - metadata2 = {table_scope[5:0],
+ * act_rec_ptr[25:0]}
+ * - meta_format 2 - tunnel_id - metadata2 = tunnel_id[31:0]
+ * - meta_format 3 - chdr_data - metadata2 = updated_chdr_data[31:0]
+ * - meta_format 4 - hdr_offsets - metadata2 = hdr_offsets[31:0]
+ */
+ uint32_t cfa_metadata2;
+ uint8_t cqe_type_toggle;
+ /*
+	 * Indicates a valid completion - written by the chip. The NIC
+	 * toggles this bit each time it finishes consuming all PBL
+	 * entries.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_SFT 1
+ /*
+ * Responder RawEth and QP1 Completion - This is used for RQ and
+ * SRQ completion for RawEth service. It is also used for QP1 QPs
+ * that are treated as RawEth.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3 (UINT32_C(0xb) << 1)
+ #define CQ_RES_RAWETH_QP1_V3_CQE_TYPE_LAST CQ_RES_RAWETH_QP1_V3_CQE_TYPE_RES_RAWETH_QP1_V3
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_OK UINT32_C(0x0)
+ /*
+ * This indicates that the packet was too long for the WQE provided
+ * on the SRQ/RQ.
+ *
+ * This is not a fatal error. All the fields in the CQE are valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x3)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE had already been taken off the RQ/SRQ when a fatal error
+ * was detected on responder Rx. Only the opaque field in the CQE
+ * is valid.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_HW_FLUSH_ERR UINT32_C(0xe)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_RES_RAWETH_QP1_V3_STATUS_LAST CQ_RES_RAWETH_QP1_V3_STATUS_OVERFLOW_ERR
+ uint8_t flags;
+ /*
+ * This flag indicates that the completion is for a SRQ entry
+ * rather than for an RQ entry.
+ */
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ UINT32_C(0x1)
+ /* CQE relates to RQ WQE. */
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_RQ UINT32_C(0x0)
+ /* CQE relates to SRQ WQE. */
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ UINT32_C(0x1)
+ #define CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_LAST CQ_RES_RAWETH_QP1_V3_FLAGS_SRQ_SRQ
+ /*
+ * This value indicates the offset in bytes from the beginning of the
+ * packet where the inner payload starts. This value is valid for
+ * TCP, UDP, FCoE, and RoCE packets.
+ *
+ * A value of zero indicates an offset of 256 bytes.
+ */
+ uint8_t raweth_qp1_payload_offset;
+ /* This value is from the WQE that is being completed. */
+ uint32_t opaque;
+} cq_res_raweth_qp1_v3_t, *pcq_res_raweth_qp1_v3_t;
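+
+/*
+ * A hedged usage sketch (helper names are illustrative): pulling the
+ * stripped VLAN fields out of cfa_metadata0 when meta_format is 1, and
+ * applying the zero-means-256 rule of raweth_qp1_payload_offset.
+ */
+static inline void
+cq_res_raweth_qp1_v3_vlan(const cq_res_raweth_qp1_v3_t *cqe,
+    uint16_t *vid, uint8_t *pri, uint8_t *de)
+{
+	*vid = cqe->cfa_metadata0 &
+	    CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_VID_MASK;
+	*pri = (cqe->cfa_metadata0 &
+	    CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_MASK) >>
+	    CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_PRI_SFT;
+	*de = (cqe->cfa_metadata0 &
+	    CQ_RES_RAWETH_QP1_V3_CFA_METADATA0_DE) != 0;
+}
+
+static inline uint32_t
+cq_res_raweth_qp1_v3_payload_off(const cq_res_raweth_qp1_v3_t *cqe)
+{
+	/* A raw value of zero encodes an offset of 256 bytes. */
+	return (cqe->raweth_qp1_payload_offset ?
+	    cqe->raweth_qp1_payload_offset : 256);
+}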
+
+/*
+ * This is the Responder RQ/SRQ CQE V3 structure for UD QPs and QP1 QPs
+ * treated as UD. This is used to complete RQ/SRQ WQE's. It differs
+ * from the Res_UD CQE in that it carries additional CFA fields, in
+ * place of the QP handle. (Instead of the QP handle, this CQE carries
+ * the QID. It is up to the user to map the QID back to a QP handle.)
+ * When the WQE is completed, it indicates that there is room for one
+ * more WQE on the corresponding RQ/SRQ.
+ *
+ * The user can determine the available space in the RQ/SRQ by comparing
+ * a rq_cons_idx to a rq_prod_idx, both maintained by the user. The
+ * range for rq_prod/cons_idx is from 0 to QPC.rq_size-1. The
+ * rq_prod_idx value increments by one for each WQE that is added to
+ * the RQ/SRQ by the user. Value must be wrapped at rq_size. The
+ * rq_cons_idx value increments by one for each WQE that is completed
+ * from that particular RQ/SRQ. The QID can be used by the user
+ * to determine what RQ/SRQ to increment. Value must also be wrapped at
+ * rq_size. When the two values are equal, the RQ/SRQ is empty. When
+ * (rq_prod_idx+1)%QPC.rq_size==rq_cons_idx, the queue is full.
+ */
+/* cq_res_ud_cfa_v3 (size:256b/32B) */
+
+typedef struct cq_res_ud_cfa_v3 {
+ uint16_t length;
+ /*
+ * The length of the message's payload in bytes, stored in
+	 * the SGEs.
+ */
+ #define CQ_RES_UD_CFA_V3_LENGTH_MASK UINT32_C(0x3fff)
+ #define CQ_RES_UD_CFA_V3_LENGTH_SFT 0
+ /* This is data from the CFA as indicated by the meta_format field. */
+ uint16_t cfa_metadata0;
+ /* When meta_format=1, this value is the VLAN VID. */
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_MASK UINT32_C(0xfff)
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_VID_SFT 0
+ /* When meta_format=1, this value is the VLAN DE. */
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_DE UINT32_C(0x1000)
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_MASK UINT32_C(0xe000)
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA0_PRI_SFT 13
+	/* Immediate data in case the imm_flag is set. */
+ uint32_t imm_data;
+ uint32_t qid_cfa_metadata1_src_qp_high;
+ /*
+ * This value indicates the QPID associated with this operation.
+ *
+ * The driver will use the qid from this CQE to map a QP handle
+ * in the completion record returned to the application.
+ */
+ #define CQ_RES_UD_CFA_V3_QID_MASK UINT32_C(0x7ff)
+ #define CQ_RES_UD_CFA_V3_QID_SFT 0
+ #define CQ_RES_UD_CFA_V3_UNUSED_MASK UINT32_C(0xff800)
+ #define CQ_RES_UD_CFA_V3_UNUSED_SFT 11
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_MASK UINT32_C(0xf00000)
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_SFT 20
+ /* When meta_format != 0, this value is the VLAN TPID_SEL. */
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_MASK UINT32_C(0x700000)
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_TPID_SEL_SFT 20
+ /* When meta_format != 0, this value is the VLAN valid. */
+ #define CQ_RES_UD_CFA_V3_CFA_METADATA1_VALID UINT32_C(0x800000)
+ /* Upper 8b of the Source QP value from the DETH header. */
+ #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_MASK UINT32_C(0xff000000)
+ #define CQ_RES_UD_CFA_V3_SRC_QP_HIGH_SFT 24
+ /*
+ * This is data from the CFA block as indicated by the meta_format
+ * field.
+ *
+ * - meta_format 0 - none - metadata2 = 0 - not valid/not stripped
+ * - meta_format 1 - act_rec_ptr - metadata2 = {table_scope[5:0],
+ * act_rec_ptr[25:0]}
+ * - meta_format 2 - tunnel_id - metadata2 = tunnel_id[31:0]
+ * - meta_format 3 - chdr_data - metadata2 = updated_chdr_data[31:0]
+ * - meta_format 4 - hdr_offsets - metadata2 = hdr_offsets[31:0]
+ */
+ uint32_t cfa_metadata2;
+ /*
+ * Source MAC address for the UD message placed in the WQE
+ * that is completed by this CQE.
+ */
+ uint16_t src_mac[3];
+ /* Lower 16b of the Source QP value from the DETH header. */
+ uint16_t src_qp_low;
+ uint8_t cqe_type_toggle;
+ /*
+	 * Indicates a valid completion - written by the chip. The NIC
+	 * toggles this bit each time it finishes consuming all PBL
+	 * entries.
+ */
+ #define CQ_RES_UD_CFA_V3_TOGGLE UINT32_C(0x1)
+ /* This field defines the type of CQE. */
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_MASK UINT32_C(0x1e)
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_SFT 1
+ /*
+ * Responder UD Completion with CFA - This is used for both RQ
+ * and SRQ completion for UD service QPs. It includes cfa fields
+ * (some of which carry VLAN information), in place of the QP
+ * handle. It is also used for QP1 QPs that are treated as UD.
+ */
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3 (UINT32_C(0xc) << 1)
+ #define CQ_RES_UD_CFA_V3_CQE_TYPE_LAST CQ_RES_UD_CFA_V3_CQE_TYPE_RES_UD_CFA_V3
+ /* This field indicates the status for the CQE. */
+ uint8_t status;
+ /* The operation completed successfully. */
+ #define CQ_RES_UD_CFA_V3_STATUS_OK UINT32_C(0x0)
+ /*
+ * This indicates that the packet was too long for the WQE provided
+ * on the SRQ/RQ.
+ *
+ * This is not a fatal error. All the fields in the CQE are valid.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_HW_LOCAL_LENGTH_ERR UINT32_C(0x3)
+ /*
+ * An internal QP consistency error was detected while processing
+ * this Work Request. For requester, this could be an SQ WQE format
+ * error or an operation specified in the WQE that is not supported
+ * for the QP. For responder, this is an RQ/SRQ WQE format error.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_QP_OPERATION_ERR UINT32_C(0x4)
+ /*
+ * An SGE in the locally posted WQE does not reference a Memory
+ * Region that is valid for the requested operation. If this error
+ * is generated for an SGE using the reserved l_key, this means
+ * that the reserved l_key is not enabled.
+ *
+ * This is a fatal error detected by the requester Tx or responder
+ * Rx. For responder CQEs, only the opaque field is valid.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_LOCAL_PROTECTION_ERR UINT32_C(0x5)
+ /*
+ * A WQE was in process or outstanding when the QP transitioned
+ * into the Error State.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_WORK_REQUEST_FLUSHED_ERR UINT32_C(0xd)
+ /*
+ * A WQE had already been taken off the RQ/SRQ when a fatal error
+ * was detected on responder Rx. Only the opaque field in the CQE
+ * is valid.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_HW_FLUSH_ERR UINT32_C(0xe)
+ /*
+ * A WQE was posted to the SQ/RQ that caused it to overflow. For
+ * requester CQEs, it was the SQ that overflowed. For responder
+ * CQEs, it was the RQ that overflowed.
+ */
+ #define CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR UINT32_C(0xf)
+ #define CQ_RES_UD_CFA_V3_STATUS_LAST CQ_RES_UD_CFA_V3_STATUS_OVERFLOW_ERR
+ uint16_t flags;
+ /*
+ * This flag indicates that the completion is for a SRQ entry
+ * rather than for an RQ entry.
+ */
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ UINT32_C(0x1)
+ /* CQE relates to RQ WQE. */
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_RQ UINT32_C(0x0)
+ /* CQE relates to SRQ WQE. */
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ UINT32_C(0x1)
+ #define CQ_RES_UD_CFA_V3_FLAGS_SRQ_LAST CQ_RES_UD_CFA_V3_FLAGS_SRQ_SRQ
+ /* Immediate data indicator */
+ #define CQ_RES_UD_CFA_V3_FLAGS_IMM UINT32_C(0x2)
+ #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_MASK UINT32_C(0xc)
+ #define CQ_RES_UD_CFA_V3_FLAGS_UNUSED_SFT 2
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_MASK UINT32_C(0x30)
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_SFT 4
+ /* RoCEv1 Message */
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V1 (UINT32_C(0x0) << 4)
+ /* RoCEv2 IPv4 Message */
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV4 (UINT32_C(0x2) << 4)
+ /* RoCEv2 IPv6 Message */
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6 (UINT32_C(0x3) << 4)
+ #define CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_LAST CQ_RES_UD_CFA_V3_FLAGS_ROCE_IP_VER_V2IPV6
+	/* This field indicates the format of the metadata field. */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_MASK UINT32_C(0x3c0)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_SFT 6
+ /* No metadata information. Value is zero. */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_NONE (UINT32_C(0x0) << 6)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the table scope
+ * and action record pointer.
+ *
+ * - metadata2[25:0] contains the action record pointer.
+ * - metadata2[31:26] contains the table scope.
+ */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_ACT_REC_PTR (UINT32_C(0x1) << 6)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the Tunnel ID
+ * value, justified to LSB.
+ *
+ * - VXLAN = VNI[23:0] -> VXLAN Network ID
+	 * - Geneve (NGE) = VNI[23:0] -> Virtual Network Identifier
+	 * - NVGRE = TNI[23:0] -> Tenant Network ID
+	 * - GRE = KEY[31:0] -> key field with bit mask; zero if K=0
+	 * - IPv4 = 0 (not populated)
+	 * - IPv6 = Flow Label[19:0]
+	 * - PPPoE = sessionID[15:0]
+	 * - MPLS = Outer label[19:0]
+ * - UPAR = Selected[31:0] with bit mask
+ */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_TUNNEL_ID (UINT32_C(0x2) << 6)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+	 * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the 32b metadata from the
+ * prepended header (chdr_data).
+ */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_CHDR_DATA (UINT32_C(0x3) << 6)
+ /*
+ * The {metadata1, metadata0} fields contain the vtag
+ * information:
+ *
+ * - vtag[19:0] = {valid, tpid_sel[2:0], pri[2:0], de, vid[11:0]}
+ *
+ * The metadata2 field contains the outer_l3_offset,
+ * inner_l2_offset, inner_l3_offset, and inner_l4_size.
+ *
+ * - metadata2[8:0] contains the outer_l3_offset.
+ * - metadata2[17:9] contains the inner_l2_offset.
+ * - metadata2[26:18] contains the inner_l3_offset.
+ * - metadata2[31:27] contains the inner_l4_size.
+ */
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_HDR_OFFSET (UINT32_C(0x4) << 6)
+ #define CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_LAST CQ_RES_UD_CFA_V3_FLAGS_META_FORMAT_HDR_OFFSET
+ /*
+ * This value will be returned in the completion if the completion is
+ * signaled.
+ */
+ uint32_t opaque;
+} cq_res_ud_cfa_v3_t, *pcq_res_ud_cfa_v3_t;
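+
+/*
+ * An illustrative sketch (helper names are assumptions): reassembling
+ * the 24b DETH Source QP from SRC_QP_HIGH and src_qp_low, and pulling
+ * out the QID that this CQE carries in place of a QP handle.
+ */
+static inline uint32_t
+cq_res_ud_cfa_v3_src_qp(const cq_res_ud_cfa_v3_t *cqe)
+{
+	uint32_t hi = (cqe->qid_cfa_metadata1_src_qp_high &
+	    CQ_RES_UD_CFA_V3_SRC_QP_HIGH_MASK) >>
+	    CQ_RES_UD_CFA_V3_SRC_QP_HIGH_SFT;
+
+	/* Upper 8b from this CQE word, lower 16b from src_qp_low. */
+	return ((hi << 16) | cqe->src_qp_low);
+}
+
+static inline uint32_t
+cq_res_ud_cfa_v3_qid(const cq_res_ud_cfa_v3_t *cqe)
+{
+	/* The driver maps this QID back to a QP handle (see above). */
+	return (cqe->qid_cfa_metadata1_src_qp_high &
+	    CQ_RES_UD_CFA_V3_QID_MASK);
+}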
+
/* nq_base (size:128b/16B) */
typedef struct nq_base {
@@ -74648,8 +87973,8 @@ typedef struct nq_base {
/*
* This field indicates the exact type of the completion.
* By convention, the LSB identifies the length of the
- * record in 16B units. Even values indicate 16B
- * records. Odd values indicate 32B
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
* records.
*/
#define NQ_BASE_TYPE_MASK UINT32_C(0x3f)
@@ -74664,7 +87989,9 @@ typedef struct nq_base {
#define NQ_BASE_TYPE_QP_EVENT UINT32_C(0x38)
/* Function Async Notification */
#define NQ_BASE_TYPE_FUNC_EVENT UINT32_C(0x3a)
- #define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_FUNC_EVENT
+ /* NQ Reassign Notification */
+ #define NQ_BASE_TYPE_NQ_REASSIGN UINT32_C(0x3c)
+ #define NQ_BASE_TYPE_LAST NQ_BASE_TYPE_NQ_REASSIGN
/* info10 is 10 b */
#define NQ_BASE_INFO10_MASK UINT32_C(0xffc0)
#define NQ_BASE_INFO10_SFT 6
@@ -74676,8 +88003,8 @@ typedef struct nq_base {
uint64_t info63_v;
/*
* This value is written by the NIC such that it will be different
- * for each pass through the completion queue. The even passes
- * will write 1. The odd passes will write 0.
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
#define NQ_BASE_V UINT32_C(0x1)
/* info63 is 63 b */
@@ -74693,8 +88020,8 @@ typedef struct nq_cn {
/*
* This field indicates the exact type of the completion.
* By convention, the LSB identifies the length of the
- * record in 16B units. Even values indicate 16B
- * records. Odd values indicate 32B
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
* records.
*/
#define NQ_CN_TYPE_MASK UINT32_C(0x3f)
@@ -74706,25 +88033,31 @@ typedef struct nq_cn {
* This field carries the toggle value that must be used to
* re-arm this CQ. The toggle value should be copied into the
* doorbell used to CQ_ARMENA, CQ_ARMALL or CQ_ARMSE doorbells.
+ *
+ * This value is used by HW to detect old and stale CQ_ARMENA,
+ * CQ_ARMALL, or CQ_ARMSE doorbells that are caused by having
+ * a backup doorbell location or by PCI or other reordering
+ * problems. Only the doorbells that match the latest value of
+ * toggle will be honored.
*/
#define NQ_CN_TOGGLE_MASK UINT32_C(0xc0)
#define NQ_CN_TOGGLE_SFT 6
uint16_t reserved16;
/*
* This is an application level ID used to identify the
- * CQ. This field carries the lower 32b of the value.
+ * CQ. This field carries the lower 32b of the value.
*/
uint32_t cq_handle_low;
uint32_t v;
/*
* This value is written by the NIC such that it will be different
- * for each pass through the completion queue. The even passes
- * will write 1. The odd passes will write 0.
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
#define NQ_CN_V UINT32_C(0x1)
/*
* This is an application level ID used to identify the
- * CQ. This field carries the upper 32b of the value.
+ * CQ. This field carries the upper 32b of the value.
*/
uint32_t cq_handle_high;
} nq_cn_t, *pnq_cn_t;
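+
+/*
+ * A small sketch: the toggle that must be echoed into subsequent
+ * CQ_ARMENA/CQ_ARMALL/CQ_ARMSE doorbells lives in bits [7:6] of the
+ * NQE type word. The helper takes the raw 16b type field so it does
+ * not assume a structure field name.
+ */
+static inline uint8_t
+nq_cn_toggle(uint16_t type_word)
+{
+	return ((type_word & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT);
+}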
@@ -74737,9 +88070,8 @@ typedef struct nq_srq_event {
/*
* This field indicates the exact type of the completion.
* By convention, the LSB identifies the length of the
- * record in 16B units. Even values indicate 16B
- * records. Odd values indicate 32B
- * records.
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B records.
*/
#define NQ_SRQ_EVENT_TYPE_MASK UINT32_C(0x3f)
#define NQ_SRQ_EVENT_TYPE_SFT 0
@@ -74764,20 +88096,20 @@ typedef struct nq_srq_event {
uint16_t reserved16;
/*
* This is the SRQ handle value for the queue that has
- * reached it's event threshold. This field carries the
+ * reached its event threshold. This field carries the
* lower 32b of the value.
*/
uint32_t srq_handle_low;
uint32_t v;
/*
* This value is written by the NIC such that it will be different
- * for each pass through the completion queue. The even passes
- * will write 1. The odd passes will write 0.
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
#define NQ_SRQ_EVENT_V UINT32_C(0x1)
/*
* This is the SRQ handle value for the queue that has
- * reached it's event threshold. This field carries the
+ * reached its event threshold. This field carries the
* upper 32b of the value.
*/
uint32_t srq_handle_high;
@@ -74791,8 +88123,8 @@ typedef struct nq_dbq_event {
/*
* This field indicates the exact type of the completion.
* By convention, the LSB identifies the length of the
- * record in 16B units. Even values indicate 16B
- * records. Odd values indicate 32B
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B
* records.
*/
#define NQ_DBQ_EVENT_TYPE_MASK UINT32_C(0x3f)
@@ -74826,14 +88158,14 @@ typedef struct nq_dbq_event {
uint32_t v;
/*
* This value is written by the NIC such that it will be different
- * for each pass through the completion queue. The even passes
- * will write 1. The odd passes will write 0.
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
*/
#define NQ_DBQ_EVENT_V UINT32_C(0x1)
uint32_t db_type_db_xid;
/*
* DB 'XID' field from doorbell that crossed the async event
- * threshold. This is a QPID, SID, or CID, depending on
+ * threshold. This is a QPID, SID, or CID, depending on
* the db_type field.
*/
#define NQ_DBQ_EVENT_DB_XID_MASK UINT32_C(0xfffff)
@@ -74846,6 +88178,46 @@ typedef struct nq_dbq_event {
#define NQ_DBQ_EVENT_DB_TYPE_SFT 28
} nq_dbq_event_t, *pnq_dbq_event_t;
+/*
+ * This completion indicates that the NQ Reassign doorbell has been
+ * executed by the CQ processing block and no further NQE will arrive
+ * for this CQ on this NQ.
+ */
+/* nq_reassign (size:128b/16B) */
+
+typedef struct nq_reassign {
+ uint16_t type;
+ /*
+ * This field indicates the exact type of the completion.
+ * By convention, the LSB identifies the length of the
+ * record in 16B units. Even values indicate 16B
+ * records. Odd values indicate 32B records.
+ */
+ #define NQ_REASSIGN_TYPE_MASK UINT32_C(0x3f)
+ #define NQ_REASSIGN_TYPE_SFT 0
+ /* NQ Reassign Notification */
+ #define NQ_REASSIGN_TYPE_NQ_REASSIGN UINT32_C(0x3c)
+ #define NQ_REASSIGN_TYPE_LAST NQ_REASSIGN_TYPE_NQ_REASSIGN
+ uint16_t reserved16;
+ /*
+ * This is an application level ID used to identify the
+ * CQ. This field carries the lower 32b of the value.
+ */
+ uint32_t cq_handle_low;
+ uint32_t v;
+ /*
+ * This value is written by the NIC such that it will be different
+ * for each pass through the completion queue. The even passes
+ * will write 1. The odd passes will write 0.
+ */
+ #define NQ_REASSIGN_V UINT32_C(0x1)
+ /*
+ * This is an application level ID used to identify the
+ * CQ. This field carries the upper 32b of the value.
+ */
+ uint32_t cq_handle_high;
+} nq_reassign_t, *pnq_reassign_t;
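+
+/*
+ * Sketch (helper name is illustrative): the 64b application-level CQ
+ * handle arrives split across two 32b fields and is recombined as
+ * shown.
+ */
+static inline uint64_t
+nq_reassign_cq_handle(const nq_reassign_t *nqe)
+{
+	return (((uint64_t)nqe->cq_handle_high << 32) |
+	    nqe->cq_handle_low);
+}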
+
/* Input Read Request Queue (IRRQ) Message */
/* xrrq_irrq (size:256b/32B) */
@@ -74874,9 +88246,9 @@ typedef struct xrrq_irrq {
uint32_t msn;
/*
* The value of QPC.pending_ack_msn after it is incremented as a
- * result of receiving the read/atomic request. IRRQ.msn-1 will be
- * placed in the MSN field of the first response and IRRQ.msn will
- * placed in the MSN field of the last or only response.
+ * result of receiving the read/atomic request. IRRQ.msn-1 will
+ * be placed in the MSN field of the first response and IRRQ.msn
+ * will be placed in the MSN field of the last or only response.
*/
#define XRRQ_IRRQ_MSN_MASK UINT32_C(0xffffff)
#define XRRQ_IRRQ_MSN_SFT 0
@@ -74892,7 +88264,10 @@ typedef struct xrrq_irrq {
uint64_t va_or_atomic_result;
/* The key to the MR/W in the request */
uint32_t rdma_r_key;
- /* Length in bytes of the data requested. Length must be 8 if type is atomic. */
+ /*
+ * Length in bytes of the data requested. Length must be 8 if type is
+ * atomic.
+ */
uint32_t length;
} xrrq_irrq_t, *pxrrq_irrq_t;
@@ -74921,18 +88296,21 @@ typedef struct xrrq_orrq {
* If num_sges is 2 or more for an RDMA Read request, then
* the first_sge_phy_or_sing_sge_va field carries the
* physical address in host memory where the first sge is
- * stored. The single_sge_l_key and single_sge_size fields
+ * stored. The single_sge_l_key and single_sge_size fields
* are unused in this case.
*
* A special case is a zero-length, zero-sge RDMA read request
- * WQE. In this situation, num_sges will be 1. However,
+ * WQE. In this situation, num_sges will be 1. However,
* first_sge_phy_or_sing_sge_va, single_sge_l_key, and
* single_sge_size will all be populated with zeros.
*/
#define XRRQ_ORRQ_NUM_SGES_MASK UINT32_C(0xf800)
#define XRRQ_ORRQ_NUM_SGES_SFT 11
uint16_t reserved16;
- /* Length in bytes of the data requested. Length must be 8 if type is atomic. */
+ /*
+ * Length in bytes of the data requested. Length must be 8 if type is
+ * atomic.
+ */
uint32_t length;
uint32_t psn;
/* The PSN of the outstanding outgoing request */
@@ -74968,25 +88346,25 @@ typedef struct xrrq_orrq {
typedef struct ptu_pte {
uint64_t page_next_to_last_last_valid;
/*
- * This field indicates if the PTE is valid. A value of '0'
- * indicates that the page is not valid. A value of '1'
- * indicates that the page is valid. A reference to an
+ * This field indicates if the PTE is valid. A value of '0'
+ * indicates that the page is not valid. A value of '1'
+ * indicates that the page is valid. A reference to an
* invalid page will return a PTU error.
*/
#define PTU_PTE_VALID UINT32_C(0x1)
/*
* This field is used only for "ring" PBLs that are used for
- * SQ, RQ, SRQ, or CQ structures. For all other PBL structures,
- * this bit should be zero. When this bit is '1', it indicates
+ * SQ, RQ, SRQ, or CQ structures. For all other PBL structures,
+ * this bit should be zero. When this bit is '1', it indicates
* that the page pointed to by this PTE is the last page in the
- * ring. A prefetch for the ring should use the first PTE in
+ * ring. A prefetch for the ring should use the first PTE in
* the PBL.
*/
#define PTU_PTE_LAST UINT32_C(0x2)
/*
* This field is used only for "ring" PBLs that are used for
- * SQ, RQ, SRQ, or CQ structures. For all other PBL structures,
- * this bit should be zero. When this bit is '1', it indicates
+ * SQ, RQ, SRQ, or CQ structures. For all other PBL structures,
+ * this bit should be zero. When this bit is '1', it indicates
* that this is the next-to-last page of the PBL.
*/
#define PTU_PTE_NEXT_TO_LAST UINT32_C(0x4)
@@ -74995,10 +88373,10 @@ typedef struct ptu_pte {
#define PTU_PTE_UNUSED_SFT 3
/*
* This is the upper bits of the physical page controlled by
- * this PTE. If the page is larger than 4KB, then the unused
+ * this PTE. If the page is larger than 4KB, then the unused
* lower bits of the page address should be zero.
*/
- #define PTU_PTE_PAGE_MASK UINT32_C(0xfffff000)
+ #define PTU_PTE_PAGE_MASK UINT64_C(0xfffffffffffff000)
#define PTU_PTE_PAGE_SFT 12
} ptu_pte_t, *pptu_pte_t;
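+
+/*
+ * An illustrative composition sketch (assuming a 64b
+ * PTU_PTE_PAGE_MASK): a leaf PTE is the page-aligned physical address
+ * plus the flag bits described above.
+ */
+static inline uint64_t
+ptu_pte_make(uint64_t paddr, int last, int next_to_last)
+{
+	uint64_t pte = (paddr & PTU_PTE_PAGE_MASK) | PTU_PTE_VALID;
+
+	if (last)		/* last page of a ring PBL */
+		pte |= PTU_PTE_LAST;
+	if (next_to_last)	/* next-to-last page of a ring PBL */
+		pte |= PTU_PTE_NEXT_TO_LAST;
+	return (pte);
+}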
@@ -75008,9 +88386,9 @@ typedef struct ptu_pte {
typedef struct ptu_pde {
uint64_t page_valid;
/*
- * This field indicates if the PTE is valid. A value of '0'
- * indicates that the page is not valid. A value of '1'
- * indicates that the page is valid. A reference to an
+ * This field indicates if the PTE is valid. A value of '0'
+ * indicates that the page is not valid. A value of '1'
+ * indicates that the page is valid. A reference to an
* invalid page will return a PTU error.
*/
#define PTU_PDE_VALID UINT32_C(0x1)
@@ -75019,15 +88397,15 @@ typedef struct ptu_pde {
#define PTU_PDE_UNUSED_SFT 1
/*
* This is the upper bits of the physical page controlled by
- * this PTE. If the page is larger than 4KB, then the unused
+ * this PTE. If the page is larger than 4KB, then the unused
* lower bits of the page address should be zero.
*/
- #define PTU_PDE_PAGE_MASK UINT32_C(0xfffff000)
+ #define PTU_PDE_PAGE_MASK UINT64_C(0xfffffffffffff000)
#define PTU_PDE_PAGE_SFT 12
} ptu_pde_t, *pptu_pde_t;
/*
- * This is the 64b doorbell format. The host writes this message
+ * This is the 64b doorbell format. The host writes this message
* format directly to byte offset 0 of the appropriate doorbell page.
*/
/* dbc_dbc (size:64b/8B) */
@@ -75067,11 +88445,11 @@ typedef struct dbc_dbc {
/*
* The toggle value is used in CQ_ARMENA, CQ_ARMSE, CQ_ARMALL,
* SRQ_ARMENA, SRQ_ARM, and CQ_CUTOFF_ACK doorbells to qualify the
- * doorbell as valid. This value should be taken from the latest
+ * doorbell as valid. This value should be taken from the latest
* NQE or cutoff completion.
*
* Doorbells of the above types with the wrong toggle value will
- * be ignored. This is how old values in of backup doorbells
+ * be ignored. This is how old values in backup doorbells
* are ignored.
*/
#define DBC_DBC_TOGGLE_MASK UINT32_C(0x6000000)
@@ -75081,7 +88459,7 @@ typedef struct dbc_dbc {
* This value identifies the resource that the doorbell is intended
* to notify.
*
- * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
+ * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
* CQ, this is the CID. For NQ, this is the NID.
*
* Bits [19:16] of this values must be zero for a SID value.
@@ -75119,20 +88497,20 @@ typedef struct dbc_dbc {
#define DBC_DBC_TYPE_MASK UINT32_C(0xf0000000)
#define DBC_DBC_TYPE_SFT 28
/*
- * This is a SQ producer index update. It indicates one or more
+ * This is a SQ producer index update. It indicates one or more
* new entries have been written to the SQ for the QPID indicated
* on the xID field. This type is valid for L2, RoCE and Engine
* path.
*/
#define DBC_DBC_TYPE_SQ (UINT32_C(0x0) << 28)
/*
- * This is a RQ producer index update. It indicates one or more
+ * This is a RQ producer index update. It indicates one or more
* new entries have been written to the RQ for the QPID indicated
* on the xID field. This type is valid for RoCE path.
*/
#define DBC_DBC_TYPE_RQ (UINT32_C(0x1) << 28)
/*
- * This is a SRQ producer index update. It indicates one or more
+ * This is a SRQ producer index update. It indicates one or more
* new entries have been written to the SRQ for the SID indicated
* on the xID field. This type is valid for L2 and RoCE path.
*/
@@ -75146,7 +88524,7 @@ typedef struct dbc_dbc {
*/
#define DBC_DBC_TYPE_SRQ_ARM (UINT32_C(0x3) << 28)
/*
- * This is a CQ consumer index update. It indicates one or more
+ * This is a CQ consumer index update. It indicates one or more
* entries have been processed off the CQ indicated on the xID
* field.This type is valid for L2, RoCE and Engine path.
*/
@@ -75163,7 +88541,7 @@ typedef struct dbc_dbc {
*/
#define DBC_DBC_TYPE_CQ_ARMALL (UINT32_C(0x6) << 28)
/*
- * This is a CQ arm enable message. This message must be sent
+ * This is a CQ arm enable message. This message must be sent
* from the privileged driver before a new CQ_ARMSE or CQ_ARMALL
* message will be accepted.
*
@@ -75173,7 +88551,7 @@ typedef struct dbc_dbc {
#define DBC_DBC_TYPE_CQ_ARMENA (UINT32_C(0x7) << 28)
/*
* This doorbell command enables the SRQ async event
- * to be armed. This message must be sent from the privileged
+ * to be armed. This message must be sent from the privileged
* driver before a new SRQ_ARM message will be accepted.
* The xID field must identify the SID that is begin enabled
* for arm.
@@ -75222,7 +88600,202 @@ typedef struct dbc_dbc {
} dbc_dbc_t, *pdbc_dbc_t;
/*
- * This is the 32b doorbell format. The host writes this message
+ * This is the 64b doorbell copy format. The host writes this DB to
+ * the doorbell copy memory. During the HW doorbell drop recovery
+ * process, it is DMAed into the HW to recover the dropped doorbell.
+ */
+/* dbc_dbc64 (size:64b/8B) */
+
+typedef struct dbc_dbc64 {
+ uint64_t dbc;
+ /*
+ * This value is the index being written.
+ *
+ * For SQ, RQ, and SRQ, this is the producer index and the unit is
+ * 16B of queue space for L2 path and for the Engine path. For RoCE
+ * path there is a legacy mode with 128B unit size and a variable
+ * size WQE mode with 16B unit size of queue space. This mode is
+ * configured in the QP.
+ *
+ * For CQ this is the consumer index and the unit is 32B of queue
+ * space for the RoCE/Engine path and the CQ index unit is 16B of
+ * queue space for the L2 path.
+ *
+ * For NQ this is the consumer index and the unit is always 16B of
+ * queue space.
+ *
+ * The index size is 24b for L2 and engine paths and 16b for the
+ * RoCE path. Unused bits should be written as zero.
+ */
+ #define DBC_DBC64_INDEX_MASK UINT32_C(0xffffff)
+ #define DBC_DBC64_INDEX_SFT 0
+ /*
+ * The epoch bit provides a frame of reference for the queue index.
+	 * S/W will toggle this bit in the doorbell each time the index
+	 * range wraps. This allows the receiving HW block to more efficiently
+ * detect out-of-order doorbells and to ignore the older doorbells.
+ * Out-of-order doorbells occur normally during dropped doorbell
+ * recovery.
+ */
+ #define DBC_DBC64_EPOCH UINT32_C(0x1000000)
+ /*
+ * The toggle value is used in CQ_ARMENA, CQ_ARMSE, CQ_ARMALL,
+ * SRQ_ARMENA, SRQ_ARM, and CQ_CUTOFF_ACK doorbells to qualify the
+ * doorbell as valid. This value should be taken from the latest
+ * NQE or cutoff completion.
+ *
+ * Doorbells of the above types with the wrong toggle value will
+	 * be ignored. This is how old values in backup doorbells
+ * are ignored.
+ */
+ #define DBC_DBC64_TOGGLE_MASK UINT32_C(0x6000000)
+ #define DBC_DBC64_TOGGLE_SFT 25
+ /*
+ * This value identifies the resource that the doorbell is intended
+ * to notify.
+ *
+ * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
+ * CQ, this is the CID. For NQ, this is the NID.
+ *
+	 * Bits [51:48] of this value must be zero for a SID value.
+ */
+ #define DBC_DBC64_XID_MASK UINT64_C(0xfffff00000000)
+ #define DBC_DBC64_XID_SFT 32
+ /*
+ * This value defines the intended doorbell path between RoCE and
+ * L2.
+ */
+ #define DBC_DBC64_PATH_MASK UINT64_C(0x300000000000000)
+ #define DBC_DBC64_PATH_SFT 56
+ /* This is a RoCE doorbell message. */
+ #define DBC_DBC64_PATH_ROCE (UINT64_C(0x0) << 56)
+ /* This is a L2 doorbell message. */
+ #define DBC_DBC64_PATH_L2 (UINT64_C(0x1) << 56)
+ /* Engine path doorbell. */
+ #define DBC_DBC64_PATH_ENGINE (UINT64_C(0x2) << 56)
+ #define DBC_DBC64_PATH_LAST DBC_DBC64_PATH_ENGINE
+ /*
+	 * This indicates a valid doorbell update. It should be set for each
+	 * doorbell written to the chip and set when the doorbell message is
+	 * written to the backup doorbell location. The bit should be cleared
+ * in the backup doorbell location at time zero to indicate that the
+ * backup doorbell has not yet been written.
+ */
+ #define DBC_DBC64_VALID UINT64_C(0x400000000000000)
+ /*
+ * When this bit is set to one, the chip will capture debug
+ * information for the doorbell ring. This is intended to only be
+ * used on SQ doorbell rings.
+ */
+ #define DBC_DBC64_DEBUG_TRACE UINT64_C(0x800000000000000)
+ /* This value identifies the type of doorbell being written. */
+ #define DBC_DBC64_TYPE_MASK UINT64_C(0xf000000000000000)
+ #define DBC_DBC64_TYPE_SFT 60
+ /*
+ * This is a SQ producer index update. It indicates one or more
+ * new entries have been written to the SQ for the QPID indicated
+ * on the xID field. This type is valid for L2, RoCE and Engine
+ * path.
+ */
+ #define DBC_DBC64_TYPE_SQ (UINT64_C(0x0) << 60)
+ /*
+ * This is a RQ producer index update. It indicates one or more
+ * new entries have been written to the RQ for the QPID indicated
+ * on the xID field. This type is valid for RoCE path.
+ */
+ #define DBC_DBC64_TYPE_RQ (UINT64_C(0x1) << 60)
+ /*
+ * This is a SRQ producer index update. It indicates one or more
+ * new entries have been written to the SRQ for the SID indicated
+ * on the xID field. This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC64_TYPE_SRQ (UINT64_C(0x2) << 60)
+ /*
+ * This doorbell command arms the SRQ async event.
+	 * The xID field must identify the SID that is being armed.
+	 * The index field will set the arm threshold such that
+	 * a notification will be generated if fewer than that number
+	 * of SRQ entries are posted. This type is valid for RoCE path.
+ */
+ #define DBC_DBC64_TYPE_SRQ_ARM (UINT64_C(0x3) << 60)
+ /*
+ * This is a CQ consumer index update. It indicates one or more
+ * entries have been processed off the CQ indicated on the xID
+	 * field. This type is valid for L2, RoCE and Engine path.
+ */
+ #define DBC_DBC64_TYPE_CQ (UINT64_C(0x4) << 60)
+ /*
+	 * This is a CQ consumer index update that also arms the CQ for
+ * solicited events. This type is valid for RoCE path.
+ */
+ #define DBC_DBC64_TYPE_CQ_ARMSE (UINT64_C(0x5) << 60)
+ /*
+ * This is a CQ consumer index update that also arms the CQ
+ * for any new CQE. This type is valid for L2, RoCE and Engine
+ * path.
+ */
+ #define DBC_DBC64_TYPE_CQ_ARMALL (UINT64_C(0x6) << 60)
+ /*
+ * This is a CQ arm enable message. This message must be sent
+ * from the privileged driver before a new CQ_ARMSE or CQ_ARMALL
+ * message will be accepted.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_DBC64_TYPE_CQ_ARMENA (UINT64_C(0x7) << 60)
+ /*
+ * This doorbell command enables the SRQ async event
+ * to be armed. This message must be sent from the privileged
+ * driver before a new SRQ_ARM message will be accepted.
+	 * The xID field must identify the SID that is being enabled
+ * for arm.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_DBC64_TYPE_SRQ_ARMENA (UINT64_C(0x8) << 60)
+ /*
+ * This doorbell command indicates that the cutoff CQE has
+ * been processed and the driver is now processing completions
+ * from the new CQ.
+ *
+ * The index field for this doorbell type must be zero.
+ */
+ #define DBC_DBC64_TYPE_CQ_CUTOFF_ACK (UINT64_C(0x9) << 60)
+ /*
+ * This is a NQ consumer index update. It indicates one or more
+ * entries have been processed off the NQ indicated on the xID
+ * field. This type is valid for L2, RoCE and Engine path.
+ */
+ #define DBC_DBC64_TYPE_NQ (UINT64_C(0xa) << 60)
+ /*
+ * This is a NQ consumer index update that also arms the NQ for
+ * any new NQE. This type is valid for L2, RoCE and Engine path.
+ */
+ #define DBC_DBC64_TYPE_NQ_ARM (UINT64_C(0xb) << 60)
+ /*
+ * This is a NQ consumer index update that also arms the NQ for
+ * any new NQE. It is used for the legacy INT mask. This type
+ * is valid for L2, RoCE and Engine path.
+ */
+ #define DBC_DBC64_TYPE_NQ_MASK (UINT64_C(0xe) << 60)
+ /*
+ * This doorbell command is used during doorbell moderation
+ * to consume system BW and help prevent doorbell FIFO
+ * overflow.
+ *
+	 * All other fields should be zero for a NULL doorbell.
+	 * For doorbell recovery, a NULL doorbell type in the Application
+ * table indicates that it is the last QP entry for the function.
+ * This type is valid for L2, RoCE and Engine path.
+ */
+ #define DBC_DBC64_TYPE_NULL (UINT64_C(0xf) << 60)
+ #define DBC_DBC64_TYPE_LAST DBC_DBC64_TYPE_NULL
+} dbc_dbc64_t, *pdbc_dbc64_t;
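+
+/*
+ * A hedged composition sketch for the doorbell copy above; the helper
+ * and its parameters are illustrative. Callers pass the pre-shifted
+ * PATH_* and TYPE_* constants, e.g.
+ * dbc_dbc64_make(prod, epoch, qpid, DBC_DBC64_PATH_ROCE,
+ * DBC_DBC64_TYPE_SQ).
+ */
+static inline uint64_t
+dbc_dbc64_make(uint32_t index, int epoch, uint32_t xid, uint64_t path,
+    uint64_t type)
+{
+	uint64_t db = index & DBC_DBC64_INDEX_MASK;
+
+	if (epoch)	/* toggled by S/W each time the index wraps */
+		db |= DBC_DBC64_EPOCH;
+	db |= ((uint64_t)xid << DBC_DBC64_XID_SFT) & DBC_DBC64_XID_MASK;
+	db |= (path & DBC_DBC64_PATH_MASK) | (type & DBC_DBC64_TYPE_MASK);
+	return (db | DBC_DBC64_VALID);
+}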
+
+/*
+ * This is the 32b doorbell format. The host writes this message
* format directly to byte offset 8 of the appropriate doorbell page.
*/
/* dbc_dbc32 (size:32b/4B) */
@@ -75233,7 +88806,7 @@ typedef struct dbc_dbc32 {
* This value identifies the resource that the doorbell is intended
* to notify.
*
- * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
+ * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
* CQ, this is the CID.
*
* Bits [19:16] of this values must be zero for a SID value.
@@ -75255,8 +88828,8 @@ typedef struct dbc_dbc32 {
* When abs=0, this value is the value to add to the appropriate
* index value.
*
- * When abs=1, this value is the new value for the index. Absolute
- * value is used when the queue is being wrapped. When abs=1,
+ * When abs=1, this value is the new value for the index. Absolute
+ * value is used when the queue is being wrapped. When abs=1,
* the incr value follows the same rules as the index value
* in the 64b doorbell.
*/
@@ -75268,46 +88841,47 @@ typedef struct dbc_dbc32 {
#define DBC_DBC32_TYPE_MASK UINT32_C(0xe0000000)
#define DBC_DBC32_TYPE_SFT 29
/*
- * This is a SQ producer index update. It indicates one or more
- * new entries have been written to the SQ for the QPID indicated
- * on the xID field.
+ * This is a SQ producer index update. It indicates one or more
+ * new entries have been written to the SQ for the QPID
+ * indicated on the xID field.
*/
#define DBC_DBC32_TYPE_SQ (UINT32_C(0x0) << 29)
#define DBC_DBC32_TYPE_LAST DBC_DBC32_TYPE_SQ
} dbc_dbc32_t, *pdbc_dbc32_t;
/*
- * This is the 64b Push Start doorbell format. The host writes this message
- * format directly to offset of each push associated WCB (write combine
- * buffer) within doorbell page. WCB#0 = offset 16, WCB#1 = offset 24,
- * WCB#2 = offset 32, ... The start doorbell is followed by write combining
- * data to the WCB and then that is followed by a end doorbell.
+ * This is the 64b Push Start doorbell format. The host writes this
+ * message format directly to the offset of each push-associated WCB
+ * (write combine buffer) within the doorbell page. WCB#0 = offset 16,
+ * WCB#1 = offset 24, WCB#2 = offset 32, ... The start doorbell is
+ * followed by write-combining data to the WCB and then by an end
+ * doorbell.
*/
/* db_push_start (size:64b/8B) */
typedef struct db_push_start {
uint64_t db;
/*
- * This is the push index and should be the SQ slot index, aligned
- * to the start of the corresponding push WQE/packet in the Send
- * Queue.
+ * This is the push index and should be the SQ slot index,
+ * aligned to the start of the corresponding push WQE/packet in
+ * the Send Queue.
*
* The index size is 16b for RoCE path and 24b for L2 and Engine
* paths. Any unused bits should be written as zero.
*
* The index unit is 16B for L2 path. For RoCE there is a legacy
- * mode with 128B unit size and a variable size mode with 16B unit
- * size. For Engine mode, the unit size is 16B, where RQEs are
- * always 128B - so it always increments by eight 16B slots per
- * RQE.
- *
- * > This field is not used by the older versions of the chip, but
- * > is used in this and future revisions of the chip. In older
- * > versions of the chip, the driver is required to complete the
- * > push doorbell operation by following it with a regular doorbell
- * > which will be used to properly increment the producer index.
- * > This extra doorbell write is not needed on this and future
- * > versions of the chip.
+ * mode with 128B unit size and a variable size mode with 16B
+ * unit size. For Engine mode, the unit size is 16B, where RQEs
+ * are always 128B - so it always increments by eight 16B slots
+ * per RQE.
+ *
+ * > This field is not used by the older versions of the chip,
+ * > but is used in this and future revisions of the chip. In
+ * > older versions of the chip, the driver is required to
+ * > complete the push doorbell operation by following it with a
+ * > regular doorbell which will be used to properly increment
+ * > the producer index. This extra doorbell write is not needed
+ * > on this and future versions of the chip.
*/
#define DB_PUSH_START_DB_INDEX_MASK UINT32_C(0xffffff)
#define DB_PUSH_START_DB_INDEX_SFT 0
@@ -75319,8 +88893,8 @@ typedef struct db_push_start {
#define DB_PUSH_START_DB_PI_LO_MASK UINT32_C(0xff000000)
#define DB_PUSH_START_DB_PI_LO_SFT 24
/*
- * This value identifies the resource that the doorbell is intended
- * to notify.
+ * This value identifies the resource that the doorbell is
+ * intended to notify.
*
* This is the QPID.
*/
@@ -75338,14 +88912,14 @@ typedef struct db_push_start {
#define DB_PUSH_START_DB_TYPE_SFT 60
/*
* This is a SQ producer index update for Push. It indicates
- * one or more new entries have been written to the SQ for the
- * QPID indicated on the `xid` field.
+ * one or more new entries have been written to the SQ for
+ * the QPID indicated on the `xid` field.
*/
#define DB_PUSH_START_DB_TYPE_PUSH_START (UINT32_C(0xc)L << 60)
/*
* This is a SQ producer index update for Push. It indicates
- * one or more new entries have been written to the SQ for the
- * QPID indicated on the `xid` field.
+ * one or more new entries have been written to the SQ for
+ * the QPID indicated on the `xid` field.
*/
#define DB_PUSH_START_DB_TYPE_PUSH_END (UINT32_C(0xd)L << 60)
#define DB_PUSH_START_DB_TYPE_LAST DB_PUSH_START_DB_TYPE_PUSH_END
@@ -75355,32 +88929,33 @@ typedef struct db_push_start {
* This is the 64b Push End doorbell format. The host writes this message
* format directly to offset of each push associated WCB (write combine
* buffer) within doorbell page. WCB#0 = offset 16, WCB#1 = offset 24,
- * WCB#2 = offset 32, ... The start doorbell is followed by write combining
- * data to the WCB and then that is followed by a end doorbell.
+ * WCB#2 = offset 32, ... The start doorbell is followed by write
+ * combining data to the WCB and then that is followed by an end doorbell.
*/
/* db_push_end (size:64b/8B) */
typedef struct db_push_end {
uint64_t db;
/*
- * This is the producer index and should be the queue index of the
- * last WQE written plus the length field contained in that WQE.
- * For example, if the length is 8 index units and the WQE was
- * written to the first location in the queue (zero), this index
- * should be written to 8. The index should point to the start of
- * the first location that has not been filled in with WQE data.
+ * This is the producer index and should be the queue index of
+ * the last WQE written plus the length field contained in that
+ * WQE. For example, if the length is 8 index units and the WQE
+ * was written to the first location in the queue (zero), this
+ * index should be written to 8. The index should point to the
+ * start of the first location that has not been filled in with
+ * WQE data.
*
- * For L2 and Engine SQ, the index unit is 16B. For RoCE there are
- * two modes. For Legacy fixed size RQE mode, the unit is 128B. For
- * variable size RQE mode, the unit is 16B.
+ * For L2 and Engine SQ, the index unit is 16B. For RoCE there
+ * are two modes. For Legacy fixed size RQE mode, the unit is
+ * 128B. For variable size RQE mode, the unit is 16B.
*
* The index size is 24b for L2 and engine paths and 16b for the
* RoCE path. Unused bits should be written as zero.
*
- * > In past revisions of this chip, this field was the push index
- * > rather than the producer index. For this version of the chip
- * > and future versions of the chip, this field must be the
- * > producer index, as described above.
+ * > In past revisions of this chip, this field was the push
+ * > index rather than the producer index. For this version of
+ * > the chip and future versions of the chip, this field must be
+ * > the producer index, as described above.
* >
* > Also, in past revisions of this chip, an additional
* > doorbell write was needed to communicate the producer index.
@@ -75397,8 +88972,8 @@ typedef struct db_push_end {
#define DB_PUSH_END_DB_PI_LO_MASK UINT32_C(0xff000000)
#define DB_PUSH_END_DB_PI_LO_SFT 24
/*
- * This value identifies the resource that the doorbell is intended
- * to notify.
+ * This value identifies the resource that the doorbell is
+ * intended to notify.
*
* This is the QPID.
*/
@@ -75435,14 +89010,14 @@ typedef struct db_push_end {
#define DB_PUSH_END_DB_TYPE_SFT 60
/*
* This is a SQ producer index update for Push. It indicates
- * one or more new entries have been written to the SQ for the
- * QPID indicated on the `xid` field.
+ * one or more new entries have been written to the SQ for
+ * the QPID indicated on the `xid` field.
*/
#define DB_PUSH_END_DB_TYPE_PUSH_START (UINT32_C(0xc)L << 60)
/*
* This is a SQ producer index update for Push. It indicates
- * one or more new entries have been written to the SQ for the
- * QPID indicated on the `xid` field.
+ * one or more new entries have been written to the SQ for
+ * the QPID indicated on the `xid` field.
*/
#define DB_PUSH_END_DB_TYPE_PUSH_END (UINT32_C(0xd)L << 60)
#define DB_PUSH_END_DB_TYPE_LAST DB_PUSH_END_DB_TYPE_PUSH_END
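The push sequence described above (start doorbell, write-combined WQE bytes, end doorbell) can be sketched as follows. This is a minimal illustration only: the xID position at bit 32, the purely sequential 8B stores into the WCB, and the absence of barriers are assumptions, not the actual MMIO discipline; the literal type values come from the defines above.

#include <stdint.h>

static void
push_wqe_sketch(volatile uint64_t *wcb, const uint64_t *wqe, int nqw,
    uint32_t qpid, uint32_t push_idx, uint32_t prod_idx)
{
	/* Start doorbell: push index = SQ slot where the WQE begins. */
	wcb[0] = (UINT64_C(0xc) << 60) |		/* TYPE_PUSH_START */
	    ((uint64_t)qpid << 32) |			/* assumed xID spot */
	    (push_idx & 0xffffff);			/* DB_INDEX field */

	/* Write-combined copy of the WQE payload into the WCB. */
	for (int i = 0; i < nqw; i++)
		wcb[1 + i] = wqe[i];

	/* End doorbell: producer index just past the last WQE written. */
	wcb[1 + nqw] = (UINT64_C(0xd) << 60) |		/* TYPE_PUSH_END */
	    ((uint64_t)qpid << 32) |
	    (prod_idx & 0xffffff);
}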
@@ -75540,14 +89115,19 @@ typedef struct dbc_absolute_db_32 {
* Out-of-order doorbells occur normally during dropped doorbell
* recovery.
*/
- #define DBC_ABSOLUTE_DB_32_EPOCH UINT32_C(0x10000)
+ #define DBC_ABSOLUTE_DB_32_EPOCH UINT32_C(0x10000)
/*
- * The resize_toggle bit tells that the CQ cutoff is done.
- * Every time CQ is resized by CQ cutoff, this bit toggles when it
- * is done. If this bit toggles, HW can restart to use the resized
- * CQ.
+ * The toggle value is used in CQ_ARMENA, CQ_ARMSE, CQ_ARMALL,
+ * SRQ_ARMENA, SRQ_ARM, and CQ_CUTOFF_ACK doorbells to qualify the
+ * doorbell as valid. This value should be taken from the latest NQE
+ * or cutoff completion.
+ *
+ * Doorbells of the above types with the wrong toggle value will be
+ * ignored. This is how old values in backup doorbells are
+ * ignored.
*/
- #define DBC_ABSOLUTE_DB_32_RESIZE_TOGGLE UINT32_C(0x20000)
+ #define DBC_ABSOLUTE_DB_32_TOGGLE_MASK UINT32_C(0x60000)
+ #define DBC_ABSOLUTE_DB_32_TOGGLE_SFT 17
/*
* This value identifies the resource that the doorbell is intended
* to notify.
@@ -75556,18 +89136,18 @@ typedef struct dbc_absolute_db_32 {
* value into the full xID value by looking up the base xID for this
* particular function and adding the mxID value to that base value.
*/
- #define DBC_ABSOLUTE_DB_32_MXID_MASK UINT32_C(0xfc0000)
- #define DBC_ABSOLUTE_DB_32_MXID_SFT 18
+ #define DBC_ABSOLUTE_DB_32_MXID_MASK UINT32_C(0x1f80000)
+ #define DBC_ABSOLUTE_DB_32_MXID_SFT 19
/*
* This value defines the intended doorbell path between RoCE and
* L2.
*/
- #define DBC_ABSOLUTE_DB_32_PATH_MASK UINT32_C(0x3000000)
- #define DBC_ABSOLUTE_DB_32_PATH_SFT 24
+ #define DBC_ABSOLUTE_DB_32_PATH_MASK UINT32_C(0x6000000)
+ #define DBC_ABSOLUTE_DB_32_PATH_SFT 25
/* This is a RoCE doorbell message. */
- #define DBC_ABSOLUTE_DB_32_PATH_ROCE (UINT32_C(0x0) << 24)
+ #define DBC_ABSOLUTE_DB_32_PATH_ROCE (UINT32_C(0x0) << 25)
/* This is a L2 doorbell message. */
- #define DBC_ABSOLUTE_DB_32_PATH_L2 (UINT32_C(0x1) << 24)
+ #define DBC_ABSOLUTE_DB_32_PATH_L2 (UINT32_C(0x1) << 25)
#define DBC_ABSOLUTE_DB_32_PATH_LAST DBC_ABSOLUTE_DB_32_PATH_L2
/*
 * This indicates it is a valid doorbell update. It should be set for
@@ -75576,35 +89156,29 @@ typedef struct dbc_absolute_db_32 {
* in the backup doorbell location at time zero to indicate that the
* backup doorbell has not yet been written.
*/
- #define DBC_ABSOLUTE_DB_32_VALID UINT32_C(0x4000000)
- /*
- * When this bit is set to one, the chip will capture debug
- * information for the doorbell ring. This is intended to only be
- * used on SQ doorbell rings.
- */
- #define DBC_ABSOLUTE_DB_32_DEBUG_TRACE UINT32_C(0x8000000)
+ #define DBC_ABSOLUTE_DB_32_VALID UINT32_C(0x8000000)
/* This value identifies the type of doorbell being written. */
#define DBC_ABSOLUTE_DB_32_TYPE_MASK UINT32_C(0xf0000000)
#define DBC_ABSOLUTE_DB_32_TYPE_SFT 28
/*
- * This is a SQ producer index update. It indicates one or more
+ * This is a SQ producer index update. It indicates one or more
* new entries have been written to the SQ for the QPID indicated
* on the xID field. This type is valid for L2, RoCE and Engine
* path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_SQ (UINT32_C(0x0) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_SQ (UINT32_C(0x0) << 28)
/*
- * This is a RQ producer index update. It indicates one or more
+ * This is a RQ producer index update. It indicates one or more
* new entries have been written to the RQ for the QPID indicated
* on the xID field. This type is valid for RoCE path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_RQ (UINT32_C(0x1) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_RQ (UINT32_C(0x1) << 28)
/*
- * This is a SRQ producer index update. It indicates one or more
+ * This is a SRQ producer index update. It indicates one or more
* new entries have been written to the SRQ for the SID indicated
* on the xID field. This type is valid for L2 and RoCE path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_SRQ (UINT32_C(0x2) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_SRQ (UINT32_C(0x2) << 28)
/*
* This doorbell command arms the SRQ async event.
 * The xID field must identify the SID that is being armed.
@@ -75614,11 +89188,11 @@ typedef struct dbc_absolute_db_32 {
*/
#define DBC_ABSOLUTE_DB_32_TYPE_SRQ_ARM (UINT32_C(0x3) << 28)
/*
- * This is a CQ consumer index update. It indicates one or more
+ * This is a CQ consumer index update. It indicates one or more
* entries have been processed off the CQ indicated on the xID
 * field. This type is valid for L2, RoCE and Engine path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_CQ (UINT32_C(0x4) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_CQ (UINT32_C(0x4) << 28)
/*
* this is a CQ consumer index update that also arms the CQ for
* solicited events. This type is valid for RoCE path.
@@ -75629,13 +89203,34 @@ typedef struct dbc_absolute_db_32 {
* for any new CQE. This type is valid for L2, RoCE and Engine
* path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMALL (UINT32_C(0x6) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMALL (UINT32_C(0x6) << 28)
+ /*
+ * This is a CQ arm enable message. This message must be sent from
+ * the privileged driver before a new CQ_ARMSE or CQ_ARMALL message
+ * will be accepted from user space (non-privileged doorbell page).
+ * The index and epoch for this doorbell type are unused.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_ABSOLUTE_DB_32_TYPE_CQ_ARMENA (UINT32_C(0x7) << 28)
+ /*
+ * This doorbell command enables the SRQ async event to be armed.
+ * This message must be sent from the privileged driver before a
+ * new SRQ_ARM message will be accepted from user space.
+ * The xID field must identify the SID that is being enabled for
+ * arm. The index and epoch for this doorbell type are unused.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_ABSOLUTE_DB_32_TYPE_SRQ_ARMENA (UINT32_C(0x8) << 28)
/*
* This is a NQ consumer index update. It indicates one or more
* entries have been processed off the NQ indicated on the xID
* field. This type is valid for L2, RoCE and Engine path.
*/
- #define DBC_ABSOLUTE_DB_32_TYPE_NQ (UINT32_C(0xa) << 28)
+ #define DBC_ABSOLUTE_DB_32_TYPE_NQ (UINT32_C(0xa) << 28)
/*
* This is a NQ consumer index update that also arms the NQ for
* any new NQE. This type is valid for L2, RoCE and Engine path.
@@ -75707,20 +89302,20 @@ typedef struct dbc_relative_db_32 {
#define DBC_RELATIVE_DB_32_TYPE_MASK UINT32_C(0xe0000000)
#define DBC_RELATIVE_DB_32_TYPE_SFT 29
/*
- * This is a SQ producer index update. It indicates one or more
+ * This is a SQ producer index update. It indicates one or more
* new entries have been written to the SQ for the QPID indicated
* on the xID field. This type is valid for L2, RoCE and Engine
* path.
*/
#define DBC_RELATIVE_DB_32_TYPE_SQ (UINT32_C(0x0) << 29)
/*
- * This is a SRQ producer index update. It indicates one or more
+ * This is a SRQ producer index update. It indicates one or more
* new entries have been written to the SRQ for the SID indicated
* on the xID field. This type is valid for L2 and RoCE path.
*/
#define DBC_RELATIVE_DB_32_TYPE_SRQ (UINT32_C(0x1) << 29)
/*
- * This is a CQ consumer index update. It indicates one or more
+ * This is a CQ consumer index update. It indicates one or more
* entries have been processed off the CQ indicated on the xID
 * field. This type is valid for L2, RoCE and Engine
*/
@@ -75759,7 +89354,7 @@ typedef struct dbc_relative_db_32 {
/* dbc_drk (size:128b/16B) */
typedef struct dbc_drk {
- uint32_t db_format_linked_last_valid;
+ uint32_t db_format_linked_last_valid_stride_size;
/*
 * This indicates it is a valid entry. It should be set for each
* doorbell written to the chip. The bit should be cleared at time
@@ -75785,11 +89380,55 @@ typedef struct dbc_drk {
*/
#define DBC_DRK_DB_FORMAT_B32A (UINT32_C(0x1) << 3)
#define DBC_DRK_DB_FORMAT_LAST DBC_DRK_DB_FORMAT_B32A
+ /*
+ * This field controls the stride feature. The stride feature is
+ * more bandwidth efficient on the PCIE bus when only a small number
+ * of doorbells are used in each cache line.
+ */
+ #define DBC_DRK_STRIDE_MASK UINT32_C(0x300)
+ #define DBC_DRK_STRIDE_SFT 8
+ /*
+ * When stride is off, the DBR will read all the bytes in
+ * an application page until a NULL doorbell is found or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK_STRIDE_OFF (UINT32_C(0x0) << 8)
+ /*
+ * When stride is 1, the DBR will read the 'size' doorbells,
+ * starting at the next 64B cache line boundary or until
+ * a NULL doorbell is found in the application page or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK_STRIDE_SZ64 (UINT32_C(0x1) << 8)
+ /*
+ * When stride is 2, the DBR will read the 'size' doorbells,
+ * starting at the next 128B cache line boundary or until
+ * a NULL doorbell is found in the application page or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK_STRIDE_SZ128 (UINT32_C(0x2) << 8)
+ #define DBC_DRK_STRIDE_LAST DBC_DRK_STRIDE_SZ128
+ /*
+ * This value controls how many doorbells are read at each stride
+ * when stride mode is in use.
+ */
+ #define DBC_DRK_SIZE_MASK UINT32_C(0xc00)
+ #define DBC_DRK_SIZE_SFT 10
+ /* 4*8B is read at the start of each stride. */
+ #define DBC_DRK_SIZE_FOUR (UINT32_C(0x0) << 10)
+ /* 1*8B is read at the start of each stride. */
+ #define DBC_DRK_SIZE_ONE (UINT32_C(0x1) << 10)
+ /* 2*8B is read at the start of each stride. */
+ #define DBC_DRK_SIZE_TWO (UINT32_C(0x2) << 10)
+ /* 3*8B is read at the start of each stride. */
+ #define DBC_DRK_SIZE_THREE (UINT32_C(0x3) << 10)
+ #define DBC_DRK_SIZE_LAST DBC_DRK_SIZE_THREE
uint32_t pi;
/*
* Page Index portion of DPI{VF_VALID,VFID,PI}. The pi needs to match
- * the value from the context DPI for the operation to be valid or the
- * pi must be zero, indicating a write from the privileged driver.
+ * the value from the context DPI for the operation to be valid or
+ * the pi must be zero, indicating a write from the privileged
+ * driver.
*
* pi in the kernel memory table is there for DBR to generate the DPI
* message to the client.
@@ -75807,6 +89446,420 @@ typedef struct dbc_drk {
} dbc_drk_t, *pdbc_drk_t;
/*
+ * The kernel memory structure is per-type (SQ, RQ, SRQ/SRQ_ARM and
+ * CQ/CQ_ARMSE/CQ_ARMALL). Each kernel driver will support a table for
+ * the doorbell recovery.
+ */
+/* dbc_drk64 (size:128b/16B) */
+
+typedef struct dbc_drk64 {
+ uint64_t flags;
+ /*
+ * This indicates it is a valid entry. It should be set for each
+ * doorbell written to the chip. The bit should be cleared at time
+ * zero to indicate that it has not yet been written. The bit
+ * should be cleared when the function for the table is disabled.
+ */
+ #define DBC_DRK64_VALID UINT32_C(0x1)
+ /* This indicates it is the last entry for the table. */
+ #define DBC_DRK64_LAST UINT32_C(0x2)
+ /* This indicates it is the entry for the next 4KB kernel memory pointer. */
+ #define DBC_DRK64_LINKED UINT32_C(0x4)
+ /*
+ * This field indicates if the doorbells in the table are 32b
+ * absolute or 64b format.
+ */
+ #define DBC_DRK64_DB_FORMAT UINT32_C(0x8)
+ /* The doorbells are 64b format. */
+ #define DBC_DRK64_DB_FORMAT_B64 (UINT32_C(0x0) << 3)
+ /*
+ * The doorbells are in the absolute 32b format. The doorbell
+ * is in the right-most half of the 64b space provided in the
+ * application table entry.
+ */
+ #define DBC_DRK64_DB_FORMAT_B32A (UINT32_C(0x1) << 3)
+ #define DBC_DRK64_DB_FORMAT_LAST DBC_DRK64_DB_FORMAT_B32A
+ /*
+ * This field controls the stride feature. The stride feature is
+ * more bandwidth efficient on the PCIE bus when only a small number
+ * of doorbells are used in each cache line.
+ */
+ #define DBC_DRK64_STRIDE_MASK UINT32_C(0x300)
+ #define DBC_DRK64_STRIDE_SFT 8
+ /*
+ * When stride is off, the DBR will read all the bytes in
+ * an application page until a NULL doorbell is found or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK64_STRIDE_OFF (UINT32_C(0x0) << 8)
+ /*
+ * When stride is 1, the DBR will read the 'size' doorbells,
+ * starting at the next 64B cache line boundary or until
+ * a NULL doorbell is found in the application page or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK64_STRIDE_SZ64 (UINT32_C(0x1) << 8)
+ /*
+ * When stride is 2, the DBR will read the 'size' doorbells,
+ * starting at the next 128B cache line boundary or until
+ * a NULL doorbell is found in the application page or
+ * the end of the 4K page is reached.
+ */
+ #define DBC_DRK64_STRIDE_SZ128 (UINT32_C(0x2) << 8)
+ #define DBC_DRK64_STRIDE_LAST DBC_DRK64_STRIDE_SZ128
+ /*
+ * This value controls how many doorbells are read at each stride
+ * when stride mode is in use.
+ */
+ #define DBC_DRK64_SIZE_MASK UINT32_C(0xc00)
+ #define DBC_DRK64_SIZE_SFT 10
+ /* 4*8B is read at the start of each stride. */
+ #define DBC_DRK64_SIZE_FOUR (UINT32_C(0x0) << 10)
+ /* 1*8B is read at the start of each stride. */
+ #define DBC_DRK64_SIZE_ONE (UINT32_C(0x1) << 10)
+ /* 2*8B is read at the start of each stride. */
+ #define DBC_DRK64_SIZE_TWO (UINT32_C(0x2) << 10)
+ /* 3*8B is read at the start of each stride. */
+ #define DBC_DRK64_SIZE_THREE (UINT32_C(0x3) << 10)
+ #define DBC_DRK64_SIZE_LAST DBC_DRK64_SIZE_THREE
+ /*
+ * Page Index portion of DPI{VF_VALID,VFID,PI}. The pi needs to match
+ * the value from the context DPI for the operation to be valid or
+ * the pi must be zero, indicating a write from the privileged
+ * driver.
+ *
+ * pi in the kernel memory table is there for DBR to generate the DPI
+ * message to the client.
+ */
+ #define DBC_DRK64_PI_MASK UINT32_C(0xffff00000000)L
+ #define DBC_DRK64_PI_SFT 32
+ /*
+ * It is the application memory page (4KB) pointer when linked = 0.
+ * It is the next kernel memory page (4KB) pointer when linked = 1.
+ * The pointer doesn't have to be aligned to the page (4KB) but it
+ * should be aligned to a 128B boundary. This means that the bottom
+ * 7b of the pointer must be zero.
+ */
+ uint64_t memptr;
+} dbc_drk64_t, *pdbc_drk64_t;
+
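As a concrete illustration of the flag encoding above, here is a hedged sketch that fills one dbc_drk64 recovery-table entry for a 64b-format application page read with a 64B stride, 2*8B per stride. The macro names come from this header; the helper itself and its caller are hypothetical.

#include <stdint.h>

static void
fill_drk64_entry_sketch(dbc_drk64_t *e, uint64_t app_page_pa, uint16_t pi,
    int is_last)
{
	uint64_t flags;

	flags = DBC_DRK64_VALID | DBC_DRK64_DB_FORMAT_B64 |
	    DBC_DRK64_STRIDE_SZ64 | DBC_DRK64_SIZE_TWO |
	    ((uint64_t)pi << DBC_DRK64_PI_SFT);
	if (is_last)
		flags |= DBC_DRK64_LAST;	/* last entry in the table */
	e->flags = flags;
	/* Application page pointer: 128B aligned, so low 7b must be zero. */
	e->memptr = app_page_pa & ~UINT64_C(0x7f);
}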
+/*
+ * This is the 64b doorbell format. The host writes this message
+ * format directly to byte offset 0 of the appropriate doorbell page.
+ */
+/* dbc_dbc_v3 (size:64b/8B) */
+
+typedef struct dbc_dbc_v3 {
+ uint32_t index;
+ /*
+ * This value is the index being written.
+ *
+ * For SQ/RQ/SRQ, this is the producer index. It should be set to
+ * the queue index of the last WQE/BD written plus the number of
+ * index units in the WQE/BD. For example, if the number of index
+ * units in an SQ WQE is 8 and the WQE was written to the first
+ * location in the queue (zero), this index should be written to 8.
+ * The index should point to the start of the first location that
+ * has not been filled in with WQE/BD data. For SQ (both RoCE and
+ * L2), the index unit is 16B. For RQ/SRQ, the index unit is 1 WQE
+ * (RoCE) or 1 BD (L2).
+ *
+ * For CQ, this is the consumer index and should be the starting
+ * queue index of the last CQE processed plus the size of the last
+ * processed CQE in index units. The index should point to the
+ * start of the first CQE in the queue that has not been processed.
+ * The index unit is 16B.
+ *
+ * For NQ, this is the consumer index and should be the starting
+ * queue index of the last NQE processed plus the size of the last
+ * processed NQE in index units. The index should point to the
+ * start of the first NQE in the queue that has not been processed.
+ * The index unit is 16B.
+ */
+ #define DBC_DBC_V3_INDEX_MASK UINT32_C(0xffffff)
+ #define DBC_DBC_V3_INDEX_SFT 0
+ /*
+ * The epoch bit provides a frame of reference for the queue index.
+ * S/W will toggle this bit in the doorbell each time the index range is
+ * wrapped. This allows the receiving HW block to more efficiently
+ * detect out-of-order doorbells and to ignore the older doorbells.
+ * Out-of-order doorbells occur normally during dropped doorbell
+ * recovery.
+ */
+ #define DBC_DBC_V3_EPOCH UINT32_C(0x1000000)
+ /*
+ * The toggle value is used in CQ_ARMENA, CQ_ARMSE, CQ_ARMALL,
+ * SRQ_ARMENA, SRQ_ARM, and CQ_CUTOFF_ACK doorbells to qualify the
+ * doorbell as valid. This value should be taken from the latest
+ * NQE or cutoff completion.
+ *
+ * Doorbells of the above types with the wrong toggle value will
+ * be ignored. This is how old values in backup doorbells
+ * are ignored.
+ */
+ #define DBC_DBC_V3_TOGGLE_MASK UINT32_C(0x6000000)
+ #define DBC_DBC_V3_TOGGLE_SFT 25
+ uint32_t type_path_xid;
+ /*
+ * This value identifies the resource that the doorbell is intended
+ * to notify.
+ *
+ * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
+ * CQ, this is the CID. For NQ, this is the NID.
+ *
+ * Unused bits (for example bits [11:7] of the SID value) must be
+ * zero.
+ */
+ #define DBC_DBC_V3_XID_MASK UINT32_C(0xfff)
+ #define DBC_DBC_V3_XID_SFT 0
+ /*
+ * This value defines the intended doorbell path between RoCE and
+ * L2.
+ */
+ #define DBC_DBC_V3_PATH_MASK UINT32_C(0x3000000)
+ #define DBC_DBC_V3_PATH_SFT 24
+ /* This is a RoCE doorbell message. */
+ #define DBC_DBC_V3_PATH_ROCE (UINT32_C(0x0) << 24)
+ /* This is a L2 doorbell message. */
+ #define DBC_DBC_V3_PATH_L2 (UINT32_C(0x1) << 24)
+ #define DBC_DBC_V3_PATH_LAST DBC_DBC_V3_PATH_L2
+ /*
+ * This indicates it is a valid doorbell update. It should be set for
+ * each doorbell written to the chip and when the doorbell message is
+ * written to the backup doorbell location. The bit should be cleared
+ * in the backup doorbell location at time zero to indicate that the
+ * backup doorbell has not yet been written.
+ */
+ #define DBC_DBC_V3_VALID UINT32_C(0x4000000)
+ /*
+ * When this bit is set to one, the chip will capture debug
+ * information for the doorbell ring. This is intended to only be
+ * used on SQ doorbell rings.
+ */
+ #define DBC_DBC_V3_DEBUG_TRACE UINT32_C(0x8000000)
+ /* This value identifies the type of doorbell being written. */
+ #define DBC_DBC_V3_TYPE_MASK UINT32_C(0xf0000000)
+ #define DBC_DBC_V3_TYPE_SFT 28
+ /*
+ * This is a SQ producer index update. It indicates one or more
+ * new entries have been written to the SQ for the QPID indicated
+ * on the xID field. This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_SQ (UINT32_C(0x0) << 28)
+ /*
+ * This is a RQ producer index update. It indicates one or more
+ * new entries have been written to the RQ for the QPID indicated
+ * on the xID field. This type is valid for RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_RQ (UINT32_C(0x1) << 28)
+ /*
+ * This is a SRQ producer index update. It indicates one or more
+ * new entries have been written to the SRQ for the SID indicated
+ * on the xID field. This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_SRQ (UINT32_C(0x2) << 28)
+ /*
+ * This doorbell command arms the SRQ async event. The xID field
+ * must identify the SID that is being armed. The index field
+ * sets the arm threshold such that a notification will be
+ * generated if fewer than that number of SRQ entries are posted.
+ *
+ * This type is valid for RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_SRQ_ARM (UINT32_C(0x3) << 28)
+ /*
+ * CQ doorbell is used to update the consumer index for the CQ
+ * for overflow detection. It should only be sent if overflow
+ * detection is enabled for the CQ. Keep in mind that if
+ * doorbells are being dropped due to PCIE ordering rules, you
+ * may get a false overflow detection if you are checking for CQ
+ * overflow.
+ *
+ * This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_CQ (UINT32_C(0x4) << 28)
+ /*
+ * This is a CQ consumer index update that also arms the CQ for
+ * solicited events. This is for RoCE only, not for L2.
+ *
+ * The index is used as the location of the last CQE that was
+ * processed by the driver. The new interrupt will be generated
+ * based on this location.
+ *
+ * This type is valid for RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_CQ_ARMSE (UINT32_C(0x5) << 28)
+ /*
+ * This is a CQ consumer index update that also arms the CQ for
+ * any new CQE.
+ *
+ * The index is used as the location of the last CQE that was
+ * processed by the driver. The new interrupt will be generated
+ * based on this location.
+ *
+ * This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_CQ_ARMALL (UINT32_C(0x6) << 28)
+ /*
+ * This is a CQ arm enable message. This message must be sent
+ * from the privileged driver before a new CQ_ARMSE or CQ_ARMALL
+ * message will be accepted from user space (non-privileged
+ * doorbell page). The index and epoch for this doorbell type are
+ * unused.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_DBC_V3_TYPE_CQ_ARMENA (UINT32_C(0x7) << 28)
+ /*
+ * This doorbell command enables the SRQ async event to be armed.
+ * This message must be sent from the privileged driver before
+ * a new SRQ_ARM message will be accepted from user space. The
+ * xID field must identify the SID that is being enabled for arm.
+ * The index and epoch for this doorbell type are unused.
+ *
+ * This doorbell can only be sent from the privileged (first)
+ * doorbell page of a function.
+ */
+ #define DBC_DBC_V3_TYPE_SRQ_ARMENA (UINT32_C(0x8) << 28)
+ /*
+ * This doorbell type is used to acknowledge a cutoff completion
+ * in the CQ. The index and epoch for this doorbell type are
+ * unused. This doorbell is sent when the cutoff completion has
+ * been processed and the old CQ in a CQ resize operation is no
+ * longer needed.
+ *
+ * The index and epoch must be valid for this doorbell if
+ * overflow checking is enabled for the CQ.
+ */
+ #define DBC_DBC_V3_TYPE_CQ_CUTOFF_ACK (UINT32_C(0x9) << 28)
+ /*
+ * This is a NQ consumer index update. It indicates one or more
+ * entries have been processed off the NQ indicated on the xID
+ * field. It will also mask the NQ for any new NQE. This type is
+ * valid for L2 and RoCE path.
+ *
+ * Thor is broken in that it doesn't mask a legacy INTA interrupt
+ * when used at the start of an ISR, as it is supposed to be.
+ *
+ * type=NQ masks the current interrupt. When the ISR starts, it
+ * writes a type=NQ with the current consumer index. For legacy
+ * PCI interrupts, this needs to mask the interrupt so the legacy
+ * interrupt is deasserted. Then the driver does some work and
+ * writes some more type=NQ. Finally the driver stops the ISR and
+ * does a type=NQ_ARM to get another interrupt (when needed). The
+ * only reason to use type=NQ_MASK is to back out of the armed
+ * state. In that request, the index update is not required.
+ */
+ #define DBC_DBC_V3_TYPE_NQ (UINT32_C(0xa) << 28)
+ /*
+ * This is a NQ consumer index update that also arms the NQ for
+ * any new NQE.
+ *
+ * This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_NQ_ARM (UINT32_C(0xb) << 28)
+ /*
+ * This doorbell will assign a new NQ to a CQ. This is handy if
+ * the user wants to change which interrupt handler is going to
+ * process a particular CQ. This doorbell must be sent from the
+ * privileged driver.
+ *
+ * The xID must be the CID for the CQ that needs to be changed.
+ * The index value is the NQID of the new NQ that will be used
+ * for future notifications. epoch and toggle are ignored for
+ * this doorbell type.
+ *
+ * The CQ will disarm notifications and generate a NQE to the old
+ * NQ with the nq_reassign type value. The chip will guarantee
+ * that no notification will be sent to the old NQ after the
+ * nq_reassign NQE has been sent.
+ *
+ * This type is valid for L2 and RoCE CQs.
+ */
+ #define DBC_DBC_V3_TYPE_CQ_REASSIGN (UINT32_C(0xc) << 28)
+ /*
+ * This masks the NQ for any new NQE. This will NOT update the NQ
+ * consumer index.
+ *
+ * This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_NQ_MASK (UINT32_C(0xe) << 28)
+ /*
+ * All other fields should be zero for NULL doorbell.
+ *
+ * For doorbell recovery, NULL doorbell type in the Application
+ * table indicates that it is the last QP entry for the function.
+ * This type is valid for L2 and RoCE path.
+ */
+ #define DBC_DBC_V3_TYPE_NULL (UINT32_C(0xf) << 28)
+ #define DBC_DBC_V3_TYPE_LAST DBC_DBC_V3_TYPE_NULL
+} dbc_dbc_v3_t, *pdbc_dbc_v3_t;
+
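A hedged sketch of composing one 64b v3 doorbell from the fields above, here a CQ_ARMALL with the toggle taken from the latest NQE as the comments require. The single 64b little-endian store and the bar pointer are assumptions; a real driver would also add a write barrier.

#include <stdint.h>

static void
ring_cq_armall_sketch(volatile uint64_t *db, uint32_t cid, uint32_t cons_idx,
    int epoch, uint32_t toggle)
{
	uint32_t lo, hi;

	lo = (cons_idx & DBC_DBC_V3_INDEX_MASK) |
	    (epoch ? DBC_DBC_V3_EPOCH : 0) |
	    ((toggle << DBC_DBC_V3_TOGGLE_SFT) & DBC_DBC_V3_TOGGLE_MASK);
	hi = (cid & DBC_DBC_V3_XID_MASK) | DBC_DBC_V3_PATH_ROCE |
	    DBC_DBC_V3_VALID | DBC_DBC_V3_TYPE_CQ_ARMALL;
	/* One 64b store, index word in the low half (little-endian host). */
	*db = ((uint64_t)hi << 32) | lo;
}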
+/*
+ * This is the RoCE Express Doorbell format. The host writes this
+ * message format directly to offset 0x40 of the appropriate doorbell
+ * page. Express doorbells are used when the chip will be owning the
+ * SQ, RQ, and SRQ as well as the producer indexes for each queue. This
+ * provides a simple fastpath programming model.
+ *
+ * Express doorbell must be received by the chip as a single TLP
+ * message.
+ */
+/* dbc_xp (size:512b/64B) */
+
+typedef struct dbc_xp {
+ uint32_t reserved;
+ uint32_t type_xid;
+ /*
+ * This value identifies the resource that the doorbell is intended
+ * to notify.
+ *
+ * For SQ and RQ, this is the QPID. For SRQ, this is the SID. For
+ * CQ, this is the CID. For NQ, this is the NID.
+ *
+ * Unused bits (for example bits [11:7] of the SID value) must be
+ * zero.
+ */
+ #define DBC_XP_XID_MASK UINT32_C(0xfff)
+ #define DBC_XP_XID_SFT 0
+ /*
+ * When this bit is set to one, the chip will capture debug
+ * information for the doorbell ring. This is intended to only be
+ * used on SQ doorbell rings.
+ */
+ #define DBC_XP_DEBUG_TRACE UINT32_C(0x1000000)
+ /* This value identifies the type of doorbell being written. */
+ #define DBC_XP_TYPE_MASK UINT32_C(0xf0000000)
+ #define DBC_XP_TYPE_SFT 28
+ /*
+ * This is a SQ producer index update. It indicates one or more
+ * new entries have been written to the SQ for the QPID indicated
+ * on the xID field. This type is valid for L2, RoCE and Engine
+ * path.
+ */
+ #define DBC_XP_TYPE_SQ (UINT32_C(0x0) << 28)
+ /*
+ * This is a RQ producer index update. It indicates one or more
+ * new entries have been written to the RQ for the QPID indicated
+ * on the xID field. This type is valid for RoCE path.
+ */
+ #define DBC_XP_TYPE_RQ (UINT32_C(0x1) << 28)
+ /*
+ * This is a SRQ producer index update. It indicates one or more
+ * new entries have been written to the SRQ for the SID indicated
+ * on the xID field. This type is valid for L2 and RoCE path.
+ */
+ #define DBC_XP_TYPE_SRQ (UINT32_C(0x2) << 28)
+ #define DBC_XP_TYPE_LAST DBC_XP_TYPE_SRQ
+ /*
+ * This field holds one express WQE. The WQE must be appropriate for
+ * the queue selected by the type field.
+ */
+ uint32_t wqe[14];
+} dbc_xp_t, *pdbc_xp_t;
+
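A hedged sketch of composing an express doorbell for an SQ. The plain memcpy at the end stands in for whatever store sequence the platform needs to emit the 64B as a single TLP (write-combining flush, vector store, and so on), which is out of scope here.

#include <stdint.h>
#include <string.h>

static void
ring_xp_sq_sketch(uint8_t *db_page, const uint32_t wqe[14], uint32_t qpid)
{
	dbc_xp_t xp;

	memset(&xp, 0, sizeof(xp));
	xp.type_xid = (qpid & DBC_XP_XID_MASK) | DBC_XP_TYPE_SQ;
	memcpy(xp.wqe, wqe, sizeof(xp.wqe));
	/* Express doorbells are written at byte offset 0x40 of the page. */
	memcpy(db_page + 0x40, &xp, sizeof(xp));
}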
+/*
* This is a firmware status register that indicates the software status
* exposed by the firmware to the host.
*
@@ -75820,9 +89873,10 @@ typedef struct fw_status_reg {
* These bits indicate the status as being reported by the firmware.
*
* The value should be interpreted as follows:
- * A value below 0x8000 is an indication that the firmware is still in the
- * process of starting up and is not ready. The host driver should
- * continue waiting with a timeout for firmware status to be ready.
+ * A value below 0x8000 is an indication that the firmware is still
+ * in the process of starting up and is not ready. The host driver
+ * should continue waiting with a timeout for firmware status to be
+ * ready.
* > 0x0000 to 0x00FF : SBL state information
* > 0x0200 to 0x02FF : SBI state information
* > 0x0400 to 0x04FF : SRT state information
@@ -75830,14 +89884,15 @@ typedef struct fw_status_reg {
* > 0x0800 to 0x08FF : External Firmware state information
* > 0x0A00 to 0x0FFF : Reserved for future fw functionality
*
- * A value of 0x8000 indicates firmware is ready and healthy. The host
- * driver can start initiating HWRM commands to the firmware.
+ * A value of 0x8000 indicates firmware is ready and healthy. The
+ * host driver can start initiating HWRM commands to the firmware.
*
- * A value over 0x8000 is an indication that the firmware has detected
- * a fatal error, this error could be in one of the hardware block or
- * in a software module. The lower 8 bits indicate a block/module
- * specific error and the upper 8 bits identify the hardware block
- * or firmware module that was the source of the error.
+ * A value over 0x8000 is an indication that the firmware has
+ * detected a fatal error; this error could be in one of the hardware
+ * blocks or in a software module. The lower 8 bits indicate a
+ * block/module specific error and the upper 8 bits identify the
+ * hardware block or firmware module that was the source of the
+ * error.
* > 0x81XX - 0xBFXX : 63 ASIC blocks
* > 0xC0XX to 0xFDXX : 62 Firmware modules
* > 0xFE00 to 0xFEFF : External firmware module
@@ -75863,10 +89918,10 @@ typedef struct fw_status_reg {
* recoverable with a full reset.
*
* This bit should be used by host software and deployment models
- * that support error recovery by resetting the controller. A recovery
- * should be attempted from a fatal error condition only if this bit
- * is set. This bit is meaningful only when the code field is greater
- * than 0x8000 (32768 decimal).
+ * that support error recovery by resetting the controller. A
+ * recovery should be attempted from a fatal error condition only if
+ * this bit is set. This bit is meaningful only when the code field
+ * is greater than 0x8000 (32768 decimal).
*/
#define FW_STATUS_REG_RECOVERABLE UINT32_C(0x20000)
/*
@@ -75874,51 +89929,52 @@ typedef struct fw_status_reg {
* currently recording a crash dump.
*
* This bit provides a hint to the host driver if the firmware is
- * currently recording a crash dump. Host driers should avoid resetting
- * the controller when a crash dump is in progress if possible. This
- * bit is meaningful only when the code field is greater than
- * 0x8000 (32768 decimal).
+ * currently recording a crash dump. Host drivers should avoid
+ * resetting the controller when a crash dump is in progress if
+ * possible. This bit is meaningful only when the code field is
+ * greater than 0x8000 (32768 decimal).
*/
#define FW_STATUS_REG_CRASHDUMP_ONGOING UINT32_C(0x40000)
/*
- * Crash dump is available. If set indicates that a firmware crash dump
- * was recorded before and is now available.
+ * Crash dump is available. If set, it indicates that a firmware crash
+ * dump was recorded before and is now available.
*
- * This bit provides indication to the host driver that the firmware has
- * completed a crash dump. This bit is meaningful only when the code
- * field is greater than 0x8000 (32768 decimal).
+ * This bit provides indication to the host driver that the firmware
+ * has completed a crash dump. This bit is meaningful only when the
+ * code field is greater than 0x8000 (32768 decimal).
*/
#define FW_STATUS_REG_CRASHDUMP_COMPLETE UINT32_C(0x80000)
/*
- * This bit is used to indicate device state when it enters the shutdown mode
- * and stopped the communication with the host. The host should initiate the
- * reload of firmware image or initiate the reset to bring the device to the
- * normal operational state and re-establish the communication.
+ * This bit is used to indicate the device state when it enters
+ * shutdown mode and has stopped communication with the host. The
+ * host should initiate a reload of the firmware image or a reset
+ * to bring the device back to the normal operational state and
+ * re-establish communication.
*
- * This bit is meaningful only when the code field is greater than 0x8000
- * (32768 decimal).
+ * This bit is meaningful only when the code field is greater than
+ * 0x8000 (32768 decimal).
*/
#define FW_STATUS_REG_SHUTDOWN UINT32_C(0x100000)
/*
* This bit will be set to 1 by the FW when FW crashed without master
* function.
*
- * This bit is controller specific, not all products will support this bit.
- * This bit is valid only when the code field is greater than 0x8000
- * (32768 decimal).
+ * This bit is controller specific; not all products will support
+ * this bit. This bit is valid only when the code field is greater
+ * than 0x8000 (32768 decimal).
*/
#define FW_STATUS_REG_CRASHED_NO_MASTER UINT32_C(0x200000)
/*
- * The firmware sets this bit to 1 when the firmware has taken an exception
- * and expects to initiate error recovery.
+ * The firmware sets this bit to 1 when the firmware has taken an
+ * exception and expects to initiate error recovery.
*
* This bit is valid only when the code field is greater than 0x8000
* (32768 decimal).
*/
#define FW_STATUS_REG_RECOVERING UINT32_C(0x400000)
/*
- * The SBL sets this bit to indicate whether manu_debug pin is detected high
- * or low.
+ * The SBL sets this bit to indicate whether the manu_debug pin is
+ * detected high or low.
*/
#define FW_STATUS_REG_MANU_DEBUG_STATUS UINT32_C(0x800000)
} fw_status_reg_t, *pfw_status_reg_t;
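The ready/fatal thresholds described above lend themselves to a small classifier. A minimal sketch, assuming the status code occupies the low 16 bits of the register (the exact mask macro is defined with the field, outside this hunk); only FW_STATUS_REG_RECOVERABLE is taken from the defines shown here.

#include <stdbool.h>
#include <stdint.h>

enum fw_state { FW_BOOTING, FW_READY, FW_FATAL };

static enum fw_state
classify_fw_status_sketch(uint32_t reg)
{
	uint16_t code = reg & 0xffff;	/* assumed code-field mask */

	if (code < 0x8000)
		return (FW_BOOTING);	/* still starting; keep polling */
	if (code == 0x8000)
		return (FW_READY);	/* safe to issue HWRM commands */
	return (FW_FATAL);	/* block/module error encoded in code */
}

static bool
fw_error_recoverable_sketch(uint32_t reg)
{
	/* Meaningful only in the fatal range, per the comment above. */
	return (classify_fw_status_sketch(reg) == FW_FATAL &&
	    (reg & FW_STATUS_REG_RECOVERABLE) != 0);
}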
@@ -76037,9 +90093,9 @@ typedef struct hwrm_selftest_qlist_output {
#define HWRM_SELFTEST_QLIST_OUTPUT_AVAILABLE_TESTS_REGISTER_TEST UINT32_C(0x4)
/* Can run the memory test. */
#define HWRM_SELFTEST_QLIST_OUTPUT_AVAILABLE_TESTS_MEMORY_TEST UINT32_C(0x8)
- /* Can run the PCIe serdes test. */
+ /* Can run the PCIe serdes test. (deprecated) */
#define HWRM_SELFTEST_QLIST_OUTPUT_AVAILABLE_TESTS_PCIE_SERDES_TEST UINT32_C(0x10)
- /* Can run the Ethernet serdes test. */
+ /* Can run the Ethernet serdes test. (deprecated) */
#define HWRM_SELFTEST_QLIST_OUTPUT_AVAILABLE_TESTS_ETHERNET_SERDES_TEST UINT32_C(0x20)
uint8_t offline_tests;
/* The NVM test is an offline test. */
@@ -76050,9 +90106,9 @@ typedef struct hwrm_selftest_qlist_output {
#define HWRM_SELFTEST_QLIST_OUTPUT_OFFLINE_TESTS_REGISTER_TEST UINT32_C(0x4)
/* The memory test is an offline test. */
#define HWRM_SELFTEST_QLIST_OUTPUT_OFFLINE_TESTS_MEMORY_TEST UINT32_C(0x8)
- /* The PCIe serdes test is an offline test. */
+ /* The PCIe serdes test is an offline test. (deprecated) */
#define HWRM_SELFTEST_QLIST_OUTPUT_OFFLINE_TESTS_PCIE_SERDES_TEST UINT32_C(0x10)
- /* The Ethernet serdes test is an offline test. */
+ /* The Ethernet serdes test is an offline test. (deprecated) */
#define HWRM_SELFTEST_QLIST_OUTPUT_OFFLINE_TESTS_ETHERNET_SERDES_TEST UINT32_C(0x20)
uint8_t unused_0;
/*
@@ -76062,48 +90118,14 @@ typedef struct hwrm_selftest_qlist_output {
uint16_t test_timeout;
uint8_t unused_1[2];
/*
- * This field represents the name of the NVM test (ASCII chars
- * with NULL at the end).
- */
- char test0_name[32];
- /*
- * This field represents the name of the link test (ASCII chars
- * with NULL at the end).
- */
- char test1_name[32];
- /*
- * This field represents the name of the register test (ASCII chars
- * with NULL at the end).
- */
- char test2_name[32];
- /*
- * This field represents the name of the memory test (ASCII chars
+ * This field represents array of 8 test name strings (ASCII chars
* with NULL at the end).
*/
- char test3_name[32];
- /*
- * This field represents the name of the PCIe serdes test (ASCII chars
- * with NULL at the end).
- */
- char test4_name[32];
- /*
- * This field represents the name of the Ethernet serdes test (ASCII chars
- * with NULL at the end).
- */
- char test5_name[32];
- /*
- * This field represents the name of some future test (ASCII chars
- * with NULL at the end).
- */
- char test6_name[32];
- /*
- * This field represents the name of some future test (ASCII chars
- * with NULL at the end).
- */
- char test7_name[32];
+ char test_name[8][32];
/*
* The lowest available target BER that is supported by FW eyescope.
- * A Value of 3 indicates that FW supports 1e-8, 1e-9, 1e-10, and 1e-11.
+ * A value of 3 indicates that FW supports 1e-8, 1e-9, 1e-10, and
+ * 1e-11. (deprecated)
*/
uint8_t eyescope_target_BER_support;
/* Eyescope supports a target BER of 1e-8 */
@@ -76120,9 +90142,9 @@ typedef struct hwrm_selftest_qlist_output {
uint8_t unused_2[6];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
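With the eight name fields collapsed into test_name[8][32], callers index the names by test bit. A hedged sketch, assuming bit i of available_tests corresponds to test_name[i] as the ordering above implies, and that the output typedef follows this header's _t naming:

#include <stdio.h>

static void
print_available_tests_sketch(const hwrm_selftest_qlist_output_t *out)
{
	for (int i = 0; i < 8; i++)
		if (out->available_tests & (1u << i))
			printf("test %d: %.32s\n", i, out->test_name[i]);
}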
@@ -76174,9 +90196,9 @@ typedef struct hwrm_selftest_exec_input {
#define HWRM_SELFTEST_EXEC_INPUT_FLAGS_REGISTER_TEST UINT32_C(0x4)
/* Run the memory test. */
#define HWRM_SELFTEST_EXEC_INPUT_FLAGS_MEMORY_TEST UINT32_C(0x8)
- /* Run the PCIe serdes test. */
+ /* Run the PCIe serdes test. (deprecated) */
#define HWRM_SELFTEST_EXEC_INPUT_FLAGS_PCIE_SERDES_TEST UINT32_C(0x10)
- /* Run the Ethernet serdes test. */
+ /* Run the Ethernet serdes test. (deprecated) */
#define HWRM_SELFTEST_EXEC_INPUT_FLAGS_ETHERNET_SERDES_TEST UINT32_C(0x20)
uint8_t unused_0[7];
} hwrm_selftest_exec_input_t, *phwrm_selftest_exec_input_t;
@@ -76202,33 +90224,52 @@ typedef struct hwrm_selftest_exec_output {
#define HWRM_SELFTEST_EXEC_OUTPUT_REQUESTED_TESTS_REGISTER_TEST UINT32_C(0x4)
/* A request was made to run the memory test. */
#define HWRM_SELFTEST_EXEC_OUTPUT_REQUESTED_TESTS_MEMORY_TEST UINT32_C(0x8)
- /* A request was made to run the PCIe serdes test. */
+ /* A request was made to run the PCIe serdes test. (deprecated) */
#define HWRM_SELFTEST_EXEC_OUTPUT_REQUESTED_TESTS_PCIE_SERDES_TEST UINT32_C(0x10)
- /* A request was made to run the Ethernet serdes test. */
+ /* A request was made to run the Ethernet serdes test. (deprecated) */
#define HWRM_SELFTEST_EXEC_OUTPUT_REQUESTED_TESTS_ETHERNET_SERDES_TEST UINT32_C(0x20)
/*
- * If a test was requested to be run as seen in the requested_tests field,
- * this bit indicates whether the test was successful(1) or failed(0).
+ * If a test was requested to be run as seen in the requested_tests
+ * field, this bit indicates whether the test was successful (1) or
+ * failed (0).
*/
uint8_t test_success;
- /* If requested, a value of 1 indicates the NVM test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the NVM test completed
+ * successfully.
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_NVM_TEST UINT32_C(0x1)
- /* If requested, a value of 1 indicates the link test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the link test completed
+ * successfully.
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_LINK_TEST UINT32_C(0x2)
- /* If requested, a value of 1 indicates the register test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the register test completed
+ * successfully.
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_REGISTER_TEST UINT32_C(0x4)
- /* If requested, a value of 1 indicates the memory test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the memory test completed
+ * successfully.
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_MEMORY_TEST UINT32_C(0x8)
- /* If requested, a value of 1 indicates the PCIe serdes test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the PCIe serdes test
+ * completed successfully. (deprecated)
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_PCIE_SERDES_TEST UINT32_C(0x10)
- /* If requested, a value of 1 indicates the Ethernet serdes test completed successfully. */
+ /*
+ * If requested, a value of 1 indicates the Ethernet serdes test
+ * completed successfully. (deprecated)
+ */
#define HWRM_SELFTEST_EXEC_OUTPUT_TEST_SUCCESS_ETHERNET_SERDES_TEST UINT32_C(0x20)
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76286,9 +90327,9 @@ typedef struct hwrm_selftest_irq_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76340,12 +90381,13 @@ typedef struct hwrm_selftest_retrieve_serdes_data_input {
uint32_t resp_data_offset;
/*
* Size of the buffer pointed to by resp_data_addr. The firmware may
- * use this entire buffer or less than the entire buffer, but never more.
+ * use this entire buffer or less than the entire buffer, but never
+ * more.
*/
uint16_t data_len;
/*
- * This field allows this command to request the individual serdes tests
- * to be run using this command.
+ * This field allows this command to request the individual serdes
+ * tests to be run using this command.
*/
uint8_t flags;
/* Unused. */
@@ -76360,7 +90402,8 @@ typedef struct hwrm_selftest_retrieve_serdes_data_input {
uint8_t options;
/*
* This field represents the PCIE lane number on which tools wants to
- * retrieve eye plot. This field is valid only when ‘pcie_serdes_test’ flag is set.
+ * retrieve eye plot. This field is valid only when 'pcie_serdes_test'
+ * flag is set.
* Valid values from 0 to 16.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_OPTIONS_PCIE_LANE_NO_MASK UINT32_C(0xf)
@@ -76380,8 +90423,9 @@ typedef struct hwrm_selftest_retrieve_serdes_data_input {
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_OPTIONS_PROJ_TYPE_LEFT_TOP (UINT32_C(0x0) << 5)
/*
- * Value 1 indicates right/bottom projection in horizontal/vertical
- * This value is valid only when eye_projection flag was set.
+ * Value 1 indicates right/bottom projection in
+ * horizontal/vertical. This value is valid only when
+ * eye_projection flag was set.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM (UINT32_C(0x1) << 5)
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_OPTIONS_PROJ_TYPE_LAST HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_OPTIONS_PROJ_TYPE_RIGHT_BOTTOM
@@ -76411,20 +90455,24 @@ typedef struct hwrm_selftest_retrieve_serdes_data_input {
uint8_t action;
/*
* Value 0 indicates that collection of the eyescope should be
- * returned synchronously in the output. This only applies to
+ * returned synchronously in the output. This only applies to
* a targetBER of 1e-8.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_SYNCHRONOUS UINT32_C(0x0)
- /* Value 1 indicates to the firmware to start the collection of the eyescope. */
+ /*
+ * Value 1 indicates to the firmware to start the collection of the
+ * eyescope.
+ */
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_START UINT32_C(0x1)
/*
- * Value 2 indicates to the firmware to respond with a progress percentage
- * of the current eyescope collection from 0.0 to 100.0.
+ * Value 2 indicates to the firmware to respond with a progress
+ * percentage of the current eyescope collection from 0.0 to 100.0.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_PROGRESS UINT32_C(0x2)
/*
- * Value 3 indicates to stop the eyescope. if the progress percentage
- * is 100.0, the data will be DMAed back to resp_data_addr.
+ * Value 3 indicates to stop the eyescope. If the progress
+ * percentage is 100.0, the data will be DMAed back to
+ * resp_data_addr.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_STOP UINT32_C(0x3)
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_LAST HWRM_SELFTEST_RETRIEVE_SERDES_DATA_INPUT_ACTION_STOP
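The action values define a small asynchronous protocol: START the collection, poll PROGRESS until the response reports 1000 (100.0%), then STOP to trigger the DMA to resp_data_addr. A hedged sketch; send_serdes_action() is a hypothetical wrapper that issues the HWRM command with the given action value and returns progress_percent from the response.

#include <stdint.h>

extern uint16_t send_serdes_action(void *ctx, uint8_t action);

static void
collect_eyescope_sketch(void *ctx)
{
	uint16_t pct = 0;

	send_serdes_action(ctx, 0x1);			/* ACTION_START */
	while (pct < 1000)				/* 1000 == 100.0% */
		pct = send_serdes_action(ctx, 0x2);	/* ACTION_PROGRESS */
	send_serdes_action(ctx, 0x3);	/* ACTION_STOP: DMA the data back */
}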
@@ -76453,16 +90501,17 @@ typedef struct hwrm_selftest_retrieve_serdes_data_output {
*/
uint16_t copied_data_len;
/*
- * Percentage of completion of collection of BER values from the current
- * eyescope operation in tenths of a percentage. 0 (0.0) to 1000 (100.0)
+ * Percentage of completion of collection of BER values from the
+ * current eyescope operation in tenths of a percentage. 0 (0.0) to
+ * 1000 (100.0).
*/
uint16_t progress_percent;
/* Timeout in seconds for timeout of an individual BER point. */
uint16_t timeout;
uint8_t flags;
/*
- * This value indicates the structure of data returned by the firmware
- * when DMA'ed to resp_data_addr
+ * This value indicates the structure of data returned by the
+ * firmware when DMA'ed to resp_data_addr.
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_OUTPUT_FLAGS_BIT_COUNT_TYPE UINT32_C(0x1)
/*
@@ -76472,7 +90521,7 @@ typedef struct hwrm_selftest_retrieve_serdes_data_output {
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_OUTPUT_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_TOTAL UINT32_C(0x0)
/*
* Value 1 indicates that bit count is a power of
- * 2 that bit_count is normalized to. A Value of 42 indicates
+ * 2 that bit_count is normalized to. A value of 42 indicates
* that BER = error_count / 2^42
*/
#define HWRM_SELFTEST_RETRIEVE_SERDES_DATA_OUTPUT_FLAGS_BIT_COUNT_TYPE_BIT_COUNT_POW2 UINT32_C(0x1)
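The two bit_count encodings reduce to one conversion. A minimal sketch, assuming bit_count stays below 64 in the POW2 case:

#include <stdint.h>

static double
serdes_ber_sketch(uint64_t error_count, uint64_t bit_count, int is_pow2)
{
	/* POW2: bit_count is an exponent, e.g. 42 means 2^42 bits sampled. */
	double bits = is_pow2 ?
	    (double)(UINT64_C(1) << bit_count) : (double)bit_count;

	return (bits > 0.0 ? (double)error_count / bits : 0.0);
}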
@@ -76483,15 +90532,16 @@ typedef struct hwrm_selftest_retrieve_serdes_data_output {
uint8_t unused_0;
/*
* Size of header prepended to the bit_count and error_count array.
- * Use this value to skip forward to the bit_count and error_count array.
+ * Use this value to skip forward to the bit_count and error_count
+ * array.
*/
uint16_t hdr_size;
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76555,9 +90605,9 @@ typedef struct hwrm_mfg_fru_write_control_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76634,9 +90684,9 @@ typedef struct hwrm_mfg_timers_query_output {
uint8_t unused_0[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76683,7 +90733,7 @@ typedef struct hwrm_mfg_otp_cfg_input {
* This bit must be '1' for the crid field to be
* configured.
*/
- #define HWRM_MFG_OTP_CFG_INPUT_ENABLES_CRID UINT32_C(0x1)
+ #define HWRM_MFG_OTP_CFG_INPUT_ENABLES_CRID UINT32_C(0x1)
/*
* This bit must be '1' for the srt_rev_id field to be
* configured.
@@ -76699,6 +90749,11 @@ typedef struct hwrm_mfg_otp_cfg_input {
* configured.
*/
#define HWRM_MFG_OTP_CFG_INPUT_ENABLES_SBI_REV_ID UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_speed field to be
+ * configured.
+ */
+ #define HWRM_MFG_OTP_CFG_INPUT_ENABLES_MAX_SPEED_SELECT UINT32_C(0x10)
/* This field indicates the crid value to be set. */
uint16_t crid_cfg_value;
/* This field indicates the srt rev id value to be set. */
@@ -76707,7 +90762,18 @@ typedef struct hwrm_mfg_otp_cfg_input {
uint16_t crt_rev_id_cfg_value;
/* This field indicates the sbi rev id value to be set. */
uint16_t sbi_rev_id_cfg_value;
- uint8_t unused_0[6];
+ /* This field indicates the max speed value to be set. */
+ uint16_t max_speed_cfg_value;
+ /* max speed value not configured. */
+ #define HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_NOT_CONFIGURED UINT32_C(0x0)
+ /* max speed value 50G. */
+ #define HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_50G UINT32_C(0x1)
+ /* max speed value 100G. */
+ #define HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_100G UINT32_C(0x2)
+ /* max speed value 200G. */
+ #define HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_200G UINT32_C(0x3)
+ #define HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_LAST HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_200G
+ uint8_t unused_0[4];
} hwrm_mfg_otp_cfg_input_t, *phwrm_mfg_otp_cfg_input_t;
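Following the enables convention above, a value field is honored only when its enables bit is set. A hedged sketch of requesting a 100G max-speed fuse; the enables field name is inferred from the macro prefix, and the HWRM send path is omitted.

#include <string.h>

static void
prep_otp_max_speed_100g_sketch(hwrm_mfg_otp_cfg_input_t *req)
{
	memset(req, 0, sizeof(*req));	/* leave other fields unconfigured */
	req->enables = HWRM_MFG_OTP_CFG_INPUT_ENABLES_MAX_SPEED_SELECT;
	req->max_speed_cfg_value =
	    HWRM_MFG_OTP_CFG_INPUT_MAX_SPEED_CFG_VALUE_100G;
}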
/* hwrm_mfg_otp_cfg_output (size:128b/16B) */
@@ -76724,9 +90790,9 @@ typedef struct hwrm_mfg_otp_cfg_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76773,7 +90839,7 @@ typedef struct hwrm_mfg_otp_qcfg_input {
* This bit must be '1' for the crid field to be
* queried.
*/
- #define HWRM_MFG_OTP_QCFG_INPUT_ENABLES_CRID UINT32_C(0x1)
+ #define HWRM_MFG_OTP_QCFG_INPUT_ENABLES_CRID UINT32_C(0x1)
/*
* This bit must be '1' for the srt_rev_id field to be
* queried.
@@ -76789,10 +90855,15 @@ typedef struct hwrm_mfg_otp_qcfg_input {
* queried.
*/
#define HWRM_MFG_OTP_QCFG_INPUT_ENABLES_SBI_REV_ID UINT32_C(0x8)
+ /*
+ * This bit must be '1' for the max_speed field to be
+ * queried.
+ */
+ #define HWRM_MFG_OTP_QCFG_INPUT_ENABLES_MAX_SPEED_SELECT UINT32_C(0x10)
uint8_t unused_0[6];
} hwrm_mfg_otp_qcfg_input_t, *phwrm_mfg_otp_qcfg_input_t;
-/* hwrm_mfg_otp_qcfg_output (size:192b/24B) */
+/* hwrm_mfg_otp_qcfg_output (size:256b/32B) */
typedef struct hwrm_mfg_otp_qcfg_output {
/* The specific error status for the command. */
@@ -76816,12 +90887,29 @@ typedef struct hwrm_mfg_otp_qcfg_output {
uint16_t crt_rev_id;
/* This field indicates the current sbi rev id value. */
uint16_t sbi_rev_id;
- uint8_t unused_0[3];
+ /* This field indicates the current max speed value. */
+ uint16_t max_speed;
+ /* max speed value not configured. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_NOT_CONFIGURED UINT32_C(0x0)
+ /* max speed value 50G. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_50G UINT32_C(0x1)
+ /* max speed value 100G. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_100G UINT32_C(0x2)
+ /* max speed value 200G. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_200G UINT32_C(0x3)
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_LAST HWRM_MFG_OTP_QCFG_OUTPUT_MAX_SPEED_200G
+ /* This field sets a bitmap for new enabled fields. */
+ uint16_t enables_bitmap;
+ /* This bit indicates that the max_speed field is valid. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_ENABLES_BITMAP_MAX_SPEED UINT32_C(0x10)
+ /* This bit indicates that this enables bitmap is valid. */
+ #define HWRM_MFG_OTP_QCFG_OUTPUT_ENABLES_BITMAP_ENABLES_VALID UINT32_C(0x8000)
+ uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
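
The valid-byte wording repeated in each output above is the one response-ordering contract shared by every HWRM command: firmware writes the valid field last, so the host must observe it as '1' before trusting the rest of the response. A minimal polling sketch, assuming the valid byte sits at the end of the response buffer and using an illustrative poll interval and timeout:

    static bool
    hwrm_resp_valid(const volatile uint8_t *resp, size_t resp_len, int tries)
    {
            /* Firmware writes this byte last, per the comments above. */
            const volatile uint8_t *valid = resp + resp_len - 1;

            while (tries-- > 0) {
                    if (*valid == 1)
                            return (true);
                    DELAY(100); /* illustrative 100us poll interval */
            }
            return (false);
    }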
@@ -76903,9 +90991,9 @@ typedef struct hwrm_mfg_hdma_test_output {
uint8_t unused_0[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -76979,9 +91067,9 @@ typedef struct hwrm_mfg_fru_eeprom_write_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -77059,9 +91147,9 @@ typedef struct hwrm_mfg_fru_eeprom_read_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -77184,7 +91272,7 @@ typedef struct hwrm_mfg_soc_image_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77280,7 +91368,7 @@ typedef struct hwrm_mfg_soc_qstatus_output {
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77289,14 +91377,14 @@ typedef struct hwrm_mfg_soc_qstatus_output {
uint8_t valid;
} hwrm_mfg_soc_qstatus_output_t, *phwrm_mfg_soc_qstatus_output_t;
-/*******************************
- * hwrm_mfg_param_seeprom_sync *
- *******************************/
+/*****************************************
+ * hwrm_mfg_param_critical_data_finalize *
+ *****************************************/
-/* hwrm_mfg_param_seeprom_sync_input (size:640b/80B) */
+/* hwrm_mfg_param_critical_data_finalize_input (size:192b/24B) */
-typedef struct hwrm_mfg_param_seeprom_sync_input {
+typedef struct hwrm_mfg_param_critical_data_finalize_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -77325,34 +91413,19 @@ typedef struct hwrm_mfg_param_seeprom_sync_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
+ uint16_t flags;
/*
- * The host (DMA) buffer physical addr for the firmware to read from.
- * This buffer is populated with the parameter binary bits which is
- * going to be programmed into the seeprom memory.
- */
- uint64_t data_addr;
- /* Size of the buffer pointed to by data_addr. */
- uint16_t data_len;
- /* The offset within the SEEPROM to start programming. */
- uint16_t offset;
- uint32_t flags;
- /*
- * This bit must be '1' to sync the parameters available in factory
- * config to seeprom binary before writing to seeprom
- */
- #define HWRM_MFG_PARAM_SEEPROM_SYNC_INPUT_FLAGS_FAC_CFG_SYNC UINT32_C(0x1)
- /*
- * This bit must be '1' for the seeprom data to be written to
- * a specified address with out any change in the binary
+ * Set to 1 if you wish to unlock and erase the region
+ * before finalizing the data.
*/
- #define HWRM_MFG_PARAM_SEEPROM_SYNC_INPUT_FLAGS_WRITE_BINARY_ONLY UINT32_C(0x80000000)
- /* Reserved for future use. */
- uint8_t reserved[48];
-} hwrm_mfg_param_seeprom_sync_input_t, *phwrm_mfg_param_seeprom_sync_input_t;
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_INPUT_FLAGS_FORCE UINT32_C(0x1)
+ uint16_t unused_0;
+ uint32_t unused_1;
+} hwrm_mfg_param_critical_data_finalize_input_t, *phwrm_mfg_param_critical_data_finalize_input_t;
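
A hedged sketch of issuing the finalize command, using the FORCE flag described above to unlock and erase the region first; the header-init and send helpers are assumed driver plumbing:

    static int
    example_critical_data_finalize(struct bnxt_softc *softc, bool force)
    {
            struct hwrm_mfg_param_critical_data_finalize_input req = {0};

            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            if (force) /* unlock and erase before finalizing */
                    req.flags = htole16(
                        HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_INPUT_FLAGS_FORCE);
            return (hwrm_send_message(softc, &req, sizeof(req))); /* assumed */
    }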
-/* hwrm_mfg_param_seeprom_sync_output (size:128b/16B) */
+/* hwrm_mfg_param_critical_data_finalize_output (size:128b/16B) */
-typedef struct hwrm_mfg_param_seeprom_sync_output {
+typedef struct hwrm_mfg_param_critical_data_finalize_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -77361,28 +91434,36 @@ typedef struct hwrm_mfg_param_seeprom_sync_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Total length of data written to the seeprom memory. */
- uint16_t total_data_len;
- uint16_t unused_0;
- uint8_t unused_1[3];
+ /* Total length of data finalized. */
+ uint32_t total_data_len;
+ uint16_t error_status;
+ /* Critical data region was already locked */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_OUTPUT_ERROR_STATUS_ALREADY_LOCKED UINT32_C(0x1)
+ /* Flash region was not entirely empty */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_OUTPUT_ERROR_STATUS_NOT_EMPTY UINT32_C(0x2)
+ /* FACT_CFG was missing for write to critical cfg */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_OUTPUT_ERROR_STATUS_MISSING_FACT_CFG UINT32_C(0x4)
+ /* VPD was missing for write to critical cfg */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_FINALIZE_OUTPUT_ERROR_STATUS_MISSING_VPD UINT32_C(0x8)
+ uint8_t unused_1;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
-} hwrm_mfg_param_seeprom_sync_output_t, *phwrm_mfg_param_seeprom_sync_output_t;
+} hwrm_mfg_param_critical_data_finalize_output_t, *phwrm_mfg_param_critical_data_finalize_output_t;
-/*******************************
- * hwrm_mfg_param_seeprom_read *
- *******************************/
+/*************************************
+ * hwrm_mfg_param_critical_data_read *
+ *************************************/
-/* hwrm_mfg_param_seeprom_read_input (size:256b/32B) */
+/* hwrm_mfg_param_critical_data_read_input (size:256b/32B) */
-typedef struct hwrm_mfg_param_seeprom_read_input {
+typedef struct hwrm_mfg_param_critical_data_read_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -77413,8 +91494,8 @@ typedef struct hwrm_mfg_param_seeprom_read_input {
uint64_t resp_addr;
/*
* The host (DMA) buffer physical addr for the firmware to write to.
- * This buffer is populated with the parameter binary bits which is
- * going to be read from the seeprom memory.
+ * This buffer is populated with data read from the
+ * critical data storage location.
*/
uint64_t data_addr;
/*
@@ -77422,15 +91503,14 @@ typedef struct hwrm_mfg_param_seeprom_read_input {
* use this entire buffer or less than the entire buffer, but never
* more.
*/
- uint16_t data_len;
- /* The offset within the SEEPROM to start reading. */
- uint16_t offset;
- uint8_t unused[4];
-} hwrm_mfg_param_seeprom_read_input_t, *phwrm_mfg_param_seeprom_read_input_t;
+ uint32_t data_len;
+ /* The offset within the critical data to start reading. */
+ uint32_t offset;
+} hwrm_mfg_param_critical_data_read_input_t, *phwrm_mfg_param_critical_data_read_input_t;
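
Because the read is a firmware-to-host DMA, the caller supplies a bus address and a bounded length. A sketch assuming the driver already owns a DMA-safe buffer (buf_paddr and buf_len are hypothetical names, and the helpers are assumed plumbing):

    static int
    example_critical_data_read(struct bnxt_softc *softc, uint64_t buf_paddr,
        uint32_t buf_len, uint32_t offset)
    {
            struct hwrm_mfg_param_critical_data_read_input req = {0};

            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            req.data_addr = htole64(buf_paddr); /* firmware writes here */
            req.data_len = htole32(buf_len);    /* firmware never exceeds this */
            req.offset = htole32(offset);
            return (hwrm_send_message(softc, &req, sizeof(req))); /* assumed */
    }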
-/* hwrm_mfg_param_seeprom_read_output (size:128b/16B) */
+/* hwrm_mfg_param_critical_data_read_output (size:128b/16B) */
-typedef struct hwrm_mfg_param_seeprom_read_output {
+typedef struct hwrm_mfg_param_critical_data_read_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -77440,27 +91520,27 @@ typedef struct hwrm_mfg_param_seeprom_read_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
/* Total length of data written to the host memory. */
- uint16_t total_data_len;
- uint16_t unused_0[2];
+ uint32_t total_data_len;
+ uint16_t unused_0;
+ uint8_t unused_1;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
- uint8_t unused_1;
-} hwrm_mfg_param_seeprom_read_output_t, *phwrm_mfg_param_seeprom_read_output_t;
+} hwrm_mfg_param_critical_data_read_output_t, *phwrm_mfg_param_critical_data_read_output_t;
-/*********************************
- * hwrm_mfg_param_seeprom_health *
- *********************************/
+/***************************************
+ * hwrm_mfg_param_critical_data_health *
+ ***************************************/
-/* hwrm_mfg_param_seeprom_health_input (size:192b/24B) */
+/* hwrm_mfg_param_critical_data_health_input (size:192b/24B) */
-typedef struct hwrm_mfg_param_seeprom_health_input {
+typedef struct hwrm_mfg_param_critical_data_health_input {
/* The HWRM command request type. */
uint16_t req_type;
/*
@@ -77490,11 +91570,11 @@ typedef struct hwrm_mfg_param_seeprom_health_input {
*/
uint64_t resp_addr;
uint64_t unused_0;
-} hwrm_mfg_param_seeprom_health_input_t, *phwrm_mfg_param_seeprom_health_input_t;
+} hwrm_mfg_param_critical_data_health_input_t, *phwrm_mfg_param_critical_data_health_input_t;
-/* hwrm_mfg_param_seeprom_health_output (size:128b/16B) */
+/* hwrm_mfg_param_critical_data_health_output (size:128b/16B) */
-typedef struct hwrm_mfg_param_seeprom_health_output {
+typedef struct hwrm_mfg_param_critical_data_health_output {
/* The specific error status for the command. */
uint16_t error_code;
/* The HWRM command request type. */
@@ -77504,37 +91584,25 @@ typedef struct hwrm_mfg_param_seeprom_health_output {
/* The length of the response data in number of bytes. */
uint16_t resp_len;
uint32_t health_status;
- /* No response from the device */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_NO_RESPONSE UINT32_C(0x1)
+ /* region entirely empty */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_IS_EMPTY UINT32_C(0x1)
/* Data checksum fail */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_CHECKSUM_FAIL UINT32_C(0x2)
- /* Mac address not populated */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_NO_MAC_ADDRESS UINT32_C(0x4)
- /* Part number not populated */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_NO_PART_NUMBER UINT32_C(0x8)
- /* Serial number not populated */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_NO_SR_NUMBER UINT32_C(0x10)
- /* Package description not populated */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_STATUS_NO_PKG_DESCRIPTION UINT32_C(0x20)
- uint16_t health_code;
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_SUCCESS UINT32_C(0x0)
- /* No response from the device */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_NO_RESPONSE UINT32_C(0x1)
- /* Data checksum fail */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_CHECKSUM_FAIL UINT32_C(0x2)
- /* Mac address not populated */
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_NO_MAC_ADDRESS UINT32_C(0x3)
- #define HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_LAST HWRM_MFG_PARAM_SEEPROM_HEALTH_OUTPUT_HEALTH_CODE_NO_MAC_ADDRESS
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_CHECKSUM_FAIL UINT32_C(0x2)
+ /* Malformed data (header/footer) */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_MALFORMED_DATA UINT32_C(0x4)
+ /* Critical data not locked */
+ #define HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_NOT_LOCKED UINT32_C(0x8)
+ uint16_t unused_1;
+ uint8_t unused_2;
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
- uint8_t unused_1;
-} hwrm_mfg_param_seeprom_health_output_t, *phwrm_mfg_param_seeprom_health_output_t;
+} hwrm_mfg_param_critical_data_health_output_t, *phwrm_mfg_param_critical_data_health_output_t;
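
Unlike the old seeprom variant, health_status is now a pure bitmap with no companion health_code enum, so callers should test each bit independently; a sketch:

    static void
    example_report_critical_data_health(uint32_t health_status)
    {
            /* Bits are independent; several may be set at once. */
            if (health_status &
                HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_IS_EMPTY)
                    printf("critical data: region empty\n");
            if (health_status &
                HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_CHECKSUM_FAIL)
                    printf("critical data: checksum failure\n");
            if (health_status &
                HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_MALFORMED_DATA)
                    printf("critical data: malformed header/footer\n");
            if (health_status &
                HWRM_MFG_PARAM_CRITICAL_DATA_HEALTH_OUTPUT_HEALTH_STATUS_NOT_LOCKED)
                    printf("critical data: not locked\n");
    }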
/*****************************
* hwrm_mfg_prvsn_export_csr *
@@ -77573,16 +91641,24 @@ typedef struct hwrm_mfg_prvsn_export_csr_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host destination address. This is the host address where
+ * 64-bit Host destination address. This is the host address where
* data will be written.
*/
uint64_t host_dest_addr;
- /* Provisioning slot number. 0-indexed. */
+ /* Provisioning slot number. 0-indexed. */
uint8_t slot;
uint8_t unused_0;
/* Size in bytes of the available host buffer. */
uint16_t host_buf_len;
- uint32_t unused_1;
+ uint8_t flags;
+ /*
+ * This bit is only used when external secure SoC is used for
+ * secure boot. If this bit is set, export a certificate signing
+ * request (CSR) from the security SoC non-volatile storage on
+ * the device.
+ */
+ #define HWRM_MFG_PRVSN_EXPORT_CSR_INPUT_FLAGS_SECURE_SOC_SUPPORT UINT32_C(0x1)
+ uint8_t unused_1[3];
} hwrm_mfg_prvsn_export_csr_input_t, *phwrm_mfg_prvsn_export_csr_input_t;
/* hwrm_mfg_prvsn_export_csr_output (size:128b/16B) */
@@ -77596,7 +91672,7 @@ typedef struct hwrm_mfg_prvsn_export_csr_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Provisioning slot number. 0-indexed. */
+ /* Provisioning slot number. 0-indexed. */
uint8_t slot;
uint8_t unused_0;
/* Size in bytes of the exported CSR. */
@@ -77604,7 +91680,7 @@ typedef struct hwrm_mfg_prvsn_export_csr_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77668,16 +91744,24 @@ typedef struct hwrm_mfg_prvsn_import_cert_input {
*/
uint64_t resp_addr;
/*
- * 64-bit Host source address. This is the host address where
+ * 64-bit Host source address. This is the host address where
* source data is located.
*/
uint64_t host_src_addr;
- /* Provisioning slot number. 0-indexed. */
+ /* Provisioning slot number. 0-indexed. */
uint8_t slot;
uint8_t unused_0;
/* Size in bytes of the certificate chain. */
uint16_t cert_len;
- uint32_t unused_1;
+ uint8_t flags;
+ /*
+ * This bit is only used when external secure SoC is used for
+ * secure boot. If this bit is set, then import a HSM-signed
+ * certificate chain to security SoC non-volatile storage on
+ * the device.
+ */
+ #define HWRM_MFG_PRVSN_IMPORT_CERT_INPUT_FLAGS_SECURE_SOC_SUPPORT UINT32_C(0x1)
+ uint8_t unused_1[3];
} hwrm_mfg_prvsn_import_cert_input_t, *phwrm_mfg_prvsn_import_cert_input_t;
/* hwrm_mfg_prvsn_import_cert_output (size:128b/16B) */
@@ -77691,7 +91775,7 @@ typedef struct hwrm_mfg_prvsn_import_cert_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- /* Provisioning slot number. 0-indexed. */
+ /* Provisioning slot number. 0-indexed. */
uint8_t slot;
/* Provisioned state */
uint8_t state;
@@ -77703,7 +91787,7 @@ typedef struct hwrm_mfg_prvsn_import_cert_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77788,7 +91872,7 @@ typedef struct hwrm_mfg_prvsn_get_state_output {
/* Flag indicating if provision get state is valid. */
uint8_t get_state_valid;
/*
- * Provision get state is invalid. The attestation agent has not
+ * Provision get state is invalid. The attestation agent has not
* yet initialized and not completed verification of the
* provisioned certificate chain.
* The slot_status field is undetermined.
@@ -77798,7 +91882,7 @@ typedef struct hwrm_mfg_prvsn_get_state_output {
#define HWRM_MFG_PRVSN_GET_STATE_OUTPUT_GET_STATE_VALID_SPDM UINT32_C(0x1)
/* Provision get state is valid for Cerberus. */
#define HWRM_MFG_PRVSN_GET_STATE_OUTPUT_GET_STATE_VALID_CERBERUS UINT32_C(0x2)
- /* Provision get state is valid. There is no attestation agent. */
+ /* Provision get state is valid. There is no attestation agent. */
#define HWRM_MFG_PRVSN_GET_STATE_OUTPUT_GET_STATE_VALID_NONE UINT32_C(0xff)
#define HWRM_MFG_PRVSN_GET_STATE_OUTPUT_GET_STATE_VALID_LAST HWRM_MFG_PRVSN_GET_STATE_OUTPUT_GET_STATE_VALID_NONE
/*
@@ -77822,7 +91906,7 @@ typedef struct hwrm_mfg_prvsn_get_state_output {
uint8_t unused_0[5];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77831,6 +91915,118 @@ typedef struct hwrm_mfg_prvsn_get_state_output {
uint8_t valid;
} hwrm_mfg_prvsn_get_state_output_t, *phwrm_mfg_prvsn_get_state_output_t;
+/******************************
+ * hwrm_mfg_prvsn_export_cert *
+ ******************************/
+
+
+/* hwrm_mfg_prvsn_export_cert_input (size:256b/32B) */
+
+typedef struct hwrm_mfg_prvsn_export_cert_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * 64-bit Host destination address. This is the host address where
+ * data will be written.
+ */
+ uint64_t host_dest_addr;
+ /* Provisioning slot number. 0-indexed. */
+ uint8_t slot;
+ uint8_t unused_0;
+ /* Size in bytes of the available host buffer. */
+ uint16_t host_buf_len;
+ uint8_t flags;
+ /*
+ * This bit is only used when external secure SoC is used
+ * for secure boot. If this bit is set, then export the
+ * provisioned certificate from the security SoC non-volatile
+ * storage device.
+ */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_INPUT_FLAGS_SECURE_SOC_SUPPORT UINT32_C(0x1)
+ uint8_t unused_1[3];
+} hwrm_mfg_prvsn_export_cert_input_t, *phwrm_mfg_prvsn_export_cert_input_t;
+
+/* hwrm_mfg_prvsn_export_cert_output (size:128b/16B) */
+
+typedef struct hwrm_mfg_prvsn_export_cert_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* Provisioning slot number. 0-indexed. */
+ uint8_t slot;
+ uint8_t unused_0;
+ /*
+ * Size in bytes of the exported certificate chain. If there are no
+ * certificates provisioned for the specified slot, the device will
+ * return a successful response with cert_len equal to 0.
+ */
+ uint16_t cert_len;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written.
+ * When writing a command completion or response to an internal
+ * processor, the order of writes has to be such that this field is
+ * written last.
+ */
+ uint8_t valid;
+} hwrm_mfg_prvsn_export_cert_output_t, *phwrm_mfg_prvsn_export_cert_output_t;
+
+/* hwrm_mfg_prvsn_export_cert_cmd_err (size:64b/8B) */
+
+typedef struct hwrm_mfg_prvsn_export_cert_cmd_err {
+ /*
+	 * command-specific error codes that go to
+	 * the cmd_err field in the Common HWRM Error Response.
+ */
+ uint8_t code;
+ /* Unknown error. */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_UNKNOWN UINT32_C(0x0)
+ /* Slot invalid */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_SLOT_INVALID UINT32_C(0x1)
+ /*
+ * The provisioned certificates are invalid due to device ID change,
+ * NVRAM corruption or another reason.
+ */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_CERT_INVALID UINT32_C(0x2)
+ /* Host provided buffer is too small */
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_BUFFER_LENGTH UINT32_C(0x3)
+ #define HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_LAST HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_BUFFER_LENGTH
+ uint8_t unused_0[7];
+} hwrm_mfg_prvsn_export_cert_cmd_err_t, *phwrm_mfg_prvsn_export_cert_cmd_err_t;
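
A sketch of mapping the command-specific error block above to messages; how the driver extracts the cmd_err bytes from the common error response is assumed plumbing:

    static const char *
    example_export_cert_strerror(const hwrm_mfg_prvsn_export_cert_cmd_err_t *e)
    {
            switch (e->code) {
            case HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_SLOT_INVALID:
                    return ("invalid provisioning slot");
            case HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_CERT_INVALID:
                    return ("provisioned certificate invalid");
            case HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_BUFFER_LENGTH:
                    return ("host buffer too small");
            case HWRM_MFG_PRVSN_EXPORT_CERT_CMD_ERR_CODE_UNKNOWN:
            default:
                    return ("unknown error");
            }
    }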
+
/********************************
* hwrm_mfg_get_nvm_measurement *
********************************/
@@ -77869,7 +92065,7 @@ typedef struct hwrm_mfg_get_nvm_measurement_input {
uint64_t resp_addr;
} hwrm_mfg_get_nvm_measurement_input_t, *phwrm_mfg_get_nvm_measurement_input_t;
-/* hwrm_mfg_get_nvm_measurement_output (size:448b/56B) */
+/* hwrm_mfg_get_nvm_measurement_output (size:704b/88B) */
typedef struct hwrm_mfg_get_nvm_measurement_output {
/* The specific error status for the command. */
@@ -77883,7 +92079,7 @@ typedef struct hwrm_mfg_get_nvm_measurement_output {
/* Flag indicating if the hash returned is valid. */
uint8_t hash_state;
/*
- * Measurement hash is invalid. There was an error
+ * Measurement hash is invalid. There was an error
* calculating the hash or firmware does not support NVM
* measurement.
*/
@@ -77901,13 +92097,22 @@ typedef struct hwrm_mfg_get_nvm_measurement_output {
/* Measurement is calculated in real time */
#define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_CALC_TIME_LIVE UINT32_C(0x1)
#define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_CALC_TIME_LAST HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_CALC_TIME_LIVE
- uint8_t unused_0[6];
- /* Thirty two bytes HMAC SHA256 hash of NVM configuration. */
- uint8_t hash[32];
+ /* Flag indicating the hash type when hash_state is valid. */
+ uint8_t hash_type;
+ /* Measurement hash is SHA256(32 bytes). */
+ #define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_HASH_TYPE_SHA256 UINT32_C(0x0)
+ /* Measurement hash is SHA384(48 bytes). */
+ #define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_HASH_TYPE_SHA384 UINT32_C(0x1)
+ /* Measurement hash is SHA512(64 bytes). */
+ #define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_HASH_TYPE_SHA512 UINT32_C(0x2)
+ #define HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_HASH_TYPE_LAST HWRM_MFG_GET_NVM_MEASUREMENT_OUTPUT_HASH_TYPE_SHA512
+ uint8_t unused_0[5];
+ /* NVM configuration hash with length indicated by hash_type. */
+ uint8_t hash[64];
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -77998,7 +92203,7 @@ typedef struct hwrm_mfg_psoc_qstatus_output {
uint8_t unused_2[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
+ * is completely written to RAM. This field should be read as '1'
* to indicate that the output has been completely written.
* When writing a command completion or response to an internal
* processor, the order of writes has to be such that this field is
@@ -78097,9 +92302,9 @@ typedef struct hwrm_mfg_selftest_qlist_output {
uint8_t unused_2[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -78249,9 +92454,9 @@ typedef struct hwrm_mfg_selftest_exec_output {
uint8_t unused_1[3];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -78293,8 +92498,29 @@ typedef struct hwrm_oem_cmd_input {
* point to a physically contiguous block of memory.
*/
uint64_t resp_addr;
- uint32_t IANA;
- uint32_t unused_0;
+ /*
+	 * The organization owning the message format. Set this field
+	 * to 0x14e4 for Broadcom internal use when the naming
+	 * authority is set to PCI_SIG.
+ */
+ uint32_t oem_id;
+ /* The naming authority used for setting the oem_id. */
+ uint8_t naming_authority;
+ /* Invalid naming authority */
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_INVALID UINT32_C(0x0)
+ /* PCI_SIG naming authority numbering is used */
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG UINT32_C(0x1)
+ #define HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_LAST HWRM_OEM_CMD_INPUT_NAMING_AUTHORITY_PCI_SIG
+ /* The message family within the organization. */
+ uint8_t message_family;
+ /* Invalid message family */
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_INVALID UINT32_C(0x0)
+	/* This message is targeted at Truflow */
+	#define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_TRUFLOW	UINT32_C(0x1)
+	/* This message is targeted at RoCE */
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_ROCE UINT32_C(0x2)
+ #define HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_LAST HWRM_OEM_CMD_INPUT_MESSAGE_FAMILY_ROCE
+ uint16_t unused;
/* This field contains the vendor specific command data. */
uint32_t oem_data[26];
} hwrm_oem_cmd_input_t, *phwrm_oem_cmd_input_t;
@@ -78310,16 +92536,21 @@ typedef struct hwrm_oem_cmd_output {
uint16_t seq_id;
/* The length of the response data in number of bytes. */
uint16_t resp_len;
- uint32_t IANA;
- uint32_t unused_0;
+ /* The organization owning the message format. */
+ uint32_t oem_id;
+ /* The naming authority used for setting the oem_id. */
+ uint8_t naming_authority;
+ /* The message family within the organization. */
+ uint8_t message_family;
+ uint16_t unused;
/* This field contains the vendor specific response data. */
uint32_t oem_data[18];
uint8_t unused_1[7];
/*
* This field is used in Output records to indicate that the output
- * is completely written to RAM. This field should be read as '1'
- * to indicate that the output has been completely written.
- * When writing a command completion or response to an internal processor,
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
* the order of writes has to be such that this field is written last.
*/
uint8_t valid;
@@ -78378,4 +92609,820 @@ typedef struct hwrm_sv_output {
uint32_t opaque[32];
} hwrm_sv_output_t, *phwrm_sv_output_t;
+/*******************
+ * hwrm_udcc_qcaps *
+ *******************/
+
+
+/* hwrm_udcc_qcaps_input (size:128b/16B) */
+
+typedef struct hwrm_udcc_qcaps_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_udcc_qcaps_input_t, *phwrm_udcc_qcaps_input_t;
+
+/* hwrm_udcc_qcaps_output (size:192b/24B) */
+
+typedef struct hwrm_udcc_qcaps_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+	 * This field represents the guaranteed minimum number of UDCC sessions
+ * available to the function.
+ */
+ uint16_t min_sessions;
+ /*
+ * This field represents unguaranteed maximum number of UDCC sessions
+ * available to the function.
+ */
+ uint16_t max_sessions;
+ /*
+ * This value indicates the type of session being modified by the
+ * UDCC.
+ */
+ uint8_t session_type;
+	/* sessions are allocated on a per-destination basis. */
+	#define HWRM_UDCC_QCAPS_OUTPUT_SESSION_TYPE_PER_DESTINATION	UINT32_C(0x0)
+	/* sessions are allocated on a per-QP basis. */
+ #define HWRM_UDCC_QCAPS_OUTPUT_SESSION_TYPE_PER_QP UINT32_C(0x1)
+ #define HWRM_UDCC_QCAPS_OUTPUT_SESSION_TYPE_LAST HWRM_UDCC_QCAPS_OUTPUT_SESSION_TYPE_PER_QP
+ uint8_t unused_0[3];
+ /*
+ * This field represents the maximum number of bytes of UDCC program
+ * configuration data that one hwrm_udcc_comp_cfg request or
+ * hwrm_udcc_comp_qcfg response can transfer.
+ * The value is determined by the UDCC firmware.
+ */
+ uint16_t max_comp_cfg_xfer;
+ /*
+ * This field represents the maximum number of bytes of UDCC program
+ * status or statistics data that one hwrm_udcc_comp_query response
+ * can transfer. The value is determined by the UDCC firmware.
+ */
+ uint16_t max_comp_data_xfer;
+ uint8_t unused_1[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_qcaps_output_t, *phwrm_udcc_qcaps_output_t;
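
Drivers are expected to size their component-transfer buffers from these caps before using hwrm_udcc_comp_cfg/qcfg/query. A hedged sketch; the shared response-buffer convention (softc->hwrm_cmd_resp.idi_vaddr) and the send helpers are assumptions about driver state:

    static int
    example_udcc_query_caps(struct bnxt_softc *softc, uint16_t *cfg_xfer,
        uint16_t *data_xfer)
    {
            struct hwrm_udcc_qcaps_input req = {0};
            struct hwrm_udcc_qcaps_output *resp =
                (void *)softc->hwrm_cmd_resp.idi_vaddr; /* assumed */
            int rc;

            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            rc = hwrm_send_message(softc, &req, sizeof(req)); /* assumed */
            if (rc == 0) {
                    *cfg_xfer = le16toh(resp->max_comp_cfg_xfer);
                    *data_xfer = le16toh(resp->max_comp_data_xfer);
            }
            return (rc);
    }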
+
+/*****************
+ * hwrm_udcc_cfg *
+ *****************/
+
+
+/* hwrm_udcc_cfg_input (size:192b/24B) */
+
+typedef struct hwrm_udcc_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /*
+ * This bit must be '1' for the udcc_mode field to be
+ * configured.
+ */
+ #define HWRM_UDCC_CFG_INPUT_ENABLES_UDCC_MODE UINT32_C(0x1)
+ /* UDCC mode for this function. */
+ uint8_t udcc_mode;
+ /* UDCC is not enabled. */
+ #define HWRM_UDCC_CFG_INPUT_UDCC_MODE_DISABLED UINT32_C(0x0)
+ /* UDCC is enabled. */
+ #define HWRM_UDCC_CFG_INPUT_UDCC_MODE_ENABLED UINT32_C(0x1)
+ #define HWRM_UDCC_CFG_INPUT_UDCC_MODE_LAST HWRM_UDCC_CFG_INPUT_UDCC_MODE_ENABLED
+ uint8_t unused_1[3];
+} hwrm_udcc_cfg_input_t, *phwrm_udcc_cfg_input_t;
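
Note the enables discipline: firmware consults udcc_mode only when the matching enables bit is set. A sketch of switching UDCC on for a function (helpers assumed):

    static int
    example_udcc_enable(struct bnxt_softc *softc)
    {
            struct hwrm_udcc_cfg_input req = {0};

            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            req.enables = htole32(HWRM_UDCC_CFG_INPUT_ENABLES_UDCC_MODE);
            req.udcc_mode = HWRM_UDCC_CFG_INPUT_UDCC_MODE_ENABLED;
            return (hwrm_send_message(softc, &req, sizeof(req))); /* assumed */
    }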
+
+/* hwrm_udcc_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_cfg_output_t, *phwrm_udcc_cfg_output_t;
+
+/******************
+ * hwrm_udcc_qcfg *
+ ******************/
+
+
+/* hwrm_udcc_qcfg_input (size:128b/16B) */
+
+typedef struct hwrm_udcc_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+} hwrm_udcc_qcfg_input_t, *phwrm_udcc_qcfg_input_t;
+
+/* hwrm_udcc_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* UDCC mode for this function. */
+ uint8_t udcc_mode;
+ uint8_t unused_1[6];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_qcfg_output_t, *phwrm_udcc_qcfg_output_t;
+
+/*************************
+ * hwrm_udcc_session_cfg *
+ *************************/
+
+
+/* hwrm_udcc_session_cfg_input (size:384b/48B) */
+
+typedef struct hwrm_udcc_session_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ uint32_t enables;
+ /* This bit must be '1' for the session_state to be configured. */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_SESSION_STATE UINT32_C(0x1)
+ /* This bit must be '1' for the dest_mac to be configured. */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_DEST_MAC UINT32_C(0x2)
+ /* This bit must be '1' for the src_mac to be configured. */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_SRC_MAC UINT32_C(0x4)
+ /* This bit must be '1' for the tx_stats_record to be configured. */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_TX_STATS_RECORD UINT32_C(0x8)
+ /* This bit must be '1' for the rx_stats_record to be configured. */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_RX_STATS_RECORD UINT32_C(0x10)
+ /* State to configure for the session. */
+ uint8_t session_state;
+ /*
+ * This bit is set if the session is to be enabled and have firmware
+ * querying it for events. The bit is cleared if the session is to
+ * be disabled in firmware.
+ */
+ #define HWRM_UDCC_SESSION_CFG_INPUT_SESSION_STATE_ENABLED UINT32_C(0x1)
+	/* UDCC flow has not been created in the driver. */
+	#define HWRM_UDCC_SESSION_CFG_INPUT_SESSION_STATE_FLOW_NOT_CREATED	UINT32_C(0x2)
+	/* UDCC flow has been deleted in the driver. */
+	#define HWRM_UDCC_SESSION_CFG_INPUT_SESSION_STATE_FLOW_HAS_BEEN_DELETED	UINT32_C(0x4)
+ uint8_t unused_1;
+ /* A handle for the session to be configured, if previously allocated. */
+ uint16_t session_id;
+ /* destination mac address used for the session. */
+ uint8_t dest_mac[6];
+ uint16_t unused_2;
+ /* source mac address used for the session. */
+ uint8_t src_mac[6];
+ uint16_t unused_3;
+ /*
+ * address for the tx flow statistics record to be sampled by the
+ * UDCC firmware. Session must be disabled to take effect.
+ */
+ uint32_t tx_stats_record;
+ /*
+ * address for the rx flow statistics record to be sampled by the
+ * UDCC firmware. Session must be disabled to take effect.
+ */
+ uint32_t rx_stats_record;
+} hwrm_udcc_session_cfg_input_t, *phwrm_udcc_session_cfg_input_t;
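
The same per-field enables discipline applies here. A sketch that enables a previously allocated session and programs both MAC addresses (helpers assumed):

    static int
    example_udcc_session_enable(struct bnxt_softc *softc, uint16_t session_id,
        const uint8_t dmac[6], const uint8_t smac[6])
    {
            struct hwrm_udcc_session_cfg_input req = {0};

            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            req.enables = htole32(
                HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_SESSION_STATE |
                HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_DEST_MAC |
                HWRM_UDCC_SESSION_CFG_INPUT_ENABLES_SRC_MAC);
            req.session_state = HWRM_UDCC_SESSION_CFG_INPUT_SESSION_STATE_ENABLED;
            req.session_id = htole16(session_id);
            memcpy(req.dest_mac, dmac, 6);
            memcpy(req.src_mac, smac, 6);
            return (hwrm_send_message(softc, &req, sizeof(req))); /* assumed */
    }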
+
+/* hwrm_udcc_session_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_session_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_session_cfg_output_t, *phwrm_udcc_session_cfg_output_t;
+
+/**************************
+ * hwrm_udcc_session_qcfg *
+ **************************/
+
+
+/* hwrm_udcc_session_qcfg_input (size:192b/24B) */
+
+typedef struct hwrm_udcc_session_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* A handle for the session to be queried, if previously allocated. */
+ uint16_t session_id;
+ uint8_t unused_0[6];
+} hwrm_udcc_session_qcfg_input_t, *phwrm_udcc_session_qcfg_input_t;
+
+/* hwrm_udcc_session_qcfg_output (size:512b/64B) */
+
+typedef struct hwrm_udcc_session_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* session_state specifying configuration of the session. */
+ uint8_t session_state;
+ /*
+ * This bit is set if the session is enabled and firmware is
+ * querying it for events. The bit is cleared if no querying
+ * should occur for this session.
+ */
+ #define HWRM_UDCC_SESSION_QCFG_OUTPUT_SESSION_STATE_ENABLED UINT32_C(0x1)
+	/* UDCC flow has not been created in the driver. */
+	#define HWRM_UDCC_SESSION_QCFG_OUTPUT_SESSION_STATE_FLOW_NOT_CREATED	UINT32_C(0x2)
+	/* UDCC flow has been deleted in the driver. */
+	#define HWRM_UDCC_SESSION_QCFG_OUTPUT_SESSION_STATE_FLOW_HAS_BEEN_DELETED	UINT32_C(0x4)
+ uint8_t unused_0;
+ /* destination mac address used for the session. */
+ uint8_t dest_mac[6];
+ /*
+	 * a 4-byte or 16-byte IP address, depending on whether the ip_type
+	 * specifies IPv4 or IPv6. For IPv4 addresses, the first 4 bytes of the
+	 * 16-byte field are used; the remaining 12 bytes are not used.
+ */
+ uint32_t dest_ip[4];
+ uint8_t unused_1[2];
+ /* source mac address used for the session. */
+ uint8_t src_mac[6];
+ /* source QP number used for the session. */
+ uint32_t src_qp_num;
+ /* destination QP number used for the session. */
+ uint32_t dest_qp_num;
+ /*
+ * address for the tx flow statistics record to be sampled by the
+ * UDCC firmware.
+ */
+ uint32_t tx_stats_record;
+ /*
+ * address for the rx flow statistics record to be sampled by the
+ * UDCC firmware.
+ */
+ uint32_t rx_stats_record;
+ uint8_t unused_2[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_session_qcfg_output_t, *phwrm_udcc_session_qcfg_output_t;
+
+/***************************
+ * hwrm_udcc_session_query *
+ ***************************/
+
+
+/* hwrm_udcc_session_query_input (size:192b/24B) */
+
+typedef struct hwrm_udcc_session_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /* A handle for the session to be queried, if previously allocated. */
+ uint16_t session_id;
+ uint8_t unused_0[6];
+} hwrm_udcc_session_query_input_t, *phwrm_udcc_session_query_input_t;
+
+/* hwrm_udcc_session_query_output (size:640b/80B) */
+
+typedef struct hwrm_udcc_session_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /* field for the minimum RTT value (in ns) for the session. */
+ uint32_t min_rtt_ns;
+ /* field for the maximum RTT value (in ns) for the session. */
+ uint32_t max_rtt_ns;
+ /*
+	 * field for the currently configured rate (in Mbps) for the
+ * session.
+ */
+ uint32_t cur_rate_mbps;
+ /*
+ * count for the number of events sent from FW to the UDCC
+ * program.
+ */
+ uint32_t tx_event_count;
+ /*
+ * count for the number of CNP events sent from FW to the UDCC
+ * program.
+ */
+ uint32_t cnp_rx_event_count;
+ /*
+ * count for the number of RTT request events received by the FW from
+ * the UDCC program.
+ */
+ uint32_t rtt_req_count;
+ /*
+ * count for the number of RTT response events sent by the FW to the
+ * UDCC program.
+ */
+ uint32_t rtt_resp_count;
+ /* count for the number of bytes transmitted for the session. */
+ uint32_t tx_bytes_count;
+ /* count for the number of packets transmitted for the session. */
+ uint32_t tx_packets_count;
+ /* count of initiator probes transmitted for the session. */
+ uint32_t init_probes_sent;
+ /* count of terminator probes received for the session. */
+ uint32_t term_probes_recv;
+ /* count of CNP packets received for the session. */
+ uint32_t cnp_packets_recv;
+ /* count of retransmission timeout events received for the session. */
+ uint32_t rto_event_recv;
+ /* count of sequence error NAK events received for the session. */
+ uint32_t seq_err_nak_recv;
+ /* the current number of qps associated with the session. */
+ uint32_t qp_count;
+ /* count for the number of Tx events detected for the session. */
+ uint32_t tx_event_detect_count;
+ uint8_t unused_1[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_session_query_output_t, *phwrm_udcc_session_query_output_t;
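
A sketch of surfacing a few of the per-session counters above; softc->dev and the response-buffer convention are assumed driver state:

    static void
    example_udcc_log_session(struct bnxt_softc *softc,
        const hwrm_udcc_session_query_output_t *resp, uint16_t session_id)
    {
            device_printf(softc->dev,
                "udcc session %u: rtt %u-%u ns, rate %u Mbps, %u QPs\n",
                session_id, le32toh(resp->min_rtt_ns),
                le32toh(resp->max_rtt_ns), le32toh(resp->cur_rate_mbps),
                le32toh(resp->qp_count));
    }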
+
+/**********************
+ * hwrm_udcc_comp_cfg *
+ **********************/
+
+
+/* hwrm_udcc_comp_cfg_input (size:576b/72B) */
+
+typedef struct hwrm_udcc_comp_cfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field holds the configuration arguments, which can be used
+ * to specify the context of the configuration data, e.g. type,
+	 * session ID, etc. Not all of arg_buf may be utilized.
+ * The format and meaning of the arguments are internal to
+ * the UDCC program.
+ */
+ uint8_t arg_buf[40];
+ /*
+ * This field specifies the number of bytes in arg_buf that are
+ * configuration arguments. It can be zero if there are no arguments.
+ */
+ uint32_t arg_len;
+ /*
+ * This field specifies the length of the configuration data
+ * stored in the host memory. The host driver shall guarantee
+ * this number is not greater than the maximum configuration
+ * transfer size that is specified by the max_comp_cfg_xfer
+ * field of hwrm_udcc_qcaps_output.
+ */
+ uint32_t cfg_len;
+ /*
+ * This field specifies the address of the host memory where
+ * the configuration data is stored. The format and meaning of
+ * the configuration data are internal to the UDCC program.
+ */
+ uint64_t cfg_host_addr;
+} hwrm_udcc_comp_cfg_input_t, *phwrm_udcc_comp_cfg_input_t;
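
Since honoring cfg_len is the caller's responsibility, a wrapper would reject lengths above the cap advertised by hwrm_udcc_qcaps; a sketch (max_comp_cfg_xfer assumed cached from the earlier qcaps call, helpers assumed):

    static int
    example_udcc_comp_cfg(struct bnxt_softc *softc, uint64_t cfg_paddr,
        uint32_t cfg_len, uint16_t max_comp_cfg_xfer)
    {
            struct hwrm_udcc_comp_cfg_input req = {0};

            if (cfg_len > max_comp_cfg_xfer)
                    return (EINVAL); /* must not exceed the advertised cap */
            bnxt_hwrm_cmd_hdr_init(softc, &req, sizeof(req)); /* assumed */
            req.cfg_len = htole32(cfg_len);
            req.cfg_host_addr = htole64(cfg_paddr);
            return (hwrm_send_message(softc, &req, sizeof(req))); /* assumed */
    }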
+
+/* hwrm_udcc_comp_cfg_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_comp_cfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ uint8_t unused_0[7];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_comp_cfg_output_t, *phwrm_udcc_comp_cfg_output_t;
+
+/***********************
+ * hwrm_udcc_comp_qcfg *
+ ***********************/
+
+
+/* hwrm_udcc_comp_qcfg_input (size:576b/72B) */
+
+typedef struct hwrm_udcc_comp_qcfg_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointer pointing to a host buffer to which the
+ * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field holds the query arguments, which can be used to
+ * specify the context of the query, e.g. type, session ID, etc.
+	 * Not all of arg_buf may be utilized.
+ * The format and meaning of the arguments are internal to
+ * the UDCC program.
+ */
+ uint8_t arg_buf[40];
+ /*
+ * This field specifies the number of bytes in arg_buf that are
+ * query arguments. It can be zero if there are no arguments.
+ */
+ uint32_t arg_len;
+ /*
+ * This field specifies the size of the buffer in the host memory
+ * for receiving the configuration data. The host driver shall
+ * guarantee the size of the buffer is not smaller than
+ * the maximum configuration transfer size that is specified by
+ * the max_comp_cfg_xfer field of hwrm_udcc_qcaps_output.
+ */
+ uint32_t cfg_host_buf_size;
+ /*
+ * This field specifies the address of the host memory where
+	 * the queried configuration is to be stored.
+ */
+ uint64_t cfg_host_addr;
+} hwrm_udcc_comp_qcfg_input_t, *phwrm_udcc_comp_qcfg_input_t;
+
+/* hwrm_udcc_comp_qcfg_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_comp_qcfg_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field specifies the length of configuration data transferred
+ * into the host memory. The amount of data transferred is up to
+ * the maximum configuration transfer size that is specified by
+ * the max_comp_cfg_xfer field of hwrm_udcc_qcaps_output.
+ */
+ uint32_t cfg_len;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_comp_qcfg_output_t, *phwrm_udcc_comp_qcfg_output_t;
+
+/************************
+ * hwrm_udcc_comp_query *
+ ************************/
+
+
+/* hwrm_udcc_comp_query_input (size:576b/72B) */
+
+typedef struct hwrm_udcc_comp_query_input {
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /*
+ * The completion ring to send the completion event on. This should
+ * be the NQ ID returned from the `nq_alloc` HWRM command.
+ */
+ uint16_t cmpl_ring;
+ /*
+ * The sequence ID is used by the driver for tracking multiple
+ * commands. This ID is treated as opaque data by the firmware and
+ * the value is returned in the `hwrm_resp_hdr` upon completion.
+ */
+ uint16_t seq_id;
+ /*
+ * The target ID of the command:
+ * * 0x0-0xFFF8 - The function ID
+ * * 0xFFF8-0xFFFC, 0xFFFE - Reserved for internal processors
+ * * 0xFFFD - Reserved for user-space HWRM interface
+ * * 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+ /*
+	 * A physical address pointing to a host buffer to which the
+	 * command's response data will be written. This can be either a host
+ * physical address (HPA) or a guest physical address (GPA) and must
+ * point to a physically contiguous block of memory.
+ */
+ uint64_t resp_addr;
+ /*
+ * This field holds the query arguments, which can be used to
+ * specify the context of the query, e.g. type, session ID, etc.
+	 * Not all bytes of arg_buf may be utilized.
+ * The format and meaning of the arguments are internal to
+ * the UDCC program.
+ */
+ uint8_t arg_buf[40];
+ /*
+ * This field specifies the number of bytes in arg_buf that are
+ * query arguments. It can be zero if there are no arguments.
+ */
+ uint32_t arg_len;
+ /*
+ * This field specifies the size of the buffer in the host memory
+ * for receiving the status or statistics data. The host driver
+ * shall guarantee the size of the buffer is not smaller than
+ * the maximum data transfer size that is specified by
+ * the max_comp_data_xfer field of hwrm_udcc_qcaps_output.
+ */
+ uint32_t data_host_buf_size;
+ /*
+ * This field specifies the address of the host memory where
+	 * the queried data is to be stored.
+ */
+ uint64_t data_host_addr;
+} hwrm_udcc_comp_query_input_t, *phwrm_udcc_comp_query_input_t;
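
/*
 * A hedged sketch of preparing this request per the field comments
 * above; the softc fields, the response DMA area, and the
 * HWRM_UDCC_COMP_QUERY req_type constant are assumptions made for
 * illustration only.
 */
static inline void
bnxt_udcc_comp_query_prep(struct bnxt_softc *softc,
    hwrm_udcc_comp_query_input_t *req, uint16_t seq,
    uint64_t buf_paddr, uint32_t buf_size)
{
	memset(req, 0, sizeof(*req));
	req->req_type = htole16(HWRM_UDCC_COMP_QUERY);
	req->cmpl_ring = htole16(0xffff);	/* no completion ring */
	req->seq_id = htole16(seq);		/* opaque; echoed back */
	req->target_id = htole16(0xffff);	/* 0xFFFF targets the HWRM */
	req->resp_addr = htole64(softc->hwrm_cmd_resp.idi_paddr);
	req->arg_len = 0;			/* no query arguments */
	/* buf_size must be >= max_comp_data_xfer from hwrm_udcc_qcaps */
	req->data_host_buf_size = htole32(buf_size);
	req->data_host_addr = htole64(buf_paddr);
}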
+
+/* hwrm_udcc_comp_query_output (size:128b/16B) */
+
+typedef struct hwrm_udcc_comp_query_output {
+ /* The specific error status for the command. */
+ uint16_t error_code;
+ /* The HWRM command request type. */
+ uint16_t req_type;
+ /* The sequence ID from the original command. */
+ uint16_t seq_id;
+ /* The length of the response data in number of bytes. */
+ uint16_t resp_len;
+ /*
+ * This field specifies the length of status or statistics data
+ * transferred into the host memory. The amount of data transferred
+ * is up to the maximum data transfer size that is specified by
+ * the max_comp_data_xfer field of hwrm_udcc_qcaps_output.
+ */
+ uint32_t data_len;
+ uint8_t unused_0[3];
+ /*
+ * This field is used in Output records to indicate that the output
+ * is completely written to RAM. This field should be read as '1'
+ * to indicate that the output has been completely written. When
+ * writing a command completion or response to an internal processor,
+ * the order of writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} hwrm_udcc_comp_query_output_t, *phwrm_udcc_comp_query_output_t;
+
#endif /* _HSI_STRUCT_DEF_H_ */
diff --git a/sys/dev/bnxt/if_bnxt.c b/sys/dev/bnxt/bnxt_en/if_bnxt.c
index 9fd76301d6b3..471e26a4b252 100644
--- a/sys/dev/bnxt/if_bnxt.c
+++ b/sys/dev/bnxt/bnxt_en/if_bnxt.c
@@ -40,7 +40,6 @@
#include <machine/resource.h>
#include <dev/pci/pcireg.h>
-#include <dev/pci/pcivar.h>
#include <net/if.h>
#include <net/if_dl.h>
@@ -49,6 +48,15 @@
#include <net/ethernet.h>
#include <net/iflib.h>
+#define WANT_NATIVE_PCI_GET_SLOT
+#include <linux/pci.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/idr.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rcupdate.h>
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_rss.h"
@@ -61,6 +69,8 @@
#include "bnxt_sysctl.h"
#include "hsi_struct_def.h"
#include "bnxt_mgmt.h"
+#include "bnxt_ulp.h"
+#include "bnxt_auxbus_compat.h"
/*
* PCI Device ID Table
@@ -134,8 +144,18 @@ static const pci_vendor_info_t bnxt_vendor_info_array[] =
"Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, BCM57504,
"Broadcom BCM57504 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
+ PVID(BROADCOM_VENDOR_ID, BCM57504_NPAR,
+ "Broadcom BCM57504 NetXtreme-E Ethernet Partition"),
PVID(BROADCOM_VENDOR_ID, BCM57502,
"Broadcom BCM57502 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet"),
+ PVID(BROADCOM_VENDOR_ID, BCM57608,
+ "Broadcom BCM57608 NetXtreme-E 25Gb/50Gb/100Gb/200Gb/400Gb Ethernet"),
+ PVID(BROADCOM_VENDOR_ID, BCM57604,
+ "Broadcom BCM57604 NetXtreme-E 25Gb/50Gb/100Gb/200Gb Ethernet"),
+ PVID(BROADCOM_VENDOR_ID, BCM57602,
+ "Broadcom BCM57602 NetXtreme-E 25Gb/50Gb Ethernet"),
+ PVID(BROADCOM_VENDOR_ID, BCM57601,
+ "Broadcom BCM57601 NetXtreme-E 25Gb/50Gb Ethernet"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1,
"Broadcom NetXtreme-C Ethernet Virtual Function"),
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2,
@@ -160,6 +180,8 @@ static const pci_vendor_info_t bnxt_vendor_info_array[] =
SLIST_HEAD(softc_list, bnxt_softc_list) pf_list;
int bnxt_num_pfs = 0;
+void process_nq(struct bnxt_softc *softc, uint16_t nqid);
static void *bnxt_register(device_t dev);
/* Soft queue setup and teardown */
@@ -216,7 +238,7 @@ static void bnxt_clear_ids(struct bnxt_softc *softc);
static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr);
static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr);
static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr);
-static void bnxt_def_cp_task(void *context);
+static void bnxt_def_cp_task(void *context, int pending);
static void bnxt_handle_async_event(struct bnxt_softc *softc,
struct cmpl_base *cmpl);
static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link);
@@ -225,7 +247,12 @@ static int bnxt_wol_config(if_ctx_t ctx);
static bool bnxt_if_needs_restart(if_ctx_t, enum iflib_restart_event);
static int bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c);
static void bnxt_get_port_module_status(struct bnxt_softc *softc);
+static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc);
+static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc);
+static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay);
+void bnxt_queue_sp_work(struct bnxt_softc *bp);
+void bnxt_fw_reset(struct bnxt_softc *bp);
/*
* Device Interface Declaration
*/
@@ -248,12 +275,38 @@ static driver_t bnxt_driver = {
DRIVER_MODULE(bnxt, pci, bnxt_driver, 0, 0);
-MODULE_DEPEND(bnxt, pci, 1, 1, 1);
-MODULE_DEPEND(bnxt, ether, 1, 1, 1);
-MODULE_DEPEND(bnxt, iflib, 1, 1, 1);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEPEND(if_bnxt, pci, 1, 1, 1);
+MODULE_DEPEND(if_bnxt, ether, 1, 1, 1);
+MODULE_DEPEND(if_bnxt, iflib, 1, 1, 1);
+MODULE_DEPEND(if_bnxt, linuxkpi, 1, 1, 1);
+MODULE_VERSION(if_bnxt, 1);
IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array);
+void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
+u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
+
+u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
+{
+
+ if (!bar_idx)
+ return bus_space_read_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
+ else
+ return bus_space_read_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);
+}
+
+void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
+{
+
+ if (!bar_idx)
+ bus_space_write_4(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, htole32(val));
+ else
+ bus_space_write_4(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, htole32(val));
+}
+
+static DEFINE_IDA(bnxt_aux_dev_ids);
+
static device_method_t bnxt_iflib_methods[] = {
DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc),
DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc),
@@ -303,7 +356,7 @@ static driver_t bnxt_iflib_driver = {
* iflib shared context
*/
-#define BNXT_DRIVER_VERSION "2.20.0.1"
+#define BNXT_DRIVER_VERSION "230.0.133.0"
const char bnxt_driver_version[] = BNXT_DRIVER_VERSION;
extern struct if_txrx bnxt_txrx;
static struct if_shared_ctx bnxt_sctx_init = {
@@ -331,14 +384,20 @@ static struct if_shared_ctx bnxt_sctx_init = {
.isc_ntxd_min = {16, 16, 16},
.isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2,
PAGE_SIZE / sizeof(struct tx_bd_short),
- PAGE_SIZE / sizeof(struct cmpl_base) * 2},
+ /* NQ depth 4096 */
+ PAGE_SIZE / sizeof(struct cmpl_base) * 16},
.isc_ntxd_max = {BNXT_MAX_TXD, BNXT_MAX_TXD, BNXT_MAX_TXD},
- .isc_admin_intrcnt = 1,
+ .isc_admin_intrcnt = BNXT_ROCE_IRQ_COUNT,
.isc_vendor_info = bnxt_vendor_info_array,
.isc_driver_version = bnxt_driver_version,
};
+#define PCI_SUBSYSTEM_ID 0x2e
+static struct workqueue_struct *bnxt_pf_wq;
+
+extern void bnxt_destroy_irq(struct bnxt_softc *softc);
+
/*
* Device Methods
*/
@@ -369,6 +428,18 @@ bnxt_nq_free(struct bnxt_softc *softc)
softc->nq_rings = NULL;
}
+
+static void
+bnxt_set_db_mask(struct bnxt_softc *bp, struct bnxt_ring *db,
+ u32 ring_type)
+{
+ if (BNXT_CHIP_P7(bp)) {
+ db->db_epoch_mask = db->db_ring_mask + 1;
+ db->db_epoch_shift = DBR_EPOCH_SFT - ilog2(db->db_epoch_mask);
+	}
+}
+
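/*
 * A hedged sketch of how the epoch fields set above could fold into a
 * doorbell index, assuming `idx` is a free-running producer counter:
 * the bit just above the ring mask toggles on every wrap and is
 * shifted up to the DBR epoch position. The DB_RING_IDX() macro the
 * driver actually uses below tracks epochs per-slot via epoch_arr and
 * may differ in detail.
 */
static inline uint64_t
db_ring_idx_sketch(const struct bnxt_ring *db, uint32_t idx)
{
	uint64_t epoch = (uint64_t)(idx & db->db_epoch_mask) <<
	    db->db_epoch_shift;

	return ((idx & db->db_ring_mask) | epoch);
}
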
/*
* Device Dependent Configuration Functions
*/
@@ -384,7 +455,7 @@ bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
softc = iflib_get_softc(ctx);
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
bnxt_nq_alloc(softc, ntxqsets);
if (!softc->nq_rings) {
device_printf(iflib_get_dev(ctx),
@@ -429,28 +500,32 @@ bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
softc->tx_cp_rings[i].ring.idx = i;
softc->tx_cp_rings[i].ring.id =
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
- softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5: softc->tx_cp_rings[i].ring.id * 0x80;
+ softc->tx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size: softc->tx_cp_rings[i].ring.id * 0x80;
softc->tx_cp_rings[i].ring.ring_size =
softc->scctx->isc_ntxd[0];
+ softc->tx_cp_rings[i].ring.db_ring_mask =
+ softc->tx_cp_rings[i].ring.ring_size - 1;
softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
+
/* Set up the TX ring */
softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->tx_rings[i].softc = softc;
softc->tx_rings[i].idx = i;
softc->tx_rings[i].id =
(softc->scctx->isc_nrxqsets * 2) + 1 + i;
- softc->tx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->tx_rings[i].id * 0x80;
+ softc->tx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->tx_rings[i].id * 0x80;
softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1];
+ softc->tx_rings[i].db_ring_mask = softc->tx_rings[i].ring_size - 1;
softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1];
softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1];
bnxt_create_tx_sysctls(softc, i);
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
/* Set up the Notification ring (NQ) */
softc->nq_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE;
softc->nq_rings[i].ring.phys_id =
@@ -458,11 +533,13 @@ bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
softc->nq_rings[i].ring.softc = softc;
softc->nq_rings[i].ring.idx = i;
softc->nq_rings[i].ring.id = i;
- softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->nq_rings[i].ring.id * 0x80;
+ softc->nq_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->nq_rings[i].ring.id * 0x80;
softc->nq_rings[i].ring.ring_size = softc->scctx->isc_ntxd[2];
+ softc->nq_rings[i].ring.db_ring_mask = softc->nq_rings[i].ring.ring_size - 1;
softc->nq_rings[i].ring.vaddr = vaddrs[i * ntxqs + 2];
softc->nq_rings[i].ring.paddr = paddrs[i * ntxqs + 2];
+ softc->nq_rings[i].type = Q_TYPE_TX;
}
}
@@ -617,13 +694,16 @@ bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
softc->rx_cp_rings[i].ring.softc = softc;
softc->rx_cp_rings[i].ring.idx = i;
softc->rx_cp_rings[i].ring.id = i + 1;
- softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->rx_cp_rings[i].ring.id * 0x80;
+ softc->rx_cp_rings[i].ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->rx_cp_rings[i].ring.id * 0x80;
/*
* If this ring overflows, RX stops working.
*/
softc->rx_cp_rings[i].ring.ring_size =
softc->scctx->isc_nrxd[0];
+ softc->rx_cp_rings[i].ring.db_ring_mask =
+ softc->rx_cp_rings[i].ring.ring_size - 1;
+
softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs];
softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs];
@@ -632,9 +712,11 @@ bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
softc->rx_rings[i].softc = softc;
softc->rx_rings[i].idx = i;
softc->rx_rings[i].id = i + 1;
- softc->rx_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->rx_rings[i].id * 0x80;
+ softc->rx_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->rx_rings[i].id * 0x80;
softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1];
+ softc->rx_rings[i].db_ring_mask =
+		    softc->rx_rings[i].ring_size - 1;
softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1];
softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1];
@@ -648,15 +730,15 @@ bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs,
"Unable to allocate space for TPA\n");
goto tpa_alloc_fail;
}
-
/* Allocate the AG ring */
softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->ag_rings[i].softc = softc;
softc->ag_rings[i].idx = i;
softc->ag_rings[i].id = nrxqsets + i + 1;
- softc->ag_rings[i].doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->ag_rings[i].id * 0x80;
+ softc->ag_rings[i].doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->ag_rings[i].id * 0x80;
softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2];
+ softc->ag_rings[i].db_ring_mask = softc->ag_rings[i].ring_size - 1;
softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2];
softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2];
@@ -770,26 +852,43 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc)
return rc;
}
-static void bnxt_free_ring(struct bnxt_softc *bp, struct bnxt_ring_mem_info *rmem)
+static void bnxt_free_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
{
- int i;
+ int i;
- for (i = 0; i < rmem->nr_pages; i++) {
- if (!rmem->pg_arr[i].idi_vaddr)
- continue;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ if (!rmem->pg_arr[i].idi_vaddr)
+ continue;
iflib_dma_free(&rmem->pg_arr[i]);
- rmem->pg_arr[i].idi_vaddr = NULL;
- }
- if (rmem->pg_tbl.idi_vaddr) {
+ rmem->pg_arr[i].idi_vaddr = NULL;
+ }
+ if (rmem->pg_tbl.idi_vaddr) {
iflib_dma_free(&rmem->pg_tbl);
- rmem->pg_tbl.idi_vaddr = NULL;
+ rmem->pg_tbl.idi_vaddr = NULL;
+	}
+ if (rmem->vmem_size && *rmem->vmem) {
+ free(*rmem->vmem, M_DEVBUF);
+ *rmem->vmem = NULL;
+ }
+}
+
+static void bnxt_init_ctx_mem(struct bnxt_ctx_mem_type *ctxm, void *p, int len)
+{
+ u8 init_val = ctxm->init_value;
+ u16 offset = ctxm->init_offset;
+ u8 *p2 = p;
+ int i;
- }
- if (rmem->vmem_size && *rmem->vmem) {
- free(*rmem->vmem, M_DEVBUF);
- *rmem->vmem = NULL;
- }
+ if (!init_val)
+ return;
+ if (offset == BNXT_CTX_INIT_INVALID_OFFSET) {
+ memset(p, init_val, len);
+ return;
+ }
+ for (i = 0; i < len; i += ctxm->entry_size)
+ *(p2 + i + offset) = init_val;
}
static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *rmem)
@@ -820,8 +919,9 @@ static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *
if (rc)
return -ENOMEM;
- if (rmem->init_val)
- memset(rmem->pg_arr[i].idi_vaddr, rmem->init_val, rmem->page_size);
+ if (rmem->ctx_mem)
+ bnxt_init_ctx_mem(rmem->ctx_mem, rmem->pg_arr[i].idi_vaddr,
+ rmem->page_size);
if (rmem->nr_pages > 1 || rmem->depth > 0) {
if (i == rmem->nr_pages - 2 &&
@@ -844,11 +944,12 @@ static int bnxt_alloc_ring(struct bnxt_softc *softc, struct bnxt_ring_mem_info *
return 0;
}
-#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
+
+#define HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES \
(HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_QP | \
- HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_SRQ | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_CQ | \
- HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
+ HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_VNIC | \
HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_STAT)
static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
@@ -866,14 +967,14 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt_softc *softc,
}
static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
- struct bnxt_ctx_pg_info *ctx_pg, uint32_t mem_size,
- uint8_t depth, bool use_init_val)
+ struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
+ u8 depth, struct bnxt_ctx_mem_type *ctxm)
{
struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
int rc;
if (!mem_size)
- return 0;
+ return -EINVAL;
ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) {
@@ -884,8 +985,8 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
int nr_tbls, i;
rmem->depth = 2;
- ctx_pg->ctx_pg_tbl = malloc(MAX_CTX_PAGES * sizeof(ctx_pg),
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ ctx_pg->ctx_pg_tbl = kzalloc(MAX_CTX_PAGES * sizeof(ctx_pg),
+ GFP_KERNEL);
if (!ctx_pg->ctx_pg_tbl)
return -ENOMEM;
nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES);
@@ -896,7 +997,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
for (i = 0; i < nr_tbls; i++) {
struct bnxt_ctx_pg_info *pg_tbl;
- pg_tbl = malloc(sizeof(*pg_tbl), M_DEVBUF, M_NOWAIT | M_ZERO);
+ pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL);
if (!pg_tbl)
return -ENOMEM;
ctx_pg->ctx_pg_tbl[i] = pg_tbl;
@@ -904,8 +1005,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
memcpy(&rmem->pg_tbl, &ctx_pg->ctx_arr[i], sizeof(struct iflib_dma_info));
rmem->depth = 1;
rmem->nr_pages = MAX_CTX_PAGES;
- if (use_init_val)
- rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
+ rmem->ctx_mem = ctxm;
if (i == (nr_tbls - 1)) {
int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
@@ -920,8 +1020,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt_softc *softc,
rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
if (rmem->nr_pages > 1 || depth)
rmem->depth = 1;
- if (use_init_val)
- rmem->init_val = softc->ctx_mem->ctx_kind_initializer;
+ rmem->ctx_mem = ctxm;
rc = bnxt_alloc_ctx_mem_blk(softc, ctx_pg);
}
return rc;
@@ -949,51 +1048,131 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt_softc *softc,
free(pg_tbl , M_DEVBUF);
ctx_pg->ctx_pg_tbl[i] = NULL;
}
- free(ctx_pg->ctx_pg_tbl , M_DEVBUF);
+ kfree(ctx_pg->ctx_pg_tbl);
ctx_pg->ctx_pg_tbl = NULL;
}
bnxt_free_ring(softc, rmem);
ctx_pg->nr_pages = 0;
}
+static int bnxt_setup_ctxm_pg_tbls(struct bnxt_softc *softc,
+ struct bnxt_ctx_mem_type *ctxm, u32 entries,
+ u8 pg_lvl)
+{
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, rc = 0, n = 1;
+ u32 mem_size;
+
+ if (!ctxm->entry_size || !ctx_pg)
+ return -EINVAL;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ if (ctxm->entry_multiple)
+ entries = roundup(entries, ctxm->entry_multiple);
+ entries = clamp_t(u32, entries, ctxm->min_entries, ctxm->max_entries);
+ mem_size = entries * ctxm->entry_size;
+ for (i = 0; i < n && !rc; i++) {
+ ctx_pg[i].entries = entries;
+ rc = bnxt_alloc_ctx_pg_tbls(softc, &ctx_pg[i], mem_size, pg_lvl,
+ ctxm->init_value ? ctxm : NULL);
+ }
+ if (!rc)
+ ctxm->mem_valid = 1;
+ return rc;
+}
+
static void bnxt_free_ctx_mem(struct bnxt_softc *softc)
{
struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
- int i;
+ u16 type;
if (!ctx)
return;
- if (ctx->tqm_mem[0]) {
- for (i = 0; i < softc->max_q + 1; i++) {
- if (!ctx->tqm_mem[i])
- continue;
- bnxt_free_ctx_pg_tbls(softc, ctx->tqm_mem[i]);
- }
- free(ctx->tqm_mem[0] , M_DEVBUF);
- ctx->tqm_mem[0] = NULL;
+ for (type = 0; type < BNXT_CTX_MAX; type++) {
+ struct bnxt_ctx_mem_type *ctxm = &ctx->ctx_arr[type];
+ struct bnxt_ctx_pg_info *ctx_pg = ctxm->pg_info;
+ int i, n = 1;
+
+ if (!ctx_pg)
+ continue;
+ if (ctxm->instance_bmap)
+ n = hweight32(ctxm->instance_bmap);
+ for (i = 0; i < n; i++)
+ bnxt_free_ctx_pg_tbls(softc, &ctx_pg[i]);
+
+ kfree(ctx_pg);
+ ctxm->pg_info = NULL;
}
- bnxt_free_ctx_pg_tbls(softc, &ctx->tim_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->mrav_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->stat_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->vnic_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->cq_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->srq_mem);
- bnxt_free_ctx_pg_tbls(softc, &ctx->qp_mem);
ctx->flags &= ~BNXT_CTX_FLAG_INITED;
- free(softc->ctx_mem, M_DEVBUF);
+ kfree(ctx);
softc->ctx_mem = NULL;
}
+static int
+bnxt_backing_store_cfg_v2(struct bnxt_softc *softc, u32 ena)
+{
+ struct bnxt_ctx_mem_info *ctx = softc->ctx_mem;
+ struct bnxt_ctx_mem_type *ctxm;
+ u16 last_type = BNXT_CTX_INV;
+ int rc = 0;
+ u16 type;
+
+ if (BNXT_PF(softc)) {
+ for (type = BNXT_CTX_SRT_TRACE; type <= BNXT_CTX_ROCE_HWRM_TRACE; type++) {
+ ctxm = &ctx->ctx_arr[type];
+ if (!(ctxm->flags & BNXT_CTX_MEM_TYPE_VALID))
+ continue;
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
+ if (rc) {
+ device_printf(softc->dev, "Unable to setup ctx page for type:0x%x.\n", type);
+ rc = 0;
+ continue;
+ }
+			/* TODO: trace buffer support is kept disabled for now; needs revisit. */
+ //bnxt_bs_trace_init(bp, ctxm, type - BNXT_CTX_SRT_TRACE);
+ last_type = type;
+ }
+ }
+
+ if (last_type == BNXT_CTX_INV) {
+ if (!ena)
+ return 0;
+ else if (ena & HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM)
+ last_type = BNXT_CTX_MAX - 1;
+ else
+ last_type = BNXT_CTX_L2_MAX - 1;
+ }
+ ctx->ctx_arr[last_type].last = 1;
+
+ for (type = 0 ; type < BNXT_CTX_V2_MAX; type++) {
+ ctxm = &ctx->ctx_arr[type];
+
+ if (!ctxm->mem_valid)
+ continue;
+ rc = bnxt_hwrm_func_backing_store_cfg_v2(softc, ctxm, ctxm->last);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
{
struct bnxt_ctx_pg_info *ctx_pg;
+ struct bnxt_ctx_mem_type *ctxm;
struct bnxt_ctx_mem_info *ctx;
- uint32_t mem_size, ena, entries;
+ u32 l2_qps, qp1_qps, max_qps;
+ u32 ena, entries_sp, entries;
+ u32 srqs, max_srqs, min;
+ u32 num_mr, num_ah;
+ u32 extra_srqs = 0;
+ u32 extra_qps = 0;
+ u8 pg_lvl = 1;
int i, rc;
- if (!BNXT_CHIP_P5(softc))
+ if (!BNXT_CHIP_P5_PLUS(softc))
return 0;
rc = bnxt_hwrm_func_backing_store_qcaps(softc);
@@ -1006,97 +1185,115 @@ static int bnxt_alloc_ctx_mem(struct bnxt_softc *softc)
if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
return 0;
- ctx_pg = &ctx->qp_mem;
- ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
- (1024 * 64); /* FIXME: Enable 64K QPs */
- mem_size = ctx->qp_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
+ ena = 0;
+ if (BNXT_VF(softc))
+ goto skip_legacy;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ l2_qps = ctxm->qp_l2_entries;
+ qp1_qps = ctxm->qp_qp1_entries;
+ max_qps = ctxm->max_entries;
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ srqs = ctxm->srq_l2_entries;
+ max_srqs = ctxm->max_entries;
+ if (softc->flags & BNXT_FLAG_ROCE_CAP) {
+ pg_lvl = 2;
+ extra_qps = min_t(u32, 65536, max_qps - l2_qps - qp1_qps);
+ extra_srqs = min_t(u32, 8192, max_srqs - srqs);
+ }
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_QP];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps,
+ pg_lvl);
if (rc)
return rc;
- ctx_pg = &ctx->srq_mem;
- /* FIXME: Temporarily enable 8K RoCE SRQs */
- ctx_pg->entries = ctx->srq_max_l2_entries + (1024 * 8);
- mem_size = ctx->srq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_SRQ];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, srqs + extra_srqs, pg_lvl);
if (rc)
return rc;
- ctx_pg = &ctx->cq_mem;
- /* FIXME: Temporarily enable 64K RoCE CQ */
- ctx_pg->entries = ctx->cq_max_l2_entries + (1024 * 64 * 2);
- mem_size = ctx->cq_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_CQ];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->cq_l2_entries +
+ extra_qps * 2, pg_lvl);
if (rc)
return rc;
- ctx_pg = &ctx->vnic_mem;
- ctx_pg->entries = ctx->vnic_max_vnic_entries +
- ctx->vnic_max_ring_table_entries;
- mem_size = ctx->vnic_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_VNIC];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
if (rc)
return rc;
- ctx_pg = &ctx->stat_mem;
- ctx_pg->entries = ctx->stat_max_entries;
- mem_size = ctx->stat_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 1, true);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STAT];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, ctxm->max_entries, 1);
if (rc)
return rc;
- ctx_pg = &ctx->mrav_mem;
- /* FIXME: Temporarily enable 256K RoCE MRs */
- ctx_pg->entries = 1024 * 256;
- mem_size = ctx->mrav_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, true);
+ if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
+ goto skip_rdma;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_MRAV];
+ ctx_pg = ctxm->pg_info;
+	/* 128K extra is needed to accommodate static AH context
+ * allocation by f/w.
+ */
+ num_mr = min_t(u32, ctxm->max_entries / 2, 1024 * 256);
+ num_ah = min_t(u32, num_mr, 1024 * 128);
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, num_mr + num_ah, 2);
if (rc)
return rc;
+ ctx_pg->entries = num_mr + num_ah;
ena = HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_MRAV;
+ if (ctxm->mrav_num_entries_units)
+ ctx_pg->entries =
+ ((num_mr / ctxm->mrav_num_entries_units) << 16) |
+ (num_ah / ctxm->mrav_num_entries_units);
- ctx_pg = &ctx->tim_mem;
- /* Firmware needs number of TIM entries equal to
- * number of Total QP contexts enabled, including
- * L2 QPs.
- */
- ctx_pg->entries = ctx->qp_min_qp1_entries +
- ctx->qp_max_l2_entries + 1024 * 64;
- /* FIXME: L2 driver is not able to create queue depth
- * worth of 1M 32bit timers. Need a fix when l2-roce
- * interface is well designed.
- */
- mem_size = ctx->tim_entry_size * ctx_pg->entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
+ ctxm = &ctx->ctx_arr[BNXT_CTX_TIM];
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, l2_qps + qp1_qps + extra_qps, 1);
if (rc)
return rc;
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TIM;
- /* FIXME: Temporarily increase the TQM queue depth
- * by 1K for 1K RoCE QPs.
- */
- entries = ctx->qp_max_l2_entries + 1024 * 64;
- entries = roundup(entries, ctx->tqm_entries_multiple);
- entries = clamp_t(uint32_t, entries, ctx->tqm_min_entries_per_ring,
- ctx->tqm_max_entries_per_ring);
- for (i = 0; i < softc->max_q + 1; i++) {
- ctx_pg = ctx->tqm_mem[i];
- ctx_pg->entries = entries;
- mem_size = ctx->tqm_entry_size * entries;
- rc = bnxt_alloc_ctx_pg_tbls(softc, ctx_pg, mem_size, 2, false);
+skip_rdma:
+ ctxm = &ctx->ctx_arr[BNXT_CTX_STQM];
+ min = ctxm->min_entries;
+ entries_sp = ctx->ctx_arr[BNXT_CTX_VNIC].vnic_entries + l2_qps +
+ 2 * (extra_qps + qp1_qps) + min;
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries_sp, 2);
if (rc)
return rc;
- ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+
+ ctxm = &ctx->ctx_arr[BNXT_CTX_FTQM];
+ entries = l2_qps + 2 * (extra_qps + qp1_qps);
+ rc = bnxt_setup_ctxm_pg_tbls(softc, ctxm, entries, 2);
+ if (rc)
+ return rc;
+ for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
+ if (i < BNXT_MAX_TQM_LEGACY_RINGS)
+ ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_SP << i;
+ else
+ ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_ENABLES_TQM_RING8;
}
ena |= HWRM_FUNC_BACKING_STORE_CFG_INPUT_DFLT_ENABLES;
- rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
- if (rc)
+
+skip_legacy:
+ if (BNXT_CHIP_P7(softc)) {
+ if (softc->fw_cap & BNXT_FW_CAP_BACKING_STORE_V2)
+ rc = bnxt_backing_store_cfg_v2(softc, ena);
+ } else {
+ rc = bnxt_hwrm_func_backing_store_cfg(softc, ena);
+ }
+ if (rc) {
device_printf(softc->dev, "Failed configuring context mem, rc = %d.\n",
- rc);
- else
- ctx->flags |= BNXT_CTX_FLAG_INITED;
+ rc);
+ return rc;
+ }
+ ctx->flags |= BNXT_CTX_FLAG_INITED;
return 0;
}
+
/*
* If we update the index, a write barrier is needed after the write to ensure
* the completion ring has space before the RX/TX ring does. Since we can't
@@ -1239,6 +1436,141 @@ static void bnxt_thor_db_nq(void *db_ptr, bool enable_irq)
BUS_SPACE_BARRIER_WRITE);
}
+static void
+bnxt_thor2_db_rx(void *db_ptr, uint16_t idx)
+{
+ struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
+ struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
+ uint64_t db_val;
+
+ if (idx >= ring->ring_size) {
+		device_printf(ring->softc->dev, "%s: index %d out of range\n", __func__, idx);
+ return;
+ }
+
+ db_val = ((DBR_PATH_L2 | DBR_TYPE_SRQ | DBR_VALID | idx) |
+ ((uint64_t)ring->phys_id << DBR_XID_SFT));
+
+ /* Add the PI index */
+ db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);
+
+ bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
+ BUS_SPACE_BARRIER_WRITE);
+ bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
+ htole64(db_val));
+}
+
+static void
+bnxt_thor2_db_tx(void *db_ptr, uint16_t idx)
+{
+ struct bnxt_ring *ring = (struct bnxt_ring *) db_ptr;
+ struct bnxt_bar_info *db_bar = &ring->softc->doorbell_bar;
+ uint64_t db_val;
+
+ if (idx >= ring->ring_size) {
+		device_printf(ring->softc->dev, "%s: index %d out of range\n", __func__, idx);
+ return;
+ }
+
+ db_val = ((DBR_PATH_L2 | DBR_TYPE_SQ | DBR_VALID | idx) |
+ ((uint64_t)ring->phys_id << DBR_XID_SFT));
+
+ /* Add the PI index */
+ db_val |= DB_RING_IDX(ring, idx, ring->epoch_arr[idx]);
+
+ bus_space_barrier(db_bar->tag, db_bar->handle, ring->doorbell, 8,
+ BUS_SPACE_BARRIER_WRITE);
+ bus_space_write_8(db_bar->tag, db_bar->handle, ring->doorbell,
+ htole64(db_val));
+}
+
+static void
+bnxt_thor2_db_rx_cq(void *db_ptr, bool enable_irq)
+{
+ struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
+ struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
+ u64 db_msg = { 0 };
+ uint32_t cons = cpr->raw_cons;
+ uint32_t toggle = 0;
+
+ if (cons == UINT32_MAX)
+ cons = 0;
+
+	if (enable_irq)
+ toggle = cpr->toggle;
+
+ db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
+ DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);
+
+ if (enable_irq)
+ db_msg |= DBR_TYPE_CQ_ARMALL;
+ else
+ db_msg |= DBR_TYPE_CQ;
+
+ bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
+ BUS_SPACE_BARRIER_WRITE);
+ bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
+ htole64(*(uint64_t *)&db_msg));
+ bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
+ BUS_SPACE_BARRIER_WRITE);
+}
+
+static void
+bnxt_thor2_db_tx_cq(void *db_ptr, bool enable_irq)
+{
+ struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
+ struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
+ u64 db_msg = { 0 };
+ uint32_t cons = cpr->raw_cons;
+ uint32_t toggle = 0;
+
+	if (enable_irq)
+ toggle = cpr->toggle;
+
+ db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
+ DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);
+
+ if (enable_irq)
+ db_msg |= DBR_TYPE_CQ_ARMALL;
+ else
+ db_msg |= DBR_TYPE_CQ;
+
+ bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
+ BUS_SPACE_BARRIER_WRITE);
+ bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
+ htole64(*(uint64_t *)&db_msg));
+ bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
+ BUS_SPACE_BARRIER_WRITE);
+}
+
+static void
+bnxt_thor2_db_nq(void *db_ptr, bool enable_irq)
+{
+ struct bnxt_cp_ring *cpr = (struct bnxt_cp_ring *) db_ptr;
+ struct bnxt_bar_info *db_bar = &cpr->ring.softc->doorbell_bar;
+ u64 db_msg = { 0 };
+ uint32_t cons = cpr->raw_cons;
+ uint32_t toggle = 0;
+
+	if (enable_irq)
+ toggle = cpr->toggle;
+
+ db_msg = DBR_PATH_L2 | ((u64)cpr->ring.phys_id << DBR_XID_SFT) | DBR_VALID |
+ DB_RING_IDX_CMP(&cpr->ring, cons) | DB_TOGGLE(toggle);
+
+ if (enable_irq)
+ db_msg |= DBR_TYPE_NQ_ARM;
+ else
+ db_msg |= DBR_TYPE_NQ_MASK;
+
+ bus_space_barrier(db_bar->tag, db_bar->handle, cpr->ring.doorbell, 8,
+ BUS_SPACE_BARRIER_WRITE);
+ bus_space_write_8(db_bar->tag, db_bar->handle, cpr->ring.doorbell,
+ htole64(*(uint64_t *)&db_msg));
+ bus_space_barrier(db_bar->tag, db_bar->handle, 0, db_bar->size,
+ BUS_SPACE_BARRIER_WRITE);
+}
+
struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn, char *dev_name)
{
struct bnxt_softc_list *sc = NULL;
@@ -1261,6 +1593,702 @@ struct bnxt_softc *bnxt_find_dev(uint32_t domain, uint32_t bus, uint32_t dev_fn,
return NULL;
}
+
+static void bnxt_verify_asym_queues(struct bnxt_softc *softc)
+{
+ uint8_t i, lltc = 0;
+
+ if (!softc->max_lltc)
+ return;
+
+	/* Verify that lossless TX and RX queues are at the same index */
+ for (i = 0; i < softc->max_tc; i++) {
+ if (BNXT_LLQ(softc->tx_q_info[i].queue_profile) &&
+ BNXT_LLQ(softc->rx_q_info[i].queue_profile))
+ lltc++;
+ }
+ softc->max_lltc = min(softc->max_lltc, lltc);
+}
+
+static int bnxt_hwrm_poll(struct bnxt_softc *bp)
+{
+ struct hwrm_ver_get_output *resp =
+ (void *)bp->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_ver_get_input req = {0};
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET);
+
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+
+ rc = _hwrm_send_message(bp, &req, sizeof(req));
+ if (rc)
+ return rc;
+
+ if (resp->flags & HWRM_VER_GET_OUTPUT_FLAGS_DEV_NOT_RDY)
+ rc = -EAGAIN;
+
+ return rc;
+}
+
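/*
 * A hedged sketch of driving bnxt_hwrm_poll() above during recovery:
 * reissue HWRM_VER_GET until the firmware answers or a retry budget
 * runs out. The budget and the 100 ms spacing are illustrative
 * assumptions, not values taken from this driver.
 */
static int
bnxt_wait_fw_ready(struct bnxt_softc *bp, int retries)
{
	int rc;

	do {
		rc = bnxt_hwrm_poll(bp);
		if (rc == 0)
			break;
		msleep(100);	/* linuxkpi msleep(), in milliseconds */
	} while (--retries > 0);

	return (rc);
}
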
+static void bnxt_rtnl_lock_sp(struct bnxt_softc *bp)
+{
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
+ * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
+ * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt_softc *bp)
+{
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+}
+
+static void bnxt_fw_fatal_close(struct bnxt_softc *softc)
+{
+ bnxt_disable_intr(softc->ctx);
+ if (pci_is_enabled(softc->pdev))
+ pci_disable_device(softc->pdev);
+}
+
+static u32 bnxt_fw_health_readl(struct bnxt_softc *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->regs[reg_idx];
+ u32 reg_type, reg_off, val = 0;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_read_config_dword(bp->pdev, reg_off, &val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ reg_off = fw_health->mapped_regs[reg_idx];
+ fallthrough;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ val = readl_fbsd(bp, reg_off, 0);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ val = readl_fbsd(bp, reg_off, 2);
+ break;
+ }
+ if (reg_idx == BNXT_FW_RESET_INPROG_REG)
+ val &= fw_health->fw_reset_inprog_reg_mask;
+ return val;
+}
+
+static void bnxt_fw_reset_close(struct bnxt_softc *bp)
+{
+	int i;
+
+	bnxt_ulp_stop(bp);
+ /* When firmware is in fatal state, quiesce device and disable
+ * bus master to prevent any potential bad DMAs before freeing
+ * kernel memory.
+ */
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+ u16 val = 0;
+
+ val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
+ if (val == 0xffff) {
+ bp->fw_reset_min_dsecs = 0;
+ }
+ bnxt_fw_fatal_close(bp);
+ }
+
+ iflib_request_reset(bp->ctx);
+ bnxt_stop(bp->ctx);
+ bnxt_hwrm_func_drv_unrgtr(bp, false);
+
+	for (i = bp->nrxqsets - 1; i >= 0; i--) {
+		if (BNXT_CHIP_P5_PLUS(bp))
+			iflib_irq_free(bp->ctx, &bp->nq_rings[i].irq);
+		else
+			iflib_irq_free(bp->ctx, &bp->rx_cp_rings[i].irq);
+	}
+ if (pci_is_enabled(bp->pdev))
+ pci_disable_device(bp->pdev);
+ pci_disable_busmaster(bp->dev);
+ bnxt_free_ctx_mem(bp);
+}
+
+static bool is_bnxt_fw_ok(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ bool no_heartbeat = false, has_reset = false;
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
+ if (val == fw_health->last_fw_heartbeat)
+ no_heartbeat = true;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ if (val != fw_health->last_fw_reset_cnt)
+ has_reset = true;
+
+ if (!no_heartbeat && has_reset)
+ return true;
+
+ return false;
+}
+
+void bnxt_fw_reset(struct bnxt_softc *bp)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
+ !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ int tmo;
+ set_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_fw_reset_close(bp);
+
+ if ((bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD)) {
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN;
+ tmo = HZ / 10;
+ } else {
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+			tmo = bp->fw_reset_min_dsecs * HZ / 10;
+ }
+ bnxt_queue_fw_reset_work(bp, tmo);
+ }
+ bnxt_rtnl_unlock_sp(bp);
+}
+
+static void bnxt_queue_fw_reset_work(struct bnxt_softc *bp, unsigned long delay)
+{
+ if (!(test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)))
+ return;
+
+ if (BNXT_PF(bp))
+ queue_delayed_work(bnxt_pf_wq, &bp->fw_reset_task, delay);
+ else
+ schedule_delayed_work(&bp->fw_reset_task, delay);
+}
+
+void bnxt_queue_sp_work(struct bnxt_softc *bp)
+{
+ if (BNXT_PF(bp))
+ queue_work(bnxt_pf_wq, &bp->sp_task);
+ else
+ schedule_work(&bp->sp_task);
+}
+
+static void bnxt_fw_reset_writel(struct bnxt_softc *bp, int reg_idx)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg = fw_health->fw_reset_seq_regs[reg_idx];
+ u32 val = fw_health->fw_reset_seq_vals[reg_idx];
+ u32 reg_type, reg_off, delay_msecs;
+
+ delay_msecs = fw_health->fw_reset_seq_delay_msec[reg_idx];
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(reg);
+ reg_off = BNXT_FW_HEALTH_REG_OFF(reg);
+ switch (reg_type) {
+ case BNXT_FW_HEALTH_REG_TYPE_CFG:
+ pci_write_config_dword(bp->pdev, reg_off, val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_GRC:
+ writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0, reg_off & BNXT_GRC_BASE_MASK);
+ reg_off = (reg_off & BNXT_GRC_OFFSET_MASK) + 0x2000;
+ fallthrough;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR0:
+ writel_fbsd(bp, reg_off, 0, val);
+ break;
+ case BNXT_FW_HEALTH_REG_TYPE_BAR1:
+ writel_fbsd(bp, reg_off, 2, val);
+ break;
+ }
+ if (delay_msecs) {
+ pci_read_config_dword(bp->pdev, 0, &val);
+ msleep(delay_msecs);
+ }
+}
+
+static void bnxt_reset_all(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ int i, rc;
+
+ if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
+ bp->fw_reset_timestamp = jiffies;
+ return;
+ }
+
+ if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_HOST) {
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++)
+ bnxt_fw_reset_writel(bp, i);
+ } else if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) {
+ struct hwrm_fw_reset_input req = {0};
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET);
+ req.target_id = htole16(HWRM_TARGET_ID_KONG);
+ req.embedded_proc_type = HWRM_FW_RESET_INPUT_EMBEDDED_PROC_TYPE_CHIP;
+ req.selfrst_status = HWRM_FW_RESET_INPUT_SELFRST_STATUS_SELFRSTASAP;
+ req.flags = HWRM_FW_RESET_INPUT_FLAGS_RESET_GRACEFUL;
+ rc = hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc != -ENODEV)
+ device_printf(bp->dev, "Unable to reset FW rc=%d\n", rc);
+ }
+ bp->fw_reset_timestamp = jiffies;
+}
+
+static int __bnxt_alloc_fw_health(struct bnxt_softc *bp)
+{
+ if (bp->fw_health)
+ return 0;
+
+ bp->fw_health = kzalloc(sizeof(*bp->fw_health), GFP_KERNEL);
+ if (!bp->fw_health)
+ return -ENOMEM;
+
+ mutex_init(&bp->fw_health->lock);
+ return 0;
+}
+
+static int bnxt_alloc_fw_health(struct bnxt_softc *bp)
+{
+ int rc;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) &&
+ !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ rc = __bnxt_alloc_fw_health(bp);
+ if (rc) {
+ bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+ }
+
+ return 0;
+}
+
+static inline void __bnxt_map_fw_health_reg(struct bnxt_softc *bp, u32 reg)
+{
+ writel_fbsd(bp, BNXT_GRCPF_REG_WINDOW_BASE_OUT + BNXT_FW_HEALTH_WIN_MAP_OFF, 0, reg & BNXT_GRC_BASE_MASK);
+}
+
+static int bnxt_map_fw_health_regs(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_base = 0xffffffff;
+ int i;
+
+ bp->fw_health->status_reliable = false;
+ bp->fw_health->resets_reliable = false;
+ /* Only pre-map the monitoring GRC registers using window 3 */
+ for (i = 0; i < 4; i++) {
+ u32 reg = fw_health->regs[i];
+
+ if (BNXT_FW_HEALTH_REG_TYPE(reg) != BNXT_FW_HEALTH_REG_TYPE_GRC)
+ continue;
+ if (reg_base == 0xffffffff)
+ reg_base = reg & BNXT_GRC_BASE_MASK;
+ if ((reg & BNXT_GRC_BASE_MASK) != reg_base)
+ return -ERANGE;
+ fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
+ }
+ bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
+ if (reg_base == 0xffffffff)
+ return 0;
+
+ __bnxt_map_fw_health_reg(bp, reg_base);
+ return 0;
+}
+
+static void bnxt_inv_fw_health_reg(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 reg_type;
+
+ if (!fw_health)
+ return;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->status_reliable = false;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->resets_reliable = false;
+}
+
+static int bnxt_hwrm_error_recovery_qcfg(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ struct hwrm_error_recovery_qcfg_output *resp =
+ (void *)bp->hwrm_cmd_resp.idi_vaddr;
+ struct hwrm_error_recovery_qcfg_input req = {0};
+ int rc, i;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG);
+ rc = _hwrm_send_message(bp, &req, sizeof(req));
+
+ if (rc)
+ goto err_recovery_out;
+ fw_health->flags = le32toh(resp->flags);
+ if ((fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU) &&
+ !(bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL)) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ fw_health->polling_dsecs = le32toh(resp->driver_polling_freq);
+ fw_health->master_func_wait_dsecs =
+ le32toh(resp->master_func_wait_period);
+ fw_health->normal_func_wait_dsecs =
+ le32toh(resp->normal_func_wait_period);
+ fw_health->post_reset_wait_dsecs =
+ le32toh(resp->master_func_wait_period_after_reset);
+ fw_health->post_reset_max_wait_dsecs =
+ le32toh(resp->max_bailout_time_after_reset);
+ fw_health->regs[BNXT_FW_HEALTH_REG] =
+ le32toh(resp->fw_health_status_reg);
+ fw_health->regs[BNXT_FW_HEARTBEAT_REG] =
+ le32toh(resp->fw_heartbeat_reg);
+ fw_health->regs[BNXT_FW_RESET_CNT_REG] =
+ le32toh(resp->fw_reset_cnt_reg);
+ fw_health->regs[BNXT_FW_RESET_INPROG_REG] =
+ le32toh(resp->reset_inprogress_reg);
+ fw_health->fw_reset_inprog_reg_mask =
+ le32toh(resp->reset_inprogress_reg_mask);
+ fw_health->fw_reset_seq_cnt = resp->reg_array_cnt;
+ if (fw_health->fw_reset_seq_cnt >= 16) {
+ rc = -EINVAL;
+ goto err_recovery_out;
+ }
+ for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) {
+ fw_health->fw_reset_seq_regs[i] =
+ le32toh(resp->reset_reg[i]);
+ fw_health->fw_reset_seq_vals[i] =
+ le32toh(resp->reset_reg_val[i]);
+ fw_health->fw_reset_seq_delay_msec[i] =
+ le32toh(resp->delay_after_reset[i]);
+ }
+err_recovery_out:
+ if (!rc)
+ rc = bnxt_map_fw_health_regs(bp);
+ if (rc)
+ bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
+ return rc;
+}
+
+static int bnxt_drv_rgtr(struct bnxt_softc *bp)
+{
+ int rc;
+
+ /* determine whether we can support error recovery before
+ * registering with FW
+ */
+ if (bnxt_alloc_fw_health(bp)) {
+ device_printf(bp->dev, "no memory for firmware error recovery\n");
+ } else {
+ rc = bnxt_hwrm_error_recovery_qcfg(bp);
+ if (rc)
+ device_printf(bp->dev, "hwrm query error recovery failure rc: %d\n",
+ rc);
+ }
+	rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false); /* TODO: revisit the params */
+ if (rc)
+ return -ENODEV;
+ return 0;
+}
+
+static bool bnxt_fw_reset_timeout(struct bnxt_softc *bp)
+{
+ return time_after(jiffies, bp->fw_reset_timestamp +
+ (bp->fw_reset_max_dsecs * HZ / 10));
+}
+
+static int bnxt_open(struct bnxt_softc *bp)
+{
+	int rc = 0;
+
+	if (BNXT_PF(bp))
+ rc = bnxt_hwrm_nvm_get_dev_info(bp, &bp->nvm_info->mfg_id,
+ &bp->nvm_info->device_id, &bp->nvm_info->sector_size,
+ &bp->nvm_info->size, &bp->nvm_info->reserved_size,
+ &bp->nvm_info->available_size);
+
+ /* Get the queue config */
+ rc = bnxt_hwrm_queue_qportcfg(bp, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
+ if (rc) {
+ device_printf(bp->dev, "reinit: hwrm qportcfg (tx) failed\n");
+ return rc;
+ }
+ if (bp->is_asym_q) {
+ rc = bnxt_hwrm_queue_qportcfg(bp,
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
+ if (rc) {
+ device_printf(bp->dev, "re-init: hwrm qportcfg (rx) failed\n");
+ return rc;
+ }
+ bnxt_verify_asym_queues(bp);
+ } else {
+ bp->rx_max_q = bp->tx_max_q;
+ memcpy(bp->rx_q_info, bp->tx_q_info, sizeof(bp->rx_q_info));
+ memcpy(bp->rx_q_ids, bp->tx_q_ids, sizeof(bp->rx_q_ids));
+ }
+ /* Get the HW capabilities */
+ rc = bnxt_hwrm_func_qcaps(bp);
+ if (rc)
+ return rc;
+
+ /* Register the driver with the FW */
+ rc = bnxt_drv_rgtr(bp);
+ if (rc)
+ return rc;
+ if (bp->hwrm_spec_code >= 0x10803) {
+ rc = bnxt_alloc_ctx_mem(bp);
+ if (rc) {
+ device_printf(bp->dev, "attach: alloc_ctx_mem failed\n");
+ return rc;
+ }
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (!rc)
+ bp->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
+ }
+
+ if (BNXT_CHIP_P5_PLUS(bp))
+ bnxt_hwrm_reserve_pf_rings(bp);
+ /* Get the current configuration of this function */
+ rc = bnxt_hwrm_func_qcfg(bp);
+ if (rc) {
+ device_printf(bp->dev, "re-init: hwrm func qcfg failed\n");
+ return rc;
+ }
+
+ bnxt_msix_intr_assign(bp->ctx, 0);
+ bnxt_init(bp->ctx);
+ bnxt_intr_enable(bp->ctx);
+
+ if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ bnxt_ulp_start(bp, 0);
+ }
+ }
+
+ device_printf(bp->dev, "Network interface is UP and operational\n");
+
+ return rc;
+}
+
+static void bnxt_fw_reset_abort(struct bnxt_softc *bp, int rc)
+{
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
+ bnxt_ulp_start(bp, rc);
+ }
+ bp->fw_reset_state = 0;
+}
+
+static void bnxt_fw_reset_task(struct work_struct *work)
+{
+ struct bnxt_softc *bp = container_of(work, struct bnxt_softc, fw_reset_task.work);
+ int rc = 0;
+
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
+ device_printf(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n");
+ return;
+ }
+
+ switch (bp->fw_reset_state) {
+ case BNXT_FW_RESET_STATE_POLL_FW_DOWN: {
+ u32 val;
+
+ val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!(val & BNXT_FW_STATUS_SHUTDOWN) &&
+ !bnxt_fw_reset_timeout(bp)) {
+ bnxt_queue_fw_reset_work(bp, HZ / 5);
+ return;
+ }
+
+ if (!bp->fw_health->primary) {
+ u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
+
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+ return;
+ }
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ }
+ fallthrough;
+ case BNXT_FW_RESET_STATE_RESET_FW:
+ bnxt_reset_all(bp);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
+ return;
+ case BNXT_FW_RESET_STATE_ENABLE_DEV:
+ bnxt_inv_fw_health_reg(bp);
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
+ !bp->fw_reset_min_dsecs) {
+ u16 val;
+
+ val = pci_read_config(bp->dev, PCI_SUBSYSTEM_ID, 2);
+ if (val == 0xffff) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ device_printf(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+ rc = -ETIMEDOUT;
+ goto fw_reset_abort;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 1000);
+ return;
+ }
+ }
+ clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ if (!pci_is_enabled(bp->pdev)) {
+ if (pci_enable_device(bp->pdev)) {
+ device_printf(bp->dev, "Cannot re-enable PCI device\n");
+ rc = -ENODEV;
+ goto fw_reset_abort;
+ }
+ }
+ pci_set_master(bp->pdev);
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW;
+ fallthrough;
+ case BNXT_FW_RESET_STATE_POLL_FW:
+ bp->hwrm_cmd_timeo = SHORT_HWRM_CMD_TIMEOUT;
+ rc = bnxt_hwrm_poll(bp);
+ if (rc) {
+ if (bnxt_fw_reset_timeout(bp)) {
+ device_printf(bp->dev, "Firmware reset aborted\n");
+ goto fw_reset_abort_status;
+ }
+ bnxt_queue_fw_reset_work(bp, HZ / 5);
+ return;
+ }
+ bp->hwrm_cmd_timeo = DFLT_HWRM_CMD_TIMEOUT;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_OPENING;
+ fallthrough;
+ case BNXT_FW_RESET_STATE_OPENING:
+ rc = bnxt_open(bp);
+ if (rc) {
+ device_printf(bp->dev, "bnxt_open() failed during FW reset\n");
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+ return;
+ }
+
+ if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
+ bp->fw_health->enabled) {
+ bp->fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ }
+ bp->fw_reset_state = 0;
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ bnxt_ulp_start(bp, 0);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ set_bit(BNXT_STATE_OPEN, &bp->state);
+ rtnl_unlock();
+ }
+ return;
+
+fw_reset_abort_status:
+ if (bp->fw_health->status_reliable ||
+ (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) {
+ u32 sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+
+ device_printf(bp->dev, "fw_health_status 0x%x\n", sts);
+ }
+fw_reset_abort:
+ rtnl_lock();
+ bnxt_fw_reset_abort(bp, rc);
+ rtnl_unlock();
+}
+
+static void bnxt_force_fw_reset(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+ u32 wait_dsecs;
+
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+ test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ return;
+ bnxt_fw_reset_close(bp);
+ wait_dsecs = fw_health->master_func_wait_dsecs;
+ if (fw_health->primary) {
+ if (fw_health->flags & HWRM_ERROR_RECOVERY_QCFG_OUTPUT_FLAGS_CO_CPU)
+ wait_dsecs = 0;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
+ } else {
+ bp->fw_reset_timestamp = jiffies + wait_dsecs * HZ / 10;
+ wait_dsecs = fw_health->normal_func_wait_dsecs;
+ bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
+ }
+
+ bp->fw_reset_min_dsecs = fw_health->post_reset_wait_dsecs;
+ bp->fw_reset_max_dsecs = fw_health->post_reset_max_wait_dsecs;
+ bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+}
+
+static void bnxt_fw_exception(struct bnxt_softc *bp)
+{
+ device_printf(bp->dev, "Detected firmware fatal condition, initiating reset\n");
+ set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_rtnl_lock_sp(bp);
+ bnxt_force_fw_reset(bp);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
+static void __bnxt_fw_recover(struct bnxt_softc *bp)
+{
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_fw_reset(bp);
+ else
+ bnxt_fw_exception(bp);
+}
+
+static void bnxt_devlink_health_fw_report(struct bnxt_softc *bp)
+{
+ struct bnxt_fw_health *fw_health = bp->fw_health;
+
+ if (!fw_health)
+ return;
+
+ if (!fw_health->fw_reporter) {
+ __bnxt_fw_recover(bp);
+ return;
+ }
+}
+
+static void bnxt_sp_task(struct work_struct *work)
+{
+ struct bnxt_softc *bp = container_of(work, struct bnxt_softc, sp_task);
+
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ smp_mb__after_atomic();
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ return;
+ }
+
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_devlink_health_fw_report(bp);
+ else
+ bnxt_fw_reset(bp);
+ }
+
+ if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
+ if (!is_bnxt_fw_ok(bp))
+ bnxt_devlink_health_fw_report(bp);
+ }
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+}
+
/* Device setup and teardown */
static int
bnxt_attach_pre(if_ctx_t ctx)
@@ -1288,6 +2316,7 @@ bnxt_attach_pre(if_ctx_t ctx)
case BCM57414_NPAR2:
case BCM57416_NPAR1:
case BCM57416_NPAR2:
+ case BCM57504_NPAR:
softc->flags |= BNXT_FLAG_NPAR;
break;
case NETXTREME_C_VF1:
@@ -1300,7 +2329,6 @@ bnxt_attach_pre(if_ctx_t ctx)
break;
}
-#define PCI_DEVFN(device, func) ((((device) & 0x1f) << 3) | ((func) & 0x07))
softc->domain = pci_get_domain(softc->dev);
softc->bus = pci_get_bus(softc->dev);
softc->slot = pci_get_slot(softc->dev);
@@ -1315,8 +2343,24 @@ bnxt_attach_pre(if_ctx_t ctx)
pci_enable_busmaster(softc->dev);
- if (bnxt_pci_mapping(softc))
- return (ENXIO);
+ if (bnxt_pci_mapping(softc)) {
+ device_printf(softc->dev, "PCI mapping failed\n");
+ rc = ENXIO;
+ goto pci_map_fail;
+ }
+
+ softc->pdev = kzalloc(sizeof(*softc->pdev), GFP_KERNEL);
+ if (!softc->pdev) {
+ device_printf(softc->dev, "pdev alloc failed\n");
+ rc = -ENOMEM;
+ goto free_pci_map;
+ }
+
+ rc = linux_pci_attach_device(softc->dev, NULL, NULL, softc->pdev);
+ if (rc) {
+ device_printf(softc->dev, "Failed to attach Linux PCI device 0x%x\n", rc);
+ goto pci_attach_fail;
+ }
/* HWRM setup/init */
BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev));
@@ -1356,22 +2400,37 @@ bnxt_attach_pre(if_ctx_t ctx)
if ((softc->ver_info->chip_num == BCM57508) ||
(softc->ver_info->chip_num == BCM57504) ||
- (softc->ver_info->chip_num == BCM57502))
+ (softc->ver_info->chip_num == BCM57504_NPAR) ||
+ (softc->ver_info->chip_num == BCM57502) ||
+ (softc->ver_info->chip_num == BCM57601) ||
+ (softc->ver_info->chip_num == BCM57602) ||
+ (softc->ver_info->chip_num == BCM57604))
softc->flags |= BNXT_FLAG_CHIP_P5;
+ if (softc->ver_info->chip_num == BCM57608)
+ softc->flags |= BNXT_FLAG_CHIP_P7;
+
softc->flags |= BNXT_FLAG_TPA;
- /* No TPA for Thor A0 */
- if (BNXT_CHIP_P5(softc) && (!softc->ver_info->chip_rev) &&
+ if (BNXT_CHIP_P5_PLUS(softc) && (!softc->ver_info->chip_rev) &&
(!softc->ver_info->chip_metal))
softc->flags &= ~BNXT_FLAG_TPA;
- /* TBD ++ Add TPA support from Thor B1 */
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->flags &= ~BNXT_FLAG_TPA;
/* Get NVRAM info */
if (BNXT_PF(softc)) {
+ if (!bnxt_pf_wq) {
+ bnxt_pf_wq =
+ create_singlethread_workqueue("bnxt_pf_wq");
+ if (!bnxt_pf_wq) {
+ device_printf(softc->dev, "Unable to create workqueue.\n");
+ rc = -ENOMEM;
+ goto nvm_alloc_fail;
+ }
+ }
+
softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info),
M_DEVBUF, M_NOWAIT | M_ZERO);
if (softc->nvm_info == NULL) {
@@ -1393,6 +2452,12 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->db_ops.bnxt_db_rx_cq = bnxt_thor_db_rx_cq;
softc->db_ops.bnxt_db_tx_cq = bnxt_thor_db_tx_cq;
softc->db_ops.bnxt_db_nq = bnxt_thor_db_nq;
+ } else if (BNXT_CHIP_P7(softc)) {
+ softc->db_ops.bnxt_db_tx = bnxt_thor2_db_tx;
+ softc->db_ops.bnxt_db_rx = bnxt_thor2_db_rx;
+ softc->db_ops.bnxt_db_rx_cq = bnxt_thor2_db_rx_cq;
+ softc->db_ops.bnxt_db_tx_cq = bnxt_thor2_db_tx_cq;
+ softc->db_ops.bnxt_db_nq = bnxt_thor2_db_nq;
} else {
softc->db_ops.bnxt_db_tx = bnxt_cuw_db_tx;
softc->db_ops.bnxt_db_rx = bnxt_cuw_db_rx;
@@ -1400,25 +2465,39 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->db_ops.bnxt_db_tx_cq = bnxt_cuw_db_cq;
}
- /* Register the driver with the FW */
- rc = bnxt_hwrm_func_drv_rgtr(softc);
- if (rc) {
- device_printf(softc->dev, "attach: hwrm drv rgtr failed\n");
- goto drv_rgtr_fail;
- }
-
- rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0);
- if (rc) {
- device_printf(softc->dev, "attach: hwrm rgtr async evts failed\n");
- goto drv_rgtr_fail;
- }
/* Get the queue config */
- rc = bnxt_hwrm_queue_qportcfg(softc);
+ rc = bnxt_hwrm_queue_qportcfg(softc, HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX);
if (rc) {
- device_printf(softc->dev, "attach: hwrm qportcfg failed\n");
+ device_printf(softc->dev, "attach: hwrm qportcfg (tx) failed\n");
goto failed;
}
+ if (softc->is_asym_q) {
+ rc = bnxt_hwrm_queue_qportcfg(softc,
+ HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
+ if (rc) {
+ device_printf(softc->dev, "attach: hwrm qportcfg (rx) failed\n");
+			goto failed;
+ }
+ bnxt_verify_asym_queues(softc);
+ } else {
+ softc->rx_max_q = softc->tx_max_q;
+ memcpy(softc->rx_q_info, softc->tx_q_info, sizeof(softc->rx_q_info));
+ memcpy(softc->rx_q_ids, softc->tx_q_ids, sizeof(softc->rx_q_ids));
+ }
+
+ /* Get the HW capabilities */
+ rc = bnxt_hwrm_func_qcaps(softc);
+ if (rc)
+ goto failed;
+
+	/*
+	 * Register the driver and its async events with the FW.
+	 */
+ rc = bnxt_drv_rgtr(softc);
+ if (rc)
+ goto failed;
if (softc->hwrm_spec_code >= 0x10803) {
rc = bnxt_alloc_ctx_mem(softc);
@@ -1431,11 +2510,6 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->flags |= BNXT_FLAG_FW_CAP_NEW_RM;
}
- /* Get the HW capabilities */
- rc = bnxt_hwrm_func_qcaps(softc);
- if (rc)
- goto failed;
-
/* Get the current configuration of this function */
rc = bnxt_hwrm_func_qcfg(softc);
if (rc) {
@@ -1467,7 +2541,7 @@ bnxt_attach_pre(if_ctx_t ctx)
/* Get the queue config */
bnxt_get_wol_settings(softc);
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
bnxt_hwrm_reserve_pf_rings(softc);
rc = bnxt_hwrm_func_qcfg(softc);
if (rc) {
@@ -1540,17 +2614,17 @@ bnxt_attach_pre(if_ctx_t ctx)
softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE;
softc->def_cp_ring.ring.softc = softc;
softc->def_cp_ring.ring.id = 0;
- softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5(softc)) ?
- DB_PF_OFFSET_P5 : softc->def_cp_ring.ring.id * 0x80;
+ softc->def_cp_ring.ring.doorbell = (BNXT_CHIP_P5_PLUS(softc)) ?
+ softc->legacy_db_size : softc->def_cp_ring.ring.id * 0x80;
softc->def_cp_ring.ring.ring_size = PAGE_SIZE /
sizeof(struct cmpl_base);
+ softc->def_cp_ring.ring.db_ring_mask = softc->def_cp_ring.ring.ring_size - 1;
rc = iflib_dma_alloc(ctx,
sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size,
&softc->def_cp_ring_mem, 0);
softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr;
softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr;
- iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task,
- "dflt_cp");
+ iflib_config_task_init(ctx, &softc->def_cp_task, bnxt_def_cp_task);
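The db_ring_mask set above is ring_size - 1, which only works because the ring size (PAGE_SIZE / sizeof(struct cmpl_base)) is a power of two: Thor2 doorbells carry a masked index derived from a free-running counter. A tiny sketch of the wrap:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t ring_size = 4096 / 16;	/* entries; a power of two */
		uint32_t mask = ring_size - 1;	/* valid only for powers of two */
		uint32_t raw_cons = 300;	/* free-running consumer counter */

		/* The masked value is what a doorbell write would carry. */
		printf("db index = %u\n", raw_cons & mask);	/* 300 & 255 = 44 */
		return 0;
	}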
rc = bnxt_init_sysctl_ctx(softc);
if (rc)
@@ -1581,6 +2655,14 @@ bnxt_attach_pre(if_ctx_t ctx)
if (rc)
goto failed;
+ rc = bnxt_create_dcb_sysctls(softc);
+ if (rc)
+ goto failed;
+
+ set_bit(BNXT_STATE_OPEN, &softc->state);
+ INIT_WORK(&softc->sp_task, bnxt_sp_task);
+ INIT_DELAYED_WORK(&softc->fw_reset_task, bnxt_fw_reset_task);
+
/* Initialize the vlan list */
SLIST_INIT(&softc->vnic_info.vlan_tags);
softc->vnic_info.vlan_tag_list.idi_vaddr = NULL;
@@ -1593,7 +2675,6 @@ failed:
bnxt_free_sysctl_ctx(softc);
init_sysctl_failed:
bnxt_hwrm_func_drv_unrgtr(softc, false);
-drv_rgtr_fail:
if (BNXT_PF(softc))
free(softc->nvm_info, M_DEVBUF);
nvm_alloc_fail:
@@ -1605,7 +2686,14 @@ ver_alloc_fail:
bnxt_free_hwrm_dma_mem(softc);
dma_fail:
BNXT_HWRM_LOCK_DESTROY(softc);
+ if (softc->pdev)
+ linux_pci_detach_device(softc->pdev);
+pci_attach_fail:
+ kfree(softc->pdev);
+ softc->pdev = NULL;
+free_pci_map:
bnxt_pci_mapping_free(softc);
+pci_map_fail:
pci_disable_busmaster(softc->dev);
return (rc);
}
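The reworked failure path keeps the usual attach discipline: one label per acquired resource, unwinding in strict reverse order of acquisition, with the new pdev and PCI-map labels slotted between the HWRM lock teardown and pci_disable_busmaster(). A minimal sketch of the goto-chain pattern itself:

	#include <stdlib.h>

	/*
	 * Reverse-order unwind: each label releases everything acquired
	 * before the step that failed, mirroring the attach path above.
	 */
	static int attach_demo(void)
	{
		void *a, *b;
		int rc = 0;

		a = malloc(32);
		if (a == NULL) { rc = -1; goto a_fail; }
		b = malloc(64);
		if (b == NULL) { rc = -1; goto b_fail; }

		free(b);	/* demo only; a real attach keeps these in the softc */
		free(a);
		return (0);

	b_fail:
		free(a);
	a_fail:
		return (rc);
	}

	int main(void) { return (attach_demo() ? 1 : 0); }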
@@ -1617,6 +2705,7 @@ bnxt_attach_post(if_ctx_t ctx)
if_t ifp = iflib_get_ifp(ctx);
int rc;
+ softc->ifp = ifp;
bnxt_create_config_sysctls_post(softc);
/* Update link state etc... */
@@ -1626,6 +2715,7 @@ bnxt_attach_post(if_ctx_t ctx)
/* Needs to be done after probing the phy */
bnxt_create_ver_sysctls(softc);
+ ifmedia_removeall(softc->media);
bnxt_add_media_types(softc);
ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO);
@@ -1633,6 +2723,8 @@ bnxt_attach_post(if_ctx_t ctx)
ETHER_CRC_LEN;
softc->rx_buf_size = min(softc->scctx->isc_max_frame_size, BNXT_PAGE_SIZE);
+ bnxt_dcb_init(softc);
+ bnxt_rdma_aux_device_init(softc);
failed:
return rc;
@@ -1646,6 +2738,10 @@ bnxt_detach(if_ctx_t ctx)
struct bnxt_vlan_tag *tmp;
int i;
+ bnxt_rdma_aux_device_uninit(softc);
+ cancel_delayed_work_sync(&softc->fw_reset_task);
+ cancel_work_sync(&softc->sp_task);
+ bnxt_dcb_free(softc);
SLIST_REMOVE(&pf_list, &softc->list, bnxt_softc_list, next);
bnxt_num_pfs--;
bnxt_wol_config(ctx);
@@ -1655,10 +2751,9 @@ bnxt_detach(if_ctx_t ctx)
bnxt_free_ctx_mem(softc);
bnxt_clear_ids(softc);
iflib_irq_free(ctx, &softc->def_cp_ring.irq);
- iflib_config_gtask_deinit(&softc->def_cp_task);
/* We need to free() these here... */
for (i = softc->nrxqsets-1; i>=0; i--) {
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
iflib_irq_free(ctx, &softc->nq_rings[i].irq);
else
iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq);
@@ -1683,6 +2778,11 @@ bnxt_detach(if_ctx_t ctx)
bnxt_free_hwrm_short_cmd_req(softc);
BNXT_HWRM_LOCK_DESTROY(softc);
+ if (!bnxt_num_pfs && bnxt_pf_wq)
+ destroy_workqueue(bnxt_pf_wq);
+
+ if (softc->pdev)
+ linux_pci_detach_device(softc->pdev);
free(softc->state_bv, M_DEVBUF);
pci_disable_busmaster(softc->dev);
bnxt_pci_mapping_free(softc);
@@ -1759,7 +2859,7 @@ bnxt_hwrm_resource_free(struct bnxt_softc *softc)
if (rc)
goto fail;
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
rc = bnxt_hwrm_ring_free(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
&softc->nq_rings[i].ring,
@@ -1782,7 +2882,7 @@ static void
bnxt_func_reset(struct bnxt_softc *softc)
{
- if (!BNXT_CHIP_P5(softc)) {
+ if (!BNXT_CHIP_P5_PLUS(softc)) {
bnxt_hwrm_func_reset(softc);
return;
}
@@ -1798,7 +2898,7 @@ bnxt_rss_grp_tbl_init(struct bnxt_softc *softc)
int i, j;
for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) {
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
rgt[i++] = htole16(softc->rx_rings[j].phys_id);
rgt[i] = htole16(softc->rx_cp_rings[j].ring.phys_id);
} else {
@@ -1836,6 +2936,77 @@ static void bnxt_get_port_module_status(struct bnxt_softc *softc)
}
}
+static void bnxt_aux_dev_free(struct bnxt_softc *softc)
+{
+ kfree(softc->aux_dev);
+ softc->aux_dev = NULL;
+}
+
+static struct bnxt_aux_dev *bnxt_aux_dev_init(struct bnxt_softc *softc)
+{
+ struct bnxt_aux_dev *bnxt_adev;
+
+ msleep(1000 * 2);
+ bnxt_adev = kzalloc(sizeof(*bnxt_adev), GFP_KERNEL);
+ if (!bnxt_adev)
+ return ERR_PTR(-ENOMEM);
+
+ return bnxt_adev;
+}
+
+static void bnxt_rdma_aux_device_uninit(struct bnxt_softc *softc)
+{
+ struct bnxt_aux_dev *bnxt_adev = softc->aux_dev;
+
+ /* Skip if no auxiliary device init was done. */
+ if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ if (IS_ERR_OR_NULL(bnxt_adev))
+ return;
+
+ bnxt_rdma_aux_device_del(softc);
+
+ if (bnxt_adev->id >= 0)
+ ida_free(&bnxt_aux_dev_ids, bnxt_adev->id);
+
+ bnxt_aux_dev_free(softc);
+}
+
+static void bnxt_rdma_aux_device_init(struct bnxt_softc *softc)
+{
+ int rc;
+
+ if (!(softc->flags & BNXT_FLAG_ROCE_CAP))
+ return;
+
+ softc->aux_dev = bnxt_aux_dev_init(softc);
+ if (IS_ERR_OR_NULL(softc->aux_dev)) {
+ device_printf(softc->dev, "Failed to init auxiliary device for ROCE\n");
+ goto skip_aux_init;
+ }
+
+ softc->aux_dev->id = ida_alloc(&bnxt_aux_dev_ids, GFP_KERNEL);
+ if (softc->aux_dev->id < 0) {
+ device_printf(softc->dev, "ida alloc failed for ROCE auxiliary device\n");
+ bnxt_aux_dev_free(softc);
+ goto skip_aux_init;
+ }
+
+ msleep(1000 * 2);
+ /* If aux bus init fails, continue with netdev init. */
+ rc = bnxt_rdma_aux_device_add(softc);
+ if (rc) {
+ device_printf(softc->dev, "Failed to add auxiliary device for ROCE\n");
+ msleep(1000 * 2);
+ ida_free(&bnxt_aux_dev_ids, softc->aux_dev->id);
+ bnxt_aux_dev_free(softc);
+ goto skip_aux_init;
+ }
+ device_printf(softc->dev, "%s:%d Added auxiliary device (id %d) for ROCE\n",
+ __func__, __LINE__, softc->aux_dev->id);
+skip_aux_init:
+ return;
+}
+
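Note how the init/uninit pair is kept symmetric: every ida_alloc() has a matching ida_free() on each failure path, and the uninit side releases the id only when one was actually assigned (id >= 0) before freeing the containing structure. A toy id allocator showing the same pairing discipline (an assumption-level sketch, not the LinuxKPI IDA implementation):

	#include <stdio.h>

	static unsigned int id_bitmap;	/* toy stand-in for an IDA */

	static int id_alloc(void)
	{
		for (int i = 0; i < 32; i++)
			if (!(id_bitmap & (1u << i))) {
				id_bitmap |= 1u << i;
				return i;
			}
		return -1;		/* exhausted */
	}

	static void id_free(int id)
	{
		if (id >= 0)		/* release only ids actually held */
			id_bitmap &= ~(1u << id);
	}

	int main(void)
	{
		int id = id_alloc();
		printf("got id %d\n", id);
		id_free(id);
		return 0;
	}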
/* Device configuration */
static void
bnxt_init(if_ctx_t ctx)
@@ -1845,7 +3016,7 @@ bnxt_init(if_ctx_t ctx)
int i;
int rc;
- if (!BNXT_CHIP_P5(softc)) {
+ if (!BNXT_CHIP_P5_PLUS(softc)) {
rc = bnxt_hwrm_func_reset(softc);
if (rc)
return;
@@ -1856,8 +3027,7 @@ bnxt_init(if_ctx_t ctx)
softc->is_dev_init = true;
bnxt_clear_ids(softc);
- // TBD -- Check if it is needed for Thor as well
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
goto skip_def_cp_ring;
/* Allocate the default completion ring */
softc->def_cp_ring.cons = UINT32_MAX;
@@ -1866,6 +3036,8 @@ bnxt_init(if_ctx_t ctx)
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->def_cp_ring.ring);
+ bnxt_set_db_mask(softc, &softc->def_cp_ring.ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
if (rc)
goto fail;
skip_def_cp_ring:
@@ -1876,15 +3048,18 @@ skip_def_cp_ring:
if (rc)
goto fail;
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
/* Allocate the NQ */
softc->nq_rings[i].cons = 0;
+ softc->nq_rings[i].raw_cons = 0;
softc->nq_rings[i].v_bit = 1;
softc->nq_rings[i].last_idx = UINT32_MAX;
bnxt_mark_cpr_invalid(&softc->nq_rings[i]);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ,
&softc->nq_rings[i].ring);
+ bnxt_set_db_mask(softc, &softc->nq_rings[i].ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ);
if (rc)
goto fail;
@@ -1892,21 +3067,27 @@ skip_def_cp_ring:
}
/* Allocate the completion ring */
softc->rx_cp_rings[i].cons = UINT32_MAX;
+ softc->rx_cp_rings[i].raw_cons = UINT32_MAX;
softc->rx_cp_rings[i].v_bit = 1;
softc->rx_cp_rings[i].last_idx = UINT32_MAX;
+ softc->rx_cp_rings[i].toggle = 0;
bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->rx_cp_rings[i].ring);
+ bnxt_set_db_mask(softc, &softc->rx_cp_rings[i].ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
if (rc)
goto fail;
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
/* Allocate the RX ring */
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i]);
+ bnxt_set_db_mask(softc, &softc->rx_rings[i],
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX);
if (rc)
goto fail;
softc->db_ops.bnxt_db_rx(&softc->rx_rings[i], 0);
@@ -1915,6 +3096,8 @@ skip_def_cp_ring:
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG,
&softc->ag_rings[i]);
+ bnxt_set_db_mask(softc, &softc->ag_rings[i],
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_RX_AGG);
if (rc)
goto fail;
softc->db_ops.bnxt_db_rx(&softc->ag_rings[i], 0);
@@ -1977,21 +3160,27 @@ skip_def_cp_ring:
/* Allocate the completion ring */
softc->tx_cp_rings[i].cons = UINT32_MAX;
+ softc->tx_cp_rings[i].raw_cons = UINT32_MAX;
softc->tx_cp_rings[i].v_bit = 1;
+ softc->tx_cp_rings[i].toggle = 0;
bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]);
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL,
&softc->tx_cp_rings[i].ring);
+ bnxt_set_db_mask(softc, &softc->tx_cp_rings[i].ring,
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL);
if (rc)
goto fail;
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_tx_cq(&softc->tx_cp_rings[i], 1);
/* Allocate the TX ring */
rc = bnxt_hwrm_ring_alloc(softc,
HWRM_RING_ALLOC_INPUT_RING_TYPE_TX,
&softc->tx_rings[i]);
+ bnxt_set_db_mask(softc, &softc->tx_rings[i],
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_TX);
if (rc)
goto fail;
softc->db_ops.bnxt_db_tx(&softc->tx_rings[i], 0);
@@ -2129,17 +3318,15 @@ bnxt_media_change(if_ctx_t ctx)
struct ifmedia *ifm = iflib_get_media(ctx);
struct ifmediareq ifmr;
int rc;
+ struct bnxt_link_info *link_info = &softc->link_info;
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return EINVAL;
- softc->link_info.req_signal_mode =
- HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
-
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_100_T:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB;
break;
case IFM_1000_KX:
@@ -2147,103 +3334,229 @@ bnxt_media_change(if_ctx_t ctx)
case IFM_1000_CX:
case IFM_1000_SX:
case IFM_1000_LX:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_1GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_1GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
+
case IFM_2500_KX:
case IFM_2500_T:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB;
break;
case IFM_10G_CR1:
case IFM_10G_KR:
case IFM_10G_LR:
case IFM_10G_SR:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_10GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_10GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
case IFM_20G_KR2:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->req_link_speed =
HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB;
break;
case IFM_25G_CR:
case IFM_25G_KR:
case IFM_25G_SR:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
+ case IFM_25G_LR:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_25GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_25GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
+
case IFM_40G_CR4:
case IFM_40G_KR4:
case IFM_40G_LR4:
case IFM_40G_SR4:
case IFM_40G_XLAUI:
case IFM_40G_XLAUI_AC:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_40GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_40GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
+
case IFM_50G_CR2:
case IFM_50G_KR2:
+ case IFM_50G_KR4:
case IFM_50G_SR2:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
+ case IFM_50G_LR2:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
+
case IFM_50G_CP:
case IFM_50G_LR:
case IFM_50G_SR:
case IFM_50G_KR_PAM4:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
- softc->link_info.req_signal_mode =
- HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
- softc->link_info.force_pam4_speed_set_by_user = true;
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_50G) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_50GB;
+ link_info->force_pam4_speed = true;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_50GB_PAM4_56) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
+ link_info->force_pam4_56_speed2 = true;
+ }
+
break;
+
case IFM_100G_CR4:
case IFM_100G_KR4:
case IFM_100G_LR4:
case IFM_100G_SR4:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+ case IFM_100G_AUI4:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB;
+ link_info->force_speed2_nrz = true;
+ }
+
break;
+
case IFM_100G_CP2:
case IFM_100G_SR2:
- case IFM_100G_KR_PAM4:
case IFM_100G_KR2_PAM4:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
- softc->link_info.req_signal_mode =
- HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
- softc->link_info.force_pam4_speed_set_by_user = true;
+ case IFM_100G_AUI2:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_100G) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_100GB;
+ link_info->force_pam4_speed = true;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_56) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
+ link_info->force_pam4_56_speed2 = true;
+ }
+
+ break;
+
+ case IFM_100G_KR_PAM4:
+ case IFM_100G_CR_PAM4:
+ case IFM_100G_DR:
+ case IFM_100G_AUI2_AC:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_100GB_PAM4_112) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
+ link_info->force_pam4_112_speed2 = true;
+ }
+
break;
+
case IFM_200G_SR4:
case IFM_200G_FR4:
case IFM_200G_LR4:
case IFM_200G_DR4:
case IFM_200G_CR4_PAM4:
case IFM_200G_KR4_PAM4:
- softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED;
- softc->link_info.req_link_speed =
- HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
- softc->link_info.force_pam4_speed_set_by_user = true;
- softc->link_info.req_signal_mode =
- HWRM_PORT_PHY_QCFG_OUTPUT_SIGNAL_MODE_PAM4;
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_pam4_speeds & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_PAM4_SPEEDS_200G) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_PAM4_LINK_SPEED_200GB;
+ link_info->force_pam4_speed = true;
+
+ } else if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_56) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
+ link_info->force_pam4_56_speed2 = true;
+ }
+
+ break;
+
+ case IFM_200G_AUI4:
+
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_200GB_PAM4_112) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
+ link_info->force_pam4_112_speed2 = true;
+ }
+
break;
+
+ case IFM_400G_FR8:
+ case IFM_400G_LR8:
+ case IFM_400G_AUI8:
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_56) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
+ link_info->force_pam4_56_speed2 = true;
+ }
+
+ break;
+
+ case IFM_400G_AUI8_AC:
+ case IFM_400G_DR4:
+ link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+
+ if (link_info->support_speeds2 & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS2_400GB_PAM4_112) {
+ link_info->req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
+ link_info->force_pam4_112_speed2 = true;
+ }
+
+ break;
+
case IFM_1000_T:
- softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
- softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
+ link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
break;
case IFM_10G_T:
- softc->link_info.advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
- softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
+ link_info->advertising = HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
break;
default:
device_printf(softc->dev,
@@ -2251,9 +3564,10 @@ bnxt_media_change(if_ctx_t ctx)
/* Fall-through */
case IFM_AUTO:
// Auto
- softc->link_info.autoneg |= BNXT_AUTONEG_SPEED;
+ link_info->autoneg |= BNXT_AUTONEG_SPEED;
break;
}
+
rc = bnxt_hwrm_set_link_setting(softc, true, true, true);
bnxt_media_status(softc->ctx, &ifmr);
return rc;
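Every forced-speed case above now follows one template: clear BNXT_AUTONEG_SPEED, prefer the pre-Thor2 support_speeds mask, and fall back to the Thor2-era support_speeds2 mask while flagging which HWRM encoding was chosen. A condensed sketch of that selection, with simplified constants:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct link_req {
		uint16_t req_link_speed;
		bool use_speeds2;	/* which HWRM encoding was picked */
	};

	/* Prefer the legacy encoding when supported, else fall back. */
	static bool pick_forced_speed(uint16_t legacy_mask, uint16_t s2_mask,
	    uint16_t legacy_bit, uint16_t legacy_val,
	    uint16_t s2_bit, uint16_t s2_val, struct link_req *req)
	{
		if (legacy_mask & legacy_bit) {
			req->req_link_speed = legacy_val;
			req->use_speeds2 = false;
			return true;
		}
		if (s2_mask & s2_bit) {
			req->req_link_speed = s2_val;
			req->use_speeds2 = true;
			return true;
		}
		return false;	/* unsupported in either encoding */
	}

	int main(void)
	{
		struct link_req req;

		/* Legacy mask lacks the bit, speeds2 has it: fall back. */
		if (pick_forced_speed(0x0, 0x2, 0x1, 10, 0x2, 11, &req))
			printf("speed val %u, speeds2=%d\n",
			    req.req_link_speed, req.use_speeds2);
		return 0;
	}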
@@ -2312,11 +3626,11 @@ bnxt_update_admin_status(if_ctx_t ctx)
bnxt_hwrm_port_qstats(softc);
- if (BNXT_CHIP_P5(softc) &&
+ if (BNXT_CHIP_P5_PLUS(softc) &&
(softc->flags & BNXT_FLAG_FW_CAP_EXT_STATS))
bnxt_hwrm_port_qstats_ext(softc);
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
struct ifmediareq ifmr;
if (bit_test(softc->state_bv, BNXT_STATE_LINK_CHANGE)) {
@@ -2349,10 +3663,11 @@ bnxt_do_enable_intr(struct bnxt_cp_ring *cpr)
{
struct bnxt_softc *softc = cpr->ring.softc;
+
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
return;
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_nq(cpr, 1);
else
softc->db_ops.bnxt_db_rx_cq(cpr, 1);
@@ -2366,7 +3681,7 @@ bnxt_do_disable_intr(struct bnxt_cp_ring *cpr)
if (cpr->ring.phys_id == (uint16_t)HWRM_NA_SIGNATURE)
return;
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_nq(cpr, 0);
else
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
@@ -2381,7 +3696,7 @@ bnxt_intr_enable(if_ctx_t ctx)
bnxt_do_enable_intr(&softc->def_cp_ring);
for (i = 0; i < softc->nrxqsets; i++)
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 1);
else
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 1);
@@ -2395,7 +3710,7 @@ bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
else
softc->db_ops.bnxt_db_rx_cq(&softc->tx_cp_rings[qid], 1);
@@ -2431,30 +3746,40 @@ bnxt_process_async_msg(struct bnxt_cp_ring *cpr, tx_cmpl_t *cmpl)
}
}
-static void
+void
process_nq(struct bnxt_softc *softc, uint16_t nqid)
{
struct bnxt_cp_ring *cpr = &softc->nq_rings[nqid];
nq_cn_t *cmp = (nq_cn_t *) cpr->ring.vaddr;
+ struct bnxt_cp_ring *tx_cpr = &softc->tx_cp_rings[nqid];
+ struct bnxt_cp_ring *rx_cpr = &softc->rx_cp_rings[nqid];
bool v_bit = cpr->v_bit;
uint32_t cons = cpr->cons;
+ uint32_t raw_cons = cpr->raw_cons;
uint16_t nq_type, nqe_cnt = 0;
while (1) {
- if (!NQ_VALID(&cmp[cons], v_bit))
+ if (!NQ_VALID(&cmp[cons], v_bit)) {
goto done;
+ }
nq_type = NQ_CN_TYPE_MASK & cmp[cons].type;
- if (nq_type != NQ_CN_TYPE_CQ_NOTIFICATION)
+ if (NQE_CN_TYPE(nq_type) != NQ_CN_TYPE_CQ_NOTIFICATION) {
bnxt_process_async_msg(cpr, (tx_cmpl_t *)&cmp[cons]);
+ } else {
+ tx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
+ rx_cpr->toggle = NQE_CN_TOGGLE(cmp[cons].type);
+ }
NEXT_CP_CONS_V(&cpr->ring, cons, v_bit);
+ raw_cons++;
nqe_cnt++;
}
done:
if (nqe_cnt) {
cpr->cons = cons;
+ cpr->raw_cons = raw_cons;
cpr->v_bit = v_bit;
}
}
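process_nq() consumes notification-queue entries until the entry's valid bit stops matching the expected phase; the phase flips on every ring wrap, and the new raw_cons counter runs free while cons stays masked to the ring. A minimal phase-bit consumer with the layout simplified to one byte per entry:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RING 8			/* entries; a power of two */

	struct entry { uint8_t valid; };	/* toy stand-in for nq_cn_t */

	int main(void)
	{
		struct entry ring[RING] = { {1}, {1}, {1} };	/* 3 valid */
		uint32_t cons = 0, raw_cons = 0;
		bool v_bit = true;

		while (ring[cons].valid == v_bit) {
			/* ...dispatch the entry here... */
			raw_cons++;
			cons = raw_cons & (RING - 1);
			if (cons == 0)
				v_bit = !v_bit;	/* phase flips on each wrap */
		}
		printf("consumed %u entries\n", raw_cons);
		return 0;
	}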
@@ -2464,7 +3789,7 @@ bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid)
{
struct bnxt_softc *softc = iflib_get_softc(ctx);
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
process_nq(softc, qid);
softc->db_ops.bnxt_db_nq(&softc->nq_rings[qid], 1);
}
@@ -2484,7 +3809,7 @@ bnxt_disable_intr(if_ctx_t ctx)
* update the index
*/
for (i = 0; i < softc->nrxqsets; i++)
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_nq(&softc->nq_rings[i], 0);
else
softc->db_ops.bnxt_db_rx_cq(&softc->rx_cp_rings[i], 0);
@@ -2504,7 +3829,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
int i;
char irq_name[16];
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
goto skip_default_cp;
rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq,
@@ -2518,7 +3843,7 @@ bnxt_msix_intr_assign(if_ctx_t ctx, int msix)
skip_default_cp:
for (i=0; i<softc->scctx->isc_nrxqsets; i++) {
- if (BNXT_CHIP_P5(softc)) {
+ if (BNXT_CHIP_P5_PLUS(softc)) {
irq = &softc->nq_rings[i].irq;
id = softc->nq_rings[i].ring.id;
ring = &softc->nq_rings[i];
@@ -2965,7 +4290,7 @@ bnxt_i2c_req(if_ctx_t ctx, struct ifi2creq *i2c)
return -EOPNOTSUPP;
/* This feature is not supported in older firmware versions */
- if (!BNXT_CHIP_P5(softc) ||
+ if (!BNXT_CHIP_P5_PLUS(softc) ||
(softc->hwrm_spec_code < 0x10202))
return -EOPNOTSUPP;
@@ -2985,6 +4310,7 @@ bnxt_probe_phy(struct bnxt_softc *softc)
struct bnxt_link_info *link_info = &softc->link_info;
int rc = 0;
+ softc->phy_flags = 0;
rc = bnxt_hwrm_phy_qcaps(softc);
if (rc) {
device_printf(softc->dev,
@@ -3023,57 +4349,161 @@ bnxt_probe_phy(struct bnxt_softc *softc)
}
static void
-add_media(struct bnxt_softc *softc, uint8_t media_type, uint16_t supported,
- uint16_t supported_pam4)
+add_media(struct bnxt_softc *softc, u8 media_type, u16 supported_NRZ_speeds,
+ u16 supported_pam4_speeds, u16 supported_speeds2)
{
+
switch (media_type) {
case BNXT_MEDIA_CR:
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_CP);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_CP2);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_CX);
+
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_CP);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_CP2);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_CR4_PAM4);
+
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_CR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_CR2);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_CR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_CR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_CR1);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
+ /* thor2 nrz */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_CR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_CR2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_CR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_CR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_CR1);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
+ /* thor2 PAM56 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_CP);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_CP2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_CR4_PAM4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
+ /* thor2 PAM112 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_CR_PAM4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
+
break;
case BNXT_MEDIA_LR:
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_LR);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_LR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_LR2);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_LR);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_LR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_LR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_LR2);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_LR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_LR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_LR);
+ /* thor2 nrz */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_LR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_LR2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_LR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_LR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_LR);
+ /* thor2 PAM56 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_LR8);
+ /* thor2 PAM112 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
+
break;
case BNXT_MEDIA_SR:
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_SR);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_SR2);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_SR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_SR2);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_SR);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_SR2);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_SR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_SR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_SR2);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_SR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_SR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_SR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SX);
+ /* thor2 nrz */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB, IFM_100G_SR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB, IFM_50G_SR2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_SR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_SR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_SR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SX);
+ /* thor2 PAM56 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_SR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_SR2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_SR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_AUI8);
+ /* thor2 PAM112 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_DR4);
+ break;
+
+ case BNXT_MEDIA_ER:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_ER4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_AUI4);
+ /* thor2 PAM56 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_50GB_PAM4_56, IFM_50G_LR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_56, IFM_100G_AUI2);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_56, IFM_200G_LR4);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_56, IFM_400G_FR8);
+ /* thor2 PAM112 */
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_100GB_PAM4_112, IFM_100G_AUI2_AC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_200GB_PAM4_112, IFM_200G_AUI4_AC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_400GB_PAM4_112, IFM_400G_AUI8_AC);
break;
case BNXT_MEDIA_KR:
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
- BNXT_IFMEDIA_ADD(supported_pam4, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_50G, IFM_50G_KR_PAM4);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_100G, IFM_100G_KR2_PAM4);
+ BNXT_IFMEDIA_ADD(supported_pam4_speeds, PAM4_SPEEDS_200G, IFM_200G_KR4_PAM4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100GB, IFM_100G_KR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR2);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_50GB, IFM_50G_KR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_KR4);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_KR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_20GB, IFM_20G_KR2);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
+ break;
+
+ case BNXT_MEDIA_AC:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_25GB, IFM_25G_ACC);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_AOC);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_40GB, IFM_40G_XLAUI_AC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_25GB, IFM_25G_ACC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_AOC);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_40GB, IFM_40G_XLAUI_AC);
+ break;
+
+ case BNXT_MEDIA_BASECX:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_CX);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_CX);
+ break;
+
+ case BNXT_MEDIA_BASET:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_T);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_T);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_T);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_100MB, IFM_100_T);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10MB, IFM_10_T);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_T);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_T);
+ break;
+
+ case BNXT_MEDIA_BASEKX:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_10GB, IFM_10G_KR);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_2_5GB, IFM_2500_KX);
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_KX);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_10GB, IFM_10G_KR);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_KX);
+ break;
+
+ case BNXT_MEDIA_BASESGMII:
+ BNXT_IFMEDIA_ADD(supported_NRZ_speeds, SPEEDS_1GB, IFM_1000_SGMII);
+ BNXT_IFMEDIA_ADD(supported_speeds2, SPEEDS2_1GB, IFM_1000_SGMII);
break;
default:
@@ -3088,11 +4518,12 @@ static void
bnxt_add_media_types(struct bnxt_softc *softc)
{
struct bnxt_link_info *link_info = &softc->link_info;
- uint16_t supported = 0, supported_pam4 = 0;
+ uint16_t supported_NRZ_speeds = 0, supported_pam4_speeds = 0, supported_speeds2 = 0;
uint8_t phy_type = get_phy_type(softc), media_type;
- supported = link_info->support_speeds;
- supported_pam4 = link_info->support_pam4_speeds;
+ supported_NRZ_speeds = link_info->support_speeds;
+ supported_speeds2 = link_info->support_speeds2;
+ supported_pam4_speeds = link_info->support_pam4_speeds;
/* Auto is always supported */
ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -3101,40 +4532,75 @@ bnxt_add_media_types(struct bnxt_softc *softc)
return;
switch (phy_type) {
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASECR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR4:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR8:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASECR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASECR4:
+
media_type = BNXT_MEDIA_CR;
break;
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASELR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR4:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR8:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASELR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASELR4:
+
media_type = BNXT_MEDIA_LR;
break;
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
- case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASESR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR4:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR8:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASESR2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASESR4:
+
media_type = BNXT_MEDIA_SR;
break;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_50G_BASEER:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER4:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER8:
+
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_200G_BASEER2:
+ case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_400G_BASEER4:
+
+ media_type = BNXT_MEDIA_ER;
+ break;
+
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR:
@@ -3142,57 +4608,53 @@ bnxt_add_media_types(struct bnxt_softc *softc)
break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE:
- BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_XLAUI_AC);
- return;
+ media_type = BNXT_MEDIA_AC;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX:
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX);
- return;
+ media_type = BNXT_MEDIA_BASECX;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET:
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE:
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T);
- return;
+ media_type = BNXT_MEDIA_BASET;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX:
- BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX);
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX);
- return;
+ media_type = BNXT_MEDIA_BASEKX;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY:
- BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII);
- return;
+ media_type = BNXT_MEDIA_BASESGMII;
+ break;
case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN:
/* Only Autoneg is supported for TYPE_UNKNOWN */
- return;
+ break;
default:
/* Only Autoneg is supported for new phy type values */
device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type);
- return;
+ break;
}
- /* add_media is invoked twice, once with a firmware speed mask of 0 and a valid
- * value for both NRZ and PAM4 sig mode. This ensures accurate display of all
- * supported medias and currently configured media in the "ifconfig -m" output
- */
-
- if (link_info->sig_mode == BNXT_SIG_MODE_PAM4) {
- add_media(softc, media_type, supported, 0);
- add_media(softc, media_type, 0, supported_pam4);
- } else {
- add_media(softc, media_type, 0, supported_pam4);
- add_media(softc, media_type, supported, 0);
+ switch (link_info->sig_mode) {
+ case BNXT_SIG_MODE_NRZ:
+ if (supported_NRZ_speeds != 0)
+ add_media(softc, media_type, supported_NRZ_speeds, 0, 0);
+ else
+ add_media(softc, media_type, 0, 0, supported_speeds2);
+ break;
+ case BNXT_SIG_MODE_PAM4:
+ if (supported_pam4_speeds != 0)
+ add_media(softc, media_type, 0, supported_pam4_speeds, 0);
+ else
+ add_media(softc, media_type, 0, 0, supported_speeds2);
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ add_media(softc, media_type, 0, 0, supported_speeds2);
+ break;
}
return;
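add_media() is driven entirely by BNXT_IFMEDIA_ADD, which presumably expands to a guarded ifmedia_add(): register the media word only when the corresponding firmware speed bit is present in the given mask (the real macro lives in the driver's headers, not in this diff). An illustrative expansion:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Illustrative guarded media-add macro: add the ifmedia word only
	 * when the firmware speed mask carries the bit. Assumed shape; the
	 * driver's actual macro is defined elsewhere.
	 */
	#define IFMEDIA_ADD_IF(mask, bit, word)				\
		do {							\
			if ((mask) & (bit))				\
				printf("add media 0x%x\n", (word));	\
		} while (0)

	int main(void)
	{
		uint16_t supported = 0x5;	/* pretend firmware speed mask */

		IFMEDIA_ADD_IF(supported, 0x1, 0x100);	/* added */
		IFMEDIA_ADD_IF(supported, 0x2, 0x200);	/* skipped */
		return 0;
	}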
@@ -3286,6 +4748,41 @@ exit:
return rc;
}
+#define ETHTOOL_SPEED_1000 1000
+#define ETHTOOL_SPEED_10000 10000
+#define ETHTOOL_SPEED_20000 20000
+#define ETHTOOL_SPEED_25000 25000
+#define ETHTOOL_SPEED_40000 40000
+#define ETHTOOL_SPEED_50000 50000
+#define ETHTOOL_SPEED_100000 100000
+#define ETHTOOL_SPEED_200000 200000
+#define ETHTOOL_SPEED_UNKNOWN (-1)
+
+static u32
+bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
+{
+ switch (fw_link_speed) {
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
+ return ETHTOOL_SPEED_1000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
+ return ETHTOOL_SPEED_10000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
+ return ETHTOOL_SPEED_20000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
+ return ETHTOOL_SPEED_25000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
+ return ETHTOOL_SPEED_40000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
+ return ETHTOOL_SPEED_50000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
+ return ETHTOOL_SPEED_100000;
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
+ return ETHTOOL_SPEED_200000;
+ default:
+ return ETHTOOL_SPEED_UNKNOWN;
+ }
+}
+
void
bnxt_report_link(struct bnxt_softc *softc)
{
@@ -3293,6 +4790,10 @@ bnxt_report_link(struct bnxt_softc *softc)
const char *duplex = NULL, *flow_ctrl = NULL;
const char *signal_mode = "";
+ if (softc->edev)
+ softc->edev->espeed =
+ bnxt_fw_to_ethtool_speed(link_info->link_speed);
+
if (link_info->link_up == link_info->last_link_up) {
if (!link_info->link_up)
return;
@@ -3326,7 +4827,10 @@ bnxt_report_link(struct bnxt_softc *softc)
signal_mode = "(NRZ) ";
break;
case BNXT_SIG_MODE_PAM4:
- signal_mode = "(PAM4) ";
+ signal_mode = "(PAM4 56Gbps) ";
+ break;
+ case BNXT_SIG_MODE_PAM4_112:
+ signal_mode = "(PAM4 112Gbps) ";
break;
default:
break;
@@ -3364,7 +4868,7 @@ bnxt_handle_isr(void *arg)
cpr->int_count++;
/* Disable further interrupts for this queue */
- if (!BNXT_CHIP_P5(softc))
+ if (!BNXT_CHIP_P5_PLUS(softc))
softc->db_ops.bnxt_db_rx_cq(cpr, 0);
return FILTER_SCHEDULE_THREAD;
@@ -3376,7 +4880,7 @@ bnxt_handle_def_cp(void *arg)
struct bnxt_softc *softc = arg;
softc->db_ops.bnxt_db_rx_cq(&softc->def_cp_ring, 0);
- GROUPTASK_ENQUEUE(&softc->def_cp_task);
+ iflib_config_task_enqueue(softc->ctx, &softc->def_cp_task);
return FILTER_HANDLED;
}
@@ -3425,22 +4929,189 @@ bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr)
cmp[i].info3_v = !cpr->v_bit;
}
+static void bnxt_event_error_report(struct bnxt_softc *softc, u32 data1, u32 data2)
+{
+ u32 err_type = BNXT_EVENT_ERROR_REPORT_TYPE(data1);
+
+ switch (err_type) {
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL:
+ device_printf(softc->dev,
+ "1PPS: Received invalid signal on pin%u from the external source. Please fix the signal and reconfigure the pin\n",
+ BNXT_EVENT_INVALID_SIGNAL_DATA(data2));
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM:
+ device_printf(softc->dev,
+ "Pause Storm detected!\n");
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD:
+ device_printf(softc->dev,
+ "One or more MMIO doorbells dropped by the device! epoch: 0x%x\n",
+ BNXT_EVENT_DBR_EPOCH(data1));
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM: {
+ const char *nvm_err_str;
+
+ if (EVENT_DATA1_NVM_ERR_TYPE_WRITE(data1))
+ nvm_err_str = "nvm write error";
+ else if (EVENT_DATA1_NVM_ERR_TYPE_ERASE(data1))
+ nvm_err_str = "nvm erase error";
+ else
+ nvm_err_str = "unrecognized nvm error";
+
+ device_printf(softc->dev,
+ "%s reported at address 0x%x\n", nvm_err_str,
+ (u32)EVENT_DATA2_NVM_ERR_ADDR(data2));
+ break;
+ }
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD: {
+ char *threshold_type;
+ char *dir_str;
+
+ switch (EVENT_DATA1_THERMAL_THRESHOLD_TYPE(data1)) {
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_WARN:
+ threshold_type = "warning";
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_CRITICAL:
+ threshold_type = "critical";
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_FATAL:
+ threshold_type = "fatal";
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_THRESHOLD_TYPE_SHUTDOWN:
+ threshold_type = "shutdown";
+ break;
+ default:
+ device_printf(softc->dev,
+ "Unknown Thermal threshold type event\n");
+ return;
+ }
+ if (EVENT_DATA1_THERMAL_THRESHOLD_DIR_INCREASING(data1))
+ dir_str = "above";
+ else
+ dir_str = "below";
+ device_printf(softc->dev,
+ "Chip temperature has gone %s the %s thermal threshold!\n",
+ dir_str, threshold_type);
+ device_printf(softc->dev,
+ "Temperature (In Celsius), Current: %u, threshold: %u\n",
+ BNXT_EVENT_THERMAL_CURRENT_TEMP(data2),
+ BNXT_EVENT_THERMAL_THRESHOLD_TEMP(data2));
+ break;
+ }
+ case HWRM_ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ device_printf(softc->dev,
+ "Speed change is not supported with dual rate transceivers on this board\n");
+ break;
+
+ default:
+ device_printf(softc->dev,
+ "FW reported unknown error type: %u, data1: 0x%x data2: 0x%x\n",
+ err_type, data1, data2);
+ break;
+ }
+}
+
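bnxt_event_error_report() pulls an error type out of data1 and a type-specific payload out of data2; the helper macros are, by all appearances, mask-and-shift extractors over the two 32-bit event words (the authoritative layout is in the HSI definitions, not this diff). An illustrative decode:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative mask/shift decode of async event words; the field
	 * positions here are made up for the example. */
	#define EVT_TYPE(d1)	((d1) & 0xff)
	#define EVT_PIN(d2)	(((d2) >> 8) & 0xf)

	int main(void)
	{
		uint32_t data1 = 0x00000001, data2 = 0x00000300;

		printf("err type %u, pin %u\n", EVT_TYPE(data1), EVT_PIN(data2));
		return 0;
	}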
static void
bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
{
struct hwrm_async_event_cmpl *ae = (void *)cmpl;
uint16_t async_id = le16toh(ae->event_id);
struct ifmediareq ifmr;
+ char *type_str;
+ char *status_desc;
+ struct bnxt_fw_health *fw_health;
+ u32 data1 = le32toh(ae->event_data1);
+ u32 data2 = le32toh(ae->event_data2);
switch (async_id) {
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE:
- if (BNXT_CHIP_P5(softc))
+ if (BNXT_CHIP_P5_PLUS(softc))
bit_set(softc->state_bv, BNXT_STATE_LINK_CHANGE);
else
bnxt_media_status(softc->ctx, &ifmr);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: {
+ bnxt_event_error_report(softc, data1, data2);
+ goto async_event_process_exit;
+ }
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
+ type_str = "Solicited";
+
+ if (!softc->fw_health)
+ goto async_event_process_exit;
+
+ softc->fw_reset_timestamp = jiffies;
+ softc->fw_reset_min_dsecs = ae->timestamp_lo;
+ if (!softc->fw_reset_min_dsecs)
+ softc->fw_reset_min_dsecs = BNXT_DFLT_FW_RST_MIN_DSECS;
+ softc->fw_reset_max_dsecs = le16toh(ae->timestamp_hi);
+ if (!softc->fw_reset_max_dsecs)
+ softc->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
+ if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
+ set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &softc->state);
+ } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ type_str = "Fatal";
+ softc->fw_health->fatalities++;
+ set_bit(BNXT_STATE_FW_FATAL_COND, &softc->state);
+ } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
+ EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
+ type_str = "Non-fatal";
+ softc->fw_health->survivals++;
+ set_bit(BNXT_STATE_FW_NON_FATAL_COND, &softc->state);
+ }
+ device_printf(softc->dev,
+ "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ type_str, data1, data2,
+ softc->fw_reset_min_dsecs * 100,
+ softc->fw_reset_max_dsecs * 100);
+ set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &softc->sp_event);
+ break;
+ }
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
+ u32 status;
+
+ fw_health = softc->fw_health;
+ status_desc = "healthy";
+
+ if (!fw_health)
+ goto async_event_process_exit;
+
+ if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
+ fw_health->enabled = false;
+ device_printf(softc->dev, "Driver recovery watchdog is disabled\n");
+ break;
+ }
+ fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ fw_health->tmr_multiplier =
+ DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
+ HZ * 10);
+ fw_health->tmr_counter = fw_health->tmr_multiplier;
+ if (!fw_health->enabled)
+ fw_health->last_fw_heartbeat =
+ bnxt_fw_health_readl(softc, BNXT_FW_HEARTBEAT_REG);
+ fw_health->last_fw_reset_cnt =
+ bnxt_fw_health_readl(softc, BNXT_FW_RESET_CNT_REG);
+ status = bnxt_fw_health_readl(softc, BNXT_FW_HEALTH_REG);
+ if (status != BNXT_FW_STATUS_HEALTHY)
+ status_desc = "unhealthy";
+ device_printf(softc->dev,
+ "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
+ fw_health->primary ? "primary" : "backup", status,
+ status_desc, fw_health->last_fw_reset_cnt);
+ if (!fw_health->enabled) {
+ /* Make sure tmr_counter is set and seen by
+ * bnxt_health_check() before setting enabled
+ */
+ smp_mb();
+ fw_health->enabled = true;
+ }
+ goto async_event_process_exit;
+ }
+
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED:
@@ -3458,14 +5129,18 @@ bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl)
"Unhandled async completion type %u\n", async_id);
break;
default:
- device_printf(softc->dev,
- "Unknown async completion type %u\n", async_id);
+ dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
+ async_id);
break;
}
+ bnxt_queue_sp_work(softc);
+
+async_event_process_exit:
+ bnxt_ulp_async_events(softc, ae);
}
static void
-bnxt_def_cp_task(void *context)
+bnxt_def_cp_task(void *context, int pending)
{
if_ctx_t ctx = context;
struct bnxt_softc *softc = iflib_get_softc(ctx);
@@ -3495,8 +5170,10 @@ bnxt_def_cp_task(void *context)
break;
case CMPL_BASE_TYPE_TX_L2:
case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V3:
case CMPL_BASE_TYPE_RX_AGG:
case CMPL_BASE_TYPE_RX_TPA_START:
+ case CMPL_BASE_TYPE_RX_TPA_START_V3:
case CMPL_BASE_TYPE_RX_TPA_END:
case CMPL_BASE_TYPE_STAT_EJECT:
case CMPL_BASE_TYPE_HWRM_DONE:
@@ -3507,12 +5184,12 @@ bnxt_def_cp_task(void *context)
case CMPL_BASE_TYPE_DBQ_EVENT:
case CMPL_BASE_TYPE_QP_EVENT:
case CMPL_BASE_TYPE_FUNC_EVENT:
- device_printf(softc->dev,
- "Unhandled completion type %u\n", type);
+ dev_dbg(softc->dev, "Unhandled Async event completion type %u\n",
+ type);
break;
default:
- device_printf(softc->dev,
- "Unknown completion type %u\n", type);
+ dev_dbg(softc->dev, "Unknown Async event completion type %u\n",
+ type);
break;
}
}
@@ -3617,6 +5294,8 @@ bnxt_get_baudrate(struct bnxt_link_info *link)
return IF_Mbps(10);
case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_200GB:
return IF_Gbps(200);
+ case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_400GB:
+ return IF_Gbps(400);
}
return IF_Gbps(100);
}
diff --git a/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h b/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h
new file mode 100644
index 000000000000..8f48609e7f6f
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/bnxt_re-abi.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Uverbs ABI header file
+ */
+
+#ifndef __BNXT_RE_UVERBS_ABI_H__
+#define __BNXT_RE_UVERBS_ABI_H__
+
+#include <asm/types.h>
+#include <linux/types.h>
+
+#define BNXT_RE_ABI_VERSION 6
+
+enum {
+ BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED = 0x01,
+ BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED = 0x02,
+ BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED = 0x04,
+ BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED = 0x08,
+ BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED = 0x10,
+ BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED = 0x20,
+ BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED = 0x40
+};
+
+enum {
+ BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT = 0x01,
+ BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE = 0x02
+};
+
+struct bnxt_re_uctx_req {
+ __aligned_u64 comp_mask;
+};
+
+#define BNXT_RE_CHIP_ID0_CHIP_NUM_SFT 0x00
+#define BNXT_RE_CHIP_ID0_CHIP_REV_SFT 0x10
+#define BNXT_RE_CHIP_ID0_CHIP_MET_SFT 0x18
+struct bnxt_re_uctx_resp {
+ __u32 dev_id;
+ __u32 max_qp;
+ __u32 pg_size;
+ __u32 cqe_sz;
+ __u32 max_cqd;
+ __u32 chip_id0;
+ __u32 chip_id1;
+ __u32 modes;
+ __aligned_u64 comp_mask;
+} __attribute__((packed));
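The three _SFT constants above give the bit offsets at which the chip number (bit 0), revision (bit 16), and metal (bit 24) are packed into chip_id0 for userspace. A sketch of composing and decoding that word (the example chip values are made up):

	#include <stdint.h>
	#include <stdio.h>

	#define CHIP_NUM_SFT	0x00
	#define CHIP_REV_SFT	0x10
	#define CHIP_MET_SFT	0x18

	int main(void)
	{
		uint32_t chip_id0 = (0x1750u << CHIP_NUM_SFT) |	/* chip number */
		    (1u << CHIP_REV_SFT) |			/* revision */
		    (2u << CHIP_MET_SFT);			/* metal */

		printf("num 0x%x rev %u metal %u\n",
		    chip_id0 & 0xffff,
		    (chip_id0 >> CHIP_REV_SFT) & 0xff,
		    (chip_id0 >> CHIP_MET_SFT) & 0xff);
		return 0;
	}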
+
+enum {
+ BNXT_RE_COMP_MASK_PD_HAS_WC_DPI = 0x01,
+ BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR = 0x02,
+};
+
+struct bnxt_re_pd_resp {
+ __u32 pdid;
+ __u32 dpi;
+ __u64 dbr;
+ __u64 comp_mask;
+ __u32 wcdpi;
+ __u64 dbr_bar_addr;
+} __attribute__((packed));
+
+enum {
+ BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO = 0x01,
+ BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI = 0x02,
+ BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE = 0x04,
+};
+
+enum {
+ BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK = 0x1
+};
+
+enum {
+ BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY = 0x1,
+ BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_PACING_NOTIFY = 0x2
+};
+
+#define BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(_req) \
+ (_req.comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK && \
+ _req.cq_capability & BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_PACING_NOTIFY)
+
+#define BNXT_RE_IS_DBR_RECOV_CQ(_req) \
+ (_req.comp_mask & BNXT_RE_COMP_MASK_CQ_REQ_HAS_CAP_MASK && \
+ _req.cq_capability & BNXT_RE_COMP_MASK_CQ_REQ_CAP_DBR_RECOVERY)
+
+struct bnxt_re_cq_req {
+ __u64 cq_va;
+ __u64 cq_handle;
+ __aligned_u64 comp_mask;
+ __u16 cq_capability;
+} __attribute__((packed));
+
+struct bnxt_re_cq_resp {
+ __u32 cqid;
+ __u32 tail;
+ __u32 phase;
+ __u32 rsvd;
+ __aligned_u64 comp_mask;
+ __u32 dpi;
+ __u64 dbr;
+ __u32 wcdpi;
+ __u64 uctx_cq_page;
+} __attribute__((packed));
+
+struct bnxt_re_resize_cq_req {
+ __u64 cq_va;
+} __attribute__((packed));
+
+struct bnxt_re_qp_req {
+ __u64 qpsva;
+ __u64 qprva;
+ __u64 qp_handle;
+} __attribute__((packed));
+
+struct bnxt_re_qp_resp {
+ __u32 qpid;
+} __attribute__((packed));
+
+struct bnxt_re_srq_req {
+ __u64 srqva;
+ __u64 srq_handle;
+} __attribute__((packed));
+
+struct bnxt_re_srq_resp {
+ __u32 srqid;
+} __attribute__((packed));
+
+/* Modify QP */
+enum {
+ BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK = 0x1,
+ BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN = 0x1,
+ BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK = 0x2
+};
+
+struct bnxt_re_modify_qp_ex_req {
+ __aligned_u64 comp_mask;
+ __u32 dpi;
+ __u32 rsvd;
+} __packed;
+
+struct bnxt_re_modify_qp_ex_resp {
+ __aligned_u64 comp_mask;
+ __u32 ppp_st_idx;
+ __u32 path_mtu;
+} __packed;
+
+enum bnxt_re_shpg_offt {
+ BNXT_RE_BEG_RESV_OFFT = 0x00,
+ BNXT_RE_AVID_OFFT = 0x10,
+ BNXT_RE_AVID_SIZE = 0x04,
+ BNXT_RE_END_RESV_OFFT = 0xFF0
+};
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/bnxt_re.h b/sys/dev/bnxt/bnxt_re/bnxt_re.h
new file mode 100644
index 000000000000..fe7a27f4e216
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/bnxt_re.h
@@ -0,0 +1,1077 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: main (header)
+ */
+
+#ifndef __BNXT_RE_H__
+#define __BNXT_RE_H__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_umem.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
+#include <rdma/ib_cache.h>
+#include <linux/pci.h>
+
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+#include "hsi_struct_def.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "qplib_rcfw.h"
+#include "ib_verbs.h"
+#include "stats.h"
+
+#define ROCE_DRV_MODULE_NAME "bnxt_re"
+#define ROCE_DRV_MODULE_VERSION "230.0.133.0"
+#define ROCE_DRV_MODULE_RELDATE "April 22, 2024"
+
+#define BNXT_RE_REF_WAIT_COUNT 20
+#define BNXT_RE_ROCE_V1_ETH_TYPE 0x8915
+#define BNXT_RE_ROCE_V2_PORT_NO 4791
+#define BNXT_RE_RES_FREE_WAIT_COUNT 1000
+
+#define BNXT_RE_PAGE_SHIFT_4K (12)
+#define BNXT_RE_PAGE_SHIFT_8K (13)
+#define BNXT_RE_PAGE_SHIFT_64K (16)
+#define BNXT_RE_PAGE_SHIFT_2M (21)
+#define BNXT_RE_PAGE_SHIFT_8M (23)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+
+#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
+#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
+#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
+#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
+#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
+#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+
+#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39)
+#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
+
+/* Number of MRs to reserve for PF, leaving remainder for VFs */
+#define BNXT_RE_RESVD_MR_FOR_PF (32 * 1024)
+#define BNXT_RE_MAX_GID_PER_VF 128
+
+#define BNXT_RE_MAX_VF_QPS_PER_PF (6 * 1024)
+
+/**
+ * min_not_zero - return the minimum that is _not_ zero, unless both are zero
+ * @x: value1
+ * @y: value2
+ */
+#ifndef min_not_zero
+#define min_not_zero(x, y) ({ \
+ typeof(x) __x = (x); \
+ typeof(y) __y = (y); \
+ __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
+#endif
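+
+/*
+ * For example (illustrative): min_not_zero(0, 8) evaluates to 8,
+ * min_not_zero(4, 8) to 4, and min_not_zero(0, 0) to 0.
+ */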
+
+struct ib_mr_init_attr {
+ int max_reg_descriptors;
+ u32 flags;
+};
+
+struct bnxt_re_dev;
+
+int bnxt_re_register_netdevice_notifier(struct notifier_block *nb);
+int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb);
+int ib_register_device_compat(struct bnxt_re_dev *rdev);
+
+#ifndef __struct_group
+#define __struct_group(TAG, NAME, ATTRS, MEMBERS...) \
+ union { \
+ struct { MEMBERS } ATTRS; \
+ struct TAG { MEMBERS } ATTRS NAME; \
+ }
+#endif /* __struct_group */
+#ifndef struct_group_attr
+#define struct_group_attr(NAME, ATTRS, MEMBERS...) \
+ __struct_group(/* no tag */, NAME, ATTRS, MEMBERS)
+#endif /* struct_group_attr */
+/*
+ * Percentage of resources of each type reserved for PF.
+ * Remaining resources are divided equally among VFs.
+ * [0, 100]
+ */
+
+#define BNXT_RE_RQ_WQE_THRESHOLD 32
+#define BNXT_RE_UD_QP_HW_STALL 0x400000
+
+/*
+ * Set the default ack delay value to 16; the resulting default
+ * timeout is approx. 260 ms (4 usec * 2^16 = 262144 usec).
+ */
+
+#define BNXT_RE_DEFAULT_ACK_DELAY 16
+#define BNXT_RE_BOND_PF_MAX 2
+
+#define BNXT_RE_STATS_CTX_UPDATE_TIMER 250
+#define BNXT_RE_30SEC_MSEC (30 * 1000)
+
+#define BNXT_RE_BOND_RESCHED_CNT 10
+
+#define BNXT_RE_CHIP_NUM_57454 0xC454
+#define BNXT_RE_CHIP_NUM_57452 0xC452
+
+#define BNXT_RE_CHIP_NUM_5745X(chip_num) \
+ ((chip_num) == BNXT_RE_CHIP_NUM_57454 || \
+ (chip_num) == BNXT_RE_CHIP_NUM_57452)
+
+#define BNXT_RE_MIN_KERNEL_QP_TX_DEPTH 4096
+#define BNXT_RE_STOP_QPS_BUDGET 200
+
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev) \
+ ((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
+extern unsigned int min_tx_depth;
+extern struct mutex bnxt_re_dev_lock;
+extern struct mutex bnxt_re_mutex;
+extern struct list_head bnxt_re_dev_list;
+
+struct bnxt_re_ring_attr {
+ dma_addr_t *dma_arr;
+ int pages;
+ int type;
+ u32 depth;
+ u32 lrid; /* Logical ring id */
+ u16 flags;
+ u8 mode;
+ u8 rsvd;
+};
+
+#define BNXT_RE_MAX_DEVICES 256
+#define BNXT_RE_MSIX_FROM_MOD_PARAM -1
+#define BNXT_RE_MIN_MSIX 2
+#define BNXT_RE_MAX_MSIX_VF 2
+#define BNXT_RE_MAX_MSIX_PF 9
+#define BNXT_RE_MAX_MSIX_NPAR_PF 5
+#define BNXT_RE_MAX_MSIX 64
+#define BNXT_RE_MAX_MSIX_GEN_P5_PF BNXT_RE_MAX_MSIX
+#define BNXT_RE_GEN_P5_MAX_VF 64
+
+struct bnxt_re_nq_record {
+ struct bnxt_msix_entry msix_entries[BNXT_RE_MAX_MSIX];
+ /* FP Notification Queue (CQ & SRQ) */
+ struct bnxt_qplib_nq nq[BNXT_RE_MAX_MSIX];
+ int num_msix;
+ int max_init;
+ struct mutex load_lock;
+};
+
+struct bnxt_re_work {
+ struct work_struct work;
+ unsigned long event;
+ struct bnxt_re_dev *rdev;
+ struct ifnet *vlan_dev;
+ bool do_lag;
+
+ /* netdev where we received the event */
+ struct ifnet *netdev;
+ struct auxiliary_device *adev;
+};
+
+/*
+ * Data structure and defines to handle
+ * recovery
+ */
+#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_RETRY 20
+#define BNXT_RE_RECOVERY_IB_UNINIT_WAIT_TIME_MS 30000 /* 30sec timeout */
+#define BNXT_RE_PRE_RECOVERY_REMOVE 0x1
+#define BNXT_RE_COMPLETE_REMOVE 0x2
+#define BNXT_RE_POST_RECOVERY_INIT 0x4
+#define BNXT_RE_COMPLETE_INIT 0x8
+#define BNXT_RE_COMPLETE_SHUTDOWN 0x10
+
+/* QP1 SQ entry data structure */
+struct bnxt_re_sqp_entries {
+ u64 wrid;
+ struct bnxt_qplib_sge sge;
+ /* For storing the actual qp1 cqe */
+ struct bnxt_qplib_cqe cqe;
+ struct bnxt_re_qp *qp1_qp;
+};
+
+/* GSI QP mode enum */
+enum bnxt_re_gsi_mode {
+ BNXT_RE_GSI_MODE_INVALID = 0,
+ BNXT_RE_GSI_MODE_ALL = 1,
+ BNXT_RE_GSI_MODE_ROCE_V1,
+ BNXT_RE_GSI_MODE_ROCE_V2_IPV4,
+ BNXT_RE_GSI_MODE_ROCE_V2_IPV6,
+ BNXT_RE_GSI_MODE_UD
+};
+
+enum bnxt_re_roce_cap {
+ BNXT_RE_FLAG_ROCEV1_CAP = 1,
+ BNXT_RE_FLAG_ROCEV2_CAP,
+ BNXT_RE_FLAG_ROCEV1_V2_CAP,
+};
+
+#define BNXT_RE_MAX_GSI_SQP_ENTRIES 1024
+struct bnxt_re_gsi_context {
+ u8 gsi_qp_mode;
+ bool first_cq_created;
+ /* Start: used only in gsi_mode_all */
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_sqp_entries *sqp_tbl;
+ /* End: used only in gsi_mode_all */
+};
+
+struct bnxt_re_tc_rec {
+ u8 cos_id_roce;
+ u8 tc_roce;
+ u8 cos_id_cnp;
+ u8 tc_cnp;
+ u8 tc_def;
+ u8 cos_id_def;
+ u8 max_tc;
+ u8 roce_prio;
+ u8 cnp_prio;
+ u8 roce_dscp;
+ u8 cnp_dscp;
+ u8 prio_valid;
+ u8 dscp_valid;
+ bool ecn_enabled;
+ bool serv_type_enabled;
+ u64 cnp_dscp_bv;
+ u64 roce_dscp_bv;
+};
+
+struct bnxt_re_dscp2pri {
+ u8 dscp;
+ u8 mask;
+ u8 pri;
+};
+
+struct bnxt_re_cos2bw_cfg {
+ u8 pad[3];
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
+ u8 unused;
+};
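+
+/*
+ * Illustrative sketch (not part of the driver): the struct_group_attr()
+ * wrapper lets callers address the queue configuration both as a named
+ * unit and member-by-member, e.g.:
+ *
+ *	struct bnxt_re_cos2bw_cfg a = {}, b = {};
+ *
+ *	a.queue_id = 1;
+ *	memcpy(&b.cfg, &a.cfg, sizeof(a.cfg));  (copies the whole group)
+ */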
+
+#define BNXT_RE_AEQ_IDX 0
+#define BNXT_RE_MAX_SGID_ENTRIES 256
+
+#define BNXT_RE_DBGFS_FILE_MEM 65536
+enum {
+ BNXT_RE_STATS_QUERY = 1,
+ BNXT_RE_QP_QUERY = 2,
+ BNXT_RE_SERVICE_FN_QUERY = 3,
+};
+
+struct bnxt_re_dbg_file {
+ struct bnxt_re_dev *rdev;
+ u32 type;
+ union {
+ struct bnxt_qplib_query_stats_info sinfo;
+ struct bnxt_qplib_query_fn_info fninfo;
+ } params;
+ char dbg_buf[BNXT_RE_DBGFS_FILE_MEM];
+};
+
+struct bnxt_re_debug_entries {
+ /* Dir entries */
+ struct dentry *qpinfo_dir;
+ struct dentry *service_fn_dir;
+ /* file entries */
+ struct dentry *stat_query;
+ struct bnxt_re_dbg_file stat_file;
+ struct dentry *qplist_query;
+ struct bnxt_re_dbg_file qp_file;
+ struct dentry *service_fn_query;
+ struct bnxt_re_dbg_file service_fn_file;
+};
+
+struct bnxt_re_en_dev_info {
+ struct list_head en_list;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+ unsigned long flags;
+#define BNXT_RE_FLAG_EN_DEV_NETDEV_REG 0
+#define BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV 1
+#define BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV 2
+ u8 wqe_mode;
+ u8 gsi_mode;
+ bool te_bypass;
+ bool ib_uninit_done;
+ u32 num_msix_requested;
+ wait_queue_head_t waitq;
+};
+
+#define BNXT_RE_DB_FIFO_ROOM_MASK_P5 0x1FFF8000
+#define BNXT_RE_MAX_FIFO_DEPTH_P5 0x2c00
+#define BNXT_RE_DB_FIFO_ROOM_SHIFT 15
+
+#define BNXT_RE_DB_FIFO_ROOM_MASK_P7 0x3FFF8000
+#define BNXT_RE_MAX_FIFO_DEPTH_P7 0x8000
+
+#define BNXT_RE_DB_FIFO_ROOM_MASK(ctx) \
+ (_is_chip_p7((ctx)) ? \
+ BNXT_RE_DB_FIFO_ROOM_MASK_P7 :\
+ BNXT_RE_DB_FIFO_ROOM_MASK_P5)
+#define BNXT_RE_MAX_FIFO_DEPTH(ctx) \
+ (_is_chip_p7((ctx)) ? \
+ BNXT_RE_MAX_FIFO_DEPTH_P7 :\
+ BNXT_RE_MAX_FIFO_DEPTH_P5)
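+
+/*
+ * A sketch of how these are typically combined (illustrative; 'reg' is a
+ * hypothetical raw read of the DB FIFO register):
+ *
+ *	occupancy = BNXT_RE_MAX_FIFO_DEPTH(ctx) -
+ *		    ((reg & BNXT_RE_DB_FIFO_ROOM_MASK(ctx)) >>
+ *		     BNXT_RE_DB_FIFO_ROOM_SHIFT);
+ */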
+
+struct bnxt_dbq_nq_list {
+ int num_nql_entries;
+ u16 nq_id[16];
+};
+
+#define BNXT_RE_ASYNC_ERR_REP_BASE(_type) \
+ (ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_##_type)
+
+#define BNXT_RE_ASYNC_ERR_DBR_TRESH(_type) \
+ (ASYNC_EVENT_CMPL_ERROR_REPORT_DOORBELL_DROP_THRESHOLD_##_type)
+
+#define BNXT_RE_EVENT_DBR_EPOCH(data) \
+ (((data) & \
+ BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_MASK)) >> \
+ BNXT_RE_ASYNC_ERR_DBR_TRESH(EVENT_DATA1_EPOCH_SFT))
+
+#define BNXT_RE_EVENT_ERROR_REPORT_TYPE(data1) \
+ (((data1) & \
+ BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_MASK)) >> \
+ BNXT_RE_ASYNC_ERR_REP_BASE(TYPE_SFT))
+
+#define BNXT_RE_DBR_LIST_ADD(_rdev, _res, _type) \
+{ \
+ spin_lock(&(_rdev)->res_list[_type].lock); \
+ list_add_tail(&(_res)->dbr_list, \
+ &(_rdev)->res_list[_type].head); \
+ spin_unlock(&(_rdev)->res_list[_type].lock); \
+}
+
+#define BNXT_RE_DBR_LIST_DEL(_rdev, _res, _type) \
+{ \
+ spin_lock(&(_rdev)->res_list[_type].lock); \
+ list_del(&(_res)->dbr_list); \
+ spin_unlock(&(_rdev)->res_list[_type].lock); \
+}
+
+#define BNXT_RE_CQ_PAGE_LIST_ADD(_uctx, _cq) \
+{ \
+ mutex_lock(&(_uctx)->cq_lock); \
+ list_add_tail(&(_cq)->cq_list, &(_uctx)->cq_list); \
+ mutex_unlock(&(_uctx)->cq_lock); \
+}
+
+#define BNXT_RE_CQ_PAGE_LIST_DEL(_uctx, _cq) \
+{ \
+ mutex_lock(&(_uctx)->cq_lock); \
+ list_del(&(_cq)->cq_list); \
+ mutex_unlock(&(_uctx)->cq_lock); \
+}
+
+#define BNXT_RE_NETDEV_EVENT(event, x) \
+ do { \
+ if ((event) == (x)) \
+ return #x; \
+ } while (0)
+
+/* Do not change the order of this enum; the DBR recovery code depends on it */
+enum {
+ BNXT_RE_RES_TYPE_CQ = 0,
+ BNXT_RE_RES_TYPE_UCTX,
+ BNXT_RE_RES_TYPE_QP,
+ BNXT_RE_RES_TYPE_SRQ,
+ BNXT_RE_RES_TYPE_MAX
+};
+
+struct bnxt_re_dbr_res_list {
+ struct list_head head;
+ spinlock_t lock;
+};
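+
+/*
+ * Illustrative pairing of the list helpers above (a sketch): a CQ that
+ * participates in doorbell-drop recovery would be tracked for its
+ * lifetime as
+ *
+ *	BNXT_RE_DBR_LIST_ADD(rdev, cq, BNXT_RE_RES_TYPE_CQ);   on create
+ *	...
+ *	BNXT_RE_DBR_LIST_DEL(rdev, cq, BNXT_RE_RES_TYPE_CQ);   on destroy
+ */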
+
+struct bnxt_re_dbr_drop_recov_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ u32 curr_epoch;
+};
+
+struct bnxt_re_aer_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+};
+
+struct bnxt_re_dbq_stats {
+ u64 fifo_occup_slab_1;
+ u64 fifo_occup_slab_2;
+ u64 fifo_occup_slab_3;
+ u64 fifo_occup_slab_4;
+ u64 fifo_occup_water_mark;
+ u64 do_pacing_slab_1;
+ u64 do_pacing_slab_2;
+ u64 do_pacing_slab_3;
+ u64 do_pacing_slab_4;
+ u64 do_pacing_slab_5;
+ u64 do_pacing_water_mark;
+};
+
+/* Device debug statistics */
+struct bnxt_re_drv_dbg_stats {
+ struct bnxt_re_dbq_stats dbq;
+};
+
+/* DB pacing counters */
+struct bnxt_re_dbr_sw_stats {
+ u64 dbq_int_recv;
+ u64 dbq_int_en;
+ u64 dbq_pacing_resched;
+ u64 dbq_pacing_complete;
+ u64 dbq_pacing_alerts;
+ u64 dbr_drop_recov_events;
+ u64 dbr_drop_recov_timeouts;
+ u64 dbr_drop_recov_timeout_users;
+ u64 dbr_drop_recov_event_skips;
+};
+
+struct bnxt_re_dev {
+ struct ib_device ibdev;
+ struct list_head list;
+ atomic_t ref_count;
+ atomic_t sched_count;
+ unsigned long flags;
+#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
+#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
+#define BNXT_RE_FLAG_GOT_MSIX 2
+#define BNXT_RE_FLAG_HAVE_L2_REF 3
+#define BNXT_RE_FLAG_ALLOC_RCFW 4
+#define BNXT_RE_FLAG_NET_RING_ALLOC 5
+#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 6
+#define BNXT_RE_FLAG_ALLOC_CTX 7
+#define BNXT_RE_FLAG_STATS_CTX_ALLOC 8
+#define BNXT_RE_FLAG_STATS_CTX2_ALLOC 9
+#define BNXT_RE_FLAG_RCFW_CHANNEL_INIT 10
+#define BNXT_RE_FLAG_WORKER_REG 11
+#define BNXT_RE_FLAG_TBLS_ALLOCINIT 12
+#define BNXT_RE_FLAG_SETUP_NQ 13
+#define BNXT_RE_FLAG_BOND_DEV_REGISTERED 14
+#define BNXT_RE_FLAG_PER_PORT_DEBUG_INFO 15
+#define BNXT_RE_FLAG_DEV_LIST_INITIALIZED 16
+#define BNXT_RE_FLAG_ERR_DEVICE_DETACHED 17
+#define BNXT_RE_FLAG_INIT_DCBX_CC_PARAM 18
+#define BNXT_RE_FLAG_STOP_IN_PROGRESS 20
+#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
+#define BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS 30
+ struct ifnet *netdev;
+ struct auxiliary_device *adev;
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_nq_record nqr;
+ int id;
+ struct delayed_work worker;
+ u16 worker_30s;
+ struct bnxt_re_tc_rec tc_rec[2];
+ u8 cur_prio_map;
+ /* RCFW Channel */
+ struct bnxt_qplib_rcfw rcfw;
+ /* Device Resources */
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_res qplib_res;
+ struct bnxt_qplib_dpi dpi_privileged;
+ struct bnxt_qplib_cc_param cc_param;
+ struct mutex cc_lock;
+ struct mutex qp_lock;
+ struct list_head qp_list;
+ u8 roce_mode;
+
+ /* Max of 2 lossless traffic class supported per port */
+ u16 cosq[2];
+ /* Start: QP for handling QP1 packets */
+ struct bnxt_re_gsi_context gsi_ctx;
+ /* End: QP for handling QP1 packets */
+ bool is_virtfn;
+ u32 num_vfs;
+ u32 espeed;
+ /*
+ * For storing the speed of slave interfaces.
+ * Same as espeed when bond is not configured
+ */
+ u32 sl_espeed;
+ /* Used as a workaround for the iSER stack */
+ u32 min_tx_depth;
+ /* To enable qp debug info. Disabled during driver load */
+ u32 en_qp_dbg;
+ /* Array to handle gid mapping */
+ char *gid_map;
+
+ struct bnxt_re_device_stats stats;
+ struct bnxt_re_drv_dbg_stats *dbg_stats;
+ /* debugfs to expose per port information */
+ struct dentry *port_debug_dir;
+ struct dentry *info;
+ struct dentry *drv_dbg_stats;
+ struct dentry *sp_perf_stats;
+ struct dentry *pdev_debug_dir;
+ struct dentry *pdev_qpinfo_dir;
+ struct bnxt_re_debug_entries *dbg_ent;
+ struct workqueue_struct *resolve_wq;
+ struct list_head mac_wq_list;
+ struct workqueue_struct *dcb_wq;
+ struct workqueue_struct *aer_wq;
+ u32 event_bitmap[3];
+ bool unreg_sched;
+ u64 dbr_throttling_reg_off;
+ u64 dbr_aeq_arm_reg_off;
+ u64 dbr_db_fifo_reg_off;
+ void *dbr_page;
+ u64 dbr_bar_addr;
+ u32 pacing_algo_th;
+ u32 pacing_en_int_th;
+ u32 do_pacing_save;
+ struct workqueue_struct *dbq_wq;
+ struct workqueue_struct *dbr_drop_recov_wq;
+ struct work_struct dbq_fifo_check_work;
+ struct delayed_work dbq_pacing_work;
+ /* protect DB pacing */
+ struct mutex dbq_lock;
+ /* Control DBR pacing feature. Set if enabled */
+ bool dbr_pacing;
+ /* Control DBR recovery feature. Set if enabled */
+ bool dbr_drop_recov;
+ bool user_dbr_drop_recov;
+ /* DBR recovery feature. Set if running */
+ bool dbr_recovery_on;
+ u32 user_dbr_drop_recov_timeout;
+ /*
+ * Value used for pacing algo when pacing is active
+ */
+#define BNXT_RE_MAX_DBR_DO_PACING 0xFFFF
+ u32 dbr_do_pacing;
+ u32 dbq_watermark; /* Current watermark set in HW registers */
+ u32 dbq_nq_id; /* Current NQ ID for DBQ events */
+ u32 dbq_pacing_time; /* ms */
+ u32 dbr_def_do_pacing; /* do_pacing when no congestion */
+ u32 dbr_evt_curr_epoch;
+ bool dbq_int_disable;
+
+ bool mod_exit;
+ struct bnxt_re_dbr_sw_stats *dbr_sw_stats;
+ struct bnxt_re_dbr_res_list res_list[BNXT_RE_RES_TYPE_MAX];
+ struct bnxt_dbq_nq_list nq_list;
+ char dev_name[IB_DEVICE_NAME_MAX];
+ atomic_t dbq_intr_running;
+ u32 num_msix_requested;
+ unsigned char *dev_addr; /* For netdev->dev_addr */
+};
+
+#define BNXT_RE_RESOLVE_RETRY_COUNT_US 5000000 /* 5 sec */
+struct bnxt_re_resolve_dmac_work {
+ struct work_struct work;
+ struct list_head list;
+ struct bnxt_re_dev *rdev;
+ struct ib_ah_attr *ah_attr;
+ struct bnxt_re_ah_info *ah_info;
+ atomic_t status_wait;
+};
+
+static inline u8 bnxt_re_get_prio(u8 prio_map)
+{
+ u8 prio = 0xFF;
+
+ for (prio = 0; prio < 8; prio++)
+ if (prio_map & (1UL << prio))
+ break;
+ return prio;
+}
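+
+/*
+ * For example, a prio_map of 0x28 (priorities 3 and 5 set) returns 3;
+ * if no bit is set the loop falls through and the function returns 8.
+ */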
+
+/* This should be called with bnxt_re_dev_lock mutex held */
+static inline bool __bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_dev *tmp_rdev;
+
+ list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
+ if (rdev == tmp_rdev)
+ return true;
+ }
+ return false;
+}
+
+static inline bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_dev *tmp_rdev;
+
+ mutex_lock(&bnxt_re_dev_lock);
+ list_for_each_entry(tmp_rdev, &bnxt_re_dev_list, list) {
+ if (rdev == tmp_rdev) {
+ mutex_unlock(&bnxt_re_dev_lock);
+ return true;
+ }
+ }
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ pr_debug("bnxt_re: %s : Invalid rdev received rdev = %p\n",
+ __func__, rdev);
+ return false;
+}
+
+int bnxt_re_send_hwrm_cmd(struct bnxt_re_dev *rdev, void *cmd,
+ int cmdlen);
+void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev);
+int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
+ struct bnxt_re_dscp2pri *d2p, u16 count,
+ u16 target_id);
+int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
+ struct bnxt_re_dscp2pri *d2p, u16 *count,
+ u16 target_id);
+int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
+ struct bnxt_re_tc_rec *cnprec, u16 tid);
+int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
+ struct bnxt_re_cos2bw_cfg *cfg);
+int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
+ struct bnxt_re_cos2bw_cfg *cfg);
+int bnxt_re_hwrm_pri2cos_cfg(struct bnxt_re_dev *rdev,
+ u16 target_id, u16 port_id,
+ u8 *cos_id_map, u8 pri_map);
+int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev);
+int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev);
+struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev);
+struct bnxt_re_dev *bnxt_re_from_netdev(struct ifnet *netdev);
+u8 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev, u8 selector);
+struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev);
+void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq);
+
+#define to_bnxt_re(ptr, type, member) \
+ container_of(ptr, type, member)
+
+#define to_bnxt_re_dev(ptr, member) \
+ container_of((ptr), struct bnxt_re_dev, member)
+
+/* Even number functions from port 0 and odd number from port 1 */
+#define BNXT_RE_IS_PORT0(rdev) (!(rdev->en_dev->pdev->devfn & 1))
+
+#define BNXT_RE_ROCE_V1_PACKET 0
+#define BNXT_RE_ROCEV2_IPV4_PACKET 2
+#define BNXT_RE_ROCEV2_IPV6_PACKET 3
+#define BNXT_RE_ACTIVE_MAP_PORT1 0x1 /* port-1 active */
+#define BNXT_RE_ACTIVE_MAP_PORT2 0x2 /* port-2 active */
+
+#define BNXT_RE_MEMBER_PORT_MAP (BNXT_RE_ACTIVE_MAP_PORT1 | \
+ BNXT_RE_ACTIVE_MAP_PORT2)
+
+#define rdev_to_dev(rdev) ((rdev) ? (&(rdev)->ibdev.dev) : NULL)
+
+void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev);
+bool bnxt_re_is_rdev_valid(struct bnxt_re_dev *rdev);
+
+#define bnxt_re_rdev_ready(rdev) (bnxt_re_is_rdev_valid(rdev) && \
+ (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)))
+#define BNXT_RE_SRIOV_CFG_TIMEOUT 6
+
+int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
+void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 removal_type,
+ struct auxiliary_device *aux_dev);
+void bnxt_re_destroy_lag(struct bnxt_re_dev **rdev);
+int bnxt_re_add_device(struct bnxt_re_dev **rdev,
+ struct ifnet *netdev,
+ u8 qp_mode, u8 op_type, u8 wqe_mode, u32 num_msix_requested,
+ struct auxiliary_device *aux_dev);
+void bnxt_re_create_base_interface(bool primary);
+int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
+ struct ifnet *vlan_dev,
+ struct ifnet *netdev,
+ struct auxiliary_device *aux_dev);
+void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev);
+int _bnxt_re_ib_init(struct bnxt_re_dev *rdev);
+int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev);
+void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev);
+void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev);
+
+/* The rdev ref_count is to protect immature removal of the device */
+static inline void bnxt_re_hold(struct bnxt_re_dev *rdev)
+{
+ atomic_inc(&rdev->ref_count);
+ dev_dbg(rdev_to_dev(rdev),
+ "Hold ref_count = 0x%x", atomic_read(&rdev->ref_count));
+}
+
+static inline void bnxt_re_put(struct bnxt_re_dev *rdev)
+{
+ atomic_dec(&rdev->ref_count);
+ dev_dbg(rdev_to_dev(rdev),
+ "Put ref_count = 0x%x", atomic_read(&rdev->ref_count));
+}
+
+/*
+ * Responder Error reason codes
+ * FIXME: Remove these when the defs
+ * are properly included in the hsi header
+ */
+enum res_err_state_reason {
+ /* No error. */
+ CFCQ_RES_ERR_STATE_REASON_NO_ERROR = 0,
+ /*
+ * Incoming Send, RDMA write, or RDMA read exceeds the maximum
+ * transfer length. Detected on RX first and only packets for
+ * write. Detected on RX request for read. This is an RX
+ * Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_EXCEED_MAX,
+ /*
+ * RDMA write payload size does not match write length. Detected
+ * when total write payload is not equal to the RDMA write
+ * length that was given in the first or only packet of the
+ * request. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH,
+ /*
+ * Send payload exceeds RQ/SRQ WQE buffer capacity. The total
+ * send payload that arrived is more than the size of the WQE
+ * buffer that was fetched from the RQ/SRQ. This is an RX
+ * Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE,
+ /*
+ * Responder detected opcode error. * First, only, middle, last
+ * for incoming requests are improperly ordered with respect to
+ * previous (PSN) packet. * First or middle packet is not full
+ * MTU size. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_OPCODE_ERROR,
+ /*
+ * PSN sequence error retry limit exceeded. The responder
+ * encountered a PSN sequence error for the same PSN too many
+ * times. This can occur via implicit or explicit NAK. This is
+ * an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT,
+ /*
+ * Invalid R_Key. An incoming request contained an R_Key that
+ * did not reference a valid MR/MW. This error may be detected
+ * by the RX engine for RDMA write or by the TX engine for RDMA
+ * read (detected while servicing IRRQ). This is an RX Detected
+ * Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY,
+ /*
+ * Domain error. An incoming request specified an R_Key which
+ * referenced a MR/MW that was not in the same PD as the QP on
+ * which the request arrived. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR,
+ /*
+ * No permission. An incoming request contained an R_Key that
+ * referenced a MR/MW which did not have the access permission
+ * needed for the operation. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION,
+ /*
+ * Range error. An incoming request had a combination of R_Key,
+ * VA, and length that was out of bounds of the associated
+ * MR/MW. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR,
+ /*
+ * Invalid R_Key. An incoming request contained an R_Key that
+ * did not reference a valid MR/MW. This error may be detected
+ * by the RX engine for RDMA write or by the TX engine for RDMA
+ * read (detected while servicing IRRQ). This is a TX Detected
+ * Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY,
+ /*
+ * Domain error. An incoming request specified an R_Key which
+ * referenced a MR/MW that was not in the same PD as the QP on
+ * which the request arrived. This is a TX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR,
+ /*
+ * No permission. An incoming request contained an R_Key that
+ * referenced a MR/MW which did not have the access permission
+ * needed for the operation. This is a TX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION,
+ /*
+ * Range error. An incoming request had a combination of R_Key,
+ * VA, and length that was out of bounds of the associated
+ * MR/MW. This is a TX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR,
+ /*
+ * IRRQ overflow. The peer sent us more RDMA read or atomic
+ * requests than the negotiated maximum. This is an RX Detected
+ * Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW,
+ /*
+ * Unsupported opcode. The peer sent us a request with an opcode
+ * for a request type that is not supported on this QP. This is
+ * an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE,
+ /*
+ * Unaligned atomic operation. The VA of an atomic request is on
+ * a memory boundary that prevents atomic execution. This is an
+ * RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC,
+ /*
+ * Remote invalidate error. A send with invalidate request
+ * arrived in which the R_Key to invalidate did not describe a
+ * MR/MW which could be invalidated. RQ WQE completes with error
+ * status. This error is only reported if the send operation did
+ * not fail. If the send operation failed then the remote
+ * invalidate error is not reported. This is an RX Detected
+ * Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_REM_INVALIDATE,
+ /*
+ * Local memory error. An RQ/SRQ SGE described an inaccessible
+ * memory. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_MEMORY_ERROR,
+ /*
+ * SRQ in error. The QP is moving to error state because it
+ * found SRQ it uses in error. This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_SRQ_ERROR,
+ /*
+ * Completion error. No CQE space available on queue or CQ not
+ * in VALID state. This is a Completion Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_CMP_ERROR,
+ /*
+ * Invalid R_Key while resending responses to duplicate request.
+ * This is a TX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_IVALID_DUP_RKEY,
+ /*
+ * Problem was found in the format of a WQE in the RQ/SRQ. This
+ * is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR,
+ /*
+ * A load error occurred on an attempt to load the CQ Context.
+ * This is a Completion Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR = 0x18,
+ /*
+ * A load error occurred on an attempt to load the SRQ Context.
+ * This is an RX Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR,
+ /*
+ * A fatal error was detected on an attempt to read from or
+ * write to PCIe on the transmit side. This error is detected by
+ * the TX side, but has the priority of a Completion Detected
+ * Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR = 0x1b,
+ /*
+ * A fatal error was detected on an attempt to read from or
+ * write to PCIe on the receive side. This error is detected by
+ * the RX side (or CAGR), but has the priority of a Completion
+ * Detected Error.
+ */
+ CFCQ_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR = 0x1c
+};
+
+int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_query_fn_info *fn_info,
+ u32 *pf_mask, u32 *first_pf);
+
+/* Default DCBx and CC values */
+#define BNXT_RE_DEFAULT_CNP_DSCP 48
+#define BNXT_RE_DEFAULT_CNP_PRI 7
+#define BNXT_RE_DEFAULT_ROCE_DSCP 26
+#define BNXT_RE_DEFAULT_ROCE_PRI 3
+
+#define BNXT_RE_DEFAULT_L2_BW 50
+#define BNXT_RE_DEFAULT_ROCE_BW 50
+
+#define ROCE_PRIO_VALID 0x0
+#define CNP_PRIO_VALID 0x1
+#define ROCE_DSCP_VALID 0x0
+#define CNP_DSCP_VALID 0x1
+
+int bnxt_re_get_pri_dscp_settings(struct bnxt_re_dev *rdev,
+ u16 target_id,
+ struct bnxt_re_tc_rec *tc_rec);
+
+int bnxt_re_setup_dscp(struct bnxt_re_dev *rdev);
+int bnxt_re_clear_dscp(struct bnxt_re_dev *rdev);
+int bnxt_re_setup_cnp_cos(struct bnxt_re_dev *rdev, bool reset);
+
+static inline enum ib_port_state bnxt_re_get_link_state(struct bnxt_re_dev *rdev)
+{
+ if (if_getdrvflags(rdev->netdev) & IFF_DRV_RUNNING &&
+ if_getlinkstate(rdev->netdev) == LINK_STATE_UP)
+ return IB_PORT_ACTIVE;
+ return IB_PORT_DOWN;
+}
+
+static inline int bnxt_re_link_state(struct bnxt_re_dev *rdev)
+{
+ return bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE ? 1 : 0;
+}
+
+static inline int is_cc_enabled(struct bnxt_re_dev *rdev)
+{
+ return rdev->cc_param.enable;
+}
+
+static inline void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev,
+ struct input *hdr, u16 opcd,
+ u16 crid, u16 trid)
+{
+ hdr->req_type = cpu_to_le16(opcd);
+ hdr->cmpl_ring = cpu_to_le16(crid);
+ hdr->target_id = cpu_to_le16(trid);
+}
+
+static inline void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg,
+ void *msg, int msg_len, void *resp,
+ int resp_max_len, int timeout)
+{
+ fw_msg->msg = msg;
+ fw_msg->msg_len = msg_len;
+ fw_msg->resp = resp;
+ fw_msg->resp_max_len = resp_max_len;
+ fw_msg->timeout = timeout;
+}
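+
+/*
+ * Illustrative HWRM call pattern (a sketch; hwrm_func_qcaps_input is one
+ * example request type): build the header with the helper above, then
+ * hand the command to the L2 driver via bnxt_re_send_hwrm_cmd():
+ *
+ *	struct hwrm_func_qcaps_input req = {};
+ *
+ *	bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_FUNC_QCAPS, -1, -1);
+ *	rc = bnxt_re_send_hwrm_cmd(rdev, &req, sizeof(req));
+ */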
+
+static inline bool is_qport_service_type_supported(struct bnxt_re_dev *rdev)
+{
+ return rdev->tc_rec[0].serv_type_enabled;
+}
+
+static inline bool is_bnxt_roce_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
+{
+ if (is_qport_service_type_supported(rdev))
+ return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_ROCE);
+ else
+ return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS_ROCE);
+}
+
+static inline bool is_bnxt_cnp_queue(struct bnxt_re_dev *rdev, u8 ser_prof, u8 prof_type)
+{
+ if (is_qport_service_type_supported(rdev))
+ return (prof_type & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_TYPE_CNP);
+ else
+ return (ser_prof == HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY_ROCE_CNP);
+}
+
+#define BNXT_RE_MAP_SH_PAGE 0x0
+#define BNXT_RE_MAP_WC 0x1
+#define BNXT_RE_DBR_PAGE 0x2
+#define BNXT_RE_MAP_DB_RECOVERY_PAGE 0x3
+
+#define BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT (20) /* 20 ms */
+#define BNXT_RE_DBR_INT_TIME 5 /* ms */
+#define BNXT_RE_PACING_EN_INT_THRESHOLD 50 /* Entries in DB FIFO */
+#define BNXT_RE_PACING_ALGO_THRESHOLD 250 /* Entries in DB FIFO */
+/* Percentage of DB FIFO depth */
+#define BNXT_RE_PACING_DBQ_THRESHOLD BNXT_RE_PACING_DBQ_HIGH_WATERMARK
+
+#define BNXT_RE_PACING_ALARM_TH_MULTIPLE(ctx) (_is_chip_p7(ctx) ? 0 : 2)
+
+/*
+ * Maximum percentage of the configurable DB FIFO depth.
+ * The doorbell FIFO depth is 0x2c00 (11264), but the DBR_REG_DB_THROTTLING
+ * register has only 12 bits to program the high watermark. This means the
+ * user can configure at most 36% of the depth (4095/11264).
+ */
+#define BNXT_RE_PACING_DBQ_HIGH_WATERMARK 36
+
+/* Default do_pacing value when there is no congestion */
+#define BNXT_RE_DBR_DO_PACING_NO_CONGESTION 0x7F /* 1 in 512 probability */
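+
+/*
+ * Arithmetic behind the "1 in 512" note above (assuming do_pacing is a
+ * probability numerator out of BNXT_RE_MAX_DBR_DO_PACING + 1 = 65536):
+ * (0x7F + 1) / 65536 = 128 / 65536 = 1/512.
+ */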
+
+enum {
+ BNXT_RE_DBQ_EVENT_SCHED = 0,
+ BNXT_RE_DBR_PACING_EVENT = 1,
+ BNXT_RE_DBR_NQ_PACING_NOTIFICATION = 2,
+};
+
+struct bnxt_re_dbq_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+ u32 event;
+};
+
+int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev);
+int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev);
+int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev,
+ u16 nq_id, u32 throttle);
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_pri2cos_qcfg(struct bnxt_re_dev *rdev, struct bnxt_re_tc_rec *tc_rec,
+ u16 target_id);
+void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
+u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
+
+static inline unsigned int bnxt_re_get_total_mr_mw_count(struct bnxt_re_dev *rdev)
+{
+ return (atomic_read(&rdev->stats.rsors.mr_count) +
+ atomic_read(&rdev->stats.rsors.mw_count));
+}
+
+static inline void bnxt_re_set_def_pacing_threshold(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->pacing_th = rdev->pacing_algo_th;
+ rdev->qplib_res.pacing_data->alarm_th =
+ rdev->pacing_algo_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
+}
+
+static inline void bnxt_re_set_def_do_pacing(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->do_pacing = rdev->dbr_def_do_pacing;
+}
+
+static inline void bnxt_re_set_pacing_dev_state(struct bnxt_re_dev *rdev)
+{
+ rdev->qplib_res.pacing_data->dev_err_state =
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+}
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/ib_verbs.c b/sys/dev/bnxt/bnxt_re/ib_verbs.c
new file mode 100644
index 000000000000..0383a16757aa
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/ib_verbs.c
@@ -0,0 +1,5498 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: IB Verbs interpreter
+ */
+
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <rdma/uverbs_ioctl.h>
+
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+
+static inline
+struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap)
+{
+
+ *nmap = umem->nmap;
+ return umem->sg_head.sgl;
+}
+
+static inline void bnxt_re_peer_mem_release(struct ib_umem *umem)
+{
+ dev_dbg(NULL, "ib_umem_release getting invoked\n");
+ ib_umem_release(umem);
+}
+
+void bnxt_re_resolve_dmac_task(struct work_struct *work)
+{
+ int rc = -1;
+ struct bnxt_re_dev *rdev;
+ struct ib_ah_attr *ah_attr;
+ struct bnxt_re_resolve_dmac_work *dmac_work =
+ container_of(work, struct bnxt_re_resolve_dmac_work, work);
+
+ rdev = dmac_work->rdev;
+ ah_attr = dmac_work->ah_attr;
+ rc = ib_resolve_eth_dmac(&rdev->ibdev, ah_attr);
+ if (rc)
+ dev_err(rdev_to_dev(dmac_work->rdev),
+ "Failed to resolve dest mac rc = %d\n", rc);
+ atomic_set(&dmac_work->status_wait, rc << 8);
+}
+
+static int __from_ib_access_flags(int iflags)
+{
+ int qflags = 0;
+
+ if (iflags & IB_ACCESS_LOCAL_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_READ)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
+ if (iflags & IB_ACCESS_REMOTE_WRITE)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
+ if (iflags & IB_ACCESS_REMOTE_ATOMIC)
+ qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
+ if (iflags & IB_ACCESS_MW_BIND)
+ qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
+ if (iflags & IB_ZERO_BASED)
+ qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
+ if (iflags & IB_ACCESS_ON_DEMAND)
+ qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
+ return qflags;
+}
+
+static enum ib_access_flags __to_ib_access_flags(int qflags)
+{
+ enum ib_access_flags iflags = 0;
+
+ if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
+ iflags |= IB_ACCESS_LOCAL_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
+ iflags |= IB_ACCESS_REMOTE_WRITE;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
+ iflags |= IB_ACCESS_REMOTE_READ;
+ if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
+ iflags |= IB_ACCESS_REMOTE_ATOMIC;
+ if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
+ iflags |= IB_ACCESS_MW_BIND;
+ if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
+ iflags |= IB_ZERO_BASED;
+ if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
+ iflags |= IB_ACCESS_ON_DEMAND;
+ return iflags;
+}
+
+static int bnxt_re_copy_to_udata(struct bnxt_re_dev *rdev, void *data,
+ int len, struct ib_udata *udata)
+{
+ int rc;
+
+ rc = ib_copy_to_udata(udata, data, len);
+ if (rc)
+ dev_err(rdev_to_dev(rdev),
+ "ucontext copy failed from %ps rc %d\n",
+ __builtin_return_address(0), rc);
+
+ return rc;
+}
+
+struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
+ u8 port_num)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ifnet *netdev = NULL;
+
+ rcu_read_lock();
+
+ if (!rdev || !rdev->netdev)
+ goto end;
+
+ netdev = rdev->netdev;
+
+ /* In case of active-backup bond mode, return active slave */
+ if (netdev)
+ dev_hold(netdev);
+
+end:
+ rcu_read_unlock();
+ return netdev;
+}
+
+int bnxt_re_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *ib_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+
+ memset(ib_attr, 0, sizeof(*ib_attr));
+
+ memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, 4);
+ bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ib_attr->sys_image_guid);
+ ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
+ ib_attr->page_size_cap = dev_attr->page_size_cap;
+ ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
+ ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
+ ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
+ ib_attr->max_qp = dev_attr->max_qp;
+ ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
+ /*
+ * Read the module param 'min_tx_depth' and latch it
+ * only once, after the driver loads.
+ */
+ if (rdev->min_tx_depth == 1 &&
+ min_tx_depth < dev_attr->max_qp_wqes)
+ rdev->min_tx_depth = min_tx_depth;
+ ib_attr->device_cap_flags =
+ IB_DEVICE_CURR_QP_STATE_MOD
+ | IB_DEVICE_RC_RNR_NAK_GEN
+ | IB_DEVICE_SHUTDOWN_PORT
+ | IB_DEVICE_SYS_IMAGE_GUID
+ | IB_DEVICE_LOCAL_DMA_LKEY
+ | IB_DEVICE_RESIZE_MAX_WR
+ | IB_DEVICE_PORT_ACTIVE_EVENT
+ | IB_DEVICE_N_NOTIFY_CQ
+ | IB_DEVICE_MEM_WINDOW
+ | IB_DEVICE_MEM_WINDOW_TYPE_2B
+ | IB_DEVICE_MEM_MGT_EXTENSIONS;
+ ib_attr->max_send_sge = dev_attr->max_qp_sges;
+ ib_attr->max_recv_sge = dev_attr->max_qp_sges;
+ ib_attr->max_sge_rd = dev_attr->max_qp_sges;
+ ib_attr->max_cq = dev_attr->max_cq;
+ ib_attr->max_cqe = dev_attr->max_cq_wqes;
+ ib_attr->max_mr = dev_attr->max_mr;
+ ib_attr->max_pd = dev_attr->max_pd;
+ ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
+ ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
+ if (dev_attr->is_atomic) {
+ ib_attr->atomic_cap = IB_ATOMIC_GLOB;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_GLOB;
+ }
+ ib_attr->max_ee_rd_atom = 0;
+ ib_attr->max_res_rd_atom = 0;
+ ib_attr->max_ee_init_rd_atom = 0;
+ ib_attr->max_ee = 0;
+ ib_attr->max_rdd = 0;
+ ib_attr->max_mw = dev_attr->max_mw;
+ ib_attr->max_raw_ipv6_qp = 0;
+ ib_attr->max_raw_ethy_qp = dev_attr->max_raw_ethy_qp;
+ ib_attr->max_mcast_grp = 0;
+ ib_attr->max_mcast_qp_attach = 0;
+ ib_attr->max_total_mcast_qp_attach = 0;
+ ib_attr->max_ah = dev_attr->max_ah;
+ ib_attr->max_srq = dev_attr->max_srq;
+ ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
+ ib_attr->max_srq_sge = dev_attr->max_srq_sges;
+
+ ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS;
+ ib_attr->max_pkeys = 1;
+ ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY;
+ ib_attr->sig_prot_cap = 0;
+ ib_attr->sig_guard_cap = 0;
+ ib_attr->odp_caps.general_caps = 0;
+
+ return 0;
+}
+
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ dev_dbg(rdev_to_dev(rdev), "Modify device with mask 0x%x\n",
+ device_modify_mask);
+
+ switch (device_modify_mask) {
+ case IB_DEVICE_MODIFY_SYS_IMAGE_GUID:
+ /* Modify the GUID requires the modification of the GID table */
+ /* GUID should be made as READ-ONLY */
+ break;
+ case IB_DEVICE_MODIFY_NODE_DESC:
+ /* Node Desc should be made as READ-ONLY */
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+static void __to_ib_speed_width(u32 espeed, u8 *speed, u8 *width)
+{
+ switch (espeed) {
+ case SPEED_1000:
+ *speed = IB_SPEED_SDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_10000:
+ *speed = IB_SPEED_QDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_20000:
+ *speed = IB_SPEED_DDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_25000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_1X;
+ break;
+ case SPEED_40000:
+ *speed = IB_SPEED_QDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_50000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_2X;
+ break;
+ case SPEED_100000:
+ *speed = IB_SPEED_EDR;
+ *width = IB_WIDTH_4X;
+ break;
+ case SPEED_200000:
+ *speed = IB_SPEED_HDR;
+ *width = IB_WIDTH_4X;
+ break;
+ default:
+ *speed = IB_SPEED_SDR;
+ *width = IB_WIDTH_1X;
+ break;
+ }
+}
+
+/* Port */
+int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_attr *port_attr)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ u8 active_speed = 0, active_width = 0;
+
+ dev_dbg(rdev_to_dev(rdev), "QUERY PORT with port_num 0x%x\n", port_num);
+ memset(port_attr, 0, sizeof(*port_attr));
+
+ port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
+ port_attr->state = bnxt_re_get_link_state(rdev);
+ if (port_attr->state == IB_PORT_ACTIVE)
+ port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
+ port_attr->max_mtu = IB_MTU_4096;
+ port_attr->active_mtu = iboe_get_mtu(if_getmtu(rdev->netdev));
+ port_attr->gid_tbl_len = dev_attr->max_sgid;
+ port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
+ IB_PORT_DEVICE_MGMT_SUP |
+ IB_PORT_VENDOR_CLASS_SUP |
+ IB_PORT_IP_BASED_GIDS;
+
+ port_attr->max_msg_sz = (u32)BNXT_RE_MAX_MR_SIZE_LOW;
+ port_attr->bad_pkey_cntr = 0;
+ port_attr->qkey_viol_cntr = 0;
+ port_attr->pkey_tbl_len = dev_attr->max_pkey;
+ port_attr->lid = 0;
+ port_attr->sm_lid = 0;
+ port_attr->lmc = 0;
+ port_attr->max_vl_num = 4;
+ port_attr->sm_sl = 0;
+ port_attr->subnet_timeout = 0;
+ port_attr->init_type_reply = 0;
+ rdev->espeed = rdev->en_dev->espeed;
+
+ if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ __to_ib_speed_width(rdev->espeed, &active_speed,
+ &active_width);
+
+ port_attr->active_speed = active_speed;
+ port_attr->active_width = active_width;
+
+ return 0;
+}
+
+int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
+ int port_modify_mask,
+ struct ib_port_modify *port_modify)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ dev_dbg(rdev_to_dev(rdev), "Modify port with mask 0x%x\n",
+ port_modify_mask);
+
+ switch (port_modify_mask) {
+ case IB_PORT_SHUTDOWN:
+ break;
+ case IB_PORT_INIT_TYPE:
+ break;
+ case IB_PORT_RESET_QKEY_CNTR:
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct ib_port_attr port_attr;
+
+ if (bnxt_re_query_port(ibdev, port_num, &port_attr))
+ return -EINVAL;
+
+ immutable->pkey_tbl_len = port_attr.pkey_tbl_len;
+ immutable->gid_tbl_len = port_attr.gid_tbl_len;
+ if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV1_CAP)
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE;
+ else if (rdev->roce_mode == BNXT_RE_FLAG_ROCEV2_CAP)
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ else
+ immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE |
+ RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;
+ immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+ return 0;
+}
+
+void bnxt_re_compat_qfwstr(struct ib_device *ibdev, char *str)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+
+ sprintf(str, "%d.%d.%d.%d", rdev->dev_attr->fw_ver[0],
+ rdev->dev_attr->fw_ver[1], rdev->dev_attr->fw_ver[2],
+ rdev->dev_attr->fw_ver[3]);
+}
+
+int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+ u16 index, u16 *pkey)
+{
+ if (index > 0)
+ return -EINVAL;
+
+ *pkey = IB_DEFAULT_PKEY_FULL;
+
+ return 0;
+}
+
+int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+ int index, union ib_gid *gid)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int rc = 0;
+
+ /* Ignore port_num */
+ memset(gid, 0, sizeof(*gid));
+ rc = bnxt_qplib_get_sgid(&rdev->qplib_res,
+ &rdev->qplib_res.sgid_tbl, index,
+ (struct bnxt_qplib_gid *)gid);
+ return rc;
+}
+
+int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
+ unsigned int index, void **context)
+{
+ int rc = 0;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid *gid_to_del;
+ u16 vlan_id = 0xFFFF;
+
+ /* Delete the entry from the hardware */
+ ctx = *context;
+ if (!ctx) {
+ dev_err(rdev_to_dev(rdev), "GID entry has no ctx?!\n");
+ return -EINVAL;
+ }
+ if (sgid_tbl && sgid_tbl->active) {
+ if (ctx->idx >= sgid_tbl->max) {
+ dev_dbg(rdev_to_dev(rdev), "GID index out of range?!\n");
+ return -EINVAL;
+ }
+ gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
+ vlan_id = sgid_tbl->tbl[ctx->idx].vlan_id;
+ ctx->refcnt--;
+ /* DEL_GID is called via WQ context (netdevice_event_work_handler)
+ * or via the ib_unregister_device path. In the former case QP1
+ * may not be destroyed yet, in which case just return, as FW
+ * needs the entry to be present and will fail its deletion.
+ * We could get invoked again after QP1 is destroyed, OR get an
+ * ADD_GID call with a different GID value for the same index,
+ * where we issue a MODIFY_GID cmd to update the GID entry -- TBD
+ */
+ if (ctx->idx == 0 &&
+ rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
+ (rdev->gsi_ctx.gsi_sqp ||
+ rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Trying to delete GID0 while QP1 is alive\n");
+ if (!ctx->refcnt) {
+ rdev->gid_map[index] = -1;
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ }
+ return 0;
+ }
+ rdev->gid_map[index] = -1;
+ if (!ctx->refcnt) {
+ rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del,
+ vlan_id, true);
+ if (!rc) {
+ dev_dbg(rdev_to_dev(rdev), "GID remove success\n");
+ ctx_tbl = sgid_tbl->ctx;
+ ctx_tbl[ctx->idx] = NULL;
+ kfree(ctx);
+ } else {
+ dev_err(rdev_to_dev(rdev),
+ "Remove GID failed rc = 0x%x\n", rc);
+ }
+ }
+ } else {
+ dev_dbg(rdev_to_dev(rdev), "GID sgid_tbl does not exist!\n");
+ return -EINVAL;
+ }
+ return rc;
+}
+
+int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context)
+{
+ int rc;
+ u32 tbl_idx = 0;
+ u16 vlan_id = 0xFFFF;
+ struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+
+ if ((attr->ndev) && is_vlan_dev(attr->ndev))
+ vlan_id = vlan_dev_vlan_id(attr->ndev);
+
+ rc = bnxt_qplib_add_sgid(sgid_tbl, gid,
+ rdev->dev_addr,
+ vlan_id, true, &tbl_idx);
+ if (rc == -EALREADY) {
+ dev_dbg(rdev_to_dev(rdev), "GID %pI6 is already present\n", gid);
+ ctx_tbl = sgid_tbl->ctx;
+ if (!ctx_tbl[tbl_idx]) {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->idx = tbl_idx;
+ ctx->refcnt = 1;
+ ctx_tbl[tbl_idx] = ctx;
+ } else {
+ ctx_tbl[tbl_idx]->refcnt++;
+ }
+ *context = ctx_tbl[tbl_idx];
+ /* tbl_idx is the HW table index and index is the stack index */
+ rdev->gid_map[index] = tbl_idx;
+ return 0;
+ } else if (rc < 0) {
+ dev_err(rdev_to_dev(rdev), "Add GID failed rc = 0x%x\n", rc);
+ return rc;
+ } else {
+ ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(rdev_to_dev(rdev), "Add GID ctx failed\n");
+ return -ENOMEM;
+ }
+ ctx_tbl = sgid_tbl->ctx;
+ ctx->idx = tbl_idx;
+ ctx->refcnt = 1;
+ ctx_tbl[tbl_idx] = ctx;
+ /* tbl_idx is the HW table index and index is the stack index */
+ rdev->gid_map[index] = tbl_idx;
+ *context = ctx;
+ }
+ return rc;
+}
+
+enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
+ u8 port_num)
+{
+ return IB_LINK_LAYER_ETHERNET;
+}
+
+static void bnxt_re_legacy_create_fence_wqe(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_legacy_fence_data *fence = &pd->fence;
+ struct ib_mr *ib_mr = &fence->mr->ib_mr;
+ struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
+ struct bnxt_re_dev *rdev = pd->rdev;
+
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ memset(wqe, 0, sizeof(*wqe));
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+ wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ wqe->bind.zero_based = false;
+ wqe->bind.parent_l_key = ib_mr->lkey;
+ wqe->bind.va = (u64)fence->va;
+ wqe->bind.length = fence->size;
+ wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
+ wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
+
+ /* Save the initial rkey in fence structure for now;
+ * wqe->bind.r_key will be set at (re)bind time.
+ */
+ fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
+}
+
+static int bnxt_re_legacy_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
+{
+ struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
+ qplib_qp);
+ struct ib_pd *ib_pd = qp->ib_qp.pd;
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_legacy_fence_data *fence = &pd->fence;
+ struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
+ struct bnxt_qplib_swqe wqe;
+ int rc;
+
+ /* TODO: Need SQ locking here when Fence WQE
+ * posting moves up into bnxt_re from bnxt_qplib.
+ */
+ memcpy(&wqe, fence_wqe, sizeof(wqe));
+ wqe.bind.r_key = fence->bind_rkey;
+ fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
+
+ dev_dbg(rdev_to_dev(qp->rdev),
+ "Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
+ wqe.bind.r_key, qp->qplib_qp.id, pd);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ if (rc) {
+ dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
+ return rc;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+
+ return rc;
+}
+
+static int bnxt_re_legacy_create_fence_mr(struct bnxt_re_pd *pd)
+{
+ int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
+ struct bnxt_re_legacy_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_mrinfo mrinfo;
+ struct bnxt_re_mr *mr = NULL;
+ struct ib_mw *ib_mw = NULL;
+ dma_addr_t dma_addr = 0;
+ u32 max_mr_count;
+ u64 pbl_tbl;
+ int rc;
+
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ memset(&mrinfo, 0, sizeof(mrinfo));
+ /* Allocate a small chunk of memory and dma-map it */
+ fence->va = kzalloc(BNXT_RE_LEGACY_FENCE_BYTES, GFP_KERNEL);
+ if (!fence->va)
+ return -ENOMEM;
+ dma_addr = ib_dma_map_single(&rdev->ibdev, fence->va,
+ BNXT_RE_LEGACY_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ rc = ib_dma_mapping_error(&rdev->ibdev, dma_addr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
+ rc = -EIO;
+ fence->dma_addr = 0;
+ goto free_va;
+ }
+ fence->dma_addr = dma_addr;
+
+ /* Allocate a MR */
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ goto free_dma_addr;
+ fence->mr = mr;
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
+ goto free_mr;
+ }
+ /* Register MR */
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ }
+ mr->qplib_mr.va = (u64)fence->va;
+ mr->qplib_mr.total_size = BNXT_RE_LEGACY_FENCE_BYTES;
+ pbl_tbl = dma_addr;
+
+ mrinfo.mrw = &mr->qplib_mr;
+ mrinfo.ptes = &pbl_tbl;
+ mrinfo.sg.npages = BNXT_RE_LEGACY_FENCE_PBL_SIZE;
+
+ mrinfo.sg.nmap = 0;
+ mrinfo.sg.sghead = 0;
+ mrinfo.sg.pgshft = PAGE_SHIFT;
+ mrinfo.sg.pgsize = PAGE_SIZE;
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
+ goto free_mr;
+ }
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ atomic_inc(&rdev->stats.rsors.mr_count);
+ max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
+ if (max_mr_count > (atomic_read(&rdev->stats.rsors.max_mr_count)))
+ atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
+
+ /* Create a fence MW only for kernel consumers */
+ ib_mw = bnxt_re_alloc_mw(&pd->ibpd, IB_MW_TYPE_1, NULL);
+ if (!ib_mw) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create fence-MW for PD: %p\n", pd);
+ rc = -EINVAL;
+ goto free_mr;
+ }
+ fence->mw = ib_mw;
+
+ bnxt_re_legacy_create_fence_wqe(pd);
+ return 0;
+
+free_mr:
+ if (mr->ib_mr.lkey) {
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ atomic_dec(&rdev->stats.rsors.mr_count);
+ }
+ kfree(mr);
+ fence->mr = NULL;
+
+free_dma_addr:
+ ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
+ BNXT_RE_LEGACY_FENCE_BYTES, DMA_BIDIRECTIONAL);
+ fence->dma_addr = 0;
+
+free_va:
+ kfree(fence->va);
+ fence->va = NULL;
+ return rc;
+}
+
+static void bnxt_re_legacy_destroy_fence_mr(struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_legacy_fence_data *fence = &pd->fence;
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mr *mr = fence->mr;
+
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (fence->mw) {
+ bnxt_re_dealloc_mw(fence->mw);
+ fence->mw = NULL;
+ }
+ if (mr) {
+ if (mr->ib_mr.rkey)
+ bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
+ false);
+ if (mr->ib_mr.lkey)
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ kfree(mr);
+ fence->mr = NULL;
+ atomic_dec(&rdev->stats.rsors.mr_count);
+ }
+ if (fence->dma_addr) {
+ ib_dma_unmap_single(&rdev->ibdev, fence->dma_addr,
+ BNXT_RE_LEGACY_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ fence->dma_addr = 0;
+ }
+ kfree(fence->va);
+ fence->va = NULL;
+}
+
+
+static int bnxt_re_get_user_dpi(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ucontext *cntx)
+{
+ struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
+ int ret = 0;
+ u8 type;
+ /* Allocate the DPI in alloc_pd or in create_cq to avoid failures
+ * in ibv_devinfo and similar applications when DPIs are depleted.
+ */
+ type = BNXT_QPLIB_DPI_TYPE_UC;
+ ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->dpi, cntx, type);
+ if (ret) {
+ dev_err(rdev_to_dev(rdev), "Alloc doorbell page failed!\n");
+ goto out;
+ }
+
+ if (cctx->modes.db_push) {
+ type = BNXT_QPLIB_DPI_TYPE_WC;
+ ret = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &cntx->wcdpi,
+ cntx, type);
+ if (ret)
+ dev_err(rdev_to_dev(rdev), "push dp alloc failed\n");
+ }
+out:
+ return ret;
+}
+
+/* Protection Domains */
+void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ int rc;
+
+ bnxt_re_legacy_destroy_fence_mr(pd);
+
+ rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
+ if (rc)
+ dev_err_ratelimited(rdev_to_dev(rdev),
+ "%s failed rc = %d\n", __func__, rc);
+ atomic_dec(&rdev->stats.rsors.pd_count);
+
+ return;
+}
+
+int bnxt_re_alloc_pd(struct ib_pd *pd_in,
+ struct ib_udata *udata)
+{
+ struct ib_pd *ibpd = pd_in;
+ struct ib_device *ibdev = ibpd->device;
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_re_ucontext *ucntx =
+ rdma_udata_to_drv_context(udata, struct bnxt_re_ucontext,
+ ibucontext);
+ u32 max_pd_count;
+ int rc;
+ struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ibpd);
+
+ pd->rdev = rdev;
+ if (bnxt_qplib_alloc_pd(&rdev->qplib_res, &pd->qplib_pd)) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate HW Protection Domain failed!\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ if (udata) {
+ struct bnxt_re_pd_resp resp = {};
+
+ if (!ucntx->dpi.dbr) {
+ rc = bnxt_re_get_user_dpi(rdev, ucntx);
+ if (rc)
+ goto dbfail;
+ }
+
+ resp.pdid = pd->qplib_pd.id;
+ /* Still allow mapping this DBR to the new user PD. */
+ resp.dpi = ucntx->dpi.dpi;
+ resp.dbr = (u64)ucntx->dpi.umdbr;
+		/* Copy only on a valid wcdpi */
+ if (ucntx->wcdpi.dpi) {
+ resp.wcdpi = ucntx->wcdpi.dpi;
+ resp.comp_mask = BNXT_RE_COMP_MASK_PD_HAS_WC_DPI;
+ }
+ if (rdev->dbr_pacing) {
+ WARN_ON(!rdev->dbr_bar_addr);
+ resp.dbr_bar_addr = (u64)rdev->dbr_bar_addr;
+ resp.comp_mask |= BNXT_RE_COMP_MASK_PD_HAS_DBR_BAR_ADDR;
+ }
+
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc)
+ goto dbfail;
+ }
+
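+	/* The legacy fence MR/MW is needed only for kernel consumers. */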
+ if (!udata)
+ if (bnxt_re_legacy_create_fence_mr(pd))
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to create Fence-MR\n");
+
+ atomic_inc(&rdev->stats.rsors.pd_count);
+ max_pd_count = atomic_read(&rdev->stats.rsors.pd_count);
+ if (max_pd_count > atomic_read(&rdev->stats.rsors.max_pd_count))
+ atomic_set(&rdev->stats.rsors.max_pd_count, max_pd_count);
+
+ return 0;
+dbfail:
+ (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd);
+fail:
+ return rc;
+}
+
+/* Address Handles */
+void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+{
+ struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
+ struct bnxt_re_dev *rdev = ah->rdev;
+	int rc = 0;
+	bool block;
+
+ block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
+
+ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
+ if (rc)
+ dev_err_ratelimited(rdev_to_dev(rdev),
+ "%s id = %d blocking %d failed rc = %d\n",
+ __func__, ah->qplib_ah.id, block, rc);
+ atomic_dec(&rdev->stats.rsors.ah_count);
+
+ return;
+}
+
+static u8 _to_bnxt_re_nw_type(enum rdma_network_type ntype)
+{
+ u8 nw_type;
+ switch (ntype) {
+ case RDMA_NETWORK_IPV4:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ nw_type = CMDQ_CREATE_AH_TYPE_V2IPV6;
+ break;
+ default:
+ nw_type = CMDQ_CREATE_AH_TYPE_V1;
+ break;
+ }
+ return nw_type;
+}
+
+static inline int
+bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
+ union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
+ struct ib_global_route *grh, struct ib_ah *ah)
+{
+	return ib_get_cached_gid(dev, port_num, index, sgid, *sgid_attr);
+}
+
+static inline enum rdma_network_type
+bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid)
+{
+ return ib_gid_to_network_type(sgid_attr->gid_type, sgid);
+}
+
+static int bnxt_re_get_ah_info(struct bnxt_re_dev *rdev,
+ struct ib_ah_attr *ah_attr,
+ struct bnxt_re_ah_info *ah_info)
+{
+ struct ib_gid_attr *gattr;
+ enum rdma_network_type ib_ntype;
+ u8 ntype;
+ union ib_gid *gid;
+ int rc = 0;
+
+ gid = &ah_info->sgid;
+ gattr = &ah_info->sgid_attr;
+
+ rc = bnxt_re_get_cached_gid(&rdev->ibdev, 1, ah_attr->grh.sgid_index,
+ gid, &gattr, &ah_attr->grh, NULL);
+ if (rc)
+ return rc;
+
+ /* Get vlan tag */
+ if (gattr->ndev) {
+ if (is_vlan_dev(gattr->ndev))
+ ah_info->vlan_tag = vlan_dev_vlan_id(gattr->ndev);
+ if_rele(gattr->ndev);
+ }
+
+	/* Get network header type for this GID */
+	ib_ntype = bnxt_re_gid_to_network_type(gattr, gid);
+ ntype = _to_bnxt_re_nw_type(ib_ntype);
+ ah_info->nw_type = ntype;
+
+ return rc;
+}
+
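+/* Map the stack's GID table index to the HW GID table index. */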
+static u8 _get_sgid_index(struct bnxt_re_dev *rdev, u8 gindx)
+{
+ gindx = rdev->gid_map[gindx];
+ return gindx;
+}
+
+static int bnxt_re_init_dmac(struct bnxt_re_dev *rdev, struct ib_ah_attr *ah_attr,
+ struct bnxt_re_ah_info *ah_info, bool is_user,
+ struct bnxt_re_ah *ah)
+{
+ int rc = 0;
+ u8 *dmac;
+
+ if (is_user && !rdma_is_multicast_addr((struct in6_addr *)
+ ah_attr->grh.dgid.raw) &&
+ !rdma_link_local_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
+
+ u32 retry_count = BNXT_RE_RESOLVE_RETRY_COUNT_US;
+ struct bnxt_re_resolve_dmac_work *resolve_dmac_work;
+
+		resolve_dmac_work = kzalloc(sizeof(*resolve_dmac_work), GFP_ATOMIC);
+		if (!resolve_dmac_work)
+			return -ENOMEM;
+
+ resolve_dmac_work->rdev = rdev;
+ resolve_dmac_work->ah_attr = ah_attr;
+ resolve_dmac_work->ah_info = ah_info;
+
+ atomic_set(&resolve_dmac_work->status_wait, 1);
+ INIT_WORK(&resolve_dmac_work->work, bnxt_re_resolve_dmac_task);
+ queue_work(rdev->resolve_wq, &resolve_dmac_work->work);
+
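+		/*
+		 * Busy-wait (1us per iteration, bounded by
+		 * BNXT_RE_RESOLVE_RETRY_COUNT_US) for the worker to clear
+		 * status_wait once the DMAC has been resolved.
+		 */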
+ do {
+ rc = atomic_read(&resolve_dmac_work->status_wait) & 0xFF;
+ if (!rc)
+ break;
+ udelay(1);
+ } while (--retry_count);
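+		/*
+		 * Timed out: park the work item on mac_wq_list for deferred
+		 * cleanup rather than freeing it while the worker may still
+		 * be running.
+		 */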
+ if (atomic_read(&resolve_dmac_work->status_wait)) {
+ INIT_LIST_HEAD(&resolve_dmac_work->list);
+ list_add_tail(&resolve_dmac_work->list,
+ &rdev->mac_wq_list);
+ return -EFAULT;
+ }
+ kfree(resolve_dmac_work);
+ }
+ dmac = ROCE_DMAC(ah_attr);
+ if (dmac)
+ memcpy(ah->qplib_ah.dmac, dmac, ETH_ALEN);
+ return rc;
+}
+
+int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
+ u32 flags, struct ib_udata *udata)
+{
+ struct ib_ah *ib_ah = ah_in;
+ struct ib_pd *ib_pd = ib_ah->pd;
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ibah);
+ struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_ah_info ah_info;
+ u32 max_ah_count;
+ bool is_user;
+ int rc;
+	bool block;
+	struct ib_ah_attr *ah_attr = attr;
+
+	block = !(flags & RDMA_CREATE_AH_SLEEPABLE);
+
+ if (!(ah_attr->ah_flags & IB_AH_GRH))
+ dev_err(rdev_to_dev(rdev), "ah_attr->ah_flags GRH is not set\n");
+
+ ah->rdev = rdev;
+ ah->qplib_ah.pd = &pd->qplib_pd;
+ is_user = ib_pd->uobject ? true : false;
+
+ /* Supply the configuration for the HW */
+ memcpy(ah->qplib_ah.dgid.data, ah_attr->grh.dgid.raw,
+ sizeof(union ib_gid));
+ ah->qplib_ah.sgid_index = _get_sgid_index(rdev, ah_attr->grh.sgid_index);
+ if (ah->qplib_ah.sgid_index == 0xFF) {
+ dev_err(rdev_to_dev(rdev), "invalid sgid_index!\n");
+ rc = -EINVAL;
+ goto fail;
+ }
+ ah->qplib_ah.host_sgid_index = ah_attr->grh.sgid_index;
+ ah->qplib_ah.traffic_class = ah_attr->grh.traffic_class;
+ ah->qplib_ah.flow_label = ah_attr->grh.flow_label;
+ ah->qplib_ah.hop_limit = ah_attr->grh.hop_limit;
+ ah->qplib_ah.sl = ah_attr->sl;
+ rc = bnxt_re_get_ah_info(rdev, ah_attr, &ah_info);
+ if (rc)
+ goto fail;
+ ah->qplib_ah.nw_type = ah_info.nw_type;
+
+ rc = bnxt_re_init_dmac(rdev, ah_attr, &ah_info, is_user, ah);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, block);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate HW Address Handle failed!\n");
+ goto fail;
+ }
+
+ /* Write AVID to shared page. */
+ if (ib_pd->uobject) {
+ struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
+ struct bnxt_re_ucontext *uctx;
+ unsigned long flag;
+ u32 *wrptr;
+
+ uctx = to_bnxt_re(ib_uctx, struct bnxt_re_ucontext, ibucontext);
+ spin_lock_irqsave(&uctx->sh_lock, flag);
+ wrptr = (u32 *)((u8 *)uctx->shpg + BNXT_RE_AVID_OFFT);
+ *wrptr = ah->qplib_ah.id;
+		wmb(); /* make sure the AVID write is visible before unlocking */
+ spin_unlock_irqrestore(&uctx->sh_lock, flag);
+ }
+ atomic_inc(&rdev->stats.rsors.ah_count);
+ max_ah_count = atomic_read(&rdev->stats.rsors.ah_count);
+ if (max_ah_count > atomic_read(&rdev->stats.rsors.max_ah_count))
+ atomic_set(&rdev->stats.rsors.max_ah_count, max_ah_count);
+
+ return 0;
+fail:
+ return rc;
+}
+
+int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
+{
+ return 0;
+}
+
+int bnxt_re_query_ah(struct ib_ah *ib_ah, struct ib_ah_attr *ah_attr)
+{
+ struct bnxt_re_ah *ah = to_bnxt_re(ib_ah, struct bnxt_re_ah, ibah);
+
+ memcpy(ah_attr->grh.dgid.raw, ah->qplib_ah.dgid.data,
+ sizeof(union ib_gid));
+ ah_attr->grh.sgid_index = ah->qplib_ah.host_sgid_index;
+ ah_attr->grh.traffic_class = ah->qplib_ah.traffic_class;
+ ah_attr->sl = ah->qplib_ah.sl;
+ memcpy(ROCE_DMAC(ah_attr), ah->qplib_ah.dmac, ETH_ALEN);
+ ah_attr->ah_flags = IB_AH_GRH;
+ ah_attr->port_num = 1;
+ ah_attr->static_rate = 0;
+
+ return 0;
+}
+
+/* Shared Receive Queues */
+void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq, ibsrq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ struct bnxt_qplib_srq *qplib_srq = &srq->qplib_srq;
+ int rc = 0;
+
+ rc = bnxt_qplib_destroy_srq(&rdev->qplib_res, qplib_srq);
+ if (rc)
+ dev_err_ratelimited(rdev_to_dev(rdev),
+ "%s id = %d failed rc = %d\n",
+ __func__, qplib_srq->id, rc);
+
+ if (srq->umem && !IS_ERR(srq->umem))
+ ib_umem_release(srq->umem);
+
+ atomic_dec(&rdev->stats.rsors.srq_count);
+
+ return;
+}
+
+static u16 _max_rwqe_sz(int nsge)
+{
+ return sizeof(struct rq_wqe_hdr) + (nsge * sizeof(struct sq_sge));
+}
+
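+/* In static WQE mode, RQ WQEs are fixed at the device-max SGE count. */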
+static u16 bnxt_re_get_rwqe_size(struct bnxt_qplib_qp *qplqp,
+ int rsge, int max)
+{
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ rsge = max;
+
+ return _max_rwqe_sz(rsge);
+}
+
+static inline
+struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
+ struct ib_ucontext *ucontext,
+ struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access, int dmasync)
+{
+ return ib_umem_get(ucontext, addr, size, access, dmasync);
+}
+
+static inline
+struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
+ struct ib_ucontext *ucontext,
+ struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access, int dmasync)
+{
+ return ib_umem_get_compat(rdev, ucontext, udata, addr, size,
+ access, 0);
+}
+
+static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem)
+{
+ return ib_umem_num_pages(umem);
+}
+
+static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd,
+ struct bnxt_re_srq *srq,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_sg_info *sginfo;
+ struct bnxt_qplib_srq *qplib_srq;
+ struct bnxt_re_ucontext *cntx;
+ struct ib_ucontext *context;
+ struct bnxt_re_srq_req ureq;
+ struct ib_umem *umem;
+ int rc, bytes = 0;
+
+ context = pd->ibpd.uobject->context;
+ cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
+ qplib_srq = &srq->qplib_srq;
+ sginfo = &qplib_srq->sginfo;
+
+ if (udata->inlen < sizeof(ureq))
+ dev_warn(rdev_to_dev(rdev),
+ "Update the library ulen %d klen %d\n",
+ (unsigned int)udata->inlen,
+ (unsigned int)sizeof(ureq));
+
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ return rc;
+
+ bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get_compat(rdev, context, udata, ureq.srqva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
+ __func__, PTR_ERR(umem));
+ return PTR_ERR(umem);
+ }
+
+ srq->umem = umem;
+ sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
+ sginfo->npages = ib_umem_num_pages_compat(umem);
+ qplib_srq->srq_handle = ureq.srq_handle;
+ qplib_srq->dpi = &cntx->dpi;
+ qplib_srq->is_user = true;
+
+ return 0;
+}
+
+int bnxt_re_create_srq(struct ib_srq *srq_in, struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *cntx = NULL;
+ struct ib_ucontext *context;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_pd *pd;
+ int rc, entries;
+ struct ib_srq *ib_srq = srq_in;
+ struct ib_pd *ib_pd = ib_srq->pd;
+ struct bnxt_re_srq *srq =
+ container_of(ib_srq, struct bnxt_re_srq, ibsrq);
+ u32 max_srq_count;
+
+ pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ rdev = pd->rdev;
+ dev_attr = rdev->dev_attr;
+
+ if (rdev->mod_exit) {
+ dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
+ rc = -EIO;
+ goto exit;
+ }
+
+ if (srq_init_attr->srq_type != IB_SRQT_BASIC) {
+ dev_err(rdev_to_dev(rdev), "SRQ type not supported\n");
+ rc = -ENOTSUPP;
+ goto exit;
+ }
+
+ if (udata) {
+ context = pd->ibpd.uobject->context;
+ cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
+ }
+
+ if (atomic_read(&rdev->stats.rsors.srq_count) >= dev_attr->max_srq) {
+ dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQs)\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create SRQ failed - max exceeded(SRQ_WQs)\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ srq->rdev = rdev;
+ srq->qplib_srq.pd = &pd->qplib_pd;
+ srq->qplib_srq.dpi = &rdev->dpi_privileged;
+
+	/*
+	 * Allocate 1 more than what's provided so posting max doesn't
+	 * mean empty.
+	 */
+ entries = srq_init_attr->attr.max_wr + 1;
+ entries = bnxt_re_init_depth(entries, cntx);
+ if (entries > dev_attr->max_srq_wqes + 1)
+ entries = dev_attr->max_srq_wqes + 1;
+
+ srq->qplib_srq.wqe_size = _max_rwqe_sz(6); /* 128 byte wqe size */
+ srq->qplib_srq.max_wqe = entries;
+ srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
+ srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
+ srq->srq_limit = srq_init_attr->attr.srq_limit;
+ srq->qplib_srq.eventq_hw_ring_id = rdev->nqr.nq[0].ring_id;
+ srq->qplib_srq.sginfo.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sginfo.pgshft = PAGE_SHIFT;
+
+ if (udata) {
+ rc = bnxt_re_init_user_srq(rdev, pd, srq, udata);
+ if (rc)
+ goto fail;
+ }
+
+ rc = bnxt_qplib_create_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW SRQ failed!\n");
+ goto fail;
+ }
+
+ if (udata) {
+		struct bnxt_re_srq_resp resp = {};
+
+ resp.srqid = srq->qplib_srq.id;
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc) {
+ bnxt_qplib_destroy_srq(&rdev->qplib_res, &srq->qplib_srq);
+ goto fail;
+ }
+ }
+ atomic_inc(&rdev->stats.rsors.srq_count);
+ max_srq_count = atomic_read(&rdev->stats.rsors.srq_count);
+ if (max_srq_count > atomic_read(&rdev->stats.rsors.max_srq_count))
+ atomic_set(&rdev->stats.rsors.max_srq_count, max_srq_count);
+ spin_lock_init(&srq->lock);
+
+ return 0;
+fail:
+ if (udata && srq->umem && !IS_ERR(srq->umem)) {
+ ib_umem_release(srq->umem);
+ srq->umem = NULL;
+ }
+exit:
+ return rc;
+}
+
+int bnxt_re_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
+ ibsrq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ switch (srq_attr_mask) {
+ case IB_SRQ_MAX_WR:
+ /* SRQ resize is not supported */
+ break;
+ case IB_SRQ_LIMIT:
+ /* Change the SRQ threshold */
+ if (srq_attr->srq_limit > srq->qplib_srq.max_wqe)
+ return -EINVAL;
+
+ srq->qplib_srq.threshold = srq_attr->srq_limit;
+ rc = bnxt_qplib_modify_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW SRQ failed!\n");
+ return rc;
+ }
+ /* On success, update the shadow */
+ srq->srq_limit = srq_attr->srq_limit;
+
+ if (udata) {
+ /* Build and send response back to udata */
+ rc = bnxt_re_copy_to_udata(rdev, srq, 0, udata);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ dev_err(rdev_to_dev(rdev),
+ "Unsupported srq_attr_mask 0x%x\n", srq_attr_mask);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int bnxt_re_query_srq(struct ib_srq *ib_srq, struct ib_srq_attr *srq_attr)
+{
+ struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
+ ibsrq);
+ struct bnxt_re_dev *rdev = srq->rdev;
+ int rc;
+
+ rc = bnxt_qplib_query_srq(&rdev->qplib_res, &srq->qplib_srq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW SRQ (0x%x) failed! rc = %d\n",
+ srq->qplib_srq.id, rc);
+ return rc;
+ }
+ srq_attr->max_wr = srq->qplib_srq.max_wqe;
+ srq_attr->max_sge = srq->qplib_srq.max_sge;
+ srq_attr->srq_limit = srq->qplib_srq.threshold;
+
+ return 0;
+}
+
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_srq *srq = to_bnxt_re(ib_srq, struct bnxt_re_srq,
+ ibsrq);
+ struct bnxt_qplib_swqe wqe = {};
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&srq->lock, flags);
+ while (wr) {
+ /* Transcribe each ib_recv_wr to qplib_swqe */
+ wqe.num_sge = wr->num_sge;
+ wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+ rc = bnxt_qplib_post_srq_recv(&srq->qplib_srq, &wqe);
+ if (rc) {
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ spin_unlock_irqrestore(&srq->lock, flags);
+
+ return rc;
+}
+
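+/*
+ * Lock both CQs of a QP: the send CQ lock is always taken first and the
+ * receive CQ lock second (when distinct), matching bnxt_re_unlock_cqs().
+ */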
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->scq->cq_lock, flags);
+ if (qp->rcq && qp->rcq != qp->scq)
+ spin_lock(&qp->rcq->cq_lock);
+
+ return flags;
+}
+
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ unsigned long flags)
+{
+ if (qp->rcq && qp->rcq != qp->scq)
+ spin_unlock(&qp->rcq->cq_lock);
+ spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
+/* Queue Pairs */
+static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_re_qp *gsi_sqp;
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_dev *rdev;
+ unsigned long flags;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+
+ /* remove from active qp list */
+ mutex_lock(&rdev->qp_lock);
+ list_del(&gsi_sqp->list);
+ mutex_unlock(&rdev->qp_lock);
+
+ if (gsi_sah) {
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow AH\n");
+ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah,
+ true);
+ if (rc)
+ dev_err(rdev_to_dev(rdev),
+ "Destroy HW AH for shadow QP failed!\n");
+ atomic_dec(&rdev->stats.rsors.ah_count);
+ }
+
+ dev_dbg(rdev_to_dev(rdev), "Destroy the shadow QP\n");
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Destroy Shadow QP failed\n");
+
+ /* Clean the CQ for shadow QP completions */
+ flags = bnxt_re_lock_cqs(gsi_sqp);
+ bnxt_qplib_clean_qp(&gsi_sqp->qplib_qp);
+ bnxt_re_unlock_cqs(gsi_sqp, flags);
+
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &gsi_sqp->qplib_qp);
+ kfree(rdev->gsi_ctx.sqp_tbl);
+ kfree(gsi_sah);
+ kfree(gsi_sqp);
+ rdev->gsi_ctx.gsi_sqp = NULL;
+ rdev->gsi_ctx.gsi_sah = NULL;
+ rdev->gsi_ctx.sqp_tbl = NULL;
+ atomic_dec(&rdev->stats.rsors.qp_count);
+
+ return 0;
+}
+
+static void bnxt_re_dump_debug_stats(struct bnxt_re_dev *rdev, u32 active_qps)
+{
+ u32 total_qp = 0;
+ u64 avg_time = 0;
+ int i;
+
+ if (!rdev->rcfw.sp_perf_stats_enabled)
+ return;
+
+ switch (active_qps) {
+ case 1:
+ /* Potential hint for Test Stop */
+ for (i = 0; i < RCFW_MAX_STAT_INDEX; i++) {
+ if (rdev->rcfw.qp_destroy_stats[i]) {
+ total_qp++;
+ avg_time += rdev->rcfw.qp_destroy_stats[i];
+ }
+ }
+		if (total_qp || avg_time)
+ dev_dbg(rdev_to_dev(rdev),
+ "Perf Debug: %ps Total (%d) QP destroyed in (%ld) msec\n",
+ __builtin_return_address(0), total_qp,
+ (long)jiffies_to_msecs(avg_time));
+ break;
+ case 2:
+ /* Potential hint for Test Start */
+ dev_dbg(rdev_to_dev(rdev),
+ "Perf Debug: %ps active_qps = %d\n",
+ __builtin_return_address(0), active_qps);
+ break;
+ default:
+ /* Potential hint to know latency of QP destroy.
+ * Average time taken for 1K QP Destroy.
+ */
+ if (active_qps > 1024 && !(active_qps % 1024))
+ dev_dbg(rdev_to_dev(rdev),
+ "Perf Debug: %ps Active QP (%d) Watermark (%d)\n",
+ __builtin_return_address(0), active_qps,
+ atomic_read(&rdev->stats.rsors.max_qp_count));
+ break;
+ }
+}
+
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
+{
+ struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ unsigned long flags;
+ u32 active_qps;
+ int rc;
+
+ mutex_lock(&rdev->qp_lock);
+ list_del(&qp->list);
+ active_qps = atomic_dec_return(&rdev->stats.rsors.qp_count);
+ if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RC)
+ atomic_dec(&rdev->stats.rsors.rc_qp_count);
+ else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
+ atomic_dec(&rdev->stats.rsors.ud_qp_count);
+ mutex_unlock(&rdev->qp_lock);
+
+ rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc)
+ dev_err_ratelimited(rdev_to_dev(rdev),
+ "%s id = %d failed rc = %d\n",
+ __func__, qp->qplib_qp.id, rc);
+
+ if (!ib_qp->uobject) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
+ if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
+ rdev->gsi_ctx.gsi_sqp) {
+ bnxt_re_destroy_gsi_sqp(qp);
+ }
+ bnxt_qplib_free_hdr_buf(&rdev->qplib_res, &qp->qplib_qp);
+ }
+
+ if (qp->rumem && !IS_ERR(qp->rumem))
+ ib_umem_release(qp->rumem);
+ if (qp->sumem && !IS_ERR(qp->sumem))
+ ib_umem_release(qp->sumem);
+ kfree(qp);
+
+ bnxt_re_dump_debug_stats(rdev, active_qps);
+
+ return 0;
+}
+
+static u8 __from_ib_qp_type(enum ib_qp_type type)
+{
+ switch (type) {
+ case IB_QPT_GSI:
+ return CMDQ_CREATE_QP1_TYPE_GSI;
+ case IB_QPT_RC:
+ return CMDQ_CREATE_QP_TYPE_RC;
+ case IB_QPT_UD:
+ return CMDQ_CREATE_QP_TYPE_UD;
+ case IB_QPT_RAW_ETHERTYPE:
+ return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
+ default:
+ return IB_QPT_MAX;
+ }
+}
+
+static u16 _get_swqe_sz(int nsge)
+{
+ return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
+}
+
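+/*
+ * A send WQE must hold either the SGE list or the inline payload after
+ * the header, whichever is larger, rounded up to a 32-byte slot.
+ */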
+static int bnxt_re_get_swqe_size(int ilsize, int nsge)
+{
+ u16 wqe_size, calc_ils;
+
+ wqe_size = _get_swqe_sz(nsge);
+ if (ilsize) {
+ calc_ils = (sizeof(struct sq_send_hdr) + ilsize);
+ wqe_size = max_t(int, calc_ils, wqe_size);
+ wqe_size = ALIGN(wqe_size, 32);
+ }
+ return wqe_size;
+}
+
+static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
+ int align, ilsize;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
+ dev_attr = rdev->dev_attr;
+
+ align = sizeof(struct sq_send_hdr);
+ ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+
+ sq->wqe_size = bnxt_re_get_swqe_size(ilsize, sq->max_sge);
+ if (sq->wqe_size > _get_swqe_sz(dev_attr->max_qp_sges))
+ return -EINVAL;
+ /* For Cu/Wh and gen p5 backward compatibility mode
+ * wqe size is fixed to 128 bytes
+ */
+ if (sq->wqe_size < _get_swqe_sz(dev_attr->max_qp_sges) &&
+ qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ sq->wqe_size = _get_swqe_sz(dev_attr->max_qp_sges);
+
+ if (init_attr->cap.max_inline_data) {
+ qplqp->max_inline_data = sq->wqe_size -
+ sizeof(struct sq_send_hdr);
+ init_attr->cap.max_inline_data = qplqp->max_inline_data;
+ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ sq->max_sge = qplqp->max_inline_data /
+ sizeof(struct sq_sge);
+ }
+
+ return 0;
+}
+
+static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_pd *pd, struct bnxt_re_qp *qp,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_sg_info *sginfo;
+ struct bnxt_qplib_qp *qplib_qp;
+ struct bnxt_re_ucontext *cntx;
+ struct ib_ucontext *context;
+ struct bnxt_re_qp_req ureq;
+ struct ib_umem *umem;
+ int rc, bytes = 0;
+ int psn_nume;
+ int psn_sz;
+
+ qplib_qp = &qp->qplib_qp;
+ context = pd->ibpd.uobject->context;
+ cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
+ sginfo = &qplib_qp->sq.sginfo;
+
+ if (udata->inlen < sizeof(ureq))
+ dev_warn(rdev_to_dev(rdev),
+ "Update the library ulen %d klen %d\n",
+ (unsigned int)udata->inlen,
+ (unsigned int)sizeof(ureq));
+
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ return rc;
+
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
+ /* Consider mapping PSN search memory only for RC QPs. */
+ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
+ psn_sz = sizeof(struct sq_msn_search);
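+		/*
+		 * In variable WQE mode, size the PSN table by the number of
+		 * SQ slots rather than the number of WQEs.
+		 */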
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ qplib_qp->sq.max_wqe :
+ ((qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size) /
+ sizeof(struct bnxt_qplib_sge));
+ if (BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
+ psn_nume = roundup_pow_of_two(psn_nume);
+
+ bytes += (psn_nume * psn_sz);
+ }
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get_compat(rdev, context, udata, ureq.qpsva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed with %ld\n",
+ __func__, PTR_ERR(umem));
+ return PTR_ERR(umem);
+ }
+
+ qp->sumem = umem;
+	/* pgsize and pgshft were initialized already. */
+ sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
+ sginfo->npages = ib_umem_num_pages_compat(umem);
+ qplib_qp->qp_handle = ureq.qp_handle;
+
+ if (!qp->qplib_qp.srq) {
+ sginfo = &qplib_qp->rq.sginfo;
+ bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
+ bytes = PAGE_ALIGN(bytes);
+ umem = ib_umem_get_compat(rdev,
+ context, udata, ureq.qprva, bytes,
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(umem)) {
+ dev_err(rdev_to_dev(rdev),
+ "%s: ib_umem_get failed ret =%ld\n",
+ __func__, PTR_ERR(umem));
+ goto rqfail;
+ }
+ qp->rumem = umem;
+		/* pgsize and pgshft were initialized already. */
+ sginfo->sghead = get_ib_umem_sgl(umem, &sginfo->nmap);
+ sginfo->npages = ib_umem_num_pages_compat(umem);
+ }
+
+ qplib_qp->dpi = &cntx->dpi;
+ qplib_qp->is_user = true;
+
+ return 0;
+rqfail:
+ ib_umem_release(qp->sumem);
+ qp->sumem = NULL;
+ qplib_qp->sq.sginfo.sghead = NULL;
+ qplib_qp->sq.sginfo.nmap = 0;
+
+ return PTR_ERR(umem);
+}
+
+static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah(struct bnxt_re_pd *pd,
+ struct bnxt_qplib_res *qp1_res,
+ struct bnxt_qplib_qp *qp1_qp)
+{
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_ah *ah;
+ union ib_gid sgid;
+ int rc;
+
+ ah = kzalloc(sizeof(*ah), GFP_KERNEL);
+ if (!ah) {
+ dev_err(rdev_to_dev(rdev), "Allocate Address Handle failed!\n");
+ return NULL;
+ }
+ ah->rdev = rdev;
+ ah->qplib_ah.pd = &pd->qplib_pd;
+
+ rc = bnxt_re_query_gid(&rdev->ibdev, 1, 0, &sgid);
+ if (rc)
+ goto fail;
+
+	/* Supply dgid with the same data as sgid */
+ memcpy(ah->qplib_ah.dgid.data, &sgid.raw,
+ sizeof(union ib_gid));
+ ah->qplib_ah.sgid_index = 0;
+
+ ah->qplib_ah.traffic_class = 0;
+ ah->qplib_ah.flow_label = 0;
+ ah->qplib_ah.hop_limit = 1;
+ ah->qplib_ah.sl = 0;
+ /* Have DMAC same as SMAC */
+ ether_addr_copy(ah->qplib_ah.dmac, rdev->dev_addr);
+ dev_dbg(rdev_to_dev(rdev), "ah->qplib_ah.dmac = %x:%x:%x:%x:%x:%x\n",
+ ah->qplib_ah.dmac[0], ah->qplib_ah.dmac[1], ah->qplib_ah.dmac[2],
+ ah->qplib_ah.dmac[3], ah->qplib_ah.dmac[4], ah->qplib_ah.dmac[5]);
+
+ rc = bnxt_qplib_create_ah(&rdev->qplib_res, &ah->qplib_ah, true);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate HW AH for Shadow QP failed!\n");
+ goto fail;
+ }
+ dev_dbg(rdev_to_dev(rdev), "AH ID = %d\n", ah->qplib_ah.id);
+ atomic_inc(&rdev->stats.rsors.ah_count);
+
+ return ah;
+fail:
+ kfree(ah);
+ return NULL;
+}
+
+void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *gsi_qp;
+ struct bnxt_re_ah *sah;
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd;
+ int rc;
+
+ if (!rdev)
+ return;
+
+ sah = rdev->gsi_ctx.gsi_sah;
+
+ dev_dbg(rdev_to_dev(rdev), "Updating the AH\n");
+ if (sah) {
+		/* Check if the AH was created with the current MAC address */
+ if (!compare_ether_header(sah->qplib_ah.dmac, rdev->dev_addr)) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Not modifying shadow AH during AH update\n");
+ return;
+ }
+
+ gsi_qp = rdev->gsi_ctx.gsi_qp;
+ ib_pd = gsi_qp->ib_qp.pd;
+ pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
+ &sah->qplib_ah, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to destroy shadow AH during AH update\n");
+ return;
+ }
+ atomic_dec(&rdev->stats.rsors.ah_count);
+ kfree(sah);
+ rdev->gsi_ctx.gsi_sah = NULL;
+
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &gsi_qp->qplib_qp);
+ if (!sah) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to update AH for ShadowQP\n");
+ return;
+ }
+ rdev->gsi_ctx.gsi_sah = sah;
+ atomic_inc(&rdev->stats.rsors.ah_count);
+ }
+}
+
+static struct bnxt_re_qp *bnxt_re_create_shadow_qp(struct bnxt_re_pd *pd,
+ struct bnxt_qplib_res *qp1_res,
+ struct bnxt_qplib_qp *qp1_qp)
+{
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp) {
+ dev_err(rdev_to_dev(rdev), "Allocate internal UD QP failed!\n");
+ return NULL;
+ }
+ qp->rdev = rdev;
+
+ /* Initialize the shadow QP structure from the QP1 values */
+ ether_addr_copy(qp->qplib_qp.smac, rdev->dev_addr);
+ qp->qplib_qp.pd = &pd->qplib_pd;
+ qp->qplib_qp.qp_handle = (u64)&qp->qplib_qp;
+ qp->qplib_qp.type = IB_QPT_UD;
+
+ qp->qplib_qp.max_inline_data = 0;
+ qp->qplib_qp.sig_type = true;
+
+ /* Shadow QP SQ depth should be same as QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size(0, 6);
+ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sge = 2;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.sq.q_full_delta = 1;
+ qp->qplib_qp.sq.sginfo.pgsize = PAGE_SIZE;
+ qp->qplib_qp.sq.sginfo.pgshft = PAGE_SHIFT;
+
+ qp->qplib_qp.scq = qp1_qp->scq;
+ qp->qplib_qp.rcq = qp1_qp->rcq;
+
+ qp->qplib_qp.rq.wqe_size = _max_rwqe_sz(6); /* 128 Byte wqe size */
+ qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+ qp->qplib_qp.rq.sginfo.pgsize = PAGE_SIZE;
+ qp->qplib_qp.rq.sginfo.pgshft = PAGE_SHIFT;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.rq.q_full_delta = 1;
+ qp->qplib_qp.mtu = qp1_qp->mtu;
+ qp->qplib_qp.dpi = &rdev->dpi_privileged;
+
+ rc = bnxt_qplib_alloc_hdr_buf(qp1_res, &qp->qplib_qp, 0,
+ BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_create_qp(qp1_res, &qp->qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
+ goto qp_fail;
+ }
+
+ dev_dbg(rdev_to_dev(rdev), "Created shadow QP with ID = %d\n",
+ qp->qplib_qp.id);
+ spin_lock_init(&qp->sq_lock);
+ INIT_LIST_HEAD(&qp->list);
+ mutex_lock(&rdev->qp_lock);
+ list_add_tail(&qp->list, &rdev->qp_list);
+ atomic_inc(&rdev->stats.rsors.qp_count);
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+qp_fail:
+ bnxt_qplib_free_hdr_buf(qp1_res, &qp->qplib_qp);
+fail:
+ kfree(qp);
+ return NULL;
+}
+
+static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr, void *cntx)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *rq;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ rq = &qplqp->rq;
+ dev_attr = rdev->dev_attr;
+
+ if (init_attr->srq) {
+ struct bnxt_re_srq *srq;
+
+ srq = to_bnxt_re(init_attr->srq, struct bnxt_re_srq, ibsrq);
+ if (!srq) {
+ dev_err(rdev_to_dev(rdev), "SRQ not found\n");
+ return -EINVAL;
+ }
+ qplqp->srq = &srq->qplib_srq;
+ rq->max_wqe = 0;
+ } else {
+ rq->max_sge = init_attr->cap.max_recv_sge;
+ if (rq->max_sge > dev_attr->max_qp_sges)
+ rq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_recv_sge = rq->max_sge;
+ rq->wqe_size = bnxt_re_get_rwqe_size(qplqp, rq->max_sge,
+ dev_attr->max_qp_sges);
+
+		/*
+		 * Allocate 1 more than what's provided so posting max doesn't
+		 * mean empty.
+		 */
+ entries = init_attr->cap.max_recv_wr + 1;
+ entries = bnxt_re_init_depth(entries, cntx);
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+ rq->q_full_delta = 0;
+ rq->sginfo.pgsize = PAGE_SIZE;
+ rq->sginfo.pgshft = PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+}
+
+static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ void *cntx)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_q *sq;
+ int diff = 0;
+ int entries;
+ int rc;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ sq = &qplqp->sq;
+ dev_attr = rdev->dev_attr;
+
+ sq->max_sge = init_attr->cap.max_send_sge;
+ if (sq->max_sge > dev_attr->max_qp_sges) {
+ sq->max_sge = dev_attr->max_qp_sges;
+ init_attr->cap.max_send_sge = sq->max_sge;
+ }
+ rc = bnxt_re_setup_swqe_size(qp, init_attr);
+ if (rc)
+ return rc;
+	/*
+	 * Change the SQ depth if the user has requested a minimum via
+	 * configfs. Only supported for kernel consumers. min_tx_depth is
+	 * set to 4096 to handle the iSER SQ-full condition seen on most
+	 * newer OS distros.
+	 */
+ entries = init_attr->cap.max_send_wr;
+ if (!cntx && rdev->min_tx_depth && init_attr->qp_type != IB_QPT_GSI) {
+ /*
+ * If users specify any value greater than 1 use min_tx_depth
+ * provided by user for comparison. Else, compare it with the
+ * BNXT_RE_MIN_KERNEL_QP_TX_DEPTH and adjust it accordingly.
+ */
+ if (rdev->min_tx_depth > 1 && entries < rdev->min_tx_depth)
+ entries = rdev->min_tx_depth;
+ else if (entries < BNXT_RE_MIN_KERNEL_QP_TX_DEPTH)
+ entries = BNXT_RE_MIN_KERNEL_QP_TX_DEPTH;
+ }
+ diff = bnxt_re_get_diff(cntx, rdev->chip_ctx);
+ entries = bnxt_re_init_depth(entries + diff + 1, cntx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
+ sq->q_full_delta = diff + 1;
+	/*
+	 * Reserve one slot for the phantom WQE. The application can post
+	 * one extra entry in this case, but we allow it to avoid an
+	 * unexpected queue-full condition.
+	 */
+ sq->q_full_delta -= 1; /* becomes 0 for gen-p5 */
+ sq->sginfo.pgsize = PAGE_SIZE;
+ sq->sginfo.pgshft = PAGE_SHIFT;
+ return 0;
+}
+
+static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ struct ib_qp_init_attr *init_attr,
+ void *cntx)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ int entries;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
+ entries = init_attr->cap.max_send_wr + 1;
+ entries = bnxt_re_init_depth(entries, cntx);
+ qplqp->sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+ qplqp->sq.q_full_delta = qplqp->sq.max_wqe -
+ init_attr->cap.max_send_wr;
+ qplqp->sq.max_sge++; /* Need one extra sge to put UD header */
+ if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
+ qplqp->sq.max_sge = dev_attr->max_qp_sges;
+ }
+}
+
+static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_re_gsi_context *gsi_ctx;
+ int qptype;
+
+ chip_ctx = rdev->chip_ctx;
+ gsi_ctx = &rdev->gsi_ctx;
+
+ qptype = __from_ib_qp_type(init_attr->qp_type);
+ if (qptype == IB_QPT_MAX) {
+ dev_err(rdev_to_dev(rdev), "QP type 0x%x not supported\n",
+ qptype);
+ qptype = -EINVAL;
+ goto out;
+ }
+
+ if (_is_chip_gen_p5_p7(chip_ctx) && init_attr->qp_type == IB_QPT_GSI) {
+ /* For Thor always force UD mode. */
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+ gsi_ctx->gsi_qp_mode = BNXT_RE_GSI_MODE_UD;
+ }
+out:
+ return qptype;
+}
+
+static int bnxt_re_init_qp_wqe_mode(struct bnxt_re_dev *rdev)
+{
+ return rdev->chip_ctx->modes.wqe_mode;
+}
+
+static int bnxt_re_init_qp_attr(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *cntx = NULL;
+ struct ib_ucontext *context;
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc = 0, qptype;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ dev_attr = rdev->dev_attr;
+
+ if (udata) {
+ context = pd->ibpd.uobject->context;
+ cntx = to_bnxt_re(context, struct bnxt_re_ucontext, ibucontext);
+ }
+
+ /* Setup misc params */
+ qplqp->is_user = false;
+ qplqp->pd = &pd->qplib_pd;
+ qplqp->qp_handle = (u64)qplqp;
+	qplqp->sig_type = (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR);
+ qptype = bnxt_re_init_qp_type(rdev, init_attr);
+ if (qptype < 0) {
+ rc = qptype;
+ goto out;
+ }
+ qplqp->type = (u8)qptype;
+ qplqp->wqe_mode = bnxt_re_init_qp_wqe_mode(rdev);
+ ether_addr_copy(qplqp->smac, rdev->dev_addr);
+
+ if (init_attr->qp_type == IB_QPT_RC) {
+ qplqp->max_rd_atomic = dev_attr->max_qp_rd_atom;
+ qplqp->max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
+ }
+ qplqp->mtu = ib_mtu_enum_to_int(iboe_get_mtu(if_getmtu(rdev->netdev)));
+ qplqp->dpi = &rdev->dpi_privileged; /* Doorbell page */
+ if (init_attr->create_flags) {
+ dev_dbg(rdev_to_dev(rdev),
+ "QP create flags 0x%x not supported\n",
+ init_attr->create_flags);
+ return -EOPNOTSUPP;
+ }
+
+ /* Setup CQs */
+ if (init_attr->send_cq) {
+ cq = to_bnxt_re(init_attr->send_cq, struct bnxt_re_cq, ibcq);
+ if (!cq) {
+ dev_err(rdev_to_dev(rdev), "Send CQ not found\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ qplqp->scq = &cq->qplib_cq;
+ qp->scq = cq;
+ }
+
+ if (init_attr->recv_cq) {
+ cq = to_bnxt_re(init_attr->recv_cq, struct bnxt_re_cq, ibcq);
+ if (!cq) {
+ dev_err(rdev_to_dev(rdev), "Receive CQ not found\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ qplqp->rcq = &cq->qplib_cq;
+ qp->rcq = cq;
+ }
+
+ /* Setup RQ/SRQ */
+ rc = bnxt_re_init_rq_attr(qp, init_attr, cntx);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_rq_attr(qp);
+
+ /* Setup SQ */
+ rc = bnxt_re_init_sq_attr(qp, init_attr, cntx);
+ if (rc)
+ goto out;
+ if (init_attr->qp_type == IB_QPT_GSI)
+ bnxt_re_adjust_gsi_sq_attr(qp, init_attr, cntx);
+
+ if (udata) /* This will update DPI and qp_handle */
+ rc = bnxt_re_init_user_qp(rdev, pd, qp, udata);
+out:
+ return rc;
+}
+
+static int bnxt_re_create_shadow_gsi(struct bnxt_re_qp *qp,
+ struct bnxt_re_pd *pd)
+{
+ struct bnxt_re_sqp_entries *sqp_tbl = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *sqp;
+ struct bnxt_re_ah *sah;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ /* Create a shadow QP to handle the QP1 traffic */
+	sqp_tbl = kcalloc(BNXT_RE_MAX_GSI_SQP_ENTRIES, sizeof(*sqp_tbl),
+			  GFP_KERNEL);
+ if (!sqp_tbl)
+ return -ENOMEM;
+ rdev->gsi_ctx.sqp_tbl = sqp_tbl;
+
+ sqp = bnxt_re_create_shadow_qp(pd, &rdev->qplib_res, &qp->qplib_qp);
+ if (!sqp) {
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create Shadow QP for QP1\n");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sqp = sqp;
+
+ sqp->rcq = qp->rcq;
+ sqp->scq = qp->scq;
+ sah = bnxt_re_create_shadow_qp_ah(pd, &rdev->qplib_res,
+ &qp->qplib_qp);
+ if (!sah) {
+ bnxt_qplib_destroy_qp(&rdev->qplib_res,
+ &sqp->qplib_qp);
+ rc = -ENODEV;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create AH entry for ShadowQP\n");
+ goto out;
+ }
+ rdev->gsi_ctx.gsi_sah = sah;
+
+ return 0;
+out:
+ kfree(sqp_tbl);
+ return rc;
+}
+
+static int __get_rq_hdr_buf_size(u8 gsi_mode)
+{
+ return (gsi_mode == BNXT_RE_GSI_MODE_ALL) ?
+ BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 :
+ BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE;
+}
+
+static int __get_sq_hdr_buf_size(u8 gsi_mode)
+{
+ return (gsi_mode != BNXT_RE_GSI_MODE_ROCE_V1) ?
+ BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 :
+ BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE;
+}
+
+static int bnxt_re_create_gsi_qp(struct bnxt_re_qp *qp, struct bnxt_re_pd *pd)
+{
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_qplib_res *res;
+ struct bnxt_re_dev *rdev;
+ u32 sstep, rstep;
+ u8 gsi_mode;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ qplqp = &qp->qplib_qp;
+ res = &rdev->qplib_res;
+ gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
+
+ rstep = __get_rq_hdr_buf_size(gsi_mode);
+ sstep = __get_sq_hdr_buf_size(gsi_mode);
+ rc = bnxt_qplib_alloc_hdr_buf(res, qplqp, sstep, rstep);
+ if (rc)
+ goto out;
+
+ rc = bnxt_qplib_create_qp1(res, qplqp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "create HW QP1 failed!\n");
+ goto out;
+ }
+
+ if (gsi_mode == BNXT_RE_GSI_MODE_ALL)
+ rc = bnxt_re_create_shadow_gsi(qp, pd);
+out:
+ return rc;
+}
+
+static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
+ struct ib_qp_init_attr *init_attr,
+ struct bnxt_qplib_dev_attr *dev_attr)
+{
+ bool rc = true;
+ int ilsize;
+
+ ilsize = ALIGN(init_attr->cap.max_inline_data, sizeof(struct sq_sge));
+ if ((init_attr->cap.max_send_wr > dev_attr->max_qp_wqes) ||
+ (init_attr->cap.max_recv_wr > dev_attr->max_qp_wqes) ||
+ (init_attr->cap.max_send_sge > dev_attr->max_qp_sges) ||
+ (init_attr->cap.max_recv_sge > dev_attr->max_qp_sges) ||
+ (ilsize > dev_attr->max_inline_data)) {
+ dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded! "
+ "0x%x/0x%x 0x%x/0x%x 0x%x/0x%x "
+ "0x%x/0x%x 0x%x/0x%x\n",
+ init_attr->cap.max_send_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_recv_wr, dev_attr->max_qp_wqes,
+ init_attr->cap.max_send_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_recv_sge, dev_attr->max_qp_sges,
+ init_attr->cap.max_inline_data,
+ dev_attr->max_inline_data);
+ rc = false;
+ }
+ return rc;
+}
+
+static inline struct bnxt_re_qp *
+__get_qp_from_qp_in(struct ib_pd *qp_in,
+		    struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ dev_err(rdev_to_dev(rdev), "Allocate QP failed!\n");
+ return qp;
+}
+
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd;
+ struct ib_pd *ib_pd = qp_in;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_dev *rdev;
+ u32 active_qps, tmp_qps;
+ struct bnxt_re_qp *qp;
+ int rc;
+
+ pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ rdev = pd->rdev;
+ dev_attr = rdev->dev_attr;
+ if (rdev->mod_exit) {
+ rc = -EIO;
+ dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
+ goto exit;
+ }
+
+ if (atomic_read(&rdev->stats.rsors.qp_count) >= dev_attr->max_qp) {
+ dev_err(rdev_to_dev(rdev), "Create QP failed - max exceeded(QPs Alloc'd %u of max %u)\n",
+ atomic_read(&rdev->stats.rsors.qp_count), dev_attr->max_qp);
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ rc = bnxt_re_test_qp_limits(rdev, qp_init_attr, dev_attr);
+ if (!rc) {
+ rc = -EINVAL;
+ goto exit;
+ }
+ qp = __get_qp_from_qp_in(qp_in, rdev);
+ if (!qp) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ qp->rdev = rdev;
+
+ rc = bnxt_re_init_qp_attr(qp, pd, qp_init_attr, udata);
+ if (rc)
+ goto fail;
+
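+	/* Pre-P5/P7 chips need the dedicated GSI (QP1) creation path. */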
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+ !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_re_create_gsi_qp(qp, pd);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+ if (rc)
+ goto fail;
+ } else {
+ rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "create HW QP failed!\n");
+ goto free_umem;
+ }
+
+ if (udata) {
+ struct bnxt_re_qp_resp resp;
+
+ resp.qpid = qp->qplib_qp.id;
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc)
+ goto qp_destroy;
+ }
+ }
+
+ qp->ib_qp.qp_num = qp->qplib_qp.id;
+ if (qp_init_attr->qp_type == IB_QPT_GSI)
+ rdev->gsi_ctx.gsi_qp = qp;
+ spin_lock_init(&qp->sq_lock);
+ spin_lock_init(&qp->rq_lock);
+ INIT_LIST_HEAD(&qp->list);
+ mutex_lock(&rdev->qp_lock);
+ list_add_tail(&qp->list, &rdev->qp_list);
+ mutex_unlock(&rdev->qp_lock);
+ atomic_inc(&rdev->stats.rsors.qp_count);
+ active_qps = atomic_read(&rdev->stats.rsors.qp_count);
+ if (active_qps > atomic_read(&rdev->stats.rsors.max_qp_count))
+ atomic_set(&rdev->stats.rsors.max_qp_count, active_qps);
+
+ bnxt_re_dump_debug_stats(rdev, active_qps);
+
+ /* Get the counters for RC QPs and UD QPs */
+ if (qp_init_attr->qp_type == IB_QPT_RC) {
+ tmp_qps = atomic_inc_return(&rdev->stats.rsors.rc_qp_count);
+ if (tmp_qps > atomic_read(&rdev->stats.rsors.max_rc_qp_count))
+ atomic_set(&rdev->stats.rsors.max_rc_qp_count, tmp_qps);
+ } else if (qp_init_attr->qp_type == IB_QPT_UD) {
+ tmp_qps = atomic_inc_return(&rdev->stats.rsors.ud_qp_count);
+ if (tmp_qps > atomic_read(&rdev->stats.rsors.max_ud_qp_count))
+ atomic_set(&rdev->stats.rsors.max_ud_qp_count, tmp_qps);
+ }
+
+ return &qp->ib_qp;
+
+qp_destroy:
+ bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+ if (udata) {
+ if (qp->rumem && !IS_ERR(qp->rumem))
+ ib_umem_release(qp->rumem);
+ if (qp->sumem && !IS_ERR(qp->sumem))
+ ib_umem_release(qp->sumem);
+ }
+fail:
+ kfree(qp);
+exit:
+ return ERR_PTR(rc);
+}
+
+static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp1_qp,
+ int qp_attr_mask)
+{
+ struct bnxt_re_qp *qp = rdev->gsi_ctx.gsi_sqp;
+ int rc = 0;
+
+ if (qp_attr_mask & IB_QP_STATE) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
+ qp->qplib_qp.state = qp1_qp->qplib_qp.state;
+ }
+ if (qp_attr_mask & IB_QP_PKEY_INDEX) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ qp->qplib_qp.pkey_index = qp1_qp->qplib_qp.pkey_index;
+ }
+
+ if (qp_attr_mask & IB_QP_QKEY) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+ /* Using a Random QKEY */
+ qp->qplib_qp.qkey = BNXT_RE_QP_RANDOM_QKEY;
+ }
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
+ qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
+ }
+
+ rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Modify Shadow QP for QP1 failed\n");
+ return rc;
+}
+
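+/* Extract the IPv4 address from the low four bytes of a v4-mapped GID. */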
+static u32 ipv4_from_gid(u8 *gid)
+{
+ return (gid[15] << 24 | gid[14] << 16 | gid[13] << 8 | gid[12]);
+}
+
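+/*
+ * Derive a flow-unique RoCEv2 UDP source port from a CRC16 over the
+ * DMAC, SMAC, destination/source IP addresses and destination QPN.
+ */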
+static u16 get_source_port(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp)
+{
+ u8 ip_off, data[48], smac[ETH_ALEN];
+ u16 crc = 0, buf_len = 0, i;
+ u8 addr_len;
+ u32 qpn;
+
+ if (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6) {
+ addr_len = 6;
+ ip_off = 10;
+ } else {
+ addr_len = 4;
+ ip_off = 12;
+ }
+
+ memcpy(smac, qp->qplib_qp.smac, ETH_ALEN);
+
+ memset(data, 0, 48);
+ memcpy(data, qp->qplib_qp.ah.dmac, ETH_ALEN);
+ buf_len += ETH_ALEN;
+
+ memcpy(data + buf_len, smac, ETH_ALEN);
+ buf_len += ETH_ALEN;
+
+ memcpy(data + buf_len, qp->qplib_qp.ah.dgid.data + ip_off, addr_len);
+ buf_len += addr_len;
+
+ memcpy(data + buf_len, qp->qp_info_entry.sgid.raw + ip_off, addr_len);
+ buf_len += addr_len;
+
+ qpn = htonl(qp->qplib_qp.dest_qpn);
+ memcpy(data + buf_len, (u8 *)&qpn + 1, 3);
+ buf_len += 3;
+
+ for (i = 0; i < buf_len; i++)
+ crc = crc16(crc, (data + i), 1);
+
+ return crc;
+}
+
+static void bnxt_re_update_qp_info(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+ u16 type;
+
+ type = __from_hw_to_ib_qp_type(qp->qplib_qp.type);
+
+ /* User-space can extract ip address with sgid_index. */
+ if (ipv6_addr_v4mapped((struct in6_addr *)&qp->qplib_qp.ah.dgid)) {
+ qp->qp_info_entry.s_ip.ipv4_addr = ipv4_from_gid(qp->qp_info_entry.sgid.raw);
+ qp->qp_info_entry.d_ip.ipv4_addr = ipv4_from_gid(qp->qplib_qp.ah.dgid.data);
+ } else {
+ memcpy(&qp->qp_info_entry.s_ip.ipv6_addr, qp->qp_info_entry.sgid.raw,
+ sizeof(qp->qp_info_entry.s_ip.ipv6_addr));
+ memcpy(&qp->qp_info_entry.d_ip.ipv6_addr, qp->qplib_qp.ah.dgid.data,
+ sizeof(qp->qp_info_entry.d_ip.ipv6_addr));
+ }
+
+ if (type == IB_QPT_RC &&
+ (qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4 ||
+ qp->qplib_qp.nw_type == CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6)) {
+ qp->qp_info_entry.s_port = get_source_port(rdev, qp);
+ }
+ qp->qp_info_entry.d_port = BNXT_RE_QP_DEST_PORT;
+}
+
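+/*
+ * For kernel QPs, move the QP onto the flush list when it enters the
+ * error state (kicking CQ handlers for any outstanding work) and take
+ * it back off when the QP is reset.
+ */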
+static void bnxt_qplib_manage_flush_qp(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_q *rq, *sq;
+ unsigned long flags;
+
+ if (qp->sumem)
+ return;
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ rq = &qp->qplib_qp.rq;
+ sq = &qp->qplib_qp.sq;
+
+ dev_dbg(rdev_to_dev(qp->rdev),
+ "Move QP = %p to flush list\n", qp);
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+
+ if (sq->hwq.prod != sq->hwq.cons)
+ bnxt_re_handle_cqn(&qp->scq->qplib_cq);
+
+ if (qp->rcq && (qp->rcq != qp->scq) &&
+ (rq->hwq.prod != rq->hwq.cons))
+ bnxt_re_handle_cqn(&qp->rcq->qplib_cq);
+ }
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_dbg(rdev_to_dev(qp->rdev),
+ "Move QP = %p out of flush list\n", qp);
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+}
+
+bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state,
+ enum ib_qp_state next_state,
+ enum ib_qp_type type,
+ enum ib_qp_attr_mask mask)
+{
+ return (ib_modify_qp_is_ok(cur_state, next_state,
+ type, mask));
+}
+
+int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata)
+{
+ enum ib_qp_state curr_qp_state, new_qp_state;
+ struct bnxt_re_modify_qp_ex_resp resp = {};
+ struct bnxt_re_modify_qp_ex_req ureq = {};
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_qplib_ppp *ppp = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_qp *qp;
+ struct ib_gid_attr *sgid_attr;
+ struct ib_gid_attr gid_attr;
+ union ib_gid sgid, *gid_ptr = NULL;
+ u8 nw_type;
+ int rc, entries, status;
+ bool is_copy_to_udata = false;
+ bool is_qpmtu_high = false;
+
+ qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ rdev = qp->rdev;
+ dev_attr = rdev->dev_attr;
+
+ qp->qplib_qp.modify_flags = 0;
+ ppp = &qp->qplib_qp.ppp;
+ if (qp_attr_mask & IB_QP_STATE) {
+ curr_qp_state = __to_ib_qp_state(qp->qplib_qp.cur_qp_state);
+ new_qp_state = qp_attr->qp_state;
+ if (!ib_modify_qp_is_ok_compat(curr_qp_state, new_qp_state,
+ ib_qp->qp_type, qp_attr_mask)) {
+ dev_err(rdev_to_dev(rdev),"invalid attribute mask=0x%x"
+ " specified for qpn=0x%x of type=0x%x"
+ " current_qp_state=0x%x, new_qp_state=0x%x\n",
+ qp_attr_mask, ib_qp->qp_num, ib_qp->qp_type,
+ curr_qp_state, new_qp_state);
+ return -EINVAL;
+ }
+ dev_dbg(rdev_to_dev(rdev), "%s:%d INFO attribute mask=0x%x qpn=0x%x "
+ "of type=0x%x current_qp_state=0x%x, new_qp_state=0x%x\n",
+ __func__, __LINE__, qp_attr_mask, ib_qp->qp_num,
+ ib_qp->qp_type, curr_qp_state, new_qp_state);
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_STATE;
+ qp->qplib_qp.state = __from_ib_qp_state(qp_attr->qp_state);
+
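+		/*
+		 * On a RESET -> INIT transition a user library may request
+		 * ping-pong push (PPP) mode via the extended modify_qp request.
+		 */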
+ if (udata && curr_qp_state == IB_QPS_RESET &&
+ new_qp_state == IB_QPS_INIT) {
+ if (!ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ureq.comp_mask &
+ BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK) {
+ ppp->req = BNXT_QPLIB_PPP_REQ;
+ ppp->dpi = ureq.dpi;
+ }
+ }
+ }
+ }
+ if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_EN_SQD_ASYNC_NOTIFY;
+ qp->qplib_qp.en_sqd_async_notify = true;
+ }
+ if (qp_attr_mask & IB_QP_ACCESS_FLAGS) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS;
+ qp->qplib_qp.access =
+ __from_ib_access_flags(qp_attr->qp_access_flags);
+ /* LOCAL_WRITE access must be set to allow RC receive */
+ qp->qplib_qp.access |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_WRITE;
+ qp->qplib_qp.access |= CMDQ_MODIFY_QP_ACCESS_REMOTE_READ;
+ }
+ if (qp_attr_mask & IB_QP_PKEY_INDEX) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PKEY;
+ qp->qplib_qp.pkey_index = qp_attr->pkey_index;
+ }
+ if (qp_attr_mask & IB_QP_QKEY) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_QKEY;
+ qp->qplib_qp.qkey = qp_attr->qkey;
+ }
+ if (qp_attr_mask & IB_QP_AV) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
+ CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
+ CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
+ CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
+ memcpy(qp->qplib_qp.ah.dgid.data, qp_attr->ah_attr.grh.dgid.raw,
+ sizeof(qp->qplib_qp.ah.dgid.data));
+ qp->qplib_qp.ah.flow_label = qp_attr->ah_attr.grh.flow_label;
+ qp->qplib_qp.ah.sgid_index = _get_sgid_index(rdev,
+ qp_attr->ah_attr.grh.sgid_index);
+ qp->qplib_qp.ah.host_sgid_index = qp_attr->ah_attr.grh.sgid_index;
+ qp->qplib_qp.ah.hop_limit = qp_attr->ah_attr.grh.hop_limit;
+ qp->qplib_qp.ah.traffic_class =
+ qp_attr->ah_attr.grh.traffic_class;
+ qp->qplib_qp.ah.sl = qp_attr->ah_attr.sl;
+ ether_addr_copy(qp->qplib_qp.ah.dmac, ROCE_DMAC(&qp_attr->ah_attr));
+ sgid_attr = &gid_attr;
+ status = bnxt_re_get_cached_gid(&rdev->ibdev, 1,
+ qp_attr->ah_attr.grh.sgid_index,
+ &sgid, &sgid_attr,
+ &qp_attr->ah_attr.grh, NULL);
+ if (!status)
+ if_rele(sgid_attr->ndev);
+ gid_ptr = &sgid;
+ if (sgid_attr->ndev) {
+ memcpy(qp->qplib_qp.smac, rdev->dev_addr,
+ ETH_ALEN);
+ nw_type = bnxt_re_gid_to_network_type(sgid_attr, &sgid);
+ dev_dbg(rdev_to_dev(rdev),
+ "Connection using the nw_type %d\n", nw_type);
+ switch (nw_type) {
+ case RDMA_NETWORK_IPV4:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV4;
+ break;
+ case RDMA_NETWORK_IPV6:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV2_IPV6;
+ break;
+ default:
+ qp->qplib_qp.nw_type =
+ CMDQ_MODIFY_QP_NETWORK_TYPE_ROCEV1;
+ break;
+ }
+ }
+ memcpy(&qp->qp_info_entry.sgid, gid_ptr, sizeof(qp->qp_info_entry.sgid));
+ }
+
+ /* MTU settings allowed only during INIT -> RTR */
+ if (qp_attr->qp_state == IB_QPS_RTR) {
+ bnxt_re_init_qpmtu(qp, if_getmtu(rdev->netdev), qp_attr_mask, qp_attr,
+ &is_qpmtu_high);
+ if (udata && !ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
+ if (ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK) {
+ resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PATH_MTU_MASK;
+ resp.path_mtu = qp->qplib_qp.mtu;
+ is_copy_to_udata = true;
+ } else if (is_qpmtu_high) {
+ dev_err(rdev_to_dev(rdev), "qp %#x invalid mtu\n",
+ qp->qplib_qp.id);
+ return -EINVAL;
+ }
+ }
+ }
+
+ if (qp_attr_mask & IB_QP_TIMEOUT) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT;
+ qp->qplib_qp.timeout = qp_attr->timeout;
+ }
+ if (qp_attr_mask & IB_QP_RETRY_CNT) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT;
+ qp->qplib_qp.retry_cnt = qp_attr->retry_cnt;
+ }
+ if (qp_attr_mask & IB_QP_RNR_RETRY) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY;
+ qp->qplib_qp.rnr_retry = qp_attr->rnr_retry;
+ }
+ if (qp_attr_mask & IB_QP_MIN_RNR_TIMER) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER;
+ qp->qplib_qp.min_rnr_timer = qp_attr->min_rnr_timer;
+ }
+ if (qp_attr_mask & IB_QP_RQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN;
+ qp->qplib_qp.rq.psn = qp_attr->rq_psn;
+ }
+ if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC;
+ /* Cap the max_rd_atomic to device max */
+ if (qp_attr->max_rd_atomic > dev_attr->max_qp_rd_atom)
+ dev_dbg(rdev_to_dev(rdev),
+ "max_rd_atomic requested %d is > device max %d\n",
+ qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
+ qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic,
+ dev_attr->max_qp_rd_atom);
+ }
+ if (qp_attr_mask & IB_QP_SQ_PSN) {
+ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN;
+ qp->qplib_qp.sq.psn = qp_attr->sq_psn;
+ }
+ if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+ if (qp_attr->max_dest_rd_atomic >
+ dev_attr->max_qp_init_rd_atom) {
+ dev_err(rdev_to_dev(rdev),
+ "max_dest_rd_atomic requested %d is > device max %d\n",
+ qp_attr->max_dest_rd_atomic,
+ dev_attr->max_qp_init_rd_atom);
+ return -EINVAL;
+ }
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC;
+ qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic;
+ }
+ if (qp_attr_mask & IB_QP_CAP) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SIZE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SIZE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SQ_SGE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_SGE |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_INLINE_DATA;
+ if ((qp_attr->cap.max_send_wr >= dev_attr->max_qp_wqes) ||
+ (qp_attr->cap.max_recv_wr >= dev_attr->max_qp_wqes) ||
+ (qp_attr->cap.max_send_sge >= dev_attr->max_qp_sges) ||
+ (qp_attr->cap.max_recv_sge >= dev_attr->max_qp_sges) ||
+ (qp_attr->cap.max_inline_data >=
+ dev_attr->max_inline_data)) {
+ dev_err(rdev_to_dev(rdev),
+ "Create QP failed - max exceeded\n");
+ return -EINVAL;
+ }
+		entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
+		entries = min_t(u32, entries, dev_attr->max_qp_wqes);
+ qp->qplib_qp.sq.max_wqe = entries;
+ qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
+ qp_attr->cap.max_send_wr;
+		/*
+		 * Reserve one slot for the phantom WQE. An application may
+		 * post one extra entry in this case; allow it to avoid an
+		 * unexpected queue-full condition.
+		 */
+ qp->qplib_qp.sq.q_full_delta -= 1;
+ qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
+ if (qp->qplib_qp.rq.max_wqe) {
+ entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
+ if (entries > dev_attr->max_qp_wqes)
+ entries = dev_attr->max_qp_wqes;
+ qp->qplib_qp.rq.max_wqe = entries;
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_attr->cap.max_recv_wr;
+ qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
+ } else {
+ /* An SRQ was configured at create time; ignore the RQ caps */
+ }
+ }
+ if (qp_attr_mask & IB_QP_DEST_QPN) {
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID;
+ qp->qplib_qp.dest_qpn = qp_attr->dest_qp_num;
+ }
+
+ rc = bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW QP failed!\n");
+ return rc;
+ }
+ if (qp_attr_mask & IB_QP_STATE)
+ bnxt_qplib_manage_flush_qp(qp);
+ if ((ureq.comp_mask & BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN_MASK) &&
+ (ppp->st_idx_en & CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED)) {
+ resp.comp_mask |= BNXT_RE_COMP_MASK_MQP_EX_PPP_REQ_EN;
+ resp.ppp_st_idx = ppp->st_idx_en >>
+ BNXT_QPLIB_PPP_ST_IDX_SHIFT;
+ is_copy_to_udata = true;
+ }
+
+ if (is_copy_to_udata) {
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc)
+ return rc;
+ }
+
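+ /* Propagate the modify to the shadow GSI QP, if one exists */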
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL &&
+ rdev->gsi_ctx.gsi_sqp)
+ rc = bnxt_re_modify_shadow_qp(rdev, qp, qp_attr_mask);
+ /*
+ * Update the QP info maintained by the driver.
+ */
+ bnxt_re_update_qp_info(rdev, qp);
+ return rc;
+}
+
+int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
+{
+ struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_qp *qplib_qp;
+ int rc;
+
+ qplib_qp = kcalloc(1, sizeof(*qplib_qp), GFP_KERNEL);
+ if (!qplib_qp)
+ return -ENOMEM;
+
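+ /* Use a scratch qplib_qp so the query does not disturb the cached QP state */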
+ qplib_qp->id = qp->qplib_qp.id;
+ qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
+
+ rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Query HW QP (0x%x) failed! rc = %d\n",
+ qplib_qp->id, rc);
+ goto free_mem;
+ }
+ qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
+ qp_attr->cur_qp_state = __to_ib_qp_state(qplib_qp->cur_qp_state);
+ qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
+ qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
+ qp_attr->pkey_index = qplib_qp->pkey_index;
+ qp_attr->qkey = qplib_qp->qkey;
+ memcpy(qp_attr->ah_attr.grh.dgid.raw, qplib_qp->ah.dgid.data,
+ sizeof(qplib_qp->ah.dgid.data));
+ qp_attr->ah_attr.grh.flow_label = qplib_qp->ah.flow_label;
+ qp_attr->ah_attr.grh.sgid_index = qplib_qp->ah.host_sgid_index;
+ qp_attr->ah_attr.grh.hop_limit = qplib_qp->ah.hop_limit;
+ qp_attr->ah_attr.grh.traffic_class = qplib_qp->ah.traffic_class;
+ qp_attr->ah_attr.sl = qplib_qp->ah.sl;
+ ether_addr_copy(ROCE_DMAC(&qp_attr->ah_attr), qplib_qp->ah.dmac);
+ qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
+ qp_attr->timeout = qplib_qp->timeout;
+ qp_attr->retry_cnt = qplib_qp->retry_cnt;
+ qp_attr->rnr_retry = qplib_qp->rnr_retry;
+ qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
+ qp_attr->rq_psn = qplib_qp->rq.psn;
+ qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
+ qp_attr->sq_psn = qplib_qp->sq.psn;
+ qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
+ qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
+ IB_SIGNAL_REQ_WR;
+ qp_attr->dest_qp_num = qplib_qp->dest_qpn;
+
+ qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
+ qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
+ qp_attr->cap.max_recv_wr = qp->qplib_qp.rq.max_wqe;
+ qp_attr->cap.max_recv_sge = qp->qplib_qp.rq.max_sge;
+ qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
+ qp_init_attr->cap = qp_attr->cap;
+
+free_mem:
+ kfree(qplib_qp);
+ return rc;
+}
+
+/* Builders */
+
+/* For Raw, the application is responsible to build the entire packet */
+static void bnxt_re_build_raw_send(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ switch (wr->send_flags) {
+ case IB_SEND_IP_CSUM:
+ wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
+ break;
+ default:
+ /* Let the HW compute and append the RoCE iCRC */
+ wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
+ break;
+ }
+}
+
+/* For QP1, the driver must build the entire RoCE (v1/v2) packet hdr
+ * according to the sgid and AV
+ */
+static int bnxt_re_build_qp1_send(struct bnxt_re_qp *qp, const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe, int payload_size)
+{
+ struct bnxt_re_ah *ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah,
+ ibah);
+ struct bnxt_qplib_ah *qplib_ah = &ah->qplib_ah;
+ struct bnxt_qplib_sge sge;
+ int i, rc = 0;
+ union ib_gid sgid;
+ u16 vlan_id;
+ u8 *ptmac;
+ void *buf;
+
+ memset(&qp->qp1_hdr, 0, sizeof(qp->qp1_hdr));
+
+ /* Get sgid */
+ rc = bnxt_re_query_gid(&qp->rdev->ibdev, 1, qplib_ah->sgid_index, &sgid);
+ if (rc)
+ return rc;
+
+ /* ETH */
+ qp->qp1_hdr.eth_present = 1;
+ ptmac = ah->qplib_ah.dmac;
+ memcpy(qp->qp1_hdr.eth.dmac_h, ptmac, 4);
+ ptmac += 4;
+ memcpy(qp->qp1_hdr.eth.dmac_l, ptmac, 2);
+
+ ptmac = qp->qplib_qp.smac;
+ memcpy(qp->qp1_hdr.eth.smac_h, ptmac, 2);
+ ptmac += 2;
+ memcpy(qp->qp1_hdr.eth.smac_l, ptmac, 4);
+
+ qp->qp1_hdr.eth.type = cpu_to_be16(BNXT_QPLIB_ETHTYPE_ROCEV1);
+
+ /* For vlan, check the sgid for vlan existence */
+ vlan_id = rdma_get_vlan_id(&sgid);
+ if (vlan_id && vlan_id < 0x1000) {
+ qp->qp1_hdr.vlan_present = 1;
+ qp->qp1_hdr.eth.type = cpu_to_be16(ETH_P_8021Q);
+ }
+ /* GRH */
+ qp->qp1_hdr.grh_present = 1;
+ qp->qp1_hdr.grh.ip_version = 6;
+ qp->qp1_hdr.grh.payload_length =
+ cpu_to_be16((IB_BTH_BYTES + IB_DETH_BYTES + payload_size + 7)
+ & ~3);
+ qp->qp1_hdr.grh.next_header = 0x1b;
+ memcpy(qp->qp1_hdr.grh.source_gid.raw, sgid.raw, sizeof(sgid));
+ memcpy(qp->qp1_hdr.grh.destination_gid.raw, qplib_ah->dgid.data,
+ sizeof(sgid));
+
+ /* BTH */
+ if (wr->opcode == IB_WR_SEND_WITH_IMM) {
+ qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
+ qp->qp1_hdr.immediate_present = 1;
+ } else {
+ qp->qp1_hdr.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
+ }
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ qp->qp1_hdr.bth.solicited_event = 1;
+ qp->qp1_hdr.bth.pad_count = (4 - payload_size) & 3;
+ /* P_key for QP1 is for all members */
+ qp->qp1_hdr.bth.pkey = cpu_to_be16(0xFFFF);
+ qp->qp1_hdr.bth.destination_qpn = IB_QP1;
+ qp->qp1_hdr.bth.ack_req = 0;
+ qp->send_psn++;
+ qp->send_psn &= BTH_PSN_MASK;
+ qp->qp1_hdr.bth.psn = cpu_to_be32(qp->send_psn);
+ /* DETH */
+ /* Use the privileged Q_Key for QP1 */
+ qp->qp1_hdr.deth.qkey = cpu_to_be32(IB_QP1_QKEY);
+ qp->qp1_hdr.deth.source_qpn = IB_QP1;
+
+ /* Pack the QP1 to the transmit buffer */
+ buf = bnxt_qplib_get_qp1_sq_buf(&qp->qplib_qp, &sge);
+ if (!buf) {
+ dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
+ return -ENOMEM;
+ }
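+ /* Shift the caller's SGEs up by one; SGE 0 becomes the QP1 header buffer */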
+ for (i = wqe->num_sge; i; i--) {
+ wqe->sg_list[i].addr = wqe->sg_list[i - 1].addr;
+ wqe->sg_list[i].lkey = wqe->sg_list[i - 1].lkey;
+ wqe->sg_list[i].size = wqe->sg_list[i - 1].size;
+ }
+ wqe->sg_list[0].addr = sge.addr;
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = sge.size;
+ wqe->num_sge++;
+
+ return rc;
+}
+
+static int bnxt_re_build_gsi_send(struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_dev *rdev;
+ int rc, indx, len = 0;
+
+ rdev = qp->rdev;
+
+ /* Mode UD is applicable to Gen P5 only */
+ if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD)
+ return 0;
+
+ for (indx = 0; indx < wr->num_sge; indx++) {
+ wqe->sg_list[indx].addr = wr->sg_list[indx].addr;
+ wqe->sg_list[indx].lkey = wr->sg_list[indx].lkey;
+ wqe->sg_list[indx].size = wr->sg_list[indx].length;
+ len += wr->sg_list[indx].length;
+ }
+ rc = bnxt_re_build_qp1_send(qp, wr, wqe, len);
+ wqe->rawqp1.lflags |= SQ_SEND_RAWETH_QP1_LFLAGS_ROCE_CRC;
+
+ return rc;
+}
+
+/* The MAD layer provides only a recv SGE large enough for the ib_grh
+ plus the MAD datagram; no Ethernet header, Ethertype, BTH, DETH,
+ or RoCE iCRC. The Cu+ solution must provide a buffer for the entire
+ receive packet (334 bytes) with no VLAN and then copy the GRH
+ and the MAD datagram out to the provided SGE.
+*/
+
+static int bnxt_re_build_qp1_recv(struct bnxt_re_qp *qp,
+ const struct ib_recv_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_dev *rdev = qp->rdev;
+ struct bnxt_qplib_sge ref, sge;
+ u8 udp_hdr_size = 0;
+ u8 ip_hdr_size = 0;
+ int rc = 0;
+ int size;
+
+ if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
+ /* Create 5 SGEs according to the following layout:
+ * Ethernet header (14)
+ * ib_grh (40) - as provided from the wr
+ * ib_bth + ib_deth + UDP(RoCE v2 only) (28)
+ * MAD (256) - as provided from the wr
+ * iCRC (4)
+ */
+
+ /* Set RoCE v2 header size and offsets */
+ if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ROCE_V2_IPV4)
+ ip_hdr_size = 20;
+ if (rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_ROCE_V1)
+ udp_hdr_size = 8;
+
+ /* Save the reference from ULP */
+ ref.addr = wr->sg_list[0].addr;
+ ref.lkey = wr->sg_list[0].lkey;
+ ref.size = wr->sg_list[0].length;
+
+ /* SGE 1 */
+ size = sge.size;
+ wqe->sg_list[0].addr = sge.addr;
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE;
+ size -= wqe->sg_list[0].size;
+ if (size <= 0) {
+ dev_err(rdev_to_dev(qp->rdev), "QP1 rq buffer is empty!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ sge.size = (u32)size;
+ sge.addr += wqe->sg_list[0].size;
+
+ /* SGE 2 */
+ /* For RoCE v2 IPv4, the lower 20 bytes carry the IP header */
+ wqe->sg_list[1].addr = ref.addr + ip_hdr_size;
+ wqe->sg_list[1].lkey = ref.lkey;
+ wqe->sg_list[1].size = sizeof(struct ib_grh) - ip_hdr_size;
+ ref.size -= wqe->sg_list[1].size;
+ if (ref.size <= 0) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 ref buffer is empty!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ ref.addr += wqe->sg_list[1].size + ip_hdr_size;
+
+ /* SGE 3 */
+ wqe->sg_list[2].addr = sge.addr;
+ wqe->sg_list[2].lkey = sge.lkey;
+ wqe->sg_list[2].size = BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE +
+ udp_hdr_size;
+ size -= wqe->sg_list[2].size;
+ if (size <= 0) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 rq buffer is empty!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ sge.size = (u32)size;
+ sge.addr += wqe->sg_list[2].size;
+
+ /* SGE 4 */
+ wqe->sg_list[3].addr = ref.addr;
+ wqe->sg_list[3].lkey = ref.lkey;
+ wqe->sg_list[3].size = ref.size;
+ ref.size -= wqe->sg_list[3].size;
+ if (ref.size) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 ref buffer is incorrect!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ /* SGE 5 */
+ wqe->sg_list[4].addr = sge.addr;
+ wqe->sg_list[4].lkey = sge.lkey;
+ wqe->sg_list[4].size = sge.size;
+ size -= wqe->sg_list[4].size;
+ if (size) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 rq buffer is incorrect!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+ sge.size = (u32)size;
+ wqe->num_sge = 5;
+ } else {
+ dev_err(rdev_to_dev(qp->rdev), "QP1 buffer is empty!\n");
+ rc = -ENOMEM;
+ }
+done:
+ return rc;
+}
+
+static int bnxt_re_build_qp1_shadow_qp_recv(struct bnxt_re_qp *qp,
+ const struct ib_recv_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_sqp_entries *sqp_entry;
+ struct bnxt_qplib_sge sge;
+ struct bnxt_re_dev *rdev;
+ u32 rq_prod_index;
+ int rc = 0;
+
+ rdev = qp->rdev;
+
+ rq_prod_index = bnxt_qplib_get_rq_prod_index(&qp->qplib_qp);
+
+ if (bnxt_qplib_get_qp1_rq_buf(&qp->qplib_qp, &sge)) {
+ /* Create 1 SGE to receive the entire
+ * ethernet packet
+ */
+ /* SGE 1 */
+ wqe->sg_list[0].addr = sge.addr;
+ /* TODO check the lkey to be used */
+ wqe->sg_list[0].lkey = sge.lkey;
+ wqe->sg_list[0].size = BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2;
+ if (sge.size < wqe->sg_list[0].size) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "QP1 rq buffer is empty!\n");
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[rq_prod_index];
+ sqp_entry->sge.addr = wr->sg_list[0].addr;
+ sqp_entry->sge.lkey = wr->sg_list[0].lkey;
+ sqp_entry->sge.size = wr->sg_list[0].length;
+ /* Store the wrid for reporting completion */
+ sqp_entry->wrid = wqe->wr_id;
+ /* change the wqe->wrid to table index */
+ wqe->wr_id = rq_prod_index;
+ }
+done:
+ return rc;
+}
+
+static bool is_ud_qp(struct bnxt_re_qp *qp)
+{
+ return (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_GSI);
+}
+
+static int bnxt_re_build_send_wqe(struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_ah *ah = NULL;
+
+ if (is_ud_qp(qp)) {
+ ah = to_bnxt_re(ud_wr(wr)->ah, struct bnxt_re_ah, ibah);
+ wqe->send.q_key = ud_wr(wr)->remote_qkey;
+ wqe->send.dst_qp = ud_wr(wr)->remote_qpn;
+ wqe->send.avid = ah->qplib_ah.id;
+ }
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND;
+ break;
+ case IB_WR_SEND_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM;
+ wqe->send.imm_data = wr->ex.imm_data;
+ break;
+ case IB_WR_SEND_WITH_INV:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV;
+ wqe->send.inv_key = wr->ex.invalidate_rkey;
+ break;
+ default:
+ dev_err(rdev_to_dev(qp->rdev), "%s Invalid opcode %d!\n",
+ __func__, wr->opcode);
+ return -EINVAL;
+ }
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ if (wr->send_flags & IB_SEND_INLINE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
+
+ return 0;
+}
+
+static int bnxt_re_build_rdma_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ switch (wr->opcode) {
+ case IB_WR_RDMA_WRITE:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE;
+ break;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM;
+ wqe->rdma.imm_data = wr->ex.imm_data;
+ break;
+ case IB_WR_RDMA_READ:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_RDMA_READ;
+ wqe->rdma.inv_key = wr->ex.invalidate_rkey;
+ break;
+ default:
+ return -EINVAL;
+ }
+ wqe->rdma.remote_va = rdma_wr(wr)->remote_addr;
+ wqe->rdma.r_key = rdma_wr(wr)->rkey;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ if (wr->send_flags & IB_SEND_INLINE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_INLINE;
+
+ return 0;
+}
+
+static int bnxt_re_build_atomic_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ switch (wr->opcode) {
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
+ wqe->atomic.swap_data = atomic_wr(wr)->swap;
+ break;
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD;
+ wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
+ break;
+ default:
+ return -EINVAL;
+ }
+ wqe->atomic.remote_va = atomic_wr(wr)->remote_addr;
+ wqe->atomic.r_key = atomic_wr(wr)->rkey;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+ return 0;
+}
+
+static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
+ wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_FENCE)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT;
+
+ return 0;
+}
+
+static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_mr *mr = to_bnxt_re(wr->mr, struct bnxt_re_mr, ib_mr);
+ struct bnxt_qplib_frpl *qplib_frpl = &mr->qplib_frpl;
+ int reg_len, i, access = wr->access;
+
+ if (mr->npages > qplib_frpl->max_pg_ptrs) {
+ dev_err_ratelimited(rdev_to_dev(mr->rdev),
+ " %s: failed npages %d > %d\n", __func__,
+ mr->npages, qplib_frpl->max_pg_ptrs);
+ return -EINVAL;
+ }
+
+ wqe->frmr.pbl_ptr = (__le64 *)qplib_frpl->hwq.pbl_ptr[0];
+ wqe->frmr.pbl_dma_ptr = qplib_frpl->hwq.pbl_dma_ptr[0];
+ wqe->frmr.levels = qplib_frpl->hwq.level;
+ wqe->frmr.page_list = mr->pages;
+ wqe->frmr.page_list_len = mr->npages;
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
+
+ if (wr->wr.send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (access & IB_ACCESS_LOCAL_WRITE)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
+ if (access & IB_ACCESS_REMOTE_READ)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_READ;
+ if (access & IB_ACCESS_REMOTE_WRITE)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_WRITE;
+ if (access & IB_ACCESS_REMOTE_ATOMIC)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_REMOTE_ATOMIC;
+ if (access & IB_ACCESS_MW_BIND)
+ wqe->frmr.access_cntl |= SQ_FR_PMR_ACCESS_CNTL_WINDOW_BIND;
+
+ /* TODO: OFED provides the rkey of the MR instead of the lkey */
+ wqe->frmr.l_key = wr->key;
+ wqe->frmr.length = wr->mr->length;
+ wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
+ wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
+ wqe->frmr.va = wr->mr->iova;
+ reg_len = wqe->frmr.page_list_len * wr->mr->page_size;
+
+ if (wqe->frmr.length > reg_len) {
+ dev_err_ratelimited(rdev_to_dev(mr->rdev),
+ "%s: bnxt_re_mr 0x%px len (%d > %d)\n",
+ __func__, (void *)mr, wqe->frmr.length,
+ reg_len);
+
+ for (i = 0; i < mr->npages; i++)
+ dev_dbg(rdev_to_dev(mr->rdev),
+ "%s: build_reg_wqe page[%d] = 0x%llx\n",
+ __func__, i, mr->pages[i]);
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
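+/* The cast below relies on struct bnxt_qplib_sge matching the layout of
+ * struct ib_sge, so the caller's list can be reused in place.
+ */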
+static void bnxt_re_set_sg_list(const struct ib_send_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ wqe->sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
+ wqe->num_sge = wr->num_sge;
+}
+
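+/* Work around a HW stall on UD, GSI and raw-Ethertype QPs: once
+ * BNXT_RE_UD_QP_HW_STALL WQEs have been posted, transition the QP back
+ * to RTS to nudge the HW, then reset the WQE count.
+ */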
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+ if ((qp->ib_qp.qp_type == IB_QPT_UD || qp->ib_qp.qp_type == IB_QPT_GSI ||
+ qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+ qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+ int qp_attr_mask;
+ struct ib_qp_attr qp_attr;
+
+ qp_attr_mask = IB_QP_STATE;
+ qp_attr.qp_state = IB_QPS_RTS;
+ bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+ qp->qplib_qp.wqe_cnt = 0;
+ }
+}
+
+static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp,
+ const struct ib_send_wr *wr)
+{
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ while (wr) {
+ /* Housekeeping */
+ memset(&wqe, 0, sizeof(wqe));
+ /* Common */
+ if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
+ dev_err(rdev_to_dev(rdev),
+ "Limit exceeded for Send SGEs\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ bnxt_re_set_sg_list(wr, &wqe);
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_SEND;
+ rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
+ if (rc)
+ break;
+
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "bad_wr seen with opcode = 0x%x rc = %d\n",
+ wr->opcode, rc);
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
+static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
+{
+ /* Need unconditional fence for non-wire memory opcode
+ * to work as expected.
+ */
+ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
+ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+}
+
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+{
+ struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_sge sge[6];
+ struct bnxt_qplib_swqe wqe;
+ struct bnxt_re_dev *rdev;
+ unsigned long flags;
+ int rc = 0;
+
+ rdev = qp->rdev;
+ spin_lock_irqsave(&qp->sq_lock, flags);
+ while (wr) {
+ /* Housekeeping */
+ memset(&wqe, 0, sizeof(wqe));
+ /* Common */
+ if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
+ dev_err(rdev_to_dev(rdev),
+ "Limit exceeded for Send SGEs\n");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ bnxt_re_set_sg_list(wr, &wqe);
+ wqe.wr_id = wr->wr_id;
+
+ switch (wr->opcode) {
+ case IB_WR_SEND:
+ case IB_WR_SEND_WITH_IMM:
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
+ memset(sge, 0, sizeof(sge));
+ wqe.sg_list = sge;
+ rc = bnxt_re_build_gsi_send(qp, wr, &wqe);
+ if (rc)
+ goto bad;
+ } else if (ib_qp->qp_type == IB_QPT_RAW_ETHERTYPE) {
+ bnxt_re_build_raw_send(wr, &wqe);
+ }
+ switch (wr->send_flags) {
+ case IB_SEND_IP_CSUM:
+ wqe.rawqp1.lflags |=
+ SQ_SEND_RAWETH_QP1_LFLAGS_IP_CHKSUM;
+ break;
+ default:
+ break;
+ }
+ fallthrough;
+ case IB_WR_SEND_WITH_INV:
+ rc = bnxt_re_build_send_wqe(qp, wr, &wqe);
+ break;
+ case IB_WR_RDMA_WRITE:
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ case IB_WR_RDMA_READ:
+ rc = bnxt_re_build_rdma_wqe(wr, &wqe);
+ break;
+ case IB_WR_ATOMIC_CMP_AND_SWP:
+ case IB_WR_ATOMIC_FETCH_AND_ADD:
+ rc = bnxt_re_build_atomic_wqe(wr, &wqe);
+ break;
+ case IB_WR_RDMA_READ_WITH_INV:
+ dev_err(rdev_to_dev(rdev),
+ "RDMA Read with Invalidate is not supported\n");
+ rc = -EINVAL;
+ goto bad;
+ case IB_WR_LOCAL_INV:
+ rc = bnxt_re_build_inv_wqe(wr, &wqe);
+ break;
+ case IB_WR_REG_MR:
+ rc = bnxt_re_build_reg_wqe(reg_wr(wr), &wqe);
+ break;
+ default:
+ /* Unsupported WRs */
+ dev_err(rdev_to_dev(rdev),
+ "WR (0x%x) is not supported\n", wr->opcode);
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ if (likely(!rc)) {
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ bnxt_re_legacy_set_uc_fence(&wqe);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
+ }
+bad:
+ if (unlikely(rc)) {
+ dev_err(rdev_to_dev(rdev),
+ "bad_wr seen with opcode = 0x%x\n", wr->opcode);
+ *bad_wr = wr;
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ return rc;
+}
+
+static int bnxt_re_post_recv_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp,
+ struct ib_recv_wr *wr)
+{
+ struct bnxt_qplib_swqe wqe;
+ int rc = 0;
+
+ /* Taking the rq lock can be skipped here. */
+ while (wr) {
+ /* Housekeeping */
+ memset(&wqe, 0, sizeof(wqe));
+ /* Common */
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ dev_err(rdev_to_dev(rdev),
+ "Limit exceeded for Receive SGEs\n");
+ rc = -EINVAL;
+ goto bad;
+ }
+
+ wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
+ wqe.num_sge = wr->num_sge;
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+bad:
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "bad_wr seen with RQ post\n");
+ break;
+ }
+ wr = wr->next;
+ }
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ return rc;
+}
+
+static int bnxt_re_build_gsi_recv(struct bnxt_re_qp *qp,
+ const struct ib_recv_wr *wr,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_re_dev *rdev = qp->rdev;
+ int rc = 0;
+
+ if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
+ rc = bnxt_re_build_qp1_shadow_qp_recv(qp, wr, wqe);
+ else
+ rc = bnxt_re_build_qp1_recv(qp, wr, wqe);
+
+ return rc;
+}
+
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr)
+{
+ struct bnxt_re_qp *qp = to_bnxt_re(ib_qp, struct bnxt_re_qp, ib_qp);
+ struct bnxt_qplib_sge sge[6];
+ struct bnxt_qplib_swqe wqe;
+ unsigned long flags;
+ u32 count = 0;
+ int rc = 0;
+
+ spin_lock_irqsave(&qp->rq_lock, flags);
+ while (wr) {
+ memset(&wqe, 0, sizeof(wqe));
+ if (wr->num_sge > qp->qplib_qp.rq.max_sge) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "Limit exceeded for Receive SGEs\n");
+ rc = -EINVAL;
+ goto bad;
+ }
+ wqe.num_sge = wr->num_sge;
+ wqe.sg_list = (struct bnxt_qplib_sge *)wr->sg_list;
+ wqe.wr_id = wr->wr_id;
+ wqe.type = BNXT_QPLIB_SWQE_TYPE_RECV;
+
+ if (ib_qp->qp_type == IB_QPT_GSI &&
+ qp->rdev->gsi_ctx.gsi_qp_mode != BNXT_RE_GSI_MODE_UD) {
+ memset(sge, 0, sizeof(sge));
+ wqe.sg_list = sge;
+ rc = bnxt_re_build_gsi_recv(qp, wr, &wqe);
+ if (rc)
+ goto bad;
+ }
+ rc = bnxt_qplib_post_recv(&qp->qplib_qp, &wqe);
+bad:
+ if (rc) {
+ dev_err(rdev_to_dev(qp->rdev),
+ "bad_wr seen with RQ post\n");
+ *bad_wr = wr;
+ break;
+ }
+ /* Ring DB if the RQEs posted reaches a threshold value */
+ if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ count = 0;
+ }
+ wr = wr->next;
+ }
+
+ if (count)
+ bnxt_qplib_post_recv_db(&qp->qplib_qp);
+ spin_unlock_irqrestore(&qp->rq_lock, flags);
+
+ return rc;
+}
+
+/* Completion Queues */
+void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
+{
+ struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
+ struct bnxt_re_dev *rdev = cq->rdev;
+ int rc = 0;
+
+ if (cq->uctx_cq_page) {
+ BNXT_RE_CQ_PAGE_LIST_DEL(cq->uctx, cq);
+ free_page((u64)cq->uctx_cq_page);
+ cq->uctx_cq_page = NULL;
+ }
+ if (cq->is_dbr_soft_cq && cq->uctx) {
+ void *dbr_page;
+
+ if (cq->uctx->dbr_recov_cq) {
+ dbr_page = cq->uctx->dbr_recov_cq_page;
+ cq->uctx->dbr_recov_cq_page = NULL;
+ cq->uctx->dbr_recov_cq = NULL;
+ free_page((unsigned long)dbr_page);
+ }
+ goto end;
+ }
+ /* The CQ is being destroyed; record this state for the cqn handler */
+ spin_lock_bh(&cq->qplib_cq.compl_lock);
+ cq->qplib_cq.destroyed = true;
+ spin_unlock_bh(&cq->qplib_cq.compl_lock);
+ if (ib_cq->poll_ctx == IB_POLL_WORKQUEUE ||
+ ib_cq->poll_ctx == IB_POLL_UNBOUND_WORKQUEUE)
+ cancel_work_sync(&ib_cq->work);
+
+ rc = bnxt_qplib_destroy_cq(&rdev->qplib_res, &cq->qplib_cq);
+ if (rc)
+ dev_err_ratelimited(rdev_to_dev(rdev),
+ "%s id = %d failed rc = %d\n",
+ __func__, cq->qplib_cq.id, rc);
+
+ bnxt_re_put_nq(rdev, cq->qplib_cq.nq);
+ if (cq->umem && !IS_ERR(cq->umem))
+ ib_umem_release(cq->umem);
+
+ kfree(cq->cql);
+ atomic_dec(&rdev->stats.rsors.cq_count);
+end:
+ return;
+}
+
+static inline struct bnxt_re_cq *
+__get_cq_from_cq_in(struct ib_cq *cq_in, struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_cq *cq;
+
+ cq = container_of(cq_in, struct bnxt_re_cq, ibcq);
+ return cq;
+}
+
+int bnxt_re_create_cq(struct ib_cq *cq_in,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx = NULL;
+ struct ib_ucontext *context = NULL;
+ struct bnxt_qplib_cq *qplcq;
+ struct bnxt_re_cq_req ureq;
+ struct bnxt_re_dev *rdev;
+ int rc, entries;
+ struct bnxt_re_cq *cq;
+ u32 max_active_cqs;
+ int cqe = attr->cqe;
+
+ if (attr->flags)
+ return -EOPNOTSUPP;
+
+ rdev = rdev_from_cq_in(cq_in);
+ if (rdev->mod_exit) {
+ rc = -EIO;
+ dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
+ goto exit;
+ }
+ if (udata) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct bnxt_re_ucontext,
+ ibucontext);
+ context = &uctx->ibucontext;
+ }
+ dev_attr = rdev->dev_attr;
+
+ if (atomic_read(&rdev->stats.rsors.cq_count) >= dev_attr->max_cq) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQs)\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+ /* Validate CQ fields */
+ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Create CQ failed - max exceeded(CQ_WQs)\n");
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ cq = __get_cq_from_cq_in(cq_in, rdev);
+ if (!cq) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ cq->rdev = rdev;
+ cq->uctx = uctx;
+ qplcq = &cq->qplib_cq;
+ qplcq->cq_handle = (u64)qplcq;
+ /*
+ * Since the CQ for QP1 is shared with the shadow QP's CQ, the
+ * size must be scaled up accordingly. There is no way to
+ * identify whether this CQ is for the GSI QP, so assume that
+ * the first CQ created is for QP1.
+ */
+ if (!udata && !rdev->gsi_ctx.first_cq_created &&
+ rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL) {
+ rdev->gsi_ctx.first_cq_created = true;
+ /*
+ * Total CQE required for the CQ = CQE for QP1 RQ +
+ * CQE for Shadow QP SQEs + CQE for Shadow QP RQEs.
+ * Max entries of shadow QP SQ and RQ = QP1 RQEs = cqe
+ */
+ cqe *= 3;
+ }
+
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ if (entries > dev_attr->max_cq_wqes + 1)
+ entries = dev_attr->max_cq_wqes + 1;
+
+ qplcq->sginfo.pgshft = PAGE_SHIFT;
+ qplcq->sginfo.pgsize = PAGE_SIZE;
+ if (udata) {
+ if (udata->inlen < sizeof(ureq))
+ dev_warn(rdev_to_dev(rdev),
+ "Update the library ulen %d klen %d\n",
+ (unsigned int)udata->inlen,
+ (unsigned int)sizeof(ureq));
+
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto fail;
+
+ if (BNXT_RE_IS_DBR_PACING_NOTIFY_CQ(ureq)) {
+ cq->is_dbr_soft_cq = true;
+ goto success;
+ }
+
+ if (BNXT_RE_IS_DBR_RECOV_CQ(ureq)) {
+ void *dbr_page;
+ u32 *epoch;
+
+ dbr_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!dbr_page) {
+ dev_err(rdev_to_dev(rdev),
+ "DBR recov CQ page allocation failed!");
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* Zero the epoch and epoch_ack values */
+ epoch = dbr_page;
+ epoch[0] = 0x0;
+ epoch[1] = 0x0;
+
+ uctx->dbr_recov_cq = cq;
+ uctx->dbr_recov_cq_page = dbr_page;
+
+ cq->is_dbr_soft_cq = true;
+ goto success;
+ }
+
+ cq->umem = ib_umem_get_compat
+ (rdev, context, udata, ureq.cq_va,
+ entries * sizeof(struct cq_base),
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(cq->umem)) {
+ rc = PTR_ERR(cq->umem);
+ dev_err(rdev_to_dev(rdev),
+ "%s: ib_umem_get failed! rc = %d\n",
+ __func__, rc);
+ goto fail;
+ }
+ qplcq->sginfo.sghead = get_ib_umem_sgl(cq->umem,
+ &qplcq->sginfo.nmap);
+ qplcq->sginfo.npages = ib_umem_num_pages_compat(cq->umem);
+ if (!uctx->dpi.dbr) {
+ rc = bnxt_re_get_user_dpi(rdev, uctx);
+ if (rc)
+ goto c2fail;
+ }
+ qplcq->dpi = &uctx->dpi;
+ } else {
+ cq->max_cql = entries > MAX_CQL_PER_POLL ? MAX_CQL_PER_POLL : entries;
+ cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe),
+ GFP_KERNEL);
+ if (!cq->cql) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate CQL for %d failed!\n", cq->max_cql);
+ rc = -ENOMEM;
+ goto fail;
+ }
+ qplcq->dpi = &rdev->dpi_privileged;
+ }
+ /*
+ * Allocate the NQ in a round-robin fashion; nq_alloc_cnt is
+ * used to derive the NQ index.
+ */
+ qplcq->max_wqe = entries;
+ qplcq->nq = bnxt_re_get_nq(rdev);
+ qplcq->cnq_hw_ring_id = qplcq->nq->ring_id;
+
+ rc = bnxt_qplib_create_cq(&rdev->qplib_res, qplcq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Create HW CQ failed!\n");
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&cq->cq_list);
+ cq->ibcq.cqe = entries;
+ cq->cq_period = qplcq->period;
+
+ atomic_inc(&rdev->stats.rsors.cq_count);
+ max_active_cqs = atomic_read(&rdev->stats.rsors.cq_count);
+ if (max_active_cqs > atomic_read(&rdev->stats.rsors.max_cq_count))
+ atomic_set(&rdev->stats.rsors.max_cq_count, max_active_cqs);
+ spin_lock_init(&cq->cq_lock);
+
+ if (udata) {
+ struct bnxt_re_cq_resp resp;
+
+ resp.cqid = qplcq->id;
+ resp.tail = qplcq->hwq.cons;
+ resp.phase = qplcq->period;
+ resp.comp_mask = 0;
+ resp.dbr = (u64)uctx->dpi.umdbr;
+ resp.dpi = uctx->dpi.dpi;
+ resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_DB_INFO;
+ /* Copy only on a valid wcdpi */
+ if (uctx->wcdpi.dpi) {
+ resp.wcdpi = uctx->wcdpi.dpi;
+ resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_WC_DPI;
+ }
+
+ if (_is_chip_p7(rdev->chip_ctx)) {
+ cq->uctx_cq_page = (void *)__get_free_page(GFP_KERNEL);
+
+ if (!cq->uctx_cq_page) {
+ dev_err(rdev_to_dev(rdev),
+ "CQ page allocation failed!\n");
+ bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
+ rc = -ENOMEM;
+ goto c2fail;
+ }
+
+ resp.uctx_cq_page = (u64)cq->uctx_cq_page;
+ resp.comp_mask |= BNXT_RE_COMP_MASK_CQ_HAS_CQ_PAGE;
+ }
+
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc) {
+ free_page((u64)cq->uctx_cq_page);
+ cq->uctx_cq_page = NULL;
+ bnxt_qplib_destroy_cq(&rdev->qplib_res, qplcq);
+ goto c2fail;
+ }
+
+ if (cq->uctx_cq_page)
+ BNXT_RE_CQ_PAGE_LIST_ADD(uctx, cq);
+ }
+
+success:
+ return 0;
+c2fail:
+ if (udata && cq->umem && !IS_ERR(cq->umem))
+ ib_umem_release(cq->umem);
+fail:
+ kfree(cq->cql);
+exit:
+ return rc;
+}
+
+int bnxt_re_modify_cq(struct ib_cq *ib_cq, u16 cq_count, u16 cq_period)
+{
+ struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
+ struct bnxt_re_dev *rdev = cq->rdev;
+ int rc;
+
+ if ((cq->cq_count != cq_count) || (cq->cq_period != cq_period)) {
+ cq->qplib_cq.count = cq_count;
+ cq->qplib_cq.period = cq_period;
+ rc = bnxt_qplib_modify_cq(&rdev->qplib_res, &cq->qplib_cq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Modify HW CQ %#x failed!\n",
+ cq->qplib_cq.id);
+ return rc;
+ }
+ /* On success, update the shadow */
+ cq->cq_count = cq_count;
+ cq->cq_period = cq_period;
+ }
+ return 0;
+}
+
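+/* Complete a deferred CQ resize: switch the qplib CQ to the new ring and,
+ * for user CQs, swap the resized umem in place of the old one.
+ */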
+static void bnxt_re_resize_cq_complete(struct bnxt_re_cq *cq)
+{
+ struct bnxt_re_dev *rdev = cq->rdev;
+
+ bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
+
+ cq->qplib_cq.max_wqe = cq->resize_cqe;
+ if (cq->resize_umem) {
+ ib_umem_release(cq->umem);
+ cq->umem = cq->resize_umem;
+ cq->resize_umem = NULL;
+ cq->resize_cqe = 0;
+ }
+}
+
+int bnxt_re_resize_cq(struct ib_cq *ib_cq, int cqe, struct ib_udata *udata)
+{
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_dpi *orig_dpi = NULL;
+ struct bnxt_qplib_dev_attr *dev_attr;
+ struct bnxt_re_ucontext *uctx = NULL;
+ struct bnxt_re_resize_cq_req ureq;
+ struct ib_ucontext *context = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_cq *cq;
+ int rc, entries;
+
+ /* Don't allow more than one resize request at the same time.
+ * TODO: need a mutex here when we support kernel consumers of resize.
+ */
+ cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
+ rdev = cq->rdev;
+ dev_attr = rdev->dev_attr;
+ if (ib_cq->uobject) {
+ uctx = rdma_udata_to_drv_context(udata,
+ struct bnxt_re_ucontext,
+ ibucontext);
+ context = &uctx->ibucontext;
+ }
+
+ if (cq->resize_umem) {
+ dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - Busy\n",
+ cq->qplib_cq.id);
+ return -EBUSY;
+ }
+
+ /* Check that the requested CQ depth is within the supported range */
+ if (cqe < 1 || cqe > dev_attr->max_cq_wqes) {
+ dev_err(rdev_to_dev(rdev), "Resize CQ %#x failed - max exceeded\n",
+ cq->qplib_cq.id);
+ return -EINVAL;
+ }
+
+ entries = bnxt_re_init_depth(cqe + 1, uctx);
+ entries = min_t(u32, (u32)entries, dev_attr->max_cq_wqes + 1);
+
+ /* Check whether the existing CQ can already handle the newly
+ * requested size
+ */
+ if (entries == cq->ibcq.cqe) {
+ dev_info(rdev_to_dev(rdev), "CQ is already at size %d\n", cqe);
+ return 0;
+ }
+
+ if (ib_cq->uobject && udata) {
+ if (udata->inlen < sizeof(ureq))
+ dev_warn(rdev_to_dev(rdev),
+ "Update the library ulen %d klen %d\n",
+ (unsigned int)udata->inlen,
+ (unsigned int)sizeof(ureq));
+
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto fail;
+
+ dev_dbg(rdev_to_dev(rdev), "%s: va %p\n", __func__,
+ (void *)ureq.cq_va);
+ cq->resize_umem = ib_umem_get_compat
+ (rdev,
+ context, udata, ureq.cq_va,
+ entries * sizeof(struct cq_base),
+ IB_ACCESS_LOCAL_WRITE, 1);
+ if (IS_ERR(cq->resize_umem)) {
+ rc = PTR_ERR(cq->resize_umem);
+ cq->resize_umem = NULL;
+ dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
+ __func__, rc);
+ goto fail;
+ }
+ cq->resize_cqe = entries;
+ dev_dbg(rdev_to_dev(rdev), "%s: ib_umem_get() success\n",
+ __func__);
+ memcpy(&sginfo, &cq->qplib_cq.sginfo, sizeof(sginfo));
+ orig_dpi = cq->qplib_cq.dpi;
+
+ cq->qplib_cq.sginfo.sghead = get_ib_umem_sgl(cq->resize_umem,
+ &cq->qplib_cq.sginfo.nmap);
+ cq->qplib_cq.sginfo.npages =
+ ib_umem_num_pages_compat(cq->resize_umem);
+ cq->qplib_cq.sginfo.pgsize = PAGE_SIZE;
+ cq->qplib_cq.sginfo.pgshft = PAGE_SHIFT;
+ cq->qplib_cq.dpi = &uctx->dpi;
+ } else {
+ /* TODO: kernel consumer */
+ }
+
+ rc = bnxt_qplib_resize_cq(&rdev->qplib_res, &cq->qplib_cq, entries);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Resize HW CQ %#x failed!\n",
+ cq->qplib_cq.id);
+ goto fail;
+ }
+
+ cq->ibcq.cqe = cq->resize_cqe;
+ /* For kernel consumers complete resize here. For uverbs consumers,
+ * we complete it in the context of ibv_poll_cq().
+ */
+ if (!cq->resize_umem)
+ bnxt_qplib_resize_cq_complete(&rdev->qplib_res, &cq->qplib_cq);
+
+ atomic_inc(&rdev->stats.rsors.resize_count);
+ return 0;
+
+fail:
+ if (cq->resize_umem) {
+ ib_umem_release(cq->resize_umem);
+ cq->resize_umem = NULL;
+ cq->resize_cqe = 0;
+ memcpy(&cq->qplib_cq.sginfo, &sginfo, sizeof(sginfo));
+ cq->qplib_cq.dpi = orig_dpi;
+ }
+ return rc;
+}
+
+static enum ib_wc_status __req_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_REQ_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_REQ_STATUS_BAD_RESPONSE_ERR:
+ return IB_WC_BAD_RESP_ERR;
+ case CQ_REQ_STATUS_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_REQ_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_REQ_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_REQ_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_REQ_STATUS_REMOTE_INVALID_REQUEST_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case CQ_REQ_STATUS_REMOTE_ACCESS_ERR:
+ return IB_WC_REM_ACCESS_ERR;
+ case CQ_REQ_STATUS_REMOTE_OPERATION_ERR:
+ return IB_WC_REM_OP_ERR;
+ case CQ_REQ_STATUS_RNR_NAK_RETRY_CNT_ERR:
+ return IB_WC_RNR_RETRY_EXC_ERR;
+ case CQ_REQ_STATUS_TRANSPORT_RETRY_CNT_ERR:
+ return IB_WC_RETRY_EXC_ERR;
+ case CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static enum ib_wc_status __rawqp1_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_RES_RAWETH_QP1_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_ACCESS_ERROR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_HW_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case CQ_RES_RAWETH_QP1_STATUS_HW_FLUSH_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static enum ib_wc_status __rc_to_ib_wc_status(u8 qstatus)
+{
+ switch (qstatus) {
+ case CQ_RES_RC_STATUS_OK:
+ return IB_WC_SUCCESS;
+ case CQ_RES_RC_STATUS_LOCAL_ACCESS_ERROR:
+ return IB_WC_LOC_ACCESS_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_LENGTH_ERR:
+ return IB_WC_LOC_LEN_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_PROTECTION_ERR:
+ return IB_WC_LOC_PROT_ERR;
+ case CQ_RES_RC_STATUS_LOCAL_QP_OPERATION_ERR:
+ return IB_WC_LOC_QP_OP_ERR;
+ case CQ_RES_RC_STATUS_MEMORY_MGT_OPERATION_ERR:
+ return IB_WC_GENERAL_ERR;
+ case CQ_RES_RC_STATUS_REMOTE_INVALID_REQUEST_ERR:
+ return IB_WC_REM_INV_REQ_ERR;
+ case CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ case CQ_RES_RC_STATUS_HW_FLUSH_ERR:
+ return IB_WC_WR_FLUSH_ERR;
+ default:
+ return IB_WC_GENERAL_ERR;
+ }
+}
+
+static void bnxt_re_process_req_wc(struct ib_wc *wc, struct bnxt_qplib_cqe *cqe)
+{
+ switch (cqe->type) {
+ case BNXT_QPLIB_SWQE_TYPE_SEND:
+ wc->opcode = IB_WC_SEND;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
+ wc->opcode = IB_WC_SEND;
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
+ wc->opcode = IB_WC_RDMA_WRITE;
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
+ wc->opcode = IB_WC_RDMA_READ;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
+ wc->opcode = IB_WC_COMP_SWAP;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
+ wc->opcode = IB_WC_FETCH_ADD;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
+ wc->opcode = IB_WC_LOCAL_INV;
+ break;
+ case BNXT_QPLIB_SWQE_TYPE_REG_MR:
+ wc->opcode = IB_WC_REG_MR;
+ break;
+ default:
+ wc->opcode = IB_WC_SEND;
+ break;
+ }
+
+ wc->status = __req_to_ib_wc_status(cqe->status);
+}
+
+static int bnxt_re_check_packet_type(u16 raweth_qp1_flags, u16 raweth_qp1_flags2)
+{
+ bool is_ipv6 = false, is_ipv4 = false;
+
+ /* raweth_qp1_flags Bit 9-6 indicates itype */
+
+ if ((raweth_qp1_flags & CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
+ != CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS_ITYPE_ROCE)
+ return -1;
+
+ if (raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_CS_CALC &&
+ raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_L4_CS_CALC) {
+ /* raweth_qp1_flags2 bit 8 indicates the IP type: 0 = v4, 1 = v6 */
+ (raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_IP_TYPE) ?
+ (is_ipv6 = true) : (is_ipv4 = true);
+ return ((is_ipv6) ?
+ BNXT_RE_ROCEV2_IPV6_PACKET :
+ BNXT_RE_ROCEV2_IPV4_PACKET);
+ } else {
+ return BNXT_RE_ROCE_V1_PACKET;
+ }
+}
+
+static bool bnxt_re_is_loopback_packet(struct bnxt_re_dev *rdev,
+ void *rq_hdr_buf)
+{
+ u8 *tmp_buf = NULL;
+ struct ethhdr *eth_hdr;
+ u16 eth_type;
+ bool rc = false;
+
+ tmp_buf = (u8 *)rq_hdr_buf;
+ /*
+ * If the destination MAC is not the same as the interface MAC,
+ * this could be a loopback or a multicast address; check whether
+ * it is a loopback packet.
+ */
+ if (!ether_addr_equal(tmp_buf, rdev->dev_addr)) {
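+ /* Skip the 4-byte internal header that precedes the Ethernet header */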
+ tmp_buf += 4;
+ /* Check the ether type */
+ eth_hdr = (struct ethhdr *)tmp_buf;
+ eth_type = ntohs(eth_hdr->h_proto);
+ switch (eth_type) {
+ case BNXT_QPLIB_ETHTYPE_ROCEV1:
+ rc = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static bool bnxt_re_is_vlan_in_packet(struct bnxt_re_dev *rdev,
+ void *rq_hdr_buf,
+ struct bnxt_qplib_cqe *cqe)
+{
+ struct vlan_hdr *vlan_hdr;
+ struct ethhdr *eth_hdr;
+ u8 *tmp_buf = NULL;
+ u16 eth_type;
+
+ tmp_buf = (u8 *)rq_hdr_buf;
+ /* Check the ether type */
+ eth_hdr = (struct ethhdr *)tmp_buf;
+ eth_type = ntohs(eth_hdr->h_proto);
+ if (eth_type == ETH_P_8021Q) {
+ tmp_buf += sizeof(struct ethhdr);
+ vlan_hdr = (struct vlan_hdr *)tmp_buf;
+ cqe->raweth_qp1_metadata =
+ ntohs(vlan_hdr->h_vlan_TCI) |
+ (eth_type <<
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
+ cqe->raweth_qp1_flags2 |=
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN;
+ return true;
+ }
+
+ return false;
+}
+
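+/* Re-inject a raw QP1 receive into the shadow GSI QP: post a matching
+ * receive buffer on the shadow QP, then loop the payload back to it with
+ * a UD send so the completion can be reported with UD semantics.
+ */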
+static int bnxt_re_process_raw_qp_packet_receive(struct bnxt_re_qp *gsi_qp,
+ struct bnxt_qplib_cqe *cqe)
+{
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ struct bnxt_qplib_hdrbuf *hdr_buf;
+ dma_addr_t shrq_hdr_buf_map;
+ struct ib_sge s_sge[2] = {};
+ struct ib_sge r_sge[2] = {};
+ struct ib_recv_wr rwr = {};
+ struct bnxt_re_ah *gsi_sah;
+ struct bnxt_re_qp *gsi_sqp;
+ dma_addr_t rq_hdr_buf_map;
+ struct bnxt_re_dev *rdev;
+ struct ib_send_wr *swr;
+ u32 skip_bytes = 0;
+ void *rq_hdr_buf;
+ int pkt_type = 0;
+ u32 offset = 0;
+ u32 tbl_idx;
+ int rc;
+ struct ib_ud_wr udwr = {};
+
+ swr = &udwr.wr;
+ rdev = gsi_qp->rdev;
+ gsi_sqp = rdev->gsi_ctx.gsi_sqp;
+ tbl_idx = cqe->wr_id;
+
+ hdr_buf = gsi_qp->qplib_qp.rq_hdr_buf;
+ rq_hdr_buf = (u8 *) hdr_buf->va + tbl_idx * hdr_buf->step;
+ rq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_qp->qplib_qp,
+ tbl_idx);
+ /* Shadow QP header buffer */
+ shrq_hdr_buf_map = bnxt_qplib_get_qp_buf_from_index(&gsi_sqp->qplib_qp,
+ tbl_idx);
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+
+ /* Find packet type from the cqe */
+ pkt_type = bnxt_re_check_packet_type(cqe->raweth_qp1_flags,
+ cqe->raweth_qp1_flags2);
+ if (pkt_type < 0) {
+ dev_err(rdev_to_dev(rdev), "Not handling this packet\n");
+ return -EINVAL;
+ }
+
+ /* Adjust the offset for the user buffer and post in the rq */
+
+ if (pkt_type == BNXT_RE_ROCEV2_IPV4_PACKET)
+ offset = 20;
+
+ /*
+ * QP1 loopback packet has 4 bytes of internal header before
+ * ether header. Skip these four bytes.
+ */
+ if (bnxt_re_is_loopback_packet(rdev, rq_hdr_buf))
+ skip_bytes = 4;
+
+ if (bnxt_re_is_vlan_in_packet(rdev, rq_hdr_buf, cqe))
+ skip_bytes += VLAN_HLEN;
+
+ /* Store this cqe */
+ memcpy(&sqp_entry->cqe, cqe, sizeof(struct bnxt_qplib_cqe));
+ sqp_entry->qp1_qp = gsi_qp;
+
+ /* First send SGE. Skip the ether header. */
+ s_sge[0].addr = rq_hdr_buf_map + BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE
+ + skip_bytes;
+ s_sge[0].lkey = 0xFFFFFFFF;
+ s_sge[0].length = offset ? BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 :
+ BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6;
+
+ /* Second Send SGE */
+ s_sge[1].addr = s_sge[0].addr + s_sge[0].length +
+ BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE;
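+ /* RoCE v2 adds an 8-byte UDP header; skip it */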
+ if (pkt_type != BNXT_RE_ROCE_V1_PACKET)
+ s_sge[1].addr += 8;
+ s_sge[1].lkey = 0xFFFFFFFF;
+ s_sge[1].length = 256;
+
+ /* First recv SGE */
+ r_sge[0].addr = shrq_hdr_buf_map;
+ r_sge[0].lkey = 0xFFFFFFFF;
+ r_sge[0].length = 40;
+
+ r_sge[1].addr = sqp_entry->sge.addr + offset;
+ r_sge[1].lkey = sqp_entry->sge.lkey;
+ r_sge[1].length = BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 + 256 - offset;
+
+ /* Create receive work request */
+ rwr.num_sge = 2;
+ rwr.sg_list = r_sge;
+ rwr.wr_id = tbl_idx;
+ rwr.next = NULL;
+
+ rc = bnxt_re_post_recv_shadow_qp(rdev, gsi_sqp, &rwr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to post Rx buffers to shadow QP\n");
+ return -ENOMEM;
+ }
+
+ swr->num_sge = 2;
+ swr->sg_list = s_sge;
+ swr->wr_id = tbl_idx;
+ swr->opcode = IB_WR_SEND;
+ swr->next = NULL;
+
+ gsi_sah = rdev->gsi_ctx.gsi_sah;
+ udwr.ah = &gsi_sah->ibah;
+ udwr.remote_qpn = gsi_sqp->qplib_qp.id;
+ udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
+ /* post data received in the send queue */
+ rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
+
+ return rc;
+}
+
+static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rawqp1_to_ib_wc_status(cqe->status);
+ wc->wc_flags |= IB_WC_GRH;
+}
+
+static void bnxt_re_process_res_rc_wc(struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rc_to_ib_wc_status(cqe->status);
+
+ if (cqe->flags & CQ_RES_RC_FLAGS_IMM)
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ if (cqe->flags & CQ_RES_RC_FLAGS_INV)
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ if ((cqe->flags & (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM)) ==
+ (CQ_RES_RC_FLAGS_RDMA | CQ_RES_RC_FLAGS_IMM))
+ wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
+}
+
+/* Returns true if the packet has a valid VLAN header and the VLAN id is non-zero */
+static bool bnxt_re_is_nonzero_vlanid_pkt(struct bnxt_qplib_cqe *orig_cqe,
+ u16 *vid, u8 *sl)
+{
+ u32 metadata;
+ u16 tpid;
+ bool ret = false;
+
+ metadata = orig_cqe->raweth_qp1_metadata;
+ if (orig_cqe->raweth_qp1_flags2 &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_FLAGS2_META_FORMAT_VLAN) {
+ tpid = ((metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_TPID_SFT);
+ if (tpid == ETH_P_8021Q) {
+ *vid = metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_VID_MASK;
+ *sl = (metadata &
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_MASK) >>
+ CQ_RES_RAWETH_QP1_RAWETH_QP1_METADATA_PRI_SFT;
+ ret = !!(*vid);
+ }
+ }
+
+ return ret;
+}
+
+static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
+ struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ u32 tbl_idx;
+ struct bnxt_re_dev *rdev = gsi_sqp->rdev;
+ struct bnxt_re_qp *gsi_qp = NULL;
+ struct bnxt_qplib_cqe *orig_cqe = NULL;
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ int nw_type;
+ u16 vlan_id;
+ u8 sl;
+
+ tbl_idx = cqe->wr_id;
+
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ gsi_qp = sqp_entry->qp1_qp;
+ orig_cqe = &sqp_entry->cqe;
+
+ wc->wr_id = sqp_entry->wrid;
+ wc->byte_len = orig_cqe->length;
+ wc->qp = &gsi_qp->ib_qp;
+
+ wc->ex.imm_data = orig_cqe->immdata;
+ wc->src_qp = orig_cqe->src_qp;
+ memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
+ if (bnxt_re_is_nonzero_vlanid_pkt(orig_cqe, &vlan_id, &sl)) {
+ if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->sl = sl;
+ wc->vlan_id = vlan_id;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ }
+ wc->port_num = 1;
+ wc->vendor_err = orig_cqe->status;
+
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rawqp1_to_ib_wc_status(orig_cqe->status);
+ wc->wc_flags |= IB_WC_GRH;
+
+ nw_type = bnxt_re_check_packet_type(orig_cqe->raweth_qp1_flags,
+ orig_cqe->raweth_qp1_flags2);
+ if (nw_type >= 0)
+ dev_dbg(rdev_to_dev(rdev), "%s nw_type = %d\n", __func__, nw_type);
+}
+
+static void bnxt_re_process_res_ud_wc(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp, struct ib_wc *wc,
+ struct bnxt_qplib_cqe *cqe)
+{
+ u16 vlan_id = 0;
+
+ wc->opcode = IB_WC_RECV;
+ wc->status = __rc_to_ib_wc_status(cqe->status);
+ if (cqe->flags & CQ_RES_UD_FLAGS_IMM)
+ wc->wc_flags |= IB_WC_WITH_IMM;
+ if (cqe->flags & CQ_RES_RC_FLAGS_INV)
+ wc->wc_flags |= IB_WC_WITH_INVALIDATE;
+ /* report only on GSI QP for Thor */
+ if (rdev->gsi_ctx.gsi_qp->qplib_qp.id == qp->qplib_qp.id &&
+ rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_UD) {
+ wc->wc_flags |= IB_WC_GRH;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->wc_flags |= IB_WC_WITH_SMAC;
+ if (_is_cqe_v2_supported(rdev->dev_attr->dev_cap_flags)) {
+ if (cqe->flags & CQ_RES_UD_V2_FLAGS_META_FORMAT_MASK) {
+ if (cqe->cfa_meta &
+ BNXT_QPLIB_CQE_CFA_META1_VALID)
+ vlan_id = (cqe->cfa_meta & 0xFFF);
+ }
+ } else if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
+ vlan_id = (cqe->cfa_meta & 0xFFF);
+ }
+ /* Mark only if vlan_id is non zero */
+ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
+ }
+ }
+}
+
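+/* Consume the SQ slot reserved for the phantom WQE by posting a fence-MW bind */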
+static int bnxt_re_legacy_send_phantom_wqe(struct bnxt_re_qp *qp)
+{
+ struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
+ rc = bnxt_re_legacy_bind_fence_mw(lib_qp);
+ if (!rc) {
+ lib_qp->sq.phantom_wqe_cnt++;
+ dev_dbg(&lib_qp->sq.hwq.pdev->dev,
+ "qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
+ lib_qp->id, lib_qp->sq.hwq.prod,
+ HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
+ lib_qp->sq.phantom_wqe_cnt);
+ }
+
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+}
+
+int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
+{
+ struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
+ struct bnxt_re_dev *rdev = cq->rdev;
+ struct bnxt_re_qp *qp;
+ struct bnxt_qplib_cqe *cqe;
+ int i, ncqe, budget, init_budget;
+ struct bnxt_qplib_q *sq;
+ struct bnxt_qplib_qp *lib_qp;
+ u32 tbl_idx;
+ struct bnxt_re_sqp_entries *sqp_entry = NULL;
+ unsigned long flags;
+ u8 gsi_mode;
+
+ /*
+ * DB recovery CQ; only process the doorbell pacing alert from
+ * the user lib.
+ */
+ if (cq->is_dbr_soft_cq) {
+ bnxt_re_pacing_alert(rdev);
+ return 0;
+ }
+
+ /* User CQ; the only processing we do is to
+ * complete any pending CQ resize operation.
+ */
+ if (cq->umem) {
+ if (cq->resize_umem)
+ bnxt_re_resize_cq_complete(cq);
+ return 0;
+ }
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+
+ budget = min_t(u32, num_entries, cq->max_cql);
+ init_budget = budget;
+ if (!cq->cql) {
+ dev_err(rdev_to_dev(rdev), "POLL CQ no CQL to use\n");
+ goto exit;
+ }
+ cqe = &cq->cql[0];
+ gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
+ while (budget) {
+ lib_qp = NULL;
+ ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
+ if (lib_qp) {
+ sq = &lib_qp->sq;
+ if (sq->legacy_send_phantom == true) {
+ qp = container_of(lib_qp, struct bnxt_re_qp, qplib_qp);
+ if (bnxt_re_legacy_send_phantom_wqe(qp) == -ENOMEM)
+ dev_err(rdev_to_dev(rdev),
+ "Phantom failed! Scheduled to send again\n");
+ else
+ sq->legacy_send_phantom = false;
+ }
+ }
+ if (ncqe < budget)
+ ncqe += bnxt_qplib_process_flush_list(&cq->qplib_cq,
+ cqe + ncqe,
+ budget - ncqe);
+
+ if (!ncqe)
+ break;
+
+ for (i = 0; i < ncqe; i++, cqe++) {
+ /* Transcribe each qplib_wqe back to ib_wc */
+ memset(wc, 0, sizeof(*wc));
+
+ wc->wr_id = cqe->wr_id;
+ wc->byte_len = cqe->length;
+ qp = to_bnxt_re((struct bnxt_qplib_qp *)cqe->qp_handle,
+ struct bnxt_re_qp, qplib_qp);
+ if (!qp) {
+ dev_err(rdev_to_dev(rdev),
+ "POLL CQ bad QP handle\n");
+ continue;
+ }
+ wc->qp = &qp->ib_qp;
+ wc->ex.imm_data = cqe->immdata;
+ wc->src_qp = cqe->src_qp;
+ memcpy(wc->smac, cqe->smac, ETH_ALEN);
+ wc->port_num = 1;
+ wc->vendor_err = cqe->status;
+
+ switch (cqe->opcode) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
+ qp->qplib_qp.id ==
+ rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
+ /* Handle this completion with
+ * the stored completion */
+ dev_dbg(rdev_to_dev(rdev),
+ "Skipping this UD Send CQ\n");
+ memset(wc, 0, sizeof(*wc));
+ continue;
+ }
+ bnxt_re_process_req_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ if (gsi_mode == BNXT_RE_GSI_MODE_ALL) {
+ if (!cqe->status) {
+ int rc = 0;
+ rc = bnxt_re_process_raw_qp_packet_receive(qp, cqe);
+ if (!rc) {
+ memset(wc, 0,
+ sizeof(*wc));
+ continue;
+ }
+ cqe->status = -1;
+ }
+ /* Errors need not be looped back.
+ * But change the wr_id to the one
+ * stored in the table
+ */
+ tbl_idx = cqe->wr_id;
+ sqp_entry = &rdev->gsi_ctx.sqp_tbl[tbl_idx];
+ wc->wr_id = sqp_entry->wrid;
+ }
+
+ bnxt_re_process_res_rawqp1_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ bnxt_re_process_res_rc_wc(wc, cqe);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ if (gsi_mode == BNXT_RE_GSI_MODE_ALL &&
+ qp->qplib_qp.id ==
+ rdev->gsi_ctx.gsi_sqp->qplib_qp.id) {
+ /* Handle this completion with
+ * the stored completion
+ */
+ dev_dbg(rdev_to_dev(rdev),
+ "Handling the UD receive CQ\n");
+ if (cqe->status) {
+ /* TODO: handle this completion as a failure in
+ * the loopback procedure
+ */
+ continue;
+ } else {
+ bnxt_re_process_res_shadow_qp_wc(qp, wc, cqe);
+ break;
+ }
+ }
+ bnxt_re_process_res_ud_wc(rdev, qp, wc, cqe);
+ break;
+ default:
+ dev_err(rdev_to_dev(cq->rdev),
+ "POLL CQ type 0x%x not handled, skip!\n",
+ cqe->opcode);
+ continue;
+ }
+ wc++;
+ budget--;
+ }
+ }
+exit:
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+ return init_budget - budget;
+}
+
+int bnxt_re_req_notify_cq(struct ib_cq *ib_cq,
+ enum ib_cq_notify_flags ib_cqn_flags)
+{
+ struct bnxt_re_cq *cq = to_bnxt_re(ib_cq, struct bnxt_re_cq, ibcq);
+ int type = 0, rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->cq_lock, flags);
+ /* Trigger on the very next completion */
+ if (ib_cqn_flags & IB_CQ_NEXT_COMP)
+ type = DBC_DBC_TYPE_CQ_ARMALL;
+ /* Trigger on the next solicited completion */
+ else if (ib_cqn_flags & IB_CQ_SOLICITED)
+ type = DBC_DBC_TYPE_CQ_ARMSE;
+
+ bnxt_qplib_req_notify_cq(&cq->qplib_cq, type);
+
+ /* Poll to see if there are missed events */
+ if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
+ !(bnxt_qplib_is_cq_empty(&cq->qplib_cq)))
+ rc = 1;
+
+ spin_unlock_irqrestore(&cq->cq_lock, flags);
+
+ return rc;
+}
+
+/* Memory Regions */
+struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *ib_pd, int mr_access_flags)
+{
+ struct bnxt_qplib_mrinfo mrinfo;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_mr *mr;
+ struct bnxt_re_pd *pd;
+ u32 max_mr_count;
+ u64 pbl = 0;
+ int rc;
+
+ memset(&mrinfo, 0, sizeof(mrinfo));
+ pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ rdev = pd->rdev;
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate memory for DMA MR failed!\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+
+ /* Allocate and register 0 as the address */
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Allocate DMA MR failed!\n");
+ goto fail;
+ }
+ mr->qplib_mr.total_size = -1; /* Infinite length */
+ mrinfo.ptes = &pbl;
+ mrinfo.sg.npages = 0;
+ mrinfo.sg.pgsize = PAGE_SIZE;
+ mrinfo.sg.pgshft = PAGE_SHIFT;
+ mrinfo.mrw = &mr->qplib_mr;
+ mrinfo.is_dma = true;
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Register DMA MR failed!\n");
+ goto fail_mr;
+ }
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ if (mr_access_flags & (IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_ATOMIC))
+ mr->ib_mr.rkey = mr->ib_mr.lkey;
+ atomic_inc(&rdev->stats.rsors.mr_count);
+ max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
+ if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
+ atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
+
+ return &mr->ib_mr;
+
+fail_mr:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+fail:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+{
+ struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
+ struct bnxt_re_dev *rdev = mr->rdev;
+ int rc = 0;
+
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc)
+		dev_err(rdev_to_dev(rdev),
+			"Dereg MR failed for lkey %#x, rc = %d\n",
+			mr->qplib_mr.lkey, rc);
+
+ if (mr->pages) {
+ bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
+ &mr->qplib_frpl);
+ kfree(mr->pages);
+ mr->npages = 0;
+ mr->pages = NULL;
+ }
+ if (!IS_ERR(mr->ib_umem) && mr->ib_umem) {
+ mr->is_invalcb_active = false;
+ bnxt_re_peer_mem_release(mr->ib_umem);
+ }
+ kfree(mr);
+ atomic_dec(&rdev->stats.rsors.mr_count);
+ return 0;
+}
+
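+/* Page-collection callback for ib_sg_to_pages(); invoked once per HW page. */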
+static int bnxt_re_set_page(struct ib_mr *ib_mr, u64 addr)
+{
+ struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
+
+ if (unlikely(mr->npages == mr->qplib_frpl.max_pg_ptrs))
+ return -ENOMEM;
+
+ mr->pages[mr->npages++] = addr;
+ dev_dbg(NULL, "%s: ibdev %p Set MR pages[%d] = 0x%lx\n",
+ ROCE_DRV_MODULE_NAME, ib_mr->device, mr->npages - 1,
+ mr->pages[mr->npages - 1]);
+ return 0;
+}
+
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset)
+{
+ struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
+
+ mr->npages = 0;
+ return ib_sg_to_pages(ib_mr, sg, sg_nents,
+ sg_offset, bnxt_re_set_page);
+}
+
+struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
+ u32 max_num_sg, struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mr *mr;
+ u32 max_mr_count;
+ int rc;
+
+ dev_dbg(rdev_to_dev(rdev), "Alloc MR\n");
+ if (type != IB_MR_TYPE_MEM_REG) {
+ dev_dbg(rdev_to_dev(rdev), "MR type 0x%x not supported\n", type);
+ return ERR_PTR(-EINVAL);
+ }
+ if (max_num_sg > MAX_PBL_LVL_1_PGS) {
+ dev_dbg(rdev_to_dev(rdev), "Max SG exceeded\n");
+ return ERR_PTR(-EINVAL);
+ }
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ dev_err(rdev_to_dev(rdev), "Allocate MR mem failed!\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.flags = BNXT_QPLIB_FR_PMR;
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
+
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
+ goto fail;
+ }
+ mr->ib_mr.lkey = mr->qplib_mr.lkey;
+ mr->ib_mr.rkey = mr->ib_mr.lkey;
+ mr->pages = kzalloc(sizeof(u64) * max_num_sg, GFP_KERNEL);
+ if (!mr->pages) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate MR page list mem failed!\n");
+ rc = -ENOMEM;
+ goto fail_mr;
+ }
+ rc = bnxt_qplib_alloc_fast_reg_page_list(&rdev->qplib_res,
+ &mr->qplib_frpl, max_num_sg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Allocate HW Fast reg page list failed!\n");
+ goto free_page;
+ }
+ dev_dbg(rdev_to_dev(rdev), "Alloc MR pages = 0x%p\n", mr->pages);
+
+ atomic_inc(&rdev->stats.rsors.mr_count);
+ max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
+ if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
+ atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
+ return &mr->ib_mr;
+
+free_page:
+ kfree(mr->pages);
+fail_mr:
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+fail:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+/* Memory Windows */
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_re_mw *mw;
+ u32 max_mw_count;
+ int rc;
+
+ mw = kzalloc(sizeof(*mw), GFP_KERNEL);
+ if (!mw) {
+ dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
+ rc = -ENOMEM;
+ goto exit;
+ }
+ mw->rdev = rdev;
+ mw->qplib_mw.pd = &pd->qplib_pd;
+
+ mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
+ CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Allocate MW failed!\n");
+ goto fail;
+ }
+ mw->ib_mw.rkey = mw->qplib_mw.rkey;
+ atomic_inc(&rdev->stats.rsors.mw_count);
+ max_mw_count = atomic_read(&rdev->stats.rsors.mw_count);
+ if (max_mw_count > atomic_read(&rdev->stats.rsors.max_mw_count))
+ atomic_set(&rdev->stats.rsors.max_mw_count, max_mw_count);
+
+ return &mw->ib_mw;
+fail:
+ kfree(mw);
+exit:
+ return ERR_PTR(rc);
+}
+
+int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
+{
+ struct bnxt_re_mw *mw = to_bnxt_re(ib_mw, struct bnxt_re_mw, ib_mw);
+ struct bnxt_re_dev *rdev = mw->rdev;
+ int rc;
+
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
+ return rc;
+ }
+
+ kfree(mw);
+ atomic_dec(&rdev->stats.rsors.mw_count);
+ return rc;
+}
+
+static int bnxt_re_page_size_ok(int page_shift)
+{
+ switch (page_shift) {
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_8K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_64K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_2M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256K:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_4M:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB:
+ case CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_1G:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
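+/* va, start and cmask are unused in this port; the umem page size alone decides the shift. */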
+static int bnxt_re_get_page_shift(struct ib_umem *umem,
+ u64 va, u64 st, u64 cmask)
+{
+ int pgshft;
+
+ pgshft = ilog2(umem->page_size);
+
+ return pgshft;
+}
+
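+/* Number of HW pages needed to back [start, start + length) at the given page shift. */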
+static int bnxt_re_get_num_pages(struct ib_umem *umem, u64 start, u64 length, int page_shift)
+{
+ int npages = 0;
+
+ if (page_shift == PAGE_SHIFT) {
+ npages = ib_umem_num_pages_compat(umem);
+ } else {
+ npages = ALIGN(length, BIT(page_shift)) / BIT(page_shift);
+ if (start % BIT(page_shift))
+ npages++;
+ }
+ return npages;
+}
+
+/* uverbs */
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata)
+{
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ struct bnxt_re_dev *rdev = pd->rdev;
+ struct bnxt_qplib_mrinfo mrinfo;
+ int umem_pgs, page_shift, rc;
+ struct bnxt_re_mr *mr;
+ struct ib_umem *umem;
+ u32 max_mr_count;
+ int npages;
+
+ dev_dbg(rdev_to_dev(rdev), "Reg user MR\n");
+
+ if (bnxt_re_get_total_mr_mw_count(rdev) >= rdev->dev_attr->max_mr)
+ return ERR_PTR(-ENOMEM);
+
+ if (rdev->mod_exit) {
+ dev_dbg(rdev_to_dev(rdev), "%s(): in mod_exit, just return!\n", __func__);
+ return ERR_PTR(-EIO);
+ }
+ memset(&mrinfo, 0, sizeof(mrinfo));
+ if (length > BNXT_RE_MAX_MR_SIZE) {
+ dev_err(rdev_to_dev(rdev), "Requested MR Size: %lu "
+ "> Max supported: %ld\n", length, BNXT_RE_MAX_MR_SIZE);
+ return ERR_PTR(-ENOMEM);
+ }
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr) {
+ dev_err(rdev_to_dev(rdev), "Allocate MR failed!\n");
+		return ERR_PTR(-ENOMEM);
+ }
+ mr->rdev = rdev;
+ mr->qplib_mr.pd = &pd->qplib_pd;
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+ mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_MR;
+
+ if (!_is_alloc_mr_unified(rdev->qplib_res.dattr)) {
+ rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Alloc MR failed!\n");
+ goto fail;
+ }
+ /* The fixed portion of the rkey is the same as the lkey */
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+ }
+
+ umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
+ udata, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem)) {
+ rc = PTR_ERR(umem);
+ dev_err(rdev_to_dev(rdev), "%s: ib_umem_get failed! rc = %d\n",
+ __func__, rc);
+ goto free_mr;
+ }
+ mr->ib_umem = umem;
+
+ mr->qplib_mr.va = virt_addr;
+ umem_pgs = ib_umem_num_pages_compat(umem);
+ if (!umem_pgs) {
+ dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
+ rc = -EINVAL;
+ goto free_umem;
+ }
+ mr->qplib_mr.total_size = length;
+ page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
+ rdev->dev_attr->page_size_cap);
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev), "umem page size unsupported!\n");
+ rc = -EFAULT;
+ goto free_umem;
+ }
+ npages = bnxt_re_get_num_pages(umem, start, length, page_shift);
+
+ /* Map umem buf ptrs to the PBL */
+ mrinfo.sg.npages = npages;
+ mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
+ mrinfo.sg.pgshft = page_shift;
+ mrinfo.sg.pgsize = BIT(page_shift);
+
+ mrinfo.mrw = &mr->qplib_mr;
+
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Reg user MR failed!\n");
+ goto free_umem;
+ }
+
+ mr->ib_mr.lkey = mr->ib_mr.rkey = mr->qplib_mr.lkey;
+ atomic_inc(&rdev->stats.rsors.mr_count);
+ max_mr_count = atomic_read(&rdev->stats.rsors.mr_count);
+ if (max_mr_count > atomic_read(&rdev->stats.rsors.max_mr_count))
+ atomic_set(&rdev->stats.rsors.max_mr_count, max_mr_count);
+
+ return &mr->ib_mr;
+
+free_umem:
+ bnxt_re_peer_mem_release(mr->ib_umem);
+free_mr:
+ if (!_is_alloc_mr_unified(rdev->qplib_res.dattr))
+ bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+fail:
+ kfree(mr);
+ return ERR_PTR(rc);
+}
+
+int
+bnxt_re_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_pd *ib_pd, struct ib_udata *udata)
+{
+ struct bnxt_re_mr *mr = to_bnxt_re(ib_mr, struct bnxt_re_mr, ib_mr);
+ struct bnxt_re_pd *pd = to_bnxt_re(ib_pd, struct bnxt_re_pd, ibpd);
+ int umem_pgs = 0, page_shift = PAGE_SHIFT, rc;
+ struct bnxt_re_dev *rdev = mr->rdev;
+ struct bnxt_qplib_mrinfo mrinfo;
+ struct ib_umem *umem;
+ u32 npages;
+
+ /* TODO: Must decipher what to modify based on the flags */
+ memset(&mrinfo, 0, sizeof(mrinfo));
+ if (flags & IB_MR_REREG_TRANS) {
+ umem = ib_umem_get_flags_compat(rdev, ib_pd->uobject->context,
+ udata, start, length,
+ mr_access_flags, 0);
+ if (IS_ERR(umem)) {
+ rc = PTR_ERR(umem);
+ dev_err(rdev_to_dev(rdev),
+ "%s: ib_umem_get failed! ret = %d\n",
+ __func__, rc);
+ goto fail;
+ }
+ mr->ib_umem = umem;
+
+ mr->qplib_mr.va = virt_addr;
+ umem_pgs = ib_umem_num_pages_compat(umem);
+ if (!umem_pgs) {
+ dev_err(rdev_to_dev(rdev), "umem is invalid!\n");
+ rc = -EINVAL;
+ goto fail_free_umem;
+ }
+ mr->qplib_mr.total_size = length;
+ page_shift = bnxt_re_get_page_shift(umem, virt_addr, start,
+ rdev->dev_attr->page_size_cap);
+ if (!bnxt_re_page_size_ok(page_shift)) {
+ dev_err(rdev_to_dev(rdev),
+ "umem page size unsupported!\n");
+ rc = -EFAULT;
+ goto fail_free_umem;
+ }
+ npages = bnxt_re_get_num_pages(umem, start, length, page_shift);
+ /* Map umem buf ptrs to the PBL */
+ mrinfo.sg.npages = npages;
+ mrinfo.sg.sghead = get_ib_umem_sgl(umem, &mrinfo.sg.nmap);
+ mrinfo.sg.pgshft = page_shift;
+ mrinfo.sg.pgsize = BIT(page_shift);
+ }
+
+ mrinfo.mrw = &mr->qplib_mr;
+ if (flags & IB_MR_REREG_PD)
+ mr->qplib_mr.pd = &pd->qplib_pd;
+
+ if (flags & IB_MR_REREG_ACCESS)
+ mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
+
+ rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mrinfo, false);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Rereg user MR failed!\n");
+ goto fail_free_umem;
+ }
+ mr->ib_mr.rkey = mr->qplib_mr.rkey;
+
+ return 0;
+
+fail_free_umem:
+ bnxt_re_peer_mem_release(mr->ib_umem);
+fail:
+ return rc;
+}
+
+static int bnxt_re_check_abi_version(struct bnxt_re_dev *rdev)
+{
+ struct ib_device *ibdev = &rdev->ibdev;
+ u32 uverbs_abi_ver;
+
+ uverbs_abi_ver = GET_UVERBS_ABI_VERSION(ibdev);
+	dev_dbg(rdev_to_dev(rdev), "ABI version requested %d\n",
+		uverbs_abi_ver);
+	if (uverbs_abi_ver != BNXT_RE_ABI_VERSION) {
+		dev_dbg(rdev_to_dev(rdev),
+			"Requested ABI version %d is different from the device's %d\n",
+			uverbs_abi_ver, BNXT_RE_ABI_VERSION);
+ return -EPERM;
+ }
+ return 0;
+}
+
+int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
+ struct ib_udata *udata)
+{
+ struct ib_ucontext *ctx = uctx_in;
+ struct ib_device *ibdev = ctx->device;
+ struct bnxt_re_ucontext *uctx =
+ container_of(ctx, struct bnxt_re_ucontext, ibucontext);
+
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_qplib_dev_attr *dev_attr = rdev->dev_attr;
+ struct bnxt_re_uctx_resp resp = {};
+ struct bnxt_re_uctx_req ureq = {};
+ struct bnxt_qplib_chip_ctx *cctx;
+ u32 chip_met_rev_num;
+ bool genp5 = false;
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ rc = bnxt_re_check_abi_version(rdev);
+ if (rc)
+ goto fail;
+
+ uctx->rdev = rdev;
+ uctx->shpg = (void *)__get_free_page(GFP_KERNEL);
+ if (!uctx->shpg) {
+ dev_err(rdev_to_dev(rdev), "shared memory allocation failed!\n");
+ rc = -ENOMEM;
+ goto fail;
+ }
+ spin_lock_init(&uctx->sh_lock);
+ if (BNXT_RE_ABI_VERSION >= 4) {
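+		/* Pack chip number, revision and metal into chip_id0 for the user library. */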
+ chip_met_rev_num = cctx->chip_num;
+ chip_met_rev_num |= ((u32)cctx->chip_rev & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_REV_SFT;
+ chip_met_rev_num |= ((u32)cctx->chip_metal & 0xFF) <<
+ BNXT_RE_CHIP_ID0_CHIP_MET_SFT;
+ resp.chip_id0 = chip_met_rev_num;
+ resp.chip_id1 = 0; /* future extension of chip info */
+ }
+
+ if (BNXT_RE_ABI_VERSION != 4) {
+		/* Temporary; use idr_alloc instead */
+ resp.dev_id = rdev->en_dev->pdev->devfn;
+ resp.max_qp = rdev->qplib_res.hctx->qp_ctx.max;
+ }
+
+ genp5 = _is_chip_gen_p5_p7(cctx);
+ if (BNXT_RE_ABI_VERSION > 5) {
+ resp.modes = genp5 ? cctx->modes.wqe_mode : 0;
+ if (rdev->dev_attr && BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags))
+ resp.comp_mask = BNXT_RE_COMP_MASK_UCNTX_HW_RETX_ENABLED;
+ }
+
+ resp.pg_size = PAGE_SIZE;
+ resp.cqe_sz = sizeof(struct cq_base);
+ resp.max_cqd = dev_attr->max_cq_wqes;
+ if (genp5 && cctx->modes.db_push) {
+ resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
+ if (_is_chip_p7(cctx) &&
+ !(dev_attr->dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_PINGPONG_PUSH_MODE))
+ resp.comp_mask &=
+ ~BNXT_RE_COMP_MASK_UCNTX_WC_DPI_ENABLED;
+ }
+
+ resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_MQP_EX_SUPPORTED;
+
+ if (rdev->dbr_pacing)
+ resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_PACING_ENABLED;
+
+ if (rdev->dbr_drop_recov && rdev->user_dbr_drop_recov)
+ resp.comp_mask |= BNXT_RE_COMP_MASK_UCNTX_DBR_RECOVERY_ENABLED;
+
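+	/* Older libraries pass no request struct; keep pow2 queue depths and reserved WQEs enabled for them. */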
+ if (udata->inlen >= sizeof(ureq)) {
+ rc = ib_copy_from_udata(&ureq, udata,
+ min(udata->inlen, sizeof(ureq)));
+ if (rc)
+ goto cfail;
+ if (bnxt_re_init_pow2_flag(&ureq, &resp))
+ dev_warn(rdev_to_dev(rdev),
+ "Enabled roundup logic. Library bug?\n");
+ if (bnxt_re_init_rsvd_wqe_flag(&ureq, &resp, genp5))
+ dev_warn(rdev_to_dev(rdev),
+ "Rsvd wqe in use! Try the updated library.\n");
+ } else {
+ dev_warn(rdev_to_dev(rdev),
+ "Enabled roundup logic. Update the library!\n");
+ resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
+
+ dev_warn(rdev_to_dev(rdev),
+ "Rsvd wqe in use. Update the library!\n");
+ resp.comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
+ }
+
+ uctx->cmask = (uint64_t)resp.comp_mask;
+ rc = bnxt_re_copy_to_udata(rdev, &resp,
+ min(udata->outlen, sizeof(resp)),
+ udata);
+ if (rc)
+ goto cfail;
+
+ INIT_LIST_HEAD(&uctx->cq_list);
+ mutex_init(&uctx->cq_lock);
+
+ return 0;
+cfail:
+ free_page((u64)uctx->shpg);
+ uctx->shpg = NULL;
+fail:
+ return rc;
+}
+
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
+{
+ struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
+ struct bnxt_re_ucontext,
+ ibucontext);
+ struct bnxt_re_dev *rdev = uctx->rdev;
+ int rc = 0;
+
+ if (uctx->shpg)
+ free_page((u64)uctx->shpg);
+
+ if (uctx->dpi.dbr) {
+ /* Free DPI only if this is the first PD allocated by the
+ * application and mark the context dpi as NULL
+ */
+ if (_is_chip_gen_p5_p7(rdev->chip_ctx) && uctx->wcdpi.dbr) {
+ rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &uctx->wcdpi);
+ if (rc)
+				dev_err(rdev_to_dev(rdev),
+					"dealloc push dpi failed\n");
+ uctx->wcdpi.dbr = NULL;
+ }
+
+ rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
+ &uctx->dpi);
+ if (rc)
+			dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!\n");
+		/* Don't fail, continue */
+ uctx->dpi.dbr = NULL;
+ }
+ return;
+}
+
+static struct bnxt_re_cq *is_bnxt_re_cq_page(struct bnxt_re_ucontext *uctx,
+ u64 pg_off)
+{
+ struct bnxt_re_cq *cq = NULL, *tmp_cq;
+
+ if (!_is_chip_p7(uctx->rdev->chip_ctx))
+ return NULL;
+
+ mutex_lock(&uctx->cq_lock);
+ list_for_each_entry(tmp_cq, &uctx->cq_list, cq_list) {
+ if (((u64)tmp_cq->uctx_cq_page >> PAGE_SHIFT) == pg_off) {
+ cq = tmp_cq;
+ break;
+ }
+ }
+ mutex_unlock(&uctx->cq_lock);
+ return cq;
+}
+
+/* Helper function to mmap the virtual memory from user app */
+int bnxt_re_mmap(struct ib_ucontext *ib_uctx, struct vm_area_struct *vma)
+{
+ struct bnxt_re_ucontext *uctx = to_bnxt_re(ib_uctx,
+ struct bnxt_re_ucontext,
+ ibucontext);
+ struct bnxt_re_dev *rdev = uctx->rdev;
+ struct bnxt_re_cq *cq = NULL;
+ int rc = 0;
+ u64 pfn;
+
+ switch (vma->vm_pgoff) {
+	case BNXT_RE_MAP_SH_PAGE:
+		pfn = vtophys(uctx->shpg) >> PAGE_SHIFT;
+		dev_dbg(rdev_to_dev(rdev), "%s:%d uctx->shpg 0x%lx, vtophys(uctx->shpg) 0x%lx, pfn = 0x%lx\n",
+			__func__, __LINE__, (u64)uctx->shpg, vtophys(uctx->shpg), pfn);
+		rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn,
+				       PAGE_SIZE, vma->vm_page_prot, NULL);
+		if (rc) {
+			dev_err(rdev_to_dev(rdev), "Shared page mapping failed!\n");
+			rc = -EAGAIN;
+		}
+		return rc;
+ case BNXT_RE_MAP_WC:
+ vma->vm_page_prot =
+ pgprot_writecombine(vma->vm_page_prot);
+ pfn = (uctx->wcdpi.umdbr >> PAGE_SHIFT);
+ if (!pfn)
+ return -EFAULT;
+ break;
+ case BNXT_RE_DBR_PAGE:
+ /* Driver doesn't expect write access request */
+ if (vma->vm_flags & VM_WRITE)
+ return -EFAULT;
+
+ pfn = vtophys(rdev->dbr_page) >> PAGE_SHIFT;
+ if (!pfn)
+ return -EFAULT;
+ break;
+ case BNXT_RE_MAP_DB_RECOVERY_PAGE:
+ pfn = vtophys(uctx->dbr_recov_cq_page) >> PAGE_SHIFT;
+ if (!pfn)
+ return -EFAULT;
+ break;
+ default:
+ cq = is_bnxt_re_cq_page(uctx, vma->vm_pgoff);
+ if (cq) {
+ pfn = vtophys((void *)cq->uctx_cq_page) >> PAGE_SHIFT;
+ rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "CQ page mapping failed!\n");
+ rc = -EAGAIN;
+ }
+ goto out;
+ } else {
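+			/* Not a CQ page: treat the page offset as the DPI pfn and map it non-cached. */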
+ vma->vm_page_prot =
+ pgprot_noncached(vma->vm_page_prot);
+ pfn = vma->vm_pgoff;
+ }
+ break;
+ }
+
+ rc = rdma_user_mmap_io(&uctx->ibucontext, vma, pfn, PAGE_SIZE, vma->vm_page_prot, NULL);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "DPI mapping failed!\n");
+ return -EAGAIN;
+ }
+ rc = __bnxt_re_set_vma_data(uctx, vma);
+out:
+ return rc;
+}
+
+int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ const struct ib_mad_hdr *in_mad, size_t in_mad_size,
+ struct ib_mad_hdr *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index)
+{
+ return IB_MAD_RESULT_SUCCESS;
+}
+
+void bnxt_re_disassociate_ucntx(struct ib_ucontext *ib_uctx)
+{
+}
diff --git a/sys/dev/bnxt/bnxt_re/ib_verbs.h b/sys/dev/bnxt/bnxt_re/ib_verbs.h
new file mode 100644
index 000000000000..cb9f7974e92d
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/ib_verbs.h
@@ -0,0 +1,632 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: IB Verbs interpreter (header)
+ */
+
+#ifndef __BNXT_RE_IB_VERBS_H__
+#define __BNXT_RE_IB_VERBS_H__
+
+#include <rdma/ib_addr.h>
+#include "bnxt_re-abi.h"
+#include "qplib_res.h"
+#include "qplib_fp.h"
+
+struct bnxt_re_dev;
+
+#define BNXT_RE_ROCE_V2_UDP_SPORT 0x8CD1
+#define BNXT_RE_QP_RANDOM_QKEY 0x81818181
+
+#ifndef IB_MTU_8192
+#define IB_MTU_8192 8192
+#endif
+
+#ifndef SPEED_1000
+#define SPEED_1000 1000
+#endif
+
+#ifndef SPEED_10000
+#define SPEED_10000 10000
+#endif
+
+#ifndef SPEED_20000
+#define SPEED_20000 20000
+#endif
+
+#ifndef SPEED_25000
+#define SPEED_25000 25000
+#endif
+
+#ifndef SPEED_40000
+#define SPEED_40000 40000
+#endif
+
+#ifndef SPEED_50000
+#define SPEED_50000 50000
+#endif
+
+#ifndef SPEED_100000
+#define SPEED_100000 100000
+#endif
+
+#ifndef SPEED_200000
+#define SPEED_200000 200000
+#endif
+
+#ifndef IB_SPEED_HDR
+#define IB_SPEED_HDR 64
+#endif
+
+#define RDMA_NETWORK_IPV4 1
+#define RDMA_NETWORK_IPV6 2
+
+#define ROCE_DMAC(x) (x)->dmac
+
+#define dma_rmb() rmb()
+
+#define compat_ib_alloc_device(size) ib_alloc_device(size)
+
+#define rdev_from_cq_in(cq_in) to_bnxt_re_dev(cq_in->device, ibdev)
+
+#define GET_UVERBS_ABI_VERSION(ibdev) (ibdev->uverbs_abi_ver)
+
+#define CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_PG_256MB 0x1cUL
+
+#define IB_POLL_UNBOUND_WORKQUEUE IB_POLL_WORKQUEUE
+
+#define BNXT_RE_LEGACY_FENCE_BYTES 64
+#define BNXT_RE_LEGACY_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_LEGACY_FENCE_BYTES, PAGE_SIZE)
+
+
+static inline struct
+bnxt_re_cq *__get_cq_from_cq_in(struct ib_cq *cq_in,
+ struct bnxt_re_dev *rdev);
+static inline struct
+bnxt_re_qp *__get_qp_from_qp_in(struct ib_pd *qp_in,
+ struct bnxt_re_dev *rdev);
+
+static inline bool
+bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev, u16 vlan_id);
+
+#define bnxt_re_compat_qfwstr(void) \
+ bnxt_re_query_fw_str(struct ib_device *ibdev, \
+ char *str, size_t str_len)
+
+static inline
+struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap);
+
+struct bnxt_re_gid_ctx {
+ u32 idx;
+ u32 refcnt;
+};
+
+struct bnxt_re_legacy_fence_data {
+ u32 size;
+ void *va;
+ dma_addr_t dma_addr;
+ struct bnxt_re_mr *mr;
+ struct ib_mw *mw;
+ struct bnxt_qplib_swqe bind_wqe;
+ u32 bind_rkey;
+};
+
+struct bnxt_re_pd {
+ struct ib_pd ibpd;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_pd qplib_pd;
+ struct bnxt_re_legacy_fence_data fence;
+};
+
+struct bnxt_re_ah {
+ struct ib_ah ibah;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_ah qplib_ah;
+};
+
+struct bnxt_re_srq {
+ struct ib_srq ibsrq;
+ struct bnxt_re_dev *rdev;
+ u32 srq_limit;
+ struct bnxt_qplib_srq qplib_srq;
+ struct ib_umem *umem;
+ spinlock_t lock;
+};
+
+union ip_addr {
+ u32 ipv4_addr;
+ u8 ipv6_addr[16];
+};
+
+struct bnxt_re_qp_info_entry {
+ union ib_gid sgid;
+ union ib_gid dgid;
+ union ip_addr s_ip;
+ union ip_addr d_ip;
+ u16 s_port;
+#define BNXT_RE_QP_DEST_PORT 4791
+ u16 d_port;
+};
+
+struct bnxt_re_qp {
+ struct ib_qp ib_qp;
+ struct list_head list;
+ struct bnxt_re_dev *rdev;
+ spinlock_t sq_lock;
+ spinlock_t rq_lock;
+ struct bnxt_qplib_qp qplib_qp;
+ struct ib_umem *sumem;
+ struct ib_umem *rumem;
+ /* QP1 */
+ u32 send_psn;
+ struct ib_ud_header qp1_hdr;
+ struct bnxt_re_cq *scq;
+ struct bnxt_re_cq *rcq;
+ struct dentry *qp_info_pdev_dentry;
+ struct bnxt_re_qp_info_entry qp_info_entry;
+ void *qp_data;
+};
+
+struct bnxt_re_cq {
+ struct ib_cq ibcq;
+ struct list_head cq_list;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_ucontext *uctx;
+ spinlock_t cq_lock;
+ u16 cq_count;
+ u16 cq_period;
+ struct bnxt_qplib_cq qplib_cq;
+ struct bnxt_qplib_cqe *cql;
+#define MAX_CQL_PER_POLL 1024
+ u32 max_cql;
+ struct ib_umem *umem;
+ struct ib_umem *resize_umem;
+ struct ib_ucontext *context;
+ int resize_cqe;
+ /* list of cq per uctx. Used only for Thor-2 */
+ void *uctx_cq_page;
+ void *dbr_recov_cq_page;
+ bool is_dbr_soft_cq;
+};
+
+struct bnxt_re_mr {
+ struct bnxt_re_dev *rdev;
+ struct ib_mr ib_mr;
+ struct ib_umem *ib_umem;
+ struct bnxt_qplib_mrw qplib_mr;
+ u32 npages;
+ u64 *pages;
+ struct bnxt_qplib_frpl qplib_frpl;
+ bool is_invalcb_active;
+};
+
+struct bnxt_re_frpl {
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_frpl qplib_frpl;
+ u64 *page_list;
+};
+
+struct bnxt_re_mw {
+ struct bnxt_re_dev *rdev;
+ struct ib_mw ib_mw;
+ struct bnxt_qplib_mrw qplib_mw;
+};
+
+struct bnxt_re_ucontext {
+ struct ib_ucontext ibucontext;
+ struct bnxt_re_dev *rdev;
+ struct list_head cq_list;
+ struct bnxt_qplib_dpi dpi;
+ struct bnxt_qplib_dpi wcdpi;
+ void *shpg;
+ spinlock_t sh_lock;
+ uint64_t cmask;
+ struct mutex cq_lock; /* Protect cq list */
+ void *dbr_recov_cq_page;
+ struct bnxt_re_cq *dbr_recov_cq;
+};
+
+struct bnxt_re_ah_info {
+ union ib_gid sgid;
+ struct ib_gid_attr sgid_attr;
+ u16 vlan_tag;
+ u8 nw_type;
+};
+
+struct ifnet *bnxt_re_get_netdev(struct ib_device *ibdev,
+ u8 port_num);
+
+int bnxt_re_query_device(struct ib_device *ibdev,
+ struct ib_device_attr *ib_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_device(struct ib_device *ibdev,
+ int device_modify_mask,
+ struct ib_device_modify *device_modify);
+int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_attr *port_attr);
+int bnxt_re_modify_port(struct ib_device *ibdev, u8 port_num,
+ int port_modify_mask,
+ struct ib_port_modify *port_modify);
+int bnxt_re_get_port_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable);
+void bnxt_re_compat_qfwstr(void);
+int bnxt_re_query_pkey(struct ib_device *ibdev, u8 port_num,
+ u16 index, u16 *pkey);
+int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
+ unsigned int index, void **context);
+int bnxt_re_add_gid(struct ib_device *ibdev, u8 port_num,
+ unsigned int index, const union ib_gid *gid,
+ const struct ib_gid_attr *attr, void **context);
+int bnxt_re_query_gid(struct ib_device *ibdev, u8 port_num,
+ int index, union ib_gid *gid);
+enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
+ u8 port_num);
+int bnxt_re_alloc_pd(struct ib_pd *pd_in, struct ib_udata *udata);
+void bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata);
+
+int bnxt_re_create_ah(struct ib_ah *ah_in, struct ib_ah_attr *attr,
+ u32 flags, struct ib_udata *udata);
+
+int bnxt_re_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+int bnxt_re_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+
+void bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags);
+int bnxt_re_create_srq(struct ib_srq *srq_in,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr,
+ enum ib_srq_attr_mask srq_attr_mask,
+ struct ib_udata *udata);
+int bnxt_re_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+void bnxt_re_destroy_srq(struct ib_srq *ib_srq,
+ struct ib_udata *udata);
+int bnxt_re_post_srq_recv(struct ib_srq *ib_srq, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+struct ib_qp *bnxt_re_create_qp(struct ib_pd *qp_in,
+ struct ib_qp_init_attr *qp_init_attr,
+ struct ib_udata *udata);
+int bnxt_re_modify_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_udata *udata);
+int bnxt_re_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
+ int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
+int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata);
+int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr);
+int bnxt_re_post_recv(struct ib_qp *ib_qp, const struct ib_recv_wr *wr,
+ const struct ib_recv_wr **bad_wr);
+int bnxt_re_create_cq(struct ib_cq *cq_in,
+ const struct ib_cq_init_attr *attr,
+ struct ib_udata *udata);
+void bnxt_re_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata);
+int bnxt_re_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
+int bnxt_re_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+int bnxt_re_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
+int bnxt_re_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
+struct ib_mr *bnxt_re_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
+int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg,
+ int sg_nents, unsigned int *sg_offset);
+struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
+ u32 max_num_sg, struct ib_udata *udata);
+int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+ struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
+struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags,
+ struct ib_udata *udata);
+int
+bnxt_re_rereg_user_mr(struct ib_mr *mr, int flags, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
+ struct ib_udata *udata);
+int bnxt_re_alloc_ucontext(struct ib_ucontext *uctx_in,
+ struct ib_udata *udata);
+void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx);
+int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
+int bnxt_re_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+ const struct ib_wc *wc, const struct ib_grh *grh,
+ const struct ib_mad_hdr *in_mad, size_t in_mad_size,
+ struct ib_mad_hdr *out_mad, size_t *out_mad_size,
+ u16 *out_mad_pkey_index);
+unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp);
+void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp, unsigned long flags);
+void bnxt_re_disassociate_ucntx(struct ib_ucontext *ibcontext);
+static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
+ struct vm_area_struct *vma);
+void bnxt_re_update_shadow_ah(struct bnxt_re_dev *rdev);
+void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq);
+static inline int
+bnxt_re_get_cached_gid(struct ib_device *dev, u8 port_num, int index,
+ union ib_gid *sgid, struct ib_gid_attr **sgid_attr,
+ struct ib_global_route *grh, struct ib_ah *ah);
+static inline enum rdma_network_type
+bnxt_re_gid_to_network_type(struct ib_gid_attr *sgid_attr,
+ union ib_gid *sgid);
+static inline
+struct ib_umem *ib_umem_get_compat(struct bnxt_re_dev *rdev,
+ struct ib_ucontext *ucontext,
+ struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access, int dmasync);
+static inline
+struct ib_umem *ib_umem_get_flags_compat(struct bnxt_re_dev *rdev,
+ struct ib_ucontext *ucontext,
+ struct ib_udata *udata,
+ unsigned long addr,
+ size_t size, int access, int dmasync);
+static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem);
+static inline void bnxt_re_peer_mem_release(struct ib_umem *umem);
+void bnxt_re_resolve_dmac_task(struct work_struct *work);
+
+static inline enum ib_qp_type __from_hw_to_ib_qp_type(u8 type)
+{
+ switch (type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI:
+ case CMDQ_CREATE_QP_TYPE_GSI:
+ return IB_QPT_GSI;
+ case CMDQ_CREATE_QP_TYPE_RC:
+ return IB_QPT_RC;
+ case CMDQ_CREATE_QP_TYPE_UD:
+ return IB_QPT_UD;
+ case CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE:
+ return IB_QPT_RAW_ETHERTYPE;
+ default:
+ return IB_QPT_MAX;
+ }
+}
+
+static inline u8 __from_ib_qp_state(enum ib_qp_state state)
+{
+ switch (state) {
+ case IB_QPS_RESET:
+ return CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ case IB_QPS_INIT:
+ return CMDQ_MODIFY_QP_NEW_STATE_INIT;
+ case IB_QPS_RTR:
+ return CMDQ_MODIFY_QP_NEW_STATE_RTR;
+ case IB_QPS_RTS:
+ return CMDQ_MODIFY_QP_NEW_STATE_RTS;
+ case IB_QPS_SQD:
+ return CMDQ_MODIFY_QP_NEW_STATE_SQD;
+ case IB_QPS_SQE:
+ return CMDQ_MODIFY_QP_NEW_STATE_SQE;
+ case IB_QPS_ERR:
+ default:
+ return CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ }
+}
+
+static inline u32 __from_ib_mtu(enum ib_mtu mtu)
+{
+ switch (mtu) {
+ case IB_MTU_256:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_256;
+ case IB_MTU_512:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_512;
+ case IB_MTU_1024:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_1024;
+ case IB_MTU_2048:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ case IB_MTU_4096:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_4096;
+ default:
+ return CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ }
+}
+
+static inline enum ib_mtu __to_ib_mtu(u32 mtu)
+{
+ switch (mtu & CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK) {
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_256:
+ return IB_MTU_256;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_512:
+ return IB_MTU_512;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_1024:
+ return IB_MTU_1024;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_2048:
+ return IB_MTU_2048;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_4096:
+ return IB_MTU_4096;
+ case CMDQ_MODIFY_QP_PATH_MTU_MTU_8192:
+ return IB_MTU_8192;
+ default:
+ return IB_MTU_2048;
+ }
+}
+
+static inline enum ib_qp_state __to_ib_qp_state(u8 state)
+{
+ switch (state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ return IB_QPS_RESET;
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ return IB_QPS_INIT;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ return IB_QPS_RTR;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ return IB_QPS_RTS;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ return IB_QPS_SQD;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ return IB_QPS_SQE;
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ default:
+ return IB_QPS_ERR;
+ }
+}
+
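+/* Advertise pow2-rounding as disabled only when the library requests it; returns -EINVAL for old libraries so the caller can warn. */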
+static inline int bnxt_re_init_pow2_flag(struct bnxt_re_uctx_req *req,
+ struct bnxt_re_uctx_resp *resp)
+{
+ resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
+ if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_POW2_SUPPORT)) {
+ resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline u32 bnxt_re_init_depth(u32 ent, struct bnxt_re_ucontext *uctx)
+{
+	/* Round up to a power of two unless the library disabled rounding. */
+	if (!uctx)
+		return ent;
+	return (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_POW2_DISABLED) ?
+	       ent : roundup_pow_of_two(ent);
+}
+
+static inline int bnxt_re_init_rsvd_wqe_flag(struct bnxt_re_uctx_req *req,
+ struct bnxt_re_uctx_resp *resp,
+ bool genp5)
+{
+ resp->comp_mask |= BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
+ if (!(req->comp_mask & BNXT_RE_COMP_MASK_REQ_UCNTX_RSVD_WQE)) {
+ resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
+ return -EINVAL;
+ } else if (!genp5) {
+ resp->comp_mask &= ~BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED;
+ }
+ return 0;
+}
+
+static inline u32 bnxt_re_get_diff(struct bnxt_re_ucontext *uctx,
+ struct bnxt_qplib_chip_ctx *cctx)
+{
+ if (!uctx) {
+ /* return res-wqe only for gen p4 for user resource */
+ return _is_chip_gen_p5_p7(cctx) ? 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ } else if (uctx->cmask & BNXT_RE_COMP_MASK_UCNTX_RSVD_WQE_DISABLED) {
+ return 0;
+ }
+ /* old lib */
+ return BNXT_QPLIB_RESERVED_QP_WRS;
+}
+
+static inline void bnxt_re_init_qpmtu(struct bnxt_re_qp *qp, int mtu,
+ int mask, struct ib_qp_attr *qp_attr,
+ bool *is_qpmtu_high)
+{
+ int qpmtu, qpmtu_int;
+ int ifmtu, ifmtu_int;
+
+ ifmtu = iboe_get_mtu(mtu);
+ ifmtu_int = ib_mtu_enum_to_int(ifmtu);
+ qpmtu = ifmtu;
+ qpmtu_int = ifmtu_int;
+ if (mask & IB_QP_PATH_MTU) {
+ qpmtu = qp_attr->path_mtu;
+ qpmtu_int = ib_mtu_enum_to_int(qpmtu);
+ if (qpmtu_int > ifmtu_int) {
+ /* Trim the QP path mtu to interface mtu and update
+ * the new mtu to user qp for retransmission psn
+ * calculations.
+ */
+ qpmtu = ifmtu;
+ qpmtu_int = ifmtu_int;
+ *is_qpmtu_high = true;
+ }
+ }
+ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
+ qp->qplib_qp.mtu = qpmtu_int;
+ qp->qplib_qp.modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+}
+
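+/* Compare two 14-byte Ethernet headers; returns zero iff they match. */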
+inline unsigned long compare_ether_header(void *a, void *b)
+{
+ u32 *a32 = (u32 *)((u8 *)a + 2);
+ u32 *b32 = (u32 *)((u8 *)b + 2);
+
+ return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) |
+ (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]);
+}
+
+struct vlan_hdr {
+ __be16 h_vlan_TCI;
+ __be16 h_vlan_encapsulated_proto;
+};
+
+inline uint16_t
+crc16(uint16_t crc, const void *buffer, unsigned int len)
+{
+ const unsigned char *cp = buffer;
+ /* CRC table for the CRC-16. The poly is 0x8005 (x16 + x15 + x2 + 1). */
+ static uint16_t const crc16_table[256] = {
+ 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
+ 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
+ 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
+ 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
+ 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
+ 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
+ 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
+ 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
+ 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
+ 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
+ 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
+ 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
+ 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
+ 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
+ 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
+ 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
+ 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
+ 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
+ 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
+ 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
+ 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
+ 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
+ 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
+ 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
+ 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
+ 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
+ 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
+ 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
+ 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
+ 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
+ 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
+ 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
+ };
+
+ while (len--)
+ crc = (((crc >> 8) & 0xffU) ^
+ crc16_table[(crc ^ *cp++) & 0xffU]) & 0x0000ffffU;
+ return crc;
+}
+
+static inline int __bnxt_re_set_vma_data(void *bnxt_re_uctx,
+ struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+static inline bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+ u16 vlan_id)
+{
+ bool ret = true;
+ /*
+ * Check if the vlan is configured in the host.
+ * If not configured, it can be a transparent
+ * VLAN. So dont report the vlan id.
+ */
+ return ret;
+}
+
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/main.c b/sys/dev/bnxt/bnxt_re/main.c
new file mode 100644
index 000000000000..3d26d21f3fc7
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/main.c
@@ -0,0 +1,4467 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Main component of the bnxt_re driver
+ */
+
+#include <linux/if_ether.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/fs.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
+#include <rdma/ib_cache.h>
+#include <dev/mlx5/port.h>
+#include <dev/mlx5/vport.h>
+#include <linux/list.h>
+#include <rdma/ib_smi.h>
+#include <rdma/ib_umem.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+
+#include "bnxt_re.h"
+#include "ib_verbs.h"
+#include "bnxt_re-abi.h"
+#include "bnxt.h"
+
+static char drv_version[] =
+ "Broadcom NetXtreme-C/E RoCE Driver " ROCE_DRV_MODULE_NAME \
+ " v" ROCE_DRV_MODULE_VERSION " (" ROCE_DRV_MODULE_RELDATE ")\n";
+
+#define BNXT_RE_DESC "Broadcom NetXtreme RoCE"
+#define BNXT_ADEV_NAME "if_bnxt"
+
+MODULE_DESCRIPTION("Broadcom NetXtreme-C/E RoCE Driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DEPEND(bnxt_re, linuxkpi, 1, 1, 1);
+MODULE_DEPEND(bnxt_re, ibcore, 1, 1, 1);
+MODULE_DEPEND(bnxt_re, if_bnxt, 1, 1, 1);
+MODULE_VERSION(bnxt_re, 1);
+
+
+DEFINE_MUTEX(bnxt_re_mutex); /* mutex lock for driver */
+
+static unsigned int restrict_mrs = 0;
+module_param(restrict_mrs, uint, 0);
+MODULE_PARM_DESC(restrict_mrs, "Restrict the number of MRs: 0 = 256K, 1 = 64K");
+
+unsigned int restrict_stats = 0;
+module_param(restrict_stats, uint, 0);
+MODULE_PARM_DESC(restrict_stats, "Restrict stats query frequency to ethtool coalesce value. Disabled by default");
+
+unsigned int enable_fc = 1;
+module_param(enable_fc, uint, 0);
+MODULE_PARM_DESC(enable_fc, "Enable default PFC, CC and ETS during driver load. 1 - enable, 0 - disable. Default is 1");
+
+unsigned int min_tx_depth = 1;
+module_param(min_tx_depth, uint, 0);
+MODULE_PARM_DESC(min_tx_depth, "Minimum TX depth - Default is 1");
+
+static uint8_t max_msix_vec[BNXT_RE_MAX_DEVICES] = {0};
+static unsigned int max_msix_vec_argc;
+module_param_array(max_msix_vec, byte, &max_msix_vec_argc, 0444);
+MODULE_PARM_DESC(max_msix_vec, "Max MSI-x vectors per PF (2 - 64) - Default is 64");
+
+unsigned int cmdq_shadow_qd = RCFW_CMD_NON_BLOCKING_SHADOW_QD;
+module_param_named(cmdq_shadow_qd, cmdq_shadow_qd, uint, 0644);
+MODULE_PARM_DESC(cmdq_shadow_qd, "Perf Stat Debug: Shadow QD Range (1-64) - Default is 64");
+
+
+/* globals */
+struct list_head bnxt_re_dev_list = LINUX_LIST_HEAD_INIT(bnxt_re_dev_list);
+static int bnxt_re_probe_count;
+
+DEFINE_MUTEX(bnxt_re_dev_lock);
+static u32 gmod_exit;
+static u32 gadd_dev_inprogress;
+
+static void bnxt_re_task(struct work_struct *work_task);
+static struct workqueue_struct *bnxt_re_wq;
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
+static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
+ u32 *offset);
+static int bnxt_re_ib_init(struct bnxt_re_dev *rdev);
+static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev);
+void _bnxt_re_remove(struct auxiliary_device *adev);
+void writel_fbsd(struct bnxt_softc *bp, u32, u8, u32);
+u32 readl_fbsd(struct bnxt_softc *bp, u32, u8);
+static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev);
+
+int bnxt_re_register_netdevice_notifier(struct notifier_block *nb)
+{
+	return register_netdevice_notifier(nb);
+}
+
+int bnxt_re_unregister_netdevice_notifier(struct notifier_block *nb)
+{
+	return unregister_netdevice_notifier(nb);
+}
+
+void bnxt_re_set_dma_device(struct ib_device *ibdev, struct bnxt_re_dev *rdev)
+{
+ ibdev->dma_device = &rdev->en_dev->pdev->dev;
+}
+
+void bnxt_re_init_resolve_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->resolve_wq = create_singlethread_workqueue("bnxt_re_resolve_wq");
+ INIT_LIST_HEAD(&rdev->mac_wq_list);
+}
+
+void bnxt_re_uninit_resolve_wq(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_resolve_dmac_work *tmp_work = NULL, *tmp_st;
+ if (!rdev->resolve_wq)
+ return;
+ flush_workqueue(rdev->resolve_wq);
+ list_for_each_entry_safe(tmp_work, tmp_st, &rdev->mac_wq_list, list) {
+ list_del(&tmp_work->list);
+ kfree(tmp_work);
+ }
+ destroy_workqueue(rdev->resolve_wq);
+ rdev->resolve_wq = NULL;
+}
+
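+/* bar_idx selects the register window: the doorbell BAR when non-zero, the HWRM BAR otherwise. */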
+u32 readl_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx)
+{
+ if (bar_idx)
+ return bus_space_read_8(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off);
+ else
+ return bus_space_read_8(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off);
+}
+
+void writel_fbsd(struct bnxt_softc *bp, u32 reg_off, u8 bar_idx, u32 val)
+{
+ if (bar_idx)
+ bus_space_write_8(bp->doorbell_bar.tag, bp->doorbell_bar.handle, reg_off, htole32(val));
+ else
+ bus_space_write_8(bp->hwrm_bar.tag, bp->hwrm_bar.handle, reg_off, htole32(val));
+}
+
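+/* Bucket the FIFO occupancy into slabs at 1x/2x/4x/8x the pacing threshold for debug stats. */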
+static void bnxt_re_update_fifo_occup_slabs(struct bnxt_re_dev *rdev,
+ u32 fifo_occup)
+{
+ if (fifo_occup > rdev->dbg_stats->dbq.fifo_occup_water_mark)
+ rdev->dbg_stats->dbq.fifo_occup_water_mark = fifo_occup;
+
+ if (fifo_occup > 8 * rdev->pacing_algo_th)
+ rdev->dbg_stats->dbq.fifo_occup_slab_4++;
+ else if (fifo_occup > 4 * rdev->pacing_algo_th)
+ rdev->dbg_stats->dbq.fifo_occup_slab_3++;
+ else if (fifo_occup > 2 * rdev->pacing_algo_th)
+ rdev->dbg_stats->dbq.fifo_occup_slab_2++;
+ else if (fifo_occup > rdev->pacing_algo_th)
+ rdev->dbg_stats->dbq.fifo_occup_slab_1++;
+}
+
+static void bnxt_re_update_do_pacing_slabs(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+
+ if (pacing_data->do_pacing > rdev->dbg_stats->dbq.do_pacing_water_mark)
+ rdev->dbg_stats->dbq.do_pacing_water_mark = pacing_data->do_pacing;
+
+ if (pacing_data->do_pacing > 16 * rdev->dbr_def_do_pacing)
+ rdev->dbg_stats->dbq.do_pacing_slab_5++;
+ else if (pacing_data->do_pacing > 8 * rdev->dbr_def_do_pacing)
+ rdev->dbg_stats->dbq.do_pacing_slab_4++;
+ else if (pacing_data->do_pacing > 4 * rdev->dbr_def_do_pacing)
+ rdev->dbg_stats->dbq.do_pacing_slab_3++;
+ else if (pacing_data->do_pacing > 2 * rdev->dbr_def_do_pacing)
+ rdev->dbg_stats->dbq.do_pacing_slab_2++;
+ else if (pacing_data->do_pacing > rdev->dbr_def_do_pacing)
+ rdev->dbg_stats->dbq.do_pacing_slab_1++;
+}
+
+static bool bnxt_re_is_qp1_qp(struct bnxt_re_qp *qp)
+{
+ return qp->ib_qp.qp_type == IB_QPT_GSI;
+}
+
+static struct bnxt_re_qp *bnxt_re_get_qp1_qp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ if (bnxt_re_is_qp1_qp(qp)) {
+ mutex_unlock(&rdev->qp_lock);
+ return qp;
+ }
+ }
+ mutex_unlock(&rdev->qp_lock);
+ return NULL;
+}
+
+/* Set the maximum number of each resource that the driver actually wants
+ * to allocate. This may be up to the maximum number the firmware has
+ * reserved for the function. The driver may choose to allocate fewer
+ * resources than the firmware maximum.
+ */
+static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_max_res dev_res = {};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_ctx *hctx;
+ int i;
+
+ attr = rdev->dev_attr;
+ hctx = rdev->qplib_res.hctx;
+ cctx = rdev->chip_ctx;
+
+ bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, false);
+ if (!_is_chip_gen_p5_p7(cctx)) {
+ hctx->qp_ctx.max = min_t(u32, dev_res.max_qp, attr->max_qp);
+ hctx->mrw_ctx.max = min_t(u32, dev_res.max_mr, attr->max_mr);
+		/* To accommodate 16k MRs and 16k AHs, the driver has to
+		 * allocate 32k of backing store memory.
+		 */
+ hctx->mrw_ctx.max *= 2;
+ hctx->srq_ctx.max = min_t(u32, dev_res.max_srq, attr->max_srq);
+ hctx->cq_ctx.max = min_t(u32, dev_res.max_cq, attr->max_cq);
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ hctx->tqm_ctx.qcount[i] = attr->tqm_alloc_reqs[i];
+ } else {
+ hctx->qp_ctx.max = attr->max_qp ? attr->max_qp : dev_res.max_qp;
+ hctx->mrw_ctx.max = attr->max_mr ? attr->max_mr : dev_res.max_mr;
+ hctx->srq_ctx.max = attr->max_srq ? attr->max_srq : dev_res.max_srq;
+ hctx->cq_ctx.max = attr->max_cq ? attr->max_cq : dev_res.max_cq;
+ }
+}
+
+static void bnxt_re_limit_vf_res(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_vf_res *vf_res,
+ u32 num_vf)
+{
+ struct bnxt_qplib_chip_ctx *cctx = rdev->chip_ctx;
+ struct bnxt_qplib_max_res dev_res = {};
+
+ bnxt_qplib_max_res_supported(cctx, &rdev->qplib_res, &dev_res, true);
+ vf_res->max_qp = dev_res.max_qp / num_vf;
+ vf_res->max_srq = dev_res.max_srq / num_vf;
+ vf_res->max_cq = dev_res.max_cq / num_vf;
+	/*
+	 * MR and AH share the same backing store; the value specified
+	 * for max_mrw is split in half by the FW for MR and AH.
+	 */
+ vf_res->max_mrw = dev_res.max_mr * 2 / num_vf;
+ vf_res->max_gid = BNXT_RE_MAX_GID_PER_VF;
+}
+
+static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx;
+
+ hctx = rdev->qplib_res.hctx;
+ memset(&hctx->vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+ if (rdev->num_vfs)
+ bnxt_re_limit_vf_res(rdev, &hctx->vf_res, rdev->num_vfs);
+}
+
+static void bnxt_re_dettach_irq(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_rcfw *rcfw = NULL;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ rcfw = &rdev->rcfw;
+ for (indx = 0; indx < rdev->nqr.max_init; indx++) {
+ nq = &rdev->nqr.nq[indx];
+ mutex_lock(&nq->lock);
+ bnxt_qplib_nq_stop_irq(nq, false);
+ mutex_unlock(&nq->lock);
+ }
+
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_detach_err_device(struct bnxt_re_dev *rdev)
+{
+ /* Free the MSIx vectors only so that L2 can proceed with MSIx disable */
+ bnxt_re_dettach_irq(rdev);
+
+ /* Set the state as detached to prevent sending any more commands */
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+}
+
+#define MAX_DSCP_PRI_TUPLE 64
+
+struct bnxt_re_dcb_work {
+ struct work_struct work;
+ struct bnxt_re_dev *rdev;
+ struct hwrm_async_event_cmpl cmpl;
+};
+
+static void bnxt_re_init_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->dcb_wq = create_singlethread_workqueue("bnxt_re_dcb_wq");
+}
+
+static void bnxt_re_uninit_dcb_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->dcb_wq)
+ return;
+ flush_workqueue(rdev->dcb_wq);
+ destroy_workqueue(rdev->dcb_wq);
+ rdev->dcb_wq = NULL;
+}
+
+static void bnxt_re_init_aer_wq(struct bnxt_re_dev *rdev)
+{
+ rdev->aer_wq = create_singlethread_workqueue("bnxt_re_aer_wq");
+}
+
+static void bnxt_re_uninit_aer_wq(struct bnxt_re_dev *rdev)
+{
+ if (!rdev->aer_wq)
+ return;
+ flush_workqueue(rdev->aer_wq);
+ destroy_workqueue(rdev->aer_wq);
+ rdev->aer_wq = NULL;
+}
+
+static int bnxt_re_update_qp1_tos_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_qp *qp;
+
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return 0;
+
+ qp = bnxt_re_get_qp1_qp(rdev);
+ if (!qp)
+ return 0;
+
+ qp->qplib_qp.modify_flags = CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP;
+ qp->qplib_qp.tos_dscp = rdev->cc_param.qp1_tos_dscp;
+
+ return bnxt_qplib_modify_qp(&rdev->qplib_res, &qp->qplib_qp);
+}
+
+static void bnxt_re_reconfigure_dscp(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_cc_param *cc_param;
+ struct bnxt_re_tc_rec *tc_rec;
+ bool update_cc = false;
+ u8 dscp_user;
+ int rc;
+
+ cc_param = &rdev->cc_param;
+ tc_rec = &rdev->tc_rec[0];
+
+ if (!(cc_param->roce_dscp_user || cc_param->cnp_dscp_user))
+ return;
+
+ if (cc_param->cnp_dscp_user) {
+ dscp_user = (cc_param->cnp_dscp_user & 0x3f);
+ if ((tc_rec->cnp_dscp_bv & (1ul << dscp_user)) &&
+ (cc_param->alt_tos_dscp != dscp_user)) {
+ cc_param->alt_tos_dscp = dscp_user;
+ cc_param->mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP;
+ update_cc = true;
+ }
+ }
+
+ if (cc_param->roce_dscp_user) {
+ dscp_user = (cc_param->roce_dscp_user & 0x3f);
+ if ((tc_rec->roce_dscp_bv & (1ul << dscp_user)) &&
+ (cc_param->tos_dscp != dscp_user)) {
+ cc_param->tos_dscp = dscp_user;
+ cc_param->mask |= CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP;
+ update_cc = true;
+ }
+ }
+
+ if (update_cc) {
+ rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "Failed to apply cc settings\n");
+ }
+}
+
+static void bnxt_re_dcb_wq_task(struct work_struct *work)
+{
+ struct bnxt_qplib_cc_param *cc_param;
+ struct bnxt_re_tc_rec *tc_rec;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_re_dcb_work *dcb_work =
+ container_of(work, struct bnxt_re_dcb_work, work);
+ int rc;
+
+ rdev = dcb_work->rdev;
+ if (!rdev)
+ goto exit;
+
+ mutex_lock(&rdev->cc_lock);
+
+ cc_param = &rdev->cc_param;
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, cc_param);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to query ccparam rc:%d", rc);
+ goto fail;
+ }
+ tc_rec = &rdev->tc_rec[0];
+	/*
+	 * Upon receipt of a DCB async event:
+	 * if roce_dscp or cnp_dscp or both (which the user configured via
+	 * configfs) are in the list, re-program the value using the
+	 * modify_roce_cc command.
+	 */
+ bnxt_re_reconfigure_dscp(rdev);
+
+ cc_param->roce_pri = tc_rec->roce_prio;
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "%s:Failed to modify QP1 rc:%d",
+ __func__, rc);
+ goto fail;
+ }
+ }
+
+fail:
+ mutex_unlock(&rdev->cc_lock);
+exit:
+ kfree(dcb_work);
+}
+
+static int bnxt_re_hwrm_dbr_pacing_broadcast_event(struct bnxt_re_dev *rdev)
+{
+ struct hwrm_func_dbr_pacing_broadcast_event_output resp = {0};
+ struct hwrm_func_dbr_pacing_broadcast_event_input req = {0};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+ int rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_DBR_PACING_BROADCAST_EVENT, -1, -1);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Failed to send dbr pacing broadcast event rc:%d", rc);
+ return rc;
+ }
+ return 0;
+}
+
+static int bnxt_re_hwrm_dbr_pacing_nqlist_query(struct bnxt_re_dev *rdev)
+{
+ struct hwrm_func_dbr_pacing_nqlist_query_output resp = {0};
+ struct hwrm_func_dbr_pacing_nqlist_query_input req = {0};
+ struct bnxt_dbq_nq_list *nq_list = &rdev->nq_list;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ bool primary_found = false;
+ struct bnxt_fw_msg fw_msg;
+ struct bnxt_qplib_nq *nq;
+ int rc, i, j = 1;
+ u16 *nql_ptr;
+
+ nq = &rdev->nqr.nq[0];
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_DBR_PACING_NQLIST_QUERY, -1, -1);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to send dbr pacing nq list query rc:%d", rc);
+ return rc;
+ }
+ nq_list->num_nql_entries = le32_to_cpu(resp.num_nqs);
+ nql_ptr = &resp.nq_ring_id0;
+ /* populate the nq_list of the primary function with list received
+ * from FW. Fill the NQ IDs of secondary functions from index 1 to
+ * num_nql_entries - 1. Fill the nq_list->nq_id[0] with the
+ * nq_id of the primary pf
+ */
+ for (i = 0; i < nq_list->num_nql_entries; i++) {
+ u16 nq_id = *nql_ptr;
+
+ dev_dbg(rdev_to_dev(rdev),
+ "nq_list->nq_id[%d] = %d\n", i, nq_id);
+ if (nq_id != nq->ring_id) {
+ nq_list->nq_id[j] = nq_id;
+ j++;
+ } else {
+ primary_found = true;
+ nq_list->nq_id[0] = nq->ring_id;
+ }
+ nql_ptr++;
+ }
+ if (primary_found)
+ bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1);
+
+ return 0;
+}
+
+static void __wait_for_fifo_occupancy_below_th(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ u32 read_val, fifo_occup;
+ bool first_read = true;
+
+	/* The loop shouldn't run infinitely as the occupancy usually goes
+	 * below the pacing algo threshold as soon as pacing kicks in.
+	 */
+ while (1) {
+ read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_db_fifo_reg_off, 0);
+ fifo_occup = pacing_data->fifo_max_depth -
+ ((read_val & pacing_data->fifo_room_mask) >>
+ pacing_data->fifo_room_shift);
+		/* FIFO occupancy cannot be greater than the MAX FIFO depth */
+ if (fifo_occup > pacing_data->fifo_max_depth)
+ break;
+
+ if (first_read) {
+ bnxt_re_update_fifo_occup_slabs(rdev, fifo_occup);
+ first_read = false;
+ }
+ if (fifo_occup < pacing_data->pacing_th)
+ break;
+ }
+}
+
+static void bnxt_re_set_default_pacing_data(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+
+ pacing_data->do_pacing = rdev->dbr_def_do_pacing;
+ pacing_data->pacing_th = rdev->pacing_algo_th;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
+}
+
+#define CAG_RING_MASK 0x7FF
+#define CAG_RING_SHIFT 17
+#define WATERMARK_MASK 0xFFF
+#define WATERMARK_SHIFT 0
+
+static bool bnxt_re_check_if_dbq_intr_triggered(struct bnxt_re_dev *rdev)
+{
+ u32 read_val;
+ int j;
+
+ for (j = 0; j < 10; j++) {
+ read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0);
+ dev_dbg(rdev_to_dev(rdev), "AEQ ARM status = 0x%x\n",
+ read_val);
+ if (!read_val)
+ return true;
+ }
+ return false;
+}
+
+int bnxt_re_set_dbq_throttling_reg(struct bnxt_re_dev *rdev, u16 nq_id, u32 throttle)
+{
+ u32 cag_ring_water_mark = 0, read_val;
+ u32 throttle_val;
+
+ /* Convert throttle percentage to value */
+ throttle_val = (rdev->qplib_res.pacing_data->fifo_max_depth * throttle) / 100;
+
+ if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
+ cag_ring_water_mark = (nq_id & CAG_RING_MASK) << CAG_RING_SHIFT |
+ (throttle_val & WATERMARK_MASK);
+ writel_fbsd(rdev->en_dev->softc, rdev->dbr_throttling_reg_off, 0, cag_ring_water_mark);
+ read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_throttling_reg_off, 0);
+ dev_dbg(rdev_to_dev(rdev),
+ "%s: dbr_throttling_reg_off read_val = 0x%x\n",
+ __func__, read_val);
+ if (read_val != cag_ring_water_mark) {
+ dev_dbg(rdev_to_dev(rdev),
+ "nq_id = %d write_val=0x%x read_val=0x%x\n",
+ nq_id, cag_ring_water_mark, read_val);
+ return 1;
+ }
+ }
+ writel_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0, 1);
+ return 0;
+}
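+
+/*
+ * Packing example with hypothetical numbers: for nq_id = 5, throttle = 25
+ * (percent) and fifo_max_depth = 0x2c00, the watermark becomes
+ *   throttle_val        = 0x2c00 * 25 / 100 = 0xb00
+ *   cag_ring_water_mark = (5 & 0x7ff) << 17 | (0xb00 & 0xfff) = 0xa0b00
+ * i.e. bits 17..27 carry the NQ ring ID and bits 0..11 the watermark,
+ * per the CAG_RING_* and WATERMARK_* masks above.
+ */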
+
+static void bnxt_re_set_dbq_throttling_for_non_primary(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_dbq_nq_list *nq_list;
+ struct bnxt_qplib_nq *nq;
+ int i;
+
+ nq_list = &rdev->nq_list;
+ /* Run a loop over the other active functions if this is the primary function */
+ if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
+ dev_dbg(rdev_to_dev(rdev), "%s: nq_list->num_nql_entries= %d\n",
+ __func__, nq_list->num_nql_entries);
+ nq = &rdev->nqr.nq[0];
+ for (i = nq_list->num_nql_entries - 1; i > 0; i--) {
+ u16 nq_id = nq_list->nq_id[i];
+ if (nq)
+ dev_dbg(rdev_to_dev(rdev),
+ "%s: nq_id = %d cur_fn_ring_id = %d\n",
+ __func__, nq_id, nq->ring_id);
+ if (bnxt_re_set_dbq_throttling_reg(rdev, nq_id, 0))
+ break;
+ bnxt_re_check_if_dbq_intr_triggered(rdev);
+ }
+ }
+}
+
+static void bnxt_re_handle_dbr_nq_pacing_notification(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_nq *nq;
+ int rc = 0;
+
+ nq = &rdev->nqr.nq[0];
+
+ /* Query the NQ list */
+ rc = bnxt_re_hwrm_dbr_pacing_nqlist_query(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query NQ list rc = %d", rc);
+ return;
+ }
+ /* Configure GRC access for the throttling and aeq_arm registers */
+ writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28, 0,
+ rdev->chip_ctx->dbr_aeq_arm_reg & BNXT_GRC_BASE_MASK);
+
+ rdev->dbr_throttling_reg_off =
+ (rdev->chip_ctx->dbr_throttling_reg &
+ BNXT_GRC_OFFSET_MASK) + 0x8000;
+ rdev->dbr_aeq_arm_reg_off =
+ (rdev->chip_ctx->dbr_aeq_arm_reg &
+ BNXT_GRC_OFFSET_MASK) + 0x8000;
+
+ bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id, rdev->dbq_watermark);
+}
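+
+/*
+ * Sketch of the GRC window arithmetic (register value is hypothetical, and
+ * the mask widths are assumed): if dbr_aeq_arm_reg = 0x2071c and
+ * BNXT_GRC_BASE_MASK covers the low 12 bits, the window base 0x20000 is
+ * programmed into the window register, and the register is then reached
+ * through the window at offset (0x2071c & BNXT_GRC_OFFSET_MASK) + 0x8000
+ * = 0x871c; the throttling register sits 4 bytes below at 0x8718.
+ */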
+
+static void bnxt_re_dbq_wq_task(struct work_struct *work)
+{
+ struct bnxt_re_dbq_work *dbq_work =
+ container_of(work, struct bnxt_re_dbq_work, work);
+ struct bnxt_re_dev *rdev;
+
+ rdev = dbq_work->rdev;
+
+ if (!rdev)
+ goto exit;
+ switch (dbq_work->event) {
+ case BNXT_RE_DBQ_EVENT_SCHED:
+ dev_dbg(rdev_to_dev(rdev), "%s: Handle DBQ Pacing event\n",
+ __func__);
+ if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx))
+ bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev);
+ else
+ bnxt_re_pacing_alert(rdev);
+ break;
+ case BNXT_RE_DBR_PACING_EVENT:
+ dev_dbg(rdev_to_dev(rdev), "%s: Sched interrupt/pacing worker\n",
+ __func__);
+ if (_is_chip_p7(rdev->chip_ctx))
+ bnxt_re_pacing_alert(rdev);
+ else if (!rdev->chip_ctx->modes.dbr_pacing_v0)
+ bnxt_re_hwrm_dbr_pacing_qcfg(rdev);
+ break;
+ case BNXT_RE_DBR_NQ_PACING_NOTIFICATION:
+ bnxt_re_handle_dbr_nq_pacing_notification(rdev);
+ /* Issue a broadcast event to notify the other functions
+ * that the primary has changed
+ */
+ bnxt_re_hwrm_dbr_pacing_broadcast_event(rdev);
+ break;
+ }
+exit:
+ kfree(dbq_work);
+}
+
+static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dbq_work *dbq_work;
+ struct bnxt_re_dev *rdev;
+ u16 event_id;
+ u32 data1;
+ u32 data2 = 0;
+
+ if (!cmpl) {
+ pr_err("Async event, bad completion\n");
+ return;
+ }
+
+ if (!en_info || !en_info->en_dev) {
+ pr_err("Async event, bad en_info or en_dev\n");
+ return;
+ }
+ rdev = en_info->rdev;
+
+ event_id = le16_to_cpu(cmpl->event_id);
+ data1 = le32_to_cpu(cmpl->event_data1);
+ data2 = le32_to_cpu(cmpl->event_data2);
+
+ if (!rdev || !rdev_to_dev(rdev)) {
+ dev_dbg(NULL, "Async event, bad rdev or netdev\n");
+ return;
+ }
+
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags) ||
+ !test_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
+ dev_dbg(NULL, "Async event, device already detached\n");
+ return;
+ }
+ dev_dbg(rdev_to_dev(rdev), "Async event_id = %d data1 = %d data2 = %d",
+ event_id, data1, data2);
+
+ switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE:
+ /* Not handling the event in older FWs */
+ if (!is_qport_service_type_supported(rdev))
+ break;
+ if (!rdev->dcb_wq)
+ break;
+ dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
+ if (!dcb_work)
+ break;
+
+ dcb_work->rdev = rdev;
+ memcpy(&dcb_work->cmpl, cmpl, sizeof(*cmpl));
+ INIT_WORK(&dcb_work->work, bnxt_re_dcb_wq_task);
+ queue_work(rdev->dcb_wq, &dcb_work->work);
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY:
+ if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ /* Set the rcfw flag to control commands sent to Bono */
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ /* Set the bnxt_re flag to control commands sent via the L2 driver */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ }
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD:
+ if (!rdev->dbr_pacing)
+ break;
+ dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC);
+ if (!dbq_work)
+ goto exit;
+ dbq_work->rdev = rdev;
+ dbq_work->event = BNXT_RE_DBR_PACING_EVENT;
+ INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
+ queue_work(rdev->dbq_wq, &dbq_work->work);
+ rdev->dbr_sw_stats->dbq_int_recv++;
+ break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE:
+ if (!rdev->dbr_pacing)
+ break;
+
+ dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC);
+ if (!dbq_work)
+ goto exit;
+ dbq_work->rdev = rdev;
+ dbq_work->event = BNXT_RE_DBR_NQ_PACING_NOTIFICATION;
+ INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
+ queue_work(rdev->dbq_wq, &dbq_work->work);
+ break;
+
+ default:
+ break;
+ }
+exit:
+ return;
+}
+
+static void bnxt_re_db_fifo_check(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_fifo_check_work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 pacing_save;
+
+ if (!mutex_trylock(&rdev->dbq_lock))
+ return;
+ pacing_data = rdev->qplib_res.pacing_data;
+ pacing_save = rdev->do_pacing_save;
+ __wait_for_fifo_occupancy_below_th(rdev);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+ if (rdev->dbr_recovery_on)
+ goto recovery_on;
+ if (pacing_save > rdev->dbr_def_do_pacing) {
+ /* Double the do_pacing value while congestion persists */
+ pacing_save = pacing_save << 1;
+ } else {
+ /*
+ * When new congestion is detected, increase do_pacing 8x and
+ * pacing_th 4x. The larger pacing_th gives the queue more space
+ * to oscillate down without going empty, and also more room to
+ * grow without raising another alarm.
+ */
+ pacing_save = pacing_save << 3;
+ pacing_data->pacing_th = rdev->pacing_algo_th * 4;
+ }
+
+ if (pacing_save > BNXT_RE_MAX_DBR_DO_PACING)
+ pacing_save = BNXT_RE_MAX_DBR_DO_PACING;
+
+ pacing_data->do_pacing = pacing_save;
+ rdev->do_pacing_save = pacing_data->do_pacing;
+ pacing_data->alarm_th =
+ pacing_data->pacing_th * BNXT_RE_PACING_ALARM_TH_MULTIPLE(rdev->chip_ctx);
+recovery_on:
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->dbq_pacing_time));
+ rdev->dbr_sw_stats->dbq_pacing_alerts++;
+ mutex_unlock(&rdev->dbq_lock);
+}
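+
+/*
+ * Escalation example (hypothetical values): with dbr_def_do_pacing = 0x10,
+ * the first alarm takes the "new congestion" branch: do_pacing becomes
+ * 0x10 << 3 = 0x80 and pacing_th is raised to 4 * pacing_algo_th. If
+ * another alarm fires while do_pacing is still elevated (0x80 > 0x10),
+ * it merely doubles to 0x100, and every result is clamped to
+ * BNXT_RE_MAX_DBR_DO_PACING before being published.
+ */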
+
+static void bnxt_re_pacing_timer_exp(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ dbq_pacing_work.work);
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ u32 read_val, fifo_occup;
+ struct bnxt_qplib_nq *nq;
+
+ if (!mutex_trylock(&rdev->dbq_lock))
+ return;
+
+ pacing_data = rdev->qplib_res.pacing_data;
+ read_val = readl_fbsd(rdev->en_dev->softc, rdev->dbr_db_fifo_reg_off, 0);
+ fifo_occup = pacing_data->fifo_max_depth -
+ ((read_val & pacing_data->fifo_room_mask) >>
+ pacing_data->fifo_room_shift);
+
+ if (fifo_occup > pacing_data->pacing_th)
+ goto restart_timer;
+
+ /*
+ * Instead of dropping straight back to the default do_pacing,
+ * reduce it by 1/8 and restart the timer.
+ */
+ pacing_data->do_pacing = pacing_data->do_pacing - (pacing_data->do_pacing >> 3);
+ pacing_data->do_pacing = max_t(u32, rdev->dbr_def_do_pacing, pacing_data->do_pacing);
+ /*
+ * If the fifo_occup is less than the interrupt enable threshold
+ * enable the interrupt on the primary PF.
+ */
+ if (rdev->dbq_int_disable && fifo_occup < rdev->pacing_en_int_th) {
+ if (bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
+ if (!rdev->chip_ctx->modes.dbr_pacing_v0) {
+ nq = &rdev->nqr.nq[0];
+ bnxt_re_set_dbq_throttling_reg(rdev, nq->ring_id,
+ rdev->dbq_watermark);
+ rdev->dbr_sw_stats->dbq_int_en++;
+ rdev->dbq_int_disable = false;
+ }
+ }
+ }
+ if (pacing_data->do_pacing <= rdev->dbr_def_do_pacing) {
+ bnxt_re_set_default_pacing_data(rdev);
+ rdev->dbr_sw_stats->dbq_pacing_complete++;
+ goto dbq_unlock;
+ }
+restart_timer:
+ schedule_delayed_work(&rdev->dbq_pacing_work,
+ msecs_to_jiffies(rdev->dbq_pacing_time));
+ bnxt_re_update_do_pacing_slabs(rdev);
+ rdev->dbr_sw_stats->dbq_pacing_resched++;
+dbq_unlock:
+ rdev->do_pacing_save = pacing_data->do_pacing;
+ mutex_unlock(&rdev->dbq_lock);
+}
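+
+/*
+ * Decay example (hypothetical values): each timer expiry with the FIFO
+ * below pacing_th multiplies do_pacing by 7/8, e.g. starting from 0x100:
+ *   0x100 - (0x100 >> 3) = 0xe0, then 0xe0 - 0x1c = 0xc4, ...
+ * with dbr_def_do_pacing as the floor. Once the value decays back to the
+ * default, bnxt_re_set_default_pacing_data() resets the thresholds and
+ * the timer is not rescheduled.
+ */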
+
+void bnxt_re_pacing_alert(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+
+ if (!rdev->dbr_pacing)
+ return;
+ mutex_lock(&rdev->dbq_lock);
+ pacing_data = rdev->qplib_res.pacing_data;
+
+ /*
+ * Increase the alarm_th to max so that other user lib instances do not
+ * keep alerting the driver.
+ */
+ pacing_data->alarm_th = pacing_data->fifo_max_depth;
+ pacing_data->do_pacing = BNXT_RE_MAX_DBR_DO_PACING;
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ schedule_work(&rdev->dbq_fifo_check_work);
+ mutex_unlock(&rdev->dbq_lock);
+}
+
+void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res)
+{
+ struct bnxt_re_dbq_work *dbq_work;
+ struct bnxt_re_dev *rdev;
+
+ rdev = container_of(res, struct bnxt_re_dev, qplib_res);
+
+ atomic_set(&rdev->dbq_intr_running, 1);
+
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ goto exit;
+ /* For newer FW, run the loop to send the DBQ event to the
+ * other functions
+ */
+ if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
+ !rdev->chip_ctx->modes.dbr_pacing_v0)
+ bnxt_re_set_dbq_throttling_for_non_primary(rdev);
+
+ dbq_work = kzalloc(sizeof(*dbq_work), GFP_ATOMIC);
+ if (!dbq_work)
+ goto exit;
+ dbq_work->rdev = rdev;
+ dbq_work->event = BNXT_RE_DBQ_EVENT_SCHED;
+ INIT_WORK(&dbq_work->work, bnxt_re_dbq_wq_task);
+ queue_work(rdev->dbq_wq, &dbq_work->work);
+ rdev->dbr_sw_stats->dbq_int_recv++;
+ rdev->dbq_int_disable = true;
+exit:
+ atomic_set(&rdev->dbq_intr_running, 0);
+}
+
+static void bnxt_re_free_msix(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ int rc;
+
+ rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "netdev %p free_msix failed! rc = 0x%x",
+ rdev->netdev, rc);
+}
+
+static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ int rc = 0, num_msix_want, num_msix_got;
+ struct bnxt_msix_entry *entry;
+
+ /*
+ * Request MSI-X based on the function type. This is
+ * a temporary solution to enable max VFs when NPAR is
+ * enabled.
+ * TODO - change the scheme with an adapter specific check
+ * as the latest adapters can support more NQs. For now
+ * this change satisfies all adapter versions.
+ */
+
+ if (rdev->is_virtfn)
+ num_msix_want = BNXT_RE_MAX_MSIX_VF;
+ else if (BNXT_EN_NPAR(en_dev))
+ num_msix_want = BNXT_RE_MAX_MSIX_NPAR_PF;
+ else if (_is_chip_gen_p5_p7(rdev->chip_ctx))
+ num_msix_want = rdev->num_msix_requested ?: BNXT_RE_MAX_MSIX_GEN_P5_PF;
+ else
+ num_msix_want = BNXT_RE_MAX_MSIX_PF;
+
+ /*
+ * Since MSI-X vectors are used for both NQs and the CREQ, try to
+ * allocate num_online_cpus + 1, accounting for the CREQ. This
+ * makes the number of MSI-X vectors for NQs match the number of
+ * CPUs and allows the system to be fully utilized.
+ */
+ num_msix_want = min_t(u32, num_msix_want, num_online_cpus() + 1);
+ num_msix_want = min_t(u32, num_msix_want, BNXT_RE_MAX_MSIX);
+ num_msix_want = max_t(u32, num_msix_want, BNXT_RE_MIN_MSIX);
+
+ entry = rdev->nqr.msix_entries;
+
+ num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
+ entry, num_msix_want);
+ if (num_msix_got < BNXT_RE_MIN_MSIX) {
+ rc = -EINVAL;
+ goto done;
+ }
+ if (num_msix_got != num_msix_want)
+ dev_warn(rdev_to_dev(rdev),
+ "bnxt_request_msix: wanted %d vectors, got %d\n",
+ num_msix_want, num_msix_got);
+
+ rdev->nqr.num_msix = num_msix_got;
+ return 0;
+done:
+ if (num_msix_got)
+ bnxt_re_free_msix(rdev);
+ return rc;
+}
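+
+/*
+ * Sizing example (the CPU count and per-type maximum are hypothetical):
+ * a gen-P5 PF with BNXT_RE_MAX_MSIX_GEN_P5_PF = 64 on an 8-CPU system
+ * requests min(64, 8 + 1) = 9 vectors, then clamps the result into
+ * [BNXT_RE_MIN_MSIX, BNXT_RE_MAX_MSIX]: one vector serves the CREQ and
+ * the remaining 8 give one NQ per CPU.
+ */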
+
+static int __wait_for_ib_unregister(struct bnxt_re_dev *rdev,
+ struct bnxt_re_en_dev_info *en_info)
+{
+ u64 timeout = 0;
+ u32 cur_prod = 0, cur_cons = 0;
+ int retry = 0, rc = 0, ret = 0;
+
+ cur_prod = rdev->rcfw.cmdq.hwq.prod;
+ cur_cons = rdev->rcfw.cmdq.hwq.cons;
+ timeout = msecs_to_jiffies(BNXT_RE_RECOVERY_IB_UNINIT_WAIT_TIME_MS);
+ retry = BNXT_RE_RECOVERY_IB_UNINIT_WAIT_RETRY;
+ /* During module exit, increase the retry count ten-fold (roughly
+ * 100 minutes in total) to wait as long as possible for
+ * ib_unregister() to complete.
+ */
+ if (rdev->mod_exit)
+ retry *= 10;
+ do {
+ /*
+ * The caller invokes this function with bnxt_re_mutex held, so
+ * release it to avoid holding the lock while sleeping.
+ */
+ mutex_unlock(&bnxt_re_mutex);
+ rc = wait_event_timeout(en_info->waitq,
+ en_info->ib_uninit_done,
+ timeout);
+ mutex_lock(&bnxt_re_mutex);
+
+ if (!bnxt_re_is_rdev_valid(rdev))
+ break;
+
+ if (rc)
+ break;
+
+ if (!RCFW_NO_FW_ACCESS(&rdev->rcfw)) {
+ /* No need to check for a cmdq stall during module exit;
+ * just wait for ib unregister to complete.
+ */
+ if (!rdev->mod_exit)
+ ret = __check_cmdq_stall(&rdev->rcfw, &cur_prod, &cur_cons);
+ if (ret || en_info->ib_uninit_done)
+ break;
+ }
+ } while (retry--);
+
+ return rc;
+}
+
+static int bnxt_re_handle_start(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_re_dev *rdev = NULL;
+ struct ifnet *real_dev;
+ struct bnxt_en_dev *en_dev;
+ struct ifnet *netdev;
+ int rc = 0;
+
+ if (!en_info || !en_info->en_dev) {
+ pr_err("Start, bad en_info or en_dev\n");
+ return -EINVAL;
+ }
+ netdev = en_info->en_dev->net;
+ if (en_info->rdev) {
+ dev_info(rdev_to_dev(en_info->rdev),
+ "%s: Device is already added adev %p rdev: %p\n",
+ __func__, adev, en_info->rdev);
+ return 0;
+ }
+
+ en_dev = en_info->en_dev;
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ rc = bnxt_re_add_device(&rdev, real_dev,
+ en_info->gsi_mode,
+ BNXT_RE_POST_RECOVERY_INIT,
+ en_info->wqe_mode,
+ en_info->num_msix_requested, adev);
+ if (rc) {
+ /* Add device failed. Unregister the device here;
+ * this has to be done explicitly as
+ * bnxt_re_stop would not have unregistered it.
+ */
+ rtnl_lock();
+ en_dev->en_ops->bnxt_unregister_device(en_dev, BNXT_ROCE_ULP);
+ rtnl_unlock();
+ mutex_lock(&bnxt_re_dev_lock);
+ gadd_dev_inprogress--;
+ mutex_unlock(&bnxt_re_dev_lock);
+ return rc;
+ }
+ rdev->adev = adev;
+ rtnl_lock();
+ bnxt_re_get_link_speed(rdev);
+ rtnl_unlock();
+ rc = bnxt_re_ib_init(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed ib_init\n");
+ return rc;
+ }
+ bnxt_re_ib_init_2(rdev);
+
+ return rc;
+}
+
+static void bnxt_re_stop(void *handle)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct ifnet *netdev;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_en_dev *en_dev;
+ int rc = 0;
+
+ rtnl_unlock();
+ mutex_lock(&bnxt_re_mutex);
+ if (!en_info || !en_info->en_dev) {
+ pr_err("Stop, bad en_info or en_dev\n");
+ goto exit;
+ }
+ netdev = en_info->en_dev->net;
+ rdev = en_info->rdev;
+ if (!rdev)
+ goto exit;
+
+ if (!bnxt_re_is_rdev_valid(rdev))
+ goto exit;
+
+ /*
+ * Check if fw has undergone reset or is in a fatal condition.
+ * If so, set flags so that no further commands are sent down to FW
+ */
+ en_dev = rdev->en_dev;
+ if (en_dev->en_state & BNXT_STATE_FW_FATAL_COND ||
+ en_dev->en_state & BNXT_STATE_FW_RESET_DET) {
+ /* Set the rcfw flag to control commands sent to Bono */
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+ /* Set the bnxt_re flag to control commands sent via the L2 driver */
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+ wake_up_all(&rdev->rcfw.cmdq.waitq);
+ }
+
+ if (test_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags))
+ goto exit;
+ set_bit(BNXT_RE_FLAG_STOP_IN_PROGRESS, &rdev->flags);
+
+ en_info->wqe_mode = rdev->chip_ctx->modes.wqe_mode;
+ en_info->gsi_mode = rdev->gsi_ctx.gsi_qp_mode;
+ en_info->num_msix_requested = rdev->num_msix_requested;
+ en_info->ib_uninit_done = false;
+
+ if (rdev->dbr_pacing)
+ bnxt_re_set_pacing_dev_state(rdev);
+
+ dev_info(rdev_to_dev(rdev), "%s: L2 driver notified to stop. "
+ "Attempting to stop and dispatching an event "
+ "to inform the stack\n", __func__);
+ init_waitqueue_head(&en_info->waitq);
+ /* Schedule a work item to handle IB UNINIT for recovery */
+ bnxt_re_schedule_work(rdev, NETDEV_UNREGISTER,
+ NULL, netdev, rdev->adev);
+ rc = __wait_for_ib_unregister(rdev, en_info);
+ if (!bnxt_re_is_rdev_valid(rdev))
+ goto exit;
+ if (!rc) {
+ dev_info(rdev_to_dev(rdev), "%s: Attempt to stop failed\n",
+ __func__);
+ bnxt_re_detach_err_device(rdev);
+ goto exit;
+ }
+ bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, rdev->adev);
+exit:
+ mutex_unlock(&bnxt_re_mutex);
+ /* Take rtnl_lock before returning; bnxt_re_stop is called with rtnl_lock held */
+ rtnl_lock();
+
+ return;
+}
+
+static void bnxt_re_start(void *handle)
+{
+ rtnl_unlock();
+ mutex_lock(&bnxt_re_mutex);
+ if (bnxt_re_handle_start((struct auxiliary_device *)handle))
+ pr_err("Failed to start RoCE device");
+ mutex_unlock(&bnxt_re_mutex);
+ /* Take rtnl_lock before returning; bnxt_re_start is called with rtnl_lock held */
+ rtnl_lock();
+ return;
+}
+
+static void bnxt_re_shutdown(void *p)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(p);
+ struct bnxt_re_dev *rdev;
+
+ if (!en_info) {
+ pr_err("Shutdown, bad en_info\n");
+ return;
+ }
+ rtnl_unlock();
+ mutex_lock(&bnxt_re_mutex);
+ rdev = en_info->rdev;
+ if (!rdev || !bnxt_re_is_rdev_valid(rdev))
+ goto exit;
+
+ /* rtnl_lock held by L2 before coming here */
+ bnxt_re_stopqps_and_ib_uninit(rdev);
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, rdev->adev);
+exit:
+ mutex_unlock(&bnxt_re_mutex);
+ rtnl_lock();
+ return;
+}
+
+static void bnxt_re_stop_irq(void *handle)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_qplib_rcfw *rcfw = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_nq *nq;
+ int indx;
+
+ if (!en_info) {
+ pr_err("Stop irq, bad en_info\n");
+ return;
+ }
+ rdev = en_info->rdev;
+
+ if (!rdev)
+ return;
+
+ rcfw = &rdev->rcfw;
+ for (indx = 0; indx < rdev->nqr.max_init; indx++) {
+ nq = &rdev->nqr.nq[indx];
+ mutex_lock(&nq->lock);
+ bnxt_qplib_nq_stop_irq(nq, false);
+ mutex_unlock(&nq->lock);
+ }
+
+ if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags))
+ bnxt_qplib_rcfw_stop_irq(rcfw, false);
+}
+
+static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
+ struct bnxt_msix_entry *msix_ent = NULL;
+ struct bnxt_qplib_rcfw *rcfw = NULL;
+ struct bnxt_re_dev *rdev;
+ struct bnxt_qplib_nq *nq;
+ int indx, rc, vec;
+
+ if (!en_info) {
+ pr_err("Start irq, bad en_info\n");
+ return;
+ }
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return;
+ msix_ent = rdev->nqr.msix_entries;
+ rcfw = &rdev->rcfw;
+
+ if (!ent) {
+ /* Not setting the f/w timeout bit in rcfw.
+ * During driver unload the first command
+ * to f/w will time out and that will set the
+ * timeout bit.
+ */
+ dev_err(rdev_to_dev(rdev), "Failed to re-start IRQs\n");
+ return;
+ }
+
+ /* Vectors may change after restart, so update with new vectors
+ * in device structure.
+ */
+ for (indx = 0; indx < rdev->nqr.num_msix; indx++)
+ rdev->nqr.msix_entries[indx].vector = ent[indx].vector;
+
+ if (test_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags)) {
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
+ false);
+ if (rc) {
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to reinit CREQ\n");
+ return;
+ }
+ }
+ for (indx = 0; indx < rdev->nqr.max_init; indx++) {
+ nq = &rdev->nqr.nq[indx];
+ vec = indx + 1;
+ rc = bnxt_qplib_nq_start_irq(nq, indx, msix_ent[vec].vector,
+ false);
+ if (rc) {
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to reinit NQ index %d\n", indx);
+ return;
+ }
+ }
+}
+
+/*
+ * Except for ulp_async_notifier, the remaining ulp_ops
+ * below are called with rtnl_lock held
+ */
+static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
+ .ulp_async_notifier = bnxt_re_async_notifier,
+ .ulp_stop = bnxt_re_stop,
+ .ulp_start = bnxt_re_start,
+ .ulp_shutdown = bnxt_re_shutdown,
+ .ulp_irq_stop = bnxt_re_stop_irq,
+ .ulp_irq_restart = bnxt_re_start_irq,
+};
+
+static inline const char *bnxt_re_netevent(unsigned long event)
+{
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_UP);
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_DOWN);
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGE);
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_REGISTER);
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_UNREGISTER);
+ BNXT_RE_NETDEV_EVENT(event, NETDEV_CHANGEADDR);
+ return "Unknown";
+}
+
+/* RoCE -> Net driver */
+
+/* Driver registration routines used to let the networking driver (bnxt_en)
+ * know that the RoCE driver is now installed.
+ */
+static void bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ int rc;
+
+ rtnl_lock();
+ rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
+ BNXT_ROCE_ULP);
+ rtnl_unlock();
+ if (rc)
+ dev_err(rdev_to_dev(rdev), "netdev %p unregister failed! rc = 0x%x",
+ rdev->en_dev->net, rc);
+
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+}
+
+static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ int rc = 0;
+
+ rtnl_lock();
+ rc = en_dev->en_ops->bnxt_register_device(en_dev,
+ BNXT_ROCE_ULP,
+ &bnxt_re_ulp_ops,
+ rdev->adev);
+ rtnl_unlock();
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "netdev %p register failed! rc = 0x%x",
+ rdev->netdev, rc);
+ return rc;
+ }
+
+ return rc;
+}
+
+static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_qplib_res *res;
+ u32 l2db_len = 0;
+ u32 offset = 0;
+ u32 barlen;
+ int rc;
+
+ res = &rdev->qplib_res;
+ en_dev = rdev->en_dev;
+ cctx = rdev->chip_ctx;
+
+ /* Issue qcfg */
+ rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
+ if (rc)
+ dev_info(rdev_to_dev(rdev),
+ "Couldn't get DB BAR size; low latency framework is disabled\n");
+ /* set register offsets for both UC and WC */
+ if (_is_chip_p7(cctx))
+ res->dpi_tbl.ucreg.offset = offset;
+ else
+ res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+ BNXT_QPLIB_DBR_PF_DB_OFFSET;
+ res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
+
+ /* If WC mapping is disabled by the L2 driver then en_dev->l2_db_size
+ * is equal to the DB-BAR actual size. This indicates that L2
+ * is mapping the entire bar as UC. The RoCE driver can't enable
+ * WC mapping in that case, and DB-push will be disabled.
+ */
+ barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
+ if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
+ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
+ dev_info(rdev_to_dev(rdev),
+ "Low latency framework is enabled\n");
+ }
+
+ return;
+}
+
+static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+{
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_en_dev *en_dev;
+
+ en_dev = rdev->en_dev;
+ cctx = rdev->chip_ctx;
+ cctx->modes.wqe_mode = _is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ cctx->modes.te_bypass = false;
+ if (bnxt_re_hwrm_qcaps(rdev))
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query hwrm qcaps\n");
+ /*
+ * TODO: Need a better mechanism for spreading the
+ * 512 extended PPP pages in the presence of VFs and
+ * NPAR; until then, push is not enabled.
+ */
+ if (_is_chip_p7(rdev->chip_ctx) && cctx->modes.db_push) {
+ if (rdev->is_virtfn || BNXT_EN_NPAR(en_dev))
+ cctx->modes.db_push = false;
+ }
+
+ rdev->roce_mode = en_dev->flags & BNXT_EN_FLAG_ROCE_CAP;
+ dev_dbg(rdev_to_dev(rdev),
+ "RoCE is supported on the device - caps:0x%x",
+ rdev->roce_mode);
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ rdev->roce_mode = BNXT_RE_FLAG_ROCEV2_CAP;
+ cctx->hw_stats_size = en_dev->hw_ring_stats_size;
+}
+
+static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_qplib_res *res;
+
+ if (!rdev->chip_ctx)
+ return;
+
+ res = &rdev->qplib_res;
+ bnxt_qplib_unmap_db_bar(res);
+
+ kfree(res->hctx);
+ res->rcfw = NULL;
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
+ chip_ctx = rdev->chip_ctx;
+ rdev->chip_ctx = NULL;
+ res->cctx = NULL;
+ res->hctx = NULL;
+ res->pdev = NULL;
+ res->netdev = NULL;
+ kfree(chip_ctx);
+}
+
+static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_en_dev *en_dev;
+ int rc;
+
+ en_dev = rdev->en_dev;
+ /* Supply pci device to qplib */
+ rdev->qplib_res.pdev = en_dev->pdev;
+ rdev->qplib_res.netdev = rdev->netdev;
+ rdev->qplib_res.en_dev = en_dev;
+
+ chip_ctx = kzalloc(sizeof(*chip_ctx), GFP_KERNEL);
+ if (!chip_ctx)
+ return -ENOMEM;
+ rdev->chip_ctx = chip_ctx;
+ rdev->qplib_res.cctx = chip_ctx;
+ rc = bnxt_re_query_hwrm_intf_version(rdev);
+ if (rc)
+ goto fail;
+ rdev->dev_attr = kzalloc(sizeof(*rdev->dev_attr), GFP_KERNEL);
+ if (!rdev->dev_attr) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ rdev->qplib_res.dattr = rdev->dev_attr;
+ rdev->qplib_res.rcfw = &rdev->rcfw;
+ rdev->qplib_res.is_vf = rdev->is_virtfn;
+
+ rdev->qplib_res.hctx = kzalloc(sizeof(*rdev->qplib_res.hctx),
+ GFP_KERNEL);
+ if (!rdev->qplib_res.hctx) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ bnxt_re_set_drv_mode(rdev, wqe_mode);
+
+ bnxt_re_set_db_offset(rdev);
+ rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_enable_atomic_ops_to_root(en_dev->pdev);
+ if (rc)
+ dev_dbg(rdev_to_dev(rdev),
+ "platform doesn't support global atomics");
+
+ return 0;
+fail:
+ kfree(rdev->chip_ctx);
+ rdev->chip_ctx = NULL;
+
+ kfree(rdev->dev_attr);
+ rdev->dev_attr = NULL;
+
+ kfree(rdev->qplib_res.hctx);
+ rdev->qplib_res.hctx = NULL;
+ return rc;
+}
+
+static u16 bnxt_re_get_rtype(struct bnxt_re_dev *rdev)
+{
+ return _is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_NQ :
+ HWRM_RING_ALLOC_INPUT_RING_TYPE_ROCE_CMPL;
+}
+
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
+{
+ int rc = -EINVAL;
+ struct hwrm_ring_free_input req = {0};
+ struct hwrm_ring_free_output resp;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+
+ if (!en_dev)
+ return rc;
+
+ /* Avoid unnecessary error messages during recovery:
+ * HW is in error state anyway, so don't send down the command. */
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
+ /* allocation had failed, no need to issue hwrm */
+ if (fw_ring_id == 0xffff)
+ return 0;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
+ req.ring_type = bnxt_re_get_rtype(rdev);
+ req.ring_id = cpu_to_le16(fw_ring_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to free HW ring with rc = 0x%x", rc);
+ return rc;
+ }
+ dev_dbg(rdev_to_dev(rdev), "HW ring freed with id = 0x%x\n",
+ fw_ring_id);
+
+ return rc;
+}
+
+static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
+ struct bnxt_re_ring_attr *ring_attr,
+ u16 *fw_ring_id)
+{
+ int rc = -EINVAL;
+ struct hwrm_ring_alloc_input req = {0};
+ struct hwrm_ring_alloc_output resp;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+
+ if (!en_dev)
+ return rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
+ req.flags = cpu_to_le16(ring_attr->flags);
+ req.enables = 0;
+ req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
+ if (ring_attr->pages > 1) {
+ /* Page size is in log2 units */
+ req.page_size = BNXT_PAGE_SHIFT;
+ req.page_tbl_depth = 1;
+ } else {
+ req.page_size = 4;
+ req.page_tbl_depth = 0;
+ }
+
+ req.fbo = 0;
+ /* Association of ring index with doorbell index and MSIX number */
+ req.logical_id = cpu_to_le16(ring_attr->lrid);
+ req.length = cpu_to_le32(ring_attr->depth + 1);
+ req.ring_type = ring_attr->type;
+ req.int_mode = ring_attr->mode;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to allocate HW ring with rc = 0x%x", rc);
+ return rc;
+ }
+ *fw_ring_id = le16_to_cpu(resp.ring_id);
+ dev_dbg(rdev_to_dev(rdev),
+ "HW ring allocated with id = 0x%x at slot 0x%x",
+ resp.ring_id, ring_attr->lrid);
+
+ return rc;
+}
+
+static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
+ u32 fw_stats_ctx_id, u16 tid)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_stat_ctx_free_input req = {0};
+ struct hwrm_stat_ctx_free_output resp;
+ struct bnxt_fw_msg fw_msg;
+ int rc = -EINVAL;
+
+ if (!en_dev)
+ return rc;
+
+ /* Avoid unnecessary error messages during recovery:
+ * HW is in error state anyway, so don't send down the command. */
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, tid);
+ req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to free HW stats ctx with rc = 0x%x", rc);
+ return rc;
+ }
+ dev_dbg(rdev_to_dev(rdev),
+ "HW stats ctx freed with id = 0x%x", fw_stats_ctx_id);
+
+ return rc;
+}
+
+static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, u16 tid)
+{
+ struct hwrm_stat_ctx_alloc_output resp = {};
+ struct hwrm_stat_ctx_alloc_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_qplib_stats *stat;
+ struct bnxt_qplib_ctx *hctx;
+ struct bnxt_fw_msg fw_msg;
+ int rc = 0;
+
+ hctx = rdev->qplib_res.hctx;
+ stat = (tid == 0xffff) ? &hctx->stats : &hctx->stats2;
+ stat->fw_id = INVALID_STATS_CTX_ID;
+
+ if (!en_dev)
+ return -EINVAL;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_STAT_CTX_ALLOC, -1, tid);
+ req.update_period_ms = cpu_to_le32(1000);
+ req.stats_dma_length = rdev->chip_ctx->hw_stats_size;
+ req.stats_dma_addr = cpu_to_le64(stat->dma_map);
+ req.stat_ctx_flags = HWRM_STAT_CTX_ALLOC_INPUT_STAT_CTX_FLAGS_ROCE;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to allocate HW stats ctx, rc = 0x%x", rc);
+ return rc;
+ }
+ stat->fw_id = le32_to_cpu(resp.stat_ctx_id);
+ dev_dbg(rdev_to_dev(rdev), "HW stats ctx allocated with id = 0x%x",
+ stat->fw_id);
+
+ return rc;
+}
+
+static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
+{
+ const struct bnxt_en_ops *en_ops;
+
+ if (rdev->is_virtfn ||
+ test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return;
+
+ memset(rdev->event_bitmap, 0, sizeof(rdev->event_bitmap));
+ en_ops = rdev->en_dev->en_ops;
+ if (en_ops->bnxt_register_fw_async_events
+ (rdev->en_dev, BNXT_ROCE_ULP,
+ (unsigned long *)rdev->event_bitmap,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE))
+ dev_err(rdev_to_dev(rdev),
+ "Failed to unregister async event");
+}
+
+static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
+{
+ const struct bnxt_en_ops *en_ops;
+
+ if (rdev->is_virtfn)
+ return;
+
+ rdev->event_bitmap[0] |=
+ BIT(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE) |
+ BIT(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY);
+
+ rdev->event_bitmap[2] |=
+ BIT(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT - 64);
+ rdev->event_bitmap[2] |=
+ BIT(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_THRESHOLD - 64) |
+ BIT(HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE - 64);
+ en_ops = rdev->en_dev->en_ops;
+ if (en_ops->bnxt_register_fw_async_events
+ (rdev->en_dev, BNXT_ROCE_ULP,
+ (unsigned long *)rdev->event_bitmap,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DOORBELL_PACING_NQ_UPDATE))
+ dev_err(rdev_to_dev(rdev),
+ "Failed to register async events");
+}
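+
+/*
+ * Bitmap layout illustration (the event ID value is hypothetical): assuming
+ * 32-bit bitmap words, IDs below 32 set bits in event_bitmap[0], while an
+ * ID of 69 (0x45) lands in word 69 / 32 = 2 at bit 69 - 64 = 5, which is
+ * exactly the BIT(id - 64) arithmetic used on event_bitmap[2] above.
+ */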
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_ver_get_output resp = {0};
+ struct hwrm_ver_get_input req = {0};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg;
+ int rc = 0;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_VER_GET, -1, -1);
+ req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
+ req.hwrm_intf_min = HWRM_VERSION_MINOR;
+ req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query HW version, rc = 0x%x", rc);
+ return rc;
+ }
+ cctx = rdev->chip_ctx;
+ cctx->hwrm_intf_ver = (u64) le16_to_cpu(resp.hwrm_intf_major) << 48 |
+ (u64) le16_to_cpu(resp.hwrm_intf_minor) << 32 |
+ (u64) le16_to_cpu(resp.hwrm_intf_build) << 16 |
+ le16_to_cpu(resp.hwrm_intf_patch);
+
+ cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout);
+
+ if (!cctx->hwrm_cmd_max_timeout)
+ cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+ cctx->chip_num = le16_to_cpu(resp.chip_num);
+ cctx->chip_rev = resp.chip_rev;
+ cctx->chip_metal = resp.chip_metal;
+ return 0;
+}
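+
+/*
+ * Packing example (the version numbers are hypothetical): a FW reporting
+ * major = 1, minor = 10, build = 2, patch = 95 yields
+ *   hwrm_intf_ver = 1 << 48 | 10 << 32 | 2 << 16 | 95
+ *                 = 0x0001000a0002005f
+ * so interface versions can be compared as plain 64-bit integers.
+ */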
+
+/* Query device config using common hwrm */
+static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
+ u32 *offset)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_func_qcfg_output resp = {0};
+ struct hwrm_func_qcfg_input req = {0};
+ struct bnxt_fw_msg fw_msg;
+ int rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query config, rc = %#x", rc);
+ return rc;
+ }
+
+ *db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
+ *offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+ return 0;
+}
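+
+/*
+ * Conversion example (the sizes are hypothetical): with 4 KB pages, a FW
+ * report of l2_doorbell_bar_size_kb = 5 gives 5 * 1024 = 5120 bytes,
+ * which PAGE_ALIGN() rounds up to 8192; the same rounding is applied to
+ * the legacy L2 doorbell size returned as the RoCE doorbell offset.
+ */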
+
+/* Query function capabilities using common hwrm */
+int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_func_qcaps_output resp = {0};
+ struct hwrm_func_qcaps_input req = {0};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg;
+ u8 push_enable = false;
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_QCAPS, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query capabilities, rc = %#x", rc);
+ return rc;
+ }
+ if (_is_chip_p7(rdev->chip_ctx))
+ push_enable =
+ (resp.flags_ext &
+ HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_PPP_PUSH_MODE_SUPPORTED) ?
+ true : false;
+ else
+ push_enable =
+ (resp.flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_WCB_PUSH_MODE) ?
+ true : false;
+ cctx->modes.db_push = push_enable;
+
+ cctx->modes.dbr_pacing =
+ resp.flags_ext & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT_DBR_PACING_SUPPORTED ?
+ true : false;
+ cctx->modes.dbr_pacing_ext =
+ resp.flags_ext2 &
+ HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ?
+ true : false;
+ cctx->modes.dbr_drop_recov =
+ (resp.flags_ext2 &
+ HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_SW_DBR_DROP_RECOVERY_SUPPORTED) ?
+ true : false;
+ cctx->modes.dbr_pacing_v0 =
+ (resp.flags_ext2 &
+ HWRM_FUNC_QCAPS_OUTPUT_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED) ?
+ true : false;
+ dev_dbg(rdev_to_dev(rdev),
+ "%s: cctx->modes.dbr_pacing = %d cctx->modes.dbr_pacing_ext = %d, dbr_drop_recov %d\n",
+ __func__, cctx->modes.dbr_pacing, cctx->modes.dbr_pacing_ext, cctx->modes.dbr_drop_recov);
+
+ return 0;
+}
+
+static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_db_pacing_data *pacing_data = rdev->qplib_res.pacing_data;
+ struct hwrm_func_dbr_pacing_qcfg_output resp = {0};
+ struct hwrm_func_dbr_pacing_qcfg_input req = {0};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_fw_msg fw_msg;
+ u32 primary_nq_id;
+ int rc;
+
+ cctx = rdev->chip_ctx;
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_DBR_PACING_QCFG, -1, -1);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Failed to query dbr pacing config, rc = %#x", rc);
+ return rc;
+ }
+
+ primary_nq_id = le32_to_cpu(resp.primary_nq_id);
+ if (primary_nq_id == 0xffffffff &&
+ !bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
+ dev_err(rdev_to_dev(rdev), "%s:%d Invoke bnxt_qplib_dbr_pacing_set_primary_pf with 1\n",
+ __func__, __LINE__);
+ bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 1);
+ }
+
+ if (bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
+ struct bnxt_qplib_nq *nq;
+
+ nq = &rdev->nqr.nq[0];
+ /* Reset the primary capability */
+ if (nq->ring_id != primary_nq_id)
+ bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0);
+ }
+
+ if ((resp.dbr_stat_db_fifo_reg &
+ HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK) ==
+ HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC)
+ cctx->dbr_stat_db_fifo =
+ resp.dbr_stat_db_fifo_reg &
+ ~HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+
+ if ((resp.dbr_throttling_aeq_arm_reg &
+ HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK)
+ == HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC) {
+ cctx->dbr_aeq_arm_reg = resp.dbr_throttling_aeq_arm_reg &
+ ~HWRM_FUNC_DBR_PACING_QCFG_OUTPUT_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK;
+ cctx->dbr_throttling_reg = cctx->dbr_aeq_arm_reg - 4;
+ }
+ pacing_data->fifo_max_depth = le32_to_cpu(resp.dbr_stat_db_max_fifo_depth);
+ if (!pacing_data->fifo_max_depth)
+ pacing_data->fifo_max_depth = BNXT_RE_MAX_FIFO_DEPTH(cctx);
+ pacing_data->fifo_room_mask = le32_to_cpu(resp.dbr_stat_db_fifo_reg_fifo_room_mask);
+ pacing_data->fifo_room_shift = resp.dbr_stat_db_fifo_reg_fifo_room_shift;
+ dev_dbg(rdev_to_dev(rdev),
+ "%s: nq:0x%x primary_pf:%d db_fifo:0x%x aeq_arm:0x%x "
+ "fifo_max_depth 0x%x, resp.dbr_stat_db_max_fifo_depth 0x%x\n",
+ __func__, resp.primary_nq_id, cctx->modes.dbr_primary_pf,
+ cctx->dbr_stat_db_fifo, cctx->dbr_aeq_arm_reg,
+ pacing_data->fifo_max_depth,
+ le32_to_cpu(resp.dbr_stat_db_max_fifo_depth));
+ return 0;
+}
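+
+/*
+ * Decode sketch (the register value and mask width are hypothetical): if
+ * dbr_throttling_aeq_arm_reg = 0x2071d and the ADDR_SPACE_MASK covers the
+ * low two bits, the masked-off bits select the address space (GRC here)
+ * and the remainder, 0x2071c, is the aeq_arm register offset; the
+ * throttling register is then derived as 0x2071c - 4 = 0x20718.
+ */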
+
+static int bnxt_re_hwrm_dbr_pacing_cfg(struct bnxt_re_dev *rdev, bool enable)
+{
+ struct hwrm_func_dbr_pacing_cfg_output resp = {0};
+ struct hwrm_func_dbr_pacing_cfg_input req = {0};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+ int rc;
+
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return 0;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_DBR_PACING_CFG, -1, -1);
+ if (enable) {
+ req.flags = HWRM_FUNC_DBR_PACING_CFG_INPUT_FLAGS_DBR_NQ_EVENT_ENABLE;
+ req.enables =
+ cpu_to_le32(HWRM_FUNC_DBR_PACING_CFG_INPUT_ENABLES_PRIMARY_NQ_ID_VALID |
+ HWRM_FUNC_DBR_PACING_CFG_INPUT_ENABLES_PACING_THRESHOLD_VALID);
+ } else {
+ req.flags = HWRM_FUNC_DBR_PACING_CFG_INPUT_FLAGS_DBR_NQ_EVENT_DISABLE;
+ }
+ req.primary_nq_id = cpu_to_le32(rdev->dbq_nq_id);
+ req.pacing_threshold = cpu_to_le32(rdev->dbq_watermark);
+ dev_dbg(rdev_to_dev(rdev), "%s: nq_id = 0x%x pacing_threshold = 0x%x",
+ __func__, req.primary_nq_id, req.pacing_threshold);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_dbg(rdev_to_dev(rdev),
+ "Failed to set dbr pacing config, rc = %#x", rc);
+ return rc;
+ }
+ return 0;
+}
+
+/* Net -> RoCE driver */
+
+/* Device */
+struct bnxt_re_dev *bnxt_re_from_netdev(struct ifnet *netdev)
+{
+ struct bnxt_re_dev *rdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(rdev, &bnxt_re_dev_list, list) {
+ if (rdev->netdev == netdev) {
+ rcu_read_unlock();
+ dev_dbg(rdev_to_dev(rdev),
+ "netdev (%p) found, ref_count = 0x%x",
+ netdev, atomic_read(&rdev->ref_count));
+ return rdev;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+static ssize_t show_rev(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "0x%x\n", rdev->en_dev->pdev->vendor);
+}
+
+
+static ssize_t show_hca(struct device *device, struct device_attribute *attr,
+ char *buf)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(device, ibdev.dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", rdev->ibdev.node_desc);
+}
+
+static DEVICE_ATTR(hw_rev, 0444, show_rev, NULL);
+static DEVICE_ATTR(hca_type, 0444, show_hca, NULL);
+static struct device_attribute *bnxt_re_attributes[] = {
+ &dev_attr_hw_rev,
+ &dev_attr_hca_type
+};
+
+int ib_register_device_compat(struct bnxt_re_dev *rdev)
+{
+ struct ib_device *ibdev = &rdev->ibdev;
+ char name[IB_DEVICE_NAME_MAX];
+
+ memset(name, 0, IB_DEVICE_NAME_MAX);
+ strlcpy(name, "bnxt_re%d", IB_DEVICE_NAME_MAX);
+
+ strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
+
+ return ib_register_device(ibdev, NULL);
+}
+
+static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
+{
+ struct ib_device *ibdev = &rdev->ibdev;
+ int ret = 0;
+
+ /* ib device init */
+ ibdev->owner = THIS_MODULE;
+ ibdev->uverbs_abi_ver = BNXT_RE_ABI_VERSION;
+ ibdev->node_type = RDMA_NODE_IB_CA;
+ strlcpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
+ strlen(BNXT_RE_DESC) + 5);
+ ibdev->phys_port_cnt = 1;
+
+ bnxt_qplib_get_guid(rdev->dev_addr, (u8 *)&ibdev->node_guid);
+
+ /* Data path irqs is one less than the max msix vectors */
+ ibdev->num_comp_vectors = rdev->nqr.num_msix - 1;
+ bnxt_re_set_dma_device(ibdev, rdev);
+ ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
+
+ /* User space */
+ ibdev->uverbs_cmd_mask =
+ (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
+ (1ull << IB_USER_VERBS_CMD_REG_MR) |
+ (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
+ (1ull << IB_USER_VERBS_CMD_REREG_MR) |
+ (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
+ (1ull << IB_USER_VERBS_CMD_ALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_DEALLOC_MW) |
+ (1ull << IB_USER_VERBS_CMD_CREATE_AH) |
+ (1ull << IB_USER_VERBS_CMD_MODIFY_AH) |
+ (1ull << IB_USER_VERBS_CMD_QUERY_AH) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_AH);
+
+ ibdev->uverbs_ex_cmd_mask = (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP);
+ ibdev->uverbs_cmd_mask |= (1ull << IB_USER_VERBS_CMD_POLL_CQ);
+
+#define bnxt_re_ib_ah bnxt_re_ah
+#define bnxt_re_ib_cq bnxt_re_cq
+#define bnxt_re_ib_pd bnxt_re_pd
+#define bnxt_re_ib_srq bnxt_re_srq
+#define bnxt_re_ib_ucontext bnxt_re_ucontext
+ INIT_IB_DEVICE_OPS(&ibdev->ops, bnxt_re, BNXT_RE);
+
+ ibdev->query_device = bnxt_re_query_device;
+ ibdev->modify_device = bnxt_re_modify_device;
+ ibdev->query_port = bnxt_re_query_port;
+ ibdev->modify_port = bnxt_re_modify_port;
+ ibdev->get_port_immutable = bnxt_re_get_port_immutable;
+ ibdev->query_pkey = bnxt_re_query_pkey;
+ ibdev->query_gid = bnxt_re_query_gid;
+ ibdev->get_netdev = bnxt_re_get_netdev;
+ ibdev->add_gid = bnxt_re_add_gid;
+ ibdev->del_gid = bnxt_re_del_gid;
+ ibdev->get_link_layer = bnxt_re_get_link_layer;
+ ibdev->alloc_pd = bnxt_re_alloc_pd;
+ ibdev->dealloc_pd = bnxt_re_dealloc_pd;
+ ibdev->create_ah = bnxt_re_create_ah;
+ ibdev->modify_ah = bnxt_re_modify_ah;
+ ibdev->query_ah = bnxt_re_query_ah;
+ ibdev->destroy_ah = bnxt_re_destroy_ah;
+ ibdev->create_srq = bnxt_re_create_srq;
+ ibdev->modify_srq = bnxt_re_modify_srq;
+ ibdev->query_srq = bnxt_re_query_srq;
+ ibdev->destroy_srq = bnxt_re_destroy_srq;
+ ibdev->post_srq_recv = bnxt_re_post_srq_recv;
+ ibdev->create_qp = bnxt_re_create_qp;
+ ibdev->modify_qp = bnxt_re_modify_qp;
+ ibdev->query_qp = bnxt_re_query_qp;
+ ibdev->destroy_qp = bnxt_re_destroy_qp;
+ ibdev->post_send = bnxt_re_post_send;
+ ibdev->post_recv = bnxt_re_post_recv;
+ ibdev->create_cq = bnxt_re_create_cq;
+ ibdev->modify_cq = bnxt_re_modify_cq;
+ ibdev->destroy_cq = bnxt_re_destroy_cq;
+ ibdev->resize_cq = bnxt_re_resize_cq;
+ ibdev->poll_cq = bnxt_re_poll_cq;
+ ibdev->req_notify_cq = bnxt_re_req_notify_cq;
+ ibdev->get_dma_mr = bnxt_re_get_dma_mr;
+ ibdev->get_hw_stats = bnxt_re_get_hw_stats;
+ ibdev->alloc_hw_stats = bnxt_re_alloc_hw_port_stats;
+ ibdev->dereg_mr = bnxt_re_dereg_mr;
+ ibdev->alloc_mr = bnxt_re_alloc_mr;
+ ibdev->map_mr_sg = bnxt_re_map_mr_sg;
+ ibdev->alloc_mw = bnxt_re_alloc_mw;
+ ibdev->dealloc_mw = bnxt_re_dealloc_mw;
+ ibdev->reg_user_mr = bnxt_re_reg_user_mr;
+ ibdev->rereg_user_mr = bnxt_re_rereg_user_mr;
+ ibdev->disassociate_ucontext = bnxt_re_disassociate_ucntx;
+ ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;
+ ibdev->dealloc_ucontext = bnxt_re_dealloc_ucontext;
+ ibdev->mmap = bnxt_re_mmap;
+ ibdev->process_mad = bnxt_re_process_mad;
+
+ ret = ib_register_device_compat(rdev);
+ return ret;
+}
+
+static void bnxt_re_dev_dealloc(struct bnxt_re_dev *rdev)
+{
+ int i = BNXT_RE_REF_WAIT_COUNT;
+
+ dev_dbg(rdev_to_dev(rdev), "%s: Removing device %p\n", __func__, rdev);
+ /* Wait for rdev refcount to come down */
+ while ((atomic_read(&rdev->ref_count) > 1) && i--)
+ msleep(100);
+
+ if (atomic_read(&rdev->ref_count) > 1)
+ dev_err(rdev_to_dev(rdev),
+ "Failed waiting for ref count to deplete %d",
+ atomic_read(&rdev->ref_count));
+
+ atomic_set(&rdev->ref_count, 0);
+ if_rele(rdev->netdev);
+ rdev->netdev = NULL;
+ synchronize_rcu();
+
+ kfree(rdev->gid_map);
+ kfree(rdev->dbg_stats);
+ ib_dealloc_device(&rdev->ibdev);
+}
+
+static struct bnxt_re_dev *bnxt_re_dev_alloc(struct ifnet *netdev,
+ struct bnxt_en_dev *en_dev)
+{
+ struct bnxt_re_dev *rdev;
+ u32 count;
+
+ /* Allocate bnxt_re_dev instance here */
+ rdev = (struct bnxt_re_dev *)compat_ib_alloc_device(sizeof(*rdev));
+ if (!rdev) {
+ pr_err("%s: bnxt_re_dev allocation failure!",
+ ROCE_DRV_MODULE_NAME);
+ return NULL;
+ }
+ /* Default values */
+ atomic_set(&rdev->ref_count, 0);
+ rdev->netdev = netdev;
+ dev_hold(rdev->netdev);
+ rdev->en_dev = en_dev;
+ rdev->id = rdev->en_dev->pdev->devfn;
+ INIT_LIST_HEAD(&rdev->qp_list);
+ mutex_init(&rdev->qp_lock);
+ mutex_init(&rdev->cc_lock);
+ mutex_init(&rdev->dbq_lock);
+ bnxt_re_clear_rsors_stat(&rdev->stats.rsors);
+ rdev->cosq[0] = rdev->cosq[1] = 0xFFFF;
+ rdev->min_tx_depth = 1;
+ rdev->stats.stats_query_sec = 1;
+ /* Disable priority vlan as the default mode is DSCP based PFC */
+ rdev->cc_param.disable_prio_vlan_tx = 1;
+
+ /* Initialize worker for DBR Pacing */
+ INIT_WORK(&rdev->dbq_fifo_check_work, bnxt_re_db_fifo_check);
+ INIT_DELAYED_WORK(&rdev->dbq_pacing_work, bnxt_re_pacing_timer_exp);
+ rdev->gid_map = kzalloc(sizeof(*(rdev->gid_map)) *
+ BNXT_RE_MAX_SGID_ENTRIES,
+ GFP_KERNEL);
+ if (!rdev->gid_map) {
+ ib_dealloc_device(&rdev->ibdev);
+ return NULL;
+ }
+ for (count = 0; count < BNXT_RE_MAX_SGID_ENTRIES; count++)
+ rdev->gid_map[count] = -1;
+
+ rdev->dbg_stats = kzalloc(sizeof(*rdev->dbg_stats), GFP_KERNEL);
+ if (!rdev->dbg_stats) {
+ ib_dealloc_device(&rdev->ibdev);
+ return NULL;
+ }
+
+ return rdev;
+}
+
+static int bnxt_re_handle_unaffi_async_event(
+ struct creq_func_event *unaffi_async)
+{
+ switch (unaffi_async->event) {
+ case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
+ case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int bnxt_re_handle_qp_async_event(void *qp_event, struct bnxt_re_qp *qp)
+{
+ struct creq_qp_error_notification *err_event;
+ struct ib_event event;
+ unsigned int flags;
+
+ if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
+ !qp->qplib_qp.is_user) {
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+ memset(&event, 0, sizeof(event));
+ event.device = &qp->rdev->ibdev;
+ event.element.qp = &qp->ib_qp;
+ event.event = IB_EVENT_QP_FATAL;
+
+ err_event = qp_event;
+ switch(err_event->res_err_state_reason) {
+ case CFCQ_RES_ERR_STATE_REASON_RES_EXCEED_MAX:
+ case CFCQ_RES_ERR_STATE_REASON_RES_PAYLOAD_LENGTH_MISMATCH:
+ case CFCQ_RES_ERR_STATE_REASON_RES_OPCODE_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_PSN_SEQ_ERROR_RETRY_LIMIT:
+ case CFCQ_RES_ERR_STATE_REASON_RES_RX_INVALID_R_KEY:
+ case CFCQ_RES_ERR_STATE_REASON_RES_RX_DOMAIN_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_RX_NO_PERMISSION:
+ case CFCQ_RES_ERR_STATE_REASON_RES_RX_RANGE_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_TX_INVALID_R_KEY:
+ case CFCQ_RES_ERR_STATE_REASON_RES_TX_DOMAIN_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_TX_NO_PERMISSION:
+ case CFCQ_RES_ERR_STATE_REASON_RES_TX_RANGE_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_IVALID_DUP_RKEY:
+ case CFCQ_RES_ERR_STATE_REASON_RES_UNALIGN_ATOMIC:
+ event.event = IB_EVENT_QP_ACCESS_ERR;
+ break;
+ case CFCQ_RES_ERR_STATE_REASON_RES_EXCEEDS_WQE:
+ case CFCQ_RES_ERR_STATE_REASON_RES_WQE_FORMAT_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_SRQ_LOAD_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_UNSUPPORTED_OPCODE:
+ case CFCQ_RES_ERR_STATE_REASON_RES_REM_INVALIDATE:
+ event.event = IB_EVENT_QP_REQ_ERR;
+ break;
+ case CFCQ_RES_ERR_STATE_REASON_RES_IRRQ_OFLOW:
+ case CFCQ_RES_ERR_STATE_REASON_RES_CMP_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_CQ_LOAD_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_TX_PCI_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_RX_PCI_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_MEMORY_ERROR:
+ case CFCQ_RES_ERR_STATE_REASON_RES_SRQ_ERROR:
+ event.event = IB_EVENT_QP_FATAL;
+ break;
+ default:
+ if (qp->qplib_qp.srq)
+ event.event = IB_EVENT_QP_LAST_WQE_REACHED;
+ break;
+ }
+
+ if (err_event->res_err_state_reason)
+ dev_err(rdev_to_dev(qp->rdev),
+ "%s %s qp_id: %d cons (%d %d) req (%d %d) res (%d %d)\n",
+ __func__, qp->qplib_qp.is_user ? "user" : "kernel",
+ qp->qplib_qp.id,
+ err_event->sq_cons_idx,
+ err_event->rq_cons_idx,
+ err_event->req_slow_path_state,
+ err_event->req_err_state_reason,
+ err_event->res_slow_path_state,
+ err_event->res_err_state_reason);
+
+ if (event.device && qp->ib_qp.event_handler)
+ qp->ib_qp.event_handler(&event, qp->ib_qp.qp_context);
+
+ return 0;
+}
+
+static int bnxt_re_handle_cq_async_error(void *event, struct bnxt_re_cq *cq)
+{
+ struct creq_cq_error_notification *cqerr;
+ bool send = false;
+
+ cqerr = event;
+ switch (cqerr->cq_err_reason) {
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_INVALID_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_OVERFLOW_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_REQ_CQ_LOAD_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_INVALID_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_OVERFLOW_ERROR:
+ case CREQ_CQ_ERROR_NOTIFICATION_CQ_ERR_REASON_RES_CQ_LOAD_ERROR:
+ send = true;
+ break;
+ default:
+ break;
+ }
+
+ if (send && cq->ibcq.event_handler) {
+ struct ib_event ibevent = {};
+
+ ibevent.event = IB_EVENT_CQ_ERR;
+ ibevent.element.cq = &cq->ibcq;
+ ibevent.device = &cq->rdev->ibdev;
+
+ dev_err(rdev_to_dev(cq->rdev),
+ "%s err reason %d\n", __func__, cqerr->cq_err_reason);
+ cq->ibcq.event_handler(&ibevent, cq->ibcq.cq_context);
+ }
+
+ cq->qplib_cq.is_cq_err_event = true;
+
+ return 0;
+}
+
+static int bnxt_re_handle_affi_async_event(struct creq_qp_event *affi_async,
+ void *obj)
+{
+ struct bnxt_qplib_qp *qplqp;
+ struct bnxt_qplib_cq *qplcq;
+ struct bnxt_re_qp *qp;
+ struct bnxt_re_cq *cq;
+ int rc = 0;
+ u8 event;
+
+ if (!obj)
+ return rc; /* QP was already dead, still return success */
+
+ event = affi_async->event;
+ switch (event) {
+ case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ qplqp = obj;
+ qp = container_of(qplqp, struct bnxt_re_qp, qplib_qp);
+ rc = bnxt_re_handle_qp_async_event(affi_async, qp);
+ break;
+ case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
+ qplcq = obj;
+ cq = container_of(qplcq, struct bnxt_re_cq, qplib_cq);
+ rc = bnxt_re_handle_cq_async_error(affi_async, cq);
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static int bnxt_re_aeq_handler(struct bnxt_qplib_rcfw *rcfw,
+ void *aeqe, void *obj)
+{
+ struct creq_func_event *unaffi_async;
+ struct creq_qp_event *affi_async;
+ u8 type;
+ int rc;
+
+ type = ((struct creq_base *)aeqe)->type;
+ if (type == CREQ_BASE_TYPE_FUNC_EVENT) {
+ unaffi_async = aeqe;
+ rc = bnxt_re_handle_unaffi_async_event(unaffi_async);
+ } else {
+ affi_async = aeqe;
+ rc = bnxt_re_handle_affi_async_event(affi_async, obj);
+ }
+
+ return rc;
+}
+
+static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *handle, u8 event)
+{
+ struct bnxt_re_srq *srq = to_bnxt_re(handle, struct bnxt_re_srq,
+ qplib_srq);
+ struct ib_event ib_event;
+
+ if (srq == NULL) {
+ pr_err("%s: SRQ is NULL, SRQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ return -EINVAL;
+ }
+ ib_event.device = &srq->rdev->ibdev;
+ ib_event.element.srq = &srq->ibsrq;
+ if (event == NQ_SRQ_EVENT_EVENT_SRQ_THRESHOLD_EVENT)
+ ib_event.event = IB_EVENT_SRQ_LIMIT_REACHED;
+ else
+ ib_event.event = IB_EVENT_SRQ_ERR;
+
+ if (srq->ibsrq.event_handler) {
+ /* Lock event_handler? */
+ (*srq->ibsrq.event_handler)(&ib_event,
+ srq->ibsrq.srq_context);
+ }
+ return 0;
+}
+
+static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *handle)
+{
+ struct bnxt_re_cq *cq = to_bnxt_re(handle, struct bnxt_re_cq,
+ qplib_cq);
+ u32 *cq_ptr;
+
+ if (cq == NULL) {
+ pr_err("%s: CQ is NULL, CQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ return -EINVAL;
+ }
+ /* CQ already in destroy path. Do not handle any more events */
+ if (handle->destroyed || !atomic_read(&cq->ibcq.usecnt)) {
+ if (!handle->destroyed)
+ dev_dbg(NULL, "%s: CQ being destroyed, CQN not handled",
+ ROCE_DRV_MODULE_NAME);
+ return 0;
+ }
+
+ if (cq->ibcq.comp_handler) {
+ if (cq->uctx_cq_page) {
+ cq_ptr = (u32 *)cq->uctx_cq_page;
+ *cq_ptr = cq->qplib_cq.toggle;
+ }
+ /* Lock comp_handler? */
+ (*cq->ibcq.comp_handler)(&cq->ibcq, cq->ibcq.cq_context);
+ }
+
+ return 0;
+}
+
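+/*
+ * Select the least-loaded NQ for a new CQ/SRQ with a linear scan under
+ * nqr.load_lock. 'load' is a simple usage count, incremented here and
+ * decremented in bnxt_re_put_nq(). Only num_msix - 1 NQs exist because
+ * the first MSI-X vector is reserved for the AEQ/CREQ.
+ */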
+struct bnxt_qplib_nq *bnxt_re_get_nq(struct bnxt_re_dev *rdev)
+{
+ int min, indx;
+
+ mutex_lock(&rdev->nqr.load_lock);
+ for (indx = 0, min = 0; indx < (rdev->nqr.num_msix - 1); indx++) {
+ if (rdev->nqr.nq[min].load > rdev->nqr.nq[indx].load)
+ min = indx;
+ }
+ rdev->nqr.nq[min].load++;
+ mutex_unlock(&rdev->nqr.load_lock);
+
+ return &rdev->nqr.nq[min];
+}
+
+void bnxt_re_put_nq(struct bnxt_re_dev *rdev, struct bnxt_qplib_nq *nq)
+{
+ mutex_lock(&rdev->nqr.load_lock);
+ nq->load--;
+ mutex_unlock(&rdev->nqr.load_lock);
+}
+
+static bool bnxt_re_check_min_attr(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_dev_attr *attr;
+ bool rc = true;
+
+ attr = rdev->dev_attr;
+
+ if (!attr->max_cq || !attr->max_qp ||
+ !attr->max_sgid || !attr->max_mr) {
+ dev_err(rdev_to_dev(rdev),"Insufficient RoCE resources");
+ dev_dbg(rdev_to_dev(rdev),
+ "max_cq = %d, max_qp = %d, max_dpi = %d, max_sgid = %d, max_mr = %d",
+ attr->max_cq, attr->max_qp, attr->max_dpi,
+ attr->max_sgid, attr->max_mr);
+ rc = false;
+ }
+ return rc;
+}
+
+static void bnxt_re_dispatch_event(struct ib_device *ibdev, struct ib_qp *qp,
+ u8 port_num, enum ib_event_type event)
+{
+ struct ib_event ib_event;
+
+ ib_event.device = ibdev;
+ if (qp) {
+ ib_event.element.qp = qp;
+ ib_event.event = event;
+ if (qp->event_handler)
+ qp->event_handler(&ib_event, qp->qp_context);
+ } else {
+ ib_event.element.port_num = port_num;
+ ib_event.event = event;
+ ib_dispatch_event(&ib_event);
+ }
+
+ dev_dbg(rdev_to_dev(to_bnxt_re_dev(ibdev, ibdev)),
+ "ibdev %p Event 0x%x port_num 0x%x", ibdev, event, port_num);
+}
+
+static bool bnxt_re_is_qp1_or_shadow_qp(struct bnxt_re_dev *rdev,
+ struct bnxt_re_qp *qp)
+{
+ if (rdev->gsi_ctx.gsi_qp_mode == BNXT_RE_GSI_MODE_ALL)
+ return (qp->ib_qp.qp_type == IB_QPT_GSI) ||
+ (qp == rdev->gsi_ctx.gsi_sqp);
+ else
+ return (qp->ib_qp.qp_type == IB_QPT_GSI);
+}
+
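+/*
+ * Move every QP except QP1 (and its shadow) to the error state. If the
+ * device has been detached, the state change is only recorded locally,
+ * since commands can no longer be sent to the firmware. The qp_lock is
+ * dropped every BNXT_RE_STOP_QPS_BUDGET QPs and the list walk restarts,
+ * so destroy/create verbs are not starved during a long sweep.
+ */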
+static void bnxt_re_stop_all_nonqp1_nonshadow_qps(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_qp *qpl_qp;
+ bool dev_detached = false;
+ struct ib_qp_attr qp_attr;
+ int num_qps_stopped = 0;
+ int mask = IB_QP_STATE;
+ struct bnxt_re_qp *qp;
+ unsigned long flags;
+
+ if (!rdev)
+ return;
+
+restart:
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ dev_detached = true;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ mutex_lock(&rdev->qp_lock);
+ list_for_each_entry(qp, &rdev->qp_list, list) {
+ qpl_qp = &qp->qplib_qp;
+ if (dev_detached || !bnxt_re_is_qp1_or_shadow_qp(rdev, qp)) {
+ if (qpl_qp->state !=
+ CMDQ_MODIFY_QP_NEW_STATE_RESET &&
+ qpl_qp->state !=
+ CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ if (dev_detached) {
+ /*
+ * Can't actually send the command down;
+ * mark the state for bookkeeping.
+ */
+ qpl_qp->state =
+ CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ qpl_qp->cur_qp_state = qpl_qp->state;
+ if (!qpl_qp->is_user) {
+ /* Add to flush list */
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_add_flush_qp(qpl_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ }
+ } else {
+ num_qps_stopped++;
+ bnxt_re_modify_qp(&qp->ib_qp,
+ &qp_attr, mask,
+ NULL);
+ }
+
+ bnxt_re_dispatch_event(&rdev->ibdev, &qp->ib_qp,
+ 1, IB_EVENT_QP_FATAL);
+ /*
+ * 1. Release qp_lock after a budget to unblock other verb
+ * requests (like qp_destroy) from the stack.
+ * 2. Re-traverse the qp_list from the start, as additions or
+ * deletions may have happened while qp_lock was released.
+ */
+ if (num_qps_stopped % BNXT_RE_STOP_QPS_BUDGET == 0) {
+ mutex_unlock(&rdev->qp_lock);
+ goto restart;
+ }
+ }
+ }
+ }
+
+ mutex_unlock(&rdev->qp_lock);
+}
+
+static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
+ struct bnxt_qplib_gid gid;
+ u16 gid_idx, index;
+ int rc = 0;
+
+ if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ return 0;
+
+ if (sgid_tbl == NULL) {
+ dev_err(rdev_to_dev(rdev), "QPLIB: SGID table not allocated");
+ return -EINVAL;
+ }
+
+ for (index = 0; index < sgid_tbl->active; index++) {
+ gid_idx = sgid_tbl->hw_id[index];
+
+ if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+ continue;
+ /* Only the VLAN enable setting of non-VLAN GIDs needs to be
+ * modified here, since it is already configured for VLAN GIDs
+ * when the GID is added.
+ *
+ * If disable_prio_vlan_tx is enabled, the VLAN entry must be
+ * removed from the sgid_tbl.
+ */
+ if (sgid_tbl->vlan[index])
+ continue;
+
+ memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
+
+ rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
+ rdev->dev_addr);
+ }
+
+ return rc;
+}
+
+static void bnxt_re_clear_cc(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param;
+
+ if (_is_chip_p7(rdev->chip_ctx)) {
+ cc_param->mask = CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP;
+ } else {
+ cc_param->mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (!is_qport_service_type_supported(rdev))
+ cc_param->mask |=
+ (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP);
+ }
+
+ cc_param->cur_mask = cc_param->mask;
+
+ if (bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param))
+ dev_err(rdev_to_dev(rdev), "Failed to modify cc\n");
+}
+
+static int bnxt_re_setup_cc(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_cc_param *cc_param = &rdev->cc_param;
+ int rc;
+
+ if (_is_chip_p7(rdev->chip_ctx)) {
+ cc_param->enable = 0x0;
+ cc_param->mask = CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP;
+ } else {
+ cc_param->enable = 0x1;
+ cc_param->mask = (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_CC_MODE |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ENABLE_CC |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_ECN);
+
+ if (!is_qport_service_type_supported(rdev))
+ cc_param->mask |=
+ (CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_VLAN_PCP |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_ALT_TOS_DSCP |
+ CMDQ_MODIFY_ROCE_CC_MODIFY_MASK_TOS_DSCP);
+ }
+
+ cc_param->cur_mask = cc_param->mask;
+
+ rc = bnxt_qplib_modify_cc(&rdev->qplib_res, cc_param);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to modify cc\n");
+ return rc;
+ }
+ /* Reset the programming mask */
+ cc_param->mask = 0;
+ if (cc_param->qp1_tos_dscp != cc_param->tos_dscp) {
+ cc_param->qp1_tos_dscp = cc_param->tos_dscp;
+ rc = bnxt_re_update_qp1_tos_dscp(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "%s:Failed to modify QP1:%d",
+ __func__, rc);
+ goto clear;
+ }
+ }
+ return 0;
+
+clear:
+ bnxt_re_clear_cc(rdev);
+ return rc;
+}
+
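+/*
+ * Query the firmware's DSCP-to-priority map over HWRM. The firmware
+ * DMAs the table into a coherent buffer allocated here; the entries are
+ * then copied into the caller's array, capped at the caller's count.
+ */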
+int bnxt_re_query_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
+ struct bnxt_re_dscp2pri *d2p, u16 *count,
+ u16 target_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_queue_dscp2pri_qcfg_input req;
+ struct hwrm_queue_dscp2pri_qcfg_output resp;
+ struct bnxt_re_dscp2pri *dscp2pri;
+ struct bnxt_fw_msg fw_msg;
+ u16 in_count = *count;
+ dma_addr_t dma_handle;
+ int rc = 0, i;
+ u16 data_len;
+ u8 *kmem;
+
+ data_len = *count * sizeof(*dscp2pri);
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ memset(&req, 0, sizeof(req));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_QUEUE_DSCP2PRI_QCFG, -1, target_id);
+ req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1;
+
+ kmem = dma_zalloc_coherent(&en_dev->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ dev_err(rdev_to_dev(rdev),
+ "dma_zalloc_coherent failure, length = %u\n",
+ (unsigned)data_len);
+ return -ENOMEM;
+ }
+ req.dest_data_addr = cpu_to_le64(dma_handle);
+ req.dest_data_buffer_size = cpu_to_le16(data_len);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc)
+ goto out;
+
+ /* Upload the DSCP-MASK-PRI tuple(s) */
+ dscp2pri = (struct bnxt_re_dscp2pri *)kmem;
+ for (i = 0; i < le16_to_cpu(resp.entry_cnt) && i < in_count; i++) {
+ d2p[i].dscp = dscp2pri->dscp;
+ d2p[i].mask = dscp2pri->mask;
+ d2p[i].pri = dscp2pri->pri;
+ dscp2pri++;
+ }
+ *count = le16_to_cpu(resp.entry_cnt);
+out:
+ dma_free_coherent(&en_dev->pdev->dev, data_len, kmem, dma_handle);
+ return rc;
+}
+
+int bnxt_re_prio_vlan_tx_update(struct bnxt_re_dev *rdev)
+{
+ /* Remove the VLAN from the GID entry */
+ if (rdev->cc_param.disable_prio_vlan_tx)
+ rdev->qplib_res.prio = false;
+ else
+ rdev->qplib_res.prio = true;
+
+ return bnxt_re_update_gid(rdev);
+}
+
+int bnxt_re_set_hwrm_dscp2pri(struct bnxt_re_dev *rdev,
+ struct bnxt_re_dscp2pri *d2p, u16 count,
+ u16 target_id)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_queue_dscp2pri_cfg_input req;
+ struct hwrm_queue_dscp2pri_cfg_output resp;
+ struct bnxt_fw_msg fw_msg;
+ struct bnxt_re_dscp2pri *dscp2pri;
+ int i, rc, data_len = 3 * 256;
+ dma_addr_t dma_handle;
+ u8 *kmem;
+
+ memset(&req, 0, sizeof(req));
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_QUEUE_DSCP2PRI_CFG, -1, target_id);
+ req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1;
+
+ kmem = dma_alloc_coherent(&en_dev->pdev->dev, data_len, &dma_handle,
+ GFP_KERNEL);
+ if (!kmem) {
+ dev_err(rdev_to_dev(rdev),
+ "dma_alloc_coherent failure, length = %u\n",
+ (unsigned)data_len);
+ return -ENOMEM;
+ }
+ req.src_data_addr = cpu_to_le64(dma_handle);
+
+ /* Download the DSCP-MASK-PRI tuple(s) */
+ dscp2pri = (struct bnxt_re_dscp2pri *)kmem;
+ for (i = 0; i < count; i++) {
+ dscp2pri->dscp = d2p[i].dscp;
+ dscp2pri->mask = d2p[i].mask;
+ dscp2pri->pri = d2p[i].pri;
+ dscp2pri++;
+ }
+
+ req.entry_cnt = cpu_to_le16(count);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ dma_free_coherent(&en_dev->pdev->dev, data_len, kmem, dma_handle);
+ return rc;
+}
+
+int bnxt_re_query_hwrm_qportcfg(struct bnxt_re_dev *rdev,
+ struct bnxt_re_tc_rec *tc_rec, u16 tid)
+{
+ u8 max_tc, tc, *qptr, *type_ptr0, *type_ptr1;
+ struct hwrm_queue_qportcfg_output resp = {0};
+ struct hwrm_queue_qportcfg_input req = {0};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+ bool def_init = false;
+ u8 *tmp_type;
+ u8 cos_id;
+ int rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_QUEUE_QPORTCFG,
+ -1, tid);
+ req.port_id = (tid == 0xFFFF) ? en_dev->pf_port_id : 1;
+ if (BNXT_EN_ASYM_Q(en_dev))
+ req.flags = htole32(HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX);
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc)
+ return rc;
+
+ if (!resp.max_configurable_queues)
+ return -EINVAL;
+
+ max_tc = resp.max_configurable_queues;
+ tc_rec->max_tc = max_tc;
+
+ if (resp.queue_cfg_info & HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_CFG_INFO_USE_PROFILE_TYPE)
+ tc_rec->serv_type_enabled = true;
+
+ qptr = &resp.queue_id0;
+ type_ptr0 = &resp.queue_id0_service_profile_type;
+ type_ptr1 = &resp.queue_id1_service_profile_type;
+ for (tc = 0; tc < max_tc; tc++) {
+ tmp_type = tc ? type_ptr1 + (tc - 1) : type_ptr0;
+
+ cos_id = *qptr++;
+ /* RoCE CoS queue is the first cos queue.
+ * For MP12 and MP17 order is 405 and 141015.
+ */
+ if (is_bnxt_roce_queue(rdev, *qptr, *tmp_type)) {
+ tc_rec->cos_id_roce = cos_id;
+ tc_rec->tc_roce = tc;
+ } else if (is_bnxt_cnp_queue(rdev, *qptr, *tmp_type)) {
+ tc_rec->cos_id_cnp = cos_id;
+ tc_rec->tc_cnp = tc;
+ } else if (!def_init) {
+ def_init = true;
+ tc_rec->tc_def = tc;
+ tc_rec->cos_id_def = cos_id;
+ }
+ qptr++;
+ }
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cos2bw_qcfg(struct bnxt_re_dev *rdev, u16 target_id,
+ struct bnxt_re_cos2bw_cfg *cfg)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_queue_cos2bw_qcfg_output resp;
+ struct hwrm_queue_cos2bw_qcfg_input req = {0};
+ struct bnxt_fw_msg fw_msg;
+ int rc, indx;
+ void *data;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_QUEUE_COS2BW_QCFG, -1, target_id);
+ req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1;
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc)
+ return rc;
+ data = &resp.queue_id0 + offsetof(struct bnxt_re_cos2bw_cfg,
+ queue_id);
+ for (indx = 0; indx < 8; indx++, data += (sizeof(cfg->cfg))) {
+ memcpy(&cfg->cfg, data, sizeof(cfg->cfg));
+ if (indx == 0)
+ cfg->queue_id = resp.queue_id0;
+ cfg++;
+ }
+
+ return rc;
+}
+
+int bnxt_re_hwrm_cos2bw_cfg(struct bnxt_re_dev *rdev, u16 target_id,
+ struct bnxt_re_cos2bw_cfg *cfg)
+{
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct hwrm_queue_cos2bw_cfg_input req = {0};
+ struct hwrm_queue_cos2bw_cfg_output resp = {0};
+ struct bnxt_fw_msg fw_msg;
+ void *data;
+ int indx;
+ int rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_QUEUE_COS2BW_CFG, -1, target_id);
+ req.port_id = (target_id == 0xFFFF) ? en_dev->pf_port_id : 1;
+
+ /* Chimp wants the enable bit set in order to retain the
+ * previous config done by the L2 driver.
+ */
+ for (indx = 0; indx < 8; indx++) {
+ if (cfg[indx].queue_id < 40) {
+ req.enables |= cpu_to_le32(
+ HWRM_QUEUE_COS2BW_CFG_INPUT_ENABLES_COS_QUEUE_ID0_VALID <<
+ indx);
+ }
+
+ data = (char *)&req.unused_0 + indx * (sizeof(*cfg) - 4);
+ memcpy(data, &cfg[indx].queue_id, sizeof(*cfg) - 4);
+ if (indx == 0) {
+ req.queue_id0 = cfg[0].queue_id;
+ req.unused_0 = 0;
+ }
+ }
+
+ memset(&resp, 0, sizeof(resp));
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ return rc;
+}
+
+int bnxt_re_host_pf_id_query(struct bnxt_re_dev *rdev,
+ struct bnxt_qplib_query_fn_info *fn_info,
+ u32 *pf_mask, u32 *first_pf)
+{
+ struct hwrm_func_host_pf_ids_query_output resp = {0};
+ struct hwrm_func_host_pf_ids_query_input req;
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg;
+ int rc;
+
+ memset(&fw_msg, 0, sizeof(fw_msg));
+ memset(&req, 0, sizeof(req));
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
+ HWRM_FUNC_HOST_PF_IDS_QUERY, -1, -1);
+ /* To query the info from the host EPs */
+ switch (fn_info->host) {
+ case HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_SOC:
+ case HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_0:
+ case HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_1:
+ case HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_2:
+ case HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_3:
+ req.host = fn_info->host;
+ break;
+ default:
+ req.host = HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_HOST_EP_0;
+ break;
+ }
+
+ req.filter = fn_info->filter;
+ if (req.filter > HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ROCE)
+ req.filter = HWRM_FUNC_HOST_PF_IDS_QUERY_INPUT_FILTER_ALL;
+
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+
+ *first_pf = le16_to_cpu(resp.first_pf_id);
+ *pf_mask = le16_to_cpu(resp.pf_ordinal_mask);
+
+ return rc;
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx;
+ struct bnxt_qplib_res *res;
+ u16 tid = 0xffff;
+
+ res = &rdev->qplib_res;
+ hctx = res->hctx;
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags)) {
+ bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id, tid);
+ bnxt_qplib_free_stat_mem(res, &hctx->stats);
+ }
+}
+
+static void bnxt_re_put_stats2_ctx(struct bnxt_re_dev *rdev)
+{
+ test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX2_ALLOC, &rdev->flags);
+}
+
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ctx *hctx;
+ struct bnxt_qplib_res *res;
+ u16 tid = 0xffff;
+ int rc;
+
+ res = &rdev->qplib_res;
+ hctx = res->hctx;
+
+ rc = bnxt_qplib_alloc_stat_mem(res->pdev, rdev->chip_ctx, &hctx->stats);
+ if (rc)
+ return -ENOMEM;
+ rc = bnxt_re_net_stats_ctx_alloc(rdev, tid);
+ if (rc)
+ goto free_stat_mem;
+ set_bit(BNXT_RE_FLAG_STATS_CTX_ALLOC, &rdev->flags);
+
+ return 0;
+
+free_stat_mem:
+ bnxt_qplib_free_stat_mem(res, &hctx->stats);
+
+ return rc;
+}
+
+static int bnxt_re_update_dev_attr(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ rc = bnxt_qplib_get_dev_attr(&rdev->rcfw);
+ if (rc)
+ return rc;
+ if (!bnxt_re_check_min_attr(rdev))
+ return -EINVAL;
+ return 0;
+}
+
+static void bnxt_re_free_tbls(struct bnxt_re_dev *rdev)
+{
+ bnxt_qplib_clear_tbls(&rdev->qplib_res);
+ bnxt_qplib_free_tbls(&rdev->qplib_res);
+}
+
+static int bnxt_re_alloc_init_tbls(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
+ u8 pppp_factor = 0;
+ int rc;
+
+ /*
+ * TODO: Need a better mechanism for spreading of the
+ * 512 extended PPP pages. For now, spreading it
+ * based on port_count
+ */
+ if (_is_chip_p7(chip_ctx) && chip_ctx->modes.db_push)
+ pppp_factor = rdev->en_dev->port_count;
+ rc = bnxt_qplib_alloc_tbls(&rdev->qplib_res, pppp_factor);
+ if (rc)
+ return rc;
+ bnxt_qplib_init_tbls(&rdev->qplib_res);
+ set_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags);
+
+ return 0;
+}
+
+static void bnxt_re_clean_nqs(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_nq *nq;
+ int i;
+
+ if (!rdev->nqr.max_init)
+ return;
+
+ for (i = (rdev->nqr.max_init - 1) ; i >= 0; i--) {
+ nq = &rdev->nqr.nq[i];
+ bnxt_qplib_disable_nq(nq);
+ bnxt_re_net_ring_free(rdev, nq->ring_id);
+ bnxt_qplib_free_nq_mem(nq);
+ }
+ rdev->nqr.max_init = 0;
+}
+
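+/*
+ * Allocate, register, and enable one NQ per available completion MSI-X
+ * vector (vector 0 is reserved for the AEQ). On partial failure,
+ * nqr.max_init records how many NQs came up so that teardown only
+ * touches initialized queues.
+ */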
+static int bnxt_re_setup_nqs(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_nq *nq;
+ int rc, i;
+ int depth;
+ u32 offt;
+ u16 vec;
+
+ mutex_init(&rdev->nqr.load_lock);
+ /*
+ * TODO: Optimize the depth based on the
+ * number of NQs.
+ */
+ depth = BNXT_QPLIB_NQE_MAX_CNT;
+ for (i = 0; i < rdev->nqr.num_msix - 1; i++) {
+ nq = &rdev->nqr.nq[i];
+ vec = rdev->nqr.msix_entries[i + 1].vector;
+ offt = rdev->nqr.msix_entries[i + 1].db_offset;
+ nq->hwq.max_elements = depth;
+ rc = bnxt_qplib_alloc_nq_mem(&rdev->qplib_res, nq);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to get mem for NQ %d, rc = 0x%x",
+ i, rc);
+ goto fail_mem;
+ }
+
+ rattr.dma_arr = nq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = nq->hwq.pbl[rdev->nqr.nq[i].hwq.level].pg_count;
+ rattr.type = bnxt_re_get_rtype(rdev);
+ rattr.mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ rattr.depth = nq->hwq.max_elements - 1;
+ rattr.lrid = rdev->nqr.msix_entries[i + 1].ring_idx;
+
+ /* Set DBR pacing capability on the first NQ ring only */
+ if (!i && bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx))
+ rattr.flags = HWRM_RING_ALLOC_INPUT_FLAGS_NQ_DBR_PACING;
+ else
+ rattr.flags = 0;
+
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &nq->ring_id);
+ if (rc) {
+ nq->ring_id = 0xffff; /* Invalid ring-id */
+ dev_err(rdev_to_dev(rdev),
+ "Failed to get fw id for NQ %d, rc = 0x%x",
+ i, rc);
+ goto fail_ring;
+ }
+
+ rc = bnxt_qplib_enable_nq(nq, i, vec, offt,
+ &bnxt_re_cqn_handler,
+ &bnxt_re_srqn_handler);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to enable NQ %d, rc = 0x%x", i, rc);
+ goto fail_en;
+ }
+ }
+
+ rdev->nqr.max_init = i;
+ return 0;
+fail_en:
+ /* nq still points at the i'th NQ, whose enable failed */
+ bnxt_re_net_ring_free(rdev, nq->ring_id);
+fail_ring:
+ bnxt_qplib_free_nq_mem(nq);
+fail_mem:
+ rdev->nqr.max_init = i;
+ return rc;
+}
+
+static void bnxt_re_sysfs_destroy_file(struct bnxt_re_dev *rdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++)
+ device_remove_file(&rdev->ibdev.dev, bnxt_re_attributes[i]);
+}
+
+static int bnxt_re_sysfs_create_file(struct bnxt_re_dev *rdev)
+{
+ int i, j, rc = 0;
+
+ for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
+ rc = device_create_file(&rdev->ibdev.dev,
+ bnxt_re_attributes[i]);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to create IB sysfs with rc = 0x%x", rc);
+ /* Must clean up all created device files */
+ for (j = 0; j < i; j++)
+ device_remove_file(&rdev->ibdev.dev,
+ bnxt_re_attributes[j]);
+ clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ ib_unregister_device(&rdev->ibdev);
+ return rc;
+ }
+ }
+ return 0;
+}
+
+/* Worker thread for polling periodic events. Now used for QoS programming. */
+static void bnxt_re_worker(struct work_struct *work)
+{
+ struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
+ worker.work);
+ int rc;
+
+ /* QoS runs on a 30s cadence for PFs */
+ if (!rdev->is_virtfn && !rdev->worker_30s--)
+ rdev->worker_30s = 30;
+ /* Use trylock for bnxt_re_dev_lock as it can be held for a long
+ * time by the debugfs show path while issuing HWRMs. If the
+ * debugfs name update is not done in this iteration, the driver
+ * will check for it again in the next schedule of the worker,
+ * i.e. after 1 sec.
+ */
+ if (mutex_trylock(&bnxt_re_dev_lock))
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ if (!rdev->stats.stats_query_sec)
+ goto resched;
+
+ if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags) &&
+ (rdev->is_virtfn ||
+ !_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags))) {
+ if (!(rdev->stats.stats_query_counter++ %
+ rdev->stats.stats_query_sec)) {
+ rc = bnxt_re_get_qos_stats(rdev);
+ if (rc && rc != -ENOMEM)
+ clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS,
+ &rdev->flags);
+ }
+ }
+
+resched:
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000));
+}
+
+static int bnxt_re_alloc_dbr_sw_stats_mem(struct bnxt_re_dev *rdev)
+{
+ if (!(rdev->dbr_drop_recov || rdev->dbr_pacing))
+ return 0;
+
+ rdev->dbr_sw_stats = kzalloc(sizeof(*rdev->dbr_sw_stats), GFP_KERNEL);
+ if (!rdev->dbr_sw_stats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void bnxt_re_free_dbr_sw_stats_mem(struct bnxt_re_dev *rdev)
+{
+ kfree(rdev->dbr_sw_stats);
+ rdev->dbr_sw_stats = NULL;
+}
+
+static int bnxt_re_initialize_dbr_drop_recov(struct bnxt_re_dev *rdev)
+{
+ rdev->dbr_drop_recov_wq =
+ create_singlethread_workqueue("bnxt_re_dbr_drop_recov");
+ if (!rdev->dbr_drop_recov_wq) {
+ dev_err(rdev_to_dev(rdev), "DBR Drop Revov wq alloc failed!");
+ return -EINVAL;
+ }
+ rdev->dbr_drop_recov = true;
+
+ /* Enable the configfs setting dbr_drop_recov by default */
+ rdev->user_dbr_drop_recov = true;
+
+ rdev->user_dbr_drop_recov_timeout = BNXT_RE_DBR_RECOV_USERLAND_TIMEOUT;
+ return 0;
+}
+
+static void bnxt_re_deinitialize_dbr_drop_recov(struct bnxt_re_dev *rdev)
+{
+ if (rdev->dbr_drop_recov_wq) {
+ flush_workqueue(rdev->dbr_drop_recov_wq);
+ destroy_workqueue(rdev->dbr_drop_recov_wq);
+ rdev->dbr_drop_recov_wq = NULL;
+ }
+ rdev->dbr_drop_recov = false;
+}
+
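+/*
+ * Set up doorbell (DBR) pacing: a dedicated page exposes the pacing
+ * data to applications, and GRC window 2 is mapped so the DB FIFO
+ * depth register can be read through the BAR at dbr_db_fifo_reg_off.
+ */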
+static int bnxt_re_initialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ /* Allocate a page for app use */
+ rdev->dbr_page = (void *)__get_free_page(GFP_KERNEL);
+ if (!rdev->dbr_page) {
+ dev_err(rdev_to_dev(rdev), "DBR page allocation failed!");
+ return -ENOMEM;
+ }
+ memset((u8 *)rdev->dbr_page, 0, PAGE_SIZE);
+ rdev->qplib_res.pacing_data = (struct bnxt_qplib_db_pacing_data *)rdev->dbr_page;
+ rc = bnxt_re_hwrm_dbr_pacing_qcfg(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query dbr pacing config %d\n", rc);
+ goto fail;
+ }
+ /* Create a work queue for scheduling dbq event */
+ rdev->dbq_wq = create_singlethread_workqueue("bnxt_re_dbq");
+ if (!rdev->dbq_wq) {
+ dev_err(rdev_to_dev(rdev), "DBQ wq alloc failed!");
+ rc = -ENOMEM;
+ goto fail;
+ }
+ /* MAP grc window 2 for reading db fifo depth */
+ writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 4, 0,
+ rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_BASE_MASK);
+ rdev->dbr_db_fifo_reg_off =
+ (rdev->chip_ctx->dbr_stat_db_fifo & BNXT_GRC_OFFSET_MASK) +
+ 0x2000;
+ rdev->qplib_res.pacing_data->grc_reg_offset = rdev->dbr_db_fifo_reg_off;
+
+ rdev->dbr_bar_addr =
+ pci_resource_start(rdev->qplib_res.pdev, 0) +
+ rdev->dbr_db_fifo_reg_off;
+
+ /* Percentage of DB FIFO */
+ rdev->dbq_watermark = BNXT_RE_PACING_DBQ_THRESHOLD;
+ rdev->pacing_en_int_th = BNXT_RE_PACING_EN_INT_THRESHOLD;
+ rdev->pacing_algo_th = BNXT_RE_PACING_ALGO_THRESHOLD;
+ rdev->dbq_pacing_time = BNXT_RE_DBR_INT_TIME;
+ rdev->dbr_def_do_pacing = BNXT_RE_DBR_DO_PACING_NO_CONGESTION;
+ rdev->do_pacing_save = rdev->dbr_def_do_pacing;
+ bnxt_re_set_default_pacing_data(rdev);
+ dev_dbg(rdev_to_dev(rdev), "Initialized db pacing\n");
+
+ return 0;
+fail:
+ free_page((u64)rdev->dbr_page);
+ rdev->dbr_page = NULL;
+ return rc;
+}
+
+static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ if (rdev->dbq_wq)
+ flush_workqueue(rdev->dbq_wq);
+
+ cancel_work_sync(&rdev->dbq_fifo_check_work);
+ cancel_delayed_work_sync(&rdev->dbq_pacing_work);
+
+ if (rdev->dbq_wq) {
+ destroy_workqueue(rdev->dbq_wq);
+ rdev->dbq_wq = NULL;
+ }
+
+ if (rdev->dbr_page)
+ free_page((u64)rdev->dbr_page);
+ rdev->dbr_page = NULL;
+ rdev->dbr_pacing = false;
+}
+
+/* enable_dbr_pacing needs to be done only for older FWs
+ * where the host selects the primary function, i.e. the
+ * pacing_ext flag is not set.
+ */
+int bnxt_re_enable_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_nq *nq;
+
+ nq = &rdev->nqr.nq[0];
+ rdev->dbq_nq_id = nq->ring_id;
+
+ if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
+ bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx)) {
+ if (bnxt_re_hwrm_dbr_pacing_cfg(rdev, true)) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to set dbr pacing config\n");
+ return -EIO;
+ }
+ /* MAP grc window 8 for ARMing the NQ DBQ */
+ writel_fbsd(rdev->en_dev->softc, BNXT_GRCPF_REG_WINDOW_BASE_OUT + 28, 0,
+ rdev->chip_ctx->dbr_aeq_arm_reg & BNXT_GRC_BASE_MASK);
+ rdev->dbr_aeq_arm_reg_off =
+ (rdev->chip_ctx->dbr_aeq_arm_reg &
+ BNXT_GRC_OFFSET_MASK) + 0x8000;
+ writel_fbsd(rdev->en_dev->softc, rdev->dbr_aeq_arm_reg_off, 0, 1);
+ }
+
+ return 0;
+}
+
+/* disable_dbr_pacing needs to be done only for older FWs
+ * where the host selects the primary function, i.e. the
+ * pacing_ext flag is not set.
+ */
+int bnxt_re_disable_dbr_pacing(struct bnxt_re_dev *rdev)
+{
+ int rc = 0;
+
+ if (!bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx) &&
+ bnxt_qplib_dbr_pacing_is_primary_pf(rdev->chip_ctx))
+ rc = bnxt_re_hwrm_dbr_pacing_cfg(rdev, false);
+
+ return rc;
+}
+
+static void bnxt_re_ib_uninit(struct bnxt_re_dev *rdev)
+{
+ if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
+ bnxt_re_sysfs_destroy_file(rdev);
+ /* Cleanup ib dev */
+ ib_unregister_device(&rdev->ibdev);
+ clear_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ return;
+ }
+}
+
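+/*
+ * Tear down the device in roughly the reverse order of
+ * bnxt_re_dev_init(), gated by the BNXT_RE_FLAG_* bits so it is safe
+ * to call on a partially initialized device from the init error paths.
+ */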
+static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
+{
+ struct bnxt_qplib_dpi *kdpi;
+ int rc, wait_count = BNXT_RE_RES_FREE_WAIT_COUNT;
+
+ bnxt_re_net_unregister_async_event(rdev);
+
+ bnxt_re_put_stats2_ctx(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_DEV_LIST_INITIALIZED,
+ &rdev->flags)) {
+ /* did the caller hold the lock? */
+ mutex_lock(&bnxt_re_dev_lock);
+ list_del_rcu(&rdev->list);
+ mutex_unlock(&bnxt_re_dev_lock);
+ }
+
+ bnxt_re_uninit_resolve_wq(rdev);
+ bnxt_re_uninit_dcb_wq(rdev);
+ bnxt_re_uninit_aer_wq(rdev);
+
+ bnxt_re_deinitialize_dbr_drop_recov(rdev);
+
+ if (bnxt_qplib_dbr_pacing_en(rdev->chip_ctx))
+ (void)bnxt_re_disable_dbr_pacing(rdev);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags)) {
+ cancel_delayed_work_sync(&rdev->worker);
+ }
+
+ /* Wait for ULPs to release references */
+ while (atomic_read(&rdev->stats.rsors.cq_count) && --wait_count)
+ usleep_range(500, 1000);
+ if (!wait_count)
+ dev_err(rdev_to_dev(rdev),
+ "CQ resources not freed by stack, count = 0x%x",
+ atomic_read(&rdev->stats.rsors.cq_count));
+
+ kdpi = &rdev->dpi_privileged;
+ if (kdpi->umdbr) { /* kernel DPI was allocated with success */
+ (void)bnxt_qplib_dealloc_dpi(&rdev->qplib_res, kdpi);
+ /*
+ * The driver just needs to know that no command failed
+ * during the driver load sequence and that the command below
+ * is indeed required. Piggybacking on the DPI allocation status.
+ */
+ }
+
+ /* Protect the device uninitialization and start_irq/stop_irq L2
+ * callbacks with rtnl lock to avoid race condition between these calls
+ */
+ rtnl_lock();
+ if (test_and_clear_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags))
+ bnxt_re_clean_nqs(rdev);
+ rtnl_unlock();
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_TBLS_ALLOCINIT, &rdev->flags))
+ bnxt_re_free_tbls(rdev);
+ if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags)) {
+ rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
+ if (rc)
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to deinitialize fw, rc = 0x%x", rc);
+ }
+
+ bnxt_re_put_stats_ctx(rdev);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags))
+ bnxt_qplib_free_hwctx(&rdev->qplib_res);
+
+ rtnl_lock();
+ if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags))
+ bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
+
+ if (rdev->dbr_pacing)
+ bnxt_re_deinitialize_dbr_pacing(rdev);
+
+ bnxt_re_free_dbr_sw_stats_mem(rdev);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_NET_RING_ALLOC, &rdev->flags))
+ bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags))
+ bnxt_qplib_free_rcfw_channel(&rdev->qplib_res);
+
+ if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags))
+ bnxt_re_free_msix(rdev);
+ rtnl_unlock();
+
+ bnxt_re_destroy_chip_ctx(rdev);
+
+ if (op_type != BNXT_RE_PRE_RECOVERY_REMOVE) {
+ if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED,
+ &rdev->flags))
+ bnxt_re_unregister_netdev(rdev);
+ }
+}
+
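+/*
+ * Bring-up sequence: register with the L2 driver, set up the chip
+ * context and MSI-X vectors, establish the RCFW channel to firmware,
+ * then allocate stats/context memory, tables, and NQs. Each completed
+ * step sets a BNXT_RE_FLAG_* bit that bnxt_re_dev_uninit() keys off.
+ */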
+static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type, u8 wqe_mode)
+{
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_creq_ctx *creq;
+ int vec, offset;
+ int rc = 0;
+
+ if (op_type != BNXT_RE_POST_RECOVERY_INIT) {
+ /* Register a new RoCE device instance with the netdev */
+ rc = bnxt_re_register_netdev(rdev);
+ if (rc)
+ return -EINVAL;
+ }
+ set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+
+ rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Failed to get chip context rc 0x%x", rc);
+ bnxt_re_unregister_netdev(rdev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+ rc = -EINVAL;
+ return rc;
+ }
+
+ /* Protect the device initialization and start_irq/stop_irq L2 callbacks
+ * with rtnl lock to avoid race condition between these calls
+ */
+ rtnl_lock();
+ rc = bnxt_re_request_msix(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Requesting MSI-X vectors failed with rc = 0x%x", rc);
+ rc = -EINVAL;
+ goto release_rtnl;
+ }
+ set_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags);
+
+ /* Establish the RCFW communication channel to initialize the
+ * context memory for the function and all child VFs.
+ */
+ rc = bnxt_qplib_alloc_rcfw_channel(&rdev->qplib_res);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to alloc mem for rcfw, rc = %#x\n", rc);
+ goto release_rtnl;
+ }
+ set_bit(BNXT_RE_FLAG_ALLOC_RCFW, &rdev->flags);
+
+ creq = &rdev->rcfw.creq;
+ rattr.dma_arr = creq->hwq.pbl[PBL_LVL_0].pg_map_arr;
+ rattr.pages = creq->hwq.pbl[creq->hwq.level].pg_count;
+ rattr.type = bnxt_re_get_rtype(rdev);
+ rattr.mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
+ rattr.depth = BNXT_QPLIB_CREQE_MAX_CNT - 1;
+ rattr.lrid = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].ring_idx;
+ rc = bnxt_re_net_ring_alloc(rdev, &rattr, &creq->ring_id);
+ if (rc) {
+ creq->ring_id = 0xffff;
+ dev_err(rdev_to_dev(rdev),
+ "Failed to allocate CREQ fw id with rc = 0x%x", rc);
+ goto release_rtnl;
+ }
+
+ if (!rdev->chip_ctx)
+ goto release_rtnl;
+ /* Program the NQ ID for DBQ notification */
+ if (rdev->chip_ctx->modes.dbr_pacing_v0 ||
+ bnxt_qplib_dbr_pacing_en(rdev->chip_ctx) ||
+ bnxt_qplib_dbr_pacing_ext_en(rdev->chip_ctx)) {
+ rc = bnxt_re_initialize_dbr_pacing(rdev);
+ if (!rc)
+ rdev->dbr_pacing = true;
+ else
+ rdev->dbr_pacing = false;
+ dev_dbg(rdev_to_dev(rdev), "%s: initialize db pacing ret %d\n",
+ __func__, rc);
+ }
+
+ vec = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].vector;
+ offset = rdev->nqr.msix_entries[BNXT_RE_AEQ_IDX].db_offset;
+ rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw, vec, offset,
+ &bnxt_re_aeq_handler);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to enable RCFW channel with rc = 0x%x", rc);
+ goto release_rtnl;
+ }
+ set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags);
+
+ rc = bnxt_re_update_dev_attr(rdev);
+ if (rc)
+ goto release_rtnl;
+ bnxt_re_set_resource_limits(rdev);
+ if (!rdev->is_virtfn && !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to alloc hw contexts, rc = 0x%x", rc);
+ goto release_rtnl;
+ }
+ set_bit(BNXT_RE_FLAG_ALLOC_CTX, &rdev->flags);
+ }
+
+ rc = bnxt_re_get_stats_ctx(rdev);
+ if (rc)
+ goto release_rtnl;
+
+ rc = bnxt_qplib_init_rcfw(&rdev->rcfw, rdev->is_virtfn);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to initialize fw with rc = 0x%x", rc);
+ goto release_rtnl;
+ }
+ set_bit(BNXT_RE_FLAG_RCFW_CHANNEL_INIT, &rdev->flags);
+
+ /* Base the resource counts on the 'new' device caps */
+ rc = bnxt_re_update_dev_attr(rdev);
+ if (rc)
+ goto release_rtnl;
+ rc = bnxt_re_alloc_init_tbls(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "tbls alloc-init failed rc = %#x",
+ rc);
+ goto release_rtnl;
+ }
+ rc = bnxt_re_setup_nqs(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev), "NQs alloc-init failed rc = %#x\n",
+ rc);
+ if (rdev->nqr.max_init == 0)
+ goto release_rtnl;
+
+ dev_warn(rdev_to_dev(rdev),
+ "expected nqs %d available nqs %d\n",
+ rdev->nqr.num_msix, rdev->nqr.max_init);
+ }
+ set_bit(BNXT_RE_FLAG_SETUP_NQ, &rdev->flags);
+ rtnl_unlock();
+
+ rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res, &rdev->dpi_privileged,
+ rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
+ if (rc)
+ goto fail;
+
+ if (rdev->dbr_pacing)
+ bnxt_re_enable_dbr_pacing(rdev);
+
+ if (rdev->chip_ctx->modes.dbr_drop_recov)
+ bnxt_re_initialize_dbr_drop_recov(rdev);
+
+ rc = bnxt_re_alloc_dbr_sw_stats_mem(rdev);
+ if (rc)
+ goto fail;
+
+ /* This block of code is needed for error recovery support */
+ if (!rdev->is_virtfn) {
+ struct bnxt_re_tc_rec *tc_rec;
+
+ tc_rec = &rdev->tc_rec[0];
+ rc = bnxt_re_query_hwrm_qportcfg(rdev, tc_rec, 0xFFFF);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query port config rc:%d", rc);
+ return rc;
+ }
+
+ /* Query f/w defaults of CC params */
+ rc = bnxt_qplib_query_cc_param(&rdev->qplib_res, &rdev->cc_param);
+ if (rc)
+ dev_warn(rdev_to_dev(rdev),
+ "Failed to query CC defaults\n");
+ rdev->num_vfs = pci_num_vf(rdev->en_dev->pdev);
+ if (rdev->num_vfs) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res);
+ }
+ }
+ INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
+ set_bit(BNXT_RE_FLAG_WORKER_REG, &rdev->flags);
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(1000));
+
+ bnxt_re_init_dcb_wq(rdev);
+ bnxt_re_init_aer_wq(rdev);
+ bnxt_re_init_resolve_wq(rdev);
+ mutex_lock(&bnxt_re_dev_lock);
+ list_add_tail_rcu(&rdev->list, &bnxt_re_dev_list);
+ /* Added to the list, not in progress anymore */
+ gadd_dev_inprogress--;
+ set_bit(BNXT_RE_FLAG_DEV_LIST_INITIALIZED, &rdev->flags);
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ return rc;
+release_rtnl:
+ rtnl_unlock();
+fail:
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+
+ return rc;
+}
+
+static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+{
+ int rc = 0;
+
+ rc = bnxt_re_register_ib(rdev);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Register IB failed with rc = 0x%x", rc);
+ goto fail;
+ }
+ if (bnxt_re_sysfs_create_file(rdev)) {
+ bnxt_re_stopqps_and_ib_uninit(rdev);
+ goto fail;
+ }
+
+ set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
+ set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
+ set_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
+
+ return rc;
+fail:
+ bnxt_re_dev_uninit(rdev, BNXT_RE_COMPLETE_REMOVE);
+ return rc;
+}
+
+/* wrapper for ib_init funcs */
+int _bnxt_re_ib_init(struct bnxt_re_dev *rdev)
+{
+ return bnxt_re_ib_init(rdev);
+}
+
+/* wrapper for aux init funcs */
+int _bnxt_re_ib_init2(struct bnxt_re_dev *rdev)
+{
+ bnxt_re_ib_init_2(rdev);
+ return 0; /* explicit return for future-proofing */
+}
+
+static void bnxt_re_dev_unreg(struct bnxt_re_dev *rdev)
+{
+ bnxt_re_dev_dealloc(rdev);
+}
+
+static int bnxt_re_dev_reg(struct bnxt_re_dev **rdev, struct ifnet *netdev,
+ struct bnxt_en_dev *en_dev)
+{
+ struct ifnet *realdev = NULL;
+
+ realdev = netdev;
+ if (realdev)
+ dev_dbg(NULL, "%s: realdev = %p netdev = %p\n", __func__,
+ realdev, netdev);
+ /*
+ * Note:
+ * The first argument to bnxt_re_dev_alloc() is 'netdev' and
+ * not 'realdev', since in the case of bonding we want to
+ * register the bonded virtual netdev (master) to the ib stack.
+ * And 'en_dev' (for L2/PCI communication) is the first slave
+ * device (PF0 on the card).
+ * In the case of a regular netdev, both netdev and the en_dev
+ * correspond to the same device.
+ */
+ *rdev = bnxt_re_dev_alloc(netdev, en_dev);
+ if (!*rdev) {
+ pr_err("%s: netdev %p not handled",
+ ROCE_DRV_MODULE_NAME, netdev);
+ return -ENOMEM;
+ }
+ bnxt_re_hold(*rdev);
+
+ return 0;
+}
+
+void bnxt_re_get_link_speed(struct bnxt_re_dev *rdev)
+{
+ rdev->espeed = rdev->en_dev->espeed;
+}
+
+void bnxt_re_stopqps_and_ib_uninit(struct bnxt_re_dev *rdev)
+{
+ dev_dbg(rdev_to_dev(rdev), "%s: Stopping QPs, IB uninit on rdev: %p\n",
+ __func__, rdev);
+ bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
+ bnxt_re_ib_uninit(rdev);
+}
+
+void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
+ struct auxiliary_device *aux_dev)
+{
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_rcfw *rcfw;
+
+ rcfw = &rdev->rcfw;
+ cmdq = &rcfw->cmdq;
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
+
+ dev_dbg(rdev_to_dev(rdev), "%s: Removing rdev: %p\n", __func__, rdev);
+ bnxt_re_dev_uninit(rdev, op_type);
+ en_info = auxiliary_get_drvdata(aux_dev);
+ if (en_info) {
+ rtnl_lock();
+ en_info->rdev = NULL;
+ rtnl_unlock();
+ if (op_type != BNXT_RE_PRE_RECOVERY_REMOVE) {
+ clear_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags);
+ clear_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags);
+ clear_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags);
+ }
+ }
+ bnxt_re_dev_unreg(rdev);
+}
+
+int bnxt_re_add_device(struct bnxt_re_dev **rdev,
+ struct ifnet *netdev,
+ u8 qp_mode, u8 op_type, u8 wqe_mode,
+ u32 num_msix_requested,
+ struct auxiliary_device *aux_dev)
+{
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev;
+ int rc = 0;
+
+ en_info = auxiliary_get_drvdata(aux_dev);
+ en_dev = en_info->en_dev;
+
+ mutex_lock(&bnxt_re_dev_lock);
+ /* Check if driver already in mod exit and aux_dev is valid */
+ if (gmod_exit || !aux_dev) {
+ mutex_unlock(&bnxt_re_dev_lock);
+ return -ENODEV;
+ }
+ /* Add device in progress */
+ gadd_dev_inprogress++;
+ mutex_unlock(&bnxt_re_dev_lock);
+
+ rc = bnxt_re_dev_reg(rdev, netdev, en_dev);
+ if (rc) {
+ dev_dbg(NULL, "Failed to add device for netdev %p\n",
+ netdev);
+ /*
+ * For BNXT_RE_POST_RECOVERY_INIT special case
+ * called from bnxt_re_start, the work is
+ * complete only after, bnxt_re_start completes
+ * bnxt_unregister_device in case of failure.
+ * So bnxt_re_start will decrement gadd_dev_inprogress
+ * in case of failure.
+ */
+ if (op_type != BNXT_RE_POST_RECOVERY_INIT) {
+ mutex_lock(&bnxt_re_dev_lock);
+ gadd_dev_inprogress--;
+ mutex_unlock(&bnxt_re_dev_lock);
+ }
+ return rc;
+ }
+
+ if (rc != 0)
+ goto ref_error;
+
+ /*
+ * num_msix_requested = BNXT_RE_MSIX_FROM_MOD_PARAM indicates fresh driver load.
+ * Otherwise, this invocation can be the result of lag create / destroy,
+ * error recovery, hot fw upgrade, etc.
+ */
+ if (num_msix_requested == BNXT_RE_MSIX_FROM_MOD_PARAM) {
+ if (bnxt_re_probe_count < BNXT_RE_MAX_DEVICES)
+ num_msix_requested = max_msix_vec[bnxt_re_probe_count++];
+ else
+ /* Consider as default when probe_count exceeds its limit */
+ num_msix_requested = 0;
+
+ /* if user specifies only one value, use the same for all PFs */
+ if (max_msix_vec_argc == 1)
+ num_msix_requested = max_msix_vec[0];
+ }
+
+ (*rdev)->num_msix_requested = num_msix_requested;
+ (*rdev)->gsi_ctx.gsi_qp_mode = qp_mode;
+ (*rdev)->adev = aux_dev;
+ (*rdev)->dev_addr = en_dev->softc->func.mac_addr;
+ /* Before updating the rdev pointer in bnxt_re_en_dev_info structure,
+ * take the rtnl lock to avoid accessing invalid rdev pointer from
+ * L2 ULP callbacks. This is applicable in all the places where rdev
+ * pointer is updated in bnxt_re_en_dev_info.
+ */
+ rtnl_lock();
+ en_info->rdev = *rdev;
+ rtnl_unlock();
+ rc = bnxt_re_dev_init(*rdev, op_type, wqe_mode);
+ if (rc) {
+ref_error:
+ bnxt_re_dev_unreg(*rdev);
+ *rdev = NULL;
+ /*
+ * For BNXT_RE_POST_RECOVERY_INIT special case
+ * called from bnxt_re_start, the work is
+ * complete only after, bnxt_re_start completes
+ * bnxt_unregister_device in case of failure.
+ * So bnxt_re_start will decrement gadd_dev_inprogress
+ * in case of failure.
+ */
+ if (op_type != BNXT_RE_POST_RECOVERY_INIT) {
+ mutex_lock(&bnxt_re_dev_lock);
+ gadd_dev_inprogress--;
+ mutex_unlock(&bnxt_re_dev_lock);
+ }
+ }
+ if (!rc) {
+ dev_dbg(rdev_to_dev(*rdev), "%s: Adding rdev: %p\n",
+ __func__, *rdev);
+ set_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags);
+ }
+ return rc;
+}
+
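+/*
+ * Find another PF on the same PCI bus/slot (a different function
+ * number) by walking the global rdev list under RCU; returns the
+ * first match or NULL.
+ */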
+struct bnxt_re_dev *bnxt_re_get_peer_pf(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev_in = rdev->en_dev->pdev;
+ int tmp_bus_num, bus_num = pdev_in->bus->number;
+ int tmp_dev_num, dev_num = PCI_SLOT(pdev_in->devfn);
+ int tmp_func_num, func_num = PCI_FUNC(pdev_in->devfn);
+ struct bnxt_re_dev *tmp_rdev;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp_rdev, &bnxt_re_dev_list, list) {
+ tmp_bus_num = tmp_rdev->en_dev->pdev->bus->number;
+ tmp_dev_num = PCI_SLOT(tmp_rdev->en_dev->pdev->devfn);
+ tmp_func_num = PCI_FUNC(tmp_rdev->en_dev->pdev->devfn);
+
+ if (bus_num == tmp_bus_num && dev_num == tmp_dev_num &&
+ func_num != tmp_func_num) {
+ rcu_read_unlock();
+ return tmp_rdev;
+ }
+ }
+ rcu_read_unlock();
+ return NULL;
+}
+
+int bnxt_re_schedule_work(struct bnxt_re_dev *rdev, unsigned long event,
+ struct ifnet *vlan_dev,
+ struct ifnet *netdev,
+ struct auxiliary_device *adev)
+{
+ struct bnxt_re_work *re_work;
+
+ /* Allocate for the deferred task */
+ re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
+ if (!re_work)
+ return -ENOMEM;
+
+ re_work->rdev = rdev;
+ re_work->event = event;
+ re_work->vlan_dev = vlan_dev;
+ re_work->adev = adev;
+ INIT_WORK(&re_work->work, bnxt_re_task);
+ if (rdev)
+ atomic_inc(&rdev->sched_count);
+ re_work->netdev = netdev;
+ queue_work(bnxt_re_wq, &re_work->work);
+
+ return 0;
+}
+
+int bnxt_re_get_slot_pf_count(struct bnxt_re_dev *rdev)
+{
+ struct pci_dev *pdev_in = rdev->en_dev->pdev;
+ int tmp_bus_num, bus_num = pdev_in->bus->number;
+ int tmp_dev_num, dev_num = PCI_SLOT(pdev_in->devfn);
+ struct bnxt_re_dev *tmp_rdev;
+ int pf_cnt = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(tmp_rdev, &bnxt_re_dev_list, list) {
+ tmp_bus_num = tmp_rdev->en_dev->pdev->bus->number;
+ tmp_dev_num = PCI_SLOT(tmp_rdev->en_dev->pdev->devfn);
+
+ if (bus_num == tmp_bus_num && dev_num == tmp_dev_num)
+ pf_cnt++;
+ }
+ rcu_read_unlock();
+ return pf_cnt;
+}
+
+/* Handle all deferred netevents tasks */
+static void bnxt_re_task(struct work_struct *work)
+{
+ struct bnxt_re_en_dev_info *en_info;
+ struct auxiliary_device *aux_dev;
+ struct bnxt_re_work *re_work;
+ struct bnxt_re_dev *rdev;
+
+ re_work = container_of(work, struct bnxt_re_work, work);
+
+ mutex_lock(&bnxt_re_mutex);
+ rdev = re_work->rdev;
+
+ /*
+ * If the previous rdev is deleted due to bond creation
+ * do not handle the event
+ */
+ if (!bnxt_re_is_rdev_valid(rdev))
+ goto exit;
+
+ /* Ignore the event if the device is not registered with the IB
+ * stack. This avoids handling any event while the device is being
+ * added or removed.
+ */
+ if (rdev && !test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
+ dev_dbg(rdev_to_dev(rdev), "%s: Ignoring netdev event 0x%lx",
+ __func__, re_work->event);
+ goto done;
+ }
+
+ /* Extra check to silence coverity. We shouldn't handle any event
+ * when rdev is NULL.
+ */
+ if (!rdev)
+ goto exit;
+
+ dev_dbg(rdev_to_dev(rdev), "Scheduled work for event 0x%lx",
+ re_work->event);
+
+ switch (re_work->event) {
+ case NETDEV_UP:
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_PORT_ACTIVE);
+ bnxt_re_net_register_async_event(rdev);
+ break;
+
+ case NETDEV_DOWN:
+ bnxt_qplib_dbr_pacing_set_primary_pf(rdev->chip_ctx, 0);
+ bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_PORT_ERR);
+ break;
+
+ case NETDEV_CHANGE:
+ if (bnxt_re_get_link_state(rdev) == IB_PORT_DOWN) {
+ bnxt_re_stop_all_nonqp1_nonshadow_qps(rdev);
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_PORT_ERR);
+ break;
+ } else if (bnxt_re_get_link_state(rdev) == IB_PORT_ACTIVE) {
+ bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
+ IB_EVENT_PORT_ACTIVE);
+ }
+
+ /* temporarily disable the check for SR2 */
+ if (!bnxt_qplib_query_cc_param(&rdev->qplib_res,
+ &rdev->cc_param) &&
+ !_is_chip_p7(rdev->chip_ctx)) {
+ /*
+ * Disable CC for 10G speed
+ * for non p5 devices
+ */
+ if (rdev->sl_espeed == SPEED_10000 &&
+ !_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ if (rdev->cc_param.enable)
+ bnxt_re_clear_cc(rdev);
+ } else {
+ if (!rdev->cc_param.enable &&
+ rdev->cc_param.admin_enable)
+ bnxt_re_setup_cc(rdev);
+ }
+ }
+ break;
+
+ case NETDEV_UNREGISTER:
+ bnxt_re_stopqps_and_ib_uninit(rdev);
+ aux_dev = rdev->adev;
+ if (re_work->adev)
+ goto done;
+
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, aux_dev);
+
+ break;
+
+ default:
+ break;
+ }
+done:
+ if (rdev) {
+ /* memory barrier to guarantee task completion
+ * before decrementing sched count
+ */
+ mmiowb();
+ atomic_dec(&rdev->sched_count);
+ }
+exit:
+ if (re_work->adev && re_work->event == NETDEV_UNREGISTER) {
+ en_info = auxiliary_get_drvdata(re_work->adev);
+ en_info->ib_uninit_done = true;
+ wake_up(&en_info->waitq);
+ }
+ kfree(re_work);
+ mutex_unlock(&bnxt_re_mutex);
+}
+
+/*
+ "Notifier chain callback can be invoked for the same chain from
+ different CPUs at the same time".
+
+ For cases when the netdev is already present, our call to the
+ register_netdevice_notifier() will actually get the rtnl_lock()
+ before sending NETDEV_REGISTER and (if up) NETDEV_UP
+ events.
+
+ But for cases when the netdev is not already present, the notifier
+ chain is subjected to be invoked from different CPUs simultaneously.
+
+ This is protected by the netdev_mutex.
+*/
+static int bnxt_re_netdev_event(struct notifier_block *notifier,
+ unsigned long event, void *ptr)
+{
+ struct ifnet *real_dev, *netdev;
+ struct bnxt_re_dev *rdev = NULL;
+
+ netdev = netdev_notifier_info_to_ifp(ptr);
+ real_dev = rdma_vlan_dev_real_dev(netdev);
+ if (!real_dev)
+ real_dev = netdev;
+ /* In case of bonding, this will be the bond's rdev */
+ rdev = bnxt_re_from_netdev(real_dev);
+
+ if (!rdev)
+ goto exit;
+
+ dev_info(rdev_to_dev(rdev), "%s: Event = %s (0x%lx), rdev %s (real_dev %s)\n",
+ __func__, bnxt_re_netevent(event), event,
+ rdev ? rdev->netdev ? if_getdname(rdev->netdev) : "->netdev = NULL" : "= NULL",
+ (real_dev == netdev) ? "= netdev" : if_getdname(real_dev));
+
+ if (!test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
+ goto exit;
+
+ bnxt_re_hold(rdev);
+
+ if (real_dev != netdev) {
+ switch (event) {
+ case NETDEV_UP:
+ bnxt_re_schedule_work(rdev, event, netdev,
+ NULL, NULL);
+ break;
+ case NETDEV_DOWN:
+ break;
+ default:
+ break;
+ }
+ goto done;
+ }
+
+ switch (event) {
+ case NETDEV_CHANGEADDR:
+ if (!_is_chip_gen_p5_p7(rdev->chip_ctx))
+ bnxt_re_update_shadow_ah(rdev);
+ bnxt_qplib_get_guid(rdev->dev_addr,
+ (u8 *)&rdev->ibdev.node_guid);
+ break;
+
+ case NETDEV_CHANGE:
+ bnxt_re_get_link_speed(rdev);
+ bnxt_re_schedule_work(rdev, event, NULL, NULL, NULL);
+ break;
+ case NETDEV_UNREGISTER:
+ /* netdev notifier will call NETDEV_UNREGISTER again later since
+ * we are still holding the reference to the netdev
+ */
+
+ /*
+ * Workaround to avoid an ib_unregister hang. Check the module
+ * reference and don't free up the device if the reference
+ * is non-zero. Checking only for PF functions.
+ */
+
+ if (rdev) {
+ dev_info(rdev_to_dev(rdev),
+ "bnxt_re:Unreg recvd when module refcnt > 0");
+ dev_info(rdev_to_dev(rdev),
+ "bnxt_re:Close all apps using bnxt_re devs");
+ dev_info(rdev_to_dev(rdev),
+ "bnxt_re:Remove the configfs entry created for the device");
+ dev_info(rdev_to_dev(rdev),
+ "bnxt_re:Refer documentation for details");
+ goto done;
+ }
+
+ if (atomic_read(&rdev->sched_count) > 0)
+ goto done;
+ if (!rdev->unreg_sched) {
+ bnxt_re_schedule_work(rdev, NETDEV_UNREGISTER,
+ NULL, NULL, NULL);
+ rdev->unreg_sched = true;
+ goto done;
+ }
+
+ break;
+ default:
+ break;
+ }
+done:
+ if (rdev)
+ bnxt_re_put(rdev);
+exit:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block bnxt_re_netdev_notifier = {
+ .notifier_call = bnxt_re_netdev_event
+};
+
+static void bnxt_re_remove_base_interface(struct bnxt_re_dev *rdev,
+ struct auxiliary_device *adev)
+{
+ bnxt_re_stopqps_and_ib_uninit(rdev);
+ bnxt_re_remove_device(rdev, BNXT_RE_COMPLETE_REMOVE, adev);
+ auxiliary_set_drvdata(adev, NULL);
+}
+
+/*
+ * bnxt_re_remove - Removes the roce aux device
+ * @adev - aux device pointer
+ *
+ * This function removes the roce device. This gets
+ * called in the mod exit path and pci unbind path.
+ * If the rdev is a bond interface, this destroys the lag
+ * in the module exit path; in the pci unbind case it
+ * destroys the lag and recreates the other base interface.
+ * If the device was already removed in the error recovery
+ * path, it just unregisters with the L2 driver.
+ */
+static void bnxt_re_remove(struct auxiliary_device *adev)
+{
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(adev);
+ struct bnxt_en_dev *en_dev;
+ struct bnxt_re_dev *rdev;
+ bool primary_dev = false;
+ bool secondary_dev = false;
+
+ if (!en_info)
+ return;
+
+ mutex_lock(&bnxt_re_mutex);
+ en_dev = en_info->en_dev;
+
+ rdev = en_info->rdev;
+
+ if (rdev && bnxt_re_is_rdev_valid(rdev)) {
+ if (pci_channel_offline(rdev->rcfw.pdev))
+ set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
+
+ if (test_bit(BNXT_RE_FLAG_EN_DEV_PRIMARY_DEV, &en_info->flags))
+ primary_dev = true;
+ if (test_bit(BNXT_RE_FLAG_EN_DEV_SECONDARY_DEV, &en_info->flags))
+ secondary_dev = true;
+
+ /*
+ * en_dev_info of primary device and secondary device have the
+ * same rdev pointer when LAG is configured. This rdev pointer
+ * is rdev of bond interface.
+ */
+ if (!primary_dev && !secondary_dev) {
+ /* removal of non bond interface */
+ bnxt_re_remove_base_interface(rdev, adev);
+ } else {
+ /*
+ * removal of bond primary/secondary interface. In this
+ * case bond device is already removed, so rdev->binfo
+ * is NULL.
+ */
+ auxiliary_set_drvdata(adev, NULL);
+ }
+ } else {
+ /* device is removed from ulp stop, unregister the net dev */
+ if (test_bit(BNXT_RE_FLAG_EN_DEV_NETDEV_REG, &en_info->flags)) {
+ rtnl_lock();
+ en_dev->en_ops->bnxt_unregister_device(en_dev,
+ BNXT_ROCE_ULP);
+ rtnl_unlock();
+ }
+ }
+ mutex_unlock(&bnxt_re_mutex);
+}
+
+/* wrapper for all external user context callers */
+void _bnxt_re_remove(struct auxiliary_device *adev)
+{
+ bnxt_re_remove(adev);
+}
+
+static void bnxt_re_ib_init_2(struct bnxt_re_dev *rdev)
+{
+ int rc;
+
+ rc = bnxt_re_get_device_stats(rdev);
+ if (rc)
+ dev_err(rdev_to_dev(rdev),
+ "Failed initial device stat query");
+
+ bnxt_re_net_register_async_event(rdev);
+}
+
+static int bnxt_re_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
+{
+ struct bnxt_aux_dev *aux_dev =
+ container_of(adev, struct bnxt_aux_dev, aux_dev);
+ struct bnxt_re_en_dev_info *en_info;
+ struct bnxt_en_dev *en_dev = NULL;
+ struct bnxt_re_dev *rdev;
+ int rc = -ENODEV;
+
+ if (aux_dev)
+ en_dev = aux_dev->edev;
+
+ if (!en_dev)
+ return rc;
+
+ if (en_dev->ulp_version != BNXT_ULP_VERSION) {
+ pr_err("%s: probe error: bnxt_en ulp version magic %x is not compatible!\n",
+ ROCE_DRV_MODULE_NAME, en_dev->ulp_version);
+ return -EINVAL;
+ }
+
+ en_info = kzalloc(sizeof(*en_info), GFP_KERNEL);
+ if (!en_info)
+ return -ENOMEM;
+ en_info->en_dev = en_dev;
+ auxiliary_set_drvdata(adev, en_info);
+
+ mutex_lock(&bnxt_re_mutex);
+ rc = bnxt_re_add_device(&rdev, en_dev->net,
+ BNXT_RE_GSI_MODE_ALL,
+ BNXT_RE_COMPLETE_INIT,
+ BNXT_QPLIB_WQE_MODE_STATIC,
+ BNXT_RE_MSIX_FROM_MOD_PARAM, adev);
+ if (rc) {
+ mutex_unlock(&bnxt_re_mutex);
+ return rc;
+ }
+
+ rc = bnxt_re_ib_init(rdev);
+ if (rc)
+ goto err;
+
+ bnxt_re_ib_init_2(rdev);
+
+ dev_dbg(rdev_to_dev(rdev), "%s: adev: %p\n", __func__, adev);
+ rdev->adev = adev;
+
+ mutex_unlock(&bnxt_re_mutex);
+
+ return 0;
+
+err:
+ mutex_unlock(&bnxt_re_mutex);
+ bnxt_re_remove(adev);
+
+ return rc;
+}
+
+static const struct auxiliary_device_id bnxt_re_id_table[] = {
+ { .name = BNXT_ADEV_NAME ".rdma", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, bnxt_re_id_table);
+
+static struct auxiliary_driver bnxt_re_driver = {
+ .name = "rdma",
+ .probe = bnxt_re_probe,
+ .remove = bnxt_re_remove,
+ .id_table = bnxt_re_id_table,
+};
+
+static int __init bnxt_re_mod_init(void)
+{
+ int rc = 0;
+
+ pr_info("%s: %s", ROCE_DRV_MODULE_NAME, drv_version);
+
+ bnxt_re_wq = create_singlethread_workqueue("bnxt_re");
+ if (!bnxt_re_wq)
+ return -ENOMEM;
+
+ rc = bnxt_re_register_netdevice_notifier(&bnxt_re_netdev_notifier);
+ if (rc) {
+ pr_err("%s: Cannot register to netdevice_notifier",
+ ROCE_DRV_MODULE_NAME);
+ goto err_netdev;
+ }
+
+ INIT_LIST_HEAD(&bnxt_re_dev_list);
+
+ rc = auxiliary_driver_register(&bnxt_re_driver);
+ if (rc) {
+ pr_err("%s: Failed to register auxiliary driver\n",
+ ROCE_DRV_MODULE_NAME);
+ goto err_auxdrv;
+ }
+
+ return 0;
+
+err_auxdrv:
+ bnxt_re_unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
+
+err_netdev:
+ destroy_workqueue(bnxt_re_wq);
+
+ return rc;
+}
+
+static void __exit bnxt_re_mod_exit(void)
+{
+ gmod_exit = 1;
+ auxiliary_driver_unregister(&bnxt_re_driver);
+
+ bnxt_re_unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
+
+ if (bnxt_re_wq)
+ destroy_workqueue(bnxt_re_wq);
+}
+
+module_init(bnxt_re_mod_init);
+module_exit(bnxt_re_mod_exit);
diff --git a/sys/dev/bnxt/bnxt_re/qplib_fp.c b/sys/dev/bnxt/bnxt_re/qplib_fp.c
new file mode 100644
index 000000000000..3f1b02406f7f
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_fp.c
@@ -0,0 +1,3544 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Fast Path Operators
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/if_ether.h>
+#include <linux/hardirq.h>
+#include <rdma/ib_mad.h>
+
+#include "hsi_struct_def.h"
+#include "qplib_tlv.h"
+#include "qplib_res.h"
+#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+#include "qplib_fp.h"
+#include "ib_verbs.h"
+
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp);
+
+static void bnxt_re_legacy_cancel_phantom_processing(struct bnxt_qplib_qp *qp)
+{
+ qp->sq.condition = false;
+ qp->sq.legacy_send_phantom = false;
+ qp->sq.single = false;
+}
+
+static void __bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_cq *scq, *rcq;
+
+ scq = qp->scq;
+ rcq = qp->rcq;
+
+ if (!qp->sq.flushed) {
+ dev_dbg(&scq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to SQ Flush list = %p\n",
+ qp);
+ bnxt_re_legacy_cancel_phantom_processing(qp);
+ list_add_tail(&qp->sq_flush, &scq->sqf_head);
+ qp->sq.flushed = true;
+ }
+ if (!qp->srq) {
+ if (!qp->rq.flushed) {
+ dev_dbg(&rcq->hwq.pdev->dev,
+ "QPLIB: FP: Adding to RQ Flush list = %p\n",
+ qp);
+ list_add_tail(&qp->rq_flush, &rcq->rqf_head);
+ qp->rq.flushed = true;
+ }
+ }
+}
+
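+/*
+ * A QP's SQ and RQ may report into the same CQ. Take the flush locks
+ * in a fixed scq-then-rcq order and, when both queues share one CQ,
+ * lock it only once; the __acquire()/__release() annotations keep
+ * static checkers such as sparse balanced in the single-lock case.
+ */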
+static void bnxt_qplib_acquire_cq_flush_locks(struct bnxt_qplib_qp *qp)
+ __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
+{
+ /* Interrupts are already disabled in calling functions */
+ spin_lock(&qp->scq->flush_lock);
+ if (qp->scq == qp->rcq)
+ __acquire(&qp->rcq->flush_lock);
+ else
+ spin_lock(&qp->rcq->flush_lock);
+}
+
+static void bnxt_qplib_release_cq_flush_locks(struct bnxt_qplib_qp *qp)
+ __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
+{
+ if (qp->scq == qp->rcq)
+ __release(&qp->rcq->flush_lock);
+ else
+ spin_unlock(&qp->rcq->flush_lock);
+ spin_unlock(&qp->scq->flush_lock);
+}
+
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ bnxt_qplib_acquire_cq_flush_locks(qp);
+ __bnxt_qplib_add_flush_qp(qp);
+ bnxt_qplib_release_cq_flush_locks(qp);
+}
+
+static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+{
+ if (qp->sq.flushed) {
+ qp->sq.flushed = false;
+ list_del(&qp->sq_flush);
+ }
+ if (!qp->srq) {
+ if (qp->rq.flushed) {
+ qp->rq.flushed = false;
+ list_del(&qp->rq_flush);
+ }
+ }
+}
+
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
+{
+ bnxt_qplib_acquire_cq_flush_locks(qp);
+ __clean_cq(qp->scq, (u64)(unsigned long)qp);
+ qp->sq.hwq.prod = 0;
+ qp->sq.hwq.cons = 0;
+ qp->sq.swq_start = 0;
+ qp->sq.swq_last = 0;
+ __clean_cq(qp->rcq, (u64)(unsigned long)qp);
+ qp->rq.hwq.prod = 0;
+ qp->rq.hwq.cons = 0;
+ qp->rq.swq_start = 0;
+ qp->rq.swq_last = 0;
+
+ __bnxt_qplib_del_flush_qp(qp);
+ bnxt_qplib_release_cq_flush_locks(qp);
+}
+
+static void bnxt_qpn_cqn_sched_task(struct work_struct *work)
+{
+ struct bnxt_qplib_nq_work *nq_work =
+ container_of(work, struct bnxt_qplib_nq_work, work);
+
+ struct bnxt_qplib_cq *cq = nq_work->cq;
+ struct bnxt_qplib_nq *nq = nq_work->nq;
+
+ if (cq && nq) {
+ spin_lock_bh(&cq->compl_lock);
+ if (nq->cqn_handler) {
+ dev_dbg(&nq->res->pdev->dev,
+ "%s:Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+ }
+ kfree(nq_work);
+}
+
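+/*
+ * QP1/GSI header buffers: a single DMA-coherent region of
+ * ALIGN(step * cnt, PAGE_SIZE) bytes, where "step" is the per-WQE
+ * header size and "cnt" the queue depth; producers index into it as
+ * buf->va + sw_prod * step (see bnxt_qplib_get_qp1_sq_buf()).
+ */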
+static void bnxt_qplib_put_hdr_buf(struct pci_dev *pdev,
+ struct bnxt_qplib_hdrbuf *buf)
+{
+ dma_free_coherent(&pdev->dev, buf->len, buf->va, buf->dma_map);
+ kfree(buf);
+}
+
+static void *bnxt_qplib_get_hdr_buf(struct pci_dev *pdev, u32 step, u32 cnt)
+{
+ struct bnxt_qplib_hdrbuf *hdrbuf;
+ u32 len;
+
+ hdrbuf = kmalloc(sizeof(*hdrbuf), GFP_KERNEL);
+ if (!hdrbuf)
+ return NULL;
+
+ len = ALIGN((step * cnt), PAGE_SIZE);
+ hdrbuf->va = dma_alloc_coherent(&pdev->dev, len,
+ &hdrbuf->dma_map, GFP_KERNEL);
+ if (!hdrbuf->va)
+ goto out;
+
+ hdrbuf->len = len;
+ hdrbuf->step = step;
+ return hdrbuf;
+out:
+ kfree(hdrbuf);
+ return NULL;
+}
+
+void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ if (qp->rq_hdr_buf) {
+ bnxt_qplib_put_hdr_buf(res->pdev, qp->rq_hdr_buf);
+ qp->rq_hdr_buf = NULL;
+ }
+
+ if (qp->sq_hdr_buf) {
+ bnxt_qplib_put_hdr_buf(res->pdev, qp->sq_hdr_buf);
+ qp->sq_hdr_buf = NULL;
+ }
+}
+
+int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp, u32 sstep, u32 rstep)
+{
+ struct pci_dev *pdev;
+ int rc = 0;
+
+ pdev = res->pdev;
+ if (sstep) {
+ qp->sq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, sstep,
+ qp->sq.max_wqe);
+ if (!qp->sq_hdr_buf) {
+ dev_err(&pdev->dev, "QPLIB: Failed to get sq_hdr_buf\n");
+ return -ENOMEM;
+ }
+ }
+
+ if (rstep) {
+ qp->rq_hdr_buf = bnxt_qplib_get_hdr_buf(pdev, rstep,
+ qp->rq.max_wqe);
+ if (!qp->rq_hdr_buf) {
+ rc = -ENOMEM;
+ dev_err(&pdev->dev, "QPLIB: Failed to get rq_hdr_buf\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ bnxt_qplib_free_hdr_buf(res, qp);
+ return rc;
+}
+
+/*
+ * clean_nq - Invalidate CQEs in the given NQ
+ * @cq - Completion queue
+ *
+ * Traverse the whole notification queue and invalidate any completion
+ * associated with the CQ handle provided by the caller.
+ * Note - This function traverses the hardware queue but does not
+ * update the consumer index. CQEs invalidated (marked) by this
+ * function are ignored during actual completion processing of the
+ * notification queue.
+ */
+static void clean_nq(struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_hwq *nq_hwq = NULL;
+ struct bnxt_qplib_nq *nq = NULL;
+ struct nq_base *hw_nqe = NULL;
+ struct nq_cn *nqcne = NULL;
+ u32 peek_flags, peek_cons;
+ u64 q_handle;
+ u32 type;
+ int i;
+
+ nq = cq->nq;
+ nq_hwq = &nq->hwq;
+
+ spin_lock_bh(&nq_hwq->lock);
+ peek_flags = nq->nq_db.dbinfo.flags;
+ peek_cons = nq_hwq->cons;
+ for (i = 0; i < nq_hwq->max_elements; i++) {
+ hw_nqe = bnxt_qplib_get_qe(nq_hwq, peek_cons, NULL);
+ if (!NQE_CMP_VALID(hw_nqe, peek_flags))
+ break;
+
+ /* The valid test of the entry must be done first
+ * before reading any further.
+ */
+ dma_rmb();
+ type = le16_to_cpu(hw_nqe->info10_type) &
+ NQ_BASE_TYPE_MASK;
+
+ /* Processing only NQ_BASE_TYPE_CQ_NOTIFICATION */
+ if (type == NQ_BASE_TYPE_CQ_NOTIFICATION) {
+ nqcne = (struct nq_cn *)hw_nqe;
+
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
+ if (q_handle == (u64)cq) {
+ nqcne->cq_handle_low = 0;
+ nqcne->cq_handle_high = 0;
+ cq->cnq_events++;
+ }
+ }
+ bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &peek_cons,
+ 1, &peek_flags);
+ }
+ spin_unlock_bh(&nq_hwq->lock);
+}
+
+/*
+ * Wait until all NQEs for this CQ have been received.
+ * clean_nq() is tried up to 100 times; each attempt loops over the
+ * queue up to budget times, where budget is based on the number of
+ * CQs shared by that NQ, so by then any NQE from this CQ should
+ * already be in the NQ.
+ */
+static void __wait_for_all_nqes(struct bnxt_qplib_cq *cq, u16 cnq_events)
+{
+ u32 retry_cnt = 100;
+ u16 total_events;
+
+ if (!cnq_events) {
+ clean_nq(cq);
+ return;
+ }
+ while (retry_cnt--) {
+ total_events = cq->cnq_events;
+
+ /* Increment total_events by 1 if any CREQ event received with CQ notification */
+ if (cq->is_cq_err_event)
+ total_events++;
+
+ if (cnq_events == total_events) {
+ dev_dbg(&cq->nq->res->pdev->dev,
+ "QPLIB: NQ cleanup - Received all NQ events\n");
+ return;
+ }
+ msleep(1);
+ clean_nq(cq);
+ }
+}
+
+static void bnxt_qplib_service_nq(unsigned long data)
+{
+ struct bnxt_qplib_nq *nq = (struct bnxt_qplib_nq *)data;
+ struct bnxt_qplib_hwq *nq_hwq = &nq->hwq;
+ int budget = nq->budget;
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_cq *cq;
+ struct pci_dev *pdev;
+ struct nq_base *nqe;
+ u32 hw_polled = 0;
+ u64 q_handle;
+ u32 type;
+
+ res = nq->res;
+ pdev = res->pdev;
+
+ spin_lock_bh(&nq_hwq->lock);
+ /* Service the NQ until empty or budget expired */
+ while (budget--) {
+ nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL);
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags))
+ break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ type = le16_to_cpu(nqe->info10_type) & NQ_BASE_TYPE_MASK;
+ switch (type) {
+ case NQ_BASE_TYPE_CQ_NOTIFICATION:
+ {
+ struct nq_cn *nqcne = (struct nq_cn *)nqe;
+
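+ /* The 64-bit CQ handle is carried as two 32-bit LE words in
+ * the NQE; reassemble it as low | (high << 32).
+ */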
+ q_handle = le32_to_cpu(nqcne->cq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqcne->cq_handle_high) << 32;
+ cq = (struct bnxt_qplib_cq *)q_handle;
+ if (!cq)
+ break;
+ cq->toggle = (le16_to_cpu(nqe->info10_type) & NQ_CN_TOGGLE_MASK) >> NQ_CN_TOGGLE_SFT;
+ cq->dbinfo.toggle = cq->toggle;
+ bnxt_qplib_armen_db(&cq->dbinfo,
+ DBC_DBC_TYPE_CQ_ARMENA);
+ spin_lock_bh(&cq->compl_lock);
+ atomic_set(&cq->arm_state, 0);
+ if (!nq->cqn_handler(nq, (cq)))
+ nq->stats.num_cqne_processed++;
+ else
+ dev_warn(&pdev->dev,
+ "QPLIB: cqn - type 0x%x not handled\n",
+ type);
+ cq->cnq_events++;
+ spin_unlock_bh(&cq->compl_lock);
+ break;
+ }
+ case NQ_BASE_TYPE_SRQ_EVENT:
+ {
+ struct bnxt_qplib_srq *srq;
+ struct nq_srq_event *nqsrqe =
+ (struct nq_srq_event *)nqe;
+
+ q_handle = le32_to_cpu(nqsrqe->srq_handle_low);
+ q_handle |= (u64)le32_to_cpu(nqsrqe->srq_handle_high) << 32;
+ srq = (struct bnxt_qplib_srq *)q_handle;
+ bnxt_qplib_armen_db(&srq->dbinfo,
+ DBC_DBC_TYPE_SRQ_ARMENA);
+ if (!nq->srqn_handler(nq,
+ (struct bnxt_qplib_srq *)q_handle,
+ nqsrqe->event))
+ nq->stats.num_srqne_processed++;
+ else
+ dev_warn(&pdev->dev,
+ "QPLIB: SRQ event 0x%x not handled\n",
+ nqsrqe->event);
+ break;
+ }
+ default:
+ dev_warn(&pdev->dev,
+ "QPLIB: nqe with opcode = 0x%x not handled\n",
+ type);
+ break;
+ }
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(nq_hwq->max_elements, &nq_hwq->cons,
+ 1, &nq->nq_db.dbinfo.flags);
+ }
+ nqe = bnxt_qplib_get_qe(nq_hwq, nq_hwq->cons, NULL);
+ if (!NQE_CMP_VALID(nqe, nq->nq_db.dbinfo.flags)) {
+ nq->stats.num_nq_rearm++;
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+ } else if (nq->requested) {
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+ nq->stats.num_tasklet_resched++;
+ }
+ dev_dbg(&pdev->dev, "QPLIB: cqn/srqn/dbqn \n");
+ if (hw_polled >= 0)
+ dev_dbg(&pdev->dev,
+ "QPLIB: serviced %llu/%llu/%llu budget 0x%x reaped 0x%x\n",
+ nq->stats.num_cqne_processed, nq->stats.num_srqne_processed,
+ nq->stats.num_dbqne_processed, budget, hw_polled);
+ dev_dbg(&pdev->dev,
+ "QPLIB: resched_cnt = %llu arm_count = %llu\n",
+ nq->stats.num_tasklet_resched, nq->stats.num_nq_rearm);
+ spin_unlock_bh(&nq_hwq->lock);
+}
+
+static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance)
+{
+ struct bnxt_qplib_nq *nq = dev_instance;
+ struct bnxt_qplib_hwq *nq_hwq = &nq->hwq;
+ u32 sw_cons;
+
+ /* Prefetch the NQ element */
+ sw_cons = HWQ_CMP(nq_hwq->cons, nq_hwq);
+ if (sw_cons >= 0)
+ prefetch(bnxt_qplib_get_qe(nq_hwq, sw_cons, NULL));
+
+ bnxt_qplib_service_nq((unsigned long)nq);
+
+ return IRQ_HANDLED;
+}
+
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
+{
+ struct bnxt_qplib_res *res;
+
+ if (!nq->requested)
+ return;
+
+ nq->requested = false;
+ res = nq->res;
+ /* Mask h/w interrupt */
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, false);
+ /* Sync with last running IRQ handler */
+ synchronize_irq(nq->msix_vec);
+ free_irq(nq->msix_vec, nq);
+ kfree(nq->name);
+ nq->name = NULL;
+}
+
+void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
+{
+ if (nq->cqn_wq) {
+ destroy_workqueue(nq->cqn_wq);
+ nq->cqn_wq = NULL;
+ }
+ /* Make sure the HW is stopped! */
+ bnxt_qplib_nq_stop_irq(nq, true);
+
+ nq->nq_db.reg.bar_reg = NULL;
+ nq->nq_db.db = NULL;
+
+ nq->cqn_handler = NULL;
+ nq->srqn_handler = NULL;
+ nq->msix_vec = 0;
+}
+
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init)
+{
+ struct bnxt_qplib_res *res;
+ int rc;
+
+ res = nq->res;
+ if (nq->requested)
+ return -EFAULT;
+
+ nq->msix_vec = msix_vector;
+ nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
+ nq_indx, pci_name(res->pdev));
+ if (!nq->name)
+ return -ENOMEM;
+ rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
+ if (rc) {
+ kfree(nq->name);
+ nq->name = NULL;
+ return rc;
+ }
+ nq->requested = true;
+ bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
+
+ return rc;
+}
+
+static void bnxt_qplib_map_nq_db(struct bnxt_qplib_nq *nq, u32 reg_offt)
+{
+ struct bnxt_qplib_reg_desc *dbreg;
+ struct bnxt_qplib_nq_db *nq_db;
+ struct bnxt_qplib_res *res;
+
+ nq_db = &nq->nq_db;
+ res = nq->res;
+ dbreg = &res->dpi_tbl.ucreg;
+
+ nq_db->reg.bar_id = dbreg->bar_id;
+ nq_db->reg.bar_base = dbreg->bar_base;
+ nq_db->reg.bar_reg = dbreg->bar_reg + reg_offt;
+ nq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? sizeof(u64) :
+ sizeof(u32);
+
+ nq_db->dbinfo.db = nq_db->reg.bar_reg;
+ nq_db->dbinfo.hwq = &nq->hwq;
+ nq_db->dbinfo.xid = nq->ring_id;
+ nq_db->dbinfo.seed = nq->ring_id;
+ nq_db->dbinfo.flags = 0;
+ spin_lock_init(&nq_db->dbinfo.lock);
+ nq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ nq_db->dbinfo.res = nq->res;
+}
+
+int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx,
+ int msix_vector, int bar_reg_offset,
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srqn_handler)
+{
+ struct pci_dev *pdev;
+ int rc;
+
+ pdev = nq->res->pdev;
+ nq->cqn_handler = cqn_handler;
+ nq->srqn_handler = srqn_handler;
+ nq->load = 0;
+ mutex_init(&nq->lock);
+
+ /* Use a workqueue to schedule CQ notifiers for the post-send case */
+ nq->cqn_wq = create_singlethread_workqueue("bnxt_qplib_nq");
+ if (!nq->cqn_wq)
+ return -ENOMEM;
+
+ bnxt_qplib_map_nq_db(nq, bar_reg_offset);
+ rc = bnxt_qplib_nq_start_irq(nq, nq_idx, msix_vector, true);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "QPLIB: Failed to request irq for nq-idx %d\n", nq_idx);
+ goto fail;
+ }
+ dev_dbg(&pdev->dev, "QPLIB: NQ max = 0x%x\n", nq->hwq.max_elements);
+
+ return 0;
+fail:
+ bnxt_qplib_disable_nq(nq);
+ return rc;
+}
+
+void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq)
+{
+ if (nq->hwq.max_elements) {
+ bnxt_qplib_free_hwq(nq->res, &nq->hwq);
+ nq->hwq.max_elements = 0;
+ }
+}
+
+int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_nq *nq)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+
+ nq->res = res;
+ if (!nq->hwq.max_elements ||
+ nq->hwq.max_elements > BNXT_QPLIB_NQE_MAX_CNT)
+ nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = nq->hwq.max_elements;
+ hwq_attr.stride = sizeof(struct nq_base);
+ hwq_attr.type = _get_hwq_type(res);
+ if (bnxt_qplib_alloc_init_hwq(&nq->hwq, &hwq_attr)) {
+ dev_err(&res->pdev->dev, "QPLIB: FP NQ allocation failed\n");
+ return -ENOMEM;
+ }
+ nq->budget = 8;
+ return 0;
+}
+
+/* SRQ */
+static int __qplib_destroy_srq(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_srq *srq)
+{
+ struct creq_destroy_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_srq req = {};
+
+ /* Configure the request */
+ req.srq_cid = cpu_to_le32(srq->id);
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_SRQ,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ int rc;
+
+ rc = __qplib_destroy_srq(rcfw, srq);
+ if (rc)
+ return rc;
+ bnxt_qplib_free_hwq(res, &srq->hwq);
+ kfree(srq->swq);
+ return 0;
+}
+
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_srq req = {};
+ u16 pg_sz_lvl = 0;
+ u16 srq_size;
+ int rc, idx;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &srq->sginfo;
+ hwq_attr.depth = srq->max_wqe;
+ hwq_attr.stride = srq->wqe_size;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
+ if (rc)
+ goto exit;
+ /* Configure the request */
+ req.dpi = cpu_to_le32(srq->dpi->dpi);
+ req.srq_handle = cpu_to_le64((uintptr_t)srq);
+ srq_size = min_t(u32, srq->hwq.depth, U16_MAX);
+ req.srq_size = cpu_to_le16(srq_size);
+ pg_sz_lvl |= (_get_base_pg_size(&srq->hwq) <<
+ CMDQ_CREATE_SRQ_PG_SIZE_SFT);
+ pg_sz_lvl |= (srq->hwq.level & CMDQ_CREATE_SRQ_LVL_MASK);
+ req.pg_size_lvl = cpu_to_le16(pg_sz_lvl);
+ req.pbl = cpu_to_le64(_get_base_addr(&srq->hwq));
+ req.pd_id = cpu_to_le32(srq->pd->id);
+ req.eventq_id = cpu_to_le16(srq->eventq_hw_ring_id);
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_SRQ,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+ if (!srq->is_user) {
+ srq->swq = kcalloc(srq->hwq.depth, sizeof(*srq->swq),
+ GFP_KERNEL);
+ if (!srq->swq) {
+ rc = -ENOMEM;
+ goto srq_fail;
+ }
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.depth - 1;
+ for (idx = 0; idx < srq->hwq.depth; idx++)
+ srq->swq[idx].next_idx = idx + 1;
+ srq->swq[srq->last_idx].next_idx = -1;
+ }
+
+ spin_lock_init(&srq->lock);
+ srq->id = le32_to_cpu(resp.xid);
+ srq->cctx = res->cctx;
+ srq->dbinfo.hwq = &srq->hwq;
+ srq->dbinfo.xid = srq->id;
+ srq->dbinfo.db = srq->dpi->dbr;
+ srq->dbinfo.max_slot = 1;
+ srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ srq->dbinfo.flags = 0;
+ spin_lock_init(&srq->dbinfo.lock);
+ srq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ srq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID;
+ srq->dbinfo.res = res;
+ srq->dbinfo.seed = srq->id;
+ if (srq->threshold)
+ bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
+ srq->arm_req = false;
+ return 0;
+srq_fail:
+ __qplib_destroy_srq(rcfw, srq);
+fail:
+ bnxt_qplib_free_hwq(res, &srq->hwq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ u32 avail = 0;
+
+ avail = __bnxt_qplib_get_avail(srq_hwq);
+ if (avail <= srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_srq_arm_db(&srq->dbinfo);
+ } else {
+ /* Deferred arming */
+ srq->arm_req = true;
+ }
+ return 0;
+}
+
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_srq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct creq_query_srq_resp_sb *sb;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct cmdq_query_srq req = {};
+ int rc = 0;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_SRQ,
+ sizeof(req));
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ req.srq_cid = cpu_to_le32(srq->id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ /* TODO: What to do with the query? */
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+
+ return rc;
+}
+
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_hwq *srq_hwq = &srq->hwq;
+ struct sq_sge *hw_sge;
+ struct rq_wqe *srqe;
+ int i, rc = 0, next;
+ u32 avail;
+
+ spin_lock(&srq_hwq->lock);
+ if (srq->start_idx == srq->last_idx) {
+ dev_err(&srq_hwq->pdev->dev, "QPLIB: FP: SRQ (0x%x) is full!\n",
+ srq->id);
+ rc = -EINVAL;
+ spin_unlock(&srq_hwq->lock);
+ goto done;
+ }
+ next = srq->start_idx;
+ srq->start_idx = srq->swq[next].next_idx;
+ spin_unlock(&srq_hwq->lock);
+
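+ /* "next" was popped from the SRQ software free list above; store
+ * it in wr_id so the completion path can recover the caller's
+ * wr_id from swq[] and return the slot to the free list.
+ */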
+ srqe = bnxt_qplib_get_qe(srq_hwq, srq_hwq->prod, NULL);
+ memset(srqe, 0, srq->wqe_size);
+ /* Copy the SG list into the SRQE; wqe_size is derived below */
+ for (i = 0, hw_sge = (struct sq_sge *)srqe->data;
+ i < wqe->num_sge; i++, hw_sge++) {
+ hw_sge->va_or_pa = cpu_to_le64(wqe->sg_list[i].addr);
+ hw_sge->l_key = cpu_to_le32(wqe->sg_list[i].lkey);
+ hw_sge->size = cpu_to_le32(wqe->sg_list[i].size);
+ }
+ srqe->wqe_type = wqe->type;
+ srqe->flags = wqe->flags;
+ srqe->wqe_size = wqe->num_sge +
+ ((offsetof(typeof(*srqe), data) + 15) >> 4);
+ if (!wqe->num_sge)
+ srqe->wqe_size++;
+ srqe->wr_id |= cpu_to_le32((u32)next);
+ srq->swq[next].wr_id = wqe->wr_id;
+ bnxt_qplib_hwq_incr_prod(&srq->dbinfo, srq_hwq, srq->dbinfo.max_slot);
+ /* The lock below is taken only to get a consistent read of
+ * srq_hwq->cons for the avail computation.
+ */
+ spin_lock(&srq_hwq->lock);
+ avail = __bnxt_qplib_get_avail(srq_hwq);
+ spin_unlock(&srq_hwq->lock);
+ /* Ring DB */
+ bnxt_qplib_ring_prod_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ);
+ if (srq->arm_req && avail <= srq->threshold) {
+ srq->arm_req = false;
+ bnxt_qplib_srq_arm_db(&srq->dbinfo);
+ }
+done:
+ return rc;
+}
+
+/* QP */
+static int __qplib_destroy_qp(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_qp *qp)
+{
+ struct creq_destroy_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_qp req = {};
+
+ req.qp_cid = cpu_to_le32(qp->id);
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_QP,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
+{
+ int rc = 0;
+ int indx;
+
+ que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
+ if (!que->swq) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
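+ /* Thread the SWQ entries into a circular free list: each entry
+ * points at the next index and the last wraps back to 0, so
+ * slots are handed out from swq_start and recycled in order.
+ */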
+ que->swq_start = 0;
+ que->swq_last = que->max_wqe - 1;
+ for (indx = 0; indx < que->max_wqe; indx++)
+ que->swq[indx].next_idx = indx + 1;
+ que->swq[que->swq_last].next_idx = 0; /* Make it circular */
+ que->swq_last = 0;
+out:
+ return rc;
+}
+
+static struct bnxt_qplib_swq *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que,
+ u32 *swq_idx)
+{
+ u32 idx;
+
+ idx = que->swq_start;
+ if (swq_idx)
+ *swq_idx = idx;
+ return &que->swq[idx];
+}
+
+static void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
+{
+ que->swq_start = que->swq[idx].next_idx;
+}
+
+static u32 bnxt_qplib_get_stride(void)
+{
+ return sizeof(struct sq_sge);
+}
+
+static u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
+{
+ u8 stride;
+
+ stride = bnxt_qplib_get_stride();
+ return (que->wqe_size * que->max_wqe) / stride;
+}
+
+static u32 _set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
+{
+ /* For Variable mode supply number of 16B slots */
+ return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ que->max_wqe : bnxt_qplib_get_depth(que);
+}
+
+static u32 _set_sq_max_slot(u8 wqe_mode)
+{
+ /* For static mode the index divisor is sizeof(sq_send) / sizeof(sq_sge), i.e. 8 */
+ return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ sizeof(struct sq_send) / sizeof(struct sq_sge) : 1;
+}
+
+static u32 _set_rq_max_slot(struct bnxt_qplib_q *que)
+{
+ return (que->wqe_size / sizeof(struct sq_sge));
+}
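+
+/*
+ * Worked example (numbers assumed for illustration): in static WQE
+ * mode a send WQE is sizeof(struct sq_send) = 128B over a 16B
+ * sq_sge stride, so max_wqe = 256 gives 256 * 128 / 16 = 2048 slots
+ * and an SQ index divisor (_set_sq_max_slot) of 128 / 16 = 8.
+ */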
+
+int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_qp1_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct cmdq_create_qp1 req = {};
+ struct bnxt_qplib_reftbl *tbl;
+ unsigned long flag;
+ u8 pg_sz_lvl = 0;
+ u32 qp_flags = 0;
+ int rc;
+
+ /* General */
+ req.type = qp->type;
+ req.dpi = cpu_to_le32(qp->dpi->dpi);
+ req.qp_handle = cpu_to_le64(qp->qp_handle);
+ /* SQ */
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sginfo;
+ hwq_attr.stride = bnxt_qplib_get_stride();
+ hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+ goto exit;
+
+ req.sq_size = cpu_to_le32(_set_sq_size(sq, qp->wqe_mode));
+ req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq));
+ pg_sz_lvl = _get_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP1_SQ_PG_SIZE_SFT;
+ pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK) <<
+ CMDQ_CREATE_QP1_SQ_LVL_SFT);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
+ req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP1_SQ_FWO_SFT) &
+ CMDQ_CREATE_QP1_SQ_FWO_MASK) |
+ (sq->max_sge &
+ CMDQ_CREATE_QP1_SQ_SGE_MASK));
+ req.scq_cid = cpu_to_le32(qp->scq->id);
+
+ /* RQ */
+ if (!qp->srq) {
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sginfo;
+ hwq_attr.stride = bnxt_qplib_get_stride();
+ hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto fail_sq;
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq));
+ pg_sz_lvl = _get_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP1_RQ_PG_SIZE_SFT;
+ pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP1_RQ_LVL_MASK) <<
+ CMDQ_CREATE_QP1_RQ_LVL_SFT);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
+ req.rq_fwo_rq_sge =
+ cpu_to_le16(((0 << CMDQ_CREATE_QP1_RQ_FWO_SFT) &
+ CMDQ_CREATE_QP1_RQ_FWO_MASK) |
+ (rq->max_sge &
+ CMDQ_CREATE_QP1_RQ_SGE_MASK));
+ } else {
+ /* SRQ */
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP1_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ req.qp_flags = cpu_to_le32(qp_flags);
+ req.pd_id = cpu_to_le32(qp->pd->id);
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP1,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail_rq;
+
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto sq_swq;
+
+ if (!qp->srq) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto rq_swq;
+ }
+
+ qp->id = le32_to_cpu(resp.xid);
+ qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ qp->cctx = res->cctx;
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode);
+ sq->dbinfo.flags = 0;
+ spin_lock_init(&sq->dbinfo.lock);
+ sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ sq->dbinfo.res = res;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = _set_rq_max_slot(rq);
+ rq->dbinfo.flags = 0;
+ spin_lock_init(&rq->dbinfo.lock);
+ rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ rq->dbinfo.res = res;
+ }
+
+ tbl = &res->reftbl.qpref;
+ spin_lock_irqsave(&tbl->lock, flag);
+ tbl->rec[tbl->max].xid = qp->id;
+ tbl->rec[tbl->max].handle = qp;
+ spin_unlock_irqrestore(&tbl->lock, flag);
+
+ return 0;
+rq_swq:
+ kfree(sq->swq);
+sq_swq:
+ __qplib_destroy_qp(rcfw, qp);
+fail_rq:
+ bnxt_qplib_free_hwq(res, &rq->hwq);
+fail_sq:
+ bnxt_qplib_free_hwq(res, &sq->hwq);
+exit:
+ return rc;
+}
+
+static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_q *sq;
+ u64 fpsne, psn_pg;
+ u16 indx_pad = 0;
+
+ sq = &qp->sq;
+ sq_hwq = &sq->hwq;
+ /* First psn entry */
+ fpsne = (u64)bnxt_qplib_get_qe(sq_hwq, sq_hwq->depth, &psn_pg);
+ if (!IS_ALIGNED(fpsne, PAGE_SIZE))
+ indx_pad = (fpsne & ~PAGE_MASK) / size;
+ sq_hwq->pad_pgofft = indx_pad;
+ sq_hwq->pad_pg = (u64 *)psn_pg;
+ sq_hwq->pad_stride = size;
+}
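+
+/*
+ * The PSN/MSN search entries live in the aux ("pad") area allocated
+ * right after the SQ ring. pad_pgofft accounts for the first entry
+ * not starting on a page boundary, so entry i resolves to page
+ * (i + pad_pgofft) / entries_per_page and offset
+ * (i + pad_pgofft) % entries_per_page; bnxt_qplib_pull_psn_buff()
+ * later in this file uses exactly this arithmetic.
+ */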
+
+int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct creq_create_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct cmdq_create_qp req = {};
+ struct bnxt_qplib_reftbl *tbl;
+ struct bnxt_qplib_hwq *xrrq;
+ int rc, req_size, psn_sz;
+ unsigned long flag;
+ u8 pg_sz_lvl = 0;
+ u32 qp_flags = 0;
+ u32 qp_idx;
+ u16 nsge;
+ u32 sqsz;
+
+ qp->cctx = res->cctx;
+ if (res->dattr)
+ qp->dev_cap_flags = res->dattr->dev_cap_flags;
+ /* General */
+ req.type = qp->type;
+ req.dpi = cpu_to_le32(qp->dpi->dpi);
+ req.qp_handle = cpu_to_le64(qp->qp_handle);
+
+ /* SQ */
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+ psn_sz = _is_chip_gen_p5_p7(qp->cctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ psn_sz = sizeof(struct sq_msn_search);
+ qp->msn = 0;
+ }
+ } else {
+ psn_sz = 0;
+ }
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sginfo;
+ hwq_attr.stride = bnxt_qplib_get_stride();
+ hwq_attr.depth = bnxt_qplib_get_depth(sq);
+ hwq_attr.aux_stride = psn_sz;
+ hwq_attr.aux_depth = (psn_sz) ?
+ _set_sq_size(sq, qp->wqe_mode) : 0;
+ /* Update msn tbl size */
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+ hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode));
+ else
+ hwq_attr.aux_depth = roundup_pow_of_two(_set_sq_size(sq, qp->wqe_mode)) / 2;
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+ goto exit;
+
+ sqsz = _set_sq_size(sq, qp->wqe_mode);
+ /* 0xffff is the maximum SQ size the hardware supports */
+ if (sqsz > BNXT_QPLIB_MAX_SQSZ) {
+ pr_err("QPLIB: FP: QP (0x%x) exceeds sq size %d\n", qp->id, sqsz);
+ rc = -EINVAL;
+ goto fail_sq;
+ }
+ req.sq_size = cpu_to_le32(sqsz);
+ req.sq_pbl = cpu_to_le64(_get_base_addr(&sq->hwq));
+ pg_sz_lvl = _get_base_pg_size(&sq->hwq) <<
+ CMDQ_CREATE_QP_SQ_PG_SIZE_SFT;
+ pg_sz_lvl |= ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK) <<
+ CMDQ_CREATE_QP_SQ_LVL_SFT);
+ req.sq_pg_size_sq_lvl = pg_sz_lvl;
+ req.sq_fwo_sq_sge = cpu_to_le16(((0 << CMDQ_CREATE_QP_SQ_FWO_SFT) &
+ CMDQ_CREATE_QP_SQ_FWO_MASK) |
+ ((BNXT_RE_HW_RETX(qp->dev_cap_flags)) ?
+ BNXT_MSN_TBLE_SGE : sq->max_sge &
+ CMDQ_CREATE_QP_SQ_SGE_MASK));
+ req.scq_cid = cpu_to_le32(qp->scq->id);
+
+ /* RQ/SRQ */
+ if (!qp->srq) {
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sginfo;
+ hwq_attr.stride = bnxt_qplib_get_stride();
+ hwq_attr.depth = bnxt_qplib_get_depth(rq);
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto fail_sq;
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ req.rq_pbl = cpu_to_le64(_get_base_addr(&rq->hwq));
+ pg_sz_lvl = _get_base_pg_size(&rq->hwq) <<
+ CMDQ_CREATE_QP_RQ_PG_SIZE_SFT;
+ pg_sz_lvl |= ((rq->hwq.level & CMDQ_CREATE_QP_RQ_LVL_MASK) <<
+ CMDQ_CREATE_QP_RQ_LVL_SFT);
+ req.rq_pg_size_rq_lvl = pg_sz_lvl;
+ nsge = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ res->dattr->max_qp_sges : rq->max_sge;
+ req.rq_fwo_rq_sge =
+ cpu_to_le16(((0 << CMDQ_CREATE_QP_RQ_FWO_SFT) &
+ CMDQ_CREATE_QP_RQ_FWO_MASK) |
+ (nsge & CMDQ_CREATE_QP_RQ_SGE_MASK));
+ } else {
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_SRQ_USED;
+ req.srq_cid = cpu_to_le32(qp->srq->id);
+ }
+ req.rcq_cid = cpu_to_le32(qp->rcq->id);
+
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE;
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED;
+ if (qp->sig_type)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
+ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
+ if (res->cctx->modes.te_bypass)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_OPTIMIZED_TRANSMIT_ENABLED;
+ if (res->dattr &&
+ bnxt_ext_stats_supported(qp->cctx, res->dattr->dev_cap_flags, res->is_vf))
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
+ req.qp_flags = cpu_to_le32(qp_flags);
+
+ /* ORRQ and IRRQ */
+ if (psn_sz) {
+ xrrq = &qp->orrq;
+ xrrq->max_elements =
+ ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
+ req_size = xrrq->max_elements *
+ BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE + PAGE_SIZE - 1;
+ req_size &= ~(PAGE_SIZE - 1);
+ sginfo.pgsize = req_size;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE;
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
+ if (rc)
+ goto fail_rq;
+ req.orrq_addr = cpu_to_le64(_get_base_addr(xrrq));
+
+ xrrq = &qp->irrq;
+ xrrq->max_elements = IRD_LIMIT_TO_IRRQ_SLOTS(
+ qp->max_dest_rd_atomic);
+ req_size = xrrq->max_elements *
+ BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE + PAGE_SIZE - 1;
+ req_size &= ~(PAGE_SIZE - 1);
+ sginfo.pgsize = req_size;
+ hwq_attr.depth = xrrq->max_elements;
+ hwq_attr.stride = BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE;
+ rc = bnxt_qplib_alloc_init_hwq(xrrq, &hwq_attr);
+ if (rc)
+ goto fail_orrq;
+ req.irrq_addr = cpu_to_le64(_get_base_addr(xrrq));
+ }
+ req.pd_id = cpu_to_le32(qp->pd->id);
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_QP,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ if (!qp->is_user) {
+ rc = bnxt_qplib_alloc_init_swq(sq);
+ if (rc)
+ goto swq_sq;
+ if (!qp->srq) {
+ rc = bnxt_qplib_alloc_init_swq(rq);
+ if (rc)
+ goto swq_rq;
+ }
+ if (psn_sz)
+ bnxt_qplib_init_psn_ptr(qp, psn_sz);
+ }
+ qp->id = le32_to_cpu(resp.xid);
+ qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
+ INIT_LIST_HEAD(&qp->sq_flush);
+ INIT_LIST_HEAD(&qp->rq_flush);
+
+ sq->dbinfo.hwq = &sq->hwq;
+ sq->dbinfo.xid = qp->id;
+ sq->dbinfo.db = qp->dpi->dbr;
+ sq->dbinfo.max_slot = _set_sq_max_slot(qp->wqe_mode);
+ sq->dbinfo.flags = 0;
+ spin_lock_init(&sq->dbinfo.lock);
+ sq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ sq->dbinfo.res = res;
+ sq->dbinfo.seed = qp->id;
+ if (rq->max_wqe) {
+ rq->dbinfo.hwq = &rq->hwq;
+ rq->dbinfo.xid = qp->id;
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = _set_rq_max_slot(rq);
+ rq->dbinfo.flags = 0;
+ spin_lock_init(&rq->dbinfo.lock);
+ rq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ rq->dbinfo.res = res;
+ rq->dbinfo.seed = qp->id;
+ }
+
+ tbl = &res->reftbl.qpref;
+ qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl);
+ spin_lock_irqsave(&tbl->lock, flag);
+ tbl->rec[qp_idx].xid = qp->id;
+ tbl->rec[qp_idx].handle = qp;
+ spin_unlock_irqrestore(&tbl->lock, flag);
+
+ return 0;
+swq_rq:
+ kfree(sq->swq);
+swq_sq:
+ __qplib_destroy_qp(rcfw, qp);
+fail:
+ bnxt_qplib_free_hwq(res, &qp->irrq);
+fail_orrq:
+ bnxt_qplib_free_hwq(res, &qp->orrq);
+fail_rq:
+ bnxt_qplib_free_hwq(res, &rq->hwq);
+fail_sq:
+ bnxt_qplib_free_hwq(res, &sq->hwq);
+exit:
+ return rc;
+}
+
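+/*
+ * Sanitize the modify mask for the current->new state transition:
+ * Bono firmware wants path MTU, SGID index and rd_atomic >= 1
+ * present on INIT->RTR, and address-vector attributes stripped on
+ * RTR->RTS.
+ */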
+static void __filter_modify_flags(struct bnxt_qplib_qp *qp)
+{
+ switch (qp->cur_qp_state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RESET:
+ switch (qp->state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_INIT:
+ switch (qp->state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ if (!(qp->modify_flags &
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU)) {
+ qp->modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+ qp->path_mtu = CMDQ_MODIFY_QP_PATH_MTU_MTU_2048;
+ }
+ qp->modify_flags &=
+ ~CMDQ_MODIFY_QP_MODIFY_MASK_VLAN_ID;
+ /* Bono FW requires the max_dest_rd_atomic to be >= 1 */
+ if (qp->max_dest_rd_atomic < 1)
+ qp->max_dest_rd_atomic = 1;
+ qp->modify_flags &= ~CMDQ_MODIFY_QP_MODIFY_MASK_SRC_MAC;
+ /* Bono FW 20.6.5 requires SGID_INDEX to be configured */
+ if (!(qp->modify_flags &
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)) {
+ qp->modify_flags |=
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX;
+ qp->ah.sgid_index = 0;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTR:
+ switch (qp->state) {
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ /* Bono FW requires the max_rd_atomic to be >= 1 */
+ if (qp->max_rd_atomic < 1)
+ qp->max_rd_atomic = 1;
+ qp->modify_flags &=
+ ~(CMDQ_MODIFY_QP_MODIFY_MASK_PKEY |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DGID |
+ CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL |
+ CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX |
+ CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT |
+ CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU |
+ CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER |
+ CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC |
+ CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID);
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_RTS:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQD:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_SQE:
+ break;
+ case CMDQ_MODIFY_QP_NEW_STATE_ERR:
+ break;
+ default:
+ break;
+ }
+}
+
+int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_modify_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_qp req = {};
+ bool ppp_requested = false;
+ u32 temp32[4];
+ u32 bmask;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_QP,
+ sizeof(req));
+
+ /* Filter out the qp_attr_mask based on the state->new transition */
+ __filter_modify_flags(qp);
+ bmask = qp->modify_flags;
+ req.modify_mask = cpu_to_le32(qp->modify_flags);
+ req.qp_cid = cpu_to_le32(qp->id);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_STATE) {
+ req.network_type_en_sqd_async_notify_new_state =
+ (qp->state & CMDQ_MODIFY_QP_NEW_STATE_MASK) |
+ (qp->en_sqd_async_notify ?
+ CMDQ_MODIFY_QP_EN_SQD_ASYNC_NOTIFY : 0);
+ if (__can_request_ppp(qp)) {
+ req.path_mtu_pingpong_push_enable =
+ CMDQ_MODIFY_QP_PINGPONG_PUSH_ENABLE;
+ req.pingpong_push_dpi = qp->ppp.dpi;
+ ppp_requested = true;
+ }
+ }
+ req.network_type_en_sqd_async_notify_new_state |= qp->nw_type;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) {
+ req.access = qp->access;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+ req.pkey = IB_DEFAULT_PKEY_FULL;
+
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) {
+ req.qkey = cpu_to_le32(qp->qkey);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DGID) {
+ memcpy(temp32, qp->ah.dgid.data, sizeof(struct bnxt_qplib_gid));
+ req.dgid[0] = cpu_to_le32(temp32[0]);
+ req.dgid[1] = cpu_to_le32(temp32[1]);
+ req.dgid[2] = cpu_to_le32(temp32[2]);
+ req.dgid[3] = cpu_to_le32(temp32[3]);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL) {
+ req.flow_label = cpu_to_le32(qp->ah.flow_label);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+ req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[qp->ah.sgid_index]);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT) {
+ req.hop_limit = qp->ah.hop_limit;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TRAFFIC_CLASS) {
+ req.traffic_class = qp->ah.traffic_class;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_MAC) {
+ memcpy(req.dest_mac, qp->ah.dmac, 6);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU) {
+ req.path_mtu_pingpong_push_enable = qp->path_mtu;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TIMEOUT) {
+ req.timeout = qp->timeout;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RETRY_CNT) {
+ req.retry_cnt = qp->retry_cnt;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RNR_RETRY) {
+ req.rnr_retry = qp->rnr_retry;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MIN_RNR_TIMER) {
+ req.min_rnr_timer = qp->min_rnr_timer;
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_RQ_PSN) {
+ req.rq_psn = cpu_to_le32(qp->rq.psn);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN) {
+ req.sq_psn = cpu_to_le32(qp->sq.psn);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC) {
+ req.max_rd_atomic =
+ ORD_LIMIT_TO_ORRQ_SLOTS(qp->max_rd_atomic);
+ }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC) {
+ req.max_dest_rd_atomic =
+ IRD_LIMIT_TO_IRRQ_SLOTS(qp->max_dest_rd_atomic);
+ }
+ req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
+ req.rq_size = cpu_to_le32(qp->rq.hwq.max_elements);
+ req.sq_sge = cpu_to_le16(qp->sq.max_sge);
+ req.rq_sge = cpu_to_le16(qp->rq.max_sge);
+ req.max_inline_data = cpu_to_le32(qp->max_inline_data);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_DEST_QP_ID)
+ req.dest_qp_id = cpu_to_le32(qp->dest_qpn);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ENABLE_CC)
+ req.enable_cc = cpu_to_le16(CMDQ_MODIFY_QP_ENABLE_CC);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_ECN)
+ req.tos_dscp_tos_ecn =
+ ((qp->tos_ecn << CMDQ_MODIFY_QP_TOS_ECN_SFT) &
+ CMDQ_MODIFY_QP_TOS_ECN_MASK);
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_TOS_DSCP)
+ req.tos_dscp_tos_ecn |=
+ ((qp->tos_dscp << CMDQ_MODIFY_QP_TOS_DSCP_SFT) &
+ CMDQ_MODIFY_QP_TOS_DSCP_MASK);
+ req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ msg.qp_state = qp->state;
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc == -ETIMEDOUT && (qp->state == CMDQ_MODIFY_QP_NEW_STATE_ERR)) {
+ qp->cur_qp_state = qp->state;
+ return 0;
+ } else if (rc) {
+ return rc;
+ }
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RTR)
+ qp->lag_src_mac = be32_to_cpu(resp.lag_src_mac);
+
+ if (ppp_requested)
+ qp->ppp.st_idx_en = resp.pingpong_push_state_index_enabled;
+
+ qp->cur_qp_state = qp->state;
+ return 0;
+}
+
+int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_qp_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct creq_query_qp_resp_sb *sb;
+ struct cmdq_query_qp req = {};
+ u32 temp32[4];
+ int i, rc;
+
+ sbuf.size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_QP,
+ sizeof(req));
+ req.qp_cid = cpu_to_le32(qp->id);
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+
+ /* Extract the context from the side buffer */
+ qp->state = sb->en_sqd_async_notify_state &
+ CREQ_QUERY_QP_RESP_SB_STATE_MASK;
+ qp->cur_qp_state = qp->state;
+ qp->en_sqd_async_notify = sb->en_sqd_async_notify_state &
+ CREQ_QUERY_QP_RESP_SB_EN_SQD_ASYNC_NOTIFY ?
+ true : false;
+ qp->access = sb->access;
+ qp->pkey_index = le16_to_cpu(sb->pkey);
+ qp->qkey = le32_to_cpu(sb->qkey);
+
+ temp32[0] = le32_to_cpu(sb->dgid[0]);
+ temp32[1] = le32_to_cpu(sb->dgid[1]);
+ temp32[2] = le32_to_cpu(sb->dgid[2]);
+ temp32[3] = le32_to_cpu(sb->dgid[3]);
+ memcpy(qp->ah.dgid.data, temp32, sizeof(qp->ah.dgid.data));
+
+ qp->ah.flow_label = le32_to_cpu(sb->flow_label);
+
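+ /* Firmware returns the hardware SGID id; map it back to the
+ * driver's sgid_tbl index, defaulting to 0 when no entry matches.
+ */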
+ qp->ah.sgid_index = 0;
+ for (i = 0; i < res->sgid_tbl.max; i++) {
+ if (res->sgid_tbl.hw_id[i] == le16_to_cpu(sb->sgid_index)) {
+ qp->ah.sgid_index = i;
+ break;
+ }
+ }
+ if (i == res->sgid_tbl.max)
+ dev_dbg(&res->pdev->dev,
+ "QPLIB: SGID not found qp->id = 0x%x sgid_index = 0x%x\n",
+ qp->id, le16_to_cpu(sb->sgid_index));
+
+ qp->ah.hop_limit = sb->hop_limit;
+ qp->ah.traffic_class = sb->traffic_class;
+ memcpy(qp->ah.dmac, sb->dest_mac, ETH_ALEN);
+ qp->ah.vlan_id = (le16_to_cpu(sb->path_mtu_dest_vlan_id) &
+ CREQ_QUERY_QP_RESP_SB_VLAN_ID_MASK) >>
+ CREQ_QUERY_QP_RESP_SB_VLAN_ID_SFT;
+ qp->path_mtu = le16_to_cpu(sb->path_mtu_dest_vlan_id) &
+ CREQ_QUERY_QP_RESP_SB_PATH_MTU_MASK;
+ qp->timeout = sb->timeout;
+ qp->retry_cnt = sb->retry_cnt;
+ qp->rnr_retry = sb->rnr_retry;
+ qp->min_rnr_timer = sb->min_rnr_timer;
+ qp->rq.psn = le32_to_cpu(sb->rq_psn);
+ qp->max_rd_atomic = ORRQ_SLOTS_TO_ORD_LIMIT(sb->max_rd_atomic);
+ qp->sq.psn = le32_to_cpu(sb->sq_psn);
+ qp->max_dest_rd_atomic =
+ IRRQ_SLOTS_TO_IRD_LIMIT(sb->max_dest_rd_atomic);
+ qp->sq.max_wqe = qp->sq.hwq.max_elements;
+ qp->rq.max_wqe = qp->rq.hwq.max_elements;
+ qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
+ qp->rq.max_sge = le16_to_cpu(sb->rq_sge);
+ qp->max_inline_data = le32_to_cpu(sb->max_inline_data);
+ qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
+ memcpy(qp->smac, sb->src_mac, ETH_ALEN);
+ qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
+ qp->port_id = le16_to_cpu(sb->port_id);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
+{
+ struct bnxt_qplib_hwq *cq_hwq = &cq->hwq;
+ u32 peek_flags, peek_cons;
+ struct cq_base *hw_cqe;
+ int i;
+
+ peek_flags = cq->dbinfo.flags;
+ peek_cons = cq_hwq->cons;
+ for (i = 0; i < cq_hwq->depth; i++) {
+ hw_cqe = bnxt_qplib_get_qe(cq_hwq, peek_cons, NULL);
+ if (CQE_CMP_VALID(hw_cqe, peek_flags)) {
+ dma_rmb();
+ switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ case CQ_BASE_CQE_TYPE_TERMINAL:
+ {
+ struct cq_req *cqe = (struct cq_req *)hw_cqe;
+
+ if (qp == le64_to_cpu(cqe->qp_handle))
+ cqe->qp_handle = 0;
+ break;
+ }
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ {
+ struct cq_res_rc *cqe = (struct cq_res_rc *)hw_cqe;
+
+ if (qp == le64_to_cpu(cqe->qp_handle))
+ cqe->qp_handle = 0;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ bnxt_qplib_hwq_incr_cons(cq_hwq->depth, &peek_cons,
+ 1, &peek_flags);
+ }
+}
+
+int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_reftbl *tbl;
+ unsigned long flags;
+ u32 qp_idx;
+ int rc;
+
+ tbl = &res->reftbl.qpref;
+ qp_idx = map_qp_id_to_tbl_indx(qp->id, tbl);
+ spin_lock_irqsave(&tbl->lock, flags);
+ tbl->rec[qp_idx].xid = BNXT_QPLIB_QP_ID_INVALID;
+ tbl->rec[qp_idx].handle = NULL;
+ spin_unlock_irqrestore(&tbl->lock, flags);
+
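+ /* The QP was dropped from the reference table before issuing the
+ * FW destroy so async events stop resolving this handle; the
+ * entry is restored below if the command fails.
+ */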
+ rc = __qplib_destroy_qp(rcfw, qp);
+ if (rc) {
+ spin_lock_irqsave(&tbl->lock, flags);
+ tbl->rec[qp_idx].xid = qp->id;
+ tbl->rec[qp_idx].handle = qp;
+ spin_unlock_irqrestore(&tbl->lock, flags);
+ return rc;
+ }
+
+ return 0;
+}
+
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
+ if (qp->irrq.max_elements)
+ bnxt_qplib_free_hwq(res, &qp->irrq);
+ if (qp->orrq.max_elements)
+ bnxt_qplib_free_hwq(res, &qp->orrq);
+
+ if (!qp->is_user)
+ kfree(qp->rq.swq);
+ bnxt_qplib_free_hwq(res, &qp->rq.hwq);
+
+ if (!qp->is_user)
+ kfree(qp->sq.swq);
+ bnxt_qplib_free_hwq(res, &qp->sq.hwq);
+}
+
+void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_hdrbuf *buf;
+ u32 sw_prod;
+
+ memset(sge, 0, sizeof(*sge));
+
+ buf = qp->sq_hdr_buf;
+ if (buf) {
+ sw_prod = sq->swq_start;
+ sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step);
+ sge->lkey = 0xFFFFFFFF;
+ sge->size = buf->step;
+ return buf->va + sw_prod * sge->size;
+ }
+ return NULL;
+}
+
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+
+ return rq->swq_start;
+}
+
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct bnxt_qplib_hdrbuf *buf;
+ u32 sw_prod;
+
+ memset(sge, 0, sizeof(*sge));
+
+ buf = qp->rq_hdr_buf;
+ if (buf) {
+ sw_prod = rq->swq_start;
+ sge->addr = (dma_addr_t)(buf->dma_map + sw_prod * buf->step);
+ sge->lkey = 0xFFFFFFFF;
+ sge->size = buf->step;
+ return buf->va + sw_prod * sge->size;
+ }
+ return NULL;
+}
+
+/* Fill the MSN table entry for the next PSN row */
+static void bnxt_qplib_fill_msn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_msn_search *msns;
+ u32 start_psn, next_psn;
+ u16 start_idx;
+
+ msns = (struct sq_msn_search *)swq->psn_search;
+ msns->start_idx_next_psn_start_psn = 0;
+
+ start_psn = swq->start_psn;
+ next_psn = swq->next_psn;
+ start_idx = swq->slot_idx;
+ msns->start_idx_next_psn_start_psn |=
+ bnxt_re_update_msn_tbl(start_idx, next_psn, start_psn);
+ pr_debug("QP_LIB MSN %d START_IDX %u NEXT_PSN %u START_PSN %u\n",
+ qp->msn,
+ (u16)
+ cpu_to_le16(BNXT_RE_MSN_IDX(msns->start_idx_next_psn_start_psn)),
+ (u32)
+ cpu_to_le32(BNXT_RE_MSN_NPSN(msns->start_idx_next_psn_start_psn)),
+ (u32)
+ cpu_to_le32(BNXT_RE_MSN_SPSN(msns->start_idx_next_psn_start_psn)));
+ qp->msn++;
+ qp->msn %= qp->msn_tbl_sz;
+}
+
+static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ struct bnxt_qplib_swq *swq)
+{
+ struct sq_psn_search_ext *psns_ext;
+ struct sq_psn_search *psns;
+ u32 flg_npsn;
+ u32 op_spsn;
+
+ if (!swq->psn_search)
+ return;
+
+ /* Handle MSN differently on cap flags */
+ if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
+ bnxt_qplib_fill_msn_search(qp, wqe, swq);
+ return;
+ }
+ psns = (struct sq_psn_search *)swq->psn_search;
+ psns_ext = (struct sq_psn_search_ext *)swq->psn_search;
+
+ op_spsn = ((swq->start_psn << SQ_PSN_SEARCH_START_PSN_SFT) &
+ SQ_PSN_SEARCH_START_PSN_MASK);
+ op_spsn |= ((wqe->type << SQ_PSN_SEARCH_OPCODE_SFT) &
+ SQ_PSN_SEARCH_OPCODE_MASK);
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+ if (_is_chip_gen_p5_p7(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
+ } else {
+ psns->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns->flags_next_psn = cpu_to_le32(flg_npsn);
+ }
+}
+
+static u16 _calc_ilsize(struct bnxt_qplib_swqe *wqe)
+{
+ u16 size = 0;
+ int indx;
+
+ for (indx = 0; indx < wqe->num_sge; indx++)
+ size += wqe->sg_list[indx].size;
+ return size;
+}
+
+static int bnxt_qplib_put_inline(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe,
+ u32 *sw_prod)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ int len, t_len, offt = 0;
+ int t_cplen = 0, cplen;
+ bool pull_dst = true;
+ void *il_dst = NULL;
+ void *il_src = NULL;
+ int indx;
+
+ sq_hwq = &qp->sq.hwq;
+ t_len = 0;
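+ /* Pack the source SGEs into consecutive 16B SQ slots: il_dst
+ * walks within the current slot (offt) and a fresh slot is
+ * pulled from the ring once 16 bytes have been filled.
+ */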
+ for (indx = 0; indx < wqe->num_sge; indx++) {
+ len = wqe->sg_list[indx].size;
+ il_src = (void *)wqe->sg_list[indx].addr;
+ t_len += len;
+ if (t_len > qp->max_inline_data)
+ goto bad;
+ while (len) {
+ if (pull_dst) {
+ pull_dst = false;
+ il_dst = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) %
+ sq_hwq->depth), NULL);
+ (*sw_prod)++;
+ t_cplen = 0;
+ offt = 0;
+ }
+ cplen = min_t(int, len, sizeof(struct sq_sge));
+ cplen = min_t(int, cplen,
+ (sizeof(struct sq_sge) - offt));
+ memcpy(il_dst, il_src, cplen);
+ t_cplen += cplen;
+ il_src += cplen;
+ il_dst += cplen;
+ offt += cplen;
+ len -= cplen;
+ if (t_cplen == sizeof(struct sq_sge))
+ pull_dst = true;
+ }
+ }
+
+ return t_len;
+bad:
+ return -ENOMEM;
+}
+
+static int bnxt_qplib_put_sges(struct bnxt_qplib_hwq *sq_hwq,
+ struct bnxt_qplib_sge *ssge,
+ u32 nsge, u32 *sw_prod)
+{
+ struct sq_sge *dsge;
+ int indx, len = 0;
+
+ for (indx = 0; indx < nsge; indx++, (*sw_prod)++) {
+ dsge = bnxt_qplib_get_qe(sq_hwq, ((*sw_prod) % sq_hwq->depth), NULL);
+ dsge->va_or_pa = cpu_to_le64(ssge[indx].addr);
+ dsge->l_key = cpu_to_le32(ssge[indx].lkey);
+ dsge->size = cpu_to_le32(ssge[indx].size);
+ len += ssge[indx].size;
+ }
+ return len;
+}
+
+static u16 _calculate_wqe_byte(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe, u16 *wqe_byte)
+{
+ u16 wqe_size;
+ u32 ilsize;
+ u16 nsge;
+
+ nsge = wqe->num_sge;
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE) {
+ ilsize = _calc_ilsize(wqe);
+ wqe_size = (ilsize > qp->max_inline_data) ?
+ qp->max_inline_data : ilsize;
+ wqe_size = ALIGN(wqe_size, sizeof(struct sq_sge));
+ } else {
+ wqe_size = nsge * sizeof(struct sq_sge);
+ }
+ /* "sq_send_hdr" is a misnomer here: the RQ header has the same size. */
+ wqe_size += sizeof(struct sq_send_hdr);
+ if (wqe_byte)
+ *wqe_byte = wqe_size;
+ return wqe_size / sizeof(struct sq_sge);
+}
+
+static u16 _translate_q_full_delta(struct bnxt_qplib_q *que, u16 wqe_bytes)
+{
+ /* For Cu/Wh delta = 128, stride = 16, wqe_bytes = 128
+ * For Gen-p5 B/C mode delta = 0, stride = 16, wqe_bytes = 128.
+ * For Gen-p5 delta = 0, stride = 16, 32 <= wqe_bytes <= 512.
+ * when 8916 is disabled.
+ */
+ return (que->q_full_delta * wqe_bytes) / que->hwq.element_size;
+}
+
+static void bnxt_qplib_pull_psn_buff(struct bnxt_qplib_qp *qp, struct bnxt_qplib_q *sq,
+ struct bnxt_qplib_swq *swq, bool hw_retx)
+{
+ struct bnxt_qplib_hwq *sq_hwq;
+ u32 pg_num, pg_indx;
+ void *buff;
+ u32 tail;
+
+ sq_hwq = &sq->hwq;
+ if (!sq_hwq->pad_pg)
+ return;
+
+ tail = swq->slot_idx / sq->dbinfo.max_slot;
+ if (hw_retx)
+ tail %= qp->msn_tbl_sz;
+ pg_num = (tail + sq_hwq->pad_pgofft) / (PAGE_SIZE / sq_hwq->pad_stride);
+ pg_indx = (tail + sq_hwq->pad_pgofft) % (PAGE_SIZE / sq_hwq->pad_stride);
+ buff = (void *)(sq_hwq->pad_pg[pg_num] + pg_indx * sq_hwq->pad_stride);
+ /* the start ptr for buff is same ie after the SQ */
+ swq->psn_search = buff;
+}
+
+void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+
+ bnxt_qplib_ring_prod_db(&sq->dbinfo, DBC_DBC_TYPE_SQ);
+}
+
+int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ int i, rc = 0, data_len = 0, pkt_num = 0;
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_hwq *sq_hwq;
+ struct bnxt_qplib_swq *swq;
+ bool sch_handler = false;
+ u16 slots_needed;
+ void *base_hdr;
+ void *ext_hdr;
+ __le32 temp32;
+ u16 qfd_slots;
+ u8 wqe_slots;
+ u16 wqe_size;
+ u32 sw_prod;
+ u32 wqe_idx;
+
+ sq_hwq = &sq->hwq;
+ if (qp->state != CMDQ_MODIFY_QP_NEW_STATE_RTS &&
+ qp->state != CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ dev_err(&sq_hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state\n",
+ qp->id, qp->state);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ wqe_slots = _calculate_wqe_byte(qp, wqe, &wqe_size);
+ slots_needed = (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+ sq->dbinfo.max_slot : wqe_slots;
+ qfd_slots = _translate_q_full_delta(sq, wqe_size);
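+ /* Require headroom for this WQE plus the q_full_delta guard band
+ * between producer and consumer before admitting the post.
+ */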
+ if (bnxt_qplib_queue_full(sq_hwq, (slots_needed + qfd_slots))) {
+ dev_err(&sq_hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) SQ is full!\n", qp->id);
+ dev_err(&sq_hwq->pdev->dev,
+ "QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x slots = %#x\n",
+ HWQ_CMP(sq_hwq->prod, sq_hwq),
+ HWQ_CMP(sq_hwq->cons, sq_hwq),
+ sq_hwq->max_elements, qfd_slots, slots_needed);
+ dev_err(&sq_hwq->pdev->dev,
+ "QPLIB: phantom_wqe_cnt: %d phantom_cqe_cnt: %d\n",
+ sq->phantom_wqe_cnt, sq->phantom_cqe_cnt);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ sw_prod = sq_hwq->prod;
+ swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
+ swq->slot_idx = sw_prod;
+ bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
+
+ swq->wr_id = wqe->wr_id;
+ swq->type = wqe->type;
+ swq->flags = wqe->flags;
+ swq->slots = slots_needed;
+ swq->start_psn = sq->psn & BTH_PSN_MASK;
+ if (qp->sig_type || wqe->flags & BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP)
+ swq->flags |= SQ_SEND_FLAGS_SIGNAL_COMP;
+
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: QP(0x%x) post SQ wr_id[%d] = 0x%llx\n",
+ qp->id, wqe_idx, swq->wr_id);
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&sq_hwq->pdev->dev,
+ "%s Error QP. Scheduling for poll_cq\n", __func__);
+ goto queue_err;
+ }
+
+ base_hdr = bnxt_qplib_get_qe(sq_hwq, sw_prod, NULL);
+ sw_prod++;
+ ext_hdr = bnxt_qplib_get_qe(sq_hwq, (sw_prod % sq_hwq->depth), NULL);
+ sw_prod++;
+ memset(base_hdr, 0, sizeof(struct sq_sge));
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
+
+ if (wqe->flags & BNXT_QPLIB_SWQE_FLAGS_INLINE)
+ data_len = bnxt_qplib_put_inline(qp, wqe, &sw_prod);
+ else
+ data_len = bnxt_qplib_put_sges(sq_hwq, wqe->sg_list,
+ wqe->num_sge, &sw_prod);
+ if (data_len < 0)
+ goto queue_err;
+ /* Specifics */
+ switch (wqe->type) {
+ case BNXT_QPLIB_SWQE_TYPE_SEND:
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE ||
+ qp->type == CMDQ_CREATE_QP1_TYPE_GSI) {
+ /* Assemble info for Raw Ethertype QPs */
+ struct sq_send_raweth_qp1_hdr *sqe = base_hdr;
+ struct sq_raw_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_slots;
+ sqe->cfa_action = cpu_to_le16(wqe->rawqp1.cfa_action);
+ sqe->lflags = cpu_to_le16(wqe->rawqp1.lflags);
+ sqe->length = cpu_to_le32(data_len);
+ ext_sqe->cfa_meta = cpu_to_le32((wqe->rawqp1.cfa_meta &
+ SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_MASK) <<
+ SQ_SEND_RAWETH_QP1_CFA_META_VLAN_VID_SFT);
+
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: RAW/QP1 Send WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\twqe_size = 0x%x\n"
+ "\tlflags = 0x%x\n"
+ "\tcfa_action = 0x%x\n"
+ "\tlength = 0x%x\n"
+ "\tcfa_meta = 0x%x\n",
+ sqe->wqe_type, sqe->flags, sqe->wqe_size,
+ sqe->lflags, sqe->cfa_action,
+ sqe->length, ext_sqe->cfa_meta);
+ break;
+ }
+ fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM:
+ fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV:
+ {
+ struct sq_send_hdr *sqe = base_hdr;
+ struct sq_ud_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_slots;
+ sqe->inv_key_or_imm_data = cpu_to_le32(wqe->send.inv_key);
+ if (qp->type == CMDQ_CREATE_QP_TYPE_UD ||
+ qp->type == CMDQ_CREATE_QP_TYPE_GSI) {
+ sqe->q_key = cpu_to_le32(wqe->send.q_key);
+ sqe->length = cpu_to_le32(data_len);
+ ext_sqe->dst_qp = cpu_to_le32(
+ wqe->send.dst_qp & SQ_SEND_DST_QP_MASK);
+ ext_sqe->avid = cpu_to_le32(wqe->send.avid &
+ SQ_SEND_AVID_MASK);
+ sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
+ } else {
+ sqe->length = cpu_to_le32(data_len);
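+			/* Each MTU-sized packet consumes one PSN (rounded
+			 * up, minimum one), so advance the SQ PSN by the
+			 * packet count.
+			 */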
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+ }
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: Send WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\twqe_size = 0x%x\n"
+ "\tinv_key/immdata = 0x%x\n"
+ "\tq_key = 0x%x\n"
+ "\tdst_qp = 0x%x\n"
+ "\tlength = 0x%x\n"
+ "\tavid = 0x%x\n",
+ sqe->wqe_type, sqe->flags, sqe->wqe_size,
+ sqe->inv_key_or_imm_data, sqe->q_key, ext_sqe->dst_qp,
+ sqe->length, ext_sqe->avid);
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE:
+		fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM:
+		fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_RDMA_READ:
+ {
+ struct sq_rdma_hdr *sqe = base_hdr;
+ struct sq_rdma_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->wqe_size = wqe_slots;
+ sqe->imm_data = cpu_to_le32(wqe->rdma.inv_key);
+ sqe->length = cpu_to_le32((u32)data_len);
+ ext_sqe->remote_va = cpu_to_le64(wqe->rdma.remote_va);
+ ext_sqe->remote_key = cpu_to_le32(wqe->rdma.r_key);
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: RDMA WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\twqe_size = 0x%x\n"
+ "\timmdata = 0x%x\n"
+ "\tlength = 0x%x\n"
+ "\tremote_va = 0x%llx\n"
+ "\tremote_key = 0x%x\n",
+ sqe->wqe_type, sqe->flags, sqe->wqe_size,
+ sqe->imm_data, sqe->length, ext_sqe->remote_va,
+ ext_sqe->remote_key);
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP:
+		fallthrough;
+ case BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD:
+ {
+ struct sq_atomic_hdr *sqe = base_hdr;
+ struct sq_atomic_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->remote_key = cpu_to_le32(wqe->atomic.r_key);
+ sqe->remote_va = cpu_to_le64(wqe->atomic.remote_va);
+ ext_sqe->swap_data = cpu_to_le64(wqe->atomic.swap_data);
+ ext_sqe->cmp_data = cpu_to_le64(wqe->atomic.cmp_data);
+ if (qp->mtu)
+ pkt_num = (data_len + qp->mtu - 1) / qp->mtu;
+ if (!pkt_num)
+ pkt_num = 1;
+ sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_LOCAL_INV:
+ {
+ struct sq_localinvalidate_hdr *sqe = base_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->inv_l_key = cpu_to_le32(wqe->local_inv.inv_l_key);
+
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: LOCAL INV WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\tinv_l_key = 0x%x\n",
+ sqe->wqe_type, sqe->flags, sqe->inv_l_key);
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR:
+ {
+ struct sq_fr_pmr_hdr *sqe = base_hdr;
+ struct sq_fr_pmr_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->access_cntl = wqe->frmr.access_cntl |
+ SQ_FR_PMR_ACCESS_CNTL_LOCAL_WRITE;
+ sqe->zero_based_page_size_log =
+ (wqe->frmr.pg_sz_log & SQ_FR_PMR_PAGE_SIZE_LOG_MASK) <<
+ SQ_FR_PMR_PAGE_SIZE_LOG_SFT |
+			(wqe->frmr.zero_based ? SQ_FR_PMR_ZERO_BASED : 0);
+ sqe->l_key = cpu_to_le32(wqe->frmr.l_key);
+		/* TODO: OFED only provides the MR length up to 32 bits for FRMR */
+ temp32 = cpu_to_le32(wqe->frmr.length);
+ memcpy(sqe->length, &temp32, sizeof(wqe->frmr.length));
+ sqe->numlevels_pbl_page_size_log =
+ ((wqe->frmr.pbl_pg_sz_log <<
+ SQ_FR_PMR_PBL_PAGE_SIZE_LOG_SFT) &
+ SQ_FR_PMR_PBL_PAGE_SIZE_LOG_MASK) |
+ ((wqe->frmr.levels << SQ_FR_PMR_NUMLEVELS_SFT) &
+ SQ_FR_PMR_NUMLEVELS_MASK);
+ if (!wqe->frmr.levels && !wqe->frmr.pbl_ptr) {
+ ext_sqe->pblptr = cpu_to_le64(wqe->frmr.page_list[0]);
+ } else {
+ for (i = 0; i < wqe->frmr.page_list_len; i++)
+ wqe->frmr.pbl_ptr[i] = cpu_to_le64(
+ wqe->frmr.page_list[i] |
+ PTU_PTE_VALID);
+ ext_sqe->pblptr = cpu_to_le64(wqe->frmr.pbl_dma_ptr);
+ }
+ ext_sqe->va = cpu_to_le64(wqe->frmr.va);
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: FRMR WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\taccess_cntl = 0x%x\n"
+ "\tzero_based_page_size_log = 0x%x\n"
+ "\tl_key = 0x%x\n"
+ "\tlength = 0x%x\n"
+ "\tnumlevels_pbl_page_size_log = 0x%x\n"
+ "\tpblptr = 0x%llx\n"
+ "\tva = 0x%llx\n",
+ sqe->wqe_type, sqe->flags, sqe->access_cntl,
+ sqe->zero_based_page_size_log, sqe->l_key,
+ *(u32 *)sqe->length, sqe->numlevels_pbl_page_size_log,
+ ext_sqe->pblptr, ext_sqe->va);
+ break;
+ }
+ case BNXT_QPLIB_SWQE_TYPE_BIND_MW:
+ {
+ struct sq_bind_hdr *sqe = base_hdr;
+ struct sq_bind_ext_hdr *ext_sqe = ext_hdr;
+
+ sqe->wqe_type = wqe->type;
+ sqe->flags = wqe->flags;
+ sqe->access_cntl = wqe->bind.access_cntl;
+ sqe->mw_type_zero_based = wqe->bind.mw_type |
+			(wqe->bind.zero_based ? SQ_BIND_ZERO_BASED : 0);
+ sqe->parent_l_key = cpu_to_le32(wqe->bind.parent_l_key);
+ sqe->l_key = cpu_to_le32(wqe->bind.r_key);
+ ext_sqe->va = cpu_to_le64(wqe->bind.va);
+ ext_sqe->length_lo = cpu_to_le32(wqe->bind.length);
+ dev_dbg(&sq_hwq->pdev->dev,
+ "QPLIB: FP: BIND WQE:\n"
+ "\twqe_type = 0x%x\n"
+ "\tflags = 0x%x\n"
+ "\taccess_cntl = 0x%x\n"
+ "\tmw_type_zero_based = 0x%x\n"
+ "\tparent_l_key = 0x%x\n"
+ "\tl_key = 0x%x\n"
+ "\tva = 0x%llx\n"
+ "\tlength = 0x%x\n",
+ sqe->wqe_type, sqe->flags, sqe->access_cntl,
+ sqe->mw_type_zero_based, sqe->parent_l_key,
+ sqe->l_key, sqe->va, ext_sqe->length_lo);
+ break;
+ }
+ default:
+ /* Bad wqe, return error */
+ rc = -EINVAL;
+ goto done;
+ }
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+
+queue_err:
+ bnxt_qplib_swq_mod_start(sq, wqe_idx);
+ bnxt_qplib_hwq_incr_prod(&sq->dbinfo, sq_hwq, swq->slots);
+ qp->wqe_cnt++;
+done:
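+	/* For an error-state QP, schedule the CQ notifier so that poll_cq
+	 * can generate a flush completion for this work request.
+	 */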
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->scq;
+ nq_work->nq = qp->scq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->scq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&sq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate SQ nq_work!\n");
+ rc = -ENOMEM;
+ }
+ }
+ return rc;
+}
+
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp)
+{
+ struct bnxt_qplib_q *rq = &qp->rq;
+
+ bnxt_qplib_ring_prod_db(&rq->dbinfo, DBC_DBC_TYPE_RQ);
+}
+
+void bnxt_re_handle_cqn(struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_nq *nq;
+
+ if (!(cq && cq->nq))
+ return;
+
+ nq = cq->nq;
+ spin_lock_bh(&cq->compl_lock);
+ if (nq->cqn_handler) {
+ dev_dbg(&nq->res->pdev->dev,
+ "%s:Trigger cq = %p event nq = %p\n",
+ __func__, cq, nq);
+ nq->cqn_handler(nq, cq);
+ }
+ spin_unlock_bh(&cq->compl_lock);
+}
+
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe)
+{
+ struct bnxt_qplib_nq_work *nq_work = NULL;
+ struct bnxt_qplib_q *rq = &qp->rq;
+ struct bnxt_qplib_hwq *rq_hwq;
+ struct bnxt_qplib_swq *swq;
+ bool sch_handler = false;
+ struct rq_wqe_hdr *base_hdr;
+ struct rq_ext_hdr *ext_hdr;
+ struct sq_sge *dsge;
+ u8 wqe_slots;
+ u32 wqe_idx;
+ u32 sw_prod;
+ int rc = 0;
+
+ rq_hwq = &rq->hwq;
+ if (qp->state == CMDQ_MODIFY_QP_NEW_STATE_RESET) {
+ dev_err(&rq_hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) is in the 0x%x state\n",
+ qp->id, qp->state);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ wqe_slots = _calculate_wqe_byte(qp, wqe, NULL);
+ if (bnxt_qplib_queue_full(rq_hwq, rq->dbinfo.max_slot)) {
+ dev_err(&rq_hwq->pdev->dev,
+ "QPLIB: FP: QP (0x%x) RQ is full!\n", qp->id);
+ rc = -EINVAL;
+ goto done;
+ }
+
+ swq = bnxt_qplib_get_swqe(rq, &wqe_idx);
+ swq->wr_id = wqe->wr_id;
+ swq->slots = rq->dbinfo.max_slot;
+ dev_dbg(&rq_hwq->pdev->dev,
+ "QPLIB: FP: post RQ wr_id[%d] = 0x%llx\n",
+ wqe_idx, swq->wr_id);
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
+ sch_handler = true;
+ dev_dbg(&rq_hwq->pdev->dev, "%s Error QP. Sched a flushed cmpl\n",
+ __func__);
+ goto queue_err;
+ }
+
+ sw_prod = rq_hwq->prod;
+ base_hdr = bnxt_qplib_get_qe(rq_hwq, sw_prod, NULL);
+ sw_prod++;
+ ext_hdr = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL);
+ sw_prod++;
+ memset(base_hdr, 0, sizeof(struct sq_sge));
+ memset(ext_hdr, 0, sizeof(struct sq_sge));
+
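+	/* With no SGEs present, post a single zero-length SGE so that the
+	 * WQE still occupies a well-formed slot.
+	 */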
+ if (!wqe->num_sge) {
+ dsge = bnxt_qplib_get_qe(rq_hwq, (sw_prod % rq_hwq->depth), NULL);
+ dsge->size = 0;
+ wqe_slots++;
+ } else {
+ bnxt_qplib_put_sges(rq_hwq, wqe->sg_list, wqe->num_sge, &sw_prod);
+ }
+ base_hdr->wqe_type = wqe->type;
+ base_hdr->flags = wqe->flags;
+ base_hdr->wqe_size = wqe_slots;
+ base_hdr->wr_id |= cpu_to_le32(wqe_idx);
+queue_err:
+ bnxt_qplib_swq_mod_start(rq, wqe_idx);
+ bnxt_qplib_hwq_incr_prod(&rq->dbinfo, &rq->hwq, swq->slots);
+done:
+ if (sch_handler) {
+ nq_work = kzalloc(sizeof(*nq_work), GFP_ATOMIC);
+ if (nq_work) {
+ nq_work->cq = qp->rcq;
+ nq_work->nq = qp->rcq->nq;
+ INIT_WORK(&nq_work->work, bnxt_qpn_cqn_sched_task);
+ queue_work(qp->rcq->nq->cqn_wq, &nq_work->work);
+ } else {
+ dev_err(&rq->hwq.pdev->dev,
+ "QPLIB: FP: Failed to allocate RQ nq_work!\n");
+ rc = -ENOMEM;
+ }
+ }
+ return rc;
+}
+
+/* CQ */
+int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_cq req = {};
+ struct bnxt_qplib_reftbl *tbl;
+ unsigned long flag;
+ u32 pg_sz_lvl = 0;
+ int rc;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = cq->max_wqe;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ hwq_attr.sginfo = &cq->sginfo;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
+ if (rc)
+ goto exit;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_CQ,
+ sizeof(req));
+
+	if (!cq->dpi) {
+		dev_err(&rcfw->pdev->dev,
+			"QPLIB: FP: CREATE_CQ failed due to NULL DPI\n");
+		/* Free the hwq allocated above instead of leaking it */
+		rc = -EINVAL;
+		goto fail;
+	}
+ req.dpi = cpu_to_le32(cq->dpi->dpi);
+ req.cq_handle = cpu_to_le64(cq->cq_handle);
+
+ req.cq_size = cpu_to_le32(cq->max_wqe);
+ req.pbl = cpu_to_le64(_get_base_addr(&cq->hwq));
+ pg_sz_lvl = _get_base_pg_size(&cq->hwq) << CMDQ_CREATE_CQ_PG_SIZE_SFT;
+ pg_sz_lvl |= ((cq->hwq.level & CMDQ_CREATE_CQ_LVL_MASK) <<
+ CMDQ_CREATE_CQ_LVL_SFT);
+ req.pg_size_lvl = cpu_to_le32(pg_sz_lvl);
+
+ req.cq_fco_cnq_id = cpu_to_le32(
+ (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
+ CMDQ_CREATE_CQ_CNQ_ID_SFT);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+ cq->id = le32_to_cpu(resp.xid);
+ cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
+ init_waitqueue_head(&cq->waitq);
+ INIT_LIST_HEAD(&cq->sqf_head);
+ INIT_LIST_HEAD(&cq->rqf_head);
+ spin_lock_init(&cq->flush_lock);
+ spin_lock_init(&cq->compl_lock);
+
+ /* init dbinfo */
+ cq->cctx = res->cctx;
+ cq->dbinfo.hwq = &cq->hwq;
+ cq->dbinfo.xid = cq->id;
+ cq->dbinfo.db = cq->dpi->dbr;
+ cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
+ cq->dbinfo.flags = 0;
+ cq->dbinfo.toggle = 0;
+ cq->dbinfo.res = res;
+ cq->dbinfo.seed = cq->id;
+ spin_lock_init(&cq->dbinfo.lock);
+ cq->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ cq->dbinfo.shadow_key_arm_ena = BNXT_QPLIB_DBR_KEY_INVALID;
+
+ tbl = &res->reftbl.cqref;
+ spin_lock_irqsave(&tbl->lock, flag);
+ tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = cq->id;
+ tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = cq;
+ spin_unlock_irqrestore(&tbl->lock, flag);
+
+ bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
+ return 0;
+
+fail:
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+exit:
+ return rc;
+}
+
+int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+	/* TODO: Modified CQ thresholds are passed to the HW via DBR */
+ return 0;
+}
+
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cq *cq)
+{
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ memcpy(&cq->hwq, &cq->resize_hwq, sizeof(cq->hwq));
+ /* Reset only the cons bit in the flags */
+ cq->dbinfo.flags &= ~(1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT);
+
+ /* Tell HW to switch over to the new CQ */
+ if (!cq->resize_hwq.is_user)
+ bnxt_qplib_cq_coffack_db(&cq->dbinfo);
+}
+
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+ int new_cqes)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_resize_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_resize_cq req = {};
+ u32 pgsz = 0, lvl = 0, nsz = 0;
+ struct bnxt_qplib_pbl *pbl;
+ u16 count = -1;
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_RESIZE_CQ,
+ sizeof(req));
+
+ hwq_attr.sginfo = &cq->sginfo;
+ hwq_attr.res = res;
+ hwq_attr.depth = new_cqes;
+ hwq_attr.stride = sizeof(struct cq_base);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&cq->resize_hwq, &hwq_attr);
+ if (rc)
+ return rc;
+
+ dev_dbg(&rcfw->pdev->dev, "QPLIB: FP: %s: pbl_lvl: %d\n", __func__,
+ cq->resize_hwq.level);
+ req.cq_cid = cpu_to_le32(cq->id);
+ pbl = &cq->resize_hwq.pbl[PBL_LVL_0];
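+	/* Map the level-0 PBL page size onto the firmware encoding,
+	 * defaulting to 4K for unrecognized sizes.
+	 */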
+ pgsz = ((pbl->pg_size == ROCE_PG_SIZE_4K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_4K :
+ pbl->pg_size == ROCE_PG_SIZE_8K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8K :
+ pbl->pg_size == ROCE_PG_SIZE_64K ? CMDQ_RESIZE_CQ_PG_SIZE_PG_64K :
+ pbl->pg_size == ROCE_PG_SIZE_2M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_2M :
+ pbl->pg_size == ROCE_PG_SIZE_8M ? CMDQ_RESIZE_CQ_PG_SIZE_PG_8M :
+ pbl->pg_size == ROCE_PG_SIZE_1G ? CMDQ_RESIZE_CQ_PG_SIZE_PG_1G :
+ CMDQ_RESIZE_CQ_PG_SIZE_PG_4K) & CMDQ_RESIZE_CQ_PG_SIZE_MASK);
+ lvl = (cq->resize_hwq.level << CMDQ_RESIZE_CQ_LVL_SFT) &
+ CMDQ_RESIZE_CQ_LVL_MASK;
+ nsz = (new_cqes << CMDQ_RESIZE_CQ_NEW_CQ_SIZE_SFT) &
+ CMDQ_RESIZE_CQ_NEW_CQ_SIZE_MASK;
+ req.new_cq_size_pg_size_lvl = cpu_to_le32(nsz|pgsz|lvl);
+ req.new_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+
+ if (!cq->resize_hwq.is_user)
+ set_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ if (!cq->resize_hwq.is_user) {
+wait:
+ /* Wait here for the HW to switch the CQ over */
+ if (wait_event_interruptible_timeout(cq->waitq,
+ !test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags),
+ msecs_to_jiffies(CQ_RESIZE_WAIT_TIME_MS)) ==
+ -ERESTARTSYS && count--)
+ goto wait;
+
+ if (test_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags)) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: FP: RESIZE_CQ timed out\n");
+ rc = -ETIMEDOUT;
+ goto fail;
+ }
+
+ bnxt_qplib_resize_cq_complete(res, cq);
+ }
+
+ return 0;
+fail:
+ if (!cq->resize_hwq.is_user) {
+ bnxt_qplib_free_hwq(res, &cq->resize_hwq);
+ clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
+ }
+ return rc;
+}
+
+void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+}
+
+static void bnxt_qplib_sync_cq(struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_nq *nq = cq->nq;
+ /* Flush any pending work and synchronize irq */
+ flush_workqueue(cq->nq->cqn_wq);
+ mutex_lock(&nq->lock);
+ if (nq->requested)
+ synchronize_irq(nq->msix_vec);
+ mutex_unlock(&nq->lock);
+}
+
+int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_cq_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_cq req = {};
+ struct bnxt_qplib_reftbl *tbl;
+ u16 total_cnq_events;
+ unsigned long flag;
+ int rc;
+
+ tbl = &res->reftbl.cqref;
+ spin_lock_irqsave(&tbl->lock, flag);
+ tbl->rec[GET_TBL_INDEX(cq->id, tbl)].handle = NULL;
+ tbl->rec[GET_TBL_INDEX(cq->id, tbl)].xid = 0;
+ spin_unlock_irqrestore(&tbl->lock, flag);
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_CQ,
+ sizeof(req));
+
+ req.cq_cid = cpu_to_le32(cq->id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ total_cnq_events = le16_to_cpu(resp.total_cnq_events);
+	dev_dbg(&rcfw->pdev->dev,
+		"%s: cq_id = 0x%x cq = 0x%p resp.total_cnq_events = 0x%x\n",
+		__func__, cq->id, cq, total_cnq_events);
+ __wait_for_all_nqes(cq, total_cnq_events);
+ bnxt_qplib_sync_cq(cq);
+ bnxt_qplib_free_hwq(res, &cq->hwq);
+ return 0;
+}
+
+static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cqe **pcqe, int *budget)
+{
+ struct bnxt_qplib_cqe *cqe;
+ u32 start, last;
+ int rc = 0;
+
+ /* Now complete all outstanding SQEs with FLUSHED_ERR */
+ start = sq->swq_start;
+ cqe = *pcqe;
+ while (*budget) {
+ last = sq->swq_last;
+ if (start == last) {
+ break;
+ }
+ /* Skip the FENCE WQE completions */
+ if (sq->swq[last].wr_id == BNXT_QPLIB_FENCE_WRID) {
+ bnxt_re_legacy_cancel_phantom_processing(qp);
+ goto skip_compl;
+ }
+
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status = CQ_REQ_STATUS_WORK_REQUEST_FLUSHED_ERR;
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)qp;
+ cqe->wr_id = sq->swq[last].wr_id;
+ cqe->src_qp = qp->id;
+ cqe->type = sq->swq[last].type;
+ dev_dbg(&sq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed terminal Req \n");
+ dev_dbg(&sq->hwq.pdev->dev,
+ "QPLIB: wr_id[%d] = 0x%llx with status 0x%x\n",
+ last, cqe->wr_id, cqe->status);
+ cqe++;
+ (*budget)--;
+skip_compl:
+ bnxt_qplib_hwq_incr_cons(sq->hwq.depth,
+ &sq->hwq.cons,
+ sq->swq[last].slots,
+ &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!*budget && sq->swq_last != start)
+ /* Out of budget */
+ rc = -EAGAIN;
+ dev_dbg(&sq->hwq.pdev->dev, "QPLIB: FP: Flush SQ rc = 0x%x\n", rc);
+
+ return rc;
+}
+
+static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cqe **pcqe, int *budget)
+{
+ struct bnxt_qplib_cqe *cqe;
+ u32 start, last;
+ int opcode = 0;
+ int rc = 0;
+
+ switch (qp->type) {
+ case CMDQ_CREATE_QP1_TYPE_GSI:
+ opcode = CQ_BASE_CQE_TYPE_RES_RAWETH_QP1;
+ break;
+ case CMDQ_CREATE_QP_TYPE_RC:
+ opcode = CQ_BASE_CQE_TYPE_RES_RC;
+ break;
+ case CMDQ_CREATE_QP_TYPE_UD:
+ opcode = CQ_BASE_CQE_TYPE_RES_UD;
+ break;
+ }
+
+ /* Flush the rest of the RQ */
+ start = rq->swq_start;
+ cqe = *pcqe;
+ while (*budget) {
+ last = rq->swq_last;
+ if (last == start)
+ break;
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status =
+ CQ_RES_RC_STATUS_WORK_REQUEST_FLUSHED_ERR;
+ cqe->opcode = opcode;
+ cqe->qp_handle = (u64)qp;
+ cqe->wr_id = rq->swq[last].wr_id;
+ dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Res RC \n");
+ dev_dbg(&rq->hwq.pdev->dev,
+ "QPLIB: rq[%d] = 0x%llx with status 0x%x\n",
+ last, cqe->wr_id, cqe->status);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.depth,
+ &rq->hwq.cons,
+ rq->swq[last].slots,
+ &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!*budget && rq->swq_last != start)
+ /* Out of budget */
+ rc = -EAGAIN;
+
+ dev_dbg(&rq->hwq.pdev->dev, "QPLIB: FP: Flush RQ rc = 0x%x\n", rc);
+ return rc;
+}
+
+void bnxt_qplib_mark_qp_error(void *qp_handle)
+{
+ struct bnxt_qplib_qp *qp = qp_handle;
+
+ if (!qp)
+ return;
+
+ /* Must block new posting of SQ and RQ */
+ qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
+ qp->state = qp->cur_qp_state;
+
+ /* Add qp to flush list of the CQ */
+ if (!qp->is_user)
+ bnxt_qplib_add_flush_qp(qp);
+}
+
+/* Note: an SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive).
+ * A CQE is tracked from sw_cq_cons to max_element, but is valid only
+ * if VALID = 1.
+ */
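+/*
+ * Legacy workaround (WA 9060) for pre Gen-P5/P7 chips: while the WQE's
+ * psn_search entry is still marked, the phantom (FENCE) completion has
+ * not come back yet, so this completion is deferred (-EAGAIN) and the
+ * CQ re-armed; once armed, peek ahead for the phantom or a terminal
+ * CQE before resuming normal completion.
+ */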
+static int bnxt_re_legacy_do_wa9060(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_cq *cq,
+ u32 cq_cons, u32 swq_last,
+ u32 cqe_sq_cons)
+{
+ struct bnxt_qplib_q *sq = &qp->sq;
+ struct bnxt_qplib_swq *swq;
+ u32 peek_sw_cq_cons, peek_sq_cons_idx, peek_flags;
+ struct cq_terminal *peek_term_hwcqe;
+ struct cq_req *peek_req_hwcqe;
+ struct bnxt_qplib_qp *peek_qp;
+ struct bnxt_qplib_q *peek_sq;
+ struct cq_base *peek_hwcqe;
+ int i, rc = 0;
+
+ /* Check for the psn_search marking before completing */
+ swq = &sq->swq[swq_last];
+ if (swq->psn_search &&
+ le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
+ /* Unmark */
+ swq->psn_search->flags_next_psn = cpu_to_le32
+ (le32_to_cpu(swq->psn_search->flags_next_psn)
+ & ~0x80000000);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
+ sq->condition = true;
+ sq->legacy_send_phantom = true;
+
+ /* TODO: Only ARM if the previous SQE is ARMALL */
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMALL);
+
+ rc = -EAGAIN;
+ goto out;
+ }
+	if (sq->condition) {
+ /* Peek at the completions */
+ peek_flags = cq->dbinfo.flags;
+ peek_sw_cq_cons = cq_cons;
+ i = cq->hwq.depth;
+ while (i--) {
+ peek_hwcqe = bnxt_qplib_get_qe(&cq->hwq,
+ peek_sw_cq_cons, NULL);
+ /* If the next hwcqe is VALID */
+ if (CQE_CMP_VALID(peek_hwcqe, peek_flags)) {
+ /* If the next hwcqe is a REQ */
+ dma_rmb();
+ switch (peek_hwcqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ peek_req_hwcqe = (struct cq_req *)
+ peek_hwcqe;
+ peek_qp = (struct bnxt_qplib_qp *)
+ le64_to_cpu(
+ peek_req_hwcqe->qp_handle);
+ peek_sq = &peek_qp->sq;
+ peek_sq_cons_idx =
+ ((le16_to_cpu(
+ peek_req_hwcqe->sq_cons_idx)
+ - 1) % sq->max_wqe);
+ /* If the hwcqe's sq's wr_id matches */
+ if (peek_sq == sq &&
+ sq->swq[peek_sq_cons_idx].wr_id ==
+ BNXT_QPLIB_FENCE_WRID) {
+					/* Unbreak only if the phantom
+					 * comes back
+					 */
+ dev_dbg(&cq->hwq.pdev->dev,
+ "FP: Process Req qp=0x%x current sq cons sw=0x%x cqe=0x%x\n",
+ qp->id, swq_last,
+ cqe_sq_cons);
+ sq->condition = false;
+ sq->single = true;
+ sq->phantom_cqe_cnt++;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "qp %#x condition restored at peek cq_cons=%#x sq_cons_idx %#x, phantom_cqe_cnt: %d unmark\n",
+ peek_qp->id,
+ peek_sw_cq_cons,
+ peek_sq_cons_idx,
+ sq->phantom_cqe_cnt);
+ rc = 0;
+ goto out;
+ }
+ break;
+
+ case CQ_BASE_CQE_TYPE_TERMINAL:
+				/* In case the QP has gone into the
+				 * error state
+				 */
+ peek_term_hwcqe = (struct cq_terminal *)
+ peek_hwcqe;
+ peek_qp = (struct bnxt_qplib_qp *)
+ le64_to_cpu(
+ peek_term_hwcqe->qp_handle);
+ if (peek_qp == qp) {
+ sq->condition = false;
+ rc = 0;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+ /* Valid but not the phantom, so keep looping */
+ } else {
+ /* Not valid yet, just exit and wait */
+ rc = -EINVAL;
+ goto out;
+ }
+ bnxt_qplib_hwq_incr_cons(cq->hwq.depth,
+ &peek_sw_cq_cons,
+ 1, &peek_flags);
+ }
+ dev_err(&cq->hwq.pdev->dev,
+ "Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x\n",
+ cq_cons, qp->id, swq_last, cqe_sq_cons);
+ rc = -EINVAL;
+ }
+out:
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ struct cq_req *hwcqe,
+ struct bnxt_qplib_cqe **pcqe, int *budget,
+ u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
+{
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *sq;
+ struct bnxt_qplib_cqe *cqe;
+ u32 cqe_sq_cons;
+ struct bnxt_qplib_swq *swq;
+ int rc = 0;
+
+ qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
+ dev_dbg(&cq->hwq.pdev->dev, "FP: Process Req qp=0x%p\n", qp);
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Process Req qp is NULL\n");
+ return -EINVAL;
+ }
+ sq = &qp->sq;
+
+ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+
+	/* We must walk the sq's swq to fabricate CQEs for all previously
+	 * signaled SWQEs, due to CQE aggregation, from the current sq cons
+	 * up to cqe_sq_cons.
+	 */
+ cqe = *pcqe;
+ while (*budget) {
+ if (sq->swq_last == cqe_sq_cons)
+ /* Done */
+ break;
+
+ swq = &sq->swq[sq->swq_last];
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)qp;
+ cqe->src_qp = qp->id;
+ cqe->wr_id = swq->wr_id;
+
+ if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
+ goto skip;
+
+ cqe->type = swq->type;
+
+ /* For the last CQE, check for status. For errors, regardless
+ * of the request being signaled or not, it must complete with
+ * the hwcqe error status
+ */
+ if (swq->next_idx == cqe_sq_cons &&
+ hwcqe->status != CQ_REQ_STATUS_OK) {
+ cqe->status = hwcqe->status;
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed Req \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: QP 0x%x wr_id[%d] = 0x%lx vendor type 0x%x with vendor status 0x%x\n",
+ cqe->src_qp, sq->swq_last, cqe->wr_id, cqe->type, cqe->status);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_mark_qp_error(qp);
+ } else {
+ /* Before we complete, do WA 9060 */
+ if (!_is_chip_gen_p5_p7(qp->cctx)) {
+ if (bnxt_re_legacy_do_wa9060(qp, cq, cq_cons,
+ sq->swq_last,
+ cqe_sq_cons)) {
+ *lib_qp = qp;
+ goto out;
+ }
+ }
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed Req \n");
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id[%d] = 0x%llx \n",
+ sq->swq_last, cqe->wr_id);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: with status 0x%x\n", cqe->status);
+ cqe->status = CQ_REQ_STATUS_OK;
+ cqe++;
+ (*budget)--;
+ }
+ }
+skip:
+ bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons,
+ swq->slots, &sq->dbinfo.flags);
+ sq->swq_last = swq->next_idx;
+		if (sq->single)
+ break;
+ }
+out:
+ *pcqe = cqe;
+ if (sq->swq_last != cqe_sq_cons) {
+ /* Out of budget */
+ rc = -EAGAIN;
+ goto done;
+ }
+	/* Back to normal completion mode only after it has completed all of
+	 * the WC for this CQE
+	 */
+ sq->single = false;
+done:
+ return rc;
+}
+
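+/*
+ * Return the consumed SRQE tag to the tail of the software free list:
+ * the old tail's next_idx points at the tag, the tag becomes the new
+ * tail (next_idx = -1), and the hardware consumer index advances by
+ * one fixed-size WQE slot.
+ */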
+static void bnxt_qplib_release_srqe(struct bnxt_qplib_srq *srq, u32 tag)
+{
+ spin_lock(&srq->hwq.lock);
+ srq->swq[srq->last_idx].next_idx = (int)tag;
+ srq->last_idx = (int)tag;
+ srq->swq[srq->last_idx].next_idx = -1;
+ bnxt_qplib_hwq_incr_cons(srq->hwq.depth, &srq->hwq.cons,
+ srq->dbinfo.max_slot, &srq->dbinfo.flags);
+ spin_unlock(&srq->hwq.lock);
+}
+
+static int bnxt_qplib_cq_process_res_rc(struct bnxt_qplib_cq *cq,
+ struct cq_res_rc *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ u32 wr_id_idx;
+ int rc = 0;
+
+ qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq RC qp is NULL\n");
+ return -EINVAL;
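+	/*
+	 * Every SQ WQE begins with two header slots, a base and an extended
+	 * header, fetched from consecutive ring slots (modulo queue depth)
+	 * and zeroed before the per-opcode fields are filled in.
+	 */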
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ cqe->length = le32_to_cpu(hwcqe->length);
+ cqe->invrkey = le32_to_cpu(hwcqe->imm_data_or_inv_r_key);
+ cqe->mr_handle = le64_to_cpu(hwcqe->mr_handle);
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->status = hwcqe->status;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+
+ wr_id_idx = le32_to_cpu(hwcqe->srq_or_rq_wr_id) &
+ CQ_RES_RC_SRQ_OR_RQ_WR_ID_MASK;
+ if (cqe->flags & CQ_RES_RC_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??\n");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.depth - 1) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.depth);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ dev_dbg(&srq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed RC SRQ wr_id[%d] = 0x%llx\n",
+ wr_id_idx, cqe->wr_id);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process RC \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x\n",
+				wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+ if (wr_id_idx != rq->swq_last)
+ return -EINVAL;
+ cqe->wr_id = rq->swq[rq->swq_last].wr_id;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed RC RQ wr_id[%d] = 0x%llx\n",
+ rq->swq_last, cqe->wr_id);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
+ rq->swq[rq->swq_last].slots,
+ &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[rq->swq_last].next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK)
+ bnxt_qplib_mark_qp_error(qp);
+ }
+done:
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
+ struct cq_res_ud_v2 *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ u32 wr_id_idx;
+ int rc = 0;
+ u16 *smac;
+
+ qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev, "QPLIB: process_cq UD qp is NULL\n");
+ return -EINVAL;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_RES_UD_V2_CQE_TYPE_MASK;
+ cqe->length = le32_to_cpu((hwcqe->length & CQ_RES_UD_V2_LENGTH_MASK));
+ cqe->cfa_meta = le16_to_cpu(hwcqe->cfa_metadata0);
+ /* V2 format has metadata1 */
+ cqe->cfa_meta |= (((le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id) &
+ CQ_RES_UD_V2_CFA_METADATA1_MASK) >>
+ CQ_RES_UD_V2_CFA_METADATA1_SFT) <<
+ BNXT_QPLIB_META1_SHIFT);
+ cqe->invrkey = le32_to_cpu(hwcqe->imm_data);
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->status = hwcqe->status;
+ cqe->qp_handle = (u64)(unsigned long)qp;
+ smac = (u16 *)cqe->smac;
+ smac[2] = ntohs(le16_to_cpu(hwcqe->src_mac[0]));
+ smac[1] = ntohs(le16_to_cpu(hwcqe->src_mac[1]));
+ smac[0] = ntohs(le16_to_cpu(hwcqe->src_mac[2]));
+ wr_id_idx = le32_to_cpu(hwcqe->src_qp_high_srq_or_rq_wr_id)
+ & CQ_RES_UD_V2_SRQ_OR_RQ_WR_ID_MASK;
+ cqe->src_qp = le16_to_cpu(hwcqe->src_qp_low) |
+ ((le32_to_cpu(
+ hwcqe->src_qp_high_srq_or_rq_wr_id) &
+ CQ_RES_UD_V2_SRC_QP_HIGH_MASK) >> 8);
+
+ if (cqe->flags & CQ_RES_UD_V2_FLAGS_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??\n");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.depth - 1) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.depth);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ bnxt_qplib_release_srqe(srq, wr_id_idx);
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed UD SRQ wr_id[%d] = 0x%llx\n",
+ wr_id_idx, cqe->wr_id);
+ cqe++;
+ (*budget)--;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process UD \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded RQ max 0x%x\n",
+				wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+ if (rq->swq_last != wr_id_idx)
+ return -EINVAL;
+
+ cqe->wr_id = rq->swq[rq->swq_last].wr_id;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed UD RQ wr_id[%d] = 0x%llx\n",
+ rq->swq_last, cqe->wr_id);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
+ rq->swq[rq->swq_last].slots,
+ &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[rq->swq_last].next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_UD_V2_STATUS_OK)
+ bnxt_qplib_mark_qp_error(qp);
+ }
+done:
+ return rc;
+}
+
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
+{
+ struct cq_base *hw_cqe;
+ unsigned long flags;
+ bool rc = true;
+
+ spin_lock_irqsave(&cq->hwq.lock, flags);
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+
+ /* Check for Valid bit. If the CQE is valid, return false */
+ rc = !CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags);
+ spin_unlock_irqrestore(&cq->hwq.lock, flags);
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq,
+ struct cq_res_raweth_qp1 *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_q *rq;
+ struct bnxt_qplib_srq *srq;
+ struct bnxt_qplib_cqe *cqe;
+ u32 wr_id_idx;
+ int rc = 0;
+
+ qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
+ if (!qp) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: process_cq Raw/QP1 qp is NULL\n");
+ return -EINVAL;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto done;
+ }
+ cqe = *pcqe;
+ cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ cqe->flags = le16_to_cpu(hwcqe->flags);
+ cqe->qp_handle = (u64)(unsigned long)qp;
+
+ wr_id_idx = le32_to_cpu(hwcqe->raweth_qp1_payload_offset_srq_or_rq_wr_id)
+ & CQ_RES_RAWETH_QP1_SRQ_OR_RQ_WR_ID_MASK;
+ cqe->src_qp = qp->id;
+ if (qp->id == 1 && !cqe->length) {
+		/* Work around the HW length misdetection for QP1 */
+ cqe->length = 296;
+ } else {
+ cqe->length = le16_to_cpu(hwcqe->length);
+ }
+ cqe->pkey_index = qp->pkey_index;
+ memcpy(cqe->smac, qp->smac, 6);
+
+ cqe->raweth_qp1_flags = le16_to_cpu(hwcqe->raweth_qp1_flags);
+ cqe->raweth_qp1_flags2 = le32_to_cpu(hwcqe->raweth_qp1_flags2);
+ cqe->raweth_qp1_metadata = le32_to_cpu(hwcqe->raweth_qp1_metadata);
+
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: raweth_qp1_flags = 0x%x raweth_qp1_flags2 = 0x%x\n",
+ cqe->raweth_qp1_flags, cqe->raweth_qp1_flags2);
+
+ if (cqe->flags & CQ_RES_RAWETH_QP1_FLAGS_SRQ_SRQ) {
+ srq = qp->srq;
+ if (!srq) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: SRQ used but not defined??\n");
+ return -EINVAL;
+ }
+ if (wr_id_idx > srq->hwq.depth - 1) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id idx 0x%x exceeded SRQ max 0x%x\n",
+ wr_id_idx, srq->hwq.depth);
+ return -EINVAL;
+ }
+ cqe->wr_id = srq->swq[wr_id_idx].wr_id;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed Raw/QP1 SRQ \n");
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id[%d] = 0x%llx with status = 0x%x\n",
+ wr_id_idx, cqe->wr_id, hwcqe->status);
+ cqe++;
+ (*budget)--;
+ srq->hwq.cons++;
+ *pcqe = cqe;
+ } else {
+ rq = &qp->rq;
+ if (wr_id_idx > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Raw/QP1 RQ wr_id \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: ix 0x%x exceeded RQ max 0x%x\n",
+ wr_id_idx, rq->max_wqe);
+ return -EINVAL;
+ }
+ if (wr_id_idx != rq->swq_last)
+ return -EINVAL;
+ cqe->wr_id = rq->swq[rq->swq_last].wr_id;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed Raw/QP1 RQ \n");
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id[%d] = 0x%llx with status = 0x%x\n",
+ wr_id_idx, cqe->wr_id, hwcqe->status);
+ cqe++;
+ (*budget)--;
+ bnxt_qplib_hwq_incr_cons(rq->hwq.depth, &rq->hwq.cons,
+ rq->swq[wr_id_idx].slots,
+ &rq->dbinfo.flags);
+ rq->swq_last = rq->swq[rq->swq_last].next_idx;
+ *pcqe = cqe;
+
+ if (hwcqe->status != CQ_RES_RC_STATUS_OK)
+ bnxt_qplib_mark_qp_error(qp);
+ }
+done:
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+ struct cq_terminal *hwcqe,
+ struct bnxt_qplib_cqe **pcqe,
+ int *budget)
+{
+ struct bnxt_qplib_q *sq, *rq;
+ struct bnxt_qplib_cqe *cqe;
+ struct bnxt_qplib_qp *qp;
+ u32 swq_last;
+ u32 cqe_cons;
+ int rc = 0;
+
+ /* Check the Status */
+ if (hwcqe->status != CQ_TERMINAL_STATUS_OK)
+ dev_warn(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Terminal Error status = 0x%x\n",
+ hwcqe->status);
+
+ qp = (struct bnxt_qplib_qp *)le64_to_cpu(hwcqe->qp_handle);
+ if (!qp)
+ return -EINVAL;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process terminal for qp (0x%x)\n", qp->id);
+
+ /* Terminal CQE requires all posted RQEs to complete with FLUSHED_ERR
+ * from the current rq->cons to the rq->prod regardless what the
+ * rq->cons the terminal CQE indicates.
+ */
+ bnxt_qplib_mark_qp_error(qp);
+
+ sq = &qp->sq;
+ rq = &qp->rq;
+
+ cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
+ if (cqe_cons == 0xFFFF)
+ goto do_rq;
+
+ cqe_cons %= sq->max_wqe;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ goto sq_done;
+ }
+
+	/* A terminal CQE can also aggregate prior successful CQEs, so we
+	 * must complete all CQEs from the current sq's cons up to the
+	 * cq_cons with status OK.
+	 */
+ cqe = *pcqe;
+ while (*budget) {
+ /*sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);*/
+ swq_last = sq->swq_last;
+ if (swq_last == cqe_cons)
+ break;
+ if (sq->swq[swq_last].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ memset(cqe, 0, sizeof(*cqe));
+ cqe->status = CQ_REQ_STATUS_OK;
+ cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
+ cqe->qp_handle = (u64)qp;
+ cqe->src_qp = qp->id;
+ cqe->wr_id = sq->swq[swq_last].wr_id;
+ cqe->type = sq->swq[swq_last].type;
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed terminal Req \n");
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: wr_id[%d] = 0x%llx with status 0x%x\n",
+ swq_last, cqe->wr_id, cqe->status);
+ cqe++;
+ (*budget)--;
+ }
+ bnxt_qplib_hwq_incr_cons(sq->hwq.depth, &sq->hwq.cons,
+ sq->swq[swq_last].slots,
+ &sq->dbinfo.flags);
+ sq->swq_last = sq->swq[swq_last].next_idx;
+ }
+ *pcqe = cqe;
+ if (!*budget && swq_last != cqe_cons) {
+ /* Out of budget */
+ rc = -EAGAIN;
+ goto sq_done;
+ }
+sq_done:
+ if (rc)
+ return rc;
+do_rq:
+ cqe_cons = le16_to_cpu(hwcqe->rq_cons_idx);
+ if (cqe_cons == 0xFFFF) {
+ goto done;
+ } else if (cqe_cons > (rq->max_wqe - 1)) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Processed terminal \n");
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: reported rq_cons_idx 0x%x exceeds max 0x%x\n",
+			cqe_cons, rq->max_wqe);
+ goto done;
+ }
+ if (qp->rq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QPLIB: QP in Flush QP = %p\n", __func__, qp);
+ rc = 0;
+ goto rq_done;
+ }
+
+rq_done:
+done:
+ return rc;
+}
+
+static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
+ struct cq_cutoff *hwcqe)
+{
+ /* Check the Status */
+ if (hwcqe->status != CQ_CUTOFF_STATUS_OK) {
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: FP: CQ Process Cutoff Error status = 0x%x\n",
+ hwcqe->status);
+ return -EINVAL;
+ }
+ clear_bit(CQ_FLAGS_RESIZE_IN_PROG, &cq->flags);
+ wake_up_interruptible(&cq->waitq);
+
+ dev_dbg(&cq->hwq.pdev->dev, "QPLIB: FP: CQ Processed Cutoff\n");
+ return 0;
+}
+
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes)
+{
+ struct bnxt_qplib_qp *qp = NULL;
+ u32 budget = num_cqes;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cq->flush_lock, flags);
+ list_for_each_entry(qp, &cq->sqf_head, sq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing SQ QP= %p\n",
+ qp);
+ __flush_sq(&qp->sq, qp, &cqe, &budget);
+ }
+
+ list_for_each_entry(qp, &cq->rqf_head, rq_flush) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: FP: Flushing RQ QP= %p\n",
+ qp);
+ __flush_rq(&qp->rq, qp, &cqe, &budget);
+ }
+ spin_unlock_irqrestore(&cq->flush_lock, flags);
+
+ return num_cqes - budget;
+}
+
+int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ int num_cqes, struct bnxt_qplib_qp **lib_qp)
+{
+ struct cq_base *hw_cqe;
+ u32 hw_polled = 0;
+ int budget, rc = 0;
+ u8 type;
+
+ budget = num_cqes;
+
+ while (budget) {
+ hw_cqe = bnxt_qplib_get_qe(&cq->hwq, cq->hwq.cons, NULL);
+
+ /* Check for Valid bit */
+ if (!CQE_CMP_VALID(hw_cqe, cq->dbinfo.flags))
+ break;
+
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ /* From the device's respective CQE format to qplib_wc*/
+ type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ switch (type) {
+ case CQ_BASE_CQE_TYPE_REQ:
+ rc = bnxt_qplib_cq_process_req(cq,
+ (struct cq_req *)hw_cqe, &cqe, &budget,
+ cq->hwq.cons, lib_qp);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RC:
+ rc = bnxt_qplib_cq_process_res_rc(cq,
+ (struct cq_res_rc *)hw_cqe, &cqe,
+ &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_UD:
+ rc = bnxt_qplib_cq_process_res_ud(cq,
+ (struct cq_res_ud_v2 *)hw_cqe,
+ &cqe, &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_RES_RAWETH_QP1:
+ rc = bnxt_qplib_cq_process_res_raweth_qp1(cq,
+ (struct cq_res_raweth_qp1 *)
+ hw_cqe, &cqe, &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_TERMINAL:
+ rc = bnxt_qplib_cq_process_terminal(cq,
+ (struct cq_terminal *)hw_cqe,
+ &cqe, &budget);
+ break;
+ case CQ_BASE_CQE_TYPE_CUT_OFF:
+ bnxt_qplib_cq_process_cutoff(cq,
+ (struct cq_cutoff *)hw_cqe);
+ /* Done processing this CQ */
+ goto exit;
+ default:
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: process_cq unknown type 0x%x\n",
+ hw_cqe->cqe_type_toggle &
+ CQ_BASE_CQE_TYPE_MASK);
+ rc = -EINVAL;
+ break;
+ }
+ if (rc < 0) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "QPLIB: process_cqe rc = 0x%x\n", rc);
+ if (rc == -EAGAIN)
+ break;
+ /* Error while processing the CQE, just skip to the
+ next one */
+ if (type != CQ_BASE_CQE_TYPE_TERMINAL)
+ dev_err(&cq->hwq.pdev->dev,
+ "QPLIB: process_cqe error rc = 0x%x\n",
+ rc);
+ }
+ hw_polled++;
+ bnxt_qplib_hwq_incr_cons(cq->hwq.depth, &cq->hwq.cons,
+ 1, &cq->dbinfo.flags);
+ }
+ if (hw_polled)
+ bnxt_qplib_ring_db(&cq->dbinfo, DBC_DBC_TYPE_CQ);
+exit:
+ return num_cqes - budget;
+}
+
+void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type)
+{
+ cq->dbinfo.toggle = cq->toggle;
+ if (arm_type)
+ bnxt_qplib_ring_db(&cq->dbinfo, arm_type);
+	/* cq->arm_state tracks whether the CQ handler should be invoked */
+ atomic_set(&cq->arm_state, 1);
+}
+
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp)
+{
+ flush_workqueue(qp->scq->nq->cqn_wq);
+ if (qp->scq != qp->rcq)
+ flush_workqueue(qp->rcq->nq->cqn_wq);
+}
diff --git a/sys/dev/bnxt/bnxt_re/qplib_fp.h b/sys/dev/bnxt/bnxt_re/qplib_fp.h
new file mode 100644
index 000000000000..527c377f0aa5
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_fp.h
@@ -0,0 +1,638 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Fast Path Operators (header)
+ */
+
+#ifndef __BNXT_QPLIB_FP_H__
+#define __BNXT_QPLIB_FP_H__
+
+/* Temp header structures for SQ */
+struct sq_ud_ext_hdr {
+ __le32 dst_qp;
+ __le32 avid;
+ __le64 rsvd;
+};
+
+struct sq_raw_ext_hdr {
+ __le32 cfa_meta;
+ __le32 rsvd0;
+ __le64 rsvd1;
+};
+
+struct sq_rdma_ext_hdr {
+ __le64 remote_va;
+ __le32 remote_key;
+ __le32 rsvd;
+};
+
+struct sq_atomic_ext_hdr {
+ __le64 swap_data;
+ __le64 cmp_data;
+};
+
+struct sq_fr_pmr_ext_hdr {
+ __le64 pblptr;
+ __le64 va;
+};
+
+struct sq_bind_ext_hdr {
+ __le64 va;
+ __le32 length_lo;
+ __le32 length_hi;
+};
+
+struct rq_ext_hdr {
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+#define BNXT_QPLIB_ETHTYPE_ROCEV1 0x8915
+
+struct bnxt_qplib_srq {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_cq *cq;
+ struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_db_info dbinfo;
+ struct bnxt_qplib_sg_info sginfo;
+ u64 srq_handle;
+ u32 id;
+ u16 wqe_size;
+ u32 max_wqe;
+ u32 max_sge;
+ u32 threshold;
+ bool arm_req;
+ int start_idx;
+ int last_idx;
+ u16 eventq_hw_ring_id;
+ bool is_user;
+ spinlock_t lock;
+};
+
+struct bnxt_qplib_sge {
+ u64 addr;
+ u32 size;
+ u32 lkey;
+};
+
+/*
+ * Buffer space for ETH(14), IP or GRH(40), UDP header(8)
+ * and ib_bth + ib_deth (20).
+ * Max required is 82 when RoCE V2 is enabled
+ */
+
+/*
+ * RoCE V1 (38 bytes needed)
+ * +------------+----------+--------+--------+-------+
+ * |Eth-hdr(14B)| GRH (40B)|bth+deth| Mad | iCRC |
+ * | | supplied | 20B |payload | 4B |
+ * | | by user |supplied| 256B | |
+ * | | mad | |by user | |
+ * | | | | | |
+ * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
+ * +------------+----------+--------+--------+-------+
+ */
+
+/*
+ * RoCE V2-IPv4 (46 Bytes needed)
+ * +------------+----------+--------+--------+-------+
+ * |Eth-hdr(14B)| IP-hdr |UDP-hdr | Mad | iCRC |
+ * | | supplied | 8B |payload | 4B |
+ * | | by user |bth+deth| 256B | |
+ * | | mad lower| 20B |supplied| |
+ * | | 20B out | (sge 3)|by user | |
+ * | | of 40B | | | |
+ * | | grh space| | | |
+ * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
+ * +------------+----------+--------+--------+-------+
+ */
+
+/*
+ * RoCE V2-IPv6 (46 Bytes needed)
+ * +------------+----------+--------+--------+-------+
+ * |Eth-hdr(14B)| IPv6 |UDP-hdr | Mad | iCRC |
+ * | | supplied | 8B |payload | 4B |
+ * | | by user |bth+deth| 256B | |
+ * | | mad lower| 20B |supplied| |
+ * | | 40 bytes | |by user | |
+ * | | grh space| | | |
+ * | | | | | |
+ * | sge 1 | sge 2 | sge 3 | sge 4 | sge 5 |
+ * +------------+----------+--------+--------+-------+
+ */
+
+#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE 74
+#define BNXT_QPLIB_MAX_QP1_SQ_HDR_SIZE_V2 86
+#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE 46
+#define BNXT_QPLIB_MAX_QP1_RQ_ETH_HDR_SIZE 14
+#define BNXT_QPLIB_MAX_QP1_RQ_HDR_SIZE_V2 512
+#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV4 20
+#define BNXT_QPLIB_MAX_GRH_HDR_SIZE_IPV6 40
+#define BNXT_QPLIB_MAX_QP1_RQ_BDETH_HDR_SIZE 20
+#define BNXT_QPLIB_MAX_SQSZ 0xFFFF
+
+struct bnxt_qplib_hdrbuf {
+ dma_addr_t dma_map;
+ void *va;
+ u32 len;
+ u32 step;
+};
+
+struct bnxt_qplib_swq {
+ u64 wr_id;
+ int next_idx;
+ u8 type;
+ u8 flags;
+ u32 start_psn;
+ u32 next_psn;
+ u32 slot_idx;
+ u8 slots;
+	/* WIP: make this a void * to handle legacy as well */
+ struct sq_psn_search *psn_search;
+ void *inline_data;
+};
+
+struct bnxt_qplib_swqe {
+ /* General */
+#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
+#define BNXT_QPLIB_QP1_DUMMY_WRID 0x44554D59 /* "DUMY" */
+ u64 wr_id;
+ u8 reqs_type;
+ u8 type;
+#define BNXT_QPLIB_SWQE_TYPE_SEND 0
+#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_IMM 1
+#define BNXT_QPLIB_SWQE_TYPE_SEND_WITH_INV 2
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE 4
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_WRITE_WITH_IMM 5
+#define BNXT_QPLIB_SWQE_TYPE_RDMA_READ 6
+#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP 8
+#define BNXT_QPLIB_SWQE_TYPE_ATOMIC_FETCH_AND_ADD 11
+#define BNXT_QPLIB_SWQE_TYPE_LOCAL_INV 12
+#define BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR 13
+#define BNXT_QPLIB_SWQE_TYPE_REG_MR 13
+#define BNXT_QPLIB_SWQE_TYPE_BIND_MW 14
+#define BNXT_QPLIB_SWQE_TYPE_RECV 128
+#define BNXT_QPLIB_SWQE_TYPE_RECV_RDMA_IMM 129
+ u8 flags;
+#define BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP (1 << 0)
+#define BNXT_QPLIB_SWQE_FLAGS_RD_ATOMIC_FENCE (1 << 1)
+#define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE (1 << 2)
+#define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT (1 << 3)
+#define BNXT_QPLIB_SWQE_FLAGS_INLINE (1 << 4)
+ struct bnxt_qplib_sge *sg_list;
+ int num_sge;
+
+ union {
+ /* Send, with imm, inval key */
+ struct {
+ union {
+ __be32 imm_data;
+ u32 inv_key;
+ };
+ u32 q_key;
+ u32 dst_qp;
+ u16 avid;
+ } send;
+
+ /* Send Raw Ethernet and QP1 */
+ struct {
+ u16 lflags;
+ u16 cfa_action;
+ u32 cfa_meta;
+ } rawqp1;
+
+ /* RDMA write, with imm, read */
+ struct {
+ union {
+ __be32 imm_data;
+ u32 inv_key;
+ };
+ u64 remote_va;
+ u32 r_key;
+ } rdma;
+
+ /* Atomic cmp/swap, fetch/add */
+ struct {
+ u64 remote_va;
+ u32 r_key;
+ u64 swap_data;
+ u64 cmp_data;
+ } atomic;
+
+ /* Local Invalidate */
+ struct {
+ u32 inv_l_key;
+ } local_inv;
+
+ /* FR-PMR */
+ struct {
+ u8 access_cntl;
+ u8 pg_sz_log;
+ bool zero_based;
+ u32 l_key;
+ u32 length;
+ u8 pbl_pg_sz_log;
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_4K 0
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_8K 1
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_64K 4
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_256K 6
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_1M 8
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_2M 9
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_4M 10
+#define BNXT_QPLIB_SWQE_PAGE_SIZE_1G 18
+ u8 levels;
+#define PAGE_SHIFT_4K 12
+ __le64 *pbl_ptr;
+ dma_addr_t pbl_dma_ptr;
+ u64 *page_list;
+ u16 page_list_len;
+ u64 va;
+ } frmr;
+
+ /* Bind */
+ struct {
+ u8 access_cntl;
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_LOCAL_WRITE (1 << 0)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_READ (1 << 1)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_WRITE (1 << 2)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_REMOTE_ATOMIC (1 << 3)
+#define BNXT_QPLIB_BIND_SWQE_ACCESS_WINDOW_BIND (1 << 4)
+ bool zero_based;
+ u8 mw_type;
+ u32 parent_l_key;
+ u32 r_key;
+ u64 va;
+ u32 length;
+ } bind;
+ };
+};
+
+struct bnxt_qplib_q {
+ struct bnxt_qplib_swq *swq;
+ struct bnxt_qplib_db_info dbinfo;
+ struct bnxt_qplib_sg_info sginfo;
+ struct bnxt_qplib_hwq hwq;
+ u32 max_wqe;
+ u16 max_sge;
+ u16 wqe_size;
+ u16 q_full_delta;
+ u32 psn;
+ bool condition;
+ bool single;
+ bool legacy_send_phantom;
+ u32 phantom_wqe_cnt;
+ u32 phantom_cqe_cnt;
+ u32 next_cq_cons;
+ bool flushed;
+ u32 swq_start;
+ u32 swq_last;
+};
+
+#define BNXT_QPLIB_PPP_REQ 0x1
+#define BNXT_QPLIB_PPP_ST_IDX_SHIFT 0x1
+
+struct bnxt_qplib_ppp {
+ u32 dpi;
+ u8 req;
+ u8 st_idx_en;
+};
+
+struct bnxt_qplib_qp {
+ struct bnxt_qplib_pd *pd;
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_chip_ctx *cctx;
+ u64 qp_handle;
+#define BNXT_QPLIB_QP_ID_INVALID 0xFFFFFFFF
+ u32 id;
+ u8 type;
+ u8 sig_type;
+ u8 wqe_mode;
+ u8 state;
+ u8 cur_qp_state;
+ u8 is_user;
+ u64 modify_flags;
+ u32 max_inline_data;
+ u32 mtu;
+ u32 path_mtu;
+ bool en_sqd_async_notify;
+ u16 pkey_index;
+ u32 qkey;
+ u32 dest_qp_id;
+ u8 access;
+ u8 timeout;
+ u8 retry_cnt;
+ u8 rnr_retry;
+ u64 wqe_cnt;
+ u32 min_rnr_timer;
+ u32 max_rd_atomic;
+ u32 max_dest_rd_atomic;
+ u32 dest_qpn;
+ u8 smac[6];
+ u16 vlan_id;
+ u8 nw_type;
+ u16 port_id;
+ struct bnxt_qplib_ah ah;
+ struct bnxt_qplib_ppp ppp;
+
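+/* The BTH PSN field is 24 bits wide */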
+#define BTH_PSN_MASK ((1 << 24) - 1)
+ /* SQ */
+ struct bnxt_qplib_q sq;
+ /* RQ */
+ struct bnxt_qplib_q rq;
+ /* SRQ */
+ struct bnxt_qplib_srq *srq;
+ /* CQ */
+ struct bnxt_qplib_cq *scq;
+ struct bnxt_qplib_cq *rcq;
+ /* IRRQ and ORRQ */
+ struct bnxt_qplib_hwq irrq;
+ struct bnxt_qplib_hwq orrq;
+ /* Header buffer for QP1 */
+ struct bnxt_qplib_hdrbuf *sq_hdr_buf;
+ struct bnxt_qplib_hdrbuf *rq_hdr_buf;
+
+ /* ToS */
+ u8 tos_ecn;
+ u8 tos_dscp;
+ /* To track the SQ and RQ flush list */
+ struct list_head sq_flush;
+ struct list_head rq_flush;
+	/* 4 bytes of the QP's scrambled MAC received from FW */
+ u32 lag_src_mac;
+ u32 msn;
+ u32 msn_tbl_sz;
+ /* get devflags in PI code */
+ u16 dev_cap_flags;
+};
+
+
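+/*
+ * A CQE is valid when its toggle bit matches the consumer's epoch: the
+ * producer flips CQ_BASE_TOGGLE each time it wraps the ring, and the
+ * consumer flips the epoch bit kept in dbinfo.flags on every wrap, so
+ * entries left over from the previous pass compare as invalid.
+ */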
+#define CQE_CMP_VALID(hdr, pass)					\
+	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==			\
+	 !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
+static inline u32 __bnxt_qplib_get_avail(struct bnxt_qplib_hwq *hwq)
+{
+ int cons, prod, avail;
+
+	/* A false full is possible; retrying the post-send makes sense */
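+	/*
+	 * Example: depth = 64, prod = 60, cons = 4 gives
+	 * avail = 4 - 60 + 64 = 8 free slots.
+	 */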
+ cons = hwq->cons;
+ prod = hwq->prod;
+ avail = cons - prod;
+ if (cons <= prod)
+ avail += hwq->depth;
+ return avail;
+}
+
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_hwq *hwq, u8 slots)
+{
+ return __bnxt_qplib_get_avail(hwq) <= slots;
+}
+
+struct bnxt_qplib_cqe {
+ u8 status;
+ u8 type;
+ u8 opcode;
+ u32 length;
+ /* Lower 16 is cfa_metadata0, Upper 16 is cfa_metadata1 */
+ u32 cfa_meta;
+#define BNXT_QPLIB_META1_SHIFT 16
+#define BNXT_QPLIB_CQE_CFA_META1_VALID 0x80000UL
+ u64 wr_id;
+ union {
+ __be32 immdata;
+ u32 invrkey;
+ };
+ u64 qp_handle;
+ u64 mr_handle;
+ u16 flags;
+ u8 smac[6];
+ u32 src_qp;
+ u16 raweth_qp1_flags;
+ u16 raweth_qp1_errors;
+ u16 raweth_qp1_cfa_code;
+ u32 raweth_qp1_flags2;
+ u32 raweth_qp1_metadata;
+ u8 raweth_qp1_payload_offset;
+ u16 pkey_index;
+};
+
+#define BNXT_QPLIB_QUEUE_START_PERIOD 0x01
+struct bnxt_qplib_cq {
+ struct bnxt_qplib_dpi *dpi;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_db_info dbinfo;
+ struct bnxt_qplib_sg_info sginfo;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_hwq resize_hwq;
+ struct list_head sqf_head;
+ struct list_head rqf_head;
+ u32 max_wqe;
+ u32 id;
+ u16 count;
+ u16 period;
+ u32 cnq_hw_ring_id;
+ u64 cq_handle;
+ atomic_t arm_state;
+#define CQ_RESIZE_WAIT_TIME_MS 500
+ unsigned long flags;
+#define CQ_FLAGS_RESIZE_IN_PROG 1
+ wait_queue_head_t waitq;
+ spinlock_t flush_lock; /* lock flush queue list */
+ spinlock_t compl_lock; /* synch CQ handlers */
+ u16 cnq_events;
+ bool is_cq_err_event;
+ bool destroyed;
+ u8 toggle;
+};
+
+#define BNXT_QPLIB_MAX_IRRQE_ENTRY_SIZE sizeof(struct xrrq_irrq)
+#define BNXT_QPLIB_MAX_ORRQE_ENTRY_SIZE sizeof(struct xrrq_orrq)
+#define IRD_LIMIT_TO_IRRQ_SLOTS(x)	(2 * (x) + 2)
+#define IRRQ_SLOTS_TO_IRD_LIMIT(s)	(((s) >> 1) - 1)
+#define ORD_LIMIT_TO_ORRQ_SLOTS(x)	((x) + 1)
+#define ORRQ_SLOTS_TO_ORD_LIMIT(s)	((s) - 1)
+
+#define NQE_CMP_VALID(hdr, pass)					\
+	(!!(le32_to_cpu((hdr)->info63_v & 0xffffffff) & NQ_BASE_V) ==	\
+	 !((pass) & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
+
+#define BNXT_QPLIB_NQE_MAX_CNT (128 * 1024)
+
+/* MSN table print macros for debugging */
+#define BNXT_RE_MSN_IDX(m) (((m) & SQ_MSN_SEARCH_START_IDX_MASK) >> \
+ SQ_MSN_SEARCH_START_IDX_SFT)
+#define BNXT_RE_MSN_NPSN(m) (((m) & SQ_MSN_SEARCH_NEXT_PSN_MASK) >> \
+ SQ_MSN_SEARCH_NEXT_PSN_SFT)
+#define BNXT_RE_MSN_SPSN(m) (((m) & SQ_MSN_SEARCH_START_PSN_MASK) >> \
+ SQ_MSN_SEARCH_START_PSN_SFT)
+#define BNXT_MSN_TBLE_SGE 6
+
+struct bnxt_qplib_nq_stats {
+ u64 num_dbqne_processed;
+ u64 num_srqne_processed;
+ u64 num_cqne_processed;
+ u64 num_tasklet_resched;
+ u64 num_nq_rearm;
+};
+
+struct bnxt_qplib_nq_db {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *db;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+typedef int (*cqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_cq *cq);
+typedef int (*srqn_handler_t)(struct bnxt_qplib_nq *nq,
+ struct bnxt_qplib_srq *srq, u8 event);
+
+struct bnxt_qplib_nq {
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_nq_db nq_db;
+
+ char *name;
+ u16 ring_id;
+ int msix_vec;
+ bool requested;
+ int budget;
+ u32 load;
+ struct mutex lock;
+
+ cqn_handler_t cqn_handler;
+ srqn_handler_t srqn_handler;
+ struct workqueue_struct *cqn_wq;
+ struct bnxt_qplib_nq_stats stats;
+};
+
+struct bnxt_qplib_nq_work {
+ struct work_struct work;
+ struct bnxt_qplib_nq *nq;
+ struct bnxt_qplib_cq *cq;
+};
+
+static inline dma_addr_t
+bnxt_qplib_get_qp_buf_from_index(struct bnxt_qplib_qp *qp, u32 index)
+{
+ struct bnxt_qplib_hdrbuf *buf;
+
+ buf = qp->rq_hdr_buf;
+ return (buf->dma_map + index * buf->step);
+}
+
+void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill);
+void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
+ int msix_vector, bool need_init);
+int bnxt_qplib_enable_nq(struct bnxt_qplib_nq *nq, int nq_idx,
+ int msix_vector, int bar_reg_offset,
+ cqn_handler_t cqn_handler,
+ srqn_handler_t srq_handler);
+int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_modify_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_destroy_srq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_srq *srq);
+int bnxt_qplib_post_srq_recv(struct bnxt_qplib_srq *srq,
+ struct bnxt_qplib_swqe *wqe);
+int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge);
+void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_sge *sge);
+u32 bnxt_qplib_get_rq_prod_index(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_post_send_db(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe);
+void bnxt_qplib_post_recv_db(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
+ struct bnxt_qplib_swqe *wqe);
+int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_modify_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_resize_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq,
+ int new_cqes);
+void bnxt_qplib_resize_cq_complete(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cq *cq);
+int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+void bnxt_qplib_free_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
+int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
+ int num, struct bnxt_qplib_qp **qp);
+bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq);
+void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
+void bnxt_qplib_free_nq_mem(struct bnxt_qplib_nq *nq);
+int bnxt_qplib_alloc_nq_mem(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_nq *nq);
+void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
+int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq,
+ struct bnxt_qplib_cqe *cqe,
+ int num_cqes);
+void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp);
+int bnxt_qplib_alloc_hdr_buf(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp, u32 slen, u32 rlen);
+
+static inline bool __can_request_ppp(struct bnxt_qplib_qp *qp)
+{
+ bool can_request = false;
+
+ if (qp->cur_qp_state == CMDQ_MODIFY_QP_NEW_STATE_RESET &&
+ qp->state == CMDQ_MODIFY_QP_NEW_STATE_INIT &&
+ qp->ppp.req &&
+ !(qp->ppp.st_idx_en &
+ CREQ_MODIFY_QP_RESP_PINGPONG_PUSH_ENABLED))
+ can_request = true;
+ return can_request;
+}
+
+/* MSN table update inline helper */
+static inline uint64_t bnxt_re_update_msn_tbl(uint32_t st_idx, uint32_t npsn, uint32_t start_psn)
+{
+ return cpu_to_le64((((u64)(st_idx) << SQ_MSN_SEARCH_START_IDX_SFT) &
+ SQ_MSN_SEARCH_START_IDX_MASK) |
+ (((u64)(npsn) << SQ_MSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_MSN_SEARCH_NEXT_PSN_MASK) |
+ (((start_psn) << SQ_MSN_SEARCH_START_PSN_SFT) &
+ SQ_MSN_SEARCH_START_PSN_MASK));
+}
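+
+/* Usage sketch (illustrative, not part of the driver): pack an MSN
+ * search entry and read the fields back with the BNXT_RE_MSN_* debug
+ * macros above.
+ *
+ *     u64 msn = le64_to_cpu(bnxt_re_update_msn_tbl(idx, npsn, spsn));
+ *
+ *     pr_debug("msn idx %llu npsn %llu spsn %llu\n",
+ *              BNXT_RE_MSN_IDX(msn), BNXT_RE_MSN_NPSN(msn),
+ *              BNXT_RE_MSN_SPSN(msn));
+ */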
+
+void bnxt_re_schedule_dbq_event(struct bnxt_qplib_res *res);
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_rcfw.c b/sys/dev/bnxt/bnxt_re/qplib_rcfw.c
new file mode 100644
index 000000000000..7e3453a1e044
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_rcfw.c
@@ -0,0 +1,1338 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: RDMA Controller HW interface
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hardirq.h>
+#include <linux/device.h>
+
+#include "hsi_struct_def.h"
+#include "qplib_tlv.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_rcfw.h"
+#include "bnxt_re.h"
+
+static void bnxt_qplib_service_creq(unsigned long data);
+
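+/* Sample the CMDQ producer/consumer indices and compare them against
+ * the values captured on the previous call; if neither has moved,
+ * assume the firmware is stalled and report -ETIMEDOUT. The snapshot
+ * is updated for the next call.
+ */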
+int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw,
+ u32 *cur_prod, u32 *cur_cons)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+
+ cmdq = &rcfw->cmdq;
+
+ if (*cur_prod == cmdq->hwq.prod &&
+ *cur_cons == cmdq->hwq.cons)
+ /* No activity on CMDQ or CREQ. FW down */
+ return -ETIMEDOUT;
+
+ *cur_prod = cmdq->hwq.prod;
+ *cur_cons = cmdq->hwq.cons;
+ return 0;
+}
+
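+/* Opcodes that are harmless to lose when firmware access is gone
+ * (mostly destroy/teardown commands) are mapped to success, since the
+ * device is going away anyway; every other opcode fails with
+ * -ETIMEDOUT.
+ */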
+static int bnxt_qplib_map_rc(u8 opcode)
+{
+ switch (opcode) {
+ case CMDQ_BASE_OPCODE_DESTROY_QP:
+ case CMDQ_BASE_OPCODE_DESTROY_SRQ:
+ case CMDQ_BASE_OPCODE_DESTROY_CQ:
+ case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
+ case CMDQ_BASE_OPCODE_DEREGISTER_MR:
+ case CMDQ_BASE_OPCODE_DELETE_GID:
+ case CMDQ_BASE_OPCODE_DESTROY_QP1:
+ case CMDQ_BASE_OPCODE_DESTROY_AH:
+ case CMDQ_BASE_OPCODE_DEINITIALIZE_FW:
+ case CMDQ_BASE_OPCODE_MODIFY_ROCE_CC:
+ case CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE:
+ return 0;
+ default:
+ return -ETIMEDOUT;
+ }
+}
+
+/**
+ * bnxt_re_is_fw_stalled - Check firmware health
+ * @rcfw - rcfw channel instance of rdev
+ * @cookie - cookie to track the command
+ *
+ * If the firmware has not responded to any rcfw command within
+ * rcfw->max_timeout, consider the firmware stalled.
+ *
+ * Returns:
+ * 0 if firmware is responding
+ * -ENODEV if firmware is not responding
+ */
+static int bnxt_re_is_fw_stalled(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+
+ crsqe = &rcfw->crsqe_tbl[cookie];
+ cmdq = &rcfw->cmdq;
+
+ if (time_after(jiffies, cmdq->last_seen +
+ (rcfw->max_timeout * HZ))) {
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%ld > %d) msec active %d\n",
+ __func__, cookie, crsqe->opcode,
+ (long)jiffies_to_msecs(jiffies - cmdq->last_seen),
+ rcfw->max_timeout * 1000,
+ crsqe->is_in_used);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * __wait_for_resp - wait for a command response without holding the cpu
+ * @rcfw - rcfw channel instance of rdev
+ * @cookie - cookie to track the command
+ *
+ * Wait for command completion in a sleepable context.
+ *
+ * Returns:
+ * 0 if the command is completed by firmware.
+ * A non-zero error code otherwise.
+ */
+static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ unsigned long issue_time;
+ int ret;
+
+ cmdq = &rcfw->cmdq;
+ issue_time = jiffies;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (RCFW_NO_FW_ACCESS(rcfw))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ /* A non-zero return means the command completed */
+ ret = wait_event_timeout(cmdq->waitq,
+ !crsqe->is_in_used ||
+ RCFW_NO_FW_ACCESS(rcfw),
+ msecs_to_jiffies(rcfw->max_timeout * 1000));
+
+ if (!crsqe->is_in_used)
+ return 0;
+ /*
+ * Handle a missed interrupt or other cases such as a DBR drop
+ */
+ bnxt_qplib_service_creq((unsigned long)rcfw);
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "Non-Blocking QPLIB: cmdq[%#x]=%#x waited (%lu) msec bit %d\n",
+ cookie, crsqe->opcode,
+ (long)jiffies_to_msecs(jiffies - issue_time),
+ crsqe->is_in_used);
+
+ if (!crsqe->is_in_used)
+ return 0;
+
+ ret = bnxt_re_is_fw_stalled(rcfw, cookie);
+ if (ret)
+ return ret;
+
+ } while (true);
+}
+
+/**
+ * __block_for_resp - hold the cpu context and wait for response
+ * @rcfw - rcfw channel instance of rdev
+ * @cookie - cookie to track the command
+ *
+ * This function holds the cpu (non-sleepable context) and
+ * waits for command completion. The maximum holding interval is 8 seconds.
+ *
+ * Returns:
+ * -ETIMEDOUT if the command does not complete within the interval.
+ * 0 if the command is completed by firmware.
+ */
+static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ unsigned long issue_time;
+
+ issue_time = jiffies;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (RCFW_NO_FW_ACCESS(rcfw))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ udelay(1);
+
+ /* The call below is required; otherwise a deadlock is possible
+ * if the interrupt is mapped to the same cpu
+ */
+ bnxt_qplib_service_creq((unsigned long)rcfw);
+ if (!crsqe->is_in_used)
+ return 0;
+
+ } while (time_before(jiffies, issue_time + (8 * HZ)));
+
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "Blocking QPLIB: cmdq[%#x]=%#x taken (%lu) msec",
+ cookie, crsqe->opcode,
+ (long)jiffies_to_msecs(jiffies - issue_time));
+
+ return -ETIMEDOUT;
+}
+
+/* __send_message_no_waiter - get a cookie and post the message.
+ * @rcfw - rcfw channel instance of rdev
+ * @msg - qplib message internal
+ *
+ * This function only posts the command and does not wait for completion.
+ * The current design contract is:
+ * - the caller must hold the completion queue hwq->lock;
+ * - the caller must have consumed the existing completion and freed
+ *   the resources;
+ * - this function does not check the queue full condition;
+ * - this function explicitly sets is_waiter_alive = false.
+ * The current use case is to send destroy_ah when create_ah completes
+ * after its waiter has been lost. It can be extended to other use
+ * cases as well.
+ *
+ * Returns: Nothing
+ */
+static void __send_message_no_waiter(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_hwq *cmdq_hwq = &cmdq->hwq;
+ struct bnxt_qplib_crsqe *crsqe;
+ struct bnxt_qplib_cmdqe *cmdqe;
+ u32 sw_prod, cmdq_prod, bsize;
+ u16 cookie;
+ u8 *preq;
+
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
+ __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ /* Set cmd_size in terms of 16B slots in req. */
+ bsize = bnxt_qplib_set_cmd_slots(msg->req);
+ /* GET_CMD_SIZE would return number of slots in either case of tlv
+ * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
+ */
+ crsqe->send_timestamp = jiffies;
+ crsqe->is_internal_cmd = true;
+ crsqe->is_waiter_alive = false;
+ crsqe->is_in_used = true;
+ crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+
+ preq = (u8 *)msg->req;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq);
+ cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL);
+ /* Copy a segment of the req cmd to the cmdq */
+ memset(cmdqe, 0, sizeof(*cmdqe));
+ memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+ preq += min_t(u32, bsize, sizeof(*cmdqe));
+ bsize -= min_t(u32, bsize, sizeof(*cmdqe));
+ cmdq_hwq->prod++;
+ } while (bsize > 0);
+ cmdq->seq_num++;
+
+ cmdq_prod = cmdq_hwq->prod & 0xFFFF;
+ atomic_inc(&rcfw->timeout_send);
+ /* ring CMDQ DB */
+ wmb();
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+}
+
+static int __send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ u32 bsize, free_slots, required_slots;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ struct bnxt_qplib_cmdqe *cmdqe;
+ struct bnxt_qplib_hwq *cmdq_hwq;
+ u32 sw_prod, cmdq_prod;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u16 cookie;
+ u8 opcode;
+ u8 *preq;
+
+ cmdq = &rcfw->cmdq;
+ cmdq_hwq = &cmdq->hwq;
+ pdev = rcfw->pdev;
+ opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
+
+ /* Cmdq entries are in 16-byte units; each request can consume
+ * one or more cmdqe.
+ */
+ spin_lock_irqsave(&cmdq_hwq->lock, flags);
+ required_slots = bnxt_qplib_get_cmd_slots(msg->req);
+ free_slots = HWQ_FREE_SLOTS(cmdq_hwq);
+ cookie = cmdq->seq_num & RCFW_MAX_COOKIE_VALUE;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ if (required_slots >= free_slots) {
+ dev_warn_ratelimited(&pdev->dev,
+ "QPLIB: RCFW: CMDQ is full req/free %d/%d!\n",
+ required_slots, free_slots);
+ rcfw->cmdq_full_dbg++;
+ spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
+ return -EAGAIN;
+ }
+
+ if (crsqe->is_in_used)
+ panic("QPLIB: Cookie was not requested %d\n",
+ cookie);
+
+ if (msg->block)
+ cookie |= RCFW_CMD_IS_BLOCKING;
+ __set_cmdq_base_cookie(msg->req, msg->req_sz, cpu_to_le16(cookie));
+
+ /* Set cmd_size in terms of 16B slots in req. */
+ bsize = bnxt_qplib_set_cmd_slots(msg->req);
+ /* GET_CMD_SIZE would return number of slots in either case of tlv
+ * and non-tlv commands after call to bnxt_qplib_set_cmd_slots()
+ */
+ crsqe->send_timestamp = jiffies;
+ crsqe->free_slots = free_slots;
+ crsqe->resp = (struct creq_qp_event *)msg->resp;
+ crsqe->resp->cookie = cpu_to_le16(cookie);
+ crsqe->is_internal_cmd = false;
+ crsqe->is_waiter_alive = true;
+ crsqe->is_in_used = true;
+ crsqe->opcode = opcode;
+ crsqe->requested_qp_state = msg->qp_state;
+
+ crsqe->req_size = __get_cmdq_base_cmd_size(msg->req, msg->req_sz);
+ if (__get_cmdq_base_resp_size(msg->req, msg->req_sz) && msg->sb) {
+ struct bnxt_qplib_rcfw_sbuf *sbuf = msg->sb;
+
+ __set_cmdq_base_resp_addr(msg->req, msg->req_sz,
+ cpu_to_le64(sbuf->dma_addr));
+ __set_cmdq_base_resp_size(msg->req, msg->req_sz,
+ ALIGN(sbuf->size, BNXT_QPLIB_CMDQE_UNITS) /
+ BNXT_QPLIB_CMDQE_UNITS);
+ }
+
+ preq = (u8 *)msg->req;
+ do {
+ /* Locate the next cmdq slot */
+ sw_prod = HWQ_CMP(cmdq_hwq->prod, cmdq_hwq);
+ cmdqe = bnxt_qplib_get_qe(cmdq_hwq, sw_prod, NULL);
+ /* Copy a segment of the req cmd to the cmdq */
+ memset(cmdqe, 0, sizeof(*cmdqe));
+ memcpy(cmdqe, preq, min_t(u32, bsize, sizeof(*cmdqe)));
+ preq += min_t(u32, bsize, sizeof(*cmdqe));
+ bsize -= min_t(u32, bsize, sizeof(*cmdqe));
+ cmdq_hwq->prod++;
+ } while (bsize > 0);
+ cmdq->seq_num++;
+
+ cmdq_prod = cmdq_hwq->prod & 0xFFFF;
+ if (test_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags)) {
+ /* The very first doorbell write
+ * is required to set this flag
+ * which prompts the FW to reset
+ * its internal pointers
+ */
+ cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
+ clear_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ }
+ /* ring CMDQ DB */
+ wmb();
+ writel(cmdq_prod, cmdq->cmdq_mbox.prod);
+ writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+
+ dev_dbg(&pdev->dev, "QPLIB: RCFW sent request with 0x%x 0x%x 0x%x\n",
+ cmdq_prod, cmdq_hwq->prod, crsqe->req_size);
+ dev_dbg(&pdev->dev,
+ "QPLIB: opcode 0x%x with cookie 0x%x at cmdq/crsq 0x%p/0x%p\n",
+ opcode,
+ __get_cmdq_base_cookie(msg->req, msg->req_sz),
+ cmdqe, crsqe);
+ spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
+ /* Return the CREQ response pointer */
+ return 0;
+}
+
+/**
+ * __poll_for_resp - self poll completion for rcfw command
+ * @rcfw - rcfw channel instance of rdev
+ * @cookie - cookie to track the command
+ *
+ * It works the same as __wait_for_resp except that this function
+ * polls at short intervals since the interrupt is disabled.
+ * This function cannot be called from a non-sleepable context.
+ *
+ * Returns:
+ * -ETIMEDOUT if the command does not complete within the interval.
+ * 0 if the command is completed by firmware.
+ */
+static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
+ struct bnxt_qplib_crsqe *crsqe;
+ unsigned long issue_time;
+ int ret;
+
+ issue_time = jiffies;
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ do {
+ if (RCFW_NO_FW_ACCESS(rcfw))
+ return bnxt_qplib_map_rc(crsqe->opcode);
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ usleep_range(1000, 1001);
+
+ bnxt_qplib_service_creq((unsigned long)rcfw);
+ if (!crsqe->is_in_used)
+ return 0;
+
+ if (jiffies_to_msecs(jiffies - issue_time) >
+ (rcfw->max_timeout * 1000)) {
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "Self Polling QPLIB: cmdq[%#x]=%#x taken (%lu) msec",
+ cookie, crsqe->opcode,
+ (long)jiffies_to_msecs(jiffies - issue_time));
+ ret = bnxt_re_is_fw_stalled(rcfw, cookie);
+ if (ret)
+ return ret;
+ }
+ } while (true);
+}
+
+static int __send_message_basic_sanity(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg, u8 opcode)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+
+ cmdq = &rcfw->cmdq;
+
+ /* Prevent posting if f/w is not in a state to process */
+ if (RCFW_NO_FW_ACCESS(rcfw))
+ return -ENXIO;
+
+ if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
+ return -ETIMEDOUT;
+
+ if (test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
+ dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!\n");
+ return -EINVAL;
+ }
+
+ if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &cmdq->flags) &&
+ (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
+ opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
+ opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: RCFW not initialized, reject opcode 0x%x\n",
+ opcode);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+/* This function only posts the command and does not wait for completion */
+static void __destroy_timedout_ah(struct bnxt_qplib_rcfw *rcfw,
+ struct creq_create_ah_resp *create_ah_resp)
+{
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_ah req = {};
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH,
+ sizeof(req));
+ req.ah_cid = create_ah_resp->xid;
+ msg.req = (struct cmdq_base *)&req;
+ msg.req_sz = sizeof(req);
+ __send_message_no_waiter(rcfw, &msg);
+ dev_warn_ratelimited(&rcfw->pdev->dev,
+ "From %s: ah_cid = %d timeout_send %d\n", __func__,
+ req.ah_cid,
+ atomic_read(&rcfw->timeout_send));
+}
+
+/**
+ * __bnxt_qplib_rcfw_send_message - qplib interface to send
+ * and complete rcfw command.
+ * @rcfw - rcfw channel instance of rdev
+ * @msg - qplib message internal
+ *
+ * This function does not account for the shadow queue depth. It sends
+ * every command unconditionally as long as the send queue is not full.
+ *
+ * Returns:
+ * 0 if the command is completed by firmware.
+ * Non-zero if the command is not completed by firmware.
+ */
+static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ struct bnxt_qplib_crsqe *crsqe;
+ struct creq_qp_event *event;
+ unsigned long flags;
+ u16 cookie;
+ int rc = 0;
+ u8 opcode;
+
+ opcode = __get_cmdq_base_opcode(msg->req, msg->req_sz);
+
+ rc = __send_message_basic_sanity(rcfw, msg, opcode);
+ if (rc)
+ return rc == -ENXIO ? bnxt_qplib_map_rc(opcode) : rc;
+
+ rc = __send_message(rcfw, msg);
+ if (rc)
+ return rc;
+
+ cookie = le16_to_cpu(__get_cmdq_base_cookie(msg->req,
+ msg->req_sz)) & RCFW_MAX_COOKIE_VALUE;
+
+ if (msg->block)
+ rc = __block_for_resp(rcfw, cookie);
+ else if (atomic_read(&rcfw->rcfw_intr_enabled))
+ rc = __wait_for_resp(rcfw, cookie);
+ else
+ rc = __poll_for_resp(rcfw, cookie);
+
+ if (rc) {
+ /* First check if it is FW stall.
+ * Use hwq.lock to avoid race with actual completion.
+ */
+ spin_lock_irqsave(&rcfw->cmdq.hwq.lock, flags);
+ crsqe = &rcfw->crsqe_tbl[cookie];
+ crsqe->is_waiter_alive = false;
+ if (rc == -ENODEV)
+ set_bit(FIRMWARE_STALL_DETECTED, &rcfw->cmdq.flags);
+ spin_unlock_irqrestore(&rcfw->cmdq.hwq.lock, flags);
+
+ return -ETIMEDOUT;
+ }
+
+ event = (struct creq_qp_event *)msg->resp;
+ if (event->status) {
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x (%s) status %d\n",
+ cookie, opcode, GET_OPCODE_TYPE(opcode), event->status);
+ rc = -EFAULT;
+ /*
+ * Workaround to avoid errors in the stack during bond
+ * creation and deletion.
+ * Disable error returned for ADD_GID/DEL_GID
+ */
+ if (opcode == CMDQ_BASE_OPCODE_ADD_GID ||
+ opcode == CMDQ_BASE_OPCODE_DELETE_GID)
+ rc = 0;
+ }
+
+ dev_dbg(&rcfw->pdev->dev, "QPLIB: %s:%d - op 0x%x (%s), cookie 0x%x -- Return: e->status 0x%x, rc = 0x%x\n",
+ __func__, __LINE__, opcode, GET_OPCODE_TYPE(opcode), cookie, event->status, rc);
+ return rc;
+}
+
+/**
+ * bnxt_qplib_rcfw_send_message - qplib interface to send
+ * and complete rcfw command.
+ * @rcfw - rcfw channel instance of rdev
+ * @msg - qplib message internal
+ *
+ * The driver interacts with the firmware through the rcfw channel
+ * (slow path) in two ways:
+ * a. Blocking rcfw command send. In this path, the driver cannot hold
+ * the context for a long period since it holds the cpu until the
+ * command completes.
+ * b. Non-blocking rcfw command send. In this path, the driver can hold
+ * the context for a longer period. Many commands may be pending,
+ * waiting for completion, because of the non-blocking nature.
+ *
+ * The driver uses a shadow queue depth. The current queue depth of 8K
+ * (effectively ~4K outstanding rcfw commands, due to the rcfw message
+ * size) is not optimal for rcfw command processing in firmware.
+ * RCFW_CMD_NON_BLOCKING_SHADOW_QD is defined as 64: allow at most 64
+ * non-blocking rcfw commands outstanding with the firmware.
+ * Allow all blocking commands until the queue is full.
+ *
+ * Returns:
+ * 0 if command completed by firmware.
+ * Non zero if the command is not completed by firmware.
+ */
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg)
+{
+ int ret;
+
+ if (!msg->block) {
+ down(&rcfw->rcfw_inflight);
+ ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
+ up(&rcfw->rcfw_inflight);
+ } else {
+ ret = __bnxt_qplib_rcfw_send_message(rcfw, msg);
+ }
+
+ return ret;
+}
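+
+/* Caller-side sketch (illustrative; the destroy_cq request/response
+ * types mirror how bnxt_qplib_deinit_rcfw() below builds its message):
+ *
+ *     struct creq_destroy_cq_resp resp = {};
+ *     struct bnxt_qplib_cmdqmsg msg = {};
+ *     struct cmdq_destroy_cq req = {};
+ *
+ *     bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_CQ,
+ *                              sizeof(req));
+ *     req.cq_cid = cpu_to_le32(cq->id);
+ *     bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ *                             sizeof(resp), 0);
+ *     rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ */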
+
+static void bnxt_re_add_perf_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_crsqe *crsqe)
+{
+ u32 latency_msec, dest_stats_id;
+ u64 *dest_stats_ptr = NULL;
+
+ latency_msec = jiffies_to_msecs(rcfw->cmdq.last_seen -
+ crsqe->send_timestamp);
+ if (latency_msec / 1000 < RCFW_MAX_LATENCY_SEC_SLAB_INDEX)
+ rcfw->rcfw_lat_slab_sec[latency_msec / 1000]++;
+
+ if (!rcfw->sp_perf_stats_enabled)
+ return;
+
+ if (latency_msec < RCFW_MAX_LATENCY_MSEC_SLAB_INDEX)
+ rcfw->rcfw_lat_slab_msec[latency_msec]++;
+
+ switch (crsqe->opcode) {
+ case CMDQ_BASE_OPCODE_CREATE_QP:
+ dest_stats_id = rcfw->qp_create_stats_id++;
+ dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;
+ dest_stats_ptr = &rcfw->qp_create_stats[dest_stats_id];
+ break;
+ case CMDQ_BASE_OPCODE_DESTROY_QP:
+ dest_stats_id = rcfw->qp_destroy_stats_id++;
+ dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;
+ dest_stats_ptr = &rcfw->qp_destroy_stats[dest_stats_id];
+ break;
+ case CMDQ_BASE_OPCODE_REGISTER_MR:
+ dest_stats_id = rcfw->mr_create_stats_id++;
+ dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;
+ dest_stats_ptr = &rcfw->mr_create_stats[dest_stats_id];
+ break;
+ case CMDQ_BASE_OPCODE_DEREGISTER_MR:
+ case CMDQ_BASE_OPCODE_DEALLOCATE_KEY:
+ dest_stats_id = rcfw->mr_destroy_stats_id++;
+ dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;
+ dest_stats_ptr = &rcfw->mr_destroy_stats[dest_stats_id];
+ break;
+ case CMDQ_BASE_OPCODE_MODIFY_QP:
+ if (crsqe->requested_qp_state != IB_QPS_ERR)
+ break;
+ dest_stats_id = rcfw->qp_modify_stats_id++;
+ dest_stats_id = dest_stats_id % RCFW_MAX_STAT_INDEX;
+ dest_stats_ptr = &rcfw->qp_modify_stats[dest_stats_id];
+ break;
+ default:
+ break;
+ }
+ if (dest_stats_ptr)
+ *dest_stats_ptr = max_t(unsigned long,
+ (rcfw->cmdq.last_seen - crsqe->send_timestamp), 1);
+}
+
+/* Completions */
+static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
+ struct creq_qp_event *event,
+ u32 *num_wait)
+{
+ struct bnxt_qplib_hwq *cmdq_hwq = &rcfw->cmdq.hwq;
+ struct creq_cq_error_notification *cqerr;
+ struct creq_qp_error_notification *qperr;
+ struct bnxt_qplib_crsqe *crsqe;
+ struct bnxt_qplib_reftbl *tbl;
+ struct bnxt_qplib_qp *qp;
+ struct bnxt_qplib_cq *cq;
+ u16 cookie, blocked = 0;
+ struct pci_dev *pdev;
+ bool is_waiter_alive;
+ unsigned long flags;
+ u32 wait_cmds = 0;
+ u32 xid, qp_idx;
+ u32 req_size;
+ int rc = 0;
+
+ pdev = rcfw->pdev;
+ switch (event->event) {
+ case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
+ tbl = &rcfw->res->reftbl.qpref;
+ qperr = (struct creq_qp_error_notification *)event;
+ xid = le32_to_cpu(qperr->xid);
+ qp_idx = map_qp_id_to_tbl_indx(xid, tbl);
+ spin_lock(&tbl->lock);
+ qp = tbl->rec[qp_idx].handle;
+ if (!qp) {
+ spin_unlock(&tbl->lock);
+ break;
+ }
+ bnxt_qplib_mark_qp_error(qp);
+ rc = rcfw->creq.aeq_handler(rcfw, event, qp);
+ spin_unlock(&tbl->lock);
+ /*
+ * Keep these prints at debug level to avoid flooding the log
+ * when applications modify QPs to the error state
+ */
+ dev_dbg(&pdev->dev, "QPLIB: QP Error encountered!\n");
+ dev_dbg(&pdev->dev,
+ "QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
+ xid, qperr->req_err_state_reason,
+ qperr->res_err_state_reason);
+ break;
+ case CREQ_QP_EVENT_EVENT_CQ_ERROR_NOTIFICATION:
+ tbl = &rcfw->res->reftbl.cqref;
+ cqerr = (struct creq_cq_error_notification *)event;
+ xid = le32_to_cpu(cqerr->xid);
+ spin_lock(&tbl->lock);
+ cq = tbl->rec[GET_TBL_INDEX(xid, tbl)].handle;
+ if (!cq) {
+ spin_unlock(&tbl->lock);
+ break;
+ }
+ rc = rcfw->creq.aeq_handler(rcfw, event, cq);
+ spin_unlock(&tbl->lock);
+ dev_dbg(&pdev->dev, "QPLIB: CQ error encountered!\n");
+ break;
+ default:
+ /*
+ * Command response:
+ * the cmdq hwq lock must be acquired to synchronize command
+ * send with completion reaping. This function is always called
+ * with the creq hwq lock held, so there is no chance of deadlock
+ * here as the locks are taken in a consistent order. The nested
+ * variant of spin_lock annotates this for lockdep.
+ */
+ spin_lock_irqsave_nested(&cmdq_hwq->lock, flags,
+ SINGLE_DEPTH_NESTING);
+ cookie = le16_to_cpu(event->cookie);
+ blocked = cookie & RCFW_CMD_IS_BLOCKING;
+ cookie &= RCFW_MAX_COOKIE_VALUE;
+
+ crsqe = &rcfw->crsqe_tbl[cookie];
+
+ bnxt_re_add_perf_stats(rcfw, crsqe);
+
+ if (WARN_ONCE(test_bit(FIRMWARE_STALL_DETECTED,
+ &rcfw->cmdq.flags),
+ "QPLIB: Unreponsive rcfw channel detected.!!")) {
+ dev_info(&pdev->dev, "rcfw timedout: cookie = %#x,"
+ " latency_msec = %ld free_slots = %d\n", cookie,
+ (long)jiffies_to_msecs(rcfw->cmdq.last_seen -
+ crsqe->send_timestamp),
+ crsqe->free_slots);
+ spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
+ return rc;
+ }
+
+ if (crsqe->is_internal_cmd && !event->status)
+ atomic_dec(&rcfw->timeout_send);
+
+ if (crsqe->is_waiter_alive) {
+ if (crsqe->resp)
+ memcpy(crsqe->resp, event, sizeof(*event));
+ if (!blocked)
+ wait_cmds++;
+ }
+
+ req_size = crsqe->req_size;
+ is_waiter_alive = crsqe->is_waiter_alive;
+
+ crsqe->req_size = 0;
+ if (!crsqe->is_waiter_alive)
+ crsqe->resp = NULL;
+ crsqe->is_in_used = false;
+ /* Consumer is updated so that __send_message_no_waiter
+ * can never see queue full.
+ * It is safe since we are still holding cmdq_hwq->lock.
+ */
+ cmdq_hwq->cons += req_size;
+
+ /* This handles the following scenario -
+ * Create AH completed successfully in firmware, but the
+ * completion took long enough that the driver already lost
+ * the caller's create_ah context.
+ * We have already returned failure for the create_ah verb,
+ * so destroy the same address handle since the stack no
+ * longer uses it. We don't care about completion in
+ * __send_message_no_waiter.
+ * If destroy_ah fails in firmware, the AH resource leaks;
+ * that is non-critical and unlikely, and the current design
+ * does not handle such a case.
+ */
+ if (!is_waiter_alive && !event->status &&
+ event->event == CREQ_QP_EVENT_EVENT_CREATE_AH)
+ __destroy_timedout_ah(rcfw,
+ (struct creq_create_ah_resp *)
+ event);
+
+ spin_unlock_irqrestore(&cmdq_hwq->lock, flags);
+ }
+ *num_wait += wait_cmds;
+ return rc;
+}
+
+/* SP - CREQ Completion handlers */
+static void bnxt_qplib_service_creq(unsigned long data)
+{
+ struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
+ struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
+ struct bnxt_qplib_res *res;
+ u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
+ struct bnxt_qplib_hwq *creq_hwq = &creq->hwq;
+ struct creq_base *creqe;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ u32 num_wakeup = 0;
+ int rc;
+
+ pdev = rcfw->pdev;
+ res = rcfw->res;
+ /* Service the CREQ until empty */
+ spin_lock_irqsave(&creq_hwq->lock, flags);
+ while (budget > 0) {
+ if (RCFW_NO_FW_ACCESS(rcfw)) {
+ spin_unlock_irqrestore(&creq_hwq->lock, flags);
+ return;
+ }
+ creqe = bnxt_qplib_get_qe(creq_hwq, creq_hwq->cons, NULL);
+ if (!CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags))
+ break;
+ /* The valid test of the entry must be done first before
+ * reading any further.
+ */
+ dma_rmb();
+ type = creqe->type & CREQ_BASE_TYPE_MASK;
+ rcfw->cmdq.last_seen = jiffies;
+
+ switch (type) {
+ case CREQ_BASE_TYPE_QP_EVENT:
+ bnxt_qplib_process_qp_event
+ (rcfw, (struct creq_qp_event *)creqe,
+ &num_wakeup);
+ creq->stats.creq_qp_event_processed++;
+ break;
+ case CREQ_BASE_TYPE_FUNC_EVENT:
+ rc = rcfw->creq.aeq_handler(rcfw, creqe, NULL);
+ if (rc)
+ dev_warn(&pdev->dev,
+ "QPLIB: async event type = 0x%x not handled",
+ type);
+ creq->stats.creq_func_event_processed++;
+ break;
+ default:
+ if (type != HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT) {
+ dev_warn(&pdev->dev,
+ "QPLIB: op_event = 0x%x not handled\n",
+ type);
+ }
+ break;
+ }
+ budget--;
+ bnxt_qplib_hwq_incr_cons(creq_hwq->max_elements, &creq_hwq->cons,
+ 1, &creq->creq_db.dbinfo.flags);
+ }
+ if (budget == CREQ_ENTRY_POLL_BUDGET &&
+ !CREQ_CMP_VALID(creqe, creq->creq_db.dbinfo.flags)) {
+ /* No completions received during this poll. Enable interrupt now */
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+ creq->stats.creq_arm_count++;
+ dev_dbg(&pdev->dev, "QPLIB: Num of Func (0x%llx) \n",
+ creq->stats.creq_func_event_processed);
+ dev_dbg(&pdev->dev, "QPLIB: QP (0x%llx) events processed\n",
+ creq->stats.creq_qp_event_processed);
+ dev_dbg(&pdev->dev, "QPLIB: Armed:%#llx resched:%#llx \n",
+ creq->stats.creq_arm_count,
+ creq->stats.creq_tasklet_schedule_count);
+ } else if (creq->requested) {
+ /*
+ * Currently there is no bottom half implementation to process
+ * completions, all completions are processed in interrupt context
+ * only. So enable interrupts.
+ */
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+ creq->stats.creq_tasklet_schedule_count++;
+ }
+ spin_unlock_irqrestore(&creq_hwq->lock, flags);
+ if (num_wakeup)
+ wake_up_all(&rcfw->cmdq.waitq);
+}
+
+static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
+{
+ struct bnxt_qplib_rcfw *rcfw = dev_instance;
+
+ bnxt_qplib_service_creq((unsigned long)rcfw);
+ return IRQ_HANDLED;
+}
+
+/* RCFW */
+int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct creq_deinitialize_fw_resp resp = {};
+ struct cmdq_deinitialize_fw req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEINITIALIZE_FW,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
+ sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+ return 0;
+}
+
+int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn)
+{
+ struct creq_initialize_fw_resp resp = {};
+ struct cmdq_initialize_fw req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_ctx *hctx;
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_hwq *hwq;
+ int rc;
+
+ res = rcfw->res;
+ cctx = res->cctx;
+ hctx = res->hctx;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_INITIALIZE_FW,
+ sizeof(req));
+ /* Supply (log-base-2-of-host-page-size - base-page-shift)
+ * to bono to adjust the doorbell page sizes.
+ */
+ req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
+ RCFW_DBR_BASE_PAGE_SHIFT);
+ /*
+ * VFs need not set up the HW context area; the PF
+ * sets it up on the VF's behalf. Skip the
+ * HW programming.
+ */
+ if (is_virtfn || _is_chip_gen_p5_p7(cctx))
+ goto skip_ctx_setup;
+
+ hwq = &hctx->qp_ctx.hwq;
+ req.qpc_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.number_of_qp = cpu_to_le32(hwq->max_elements);
+ req.qpc_pg_size_qpc_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_QPC_PG_SIZE_SFT) |
+ (u8)hwq->level;
+
+ hwq = &hctx->mrw_ctx.hwq;
+ req.mrw_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.number_of_mrw = cpu_to_le32(hwq->max_elements);
+ req.mrw_pg_size_mrw_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_MRW_PG_SIZE_SFT) |
+ (u8)hwq->level;
+
+ hwq = &hctx->srq_ctx.hwq;
+ req.srq_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.number_of_srq = cpu_to_le32(hwq->max_elements);
+ req.srq_pg_size_srq_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_SRQ_PG_SIZE_SFT) |
+ (u8)hwq->level;
+
+ hwq = &hctx->cq_ctx.hwq;
+ req.cq_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.number_of_cq = cpu_to_le32(hwq->max_elements);
+ req.cq_pg_size_cq_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_CQ_PG_SIZE_SFT) |
+ (u8)hwq->level;
+
+ hwq = &hctx->tim_ctx.hwq;
+ req.tim_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.tim_pg_size_tim_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_TIM_PG_SIZE_SFT) |
+ (u8)hwq->level;
+ hwq = &hctx->tqm_ctx.pde;
+ req.tqm_page_dir = cpu_to_le64(_get_base_addr(hwq));
+ req.tqm_pg_size_tqm_lvl = (_get_pte_pg_size(hwq) <<
+ CMDQ_INITIALIZE_FW_TQM_PG_SIZE_SFT) |
+ (u8)hwq->level;
+skip_ctx_setup:
+ if (BNXT_RE_HW_RETX(res->dattr->dev_cap_flags))
+ req.flags |= CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED;
+ req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL,
+ sizeof(req), sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->cmdq.flags);
+
+ return 0;
+}
+
+void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+
+ vfree(rcfw->rcfw_lat_slab_msec);
+ rcfw->rcfw_lat_slab_msec = NULL;
+ vfree(rcfw->qp_create_stats);
+ rcfw->qp_create_stats = NULL;
+ vfree(rcfw->qp_destroy_stats);
+ rcfw->qp_destroy_stats = NULL;
+ vfree(rcfw->mr_create_stats);
+ rcfw->mr_create_stats = NULL;
+ vfree(rcfw->mr_destroy_stats);
+ rcfw->mr_destroy_stats = NULL;
+ vfree(rcfw->qp_modify_stats);
+ rcfw->qp_modify_stats = NULL;
+ rcfw->sp_perf_stats_enabled = false;
+
+ kfree(rcfw->crsqe_tbl);
+ rcfw->crsqe_tbl = NULL;
+
+ bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq);
+ bnxt_qplib_free_hwq(res, &rcfw->creq.hwq);
+ rcfw->pdev = NULL;
+}
+
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+
+ rcfw->pdev = res->pdev;
+ rcfw->res = res;
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = rcfw->res;
+ hwq_attr.depth = BNXT_QPLIB_CREQE_MAX_CNT;
+ hwq_attr.stride = BNXT_QPLIB_CREQE_UNITS;
+ hwq_attr.type = _get_hwq_type(res);
+
+ if (bnxt_qplib_alloc_init_hwq(&creq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: HW channel CREQ allocation failed\n");
+ return -ENOMEM;
+ }
+
+ sginfo.pgsize = BNXT_QPLIB_CMDQE_PAGE_SIZE;
+ hwq_attr.depth = BNXT_QPLIB_CMDQE_MAX_CNT & 0x7FFFFFFF;
+ hwq_attr.stride = BNXT_QPLIB_CMDQE_UNITS;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ if (bnxt_qplib_alloc_init_hwq(&cmdq->hwq, &hwq_attr)) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: HW channel CMDQ allocation failed\n");
+ goto fail_free_creq_hwq;
+ }
+
+ rcfw->crsqe_tbl = kcalloc(cmdq->hwq.max_elements,
+ sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
+ if (!rcfw->crsqe_tbl) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: HW channel CRSQ allocation failed\n");
+ goto fail_free_cmdq_hwq;
+ }
+
+ rcfw->max_timeout = res->cctx->hwrm_cmd_max_timeout;
+
+ rcfw->sp_perf_stats_enabled = false;
+ rcfw->rcfw_lat_slab_msec = vzalloc(sizeof(u32) *
+ RCFW_MAX_LATENCY_MSEC_SLAB_INDEX);
+ rcfw->qp_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);
+ rcfw->qp_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);
+ rcfw->mr_create_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);
+ rcfw->mr_destroy_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);
+ rcfw->qp_modify_stats = vzalloc(sizeof(u64) * RCFW_MAX_STAT_INDEX);
+
+ if (rcfw->rcfw_lat_slab_msec &&
+ rcfw->qp_create_stats &&
+ rcfw->qp_destroy_stats &&
+ rcfw->mr_create_stats &&
+ rcfw->mr_destroy_stats &&
+ rcfw->qp_modify_stats)
+ rcfw->sp_perf_stats_enabled = true;
+
+ return 0;
+fail_free_cmdq_hwq:
+ bnxt_qplib_free_hwq(res, &rcfw->cmdq.hwq);
+fail_free_creq_hwq:
+ bnxt_qplib_free_hwq(res, &rcfw->creq.hwq);
+ return -ENOMEM;
+}
+
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_res *res;
+
+ creq = &rcfw->creq;
+ res = rcfw->res;
+
+ if (!creq->requested)
+ return;
+
+ creq->requested = false;
+ /* Mask h/w interrupts */
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, false);
+ /* Sync with last running IRQ-handler */
+ synchronize_irq(creq->msix_vec);
+ free_irq(creq->msix_vec, rcfw);
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ /* rcfw_intr_enabled should not be greater than 1. Debug
+ * print to check if that is the case
+ */
+ if (atomic_read(&rcfw->rcfw_intr_enabled) > 1) {
+ dev_err(&rcfw->pdev->dev,
+ "%s: rcfw->rcfw_intr_enabled = 0x%x\n", __func__,
+ atomic_read(&rcfw->rcfw_intr_enabled));
+ }
+ atomic_set(&rcfw->rcfw_intr_enabled, 0);
+ rcfw->num_irq_stopped++;
+}
+
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+
+ creq = &rcfw->creq;
+ cmdq = &rcfw->cmdq;
+ /* Make sure the HW channel is stopped! */
+ bnxt_qplib_rcfw_stop_irq(rcfw, true);
+
+ creq->creq_db.reg.bar_reg = NULL;
+ creq->creq_db.db = NULL;
+
+ if (cmdq->cmdq_mbox.reg.bar_reg) {
+ iounmap(cmdq->cmdq_mbox.reg.bar_reg);
+ cmdq->cmdq_mbox.reg.bar_reg = NULL;
+ cmdq->cmdq_mbox.prod = NULL;
+ cmdq->cmdq_mbox.db = NULL;
+ }
+
+ creq->aeq_handler = NULL;
+ creq->msix_vec = 0;
+}
+
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init)
+{
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_res *res;
+ int rc;
+
+ creq = &rcfw->creq;
+ res = rcfw->res;
+
+ if (creq->requested)
+ return -EFAULT;
+
+ creq->msix_vec = msix_vector;
+
+ creq->irq_name = kasprintf(GFP_KERNEL, "bnxt_re-creq@pci:%s",
+ pci_name(res->pdev));
+ if (!creq->irq_name)
+ return -ENOMEM;
+
+ rc = request_irq(creq->msix_vec, bnxt_qplib_creq_irq, 0,
+ creq->irq_name, rcfw);
+ if (rc) {
+ kfree(creq->irq_name);
+ creq->irq_name = NULL;
+ return rc;
+ }
+ creq->requested = true;
+
+ bnxt_qplib_ring_nq_db(&creq->creq_db.dbinfo, res->cctx, true);
+
+ rcfw->num_irq_started++;
+ /* Debug print to check rcfw interrupt enable/disable is invoked
+ * out of sequence
+ */
+ if (atomic_read(&rcfw->rcfw_intr_enabled) > 0) {
+ dev_err(&rcfw->pdev->dev,
+ "%s: rcfw->rcfw_intr_enabled = 0x%x\n", __func__,
+ atomic_read(&rcfw->rcfw_intr_enabled));
+ }
+ atomic_inc(&rcfw->rcfw_intr_enabled);
+ return 0;
+}
+
+static int bnxt_qplib_map_cmdq_mbox(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ resource_size_t bar_reg;
+ struct pci_dev *pdev;
+
+ pdev = rcfw->pdev;
+ mbox = &rcfw->cmdq.cmdq_mbox;
+
+ mbox->reg.bar_id = RCFW_COMM_PCI_BAR_REGION;
+ mbox->reg.len = RCFW_COMM_SIZE;
+ mbox->reg.bar_base = pci_resource_start(pdev, mbox->reg.bar_id);
+ if (!mbox->reg.bar_base) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d resc start is 0!\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ bar_reg = mbox->reg.bar_base + RCFW_COMM_BASE_OFFSET;
+ mbox->reg.bar_reg = ioremap(bar_reg, mbox->reg.len);
+ if (!mbox->reg.bar_reg) {
+ dev_err(&pdev->dev,
+ "QPLIB: CMDQ BAR region %d mapping failed\n",
+ mbox->reg.bar_id);
+ return -ENOMEM;
+ }
+
+ mbox->prod = (void __iomem *)((char *)mbox->reg.bar_reg +
+ RCFW_PF_VF_COMM_PROD_OFFSET);
+ mbox->db = (void __iomem *)((char *)mbox->reg.bar_reg +
+ RCFW_COMM_TRIG_OFFSET);
+ return 0;
+}
+
+static int bnxt_qplib_map_creq_db(struct bnxt_qplib_rcfw *rcfw, u32 reg_offt)
+{
+ struct bnxt_qplib_creq_db *creq_db;
+ struct bnxt_qplib_reg_desc *dbreg;
+ struct bnxt_qplib_res *res;
+
+ res = rcfw->res;
+ creq_db = &rcfw->creq.creq_db;
+ dbreg = &res->dpi_tbl.ucreg;
+
+ creq_db->reg.bar_id = dbreg->bar_id;
+ creq_db->reg.bar_base = dbreg->bar_base;
+ creq_db->reg.bar_reg = dbreg->bar_reg + reg_offt;
+ creq_db->reg.len = _is_chip_gen_p5_p7(res->cctx) ? sizeof(u64) :
+ sizeof(u32);
+
+ creq_db->dbinfo.db = creq_db->reg.bar_reg;
+ creq_db->dbinfo.hwq = &rcfw->creq.hwq;
+ creq_db->dbinfo.xid = rcfw->creq.ring_id;
+ creq_db->dbinfo.seed = rcfw->creq.ring_id;
+ creq_db->dbinfo.flags = 0;
+ spin_lock_init(&creq_db->dbinfo.lock);
+ creq_db->dbinfo.shadow_key = BNXT_QPLIB_DBR_KEY_INVALID;
+ creq_db->dbinfo.res = rcfw->res;
+
+ return 0;
+}
+
+static void bnxt_qplib_start_rcfw(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ struct bnxt_qplib_cmdq_mbox *mbox;
+ struct cmdq_init init = {0};
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+ mbox = &cmdq->cmdq_mbox;
+
+ init.cmdq_pbl = cpu_to_le64(cmdq->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ init.cmdq_size_cmdq_lvl = cpu_to_le16(
+ ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
+ CMDQ_INIT_CMDQ_SIZE_MASK) |
+ ((cmdq->hwq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
+ CMDQ_INIT_CMDQ_LVL_MASK));
+ init.creq_ring_id = cpu_to_le16(creq->ring_id);
+ /* Write to the Bono mailbox register */
+ __iowrite32_copy(mbox->reg.bar_reg, &init, sizeof(init) / 4);
+}
+
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off,
+ aeq_handler_t aeq_handler)
+{
+ struct bnxt_qplib_cmdq_ctx *cmdq;
+ struct bnxt_qplib_creq_ctx *creq;
+ int rc;
+
+ cmdq = &rcfw->cmdq;
+ creq = &rcfw->creq;
+
+ /* Clear to defaults */
+ cmdq->seq_num = 0;
+ set_bit(FIRMWARE_FIRST_FLAG, &cmdq->flags);
+ init_waitqueue_head(&cmdq->waitq);
+
+ creq->stats.creq_qp_event_processed = 0;
+ creq->stats.creq_func_event_processed = 0;
+ creq->aeq_handler = aeq_handler;
+
+ rc = bnxt_qplib_map_cmdq_mbox(rcfw);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_map_creq_db(rcfw, cp_bar_reg_off);
+ if (rc)
+ return rc;
+
+ rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
+ if (rc) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: Failed to request IRQ for CREQ rc = 0x%x\n", rc);
+ bnxt_qplib_disable_rcfw_channel(rcfw);
+ return rc;
+ }
+
+ rcfw->curr_shadow_qd = min_not_zero(cmdq_shadow_qd,
+ (unsigned int)RCFW_CMD_NON_BLOCKING_SHADOW_QD);
+ sema_init(&rcfw->rcfw_inflight, rcfw->curr_shadow_qd);
+ dev_dbg(&rcfw->pdev->dev,
+ "Perf Debug: shadow qd %d\n", rcfw->curr_shadow_qd);
+ bnxt_qplib_start_rcfw(rcfw);
+
+ return 0;
+}
diff --git a/sys/dev/bnxt/bnxt_re/qplib_rcfw.h b/sys/dev/bnxt/bnxt_re/qplib_rcfw.h
new file mode 100644
index 000000000000..f117525daacb
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_rcfw.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: RDMA Controller HW interface (header)
+ */
+
+#ifndef __BNXT_QPLIB_RCFW_H__
+#define __BNXT_QPLIB_RCFW_H__
+
+#include <linux/semaphore.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <net/ipv6.h>
+#include <linux/if_ether.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/interrupt.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+
+#include "qplib_tlv.h"
+
+#define RCFW_CMDQ_TRIG_VAL 1
+#define RCFW_COMM_PCI_BAR_REGION 0
+#define RCFW_COMM_CONS_PCI_BAR_REGION 2
+#define RCFW_COMM_BASE_OFFSET 0x600
+#define RCFW_PF_VF_COMM_PROD_OFFSET 0xc
+#define RCFW_COMM_TRIG_OFFSET 0x100
+#define RCFW_COMM_SIZE 0x104
+
+#define RCFW_DBR_PCI_BAR_REGION 2
+#define RCFW_DBR_BASE_PAGE_SHIFT 12
+#define RCFW_MAX_LATENCY_SEC_SLAB_INDEX 128
+#define RCFW_MAX_LATENCY_MSEC_SLAB_INDEX 3000
+#define RCFW_MAX_STAT_INDEX 0xFFFF
+#define RCFW_FW_STALL_MAX_TIMEOUT 40
+
+#define GET_OPCODE_TYPE(x) \
+ ((x) == 0x1 ? "CREATE_QP": \
+ ((x) == 0x2 ? "DESTROY_QP": \
+ ((x) == 0x3 ? "MODIFY_QP": \
+ ((x) == 0x4 ? "QUERY_QP": \
+ ((x) == 0x5 ? "CREATE_SRQ": \
+ ((x) == 0x6 ? "DESTROY_SRQ": \
+ ((x) == 0x8 ? "QUERY_SRQ": \
+ ((x) == 0x9 ? "CREATE_CQ": \
+ ((x) == 0xa ? "DESTROY_CQ": \
+ ((x) == 0xc ? "RESIZE_CQ": \
+ ((x) == 0xd ? "ALLOCATE_MRW": \
+ ((x) == 0xe ? "DEALLOCATE_KEY": \
+ ((x) == 0xf ? "REGISTER_MR": \
+ ((x) == 0x10 ? "DEREGISTER_MR": \
+ ((x) == 0x11 ? "ADD_GID": \
+ ((x) == 0x12 ? "DELETE_GID": \
+ ((x) == 0x17 ? "MODIFY_GID": \
+ ((x) == 0x18 ? "QUERY_GID": \
+ ((x) == 0x13 ? "CREATE_QP1": \
+ ((x) == 0x14 ? "DESTROY_QP1": \
+ ((x) == 0x15 ? "CREATE_AH": \
+ ((x) == 0x16 ? "DESTROY_AH": \
+ ((x) == 0x80 ? "INITIALIZE_FW": \
+ ((x) == 0x81 ? "DEINITIALIZE_FW": \
+ ((x) == 0x82 ? "STOP_FUNC": \
+ ((x) == 0x83 ? "QUERY_FUNC": \
+ ((x) == 0x84 ? "SET_FUNC_RESOURCES": \
+ ((x) == 0x85 ? "READ_CONTEXT": \
+ ((x) == 0x86 ? "VF_BACKCHANNEL_REQUEST": \
+ ((x) == 0x87 ? "READ_VF_MEMORY": \
+ ((x) == 0x88 ? "COMPLETE_VF_REQUEST": \
+ ((x) == 0x89 ? "EXTEND_CONTEXT_ARRRAY": \
+ ((x) == 0x8a ? "MAP_TC_TO_COS": \
+ ((x) == 0x8b ? "QUERY_VERSION": \
+ ((x) == 0x8c ? "MODIFY_ROCE_CC": \
+ ((x) == 0x8d ? "QUERY_ROCE_CC": \
+ ((x) == 0x8e ? "QUERY_ROCE_STATS": \
+ ((x) == 0x8f ? "SET_LINK_AGGR_MODE": \
+ ((x) == 0x90 ? "MODIFY_CQ": \
+ ((x) == 0x91 ? "QUERY_QP_EXTEND": \
+ ((x) == 0x92 ? "QUERY_ROCE_STATS_EXT": \
+ "Unknown OPCODE" \
+ )))))))))))))))))))))))))))))))))))))))))
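+
+/* The nested-ternary form above stays usable inside printk argument
+ * lists. An equivalent function-based form (sketch only, not used by
+ * the driver) would be:
+ *
+ *     static inline const char *bnxt_re_opcode_str(u8 x)
+ *     {
+ *             switch (x) {
+ *             case 0x1: return "CREATE_QP";
+ *             case 0x2: return "DESTROY_QP";
+ *             ... one case per opcode above ...
+ *             default: return "Unknown OPCODE";
+ *             }
+ *     }
+ */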
+
+extern unsigned int cmdq_shadow_qd;
+/* The cmdq contains a fixed number of 16-byte slots */
+struct bnxt_qplib_cmdqe {
+ u8 data[16];
+};
+#define BNXT_QPLIB_CMDQE_UNITS sizeof(struct bnxt_qplib_cmdqe)
+
+static inline void bnxt_qplib_rcfw_cmd_prep(void *r, u8 opcode, u8 cmd_size)
+{
+ struct cmdq_base *req = r;
+
+ req->opcode = opcode;
+ req->cmd_size = cmd_size;
+}
+
+/* Shadow queue depth for non blocking command */
+#define RCFW_CMD_NON_BLOCKING_SHADOW_QD 64
+#define RCFW_CMD_DEV_ERR_CHECK_TIME_MS 1000 /* 1 second timeout */
+#define RCFW_ERR_RETRY_COUNT (RCFW_CMD_WAIT_TIME_MS / RCFW_CMD_DEV_ERR_CHECK_TIME_MS)
+
+/* CMDQ elements */
+#define BNXT_QPLIB_CMDQE_MAX_CNT 8192
+#define BNXT_QPLIB_CMDQE_BYTES (BNXT_QPLIB_CMDQE_MAX_CNT * \
+ BNXT_QPLIB_CMDQE_UNITS)
+#define BNXT_QPLIB_CMDQE_NPAGES ((BNXT_QPLIB_CMDQE_BYTES % \
+ PAGE_SIZE) ? \
+ ((BNXT_QPLIB_CMDQE_BYTES / \
+ PAGE_SIZE) + 1) : \
+ (BNXT_QPLIB_CMDQE_BYTES / \
+ PAGE_SIZE))
+#define BNXT_QPLIB_CMDQE_PAGE_SIZE (BNXT_QPLIB_CMDQE_NPAGES * \
+ PAGE_SIZE)
+
+#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
+#define RCFW_MAX_COOKIE_VALUE (BNXT_QPLIB_CMDQE_MAX_CNT - 1)
+#define RCFW_CMD_IS_BLOCKING 0x8000
+#define RCFW_NO_FW_ACCESS(rcfw) \
+ (test_bit(ERR_DEVICE_DETACHED, &(rcfw)->cmdq.flags) || \
+ pci_channel_offline((rcfw)->pdev))
+
+/* Crsq buf is 1024 bytes */
+struct bnxt_qplib_crsbe {
+ u8 data[1024];
+};
+
+/* Get the number of command units required for the req. The
+ * function returns the correct value only if called before the
+ * req is converted with bnxt_qplib_set_cmd_slots().
+ */
+static inline u32 bnxt_qplib_get_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_units = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_units = tlv_req->total_size;
+ } else {
+ cmd_units = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+ }
+ return cmd_units;
+}
+
+/* Convert the cmd_size to CMDQE slot units; returns the request size in bytes */
+static inline u32 bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
+{
+ u32 cmd_byte = 0;
+
+ if (HAS_TLV_HEADER(req)) {
+ struct roce_tlv *tlv_req = (struct roce_tlv *)req;
+
+ cmd_byte = tlv_req->total_size * BNXT_QPLIB_CMDQE_UNITS;
+ } else {
+ cmd_byte = req->cmd_size;
+ req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
+ BNXT_QPLIB_CMDQE_UNITS;
+ }
+
+ return cmd_byte;
+}
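+
+/* Worked example (illustrative): a non-TLV request prepared with
+ * cmd_size = 52 bytes occupies (52 + 16 - 1) / 16 = 4 CMDQE slots;
+ * bnxt_qplib_set_cmd_slots() rewrites req->cmd_size to 4 and returns
+ * the original 52 bytes for the segmented copy loop in __send_message().
+ */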
+
+/* CREQ */
+/* Allocate 1 per QP for async error notification for now */
+#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
+#define BNXT_QPLIB_CREQE_UNITS 16 /* 16-Bytes per prod unit */
+
+#define CREQ_CMP_VALID(hdr, pass) \
+ (!!((hdr)->v & CREQ_BASE_V) == \
+ !(pass & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK))
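+
+/* Validity-check sketch: the producer inverts the V bit each time the
+ * ring wraps, and the consumer tracks the current pass in
+ * dbinfo.flags. On an even pass an entry is valid when V == 1, on an
+ * odd pass when V == 0, so stale entries from the previous pass never
+ * match.
+ */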
+
+#define CREQ_ENTRY_POLL_BUDGET 8
+
+typedef int (*aeq_handler_t)(struct bnxt_qplib_rcfw *, void *, void *);
+
+struct bnxt_qplib_crsqe {
+ struct creq_qp_event *resp;
+ u32 req_size;
+ bool is_waiter_alive;
+ bool is_internal_cmd;
+ bool is_in_used;
+
+ /* Free slots at the time of submission */
+ u32 free_slots;
+ unsigned long send_timestamp;
+ u8 opcode;
+ u8 requested_qp_state;
+};
+
+struct bnxt_qplib_rcfw_sbuf {
+ void *sb;
+ dma_addr_t dma_addr;
+ u32 size;
+};
+
+#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
+
+#define FIRMWARE_INITIALIZED_FLAG (0)
+#define FIRMWARE_FIRST_FLAG (31)
+#define FIRMWARE_STALL_DETECTED (3)
+#define ERR_DEVICE_DETACHED (4)
+struct bnxt_qplib_cmdq_mbox {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *prod;
+ void __iomem *db;
+};
+
+struct bnxt_qplib_cmdq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_cmdq_mbox cmdq_mbox;
+ wait_queue_head_t waitq;
+ unsigned long flags;
+ unsigned long last_seen;
+ u32 seq_num;
+};
+
+struct bnxt_qplib_creq_db {
+ struct bnxt_qplib_reg_desc reg;
+ void __iomem *db;
+ struct bnxt_qplib_db_info dbinfo;
+};
+
+struct bnxt_qplib_creq_stat {
+ u64 creq_arm_count;
+ u64 creq_tasklet_schedule_count;
+ u64 creq_qp_event_processed;
+ u64 creq_func_event_processed;
+};
+
+struct bnxt_qplib_creq_ctx {
+ struct bnxt_qplib_hwq hwq;
+ struct bnxt_qplib_creq_db creq_db;
+ struct bnxt_qplib_creq_stat stats;
+ aeq_handler_t aeq_handler;
+ u16 ring_id;
+ int msix_vec;
+ bool requested;
+ char *irq_name;
+};
+
+/* RCFW Communication Channels */
+#define BNXT_QPLIB_RCFW_SEND_RETRY_COUNT 4000
+struct bnxt_qplib_rcfw {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_cmdq_ctx cmdq;
+ struct bnxt_qplib_creq_ctx creq;
+ struct bnxt_qplib_crsqe *crsqe_tbl;
+ u32 rcfw_lat_slab_sec[RCFW_MAX_LATENCY_SEC_SLAB_INDEX];
+
+ /* Slow path Perf Stats */
+ bool sp_perf_stats_enabled;
+ u32 *rcfw_lat_slab_msec;
+ u64 *qp_create_stats;
+ u64 *qp_destroy_stats;
+ u64 *qp_modify_stats;
+ u64 *mr_create_stats;
+ u64 *mr_destroy_stats;
+ u32 qp_create_stats_id;
+ u32 qp_destroy_stats_id;
+ u32 qp_modify_stats_id;
+ u32 mr_create_stats_id;
+ u32 mr_destroy_stats_id;
+ bool init_oos_stats;
+ u64 oos_prev;
+ u32 num_irq_stopped;
+ u32 num_irq_started;
+ u32 poll_in_intr_en;
+ u32 poll_in_intr_dis;
+ atomic_t rcfw_intr_enabled;
+ u32 cmdq_full_dbg;
+ struct semaphore rcfw_inflight;
+ unsigned int curr_shadow_qd;
+ atomic_t timeout_send;
+ /* cached from chip cctx for quick reference in slow path */
+ u16 max_timeout;
+};
+
+struct bnxt_qplib_cmdqmsg {
+ struct cmdq_base *req;
+ struct creq_base *resp;
+ void *sb;
+ u32 req_sz;
+ u32 res_sz;
+ u8 block;
+ u8 qp_state;
+};
+
+static inline void bnxt_qplib_fill_cmdqmsg(struct bnxt_qplib_cmdqmsg *msg,
+ void *req, void *resp, void *sb,
+ u32 req_sz, u32 res_sz, u8 block)
+{
+ msg->req = req;
+ msg->resp = resp;
+ msg->sb = sb;
+ msg->req_sz = req_sz;
+ msg->res_sz = res_sz;
+ msg->block = block;
+}
+
+void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_res *res);
+int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res);
+void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
+void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
+ bool need_init);
+int bnxt_qplib_enable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw,
+ int msix_vector,
+ int cp_bar_reg_off,
+ aeq_handler_t aeq_handler);
+
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+ struct bnxt_qplib_rcfw *rcfw,
+ u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg *msg);
+
+int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int is_virtfn);
+void bnxt_qplib_mark_qp_error(void *qp_handle);
+int __check_cmdq_stall(struct bnxt_qplib_rcfw *rcfw,
+ u32 *cur_prod, u32 *cur_cons);
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
new file mode 100644
index 000000000000..f527af031176
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: QPLib resource manager
+ */
+
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/inetdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include <net/ipv6.h>
+#include <rdma/ib_verbs.h>
+
+#include "hsi_struct_def.h"
+#include "qplib_res.h"
+#include "qplib_sp.h"
+#include "qplib_rcfw.h"
+#include "bnxt.h"
+#include "bnxt_ulp.h"
+
+uint8_t _get_chip_gen_p5_type(struct bnxt_qplib_chip_ctx *cctx)
+{
+ /* Extend this for granular type */
+ return(BNXT_RE_DEFAULT);
+}
+
+inline bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr)
+{
+ return dattr->dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
+}
+
+/* PBL */
+static void __free_pbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pbl *pbl, bool is_umem)
+{
+ struct pci_dev *pdev;
+ int i;
+
+ pdev = res->pdev;
+ if (!is_umem) {
+ for (i = 0; i < pbl->pg_count; i++) {
+ if (pbl->pg_arr[i])
+ dma_free_coherent(&pdev->dev, pbl->pg_size,
+ (void *)((u64)pbl->pg_arr[i] &
+ PAGE_MASK),
+ pbl->pg_map_arr[i]);
+ else
+ dev_warn(&pdev->dev,
+ "QPLIB: PBL free pg_arr[%d] empty?!\n",
+ i);
+ pbl->pg_arr[i] = NULL;
+ }
+ }
+
+ if (pbl->pg_arr) {
+ vfree(pbl->pg_arr);
+ pbl->pg_arr = NULL;
+ }
+ if (pbl->pg_map_arr) {
+ vfree(pbl->pg_map_arr);
+ pbl->pg_map_arr = NULL;
+ }
+ pbl->pg_count = 0;
+ pbl->pg_size = 0;
+}
+
+struct qplib_sg {
+ dma_addr_t pg_map_arr;
+ u32 size;
+};
+
+static int __fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ int sg_indx, pg_indx, tmp_size, offset;
+ struct qplib_sg *tmp_sg = NULL;
+ struct scatterlist *sg;
+ u64 pmask, addr;
+
+ tmp_sg = vzalloc(sginfo->nmap * sizeof(struct qplib_sg));
+ if (!tmp_sg)
+ return -ENOMEM;
+
+ pmask = BIT_ULL(sginfo->pgshft) - 1;
+ sg_indx = 0;
+ for_each_sg(sginfo->sghead, sg, sginfo->nmap, sg_indx) {
+ tmp_sg[sg_indx].pg_map_arr = sg_dma_address(sg);
+ tmp_sg[sg_indx].size = sg_dma_len(sg);
+ }
+ pg_indx = 0;
+ for (sg_indx = 0; sg_indx < sginfo->nmap; sg_indx++) {
+ tmp_size = tmp_sg[sg_indx].size;
+ offset = 0;
+ while (tmp_size > 0) {
+ addr = tmp_sg[sg_indx].pg_map_arr + offset;
+ if ((!sg_indx && !pg_indx) || !(addr & pmask)) {
+ pbl->pg_map_arr[pg_indx] = addr & ~pmask;
+ pbl->pg_count++;
+ pg_indx++;
+ }
+ offset += sginfo->pgsize;
+ tmp_size -= sginfo->pgsize;
+ }
+ }
+
+ vfree(tmp_sg);
+ return 0;
+}
+
+static int bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ int rc = 0;
+
+ rc = __fill_user_dma_pages(pbl, sginfo);
+
+ return rc;
+}
+
+static int __alloc_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
+ struct bnxt_qplib_sg_info *sginfo)
+{
+ struct pci_dev *pdev;
+ bool is_umem = false;
+ int i;
+
+ if (sginfo->nopte)
+ return 0;
+
+ pdev = res->pdev;
+ /* page ptr arrays */
+ pbl->pg_arr = vmalloc(sginfo->npages * sizeof(void *));
+ if (!pbl->pg_arr)
+ return -ENOMEM;
+
+ pbl->pg_map_arr = vmalloc(sginfo->npages * sizeof(dma_addr_t));
+ if (!pbl->pg_map_arr) {
+ vfree(pbl->pg_arr);
+ return -ENOMEM;
+ }
+ pbl->pg_count = 0;
+ pbl->pg_size = sginfo->pgsize;
+ if (!sginfo->sghead) {
+ for (i = 0; i < sginfo->npages; i++) {
+ pbl->pg_arr[i] = dma_zalloc_coherent(&pdev->dev,
+ pbl->pg_size,
+ &pbl->pg_map_arr[i],
+ GFP_KERNEL);
+ if (!pbl->pg_arr[i])
+ goto fail;
+ pbl->pg_count++;
+ }
+ } else {
+ is_umem = true;
+ if (bnxt_qplib_fill_user_dma_pages(pbl, sginfo))
+ goto fail;
+ }
+
+ return 0;
+fail:
+ __free_pbl(res, pbl, is_umem);
+ return -ENOMEM;
+}
+
+/* HWQ */
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq)
+{
+ int i;
+
+ if (!hwq->max_elements)
+ return;
+ if (hwq->level >= PBL_LVL_MAX)
+ return;
+
+ for (i = 0; i < hwq->level + 1; i++) {
+ if (i == hwq->level)
+ __free_pbl(res, &hwq->pbl[i], hwq->is_user);
+ else
+ __free_pbl(res, &hwq->pbl[i], false);
+ }
+
+ hwq->level = PBL_LVL_MAX;
+ hwq->max_elements = 0;
+ hwq->element_size = 0;
+ hwq->prod = hwq->cons = 0;
+ hwq->cp_bit = 0;
+}
+
+/* All HWQs are power of 2 in size */
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr)
+{
+ u32 npages = 0, depth, stride, aux_pages = 0;
+ dma_addr_t *src_phys_ptr, **dst_virt_ptr;
+ struct bnxt_qplib_sg_info sginfo = {};
+ u32 aux_size = 0, npbl, npde;
+ void *umem;
+ struct bnxt_qplib_res *res;
+ u32 aux_slots, pg_size;
+ struct pci_dev *pdev;
+ int i, rc, lvl;
+
+ res = hwq_attr->res;
+ pdev = res->pdev;
+ umem = hwq_attr->sginfo->sghead;
+ pg_size = hwq_attr->sginfo->pgsize;
+ hwq->level = PBL_LVL_MAX;
+
+ depth = roundup_pow_of_two(hwq_attr->depth);
+ stride = roundup_pow_of_two(hwq_attr->stride);
+ if (hwq_attr->aux_depth) {
+ aux_slots = hwq_attr->aux_depth;
+ aux_size = roundup_pow_of_two(hwq_attr->aux_stride);
+ aux_pages = (aux_slots * aux_size) / pg_size;
+ if ((aux_slots * aux_size) % pg_size)
+ aux_pages++;
+ }
+
+ if (!umem) {
+ hwq->is_user = false;
+ npages = (depth * stride) / pg_size + aux_pages;
+ if ((depth * stride) % pg_size)
+ npages++;
+ if (!npages)
+ return -EINVAL;
+ hwq_attr->sginfo->npages = npages;
+ } else {
+ hwq->is_user = true;
+ npages = hwq_attr->sginfo->npages;
+ npages = (npages * (u64)pg_size) /
+ BIT_ULL(hwq_attr->sginfo->pgshft);
+ if ((hwq_attr->sginfo->npages * (u64)pg_size) %
+ BIT_ULL(hwq_attr->sginfo->pgshft))
+ npages++;
+ }
+ if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
+ /* This request is Level 0, map PTE */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_0;
+ goto done;
+ }
+
+ if (npages >= MAX_PBL_LVL_0_PGS) {
+ if (npages > MAX_PBL_LVL_1_PGS) {
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
+ /* 2 levels of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ npde = npbl >> MAX_PDL_LVL_SHIFT;
+ if (npbl % BIT(MAX_PDL_LVL_SHIFT))
+ npde++;
+ /* Alloc PDE pages */
+ sginfo.pgsize = npde * PAGE_SIZE;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1], &sginfo);
+ if (rc)
+ goto fail;
+ /* Fill PDL with PBL page pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+ if (hwq_attr->type == HWQ_TYPE_MR) {
+ /* For MR it is expected that we supply only 1 contiguous
+ * page, i.e., only 1 entry in the PDL, which will contain
+ * all the PBLs for the user-supplied memory region.
+ */
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[0][i] = src_phys_ptr[i] |
+ flag;
+ } else {
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | PTU_PDE_VALID;
+ }
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_2;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBLs with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_1].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_2].pg_map_arr;
+ for (i = 0; i < hwq->pbl[PBL_LVL_2].pg_count; i++) {
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | PTU_PTE_VALID;
+ }
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
+ /* Find the last pg of the size */
+ i = hwq->pbl[PBL_LVL_2].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ } else { /* pages < 512 npbl = 1, npde = 0 */
+ u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
+ 0 : PTU_PTE_VALID;
+
+ /* 1 level of indirection */
+ npbl = npages >> MAX_PBL_LVL_1_PGS_SHIFT;
+ if (npages % BIT(MAX_PBL_LVL_1_PGS_SHIFT))
+ npbl++;
+ sginfo.npages = npbl;
+ sginfo.pgsize = PAGE_SIZE;
+ /* Alloc PBL page */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
+ if (rc)
+ goto fail;
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_1],
+ hwq_attr->sginfo);
+ if (rc)
+ goto fail;
+ hwq->level = PBL_LVL_1;
+ if (hwq_attr->sginfo->nopte)
+ goto done;
+ /* Fill PBL with PTE pointers */
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
+ dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+ src_phys_ptr[i] | flag;
+ if (hwq_attr->type == HWQ_TYPE_QUEUE) {
+ /* Find the last pg of the size */
+ i = hwq->pbl[PBL_LVL_1].pg_count;
+ dst_virt_ptr[PTR_PG(i - 1)][PTR_IDX(i - 1)] |=
+ PTU_PTE_LAST;
+ if (i > 1)
+ dst_virt_ptr[PTR_PG(i - 2)]
+ [PTR_IDX(i - 2)] |=
+ PTU_PTE_NEXT_TO_LAST;
+ }
+ }
+ }
+done:
+ hwq->prod = 0;
+ hwq->cons = 0;
+ hwq->pdev = pdev;
+ hwq->depth = hwq_attr->depth;
+ hwq->max_elements = depth;
+ hwq->element_size = stride;
+ hwq->qe_ppg = pg_size / stride;
+
+ if (hwq->level >= PBL_LVL_MAX)
+ goto fail;
+ /* For direct access to the elements */
+ lvl = hwq->level;
+ if (hwq_attr->sginfo->nopte && hwq->level)
+ lvl = hwq->level - 1;
+ hwq->pbl_ptr = hwq->pbl[lvl].pg_arr;
+ hwq->pbl_dma_ptr = hwq->pbl[lvl].pg_map_arr;
+ spin_lock_init(&hwq->lock);
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwq(res, hwq);
+ return -ENOMEM;
+}
+
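+/*
+ * Illustration of the level selection above (a sketch, assuming 4K pages
+ * and the limits MAX_PBL_LVL_0_PGS = 1, MAX_PBL_LVL_1_PGS = 512):
+ *
+ *   1 page of queue memory    -> PBL_LVL_0: PTEs mapped directly
+ *   16 pages (e.g. 64KB)      -> PBL_LVL_1: 1 PBL page -> 16 PTE pages
+ *   1024 pages (e.g. 4MB)     -> PBL_LVL_2: 1 PDE page -> 2 PBL pages
+ *                                -> 1024 PTE pages
+ */
+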
+/* Context Tables */
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_ctx *hctx;
+ int i;
+
+ hctx = res->hctx;
+ bnxt_qplib_free_hwq(res, &hctx->qp_ctx.hwq);
+ bnxt_qplib_free_hwq(res, &hctx->mrw_ctx.hwq);
+ bnxt_qplib_free_hwq(res, &hctx->srq_ctx.hwq);
+ bnxt_qplib_free_hwq(res, &hctx->cq_ctx.hwq);
+ bnxt_qplib_free_hwq(res, &hctx->tim_ctx.hwq);
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.qtbl[i]);
+ /* restore the original PDE level before destroy */
+ hctx->tqm_ctx.pde.level = hctx->tqm_ctx.pde_level;
+ bnxt_qplib_free_hwq(res, &hctx->tqm_ctx.pde);
+}
+
+static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *hctx)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_tqm_ctx *tqmctx;
+ int rc = 0;
+ int i;
+
+ tqmctx = &hctx->tqm_ctx;
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.res = res;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ hwq_attr.depth = 512;
+ hwq_attr.stride = sizeof(u64);
+ /* Alloc pdl buffer */
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->pde, &hwq_attr);
+ if (rc)
+ goto out;
+ /* Save original pdl level */
+ tqmctx->pde_level = tqmctx->pde.level;
+
+ hwq_attr.stride = 1;
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++) {
+ if (!tqmctx->qcount[i])
+ continue;
+ hwq_attr.depth = hctx->qp_ctx.max * tqmctx->qcount[i];
+ rc = bnxt_qplib_alloc_init_hwq(&tqmctx->qtbl[i], &hwq_attr);
+ if (rc)
+ goto out;
+ }
+out:
+ return rc;
+}
+
+static void bnxt_qplib_map_tqm_pgtbl(struct bnxt_qplib_tqm_ctx *ctx)
+{
+ struct bnxt_qplib_hwq *qtbl_hwq;
+ dma_addr_t *dma_ptr;
+ __le64 **pbl_ptr, *ptr;
+ int i, j, k;
+ int fnz_idx = -1;
+ int pg_count;
+
+ pbl_ptr = (__le64 **)ctx->pde.pbl_ptr;
+
+ for (i = 0, j = 0; i < MAX_TQM_ALLOC_REQ;
+ i++, j += MAX_TQM_ALLOC_BLK_SIZE) {
+ qtbl_hwq = &ctx->qtbl[i];
+ if (!qtbl_hwq->max_elements)
+ continue;
+ if (fnz_idx == -1)
+ fnz_idx = i; /* first non-zero index */
+ switch (qtbl_hwq->level) {
+ case PBL_LVL_2:
+ pg_count = qtbl_hwq->pbl[PBL_LVL_1].pg_count;
+ for (k = 0; k < pg_count; k++) {
+ ptr = &pbl_ptr[PTR_PG(j + k)][PTR_IDX(j + k)];
+ dma_ptr = &qtbl_hwq->pbl[PBL_LVL_1].pg_map_arr[k];
+ *ptr = cpu_to_le64(*dma_ptr | PTU_PTE_VALID);
+ }
+ break;
+ case PBL_LVL_1:
+ case PBL_LVL_0:
+ default:
+ ptr = &pbl_ptr[PTR_PG(j)][PTR_IDX(j)];
+ *ptr = cpu_to_le64(qtbl_hwq->pbl[PBL_LVL_0].pg_map_arr[0] |
+ PTU_PTE_VALID);
+ break;
+ }
+ }
+ if (fnz_idx == -1)
+ fnz_idx = 0;
+ /* update pde level as per page table programming */
+ ctx->pde.level = (ctx->qtbl[fnz_idx].level == PBL_LVL_2) ? PBL_LVL_2 :
+ ctx->qtbl[fnz_idx].level + 1;
+}
+
+static int bnxt_qplib_setup_tqm_rings(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *hctx)
+{
+ int rc = 0;
+
+ rc = bnxt_qplib_alloc_tqm_rings(res, hctx);
+ if (rc)
+ goto fail;
+
+ bnxt_qplib_map_tqm_pgtbl(&hctx->tqm_ctx);
+fail:
+ return rc;
+}
+
+/*
+ * Routine: bnxt_qplib_alloc_hwctx
+ * Description:
+ * Context tables are memories which are used by the chip.
+ * The 6 tables defined are:
+ * QPC ctx - holds QP states
+ * MRW ctx - holds memory region and window
+ * SRQ ctx - holds shared RQ states
+ * CQ ctx - holds completion queue states
+ * TQM ctx - holds Tx Queue Manager context
+ * TIM ctx - holds timer context
+ * Depending on the size of the table requested, either a single-page
+ * Page Buffer List or a 1-to-2-level indirection Page Directory List
+ * plus 1 PBL is used.
+ * A table might be employed as follows:
+ * For 0 < ctx size <= 1 PAGE, 0 levels of indirection are used
+ * For 1 PAGE < ctx size <= 512 pages, 1 level of indirection is used
+ * For 512 pages < ctx size <= MAX, 2 levels of indirection are used
+ * Returns:
+ * 0 if success, else -ERRORS
+ */
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ struct bnxt_qplib_ctx *hctx;
+ struct bnxt_qplib_hwq *hwq;
+ int rc = 0;
+
+ hctx = res->hctx;
+ /* QPC Tables */
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.pgshft = PAGE_SHIFT;
+ hwq_attr.sginfo = &sginfo;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = hctx->qp_ctx.max;
+ hwq_attr.stride = BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ hwq = &hctx->qp_ctx.hwq;
+ rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* MRW Tables */
+ hwq_attr.depth = hctx->mrw_ctx.max;
+ hwq_attr.stride = BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE;
+ hwq = &hctx->mrw_ctx.hwq;
+ rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* SRQ Tables */
+ hwq_attr.depth = hctx->srq_ctx.max;
+ hwq_attr.stride = BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE;
+ hwq = &hctx->srq_ctx.hwq;
+ rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* CQ Tables */
+ hwq_attr.depth = hctx->cq_ctx.max;
+ hwq_attr.stride = BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE;
+ hwq = &hctx->cq_ctx.hwq;
+ rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ /* TQM Buffer */
+ rc = bnxt_qplib_setup_tqm_rings(res, hctx);
+ if (rc)
+ goto fail;
+ /* TIM Buffer */
+ hwq_attr.depth = hctx->qp_ctx.max * 16;
+ hwq_attr.stride = 1;
+ hwq = &hctx->tim_ctx.hwq;
+ rc = bnxt_qplib_alloc_init_hwq(hwq, &hwq_attr);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ bnxt_qplib_free_hwctx(res);
+ return rc;
+}
+
+/* GUID */
+void bnxt_qplib_get_guid(const u8 *dev_addr, u8 *guid)
+{
+ u8 mac[ETH_ALEN];
+
+ /* MAC-48 to EUI-64 mapping */
+ memcpy(mac, dev_addr, ETH_ALEN);
+ guid[0] = mac[0] ^ 2;
+ guid[1] = mac[1];
+ guid[2] = mac[2];
+ guid[3] = 0xff;
+ guid[4] = 0xfe;
+ guid[5] = mac[3];
+ guid[6] = mac[4];
+ guid[7] = mac[5];
+}
+
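+/*
+ * Example of the MAC-48 to EUI-64 expansion above (the universal/local
+ * bit of the first octet is inverted): dev_addr 00:11:22:33:44:55 yields
+ * the guid 02:11:22:ff:fe:33:44:55.
+ */
+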
+static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl;
+
+ sgid_tbl = &res->sgid_tbl;
+
+ if (sgid_tbl->tbl) {
+ kfree(sgid_tbl->tbl);
+ sgid_tbl->tbl = NULL;
+ kfree(sgid_tbl->hw_id);
+ sgid_tbl->hw_id = NULL;
+ kfree(sgid_tbl->ctx);
+ sgid_tbl->ctx = NULL;
+ kfree(sgid_tbl->vlan);
+ sgid_tbl->vlan = NULL;
+ } else {
+ dev_dbg(&res->pdev->dev, "QPLIB: SGID tbl not present");
+ }
+ sgid_tbl->max = 0;
+ sgid_tbl->active = 0;
+}
+
+static void bnxt_qplib_free_reftbls(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_reftbl *tbl;
+
+ tbl = &res->reftbl.srqref;
+ vfree(tbl->rec);
+
+ tbl = &res->reftbl.cqref;
+ vfree(tbl->rec);
+
+ tbl = &res->reftbl.qpref;
+ vfree(tbl->rec);
+}
+
+static int bnxt_qplib_alloc_reftbl(struct bnxt_qplib_reftbl *tbl, u32 max)
+{
+ tbl->max = max;
+ tbl->rec = vzalloc(sizeof(*tbl->rec) * max);
+ if (!tbl->rec)
+ return -ENOMEM;
+ spin_lock_init(&tbl->lock);
+ return 0;
+}
+
+static int bnxt_qplib_alloc_reftbls(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dev_attr *dattr)
+{
+ u32 max_cq = BNXT_QPLIB_MAX_CQ_COUNT;
+ struct bnxt_qplib_reftbl *tbl;
+ u32 res_cnt;
+ int rc;
+
+ /*
+ * Allocate one extra entry to hold QP1 info and store
+ * QP1 info at the last entry of the table.
+ * Decrement tbl->max by one so that the modulo
+ * operation used to get the qp table index from the
+ * qp id returns a value between 0 and max_qp - 1.
+ */
+ res_cnt = max_t(u32, BNXT_QPLIB_MAX_QPC_COUNT + 1, dattr->max_qp);
+ tbl = &res->reftbl.qpref;
+ rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
+ if (rc)
+ goto fail;
+ tbl->max--;
+
+ if (_is_chip_gen_p5_p7(res->cctx))
+ max_cq = BNXT_QPLIB_MAX_CQ_COUNT_P5;
+ res_cnt = max_t(u32, max_cq, dattr->max_cq);
+ tbl = &res->reftbl.cqref;
+ rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
+ if (rc)
+ goto fail;
+
+ res_cnt = max_t(u32, BNXT_QPLIB_MAX_SRQC_COUNT, dattr->max_srq);
+ tbl = &res->reftbl.srqref;
+ rc = bnxt_qplib_alloc_reftbl(tbl, res_cnt);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ return rc;
+}
+
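+/*
+ * Sketch of the resulting QP indexing (see also map_qp_id_to_tbl_indx()
+ * in qplib_res.h): regular QP ids hash into the table via GET_TBL_INDEX(),
+ * while QP1 is special-cased to the reserved last record at index tbl->max.
+ */
+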
+static int bnxt_qplib_alloc_sgid_tbl(struct bnxt_qplib_res *res, u16 max)
+{
+ struct bnxt_qplib_sgid_tbl *sgid_tbl;
+
+ sgid_tbl = &res->sgid_tbl;
+
+ sgid_tbl->tbl = kcalloc(max, sizeof(*sgid_tbl->tbl), GFP_KERNEL);
+ if (!sgid_tbl->tbl)
+ return -ENOMEM;
+
+ sgid_tbl->hw_id = kcalloc(max, sizeof(u32), GFP_KERNEL);
+ if (!sgid_tbl->hw_id)
+ goto free_tbl;
+
+ sgid_tbl->ctx = kcalloc(max, sizeof(void *), GFP_KERNEL);
+ if (!sgid_tbl->ctx)
+ goto free_hw_id;
+
+ sgid_tbl->vlan = kcalloc(max, sizeof(u8), GFP_KERNEL);
+ if (!sgid_tbl->vlan)
+ goto free_ctx;
+
+ sgid_tbl->max = max;
+ return 0;
+free_ctx:
+ kfree(sgid_tbl->ctx);
+free_hw_id:
+ kfree(sgid_tbl->hw_id);
+free_tbl:
+ kfree(sgid_tbl->tbl);
+ return -ENOMEM;
+}
+
+static void bnxt_qplib_cleanup_sgid_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl)
+{
+ int i;
+
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)))
+ bnxt_qplib_del_sgid(sgid_tbl, &sgid_tbl->tbl[i].gid,
+ sgid_tbl->tbl[i].vlan_id, true);
+ }
+ memset(sgid_tbl->tbl, 0, sizeof(*sgid_tbl->tbl) * sgid_tbl->max);
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+ memset(sgid_tbl->vlan, 0, sizeof(u8) * sgid_tbl->max);
+ sgid_tbl->active = 0;
+}
+
+static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct ifnet *netdev)
+{
+ u32 i;
+
+ for (i = 0; i < sgid_tbl->max; i++)
+ sgid_tbl->tbl[i].vlan_id = 0xffff;
+ memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
+}
+
+/* PDs */
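+/*
+ * Note on the bitmap convention used below: pdt->tbl is initialized with
+ * every bit set, a set bit means the PD id is free, and allocation clears
+ * the bit (the inverse of the usual bitmap-allocator polarity).
+ */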
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res, struct bnxt_qplib_pd *pd)
+{
+ u32 bit_num;
+ int rc = 0;
+ struct bnxt_qplib_pd_tbl *pdt = &res->pd_tbl;
+
+ mutex_lock(&res->pd_tbl_lock);
+ bit_num = find_first_bit(pdt->tbl, pdt->max);
+ if (bit_num == pdt->max - 1) { /* Last bit is reserved */
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ /* Found unused PD */
+ clear_bit(bit_num, pdt->tbl);
+ pd->id = bit_num;
+fail:
+ mutex_unlock(&res->pd_tbl_lock);
+ return rc;
+}
+
+int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd_tbl *pdt,
+ struct bnxt_qplib_pd *pd)
+{
+ mutex_lock(&res->pd_tbl_lock);
+ if (test_and_set_bit(pd->id, pdt->tbl)) {
+ dev_warn(&res->pdev->dev, "Freeing an unused PD? pdn = %d\n",
+ pd->id);
+ mutex_unlock(&res->pd_tbl_lock);
+ return -EINVAL;
+ }
+ /* Reset to reserved pdid. */
+ pd->id = pdt->max - 1;
+
+ mutex_unlock(&res->pd_tbl_lock);
+ return 0;
+}
+
+static void bnxt_qplib_free_pd_tbl(struct bnxt_qplib_pd_tbl *pdt)
+{
+ if (pdt->tbl) {
+ kfree(pdt->tbl);
+ pdt->tbl = NULL;
+ }
+ pdt->max = 0;
+}
+
+static int bnxt_qplib_alloc_pd_tbl(struct bnxt_qplib_res *res, u32 max)
+{
+ struct bnxt_qplib_pd_tbl *pdt;
+ u32 bytes;
+
+ pdt = &res->pd_tbl;
+
+ max++; /* One extra for reserved pdid. */
+ bytes = DIV_ROUND_UP(max, 8);
+
+ if (!bytes)
+ bytes = 1;
+ pdt->tbl = kmalloc(bytes, GFP_KERNEL);
+ if (!pdt->tbl) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: PD tbl allocation failed for size = %d\n", bytes);
+ return -ENOMEM;
+ }
+ pdt->max = max;
+ memset((u8 *)pdt->tbl, 0xFF, bytes);
+ mutex_init(&res->pd_tbl_lock);
+
+ return 0;
+}
+
+/* DPIs */
+int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi,
+ void *app, u8 type)
+{
+ struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
+ struct bnxt_qplib_reg_desc *reg;
+ u32 bit_num;
+ u64 umaddr;
+ int rc = 0;
+
+ reg = &dpit->wcreg;
+ mutex_lock(&res->dpi_tbl_lock);
+ if (type == BNXT_QPLIB_DPI_TYPE_WC && _is_chip_p7(res->cctx) &&
+ !dpit->avail_ppp) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ bit_num = find_first_bit(dpit->tbl, dpit->max);
+ if (bit_num == dpit->max) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+ /* Found unused DPI */
+ clear_bit(bit_num, dpit->tbl);
+ dpit->app_tbl[bit_num] = app;
+ dpi->bit = bit_num;
+ dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
+
+ umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
+ dpi->umdbr = umaddr;
+ switch (type) {
+ case BNXT_QPLIB_DPI_TYPE_KERNEL:
+ /* privileged dbr was already mapped just initialize it. */
+ dpi->umdbr = dpit->ucreg.bar_base +
+ dpit->ucreg.offset + bit_num * PAGE_SIZE;
+ dpi->dbr = dpit->priv_db;
+ dpi->dpi = dpi->bit;
+ break;
+ case BNXT_QPLIB_DPI_TYPE_WC:
+ dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
+ if (_is_chip_p7(res->cctx) && dpi->dbr)
+ dpit->avail_ppp--;
+ break;
+ default:
+ dpi->dbr = ioremap(umaddr, PAGE_SIZE);
+ }
+ if (!dpi->dbr) {
+ dev_err(&res->pdev->dev, "QPLIB: DB remap failed, type = %d\n",
+ type);
+ rc = -ENOMEM;
+ }
+ dpi->type = type;
+fail:
+ mutex_unlock(&res->dpi_tbl_lock);
+ return rc;
+}
+
+int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi)
+{
+ struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
+ int rc = 0;
+
+ mutex_lock(&res->dpi_tbl_lock);
+ if (dpi->bit >= dpit->max) {
+ dev_warn(&res->pdev->dev,
+ "Invalid DPI? dpi = %d, bit = %d\n",
+ dpi->dpi, dpi->bit);
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL) {
+ if (dpi->type == BNXT_QPLIB_DPI_TYPE_WC &&
+ _is_chip_p7(res->cctx) && dpi->dbr)
+ dpit->avail_ppp++;
+ pci_iounmap(res->pdev, dpi->dbr);
+ }
+
+ if (test_and_set_bit(dpi->bit, dpit->tbl)) {
+ dev_warn(&res->pdev->dev,
+ "Freeing an unused DPI? dpi = %d, bit = %d\n",
+ dpi->dpi, dpi->bit);
+ rc = -EINVAL;
+ goto fail;
+ }
+ if (dpit->app_tbl)
+ dpit->app_tbl[dpi->bit] = NULL;
+ memset(dpi, 0, sizeof(*dpi));
+fail:
+ mutex_unlock(&res->dpi_tbl_lock);
+ return rc;
+}
+
+static void bnxt_qplib_free_dpi_tbl(struct bnxt_qplib_dpi_tbl *dpit)
+{
+ kfree(dpit->tbl);
+ kfree(dpit->app_tbl);
+ dpit->tbl = NULL;
+ dpit->app_tbl = NULL;
+ dpit->max = 0;
+}
+
+static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dev_attr *dev_attr,
+ u8 pppp_factor)
+{
+ struct bnxt_qplib_dpi_tbl *dpit;
+ struct bnxt_qplib_reg_desc *reg;
+ unsigned long bar_len;
+ u32 dbr_offset;
+ u32 bytes;
+
+ dpit = &res->dpi_tbl;
+ reg = &dpit->wcreg;
+
+ if (!_is_chip_gen_p5_p7(res->cctx)) {
+ /* Offset should come from the L2 driver */
+ dbr_offset = dev_attr->l2_db_size;
+ dpit->ucreg.offset = dbr_offset;
+ dpit->wcreg.offset = dbr_offset;
+ }
+
+ bar_len = pci_resource_len(res->pdev, reg->bar_id);
+ dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
+ if (dev_attr->max_dpi)
+ dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);
+
+ dpit->app_tbl = kzalloc(dpit->max * sizeof(void *), GFP_KERNEL);
+ if (!dpit->app_tbl) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: DPI app tbl allocation failed");
+ return -ENOMEM;
+ }
+
+ bytes = dpit->max >> 3;
+ if (!bytes)
+ bytes = 1;
+
+ dpit->tbl = kmalloc(bytes, GFP_KERNEL);
+ if (!dpit->tbl) {
+ kfree(dpit->app_tbl);
+ dev_err(&res->pdev->dev,
+ "QPLIB: DPI tbl allocation failed for size = %d\n",
+ bytes);
+ return -ENOMEM;
+ }
+
+ memset((u8 *)dpit->tbl, 0xFF, bytes);
+ /*
+ * On SR2, 2nd doorbell page of each function
+ * is reserved for L2 PPP. Now that the tbl is
+ * initialized, mark it as unavailable. By default
+ * RoCE can make use of the 512 extended pages for
+ * PPP.
+ */
+ if (_is_chip_p7(res->cctx)) {
+ clear_bit(1, dpit->tbl);
+ if (pppp_factor)
+ dpit->avail_ppp =
+ BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES / pppp_factor;
+ }
+ mutex_init(&res->dpi_tbl_lock);
+ dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
+
+ return 0;
+}
+
+/* Stats */
+void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_stats *stats)
+{
+ struct pci_dev *pdev;
+
+ pdev = res->pdev;
+ if (stats->dma)
+ dma_free_coherent(&pdev->dev, stats->size,
+ stats->dma, stats->dma_map);
+
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+}
+
+int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats)
+{
+ cctx->hw_stats_size = 168;
+
+ memset(stats, 0, sizeof(*stats));
+ stats->fw_id = -1;
+ stats->size = cctx->hw_stats_size;
+ stats->dma = dma_alloc_coherent(&pdev->dev, stats->size,
+ &stats->dma_map, GFP_KERNEL);
+ if (!stats->dma) {
+ dev_err(&pdev->dev, "QPLIB: Stats DMA allocation failed");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/* Resource */
+int bnxt_qplib_stop_res(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_stop_func_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_stop_func req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_STOP_FUNC,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ return rc;
+}
+
+void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res)
+{
+ bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
+}
+
+int bnxt_qplib_init_tbls(struct bnxt_qplib_res *res)
+{
+ bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
+
+ return 0;
+}
+
+void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res)
+{
+ bnxt_qplib_free_sgid_tbl(res);
+ bnxt_qplib_free_pd_tbl(&res->pd_tbl);
+ bnxt_qplib_free_dpi_tbl(&res->dpi_tbl);
+ bnxt_qplib_free_reftbls(res);
+}
+
+int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res, u8 pppp_factor)
+{
+ struct bnxt_qplib_dev_attr *dev_attr;
+ int rc = 0;
+
+ dev_attr = res->dattr;
+
+ rc = bnxt_qplib_alloc_reftbls(res, dev_attr);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_sgid_tbl(res, dev_attr->max_sgid);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_pd_tbl(res, dev_attr->max_pd);
+ if (rc)
+ goto fail;
+
+ rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr, pppp_factor);
+ if (rc)
+ goto fail;
+
+ return 0;
+fail:
+ bnxt_qplib_free_tbls(res);
+ return rc;
+}
+
+void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_reg_desc *reg;
+
+ reg = &res->dpi_tbl.ucreg;
+ if (reg->bar_reg)
+ pci_iounmap(res->pdev, reg->bar_reg);
+ reg->bar_reg = NULL;
+ reg->bar_base = 0;
+ reg->len = 0;
+ reg->bar_id = 0; /* Zero? or ff */
+}
+
+int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
+{
+ struct bnxt_qplib_reg_desc *ucreg;
+ struct bnxt_qplib_reg_desc *wcreg;
+
+ if (!res || !res->pdev)
+ return -1;
+
+ wcreg = &res->dpi_tbl.wcreg;
+ wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
+ wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);
+ /* No need to set the wcreg->len here */
+
+ ucreg = &res->dpi_tbl.ucreg;
+ ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
+ ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
+
+ ucreg->offset = 65536;
+ ucreg->len = ucreg->offset + PAGE_SIZE;
+
+ if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
+ dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d\n",
+ (int)ucreg->len);
+ return -EINVAL;
+ }
+ ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
+ if (!ucreg->bar_reg) {
+ dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
+ * @dev: the PCI device
+ * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
+ * PCI_EXP_DEVCAP2_ATOMIC_COMP32
+ * PCI_EXP_DEVCAP2_ATOMIC_COMP64
+ * PCI_EXP_DEVCAP2_ATOMIC_COMP128
+ *
+ * Return 0 if all upstream bridges support AtomicOp routing, egress
+ * blocking is disabled on all upstream ports, and the root port supports
+ * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
+ * AtomicOp completion), or negative otherwise.
+ */
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
+{
+ struct pci_bus *bus = dev->bus;
+ struct pci_dev *bridge;
+ u32 cap;
+
+ if (!pci_is_pcie(dev))
+ return -EINVAL;
+
+ /*
+ * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
+ * AtomicOp requesters. For now, we only support endpoints as
+ * requesters and root ports as completers. No endpoints as
+ * completers, and no peer-to-peer.
+ */
+
+ switch (pci_pcie_type(dev)) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_LEG_END:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ bridge = bus->self;
+
+ pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+
+ switch (pci_pcie_type(bridge)) {
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+ return -EINVAL;
+ break;
+
+ /* Ensure root port supports all the sizes we care about */
+ case PCI_EXP_TYPE_ROOT_PORT:
+ if ((cap & cap_mask) != cap_mask)
+ return -EINVAL;
+ break;
+ }
+ return 0;
+}
+
+int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev)
+{
+ u16 ctl2;
+
+ if (pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
+ pci_enable_atomic_ops_to_root(dev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
+ return -EOPNOTSUPP;
+
+ pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
+ return !(ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
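+
+/*
+ * Return-value note for the helper above: 0 means AtomicOp requests are
+ * already enabled in Device Control 2; -EOPNOTSUPP means the root port
+ * completes neither 32-bit nor 64-bit AtomicOps; a return of 1 means the
+ * requester-enable bit is still clear.
+ */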
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.h b/sys/dev/bnxt/bnxt_re/qplib_res.h
new file mode 100644
index 000000000000..6468207a49aa
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.h
@@ -0,0 +1,840 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: QPLib resource manager (header)
+ */
+
+#ifndef __BNXT_QPLIB_RES_H__
+#define __BNXT_QPLIB_RES_H__
+
+#include "hsi_struct_def.h"
+
+extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
+
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
+#define CHIP_NUM_58818 0xd818
+#define CHIP_NUM_57608 0x1760
+
+#define BNXT_QPLIB_MAX_QPC_COUNT (64 * 1024)
+#define BNXT_QPLIB_MAX_SRQC_COUNT (64 * 1024)
+#define BNXT_QPLIB_MAX_CQ_COUNT (64 * 1024)
+#define BNXT_QPLIB_MAX_CQ_COUNT_P5 (128 * 1024)
+
+#define BNXT_QPLIB_DBR_VALID (0x1UL << 26)
+#define BNXT_QPLIB_DBR_EPOCH_SHIFT 24
+#define BNXT_QPLIB_DBR_TOGGLE_SHIFT 25
+
+#define BNXT_QPLIB_DBR_PF_DB_OFFSET 0x10000
+#define BNXT_QPLIB_DBR_VF_DB_OFFSET 0x4000
+
+#define BNXT_QPLIB_DBR_KEY_INVALID -1
+
+/* chip gen type */
+#define BNXT_RE_DEFAULT 0xf
+
+enum bnxt_qplib_wqe_mode {
+ BNXT_QPLIB_WQE_MODE_STATIC = 0x00,
+ BNXT_QPLIB_WQE_MODE_VARIABLE = 0x01,
+ BNXT_QPLIB_WQE_MODE_INVALID = 0x02
+};
+
+#define BNXT_RE_PUSH_MODE_NONE 0
+#define BNXT_RE_PUSH_MODE_WCB 1
+#define BNXT_RE_PUSH_MODE_PPP 2
+#define BNXT_RE_PUSH_ENABLED(mode) ((mode) == BNXT_RE_PUSH_MODE_WCB ||\
+ (mode) == BNXT_RE_PUSH_MODE_PPP)
+#define BNXT_RE_PPP_ENABLED(cctx) ((cctx)->modes.db_push_mode ==\
+ BNXT_RE_PUSH_MODE_PPP)
+#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
+#define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
+#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
+#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
+
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
+
+struct bnxt_qplib_drv_modes {
+ u8 wqe_mode;
+ u8 te_bypass;
+ u8 db_push;
+ /* To control advanced cc params display in configfs */
+ u8 cc_pr_mode;
+ /* Other modes to follow here e.g. GSI QP mode */
+ u8 dbr_pacing;
+ u8 dbr_pacing_ext;
+ u8 dbr_drop_recov;
+ u8 dbr_primary_pf;
+ u8 dbr_pacing_v0;
+};
+
+struct bnxt_qplib_chip_ctx {
+ u16 chip_num;
+ u8 chip_rev;
+ u8 chip_metal;
+ u64 hwrm_intf_ver;
+ struct bnxt_qplib_drv_modes modes;
+ u32 dbr_stat_db_fifo;
+ u32 dbr_aeq_arm_reg;
+ u32 dbr_throttling_reg;
+ u16 hw_stats_size;
+ u16 hwrm_cmd_max_timeout;
+};
+
+static inline bool _is_chip_num_p7(u16 chip_num)
+{
+ return (chip_num == CHIP_NUM_58818 ||
+ chip_num == CHIP_NUM_57608);
+}
+
+static inline bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return _is_chip_num_p7(cctx->chip_num);
+}
+
+/* SR2 is Gen P5 */
+static inline bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+ cctx->chip_num == CHIP_NUM_57504 ||
+ cctx->chip_num == CHIP_NUM_57502);
+}
+
+static inline bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return (_is_chip_gen_p5(cctx) || _is_chip_p7(cctx));
+}
+
+static inline bool _is_wqe_mode_variable(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE;
+}
+
+struct bnxt_qplib_db_pacing_data {
+ u32 do_pacing;
+ u32 pacing_th;
+ u32 dev_err_state;
+ u32 alarm_th;
+ u32 grc_reg_offset;
+ u32 fifo_max_depth;
+ u32 fifo_room_mask;
+ u8 fifo_room_shift;
+};
+
+static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_pacing;
+}
+
+static inline u8 bnxt_qplib_dbr_pacing_ext_en(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_pacing_ext;
+}
+
+static inline u8 bnxt_qplib_dbr_pacing_is_primary_pf(struct bnxt_qplib_chip_ctx *cctx)
+{
+ return cctx->modes.dbr_primary_pf;
+}
+
+static inline void bnxt_qplib_dbr_pacing_set_primary_pf
+ (struct bnxt_qplib_chip_ctx *cctx, u8 val)
+{
+ cctx->modes.dbr_primary_pf = val;
+}
+
+/* Defines for handling the HWRM version check */
+#define HWRM_VERSION_DEV_ATTR_MAX_DPI 0x1000A0000000D
+#define HWRM_VERSION_ROCE_STATS_FN_ID 0x1000A00000045
+
+#define PTR_CNT_PER_PG (PAGE_SIZE / sizeof(void *))
+#define PTR_MAX_IDX_PER_PG (PTR_CNT_PER_PG - 1)
+#define PTR_PG(x) (((x) & ~PTR_MAX_IDX_PER_PG) / PTR_CNT_PER_PG)
+#define PTR_IDX(x) ((x) & PTR_MAX_IDX_PER_PG)
+
+#define HWQ_CMP(idx, hwq) ((idx) & ((hwq)->max_elements - 1))
+#define HWQ_FREE_SLOTS(hwq) (hwq->max_elements - \
+ ((HWQ_CMP(hwq->prod, hwq)\
+ - HWQ_CMP(hwq->cons, hwq))\
+ & (hwq->max_elements - 1)))
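+/*
+ * Worked example of the ring accounting above (illustrative numbers): with
+ * max_elements = 8, prod = 5 and cons = 3, HWQ_CMP() masks both to 5 and 3,
+ * so HWQ_FREE_SLOTS() = 8 - ((5 - 3) & 7) = 6 free slots.
+ */
+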
+enum bnxt_qplib_hwq_type {
+ HWQ_TYPE_CTX,
+ HWQ_TYPE_QUEUE,
+ HWQ_TYPE_L2_CMPL,
+ HWQ_TYPE_MR
+};
+
+#define MAX_PBL_LVL_0_PGS 1
+#define MAX_PBL_LVL_1_PGS 512
+#define MAX_PBL_LVL_1_PGS_SHIFT 9
+#define MAX_PDL_LVL_SHIFT 9
+
+enum bnxt_qplib_pbl_lvl {
+ PBL_LVL_0,
+ PBL_LVL_1,
+ PBL_LVL_2,
+ PBL_LVL_MAX
+};
+
+#define ROCE_PG_SIZE_4K (4 * 1024)
+#define ROCE_PG_SIZE_8K (8 * 1024)
+#define ROCE_PG_SIZE_64K (64 * 1024)
+#define ROCE_PG_SIZE_2M (2 * 1024 * 1024)
+#define ROCE_PG_SIZE_8M (8 * 1024 * 1024)
+#define ROCE_PG_SIZE_1G (1024 * 1024 * 1024)
+enum bnxt_qplib_hwrm_pg_size {
+ BNXT_QPLIB_HWRM_PG_SIZE_4K = 0,
+ BNXT_QPLIB_HWRM_PG_SIZE_8K = 1,
+ BNXT_QPLIB_HWRM_PG_SIZE_64K = 2,
+ BNXT_QPLIB_HWRM_PG_SIZE_2M = 3,
+ BNXT_QPLIB_HWRM_PG_SIZE_8M = 4,
+ BNXT_QPLIB_HWRM_PG_SIZE_1G = 5,
+};
+
+struct bnxt_qplib_reg_desc {
+ u8 bar_id;
+ resource_size_t bar_base;
+ unsigned long offset;
+ void __iomem *bar_reg;
+ size_t len;
+};
+
+struct bnxt_qplib_pbl {
+ u32 pg_count;
+ u32 pg_size;
+ void **pg_arr;
+ dma_addr_t *pg_map_arr;
+};
+
+struct bnxt_qplib_sg_info {
+ struct scatterlist *sghead;
+ u32 nmap;
+ u32 npages;
+ u32 pgshft;
+ u32 pgsize;
+ bool nopte;
+};
+
+struct bnxt_qplib_hwq_attr {
+ struct bnxt_qplib_res *res;
+ struct bnxt_qplib_sg_info *sginfo;
+ enum bnxt_qplib_hwq_type type;
+ u32 depth;
+ u32 stride;
+ u32 aux_stride;
+ u32 aux_depth;
+};
+
+struct bnxt_qplib_hwq {
+ struct pci_dev *pdev;
+ spinlock_t lock;
+ struct bnxt_qplib_pbl pbl[PBL_LVL_MAX];
+ enum bnxt_qplib_pbl_lvl level; /* 0, 1, or 2 */
+ void **pbl_ptr; /* ptr for easy access
+ to the PBL entries */
+ dma_addr_t *pbl_dma_ptr; /* ptr for easy access
+ to the dma_addr */
+ u32 max_elements;
+ u32 depth; /* original requested depth */
+ u16 element_size; /* Size of each entry */
+ u16 qe_ppg; /* queue entry per page */
+
+ u32 prod; /* raw */
+ u32 cons; /* raw */
+ u8 cp_bit;
+ u8 is_user;
+ u64 *pad_pg;
+ u32 pad_stride;
+ u32 pad_pgofft;
+};
+
+struct bnxt_qplib_db_info {
+ void __iomem *db;
+ void __iomem *priv_db;
+ struct bnxt_qplib_hwq *hwq;
+ struct bnxt_qplib_res *res;
+ u32 xid;
+ u32 max_slot;
+ u32 flags;
+ u8 toggle;
+ spinlock_t lock;
+ u64 shadow_key;
+ u64 shadow_key_arm_ena;
+ u32 seed; /* For DB pacing */
+};
+
+enum bnxt_qplib_db_info_flags_mask {
+ BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT = 0x0UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_CONS_MASK = 0x1UL,
+ BNXT_QPLIB_FLAG_EPOCH_PROD_MASK = 0x2UL,
+};
+
+enum bnxt_qplib_db_epoch_flag_shift {
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT = BNXT_QPLIB_DBR_EPOCH_SHIFT,
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT = (BNXT_QPLIB_DBR_EPOCH_SHIFT - 1)
+};
+
+/* Tables */
+struct bnxt_qplib_pd_tbl {
+ unsigned long *tbl;
+ u32 max;
+};
+
+struct bnxt_qplib_sgid_tbl {
+ struct bnxt_qplib_gid_info *tbl;
+ u16 *hw_id;
+ u16 max;
+ u16 active;
+ void *ctx;
+ bool *vlan;
+};
+
+enum {
+ BNXT_QPLIB_DPI_TYPE_KERNEL = 0,
+ BNXT_QPLIB_DPI_TYPE_UC = 1,
+ BNXT_QPLIB_DPI_TYPE_WC = 2
+};
+
+struct bnxt_qplib_dpi {
+ u32 dpi;
+ u32 bit;
+ void __iomem *dbr;
+ u64 umdbr;
+ u8 type;
+};
+
+#define BNXT_QPLIB_MAX_EXTENDED_PPP_PAGES 512
+struct bnxt_qplib_dpi_tbl {
+ void **app_tbl;
+ unsigned long *tbl;
+ u16 max;
+ u16 avail_ppp;
+ struct bnxt_qplib_reg_desc ucreg; /* Hold entire DB bar. */
+ struct bnxt_qplib_reg_desc wcreg;
+ void __iomem *priv_db;
+};
+
+struct bnxt_qplib_stats {
+ dma_addr_t dma_map;
+ void *dma;
+ u32 size;
+ u32 fw_id;
+};
+
+struct bnxt_qplib_vf_res {
+ u32 max_qp;
+ u32 max_mrw;
+ u32 max_srq;
+ u32 max_cq;
+ u32 max_gid;
+};
+
+#define BNXT_QPLIB_MAX_QP_CTX_ENTRY_SIZE 448
+#define BNXT_QPLIB_MAX_SRQ_CTX_ENTRY_SIZE 64
+#define BNXT_QPLIB_MAX_CQ_CTX_ENTRY_SIZE 64
+#define BNXT_QPLIB_MAX_MRW_CTX_ENTRY_SIZE 128
+
+#define MAX_TQM_ALLOC_REQ 48
+#define MAX_TQM_ALLOC_BLK_SIZE 8
+struct bnxt_qplib_tqm_ctx {
+ struct bnxt_qplib_hwq pde;
+ enum bnxt_qplib_pbl_lvl pde_level; /* Original level */
+ struct bnxt_qplib_hwq qtbl[MAX_TQM_ALLOC_REQ];
+ u8 qcount[MAX_TQM_ALLOC_REQ];
+};
+
+struct bnxt_qplib_hctx {
+ struct bnxt_qplib_hwq hwq;
+ u32 max;
+};
+
+struct bnxt_qplib_refrec {
+ void *handle;
+ u32 xid;
+};
+
+struct bnxt_qplib_reftbl {
+ struct bnxt_qplib_refrec *rec;
+ u32 max;
+ spinlock_t lock; /* reftbl lock */
+};
+
+struct bnxt_qplib_reftbls {
+ struct bnxt_qplib_reftbl qpref;
+ struct bnxt_qplib_reftbl cqref;
+ struct bnxt_qplib_reftbl srqref;
+};
+
+#define GET_TBL_INDEX(id, tbl) ((id) % (((tbl)->max) - 1))
+static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_reftbl *tbl)
+{
+ return (qid == 1) ? tbl->max : GET_TBL_INDEX(qid, tbl);
+}
+
+/*
+ * This structure includes the number of various roce resource table sizes
+ * actually allocated by the driver. May be less than the maximums the firmware
+ * allows if the driver imposes lower limits than the firmware.
+ */
+struct bnxt_qplib_ctx {
+ struct bnxt_qplib_hctx qp_ctx;
+ struct bnxt_qplib_hctx mrw_ctx;
+ struct bnxt_qplib_hctx srq_ctx;
+ struct bnxt_qplib_hctx cq_ctx;
+ struct bnxt_qplib_hctx tim_ctx;
+ struct bnxt_qplib_tqm_ctx tqm_ctx;
+
+ struct bnxt_qplib_stats stats;
+ struct bnxt_qplib_stats stats2;
+ struct bnxt_qplib_vf_res vf_res;
+};
+
+struct bnxt_qplib_res {
+ struct pci_dev *pdev;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_dev_attr *dattr;
+ struct bnxt_qplib_ctx *hctx;
+ struct ifnet *netdev;
+ struct bnxt_en_dev *en_dev;
+
+ struct bnxt_qplib_rcfw *rcfw;
+
+ struct bnxt_qplib_pd_tbl pd_tbl;
+ struct mutex pd_tbl_lock;
+ struct bnxt_qplib_sgid_tbl sgid_tbl;
+ struct bnxt_qplib_dpi_tbl dpi_tbl;
+ struct mutex dpi_tbl_lock;
+ struct bnxt_qplib_reftbls reftbl;
+ bool prio;
+ bool is_vf;
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+};
+
+struct bnxt_qplib_query_stats_info {
+ u32 function_id;
+ u8 collection_id;
+ bool vf_valid;
+};
+
+struct bnxt_qplib_query_qp_info {
+ u32 function_id;
+ u32 num_qps;
+ u32 start_index;
+ bool vf_valid;
+};
+
+struct bnxt_qplib_query_fn_info {
+ bool vf_valid;
+ u32 host;
+ u32 filter;
+};
+
+#define to_bnxt_qplib(ptr, type, member) \
+ container_of(ptr, type, member)
+
+struct bnxt_qplib_pd;
+struct bnxt_qplib_dev_attr;
+
+bool _is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx);
+bool _is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx);
+bool _is_chip_a0(struct bnxt_qplib_chip_ctx *cctx);
+bool _is_chip_p7(struct bnxt_qplib_chip_ctx *cctx);
+bool _is_alloc_mr_unified(struct bnxt_qplib_dev_attr *dattr);
+void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_hwq *hwq);
+int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ struct bnxt_qplib_hwq_attr *hwq_attr);
+void bnxt_qplib_get_guid(const u8 *dev_addr, u8 *guid);
+int bnxt_qplib_alloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd *pd);
+int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_pd_tbl *pd_tbl,
+ struct bnxt_qplib_pd *pd);
+int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi,
+ void *app, u8 type);
+int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_dpi *dpi);
+int bnxt_qplib_stop_res(struct bnxt_qplib_res *res);
+void bnxt_qplib_clear_tbls(struct bnxt_qplib_res *res);
+int bnxt_qplib_init_tbls(struct bnxt_qplib_res *res);
+void bnxt_qplib_free_tbls(struct bnxt_qplib_res *res);
+int bnxt_qplib_alloc_tbls(struct bnxt_qplib_res *res, u8 pppp_factor);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res);
+int bnxt_qplib_alloc_stat_mem(struct pci_dev *pdev,
+ struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stat_mem(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_stats *stats);
+
+int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
+void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
+int bnxt_qplib_enable_atomic_ops_to_root(struct pci_dev *dev);
+u8 _get_chip_gen_p5_type(struct bnxt_qplib_chip_ctx *cctx);
+
+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+ u32 indx, u64 *pg)
+{
+ u32 pg_num, pg_idx;
+
+ pg_num = (indx / hwq->qe_ppg);
+ pg_idx = (indx % hwq->qe_ppg);
+ if (pg)
+ *pg = (u64)&hwq->pbl_ptr[pg_num];
+ return (void *)((u8 *)hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
+
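+/*
+ * Example (illustrative numbers): with 4K pages and element_size = 64,
+ * qe_ppg is 64, so bnxt_qplib_get_qe(hwq, 70, NULL) resolves to element 6
+ * of PBL page 1 (pg_num = 70 / 64 = 1, pg_idx = 70 % 64 = 6).
+ */
+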
+static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
+ struct bnxt_qplib_hwq *hwq, u32 cnt)
+{
+ /* move prod and update toggle/epoch if wrap around */
+ hwq->prod += cnt;
+ if (hwq->prod >= hwq->depth) {
+ hwq->prod %= hwq->depth;
+ dbinfo->flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_PROD_SHIFT;
+ }
+}
+
+static inline void bnxt_qplib_hwq_incr_cons(u32 max_elements, u32 *cons,
+ u32 cnt, u32 *dbinfo_flags)
+{
+ /* move cons and update toggle/epoch if wrap around */
+ *cons += cnt;
+ if (*cons >= max_elements) {
+ *cons %= max_elements;
+ *dbinfo_flags ^= 1UL << BNXT_QPLIB_FLAG_EPOCH_CONS_SHIFT;
+ }
+}
+
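+/*
+ * Epoch illustration (assumed numbers): with depth = 4, advancing prod from
+ * 3 by 2 wraps it to 1 and flips the EPOCH_PROD flag; the doorbell helpers
+ * below fold these flags into the doorbell index so the device can tell a
+ * wrapped index from a stale one.
+ */
+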
+static inline u8 _get_pte_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[hwq->level];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+ return pg_size;
+}
+
+static inline u64 _get_base_addr(struct bnxt_qplib_hwq *hwq)
+{
+ return hwq->pbl[PBL_LVL_0].pg_map_arr[0];
+}
+
+static inline u8 _get_base_pg_size(struct bnxt_qplib_hwq *hwq)
+{
+ u8 pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ struct bnxt_qplib_pbl *pbl;
+
+ pbl = &hwq->pbl[PBL_LVL_0];
+ switch (pbl->pg_size) {
+ case ROCE_PG_SIZE_4K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_4K;
+ break;
+ case ROCE_PG_SIZE_8K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8K;
+ break;
+ case ROCE_PG_SIZE_64K: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_64K;
+ break;
+ case ROCE_PG_SIZE_2M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_2M;
+ break;
+ case ROCE_PG_SIZE_8M: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_8M;
+ break;
+ case ROCE_PG_SIZE_1G: pg_size = BNXT_QPLIB_HWRM_PG_SIZE_1G;
+ break;
+ default:
+ break;
+ }
+ return pg_size;
+}
+
+static inline enum bnxt_qplib_hwq_type _get_hwq_type(struct bnxt_qplib_res *res)
+{
+ return _is_chip_gen_p5_p7(res->cctx) ? HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
+}
+
+static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+}
+
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && _is_chip_p7(ctx)) || (!virtfn)));
+}
+
+static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ (CREQ_QUERY_FUNC_RESP_SB_HW_REQUESTER_RETX_ENABLED |
+ CREQ_QUERY_FUNC_RESP_SB_HW_RESPONDER_RETX_ENABLED);
+}
+
+/* Disable HW_RETX */
+#define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+
+static inline bool _is_cqe_v2_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_CQE_V2;
+}
+
+#define BNXT_DB_FIFO_ROOM_MASK 0x1fff8000
+#define BNXT_DB_FIFO_ROOM_SHIFT 15
+#define BNXT_MAX_FIFO_DEPTH 0x2c00
+
+#define BNXT_DB_PACING_ALGO_THRESHOLD 250
+#define BNXT_DEFAULT_PACING_PROBABILITY 0xFFFF
+
+#define BNXT_DBR_PACING_WIN_BASE 0x2000
+#define BNXT_DBR_PACING_WIN_MAP_OFF 4
+/* Map a pacing-related GRC register offset into the pacing window */
+#define BNXT_DBR_PACING_WIN_OFF(reg) (BNXT_DBR_PACING_WIN_BASE + \
+ ((reg) & BNXT_DBR_PACING_WIN_MAP_OFF))
+
+static inline void bnxt_qplib_ring_db32(struct bnxt_qplib_db_info *info,
+ bool arm)
+{
+ u32 key = 0;
+
+ key = info->hwq->cons | (CMPL_DOORBELL_IDX_VALID |
+ (CMPL_DOORBELL_KEY_CMPL & CMPL_DOORBELL_KEY_MASK));
+ if (!arm)
+ key |= CMPL_DOORBELL_MASK;
+ /* memory barrier */
+ wmb();
+ writel(key, info->db);
+}
+
+#define BNXT_QPLIB_INIT_DBHDR(xid, type, indx, toggle) \
+ (((u64)(((xid) & DBC_DBC_XID_MASK) | DBC_DBC_PATH_ROCE | \
+ (type) | BNXT_QPLIB_DBR_VALID) << 32) | (indx) | \
+ ((toggle) << (BNXT_QPLIB_DBR_TOGGLE_SHIFT)))
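+
+/*
+ * Layout produced by BNXT_QPLIB_INIT_DBHDR(): the upper 32 bits carry the
+ * masked xid, the RoCE path, the doorbell type and BNXT_QPLIB_DBR_VALID;
+ * the lower 32 bits carry the ring index, with the toggle folded in at
+ * BNXT_QPLIB_DBR_TOGGLE_SHIFT.
+ */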
+
+static inline void bnxt_qplib_write_db(struct bnxt_qplib_db_info *info,
+ u64 key, void __iomem *db,
+ u64 *shadow_key)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ *shadow_key = key;
+ writeq(key, db);
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+static inline void __replay_writeq(u64 key, void __iomem *db)
+{
+ /* No need to replay uninitialised shadow_keys */
+ if (key != BNXT_QPLIB_DBR_KEY_INVALID)
+ writeq(key, db);
+}
+
+static inline void bnxt_qplib_replay_db(struct bnxt_qplib_db_info *info,
+ bool is_arm_ena)
+{
+ if (!spin_trylock_irq(&info->lock))
+ return;
+
+ if (is_arm_ena)
+ __replay_writeq(info->shadow_key_arm_ena, info->priv_db);
+ else
+ __replay_writeq(info->shadow_key, info->db);
+
+ spin_unlock_irq(&info->lock);
+}
+
+static inline void bnxt_qplib_ring_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u32 indx;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMALL ||
+ type == DBC_DBC_TYPE_CQ_ARMSE)
+ toggle = info->toggle;
+
+ indx = ((info->hwq->cons & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_CONS_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_CONS_SHIFT));
+
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, toggle);
+ bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
+}
+
+static inline void bnxt_qplib_ring_prod_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u32 indx;
+
+ indx = (((info->hwq->prod / info->max_slot) & DBC_DBC_INDEX_MASK) |
+ ((info->flags & BNXT_QPLIB_FLAG_EPOCH_PROD_MASK) <<
+ BNXT_QPLIB_DB_EPOCH_PROD_SHIFT));
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, indx, 0);
+ bnxt_qplib_write_db(info, key, info->db, &info->shadow_key);
+}
+
+static inline void bnxt_qplib_armen_db(struct bnxt_qplib_db_info *info,
+ u32 type)
+{
+ u64 key = 0;
+ u8 toggle = 0;
+
+ if (type == DBC_DBC_TYPE_CQ_ARMENA)
+ toggle = info->toggle;
+ /* Index always at 0 */
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, type, 0, toggle);
+ bnxt_qplib_write_db(info, key, info->priv_db,
+ &info->shadow_key_arm_ena);
+}
+
+static inline void bnxt_qplib_cq_coffack_db(struct bnxt_qplib_db_info *info)
+{
+ u64 key = 0;
+
+ /* Index always at 0 */
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_CQ_CUTOFF_ACK, 0, 0);
+ bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
+}
+
+static inline void bnxt_qplib_srq_arm_db(struct bnxt_qplib_db_info *info)
+{
+ u64 key = 0;
+
+ /* Index always at 0 */
+ key = BNXT_QPLIB_INIT_DBHDR(info->xid, DBC_DBC_TYPE_SRQ_ARM, 0, 0);
+ bnxt_qplib_write_db(info, key, info->priv_db, &info->shadow_key);
+}
+
+static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
+ struct bnxt_qplib_chip_ctx *cctx,
+ bool arm)
+{
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+ if (_is_chip_gen_p5_p7(cctx))
+ bnxt_qplib_ring_db(info, type);
+ else
+ bnxt_qplib_ring_db32(info, arm);
+}
+
+struct bnxt_qplib_max_res {
+ u32 max_qp;
+ u32 max_mr;
+ u32 max_cq;
+ u32 max_srq;
+ u32 max_ah;
+ u32 max_pd;
+};
+
+/*
+ * Defines for maximum resources supported for chip revisions
+ * Maximum PDs supported are restricted to Max QPs
+ * GENP4 - Wh+
+ * DEFAULT - Thor
+ */
+#define BNXT_QPLIB_GENP4_PF_MAX_QP (16 * 1024)
+#define BNXT_QPLIB_GENP4_PF_MAX_MRW (16 * 1024)
+#define BNXT_QPLIB_GENP4_PF_MAX_CQ (16 * 1024)
+#define BNXT_QPLIB_GENP4_PF_MAX_SRQ (1 * 1024)
+#define BNXT_QPLIB_GENP4_PF_MAX_AH (16 * 1024)
+#define BNXT_QPLIB_GENP4_PF_MAX_PD BNXT_QPLIB_GENP4_PF_MAX_QP
+
+#define BNXT_QPLIB_DEFAULT_PF_MAX_QP (64 * 1024)
+#define BNXT_QPLIB_DEFAULT_PF_MAX_MRW (256 * 1024)
+#define BNXT_QPLIB_DEFAULT_PF_MAX_CQ (64 * 1024)
+#define BNXT_QPLIB_DEFAULT_PF_MAX_SRQ (4 * 1024)
+#define BNXT_QPLIB_DEFAULT_PF_MAX_AH (64 * 1024)
+#define BNXT_QPLIB_DEFAULT_PF_MAX_PD BNXT_QPLIB_DEFAULT_PF_MAX_QP
+
+#define BNXT_QPLIB_DEFAULT_VF_MAX_QP (6 * 1024)
+#define BNXT_QPLIB_DEFAULT_VF_MAX_MRW (6 * 1024)
+#define BNXT_QPLIB_DEFAULT_VF_MAX_CQ (6 * 1024)
+#define BNXT_QPLIB_DEFAULT_VF_MAX_SRQ (4 * 1024)
+#define BNXT_QPLIB_DEFAULT_VF_MAX_AH (6 * 1024)
+#define BNXT_QPLIB_DEFAULT_VF_MAX_PD BNXT_QPLIB_DEFAULT_VF_MAX_QP
+
+static inline void bnxt_qplib_max_res_supported(struct bnxt_qplib_chip_ctx *cctx,
+ struct bnxt_qplib_res *qpl_res,
+ struct bnxt_qplib_max_res *max_res,
+ bool vf_res_limit)
+{
+ switch (cctx->chip_num) {
+ case CHIP_NUM_57608:
+ case CHIP_NUM_58818:
+ case CHIP_NUM_57504:
+ case CHIP_NUM_57502:
+ case CHIP_NUM_57508:
+ if (!qpl_res->is_vf) {
+ max_res->max_qp = BNXT_QPLIB_DEFAULT_PF_MAX_QP;
+ max_res->max_mr = BNXT_QPLIB_DEFAULT_PF_MAX_MRW;
+ max_res->max_cq = BNXT_QPLIB_DEFAULT_PF_MAX_CQ;
+ max_res->max_srq = BNXT_QPLIB_DEFAULT_PF_MAX_SRQ;
+ max_res->max_ah = BNXT_QPLIB_DEFAULT_PF_MAX_AH;
+ max_res->max_pd = BNXT_QPLIB_DEFAULT_PF_MAX_PD;
+ } else {
+ max_res->max_qp = BNXT_QPLIB_DEFAULT_VF_MAX_QP;
+ max_res->max_mr = BNXT_QPLIB_DEFAULT_VF_MAX_MRW;
+ max_res->max_cq = BNXT_QPLIB_DEFAULT_VF_MAX_CQ;
+ max_res->max_srq = BNXT_QPLIB_DEFAULT_VF_MAX_SRQ;
+ max_res->max_ah = BNXT_QPLIB_DEFAULT_VF_MAX_AH;
+ max_res->max_pd = BNXT_QPLIB_DEFAULT_VF_MAX_PD;
+ }
+ break;
+ default:
+ /* Wh+/Stratus max resources */
+ max_res->max_qp = BNXT_QPLIB_GENP4_PF_MAX_QP;
+ max_res->max_mr = BNXT_QPLIB_GENP4_PF_MAX_MRW;
+ max_res->max_cq = BNXT_QPLIB_GENP4_PF_MAX_CQ;
+ max_res->max_srq = BNXT_QPLIB_GENP4_PF_MAX_SRQ;
+ max_res->max_ah = BNXT_QPLIB_GENP4_PF_MAX_AH;
+ max_res->max_pd = BNXT_QPLIB_GENP4_PF_MAX_PD;
+ break;
+ }
+}
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_sp.c b/sys/dev/bnxt/bnxt_re/qplib_sp.c
new file mode 100644
index 000000000000..c414718a816f
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_sp.c
@@ -0,0 +1,1234 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Slow Path Operators
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <linux/printk.h>
+
+#include "hsi_struct_def.h"
+#include "qplib_tlv.h"
+#include "qplib_res.h"
+#include "qplib_rcfw.h"
+#include "qplib_sp.h"
+
+const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0 }};
+
+/* Device */
+static u8 bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+{
+ u16 pcie_ctl2 = 0;
+
+ if (!_is_chip_gen_p5_p7(rcfw->res->cctx))
+ return false;
+ pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
+ return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
+}
+
+static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw, char *fw_ver)
+{
+ struct creq_query_version_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_version req = {};
+ int rc = 0;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_VERSION,
+ sizeof(req));
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc) {
+ dev_err(&rcfw->pdev->dev, "QPLIB: Failed to query version\n");
+ return;
+ }
+ fw_ver[0] = resp.fw_maj;
+ fw_ver[1] = resp.fw_minor;
+ fw_ver[2] = resp.fw_bld;
+ fw_ver[3] = resp.fw_rsvd;
+}
+
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
+{
+ struct creq_query_func_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct creq_query_func_resp_sb *sb;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ struct bnxt_qplib_dev_attr *attr;
+ struct bnxt_qplib_chip_ctx *cctx;
+ struct cmdq_query_func req = {};
+ u8 *tqm_alloc;
+ int i, rc = 0;
+ u32 temp;
+ u8 chip_gen = BNXT_RE_DEFAULT;
+
+ cctx = rcfw->res->cctx;
+ attr = rcfw->res->dattr;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_FUNC,
+ sizeof(req));
+
+ sbuf.size = sizeof(*sb);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ sb = sbuf.sb;
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+ /* Extract the context from the side buffer */
+ chip_gen = _get_chip_gen_p5_type(cctx);
+ attr->max_qp = le32_to_cpu(sb->max_qp);
+ attr->max_qp = min_t(u32, attr->max_qp, BNXT_RE_MAX_QP_SUPPORTED(chip_gen));
+ /* max_qp value reported by FW does not include the QP1 */
+ attr->max_qp += 1;
+ attr->max_qp_rd_atom =
+ sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom;
+ attr->max_qp_init_rd_atom =
+ sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+ /* Report 1 less than the max_qp_wqes reported by FW as driver adds
+ * one extra entry while creating the qp
+ */
+ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
+ /* Adjust for max_qp_wqes for variable wqe */
+ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) {
+ attr->max_qp_wqes = (BNXT_MAX_SQ_SIZE) /
+ (BNXT_MAX_VAR_WQE_SIZE / BNXT_SGE_SIZE) - 1;
+ }
+ if (!_is_chip_gen_p5_p7(cctx)) {
+ /*
+		 * 128 WQEs need to be reserved for the HW (8916), so avoid
+		 * reporting the full maximum. This applies to gen-P4 only.
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS;
+ }
+ attr->max_qp_sges = sb->max_sge;
+ if (_is_chip_gen_p5_p7(cctx) &&
+ cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
+ attr->max_qp_sges = sb->max_sge_var_wqe;
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq = min_t(u32, attr->max_cq, BNXT_RE_MAX_CQ_SUPPORTED(chip_gen));
+
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+ attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
+
+ attr->max_cq_sges = attr->max_qp_sges;
+ attr->max_mr = le32_to_cpu(sb->max_mr);
+ attr->max_mr = min_t(u32, attr->max_mr, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));
+ attr->max_mw = le32_to_cpu(sb->max_mw);
+ attr->max_mw = min_t(u32, attr->max_mw, BNXT_RE_MAX_MRW_SUPPORTED(chip_gen));
+
+ attr->max_mr_size = le64_to_cpu(sb->max_mr_size);
+ attr->max_pd = BNXT_QPLIB_MAX_PD;
+ attr->max_raw_ethy_qp = le32_to_cpu(sb->max_raw_eth_qp);
+ attr->max_ah = le32_to_cpu(sb->max_ah);
+ attr->max_ah = min_t(u32, attr->max_ah, BNXT_RE_MAX_AH_SUPPORTED(chip_gen));
+
+ attr->max_fmr = le32_to_cpu(sb->max_fmr);
+ attr->max_map_per_fmr = sb->max_map_per_fmr;
+
+ attr->max_srq = le16_to_cpu(sb->max_srq);
+ attr->max_srq = min_t(u32, attr->max_srq, BNXT_RE_MAX_SRQ_SUPPORTED(chip_gen));
+ attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
+ attr->max_srq_sges = sb->max_srq_sge;
+ attr->max_pkey = 1;
+
+ attr->max_inline_data = !cctx->modes.wqe_mode ?
+ le32_to_cpu(sb->max_inline_data) :
+ le16_to_cpu(sb->max_inline_data_var_wqe);
+ if (!_is_chip_p7(cctx)) {
+ attr->l2_db_size = (sb->l2_db_space_size + 1) *
+ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+ }
+ attr->max_sgid = le32_to_cpu(sb->max_gid);
+
+ /* TODO: remove this hack for statically allocated gid_map */
+ bnxt_re_set_max_gid(&attr->max_sgid);
+
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+ attr->page_size_cap = BIT_ULL(28) | BIT_ULL(21) | BIT_ULL(12);
+
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
+
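+	/*
+	 * Each 32-bit word of tqm_alloc_reqs packs four 8-bit per-ring
+	 * allocation requests; unpack them a byte at a time.
+	 */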
+ for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
+ temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
+ tqm_alloc = (u8 *)&temp;
+ attr->tqm_alloc_reqs[i * 4] = *tqm_alloc;
+ attr->tqm_alloc_reqs[i * 4 + 1] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 2] = *(++tqm_alloc);
+ attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
+ }
+
+ if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
+ attr->max_dpi = le32_to_cpu(sb->max_dpi);
+
+ attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res)
+{
+ struct creq_set_func_resources_resp resp = {};
+ struct cmdq_set_func_resources req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw *rcfw;
+ struct bnxt_qplib_ctx *hctx;
+ int rc = 0;
+
+ rcfw = res->rcfw;
+ hctx = res->hctx;
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_FUNC_RESOURCES,
+ sizeof(req));
+
+ req.number_of_qp = cpu_to_le32(hctx->qp_ctx.max);
+ req.number_of_mrw = cpu_to_le32(hctx->mrw_ctx.max);
+ req.number_of_srq = cpu_to_le32(hctx->srq_ctx.max);
+ req.number_of_cq = cpu_to_le32(hctx->cq_ctx.max);
+
+ req.max_qp_per_vf = cpu_to_le32(hctx->vf_res.max_qp);
+ req.max_mrw_per_vf = cpu_to_le32(hctx->vf_res.max_mrw);
+ req.max_srq_per_vf = cpu_to_le32(hctx->vf_res.max_srq);
+ req.max_cq_per_vf = cpu_to_le32(hctx->vf_res.max_cq);
+ req.max_gid_per_vf = cpu_to_le32(hctx->vf_res.max_gid);
+
+ /* Keep the old stats context id of PF */
+ req.stat_ctx_id = cpu_to_le32(hctx->stats.fw_id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set function resources\n");
+
+ return rc;
+}
+
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_modify_gid_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_gid req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MODIFY_GID,
+ sizeof(req));
+
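+	/* The GID is passed to FW as four 32-bit words in reverse order. */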
+ req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
+ if (res->prio) {
+ req.vlan |= cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
+
+ /* MAC in network format */
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+ req.gid_index = cpu_to_le16(gid_idx);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: update SGID table failed\n");
+ return rc;
+ }
+ return 0;
+}
+
+/* SGID */
+int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+ struct bnxt_qplib_gid *gid)
+{
+ if (index > sgid_tbl->max) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: Index %d exceeded SGID table max (%d)\n",
+ index, sgid_tbl->max);
+ return -EINVAL;
+ }
+ memcpy(gid, &sgid_tbl->tbl[index].gid, sizeof(*gid));
+ return 0;
+}
+
+int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid,
+ u16 vlan_id, bool update)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ int index;
+
+ if (sgid_tbl == NULL) {
+ dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated\n");
+ return -EINVAL;
+ }
+ /* Do we need a sgid_lock here? */
+ if (!sgid_tbl->active) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: SGID table has no active entries\n");
+ return -ENOMEM;
+ }
+ for (index = 0; index < sgid_tbl->max; index++) {
+ if (!memcmp(&sgid_tbl->tbl[index].gid, gid, sizeof(*gid)) &&
+ vlan_id == sgid_tbl->tbl[index].vlan_id)
+ break;
+ }
+ if (index == sgid_tbl->max) {
+ dev_warn(&res->pdev->dev, "GID not found in the SGID table\n");
+ return 0;
+ }
+
+ if (update) {
+ struct creq_delete_gid_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_delete_gid req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DELETE_GID,
+ sizeof(req));
+ if (sgid_tbl->hw_id[index] == 0xFFFF) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: GID entry contains an invalid HW id");
+ return -EINVAL;
+ }
+ req.gid_index = cpu_to_le16(sgid_tbl->hw_id[index]);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ }
+ memcpy(&sgid_tbl->tbl[index].gid, &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero));
+ sgid_tbl->tbl[index].vlan_id = 0xFFFF;
+ sgid_tbl->vlan[index] = false;
+ sgid_tbl->active--;
+ dev_dbg(&res->pdev->dev,
+ "QPLIB: SGID deleted hw_id[0x%x] = 0x%x active = 0x%x\n",
+ index, sgid_tbl->hw_id[index], sgid_tbl->active);
+ sgid_tbl->hw_id[index] = (u16)-1;
+
+ return 0;
+}
+
+int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ const union ib_gid *gid, const u8 *smac, u16 vlan_id,
+ bool update, u32 *index)
+{
+ struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
+ struct bnxt_qplib_res,
+ sgid_tbl);
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ int i, free_idx;
+
+ if (sgid_tbl == NULL) {
+ dev_err(&res->pdev->dev, "QPLIB: SGID table not allocated\n");
+ return -EINVAL;
+ }
+ /* Do we need a sgid_lock here? */
+ if (sgid_tbl->active == sgid_tbl->max) {
+ dev_err(&res->pdev->dev, "QPLIB: SGID table is full\n");
+ return -ENOMEM;
+ }
+ free_idx = sgid_tbl->max;
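+	/* One pass: look for an existing match and remember the first free slot. */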
+ for (i = 0; i < sgid_tbl->max; i++) {
+ if (!memcmp(&sgid_tbl->tbl[i], gid, sizeof(*gid)) &&
+ sgid_tbl->tbl[i].vlan_id == vlan_id) {
+ dev_dbg(&res->pdev->dev,
+ "QPLIB: SGID entry already exist in entry %d!\n",
+ i);
+ *index = i;
+ return -EALREADY;
+ } else if (!memcmp(&sgid_tbl->tbl[i], &bnxt_qplib_gid_zero,
+ sizeof(bnxt_qplib_gid_zero)) &&
+ free_idx == sgid_tbl->max) {
+ free_idx = i;
+ }
+ }
+ if (free_idx == sgid_tbl->max) {
+ dev_err(&res->pdev->dev,
+ "QPLIB: SGID table is FULL but count is not MAX??\n");
+ return -ENOMEM;
+ }
+ if (update) {
+ struct creq_add_gid_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_add_gid req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ADD_GID,
+ sizeof(req));
+
+ req.gid[0] = cpu_to_be32(((u32 *)gid->raw)[3]);
+ req.gid[1] = cpu_to_be32(((u32 *)gid->raw)[2]);
+ req.gid[2] = cpu_to_be32(((u32 *)gid->raw)[1]);
+ req.gid[3] = cpu_to_be32(((u32 *)gid->raw)[0]);
+		/* The driver should ensure that all RoCE traffic is VLAN
+		 * tagged whenever it runs on a non-zero VLAN ID or with a
+		 * non-zero priority.
+ */
+ if ((vlan_id != 0xFFFF) || res->prio) {
+ if (vlan_id != 0xFFFF)
+ req.vlan = cpu_to_le16(vlan_id &
+ CMDQ_ADD_GID_VLAN_VLAN_ID_MASK);
+ req.vlan |=
+ cpu_to_le16(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
+ CMDQ_ADD_GID_VLAN_VLAN_EN);
+ }
+
+ /* MAC in network format */
+ req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
+ req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
+ req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ sgid_tbl->hw_id[free_idx] = le32_to_cpu(resp.xid);
+ }
+
+ if (vlan_id != 0xFFFF)
+ sgid_tbl->vlan[free_idx] = true;
+
+ memcpy(&sgid_tbl->tbl[free_idx], gid, sizeof(*gid));
+ sgid_tbl->tbl[free_idx].vlan_id = vlan_id;
+ sgid_tbl->active++;
+ dev_dbg(&res->pdev->dev,
+ "QPLIB: SGID added hw_id[0x%x] = 0x%x active = 0x%x\n",
+ free_idx, sgid_tbl->hw_id[free_idx], sgid_tbl->active);
+
+ *index = free_idx;
+ /* unlock */
+ return 0;
+}
+
+/* AH */
+int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_create_ah_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_create_ah req = {};
+ u32 temp32[4];
+ u16 temp16[3];
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_CREATE_AH,
+ sizeof(req));
+
+ memcpy(temp32, ah->dgid.data, sizeof(struct bnxt_qplib_gid));
+ req.dgid[0] = cpu_to_le32(temp32[0]);
+ req.dgid[1] = cpu_to_le32(temp32[1]);
+ req.dgid[2] = cpu_to_le32(temp32[2]);
+ req.dgid[3] = cpu_to_le32(temp32[3]);
+
+ req.type = ah->nw_type;
+ req.hop_limit = ah->hop_limit;
+ req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id[ah->sgid_index]);
+ req.dest_vlan_id_flow_label = cpu_to_le32((ah->flow_label &
+ CMDQ_CREATE_AH_FLOW_LABEL_MASK) |
+ CMDQ_CREATE_AH_DEST_VLAN_ID_MASK);
+ req.pd_id = cpu_to_le32(ah->pd->id);
+ req.traffic_class = ah->traffic_class;
+
+ /* MAC in network format */
+ memcpy(temp16, ah->dmac, ETH_ALEN);
+ req.dest_mac[0] = cpu_to_le16(temp16[0]);
+ req.dest_mac[1] = cpu_to_le16(temp16[1]);
+ req.dest_mac[2] = cpu_to_le16(temp16[2]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ ah->id = le32_to_cpu(resp.xid);
+ /* for Cu/Wh AHID 0 is not valid */
+ if (!_is_chip_gen_p5_p7(res->cctx) && !ah->id)
+ rc = -EINVAL;
+
+ return rc;
+}
+
+int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_ah_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_ah req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DESTROY_AH,
+ sizeof(req));
+
+ req.ah_cid = cpu_to_le32(ah->id);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ return rc;
+}
+
+/* MRW */
+int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
+{
+ struct creq_deallocate_key_resp resp = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct cmdq_deallocate_key req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc;
+
+ if (mrw->lkey == 0xFFFFFFFF) {
+ dev_info(&res->pdev->dev,
+ "QPLIB: SP: Free a reserved lkey MRW\n");
+ return 0;
+ }
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEALLOCATE_KEY,
+ sizeof(req));
+
+ req.mrw_flags = mrw->type;
+
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
+ req.key = cpu_to_le32(mrw->rkey);
+ else
+ req.key = cpu_to_le32(mrw->lkey);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ if (mrw->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
+
+ return 0;
+}
+
+int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_allocate_mrw_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_allocate_mrw req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_ALLOCATE_MRW,
+ sizeof(req));
+
+ req.pd_id = cpu_to_le32(mrw->pd->id);
+ req.mrw_flags = mrw->type;
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR &&
+ mrw->flags & BNXT_QPLIB_FR_PMR) ||
+ mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A ||
+ mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B)
+ req.access = CMDQ_ALLOCATE_MRW_ACCESS_CONSUMER_OWNED_KEY;
+ req.mrw_handle = cpu_to_le64((uintptr_t)mrw);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+ if ((mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2A) ||
+ (mrw->type == CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B))
+ mrw->rkey = le32_to_cpu(resp.xid);
+ else
+ mrw->lkey = le32_to_cpu(resp.xid);
+
+ return 0;
+}
+
+int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
+ bool block)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_deregister_mr_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_deregister_mr req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_DEREGISTER_MR,
+ sizeof(req));
+
+ req.lkey = cpu_to_le32(mrw->lkey);
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ return rc;
+
+ if (mrw->hwq.max_elements) {
+ mrw->va = 0;
+ mrw->total_size = 0;
+ bnxt_qplib_free_hwq(res, &mrw->hwq);
+ }
+
+ return 0;
+}
+
+int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrinfo *mrinfo,
+ bool block)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_register_mr_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_register_mr req = {};
+ struct bnxt_qplib_mrw *mr;
+ u32 buf_pg_size;
+ u32 pg_size;
+ u16 level;
+ u16 flags;
+ int rc;
+
+ mr = mrinfo->mrw;
+ buf_pg_size = 0x01ULL << mrinfo->sg.pgshft;
+ if (mrinfo->sg.npages) {
+		/* Free the hwq if it already exists; this must be a re-registration */
+ if (mr->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mr->hwq);
+ /* Use system PAGE_SIZE */
+ hwq_attr.res = res;
+ hwq_attr.depth = mrinfo->sg.npages;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.type = HWQ_TYPE_MR;
+ hwq_attr.sginfo = &mrinfo->sg;
+ rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
+ if (rc) {
+ dev_err(&res->pdev->dev,
+ "SP: Reg MR memory allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_REGISTER_MR,
+ sizeof(req));
+ /* Configure the request */
+ if (mrinfo->is_dma) {
+ /* No PBL provided, just use system PAGE_SIZE */
+ level = 0;
+ req.pbl = 0;
+ pg_size = PAGE_SIZE;
+ } else {
+ level = mr->hwq.level;
+ req.pbl = cpu_to_le64(mr->hwq.pbl[PBL_LVL_0].pg_map_arr[0]);
+ }
+
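+	/*
+	 * Encode the PBL level and the log2 of the data page size in one
+	 * field; the PBL pages themselves always use the system PAGE_SIZE.
+	 */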
+ pg_size = buf_pg_size ? buf_pg_size : PAGE_SIZE;
+ req.log2_pg_size_lvl = (level << CMDQ_REGISTER_MR_LVL_SFT) |
+ ((ilog2(pg_size) <<
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PG_SIZE_MASK);
+ req.log2_pbl_pg_size = cpu_to_le16(((ilog2(PAGE_SIZE) <<
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_SFT) &
+ CMDQ_REGISTER_MR_LOG2_PBL_PG_SIZE_MASK));
+ req.access = (mr->flags & 0xFFFF);
+ req.va = cpu_to_le64(mr->va);
+ req.key = cpu_to_le32(mr->lkey);
+ if (_is_alloc_mr_unified(res->dattr)) {
+ flags = 0;
+ req.key = cpu_to_le32(mr->pd->id);
+ flags |= CMDQ_REGISTER_MR_FLAGS_ALLOC_MR;
+ req.flags = cpu_to_le16(flags);
+ }
+ req.mr_size = cpu_to_le64(mr->total_size);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto fail;
+
+ if (_is_alloc_mr_unified(res->dattr)) {
+ mr->lkey = le32_to_cpu(resp.xid);
+ mr->rkey = mr->lkey;
+ }
+
+ return 0;
+fail:
+ if (mr->hwq.max_elements)
+ bnxt_qplib_free_hwq(res, &mr->hwq);
+ return rc;
+}
+
+int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl,
+ int max_pg_ptrs)
+{
+ struct bnxt_qplib_hwq_attr hwq_attr = {};
+ struct bnxt_qplib_sg_info sginfo = {};
+ int pg_ptrs, rc;
+
+ /* Re-calculate the max to fit the HWQ allocation model */
+ pg_ptrs = roundup_pow_of_two(max_pg_ptrs);
+
+ sginfo.pgsize = PAGE_SIZE;
+ sginfo.nopte = true;
+
+ hwq_attr.res = res;
+ hwq_attr.depth = pg_ptrs;
+ hwq_attr.stride = PAGE_SIZE;
+ hwq_attr.sginfo = &sginfo;
+ hwq_attr.type = HWQ_TYPE_CTX;
+ rc = bnxt_qplib_alloc_init_hwq(&frpl->hwq, &hwq_attr);
+ if (!rc)
+ frpl->max_pg_ptrs = pg_ptrs;
+
+ return rc;
+}
+
+void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl)
+{
+ bnxt_qplib_free_hwq(res, &frpl->hwq);
+}
+
+int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids)
+{
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_map_tc_to_cos_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_map_tc_to_cos req = {};
+ int rc;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_MAP_TC_TO_COS,
+ sizeof(req));
+ req.cos0 = cpu_to_le16(cids[0]);
+ req.cos1 = cpu_to_le16(cids[1]);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ return rc;
+}
+
+static void bnxt_qplib_fill_cc_gen1(struct cmdq_modify_roce_cc_gen1_tlv *ext_req,
+ struct bnxt_qplib_cc_param_ext *cc_ext)
+{
+ ext_req->modify_mask = cpu_to_le64(cc_ext->ext_mask);
+ cc_ext->ext_mask = 0;
+ ext_req->inactivity_th_hi = cpu_to_le16(cc_ext->inact_th_hi);
+ ext_req->min_time_between_cnps = cpu_to_le16(cc_ext->min_delta_cnp);
+ ext_req->init_cp = cpu_to_le16(cc_ext->init_cp);
+ ext_req->tr_update_mode = cc_ext->tr_update_mode;
+ ext_req->tr_update_cycles = cc_ext->tr_update_cyls;
+ ext_req->fr_num_rtts = cc_ext->fr_rtt;
+ ext_req->ai_rate_increase = cc_ext->ai_rate_incr;
+ ext_req->reduction_relax_rtts_th = cpu_to_le16(cc_ext->rr_rtt_th);
+ ext_req->additional_relax_cr_th = cpu_to_le16(cc_ext->ar_cr_th);
+ ext_req->cr_min_th = cpu_to_le16(cc_ext->cr_min_th);
+ ext_req->bw_avg_weight = cc_ext->bw_avg_weight;
+ ext_req->actual_cr_factor = cc_ext->cr_factor;
+ ext_req->max_cp_cr_th = cpu_to_le16(cc_ext->cr_th_max_cp);
+ ext_req->cp_bias_en = cc_ext->cp_bias_en;
+ ext_req->cp_bias = cc_ext->cp_bias;
+ ext_req->cnp_ecn = cc_ext->cnp_ecn;
+ ext_req->rtt_jitter_en = cc_ext->rtt_jitter_en;
+ ext_req->link_bytes_per_usec = cpu_to_le16(cc_ext->bytes_per_usec);
+ ext_req->reset_cc_cr_th = cpu_to_le16(cc_ext->cc_cr_reset_th);
+ ext_req->cr_width = cc_ext->cr_width;
+ ext_req->quota_period_min = cc_ext->min_quota;
+ ext_req->quota_period_max = cc_ext->max_quota;
+ ext_req->quota_period_abs_max = cc_ext->abs_max_quota;
+ ext_req->tr_lower_bound = cpu_to_le16(cc_ext->tr_lb);
+ ext_req->cr_prob_factor = cc_ext->cr_prob_fac;
+ ext_req->tr_prob_factor = cc_ext->tr_prob_fac;
+ ext_req->fairness_cr_th = cpu_to_le16(cc_ext->fair_cr_th);
+ ext_req->red_div = cc_ext->red_div;
+ ext_req->cnp_ratio_th = cc_ext->cnp_ratio_th;
+ ext_req->exp_ai_rtts = cpu_to_le16(cc_ext->ai_ext_rtt);
+ ext_req->exp_ai_cr_cp_ratio = cc_ext->exp_crcp_ratio;
+ ext_req->use_rate_table = cc_ext->low_rate_en;
+ ext_req->cp_exp_update_th = cpu_to_le16(cc_ext->cpcr_update_th);
+ ext_req->high_exp_ai_rtts_th1 = cpu_to_le16(cc_ext->ai_rtt_th1);
+ ext_req->high_exp_ai_rtts_th2 = cpu_to_le16(cc_ext->ai_rtt_th2);
+ ext_req->actual_cr_cong_free_rtts_th = cpu_to_le16(cc_ext->cf_rtt_th);
+ ext_req->severe_cong_cr_th1 = cpu_to_le16(cc_ext->sc_cr_th1);
+ ext_req->severe_cong_cr_th2 = cpu_to_le16(cc_ext->sc_cr_th2);
+ ext_req->link64B_per_rtt = cpu_to_le32(cc_ext->l64B_per_rtt);
+ ext_req->cc_ack_bytes = cc_ext->cc_ack_bytes;
+ ext_req->reduce_init_cong_free_rtts_th = cpu_to_le16(cc_ext->reduce_cf_rtt_th);
+}
+
+int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct bnxt_qplib_tlv_modify_cc_req tlv_req = {};
+ struct creq_modify_roce_cc_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_modify_roce_cc *req;
+ int req_size;
+ void *cmd;
+ int rc;
+
+ /* Prepare the older base command */
+ req = &tlv_req.base_req;
+ cmd = req;
+ req_size = sizeof(*req);
+ bnxt_qplib_rcfw_cmd_prep(req, CMDQ_BASE_OPCODE_MODIFY_ROCE_CC,
+ sizeof(*req));
+ req->modify_mask = cpu_to_le32(cc_param->mask);
+ req->enable_cc = cc_param->enable;
+ req->g = cc_param->g;
+ req->num_phases_per_state = cc_param->nph_per_state;
+ req->time_per_phase = cc_param->time_pph;
+ req->pkts_per_phase = cc_param->pkts_pph;
+ req->init_cr = cpu_to_le16(cc_param->init_cr);
+ req->init_tr = cpu_to_le16(cc_param->init_tr);
+ req->tos_dscp_tos_ecn = (cc_param->tos_dscp <<
+ CMDQ_MODIFY_ROCE_CC_TOS_DSCP_SFT) |
+ (cc_param->tos_ecn &
+ CMDQ_MODIFY_ROCE_CC_TOS_ECN_MASK);
+ req->alt_vlan_pcp = cc_param->alt_vlan_pcp;
+ req->alt_tos_dscp = cpu_to_le16(cc_param->alt_tos_dscp);
+ req->rtt = cpu_to_le16(cc_param->rtt);
+ req->tcp_cp = cpu_to_le16(cc_param->tcp_cp);
+ req->cc_mode = cc_param->cc_mode;
+ req->inactivity_th = cpu_to_le16(cc_param->inact_th);
+
+ /* For chip gen P5 onwards fill extended cmd and header */
+ if (_is_chip_gen_p5_p7(res->cctx)) {
+ struct roce_tlv *hdr;
+ u32 payload;
+ u32 chunks;
+
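+		/*
+		 * The wire format becomes: primary TLV header + base command,
+		 * followed by an extended TLV carrying the gen1 CC parameters.
+		 */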
+ cmd = &tlv_req;
+ req_size = sizeof(tlv_req);
+ /* Prepare primary tlv header */
+ hdr = &tlv_req.tlv_hdr;
+ chunks = CHUNKS(sizeof(struct bnxt_qplib_tlv_modify_cc_req));
+ payload = sizeof(struct cmdq_modify_roce_cc);
+ ROCE_1ST_TLV_PREP(hdr, chunks, payload, true);
+ /* Prepare secondary tlv header */
+ hdr = (struct roce_tlv *)&tlv_req.ext_req;
+ payload = sizeof(struct cmdq_modify_roce_cc_gen1_tlv) -
+ sizeof(struct roce_tlv);
+ ROCE_EXT_TLV_PREP(hdr, TLV_TYPE_MODIFY_ROCE_CC_GEN1, payload,
+ false, true);
+ bnxt_qplib_fill_cc_gen1(&tlv_req.ext_req, &cc_param->cc_ext);
+ }
+
+ bnxt_qplib_fill_cmdqmsg(&msg, cmd, &resp, NULL, req_size,
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ return rc;
+}
+
+static void bnxt_qplib_read_cc_gen1(struct bnxt_qplib_cc_param_ext *cc_ext,
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *sb)
+{
+ cc_ext->inact_th_hi = le16_to_cpu(sb->inactivity_th_hi);
+ cc_ext->min_delta_cnp = le16_to_cpu(sb->min_time_between_cnps);
+ cc_ext->init_cp = le16_to_cpu(sb->init_cp);
+ cc_ext->tr_update_mode = sb->tr_update_mode;
+ cc_ext->tr_update_cyls = sb->tr_update_cycles;
+ cc_ext->fr_rtt = sb->fr_num_rtts;
+ cc_ext->ai_rate_incr = sb->ai_rate_increase;
+ cc_ext->rr_rtt_th = le16_to_cpu(sb->reduction_relax_rtts_th);
+ cc_ext->ar_cr_th = le16_to_cpu(sb->additional_relax_cr_th);
+ cc_ext->cr_min_th = le16_to_cpu(sb->cr_min_th);
+ cc_ext->bw_avg_weight = sb->bw_avg_weight;
+ cc_ext->cr_factor = sb->actual_cr_factor;
+ cc_ext->cr_th_max_cp = le16_to_cpu(sb->max_cp_cr_th);
+ cc_ext->cp_bias_en = sb->cp_bias_en;
+ cc_ext->cp_bias = sb->cp_bias;
+ cc_ext->cnp_ecn = sb->cnp_ecn;
+ cc_ext->rtt_jitter_en = sb->rtt_jitter_en;
+ cc_ext->bytes_per_usec = le16_to_cpu(sb->link_bytes_per_usec);
+ cc_ext->cc_cr_reset_th = le16_to_cpu(sb->reset_cc_cr_th);
+ cc_ext->cr_width = sb->cr_width;
+ cc_ext->min_quota = sb->quota_period_min;
+ cc_ext->max_quota = sb->quota_period_max;
+ cc_ext->abs_max_quota = sb->quota_period_abs_max;
+ cc_ext->tr_lb = le16_to_cpu(sb->tr_lower_bound);
+ cc_ext->cr_prob_fac = sb->cr_prob_factor;
+ cc_ext->tr_prob_fac = sb->tr_prob_factor;
+ cc_ext->fair_cr_th = le16_to_cpu(sb->fairness_cr_th);
+ cc_ext->red_div = sb->red_div;
+ cc_ext->cnp_ratio_th = sb->cnp_ratio_th;
+ cc_ext->ai_ext_rtt = le16_to_cpu(sb->exp_ai_rtts);
+ cc_ext->exp_crcp_ratio = sb->exp_ai_cr_cp_ratio;
+ cc_ext->low_rate_en = sb->use_rate_table;
+ cc_ext->cpcr_update_th = le16_to_cpu(sb->cp_exp_update_th);
+ cc_ext->ai_rtt_th1 = le16_to_cpu(sb->high_exp_ai_rtts_th1);
+ cc_ext->ai_rtt_th2 = le16_to_cpu(sb->high_exp_ai_rtts_th2);
+ cc_ext->cf_rtt_th = le16_to_cpu(sb->actual_cr_cong_free_rtts_th);
+ cc_ext->sc_cr_th1 = le16_to_cpu(sb->severe_cong_cr_th1);
+ cc_ext->sc_cr_th2 = le16_to_cpu(sb->severe_cong_cr_th2);
+ cc_ext->l64B_per_rtt = le32_to_cpu(sb->link64B_per_rtt);
+ cc_ext->cc_ack_bytes = sb->cc_ack_bytes;
+ cc_ext->reduce_cf_rtt_th = le16_to_cpu(sb->reduce_init_cong_free_rtts_th);
+}
+
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param)
+{
+ struct creq_query_roce_cc_gen1_resp_sb_tlv *gen1_sb;
+ struct bnxt_qplib_tlv_query_rcc_sb *ext_sb;
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_query_roce_cc_resp resp = {};
+ struct creq_query_roce_cc_resp_sb *sb;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_query_roce_cc req = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ size_t resp_size;
+ int rc;
+
+ /* Query the parameters from chip */
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_CC,
+ sizeof(req));
+ if (_is_chip_gen_p5_p7(res->cctx))
+ resp_size = sizeof(*ext_sb);
+ else
+ resp_size = sizeof(*sb);
+ sbuf.size = ALIGN(resp_size, BNXT_QPLIB_CMDQE_UNITS);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(res->rcfw, &msg);
+ if (rc) {
+ dev_dbg(&res->pdev->dev, "%s:Query CC param failed:0x%x\n",
+ __func__, rc);
+ goto out;
+ }
+
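+	/*
+	 * On gen P5/P7 the response carries a TLV header and the base
+	 * structure is embedded in the extended layout; older chips return
+	 * the base structure directly.
+	 */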
+ ext_sb = sbuf.sb;
+ gen1_sb = &ext_sb->gen1_sb;
+ sb = _is_chip_gen_p5_p7(res->cctx) ? &ext_sb->base_sb :
+ (struct creq_query_roce_cc_resp_sb *)ext_sb;
+
+ cc_param->enable = sb->enable_cc & CREQ_QUERY_ROCE_CC_RESP_SB_ENABLE_CC;
+ cc_param->tos_ecn = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_ECN_SFT;
+ cc_param->tos_dscp = (sb->tos_dscp_tos_ecn &
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_MASK) >>
+ CREQ_QUERY_ROCE_CC_RESP_SB_TOS_DSCP_SFT;
+ cc_param->alt_tos_dscp = sb->alt_tos_dscp;
+ cc_param->alt_vlan_pcp = sb->alt_vlan_pcp;
+
+ cc_param->g = sb->g;
+ cc_param->nph_per_state = sb->num_phases_per_state;
+ cc_param->init_cr = le16_to_cpu(sb->init_cr);
+ cc_param->init_tr = le16_to_cpu(sb->init_tr);
+ cc_param->cc_mode = sb->cc_mode;
+ cc_param->inact_th = le16_to_cpu(sb->inactivity_th);
+ cc_param->rtt = le16_to_cpu(sb->rtt);
+ cc_param->tcp_cp = le16_to_cpu(sb->tcp_cp);
+ cc_param->time_pph = sb->time_per_phase;
+ cc_param->pkts_pph = sb->pkts_per_phase;
+ if (_is_chip_gen_p5_p7(res->cctx))
+ bnxt_qplib_read_cc_gen1(&cc_param->cc_ext, gen1_sb);
+out:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats,
+ struct bnxt_qplib_query_stats_info *sinfo)
+{
+ struct creq_query_roce_stats_resp resp = {};
+ struct creq_query_roce_stats_resp_sb *sb;
+ struct cmdq_query_roce_stats req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ u16 cmd_flags = 0;
+ u32 fn_id = 0;
+ int rc = 0;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_QUERY_ROCE_STATS,
+ sizeof(req));
+
+ sbuf.size = sizeof(*sb);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb)
+ return -ENOMEM;
+ sb = sbuf.sb;
+
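+	/* Newer FW can scope the query to a specific PF/VF and collection id. */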
+ if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_ROCE_STATS_FN_ID) {
+ if (sinfo->function_id != 0xFFFFFFFF) {
+ cmd_flags = CMDQ_QUERY_ROCE_STATS_FLAGS_FUNCTION_ID;
+ if (sinfo->vf_valid) {
+ fn_id = CMDQ_QUERY_ROCE_STATS_VF_VALID;
+ fn_id |= (sinfo->function_id <<
+ CMDQ_QUERY_ROCE_STATS_VF_NUM_SFT) &
+ CMDQ_QUERY_ROCE_STATS_VF_NUM_MASK;
+ } else {
+ fn_id = sinfo->function_id &
+ CMDQ_QUERY_ROCE_STATS_PF_NUM_MASK;
+ }
+ }
+
+ req.flags = cpu_to_le16(cmd_flags);
+ req.function_id = cpu_to_le32(fn_id);
+
+ if (sinfo->collection_id != 0xFF) {
+ cmd_flags |= CMDQ_QUERY_ROCE_STATS_FLAGS_COLLECTION_ID;
+ req.collection_id = sinfo->collection_id;
+ }
+ } else {
+		/* For older HWRM versions the command length has to be
+		 * adjusted: the newer command is 8 bytes larger, so subtract
+		 * those 8 bytes here. The command units are adjusted inside
+		 * bnxt_qplib_rcfw_send_message().
+ */
+ req.cmd_size -= 8;
+ }
+
+ req.resp_size = sbuf.size / BNXT_QPLIB_CMDQE_UNITS;
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+	/* Extract the stats from the side buffer */
+ stats->to_retransmits = le64_to_cpu(sb->to_retransmits);
+ stats->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ stats->max_retry_exceeded = le64_to_cpu(sb->max_retry_exceeded);
+ stats->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ stats->missing_resp = le64_to_cpu(sb->missing_resp);
+ stats->unrecoverable_err = le64_to_cpu(sb->unrecoverable_err);
+ stats->bad_resp_err = le64_to_cpu(sb->bad_resp_err);
+ stats->local_qp_op_err = le64_to_cpu(sb->local_qp_op_err);
+ stats->local_protection_err = le64_to_cpu(sb->local_protection_err);
+ stats->mem_mgmt_op_err = le64_to_cpu(sb->mem_mgmt_op_err);
+ stats->remote_invalid_req_err = le64_to_cpu(sb->remote_invalid_req_err);
+ stats->remote_access_err = le64_to_cpu(sb->remote_access_err);
+ stats->remote_op_err = le64_to_cpu(sb->remote_op_err);
+ stats->dup_req = le64_to_cpu(sb->dup_req);
+ stats->res_exceed_max = le64_to_cpu(sb->res_exceed_max);
+ stats->res_length_mismatch = le64_to_cpu(sb->res_length_mismatch);
+ stats->res_exceeds_wqe = le64_to_cpu(sb->res_exceeds_wqe);
+ stats->res_opcode_err = le64_to_cpu(sb->res_opcode_err);
+ stats->res_rx_invalid_rkey = le64_to_cpu(sb->res_rx_invalid_rkey);
+ stats->res_rx_domain_err = le64_to_cpu(sb->res_rx_domain_err);
+ stats->res_rx_no_perm = le64_to_cpu(sb->res_rx_no_perm);
+ stats->res_rx_range_err = le64_to_cpu(sb->res_rx_range_err);
+ stats->res_tx_invalid_rkey = le64_to_cpu(sb->res_tx_invalid_rkey);
+ stats->res_tx_domain_err = le64_to_cpu(sb->res_tx_domain_err);
+ stats->res_tx_no_perm = le64_to_cpu(sb->res_tx_no_perm);
+ stats->res_tx_range_err = le64_to_cpu(sb->res_tx_range_err);
+ stats->res_irrq_oflow = le64_to_cpu(sb->res_irrq_oflow);
+ stats->res_unsup_opcode = le64_to_cpu(sb->res_unsup_opcode);
+ stats->res_unaligned_atomic = le64_to_cpu(sb->res_unaligned_atomic);
+ stats->res_rem_inv_err = le64_to_cpu(sb->res_rem_inv_err);
+ stats->res_mem_error = le64_to_cpu(sb->res_mem_error);
+ stats->res_srq_err = le64_to_cpu(sb->res_srq_err);
+ stats->res_cmp_err = le64_to_cpu(sb->res_cmp_err);
+ stats->res_invalid_dup_rkey = le64_to_cpu(sb->res_invalid_dup_rkey);
+ stats->res_wqe_format_err = le64_to_cpu(sb->res_wqe_format_err);
+ stats->res_cq_load_err = le64_to_cpu(sb->res_cq_load_err);
+ stats->res_srq_load_err = le64_to_cpu(sb->res_srq_load_err);
+ stats->res_tx_pci_err = le64_to_cpu(sb->res_tx_pci_err);
+ stats->res_rx_pci_err = le64_to_cpu(sb->res_rx_pci_err);
+
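+	/*
+	 * res_oos_drop_count is accumulated as a delta against the previous
+	 * snapshot, masked to the width of the HW counter, so the value
+	 * remains correct across counter wrap-around.
+	 */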
+ if (!rcfw->init_oos_stats) {
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ rcfw->init_oos_stats = true;
+ } else {
+ stats->res_oos_drop_count += (le64_to_cpu(sb->res_oos_drop_count) -
+ rcfw->oos_prev) &
+ BNXT_QPLIB_OOS_COUNT_MASK;
+ rcfw->oos_prev = le64_to_cpu(sb->res_oos_drop_count);
+ }
+
+ stats->active_qp_count_p0 = le64_to_cpu(sb->active_qp_count_p0);
+ stats->active_qp_count_p1 = le64_to_cpu(sb->active_qp_count_p1);
+ stats->active_qp_count_p2 = le64_to_cpu(sb->active_qp_count_p2);
+ stats->active_qp_count_p3 = le64_to_cpu(sb->active_qp_count_p3);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
+
+int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res,
+ u8 aggr_mode, u8 member_port_map,
+ u8 active_port_map, bool aggr_en,
+ u32 stats_fw_id)
+{
+ struct creq_set_link_aggr_mode_resources_resp resp = {};
+ struct cmdq_set_link_aggr_mode_cc req = {};
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct bnxt_qplib_cmdqmsg msg = {};
+ int rc = 0;
+
+ bnxt_qplib_rcfw_cmd_prep(&req, CMDQ_BASE_OPCODE_SET_LINK_AGGR_MODE,
+ sizeof(req));
+
+ req.aggr_enable = aggr_en;
+ req.active_port_map = active_port_map;
+ req.member_port_map = member_port_map;
+ req.link_aggr_mode = aggr_mode;
+
+	/* Only the second port's stats context id needs to be specified for now. */
+ req.stat_ctx_id[1] = cpu_to_le16((u16)(stats_fw_id));
+
+ req.modify_mask =
+ cpu_to_le32(CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_EN |
+ CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_ACTIVE_PORT_MAP |
+ CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_MEMBER_PORT_MAP |
+ CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_AGGR_MODE |
+ CMDQ_SET_LINK_AGGR_MODE_MODIFY_MASK_STAT_CTX_ID);
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ dev_err(&res->pdev->dev,
+ "QPLIB: Failed to set link aggr mode, %#x\n", rc);
+
+ return rc;
+}
+
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat,
+ struct bnxt_qplib_query_stats_info *sinfo)
+{
+ struct creq_query_roce_stats_ext_resp resp = {};
+ struct creq_query_roce_stats_ext_resp_sb *sb;
+ struct cmdq_query_roce_stats_ext req = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct bnxt_qplib_rcfw_sbuf sbuf;
+ int rc;
+
+ sbuf.size = sizeof(*sb);
+ sbuf.sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf.size,
+ &sbuf.dma_addr, GFP_KERNEL);
+ if (!sbuf.sb) {
+ dev_err(&rcfw->pdev->dev,
+ "QPLIB: SP: QUERY_ROCE_STATS_EXT alloc sb failed\n");
+ return -ENOMEM;
+ }
+ sb = sbuf.sb;
+
+ bnxt_qplib_rcfw_cmd_prep(&req,
+ CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS,
+ sizeof(req));
+ req.resp_size = sbuf.size;
+ req.resp_addr = cpu_to_le64(sbuf.dma_addr);
+ req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
+ if (_is_chip_p7(rcfw->res->cctx) && rcfw->res->is_vf) {
+ if (sinfo->vf_valid)
+ req.function_id =
+ cpu_to_le32(CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID |
+ (fid << CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT));
+ else
+ req.flags = cpu_to_le16(0);
+ } else {
+ req.function_id = cpu_to_le32(fid);
+ }
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, &sbuf, sizeof(req),
+ sizeof(resp), 0);
+ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
+ if (rc)
+ goto bail;
+
+ /* dump when dyndbg is enabled */
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, sb, sizeof(*sb));
+ estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
+ estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
+ estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
+ estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
+ estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->tx_roce_pkts = le64_to_cpu(sb->tx_roce_pkts);
+ estat->tx_roce_bytes = le64_to_cpu(sb->tx_roce_bytes);
+ estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
+ estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
+ estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
+ estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
+ estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_pkts = le64_to_cpu(sb->rx_roce_pkts);
+ estat->rx_roce_bytes = le64_to_cpu(sb->rx_roce_bytes);
+ estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
+ estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
+ estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
+ estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+ estat->tx_cnp = le64_to_cpu(sb->tx_cnp_pkts);
+ estat->rx_cnp = le64_to_cpu(sb->rx_cnp_pkts);
+ estat->rx_ecn_marked = le64_to_cpu(sb->rx_ecn_marked_pkts);
+ estat->seq_err_naks_rcvd = le64_to_cpu(sb->seq_err_naks_rcvd);
+ estat->rnr_naks_rcvd = le64_to_cpu(sb->rnr_naks_rcvd);
+ estat->missing_resp = le64_to_cpu(sb->missing_resp);
+ estat->to_retransmits = le64_to_cpu(sb->to_retransmit);
+ estat->dup_req = le64_to_cpu(sb->dup_req);
+ estat->rx_dcn_payload_cut = le64_to_cpu(sb->rx_dcn_payload_cut);
+ estat->te_bypassed = le64_to_cpu(sb->te_bypassed);
+bail:
+ dma_free_coherent(&rcfw->pdev->dev, sbuf.size,
+ sbuf.sb, sbuf.dma_addr);
+ return rc;
+}
diff --git a/sys/dev/bnxt/bnxt_re/qplib_sp.h b/sys/dev/bnxt/bnxt_re/qplib_sp.h
new file mode 100644
index 000000000000..e306db3b9d8e
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_sp.h
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: Slow Path Operators (header)
+ */
+
+#ifndef __BNXT_QPLIB_SP_H__
+#define __BNXT_QPLIB_SP_H__
+
+#include <rdma/ib_verbs.h>
+
+#define BNXT_QPLIB_RESERVED_QP_WRS 128
+
+/* Resource maximums reported by the firmware */
+struct bnxt_qplib_dev_attr {
+#define FW_VER_ARR_LEN 4
+ u8 fw_ver[FW_VER_ARR_LEN];
+ u16 max_sgid;
+ u16 max_mrw;
+ u32 max_qp;
+#define BNXT_QPLIB_MAX_OUT_RD_ATOM 126
+ u32 max_qp_rd_atom;
+ u32 max_qp_init_rd_atom;
+ u32 max_qp_wqes;
+ u32 max_qp_sges;
+ u32 max_cq;
+ /* HW supports only 8K entries in PBL.
+ * So max CQEs that can be supported per CQ is 1M.
+ */
+#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
+ u32 max_cq_wqes;
+ u32 max_cq_sges;
+ u32 max_mr;
+ u64 max_mr_size;
+#define BNXT_QPLIB_MAX_PD (64 * 1024)
+ u32 max_pd;
+ u32 max_mw;
+ u32 max_raw_ethy_qp;
+ u32 max_ah;
+ u32 max_fmr;
+ u32 max_map_per_fmr;
+ u32 max_srq;
+ u32 max_srq_wqes;
+ u32 max_srq_sges;
+ u32 max_pkey;
+ u32 max_inline_data;
+ u32 l2_db_size;
+ u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ u8 is_atomic;
+ u16 dev_cap_flags;
+ u64 page_size_cap;
+ u32 max_dpi;
+};
+
+struct bnxt_qplib_pd {
+ u32 id;
+};
+
+struct bnxt_qplib_gid {
+ u8 data[16];
+};
+
+struct bnxt_qplib_gid_info {
+ struct bnxt_qplib_gid gid;
+ u16 vlan_id;
+};
+
+struct bnxt_qplib_ah {
+ struct bnxt_qplib_gid dgid;
+ struct bnxt_qplib_pd *pd;
+ u32 id;
+ u8 sgid_index;
+	u8 host_sgid_index; /* For Query AH if the HW table and SW table are different */
+ u8 traffic_class;
+ u32 flow_label;
+ u8 hop_limit;
+ u8 sl;
+ u8 dmac[6];
+ u16 vlan_id;
+ u8 nw_type;
+ u8 enable_cc;
+};
+
+struct bnxt_qplib_mrw {
+ struct bnxt_qplib_pd *pd;
+ int type;
+ u32 flags;
+#define BNXT_QPLIB_FR_PMR 0x80000000
+ u32 lkey;
+ u32 rkey;
+#define BNXT_QPLIB_RSVD_LKEY 0xFFFFFFFF
+ u64 va;
+ u64 total_size;
+ u32 npages;
+ u64 mr_handle;
+ struct bnxt_qplib_hwq hwq;
+};
+
+struct bnxt_qplib_mrinfo {
+ struct bnxt_qplib_mrw *mrw;
+ struct bnxt_qplib_sg_info sg;
+ u64 *ptes;
+ bool is_dma;
+};
+
+struct bnxt_qplib_frpl {
+ int max_pg_ptrs;
+ struct bnxt_qplib_hwq hwq;
+};
+
+struct bnxt_qplib_cc_param_ext {
+ u64 ext_mask;
+ u16 inact_th_hi;
+ u16 min_delta_cnp;
+ u16 init_cp;
+ u8 tr_update_mode;
+ u8 tr_update_cyls;
+ u8 fr_rtt;
+ u8 ai_rate_incr;
+ u16 rr_rtt_th;
+ u16 ar_cr_th;
+ u16 cr_min_th;
+ u8 bw_avg_weight;
+ u8 cr_factor;
+ u16 cr_th_max_cp;
+ u8 cp_bias_en;
+ u8 cp_bias;
+ u8 cnp_ecn;
+ u8 rtt_jitter_en;
+ u16 bytes_per_usec;
+ u16 cc_cr_reset_th;
+ u8 cr_width;
+ u8 min_quota;
+ u8 max_quota;
+ u8 abs_max_quota;
+ u16 tr_lb;
+ u8 cr_prob_fac;
+ u8 tr_prob_fac;
+ u16 fair_cr_th;
+ u8 red_div;
+ u8 cnp_ratio_th;
+ u16 ai_ext_rtt;
+ u8 exp_crcp_ratio;
+ u8 low_rate_en;
+ u16 cpcr_update_th;
+ u16 ai_rtt_th1;
+ u16 ai_rtt_th2;
+ u16 cf_rtt_th;
+ u16 sc_cr_th1; /* severe congestion cr threshold 1 */
+ u16 sc_cr_th2; /* severe congestion cr threshold 2 */
+ u32 l64B_per_rtt;
+ u8 cc_ack_bytes;
+ u16 reduce_cf_rtt_th;
+};
+
+struct bnxt_qplib_cc_param {
+ u8 alt_vlan_pcp;
+ u16 alt_tos_dscp;
+#define BNXT_QPLIB_USER_DSCP_VALID 0x80
+ u8 cnp_dscp_user;
+ u8 roce_dscp_user;
+ u8 cc_mode;
+ u8 enable;
+ u16 inact_th;
+ u16 init_cr;
+ u16 init_tr;
+ u16 rtt;
+ u8 g;
+ u8 nph_per_state;
+ u8 time_pph;
+ u8 pkts_pph;
+ u8 tos_ecn;
+ u8 tos_dscp;
+ u8 qp1_tos_dscp;
+ u16 tcp_cp;
+ struct bnxt_qplib_cc_param_ext cc_ext;
+ u8 disable_prio_vlan_tx;
+ /* Mask used while programming the configfs values */
+ u32 mask;
+ /* Mask used while displaying the configfs values */
+ u32 cur_mask;
+ u8 roce_pri;
+#define BNXT_QPLIB_CC_PARAM_MASK_VLAN_TX_DISABLE 0x40000
+#define BNXT_QPLIB_CC_PARAM_MASK_ROCE_PRI 0x80000
+ /* prev value to clear dscp table */
+ u8 prev_roce_pri;
+ u8 prev_alt_vlan_pcp;
+ u8 prev_tos_dscp;
+ u16 prev_alt_tos_dscp;
+ /* To track if admin has enabled ECN explicitly */
+ u8 admin_enable;
+};
+
+/* All counters below are 64-bit quantities, as reported by the FW. */
+struct bnxt_qplib_roce_stats {
+	u64 to_retransmits;
+	u64 seq_err_naks_rcvd;
+	u64 max_retry_exceeded;
+	u64 rnr_naks_rcvd;
+	u64 missing_resp;
+	u64 unrecoverable_err;
+	u64 bad_resp_err;
+	u64 local_qp_op_err;
+	u64 local_protection_err;
+	u64 mem_mgmt_op_err;
+	u64 remote_invalid_req_err;
+	u64 remote_access_err;
+	u64 remote_op_err;
+	u64 dup_req;
+	u64 res_exceed_max;
+	u64 res_length_mismatch;
+	u64 res_exceeds_wqe;
+	u64 res_opcode_err;
+	u64 res_rx_invalid_rkey;
+	u64 res_rx_domain_err;
+	u64 res_rx_no_perm;
+	u64 res_rx_range_err;
+	u64 res_tx_invalid_rkey;
+	u64 res_tx_domain_err;
+	u64 res_tx_no_perm;
+	u64 res_tx_range_err;
+	u64 res_irrq_oflow;
+	u64 res_unsup_opcode;
+	u64 res_unaligned_atomic;
+	u64 res_rem_inv_err;
+	u64 res_mem_error;
+	u64 res_srq_err;
+	u64 res_cmp_err;
+	u64 res_invalid_dup_rkey;
+	u64 res_wqe_format_err;
+	u64 res_cq_load_err;
+	u64 res_srq_load_err;
+	u64 res_tx_pci_err;
+	u64 res_rx_pci_err;
+	u64 res_oos_drop_count;
+	u64 active_qp_count_p0; /* port 0 active qps */
+	u64 active_qp_count_p1; /* port 1 active qps */
+	u64 active_qp_count_p2; /* port 2 active qps */
+	u64 active_qp_count_p3; /* port 3 active qps */
+};
+
+struct bnxt_qplib_ext_stat {
+ u64 tx_atomic_req;
+ u64 tx_read_req;
+ u64 tx_read_res;
+ u64 tx_write_req;
+ u64 tx_send_req;
+ u64 tx_roce_pkts;
+ u64 tx_roce_bytes;
+ u64 rx_atomic_req;
+ u64 rx_read_req;
+ u64 rx_read_res;
+ u64 rx_write_req;
+ u64 rx_send_req;
+ u64 rx_roce_pkts;
+ u64 rx_roce_bytes;
+ u64 rx_roce_good_pkts;
+ u64 rx_roce_good_bytes;
+ u64 rx_out_of_buffer;
+ u64 rx_out_of_sequence;
+ u64 tx_cnp;
+ u64 rx_cnp;
+ u64 rx_ecn_marked;
+ u64 seq_err_naks_rcvd;
+ u64 rnr_naks_rcvd;
+ u64 missing_resp;
+ u64 to_retransmits;
+ u64 dup_req;
+ u64 rx_dcn_payload_cut;
+ u64 te_bypassed;
+};
+
+#define BNXT_QPLIB_ACCESS_LOCAL_WRITE (1 << 0)
+#define BNXT_QPLIB_ACCESS_REMOTE_READ (1 << 1)
+#define BNXT_QPLIB_ACCESS_REMOTE_WRITE (1 << 2)
+#define BNXT_QPLIB_ACCESS_REMOTE_ATOMIC (1 << 3)
+#define BNXT_QPLIB_ACCESS_MW_BIND (1 << 4)
+#define BNXT_QPLIB_ACCESS_ZERO_BASED (1 << 5)
+#define BNXT_QPLIB_ACCESS_ON_DEMAND (1 << 6)
+
+int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
+ struct bnxt_qplib_gid *gid);
+int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
+int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ const union ib_gid *gid, const u8 *mac, u16 vlan_id,
+ bool update, u32 *index);
+int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
+ struct bnxt_qplib_gid *gid, u16 gid_idx, const u8 *smac);
+int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res);
+int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
+int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
+int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw);
+int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
+ bool block);
+int bnxt_qplib_reg_mr(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrinfo *mrinfo, bool block);
+int bnxt_qplib_free_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mr);
+int bnxt_qplib_alloc_fast_reg_mr(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrw *mr, int max);
+int bnxt_qplib_alloc_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl, int max);
+void bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_frpl *frpl);
+int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
+int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+int bnxt_qplib_set_link_aggr_mode(struct bnxt_qplib_res *res,
+ u8 aggr_mode, u8 member_port_map,
+ u8 active_port_map, bool aggr_en,
+ u32 stats_fw_id);
+int bnxt_qplib_get_roce_error_stats(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_roce_stats *stats,
+ struct bnxt_qplib_query_stats_info *sinfo);
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat,
+ struct bnxt_qplib_query_stats_info *sinfo);
+static inline void bnxt_re_set_max_gid(u16 *max_sgid);
+bool ib_modify_qp_is_ok_compat(enum ib_qp_state cur_state, enum ib_qp_state next_state,
+ enum ib_qp_type type, enum ib_qp_attr_mask mask);
+
+#define BNXT_MAX_SQ_SIZE 0xFFFF
+#define BNXT_MAX_VAR_WQE_SIZE 512
+#define BNXT_SGE_SIZE 16
+
+/* PF defines */
+#define BNXT_RE_MAX_QP_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (64 * 1024) : 0)
+
+#define BNXT_RE_MAX_MRW_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (256 * 1024) : 0)
+
+#define BNXT_RE_MAX_CQ_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (64 * 1024) : 0)
+
+#define BNXT_RE_MAX_SRQ_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (4 * 1024) : 0)
+
+#define BNXT_RE_MAX_AH_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (64 * 1024) : 0)
+
+/* VF defines */
+#define BNXT_RE_VF_MAX_QP_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (6 * 1024) : 0)
+
+#define BNXT_RE_VF_MAX_MRW_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (6 * 1024) : 0)
+
+#define BNXT_RE_VF_MAX_CQ_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (6 * 1024) : 0)
+
+#define BNXT_RE_VF_MAX_SRQ_SUPPORTED(chip_gen) \
+	(((chip_gen) == BNXT_RE_DEFAULT) ? (4 * 1024) : 0)
+
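+/*
+ * Clamp the reported GID table size to exactly 256 entries; see the
+ * "statically allocated gid_map" TODO in bnxt_qplib_get_dev_attr().
+ */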
+static inline void bnxt_re_set_max_gid(u16 *max_sgid)
+{
+ *max_sgid = max_t(u32, 256, *max_sgid);
+ *max_sgid = min_t(u32, 256, *max_sgid);
+}
+
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/qplib_tlv.h b/sys/dev/bnxt/bnxt_re/qplib_tlv.h
new file mode 100644
index 000000000000..eeaea41a37e6
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/qplib_tlv.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (c) 2017 - 2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QPLIB_TLV_H__
+#define __QPLIB_TLV_H__
+
+struct roce_tlv {
+ struct tlv tlv;
+ u8 total_size;
+ u8 unused[7];
+};
+
+#define CHUNK_SIZE 16
+#define CHUNKS(x) (((x) + CHUNK_SIZE - 1) / CHUNK_SIZE)
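+/* e.g. CHUNKS(24) == 2: a 24-byte payload occupies two 16-byte chunks. */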
+
+#define ROCE_1ST_TLV_PREP(rtlv, tot_chunks, content_bytes, more) \
+ do { \
+ (rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \
+ (rtlv)->tlv.tlv_type = TLV_TYPE_ROCE_SP_COMMAND; \
+ (rtlv)->tlv.length = (content_bytes); \
+ (rtlv)->tlv.flags = TLV_FLAGS_REQUIRED; \
+ (rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \
+ (rtlv)->total_size = (tot_chunks); \
+ } while (0)
+
+#define ROCE_EXT_TLV_PREP(rtlv, ext_type, content_bytes, more, reqd) \
+ do { \
+ (rtlv)->tlv.cmd_discr = CMD_DISCR_TLV_ENCAP; \
+ (rtlv)->tlv.tlv_type = (ext_type); \
+ (rtlv)->tlv.length = (content_bytes); \
+ (rtlv)->tlv.flags |= (more) ? TLV_FLAGS_MORE : 0; \
+ (rtlv)->tlv.flags |= (reqd) ? TLV_FLAGS_REQUIRED : 0; \
+ } while (0)
+
+/*
+ * TLV size in units of 16 byte chunks
+ */
+#define TLV_SIZE ((sizeof(struct roce_tlv) + 15) / 16)
+/*
+ * TLV length in bytes
+ */
+#define TLV_BYTES (TLV_SIZE * 16)
+
+#define HAS_TLV_HEADER(msg) (((struct tlv *)(msg))->cmd_discr == CMD_DISCR_TLV_ENCAP)
+#define GET_TLV_DATA(tlv) ((void *)&((uint8_t *)(tlv))[TLV_BYTES])
+
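+/*
+ * The __get/__set_cmdq_base_*() helpers below work on both plain and
+ * TLV-encapsulated commands: when a TLV header is present, the real
+ * cmdq_base starts one 16-byte chunk into the buffer.
+ */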
+static inline u8 __get_cmdq_base_opcode(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->opcode;
+ else
+ return req->opcode;
+}
+
+static inline void __set_cmdq_base_opcode(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->opcode = val;
+ else
+ req->opcode = val;
+}
+
+static inline __le16 __get_cmdq_base_cookie(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->cookie;
+ else
+ return req->cookie;
+}
+
+static inline void __set_cmdq_base_cookie(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cookie = val;
+ else
+ req->cookie = val;
+}
+
+static inline __le64 __get_cmdq_base_resp_addr(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr;
+ else
+ return req->resp_addr;
+}
+
+static inline void __set_cmdq_base_resp_addr(struct cmdq_base *req,
+ u32 size, __le64 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_addr = val;
+ else
+ req->resp_addr = val;
+}
+
+static inline u8 __get_cmdq_base_resp_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size;
+ else
+ return req->resp_size;
+}
+
+static inline void __set_cmdq_base_resp_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->resp_size = val;
+ else
+ req->resp_size = val;
+}
+
+static inline u8 __get_cmdq_base_cmd_size(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct roce_tlv *)(req))->total_size;
+ else
+ return req->cmd_size;
+}
+
+static inline void __set_cmdq_base_cmd_size(struct cmdq_base *req,
+ u32 size, u8 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->cmd_size = val;
+ else
+ req->cmd_size = val;
+}
+
+static inline __le16 __get_cmdq_base_flags(struct cmdq_base *req, u32 size)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ return ((struct cmdq_base *)GET_TLV_DATA(req))->flags;
+ else
+ return req->flags;
+}
+
+static inline void __set_cmdq_base_flags(struct cmdq_base *req,
+ u32 size, __le16 val)
+{
+ if (HAS_TLV_HEADER(req) && size > TLV_BYTES)
+ ((struct cmdq_base *)GET_TLV_DATA(req))->flags = val;
+ else
+ req->flags = val;
+}
+
+struct bnxt_qplib_tlv_modify_cc_req {
+ struct roce_tlv tlv_hdr;
+ struct cmdq_modify_roce_cc base_req;
+ __le64 tlvpad;
+ struct cmdq_modify_roce_cc_gen1_tlv ext_req;
+};
+
+struct bnxt_qplib_tlv_query_rcc_sb {
+ struct roce_tlv tlv_hdr;
+ struct creq_query_roce_cc_resp_sb base_sb;
+ struct creq_query_roce_cc_gen1_resp_sb_tlv gen1_sb;
+};
+#endif
diff --git a/sys/dev/bnxt/bnxt_re/stats.c b/sys/dev/bnxt/bnxt_re/stats.c
new file mode 100644
index 000000000000..7b0e6097aae6
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/stats.c
@@ -0,0 +1,773 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: statistics related functions
+ */
+
+#include "bnxt_re.h"
+#include "bnxt.h"
+
+int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev,
+ struct bnxt_re_flow_counters *stats,
+ struct bnxt_qplib_query_stats_info *sinfo)
+{
+ struct hwrm_cfa_flow_stats_output resp = {};
+ struct hwrm_cfa_flow_stats_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg = {};
+ u16 target_id;
+ int rc = 0;
+
+ if (sinfo->function_id == 0xFFFFFFFF)
+ target_id = -1;
+ else
+ target_id = sinfo->function_id + 1;
+
+ /* Issue HWRM cmd to read CNP, RoCEv1 and RoCEv2 flow counters (tx and rx) */
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, target_id);
+ req.num_flows = cpu_to_le16(6);
+ req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
+ req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
+ req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
+ req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ bnxt_re_fill_fw_msg(&fw_msg, &req, sizeof(req), &resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to get CFA Flow stats : rc = 0x%x\n", rc);
+ return rc;
+ }
+
+ stats->cnp_stats.cnp_tx_pkts = le64_to_cpu(resp.packet_0);
+ stats->cnp_stats.cnp_tx_bytes = le64_to_cpu(resp.byte_0);
+ stats->cnp_stats.cnp_rx_pkts = le64_to_cpu(resp.packet_1);
+ stats->cnp_stats.cnp_rx_bytes = le64_to_cpu(resp.byte_1);
+
+ stats->ro_stats.tx_pkts = le64_to_cpu(resp.packet_2) +
+ le64_to_cpu(resp.packet_4);
+ stats->ro_stats.tx_bytes = le64_to_cpu(resp.byte_2) +
+ le64_to_cpu(resp.byte_4);
+ stats->ro_stats.rx_pkts = le64_to_cpu(resp.packet_3) +
+ le64_to_cpu(resp.packet_5);
+ stats->ro_stats.rx_bytes = le64_to_cpu(resp.byte_3) +
+ le64_to_cpu(resp.byte_5);
+
+ return 0;
+}
+
+int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_re_ro_counters roce_only_tmp[2] = {{}, {}};
+ struct bnxt_re_cnp_counters tmp_counters[2] = {{}, {}};
+ struct hwrm_cfa_flow_stats_output resp = {};
+ struct hwrm_cfa_flow_stats_input req = {};
+ struct bnxt_en_dev *en_dev = rdev->en_dev;
+ struct bnxt_fw_msg fw_msg = {};
+ struct bnxt_re_cc_stat *cnps;
+ struct bnxt_re_rstat *dstat;
+ int rc = 0;
+ u64 bytes;
+ u64 pkts;
+
+ /* Issue HWRM cmd to read CNP, RoCEv1 and RoCEv2 flow counters (tx and rx) */
+ bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_CFA_FLOW_STATS, -1, -1);
+ req.num_flows = cpu_to_le16(6);
+ req.flow_handle_0 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT);
+ req.flow_handle_1 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_CNP_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ req.flow_handle_2 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT);
+ req.flow_handle_3 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV1_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ req.flow_handle_4 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT);
+ req.flow_handle_5 = cpu_to_le16(HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_ROCEV2_CNT |
+ HWRM_CFA_FLOW_INFO_INPUT_FLOW_HANDLE_DIR_RX);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+ rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
+ if (rc) {
+ dev_err(rdev_to_dev(rdev),
+ "Failed to get CFA Flow stats : rc = 0x%x\n", rc);
+ goto done;
+ }
+
+ tmp_counters[0].cnp_tx_pkts = le64_to_cpu(resp.packet_0);
+ tmp_counters[0].cnp_tx_bytes = le64_to_cpu(resp.byte_0);
+ tmp_counters[0].cnp_rx_pkts = le64_to_cpu(resp.packet_1);
+ tmp_counters[0].cnp_rx_bytes = le64_to_cpu(resp.byte_1);
+
+ roce_only_tmp[0].tx_pkts = le64_to_cpu(resp.packet_2) +
+ le64_to_cpu(resp.packet_4);
+ roce_only_tmp[0].tx_bytes = le64_to_cpu(resp.byte_2) +
+ le64_to_cpu(resp.byte_4);
+ roce_only_tmp[0].rx_pkts = le64_to_cpu(resp.packet_3) +
+ le64_to_cpu(resp.packet_5);
+ roce_only_tmp[0].rx_bytes = le64_to_cpu(resp.byte_3) +
+ le64_to_cpu(resp.byte_5);
+
+ cnps = &rdev->stats.cnps;
+ dstat = &rdev->stats.dstat;
+ if (!cnps->is_first) {
+ /* First query: snapshot the counters as the previous values. */
+ cnps->is_first = true;
+ cnps->prev[0].cnp_tx_pkts = tmp_counters[0].cnp_tx_pkts;
+ cnps->prev[0].cnp_tx_bytes = tmp_counters[0].cnp_tx_bytes;
+ cnps->prev[0].cnp_rx_pkts = tmp_counters[0].cnp_rx_pkts;
+ cnps->prev[0].cnp_rx_bytes = tmp_counters[0].cnp_rx_bytes;
+
+ cnps->prev[1].cnp_tx_pkts = tmp_counters[1].cnp_tx_pkts;
+ cnps->prev[1].cnp_tx_bytes = tmp_counters[1].cnp_tx_bytes;
+ cnps->prev[1].cnp_rx_pkts = tmp_counters[1].cnp_rx_pkts;
+ cnps->prev[1].cnp_rx_bytes = tmp_counters[1].cnp_rx_bytes;
+
+ dstat->prev[0].tx_pkts = roce_only_tmp[0].tx_pkts;
+ dstat->prev[0].tx_bytes = roce_only_tmp[0].tx_bytes;
+ dstat->prev[0].rx_pkts = roce_only_tmp[0].rx_pkts;
+ dstat->prev[0].rx_bytes = roce_only_tmp[0].rx_bytes;
+
+ dstat->prev[1].tx_pkts = roce_only_tmp[1].tx_pkts;
+ dstat->prev[1].tx_bytes = roce_only_tmp[1].tx_bytes;
+ dstat->prev[1].rx_pkts = roce_only_tmp[1].rx_pkts;
+ dstat->prev[1].rx_bytes = roce_only_tmp[1].rx_bytes;
+ } else {
+ u64 byte_mask, pkts_mask;
+ u64 diff;
+
+ byte_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
+ BYTE_MASK);
+ pkts_mask = bnxt_re_get_cfa_stat_mask(rdev->chip_ctx,
+ PKTS_MASK);
+ /*
+ * Calculate the number of CNP packets and use
+ * the value to calculate the CRC bytes:
+ * multiply pkts by 4 and add it to the total bytes.
+ */
+ pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_pkts,
+ &cnps->prev[0].cnp_tx_pkts,
+ pkts_mask);
+ cnps->cur[0].cnp_tx_pkts += pkts;
+ diff = bnxt_re_stat_diff(tmp_counters[0].cnp_tx_bytes,
+ &cnps->prev[0].cnp_tx_bytes,
+ byte_mask);
+ bytes = diff + pkts * 4;
+ cnps->cur[0].cnp_tx_bytes += bytes;
+ pkts = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_pkts,
+ &cnps->prev[0].cnp_rx_pkts,
+ pkts_mask);
+ cnps->cur[0].cnp_rx_pkts += pkts;
+ bytes = bnxt_re_stat_diff(tmp_counters[0].cnp_rx_bytes,
+ &cnps->prev[0].cnp_rx_bytes,
+ byte_mask);
+ cnps->cur[0].cnp_rx_bytes += bytes;
+
+ /*
+ * Calculate the number of CNP packets and use
+ * the value to calculate the CRC bytes:
+ * multiply pkts by 4 and add it to the total bytes.
+ */
+ pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_pkts,
+ &cnps->prev[1].cnp_tx_pkts,
+ pkts_mask);
+ cnps->cur[1].cnp_tx_pkts += pkts;
+ diff = bnxt_re_stat_diff(tmp_counters[1].cnp_tx_bytes,
+ &cnps->prev[1].cnp_tx_bytes,
+ byte_mask);
+ cnps->cur[1].cnp_tx_bytes += diff + pkts * 4;
+ pkts = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_pkts,
+ &cnps->prev[1].cnp_rx_pkts,
+ pkts_mask);
+ cnps->cur[1].cnp_rx_pkts += pkts;
+ bytes = bnxt_re_stat_diff(tmp_counters[1].cnp_rx_bytes,
+ &cnps->prev[1].cnp_rx_bytes,
+ byte_mask);
+ cnps->cur[1].cnp_rx_bytes += bytes;
+
+ pkts = bnxt_re_stat_diff(roce_only_tmp[0].tx_pkts,
+ &dstat->prev[0].tx_pkts,
+ pkts_mask);
+ dstat->cur[0].tx_pkts += pkts;
+ diff = bnxt_re_stat_diff(roce_only_tmp[0].tx_bytes,
+ &dstat->prev[0].tx_bytes,
+ byte_mask);
+ dstat->cur[0].tx_bytes += diff + pkts * 4;
+ pkts = bnxt_re_stat_diff(roce_only_tmp[0].rx_pkts,
+ &dstat->prev[0].rx_pkts,
+ pkts_mask);
+ dstat->cur[0].rx_pkts += pkts;
+
+ bytes = bnxt_re_stat_diff(roce_only_tmp[0].rx_bytes,
+ &dstat->prev[0].rx_bytes,
+ byte_mask);
+ dstat->cur[0].rx_bytes += bytes;
+ pkts = bnxt_re_stat_diff(roce_only_tmp[1].tx_pkts,
+ &dstat->prev[1].tx_pkts,
+ pkts_mask);
+ dstat->cur[1].tx_pkts += pkts;
+ diff = bnxt_re_stat_diff(roce_only_tmp[1].tx_bytes,
+ &dstat->prev[1].tx_bytes,
+ byte_mask);
+ dstat->cur[1].tx_bytes += diff + pkts * 4;
+ pkts = bnxt_re_stat_diff(roce_only_tmp[1].rx_pkts,
+ &dstat->prev[1].rx_pkts,
+ pkts_mask);
+ dstat->cur[1].rx_pkts += pkts;
+ bytes = bnxt_re_stat_diff(roce_only_tmp[1].rx_bytes,
+ &dstat->prev[1].rx_bytes,
+ byte_mask);
+ dstat->cur[1].rx_bytes += bytes;
+ }
+done:
+ return rc;
+}
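+
+/*
+ * Worked example of the CRC adjustment above (assumed numbers): if the
+ * masked diff reports 100 new CNP tx packets and 150000 new tx bytes,
+ * the 4 FCS bytes per packet are added back, so the accumulated byte
+ * count grows by 150000 + 100 * 4 = 150400.
+ */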
+
+static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
+ u8 indx, struct bnxt_qplib_ext_stat *s)
+{
+ struct bnxt_re_ext_roce_stats *e_errs;
+ struct bnxt_re_cnp_counters *cnp;
+ struct bnxt_re_ext_rstat *ext_d;
+ struct bnxt_re_ro_counters *ro;
+
+ cnp = &rdev->stats.cnps.cur[indx];
+ ro = &rdev->stats.dstat.cur[indx];
+ ext_d = &rdev->stats.dstat.ext_rstat[indx];
+ e_errs = &rdev->stats.dstat.e_errs;
+
+ cnp->cnp_tx_pkts = s->tx_cnp;
+ cnp->cnp_rx_pkts = s->rx_cnp;
+ /* In bonding mode do not duplicate other stats */
+ if (indx)
+ return;
+ cnp->ecn_marked = s->rx_ecn_marked;
+
+ ro->tx_pkts = s->tx_roce_pkts;
+ ro->tx_bytes = s->tx_roce_bytes;
+ ro->rx_pkts = s->rx_roce_pkts;
+ ro->rx_bytes = s->rx_roce_bytes;
+
+ ext_d->tx.atomic_req = s->tx_atomic_req;
+ ext_d->tx.read_req = s->tx_read_req;
+ ext_d->tx.read_resp = s->tx_read_res;
+ ext_d->tx.write_req = s->tx_write_req;
+ ext_d->tx.send_req = s->tx_send_req;
+ ext_d->rx.atomic_req = s->rx_atomic_req;
+ ext_d->rx.read_req = s->rx_read_req;
+ ext_d->rx.read_resp = s->rx_read_res;
+ ext_d->rx.write_req = s->rx_write_req;
+ ext_d->rx.send_req = s->rx_send_req;
+ ext_d->grx.rx_pkts = s->rx_roce_good_pkts;
+ ext_d->grx.rx_bytes = s->rx_roce_good_bytes;
+ ext_d->rx_dcn_payload_cut = s->rx_dcn_payload_cut;
+ ext_d->te_bypassed = s->te_bypassed;
+ e_errs->oob = s->rx_out_of_buffer;
+ e_errs->oos = s->rx_out_of_sequence;
+ e_errs->seq_err_naks_rcvd = s->seq_err_naks_rcvd;
+ e_errs->rnr_naks_rcvd = s->rnr_naks_rcvd;
+ e_errs->missing_resp = s->missing_resp;
+ e_errs->to_retransmits = s->to_retransmits;
+ e_errs->dup_req = s->dup_req;
+}
+
+static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_ext_stat estat[2] = {{}, {}};
+ struct bnxt_qplib_query_stats_info sinfo;
+ u32 fid;
+ int rc;
+
+ fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+ /* Set default values for sinfo */
+ sinfo.function_id = 0xFFFFFFFF;
+ sinfo.collection_id = 0xFF;
+ sinfo.vf_valid = false;
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, &estat[0], &sinfo);
+ if (rc)
+ goto done;
+ bnxt_re_copy_ext_stats(rdev, 0, &estat[0]);
+
+done:
+ return rc;
+}
+
+static void bnxt_re_copy_rstat(struct bnxt_re_rdata_counters *d,
+ struct ctx_hw_stats_ext *s,
+ bool is_thor)
+{
+ d->tx_ucast_pkts = le64_to_cpu(s->tx_ucast_pkts);
+ d->tx_mcast_pkts = le64_to_cpu(s->tx_mcast_pkts);
+ d->tx_bcast_pkts = le64_to_cpu(s->tx_bcast_pkts);
+ d->tx_discard_pkts = le64_to_cpu(s->tx_discard_pkts);
+ d->tx_error_pkts = le64_to_cpu(s->tx_error_pkts);
+ d->tx_ucast_bytes = le64_to_cpu(s->tx_ucast_bytes);
+ /* Add four CRC bytes per packet */
+ d->tx_ucast_bytes += d->tx_ucast_pkts * 4;
+ d->tx_mcast_bytes = le64_to_cpu(s->tx_mcast_bytes);
+ d->tx_bcast_bytes = le64_to_cpu(s->tx_bcast_bytes);
+ d->rx_ucast_pkts = le64_to_cpu(s->rx_ucast_pkts);
+ d->rx_mcast_pkts = le64_to_cpu(s->rx_mcast_pkts);
+ d->rx_bcast_pkts = le64_to_cpu(s->rx_bcast_pkts);
+ d->rx_discard_pkts = le64_to_cpu(s->rx_discard_pkts);
+ d->rx_error_pkts = le64_to_cpu(s->rx_error_pkts);
+ d->rx_ucast_bytes = le64_to_cpu(s->rx_ucast_bytes);
+ d->rx_mcast_bytes = le64_to_cpu(s->rx_mcast_bytes);
+ d->rx_bcast_bytes = le64_to_cpu(s->rx_bcast_bytes);
+ if (is_thor) {
+ d->rx_agg_pkts = le64_to_cpu(s->rx_tpa_pkt);
+ d->rx_agg_bytes = le64_to_cpu(s->rx_tpa_bytes);
+ d->rx_agg_events = le64_to_cpu(s->rx_tpa_events);
+ d->rx_agg_aborts = le64_to_cpu(s->rx_tpa_errors);
+ }
+}
+
+static void bnxt_re_get_roce_data_stats(struct bnxt_re_dev *rdev)
+{
+ bool is_thor = _is_chip_gen_p5_p7(rdev->chip_ctx);
+ struct bnxt_re_rdata_counters *rstat;
+
+ rstat = &rdev->stats.dstat.rstat[0];
+ bnxt_re_copy_rstat(rstat, rdev->qplib_res.hctx->stats.dma, is_thor);
+}
+
+int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev)
+{
+ struct bnxt_qplib_query_stats_info sinfo;
+ int rc = 0;
+
+ /* Stats are in 1s cadence */
+ if (test_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS, &rdev->flags)) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn))
+ rc = bnxt_re_get_ext_stat(rdev);
+ else
+ rc = bnxt_re_get_qos_stats(rdev);
+
+ if (rc && rc != -ENOMEM)
+ clear_bit(BNXT_RE_FLAG_ISSUE_CFA_FLOW_STATS,
+ &rdev->flags);
+ }
+
+ if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
+ bnxt_re_get_roce_data_stats(rdev);
+
+ /* Set default values for sinfo */
+ sinfo.function_id = 0xFFFFFFFF;
+ sinfo.collection_id = 0xFF;
+ sinfo.vf_valid = false;
+ rc = bnxt_qplib_get_roce_error_stats(&rdev->rcfw,
+ &rdev->stats.dstat.errs,
+ &sinfo);
+ if (rc && rc != -ENOMEM)
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ }
+
+ return rc;
+}
+
+static const char * const bnxt_re_stat_descs[] = {
+ "link_state",
+ "max_qp",
+ "max_srq",
+ "max_cq",
+ "max_mr",
+ "max_mw",
+ "max_ah",
+ "max_pd",
+ "active_qp",
+ "active_rc_qp",
+ "active_ud_qp",
+ "active_srq",
+ "active_cq",
+ "active_mr",
+ "active_mw",
+ "active_ah",
+ "active_pd",
+ "qp_watermark",
+ "rc_qp_watermark",
+ "ud_qp_watermark",
+ "srq_watermark",
+ "cq_watermark",
+ "mr_watermark",
+ "mw_watermark",
+ "ah_watermark",
+ "pd_watermark",
+ "resize_cq_count",
+ "hw_retransmission",
+ "recoverable_errors",
+ "rx_pkts",
+ "rx_bytes",
+ "tx_pkts",
+ "tx_bytes",
+ "cnp_tx_pkts",
+ "cnp_tx_bytes",
+ "cnp_rx_pkts",
+ "cnp_rx_bytes",
+ "roce_only_rx_pkts",
+ "roce_only_rx_bytes",
+ "roce_only_tx_pkts",
+ "roce_only_tx_bytes",
+ "rx_roce_error_pkts",
+ "rx_roce_discard_pkts",
+ "tx_roce_error_pkts",
+ "tx_roce_discards_pkts",
+ "res_oob_drop_count",
+ "tx_atomic_req",
+ "rx_atomic_req",
+ "tx_read_req",
+ "tx_read_resp",
+ "rx_read_req",
+ "rx_read_resp",
+ "tx_write_req",
+ "rx_write_req",
+ "tx_send_req",
+ "rx_send_req",
+ "rx_good_pkts",
+ "rx_good_bytes",
+ "rx_dcn_payload_cut",
+ "te_bypassed",
+ "rx_ecn_marked_pkts",
+ "max_retry_exceeded",
+ "to_retransmits",
+ "seq_err_naks_rcvd",
+ "rnr_naks_rcvd",
+ "missing_resp",
+ "dup_reqs",
+ "unrecoverable_err",
+ "bad_resp_err",
+ "local_qp_op_err",
+ "local_protection_err",
+ "mem_mgmt_op_err",
+ "remote_invalid_req_err",
+ "remote_access_err",
+ "remote_op_err",
+ "res_exceed_max",
+ "res_length_mismatch",
+ "res_exceeds_wqe",
+ "res_opcode_err",
+ "res_rx_invalid_rkey",
+ "res_rx_domain_err",
+ "res_rx_no_perm",
+ "res_rx_range_err",
+ "res_tx_invalid_rkey",
+ "res_tx_domain_err",
+ "res_tx_no_perm",
+ "res_tx_range_err",
+ "res_irrq_oflow",
+ "res_unsup_opcode",
+ "res_unaligned_atomic",
+ "res_rem_inv_err",
+ "res_mem_error64",
+ "res_srq_err",
+ "res_cmp_err",
+ "res_invalid_dup_rkey",
+ "res_wqe_format_err",
+ "res_cq_load_err",
+ "res_srq_load_err",
+ "res_tx_pci_err",
+ "res_rx_pci_err",
+ "res_oos_drop_count",
+ "num_irq_started",
+ "num_irq_stopped",
+ "poll_in_intr_en",
+ "poll_in_intr_dis",
+ "cmdq_full_dbg_cnt",
+ "fw_service_prof_type_sup",
+ "dbq_int_recv",
+ "dbq_int_en",
+ "dbq_pacing_resched",
+ "dbq_pacing_complete",
+ "dbq_pacing_alerts",
+ "dbq_dbr_fifo_reg"
+};
+
+static void bnxt_re_print_ext_stat(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_cnp_counters *cnp;
+ struct bnxt_re_ext_rstat *ext_s;
+
+ ext_s = &rdev->stats.dstat.ext_rstat[0];
+ cnp = &rdev->stats.cnps.cur[0];
+
+ stats->value[BNXT_RE_TX_ATOMIC_REQ] = ext_s->tx.atomic_req;
+ stats->value[BNXT_RE_RX_ATOMIC_REQ] = ext_s->rx.atomic_req;
+ stats->value[BNXT_RE_TX_READ_REQ] = ext_s->tx.read_req;
+ stats->value[BNXT_RE_TX_READ_RESP] = ext_s->tx.read_resp;
+ stats->value[BNXT_RE_RX_READ_REQ] = ext_s->rx.read_req;
+ stats->value[BNXT_RE_RX_READ_RESP] = ext_s->rx.read_resp;
+ stats->value[BNXT_RE_TX_WRITE_REQ] = ext_s->tx.write_req;
+ stats->value[BNXT_RE_RX_WRITE_REQ] = ext_s->rx.write_req;
+ stats->value[BNXT_RE_TX_SEND_REQ] = ext_s->tx.send_req;
+ stats->value[BNXT_RE_RX_SEND_REQ] = ext_s->rx.send_req;
+ stats->value[BNXT_RE_RX_GOOD_PKTS] = ext_s->grx.rx_pkts;
+ stats->value[BNXT_RE_RX_GOOD_BYTES] = ext_s->grx.rx_bytes;
+ if (_is_chip_p7(rdev->chip_ctx)) {
+ stats->value[BNXT_RE_RX_DCN_PAYLOAD_CUT] = ext_s->rx_dcn_payload_cut;
+ stats->value[BNXT_RE_TE_BYPASSED] = ext_s->te_bypassed;
+ }
+ stats->value[BNXT_RE_RX_ECN_MARKED_PKTS] = cnp->ecn_marked;
+}
+
+static void bnxt_re_print_roce_only_counters(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_ro_counters *roce_only = &rdev->stats.dstat.cur[0];
+
+ stats->value[BNXT_RE_ROCE_ONLY_RX_PKTS] = roce_only->rx_pkts;
+ stats->value[BNXT_RE_ROCE_ONLY_RX_BYTES] = roce_only->rx_bytes;
+ stats->value[BNXT_RE_ROCE_ONLY_TX_PKTS] = roce_only->tx_pkts;
+ stats->value[BNXT_RE_ROCE_ONLY_TX_BYTES] = roce_only->tx_bytes;
+}
+
+static void bnxt_re_print_normal_total_counters(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_ro_counters *roce_only;
+ struct bnxt_re_cc_stat *cnps;
+
+ cnps = &rdev->stats.cnps;
+ roce_only = &rdev->stats.dstat.cur[0];
+
+ stats->value[BNXT_RE_RX_PKTS] = cnps->cur[0].cnp_rx_pkts + roce_only->rx_pkts;
+ stats->value[BNXT_RE_RX_BYTES] = cnps->cur[0].cnp_rx_bytes + roce_only->rx_bytes;
+ stats->value[BNXT_RE_TX_PKTS] = cnps->cur[0].cnp_tx_pkts + roce_only->tx_pkts;
+ stats->value[BNXT_RE_TX_BYTES] = cnps->cur[0].cnp_tx_bytes + roce_only->tx_bytes;
+}
+
+static void bnxt_re_print_normal_counters(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *rstats)
+{
+ struct bnxt_re_rdata_counters *stats;
+ struct bnxt_re_cc_stat *cnps;
+ bool en_disp;
+
+ stats = &rdev->stats.dstat.rstat[0];
+ cnps = &rdev->stats.cnps;
+ en_disp = !_is_chip_gen_p5_p7(rdev->chip_ctx);
+
+ bnxt_re_print_normal_total_counters(rdev, rstats);
+ if (!rdev->is_virtfn) {
+ rstats->value[BNXT_RE_CNP_TX_PKTS] = cnps->cur[0].cnp_tx_pkts;
+ if (en_disp)
+ rstats->value[BNXT_RE_CNP_TX_BYTES] = cnps->cur[0].cnp_tx_bytes;
+ rstats->value[BNXT_RE_CNP_RX_PKTS] = cnps->cur[0].cnp_rx_pkts;
+ if (en_disp)
+ rstats->value[BNXT_RE_CNP_RX_BYTES] = cnps->cur[0].cnp_rx_bytes;
+ }
+ /* Report RoCE-only bytes; the CNP counters above also include RoCE packets */
+ bnxt_re_print_roce_only_counters(rdev, rstats);
+
+ rstats->value[BNXT_RE_RX_ROCE_ERROR_PKTS] = stats ? stats->rx_error_pkts : 0;
+ rstats->value[BNXT_RE_RX_ROCE_DISCARD_PKTS] = stats ? stats->rx_discard_pkts : 0;
+ if (!en_disp) {
+ rstats->value[BNXT_RE_TX_ROCE_ERROR_PKTS] = stats ? stats->tx_error_pkts : 0;
+ rstats->value[BNXT_RE_TX_ROCE_DISCARDS_PKTS] = stats ? stats->tx_discard_pkts : 0;
+ }
+
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
+ rstats->value[BNXT_RE_RES_OOB_DROP_COUNT] = rdev->stats.dstat.e_errs.oob;
+ bnxt_re_print_ext_stat(rdev, rstats);
+ }
+}
+
+static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_re_dbr_sw_stats *dbr_sw_stats = rdev->dbr_sw_stats;
+
+ stats->value[BNXT_RE_DBQ_PACING_RESCHED] = dbr_sw_stats->dbq_pacing_resched;
+ stats->value[BNXT_RE_DBQ_PACING_CMPL] = dbr_sw_stats->dbq_pacing_complete;
+ stats->value[BNXT_RE_DBQ_PACING_ALERT] = dbr_sw_stats->dbq_pacing_alerts;
+ stats->value[BNXT_RE_DBQ_DBR_FIFO_REG] = readl_fbsd(rdev->en_dev->softc,
+ rdev->dbr_db_fifo_reg_off, 0);
+}
+
+int bnxt_re_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index)
+{
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ struct bnxt_re_ext_roce_stats *e_errs;
+ struct bnxt_re_rdata_counters *rstat;
+ struct bnxt_qplib_roce_stats *errs;
+ unsigned long tstamp_diff;
+ struct pci_dev *pdev;
+ int sched_msec;
+ int rc = 0;
+
+ if (!port || !stats)
+ return -EINVAL;
+
+ if (!rdev)
+ return -ENODEV;
+
+ if (!__bnxt_re_is_rdev_valid(rdev))
+ return -ENODEV;
+
+ pdev = rdev->en_dev->pdev;
+ errs = &rdev->stats.dstat.errs;
+ rstat = &rdev->stats.dstat.rstat[0];
+ e_errs = &rdev->stats.dstat.e_errs;
+#define BNXT_RE_STATS_CTX_UPDATE_TIMER 250
+ sched_msec = BNXT_RE_STATS_CTX_UPDATE_TIMER;
+ tstamp_diff = jiffies - rdev->stats.read_tstamp;
+ if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags)) {
+ if (/* restrict_stats && */ tstamp_diff < msecs_to_jiffies(sched_msec))
+ goto skip_query;
+ rc = bnxt_re_get_device_stats(rdev);
+ if (rc)
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query device stats\n");
+ rdev->stats.read_tstamp = jiffies;
+ }
+
+ if (rdev->dbr_pacing)
+ bnxt_re_copy_db_pacing_stats(rdev, stats);
+
+skip_query:
+
+ if (rdev->netdev)
+ stats->value[BNXT_RE_LINK_STATE] = bnxt_re_link_state(rdev);
+ stats->value[BNXT_RE_MAX_QP] = rdev->dev_attr->max_qp;
+ stats->value[BNXT_RE_MAX_SRQ] = rdev->dev_attr->max_srq;
+ stats->value[BNXT_RE_MAX_CQ] = rdev->dev_attr->max_cq;
+ stats->value[BNXT_RE_MAX_MR] = rdev->dev_attr->max_mr;
+ stats->value[BNXT_RE_MAX_MW] = rdev->dev_attr->max_mw;
+ stats->value[BNXT_RE_MAX_AH] = rdev->dev_attr->max_ah;
+ stats->value[BNXT_RE_MAX_PD] = rdev->dev_attr->max_pd;
+ stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&rdev->stats.rsors.qp_count);
+ stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&rdev->stats.rsors.rc_qp_count);
+ stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&rdev->stats.rsors.ud_qp_count);
+ stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&rdev->stats.rsors.srq_count);
+ stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->stats.rsors.cq_count);
+ stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->stats.rsors.mr_count);
+ stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->stats.rsors.mw_count);
+ stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->stats.rsors.ah_count);
+ stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->stats.rsors.pd_count);
+ stats->value[BNXT_RE_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_qp_count);
+ stats->value[BNXT_RE_RC_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_rc_qp_count);
+ stats->value[BNXT_RE_UD_QP_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ud_qp_count);
+ stats->value[BNXT_RE_SRQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_srq_count);
+ stats->value[BNXT_RE_CQ_WATERMARK] = atomic_read(&rdev->stats.rsors.max_cq_count);
+ stats->value[BNXT_RE_MR_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mr_count);
+ stats->value[BNXT_RE_MW_WATERMARK] = atomic_read(&rdev->stats.rsors.max_mw_count);
+ stats->value[BNXT_RE_AH_WATERMARK] = atomic_read(&rdev->stats.rsors.max_ah_count);
+ stats->value[BNXT_RE_PD_WATERMARK] = atomic_read(&rdev->stats.rsors.max_pd_count);
+ stats->value[BNXT_RE_RESIZE_CQ_COUNT] = atomic_read(&rdev->stats.rsors.resize_count);
+ stats->value[BNXT_RE_HW_RETRANSMISSION] = BNXT_RE_HW_RETX(rdev->dev_attr->dev_cap_flags) ? 1 : 0;
+ stats->value[BNXT_RE_RECOVERABLE_ERRORS] = rstat ? rstat->tx_bcast_pkts : 0;
+
+ bnxt_re_print_normal_counters(rdev, stats);
+
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = errs->max_retry_exceeded;
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn) &&
+ _is_hw_retx_supported(rdev->dev_attr->dev_cap_flags)) {
+ stats->value[BNXT_RE_TO_RETRANSMITS] = e_errs->to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = e_errs->seq_err_naks_rcvd;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] = e_errs->rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] = e_errs->missing_resp;
+ stats->value[BNXT_RE_DUP_REQS] = e_errs->dup_req;
+ } else {
+ stats->value[BNXT_RE_TO_RETRANSMITS] = errs->to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = errs->seq_err_naks_rcvd;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] = errs->rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] = errs->missing_resp;
+ stats->value[BNXT_RE_DUP_REQS] = errs->dup_req;
+ }
+
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] = errs->unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] = errs->bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = errs->local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = errs->local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = errs->mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = errs->remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = errs->remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] = errs->remote_op_err;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] = errs->res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = errs->res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] = errs->res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] = errs->res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = errs->res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = errs->res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] = errs->res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] = errs->res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = errs->res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = errs->res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] = errs->res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] = errs->res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] = errs->res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] = errs->res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = errs->res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] = errs->res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR64] = errs->res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] = errs->res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] = errs->res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = errs->res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = errs->res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = errs->res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = errs->res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] = errs->res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] = errs->res_rx_pci_err;
+
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
+ stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = e_errs->oos;
+ } else {
+ /* Display on function 0 as OOS counters are chip-wide */
+ if (PCI_FUNC(pdev->devfn) == 0)
+ stats->value[BNXT_RE_RES_OOS_DROP_COUNT] = errs->res_oos_drop_count;
+ }
+ stats->value[BNXT_RE_NUM_IRQ_STARTED] = rdev->rcfw.num_irq_started;
+ stats->value[BNXT_RE_NUM_IRQ_STOPPED] = rdev->rcfw.num_irq_stopped;
+ stats->value[BNXT_RE_POLL_IN_INTR_EN] = rdev->rcfw.poll_in_intr_en;
+ stats->value[BNXT_RE_POLL_IN_INTR_DIS] = rdev->rcfw.poll_in_intr_dis;
+ stats->value[BNXT_RE_CMDQ_FULL_DBG_CNT] = rdev->rcfw.cmdq_full_dbg;
+ if (!rdev->is_virtfn)
+ stats->value[BNXT_RE_FW_SERVICE_PROF_TYPE_SUP] = is_qport_service_type_supported(rdev);
+
+ return ARRAY_SIZE(bnxt_re_stat_descs);
+}
+
+struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
+ u8 port_num)
+{
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs,
+ ARRAY_SIZE(bnxt_re_stat_descs),
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+}
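+
+/*
+ * A minimal wiring sketch (assumed provider code, not from this file):
+ * the verbs layer reaches these counters through the ib_device callbacks,
+ * roughly
+ *
+ *	ibdev->alloc_hw_stats = bnxt_re_alloc_hw_port_stats;
+ *	ibdev->get_hw_stats = bnxt_re_get_hw_stats;
+ *
+ * rdma_alloc_hw_stats_struct() sizes the stats buffer from
+ * bnxt_re_stat_descs, and bnxt_re_get_hw_stats() reports how many values
+ * it filled by returning ARRAY_SIZE(bnxt_re_stat_descs).
+ */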
diff --git a/sys/dev/bnxt/bnxt_re/stats.h b/sys/dev/bnxt/bnxt_re/stats.h
new file mode 100644
index 000000000000..748d8165947b
--- /dev/null
+++ b/sys/dev/bnxt/bnxt_re/stats.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2015-2024, Broadcom. All rights reserved. The term
+ * Broadcom refers to Broadcom Limited and/or its subsidiaries.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Description: statistics related data structures
+ */
+
+#ifndef __STATS_H__
+#define __STATS_H__
+
+#define BNXT_RE_CFA_STAT_BYTES_MASK 0xFFFFFFFFF
+#define BNXT_RE_CFA_STAT_PKTS_MASK 0xFFFFFFF
+enum {
+ BYTE_MASK = 0,
+ PKTS_MASK = 1
+};
+
+struct bnxt_re_cnp_counters {
+ u64 cnp_tx_pkts;
+ u64 cnp_tx_bytes;
+ u64 cnp_rx_pkts;
+ u64 cnp_rx_bytes;
+ u64 ecn_marked;
+};
+
+struct bnxt_re_ro_counters {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 rx_pkts;
+ u64 rx_bytes;
+};
+
+struct bnxt_re_flow_counters {
+ struct bnxt_re_ro_counters ro_stats;
+ struct bnxt_re_cnp_counters cnp_stats;
+};
+
+struct bnxt_re_ext_cntr {
+ u64 atomic_req;
+ u64 read_req;
+ u64 read_resp;
+ u64 write_req;
+ u64 send_req;
+};
+
+struct bnxt_re_ext_good {
+ u64 rx_pkts;
+ u64 rx_bytes;
+};
+
+struct bnxt_re_ext_rstat {
+ struct bnxt_re_ext_cntr tx;
+ struct bnxt_re_ext_cntr rx;
+ struct bnxt_re_ext_good grx;
+ u64 rx_dcn_payload_cut;
+ u64 te_bypassed;
+};
+
+struct bnxt_re_rdata_counters {
+ u64 tx_ucast_pkts;
+ u64 tx_mcast_pkts;
+ u64 tx_bcast_pkts;
+ u64 tx_discard_pkts;
+ u64 tx_error_pkts;
+ u64 tx_ucast_bytes;
+ u64 tx_mcast_bytes;
+ u64 tx_bcast_bytes;
+ u64 rx_ucast_pkts;
+ u64 rx_mcast_pkts;
+ u64 rx_bcast_pkts;
+ u64 rx_discard_pkts;
+ u64 rx_error_pkts;
+ u64 rx_ucast_bytes;
+ u64 rx_mcast_bytes;
+ u64 rx_bcast_bytes;
+ u64 rx_agg_pkts;
+ u64 rx_agg_bytes;
+ u64 rx_agg_events;
+ u64 rx_agg_aborts;
+};
+
+struct bnxt_re_cc_stat {
+ struct bnxt_re_cnp_counters prev[2];
+ struct bnxt_re_cnp_counters cur[2];
+ bool is_first;
+};
+
+struct bnxt_re_ext_roce_stats {
+ u64 oob;
+ u64 oos;
+ u64 seq_err_naks_rcvd;
+ u64 rnr_naks_rcvd;
+ u64 missing_resp;
+ u64 to_retransmits;
+ u64 dup_req;
+};
+
+struct bnxt_re_rstat {
+ struct bnxt_re_ro_counters prev[2];
+ struct bnxt_re_ro_counters cur[2];
+ struct bnxt_re_rdata_counters rstat[2];
+ struct bnxt_re_ext_rstat ext_rstat[2];
+ struct bnxt_re_ext_roce_stats e_errs;
+ struct bnxt_qplib_roce_stats errs;
+ unsigned long long prev_oob;
+};
+
+struct bnxt_re_res_cntrs {
+ atomic_t qp_count;
+ atomic_t rc_qp_count;
+ atomic_t ud_qp_count;
+ atomic_t cq_count;
+ atomic_t srq_count;
+ atomic_t mr_count;
+ atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
+ atomic_t resize_count;
+ atomic_t max_qp_count;
+ atomic_t max_rc_qp_count;
+ atomic_t max_ud_qp_count;
+ atomic_t max_cq_count;
+ atomic_t max_srq_count;
+ atomic_t max_mr_count;
+ atomic_t max_mw_count;
+ atomic_t max_ah_count;
+ atomic_t max_pd_count;
+};
+
+struct bnxt_re_device_stats {
+ struct bnxt_re_rstat dstat;
+ struct bnxt_re_res_cntrs rsors;
+ struct bnxt_re_cc_stat cnps;
+ unsigned long read_tstamp;
+ /* Used to disable the stats query from the worker or to change the
+ * query interval. 0 means the stats query is disabled.
+ */
+ u32 stats_query_sec;
+ /* A free-running counter used along with stats_query_sec to decide
+ * whether to issue the command to the FW.
+ */
+ u32 stats_query_counter;
+};
+
+static inline u64 bnxt_re_get_cfa_stat_mask(struct bnxt_qplib_chip_ctx *cctx,
+ bool type)
+{
+ u64 mask;
+
+ if (type == BYTE_MASK) {
+ mask = BNXT_RE_CFA_STAT_BYTES_MASK; /* 36 bits */
+ if (_is_chip_gen_p5_p7(cctx))
+ mask >>= 0x01; /* 35 bits */
+ } else {
+ mask = BNXT_RE_CFA_STAT_PKTS_MASK; /* 28 bits */
+ if (_is_chip_gen_p5_p7(cctx))
+ mask |= (0x10000000ULL); /* 29 bits */
+ }
+
+ return mask;
+}
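+
+/*
+ * Sketch of the resulting masks (values follow from the defines above):
+ * pre-Gen-P5 chips use a 36-bit byte mask (0xFFFFFFFFF) and a 28-bit
+ * packet mask (0xFFFFFFF); Gen P5/P7 chips narrow bytes to 35 bits
+ * (0x7FFFFFFFF) and widen packets to 29 bits (0x1FFFFFFF).
+ */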
+
+static inline u64 bnxt_re_stat_diff(u64 cur, u64 *prev, u64 mask)
+{
+ u64 diff;
+
+ if (!cur)
+ return 0;
+ diff = (cur - *prev) & mask;
+ if (diff)
+ *prev = cur;
+ return diff;
+}
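+
+/*
+ * Worked example of the masked diff (assuming the 28-bit packet mask):
+ * if the hardware counter wrapped from *prev = 0xFFFFFF0 to cur = 0x10,
+ * then (cur - prev) & 0xFFFFFFF = 0x20, i.e. 32 packets, and *prev is
+ * advanced to cur. A cur of 0 is treated as "no data" and yields 0.
+ */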
+
+static inline void bnxt_re_clear_rsors_stat(struct bnxt_re_res_cntrs *rsors)
+{
+ atomic_set(&rsors->qp_count, 0);
+ atomic_set(&rsors->cq_count, 0);
+ atomic_set(&rsors->srq_count, 0);
+ atomic_set(&rsors->mr_count, 0);
+ atomic_set(&rsors->mw_count, 0);
+ atomic_set(&rsors->ah_count, 0);
+ atomic_set(&rsors->pd_count, 0);
+ atomic_set(&rsors->resize_count, 0);
+ atomic_set(&rsors->max_qp_count, 0);
+ atomic_set(&rsors->max_cq_count, 0);
+ atomic_set(&rsors->max_srq_count, 0);
+ atomic_set(&rsors->max_mr_count, 0);
+ atomic_set(&rsors->max_mw_count, 0);
+ atomic_set(&rsors->max_ah_count, 0);
+ atomic_set(&rsors->max_pd_count, 0);
+ atomic_set(&rsors->max_rc_qp_count, 0);
+ atomic_set(&rsors->max_ud_qp_count, 0);
+}
+
+enum bnxt_re_hw_stats {
+ BNXT_RE_LINK_STATE,
+ BNXT_RE_MAX_QP,
+ BNXT_RE_MAX_SRQ,
+ BNXT_RE_MAX_CQ,
+ BNXT_RE_MAX_MR,
+ BNXT_RE_MAX_MW,
+ BNXT_RE_MAX_AH,
+ BNXT_RE_MAX_PD,
+ BNXT_RE_ACTIVE_QP,
+ BNXT_RE_ACTIVE_RC_QP,
+ BNXT_RE_ACTIVE_UD_QP,
+ BNXT_RE_ACTIVE_SRQ,
+ BNXT_RE_ACTIVE_CQ,
+ BNXT_RE_ACTIVE_MR,
+ BNXT_RE_ACTIVE_MW,
+ BNXT_RE_ACTIVE_AH,
+ BNXT_RE_ACTIVE_PD,
+ BNXT_RE_QP_WATERMARK,
+ BNXT_RE_RC_QP_WATERMARK,
+ BNXT_RE_UD_QP_WATERMARK,
+ BNXT_RE_SRQ_WATERMARK,
+ BNXT_RE_CQ_WATERMARK,
+ BNXT_RE_MR_WATERMARK,
+ BNXT_RE_MW_WATERMARK,
+ BNXT_RE_AH_WATERMARK,
+ BNXT_RE_PD_WATERMARK,
+ BNXT_RE_RESIZE_CQ_COUNT,
+ BNXT_RE_HW_RETRANSMISSION,
+ BNXT_RE_RECOVERABLE_ERRORS,
+ BNXT_RE_RX_PKTS,
+ BNXT_RE_RX_BYTES,
+ BNXT_RE_TX_PKTS,
+ BNXT_RE_TX_BYTES,
+ BNXT_RE_CNP_TX_PKTS,
+ BNXT_RE_CNP_TX_BYTES,
+ BNXT_RE_CNP_RX_PKTS,
+ BNXT_RE_CNP_RX_BYTES,
+ BNXT_RE_ROCE_ONLY_RX_PKTS,
+ BNXT_RE_ROCE_ONLY_RX_BYTES,
+ BNXT_RE_ROCE_ONLY_TX_PKTS,
+ BNXT_RE_ROCE_ONLY_TX_BYTES,
+ BNXT_RE_RX_ROCE_ERROR_PKTS,
+ BNXT_RE_RX_ROCE_DISCARD_PKTS,
+ BNXT_RE_TX_ROCE_ERROR_PKTS,
+ BNXT_RE_TX_ROCE_DISCARDS_PKTS,
+ BNXT_RE_RES_OOB_DROP_COUNT,
+ BNXT_RE_TX_ATOMIC_REQ,
+ BNXT_RE_RX_ATOMIC_REQ,
+ BNXT_RE_TX_READ_REQ,
+ BNXT_RE_TX_READ_RESP,
+ BNXT_RE_RX_READ_REQ,
+ BNXT_RE_RX_READ_RESP,
+ BNXT_RE_TX_WRITE_REQ,
+ BNXT_RE_RX_WRITE_REQ,
+ BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_GOOD_PKTS,
+ BNXT_RE_RX_GOOD_BYTES,
+ BNXT_RE_RX_DCN_PAYLOAD_CUT,
+ BNXT_RE_TE_BYPASSED,
+ BNXT_RE_RX_ECN_MARKED_PKTS,
+ BNXT_RE_MAX_RETRY_EXCEEDED,
+ BNXT_RE_TO_RETRANSMITS,
+ BNXT_RE_SEQ_ERR_NAKS_RCVD,
+ BNXT_RE_RNR_NAKS_RCVD,
+ BNXT_RE_MISSING_RESP,
+ BNXT_RE_DUP_REQS,
+ BNXT_RE_UNRECOVERABLE_ERR,
+ BNXT_RE_BAD_RESP_ERR,
+ BNXT_RE_LOCAL_QP_OP_ERR,
+ BNXT_RE_LOCAL_PROTECTION_ERR,
+ BNXT_RE_MEM_MGMT_OP_ERR,
+ BNXT_RE_REMOTE_INVALID_REQ_ERR,
+ BNXT_RE_REMOTE_ACCESS_ERR,
+ BNXT_RE_REMOTE_OP_ERR,
+ BNXT_RE_RES_EXCEED_MAX,
+ BNXT_RE_RES_LENGTH_MISMATCH,
+ BNXT_RE_RES_EXCEEDS_WQE,
+ BNXT_RE_RES_OPCODE_ERR,
+ BNXT_RE_RES_RX_INVALID_RKEY,
+ BNXT_RE_RES_RX_DOMAIN_ERR,
+ BNXT_RE_RES_RX_NO_PERM,
+ BNXT_RE_RES_RX_RANGE_ERR,
+ BNXT_RE_RES_TX_INVALID_RKEY,
+ BNXT_RE_RES_TX_DOMAIN_ERR,
+ BNXT_RE_RES_TX_NO_PERM,
+ BNXT_RE_RES_TX_RANGE_ERR,
+ BNXT_RE_RES_IRRQ_OFLOW,
+ BNXT_RE_RES_UNSUP_OPCODE,
+ BNXT_RE_RES_UNALIGNED_ATOMIC,
+ BNXT_RE_RES_REM_INV_ERR,
+ BNXT_RE_RES_MEM_ERROR64,
+ BNXT_RE_RES_SRQ_ERR,
+ BNXT_RE_RES_CMP_ERR,
+ BNXT_RE_RES_INVALID_DUP_RKEY,
+ BNXT_RE_RES_WQE_FORMAT_ERR,
+ BNXT_RE_RES_CQ_LOAD_ERR,
+ BNXT_RE_RES_SRQ_LOAD_ERR,
+ BNXT_RE_RES_TX_PCI_ERR,
+ BNXT_RE_RES_RX_PCI_ERR,
+ BNXT_RE_RES_OOS_DROP_COUNT,
+ BNXT_RE_NUM_IRQ_STARTED,
+ BNXT_RE_NUM_IRQ_STOPPED,
+ BNXT_RE_POLL_IN_INTR_EN,
+ BNXT_RE_POLL_IN_INTR_DIS,
+ BNXT_RE_CMDQ_FULL_DBG_CNT,
+ BNXT_RE_FW_SERVICE_PROF_TYPE_SUP,
+ BNXT_RE_DBQ_INT_RECV,
+ BNXT_RE_DBQ_INT_EN,
+ BNXT_RE_DBQ_PACING_RESCHED,
+ BNXT_RE_DBQ_PACING_CMPL,
+ BNXT_RE_DBQ_PACING_ALERT,
+ BNXT_RE_DBQ_DBR_FIFO_REG,
+ BNXT_RE_DBQ_NUM_EXT_COUNTERS
+};
+
+#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+
+struct bnxt_re_stats {
+ struct bnxt_qplib_roce_stats errs;
+ struct bnxt_qplib_ext_stat ext_stat;
+};
+
+struct rdma_hw_stats *bnxt_re_alloc_hw_port_stats(struct ib_device *ibdev,
+ u8 port_num);
+int bnxt_re_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u8 port, int index);
+int bnxt_re_get_device_stats(struct bnxt_re_dev *rdev);
+int bnxt_re_get_flow_stats_from_service_pf(struct bnxt_re_dev *rdev,
+ struct bnxt_re_flow_counters *stats,
+ struct bnxt_qplib_query_stats_info *sinfo);
+int bnxt_re_get_qos_stats(struct bnxt_re_dev *rdev);
+#endif /* __STATS_H__ */
diff --git a/sys/dev/bwi/if_bwi.c b/sys/dev/bwi/if_bwi.c
index f9a6b2357cc0..85146d4c4010 100644
--- a/sys/dev/bwi/if_bwi.c
+++ b/sys/dev/bwi/if_bwi.c
@@ -498,6 +498,9 @@ bwi_attach(struct bwi_softc *sc)
IEEE80211_C_BGSCAN |
IEEE80211_C_MONITOR;
ic->ic_opmode = IEEE80211_M_STA;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_ifattach(ic);
ic->ic_headroom = sizeof(struct bwi_txbuf_hdr);
@@ -1361,6 +1364,7 @@ bwi_start_locked(struct bwi_softc *sc)
(m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
ni = (struct ieee80211_node *) m->m_pkthdr.rcvif;
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) != 0 &&
ieee80211_crypto_encap(ni, m) == NULL) {
if_inc_counter(ni->ni_vap->iv_ifp,
@@ -2916,7 +2920,7 @@ bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
uint32_t mac_ctrl;
uint16_t phy_ctrl;
bus_addr_t paddr;
- int type, ismcast, pkt_len, error, rix;
+ int type, ismcast, pkt_len, error;
#if 0
const uint8_t *p;
int i;
@@ -2943,15 +2947,10 @@ bwi_encap(struct bwi_softc *sc, int idx, struct mbuf *m,
} else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
rate = rate_fb = tp->ucastrate;
} else {
- rix = ieee80211_ratectl_rate(ni, NULL, pkt_len);
- rate = ni->ni_txrate;
-
- if (rix > 0) {
- rate_fb = ni->ni_rates.rs_rates[rix-1] &
- IEEE80211_RATE_VAL;
- } else {
- rate_fb = rate;
- }
+ ieee80211_ratectl_rate(ni, NULL, pkt_len);
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
+ /* TODO: assign rate_fb the previous rate, if available */
+ rate_fb = rate;
}
tb->tb_rate[0] = rate;
tb->tb_rate[1] = rate_fb;
diff --git a/sys/dev/bwn/if_bwn.c b/sys/dev/bwn/if_bwn.c
index 742ed63a92aa..ec9d56661034 100644
--- a/sys/dev/bwn/if_bwn.c
+++ b/sys/dev/bwn/if_bwn.c
@@ -774,6 +774,7 @@ bwn_attach_post(struct bwn_softc *sc)
;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
/* Determine the NVRAM variable containing our MAC address */
core_unit = bhnd_get_core_unit(sc->sc_dev);
@@ -999,6 +1000,7 @@ bwn_start(struct bwn_softc *sc)
continue;
}
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
@@ -2987,11 +2989,7 @@ bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index,
return (dr);
fail2:
- if (dr->dr_txhdr_cache != NULL) {
- contigfree(dr->dr_txhdr_cache,
- (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
- BWN_MAXTXHDRSIZE, M_DEVBUF);
- }
+ free(dr->dr_txhdr_cache, M_DEVBUF);
fail1:
free(dr->dr_meta, M_DEVBUF);
fail0:
@@ -3009,11 +3007,7 @@ bwn_dma_ringfree(struct bwn_dma_ring **dr)
bwn_dma_free_descbufs(*dr);
bwn_dma_free_ringmemory(*dr);
- if ((*dr)->dr_txhdr_cache != NULL) {
- contigfree((*dr)->dr_txhdr_cache,
- ((*dr)->dr_numslots / BWN_TX_SLOTS_PER_FRAME) *
- BWN_MAXTXHDRSIZE, M_DEVBUF);
- }
+ free((*dr)->dr_txhdr_cache, M_DEVBUF);
free((*dr)->dr_meta, M_DEVBUF);
free(*dr, M_DEVBUF);
@@ -6402,7 +6396,7 @@ bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni,
uint8_t *prot_ptr;
unsigned int len;
uint32_t macctl = 0;
- int rts_rate, rts_rate_fb, ismcast, isshort, rix, type;
+ int rts_rate, rts_rate_fb, ismcast, isshort, type;
uint16_t phyctl = 0;
uint8_t rate, rate_fb;
int fill_phy_ctl1 = 0;
@@ -6428,14 +6422,10 @@ bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni,
else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
rate = rate_fb = tp->ucastrate;
else {
- rix = ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
-
- if (rix > 0)
- rate_fb = ni->ni_rates.rs_rates[rix - 1] &
- IEEE80211_RATE_VAL;
- else
- rate_fb = rate;
+ ieee80211_ratectl_rate(ni, NULL, 0);
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
+ /* TODO: assign rate_fb the previous rate, if available */
+ rate_fb = rate;
}
sc->sc_tx_rate = rate;
diff --git a/sys/dev/bwn/if_bwn_pci.c b/sys/dev/bwn/if_bwn_pci.c
index e2ba15dd67c8..a64c53acf40f 100644
--- a/sys/dev/bwn/if_bwn_pci.c
+++ b/sys/dev/bwn/if_bwn_pci.c
@@ -182,24 +182,13 @@ bwn_pci_attach(device_t dev)
sc->quirks = ident->quirks;
/* Attach bridge device */
- if ((error = bhndb_attach_bridge(dev, &sc->bhndb_dev, -1)))
+ if ((error = bhndb_attach_bridge(dev, &sc->bhndb_dev, DEVICE_UNIT_ANY)))
return (ENXIO);
/* Success */
return (0);
}
-static int
-bwn_pci_detach(device_t dev)
-{
- int error;
-
- if ((error = bus_generic_detach(dev)))
- return (error);
-
- return (device_delete_children(dev));
-}
-
static void
bwn_pci_probe_nomatch(device_t dev, device_t child)
{
@@ -268,7 +257,7 @@ static device_method_t bwn_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, bwn_pci_probe),
DEVMETHOD(device_attach, bwn_pci_attach),
- DEVMETHOD(device_detach, bwn_pci_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, bus_generic_resume),
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index 8350d02cb2a3..3e7120a42a90 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -2489,7 +2489,6 @@ static int
bxe_probe(device_t dev)
{
struct bxe_device_type *t;
- char *descbuf;
uint16_t did, sdid, svid, vid;
/* Find our device structure */
@@ -2506,20 +2505,12 @@ bxe_probe(device_t dev)
if ((vid == t->bxe_vid) && (did == t->bxe_did) &&
((svid == t->bxe_svid) || (t->bxe_svid == PCI_ANY_ID)) &&
((sdid == t->bxe_sdid) || (t->bxe_sdid == PCI_ANY_ID))) {
- descbuf = malloc(BXE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
- if (descbuf == NULL)
- return (ENOMEM);
-
- /* Print out the device identity. */
- snprintf(descbuf, BXE_DEVDESC_MAX,
+ device_set_descf(dev,
"%s (%c%d) BXE v:%s", t->bxe_name,
(((pci_read_config(dev, PCIR_REVID, 4) &
0xf0) >> 4) + 'A'),
(pci_read_config(dev, PCIR_REVID, 4) & 0xf),
BXE_DRIVER_VERSION);
-
- device_set_desc_copy(dev, descbuf);
- free(descbuf, M_TEMP);
return (BUS_PROBE_DEFAULT);
}
t++;
@@ -12183,7 +12174,7 @@ bxe_set_rx_mode(struct bxe_softc *sc)
if (if_getflags(ifp) & IFF_PROMISC) {
rx_mode = BXE_RX_MODE_PROMISC;
} else if ((if_getflags(ifp) & IFF_ALLMULTI) ||
- ((if_getamcount(ifp) > BXE_MAX_MULTICAST) &&
+ (if_llmaddr_count(ifp) > BXE_MAX_MULTICAST &&
CHIP_IS_E1(sc))) {
rx_mode = BXE_RX_MODE_ALLMULTI;
} else {
@@ -12974,7 +12965,7 @@ bxe_init(void *xsc)
BXE_CORE_UNLOCK(sc);
}
-static int
+static void
bxe_init_ifnet(struct bxe_softc *sc)
{
if_t ifp;
@@ -12994,10 +12985,7 @@ bxe_init_ifnet(struct bxe_softc *sc)
BLOGI(sc, "IFMEDIA flags : %x\n", sc->ifmedia.ifm_media);
/* allocate the ifnet structure */
- if ((ifp = if_gethandle(IFT_ETHER)) == NULL) {
- BLOGE(sc, "Interface allocation failed!\n");
- return (ENXIO);
- }
+ ifp = if_gethandle(IFT_ETHER);
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
@@ -13043,8 +13031,6 @@ bxe_init_ifnet(struct bxe_softc *sc)
/* Attach driver debugnet methods. */
DEBUGNET_SET(ifp, bxe);
-
- return (0);
}
static void
@@ -16270,12 +16256,7 @@ bxe_attach(device_t dev)
bxe_get_phy_info(sc);
/* initialize the FreeBSD ifnet interface */
- if (bxe_init_ifnet(sc) != 0) {
- bxe_release_mutexes(sc);
- bxe_deallocate_bars(sc);
- pci_disable_busmaster(dev);
- return (ENXIO);
- }
+ bxe_init_ifnet(sc);
if (bxe_add_cdev(sc) != 0) {
if (sc->ifp != NULL) {
diff --git a/sys/dev/bxe/bxe.h b/sys/dev/bxe/bxe.h
index 0c7e6232dbdb..79d2792f7d6f 100644
--- a/sys/dev/bxe/bxe.h
+++ b/sys/dev/bxe/bxe.h
@@ -126,16 +126,6 @@
#ifndef roundup
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#endif
-#ifndef ilog2
-static inline
-int bxe_ilog2(int x)
-{
- int log = 0;
- while (x >>= 1) log++;
- return (log);
-}
-#define ilog2(x) bxe_ilog2(x)
-#endif
#include "ecore_sp.h"
diff --git a/sys/dev/bxe/ecore_sp.h b/sys/dev/bxe/ecore_sp.h
index f39c908f7530..917f27549c1b 100644
--- a/sys/dev/bxe/ecore_sp.h
+++ b/sys/dev/bxe/ecore_sp.h
@@ -159,7 +159,7 @@ typedef struct mtx ECORE_MUTEX_SPIN;
#define ECORE_FREE(_s, _buf, _size) free(_buf, M_TEMP)
#define SC_ILT(sc) ((sc)->ilt)
-#define ILOG2(x) bxe_ilog2(x)
+#define ILOG2(x) ilog2(x)
#define ECORE_ILT_ZALLOC(x, y, size) \
do { \
diff --git a/sys/dev/cadence/if_cgem.c b/sys/dev/cadence/if_cgem.c
index 9d2b1d71883e..4a7180fd15a4 100644
--- a/sys/dev/cadence/if_cgem.c
+++ b/sys/dev/cadence/if_cgem.c
@@ -1813,11 +1813,6 @@ cgem_attach(device_t dev)
/* Set up ifnet structure. */
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "could not allocate ifnet structure\n");
- cgem_detach(dev);
- return (ENOMEM);
- }
if_setsoftc(ifp, sc);
if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1914,10 +1909,7 @@ cgem_detach(device_t dev)
ether_ifdetach(sc->ifp);
}
- if (sc->miibus != NULL) {
- device_delete_child(dev, sc->miibus);
- sc->miibus = NULL;
- }
+ bus_generic_detach(dev);
/* Release resources. */
if (sc->mem_res != NULL) {
@@ -1970,8 +1962,6 @@ cgem_detach(device_t dev)
sc->mbuf_dma_tag = NULL;
}
- bus_generic_detach(dev);
-
if (sc->clk_tsuclk)
clk_release(sc->clk_tsuclk);
if (sc->clk_rxclk)
diff --git a/sys/dev/cardbus/cardbus.c b/sys/dev/cardbus/cardbus.c
index ffa90ebb6224..e4d546799482 100644
--- a/sys/dev/cardbus/cardbus.c
+++ b/sys/dev/cardbus/cardbus.c
@@ -93,13 +93,10 @@ static int
cardbus_attach(device_t cbdev)
{
struct cardbus_softc *sc;
-#ifdef PCI_RES_BUS
int rid;
-#endif
sc = device_get_softc(cbdev);
sc->sc_dev = cbdev;
-#ifdef PCI_RES_BUS
rid = 0;
sc->sc_bus = bus_alloc_resource(cbdev, PCI_RES_BUS, &rid,
pcib_get_bus(cbdev), pcib_get_bus(cbdev), 1, 0);
@@ -107,25 +104,18 @@ cardbus_attach(device_t cbdev)
device_printf(cbdev, "failed to allocate bus number\n");
return (ENXIO);
}
-#else
- device_printf(cbdev, "Your bus numbers may be AFU\n");
-#endif
return (0);
}
static int
cardbus_detach(device_t cbdev)
{
-#ifdef PCI_RES_BUS
struct cardbus_softc *sc;
-#endif
cardbus_detach_card(cbdev);
-#ifdef PCI_RES_BUS
sc = device_get_softc(cbdev);
device_printf(cbdev, "Freeing up the allocated bus\n");
(void)bus_release_resource(cbdev, PCI_RES_BUS, 0, sc->sc_bus);
-#endif
return (0);
}
@@ -208,7 +198,7 @@ cardbus_attach_card(device_t cbdev)
if (dinfo->pci.cfg.mfdev)
cardbusfunchigh = PCI_FUNCMAX;
- child = device_add_child(cbdev, NULL, -1);
+ child = device_add_child(cbdev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
DEVPRINTF((cbdev, "Cannot add child!\n"));
pci_freecfg((struct pci_devinfo *)dinfo);
@@ -222,6 +212,7 @@ cardbus_attach_card(device_t cbdev)
DEVPRINTF((cbdev, "Warning: Bogus CIS ignored\n"));
pci_cfg_save(dinfo->pci.cfg.dev, &dinfo->pci, 0);
pci_cfg_restore(dinfo->pci.cfg.dev, &dinfo->pci);
+ pci_clear_pme(child);
cardbus_device_setup_regs(&dinfo->pci.cfg);
pci_add_resources(cbdev, child, 1, dinfo->mprefetchable);
pci_print_verbose(&dinfo->pci);
@@ -255,8 +246,6 @@ cardbus_detach_card(device_t cbdev)
bus_topo_lock();
err = bus_generic_detach(cbdev);
- if (err == 0)
- err = device_delete_children(cbdev);
bus_topo_unlock();
if (err)
return (err);
diff --git a/sys/dev/cardbus/cardbusvar.h b/sys/dev/cardbus/cardbusvar.h
index 5ebedf2e2621..12ad42ee94f7 100644
--- a/sys/dev/cardbus/cardbusvar.h
+++ b/sys/dev/cardbus/cardbusvar.h
@@ -68,9 +68,7 @@ struct cardbus_devinfo
struct cardbus_softc
{
device_t sc_dev;
-#ifdef PCI_RES_BUS
struct resource *sc_bus;
-#endif
};
/*
diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c
index 0cf17cf04b33..fed2c3a3a051 100644
--- a/sys/dev/cas/if_cas.c
+++ b/sys/dev/cas/if_cas.c
@@ -188,8 +188,6 @@ cas_attach(struct cas_softc *sc)
/* Set up ifnet structure. */
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOSPC);
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(sc->sc_dev),
device_get_unit(sc->sc_dev));
@@ -207,11 +205,6 @@ cas_attach(struct cas_softc *sc)
TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp);
sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->sc_tq);
- if (sc->sc_tq == NULL) {
- device_printf(sc->sc_dev, "could not create taskqueue\n");
- error = ENXIO;
- goto fail_ifnet;
- }
error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->sc_dev));
if (error != 0) {
@@ -464,7 +457,6 @@ cas_attach(struct cas_softc *sc)
bus_dma_tag_destroy(sc->sc_pdmatag);
fail_taskq:
taskqueue_free(sc->sc_tq);
- fail_ifnet:
if_free(ifp);
return (error);
}
@@ -485,7 +477,7 @@ cas_detach(struct cas_softc *sc)
taskqueue_drain(sc->sc_tq, &sc->sc_tx_task);
if_free(ifp);
taskqueue_free(sc->sc_tq);
- device_delete_child(sc->sc_dev, sc->sc_miibus);
+ bus_generic_detach(sc->sc_dev);
for (i = 0; i < CAS_NRXDESC; i++)
if (sc->sc_rxdsoft[i].rxds_dmamap != NULL)
diff --git a/sys/dev/cesa/cesa.c b/sys/dev/cesa/cesa.c
index 2dcdb0258844..405b619d6e5b 100644
--- a/sys/dev/cesa/cesa.c
+++ b/sys/dev/cesa/cesa.c
@@ -1183,31 +1183,11 @@ cesa_attach_late(device_t dev)
soc_id(&d, &r);
switch (d) {
- case MV_DEV_88F6281:
- case MV_DEV_88F6282:
- /* Check if CESA peripheral device has power turned on */
- if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) ==
- CPU_PM_CTRL_CRYPTO) {
- device_printf(dev, "not powered on\n");
- return (ENXIO);
- }
- sc->sc_tperr = 0;
- break;
case MV_DEV_88F6828:
case MV_DEV_88F6820:
case MV_DEV_88F6810:
sc->sc_tperr = 0;
break;
- case MV_DEV_MV78100:
- case MV_DEV_MV78100_Z0:
- /* Check if CESA peripheral device has power turned on */
- if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) !=
- CPU_PM_CTRL_CRYPTO) {
- device_printf(dev, "not powered on\n");
- return (ENXIO);
- }
- sc->sc_tperr = CESA_ICR_TPERR;
- break;
default:
return (ENXIO);
}
diff --git a/sys/dev/cfe/cfe_resource.c b/sys/dev/cfe/cfe_resource.c
index d5047e045f37..78c5f149ba7d 100644
--- a/sys/dev/cfe/cfe_resource.c
+++ b/sys/dev/cfe/cfe_resource.c
@@ -81,7 +81,7 @@ cferes_identify(driver_t* driver, device_t parent)
struct cferes_softc *sc;
uint64_t addr, len, type;
- child = BUS_ADD_CHILD(parent, 100, "cferes", -1);
+ child = BUS_ADD_CHILD(parent, 100, "cferes", DEVICE_UNIT_ANY);
device_set_driver(child, driver);
sc = device_get_softc(child);
diff --git a/sys/dev/cfi/cfi_core.c b/sys/dev/cfi/cfi_core.c
index 4e137140372c..12d3964719ba 100644
--- a/sys/dev/cfi/cfi_core.c
+++ b/sys/dev/cfi/cfi_core.c
@@ -194,7 +194,6 @@ cfi_fmtsize(uint32_t sz)
int
cfi_probe(device_t dev)
{
- char desc[80];
struct cfi_softc *sc;
char *vend_str;
int error;
@@ -279,9 +278,7 @@ cfi_probe(device_t dev)
if (error)
goto out;
- snprintf(desc, sizeof(desc), "%s - %s", vend_str,
- cfi_fmtsize(sc->sc_size));
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "%s - %s", vend_str, cfi_fmtsize(sc->sc_size));
out:
bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rid, sc->sc_res);
@@ -448,8 +445,8 @@ cfi_attach(device_t dev)
}
#endif
- device_add_child(dev, "cfid", -1);
- bus_generic_attach(dev);
+ device_add_child(dev, "cfid", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/chromebook_platform/chromebook_platform.c b/sys/dev/chromebook_platform/chromebook_platform.c
index cd5ad76a8701..f873338a3ba6 100644
--- a/sys/dev/chromebook_platform/chromebook_platform.c
+++ b/sys/dev/chromebook_platform/chromebook_platform.c
@@ -70,11 +70,11 @@ chromebook_i2c_identify(driver_t *driver, device_t bus)
return;
for (i = 0; i < nitems(slaves); i++) {
- if (device_find_child(bus, slaves[i].name, -1) != NULL)
+ if (device_find_child(bus, slaves[i].name, DEVICE_UNIT_ANY) != NULL)
continue;
if (slaves[i].pci_id != pci_get_devid(controller))
continue;
- child = BUS_ADD_CHILD(bus, 0, slaves[i].name, -1);
+ child = BUS_ADD_CHILD(bus, 0, slaves[i].name, DEVICE_UNIT_ANY);
if (child != NULL)
iicbus_set_addr(child, slaves[i].addr);
}
diff --git a/sys/dev/ciss/ciss.c b/sys/dev/ciss/ciss.c
index 6723d05e0cfe..d4ede91f6b35 100644
--- a/sys/dev/ciss/ciss.c
+++ b/sys/dev/ciss/ciss.c
@@ -242,15 +242,46 @@ static struct cdevsw ciss_cdevsw = {
.d_name = "ciss",
};
+SYSCTL_NODE(_hw, OID_AUTO, ciss, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "CISS sysctl tunables");
+
+/*
+ * This tunable can be used to force a specific initiator id
+ */
+static int ciss_initiator_id = CAM_TARGET_WILDCARD;
+SYSCTL_INT(_hw_ciss, OID_AUTO, initiator_id, CTLFLAG_RDTUN,
+ &ciss_initiator_id, 0,
+ "force a specific initiator id");
+
+/*
+ * This tunable can be used to force a specific base transfer speed
+ */
+static int ciss_base_transfer_speed = 132 * 1024;
+SYSCTL_INT(_hw_ciss, OID_AUTO, base_transfer_speed, CTLFLAG_RDTUN,
+ &ciss_base_transfer_speed, 0,
+ "force a specific base transfer speed");
+
+/*
+ * This tunable can be set to make the driver more verbose
+ */
+static int ciss_verbose = 0;
+SYSCTL_INT(_hw_ciss, OID_AUTO, verbose, CTLFLAG_RWTUN, &ciss_verbose, 0,
+ "enable verbose messages");
+
/*
* This tunable can be set at boot time and controls whether physical devices
* that are marked hidden by the firmware should be exposed anyways.
*/
static unsigned int ciss_expose_hidden_physical = 0;
TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical);
+SYSCTL_INT(_hw_ciss, OID_AUTO, expose_hidden_physical, CTLFLAG_RWTUN,
+ &ciss_expose_hidden_physical, 0,
+ "expose hidden physical drives");
static unsigned int ciss_nop_message_heartbeat = 0;
TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat);
+SYSCTL_INT(_hw_ciss, OID_AUTO, nop_message_heartbeat, CTLFLAG_RWTUN,
+ &ciss_nop_message_heartbeat, 0,
+ "nop heartbeat messages");
/*
* This tunable can force a particular transport to be used:
@@ -260,6 +291,9 @@ TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat);
*/
static int ciss_force_transport = 0;
TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport);
+SYSCTL_INT(_hw_ciss, OID_AUTO, force_transport, CTLFLAG_RDTUN,
+ &ciss_force_transport, 0,
+ "use default (0), force simple (1) or force performant (2) transport");
/*
* This tunable can force a particular interrupt delivery method to be used:
@@ -269,6 +303,9 @@ TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport);
*/
static int ciss_force_interrupt = 0;
TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt);
+SYSCTL_INT(_hw_ciss, OID_AUTO, force_interrupt, CTLFLAG_RDTUN,
+ &ciss_force_interrupt, 0,
+ "use default (0), force INTx (1) or force MSIx(2) interrupts");
/************************************************************************
* CISS adapters amazingly don't have a defined programming interface
@@ -904,7 +941,7 @@ ciss_setup_msix(struct ciss_softc *sc)
}
sc->ciss_msi = val;
- if (bootverbose)
+ if (bootverbose || ciss_verbose)
ciss_printf(sc, "Using %d MSIX interrupt%s\n", val,
(val != 1) ? "s" : "");
@@ -1122,7 +1159,7 @@ ciss_init_requests(struct ciss_softc *sc)
debug_called(1);
- if (bootverbose)
+ if (bootverbose || ciss_verbose)
ciss_printf(sc, "using %d of %d available commands\n",
sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands);
@@ -1254,13 +1291,22 @@ ciss_identify_adapter(struct ciss_softc *sc)
if (sc->ciss_cfg->max_physical_supported == 0)
sc->ciss_cfg->max_physical_supported = CISS_MAX_PHYSICAL;
/* print information */
- if (bootverbose) {
+ if (bootverbose || ciss_verbose) {
ciss_printf(sc, " %d logical drive%s configured\n",
sc->ciss_id->configured_logical_drives,
(sc->ciss_id->configured_logical_drives == 1) ? "" : "s");
ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision);
ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_chip_count);
+ if (ciss_verbose > 1) {
+ ciss_printf(sc, " %d FC channels\n", sc->ciss_id->fibre_chip_count);
+ ciss_printf(sc, " %d enclosures\n", sc->ciss_id->bEnclosureCount);
+ ciss_printf(sc, " %d expanders\n", sc->ciss_id->bExpanderCount);
+ ciss_printf(sc, " maximum blocks: %d\n", sc->ciss_id->maximum_blocks);
+ ciss_printf(sc, " controller clock: %d\n", sc->ciss_id->controller_clock);
+ ciss_printf(sc, " %d MB controller memory\n", sc->ciss_id->total_controller_mem_mb);
+ }
+
ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature);
ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence);
ciss_printf(sc, " supported I/O methods 0x%b\n",
@@ -1426,7 +1472,7 @@ ciss_init_logical(struct ciss_softc *sc)
/*
* Save logical drive information.
*/
- if (bootverbose) {
+ if (bootverbose || ciss_verbose) {
ciss_printf(sc, "%d logical drive%s\n",
ndrives, (ndrives > 1 || ndrives == 0) ? "s" : "");
}
@@ -1501,11 +1547,14 @@ ciss_init_physical(struct ciss_softc *sc)
nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address));
- if (bootverbose) {
+ if (bootverbose || ciss_verbose) {
ciss_printf(sc, "%d physical device%s\n",
nphys, (nphys > 1 || nphys == 0) ? "s" : "");
}
+ /* Per-controller highest target number seen */
+ sc->ciss_max_physical_target = 0;
+
/*
* Figure out the bus mapping.
* Logical buses include both the local logical bus for local arrays and
@@ -1588,6 +1637,8 @@ ciss_init_physical(struct ciss_softc *sc)
}
ciss_filter_physical(sc, cll);
+ if (bootverbose || ciss_verbose)
+ ciss_printf(sc, "max physical target id: %d\n", sc->ciss_max_physical_target);
out:
if (cll != NULL)
@@ -1637,6 +1688,10 @@ ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll)
target = CISS_EXTRA_TARGET2(ea);
sc->ciss_physical[bus][target].cp_address = cll->lun[i];
sc->ciss_physical[bus][target].cp_online = 1;
+
+ if ((target > sc->ciss_max_physical_target) &&
+ (cll->lun[i].physical.mode != CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL))
+ sc->ciss_max_physical_target = target;
}
return (0);
@@ -1769,7 +1824,7 @@ ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld)
/*
* Print the drive's basic characteristics.
*/
- if (bootverbose) {
+ if (bootverbose || ciss_verbose) {
ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ",
CISS_LUN_TO_BUS(ld->cl_address.logical.lun),
CISS_LUN_TO_TARGET(ld->cl_address.logical.lun),
@@ -2299,13 +2354,14 @@ _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_sta
/*
* We don't consider data under/overrun an error for the Report
- * Logical/Physical LUNs commands.
+ * Logical/Physical LUNs, INQUIRY, and RECEIVE_DIAGNOSTIC commands.
*/
if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) &&
((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) ||
(ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) &&
((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) ||
(cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) ||
+ (cc->cdb.cdb[0] == RECEIVE_DIAGNOSTIC) ||
(cc->cdb.cdb[0] == INQUIRY))) {
cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR;
debug(2, "ignoring irrelevant under/overrun error");
@@ -2333,10 +2389,13 @@ _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_sta
*scsi_status = -1;
}
}
- if (bootverbose && ce->command_status != CISS_CMD_STATUS_DATA_UNDERRUN)
- ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n",
+ if ((bootverbose || ciss_verbose > 3 || (ciss_verbose > 2 && ce->scsi_status != 0)) &&
+ (ce->command_status != CISS_CMD_STATUS_DATA_UNDERRUN)) {
+ ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x (opcode 0x%02x)\n",
ce->command_status, ciss_name_command_status(ce->command_status),
- ce->scsi_status);
+ ce->scsi_status,
+ cc->cdb.cdb[0]);
+ }
if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) {
ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n",
ce->additional_error_info.invalid_command.offense_size,
@@ -3020,15 +3079,18 @@ ciss_cam_action(struct cam_sim *sim, union ccb *ccb)
cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? */
cpi->target_sprt = 0;
cpi->hba_misc = 0;
- cpi->max_target = sc->ciss_cfg->max_logical_supported;
+ cpi->max_target = MAX(sc->ciss_max_physical_target, sc->ciss_cfg->max_logical_supported);
cpi->max_lun = 0; /* 'logical drive' channel only */
- cpi->initiator_id = sc->ciss_cfg->max_logical_supported;
+ if (ciss_initiator_id != CAM_TARGET_WILDCARD)
+ cpi->initiator_id = ciss_initiator_id;
+ else
+ cpi->initiator_id = sc->ciss_cfg->max_logical_supported;
strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
strlcpy(cpi->hba_vid, "CISS", HBA_IDLEN);
strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
cpi->unit_number = cam_sim_unit(sim);
cpi->bus_id = cam_sim_bus(sim);
- cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */
+ cpi->base_transfer_speed = ciss_base_transfer_speed;
cpi->transport = XPORT_SPI;
cpi->transport_version = 2;
cpi->protocol = PROTO_SCSI;
@@ -4145,8 +4207,20 @@ ciss_notify_thread(void *arg)
cr = ciss_dequeue_notify(sc);
- if (cr == NULL)
- panic("cr null");
+ if (cr == NULL) {
+ /*
+ * We sometimes get a NULL request here when devices are unplugged and
+ * replugged, but this indicates a bug: we only wake this thread when
+ * we (a) set the THREAD_SHUT flag or (b) have enqueued something.
+ * Since it is reported around errors, it may be a locking bug related
+ * to ciss_flags being modified in multiple threads, some without
+ * ciss_mtx held, or there may be some other way we either fail to
+ * sleep or corrupt ciss_flags.
+ */
+ ciss_printf(sc, "Driver bug: NULL notify event received\n");
+ continue;
+ }
+
cn = (struct ciss_notify *)cr->cr_data;
switch (cn->class) {
diff --git a/sys/dev/ciss/cissvar.h b/sys/dev/ciss/cissvar.h
index 50d154456674..58275f83732c 100644
--- a/sys/dev/ciss/cissvar.h
+++ b/sys/dev/ciss/cissvar.h
@@ -184,7 +184,7 @@ struct ciss_softc
{
/* bus connections */
device_t ciss_dev; /* bus attachment */
- struct cdev *ciss_dev_t; /* control device */
+ struct cdev *ciss_dev_t; /* control device */
struct resource *ciss_regs_resource; /* register interface window */
int ciss_regs_rid; /* resource ID */
@@ -236,6 +236,7 @@ struct ciss_softc
int ciss_max_bus_number; /* maximum bus number */
int ciss_max_logical_bus;
int ciss_max_physical_bus;
+ int ciss_max_physical_target; /* highest physical target number */
struct cam_devq *ciss_cam_devq;
struct cam_sim **ciss_cam_sim;
diff --git a/sys/dev/clk/allwinner/aw_ccu.c b/sys/dev/clk/allwinner/aw_ccu.c
index 9ee08f8f2dd1..b7a088bb68f6 100644
--- a/sys/dev/clk/allwinner/aw_ccu.c
+++ b/sys/dev/clk/allwinner/aw_ccu.c
@@ -220,7 +220,8 @@ aw_ccu_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t aw_ccu_methods[] = {
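
The bus_generic_attach() to bus_attach_children() conversion above recurs verbatim in clk_bus.c and clk_fixed.c below: the old call returned an error code, while its replacement returns void, so callers now return 0 explicitly. A minimal sketch of the updated idiom (the driver name is illustrative):

static int
mybus_attach(device_t dev)
{
	/* ... allocate resources and add child devices ... */

	bus_attach_children(dev);	/* void; attaches all added children */
	return (0);
}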
diff --git a/sys/dev/clk/allwinner/ccu_d1.c b/sys/dev/clk/allwinner/ccu_d1.c
new file mode 100644
index 000000000000..29fa3c9e5bd5
--- /dev/null
+++ b/sys/dev/clk/allwinner/ccu_d1.c
@@ -0,0 +1,1062 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Julien Cassette <julien.cassette@gmail.com>
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Mitchell Horne
+ * <mhorne@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk_div.h>
+#include <dev/clk/clk_fixed.h>
+#include <dev/clk/clk_mux.h>
+
+#include <dev/clk/allwinner/aw_ccung.h>
+
+#include <dt-bindings/clock/sun20i-d1-ccu.h>
+#include <dt-bindings/reset/sun20i-d1-ccu.h>
+
+static struct aw_ccung_reset ccu_d1_resets[] = {
+ CCU_RESET(RST_MBUS, 0x540, 30)
+ CCU_RESET(RST_BUS_DE, 0x60C, 16)
+ CCU_RESET(RST_BUS_DI, 0x62C, 16)
+ CCU_RESET(RST_BUS_G2D, 0x63C, 16)
+ CCU_RESET(RST_BUS_CE, 0x68C, 16)
+ CCU_RESET(RST_BUS_VE, 0x69C, 16)
+ CCU_RESET(RST_BUS_DMA, 0x70C, 16)
+ CCU_RESET(RST_BUS_MSGBOX0, 0x71C, 16)
+ CCU_RESET(RST_BUS_MSGBOX1, 0x71C, 17)
+ CCU_RESET(RST_BUS_MSGBOX2, 0x71C, 18)
+ CCU_RESET(RST_BUS_SPINLOCK, 0x72C, 16)
+ CCU_RESET(RST_BUS_HSTIMER, 0x73C, 16)
+ CCU_RESET(RST_BUS_DBG, 0x78C, 16)
+ CCU_RESET(RST_BUS_PWM, 0x7AC, 16)
+ CCU_RESET(RST_BUS_DRAM, 0x80C, 16)
+ CCU_RESET(RST_BUS_MMC0, 0x84C, 16)
+ CCU_RESET(RST_BUS_MMC1, 0x84C, 17)
+ CCU_RESET(RST_BUS_MMC2, 0x84C, 18)
+ CCU_RESET(RST_BUS_UART0, 0x90C, 16)
+ CCU_RESET(RST_BUS_UART1, 0x90C, 17)
+ CCU_RESET(RST_BUS_UART2, 0x90C, 18)
+ CCU_RESET(RST_BUS_UART3, 0x90C, 19)
+ CCU_RESET(RST_BUS_UART4, 0x90C, 20)
+ CCU_RESET(RST_BUS_UART5, 0x90C, 21)
+ CCU_RESET(RST_BUS_I2C0, 0x91C, 16)
+ CCU_RESET(RST_BUS_I2C1, 0x91C, 17)
+ CCU_RESET(RST_BUS_I2C2, 0x91C, 18)
+ CCU_RESET(RST_BUS_I2C3, 0x91C, 19)
+ CCU_RESET(RST_BUS_SPI0, 0x96C, 16)
+ CCU_RESET(RST_BUS_SPI1, 0x96C, 17)
+ CCU_RESET(RST_BUS_EMAC, 0x97C, 16)
+ CCU_RESET(RST_BUS_IR_TX, 0x9CC, 16)
+ CCU_RESET(RST_BUS_GPADC, 0x9EC, 16)
+ CCU_RESET(RST_BUS_THS, 0x9FC, 16)
+ CCU_RESET(RST_BUS_I2S0, 0xA20, 16)
+ CCU_RESET(RST_BUS_I2S1, 0xA20, 17)
+ CCU_RESET(RST_BUS_I2S2, 0xA20, 18)
+ CCU_RESET(RST_BUS_SPDIF, 0xA2C, 16)
+ CCU_RESET(RST_BUS_DMIC, 0xA4C, 16)
+ CCU_RESET(RST_BUS_AUDIO, 0xA5C, 16)
+ CCU_RESET(RST_USB_PHY0, 0xA70, 30)
+ CCU_RESET(RST_USB_PHY1, 0xA74, 30)
+ CCU_RESET(RST_BUS_OHCI0, 0xA8C, 16)
+ CCU_RESET(RST_BUS_OHCI1, 0xA8C, 17)
+ CCU_RESET(RST_BUS_EHCI0, 0xA8C, 20)
+ CCU_RESET(RST_BUS_EHCI1, 0xA8C, 21)
+ CCU_RESET(RST_BUS_OTG, 0xA8C, 24)
+ CCU_RESET(RST_BUS_LRADC, 0xA9C, 16)
+ CCU_RESET(RST_BUS_DPSS_TOP, 0xABC, 16)
+ CCU_RESET(RST_BUS_MIPI_DSI, 0xB4C, 16)
+ CCU_RESET(RST_BUS_TCON_LCD0, 0xB7C, 16)
+ CCU_RESET(RST_BUS_TCON_TV, 0xB9C, 16)
+ CCU_RESET(RST_BUS_LVDS0, 0xBAC, 16)
+ CCU_RESET(RST_BUS_TVE, 0xBBC, 17)
+ CCU_RESET(RST_BUS_TVE_TOP, 0xBBC, 16)
+ CCU_RESET(RST_BUS_TVD, 0xBDC, 17)
+ CCU_RESET(RST_BUS_TVD_TOP, 0xBDC, 16)
+ CCU_RESET(RST_BUS_LEDC, 0xBFC, 16)
+ CCU_RESET(RST_BUS_CSI, 0xC1C, 16)
+ CCU_RESET(RST_BUS_TPADC, 0xC5C, 16)
+ CCU_RESET(RST_DSP, 0xC7C, 16)
+ CCU_RESET(RST_BUS_DSP_CFG, 0xC7C, 17)
+ CCU_RESET(RST_BUS_DSP_DBG, 0xC7C, 18)
+ CCU_RESET(RST_BUS_RISCV_CFG, 0xD0C, 16)
+ CCU_RESET(RST_BUS_CAN0, 0x92C, 16)
+ CCU_RESET(RST_BUS_CAN1, 0x92C, 17)
+};
+
+static struct aw_ccung_gate ccu_d1_gates[] = {
+ CCU_GATE(CLK_BUS_DE, "bus-de", "psi-ahb", 0x60C, 0)
+ CCU_GATE(CLK_BUS_DI, "bus-di", "psi-ahb", 0x62C, 0)
+ CCU_GATE(CLK_BUS_G2D, "bus-g2d", "psi-ahb", 0x63C, 0)
+ CCU_GATE(CLK_BUS_CE, "bus-ce", "psi-ahb", 0x68C, 0)
+ CCU_GATE(CLK_BUS_VE, "bus-ve", "psi-ahb", 0x690, 0)
+ CCU_GATE(CLK_BUS_DMA, "bus-dma", "psi-ahb", 0x70C, 0)
+ CCU_GATE(CLK_BUS_MSGBOX0, "bus-msgbox0", "psi-ahb", 0x71C, 0)
+ CCU_GATE(CLK_BUS_MSGBOX1, "bus-msgbox1", "psi-ahb", 0x71C, 1)
+ CCU_GATE(CLK_BUS_MSGBOX2, "bus-msgbox2", "psi-ahb", 0x71C, 2)
+ CCU_GATE(CLK_BUS_SPINLOCK, "bus-spinlock", "psi-ahb", 0x72C, 0)
+ CCU_GATE(CLK_BUS_HSTIMER, "bus-hstimer", "psi-ahb", 0x73C, 0)
+ CCU_GATE(CLK_AVS, "avs", "dcxo", 0x740, 31)
+ CCU_GATE(CLK_BUS_DBG, "bus-dbg", "psi-ahb", 0x78C, 0)
+ CCU_GATE(CLK_BUS_PWM, "bus-pwm", "psi-ahb", 0x7AC, 0)
+ CCU_GATE(CLK_BUS_IOMMU, "bus-iommu", "apb0", 0x7BC, 0)
+ CCU_GATE(CLK_MBUS_DMA, "mbus-dma", "mbus", 0x804, 0)
+ CCU_GATE(CLK_MBUS_VE, "mbus-ve", "mbus", 0x804, 1)
+ CCU_GATE(CLK_MBUS_CE, "mbus-ce", "mbus", 0x804, 2)
+ CCU_GATE(CLK_MBUS_TVIN, "mbus-tvin", "mbus", 0x804, 7)
+ CCU_GATE(CLK_MBUS_CSI, "mbus-csi", "mbus", 0x804, 8)
+ CCU_GATE(CLK_MBUS_G2D, "mbus-g2d", "mbus", 0x804, 10)
+ CCU_GATE(CLK_MBUS_RISCV, "mbus-riscv", "mbus", 0x804, 11)
+ CCU_GATE(CLK_BUS_DRAM, "bus-dram", "psi-ahb", 0x80C, 0)
+ CCU_GATE(CLK_BUS_MMC0, "bus-mmc0", "psi-ahb", 0x84C, 0)
+ CCU_GATE(CLK_BUS_MMC1, "bus-mmc1", "psi-ahb", 0x84C, 1)
+ CCU_GATE(CLK_BUS_MMC2, "bus-mmc2", "psi-ahb", 0x84C, 2)
+ CCU_GATE(CLK_BUS_UART0, "bus-uart0", "apb1", 0x90C, 0)
+ CCU_GATE(CLK_BUS_UART1, "bus-uart1", "apb1", 0x90C, 1)
+ CCU_GATE(CLK_BUS_UART2, "bus-uart2", "apb1", 0x90C, 2)
+ CCU_GATE(CLK_BUS_UART3, "bus-uart3", "apb1", 0x90C, 3)
+ CCU_GATE(CLK_BUS_UART4, "bus-uart4", "apb1", 0x90C, 4)
+ CCU_GATE(CLK_BUS_UART5, "bus-uart5", "apb1", 0x90C, 5)
+ CCU_GATE(CLK_BUS_I2C0, "bus-i2c0", "apb1", 0x91C, 0)
+ CCU_GATE(CLK_BUS_I2C1, "bus-i2c1", "apb1", 0x91C, 1)
+ CCU_GATE(CLK_BUS_I2C2, "bus-i2c2", "apb1", 0x91C, 2)
+ CCU_GATE(CLK_BUS_I2C3, "bus-i2c3", "apb1", 0x91C, 3)
+ CCU_GATE(CLK_BUS_SPI0, "bus-spi0", "psi-ahb", 0x96C, 0)
+ CCU_GATE(CLK_BUS_SPI1, "bus-spi1", "psi-ahb", 0x96C, 1)
+ CCU_GATE(CLK_BUS_EMAC, "bus-emac", "psi-ahb", 0x97C, 0)
+ CCU_GATE(CLK_BUS_IR_TX, "bus-ir-tx", "apb0", 0x9CC, 0)
+ CCU_GATE(CLK_BUS_GPADC, "bus-gpadc", "apb0", 0x9EC, 0)
+ CCU_GATE(CLK_BUS_THS, "bus-ths", "apb0", 0x9FC, 0)
+ CCU_GATE(CLK_BUS_I2S0, "bus-i2s0", "apb0", 0xA10, 0)
+ CCU_GATE(CLK_BUS_I2S1, "bus-i2s1", "apb0", 0xA10, 1)
+ CCU_GATE(CLK_BUS_I2S2, "bus-i2s2", "apb0", 0xA10, 2)
+ CCU_GATE(CLK_BUS_SPDIF, "bus-spdif", "apb0", 0xA2C, 0)
+ CCU_GATE(CLK_BUS_DMIC, "bus-dmic", "apb0", 0xA4C, 0)
+ CCU_GATE(CLK_BUS_AUDIO, "bus-audio", "apb0", 0xA5C, 0)
+ CCU_GATE(CLK_BUS_OHCI0, "bus-ohci0", "psi-ahb", 0xA8C, 0)
+ CCU_GATE(CLK_BUS_OHCI1, "bus-ohci1", "psi-ahb", 0xA8C, 1)
+ CCU_GATE(CLK_BUS_EHCI0, "bus-ehci0", "psi-ahb", 0xA8C, 4)
+ CCU_GATE(CLK_BUS_EHCI1, "bus-ehci1", "psi-ahb", 0xA8C, 5)
+ CCU_GATE(CLK_BUS_OTG, "bus-otg", "psi-ahb", 0xA8C, 8)
+ CCU_GATE(CLK_BUS_LRADC, "bus-lradc", "apb0", 0xA9C, 0)
+ CCU_GATE(CLK_BUS_DPSS_TOP, "bus-dpss-top", "psi-ahb", 0xABC, 0)
+ CCU_GATE(CLK_BUS_MIPI_DSI, "bus-mipi-dsi", "psi-ahb", 0xB4C, 0)
+ CCU_GATE(CLK_BUS_TCON_LCD0, "bus-tcon-lcd0", "psi-ahb", 0xB7C, 0)
+ CCU_GATE(CLK_BUS_TCON_TV, "bus-tcon-tv", "psi-ahb", 0xB9C, 0)
+ CCU_GATE(CLK_BUS_TVE_TOP, "bus-tve-top", "psi-ahb", 0xBBC, 0)
+ CCU_GATE(CLK_BUS_TVE, "bus-tve", "psi-ahb", 0xBBC, 1)
+ CCU_GATE(CLK_BUS_TVD_TOP, "bus-tvd-top", "psi-ahb", 0xBDC, 0)
+ CCU_GATE(CLK_BUS_TVD, "bus-tvd", "psi-ahb", 0xBDC, 1)
+ CCU_GATE(CLK_BUS_LEDC, "bus-ledc", "psi-ahb", 0xBFC, 0)
+ CCU_GATE(CLK_BUS_CSI, "bus-csi", "psi-ahb", 0xC1C, 0)
+ CCU_GATE(CLK_BUS_TPADC, "bus-tpadc", "apb0", 0xC5C, 0)
+ CCU_GATE(CLK_BUS_TZMA, "bus-tzma", "apb0", 0xC6C, 0)
+ CCU_GATE(CLK_BUS_DSP_CFG, "bus-dsp-cfg", "psi-ahb", 0xC7C, 1)
+ CCU_GATE(CLK_BUS_RISCV_CFG, "bus-riscv-cfg", "psi-ahb", 0xD0C, 0)
+ CCU_GATE(CLK_BUS_CAN0, "bus-can0", "apb1", 0x92C, 0)
+ CCU_GATE(CLK_BUS_CAN1, "bus-can1", "apb1", 0x92C, 1)
+};
+
+static const char *pll_cpux_parents[] = { "dcxo" };
+NP_CLK(pll_cpux_clk,
+ CLK_PLL_CPUX, /* id */
+ "pll_cpux", /* name */
+ pll_cpux_parents, /* parents */
+ 0x0, /* offset */
+ 8, 8, 0, 0, /* n factor */
+ 0, 2, 0, 0, /* p factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+static const char *pll_ddr0_parents[] = { "dcxo" };
+NMM_CLK(pll_ddr0_clk,
+ CLK_PLL_DDR0, /* id */
+ "pll_ddr0", /* name */
+ pll_ddr0_parents, /* parents */
+ 0x10, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 0, 1, 0, 0, /* m0 factor */
+ 1, 1, 0, 0, /* m1 factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* PLL_PERIPH(4X) = 24 MHz * N / M1 / M0 */
+static const char *pll_periph0_4x_parents[] = { "dcxo" };
+NMM_CLK(pll_periph0_4x_clk,
+ CLK_PLL_PERIPH0_4X, /* id */
+ "pll_periph0_4x", /* name */
+ pll_periph0_4x_parents, /* parents */
+ 0x20, /* offset */
+ 8, 8, 0, 0, /* n factor */
+ 0, 1, 0, 0, /* m0 factor */
+ 1, 1, 0, 0, /* m1 factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
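
As a worked example of the formula above (illustrative factor values, not the boot defaults): with N = 100 and M0 = M1 = 1, PLL_PERIPH(4X) = 24 MHz * 100 / 1 / 1 = 2.4 GHz; the 2X, 800M, and 1X taps defined next then divide that back down, e.g. 2.4 GHz / 2 = 1.2 GHz on the 2X output and 600 MHz on the fixed 1X output.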
+
+/* PLL_PERIPH0(2X) = 24 MHz * N / M / P0 */
+static const char *pll_periph0_2x_parents[] = { "pll_periph0_4x" };
+M_CLK(pll_periph0_2x_clk,
+ CLK_PLL_PERIPH0_2X, /* id */
+ "pll_periph0_2x", /* name */
+ pll_periph0_2x_parents, /* parents */
+ 0x20, /* offset */
+ 16, 3, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+/* PLL_PERIPH0(800M) = 24 MHz * N / M / P1 */
+static const char *pll_periph0_800m_parents[] = { "pll_periph0_4x" };
+M_CLK(pll_periph0_800m_clk,
+ CLK_PLL_PERIPH0_800M, /* id */
+ "pll_periph0_800m", /* name */
+ pll_periph0_800m_parents, /* parents */
+ 0x20, /* offset */
+ 20, 3, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+/* PLL_PERIPH0(1X) = 24 MHz * N / M / P0 / 2 */
+static const char *pll_periph0_parents[] = { "pll_periph0_2x" };
+FIXED_CLK(pll_periph0_clk,
+ CLK_PLL_PERIPH0, /* id */
+ "pll_periph0", /* name */
+ pll_periph0_parents, /* parents */
+ 0, /* freq */
+ 1, /* mult */
+ 2, /* div */
+ 0); /* flags */
+
+/* For child clocks: InputFreq * N / M */
+static const char *pll_video0_parents[] = { "dcxo" };
+NP_CLK(pll_video0_clk,
+ CLK_PLL_VIDEO0, /* id */
+ "pll_video0", /* name */
+ pll_video0_parents, /* parents */
+ 0x40, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 1, 1, 0, 0, /* p factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* PLL_VIDEO0(4X) = InputFreq * N / M / D */
+/* D is only for testing */
+static const char *pll_video0_4x_parents[] = { "pll_video0" };
+M_CLK(pll_video0_4x_clk,
+ CLK_PLL_VIDEO0_4X, /* id */
+ "pll_video0_4x", /* name */
+ pll_video0_4x_parents, /* parents */
+ 0x40, /* offset */
+ 0, 1, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+/* PLL_VIDEO0(2X) = InputFreq * N / M / 2 */
+static const char *pll_video0_2x_parents[] = { "pll_video0" };
+FIXED_CLK(pll_video0_2x_clk,
+ CLK_PLL_VIDEO0_2X, /* id */
+ "pll_video0_2x", /* name */
+ pll_video0_2x_parents, /* parents */
+ 0, /* freq */
+ 1, /* mult */
+ 2, /* div */
+ 0); /* flags */
+
+/* For child clocks: InputFreq * N / M */
+static const char *pll_video1_parents[] = { "dcxo" };
+NP_CLK(pll_video1_clk,
+ CLK_PLL_VIDEO1, /* id */
+ "pll_video1", /* name */
+ pll_video1_parents, /* parents */
+ 0x48, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 1, 1, 0, 0, /* p factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* PLL_VIDEO1(4X) = InputFreq * N / M / D */
+/* D is only for testing */
+static const char *pll_video1_4x_parents[] = { "pll_video1" };
+M_CLK(pll_video1_4x_clk,
+ CLK_PLL_VIDEO1_4X, /* id */
+ "pll_video1_4x", /* name */
+ pll_video1_4x_parents, /* parents */
+ 0x48, /* offset */
+ 0, 1, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+/* PLL_VIDEO1(2X) = InputFreq * N / M / 2 */
+static const char *pll_video1_2x_parents[] = { "pll_video1" };
+FIXED_CLK(pll_video1_2x_clk,
+ CLK_PLL_VIDEO1_2X, /* id */
+ "pll_video1_2x", /* name */
+ pll_video1_2x_parents, /* parents */
+ 0, /* freq */
+ 1, /* mult */
+ 2, /* div */
+ 0); /* flags */
+
+static const char *pll_ve_parents[] = { "dcxo" };
+NMM_CLK(pll_ve_clk,
+ CLK_PLL_VE, /* id */
+ "pll_ve", /* name */
+ pll_ve_parents, /* parents */
+ 0x58, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 0, 1, 0, 0, /* m0 factor */
+ 1, 1, 0, 0, /* m1 factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* For child clocks: 24MHz * N / M1 / M0 */
+static const char *pll_audio0_4x_parents[] = { "dcxo" };
+NMM_CLK(pll_audio0_4x_clk,
+ CLK_PLL_AUDIO0_4X, /* id */
+ "pll_audio0_4x", /* name */
+ pll_audio0_4x_parents, /* parents */
+ 0x78, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 0, 1, 0, 0, /* m0 factor */
+ 1, 1, 0, 0, /* m1 factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* PLL_AUDIO0(2X) = (24MHz * N / M1 / M0) / P / 2 */
+static const char *pll_audio0_2x_parents[] = { "pll_audio0_4x" };
+FIXED_CLK(pll_audio0_2x_clk,
+ CLK_PLL_AUDIO0_2X, /* id */
+ "pll_audio0_2x", /* name */
+ pll_audio0_2x_parents, /* parents */
+ 0, /* freq */
+ 1, /* mult */
+ 2, /* div */
+ 0); /* flags */
+
+/* PLL_AUDIO0(1X) = 24MHz * N / M1 / M0 / P / 2 */
+static const char *pll_audio0_parents[] = { "pll_audio0_2x" };
+FIXED_CLK(pll_audio0_clk,
+ CLK_PLL_AUDIO0, /* id */
+ "pll_audio0", /* name */
+ pll_audio0_parents, /* parents */
+ 0, /* freq */
+ 1, /* mult */
+ 2, /* div */
+ 0); /* flags */
+
+/* For child clocks: 24MHz * N / M */
+static const char *pll_audio1_parents[] = { "dcxo" };
+NP_CLK(pll_audio1_clk,
+ CLK_PLL_AUDIO1, /* id */
+ "pll_audio1", /* name */
+ pll_audio1_parents, /* parents */
+ 0x80, /* offset */
+ 8, 7, 0, 0, /* n factor */
+ 1, 1, 0, 0, /* p factor */
+ 27, /* gate */
+ 28, 1000, /* lock */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_LOCK); /* flags */
+
+/* PLL_AUDIO1(DIV2) = 24MHz * N / M / P0 */
+static const char *pll_audio1_div2_parents[] = { "pll_audio1" };
+M_CLK(pll_audio1_div2_clk,
+ CLK_PLL_AUDIO1_DIV2, /* id */
+ "pll_audio1_div2", /* name */
+ pll_audio1_div2_parents, /* parents */
+ 0x80, /* offset */
+ 16, 3, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+/* PLL_AUDIO1(DIV5) = 24MHz * N / M / P1 */
+static const char *pll_audio1_div5_parents[] = { "pll_audio1" };
+M_CLK(pll_audio1_div5_clk,
+ CLK_PLL_AUDIO1_DIV5, /* id */
+ "pll_audio1_div5", /* name */
+ pll_audio1_div5_parents, /* parents */
+ 0x80, /* offset */
+ 20, 3, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+static const char *cpux_parents[] = { "dcxo", "osc32k", "iosc", "pll_cpux",
+ "pll_periph0", "pll_periph0_2x", "pll_periph0_800m" };
+M_CLK(cpux_clk,
+ CLK_CPUX, /* id */
+ "cpux", /* name */
+ cpux_parents, /* parents */
+ 0x500, /* offset */
+ 16, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* m factor */
+ 24, 3, /* mux */
+ 0, /* gate */
+ AW_CLK_HAS_MUX | AW_CLK_SET_PARENT); /* flags */
+
+static const char *cpux_axi_parents[] = { "cpux" };
+M_CLK(cpux_axi_clk,
+ CLK_CPUX_AXI, /* id */
+ "cpux_axi", /* name */
+ cpux_axi_parents, /* parents */
+ 0x500, /* offset */
+ 0, 2, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+static const char *cpux_apb_parents[] = { "cpux" };
+M_CLK(cpux_apb_clk,
+ CLK_CPUX_APB, /* id */
+ "cpux_apb", /* name */
+ cpux_apb_parents, /* parents */
+ 0x500, /* offset */
+ 8, 2, 0, 0, /* m factor */
+ 0, 0, /* mux */
+ 0, /* gate */
+ 0); /* flags */
+
+static const char *psi_ahb_parents[] = { "dcxo", "osc32k", "iosc",
+ "pll_periph0" };
+NM_CLK(psi_ahb_clk,
+ CLK_PSI_AHB, "psi-ahb", psi_ahb_parents, /* id, name, parents */
+ 0x510, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 2, 0, 0, /* m factor */
+ 24, 2, /* mux */
+ 0, /* gate */
+ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */
+
+static const char *apb0_parents[] = { "dcxo", "osc32k", "psi-ahb", "pll_periph0" };
+NM_CLK(apb0_clk,
+ CLK_APB0, "apb0", apb0_parents, /* id, name, parents */
+ 0x520, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 2, 0, 0, /* m factor */
+ 24, 2, /* mux */
+ 0, /* gate */
+ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */
+
+static const char *apb1_parents[] = { "dcxo", "osc32k", "psi-ahb", "pll_periph0" };
+NM_CLK(apb1_clk,
+ CLK_APB1, "apb1", apb1_parents, /* id, name, parents */
+ 0x524, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 2, 0, 0, /* m factor */
+ 24, 2, /* mux */
+ 0, /* gate */
+ AW_CLK_HAS_MUX | AW_CLK_REPARENT); /* flags */
+
+static const char *mbus_parents[] = { "dram" };
+FIXED_CLK(mbus_clk,
+ CLK_MBUS, "mbus", mbus_parents, /* id, name, parents */
+ 0, /* freq */
+ 1, /* mult */
+ 4, /* div */
+ 0); /* flags */
+
+static const char *de_parents[] = { "pll_periph0_2x", "pll_video0_4x",
+ "pll_video1_4x", "pll_audio1_div2" };
+M_CLK(de_clk,
+ CLK_DE, "de", de_parents, /* id, name, parents */
+ 0x600, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *di_parents[] = { "pll_periph0_2x", "pll_video0_4x",
+ "pll_video1_4x", "pll_audio1_div2" };
+M_CLK(di_clk,
+ CLK_DI, "di", di_parents, /* id, name, parents */
+ 0x620, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *g2d_parents[] = { "pll_periph0_2x", "pll_video0_4x",
+ "pll_video1_4x", "pll_audio1_div2" };
+M_CLK(g2d_clk,
+ CLK_G2D, "g2d", g2d_parents, /* id, name, parents */
+ 0x630, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *ce_parents[] = { "dcxo", "pll_periph0_2x", "pll_periph0" };
+NM_CLK(ce_clk,
+ CLK_CE, "ce", ce_parents, /* id, name, parents */
+ 0x680, /* offset */
+ 8, 2, 0, 0, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *ve_parents[] = { "pll_ve", "pll_periph0_2x" };
+M_CLK(ve_clk,
+ CLK_VE, "ve", ve_parents, /* id, name, parents */
+ 0x690, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 1, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | /* flags */
+ AW_CLK_REPARENT);
+
+static const char *dram_parents[] = { "pll_ddr0", "pll_audio1_div2",
+ "pll_periph0_2x", "pll_periph0_800m" };
+NM_CLK(dram_clk,
+ CLK_DRAM, "dram", dram_parents, /* id, name, parents */
+ 0x800, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 2, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX | /* flags */
+ AW_CLK_REPARENT);
+
+/* SMHC0 */
+static const char *mmc0_parents[] = { "dcxo", "pll_periph0", "pll_periph0_2x",
+ "pll_audio1_div2" };
+NM_CLK(mmc0_clk,
+ CLK_MMC0, "mmc0", mmc0_parents, /* id, name, parents */
+ 0x830, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* SMHC1 */
+static const char *mmc1_parents[] = { "dcxo", "pll_periph0", "pll_periph0_2x",
+ "pll_audio1_div2" };
+NM_CLK(mmc1_clk,
+ CLK_MMC1, "mmc1", mmc1_parents, /* id, name, parents */
+ 0x834, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* SMHC2 */
+static const char *mmc2_parents[] = { "dcxo", "pll_periph0", "pll_periph0_2x",
+ "pll_periph0_800m", "pll_audio1_div2" };
+NM_CLK(mmc2_clk,
+ CLK_MMC2, "mmc2", mmc2_parents, /* id, name, parents */
+ 0x838, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *spi0_parents[] = { "dcxo", "pll_periph0", "pll_periph0_2x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(spi0_clk,
+ CLK_SPI0, "spi0", spi0_parents, /* id, name, parents */
+ 0x940, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *spi1_parents[] = { "dcxo", "pll_periph0", "pll_periph0_2x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(spi1_clk,
+ CLK_SPI1, "spi1", spi1_parents, /* id, name, parents */
+ 0x944, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* Use M_CLK to get a gate */
+static const char *emac_25m_parents[] = { "pll_periph0" };
+M_CLK(emac_25m_clk,
+ CLK_EMAC_25M, /* id */
+ "emac_25m", /* name */
+ emac_25m_parents, /* parents */
+ 0x970, /* offset */
+ 0, 0, 24, AW_CLK_FACTOR_FIXED, /* m factor */
+ 0, 0, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_REPARENT); /* flags */
+
+static const char *irtx_parents[] = { "dcxo", "pll_periph0" };
+NM_CLK(irtx_clk,
+ CLK_IR_TX, "irtx", irtx_parents, /* id, name, parents */
+ 0x9C0, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *i2s0_parents[] = { "pll_audio0", "pll_audio0_4x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(i2s0_clk,
+ CLK_I2S0, "i2s0", i2s0_parents, /* id, name, parents */
+ 0xA10, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *i2s1_parents[] = { "pll_audio0", "pll_audio0_4x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(i2s1_clk,
+ CLK_I2S1, "i2s1", i2s1_parents, /* id, name, parents */
+ 0xA14, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *i2s2_parents[] = { "pll_audio0", "pll_audio0_4x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(i2s2_clk,
+ CLK_I2S2, "i2s2", i2s2_parents, /* id, name, parents */
+ 0xA18, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *i2s2_asrc_parents[] = { "pll_audio0_4x", "pll_periph0",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(i2s2_asrc_clk,
+ CLK_I2S2_ASRC, /* id */
+ "i2s2_asrc", /* name */
+ i2s2_asrc_parents, /* parents */
+ 0xA1C, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* OWA_TX */
+static const char *spdif_tx_parents[] = { "pll_audio0", "pll_audio0_4x",
+ "pll_audio1_div2", "pll_audio1_div5" };
+NM_CLK(spdif_tx_clk,
+ CLK_SPDIF_TX, "spdif_tx", spdif_tx_parents, /* id, name, parents */
+ 0xA24, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* OWA_RX */
+static const char *spdif_rx_parents[] = { "pll_periph0", "pll_audio1_div2",
+ "pll_audio1_div5" };
+NM_CLK(spdif_rx_clk,
+ CLK_SPDIF_RX, "spdif_rx", spdif_rx_parents, /* id, name, parents */
+ 0xA28, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *dmic_parents[] = { "pll_audio0", "pll_audio1_div2",
+ "pll_audio1_div5" };
+NM_CLK(dmic_clk,
+ CLK_DMIC, "dmic", dmic_parents, /* id, name, parents */
+ 0xA40, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *audio_dac_parents[] = { "pll_audio0", "pll_audio1_div2",
+ "pll_audio1_div5" };
+NM_CLK(audio_dac_clk,
+ CLK_AUDIO_DAC, /* id */
+ "audio_dac", /* name */
+ audio_dac_parents, /* parents */
+ 0xA50, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *audio_adc_parents[] = { "pll_audio0", "pll_audio1_div2",
+ "pll_audio1_div5" };
+NM_CLK(audio_adc_clk,
+ CLK_AUDIO_ADC, /* id */
+ "audio_adc", /* name */
+ audio_adc_parents, /* parents */
+ 0xA54, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/*
+ * XXX: These USB clocks are unusual, and can't be modeled fully with any of
+ * our existing clk classes.
+ *
+ * The clocks have three parents; they output 12 MHz when driven by either
+ * of the first two parents, and pass the third (32 kHz) through directly.
+ *
+ * Thus a divider table like the following would be needed:
+ * struct clk_div_table usb_ohci_div_table[] = {
+ * { .value = 0, .divider = 50 },
+ * { .value = 1, .divider = 2 },
+ * { .value = 2, .divider = 1 },
+ * { },
+ * };
+ *
+ * But we also require a gate.
+ *
+ * To work around this, model the clocks as if they had only one parent.
+ */
+static const char *usb_ohci_parents[] = { "pll_periph0",
+ /*"dcxo", "osc32k"*/ };
+M_CLK(usb_ohci0_clk,
+ CLK_USB_OHCI0, /* id */
+ "usb_ohci0", /* name */
+ usb_ohci_parents, /* parents */
+ 0xA70, /* offset */
+ 0, 0, 50, AW_CLK_FACTOR_FIXED, /* m factor */
+ 24, 2, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE /* | AW_CLK_HAS_MUX */); /* flags */
+
+M_CLK(usb_ohci1_clk,
+ CLK_USB_OHCI1, /* id */
+ "usb_ohci1", /* name */
+ usb_ohci_parents, /* parents */
+ 0xA74, /* offset */
+ 0, 0, 50, AW_CLK_FACTOR_FIXED, /* m factor */
+ 24, 2, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE /* | AW_CLK_HAS_MUX */); /* flags */
+
+
+static const char *dsi_parents[] = { "dcxo", "pll_periph0", "pll_video0_2x",
+ "pll_video1_2x", "pll_audio1_div2" };
+M_CLK(dsi_clk,
+ CLK_MIPI_DSI, "mipi-dsi", dsi_parents, /* id, name, parents */
+ 0xB24, /* offset */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *tconlcd_parents[] = { "pll_video0", "pll_video0_4x",
+ "pll_video1", "pll_video1_4x", "pll_periph0_2x", "pll_audio1_div2" };
+NM_CLK(tconlcd_clk,
+ CLK_TCON_LCD0, "tcon-lcd0", tconlcd_parents, /* id, name, parents */
+ 0xB60, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *tcontv_parents[] = { "pll_video0", "pll_video0_4x",
+ "pll_video1", "pll_video1_4x", "pll_periph0_2x", "pll_audio1_div2" };
+NM_CLK(tcontv_clk,
+ CLK_TCON_TV, "tcon-tv", tcontv_parents, /* id, name, parents */
+ 0xB80, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *tve_parents[] = { "pll_video0", "pll_video0_4x",
+ "pll_video1", "pll_video1_4x", "pll_periph0_2x", "pll_audio1_div2" };
+NM_CLK(tve_clk,
+ CLK_TVE, "tve", tve_parents, /* id, name, parents */
+ 0xBB0, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *tvd_parents[] = { "dcxo", "pll_video0", "pll_video1",
+ "pll_periph0" };
+M_CLK(tvd_clk,
+ CLK_TVD, "tvd", tvd_parents, /* id, name, parents */
+ 0xBC0, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *ledc_parents[] = { "dcxo", "pll_periph0" };
+NM_CLK(ledc_clk,
+ CLK_LEDC, "ledc", ledc_parents, /* id, name, parents */
+ 0xBF0, /* offset */
+ 8, 2, 0, AW_CLK_FACTOR_POWER_OF_TWO, /* n factor */
+ 0, 4, 0, 0, /* m factor */
+ 24, 1, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *csi_top_parents[] = { "pll_periph0_2x", "pll_video0_2x",
+ "pll_video1_2x" };
+M_CLK(csi_top_clk,
+ CLK_CSI_TOP, "csi-top", csi_top_parents, /* id, name, parents */
+ 0xC04, /* offset */
+ 0, 4, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *csi_mclk_parents[] = { "dcxo", "pll_periph0",
+ "pll_video0", "pll_video1", "pll_audio1_div2", "pll_audio1_div5" };
+M_CLK(csi_mclk,
+ CLK_CSI_MCLK, /* id */
+ "csi-mclk", /* name */
+ csi_mclk_parents, /* parents */
+ 0xC08, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+/* Use M_CLK to get a mux and a gate */
+static const char *tpadc_parents[] = { "dcxo", "pll_audio0" };
+M_CLK(tpadc_clk,
+ CLK_TPADC, "tpadc", tpadc_parents, /* id, name, parents */
+ 0xC50, /* offset */
+ 0, 0, 1, AW_CLK_FACTOR_FIXED, /* m factor */
+ 24, 2, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *dsp_parents[] = { "dcxo", "osc32k", "iosc",
+ "pll_periph0_2x", "pll_audio1_div2" };
+M_CLK(dsp_clk,
+ CLK_DSP, "dsp", dsp_parents, /* id, name, parents */
+ 0xC70, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 31, /* gate */
+ AW_CLK_HAS_GATE | AW_CLK_HAS_MUX |
+ AW_CLK_REPARENT); /* flags */
+
+static const char *riscv_parents[] = { "dcxo", "osc32k", "iosc",
+ "pll_periph0_800m", "pll_periph0", "pll_cpux", "pll_audio1_div2" };
+M_CLK(riscv_clk,
+ CLK_RISCV, "riscv", riscv_parents, /* id, name, parents */
+ 0xD00, /* offset */
+ 0, 5, 0, 0, /* m factor */
+ 24, 3, /* mux */
+ 0, /* gate */
+ AW_CLK_HAS_MUX | AW_CLK_SET_PARENT); /* flags */
+
+static const char *riscv_axi_parents[] = { "riscv" };
+static struct clk_div_table riscv_axi_div_table[] = {
+ { .value = 1, .divider = 2 },
+ { .value = 2, .divider = 3 },
+ { .value = 3, .divider = 4 },
+ { },
+};
+DIV_CLK(riscv_axi_clk,
+ CLK_RISCV_AXI, /* id */
+ "riscv_axi", riscv_axi_parents, /* name, parents */
+ 0xD00, /* offset */
+ 8, 2, /* shift, width */
+ CLK_DIV_WITH_TABLE, /* flags */
+ riscv_axi_div_table); /* table */
+
+/* TODO FANOUT */
+
+static struct aw_ccung_clk ccu_d1_clks[] = {
+ { .type = AW_CLK_NP, .clk.np = &pll_cpux_clk },
+ { .type = AW_CLK_NMM, .clk.nmm = &pll_ddr0_clk },
+ { .type = AW_CLK_NMM, .clk.nmm = &pll_periph0_4x_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_periph0_2x_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_periph0_800m_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &pll_periph0_clk },
+ { .type = AW_CLK_NP, .clk.np = &pll_video0_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_video0_4x_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &pll_video0_2x_clk },
+ { .type = AW_CLK_NP, .clk.np = &pll_video1_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_video1_4x_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &pll_video1_2x_clk },
+ { .type = AW_CLK_NMM, .clk.nmm = &pll_ve_clk },
+ { .type = AW_CLK_NMM, .clk.nmm = &pll_audio0_4x_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio0_2x_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &pll_audio0_clk },
+ { .type = AW_CLK_NP, .clk.np = &pll_audio1_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_audio1_div2_clk },
+ { .type = AW_CLK_M, .clk.m = &pll_audio1_div5_clk },
+ { .type = AW_CLK_M, .clk.m = &cpux_clk },
+ { .type = AW_CLK_M, .clk.m = &cpux_axi_clk },
+ { .type = AW_CLK_M, .clk.m = &cpux_apb_clk },
+ { .type = AW_CLK_NM, .clk.nm = &psi_ahb_clk },
+ { .type = AW_CLK_NM, .clk.nm = &apb0_clk },
+ { .type = AW_CLK_NM, .clk.nm = &apb1_clk },
+ { .type = AW_CLK_FIXED, .clk.fixed = &mbus_clk },
+ { .type = AW_CLK_M, .clk.m = &de_clk },
+ { .type = AW_CLK_M, .clk.m = &di_clk },
+ { .type = AW_CLK_M, .clk.m = &g2d_clk },
+ { .type = AW_CLK_NM, .clk.nm = &ce_clk },
+ { .type = AW_CLK_M, .clk.m = &ve_clk },
+ { .type = AW_CLK_NM, .clk.nm = &dram_clk },
+ { .type = AW_CLK_NM, .clk.nm = &mmc0_clk },
+ { .type = AW_CLK_NM, .clk.nm = &mmc1_clk },
+ { .type = AW_CLK_NM, .clk.nm = &mmc2_clk },
+ { .type = AW_CLK_NM, .clk.nm = &spi0_clk },
+ { .type = AW_CLK_NM, .clk.nm = &spi1_clk },
+ { .type = AW_CLK_M, .clk.m = &emac_25m_clk },
+ { .type = AW_CLK_NM, .clk.nm = &irtx_clk },
+ { .type = AW_CLK_NM, .clk.nm = &i2s0_clk },
+ { .type = AW_CLK_NM, .clk.nm = &i2s1_clk },
+ { .type = AW_CLK_NM, .clk.nm = &i2s2_clk },
+ { .type = AW_CLK_NM, .clk.nm = &i2s2_asrc_clk },
+ { .type = AW_CLK_NM, .clk.nm = &spdif_tx_clk },
+ { .type = AW_CLK_NM, .clk.nm = &spdif_rx_clk },
+ { .type = AW_CLK_NM, .clk.nm = &dmic_clk },
+ { .type = AW_CLK_NM, .clk.nm = &audio_dac_clk },
+ { .type = AW_CLK_NM, .clk.nm = &audio_adc_clk },
+ { .type = AW_CLK_M, .clk.m = &usb_ohci0_clk },
+ { .type = AW_CLK_M, .clk.m = &usb_ohci1_clk },
+ { .type = AW_CLK_M, .clk.m = &dsi_clk },
+ { .type = AW_CLK_NM, .clk.nm = &tconlcd_clk },
+ { .type = AW_CLK_NM, .clk.nm = &tcontv_clk },
+ { .type = AW_CLK_NM, .clk.nm = &tve_clk },
+ { .type = AW_CLK_M, .clk.m = &tvd_clk },
+ { .type = AW_CLK_NM, .clk.nm = &ledc_clk },
+ { .type = AW_CLK_M, .clk.m = &csi_top_clk },
+ { .type = AW_CLK_M, .clk.m = &csi_mclk },
+ { .type = AW_CLK_M, .clk.m = &tpadc_clk },
+ { .type = AW_CLK_M, .clk.m = &dsp_clk },
+ { .type = AW_CLK_M, .clk.m = &riscv_clk },
+ { .type = AW_CLK_DIV, .clk.div = &riscv_axi_clk},
+};
+
+static int
+ccu_d1_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "allwinner,sun20i-d1-ccu"))
+ return (ENXIO);
+
+ device_set_desc(dev, "Allwinner D1 Clock Controller Unit");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+ccu_d1_attach(device_t dev)
+{
+ struct aw_ccung_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ sc->resets = ccu_d1_resets;
+ sc->nresets = nitems(ccu_d1_resets);
+ sc->gates = ccu_d1_gates;
+ sc->ngates = nitems(ccu_d1_gates);
+ sc->clks = ccu_d1_clks;
+ sc->nclks = nitems(ccu_d1_clks);
+
+ return (aw_ccung_attach(dev));
+}
+
+static device_method_t ccu_d1_methods[] = {
+ DEVMETHOD(device_probe, ccu_d1_probe),
+ DEVMETHOD(device_attach, ccu_d1_attach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(ccu_d1, ccu_d1_driver, ccu_d1_methods,
+ sizeof(struct aw_ccung_softc), aw_ccung_driver);
+
+EARLY_DRIVER_MODULE(ccu_d1, simplebus, ccu_d1_driver, 0, 0,
+ BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);
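
The driver itself only fills in the reset, gate, and clock tables; aw_ccung_attach() registers them with the clock framework and exports the device as an OFW clock and reset provider. A hypothetical consumer would then resolve its clocks by the names assigned in its own FDT node, along these lines (the device, clock name, and frequency are illustrative, not taken from this commit):

#include <dev/clk/clk.h>

static int
mydev_setup_clock(device_t dev)
{
	clk_t clk;
	int error;

	/* "mod" must match a clock-names entry in the consumer's node. */
	error = clk_get_by_ofw_name(dev, 0, "mod", &clk);
	if (error != 0)
		return (error);
	error = clk_set_freq(clk, 50000000, CLK_SET_ROUND_DOWN);
	if (error != 0)
		return (error);
	return (clk_enable(clk));
}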
diff --git a/sys/dev/clk/clk_bus.c b/sys/dev/clk/clk_bus.c
index 622ff1fbf01d..2eb25fff6542 100644
--- a/sys/dev/clk/clk_bus.c
+++ b/sys/dev/clk/clk_bus.c
@@ -69,7 +69,8 @@ ofw_clkbus_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t ofw_clkbus_methods[] = {
diff --git a/sys/dev/clk/clk_fixed.c b/sys/dev/clk/clk_fixed.c
index 7aec5b92dfe0..f8dcfb8378cd 100644
--- a/sys/dev/clk/clk_fixed.c
+++ b/sys/dev/clk/clk_fixed.c
@@ -269,7 +269,8 @@ clk_fixed_attach(device_t dev)
OF_prop_free(__DECONST(char *, def.clkdef.name));
OF_prop_free(def.clkdef.parent_names);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
OF_prop_free(__DECONST(char *, def.clkdef.name));
diff --git a/sys/dev/clk/starfive/jh7110_clk.c b/sys/dev/clk/starfive/jh7110_clk.c
new file mode 100644
index 000000000000..adb5707b3f64
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk.c
@@ -0,0 +1,277 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Emmanuel Vadot <manu@freebsd.org>
+ * Copyright (c) 2022 Mitchell Horne <mhorne@FreeBSD.org>
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/resource.h>
+
+#include <dev/clk/clk.h>
+#include <dev/hwreset/hwreset.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include <dev/clk/starfive/jh7110_clk.h>
+
+#include "clkdev_if.h"
+#include "hwreset_if.h"
+
+#define JH7110_DIV_MASK 0xffffff
+#define JH7110_MUX_SHIFT 24
+#define JH7110_MUX_MASK 0x3f000000
+#define JH7110_ENABLE_SHIFT 31
+
+#define REG_SIZE 4
+
+struct jh7110_clk_sc {
+ uint32_t offset;
+ uint32_t flags;
+ uint64_t d_max;
+ int id;
+};
+
+#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))
+
+#define READ4(_sc, _off) \
+ bus_read_4(_sc->mem_res, _off)
+#define WRITE4(_sc, _off, _val) \
+ bus_write_4(_sc->mem_res, _off, _val)
+
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+/* Reset functions */
+
+int
+jh7110_reset_assert(device_t dev, intptr_t id, bool assert)
+{
+ struct jh7110_clkgen_softc *sc;
+ uint32_t regvalue, offset, bitmask = 1UL << id % 32;
+
+ sc = device_get_softc(dev);
+ offset = sc->reset_selector_offset + id / 32 * 4;
+
+ mtx_lock(&sc->mtx);
+
+ regvalue = READ4(sc, offset);
+
+ if (assert)
+ regvalue |= bitmask;
+ else
+ regvalue &= ~bitmask;
+ WRITE4(sc, offset, regvalue);
+
+ mtx_unlock(&sc->mtx);
+
+ return (0);
+}
+
+int
+jh7110_reset_is_asserted(device_t dev, intptr_t id, bool *reset)
+{
+ struct jh7110_clkgen_softc *sc;
+ uint32_t regvalue, offset, bitmask;
+
+ sc = device_get_softc(dev);
+ offset = sc->reset_status_offset + id / 32 * 4;
+
+ mtx_lock(&sc->mtx);
+
+ regvalue = READ4(sc, offset);
+ bitmask = 1UL << id % 32;
+
+ mtx_unlock(&sc->mtx);
+
+ *reset = (regvalue & bitmask) == 0;
+
+ return (0);
+}
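
Both reset methods rely on the same banked layout: resets are packed 32 to a 32-bit register, so the register offset and the bit position are both derived from the id. As a worked example against the SYSCRG offsets in jh7110_clk.h (the id is illustrative): reset id 35 maps to the word at 0x2f8 + (35 / 32) * 4 = 0x2fc, bit 35 % 32 = 3.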
+
+/* Clock functions */
+
+static int
+jh7110_clk_init(struct clknode *clk, device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+ struct jh7110_clk_sc *sc_clk;
+ uint32_t reg;
+ int idx = 0;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ sc_clk = clknode_get_softc(clk);
+
+ if (sc_clk->flags & JH7110_CLK_HAS_MUX) {
+ DEVICE_LOCK(clk);
+ reg = READ4(sc, sc_clk->offset);
+ DEVICE_UNLOCK(clk);
+ idx = (reg & JH7110_MUX_MASK) >> JH7110_MUX_SHIFT;
+ }
+
+ clknode_init_parent_idx(clk, idx);
+
+ return (0);
+}
+
+static int
+jh7110_clk_set_gate(struct clknode *clk, bool enable)
+{
+ struct jh7110_clkgen_softc *sc;
+ struct jh7110_clk_sc *sc_clk;
+ uint32_t reg;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ sc_clk = clknode_get_softc(clk);
+
+ if ((sc_clk->flags & JH7110_CLK_HAS_GATE) == 0)
+ return (0);
+
+ DEVICE_LOCK(clk);
+
+ reg = READ4(sc, sc_clk->offset);
+ if (enable)
+ reg |= (1 << JH7110_ENABLE_SHIFT);
+ else
+ reg &= ~(1 << JH7110_ENABLE_SHIFT);
+ WRITE4(sc, sc_clk->offset, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+jh7110_clk_set_mux(struct clknode *clk, int idx)
+{
+ struct jh7110_clkgen_softc *sc;
+ struct jh7110_clk_sc *sc_clk;
+ uint32_t reg;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ sc_clk = clknode_get_softc(clk);
+
+ if ((sc_clk->flags & JH7110_CLK_HAS_MUX) == 0)
+ return (ENXIO);
+
+ /* Check that the index fits within the mux field */
+ if ((idx & (JH7110_MUX_MASK >> JH7110_MUX_SHIFT)) != idx)
+ return (EINVAL);
+
+ DEVICE_LOCK(clk);
+
+ reg = READ4(sc, sc_clk->offset) & ~JH7110_MUX_MASK;
+ reg |= idx << JH7110_MUX_SHIFT;
+ WRITE4(sc, sc_clk->offset, reg);
+
+ DEVICE_UNLOCK(clk);
+
+ return (0);
+}
+
+static int
+jh7110_clk_recalc_freq(struct clknode *clk, uint64_t *freq)
+{
+ struct jh7110_clkgen_softc *sc;
+ struct jh7110_clk_sc *sc_clk;
+ uint32_t divisor;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ sc_clk = clknode_get_softc(clk);
+
+ /* Returning an error here causes a panic */
+ if ((sc_clk->flags & JH7110_CLK_HAS_DIV) == 0)
+ return (0);
+
+ DEVICE_LOCK(clk);
+
+ divisor = READ4(sc, sc_clk->offset) & JH7110_DIV_MASK;
+
+ DEVICE_UNLOCK(clk);
+
+ if (divisor)
+ *freq = *freq / divisor;
+ else
+ *freq = 0;
+
+ return (0);
+}
+
+static int
+jh7110_clk_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
+ int flags, int *done)
+{
+ struct jh7110_clkgen_softc *sc;
+ struct jh7110_clk_sc *sc_clk;
+ uint32_t divisor;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ sc_clk = clknode_get_softc(clk);
+
+ if ((sc_clk->flags & JH7110_CLK_HAS_DIV) == 0)
+ return (0);
+
+ divisor = MIN(MAX(DIV_ROUND_CLOSEST(fin, *fout), 1UL), sc_clk->d_max);
+
+ if (flags & CLK_SET_DRYRUN)
+ goto done;
+
+ DEVICE_LOCK(clk);
+
+ divisor |= READ4(sc, sc_clk->offset) & ~JH7110_DIV_MASK;
+ WRITE4(sc, sc_clk->offset, divisor);
+
+ DEVICE_UNLOCK(clk);
+
+done:
+ *fout = divisor;
+ *done = 1;
+
+ return (0);
+}
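
The divider selection above picks the nearest integer ratio and clamps it to the range [1, d_max]. As a worked example (illustrative numbers): with fin = 100 MHz and a requested *fout of 30 MHz, DIV_ROUND_CLOSEST(100000000, 30000000) = 3, so the clock would actually run at roughly 33.3 MHz.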
+
+static clknode_method_t jh7110_clknode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, jh7110_clk_init),
+ CLKNODEMETHOD(clknode_set_gate, jh7110_clk_set_gate),
+ CLKNODEMETHOD(clknode_set_mux, jh7110_clk_set_mux),
+ CLKNODEMETHOD(clknode_recalc_freq, jh7110_clk_recalc_freq),
+ CLKNODEMETHOD(clknode_set_freq, jh7110_clk_set_freq),
+ CLKNODEMETHOD_END
+};
+
+DEFINE_CLASS_1(jh7110_clknode, jh7110_clknode_class, jh7110_clknode_methods,
+ sizeof(struct jh7110_clk_sc), clknode_class);
+
+int
+jh7110_clk_register(struct clkdom *clkdom, const struct jh7110_clk_def *clkdef)
+{
+ struct clknode *clk;
+ struct jh7110_clk_sc *sc;
+
+ clk = clknode_create(clkdom, &jh7110_clknode_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (-1);
+
+ sc = clknode_get_softc(clk);
+
+ sc->offset = clkdef->clkdef.id * REG_SIZE;
+
+ sc->flags = clkdef->flags;
+ sc->id = clkdef->clkdef.id;
+ sc->d_max = clkdef->d_max;
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
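
Note that jh7110_clk_register() derives each clock's control register directly from its id (id * REG_SIZE), matching the JH7110 layout of one 32-bit control word per clock: per the masks at the top of this file, the divider occupies bits 23:0, the mux select bits 29:24, and the enable gate bit 31. As a worked example (illustrative id): clock id 91 is controlled by the word at byte offset 91 * 4 = 0x16c.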
diff --git a/sys/dev/clk/starfive/jh7110_clk.h b/sys/dev/clk/starfive/jh7110_clk.h
new file mode 100644
index 000000000000..882f82032d44
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk.h
@@ -0,0 +1,72 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+#ifndef _JH7110_CLK_H_
+#define _JH7110_CLK_H_
+
+#include <dev/clk/clk.h>
+
+#define JH7110_CLK_HAS_GATE 0x01
+#define JH7110_CLK_HAS_MUX 0x02
+#define JH7110_CLK_HAS_DIV 0x04
+#define JH7110_CLK_HAS_INV 0x08
+
+#define AONCRG_RESET_SELECTOR 0x38
+#define AONCRG_RESET_STATUS 0x3c
+#define STGCRG_RESET_SELECTOR 0x74
+#define STGCRG_RESET_STATUS 0x78
+#define SYSCRG_RESET_SELECTOR 0x2f8
+#define SYSCRG_RESET_STATUS 0x308
+
+struct jh7110_clkgen_softc {
+ struct mtx mtx;
+ struct clkdom *clkdom;
+ struct resource *mem_res;
+ uint32_t reset_status_offset;
+ uint32_t reset_selector_offset;
+};
+
+struct jh7110_clk_def {
+ struct clknode_init_def clkdef;
+ uint32_t offset;
+ uint32_t flags;
+ uint64_t d_max;
+};
+
+#define JH7110_CLK(_idx, _name, _pn, _d_max, _flags) \
+{ \
+ .clkdef.id = _idx, \
+ .clkdef.name = _name, \
+ .clkdef.parent_names = _pn, \
+ .clkdef.parent_cnt = nitems(_pn), \
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS, \
+ .flags = _flags, \
+ .d_max = _d_max, \
+}
+
+#define JH7110_GATE(_idx, _name, _pn) \
+ JH7110_CLK(_idx, _name, _pn, 0, JH7110_CLK_HAS_GATE)
+#define JH7110_MUX(_idx, _name, _pn) \
+ JH7110_CLK(_idx, _name, _pn, 0, JH7110_CLK_HAS_MUX)
+#define JH7110_DIV(_idx, _name, _pn, _d_max) \
+ JH7110_CLK(_idx, _name, _pn, _d_max, JH7110_CLK_HAS_DIV)
+#define JH7110_GATEMUX(_idx, _name, _pn) \
+ JH7110_CLK(_idx, _name, _pn, 0, JH7110_CLK_HAS_GATE | \
+ JH7110_CLK_HAS_MUX)
+#define JH7110_GATEDIV(_idx, _name, _pn, _d_max) \
+ JH7110_CLK(_idx, _name, _pn, _d_max, JH7110_CLK_HAS_GATE | \
+ JH7110_CLK_HAS_DIV)
+#define JH7110_INV(_idx, _name, _pn) \
+ JH7110_CLK(_idx, _name, _pn, 0, JH7110_CLK_HAS_INV)
+
+int jh7110_clk_register(struct clkdom *clkdom,
+ const struct jh7110_clk_def *clkdef);
+int jh7110_ofw_map(struct clkdom *clkdom, uint32_t ncells, phandle_t *cells,
+ struct clknode **clk);
+int jh7110_reset_is_asserted(device_t dev, intptr_t id, bool *reset);
+int jh7110_reset_assert(device_t dev, intptr_t id, bool assert);
+
+#endif /* _JH7110_CLK_H_ */
diff --git a/sys/dev/clk/starfive/jh7110_clk_aon.c b/sys/dev/clk/starfive/jh7110_clk_aon.c
new file mode 100644
index 000000000000..21b15142835e
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk_aon.c
@@ -0,0 +1,168 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * Copyright (c) 2020 Oskar Holmlund <oskar.holmlund@ohdata.se>
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+/* Clocks for JH7110 AON group. PLL driver must be attached before this. */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/hwreset/hwreset.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk.h>
+#include <dev/clk/starfive/jh7110_clk.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clkdev_if.h"
+#include "hwreset_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "starfive,jh7110-aoncrg", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_SHAREABLE },
+ RESOURCE_SPEC_END
+};
+
+/* parents */
+static const char *gmac0_axi_p[] = { "stg_axiahb" };
+static const char *gmac0_ahb_p[] = { "stg_axiahb" };
+static const char *gmac0_tx_inv_p[] = { "gmac0_tx" };
+static const char *gmac0_tx_p[] = { "gmac0_gtxclk", "gmac0_rmii_rtx" };
+static const char *gmac0_rmii_rtx_p[] = { "gmac0_rmii_refin" };
+
+/* AON clocks */
+static const struct jh7110_clk_def aon_clks[] = {
+ JH7110_GATE(JH7110_AONCLK_GMAC0_AXI, "gmac0_axi", gmac0_axi_p),
+ JH7110_GATE(JH7110_AONCLK_GMAC0_AHB, "gmac0_ahb", gmac0_ahb_p),
+ JH7110_GATEMUX(JH7110_AONCLK_GMAC0_TX, "gmac0_tx", gmac0_tx_p),
+ JH7110_INV(JH7110_AONCLK_GMAC0_TX_INV, "gmac0_tx_inv", gmac0_tx_inv_p),
+ JH7110_DIV(JH7110_AONCLK_GMAC0_RMII_RTX, "gmac0_rmii_rtx",
+ gmac0_rmii_rtx_p, 30),
+};
+
+static int
+jh7110_clk_aon_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "StarFive JH7110 AON clock generator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+jh7110_clk_aon_attach(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+
+ sc->reset_status_offset = AONCRG_RESET_STATUS;
+ sc->reset_selector_offset = AONCRG_RESET_SELECTOR;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ err = bus_alloc_resources(dev, res_spec, &sc->mem_res);
+ if (err != 0) {
+ device_printf(dev, "Couldn't allocate resources, error %d\n",
+ err);
+ return (ENXIO);
+ }
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL) {
+ device_printf(dev, "Couldn't create clkdom, error %d\n", err);
+ return (ENXIO);
+ }
+
+ for (int i = 0; i < nitems(aon_clks); i++) {
+ err = jh7110_clk_register(sc->clkdom, &aon_clks[i]);
+ if (err != 0) {
+ device_printf(dev,
+ "Couldn't register clk %s, error %d\n",
+ aon_clks[i].clkdef.name, err);
+ return (ENXIO);
+ }
+ }
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("Cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ hwreset_register_ofw_provider(dev);
+
+ return (0);
+}
+
+static void
+jh7110_clk_aon_device_lock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+jh7110_clk_aon_device_unlock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static int
+jh7110_clk_aon_detach(device_t dev)
+{
+ /* Detach not supported */
+ return (EBUSY);
+}
+
+static device_method_t jh7110_clk_aon_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, jh7110_clk_aon_probe),
+ DEVMETHOD(device_attach, jh7110_clk_aon_attach),
+ DEVMETHOD(device_detach, jh7110_clk_aon_detach),
+
+ /* clkdev interface */
+ DEVMETHOD(clkdev_device_lock, jh7110_clk_aon_device_lock),
+ DEVMETHOD(clkdev_device_unlock, jh7110_clk_aon_device_unlock),
+
+ /* Reset interface */
+ DEVMETHOD(hwreset_assert, jh7110_reset_assert),
+ DEVMETHOD(hwreset_is_asserted, jh7110_reset_is_asserted),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(jh7110_aon, jh7110_aon_driver, jh7110_clk_aon_methods,
+ sizeof(struct jh7110_clkgen_softc));
+EARLY_DRIVER_MODULE(jh7110_aon, simplebus, jh7110_aon_driver, 0, 0,
+ BUS_PASS_BUS + BUS_PASS_ORDER_LATE);
+MODULE_VERSION(jh7110_aon, 1);
diff --git a/sys/dev/clk/starfive/jh7110_clk_pll.c b/sys/dev/clk/starfive/jh7110_clk_pll.c
new file mode 100644
index 000000000000..5882f33984ae
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk_pll.c
@@ -0,0 +1,386 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Mitchell Horne
+ * <mhorne@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk.h>
+#include <dev/clk/starfive/jh7110_clk.h>
+#include <dev/clk/starfive/jh7110_clk_pll.h>
+#include <dev/syscon/syscon.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clkdev_if.h"
+#include "syscon_if.h"
+
+#define JH7110_SYS_SYSCON_SYSCFG24 0x18
+#define JH7110_SYS_SYSCON_SYSCFG28 0x1c
+#define JH7110_SYS_SYSCON_SYSCFG32 0x20
+#define JH7110_SYS_SYSCON_SYSCFG36 0x24
+#define JH7110_SYS_SYSCON_SYSCFG40 0x28
+#define JH7110_SYS_SYSCON_SYSCFG44 0x2c
+#define JH7110_SYS_SYSCON_SYSCFG48 0x30
+#define JH7110_SYS_SYSCON_SYSCFG52 0x34
+
+#define DEVICE_LOCK(_clk) \
+ CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
+#define DEVICE_UNLOCK(_clk) \
+ CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))
+
+#define PLL_MASK_FILL(sc, id) \
+do { \
+ sc->dacpd_mask = PLL## id ##_DACPD_MASK; \
+ sc->dsmpd_mask = PLL## id ##_DSMPD_MASK; \
+ sc->fbdiv_mask = PLL## id ##_FBDIV_MASK; \
+ sc->frac_mask = PLL## id ##_FRAC_MASK; \
+ sc->prediv_mask = PLL## id ##_PREDIV_MASK; \
+ sc->postdiv1_mask = PLL## id ##_POSTDIV1_MASK; \
+} while (0)
+
+#define PLL_SHIFT_FILL(sc, id) \
+do { \
+ sc->dacpd_shift = PLL## id ##_DACPD_SHIFT; \
+ sc->dsmpd_shift = PLL## id ##_DSMPD_SHIFT; \
+ sc->fbdiv_shift = PLL## id ##_FBDIV_SHIFT; \
+ sc->frac_shift = PLL## id ##_FRAC_SHIFT; \
+ sc->prediv_shift = PLL## id ##_PREDIV_SHIFT; \
+ sc->postdiv1_shift = PLL## id ##_POSTDIV1_SHIFT; \
+} while (0)
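+
+/*
+ * Illustrative expansion: PLL_MASK_FILL(sc, 0) token-pastes the id into
+ * the constant names, assigning sc->dacpd_mask = PLL0_DACPD_MASK,
+ * sc->dsmpd_mask = PLL0_DSMPD_MASK, and so on for the remaining fields.
+ */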
+
+struct jh7110_clk_pll_softc {
+ struct mtx mtx;
+ struct clkdom *clkdom;
+ struct syscon *syscon;
+};
+
+struct jh7110_pll_clknode_softc {
+ uint32_t dacpd_offset;
+ uint32_t dsmpd_offset;
+ uint32_t fbdiv_offset;
+ uint32_t frac_offset;
+ uint32_t prediv_offset;
+ uint32_t postdiv1_offset;
+
+ uint32_t dacpd_mask;
+ uint32_t dsmpd_mask;
+ uint32_t fbdiv_mask;
+ uint32_t frac_mask;
+ uint32_t prediv_mask;
+ uint32_t postdiv1_mask;
+
+ uint32_t dacpd_shift;
+ uint32_t dsmpd_shift;
+ uint32_t fbdiv_shift;
+ uint32_t frac_shift;
+ uint32_t prediv_shift;
+ uint32_t postdiv1_shift;
+
+ const struct jh7110_pll_syscon_value *syscon_arr;
+ int syscon_nitems;
+};
+
+static const char *pll_parents[] = { "osc" };
+
+static struct jh7110_clk_def pll_out_clks[] = {
+ {
+ .clkdef.id = JH7110_PLLCLK_PLL0_OUT,
+ .clkdef.name = "pll0_out",
+ .clkdef.parent_names = pll_parents,
+ .clkdef.parent_cnt = nitems(pll_parents),
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS,
+ },
+ {
+ .clkdef.id = JH7110_PLLCLK_PLL1_OUT,
+ .clkdef.name = "pll1_out",
+ .clkdef.parent_names = pll_parents,
+ .clkdef.parent_cnt = nitems(pll_parents),
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS,
+ },
+ {
+ .clkdef.id = JH7110_PLLCLK_PLL2_OUT,
+ .clkdef.name = "pll2_out",
+ .clkdef.parent_names = pll_parents,
+ .clkdef.parent_cnt = nitems(pll_parents),
+ .clkdef.flags = CLK_NODE_STATIC_STRINGS,
+ },
+};
+
+static int jh7110_clk_pll_register(struct clkdom *clkdom,
+ struct jh7110_clk_def *clkdef);
+
+static int
+jh7110_clk_pll_recalc_freq(struct clknode *clk, uint64_t *freq)
+{
+ struct jh7110_clk_pll_softc *sc;
+ struct jh7110_pll_clknode_softc *clk_sc;
+ uint32_t dacpd, dsmpd, fbdiv, prediv, postdiv1;
+ uint64_t frac, fcal = 0;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ clk_sc = clknode_get_softc(clk);
+
+ DEVICE_LOCK(clk);
+
+ dacpd = (SYSCON_READ_4(sc->syscon, clk_sc->dacpd_offset) & clk_sc->dacpd_mask) >>
+ clk_sc->dacpd_shift;
+ dsmpd = (SYSCON_READ_4(sc->syscon, clk_sc->dsmpd_offset) & clk_sc->dsmpd_mask) >>
+ clk_sc->dsmpd_shift;
+ fbdiv = (SYSCON_READ_4(sc->syscon, clk_sc->fbdiv_offset) & clk_sc->fbdiv_mask) >>
+ clk_sc->fbdiv_shift;
+ prediv = (SYSCON_READ_4(sc->syscon, clk_sc->prediv_offset) & clk_sc->prediv_mask) >>
+ clk_sc->prediv_shift;
+ postdiv1 = (SYSCON_READ_4(sc->syscon, clk_sc->postdiv1_offset) &
+ clk_sc->postdiv1_mask) >> clk_sc->postdiv1_shift;
+ frac = (SYSCON_READ_4(sc->syscon, clk_sc->frac_offset) & clk_sc->frac_mask) >>
+ clk_sc->frac_shift;
+
+ DEVICE_UNLOCK(clk);
+
+ /* dacpd and dsmpd both being 0 entails Fraction Multiple Mode */
+ if (dacpd == 0 && dsmpd == 0)
+ fcal = frac * FRAC_PATR_SIZE / (1 << 24);
+
+ *freq = *freq / FRAC_PATR_SIZE * (fbdiv * FRAC_PATR_SIZE + fcal) /
+ prediv / (1 << postdiv1);
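+ /*
+ * Worked example (illustrative): for a 24 MHz "osc" parent with the
+ * PLL0_FREQ_1500 table settings (prediv = 2, fbdiv = 125, frac = 0,
+ * postdiv1 register value 0), the line above computes
+ * 24000000 / 1000 * (125 * 1000 + 0) / 2 / (1 << 0) = 1500000000.
+ */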
+
+ return (0);
+}
+
+static int
+jh7110_clk_pll_set_freq(struct clknode *clk, uint64_t fin, uint64_t *fout,
+ int flags, int *done)
+{
+ struct jh7110_clk_pll_softc *sc;
+ struct jh7110_pll_clknode_softc *clk_sc;
+ const struct jh7110_pll_syscon_value *syscon_val = NULL;
+
+ sc = device_get_softc(clknode_get_device(clk));
+ clk_sc = clknode_get_softc(clk);
+
+ for (int i = 0; i != clk_sc->syscon_nitems; i++) {
+ if (*fout == clk_sc->syscon_arr[i].freq) {
+ syscon_val = &clk_sc->syscon_arr[i];
+ break;
+ }
+ }
+
+ if (syscon_val == NULL) {
+ printf("%s: tried to set an unknown frequency %ju for %s\n",
+ __func__, *fout, clknode_get_name(clk));
+ return (EINVAL);
+ }
+
+ if ((flags & CLK_SET_DRYRUN) != 0) {
+ *done = 1;
+ return (0);
+ }
+
+ DEVICE_LOCK(clk);
+
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->dacpd_offset, clk_sc->dacpd_mask,
+ syscon_val->dacpd << clk_sc->dacpd_shift & clk_sc->dacpd_mask);
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->dsmpd_offset, clk_sc->dsmpd_mask,
+ syscon_val->dsmpd << clk_sc->dsmpd_shift & clk_sc->dsmpd_mask);
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->prediv_offset, clk_sc->prediv_mask,
+ syscon_val->prediv << clk_sc->prediv_shift & clk_sc->prediv_mask);
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->fbdiv_offset, clk_sc->fbdiv_mask,
+ syscon_val->fbdiv << clk_sc->fbdiv_shift & clk_sc->fbdiv_mask);
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->postdiv1_offset,
+ clk_sc->postdiv1_mask, (syscon_val->postdiv1 >> 1) <<
+ clk_sc->postdiv1_shift & clk_sc->postdiv1_mask);
+
+ if (!syscon_val->dacpd && !syscon_val->dsmpd) {
+ SYSCON_MODIFY_4(sc->syscon, clk_sc->frac_offset, clk_sc->frac_mask,
+ syscon_val->frac << clk_sc->frac_shift & clk_sc->frac_mask);
+ }
+
+ DEVICE_UNLOCK(clk);
+
+ *done = 1;
+ return (0);
+}
+
+static int
+jh7110_clk_pll_init(struct clknode *clk, device_t dev)
+{
+ clknode_init_parent_idx(clk, 0);
+
+ return (0);
+}
+
+static int
+jh7110_clk_pll_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "starfive,jh7110-pll"))
+ return (ENXIO);
+
+ device_set_desc(dev, "StarFive JH7110 PLL clock generator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+jh7110_clk_pll_attach(device_t dev)
+{
+ struct jh7110_clk_pll_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL) {
+ device_printf(dev, "Couldn't create clkdom\n");
+ return (ENXIO);
+ }
+
+ error = syscon_get_by_ofw_node(dev, OF_parent(ofw_bus_get_node(dev)),
+ &sc->syscon);
+ if (error != 0) {
+ device_printf(dev, "Couldn't get syscon handle of parent\n");
+ return (error);
+ }
+
+ for (int i = 0; i < nitems(pll_out_clks); i++) {
+ error = jh7110_clk_pll_register(sc->clkdom, &pll_out_clks[i]);
+ if (error != 0)
+ device_printf(dev, "Couldn't register clock %s: %d\n",
+ pll_out_clks[i].clkdef.name, error);
+ }
+
+ error = clkdom_finit(sc->clkdom);
+ if (error != 0) {
+ device_printf(dev, "clkdom_finit() returned %d\n", error);
+ }
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ return (0);
+}
+
+static void
+jh7110_clk_pll_device_lock(device_t dev)
+{
+ struct jh7110_clk_pll_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+jh7110_clk_pll_device_unlock(device_t dev)
+{
+ struct jh7110_clk_pll_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static clknode_method_t jh7110_pllnode_methods[] = {
+ /* Device interface */
+ CLKNODEMETHOD(clknode_init, jh7110_clk_pll_init),
+ CLKNODEMETHOD(clknode_recalc_freq, jh7110_clk_pll_recalc_freq),
+ CLKNODEMETHOD(clknode_set_freq, jh7110_clk_pll_set_freq),
+
+ CLKNODEMETHOD_END
+};
+
+static device_method_t jh7110_clk_pll_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, jh7110_clk_pll_probe),
+ DEVMETHOD(device_attach, jh7110_clk_pll_attach),
+
+ /* clkdev interface */
+ DEVMETHOD(clkdev_device_lock, jh7110_clk_pll_device_lock),
+ DEVMETHOD(clkdev_device_unlock, jh7110_clk_pll_device_unlock),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(jh7110_pllnode, jh7110_pllnode_class, jh7110_pllnode_methods,
+ sizeof(struct jh7110_pll_clknode_softc), clknode_class);
+DEFINE_CLASS_0(jh7110_clk_pll, jh7110_clk_pll_driver, jh7110_clk_pll_methods,
+ sizeof(struct jh7110_clk_pll_softc));
+EARLY_DRIVER_MODULE(jh7110_clk_pll, simplebus, jh7110_clk_pll_driver, 0, 0,
+ BUS_PASS_BUS + BUS_PASS_ORDER_EARLY);
+MODULE_VERSION(jh7110_clk_pll, 1);
+
+static int
+jh7110_clk_pll_register(struct clkdom *clkdom, struct jh7110_clk_def *clkdef)
+{
+ struct clknode *clk = NULL;
+ struct jh7110_pll_clknode_softc *sc;
+
+ clk = clknode_create(clkdom, &jh7110_pllnode_class, &clkdef->clkdef);
+ if (clk == NULL)
+ return (1);
+
+ sc = clknode_get_softc(clk);
+
+ switch (clkdef->clkdef.id) {
+ case JH7110_PLLCLK_PLL0_OUT:
+ sc->syscon_arr = jh7110_pll0_syscon_freq;
+ sc->syscon_nitems = nitems(jh7110_pll0_syscon_freq);
+ PLL_MASK_FILL(sc, 0);
+ PLL_SHIFT_FILL(sc, 0);
+ sc->dacpd_offset = JH7110_SYS_SYSCON_SYSCFG24;
+ sc->dsmpd_offset = JH7110_SYS_SYSCON_SYSCFG24;
+ sc->fbdiv_offset = JH7110_SYS_SYSCON_SYSCFG28;
+ sc->frac_offset = JH7110_SYS_SYSCON_SYSCFG32;
+ sc->prediv_offset = JH7110_SYS_SYSCON_SYSCFG36;
+ sc->postdiv1_offset = JH7110_SYS_SYSCON_SYSCFG32;
+ break;
+ case JH7110_PLLCLK_PLL1_OUT:
+ sc->syscon_arr = jh7110_pll1_syscon_freq;
+ sc->syscon_nitems = nitems(jh7110_pll1_syscon_freq);
+ PLL_MASK_FILL(sc, 1);
+ PLL_SHIFT_FILL(sc, 1);
+ sc->dacpd_offset = JH7110_SYS_SYSCON_SYSCFG36;
+ sc->dsmpd_offset = JH7110_SYS_SYSCON_SYSCFG36;
+ sc->fbdiv_offset = JH7110_SYS_SYSCON_SYSCFG36;
+ sc->frac_offset = JH7110_SYS_SYSCON_SYSCFG40;
+ sc->prediv_offset = JH7110_SYS_SYSCON_SYSCFG44;
+ sc->postdiv1_offset = JH7110_SYS_SYSCON_SYSCFG40;
+ break;
+ case JH7110_PLLCLK_PLL2_OUT:
+ sc->syscon_arr = jh7110_pll2_syscon_freq;
+ sc->syscon_nitems = nitems(jh7110_pll2_syscon_freq);
+ PLL_MASK_FILL(sc, 2);
+ PLL_SHIFT_FILL(sc, 2);
+ sc->dacpd_offset = JH7110_SYS_SYSCON_SYSCFG44;
+ sc->dsmpd_offset = JH7110_SYS_SYSCON_SYSCFG44;
+ sc->fbdiv_offset = JH7110_SYS_SYSCON_SYSCFG44;
+ sc->frac_offset = JH7110_SYS_SYSCON_SYSCFG48;
+ sc->prediv_offset = JH7110_SYS_SYSCON_SYSCFG52;
+ sc->postdiv1_offset = JH7110_SYS_SYSCON_SYSCFG48;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ clknode_register(clkdom, clk);
+
+ return (0);
+}
diff --git a/sys/dev/clk/starfive/jh7110_clk_pll.h b/sys/dev/clk/starfive/jh7110_clk_pll.h
new file mode 100644
index 000000000000..700072a4b465
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk_pll.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * StarFive JH7110 PLL Clock Generator Driver
+ *
+ * Copyright (C) 2022 Xingyu Wu <xingyu.wu@starfivetech.com>
+ */
+
+#define PLL0_DACPD_SHIFT 24
+#define PLL0_DACPD_MASK 0x1000000
+#define PLL0_DSMPD_SHIFT 25
+#define PLL0_DSMPD_MASK 0x2000000
+#define PLL0_FBDIV_SHIFT 0
+#define PLL0_FBDIV_MASK 0xFFF
+#define PLL0_FRAC_SHIFT 0
+#define PLL0_FRAC_MASK 0xFFFFFF
+#define PLL0_POSTDIV1_SHIFT 28
+#define PLL0_POSTDIV1_MASK 0x30000000
+#define PLL0_PREDIV_SHIFT 0
+#define PLL0_PREDIV_MASK 0x3F
+
+#define PLL1_DACPD_SHIFT 15
+#define PLL1_DACPD_MASK 0x8000
+#define PLL1_DSMPD_SHIFT 16
+#define PLL1_DSMPD_MASK 0x10000
+#define PLL1_FBDIV_SHIFT 17
+#define PLL1_FBDIV_MASK 0x1FFE0000
+#define PLL1_FRAC_SHIFT 0
+#define PLL1_FRAC_MASK 0xFFFFFF
+#define PLL1_POSTDIV1_SHIFT 28
+#define PLL1_POSTDIV1_MASK 0x30000000
+#define PLL1_PREDIV_SHIFT 0
+#define PLL1_PREDIV_MASK 0x3F
+
+#define PLL2_DACPD_SHIFT 15
+#define PLL2_DACPD_MASK 0x8000
+#define PLL2_DSMPD_SHIFT 16
+#define PLL2_DSMPD_MASK 0x10000
+#define PLL2_FBDIV_SHIFT 17
+#define PLL2_FBDIV_MASK 0x1FFE0000
+#define PLL2_FRAC_SHIFT 0
+#define PLL2_FRAC_MASK 0xFFFFFF
+#define PLL2_POSTDIV1_SHIFT 28
+#define PLL2_POSTDIV1_MASK 0x30000000
+#define PLL2_PREDIV_SHIFT 0
+#define PLL2_PREDIV_MASK 0x3F
+
+#define FRAC_PATR_SIZE 1000
+
+struct jh7110_pll_syscon_value {
+ uint64_t freq;
+ uint32_t prediv;
+ uint32_t fbdiv;
+ uint32_t postdiv1;
+ uint32_t dacpd;
+ uint32_t dsmpd;
+ uint32_t frac;
+};
+
+enum starfive_pll0_freq_value {
+ PLL0_FREQ_375_VALUE = 375000000,
+ PLL0_FREQ_500_VALUE = 500000000,
+ PLL0_FREQ_625_VALUE = 625000000,
+ PLL0_FREQ_750_VALUE = 750000000,
+ PLL0_FREQ_875_VALUE = 875000000,
+ PLL0_FREQ_1000_VALUE = 1000000000,
+ PLL0_FREQ_1250_VALUE = 1250000000,
+ PLL0_FREQ_1375_VALUE = 1375000000,
+ PLL0_FREQ_1500_VALUE = 1500000000
+};
+
+enum starfive_pll0_freq {
+ PLL0_FREQ_375 = 0,
+ PLL0_FREQ_500,
+ PLL0_FREQ_625,
+ PLL0_FREQ_750,
+ PLL0_FREQ_875,
+ PLL0_FREQ_1000,
+ PLL0_FREQ_1250,
+ PLL0_FREQ_1375,
+ PLL0_FREQ_1500,
+ PLL0_FREQ_MAX = PLL0_FREQ_1500
+};
+
+enum starfive_pll1_freq_value {
+ PLL1_FREQ_1066_VALUE = 1066000000,
+};
+
+enum starfive_pll1_freq {
+ PLL1_FREQ_1066 = 0,
+};
+
+enum starfive_pll2_freq_value {
+ PLL2_FREQ_1188_VALUE = 1188000000,
+ PLL2_FREQ_12288_VALUE = 1228800000,
+};
+
+enum starfive_pll2_freq {
+ PLL2_FREQ_1188 = 0,
+ PLL2_FREQ_12288,
+};
+
+static const struct jh7110_pll_syscon_value
+ jh7110_pll0_syscon_freq[] = {
+ [PLL0_FREQ_375] = {
+ .freq = PLL0_FREQ_375_VALUE,
+ .prediv = 8,
+ .fbdiv = 125,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_500] = {
+ .freq = PLL0_FREQ_500_VALUE,
+ .prediv = 6,
+ .fbdiv = 125,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_625] = {
+ .freq = PLL0_FREQ_625_VALUE,
+ .prediv = 24,
+ .fbdiv = 625,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_750] = {
+ .freq = PLL0_FREQ_750_VALUE,
+ .prediv = 4,
+ .fbdiv = 125,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_875] = {
+ .freq = PLL0_FREQ_875_VALUE,
+ .prediv = 24,
+ .fbdiv = 875,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_1000] = {
+ .freq = PLL0_FREQ_1000_VALUE,
+ .prediv = 3,
+ .fbdiv = 125,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_1250] = {
+ .freq = PLL0_FREQ_1250_VALUE,
+ .prediv = 12,
+ .fbdiv = 625,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_1375] = {
+ .freq = PLL0_FREQ_1375_VALUE,
+ .prediv = 24,
+ .fbdiv = 1375,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL0_FREQ_1500] = {
+ .freq = PLL0_FREQ_1500_VALUE,
+ .prediv = 2,
+ .fbdiv = 125,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+};
+
+static const struct jh7110_pll_syscon_value
+ jh7110_pll1_syscon_freq[] = {
+ [PLL1_FREQ_1066] = {
+ .freq = PLL1_FREQ_1066_VALUE,
+ .prediv = 12,
+ .fbdiv = 533,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+};
+
+static const struct jh7110_pll_syscon_value
+ jh7110_pll2_syscon_freq[] = {
+ [PLL2_FREQ_1188] = {
+ .freq = PLL2_FREQ_1188_VALUE,
+ .prediv = 2,
+ .fbdiv = 99,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+ [PLL2_FREQ_12288] = {
+ .freq = PLL2_FREQ_12288_VALUE,
+ .prediv = 5,
+ .fbdiv = 256,
+ .postdiv1 = 1,
+ .dacpd = 1,
+ .dsmpd = 1,
+ },
+};
diff --git a/sys/dev/clk/starfive/jh7110_clk_stg.c b/sys/dev/clk/starfive/jh7110_clk_stg.c
new file mode 100644
index 000000000000..af30e640ecf6
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk_stg.c
@@ -0,0 +1,204 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+/* Clocks for STG group. PLL_OUT & SYS clocks must be registered first. */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/mutex.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/hwreset/hwreset.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk.h>
+#include <dev/clk/starfive/jh7110_clk.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clkdev_if.h"
+#include "hwreset_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "starfive,jh7110-stgcrg", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE },
+ RESOURCE_SPEC_END
+};
+
+/* parents */
+static const char *e2_rtc_p[] = { "osc" };
+static const char *e2_core_p[] = { "stg_axiahb" };
+static const char *e2_dbg_p[] = { "stg_axiahb" };
+
+static const char *pcie_slv_main_p[] = { "stg_axiahb" };
+static const char *pcie0_tl_p[] = { "stg_axiahb" };
+static const char *pcie1_tl_p[] = { "stg_axiahb" };
+static const char *pcie0_axi_mst0_p[] = { "stg_axiahb" };
+static const char *pcie1_axi_mst0_p[] = { "stg_axiahb" };
+static const char *pcie0_apb_p[] = { "apb_bus" };
+static const char *pcie1_apb_p[] = { "apb_bus" };
+
+static const char *usb0_lpm_p[] = { "osc" };
+static const char *usb0_stb_p[] = { "osc" };
+static const char *usb0_apb_p[] = { "apb_bus" };
+static const char *usb0_utmi_apb_p[] = { "apb_bus" };
+static const char *usb0_axi_p[] = { "stg_axiahb" };
+static const char *usb0_app_125_p[] = { "usb_125m" };
+static const char *usb0_refclk_p[] = { "osc" };
+
+static const char *dma1p_axi_p[] = { "stg_axiahb" };
+static const char *dma1p_ahb_p[] = { "stg_axiahb" };
+
+/* STG clocks */
+static const struct jh7110_clk_def stg_clks[] = {
+ JH7110_GATE(JH7110_STGCLK_USB0_APB, "usb0_apb", usb0_apb_p),
+ JH7110_GATE(JH7110_STGCLK_USB0_UTMI_APB, "usb0_utmi_apb",
+ usb0_utmi_apb_p),
+ JH7110_GATE(JH7110_STGCLK_USB0_AXI, "usb0_axi", usb0_axi_p),
+ JH7110_GATEDIV(JH7110_STGCLK_USB0_LPM, "usb0_lpm", usb0_lpm_p, 2),
+ JH7110_GATEDIV(JH7110_STGCLK_USB0_STB, "usb0_stb", usb0_stb_p, 4),
+ JH7110_GATE(JH7110_STGCLK_USB0_APP_125, "usb0_app_125", usb0_app_125_p),
+ JH7110_DIV(JH7110_STGCLK_USB0_REFCLK, "usb0_refclk", usb0_refclk_p, 2),
+
+ JH7110_GATE(JH7110_STGCLK_PCIE0_AXI_MST0, "pcie0_axi_mst0",
+ pcie0_axi_mst0_p),
+ JH7110_GATE(JH7110_STGCLK_PCIE0_APB, "pcie0_apb", pcie0_apb_p),
+ JH7110_GATE(JH7110_STGCLK_PCIE0_TL, "pcie0_tl", pcie0_tl_p),
+ JH7110_GATE(JH7110_STGCLK_PCIE1_AXI_MST0, "pcie1_axi_mst0",
+ pcie1_axi_mst0_p),
+
+ JH7110_GATE(JH7110_STGCLK_PCIE1_APB, "pcie1_apb", pcie1_apb_p),
+ JH7110_GATE(JH7110_STGCLK_PCIE1_TL, "pcie1_tl", pcie1_tl_p),
+ JH7110_GATE(JH7110_STGCLK_PCIE_SLV_MAIN, "pcie_slv_main",
+ pcie_slv_main_p),
+
+ JH7110_GATEDIV(JH7110_STGCLK_E2_RTC, "e2_rtc", e2_rtc_p, 24),
+ JH7110_GATE(JH7110_STGCLK_E2_CORE, "e2_core", e2_core_p),
+ JH7110_GATE(JH7110_STGCLK_E2_DBG, "e2_dbg", e2_dbg_p),
+
+ JH7110_GATE(JH7110_STGCLK_DMA1P_AXI, "dma1p_axi", dma1p_axi_p),
+ JH7110_GATE(JH7110_STGCLK_DMA1P_AHB, "dma1p_ahb", dma1p_ahb_p),
+};
+
+static int
+jh7110_clk_stg_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "StarFive JH7110 STG clock generator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+jh7110_clk_stg_attach(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+
+ sc->reset_status_offset = STGCRG_RESET_STATUS;
+ sc->reset_selector_offset = STGCRG_RESET_SELECTOR;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ err = bus_alloc_resources(dev, res_spec, &sc->mem_res);
+ if (err != 0) {
+ device_printf(dev, "Couldn't allocate resources, error %d\n",
+ err);
+ return (ENXIO);
+ }
+
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL) {
+ device_printf(dev, "Couldn't create clkdom\n");
+ return (ENXIO);
+ }
+
+ for (int i = 0; i < nitems(stg_clks); i++) {
+ err = jh7110_clk_register(sc->clkdom, &stg_clks[i]);
+ if (err != 0) {
+ device_printf(dev,
+ "Couldn't register clk %s, error %d\n",
+ stg_clks[i].clkdef.name, err);
+ return (ENXIO);
+ }
+ }
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("Cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ hwreset_register_ofw_provider(dev);
+
+ return (0);
+}
+
+static void
+jh7110_clk_stg_device_lock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+jh7110_clk_stg_device_unlock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static int
+jh7110_clk_stg_detach(device_t dev)
+{
+ /* Detach not supported */
+ return (EBUSY);
+}
+
+static device_method_t jh7110_clk_stg_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, jh7110_clk_stg_probe),
+ DEVMETHOD(device_attach, jh7110_clk_stg_attach),
+ DEVMETHOD(device_detach, jh7110_clk_stg_detach),
+
+ /* clkdev interface */
+ DEVMETHOD(clkdev_device_lock, jh7110_clk_stg_device_lock),
+ DEVMETHOD(clkdev_device_unlock, jh7110_clk_stg_device_unlock),
+
+ /* Reset interface */
+ DEVMETHOD(hwreset_assert, jh7110_reset_assert),
+ DEVMETHOD(hwreset_is_asserted, jh7110_reset_is_asserted),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(jh7110_stg, jh7110_stg_driver, jh7110_clk_stg_methods,
+ sizeof(struct jh7110_clkgen_softc));
+EARLY_DRIVER_MODULE(jh7110_stg, simplebus, jh7110_stg_driver, 0, 0,
+ BUS_PASS_BUS + BUS_PASS_ORDER_LATE + 1);
+MODULE_VERSION(jh7110_stg, 1);
diff --git a/sys/dev/clk/starfive/jh7110_clk_sys.c b/sys/dev/clk/starfive/jh7110_clk_sys.c
new file mode 100644
index 000000000000..4bc29b20bc91
--- /dev/null
+++ b/sys/dev/clk/starfive/jh7110_clk_sys.c
@@ -0,0 +1,268 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2016 Michal Meloun <mmel@FreeBSD.org>
+ * Copyright (c) 2020 Oskar Holmlund <oskar.holmlund@ohdata.se>
+ * Copyright (c) 2022 Mitchell Horne <mhorne@FreeBSD.org>
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+/* Clocks for JH7110 SYS group. PLL driver must be attached before this. */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <dev/fdt/simplebus.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/clk/clk.h>
+#include <dev/clk/starfive/jh7110_clk.h>
+#include <dev/hwreset/hwreset.h>
+
+#include <dt-bindings/clock/starfive,jh7110-crg.h>
+
+#include "clkdev_if.h"
+#include "hwreset_if.h"
+
+static struct ofw_compat_data compat_data[] = {
+ { "starfive,jh7110-syscrg", 1 },
+ { NULL, 0 }
+};
+
+static struct resource_spec res_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE | RF_SHAREABLE },
+ RESOURCE_SPEC_END
+};
+
+/* parents for non-pll SYS clocks */
+static const char *cpu_root_p[] = { "osc", "pll0_out" };
+static const char *cpu_core_p[] = { "cpu_root" };
+static const char *cpu_bus_p[] = { "cpu_core" };
+static const char *perh_root_p[] = { "pll0_out", "pll2_out" };
+static const char *bus_root_p[] = { "osc", "pll2_out" };
+
+static const char *apb_bus_p[] = { "stg_axiahb" };
+static const char *apb0_p[] = { "apb_bus" };
+static const char *u0_sys_iomux_apb_p[] = { "apb_bus" };
+static const char *stg_axiahb_p[] = { "axi_cfg0" };
+static const char *ahb0_p[] = { "stg_axiahb" };
+static const char *axi_cfg0_p[] = { "bus_root" };
+static const char *nocstg_bus_p[] = { "bus_root" };
+static const char *noc_bus_stg_axi_p[] = { "nocstg_bus" };
+
+static const char *u0_dw_uart_clk_apb_p[] = { "apb0" };
+static const char *u0_dw_uart_clk_core_p[] = { "osc" };
+static const char *u0_dw_sdio_clk_ahb_p[] = { "ahb0" };
+static const char *u0_dw_sdio_clk_sdcard_p[] = { "axi_cfg0" };
+static const char *u1_dw_uart_clk_apb_p[] = { "apb0" };
+static const char *u1_dw_uart_clk_core_p[] = { "osc" };
+static const char *u1_dw_sdio_clk_ahb_p[] = { "ahb0" };
+static const char *u1_dw_sdio_clk_sdcard_p[] = { "axi_cfg0" };
+static const char *usb_125m_p[] = { "pll0_out" };
+static const char *u2_dw_uart_clk_apb_p[] = { "apb0" };
+static const char *u2_dw_uart_clk_core_p[] = { "osc" };
+static const char *u3_dw_uart_clk_apb_p[] = { "apb0" };
+static const char *u3_dw_uart_clk_core_p[] = { "perh_root" };
+
+static const char *gmac_src_p[] = { "pll0_out" };
+static const char *gmac_phy_p[] = { "gmac_src" };
+static const char *gmac0_gtxclk_p[] = { "pll0_out" };
+static const char *gmac0_ptp_p[] = { "gmac_src" };
+static const char *gmac0_gtxc_p[] = { "gmac0_gtxclk" };
+static const char *gmac1_gtxclk_p[] = { "pll0_out" };
+static const char *gmac1_gtxc_p[] = { "gmac1_gtxclk" };
+static const char *gmac1_rmii_rtx_p[] = { "gmac1_rmii_refin" };
+static const char *gmac1_axi_p[] = { "stg_axiahb" };
+static const char *gmac1_ahb_p[] = { "ahb0" };
+static const char *gmac1_ptp_p[] = { "gmac_src" };
+static const char *gmac1_tx_inv_p[] = { "gmac1_tx" };
+static const char *gmac1_tx_p[] = { "gmac1_gtxclk", "gmac1_rmii_rtx" };
+static const char *gmac1_rx_p[] = { "gmac1_rgmii_rxin", "gmac1_rmii_rtx" };
+static const char *gmac1_rx_inv_p[] = { "gmac1_rx" };
+
+/* non-pll SYS clocks */
+static const struct jh7110_clk_def sys_clks[] = {
+ JH7110_MUX(JH7110_SYSCLK_CPU_ROOT, "cpu_root", cpu_root_p),
+ JH7110_DIV(JH7110_SYSCLK_CPU_CORE, "cpu_core", cpu_core_p, 7),
+ JH7110_DIV(JH7110_SYSCLK_CPU_BUS, "cpu_bus", cpu_bus_p, 2),
+ JH7110_GATEDIV(JH7110_SYSCLK_PERH_ROOT, "perh_root", perh_root_p, 2),
+ JH7110_MUX(JH7110_SYSCLK_BUS_ROOT, "bus_root", bus_root_p),
+
+ JH7110_GATE(JH7110_SYSCLK_APB0, "apb0", apb0_p),
+ JH7110_GATE(JH7110_SYSCLK_IOMUX_APB, "u0_sys_iomux_apb",
+ u0_sys_iomux_apb_p),
+ JH7110_GATE(JH7110_SYSCLK_UART0_APB, "u0_dw_uart_clk_apb",
+ u0_dw_uart_clk_apb_p),
+ JH7110_GATE(JH7110_SYSCLK_UART0_CORE, "u0_dw_uart_clk_core",
+ u0_dw_uart_clk_core_p),
+ JH7110_GATE(JH7110_SYSCLK_UART1_APB, "u1_dw_uart_clk_apb",
+ u1_dw_uart_clk_apb_p),
+ JH7110_GATE(JH7110_SYSCLK_UART1_CORE, "u1_dw_uart_clk_core",
+ u1_dw_uart_clk_core_p),
+ JH7110_GATE(JH7110_SYSCLK_UART2_APB, "u2_dw_uart_clk_apb",
+ u2_dw_uart_clk_apb_p),
+ JH7110_GATE(JH7110_SYSCLK_UART2_CORE, "u2_dw_uart_clk_core",
+ u2_dw_uart_clk_core_p),
+ JH7110_GATE(JH7110_SYSCLK_UART3_APB, "u3_dw_uart_clk_apb",
+ u3_dw_uart_clk_apb_p),
+ JH7110_GATE(JH7110_SYSCLK_UART3_CORE, "u3_dw_uart_clk_core",
+ u3_dw_uart_clk_core_p),
+
+ JH7110_DIV(JH7110_SYSCLK_AXI_CFG0, "axi_cfg0", axi_cfg0_p, 3),
+ JH7110_DIV(JH7110_SYSCLK_STG_AXIAHB, "stg_axiahb", stg_axiahb_p, 2),
+ JH7110_DIV(JH7110_SYSCLK_NOCSTG_BUS, "nocstg_bus", nocstg_bus_p, 3),
+ JH7110_GATE(JH7110_SYSCLK_NOC_BUS_STG_AXI, "noc_bus_stg_axi",
+ noc_bus_stg_axi_p),
+ JH7110_GATE(JH7110_SYSCLK_AHB0, "ahb0", ahb0_p),
+ JH7110_DIV(JH7110_SYSCLK_APB_BUS, "apb_bus", apb_bus_p, 8),
+
+ JH7110_GATE(JH7110_SYSCLK_SDIO0_AHB, "u0_dw_sdio_clk_ahb",
+ u0_dw_sdio_clk_ahb_p),
+ JH7110_GATE(JH7110_SYSCLK_SDIO1_AHB, "u1_dw_sdio_clk_ahb",
+ u1_dw_sdio_clk_ahb_p),
+ JH7110_GATEDIV(JH7110_SYSCLK_SDIO0_SDCARD, "u0_dw_sdio_clk_sdcard",
+ u0_dw_sdio_clk_sdcard_p, 15),
+ JH7110_GATEDIV(JH7110_SYSCLK_SDIO1_SDCARD, "u1_dw_sdio_clk_sdcard",
+ u1_dw_sdio_clk_sdcard_p, 15),
+ JH7110_DIV(JH7110_SYSCLK_USB_125M, "usb_125m", usb_125m_p, 15),
+
+ JH7110_DIV(JH7110_SYSCLK_GMAC_SRC, "gmac_src", gmac_src_p, 7),
+ JH7110_GATEDIV(JH7110_SYSCLK_GMAC0_GTXCLK, "gmac0_gtxclk",
+ gmac0_gtxclk_p, 15),
+ JH7110_GATEDIV(JH7110_SYSCLK_GMAC0_PTP, "gmac0_ptp", gmac0_ptp_p, 31),
+ JH7110_GATEDIV(JH7110_SYSCLK_GMAC_PHY, "gmac_phy", gmac_phy_p, 31),
+ JH7110_GATE(JH7110_SYSCLK_GMAC0_GTXC, "gmac0_gtxc", gmac0_gtxc_p),
+
+ JH7110_MUX(JH7110_SYSCLK_GMAC1_RX, "gmac1_rx", gmac1_rx_p),
+ JH7110_INV(JH7110_SYSCLK_GMAC1_RX_INV, "gmac1_rx_inv", gmac1_rx_inv_p),
+ JH7110_GATE(JH7110_SYSCLK_GMAC1_AHB, "gmac1_ahb", gmac1_ahb_p),
+ JH7110_DIV(JH7110_SYSCLK_GMAC1_GTXCLK, "gmac1_gtxclk",
+ gmac1_gtxclk_p, 15),
+ JH7110_GATEMUX(JH7110_SYSCLK_GMAC1_TX, "gmac1_tx", gmac1_tx_p),
+ JH7110_INV(JH7110_SYSCLK_GMAC1_TX_INV, "gmac1_tx_inv", gmac1_tx_inv_p),
+ JH7110_GATEDIV(JH7110_SYSCLK_GMAC1_PTP, "gmac1_ptp", gmac1_ptp_p, 31),
+ JH7110_GATE(JH7110_SYSCLK_GMAC1_AXI, "gmac1_axi", gmac1_axi_p),
+ JH7110_GATE(JH7110_SYSCLK_GMAC1_GTXC, "gmac1_gtxc", gmac1_gtxc_p),
+ JH7110_DIV(JH7110_SYSCLK_GMAC1_RMII_RTX, "gmac1_rmii_rtx",
+ gmac1_rmii_rtx_p, 30),
+};
+
+static int
+jh7110_clk_sys_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "StarFive JH7110 SYS clock generator");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+jh7110_clk_sys_attach(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+ int i, error;
+
+ sc = device_get_softc(dev);
+
+ sc->reset_status_offset = SYSCRG_RESET_STATUS;
+ sc->reset_selector_offset = SYSCRG_RESET_SELECTOR;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ /* Allocate memory groups */
+ error = bus_alloc_resources(dev, res_spec, &sc->mem_res);
+ if (error != 0) {
+ device_printf(dev, "Couldn't allocate resources, error %d\n",
+ error);
+ return (ENXIO);
+ }
+
+ /* Create clock domain */
+ sc->clkdom = clkdom_create(dev);
+ if (sc->clkdom == NULL) {
+ device_printf(dev, "Couldn't create clkdom\n");
+ return (ENXIO);
+ }
+
+ /* Register clocks */
+ for (i = 0; i < nitems(sys_clks); i++) {
+ error = jh7110_clk_register(sc->clkdom, &sys_clks[i]);
+ if (error != 0) {
+ device_printf(dev, "Couldn't register clock %s: %d\n",
+ sys_clks[i].clkdef.name, error);
+ return (ENXIO);
+ }
+ }
+
+ if (clkdom_finit(sc->clkdom) != 0)
+ panic("Cannot finalize clkdom initialization\n");
+
+ if (bootverbose)
+ clkdom_dump(sc->clkdom);
+
+ hwreset_register_ofw_provider(dev);
+
+ return (0);
+}
+
+static int
+jh7110_clk_sys_detach(device_t dev)
+{
+ /* Detach not supported */
+ return (EBUSY);
+}
+
+static void
+jh7110_clk_sys_device_lock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+static void
+jh7110_clk_sys_device_unlock(device_t dev)
+{
+ struct jh7110_clkgen_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
+
+static device_method_t jh7110_clk_sys_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, jh7110_clk_sys_probe),
+ DEVMETHOD(device_attach, jh7110_clk_sys_attach),
+ DEVMETHOD(device_detach, jh7110_clk_sys_detach),
+
+ /* clkdev interface */
+ DEVMETHOD(clkdev_device_lock, jh7110_clk_sys_device_lock),
+ DEVMETHOD(clkdev_device_unlock, jh7110_clk_sys_device_unlock),
+
+ /* Reset interface */
+ DEVMETHOD(hwreset_assert, jh7110_reset_assert),
+ DEVMETHOD(hwreset_is_asserted, jh7110_reset_is_asserted),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(jh7110_clk_sys, jh7110_clk_sys_driver, jh7110_clk_sys_methods,
+ sizeof(struct jh7110_clkgen_softc));
+EARLY_DRIVER_MODULE(jh7110_clk_sys, simplebus, jh7110_clk_sys_driver, 0, 0,
+ BUS_PASS_BUS + BUS_PASS_ORDER_LATE);
+MODULE_VERSION(jh7110_clk_sys, 1);
diff --git a/sys/dev/coretemp/coretemp.c b/sys/dev/coretemp/coretemp.c
index 4a7e4e7834d8..df1dcff83639 100644
--- a/sys/dev/coretemp/coretemp.c
+++ b/sys/dev/coretemp/coretemp.c
@@ -112,7 +112,7 @@ coretemp_identify(driver_t *driver, device_t parent)
u_int regs[4];
/* Make sure we're not being doubly invoked. */
- if (device_find_child(parent, "coretemp", -1) != NULL)
+ if (device_find_child(parent, "coretemp", DEVICE_UNIT_ANY) != NULL)
return;
/* Check that CPUID 0x06 is supported and the vendor is Intel.*/
diff --git a/sys/dev/cpuctl/cpuctl.c b/sys/dev/cpuctl/cpuctl.c
index 9253b17a259d..deaabaaaa1fc 100644
--- a/sys/dev/cpuctl/cpuctl.c
+++ b/sys/dev/cpuctl/cpuctl.c
@@ -344,7 +344,7 @@ ucode_intel_load_rv(void *arg)
d = arg;
if (PCPU_GET(cpuid) == d->cpu)
- d->ret = ucode_intel_load(d->ptr, true, NULL, NULL);
+ d->ret = ucode_intel_load(d->ptr, SAFE, NULL, NULL);
}
static int
@@ -402,19 +402,20 @@ out:
* its workings.
*/
static void
-amd_ucode_wrmsr(void *ucode_ptr)
+amd_ucode_wrmsr(void *arg)
{
+ struct ucode_update_data *d = arg;
uint32_t tmp[4];
- wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
+ if (PCPU_GET(cpuid) == d->cpu)
+ d->ret = wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)d->ptr);
do_cpuid(0, tmp);
}
static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
- void *ptr;
- int ret;
+ struct ucode_update_data d = { .cpu = cpu };
if (args->size == 0 || args->data == NULL) {
DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
@@ -430,18 +431,17 @@ update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
* malloc(9) always returns the pointer aligned at least on
* the size of the allocation.
*/
- ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
- if (copyin(args->data, ptr, args->size) != 0) {
+ d.ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
+ if (copyin(args->data, d.ptr, args->size) != 0) {
DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
__LINE__, args->data, ptr, args->size);
- ret = EFAULT;
+ d.ret = EFAULT;
goto fail;
}
- smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
- ret = 0;
+ smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, &d);
fail:
- free(ptr, M_CPUCTL);
- return (ret);
+ free(d.ptr, M_CPUCTL);
+ return (d.ret);
}
static int
diff --git a/sys/dev/cpufreq/cpufreq_dt.c b/sys/dev/cpufreq/cpufreq_dt.c
index 929eebfe7dc5..b212b08e9a83 100644
--- a/sys/dev/cpufreq/cpufreq_dt.c
+++ b/sys/dev/cpufreq/cpufreq_dt.c
@@ -315,7 +315,7 @@ cpufreq_dt_identify(driver_t *driver, device_t parent)
!OF_hasprop(node, "operating-points-v2"))
return;
- if (device_find_child(parent, "cpufreq_dt", -1) != NULL)
+ if (device_find_child(parent, "cpufreq_dt", DEVICE_UNIT_ANY) != NULL)
return;
if (BUS_ADD_CHILD(parent, 0, "cpufreq_dt", device_get_unit(parent))
@@ -401,7 +401,7 @@ cpufreq_dt_oppv2_parse(struct cpufreq_dt_softc *sc, phandle_t node)
if (opp_table == opp_xref)
return (ENXIO);
- if (!OF_hasprop(opp_table, "opp-shared")) {
+ if (!OF_hasprop(opp_table, "opp-shared") && mp_ncpus > 1) {
device_printf(sc->dev, "Only opp-shared is supported\n");
return (ENXIO);
}
diff --git a/sys/dev/cpufreq/ichss.c b/sys/dev/cpufreq/ichss.c
index f1ec62ed6d8a..6c30bbb9700d 100644
--- a/sys/dev/cpufreq/ichss.c
+++ b/sys/dev/cpufreq/ichss.c
@@ -147,7 +147,7 @@ ichss_identify(driver_t *driver, device_t parent)
return;
/* Avoid duplicates. */
- if (device_find_child(parent, "ichss", -1))
+ if (device_find_child(parent, "ichss", DEVICE_UNIT_ANY))
return;
/*
@@ -218,13 +218,15 @@ ichss_probe(device_t dev)
* info, let it manage things. Also, if Enhanced SpeedStep is
* available, don't attach.
*/
- perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
+ perf_dev = device_find_child(device_get_parent(dev), "acpi_perf",
+ DEVICE_UNIT_ANY);
if (perf_dev && device_is_attached(perf_dev)) {
error = CPUFREQ_DRV_TYPE(perf_dev, &type);
if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
return (ENXIO);
}
- est_dev = device_find_child(device_get_parent(dev), "est", -1);
+ est_dev = device_find_child(device_get_parent(dev), "est",
+ DEVICE_UNIT_ANY);
if (est_dev && device_is_attached(est_dev))
return (ENXIO);
diff --git a/sys/dev/cxgb/cxgb_main.c b/sys/dev/cxgb/cxgb_main.c
index 1a088cdf0abe..616a2ecc1a37 100644
--- a/sys/dev/cxgb/cxgb_main.c
+++ b/sys/dev/cxgb/cxgb_main.c
@@ -36,7 +36,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/module.h>
-#include <sys/pciio.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
@@ -76,7 +75,6 @@ POSSIBILITY OF SUCH DAMAGE.
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
-#include <dev/pci/pci_private.h>
#include <cxgb_include.h>
@@ -360,7 +358,7 @@ static int
cxgb_controller_probe(device_t dev)
{
const struct adapter_info *ai;
- char *ports, buf[80];
+ const char *ports;
int nports;
ai = cxgb_get_adapter_info(dev);
@@ -373,8 +371,7 @@ cxgb_controller_probe(device_t dev)
else
ports = "ports";
- snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "%s, %d %s", ai->desc, nports, ports);
return (BUS_PROBE_DEFAULT);
}
@@ -447,7 +444,6 @@ cxgb_controller_attach(device_t dev)
uint32_t vers;
int port_qsets = 1;
int msi_needed, reg;
- char buf[80];
sc = device_get_softc(dev);
sc->dev = dev;
@@ -626,7 +622,8 @@ cxgb_controller_attach(device_t dev)
for (i = 0; i < (sc)->params.nports; i++) {
struct port_info *pi;
- if ((child = device_add_child(dev, "cxgb", -1)) == NULL) {
+ if ((child = device_add_child(dev, "cxgb",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "failed to add child port\n");
error = EINVAL;
goto out;
@@ -643,8 +640,7 @@ cxgb_controller_attach(device_t dev)
sc->portdev[i] = child;
device_set_softc(child, pi);
}
- if ((error = bus_generic_attach(dev)) != 0)
- goto out;
+ bus_attach_children(dev);
/* initialize sge private state */
t3_sge_init_adapter(sc);
@@ -659,10 +655,9 @@ cxgb_controller_attach(device_t dev)
G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
G_FW_VERSION_MICRO(vers));
- snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s",
- ai->desc, is_offload(sc) ? "R" : "",
- sc->params.vpd.ec, sc->params.vpd.sn);
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "%s %sNIC\t E/C: %s S/N: %s",
+ ai->desc, is_offload(sc) ? "R" : "",
+ sc->params.vpd.ec, sc->params.vpd.sn);
snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x",
sc->params.vpd.port_type[0], sc->params.vpd.port_type[1],
@@ -733,7 +728,7 @@ cxgb_free(struct adapter *sc)
/*
* Make sure all child devices are gone.
*/
- bus_generic_detach(sc->dev);
+ bus_detach_children(sc->dev);
for (i = 0; i < (sc)->params.nports; i++) {
if (sc->portdev[i] &&
device_delete_child(sc->dev, sc->portdev[i]) != 0)
@@ -966,13 +961,11 @@ static int
cxgb_port_probe(device_t dev)
{
struct port_info *p;
- char buf[80];
const char *desc;
p = device_get_softc(dev);
desc = p->phy.desc;
- snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc);
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "Port %d %s", p->port_id, desc);
return (0);
}
@@ -1016,11 +1009,6 @@ cxgb_port_attach(device_t dev)
/* Allocate an ifnet object and set it up */
ifp = p->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Cannot allocate ifnet\n");
- return (ENOMEM);
- }
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setinitfn(ifp, cxgb_init);
if_setsoftc(ifp, p);
@@ -1051,6 +1039,11 @@ cxgb_port_attach(device_t dev)
if_sethwassistbits(ifp, 0, CSUM_TSO);
}
+ /* Create a list of media supported by this port */
+ ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
+ cxgb_media_status);
+ cxgb_build_medialist(p);
+
ether_ifattach(ifp, p->hw_addr);
/* Attach driver debugnet methods. */
@@ -1065,11 +1058,6 @@ cxgb_port_attach(device_t dev)
return (err);
}
- /* Create a list of media supported by this port */
- ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change,
- cxgb_media_status);
- cxgb_build_medialist(p);
-
t3_sge_init_port(p);
return (err);
@@ -1077,7 +1065,7 @@ cxgb_port_attach(device_t dev)
/*
* cxgb_port_detach() is called via the device_detach methods when
- * cxgb_free() calls the bus_generic_detach. It is responsible for
+ * cxgb_free() calls the bus_detach_children. It is responsible for
* removing the device from the view of the kernel, i.e. from all
* interfaces lists etc. This routine is only called when the driver is
* being unloaded, not when the link goes down.
@@ -1148,66 +1136,23 @@ t3_fatal_err(struct adapter *sc)
int
t3_os_find_pci_capability(adapter_t *sc, int cap)
{
- device_t dev;
- struct pci_devinfo *dinfo;
- pcicfgregs *cfg;
- uint32_t status;
- uint8_t ptr;
-
- dev = sc->dev;
- dinfo = device_get_ivars(dev);
- cfg = &dinfo->cfg;
-
- status = pci_read_config(dev, PCIR_STATUS, 2);
- if (!(status & PCIM_STATUS_CAPPRESENT))
- return (0);
+ int rc, reg = 0;
- switch (cfg->hdrtype & PCIM_HDRTYPE) {
- case 0:
- case 1:
- ptr = PCIR_CAP_PTR;
- break;
- case 2:
- ptr = PCIR_CAP_PTR_2;
- break;
- default:
- return (0);
- break;
- }
- ptr = pci_read_config(dev, ptr, 1);
-
- while (ptr != 0) {
- if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap)
- return (ptr);
- ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
- }
-
- return (0);
+ rc = pci_find_cap(sc->dev, cap, &reg);
+ return (rc == 0 ? reg : 0);
}
int
t3_os_pci_save_state(struct adapter *sc)
{
- device_t dev;
- struct pci_devinfo *dinfo;
-
- dev = sc->dev;
- dinfo = device_get_ivars(dev);
-
- pci_cfg_save(dev, dinfo, 0);
+ pci_save_state(sc->dev);
return (0);
}
int
t3_os_pci_restore_state(struct adapter *sc)
{
- device_t dev;
- struct pci_devinfo *dinfo;
-
- dev = sc->dev;
- dinfo = device_get_ivars(dev);
-
- pci_cfg_restore(dev, dinfo);
+ pci_restore_state(sc->dev);
return (0);
}
@@ -2482,9 +2427,7 @@ set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset)
aligned_len = (len + (offset & 3) + 3) & ~3;
if (aligned_offset != offset || aligned_len != len) {
- buf = malloc(aligned_len, M_DEVBUF, M_WAITOK|M_ZERO);
- if (!buf)
- return (ENOMEM);
+ buf = malloc(aligned_len, M_DEVBUF, M_WAITOK | M_ZERO);
err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf);
if (!err && aligned_len > 4)
err = t3_seeprom_read(adapter,
diff --git a/sys/dev/cxgb/cxgb_sge.c b/sys/dev/cxgb/cxgb_sge.c
index f57494065aec..85e92b0e06ff 100644
--- a/sys/dev/cxgb/cxgb_sge.c
+++ b/sys/dev/cxgb/cxgb_sge.c
@@ -266,7 +266,7 @@ check_pkt_coalesce(struct sge_qset *qs)
if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
- cxgb_tx_coalesce_enable_start = COALESCE_STOP_MIN;
+ cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
/*
* if the hardware transmit queue is more than 1/8 full
* we mark it as coalescing - we drop back from coalescing
@@ -553,9 +553,7 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p)
nqsets *= adap->params.nports;
fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);
-
- while (!powerof2(fl_q_size))
- fl_q_size--;
+ fl_q_size = rounddown_pow_of_two(fl_q_size);
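+ /* e.g. rounddown_pow_of_two(3000) == 2048, the largest power of 2 <= n */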
use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
is_offload(adap);
@@ -567,8 +565,7 @@ t3_sge_prep(adapter_t *adap, struct sge_params *p)
jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
jumbo_buf_size = MJUM9BYTES;
}
- while (!powerof2(jumbo_q_size))
- jumbo_q_size--;
+ jumbo_q_size = rounddown_pow_of_two(jumbo_q_size);
if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
device_printf(adap->dev,
@@ -1927,7 +1924,7 @@ again: reclaim_completed_tx_imm(q);
/**
* restart_ctrlq - restart a suspended control queue
- * @qs: the queue set cotaining the control queue
+ * @qs: the queue set containing the control queue
*
* Resumes transmission on a suspended Tx control queue.
*/
@@ -2302,7 +2299,7 @@ again: reclaim_completed_tx(qs, 16, TXQ_OFLD);
/**
* restart_offloadq - restart a suspended offload queue
- * @qs: the queue set cotaining the offload queue
+ * @qs: the queue set containing the offload queue
*
* Resumes transmission on a suspended Tx offload queue.
*/
@@ -2422,11 +2419,8 @@ t3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
q->port = pi;
q->adap = sc;
- if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
- M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
- device_printf(sc->dev, "failed to allocate mbuf ring\n");
- goto err;
- }
+ q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
+ M_DEVBUF, M_WAITOK, &q->lock);
if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL) {
device_printf(sc->dev, "failed to allocate ifq\n");
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index 1a61834c5a40..55f09fefb7e3 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -107,7 +106,11 @@ enum {
CTRL_EQ_QSIZE = 1024,
TX_EQ_QSIZE = 1024,
+#if MJUMPAGESIZE != MCLBYTES
SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
+#else
+ SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
+#endif
CL_METADATA_SIZE = CACHE_LINE_SIZE,
SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
@@ -315,7 +318,7 @@ struct port_info {
char lockname[16];
unsigned long flags;
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
int8_t mdio_addr;
uint8_t port_type;
uint8_t mod_type;
@@ -409,6 +412,24 @@ enum {
NUM_CPL_COOKIES = 8 /* Limited by M_COOKIE. Do not increase. */
};
+/*
+ * Crypto replies use the low bit in the 64-bit cookie of CPL_FW6_PLD as a
+ * CPL cookie to identify the sender/receiver.
+ */
+enum {
+ CPL_FW6_COOKIE_CCR = 0,
+ CPL_FW6_COOKIE_KTLS,
+
+ NUM_CPL_FW6_COOKIES = 2 /* Low bits of cookie value. */
+};
+
+_Static_assert(powerof2(NUM_CPL_FW6_COOKIES),
+ "NUM_CPL_FW6_COOKIES must be a power of 2");
+
+#define CPL_FW6_COOKIE_MASK (NUM_CPL_FW6_COOKIES - 1)
+
+#define CPL_FW6_PLD_COOKIE(cpl) (be64toh((cpl)->data[1]) & ~CPL_FW6_COOKIE_MASK)
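+
+/*
+ * Illustrative use: a sender embeds (addr | CPL_FW6_COOKIE_KTLS) in the
+ * 64-bit cookie of its request; when the CPL_FW6_PLD reply arrives,
+ * CPL_FW6_PLD_COOKIE() masks off the low bits to recover addr, and the
+ * masked-off bits identify the receiver.
+ */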
+
struct sge_iq;
struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
@@ -473,6 +494,7 @@ struct sge_eq {
uint8_t doorbells;
uint8_t port_id; /* port_id of the port associated with the eq */
uint8_t tx_chan; /* tx channel used by the eq */
+ uint8_t hw_port; /* hw port used by the eq */
struct mtx eq_lock;
struct tx_desc *desc; /* KVA of descriptor ring */
@@ -636,12 +658,26 @@ struct sge_txq {
uint64_t kern_tls_full;
uint64_t kern_tls_octets;
uint64_t kern_tls_waste;
- uint64_t kern_tls_options;
uint64_t kern_tls_header;
- uint64_t kern_tls_fin;
uint64_t kern_tls_fin_short;
uint64_t kern_tls_cbc;
uint64_t kern_tls_gcm;
+ union {
+ struct {
+ /* T6 only. */
+ uint64_t kern_tls_options;
+ uint64_t kern_tls_fin;
+ };
+ struct {
+ /* T7 only. */
+ uint64_t kern_tls_ghash_received;
+ uint64_t kern_tls_ghash_requested;
+ uint64_t kern_tls_lso;
+ uint64_t kern_tls_partial_ghash;
+ uint64_t kern_tls_splitmode;
+ uint64_t kern_tls_trailer;
+ };
+ };
/* stats for not-that-common events */
@@ -765,6 +801,16 @@ struct sge_ofld_txq {
counter_u64_t tx_toe_tls_octets;
} __aligned(CACHE_LINE_SIZE);
+static inline int
+ofld_txq_group(int val, int mask)
+{
+ const uint32_t ngroup = 1 << bitcount32(mask);
+ const int mshift = ffs(mask) - 1;
+ const uint32_t gmask = ngroup - 1;
+
+ return (val >> mshift & gmask);
+}
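+
+/*
+ * Example (illustrative): for mask = 0x6, ngroup = 4 and mshift = 1;
+ * thus ofld_txq_group(0xb, 0x6) == ((0xb >> 1) & 3) == 1.
+ */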
+
#define INVALID_NM_RXQ_CNTXT_ID ((uint16_t)(-1))
struct sge_nm_rxq {
/* Items used by the driver rx ithread are in this cacheline. */
@@ -832,6 +878,7 @@ struct sge_nm_txq {
} __aligned(CACHE_LINE_SIZE);
struct sge {
+ int nctrlq; /* total # of control queues */
int nrxq; /* total # of Ethernet rx queues */
int ntxq; /* total # of Ethernet tx queues */
int nofldrxq; /* total # of TOE rx queues */
@@ -929,10 +976,12 @@ struct adapter {
u_int vxlan_refcount;
int rawf_base;
int nrawf;
+ u_int vlan_id;
struct taskqueue *tq[MAX_NPORTS]; /* General purpose taskqueues */
struct port_info *port[MAX_NPORTS];
- uint8_t chan_map[MAX_NCHAN]; /* channel -> port */
+ uint8_t chan_map[MAX_NCHAN]; /* tx_chan -> port_id */
+ uint8_t port_map[MAX_NPORTS]; /* hw_port -> port_id */
CXGBE_LIST_HEAD(, clip_entry) *clip_table;
TAILQ_HEAD(, clip_entry) clip_pending; /* these need hw update. */
@@ -954,9 +1003,12 @@ struct adapter {
vmem_t *key_map;
struct tls_tunables tlst;
+ vmem_t *pbl_arena;
+ vmem_t *stag_arena;
+
uint8_t doorbells;
int offload_map; /* port_id's with IFCAP_TOE enabled */
- int bt_map; /* tx_chan's with BASE-T */
+ int bt_map; /* hw_port's that are BASE-T */
int active_ulds; /* ULDs activated on this adapter */
int flags;
int debug_flags;
@@ -983,6 +1035,7 @@ struct adapter {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -1117,15 +1170,25 @@ forwarding_intr_to_fwq(struct adapter *sc)
return (sc->intr_count == 1);
}
-/* Works reliably inside a sync_op or with reg_lock held. */
+/* Works reliably inside a synch_op or with reg_lock held. */
static inline bool
hw_off_limits(struct adapter *sc)
{
- int off_limits = atomic_load_int(&sc->error_flags) & HW_OFF_LIMITS;
+ const int off_limits = atomic_load_int(&sc->error_flags) & HW_OFF_LIMITS;
return (__predict_false(off_limits != 0));
}
+/* Works reliably inside a synch_op or with reg_lock held. */
+static inline bool
+hw_all_ok(struct adapter *sc)
+{
+ const int not_ok = atomic_load_int(&sc->error_flags) &
+ (ADAP_STOPPED | HW_OFF_LIMITS);
+
+ return (__predict_true(not_ok == 0));
+}
+
static inline int
mbuf_nsegs(struct mbuf *m)
{
@@ -1354,8 +1417,6 @@ extern unsigned int t4_ddp_rcvbuf_cache;
extern device_method_t cxgbe_methods[];
int t4_os_find_pci_capability(struct adapter *, int);
-int t4_os_pci_save_state(struct adapter *);
-int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(struct port_info *);
void t4_os_link_changed(struct port_info *);
void t4_iterate(void (*)(struct adapter *, void *), void *);
@@ -1364,6 +1425,7 @@ void t4_add_adapter(struct adapter *);
int t4_detach_common(device_t);
int t4_map_bars_0_and_4(struct adapter *);
int t4_map_bar_2(struct adapter *);
+int t4_adj_doorbells(struct adapter *);
int t4_setup_intr_handlers(struct adapter *);
void t4_sysctls(struct adapter *);
int begin_synchronized_op(struct adapter *, struct vi_info *, int, char *);
@@ -1382,6 +1444,9 @@ void release_tid(struct adapter *, int, struct sge_wrq *);
int cxgbe_media_change(if_t);
void cxgbe_media_status(if_t, struct ifmediareq *);
void t4_os_cim_err(struct adapter *);
+int suspend_adapter(struct adapter *);
+int resume_adapter(struct adapter *);
+int toe_capability(struct vi_info *, bool);
#ifdef KERN_TLS
/* t6_kern_tls.c */
@@ -1392,6 +1457,14 @@ void t6_ktls_modunload(void);
int t6_ktls_try(if_t, struct socket *, struct ktls_session *);
int t6_ktls_parse_pkt(struct mbuf *);
int t6_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
+
+/* t7_kern_tls.c */
+int t7_tls_tag_alloc(struct ifnet *, union if_snd_tag_alloc_params *,
+ struct m_snd_tag **);
+void t7_ktls_modload(void);
+void t7_ktls_modunload(void);
+int t7_ktls_parse_pkt(struct mbuf *);
+int t7_ktls_write_wr(struct sge_txq *, void *, struct mbuf *, u_int);
#endif
/* t4_keyctx.c */
@@ -1519,6 +1592,27 @@ int t4_hashfilter_tcb_rpl(struct sge_iq *, const struct rss_header *, struct mbu
int t4_del_hashfilter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
void free_hftid_hash(struct tid_info *);
+/* t4_tpt.c */
+#define T4_STAG_UNSET 0xffffffff
+#define T4_WRITE_MEM_DMA_LEN \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_sgl), 16)
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_WRITE_MEM_INLINE_LEN(len) \
+ roundup2(sizeof(struct ulp_mem_io) + sizeof(struct ulptx_idata) + \
+ roundup((len), T4_ULPTX_MIN_IO), 16)
+
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+uint32_t t4_stag_alloc(struct adapter *, int);
+void t4_stag_free(struct adapter *, uint32_t, int);
+void t4_init_tpt(struct adapter *);
+void t4_free_tpt(struct adapter *);
+void t4_write_mem_dma_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, vm_paddr_t, uint64_t);
+void t4_write_mem_inline_wr(struct adapter *, void *, int, int, uint32_t,
+ uint32_t, void *, uint64_t);
+
static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
@@ -1551,7 +1645,10 @@ t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
struct sge_wrq *wrq = wr->wrq;
TXQ_LOCK(wrq);
- t4_wrq_tx_locked(sc, wrq, wr);
+ if (__predict_true(wrq->eq.flags & EQ_HW_ALLOCATED))
+ t4_wrq_tx_locked(sc, wrq, wr);
+ else
+ free(wr, M_CXGBE);
TXQ_UNLOCK(wrq);
}
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index 894e0444b710..6b36832a7464 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,6 +31,15 @@
#include "t4_hw.h"
+#define GLBL_INTR_MASK (F_CIM | F_MPS | F_PL | F_PCIE | F_MC0 | F_EDC0 | \
+ F_EDC1 | F_LE | F_TP | F_MA | F_PM_TX | F_PM_RX | F_ULP_RX | \
+ F_CPL_SWITCH | F_SGE | F_ULP_TX | F_SF)
+
+#define GLBL_T7_INTR_MASK (F_CIM | F_MPS | F_PL | F_T7_PCIE | F_T7_MC0 | \
+ F_T7_EDC0 | F_T7_EDC1 | F_T7_LE | F_T7_TP | \
+ F_T7_MA | F_T7_PM_TX | F_T7_PM_RX | F_T7_ULP_RX | \
+ F_T7_CPL_SWITCH | F_T7_SGE | F_T7_ULP_TX | F_SF)
+
enum {
MAX_NPORTS = 4, /* max # of ports */
SERNUM_LEN = 24, /* Serial # length */
@@ -77,6 +85,18 @@ enum {
FEC_MODULE = 1 << 6, /* FEC suggested by the cable/transceiver. */
};
+enum {
+ ULP_T10DIF_ISCSI = 1 << 0,
+ ULP_T10DIF_FCOE = 1 << 1
+};
+
+enum {
+ ULP_CRYPTO_LOOKASIDE = 1 << 0,
+ ULP_CRYPTO_INLINE_TLS = 1 << 1,
+ ULP_CRYPTO_INLINE_IPSEC = 1 << 2,
+ ULP_CRYPTO_OFLD_OVER_IPSEC_INLINE = 1 << 4
+};
+
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
struct port_stats {
@@ -230,6 +250,15 @@ struct tp_cpl_stats {
struct tp_rdma_stats {
u32 rqe_dfr_pkt;
u32 rqe_dfr_mod;
+ u32 pkts_in[MAX_NCHAN];
+ u64 bytes_in[MAX_NCHAN];
+ /*
+ * When reading RDMA stats, the register address gap between RDMA_IN
+ * and RDMA_OUT is 4 * u32; this padding allows both blocks to be
+ * read at once.
+ */
+ u32 padding[4];
+ u32 pkts_out[MAX_NCHAN];
+ u64 bytes_out[MAX_NCHAN];
};
struct sge_params {
@@ -259,7 +288,10 @@ struct tp_params {
uint32_t max_rx_pdu;
uint32_t max_tx_pdu;
bool rx_pkt_encap;
+ uint8_t lb_mode;
+ uint8_t lb_nchan;
+ int8_t ipsecidx_shift;
int8_t fcoe_shift;
int8_t port_shift;
int8_t vnic_shift;
@@ -270,6 +302,9 @@ struct tp_params {
int8_t macmatch_shift;
int8_t matchtype_shift;
int8_t frag_shift;
+ int8_t roce_shift;
+ int8_t synonly_shift;
+ int8_t tcpflags_shift;
};
/* Use same modulation queue as the tx channel. */
@@ -285,6 +320,22 @@ struct vpd_params {
u8 md[MD_LEN + 1];
};
+/*
+ * Maximum resources provisioned for a PCI PF.
+ */
+struct pf_resources {
+ unsigned int nvi; /* N virtual interfaces */
+ unsigned int neq; /* N egress Qs */
+ unsigned int nethctrl; /* N egress ETH or CTRL Qs */
+ unsigned int niqflint; /* N ingress Qs/w free list(s) & intr */
+ unsigned int niq; /* N ingress Qs */
+ unsigned int tc; /* PCI-E traffic class */
+ unsigned int pmask; /* port access rights mask */
+ unsigned int nexactf; /* N exact MPS filters */
+ unsigned int r_caps; /* read capabilities */
+ unsigned int wx_caps; /* write/execute capabilities */
+};
+
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int mps;
@@ -308,8 +359,11 @@ struct chip_params {
u8 pm_stats_cnt;
u8 cng_ch_bits_log; /* congestion channel map bits width */
u8 nsched_cls;
+ u8 cim_num_ibq;
u8 cim_num_obq;
- u8 filter_opt_len;
+ u8 filter_opt_len; /* number of bits for optional fields */
+ u8 filter_num_opt; /* number of optional fields */
+ u8 sge_ctxt_size;
u16 mps_rplc_size;
u16 vfcount;
u32 sge_fl_db;
@@ -360,6 +414,7 @@ struct adapter_params {
struct sge_params sge;
struct tp_params tp; /* PF-only */
struct vpd_params vpd;
+ struct pf_resources pfres; /* PF-only */
struct pci_params pci;
struct devlog_params devlog; /* PF-only */
struct rss_params rss; /* VF-only */
@@ -399,12 +454,13 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
unsigned int eo_wr_cred;
- unsigned int max_ordird_qp;
- unsigned int max_ird_adapter;
+ unsigned int max_ordird_qp; /* Max read depth per RDMA QP */
+ unsigned int max_ird_adapter; /* Max read depth per adapter */
/* These values are for all ports (8b/port, upto 4 ports) */
uint32_t mps_bg_map; /* MPS rx buffer group map */
uint32_t tp_ch_map; /* TPCHMAP from firmware */
+ uint32_t tx_tp_ch_map; /* TX_TPCHMAP from firmware */
bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */
@@ -412,11 +468,15 @@ struct adapter_params {
bool viid_smt_extn_support; /* FW returns vin, vfvld & smt index? */
unsigned int max_pkts_per_eth_tx_pkts_wr;
uint8_t nsched_cls; /* # of usable sched classes per port */
+
+ uint8_t ncores;
+ uint32_t tid_qid_sel_mask; /* TID based QID selection mask */
};
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6
+#define CHELSIO_T7 0x7
/*
* State needed to monitor the forward progress of SGE Ingress DMA activities
@@ -509,10 +569,11 @@ static inline int is_hashfilter(const struct adapter *adap)
static inline int is_ktls(const struct adapter *adap)
{
- return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW;
+ return adap->cryptocaps & FW_CAPS_CONFIG_TLS_HW ||
+ adap->params.chipid == CHELSIO_T7;
}
-static inline int chip_id(struct adapter *adap)
+static inline int chip_id(const struct adapter *adap)
{
return adap->params.chipid;
}
@@ -537,6 +598,11 @@ static inline int is_t6(struct adapter *adap)
return adap->params.chipid == CHELSIO_T6;
}
+static inline int is_t7(struct adapter *adap)
+{
+ return adap->params.chipid == CHELSIO_T7;
+}
+
static inline int is_fpga(struct adapter *adap)
{
return adap->params.fpga;
@@ -641,7 +707,7 @@ int t4_load_bootcfg(struct adapter *adapter, const u8 *cfg_data, unsigned int si
int t4_load_boot(struct adapter *adap, u8 *boot_data,
unsigned int boot_addr, unsigned int size);
int t4_flash_erase_sectors(struct adapter *adapter, int start, int end);
-int t4_flash_cfg_addr(struct adapter *adapter);
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr);
@@ -655,9 +721,10 @@ int t4_init_hw(struct adapter *adapter, u32 fw_params);
const struct chip_params *t4_get_chip_params(int chipid);
int t4_prep_adapter(struct adapter *adapter, u32 *buf);
int t4_shutdown_adapter(struct adapter *adapter);
-int t4_init_devlog_params(struct adapter *adapter, int fw_attach);
+int t4_init_devlog_ncores_params(struct adapter *adapter, int fw_attach);
int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_width(const struct adapter *adap, int filter_field);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id);
void t4_fatal_err(struct adapter *adapter, bool fw_error);
@@ -665,6 +732,7 @@ int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
int filter_index, int enable);
void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
int filter_index, int *enabled);
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -691,19 +759,60 @@ void t4_write_rss_pf_mask(struct adapter *adapter, u32 pfmask, bool sleep_ok);
int t4_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[]);
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n);
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp);
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp);
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[]);
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres);
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n);
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp);
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp);
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr);
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
unsigned int *pif_req_wrptr, unsigned int *pif_rsp_wrptr);
void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp);
+
+static inline void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size,
+ u16 *thres)
+{
+ t4_read_cimq_cfg_core(adap, 0, base, size, thres);
+}
+
+static inline int t4_read_cim_ibq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_ibq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_read_cim_obq(struct adapter *adap, u32 qid, u32 *data,
+ size_t n)
+{
+ return t4_read_cim_obq_core(adap, 0, qid, data, n);
+}
+
+static inline int t4_cim_read(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_read_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_write(struct adapter *adap, unsigned int addr,
+ unsigned int n, unsigned int *valp)
+{
+ return t4_cim_write_core(adap, 0, 0, addr, n, valp);
+}
+
+static inline int t4_cim_read_la(struct adapter *adap, u32 *la_buf, u32 *wrptr)
+{
+ return t4_cim_read_la_core(adap, 0, la_buf, wrptr);
+}
+
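The static inline wrappers above keep the pre-T7 single-core API intact by delegating to core 0. A usage sketch (the group value and core numbering here are assumptions, not taken from this change):

    u32 val;
    t4_cim_read(adap, addr, 1, &val);             /* legacy: group 0, core 0 */
    t4_cim_read_core(adap, 0, 1, addr, 1, &val);  /* explicit: core 1 on T7 */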
int t4_get_flash_params(struct adapter *adapter);
u32 t4_read_pcie_cfg4(struct adapter *adap, int reg, int drv_fw_attach);
@@ -919,6 +1028,8 @@ int t4_configure_ringbb(struct adapter *adap);
int t4_configure_add_smac(struct adapter *adap);
int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
u16 vlan);
+int t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp);
static inline int t4vf_query_params(struct adapter *adapter,
unsigned int nparams, const u32 *params,
@@ -948,6 +1059,7 @@ int t4vf_get_vfres(struct adapter *adapter);
int t4vf_prep_adapter(struct adapter *adapter);
int t4vf_get_vf_mac(struct adapter *adapter, unsigned int port,
unsigned int *naddr, u8 *addr);
+int t4vf_get_vf_vlan(struct adapter *adapter);
int t4_bar2_sge_qregs(struct adapter *adapter, unsigned int qid,
enum t4_bar2_qtype qtype, int user, u64 *pbar2_qoffset,
unsigned int *pbar2_qid);
@@ -963,4 +1075,66 @@ port_top_speed(const struct port_info *pi)
return (fwcap_to_speed(pi->link_cfg.pcaps) / 1000);
}
+/* SET_TCB_FIELD sent as a ULP command looks like this */
+#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
+ sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
+
+static inline void *
+mk_set_tcb_field_ulp_with_rpl(struct adapter *sc, void *cur, int tid,
+ uint16_t word, uint64_t mask, uint64_t val, const int qid)
+{
+ struct ulp_txpkt *ulpmc;
+ struct ulptx_idata *ulpsc;
+ struct cpl_set_tcb_field_core *req;
+
+ MPASS(((uintptr_t)cur & 7) == 0);
+
+ ulpmc = cur;
+ ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DEST(ULP_TXPKT_DEST_TP));
+ ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
+
+ ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ ulpsc->len = htobe32(sizeof(*req));
+
+ req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
+ OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
+
+ if (qid == -1) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
+ } else {
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ req->word_cookie = htobe16(V_WORD(word) |
+ V_COOKIE(CPL_COOKIE_TOM));
+ }
+ req->mask = htobe64(mask);
+ req->val = htobe64(val);
+
+ /*
+ * ULP_TX is an 8B processor but the firmware transfers WRs in 16B
+ * chunks. The master command for set_tcb_field does not end at a 16B
+ * boundary so it needs to be padded with a no-op.
+ */
+ MPASS((LEN__SET_TCB_FIELD_ULP & 0xf) != 0);
+ ulpsc = (struct ulptx_idata *)(req + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
+ ulpsc->len = htobe32(0);
+
+ return (ulpsc + 1);
+}
+
+static inline void *
+mk_set_tcb_field_ulp(struct adapter *sc, void *cur, int tid, uint16_t word,
+ uint64_t mask, uint64_t val)
+{
+ return (mk_set_tcb_field_ulp_with_rpl(sc, cur, tid, word, mask, val, -1));
+}
#endif /* __CHELSIO_COMMON_H */
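A hypothetical usage sketch for the two helpers above (the buffer and the TID/word/mask names are illustrative, not from this change): each call emits one SET_TCB_FIELD ULP command padded to a 16B boundary and returns the position for the next command.

    void *cur = wr_payload;            /* 8B-aligned scratch area inside a WR */
    cur = mk_set_tcb_field_ulp(sc, cur, tid1, word1, mask1, val1);
    /* The second update also requests a completion on ingress queue qid. */
    cur = mk_set_tcb_field_ulp_with_rpl(sc, cur, tid2, word2, mask2, val2, qid);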
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 3d22673d34c1..eb7ea9acc108 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2012, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -246,6 +245,8 @@ struct port_tx_state {
u32
t4_port_reg(struct adapter *adap, u8 port, u32 reg)
{
+ if (chip_id(adap) > CHELSIO_T6)
+ return T7_PORT_REG(port, reg);
if (chip_id(adap) > CHELSIO_T4)
return T5_PORT_REG(port, reg);
return PORT_REG(port, reg);
@@ -268,8 +269,10 @@ read_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
{
int i;
- for_each_port(sc, i)
- read_tx_state_one(sc, i, &tx_state[i]);
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] != 0xff)
+ read_tx_state_one(sc, i, &tx_state[i]);
+ }
}
static void
@@ -279,7 +282,9 @@ check_tx_state(struct adapter *sc, struct port_tx_state *tx_state)
uint64_t tx_frames, rx_pause;
int i;
- for_each_port(sc, i) {
+ for (i = 0; i < MAX_NCHAN; i++) {
+ if (sc->chan_map[i] == 0xff)
+ continue;
rx_pause = tx_state[i].rx_pause;
tx_frames = tx_state[i].tx_frames;
read_tx_state_one(sc, i, &tx_state[i]); /* update */
@@ -351,7 +356,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
return -EINVAL;
if (adap->flags & IS_VF) {
- if (is_t6(adap))
+ if (chip_id(adap) >= CHELSIO_T6)
data_reg = FW_T6VF_MBDATA_BASE_ADDR;
else
data_reg = FW_T4VF_MBDATA_BASE_ADDR;
@@ -508,9 +513,8 @@ failed:
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
void *rpl, bool sleep_ok)
{
- return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
- sleep_ok, FW_CMD_MAX_TIMEOUT);
-
+ return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl,
+ sleep_ok, FW_CMD_MAX_TIMEOUT);
}
static int t4_edc_err_read(struct adapter *adap, int idx)
@@ -799,6 +803,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
case CHELSIO_T5:
case CHELSIO_T6:
+ case CHELSIO_T7:
if (adapter->flags & IS_VF)
return FW_T4VF_REGMAP_SIZE;
return T5_REGMAP_SIZE;
@@ -2639,6 +2644,638 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
};
+ static const unsigned int t7_reg_ranges[] = {
+ 0x1008, 0x101c,
+ 0x1024, 0x10a8,
+ 0x10b4, 0x10f8,
+ 0x1100, 0x1114,
+ 0x111c, 0x112c,
+ 0x1138, 0x113c,
+ 0x1144, 0x115c,
+ 0x1180, 0x1184,
+ 0x1190, 0x1194,
+ 0x11a0, 0x11a4,
+ 0x11b0, 0x11d0,
+ 0x11fc, 0x1278,
+ 0x1280, 0x1368,
+ 0x1700, 0x172c,
+ 0x173c, 0x1760,
+ 0x1800, 0x18fc,
+ 0x3000, 0x3044,
+ 0x3060, 0x3064,
+ 0x30a4, 0x30b0,
+ 0x30b8, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35e0, 0x35ec,
+ 0x3600, 0x37fc,
+ 0x3804, 0x3818,
+ 0x3880, 0x388c,
+ 0x3900, 0x3904,
+ 0x3910, 0x3978,
+ 0x3980, 0x399c,
+ 0x4700, 0x4720,
+ 0x4728, 0x475c,
+ 0x480c, 0x4814,
+ 0x4890, 0x489c,
+ 0x48a4, 0x48ac,
+ 0x48b8, 0x48c4,
+ 0x4900, 0x4924,
+ 0x4ffc, 0x4ffc,
+ 0x5500, 0x5624,
+ 0x56c4, 0x56ec,
+ 0x56f4, 0x5720,
+ 0x5728, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x589c,
+ 0x58a4, 0x58ac,
+ 0x58b8, 0x58bc,
+ 0x5940, 0x598c,
+ 0x59b0, 0x59c8,
+ 0x59d0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a8c,
+ 0x5a94, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5e48,
+ 0x5e50, 0x5e94,
+ 0x5ea0, 0x5eb0,
+ 0x5ec0, 0x5ec0,
+ 0x5ec8, 0x5ed0,
+ 0x5ee0, 0x5ee0,
+ 0x5ef0, 0x5ef0,
+ 0x5f00, 0x5f04,
+ 0x5f0c, 0x5f10,
+ 0x5f20, 0x5f88,
+ 0x5f90, 0x5fd8,
+ 0x6000, 0x6020,
+ 0x6028, 0x6030,
+ 0x6044, 0x609c,
+ 0x60a8, 0x60ac,
+ 0x60b8, 0x60ec,
+ 0x6100, 0x6104,
+ 0x6118, 0x611c,
+ 0x6150, 0x6150,
+ 0x6180, 0x61b8,
+ 0x7700, 0x77a8,
+ 0x77b0, 0x7888,
+ 0x78cc, 0x7970,
+ 0x7b00, 0x7b00,
+ 0x7b08, 0x7b0c,
+ 0x7b24, 0x7b84,
+ 0x7b8c, 0x7c2c,
+ 0x7c34, 0x7c40,
+ 0x7c48, 0x7c68,
+ 0x7c70, 0x7c7c,
+ 0x7d00, 0x7ddc,
+ 0x7de4, 0x7e38,
+ 0x7e40, 0x7e44,
+ 0x7e4c, 0x7e74,
+ 0x7e80, 0x7ee0,
+ 0x7ee8, 0x7f0c,
+ 0x7f20, 0x7f5c,
+ 0x8dc0, 0x8de8,
+ 0x8df8, 0x8e04,
+ 0x8e10, 0x8e30,
+ 0x8e7c, 0x8ee8,
+ 0x8f88, 0x8f88,
+ 0x8f90, 0x8fb0,
+ 0x8fb8, 0x9058,
+ 0x9074, 0x90f8,
+ 0x9100, 0x912c,
+ 0x9138, 0x9188,
+ 0x9400, 0x9414,
+ 0x9430, 0x9440,
+ 0x9454, 0x9454,
+ 0x945c, 0x947c,
+ 0x9498, 0x94b8,
+ 0x9600, 0x9600,
+ 0x9608, 0x9638,
+ 0x9640, 0x9704,
+ 0x9710, 0x971c,
+ 0x9800, 0x9804,
+ 0x9854, 0x9854,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0x9fec,
+ 0xa000, 0xa06c,
+ 0xa080, 0xa0ec,
+ 0xa100, 0xa16c,
+ 0xa180, 0xa1ec,
+ 0xa200, 0xa26c,
+ 0xa280, 0xa2ec,
+ 0xa300, 0xa36c,
+ 0xa380, 0xa458,
+ 0xa460, 0xa4f8,
+ 0xd000, 0xd03c,
+ 0xd100, 0xd134,
+ 0xd200, 0xd214,
+ 0xd220, 0xd234,
+ 0xd240, 0xd254,
+ 0xd260, 0xd274,
+ 0xd280, 0xd294,
+ 0xd2a0, 0xd2b4,
+ 0xd2c0, 0xd2d4,
+ 0xd2e0, 0xd2f4,
+ 0xd300, 0xd31c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xe00c,
+ 0xf000, 0xf008,
+ 0xf010, 0xf06c,
+ 0x11000, 0x11014,
+ 0x11048, 0x11120,
+ 0x11130, 0x11144,
+ 0x11174, 0x11178,
+ 0x11190, 0x111a0,
+ 0x111e4, 0x112f0,
+ 0x11300, 0x1133c,
+ 0x11408, 0x1146c,
+ 0x12000, 0x12004,
+ 0x12060, 0x122c4,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x190e8,
+ 0x190f0, 0x190f8,
+ 0x19100, 0x19110,
+ 0x19120, 0x19124,
+ 0x19150, 0x19194,
+ 0x1919c, 0x191a0,
+ 0x191ac, 0x191c8,
+ 0x191d0, 0x191e4,
+ 0x19250, 0x19250,
+ 0x19258, 0x19268,
+ 0x19278, 0x19278,
+ 0x19280, 0x192b0,
+ 0x192bc, 0x192f0,
+ 0x19300, 0x19308,
+ 0x19310, 0x19318,
+ 0x19320, 0x19328,
+ 0x19330, 0x19330,
+ 0x19348, 0x1934c,
+ 0x193f8, 0x19428,
+ 0x19430, 0x19444,
+ 0x1944c, 0x1946c,
+ 0x19474, 0x1947c,
+ 0x19488, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c48,
+ 0x19c50, 0x19c80,
+ 0x19c94, 0x19c98,
+ 0x19ca0, 0x19cdc,
+ 0x19ce4, 0x19cf8,
+ 0x19d00, 0x19d30,
+ 0x19d50, 0x19d80,
+ 0x19d94, 0x19d98,
+ 0x19da0, 0x19de0,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19ebc,
+ 0x19ec4, 0x19ef4,
+ 0x19f04, 0x19f2c,
+ 0x19f34, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fb4,
+ 0x19fbc, 0x19fbc,
+ 0x19fc4, 0x19fc8,
+ 0x19fd0, 0x19fe4,
+ 0x1a000, 0x1a004,
+ 0x1a010, 0x1a06c,
+ 0x1a0b0, 0x1a0e4,
+ 0x1a0ec, 0x1a108,
+ 0x1a114, 0x1a130,
+ 0x1a138, 0x1a1c4,
+ 0x1a1fc, 0x1a29c,
+ 0x1a2a8, 0x1a2b8,
+ 0x1a2c0, 0x1a388,
+ 0x1a398, 0x1a3ac,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e044,
+ 0x1e04c, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e4,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e444,
+ 0x1e44c, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e4,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e844,
+ 0x1e84c, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae4,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec44,
+ 0x1ec4c, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee4,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f044,
+ 0x1f04c, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e4,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f444,
+ 0x1f44c, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e4,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f844,
+ 0x1f84c, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae4,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc44,
+ 0x1fc4c, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee4,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30038,
+ 0x30100, 0x3017c,
+ 0x30190, 0x301a0,
+ 0x301a8, 0x301b8,
+ 0x301c4, 0x301c8,
+ 0x301d0, 0x301e0,
+ 0x30200, 0x30344,
+ 0x30400, 0x304b4,
+ 0x304c0, 0x3052c,
+ 0x30540, 0x3065c,
+ 0x30800, 0x30848,
+ 0x30850, 0x308a8,
+ 0x308b8, 0x308c0,
+ 0x308cc, 0x308dc,
+ 0x30900, 0x30904,
+ 0x3090c, 0x30914,
+ 0x3091c, 0x30928,
+ 0x30930, 0x3093c,
+ 0x30944, 0x30948,
+ 0x30954, 0x30974,
+ 0x3097c, 0x30980,
+ 0x30a00, 0x30a20,
+ 0x30a38, 0x30a3c,
+ 0x30a50, 0x30a50,
+ 0x30a80, 0x30a80,
+ 0x30a88, 0x30aa8,
+ 0x30ab0, 0x30ab4,
+ 0x30ac8, 0x30ad4,
+ 0x30b28, 0x30b84,
+ 0x30b98, 0x30bb8,
+ 0x30c98, 0x30d14,
+ 0x31000, 0x31020,
+ 0x31038, 0x3103c,
+ 0x31050, 0x31050,
+ 0x31080, 0x31080,
+ 0x31088, 0x310a8,
+ 0x310b0, 0x310b4,
+ 0x310c8, 0x310d4,
+ 0x31128, 0x31184,
+ 0x31198, 0x311b8,
+ 0x32000, 0x32038,
+ 0x32100, 0x3217c,
+ 0x32190, 0x321a0,
+ 0x321a8, 0x321b8,
+ 0x321c4, 0x321c8,
+ 0x321d0, 0x321e0,
+ 0x32200, 0x32344,
+ 0x32400, 0x324b4,
+ 0x324c0, 0x3252c,
+ 0x32540, 0x3265c,
+ 0x32800, 0x32848,
+ 0x32850, 0x328a8,
+ 0x328b8, 0x328c0,
+ 0x328cc, 0x328dc,
+ 0x32900, 0x32904,
+ 0x3290c, 0x32914,
+ 0x3291c, 0x32928,
+ 0x32930, 0x3293c,
+ 0x32944, 0x32948,
+ 0x32954, 0x32974,
+ 0x3297c, 0x32980,
+ 0x32a00, 0x32a20,
+ 0x32a38, 0x32a3c,
+ 0x32a50, 0x32a50,
+ 0x32a80, 0x32a80,
+ 0x32a88, 0x32aa8,
+ 0x32ab0, 0x32ab4,
+ 0x32ac8, 0x32ad4,
+ 0x32b28, 0x32b84,
+ 0x32b98, 0x32bb8,
+ 0x32c98, 0x32d14,
+ 0x33000, 0x33020,
+ 0x33038, 0x3303c,
+ 0x33050, 0x33050,
+ 0x33080, 0x33080,
+ 0x33088, 0x330a8,
+ 0x330b0, 0x330b4,
+ 0x330c8, 0x330d4,
+ 0x33128, 0x33184,
+ 0x33198, 0x331b8,
+ 0x34000, 0x34038,
+ 0x34100, 0x3417c,
+ 0x34190, 0x341a0,
+ 0x341a8, 0x341b8,
+ 0x341c4, 0x341c8,
+ 0x341d0, 0x341e0,
+ 0x34200, 0x34344,
+ 0x34400, 0x344b4,
+ 0x344c0, 0x3452c,
+ 0x34540, 0x3465c,
+ 0x34800, 0x34848,
+ 0x34850, 0x348a8,
+ 0x348b8, 0x348c0,
+ 0x348cc, 0x348dc,
+ 0x34900, 0x34904,
+ 0x3490c, 0x34914,
+ 0x3491c, 0x34928,
+ 0x34930, 0x3493c,
+ 0x34944, 0x34948,
+ 0x34954, 0x34974,
+ 0x3497c, 0x34980,
+ 0x34a00, 0x34a20,
+ 0x34a38, 0x34a3c,
+ 0x34a50, 0x34a50,
+ 0x34a80, 0x34a80,
+ 0x34a88, 0x34aa8,
+ 0x34ab0, 0x34ab4,
+ 0x34ac8, 0x34ad4,
+ 0x34b28, 0x34b84,
+ 0x34b98, 0x34bb8,
+ 0x34c98, 0x34d14,
+ 0x35000, 0x35020,
+ 0x35038, 0x3503c,
+ 0x35050, 0x35050,
+ 0x35080, 0x35080,
+ 0x35088, 0x350a8,
+ 0x350b0, 0x350b4,
+ 0x350c8, 0x350d4,
+ 0x35128, 0x35184,
+ 0x35198, 0x351b8,
+ 0x36000, 0x36038,
+ 0x36100, 0x3617c,
+ 0x36190, 0x361a0,
+ 0x361a8, 0x361b8,
+ 0x361c4, 0x361c8,
+ 0x361d0, 0x361e0,
+ 0x36200, 0x36344,
+ 0x36400, 0x364b4,
+ 0x364c0, 0x3652c,
+ 0x36540, 0x3665c,
+ 0x36800, 0x36848,
+ 0x36850, 0x368a8,
+ 0x368b8, 0x368c0,
+ 0x368cc, 0x368dc,
+ 0x36900, 0x36904,
+ 0x3690c, 0x36914,
+ 0x3691c, 0x36928,
+ 0x36930, 0x3693c,
+ 0x36944, 0x36948,
+ 0x36954, 0x36974,
+ 0x3697c, 0x36980,
+ 0x36a00, 0x36a20,
+ 0x36a38, 0x36a3c,
+ 0x36a50, 0x36a50,
+ 0x36a80, 0x36a80,
+ 0x36a88, 0x36aa8,
+ 0x36ab0, 0x36ab4,
+ 0x36ac8, 0x36ad4,
+ 0x36b28, 0x36b84,
+ 0x36b98, 0x36bb8,
+ 0x36c98, 0x36d14,
+ 0x37000, 0x37020,
+ 0x37038, 0x3703c,
+ 0x37050, 0x37050,
+ 0x37080, 0x37080,
+ 0x37088, 0x370a8,
+ 0x370b0, 0x370b4,
+ 0x370c8, 0x370d4,
+ 0x37128, 0x37184,
+ 0x37198, 0x371b8,
+ 0x38000, 0x380b0,
+ 0x380b8, 0x38130,
+ 0x38140, 0x38140,
+ 0x38150, 0x38154,
+ 0x38160, 0x381c4,
+ 0x381f0, 0x38204,
+ 0x3820c, 0x38214,
+ 0x3821c, 0x3822c,
+ 0x38244, 0x38244,
+ 0x38254, 0x38274,
+ 0x3827c, 0x38280,
+ 0x38300, 0x38304,
+ 0x3830c, 0x38314,
+ 0x3831c, 0x3832c,
+ 0x38344, 0x38344,
+ 0x38354, 0x38374,
+ 0x3837c, 0x38380,
+ 0x38400, 0x38424,
+ 0x38438, 0x3843c,
+ 0x38480, 0x38480,
+ 0x384a8, 0x384a8,
+ 0x384b0, 0x384b4,
+ 0x384c8, 0x38514,
+ 0x38600, 0x3860c,
+ 0x3861c, 0x38624,
+ 0x38900, 0x38924,
+ 0x38938, 0x3893c,
+ 0x38980, 0x38980,
+ 0x389a8, 0x389a8,
+ 0x389b0, 0x389b4,
+ 0x389c8, 0x38a14,
+ 0x38b00, 0x38b0c,
+ 0x38b1c, 0x38b24,
+ 0x38e00, 0x38e00,
+ 0x38e18, 0x38e20,
+ 0x38e38, 0x38e40,
+ 0x38e58, 0x38e60,
+ 0x38e78, 0x38e80,
+ 0x38e98, 0x38ea0,
+ 0x38eb8, 0x38ec0,
+ 0x38ed8, 0x38ee0,
+ 0x38ef8, 0x38f08,
+ 0x38f10, 0x38f2c,
+ 0x38f80, 0x38ffc,
+ 0x39080, 0x39080,
+ 0x39088, 0x39090,
+ 0x39100, 0x39108,
+ 0x39120, 0x39128,
+ 0x39140, 0x39148,
+ 0x39160, 0x39168,
+ 0x39180, 0x39188,
+ 0x391a0, 0x391a8,
+ 0x391c0, 0x391c8,
+ 0x391e0, 0x391e8,
+ 0x39200, 0x39200,
+ 0x39208, 0x39240,
+ 0x39300, 0x39300,
+ 0x39308, 0x39340,
+ 0x39400, 0x39400,
+ 0x39408, 0x39440,
+ 0x39500, 0x39500,
+ 0x39508, 0x39540,
+ 0x39600, 0x39600,
+ 0x39608, 0x39640,
+ 0x39700, 0x39700,
+ 0x39708, 0x39740,
+ 0x39800, 0x39800,
+ 0x39808, 0x39840,
+ 0x39900, 0x39900,
+ 0x39908, 0x39940,
+ 0x39a00, 0x39a04,
+ 0x39a10, 0x39a14,
+ 0x39a1c, 0x39aa8,
+ 0x39b00, 0x39ecc,
+ 0x3a000, 0x3a004,
+ 0x3a050, 0x3a084,
+ 0x3a090, 0x3a09c,
+ 0x3e000, 0x3e020,
+ 0x3e03c, 0x3e05c,
+ 0x3e100, 0x3e120,
+ 0x3e13c, 0x3e15c,
+ 0x3e200, 0x3e220,
+ 0x3e23c, 0x3e25c,
+ 0x3e300, 0x3e320,
+ 0x3e33c, 0x3e35c,
+ 0x3f000, 0x3f034,
+ 0x3f100, 0x3f130,
+ 0x3f200, 0x3f218,
+ 0x44000, 0x44014,
+ 0x44020, 0x44028,
+ 0x44030, 0x44030,
+ 0x44100, 0x44114,
+ 0x44120, 0x44128,
+ 0x44130, 0x44130,
+ 0x44200, 0x44214,
+ 0x44220, 0x44228,
+ 0x44230, 0x44230,
+ 0x44300, 0x44314,
+ 0x44320, 0x44328,
+ 0x44330, 0x44330,
+ 0x44400, 0x44414,
+ 0x44420, 0x44428,
+ 0x44430, 0x44430,
+ 0x44500, 0x44514,
+ 0x44520, 0x44528,
+ 0x44530, 0x44530,
+ 0x44714, 0x44718,
+ 0x44730, 0x44730,
+ 0x447c0, 0x447c0,
+ 0x447f0, 0x447f0,
+ 0x447f8, 0x447fc,
+ 0x45000, 0x45014,
+ 0x45020, 0x45028,
+ 0x45030, 0x45030,
+ 0x45100, 0x45114,
+ 0x45120, 0x45128,
+ 0x45130, 0x45130,
+ 0x45200, 0x45214,
+ 0x45220, 0x45228,
+ 0x45230, 0x45230,
+ 0x45300, 0x45314,
+ 0x45320, 0x45328,
+ 0x45330, 0x45330,
+ 0x45400, 0x45414,
+ 0x45420, 0x45428,
+ 0x45430, 0x45430,
+ 0x45500, 0x45514,
+ 0x45520, 0x45528,
+ 0x45530, 0x45530,
+ 0x45714, 0x45718,
+ 0x45730, 0x45730,
+ 0x457c0, 0x457c0,
+ 0x457f0, 0x457f0,
+ 0x457f8, 0x457fc,
+ 0x46000, 0x46010,
+ 0x46020, 0x46034,
+ 0x46040, 0x46050,
+ 0x46060, 0x46088,
+ 0x47000, 0x4709c,
+ 0x470c0, 0x470d4,
+ 0x47100, 0x471a8,
+ 0x471b0, 0x471e8,
+ 0x47200, 0x47210,
+ 0x4721c, 0x47230,
+ 0x47238, 0x47238,
+ 0x47240, 0x472ac,
+ 0x472d0, 0x472f4,
+ 0x47300, 0x47310,
+ 0x47318, 0x47348,
+ 0x47350, 0x47354,
+ 0x47380, 0x47388,
+ 0x47390, 0x47394,
+ 0x47400, 0x47448,
+ 0x47450, 0x47458,
+ 0x47500, 0x4751c,
+ 0x47530, 0x4754c,
+ 0x47560, 0x4757c,
+ 0x47590, 0x475ac,
+ 0x47600, 0x47630,
+ 0x47640, 0x47644,
+ 0x47660, 0x4769c,
+ 0x47700, 0x47710,
+ 0x47740, 0x47750,
+ 0x4775c, 0x4779c,
+ 0x477b0, 0x477bc,
+ 0x477c4, 0x477c8,
+ 0x477d4, 0x477fc,
+ 0x48000, 0x48004,
+ 0x48018, 0x4801c,
+ 0x49304, 0x493f0,
+ 0x49400, 0x49410,
+ 0x49460, 0x494f4,
+ 0x50000, 0x50084,
+ 0x50090, 0x500cc,
+ 0x50300, 0x50384,
+ 0x50400, 0x50404,
+ 0x50800, 0x50884,
+ 0x50890, 0x508cc,
+ 0x50b00, 0x50b84,
+ 0x50c00, 0x50c04,
+ 0x51000, 0x51020,
+ 0x51028, 0x510c4,
+ 0x51104, 0x51108,
+ 0x51200, 0x51274,
+ 0x51300, 0x51324,
+ 0x51400, 0x51548,
+ 0x51550, 0x51554,
+ 0x5155c, 0x51584,
+ 0x5158c, 0x515c8,
+ 0x515f0, 0x515f4,
+ 0x58000, 0x58004,
+ 0x58018, 0x5801c,
+ 0x59304, 0x593f0,
+ 0x59400, 0x59410,
+ 0x59460, 0x594f4,
+ };
+
u32 *buf_end = (u32 *)(buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -2679,6 +3316,16 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
}
break;
+ case CHELSIO_T7:
+ if (adap->flags & IS_VF) {
+ reg_ranges = t6vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+ } else {
+ reg_ranges = t7_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t7_reg_ranges);
+ }
+ break;
+
default:
CH_ERR(adap,
"Unsupported chip version %d\n", chip_version);
@@ -3086,6 +3733,56 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p,
return 0;
}
+/* Flash Layout {start sector, # of sectors} for T4/T5/T6 adapters */
+static const struct t4_flash_loc_entry t4_flash_loc_arr[] = {
+ [FLASH_LOC_EXP_ROM] = { 0, 6 },
+ [FLASH_LOC_IBFT] = { 6, 1 },
+ [FLASH_LOC_BOOTCFG] = { 7, 1 },
+ [FLASH_LOC_FW] = { 8, 16 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 27, 1 },
+ [FLASH_LOC_ISCSI_CRASH] = { 29, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 30, 1 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_CUDBG] = { 32, 32 },
+ [FLASH_LOC_BOOT_AREA] = { 0, 8 }, /* Spans complete Boot Area */
+ [FLASH_LOC_END] = { 64, 0 },
+};
+
+/* Flash Layout {start sector, # of sectors} for T7 adapters */
+static const struct t4_flash_loc_entry t7_flash_loc_arr[] = {
+ [FLASH_LOC_VPD] = { 0, 1 },
+ [FLASH_LOC_FWBOOTSTRAP] = { 1, 1 },
+ [FLASH_LOC_FW] = { 2, 29 },
+ [FLASH_LOC_CFG] = { 31, 1 },
+ [FLASH_LOC_EXP_ROM] = { 32, 15 },
+ [FLASH_LOC_IBFT] = { 47, 1 },
+ [FLASH_LOC_BOOTCFG] = { 48, 1 },
+ [FLASH_LOC_DPU_BOOT] = { 49, 13 },
+ [FLASH_LOC_ISCSI_CRASH] = { 62, 1 },
+ [FLASH_LOC_FCOE_CRASH] = { 63, 1 },
+ [FLASH_LOC_VPD_BACKUP] = { 64, 1 },
+ [FLASH_LOC_FWBOOTSTRAP_BACKUP] = { 65, 1 },
+ [FLASH_LOC_FW_BACKUP] = { 66, 29 },
+ [FLASH_LOC_CFG_BACK] = { 95, 1 },
+ [FLASH_LOC_CUDBG] = { 96, 48 },
+ [FLASH_LOC_CHIP_DUMP] = { 144, 48 },
+ [FLASH_LOC_DPU_AREA] = { 192, 64 },
+ [FLASH_LOC_BOOT_AREA] = { 32, 17 }, /* Spans complete UEFI/PXE Boot Area */
+ [FLASH_LOC_END] = { 256, 0 },
+};
+
+int
+t4_flash_loc_start(struct adapter *adap, enum t4_flash_loc loc,
+ unsigned int *lenp)
+{
+ const struct t4_flash_loc_entry *l = chip_id(adap) >= CHELSIO_T7 ?
+ &t7_flash_loc_arr[loc] : &t4_flash_loc_arr[loc];
+
+ if (lenp != NULL)
+ *lenp = FLASH_MAX_SIZE(l->nsecs);
+ return (FLASH_START(l->start_sec));
+}
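A short sketch of the new lookup in use, assuming FLASH_START() and FLASH_MAX_SIZE() scale the sector number and sector count by the 64KB sector size:

    unsigned int fw_len;
    const int fw_start = t4_flash_loc_start(adap, FLASH_LOC_FW, &fw_len);
    /* On T7: start sector 2, 29 sectors -> fw_start = 0x20000, fw_len = 29 * 64KB. */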
+
/* serial flash and firmware constants and flash config file constants */
enum {
SF_ATTEMPTS = 10, /* max retries for SF operations */
@@ -3116,13 +3813,16 @@ static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
int lock, u32 *valp)
{
int ret;
+ uint32_t op;
if (!byte_cnt || byte_cnt > 4)
return -EINVAL;
if (t4_read_reg(adapter, A_SF_OP) & F_BUSY)
return -EBUSY;
- t4_write_reg(adapter, A_SF_OP,
- V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
+ op = V_SF_LOCK(lock) | V_CONT(cont) | V_BYTECNT(byte_cnt - 1);
+ if (chip_id(adapter) >= CHELSIO_T7)
+ op |= F_QUADREADDISABLE;
+ t4_write_reg(adapter, A_SF_OP, op);
ret = t4_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 5);
if (!ret)
*valp = t4_read_reg(adapter, A_SF_DATA);
@@ -3294,9 +3994,10 @@ unlock:
*/
int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3308,8 +4009,10 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
{
- return t4_read_flash(adapter, FLASH_FW_START,
- sizeof (*hdr) / sizeof (uint32_t), (uint32_t *)hdr, 1);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start, sizeof (*hdr) / sizeof (uint32_t),
+ (uint32_t *)hdr, 1);
}
/**
@@ -3321,9 +4024,11 @@ int t4_get_fw_hdr(struct adapter *adapter, struct fw_hdr *hdr)
*/
int t4_get_bs_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
- offsetof(struct fw_hdr, fw_ver), 1,
- vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FWBOOTSTRAP,
+ NULL);
+
+ return t4_read_flash(adapter, start + offsetof(struct fw_hdr, fw_ver),
+ 1, vers, 0);
}
/**
@@ -3335,9 +4040,10 @@ int t4_get_bs_version(struct adapter *adapter, u32 *vers)
*/
int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, FLASH_FW_START +
- offsetof(struct fw_hdr, tp_microcode_ver),
- 1, vers, 0);
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_FW, NULL);
+
+ return t4_read_flash(adapter, start +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1, vers, 0);
}
/**
@@ -3359,10 +4065,10 @@ int t4_get_exprom_version(struct adapter *adapter, u32 *vers)
u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
sizeof(u32))];
int ret;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_EXP_ROM, NULL);
- ret = t4_read_flash(adapter, FLASH_EXP_ROM_START,
- ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
- 0);
+ ret = t4_read_flash(adapter, start, ARRAY_SIZE(exprom_header_buf),
+ exprom_header_buf, 0);
if (ret)
return ret;
@@ -3520,16 +4226,20 @@ int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
* File is stored, or an error if the device FLASH is too small to contain
* a Firmware Configuration File.
*/
-int t4_flash_cfg_addr(struct adapter *adapter)
+int t4_flash_cfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int cfg_start = t4_flash_loc_start(adapter, FLASH_LOC_CFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_CFG_START + FLASH_CFG_MAX_SIZE)
+ if (adapter->params.sf_size < cfg_start + len)
return -ENOSPC;
-
- return FLASH_CFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (cfg_start);
}
/*
@@ -3547,7 +4257,8 @@ static int t4_fw_matches_chip(struct adapter *adap,
*/
if ((is_t4(adap) && hdr->chip == FW_HDR_CHIP_T4) ||
(is_t5(adap) && hdr->chip == FW_HDR_CHIP_T5) ||
- (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6))
+ (is_t6(adap) && hdr->chip == FW_HDR_CHIP_T6) ||
+ (is_t7(adap) && hdr->chip == FW_HDR_CHIP_T7))
return 1;
CH_ERR(adap,
@@ -3572,20 +4283,15 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
u8 first_page[SF_PAGE_SIZE];
const u32 *p = (const u32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_start_sec;
unsigned int fw_start;
unsigned int fw_size;
+ enum t4_flash_loc loc;
- if (ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP) {
- fw_start_sec = FLASH_FWBOOTSTRAP_START_SEC;
- fw_start = FLASH_FWBOOTSTRAP_START;
- fw_size = FLASH_FWBOOTSTRAP_MAX_SIZE;
- } else {
- fw_start_sec = FLASH_FW_START_SEC;
- fw_start = FLASH_FW_START;
- fw_size = FLASH_FW_MAX_SIZE;
- }
+ loc = ntohl(hdr->magic) == FW_HDR_MAGIC_BOOTSTRAP ?
+ FLASH_LOC_FWBOOTSTRAP : FLASH_LOC_FW;
+ fw_start = t4_flash_loc_start(adap, loc, &fw_size);
+ fw_start_sec = fw_start / SF_SEC_SIZE;
if (!size) {
CH_ERR(adap, "FW image has no data\n");
@@ -3618,7 +4324,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
return -EINVAL;
}
- i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE); /* # of sectors spanned */
ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
if (ret)
goto out;
@@ -3672,7 +4378,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.param[0].mnem =
cpu_to_be32(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_FWCACHE));
- c.param[0].val = (__force __be32)op;
+ c.param[0].val = cpu_to_be32(op);
return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
}
@@ -3922,15 +4628,12 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* speed and let the firmware pick one.
*/
fec |= FW_PORT_CAP32_FORCE_FEC;
- if (speed & FW_PORT_CAP32_SPEED_100G) {
+ if (speed & FW_PORT_CAP32_SPEED_25G) {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_NO_FEC;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
} else {
fec |= FW_PORT_CAP32_FEC_RS;
- fec |= FW_PORT_CAP32_FEC_BASER_RS;
fec |= FW_PORT_CAP32_FEC_NO_FEC;
}
} else {
@@ -3948,12 +4651,9 @@ int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
* the potential top speed. Request the best
* FEC at that speed instead.
*/
- if (speed & FW_PORT_CAP32_SPEED_100G) {
- if (fec == FW_PORT_CAP32_FEC_BASER_RS)
- fec = FW_PORT_CAP32_FEC_RS;
- } else if (speed & FW_PORT_CAP32_SPEED_50G) {
- if (fec == FW_PORT_CAP32_FEC_RS)
- fec = FW_PORT_CAP32_FEC_BASER_RS;
+ if ((speed & FW_PORT_CAP32_SPEED_25G) == 0 &&
+ fec == FW_PORT_CAP32_FEC_BASER_RS) {
+ fec = FW_PORT_CAP32_FEC_RS;
}
}
} else {
@@ -4925,6 +5625,15 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
.details = mps_trc_intr_details,
.actions = NULL,
};
+ static const struct intr_info t7_mps_trc_intr_info = {
+ .name = "T7_MPS_TRC_INT_CAUSE",
+ .cause_reg = A_T7_MPS_TRC_INT_CAUSE,
+ .enable_reg = A_T7_MPS_TRC_INT_ENABLE,
+ .fatal = F_MISCPERR | V_PKTFIFO(M_PKTFIFO) | V_FILTMEM(M_FILTMEM),
+ .flags = 0,
+ .details = mps_trc_intr_details,
+ .actions = NULL,
+ };
static const struct intr_details mps_stat_sram_intr_details[] = {
{ 0xffffffff, "MPS statistics SRAM parity error" },
{ 0 }
@@ -4998,7 +5707,10 @@ static bool mps_intr_handler(struct adapter *adap, int arg, bool verbose)
fatal = false;
fatal |= t4_handle_intr(adap, &mps_rx_perr_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_tx_intr_info, 0, verbose);
- fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
+ if (chip_id(adap) > CHELSIO_T6)
+ fatal |= t4_handle_intr(adap, &t7_mps_trc_intr_info, 0, verbose);
+ else
+ fatal |= t4_handle_intr(adap, &mps_trc_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_sram_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_tx_intr_info, 0, verbose);
fatal |= t4_handle_intr(adap, &mps_stat_rx_intr_info, 0, verbose);
@@ -5225,7 +5937,7 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
snprintf(name, sizeof(name), "MAC_PORT%u_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_INT_CAUSE);
@@ -5234,10 +5946,29 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
ii.flags = 0;
ii.details = mac_intr_details;
ii.actions = NULL;
+ } else {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_INT_EN);
+ ii.fatal = F_TXFIFO_PRTY_ERR | F_RXFIFO_PRTY_ERR;
+ ii.flags = 0;
+ ii.details = mac_intr_details;
+ ii.actions = NULL;
}
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
- if (chip_id(adap) >= CHELSIO_T5) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (chip_id(adap) >= CHELSIO_T5) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE);
@@ -5249,7 +5980,17 @@ static bool mac_intr_handler(struct adapter *adap, int port, bool verbose)
fatal |= t4_handle_intr(adap, &ii, 0, verbose);
}
- if (chip_id(adap) >= CHELSIO_T6) {
+ if (chip_id(adap) > CHELSIO_T6) {
+ snprintf(name, sizeof(name), "T7_MAC_PORT%u_PERR_INT_CAUSE_100G", port);
+ ii.name = &name[0];
+ ii.cause_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_CAUSE_100G);
+ ii.enable_reg = T7_PORT_REG(port, A_T7_MAC_PORT_PERR_INT_EN_100G);
+ ii.fatal = 0;
+ ii.flags = 0;
+ ii.details = NULL;
+ ii.actions = NULL;
+ fatal |= t4_handle_intr(adap, &ii, 0, verbose);
+ } else if (is_t6(adap)) {
snprintf(name, sizeof(name), "MAC_PORT%u_PERR_INT_CAUSE_100G", port);
ii.name = &name[0];
ii.cause_reg = T5_PORT_REG(port, A_MAC_PORT_PERR_INT_CAUSE_100G);
@@ -5346,13 +6087,42 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, "CIM" },
{ 0 }
};
- static const struct intr_info pl_perr_cause = {
+ static const struct intr_details t7_pl_intr_details[] = {
+ { F_T7_MC1, "MC1" },
+ { F_T7_ULP_TX, "ULP TX" },
+ { F_T7_SGE, "SGE" },
+ { F_T7_CPL_SWITCH, "CPL Switch" },
+ { F_T7_ULP_RX, "ULP RX" },
+ { F_T7_PM_RX, "PM RX" },
+ { F_T7_PM_TX, "PM TX" },
+ { F_T7_MA, "MA" },
+ { F_T7_TP, "TP" },
+ { F_T7_LE, "LE" },
+ { F_T7_EDC1, "EDC1" },
+ { F_T7_EDC0, "EDC0" },
+ { F_T7_MC0, "MC0" },
+ { F_T7_PCIE, "PCIE" },
+ { F_MAC3, "MAC3" },
+ { F_MAC2, "MAC2" },
+ { F_MAC1, "MAC1" },
+ { F_MAC0, "MAC0" },
+ { F_SMB, "SMB" },
+ { F_PL, "PL" },
+ { F_NCSI, "NC-SI" },
+ { F_MPS, "MPS" },
+ { F_DBG, "DBG" },
+ { F_I2CM, "I2CM" },
+ { F_MI, "MI" },
+ { F_CIM, "CIM" },
+ { 0 }
+ };
+ struct intr_info pl_perr_cause = {
.name = "PL_PERR_CAUSE",
.cause_reg = A_PL_PERR_CAUSE,
.enable_reg = A_PL_PERR_ENABLE,
.fatal = 0xffffffff,
- .flags = 0,
- .details = pl_intr_details,
+ .flags = NONFATAL_IF_DISABLED,
+ .details = NULL,
.actions = NULL,
};
static const struct intr_action pl_intr_action[] = {
@@ -5381,17 +6151,53 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
{ F_CIM, -1, cim_intr_handler },
{ 0 }
};
- static const struct intr_info pl_intr_info = {
+ static const struct intr_action t7_pl_intr_action[] = {
+ { F_T7_ULP_TX, -1, ulptx_intr_handler },
+ { F_T7_SGE, -1, sge_intr_handler },
+ { F_T7_CPL_SWITCH, -1, cplsw_intr_handler },
+ { F_T7_ULP_RX, -1, ulprx_intr_handler },
+ { F_T7_PM_RX, -1, pmrx_intr_handler},
+ { F_T7_PM_TX, -1, pmtx_intr_handler},
+ { F_T7_MA, -1, ma_intr_handler },
+ { F_T7_TP, -1, tp_intr_handler },
+ { F_T7_LE, -1, le_intr_handler },
+ { F_T7_EDC1, MEM_EDC1, mem_intr_handler },
+ { F_T7_EDC0, MEM_EDC0, mem_intr_handler },
+ { F_T7_MC1, MEM_MC1, mem_intr_handler },
+ { F_T7_MC0, MEM_MC0, mem_intr_handler },
+ { F_T7_PCIE, -1, pcie_intr_handler },
+ { F_MAC3, 3, mac_intr_handler},
+ { F_MAC2, 2, mac_intr_handler},
+ { F_MAC1, 1, mac_intr_handler},
+ { F_MAC0, 0, mac_intr_handler},
+ { F_SMB, -1, smb_intr_handler},
+ { F_PL, -1, plpl_intr_handler },
+ { F_NCSI, -1, ncsi_intr_handler},
+ { F_MPS, -1, mps_intr_handler },
+ { F_CIM, -1, cim_intr_handler },
+ { 0 }
+ };
+ struct intr_info pl_intr_info = {
.name = "PL_INT_CAUSE",
.cause_reg = A_PL_INT_CAUSE,
.enable_reg = A_PL_INT_ENABLE,
.fatal = 0,
.flags = 0,
- .details = pl_intr_details,
- .actions = pl_intr_action,
+ .details = NULL,
+ .actions = NULL,
};
u32 perr;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ pl_perr_cause.details = t7_pl_intr_details;
+ pl_intr_info.details = t7_pl_intr_details;
+ pl_intr_info.actions = t7_pl_intr_action;
+ } else {
+ pl_perr_cause.details = pl_intr_details;
+ pl_intr_info.details = pl_intr_details;
+ pl_intr_info.actions = pl_intr_action;
+ }
+
perr = t4_read_reg(adap, pl_perr_cause.cause_reg);
if (verbose || perr != 0) {
t4_show_intr_info(adap, &pl_perr_cause, perr);
@@ -5421,19 +6227,20 @@ bool t4_slow_intr_handler(struct adapter *adap, bool verbose)
*/
void t4_intr_enable(struct adapter *adap)
{
- u32 val = 0;
+ u32 mask, val;
if (chip_id(adap) <= CHELSIO_T5)
- val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT;
+ val = F_ERR_DROPPED_DB | F_ERR_EGR_CTXT_PRIO | F_DBFIFO_HP_INT |
+ F_DBFIFO_LP_INT;
else
val = F_ERR_PCIE_ERROR0 | F_ERR_PCIE_ERROR1 | F_FATAL_WRE_LEN;
val |= F_ERR_CPL_EXCEED_IQE_SIZE | F_ERR_INVALID_CIDX_INC |
F_ERR_CPL_OPCODE_0 | F_ERR_DATA_CPL_ON_HIGH_QID1 |
F_INGRESS_SIZE_ERR | F_ERR_DATA_CPL_ON_HIGH_QID0 |
F_ERR_BAD_DB_PIDX3 | F_ERR_BAD_DB_PIDX2 | F_ERR_BAD_DB_PIDX1 |
- F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_DBFIFO_LP_INT |
- F_EGRESS_SIZE_ERR;
- t4_set_reg_field(adap, A_SGE_INT_ENABLE3, val, val);
+ F_ERR_BAD_DB_PIDX0 | F_ERR_ING_CTXT_PRIO | F_EGRESS_SIZE_ERR;
+ mask = val;
+ t4_set_reg_field(adap, A_SGE_INT_ENABLE3, mask, val);
t4_write_reg(adap, MYPF_REG(A_PL_PF_INT_ENABLE), PF_INTR_MASK);
t4_set_reg_field(adap, A_PL_INT_ENABLE, F_SF | F_I2CM, 0);
t4_set_reg_field(adap, A_PL_INT_MAP0, 0, 1 << adap->pf);
@@ -6184,6 +6991,11 @@ void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
{
t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, A_TP_MIB_RQE_DFR_PKT,
sleep_ok);
+
+ if (chip_id(adap) >= CHELSIO_T7)
+		/* read the RDMA IN and OUT stats for all channels at once */
+ t4_tp_mib_read(adap, &st->pkts_in[0], 28, A_TP_MIB_RDMA_IN_PKT_0,
+ sleep_ok);
}
/**
@@ -6564,16 +7376,24 @@ void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
int idx, int enable)
{
- int i, ofst = idx * 4;
+ int i, ofst;
+ u32 match_ctl_a, match_ctl_b;
u32 data_reg, mask_reg, cfg;
u32 en = is_t4(adap) ? F_TFEN : F_T5_TFEN;
if (idx < 0 || idx >= NTRACE)
return -EINVAL;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ match_ctl_a = T7_MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = T7_MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ } else {
+ match_ctl_a = MPS_TRC_FILTER_MATCH_CTL_A(idx);
+ match_ctl_b = MPS_TRC_FILTER_MATCH_CTL_B(idx);
+ }
+
if (tp == NULL || !enable) {
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en,
- enable ? en : 0);
+ t4_set_reg_field(adap, match_ctl_a, en, enable ? en : 0);
return 0;
}
@@ -6610,22 +7430,20 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
return -EINVAL;
/* stop the tracer we'll be changing */
- t4_set_reg_field(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst, en, 0);
+ t4_set_reg_field(adap, match_ctl_a, en, 0);
- idx *= (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH);
- data_reg = A_MPS_TRC_FILTER0_MATCH + idx;
- mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + idx;
+ ofst = (A_MPS_TRC_FILTER1_MATCH - A_MPS_TRC_FILTER0_MATCH) * idx;
+ data_reg = A_MPS_TRC_FILTER0_MATCH + ofst;
+ mask_reg = A_MPS_TRC_FILTER0_DONT_CARE + ofst;
for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
t4_write_reg(adap, data_reg, tp->data[i]);
t4_write_reg(adap, mask_reg, ~tp->mask[i]);
}
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst,
- V_TFCAPTUREMAX(tp->snap_len) |
+ t4_write_reg(adap, match_ctl_b, V_TFCAPTUREMAX(tp->snap_len) |
V_TFMINPKTSIZE(tp->min_len));
- t4_write_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst,
- V_TFOFFSET(tp->skip_ofst) | V_TFLENGTH(tp->skip_len) | en |
- (is_t4(adap) ?
+ t4_write_reg(adap, match_ctl_a, V_TFOFFSET(tp->skip_ofst) |
+ V_TFLENGTH(tp->skip_len) | en | (is_t4(adap) ?
V_TFPORT(tp->port) | V_TFINVERTMATCH(tp->invert) :
V_T5_TFPORT(tp->port) | V_T5_TFINVERTMATCH(tp->invert)));
@@ -6645,11 +7463,16 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
int *enabled)
{
u32 ctla, ctlb;
- int i, ofst = idx * 4;
+ int i, ofst;
u32 data_reg, mask_reg;
- ctla = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_A + ofst);
- ctlb = t4_read_reg(adap, A_MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ ctla = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, T7_MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ } else {
+ ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A(idx));
+ ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B(idx));
+ }
if (is_t4(adap)) {
*enabled = !!(ctla & F_TFEN);
@@ -6676,6 +7499,37 @@ void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
}
/**
+ *	t4_set_trace_rss_control - configure the trace RSS control register
+ *	@adap: the adapter
+ *	@chan: the channel number for RSS control
+ *	@qid: queue number
+ *
+ *	Configures the MPS trace RSS control register with the specified
+ *	channel (@chan) and queue number (@qid).
+ */
+void t4_set_trace_rss_control(struct adapter *adap, u8 chan, u16 qid)
+{
+ u32 mps_trc_rss_control;
+
+ switch (chip_id(adap)) {
+ case CHELSIO_T4:
+ mps_trc_rss_control = A_MPS_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ mps_trc_rss_control = A_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ case CHELSIO_T7:
+ default:
+ mps_trc_rss_control = A_T7_MPS_T5_TRC_RSS_CONTROL;
+ break;
+ }
+
+ t4_write_reg(adap, mps_trc_rss_control,
+ V_RSSCONTROL(chan) | V_QUEUENUMBER(qid));
+}
+
+/**
* t4_pmtx_get_stats - returns the HW stats from PMTX
* @adap: the adapter
* @cnt: where to store the count statistics
@@ -6696,6 +7550,8 @@ void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
else {
t4_read_indirect(adap, A_PM_TX_DBG_CTRL,
A_PM_TX_DBG_DATA, data, 2,
+ chip_id(adap) >= CHELSIO_T7 ?
+ A_T7_PM_TX_DBG_STAT_MSB :
A_PM_TX_DBG_STAT_MSB);
cycles[i] = (((u64)data[0] << 32) | data[1]);
}
@@ -6730,6 +7586,25 @@ void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
}
/**
+ * t4_pmrx_cache_get_stats - returns the HW PMRX cache stats
+ * @adap: the adapter
+ * @stats: where to store the statistics
+ *
+ * Returns performance statistics of PMRX cache.
+ */
+void t4_pmrx_cache_get_stats(struct adapter *adap, u32 stats[])
+{
+ u8 i, j;
+
+ for (i = 0, j = 0; i < T7_PM_RX_CACHE_NSTATS / 3; i++, j += 3) {
+ t4_write_reg(adap, A_PM_RX_STAT_CONFIG, 0x100 + i);
+ stats[j] = t4_read_reg(adap, A_PM_RX_STAT_COUNT);
+ t4_read_indirect(adap, A_PM_RX_DBG_CTRL, A_PM_RX_DBG_DATA,
+ &stats[j + 1], 2, A_PM_RX_DBG_STAT_MSB);
+ }
+}
+
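The stats[] layout this produces, inferred from the loop above and the matching MSB-first read in t4_pmtx_get_stats():

    u32 count  = stats[3 * i];          /* A_PM_RX_STAT_COUNT for selector 0x100 + i */
    u64 cycles = ((u64)stats[3 * i + 1] << 32) | stats[3 * i + 2];  /* MSB, then LSB */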
+/**
* t4_get_mps_bg_map - return the buffer groups associated with a port
* @adap: the adapter
* @idx: the port index
@@ -6762,11 +7637,24 @@ static unsigned int t4_get_rx_e_chan_map(struct adapter *adap, int idx)
const u32 n = adap->params.nports;
const u32 all_chan = (1 << adap->chip_params->nchan) - 1;
- if (n == 1)
- return idx == 0 ? all_chan : 0;
- if (n == 2 && chip_id(adap) <= CHELSIO_T5)
- return idx < 2 ? (3 << (2 * idx)) : 0;
- return 1 << idx;
+ switch (adap->params.tp.lb_mode) {
+ case 0:
+ if (n == 1)
+ return (all_chan);
+ if (n == 2 && chip_id(adap) <= CHELSIO_T5)
+ return (3 << (2 * idx));
+ return (1 << idx);
+ case 1:
+ MPASS(n == 1);
+ return (all_chan);
+ case 2:
+ MPASS(n <= 2);
+ return (3 << (2 * idx));
+ default:
+ CH_ERR(adap, "Unsupported LB mode %d\n",
+ adap->params.tp.lb_mode);
+ return (0);
+ }
}
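A quick worked example of the mapping above:

    /* lb_mode == 2, idx == 1: 3 << (2 * 1) == 0xc -> channels 2 and 3 */
    /* lb_mode == 1: the single port owns all channels (all_chan)      */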
/*
@@ -6784,6 +7672,8 @@ static unsigned int t4_get_rx_c_chan(struct adapter *adap, int idx)
*/
static unsigned int t4_get_tx_c_chan(struct adapter *adap, int idx)
{
+ if (adap->params.tx_tp_ch_map != UINT32_MAX)
+ return (adap->params.tx_tp_ch_map >> (8 * idx)) & 0xff;
return idx;
}
@@ -6856,79 +7746,89 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
*/
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
- struct port_info *pi = adap->port[idx];
- u32 bgmap = pi->mps_bg_map;
- u32 stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
+ struct port_info *pi;
+ int port_id, tx_chan;
+ u32 bgmap, stat_ctl;
+
+ port_id = adap->port_map[idx];
+	MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
#define GET_STAT(name) \
t4_read_reg64(adap, \
- t4_port_reg(adap, pi->tx_chan, A_MPS_PORT_STAT_##name##_L));
-#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ t4_port_reg(adap, tx_chan, A_MPS_PORT_STAT_##name##_L));
+ memset(p, 0, sizeof(*p));
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ p->tx_pause += GET_STAT(TX_PORT_PAUSE);
+ p->tx_octets += GET_STAT(TX_PORT_BYTES);
+ p->tx_frames += GET_STAT(TX_PORT_FRAMES);
+ p->tx_bcast_frames += GET_STAT(TX_PORT_BCAST);
+ p->tx_mcast_frames += GET_STAT(TX_PORT_MCAST);
+ p->tx_ucast_frames += GET_STAT(TX_PORT_UCAST);
+ p->tx_error_frames += GET_STAT(TX_PORT_ERROR);
+ p->tx_frames_64 += GET_STAT(TX_PORT_64B);
+ p->tx_frames_65_127 += GET_STAT(TX_PORT_65B_127B);
+ p->tx_frames_128_255 += GET_STAT(TX_PORT_128B_255B);
+ p->tx_frames_256_511 += GET_STAT(TX_PORT_256B_511B);
+ p->tx_frames_512_1023 += GET_STAT(TX_PORT_512B_1023B);
+ p->tx_frames_1024_1518 += GET_STAT(TX_PORT_1024B_1518B);
+ p->tx_frames_1519_max += GET_STAT(TX_PORT_1519B_MAX);
+ p->tx_drop += GET_STAT(TX_PORT_DROP);
+ p->tx_ppp0 += GET_STAT(TX_PORT_PPP0);
+ p->tx_ppp1 += GET_STAT(TX_PORT_PPP1);
+ p->tx_ppp2 += GET_STAT(TX_PORT_PPP2);
+ p->tx_ppp3 += GET_STAT(TX_PORT_PPP3);
+ p->tx_ppp4 += GET_STAT(TX_PORT_PPP4);
+ p->tx_ppp5 += GET_STAT(TX_PORT_PPP5);
+ p->tx_ppp6 += GET_STAT(TX_PORT_PPP6);
+ p->tx_ppp7 += GET_STAT(TX_PORT_PPP7);
+
+ p->rx_pause += GET_STAT(RX_PORT_PAUSE);
+ p->rx_octets += GET_STAT(RX_PORT_BYTES);
+ p->rx_frames += GET_STAT(RX_PORT_FRAMES);
+ p->rx_bcast_frames += GET_STAT(RX_PORT_BCAST);
+ p->rx_mcast_frames += GET_STAT(RX_PORT_MCAST);
+ p->rx_ucast_frames += GET_STAT(RX_PORT_UCAST);
+ p->rx_too_long += GET_STAT(RX_PORT_MTU_ERROR);
+ p->rx_jabber += GET_STAT(RX_PORT_MTU_CRC_ERROR);
+ p->rx_len_err += GET_STAT(RX_PORT_LEN_ERROR);
+ p->rx_symbol_err += GET_STAT(RX_PORT_SYM_ERROR);
+ p->rx_runt += GET_STAT(RX_PORT_LESS_64B);
+ p->rx_frames_64 += GET_STAT(RX_PORT_64B);
+ p->rx_frames_65_127 += GET_STAT(RX_PORT_65B_127B);
+ p->rx_frames_128_255 += GET_STAT(RX_PORT_128B_255B);
+ p->rx_frames_256_511 += GET_STAT(RX_PORT_256B_511B);
+ p->rx_frames_512_1023 += GET_STAT(RX_PORT_512B_1023B);
+ p->rx_frames_1024_1518 += GET_STAT(RX_PORT_1024B_1518B);
+ p->rx_frames_1519_max += GET_STAT(RX_PORT_1519B_MAX);
+ p->rx_ppp0 += GET_STAT(RX_PORT_PPP0);
+ p->rx_ppp1 += GET_STAT(RX_PORT_PPP1);
+ p->rx_ppp2 += GET_STAT(RX_PORT_PPP2);
+ p->rx_ppp3 += GET_STAT(RX_PORT_PPP3);
+ p->rx_ppp4 += GET_STAT(RX_PORT_PPP4);
+ p->rx_ppp5 += GET_STAT(RX_PORT_PPP5);
+ p->rx_ppp6 += GET_STAT(RX_PORT_PPP6);
+ p->rx_ppp7 += GET_STAT(RX_PORT_PPP7);
+ if (!is_t6(adap)) {
+ MPASS(pi->fcs_reg == A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
+ p->rx_fcs_err += GET_STAT(RX_PORT_CRC_ERROR);
+ }
+ }
+#undef GET_STAT
- p->tx_pause = GET_STAT(TX_PORT_PAUSE);
- p->tx_octets = GET_STAT(TX_PORT_BYTES);
- p->tx_frames = GET_STAT(TX_PORT_FRAMES);
- p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
- p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
- p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
- p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
- p->tx_frames_64 = GET_STAT(TX_PORT_64B);
- p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
- p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
- p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
- p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
- p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
- p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
- p->tx_drop = GET_STAT(TX_PORT_DROP);
- p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
- p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
- p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
- p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
- p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
- p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
- p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
- p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (is_t6(adap) && pi->fcs_reg != -1)
+ p->rx_fcs_err = t4_read_reg64(adap,
+ t4_port_reg(adap, pi->tx_chan, pi->fcs_reg)) - pi->fcs_base;
if (chip_id(adap) >= CHELSIO_T5) {
+ stat_ctl = t4_read_reg(adap, A_MPS_STAT_CTL);
if (stat_ctl & F_COUNTPAUSESTATTX) {
p->tx_frames -= p->tx_pause;
p->tx_octets -= p->tx_pause * 64;
}
if (stat_ctl & F_COUNTPAUSEMCTX)
p->tx_mcast_frames -= p->tx_pause;
- }
-
- p->rx_pause = GET_STAT(RX_PORT_PAUSE);
- p->rx_octets = GET_STAT(RX_PORT_BYTES);
- p->rx_frames = GET_STAT(RX_PORT_FRAMES);
- p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
- p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
- p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
- p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
- p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
- p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
- p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
- p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
- p->rx_frames_64 = GET_STAT(RX_PORT_64B);
- p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
- p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
- p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
- p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
- p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
- p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
- p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
- p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
- p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
- p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
- p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
- p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
- p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
- p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
-
- if (pi->fcs_reg != -1)
- p->rx_fcs_err = t4_read_reg64(adap, pi->fcs_reg) - pi->fcs_base;
-
- if (chip_id(adap) >= CHELSIO_T5) {
if (stat_ctl & F_COUNTPAUSESTATRX) {
p->rx_frames -= p->rx_pause;
p->rx_octets -= p->rx_pause * 64;
@@ -6937,6 +7837,8 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_mcast_frames -= p->rx_pause;
}
+#define GET_STAT_COM(name) t4_read_reg64(adap, A_MPS_STAT_##name##_L)
+ bgmap = pi->mps_bg_map;
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -6945,8 +7847,6 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
-
-#undef GET_STAT
#undef GET_STAT_COM
}
@@ -7016,10 +7916,14 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
mag_id_reg_l = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, A_XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- } else {
+ } else if (chip_id(adap) < CHELSIO_T7) {
mag_id_reg_l = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = T5_PORT_REG(port, A_MAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ } else {
+ mag_id_reg_l = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_LO);
+ mag_id_reg_h = T7_PORT_REG(port, A_T7_MAC_PORT_MAGIC_MACID_HI);
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
}
if (addr) {
@@ -7056,8 +7960,10 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
if (is_t4(adap))
port_cfg_reg = PORT_REG(port, A_XGMAC_PORT_CFG2);
- else
+ else if (chip_id(adap) < CHELSIO_T7)
port_cfg_reg = T5_PORT_REG(port, A_MAC_PORT_CFG2);
+ else
+ port_cfg_reg = T7_PORT_REG(port, A_MAC_PORT_CFG2);
if (!enable) {
t4_set_reg_field(adap, port_cfg_reg, F_PATEN, 0);
@@ -7348,6 +8254,7 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
break;
case CHELSIO_T6:
+ case CHELSIO_T7:
sge_idma_decode = (const char * const *)t6_decode;
sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
break;
@@ -8964,7 +9871,7 @@ static void handle_port_info(struct port_info *pi, const struct fw_port_cmd *p,
memset(&cmd, 0, sizeof(cmd));
cmd.op_to_portid = cpu_to_be32(V_FW_CMD_OP(FW_PORT_CMD) |
F_FW_CMD_REQUEST | F_FW_CMD_READ |
- V_FW_PORT_CMD_PORTID(pi->tx_chan));
+ V_FW_PORT_CMD_PORTID(pi->hw_port));
action = sc->params.port_caps32 ? FW_PORT_ACTION_GET_PORT_INFO32 :
FW_PORT_ACTION_GET_PORT_INFO;
cmd.action_to_len16 = cpu_to_be32(V_FW_PORT_CMD_ACTION(action) |
@@ -8996,16 +9903,12 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
(action == FW_PORT_ACTION_GET_PORT_INFO ||
action == FW_PORT_ACTION_GET_PORT_INFO32)) {
/* link/module state change message */
- int i;
- int chan = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
- struct port_info *pi = NULL;
-
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- if (pi->tx_chan == chan)
- break;
- }
+ int hw_port = G_FW_PORT_CMD_PORTID(be32_to_cpu(p->op_to_portid));
+ int port_id = adap->port_map[hw_port];
+ struct port_info *pi;
+ MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
PORT_LOCK(pi);
handle_port_info(pi, p, action, &mod_changed, &link_changed);
PORT_UNLOCK(pi);
@@ -9159,14 +10062,15 @@ int t4_get_flash_params(struct adapter *adapter)
}
/* If we didn't recognize the FLASH part, that's no real issue: the
- * Hardware/Software contract says that Hardware will _*ALWAYS*_
- * use a FLASH part which is at least 4MB in size and has 64KB
- * sectors. The unrecognized FLASH part is likely to be much larger
- * than 4MB, but that's all we really need.
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_ use a
+ * FLASH part which has 64KB sectors and is at least 4MB or 16MB in
+ * size, depending on the board.
*/
if (size == 0) {
- CH_WARN(adapter, "Unknown Flash Part, ID = %#x, assuming 4MB\n", flashid);
- size = 1 << 22;
+ size = chip_id(adapter) >= CHELSIO_T7 ? 16 : 4;
+ CH_WARN(adapter, "Unknown Flash Part %#x, assuming %uMB\n",
+ flashid, size);
+ size <<= 20;
}
/*
@@ -9212,11 +10116,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 15,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ,
.filter_opt_len = FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9227,11 +10134,14 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = PM_NSTATS,
.cng_ch_bits_log = 2,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 128,
.vfcount = 128,
.sge_fl_db = F_DBPRIO | F_DBTYPE,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE,
@@ -9242,15 +10152,36 @@ const struct chip_params *t4_get_chip_params(int chipid)
.pm_stats_cnt = T6_PM_NSTATS,
.cng_ch_bits_log = 3,
.nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ,
.cim_num_obq = CIM_NUM_OBQ_T5,
.filter_opt_len = T5_FILTER_OPT_LEN,
+ .filter_num_opt = S_FT_LAST + 1,
.mps_rplc_size = 256,
.vfcount = 256,
.sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE,
.mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
.rss_nentries = T6_RSS_NENTRIES,
.cim_la_size = CIMLA_SIZE_T6,
},
+ {
+ /* T7 */
+ .nchan = NCHAN,
+ .pm_stats_cnt = T6_PM_NSTATS,
+ .cng_ch_bits_log = 2,
+ .nsched_cls = 16,
+ .cim_num_ibq = CIM_NUM_IBQ_T7,
+ .cim_num_obq = CIM_NUM_OBQ_T7,
+ .filter_opt_len = T7_FILTER_OPT_LEN,
+ .filter_num_opt = S_T7_FT_LAST + 1,
+ .mps_rplc_size = 256,
+ .vfcount = 256,
+ .sge_fl_db = 0,
+ .sge_ctxt_size = SGE_CTXT_SIZE_T7,
+ .mps_tcam_size = NUM_MPS_T5_CLS_SRAM_L_INSTANCES,
+ .rss_nentries = T7_RSS_NENTRIES,
+ .cim_la_size = CIMLA_SIZE_T6,
+ },
};
chipid -= CHELSIO_T4;
@@ -9466,14 +10397,11 @@ int t4_bar2_sge_qregs(struct adapter *adapter,
}
/**
- * t4_init_devlog_params - initialize adapter->params.devlog
+ * t4_init_devlog_ncores_params - initialize adap->params.devlog and ncores
* @adap: the adapter
* @fw_attach: whether we can talk to the firmware
- *
- * Initialize various fields of the adapter's Firmware Device Log
- * Parameters structure.
*/
-int t4_init_devlog_params(struct adapter *adap, int fw_attach)
+int t4_init_devlog_ncores_params(struct adapter *adap, int fw_attach)
{
struct devlog_params *dparams = &adap->params.devlog;
u32 pf_dparams;
@@ -9487,12 +10415,15 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
*/
pf_dparams =
t4_read_reg(adap, PCIE_FW_REG(A_PCIE_FW_PF, PCIE_FW_PF_DEVLOG));
- if (pf_dparams) {
- unsigned int nentries, nentries128;
+ if (pf_dparams && pf_dparams != UINT32_MAX) {
+ unsigned int nentries, nentries128, ncore_shift;
+
+ ncore_shift = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(pf_dparams) << 1) |
+ G_PCIE_FW_PF_DEVLOG_COUNT_LSB(pf_dparams);
+ adap->params.ncores = 1 << ncore_shift;
dparams->memtype = G_PCIE_FW_PF_DEVLOG_MEMTYPE(pf_dparams);
dparams->start = G_PCIE_FW_PF_DEVLOG_ADDR16(pf_dparams) << 4;
-
nentries128 = G_PCIE_FW_PF_DEVLOG_NENTRIES128(pf_dparams);
nentries = (nentries128 + 1) * 128;
dparams->size = nentries * sizeof(struct fw_devlog_e);
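The core-count decode above reassembles a 2-bit shift from the MSB and LSB fields of the devlog register and uses it as a power-of-two uP core count. A minimal standalone sketch of the arithmetic, with made-up field values:

/* Sketch only: msb/lsb stand in for the G_PCIE_FW_PF_DEVLOG_COUNT_* fields. */
#include <stdio.h>

int main(void)
{
	unsigned int msb = 1, lsb = 0;			/* example field values */
	unsigned int ncore_shift = (msb << 1) | lsb;	/* 0b10 == 2 */
	unsigned int ncores = 1u << ncore_shift;	/* 1 << 2 == 4 cores */

	printf("ncore_shift=%u ncores=%u\n", ncore_shift, ncores);
	return (0);
}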
@@ -9503,6 +10434,7 @@ int t4_init_devlog_params(struct adapter *adap, int fw_attach)
/*
* For any failing returns ...
*/
+ adap->params.ncores = 1;
memset(dparams, 0, sizeof *dparams);
/*
@@ -9624,21 +10556,28 @@ int t4_init_sge_params(struct adapter *adapter)
/* Convert the LE's hardware hash mask to a shorter filter mask. */
static inline uint16_t
-hashmask_to_filtermask(uint64_t hashmask, uint16_t filter_mode)
+hashmask_to_filtermask(struct adapter *adap, uint64_t hashmask, uint16_t filter_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
- int i;
+ int first, last, i;
uint16_t filter_mask;
- uint64_t mask; /* field mask */
+ uint64_t mask; /* field mask */
+
+ if (chip_id(adap) >= CHELSIO_T7) {
+ first = S_T7_FT_FIRST;
+ last = S_T7_FT_LAST;
+ } else {
+ first = S_FT_FIRST;
+ last = S_FT_LAST;
+ }
- filter_mask = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (filter_mask = 0, i = first; i <= last; i++) {
if ((filter_mode & (1 << i)) == 0)
continue;
- mask = (1 << width[i]) - 1;
+ mask = (1 << t4_filter_field_width(adap, i)) - 1;
if ((hashmask & mask) == mask)
filter_mask |= 1 << i;
- hashmask >>= width[i];
+ hashmask >>= t4_filter_field_width(adap, i);
}
return (filter_mask);
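A self-contained sketch of the compression hashmask_to_filtermask() performs, seeded with the first four legacy T4 field widths from the table removed above; a filter-mode bit survives into the short mask only when every hash-mask bit covering that field is set:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint8_t width[] = {1, 3, 17, 17};	/* FCOE..VLAN (T4) */
	uint64_t hashmask = 0x3ffffffffeULL;	/* fields 1-3 fully masked */
	uint16_t filter_mode = 0xf;		/* all four fields enabled */
	uint16_t filter_mask = 0;
	uint64_t mask;
	int i;

	for (i = 0; i < 4; i++) {
		if ((filter_mode & (1 << i)) == 0)
			continue;
		mask = (1ULL << width[i]) - 1;	/* field's slice of the hash mask */
		if ((hashmask & mask) == mask)
			filter_mask |= 1 << i;
		hashmask >>= width[i];		/* advance to the next field */
	}
	printf("filter_mask=%#x\n", filter_mask);	/* prints 0xe */
	return (0);
}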
@@ -9681,7 +10620,15 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
v = t4_read_reg(adap, LE_HASH_MASK_GEN_IPV4T5(4));
hash_mask |= (u64)v << 32;
}
- tpp->filter_mask = hashmask_to_filtermask(hash_mask,
+ if (chip_id(adap) >= CHELSIO_T7) {
+ /*
+			 * This param predates T7, so T7+ firmwares should
+			 * always support this query.
+ */
+ CH_WARN(adap, "query for filter mode/mask failed: %d\n",
+ rc);
+ }
+ tpp->filter_mask = hashmask_to_filtermask(adap, hash_mask,
tpp->filter_mode);
t4_tp_pio_read(adap, &v, 1, A_TP_INGRESS_CONFIG, true);
@@ -9696,16 +10643,37 @@ read_filter_mode_and_ingress_config(struct adapter *adap)
* shift positions of several elements of the Compressed Filter Tuple
* for this adapter which we need frequently ...
*/
- tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
- tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
- tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
- tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
- tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
- tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
- tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
- tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
- tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
- tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ if (chip_id(adap) >= CHELSIO_T7) {
+ tpp->ipsecidx_shift = t4_filter_field_shift(adap, F_IPSECIDX);
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_T7_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_T7_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_T7_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_T7_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_T7_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_T7_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_T7_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_T7_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_T7_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_T7_FRAGMENTATION);
+ tpp->roce_shift = t4_filter_field_shift(adap, F_ROCE);
+ tpp->synonly_shift = t4_filter_field_shift(adap, F_SYNONLY);
+ tpp->tcpflags_shift = t4_filter_field_shift(adap, F_TCPFLAGS);
+ } else {
+ tpp->ipsecidx_shift = -1;
+ tpp->fcoe_shift = t4_filter_field_shift(adap, F_FCOE);
+ tpp->port_shift = t4_filter_field_shift(adap, F_PORT);
+ tpp->vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ tpp->vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ tpp->tos_shift = t4_filter_field_shift(adap, F_TOS);
+ tpp->protocol_shift = t4_filter_field_shift(adap, F_PROTOCOL);
+ tpp->ethertype_shift = t4_filter_field_shift(adap, F_ETHERTYPE);
+ tpp->macmatch_shift = t4_filter_field_shift(adap, F_MACMATCH);
+ tpp->matchtype_shift = t4_filter_field_shift(adap, F_MPSHITTYPE);
+ tpp->frag_shift = t4_filter_field_shift(adap, F_FRAGMENTATION);
+ tpp->roce_shift = -1;
+ tpp->synonly_shift = -1;
+ tpp->tcpflags_shift = -1;
+ }
}
/**
@@ -9725,11 +10693,21 @@ int t4_init_tp_params(struct adapter *adap)
read_filter_mode_and_ingress_config(adap);
+ tpp->rx_pkt_encap = false;
+ tpp->lb_mode = 0;
+ tpp->lb_nchan = 1;
if (chip_id(adap) > CHELSIO_T5) {
v = t4_read_reg(adap, A_TP_OUT_CONFIG);
tpp->rx_pkt_encap = v & F_CRXPKTENC;
- } else
- tpp->rx_pkt_encap = false;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ t4_tp_pio_read(adap, &v, 1, A_TP_CHANNEL_MAP, true);
+ tpp->lb_mode = G_T7_LB_MODE(v);
+ if (tpp->lb_mode == 1)
+ tpp->lb_nchan = 4;
+ else if (tpp->lb_mode == 2)
+ tpp->lb_nchan = 2;
+ }
+ }
rx_len = t4_read_reg(adap, A_TP_PMM_RX_PAGE_SIZE);
tx_len = t4_read_reg(adap, A_TP_PMM_TX_PAGE_SIZE);
@@ -9750,6 +10728,53 @@ int t4_init_tp_params(struct adapter *adap)
}
/**
+ * t4_filter_field_width - returns the width of a filter field
+ * @adap: the adapter
+ * @filter_field: the filter field whose width is being requested
+ *
+ *	Return the width of a filter field within the Compressed Filter
+ *	Tuple. The filter field is specified via its bit position within
+ *	TP_VLAN_PRI_MAP (filter mode), e.g. S_VLAN.
+ */
+int t4_filter_field_width(const struct adapter *adap, int filter_field)
+{
+ const int nopt = adap->chip_params->filter_num_opt;
+ static const uint8_t width_t7[] = {
+ W_FT_IPSECIDX,
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION,
+ W_FT_ROCE,
+ W_FT_SYNONLY,
+ W_FT_TCPFLAGS
+ };
+ static const uint8_t width_t4[] = {
+ W_FT_FCOE,
+ W_FT_PORT,
+ W_FT_VNIC_ID,
+ W_FT_VLAN,
+ W_FT_TOS,
+ W_FT_PROTOCOL,
+ W_FT_ETHERTYPE,
+ W_FT_MACMATCH,
+ W_FT_MPSHITTYPE,
+ W_FT_FRAGMENTATION
+ };
+ const uint8_t *width = chip_id(adap) >= CHELSIO_T7 ? width_t7 : width_t4;
+
+ if (filter_field < 0 || filter_field >= nopt)
+ return (0);
+ return (width[filter_field]);
+}
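A minimal usage sketch for the width helper: totalling the compressed-tuple bits a candidate filter mode would consume, as t4_set_filter_cfg() does later in this patch. The width table is the legacy T4 one copied from the code removed above; the ex_ names are illustrative:

#include <stdint.h>
#include <stdio.h>

static const uint8_t ex_width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};

static int
ex_mode_nbits(uint16_t fmode)
{
	int i, nbits = 0;

	for (i = 0; i < (int)(sizeof(ex_width) / sizeof(ex_width[0])); i++)
		if (fmode & (1 << i))
			nbits += ex_width[i];	/* enabled fields only */
	return (nbits);
}

int main(void)
{
	/* VNIC_ID + VLAN + PROTOCOL: 17 + 17 + 8 = 42 bits. */
	printf("nbits=%d\n", ex_mode_nbits((1 << 2) | (1 << 3) | (1 << 5)));
	return (0);
}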
+
+/**
* t4_filter_field_shift - calculate filter field shift
* @adap: the adapter
* @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
@@ -9767,6 +10792,56 @@ int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
if ((filter_mode & filter_sel) == 0)
return -1;
+ if (chip_id(adap) >= CHELSIO_T7) {
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_IPSECIDX:
+ field_shift += W_FT_IPSECIDX;
+ break;
+ case F_T7_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_T7_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_T7_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_T7_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_T7_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_T7_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_T7_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_T7_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_T7_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_T7_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ case F_ROCE:
+ field_shift += W_FT_ROCE;
+ break;
+ case F_SYNONLY:
+ field_shift += W_FT_SYNONLY;
+ break;
+ case F_TCPFLAGS:
+ field_shift += W_FT_TCPFLAGS;
+ break;
+ }
+ }
+ return field_shift;
+ }
+
for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
switch (filter_mode & sel) {
case F_FCOE:
@@ -9818,11 +10893,11 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
} while ((adap->params.portvec & (1 << j)) == 0);
}
+ p->hw_port = j;
p->tx_chan = t4_get_tx_c_chan(adap, j);
p->rx_chan = t4_get_rx_c_chan(adap, j);
p->mps_bg_map = t4_get_mps_bg_map(adap, j);
p->rx_e_chan_map = t4_get_rx_e_chan_map(adap, j);
- p->lport = j;
if (!(adap->flags & IS_VF) ||
adap->params.vfres.r_caps & FW_CMD_CAP_PORT) {
@@ -9851,232 +10926,321 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf, int port_id)
return 0;
}
+static void t4_read_cimq_cfg_ibq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size, u16 *thres)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_IBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_IBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+ if (thres)
+ *thres = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
+}
+
+static void t4_read_cimq_cfg_obq_core(struct adapter *adap, u8 coreid, u32 qid,
+ u16 *base, u16 *size)
+{
+ unsigned int v, m;
+
+ if (chip_id(adap) > CHELSIO_T6) {
+ v = F_T7_OBQSELECT | V_T7_QUENUMSELECT(qid) |
+ V_CORESELECT(coreid);
+ /* value is in 512-byte units */
+ m = 512;
+ } else {
+ v = F_OBQSELECT | V_QUENUMSELECT(qid);
+ /* value is in 256-byte units */
+ m = 256;
+ }
+
+ t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, v);
+ v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
+ if (base)
+ *base = G_CIMQBASE(v) * m;
+ if (size)
+ *size = G_CIMQSIZE(v) * m;
+}
+
/**
- * t4_read_cimq_cfg - read CIM queue configuration
+ * t4_read_cimq_cfg_core - read CIM queue configuration on specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @base: holds the queue base addresses in bytes
* @size: holds the queue sizes in bytes
* @thres: holds the queue full thresholds in bytes
*
* Returns the current configuration of the CIM queues, starting with
- * the IBQs, then the OBQs.
+ * the IBQs, then the OBQs, on a specific @coreid.
*/
-void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
+void t4_read_cimq_cfg_core(struct adapter *adap, u8 coreid, u16 *base,
+ u16 *size, u16 *thres)
{
- unsigned int i, v;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int i;
- for (i = 0; i < CIM_NUM_IBQ; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_IBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- *thres++ = G_QUEFULLTHRSH(v) * 8; /* 8-byte unit */
- }
- for (i = 0; i < cim_num_obq; i++) {
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(i));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
- /* value is in 256-byte units */
- *base++ = G_CIMQBASE(v) * 256;
- *size++ = G_CIMQSIZE(v) * 256;
- }
+ for (i = 0; i < cim_num_ibq; i++, base++, size++, thres++)
+ t4_read_cimq_cfg_ibq_core(adap, coreid, i, base, size, thres);
+
+ for (i = 0; i < cim_num_obq; i++, base++, size++)
+ t4_read_cimq_cfg_obq_core(adap, coreid, i, base, size);
+}
+
+static int t4_read_cim_ibq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ int ret, attempts;
+ unsigned int v;
+
+ /* It might take 3-10ms before the IBQ debug read access is allowed.
+	 * Wait for up to 1 second with a delay of 1 usec between attempts.
+ */
+ attempts = 1000000;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_IBQDBGADDR(addr) | V_IBQDBGCORE(coreid);
+ else
+ v = V_IBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, v | F_IBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
+ attempts, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ return 0;
}
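t4_wait_op_done() above polls the busy flag until it clears, for at most the given number of attempts with a fixed delay between reads. A simplified, self-contained sketch of that style of bounded busy-wait; the ex_ helper and the function-pointer register read are stand-ins, not driver API:

#include <errno.h>
#include <stdint.h>
#include <unistd.h>

static int
ex_wait_bit_clear(uint32_t (*read_reg)(void), uint32_t busy_bit,
    int attempts, useconds_t delay_us)
{
	while (attempts-- > 0) {
		if ((read_reg() & busy_bit) == 0)
			return (0);	/* busy bit cleared: ready */
		usleep(delay_us);	/* back off, then re-poll */
	}
	return (-ETIMEDOUT);	/* e.g. 1000000 attempts x 1us is ~1 second */
}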
/**
- * t4_read_cim_ibq - read the contents of a CIM inbound queue
+ * t4_read_cim_ibq_core - read the contents of a CIM inbound queue on
+ * specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ * to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_ibq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err, attempts;
- unsigned int addr;
- const unsigned int nwords = CIM_IBQ_SIZE * 4;
+ unsigned int cim_num_ibq = adap->chip_params->cim_num_ibq;
+ u16 i, addr, nwords;
+ int ret;
- if (qid > 5 || (n & 3))
+ if (qid > (cim_num_ibq - 1) || (n & 3))
return -EINVAL;
- addr = qid * nwords;
+ t4_read_cimq_cfg_ibq_core(adap, coreid, qid, &addr, &nwords, NULL);
+	addr >>= sizeof(u16);	/* bytes -> 32-bit words */
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- /* It might take 3-10ms before the IBQ debug read access is allowed.
- * Wait for 1 Sec with a delay of 1 usec.
- */
- attempts = 1000000;
-
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, V_IBQDBGADDR(addr) |
- F_IBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGBUSY, 0,
- attempts, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_IBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_ibq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_IBQ_DBG_CFG, 0);
return i;
}
+static int t4_read_cim_obq_data_core(struct adapter *adap, u8 coreid, u32 addr,
+ u32 *data)
+{
+ unsigned int v;
+ int ret;
+
+ if (chip_id(adap) > CHELSIO_T6)
+ v = V_T7_OBQDBGADDR(addr) | V_OBQDBGCORE(coreid);
+ else
+ v = V_OBQDBGADDR(addr);
+
+ t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, v | F_OBQDBGEN);
+ ret = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0, 2, 1);
+ if (ret)
+ return ret;
+
+ *data = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ return 0;
+}
+
/**
- * t4_read_cim_obq - read the contents of a CIM outbound queue
+ *	t4_read_cim_obq_core - read the contents of a CIM outbound queue on a
+ *	specific core
* @adap: the adapter
+ * @coreid: the uP coreid
* @qid: the queue index
* @data: where to store the queue contents
* @n: capacity of @data in 32-bit words
*
* Reads the contents of the selected CIM queue starting at address 0 up
- * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
- * error and the number of 32-bit words actually read on success.
+ *	to the capacity of @data on a specific @coreid. @n must be a multiple
+ * of 4. Returns < 0 on error and the number of 32-bit words actually
+ * read on success.
*/
-int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
+int t4_read_cim_obq_core(struct adapter *adap, u8 coreid, u32 qid, u32 *data,
+ size_t n)
{
- int i, err;
- unsigned int addr, v, nwords;
- int cim_num_obq = adap->chip_params->cim_num_obq;
+ unsigned int cim_num_obq = adap->chip_params->cim_num_obq;
+ u16 i, addr, nwords;
+ int ret;
if ((qid > (cim_num_obq - 1)) || (n & 3))
return -EINVAL;
- t4_write_reg(adap, A_CIM_QUEUE_CONFIG_REF, F_OBQSELECT |
- V_QUENUMSELECT(qid));
- v = t4_read_reg(adap, A_CIM_QUEUE_CONFIG_CTRL);
-
- addr = G_CIMQBASE(v) * 64; /* muliple of 256 -> muliple of 4 */
- nwords = G_CIMQSIZE(v) * 64; /* same */
+ t4_read_cimq_cfg_obq_core(adap, coreid, qid, &addr, &nwords);
+	addr >>= sizeof(u16);	/* bytes -> 32-bit words */
+ nwords >>= sizeof(u16);
if (n > nwords)
n = nwords;
- for (i = 0; i < n; i++, addr++) {
- t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, V_OBQDBGADDR(addr) |
- F_OBQDBGEN);
- err = t4_wait_op_done(adap, A_CIM_OBQ_DBG_CFG, F_OBQDBGBUSY, 0,
- 2, 1);
- if (err)
- return err;
- *data++ = t4_read_reg(adap, A_CIM_OBQ_DBG_DATA);
+ for (i = 0; i < n; i++, addr++, data++) {
+ ret = t4_read_cim_obq_data_core(adap, coreid, addr, data);
+ if (ret < 0)
+ return ret;
}
+
t4_write_reg(adap, A_CIM_OBQ_DBG_CFG, 0);
- return i;
+ return i;
}
-enum {
- CIM_QCTL_BASE = 0,
- CIM_CTL_BASE = 0x2000,
- CIM_PBT_ADDR_BASE = 0x2800,
- CIM_PBT_LRF_BASE = 0x3000,
- CIM_PBT_DATA_BASE = 0x3800
-};
-
/**
- * t4_cim_read - read a block from CIM internal address space
+ * t4_cim_read_core - read a block from CIM internal address space
+ *	of a control register group on a specific core.
* @adap: the adapter
+ * @group: the control register group to select for read
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to read
* @valp: where to store the result
*
- * Reads a block of 4-byte words from the CIM intenal address space.
+ *	Reads a block of 4-byte words from the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
+int t4_cim_read_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ unsigned int *valp)
{
+ unsigned int hostbusy, v = 0;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = V_HOSTGRPSEL(group) | V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
if (!ret)
*valp++ = t4_read_reg(adap, A_CIM_HOST_ACC_DATA);
}
+
return ret;
}
/**
- * t4_cim_write - write a block into CIM internal address space
+ * t4_cim_write_core - write a block into CIM internal address space
+ *	of a control register group on a specific core.
* @adap: the adapter
+ * @group: the control register group to select for write
+ * @coreid: the uP coreid
* @addr: the start address within the CIM address space
* @n: number of words to write
* @valp: set of values to write
*
- * Writes a block of 4-byte words into the CIM intenal address space.
+ *	Writes a block of 4-byte words into the CIM internal address space
+ * of a control register @group on a specific @coreid.
*/
-int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
- const unsigned int *valp)
+int t4_cim_write_core(struct adapter *adap, u8 group, u8 coreid,
+ unsigned int addr, unsigned int n,
+ const unsigned int *valp)
{
+ unsigned int hostbusy, v;
int ret = 0;
- if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
+ if (chip_id(adap) > CHELSIO_T6) {
+ hostbusy = F_T7_HOSTBUSY;
+ v = F_T7_HOSTWRITE | V_HOSTGRPSEL(group) |
+ V_HOSTCORESEL(coreid);
+ } else {
+ hostbusy = F_HOSTBUSY;
+ v = F_HOSTWRITE;
+ }
+
+ if (t4_read_reg(adap, A_CIM_HOST_ACC_CTRL) & hostbusy)
return -EBUSY;
for ( ; !ret && n--; addr += 4) {
t4_write_reg(adap, A_CIM_HOST_ACC_DATA, *valp++);
- t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | F_HOSTWRITE);
- ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
+ t4_write_reg(adap, A_CIM_HOST_ACC_CTRL, addr | v);
+ ret = t4_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, hostbusy,
0, 5, 2);
}
- return ret;
-}
-static int t4_cim_write1(struct adapter *adap, unsigned int addr,
- unsigned int val)
-{
- return t4_cim_write(adap, addr, 1, &val);
-}
-
-/**
- * t4_cim_ctl_read - read a block from CIM control region
- * @adap: the adapter
- * @addr: the start address within the CIM control region
- * @n: number of words to read
- * @valp: where to store the result
- *
- * Reads a block of 4-byte words from the CIM control region.
- */
-int t4_cim_ctl_read(struct adapter *adap, unsigned int addr, unsigned int n,
- unsigned int *valp)
-{
- return t4_cim_read(adap, addr + CIM_CTL_BASE, n, valp);
+ return ret;
}
/**
- * t4_cim_read_la - read CIM LA capture buffer
+ *	t4_cim_read_la_core - read CIM LA capture buffer on a specific core
* @adap: the adapter
+ * @coreid: uP coreid
* @la_buf: where to store the LA data
* @wrptr: the HW write pointer within the capture buffer
*
- * Reads the contents of the CIM LA buffer with the most recent entry at
- * the end of the returned data and with the entry at @wrptr first.
- * We try to leave the LA in the running state we find it in.
+ * Reads the contents of the CIM LA buffer on a specific @coreid
+ * with the most recent entry at the end of the returned data
+ * and with the entry at @wrptr first. We try to leave the LA
+ * in the running state we find it in.
*/
-int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
+int t4_cim_read_la_core(struct adapter *adap, u8 coreid, u32 *la_buf,
+ u32 *wrptr)
{
- int i, ret;
unsigned int cfg, val, idx;
+ int i, ret;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &cfg);
if (ret)
return ret;
if (cfg & F_UPDBGLAEN) { /* LA is running, freeze it */
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG, 0);
+ val = 0;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
return ret;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1, &val);
if (ret)
goto restart;
@@ -10085,25 +11249,28 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
*wrptr = idx;
for (i = 0; i < adap->params.cim_la_size; i++) {
- ret = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN);
+ val = V_UPDBGLARDPTR(idx) | F_UPDBGLARDEN;
+ ret = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_CFG, 1, &val);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (ret)
break;
if (val & F_UPDBGLARDEN) {
ret = -ETIMEDOUT;
break;
}
- ret = t4_cim_read(adap, A_UP_UP_DBG_LA_DATA, 1, &la_buf[i]);
+ ret = t4_cim_read_core(adap, 1, coreid, A_UP_UP_DBG_LA_DATA, 1,
+ &la_buf[i]);
if (ret)
break;
		/* Bits 0-3 of UpDbgLaRdPtr can be between 0000 and 1001 to
* identify the 32-bit portion of the full 312-bit data
*/
- if (is_t6(adap) && (idx & 0xf) >= 9)
+ if ((chip_id(adap) > CHELSIO_T5) && (idx & 0xf) >= 9)
idx = (idx & 0xff0) + 0x10;
else
idx++;
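The pointer stepping above is easiest to see with concrete values: each LA row holds ten 32-bit words of the 312-bit entry, selected by the pointer's low nibble, so word 9 rolls over to word 0 of the next row. An illustrative sketch:

#include <stdio.h>

int main(void)
{
	unsigned int idx = 0x128;	/* row 0x12, word 8 */
	int i;

	for (i = 0; i < 4; i++) {
		printf("idx=%#x\n", idx);
		if ((idx & 0xf) >= 9)
			idx = (idx & 0xff0) + 0x10;	/* next row, word 0 */
		else
			idx++;			/* next word in this row */
	}
	/* prints 0x128, 0x129, 0x130, 0x131 */
	return (0);
}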
@@ -10112,11 +11279,15 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
}
restart:
if (cfg & F_UPDBGLAEN) {
- int r = t4_cim_write1(adap, A_UP_UP_DBG_LA_CFG,
- cfg & ~F_UPDBGLARDEN);
+ int r;
+
+ val = cfg & ~F_UPDBGLARDEN;
+ r = t4_cim_write_core(adap, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &val);
if (!ret)
ret = r;
}
+
return ret;
}
@@ -10403,25 +11574,20 @@ void t4_get_tx_sched(struct adapter *adap, unsigned int sched, unsigned int *kbp
int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_cfg_addr(adap);
+ cfg_addr = t4_flash_cfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_CFG_MAX_SIZE) {
- CH_ERR(adap, "cfg file too large, max is %u bytes\n",
- FLASH_CFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "cfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
/*
@@ -10432,15 +11598,12 @@ int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 1);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10644,25 +11807,25 @@ int t4_load_boot(struct adapter *adap, u8 *boot_data,
pcir_data_t *pcir_header;
int ret, addr;
uint16_t device_id;
- unsigned int i;
- unsigned int boot_sector = (boot_addr * 1024 );
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
+ unsigned int i, start, len;
+ unsigned int boot_sector = boot_addr * 1024;
/*
- * Make sure the boot image does not encroach on the firmware region
+ * Make sure the boot image does not exceed its available space.
*/
- if ((boot_sector + size) >> 16 > FLASH_FW_START_SEC) {
- CH_ERR(adap, "boot image encroaching on firmware region\n");
+ len = 0;
+ start = t4_flash_loc_start(adap, FLASH_LOC_BOOT_AREA, &len);
+ if (boot_sector + size > start + len) {
+ CH_ERR(adap, "boot data is larger than available BOOT area\n");
return -EFBIG;
}
/*
* The boot sector is comprised of the Expansion-ROM boot, iSCSI boot,
* and Boot configuration data sections. These 3 boot sections span
- * sectors 0 to 7 in flash and live right before the FW image location.
+ * the entire FLASH_LOC_BOOT_AREA.
*/
- i = DIV_ROUND_UP(size ? size : FLASH_FW_START,
- sf_sec_size);
+ i = DIV_ROUND_UP(size ? size : len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, boot_sector >> 16,
(boot_sector >> 16) + i - 1);
@@ -10765,40 +11928,39 @@ out:
* is stored, or an error if the device FLASH is too small to contain
* a OptionROM Configuration.
*/
-static int t4_flash_bootcfg_addr(struct adapter *adapter)
+static int t4_flash_bootcfg_addr(struct adapter *adapter, unsigned int *lenp)
{
+ unsigned int len = 0;
+ const int start = t4_flash_loc_start(adapter, FLASH_LOC_BOOTCFG, &len);
+
/*
* If the device FLASH isn't large enough to hold a Firmware
* Configuration File, return an error.
*/
- if (adapter->params.sf_size < FLASH_BOOTCFG_START + FLASH_BOOTCFG_MAX_SIZE)
+ if (adapter->params.sf_size < start + len)
return -ENOSPC;
-
- return FLASH_BOOTCFG_START;
+ if (lenp != NULL)
+ *lenp = len;
+ return (start);
}
int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
{
int ret, i, n, cfg_addr;
- unsigned int addr;
+ unsigned int addr, len;
unsigned int flash_cfg_start_sec;
- unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
- cfg_addr = t4_flash_bootcfg_addr(adap);
+ cfg_addr = t4_flash_bootcfg_addr(adap, &len);
if (cfg_addr < 0)
return cfg_addr;
- addr = cfg_addr;
- flash_cfg_start_sec = addr / SF_SEC_SIZE;
-
- if (size > FLASH_BOOTCFG_MAX_SIZE) {
- CH_ERR(adap, "bootcfg file too large, max is %u bytes\n",
- FLASH_BOOTCFG_MAX_SIZE);
+ if (size > len) {
+ CH_ERR(adap, "bootcfg file too large, max is %u bytes\n", len);
return -EFBIG;
}
- i = DIV_ROUND_UP(FLASH_BOOTCFG_MAX_SIZE,/* # of sectors spanned */
- sf_sec_size);
+ flash_cfg_start_sec = cfg_addr / SF_SEC_SIZE;
+ i = DIV_ROUND_UP(len, SF_SEC_SIZE);
ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
flash_cfg_start_sec + i - 1);
@@ -10810,15 +11972,12 @@ int t4_load_bootcfg(struct adapter *adap,const u8 *cfg_data, unsigned int size)
goto out;
/* this will write to the flash up to SF_PAGE_SIZE at a time */
- for (i = 0; i< size; i+= SF_PAGE_SIZE) {
- if ( (size - i) < SF_PAGE_SIZE)
- n = size - i;
- else
- n = SF_PAGE_SIZE;
+ addr = cfg_addr;
+ for (i = 0; i < size; i += SF_PAGE_SIZE) {
+ n = min(size - i, SF_PAGE_SIZE);
ret = t4_write_flash(adap, addr, n, cfg_data, 0);
if (ret)
goto out;
-
addr += SF_PAGE_SIZE;
cfg_data += SF_PAGE_SIZE;
}
@@ -10844,19 +12003,20 @@ out:
*/
int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
{
- static const uint8_t width[] = {1, 3, 17, 17, 8, 8, 16, 9, 3, 1};
int i, nbits, rc;
uint32_t param, val;
uint16_t fmode, fmask;
const int maxbits = adap->chip_params->filter_opt_len;
+ const int nopt = adap->chip_params->filter_num_opt;
+ int width;
if (mode != -1 || mask != -1) {
if (mode != -1) {
fmode = mode;
nbits = 0;
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
+ for (i = 0; i < nopt; i++) {
if (fmode & (1 << i))
- nbits += width[i];
+ nbits += t4_filter_field_width(adap, i);
}
if (nbits > maxbits) {
CH_ERR(adap, "optional fields in the filter "
@@ -10867,17 +12027,20 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
}
/*
- * Hardware wants the bits to be maxed out. Keep
+ * Hardware < T7 wants the bits to be maxed out. Keep
* setting them until there's no room for more.
*/
- for (i = S_FCOE; i <= S_FRAGMENTATION; i++) {
- if (fmode & (1 << i))
- continue;
- if (nbits + width[i] <= maxbits) {
- fmode |= 1 << i;
- nbits += width[i];
- if (nbits == maxbits)
- break;
+ if (chip_id(adap) < CHELSIO_T7) {
+ for (i = 0; i < nopt; i++) {
+ if (fmode & (1 << i))
+ continue;
+ width = t4_filter_field_width(adap, i);
+ if (nbits + width <= maxbits) {
+ fmode |= 1 << i;
+ nbits += width;
+ if (nbits == maxbits)
+ break;
+ }
}
}
@@ -10936,21 +12099,26 @@ int t4_set_filter_cfg(struct adapter *adap, int mode, int mask, int vnic_mode)
*/
void t4_clr_port_stats(struct adapter *adap, int idx)
{
- unsigned int i;
- u32 bgmap = adap2pinfo(adap, idx)->mps_bg_map;
- u32 port_base_addr;
+ struct port_info *pi;
+ int i, port_id, tx_chan;
+ u32 bgmap, port_base_addr;
- if (is_t4(adap))
- port_base_addr = PORT_BASE(idx);
- else
- port_base_addr = T5_PORT_BASE(idx);
-
- for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
- for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
- i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
- t4_write_reg(adap, port_base_addr + i, 0);
+ port_id = adap->port_map[idx];
+	MPASS(port_id >= 0 && port_id < adap->params.nports);
+ pi = adap->port[port_id];
+
+ for (tx_chan = pi->tx_chan;
+ tx_chan < pi->tx_chan + adap->params.tp.lb_nchan; tx_chan++) {
+ port_base_addr = t4_port_reg(adap, tx_chan, 0);
+
+ for (i = A_MPS_PORT_STAT_TX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_TX_PORT_PPP7_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ for (i = A_MPS_PORT_STAT_RX_PORT_BYTES_L;
+ i <= A_MPS_PORT_STAT_RX_PORT_LESS_64B_H; i += 8)
+ t4_write_reg(adap, port_base_addr + i, 0);
+ }
+ bgmap = pi->mps_bg_map;
for (i = 0; i < 4; i++)
if (bgmap & (1 << i)) {
t4_write_reg(adap,
@@ -11078,6 +12246,8 @@ int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
+ if (chip_id(adap) > CHELSIO_T6)
+ data[6] = be32_to_cpu(c.u.idctxt.ctxt_data6);
}
return ret;
}
@@ -11099,9 +12269,12 @@ int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, enum ctxt_type cty
t4_write_reg(adap, A_SGE_CTXT_CMD, V_CTXTQID(cid) | V_CTXTTYPE(ctype));
ret = t4_wait_op_done(adap, A_SGE_CTXT_CMD, F_BUSY, 0, 3, 1);
- if (!ret)
+ if (!ret) {
for (i = A_SGE_CTXT_DATA0; i <= A_SGE_CTXT_DATA5; i += 4)
*data++ = t4_read_reg(adap, i);
+ if (chip_id(adap) > CHELSIO_T6)
+ *data++ = t4_read_reg(adap, i);
+ }
return ret;
}
@@ -11377,7 +12550,7 @@ out:
* @vlan: The vlanid to be set
*
*/
-int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+int t4_set_vlan_acl(struct adapter *adap, unsigned int pf, unsigned int vf,
u16 vlan)
{
struct fw_acl_vlan_cmd vlan_cmd;
@@ -11389,9 +12562,10 @@ int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
F_FW_CMD_REQUEST |
F_FW_CMD_WRITE |
F_FW_CMD_EXEC |
- V_FW_ACL_VLAN_CMD_PFN(adap->pf) |
+ V_FW_ACL_VLAN_CMD_PFN(pf) |
V_FW_ACL_VLAN_CMD_VFN(vf));
- vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+ vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd) |
+ V_FW_ACL_VLAN_CMD_PMASK(1 << pf));
	/* Drop all packets that do not match vlan id */
vlan_cmd.dropnovlan_fm = (enable
? (F_FW_ACL_VLAN_CMD_DROPNOVLAN |
diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h
index 79ec690cd5e6..09bd9ac9e637 100644
--- a/sys/dev/cxgbe/common/t4_hw.h
+++ b/sys/dev/cxgbe/common/t4_hw.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,30 +41,36 @@ enum {
EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
T6_RSS_NENTRIES = 4096,
+ T7_RSS_NENTRIES = 16384,
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
NTX_SCHED = 8, /* # of HW Tx scheduling queues */
PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7,
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
MAX_PM_NSTATS = 7,
+ T7_PM_RX_CACHE_NSTATS = 27, /* # of PM Rx Cache stats in T7 */
MBOX_LEN = 64, /* mailbox size in bytes */
NTRACE = 4, /* # of tracing filters */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width of optional components */
T5_FILTER_OPT_LEN = 40,
+ T7_FILTER_OPT_LEN = 63,
NWOL_PAT = 8, /* # of WoL patterns */
WOL_PAT_LEN = 128, /* length of WoL patterns */
UDBS_SEG_SIZE = 128, /* Segment size of BAR2 doorbells */
UDBS_SEG_SHIFT = 7, /* log2(UDBS_SEG_SIZE) */
UDBS_DB_OFFSET = 8, /* offset of the 4B doorbell in a segment */
UDBS_WR_OFFSET = 64, /* offset of the work request in a segment */
+ MAX_UP_CORES = 8, /* Max # of uP cores that can be enabled */
};
enum {
CIM_NUM_IBQ = 6, /* # of CIM IBQs */
+ CIM_NUM_IBQ_T7 = 16, /* # of CIM IBQs for T7 */
CIM_NUM_OBQ = 6, /* # of CIM OBQs */
CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */
+ CIM_NUM_OBQ_T7 = 16, /* # of CIM OBQs for T7 adapter */
CIMLA_SIZE = 256 * 8, /* 256 rows * ceil(235/32) 32-bit words */
CIMLA_SIZE_T6 = 256 * 10, /* 256 rows * ceil(311/32) 32-bit words */
CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */
@@ -91,6 +96,7 @@ enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV }; /* mailbox owners */
enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_CTXT_SIZE = 24, /* size of SGE context */
+ SGE_CTXT_SIZE_T7 = 28, /* size of SGE context for T7 */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
SGE_NDBQTIMERS = 8, /* # of Doorbell Queue Timer values */
@@ -161,6 +167,18 @@ struct rsp_ctrl {
#define V_QINTR_TIMER_IDX(x) ((x) << S_QINTR_TIMER_IDX)
#define G_QINTR_TIMER_IDX(x) (((x) >> S_QINTR_TIMER_IDX) & M_QINTR_TIMER_IDX)
+#define S_ARM_QTYPE 11
+#define M_ARM_QTYPE 1
+#define V_ARM_QTYPE(x) ((x) << S_ARM_QTYPE)
+
+#define S_ARM_PIDX 0
+#define M_ARM_PIDX 0x7ffU
+#define V_ARM_PIDX(x) ((x) << S_ARM_PIDX)
+
+#define S_ARM_CIDXINC 0
+#define M_ARM_CIDXINC 0x7ffU
+#define V_ARM_CIDXINC(x) ((x) << S_ARM_CIDXINC)
+
/* # of pages a pagepod can hold without needing another pagepod */
#define PPOD_PAGES 4U
@@ -206,95 +224,116 @@ struct pagepod {
*/
#define FLASH_START(start) ((start) * SF_SEC_SIZE)
#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+#define FLASH_MIN_SIZE FLASH_START(32)
-enum {
+enum t4_flash_loc {
/*
* Various Expansion-ROM boot images, etc.
*/
- FLASH_EXP_ROM_START_SEC = 0,
- FLASH_EXP_ROM_NSECS = 6,
- FLASH_EXP_ROM_START = FLASH_START(FLASH_EXP_ROM_START_SEC),
- FLASH_EXP_ROM_MAX_SIZE = FLASH_MAX_SIZE(FLASH_EXP_ROM_NSECS),
+ FLASH_LOC_EXP_ROM = 0,
/*
* iSCSI Boot Firmware Table (iBFT) and other driver-related
* parameters ...
*/
- FLASH_IBFT_START_SEC = 6,
- FLASH_IBFT_NSECS = 1,
- FLASH_IBFT_START = FLASH_START(FLASH_IBFT_START_SEC),
- FLASH_IBFT_MAX_SIZE = FLASH_MAX_SIZE(FLASH_IBFT_NSECS),
+ FLASH_LOC_IBFT,
/*
* Boot configuration data.
*/
- FLASH_BOOTCFG_START_SEC = 7,
- FLASH_BOOTCFG_NSECS = 1,
- FLASH_BOOTCFG_START = FLASH_START(FLASH_BOOTCFG_START_SEC),
- FLASH_BOOTCFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_BOOTCFG_NSECS),
+ FLASH_LOC_BOOTCFG,
/*
* Location of firmware image in FLASH.
*/
- FLASH_FW_START_SEC = 8,
- FLASH_FW_NSECS = 16,
- FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
- FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ FLASH_LOC_FW,
/*
* Location of bootstrap firmware image in FLASH.
*/
- FLASH_FWBOOTSTRAP_START_SEC = 27,
- FLASH_FWBOOTSTRAP_NSECS = 1,
- FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
- FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+ FLASH_LOC_FWBOOTSTRAP,
/*
* iSCSI persistent/crash information.
*/
- FLASH_ISCSI_CRASH_START_SEC = 29,
- FLASH_ISCSI_CRASH_NSECS = 1,
- FLASH_ISCSI_CRASH_START = FLASH_START(FLASH_ISCSI_CRASH_START_SEC),
- FLASH_ISCSI_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_ISCSI_CRASH_NSECS),
+ FLASH_LOC_ISCSI_CRASH,
/*
* FCoE persistent/crash information.
*/
- FLASH_FCOE_CRASH_START_SEC = 30,
- FLASH_FCOE_CRASH_NSECS = 1,
- FLASH_FCOE_CRASH_START = FLASH_START(FLASH_FCOE_CRASH_START_SEC),
- FLASH_FCOE_CRASH_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FCOE_CRASH_NSECS),
+ FLASH_LOC_FCOE_CRASH,
/*
* Location of Firmware Configuration File in FLASH.
*/
- FLASH_CFG_START_SEC = 31,
- FLASH_CFG_NSECS = 1,
- FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC),
- FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS),
+ FLASH_LOC_CFG,
+
+ /*
+ * CUDBG chip dump.
+ */
+ FLASH_LOC_CUDBG,
+
+ /*
+ * FW chip dump.
+ */
+ FLASH_LOC_CHIP_DUMP,
+
+ /*
+ * DPU boot information store.
+ */
+ FLASH_LOC_DPU_BOOT,
+
+ /*
+	 * DPU persistent information store.
+ */
+ FLASH_LOC_DPU_AREA,
/*
- * We don't support FLASH devices which can't support the full
- * standard set of sections which we need for normal operations.
+ * VPD location.
*/
- FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE,
+ FLASH_LOC_VPD,
/*
- * Sectors 32-63 for CUDBG.
+ * Backup init/vpd.
*/
- FLASH_CUDBG_START_SEC = 32,
- FLASH_CUDBG_NSECS = 32,
- FLASH_CUDBG_START = FLASH_START(FLASH_CUDBG_START_SEC),
- FLASH_CUDBG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CUDBG_NSECS),
+ FLASH_LOC_VPD_BACKUP,
/*
- * Size of defined FLASH regions.
+ * Backup firmware image.
*/
- FLASH_END_SEC = 64,
+ FLASH_LOC_FW_BACKUP,
+
+ /*
+ * Backup bootstrap firmware image.
+ */
+ FLASH_LOC_FWBOOTSTRAP_BACKUP,
+
+ /*
+ * Backup Location of Firmware Configuration File in FLASH.
+ */
+ FLASH_LOC_CFG_BACK,
+
+ /*
+ * Helper to retrieve info that spans the entire Boot related area.
+ */
+ FLASH_LOC_BOOT_AREA,
+
+ /*
+ * Helper to determine minimum standard set of sections needed for
+ * normal operations.
+ */
+ FLASH_LOC_MIN_SIZE,
+
+ /*
+ * End of FLASH regions.
+ */
+ FLASH_LOC_END
};
-#undef FLASH_START
-#undef FLASH_MAX_SIZE
+struct t4_flash_loc_entry {
+ u16 start_sec;
+ u16 nsecs;
+};
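The FLASH_LOC_* enum and t4_flash_loc_entry imply a per-chip start_sec/nsecs lookup table. A hypothetical sketch of the resolution step, seeded with the legacy T4 values removed above; the ex_ names are illustrative and the real per-chip tables live in the .c file:

static const struct t4_flash_loc_entry ex_loc_table[] = {
	[FLASH_LOC_EXP_ROM] = { .start_sec = 0, .nsecs = 6 },
	[FLASH_LOC_IBFT]    = { .start_sec = 6, .nsecs = 1 },
	[FLASH_LOC_BOOTCFG] = { .start_sec = 7, .nsecs = 1 },
	[FLASH_LOC_FW]      = { .start_sec = 8, .nsecs = 16 },
};

static unsigned int
ex_flash_loc_start(enum t4_flash_loc loc, unsigned int *lenp)
{
	const struct t4_flash_loc_entry *e = &ex_loc_table[loc];

	if (lenp != NULL)
		*lenp = FLASH_MAX_SIZE(e->nsecs);	/* nsecs * SF_SEC_SIZE */
	return (FLASH_START(e->start_sec));	/* sector index to byte offset */
}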
#define S_SGE_TIMESTAMP 0
#define M_SGE_TIMESTAMP 0xfffffffffffffffULL
diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h
index d356d0d99f36..0d12ccf2e910 100644
--- a/sys/dev/cxgbe/common/t4_msg.h
+++ b/sys/dev/cxgbe/common/t4_msg.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +29,7 @@
#ifndef T4_MSG_H
#define T4_MSG_H
-enum {
+enum cpl_opcodes {
CPL_PASS_OPEN_REQ = 0x1,
CPL_PASS_ACCEPT_RPL = 0x2,
CPL_ACT_OPEN_REQ = 0x3,
@@ -68,13 +67,16 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_RTE_DELETE_RPL = 0x27,
CPL_RTE_WRITE_RPL = 0x28,
+ CPL_ROCE_FW_NOTIFY = 0x28,
CPL_RX_URG_PKT = 0x29,
CPL_TAG_WRITE_RPL = 0x2A,
+ CPL_RDMA_ASYNC_EVENT = 0x2A,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_RX_URG_NOTIFY = 0x2C,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_TX_DATA_ACK = 0x2F,
+ CPL_RDMA_INV_REQ = 0x2F,
CPL_RX_PHYS_ADDR = 0x30,
CPL_PCMD_READ_RPL = 0x31,
@@ -107,19 +109,30 @@ enum {
CPL_RX_DATA_DIF = 0x4B,
CPL_ERR_NOTIFY = 0x4D,
CPL_RX_TLS_CMP = 0x4E,
+ CPL_T6_TX_DATA_ACK = 0x4F,
CPL_RDMA_READ_REQ = 0x60,
CPL_RX_ISCSI_DIF = 0x60,
+ CPL_RDMA_CQE_EXT = 0x61,
+ CPL_RDMA_CQE_FW_EXT = 0x62,
+ CPL_RDMA_CQE_ERR_EXT = 0x63,
+ CPL_TX_DATA_ACK_XT = 0x64,
+ CPL_ROCE_CQE = 0x68,
+ CPL_ROCE_CQE_FW = 0x69,
+ CPL_ROCE_CQE_ERR = 0x6A,
+
+ CPL_SACK_REQ = 0x70,
CPL_SET_LE_REQ = 0x80,
CPL_PASS_OPEN_REQ6 = 0x81,
CPL_ACT_OPEN_REQ6 = 0x83,
CPL_TX_TLS_PDU = 0x88,
CPL_TX_TLS_SFO = 0x89,
-
CPL_TX_SEC_PDU = 0x8A,
CPL_TX_TLS_ACK = 0x8B,
+ CPL_RCB_UPD = 0x8C,
+ CPL_SGE_FLR_FLUSH = 0xA0,
CPL_RDMA_TERMINATE = 0xA2,
CPL_RDMA_WRITE = 0xA4,
CPL_SGE_EGR_UPDATE = 0xA5,
@@ -138,15 +151,27 @@ enum {
CPL_TLS_DATA = 0xB1,
CPL_ISCSI_DATA = 0xB2,
CPL_FCOE_DATA = 0xB3,
+ CPL_NVMT_DATA = 0xB4,
+ CPL_NVMT_CMP = 0xB5,
+ CPL_NVMT_CMP_IMM = 0xB6,
+ CPL_NVMT_CMP_SRQ = 0xB7,
+ CPL_ROCE_ACK_NAK_REQ = 0xBC,
+ CPL_ROCE_ACK_NAK = 0xBD,
CPL_FW4_MSG = 0xC0,
CPL_FW4_PLD = 0xC1,
+ CPL_RDMA_CQE_SRQ = 0xC2,
	CPL_FW4_ACK = 0xC3,
+	CPL_ACCELERATOR_ACK = 0xC4,
+ CPL_RX_PKT_IPSEC = 0xC6,
CPL_SRQ_TABLE_RPL = 0xCC,
+ CPL_TX_DATA_REQ = 0xCF,
+
CPL_RX_PHYS_DSGL = 0xD0,
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_ACCELERATOR_HDR = 0xE8,
CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -233,6 +258,8 @@ enum {
ULP_MODE_TCPDDP = 5,
ULP_MODE_FCOE = 6,
ULP_MODE_TLS = 8,
+ ULP_MODE_RDMA_V2 = 10,
+ ULP_MODE_NVMET = 11,
};
enum {
@@ -325,9 +352,14 @@ union opcode_tid {
#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
/* extract the TID from a CPL command */
-#define GET_TID(cmd) (G_TID(ntohl(OPCODE_TID(cmd))))
+#define GET_TID(cmd) (G_TID(be32toh(OPCODE_TID(cmd))))
#define GET_OPCODE(cmd) ((cmd)->ot.opcode)
+
+/*
+ * Note that this driver splits the 14b opaque atid into an 11b atid and a 3b
+ * cookie that is used to demux replies for shared CPLs.
+ */
/* partitioning of TID fields that also carry a queue id */
#define S_TID_TID 0
#define M_TID_TID 0x7ff
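A minimal sketch of the 11b/3b split described in the note above, assuming the cookie occupies the three bits directly above the atid; the ex_ helpers are illustrative, not driver API:

#include <stdint.h>

static inline uint32_t
ex_make_opaque_atid(uint32_t atid, uint32_t cookie)
{
	return (((cookie & 0x7) << 11) | (atid & M_TID_TID));
}

static inline uint32_t
ex_opaque_to_atid(uint32_t opaque)
{
	return ((opaque >> S_TID_TID) & M_TID_TID);	/* low 11 bits */
}

static inline uint32_t
ex_opaque_to_cookie(uint32_t opaque)
{
	return ((opaque >> 11) & 0x7);	/* 3b demux cookie (assumed slot) */
}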
@@ -717,7 +749,7 @@ struct cpl_pass_establish {
struct cpl_pass_accept_req {
RSS_HDR
union opcode_tid ot;
- __be16 rsvd;
+ __be16 ipsecen_outiphdrlen;
__be16 len;
__be32 hdr_len;
__be16 vlan;
@@ -775,6 +807,155 @@ struct cpl_pass_accept_req {
#define V_SYN_INTF(x) ((x) << S_SYN_INTF)
#define G_SYN_INTF(x) (((x) >> S_SYN_INTF) & M_SYN_INTF)
+struct cpl_t7_pass_accept_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 ipsecen_to_outiphdrlen;
+ __be16 length;
+ __be32 ethhdrlen_to_rxchannel;
+ __be16 vlantag;
+ __be16 interface_to_mac_ix;
+ __be32 tos_ptid;
+ __be16 tcpmss;
+ __u8 tcpwsc;
+ __u8 tcptmstp_to_tcpunkn;
+};
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECEN)
+#define F_CPL_PASS_T7_ACCEPT_REQ_IPSECEN \
+ V_CPL_T7_PASS_ACCEPT_REQ_IPSECEN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 10
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE 0x3
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPSECTYPE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_OUTIPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_ETHHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 14
+#define M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN 0x3ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_IPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 8
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN 0x3f
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPHDRLEN)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+#define G_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_RXCHANNEL)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 12
+#define M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE 0xf
+#define V_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+#define G_CPL_T7_PASS_ACCEPT_REQ_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_INTERFACE) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_INTERFACE)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 9
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH)
+#define F_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH \
+ V_CPL_T7_PASS_ACCEPT_REQ_MAC_MATCH(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX 0x1ff
+#define V_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+#define G_CPL_T7_PASS_ACCEPT_REQ_MAC_IX(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_MAC_IX) & M_CPL_T7_PASS_ACCEPT_REQ_MAC_IX)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TOS 24
+#define M_CPL_T7_PASS_ACCEPT_REQ_TOS 0xff
+#define V_CPL_T7_PASS_ACCEPT_REQ_TOS(x) ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TOS)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TOS(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TOS) & M_CPL_T7_PASS_ACCEPT_REQ_TOS)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_PTID 0
+#define M_CPL_T7_PASS_ACCEPT_REQ_PTID 0xffffff
+#define V_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_PTID)
+#define G_CPL_T7_PASS_ACCEPT_REQ_PTID(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_PTID) & M_CPL_T7_PASS_ACCEPT_REQ_PTID)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 7
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPTMSTP(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 6
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPSACK) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPSACK)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPSACK \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPSACK(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPECN 5
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPECN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPECN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPECN) & M_CPL_T7_PASS_ACCEPT_REQ_TCPECN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPECN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPECN(1U)
+
+#define S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 4
+#define M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN 0x1
+#define V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ ((x) << S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define G_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(x) \
+ (((x) >> S_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN) & \
+ M_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN)
+#define F_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN \
+ V_CPL_T7_PASS_ACCEPT_REQ_TCPUNKN(1U)
+
struct cpl_pass_accept_rpl {
WR_HDR;
union opcode_tid ot;
@@ -810,6 +991,7 @@ struct cpl_act_open_req {
#define M_FILTER_TUPLE 0xFFFFFFFFFF
#define V_FILTER_TUPLE(x) ((x) << S_FILTER_TUPLE)
#define G_FILTER_TUPLE(x) (((x) >> S_FILTER_TUPLE) & M_FILTER_TUPLE)
+
struct cpl_t5_act_open_req {
WR_HDR;
union opcode_tid ot;
@@ -843,6 +1025,26 @@ struct cpl_t6_act_open_req {
#define V_AOPEN_FCOEMASK(x) ((x) << S_AOPEN_FCOEMASK)
#define F_AOPEN_FCOEMASK V_AOPEN_FCOEMASK(1U)
+struct cpl_t7_act_open_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be32 local_ip;
+ __be32 peer_ip;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
+#define S_T7_FILTER_TUPLE 1
+#define M_T7_FILTER_TUPLE 0x7FFFFFFFFFFFFFFFULL
+#define V_T7_FILTER_TUPLE(x) ((x) << S_T7_FILTER_TUPLE)
+#define G_T7_FILTER_TUPLE(x) (((x) >> S_T7_FILTER_TUPLE) & M_T7_FILTER_TUPLE)
+
struct cpl_act_open_req6 {
WR_HDR;
union opcode_tid ot;
@@ -889,6 +1091,23 @@ struct cpl_t6_act_open_req6 {
__be32 opt3;
};
+struct cpl_t7_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 iss;
+ __be32 opt2;
+ __be64 params;
+ __be32 rsvd2;
+ __be32 opt3;
+};
+
struct cpl_act_open_rpl {
RSS_HDR
union opcode_tid ot;
@@ -921,8 +1140,7 @@ struct cpl_get_tcb {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
- __u8 rsvd;
- __u8 cookie;
+ __be16 cookie;
};
/* cpl_get_tcb.reply_ctrl fields */
@@ -931,10 +1149,20 @@ struct cpl_get_tcb {
#define V_QUEUENO(x) ((x) << S_QUEUENO)
#define G_QUEUENO(x) (((x) >> S_QUEUENO) & M_QUEUENO)
+#define S_T7_QUEUENO 0
+#define M_T7_QUEUENO 0xFFF
+#define V_T7_QUEUENO(x) ((x) << S_T7_QUEUENO)
+#define G_T7_QUEUENO(x) (((x) >> S_T7_QUEUENO) & M_T7_QUEUENO)
+
#define S_REPLY_CHAN 14
#define V_REPLY_CHAN(x) ((x) << S_REPLY_CHAN)
#define F_REPLY_CHAN V_REPLY_CHAN(1U)
+#define S_T7_REPLY_CHAN 12
+#define M_T7_REPLY_CHAN 0x7
+#define V_T7_REPLY_CHAN(x) ((x) << S_T7_REPLY_CHAN)
+#define G_T7_REPLY_CHAN(x) (((x) >> S_T7_REPLY_CHAN) & M_T7_REPLY_CHAN)
+
#define S_NO_REPLY 15
#define V_NO_REPLY(x) ((x) << S_NO_REPLY)
#define F_NO_REPLY V_NO_REPLY(1U)
@@ -1018,6 +1246,40 @@ struct cpl_close_listsvr_req {
#define V_LISTSVR_IPV6(x) ((x) << S_LISTSVR_IPV6)
#define F_LISTSVR_IPV6 V_LISTSVR_IPV6(1U)
+struct cpl_t7_close_listsvr_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 noreply_to_queue;
+ __be16 r2;
+};
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 15
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 14
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 0x1
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_IPV6) & M_CPL_T7_CLOSE_LISTSVR_REQ_IPV6)
+#define F_CPL_T7_CLOSE_LISTSVR_REQ_IPV6 \
+ V_CPL_T7_CLOSE_LISTSVR_REQ_IPV6(1U)
+
+#define S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0
+#define M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE 0xfff
+#define V_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ ((x) << S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+#define G_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE) & \
+ M_CPL_T7_CLOSE_LISTSVR_REQ_QUEUE)
+
struct cpl_close_listsvr_rpl {
RSS_HDR
union opcode_tid ot;
@@ -1250,6 +1512,71 @@ struct cpl_tx_data_ack {
__be32 snd_una;
};
+struct cpl_tx_data_ack_xt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 extinfoh[2];
+ __be32 extinfol[2];
+};
+
+struct cpl_tx_data_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+};
+
+#define S_CPL_TX_DATA_REQ_TID 0
+#define M_CPL_TX_DATA_REQ_TID 0xffffff
+#define V_CPL_TX_DATA_REQ_TID(x) ((x) << S_CPL_TX_DATA_REQ_TID)
+#define G_CPL_TX_DATA_REQ_TID(x) \
+ (((x) >> S_CPL_TX_DATA_REQ_TID) & M_CPL_TX_DATA_REQ_TID)
+
+struct cpl_sack_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 snd_una;
+ __be32 snd_end;
+ __be32 snd_nxt;
+ __be32 snd_adv;
+ __be16 rttvar;
+ __be16 srtt;
+ __be32 block1[2];
+ __be32 block2[2];
+ __be32 block3[2];
+};
+
+struct cpl_sge_flr_flush {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 cookievalue_cookiesel;
+};
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIEVALUE 4
+#define M_CPL_SGE_FLR_FLUSH_COOKIEVALUE 0x3ff
+#define V_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+#define G_CPL_SGE_FLR_FLUSH_COOKIEVALUE(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIEVALUE) & \
+ M_CPL_SGE_FLR_FLUSH_COOKIEVALUE)
+
+#define S_CPL_SGE_FLR_FLUSH_COOKIESEL 0
+#define M_CPL_SGE_FLR_FLUSH_COOKIESEL 0xf
+#define V_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ ((x) << S_CPL_SGE_FLR_FLUSH_COOKIESEL)
+#define G_CPL_SGE_FLR_FLUSH_COOKIESEL(x) \
+ (((x) >> S_CPL_SGE_FLR_FLUSH_COOKIESEL) & M_CPL_SGE_FLR_FLUSH_COOKIESEL)
+
struct cpl_wr_ack { /* XXX */
RSS_HDR
union opcode_tid ot;
@@ -1271,8 +1598,6 @@ struct cpl_tx_pkt {
struct cpl_tx_pkt_core c;
};
-#define cpl_tx_pkt_xt cpl_tx_pkt
-
/* cpl_tx_pkt_core.ctrl0 fields */
#define S_TXPKT_VF 0
#define M_TXPKT_VF 0xFF
@@ -1404,6 +1729,261 @@ struct cpl_tx_pkt {
#define V_TXPKT_L4CSUM_DIS(x) ((__u64)(x) << S_TXPKT_L4CSUM_DIS)
#define F_TXPKT_L4CSUM_DIS V_TXPKT_L4CSUM_DIS(1ULL)
+struct cpl_tx_pkt_xt {
+ WR_HDR;
+ __be32 ctrl0;
+ __be16 pack;
+ __be16 len;
+ __be32 ctrl1;
+ __be32 ctrl2;
+};
+
+/* cpl_tx_pkt_xt.core.ctrl0 fields */
+#define S_CPL_TX_PKT_XT_OPCODE 24
+#define M_CPL_TX_PKT_XT_OPCODE 0xff
+#define V_CPL_TX_PKT_XT_OPCODE(x) ((x) << S_CPL_TX_PKT_XT_OPCODE)
+#define G_CPL_TX_PKT_XT_OPCODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OPCODE) & M_CPL_TX_PKT_XT_OPCODE)
+
+#define S_CPL_TX_PKT_XT_TIMESTAMP 23
+#define M_CPL_TX_PKT_XT_TIMESTAMP 0x1
+#define V_CPL_TX_PKT_XT_TIMESTAMP(x) ((x) << S_CPL_TX_PKT_XT_TIMESTAMP)
+#define G_CPL_TX_PKT_XT_TIMESTAMP(x) \
+ (((x) >> S_CPL_TX_PKT_XT_TIMESTAMP) & M_CPL_TX_PKT_XT_TIMESTAMP)
+#define F_CPL_TX_PKT_XT_TIMESTAMP V_CPL_TX_PKT_XT_TIMESTAMP(1U)
+
+#define S_CPL_TX_PKT_XT_STATDISABLE 22
+#define M_CPL_TX_PKT_XT_STATDISABLE 0x1
+#define V_CPL_TX_PKT_XT_STATDISABLE(x) ((x) << S_CPL_TX_PKT_XT_STATDISABLE)
+#define G_CPL_TX_PKT_XT_STATDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATDISABLE) & M_CPL_TX_PKT_XT_STATDISABLE)
+#define F_CPL_TX_PKT_XT_STATDISABLE V_CPL_TX_PKT_XT_STATDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_FCSDIS 21
+#define M_CPL_TX_PKT_XT_FCSDIS 0x1
+#define V_CPL_TX_PKT_XT_FCSDIS(x) ((x) << S_CPL_TX_PKT_XT_FCSDIS)
+#define G_CPL_TX_PKT_XT_FCSDIS(x) \
+ (((x) >> S_CPL_TX_PKT_XT_FCSDIS) & M_CPL_TX_PKT_XT_FCSDIS)
+#define F_CPL_TX_PKT_XT_FCSDIS V_CPL_TX_PKT_XT_FCSDIS(1U)
+
+#define S_CPL_TX_PKT_XT_STATSPECIAL 20
+#define M_CPL_TX_PKT_XT_STATSPECIAL 0x1
+#define V_CPL_TX_PKT_XT_STATSPECIAL(x) ((x) << S_CPL_TX_PKT_XT_STATSPECIAL)
+#define G_CPL_TX_PKT_XT_STATSPECIAL(x) \
+ (((x) >> S_CPL_TX_PKT_XT_STATSPECIAL) & M_CPL_TX_PKT_XT_STATSPECIAL)
+#define F_CPL_TX_PKT_XT_STATSPECIAL V_CPL_TX_PKT_XT_STATSPECIAL(1U)
+
+#define S_CPL_TX_PKT_XT_INTERFACE 16
+#define M_CPL_TX_PKT_XT_INTERFACE 0xf
+#define V_CPL_TX_PKT_XT_INTERFACE(x) ((x) << S_CPL_TX_PKT_XT_INTERFACE)
+#define G_CPL_TX_PKT_XT_INTERFACE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_INTERFACE) & M_CPL_TX_PKT_XT_INTERFACE)
+
+#define S_CPL_TX_PKT_XT_OVLAN 15
+#define M_CPL_TX_PKT_XT_OVLAN 0x1
+#define V_CPL_TX_PKT_XT_OVLAN(x) ((x) << S_CPL_TX_PKT_XT_OVLAN)
+#define G_CPL_TX_PKT_XT_OVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLAN) & M_CPL_TX_PKT_XT_OVLAN)
+#define F_CPL_TX_PKT_XT_OVLAN V_CPL_TX_PKT_XT_OVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_OVLANIDX 12
+#define M_CPL_TX_PKT_XT_OVLANIDX 0x7
+#define V_CPL_TX_PKT_XT_OVLANIDX(x) ((x) << S_CPL_TX_PKT_XT_OVLANIDX)
+#define G_CPL_TX_PKT_XT_OVLANIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OVLANIDX) & M_CPL_TX_PKT_XT_OVLANIDX)
+
+#define S_CPL_TX_PKT_XT_VFVALID 11
+#define M_CPL_TX_PKT_XT_VFVALID 0x1
+#define V_CPL_TX_PKT_XT_VFVALID(x) ((x) << S_CPL_TX_PKT_XT_VFVALID)
+#define G_CPL_TX_PKT_XT_VFVALID(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VFVALID) & M_CPL_TX_PKT_XT_VFVALID)
+#define F_CPL_TX_PKT_XT_VFVALID V_CPL_TX_PKT_XT_VFVALID(1U)
+
+#define S_CPL_TX_PKT_XT_PF 8
+#define M_CPL_TX_PKT_XT_PF 0x7
+#define V_CPL_TX_PKT_XT_PF(x) ((x) << S_CPL_TX_PKT_XT_PF)
+#define G_CPL_TX_PKT_XT_PF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_PF) & M_CPL_TX_PKT_XT_PF)
+
+#define S_CPL_TX_PKT_XT_VF 0
+#define M_CPL_TX_PKT_XT_VF 0xff
+#define V_CPL_TX_PKT_XT_VF(x) ((x) << S_CPL_TX_PKT_XT_VF)
+#define G_CPL_TX_PKT_XT_VF(x) \
+ (((x) >> S_CPL_TX_PKT_XT_VF) & M_CPL_TX_PKT_XT_VF)
+
+/* cpl_tx_pkt_xt.ctrl1 fields */
+#define S_CPL_TX_PKT_XT_L4CHKDISABLE 31
+#define M_CPL_TX_PKT_XT_L4CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L4CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L4CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L4CHKDISABLE) & M_CPL_TX_PKT_XT_L4CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L4CHKDISABLE V_CPL_TX_PKT_XT_L4CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_L3CHKDISABLE 30
+#define M_CPL_TX_PKT_XT_L3CHKDISABLE 0x1
+#define V_CPL_TX_PKT_XT_L3CHKDISABLE(x) ((x) << S_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define G_CPL_TX_PKT_XT_L3CHKDISABLE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_L3CHKDISABLE) & M_CPL_TX_PKT_XT_L3CHKDISABLE)
+#define F_CPL_TX_PKT_XT_L3CHKDISABLE V_CPL_TX_PKT_XT_L3CHKDISABLE(1U)
+
+#define S_CPL_TX_PKT_XT_OUTL4CHKEN 29
+#define M_CPL_TX_PKT_XT_OUTL4CHKEN 0x1
+#define V_CPL_TX_PKT_XT_OUTL4CHKEN(x) ((x) << S_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define G_CPL_TX_PKT_XT_OUTL4CHKEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_OUTL4CHKEN) & M_CPL_TX_PKT_XT_OUTL4CHKEN)
+#define F_CPL_TX_PKT_XT_OUTL4CHKEN V_CPL_TX_PKT_XT_OUTL4CHKEN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLAN 28
+#define M_CPL_TX_PKT_XT_IVLAN 0x1
+#define V_CPL_TX_PKT_XT_IVLAN(x) ((x) << S_CPL_TX_PKT_XT_IVLAN)
+#define G_CPL_TX_PKT_XT_IVLAN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLAN) & M_CPL_TX_PKT_XT_IVLAN)
+#define F_CPL_TX_PKT_XT_IVLAN V_CPL_TX_PKT_XT_IVLAN(1U)
+
+#define S_CPL_TX_PKT_XT_IVLANTAG 12
+#define M_CPL_TX_PKT_XT_IVLANTAG 0xffff
+#define V_CPL_TX_PKT_XT_IVLANTAG(x) ((x) << S_CPL_TX_PKT_XT_IVLANTAG)
+#define G_CPL_TX_PKT_XT_IVLANTAG(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IVLANTAG) & M_CPL_TX_PKT_XT_IVLANTAG)
+
+#define S_CPL_TX_PKT_XT_CHKTYPE 8
+#define M_CPL_TX_PKT_XT_CHKTYPE 0xf
+#define V_CPL_TX_PKT_XT_CHKTYPE(x) ((x) << S_CPL_TX_PKT_XT_CHKTYPE)
+#define G_CPL_TX_PKT_XT_CHKTYPE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKTYPE) & M_CPL_TX_PKT_XT_CHKTYPE)
+
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI 0xff
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI)
+
+#define S_CPL_TX_PKT_XT_ETHHDRLEN 0
+#define M_CPL_TX_PKT_XT_ETHHDRLEN 0xff
+#define V_CPL_TX_PKT_XT_ETHHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_ETHHDRLEN)
+#define G_CPL_TX_PKT_XT_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ETHHDRLEN) & M_CPL_TX_PKT_XT_ETHHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKINSMODE 6
+#define M_CPL_TX_PKT_XT_ROCECHKINSMODE 0x3
+#define V_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKINSMODE)
+#define G_CPL_TX_PKT_XT_ROCECHKINSMODE(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKINSMODE) & M_CPL_TX_PKT_XT_ROCECHKINSMODE)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI 0x3f
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_HI)
+
+#define S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 30
+#define M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO 0x3
+#define V_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+#define G_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO) & \
+ M_CPL_TX_PKT_XT_ROCEIPHDRLEN_LO)
+
+/* cpl_tx_pkt_xt.ctrl2 fields */
+#define S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 30
+#define M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO 0x3
+#define V_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+#define G_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO) & \
+ M_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO)
+
+#define S_CPL_TX_PKT_XT_CHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_CHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTARTOFFSET) & M_CPL_TX_PKT_XT_CHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPHDRLEN 20
+#define M_CPL_TX_PKT_XT_IPHDRLEN 0xfff
+#define V_CPL_TX_PKT_XT_IPHDRLEN(x) ((x) << S_CPL_TX_PKT_XT_IPHDRLEN)
+#define G_CPL_TX_PKT_XT_IPHDRLEN(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPHDRLEN) & M_CPL_TX_PKT_XT_IPHDRLEN)
+
+#define S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 20
+#define M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET 0x3ff
+#define V_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+#define G_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET) & \
+ M_CPL_TX_PKT_XT_ROCECHKSTARTOFFSET)
+
+#define S_CPL_TX_PKT_XT_CHKSTOPOFFSET 12
+#define M_CPL_TX_PKT_XT_CHKSTOPOFFSET 0xff
+#define V_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ ((x) << S_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+#define G_CPL_TX_PKT_XT_CHKSTOPOFFSET(x) \
+ (((x) >> S_CPL_TX_PKT_XT_CHKSTOPOFFSET) & M_CPL_TX_PKT_XT_CHKSTOPOFFSET)
+
+#define S_CPL_TX_PKT_XT_IPSECIDX 0
+#define M_CPL_TX_PKT_XT_IPSECIDX 0xfff
+#define V_CPL_TX_PKT_XT_IPSECIDX(x) ((x) << S_CPL_TX_PKT_XT_IPSECIDX)
+#define G_CPL_TX_PKT_XT_IPSECIDX(x) \
+ (((x) >> S_CPL_TX_PKT_XT_IPSECIDX) & M_CPL_TX_PKT_XT_IPSECIDX)
+
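/*
 * Sketch of recombining the split checksum-insert offset: the _HI part
 * (8 bits) lives in ctrl1 and the _LO part (2 bits) in ctrl2, so the
 * assumed full 10-bit offset is (hi << 2) | lo.  Callers pass the two
 * control words already converted to host order.
 */
static inline unsigned int
t7_chk_insert_offset(uint32_t ctrl1, uint32_t ctrl2)
{
	return ((G_CPL_TX_PKT_XT_CHKINSRTOFFSET_HI(ctrl1) << 2) |
	    G_CPL_TX_PKT_XT_CHKINSRTOFFSET_LO(ctrl2));
}

/* cpl_tx_tnl_lso fields */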
+#define S_CPL_TX_TNL_LSO_BTH_OPCODE 24
+#define M_CPL_TX_TNL_LSO_BTH_OPCODE 0xff
+#define V_CPL_TX_TNL_LSO_BTH_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_BTH_OPCODE)
+#define G_CPL_TX_TNL_LSO_BTH_OPCODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_BTH_OPCODE) & \
+ M_CPL_TX_TNL_LSO_BTH_OPCODE)
+
+#define S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0
+#define M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN 0xffffff
+#define V_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+#define G_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN) & \
+ M_CPL_TX_TNL_LSO_TCPSEQOFFSET_PSN)
+
+#define S_CPL_TX_TNL_LSO_MSS_TVER 8
+#define M_CPL_TX_TNL_LSO_MSS_TVER 0xf
+#define V_CPL_TX_TNL_LSO_MSS_TVER(x) ((x) << S_CPL_TX_TNL_LSO_MSS_TVER)
+#define G_CPL_TX_TNL_LSO_MSS_TVER(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_TVER) & M_CPL_TX_TNL_LSO_MSS_TVER)
+
+#define S_CPL_TX_TNL_LSO_MSS_M 7
+#define M_CPL_TX_TNL_LSO_MSS_M 0x1
+#define V_CPL_TX_TNL_LSO_MSS_M(x) ((x) << S_CPL_TX_TNL_LSO_MSS_M)
+#define G_CPL_TX_TNL_LSO_MSS_M(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_M) & M_CPL_TX_TNL_LSO_MSS_M)
+
+#define S_CPL_TX_TNL_LSO_MSS_PMTU 4
+#define M_CPL_TX_TNL_LSO_MSS_PMTU 0x7
+#define V_CPL_TX_TNL_LSO_MSS_PMTU(x) ((x) << S_CPL_TX_TNL_LSO_MSS_PMTU)
+#define G_CPL_TX_TNL_LSO_MSS_PMTU(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_PMTU) & M_CPL_TX_TNL_LSO_MSS_PMTU)
+
+#define S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 3
+#define M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR 0x1
+#define V_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ ((x) << S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+#define G_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR) & M_CPL_TX_TNL_LSO_MSS_RR_MSN_INCR)
+
+#define S_CPL_TX_TNL_LSO_MSS_ACKREQ 1
+#define M_CPL_TX_TNL_LSO_MSS_ACKREQ 0x3
+#define V_CPL_TX_TNL_LSO_MSS_ACKREQ(x) ((x) << S_CPL_TX_TNL_LSO_MSS_ACKREQ)
+#define G_CPL_TX_TNL_LSO_MSS_ACKREQ(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_ACKREQ) & M_CPL_TX_TNL_LSO_MSS_ACKREQ)
+
+#define S_CPL_TX_TNL_LSO_MSS_SE 0
+#define M_CPL_TX_TNL_LSO_MSS_SE 0x1
+#define V_CPL_TX_TNL_LSO_MSS_SE(x) ((x) << S_CPL_TX_TNL_LSO_MSS_SE)
+#define G_CPL_TX_TNL_LSO_MSS_SE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_MSS_SE) & M_CPL_TX_TNL_LSO_MSS_SE)
+
struct cpl_tx_pkt_lso_core {
__be32 lso_ctrl;
__be16 ipid_ofst;
@@ -1600,6 +2180,100 @@ struct cpl_tx_data_iso {
(((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \
M_CPL_TX_DATA_ISO_SEGLEN_OFFSET)
+struct cpl_t7_tx_data_iso {
+ __be32 op_to_scsi;
+ __u8 nvme_tcp_pkd;
+ __u8 ahs;
+ __be16 mpdu;
+ __be32 burst;
+ __be32 size;
+ __be32 num_pi_bytes_seglen_offset;
+ __be32 datasn_offset;
+ __be32 buffer_offset;
+ __be32 reserved3;
+};
+
+#define S_CPL_T7_TX_DATA_ISO_OPCODE 24
+#define M_CPL_T7_TX_DATA_ISO_OPCODE 0xff
+#define V_CPL_T7_TX_DATA_ISO_OPCODE(x) ((x) << S_CPL_T7_TX_DATA_ISO_OPCODE)
+#define G_CPL_T7_TX_DATA_ISO_OPCODE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_OPCODE) & M_CPL_T7_TX_DATA_ISO_OPCODE)
+
+#define S_CPL_T7_TX_DATA_ISO_FIRST 23
+#define M_CPL_T7_TX_DATA_ISO_FIRST 0x1
+#define V_CPL_T7_TX_DATA_ISO_FIRST(x) ((x) << S_CPL_T7_TX_DATA_ISO_FIRST)
+#define G_CPL_T7_TX_DATA_ISO_FIRST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_FIRST) & M_CPL_T7_TX_DATA_ISO_FIRST)
+#define F_CPL_T7_TX_DATA_ISO_FIRST V_CPL_T7_TX_DATA_ISO_FIRST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_LAST 22
+#define M_CPL_T7_TX_DATA_ISO_LAST 0x1
+#define V_CPL_T7_TX_DATA_ISO_LAST(x) ((x) << S_CPL_T7_TX_DATA_ISO_LAST)
+#define G_CPL_T7_TX_DATA_ISO_LAST(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_LAST) & M_CPL_T7_TX_DATA_ISO_LAST)
+#define F_CPL_T7_TX_DATA_ISO_LAST V_CPL_T7_TX_DATA_ISO_LAST(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_CPLHDRLEN 21
+#define M_CPL_T7_TX_DATA_ISO_CPLHDRLEN 0x1
+#define V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define G_CPL_T7_TX_DATA_ISO_CPLHDRLEN(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_CPLHDRLEN) & M_CPL_T7_TX_DATA_ISO_CPLHDRLEN)
+#define F_CPL_T7_TX_DATA_ISO_CPLHDRLEN V_CPL_T7_TX_DATA_ISO_CPLHDRLEN(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_HDRCRC 20
+#define M_CPL_T7_TX_DATA_ISO_HDRCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_HDRCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define G_CPL_T7_TX_DATA_ISO_HDRCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_HDRCRC) & M_CPL_T7_TX_DATA_ISO_HDRCRC)
+#define F_CPL_T7_TX_DATA_ISO_HDRCRC V_CPL_T7_TX_DATA_ISO_HDRCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_PLDCRC 19
+#define M_CPL_T7_TX_DATA_ISO_PLDCRC 0x1
+#define V_CPL_T7_TX_DATA_ISO_PLDCRC(x) ((x) << S_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define G_CPL_T7_TX_DATA_ISO_PLDCRC(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_PLDCRC) & M_CPL_T7_TX_DATA_ISO_PLDCRC)
+#define F_CPL_T7_TX_DATA_ISO_PLDCRC V_CPL_T7_TX_DATA_ISO_PLDCRC(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_IMMEDIATE 18
+#define M_CPL_T7_TX_DATA_ISO_IMMEDIATE 0x1
+#define V_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define G_CPL_T7_TX_DATA_ISO_IMMEDIATE(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_IMMEDIATE) & M_CPL_T7_TX_DATA_ISO_IMMEDIATE)
+#define F_CPL_T7_TX_DATA_ISO_IMMEDIATE \
+ V_CPL_T7_TX_DATA_ISO_IMMEDIATE(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_SCSI 16
+#define M_CPL_T7_TX_DATA_ISO_SCSI 0x3
+#define V_CPL_T7_TX_DATA_ISO_SCSI(x) ((x) << S_CPL_T7_TX_DATA_ISO_SCSI)
+#define G_CPL_T7_TX_DATA_ISO_SCSI(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_SCSI) & M_CPL_T7_TX_DATA_ISO_SCSI)
+
+#define S_CPL_T7_TX_DATA_ISO_NVME_TCP 0
+#define M_CPL_T7_TX_DATA_ISO_NVME_TCP 0x1
+#define V_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define G_CPL_T7_TX_DATA_ISO_NVME_TCP(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NVME_TCP) & M_CPL_T7_TX_DATA_ISO_NVME_TCP)
+#define F_CPL_T7_TX_DATA_ISO_NVME_TCP \
+ V_CPL_T7_TX_DATA_ISO_NVME_TCP(1U)
+
+#define S_CPL_T7_TX_DATA_ISO_NUMPIBYTES 24
+#define M_CPL_T7_TX_DATA_ISO_NUMPIBYTES 0xff
+#define V_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+#define G_CPL_T7_TX_DATA_ISO_NUMPIBYTES(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_NUMPIBYTES) & M_CPL_T7_TX_DATA_ISO_NUMPIBYTES)
+
+#define S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0
+#define M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET 0xffffff
+#define V_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ ((x) << S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+#define G_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET(x) \
+ (((x) >> S_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET) & \
+ M_CPL_T7_TX_DATA_ISO_DATASEGLENOFFSET)
+
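/*
 * Sketch: building cpl_t7_tx_data_iso.op_to_scsi.  The opcode, the
 * first/last segment flags, and the two-bit SCSI field all share one
 * 32-bit word; the argument values are placeholders.
 */
static inline __be32
t7_tx_data_iso_ctrl(unsigned int opcode, int first, int last,
    unsigned int scsi)
{
	uint32_t w;

	w = V_CPL_T7_TX_DATA_ISO_OPCODE(opcode) |
	    V_CPL_T7_TX_DATA_ISO_SCSI(scsi & M_CPL_T7_TX_DATA_ISO_SCSI);
	if (first)
		w |= F_CPL_T7_TX_DATA_ISO_FIRST;
	if (last)
		w |= F_CPL_T7_TX_DATA_ISO_LAST;
	return (htobe32(w));
}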
struct cpl_iscsi_hdr {
RSS_HDR
union opcode_tid ot;
@@ -2324,6 +2998,18 @@ struct cpl_l2t_write_req {
#define V_L2T_W_NOREPLY(x) ((x) << S_L2T_W_NOREPLY)
#define F_L2T_W_NOREPLY V_L2T_W_NOREPLY(1U)
+
+/* cpl_l2t_write_req.vlan fields */
+#define S_L2T_VLANTAG 0
+#define M_L2T_VLANTAG 0xFFF
+#define V_L2T_VLANTAG(x) ((x) << S_L2T_VLANTAG)
+#define G_L2T_VLANTAG(x) (((x) >> S_L2T_VLANTAG) & M_L2T_VLANTAG)
+
+#define S_L2T_VLANPRIO 13
+#define M_L2T_VLANPRIO 0x7
+#define V_L2T_VLANPRIO(x) ((x) << S_L2T_VLANPRIO)
+#define G_L2T_VLANPRIO(x) (((x) >> S_L2T_VLANPRIO) & M_L2T_VLANPRIO)
+
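/*
 * Sketch: composing cpl_l2t_write_req.vlan from an 802.1Q tag and
 * priority.  Bit 12 (the DEI/CFI bit) sits between the two fields and
 * is left clear here.
 */
static inline uint16_t
l2t_vlan_word(unsigned int tag, unsigned int prio)
{
	return (V_L2T_VLANTAG(tag & M_L2T_VLANTAG) |
	    V_L2T_VLANPRIO(prio & M_L2T_VLANPRIO));
}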
#define CPL_L2T_VLAN_NONE 0xfff
struct cpl_l2t_write_rpl {
@@ -2400,6 +3086,175 @@ struct cpl_srq_table_rpl {
#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX)
#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX)
+struct cpl_t7_srq_table_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_REQ_NOREPLY 31
+#define M_CPL_T7_SRQ_TABLE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define G_CPL_T7_SRQ_TABLE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_NOREPLY) & M_CPL_T7_SRQ_TABLE_REQ_NOREPLY)
+#define F_CPL_T7_SRQ_TABLE_REQ_NOREPLY \
+ V_CPL_T7_SRQ_TABLE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_WRITE 30
+#define M_CPL_T7_SRQ_TABLE_REQ_WRITE 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_WRITE(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define G_CPL_T7_SRQ_TABLE_REQ_WRITE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_WRITE) & M_CPL_T7_SRQ_TABLE_REQ_WRITE)
+#define F_CPL_T7_SRQ_TABLE_REQ_WRITE V_CPL_T7_SRQ_TABLE_REQ_WRITE(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INCR 28
+#define M_CPL_T7_SRQ_TABLE_REQ_INCR 0x3
+#define V_CPL_T7_SRQ_TABLE_REQ_INCR(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INCR)
+#define G_CPL_T7_SRQ_TABLE_REQ_INCR(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INCR) & M_CPL_T7_SRQ_TABLE_REQ_INCR)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_OVER 24
+#define M_CPL_T7_SRQ_TABLE_REQ_OVER 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_OVER(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_OVER)
+#define G_CPL_T7_SRQ_TABLE_REQ_OVER(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_OVER) & M_CPL_T7_SRQ_TABLE_REQ_OVER)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 23
+#define M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD 0x1
+#define V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define G_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_LIMITUPD) & M_CPL_T7_SRQ_TABLE_REQ_LIMITUPD)
+#define F_CPL_T7_SRQ_TABLE_REQ_LIMITUPD V_CPL_T7_SRQ_TABLE_REQ_LIMITUPD(1U)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_REQ_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_REQ_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_REQ_INDEX)
+#define G_CPL_T7_SRQ_TABLE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_INDEX) & M_CPL_T7_SRQ_TABLE_REQ_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_REQ_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_REQ_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUELEN) & M_CPL_T7_SRQ_TABLE_REQ_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_REQ_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_REQ_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_QUEBASE) & M_CPL_T7_SRQ_TABLE_REQ_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_REQ_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_CURMSN) & M_CPL_T7_SRQ_TABLE_REQ_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_REQ_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_REQ_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_REQ_MAXMSN) & M_CPL_T7_SRQ_TABLE_REQ_MAXMSN)
+
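/*
 * Sketch: the current and maximum MSN are 16-bit halves of
 * cpl_t7_srq_table_req.curmsn_maxmsn; a write request might fill the
 * word like this (values are placeholders).
 */
static inline __be32
t7_srq_msn_word(unsigned int curmsn, unsigned int maxmsn)
{
	return (htobe32(V_CPL_T7_SRQ_TABLE_REQ_CURMSN(curmsn &
	    M_CPL_T7_SRQ_TABLE_REQ_CURMSN) |
	    V_CPL_T7_SRQ_TABLE_REQ_MAXMSN(maxmsn &
	    M_CPL_T7_SRQ_TABLE_REQ_MAXMSN)));
}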
+struct cpl_t7_srq_table_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 status_index;
+ __be16 srqlimit_pkd;
+ __be16 cqid;
+ __be16 xdid;
+ __be16 pdid;
+ __be32 quelen_quebase;
+ __be32 curmsn_maxmsn;
+};
+
+#define S_CPL_T7_SRQ_TABLE_RPL_STATUS 24
+#define M_CPL_T7_SRQ_TABLE_RPL_STATUS 0xff
+#define V_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_STATUS)
+#define G_CPL_T7_SRQ_TABLE_RPL_STATUS(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_STATUS) & M_CPL_T7_SRQ_TABLE_RPL_STATUS)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_INDEX 0
+#define M_CPL_T7_SRQ_TABLE_RPL_INDEX 0x3ff
+#define V_CPL_T7_SRQ_TABLE_RPL_INDEX(x) ((x) << S_CPL_T7_SRQ_TABLE_RPL_INDEX)
+#define G_CPL_T7_SRQ_TABLE_RPL_INDEX(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_INDEX) & M_CPL_T7_SRQ_TABLE_RPL_INDEX)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0
+#define M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT 0x3f
+#define V_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+#define G_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT) & M_CPL_T7_SRQ_TABLE_RPL_SRQLIMIT)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUELEN 28
+#define M_CPL_T7_SRQ_TABLE_RPL_QUELEN 0xf
+#define V_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUELEN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUELEN) & M_CPL_T7_SRQ_TABLE_RPL_QUELEN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0
+#define M_CPL_T7_SRQ_TABLE_RPL_QUEBASE 0x3ffffff
+#define V_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+#define G_CPL_T7_SRQ_TABLE_RPL_QUEBASE(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_QUEBASE) & M_CPL_T7_SRQ_TABLE_RPL_QUEBASE)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_CURMSN 16
+#define M_CPL_T7_SRQ_TABLE_RPL_CURMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_CURMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_CURMSN) & M_CPL_T7_SRQ_TABLE_RPL_CURMSN)
+
+#define S_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0
+#define M_CPL_T7_SRQ_TABLE_RPL_MAXMSN 0xffff
+#define V_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ ((x) << S_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+#define G_CPL_T7_SRQ_TABLE_RPL_MAXMSN(x) \
+ (((x) >> S_CPL_T7_SRQ_TABLE_RPL_MAXMSN) & M_CPL_T7_SRQ_TABLE_RPL_MAXMSN)
+
+struct cpl_rdma_async_event {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 eventinfo;
+};
+
+#define S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 16
+#define M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE 0xf
+#define V_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ ((x) << S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+#define G_CPL_RDMA_ASYNC_EVENT_EVENTTYPE(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_EVENTTYPE) & \
+ M_CPL_RDMA_ASYNC_EVENT_EVENTTYPE)
+
+#define S_CPL_RDMA_ASYNC_EVENT_INDEX 0
+#define M_CPL_RDMA_ASYNC_EVENT_INDEX 0xffff
+#define V_CPL_RDMA_ASYNC_EVENT_INDEX(x) ((x) << S_CPL_RDMA_ASYNC_EVENT_INDEX)
+#define G_CPL_RDMA_ASYNC_EVENT_INDEX(x) \
+ (((x) >> S_CPL_RDMA_ASYNC_EVENT_INDEX) & M_CPL_RDMA_ASYNC_EVENT_INDEX)
+
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2479,6 +3334,118 @@ struct cpl_smt_read_rpl {
#define V_SMTW_VF_VLD(x) ((x) << S_SMTW_VF_VLD)
#define F_SMTW_VF_VLD V_SMTW_VF_VLD(1U)
+struct cpl_t7_smt_write_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_mtu;
+ union smt_write_req {
+ struct smt_write_req_pfvf {
+ __be64 tagvalue;
+ __be32 pfvf_smac_hi;
+ __be32 smac_lo;
+ __be64 tagext;
+ } pfvf;
+ struct smt_write_req_ipv4 {
+ __be32 srcipv4;
+ __be32 destipv4;
+ } ipv4;
+ struct smt_write_req_ipv6 {
+ __be64 ipv6ms;
+ __be64 ipv6ls;
+ } ipv6;
+ } u;
+};
+
+#define S_CPL_T7_SMT_WRITE_REQ_NOREPLY 31
+#define M_CPL_T7_SMT_WRITE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define G_CPL_T7_SMT_WRITE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_NOREPLY) & M_CPL_T7_SMT_WRITE_REQ_NOREPLY)
+#define F_CPL_T7_SMT_WRITE_REQ_NOREPLY \
+ V_CPL_T7_SMT_WRITE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGINSERT 30
+#define M_CPL_T7_SMT_WRITE_REQ_TAGINSERT 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGINSERT(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGINSERT) & \
+ M_CPL_T7_SMT_WRITE_REQ_TAGINSERT)
+#define F_CPL_T7_SMT_WRITE_REQ_TAGINSERT \
+ V_CPL_T7_SMT_WRITE_REQ_TAGINSERT(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_TAGTYPE 28
+#define M_CPL_T7_SMT_WRITE_REQ_TAGTYPE 0x3
+#define V_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+#define G_CPL_T7_SMT_WRITE_REQ_TAGTYPE(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_TAGTYPE) & M_CPL_T7_SMT_WRITE_REQ_TAGTYPE)
+
+#define S_CPL_T7_SMT_WRITE_REQ_INDEX 20
+#define M_CPL_T7_SMT_WRITE_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_WRITE_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_INDEX)
+#define G_CPL_T7_SMT_WRITE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_INDEX) & M_CPL_T7_SMT_WRITE_REQ_INDEX)
+
+#define S_CPL_T7_SMT_WRITE_REQ_OVLAN 16
+#define M_CPL_T7_SMT_WRITE_REQ_OVLAN 0xf
+#define V_CPL_T7_SMT_WRITE_REQ_OVLAN(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_OVLAN)
+#define G_CPL_T7_SMT_WRITE_REQ_OVLAN(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_OVLAN) & M_CPL_T7_SMT_WRITE_REQ_OVLAN)
+
+#define S_CPL_T7_SMT_WRITE_REQ_IPSEC 14
+#define M_CPL_T7_SMT_WRITE_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_WRITE_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define G_CPL_T7_SMT_WRITE_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_IPSEC) & M_CPL_T7_SMT_WRITE_REQ_IPSEC)
+#define F_CPL_T7_SMT_WRITE_REQ_IPSEC V_CPL_T7_SMT_WRITE_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_WRITE_REQ_MTU 0
+#define M_CPL_T7_SMT_WRITE_REQ_MTU 0x3fff
+#define V_CPL_T7_SMT_WRITE_REQ_MTU(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_MTU)
+#define G_CPL_T7_SMT_WRITE_REQ_MTU(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_MTU) & M_CPL_T7_SMT_WRITE_REQ_MTU)
+
+#define S_CPL_T7_SMT_WRITE_REQ_PFVF 16
+#define M_CPL_T7_SMT_WRITE_REQ_PFVF 0xfff
+#define V_CPL_T7_SMT_WRITE_REQ_PFVF(x) ((x) << S_CPL_T7_SMT_WRITE_REQ_PFVF)
+#define G_CPL_T7_SMT_WRITE_REQ_PFVF(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_PFVF) & M_CPL_T7_SMT_WRITE_REQ_PFVF)
+
+#define S_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0
+#define M_CPL_T7_SMT_WRITE_REQ_SMAC_HI 0xffff
+#define V_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ ((x) << S_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+#define G_CPL_T7_SMT_WRITE_REQ_SMAC_HI(x) \
+ (((x) >> S_CPL_T7_SMT_WRITE_REQ_SMAC_HI) & M_CPL_T7_SMT_WRITE_REQ_SMAC_HI)
+
+struct cpl_t7_smt_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 index_to_ipsecidx;
+};
+
+#define S_CPL_T7_SMT_READ_REQ_INDEX 20
+#define M_CPL_T7_SMT_READ_REQ_INDEX 0xff
+#define V_CPL_T7_SMT_READ_REQ_INDEX(x) ((x) << S_CPL_T7_SMT_READ_REQ_INDEX)
+#define G_CPL_T7_SMT_READ_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_INDEX) & M_CPL_T7_SMT_READ_REQ_INDEX)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSEC 14
+#define M_CPL_T7_SMT_READ_REQ_IPSEC 0x1
+#define V_CPL_T7_SMT_READ_REQ_IPSEC(x) ((x) << S_CPL_T7_SMT_READ_REQ_IPSEC)
+#define G_CPL_T7_SMT_READ_REQ_IPSEC(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSEC) & M_CPL_T7_SMT_READ_REQ_IPSEC)
+#define F_CPL_T7_SMT_READ_REQ_IPSEC V_CPL_T7_SMT_READ_REQ_IPSEC(1U)
+
+#define S_CPL_T7_SMT_READ_REQ_IPSECIDX 0
+#define M_CPL_T7_SMT_READ_REQ_IPSECIDX 0x1fff
+#define V_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ ((x) << S_CPL_T7_SMT_READ_REQ_IPSECIDX)
+#define G_CPL_T7_SMT_READ_REQ_IPSECIDX(x) \
+ (((x) >> S_CPL_T7_SMT_READ_REQ_IPSECIDX) & M_CPL_T7_SMT_READ_REQ_IPSECIDX)
+
struct cpl_tag_write_req {
WR_HDR;
union opcode_tid ot;
@@ -2611,6 +3578,352 @@ struct cpl_pkt_notify {
#define V_NTFY_T5_ETHHDR_LEN(x) ((x) << S_NTFY_T5_ETHHDR_LEN)
#define G_NTFY_T5_ETHHDR_LEN(x) (((x) >> S_NTFY_T5_ETHHDR_LEN) & M_NTFY_T5_ETHHDR_LEN)
+struct cpl_t7_pkt_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r1;
+ __be16 length;
+ __be32 ethhdrlen_to_macindex;
+ __be32 lineinfo;
+};
+
+#define S_CPL_T7_PKT_NOTIFY_ETHHDRLEN 24
+#define M_CPL_T7_PKT_NOTIFY_ETHHDRLEN 0xff
+#define V_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_ETHHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_ETHHDRLEN) & M_CPL_T7_PKT_NOTIFY_ETHHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_IPHDRLEN 18
+#define M_CPL_T7_PKT_NOTIFY_IPHDRLEN 0x3f
+#define V_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) ((x) << S_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_IPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_IPHDRLEN) & M_CPL_T7_PKT_NOTIFY_IPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_TCPHDRLEN 14
+#define M_CPL_T7_PKT_NOTIFY_TCPHDRLEN 0xf
+#define V_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+#define G_CPL_T7_PKT_NOTIFY_TCPHDRLEN(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_TCPHDRLEN) & M_CPL_T7_PKT_NOTIFY_TCPHDRLEN)
+
+#define S_CPL_T7_PKT_NOTIFY_INTERFACE 10
+#define M_CPL_T7_PKT_NOTIFY_INTERFACE 0xf
+#define V_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ ((x) << S_CPL_T7_PKT_NOTIFY_INTERFACE)
+#define G_CPL_T7_PKT_NOTIFY_INTERFACE(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_INTERFACE) & M_CPL_T7_PKT_NOTIFY_INTERFACE)
+
+#define S_CPL_T7_PKT_NOTIFY_MACINDEX 0
+#define M_CPL_T7_PKT_NOTIFY_MACINDEX 0x1ff
+#define V_CPL_T7_PKT_NOTIFY_MACINDEX(x) ((x) << S_CPL_T7_PKT_NOTIFY_MACINDEX)
+#define G_CPL_T7_PKT_NOTIFY_MACINDEX(x) \
+ (((x) >> S_CPL_T7_PKT_NOTIFY_MACINDEX) & M_CPL_T7_PKT_NOTIFY_MACINDEX)
+
+struct cpl_rdma_cqe {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_RSSCTRL 16
+#define M_CPL_RDMA_CQE_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_RSSCTRL)
+#define G_CPL_RDMA_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_RSSCTRL) & M_CPL_RDMA_CQE_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_CQID 0
+#define M_CPL_RDMA_CQE_CQID 0xffff
+#define V_CPL_RDMA_CQE_CQID(x) ((x) << S_CPL_RDMA_CQE_CQID)
+#define G_CPL_RDMA_CQE_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQID) & M_CPL_RDMA_CQE_CQID)
+
+#define S_CPL_RDMA_CQE_TID 8
+#define M_CPL_RDMA_CQE_TID 0xfffff
+#define V_CPL_RDMA_CQE_TID(x) ((x) << S_CPL_RDMA_CQE_TID)
+#define G_CPL_RDMA_CQE_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_TID) & M_CPL_RDMA_CQE_TID)
+
+#define S_CPL_RDMA_CQE_FLITCNT 0
+#define M_CPL_RDMA_CQE_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_FLITCNT)
+#define G_CPL_RDMA_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FLITCNT) & M_CPL_RDMA_CQE_FLITCNT)
+
+#define S_CPL_RDMA_CQE_QPID 12
+#define M_CPL_RDMA_CQE_QPID 0xfffff
+#define V_CPL_RDMA_CQE_QPID(x) ((x) << S_CPL_RDMA_CQE_QPID)
+#define G_CPL_RDMA_CQE_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_QPID) & M_CPL_RDMA_CQE_QPID)
+
+#define S_CPL_RDMA_CQE_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_GENERATION_BIT) & M_CPL_RDMA_CQE_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_GENERATION_BIT V_CPL_RDMA_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_STATUS 5
+#define M_CPL_RDMA_CQE_STATUS 0x1f
+#define V_CPL_RDMA_CQE_STATUS(x) ((x) << S_CPL_RDMA_CQE_STATUS)
+#define G_CPL_RDMA_CQE_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_STATUS) & M_CPL_RDMA_CQE_STATUS)
+
+#define S_CPL_RDMA_CQE_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_CQE_TYPE)
+#define G_CPL_RDMA_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_CQE_TYPE) & M_CPL_RDMA_CQE_CQE_TYPE)
+#define F_CPL_RDMA_CQE_CQE_TYPE V_CPL_RDMA_CQE_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_WR_TYPE 0
+#define M_CPL_RDMA_CQE_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_WR_TYPE)
+#define G_CPL_RDMA_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_WR_TYPE) & M_CPL_RDMA_CQE_WR_TYPE)
+
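/*
 * Sketch of decoding the packed word of a received cpl_rdma_cqe; the
 * getters above define the layout, and be32toh() does the wire-to-host
 * conversion.
 */
static inline void
rdma_cqe_decode(const struct cpl_rdma_cqe *cqe, unsigned int *qpid,
    unsigned int *status, unsigned int *wr_type)
{
	uint32_t w = be32toh(cqe->qpid_to_wr_type);

	*qpid = G_CPL_RDMA_CQE_QPID(w);
	*status = G_CPL_RDMA_CQE_STATUS(w);
	*wr_type = G_CPL_RDMA_CQE_WR_TYPE(w);
}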
+struct cpl_rdma_cqe_srq {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 r3;
+ __be32 rqe;
+};
+
+#define S_CPL_RDMA_CQE_SRQ_OPCODE 24
+#define M_CPL_RDMA_CQE_SRQ_OPCODE 0xff
+#define V_CPL_RDMA_CQE_SRQ_OPCODE(x) ((x) << S_CPL_RDMA_CQE_SRQ_OPCODE)
+#define G_CPL_RDMA_CQE_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_OPCODE) & M_CPL_RDMA_CQE_SRQ_OPCODE)
+
+#define S_CPL_RDMA_CQE_SRQ_RSSCTRL 16
+#define M_CPL_RDMA_CQE_SRQ_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_SRQ_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_SRQ_RSSCTRL)
+#define G_CPL_RDMA_CQE_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_RSSCTRL) & M_CPL_RDMA_CQE_SRQ_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_SRQ_CQID 0
+#define M_CPL_RDMA_CQE_SRQ_CQID 0xffff
+#define V_CPL_RDMA_CQE_SRQ_CQID(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQID)
+#define G_CPL_RDMA_CQE_SRQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQID) & M_CPL_RDMA_CQE_SRQ_CQID)
+
+#define S_CPL_RDMA_CQE_SRQ_TID 8
+#define M_CPL_RDMA_CQE_SRQ_TID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_TID(x) ((x) << S_CPL_RDMA_CQE_SRQ_TID)
+#define G_CPL_RDMA_CQE_SRQ_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_TID) & M_CPL_RDMA_CQE_SRQ_TID)
+
+#define S_CPL_RDMA_CQE_SRQ_FLITCNT 0
+#define M_CPL_RDMA_CQE_SRQ_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_SRQ_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_SRQ_FLITCNT)
+#define G_CPL_RDMA_CQE_SRQ_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_FLITCNT) & M_CPL_RDMA_CQE_SRQ_FLITCNT)
+
+#define S_CPL_RDMA_CQE_SRQ_QPID 12
+#define M_CPL_RDMA_CQE_SRQ_QPID 0xfffff
+#define V_CPL_RDMA_CQE_SRQ_QPID(x) ((x) << S_CPL_RDMA_CQE_SRQ_QPID)
+#define G_CPL_RDMA_CQE_SRQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_QPID) & M_CPL_RDMA_CQE_SRQ_QPID)
+
+#define S_CPL_RDMA_CQE_SRQ_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_SRQ_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_SRQ_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_SRQ_GENERATION_BIT \
+ V_CPL_RDMA_CQE_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_STATUS 5
+#define M_CPL_RDMA_CQE_SRQ_STATUS 0x1f
+#define V_CPL_RDMA_CQE_SRQ_STATUS(x) ((x) << S_CPL_RDMA_CQE_SRQ_STATUS)
+#define G_CPL_RDMA_CQE_SRQ_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_STATUS) & M_CPL_RDMA_CQE_SRQ_STATUS)
+
+#define S_CPL_RDMA_CQE_SRQ_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_SRQ_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_CQE_TYPE) & M_CPL_RDMA_CQE_SRQ_CQE_TYPE)
+#define F_CPL_RDMA_CQE_SRQ_CQE_TYPE V_CPL_RDMA_CQE_SRQ_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_SRQ_WR_TYPE 0
+#define M_CPL_RDMA_CQE_SRQ_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_SRQ_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_SRQ_WR_TYPE)
+#define G_CPL_RDMA_CQE_SRQ_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_SRQ_WR_TYPE) & M_CPL_RDMA_CQE_SRQ_WR_TYPE)
+
+struct cpl_rdma_cqe_read_rsp {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_READ_RSP_RSSCTRL 16
+#define M_CPL_RDMA_CQE_READ_RSP_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+#define G_CPL_RDMA_CQE_READ_RSP_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_RSSCTRL) & \
+ M_CPL_RDMA_CQE_READ_RSP_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQID 0
+#define M_CPL_RDMA_CQE_READ_RSP_CQID 0xffff
+#define V_CPL_RDMA_CQE_READ_RSP_CQID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_CQID)
+#define G_CPL_RDMA_CQE_READ_RSP_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQID) & M_CPL_RDMA_CQE_READ_RSP_CQID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_TID 8
+#define M_CPL_RDMA_CQE_READ_RSP_TID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_TID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_TID)
+#define G_CPL_RDMA_CQE_READ_RSP_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_TID) & M_CPL_RDMA_CQE_READ_RSP_TID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_FLITCNT 0
+#define M_CPL_RDMA_CQE_READ_RSP_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+#define G_CPL_RDMA_CQE_READ_RSP_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_FLITCNT) & \
+ M_CPL_RDMA_CQE_READ_RSP_FLITCNT)
+
+#define S_CPL_RDMA_CQE_READ_RSP_QPID 12
+#define M_CPL_RDMA_CQE_READ_RSP_QPID 0xfffff
+#define V_CPL_RDMA_CQE_READ_RSP_QPID(x) ((x) << S_CPL_RDMA_CQE_READ_RSP_QPID)
+#define G_CPL_RDMA_CQE_READ_RSP_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_QPID) & M_CPL_RDMA_CQE_READ_RSP_QPID)
+
+#define S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT \
+ V_CPL_RDMA_CQE_READ_RSP_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_STATUS 5
+#define M_CPL_RDMA_CQE_READ_RSP_STATUS 0x1f
+#define V_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_STATUS)
+#define G_CPL_RDMA_CQE_READ_RSP_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_STATUS) & M_CPL_RDMA_CQE_READ_RSP_STATUS)
+
+#define S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_CQE_TYPE)
+#define F_CPL_RDMA_CQE_READ_RSP_CQE_TYPE V_CPL_RDMA_CQE_READ_RSP_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0
+#define M_CPL_RDMA_CQE_READ_RSP_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+#define G_CPL_RDMA_CQE_READ_RSP_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_READ_RSP_WR_TYPE) & \
+ M_CPL_RDMA_CQE_READ_RSP_WR_TYPE)
+
+struct cpl_rdma_cqe_err {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+};
+
+#define S_CPL_RDMA_CQE_ERR_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_ERR_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_RSSCTRL) & M_CPL_RDMA_CQE_ERR_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_CQID 0
+#define M_CPL_RDMA_CQE_ERR_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_CQID)
+#define G_CPL_RDMA_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQID) & M_CPL_RDMA_CQE_ERR_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_TID 8
+#define M_CPL_RDMA_CQE_ERR_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_TID)
+#define G_CPL_RDMA_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_TID) & M_CPL_RDMA_CQE_ERR_TID)
+
+#define S_CPL_RDMA_CQE_ERR_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_ERR_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_FLITCNT) & M_CPL_RDMA_CQE_ERR_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_QPID 12
+#define M_CPL_RDMA_CQE_ERR_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_QPID)
+#define G_CPL_RDMA_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_QPID) & M_CPL_RDMA_CQE_ERR_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_STATUS(x) ((x) << S_CPL_RDMA_CQE_ERR_STATUS)
+#define G_CPL_RDMA_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_STATUS) & M_CPL_RDMA_CQE_ERR_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_CQE_TYPE) & M_CPL_RDMA_CQE_ERR_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_CQE_TYPE V_CPL_RDMA_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_ERR_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_WR_TYPE) & M_CPL_RDMA_CQE_ERR_WR_TYPE)
+
+struct cpl_rdma_read_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 srq_pkd;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_READ_REQ_SRQ 0
+#define M_CPL_RDMA_READ_REQ_SRQ 0xfff
+#define V_CPL_RDMA_READ_REQ_SRQ(x) ((x) << S_CPL_RDMA_READ_REQ_SRQ)
+#define G_CPL_RDMA_READ_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_READ_REQ_SRQ) & M_CPL_RDMA_READ_REQ_SRQ)
+
struct cpl_rdma_terminate {
RSS_HDR
union opcode_tid ot;
@@ -2618,6 +3931,404 @@ struct cpl_rdma_terminate {
__be16 len;
};
+struct cpl_rdma_atomic_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_REQ_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_REQ_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_REQ_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_OPCODE)
+#define G_CPL_RDMA_ATOMIC_REQ_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_OPCODE) & M_CPL_RDMA_ATOMIC_REQ_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_REQ_SRQ 0
+#define M_CPL_RDMA_ATOMIC_REQ_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_REQ_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_REQ_SRQ)
+#define G_CPL_RDMA_ATOMIC_REQ_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_REQ_SRQ) & M_CPL_RDMA_ATOMIC_REQ_SRQ)
+
+struct cpl_rdma_atomic_rpl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 opcode_srq;
+ __be16 length;
+};
+
+#define S_CPL_RDMA_ATOMIC_RPL_OPCODE 12
+#define M_CPL_RDMA_ATOMIC_RPL_OPCODE 0xf
+#define V_CPL_RDMA_ATOMIC_RPL_OPCODE(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_OPCODE)
+#define G_CPL_RDMA_ATOMIC_RPL_OPCODE(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_OPCODE) & M_CPL_RDMA_ATOMIC_RPL_OPCODE)
+
+#define S_CPL_RDMA_ATOMIC_RPL_SRQ 0
+#define M_CPL_RDMA_ATOMIC_RPL_SRQ 0xfff
+#define V_CPL_RDMA_ATOMIC_RPL_SRQ(x) ((x) << S_CPL_RDMA_ATOMIC_RPL_SRQ)
+#define G_CPL_RDMA_ATOMIC_RPL_SRQ(x) \
+ (((x) >> S_CPL_RDMA_ATOMIC_RPL_SRQ) & M_CPL_RDMA_ATOMIC_RPL_SRQ)
+
+struct cpl_rdma_imm_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 length;
+};
+
+struct cpl_rdma_imm_data_se {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r;
+ __be16 length;
+};
+
+struct cpl_rdma_inv_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 stag;
+ __be32 cqid_pdid_hi;
+ __be32 pdid_lo_qpid;
+};
+
+#define S_CPL_RDMA_INV_REQ_CQID 8
+#define M_CPL_RDMA_INV_REQ_CQID 0xfffff
+#define V_CPL_RDMA_INV_REQ_CQID(x) ((x) << S_CPL_RDMA_INV_REQ_CQID)
+#define G_CPL_RDMA_INV_REQ_CQID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_CQID) & M_CPL_RDMA_INV_REQ_CQID)
+
+#define S_CPL_RDMA_INV_REQ_PDID_HI 0
+#define M_CPL_RDMA_INV_REQ_PDID_HI 0xff
+#define V_CPL_RDMA_INV_REQ_PDID_HI(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_HI)
+#define G_CPL_RDMA_INV_REQ_PDID_HI(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_HI) & M_CPL_RDMA_INV_REQ_PDID_HI)
+
+#define S_CPL_RDMA_INV_REQ_PDID_LO 20
+#define M_CPL_RDMA_INV_REQ_PDID_LO 0xfff
+#define V_CPL_RDMA_INV_REQ_PDID_LO(x) ((x) << S_CPL_RDMA_INV_REQ_PDID_LO)
+#define G_CPL_RDMA_INV_REQ_PDID_LO(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_PDID_LO) & M_CPL_RDMA_INV_REQ_PDID_LO)
+
+#define S_CPL_RDMA_INV_REQ_QPID 0
+#define M_CPL_RDMA_INV_REQ_QPID 0xfffff
+#define V_CPL_RDMA_INV_REQ_QPID(x) ((x) << S_CPL_RDMA_INV_REQ_QPID)
+#define G_CPL_RDMA_INV_REQ_QPID(x) \
+ (((x) >> S_CPL_RDMA_INV_REQ_QPID) & M_CPL_RDMA_INV_REQ_QPID)
+
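/*
 * Sketch (assumed layout): the 20-bit PDID of cpl_rdma_inv_req spans
 * two words -- the upper 8 bits ride in cqid_pdid_hi and the lower 12
 * bits in pdid_lo_qpid -- so reading it back means stitching the two
 * pieces together.
 */
static inline unsigned int
rdma_inv_req_pdid(const struct cpl_rdma_inv_req *req)
{
	return ((G_CPL_RDMA_INV_REQ_PDID_HI(be32toh(req->cqid_pdid_hi)) << 12) |
	    G_CPL_RDMA_INV_REQ_PDID_LO(be32toh(req->pdid_lo_qpid)));
}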
+struct cpl_rdma_cqe_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_EXT_RSSCTRL(x) ((x) << S_CPL_RDMA_CQE_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_RSSCTRL) & M_CPL_RDMA_CQE_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_EXT_CQID 0
+#define M_CPL_RDMA_CQE_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_EXT_CQID)
+#define G_CPL_RDMA_CQE_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQID) & M_CPL_RDMA_CQE_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_EXT_TID 8
+#define M_CPL_RDMA_CQE_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_EXT_TID)
+#define G_CPL_RDMA_CQE_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_TID) & M_CPL_RDMA_CQE_EXT_TID)
+
+#define S_CPL_RDMA_CQE_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_EXT_FLITCNT(x) ((x) << S_CPL_RDMA_CQE_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_FLITCNT) & M_CPL_RDMA_CQE_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_EXT_QPID 12
+#define M_CPL_RDMA_CQE_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_EXT_QPID)
+#define G_CPL_RDMA_CQE_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_QPID) & M_CPL_RDMA_CQE_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_EXT_EXTMODE(x) ((x) << S_CPL_RDMA_CQE_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_EXTMODE) & M_CPL_RDMA_CQE_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_EXT_EXTMODE V_CPL_RDMA_CQE_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_EXT_STATUS)
+#define G_CPL_RDMA_CQE_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_STATUS) & M_CPL_RDMA_CQE_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_EXT_CQE_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_EXT_CQE_TYPE V_CPL_RDMA_CQE_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE(x) ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE) & M_CPL_RDMA_CQE_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_EXT_SE 31
+#define M_CPL_RDMA_CQE_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_EXT_SE)
+#define G_CPL_RDMA_CQE_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SE) & M_CPL_RDMA_CQE_EXT_SE)
+#define F_CPL_RDMA_CQE_EXT_SE V_CPL_RDMA_CQE_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_WR_TYPE_EXT) & M_CPL_RDMA_CQE_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_EXT_SRQ)
+#define G_CPL_RDMA_CQE_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_EXT_SRQ) & M_CPL_RDMA_CQE_EXT_SRQ)
+
+struct cpl_rdma_cqe_fw_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_FW_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_FW_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_FW_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_RSSCTRL) & M_CPL_RDMA_CQE_FW_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQID 0
+#define M_CPL_RDMA_CQE_FW_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_FW_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_CQID)
+#define G_CPL_RDMA_CQE_FW_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQID) & M_CPL_RDMA_CQE_FW_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_TID 8
+#define M_CPL_RDMA_CQE_FW_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_TID)
+#define G_CPL_RDMA_CQE_FW_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_TID) & M_CPL_RDMA_CQE_FW_EXT_TID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_FW_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_FW_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_FLITCNT) & M_CPL_RDMA_CQE_FW_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_QPID 12
+#define M_CPL_RDMA_CQE_FW_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_FW_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_QPID)
+#define G_CPL_RDMA_CQE_FW_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_QPID) & M_CPL_RDMA_CQE_FW_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_FW_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_FW_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_FW_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_EXTMODE) & M_CPL_RDMA_CQE_FW_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_FW_EXT_EXTMODE V_CPL_RDMA_CQE_FW_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_FW_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_FW_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_FW_EXT_STATUS(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_STATUS)
+#define G_CPL_RDMA_CQE_FW_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_STATUS) & M_CPL_RDMA_CQE_FW_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_CQE_TYPE) & M_CPL_RDMA_CQE_FW_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_FW_EXT_CQE_TYPE V_CPL_RDMA_CQE_FW_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE) & M_CPL_RDMA_CQE_FW_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SE 31
+#define M_CPL_RDMA_CQE_FW_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_FW_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SE)
+#define G_CPL_RDMA_CQE_FW_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SE) & M_CPL_RDMA_CQE_FW_EXT_SE)
+#define F_CPL_RDMA_CQE_FW_EXT_SE V_CPL_RDMA_CQE_FW_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_FW_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_FW_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_FW_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_FW_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_FW_EXT_SRQ)
+#define G_CPL_RDMA_CQE_FW_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_FW_EXT_SRQ) & M_CPL_RDMA_CQE_FW_EXT_SRQ)
+
+struct cpl_rdma_cqe_err_ext {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 16
+#define M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+#define G_CPL_RDMA_CQE_ERR_EXT_RSSCTRL(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_RSSCTRL) & M_CPL_RDMA_CQE_ERR_EXT_RSSCTRL)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQID 0
+#define M_CPL_RDMA_CQE_ERR_EXT_CQID 0xffff
+#define V_CPL_RDMA_CQE_ERR_EXT_CQID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQID)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQID) & M_CPL_RDMA_CQE_ERR_EXT_CQID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_TID 8
+#define M_CPL_RDMA_CQE_ERR_EXT_TID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_TID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_TID)
+#define G_CPL_RDMA_CQE_ERR_EXT_TID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_TID) & M_CPL_RDMA_CQE_ERR_EXT_TID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0
+#define M_CPL_RDMA_CQE_ERR_EXT_FLITCNT 0xff
+#define V_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+#define G_CPL_RDMA_CQE_ERR_EXT_FLITCNT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_FLITCNT) & M_CPL_RDMA_CQE_ERR_EXT_FLITCNT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_QPID 12
+#define M_CPL_RDMA_CQE_ERR_EXT_QPID 0xfffff
+#define V_CPL_RDMA_CQE_ERR_EXT_QPID(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_QPID)
+#define G_CPL_RDMA_CQE_ERR_EXT_QPID(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_QPID) & M_CPL_RDMA_CQE_ERR_EXT_QPID)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_EXTMODE 11
+#define M_CPL_RDMA_CQE_ERR_EXT_EXTMODE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define G_CPL_RDMA_CQE_ERR_EXT_EXTMODE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_EXTMODE) & M_CPL_RDMA_CQE_ERR_EXT_EXTMODE)
+#define F_CPL_RDMA_CQE_ERR_EXT_EXTMODE V_CPL_RDMA_CQE_ERR_EXT_EXTMODE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 10
+#define M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define G_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT)
+#define F_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT \
+ V_CPL_RDMA_CQE_ERR_EXT_GENERATION_BIT(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_STATUS 5
+#define M_CPL_RDMA_CQE_ERR_EXT_STATUS 0x1f
+#define V_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_STATUS)
+#define G_CPL_RDMA_CQE_ERR_EXT_STATUS(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_STATUS) & M_CPL_RDMA_CQE_ERR_EXT_STATUS)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 4
+#define M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE) & \
+ M_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE)
+#define F_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE V_CPL_RDMA_CQE_ERR_EXT_CQE_TYPE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE 0xf
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE) & M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SE 31
+#define M_CPL_RDMA_CQE_ERR_EXT_SE 0x1
+#define V_CPL_RDMA_CQE_ERR_EXT_SE(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SE)
+#define G_CPL_RDMA_CQE_ERR_EXT_SE(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SE) & M_CPL_RDMA_CQE_ERR_EXT_SE)
+#define F_CPL_RDMA_CQE_ERR_EXT_SE V_CPL_RDMA_CQE_ERR_EXT_SE(1U)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 24
+#define M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT 0x7f
+#define V_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+#define G_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT) & \
+ M_CPL_RDMA_CQE_ERR_EXT_WR_TYPE_EXT)
+
+#define S_CPL_RDMA_CQE_ERR_EXT_SRQ 0
+#define M_CPL_RDMA_CQE_ERR_EXT_SRQ 0xfff
+#define V_CPL_RDMA_CQE_ERR_EXT_SRQ(x) ((x) << S_CPL_RDMA_CQE_ERR_EXT_SRQ)
+#define G_CPL_RDMA_CQE_ERR_EXT_SRQ(x) \
+ (((x) >> S_CPL_RDMA_CQE_ERR_EXT_SRQ) & M_CPL_RDMA_CQE_ERR_EXT_SRQ)
+
struct cpl_set_le_req {
WR_HDR;
union opcode_tid ot;
@@ -2630,6 +4341,13 @@ struct cpl_set_le_req {
};
/* cpl_set_le_req.reply_ctrl additional fields */
+#define S_LE_REQ_RXCHANNEL 14
+#define M_LE_REQ_RXCHANNEL 0x1
+#define V_LE_REQ_RXCHANNEL(x) ((x) << S_LE_REQ_RXCHANNEL)
+#define G_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_LE_REQ_RXCHANNEL) & M_LE_REQ_RXCHANNEL)
+#define F_LE_REQ_RXCHANNEL V_LE_REQ_RXCHANNEL(1U)
+
#define S_LE_REQ_IP6 13
#define V_LE_REQ_IP6(x) ((x) << S_LE_REQ_IP6)
#define F_LE_REQ_IP6 V_LE_REQ_IP6(1U)
@@ -2659,6 +4377,80 @@ struct cpl_set_le_req {
#define V_LE_REQCMD(x) ((x) << S_LE_REQCMD)
#define G_LE_REQCMD(x) (((x) >> S_LE_REQCMD) & M_LE_REQCMD)
+struct cpl_t7_set_le_req {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 noreply_to_channel;
+ __be32 mask1[2];
+ __be32 mask0[2];
+ __be32 value1[2];
+ __be32 value0[2];
+};
+
+#define S_CPL_T7_SET_LE_REQ_INDEX 0
+#define M_CPL_T7_SET_LE_REQ_INDEX 0xffffff
+#define V_CPL_T7_SET_LE_REQ_INDEX(x) ((x) << S_CPL_T7_SET_LE_REQ_INDEX)
+#define G_CPL_T7_SET_LE_REQ_INDEX(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_INDEX) & M_CPL_T7_SET_LE_REQ_INDEX)
+
+#define S_CPL_T7_SET_LE_REQ_NOREPLY 31
+#define M_CPL_T7_SET_LE_REQ_NOREPLY 0x1
+#define V_CPL_T7_SET_LE_REQ_NOREPLY(x) ((x) << S_CPL_T7_SET_LE_REQ_NOREPLY)
+#define G_CPL_T7_SET_LE_REQ_NOREPLY(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_NOREPLY) & M_CPL_T7_SET_LE_REQ_NOREPLY)
+#define F_CPL_T7_SET_LE_REQ_NOREPLY V_CPL_T7_SET_LE_REQ_NOREPLY(1U)
+
+#define S_CPL_T7_SET_LE_REQ_RXCHANNEL 28
+#define M_CPL_T7_SET_LE_REQ_RXCHANNEL 0x7
+#define V_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ ((x) << S_CPL_T7_SET_LE_REQ_RXCHANNEL)
+#define G_CPL_T7_SET_LE_REQ_RXCHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_RXCHANNEL) & M_CPL_T7_SET_LE_REQ_RXCHANNEL)
+
+#define S_CPL_T7_SET_LE_REQ_QUEUE 16
+#define M_CPL_T7_SET_LE_REQ_QUEUE 0xfff
+#define V_CPL_T7_SET_LE_REQ_QUEUE(x) ((x) << S_CPL_T7_SET_LE_REQ_QUEUE)
+#define G_CPL_T7_SET_LE_REQ_QUEUE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_QUEUE) & M_CPL_T7_SET_LE_REQ_QUEUE)
+
+#define S_CPL_T7_SET_LE_REQ_REQCMD 12
+#define M_CPL_T7_SET_LE_REQ_REQCMD 0xf
+#define V_CPL_T7_SET_LE_REQ_REQCMD(x) ((x) << S_CPL_T7_SET_LE_REQ_REQCMD)
+#define G_CPL_T7_SET_LE_REQ_REQCMD(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQCMD) & M_CPL_T7_SET_LE_REQ_REQCMD)
+
+#define S_CPL_T7_SET_LE_REQ_REQSIZE 9
+#define M_CPL_T7_SET_LE_REQ_REQSIZE 0x7
+#define V_CPL_T7_SET_LE_REQ_REQSIZE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQSIZE)
+#define G_CPL_T7_SET_LE_REQ_REQSIZE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQSIZE) & M_CPL_T7_SET_LE_REQ_REQSIZE)
+
+#define S_CPL_T7_SET_LE_REQ_MORE 8
+#define M_CPL_T7_SET_LE_REQ_MORE 0x1
+#define V_CPL_T7_SET_LE_REQ_MORE(x) ((x) << S_CPL_T7_SET_LE_REQ_MORE)
+#define G_CPL_T7_SET_LE_REQ_MORE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_MORE) & M_CPL_T7_SET_LE_REQ_MORE)
+#define F_CPL_T7_SET_LE_REQ_MORE V_CPL_T7_SET_LE_REQ_MORE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_OFFSET 5
+#define M_CPL_T7_SET_LE_REQ_OFFSET 0x7
+#define V_CPL_T7_SET_LE_REQ_OFFSET(x) ((x) << S_CPL_T7_SET_LE_REQ_OFFSET)
+#define G_CPL_T7_SET_LE_REQ_OFFSET(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_OFFSET) & M_CPL_T7_SET_LE_REQ_OFFSET)
+
+#define S_CPL_T7_SET_LE_REQ_REQTYPE 4
+#define M_CPL_T7_SET_LE_REQ_REQTYPE 0x1
+#define V_CPL_T7_SET_LE_REQ_REQTYPE(x) ((x) << S_CPL_T7_SET_LE_REQ_REQTYPE)
+#define G_CPL_T7_SET_LE_REQ_REQTYPE(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_REQTYPE) & M_CPL_T7_SET_LE_REQ_REQTYPE)
+#define F_CPL_T7_SET_LE_REQ_REQTYPE V_CPL_T7_SET_LE_REQ_REQTYPE(1U)
+
+#define S_CPL_T7_SET_LE_REQ_CHANNEL 0
+#define M_CPL_T7_SET_LE_REQ_CHANNEL 0x3
+#define V_CPL_T7_SET_LE_REQ_CHANNEL(x) ((x) << S_CPL_T7_SET_LE_REQ_CHANNEL)
+#define G_CPL_T7_SET_LE_REQ_CHANNEL(x) \
+ (((x) >> S_CPL_T7_SET_LE_REQ_CHANNEL) & M_CPL_T7_SET_LE_REQ_CHANNEL)
+
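+/*
+ * Sketch, not generated output: composing the noreply_to_channel
+ * control word of cpl_t7_set_le_req from the accessors above;
+ * "req", "rxchan", "qid" and "cmd" are illustrative names.
+ *
+ *	req->noreply_to_channel = htobe32(
+ *	    F_CPL_T7_SET_LE_REQ_NOREPLY |
+ *	    V_CPL_T7_SET_LE_REQ_RXCHANNEL(rxchan) |
+ *	    V_CPL_T7_SET_LE_REQ_QUEUE(qid) |
+ *	    V_CPL_T7_SET_LE_REQ_REQCMD(cmd));
+ */
+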
struct cpl_set_le_rpl {
RSS_HDR
union opcode_tid ot;
@@ -2710,6 +4502,7 @@ enum {
FW_TYPE_WRERR_RPL = 5,
FW_TYPE_PI_ERR = 6,
FW_TYPE_TLS_KEY = 7,
+ FW_TYPE_IPSEC_SA = 8,
};
struct cpl_fw2_pld {
@@ -2811,6 +4604,8 @@ enum {
FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL,
FW6_TYPE_WRERR_RPL = FW_TYPE_WRERR_RPL,
FW6_TYPE_PI_ERR = FW_TYPE_PI_ERR,
+ FW6_TYPE_TLS_KEY = FW_TYPE_TLS_KEY,
+ FW6_TYPE_IPSEC_SA = FW_TYPE_IPSEC_SA,
NUM_FW6_TYPES
};
@@ -2932,6 +4727,10 @@ struct ulp_mem_io {
#define M_ULP_MEMIO_DATA_LEN 0x1F
#define V_ULP_MEMIO_DATA_LEN(x) ((x) << S_ULP_MEMIO_DATA_LEN)
+#define S_T7_ULP_MEMIO_DATA_LEN 0
+#define M_T7_ULP_MEMIO_DATA_LEN 0x7FF
+#define V_T7_ULP_MEMIO_DATA_LEN(x) ((x) << S_T7_ULP_MEMIO_DATA_LEN)
+
/* ULP_TXPKT field values */
enum {
ULP_TXPKT_DEST_TP = 0,
@@ -2960,11 +4759,25 @@ struct ulp_txpkt {
(((x) >> S_ULP_TXPKT_CHANNELID) & M_ULP_TXPKT_CHANNELID)
#define F_ULP_TXPKT_CHANNELID V_ULP_TXPKT_CHANNELID(1U)
+#define S_T7_ULP_TXPKT_CHANNELID 22
+#define M_T7_ULP_TXPKT_CHANNELID 0x3
+#define V_T7_ULP_TXPKT_CHANNELID(x) ((x) << S_T7_ULP_TXPKT_CHANNELID)
+#define G_T7_ULP_TXPKT_CHANNELID(x) \
+ (((x) >> S_T7_ULP_TXPKT_CHANNELID) & M_T7_ULP_TXPKT_CHANNELID)
+#define F_T7_ULP_TXPKT_CHANNELID V_T7_ULP_TXPKT_CHANNELID(1U)
+
/* ulp_txpkt.cmd_dest fields */
#define S_ULP_TXPKT_DEST 16
#define M_ULP_TXPKT_DEST 0x3
#define V_ULP_TXPKT_DEST(x) ((x) << S_ULP_TXPKT_DEST)
+#define S_ULP_TXPKT_CMDMORE 15
+#define M_ULP_TXPKT_CMDMORE 0x1
+#define V_ULP_TXPKT_CMDMORE(x) ((x) << S_ULP_TXPKT_CMDMORE)
+#define G_ULP_TXPKT_CMDMORE(x) \
+ (((x) >> S_ULP_TXPKT_CMDMORE) & M_ULP_TXPKT_CMDMORE)
+#define F_ULP_TXPKT_CMDMORE V_ULP_TXPKT_CMDMORE(1U)
+
#define S_ULP_TXPKT_FID 4
#define M_ULP_TXPKT_FID 0x7ff
#define V_ULP_TXPKT_FID(x) ((x) << S_ULP_TXPKT_FID)
@@ -2978,13 +4791,15 @@ enum cpl_tx_tnl_lso_type {
TX_TNL_TYPE_NVGRE,
TX_TNL_TYPE_VXLAN,
TX_TNL_TYPE_GENEVE,
+ TX_TNL_TYPE_IPSEC,
};
struct cpl_tx_tnl_lso {
__be32 op_to_IpIdSplitOut;
__be16 IpIdOffsetOut;
__be16 UdpLenSetOut_to_TnlHdrLen;
- __be64 r1;
+ __be32 ipsecen_to_rocev2;
+ __be32 roce_eth;
__be32 Flow_to_TcpHdrLen;
__be16 IpIdOffset;
__be16 IpIdSplit_to_Mss;
@@ -3098,6 +4913,68 @@ struct cpl_tx_tnl_lso {
#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x) \
(((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN)
+#define S_CPL_TX_TNL_LSO_IPSECEN 31
+#define M_CPL_TX_TNL_LSO_IPSECEN 0x1
+#define V_CPL_TX_TNL_LSO_IPSECEN(x) ((x) << S_CPL_TX_TNL_LSO_IPSECEN)
+#define G_CPL_TX_TNL_LSO_IPSECEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECEN) & M_CPL_TX_TNL_LSO_IPSECEN)
+#define F_CPL_TX_TNL_LSO_IPSECEN V_CPL_TX_TNL_LSO_IPSECEN(1U)
+
+#define S_CPL_TX_TNL_LSO_ENCAPDIS 30
+#define M_CPL_TX_TNL_LSO_ENCAPDIS 0x1
+#define V_CPL_TX_TNL_LSO_ENCAPDIS(x) ((x) << S_CPL_TX_TNL_LSO_ENCAPDIS)
+#define G_CPL_TX_TNL_LSO_ENCAPDIS(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ENCAPDIS) & M_CPL_TX_TNL_LSO_ENCAPDIS)
+#define F_CPL_TX_TNL_LSO_ENCAPDIS V_CPL_TX_TNL_LSO_ENCAPDIS(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECMODE 29
+#define M_CPL_TX_TNL_LSO_IPSECMODE 0x1
+#define V_CPL_TX_TNL_LSO_IPSECMODE(x) ((x) << S_CPL_TX_TNL_LSO_IPSECMODE)
+#define G_CPL_TX_TNL_LSO_IPSECMODE(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECMODE) & M_CPL_TX_TNL_LSO_IPSECMODE)
+#define F_CPL_TX_TNL_LSO_IPSECMODE V_CPL_TX_TNL_LSO_IPSECMODE(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPV6 28
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPV6 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPV6(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPV6) & M_CPL_TX_TNL_LSO_IPSECTNLIPV6)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPV6 V_CPL_TX_TNL_LSO_IPSECTNLIPV6(1U)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 20
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN 0xff
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPHDRLEN)
+
+#define S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 19
+#define M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT 0x1
+#define V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define G_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT) & \
+ M_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT)
+#define F_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT \
+ V_CPL_TX_TNL_LSO_IPSECTNLIPIDSPLIT(1U)
+
+#define S_CPL_TX_TNL_LSO_ROCEV2 18
+#define M_CPL_TX_TNL_LSO_ROCEV2 0x1
+#define V_CPL_TX_TNL_LSO_ROCEV2(x) ((x) << S_CPL_TX_TNL_LSO_ROCEV2)
+#define G_CPL_TX_TNL_LSO_ROCEV2(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ROCEV2) & M_CPL_TX_TNL_LSO_ROCEV2)
+#define F_CPL_TX_TNL_LSO_ROCEV2 V_CPL_TX_TNL_LSO_ROCEV2(1U)
+
+#define S_CPL_TX_TNL_LSO_UDPCHKUPDOUT 17
+#define M_CPL_TX_TNL_LSO_UDPCHKUPDOUT 0x1
+#define V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ ((x) << S_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define G_CPL_TX_TNL_LSO_UDPCHKUPDOUT(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_UDPCHKUPDOUT) & M_CPL_TX_TNL_LSO_UDPCHKUPDOUT)
+#define F_CPL_TX_TNL_LSO_UDPCHKUPDOUT V_CPL_TX_TNL_LSO_UDPCHKUPDOUT(1U)
+
#define S_CPL_TX_TNL_LSO_FLOW 21
#define M_CPL_TX_TNL_LSO_FLOW 0x1
#define V_CPL_TX_TNL_LSO_FLOW(x) ((x) << S_CPL_TX_TNL_LSO_FLOW)
@@ -3180,6 +5057,12 @@ struct cpl_rx_mps_pkt {
#define G_CPL_RX_MPS_PKT_TYPE(x) \
(((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE)
+#define S_CPL_RX_MPS_PKT_LENGTH 0
+#define M_CPL_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_RX_MPS_PKT_LENGTH)
+#define G_CPL_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_RX_MPS_PKT_LENGTH) & M_CPL_RX_MPS_PKT_LENGTH)
+
/*
* Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field.
*/
@@ -3188,6 +5071,88 @@ struct cpl_rx_mps_pkt {
#define X_CPL_RX_MPS_PKT_TYPE_QFC (1 << 2)
#define X_CPL_RX_MPS_PKT_TYPE_PTP (1 << 3)
+struct cpl_t7_rx_mps_pkt {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 length_pkd;
+};
+
+#define S_CPL_T7_RX_MPS_PKT_TYPE 20
+#define M_CPL_T7_RX_MPS_PKT_TYPE 0xf
+#define V_CPL_T7_RX_MPS_PKT_TYPE(x) ((x) << S_CPL_T7_RX_MPS_PKT_TYPE)
+#define G_CPL_T7_RX_MPS_PKT_TYPE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TYPE) & M_CPL_T7_RX_MPS_PKT_TYPE)
+
+#define S_CPL_T7_RX_MPS_PKT_INTERFACE 16
+#define M_CPL_T7_RX_MPS_PKT_INTERFACE 0xf
+#define V_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_INTERFACE)
+#define G_CPL_T7_RX_MPS_PKT_INTERFACE(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_INTERFACE) & M_CPL_T7_RX_MPS_PKT_INTERFACE)
+
+#define S_CPL_T7_RX_MPS_PKT_TRUNCATED 7
+#define M_CPL_T7_RX_MPS_PKT_TRUNCATED 0x1
+#define V_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ ((x) << S_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define G_CPL_T7_RX_MPS_PKT_TRUNCATED(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_TRUNCATED) & M_CPL_T7_RX_MPS_PKT_TRUNCATED)
+#define F_CPL_T7_RX_MPS_PKT_TRUNCATED V_CPL_T7_RX_MPS_PKT_TRUNCATED(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_PKTERR 6
+#define M_CPL_T7_RX_MPS_PKT_PKTERR 0x1
+#define V_CPL_T7_RX_MPS_PKT_PKTERR(x) ((x) << S_CPL_T7_RX_MPS_PKT_PKTERR)
+#define G_CPL_T7_RX_MPS_PKT_PKTERR(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_PKTERR) & M_CPL_T7_RX_MPS_PKT_PKTERR)
+#define F_CPL_T7_RX_MPS_PKT_PKTERR V_CPL_T7_RX_MPS_PKT_PKTERR(1U)
+
+#define S_CPL_T7_RX_MPS_PKT_LENGTH 0
+#define M_CPL_T7_RX_MPS_PKT_LENGTH 0xffff
+#define V_CPL_T7_RX_MPS_PKT_LENGTH(x) ((x) << S_CPL_T7_RX_MPS_PKT_LENGTH)
+#define G_CPL_T7_RX_MPS_PKT_LENGTH(x) \
+ (((x) >> S_CPL_T7_RX_MPS_PKT_LENGTH) & M_CPL_T7_RX_MPS_PKT_LENGTH)
+
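+/*
+ * Sketch, not generated output: unpacking length_pkd of
+ * cpl_t7_rx_mps_pkt ("cpl" is an illustrative pointer); the packet
+ * type and length share this big-endian word:
+ *
+ *	uint32_t v = be32toh(cpl->length_pkd);
+ *	unsigned int type = G_CPL_T7_RX_MPS_PKT_TYPE(v);
+ *	unsigned int len = G_CPL_T7_RX_MPS_PKT_LENGTH(v);
+ */
+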
+struct cpl_tx_tls_pdu {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 pldlen_pkd;
+ __be32 customtype_customprotover;
+ __be32 r2_lo;
+ __be32 scmd0[2];
+ __be32 scmd1[2];
+};
+
+#define S_CPL_TX_TLS_PDU_DATATYPE 20
+#define M_CPL_TX_TLS_PDU_DATATYPE 0xf
+#define V_CPL_TX_TLS_PDU_DATATYPE(x) ((x) << S_CPL_TX_TLS_PDU_DATATYPE)
+#define G_CPL_TX_TLS_PDU_DATATYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_DATATYPE) & M_CPL_TX_TLS_PDU_DATATYPE)
+
+#define S_CPL_TX_TLS_PDU_CPLLEN 16
+#define M_CPL_TX_TLS_PDU_CPLLEN 0xf
+#define V_CPL_TX_TLS_PDU_CPLLEN(x) ((x) << S_CPL_TX_TLS_PDU_CPLLEN)
+#define G_CPL_TX_TLS_PDU_CPLLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CPLLEN) & M_CPL_TX_TLS_PDU_CPLLEN)
+
+#define S_CPL_TX_TLS_PDU_PLDLEN 0
+#define M_CPL_TX_TLS_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_PDU_PLDLEN(x) ((x) << S_CPL_TX_TLS_PDU_PLDLEN)
+#define G_CPL_TX_TLS_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_PLDLEN) & M_CPL_TX_TLS_PDU_PLDLEN)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMTYPE 24
+#define M_CPL_TX_TLS_PDU_CUSTOMTYPE 0xff
+#define V_CPL_TX_TLS_PDU_CUSTOMTYPE(x) ((x) << S_CPL_TX_TLS_PDU_CUSTOMTYPE)
+#define G_CPL_TX_TLS_PDU_CUSTOMTYPE(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMTYPE) & M_CPL_TX_TLS_PDU_CUSTOMTYPE)
+
+#define S_CPL_TX_TLS_PDU_CUSTOMPROTOVER 8
+#define M_CPL_TX_TLS_PDU_CUSTOMPROTOVER 0xffff
+#define V_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ ((x) << S_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+#define G_CPL_TX_TLS_PDU_CUSTOMPROTOVER(x) \
+ (((x) >> S_CPL_TX_TLS_PDU_CUSTOMPROTOVER) & \
+ M_CPL_TX_TLS_PDU_CUSTOMPROTOVER)
+
struct cpl_tx_tls_sfo {
__be32 op_to_seg_len;
__be32 pld_len;
@@ -3223,6 +5188,12 @@ struct cpl_tx_tls_sfo {
#define G_CPL_TX_TLS_SFO_SEG_LEN(x) \
(((x) >> S_CPL_TX_TLS_SFO_SEG_LEN) & M_CPL_TX_TLS_SFO_SEG_LEN)
+#define S_CPL_TX_TLS_SFO_PLDLEN 0
+#define M_CPL_TX_TLS_SFO_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_SFO_PLDLEN(x) ((x) << S_CPL_TX_TLS_SFO_PLDLEN)
+#define G_CPL_TX_TLS_SFO_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_SFO_PLDLEN) & M_CPL_TX_TLS_SFO_PLDLEN)
+
#define S_CPL_TX_TLS_SFO_TYPE 24
#define M_CPL_TX_TLS_SFO_TYPE 0xff
#define V_CPL_TX_TLS_SFO_TYPE(x) ((x) << S_CPL_TX_TLS_SFO_TYPE)
@@ -3454,6 +5425,119 @@ struct cpl_rx_tls_cmp {
#define G_SCMD_HDR_LEN(x) \
(((x) >> S_SCMD_HDR_LEN) & M_SCMD_HDR_LEN)
+struct cpl_rx_pkt_ipsec {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 vlan;
+ __be16 length;
+ __be32 rxchannel_to_ethhdrlen;
+ __be32 iphdrlen_to_rxerror;
+ __be64 timestamp;
+};
+
+#define S_CPL_RX_PKT_IPSEC_OPCODE 24
+#define M_CPL_RX_PKT_IPSEC_OPCODE 0xff
+#define V_CPL_RX_PKT_IPSEC_OPCODE(x) ((x) << S_CPL_RX_PKT_IPSEC_OPCODE)
+#define G_CPL_RX_PKT_IPSEC_OPCODE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OPCODE) & M_CPL_RX_PKT_IPSEC_OPCODE)
+
+#define S_CPL_RX_PKT_IPSEC_IPFRAG 23
+#define M_CPL_RX_PKT_IPSEC_IPFRAG 0x1
+#define V_CPL_RX_PKT_IPSEC_IPFRAG(x) ((x) << S_CPL_RX_PKT_IPSEC_IPFRAG)
+#define G_CPL_RX_PKT_IPSEC_IPFRAG(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPFRAG) & M_CPL_RX_PKT_IPSEC_IPFRAG)
+#define F_CPL_RX_PKT_IPSEC_IPFRAG V_CPL_RX_PKT_IPSEC_IPFRAG(1U)
+
+#define S_CPL_RX_PKT_IPSEC_VLAN_EX 22
+#define M_CPL_RX_PKT_IPSEC_VLAN_EX 0x1
+#define V_CPL_RX_PKT_IPSEC_VLAN_EX(x) ((x) << S_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define G_CPL_RX_PKT_IPSEC_VLAN_EX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_VLAN_EX) & M_CPL_RX_PKT_IPSEC_VLAN_EX)
+#define F_CPL_RX_PKT_IPSEC_VLAN_EX V_CPL_RX_PKT_IPSEC_VLAN_EX(1U)
+
+#define S_CPL_RX_PKT_IPSEC_IPMI 21
+#define M_CPL_RX_PKT_IPSEC_IPMI 0x1
+#define V_CPL_RX_PKT_IPSEC_IPMI(x) ((x) << S_CPL_RX_PKT_IPSEC_IPMI)
+#define G_CPL_RX_PKT_IPSEC_IPMI(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPMI) & M_CPL_RX_PKT_IPSEC_IPMI)
+#define F_CPL_RX_PKT_IPSEC_IPMI V_CPL_RX_PKT_IPSEC_IPMI(1U)
+
+#define S_CPL_RX_PKT_IPSEC_INTERFACE 16
+#define M_CPL_RX_PKT_IPSEC_INTERFACE 0xf
+#define V_CPL_RX_PKT_IPSEC_INTERFACE(x) ((x) << S_CPL_RX_PKT_IPSEC_INTERFACE)
+#define G_CPL_RX_PKT_IPSEC_INTERFACE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_INTERFACE) & M_CPL_RX_PKT_IPSEC_INTERFACE)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECEXTERR 12
+#define M_CPL_RX_PKT_IPSEC_IPSECEXTERR 0xf
+#define V_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+#define G_CPL_RX_PKT_IPSEC_IPSECEXTERR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECEXTERR) & M_CPL_RX_PKT_IPSEC_IPSECEXTERR)
+
+#define S_CPL_RX_PKT_IPSEC_IPSECTYPE 10
+#define M_CPL_RX_PKT_IPSEC_IPSECTYPE 0x3
+#define V_CPL_RX_PKT_IPSEC_IPSECTYPE(x) ((x) << S_CPL_RX_PKT_IPSEC_IPSECTYPE)
+#define G_CPL_RX_PKT_IPSEC_IPSECTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPSECTYPE) & M_CPL_RX_PKT_IPSEC_IPSECTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_OUTIPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_OUTIPHDRLEN) & M_CPL_RX_PKT_IPSEC_OUTIPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXCHANNEL 28
+#define M_CPL_RX_PKT_IPSEC_RXCHANNEL 0xf
+#define V_CPL_RX_PKT_IPSEC_RXCHANNEL(x) ((x) << S_CPL_RX_PKT_IPSEC_RXCHANNEL)
+#define G_CPL_RX_PKT_IPSEC_RXCHANNEL(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXCHANNEL) & M_CPL_RX_PKT_IPSEC_RXCHANNEL)
+
+#define S_CPL_RX_PKT_IPSEC_FLAGS 20
+#define M_CPL_RX_PKT_IPSEC_FLAGS 0xff
+#define V_CPL_RX_PKT_IPSEC_FLAGS(x) ((x) << S_CPL_RX_PKT_IPSEC_FLAGS)
+#define G_CPL_RX_PKT_IPSEC_FLAGS(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_FLAGS) & M_CPL_RX_PKT_IPSEC_FLAGS)
+
+#define S_CPL_RX_PKT_IPSEC_MACMATCHTYPE 17
+#define M_CPL_RX_PKT_IPSEC_MACMATCHTYPE 0x7
+#define V_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ ((x) << S_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+#define G_CPL_RX_PKT_IPSEC_MACMATCHTYPE(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACMATCHTYPE) & \
+ M_CPL_RX_PKT_IPSEC_MACMATCHTYPE)
+
+#define S_CPL_RX_PKT_IPSEC_MACINDEX 8
+#define M_CPL_RX_PKT_IPSEC_MACINDEX 0x1ff
+#define V_CPL_RX_PKT_IPSEC_MACINDEX(x) ((x) << S_CPL_RX_PKT_IPSEC_MACINDEX)
+#define G_CPL_RX_PKT_IPSEC_MACINDEX(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_MACINDEX) & M_CPL_RX_PKT_IPSEC_MACINDEX)
+
+#define S_CPL_RX_PKT_IPSEC_ETHHDRLEN 0
+#define M_CPL_RX_PKT_IPSEC_ETHHDRLEN 0xff
+#define V_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_ETHHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_ETHHDRLEN) & M_CPL_RX_PKT_IPSEC_ETHHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_IPHDRLEN 22
+#define M_CPL_RX_PKT_IPSEC_IPHDRLEN 0x3ff
+#define V_CPL_RX_PKT_IPSEC_IPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_IPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_IPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_IPHDRLEN) & M_CPL_RX_PKT_IPSEC_IPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_TCPHDRLEN 16
+#define M_CPL_RX_PKT_IPSEC_TCPHDRLEN 0x3f
+#define V_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) ((x) << S_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+#define G_CPL_RX_PKT_IPSEC_TCPHDRLEN(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_TCPHDRLEN) & M_CPL_RX_PKT_IPSEC_TCPHDRLEN)
+
+#define S_CPL_RX_PKT_IPSEC_RXERROR 0
+#define M_CPL_RX_PKT_IPSEC_RXERROR 0xffff
+#define V_CPL_RX_PKT_IPSEC_RXERROR(x) ((x) << S_CPL_RX_PKT_IPSEC_RXERROR)
+#define G_CPL_RX_PKT_IPSEC_RXERROR(x) \
+ (((x) >> S_CPL_RX_PKT_IPSEC_RXERROR) & M_CPL_RX_PKT_IPSEC_RXERROR)
+
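+/*
+ * Sketch, not generated output: the inner header lengths and the error
+ * field of cpl_rx_pkt_ipsec are packed into iphdrlen_to_rxerror ("cpl"
+ * is an illustrative pointer):
+ *
+ *	uint32_t w = be32toh(cpl->iphdrlen_to_rxerror);
+ *	unsigned int iphdrlen = G_CPL_RX_PKT_IPSEC_IPHDRLEN(w);
+ *	uint16_t rxerror = G_CPL_RX_PKT_IPSEC_RXERROR(w);
+ */
+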
struct cpl_tx_sec_pdu {
__be32 op_ivinsrtofst;
__be32 pldlen;
@@ -3478,6 +5562,13 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_RXCHID) & M_CPL_TX_SEC_PDU_RXCHID)
#define F_CPL_TX_SEC_PDU_RXCHID V_CPL_TX_SEC_PDU_RXCHID(1U)
+#define S_T7_CPL_TX_SEC_PDU_RXCHID 22
+#define M_T7_CPL_TX_SEC_PDU_RXCHID 0x3
+#define V_T7_CPL_TX_SEC_PDU_RXCHID(x) ((x) << S_T7_CPL_TX_SEC_PDU_RXCHID)
+#define G_T7_CPL_TX_SEC_PDU_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_SEC_PDU_RXCHID) & M_T7_CPL_TX_SEC_PDU_RXCHID)
+#define F_T7_CPL_TX_SEC_PDU_RXCHID V_T7_CPL_TX_SEC_PDU_RXCHID(1U)
+
/* Ack Follows */
#define S_CPL_TX_SEC_PDU_ACKFOLLOWS 21
#define M_CPL_TX_SEC_PDU_ACKFOLLOWS 0x1
@@ -3501,6 +5592,13 @@ struct cpl_tx_sec_pdu {
#define G_CPL_TX_SEC_PDU_CPLLEN(x) \
(((x) >> S_CPL_TX_SEC_PDU_CPLLEN) & M_CPL_TX_SEC_PDU_CPLLEN)
+#define S_CPL_TX_SEC_PDU_ACKNEXT 15
+#define M_CPL_TX_SEC_PDU_ACKNEXT 0x1
+#define V_CPL_TX_SEC_PDU_ACKNEXT(x) ((x) << S_CPL_TX_SEC_PDU_ACKNEXT)
+#define G_CPL_TX_SEC_PDU_ACKNEXT(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_ACKNEXT) & M_CPL_TX_SEC_PDU_ACKNEXT)
+#define F_CPL_TX_SEC_PDU_ACKNEXT V_CPL_TX_SEC_PDU_ACKNEXT(1U)
+
/* PlaceHolder */
#define S_CPL_TX_SEC_PDU_PLACEHOLDER 10
#define M_CPL_TX_SEC_PDU_PLACEHOLDER 0x1
@@ -3517,6 +5615,12 @@ struct cpl_tx_sec_pdu {
(((x) >> S_CPL_TX_SEC_PDU_IVINSRTOFST) & \
M_CPL_TX_SEC_PDU_IVINSRTOFST)
+#define S_CPL_TX_SEC_PDU_PLDLEN 0
+#define M_CPL_TX_SEC_PDU_PLDLEN 0xfffff
+#define V_CPL_TX_SEC_PDU_PLDLEN(x) ((x) << S_CPL_TX_SEC_PDU_PLDLEN)
+#define G_CPL_TX_SEC_PDU_PLDLEN(x) \
+ (((x) >> S_CPL_TX_SEC_PDU_PLDLEN) & M_CPL_TX_SEC_PDU_PLDLEN)
+
/* AadStartOffset: Offset in bytes for AAD start from
* the first byte following
* the pkt headers (0-255
@@ -3666,6 +5770,62 @@ struct cpl_rx_phys_dsgl {
(((x) >> S_CPL_RX_PHYS_DSGL_NOOFSGENTR) & \
M_CPL_RX_PHYS_DSGL_NOOFSGENTR)
+struct cpl_t7_rx_phys_dsgl {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 PhysAddrFields_lo_to_NumSGE;
+ __be32 RSSCopy[2];
+};
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI 0xffffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_HI)
+
+#define S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 16
+#define M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO 0xffff
+#define V_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+#define G_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO) & \
+ M_CPL_T7_RX_PHYS_DSGL_PHYSADDRFIELDS_LO)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 11
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGEERR) & M_CPL_T7_RX_PHYS_DSGL_NUMSGEERR)
+#define F_CPL_T7_RX_PHYS_DSGL_NUMSGEERR V_CPL_T7_RX_PHYS_DSGL_NUMSGEERR(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 10
+#define M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE) & \
+ M_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE \
+ V_CPL_T7_RX_PHYS_DSGL_FIXEDSGEMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_SPLITMODE 9
+#define M_CPL_T7_RX_PHYS_DSGL_SPLITMODE 0x1
+#define V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ ((x) << S_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define G_CPL_T7_RX_PHYS_DSGL_SPLITMODE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_SPLITMODE) & M_CPL_T7_RX_PHYS_DSGL_SPLITMODE)
+#define F_CPL_T7_RX_PHYS_DSGL_SPLITMODE \
+ V_CPL_T7_RX_PHYS_DSGL_SPLITMODE(1U)
+
+#define S_CPL_T7_RX_PHYS_DSGL_NUMSGE 0
+#define M_CPL_T7_RX_PHYS_DSGL_NUMSGE 0x1ff
+#define V_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) ((x) << S_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+#define G_CPL_T7_RX_PHYS_DSGL_NUMSGE(x) \
+ (((x) >> S_CPL_T7_RX_PHYS_DSGL_NUMSGE) & M_CPL_T7_RX_PHYS_DSGL_NUMSGE)
+
/* CPL_TX_TLS_ACK */
struct cpl_tx_tls_ack {
__be32 op_to_Rsvd2;
@@ -3679,12 +5839,11 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_OPCODE(x) \
(((x) >> S_CPL_TX_TLS_ACK_OPCODE) & M_CPL_TX_TLS_ACK_OPCODE)
-#define S_CPL_TX_TLS_ACK_RSVD1 23
-#define M_CPL_TX_TLS_ACK_RSVD1 0x1
-#define V_CPL_TX_TLS_ACK_RSVD1(x) ((x) << S_CPL_TX_TLS_ACK_RSVD1)
-#define G_CPL_TX_TLS_ACK_RSVD1(x) \
- (((x) >> S_CPL_TX_TLS_ACK_RSVD1) & M_CPL_TX_TLS_ACK_RSVD1)
-#define F_CPL_TX_TLS_ACK_RSVD1 V_CPL_TX_TLS_ACK_RSVD1(1U)
+#define S_T7_CPL_TX_TLS_ACK_RXCHID 22
+#define M_T7_CPL_TX_TLS_ACK_RXCHID 0x3
+#define V_T7_CPL_TX_TLS_ACK_RXCHID(x) ((x) << S_T7_CPL_TX_TLS_ACK_RXCHID)
+#define G_T7_CPL_TX_TLS_ACK_RXCHID(x) \
+ (((x) >> S_T7_CPL_TX_TLS_ACK_RXCHID) & M_T7_CPL_TX_TLS_ACK_RXCHID)
#define S_CPL_TX_TLS_ACK_RXCHID 22
#define M_CPL_TX_TLS_ACK_RXCHID 0x1
@@ -3740,4 +5899,822 @@ struct cpl_tx_tls_ack {
#define G_CPL_TX_TLS_ACK_RSVD2(x) \
(((x) >> S_CPL_TX_TLS_ACK_RSVD2) & M_CPL_TX_TLS_ACK_RSVD2)
+#define S_CPL_TX_TLS_ACK_PLDLEN 0
+#define M_CPL_TX_TLS_ACK_PLDLEN 0xfffff
+#define V_CPL_TX_TLS_ACK_PLDLEN(x) ((x) << S_CPL_TX_TLS_ACK_PLDLEN)
+#define G_CPL_TX_TLS_ACK_PLDLEN(x) \
+ (((x) >> S_CPL_TX_TLS_ACK_PLDLEN) & M_CPL_TX_TLS_ACK_PLDLEN)
+
+struct cpl_rcb_upd {
+ __be32 op_to_tid;
+ __be32 opcode_psn;
+ __u8 nodata_to_cnprepclr;
+ __u8 r0;
+ __be16 wrptr;
+ __be32 length;
+};
+
+#define S_CPL_RCB_UPD_OPCODE 24
+#define M_CPL_RCB_UPD_OPCODE 0xff
+#define V_CPL_RCB_UPD_OPCODE(x) ((x) << S_CPL_RCB_UPD_OPCODE)
+#define G_CPL_RCB_UPD_OPCODE(x) \
+ (((x) >> S_CPL_RCB_UPD_OPCODE) & M_CPL_RCB_UPD_OPCODE)
+
+#define S_CPL_RCB_UPD_TID 0
+#define M_CPL_RCB_UPD_TID 0xffffff
+#define V_CPL_RCB_UPD_TID(x) ((x) << S_CPL_RCB_UPD_TID)
+#define G_CPL_RCB_UPD_TID(x) \
+ (((x) >> S_CPL_RCB_UPD_TID) & M_CPL_RCB_UPD_TID)
+
+#define S_CPL_RCB_UPD_PSN 0
+#define M_CPL_RCB_UPD_PSN 0xffffff
+#define V_CPL_RCB_UPD_PSN(x) ((x) << S_CPL_RCB_UPD_PSN)
+#define G_CPL_RCB_UPD_PSN(x) \
+ (((x) >> S_CPL_RCB_UPD_PSN) & M_CPL_RCB_UPD_PSN)
+
+#define S_CPL_RCB_UPD_NODATA 7
+#define M_CPL_RCB_UPD_NODATA 0x1
+#define V_CPL_RCB_UPD_NODATA(x) ((x) << S_CPL_RCB_UPD_NODATA)
+#define G_CPL_RCB_UPD_NODATA(x) \
+ (((x) >> S_CPL_RCB_UPD_NODATA) & M_CPL_RCB_UPD_NODATA)
+#define F_CPL_RCB_UPD_NODATA V_CPL_RCB_UPD_NODATA(1U)
+
+#define S_CPL_RCB_UPD_RTTSTAMP 6
+#define M_CPL_RCB_UPD_RTTSTAMP 0x1
+#define V_CPL_RCB_UPD_RTTSTAMP(x) ((x) << S_CPL_RCB_UPD_RTTSTAMP)
+#define G_CPL_RCB_UPD_RTTSTAMP(x) \
+ (((x) >> S_CPL_RCB_UPD_RTTSTAMP) & M_CPL_RCB_UPD_RTTSTAMP)
+#define F_CPL_RCB_UPD_RTTSTAMP V_CPL_RCB_UPD_RTTSTAMP(1U)
+
+#define S_CPL_RCB_UPD_ECNREPCLR 5
+#define M_CPL_RCB_UPD_ECNREPCLR 0x1
+#define V_CPL_RCB_UPD_ECNREPCLR(x) ((x) << S_CPL_RCB_UPD_ECNREPCLR)
+#define G_CPL_RCB_UPD_ECNREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_ECNREPCLR) & M_CPL_RCB_UPD_ECNREPCLR)
+#define F_CPL_RCB_UPD_ECNREPCLR V_CPL_RCB_UPD_ECNREPCLR(1U)
+
+#define S_CPL_RCB_UPD_NAKSEQCLR 4
+#define M_CPL_RCB_UPD_NAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_NAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_NAKSEQCLR)
+#define G_CPL_RCB_UPD_NAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_NAKSEQCLR) & M_CPL_RCB_UPD_NAKSEQCLR)
+#define F_CPL_RCB_UPD_NAKSEQCLR V_CPL_RCB_UPD_NAKSEQCLR(1U)
+
+#define S_CPL_RCB_UPD_QPERRSET 3
+#define M_CPL_RCB_UPD_QPERRSET 0x1
+#define V_CPL_RCB_UPD_QPERRSET(x) ((x) << S_CPL_RCB_UPD_QPERRSET)
+#define G_CPL_RCB_UPD_QPERRSET(x) \
+ (((x) >> S_CPL_RCB_UPD_QPERRSET) & M_CPL_RCB_UPD_QPERRSET)
+#define F_CPL_RCB_UPD_QPERRSET V_CPL_RCB_UPD_QPERRSET(1U)
+
+#define S_CPL_RCB_UPD_RRQUPDEN 2
+#define M_CPL_RCB_UPD_RRQUPDEN 0x1
+#define V_CPL_RCB_UPD_RRQUPDEN(x) ((x) << S_CPL_RCB_UPD_RRQUPDEN)
+#define G_CPL_RCB_UPD_RRQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RRQUPDEN) & M_CPL_RCB_UPD_RRQUPDEN)
+#define F_CPL_RCB_UPD_RRQUPDEN V_CPL_RCB_UPD_RRQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_RQUPDEN 1
+#define M_CPL_RCB_UPD_RQUPDEN 0x1
+#define V_CPL_RCB_UPD_RQUPDEN(x) ((x) << S_CPL_RCB_UPD_RQUPDEN)
+#define G_CPL_RCB_UPD_RQUPDEN(x) \
+ (((x) >> S_CPL_RCB_UPD_RQUPDEN) & M_CPL_RCB_UPD_RQUPDEN)
+#define F_CPL_RCB_UPD_RQUPDEN V_CPL_RCB_UPD_RQUPDEN(1U)
+
+#define S_CPL_RCB_UPD_CNPREPCLR 0
+#define M_CPL_RCB_UPD_CNPREPCLR 0x1
+#define V_CPL_RCB_UPD_CNPREPCLR(x) ((x) << S_CPL_RCB_UPD_CNPREPCLR)
+#define G_CPL_RCB_UPD_CNPREPCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_CNPREPCLR) & M_CPL_RCB_UPD_CNPREPCLR)
+#define F_CPL_RCB_UPD_CNPREPCLR V_CPL_RCB_UPD_CNPREPCLR(1U)
+
+#define S_CPL_RCB_UPD_RSPNAKSEQCLR 7
+#define M_CPL_RCB_UPD_RSPNAKSEQCLR 0x1
+#define V_CPL_RCB_UPD_RSPNAKSEQCLR(x) ((x) << S_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define G_CPL_RCB_UPD_RSPNAKSEQCLR(x) \
+ (((x) >> S_CPL_RCB_UPD_RSPNAKSEQCLR) & M_CPL_RCB_UPD_RSPNAKSEQCLR)
+#define F_CPL_RCB_UPD_RSPNAKSEQCLR V_CPL_RCB_UPD_RSPNAKSEQCLR(1U)
+
+struct cpl_roce_fw_notify {
+ RSS_HDR
+ union opcode_tid ot;
+ __be32 type_pkd;
+};
+
+#define S_CPL_ROCE_FW_NOTIFY_OPCODE 24
+#define M_CPL_ROCE_FW_NOTIFY_OPCODE 0xff
+#define V_CPL_ROCE_FW_NOTIFY_OPCODE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_OPCODE)
+#define G_CPL_ROCE_FW_NOTIFY_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_OPCODE) & M_CPL_ROCE_FW_NOTIFY_OPCODE)
+
+#define S_CPL_ROCE_FW_NOTIFY_TID 0
+#define M_CPL_ROCE_FW_NOTIFY_TID 0xffffff
+#define V_CPL_ROCE_FW_NOTIFY_TID(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TID)
+#define G_CPL_ROCE_FW_NOTIFY_TID(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TID) & M_CPL_ROCE_FW_NOTIFY_TID)
+
+#define S_CPL_ROCE_FW_NOTIFY_TYPE 28
+#define M_CPL_ROCE_FW_NOTIFY_TYPE 0xf
+#define V_CPL_ROCE_FW_NOTIFY_TYPE(x) ((x) << S_CPL_ROCE_FW_NOTIFY_TYPE)
+#define G_CPL_ROCE_FW_NOTIFY_TYPE(x) \
+ (((x) >> S_CPL_ROCE_FW_NOTIFY_TYPE) & M_CPL_ROCE_FW_NOTIFY_TYPE)
+
+struct cpl_roce_ack_nak_req {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_msn_hi;
+ __be32 msn_lo_pkd;
+};
+
+#define S_CPL_ROCE_ACK_NAK_REQ_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_REQ_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_OPCODE) & M_CPL_ROCE_ACK_NAK_REQ_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TID 0
+#define M_CPL_ROCE_ACK_NAK_REQ_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TID)
+#define G_CPL_ROCE_ACK_NAK_REQ_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TID) & M_CPL_ROCE_ACK_NAK_REQ_TID)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_REQ_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_TYPE)
+#define G_CPL_ROCE_ACK_NAK_REQ_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_TYPE) & M_CPL_ROCE_ACK_NAK_REQ_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_REQ_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_STATUS)
+#define G_CPL_ROCE_ACK_NAK_REQ_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_STATUS) & M_CPL_ROCE_ACK_NAK_REQ_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE) & \
+ M_CPL_ROCE_ACK_NAK_REQ_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_PSN 8
+#define M_CPL_ROCE_ACK_NAK_REQ_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_REQ_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_REQ_PSN)
+#define G_CPL_ROCE_ACK_NAK_REQ_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_PSN) & M_CPL_ROCE_ACK_NAK_REQ_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_HI) & M_CPL_ROCE_ACK_NAK_REQ_MSN_HI)
+
+#define S_CPL_ROCE_ACK_NAK_REQ_MSN_LO 16
+#define M_CPL_ROCE_ACK_NAK_REQ_MSN_LO 0xffff
+#define V_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+#define G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_REQ_MSN_LO) & M_CPL_ROCE_ACK_NAK_REQ_MSN_LO)
+
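+/*
+ * Sketch, not generated output: the 24-bit MSN straddles two words --
+ * its top 8 bits sit in psn_msn_hi below the PSN and its low 16 bits
+ * at the top of msn_lo_pkd -- so reassembly ("p" is illustrative) is:
+ *
+ *	uint32_t hi = be32toh(p->psn_msn_hi);
+ *	uint32_t lo = be32toh(p->msn_lo_pkd);
+ *	uint32_t msn = (G_CPL_ROCE_ACK_NAK_REQ_MSN_HI(hi) << 16) |
+ *	    G_CPL_ROCE_ACK_NAK_REQ_MSN_LO(lo);
+ */
+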
+struct cpl_roce_ack_nak {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 type_to_opcode;
+ __be16 length;
+ __be32 psn_rtt_hi;
+ __be32 rtt_lo_to_rttbad;
+};
+
+#define S_CPL_ROCE_ACK_NAK_OPCODE 24
+#define M_CPL_ROCE_ACK_NAK_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_OPCODE(x) ((x) << S_CPL_ROCE_ACK_NAK_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_OPCODE) & M_CPL_ROCE_ACK_NAK_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_TID 0
+#define M_CPL_ROCE_ACK_NAK_TID 0xffffff
+#define V_CPL_ROCE_ACK_NAK_TID(x) ((x) << S_CPL_ROCE_ACK_NAK_TID)
+#define G_CPL_ROCE_ACK_NAK_TID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TID) & M_CPL_ROCE_ACK_NAK_TID)
+
+#define S_CPL_ROCE_ACK_NAK_TYPE 12
+#define M_CPL_ROCE_ACK_NAK_TYPE 0xf
+#define V_CPL_ROCE_ACK_NAK_TYPE(x) ((x) << S_CPL_ROCE_ACK_NAK_TYPE)
+#define G_CPL_ROCE_ACK_NAK_TYPE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_TYPE) & M_CPL_ROCE_ACK_NAK_TYPE)
+
+#define S_CPL_ROCE_ACK_NAK_STATUS 8
+#define M_CPL_ROCE_ACK_NAK_STATUS 0xf
+#define V_CPL_ROCE_ACK_NAK_STATUS(x) ((x) << S_CPL_ROCE_ACK_NAK_STATUS)
+#define G_CPL_ROCE_ACK_NAK_STATUS(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_STATUS) & M_CPL_ROCE_ACK_NAK_STATUS)
+
+#define S_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0
+#define M_CPL_ROCE_ACK_NAK_WIRE_OPCODE 0xff
+#define V_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) \
+ ((x) << S_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+#define G_CPL_ROCE_ACK_NAK_WIRE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_WIRE_OPCODE) & \
+ M_CPL_ROCE_ACK_NAK_WIRE_OPCODE)
+
+#define S_CPL_ROCE_ACK_NAK_PSN 8
+#define M_CPL_ROCE_ACK_NAK_PSN 0xffffff
+#define V_CPL_ROCE_ACK_NAK_PSN(x) ((x) << S_CPL_ROCE_ACK_NAK_PSN)
+#define G_CPL_ROCE_ACK_NAK_PSN(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_PSN) & M_CPL_ROCE_ACK_NAK_PSN)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_HI 0
+#define M_CPL_ROCE_ACK_NAK_RTT_HI 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_HI(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_HI)
+#define G_CPL_ROCE_ACK_NAK_RTT_HI(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_HI) & M_CPL_ROCE_ACK_NAK_RTT_HI)
+
+#define S_CPL_ROCE_ACK_NAK_RTT_LO 24
+#define M_CPL_ROCE_ACK_NAK_RTT_LO 0xff
+#define V_CPL_ROCE_ACK_NAK_RTT_LO(x) ((x) << S_CPL_ROCE_ACK_NAK_RTT_LO)
+#define G_CPL_ROCE_ACK_NAK_RTT_LO(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTT_LO) & M_CPL_ROCE_ACK_NAK_RTT_LO)
+
+#define S_CPL_ROCE_ACK_NAK_RTTVALID 23
+#define M_CPL_ROCE_ACK_NAK_RTTVALID 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTVALID(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTVALID)
+#define G_CPL_ROCE_ACK_NAK_RTTVALID(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTVALID) & M_CPL_ROCE_ACK_NAK_RTTVALID)
+#define F_CPL_ROCE_ACK_NAK_RTTVALID V_CPL_ROCE_ACK_NAK_RTTVALID(1U)
+
+#define S_CPL_ROCE_ACK_NAK_RTTBAD 22
+#define M_CPL_ROCE_ACK_NAK_RTTBAD 0x1
+#define V_CPL_ROCE_ACK_NAK_RTTBAD(x) ((x) << S_CPL_ROCE_ACK_NAK_RTTBAD)
+#define G_CPL_ROCE_ACK_NAK_RTTBAD(x) \
+ (((x) >> S_CPL_ROCE_ACK_NAK_RTTBAD) & M_CPL_ROCE_ACK_NAK_RTTBAD)
+#define F_CPL_ROCE_ACK_NAK_RTTBAD V_CPL_ROCE_ACK_NAK_RTTBAD(1U)
+
+struct cpl_roce_cqe {
+ __be16 op_rssctrl;
+ __be16 cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_OPCODE 8
+#define M_CPL_ROCE_CQE_OPCODE 0xff
+#define V_CPL_ROCE_CQE_OPCODE(x) ((x) << S_CPL_ROCE_CQE_OPCODE)
+#define G_CPL_ROCE_CQE_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_OPCODE) & M_CPL_ROCE_CQE_OPCODE)
+
+#define S_CPL_ROCE_CQE_RSSCTRL 0
+#define M_CPL_ROCE_CQE_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_RSSCTRL)
+#define G_CPL_ROCE_CQE_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_RSSCTRL) & M_CPL_ROCE_CQE_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_TID 8
+#define M_CPL_ROCE_CQE_TID 0xfffff
+#define V_CPL_ROCE_CQE_TID(x) ((x) << S_CPL_ROCE_CQE_TID)
+#define G_CPL_ROCE_CQE_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_TID) & M_CPL_ROCE_CQE_TID)
+
+#define S_CPL_ROCE_CQE_FLITCNT 0
+#define M_CPL_ROCE_CQE_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FLITCNT)
+#define G_CPL_ROCE_CQE_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FLITCNT) & M_CPL_ROCE_CQE_FLITCNT)
+
+#define S_CPL_ROCE_CQE_QPID 12
+#define M_CPL_ROCE_CQE_QPID 0xfffff
+#define V_CPL_ROCE_CQE_QPID(x) ((x) << S_CPL_ROCE_CQE_QPID)
+#define G_CPL_ROCE_CQE_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_QPID) & M_CPL_ROCE_CQE_QPID)
+
+#define S_CPL_ROCE_CQE_EXTMODE 11
+#define M_CPL_ROCE_CQE_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_EXTMODE)
+#define G_CPL_ROCE_CQE_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_EXTMODE) & M_CPL_ROCE_CQE_EXTMODE)
+#define F_CPL_ROCE_CQE_EXTMODE V_CPL_ROCE_CQE_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_GENERATION_BIT) & M_CPL_ROCE_CQE_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_GENERATION_BIT V_CPL_ROCE_CQE_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_STATUS 5
+#define M_CPL_ROCE_CQE_STATUS 0x1f
+#define V_CPL_ROCE_CQE_STATUS(x) ((x) << S_CPL_ROCE_CQE_STATUS)
+#define G_CPL_ROCE_CQE_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_STATUS) & M_CPL_ROCE_CQE_STATUS)
+
+#define S_CPL_ROCE_CQE_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_CQE_TYPE)
+#define G_CPL_ROCE_CQE_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_CQE_TYPE) & M_CPL_ROCE_CQE_CQE_TYPE)
+#define F_CPL_ROCE_CQE_CQE_TYPE V_CPL_ROCE_CQE_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE 0
+#define M_CPL_ROCE_CQE_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE)
+#define G_CPL_ROCE_CQE_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE) & M_CPL_ROCE_CQE_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_SE 31
+#define M_CPL_ROCE_CQE_SE 0x1
+#define V_CPL_ROCE_CQE_SE(x) ((x) << S_CPL_ROCE_CQE_SE)
+#define G_CPL_ROCE_CQE_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_SE) & M_CPL_ROCE_CQE_SE)
+#define F_CPL_ROCE_CQE_SE V_CPL_ROCE_CQE_SE(1U)
+
+#define S_CPL_ROCE_CQE_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_WR_TYPE_EXT(x) ((x) << S_CPL_ROCE_CQE_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_WR_TYPE_EXT) & M_CPL_ROCE_CQE_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_SRQ 0
+#define M_CPL_ROCE_CQE_SRQ 0xfff
+#define V_CPL_ROCE_CQE_SRQ(x) ((x) << S_CPL_ROCE_CQE_SRQ)
+#define G_CPL_ROCE_CQE_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_SRQ) & M_CPL_ROCE_CQE_SRQ)
+
+struct cpl_roce_cqe_fw {
+ __be32 op_to_cqid;
+ __be32 tid_flitcnt;
+ __be32 qpid_to_wr_type;
+ __be32 length;
+ __be32 tag;
+ __be32 msn;
+ __be32 se_to_srq;
+ __be32 rqe;
+ __be32 extinfoms[2];
+ __be32 extinfols[2];
+};
+
+#define S_CPL_ROCE_CQE_FW_OPCODE 24
+#define M_CPL_ROCE_CQE_FW_OPCODE 0xff
+#define V_CPL_ROCE_CQE_FW_OPCODE(x) ((x) << S_CPL_ROCE_CQE_FW_OPCODE)
+#define G_CPL_ROCE_CQE_FW_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_OPCODE) & M_CPL_ROCE_CQE_FW_OPCODE)
+
+#define S_CPL_ROCE_CQE_FW_RSSCTRL 16
+#define M_CPL_ROCE_CQE_FW_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_FW_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_FW_RSSCTRL)
+#define G_CPL_ROCE_CQE_FW_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_RSSCTRL) & M_CPL_ROCE_CQE_FW_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_FW_CQID 0
+#define M_CPL_ROCE_CQE_FW_CQID 0xffff
+#define V_CPL_ROCE_CQE_FW_CQID(x) ((x) << S_CPL_ROCE_CQE_FW_CQID)
+#define G_CPL_ROCE_CQE_FW_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQID) & M_CPL_ROCE_CQE_FW_CQID)
+
+#define S_CPL_ROCE_CQE_FW_TID 8
+#define M_CPL_ROCE_CQE_FW_TID 0xfffff
+#define V_CPL_ROCE_CQE_FW_TID(x) ((x) << S_CPL_ROCE_CQE_FW_TID)
+#define G_CPL_ROCE_CQE_FW_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_TID) & M_CPL_ROCE_CQE_FW_TID)
+
+#define S_CPL_ROCE_CQE_FW_FLITCNT 0
+#define M_CPL_ROCE_CQE_FW_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_FW_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_FW_FLITCNT)
+#define G_CPL_ROCE_CQE_FW_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_FLITCNT) & M_CPL_ROCE_CQE_FW_FLITCNT)
+
+#define S_CPL_ROCE_CQE_FW_QPID 12
+#define M_CPL_ROCE_CQE_FW_QPID 0xfffff
+#define V_CPL_ROCE_CQE_FW_QPID(x) ((x) << S_CPL_ROCE_CQE_FW_QPID)
+#define G_CPL_ROCE_CQE_FW_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_QPID) & M_CPL_ROCE_CQE_FW_QPID)
+
+#define S_CPL_ROCE_CQE_FW_EXTMODE 11
+#define M_CPL_ROCE_CQE_FW_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_FW_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_FW_EXTMODE)
+#define G_CPL_ROCE_CQE_FW_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_EXTMODE) & M_CPL_ROCE_CQE_FW_EXTMODE)
+#define F_CPL_ROCE_CQE_FW_EXTMODE V_CPL_ROCE_CQE_FW_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_FW_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_FW_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_FW_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_FW_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_FW_GENERATION_BIT V_CPL_ROCE_CQE_FW_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_FW_STATUS 5
+#define M_CPL_ROCE_CQE_FW_STATUS 0x1f
+#define V_CPL_ROCE_CQE_FW_STATUS(x) ((x) << S_CPL_ROCE_CQE_FW_STATUS)
+#define G_CPL_ROCE_CQE_FW_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_STATUS) & M_CPL_ROCE_CQE_FW_STATUS)
+
+#define S_CPL_ROCE_CQE_FW_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_FW_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_FW_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define G_CPL_ROCE_CQE_FW_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_CQE_TYPE) & M_CPL_ROCE_CQE_FW_CQE_TYPE)
+#define F_CPL_ROCE_CQE_FW_CQE_TYPE V_CPL_ROCE_CQE_FW_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE 0
+#define M_CPL_ROCE_CQE_FW_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_FW_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE) & M_CPL_ROCE_CQE_FW_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_FW_SE 31
+#define M_CPL_ROCE_CQE_FW_SE 0x1
+#define V_CPL_ROCE_CQE_FW_SE(x) ((x) << S_CPL_ROCE_CQE_FW_SE)
+#define G_CPL_ROCE_CQE_FW_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SE) & M_CPL_ROCE_CQE_FW_SE)
+#define F_CPL_ROCE_CQE_FW_SE V_CPL_ROCE_CQE_FW_SE(1U)
+
+#define S_CPL_ROCE_CQE_FW_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_FW_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_FW_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_WR_TYPE_EXT) & M_CPL_ROCE_CQE_FW_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_FW_SRQ 0
+#define M_CPL_ROCE_CQE_FW_SRQ 0xfff
+#define V_CPL_ROCE_CQE_FW_SRQ(x) ((x) << S_CPL_ROCE_CQE_FW_SRQ)
+#define G_CPL_ROCE_CQE_FW_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_FW_SRQ) & M_CPL_ROCE_CQE_FW_SRQ)
+
+struct cpl_roce_cqe_err {
+ __be32 op_to_CQID;
+ __be32 Tid_FlitCnt;
+ __be32 QPID_to_WR_type;
+ __be32 Length;
+ __be32 TAG;
+ __be32 MSN;
+ __be32 SE_to_SRQ;
+ __be32 RQE;
+ __be32 ExtInfoMS[2];
+ __be32 ExtInfoLS[2];
+};
+
+#define S_CPL_ROCE_CQE_ERR_OPCODE 24
+#define M_CPL_ROCE_CQE_ERR_OPCODE 0xff
+#define V_CPL_ROCE_CQE_ERR_OPCODE(x) ((x) << S_CPL_ROCE_CQE_ERR_OPCODE)
+#define G_CPL_ROCE_CQE_ERR_OPCODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_OPCODE) & M_CPL_ROCE_CQE_ERR_OPCODE)
+
+#define S_CPL_ROCE_CQE_ERR_RSSCTRL 16
+#define M_CPL_ROCE_CQE_ERR_RSSCTRL 0xff
+#define V_CPL_ROCE_CQE_ERR_RSSCTRL(x) ((x) << S_CPL_ROCE_CQE_ERR_RSSCTRL)
+#define G_CPL_ROCE_CQE_ERR_RSSCTRL(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_RSSCTRL) & M_CPL_ROCE_CQE_ERR_RSSCTRL)
+
+#define S_CPL_ROCE_CQE_ERR_CQID 0
+#define M_CPL_ROCE_CQE_ERR_CQID 0xffff
+#define V_CPL_ROCE_CQE_ERR_CQID(x) ((x) << S_CPL_ROCE_CQE_ERR_CQID)
+#define G_CPL_ROCE_CQE_ERR_CQID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQID) & M_CPL_ROCE_CQE_ERR_CQID)
+
+#define S_CPL_ROCE_CQE_ERR_TID 8
+#define M_CPL_ROCE_CQE_ERR_TID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_TID(x) ((x) << S_CPL_ROCE_CQE_ERR_TID)
+#define G_CPL_ROCE_CQE_ERR_TID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_TID) & M_CPL_ROCE_CQE_ERR_TID)
+
+#define S_CPL_ROCE_CQE_ERR_FLITCNT 0
+#define M_CPL_ROCE_CQE_ERR_FLITCNT 0xff
+#define V_CPL_ROCE_CQE_ERR_FLITCNT(x) ((x) << S_CPL_ROCE_CQE_ERR_FLITCNT)
+#define G_CPL_ROCE_CQE_ERR_FLITCNT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_FLITCNT) & M_CPL_ROCE_CQE_ERR_FLITCNT)
+
+#define S_CPL_ROCE_CQE_ERR_QPID 12
+#define M_CPL_ROCE_CQE_ERR_QPID 0xfffff
+#define V_CPL_ROCE_CQE_ERR_QPID(x) ((x) << S_CPL_ROCE_CQE_ERR_QPID)
+#define G_CPL_ROCE_CQE_ERR_QPID(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_QPID) & M_CPL_ROCE_CQE_ERR_QPID)
+
+#define S_CPL_ROCE_CQE_ERR_EXTMODE 11
+#define M_CPL_ROCE_CQE_ERR_EXTMODE 0x1
+#define V_CPL_ROCE_CQE_ERR_EXTMODE(x) ((x) << S_CPL_ROCE_CQE_ERR_EXTMODE)
+#define G_CPL_ROCE_CQE_ERR_EXTMODE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_EXTMODE) & M_CPL_ROCE_CQE_ERR_EXTMODE)
+#define F_CPL_ROCE_CQE_ERR_EXTMODE V_CPL_ROCE_CQE_ERR_EXTMODE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_GENERATION_BIT 10
+#define M_CPL_ROCE_CQE_ERR_GENERATION_BIT 0x1
+#define V_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define G_CPL_ROCE_CQE_ERR_GENERATION_BIT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_GENERATION_BIT) & \
+ M_CPL_ROCE_CQE_ERR_GENERATION_BIT)
+#define F_CPL_ROCE_CQE_ERR_GENERATION_BIT \
+ V_CPL_ROCE_CQE_ERR_GENERATION_BIT(1U)
+
+#define S_CPL_ROCE_CQE_ERR_STATUS 5
+#define M_CPL_ROCE_CQE_ERR_STATUS 0x1f
+#define V_CPL_ROCE_CQE_ERR_STATUS(x) ((x) << S_CPL_ROCE_CQE_ERR_STATUS)
+#define G_CPL_ROCE_CQE_ERR_STATUS(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_STATUS) & M_CPL_ROCE_CQE_ERR_STATUS)
+
+#define S_CPL_ROCE_CQE_ERR_CQE_TYPE 4
+#define M_CPL_ROCE_CQE_ERR_CQE_TYPE 0x1
+#define V_CPL_ROCE_CQE_ERR_CQE_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define G_CPL_ROCE_CQE_ERR_CQE_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_CQE_TYPE) & M_CPL_ROCE_CQE_ERR_CQE_TYPE)
+#define F_CPL_ROCE_CQE_ERR_CQE_TYPE V_CPL_ROCE_CQE_ERR_CQE_TYPE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE 0
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE 0xf
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE(x) ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE) & M_CPL_ROCE_CQE_ERR_WR_TYPE)
+
+#define S_CPL_ROCE_CQE_ERR_SE 31
+#define M_CPL_ROCE_CQE_ERR_SE 0x1
+#define V_CPL_ROCE_CQE_ERR_SE(x) ((x) << S_CPL_ROCE_CQE_ERR_SE)
+#define G_CPL_ROCE_CQE_ERR_SE(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SE) & M_CPL_ROCE_CQE_ERR_SE)
+#define F_CPL_ROCE_CQE_ERR_SE V_CPL_ROCE_CQE_ERR_SE(1U)
+
+#define S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 24
+#define M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT 0x7f
+#define V_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ ((x) << S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+#define G_CPL_ROCE_CQE_ERR_WR_TYPE_EXT(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_WR_TYPE_EXT) & M_CPL_ROCE_CQE_ERR_WR_TYPE_EXT)
+
+#define S_CPL_ROCE_CQE_ERR_SRQ 0
+#define M_CPL_ROCE_CQE_ERR_SRQ 0xfff
+#define V_CPL_ROCE_CQE_ERR_SRQ(x) ((x) << S_CPL_ROCE_CQE_ERR_SRQ)
+#define G_CPL_ROCE_CQE_ERR_SRQ(x) \
+ (((x) >> S_CPL_ROCE_CQE_ERR_SRQ) & M_CPL_ROCE_CQE_ERR_SRQ)
+
+struct cpl_accelerator_hdr {
+ __be16 op_accelerator_id;
+ __be16 rxchid_payload_to_inner_cpl_length_ack;
+ __be32 inner_cpl_length_payload_status_loc;
+};
+
+#define S_CPL_ACCELERATOR_HDR_OPCODE 8
+#define M_CPL_ACCELERATOR_HDR_OPCODE 0xff
+#define V_CPL_ACCELERATOR_HDR_OPCODE(x) ((x) << S_CPL_ACCELERATOR_HDR_OPCODE)
+#define G_CPL_ACCELERATOR_HDR_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_OPCODE) & M_CPL_ACCELERATOR_HDR_OPCODE)
+
+#define S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_HDR_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_HDR_ACCELERATOR_ID)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 14
+#define M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 12
+#define M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_RXCHID_ACK 10
+#define M_CPL_ACCELERATOR_HDR_RXCHID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+#define G_CPL_ACCELERATOR_HDR_RXCHID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_RXCHID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_RXCHID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_DESTID_ACK 8
+#define M_CPL_ACCELERATOR_HDR_DESTID_ACK 0x3
+#define V_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_DESTID_ACK)
+#define G_CPL_ACCELERATOR_HDR_DESTID_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_DESTID_ACK) & \
+ M_CPL_ACCELERATOR_HDR_DESTID_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_ACK)
+
+#define S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 24
+#define M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD 0xff
+#define V_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+#define G_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD) & \
+ M_CPL_ACCELERATOR_HDR_INNER_CPL_LENGTH_PAYLOAD)
+
+#define S_CPL_ACCELERATOR_HDR_STATUS_LOC 22
+#define M_CPL_ACCELERATOR_HDR_STATUS_LOC 0x3
+#define V_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ ((x) << S_CPL_ACCELERATOR_HDR_STATUS_LOC)
+#define G_CPL_ACCELERATOR_HDR_STATUS_LOC(x) \
+ (((x) >> S_CPL_ACCELERATOR_HDR_STATUS_LOC) & \
+ M_CPL_ACCELERATOR_HDR_STATUS_LOC)
+
+struct cpl_accelerator_ack {
+ RSS_HDR
+ __be16 op_accelerator_id;
+ __be16 r0;
+ __be32 status;
+ __be64 r1;
+ __be64 r2;
+};
+
+#define S_CPL_ACCELERATOR_ACK_OPCODE 8
+#define M_CPL_ACCELERATOR_ACK_OPCODE 0xff
+#define V_CPL_ACCELERATOR_ACK_OPCODE(x) ((x) << S_CPL_ACCELERATOR_ACK_OPCODE)
+#define G_CPL_ACCELERATOR_ACK_OPCODE(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_OPCODE) & M_CPL_ACCELERATOR_ACK_OPCODE)
+
+#define S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0
+#define M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID 0xff
+#define V_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ ((x) << S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+#define G_CPL_ACCELERATOR_ACK_ACCELERATOR_ID(x) \
+ (((x) >> S_CPL_ACCELERATOR_ACK_ACCELERATOR_ID) & \
+ M_CPL_ACCELERATOR_ACK_ACCELERATOR_ID)
+
+struct cpl_nvmt_data {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 r0;
+ __be16 length;
+ __be32 seq;
+ __be32 status_pkd;
+};
+
+#define S_CPL_NVMT_DATA_OPCODE 24
+#define M_CPL_NVMT_DATA_OPCODE 0xff
+#define V_CPL_NVMT_DATA_OPCODE(x) ((x) << S_CPL_NVMT_DATA_OPCODE)
+#define G_CPL_NVMT_DATA_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_DATA_OPCODE) & M_CPL_NVMT_DATA_OPCODE)
+
+#define S_CPL_NVMT_DATA_TID 0
+#define M_CPL_NVMT_DATA_TID 0xffffff
+#define V_CPL_NVMT_DATA_TID(x) ((x) << S_CPL_NVMT_DATA_TID)
+#define G_CPL_NVMT_DATA_TID(x) \
+ (((x) >> S_CPL_NVMT_DATA_TID) & M_CPL_NVMT_DATA_TID)
+
+#define S_CPL_NVMT_DATA_STATUS 0
+#define M_CPL_NVMT_DATA_STATUS 0xff
+#define V_CPL_NVMT_DATA_STATUS(x) ((x) << S_CPL_NVMT_DATA_STATUS)
+#define G_CPL_NVMT_DATA_STATUS(x) \
+ (((x) >> S_CPL_NVMT_DATA_STATUS) & M_CPL_NVMT_DATA_STATUS)
+
+struct cpl_nvmt_cmp {
+ RSS_HDR
+ union opcode_tid ot;
+ __be16 crch;
+ __be16 length;
+ __be32 seq;
+ __u8 t10status;
+ __u8 status;
+ __be16 crcl;
+};
+
+#define S_CPL_NVMT_CMP_OPCODE 24
+#define M_CPL_NVMT_CMP_OPCODE 0xff
+#define V_CPL_NVMT_CMP_OPCODE(x) ((x) << S_CPL_NVMT_CMP_OPCODE)
+#define G_CPL_NVMT_CMP_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_OPCODE) & M_CPL_NVMT_CMP_OPCODE)
+
+#define S_CPL_NVMT_CMP_TID 0
+#define M_CPL_NVMT_CMP_TID 0xffffff
+#define V_CPL_NVMT_CMP_TID(x) ((x) << S_CPL_NVMT_CMP_TID)
+#define G_CPL_NVMT_CMP_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_TID) & M_CPL_NVMT_CMP_TID)
+
+struct cpl_nvmt_cmp_imm {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 r1;
+};
+
+#define S_CPL_NVMT_CMP_IMM_OPCODE 24
+#define M_CPL_NVMT_CMP_IMM_OPCODE 0xff
+#define V_CPL_NVMT_CMP_IMM_OPCODE(x) ((x) << S_CPL_NVMT_CMP_IMM_OPCODE)
+#define G_CPL_NVMT_CMP_IMM_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPCODE) & M_CPL_NVMT_CMP_IMM_OPCODE)
+
+#define S_CPL_NVMT_CMP_IMM_RSSCTRL 16
+#define M_CPL_NVMT_CMP_IMM_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_IMM_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_IMM_RSSCTRL)
+#define G_CPL_NVMT_CMP_IMM_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_RSSCTRL) & M_CPL_NVMT_CMP_IMM_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_IMM_CQID 0
+#define M_CPL_NVMT_CMP_IMM_CQID 0xffff
+#define V_CPL_NVMT_CMP_IMM_CQID(x) ((x) << S_CPL_NVMT_CMP_IMM_CQID)
+#define G_CPL_NVMT_CMP_IMM_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_CQID) & M_CPL_NVMT_CMP_IMM_CQID)
+
+#define S_CPL_NVMT_CMP_IMM_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_IMM_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_IMM_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_IMM_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_IMM_GENERATION_BIT \
+ V_CPL_NVMT_CMP_IMM_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_IMM_TID 8
+#define M_CPL_NVMT_CMP_IMM_TID 0xfffff
+#define V_CPL_NVMT_CMP_IMM_TID(x) ((x) << S_CPL_NVMT_CMP_IMM_TID)
+#define G_CPL_NVMT_CMP_IMM_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_TID) & M_CPL_NVMT_CMP_IMM_TID)
+
+#define S_CPL_NVMT_CMP_IMM_OPRQINC 0
+#define M_CPL_NVMT_CMP_IMM_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_IMM_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_IMM_OPRQINC)
+#define G_CPL_NVMT_CMP_IMM_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_IMM_OPRQINC) & M_CPL_NVMT_CMP_IMM_OPRQINC)
+
+struct cpl_nvmt_cmp_srq {
+ __be32 op_to_cqid;
+ __be32 generation_bit_to_oprqinc;
+ __be32 seq;
+ __be16 crch;
+ __be16 length;
+ __be16 crcl;
+ __u8 t10status;
+ __u8 status;
+ __be32 rqe;
+};
+
+#define S_CPL_NVMT_CMP_SRQ_OPCODE 24
+#define M_CPL_NVMT_CMP_SRQ_OPCODE 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPCODE(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPCODE)
+#define G_CPL_NVMT_CMP_SRQ_OPCODE(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPCODE) & M_CPL_NVMT_CMP_SRQ_OPCODE)
+
+#define S_CPL_NVMT_CMP_SRQ_RSSCTRL 16
+#define M_CPL_NVMT_CMP_SRQ_RSSCTRL 0xff
+#define V_CPL_NVMT_CMP_SRQ_RSSCTRL(x) ((x) << S_CPL_NVMT_CMP_SRQ_RSSCTRL)
+#define G_CPL_NVMT_CMP_SRQ_RSSCTRL(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_RSSCTRL) & M_CPL_NVMT_CMP_SRQ_RSSCTRL)
+
+#define S_CPL_NVMT_CMP_SRQ_CQID 0
+#define M_CPL_NVMT_CMP_SRQ_CQID 0xffff
+#define V_CPL_NVMT_CMP_SRQ_CQID(x) ((x) << S_CPL_NVMT_CMP_SRQ_CQID)
+#define G_CPL_NVMT_CMP_SRQ_CQID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_CQID) & M_CPL_NVMT_CMP_SRQ_CQID)
+
+#define S_CPL_NVMT_CMP_SRQ_GENERATION_BIT 31
+#define M_CPL_NVMT_CMP_SRQ_GENERATION_BIT 0x1
+#define V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ ((x) << S_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define G_CPL_NVMT_CMP_SRQ_GENERATION_BIT(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_GENERATION_BIT) & \
+ M_CPL_NVMT_CMP_SRQ_GENERATION_BIT)
+#define F_CPL_NVMT_CMP_SRQ_GENERATION_BIT \
+ V_CPL_NVMT_CMP_SRQ_GENERATION_BIT(1U)
+
+#define S_CPL_NVMT_CMP_SRQ_TID 8
+#define M_CPL_NVMT_CMP_SRQ_TID 0xfffff
+#define V_CPL_NVMT_CMP_SRQ_TID(x) ((x) << S_CPL_NVMT_CMP_SRQ_TID)
+#define G_CPL_NVMT_CMP_SRQ_TID(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_TID) & M_CPL_NVMT_CMP_SRQ_TID)
+
+#define S_CPL_NVMT_CMP_SRQ_OPRQINC 0
+#define M_CPL_NVMT_CMP_SRQ_OPRQINC 0xff
+#define V_CPL_NVMT_CMP_SRQ_OPRQINC(x) ((x) << S_CPL_NVMT_CMP_SRQ_OPRQINC)
+#define G_CPL_NVMT_CMP_SRQ_OPRQINC(x) \
+ (((x) >> S_CPL_NVMT_CMP_SRQ_OPRQINC) & M_CPL_NVMT_CMP_SRQ_OPRQINC)
+
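+/*
+ * Sketch, not generated output: as with the other *_GENERATION_BIT
+ * fields above, the generation bit is expected to flip each time the
+ * completion queue wraps, letting a poller spot a freshly written
+ * entry without a producer index.  Assuming an illustrative queue "q"
+ * that caches the expected phase and toggles it on each wrap:
+ *
+ *	uint32_t w = be32toh(cpl->generation_bit_to_oprqinc);
+ *	bool valid = G_CPL_NVMT_CMP_SRQ_GENERATION_BIT(w) == q->phase;
+ */
+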
#endif /* T4_MSG_H */
diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h
index e3b2a29b2ea9..8f500ec0fbdd 100644
--- a/sys/dev/cxgbe/common/t4_regs.h
+++ b/sys/dev/cxgbe/common/t4_regs.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2013, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2013, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,10 +27,11 @@
*/
/* This file is automatically generated --- changes will be lost */
-/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */
-/* Directory name: t4_reg.txt, Changeset: */
-/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */
-/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */
+/* Generation Date : Thu Sep 11 05:25:56 PM IST 2025 */
+/* Directory name: t4_reg.txt, Date: Not specified */
+/* Directory name: t5_reg.txt, Changeset: 6945:54ba4ba7ee8b */
+/* Directory name: t6_reg.txt, Changeset: 4277:9c165d0f4899 */
+/* Directory name: t7_reg.txt, Changeset: 5945:1487219ecb20 */
#define MYPF_BASE 0x1b000
#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
@@ -285,9 +285,6 @@
#define T5_PORT_BASE(idx) (T5_PORT0_BASE + (idx) * T5_PORT_STRIDE)
#define T5_PORT_REG(idx, reg) (T5_PORT_BASE(idx) + (reg))
-#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
-#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
-
#define PCIE_PF_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
#define NUM_PCIE_PF_INT_INSTANCES 8
@@ -459,9 +456,6 @@
#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11
-#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4)
-#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11
-
#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8
@@ -501,12 +495,175 @@
#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256
+#define T7_MYPORT_BASE 0x2e000
+#define T7_MYPORT_REG(reg_addr) (T7_MYPORT_BASE + (reg_addr))
+
+#define T7_PORT0_BASE 0x30000
+#define T7_PORT0_REG(reg_addr) (T7_PORT0_BASE + (reg_addr))
+
+#define T7_PORT1_BASE 0x32000
+#define T7_PORT1_REG(reg_addr) (T7_PORT1_BASE + (reg_addr))
+
+#define T7_PORT2_BASE 0x34000
+#define T7_PORT2_REG(reg_addr) (T7_PORT2_BASE + (reg_addr))
+
+#define T7_PORT3_BASE 0x36000
+#define T7_PORT3_REG(reg_addr) (T7_PORT3_BASE + (reg_addr))
+
+#define T7_PORT_STRIDE 0x2000
+#define T7_PORT_BASE(idx) (T7_PORT0_BASE + (idx) * T7_PORT_STRIDE)
+#define T7_PORT_REG(idx, reg) (T7_PORT_BASE(idx) + (reg))
+
+#define PCIE_MEM_ACCESS_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_MEM_ACCESS_T7_INSTANCES 16
+
+#define PCIE_T7_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T7_CMD_INSTANCES 1
+
+#define PCIE_T5_ARM_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_T5_ARM_INSTANCES 1
+
+#define PCIE_JBOF_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_PCIE_JBOF_INSTANCES 16
+
+#define PCIE_EMUADRRMAP_REG(reg_addr, idx) ((reg_addr) + (idx) * 32)
+#define NUM_PCIE_EMUADRRMAP_INSTANCES 3
+
+#define CIM_GFT_MASK(idx) (A_CIM_GFT_MASK + (idx) * 4)
+#define NUM_CIM_GFT_MASK_INSTANCES 4
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_A(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_A + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_A_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_MATCH_CTL_B(idx) (A_T7_MPS_TRC_FILTER_MATCH_CTL_B + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_MATCH_CTL_B_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_RUNT_CTL(idx) (A_T7_MPS_TRC_FILTER_RUNT_CTL + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_RUNT_CTL_INSTANCES 8
+
+#define T7_MPS_TRC_FILTER_DROP(idx) (A_T7_MPS_TRC_FILTER_DROP + (idx) * 4)
+#define NUM_T7_MPS_TRC_FILTER_DROP_INSTANCES 8
+
+#define MPS_TRC_FILTER4_MATCH(idx) (A_MPS_TRC_FILTER4_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER4_DONT_CARE(idx) (A_MPS_TRC_FILTER4_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER4_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER5_MATCH(idx) (A_MPS_TRC_FILTER5_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER5_DONT_CARE(idx) (A_MPS_TRC_FILTER5_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER5_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER6_MATCH(idx) (A_MPS_TRC_FILTER6_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER6_DONT_CARE(idx) (A_MPS_TRC_FILTER6_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER6_DONT_CARE_INSTANCES 28
+
+#define MPS_TRC_FILTER7_MATCH(idx) (A_MPS_TRC_FILTER7_MATCH + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_MATCH_INSTANCES 28
+
+#define MPS_TRC_FILTER7_DONT_CARE(idx) (A_MPS_TRC_FILTER7_DONT_CARE + (idx) * 4)
+#define NUM_MPS_TRC_FILTER7_DONT_CARE_INSTANCES 28
+
+#define LE_DB_DBGI_REQ_DATA_T7(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_DATA_T7_INSTANCES 13
+
+#define LE_DB_DBGI_REQ_MASK_T7(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4)
+#define NUM_LE_DB_DBGI_REQ_MASK_T7_INSTANCES 13
+
+#define LE_DB_ACTIVE_MASK_IPV6_T7(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4)
+#define NUM_LE_DB_ACTIVE_MASK_IPV6_T7_INSTANCES 8
+
+#define LE_HASH_MASK_GEN_IPV4T7(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4)
+#define NUM_LE_HASH_MASK_GEN_IPV4T7_INSTANCES 8
+
+#define T7_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T7_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4)
+#define NUM_T7_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8
+
+#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4)
+#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T7_INSTANCES 8
+
+#define TLS_TX_CH_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_INSTANCES 6
+
+#define TLS_TX_CH_IND_REG(reg_addr, idx) ((reg_addr) + (idx) * 256)
+#define NUM_TLS_TX_CH_IND_INSTANCES 6
+
+#define ARM_CPU_REG(reg_addr, idx) ((reg_addr) + (idx) * 16)
+#define NUM_ARM_CPU_INSTANCES 4
+
+#define ARM_CCIM_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIM_INSTANCES 4
+
+#define ARM_CCIS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define NUM_ARM_CCIS_INSTANCES 5
+
+#define ARM_CCI_EVNTBUS(idx) (A_ARM_CCI_EVNTBUS + (idx) * 4)
+#define NUM_ARM_CCI_EVNTBUS_INSTANCES 5
+
+#define ARM_ARM_CFG1(idx) (A_ARM_ARM_CFG1 + (idx) * 4)
+#define NUM_ARM_ARM_CFG1_INSTANCES 2
+
+#define ARM_ARM_CFG2(idx) (A_ARM_ARM_CFG2 + (idx) * 4)
+#define NUM_ARM_ARM_CFG2_INSTANCES 2
+
+#define ARM_MSG_REG(reg_addr, idx) ((reg_addr) + (idx) * 48)
+#define NUM_ARM_MSG_INSTANCES 4
+
+#define ARM_MSG_PCIE_MESSAGE2AXI_CFG4(idx) (A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 + (idx) * 4)
+#define NUM_ARM_MSG_PCIE_MESSAGE2AXI_CFG4_INSTANCES 2
+
+#define MC_CE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_CE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_UE_ERR_DATA_T7_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_UE_ERR_DATA_T7_INSTANCES 16
+
+#define MC_P_BIST_USER_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_MC_P_BIST_USER_INSTANCES 36
+
+#define HMA_H_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_HMA_H_BIST_STATUS_INSTANCES 18
+
+#define GCACHE_P_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define NUM_GCACHE_P_BIST_STATUS_INSTANCES 18
+
+#define CIM_CTL_MAILBOX_VF_STATUS_T7(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T7_INSTANCES 8
+
+#define CIM_CTL_MAILBOX_VFN_CTL_T7(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4)
+#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T7_INSTANCES 256
+
+#define CIM_CTL_TID_MAP_EN(idx) (A_CIM_CTL_TID_MAP_EN + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_EN_INSTANCES 8
+
+#define CIM_CTL_TID_MAP_CORE(idx) (A_CIM_CTL_TID_MAP_CORE + (idx) * 4)
+#define NUM_CIM_CTL_TID_MAP_CORE_INSTANCES 8
+
+#define CIM_CTL_CRYPTO_KEY_DATA(idx) (A_CIM_CTL_CRYPTO_KEY_DATA + (idx) * 4)
+#define NUM_CIM_CTL_CRYPTO_KEY_DATA_INSTANCES 17
+
+#define CIM_CTL_FLOWID_OP_VALID(idx) (A_CIM_CTL_FLOWID_OP_VALID + (idx) * 4)
+#define NUM_CIM_CTL_FLOWID_OP_VALID_INSTANCES 8
+
+#define CIM_CTL_SLV_REG(reg_addr, idx) ((reg_addr) + (idx) * 1024)
+#define NUM_CIM_CTL_SLV_INSTANCES 7
+
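Nearly every T7 addition above comes as a pair: an indexed address macro and a matching NUM_*_INSTANCES bound, so walking a register array is a plain loop over the index. A hedged sketch of the pattern (read32() is a hypothetical MMIO accessor, not part of this header, and A_CIM_CTL_MAILBOX_VFN_CTL is assumed to be defined elsewhere in it):

	#include <stdint.h>

	extern uint32_t read32(uint32_t addr);	/* hypothetical MMIO read */

	/* Touch one CIM_CTL mailbox control word per VF (256 instances). */
	static void
	dump_vfn_ctl(void)
	{
		int idx;

		for (idx = 0; idx < NUM_CIM_CTL_MAILBOX_VFN_CTL_T7_INSTANCES;
		    idx++)
			(void)read32(CIM_CTL_MAILBOX_VFN_CTL_T7(idx));
	}
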
#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
#define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx)
#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx)
+#define MC_STRIDE (MC_1_BASE_ADDR - MC_0_BASE_ADDR)
+#define MC_REG(reg, idx) (reg + MC_STRIDE * idx)
+
+#define MC_T7_STRIDE (MC_T71_BASE_ADDR - MC_T70_BASE_ADDR)
+#define MC_T7_REG(reg, idx) (reg + MC_T7_STRIDE * idx)
+
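Like the EDC strides above, MC_T7_STRIDE is derived from the difference of the two controllers' base addresses rather than hard-coded, so MC_T7_REG(reg, 1) relocates any controller-0 register into controller 1's bank. A self-contained check using hypothetical base values (the real MC_T70/MC_T71 bases are defined elsewhere in this header, so the numbers below are illustration only):

	#include <assert.h>

	/* Hypothetical bases for illustration only. */
	#ifndef MC_T70_BASE_ADDR
	#define MC_T70_BASE_ADDR 0x40000
	#define MC_T71_BASE_ADDR 0x48000
	#endif

	int
	main(void)
	{
		/* A bank-0 register at base + 0x100 maps to bank 1 + 0x100. */
		assert(MC_T7_REG(MC_T70_BASE_ADDR + 0x100, 1) ==
		    MC_T71_BASE_ADDR + 0x100);
		return (0);
	}
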
/* registers for module SGE */
#define SGE_BASE_ADDR 0x1000
@@ -637,6 +794,24 @@
#define V_GLOBALENABLE(x) ((x) << S_GLOBALENABLE)
#define F_GLOBALENABLE V_GLOBALENABLE(1U)
+#define S_NUMOFFID 19
+#define M_NUMOFFID 0x7U
+#define V_NUMOFFID(x) ((x) << S_NUMOFFID)
+#define G_NUMOFFID(x) (((x) >> S_NUMOFFID) & M_NUMOFFID)
+
+#define S_INGHINTENABLE2 16
+#define V_INGHINTENABLE2(x) ((x) << S_INGHINTENABLE2)
+#define F_INGHINTENABLE2 V_INGHINTENABLE2(1U)
+
+#define S_INGHINTENABLE3 3
+#define V_INGHINTENABLE3(x) ((x) << S_INGHINTENABLE3)
+#define F_INGHINTENABLE3 V_INGHINTENABLE3(1U)
+
+#define S_TF_MODE 1
+#define M_TF_MODE 0x3U
+#define V_TF_MODE(x) ((x) << S_TF_MODE)
+#define G_TF_MODE(x) (((x) >> S_TF_MODE) & M_TF_MODE)
+
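Throughout this header the field macros follow one convention: S_<FIELD> is the field's LSB position, M_<FIELD> the unshifted width mask, V_<FIELD>(x) shifts a value into place, G_<FIELD>(x) pulls it back out, and F_<FIELD> is the single-bit flag form of V_<FIELD>(1U). A minimal sketch of packing and unpacking, using the fields just added above (the register image and the values are made up for illustration):

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t reg = 0;

		reg |= V_NUMOFFID(5);		/* 3-bit field at bit 19 */
		reg |= F_INGHINTENABLE2;	/* 1-bit flag at bit 16 */
		reg |= V_TF_MODE(2);		/* 2-bit field at bit 1 */

		assert(G_NUMOFFID(reg) == 5);
		assert((reg & F_INGHINTENABLE2) != 0);
		assert(G_TF_MODE(reg) == 2);
		return (0);
	}
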
#define A_SGE_HOST_PAGE_SIZE 0x100c
#define S_HOSTPAGESIZEPF7 28
@@ -792,6 +967,16 @@
#define V_WR_ERROR_OPCODE(x) ((x) << S_WR_ERROR_OPCODE)
#define G_WR_ERROR_OPCODE(x) (((x) >> S_WR_ERROR_OPCODE) & M_WR_ERROR_OPCODE)
+#define S_WR_SENDPATH_ERROR_OPCODE 16
+#define M_WR_SENDPATH_ERROR_OPCODE 0xffU
+#define V_WR_SENDPATH_ERROR_OPCODE(x) ((x) << S_WR_SENDPATH_ERROR_OPCODE)
+#define G_WR_SENDPATH_ERROR_OPCODE(x) (((x) >> S_WR_SENDPATH_ERROR_OPCODE) & M_WR_SENDPATH_ERROR_OPCODE)
+
+#define S_WR_SENDPATH_OPCODE 8
+#define M_WR_SENDPATH_OPCODE 0xffU
+#define V_WR_SENDPATH_OPCODE(x) ((x) << S_WR_SENDPATH_OPCODE)
+#define G_WR_SENDPATH_OPCODE(x) (((x) >> S_WR_SENDPATH_OPCODE) & M_WR_SENDPATH_OPCODE)
+
#define A_SGE_PERR_INJECT 0x1020
#define S_MEMSEL 1
@@ -941,6 +1126,22 @@
#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ)
#define F_PERR_PC_REQ V_PERR_PC_REQ(1U)
+#define S_PERR_HEADERSPLIT_FIFO3 28
+#define V_PERR_HEADERSPLIT_FIFO3(x) ((x) << S_PERR_HEADERSPLIT_FIFO3)
+#define F_PERR_HEADERSPLIT_FIFO3 V_PERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_PERR_HEADERSPLIT_FIFO2 27
+#define V_PERR_HEADERSPLIT_FIFO2(x) ((x) << S_PERR_HEADERSPLIT_FIFO2)
+#define F_PERR_HEADERSPLIT_FIFO2 V_PERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_PERR_PAYLOAD_FIFO3 26
+#define V_PERR_PAYLOAD_FIFO3(x) ((x) << S_PERR_PAYLOAD_FIFO3)
+#define F_PERR_PAYLOAD_FIFO3 V_PERR_PAYLOAD_FIFO3(1U)
+
+#define S_PERR_PAYLOAD_FIFO2 25
+#define V_PERR_PAYLOAD_FIFO2(x) ((x) << S_PERR_PAYLOAD_FIFO2)
+#define F_PERR_PAYLOAD_FIFO2 V_PERR_PAYLOAD_FIFO2(1U)
+
#define A_SGE_INT_ENABLE1 0x1028
#define A_SGE_PERR_ENABLE1 0x102c
#define A_SGE_INT_CAUSE2 0x1030
@@ -1105,6 +1306,22 @@
#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO)
#define F_PERR_DB_FIFO V_PERR_DB_FIFO(1U)
+#define S_TF_FIFO_PERR 24
+#define V_TF_FIFO_PERR(x) ((x) << S_TF_FIFO_PERR)
+#define F_TF_FIFO_PERR V_TF_FIFO_PERR(1U)
+
+#define S_PERR_ISW_IDMA3_FIFO 15
+#define V_PERR_ISW_IDMA3_FIFO(x) ((x) << S_PERR_ISW_IDMA3_FIFO)
+#define F_PERR_ISW_IDMA3_FIFO V_PERR_ISW_IDMA3_FIFO(1U)
+
+#define S_PERR_ISW_IDMA2_FIFO 13
+#define V_PERR_ISW_IDMA2_FIFO(x) ((x) << S_PERR_ISW_IDMA2_FIFO)
+#define F_PERR_ISW_IDMA2_FIFO V_PERR_ISW_IDMA2_FIFO(1U)
+
+#define S_SGE_IPP_FIFO_PERR 5
+#define V_SGE_IPP_FIFO_PERR(x) ((x) << S_SGE_IPP_FIFO_PERR)
+#define F_SGE_IPP_FIFO_PERR V_SGE_IPP_FIFO_PERR(1U)
+
#define A_SGE_INT_ENABLE2 0x1034
#define A_SGE_PERR_ENABLE2 0x1038
#define A_SGE_INT_CAUSE3 0x103c
@@ -1259,110 +1476,20 @@
#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
#define A_SGE_FL_BUFFER_SIZE1 0x1048
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE2 0x104c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE3 0x1050
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE4 0x1054
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE5 0x1058
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE6 0x105c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE7 0x1060
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE8 0x1064
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE9 0x1068
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE10 0x106c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE11 0x1070
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE12 0x1074
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE13 0x1078
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE14 0x107c
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_FL_BUFFER_SIZE15 0x1080
-
-#define S_T6_SIZE 4
-#define M_T6_SIZE 0xfffffU
-#define V_T6_SIZE(x) ((x) << S_T6_SIZE)
-#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE)
-
#define A_SGE_DBQ_CTXT_BADDR 0x1084
#define S_BASEADDR 3
@@ -1426,6 +1553,10 @@
#define V_NULLPTREN(x) ((x) << S_NULLPTREN)
#define F_NULLPTREN V_NULLPTREN(1U)
+#define S_HDRSTARTFLQ4K 1
+#define V_HDRSTARTFLQ4K(x) ((x) << S_HDRSTARTFLQ4K)
+#define F_HDRSTARTFLQ4K V_HDRSTARTFLQ4K(1U)
+
#define A_SGE_CONM_CTRL 0x1094
#define S_EGRTHRESHOLD 8
@@ -2243,6 +2374,34 @@
#define V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(x) ((x) << S_PERR_IDMA_SWITCH_OUTPUT_FIFO0)
#define F_PERR_IDMA_SWITCH_OUTPUT_FIFO0 V_PERR_IDMA_SWITCH_OUTPUT_FIFO0(1U)
+#define S_PERR_POINTER_HDR_FIFO3 10
+#define V_PERR_POINTER_HDR_FIFO3(x) ((x) << S_PERR_POINTER_HDR_FIFO3)
+#define F_PERR_POINTER_HDR_FIFO3 V_PERR_POINTER_HDR_FIFO3(1U)
+
+#define S_PERR_POINTER_HDR_FIFO2 9
+#define V_PERR_POINTER_HDR_FIFO2(x) ((x) << S_PERR_POINTER_HDR_FIFO2)
+#define F_PERR_POINTER_HDR_FIFO2 V_PERR_POINTER_HDR_FIFO2(1U)
+
+#define S_PERR_POINTER_DATA_FIFO3 8
+#define V_PERR_POINTER_DATA_FIFO3(x) ((x) << S_PERR_POINTER_DATA_FIFO3)
+#define F_PERR_POINTER_DATA_FIFO3 V_PERR_POINTER_DATA_FIFO3(1U)
+
+#define S_PERR_POINTER_DATA_FIFO2 7
+#define V_PERR_POINTER_DATA_FIFO2(x) ((x) << S_PERR_POINTER_DATA_FIFO2)
+#define F_PERR_POINTER_DATA_FIFO2 V_PERR_POINTER_DATA_FIFO2(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO3 3
+#define V_PERR_IDMA2IMSG_FIFO3(x) ((x) << S_PERR_IDMA2IMSG_FIFO3)
+#define F_PERR_IDMA2IMSG_FIFO3 V_PERR_IDMA2IMSG_FIFO3(1U)
+
+#define S_PERR_IDMA2IMSG_FIFO2 2
+#define V_PERR_IDMA2IMSG_FIFO2(x) ((x) << S_PERR_IDMA2IMSG_FIFO2)
+#define F_PERR_IDMA2IMSG_FIFO2 V_PERR_IDMA2IMSG_FIFO2(1U)
+
+#define S_PERR_HINT_DELAY_FIFO 0
+#define V_PERR_HINT_DELAY_FIFO(x) ((x) << S_PERR_HINT_DELAY_FIFO)
+#define F_PERR_HINT_DELAY_FIFO V_PERR_HINT_DELAY_FIFO(1U)
+
#define A_SGE_INT_ENABLE5 0x1110
#define A_SGE_PERR_ENABLE5 0x1114
#define A_SGE_DBFIFO_STATUS2 0x1118
@@ -2359,6 +2518,46 @@
#define V_TX_COALESCE_PRI(x) ((x) << S_TX_COALESCE_PRI)
#define F_TX_COALESCE_PRI V_TX_COALESCE_PRI(1U)
+#define S_HINT_SGE_SEL 31
+#define V_HINT_SGE_SEL(x) ((x) << S_HINT_SGE_SEL)
+#define F_HINT_SGE_SEL V_HINT_SGE_SEL(1U)
+
+#define S_HINT_SEL 30
+#define V_HINT_SEL(x) ((x) << S_HINT_SEL)
+#define F_HINT_SEL V_HINT_SEL(1U)
+
+#define S_HINT_DISABLE 29
+#define V_HINT_DISABLE(x) ((x) << S_HINT_DISABLE)
+#define F_HINT_DISABLE V_HINT_DISABLE(1U)
+
+#define S_RXCPLMODE_ISCSI 28
+#define V_RXCPLMODE_ISCSI(x) ((x) << S_RXCPLMODE_ISCSI)
+#define F_RXCPLMODE_ISCSI V_RXCPLMODE_ISCSI(1U)
+
+#define S_RXCPLMODE_NVMT 27
+#define V_RXCPLMODE_NVMT(x) ((x) << S_RXCPLMODE_NVMT)
+#define F_RXCPLMODE_NVMT V_RXCPLMODE_NVMT(1U)
+
+#define S_WRE_REPLAY_INORDER 26
+#define V_WRE_REPLAY_INORDER(x) ((x) << S_WRE_REPLAY_INORDER)
+#define F_WRE_REPLAY_INORDER V_WRE_REPLAY_INORDER(1U)
+
+#define S_ETH2XEN 25
+#define V_ETH2XEN(x) ((x) << S_ETH2XEN)
+#define F_ETH2XEN V_ETH2XEN(1U)
+
+#define S_ARMDBENDDIS 24
+#define V_ARMDBENDDIS(x) ((x) << S_ARMDBENDDIS)
+#define F_ARMDBENDDIS V_ARMDBENDDIS(1U)
+
+#define S_PACKPADT7 23
+#define V_PACKPADT7(x) ((x) << S_PACKPADT7)
+#define F_PACKPADT7 V_PACKPADT7(1U)
+
+#define S_WRE_UPFLCREDIT 22
+#define V_WRE_UPFLCREDIT(x) ((x) << S_WRE_UPFLCREDIT)
+#define F_WRE_UPFLCREDIT V_WRE_UPFLCREDIT(1U)
+
#define A_SGE_DEEP_SLEEP 0x1128
#define S_IDMA1_SLEEP_STATUS 11
@@ -2493,6 +2692,42 @@
#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ)
#define F_FATAL_DEQ V_FATAL_DEQ(1U)
+#define S_FATAL_DEQ0_DRDY 29
+#define M_FATAL_DEQ0_DRDY 0x7U
+#define V_FATAL_DEQ0_DRDY(x) ((x) << S_FATAL_DEQ0_DRDY)
+#define G_FATAL_DEQ0_DRDY(x) (((x) >> S_FATAL_DEQ0_DRDY) & M_FATAL_DEQ0_DRDY)
+
+#define S_FATAL_OUT0_DRDY 26
+#define M_FATAL_OUT0_DRDY 0x7U
+#define V_FATAL_OUT0_DRDY(x) ((x) << S_FATAL_OUT0_DRDY)
+#define G_FATAL_OUT0_DRDY(x) (((x) >> S_FATAL_OUT0_DRDY) & M_FATAL_OUT0_DRDY)
+
+#define S_IMSG_DBG3_STUCK 25
+#define V_IMSG_DBG3_STUCK(x) ((x) << S_IMSG_DBG3_STUCK)
+#define F_IMSG_DBG3_STUCK V_IMSG_DBG3_STUCK(1U)
+
+#define S_IMSG_DBG2_STUCK 24
+#define V_IMSG_DBG2_STUCK(x) ((x) << S_IMSG_DBG2_STUCK)
+#define F_IMSG_DBG2_STUCK V_IMSG_DBG2_STUCK(1U)
+
+#define S_IMSG_DBG1_STUCK 23
+#define V_IMSG_DBG1_STUCK(x) ((x) << S_IMSG_DBG1_STUCK)
+#define F_IMSG_DBG1_STUCK V_IMSG_DBG1_STUCK(1U)
+
+#define S_IMSG_DBG0_STUCK 22
+#define V_IMSG_DBG0_STUCK(x) ((x) << S_IMSG_DBG0_STUCK)
+#define F_IMSG_DBG0_STUCK V_IMSG_DBG0_STUCK(1U)
+
+#define S_FATAL_DEQ1_DRDY 3
+#define M_FATAL_DEQ1_DRDY 0x3U
+#define V_FATAL_DEQ1_DRDY(x) ((x) << S_FATAL_DEQ1_DRDY)
+#define G_FATAL_DEQ1_DRDY(x) (((x) >> S_FATAL_DEQ1_DRDY) & M_FATAL_DEQ1_DRDY)
+
+#define S_FATAL_OUT1_DRDY 1
+#define M_FATAL_OUT1_DRDY 0x3U
+#define V_FATAL_OUT1_DRDY(x) ((x) << S_FATAL_OUT1_DRDY)
+#define G_FATAL_OUT1_DRDY(x) (((x) >> S_FATAL_OUT1_DRDY) & M_FATAL_OUT1_DRDY)
+
#define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c
#define S_THROTTLE_THRESHOLD_FL 16
@@ -2612,6 +2847,55 @@
#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0)
#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0)
+#define A_SGE_TBUF_CONTROL0 0x114c
+#define A_SGE_TBUF_CONTROL1 0x1150
+
+#define S_DBPTBUFRSV3 9
+#define M_DBPTBUFRSV3 0x1ffU
+#define V_DBPTBUFRSV3(x) ((x) << S_DBPTBUFRSV3)
+#define G_DBPTBUFRSV3(x) (((x) >> S_DBPTBUFRSV3) & M_DBPTBUFRSV3)
+
+#define S_DBPTBUFRSV2 0
+#define M_DBPTBUFRSV2 0x1ffU
+#define V_DBPTBUFRSV2(x) ((x) << S_DBPTBUFRSV2)
+#define G_DBPTBUFRSV2(x) (((x) >> S_DBPTBUFRSV2) & M_DBPTBUFRSV2)
+
+#define A_SGE_TBUF_CONTROL2 0x1154
+
+#define S_DBPTBUFRSV5 9
+#define M_DBPTBUFRSV5 0x1ffU
+#define V_DBPTBUFRSV5(x) ((x) << S_DBPTBUFRSV5)
+#define G_DBPTBUFRSV5(x) (((x) >> S_DBPTBUFRSV5) & M_DBPTBUFRSV5)
+
+#define S_DBPTBUFRSV4 0
+#define M_DBPTBUFRSV4 0x1ffU
+#define V_DBPTBUFRSV4(x) ((x) << S_DBPTBUFRSV4)
+#define G_DBPTBUFRSV4(x) (((x) >> S_DBPTBUFRSV4) & M_DBPTBUFRSV4)
+
+#define A_SGE_TBUF_CONTROL3 0x1158
+
+#define S_DBPTBUFRSV7 9
+#define M_DBPTBUFRSV7 0x1ffU
+#define V_DBPTBUFRSV7(x) ((x) << S_DBPTBUFRSV7)
+#define G_DBPTBUFRSV7(x) (((x) >> S_DBPTBUFRSV7) & M_DBPTBUFRSV7)
+
+#define S_DBPTBUFRSV6 0
+#define M_DBPTBUFRSV6 0x1ffU
+#define V_DBPTBUFRSV6(x) ((x) << S_DBPTBUFRSV6)
+#define G_DBPTBUFRSV6(x) (((x) >> S_DBPTBUFRSV6) & M_DBPTBUFRSV6)
+
+#define A_SGE_TBUF_CONTROL4 0x115c
+
+#define S_DBPTBUFRSV9 9
+#define M_DBPTBUFRSV9 0x1ffU
+#define V_DBPTBUFRSV9(x) ((x) << S_DBPTBUFRSV9)
+#define G_DBPTBUFRSV9(x) (((x) >> S_DBPTBUFRSV9) & M_DBPTBUFRSV9)
+
+#define S_DBPTBUFRSV8 0
+#define M_DBPTBUFRSV8 0x1ffU
+#define V_DBPTBUFRSV8(x) ((x) << S_DBPTBUFRSV8)
+#define G_DBPTBUFRSV8(x) (((x) >> S_DBPTBUFRSV8) & M_DBPTBUFRSV8)
+
#define A_SGE_PC0_REQ_BIST_CMD 0x1180
#define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184
#define A_SGE_PC1_REQ_BIST_CMD 0x1190
@@ -2620,6 +2904,113 @@
#define A_SGE_PC0_RSP_BIST_ERROR_CNT 0x11a4
#define A_SGE_PC1_RSP_BIST_CMD 0x11b0
#define A_SGE_PC1_RSP_BIST_ERROR_CNT 0x11b4
+#define A_SGE_DBQ_TIMER_THRESH0 0x11b8
+
+#define S_TXTIMETH3 24
+#define M_TXTIMETH3 0x3fU
+#define V_TXTIMETH3(x) ((x) << S_TXTIMETH3)
+#define G_TXTIMETH3(x) (((x) >> S_TXTIMETH3) & M_TXTIMETH3)
+
+#define S_TXTIMETH2 16
+#define M_TXTIMETH2 0x3fU
+#define V_TXTIMETH2(x) ((x) << S_TXTIMETH2)
+#define G_TXTIMETH2(x) (((x) >> S_TXTIMETH2) & M_TXTIMETH2)
+
+#define S_TXTIMETH1 8
+#define M_TXTIMETH1 0x3fU
+#define V_TXTIMETH1(x) ((x) << S_TXTIMETH1)
+#define G_TXTIMETH1(x) (((x) >> S_TXTIMETH1) & M_TXTIMETH1)
+
+#define S_TXTIMETH0 0
+#define M_TXTIMETH0 0x3fU
+#define V_TXTIMETH0(x) ((x) << S_TXTIMETH0)
+#define G_TXTIMETH0(x) (((x) >> S_TXTIMETH0) & M_TXTIMETH0)
+
+#define A_SGE_DBQ_TIMER_THRESH1 0x11bc
+
+#define S_TXTIMETH7 24
+#define M_TXTIMETH7 0x3fU
+#define V_TXTIMETH7(x) ((x) << S_TXTIMETH7)
+#define G_TXTIMETH7(x) (((x) >> S_TXTIMETH7) & M_TXTIMETH7)
+
+#define S_TXTIMETH6 16
+#define M_TXTIMETH6 0x3fU
+#define V_TXTIMETH6(x) ((x) << S_TXTIMETH6)
+#define G_TXTIMETH6(x) (((x) >> S_TXTIMETH6) & M_TXTIMETH6)
+
+#define S_TXTIMETH5 8
+#define M_TXTIMETH5 0x3fU
+#define V_TXTIMETH5(x) ((x) << S_TXTIMETH5)
+#define G_TXTIMETH5(x) (((x) >> S_TXTIMETH5) & M_TXTIMETH5)
+
+#define S_TXTIMETH4 0
+#define M_TXTIMETH4 0x3fU
+#define V_TXTIMETH4(x) ((x) << S_TXTIMETH4)
+#define G_TXTIMETH4(x) (((x) >> S_TXTIMETH4) & M_TXTIMETH4)
+
+#define A_SGE_DBQ_TIMER_CONFIG 0x11c0
+
+#define S_DBQ_TIMER_OP 0
+#define M_DBQ_TIMER_OP 0xffU
+#define V_DBQ_TIMER_OP(x) ((x) << S_DBQ_TIMER_OP)
+#define G_DBQ_TIMER_OP(x) (((x) >> S_DBQ_TIMER_OP) & M_DBQ_TIMER_OP)
+
+#define A_SGE_DBQ_TIMER_DBG 0x11c4
+
+#define S_DBQ_TIMER_CMD 31
+#define V_DBQ_TIMER_CMD(x) ((x) << S_DBQ_TIMER_CMD)
+#define F_DBQ_TIMER_CMD V_DBQ_TIMER_CMD(1U)
+
+#define S_DBQ_TIMER_INDEX 24
+#define M_DBQ_TIMER_INDEX 0x3fU
+#define V_DBQ_TIMER_INDEX(x) ((x) << S_DBQ_TIMER_INDEX)
+#define G_DBQ_TIMER_INDEX(x) (((x) >> S_DBQ_TIMER_INDEX) & M_DBQ_TIMER_INDEX)
+
+#define S_DBQ_TIMER_QCNT 0
+#define M_DBQ_TIMER_QCNT 0x1ffffU
+#define V_DBQ_TIMER_QCNT(x) ((x) << S_DBQ_TIMER_QCNT)
+#define G_DBQ_TIMER_QCNT(x) (((x) >> S_DBQ_TIMER_QCNT) & M_DBQ_TIMER_QCNT)
+
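A_SGE_DBQ_TIMER_DBG packs a command strobe, a timer index, and a queue-count readout into one word; a plausible access pattern is to write the command bit plus an index, then read back the count field. Sketch only, with hypothetical read32()/write32() accessors and no claim about the hardware's actual handshake:

	#include <stdint.h>

	extern uint32_t read32(uint32_t addr);		/* hypothetical */
	extern void write32(uint32_t addr, uint32_t v);	/* hypothetical */

	static uint32_t
	dbq_timer_qcnt(int idx)
	{
		write32(A_SGE_DBQ_TIMER_DBG,
		    F_DBQ_TIMER_CMD | V_DBQ_TIMER_INDEX(idx));
		return (G_DBQ_TIMER_QCNT(read32(A_SGE_DBQ_TIMER_DBG)));
	}
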
+#define A_SGE_INT_CAUSE8 0x11c8
+
+#define S_TRACE_RXPERR 8
+#define V_TRACE_RXPERR(x) ((x) << S_TRACE_RXPERR)
+#define F_TRACE_RXPERR V_TRACE_RXPERR(1U)
+
+#define S_U3_RXPERR 7
+#define V_U3_RXPERR(x) ((x) << S_U3_RXPERR)
+#define F_U3_RXPERR V_U3_RXPERR(1U)
+
+#define S_U2_RXPERR 6
+#define V_U2_RXPERR(x) ((x) << S_U2_RXPERR)
+#define F_U2_RXPERR V_U2_RXPERR(1U)
+
+#define S_U1_RXPERR 5
+#define V_U1_RXPERR(x) ((x) << S_U1_RXPERR)
+#define F_U1_RXPERR V_U1_RXPERR(1U)
+
+#define S_U0_RXPERR 4
+#define V_U0_RXPERR(x) ((x) << S_U0_RXPERR)
+#define F_U0_RXPERR V_U0_RXPERR(1U)
+
+#define S_T3_RXPERR 3
+#define V_T3_RXPERR(x) ((x) << S_T3_RXPERR)
+#define F_T3_RXPERR V_T3_RXPERR(1U)
+
+#define S_T2_RXPERR 2
+#define V_T2_RXPERR(x) ((x) << S_T2_RXPERR)
+#define F_T2_RXPERR V_T2_RXPERR(1U)
+
+#define S_T1_RXPERR 1
+#define V_T1_RXPERR(x) ((x) << S_T1_RXPERR)
+#define F_T1_RXPERR V_T1_RXPERR(1U)
+
+#define S_T0_RXPERR 0
+#define V_T0_RXPERR(x) ((x) << S_T0_RXPERR)
+#define F_T0_RXPERR V_T0_RXPERR(1U)
+
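A_SGE_INT_CAUSE8 is a plain bit vector with one F_* flag per RX parity-error source, so decoding the cause is a chain of flag tests on the read-back value. A hedged sketch; read32() is a stand-in accessor and the messages are illustrative, not the driver's actual wording:

	#include <stdint.h>
	#include <stdio.h>

	extern uint32_t read32(uint32_t addr);	/* hypothetical MMIO read */

	static void
	sge_intr_cause8(void)
	{
		uint32_t cause = read32(A_SGE_INT_CAUSE8);

		if (cause & F_TRACE_RXPERR)
			printf("SGE: TRACE_RXPERR\n");
		if (cause & (F_U0_RXPERR | F_U1_RXPERR |
		    F_U2_RXPERR | F_U3_RXPERR))
			printf("SGE: Ux_RXPERR\n");
		if (cause & (F_T0_RXPERR | F_T1_RXPERR |
		    F_T2_RXPERR | F_T3_RXPERR))
			printf("SGE: Tx_RXPERR\n");
	}
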
+#define A_SGE_INT_ENABLE8 0x11cc
+#define A_SGE_PERR_ENABLE8 0x11d0
#define A_SGE_CTXT_CMD 0x11fc
#define S_BUSY 31
@@ -2648,6 +3039,17 @@
#define A_SGE_CTXT_DATA4 0x1210
#define A_SGE_CTXT_DATA5 0x1214
#define A_SGE_CTXT_DATA6 0x1218
+
+#define S_DATA_UNUSED 7
+#define M_DATA_UNUSED 0x1ffffffU
+#define V_DATA_UNUSED(x) ((x) << S_DATA_UNUSED)
+#define G_DATA_UNUSED(x) (((x) >> S_DATA_UNUSED) & M_DATA_UNUSED)
+
+#define S_DATA6 0
+#define M_DATA6 0x7fU
+#define V_DATA6(x) ((x) << S_DATA6)
+#define G_DATA6(x) (((x) >> S_DATA6) & M_DATA6)
+
#define A_SGE_CTXT_DATA7 0x121c
#define A_SGE_CTXT_MASK0 0x1220
#define A_SGE_CTXT_MASK1 0x1224
@@ -2656,6 +3058,17 @@
#define A_SGE_CTXT_MASK4 0x1230
#define A_SGE_CTXT_MASK5 0x1234
#define A_SGE_CTXT_MASK6 0x1238
+
+#define S_MASK_UNUSED 7
+#define M_MASK_UNUSED 0x1ffffffU
+#define V_MASK_UNUSED(x) ((x) << S_MASK_UNUSED)
+#define G_MASK_UNUSED(x) (((x) >> S_MASK_UNUSED) & M_MASK_UNUSED)
+
+#define S_MASK 0
+#define M_MASK 0x7fU
+#define V_MASK(x) ((x) << S_MASK)
+#define G_MASK(x) (((x) >> S_MASK) & M_MASK)
+
#define A_SGE_CTXT_MASK7 0x123c
#define A_SGE_QBASE_MAP0 0x1240
@@ -2674,6 +3087,10 @@
#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE)
#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE)
+#define S_DESTINATION 31
+#define V_DESTINATION(x) ((x) << S_DESTINATION)
+#define F_DESTINATION V_DESTINATION(1U)
+
#define A_SGE_QBASE_MAP1 0x1244
#define S_EGRESS0_BASE 0
@@ -2719,6 +3136,10 @@
#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH)
#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH)
+#define S_CONENMIDDLE 7
+#define V_CONENMIDDLE(x) ((x) << S_CONENMIDDLE)
+#define F_CONENMIDDLE V_CONENMIDDLE(1U)
+
#define A_SGE_DEBUG_CONM 0x1258
#define S_MPS_CH_CNG 16
@@ -2745,6 +3166,16 @@
#define V_LAST_QID(x) ((x) << S_LAST_QID)
#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID)
+#define S_CH_CNG 16
+#define M_CH_CNG 0xffffU
+#define V_CH_CNG(x) ((x) << S_CH_CNG)
+#define G_CH_CNG(x) (((x) >> S_CH_CNG) & M_CH_CNG)
+
+#define S_CH_SEL 14
+#define M_CH_SEL 0x3U
+#define V_CH_SEL(x) ((x) << S_CH_SEL)
+#define G_CH_SEL(x) (((x) >> S_CH_SEL) & M_CH_SEL)
+
#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c
#define S_IMSG_GTS_SEL 18
@@ -2766,6 +3197,7 @@
#define A_SGE_DBG_BAR2_PKT_CNT 0x126c
#define A_SGE_DBG_DB_PKT_CNT 0x1270
#define A_SGE_DBG_GTS_PKT_CNT 0x1274
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_16 0x1278
#define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280
#define S_CIM_WM 24
@@ -3965,6 +4397,352 @@
#define V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET)
#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET)
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_17 0x1340
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_18 0x1344
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_19 0x1348
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_20 0x134c
+#define A_SGE_DEBUG_DATA_HIGH_INDEX_21 0x1350
+#define A_SGE_DEBUG_DATA_LOW_INDEX_16 0x1354
+#define A_SGE_DEBUG_DATA_LOW_INDEX_17 0x1358
+#define A_SGE_DEBUG_DATA_LOW_INDEX_18 0x135c
+#define A_SGE_INT_CAUSE7 0x1360
+
+#define S_HINT_FIFO_FULL 25
+#define V_HINT_FIFO_FULL(x) ((x) << S_HINT_FIFO_FULL)
+#define F_HINT_FIFO_FULL V_HINT_FIFO_FULL(1U)
+
+#define S_CERR_HINT_DELAY_FIFO 24
+#define V_CERR_HINT_DELAY_FIFO(x) ((x) << S_CERR_HINT_DELAY_FIFO)
+#define F_CERR_HINT_DELAY_FIFO V_CERR_HINT_DELAY_FIFO(1U)
+
+#define S_COAL_TIMER_FIFO_PERR 23
+#define V_COAL_TIMER_FIFO_PERR(x) ((x) << S_COAL_TIMER_FIFO_PERR)
+#define F_COAL_TIMER_FIFO_PERR V_COAL_TIMER_FIFO_PERR(1U)
+
+#define S_CMP_FIFO_PERR 22
+#define V_CMP_FIFO_PERR(x) ((x) << S_CMP_FIFO_PERR)
+#define F_CMP_FIFO_PERR V_CMP_FIFO_PERR(1U)
+
+#define S_SGE_IPP_FIFO_CERR 21
+#define V_SGE_IPP_FIFO_CERR(x) ((x) << S_SGE_IPP_FIFO_CERR)
+#define F_SGE_IPP_FIFO_CERR V_SGE_IPP_FIFO_CERR(1U)
+
+#define S_CERR_ING_CTXT_CACHE 20
+#define V_CERR_ING_CTXT_CACHE(x) ((x) << S_CERR_ING_CTXT_CACHE)
+#define F_CERR_ING_CTXT_CACHE V_CERR_ING_CTXT_CACHE(1U)
+
+#define S_IMSG_CNTX_PERR 19
+#define V_IMSG_CNTX_PERR(x) ((x) << S_IMSG_CNTX_PERR)
+#define F_IMSG_CNTX_PERR V_IMSG_CNTX_PERR(1U)
+
+#define S_PD_FIFO_PERR 18
+#define V_PD_FIFO_PERR(x) ((x) << S_PD_FIFO_PERR)
+#define F_PD_FIFO_PERR V_PD_FIFO_PERR(1U)
+
+#define S_IMSG_512_FIFO_PERR 17
+#define V_IMSG_512_FIFO_PERR(x) ((x) << S_IMSG_512_FIFO_PERR)
+#define F_IMSG_512_FIFO_PERR V_IMSG_512_FIFO_PERR(1U)
+
+#define S_CPLSW_FIFO_PERR 16
+#define V_CPLSW_FIFO_PERR(x) ((x) << S_CPLSW_FIFO_PERR)
+#define F_CPLSW_FIFO_PERR V_CPLSW_FIFO_PERR(1U)
+
+#define S_IMSG_FIFO_PERR 15
+#define V_IMSG_FIFO_PERR(x) ((x) << S_IMSG_FIFO_PERR)
+#define F_IMSG_FIFO_PERR V_IMSG_FIFO_PERR(1U)
+
+#define S_CERR_ITP_EVR 14
+#define V_CERR_ITP_EVR(x) ((x) << S_CERR_ITP_EVR)
+#define F_CERR_ITP_EVR V_CERR_ITP_EVR(1U)
+
+#define S_CERR_CONM_SRAM 13
+#define V_CERR_CONM_SRAM(x) ((x) << S_CERR_CONM_SRAM)
+#define F_CERR_CONM_SRAM V_CERR_CONM_SRAM(1U)
+
+#define S_CERR_EGR_CTXT_CACHE 12
+#define V_CERR_EGR_CTXT_CACHE(x) ((x) << S_CERR_EGR_CTXT_CACHE)
+#define F_CERR_EGR_CTXT_CACHE V_CERR_EGR_CTXT_CACHE(1U)
+
+#define S_CERR_FLM_CNTXMEM 11
+#define V_CERR_FLM_CNTXMEM(x) ((x) << S_CERR_FLM_CNTXMEM)
+#define F_CERR_FLM_CNTXMEM V_CERR_FLM_CNTXMEM(1U)
+
+#define S_CERR_FUNC_QBASE 10
+#define V_CERR_FUNC_QBASE(x) ((x) << S_CERR_FUNC_QBASE)
+#define F_CERR_FUNC_QBASE V_CERR_FUNC_QBASE(1U)
+
+#define S_IMSG_CNTX_CERR 9
+#define V_IMSG_CNTX_CERR(x) ((x) << S_IMSG_CNTX_CERR)
+#define F_IMSG_CNTX_CERR V_IMSG_CNTX_CERR(1U)
+
+#define S_PD_FIFO_CERR 8
+#define V_PD_FIFO_CERR(x) ((x) << S_PD_FIFO_CERR)
+#define F_PD_FIFO_CERR V_PD_FIFO_CERR(1U)
+
+#define S_IMSG_512_FIFO_CERR 7
+#define V_IMSG_512_FIFO_CERR(x) ((x) << S_IMSG_512_FIFO_CERR)
+#define F_IMSG_512_FIFO_CERR V_IMSG_512_FIFO_CERR(1U)
+
+#define S_CPLSW_FIFO_CERR 6
+#define V_CPLSW_FIFO_CERR(x) ((x) << S_CPLSW_FIFO_CERR)
+#define F_CPLSW_FIFO_CERR V_CPLSW_FIFO_CERR(1U)
+
+#define S_IMSG_FIFO_CERR 5
+#define V_IMSG_FIFO_CERR(x) ((x) << S_IMSG_FIFO_CERR)
+#define F_IMSG_FIFO_CERR V_IMSG_FIFO_CERR(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO3 4
+#define V_CERR_HEADERSPLIT_FIFO3(x) ((x) << S_CERR_HEADERSPLIT_FIFO3)
+#define F_CERR_HEADERSPLIT_FIFO3 V_CERR_HEADERSPLIT_FIFO3(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO2 3
+#define V_CERR_HEADERSPLIT_FIFO2(x) ((x) << S_CERR_HEADERSPLIT_FIFO2)
+#define F_CERR_HEADERSPLIT_FIFO2 V_CERR_HEADERSPLIT_FIFO2(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO1 2
+#define V_CERR_HEADERSPLIT_FIFO1(x) ((x) << S_CERR_HEADERSPLIT_FIFO1)
+#define F_CERR_HEADERSPLIT_FIFO1 V_CERR_HEADERSPLIT_FIFO1(1U)
+
+#define S_CERR_HEADERSPLIT_FIFO0 1
+#define V_CERR_HEADERSPLIT_FIFO0(x) ((x) << S_CERR_HEADERSPLIT_FIFO0)
+#define F_CERR_HEADERSPLIT_FIFO0 V_CERR_HEADERSPLIT_FIFO0(1U)
+
+#define S_CERR_FLM_L1CACHE 0
+#define V_CERR_FLM_L1CACHE(x) ((x) << S_CERR_FLM_L1CACHE)
+#define F_CERR_FLM_L1CACHE V_CERR_FLM_L1CACHE(1U)
+
+#define A_SGE_INT_ENABLE7 0x1364
+#define A_SGE_PERR_ENABLE7 0x1368
+#define A_SGE_ING_COMP_COAL_CFG 0x1700
+
+#define S_USE_PTP_TIMER 27
+#define V_USE_PTP_TIMER(x) ((x) << S_USE_PTP_TIMER)
+#define F_USE_PTP_TIMER V_USE_PTP_TIMER(1U)
+
+#define S_IMSG_SET_OFLOW_ALL_ENTRIES_43060 26
+#define V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(x) ((x) << S_IMSG_SET_OFLOW_ALL_ENTRIES_43060)
+#define F_IMSG_SET_OFLOW_ALL_ENTRIES_43060 V_IMSG_SET_OFLOW_ALL_ENTRIES_43060(1U)
+
+#define S_IMSG_STUCK_INDIRECT_QUEUE_42907 25
+#define V_IMSG_STUCK_INDIRECT_QUEUE_42907(x) ((x) << S_IMSG_STUCK_INDIRECT_QUEUE_42907)
+#define F_IMSG_STUCK_INDIRECT_QUEUE_42907 V_IMSG_STUCK_INDIRECT_QUEUE_42907(1U)
+
+#define S_COMP_COAL_PIDX_INCR 24
+#define V_COMP_COAL_PIDX_INCR(x) ((x) << S_COMP_COAL_PIDX_INCR)
+#define F_COMP_COAL_PIDX_INCR V_COMP_COAL_PIDX_INCR(1U)
+
+#define S_COMP_COAL_TIMER_CNT 16
+#define M_COMP_COAL_TIMER_CNT 0xffU
+#define V_COMP_COAL_TIMER_CNT(x) ((x) << S_COMP_COAL_TIMER_CNT)
+#define G_COMP_COAL_TIMER_CNT(x) (((x) >> S_COMP_COAL_TIMER_CNT) & M_COMP_COAL_TIMER_CNT)
+
+#define S_COMP_COAL_CNTR_TH 8
+#define M_COMP_COAL_CNTR_TH 0xffU
+#define V_COMP_COAL_CNTR_TH(x) ((x) << S_COMP_COAL_CNTR_TH)
+#define G_COMP_COAL_CNTR_TH(x) (((x) >> S_COMP_COAL_CNTR_TH) & M_COMP_COAL_CNTR_TH)
+
+#define S_COMP_COAL_OPCODE 0
+#define M_COMP_COAL_OPCODE 0xffU
+#define V_COMP_COAL_OPCODE(x) ((x) << S_COMP_COAL_OPCODE)
+#define G_COMP_COAL_OPCODE(x) (((x) >> S_COMP_COAL_OPCODE) & M_COMP_COAL_OPCODE)
+
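The ingress completion-coalescing config is three byte-wide fields plus control bits in one register, so a full programming value is just the OR of the V_* forms. The values below are made up for illustration and are not a recommended setting:

	#include <stdint.h>

	/* Example: counter threshold 8, timer count 32, opcode 0x24. */
	static const uint32_t comp_coal_cfg =
	    V_COMP_COAL_TIMER_CNT(32) |
	    V_COMP_COAL_CNTR_TH(8) |
	    V_COMP_COAL_OPCODE(0x24) |
	    F_COMP_COAL_PIDX_INCR;
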
+#define A_SGE_ING_IMSG_DBG 0x1704
+
+#define S_STUCK_CTR_TH 1
+#define M_STUCK_CTR_TH 0xffU
+#define V_STUCK_CTR_TH(x) ((x) << S_STUCK_CTR_TH)
+#define G_STUCK_CTR_TH(x) (((x) >> S_STUCK_CTR_TH) & M_STUCK_CTR_TH)
+
+#define S_STUCK_INT_EN 0
+#define V_STUCK_INT_EN(x) ((x) << S_STUCK_INT_EN)
+#define F_STUCK_INT_EN V_STUCK_INT_EN(1U)
+
+#define A_SGE_ING_IMSG_RSP0_DBG 0x1708
+
+#define S_IDMA1_QID 16
+#define M_IDMA1_QID 0xffffU
+#define V_IDMA1_QID(x) ((x) << S_IDMA1_QID)
+#define G_IDMA1_QID(x) (((x) >> S_IDMA1_QID) & M_IDMA1_QID)
+
+#define S_IDMA0_QID 0
+#define M_IDMA0_QID 0xffffU
+#define V_IDMA0_QID(x) ((x) << S_IDMA0_QID)
+#define G_IDMA0_QID(x) (((x) >> S_IDMA0_QID) & M_IDMA0_QID)
+
+#define A_SGE_ING_IMSG_RSP1_DBG 0x170c
+
+#define S_IDMA3_QID 16
+#define M_IDMA3_QID 0xffffU
+#define V_IDMA3_QID(x) ((x) << S_IDMA3_QID)
+#define G_IDMA3_QID(x) (((x) >> S_IDMA3_QID) & M_IDMA3_QID)
+
+#define S_IDMA2_QID 0
+#define M_IDMA2_QID 0xffffU
+#define V_IDMA2_QID(x) ((x) << S_IDMA2_QID)
+#define G_IDMA2_QID(x) (((x) >> S_IDMA2_QID) & M_IDMA2_QID)
+
+#define A_SGE_LB_MODE 0x1710
+
+#define S_LB_MODE 0
+#define M_LB_MODE 0x3U
+#define V_LB_MODE(x) ((x) << S_LB_MODE)
+#define G_LB_MODE(x) (((x) >> S_LB_MODE) & M_LB_MODE)
+
+#define A_SGE_IMSG_QUESCENT 0x1714
+
+#define S_IMSG_QUESCENT 0
+#define V_IMSG_QUESCENT(x) ((x) << S_IMSG_QUESCENT)
+#define F_IMSG_QUESCENT V_IMSG_QUESCENT(1U)
+
+#define A_SGE_LA_CTRL 0x1718
+
+#define S_LA_GLOBAL_EN 8
+#define V_LA_GLOBAL_EN(x) ((x) << S_LA_GLOBAL_EN)
+#define F_LA_GLOBAL_EN V_LA_GLOBAL_EN(1U)
+
+#define S_PTP_TIMESTAMP_SEL 7
+#define V_PTP_TIMESTAMP_SEL(x) ((x) << S_PTP_TIMESTAMP_SEL)
+#define F_PTP_TIMESTAMP_SEL V_PTP_TIMESTAMP_SEL(1U)
+
+#define S_CIM2SGE_ID_CHK_VLD 6
+#define V_CIM2SGE_ID_CHK_VLD(x) ((x) << S_CIM2SGE_ID_CHK_VLD)
+#define F_CIM2SGE_ID_CHK_VLD V_CIM2SGE_ID_CHK_VLD(1U)
+
+#define S_CPLSW_ID_CHK_VLD 5
+#define V_CPLSW_ID_CHK_VLD(x) ((x) << S_CPLSW_ID_CHK_VLD)
+#define F_CPLSW_ID_CHK_VLD V_CPLSW_ID_CHK_VLD(1U)
+
+#define S_FLM_ID_CHK_VLD 4
+#define V_FLM_ID_CHK_VLD(x) ((x) << S_FLM_ID_CHK_VLD)
+#define F_FLM_ID_CHK_VLD V_FLM_ID_CHK_VLD(1U)
+
+#define S_IQ_DBP_ID_CHK_VLD 3
+#define V_IQ_DBP_ID_CHK_VLD(x) ((x) << S_IQ_DBP_ID_CHK_VLD)
+#define F_IQ_DBP_ID_CHK_VLD V_IQ_DBP_ID_CHK_VLD(1U)
+
+#define S_UP_OBQ_ID_CHK_VLD 2
+#define V_UP_OBQ_ID_CHK_VLD(x) ((x) << S_UP_OBQ_ID_CHK_VLD)
+#define F_UP_OBQ_ID_CHK_VLD V_UP_OBQ_ID_CHK_VLD(1U)
+
+#define S_CIM_ID_CHK_VLD 1
+#define V_CIM_ID_CHK_VLD(x) ((x) << S_CIM_ID_CHK_VLD)
+#define F_CIM_ID_CHK_VLD V_CIM_ID_CHK_VLD(1U)
+
+#define S_DBP_ID_CHK_VLD 0
+#define V_DBP_ID_CHK_VLD(x) ((x) << S_DBP_ID_CHK_VLD)
+#define F_DBP_ID_CHK_VLD V_DBP_ID_CHK_VLD(1U)
+
+#define A_SGE_LA_CTRL_EQID_LOW 0x171c
+
+#define S_EQ_ID_CHK_LOW 0
+#define M_EQ_ID_CHK_LOW 0x1ffffU
+#define V_EQ_ID_CHK_LOW(x) ((x) << S_EQ_ID_CHK_LOW)
+#define G_EQ_ID_CHK_LOW(x) (((x) >> S_EQ_ID_CHK_LOW) & M_EQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_EQID_HIGH 0x1720
+
+#define S_EQ_ID_CHK_HIGH 0
+#define M_EQ_ID_CHK_HIGH 0x1ffffU
+#define V_EQ_ID_CHK_HIGH(x) ((x) << S_EQ_ID_CHK_HIGH)
+#define G_EQ_ID_CHK_HIGH(x) (((x) >> S_EQ_ID_CHK_HIGH) & M_EQ_ID_CHK_HIGH)
+
+#define A_SGE_LA_CTRL_IQID 0x1724
+
+#define S_IQ_ID_CHK_HIGH 16
+#define M_IQ_ID_CHK_HIGH 0xffffU
+#define V_IQ_ID_CHK_HIGH(x) ((x) << S_IQ_ID_CHK_HIGH)
+#define G_IQ_ID_CHK_HIGH(x) (((x) >> S_IQ_ID_CHK_HIGH) & M_IQ_ID_CHK_HIGH)
+
+#define S_IQ_ID_CHK_LOW 0
+#define M_IQ_ID_CHK_LOW 0xffffU
+#define V_IQ_ID_CHK_LOW(x) ((x) << S_IQ_ID_CHK_LOW)
+#define G_IQ_ID_CHK_LOW(x) (((x) >> S_IQ_ID_CHK_LOW) & M_IQ_ID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_LOW 0x1728
+
+#define S_TID_CHK_LOW 0
+#define M_TID_CHK_LOW 0xffffffU
+#define V_TID_CHK_LOW(x) ((x) << S_TID_CHK_LOW)
+#define G_TID_CHK_LOW(x) (((x) >> S_TID_CHK_LOW) & M_TID_CHK_LOW)
+
+#define A_SGE_LA_CTRL_TID_HIGH 0x172c
+
+#define S_TID_CHK_HIGH 0
+#define M_TID_CHK_HIGH 0xffffffU
+#define V_TID_CHK_HIGH(x) ((x) << S_TID_CHK_HIGH)
+#define G_TID_CHK_HIGH(x) (((x) >> S_TID_CHK_HIGH) & M_TID_CHK_HIGH)
+
+#define A_SGE_CFG_TP_ERR 0x173c
+
+#define S_TP_ERR_STATUS_CH3 30
+#define M_TP_ERR_STATUS_CH3 0x3U
+#define V_TP_ERR_STATUS_CH3(x) ((x) << S_TP_ERR_STATUS_CH3)
+#define G_TP_ERR_STATUS_CH3(x) (((x) >> S_TP_ERR_STATUS_CH3) & M_TP_ERR_STATUS_CH3)
+
+#define S_TP_ERR_STATUS_CH2 28
+#define M_TP_ERR_STATUS_CH2 0x3U
+#define V_TP_ERR_STATUS_CH2(x) ((x) << S_TP_ERR_STATUS_CH2)
+#define G_TP_ERR_STATUS_CH2(x) (((x) >> S_TP_ERR_STATUS_CH2) & M_TP_ERR_STATUS_CH2)
+
+#define S_TP_ERR_STATUS_CH1 26
+#define M_TP_ERR_STATUS_CH1 0x3U
+#define V_TP_ERR_STATUS_CH1(x) ((x) << S_TP_ERR_STATUS_CH1)
+#define G_TP_ERR_STATUS_CH1(x) (((x) >> S_TP_ERR_STATUS_CH1) & M_TP_ERR_STATUS_CH1)
+
+#define S_TP_ERR_STATUS_CH0 24
+#define M_TP_ERR_STATUS_CH0 0x3U
+#define V_TP_ERR_STATUS_CH0(x) ((x) << S_TP_ERR_STATUS_CH0)
+#define G_TP_ERR_STATUS_CH0(x) (((x) >> S_TP_ERR_STATUS_CH0) & M_TP_ERR_STATUS_CH0)
+
+#define S_CPL0_SIZE 16
+#define M_CPL0_SIZE 0xffU
+#define V_CPL0_SIZE(x) ((x) << S_CPL0_SIZE)
+#define G_CPL0_SIZE(x) (((x) >> S_CPL0_SIZE) & M_CPL0_SIZE)
+
+#define S_CPL1_SIZE 8
+#define M_CPL1_SIZE 0xffU
+#define V_CPL1_SIZE(x) ((x) << S_CPL1_SIZE)
+#define G_CPL1_SIZE(x) (((x) >> S_CPL1_SIZE) & M_CPL1_SIZE)
+
+#define S_SIZE_LATCH_CLR 3
+#define V_SIZE_LATCH_CLR(x) ((x) << S_SIZE_LATCH_CLR)
+#define F_SIZE_LATCH_CLR V_SIZE_LATCH_CLR(1U)
+
+#define S_EXT_LATCH_CLR 2
+#define V_EXT_LATCH_CLR(x) ((x) << S_EXT_LATCH_CLR)
+#define F_EXT_LATCH_CLR V_EXT_LATCH_CLR(1U)
+
+#define S_EXT_CHANGE_42875 1
+#define V_EXT_CHANGE_42875(x) ((x) << S_EXT_CHANGE_42875)
+#define F_EXT_CHANGE_42875 V_EXT_CHANGE_42875(1U)
+
+#define S_SIZE_CHANGE_42913 0
+#define V_SIZE_CHANGE_42913(x) ((x) << S_SIZE_CHANGE_42913)
+#define F_SIZE_CHANGE_42913 V_SIZE_CHANGE_42913(1U)
+
+#define A_SGE_CHNL0_CTX_ERROR_COUNT_PER_TID 0x1740
+#define A_SGE_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1744
+#define A_SGE_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1748
+#define A_SGE_CHNL3_CTX_ERROR_COUNT_PER_TID 0x174c
+#define A_SGE_CTX_ACC_CH0 0x1750
+
+#define S_RDMA_INV_HANDLING 24
+#define M_RDMA_INV_HANDLING 0x3U
+#define V_RDMA_INV_HANDLING(x) ((x) << S_RDMA_INV_HANDLING)
+#define G_RDMA_INV_HANDLING(x) (((x) >> S_RDMA_INV_HANDLING) & M_RDMA_INV_HANDLING)
+
+#define S_T7_TERMINATE_STATUS_EN 23
+#define V_T7_TERMINATE_STATUS_EN(x) ((x) << S_T7_TERMINATE_STATUS_EN)
+#define F_T7_TERMINATE_STATUS_EN V_T7_TERMINATE_STATUS_EN(1U)
+
+#define S_T7_DISABLE 22
+#define V_T7_DISABLE(x) ((x) << S_T7_DISABLE)
+#define F_T7_DISABLE V_T7_DISABLE(1U)
+
+#define A_SGE_CTX_ACC_CH1 0x1754
+#define A_SGE_CTX_ACC_CH2 0x1758
+#define A_SGE_CTX_ACC_CH3 0x175c
+#define A_SGE_CTX_BASE 0x1760
#define A_SGE_LA_RDPTR_0 0x1800
#define A_SGE_LA_RDDATA_0 0x1804
#define A_SGE_LA_WRPTR_0 0x1808
@@ -4296,6 +5074,11 @@
#define A_PCIE_INT_CAUSE 0x3004
#define A_PCIE_PERR_ENABLE 0x3008
+
+#define S_TGTTAGQCLIENT1PERR 29
+#define V_TGTTAGQCLIENT1PERR(x) ((x) << S_TGTTAGQCLIENT1PERR)
+#define F_TGTTAGQCLIENT1PERR V_TGTTAGQCLIENT1PERR(1U)
+
#define A_PCIE_PERR_INJECT 0x300c
#define S_IDE 0
@@ -4582,10 +5365,6 @@
#define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE)
#define F_LINKREQRSTPCIECRSTMODE V_LINKREQRSTPCIECRSTMODE(1U)
-#define S_T6_PIOSTOPEN 31
-#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN)
-#define F_T6_PIOSTOPEN V_T6_PIOSTOPEN(1U)
-
#define A_PCIE_DMA_CTRL 0x3018
#define S_LITTLEENDIAN 7
@@ -4618,6 +5397,14 @@
#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG)
#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG)
+#define S_REG_VDM_ONLY 17
+#define V_REG_VDM_ONLY(x) ((x) << S_REG_VDM_ONLY)
+#define F_REG_VDM_ONLY V_REG_VDM_ONLY(1U)
+
+#define S_MULT_REQID_SUP 16
+#define V_MULT_REQID_SUP(x) ((x) << S_MULT_REQID_SUP)
+#define F_MULT_REQID_SUP V_MULT_REQID_SUP(1U)
+
#define A_PCIE_DMA_CFG 0x301c
#define S_MAXPYLDSIZE 28
@@ -4668,6 +5455,10 @@
#define V_DMADCASTFIRSTONLY(x) ((x) << S_DMADCASTFIRSTONLY)
#define F_DMADCASTFIRSTONLY V_DMADCASTFIRSTONLY(1U)
+#define S_ARMDCASTFIRSTONLY 7
+#define V_ARMDCASTFIRSTONLY(x) ((x) << S_ARMDCASTFIRSTONLY)
+#define F_ARMDCASTFIRSTONLY V_ARMDCASTFIRSTONLY(1U)
+
#define A_PCIE_DMA_STAT 0x3020
#define S_STATEREQ 28
@@ -4748,7 +5539,157 @@
#define G_PERSTTIMER(x) (((x) >> S_PERSTTIMER) & M_PERSTTIMER)
#define A_PCIE_CFG7 0x302c
+#define A_PCIE_INT_ENABLE_EXT 0x3030
+
+#define S_TCAMRSPERR 31
+#define V_TCAMRSPERR(x) ((x) << S_TCAMRSPERR)
+#define F_TCAMRSPERR V_TCAMRSPERR(1U)
+
+#define S_IPFORMQPERR 30
+#define V_IPFORMQPERR(x) ((x) << S_IPFORMQPERR)
+#define F_IPFORMQPERR V_IPFORMQPERR(1U)
+
+#define S_IPFORMQCERR 29
+#define V_IPFORMQCERR(x) ((x) << S_IPFORMQCERR)
+#define F_IPFORMQCERR V_IPFORMQCERR(1U)
+
+#define S_TRGT1GRPCERR 28
+#define V_TRGT1GRPCERR(x) ((x) << S_TRGT1GRPCERR)
+#define F_TRGT1GRPCERR V_TRGT1GRPCERR(1U)
+
+#define S_IPSOTCERR 27
+#define V_IPSOTCERR(x) ((x) << S_IPSOTCERR)
+#define F_IPSOTCERR V_IPSOTCERR(1U)
+
+#define S_IPRETRYCERR 26
+#define V_IPRETRYCERR(x) ((x) << S_IPRETRYCERR)
+#define F_IPRETRYCERR V_IPRETRYCERR(1U)
+
+#define S_IPRXDATAGRPCERR 25
+#define V_IPRXDATAGRPCERR(x) ((x) << S_IPRXDATAGRPCERR)
+#define F_IPRXDATAGRPCERR V_IPRXDATAGRPCERR(1U)
+
+#define S_IPRXHDRGRPCERR 24
+#define V_IPRXHDRGRPCERR(x) ((x) << S_IPRXHDRGRPCERR)
+#define F_IPRXHDRGRPCERR V_IPRXHDRGRPCERR(1U)
+
+#define S_A0ARBRSPORDFIFOPERR 19
+#define V_A0ARBRSPORDFIFOPERR(x) ((x) << S_A0ARBRSPORDFIFOPERR)
+#define F_A0ARBRSPORDFIFOPERR V_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_HRSPCERR 18
+#define V_HRSPCERR(x) ((x) << S_HRSPCERR)
+#define F_HRSPCERR V_HRSPCERR(1U)
+
+#define S_HREQRDCERR 17
+#define V_HREQRDCERR(x) ((x) << S_HREQRDCERR)
+#define F_HREQRDCERR V_HREQRDCERR(1U)
+
+#define S_HREQWRCERR 16
+#define V_HREQWRCERR(x) ((x) << S_HREQWRCERR)
+#define F_HREQWRCERR V_HREQWRCERR(1U)
+
+#define S_DRSPCERR 15
+#define V_DRSPCERR(x) ((x) << S_DRSPCERR)
+#define F_DRSPCERR V_DRSPCERR(1U)
+
+#define S_DREQRDCERR 14
+#define V_DREQRDCERR(x) ((x) << S_DREQRDCERR)
+#define F_DREQRDCERR V_DREQRDCERR(1U)
+
+#define S_DREQWRCERR 13
+#define V_DREQWRCERR(x) ((x) << S_DREQWRCERR)
+#define F_DREQWRCERR V_DREQWRCERR(1U)
+
+#define S_CRSPCERR 12
+#define V_CRSPCERR(x) ((x) << S_CRSPCERR)
+#define F_CRSPCERR V_CRSPCERR(1U)
+
+#define S_ARSPPERR 11
+#define V_ARSPPERR(x) ((x) << S_ARSPPERR)
+#define F_ARSPPERR V_ARSPPERR(1U)
+
+#define S_AREQRDPERR 10
+#define V_AREQRDPERR(x) ((x) << S_AREQRDPERR)
+#define F_AREQRDPERR V_AREQRDPERR(1U)
+
+#define S_AREQWRPERR 9
+#define V_AREQWRPERR(x) ((x) << S_AREQWRPERR)
+#define F_AREQWRPERR V_AREQWRPERR(1U)
+
+#define S_PIOREQGRPCERR 8
+#define V_PIOREQGRPCERR(x) ((x) << S_PIOREQGRPCERR)
+#define F_PIOREQGRPCERR V_PIOREQGRPCERR(1U)
+
+#define S_ARSPCERR 7
+#define V_ARSPCERR(x) ((x) << S_ARSPCERR)
+#define F_ARSPCERR V_ARSPCERR(1U)
+
+#define S_AREQRDCERR 6
+#define V_AREQRDCERR(x) ((x) << S_AREQRDCERR)
+#define F_AREQRDCERR V_AREQRDCERR(1U)
+
+#define S_AREQWRCERR 5
+#define V_AREQWRCERR(x) ((x) << S_AREQWRCERR)
+#define F_AREQWRCERR V_AREQWRCERR(1U)
+
+#define S_MARSPPERR 4
+#define V_MARSPPERR(x) ((x) << S_MARSPPERR)
+#define F_MARSPPERR V_MARSPPERR(1U)
+
+#define S_INICMAWDATAORDPERR 3
+#define V_INICMAWDATAORDPERR(x) ((x) << S_INICMAWDATAORDPERR)
+#define F_INICMAWDATAORDPERR V_INICMAWDATAORDPERR(1U)
+
+#define S_EMUPERR 2
+#define V_EMUPERR(x) ((x) << S_EMUPERR)
+#define F_EMUPERR V_EMUPERR(1U)
+
+#define S_ERRSPPERR 1
+#define V_ERRSPPERR(x) ((x) << S_ERRSPPERR)
+#define F_ERRSPPERR V_ERRSPPERR(1U)
+
+#define S_MSTGRPCERR 0
+#define V_MSTGRPCERR(x) ((x) << S_MSTGRPCERR)
+#define F_MSTGRPCERR V_MSTGRPCERR(1U)
+
+#define A_PCIE_INT_ENABLE_X8 0x3034
+
+#define S_X8TGTGRPPERR 23
+#define V_X8TGTGRPPERR(x) ((x) << S_X8TGTGRPPERR)
+#define F_X8TGTGRPPERR V_X8TGTGRPPERR(1U)
+
+#define S_X8IPSOTPERR 22
+#define V_X8IPSOTPERR(x) ((x) << S_X8IPSOTPERR)
+#define F_X8IPSOTPERR V_X8IPSOTPERR(1U)
+
+#define S_X8IPRETRYPERR 21
+#define V_X8IPRETRYPERR(x) ((x) << S_X8IPRETRYPERR)
+#define F_X8IPRETRYPERR V_X8IPRETRYPERR(1U)
+
+#define S_X8IPRXDATAGRPPERR 20
+#define V_X8IPRXDATAGRPPERR(x) ((x) << S_X8IPRXDATAGRPPERR)
+#define F_X8IPRXDATAGRPPERR V_X8IPRXDATAGRPPERR(1U)
+
+#define S_X8IPRXHDRGRPPERR 19
+#define V_X8IPRXHDRGRPPERR(x) ((x) << S_X8IPRXHDRGRPPERR)
+#define F_X8IPRXHDRGRPPERR V_X8IPRXHDRGRPPERR(1U)
+
+#define S_X8IPCORECERR 3
+#define V_X8IPCORECERR(x) ((x) << S_X8IPCORECERR)
+#define F_X8IPCORECERR V_X8IPCORECERR(1U)
+
+#define S_X8MSTGRPPERR 2
+#define V_X8MSTGRPPERR(x) ((x) << S_X8MSTGRPPERR)
+#define F_X8MSTGRPPERR V_X8MSTGRPPERR(1U)
+
+#define S_X8MSTGRPCERR 1
+#define V_X8MSTGRPCERR(x) ((x) << S_X8MSTGRPCERR)
+#define F_X8MSTGRPCERR V_X8MSTGRPCERR(1U)
+
+#define A_PCIE_INT_CAUSE_EXT 0x3038
#define A_PCIE_CMD_CTRL 0x303c
+#define A_PCIE_INT_CAUSE_X8 0x303c
#define A_PCIE_CMD_CFG 0x3040
#define S_MAXRSPCNT 16
@@ -4761,6 +5702,40 @@
#define V_MAXREQCNT(x) ((x) << S_MAXREQCNT)
#define G_MAXREQCNT(x) (((x) >> S_MAXREQCNT) & M_MAXREQCNT)
+#define A_PCIE_PERR_ENABLE_EXT 0x3040
+
+#define S_T7_ARSPPERR 18
+#define V_T7_ARSPPERR(x) ((x) << S_T7_ARSPPERR)
+#define F_T7_ARSPPERR V_T7_ARSPPERR(1U)
+
+#define S_T7_AREQRDPERR 17
+#define V_T7_AREQRDPERR(x) ((x) << S_T7_AREQRDPERR)
+#define F_T7_AREQRDPERR V_T7_AREQRDPERR(1U)
+
+#define S_T7_AREQWRPERR 16
+#define V_T7_AREQWRPERR(x) ((x) << S_T7_AREQWRPERR)
+#define F_T7_AREQWRPERR V_T7_AREQWRPERR(1U)
+
+#define S_T7_A0ARBRSPORDFIFOPERR 15
+#define V_T7_A0ARBRSPORDFIFOPERR(x) ((x) << S_T7_A0ARBRSPORDFIFOPERR)
+#define F_T7_A0ARBRSPORDFIFOPERR V_T7_A0ARBRSPORDFIFOPERR(1U)
+
+#define S_T7_MARSPPERR 14
+#define V_T7_MARSPPERR(x) ((x) << S_T7_MARSPPERR)
+#define F_T7_MARSPPERR V_T7_MARSPPERR(1U)
+
+#define S_T7_INICMAWDATAORDPERR 13
+#define V_T7_INICMAWDATAORDPERR(x) ((x) << S_T7_INICMAWDATAORDPERR)
+#define F_T7_INICMAWDATAORDPERR V_T7_INICMAWDATAORDPERR(1U)
+
+#define S_T7_EMUPERR 12
+#define V_T7_EMUPERR(x) ((x) << S_T7_EMUPERR)
+#define F_T7_EMUPERR V_T7_EMUPERR(1U)
+
+#define S_T7_ERRSPPERR 11
+#define V_T7_ERRSPPERR(x) ((x) << S_T7_ERRSPPERR)
+#define F_T7_ERRSPPERR V_T7_ERRSPPERR(1U)
+
#define A_PCIE_CMD_STAT 0x3044
#define S_RSPCNT 16
@@ -4773,6 +5748,32 @@
#define V_REQCNT(x) ((x) << S_REQCNT)
#define G_REQCNT(x) (((x) >> S_REQCNT) & M_REQCNT)
+#define A_PCIE_PERR_ENABLE_X8 0x3044
+
+#define S_T7_X8TGTGRPPERR 28
+#define V_T7_X8TGTGRPPERR(x) ((x) << S_T7_X8TGTGRPPERR)
+#define F_T7_X8TGTGRPPERR V_T7_X8TGTGRPPERR(1U)
+
+#define S_T7_X8IPSOTPERR 27
+#define V_T7_X8IPSOTPERR(x) ((x) << S_T7_X8IPSOTPERR)
+#define F_T7_X8IPSOTPERR V_T7_X8IPSOTPERR(1U)
+
+#define S_T7_X8IPRETRYPERR 26
+#define V_T7_X8IPRETRYPERR(x) ((x) << S_T7_X8IPRETRYPERR)
+#define F_T7_X8IPRETRYPERR V_T7_X8IPRETRYPERR(1U)
+
+#define S_T7_X8IPRXDATAGRPPERR 25
+#define V_T7_X8IPRXDATAGRPPERR(x) ((x) << S_T7_X8IPRXDATAGRPPERR)
+#define F_T7_X8IPRXDATAGRPPERR V_T7_X8IPRXDATAGRPPERR(1U)
+
+#define S_T7_X8IPRXHDRGRPPERR 24
+#define V_T7_X8IPRXHDRGRPPERR(x) ((x) << S_T7_X8IPRXHDRGRPPERR)
+#define F_T7_X8IPRXHDRGRPPERR V_T7_X8IPRXHDRGRPPERR(1U)
+
+#define S_T7_X8MSTGRPPERR 0
+#define V_T7_X8MSTGRPPERR(x) ((x) << S_T7_X8MSTGRPPERR)
+#define F_T7_X8MSTGRPPERR V_T7_X8MSTGRPPERR(1U)
+
#define A_PCIE_HMA_CTRL 0x3050
#define S_IPLTSSM 12
@@ -4889,9 +5890,9 @@
#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE)
#define F_T6_ENABLE V_T6_ENABLE(1U)
-#define S_T6_AI 30
-#define V_T6_AI(x) ((x) << S_T6_AI)
-#define F_T6_AI V_T6_AI(1U)
+#define S_T6_1_AI 30
+#define V_T6_1_AI(x) ((x) << S_T6_1_AI)
+#define F_T6_1_AI V_T6_1_AI(1U)
#define S_T6_CS2 29
#define V_T6_CS2(x) ((x) << S_T6_CS2)
@@ -4936,6 +5937,7 @@
#define V_MEMOFST(x) ((x) << S_MEMOFST)
#define G_MEMOFST(x) (((x) >> S_MEMOFST) & M_MEMOFST)
+#define A_T7_PCIE_MAILBOX_BASE_WIN 0x30a4
#define A_PCIE_MAILBOX_BASE_WIN 0x30a8
#define S_MBOXPCIEOFST 6
@@ -4953,7 +5955,21 @@
#define V_MBOXWIN(x) ((x) << S_MBOXWIN)
#define G_MBOXWIN(x) (((x) >> S_MBOXWIN) & M_MBOXWIN)
+#define A_PCIE_MAILBOX_OFFSET0 0x30a8
+
+#define S_MEMOFST0 3
+#define M_MEMOFST0 0x1fffffffU
+#define V_MEMOFST0(x) ((x) << S_MEMOFST0)
+#define G_MEMOFST0(x) (((x) >> S_MEMOFST0) & M_MEMOFST0)
+
#define A_PCIE_MAILBOX_OFFSET 0x30ac
+#define A_PCIE_MAILBOX_OFFSET1 0x30ac
+
+#define S_MEMOFST1 0
+#define M_MEMOFST1 0xfU
+#define V_MEMOFST1(x) ((x) << S_MEMOFST1)
+#define G_MEMOFST1(x) (((x) >> S_MEMOFST1) & M_MEMOFST1)
+
#define A_PCIE_MA_CTRL 0x30b0
#define S_MA_TAGFREE 29
@@ -5098,6 +6114,11 @@
#define V_STATIC_SPARE3(x) ((x) << S_STATIC_SPARE3)
#define G_STATIC_SPARE3(x) (((x) >> S_STATIC_SPARE3) & M_STATIC_SPARE3)
+#define S_T7_STATIC_SPARE3 0
+#define M_T7_STATIC_SPARE3 0x7fffU
+#define V_T7_STATIC_SPARE3(x) ((x) << S_T7_STATIC_SPARE3)
+#define G_T7_STATIC_SPARE3(x) (((x) >> S_T7_STATIC_SPARE3) & M_T7_STATIC_SPARE3)
+
#define A_PCIE_DBG_INDIR_REQ 0x30ec
#define S_DBGENABLE 31
@@ -5173,6 +6194,17 @@
#define G_PFNUM(x) (((x) >> S_PFNUM) & M_PFNUM)
#define A_PCIE_PF_INT_CFG 0x3140
+
+#define S_T7_VECNUM 12
+#define M_T7_VECNUM 0x7ffU
+#define V_T7_VECNUM(x) ((x) << S_T7_VECNUM)
+#define G_T7_VECNUM(x) (((x) >> S_T7_VECNUM) & M_T7_VECNUM)
+
+#define S_T7_VECBASE 0
+#define M_T7_VECBASE 0xfffU
+#define V_T7_VECBASE(x) ((x) << S_T7_VECBASE)
+#define G_T7_VECBASE(x) (((x) >> S_T7_VECBASE) & M_T7_VECBASE)
+
#define A_PCIE_PF_INT_CFG2 0x3144
#define A_PCIE_VF_INT_CFG 0x3180
#define A_PCIE_VF_INT_CFG2 0x3184
@@ -5198,6 +6230,20 @@
#define A_PCIE_VF_MSIX_EN_1 0x35c4
#define A_PCIE_VF_MSIX_EN_2 0x35c8
#define A_PCIE_VF_MSIX_EN_3 0x35cc
+#define A_PCIE_FID_PASID 0x35e0
+#define A_PCIE_FID_VFID_CTL 0x35e4
+
+#define S_T7_WRITE 0
+#define V_T7_WRITE(x) ((x) << S_T7_WRITE)
+#define F_T7_WRITE V_T7_WRITE(1U)
+
+#define A_T7_PCIE_FID_VFID_SEL 0x35e8
+
+#define S_T7_ADDR 2
+#define M_T7_ADDR 0x1fffU
+#define V_T7_ADDR(x) ((x) << S_T7_ADDR)
+#define G_T7_ADDR(x) (((x) >> S_T7_ADDR) & M_T7_ADDR)
+
#define A_PCIE_FID_VFID_SEL 0x35ec
#define S_FID_VFID_SEL_SELECT 0
@@ -5205,6 +6251,17 @@
#define V_FID_VFID_SEL_SELECT(x) ((x) << S_FID_VFID_SEL_SELECT)
#define G_FID_VFID_SEL_SELECT(x) (((x) >> S_FID_VFID_SEL_SELECT) & M_FID_VFID_SEL_SELECT)
+#define A_T7_PCIE_FID_VFID 0x35ec
+
+#define S_FID_VFID_NVMEGROUPEN 29
+#define V_FID_VFID_NVMEGROUPEN(x) ((x) << S_FID_VFID_NVMEGROUPEN)
+#define F_FID_VFID_NVMEGROUPEN V_FID_VFID_NVMEGROUPEN(1U)
+
+#define S_FID_VFID_GROUPSEL 25
+#define M_FID_VFID_GROUPSEL 0xfU
+#define V_FID_VFID_GROUPSEL(x) ((x) << S_FID_VFID_GROUPSEL)
+#define G_FID_VFID_GROUPSEL(x) (((x) >> S_FID_VFID_GROUPSEL) & M_FID_VFID_GROUPSEL)
+
#define A_PCIE_FID_VFID 0x3600
#define S_FID_VFID_SELECT 30
@@ -5264,6 +6321,227 @@
#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF)
#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF)
+#define A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR 0x3600
+#define A_PCIE_JBOF_NVME_LOW_DW_START_ADDR 0x3604
+#define A_PCIE_JBOF_NVME_LENGTH 0x3608
+
+#define S_NVMEDISABLE 31
+#define V_NVMEDISABLE(x) ((x) << S_NVMEDISABLE)
+#define F_NVMEDISABLE V_NVMEDISABLE(1U)
+
+#define S_NVMELENGTH 0
+#define M_NVMELENGTH 0x3fffffffU
+#define V_NVMELENGTH(x) ((x) << S_NVMELENGTH)
+#define G_NVMELENGTH(x) (((x) >> S_NVMELENGTH) & M_NVMELENGTH)
+
+#define A_PCIE_JBOF_NVME_GROUP 0x360c
+
+#define S_NVMEGROUPSEL 0
+#define M_NVMEGROUPSEL 0xfU
+#define V_NVMEGROUPSEL(x) ((x) << S_NVMEGROUPSEL)
+#define G_NVMEGROUPSEL(x) (((x) >> S_NVMEGROUPSEL) & M_NVMEGROUPSEL)
+
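Each JBOF/NVMe window appears to be described by a 64-bit start address split across the HIGH_DW/LOW_DW registers, a length register whose top bit doubles as a disable flag, and a group-select nibble; the 16-byte PCIE_JBOF_REG stride defined earlier matches these four dwords per instance. A hedged sketch of programming one window under those assumptions, with a hypothetical write32() accessor:

	#include <stdint.h>

	extern void write32(uint32_t addr, uint32_t v);	/* hypothetical */

	static void
	set_jbof_window(int idx, uint64_t start, uint32_t len, int group)
	{
		write32(PCIE_JBOF_REG(A_PCIE_JBOF_NVME_HIGH_DW_START_ADDR,
		    idx), (uint32_t)(start >> 32));
		write32(PCIE_JBOF_REG(A_PCIE_JBOF_NVME_LOW_DW_START_ADDR,
		    idx), (uint32_t)start);
		/* F_NVMEDISABLE left clear: window enabled. */
		write32(PCIE_JBOF_REG(A_PCIE_JBOF_NVME_LENGTH, idx),
		    V_NVMELENGTH(len));
		write32(PCIE_JBOF_REG(A_PCIE_JBOF_NVME_GROUP, idx),
		    V_NVMEGROUPSEL(group));
	}
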
+#define A_T7_PCIE_MEM_ACCESS_BASE_WIN 0x3700
+#define A_PCIE_MEM_ACCESS_BASE_WIN1 0x3704
+
+#define S_PCIEOFST1 0
+#define M_PCIEOFST1 0xffU
+#define V_PCIEOFST1(x) ((x) << S_PCIEOFST1)
+#define G_PCIEOFST1(x) (((x) >> S_PCIEOFST1) & M_PCIEOFST1)
+
+#define A_PCIE_MEM_ACCESS_OFFSET0 0x3708
+#define A_PCIE_MEM_ACCESS_OFFSET1 0x370c
+#define A_PCIE_PTM_EP_EXT_STROBE 0x3804
+
+#define S_PTM_AUTO_UPDATE 1
+#define V_PTM_AUTO_UPDATE(x) ((x) << S_PTM_AUTO_UPDATE)
+#define F_PTM_AUTO_UPDATE V_PTM_AUTO_UPDATE(1U)
+
+#define S_PTM_EXT_STROBE 0
+#define V_PTM_EXT_STROBE(x) ((x) << S_PTM_EXT_STROBE)
+#define F_PTM_EXT_STROBE V_PTM_EXT_STROBE(1U)
+
+#define A_PCIE_PTM_EP_EXT_TIME0 0x3808
+#define A_PCIE_PTM_EP_EXT_TIME1 0x380c
+#define A_PCIE_PTM_MAN_UPD_PULSE 0x3810
+
+#define S_PTM_MAN_UPD_PULSE 0
+#define V_PTM_MAN_UPD_PULSE(x) ((x) << S_PTM_MAN_UPD_PULSE)
+#define F_PTM_MAN_UPD_PULSE V_PTM_MAN_UPD_PULSE(1U)
+
+#define A_PCIE_SWAP_DATA_B2L_X16 0x3814
+#define A_PCIE_PCIE_RC_RST 0x3818
+
+#define S_PERST 0
+#define V_PERST(x) ((x) << S_PERST)
+#define F_PERST V_PERST(1U)
+
+#define A_PCIE_PCIE_LN_CLKSEL 0x3880
+
+#define S_DS8_SEL 30
+#define M_DS8_SEL 0x3U
+#define V_DS8_SEL(x) ((x) << S_DS8_SEL)
+#define G_DS8_SEL(x) (((x) >> S_DS8_SEL) & M_DS8_SEL)
+
+#define S_DS7_SEL 28
+#define M_DS7_SEL 0x3U
+#define V_DS7_SEL(x) ((x) << S_DS7_SEL)
+#define G_DS7_SEL(x) (((x) >> S_DS7_SEL) & M_DS7_SEL)
+
+#define S_DS6_SEL 26
+#define M_DS6_SEL 0x3U
+#define V_DS6_SEL(x) ((x) << S_DS6_SEL)
+#define G_DS6_SEL(x) (((x) >> S_DS6_SEL) & M_DS6_SEL)
+
+#define S_DS5_SEL 24
+#define M_DS5_SEL 0x3U
+#define V_DS5_SEL(x) ((x) << S_DS5_SEL)
+#define G_DS5_SEL(x) (((x) >> S_DS5_SEL) & M_DS5_SEL)
+
+#define S_DS4_SEL 22
+#define M_DS4_SEL 0x3U
+#define V_DS4_SEL(x) ((x) << S_DS4_SEL)
+#define G_DS4_SEL(x) (((x) >> S_DS4_SEL) & M_DS4_SEL)
+
+#define S_DS3_SEL 20
+#define M_DS3_SEL 0x3U
+#define V_DS3_SEL(x) ((x) << S_DS3_SEL)
+#define G_DS3_SEL(x) (((x) >> S_DS3_SEL) & M_DS3_SEL)
+
+#define S_DS2_SEL 18
+#define M_DS2_SEL 0x3U
+#define V_DS2_SEL(x) ((x) << S_DS2_SEL)
+#define G_DS2_SEL(x) (((x) >> S_DS2_SEL) & M_DS2_SEL)
+
+#define S_DS1_SEL 16
+#define M_DS1_SEL 0x3U
+#define V_DS1_SEL(x) ((x) << S_DS1_SEL)
+#define G_DS1_SEL(x) (((x) >> S_DS1_SEL) & M_DS1_SEL)
+
+#define S_LN14_SEL 14
+#define M_LN14_SEL 0x3U
+#define V_LN14_SEL(x) ((x) << S_LN14_SEL)
+#define G_LN14_SEL(x) (((x) >> S_LN14_SEL) & M_LN14_SEL)
+
+#define S_LN12_SEL 12
+#define M_LN12_SEL 0x3U
+#define V_LN12_SEL(x) ((x) << S_LN12_SEL)
+#define G_LN12_SEL(x) (((x) >> S_LN12_SEL) & M_LN12_SEL)
+
+#define S_LN10_SEL 10
+#define M_LN10_SEL 0x3U
+#define V_LN10_SEL(x) ((x) << S_LN10_SEL)
+#define G_LN10_SEL(x) (((x) >> S_LN10_SEL) & M_LN10_SEL)
+
+#define S_LN8_SEL 8
+#define M_LN8_SEL 0x3U
+#define V_LN8_SEL(x) ((x) << S_LN8_SEL)
+#define G_LN8_SEL(x) (((x) >> S_LN8_SEL) & M_LN8_SEL)
+
+#define S_LN6_SEL 6
+#define M_LN6_SEL 0x3U
+#define V_LN6_SEL(x) ((x) << S_LN6_SEL)
+#define G_LN6_SEL(x) (((x) >> S_LN6_SEL) & M_LN6_SEL)
+
+#define S_LN4_SEL 4
+#define M_LN4_SEL 0x3U
+#define V_LN4_SEL(x) ((x) << S_LN4_SEL)
+#define G_LN4_SEL(x) (((x) >> S_LN4_SEL) & M_LN4_SEL)
+
+#define S_LN2_SEL 2
+#define M_LN2_SEL 0x3U
+#define V_LN2_SEL(x) ((x) << S_LN2_SEL)
+#define G_LN2_SEL(x) (((x) >> S_LN2_SEL) & M_LN2_SEL)
+
+#define S_LN0_SEL 0
+#define M_LN0_SEL 0x3U
+#define V_LN0_SEL(x) ((x) << S_LN0_SEL)
+#define G_LN0_SEL(x) (((x) >> S_LN0_SEL) & M_LN0_SEL)
+
+#define A_PCIE_PCIE_MSIX_EN 0x3884
+
+#define S_MSIX_ENABLE 0
+#define M_MSIX_ENABLE 0xffU
+#define V_MSIX_ENABLE(x) ((x) << S_MSIX_ENABLE)
+#define G_MSIX_ENABLE(x) (((x) >> S_MSIX_ENABLE) & M_MSIX_ENABLE)
+
+#define A_PCIE_LFSR_WRCTRL 0x3888
+
+#define S_WR_LFSR_CMP_DATA 16
+#define M_WR_LFSR_CMP_DATA 0xffffU
+#define V_WR_LFSR_CMP_DATA(x) ((x) << S_WR_LFSR_CMP_DATA)
+#define G_WR_LFSR_CMP_DATA(x) (((x) >> S_WR_LFSR_CMP_DATA) & M_WR_LFSR_CMP_DATA)
+
+#define S_WR_LFSR_RSVD 2
+#define M_WR_LFSR_RSVD 0x3fffU
+#define V_WR_LFSR_RSVD(x) ((x) << S_WR_LFSR_RSVD)
+#define G_WR_LFSR_RSVD(x) (((x) >> S_WR_LFSR_RSVD) & M_WR_LFSR_RSVD)
+
+#define S_WR_LFSR_EN 1
+#define V_WR_LFSR_EN(x) ((x) << S_WR_LFSR_EN)
+#define F_WR_LFSR_EN V_WR_LFSR_EN(1U)
+
+#define S_WR_LFSR_START 0
+#define V_WR_LFSR_START(x) ((x) << S_WR_LFSR_START)
+#define F_WR_LFSR_START V_WR_LFSR_START(1U)
+
+#define A_PCIE_LFSR_RDCTRL 0x388c
+
+#define S_CMD_LFSR_CMP_DATA 24
+#define M_CMD_LFSR_CMP_DATA 0xffU
+#define V_CMD_LFSR_CMP_DATA(x) ((x) << S_CMD_LFSR_CMP_DATA)
+#define G_CMD_LFSR_CMP_DATA(x) (((x) >> S_CMD_LFSR_CMP_DATA) & M_CMD_LFSR_CMP_DATA)
+
+#define S_RD_LFSR_CMD_DATA 16
+#define M_RD_LFSR_CMD_DATA 0xffU
+#define V_RD_LFSR_CMD_DATA(x) ((x) << S_RD_LFSR_CMD_DATA)
+#define G_RD_LFSR_CMD_DATA(x) (((x) >> S_RD_LFSR_CMD_DATA) & M_RD_LFSR_CMD_DATA)
+
+#define S_RD_LFSR_RSVD 10
+#define M_RD_LFSR_RSVD 0x3fU
+#define V_RD_LFSR_RSVD(x) ((x) << S_RD_LFSR_RSVD)
+#define G_RD_LFSR_RSVD(x) (((x) >> S_RD_LFSR_RSVD) & M_RD_LFSR_RSVD)
+
+#define S_RD3_LFSR_EN 9
+#define V_RD3_LFSR_EN(x) ((x) << S_RD3_LFSR_EN)
+#define F_RD3_LFSR_EN V_RD3_LFSR_EN(1U)
+
+#define S_RD3_LFSR_START 8
+#define V_RD3_LFSR_START(x) ((x) << S_RD3_LFSR_START)
+#define F_RD3_LFSR_START V_RD3_LFSR_START(1U)
+
+#define S_RD2_LFSR_EN 7
+#define V_RD2_LFSR_EN(x) ((x) << S_RD2_LFSR_EN)
+#define F_RD2_LFSR_EN V_RD2_LFSR_EN(1U)
+
+#define S_RD2_LFSR_START 6
+#define V_RD2_LFSR_START(x) ((x) << S_RD2_LFSR_START)
+#define F_RD2_LFSR_START V_RD2_LFSR_START(1U)
+
+#define S_RD1_LFSR_EN 5
+#define V_RD1_LFSR_EN(x) ((x) << S_RD1_LFSR_EN)
+#define F_RD1_LFSR_EN V_RD1_LFSR_EN(1U)
+
+#define S_RD1_LFSR_START 4
+#define V_RD1_LFSR_START(x) ((x) << S_RD1_LFSR_START)
+#define F_RD1_LFSR_START V_RD1_LFSR_START(1U)
+
+#define S_RD0_LFSR_EN 3
+#define V_RD0_LFSR_EN(x) ((x) << S_RD0_LFSR_EN)
+#define F_RD0_LFSR_EN V_RD0_LFSR_EN(1U)
+
+#define S_RD0_LFSR_START 2
+#define V_RD0_LFSR_START(x) ((x) << S_RD0_LFSR_START)
+#define F_RD0_LFSR_START V_RD0_LFSR_START(1U)
+
+#define S_CMD_LFSR_EN 1
+#define V_CMD_LFSR_EN(x) ((x) << S_CMD_LFSR_EN)
+#define F_CMD_LFSR_EN V_CMD_LFSR_EN(1U)
+
+#define S_CMD_LFSR_START 0
+#define V_CMD_LFSR_START(x) ((x) << S_CMD_LFSR_START)
+#define F_CMD_LFSR_START V_CMD_LFSR_START(1U)
+
#define A_PCIE_FID 0x3900
#define S_PAD 11
@@ -5280,6 +6558,309 @@
#define V_FUNC(x) ((x) << S_FUNC)
#define G_FUNC(x) (((x) >> S_FUNC) & M_FUNC)
+#define A_PCIE_EMU_ADDR 0x3900
+
+#define S_EMU_ADDR 0
+#define M_EMU_ADDR 0x1ffU
+#define V_EMU_ADDR(x) ((x) << S_EMU_ADDR)
+#define G_EMU_ADDR(x) (((x) >> S_EMU_ADDR) & M_EMU_ADDR)
+
+#define A_PCIE_EMU_CFG 0x3904
+
+#define S_EMUENABLE 16
+#define V_EMUENABLE(x) ((x) << S_EMUENABLE)
+#define F_EMUENABLE V_EMUENABLE(1U)
+
+#define S_EMUTYPE 14
+#define M_EMUTYPE 0x3U
+#define V_EMUTYPE(x) ((x) << S_EMUTYPE)
+#define G_EMUTYPE(x) (((x) >> S_EMUTYPE) & M_EMUTYPE)
+
+#define S_BAR0TARGET 12
+#define M_BAR0TARGET 0x3U
+#define V_BAR0TARGET(x) ((x) << S_BAR0TARGET)
+#define G_BAR0TARGET(x) (((x) >> S_BAR0TARGET) & M_BAR0TARGET)
+
+#define S_BAR2TARGET 10
+#define M_BAR2TARGET 0x3U
+#define V_BAR2TARGET(x) ((x) << S_BAR2TARGET)
+#define G_BAR2TARGET(x) (((x) >> S_BAR2TARGET) & M_BAR2TARGET)
+
+#define S_BAR4TARGET 8
+#define M_BAR4TARGET 0x3U
+#define V_BAR4TARGET(x) ((x) << S_BAR4TARGET)
+#define G_BAR4TARGET(x) (((x) >> S_BAR4TARGET) & M_BAR4TARGET)
+
+#define S_RELEATIVEEMUID 0
+#define M_RELEATIVEEMUID 0xffU
+#define V_RELEATIVEEMUID(x) ((x) << S_RELEATIVEEMUID)
+#define G_RELEATIVEEMUID(x) (((x) >> S_RELEATIVEEMUID) & M_RELEATIVEEMUID)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET0_BAR0 0x3910
+
+#define S_T7_MEMOFST0 0
+#define M_T7_MEMOFST0 0xfffffffU
+#define V_T7_MEMOFST0(x) ((x) << S_T7_MEMOFST0)
+#define G_T7_MEMOFST0(x) (((x) >> S_T7_MEMOFST0) & M_T7_MEMOFST0)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG0_BAR0 0x3914
+
+#define S_SIZE0 0
+#define M_SIZE0 0x1fU
+#define V_SIZE0(x) ((x) << S_SIZE0)
+#define G_SIZE0(x) (((x) >> S_SIZE0) & M_SIZE0)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET1_BAR0 0x3918
+
+#define S_T7_MEMOFST1 0
+#define M_T7_MEMOFST1 0xfffffffU
+#define V_T7_MEMOFST1(x) ((x) << S_T7_MEMOFST1)
+#define G_T7_MEMOFST1(x) (((x) >> S_T7_MEMOFST1) & M_T7_MEMOFST1)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG1_BAR0 0x391c
+
+#define S_SIZE1 0
+#define M_SIZE1 0x1fU
+#define V_SIZE1(x) ((x) << S_SIZE1)
+#define G_SIZE1(x) (((x) >> S_SIZE1) & M_SIZE1)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET2_BAR0 0x3920
+
+#define S_MEMOFST2 0
+#define M_MEMOFST2 0xfffffffU
+#define V_MEMOFST2(x) ((x) << S_MEMOFST2)
+#define G_MEMOFST2(x) (((x) >> S_MEMOFST2) & M_MEMOFST2)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG2_BAR0 0x3924
+
+#define S_SIZE2 0
+#define M_SIZE2 0x1fU
+#define V_SIZE2(x) ((x) << S_SIZE2)
+#define G_SIZE2(x) (((x) >> S_SIZE2) & M_SIZE2)
+
+#define A_PCIE_EMUADRRMAP_MEM_OFFSET3_BAR0 0x3928
+
+#define S_MEMOFST3 0
+#define M_MEMOFST3 0xfffffffU
+#define V_MEMOFST3(x) ((x) << S_MEMOFST3)
+#define G_MEMOFST3(x) (((x) >> S_MEMOFST3) & M_MEMOFST3)
+
+#define A_PCIE_EMUADRRMAP_MEM_CFG3_BAR0 0x392c
+
+#define S_SIZE3 0
+#define M_SIZE3 0x1fU
+#define V_SIZE3(x) ((x) << S_SIZE3)
+#define G_SIZE3(x) (((x) >> S_SIZE3) & M_SIZE3)
+
+#define A_PCIE_TCAM_DATA 0x3970
+#define A_PCIE_TCAM_CTL 0x3974
+
+#define S_TCAMADDR 8
+#define M_TCAMADDR 0x3ffU
+#define V_TCAMADDR(x) ((x) << S_TCAMADDR)
+#define G_TCAMADDR(x) (((x) >> S_TCAMADDR) & M_TCAMADDR)
+
+#define S_CAMEN 0
+#define V_CAMEN(x) ((x) << S_CAMEN)
+#define F_CAMEN V_CAMEN(1U)
+
+#define A_PCIE_TCAM_DBG 0x3978
+
+#define S_CBPASS 24
+#define V_CBPASS(x) ((x) << S_CBPASS)
+#define F_CBPASS V_CBPASS(1U)
+
+#define S_CBBUSY 20
+#define V_CBBUSY(x) ((x) << S_CBBUSY)
+#define F_CBBUSY V_CBBUSY(1U)
+
+#define S_CBSTART 17
+#define V_CBSTART(x) ((x) << S_CBSTART)
+#define F_CBSTART V_CBSTART(1U)
+
+#define S_RSTCB 16
+#define V_RSTCB(x) ((x) << S_RSTCB)
+#define F_RSTCB V_RSTCB(1U)
+
+#define S_TCAM_DBG_DATA 0
+#define M_TCAM_DBG_DATA 0xffffU
+#define V_TCAM_DBG_DATA(x) ((x) << S_TCAM_DBG_DATA)
+#define G_TCAM_DBG_DATA(x) (((x) >> S_TCAM_DBG_DATA) & M_TCAM_DBG_DATA)
+
+#define A_PCIE_TEST_CTRL0 0x3980
+#define A_PCIE_TEST_CTRL1 0x3984
+#define A_PCIE_TEST_CTRL2 0x3988
+#define A_PCIE_TEST_CTRL3 0x398c
+#define A_PCIE_TEST_STS0 0x3990
+#define A_PCIE_TEST_STS1 0x3994
+#define A_PCIE_TEST_STS2 0x3998
+#define A_PCIE_TEST_STS3 0x399c
+#define A_PCIE_X8_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x4700
+#define A_PCIE_X8_CORE_VENDOR_SPECIFIC_DLLP 0x4704
+#define A_PCIE_X8_CORE_PORT_FORCE_LINK 0x4708
+#define A_PCIE_X8_CORE_ACK_FREQUENCY_L0L1_ASPM_CONTROL 0x470c
+#define A_PCIE_X8_CORE_PORT_LINK_CONTROL 0x4710
+#define A_PCIE_X8_CORE_LANE_SKEW 0x4714
+#define A_PCIE_X8_CORE_SYMBOL_NUMBER 0x4718
+#define A_PCIE_X8_CORE_SYMBOL_TIMER_FILTER_MASK1 0x471c
+#define A_PCIE_X8_CORE_FILTER_MASK2 0x4720
+#define A_PCIE_X8_CORE_DEBUG_0 0x4728
+#define A_PCIE_X8_CORE_DEBUG_1 0x472c
+#define A_PCIE_X8_CORE_TRANSMIT_POSTED_FC_CREDIT_STATUS 0x4730
+#define A_PCIE_X8_CORE_TRANSMIT_NONPOSTED_FC_CREDIT_STATUS 0x4734
+#define A_PCIE_X8_CORE_TRANSMIT_COMPLETION_FC_CREDIT_STATUS 0x4738
+#define A_PCIE_X8_CORE_QUEUE_STATUS 0x473c
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_1 0x4740
+#define A_PCIE_X8_CORE_VC_TRANSMIT_ARBITRATION_2 0x4744
+#define A_PCIE_X8_CORE_VC0_POSTED_RECEIVE_QUEUE_CONTROL 0x4748
+#define A_PCIE_X8_CORE_VC0_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x474c
+#define A_PCIE_X8_CORE_VC0_COMPLETION_RECEIVE_QUEUE_CONTROL 0x4750
+#define A_PCIE_X8_CORE_VC1_POSTED_RECEIVE_QUEUE_CONTROL 0x4754
+#define A_PCIE_X8_CORE_VC1_NONPOSTED_RECEIVE_QUEUE_CONTROL 0x4758
+#define A_PCIE_X8_CORE_VC1_COMPLETION_RECEIVE_QUEUE_CONTROL 0x475c
+#define A_PCIE_X8_CORE_LINK_WIDTH_SPEED_CHANGE 0x480c
+#define A_PCIE_X8_CORE_PHY_STATUS 0x4810
+#define A_PCIE_X8_CORE_PHY_CONTROL 0x4814
+#define A_PCIE_X8_CORE_GEN3_CONTROL 0x4890
+#define A_PCIE_X8_CORE_GEN3_EQ_FS_LF 0x4894
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_COEFF 0x4898
+#define A_PCIE_X8_CORE_GEN3_EQ_PRESET_INDEX 0x489c
+#define A_PCIE_X8_CORE_GEN3_EQ_STATUS 0x48a4
+#define A_PCIE_X8_CORE_GEN3_EQ_CONTROL 0x48a8
+#define A_PCIE_X8_CORE_GEN3_EQ_DIRCHANGE_FEEDBACK 0x48ac
+#define A_PCIE_X8_CORE_PIPE_CONTROL 0x48b8
+#define A_PCIE_X8_CORE_DBI_RO_WE 0x48bc
+#define A_PCIE_X8_CFG_SPACE_REQ 0x48c0
+#define A_PCIE_X8_CFG_SPACE_DATA 0x48c4
+#define A_PCIE_X8_CFG_MPS_MRS 0x4900
+
+#define S_MRS 3
+#define M_MRS 0x7U
+#define V_MRS(x) ((x) << S_MRS)
+#define G_MRS(x) (((x) >> S_MRS) & M_MRS)
+
+#define S_T7_MPS 0
+#define M_T7_MPS 0x7U
+#define V_T7_MPS(x) ((x) << S_T7_MPS)
+#define G_T7_MPS(x) (((x) >> S_T7_MPS) & M_T7_MPS)
+
+#define A_PCIE_X8_CFG_ATTRIBUTES 0x4904
+
+#define S_T7_DCAEN 2
+#define V_T7_DCAEN(x) ((x) << S_T7_DCAEN)
+#define F_T7_DCAEN V_T7_DCAEN(1U)
+
+#define S_DCASTFITTRAONLEN 1
+#define V_DCASTFITTRAONLEN(x) ((x) << S_DCASTFITTRAONLEN)
+#define F_DCASTFITTRAONLEN V_DCASTFITTRAONLEN(1U)
+
+#define S_REQCTLDYNSTCLKEN 0
+#define V_REQCTLDYNSTCLKEN(x) ((x) << S_REQCTLDYNSTCLKEN)
+#define F_REQCTLDYNSTCLKEN V_REQCTLDYNSTCLKEN(1U)
+
+#define A_PCIE_X8_CFG_LTSSM 0x4908
+
+#define S_APP_LTSSM_ENABLE 0
+#define V_APP_LTSSM_ENABLE(x) ((x) << S_APP_LTSSM_ENABLE)
+#define F_APP_LTSSM_ENABLE V_APP_LTSSM_ENABLE(1U)
+
+#define A_PCIE_ARM_REQUESTER_ID_X8 0x490c
+
+#define S_A1_RSVD1 24
+#define M_A1_RSVD1 0xffU
+#define V_A1_RSVD1(x) ((x) << S_A1_RSVD1)
+#define G_A1_RSVD1(x) (((x) >> S_A1_RSVD1) & M_A1_RSVD1)
+
+#define S_A1_PRIMBUSNUMBER 16
+#define M_A1_PRIMBUSNUMBER 0xffU
+#define V_A1_PRIMBUSNUMBER(x) ((x) << S_A1_PRIMBUSNUMBER)
+#define G_A1_PRIMBUSNUMBER(x) (((x) >> S_A1_PRIMBUSNUMBER) & M_A1_PRIMBUSNUMBER)
+
+#define S_A1_REQUESTERID 0
+#define M_A1_REQUESTERID 0xffffU
+#define V_A1_REQUESTERID(x) ((x) << S_A1_REQUESTERID)
+#define G_A1_REQUESTERID(x) (((x) >> S_A1_REQUESTERID) & M_A1_REQUESTERID)
+
+#define A_PCIE_SWAP_DATA_B2L_X8 0x4910
+
+#define S_CFGRD_SWAP_EN 1
+#define V_CFGRD_SWAP_EN(x) ((x) << S_CFGRD_SWAP_EN)
+#define F_CFGRD_SWAP_EN V_CFGRD_SWAP_EN(1U)
+
+#define S_CFGWR_SWAP_EN 0
+#define V_CFGWR_SWAP_EN(x) ((x) << S_CFGWR_SWAP_EN)
+#define F_CFGWR_SWAP_EN V_CFGWR_SWAP_EN(1U)
+
+#define A_PCIE_PDEBUG_DATA0_X8 0x4914
+#define A_PCIE_PDEBUG_DATA1_X8 0x4918
+#define A_PCIE_PDEBUG_DATA2_X8 0x491c
+#define A_PCIE_PDEBUG_CTRL_X8 0x4920
+#define A_PCIE_PDEBUG_DATA_X8 0x4924
+#define A_PCIE_SPARE_REGISTER_SPACES_X8 0x4ffc
+#define A_PCIE_PIPE_LANE0_REG0 0x5500
+#define A_PCIE_PIPE_LANE0_REG1 0x5504
+#define A_PCIE_PIPE_LANE0_REG2 0x5508
+#define A_PCIE_PIPE_LANE0_REG3 0x550c
+#define A_PCIE_PIPE_LANE1_REG0 0x5510
+#define A_PCIE_PIPE_LANE1_REG1 0x5514
+#define A_PCIE_PIPE_LANE1_REG2 0x5518
+#define A_PCIE_PIPE_LANE1_REG3 0x551c
+#define A_PCIE_PIPE_LANE2_REG0 0x5520
+#define A_PCIE_PIPE_LANE2_REG1 0x5524
+#define A_PCIE_PIPE_LANE2_REG2 0x5528
+#define A_PCIE_PIPE_LANE2_REG3 0x552c
+#define A_PCIE_PIPE_LANE3_REG0 0x5530
+#define A_PCIE_PIPE_LANE3_REG1 0x5534
+#define A_PCIE_PIPE_LANE3_REG2 0x5538
+#define A_PCIE_PIPE_LANE3_REG3 0x553c
+#define A_PCIE_PIPE_LANE4_REG0 0x5540
+#define A_PCIE_PIPE_LANE4_REG1 0x5544
+#define A_PCIE_PIPE_LANE4_REG2 0x5548
+#define A_PCIE_PIPE_LANE4_REG3 0x554c
+#define A_PCIE_PIPE_LANE5_REG0 0x5550
+#define A_PCIE_PIPE_LANE5_REG1 0x5554
+#define A_PCIE_PIPE_LANE5_REG2 0x5558
+#define A_PCIE_PIPE_LANE5_REG3 0x555c
+#define A_PCIE_PIPE_LANE6_REG0 0x5560
+#define A_PCIE_PIPE_LANE6_REG1 0x5564
+#define A_PCIE_PIPE_LANE6_REG2 0x5568
+#define A_PCIE_PIPE_LANE6_REG3 0x556c
+#define A_PCIE_PIPE_LANE7_REG0 0x5570
+#define A_PCIE_PIPE_LANE7_REG1 0x5574
+#define A_PCIE_PIPE_LANE7_REG2 0x5578
+#define A_PCIE_PIPE_LANE7_REG3 0x557c
+#define A_PCIE_PIPE_LANE8_REG0 0x5580
+#define A_PCIE_PIPE_LANE8_REG1 0x5584
+#define A_PCIE_PIPE_LANE8_REG2 0x5588
+#define A_PCIE_PIPE_LANE8_REG3 0x558c
+#define A_PCIE_PIPE_LANE9_REG0 0x5590
+#define A_PCIE_PIPE_LANE9_REG1 0x5594
+#define A_PCIE_PIPE_LANE9_REG2 0x5598
+#define A_PCIE_PIPE_LANE9_REG3 0x559c
+#define A_PCIE_PIPE_LANE10_REG0 0x55a0
+#define A_PCIE_PIPE_LANE10_REG1 0x55a4
+#define A_PCIE_PIPE_LANE10_REG2 0x55a8
+#define A_PCIE_PIPE_LANE10_REG3 0x55ac
+#define A_PCIE_PIPE_LANE11_REG0 0x55b0
+#define A_PCIE_PIPE_LANE11_REG1 0x55b4
+#define A_PCIE_PIPE_LANE11_REG2 0x55b8
+#define A_PCIE_PIPE_LANE11_REG3 0x55bc
+#define A_PCIE_PIPE_LANE12_REG0 0x55c0
+#define A_PCIE_PIPE_LANE12_REG1 0x55c4
+#define A_PCIE_PIPE_LANE12_REG2 0x55c8
+#define A_PCIE_PIPE_LANE12_REG3 0x55cc
+#define A_PCIE_PIPE_LANE13_REG0 0x55d0
+#define A_PCIE_PIPE_LANE13_REG1 0x55d4
+#define A_PCIE_PIPE_LANE13_REG2 0x55d8
+#define A_PCIE_PIPE_LANE13_REG3 0x55dc
+#define A_PCIE_PIPE_LANE14_REG0 0x55e0
+#define A_PCIE_PIPE_LANE14_REG1 0x55e4
+#define A_PCIE_PIPE_LANE14_REG2 0x55e8
+#define A_PCIE_PIPE_LANE14_REG3 0x55ec
+#define A_PCIE_PIPE_LANE15_REG0 0x55f0
+#define A_PCIE_PIPE_LANE15_REG1 0x55f4
+#define A_PCIE_PIPE_LANE15_REG2 0x55f8
+#define A_PCIE_PIPE_LANE15_REG3 0x55fc
#define A_PCIE_COOKIE_STAT 0x5600
#define S_COOKIEB 16
@@ -5346,6 +6927,30 @@
#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE)
#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE)
+#define A_T7_PCIE_VC0_CDTS0 0x56c4
+
+#define S_T7_CPLD0 16
+#define M_T7_CPLD0 0xffffU
+#define V_T7_CPLD0(x) ((x) << S_T7_CPLD0)
+#define G_T7_CPLD0(x) (((x) >> S_T7_CPLD0) & M_T7_CPLD0)
+
+#define S_T7_CPLH0 0
+#define M_T7_CPLH0 0xfffU
+#define V_T7_CPLH0(x) ((x) << S_T7_CPLH0)
+#define G_T7_CPLH0(x) (((x) >> S_T7_CPLH0) & M_T7_CPLH0)
+
+#define A_T7_PCIE_VC0_CDTS1 0x56c8
+
+#define S_T7_PD0 16
+#define M_T7_PD0 0xffffU
+#define V_T7_PD0(x) ((x) << S_T7_PD0)
+#define G_T7_PD0(x) (((x) >> S_T7_PD0) & M_T7_PD0)
+
+#define S_T7_PH0 0
+#define M_T7_PH0 0xfffU
+#define V_T7_PH0(x) ((x) << S_T7_PH0)
+#define G_T7_PH0(x) (((x) >> S_T7_PH0) & M_T7_PH0)
+
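Editor's note: a sketch (using the assumed reg_rd() helper from the A_PCIE_EMU_CFG example above) of decoding the T7 VC0 credit counters, which pack the data credits in the upper halfword and the header credits in the low 12 bits of each register:

static void
vc0_credits_example(volatile uint32_t *bar0)
{
	uint32_t cdts0 = reg_rd(bar0, A_T7_PCIE_VC0_CDTS0);
	uint32_t cdts1 = reg_rd(bar0, A_T7_PCIE_VC0_CDTS1);

	unsigned cpl_data = G_T7_CPLD0(cdts0);	/* completion data credits */
	unsigned cpl_hdr  = G_T7_CPLH0(cdts0);	/* completion header credits */
	unsigned pst_data = G_T7_PD0(cdts1);	/* posted data credits */
	unsigned pst_hdr  = G_T7_PH0(cdts1);	/* posted header credits */

	(void)cpl_data; (void)cpl_hdr; (void)pst_data; (void)pst_hdr;
}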
#define A_PCIE_VC0_CDTS0 0x56cc
#define S_CPLD0 20
@@ -5363,6 +6968,18 @@
#define V_PD0(x) ((x) << S_PD0)
#define G_PD0(x) (((x) >> S_PD0) & M_PD0)
+#define A_PCIE_VC0_CDTS2 0x56cc
+
+#define S_T7_NPD0 16
+#define M_T7_NPD0 0xffffU
+#define V_T7_NPD0(x) ((x) << S_T7_NPD0)
+#define G_T7_NPD0(x) (((x) >> S_T7_NPD0) & M_T7_NPD0)
+
+#define S_T7_NPH0 0
+#define M_T7_NPH0 0xfffU
+#define V_T7_NPH0(x) ((x) << S_T7_NPH0)
+#define G_T7_NPH0(x) (((x) >> S_T7_NPH0) & M_T7_NPH0)
+
#define A_PCIE_VC0_CDTS1 0x56d0
#define S_CPLH0 20
@@ -5380,6 +6997,7 @@
#define V_NPD0(x) ((x) << S_NPD0)
#define G_NPD0(x) (((x) >> S_NPD0) & M_NPD0)
+#define A_T7_PCIE_VC1_CDTS0 0x56d0
#define A_PCIE_VC1_CDTS0 0x56d4
#define S_CPLD1 20
@@ -5397,6 +7015,7 @@
#define V_PD1(x) ((x) << S_PD1)
#define G_PD1(x) (((x) >> S_PD1) & M_PD1)
+#define A_T7_PCIE_VC1_CDTS1 0x56d4
#define A_PCIE_VC1_CDTS1 0x56d8
#define S_CPLH1 20
@@ -5414,6 +7033,7 @@
#define V_NPD1(x) ((x) << S_NPD1)
#define G_NPD1(x) (((x) >> S_NPD1) & M_NPD1)
+#define A_PCIE_VC1_CDTS2 0x56d8
#define A_PCIE_FLR_PF_STATUS 0x56dc
#define A_PCIE_FLR_VF0_STATUS 0x56e0
#define A_PCIE_FLR_VF1_STATUS 0x56e4
@@ -5916,6 +7536,11 @@
#define V_DISABLE_SCRAMBLER(x) ((x) << S_DISABLE_SCRAMBLER)
#define F_DISABLE_SCRAMBLER V_DISABLE_SCRAMBLER(1U)
+#define S_RATE_SHADOW_SEL 24
+#define M_RATE_SHADOW_SEL 0x3U
+#define V_RATE_SHADOW_SEL(x) ((x) << S_RATE_SHADOW_SEL)
+#define G_RATE_SHADOW_SEL(x) (((x) >> S_RATE_SHADOW_SEL) & M_RATE_SHADOW_SEL)
+
#define A_PCIE_CORE_GEN3_EQ_FS_LF 0x5894
#define S_FULL_SWING 6
@@ -6347,6 +7972,35 @@
#define V_RDSOPCNT(x) ((x) << S_RDSOPCNT)
#define G_RDSOPCNT(x) (((x) >> S_RDSOPCNT) & M_RDSOPCNT)
+#define S_DMA_COOKIECNT 24
+#define M_DMA_COOKIECNT 0xfU
+#define V_DMA_COOKIECNT(x) ((x) << S_DMA_COOKIECNT)
+#define G_DMA_COOKIECNT(x) (((x) >> S_DMA_COOKIECNT) & M_DMA_COOKIECNT)
+
+#define S_DMA_RDSEQNUMUPDCNT 20
+#define M_DMA_RDSEQNUMUPDCNT 0xfU
+#define V_DMA_RDSEQNUMUPDCNT(x) ((x) << S_DMA_RDSEQNUMUPDCNT)
+#define G_DMA_RDSEQNUMUPDCNT(x) (((x) >> S_DMA_RDSEQNUMUPDCNT) & M_DMA_RDSEQNUMUPDCNT)
+
+#define S_DMA_SIREQCNT 16
+#define M_DMA_SIREQCNT 0xfU
+#define V_DMA_SIREQCNT(x) ((x) << S_DMA_SIREQCNT)
+#define G_DMA_SIREQCNT(x) (((x) >> S_DMA_SIREQCNT) & M_DMA_SIREQCNT)
+
+#define S_DMA_WREOPMATCHSOP 12
+#define V_DMA_WREOPMATCHSOP(x) ((x) << S_DMA_WREOPMATCHSOP)
+#define F_DMA_WREOPMATCHSOP V_DMA_WREOPMATCHSOP(1U)
+
+#define S_DMA_WRSOPCNT 8
+#define M_DMA_WRSOPCNT 0xfU
+#define V_DMA_WRSOPCNT(x) ((x) << S_DMA_WRSOPCNT)
+#define G_DMA_WRSOPCNT(x) (((x) >> S_DMA_WRSOPCNT) & M_DMA_WRSOPCNT)
+
+#define S_DMA_RDSOPCNT 0
+#define M_DMA_RDSOPCNT 0xffU
+#define V_DMA_RDSOPCNT(x) ((x) << S_DMA_RDSOPCNT)
+#define G_DMA_RDSOPCNT(x) (((x) >> S_DMA_RDSOPCNT) & M_DMA_RDSOPCNT)
+
#define A_PCIE_T5_DMA_STAT3 0x594c
#define S_ATMREQSOPCNT 24
@@ -6372,6 +8026,29 @@
#define V_RSPSOPCNT(x) ((x) << S_RSPSOPCNT)
#define G_RSPSOPCNT(x) (((x) >> S_RSPSOPCNT) & M_RSPSOPCNT)
+#define S_DMA_ATMREQSOPCNT 24
+#define M_DMA_ATMREQSOPCNT 0xffU
+#define V_DMA_ATMREQSOPCNT(x) ((x) << S_DMA_ATMREQSOPCNT)
+#define G_DMA_ATMREQSOPCNT(x) (((x) >> S_DMA_ATMREQSOPCNT) & M_DMA_ATMREQSOPCNT)
+
+#define S_DMA_ATMEOPMATCHSOP 17
+#define V_DMA_ATMEOPMATCHSOP(x) ((x) << S_DMA_ATMEOPMATCHSOP)
+#define F_DMA_ATMEOPMATCHSOP V_DMA_ATMEOPMATCHSOP(1U)
+
+#define S_DMA_RSPEOPMATCHSOP 16
+#define V_DMA_RSPEOPMATCHSOP(x) ((x) << S_DMA_RSPEOPMATCHSOP)
+#define F_DMA_RSPEOPMATCHSOP V_DMA_RSPEOPMATCHSOP(1U)
+
+#define S_DMA_RSPERRCNT 8
+#define M_DMA_RSPERRCNT 0xffU
+#define V_DMA_RSPERRCNT(x) ((x) << S_DMA_RSPERRCNT)
+#define G_DMA_RSPERRCNT(x) (((x) >> S_DMA_RSPERRCNT) & M_DMA_RSPERRCNT)
+
+#define S_DMA_RSPSOPCNT 0
+#define M_DMA_RSPSOPCNT 0xffU
+#define V_DMA_RSPSOPCNT(x) ((x) << S_DMA_RSPSOPCNT)
+#define G_DMA_RSPSOPCNT(x) (((x) >> S_DMA_RSPSOPCNT) & M_DMA_RSPSOPCNT)
+
#define A_PCIE_CORE_OUTBOUND_POSTED_HEADER_BUFFER_ALLOCATION 0x5960
#define S_OP0H 24
@@ -6507,11 +8184,6 @@
#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL)
#define F_T6_USECMDPOOL V_T6_USECMDPOOL(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
-
#define A_PCIE_T5_CMD_STAT 0x5984
#define S_T5_STAT_RSPCNT 20
@@ -6558,6 +8230,21 @@
#define A_PCIE_T5_CMD_STAT2 0x5988
#define A_PCIE_T5_CMD_STAT3 0x598c
+
+#define S_CMD_RSPEOPMATCHSOP 16
+#define V_CMD_RSPEOPMATCHSOP(x) ((x) << S_CMD_RSPEOPMATCHSOP)
+#define F_CMD_RSPEOPMATCHSOP V_CMD_RSPEOPMATCHSOP(1U)
+
+#define S_CMD_RSPERRCNT 8
+#define M_CMD_RSPERRCNT 0xffU
+#define V_CMD_RSPERRCNT(x) ((x) << S_CMD_RSPERRCNT)
+#define G_CMD_RSPERRCNT(x) (((x) >> S_CMD_RSPERRCNT) & M_CMD_RSPERRCNT)
+
+#define S_CMD_RSPSOPCNT 0
+#define M_CMD_RSPSOPCNT 0xffU
+#define V_CMD_RSPSOPCNT(x) ((x) << S_CMD_RSPSOPCNT)
+#define G_CMD_RSPSOPCNT(x) (((x) >> S_CMD_RSPSOPCNT) & M_CMD_RSPSOPCNT)
+
#define A_PCIE_CORE_PCI_EXPRESS_TAGS_ALLOCATION 0x5990
#define S_OC0T 24
@@ -6868,14 +8555,14 @@
#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT)
#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT)
-#define S_T6_SEQCHKDIS 8
-#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS)
-#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U)
+#define S_T5_HMA_SEQCHKDIS 8
+#define V_T5_HMA_SEQCHKDIS(x) ((x) << S_T5_HMA_SEQCHKDIS)
+#define F_T5_HMA_SEQCHKDIS V_T5_HMA_SEQCHKDIS(1U)
-#define S_T6_MINTAG 0
-#define M_T6_MINTAG 0xffU
-#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG)
-#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG)
+#define S_T5_MINTAG 0
+#define M_T5_MINTAG 0xffU
+#define V_T5_MINTAG(x) ((x) << S_T5_MINTAG)
+#define G_T5_MINTAG(x) (((x) >> S_T5_MINTAG) & M_T5_MINTAG)
#define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4
@@ -6992,6 +8679,31 @@
#define F_CRSI V_CRSI(1U)
#define A_PCIE_T5_HMA_STAT2 0x59b8
+
+#define S_HMA_COOKIECNT 24
+#define M_HMA_COOKIECNT 0xfU
+#define V_HMA_COOKIECNT(x) ((x) << S_HMA_COOKIECNT)
+#define G_HMA_COOKIECNT(x) (((x) >> S_HMA_COOKIECNT) & M_HMA_COOKIECNT)
+
+#define S_HMA_RDSEQNUMUPDCNT 20
+#define M_HMA_RDSEQNUMUPDCNT 0xfU
+#define V_HMA_RDSEQNUMUPDCNT(x) ((x) << S_HMA_RDSEQNUMUPDCNT)
+#define G_HMA_RDSEQNUMUPDCNT(x) (((x) >> S_HMA_RDSEQNUMUPDCNT) & M_HMA_RDSEQNUMUPDCNT)
+
+#define S_HMA_WREOPMATCHSOP 12
+#define V_HMA_WREOPMATCHSOP(x) ((x) << S_HMA_WREOPMATCHSOP)
+#define F_HMA_WREOPMATCHSOP V_HMA_WREOPMATCHSOP(1U)
+
+#define S_HMA_WRSOPCNT 8
+#define M_HMA_WRSOPCNT 0xfU
+#define V_HMA_WRSOPCNT(x) ((x) << S_HMA_WRSOPCNT)
+#define G_HMA_WRSOPCNT(x) (((x) >> S_HMA_WRSOPCNT) & M_HMA_WRSOPCNT)
+
+#define S_HMA_RDSOPCNT 0
+#define M_HMA_RDSOPCNT 0xffU
+#define V_HMA_RDSOPCNT(x) ((x) << S_HMA_RDSOPCNT)
+#define G_HMA_RDSOPCNT(x) (((x) >> S_HMA_RDSOPCNT) & M_HMA_RDSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_STATUS 0x59bc
#define S_PTOM 31
@@ -7035,6 +8747,21 @@
#define F_PMC7 V_PMC7(1U)
#define A_PCIE_T5_HMA_STAT3 0x59bc
+
+#define S_HMA_RSPEOPMATCHSOP 16
+#define V_HMA_RSPEOPMATCHSOP(x) ((x) << S_HMA_RSPEOPMATCHSOP)
+#define F_HMA_RSPEOPMATCHSOP V_HMA_RSPEOPMATCHSOP(1U)
+
+#define S_HMA_RSPERRCNT 8
+#define M_HMA_RSPERRCNT 0xffU
+#define V_HMA_RSPERRCNT(x) ((x) << S_HMA_RSPERRCNT)
+#define G_HMA_RSPERRCNT(x) (((x) >> S_HMA_RSPERRCNT) & M_HMA_RSPERRCNT)
+
+#define S_HMA_RSPSOPCNT 0
+#define M_HMA_RSPSOPCNT 0xffU
+#define V_HMA_RSPSOPCNT(x) ((x) << S_HMA_RSPSOPCNT)
+#define G_HMA_RSPSOPCNT(x) (((x) >> S_HMA_RSPSOPCNT) & M_HMA_RSPSOPCNT)
+
#define A_PCIE_CORE_ENDPOINT_ERROR_SEVERITY 0x59c0
#define S_PTOS 31
@@ -7187,6 +8914,14 @@
#define V_STI_SLEEPREQ(x) ((x) << S_STI_SLEEPREQ)
#define F_STI_SLEEPREQ V_STI_SLEEPREQ(1U)
+#define S_ARM_STATIC_CGEN 28
+#define V_ARM_STATIC_CGEN(x) ((x) << S_ARM_STATIC_CGEN)
+#define F_ARM_STATIC_CGEN V_ARM_STATIC_CGEN(1U)
+
+#define S_ARM_DYNAMIC_CGEN 27
+#define V_ARM_DYNAMIC_CGEN(x) ((x) << S_ARM_DYNAMIC_CGEN)
+#define F_ARM_DYNAMIC_CGEN V_ARM_DYNAMIC_CGEN(1U)
+
#define A_PCIE_CORE_ENDPOINT_INTERRUPT_ENABLE 0x59c4
#define S_PTOI 31
@@ -7521,6 +9256,14 @@
#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR)
#define F_PIOCPL_VDMTXDATAPERR V_PIOCPL_VDMTXDATAPERR(1U)
+#define S_TGT1_MEM_PERR 28
+#define V_TGT1_MEM_PERR(x) ((x) << S_TGT1_MEM_PERR)
+#define F_TGT1_MEM_PERR V_TGT1_MEM_PERR(1U)
+
+#define S_TGT2_MEM_PERR 27
+#define V_TGT2_MEM_PERR(x) ((x) << S_TGT2_MEM_PERR)
+#define F_TGT2_MEM_PERR V_TGT2_MEM_PERR(1U)
+
#define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4
#define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4
@@ -7622,6 +9365,16 @@
#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID)
#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID)
+#define S_LOGADDR10B 9
+#define M_LOGADDR10B 0x3ffU
+#define V_LOGADDR10B(x) ((x) << S_LOGADDR10B)
+#define G_LOGADDR10B(x) (((x) >> S_LOGADDR10B) & M_LOGADDR10B)
+
+#define S_LOGREQVFID 0
+#define M_LOGREQVFID 0x1ffU
+#define V_LOGREQVFID(x) ((x) << S_LOGREQVFID)
+#define G_LOGREQVFID(x) (((x) >> S_LOGREQVFID) & M_LOGREQVFID)
+
#define A_PCIE_CHANGESET 0x59fc
#define A_PCIE_REVISION 0x5a00
#define A_PCIE_PDEBUG_INDEX 0x5a04
@@ -7646,6 +9399,16 @@
#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL)
#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL)
+#define S_T7_1_PDEBUGSELH 16
+#define M_T7_1_PDEBUGSELH 0xffU
+#define V_T7_1_PDEBUGSELH(x) ((x) << S_T7_1_PDEBUGSELH)
+#define G_T7_1_PDEBUGSELH(x) (((x) >> S_T7_1_PDEBUGSELH) & M_T7_1_PDEBUGSELH)
+
+#define S_T7_1_PDEBUGSELL 0
+#define M_T7_1_PDEBUGSELL 0xffU
+#define V_T7_1_PDEBUGSELL(x) ((x) << S_T7_1_PDEBUGSELL)
+#define G_T7_1_PDEBUGSELL(x) (((x) >> S_T7_1_PDEBUGSELL) & M_T7_1_PDEBUGSELL)
+
#define A_PCIE_PDEBUG_DATA_HIGH 0x5a08
#define A_PCIE_PDEBUG_DATA_LOW 0x5a0c
#define A_PCIE_CDEBUG_INDEX 0x5a10
@@ -8468,6 +10231,21 @@
#define A_PCIE_PHY_INDIR_DATA 0x5bf4
#define A_PCIE_STATIC_SPARE1 0x5bf8
#define A_PCIE_STATIC_SPARE2 0x5bfc
+
+#define S_X8_SW_EN 30
+#define V_X8_SW_EN(x) ((x) << S_X8_SW_EN)
+#define F_X8_SW_EN V_X8_SW_EN(1U)
+
+#define S_SWITCHCFG 28
+#define M_SWITCHCFG 0x3U
+#define V_SWITCHCFG(x) ((x) << S_SWITCHCFG)
+#define G_SWITCHCFG(x) (((x) >> S_SWITCHCFG) & M_SWITCHCFG)
+
+#define S_STATIC_SPARE2 0
+#define M_STATIC_SPARE2 0xfffffffU
+#define V_STATIC_SPARE2(x) ((x) << S_STATIC_SPARE2)
+#define G_STATIC_SPARE2(x) (((x) >> S_STATIC_SPARE2) & M_STATIC_SPARE2)
+
#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10
#define S_KDB_PF_LEN 24
@@ -8872,9 +10650,13 @@
#define A_PCIE_FLR_VF6_STATUS 0x5e78
#define A_PCIE_FLR_VF7_STATUS 0x5e7c
#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80
+#define A_T7_PCIE_BUS_MST_STAT_4 0x5e80
#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84
+#define A_T7_PCIE_BUS_MST_STAT_5 0x5e84
#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88
+#define A_T7_PCIE_BUS_MST_STAT_6 0x5e88
#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c
+#define A_T7_PCIE_BUS_MST_STAT_7 0x5e8c
#define A_PCIE_BUS_MST_STAT_8 0x5e90
#define S_BUSMST_263_256 0
@@ -8895,9 +10677,13 @@
#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT)
#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0
+#define A_T7_PCIE_RSP_ERR_STAT_4 0x5ea0
#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4
+#define A_T7_PCIE_RSP_ERR_STAT_5 0x5ea4
#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8
+#define A_T7_PCIE_RSP_ERR_STAT_6 0x5ea8
#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac
+#define A_T7_PCIE_RSP_ERR_STAT_7 0x5eac
#define A_PCIE_RSP_ERR_STAT_8 0x5eb0
#define S_RSPERR_263_256 0
@@ -9025,6 +10811,1028 @@
#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0
#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0
#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00
+#define A_PCIE_PHY_PGM_LOAD_CTRL 0x5f04
+
+#define S_HSS_PMLD_ACC_EN 31
+#define V_HSS_PMLD_ACC_EN(x) ((x) << S_HSS_PMLD_ACC_EN)
+#define F_HSS_PMLD_ACC_EN V_HSS_PMLD_ACC_EN(1U)
+
+#define S_HSS_PMRDWR_ADDR 0
+#define M_HSS_PMRDWR_ADDR 0x3ffffU
+#define V_HSS_PMRDWR_ADDR(x) ((x) << S_HSS_PMRDWR_ADDR)
+#define G_HSS_PMRDWR_ADDR(x) (((x) >> S_HSS_PMRDWR_ADDR) & M_HSS_PMRDWR_ADDR)
+
+#define A_PCIE_PHY_PGM_LOAD_DATA 0x5f08
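Editor's note: the CTRL/DATA pair above suggests a program-load path of enable-plus-address followed by data writes. A hedged sketch, reusing the assumed reg helpers from earlier; whether the address auto-increments per data write is not visible in this diff and is assumed below:

static void
phy_pgm_load_example(volatile uint32_t *bar0, const uint32_t *img,
    unsigned int n)
{
	unsigned int i;

	/* Enable program-load access and set the starting address. */
	reg_wr(bar0, A_PCIE_PHY_PGM_LOAD_CTRL,
	    F_HSS_PMLD_ACC_EN | V_HSS_PMRDWR_ADDR(0));

	/* Stream the image through the data register (assumes the load
	 * address auto-increments on each write). */
	for (i = 0; i < n; i++)
		reg_wr(bar0, A_PCIE_PHY_PGM_LOAD_DATA, img[i]);
}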
+#define A_PCIE_HSS_CFG 0x5f0c
+
+#define S_HSS_PCS_AGGREGATION_MODE 30
+#define M_HSS_PCS_AGGREGATION_MODE 0x3U
+#define V_HSS_PCS_AGGREGATION_MODE(x) ((x) << S_HSS_PCS_AGGREGATION_MODE)
+#define G_HSS_PCS_AGGREGATION_MODE(x) (((x) >> S_HSS_PCS_AGGREGATION_MODE) & M_HSS_PCS_AGGREGATION_MODE)
+
+#define S_HSS_PCS_FURCATE_MODE 28
+#define M_HSS_PCS_FURCATE_MODE 0x3U
+#define V_HSS_PCS_FURCATE_MODE(x) ((x) << S_HSS_PCS_FURCATE_MODE)
+#define G_HSS_PCS_FURCATE_MODE(x) (((x) >> S_HSS_PCS_FURCATE_MODE) & M_HSS_PCS_FURCATE_MODE)
+
+#define S_HSS_PCS_PCLK_ON_IN_P2 27
+#define V_HSS_PCS_PCLK_ON_IN_P2(x) ((x) << S_HSS_PCS_PCLK_ON_IN_P2)
+#define F_HSS_PCS_PCLK_ON_IN_P2 V_HSS_PCS_PCLK_ON_IN_P2(1U)
+
+#define S_HSS0_PHY_CTRL_REFCLK 17
+#define M_HSS0_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS0_PHY_CTRL_REFCLK(x) ((x) << S_HSS0_PHY_CTRL_REFCLK)
+#define G_HSS0_PHY_CTRL_REFCLK(x) (((x) >> S_HSS0_PHY_CTRL_REFCLK) & M_HSS0_PHY_CTRL_REFCLK)
+
+#define S_HSS1_PHY_CTRL_REFCLK 12
+#define M_HSS1_PHY_CTRL_REFCLK 0x1fU
+#define V_HSS1_PHY_CTRL_REFCLK(x) ((x) << S_HSS1_PHY_CTRL_REFCLK)
+#define G_HSS1_PHY_CTRL_REFCLK(x) (((x) >> S_HSS1_PHY_CTRL_REFCLK) & M_HSS1_PHY_CTRL_REFCLK)
+
+#define S_HSS0_PHY_REXT_MASTER 11
+#define V_HSS0_PHY_REXT_MASTER(x) ((x) << S_HSS0_PHY_REXT_MASTER)
+#define F_HSS0_PHY_REXT_MASTER V_HSS0_PHY_REXT_MASTER(1U)
+
+#define S_HSS1_PHY_REXT_MASTER 10
+#define V_HSS1_PHY_REXT_MASTER(x) ((x) << S_HSS1_PHY_REXT_MASTER)
+#define F_HSS1_PHY_REXT_MASTER V_HSS1_PHY_REXT_MASTER(1U)
+
+#define S_HSS0_PHY_CTRL_VDDA_SEL 9
+#define V_HSS0_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDA_SEL)
+#define F_HSS0_PHY_CTRL_VDDA_SEL V_HSS0_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS0_PHY_CTRL_VDDHA_SEL 8
+#define V_HSS0_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS0_PHY_CTRL_VDDHA_SEL)
+#define F_HSS0_PHY_CTRL_VDDHA_SEL V_HSS0_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDA_SEL 7
+#define V_HSS1_PHY_CTRL_VDDA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDA_SEL)
+#define F_HSS1_PHY_CTRL_VDDA_SEL V_HSS1_PHY_CTRL_VDDA_SEL(1U)
+
+#define S_HSS1_PHY_CTRL_VDDHA_SEL 6
+#define V_HSS1_PHY_CTRL_VDDHA_SEL(x) ((x) << S_HSS1_PHY_CTRL_VDDHA_SEL)
+#define F_HSS1_PHY_CTRL_VDDHA_SEL V_HSS1_PHY_CTRL_VDDHA_SEL(1U)
+
+#define S_HSS1_CPU_MEMPSACK 5
+#define V_HSS1_CPU_MEMPSACK(x) ((x) << S_HSS1_CPU_MEMPSACK)
+#define F_HSS1_CPU_MEMPSACK V_HSS1_CPU_MEMPSACK(1U)
+
+#define S_HSS1_CPU_MEMACK 4
+#define V_HSS1_CPU_MEMACK(x) ((x) << S_HSS1_CPU_MEMACK)
+#define F_HSS1_CPU_MEMACK V_HSS1_CPU_MEMACK(1U)
+
+#define S_HSS0_CPU_MEMPSACK 3
+#define V_HSS0_CPU_MEMPSACK(x) ((x) << S_HSS0_CPU_MEMPSACK)
+#define F_HSS0_CPU_MEMPSACK V_HSS0_CPU_MEMPSACK(1U)
+
+#define S_HSS0_CPU_MEMACK 2
+#define V_HSS0_CPU_MEMACK(x) ((x) << S_HSS0_CPU_MEMACK)
+#define F_HSS0_CPU_MEMACK V_HSS0_CPU_MEMACK(1U)
+
+#define S_HSS_PM_IS_ROM 1
+#define V_HSS_PM_IS_ROM(x) ((x) << S_HSS_PM_IS_ROM)
+#define F_HSS_PM_IS_ROM V_HSS_PM_IS_ROM(1U)
+
+#define A_PCIE_HSS_RST 0x5f10
+
+#define S_HSS_RST_CTRL_BY_FW 31
+#define V_HSS_RST_CTRL_BY_FW(x) ((x) << S_HSS_RST_CTRL_BY_FW)
+#define F_HSS_RST_CTRL_BY_FW V_HSS_RST_CTRL_BY_FW(1U)
+
+#define S_HSS_PIPE0_RESET_N 30
+#define V_HSS_PIPE0_RESET_N(x) ((x) << S_HSS_PIPE0_RESET_N)
+#define F_HSS_PIPE0_RESET_N V_HSS_PIPE0_RESET_N(1U)
+
+#define S_HSS0_POR_N 29
+#define V_HSS0_POR_N(x) ((x) << S_HSS0_POR_N)
+#define F_HSS0_POR_N V_HSS0_POR_N(1U)
+
+#define S_HSS1_POR_N 28
+#define V_HSS1_POR_N(x) ((x) << S_HSS1_POR_N)
+#define F_HSS1_POR_N V_HSS1_POR_N(1U)
+
+#define S_HSS0_CPU_RESET 27
+#define V_HSS0_CPU_RESET(x) ((x) << S_HSS0_CPU_RESET)
+#define F_HSS0_CPU_RESET V_HSS0_CPU_RESET(1U)
+
+#define S_HSS1_CPU_RESET 26
+#define V_HSS1_CPU_RESET(x) ((x) << S_HSS1_CPU_RESET)
+#define F_HSS1_CPU_RESET V_HSS1_CPU_RESET(1U)
+
+#define S_HSS_PCS_POR_N 25
+#define V_HSS_PCS_POR_N(x) ((x) << S_HSS_PCS_POR_N)
+#define F_HSS_PCS_POR_N V_HSS_PCS_POR_N(1U)
+
+#define S_SW_CRST_ 24
+#define V_SW_CRST_(x) ((x) << S_SW_CRST_)
+#define F_SW_CRST_ V_SW_CRST_(1U)
+
+#define S_SW_PCIECRST_ 23
+#define V_SW_PCIECRST_(x) ((x) << S_SW_PCIECRST_)
+#define F_SW_PCIECRST_ V_SW_PCIECRST_(1U)
+
+#define S_SW_PCIEPIPERST_ 22
+#define V_SW_PCIEPIPERST_(x) ((x) << S_SW_PCIEPIPERST_)
+#define F_SW_PCIEPIPERST_ V_SW_PCIEPIPERST_(1U)
+
+#define S_SW_PCIEPHYRST_ 21
+#define V_SW_PCIEPHYRST_(x) ((x) << S_SW_PCIEPHYRST_)
+#define F_SW_PCIEPHYRST_ V_SW_PCIEPHYRST_(1U)
+
+#define S_HSS1_ERR_O 3
+#define V_HSS1_ERR_O(x) ((x) << S_HSS1_ERR_O)
+#define F_HSS1_ERR_O V_HSS1_ERR_O(1U)
+
+#define S_HSS0_ERR_O 2
+#define V_HSS0_ERR_O(x) ((x) << S_HSS0_ERR_O)
+#define F_HSS0_ERR_O V_HSS0_ERR_O(1U)
+
+#define S_HSS1_PLL_LOCK 1
+#define V_HSS1_PLL_LOCK(x) ((x) << S_HSS1_PLL_LOCK)
+#define F_HSS1_PLL_LOCK V_HSS1_PLL_LOCK(1U)
+
+#define S_HSS0_PLL_LOCK 0
+#define V_HSS0_PLL_LOCK(x) ((x) << S_HSS0_PLL_LOCK)
+#define F_HSS0_PLL_LOCK V_HSS0_PLL_LOCK(1U)
+
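Editor's note: a hedged sketch of the kind of bring-up sequence the A_PCIE_HSS_RST bits suggest: take firmware control of the resets, release the PHY power-on resets, and poll for PLL lock. The loop bound and the ordering are illustrative assumptions; the actual sequencing requirements come from the hardware documentation, not this diff:

static int
hss_release_reset_example(volatile uint32_t *bar0)
{
	uint32_t v;
	int i;

	v = reg_rd(bar0, A_PCIE_HSS_RST);
	v |= F_HSS_RST_CTRL_BY_FW | F_HSS0_POR_N | F_HSS1_POR_N;
	reg_wr(bar0, A_PCIE_HSS_RST, v);

	/* Poll for both PLLs to lock (bounded, illustrative timeout). */
	for (i = 0; i < 1000; i++) {
		v = reg_rd(bar0, A_PCIE_HSS_RST);
		if ((v & (F_HSS0_PLL_LOCK | F_HSS1_PLL_LOCK)) ==
		    (F_HSS0_PLL_LOCK | F_HSS1_PLL_LOCK))
			return (0);
	}
	return (-1);	/* timed out */
}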
+#define A_PCIE_T5_ARM_CFG 0x5f20
+
+#define S_T5_ARM_MAXREQCNT 20
+#define M_T5_ARM_MAXREQCNT 0x7fU
+#define V_T5_ARM_MAXREQCNT(x) ((x) << S_T5_ARM_MAXREQCNT)
+#define G_T5_ARM_MAXREQCNT(x) (((x) >> S_T5_ARM_MAXREQCNT) & M_T5_ARM_MAXREQCNT)
+
+#define S_T5_ARM_MAXRDREQSIZE 17
+#define M_T5_ARM_MAXRDREQSIZE 0x7U
+#define V_T5_ARM_MAXRDREQSIZE(x) ((x) << S_T5_ARM_MAXRDREQSIZE)
+#define G_T5_ARM_MAXRDREQSIZE(x) (((x) >> S_T5_ARM_MAXRDREQSIZE) & M_T5_ARM_MAXRDREQSIZE)
+
+#define S_T5_ARM_MAXRSPCNT 9
+#define M_T5_ARM_MAXRSPCNT 0xffU
+#define V_T5_ARM_MAXRSPCNT(x) ((x) << S_T5_ARM_MAXRSPCNT)
+#define G_T5_ARM_MAXRSPCNT(x) (((x) >> S_T5_ARM_MAXRSPCNT) & M_T5_ARM_MAXRSPCNT)
+
+#define A_PCIE_T5_ARM_STAT 0x5f24
+
+#define S_ARM_RESPCNT 20
+#define M_ARM_RESPCNT 0x1ffU
+#define V_ARM_RESPCNT(x) ((x) << S_ARM_RESPCNT)
+#define G_ARM_RESPCNT(x) (((x) >> S_ARM_RESPCNT) & M_ARM_RESPCNT)
+
+#define S_ARM_RDREQCNT 12
+#define M_ARM_RDREQCNT 0x3fU
+#define V_ARM_RDREQCNT(x) ((x) << S_ARM_RDREQCNT)
+#define G_ARM_RDREQCNT(x) (((x) >> S_ARM_RDREQCNT) & M_ARM_RDREQCNT)
+
+#define S_ARM_WRREQCNT 0
+#define M_ARM_WRREQCNT 0x1ffU
+#define V_ARM_WRREQCNT(x) ((x) << S_ARM_WRREQCNT)
+#define G_ARM_WRREQCNT(x) (((x) >> S_ARM_WRREQCNT) & M_ARM_WRREQCNT)
+
+#define A_PCIE_T5_ARM_STAT2 0x5f28
+
+#define S_ARM_COOKIECNT 24
+#define M_ARM_COOKIECNT 0xfU
+#define V_ARM_COOKIECNT(x) ((x) << S_ARM_COOKIECNT)
+#define G_ARM_COOKIECNT(x) (((x) >> S_ARM_COOKIECNT) & M_ARM_COOKIECNT)
+
+#define S_ARM_RDSEQNUMUPDCNT 20
+#define M_ARM_RDSEQNUMUPDCNT 0xfU
+#define V_ARM_RDSEQNUMUPDCNT(x) ((x) << S_ARM_RDSEQNUMUPDCNT)
+#define G_ARM_RDSEQNUMUPDCNT(x) (((x) >> S_ARM_RDSEQNUMUPDCNT) & M_ARM_RDSEQNUMUPDCNT)
+
+#define S_ARM_SIREQCNT 16
+#define M_ARM_SIREQCNT 0xfU
+#define V_ARM_SIREQCNT(x) ((x) << S_ARM_SIREQCNT)
+#define G_ARM_SIREQCNT(x) (((x) >> S_ARM_SIREQCNT) & M_ARM_SIREQCNT)
+
+#define S_ARM_WREOPMATCHSOP 12
+#define V_ARM_WREOPMATCHSOP(x) ((x) << S_ARM_WREOPMATCHSOP)
+#define F_ARM_WREOPMATCHSOP V_ARM_WREOPMATCHSOP(1U)
+
+#define S_ARM_WRSOPCNT 8
+#define M_ARM_WRSOPCNT 0xfU
+#define V_ARM_WRSOPCNT(x) ((x) << S_ARM_WRSOPCNT)
+#define G_ARM_WRSOPCNT(x) (((x) >> S_ARM_WRSOPCNT) & M_ARM_WRSOPCNT)
+
+#define S_ARM_RDSOPCNT 0
+#define M_ARM_RDSOPCNT 0xffU
+#define V_ARM_RDSOPCNT(x) ((x) << S_ARM_RDSOPCNT)
+#define G_ARM_RDSOPCNT(x) (((x) >> S_ARM_RDSOPCNT) & M_ARM_RDSOPCNT)
+
+#define A_PCIE_T5_ARM_STAT3 0x5f2c
+
+#define S_ARM_ATMREQSOPCNT 24
+#define M_ARM_ATMREQSOPCNT 0xffU
+#define V_ARM_ATMREQSOPCNT(x) ((x) << S_ARM_ATMREQSOPCNT)
+#define G_ARM_ATMREQSOPCNT(x) (((x) >> S_ARM_ATMREQSOPCNT) & M_ARM_ATMREQSOPCNT)
+
+#define S_ARM_ATMEOPMATCHSOP 17
+#define V_ARM_ATMEOPMATCHSOP(x) ((x) << S_ARM_ATMEOPMATCHSOP)
+#define F_ARM_ATMEOPMATCHSOP V_ARM_ATMEOPMATCHSOP(1U)
+
+#define S_ARM_RSPEOPMATCHSOP 16
+#define V_ARM_RSPEOPMATCHSOP(x) ((x) << S_ARM_RSPEOPMATCHSOP)
+#define F_ARM_RSPEOPMATCHSOP V_ARM_RSPEOPMATCHSOP(1U)
+
+#define S_ARM_RSPERRCNT 8
+#define M_ARM_RSPERRCNT 0xffU
+#define V_ARM_RSPERRCNT(x) ((x) << S_ARM_RSPERRCNT)
+#define G_ARM_RSPERRCNT(x) (((x) >> S_ARM_RSPERRCNT) & M_ARM_RSPERRCNT)
+
+#define S_ARM_RSPSOPCNT 0
+#define M_ARM_RSPSOPCNT 0xffU
+#define V_ARM_RSPSOPCNT(x) ((x) << S_ARM_RSPSOPCNT)
+#define G_ARM_RSPSOPCNT(x) (((x) >> S_ARM_RSPSOPCNT) & M_ARM_RSPSOPCNT)
+
+#define A_PCIE_ARM_REQUESTER_ID 0x5f30
+
+#define S_A0_RSVD1 24
+#define M_A0_RSVD1 0xffU
+#define V_A0_RSVD1(x) ((x) << S_A0_RSVD1)
+#define G_A0_RSVD1(x) (((x) >> S_A0_RSVD1) & M_A0_RSVD1)
+
+#define S_A0_PRIMBUSNUMBER 16
+#define M_A0_PRIMBUSNUMBER 0xffU
+#define V_A0_PRIMBUSNUMBER(x) ((x) << S_A0_PRIMBUSNUMBER)
+#define G_A0_PRIMBUSNUMBER(x) (((x) >> S_A0_PRIMBUSNUMBER) & M_A0_PRIMBUSNUMBER)
+
+#define S_A0_REQUESTERID 0
+#define M_A0_REQUESTERID 0xffffU
+#define V_A0_REQUESTERID(x) ((x) << S_A0_REQUESTERID)
+#define G_A0_REQUESTERID(x) (((x) >> S_A0_REQUESTERID) & M_A0_REQUESTERID)
+
+#define A_PCIE_SWITCH_CFG_SPACE_REQ0 0x5f34
+
+#define S_REQ0ENABLE 31
+#define V_REQ0ENABLE(x) ((x) << S_REQ0ENABLE)
+#define F_REQ0ENABLE V_REQ0ENABLE(1U)
+
+#define S_RDREQ0TYPE 19
+#define V_RDREQ0TYPE(x) ((x) << S_RDREQ0TYPE)
+#define F_RDREQ0TYPE V_RDREQ0TYPE(1U)
+
+#define S_BYTEENABLE0 15
+#define M_BYTEENABLE0 0xfU
+#define V_BYTEENABLE0(x) ((x) << S_BYTEENABLE0)
+#define G_BYTEENABLE0(x) (((x) >> S_BYTEENABLE0) & M_BYTEENABLE0)
+
+#define S_REGADDR0 0
+#define M_REGADDR0 0x7fffU
+#define V_REGADDR0(x) ((x) << S_REGADDR0)
+#define G_REGADDR0(x) (((x) >> S_REGADDR0) & M_REGADDR0)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA0 0x5f38
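Editor's note: a sketch of how these request/data register pairs are presumably driven: program a read request into REQ0 with the enable bit set, then collect the result from the matching DATA0 register. The completion handshake (whether the enable self-clears or status must be polled elsewhere) is not visible in this diff, and treating the set type bit as "read" is likewise an assumption:

static uint32_t
switch_cfg_read_example(volatile uint32_t *bar0, uint32_t regaddr)
{
	/* All four byte enables, read type (assumed), target address. */
	uint32_t req = F_REQ0ENABLE | F_RDREQ0TYPE |
	    V_BYTEENABLE0(0xf) | V_REGADDR0(regaddr);

	reg_wr(bar0, A_PCIE_SWITCH_CFG_SPACE_REQ0, req);
	return (reg_rd(bar0, A_PCIE_SWITCH_CFG_SPACE_DATA0));
}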
+#define A_PCIE_SWITCH_CFG_SPACE_REQ1 0x5f3c
+
+#define S_REQ1ENABLE 31
+#define V_REQ1ENABLE(x) ((x) << S_REQ1ENABLE)
+#define F_REQ1ENABLE V_REQ1ENABLE(1U)
+
+#define S_RDREQ1TYPE 26
+#define M_RDREQ1TYPE 0xfU
+#define V_RDREQ1TYPE(x) ((x) << S_RDREQ1TYPE)
+#define G_RDREQ1TYPE(x) (((x) >> S_RDREQ1TYPE) & M_RDREQ1TYPE)
+
+#define S_BYTEENABLE1 15
+#define M_BYTEENABLE1 0x7ffU
+#define V_BYTEENABLE1(x) ((x) << S_BYTEENABLE1)
+#define G_BYTEENABLE1(x) (((x) >> S_BYTEENABLE1) & M_BYTEENABLE1)
+
+#define S_REGADDR1 0
+#define M_REGADDR1 0x7fffU
+#define V_REGADDR1(x) ((x) << S_REGADDR1)
+#define G_REGADDR1(x) (((x) >> S_REGADDR1) & M_REGADDR1)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA1 0x5f40
+#define A_PCIE_SWITCH_CFG_SPACE_REQ2 0x5f44
+
+#define S_REQ2ENABLE 31
+#define V_REQ2ENABLE(x) ((x) << S_REQ2ENABLE)
+#define F_REQ2ENABLE V_REQ2ENABLE(1U)
+
+#define S_RDREQ2TYPE 26
+#define M_RDREQ2TYPE 0xfU
+#define V_RDREQ2TYPE(x) ((x) << S_RDREQ2TYPE)
+#define G_RDREQ2TYPE(x) (((x) >> S_RDREQ2TYPE) & M_RDREQ2TYPE)
+
+#define S_BYTEENABLE2 15
+#define M_BYTEENABLE2 0x7ffU
+#define V_BYTEENABLE2(x) ((x) << S_BYTEENABLE2)
+#define G_BYTEENABLE2(x) (((x) >> S_BYTEENABLE2) & M_BYTEENABLE2)
+
+#define S_REGADDR2 0
+#define M_REGADDR2 0x7fffU
+#define V_REGADDR2(x) ((x) << S_REGADDR2)
+#define G_REGADDR2(x) (((x) >> S_REGADDR2) & M_REGADDR2)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA2 0x5f48
+#define A_PCIE_SWITCH_CFG_SPACE_REQ3 0x5f4c
+
+#define S_REQ3ENABLE 31
+#define V_REQ3ENABLE(x) ((x) << S_REQ3ENABLE)
+#define F_REQ3ENABLE V_REQ3ENABLE(1U)
+
+#define S_RDREQ3TYPE 26
+#define M_RDREQ3TYPE 0xfU
+#define V_RDREQ3TYPE(x) ((x) << S_RDREQ3TYPE)
+#define G_RDREQ3TYPE(x) (((x) >> S_RDREQ3TYPE) & M_RDREQ3TYPE)
+
+#define S_BYTEENABLE3 15
+#define M_BYTEENABLE3 0x7ffU
+#define V_BYTEENABLE3(x) ((x) << S_BYTEENABLE3)
+#define G_BYTEENABLE3(x) (((x) >> S_BYTEENABLE3) & M_BYTEENABLE3)
+
+#define S_REGADDR3 0
+#define M_REGADDR3 0x7fffU
+#define V_REGADDR3(x) ((x) << S_REGADDR3)
+#define G_REGADDR3(x) (((x) >> S_REGADDR3) & M_REGADDR3)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA3 0x5f50
+#define A_PCIE_SWITCH_CFG_SPACE_REQ4 0x5f54
+
+#define S_REQ4ENABLE 31
+#define V_REQ4ENABLE(x) ((x) << S_REQ4ENABLE)
+#define F_REQ4ENABLE V_REQ4ENABLE(1U)
+
+#define S_RDREQ4TYPE 26
+#define M_RDREQ4TYPE 0xfU
+#define V_RDREQ4TYPE(x) ((x) << S_RDREQ4TYPE)
+#define G_RDREQ4TYPE(x) (((x) >> S_RDREQ4TYPE) & M_RDREQ4TYPE)
+
+#define S_BYTEENABLE4 15
+#define M_BYTEENABLE4 0x7ffU
+#define V_BYTEENABLE4(x) ((x) << S_BYTEENABLE4)
+#define G_BYTEENABLE4(x) (((x) >> S_BYTEENABLE4) & M_BYTEENABLE4)
+
+#define S_REGADDR4 0
+#define M_REGADDR4 0x7fffU
+#define V_REGADDR4(x) ((x) << S_REGADDR4)
+#define G_REGADDR4(x) (((x) >> S_REGADDR4) & M_REGADDR4)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA4 0x5f58
+#define A_PCIE_SWITCH_CFG_SPACE_REQ5 0x5f5c
+
+#define S_REQ5ENABLE 31
+#define V_REQ5ENABLE(x) ((x) << S_REQ5ENABLE)
+#define F_REQ5ENABLE V_REQ5ENABLE(1U)
+
+#define S_RDREQ5TYPE 26
+#define M_RDREQ5TYPE 0xfU
+#define V_RDREQ5TYPE(x) ((x) << S_RDREQ5TYPE)
+#define G_RDREQ5TYPE(x) (((x) >> S_RDREQ5TYPE) & M_RDREQ5TYPE)
+
+#define S_BYTEENABLE5 15
+#define M_BYTEENABLE5 0x7ffU
+#define V_BYTEENABLE5(x) ((x) << S_BYTEENABLE5)
+#define G_BYTEENABLE5(x) (((x) >> S_BYTEENABLE5) & M_BYTEENABLE5)
+
+#define S_REGADDR5 0
+#define M_REGADDR5 0x7fffU
+#define V_REGADDR5(x) ((x) << S_REGADDR5)
+#define G_REGADDR5(x) (((x) >> S_REGADDR5) & M_REGADDR5)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA5 0x5f60
+#define A_PCIE_SWITCH_CFG_SPACE_REQ6 0x5f64
+
+#define S_REQ6ENABLE 31
+#define V_REQ6ENABLE(x) ((x) << S_REQ6ENABLE)
+#define F_REQ6ENABLE V_REQ6ENABLE(1U)
+
+#define S_RDREQ6TYPE 26
+#define M_RDREQ6TYPE 0xfU
+#define V_RDREQ6TYPE(x) ((x) << S_RDREQ6TYPE)
+#define G_RDREQ6TYPE(x) (((x) >> S_RDREQ6TYPE) & M_RDREQ6TYPE)
+
+#define S_BYTEENABLE6 15
+#define M_BYTEENABLE6 0x7ffU
+#define V_BYTEENABLE6(x) ((x) << S_BYTEENABLE6)
+#define G_BYTEENABLE6(x) (((x) >> S_BYTEENABLE6) & M_BYTEENABLE6)
+
+#define S_REGADDR6 0
+#define M_REGADDR6 0x7fffU
+#define V_REGADDR6(x) ((x) << S_REGADDR6)
+#define G_REGADDR6(x) (((x) >> S_REGADDR6) & M_REGADDR6)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA6 0x5f68
+#define A_PCIE_SWITCH_CFG_SPACE_REQ7 0x5f6c
+
+#define S_REQ7ENABLE 31
+#define V_REQ7ENABLE(x) ((x) << S_REQ7ENABLE)
+#define F_REQ7ENABLE V_REQ7ENABLE(1U)
+
+#define S_RDREQ7TYPE 26
+#define M_RDREQ7TYPE 0xfU
+#define V_RDREQ7TYPE(x) ((x) << S_RDREQ7TYPE)
+#define G_RDREQ7TYPE(x) (((x) >> S_RDREQ7TYPE) & M_RDREQ7TYPE)
+
+#define S_BYTEENABLE7 15
+#define M_BYTEENABLE7 0x7ffU
+#define V_BYTEENABLE7(x) ((x) << S_BYTEENABLE7)
+#define G_BYTEENABLE7(x) (((x) >> S_BYTEENABLE7) & M_BYTEENABLE7)
+
+#define S_REGADDR7 0
+#define M_REGADDR7 0x7fffU
+#define V_REGADDR7(x) ((x) << S_REGADDR7)
+#define G_REGADDR7(x) (((x) >> S_REGADDR7) & M_REGADDR7)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA7 0x5f70
+#define A_PCIE_SWITCH_CFG_SPACE_REQ8 0x5f74
+
+#define S_REQ8ENABLE 31
+#define V_REQ8ENABLE(x) ((x) << S_REQ8ENABLE)
+#define F_REQ8ENABLE V_REQ8ENABLE(1U)
+
+#define S_RDREQ8TYPE 26
+#define M_RDREQ8TYPE 0xfU
+#define V_RDREQ8TYPE(x) ((x) << S_RDREQ8TYPE)
+#define G_RDREQ8TYPE(x) (((x) >> S_RDREQ8TYPE) & M_RDREQ8TYPE)
+
+#define S_BYTEENABLE8 15
+#define M_BYTEENABLE8 0x7ffU
+#define V_BYTEENABLE8(x) ((x) << S_BYTEENABLE8)
+#define G_BYTEENABLE8(x) (((x) >> S_BYTEENABLE8) & M_BYTEENABLE8)
+
+#define S_REGADDR8 0
+#define M_REGADDR8 0x7fffU
+#define V_REGADDR8(x) ((x) << S_REGADDR8)
+#define G_REGADDR8(x) (((x) >> S_REGADDR8) & M_REGADDR8)
+
+#define A_PCIE_SWITCH_CFG_SPACE_DATA8 0x5f78
+#define A_PCIE_SNPS_G5_PHY_CR_REQ 0x5f7c
+
+#define S_REGSEL 31
+#define V_REGSEL(x) ((x) << S_REGSEL)
+#define F_REGSEL V_REGSEL(1U)
+
+#define S_RDENABLE 30
+#define V_RDENABLE(x) ((x) << S_RDENABLE)
+#define F_RDENABLE V_RDENABLE(1U)
+
+#define S_WRENABLE 29
+#define V_WRENABLE(x) ((x) << S_WRENABLE)
+#define F_WRENABLE V_WRENABLE(1U)
+
+#define S_AUTOINCRVAL 21
+#define M_AUTOINCRVAL 0x3U
+#define V_AUTOINCRVAL(x) ((x) << S_AUTOINCRVAL)
+#define G_AUTOINCRVAL(x) (((x) >> S_AUTOINCRVAL) & M_AUTOINCRVAL)
+
+#define S_AUTOINCR 20
+#define V_AUTOINCR(x) ((x) << S_AUTOINCR)
+#define F_AUTOINCR V_AUTOINCR(1U)
+
+#define S_PHYSEL 16
+#define M_PHYSEL 0xfU
+#define V_PHYSEL(x) ((x) << S_PHYSEL)
+#define G_PHYSEL(x) (((x) >> S_PHYSEL) & M_PHYSEL)
+
+#define S_T7_REGADDR 0
+#define M_T7_REGADDR 0xffffU
+#define V_T7_REGADDR(x) ((x) << S_T7_REGADDR)
+#define G_T7_REGADDR(x) (((x) >> S_T7_REGADDR) & M_T7_REGADDR)
+
+#define A_PCIE_SNPS_G5_PHY_CR_DATA 0x5f80
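Editor's note: a sketch of an indirect PHY register read through the CR request/data pair above: select the PHY, latch the address with REGSEL, then strobe RDENABLE and read the data register. As with the config-space requestors, only the field layout comes from these definitions; the latch-then-strobe handshake is an assumption:

static uint32_t
g5_phy_cr_read_example(volatile uint32_t *bar0, unsigned int phy,
    unsigned int addr)
{
	uint32_t req = F_REGSEL | V_PHYSEL(phy) | V_T7_REGADDR(addr);

	reg_wr(bar0, A_PCIE_SNPS_G5_PHY_CR_REQ, req);
	reg_wr(bar0, A_PCIE_SNPS_G5_PHY_CR_REQ, req | F_RDENABLE);
	return (reg_rd(bar0, A_PCIE_SNPS_G5_PHY_CR_DATA));
}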
+#define A_PCIE_SNPS_G5_PHY_SRAM_CFG 0x5f84
+
+#define S_PHY3_SRAM_BOOTLOAD_BYPASS 27
+#define V_PHY3_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY3_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY3_SRAM_BOOTLOAD_BYPASS V_PHY3_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY3_SRAM_BYPASS 26
+#define V_PHY3_SRAM_BYPASS(x) ((x) << S_PHY3_SRAM_BYPASS)
+#define F_PHY3_SRAM_BYPASS V_PHY3_SRAM_BYPASS(1U)
+
+#define S_PHY3_SRAM_ECC_EN 25
+#define V_PHY3_SRAM_ECC_EN(x) ((x) << S_PHY3_SRAM_ECC_EN)
+#define F_PHY3_SRAM_ECC_EN V_PHY3_SRAM_ECC_EN(1U)
+
+#define S_PHY3_SRAM_EXT_LD_DONE 24
+#define V_PHY3_SRAM_EXT_LD_DONE(x) ((x) << S_PHY3_SRAM_EXT_LD_DONE)
+#define F_PHY3_SRAM_EXT_LD_DONE V_PHY3_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY2_SRAM_BOOTLOAD_BYPASS 19
+#define V_PHY2_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY2_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY2_SRAM_BOOTLOAD_BYPASS V_PHY2_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY2_SRAM_BYPASS 18
+#define V_PHY2_SRAM_BYPASS(x) ((x) << S_PHY2_SRAM_BYPASS)
+#define F_PHY2_SRAM_BYPASS V_PHY2_SRAM_BYPASS(1U)
+
+#define S_PHY2_SRAM_ECC_EN 17
+#define V_PHY2_SRAM_ECC_EN(x) ((x) << S_PHY2_SRAM_ECC_EN)
+#define F_PHY2_SRAM_ECC_EN V_PHY2_SRAM_ECC_EN(1U)
+
+#define S_PHY2_SRAM_EXT_LD_DONE 16
+#define V_PHY2_SRAM_EXT_LD_DONE(x) ((x) << S_PHY2_SRAM_EXT_LD_DONE)
+#define F_PHY2_SRAM_EXT_LD_DONE V_PHY2_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY1_SRAM_BOOTLOAD_BYPASS 11
+#define V_PHY1_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY1_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY1_SRAM_BOOTLOAD_BYPASS V_PHY1_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY1_SRAM_BYPASS 10
+#define V_PHY1_SRAM_BYPASS(x) ((x) << S_PHY1_SRAM_BYPASS)
+#define F_PHY1_SRAM_BYPASS V_PHY1_SRAM_BYPASS(1U)
+
+#define S_PHY1_SRAM_ECC_EN 9
+#define V_PHY1_SRAM_ECC_EN(x) ((x) << S_PHY1_SRAM_ECC_EN)
+#define F_PHY1_SRAM_ECC_EN V_PHY1_SRAM_ECC_EN(1U)
+
+#define S_PHY1_SRAM_EXT_LD_DONE 8
+#define V_PHY1_SRAM_EXT_LD_DONE(x) ((x) << S_PHY1_SRAM_EXT_LD_DONE)
+#define F_PHY1_SRAM_EXT_LD_DONE V_PHY1_SRAM_EXT_LD_DONE(1U)
+
+#define S_PHY_CR_PARA_SEL 4
+#define M_PHY_CR_PARA_SEL 0xfU
+#define V_PHY_CR_PARA_SEL(x) ((x) << S_PHY_CR_PARA_SEL)
+#define G_PHY_CR_PARA_SEL(x) (((x) >> S_PHY_CR_PARA_SEL) & M_PHY_CR_PARA_SEL)
+
+#define S_PHY0_SRAM_BOOTLOAD_BYPASS 3
+#define V_PHY0_SRAM_BOOTLOAD_BYPASS(x) ((x) << S_PHY0_SRAM_BOOTLOAD_BYPASS)
+#define F_PHY0_SRAM_BOOTLOAD_BYPASS V_PHY0_SRAM_BOOTLOAD_BYPASS(1U)
+
+#define S_PHY0_SRAM_BYPASS 2
+#define V_PHY0_SRAM_BYPASS(x) ((x) << S_PHY0_SRAM_BYPASS)
+#define F_PHY0_SRAM_BYPASS V_PHY0_SRAM_BYPASS(1U)
+
+#define S_PHY0_SRAM_ECC_EN 1
+#define V_PHY0_SRAM_ECC_EN(x) ((x) << S_PHY0_SRAM_ECC_EN)
+#define F_PHY0_SRAM_ECC_EN V_PHY0_SRAM_ECC_EN(1U)
+
+#define S_PHY0_SRAM_EXT_LD_DONE 0
+#define V_PHY0_SRAM_EXT_LD_DONE(x) ((x) << S_PHY0_SRAM_EXT_LD_DONE)
+#define F_PHY0_SRAM_EXT_LD_DONE V_PHY0_SRAM_EXT_LD_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_SRAM_STS 0x5f88
+
+#define S_PHY3_SRAM_INIT_DONE 3
+#define V_PHY3_SRAM_INIT_DONE(x) ((x) << S_PHY3_SRAM_INIT_DONE)
+#define F_PHY3_SRAM_INIT_DONE V_PHY3_SRAM_INIT_DONE(1U)
+
+#define S_PHY2_SRAM_INIT_DONE 2
+#define V_PHY2_SRAM_INIT_DONE(x) ((x) << S_PHY2_SRAM_INIT_DONE)
+#define F_PHY2_SRAM_INIT_DONE V_PHY2_SRAM_INIT_DONE(1U)
+
+#define S_PHY1_SRAM_INIT_DONE 1
+#define V_PHY1_SRAM_INIT_DONE(x) ((x) << S_PHY1_SRAM_INIT_DONE)
+#define F_PHY1_SRAM_INIT_DONE V_PHY1_SRAM_INIT_DONE(1U)
+
+#define S_PHY0_SRAM_INIT_DONE 0
+#define V_PHY0_SRAM_INIT_DONE(x) ((x) << S_PHY0_SRAM_INIT_DONE)
+#define F_PHY0_SRAM_INIT_DONE V_PHY0_SRAM_INIT_DONE(1U)
+
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_TO_3 0x5f90
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_0_DATA 0x5f94
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_1_DATA 0x5f98
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_2_DATA 0x5f9c
+#define A_PCIE_SNPS_G5_PHY_CTRL_PHY_3_DATA 0x5fa0
+#define A_PCIE_SNPS_G5_PHY_DEFAULTS 0x5fa4
+#define A_PCIE_SNPS_G5_PHY_0_VALUES 0x5fa8
+
+#define S_RX_TERM_OFFSET 28
+#define V_RX_TERM_OFFSET(x) ((x) << S_RX_TERM_OFFSET)
+#define F_RX_TERM_OFFSET V_RX_TERM_OFFSET(1U)
+
+#define S_REFB_RAW_CLK_DIV2_EN 27
+#define V_REFB_RAW_CLK_DIV2_EN(x) ((x) << S_REFB_RAW_CLK_DIV2_EN)
+#define F_REFB_RAW_CLK_DIV2_EN V_REFB_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFB_RANGE 23
+#define M_REFB_RANGE 0xfU
+#define V_REFB_RANGE(x) ((x) << S_REFB_RANGE)
+#define G_REFB_RANGE(x) (((x) >> S_REFB_RANGE) & M_REFB_RANGE)
+
+#define S_REFB_LANE_CLK_EN 22
+#define V_REFB_LANE_CLK_EN(x) ((x) << S_REFB_LANE_CLK_EN)
+#define F_REFB_LANE_CLK_EN V_REFB_LANE_CLK_EN(1U)
+
+#define S_REFB_CLK_DIV2_EN 21
+#define V_REFB_CLK_DIV2_EN(x) ((x) << S_REFB_CLK_DIV2_EN)
+#define F_REFB_CLK_DIV2_EN V_REFB_CLK_DIV2_EN(1U)
+
+#define S_REFA_RAW_CLK_DIV2_EN 20
+#define V_REFA_RAW_CLK_DIV2_EN(x) ((x) << S_REFA_RAW_CLK_DIV2_EN)
+#define F_REFA_RAW_CLK_DIV2_EN V_REFA_RAW_CLK_DIV2_EN(1U)
+
+#define S_REFA_RANGE 16
+#define M_REFA_RANGE 0xfU
+#define V_REFA_RANGE(x) ((x) << S_REFA_RANGE)
+#define G_REFA_RANGE(x) (((x) >> S_REFA_RANGE) & M_REFA_RANGE)
+
+#define S_REFA_LANE_CLK_EN 15
+#define V_REFA_LANE_CLK_EN(x) ((x) << S_REFA_LANE_CLK_EN)
+#define F_REFA_LANE_CLK_EN V_REFA_LANE_CLK_EN(1U)
+
+#define S_REFA_CLK_DIV2_EN 14
+#define V_REFA_CLK_DIV2_EN(x) ((x) << S_REFA_CLK_DIV2_EN)
+#define F_REFA_CLK_DIV2_EN V_REFA_CLK_DIV2_EN(1U)
+
+#define S_NOMINAL_VPH_SEL 10
+#define M_NOMINAL_VPH_SEL 0x3U
+#define V_NOMINAL_VPH_SEL(x) ((x) << S_NOMINAL_VPH_SEL)
+#define G_NOMINAL_VPH_SEL(x) (((x) >> S_NOMINAL_VPH_SEL) & M_NOMINAL_VPH_SEL)
+
+#define S_NOMINAL_VP_SEL 8
+#define M_NOMINAL_VP_SEL 0x3U
+#define V_NOMINAL_VP_SEL(x) ((x) << S_NOMINAL_VP_SEL)
+#define G_NOMINAL_VP_SEL(x) (((x) >> S_NOMINAL_VP_SEL) & M_NOMINAL_VP_SEL)
+
+#define S_MPLLB_WORD_CLK_EN 7
+#define V_MPLLB_WORD_CLK_EN(x) ((x) << S_MPLLB_WORD_CLK_EN)
+#define F_MPLLB_WORD_CLK_EN V_MPLLB_WORD_CLK_EN(1U)
+
+#define S_MPLLB_SSC_EN 6
+#define V_MPLLB_SSC_EN(x) ((x) << S_MPLLB_SSC_EN)
+#define F_MPLLB_SSC_EN V_MPLLB_SSC_EN(1U)
+
+#define S_MPLLB_SHORT_LOCK_EN 5
+#define V_MPLLB_SHORT_LOCK_EN(x) ((x) << S_MPLLB_SHORT_LOCK_EN)
+#define F_MPLLB_SHORT_LOCK_EN V_MPLLB_SHORT_LOCK_EN(1U)
+
+#define S_MPLLB_FORCE_EN 4
+#define V_MPLLB_FORCE_EN(x) ((x) << S_MPLLB_FORCE_EN)
+#define F_MPLLB_FORCE_EN V_MPLLB_FORCE_EN(1U)
+
+#define S_MPLLA_WORD_CLK_EN 3
+#define V_MPLLA_WORD_CLK_EN(x) ((x) << S_MPLLA_WORD_CLK_EN)
+#define F_MPLLA_WORD_CLK_EN V_MPLLA_WORD_CLK_EN(1U)
+
+#define S_MPLLA_SSC_EN 2
+#define V_MPLLA_SSC_EN(x) ((x) << S_MPLLA_SSC_EN)
+#define F_MPLLA_SSC_EN V_MPLLA_SSC_EN(1U)
+
+#define S_MPLLA_SHORT_LOCK_EN 1
+#define V_MPLLA_SHORT_LOCK_EN(x) ((x) << S_MPLLA_SHORT_LOCK_EN)
+#define F_MPLLA_SHORT_LOCK_EN V_MPLLA_SHORT_LOCK_EN(1U)
+
+#define S_MPLLA_FORCE_EN 0
+#define V_MPLLA_FORCE_EN(x) ((x) << S_MPLLA_FORCE_EN)
+#define F_MPLLA_FORCE_EN V_MPLLA_FORCE_EN(1U)
+
+#define A_PCIE_SNPS_G5_PHY_1_VALUES 0x5fac
+
+#define S_REF_ALT1_CLK_M 13
+#define V_REF_ALT1_CLK_M(x) ((x) << S_REF_ALT1_CLK_M)
+#define F_REF_ALT1_CLK_M V_REF_ALT1_CLK_M(1U)
+
+#define S_REF_ALT1_CLK_P 12
+#define V_REF_ALT1_CLK_P(x) ((x) << S_REF_ALT1_CLK_P)
+#define F_REF_ALT1_CLK_P V_REF_ALT1_CLK_P(1U)
+
+#define A_PCIE_SNPS_G5_PHY_2_VALUES 0x5fb0
+#define A_PCIE_SNPS_G5_PHY_3_VALUES 0x5fb4
+#define A_PCIE_SNPS_G5_PHY_0_RX_LANEPLL_BYPASS_MODE 0x5fb8
+
+#define S_T7_LANE3 15
+#define M_T7_LANE3 0x1fU
+#define V_T7_LANE3(x) ((x) << S_T7_LANE3)
+#define G_T7_LANE3(x) (((x) >> S_T7_LANE3) & M_T7_LANE3)
+
+#define S_T7_LANE2 10
+#define M_T7_LANE2 0x1fU
+#define V_T7_LANE2(x) ((x) << S_T7_LANE2)
+#define G_T7_LANE2(x) (((x) >> S_T7_LANE2) & M_T7_LANE2)
+
+#define S_T7_LANE1 5
+#define M_T7_LANE1 0x1fU
+#define V_T7_LANE1(x) ((x) << S_T7_LANE1)
+#define G_T7_LANE1(x) (((x) >> S_T7_LANE1) & M_T7_LANE1)
+
+#define S_T7_LANE0 0
+#define M_T7_LANE0 0x1fU
+#define V_T7_LANE0(x) ((x) << S_T7_LANE0)
+#define G_T7_LANE0(x) (((x) >> S_T7_LANE0) & M_T7_LANE0)
+
+#define A_PCIE_SNPS_G5_PHY_1_RX_LANEPLL_BYPASS_MODE 0x5fbc
+#define A_PCIE_SNPS_G5_PHY_2_RX_LANEPLL_BYPASS_MODE 0x5fc0
+#define A_PCIE_SNPS_G5_PHY_3_RX_LANEPLL_BYPASS_MODE 0x5fc4
+#define A_PCIE_SNPS_G5_PHY_0_1_RX_LANEPLL_SRC_SEL 0x5fc8
+
+#define S_LANE7_LANEPLL_SRC_SEL 28
+#define M_LANE7_LANEPLL_SRC_SEL 0xfU
+#define V_LANE7_LANEPLL_SRC_SEL(x) ((x) << S_LANE7_LANEPLL_SRC_SEL)
+#define G_LANE7_LANEPLL_SRC_SEL(x) (((x) >> S_LANE7_LANEPLL_SRC_SEL) & M_LANE7_LANEPLL_SRC_SEL)
+
+#define S_LANE6_LANEPLL_SRC_SEL 24
+#define M_LANE6_LANEPLL_SRC_SEL 0xfU
+#define V_LANE6_LANEPLL_SRC_SEL(x) ((x) << S_LANE6_LANEPLL_SRC_SEL)
+#define G_LANE6_LANEPLL_SRC_SEL(x) (((x) >> S_LANE6_LANEPLL_SRC_SEL) & M_LANE6_LANEPLL_SRC_SEL)
+
+#define S_LANE5_LANEPLL_SRC_SEL 20
+#define M_LANE5_LANEPLL_SRC_SEL 0xfU
+#define V_LANE5_LANEPLL_SRC_SEL(x) ((x) << S_LANE5_LANEPLL_SRC_SEL)
+#define G_LANE5_LANEPLL_SRC_SEL(x) (((x) >> S_LANE5_LANEPLL_SRC_SEL) & M_LANE5_LANEPLL_SRC_SEL)
+
+#define S_LANE4_LANEPLL_SRC_SEL 16
+#define M_LANE4_LANEPLL_SRC_SEL 0xfU
+#define V_LANE4_LANEPLL_SRC_SEL(x) ((x) << S_LANE4_LANEPLL_SRC_SEL)
+#define G_LANE4_LANEPLL_SRC_SEL(x) (((x) >> S_LANE4_LANEPLL_SRC_SEL) & M_LANE4_LANEPLL_SRC_SEL)
+
+#define S_LANE3_LANEPLL_SRC_SEL 12
+#define M_LANE3_LANEPLL_SRC_SEL 0xfU
+#define V_LANE3_LANEPLL_SRC_SEL(x) ((x) << S_LANE3_LANEPLL_SRC_SEL)
+#define G_LANE3_LANEPLL_SRC_SEL(x) (((x) >> S_LANE3_LANEPLL_SRC_SEL) & M_LANE3_LANEPLL_SRC_SEL)
+
+#define S_LANE2_LANEPLL_SRC_SEL 8
+#define M_LANE2_LANEPLL_SRC_SEL 0xfU
+#define V_LANE2_LANEPLL_SRC_SEL(x) ((x) << S_LANE2_LANEPLL_SRC_SEL)
+#define G_LANE2_LANEPLL_SRC_SEL(x) (((x) >> S_LANE2_LANEPLL_SRC_SEL) & M_LANE2_LANEPLL_SRC_SEL)
+
+#define S_LANE1_LANEPLL_SRC_SEL 4
+#define M_LANE1_LANEPLL_SRC_SEL 0xfU
+#define V_LANE1_LANEPLL_SRC_SEL(x) ((x) << S_LANE1_LANEPLL_SRC_SEL)
+#define G_LANE1_LANEPLL_SRC_SEL(x) (((x) >> S_LANE1_LANEPLL_SRC_SEL) & M_LANE1_LANEPLL_SRC_SEL)
+
+#define S_LANE0_LANEPLL_SRC_SEL 0
+#define M_LANE0_LANEPLL_SRC_SEL 0xfU
+#define V_LANE0_LANEPLL_SRC_SEL(x) ((x) << S_LANE0_LANEPLL_SRC_SEL)
+#define G_LANE0_LANEPLL_SRC_SEL(x) (((x) >> S_LANE0_LANEPLL_SRC_SEL) & M_LANE0_LANEPLL_SRC_SEL)
+
+#define A_PCIE_SNPS_G5_PHY_2_3_RX_LANEPLL_SRC_SEL 0x5fcc
+#define A_PCIE_SNPS_G5_PHY_RX_DECERR 0x5fd0
+
+#define S_LANE15_REC_OVRD_8B10B_DECERR 30
+#define M_LANE15_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE15_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE15_REC_OVRD_8B10B_DECERR)
+#define G_LANE15_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE15_REC_OVRD_8B10B_DECERR) & M_LANE15_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE14_REC_OVRD_8B10B_DECERR 28
+#define M_LANE14_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE14_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE14_REC_OVRD_8B10B_DECERR)
+#define G_LANE14_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE14_REC_OVRD_8B10B_DECERR) & M_LANE14_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE13_REC_OVRD_8B10B_DECERR 26
+#define M_LANE13_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE13_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE13_REC_OVRD_8B10B_DECERR)
+#define G_LANE13_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE13_REC_OVRD_8B10B_DECERR) & M_LANE13_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE12_REC_OVRD_8B10B_DECERR 24
+#define M_LANE12_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE12_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE12_REC_OVRD_8B10B_DECERR)
+#define G_LANE12_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE12_REC_OVRD_8B10B_DECERR) & M_LANE12_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE11_REC_OVRD_8B10B_DECERR 22
+#define M_LANE11_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE11_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE11_REC_OVRD_8B10B_DECERR)
+#define G_LANE11_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE11_REC_OVRD_8B10B_DECERR) & M_LANE11_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE10_REC_OVRD_8B10B_DECERR 20
+#define M_LANE10_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE10_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE10_REC_OVRD_8B10B_DECERR)
+#define G_LANE10_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE10_REC_OVRD_8B10B_DECERR) & M_LANE10_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE9_REC_OVRD_8B10B_DECERR 18
+#define M_LANE9_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE9_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE9_REC_OVRD_8B10B_DECERR)
+#define G_LANE9_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE9_REC_OVRD_8B10B_DECERR) & M_LANE9_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE8_REC_OVRD_8B10B_DECERR 16
+#define M_LANE8_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE8_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE8_REC_OVRD_8B10B_DECERR)
+#define G_LANE8_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE8_REC_OVRD_8B10B_DECERR) & M_LANE8_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE7_REC_OVRD_8B10B_DECERR 14
+#define M_LANE7_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE7_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE7_REC_OVRD_8B10B_DECERR)
+#define G_LANE7_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE7_REC_OVRD_8B10B_DECERR) & M_LANE7_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE6_REC_OVRD_8B10B_DECERR 12
+#define M_LANE6_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE6_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE6_REC_OVRD_8B10B_DECERR)
+#define G_LANE6_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE6_REC_OVRD_8B10B_DECERR) & M_LANE6_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE5_REC_OVRD_8B10B_DECERR 10
+#define M_LANE5_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE5_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE5_REC_OVRD_8B10B_DECERR)
+#define G_LANE5_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE5_REC_OVRD_8B10B_DECERR) & M_LANE5_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE4_REC_OVRD_8B10B_DECERR 8
+#define M_LANE4_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE4_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE4_REC_OVRD_8B10B_DECERR)
+#define G_LANE4_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE4_REC_OVRD_8B10B_DECERR) & M_LANE4_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE3_REC_OVRD_8B10B_DECERR 6
+#define M_LANE3_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE3_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE3_REC_OVRD_8B10B_DECERR)
+#define G_LANE3_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE3_REC_OVRD_8B10B_DECERR) & M_LANE3_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE2_REC_OVRD_8B10B_DECERR 4
+#define M_LANE2_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE2_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE2_REC_OVRD_8B10B_DECERR)
+#define G_LANE2_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE2_REC_OVRD_8B10B_DECERR) & M_LANE2_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE1_REC_OVRD_8B10B_DECERR 2
+#define M_LANE1_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE1_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE1_REC_OVRD_8B10B_DECERR)
+#define G_LANE1_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE1_REC_OVRD_8B10B_DECERR) & M_LANE1_REC_OVRD_8B10B_DECERR)
+
+#define S_LANE0_REC_OVRD_8B10B_DECERR 0
+#define M_LANE0_REC_OVRD_8B10B_DECERR 0x3U
+#define V_LANE0_REC_OVRD_8B10B_DECERR(x) ((x) << S_LANE0_REC_OVRD_8B10B_DECERR)
+#define G_LANE0_REC_OVRD_8B10B_DECERR(x) (((x) >> S_LANE0_REC_OVRD_8B10B_DECERR) & M_LANE0_REC_OVRD_8B10B_DECERR)
+
+#define A_PCIE_SNPS_G5_PHY_TX2RX_LOOPBK_REC_OVRD_EN 0x5fd4
+
+#define S_LANE15_REC_OVRD_EN 31
+#define V_LANE15_REC_OVRD_EN(x) ((x) << S_LANE15_REC_OVRD_EN)
+#define F_LANE15_REC_OVRD_EN V_LANE15_REC_OVRD_EN(1U)
+
+#define S_LANE14_REC_OVRD_EN 30
+#define V_LANE14_REC_OVRD_EN(x) ((x) << S_LANE14_REC_OVRD_EN)
+#define F_LANE14_REC_OVRD_EN V_LANE14_REC_OVRD_EN(1U)
+
+#define S_LANE13_REC_OVRD_EN 29
+#define V_LANE13_REC_OVRD_EN(x) ((x) << S_LANE13_REC_OVRD_EN)
+#define F_LANE13_REC_OVRD_EN V_LANE13_REC_OVRD_EN(1U)
+
+#define S_LANE12_REC_OVRD_EN 28
+#define V_LANE12_REC_OVRD_EN(x) ((x) << S_LANE12_REC_OVRD_EN)
+#define F_LANE12_REC_OVRD_EN V_LANE12_REC_OVRD_EN(1U)
+
+#define S_LANE11_REC_OVRD_EN 27
+#define V_LANE11_REC_OVRD_EN(x) ((x) << S_LANE11_REC_OVRD_EN)
+#define F_LANE11_REC_OVRD_EN V_LANE11_REC_OVRD_EN(1U)
+
+#define S_LANE10_REC_OVRD_EN 26
+#define V_LANE10_REC_OVRD_EN(x) ((x) << S_LANE10_REC_OVRD_EN)
+#define F_LANE10_REC_OVRD_EN V_LANE10_REC_OVRD_EN(1U)
+
+#define S_LANE9_REC_OVRD_EN 25
+#define V_LANE9_REC_OVRD_EN(x) ((x) << S_LANE9_REC_OVRD_EN)
+#define F_LANE9_REC_OVRD_EN V_LANE9_REC_OVRD_EN(1U)
+
+#define S_LANE8_REC_OVRD_EN 24
+#define V_LANE8_REC_OVRD_EN(x) ((x) << S_LANE8_REC_OVRD_EN)
+#define F_LANE8_REC_OVRD_EN V_LANE8_REC_OVRD_EN(1U)
+
+#define S_LANE7_REC_OVRD_EN 23
+#define V_LANE7_REC_OVRD_EN(x) ((x) << S_LANE7_REC_OVRD_EN)
+#define F_LANE7_REC_OVRD_EN V_LANE7_REC_OVRD_EN(1U)
+
+#define S_LANE6_REC_OVRD_EN 22
+#define V_LANE6_REC_OVRD_EN(x) ((x) << S_LANE6_REC_OVRD_EN)
+#define F_LANE6_REC_OVRD_EN V_LANE6_REC_OVRD_EN(1U)
+
+#define S_LANE5_REC_OVRD_EN 21
+#define V_LANE5_REC_OVRD_EN(x) ((x) << S_LANE5_REC_OVRD_EN)
+#define F_LANE5_REC_OVRD_EN V_LANE5_REC_OVRD_EN(1U)
+
+#define S_LANE4_REC_OVRD_EN 20
+#define V_LANE4_REC_OVRD_EN(x) ((x) << S_LANE4_REC_OVRD_EN)
+#define F_LANE4_REC_OVRD_EN V_LANE4_REC_OVRD_EN(1U)
+
+#define S_LANE3_REC_OVRD_EN 19
+#define V_LANE3_REC_OVRD_EN(x) ((x) << S_LANE3_REC_OVRD_EN)
+#define F_LANE3_REC_OVRD_EN V_LANE3_REC_OVRD_EN(1U)
+
+#define S_LANE2_REC_OVRD_EN 18
+#define V_LANE2_REC_OVRD_EN(x) ((x) << S_LANE2_REC_OVRD_EN)
+#define F_LANE2_REC_OVRD_EN V_LANE2_REC_OVRD_EN(1U)
+
+#define S_LANE1_REC_OVRD_EN 17
+#define V_LANE1_REC_OVRD_EN(x) ((x) << S_LANE1_REC_OVRD_EN)
+#define F_LANE1_REC_OVRD_EN V_LANE1_REC_OVRD_EN(1U)
+
+#define S_LANE0_REC_OVRD_EN 16
+#define V_LANE0_REC_OVRD_EN(x) ((x) << S_LANE0_REC_OVRD_EN)
+#define F_LANE0_REC_OVRD_EN V_LANE0_REC_OVRD_EN(1U)
+
+#define S_LANE15_TX2RX_LOOPBK 15
+#define V_LANE15_TX2RX_LOOPBK(x) ((x) << S_LANE15_TX2RX_LOOPBK)
+#define F_LANE15_TX2RX_LOOPBK V_LANE15_TX2RX_LOOPBK(1U)
+
+#define S_LANE14_TX2RX_LOOPBK 14
+#define V_LANE14_TX2RX_LOOPBK(x) ((x) << S_LANE14_TX2RX_LOOPBK)
+#define F_LANE14_TX2RX_LOOPBK V_LANE14_TX2RX_LOOPBK(1U)
+
+#define S_LANE13_TX2RX_LOOPBK 13
+#define V_LANE13_TX2RX_LOOPBK(x) ((x) << S_LANE13_TX2RX_LOOPBK)
+#define F_LANE13_TX2RX_LOOPBK V_LANE13_TX2RX_LOOPBK(1U)
+
+#define S_LANE12_TX2RX_LOOPBK 12
+#define V_LANE12_TX2RX_LOOPBK(x) ((x) << S_LANE12_TX2RX_LOOPBK)
+#define F_LANE12_TX2RX_LOOPBK V_LANE12_TX2RX_LOOPBK(1U)
+
+#define S_LANE11_TX2RX_LOOPBK 11
+#define V_LANE11_TX2RX_LOOPBK(x) ((x) << S_LANE11_TX2RX_LOOPBK)
+#define F_LANE11_TX2RX_LOOPBK V_LANE11_TX2RX_LOOPBK(1U)
+
+#define S_LANE10_TX2RX_LOOPBK 10
+#define V_LANE10_TX2RX_LOOPBK(x) ((x) << S_LANE10_TX2RX_LOOPBK)
+#define F_LANE10_TX2RX_LOOPBK V_LANE10_TX2RX_LOOPBK(1U)
+
+#define S_LANE9_TX2RX_LOOPBK 9
+#define V_LANE9_TX2RX_LOOPBK(x) ((x) << S_LANE9_TX2RX_LOOPBK)
+#define F_LANE9_TX2RX_LOOPBK V_LANE9_TX2RX_LOOPBK(1U)
+
+#define S_LANE8_TX2RX_LOOPBK 8
+#define V_LANE8_TX2RX_LOOPBK(x) ((x) << S_LANE8_TX2RX_LOOPBK)
+#define F_LANE8_TX2RX_LOOPBK V_LANE8_TX2RX_LOOPBK(1U)
+
+#define S_LANE7_TX2RX_LOOPBK 7
+#define V_LANE7_TX2RX_LOOPBK(x) ((x) << S_LANE7_TX2RX_LOOPBK)
+#define F_LANE7_TX2RX_LOOPBK V_LANE7_TX2RX_LOOPBK(1U)
+
+#define S_LANE6_TX2RX_LOOPBK 6
+#define V_LANE6_TX2RX_LOOPBK(x) ((x) << S_LANE6_TX2RX_LOOPBK)
+#define F_LANE6_TX2RX_LOOPBK V_LANE6_TX2RX_LOOPBK(1U)
+
+#define S_LANE5_TX2RX_LOOPBK 5
+#define V_LANE5_TX2RX_LOOPBK(x) ((x) << S_LANE5_TX2RX_LOOPBK)
+#define F_LANE5_TX2RX_LOOPBK V_LANE5_TX2RX_LOOPBK(1U)
+
+#define S_LANE4_TX2RX_LOOPBK 4
+#define V_LANE4_TX2RX_LOOPBK(x) ((x) << S_LANE4_TX2RX_LOOPBK)
+#define F_LANE4_TX2RX_LOOPBK V_LANE4_TX2RX_LOOPBK(1U)
+
+#define S_LANE3_TX2RX_LOOPBK 3
+#define V_LANE3_TX2RX_LOOPBK(x) ((x) << S_LANE3_TX2RX_LOOPBK)
+#define F_LANE3_TX2RX_LOOPBK V_LANE3_TX2RX_LOOPBK(1U)
+
+#define S_LANE2_TX2RX_LOOPBK 2
+#define V_LANE2_TX2RX_LOOPBK(x) ((x) << S_LANE2_TX2RX_LOOPBK)
+#define F_LANE2_TX2RX_LOOPBK V_LANE2_TX2RX_LOOPBK(1U)
+
+#define S_LANE1_TX2RX_LOOPBK 1
+#define V_LANE1_TX2RX_LOOPBK(x) ((x) << S_LANE1_TX2RX_LOOPBK)
+#define F_LANE1_TX2RX_LOOPBK V_LANE1_TX2RX_LOOPBK(1U)
+
+#define S_LANE0_TX2RX_LOOPBK 0
+#define V_LANE0_TX2RX_LOOPBK(x) ((x) << S_LANE0_TX2RX_LOOPBK)
+#define F_LANE0_TX2RX_LOOPBK V_LANE0_TX2RX_LOOPBK(1U)
+
+#define A_PCIE_PHY_TX_DISABLE_UPCS_PIPE_CONFIG 0x5fd8
+
+#define S_UPCS_PIPE_CONFIG 16
+#define M_UPCS_PIPE_CONFIG 0xffffU
+#define V_UPCS_PIPE_CONFIG(x) ((x) << S_UPCS_PIPE_CONFIG)
+#define G_UPCS_PIPE_CONFIG(x) (((x) >> S_UPCS_PIPE_CONFIG) & M_UPCS_PIPE_CONFIG)
+
+#define S_TX15_DISABLE 15
+#define V_TX15_DISABLE(x) ((x) << S_TX15_DISABLE)
+#define F_TX15_DISABLE V_TX15_DISABLE(1U)
+
+#define S_TX14_DISABLE 14
+#define V_TX14_DISABLE(x) ((x) << S_TX14_DISABLE)
+#define F_TX14_DISABLE V_TX14_DISABLE(1U)
+
+#define S_TX13_DISABLE 13
+#define V_TX13_DISABLE(x) ((x) << S_TX13_DISABLE)
+#define F_TX13_DISABLE V_TX13_DISABLE(1U)
+
+#define S_TX12_DISABLE 12
+#define V_TX12_DISABLE(x) ((x) << S_TX12_DISABLE)
+#define F_TX12_DISABLE V_TX12_DISABLE(1U)
+
+#define S_TX11_DISABLE 11
+#define V_TX11_DISABLE(x) ((x) << S_TX11_DISABLE)
+#define F_TX11_DISABLE V_TX11_DISABLE(1U)
+
+#define S_TX10_DISABLE 10
+#define V_TX10_DISABLE(x) ((x) << S_TX10_DISABLE)
+#define F_TX10_DISABLE V_TX10_DISABLE(1U)
+
+#define S_TX9_DISABLE 9
+#define V_TX9_DISABLE(x) ((x) << S_TX9_DISABLE)
+#define F_TX9_DISABLE V_TX9_DISABLE(1U)
+
+#define S_TX8_DISABLE 8
+#define V_TX8_DISABLE(x) ((x) << S_TX8_DISABLE)
+#define F_TX8_DISABLE V_TX8_DISABLE(1U)
+
+#define S_TX7_DISABLE 7
+#define V_TX7_DISABLE(x) ((x) << S_TX7_DISABLE)
+#define F_TX7_DISABLE V_TX7_DISABLE(1U)
+
+#define S_TX6_DISABLE 6
+#define V_TX6_DISABLE(x) ((x) << S_TX6_DISABLE)
+#define F_TX6_DISABLE V_TX6_DISABLE(1U)
+
+#define S_TX5_DISABLE 5
+#define V_TX5_DISABLE(x) ((x) << S_TX5_DISABLE)
+#define F_TX5_DISABLE V_TX5_DISABLE(1U)
+
+#define S_TX4_DISABLE 4
+#define V_TX4_DISABLE(x) ((x) << S_TX4_DISABLE)
+#define F_TX4_DISABLE V_TX4_DISABLE(1U)
+
+#define S_TX3_DISABLE 3
+#define V_TX3_DISABLE(x) ((x) << S_TX3_DISABLE)
+#define F_TX3_DISABLE V_TX3_DISABLE(1U)
+
+#define S_TX2_DISABLE 2
+#define V_TX2_DISABLE(x) ((x) << S_TX2_DISABLE)
+#define F_TX2_DISABLE V_TX2_DISABLE(1U)
+
+#define S_TX1_DISABLE 1
+#define V_TX1_DISABLE(x) ((x) << S_TX1_DISABLE)
+#define F_TX1_DISABLE V_TX1_DISABLE(1U)
+
+#define S_TX0_DISABLE 0
+#define V_TX0_DISABLE(x) ((x) << S_TX0_DISABLE)
+#define F_TX0_DISABLE V_TX0_DISABLE(1U)
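+
+/*
+ * Naming convention used by the definitions in this file: for a field
+ * FOO, S_FOO is its bit offset, M_FOO its right-justified mask,
+ * V_FOO(x) shifts a value into the field, G_FOO(x) extracts the field
+ * from a register word, and F_FOO is the single-bit form V_FOO(1U);
+ * A_* names are register addresses.  An illustrative composition
+ * (placeholder value `cfg', not from the original commit):
+ *
+ *	v = V_UPCS_PIPE_CONFIG(cfg) | F_TX1_DISABLE | F_TX0_DISABLE;
+ */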
+
#define A_PCIE_PDEBUG_REG_0X0 0x0
#define A_PCIE_PDEBUG_REG_0X1 0x1
#define A_PCIE_PDEBUG_REG_0X2 0x2
@@ -11668,6 +14476,40 @@
#define V_GPIO0_OUT_VAL(x) ((x) << S_GPIO0_OUT_VAL)
#define F_GPIO0_OUT_VAL V_GPIO0_OUT_VAL(1U)
+#define A_DBG_GPIO_OUT 0x6010
+
+#define S_GPIO23_OUT_VAL 23
+#define V_GPIO23_OUT_VAL(x) ((x) << S_GPIO23_OUT_VAL)
+#define F_GPIO23_OUT_VAL V_GPIO23_OUT_VAL(1U)
+
+#define S_GPIO22_OUT_VAL 22
+#define V_GPIO22_OUT_VAL(x) ((x) << S_GPIO22_OUT_VAL)
+#define F_GPIO22_OUT_VAL V_GPIO22_OUT_VAL(1U)
+
+#define S_GPIO21_OUT_VAL 21
+#define V_GPIO21_OUT_VAL(x) ((x) << S_GPIO21_OUT_VAL)
+#define F_GPIO21_OUT_VAL V_GPIO21_OUT_VAL(1U)
+
+#define S_GPIO20_OUT_VAL 20
+#define V_GPIO20_OUT_VAL(x) ((x) << S_GPIO20_OUT_VAL)
+#define F_GPIO20_OUT_VAL V_GPIO20_OUT_VAL(1U)
+
+#define S_T7_GPIO19_OUT_VAL 19
+#define V_T7_GPIO19_OUT_VAL(x) ((x) << S_T7_GPIO19_OUT_VAL)
+#define F_T7_GPIO19_OUT_VAL V_T7_GPIO19_OUT_VAL(1U)
+
+#define S_T7_GPIO18_OUT_VAL 18
+#define V_T7_GPIO18_OUT_VAL(x) ((x) << S_T7_GPIO18_OUT_VAL)
+#define F_T7_GPIO18_OUT_VAL V_T7_GPIO18_OUT_VAL(1U)
+
+#define S_T7_GPIO17_OUT_VAL 17
+#define V_T7_GPIO17_OUT_VAL(x) ((x) << S_T7_GPIO17_OUT_VAL)
+#define F_T7_GPIO17_OUT_VAL V_T7_GPIO17_OUT_VAL(1U)
+
+#define S_T7_GPIO16_OUT_VAL 16
+#define V_T7_GPIO16_OUT_VAL(x) ((x) << S_T7_GPIO16_OUT_VAL)
+#define F_T7_GPIO16_OUT_VAL V_T7_GPIO16_OUT_VAL(1U)
+
#define A_DBG_GPIO_IN 0x6014
#define S_GPIO15_CHG_DET 31
@@ -11798,6 +14640,38 @@
#define V_GPIO0_IN(x) ((x) << S_GPIO0_IN)
#define F_GPIO0_IN V_GPIO0_IN(1U)
+#define S_GPIO23_IN 23
+#define V_GPIO23_IN(x) ((x) << S_GPIO23_IN)
+#define F_GPIO23_IN V_GPIO23_IN(1U)
+
+#define S_GPIO22_IN 22
+#define V_GPIO22_IN(x) ((x) << S_GPIO22_IN)
+#define F_GPIO22_IN V_GPIO22_IN(1U)
+
+#define S_GPIO21_IN 21
+#define V_GPIO21_IN(x) ((x) << S_GPIO21_IN)
+#define F_GPIO21_IN V_GPIO21_IN(1U)
+
+#define S_GPIO20_IN 20
+#define V_GPIO20_IN(x) ((x) << S_GPIO20_IN)
+#define F_GPIO20_IN V_GPIO20_IN(1U)
+
+#define S_T7_GPIO19_IN 19
+#define V_T7_GPIO19_IN(x) ((x) << S_T7_GPIO19_IN)
+#define F_T7_GPIO19_IN V_T7_GPIO19_IN(1U)
+
+#define S_T7_GPIO18_IN 18
+#define V_T7_GPIO18_IN(x) ((x) << S_T7_GPIO18_IN)
+#define F_T7_GPIO18_IN V_T7_GPIO18_IN(1U)
+
+#define S_T7_GPIO17_IN 17
+#define V_T7_GPIO17_IN(x) ((x) << S_T7_GPIO17_IN)
+#define F_T7_GPIO17_IN V_T7_GPIO17_IN(1U)
+
+#define S_T7_GPIO16_IN 16
+#define V_T7_GPIO16_IN(x) ((x) << S_T7_GPIO16_IN)
+#define F_T7_GPIO16_IN V_T7_GPIO16_IN(1U)
+
#define A_DBG_INT_ENABLE 0x6018
#define S_IBM_FDL_FAIL_INT_ENBL 25
@@ -11920,6 +14794,58 @@
#define V_GPIO16(x) ((x) << S_GPIO16)
#define F_GPIO16 V_GPIO16(1U)
+#define S_USBFIFOPARERR 12
+#define V_USBFIFOPARERR(x) ((x) << S_USBFIFOPARERR)
+#define F_USBFIFOPARERR V_USBFIFOPARERR(1U)
+
+#define S_T7_IBM_FDL_FAIL_INT_ENBL 11
+#define V_T7_IBM_FDL_FAIL_INT_ENBL(x) ((x) << S_T7_IBM_FDL_FAIL_INT_ENBL)
+#define F_T7_IBM_FDL_FAIL_INT_ENBL V_T7_IBM_FDL_FAIL_INT_ENBL(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_ENBL 10
+#define V_T7_PLL_LOCK_LOST_INT_ENBL(x) ((x) << S_T7_PLL_LOCK_LOST_INT_ENBL)
+#define F_T7_PLL_LOCK_LOST_INT_ENBL V_T7_PLL_LOCK_LOST_INT_ENBL(1U)
+
+#define S_M1_LOCK 9
+#define V_M1_LOCK(x) ((x) << S_M1_LOCK)
+#define F_M1_LOCK V_M1_LOCK(1U)
+
+#define S_T7_PCIE_LOCK 8
+#define V_T7_PCIE_LOCK(x) ((x) << S_T7_PCIE_LOCK)
+#define F_T7_PCIE_LOCK V_T7_PCIE_LOCK(1U)
+
+#define S_T7_U_LOCK 7
+#define V_T7_U_LOCK(x) ((x) << S_T7_U_LOCK)
+#define F_T7_U_LOCK V_T7_U_LOCK(1U)
+
+#define S_MAC_LOCK 6
+#define V_MAC_LOCK(x) ((x) << S_MAC_LOCK)
+#define F_MAC_LOCK V_MAC_LOCK(1U)
+
+#define S_ARM_LOCK 5
+#define V_ARM_LOCK(x) ((x) << S_ARM_LOCK)
+#define F_ARM_LOCK V_ARM_LOCK(1U)
+
+#define S_M0_LOCK 4
+#define V_M0_LOCK(x) ((x) << S_M0_LOCK)
+#define F_M0_LOCK V_M0_LOCK(1U)
+
+#define S_XGPBUS_LOCK 3
+#define V_XGPBUS_LOCK(x) ((x) << S_XGPBUS_LOCK)
+#define F_XGPBUS_LOCK V_XGPBUS_LOCK(1U)
+
+#define S_XGPHY_LOCK 2
+#define V_XGPHY_LOCK(x) ((x) << S_XGPHY_LOCK)
+#define F_XGPHY_LOCK V_XGPHY_LOCK(1U)
+
+#define S_USB_LOCK 1
+#define V_USB_LOCK(x) ((x) << S_USB_LOCK)
+#define F_USB_LOCK V_USB_LOCK(1U)
+
+#define S_T7_C_LOCK 0
+#define V_T7_C_LOCK(x) ((x) << S_T7_C_LOCK)
+#define F_T7_C_LOCK V_T7_C_LOCK(1U)
+
#define A_DBG_INT_CAUSE 0x601c
#define S_IBM_FDL_FAIL_INT_CAUSE 25
@@ -11938,6 +14864,14 @@
#define V_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_PLL_LOCK_LOST_INT_CAUSE)
#define F_PLL_LOCK_LOST_INT_CAUSE V_PLL_LOCK_LOST_INT_CAUSE(1U)
+#define S_T7_IBM_FDL_FAIL_INT_CAUSE 11
+#define V_T7_IBM_FDL_FAIL_INT_CAUSE(x) ((x) << S_T7_IBM_FDL_FAIL_INT_CAUSE)
+#define F_T7_IBM_FDL_FAIL_INT_CAUSE V_T7_IBM_FDL_FAIL_INT_CAUSE(1U)
+
+#define S_T7_PLL_LOCK_LOST_INT_CAUSE 10
+#define V_T7_PLL_LOCK_LOST_INT_CAUSE(x) ((x) << S_T7_PLL_LOCK_LOST_INT_CAUSE)
+#define F_T7_PLL_LOCK_LOST_INT_CAUSE V_T7_PLL_LOCK_LOST_INT_CAUSE(1U)
+
#define A_DBG_DBG0_RST_VALUE 0x6020
#define S_DEBUGDATA 0
@@ -11977,6 +14911,10 @@
#define V_C_OCLK_EN(x) ((x) << S_C_OCLK_EN)
#define F_C_OCLK_EN V_C_OCLK_EN(1U)
+#define S_INIC_MODE_EN 0
+#define V_INIC_MODE_EN(x) ((x) << S_INIC_MODE_EN)
+#define F_INIC_MODE_EN V_INIC_MODE_EN(1U)
+
#define A_DBG_PLL_LOCK 0x602c
#define S_PLL_P_LOCK 20
@@ -12003,6 +14941,38 @@
#define V_PLL_C_LOCK(x) ((x) << S_PLL_C_LOCK)
#define F_PLL_C_LOCK V_PLL_C_LOCK(1U)
+#define S_T7_PLL_M_LOCK 9
+#define V_T7_PLL_M_LOCK(x) ((x) << S_T7_PLL_M_LOCK)
+#define F_T7_PLL_M_LOCK V_T7_PLL_M_LOCK(1U)
+
+#define S_PLL_PCIE_LOCK 8
+#define V_PLL_PCIE_LOCK(x) ((x) << S_PLL_PCIE_LOCK)
+#define F_PLL_PCIE_LOCK V_PLL_PCIE_LOCK(1U)
+
+#define S_T7_PLL_U_LOCK 7
+#define V_T7_PLL_U_LOCK(x) ((x) << S_T7_PLL_U_LOCK)
+#define F_T7_PLL_U_LOCK V_T7_PLL_U_LOCK(1U)
+
+#define S_PLL_MAC_LOCK 6
+#define V_PLL_MAC_LOCK(x) ((x) << S_PLL_MAC_LOCK)
+#define F_PLL_MAC_LOCK V_PLL_MAC_LOCK(1U)
+
+#define S_PLL_ARM_LOCK 5
+#define V_PLL_ARM_LOCK(x) ((x) << S_PLL_ARM_LOCK)
+#define F_PLL_ARM_LOCK V_PLL_ARM_LOCK(1U)
+
+#define S_PLL_XGPBUS_LOCK 3
+#define V_PLL_XGPBUS_LOCK(x) ((x) << S_PLL_XGPBUS_LOCK)
+#define F_PLL_XGPBUS_LOCK V_PLL_XGPBUS_LOCK(1U)
+
+#define S_PLL_XGPHY_LOCK 2
+#define V_PLL_XGPHY_LOCK(x) ((x) << S_PLL_XGPHY_LOCK)
+#define F_PLL_XGPHY_LOCK V_PLL_XGPHY_LOCK(1U)
+
+#define S_PLL_USB_LOCK 1
+#define V_PLL_USB_LOCK(x) ((x) << S_PLL_USB_LOCK)
+#define F_PLL_USB_LOCK V_PLL_USB_LOCK(1U)
+
#define A_DBG_GPIO_ACT_LOW 0x6030
#define S_P_LOCK_ACT_LOW 21
@@ -12109,6 +15079,48 @@
#define V_GPIO16_ACT_LOW(x) ((x) << S_GPIO16_ACT_LOW)
#define F_GPIO16_ACT_LOW V_GPIO16_ACT_LOW(1U)
+#define A_DBG_PLL_LOCK_ACT_LOW 0x6030
+
+#define S_M1_LOCK_ACT_LOW 9
+#define V_M1_LOCK_ACT_LOW(x) ((x) << S_M1_LOCK_ACT_LOW)
+#define F_M1_LOCK_ACT_LOW V_M1_LOCK_ACT_LOW(1U)
+
+#define S_PCIE_LOCK_ACT_LOW 8
+#define V_PCIE_LOCK_ACT_LOW(x) ((x) << S_PCIE_LOCK_ACT_LOW)
+#define F_PCIE_LOCK_ACT_LOW V_PCIE_LOCK_ACT_LOW(1U)
+
+#define S_T7_U_LOCK_ACT_LOW 7
+#define V_T7_U_LOCK_ACT_LOW(x) ((x) << S_T7_U_LOCK_ACT_LOW)
+#define F_T7_U_LOCK_ACT_LOW V_T7_U_LOCK_ACT_LOW(1U)
+
+#define S_MAC_LOCK_ACT_LOW 6
+#define V_MAC_LOCK_ACT_LOW(x) ((x) << S_MAC_LOCK_ACT_LOW)
+#define F_MAC_LOCK_ACT_LOW V_MAC_LOCK_ACT_LOW(1U)
+
+#define S_ARM_LOCK_ACT_LOW 5
+#define V_ARM_LOCK_ACT_LOW(x) ((x) << S_ARM_LOCK_ACT_LOW)
+#define F_ARM_LOCK_ACT_LOW V_ARM_LOCK_ACT_LOW(1U)
+
+#define S_M0_LOCK_ACT_LOW 4
+#define V_M0_LOCK_ACT_LOW(x) ((x) << S_M0_LOCK_ACT_LOW)
+#define F_M0_LOCK_ACT_LOW V_M0_LOCK_ACT_LOW(1U)
+
+#define S_XGPBUS_LOCK_ACT_LOW 3
+#define V_XGPBUS_LOCK_ACT_LOW(x) ((x) << S_XGPBUS_LOCK_ACT_LOW)
+#define F_XGPBUS_LOCK_ACT_LOW V_XGPBUS_LOCK_ACT_LOW(1U)
+
+#define S_XGPHY_LOCK_ACT_LOW 2
+#define V_XGPHY_LOCK_ACT_LOW(x) ((x) << S_XGPHY_LOCK_ACT_LOW)
+#define F_XGPHY_LOCK_ACT_LOW V_XGPHY_LOCK_ACT_LOW(1U)
+
+#define S_USB_LOCK_ACT_LOW 1
+#define V_USB_LOCK_ACT_LOW(x) ((x) << S_USB_LOCK_ACT_LOW)
+#define F_USB_LOCK_ACT_LOW V_USB_LOCK_ACT_LOW(1U)
+
+#define S_T7_C_LOCK_ACT_LOW 0
+#define V_T7_C_LOCK_ACT_LOW(x) ((x) << S_T7_C_LOCK_ACT_LOW)
+#define F_T7_C_LOCK_ACT_LOW V_T7_C_LOCK_ACT_LOW(1U)
+
#define A_DBG_EFUSE_BYTE0_3 0x6034
#define A_DBG_EFUSE_BYTE4_7 0x6038
#define A_DBG_EFUSE_BYTE8_11 0x603c
@@ -12140,6 +15152,32 @@
#define V_STATIC_U_PLL_TUNE(x) ((x) << S_STATIC_U_PLL_TUNE)
#define G_STATIC_U_PLL_TUNE(x) (((x) >> S_STATIC_U_PLL_TUNE) & M_STATIC_U_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF1 0x6044
+
+#define S_STATIC_U_PLL_RANGE 22
+#define M_STATIC_U_PLL_RANGE 0x7U
+#define V_STATIC_U_PLL_RANGE(x) ((x) << S_STATIC_U_PLL_RANGE)
+#define G_STATIC_U_PLL_RANGE(x) (((x) >> S_STATIC_U_PLL_RANGE) & M_STATIC_U_PLL_RANGE)
+
+#define S_STATIC_U_PLL_DIVQ 17
+#define M_STATIC_U_PLL_DIVQ 0x1fU
+#define V_STATIC_U_PLL_DIVQ(x) ((x) << S_STATIC_U_PLL_DIVQ)
+#define G_STATIC_U_PLL_DIVQ(x) (((x) >> S_STATIC_U_PLL_DIVQ) & M_STATIC_U_PLL_DIVQ)
+
+#define S_STATIC_U_PLL_DIVFI 8
+#define M_STATIC_U_PLL_DIVFI 0x1ffU
+#define V_STATIC_U_PLL_DIVFI(x) ((x) << S_STATIC_U_PLL_DIVFI)
+#define G_STATIC_U_PLL_DIVFI(x) (((x) >> S_STATIC_U_PLL_DIVFI) & M_STATIC_U_PLL_DIVFI)
+
+#define S_STATIC_U_PLL_DIVR 2
+#define M_STATIC_U_PLL_DIVR 0x3fU
+#define V_STATIC_U_PLL_DIVR(x) ((x) << S_STATIC_U_PLL_DIVR)
+#define G_STATIC_U_PLL_DIVR(x) (((x) >> S_STATIC_U_PLL_DIVR) & M_STATIC_U_PLL_DIVR)
+
+#define S_T7_1_STATIC_U_PLL_BYPASS 1
+#define V_T7_1_STATIC_U_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_U_PLL_BYPASS)
+#define F_T7_1_STATIC_U_PLL_BYPASS V_T7_1_STATIC_U_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_C_PLL_CONF 0x6048
#define S_STATIC_C_PLL_MULT 23
@@ -12167,6 +15205,26 @@
#define V_STATIC_C_PLL_TUNE(x) ((x) << S_STATIC_C_PLL_TUNE)
#define G_STATIC_C_PLL_TUNE(x) (((x) >> S_STATIC_C_PLL_TUNE) & M_STATIC_C_PLL_TUNE)
+#define A_T7_DBG_STATIC_U_PLL_CONF2 0x6048
+
+#define S_STATIC_U_PLL_SSMF 5
+#define M_STATIC_U_PLL_SSMF 0xfU
+#define V_STATIC_U_PLL_SSMF(x) ((x) << S_STATIC_U_PLL_SSMF)
+#define G_STATIC_U_PLL_SSMF(x) (((x) >> S_STATIC_U_PLL_SSMF) & M_STATIC_U_PLL_SSMF)
+
+#define S_STATIC_U_PLL_SSMD 2
+#define M_STATIC_U_PLL_SSMD 0x7U
+#define V_STATIC_U_PLL_SSMD(x) ((x) << S_STATIC_U_PLL_SSMD)
+#define G_STATIC_U_PLL_SSMD(x) (((x) >> S_STATIC_U_PLL_SSMD) & M_STATIC_U_PLL_SSMD)
+
+#define S_STATIC_U_PLL_SSDS 1
+#define V_STATIC_U_PLL_SSDS(x) ((x) << S_STATIC_U_PLL_SSDS)
+#define F_STATIC_U_PLL_SSDS V_STATIC_U_PLL_SSDS(1U)
+
+#define S_STATIC_U_PLL_SSE 0
+#define V_STATIC_U_PLL_SSE(x) ((x) << S_STATIC_U_PLL_SSE)
+#define F_STATIC_U_PLL_SSE V_STATIC_U_PLL_SSE(1U)
+
#define A_DBG_STATIC_M_PLL_CONF 0x604c
#define S_STATIC_M_PLL_MULT 23
@@ -12194,6 +15252,32 @@
#define V_STATIC_M_PLL_TUNE(x) ((x) << S_STATIC_M_PLL_TUNE)
#define G_STATIC_M_PLL_TUNE(x) (((x) >> S_STATIC_M_PLL_TUNE) & M_STATIC_M_PLL_TUNE)
+#define A_T7_DBG_STATIC_C_PLL_CONF1 0x604c
+
+#define S_STATIC_C_PLL_RANGE 22
+#define M_STATIC_C_PLL_RANGE 0x7U
+#define V_STATIC_C_PLL_RANGE(x) ((x) << S_STATIC_C_PLL_RANGE)
+#define G_STATIC_C_PLL_RANGE(x) (((x) >> S_STATIC_C_PLL_RANGE) & M_STATIC_C_PLL_RANGE)
+
+#define S_STATIC_C_PLL_DIVQ 17
+#define M_STATIC_C_PLL_DIVQ 0x1fU
+#define V_STATIC_C_PLL_DIVQ(x) ((x) << S_STATIC_C_PLL_DIVQ)
+#define G_STATIC_C_PLL_DIVQ(x) (((x) >> S_STATIC_C_PLL_DIVQ) & M_STATIC_C_PLL_DIVQ)
+
+#define S_STATIC_C_PLL_DIVFI 8
+#define M_STATIC_C_PLL_DIVFI 0x1ffU
+#define V_STATIC_C_PLL_DIVFI(x) ((x) << S_STATIC_C_PLL_DIVFI)
+#define G_STATIC_C_PLL_DIVFI(x) (((x) >> S_STATIC_C_PLL_DIVFI) & M_STATIC_C_PLL_DIVFI)
+
+#define S_STATIC_C_PLL_DIVR 2
+#define M_STATIC_C_PLL_DIVR 0x3fU
+#define V_STATIC_C_PLL_DIVR(x) ((x) << S_STATIC_C_PLL_DIVR)
+#define G_STATIC_C_PLL_DIVR(x) (((x) >> S_STATIC_C_PLL_DIVR) & M_STATIC_C_PLL_DIVR)
+
+#define S_T7_1_STATIC_C_PLL_BYPASS 1
+#define V_T7_1_STATIC_C_PLL_BYPASS(x) ((x) << S_T7_1_STATIC_C_PLL_BYPASS)
+#define F_T7_1_STATIC_C_PLL_BYPASS V_T7_1_STATIC_C_PLL_BYPASS(1U)
+
#define A_DBG_STATIC_KX_PLL_CONF 0x6050
#define S_STATIC_KX_PLL_C 21
@@ -12226,6 +15310,26 @@
#define V_STATIC_KX_PLL_P(x) ((x) << S_STATIC_KX_PLL_P)
#define G_STATIC_KX_PLL_P(x) (((x) >> S_STATIC_KX_PLL_P) & M_STATIC_KX_PLL_P)
+#define A_T7_DBG_STATIC_C_PLL_CONF2 0x6050
+
+#define S_STATIC_C_PLL_SSMF 5
+#define M_STATIC_C_PLL_SSMF 0xfU
+#define V_STATIC_C_PLL_SSMF(x) ((x) << S_STATIC_C_PLL_SSMF)
+#define G_STATIC_C_PLL_SSMF(x) (((x) >> S_STATIC_C_PLL_SSMF) & M_STATIC_C_PLL_SSMF)
+
+#define S_STATIC_C_PLL_SSMD 2
+#define M_STATIC_C_PLL_SSMD 0x7U
+#define V_STATIC_C_PLL_SSMD(x) ((x) << S_STATIC_C_PLL_SSMD)
+#define G_STATIC_C_PLL_SSMD(x) (((x) >> S_STATIC_C_PLL_SSMD) & M_STATIC_C_PLL_SSMD)
+
+#define S_STATIC_C_PLL_SSDS 1
+#define V_STATIC_C_PLL_SSDS(x) ((x) << S_STATIC_C_PLL_SSDS)
+#define F_STATIC_C_PLL_SSDS V_STATIC_C_PLL_SSDS(1U)
+
+#define S_STATIC_C_PLL_SSE 0
+#define V_STATIC_C_PLL_SSE(x) ((x) << S_STATIC_C_PLL_SSE)
+#define F_STATIC_C_PLL_SSE V_STATIC_C_PLL_SSE(1U)
+
#define A_DBG_STATIC_KR_PLL_CONF 0x6054
#define S_STATIC_KR_PLL_C 21
@@ -12258,6 +15362,38 @@
#define V_STATIC_KR_PLL_P(x) ((x) << S_STATIC_KR_PLL_P)
#define G_STATIC_KR_PLL_P(x) (((x) >> S_STATIC_KR_PLL_P) & M_STATIC_KR_PLL_P)
+#define A_DBG_STATIC_PLL_DFS_CONF 0x6054
+
+#define S_STATIC_U_DFS_ACK 23
+#define V_STATIC_U_DFS_ACK(x) ((x) << S_STATIC_U_DFS_ACK)
+#define F_STATIC_U_DFS_ACK V_STATIC_U_DFS_ACK(1U)
+
+#define S_STATIC_C_DFS_ACK 22
+#define V_STATIC_C_DFS_ACK(x) ((x) << S_STATIC_C_DFS_ACK)
+#define F_STATIC_C_DFS_ACK V_STATIC_C_DFS_ACK(1U)
+
+#define S_STATIC_U_DFS_DIVFI 13
+#define M_STATIC_U_DFS_DIVFI 0x1ffU
+#define V_STATIC_U_DFS_DIVFI(x) ((x) << S_STATIC_U_DFS_DIVFI)
+#define G_STATIC_U_DFS_DIVFI(x) (((x) >> S_STATIC_U_DFS_DIVFI) & M_STATIC_U_DFS_DIVFI)
+
+#define S_STATIC_U_DFS_NEWDIV 12
+#define V_STATIC_U_DFS_NEWDIV(x) ((x) << S_STATIC_U_DFS_NEWDIV)
+#define F_STATIC_U_DFS_NEWDIV V_STATIC_U_DFS_NEWDIV(1U)
+
+#define S_T7_STATIC_U_DFS_ENABLE 11
+#define V_T7_STATIC_U_DFS_ENABLE(x) ((x) << S_T7_STATIC_U_DFS_ENABLE)
+#define F_T7_STATIC_U_DFS_ENABLE V_T7_STATIC_U_DFS_ENABLE(1U)
+
+#define S_STATIC_C_DFS_DIVFI 2
+#define M_STATIC_C_DFS_DIVFI 0x1ffU
+#define V_STATIC_C_DFS_DIVFI(x) ((x) << S_STATIC_C_DFS_DIVFI)
+#define G_STATIC_C_DFS_DIVFI(x) (((x) >> S_STATIC_C_DFS_DIVFI) & M_STATIC_C_DFS_DIVFI)
+
+#define S_STATIC_C_DFS_NEWDIV 1
+#define V_STATIC_C_DFS_NEWDIV(x) ((x) << S_STATIC_C_DFS_NEWDIV)
+#define F_STATIC_C_DFS_NEWDIV V_STATIC_C_DFS_NEWDIV(1U)
+
#define A_DBG_EXTRA_STATIC_BITS_CONF 0x6058
#define S_STATIC_M_PLL_RESET 30
@@ -12343,6 +15479,14 @@
#define V_PSRO_SEL(x) ((x) << S_PSRO_SEL)
#define G_PSRO_SEL(x) (((x) >> S_PSRO_SEL) & M_PSRO_SEL)
+#define S_T7_STATIC_LVDS_CLKOUT_EN 21
+#define V_T7_STATIC_LVDS_CLKOUT_EN(x) ((x) << S_T7_STATIC_LVDS_CLKOUT_EN)
+#define F_T7_STATIC_LVDS_CLKOUT_EN V_T7_STATIC_LVDS_CLKOUT_EN(1U)
+
+#define S_T7_EXPHYCLK_SEL_EN 16
+#define V_T7_EXPHYCLK_SEL_EN(x) ((x) << S_T7_EXPHYCLK_SEL_EN)
+#define F_T7_EXPHYCLK_SEL_EN V_T7_EXPHYCLK_SEL_EN(1U)
+
#define A_DBG_STATIC_OCLK_MUXSEL_CONF 0x605c
#define S_M_OCLK_MUXSEL 12
@@ -12467,16 +15611,6 @@
#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0)
#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0)
-#define S_T6_RD_ADDR1 11
-#define M_T6_RD_ADDR1 0x1ffU
-#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1)
-#define G_T6_RD_ADDR1(x) (((x) >> S_T6_RD_ADDR1) & M_T6_RD_ADDR1)
-
-#define S_T6_RD_ADDR0 2
-#define M_T6_RD_ADDR0 0x1ffU
-#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0)
-#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0)
-
#define A_DBG_TRACE_WRADDR 0x6090
#define S_WR_POINTER_ADDR1 16
@@ -12499,16 +15633,6 @@
#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0)
#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0)
-#define S_T6_WR_POINTER_ADDR1 16
-#define M_T6_WR_POINTER_ADDR1 0x1ffU
-#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1)
-#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1)
-
-#define S_T6_WR_POINTER_ADDR0 0
-#define M_T6_WR_POINTER_ADDR0 0x1ffU
-#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0)
-#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0)
-
#define A_DBG_TRACE0_DATA_OUT 0x6094
#define A_DBG_TRACE1_DATA_OUT 0x6098
#define A_DBG_FUSE_SENSE_DONE 0x609c
@@ -12575,7 +15699,52 @@
#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST)
#define F_T6_TVSENSE_RST V_T6_TVSENSE_RST(1U)
+#define A_DBG_PVT_EN1 0x60a8
+
+#define S_PVT_TRIMO 18
+#define M_PVT_TRIMO 0x3fU
+#define V_PVT_TRIMO(x) ((x) << S_PVT_TRIMO)
+#define G_PVT_TRIMO(x) (((x) >> S_PVT_TRIMO) & M_PVT_TRIMO)
+
+#define S_PVT_TRIMG 13
+#define M_PVT_TRIMG 0x1fU
+#define V_PVT_TRIMG(x) ((x) << S_PVT_TRIMG)
+#define G_PVT_TRIMG(x) (((x) >> S_PVT_TRIMG) & M_PVT_TRIMG)
+
+#define S_PVT_VSAMPLE 12
+#define V_PVT_VSAMPLE(x) ((x) << S_PVT_VSAMPLE)
+#define F_PVT_VSAMPLE V_PVT_VSAMPLE(1U)
+
+#define S_PVT_PSAMPLE 10
+#define M_PVT_PSAMPLE 0x3U
+#define V_PVT_PSAMPLE(x) ((x) << S_PVT_PSAMPLE)
+#define G_PVT_PSAMPLE(x) (((x) >> S_PVT_PSAMPLE) & M_PVT_PSAMPLE)
+
+#define S_PVT_ENA 9
+#define V_PVT_ENA(x) ((x) << S_PVT_ENA)
+#define F_PVT_ENA V_PVT_ENA(1U)
+
+#define S_PVT_RESET 8
+#define V_PVT_RESET(x) ((x) << S_PVT_RESET)
+#define F_PVT_RESET V_PVT_RESET(1U)
+
+#define S_PVT_DIV 0
+#define M_PVT_DIV 0xffU
+#define V_PVT_DIV(x) ((x) << S_PVT_DIV)
+#define G_PVT_DIV(x) (((x) >> S_PVT_DIV) & M_PVT_DIV)
+
#define A_DBG_CUST_EFUSE_OUT_EN 0x60ac
+#define A_DBG_PVT_EN2 0x60ac
+
+#define S_PVT_DATA_OUT 1
+#define M_PVT_DATA_OUT 0x3ffU
+#define V_PVT_DATA_OUT(x) ((x) << S_PVT_DATA_OUT)
+#define G_PVT_DATA_OUT(x) (((x) >> S_PVT_DATA_OUT) & M_PVT_DATA_OUT)
+
+#define S_PVT_DATA_VALID 0
+#define V_PVT_DATA_VALID(x) ((x) << S_PVT_DATA_VALID)
+#define F_PVT_DATA_VALID V_PVT_DATA_VALID(1U)
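+
+/*
+ * A plausible, unverified read sequence for the PVT sensor using the
+ * fields above: poll A_DBG_PVT_EN2 until F_PVT_DATA_VALID is set, then
+ * take the 10-bit sample with G_PVT_DATA_OUT(val).
+ */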
+
#define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0
#define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4
@@ -12638,6 +15807,36 @@
#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE)
#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_M0_PLL_CONF1 0x60b8
+
+#define S_STATIC_M0_PLL_RANGE 22
+#define M_STATIC_M0_PLL_RANGE 0x7U
+#define V_STATIC_M0_PLL_RANGE(x) ((x) << S_STATIC_M0_PLL_RANGE)
+#define G_STATIC_M0_PLL_RANGE(x) (((x) >> S_STATIC_M0_PLL_RANGE) & M_STATIC_M0_PLL_RANGE)
+
+#define S_STATIC_M0_PLL_DIVQ 17
+#define M_STATIC_M0_PLL_DIVQ 0x1fU
+#define V_STATIC_M0_PLL_DIVQ(x) ((x) << S_STATIC_M0_PLL_DIVQ)
+#define G_STATIC_M0_PLL_DIVQ(x) (((x) >> S_STATIC_M0_PLL_DIVQ) & M_STATIC_M0_PLL_DIVQ)
+
+#define S_STATIC_M0_PLL_DIVFI 8
+#define M_STATIC_M0_PLL_DIVFI 0x1ffU
+#define V_STATIC_M0_PLL_DIVFI(x) ((x) << S_STATIC_M0_PLL_DIVFI)
+#define G_STATIC_M0_PLL_DIVFI(x) (((x) >> S_STATIC_M0_PLL_DIVFI) & M_STATIC_M0_PLL_DIVFI)
+
+#define S_STATIC_M0_PLL_DIVR 2
+#define M_STATIC_M0_PLL_DIVR 0x3fU
+#define V_STATIC_M0_PLL_DIVR(x) ((x) << S_STATIC_M0_PLL_DIVR)
+#define G_STATIC_M0_PLL_DIVR(x) (((x) >> S_STATIC_M0_PLL_DIVR) & M_STATIC_M0_PLL_DIVR)
+
+#define S_STATIC_M0_PLL_BYPASS 1
+#define V_STATIC_M0_PLL_BYPASS(x) ((x) << S_STATIC_M0_PLL_BYPASS)
+#define F_STATIC_M0_PLL_BYPASS V_STATIC_M0_PLL_BYPASS(1U)
+
+#define S_STATIC_M0_PLL_RESET 0
+#define V_STATIC_M0_PLL_RESET(x) ((x) << S_STATIC_M0_PLL_RESET)
+#define F_STATIC_M0_PLL_RESET V_STATIC_M0_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc
#define S_T5_STATIC_M_PLL_DCO_BYPASS 23
@@ -12715,6 +15914,50 @@
#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE)
#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M0_PLL_CONF2 0x60bc
+
+#define S_T7_STATIC_SWMC1RST_ 14
+#define V_T7_STATIC_SWMC1RST_(x) ((x) << S_T7_STATIC_SWMC1RST_)
+#define F_T7_STATIC_SWMC1RST_ V_T7_STATIC_SWMC1RST_(1U)
+
+#define S_T7_STATIC_SWMC1CFGRST_ 13
+#define V_T7_STATIC_SWMC1CFGRST_(x) ((x) << S_T7_STATIC_SWMC1CFGRST_)
+#define F_T7_STATIC_SWMC1CFGRST_ V_T7_STATIC_SWMC1CFGRST_(1U)
+
+#define S_T7_STATIC_PHY0RECRST_ 12
+#define V_T7_STATIC_PHY0RECRST_(x) ((x) << S_T7_STATIC_PHY0RECRST_)
+#define F_T7_STATIC_PHY0RECRST_ V_T7_STATIC_PHY0RECRST_(1U)
+
+#define S_T7_STATIC_PHY1RECRST_ 11
+#define V_T7_STATIC_PHY1RECRST_(x) ((x) << S_T7_STATIC_PHY1RECRST_)
+#define F_T7_STATIC_PHY1RECRST_ V_T7_STATIC_PHY1RECRST_(1U)
+
+#define S_T7_STATIC_SWMC0RST_ 10
+#define V_T7_STATIC_SWMC0RST_(x) ((x) << S_T7_STATIC_SWMC0RST_)
+#define F_T7_STATIC_SWMC0RST_ V_T7_STATIC_SWMC0RST_(1U)
+
+#define S_T7_STATIC_SWMC0CFGRST_ 9
+#define V_T7_STATIC_SWMC0CFGRST_(x) ((x) << S_T7_STATIC_SWMC0CFGRST_)
+#define F_T7_STATIC_SWMC0CFGRST_ V_T7_STATIC_SWMC0CFGRST_(1U)
+
+#define S_STATIC_M0_PLL_SSMF 5
+#define M_STATIC_M0_PLL_SSMF 0xfU
+#define V_STATIC_M0_PLL_SSMF(x) ((x) << S_STATIC_M0_PLL_SSMF)
+#define G_STATIC_M0_PLL_SSMF(x) (((x) >> S_STATIC_M0_PLL_SSMF) & M_STATIC_M0_PLL_SSMF)
+
+#define S_STATIC_M0_PLL_SSMD 2
+#define M_STATIC_M0_PLL_SSMD 0x7U
+#define V_STATIC_M0_PLL_SSMD(x) ((x) << S_STATIC_M0_PLL_SSMD)
+#define G_STATIC_M0_PLL_SSMD(x) (((x) >> S_STATIC_M0_PLL_SSMD) & M_STATIC_M0_PLL_SSMD)
+
+#define S_STATIC_M0_PLL_SSDS 1
+#define V_STATIC_M0_PLL_SSDS(x) ((x) << S_STATIC_M0_PLL_SSDS)
+#define F_STATIC_M0_PLL_SSDS V_STATIC_M0_PLL_SSDS(1U)
+
+#define S_STATIC_M0_PLL_SSE 0
+#define V_STATIC_M0_PLL_SSE(x) ((x) << S_STATIC_M0_PLL_SSE)
+#define F_STATIC_M0_PLL_SSE V_STATIC_M0_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0
#define S_T5_STATIC_M_PLL_MULTPRE 30
@@ -12778,8 +16021,58 @@
#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA)
#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA)
+#define A_DBG_STATIC_MAC_PLL_CONF1 0x60c0
+
+#define S_STATIC_MAC_PLL_RANGE 22
+#define M_STATIC_MAC_PLL_RANGE 0x7U
+#define V_STATIC_MAC_PLL_RANGE(x) ((x) << S_STATIC_MAC_PLL_RANGE)
+#define G_STATIC_MAC_PLL_RANGE(x) (((x) >> S_STATIC_MAC_PLL_RANGE) & M_STATIC_MAC_PLL_RANGE)
+
+#define S_STATIC_MAC_PLL_DIVQ 17
+#define M_STATIC_MAC_PLL_DIVQ 0x1fU
+#define V_STATIC_MAC_PLL_DIVQ(x) ((x) << S_STATIC_MAC_PLL_DIVQ)
+#define G_STATIC_MAC_PLL_DIVQ(x) (((x) >> S_STATIC_MAC_PLL_DIVQ) & M_STATIC_MAC_PLL_DIVQ)
+
+#define S_STATIC_MAC_PLL_DIVFI 8
+#define M_STATIC_MAC_PLL_DIVFI 0x1ffU
+#define V_STATIC_MAC_PLL_DIVFI(x) ((x) << S_STATIC_MAC_PLL_DIVFI)
+#define G_STATIC_MAC_PLL_DIVFI(x) (((x) >> S_STATIC_MAC_PLL_DIVFI) & M_STATIC_MAC_PLL_DIVFI)
+
+#define S_STATIC_MAC_PLL_DIVR 2
+#define M_STATIC_MAC_PLL_DIVR 0x3fU
+#define V_STATIC_MAC_PLL_DIVR(x) ((x) << S_STATIC_MAC_PLL_DIVR)
+#define G_STATIC_MAC_PLL_DIVR(x) (((x) >> S_STATIC_MAC_PLL_DIVR) & M_STATIC_MAC_PLL_DIVR)
+
+#define S_STATIC_MAC_PLL_BYPASS 1
+#define V_STATIC_MAC_PLL_BYPASS(x) ((x) << S_STATIC_MAC_PLL_BYPASS)
+#define F_STATIC_MAC_PLL_BYPASS V_STATIC_MAC_PLL_BYPASS(1U)
+
+#define S_STATIC_MAC_PLL_RESET 0
+#define V_STATIC_MAC_PLL_RESET(x) ((x) << S_STATIC_MAC_PLL_RESET)
+#define F_STATIC_MAC_PLL_RESET V_STATIC_MAC_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4
#define A_DBG_STATIC_M_PLL_CONF4 0x60c4
+#define A_DBG_STATIC_MAC_PLL_CONF2 0x60c4
+
+#define S_STATIC_MAC_PLL_SSMF 5
+#define M_STATIC_MAC_PLL_SSMF 0xfU
+#define V_STATIC_MAC_PLL_SSMF(x) ((x) << S_STATIC_MAC_PLL_SSMF)
+#define G_STATIC_MAC_PLL_SSMF(x) (((x) >> S_STATIC_MAC_PLL_SSMF) & M_STATIC_MAC_PLL_SSMF)
+
+#define S_STATIC_MAC_PLL_SSMD 2
+#define M_STATIC_MAC_PLL_SSMD 0x7U
+#define V_STATIC_MAC_PLL_SSMD(x) ((x) << S_STATIC_MAC_PLL_SSMD)
+#define G_STATIC_MAC_PLL_SSMD(x) (((x) >> S_STATIC_MAC_PLL_SSMD) & M_STATIC_MAC_PLL_SSMD)
+
+#define S_STATIC_MAC_PLL_SSDS 1
+#define V_STATIC_MAC_PLL_SSDS(x) ((x) << S_STATIC_MAC_PLL_SSDS)
+#define F_STATIC_MAC_PLL_SSDS V_STATIC_MAC_PLL_SSDS(1U)
+
+#define S_STATIC_MAC_PLL_SSE 0
+#define V_STATIC_MAC_PLL_SSE(x) ((x) << S_STATIC_MAC_PLL_SSE)
+#define F_STATIC_MAC_PLL_SSE V_STATIC_MAC_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8
#define S_T5_STATIC_M_PLL_VCVTUNE 24
@@ -12835,6 +16128,36 @@
#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT)
#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT)
+#define A_DBG_STATIC_ARM_PLL_CONF1 0x60c8
+
+#define S_STATIC_ARM_PLL_RANGE 22
+#define M_STATIC_ARM_PLL_RANGE 0x7U
+#define V_STATIC_ARM_PLL_RANGE(x) ((x) << S_STATIC_ARM_PLL_RANGE)
+#define G_STATIC_ARM_PLL_RANGE(x) (((x) >> S_STATIC_ARM_PLL_RANGE) & M_STATIC_ARM_PLL_RANGE)
+
+#define S_STATIC_ARM_PLL_DIVQ 17
+#define M_STATIC_ARM_PLL_DIVQ 0x1fU
+#define V_STATIC_ARM_PLL_DIVQ(x) ((x) << S_STATIC_ARM_PLL_DIVQ)
+#define G_STATIC_ARM_PLL_DIVQ(x) (((x) >> S_STATIC_ARM_PLL_DIVQ) & M_STATIC_ARM_PLL_DIVQ)
+
+#define S_STATIC_ARM_PLL_DIVFI 8
+#define M_STATIC_ARM_PLL_DIVFI 0x1ffU
+#define V_STATIC_ARM_PLL_DIVFI(x) ((x) << S_STATIC_ARM_PLL_DIVFI)
+#define G_STATIC_ARM_PLL_DIVFI(x) (((x) >> S_STATIC_ARM_PLL_DIVFI) & M_STATIC_ARM_PLL_DIVFI)
+
+#define S_STATIC_ARM_PLL_DIVR 2
+#define M_STATIC_ARM_PLL_DIVR 0x3fU
+#define V_STATIC_ARM_PLL_DIVR(x) ((x) << S_STATIC_ARM_PLL_DIVR)
+#define G_STATIC_ARM_PLL_DIVR(x) (((x) >> S_STATIC_ARM_PLL_DIVR) & M_STATIC_ARM_PLL_DIVR)
+
+#define S_STATIC_ARM_PLL_BYPASS 1
+#define V_STATIC_ARM_PLL_BYPASS(x) ((x) << S_STATIC_ARM_PLL_BYPASS)
+#define F_STATIC_ARM_PLL_BYPASS V_STATIC_ARM_PLL_BYPASS(1U)
+
+#define S_STATIC_ARM_PLL_RESET 0
+#define V_STATIC_ARM_PLL_RESET(x) ((x) << S_STATIC_ARM_PLL_RESET)
+#define F_STATIC_ARM_PLL_RESET V_STATIC_ARM_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc
#define S_T5_STATIC_PHY0RECRST_ 5
@@ -12913,6 +16236,26 @@
#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_)
#define F_STATIC_SWMC1CFGRST_ V_STATIC_SWMC1CFGRST_(1U)
+#define A_DBG_STATIC_ARM_PLL_CONF2 0x60cc
+
+#define S_STATIC_ARM_PLL_SSMF 5
+#define M_STATIC_ARM_PLL_SSMF 0xfU
+#define V_STATIC_ARM_PLL_SSMF(x) ((x) << S_STATIC_ARM_PLL_SSMF)
+#define G_STATIC_ARM_PLL_SSMF(x) (((x) >> S_STATIC_ARM_PLL_SSMF) & M_STATIC_ARM_PLL_SSMF)
+
+#define S_STATIC_ARM_PLL_SSMD 2
+#define M_STATIC_ARM_PLL_SSMD 0x7U
+#define V_STATIC_ARM_PLL_SSMD(x) ((x) << S_STATIC_ARM_PLL_SSMD)
+#define G_STATIC_ARM_PLL_SSMD(x) (((x) >> S_STATIC_ARM_PLL_SSMD) & M_STATIC_ARM_PLL_SSMD)
+
+#define S_STATIC_ARM_PLL_SSDS 1
+#define V_STATIC_ARM_PLL_SSDS(x) ((x) << S_STATIC_ARM_PLL_SSDS)
+#define F_STATIC_ARM_PLL_SSDS V_STATIC_ARM_PLL_SSDS(1U)
+
+#define S_STATIC_ARM_PLL_SSE 0
+#define V_STATIC_ARM_PLL_SSE(x) ((x) << S_STATIC_ARM_PLL_SSE)
+#define F_STATIC_ARM_PLL_SSE V_STATIC_ARM_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0
#define S_T5_STATIC_C_PLL_MULTFRAC 8
@@ -12937,6 +16280,36 @@
#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE)
#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_USB_PLL_CONF1 0x60d0
+
+#define S_STATIC_USB_PLL_RANGE 22
+#define M_STATIC_USB_PLL_RANGE 0x7U
+#define V_STATIC_USB_PLL_RANGE(x) ((x) << S_STATIC_USB_PLL_RANGE)
+#define G_STATIC_USB_PLL_RANGE(x) (((x) >> S_STATIC_USB_PLL_RANGE) & M_STATIC_USB_PLL_RANGE)
+
+#define S_STATIC_USB_PLL_DIVQ 17
+#define M_STATIC_USB_PLL_DIVQ 0x1fU
+#define V_STATIC_USB_PLL_DIVQ(x) ((x) << S_STATIC_USB_PLL_DIVQ)
+#define G_STATIC_USB_PLL_DIVQ(x) (((x) >> S_STATIC_USB_PLL_DIVQ) & M_STATIC_USB_PLL_DIVQ)
+
+#define S_STATIC_USB_PLL_DIVFI 8
+#define M_STATIC_USB_PLL_DIVFI 0x1ffU
+#define V_STATIC_USB_PLL_DIVFI(x) ((x) << S_STATIC_USB_PLL_DIVFI)
+#define G_STATIC_USB_PLL_DIVFI(x) (((x) >> S_STATIC_USB_PLL_DIVFI) & M_STATIC_USB_PLL_DIVFI)
+
+#define S_STATIC_USB_PLL_DIVR 2
+#define M_STATIC_USB_PLL_DIVR 0x3fU
+#define V_STATIC_USB_PLL_DIVR(x) ((x) << S_STATIC_USB_PLL_DIVR)
+#define G_STATIC_USB_PLL_DIVR(x) (((x) >> S_STATIC_USB_PLL_DIVR) & M_STATIC_USB_PLL_DIVR)
+
+#define S_STATIC_USB_PLL_BYPASS 1
+#define V_STATIC_USB_PLL_BYPASS(x) ((x) << S_STATIC_USB_PLL_BYPASS)
+#define F_STATIC_USB_PLL_BYPASS V_STATIC_USB_PLL_BYPASS(1U)
+
+#define S_STATIC_USB_PLL_RESET 0
+#define V_STATIC_USB_PLL_RESET(x) ((x) << S_STATIC_USB_PLL_RESET)
+#define F_STATIC_USB_PLL_RESET V_STATIC_USB_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4
#define S_T5_STATIC_C_PLL_DCO_BYPASS 23
@@ -13019,6 +16392,26 @@
#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE)
#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE)
+#define A_DBG_STATIC_USB_PLL_CONF2 0x60d4
+
+#define S_STATIC_USB_PLL_SSMF 5
+#define M_STATIC_USB_PLL_SSMF 0xfU
+#define V_STATIC_USB_PLL_SSMF(x) ((x) << S_STATIC_USB_PLL_SSMF)
+#define G_STATIC_USB_PLL_SSMF(x) (((x) >> S_STATIC_USB_PLL_SSMF) & M_STATIC_USB_PLL_SSMF)
+
+#define S_STATIC_USB_PLL_SSMD 2
+#define M_STATIC_USB_PLL_SSMD 0x7U
+#define V_STATIC_USB_PLL_SSMD(x) ((x) << S_STATIC_USB_PLL_SSMD)
+#define G_STATIC_USB_PLL_SSMD(x) (((x) >> S_STATIC_USB_PLL_SSMD) & M_STATIC_USB_PLL_SSMD)
+
+#define S_STATIC_USB_PLL_SSDS 1
+#define V_STATIC_USB_PLL_SSDS(x) ((x) << S_STATIC_USB_PLL_SSDS)
+#define F_STATIC_USB_PLL_SSDS V_STATIC_USB_PLL_SSDS(1U)
+
+#define S_STATIC_USB_PLL_SSE 0
+#define V_STATIC_USB_PLL_SSE(x) ((x) << S_STATIC_USB_PLL_SSE)
+#define F_STATIC_USB_PLL_SSE V_STATIC_USB_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8
#define S_T5_STATIC_C_PLL_MULTPRE 30
@@ -13082,8 +16475,58 @@
#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA)
#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA)
+#define A_DBG_STATIC_XGPHY_PLL_CONF1 0x60d8
+
+#define S_STATIC_XGPHY_PLL_RANGE 22
+#define M_STATIC_XGPHY_PLL_RANGE 0x7U
+#define V_STATIC_XGPHY_PLL_RANGE(x) ((x) << S_STATIC_XGPHY_PLL_RANGE)
+#define G_STATIC_XGPHY_PLL_RANGE(x) (((x) >> S_STATIC_XGPHY_PLL_RANGE) & M_STATIC_XGPHY_PLL_RANGE)
+
+#define S_STATIC_XGPHY_PLL_DIVQ 17
+#define M_STATIC_XGPHY_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPHY_PLL_DIVQ(x) ((x) << S_STATIC_XGPHY_PLL_DIVQ)
+#define G_STATIC_XGPHY_PLL_DIVQ(x) (((x) >> S_STATIC_XGPHY_PLL_DIVQ) & M_STATIC_XGPHY_PLL_DIVQ)
+
+#define S_STATIC_XGPHY_PLL_DIVFI 8
+#define M_STATIC_XGPHY_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPHY_PLL_DIVFI(x) ((x) << S_STATIC_XGPHY_PLL_DIVFI)
+#define G_STATIC_XGPHY_PLL_DIVFI(x) (((x) >> S_STATIC_XGPHY_PLL_DIVFI) & M_STATIC_XGPHY_PLL_DIVFI)
+
+#define S_STATIC_XGPHY_PLL_DIVR 2
+#define M_STATIC_XGPHY_PLL_DIVR 0x3fU
+#define V_STATIC_XGPHY_PLL_DIVR(x) ((x) << S_STATIC_XGPHY_PLL_DIVR)
+#define G_STATIC_XGPHY_PLL_DIVR(x) (((x) >> S_STATIC_XGPHY_PLL_DIVR) & M_STATIC_XGPHY_PLL_DIVR)
+
+#define S_STATIC_XGPHY_PLL_BYPASS 1
+#define V_STATIC_XGPHY_PLL_BYPASS(x) ((x) << S_STATIC_XGPHY_PLL_BYPASS)
+#define F_STATIC_XGPHY_PLL_BYPASS V_STATIC_XGPHY_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPHY_PLL_RESET 0
+#define V_STATIC_XGPHY_PLL_RESET(x) ((x) << S_STATIC_XGPHY_PLL_RESET)
+#define F_STATIC_XGPHY_PLL_RESET V_STATIC_XGPHY_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc
#define A_DBG_STATIC_C_PLL_CONF4 0x60dc
+#define A_DBG_STATIC_XGPHY_PLL_CONF2 0x60dc
+
+#define S_STATIC_XGPHY_PLL_SSMF 5
+#define M_STATIC_XGPHY_PLL_SSMF 0xfU
+#define V_STATIC_XGPHY_PLL_SSMF(x) ((x) << S_STATIC_XGPHY_PLL_SSMF)
+#define G_STATIC_XGPHY_PLL_SSMF(x) (((x) >> S_STATIC_XGPHY_PLL_SSMF) & M_STATIC_XGPHY_PLL_SSMF)
+
+#define S_STATIC_XGPHY_PLL_SSMD 2
+#define M_STATIC_XGPHY_PLL_SSMD 0x7U
+#define V_STATIC_XGPHY_PLL_SSMD(x) ((x) << S_STATIC_XGPHY_PLL_SSMD)
+#define G_STATIC_XGPHY_PLL_SSMD(x) (((x) >> S_STATIC_XGPHY_PLL_SSMD) & M_STATIC_XGPHY_PLL_SSMD)
+
+#define S_STATIC_XGPHY_PLL_SSDS 1
+#define V_STATIC_XGPHY_PLL_SSDS(x) ((x) << S_STATIC_XGPHY_PLL_SSDS)
+#define F_STATIC_XGPHY_PLL_SSDS V_STATIC_XGPHY_PLL_SSDS(1U)
+
+#define S_STATIC_XGPHY_PLL_SSE 0
+#define V_STATIC_XGPHY_PLL_SSE(x) ((x) << S_STATIC_XGPHY_PLL_SSE)
+#define F_STATIC_XGPHY_PLL_SSE V_STATIC_XGPHY_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0
#define S_T5_STATIC_C_PLL_VCVTUNE 22
@@ -13140,6 +16583,40 @@
#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT)
#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF1 0x60e0
+
+#define S_STATIC_XGPBUS_SWRST_ 25
+#define V_STATIC_XGPBUS_SWRST_(x) ((x) << S_STATIC_XGPBUS_SWRST_)
+#define F_STATIC_XGPBUS_SWRST_ V_STATIC_XGPBUS_SWRST_(1U)
+
+#define S_STATIC_XGPBUS_PLL_RANGE 22
+#define M_STATIC_XGPBUS_PLL_RANGE 0x7U
+#define V_STATIC_XGPBUS_PLL_RANGE(x) ((x) << S_STATIC_XGPBUS_PLL_RANGE)
+#define G_STATIC_XGPBUS_PLL_RANGE(x) (((x) >> S_STATIC_XGPBUS_PLL_RANGE) & M_STATIC_XGPBUS_PLL_RANGE)
+
+#define S_STATIC_XGPBUS_PLL_DIVQ 17
+#define M_STATIC_XGPBUS_PLL_DIVQ 0x1fU
+#define V_STATIC_XGPBUS_PLL_DIVQ(x) ((x) << S_STATIC_XGPBUS_PLL_DIVQ)
+#define G_STATIC_XGPBUS_PLL_DIVQ(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVQ) & M_STATIC_XGPBUS_PLL_DIVQ)
+
+#define S_STATIC_XGPBUS_PLL_DIVFI 8
+#define M_STATIC_XGPBUS_PLL_DIVFI 0x1ffU
+#define V_STATIC_XGPBUS_PLL_DIVFI(x) ((x) << S_STATIC_XGPBUS_PLL_DIVFI)
+#define G_STATIC_XGPBUS_PLL_DIVFI(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVFI) & M_STATIC_XGPBUS_PLL_DIVFI)
+
+#define S_STATIC_XGPBUS_PLL_DIVR 2
+#define M_STATIC_XGPBUS_PLL_DIVR 0x3fU
+#define V_STATIC_XGPBUS_PLL_DIVR(x) ((x) << S_STATIC_XGPBUS_PLL_DIVR)
+#define G_STATIC_XGPBUS_PLL_DIVR(x) (((x) >> S_STATIC_XGPBUS_PLL_DIVR) & M_STATIC_XGPBUS_PLL_DIVR)
+
+#define S_STATIC_XGPBUS_PLL_BYPASS 1
+#define V_STATIC_XGPBUS_PLL_BYPASS(x) ((x) << S_STATIC_XGPBUS_PLL_BYPASS)
+#define F_STATIC_XGPBUS_PLL_BYPASS V_STATIC_XGPBUS_PLL_BYPASS(1U)
+
+#define S_STATIC_XGPBUS_PLL_RESET 0
+#define V_STATIC_XGPBUS_PLL_RESET(x) ((x) << S_STATIC_XGPBUS_PLL_RESET)
+#define F_STATIC_XGPBUS_PLL_RESET V_STATIC_XGPBUS_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4
#define S_T5_STATIC_U_PLL_MULTFRAC 8
@@ -13164,6 +16641,26 @@
#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE)
#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE)
+#define A_DBG_STATIC_XGPBUS_PLL_CONF2 0x60e4
+
+#define S_STATIC_XGPBUS_PLL_SSMF 5
+#define M_STATIC_XGPBUS_PLL_SSMF 0xfU
+#define V_STATIC_XGPBUS_PLL_SSMF(x) ((x) << S_STATIC_XGPBUS_PLL_SSMF)
+#define G_STATIC_XGPBUS_PLL_SSMF(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMF) & M_STATIC_XGPBUS_PLL_SSMF)
+
+#define S_STATIC_XGPBUS_PLL_SSMD 2
+#define M_STATIC_XGPBUS_PLL_SSMD 0x7U
+#define V_STATIC_XGPBUS_PLL_SSMD(x) ((x) << S_STATIC_XGPBUS_PLL_SSMD)
+#define G_STATIC_XGPBUS_PLL_SSMD(x) (((x) >> S_STATIC_XGPBUS_PLL_SSMD) & M_STATIC_XGPBUS_PLL_SSMD)
+
+#define S_STATIC_XGPBUS_PLL_SSDS 1
+#define V_STATIC_XGPBUS_PLL_SSDS(x) ((x) << S_STATIC_XGPBUS_PLL_SSDS)
+#define F_STATIC_XGPBUS_PLL_SSDS V_STATIC_XGPBUS_PLL_SSDS(1U)
+
+#define S_STATIC_XGPBUS_PLL_SSE 0
+#define V_STATIC_XGPBUS_PLL_SSE(x) ((x) << S_STATIC_XGPBUS_PLL_SSE)
+#define F_STATIC_XGPBUS_PLL_SSE V_STATIC_XGPBUS_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8
#define S_T5_STATIC_U_PLL_DCO_BYPASS 23
@@ -13246,6 +16743,36 @@
#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE)
#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE)
+#define A_DBG_STATIC_M1_PLL_CONF1 0x60e8
+
+#define S_STATIC_M1_PLL_RANGE 22
+#define M_STATIC_M1_PLL_RANGE 0x7U
+#define V_STATIC_M1_PLL_RANGE(x) ((x) << S_STATIC_M1_PLL_RANGE)
+#define G_STATIC_M1_PLL_RANGE(x) (((x) >> S_STATIC_M1_PLL_RANGE) & M_STATIC_M1_PLL_RANGE)
+
+#define S_STATIC_M1_PLL_DIVQ 17
+#define M_STATIC_M1_PLL_DIVQ 0x1fU
+#define V_STATIC_M1_PLL_DIVQ(x) ((x) << S_STATIC_M1_PLL_DIVQ)
+#define G_STATIC_M1_PLL_DIVQ(x) (((x) >> S_STATIC_M1_PLL_DIVQ) & M_STATIC_M1_PLL_DIVQ)
+
+#define S_STATIC_M1_PLL_DIVFI 8
+#define M_STATIC_M1_PLL_DIVFI 0x1ffU
+#define V_STATIC_M1_PLL_DIVFI(x) ((x) << S_STATIC_M1_PLL_DIVFI)
+#define G_STATIC_M1_PLL_DIVFI(x) (((x) >> S_STATIC_M1_PLL_DIVFI) & M_STATIC_M1_PLL_DIVFI)
+
+#define S_STATIC_M1_PLL_DIVR 2
+#define M_STATIC_M1_PLL_DIVR 0x3fU
+#define V_STATIC_M1_PLL_DIVR(x) ((x) << S_STATIC_M1_PLL_DIVR)
+#define G_STATIC_M1_PLL_DIVR(x) (((x) >> S_STATIC_M1_PLL_DIVR) & M_STATIC_M1_PLL_DIVR)
+
+#define S_STATIC_M1_PLL_BYPASS 1
+#define V_STATIC_M1_PLL_BYPASS(x) ((x) << S_STATIC_M1_PLL_BYPASS)
+#define F_STATIC_M1_PLL_BYPASS V_STATIC_M1_PLL_BYPASS(1U)
+
+#define S_STATIC_M1_PLL_RESET 0
+#define V_STATIC_M1_PLL_RESET(x) ((x) << S_STATIC_M1_PLL_RESET)
+#define F_STATIC_M1_PLL_RESET V_STATIC_M1_PLL_RESET(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec
#define S_T5_STATIC_U_PLL_MULTPRE 30
@@ -13309,6 +16836,26 @@
#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) << S_T6_STATIC_U_PLL_RANGEA)
#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA)
+#define A_DBG_STATIC_M1_PLL_CONF2 0x60ec
+
+#define S_STATIC_M1_PLL_SSMF 5
+#define M_STATIC_M1_PLL_SSMF 0xfU
+#define V_STATIC_M1_PLL_SSMF(x) ((x) << S_STATIC_M1_PLL_SSMF)
+#define G_STATIC_M1_PLL_SSMF(x) (((x) >> S_STATIC_M1_PLL_SSMF) & M_STATIC_M1_PLL_SSMF)
+
+#define S_STATIC_M1_PLL_SSMD 2
+#define M_STATIC_M1_PLL_SSMD 0x7U
+#define V_STATIC_M1_PLL_SSMD(x) ((x) << S_STATIC_M1_PLL_SSMD)
+#define G_STATIC_M1_PLL_SSMD(x) (((x) >> S_STATIC_M1_PLL_SSMD) & M_STATIC_M1_PLL_SSMD)
+
+#define S_STATIC_M1_PLL_SSDS 1
+#define V_STATIC_M1_PLL_SSDS(x) ((x) << S_STATIC_M1_PLL_SSDS)
+#define F_STATIC_M1_PLL_SSDS V_STATIC_M1_PLL_SSDS(1U)
+
+#define S_STATIC_M1_PLL_SSE 0
+#define V_STATIC_M1_PLL_SSE(x) ((x) << S_STATIC_M1_PLL_SSE)
+#define F_STATIC_M1_PLL_SSE V_STATIC_M1_PLL_SSE(1U)
+
#define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_STATIC_U_PLL_CONF4 0x60f0
#define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4
@@ -13557,6 +17104,104 @@
#define V_GPIO19_OUT_VAL(x) ((x) << S_GPIO19_OUT_VAL)
#define F_GPIO19_OUT_VAL V_GPIO19_OUT_VAL(1U)
+#define A_DBG_GPIO_OEN 0x6100
+
+#define S_GPIO23_OEN 23
+#define V_GPIO23_OEN(x) ((x) << S_GPIO23_OEN)
+#define F_GPIO23_OEN V_GPIO23_OEN(1U)
+
+#define S_GPIO22_OEN 22
+#define V_GPIO22_OEN(x) ((x) << S_GPIO22_OEN)
+#define F_GPIO22_OEN V_GPIO22_OEN(1U)
+
+#define S_GPIO21_OEN 21
+#define V_GPIO21_OEN(x) ((x) << S_GPIO21_OEN)
+#define F_GPIO21_OEN V_GPIO21_OEN(1U)
+
+#define S_GPIO20_OEN 20
+#define V_GPIO20_OEN(x) ((x) << S_GPIO20_OEN)
+#define F_GPIO20_OEN V_GPIO20_OEN(1U)
+
+#define S_T7_GPIO19_OEN 19
+#define V_T7_GPIO19_OEN(x) ((x) << S_T7_GPIO19_OEN)
+#define F_T7_GPIO19_OEN V_T7_GPIO19_OEN(1U)
+
+#define S_T7_GPIO18_OEN 18
+#define V_T7_GPIO18_OEN(x) ((x) << S_T7_GPIO18_OEN)
+#define F_T7_GPIO18_OEN V_T7_GPIO18_OEN(1U)
+
+#define S_T7_GPIO17_OEN 17
+#define V_T7_GPIO17_OEN(x) ((x) << S_T7_GPIO17_OEN)
+#define F_T7_GPIO17_OEN V_T7_GPIO17_OEN(1U)
+
+#define S_T7_GPIO16_OEN 16
+#define V_T7_GPIO16_OEN(x) ((x) << S_T7_GPIO16_OEN)
+#define F_T7_GPIO16_OEN V_T7_GPIO16_OEN(1U)
+
+#define S_T7_GPIO15_OEN 15
+#define V_T7_GPIO15_OEN(x) ((x) << S_T7_GPIO15_OEN)
+#define F_T7_GPIO15_OEN V_T7_GPIO15_OEN(1U)
+
+#define S_T7_GPIO14_OEN 14
+#define V_T7_GPIO14_OEN(x) ((x) << S_T7_GPIO14_OEN)
+#define F_T7_GPIO14_OEN V_T7_GPIO14_OEN(1U)
+
+#define S_T7_GPIO13_OEN 13
+#define V_T7_GPIO13_OEN(x) ((x) << S_T7_GPIO13_OEN)
+#define F_T7_GPIO13_OEN V_T7_GPIO13_OEN(1U)
+
+#define S_T7_GPIO12_OEN 12
+#define V_T7_GPIO12_OEN(x) ((x) << S_T7_GPIO12_OEN)
+#define F_T7_GPIO12_OEN V_T7_GPIO12_OEN(1U)
+
+#define S_T7_GPIO11_OEN 11
+#define V_T7_GPIO11_OEN(x) ((x) << S_T7_GPIO11_OEN)
+#define F_T7_GPIO11_OEN V_T7_GPIO11_OEN(1U)
+
+#define S_T7_GPIO10_OEN 10
+#define V_T7_GPIO10_OEN(x) ((x) << S_T7_GPIO10_OEN)
+#define F_T7_GPIO10_OEN V_T7_GPIO10_OEN(1U)
+
+#define S_T7_GPIO9_OEN 9
+#define V_T7_GPIO9_OEN(x) ((x) << S_T7_GPIO9_OEN)
+#define F_T7_GPIO9_OEN V_T7_GPIO9_OEN(1U)
+
+#define S_T7_GPIO8_OEN 8
+#define V_T7_GPIO8_OEN(x) ((x) << S_T7_GPIO8_OEN)
+#define F_T7_GPIO8_OEN V_T7_GPIO8_OEN(1U)
+
+#define S_T7_GPIO7_OEN 7
+#define V_T7_GPIO7_OEN(x) ((x) << S_T7_GPIO7_OEN)
+#define F_T7_GPIO7_OEN V_T7_GPIO7_OEN(1U)
+
+#define S_T7_GPIO6_OEN 6
+#define V_T7_GPIO6_OEN(x) ((x) << S_T7_GPIO6_OEN)
+#define F_T7_GPIO6_OEN V_T7_GPIO6_OEN(1U)
+
+#define S_T7_GPIO5_OEN 5
+#define V_T7_GPIO5_OEN(x) ((x) << S_T7_GPIO5_OEN)
+#define F_T7_GPIO5_OEN V_T7_GPIO5_OEN(1U)
+
+#define S_T7_GPIO4_OEN 4
+#define V_T7_GPIO4_OEN(x) ((x) << S_T7_GPIO4_OEN)
+#define F_T7_GPIO4_OEN V_T7_GPIO4_OEN(1U)
+
+#define S_T7_GPIO3_OEN 3
+#define V_T7_GPIO3_OEN(x) ((x) << S_T7_GPIO3_OEN)
+#define F_T7_GPIO3_OEN V_T7_GPIO3_OEN(1U)
+
+#define S_T7_GPIO2_OEN 2
+#define V_T7_GPIO2_OEN(x) ((x) << S_T7_GPIO2_OEN)
+#define F_T7_GPIO2_OEN V_T7_GPIO2_OEN(1U)
+
+#define S_T7_GPIO1_OEN 1
+#define V_T7_GPIO1_OEN(x) ((x) << S_T7_GPIO1_OEN)
+#define F_T7_GPIO1_OEN V_T7_GPIO1_OEN(1U)
+
+#define S_T7_GPIO0_OEN 0
+#define V_T7_GPIO0_OEN(x) ((x) << S_T7_GPIO0_OEN)
+#define F_T7_GPIO0_OEN V_T7_GPIO0_OEN(1U)
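+
+/*
+ * These are presumably the per-pin output enables paired with
+ * A_DBG_GPIO_OUT.  Illustrative (assumed) use, driving GPIO20 high:
+ * set F_GPIO20_OEN in A_DBG_GPIO_OEN and F_GPIO20_OUT_VAL in
+ * A_DBG_GPIO_OUT.
+ */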
+
#define A_DBG_PVT_REG_UPDATE_CTL 0x6104
#define S_FAST_UPDATE 8
@@ -13605,6 +17250,104 @@
#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN)
#define F_GPIO16_IN V_GPIO16_IN(1U)
+#define A_DBG_GPIO_CHG_DET 0x6104
+
+#define S_GPIO23_CHG_DET 23
+#define V_GPIO23_CHG_DET(x) ((x) << S_GPIO23_CHG_DET)
+#define F_GPIO23_CHG_DET V_GPIO23_CHG_DET(1U)
+
+#define S_GPIO22_CHG_DET 22
+#define V_GPIO22_CHG_DET(x) ((x) << S_GPIO22_CHG_DET)
+#define F_GPIO22_CHG_DET V_GPIO22_CHG_DET(1U)
+
+#define S_GPIO21_CHG_DET 21
+#define V_GPIO21_CHG_DET(x) ((x) << S_GPIO21_CHG_DET)
+#define F_GPIO21_CHG_DET V_GPIO21_CHG_DET(1U)
+
+#define S_GPIO20_CHG_DET 20
+#define V_GPIO20_CHG_DET(x) ((x) << S_GPIO20_CHG_DET)
+#define F_GPIO20_CHG_DET V_GPIO20_CHG_DET(1U)
+
+#define S_T7_GPIO19_CHG_DET 19
+#define V_T7_GPIO19_CHG_DET(x) ((x) << S_T7_GPIO19_CHG_DET)
+#define F_T7_GPIO19_CHG_DET V_T7_GPIO19_CHG_DET(1U)
+
+#define S_T7_GPIO18_CHG_DET 18
+#define V_T7_GPIO18_CHG_DET(x) ((x) << S_T7_GPIO18_CHG_DET)
+#define F_T7_GPIO18_CHG_DET V_T7_GPIO18_CHG_DET(1U)
+
+#define S_T7_GPIO17_CHG_DET 17
+#define V_T7_GPIO17_CHG_DET(x) ((x) << S_T7_GPIO17_CHG_DET)
+#define F_T7_GPIO17_CHG_DET V_T7_GPIO17_CHG_DET(1U)
+
+#define S_T7_GPIO16_CHG_DET 16
+#define V_T7_GPIO16_CHG_DET(x) ((x) << S_T7_GPIO16_CHG_DET)
+#define F_T7_GPIO16_CHG_DET V_T7_GPIO16_CHG_DET(1U)
+
+#define S_T7_GPIO15_CHG_DET 15
+#define V_T7_GPIO15_CHG_DET(x) ((x) << S_T7_GPIO15_CHG_DET)
+#define F_T7_GPIO15_CHG_DET V_T7_GPIO15_CHG_DET(1U)
+
+#define S_T7_GPIO14_CHG_DET 14
+#define V_T7_GPIO14_CHG_DET(x) ((x) << S_T7_GPIO14_CHG_DET)
+#define F_T7_GPIO14_CHG_DET V_T7_GPIO14_CHG_DET(1U)
+
+#define S_T7_GPIO13_CHG_DET 13
+#define V_T7_GPIO13_CHG_DET(x) ((x) << S_T7_GPIO13_CHG_DET)
+#define F_T7_GPIO13_CHG_DET V_T7_GPIO13_CHG_DET(1U)
+
+#define S_T7_GPIO12_CHG_DET 12
+#define V_T7_GPIO12_CHG_DET(x) ((x) << S_T7_GPIO12_CHG_DET)
+#define F_T7_GPIO12_CHG_DET V_T7_GPIO12_CHG_DET(1U)
+
+#define S_T7_GPIO11_CHG_DET 11
+#define V_T7_GPIO11_CHG_DET(x) ((x) << S_T7_GPIO11_CHG_DET)
+#define F_T7_GPIO11_CHG_DET V_T7_GPIO11_CHG_DET(1U)
+
+#define S_T7_GPIO10_CHG_DET 10
+#define V_T7_GPIO10_CHG_DET(x) ((x) << S_T7_GPIO10_CHG_DET)
+#define F_T7_GPIO10_CHG_DET V_T7_GPIO10_CHG_DET(1U)
+
+#define S_T7_GPIO9_CHG_DET 9
+#define V_T7_GPIO9_CHG_DET(x) ((x) << S_T7_GPIO9_CHG_DET)
+#define F_T7_GPIO9_CHG_DET V_T7_GPIO9_CHG_DET(1U)
+
+#define S_T7_GPIO8_CHG_DET 8
+#define V_T7_GPIO8_CHG_DET(x) ((x) << S_T7_GPIO8_CHG_DET)
+#define F_T7_GPIO8_CHG_DET V_T7_GPIO8_CHG_DET(1U)
+
+#define S_T7_GPIO7_CHG_DET 7
+#define V_T7_GPIO7_CHG_DET(x) ((x) << S_T7_GPIO7_CHG_DET)
+#define F_T7_GPIO7_CHG_DET V_T7_GPIO7_CHG_DET(1U)
+
+#define S_T7_GPIO6_CHG_DET 6
+#define V_T7_GPIO6_CHG_DET(x) ((x) << S_T7_GPIO6_CHG_DET)
+#define F_T7_GPIO6_CHG_DET V_T7_GPIO6_CHG_DET(1U)
+
+#define S_T7_GPIO5_CHG_DET 5
+#define V_T7_GPIO5_CHG_DET(x) ((x) << S_T7_GPIO5_CHG_DET)
+#define F_T7_GPIO5_CHG_DET V_T7_GPIO5_CHG_DET(1U)
+
+#define S_T7_GPIO4_CHG_DET 4
+#define V_T7_GPIO4_CHG_DET(x) ((x) << S_T7_GPIO4_CHG_DET)
+#define F_T7_GPIO4_CHG_DET V_T7_GPIO4_CHG_DET(1U)
+
+#define S_T7_GPIO3_CHG_DET 3
+#define V_T7_GPIO3_CHG_DET(x) ((x) << S_T7_GPIO3_CHG_DET)
+#define F_T7_GPIO3_CHG_DET V_T7_GPIO3_CHG_DET(1U)
+
+#define S_T7_GPIO2_CHG_DET 2
+#define V_T7_GPIO2_CHG_DET(x) ((x) << S_T7_GPIO2_CHG_DET)
+#define F_T7_GPIO2_CHG_DET V_T7_GPIO2_CHG_DET(1U)
+
+#define S_T7_GPIO1_CHG_DET 1
+#define V_T7_GPIO1_CHG_DET(x) ((x) << S_T7_GPIO1_CHG_DET)
+#define F_T7_GPIO1_CHG_DET V_T7_GPIO1_CHG_DET(1U)
+
+#define S_T7_GPIO0_CHG_DET 0
+#define V_T7_GPIO0_CHG_DET(x) ((x) << S_T7_GPIO0_CHG_DET)
+#define F_T7_GPIO0_CHG_DET V_T7_GPIO0_CHG_DET(1U)
+
#define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108
#define S_LAST_MEASUREMENT_SELECT 8
@@ -13964,6 +17707,22 @@
#define V_GPIO0_PE_EN(x) ((x) << S_GPIO0_PE_EN)
#define F_GPIO0_PE_EN V_GPIO0_PE_EN(1U)
+#define S_GPIO23_PE_EN 23
+#define V_GPIO23_PE_EN(x) ((x) << S_GPIO23_PE_EN)
+#define F_GPIO23_PE_EN V_GPIO23_PE_EN(1U)
+
+#define S_GPIO22_PE_EN 22
+#define V_GPIO22_PE_EN(x) ((x) << S_GPIO22_PE_EN)
+#define F_GPIO22_PE_EN V_GPIO22_PE_EN(1U)
+
+#define S_GPIO21_PE_EN 21
+#define V_GPIO21_PE_EN(x) ((x) << S_GPIO21_PE_EN)
+#define F_GPIO21_PE_EN V_GPIO21_PE_EN(1U)
+
+#define S_GPIO20_PE_EN 20
+#define V_GPIO20_PE_EN(x) ((x) << S_GPIO20_PE_EN)
+#define F_GPIO20_PE_EN V_GPIO20_PE_EN(1U)
+
#define A_DBG_PVT_REG_THRESHOLD 0x611c
#define S_PVT_CALIBRATION_DONE 8
@@ -14084,6 +17843,22 @@
#define V_GPIO0_PS_EN(x) ((x) << S_GPIO0_PS_EN)
#define F_GPIO0_PS_EN V_GPIO0_PS_EN(1U)
+#define S_GPIO23_PS_EN 23
+#define V_GPIO23_PS_EN(x) ((x) << S_GPIO23_PS_EN)
+#define F_GPIO23_PS_EN V_GPIO23_PS_EN(1U)
+
+#define S_GPIO22_PS_EN 22
+#define V_GPIO22_PS_EN(x) ((x) << S_GPIO22_PS_EN)
+#define F_GPIO22_PS_EN V_GPIO22_PS_EN(1U)
+
+#define S_GPIO21_PS_EN 21
+#define V_GPIO21_PS_EN(x) ((x) << S_GPIO21_PS_EN)
+#define F_GPIO21_PS_EN V_GPIO21_PS_EN(1U)
+
+#define S_GPIO20_PS_EN 20
+#define V_GPIO20_PS_EN(x) ((x) << S_GPIO20_PS_EN)
+#define F_GPIO20_PS_EN V_GPIO20_PS_EN(1U)
+
#define A_DBG_PVT_REG_IN_TERMP 0x6120
#define S_REG_IN_TERMP_B 4
@@ -14254,6 +18029,17 @@
#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE)
#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE)
+#define A_DBG_STATIC_PLL_LOCK_WAIT_CONF 0x6150
+
+#define S_STATIC_WAIT_LOCK 24
+#define V_STATIC_WAIT_LOCK(x) ((x) << S_STATIC_WAIT_LOCK)
+#define F_STATIC_WAIT_LOCK V_STATIC_WAIT_LOCK(1U)
+
+#define S_STATIC_LOCK_WAIT_TIME 0
+#define M_STATIC_LOCK_WAIT_TIME 0xffffffU
+#define V_STATIC_LOCK_WAIT_TIME(x) ((x) << S_STATIC_LOCK_WAIT_TIME)
+#define G_STATIC_LOCK_WAIT_TIME(x) (((x) >> S_STATIC_LOCK_WAIT_TIME) & M_STATIC_LOCK_WAIT_TIME)
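+
+/*
+ * Illustrative use of the fields above (assumed usage, placeholder
+ * `ticks'): enable waiting for PLL lock with a bounded 24-bit wait:
+ *
+ *	v = F_STATIC_WAIT_LOCK |
+ *	    V_STATIC_LOCK_WAIT_TIME(ticks & M_STATIC_LOCK_WAIT_TIME);
+ */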
+
#define A_DBG_STATIC_C_PLL_CONF6 0x6154
#define S_STATIC_C_PLL_VREGTUNE 0
@@ -14303,13 +18089,274 @@
#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178
#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c
#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180
+#define A_DBG_GPIO_INT_ENABLE 0x6180
+
+#define S_GPIO23 23
+#define V_GPIO23(x) ((x) << S_GPIO23)
+#define F_GPIO23 V_GPIO23(1U)
+
+#define S_GPIO22 22
+#define V_GPIO22(x) ((x) << S_GPIO22)
+#define F_GPIO22 V_GPIO22(1U)
+
+#define S_GPIO21 21
+#define V_GPIO21(x) ((x) << S_GPIO21)
+#define F_GPIO21 V_GPIO21(1U)
+
+#define S_GPIO20 20
+#define V_GPIO20(x) ((x) << S_GPIO20)
+#define F_GPIO20 V_GPIO20(1U)
+
+#define S_T7_GPIO19 19
+#define V_T7_GPIO19(x) ((x) << S_T7_GPIO19)
+#define F_T7_GPIO19 V_T7_GPIO19(1U)
+
+#define S_T7_GPIO18 18
+#define V_T7_GPIO18(x) ((x) << S_T7_GPIO18)
+#define F_T7_GPIO18 V_T7_GPIO18(1U)
+
+#define S_T7_GPIO17 17
+#define V_T7_GPIO17(x) ((x) << S_T7_GPIO17)
+#define F_T7_GPIO17 V_T7_GPIO17(1U)
+
+#define S_T7_GPIO16 16
+#define V_T7_GPIO16(x) ((x) << S_T7_GPIO16)
+#define F_T7_GPIO16 V_T7_GPIO16(1U)
+
#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184
+#define A_DBG_GPIO_INT_CAUSE 0x6184
#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188
+#define A_T7_DBG_GPIO_ACT_LOW 0x6188
+
+#define S_GPIO23_ACT_LOW 23
+#define V_GPIO23_ACT_LOW(x) ((x) << S_GPIO23_ACT_LOW)
+#define F_GPIO23_ACT_LOW V_GPIO23_ACT_LOW(1U)
+
+#define S_GPIO22_ACT_LOW 22
+#define V_GPIO22_ACT_LOW(x) ((x) << S_GPIO22_ACT_LOW)
+#define F_GPIO22_ACT_LOW V_GPIO22_ACT_LOW(1U)
+
+#define S_GPIO21_ACT_LOW 21
+#define V_GPIO21_ACT_LOW(x) ((x) << S_GPIO21_ACT_LOW)
+#define F_GPIO21_ACT_LOW V_GPIO21_ACT_LOW(1U)
+
+#define S_GPIO20_ACT_LOW 20
+#define V_GPIO20_ACT_LOW(x) ((x) << S_GPIO20_ACT_LOW)
+#define F_GPIO20_ACT_LOW V_GPIO20_ACT_LOW(1U)
+
+#define S_T7_GPIO19_ACT_LOW 19
+#define V_T7_GPIO19_ACT_LOW(x) ((x) << S_T7_GPIO19_ACT_LOW)
+#define F_T7_GPIO19_ACT_LOW V_T7_GPIO19_ACT_LOW(1U)
+
+#define S_T7_GPIO18_ACT_LOW 18
+#define V_T7_GPIO18_ACT_LOW(x) ((x) << S_T7_GPIO18_ACT_LOW)
+#define F_T7_GPIO18_ACT_LOW V_T7_GPIO18_ACT_LOW(1U)
+
+#define S_T7_GPIO17_ACT_LOW 17
+#define V_T7_GPIO17_ACT_LOW(x) ((x) << S_T7_GPIO17_ACT_LOW)
+#define F_T7_GPIO17_ACT_LOW V_T7_GPIO17_ACT_LOW(1U)
+
+#define S_T7_GPIO16_ACT_LOW 16
+#define V_T7_GPIO16_ACT_LOW(x) ((x) << S_T7_GPIO16_ACT_LOW)
+#define F_T7_GPIO16_ACT_LOW V_T7_GPIO16_ACT_LOW(1U)
+
#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c
+#define A_DBG_DDR_CAL 0x618c
+
+#define S_CAL_ENDC 9
+#define V_CAL_ENDC(x) ((x) << S_CAL_ENDC)
+#define F_CAL_ENDC V_CAL_ENDC(1U)
+
+#define S_CAL_MODE 8
+#define V_CAL_MODE(x) ((x) << S_CAL_MODE)
+#define F_CAL_MODE V_CAL_MODE(1U)
+
+#define S_CAL_REFSEL 7
+#define V_CAL_REFSEL(x) ((x) << S_CAL_REFSEL)
+#define F_CAL_REFSEL V_CAL_REFSEL(1U)
+
+#define S_PD 6
+#define V_PD(x) ((x) << S_PD)
+#define F_PD V_PD(1U)
+
+#define S_CAL_RST 5
+#define V_CAL_RST(x) ((x) << S_CAL_RST)
+#define F_CAL_RST V_CAL_RST(1U)
+
+#define S_CAL_READ 4
+#define V_CAL_READ(x) ((x) << S_CAL_READ)
+#define F_CAL_READ V_CAL_READ(1U)
+
+#define S_CAL_SC 3
+#define V_CAL_SC(x) ((x) << S_CAL_SC)
+#define F_CAL_SC V_CAL_SC(1U)
+
+#define S_CAL_LC 2
+#define V_CAL_LC(x) ((x) << S_CAL_LC)
+#define F_CAL_LC V_CAL_LC(1U)
+
+#define S_CAL_CCAL 1
+#define V_CAL_CCAL(x) ((x) << S_CAL_CCAL)
+#define F_CAL_CCAL V_CAL_CCAL(1U)
+
+#define S_CAL_RES 0
+#define V_CAL_RES(x) ((x) << S_CAL_RES)
+#define F_CAL_RES V_CAL_RES(1U)
+
#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190
+#define A_DBG_EFUSE_CTL_0 0x6190
+
+#define S_EFUSE_CSB 31
+#define V_EFUSE_CSB(x) ((x) << S_EFUSE_CSB)
+#define F_EFUSE_CSB V_EFUSE_CSB(1U)
+
+#define S_EFUSE_STROBE 30
+#define V_EFUSE_STROBE(x) ((x) << S_EFUSE_STROBE)
+#define F_EFUSE_STROBE V_EFUSE_STROBE(1U)
+
+#define S_EFUSE_LOAD 29
+#define V_EFUSE_LOAD(x) ((x) << S_EFUSE_LOAD)
+#define F_EFUSE_LOAD V_EFUSE_LOAD(1U)
+
+#define S_EFUSE_PGENB 28
+#define V_EFUSE_PGENB(x) ((x) << S_EFUSE_PGENB)
+#define F_EFUSE_PGENB V_EFUSE_PGENB(1U)
+
+#define S_EFUSE_PS 27
+#define V_EFUSE_PS(x) ((x) << S_EFUSE_PS)
+#define F_EFUSE_PS V_EFUSE_PS(1U)
+
+#define S_EFUSE_MR 26
+#define V_EFUSE_MR(x) ((x) << S_EFUSE_MR)
+#define F_EFUSE_MR V_EFUSE_MR(1U)
+
+#define S_EFUSE_PD 25
+#define V_EFUSE_PD(x) ((x) << S_EFUSE_PD)
+#define F_EFUSE_PD V_EFUSE_PD(1U)
+
+#define S_EFUSE_RWL 24
+#define V_EFUSE_RWL(x) ((x) << S_EFUSE_RWL)
+#define F_EFUSE_RWL V_EFUSE_RWL(1U)
+
+#define S_EFUSE_RSB 23
+#define V_EFUSE_RSB(x) ((x) << S_EFUSE_RSB)
+#define F_EFUSE_RSB V_EFUSE_RSB(1U)
+
+#define S_EFUSE_TRCS 22
+#define V_EFUSE_TRCS(x) ((x) << S_EFUSE_TRCS)
+#define F_EFUSE_TRCS V_EFUSE_TRCS(1U)
+
+#define S_EFUSE_AT 20
+#define M_EFUSE_AT 0x3U
+#define V_EFUSE_AT(x) ((x) << S_EFUSE_AT)
+#define G_EFUSE_AT(x) (((x) >> S_EFUSE_AT) & M_EFUSE_AT)
+
+#define S_EFUSE_RD_STATE 16
+#define M_EFUSE_RD_STATE 0xfU
+#define V_EFUSE_RD_STATE(x) ((x) << S_EFUSE_RD_STATE)
+#define G_EFUSE_RD_STATE(x) (((x) >> S_EFUSE_RD_STATE) & M_EFUSE_RD_STATE)
+
+#define S_EFUSE_BUSY 15
+#define V_EFUSE_BUSY(x) ((x) << S_EFUSE_BUSY)
+#define F_EFUSE_BUSY V_EFUSE_BUSY(1U)
+
+#define S_EFUSE_WR_RD 13
+#define M_EFUSE_WR_RD 0x3U
+#define V_EFUSE_WR_RD(x) ((x) << S_EFUSE_WR_RD)
+#define G_EFUSE_WR_RD(x) (((x) >> S_EFUSE_WR_RD) & M_EFUSE_WR_RD)
+
+#define S_EFUSE_A 0
+#define M_EFUSE_A 0x7ffU
+#define V_EFUSE_A(x) ((x) << S_EFUSE_A)
+#define G_EFUSE_A(x) (((x) >> S_EFUSE_A) & M_EFUSE_A)
+
#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194
+#define A_DBG_EFUSE_CTL_1 0x6194
#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198
+#define A_DBG_EFUSE_RD_CTL 0x6198
+
+#define S_EFUSE_RD_ID 6
+#define M_EFUSE_RD_ID 0x3U
+#define V_EFUSE_RD_ID(x) ((x) << S_EFUSE_RD_ID)
+#define G_EFUSE_RD_ID(x) (((x) >> S_EFUSE_RD_ID) & M_EFUSE_RD_ID)
+
+#define S_EFUSE_RD_ADDR 0
+#define M_EFUSE_RD_ADDR 0x3fU
+#define V_EFUSE_RD_ADDR(x) ((x) << S_EFUSE_RD_ADDR)
+#define G_EFUSE_RD_ADDR(x) (((x) >> S_EFUSE_RD_ADDR) & M_EFUSE_RD_ADDR)
+
#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c
+#define A_DBG_EFUSE_RD_DATA 0x619c
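+
+/*
+ * A plausible, unverified read flow: write V_EFUSE_RD_ID(id) |
+ * V_EFUSE_RD_ADDR(addr) to A_DBG_EFUSE_RD_CTL, then read the selected
+ * word back from A_DBG_EFUSE_RD_DATA.
+ */
+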
+#define A_DBG_EFUSE_TIME_0 0x61a0
+
+#define S_EFUSE_TIME_1 16
+#define M_EFUSE_TIME_1 0xffffU
+#define V_EFUSE_TIME_1(x) ((x) << S_EFUSE_TIME_1)
+#define G_EFUSE_TIME_1(x) (((x) >> S_EFUSE_TIME_1) & M_EFUSE_TIME_1)
+
+#define S_EFUSE_TIME_0 0
+#define M_EFUSE_TIME_0 0xffffU
+#define V_EFUSE_TIME_0(x) ((x) << S_EFUSE_TIME_0)
+#define G_EFUSE_TIME_0(x) (((x) >> S_EFUSE_TIME_0) & M_EFUSE_TIME_0)
+
+#define A_DBG_EFUSE_TIME_1 0x61a4
+
+#define S_EFUSE_TIME_3 16
+#define M_EFUSE_TIME_3 0xffffU
+#define V_EFUSE_TIME_3(x) ((x) << S_EFUSE_TIME_3)
+#define G_EFUSE_TIME_3(x) (((x) >> S_EFUSE_TIME_3) & M_EFUSE_TIME_3)
+
+#define S_EFUSE_TIME_2 0
+#define M_EFUSE_TIME_2 0xffffU
+#define V_EFUSE_TIME_2(x) ((x) << S_EFUSE_TIME_2)
+#define G_EFUSE_TIME_2(x) (((x) >> S_EFUSE_TIME_2) & M_EFUSE_TIME_2)
+
+#define A_DBG_EFUSE_TIME_2 0x61a8
+
+#define S_EFUSE_TIME_5 16
+#define M_EFUSE_TIME_5 0xffffU
+#define V_EFUSE_TIME_5(x) ((x) << S_EFUSE_TIME_5)
+#define G_EFUSE_TIME_5(x) (((x) >> S_EFUSE_TIME_5) & M_EFUSE_TIME_5)
+
+#define S_EFUSE_TIME_4 0
+#define M_EFUSE_TIME_4 0xffffU
+#define V_EFUSE_TIME_4(x) ((x) << S_EFUSE_TIME_4)
+#define G_EFUSE_TIME_4(x) (((x) >> S_EFUSE_TIME_4) & M_EFUSE_TIME_4)
+
+#define A_DBG_EFUSE_TIME_3 0x61ac
+
+#define S_EFUSE_TIME_7 16
+#define M_EFUSE_TIME_7 0xffffU
+#define V_EFUSE_TIME_7(x) ((x) << S_EFUSE_TIME_7)
+#define G_EFUSE_TIME_7(x) (((x) >> S_EFUSE_TIME_7) & M_EFUSE_TIME_7)
+
+#define S_EFUSE_TIME_6 0
+#define M_EFUSE_TIME_6 0xffffU
+#define V_EFUSE_TIME_6(x) ((x) << S_EFUSE_TIME_6)
+#define G_EFUSE_TIME_6(x) (((x) >> S_EFUSE_TIME_6) & M_EFUSE_TIME_6)
+
+#define A_DBG_VREF_CTL 0x61b0
+
+#define S_VREF_SEL_1 15
+#define V_VREF_SEL_1(x) ((x) << S_VREF_SEL_1)
+#define F_VREF_SEL_1 V_VREF_SEL_1(1U)
+
+#define S_VREF_R_1 8
+#define M_VREF_R_1 0x7fU
+#define V_VREF_R_1(x) ((x) << S_VREF_R_1)
+#define G_VREF_R_1(x) (((x) >> S_VREF_R_1) & M_VREF_R_1)
+
+#define S_VREF_SEL_0 7
+#define V_VREF_SEL_0(x) ((x) << S_VREF_SEL_0)
+#define F_VREF_SEL_0 V_VREF_SEL_0(1U)
+
+#define S_VREF_R_0 0
+#define M_VREF_R_0 0x7fU
+#define V_VREF_R_0(x) ((x) << S_VREF_R_0)
+#define G_VREF_R_0(x) (((x) >> S_VREF_R_0) & M_VREF_R_0)
+
+#define A_DBG_FPGA_EFUSE_CTL 0x61b4
+#define A_DBG_FPGA_EFUSE_DATA 0x61b8
/* registers for module MC */
#define MC_BASE_ADDR 0x6200
@@ -16048,31 +20095,91 @@
#define V_THRESHOLD0_EN(x) ((x) << S_THRESHOLD0_EN)
#define F_THRESHOLD0_EN V_THRESHOLD0_EN(1U)
+#define A_MA_CLIENT0_PR_THRESHOLD 0x7700
+
+#define S_T7_THRESHOLD1_EN 31
+#define V_T7_THRESHOLD1_EN(x) ((x) << S_T7_THRESHOLD1_EN)
+#define F_T7_THRESHOLD1_EN V_T7_THRESHOLD1_EN(1U)
+
+#define S_T7_THRESHOLD1 16
+#define M_T7_THRESHOLD1 0x7fffU
+#define V_T7_THRESHOLD1(x) ((x) << S_T7_THRESHOLD1)
+#define G_T7_THRESHOLD1(x) (((x) >> S_T7_THRESHOLD1) & M_T7_THRESHOLD1)
+
+#define S_T7_THRESHOLD0_EN 15
+#define V_T7_THRESHOLD0_EN(x) ((x) << S_T7_THRESHOLD0_EN)
+#define F_T7_THRESHOLD0_EN V_T7_THRESHOLD0_EN(1U)
+
+#define S_T7_THRESHOLD0 0
+#define M_T7_THRESHOLD0 0x7fffU
+#define V_T7_THRESHOLD0(x) ((x) << S_T7_THRESHOLD0)
+#define G_T7_THRESHOLD0(x) (((x) >> S_T7_THRESHOLD0) & M_T7_THRESHOLD0)
+
#define A_MA_CLIENT0_WR_LATENCY_THRESHOLD 0x7704
+#define A_MA_CLIENT0_CR_THRESHOLD 0x7704
+
+#define S_CREDITSHAPER_EN 31
+#define V_CREDITSHAPER_EN(x) ((x) << S_CREDITSHAPER_EN)
+#define F_CREDITSHAPER_EN V_CREDITSHAPER_EN(1U)
+
+#define S_CREDIT_MAX 16
+#define M_CREDIT_MAX 0xfffU
+#define V_CREDIT_MAX(x) ((x) << S_CREDIT_MAX)
+#define G_CREDIT_MAX(x) (((x) >> S_CREDIT_MAX) & M_CREDIT_MAX)
+
+#define S_CREDIT_VAL 0
+#define M_CREDIT_VAL 0xfffU
+#define V_CREDIT_VAL(x) ((x) << S_CREDIT_VAL)
+#define G_CREDIT_VAL(x) (((x) >> S_CREDIT_VAL) & M_CREDIT_VAL)
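+
+/*
+ * Illustrative composition (assumed usage, placeholder `max'/`val',
+ * both 12-bit): enable the per-client credit shaper:
+ *
+ *	v = F_CREDITSHAPER_EN | V_CREDIT_MAX(max) | V_CREDIT_VAL(val);
+ */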
+
#define A_MA_CLIENT1_RD_LATENCY_THRESHOLD 0x7708
+#define A_MA_CLIENT1_PR_THRESHOLD 0x7708
#define A_MA_CLIENT1_WR_LATENCY_THRESHOLD 0x770c
+#define A_MA_CLIENT1_CR_THRESHOLD 0x770c
#define A_MA_CLIENT2_RD_LATENCY_THRESHOLD 0x7710
+#define A_MA_CLIENT2_PR_THRESHOLD 0x7710
#define A_MA_CLIENT2_WR_LATENCY_THRESHOLD 0x7714
+#define A_MA_CLIENT2_CR_THRESHOLD 0x7714
#define A_MA_CLIENT3_RD_LATENCY_THRESHOLD 0x7718
+#define A_MA_CLIENT3_PR_THRESHOLD 0x7718
#define A_MA_CLIENT3_WR_LATENCY_THRESHOLD 0x771c
+#define A_MA_CLIENT3_CR_THRESHOLD 0x771c
#define A_MA_CLIENT4_RD_LATENCY_THRESHOLD 0x7720
+#define A_MA_CLIENT4_PR_THRESHOLD 0x7720
#define A_MA_CLIENT4_WR_LATENCY_THRESHOLD 0x7724
+#define A_MA_CLIENT4_CR_THRESHOLD 0x7724
#define A_MA_CLIENT5_RD_LATENCY_THRESHOLD 0x7728
+#define A_MA_CLIENT5_PR_THRESHOLD 0x7728
#define A_MA_CLIENT5_WR_LATENCY_THRESHOLD 0x772c
+#define A_MA_CLIENT5_CR_THRESHOLD 0x772c
#define A_MA_CLIENT6_RD_LATENCY_THRESHOLD 0x7730
+#define A_MA_CLIENT6_PR_THRESHOLD 0x7730
#define A_MA_CLIENT6_WR_LATENCY_THRESHOLD 0x7734
+#define A_MA_CLIENT6_CR_THRESHOLD 0x7734
#define A_MA_CLIENT7_RD_LATENCY_THRESHOLD 0x7738
+#define A_MA_CLIENT7_PR_THRESHOLD 0x7738
#define A_MA_CLIENT7_WR_LATENCY_THRESHOLD 0x773c
+#define A_MA_CLIENT7_CR_THRESHOLD 0x773c
#define A_MA_CLIENT8_RD_LATENCY_THRESHOLD 0x7740
+#define A_MA_CLIENT8_PR_THRESHOLD 0x7740
#define A_MA_CLIENT8_WR_LATENCY_THRESHOLD 0x7744
+#define A_MA_CLIENT8_CR_THRESHOLD 0x7744
#define A_MA_CLIENT9_RD_LATENCY_THRESHOLD 0x7748
+#define A_MA_CLIENT9_PR_THRESHOLD 0x7748
#define A_MA_CLIENT9_WR_LATENCY_THRESHOLD 0x774c
+#define A_MA_CLIENT9_CR_THRESHOLD 0x774c
#define A_MA_CLIENT10_RD_LATENCY_THRESHOLD 0x7750
+#define A_MA_CLIENT10_PR_THRESHOLD 0x7750
#define A_MA_CLIENT10_WR_LATENCY_THRESHOLD 0x7754
+#define A_MA_CLIENT10_CR_THRESHOLD 0x7754
#define A_MA_CLIENT11_RD_LATENCY_THRESHOLD 0x7758
+#define A_MA_CLIENT11_PR_THRESHOLD 0x7758
#define A_MA_CLIENT11_WR_LATENCY_THRESHOLD 0x775c
+#define A_MA_CLIENT11_CR_THRESHOLD 0x775c
#define A_MA_CLIENT12_RD_LATENCY_THRESHOLD 0x7760
+#define A_MA_CLIENT12_PR_THRESHOLD 0x7760
#define A_MA_CLIENT12_WR_LATENCY_THRESHOLD 0x7764
+#define A_MA_CLIENT12_CR_THRESHOLD 0x7764
#define A_MA_SGE_TH0_DEBUG_CNT 0x7768
#define S_DBG_READ_DATA_CNT 24
@@ -16103,10 +20210,359 @@
#define A_MA_TP_TH1_DEBUG_CNT 0x7780
#define A_MA_LE_DEBUG_CNT 0x7784
#define A_MA_CIM_DEBUG_CNT 0x7788
+#define A_MA_CIM_TH0_DEBUG_CNT 0x7788
#define A_MA_PCIE_DEBUG_CNT 0x778c
#define A_MA_PMTX_DEBUG_CNT 0x7790
#define A_MA_PMRX_DEBUG_CNT 0x7794
#define A_MA_HMA_DEBUG_CNT 0x7798
+#define A_MA_COR_ERROR_ENABLE1 0x779c
+
+#define S_ARB4_COR_WRQUEUE_ERROR_EN 9
+#define V_ARB4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR_EN)
+#define F_ARB4_COR_WRQUEUE_ERROR_EN V_ARB4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR_EN 8
+#define V_ARB3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR_EN)
+#define F_ARB3_COR_WRQUEUE_ERROR_EN V_ARB3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR_EN 7
+#define V_ARB2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR_EN)
+#define F_ARB2_COR_WRQUEUE_ERROR_EN V_ARB2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR_EN 6
+#define V_ARB1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR_EN)
+#define F_ARB1_COR_WRQUEUE_ERROR_EN V_ARB1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR_EN 5
+#define V_ARB0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR_EN)
+#define F_ARB0_COR_WRQUEUE_ERROR_EN V_ARB0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR_EN 4
+#define V_ARB4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR_EN)
+#define F_ARB4_COR_RDQUEUE_ERROR_EN V_ARB4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR_EN 3
+#define V_ARB3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR_EN)
+#define F_ARB3_COR_RDQUEUE_ERROR_EN V_ARB3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR_EN 2
+#define V_ARB2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR_EN)
+#define F_ARB2_COR_RDQUEUE_ERROR_EN V_ARB2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR_EN 1
+#define V_ARB1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR_EN)
+#define F_ARB1_COR_RDQUEUE_ERROR_EN V_ARB1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR_EN 0
+#define V_ARB0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR_EN)
+#define F_ARB0_COR_RDQUEUE_ERROR_EN V_ARB0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS1 0x77a0
+
+#define S_ARB4_COR_WRQUEUE_ERROR 9
+#define V_ARB4_COR_WRQUEUE_ERROR(x) ((x) << S_ARB4_COR_WRQUEUE_ERROR)
+#define F_ARB4_COR_WRQUEUE_ERROR V_ARB4_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_WRQUEUE_ERROR 8
+#define V_ARB3_COR_WRQUEUE_ERROR(x) ((x) << S_ARB3_COR_WRQUEUE_ERROR)
+#define F_ARB3_COR_WRQUEUE_ERROR V_ARB3_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_WRQUEUE_ERROR 7
+#define V_ARB2_COR_WRQUEUE_ERROR(x) ((x) << S_ARB2_COR_WRQUEUE_ERROR)
+#define F_ARB2_COR_WRQUEUE_ERROR V_ARB2_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_WRQUEUE_ERROR 6
+#define V_ARB1_COR_WRQUEUE_ERROR(x) ((x) << S_ARB1_COR_WRQUEUE_ERROR)
+#define F_ARB1_COR_WRQUEUE_ERROR V_ARB1_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_WRQUEUE_ERROR 5
+#define V_ARB0_COR_WRQUEUE_ERROR(x) ((x) << S_ARB0_COR_WRQUEUE_ERROR)
+#define F_ARB0_COR_WRQUEUE_ERROR V_ARB0_COR_WRQUEUE_ERROR(1U)
+
+#define S_ARB4_COR_RDQUEUE_ERROR 4
+#define V_ARB4_COR_RDQUEUE_ERROR(x) ((x) << S_ARB4_COR_RDQUEUE_ERROR)
+#define F_ARB4_COR_RDQUEUE_ERROR V_ARB4_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB3_COR_RDQUEUE_ERROR 3
+#define V_ARB3_COR_RDQUEUE_ERROR(x) ((x) << S_ARB3_COR_RDQUEUE_ERROR)
+#define F_ARB3_COR_RDQUEUE_ERROR V_ARB3_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB2_COR_RDQUEUE_ERROR 2
+#define V_ARB2_COR_RDQUEUE_ERROR(x) ((x) << S_ARB2_COR_RDQUEUE_ERROR)
+#define F_ARB2_COR_RDQUEUE_ERROR V_ARB2_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB1_COR_RDQUEUE_ERROR 1
+#define V_ARB1_COR_RDQUEUE_ERROR(x) ((x) << S_ARB1_COR_RDQUEUE_ERROR)
+#define F_ARB1_COR_RDQUEUE_ERROR V_ARB1_COR_RDQUEUE_ERROR(1U)
+
+#define S_ARB0_COR_RDQUEUE_ERROR 0
+#define V_ARB0_COR_RDQUEUE_ERROR(x) ((x) << S_ARB0_COR_RDQUEUE_ERROR)
+#define F_ARB0_COR_RDQUEUE_ERROR V_ARB0_COR_RDQUEUE_ERROR(1U)
+
+#define A_MA_DBG_CTL 0x77a4
+
+#define S_DATAH_SEL 20
+#define V_DATAH_SEL(x) ((x) << S_DATAH_SEL)
+#define F_DATAH_SEL V_DATAH_SEL(1U)
+
+#define S_EN_DBG 16
+#define V_EN_DBG(x) ((x) << S_EN_DBG)
+#define F_EN_DBG V_EN_DBG(1U)
+
+#define S_T7_SEL 0
+#define M_T7_SEL 0xffU
+#define V_T7_SEL(x) ((x) << S_T7_SEL)
+#define G_T7_SEL(x) (((x) >> S_T7_SEL) & M_T7_SEL)
+
+#define A_MA_DBG_DATA 0x77a8
+#define A_MA_COR_ERROR_ENABLE2 0x77b0
+
+#define S_CL14_COR_WRQUEUE_ERROR_EN 14
+#define V_CL14_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_WRQUEUE_ERROR_EN)
+#define F_CL14_COR_WRQUEUE_ERROR_EN V_CL14_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR_EN 13
+#define V_CL13_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_WRQUEUE_ERROR_EN)
+#define F_CL13_COR_WRQUEUE_ERROR_EN V_CL13_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR_EN 12
+#define V_CL12_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_WRQUEUE_ERROR_EN)
+#define F_CL12_COR_WRQUEUE_ERROR_EN V_CL12_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR_EN 11
+#define V_CL11_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_WRQUEUE_ERROR_EN)
+#define F_CL11_COR_WRQUEUE_ERROR_EN V_CL11_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR_EN 10
+#define V_CL10_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_WRQUEUE_ERROR_EN)
+#define F_CL10_COR_WRQUEUE_ERROR_EN V_CL10_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR_EN 9
+#define V_CL9_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_WRQUEUE_ERROR_EN)
+#define F_CL9_COR_WRQUEUE_ERROR_EN V_CL9_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR_EN 8
+#define V_CL8_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_WRQUEUE_ERROR_EN)
+#define F_CL8_COR_WRQUEUE_ERROR_EN V_CL8_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR_EN 7
+#define V_CL7_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_WRQUEUE_ERROR_EN)
+#define F_CL7_COR_WRQUEUE_ERROR_EN V_CL7_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR_EN 6
+#define V_CL6_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_WRQUEUE_ERROR_EN)
+#define F_CL6_COR_WRQUEUE_ERROR_EN V_CL6_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR_EN 5
+#define V_CL5_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_WRQUEUE_ERROR_EN)
+#define F_CL5_COR_WRQUEUE_ERROR_EN V_CL5_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR_EN 4
+#define V_CL4_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_WRQUEUE_ERROR_EN)
+#define F_CL4_COR_WRQUEUE_ERROR_EN V_CL4_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR_EN 3
+#define V_CL3_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_WRQUEUE_ERROR_EN)
+#define F_CL3_COR_WRQUEUE_ERROR_EN V_CL3_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR_EN 2
+#define V_CL2_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_WRQUEUE_ERROR_EN)
+#define F_CL2_COR_WRQUEUE_ERROR_EN V_CL2_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR_EN 1
+#define V_CL1_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_WRQUEUE_ERROR_EN)
+#define F_CL1_COR_WRQUEUE_ERROR_EN V_CL1_COR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR_EN 0
+#define V_CL0_COR_WRQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_WRQUEUE_ERROR_EN)
+#define F_CL0_COR_WRQUEUE_ERROR_EN V_CL0_COR_WRQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS2 0x77b4
+
+#define S_CL14_COR_WRQUEUE_ERROR 14
+#define V_CL14_COR_WRQUEUE_ERROR(x) ((x) << S_CL14_COR_WRQUEUE_ERROR)
+#define F_CL14_COR_WRQUEUE_ERROR V_CL14_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_COR_WRQUEUE_ERROR 13
+#define V_CL13_COR_WRQUEUE_ERROR(x) ((x) << S_CL13_COR_WRQUEUE_ERROR)
+#define F_CL13_COR_WRQUEUE_ERROR V_CL13_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_COR_WRQUEUE_ERROR 12
+#define V_CL12_COR_WRQUEUE_ERROR(x) ((x) << S_CL12_COR_WRQUEUE_ERROR)
+#define F_CL12_COR_WRQUEUE_ERROR V_CL12_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_COR_WRQUEUE_ERROR 11
+#define V_CL11_COR_WRQUEUE_ERROR(x) ((x) << S_CL11_COR_WRQUEUE_ERROR)
+#define F_CL11_COR_WRQUEUE_ERROR V_CL11_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL10_COR_WRQUEUE_ERROR 10
+#define V_CL10_COR_WRQUEUE_ERROR(x) ((x) << S_CL10_COR_WRQUEUE_ERROR)
+#define F_CL10_COR_WRQUEUE_ERROR V_CL10_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL9_COR_WRQUEUE_ERROR 9
+#define V_CL9_COR_WRQUEUE_ERROR(x) ((x) << S_CL9_COR_WRQUEUE_ERROR)
+#define F_CL9_COR_WRQUEUE_ERROR V_CL9_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL8_COR_WRQUEUE_ERROR 8
+#define V_CL8_COR_WRQUEUE_ERROR(x) ((x) << S_CL8_COR_WRQUEUE_ERROR)
+#define F_CL8_COR_WRQUEUE_ERROR V_CL8_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL7_COR_WRQUEUE_ERROR 7
+#define V_CL7_COR_WRQUEUE_ERROR(x) ((x) << S_CL7_COR_WRQUEUE_ERROR)
+#define F_CL7_COR_WRQUEUE_ERROR V_CL7_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL6_COR_WRQUEUE_ERROR 6
+#define V_CL6_COR_WRQUEUE_ERROR(x) ((x) << S_CL6_COR_WRQUEUE_ERROR)
+#define F_CL6_COR_WRQUEUE_ERROR V_CL6_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL5_COR_WRQUEUE_ERROR 5
+#define V_CL5_COR_WRQUEUE_ERROR(x) ((x) << S_CL5_COR_WRQUEUE_ERROR)
+#define F_CL5_COR_WRQUEUE_ERROR V_CL5_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL4_COR_WRQUEUE_ERROR 4
+#define V_CL4_COR_WRQUEUE_ERROR(x) ((x) << S_CL4_COR_WRQUEUE_ERROR)
+#define F_CL4_COR_WRQUEUE_ERROR V_CL4_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL3_COR_WRQUEUE_ERROR 3
+#define V_CL3_COR_WRQUEUE_ERROR(x) ((x) << S_CL3_COR_WRQUEUE_ERROR)
+#define F_CL3_COR_WRQUEUE_ERROR V_CL3_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL2_COR_WRQUEUE_ERROR 2
+#define V_CL2_COR_WRQUEUE_ERROR(x) ((x) << S_CL2_COR_WRQUEUE_ERROR)
+#define F_CL2_COR_WRQUEUE_ERROR V_CL2_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL1_COR_WRQUEUE_ERROR 1
+#define V_CL1_COR_WRQUEUE_ERROR(x) ((x) << S_CL1_COR_WRQUEUE_ERROR)
+#define F_CL1_COR_WRQUEUE_ERROR V_CL1_COR_WRQUEUE_ERROR(1U)
+
+#define S_CL0_COR_WRQUEUE_ERROR 0
+#define V_CL0_COR_WRQUEUE_ERROR(x) ((x) << S_CL0_COR_WRQUEUE_ERROR)
+#define F_CL0_COR_WRQUEUE_ERROR V_CL0_COR_WRQUEUE_ERROR(1U)
+
+#define A_MA_COR_ERROR_ENABLE3 0x77b8
+
+#define S_CL14_COR_RDQUEUE_ERROR_EN 14
+#define V_CL14_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_COR_RDQUEUE_ERROR_EN)
+#define F_CL14_COR_RDQUEUE_ERROR_EN V_CL14_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR_EN 13
+#define V_CL13_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_COR_RDQUEUE_ERROR_EN)
+#define F_CL13_COR_RDQUEUE_ERROR_EN V_CL13_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR_EN 12
+#define V_CL12_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_COR_RDQUEUE_ERROR_EN)
+#define F_CL12_COR_RDQUEUE_ERROR_EN V_CL12_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR_EN 11
+#define V_CL11_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_COR_RDQUEUE_ERROR_EN)
+#define F_CL11_COR_RDQUEUE_ERROR_EN V_CL11_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR_EN 10
+#define V_CL10_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL10_COR_RDQUEUE_ERROR_EN)
+#define F_CL10_COR_RDQUEUE_ERROR_EN V_CL10_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR_EN 9
+#define V_CL9_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL9_COR_RDQUEUE_ERROR_EN)
+#define F_CL9_COR_RDQUEUE_ERROR_EN V_CL9_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR_EN 8
+#define V_CL8_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL8_COR_RDQUEUE_ERROR_EN)
+#define F_CL8_COR_RDQUEUE_ERROR_EN V_CL8_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR_EN 7
+#define V_CL7_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL7_COR_RDQUEUE_ERROR_EN)
+#define F_CL7_COR_RDQUEUE_ERROR_EN V_CL7_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR_EN 6
+#define V_CL6_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL6_COR_RDQUEUE_ERROR_EN)
+#define F_CL6_COR_RDQUEUE_ERROR_EN V_CL6_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR_EN 5
+#define V_CL5_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL5_COR_RDQUEUE_ERROR_EN)
+#define F_CL5_COR_RDQUEUE_ERROR_EN V_CL5_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR_EN 4
+#define V_CL4_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL4_COR_RDQUEUE_ERROR_EN)
+#define F_CL4_COR_RDQUEUE_ERROR_EN V_CL4_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR_EN 3
+#define V_CL3_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL3_COR_RDQUEUE_ERROR_EN)
+#define F_CL3_COR_RDQUEUE_ERROR_EN V_CL3_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR_EN 2
+#define V_CL2_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL2_COR_RDQUEUE_ERROR_EN)
+#define F_CL2_COR_RDQUEUE_ERROR_EN V_CL2_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR_EN 1
+#define V_CL1_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL1_COR_RDQUEUE_ERROR_EN)
+#define F_CL1_COR_RDQUEUE_ERROR_EN V_CL1_COR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR_EN 0
+#define V_CL0_COR_RDQUEUE_ERROR_EN(x) ((x) << S_CL0_COR_RDQUEUE_ERROR_EN)
+#define F_CL0_COR_RDQUEUE_ERROR_EN V_CL0_COR_RDQUEUE_ERROR_EN(1U)
+
+#define A_MA_COR_ERROR_STATUS3 0x77bc
+
+#define S_CL14_COR_RDQUEUE_ERROR 14
+#define V_CL14_COR_RDQUEUE_ERROR(x) ((x) << S_CL14_COR_RDQUEUE_ERROR)
+#define F_CL14_COR_RDQUEUE_ERROR V_CL14_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_COR_RDQUEUE_ERROR 13
+#define V_CL13_COR_RDQUEUE_ERROR(x) ((x) << S_CL13_COR_RDQUEUE_ERROR)
+#define F_CL13_COR_RDQUEUE_ERROR V_CL13_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_COR_RDQUEUE_ERROR 12
+#define V_CL12_COR_RDQUEUE_ERROR(x) ((x) << S_CL12_COR_RDQUEUE_ERROR)
+#define F_CL12_COR_RDQUEUE_ERROR V_CL12_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_COR_RDQUEUE_ERROR 11
+#define V_CL11_COR_RDQUEUE_ERROR(x) ((x) << S_CL11_COR_RDQUEUE_ERROR)
+#define F_CL11_COR_RDQUEUE_ERROR V_CL11_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL10_COR_RDQUEUE_ERROR 10
+#define V_CL10_COR_RDQUEUE_ERROR(x) ((x) << S_CL10_COR_RDQUEUE_ERROR)
+#define F_CL10_COR_RDQUEUE_ERROR V_CL10_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL9_COR_RDQUEUE_ERROR 9
+#define V_CL9_COR_RDQUEUE_ERROR(x) ((x) << S_CL9_COR_RDQUEUE_ERROR)
+#define F_CL9_COR_RDQUEUE_ERROR V_CL9_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL8_COR_RDQUEUE_ERROR 8
+#define V_CL8_COR_RDQUEUE_ERROR(x) ((x) << S_CL8_COR_RDQUEUE_ERROR)
+#define F_CL8_COR_RDQUEUE_ERROR V_CL8_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL7_COR_RDQUEUE_ERROR 7
+#define V_CL7_COR_RDQUEUE_ERROR(x) ((x) << S_CL7_COR_RDQUEUE_ERROR)
+#define F_CL7_COR_RDQUEUE_ERROR V_CL7_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL6_COR_RDQUEUE_ERROR 6
+#define V_CL6_COR_RDQUEUE_ERROR(x) ((x) << S_CL6_COR_RDQUEUE_ERROR)
+#define F_CL6_COR_RDQUEUE_ERROR V_CL6_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL5_COR_RDQUEUE_ERROR 5
+#define V_CL5_COR_RDQUEUE_ERROR(x) ((x) << S_CL5_COR_RDQUEUE_ERROR)
+#define F_CL5_COR_RDQUEUE_ERROR V_CL5_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL4_COR_RDQUEUE_ERROR 4
+#define V_CL4_COR_RDQUEUE_ERROR(x) ((x) << S_CL4_COR_RDQUEUE_ERROR)
+#define F_CL4_COR_RDQUEUE_ERROR V_CL4_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL3_COR_RDQUEUE_ERROR 3
+#define V_CL3_COR_RDQUEUE_ERROR(x) ((x) << S_CL3_COR_RDQUEUE_ERROR)
+#define F_CL3_COR_RDQUEUE_ERROR V_CL3_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL2_COR_RDQUEUE_ERROR 2
+#define V_CL2_COR_RDQUEUE_ERROR(x) ((x) << S_CL2_COR_RDQUEUE_ERROR)
+#define F_CL2_COR_RDQUEUE_ERROR V_CL2_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL1_COR_RDQUEUE_ERROR 1
+#define V_CL1_COR_RDQUEUE_ERROR(x) ((x) << S_CL1_COR_RDQUEUE_ERROR)
+#define F_CL1_COR_RDQUEUE_ERROR V_CL1_COR_RDQUEUE_ERROR(1U)
+
+#define S_CL0_COR_RDQUEUE_ERROR 0
+#define V_CL0_COR_RDQUEUE_ERROR(x) ((x) << S_CL0_COR_RDQUEUE_ERROR)
+#define F_CL0_COR_RDQUEUE_ERROR V_CL0_COR_RDQUEUE_ERROR(1U)
+
#define A_MA_EDRAM0_BAR 0x77c0
#define S_EDRAM0_BASE 16
@@ -16119,6 +20575,16 @@
#define V_EDRAM0_SIZE(x) ((x) << S_EDRAM0_SIZE)
#define G_EDRAM0_SIZE(x) (((x) >> S_EDRAM0_SIZE) & M_EDRAM0_SIZE)
+#define S_T7_EDRAM0_BASE 16
+#define M_T7_EDRAM0_BASE 0xffffU
+#define V_T7_EDRAM0_BASE(x) ((x) << S_T7_EDRAM0_BASE)
+#define G_T7_EDRAM0_BASE(x) (((x) >> S_T7_EDRAM0_BASE) & M_T7_EDRAM0_BASE)
+
+#define S_T7_EDRAM0_SIZE 0
+#define M_T7_EDRAM0_SIZE 0xffffU
+#define V_T7_EDRAM0_SIZE(x) ((x) << S_T7_EDRAM0_SIZE)
+#define G_T7_EDRAM0_SIZE(x) (((x) >> S_T7_EDRAM0_SIZE) & M_T7_EDRAM0_SIZE)
+
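The T7_* variants above widen the EDRAM0 base and size fields to a full 16 bits each. A sketch of decoding the BAR into a byte range; treating both values as megabyte counts mirrors how the driver interprets the narrower pre-T7 fields, but that unit is an assumption here, as are adap and t4_read_reg().

/* Sketch: decode the T7 EDRAM0 BAR, assuming MB-granularity fields. */
uint32_t bar = t4_read_reg(adap, A_MA_EDRAM0_BAR);
uint64_t base = (uint64_t)G_T7_EDRAM0_BASE(bar) << 20; /* MB -> bytes */
uint64_t size = (uint64_t)G_T7_EDRAM0_SIZE(bar) << 20;
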
#define A_MA_EDRAM1_BAR 0x77c4
#define S_EDRAM1_BASE 16
@@ -16131,6 +20597,16 @@
#define V_EDRAM1_SIZE(x) ((x) << S_EDRAM1_SIZE)
#define G_EDRAM1_SIZE(x) (((x) >> S_EDRAM1_SIZE) & M_EDRAM1_SIZE)
+#define S_T7_EDRAM1_BASE 16
+#define M_T7_EDRAM1_BASE 0xffffU
+#define V_T7_EDRAM1_BASE(x) ((x) << S_T7_EDRAM1_BASE)
+#define G_T7_EDRAM1_BASE(x) (((x) >> S_T7_EDRAM1_BASE) & M_T7_EDRAM1_BASE)
+
+#define S_T7_EDRAM1_SIZE 0
+#define M_T7_EDRAM1_SIZE 0xffffU
+#define V_T7_EDRAM1_SIZE(x) ((x) << S_T7_EDRAM1_SIZE)
+#define G_T7_EDRAM1_SIZE(x) (((x) >> S_T7_EDRAM1_SIZE) & M_T7_EDRAM1_SIZE)
+
#define A_MA_EXT_MEMORY_BAR 0x77c8
#define S_EXT_MEM_BASE 16
@@ -16155,6 +20631,16 @@
#define V_EXT_MEM0_SIZE(x) ((x) << S_EXT_MEM0_SIZE)
#define G_EXT_MEM0_SIZE(x) (((x) >> S_EXT_MEM0_SIZE) & M_EXT_MEM0_SIZE)
+#define S_T7_EXT_MEM0_BASE 16
+#define M_T7_EXT_MEM0_BASE 0xffffU
+#define V_T7_EXT_MEM0_BASE(x) ((x) << S_T7_EXT_MEM0_BASE)
+#define G_T7_EXT_MEM0_BASE(x) (((x) >> S_T7_EXT_MEM0_BASE) & M_T7_EXT_MEM0_BASE)
+
+#define S_T7_EXT_MEM0_SIZE 0
+#define M_T7_EXT_MEM0_SIZE 0xffffU
+#define V_T7_EXT_MEM0_SIZE(x) ((x) << S_T7_EXT_MEM0_SIZE)
+#define G_T7_EXT_MEM0_SIZE(x) (((x) >> S_T7_EXT_MEM0_SIZE) & M_T7_EXT_MEM0_SIZE)
+
#define A_MA_HOST_MEMORY_BAR 0x77cc
#define S_HMA_BASE 16
@@ -16167,6 +20653,16 @@
#define V_HMA_SIZE(x) ((x) << S_HMA_SIZE)
#define G_HMA_SIZE(x) (((x) >> S_HMA_SIZE) & M_HMA_SIZE)
+#define S_HMATARGETBASE 16
+#define M_HMATARGETBASE 0xffffU
+#define V_HMATARGETBASE(x) ((x) << S_HMATARGETBASE)
+#define G_HMATARGETBASE(x) (((x) >> S_HMATARGETBASE) & M_HMATARGETBASE)
+
+#define S_T7_HMA_SIZE 0
+#define M_T7_HMA_SIZE 0xffffU
+#define V_T7_HMA_SIZE(x) ((x) << S_T7_HMA_SIZE)
+#define G_T7_HMA_SIZE(x) (((x) >> S_T7_HMA_SIZE) & M_T7_HMA_SIZE)
+
#define A_MA_EXT_MEM_PAGE_SIZE 0x77d0
#define S_BRC_MODE 2
@@ -16290,6 +20786,14 @@
#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT)
#define F_MC_SPLIT V_MC_SPLIT(1U)
+#define S_EDC512 8
+#define V_EDC512(x) ((x) << S_EDC512)
+#define F_EDC512 V_EDC512(1U)
+
+#define S_MC_SPLIT_BOUNDARY 7
+#define V_MC_SPLIT_BOUNDARY(x) ((x) << S_MC_SPLIT_BOUNDARY)
+#define F_MC_SPLIT_BOUNDARY V_MC_SPLIT_BOUNDARY(1U)
+
#define A_MA_INT_ENABLE 0x77dc
#define S_MEM_PERR_INT_ENABLE 1
@@ -16475,6 +20979,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR_EN V_CL0_PAR_RDQUEUE_ERROR_EN(1U)
#define A_MA_PARITY_ERROR_ENABLE1 0x77f0
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR_EN 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR_EN V_T7_ARB4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR_EN V_T7_ARB3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR_EN V_T7_ARB2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR_EN V_T7_ARB1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR_EN V_T7_ARB0_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR_EN 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR_EN V_T7_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR_EN 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR_EN V_T7_ARB3_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR_EN 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR_EN V_T7_ARB2_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR_EN 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR_EN V_T7_ARB1_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR_EN 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR_EN)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR_EN V_T7_ARB0_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR_EN 1
+#define V_T7_TP_DMARBT_PAR_ERROR_EN(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR_EN)
+#define F_T7_TP_DMARBT_PAR_ERROR_EN V_T7_TP_DMARBT_PAR_ERROR_EN(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR_EN 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR_EN(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR_EN)
+#define F_T7_LOGIC_FIFO_PAR_ERROR_EN V_T7_LOGIC_FIFO_PAR_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS 0x77f4
#define S_TP_DMARBT_PAR_ERROR 31
@@ -16606,6 +21159,55 @@
#define F_CL0_PAR_RDQUEUE_ERROR V_CL0_PAR_RDQUEUE_ERROR(1U)
#define A_MA_PARITY_ERROR_STATUS1 0x77f4
+
+#define S_T7_ARB4_PAR_WRQUEUE_ERROR 11
+#define V_T7_ARB4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB4_PAR_WRQUEUE_ERROR V_T7_ARB4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_WRQUEUE_ERROR 10
+#define V_T7_ARB3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB3_PAR_WRQUEUE_ERROR V_T7_ARB3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_WRQUEUE_ERROR 9
+#define V_T7_ARB2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB2_PAR_WRQUEUE_ERROR V_T7_ARB2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_WRQUEUE_ERROR 8
+#define V_T7_ARB1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB1_PAR_WRQUEUE_ERROR V_T7_ARB1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_WRQUEUE_ERROR 7
+#define V_T7_ARB0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_WRQUEUE_ERROR)
+#define F_T7_ARB0_PAR_WRQUEUE_ERROR V_T7_ARB0_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_ARB4_PAR_RDQUEUE_ERROR 6
+#define V_T7_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB4_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB4_PAR_RDQUEUE_ERROR V_T7_ARB4_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB3_PAR_RDQUEUE_ERROR 5
+#define V_T7_ARB3_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB3_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB3_PAR_RDQUEUE_ERROR V_T7_ARB3_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB2_PAR_RDQUEUE_ERROR 4
+#define V_T7_ARB2_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB2_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB2_PAR_RDQUEUE_ERROR V_T7_ARB2_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB1_PAR_RDQUEUE_ERROR 3
+#define V_T7_ARB1_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB1_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB1_PAR_RDQUEUE_ERROR V_T7_ARB1_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_ARB0_PAR_RDQUEUE_ERROR 2
+#define V_T7_ARB0_PAR_RDQUEUE_ERROR(x) ((x) << S_T7_ARB0_PAR_RDQUEUE_ERROR)
+#define F_T7_ARB0_PAR_RDQUEUE_ERROR V_T7_ARB0_PAR_RDQUEUE_ERROR(1U)
+
+#define S_T7_TP_DMARBT_PAR_ERROR 1
+#define V_T7_TP_DMARBT_PAR_ERROR(x) ((x) << S_T7_TP_DMARBT_PAR_ERROR)
+#define F_T7_TP_DMARBT_PAR_ERROR V_T7_TP_DMARBT_PAR_ERROR(1U)
+
+#define S_T7_LOGIC_FIFO_PAR_ERROR 0
+#define V_T7_LOGIC_FIFO_PAR_ERROR(x) ((x) << S_T7_LOGIC_FIFO_PAR_ERROR)
+#define F_T7_LOGIC_FIFO_PAR_ERROR V_T7_LOGIC_FIFO_PAR_ERROR(1U)
+
#define A_MA_SGE_PCIE_COHERANCY_CTRL 0x77f8
#define S_BONUS_REG 6
@@ -16653,6 +21255,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR_EN)
#define F_ARB4_PAR_RDQUEUE_ERROR_EN V_ARB4_PAR_RDQUEUE_ERROR_EN(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR_EN 14
+#define V_CL14_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR_EN)
+#define F_CL14_PAR_WRQUEUE_ERROR_EN V_CL14_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR_EN 13
+#define V_CL13_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR_EN)
+#define F_CL13_PAR_WRQUEUE_ERROR_EN V_CL13_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR_EN 12
+#define V_CL12_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR_EN)
+#define F_CL12_PAR_WRQUEUE_ERROR_EN V_CL12_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR_EN 11
+#define V_CL11_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR_EN)
+#define F_CL11_PAR_WRQUEUE_ERROR_EN V_CL11_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR_EN 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR_EN V_T7_CL10_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR_EN 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR_EN V_T7_CL9_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR_EN 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR_EN V_T7_CL8_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR_EN 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR_EN V_T7_CL7_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR_EN 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR_EN V_T7_CL6_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR_EN 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR_EN V_T7_CL5_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR_EN 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR_EN V_T7_CL4_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR_EN 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR_EN V_T7_CL3_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR_EN 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR_EN V_T7_CL2_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR_EN 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR_EN V_T7_CL1_PAR_WRQUEUE_ERROR_EN(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR_EN 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR_EN(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR_EN)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR_EN V_T7_CL0_PAR_WRQUEUE_ERROR_EN(1U)
+
#define A_MA_PARITY_ERROR_STATUS2 0x7804
#define S_ARB4_PAR_WRQUEUE_ERROR 1
@@ -16663,6 +21325,66 @@
#define V_ARB4_PAR_RDQUEUE_ERROR(x) ((x) << S_ARB4_PAR_RDQUEUE_ERROR)
#define F_ARB4_PAR_RDQUEUE_ERROR V_ARB4_PAR_RDQUEUE_ERROR(1U)
+#define S_CL14_PAR_WRQUEUE_ERROR 14
+#define V_CL14_PAR_WRQUEUE_ERROR(x) ((x) << S_CL14_PAR_WRQUEUE_ERROR)
+#define F_CL14_PAR_WRQUEUE_ERROR V_CL14_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_WRQUEUE_ERROR 13
+#define V_CL13_PAR_WRQUEUE_ERROR(x) ((x) << S_CL13_PAR_WRQUEUE_ERROR)
+#define F_CL13_PAR_WRQUEUE_ERROR V_CL13_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_WRQUEUE_ERROR 12
+#define V_CL12_PAR_WRQUEUE_ERROR(x) ((x) << S_CL12_PAR_WRQUEUE_ERROR)
+#define F_CL12_PAR_WRQUEUE_ERROR V_CL12_PAR_WRQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_WRQUEUE_ERROR 11
+#define V_CL11_PAR_WRQUEUE_ERROR(x) ((x) << S_CL11_PAR_WRQUEUE_ERROR)
+#define F_CL11_PAR_WRQUEUE_ERROR V_CL11_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL10_PAR_WRQUEUE_ERROR 10
+#define V_T7_CL10_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL10_PAR_WRQUEUE_ERROR)
+#define F_T7_CL10_PAR_WRQUEUE_ERROR V_T7_CL10_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL9_PAR_WRQUEUE_ERROR 9
+#define V_T7_CL9_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL9_PAR_WRQUEUE_ERROR)
+#define F_T7_CL9_PAR_WRQUEUE_ERROR V_T7_CL9_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL8_PAR_WRQUEUE_ERROR 8
+#define V_T7_CL8_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL8_PAR_WRQUEUE_ERROR)
+#define F_T7_CL8_PAR_WRQUEUE_ERROR V_T7_CL8_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL7_PAR_WRQUEUE_ERROR 7
+#define V_T7_CL7_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL7_PAR_WRQUEUE_ERROR)
+#define F_T7_CL7_PAR_WRQUEUE_ERROR V_T7_CL7_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL6_PAR_WRQUEUE_ERROR 6
+#define V_T7_CL6_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL6_PAR_WRQUEUE_ERROR)
+#define F_T7_CL6_PAR_WRQUEUE_ERROR V_T7_CL6_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL5_PAR_WRQUEUE_ERROR 5
+#define V_T7_CL5_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL5_PAR_WRQUEUE_ERROR)
+#define F_T7_CL5_PAR_WRQUEUE_ERROR V_T7_CL5_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL4_PAR_WRQUEUE_ERROR 4
+#define V_T7_CL4_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL4_PAR_WRQUEUE_ERROR)
+#define F_T7_CL4_PAR_WRQUEUE_ERROR V_T7_CL4_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL3_PAR_WRQUEUE_ERROR 3
+#define V_T7_CL3_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL3_PAR_WRQUEUE_ERROR)
+#define F_T7_CL3_PAR_WRQUEUE_ERROR V_T7_CL3_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL2_PAR_WRQUEUE_ERROR 2
+#define V_T7_CL2_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL2_PAR_WRQUEUE_ERROR)
+#define F_T7_CL2_PAR_WRQUEUE_ERROR V_T7_CL2_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL1_PAR_WRQUEUE_ERROR 1
+#define V_T7_CL1_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL1_PAR_WRQUEUE_ERROR)
+#define F_T7_CL1_PAR_WRQUEUE_ERROR V_T7_CL1_PAR_WRQUEUE_ERROR(1U)
+
+#define S_T7_CL0_PAR_WRQUEUE_ERROR 0
+#define V_T7_CL0_PAR_WRQUEUE_ERROR(x) ((x) << S_T7_CL0_PAR_WRQUEUE_ERROR)
+#define F_T7_CL0_PAR_WRQUEUE_ERROR V_T7_CL0_PAR_WRQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY1_BAR 0x7808
#define S_EXT_MEM1_BASE 16
@@ -16675,6 +21397,16 @@
#define V_EXT_MEM1_SIZE(x) ((x) << S_EXT_MEM1_SIZE)
#define G_EXT_MEM1_SIZE(x) (((x) >> S_EXT_MEM1_SIZE) & M_EXT_MEM1_SIZE)
+#define S_T7_EXT_MEM1_BASE 16
+#define M_T7_EXT_MEM1_BASE 0xffffU
+#define V_T7_EXT_MEM1_BASE(x) ((x) << S_T7_EXT_MEM1_BASE)
+#define G_T7_EXT_MEM1_BASE(x) (((x) >> S_T7_EXT_MEM1_BASE) & M_T7_EXT_MEM1_BASE)
+
+#define S_T7_EXT_MEM1_SIZE 0
+#define M_T7_EXT_MEM1_SIZE 0xffffU
+#define V_T7_EXT_MEM1_SIZE(x) ((x) << S_T7_EXT_MEM1_SIZE)
+#define G_T7_EXT_MEM1_SIZE(x) (((x) >> S_T7_EXT_MEM1_SIZE) & M_T7_EXT_MEM1_SIZE)
+
#define A_MA_PMTX_THROTTLE 0x780c
#define S_FL_ENABLE 31
@@ -16696,6 +21428,7 @@
#define A_MA_TP_TH1_WRDATA_CNT 0x782c
#define A_MA_LE_WRDATA_CNT 0x7830
#define A_MA_CIM_WRDATA_CNT 0x7834
+#define A_MA_CIM_TH0_WRDATA_CNT 0x7834
#define A_MA_PCIE_WRDATA_CNT 0x7838
#define A_MA_PMTX_WRDATA_CNT 0x783c
#define A_MA_PMRX_WRDATA_CNT 0x7840
@@ -16709,6 +21442,7 @@
#define A_MA_TP_TH1_RDDATA_CNT 0x7860
#define A_MA_LE_RDDATA_CNT 0x7864
#define A_MA_CIM_RDDATA_CNT 0x7868
+#define A_MA_CIM_TH0_RDDATA_CNT 0x7868
#define A_MA_PCIE_RDDATA_CNT 0x786c
#define A_MA_PMTX_RDDATA_CNT 0x7870
#define A_MA_PMRX_RDDATA_CNT 0x7874
@@ -16733,7 +21467,43 @@
#define F_DDR_MODE V_DDR_MODE(1U)
#define A_MA_EDRAM1_WRDATA_CNT1 0x7884
+#define A_MA_PARITY_ERROR_ENABLE3 0x7884
+
+#define S_CL14_PAR_RDQUEUE_ERROR_EN 14
+#define V_CL14_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR_EN)
+#define F_CL14_PAR_RDQUEUE_ERROR_EN V_CL14_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR_EN 13
+#define V_CL13_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR_EN)
+#define F_CL13_PAR_RDQUEUE_ERROR_EN V_CL13_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR_EN 12
+#define V_CL12_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR_EN)
+#define F_CL12_PAR_RDQUEUE_ERROR_EN V_CL12_PAR_RDQUEUE_ERROR_EN(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR_EN 11
+#define V_CL11_PAR_RDQUEUE_ERROR_EN(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR_EN)
+#define F_CL11_PAR_RDQUEUE_ERROR_EN V_CL11_PAR_RDQUEUE_ERROR_EN(1U)
+
#define A_MA_EDRAM1_WRDATA_CNT0 0x7888
+#define A_MA_PARITY_ERROR_STATUS3 0x7888
+
+#define S_CL14_PAR_RDQUEUE_ERROR 14
+#define V_CL14_PAR_RDQUEUE_ERROR(x) ((x) << S_CL14_PAR_RDQUEUE_ERROR)
+#define F_CL14_PAR_RDQUEUE_ERROR V_CL14_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL13_PAR_RDQUEUE_ERROR 13
+#define V_CL13_PAR_RDQUEUE_ERROR(x) ((x) << S_CL13_PAR_RDQUEUE_ERROR)
+#define F_CL13_PAR_RDQUEUE_ERROR V_CL13_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL12_PAR_RDQUEUE_ERROR 12
+#define V_CL12_PAR_RDQUEUE_ERROR(x) ((x) << S_CL12_PAR_RDQUEUE_ERROR)
+#define F_CL12_PAR_RDQUEUE_ERROR V_CL12_PAR_RDQUEUE_ERROR(1U)
+
+#define S_CL11_PAR_RDQUEUE_ERROR 11
+#define V_CL11_PAR_RDQUEUE_ERROR(x) ((x) << S_CL11_PAR_RDQUEUE_ERROR)
+#define F_CL11_PAR_RDQUEUE_ERROR V_CL11_PAR_RDQUEUE_ERROR(1U)
+
#define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c
#define A_MA_EXT_MEMORY0_WRDATA_CNT0 0x7890
#define A_MA_HOST_MEMORY_WRDATA_CNT1 0x7894
@@ -16915,6 +21685,30 @@
#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE)
#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE)
+#define S_T7_FUTURE_CEXPANSION_WTE 31
+#define V_T7_FUTURE_CEXPANSION_WTE(x) ((x) << S_T7_FUTURE_CEXPANSION_WTE)
+#define F_T7_FUTURE_CEXPANSION_WTE V_T7_FUTURE_CEXPANSION_WTE(1U)
+
+#define S_CL14_WR_CMD_TO_EN 30
+#define V_CL14_WR_CMD_TO_EN(x) ((x) << S_CL14_WR_CMD_TO_EN)
+#define F_CL14_WR_CMD_TO_EN V_CL14_WR_CMD_TO_EN(1U)
+
+#define S_CL13_WR_CMD_TO_EN 29
+#define V_CL13_WR_CMD_TO_EN(x) ((x) << S_CL13_WR_CMD_TO_EN)
+#define F_CL13_WR_CMD_TO_EN V_CL13_WR_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTE 15
+#define V_T7_FUTURE_DEXPANSION_WTE(x) ((x) << S_T7_FUTURE_DEXPANSION_WTE)
+#define F_T7_FUTURE_DEXPANSION_WTE V_T7_FUTURE_DEXPANSION_WTE(1U)
+
+#define S_CL14_WR_DATA_TO_EN 14
+#define V_CL14_WR_DATA_TO_EN(x) ((x) << S_CL14_WR_DATA_TO_EN)
+#define F_CL14_WR_DATA_TO_EN V_CL14_WR_DATA_TO_EN(1U)
+
+#define S_CL13_WR_DATA_TO_EN 13
+#define V_CL13_WR_DATA_TO_EN(x) ((x) << S_CL13_WR_DATA_TO_EN)
+#define F_CL13_WR_DATA_TO_EN V_CL13_WR_DATA_TO_EN(1U)
+
#define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8
#define S_CL12_WR_CMD_TO_ERROR 28
@@ -17031,6 +21825,30 @@
#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS)
#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS)
+#define S_T7_FUTURE_CEXPANSION_WTS 31
+#define V_T7_FUTURE_CEXPANSION_WTS(x) ((x) << S_T7_FUTURE_CEXPANSION_WTS)
+#define F_T7_FUTURE_CEXPANSION_WTS V_T7_FUTURE_CEXPANSION_WTS(1U)
+
+#define S_CL14_WR_CMD_TO_ERROR 30
+#define V_CL14_WR_CMD_TO_ERROR(x) ((x) << S_CL14_WR_CMD_TO_ERROR)
+#define F_CL14_WR_CMD_TO_ERROR V_CL14_WR_CMD_TO_ERROR(1U)
+
+#define S_CL13_WR_CMD_TO_ERROR 29
+#define V_CL13_WR_CMD_TO_ERROR(x) ((x) << S_CL13_WR_CMD_TO_ERROR)
+#define F_CL13_WR_CMD_TO_ERROR V_CL13_WR_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_WTS 15
+#define V_T7_FUTURE_DEXPANSION_WTS(x) ((x) << S_T7_FUTURE_DEXPANSION_WTS)
+#define F_T7_FUTURE_DEXPANSION_WTS V_T7_FUTURE_DEXPANSION_WTS(1U)
+
+#define S_CL14_WR_DATA_TO_ERROR 14
+#define V_CL14_WR_DATA_TO_ERROR(x) ((x) << S_CL14_WR_DATA_TO_ERROR)
+#define F_CL14_WR_DATA_TO_ERROR V_CL14_WR_DATA_TO_ERROR(1U)
+
+#define S_CL13_WR_DATA_TO_ERROR 13
+#define V_CL13_WR_DATA_TO_ERROR(x) ((x) << S_CL13_WR_DATA_TO_ERROR)
+#define F_CL13_WR_DATA_TO_ERROR V_CL13_WR_DATA_TO_ERROR(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc
#define S_CL12_RD_CMD_TO_EN 28
@@ -17147,6 +21965,30 @@
#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE)
#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE)
+#define S_T7_FUTURE_CEXPANSION_RTE 31
+#define V_T7_FUTURE_CEXPANSION_RTE(x) ((x) << S_T7_FUTURE_CEXPANSION_RTE)
+#define F_T7_FUTURE_CEXPANSION_RTE V_T7_FUTURE_CEXPANSION_RTE(1U)
+
+#define S_CL14_RD_CMD_TO_EN 30
+#define V_CL14_RD_CMD_TO_EN(x) ((x) << S_CL14_RD_CMD_TO_EN)
+#define F_CL14_RD_CMD_TO_EN V_CL14_RD_CMD_TO_EN(1U)
+
+#define S_CL13_RD_CMD_TO_EN 29
+#define V_CL13_RD_CMD_TO_EN(x) ((x) << S_CL13_RD_CMD_TO_EN)
+#define F_CL13_RD_CMD_TO_EN V_CL13_RD_CMD_TO_EN(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTE 15
+#define V_T7_FUTURE_DEXPANSION_RTE(x) ((x) << S_T7_FUTURE_DEXPANSION_RTE)
+#define F_T7_FUTURE_DEXPANSION_RTE V_T7_FUTURE_DEXPANSION_RTE(1U)
+
+#define S_CL14_RD_DATA_TO_EN 14
+#define V_CL14_RD_DATA_TO_EN(x) ((x) << S_CL14_RD_DATA_TO_EN)
+#define F_CL14_RD_DATA_TO_EN V_CL14_RD_DATA_TO_EN(1U)
+
+#define S_CL13_RD_DATA_TO_EN 13
+#define V_CL13_RD_DATA_TO_EN(x) ((x) << S_CL13_RD_DATA_TO_EN)
+#define F_CL13_RD_DATA_TO_EN V_CL13_RD_DATA_TO_EN(1U)
+
#define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0
#define S_CL12_RD_CMD_TO_ERROR 28
@@ -17263,6 +22105,27 @@
#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS)
#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS)
+#define S_T7_FUTURE_CEXPANSION_RTS 31
+#define V_T7_FUTURE_CEXPANSION_RTS(x) ((x) << S_T7_FUTURE_CEXPANSION_RTS)
+#define F_T7_FUTURE_CEXPANSION_RTS V_T7_FUTURE_CEXPANSION_RTS(1U)
+
+#define S_CL14_RD_CMD_TO_ERROR 30
+#define V_CL14_RD_CMD_TO_ERROR(x) ((x) << S_CL14_RD_CMD_TO_ERROR)
+#define F_CL14_RD_CMD_TO_ERROR V_CL14_RD_CMD_TO_ERROR(1U)
+
+#define S_CL13_RD_CMD_TO_ERROR 29
+#define V_CL13_RD_CMD_TO_ERROR(x) ((x) << S_CL13_RD_CMD_TO_ERROR)
+#define F_CL13_RD_CMD_TO_ERROR V_CL13_RD_CMD_TO_ERROR(1U)
+
+#define S_T7_FUTURE_DEXPANSION_RTS 14
+#define M_T7_FUTURE_DEXPANSION_RTS 0x3U
+#define V_T7_FUTURE_DEXPANSION_RTS(x) ((x) << S_T7_FUTURE_DEXPANSION_RTS)
+#define G_T7_FUTURE_DEXPANSION_RTS(x) (((x) >> S_T7_FUTURE_DEXPANSION_RTS) & M_T7_FUTURE_DEXPANSION_RTS)
+
+#define S_CL13_RD_DATA_TO_ERROR 13
+#define V_CL13_RD_DATA_TO_ERROR(x) ((x) << S_CL13_RD_DATA_TO_ERROR)
+#define F_CL13_RD_DATA_TO_ERROR V_CL13_RD_DATA_TO_ERROR(1U)
+
#define A_MA_BKP_CNT_SEL 0x78e4
#define S_BKP_CNT_TYPE 30
@@ -17361,12 +22224,16 @@
#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE)
#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE)
-#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
+#define S_T7_FUTURE_DEXPANSION_IPE 14
+#define M_T7_FUTURE_DEXPANSION_IPE 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPE(x) ((x) << S_T7_FUTURE_DEXPANSION_IPE)
+#define G_T7_FUTURE_DEXPANSION_IPE(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPE) & M_T7_FUTURE_DEXPANSION_IPE)
-#define S_T5_FUTURE_DEXPANSION 13
-#define M_T5_FUTURE_DEXPANSION 0x7ffffU
-#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION)
-#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION)
+#define S_CL13_IF_PAR_EN 13
+#define V_CL13_IF_PAR_EN(x) ((x) << S_CL13_IF_PAR_EN)
+#define F_CL13_IF_PAR_EN V_CL13_IF_PAR_EN(1U)
+
+#define A_MA_IF_PARITY_ERROR_STATUS 0x78f4
#define S_CL12_IF_PAR_ERROR 12
#define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR)
@@ -17425,6 +22292,15 @@
#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS)
#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS)
+#define S_T7_FUTURE_DEXPANSION_IPS 14
+#define M_T7_FUTURE_DEXPANSION_IPS 0x3ffffU
+#define V_T7_FUTURE_DEXPANSION_IPS(x) ((x) << S_T7_FUTURE_DEXPANSION_IPS)
+#define G_T7_FUTURE_DEXPANSION_IPS(x) (((x) >> S_T7_FUTURE_DEXPANSION_IPS) & M_T7_FUTURE_DEXPANSION_IPS)
+
+#define S_CL13_IF_PAR_ERROR 13
+#define V_CL13_IF_PAR_ERROR(x) ((x) << S_CL13_IF_PAR_ERROR)
+#define F_CL13_IF_PAR_ERROR V_CL13_IF_PAR_ERROR(1U)
+
#define A_MA_LOCAL_DEBUG_CFG 0x78f8
#define S_DEBUG_OR 15
@@ -17445,6 +22321,131 @@
#define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE)
#define A_MA_LOCAL_DEBUG_RPT 0x78fc
+#define A_MA_CLIENT13_PR_THRESHOLD 0x7900
+#define A_MA_CLIENT13_CR_THRESHOLD 0x7904
+#define A_MA_CRYPTO_DEBUG_CNT 0x7908
+#define A_MA_CRYPTO_WRDATA_CNT 0x790c
+#define A_MA_CRYPTO_RDDATA_CNT 0x7910
+#define A_MA_LOCAL_DEBUG_PERF_CFG 0x7914
+#define A_MA_LOCAL_DEBUG_PERF_RPT 0x7918
+#define A_MA_PCIE_THROTTLE 0x791c
+#define A_MA_CLIENT14_PR_THRESHOLD 0x7920
+#define A_MA_CLIENT14_CR_THRESHOLD 0x7924
+#define A_MA_CIM_TH1_DEBUG_CNT 0x7928
+#define A_MA_CIM_TH1_WRDATA_CNT 0x792c
+#define A_MA_CIM_TH1_RDDATA_CNT 0x7930
+#define A_MA_CIM_THREAD1_MAPPER 0x7934
+
+#define S_CIM_THREAD1_EN 0
+#define M_CIM_THREAD1_EN 0xffU
+#define V_CIM_THREAD1_EN(x) ((x) << S_CIM_THREAD1_EN)
+#define G_CIM_THREAD1_EN(x) (((x) >> S_CIM_THREAD1_EN) & M_CIM_THREAD1_EN)
+
+#define A_MA_PIO_CI_SGE_TH0_BASE 0x7938
+
+#define S_SGE_TH0_BASE 0
+#define M_SGE_TH0_BASE 0xffffU
+#define V_SGE_TH0_BASE(x) ((x) << S_SGE_TH0_BASE)
+#define G_SGE_TH0_BASE(x) (((x) >> S_SGE_TH0_BASE) & M_SGE_TH0_BASE)
+
+#define A_MA_PIO_CI_SGE_TH1_BASE 0x793c
+
+#define S_SGE_TH1_BASE 0
+#define M_SGE_TH1_BASE 0xffffU
+#define V_SGE_TH1_BASE(x) ((x) << S_SGE_TH1_BASE)
+#define G_SGE_TH1_BASE(x) (((x) >> S_SGE_TH1_BASE) & M_SGE_TH1_BASE)
+
+#define A_MA_PIO_CI_ULPTX_BASE 0x7940
+
+#define S_ULPTX_BASE 0
+#define M_ULPTX_BASE 0xffffU
+#define V_ULPTX_BASE(x) ((x) << S_ULPTX_BASE)
+#define G_ULPTX_BASE(x) (((x) >> S_ULPTX_BASE) & M_ULPTX_BASE)
+
+#define A_MA_PIO_CI_ULPRX_BASE 0x7944
+
+#define S_ULPRX_BASE 0
+#define M_ULPRX_BASE 0xffffU
+#define V_ULPRX_BASE(x) ((x) << S_ULPRX_BASE)
+#define G_ULPRX_BASE(x) (((x) >> S_ULPRX_BASE) & M_ULPRX_BASE)
+
+#define A_MA_PIO_CI_ULPTXRX_BASE 0x7948
+
+#define S_ULPTXRX_BASE 0
+#define M_ULPTXRX_BASE 0xffffU
+#define V_ULPTXRX_BASE(x) ((x) << S_ULPTXRX_BASE)
+#define G_ULPTXRX_BASE(x) (((x) >> S_ULPTXRX_BASE) & M_ULPTXRX_BASE)
+
+#define A_MA_PIO_CI_TP_TH0_BASE 0x794c
+
+#define S_TP_TH0_BASE 0
+#define M_TP_TH0_BASE 0xffffU
+#define V_TP_TH0_BASE(x) ((x) << S_TP_TH0_BASE)
+#define G_TP_TH0_BASE(x) (((x) >> S_TP_TH0_BASE) & M_TP_TH0_BASE)
+
+#define A_MA_PIO_CI_TP_TH1_BASE 0x7950
+
+#define S_TP_TH1_BASE 0
+#define M_TP_TH1_BASE 0xffffU
+#define V_TP_TH1_BASE(x) ((x) << S_TP_TH1_BASE)
+#define G_TP_TH1_BASE(x) (((x) >> S_TP_TH1_BASE) & M_TP_TH1_BASE)
+
+#define A_MA_PIO_CI_LE_BASE 0x7954
+
+#define S_LE_BASE 0
+#define M_LE_BASE 0xffffU
+#define V_LE_BASE(x) ((x) << S_LE_BASE)
+#define G_LE_BASE(x) (((x) >> S_LE_BASE) & M_LE_BASE)
+
+#define A_MA_PIO_CI_CIM_TH0_BASE 0x7958
+
+#define S_CIM_TH0_BASE 0
+#define M_CIM_TH0_BASE 0xffffU
+#define V_CIM_TH0_BASE(x) ((x) << S_CIM_TH0_BASE)
+#define G_CIM_TH0_BASE(x) (((x) >> S_CIM_TH0_BASE) & M_CIM_TH0_BASE)
+
+#define A_MA_PIO_CI_PCIE_BASE 0x795c
+
+#define S_PCIE_BASE 0
+#define M_PCIE_BASE 0xffffU
+#define V_PCIE_BASE(x) ((x) << S_PCIE_BASE)
+#define G_PCIE_BASE(x) (((x) >> S_PCIE_BASE) & M_PCIE_BASE)
+
+#define A_MA_PIO_CI_PMTX_BASE 0x7960
+
+#define S_PMTX_BASE 0
+#define M_PMTX_BASE 0xffffU
+#define V_PMTX_BASE(x) ((x) << S_PMTX_BASE)
+#define G_PMTX_BASE(x) (((x) >> S_PMTX_BASE) & M_PMTX_BASE)
+
+#define A_MA_PIO_CI_PMRX_BASE 0x7964
+
+#define S_PMRX_BASE 0
+#define M_PMRX_BASE 0xffffU
+#define V_PMRX_BASE(x) ((x) << S_PMRX_BASE)
+#define G_PMRX_BASE(x) (((x) >> S_PMRX_BASE) & M_PMRX_BASE)
+
+#define A_MA_PIO_CI_HMA_BASE 0x7968
+
+#define S_HMACLIENTBASE 0
+#define M_HMACLIENTBASE 0xffffU
+#define V_HMACLIENTBASE(x) ((x) << S_HMACLIENTBASE)
+#define G_HMACLIENTBASE(x) (((x) >> S_HMACLIENTBASE) & M_HMACLIENTBASE)
+
+#define A_MA_PIO_CI_CRYPTO_BASE 0x796c
+
+#define S_CRYPTO_BASE 0
+#define M_CRYPTO_BASE 0xffffU
+#define V_CRYPTO_BASE(x) ((x) << S_CRYPTO_BASE)
+#define G_CRYPTO_BASE(x) (((x) >> S_CRYPTO_BASE) & M_CRYPTO_BASE)
+
+#define A_MA_PIO_CI_CIM_TH1_BASE 0x7970
+
+#define S_CIM_TH1_BASE 0
+#define M_CIM_TH1_BASE 0xffffU
+#define V_CIM_TH1_BASE(x) ((x) << S_CIM_TH1_BASE)
+#define G_CIM_TH1_BASE(x) (((x) >> S_CIM_TH1_BASE) & M_CIM_TH1_BASE)
+
#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000
#define S_CMDVLD0 31
@@ -20418,6 +25419,124 @@
#define V_FLASHADDRSIZE(x) ((x) << S_FLASHADDRSIZE)
#define G_FLASHADDRSIZE(x) (((x) >> S_FLASHADDRSIZE) & M_FLASHADDRSIZE)
+#define A_T7_CIM_PERR_ENABLE 0x7b08
+
+#define S_T7_MA_CIM_INTFPERR 31
+#define V_T7_MA_CIM_INTFPERR(x) ((x) << S_T7_MA_CIM_INTFPERR)
+#define F_T7_MA_CIM_INTFPERR V_T7_MA_CIM_INTFPERR(1U)
+
+#define S_T7_MBHOSTPARERR 30
+#define V_T7_MBHOSTPARERR(x) ((x) << S_T7_MBHOSTPARERR)
+#define F_T7_MBHOSTPARERR V_T7_MBHOSTPARERR(1U)
+
+#define S_MAARBINVRSPTAG 29
+#define V_MAARBINVRSPTAG(x) ((x) << S_MAARBINVRSPTAG)
+#define F_MAARBINVRSPTAG V_MAARBINVRSPTAG(1U)
+
+#define S_MAARBFIFOPARERR 28
+#define V_MAARBFIFOPARERR(x) ((x) << S_MAARBFIFOPARERR)
+#define F_MAARBFIFOPARERR V_MAARBFIFOPARERR(1U)
+
+#define S_SEMSRAMPARERR 27
+#define V_SEMSRAMPARERR(x) ((x) << S_SEMSRAMPARERR)
+#define F_SEMSRAMPARERR V_SEMSRAMPARERR(1U)
+
+#define S_RSACPARERR 26
+#define V_RSACPARERR(x) ((x) << S_RSACPARERR)
+#define F_RSACPARERR V_RSACPARERR(1U)
+
+#define S_RSADPARERR 25
+#define V_RSADPARERR(x) ((x) << S_RSADPARERR)
+#define F_RSADPARERR V_RSADPARERR(1U)
+
+#define S_T7_PLCIM_MSTRSPDATAPARERR 24
+#define V_T7_PLCIM_MSTRSPDATAPARERR(x) ((x) << S_T7_PLCIM_MSTRSPDATAPARERR)
+#define F_T7_PLCIM_MSTRSPDATAPARERR V_T7_PLCIM_MSTRSPDATAPARERR(1U)
+
+#define S_T7_PCIE2CIMINTFPARERR 23
+#define V_T7_PCIE2CIMINTFPARERR(x) ((x) << S_T7_PCIE2CIMINTFPARERR)
+#define F_T7_PCIE2CIMINTFPARERR V_T7_PCIE2CIMINTFPARERR(1U)
+
+#define S_T7_NCSI2CIMINTFPARERR 22
+#define V_T7_NCSI2CIMINTFPARERR(x) ((x) << S_T7_NCSI2CIMINTFPARERR)
+#define F_T7_NCSI2CIMINTFPARERR V_T7_NCSI2CIMINTFPARERR(1U)
+
+#define S_T7_SGE2CIMINTFPARERR 21
+#define V_T7_SGE2CIMINTFPARERR(x) ((x) << S_T7_SGE2CIMINTFPARERR)
+#define F_T7_SGE2CIMINTFPARERR V_T7_SGE2CIMINTFPARERR(1U)
+
+#define S_T7_ULP2CIMINTFPARERR 20
+#define V_T7_ULP2CIMINTFPARERR(x) ((x) << S_T7_ULP2CIMINTFPARERR)
+#define F_T7_ULP2CIMINTFPARERR V_T7_ULP2CIMINTFPARERR(1U)
+
+#define S_T7_TP2CIMINTFPARERR 19
+#define V_T7_TP2CIMINTFPARERR(x) ((x) << S_T7_TP2CIMINTFPARERR)
+#define F_T7_TP2CIMINTFPARERR V_T7_TP2CIMINTFPARERR(1U)
+
+#define S_CORE7PARERR 18
+#define V_CORE7PARERR(x) ((x) << S_CORE7PARERR)
+#define F_CORE7PARERR V_CORE7PARERR(1U)
+
+#define S_CORE6PARERR 17
+#define V_CORE6PARERR(x) ((x) << S_CORE6PARERR)
+#define F_CORE6PARERR V_CORE6PARERR(1U)
+
+#define S_CORE5PARERR 16
+#define V_CORE5PARERR(x) ((x) << S_CORE5PARERR)
+#define F_CORE5PARERR V_CORE5PARERR(1U)
+
+#define S_CORE4PARERR 15
+#define V_CORE4PARERR(x) ((x) << S_CORE4PARERR)
+#define F_CORE4PARERR V_CORE4PARERR(1U)
+
+#define S_CORE3PARERR 14
+#define V_CORE3PARERR(x) ((x) << S_CORE3PARERR)
+#define F_CORE3PARERR V_CORE3PARERR(1U)
+
+#define S_CORE2PARERR 13
+#define V_CORE2PARERR(x) ((x) << S_CORE2PARERR)
+#define F_CORE2PARERR V_CORE2PARERR(1U)
+
+#define S_CORE1PARERR 12
+#define V_CORE1PARERR(x) ((x) << S_CORE1PARERR)
+#define F_CORE1PARERR V_CORE1PARERR(1U)
+
+#define S_GFTPARERR 10
+#define V_GFTPARERR(x) ((x) << S_GFTPARERR)
+#define F_GFTPARERR V_GFTPARERR(1U)
+
+#define S_MPSRSPDATAPARERR 9
+#define V_MPSRSPDATAPARERR(x) ((x) << S_MPSRSPDATAPARERR)
+#define F_MPSRSPDATAPARERR V_MPSRSPDATAPARERR(1U)
+
+#define S_ER_RSPDATAPARERR 8
+#define V_ER_RSPDATAPARERR(x) ((x) << S_ER_RSPDATAPARERR)
+#define F_ER_RSPDATAPARERR V_ER_RSPDATAPARERR(1U)
+
+#define S_FLOWFIFOPARERR 7
+#define V_FLOWFIFOPARERR(x) ((x) << S_FLOWFIFOPARERR)
+#define F_FLOWFIFOPARERR V_FLOWFIFOPARERR(1U)
+
+#define S_OBQSRAMPARERR 6
+#define V_OBQSRAMPARERR(x) ((x) << S_OBQSRAMPARERR)
+#define F_OBQSRAMPARERR V_OBQSRAMPARERR(1U)
+
+#define S_TIEQOUTPARERR 3
+#define V_TIEQOUTPARERR(x) ((x) << S_TIEQOUTPARERR)
+#define F_TIEQOUTPARERR V_TIEQOUTPARERR(1U)
+
+#define S_TIEQINPARERR 2
+#define V_TIEQINPARERR(x) ((x) << S_TIEQINPARERR)
+#define F_TIEQINPARERR V_TIEQINPARERR(1U)
+
+#define S_PIFRSPPARERR 1
+#define V_PIFRSPPARERR(x) ((x) << S_PIFRSPPARERR)
+#define F_PIFRSPPARERR V_PIFRSPPARERR(1U)
+
+#define S_PIFREQPARERR 0
+#define V_PIFREQPARERR(x) ((x) << S_PIFREQPARERR)
+#define F_PIFREQPARERR V_PIFREQPARERR(1U)
+
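Every enable in the A_T7_CIM_PERR_ENABLE block above is a single-bit F_* flag, so arming several checks at once is a plain OR of the flag macros. A hedged example, again with the hypothetical adap/t4_write_reg() stand-ins:

/* Sketch: enable a handful of the new T7 CIM parity-error checks. */
uint32_t en = F_T7_MA_CIM_INTFPERR | F_MAARBINVRSPTAG |
    F_SEMSRAMPARERR | F_RSACPARERR | F_RSADPARERR;
t4_write_reg(adap, A_T7_CIM_PERR_ENABLE, en);
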
#define A_CIM_EEPROM_BASE_ADDR 0x7b0c
#define S_EEPROMBASEADDR 6
@@ -20425,6 +25544,7 @@
#define V_EEPROMBASEADDR(x) ((x) << S_EEPROMBASEADDR)
#define G_EEPROMBASEADDR(x) (((x) >> S_EEPROMBASEADDR) & M_EEPROMBASEADDR)
+#define A_CIM_PERR_CAUSE 0x7b0c
#define A_CIM_EEPROM_ADDR_SIZE 0x7b10
#define S_EEPROMADDRSIZE 4
@@ -20593,6 +25713,38 @@
#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR)
#define F_IBQPCIEPARERR V_IBQPCIEPARERR(1U)
+#define S_CORE7ACCINT 22
+#define V_CORE7ACCINT(x) ((x) << S_CORE7ACCINT)
+#define F_CORE7ACCINT V_CORE7ACCINT(1U)
+
+#define S_CORE6ACCINT 21
+#define V_CORE6ACCINT(x) ((x) << S_CORE6ACCINT)
+#define F_CORE6ACCINT V_CORE6ACCINT(1U)
+
+#define S_CORE5ACCINT 20
+#define V_CORE5ACCINT(x) ((x) << S_CORE5ACCINT)
+#define F_CORE5ACCINT V_CORE5ACCINT(1U)
+
+#define S_CORE4ACCINT 19
+#define V_CORE4ACCINT(x) ((x) << S_CORE4ACCINT)
+#define F_CORE4ACCINT V_CORE4ACCINT(1U)
+
+#define S_CORE3ACCINT 18
+#define V_CORE3ACCINT(x) ((x) << S_CORE3ACCINT)
+#define F_CORE3ACCINT V_CORE3ACCINT(1U)
+
+#define S_CORE2ACCINT 17
+#define V_CORE2ACCINT(x) ((x) << S_CORE2ACCINT)
+#define F_CORE2ACCINT V_CORE2ACCINT(1U)
+
+#define S_CORE1ACCINT 16
+#define V_CORE1ACCINT(x) ((x) << S_CORE1ACCINT)
+#define F_CORE1ACCINT V_CORE1ACCINT(1U)
+
+#define S_PERRNONZERO 1
+#define V_PERRNONZERO(x) ((x) << S_PERRNONZERO)
+#define F_PERRNONZERO V_PERRNONZERO(1U)
+
#define A_CIM_HOST_INT_CAUSE 0x7b2c
#define S_TIEQOUTPARERRINT 20
@@ -20745,6 +25897,10 @@
#define V_RSVDSPACEINTEN(x) ((x) << S_RSVDSPACEINTEN)
#define F_RSVDSPACEINTEN V_RSVDSPACEINTEN(1U)
+#define S_CONWRERRINTEN 31
+#define V_CONWRERRINTEN(x) ((x) << S_CONWRERRINTEN)
+#define F_CONWRERRINTEN V_CONWRERRINTEN(1U)
+
#define A_CIM_HOST_UPACC_INT_CAUSE 0x7b34
#define S_EEPROMWRINT 30
@@ -20871,12 +26027,32 @@
#define V_RSVDSPACEINT(x) ((x) << S_RSVDSPACEINT)
#define F_RSVDSPACEINT V_RSVDSPACEINT(1U)
+#define S_CONWRERRINT 31
+#define V_CONWRERRINT(x) ((x) << S_CONWRERRINT)
+#define F_CONWRERRINT V_CONWRERRINT(1U)
+
#define A_CIM_UP_INT_ENABLE 0x7b38
#define S_MSTPLINTEN 4
#define V_MSTPLINTEN(x) ((x) << S_MSTPLINTEN)
#define F_MSTPLINTEN V_MSTPLINTEN(1U)
+#define S_SEMINT 8
+#define V_SEMINT(x) ((x) << S_SEMINT)
+#define F_SEMINT V_SEMINT(1U)
+
+#define S_RSAINT 7
+#define V_RSAINT(x) ((x) << S_RSAINT)
+#define F_RSAINT V_RSAINT(1U)
+
+#define S_TRNGINT 6
+#define V_TRNGINT(x) ((x) << S_TRNGINT)
+#define F_TRNGINT V_TRNGINT(1U)
+
+#define S_PEERHALTINT 5
+#define V_PEERHALTINT(x) ((x) << S_PEERHALTINT)
+#define F_PEERHALTINT V_PEERHALTINT(1U)
+
#define A_CIM_UP_INT_CAUSE 0x7b3c
#define S_MSTPLINT 4
@@ -20900,6 +26076,33 @@
#define V_QUENUMSELECT(x) ((x) << S_QUENUMSELECT)
#define G_QUENUMSELECT(x) (((x) >> S_QUENUMSELECT) & M_QUENUMSELECT)
+#define S_MAPOFFSET 11
+#define M_MAPOFFSET 0x1fU
+#define V_MAPOFFSET(x) ((x) << S_MAPOFFSET)
+#define G_MAPOFFSET(x) (((x) >> S_MAPOFFSET) & M_MAPOFFSET)
+
+#define S_MAPSELECT 10
+#define V_MAPSELECT(x) ((x) << S_MAPSELECT)
+#define F_MAPSELECT V_MAPSELECT(1U)
+
+#define S_CORESELECT 6
+#define M_CORESELECT 0xfU
+#define V_CORESELECT(x) ((x) << S_CORESELECT)
+#define G_CORESELECT(x) (((x) >> S_CORESELECT) & M_CORESELECT)
+
+#define S_T7_OBQSELECT 5
+#define V_T7_OBQSELECT(x) ((x) << S_T7_OBQSELECT)
+#define F_T7_OBQSELECT V_T7_OBQSELECT(1U)
+
+#define S_T7_IBQSELECT 4
+#define V_T7_IBQSELECT(x) ((x) << S_T7_IBQSELECT)
+#define F_T7_IBQSELECT V_T7_IBQSELECT(1U)
+
+#define S_T7_QUENUMSELECT 0
+#define M_T7_QUENUMSELECT 0xfU
+#define V_T7_QUENUMSELECT(x) ((x) << S_T7_QUENUMSELECT)
+#define G_T7_QUENUMSELECT(x) (((x) >> S_T7_QUENUMSELECT) & M_T7_QUENUMSELECT)
+
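Multi-bit V_*() values and one-bit F_* flags compose the same way. The register that holds these T7 queue-select fields sits outside this hunk's context, so the sketch below only builds the select word rather than writing it anywhere:

/* Sketch: select IBQ number 3 on core 1 using the T7 field layout. */
uint32_t sel = V_CORESELECT(1) | F_T7_IBQSELECT | V_T7_QUENUMSELECT(3);
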
#define A_CIM_QUEUE_CONFIG_CTRL 0x7b4c
#define S_CIMQSIZE 24
@@ -20940,6 +26143,29 @@
#define V_HOSTADDR(x) ((x) << S_HOSTADDR)
#define G_HOSTADDR(x) (((x) >> S_HOSTADDR) & M_HOSTADDR)
+#define S_T7_HOSTBUSY 31
+#define V_T7_HOSTBUSY(x) ((x) << S_T7_HOSTBUSY)
+#define F_T7_HOSTBUSY V_T7_HOSTBUSY(1U)
+
+#define S_T7_HOSTWRITE 30
+#define V_T7_HOSTWRITE(x) ((x) << S_T7_HOSTWRITE)
+#define F_T7_HOSTWRITE V_T7_HOSTWRITE(1U)
+
+#define S_HOSTGRPSEL 28
+#define M_HOSTGRPSEL 0x3U
+#define V_HOSTGRPSEL(x) ((x) << S_HOSTGRPSEL)
+#define G_HOSTGRPSEL(x) (((x) >> S_HOSTGRPSEL) & M_HOSTGRPSEL)
+
+#define S_HOSTCORESEL 24
+#define M_HOSTCORESEL 0xfU
+#define V_HOSTCORESEL(x) ((x) << S_HOSTCORESEL)
+#define G_HOSTCORESEL(x) (((x) >> S_HOSTCORESEL) & M_HOSTCORESEL)
+
+#define S_T7_HOSTADDR 0
+#define M_T7_HOSTADDR 0xffffffU
+#define V_T7_HOSTADDR(x) ((x) << S_T7_HOSTADDR)
+#define G_T7_HOSTADDR(x) (((x) >> S_T7_HOSTADDR) & M_T7_HOSTADDR)
+
#define A_CIM_HOST_ACC_DATA 0x7b54
#define A_CIM_CDEBUGDATA 0x7b58
@@ -20953,6 +26179,31 @@
#define V_CDEBUGDATAL(x) ((x) << S_CDEBUGDATAL)
#define G_CDEBUGDATAL(x) (((x) >> S_CDEBUGDATAL) & M_CDEBUGDATAL)
+#define A_CIM_DEBUG_CFG 0x7b58
+
+#define S_OR_EN 20
+#define V_OR_EN(x) ((x) << S_OR_EN)
+#define F_OR_EN V_OR_EN(1U)
+
+#define S_USEL 19
+#define V_USEL(x) ((x) << S_USEL)
+#define F_USEL V_USEL(1U)
+
+#define S_HI 18
+#define V_HI(x) ((x) << S_HI)
+#define F_HI V_HI(1U)
+
+#define S_SELH 9
+#define M_SELH 0x1ffU
+#define V_SELH(x) ((x) << S_SELH)
+#define G_SELH(x) (((x) >> S_SELH) & M_SELH)
+
+#define S_SELL 0
+#define M_SELL 0x1ffU
+#define V_SELL(x) ((x) << S_SELL)
+#define G_SELL(x) (((x) >> S_SELL) & M_SELL)
+
+#define A_CIM_DEBUG_DATA 0x7b5c
#define A_CIM_IBQ_DBG_CFG 0x7b60
#define S_IBQDBGADDR 16
@@ -20972,6 +26223,25 @@
#define V_IBQDBGEN(x) ((x) << S_IBQDBGEN)
#define F_IBQDBGEN V_IBQDBGEN(1U)
+#define S_IBQDBGCORE 28
+#define M_IBQDBGCORE 0xfU
+#define V_IBQDBGCORE(x) ((x) << S_IBQDBGCORE)
+#define G_IBQDBGCORE(x) (((x) >> S_IBQDBGCORE) & M_IBQDBGCORE)
+
+#define S_T7_IBQDBGADDR 12
+#define M_T7_IBQDBGADDR 0x1fffU
+#define V_T7_IBQDBGADDR(x) ((x) << S_T7_IBQDBGADDR)
+#define G_T7_IBQDBGADDR(x) (((x) >> S_T7_IBQDBGADDR) & M_T7_IBQDBGADDR)
+
+#define S_IBQDBGSTATE 4
+#define M_IBQDBGSTATE 0x3U
+#define V_IBQDBGSTATE(x) ((x) << S_IBQDBGSTATE)
+#define G_IBQDBGSTATE(x) (((x) >> S_IBQDBGSTATE) & M_IBQDBGSTATE)
+
+#define S_PERRADDRCLR 3
+#define V_PERRADDRCLR(x) ((x) << S_PERRADDRCLR)
+#define F_PERRADDRCLR V_PERRADDRCLR(1U)
+
#define A_CIM_OBQ_DBG_CFG 0x7b64
#define S_OBQDBGADDR 16
@@ -20991,6 +26261,21 @@
#define V_OBQDBGEN(x) ((x) << S_OBQDBGEN)
#define F_OBQDBGEN V_OBQDBGEN(1U)
+#define S_OBQDBGCORE 28
+#define M_OBQDBGCORE 0xfU
+#define V_OBQDBGCORE(x) ((x) << S_OBQDBGCORE)
+#define G_OBQDBGCORE(x) (((x) >> S_OBQDBGCORE) & M_OBQDBGCORE)
+
+#define S_T7_OBQDBGADDR 12
+#define M_T7_OBQDBGADDR 0x1fffU
+#define V_T7_OBQDBGADDR(x) ((x) << S_T7_OBQDBGADDR)
+#define G_T7_OBQDBGADDR(x) (((x) >> S_T7_OBQDBGADDR) & M_T7_OBQDBGADDR)
+
+#define S_OBQDBGSTATE 4
+#define M_OBQDBGSTATE 0x3U
+#define V_OBQDBGSTATE(x) ((x) << S_OBQDBGSTATE)
+#define G_OBQDBGSTATE(x) (((x) >> S_OBQDBGSTATE) & M_OBQDBGSTATE)
+
#define A_CIM_IBQ_DBG_DATA 0x7b68
#define A_CIM_OBQ_DBG_DATA 0x7b6c
#define A_CIM_DEBUGCFG 0x7b70
@@ -21075,6 +26360,11 @@
#define V_ZONE_DST(x) ((x) << S_ZONE_DST)
#define G_ZONE_DST(x) (((x) >> S_ZONE_DST) & M_ZONE_DST)
+#define S_THREAD_ID 2
+#define M_THREAD_ID 0x7U
+#define V_THREAD_ID(x) ((x) << S_THREAD_ID)
+#define G_THREAD_ID(x) (((x) >> S_THREAD_ID) & M_THREAD_ID)
+
#define A_CIM_MEM_ZONE0_LEN 0x7b98
#define S_MEM_ZONE_LEN 4
@@ -21207,6 +26497,7 @@
#define G_DUPUACCMASK(x) (((x) >> S_DUPUACCMASK) & M_DUPUACCMASK)
#define A_CIM_PERR_INJECT 0x7c20
+#define A_CIM_FPGA_ROM_EFUSE_CMD 0x7c20
#define A_CIM_PERR_ENABLE 0x7c24
#define S_PERREN 0
@@ -21224,6 +26515,7 @@
#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN)
#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN)
+#define A_CIM_FPGA_ROM_EFUSE_DATA 0x7c24
#define A_CIM_EEPROM_BUSY_BIT 0x7c28
#define S_EEPROMBUSY 0
@@ -21240,6 +26532,22 @@
#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE)
#define F_SLOW_TIMER_ENABLE V_SLOW_TIMER_ENABLE(1U)
+#define S_FLASHWRPAGEMORE 5
+#define V_FLASHWRPAGEMORE(x) ((x) << S_FLASHWRPAGEMORE)
+#define F_FLASHWRPAGEMORE V_FLASHWRPAGEMORE(1U)
+
+#define S_FLASHWRENABLE 4
+#define V_FLASHWRENABLE(x) ((x) << S_FLASHWRENABLE)
+#define F_FLASHWRENABLE V_FLASHWRENABLE(1U)
+
+#define S_FLASHMOREENABLE 3
+#define V_FLASHMOREENABLE(x) ((x) << S_FLASHMOREENABLE)
+#define F_FLASHMOREENABLE V_FLASHMOREENABLE(1U)
+
+#define S_WR_RESP_ENABLE 2
+#define V_WR_RESP_ENABLE(x) ((x) << S_WR_RESP_ENABLE)
+#define F_WR_RESP_ENABLE V_WR_RESP_ENABLE(1U)
+
#define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30
#define S_UP_PO_SINGLE_OUTSTANDING 0
@@ -21271,6 +26579,18 @@
#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE)
#define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40
+#define A_CIM_QUE_PERR_ADDR 0x7c40
+
+#define S_IBQPERRADDR 16
+#define M_IBQPERRADDR 0xfffU
+#define V_IBQPERRADDR(x) ((x) << S_IBQPERRADDR)
+#define G_IBQPERRADDR(x) (((x) >> S_IBQPERRADDR) & M_IBQPERRADDR)
+
+#define S_OBQPERRADDR 0
+#define M_OBQPERRADDR 0xfffU
+#define V_OBQPERRADDR(x) ((x) << S_OBQPERRADDR)
+#define G_OBQPERRADDR(x) (((x) >> S_OBQPERRADDR) & M_OBQPERRADDR)
+
#define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44
#define S_PIO_UP_MST_CFG_SEL 0
@@ -21309,6 +26629,20 @@
#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE)
#define F_PCIE_OBQ_IF_DISABLE V_PCIE_OBQ_IF_DISABLE(1U)
+#define S_ULP_OBQ_SIZE 8
+#define M_ULP_OBQ_SIZE 0x3U
+#define V_ULP_OBQ_SIZE(x) ((x) << S_ULP_OBQ_SIZE)
+#define G_ULP_OBQ_SIZE(x) (((x) >> S_ULP_OBQ_SIZE) & M_ULP_OBQ_SIZE)
+
+#define S_TP_IBQ_SIZE 6
+#define M_TP_IBQ_SIZE 0x3U
+#define V_TP_IBQ_SIZE(x) ((x) << S_TP_IBQ_SIZE)
+#define G_TP_IBQ_SIZE(x) (((x) >> S_TP_IBQ_SIZE) & M_TP_IBQ_SIZE)
+
+#define S_OBQ_EOM_ENABLE 5
+#define V_OBQ_EOM_ENABLE(x) ((x) << S_OBQ_EOM_ENABLE)
+#define F_OBQ_EOM_ENABLE V_OBQ_EOM_ENABLE(1U)
+
#define A_CIM_CGEN_GLOBAL 0x7c50
#define S_CGEN_GLOBAL 0
@@ -21321,6 +26655,77 @@
#define V_PIFDBGLA_DPSLP_EN(x) ((x) << S_PIFDBGLA_DPSLP_EN)
#define F_PIFDBGLA_DPSLP_EN V_PIFDBGLA_DPSLP_EN(1U)
+#define A_CIM_GFT_CMM_CONFIG 0x7c58
+
+#define S_GLFL 31
+#define V_GLFL(x) ((x) << S_GLFL)
+#define F_GLFL V_GLFL(1U)
+
+#define S_T7_WRCNTIDLE 16
+#define M_T7_WRCNTIDLE 0x7fffU
+#define V_T7_WRCNTIDLE(x) ((x) << S_T7_WRCNTIDLE)
+#define G_T7_WRCNTIDLE(x) (((x) >> S_T7_WRCNTIDLE) & M_T7_WRCNTIDLE)
+
+#define A_CIM_GFT_CONFIG 0x7c5c
+
+#define S_GFTMABASE 16
+#define M_GFTMABASE 0xffffU
+#define V_GFTMABASE(x) ((x) << S_GFTMABASE)
+#define G_GFTMABASE(x) (((x) >> S_GFTMABASE) & M_GFTMABASE)
+
+#define S_GFTHASHTBLSIZE 12
+#define M_GFTHASHTBLSIZE 0xfU
+#define V_GFTHASHTBLSIZE(x) ((x) << S_GFTHASHTBLSIZE)
+#define G_GFTHASHTBLSIZE(x) (((x) >> S_GFTHASHTBLSIZE) & M_GFTHASHTBLSIZE)
+
+#define S_GFTTCAMPRIORITY 11
+#define V_GFTTCAMPRIORITY(x) ((x) << S_GFTTCAMPRIORITY)
+#define F_GFTTCAMPRIORITY V_GFTTCAMPRIORITY(1U)
+
+#define S_GFTMATHREADID 8
+#define M_GFTMATHREADID 0x7U
+#define V_GFTMATHREADID(x) ((x) << S_GFTMATHREADID)
+#define G_GFTMATHREADID(x) (((x) >> S_GFTMATHREADID) & M_GFTMATHREADID)
+
+#define S_GFTTCAMINIT 7
+#define V_GFTTCAMINIT(x) ((x) << S_GFTTCAMINIT)
+#define F_GFTTCAMINIT V_GFTTCAMINIT(1U)
+
+#define S_GFTTCAMINITDONE 6
+#define V_GFTTCAMINITDONE(x) ((x) << S_GFTTCAMINITDONE)
+#define F_GFTTCAMINITDONE V_GFTTCAMINITDONE(1U)
+
+#define S_GFTTBLMODEEN 0
+#define V_GFTTBLMODEEN(x) ((x) << S_GFTTBLMODEEN)
+#define F_GFTTBLMODEEN V_GFTTBLMODEEN(1U)
+
+#define A_CIM_TCAM_BIST_CTRL 0x7c60
+
+#define S_RST_CB 31
+#define V_RST_CB(x) ((x) << S_RST_CB)
+#define F_RST_CB V_RST_CB(1U)
+
+#define S_CB_START 0
+#define M_CB_START 0xfffffffU
+#define V_CB_START(x) ((x) << S_CB_START)
+#define G_CB_START(x) (((x) >> S_CB_START) & M_CB_START)
+
+#define A_CIM_TCAM_BIST_CB_PASS 0x7c64
+
+#define S_CB_PASS 0
+#define M_CB_PASS 0xfffffffU
+#define V_CB_PASS(x) ((x) << S_CB_PASS)
+#define G_CB_PASS(x) (((x) >> S_CB_PASS) & M_CB_PASS)
+
+#define A_CIM_TCAM_BIST_CB_BUSY 0x7c68
+
+#define S_CB_BUSY 0
+#define M_CB_BUSY 0xfffffffU
+#define V_CB_BUSY(x) ((x) << S_CB_BUSY)
+#define G_CB_BUSY(x) (((x) >> S_CB_BUSY) & M_CB_BUSY)
+
+#define A_CIM_GFT_MASK 0x7c70
+
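+/*
+ * Sketch of the TCAM init handshake defined above: set GFTTCAMINIT and
+ * poll GFTTCAMINITDONE.  The t4_set_reg_field helper, the DELAY()
+ * pacing, and the absence of a timeout are illustrative assumptions:
+ *
+ *	t4_set_reg_field(sc, A_CIM_GFT_CONFIG, F_GFTTCAMINIT,
+ *	    F_GFTTCAMINIT);
+ *	while (!(t4_read_reg(sc, A_CIM_GFT_CONFIG) & F_GFTTCAMINITDONE))
+ *		DELAY(1);
+ */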
/* registers for module TP */
#define TP_BASE_ADDR 0x7d00
@@ -21613,6 +27018,14 @@
#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT)
#define F_CRXPKTXT V_CRXPKTXT(1U)
+#define S_ETOEBYPCSUMNOWAIT 15
+#define V_ETOEBYPCSUMNOWAIT(x) ((x) << S_ETOEBYPCSUMNOWAIT)
+#define F_ETOEBYPCSUMNOWAIT V_ETOEBYPCSUMNOWAIT(1U)
+
+#define S_ENICCSUMNOWAIT 14
+#define V_ENICCSUMNOWAIT(x) ((x) << S_ENICCSUMNOWAIT)
+#define F_ENICCSUMNOWAIT V_ENICCSUMNOWAIT(1U)
+
#define A_TP_GLOBAL_CONFIG 0x7d08
#define S_SYNCOOKIEPARAMS 26
@@ -21703,6 +27116,31 @@
#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS)
#define F_ACTIVEFILTERCOUNTS V_ACTIVEFILTERCOUNTS(1U)
+#define S_RXSACKPARSE 31
+#define V_RXSACKPARSE(x) ((x) << S_RXSACKPARSE)
+#define F_RXSACKPARSE V_RXSACKPARSE(1U)
+
+#define S_RXSACKFWDMODE 29
+#define M_RXSACKFWDMODE 0x3U
+#define V_RXSACKFWDMODE(x) ((x) << S_RXSACKFWDMODE)
+#define G_RXSACKFWDMODE(x) (((x) >> S_RXSACKFWDMODE) & M_RXSACKFWDMODE)
+
+#define S_SRVRCHRSSEN 26
+#define V_SRVRCHRSSEN(x) ((x) << S_SRVRCHRSSEN)
+#define F_SRVRCHRSSEN V_SRVRCHRSSEN(1U)
+
+#define S_LBCHNDISTEN 23
+#define V_LBCHNDISTEN(x) ((x) << S_LBCHNDISTEN)
+#define F_LBCHNDISTEN V_LBCHNDISTEN(1U)
+
+#define S_ETHTNLLEN2X 20
+#define V_ETHTNLLEN2X(x) ((x) << S_ETHTNLLEN2X)
+#define F_ETHTNLLEN2X V_ETHTNLLEN2X(1U)
+
+#define S_EGLBCHNDISTEN 19
+#define V_EGLBCHNDISTEN(x) ((x) << S_EGLBCHNDISTEN)
+#define F_EGLBCHNDISTEN V_EGLBCHNDISTEN(1U)
+
#define A_TP_DB_CONFIG 0x7d0c
#define S_DBMAXOPCNT 24
@@ -21767,6 +27205,11 @@
#define V_PMRXMAXPAGE(x) ((x) << S_PMRXMAXPAGE)
#define G_PMRXMAXPAGE(x) (((x) >> S_PMRXMAXPAGE) & M_PMRXMAXPAGE)
+#define S_T7_PMRXNUMCHN 29
+#define M_T7_PMRXNUMCHN 0x7U
+#define V_T7_PMRXNUMCHN(x) ((x) << S_T7_PMRXNUMCHN)
+#define G_T7_PMRXNUMCHN(x) (((x) >> S_T7_PMRXNUMCHN) & M_T7_PMRXNUMCHN)
+
#define A_TP_PMM_TX_PAGE_SIZE 0x7d34
#define A_TP_PMM_TX_MAX_PAGE 0x7d38
@@ -21780,6 +27223,83 @@
#define V_PMTXMAXPAGE(x) ((x) << S_PMTXMAXPAGE)
#define G_PMTXMAXPAGE(x) (((x) >> S_PMTXMAXPAGE) & M_PMTXMAXPAGE)
+#define S_T7_PMTXNUMCHN 29
+#define M_T7_PMTXNUMCHN 0x7U
+#define V_T7_PMTXNUMCHN(x) ((x) << S_T7_PMTXNUMCHN)
+#define G_T7_PMTXNUMCHN(x) (((x) >> S_T7_PMTXNUMCHN) & M_T7_PMTXNUMCHN)
+
+#define A_TP_EXT_CONFIG 0x7d3c
+
+#define S_TNLERRORIPSECARW 29
+#define V_TNLERRORIPSECARW(x) ((x) << S_TNLERRORIPSECARW)
+#define F_TNLERRORIPSECARW V_TNLERRORIPSECARW(1U)
+
+#define S_TNLERRORIPSECICV 28
+#define V_TNLERRORIPSECICV(x) ((x) << S_TNLERRORIPSECICV)
+#define F_TNLERRORIPSECICV V_TNLERRORIPSECICV(1U)
+
+#define S_DROPERRORIPSECARW 25
+#define V_DROPERRORIPSECARW(x) ((x) << S_DROPERRORIPSECARW)
+#define F_DROPERRORIPSECARW V_DROPERRORIPSECARW(1U)
+
+#define S_DROPERRORIPSECICV 24
+#define V_DROPERRORIPSECICV(x) ((x) << S_DROPERRORIPSECICV)
+#define F_DROPERRORIPSECICV V_DROPERRORIPSECICV(1U)
+
+#define S_MIBRDMAROCEEN 19
+#define V_MIBRDMAROCEEN(x) ((x) << S_MIBRDMAROCEEN)
+#define F_MIBRDMAROCEEN V_MIBRDMAROCEEN(1U)
+
+#define S_MIBRDMAIWARPEN 18
+#define V_MIBRDMAIWARPEN(x) ((x) << S_MIBRDMAIWARPEN)
+#define F_MIBRDMAIWARPEN V_MIBRDMAIWARPEN(1U)
+
+#define S_BYPTXDATAACKALLEN 17
+#define V_BYPTXDATAACKALLEN(x) ((x) << S_BYPTXDATAACKALLEN)
+#define F_BYPTXDATAACKALLEN V_BYPTXDATAACKALLEN(1U)
+
+#define S_DATAACKEXTEN 16
+#define V_DATAACKEXTEN(x) ((x) << S_DATAACKEXTEN)
+#define F_DATAACKEXTEN V_DATAACKEXTEN(1U)
+
+#define S_MACMATCH11FWD 11
+#define V_MACMATCH11FWD(x) ((x) << S_MACMATCH11FWD)
+#define F_MACMATCH11FWD V_MACMATCH11FWD(1U)
+
+#define S_USERTMSTPEN 10
+#define V_USERTMSTPEN(x) ((x) << S_USERTMSTPEN)
+#define F_USERTMSTPEN V_USERTMSTPEN(1U)
+
+#define S_MMGRCACHEDIS 9
+#define V_MMGRCACHEDIS(x) ((x) << S_MMGRCACHEDIS)
+#define F_MMGRCACHEDIS V_MMGRCACHEDIS(1U)
+
+#define S_TXPKTPACKOUTUDPEN 8
+#define V_TXPKTPACKOUTUDPEN(x) ((x) << S_TXPKTPACKOUTUDPEN)
+#define F_TXPKTPACKOUTUDPEN V_TXPKTPACKOUTUDPEN(1U)
+
+#define S_IPSECROCECRCMODE 6
+#define M_IPSECROCECRCMODE 0x3U
+#define V_IPSECROCECRCMODE(x) ((x) << S_IPSECROCECRCMODE)
+#define G_IPSECROCECRCMODE(x) (((x) >> S_IPSECROCECRCMODE) & M_IPSECROCECRCMODE)
+
+#define S_IPSECIDXLOC 5
+#define V_IPSECIDXLOC(x) ((x) << S_IPSECIDXLOC)
+#define F_IPSECIDXLOC V_IPSECIDXLOC(1U)
+
+#define S_IPSECIDXCAPEN 4
+#define V_IPSECIDXCAPEN(x) ((x) << S_IPSECIDXCAPEN)
+#define F_IPSECIDXCAPEN V_IPSECIDXCAPEN(1U)
+
+#define S_IPSECOFEN 3
+#define V_IPSECOFEN(x) ((x) << S_IPSECOFEN)
+#define F_IPSECOFEN V_IPSECOFEN(1U)
+
+#define S_IPSECCFG 0
+#define M_IPSECCFG 0x7U
+#define V_IPSECCFG(x) ((x) << S_IPSECCFG)
+#define G_IPSECCFG(x) (((x) >> S_IPSECCFG) & M_IPSECCFG)
+
#define A_TP_TCP_OPTIONS 0x7d40
#define S_MTUDEFAULT 16
@@ -22615,10 +28135,6 @@
#define V_TXPDUSIZEADJ(x) ((x) << S_TXPDUSIZEADJ)
#define G_TXPDUSIZEADJ(x) (((x) >> S_TXPDUSIZEADJ) & M_TXPDUSIZEADJ)
-#define S_ENABLECBYP 21
-#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP)
-#define F_ENABLECBYP V_ENABLECBYP(1U)
-
#define S_LIMITEDTRANSMIT 20
#define M_LIMITEDTRANSMIT 0xfU
#define V_LIMITEDTRANSMIT(x) ((x) << S_LIMITEDTRANSMIT)
@@ -22779,6 +28295,18 @@
#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT)
#define F_ECNSYNECT V_ECNSYNECT(1U)
+#define A_TP_PARA_REG9 0x7d88
+
+#define S_PMMAXXFERLEN3 16
+#define M_PMMAXXFERLEN3 0xffffU
+#define V_PMMAXXFERLEN3(x) ((x) << S_PMMAXXFERLEN3)
+#define G_PMMAXXFERLEN3(x) (((x) >> S_PMMAXXFERLEN3) & M_PMMAXXFERLEN3)
+
+#define S_PMMAXXFERLEN2 0
+#define M_PMMAXXFERLEN2 0xffffU
+#define V_PMMAXXFERLEN2(x) ((x) << S_PMMAXXFERLEN2)
+#define G_PMMAXXFERLEN2(x) (((x) >> S_PMMAXXFERLEN2) & M_PMMAXXFERLEN2)
+
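+/*
+ * Example packing for A_TP_PARA_REG9 above; the transfer lengths are
+ * placeholders, not recommended values:
+ *
+ *	t4_write_reg(sc, A_TP_PARA_REG9,
+ *	    V_PMMAXXFERLEN2(8192) | V_PMMAXXFERLEN3(8192));
+ */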
#define A_TP_ERR_CONFIG 0x7d8c
#define S_TNLERRORPING 30
@@ -22926,6 +28454,11 @@
#define V_DELAYEDACKRESOLUTION(x) ((x) << S_DELAYEDACKRESOLUTION)
#define G_DELAYEDACKRESOLUTION(x) (((x) >> S_DELAYEDACKRESOLUTION) & M_DELAYEDACKRESOLUTION)
+#define S_ROCETIMERRESOLUTION 24
+#define M_ROCETIMERRESOLUTION 0xffU
+#define V_ROCETIMERRESOLUTION(x) ((x) << S_ROCETIMERRESOLUTION)
+#define G_ROCETIMERRESOLUTION(x) (((x) >> S_ROCETIMERRESOLUTION) & M_ROCETIMERRESOLUTION)
+
#define A_TP_MSL 0x7d94
#define S_MSL 0
@@ -23423,6 +28956,14 @@
#define V_FRMWRQUEMASK(x) ((x) << S_FRMWRQUEMASK)
#define G_FRMWRQUEMASK(x) (((x) >> S_FRMWRQUEMASK) & M_FRMWRQUEMASK)
+#define S_RRCPLOPT1SMSELEN 11
+#define V_RRCPLOPT1SMSELEN(x) ((x) << S_RRCPLOPT1SMSELEN)
+#define F_RRCPLOPT1SMSELEN V_RRCPLOPT1SMSELEN(1U)
+
+#define S_RRCPLOPT1BQEN 10
+#define V_RRCPLOPT1BQEN(x) ((x) << S_RRCPLOPT1BQEN)
+#define F_RRCPLOPT1BQEN V_RRCPLOPT1BQEN(1U)
+
#define A_TP_RSS_CONFIG_SYN 0x7dfc
#define A_TP_RSS_CONFIG_VRT 0x7e00
@@ -23595,6 +29136,69 @@
#define V_QUEUE(x) ((x) << S_QUEUE)
#define G_QUEUE(x) (((x) >> S_QUEUE) & M_QUEUE)
+#define S_T7_UPDVLD 19
+#define V_T7_UPDVLD(x) ((x) << S_T7_UPDVLD)
+#define F_T7_UPDVLD V_T7_UPDVLD(1U)
+
+#define S_T7_XOFF 18
+#define V_T7_XOFF(x) ((x) << S_T7_XOFF)
+#define F_T7_XOFF V_T7_XOFF(1U)
+
+#define S_T7_UPDCHN3 17
+#define V_T7_UPDCHN3(x) ((x) << S_T7_UPDCHN3)
+#define F_T7_UPDCHN3 V_T7_UPDCHN3(1U)
+
+#define S_T7_UPDCHN2 16
+#define V_T7_UPDCHN2(x) ((x) << S_T7_UPDCHN2)
+#define F_T7_UPDCHN2 V_T7_UPDCHN2(1U)
+
+#define S_T7_UPDCHN1 15
+#define V_T7_UPDCHN1(x) ((x) << S_T7_UPDCHN1)
+#define F_T7_UPDCHN1 V_T7_UPDCHN1(1U)
+
+#define S_T7_UPDCHN0 14
+#define V_T7_UPDCHN0(x) ((x) << S_T7_UPDCHN0)
+#define F_T7_UPDCHN0 V_T7_UPDCHN0(1U)
+
+#define S_T7_QUEUE 0
+#define M_T7_QUEUE 0x3fffU
+#define V_T7_QUEUE(x) ((x) << S_T7_QUEUE)
+#define G_T7_QUEUE(x) (((x) >> S_T7_QUEUE) & M_T7_QUEUE)
+
+#define A_TP_RSS_CONFIG_4CH 0x7e08
+
+#define S_BASEQIDEN 1
+#define V_BASEQIDEN(x) ((x) << S_BASEQIDEN)
+#define F_BASEQIDEN V_BASEQIDEN(1U)
+
+#define S_200GMODE 0
+#define V_200GMODE(x) ((x) << S_200GMODE)
+#define F_200GMODE V_200GMODE(1U)
+
+#define A_TP_RSS_CONFIG_SRAM 0x7e0c
+
+#define S_SRAMRDDIS 20
+#define V_SRAMRDDIS(x) ((x) << S_SRAMRDDIS)
+#define F_SRAMRDDIS V_SRAMRDDIS(1U)
+
+#define S_SRAMSTART 19
+#define V_SRAMSTART(x) ((x) << S_SRAMSTART)
+#define F_SRAMSTART V_SRAMSTART(1U)
+
+#define S_SRAMWRITE 18
+#define V_SRAMWRITE(x) ((x) << S_SRAMWRITE)
+#define F_SRAMWRITE V_SRAMWRITE(1U)
+
+#define S_SRAMSEL 16
+#define M_SRAMSEL 0x3U
+#define V_SRAMSEL(x) ((x) << S_SRAMSEL)
+#define G_SRAMSEL(x) (((x) >> S_SRAMSEL) & M_SRAMSEL)
+
+#define S_SRAMADDR 0
+#define M_SRAMADDR 0x3fffU
+#define V_SRAMADDR(x) ((x) << S_SRAMADDR)
+#define G_SRAMADDR(x) (((x) >> S_SRAMADDR) & M_SRAMADDR)
+
#define A_TP_LA_TABLE_0 0x7e10
#define S_VIRTPORT1TABLE 16
@@ -23621,6 +29225,18 @@
#define A_TP_TM_PIO_ADDR 0x7e18
#define A_TP_TM_PIO_DATA 0x7e1c
+#define A_TP_RX_MOD_CONFIG_CH3_CH2 0x7e20
+
+#define S_RXCHANNELWEIGHT3 8
+#define M_RXCHANNELWEIGHT3 0xffU
+#define V_RXCHANNELWEIGHT3(x) ((x) << S_RXCHANNELWEIGHT3)
+#define G_RXCHANNELWEIGHT3(x) (((x) >> S_RXCHANNELWEIGHT3) & M_RXCHANNELWEIGHT3)
+
+#define S_RXCHANNELWEIGHT2 0
+#define M_RXCHANNELWEIGHT2 0xffU
+#define V_RXCHANNELWEIGHT2(x) ((x) << S_RXCHANNELWEIGHT2)
+#define G_RXCHANNELWEIGHT2(x) (((x) >> S_RXCHANNELWEIGHT2) & M_RXCHANNELWEIGHT2)
+
#define A_TP_MOD_CONFIG 0x7e24
#define S_RXCHANNELWEIGHT1 24
@@ -23887,6 +29503,30 @@
#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR)
#define F_SRQTABLEPERR V_SRQTABLEPERR(1U)
+#define S_TPCERR 5
+#define V_TPCERR(x) ((x) << S_TPCERR)
+#define F_TPCERR V_TPCERR(1U)
+
+#define S_OTHERPERR 4
+#define V_OTHERPERR(x) ((x) << S_OTHERPERR)
+#define F_OTHERPERR V_OTHERPERR(1U)
+
+#define S_TPEING1PERR 3
+#define V_TPEING1PERR(x) ((x) << S_TPEING1PERR)
+#define F_TPEING1PERR V_TPEING1PERR(1U)
+
+#define S_TPEING0PERR 2
+#define V_TPEING0PERR(x) ((x) << S_TPEING0PERR)
+#define F_TPEING0PERR V_TPEING0PERR(1U)
+
+#define S_TPEEGPERR 1
+#define V_TPEEGPERR(x) ((x) << S_TPEEGPERR)
+#define F_TPEEGPERR V_TPEEGPERR(1U)
+
+#define S_TPCPERR 0
+#define V_TPCPERR(x) ((x) << S_TPCPERR)
+#define F_TPCPERR V_TPCPERR(1U)
+
#define A_TP_INT_CAUSE 0x7e74
#define A_TP_PER_ENABLE 0x7e78
#define A_TP_FLM_FREE_PS_CNT 0x7e80
@@ -23907,6 +29547,11 @@
#define V_FREERXPAGECOUNT(x) ((x) << S_FREERXPAGECOUNT)
#define G_FREERXPAGECOUNT(x) (((x) >> S_FREERXPAGECOUNT) & M_FREERXPAGECOUNT)
+#define S_T7_FREERXPAGECHN 28
+#define M_T7_FREERXPAGECHN 0x7U
+#define V_T7_FREERXPAGECHN(x) ((x) << S_T7_FREERXPAGECHN)
+#define G_T7_FREERXPAGECHN(x) (((x) >> S_T7_FREERXPAGECHN) & M_T7_FREERXPAGECHN)
+
#define A_TP_FLM_FREE_TX_CNT 0x7e88
#define S_FREETXPAGECHN 28
@@ -23919,6 +29564,11 @@
#define V_FREETXPAGECOUNT(x) ((x) << S_FREETXPAGECOUNT)
#define G_FREETXPAGECOUNT(x) (((x) >> S_FREETXPAGECOUNT) & M_FREETXPAGECOUNT)
+#define S_T7_FREETXPAGECHN 28
+#define M_T7_FREETXPAGECHN 0x7U
+#define V_T7_FREETXPAGECHN(x) ((x) << S_T7_FREETXPAGECHN)
+#define G_T7_FREETXPAGECHN(x) (((x) >> S_T7_FREETXPAGECHN) & M_T7_FREETXPAGECHN)
+
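+/*
+ * Field-extraction sketch for the counter above (helper names assumed
+ * as elsewhere in this file):
+ *
+ *	uint32_t v = t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT);
+ *	unsigned int chan = G_T7_FREETXPAGECHN(v);
+ *	unsigned int count = G_FREETXPAGECOUNT(v);
+ */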
#define A_TP_TM_HEAP_PUSH_CNT 0x7e8c
#define A_TP_TM_HEAP_POP_CNT 0x7e90
#define A_TP_TM_DACK_PUSH_CNT 0x7e94
@@ -24111,6 +29761,38 @@
#define V_COMMITLIMIT0(x) ((x) << S_COMMITLIMIT0)
#define G_COMMITLIMIT0(x) (((x) >> S_COMMITLIMIT0) & M_COMMITLIMIT0)
+#define S_RXCOMMITRESET3 7
+#define V_RXCOMMITRESET3(x) ((x) << S_RXCOMMITRESET3)
+#define F_RXCOMMITRESET3 V_RXCOMMITRESET3(1U)
+
+#define S_RXCOMMITRESET2 6
+#define V_RXCOMMITRESET2(x) ((x) << S_RXCOMMITRESET2)
+#define F_RXCOMMITRESET2 V_RXCOMMITRESET2(1U)
+
+#define S_T7_RXCOMMITRESET1 5
+#define V_T7_RXCOMMITRESET1(x) ((x) << S_T7_RXCOMMITRESET1)
+#define F_T7_RXCOMMITRESET1 V_T7_RXCOMMITRESET1(1U)
+
+#define S_T7_RXCOMMITRESET0 4
+#define V_T7_RXCOMMITRESET0(x) ((x) << S_T7_RXCOMMITRESET0)
+#define F_T7_RXCOMMITRESET0 V_T7_RXCOMMITRESET0(1U)
+
+#define S_RXFORCECONG3 3
+#define V_RXFORCECONG3(x) ((x) << S_RXFORCECONG3)
+#define F_RXFORCECONG3 V_RXFORCECONG3(1U)
+
+#define S_RXFORCECONG2 2
+#define V_RXFORCECONG2(x) ((x) << S_RXFORCECONG2)
+#define F_RXFORCECONG2 V_RXFORCECONG2(1U)
+
+#define S_T7_RXFORCECONG1 1
+#define V_T7_RXFORCECONG1(x) ((x) << S_T7_RXFORCECONG1)
+#define F_T7_RXFORCECONG1 V_T7_RXFORCECONG1(1U)
+
+#define S_T7_RXFORCECONG0 0
+#define V_T7_RXFORCECONG0(x) ((x) << S_T7_RXFORCECONG0)
+#define F_T7_RXFORCECONG0 V_T7_RXFORCECONG0(1U)
+
#define A_TP_TX_SCHED 0x7eb4
#define S_COMMITRESET3 31
@@ -24229,6 +29911,14 @@
#define V_RXMODXOFF0(x) ((x) << S_RXMODXOFF0)
#define F_RXMODXOFF0 V_RXMODXOFF0(1U)
+#define S_RXMODXOFF3 3
+#define V_RXMODXOFF3(x) ((x) << S_RXMODXOFF3)
+#define F_RXMODXOFF3 V_RXMODXOFF3(1U)
+
+#define S_RXMODXOFF2 2
+#define V_RXMODXOFF2(x) ((x) << S_RXMODXOFF2)
+#define F_RXMODXOFF2 V_RXMODXOFF2(1U)
+
#define A_TP_TX_ORATE 0x7ebc
#define S_OFDRATE3 24
@@ -24313,6 +30003,37 @@
#define A_TP_DBG_LA_DATAL 0x7ed8
#define A_TP_DBG_LA_DATAH 0x7edc
+#define A_TP_DBG_LA_FILTER 0x7ee0
+
+#define S_FILTERTID 12
+#define M_FILTERTID 0xfffffU
+#define V_FILTERTID(x) ((x) << S_FILTERTID)
+#define G_FILTERTID(x) (((x) >> S_FILTERTID) & M_FILTERTID)
+
+#define S_ENTIDFILTER 5
+#define V_ENTIDFILTER(x) ((x) << S_ENTIDFILTER)
+#define F_ENTIDFILTER V_ENTIDFILTER(1U)
+
+#define S_ENOFFLOAD 4
+#define V_ENOFFLOAD(x) ((x) << S_ENOFFLOAD)
+#define F_ENOFFLOAD V_ENOFFLOAD(1U)
+
+#define S_ENTUNNEL 3
+#define V_ENTUNNEL(x) ((x) << S_ENTUNNEL)
+#define F_ENTUNNEL V_ENTUNNEL(1U)
+
+#define S_ENI 2
+#define V_ENI(x) ((x) << S_ENI)
+#define F_ENI V_ENI(1U)
+
+#define S_ENC 1
+#define V_ENC(x) ((x) << S_ENC)
+#define F_ENC V_ENC(1U)
+
+#define S_ENE 0
+#define V_ENE(x) ((x) << S_ENE)
+#define F_ENE V_ENE(1U)
+
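+/*
+ * Example arming of the debug LA filter above; the tid value is a
+ * placeholder:
+ *
+ *	t4_write_reg(sc, A_TP_DBG_LA_FILTER,
+ *	    F_ENTIDFILTER | V_FILTERTID(tid));
+ */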
#define A_TP_PROTOCOL_CNTRL 0x7ee8
#define S_WRITEENABLE 31
@@ -24348,6 +30069,546 @@
#define V_PROTOCOLDATAFIELD(x) ((x) << S_PROTOCOLDATAFIELD)
#define G_PROTOCOLDATAFIELD(x) (((x) >> S_PROTOCOLDATAFIELD) & M_PROTOCOLDATAFIELD)
+#define A_TP_INIC_CTRL0 0x7f00
+#define A_TP_INIC_DBG 0x7f04
+#define A_TP_INIC_PERR_ENABLE 0x7f08
+
+#define S_INICMAC1_ERR 16
+#define M_INICMAC1_ERR 0x3fU
+#define V_INICMAC1_ERR(x) ((x) << S_INICMAC1_ERR)
+#define G_INICMAC1_ERR(x) (((x) >> S_INICMAC1_ERR) & M_INICMAC1_ERR)
+
+#define S_INICMAC0_ERR 0
+#define M_INICMAC0_ERR 0x3fU
+#define V_INICMAC0_ERR(x) ((x) << S_INICMAC0_ERR)
+#define G_INICMAC0_ERR(x) (((x) >> S_INICMAC0_ERR) & M_INICMAC0_ERR)
+
+#define A_TP_INIC_PERR_CAUSE 0x7f0c
+#define A_TP_PARA_REG10 0x7f20
+
+#define S_DIS39320FIX 20
+#define V_DIS39320FIX(x) ((x) << S_DIS39320FIX)
+#define F_DIS39320FIX V_DIS39320FIX(1U)
+
+#define S_IWARPMAXPDULEN 16
+#define M_IWARPMAXPDULEN 0xfU
+#define V_IWARPMAXPDULEN(x) ((x) << S_IWARPMAXPDULEN)
+#define G_IWARPMAXPDULEN(x) (((x) >> S_IWARPMAXPDULEN) & M_IWARPMAXPDULEN)
+
+#define S_TLSMAXRXDATA 0
+#define M_TLSMAXRXDATA 0xffffU
+#define V_TLSMAXRXDATA(x) ((x) << S_TLSMAXRXDATA)
+#define G_TLSMAXRXDATA(x) (((x) >> S_TLSMAXRXDATA) & M_TLSMAXRXDATA)
+
+#define A_TP_TCAM_BIST_CTRL 0x7f24
+#define A_TP_TCAM_BIST_CB_PASS 0x7f28
+#define A_TP_TCAM_BIST_CB_BUSY 0x7f2c
+#define A_TP_C_PERR_ENABLE 0x7f30
+
+#define S_DMXFIFOOVFL 26
+#define V_DMXFIFOOVFL(x) ((x) << S_DMXFIFOOVFL)
+#define F_DMXFIFOOVFL V_DMXFIFOOVFL(1U)
+
+#define S_URX2TPCDDPINTF 25
+#define V_URX2TPCDDPINTF(x) ((x) << S_URX2TPCDDPINTF)
+#define F_URX2TPCDDPINTF V_URX2TPCDDPINTF(1U)
+
+#define S_TPCDISPTOKENFIFO 24
+#define V_TPCDISPTOKENFIFO(x) ((x) << S_TPCDISPTOKENFIFO)
+#define F_TPCDISPTOKENFIFO V_TPCDISPTOKENFIFO(1U)
+
+#define S_TPCDISPCPLFIFO3 23
+#define V_TPCDISPCPLFIFO3(x) ((x) << S_TPCDISPCPLFIFO3)
+#define F_TPCDISPCPLFIFO3 V_TPCDISPCPLFIFO3(1U)
+
+#define S_TPCDISPCPLFIFO2 22
+#define V_TPCDISPCPLFIFO2(x) ((x) << S_TPCDISPCPLFIFO2)
+#define F_TPCDISPCPLFIFO2 V_TPCDISPCPLFIFO2(1U)
+
+#define S_TPCDISPCPLFIFO1 21
+#define V_TPCDISPCPLFIFO1(x) ((x) << S_TPCDISPCPLFIFO1)
+#define F_TPCDISPCPLFIFO1 V_TPCDISPCPLFIFO1(1U)
+
+#define S_TPCDISPCPLFIFO0 20
+#define V_TPCDISPCPLFIFO0(x) ((x) << S_TPCDISPCPLFIFO0)
+#define F_TPCDISPCPLFIFO0 V_TPCDISPCPLFIFO0(1U)
+
+#define S_URXPLDINTFCRC3 19
+#define V_URXPLDINTFCRC3(x) ((x) << S_URXPLDINTFCRC3)
+#define F_URXPLDINTFCRC3 V_URXPLDINTFCRC3(1U)
+
+#define S_URXPLDINTFCRC2 18
+#define V_URXPLDINTFCRC2(x) ((x) << S_URXPLDINTFCRC2)
+#define F_URXPLDINTFCRC2 V_URXPLDINTFCRC2(1U)
+
+#define S_URXPLDINTFCRC1 17
+#define V_URXPLDINTFCRC1(x) ((x) << S_URXPLDINTFCRC1)
+#define F_URXPLDINTFCRC1 V_URXPLDINTFCRC1(1U)
+
+#define S_URXPLDINTFCRC0 16
+#define V_URXPLDINTFCRC0(x) ((x) << S_URXPLDINTFCRC0)
+#define F_URXPLDINTFCRC0 V_URXPLDINTFCRC0(1U)
+
+#define S_DMXDBFIFO 15
+#define V_DMXDBFIFO(x) ((x) << S_DMXDBFIFO)
+#define F_DMXDBFIFO V_DMXDBFIFO(1U)
+
+#define S_DMXDBSRAM 14
+#define V_DMXDBSRAM(x) ((x) << S_DMXDBSRAM)
+#define F_DMXDBSRAM V_DMXDBSRAM(1U)
+
+#define S_DMXCPLFIFO 13
+#define V_DMXCPLFIFO(x) ((x) << S_DMXCPLFIFO)
+#define F_DMXCPLFIFO V_DMXCPLFIFO(1U)
+
+#define S_DMXCPLSRAM 12
+#define V_DMXCPLSRAM(x) ((x) << S_DMXCPLSRAM)
+#define F_DMXCPLSRAM V_DMXCPLSRAM(1U)
+
+#define S_DMXCSUMFIFO 11
+#define V_DMXCSUMFIFO(x) ((x) << S_DMXCSUMFIFO)
+#define F_DMXCSUMFIFO V_DMXCSUMFIFO(1U)
+
+#define S_DMXLENFIFO 10
+#define V_DMXLENFIFO(x) ((x) << S_DMXLENFIFO)
+#define F_DMXLENFIFO V_DMXLENFIFO(1U)
+
+#define S_DMXCHECKFIFO 9
+#define V_DMXCHECKFIFO(x) ((x) << S_DMXCHECKFIFO)
+#define F_DMXCHECKFIFO V_DMXCHECKFIFO(1U)
+
+#define S_DMXWINFIFO 8
+#define V_DMXWINFIFO(x) ((x) << S_DMXWINFIFO)
+#define F_DMXWINFIFO V_DMXWINFIFO(1U)
+
+#define S_EGTOKENFIFO 7
+#define V_EGTOKENFIFO(x) ((x) << S_EGTOKENFIFO)
+#define F_EGTOKENFIFO V_EGTOKENFIFO(1U)
+
+#define S_EGDATAFIFO 6
+#define V_EGDATAFIFO(x) ((x) << S_EGDATAFIFO)
+#define F_EGDATAFIFO V_EGDATAFIFO(1U)
+
+#define S_UTX2TPCINTF3 5
+#define V_UTX2TPCINTF3(x) ((x) << S_UTX2TPCINTF3)
+#define F_UTX2TPCINTF3 V_UTX2TPCINTF3(1U)
+
+#define S_UTX2TPCINTF2 4
+#define V_UTX2TPCINTF2(x) ((x) << S_UTX2TPCINTF2)
+#define F_UTX2TPCINTF2 V_UTX2TPCINTF2(1U)
+
+#define S_UTX2TPCINTF1 3
+#define V_UTX2TPCINTF1(x) ((x) << S_UTX2TPCINTF1)
+#define F_UTX2TPCINTF1 V_UTX2TPCINTF1(1U)
+
+#define S_UTX2TPCINTF0 2
+#define V_UTX2TPCINTF0(x) ((x) << S_UTX2TPCINTF0)
+#define F_UTX2TPCINTF0 V_UTX2TPCINTF0(1U)
+
+#define S_LBKTOKENFIFO 1
+#define V_LBKTOKENFIFO(x) ((x) << S_LBKTOKENFIFO)
+#define F_LBKTOKENFIFO V_LBKTOKENFIFO(1U)
+
+#define S_LBKDATAFIFO 0
+#define V_LBKDATAFIFO(x) ((x) << S_LBKDATAFIFO)
+#define F_LBKDATAFIFO V_LBKDATAFIFO(1U)
+
+#define A_TP_C_PERR_CAUSE 0x7f34
+#define A_TP_E_EG_PERR_ENABLE 0x7f38
+
+#define S_MPSLPBKTOKENFIFO 25
+#define V_MPSLPBKTOKENFIFO(x) ((x) << S_MPSLPBKTOKENFIFO)
+#define F_MPSLPBKTOKENFIFO V_MPSLPBKTOKENFIFO(1U)
+
+#define S_MPSMACTOKENFIFO 24
+#define V_MPSMACTOKENFIFO(x) ((x) << S_MPSMACTOKENFIFO)
+#define F_MPSMACTOKENFIFO V_MPSMACTOKENFIFO(1U)
+
+#define S_DISPIPSECFIFO3 23
+#define V_DISPIPSECFIFO3(x) ((x) << S_DISPIPSECFIFO3)
+#define F_DISPIPSECFIFO3 V_DISPIPSECFIFO3(1U)
+
+#define S_DISPTCPFIFO3 22
+#define V_DISPTCPFIFO3(x) ((x) << S_DISPTCPFIFO3)
+#define F_DISPTCPFIFO3 V_DISPTCPFIFO3(1U)
+
+#define S_DISPIPFIFO3 21
+#define V_DISPIPFIFO3(x) ((x) << S_DISPIPFIFO3)
+#define F_DISPIPFIFO3 V_DISPIPFIFO3(1U)
+
+#define S_DISPETHFIFO3 20
+#define V_DISPETHFIFO3(x) ((x) << S_DISPETHFIFO3)
+#define F_DISPETHFIFO3 V_DISPETHFIFO3(1U)
+
+#define S_DISPGREFIFO3 19
+#define V_DISPGREFIFO3(x) ((x) << S_DISPGREFIFO3)
+#define F_DISPGREFIFO3 V_DISPGREFIFO3(1U)
+
+#define S_DISPCPL5FIFO3 18
+#define V_DISPCPL5FIFO3(x) ((x) << S_DISPCPL5FIFO3)
+#define F_DISPCPL5FIFO3 V_DISPCPL5FIFO3(1U)
+
+#define S_DISPIPSECFIFO2 17
+#define V_DISPIPSECFIFO2(x) ((x) << S_DISPIPSECFIFO2)
+#define F_DISPIPSECFIFO2 V_DISPIPSECFIFO2(1U)
+
+#define S_DISPTCPFIFO2 16
+#define V_DISPTCPFIFO2(x) ((x) << S_DISPTCPFIFO2)
+#define F_DISPTCPFIFO2 V_DISPTCPFIFO2(1U)
+
+#define S_DISPIPFIFO2 15
+#define V_DISPIPFIFO2(x) ((x) << S_DISPIPFIFO2)
+#define F_DISPIPFIFO2 V_DISPIPFIFO2(1U)
+
+#define S_DISPETHFIFO2 14
+#define V_DISPETHFIFO2(x) ((x) << S_DISPETHFIFO2)
+#define F_DISPETHFIFO2 V_DISPETHFIFO2(1U)
+
+#define S_DISPGREFIFO2 13
+#define V_DISPGREFIFO2(x) ((x) << S_DISPGREFIFO2)
+#define F_DISPGREFIFO2 V_DISPGREFIFO2(1U)
+
+#define S_DISPCPL5FIFO2 12
+#define V_DISPCPL5FIFO2(x) ((x) << S_DISPCPL5FIFO2)
+#define F_DISPCPL5FIFO2 V_DISPCPL5FIFO2(1U)
+
+#define S_DISPIPSECFIFO1 11
+#define V_DISPIPSECFIFO1(x) ((x) << S_DISPIPSECFIFO1)
+#define F_DISPIPSECFIFO1 V_DISPIPSECFIFO1(1U)
+
+#define S_DISPTCPFIFO1 10
+#define V_DISPTCPFIFO1(x) ((x) << S_DISPTCPFIFO1)
+#define F_DISPTCPFIFO1 V_DISPTCPFIFO1(1U)
+
+#define S_DISPIPFIFO1 9
+#define V_DISPIPFIFO1(x) ((x) << S_DISPIPFIFO1)
+#define F_DISPIPFIFO1 V_DISPIPFIFO1(1U)
+
+#define S_DISPETHFIFO1 8
+#define V_DISPETHFIFO1(x) ((x) << S_DISPETHFIFO1)
+#define F_DISPETHFIFO1 V_DISPETHFIFO1(1U)
+
+#define S_DISPGREFIFO1 7
+#define V_DISPGREFIFO1(x) ((x) << S_DISPGREFIFO1)
+#define F_DISPGREFIFO1 V_DISPGREFIFO1(1U)
+
+#define S_DISPCPL5FIFO1 6
+#define V_DISPCPL5FIFO1(x) ((x) << S_DISPCPL5FIFO1)
+#define F_DISPCPL5FIFO1 V_DISPCPL5FIFO1(1U)
+
+#define S_DISPIPSECFIFO0 5
+#define V_DISPIPSECFIFO0(x) ((x) << S_DISPIPSECFIFO0)
+#define F_DISPIPSECFIFO0 V_DISPIPSECFIFO0(1U)
+
+#define S_DISPTCPFIFO0 4
+#define V_DISPTCPFIFO0(x) ((x) << S_DISPTCPFIFO0)
+#define F_DISPTCPFIFO0 V_DISPTCPFIFO0(1U)
+
+#define S_DISPIPFIFO0 3
+#define V_DISPIPFIFO0(x) ((x) << S_DISPIPFIFO0)
+#define F_DISPIPFIFO0 V_DISPIPFIFO0(1U)
+
+#define S_DISPETHFIFO0 2
+#define V_DISPETHFIFO0(x) ((x) << S_DISPETHFIFO0)
+#define F_DISPETHFIFO0 V_DISPETHFIFO0(1U)
+
+#define S_DISPGREFIFO0 1
+#define V_DISPGREFIFO0(x) ((x) << S_DISPGREFIFO0)
+#define F_DISPGREFIFO0 V_DISPGREFIFO0(1U)
+
+#define S_DISPCPL5FIFO0 0
+#define V_DISPCPL5FIFO0(x) ((x) << S_DISPCPL5FIFO0)
+#define F_DISPCPL5FIFO0 V_DISPCPL5FIFO0(1U)
+
+#define A_TP_E_EG_PERR_CAUSE 0x7f3c
+#define A_TP_E_IN0_PERR_ENABLE 0x7f40
+
+#define S_DMXISSFIFO 30
+#define V_DMXISSFIFO(x) ((x) << S_DMXISSFIFO)
+#define F_DMXISSFIFO V_DMXISSFIFO(1U)
+
+#define S_DMXERRFIFO 29
+#define V_DMXERRFIFO(x) ((x) << S_DMXERRFIFO)
+#define F_DMXERRFIFO V_DMXERRFIFO(1U)
+
+#define S_DMXATTFIFO 28
+#define V_DMXATTFIFO(x) ((x) << S_DMXATTFIFO)
+#define F_DMXATTFIFO V_DMXATTFIFO(1U)
+
+#define S_DMXTCPFIFO 27
+#define V_DMXTCPFIFO(x) ((x) << S_DMXTCPFIFO)
+#define F_DMXTCPFIFO V_DMXTCPFIFO(1U)
+
+#define S_DMXMPAFIFO 26
+#define V_DMXMPAFIFO(x) ((x) << S_DMXMPAFIFO)
+#define F_DMXMPAFIFO V_DMXMPAFIFO(1U)
+
+#define S_DMXOPTFIFO 25
+#define V_DMXOPTFIFO(x) ((x) << S_DMXOPTFIFO)
+#define F_DMXOPTFIFO V_DMXOPTFIFO(1U)
+
+#define S_INGTOKENFIFO 24
+#define V_INGTOKENFIFO(x) ((x) << S_INGTOKENFIFO)
+#define F_INGTOKENFIFO V_INGTOKENFIFO(1U)
+
+#define S_DMXPLDCHKOVFL1 21
+#define V_DMXPLDCHKOVFL1(x) ((x) << S_DMXPLDCHKOVFL1)
+#define F_DMXPLDCHKOVFL1 V_DMXPLDCHKOVFL1(1U)
+
+#define S_DMXPLDCHKFIFO1 20
+#define V_DMXPLDCHKFIFO1(x) ((x) << S_DMXPLDCHKFIFO1)
+#define F_DMXPLDCHKFIFO1 V_DMXPLDCHKFIFO1(1U)
+
+#define S_DMXOPTFIFO1 19
+#define V_DMXOPTFIFO1(x) ((x) << S_DMXOPTFIFO1)
+#define F_DMXOPTFIFO1 V_DMXOPTFIFO1(1U)
+
+#define S_DMXMPAFIFO1 18
+#define V_DMXMPAFIFO1(x) ((x) << S_DMXMPAFIFO1)
+#define F_DMXMPAFIFO1 V_DMXMPAFIFO1(1U)
+
+#define S_DMXDBFIFO1 17
+#define V_DMXDBFIFO1(x) ((x) << S_DMXDBFIFO1)
+#define F_DMXDBFIFO1 V_DMXDBFIFO1(1U)
+
+#define S_DMXATTFIFO1 16
+#define V_DMXATTFIFO1(x) ((x) << S_DMXATTFIFO1)
+#define F_DMXATTFIFO1 V_DMXATTFIFO1(1U)
+
+#define S_DMXISSFIFO1 15
+#define V_DMXISSFIFO1(x) ((x) << S_DMXISSFIFO1)
+#define F_DMXISSFIFO1 V_DMXISSFIFO1(1U)
+
+#define S_DMXTCPFIFO1 14
+#define V_DMXTCPFIFO1(x) ((x) << S_DMXTCPFIFO1)
+#define F_DMXTCPFIFO1 V_DMXTCPFIFO1(1U)
+
+#define S_DMXERRFIFO1 13
+#define V_DMXERRFIFO1(x) ((x) << S_DMXERRFIFO1)
+#define F_DMXERRFIFO1 V_DMXERRFIFO1(1U)
+
+#define S_MPS2TPINTF1 12
+#define V_MPS2TPINTF1(x) ((x) << S_MPS2TPINTF1)
+#define F_MPS2TPINTF1 V_MPS2TPINTF1(1U)
+
+#define S_DMXPLDCHKOVFL0 9
+#define V_DMXPLDCHKOVFL0(x) ((x) << S_DMXPLDCHKOVFL0)
+#define F_DMXPLDCHKOVFL0 V_DMXPLDCHKOVFL0(1U)
+
+#define S_DMXPLDCHKFIFO0 8
+#define V_DMXPLDCHKFIFO0(x) ((x) << S_DMXPLDCHKFIFO0)
+#define F_DMXPLDCHKFIFO0 V_DMXPLDCHKFIFO0(1U)
+
+#define S_DMXOPTFIFO0 7
+#define V_DMXOPTFIFO0(x) ((x) << S_DMXOPTFIFO0)
+#define F_DMXOPTFIFO0 V_DMXOPTFIFO0(1U)
+
+#define S_DMXMPAFIFO0 6
+#define V_DMXMPAFIFO0(x) ((x) << S_DMXMPAFIFO0)
+#define F_DMXMPAFIFO0 V_DMXMPAFIFO0(1U)
+
+#define S_DMXDBFIFO0 5
+#define V_DMXDBFIFO0(x) ((x) << S_DMXDBFIFO0)
+#define F_DMXDBFIFO0 V_DMXDBFIFO0(1U)
+
+#define S_DMXATTFIFO0 4
+#define V_DMXATTFIFO0(x) ((x) << S_DMXATTFIFO0)
+#define F_DMXATTFIFO0 V_DMXATTFIFO0(1U)
+
+#define S_DMXISSFIFO0 3
+#define V_DMXISSFIFO0(x) ((x) << S_DMXISSFIFO0)
+#define F_DMXISSFIFO0 V_DMXISSFIFO0(1U)
+
+#define S_DMXTCPFIFO0 2
+#define V_DMXTCPFIFO0(x) ((x) << S_DMXTCPFIFO0)
+#define F_DMXTCPFIFO0 V_DMXTCPFIFO0(1U)
+
+#define S_DMXERRFIFO0 1
+#define V_DMXERRFIFO0(x) ((x) << S_DMXERRFIFO0)
+#define F_DMXERRFIFO0 V_DMXERRFIFO0(1U)
+
+#define S_MPS2TPINTF0 0
+#define V_MPS2TPINTF0(x) ((x) << S_MPS2TPINTF0)
+#define F_MPS2TPINTF0 V_MPS2TPINTF0(1U)
+
+#define A_TP_E_IN0_PERR_CAUSE 0x7f44
+#define A_TP_E_IN1_PERR_ENABLE 0x7f48
+
+#define S_DMXPLDCHKOVFL3 21
+#define V_DMXPLDCHKOVFL3(x) ((x) << S_DMXPLDCHKOVFL3)
+#define F_DMXPLDCHKOVFL3 V_DMXPLDCHKOVFL3(1U)
+
+#define S_DMXPLDCHKFIFO3 20
+#define V_DMXPLDCHKFIFO3(x) ((x) << S_DMXPLDCHKFIFO3)
+#define F_DMXPLDCHKFIFO3 V_DMXPLDCHKFIFO3(1U)
+
+#define S_DMXOPTFIFO3 19
+#define V_DMXOPTFIFO3(x) ((x) << S_DMXOPTFIFO3)
+#define F_DMXOPTFIFO3 V_DMXOPTFIFO3(1U)
+
+#define S_DMXMPAFIFO3 18
+#define V_DMXMPAFIFO3(x) ((x) << S_DMXMPAFIFO3)
+#define F_DMXMPAFIFO3 V_DMXMPAFIFO3(1U)
+
+#define S_DMXDBFIFO3 17
+#define V_DMXDBFIFO3(x) ((x) << S_DMXDBFIFO3)
+#define F_DMXDBFIFO3 V_DMXDBFIFO3(1U)
+
+#define S_DMXATTFIFO3 16
+#define V_DMXATTFIFO3(x) ((x) << S_DMXATTFIFO3)
+#define F_DMXATTFIFO3 V_DMXATTFIFO3(1U)
+
+#define S_DMXISSFIFO3 15
+#define V_DMXISSFIFO3(x) ((x) << S_DMXISSFIFO3)
+#define F_DMXISSFIFO3 V_DMXISSFIFO3(1U)
+
+#define S_DMXTCPFIFO3 14
+#define V_DMXTCPFIFO3(x) ((x) << S_DMXTCPFIFO3)
+#define F_DMXTCPFIFO3 V_DMXTCPFIFO3(1U)
+
+#define S_DMXERRFIFO3 13
+#define V_DMXERRFIFO3(x) ((x) << S_DMXERRFIFO3)
+#define F_DMXERRFIFO3 V_DMXERRFIFO3(1U)
+
+#define S_MPS2TPINTF3 12
+#define V_MPS2TPINTF3(x) ((x) << S_MPS2TPINTF3)
+#define F_MPS2TPINTF3 V_MPS2TPINTF3(1U)
+
+#define S_DMXPLDCHKOVFL2 9
+#define V_DMXPLDCHKOVFL2(x) ((x) << S_DMXPLDCHKOVFL2)
+#define F_DMXPLDCHKOVFL2 V_DMXPLDCHKOVFL2(1U)
+
+#define S_DMXPLDCHKFIFO2 8
+#define V_DMXPLDCHKFIFO2(x) ((x) << S_DMXPLDCHKFIFO2)
+#define F_DMXPLDCHKFIFO2 V_DMXPLDCHKFIFO2(1U)
+
+#define S_DMXOPTFIFO2 7
+#define V_DMXOPTFIFO2(x) ((x) << S_DMXOPTFIFO2)
+#define F_DMXOPTFIFO2 V_DMXOPTFIFO2(1U)
+
+#define S_DMXMPAFIFO2 6
+#define V_DMXMPAFIFO2(x) ((x) << S_DMXMPAFIFO2)
+#define F_DMXMPAFIFO2 V_DMXMPAFIFO2(1U)
+
+#define S_DMXDBFIFO2 5
+#define V_DMXDBFIFO2(x) ((x) << S_DMXDBFIFO2)
+#define F_DMXDBFIFO2 V_DMXDBFIFO2(1U)
+
+#define S_DMXATTFIFO2 4
+#define V_DMXATTFIFO2(x) ((x) << S_DMXATTFIFO2)
+#define F_DMXATTFIFO2 V_DMXATTFIFO2(1U)
+
+#define S_DMXISSFIFO2 3
+#define V_DMXISSFIFO2(x) ((x) << S_DMXISSFIFO2)
+#define F_DMXISSFIFO2 V_DMXISSFIFO2(1U)
+
+#define S_DMXTCPFIFO2 2
+#define V_DMXTCPFIFO2(x) ((x) << S_DMXTCPFIFO2)
+#define F_DMXTCPFIFO2 V_DMXTCPFIFO2(1U)
+
+#define S_DMXERRFIFO2 1
+#define V_DMXERRFIFO2(x) ((x) << S_DMXERRFIFO2)
+#define F_DMXERRFIFO2 V_DMXERRFIFO2(1U)
+
+#define S_MPS2TPINTF2 0
+#define V_MPS2TPINTF2(x) ((x) << S_MPS2TPINTF2)
+#define F_MPS2TPINTF2 V_MPS2TPINTF2(1U)
+
+#define A_TP_E_IN1_PERR_CAUSE 0x7f4c
+#define A_TP_O_PERR_ENABLE 0x7f50
+
+#define S_DMARBTPERR 31
+#define V_DMARBTPERR(x) ((x) << S_DMARBTPERR)
+#define F_DMARBTPERR V_DMARBTPERR(1U)
+
+#define S_MMGRCACHEDATASRAM 24
+#define V_MMGRCACHEDATASRAM(x) ((x) << S_MMGRCACHEDATASRAM)
+#define F_MMGRCACHEDATASRAM V_MMGRCACHEDATASRAM(1U)
+
+#define S_MMGRCACHETAGFIFO 23
+#define V_MMGRCACHETAGFIFO(x) ((x) << S_MMGRCACHETAGFIFO)
+#define F_MMGRCACHETAGFIFO V_MMGRCACHETAGFIFO(1U)
+
+#define S_TPPROTOSRAM 16
+#define V_TPPROTOSRAM(x) ((x) << S_TPPROTOSRAM)
+#define F_TPPROTOSRAM V_TPPROTOSRAM(1U)
+
+#define S_HSPSRAM 15
+#define V_HSPSRAM(x) ((x) << S_HSPSRAM)
+#define F_HSPSRAM V_HSPSRAM(1U)
+
+#define S_RATEGRPSRAM 14
+#define V_RATEGRPSRAM(x) ((x) << S_RATEGRPSRAM)
+#define F_RATEGRPSRAM V_RATEGRPSRAM(1U)
+
+#define S_TXFBSEQFIFO 13
+#define V_TXFBSEQFIFO(x) ((x) << S_TXFBSEQFIFO)
+#define F_TXFBSEQFIFO V_TXFBSEQFIFO(1U)
+
+#define S_CMDATASRAM 12
+#define V_CMDATASRAM(x) ((x) << S_CMDATASRAM)
+#define F_CMDATASRAM V_CMDATASRAM(1U)
+
+#define S_CMTAGFIFO 11
+#define V_CMTAGFIFO(x) ((x) << S_CMTAGFIFO)
+#define F_CMTAGFIFO V_CMTAGFIFO(1U)
+
+#define S_RFCOPFIFO 10
+#define V_RFCOPFIFO(x) ((x) << S_RFCOPFIFO)
+#define F_RFCOPFIFO V_RFCOPFIFO(1U)
+
+#define S_DELINVFIFO 9
+#define V_DELINVFIFO(x) ((x) << S_DELINVFIFO)
+#define F_DELINVFIFO V_DELINVFIFO(1U)
+
+#define S_RSSCFGSRAM 8
+#define V_RSSCFGSRAM(x) ((x) << S_RSSCFGSRAM)
+#define F_RSSCFGSRAM V_RSSCFGSRAM(1U)
+
+#define S_RSSKEYSRAM 7
+#define V_RSSKEYSRAM(x) ((x) << S_RSSKEYSRAM)
+#define F_RSSKEYSRAM V_RSSKEYSRAM(1U)
+
+#define S_RSSLKPSRAM 6
+#define V_RSSLKPSRAM(x) ((x) << S_RSSLKPSRAM)
+#define F_RSSLKPSRAM V_RSSLKPSRAM(1U)
+
+#define S_SRQSRAM 5
+#define V_SRQSRAM(x) ((x) << S_SRQSRAM)
+#define F_SRQSRAM V_SRQSRAM(1U)
+
+#define S_ARPDASRAM 4
+#define V_ARPDASRAM(x) ((x) << S_ARPDASRAM)
+#define F_ARPDASRAM V_ARPDASRAM(1U)
+
+#define S_ARPSASRAM 3
+#define V_ARPSASRAM(x) ((x) << S_ARPSASRAM)
+#define F_ARPSASRAM V_ARPSASRAM(1U)
+
+#define S_ARPGRESRAM 2
+#define V_ARPGRESRAM(x) ((x) << S_ARPGRESRAM)
+#define F_ARPGRESRAM V_ARPGRESRAM(1U)
+
+#define S_ARPIPSECSRAM1 1
+#define V_ARPIPSECSRAM1(x) ((x) << S_ARPIPSECSRAM1)
+#define F_ARPIPSECSRAM1 V_ARPIPSECSRAM1(1U)
+
+#define S_ARPIPSECSRAM0 0
+#define V_ARPIPSECSRAM0(x) ((x) << S_ARPIPSECSRAM0)
+#define F_ARPIPSECSRAM0 V_ARPIPSECSRAM0(1U)
+
+#define A_TP_O_PERR_CAUSE 0x7f54
+#define A_TP_CERR_ENABLE 0x7f58
+
+#define S_TPCEGDATAFIFO 8
+#define V_TPCEGDATAFIFO(x) ((x) << S_TPCEGDATAFIFO)
+#define F_TPCEGDATAFIFO V_TPCEGDATAFIFO(1U)
+
+#define S_TPCLBKDATAFIFO 7
+#define V_TPCLBKDATAFIFO(x) ((x) << S_TPCLBKDATAFIFO)
+#define F_TPCLBKDATAFIFO V_TPCLBKDATAFIFO(1U)
+
+#define A_TP_CERR_CAUSE 0x7f5c
#define A_TP_TX_MOD_Q7_Q6_TIMER_SEPARATOR 0x0
#define S_TXTIMERSEPQ7 16
@@ -24520,6 +30781,137 @@
#define A_TP_TX_MOD_C3_C2_RATE_LIMIT 0xa
#define A_TP_TX_MOD_C1_C0_RATE_LIMIT 0xb
+#define A_TP_RX_MOD_Q3_Q2_TIMER_SEPARATOR 0xc
+
+#define S_RXTIMERSEPQ3 16
+#define M_RXTIMERSEPQ3 0xffffU
+#define V_RXTIMERSEPQ3(x) ((x) << S_RXTIMERSEPQ3)
+#define G_RXTIMERSEPQ3(x) (((x) >> S_RXTIMERSEPQ3) & M_RXTIMERSEPQ3)
+
+#define S_RXTIMERSEPQ2 0
+#define M_RXTIMERSEPQ2 0xffffU
+#define V_RXTIMERSEPQ2(x) ((x) << S_RXTIMERSEPQ2)
+#define G_RXTIMERSEPQ2(x) (((x) >> S_RXTIMERSEPQ2) & M_RXTIMERSEPQ2)
+
+#define A_TP_RX_MOD_Q3_Q2_RATE_LIMIT 0xd
+
+#define S_RXRATEINCQ3 24
+#define M_RXRATEINCQ3 0xffU
+#define V_RXRATEINCQ3(x) ((x) << S_RXRATEINCQ3)
+#define G_RXRATEINCQ3(x) (((x) >> S_RXRATEINCQ3) & M_RXRATEINCQ3)
+
+#define S_RXRATETCKQ3 16
+#define M_RXRATETCKQ3 0xffU
+#define V_RXRATETCKQ3(x) ((x) << S_RXRATETCKQ3)
+#define G_RXRATETCKQ3(x) (((x) >> S_RXRATETCKQ3) & M_RXRATETCKQ3)
+
+#define S_RXRATEINCQ2 8
+#define M_RXRATEINCQ2 0xffU
+#define V_RXRATEINCQ2(x) ((x) << S_RXRATEINCQ2)
+#define G_RXRATEINCQ2(x) (((x) >> S_RXRATEINCQ2) & M_RXRATEINCQ2)
+
+#define S_RXRATETCKQ2 0
+#define M_RXRATETCKQ2 0xffU
+#define V_RXRATETCKQ2(x) ((x) << S_RXRATETCKQ2)
+#define G_RXRATETCKQ2(x) (((x) >> S_RXRATETCKQ2) & M_RXRATETCKQ2)
+
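+/*
+ * The low-offset TM registers in this range (e.g. the rate limiter
+ * above) are not mapped directly; they are assumed to be reached
+ * through the TM PIO window, as sketched here:
+ *
+ *	t4_write_reg(sc, A_TP_TM_PIO_ADDR, A_TP_RX_MOD_Q3_Q2_RATE_LIMIT);
+ *	t4_write_reg(sc, A_TP_TM_PIO_DATA,
+ *	    V_RXRATETCKQ2(tck) | V_RXRATEINCQ2(inc));
+ */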
+#define A_TP_RX_LPBK_CONG 0x1c
+#define A_TP_RX_SCHED_MOD 0x1d
+
+#define S_T7_ENABLELPBKFULL1 28
+#define M_T7_ENABLELPBKFULL1 0xfU
+#define V_T7_ENABLELPBKFULL1(x) ((x) << S_T7_ENABLELPBKFULL1)
+#define G_T7_ENABLELPBKFULL1(x) (((x) >> S_T7_ENABLELPBKFULL1) & M_T7_ENABLELPBKFULL1)
+
+#define S_T7_ENABLEFIFOFULL1 24
+#define M_T7_ENABLEFIFOFULL1 0xfU
+#define V_T7_ENABLEFIFOFULL1(x) ((x) << S_T7_ENABLEFIFOFULL1)
+#define G_T7_ENABLEFIFOFULL1(x) (((x) >> S_T7_ENABLEFIFOFULL1) & M_T7_ENABLEFIFOFULL1)
+
+#define S_T7_ENABLEPCMDFULL1 20
+#define M_T7_ENABLEPCMDFULL1 0xfU
+#define V_T7_ENABLEPCMDFULL1(x) ((x) << S_T7_ENABLEPCMDFULL1)
+#define G_T7_ENABLEPCMDFULL1(x) (((x) >> S_T7_ENABLEPCMDFULL1) & M_T7_ENABLEPCMDFULL1)
+
+#define S_T7_ENABLEHDRFULL1 16
+#define M_T7_ENABLEHDRFULL1 0xfU
+#define V_T7_ENABLEHDRFULL1(x) ((x) << S_T7_ENABLEHDRFULL1)
+#define G_T7_ENABLEHDRFULL1(x) (((x) >> S_T7_ENABLEHDRFULL1) & M_T7_ENABLEHDRFULL1)
+
+#define S_T7_ENABLELPBKFULL0 12
+#define M_T7_ENABLELPBKFULL0 0xfU
+#define V_T7_ENABLELPBKFULL0(x) ((x) << S_T7_ENABLELPBKFULL0)
+#define G_T7_ENABLELPBKFULL0(x) (((x) >> S_T7_ENABLELPBKFULL0) & M_T7_ENABLELPBKFULL0)
+
+#define S_T7_ENABLEFIFOFULL0 8
+#define M_T7_ENABLEFIFOFULL0 0xfU
+#define V_T7_ENABLEFIFOFULL0(x) ((x) << S_T7_ENABLEFIFOFULL0)
+#define G_T7_ENABLEFIFOFULL0(x) (((x) >> S_T7_ENABLEFIFOFULL0) & M_T7_ENABLEFIFOFULL0)
+
+#define S_T7_ENABLEPCMDFULL0 4
+#define M_T7_ENABLEPCMDFULL0 0xfU
+#define V_T7_ENABLEPCMDFULL0(x) ((x) << S_T7_ENABLEPCMDFULL0)
+#define G_T7_ENABLEPCMDFULL0(x) (((x) >> S_T7_ENABLEPCMDFULL0) & M_T7_ENABLEPCMDFULL0)
+
+#define S_T7_ENABLEHDRFULL0 0
+#define M_T7_ENABLEHDRFULL0 0xfU
+#define V_T7_ENABLEHDRFULL0(x) ((x) << S_T7_ENABLEHDRFULL0)
+#define G_T7_ENABLEHDRFULL0(x) (((x) >> S_T7_ENABLEHDRFULL0) & M_T7_ENABLEHDRFULL0)
+
+#define A_TP_RX_SCHED_MOD_CH3_CH2 0x1e
+
+#define S_ENABLELPBKFULL3 28
+#define M_ENABLELPBKFULL3 0xfU
+#define V_ENABLELPBKFULL3(x) ((x) << S_ENABLELPBKFULL3)
+#define G_ENABLELPBKFULL3(x) (((x) >> S_ENABLELPBKFULL3) & M_ENABLELPBKFULL3)
+
+#define S_ENABLEFIFOFULL3 24
+#define M_ENABLEFIFOFULL3 0xfU
+#define V_ENABLEFIFOFULL3(x) ((x) << S_ENABLEFIFOFULL3)
+#define G_ENABLEFIFOFULL3(x) (((x) >> S_ENABLEFIFOFULL3) & M_ENABLEFIFOFULL3)
+
+#define S_ENABLEPCMDFULL3 20
+#define M_ENABLEPCMDFULL3 0xfU
+#define V_ENABLEPCMDFULL3(x) ((x) << S_ENABLEPCMDFULL3)
+#define G_ENABLEPCMDFULL3(x) (((x) >> S_ENABLEPCMDFULL3) & M_ENABLEPCMDFULL3)
+
+#define S_ENABLEHDRFULL3 16
+#define M_ENABLEHDRFULL3 0xfU
+#define V_ENABLEHDRFULL3(x) ((x) << S_ENABLEHDRFULL3)
+#define G_ENABLEHDRFULL3(x) (((x) >> S_ENABLEHDRFULL3) & M_ENABLEHDRFULL3)
+
+#define S_ENABLELPBKFULL2 12
+#define M_ENABLELPBKFULL2 0xfU
+#define V_ENABLELPBKFULL2(x) ((x) << S_ENABLELPBKFULL2)
+#define G_ENABLELPBKFULL2(x) (((x) >> S_ENABLELPBKFULL2) & M_ENABLELPBKFULL2)
+
+#define S_ENABLEFIFOFULL2 8
+#define M_ENABLEFIFOFULL2 0xfU
+#define V_ENABLEFIFOFULL2(x) ((x) << S_ENABLEFIFOFULL2)
+#define G_ENABLEFIFOFULL2(x) (((x) >> S_ENABLEFIFOFULL2) & M_ENABLEFIFOFULL2)
+
+#define S_ENABLEPCMDFULL2 4
+#define M_ENABLEPCMDFULL2 0xfU
+#define V_ENABLEPCMDFULL2(x) ((x) << S_ENABLEPCMDFULL2)
+#define G_ENABLEPCMDFULL2(x) (((x) >> S_ENABLEPCMDFULL2) & M_ENABLEPCMDFULL2)
+
+#define S_ENABLEHDRFULL2 0
+#define M_ENABLEHDRFULL2 0xfU
+#define V_ENABLEHDRFULL2(x) ((x) << S_ENABLEHDRFULL2)
+#define G_ENABLEHDRFULL2(x) (((x) >> S_ENABLEHDRFULL2) & M_ENABLEHDRFULL2)
+
+#define A_TP_RX_SCHED_MAP_CH3_CH2 0x1f
+
+#define S_T7_RXMAPCHANNEL3 16
+#define M_T7_RXMAPCHANNEL3 0xffffU
+#define V_T7_RXMAPCHANNEL3(x) ((x) << S_T7_RXMAPCHANNEL3)
+#define G_T7_RXMAPCHANNEL3(x) (((x) >> S_T7_RXMAPCHANNEL3) & M_T7_RXMAPCHANNEL3)
+
+#define S_T7_RXMAPCHANNEL2 0
+#define M_T7_RXMAPCHANNEL2 0xffffU
+#define V_T7_RXMAPCHANNEL2(x) ((x) << S_T7_RXMAPCHANNEL2)
+#define G_T7_RXMAPCHANNEL2(x) (((x) >> S_T7_RXMAPCHANNEL2) & M_T7_RXMAPCHANNEL2)
+
#define A_TP_RX_SCHED_MAP 0x20
#define S_RXMAPCHANNEL3 24
@@ -24542,6 +30934,16 @@
#define V_RXMAPCHANNEL0(x) ((x) << S_RXMAPCHANNEL0)
#define G_RXMAPCHANNEL0(x) (((x) >> S_RXMAPCHANNEL0) & M_RXMAPCHANNEL0)
+#define S_T7_RXMAPCHANNEL1 16
+#define M_T7_RXMAPCHANNEL1 0xffffU
+#define V_T7_RXMAPCHANNEL1(x) ((x) << S_T7_RXMAPCHANNEL1)
+#define G_T7_RXMAPCHANNEL1(x) (((x) >> S_T7_RXMAPCHANNEL1) & M_T7_RXMAPCHANNEL1)
+
+#define S_T7_RXMAPCHANNEL0 0
+#define M_T7_RXMAPCHANNEL0 0xffffU
+#define V_T7_RXMAPCHANNEL0(x) ((x) << S_T7_RXMAPCHANNEL0)
+#define G_T7_RXMAPCHANNEL0(x) (((x) >> S_T7_RXMAPCHANNEL0) & M_T7_RXMAPCHANNEL0)
+
#define A_TP_RX_SCHED_SGE 0x21
#define S_RXSGEMOD1 12
@@ -24570,6 +30972,16 @@
#define V_RXSGECHANNEL0(x) ((x) << S_RXSGECHANNEL0)
#define F_RXSGECHANNEL0 V_RXSGECHANNEL0(1U)
+#define S_RXSGEMOD3 20
+#define M_RXSGEMOD3 0xfU
+#define V_RXSGEMOD3(x) ((x) << S_RXSGEMOD3)
+#define G_RXSGEMOD3(x) (((x) >> S_RXSGEMOD3) & M_RXSGEMOD3)
+
+#define S_RXSGEMOD2 16
+#define M_RXSGEMOD2 0xfU
+#define V_RXSGEMOD2(x) ((x) << S_RXSGEMOD2)
+#define G_RXSGEMOD2(x) (((x) >> S_RXSGEMOD2) & M_RXSGEMOD2)
+
#define A_TP_TX_SCHED_MAP 0x22
#define S_TXMAPCHANNEL3 12
@@ -24600,6 +31012,14 @@
#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0)
#define F_TXLPKCHANNEL0 V_TXLPKCHANNEL0(1U)
+#define S_TXLPKCHANNEL3 19
+#define V_TXLPKCHANNEL3(x) ((x) << S_TXLPKCHANNEL3)
+#define F_TXLPKCHANNEL3 V_TXLPKCHANNEL3(1U)
+
+#define S_TXLPKCHANNEL2 18
+#define V_TXLPKCHANNEL2(x) ((x) << S_TXLPKCHANNEL2)
+#define F_TXLPKCHANNEL2 V_TXLPKCHANNEL2(1U)
+
#define A_TP_TX_SCHED_HDR 0x23
#define S_TXMAPHDRCHANNEL7 28
@@ -24827,6 +31247,69 @@
#define V_RXMAPE2CCHANNEL0(x) ((x) << S_RXMAPE2CCHANNEL0)
#define F_RXMAPE2CCHANNEL0 V_RXMAPE2CCHANNEL0(1U)
+#define S_T7_LB_MODE 30
+#define M_T7_LB_MODE 0x3U
+#define V_T7_LB_MODE(x) ((x) << S_T7_LB_MODE)
+#define G_T7_LB_MODE(x) (((x) >> S_T7_LB_MODE) & M_T7_LB_MODE)
+
+#define S_ING_LB_MODE 28
+#define M_ING_LB_MODE 0x3U
+#define V_ING_LB_MODE(x) ((x) << S_ING_LB_MODE)
+#define G_ING_LB_MODE(x) (((x) >> S_ING_LB_MODE) & M_ING_LB_MODE)
+
+#define S_RXC_LB_MODE 26
+#define M_RXC_LB_MODE 0x3U
+#define V_RXC_LB_MODE(x) ((x) << S_RXC_LB_MODE)
+#define G_RXC_LB_MODE(x) (((x) >> S_RXC_LB_MODE) & M_RXC_LB_MODE)
+
+#define S_SINGLERXCHANNEL 25
+#define V_SINGLERXCHANNEL(x) ((x) << S_SINGLERXCHANNEL)
+#define F_SINGLERXCHANNEL V_SINGLERXCHANNEL(1U)
+
+#define S_RXCHANNELCHECK 24
+#define V_RXCHANNELCHECK(x) ((x) << S_RXCHANNELCHECK)
+#define F_RXCHANNELCHECK V_RXCHANNELCHECK(1U)
+
+#define S_T7_RXMAPC2CCHANNEL3 21
+#define M_T7_RXMAPC2CCHANNEL3 0x7U
+#define V_T7_RXMAPC2CCHANNEL3(x) ((x) << S_T7_RXMAPC2CCHANNEL3)
+#define G_T7_RXMAPC2CCHANNEL3(x) (((x) >> S_T7_RXMAPC2CCHANNEL3) & M_T7_RXMAPC2CCHANNEL3)
+
+#define S_T7_RXMAPC2CCHANNEL2 18
+#define M_T7_RXMAPC2CCHANNEL2 0x7U
+#define V_T7_RXMAPC2CCHANNEL2(x) ((x) << S_T7_RXMAPC2CCHANNEL2)
+#define G_T7_RXMAPC2CCHANNEL2(x) (((x) >> S_T7_RXMAPC2CCHANNEL2) & M_T7_RXMAPC2CCHANNEL2)
+
+#define S_T7_RXMAPC2CCHANNEL1 15
+#define M_T7_RXMAPC2CCHANNEL1 0x7U
+#define V_T7_RXMAPC2CCHANNEL1(x) ((x) << S_T7_RXMAPC2CCHANNEL1)
+#define G_T7_RXMAPC2CCHANNEL1(x) (((x) >> S_T7_RXMAPC2CCHANNEL1) & M_T7_RXMAPC2CCHANNEL1)
+
+#define S_T7_RXMAPC2CCHANNEL0 12
+#define M_T7_RXMAPC2CCHANNEL0 0x7U
+#define V_T7_RXMAPC2CCHANNEL0(x) ((x) << S_T7_RXMAPC2CCHANNEL0)
+#define G_T7_RXMAPC2CCHANNEL0(x) (((x) >> S_T7_RXMAPC2CCHANNEL0) & M_T7_RXMAPC2CCHANNEL0)
+
+#define S_T7_RXMAPE2CCHANNEL3 9
+#define M_T7_RXMAPE2CCHANNEL3 0x7U
+#define V_T7_RXMAPE2CCHANNEL3(x) ((x) << S_T7_RXMAPE2CCHANNEL3)
+#define G_T7_RXMAPE2CCHANNEL3(x) (((x) >> S_T7_RXMAPE2CCHANNEL3) & M_T7_RXMAPE2CCHANNEL3)
+
+#define S_T7_RXMAPE2CCHANNEL2 6
+#define M_T7_RXMAPE2CCHANNEL2 0x7U
+#define V_T7_RXMAPE2CCHANNEL2(x) ((x) << S_T7_RXMAPE2CCHANNEL2)
+#define G_T7_RXMAPE2CCHANNEL2(x) (((x) >> S_T7_RXMAPE2CCHANNEL2) & M_T7_RXMAPE2CCHANNEL2)
+
+#define S_T7_RXMAPE2CCHANNEL1 3
+#define M_T7_RXMAPE2CCHANNEL1 0x7U
+#define V_T7_RXMAPE2CCHANNEL1(x) ((x) << S_T7_RXMAPE2CCHANNEL1)
+#define G_T7_RXMAPE2CCHANNEL1(x) (((x) >> S_T7_RXMAPE2CCHANNEL1) & M_T7_RXMAPE2CCHANNEL1)
+
+#define S_T7_RXMAPE2CCHANNEL0 0
+#define M_T7_RXMAPE2CCHANNEL0 0x7U
+#define V_T7_RXMAPE2CCHANNEL0(x) ((x) << S_T7_RXMAPE2CCHANNEL0)
+#define G_T7_RXMAPE2CCHANNEL0(x) (((x) >> S_T7_RXMAPE2CCHANNEL0) & M_T7_RXMAPE2CCHANNEL0)
+
#define A_TP_RX_LPBK 0x28
#define A_TP_TX_LPBK 0x29
#define A_TP_TX_SCHED_PPP 0x2a
@@ -24873,6 +31356,55 @@
#define V_COMMITLIMIT0L(x) ((x) << S_COMMITLIMIT0L)
#define G_COMMITLIMIT0L(x) (((x) >> S_COMMITLIMIT0L) & M_COMMITLIMIT0L)
+#define A_TP_RX_SCHED_FIFO_CH3_CH2 0x2c
+
+#define S_COMMITLIMIT3H 24
+#define M_COMMITLIMIT3H 0xffU
+#define V_COMMITLIMIT3H(x) ((x) << S_COMMITLIMIT3H)
+#define G_COMMITLIMIT3H(x) (((x) >> S_COMMITLIMIT3H) & M_COMMITLIMIT3H)
+
+#define S_COMMITLIMIT3L 16
+#define M_COMMITLIMIT3L 0xffU
+#define V_COMMITLIMIT3L(x) ((x) << S_COMMITLIMIT3L)
+#define G_COMMITLIMIT3L(x) (((x) >> S_COMMITLIMIT3L) & M_COMMITLIMIT3L)
+
+#define S_COMMITLIMIT2H 8
+#define M_COMMITLIMIT2H 0xffU
+#define V_COMMITLIMIT2H(x) ((x) << S_COMMITLIMIT2H)
+#define G_COMMITLIMIT2H(x) (((x) >> S_COMMITLIMIT2H) & M_COMMITLIMIT2H)
+
+#define S_COMMITLIMIT2L 0
+#define M_COMMITLIMIT2L 0xffU
+#define V_COMMITLIMIT2L(x) ((x) << S_COMMITLIMIT2L)
+#define G_COMMITLIMIT2L(x) (((x) >> S_COMMITLIMIT2L) & M_COMMITLIMIT2L)
+
+#define A_TP_CHANNEL_MAP_LPBK 0x2d
+
+#define S_T7_RXMAPCHANNELELN 12
+#define M_T7_RXMAPCHANNELELN 0xfU
+#define V_T7_RXMAPCHANNELELN(x) ((x) << S_T7_RXMAPCHANNELELN)
+#define G_T7_RXMAPCHANNELELN(x) (((x) >> S_T7_RXMAPCHANNELELN) & M_T7_RXMAPCHANNELELN)
+
+#define S_T7_RXMAPE2LCHANNEL3 9
+#define M_T7_RXMAPE2LCHANNEL3 0x7U
+#define V_T7_RXMAPE2LCHANNEL3(x) ((x) << S_T7_RXMAPE2LCHANNEL3)
+#define G_T7_RXMAPE2LCHANNEL3(x) (((x) >> S_T7_RXMAPE2LCHANNEL3) & M_T7_RXMAPE2LCHANNEL3)
+
+#define S_T7_RXMAPE2LCHANNEL2 6
+#define M_T7_RXMAPE2LCHANNEL2 0x7U
+#define V_T7_RXMAPE2LCHANNEL2(x) ((x) << S_T7_RXMAPE2LCHANNEL2)
+#define G_T7_RXMAPE2LCHANNEL2(x) (((x) >> S_T7_RXMAPE2LCHANNEL2) & M_T7_RXMAPE2LCHANNEL2)
+
+#define S_T7_RXMAPE2LCHANNEL1 3
+#define M_T7_RXMAPE2LCHANNEL1 0x7U
+#define V_T7_RXMAPE2LCHANNEL1(x) ((x) << S_T7_RXMAPE2LCHANNEL1)
+#define G_T7_RXMAPE2LCHANNEL1(x) (((x) >> S_T7_RXMAPE2LCHANNEL1) & M_T7_RXMAPE2LCHANNEL1)
+
+#define S_T7_RXMAPE2LCHANNEL0 0
+#define M_T7_RXMAPE2LCHANNEL0 0x7U
+#define V_T7_RXMAPE2LCHANNEL0(x) ((x) << S_T7_RXMAPE2LCHANNEL0)
+#define G_T7_RXMAPE2LCHANNEL0(x) (((x) >> S_T7_RXMAPE2LCHANNEL0) & M_T7_RXMAPE2LCHANNEL0)
+
#define A_TP_IPMI_CFG1 0x2e
#define S_VLANENABLE 31
@@ -24966,47 +31498,12 @@
#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
#define A_TP_RSS_PF1_CONFIG 0x31
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF2_CONFIG 0x32
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF3_CONFIG 0x33
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF4_CONFIG 0x34
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF5_CONFIG 0x35
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF6_CONFIG 0x36
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF7_CONFIG 0x37
-
-#define S_T6_CHNENABLE 29
-#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE)
-#define F_T6_CHNENABLE V_T6_CHNENABLE(1U)
-
#define A_TP_RSS_PF_MAP 0x38
#define S_LKPIDXSIZE 24
@@ -25097,6 +31594,22 @@
#define G_PF0MSKSIZE(x) (((x) >> S_PF0MSKSIZE) & M_PF0MSKSIZE)
#define A_TP_RSS_VFL_CONFIG 0x3a
+
+#define S_BASEQID 16
+#define M_BASEQID 0xfffU
+#define V_BASEQID(x) ((x) << S_BASEQID)
+#define G_BASEQID(x) (((x) >> S_BASEQID) & M_BASEQID)
+
+#define S_MAXRRQID 8
+#define M_MAXRRQID 0xffU
+#define V_MAXRRQID(x) ((x) << S_MAXRRQID)
+#define G_MAXRRQID(x) (((x) >> S_MAXRRQID) & M_MAXRRQID)
+
+#define S_RRCOUNTER 0
+#define M_RRCOUNTER 0xffU
+#define V_RRCOUNTER(x) ((x) << S_RRCOUNTER)
+#define G_RRCOUNTER(x) (((x) >> S_RRCOUNTER) & M_RRCOUNTER)
+
#define A_TP_RSS_VFH_CONFIG 0x3b
#define S_ENABLEUDPHASH 31
@@ -25150,6 +31663,10 @@
#define V_KEYINDEX(x) ((x) << S_KEYINDEX)
#define G_KEYINDEX(x) (((x) >> S_KEYINDEX) & M_KEYINDEX)
+#define S_ROUNDROBINEN 3
+#define V_ROUNDROBINEN(x) ((x) << S_ROUNDROBINEN)
+#define F_ROUNDROBINEN V_ROUNDROBINEN(1U)
+
#define A_TP_RSS_SECRET_KEY0 0x40
#define A_TP_RSS_SECRET_KEY1 0x41
#define A_TP_RSS_SECRET_KEY2 0x42
@@ -25283,6 +31800,36 @@
#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC)
#define F_SHAREDXRC V_SHAREDXRC(1U)
+#define S_VERIFYRSPOP 25
+#define M_VERIFYRSPOP 0x1fU
+#define V_VERIFYRSPOP(x) ((x) << S_VERIFYRSPOP)
+#define G_VERIFYRSPOP(x) (((x) >> S_VERIFYRSPOP) & M_VERIFYRSPOP)
+
+#define S_VERIFYREQOP 20
+#define M_VERIFYREQOP 0x1fU
+#define V_VERIFYREQOP(x) ((x) << S_VERIFYREQOP)
+#define G_VERIFYREQOP(x) (((x) >> S_VERIFYREQOP) & M_VERIFYREQOP)
+
+#define S_AWRITERSPOP 15
+#define M_AWRITERSPOP 0x1fU
+#define V_AWRITERSPOP(x) ((x) << S_AWRITERSPOP)
+#define G_AWRITERSPOP(x) (((x) >> S_AWRITERSPOP) & M_AWRITERSPOP)
+
+#define S_AWRITEREQOP 10
+#define M_AWRITEREQOP 0x1fU
+#define V_AWRITEREQOP(x) ((x) << S_AWRITEREQOP)
+#define G_AWRITEREQOP(x) (((x) >> S_AWRITEREQOP) & M_AWRITEREQOP)
+
+#define S_FLUSHRSPOP 5
+#define M_FLUSHRSPOP 0x1fU
+#define V_FLUSHRSPOP(x) ((x) << S_FLUSHRSPOP)
+#define G_FLUSHRSPOP(x) (((x) >> S_FLUSHRSPOP) & M_FLUSHRSPOP)
+
+#define S_FLUSHREQOP 0
+#define M_FLUSHREQOP 0x1fU
+#define V_FLUSHREQOP(x) ((x) << S_FLUSHREQOP)
+#define G_FLUSHREQOP(x) (((x) >> S_FLUSHREQOP) & M_FLUSHREQOP)
+
#define A_TP_FRAG_CONFIG 0x56
#define S_TLSMODE 16
@@ -25330,6 +31877,21 @@
#define V_PASSMODE(x) ((x) << S_PASSMODE)
#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE)
+#define S_NVMTMODE 22
+#define M_NVMTMODE 0x3U
+#define V_NVMTMODE(x) ((x) << S_NVMTMODE)
+#define G_NVMTMODE(x) (((x) >> S_NVMTMODE) & M_NVMTMODE)
+
+#define S_ROCEMODE 20
+#define M_ROCEMODE 0x3U
+#define V_ROCEMODE(x) ((x) << S_ROCEMODE)
+#define G_ROCEMODE(x) (((x) >> S_ROCEMODE) & M_ROCEMODE)
+
+#define S_DTLSMODE 18
+#define M_DTLSMODE 0x3U
+#define V_DTLSMODE(x) ((x) << S_DTLSMODE)
+#define G_DTLSMODE(x) (((x) >> S_DTLSMODE) & M_DTLSMODE)
+
#define A_TP_CMM_CONFIG 0x57
#define S_WRCNTIDLE 16
@@ -25383,6 +31945,7 @@
#define V_GRETYPE(x) ((x) << S_GRETYPE)
#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE)
+#define A_TP_MMGR_CMM_CONFIG 0x5a
#define A_TP_DBG_CLEAR 0x60
#define A_TP_DBG_CORE_HDR0 0x61
@@ -25843,14 +32406,6 @@
#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY)
#define F_T5_EPCMDBUSY V_T5_EPCMDBUSY(1U)
-#define S_T6_ETXBUSY 1
-#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY)
-#define F_T6_ETXBUSY V_T6_ETXBUSY(1U)
-
-#define S_T6_EPCMDBUSY 0
-#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY)
-#define F_T6_EPCMDBUSY V_T6_EPCMDBUSY(1U)
-
#define A_TP_DBG_ENG_RES1 0x67
#define S_RXCPLSRDY 31
@@ -26114,16 +32669,6 @@
#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG)
#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG)
-#define S_T6_RXFIFOCNG 20
-#define M_T6_RXFIFOCNG 0xfU
-#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG)
-#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG)
-
-#define S_T6_RXPCMDCNG 14
-#define M_T6_RXPCMDCNG 0x3U
-#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG)
-#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG)
-
#define A_TP_DBG_ERROR_CNT 0x6c
#define A_TP_DBG_CORE_CPL 0x6d
@@ -26191,6 +32736,244 @@
#define A_TP_DBG_CACHE_RD_HIT 0x73
#define A_TP_DBG_CACHE_MC_REQ 0x74
#define A_TP_DBG_CACHE_MC_RSP 0x75
+#define A_TP_RSS_PF0_CONFIG_CH3_CH2 0x80
+
+#define S_PFMAPALWAYS 22
+#define V_PFMAPALWAYS(x) ((x) << S_PFMAPALWAYS)
+#define F_PFMAPALWAYS V_PFMAPALWAYS(1U)
+
+#define S_PFROUNDROBINEN 21
+#define V_PFROUNDROBINEN(x) ((x) << S_PFROUNDROBINEN)
+#define F_PFROUNDROBINEN V_PFROUNDROBINEN(1U)
+
+#define S_FOURCHNEN 20
+#define V_FOURCHNEN(x) ((x) << S_FOURCHNEN)
+#define F_FOURCHNEN V_FOURCHNEN(1U)
+
+#define S_CH3DEFAULTQUEUE 10
+#define M_CH3DEFAULTQUEUE 0x3ffU
+#define V_CH3DEFAULTQUEUE(x) ((x) << S_CH3DEFAULTQUEUE)
+#define G_CH3DEFAULTQUEUE(x) (((x) >> S_CH3DEFAULTQUEUE) & M_CH3DEFAULTQUEUE)
+
+#define S_CH2DEFAULTQUEUE 0
+#define M_CH2DEFAULTQUEUE 0x3ffU
+#define V_CH2DEFAULTQUEUE(x) ((x) << S_CH2DEFAULTQUEUE)
+#define G_CH2DEFAULTQUEUE(x) (((x) >> S_CH2DEFAULTQUEUE) & M_CH2DEFAULTQUEUE)
+
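+/*
+ * Illustrative packing of the per-PF ch2/ch3 defaults above; these
+ * low-offset TP RSS registers are assumed to go through the TP PIO
+ * window (A_TP_PIO_ADDR/A_TP_PIO_DATA), and q2/q3 are placeholders:
+ *
+ *	t4_write_reg(sc, A_TP_PIO_ADDR, A_TP_RSS_PF0_CONFIG_CH3_CH2);
+ *	t4_write_reg(sc, A_TP_PIO_DATA, F_FOURCHNEN |
+ *	    V_CH2DEFAULTQUEUE(q2) | V_CH3DEFAULTQUEUE(q3));
+ */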
+#define A_TP_RSS_PF1_CONFIG_CH3_CH2 0x81
+#define A_TP_RSS_PF2_CONFIG_CH3_CH2 0x82
+#define A_TP_RSS_PF3_CONFIG_CH3_CH2 0x83
+#define A_TP_RSS_PF4_CONFIG_CH3_CH2 0x84
+#define A_TP_RSS_PF5_CONFIG_CH3_CH2 0x85
+#define A_TP_RSS_PF6_CONFIG_CH3_CH2 0x86
+#define A_TP_RSS_PF7_CONFIG_CH3_CH2 0x87
+#define A_TP_RSS_PF0_EXT_CONFIG 0x88
+#define A_TP_RSS_PF1_EXT_CONFIG 0x89
+#define A_TP_RSS_PF2_EXT_CONFIG 0x8a
+#define A_TP_RSS_PF3_EXT_CONFIG 0x8b
+#define A_TP_RSS_PF4_EXT_CONFIG 0x8c
+#define A_TP_RSS_PF5_EXT_CONFIG 0x8d
+#define A_TP_RSS_PF6_EXT_CONFIG 0x8e
+#define A_TP_RSS_PF7_EXT_CONFIG 0x8f
+#define A_TP_ROCE_CONFIG 0x90
+
+#define S_IGNAETHMSB 24
+#define V_IGNAETHMSB(x) ((x) << S_IGNAETHMSB)
+#define F_IGNAETHMSB V_IGNAETHMSB(1U)
+
+#define S_XDIDMMCTL 23
+#define V_XDIDMMCTL(x) ((x) << S_XDIDMMCTL)
+#define F_XDIDMMCTL V_XDIDMMCTL(1U)
+
+#define S_WRRETHDBGFWDEN 22
+#define V_WRRETHDBGFWDEN(x) ((x) << S_WRRETHDBGFWDEN)
+#define F_WRRETHDBGFWDEN V_WRRETHDBGFWDEN(1U)
+
+#define S_ACKINTGENCTRL 20
+#define M_ACKINTGENCTRL 0x3U
+#define V_ACKINTGENCTRL(x) ((x) << S_ACKINTGENCTRL)
+#define G_ACKINTGENCTRL(x) (((x) >> S_ACKINTGENCTRL) & M_ACKINTGENCTRL)
+
+#define S_ATOMICALIGNCHKEN 19
+#define V_ATOMICALIGNCHKEN(x) ((x) << S_ATOMICALIGNCHKEN)
+#define F_ATOMICALIGNCHKEN V_ATOMICALIGNCHKEN(1U)
+
+#define S_RDRETHLENCHKEN 18
+#define V_RDRETHLENCHKEN(x) ((x) << S_RDRETHLENCHKEN)
+#define F_RDRETHLENCHKEN V_RDRETHLENCHKEN(1U)
+
+#define S_WRTOTALLENCHKEN 17
+#define V_WRTOTALLENCHKEN(x) ((x) << S_WRTOTALLENCHKEN)
+#define F_WRTOTALLENCHKEN V_WRTOTALLENCHKEN(1U)
+
+#define S_WRRETHLENCHKEN 16
+#define V_WRRETHLENCHKEN(x) ((x) << S_WRRETHLENCHKEN)
+#define F_WRRETHLENCHKEN V_WRRETHLENCHKEN(1U)
+
+#define S_TNLERRORUDPLEN 11
+#define V_TNLERRORUDPLEN(x) ((x) << S_TNLERRORUDPLEN)
+#define F_TNLERRORUDPLEN V_TNLERRORUDPLEN(1U)
+
+#define S_TNLERRORPKEY 10
+#define V_TNLERRORPKEY(x) ((x) << S_TNLERRORPKEY)
+#define F_TNLERRORPKEY V_TNLERRORPKEY(1U)
+
+#define S_TNLERROROPCODE 9
+#define V_TNLERROROPCODE(x) ((x) << S_TNLERROROPCODE)
+#define F_TNLERROROPCODE V_TNLERROROPCODE(1U)
+
+#define S_TNLERRORTVER 8
+#define V_TNLERRORTVER(x) ((x) << S_TNLERRORTVER)
+#define F_TNLERRORTVER V_TNLERRORTVER(1U)
+
+#define S_DROPERRORUDPLEN 3
+#define V_DROPERRORUDPLEN(x) ((x) << S_DROPERRORUDPLEN)
+#define F_DROPERRORUDPLEN V_DROPERRORUDPLEN(1U)
+
+#define S_DROPERRORPKEY 2
+#define V_DROPERRORPKEY(x) ((x) << S_DROPERRORPKEY)
+#define F_DROPERRORPKEY V_DROPERRORPKEY(1U)
+
+#define S_DROPERROROPCODE 1
+#define V_DROPERROROPCODE(x) ((x) << S_DROPERROROPCODE)
+#define F_DROPERROROPCODE V_DROPERROROPCODE(1U)
+
+#define S_DROPERRORTVER 0
+#define V_DROPERRORTVER(x) ((x) << S_DROPERRORTVER)
+#define F_DROPERRORTVER V_DROPERRORTVER(1U)
+
+#define A_TP_NVMT_CONFIG 0x91
+
+#define S_PDACHKEN 2
+#define V_PDACHKEN(x) ((x) << S_PDACHKEN)
+#define F_PDACHKEN V_PDACHKEN(1U)
+
+#define S_FORCERQNONDDP 1
+#define V_FORCERQNONDDP(x) ((x) << S_FORCERQNONDDP)
+#define F_FORCERQNONDDP V_FORCERQNONDDP(1U)
+
+#define S_STRIPHCRC 0
+#define V_STRIPHCRC(x) ((x) << S_STRIPHCRC)
+#define F_STRIPHCRC V_STRIPHCRC(1U)
+
+#define A_TP_NVMT_MAXHDR 0x92
+
+#define S_MAXHDR3 24
+#define M_MAXHDR3 0xffU
+#define V_MAXHDR3(x) ((x) << S_MAXHDR3)
+#define G_MAXHDR3(x) (((x) >> S_MAXHDR3) & M_MAXHDR3)
+
+#define S_MAXHDR2 16
+#define M_MAXHDR2 0xffU
+#define V_MAXHDR2(x) ((x) << S_MAXHDR2)
+#define G_MAXHDR2(x) (((x) >> S_MAXHDR2) & M_MAXHDR2)
+
+#define S_MAXHDR1 8
+#define M_MAXHDR1 0xffU
+#define V_MAXHDR1(x) ((x) << S_MAXHDR1)
+#define G_MAXHDR1(x) (((x) >> S_MAXHDR1) & M_MAXHDR1)
+
+#define S_MAXHDR0 0
+#define M_MAXHDR0 0xffU
+#define V_MAXHDR0(x) ((x) << S_MAXHDR0)
+#define G_MAXHDR0(x) (((x) >> S_MAXHDR0) & M_MAXHDR0)
+
+#define A_TP_NVMT_PDORSVD 0x93
+
+#define S_PDORSVD3 24
+#define M_PDORSVD3 0xffU
+#define V_PDORSVD3(x) ((x) << S_PDORSVD3)
+#define G_PDORSVD3(x) (((x) >> S_PDORSVD3) & M_PDORSVD3)
+
+#define S_PDORSVD2 16
+#define M_PDORSVD2 0xffU
+#define V_PDORSVD2(x) ((x) << S_PDORSVD2)
+#define G_PDORSVD2(x) (((x) >> S_PDORSVD2) & M_PDORSVD2)
+
+#define S_PDORSVD1 8
+#define M_PDORSVD1 0xffU
+#define V_PDORSVD1(x) ((x) << S_PDORSVD1)
+#define G_PDORSVD1(x) (((x) >> S_PDORSVD1) & M_PDORSVD1)
+
+#define S_PDORSVD0 0
+#define M_PDORSVD0 0xffU
+#define V_PDORSVD0(x) ((x) << S_PDORSVD0)
+#define G_PDORSVD0(x) (((x) >> S_PDORSVD0) & M_PDORSVD0)
+
+#define A_TP_RDMA_CONFIG 0x94
+
+#define S_SRQLIMITEN 20
+#define V_SRQLIMITEN(x) ((x) << S_SRQLIMITEN)
+#define F_SRQLIMITEN V_SRQLIMITEN(1U)
+
+#define S_SNDIMMSEOP 15
+#define M_SNDIMMSEOP 0x1fU
+#define V_SNDIMMSEOP(x) ((x) << S_SNDIMMSEOP)
+#define G_SNDIMMSEOP(x) (((x) >> S_SNDIMMSEOP) & M_SNDIMMSEOP)
+
+#define S_SNDIMMOP 10
+#define M_SNDIMMOP 0x1fU
+#define V_SNDIMMOP(x) ((x) << S_SNDIMMOP)
+#define G_SNDIMMOP(x) (((x) >> S_SNDIMMOP) & M_SNDIMMOP)
+
+#define S_IWARPXRCIDCHKEN 4
+#define V_IWARPXRCIDCHKEN(x) ((x) << S_IWARPXRCIDCHKEN)
+#define F_IWARPXRCIDCHKEN V_IWARPXRCIDCHKEN(1U)
+
+#define S_IWARPEXTOPEN 3
+#define V_IWARPEXTOPEN(x) ((x) << S_IWARPEXTOPEN)
+#define F_IWARPEXTOPEN V_IWARPEXTOPEN(1U)
+
+#define S_XRCIMPLTYPE 1
+#define V_XRCIMPLTYPE(x) ((x) << S_XRCIMPLTYPE)
+#define F_XRCIMPLTYPE V_XRCIMPLTYPE(1U)
+
+#define S_XRCEN 0
+#define V_XRCEN(x) ((x) << S_XRCEN)
+#define F_XRCEN V_XRCEN(1U)
+
+#define A_TP_ROCE_RRQ_BASE 0x95
+#define A_TP_FILTER_RATE_CFG 0x96
+
+#define S_GRP_CFG_RD 30
+#define V_GRP_CFG_RD(x) ((x) << S_GRP_CFG_RD)
+#define F_GRP_CFG_RD V_GRP_CFG_RD(1U)
+
+#define S_GRP_CFG_INIT 29
+#define V_GRP_CFG_INIT(x) ((x) << S_GRP_CFG_INIT)
+#define F_GRP_CFG_INIT V_GRP_CFG_INIT(1U)
+
+#define S_GRP_CFG_RST 28
+#define V_GRP_CFG_RST(x) ((x) << S_GRP_CFG_RST)
+#define F_GRP_CFG_RST V_GRP_CFG_RST(1U)
+
+#define S_GRP_CFG_SEL 16
+#define M_GRP_CFG_SEL 0xfffU
+#define V_GRP_CFG_SEL(x) ((x) << S_GRP_CFG_SEL)
+#define G_GRP_CFG_SEL(x) (((x) >> S_GRP_CFG_SEL) & M_GRP_CFG_SEL)
+
+#define S_US_TIMER_TICK 0
+#define M_US_TIMER_TICK 0xffffU
+#define V_US_TIMER_TICK(x) ((x) << S_US_TIMER_TICK)
+#define G_US_TIMER_TICK(x) (((x) >> S_US_TIMER_TICK) & M_US_TIMER_TICK)
+
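+/*
+ * Sketch of the multi-bit variant, assuming a cached copy of
+ * A_TP_FILTER_RATE_CFG: clear the field with V_x(M_x), or-in the new
+ * value, and read it back later with G_GRP_CFG_SEL().
+ */
+static inline uint32_t
+tp_filter_rate_set_grp(uint32_t regval, uint32_t sel)
+{
+	regval &= ~V_GRP_CFG_SEL(M_GRP_CFG_SEL);
+	regval |= V_GRP_CFG_SEL(sel & M_GRP_CFG_SEL);
+	return (regval);
+}
+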
+#define A_TP_TLS_CONFIG 0x99
+
+#define S_QUIESCETYPE1 24
+#define M_QUIESCETYPE1 0xffU
+#define V_QUIESCETYPE1(x) ((x) << S_QUIESCETYPE1)
+#define G_QUIESCETYPE1(x) (((x) >> S_QUIESCETYPE1) & M_QUIESCETYPE1)
+
+#define S_QUIESCETYPE2 16
+#define M_QUIESCETYPE2 0xffU
+#define V_QUIESCETYPE2(x) ((x) << S_QUIESCETYPE2)
+#define G_QUIESCETYPE2(x) (((x) >> S_QUIESCETYPE2) & M_QUIESCETYPE2)
+
+#define S_QUIESCETYPE3 8
+#define M_QUIESCETYPE3 0xffU
+#define V_QUIESCETYPE3(x) ((x) << S_QUIESCETYPE3)
+#define G_QUIESCETYPE3(x) (((x) >> S_QUIESCETYPE3) & M_QUIESCETYPE3)
+
#define A_TP_T5_TX_DROP_CNT_CH0 0x120
#define A_TP_T5_TX_DROP_CNT_CH1 0x121
#define A_TP_TX_DROP_CNT_CH2 0x122
@@ -26682,10 +33465,6 @@
#define A_TP_DBG_ESIDE_DISP1 0x137
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE1 0
#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1)
#define F_TXFULL_ESIDE1 V_TXFULL_ESIDE1(1U)
@@ -26719,20 +33498,12 @@
#define A_TP_DBG_ESIDE_DISP2 0x13a
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE2 0
#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2)
#define F_TXFULL_ESIDE2 V_TXFULL_ESIDE2(1U)
#define A_TP_DBG_ESIDE_DISP3 0x13b
-#define S_T6_ESTATIC4 12
-#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4)
-#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U)
-
#define S_TXFULL_ESIDE3 0
#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3)
#define F_TXFULL_ESIDE3 V_TXFULL_ESIDE3(1U)
@@ -26836,6 +33607,94 @@
#define V_SRVRSRAM(x) ((x) << S_SRVRSRAM)
#define F_SRVRSRAM V_SRVRSRAM(1U)
+#define S_T7_FILTERMODE 31
+#define V_T7_FILTERMODE(x) ((x) << S_T7_FILTERMODE)
+#define F_T7_FILTERMODE V_T7_FILTERMODE(1U)
+
+#define S_T7_FCOEMASK 30
+#define V_T7_FCOEMASK(x) ((x) << S_T7_FCOEMASK)
+#define F_T7_FCOEMASK V_T7_FCOEMASK(1U)
+
+#define S_T7_SRVRSRAM 29
+#define V_T7_SRVRSRAM(x) ((x) << S_T7_SRVRSRAM)
+#define F_T7_SRVRSRAM V_T7_SRVRSRAM(1U)
+
+#define S_ROCEUDFORCEIPV6 28
+#define V_ROCEUDFORCEIPV6(x) ((x) << S_ROCEUDFORCEIPV6)
+#define F_ROCEUDFORCEIPV6 V_ROCEUDFORCEIPV6(1U)
+
+#define S_TCPFLAGS8 27
+#define V_TCPFLAGS8(x) ((x) << S_TCPFLAGS8)
+#define F_TCPFLAGS8 V_TCPFLAGS8(1U)
+
+#define S_MACMATCH11 26
+#define V_MACMATCH11(x) ((x) << S_MACMATCH11)
+#define F_MACMATCH11 V_MACMATCH11(1U)
+
+#define S_SMACMATCH10 25
+#define V_SMACMATCH10(x) ((x) << S_SMACMATCH10)
+#define F_SMACMATCH10 V_SMACMATCH10(1U)
+
+#define S_SMACMATCH 14
+#define V_SMACMATCH(x) ((x) << S_SMACMATCH)
+#define F_SMACMATCH V_SMACMATCH(1U)
+
+#define S_TCPFLAGS 13
+#define V_TCPFLAGS(x) ((x) << S_TCPFLAGS)
+#define F_TCPFLAGS V_TCPFLAGS(1U)
+
+#define S_SYNONLY 12
+#define V_SYNONLY(x) ((x) << S_SYNONLY)
+#define F_SYNONLY V_SYNONLY(1U)
+
+#define S_ROCE 11
+#define V_ROCE(x) ((x) << S_ROCE)
+#define F_ROCE V_ROCE(1U)
+
+#define S_T7_FRAGMENTATION 10
+#define V_T7_FRAGMENTATION(x) ((x) << S_T7_FRAGMENTATION)
+#define F_T7_FRAGMENTATION V_T7_FRAGMENTATION(1U)
+
+#define S_T7_MPSHITTYPE 9
+#define V_T7_MPSHITTYPE(x) ((x) << S_T7_MPSHITTYPE)
+#define F_T7_MPSHITTYPE V_T7_MPSHITTYPE(1U)
+
+#define S_T7_MACMATCH 8
+#define V_T7_MACMATCH(x) ((x) << S_T7_MACMATCH)
+#define F_T7_MACMATCH V_T7_MACMATCH(1U)
+
+#define S_T7_ETHERTYPE 7
+#define V_T7_ETHERTYPE(x) ((x) << S_T7_ETHERTYPE)
+#define F_T7_ETHERTYPE V_T7_ETHERTYPE(1U)
+
+#define S_T7_PROTOCOL 6
+#define V_T7_PROTOCOL(x) ((x) << S_T7_PROTOCOL)
+#define F_T7_PROTOCOL V_T7_PROTOCOL(1U)
+
+#define S_T7_TOS 5
+#define V_T7_TOS(x) ((x) << S_T7_TOS)
+#define F_T7_TOS V_T7_TOS(1U)
+
+#define S_T7_VLAN 4
+#define V_T7_VLAN(x) ((x) << S_T7_VLAN)
+#define F_T7_VLAN V_T7_VLAN(1U)
+
+#define S_T7_VNIC_ID 3
+#define V_T7_VNIC_ID(x) ((x) << S_T7_VNIC_ID)
+#define F_T7_VNIC_ID V_T7_VNIC_ID(1U)
+
+#define S_T7_PORT 2
+#define V_T7_PORT(x) ((x) << S_T7_PORT)
+#define F_T7_PORT V_T7_PORT(1U)
+
+#define S_T7_FCOE 1
+#define V_T7_FCOE(x) ((x) << S_T7_FCOE)
+#define F_T7_FCOE V_T7_FCOE(1U)
+
+#define S_IPSECIDX 0
+#define V_IPSECIDX(x) ((x) << S_IPSECIDX)
+#define F_IPSECIDX V_IPSECIDX(1U)
+
#define A_TP_INGRESS_CONFIG 0x141
#define S_OPAQUE_TYPE 16
@@ -26888,6 +33747,14 @@
#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX)
#define F_USE_ENC_IDX V_USE_ENC_IDX(1U)
+#define S_USE_MPS_ECN 15
+#define V_USE_MPS_ECN(x) ((x) << S_USE_MPS_ECN)
+#define F_USE_MPS_ECN V_USE_MPS_ECN(1U)
+
+#define S_USE_MPS_CONG 14
+#define V_USE_MPS_CONG(x) ((x) << S_USE_MPS_CONG)
+#define F_USE_MPS_CONG V_USE_MPS_CONG(1U)
+
#define A_TP_TX_DROP_CFG_CH2 0x142
#define A_TP_TX_DROP_CFG_CH3 0x143
#define A_TP_EGRESS_CONFIG 0x145
@@ -27490,6 +34357,51 @@
#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT)
#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT)
+#define S_IPSECTUNETHTRANSEN 29
+#define V_IPSECTUNETHTRANSEN(x) ((x) << S_IPSECTUNETHTRANSEN)
+#define F_IPSECTUNETHTRANSEN V_IPSECTUNETHTRANSEN(1U)
+
+#define S_ROCEV2ZEROUDP6CSUM 28
+#define V_ROCEV2ZEROUDP6CSUM(x) ((x) << S_ROCEV2ZEROUDP6CSUM)
+#define F_ROCEV2ZEROUDP6CSUM V_ROCEV2ZEROUDP6CSUM(1U)
+
+#define S_ROCEV2PROCEN 27
+#define V_ROCEV2PROCEN(x) ((x) << S_ROCEV2PROCEN)
+#define F_ROCEV2PROCEN V_ROCEV2PROCEN(1U)
+
+#define A_TP_ESIDE_ROCE_PORT12 0x161
+
+#define S_ROCEV2UDPPORT2 16
+#define M_ROCEV2UDPPORT2 0xffffU
+#define V_ROCEV2UDPPORT2(x) ((x) << S_ROCEV2UDPPORT2)
+#define G_ROCEV2UDPPORT2(x) (((x) >> S_ROCEV2UDPPORT2) & M_ROCEV2UDPPORT2)
+
+#define S_ROCEV2UDPPORT1 0
+#define M_ROCEV2UDPPORT1 0xffffU
+#define V_ROCEV2UDPPORT1(x) ((x) << S_ROCEV2UDPPORT1)
+#define G_ROCEV2UDPPORT1(x) (((x) >> S_ROCEV2UDPPORT1) & M_ROCEV2UDPPORT1)
+
+#define A_TP_ESIDE_ROCE_PORT34 0x162
+
+#define S_ROCEV2UDPPORT4 16
+#define M_ROCEV2UDPPORT4 0xffffU
+#define V_ROCEV2UDPPORT4(x) ((x) << S_ROCEV2UDPPORT4)
+#define G_ROCEV2UDPPORT4(x) (((x) >> S_ROCEV2UDPPORT4) & M_ROCEV2UDPPORT4)
+
+#define S_ROCEV2UDPPORT3 0
+#define M_ROCEV2UDPPORT3 0xffffU
+#define V_ROCEV2UDPPORT3(x) ((x) << S_ROCEV2UDPPORT3)
+#define G_ROCEV2UDPPORT3(x) (((x) >> S_ROCEV2UDPPORT3) & M_ROCEV2UDPPORT3)
+
+#define A_TP_ESIDE_CONFIG1 0x163
+
+#define S_ROCEV2CRCIGN 0
+#define M_ROCEV2CRCIGN 0xfU
+#define V_ROCEV2CRCIGN(x) ((x) << S_ROCEV2CRCIGN)
+#define G_ROCEV2CRCIGN(x) (((x) >> S_ROCEV2CRCIGN) & M_ROCEV2CRCIGN)
+
+#define A_TP_ESIDE_DEBUG_CFG 0x16c
+#define A_TP_ESIDE_DEBUG_DATA 0x16d
#define A_TP_DBG_CSIDE_RX0 0x230
#define S_CRXSOPCNT 28
@@ -27962,56 +34874,7 @@
#define V_TXFULL2X(x) ((x) << S_TXFULL2X)
#define F_TXFULL2X V_TXFULL2X(1U)
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DISP1 0x23b
-
-#define S_T5_TXFULL 31
-#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL)
-#define F_T5_TXFULL V_T5_TXFULL(1U)
-
-#define S_T5_PLD_RXZEROP_SRDY 25
-#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY)
-#define F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U)
-
-#define S_T5_DDP_SRDY 22
-#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY)
-#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U)
-
-#define S_T5_DDP_DRDY 21
-#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY)
-#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U)
-
-#define S_T6_TXFULL 31
-#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL)
-#define F_T6_TXFULL V_T6_TXFULL(1U)
-
-#define S_T6_PLD_RXZEROP_SRDY 25
-#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY)
-#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U)
-
-#define S_T6_DDP_SRDY 22
-#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY)
-#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U)
-
-#define S_T6_DDP_DRDY 21
-#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY)
-#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U)
-
#define A_TP_DBG_CSIDE_DDP0 0x23c
#define S_DDPMSGLATEST7 28
@@ -28222,6 +35085,59 @@
#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE)
#define F_ISCSICMDMODE V_ISCSICMDMODE(1U)
+#define S_NVMTOPUPDEN 30
+#define V_NVMTOPUPDEN(x) ((x) << S_NVMTOPUPDEN)
+#define F_NVMTOPUPDEN V_NVMTOPUPDEN(1U)
+
+#define S_NOPDIS 29
+#define V_NOPDIS(x) ((x) << S_NOPDIS)
+#define F_NOPDIS V_NOPDIS(1U)
+
+#define S_IWARPINVREQEN 27
+#define V_IWARPINVREQEN(x) ((x) << S_IWARPINVREQEN)
+#define F_IWARPINVREQEN V_IWARPINVREQEN(1U)
+
+#define S_ROCEINVREQEN 26
+#define V_ROCEINVREQEN(x) ((x) << S_ROCEINVREQEN)
+#define F_ROCEINVREQEN V_ROCEINVREQEN(1U)
+
+#define S_ROCESRQFWEN 25
+#define V_ROCESRQFWEN(x) ((x) << S_ROCESRQFWEN)
+#define F_ROCESRQFWEN V_ROCESRQFWEN(1U)
+
+#define S_T7_WRITEZEROOP 20
+#define M_T7_WRITEZEROOP 0x1fU
+#define V_T7_WRITEZEROOP(x) ((x) << S_T7_WRITEZEROOP)
+#define G_T7_WRITEZEROOP(x) (((x) >> S_T7_WRITEZEROOP) & M_T7_WRITEZEROOP)
+
+#define S_IWARPEXTMODE 9
+#define V_IWARPEXTMODE(x) ((x) << S_IWARPEXTMODE)
+#define F_IWARPEXTMODE V_IWARPEXTMODE(1U)
+
+#define S_IWARPINVFWEN 8
+#define V_IWARPINVFWEN(x) ((x) << S_IWARPINVFWEN)
+#define F_IWARPINVFWEN V_IWARPINVFWEN(1U)
+
+#define S_IWARPSRQFWEN 7
+#define V_IWARPSRQFWEN(x) ((x) << S_IWARPSRQFWEN)
+#define F_IWARPSRQFWEN V_IWARPSRQFWEN(1U)
+
+#define S_T7_STARTSKIPPLD 3
+#define V_T7_STARTSKIPPLD(x) ((x) << S_T7_STARTSKIPPLD)
+#define F_T7_STARTSKIPPLD V_T7_STARTSKIPPLD(1U)
+
+#define S_NVMTFLIMMEN 2
+#define V_NVMTFLIMMEN(x) ((x) << S_NVMTFLIMMEN)
+#define F_NVMTFLIMMEN V_NVMTFLIMMEN(1U)
+
+#define S_NVMTOPCTRLEN 1
+#define V_NVMTOPCTRLEN(x) ((x) << S_NVMTOPCTRLEN)
+#define F_NVMTOPCTRLEN V_NVMTOPCTRLEN(1U)
+
+#define S_T7_WRITEZEROEN 0
+#define V_T7_WRITEZEROEN(x) ((x) << S_T7_WRITEZEROEN)
+#define F_T7_WRITEZEROEN V_T7_WRITEZEROEN(1U)
+
#define A_TP_CSPI_POWER 0x243
#define S_GATECHNTX3 11
@@ -28256,6 +35172,26 @@
#define V_SLEEPREQUTRN(x) ((x) << S_SLEEPREQUTRN)
#define F_SLEEPREQUTRN V_SLEEPREQUTRN(1U)
+#define S_GATECHNRX3 7
+#define V_GATECHNRX3(x) ((x) << S_GATECHNRX3)
+#define F_GATECHNRX3 V_GATECHNRX3(1U)
+
+#define S_GATECHNRX2 6
+#define V_GATECHNRX2(x) ((x) << S_GATECHNRX2)
+#define F_GATECHNRX2 V_GATECHNRX2(1U)
+
+#define S_T7_GATECHNRX1 5
+#define V_T7_GATECHNRX1(x) ((x) << S_T7_GATECHNRX1)
+#define F_T7_GATECHNRX1 V_T7_GATECHNRX1(1U)
+
+#define S_T7_GATECHNRX0 4
+#define V_T7_GATECHNRX0(x) ((x) << S_T7_GATECHNRX0)
+#define F_T7_GATECHNRX0 V_T7_GATECHNRX0(1U)
+
+#define S_T7_SLEEPRDYUTRN 3
+#define V_T7_SLEEPRDYUTRN(x) ((x) << S_T7_SLEEPRDYUTRN)
+#define F_T7_SLEEPRDYUTRN V_T7_SLEEPRDYUTRN(1U)
+
#define A_TP_TRC_CONFIG 0x244
#define S_TRCRR 1
@@ -28266,6 +35202,19 @@
#define V_TRCCH(x) ((x) << S_TRCCH)
#define F_TRCCH V_TRCCH(1U)
+#define S_DEBUGPG 3
+#define V_DEBUGPG(x) ((x) << S_DEBUGPG)
+#define F_DEBUGPG V_DEBUGPG(1U)
+
+#define S_T7_TRCRR 2
+#define V_T7_TRCRR(x) ((x) << S_T7_TRCRR)
+#define F_T7_TRCRR V_T7_TRCRR(1U)
+
+#define S_T7_TRCCH 0
+#define M_T7_TRCCH 0x3U
+#define V_T7_TRCCH(x) ((x) << S_T7_TRCCH)
+#define G_T7_TRCCH(x) (((x) >> S_T7_TRCCH) & M_T7_TRCCH)
+
#define A_TP_TAG_CONFIG 0x245
#define S_ETAGTYPE 16
@@ -28379,26 +35328,6 @@
#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0)
#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0)
-#define S_T6_CPRSSTATE3 24
-#define M_T6_CPRSSTATE3 0xfU
-#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3)
-#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3)
-
-#define S_T6_CPRSSTATE2 16
-#define M_T6_CPRSSTATE2 0xfU
-#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2)
-#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2)
-
-#define S_T6_CPRSSTATE1 8
-#define M_T6_CPRSSTATE1 0xfU
-#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1)
-#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1)
-
-#define S_T6_CPRSSTATE0 0
-#define M_T6_CPRSSTATE0 0xfU
-#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0)
-#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0)
-
#define A_TP_DBG_CSIDE_DEMUX 0x247
#define S_CALLDONE 28
@@ -28630,6 +35559,62 @@
#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e
#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f
#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250
+#define A_TP_CHDR_CONFIG1 0x259
+
+#define S_CH3HIGH 24
+#define M_CH3HIGH 0xffU
+#define V_CH3HIGH(x) ((x) << S_CH3HIGH)
+#define G_CH3HIGH(x) (((x) >> S_CH3HIGH) & M_CH3HIGH)
+
+#define S_CH3LOW 16
+#define M_CH3LOW 0xffU
+#define V_CH3LOW(x) ((x) << S_CH3LOW)
+#define G_CH3LOW(x) (((x) >> S_CH3LOW) & M_CH3LOW)
+
+#define S_CH2HIGH 8
+#define M_CH2HIGH 0xffU
+#define V_CH2HIGH(x) ((x) << S_CH2HIGH)
+#define G_CH2HIGH(x) (((x) >> S_CH2HIGH) & M_CH2HIGH)
+
+#define S_CH2LOW 0
+#define M_CH2LOW 0xffU
+#define V_CH2LOW(x) ((x) << S_CH2LOW)
+#define G_CH2LOW(x) (((x) >> S_CH2LOW) & M_CH2LOW)
+
+#define A_TP_CDSP_RDMA_CONFIG 0x260
+#define A_TP_NVMT_OP_CTRL 0x268
+
+#define S_DEFOPCTRL 30
+#define M_DEFOPCTRL 0x3U
+#define V_DEFOPCTRL(x) ((x) << S_DEFOPCTRL)
+#define G_DEFOPCTRL(x) (((x) >> S_DEFOPCTRL) & M_DEFOPCTRL)
+
+#define S_NVMTOPCTRL 0
+#define M_NVMTOPCTRL 0x3fffffffU
+#define V_NVMTOPCTRL(x) ((x) << S_NVMTOPCTRL)
+#define G_NVMTOPCTRL(x) (((x) >> S_NVMTOPCTRL) & M_NVMTOPCTRL)
+
+#define A_TP_CSIDE_DEBUG_CFG 0x26c
+
+#define S_T7_OR_EN 13
+#define V_T7_OR_EN(x) ((x) << S_T7_OR_EN)
+#define F_T7_OR_EN V_T7_OR_EN(1U)
+
+#define S_T7_HI 12
+#define V_T7_HI(x) ((x) << S_T7_HI)
+#define F_T7_HI V_T7_HI(1U)
+
+#define S_T7_SELH 6
+#define M_T7_SELH 0x3fU
+#define V_T7_SELH(x) ((x) << S_T7_SELH)
+#define G_T7_SELH(x) (((x) >> S_T7_SELH) & M_T7_SELH)
+
+#define S_T7_SELL 0
+#define M_T7_SELL 0x3fU
+#define V_T7_SELL(x) ((x) << S_T7_SELL)
+#define G_T7_SELL(x) (((x) >> S_T7_SELL) & M_T7_SELL)
+
+#define A_TP_CSIDE_DEBUG_DATA 0x26d
#define A_TP_FIFO_CONFIG 0x8c0
#define S_CH1_OUTPUT 27
@@ -28771,6 +35756,174 @@
#define A_TP_MIB_TNL_ERR_1 0x71
#define A_TP_MIB_TNL_ERR_2 0x72
#define A_TP_MIB_TNL_ERR_3 0x73
+#define A_TP_MIB_RDMA_IN_PKT_0 0x80
+#define A_TP_MIB_RDMA_IN_PKT_1 0x81
+#define A_TP_MIB_RDMA_IN_PKT_2 0x82
+#define A_TP_MIB_RDMA_IN_PKT_3 0x83
+#define A_TP_MIB_RDMA_IN_BYTE_HI_0 0x84
+#define A_TP_MIB_RDMA_IN_BYTE_LO_0 0x85
+#define A_TP_MIB_RDMA_IN_BYTE_HI_1 0x86
+#define A_TP_MIB_RDMA_IN_BYTE_LO_1 0x87
+#define A_TP_MIB_RDMA_IN_BYTE_HI_2 0x88
+#define A_TP_MIB_RDMA_IN_BYTE_LO_2 0x89
+#define A_TP_MIB_RDMA_IN_BYTE_HI_3 0x8a
+#define A_TP_MIB_RDMA_IN_BYTE_LO_3 0x8b
+#define A_TP_MIB_RDMA_OUT_PKT_0 0x90
+#define A_TP_MIB_RDMA_OUT_PKT_1 0x91
+#define A_TP_MIB_RDMA_OUT_PKT_2 0x92
+#define A_TP_MIB_RDMA_OUT_PKT_3 0x93
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_0 0x94
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_0 0x95
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_1 0x96
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_1 0x97
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_2 0x98
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_2 0x99
+#define A_TP_MIB_RDMA_OUT_BYTE_HI_3 0x9a
+#define A_TP_MIB_RDMA_OUT_BYTE_LO_3 0x9b
+#define A_TP_MIB_ISCSI_IN_PKT_0 0xa0
+#define A_TP_MIB_ISCSI_IN_PKT_1 0xa1
+#define A_TP_MIB_ISCSI_IN_PKT_2 0xa2
+#define A_TP_MIB_ISCSI_IN_PKT_3 0xa3
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_0 0xa4
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_0 0xa5
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_1 0xa6
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_1 0xa7
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_2 0xa8
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_2 0xa9
+#define A_TP_MIB_ISCSI_IN_BYTE_HI_3 0xaa
+#define A_TP_MIB_ISCSI_IN_BYTE_LO_3 0xab
+#define A_TP_MIB_ISCSI_OUT_PKT_0 0xb0
+#define A_TP_MIB_ISCSI_OUT_PKT_1 0xb1
+#define A_TP_MIB_ISCSI_OUT_PKT_2 0xb2
+#define A_TP_MIB_ISCSI_OUT_PKT_3 0xb3
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_0 0xb4
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_0 0xb5
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_1 0xb6
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_1 0xb7
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_2 0xb8
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_2 0xb9
+#define A_TP_MIB_ISCSI_OUT_BYTE_HI_3 0xba
+#define A_TP_MIB_ISCSI_OUT_BYTE_LO_3 0xbb
+#define A_TP_MIB_NVMT_IN_PKT_0 0xc0
+#define A_TP_MIB_NVMT_IN_PKT_1 0xc1
+#define A_TP_MIB_NVMT_IN_PKT_2 0xc2
+#define A_TP_MIB_NVMT_IN_PKT_3 0xc3
+#define A_TP_MIB_NVMT_IN_BYTE_HI_0 0xc4
+#define A_TP_MIB_NVMT_IN_BYTE_LO_0 0xc5
+#define A_TP_MIB_NVMT_IN_BYTE_HI_1 0xc6
+#define A_TP_MIB_NVMT_IN_BYTE_LO_1 0xc7
+#define A_TP_MIB_NVMT_IN_BYTE_HI_2 0xc8
+#define A_TP_MIB_NVMT_IN_BYTE_LO_2 0xc9
+#define A_TP_MIB_NVMT_IN_BYTE_HI_3 0xca
+#define A_TP_MIB_NVMT_IN_BYTE_LO_3 0xcb
+#define A_TP_MIB_NVMT_OUT_PKT_0 0xd0
+#define A_TP_MIB_NVMT_OUT_PKT_1 0xd1
+#define A_TP_MIB_NVMT_OUT_PKT_2 0xd2
+#define A_TP_MIB_NVMT_OUT_PKT_3 0xd3
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_0 0xd4
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_0 0xd5
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_1 0xd6
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_1 0xd7
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_2 0xd8
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_2 0xd9
+#define A_TP_MIB_NVMT_OUT_BYTE_HI_3 0xda
+#define A_TP_MIB_NVMT_OUT_BYTE_LO_3 0xdb
+#define A_TP_MIB_TLS_IN_PKT_0 0xe0
+#define A_TP_MIB_TLS_IN_PKT_1 0xe1
+#define A_TP_MIB_TLS_IN_PKT_2 0xe2
+#define A_TP_MIB_TLS_IN_PKT_3 0xe3
+#define A_TP_MIB_TLS_IN_BYTE_HI_0 0xe4
+#define A_TP_MIB_TLS_IN_BYTE_LO_0 0xe5
+#define A_TP_MIB_TLS_IN_BYTE_HI_1 0xe6
+#define A_TP_MIB_TLS_IN_BYTE_LO_1 0xe7
+#define A_TP_MIB_TLS_IN_BYTE_HI_2 0xe8
+#define A_TP_MIB_TLS_IN_BYTE_LO_2 0xe9
+#define A_TP_MIB_TLS_IN_BYTE_HI_3 0xea
+#define A_TP_MIB_TLS_IN_BYTE_LO_3 0xeb
+#define A_TP_MIB_TLS_OUT_PKT_0 0xf0
+#define A_TP_MIB_TLS_OUT_PKT_1 0xf1
+#define A_TP_MIB_TLS_OUT_PKT_2 0xf2
+#define A_TP_MIB_TLS_OUT_PKT_3 0xf3
+#define A_TP_MIB_TLS_OUT_BYTE_HI_0 0xf4
+#define A_TP_MIB_TLS_OUT_BYTE_LO_0 0xf5
+#define A_TP_MIB_TLS_OUT_BYTE_HI_1 0xf6
+#define A_TP_MIB_TLS_OUT_BYTE_LO_1 0xf7
+#define A_TP_MIB_TLS_OUT_BYTE_HI_2 0xf8
+#define A_TP_MIB_TLS_OUT_BYTE_LO_2 0xf9
+#define A_TP_MIB_TLS_OUT_BYTE_HI_3 0xfa
+#define A_TP_MIB_TLS_OUT_BYTE_LO_3 0xfb
+#define A_TP_MIB_ROCE_IN_PKT_0 0x100
+#define A_TP_MIB_ROCE_IN_PKT_1 0x101
+#define A_TP_MIB_ROCE_IN_PKT_2 0x102
+#define A_TP_MIB_ROCE_IN_PKT_3 0x103
+#define A_TP_MIB_ROCE_IN_BYTE_HI_0 0x104
+#define A_TP_MIB_ROCE_IN_BYTE_LO_0 0x105
+#define A_TP_MIB_ROCE_IN_BYTE_HI_1 0x106
+#define A_TP_MIB_ROCE_IN_BYTE_LO_1 0x107
+#define A_TP_MIB_ROCE_IN_BYTE_HI_2 0x108
+#define A_TP_MIB_ROCE_IN_BYTE_LO_2 0x109
+#define A_TP_MIB_ROCE_IN_BYTE_HI_3 0x10a
+#define A_TP_MIB_ROCE_IN_BYTE_LO_3 0x10b
+#define A_TP_MIB_ROCE_OUT_PKT_0 0x110
+#define A_TP_MIB_ROCE_OUT_PKT_1 0x111
+#define A_TP_MIB_ROCE_OUT_PKT_2 0x112
+#define A_TP_MIB_ROCE_OUT_PKT_3 0x113
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_0 0x114
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_0 0x115
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_1 0x116
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_1 0x117
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_2 0x118
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_2 0x119
+#define A_TP_MIB_ROCE_OUT_BYTE_HI_3 0x11a
+#define A_TP_MIB_ROCE_OUT_BYTE_LO_3 0x11b
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_0 0x120
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_1 0x121
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_2 0x122
+#define A_TP_MIB_IPSEC_TNL_IN_PKT_3 0x123
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_0 0x124
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_0 0x125
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_1 0x126
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_1 0x127
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_2 0x128
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_2 0x129
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_HI_3 0x12a
+#define A_TP_MIB_IPSEC_TNL_IN_BYTE_LO_3 0x12b
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_0 0x130
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_1 0x131
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_2 0x132
+#define A_TP_MIB_IPSEC_TNL_OUT_PKT_3 0x133
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_0 0x134
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_0 0x135
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_1 0x136
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_1 0x137
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_2 0x138
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_2 0x139
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_HI_3 0x13a
+#define A_TP_MIB_IPSEC_TNL_OUT_BYTE_LO_3 0x13b
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_0 0x140
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_1 0x141
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_2 0x142
+#define A_TP_MIB_IPSEC_OFD_IN_PKT_3 0x143
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_0 0x144
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_0 0x145
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_1 0x146
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_1 0x147
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_2 0x148
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_2 0x149
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_HI_3 0x14a
+#define A_TP_MIB_IPSEC_OFD_IN_BYTE_LO_3 0x14b
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_0 0x150
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_1 0x151
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_2 0x152
+#define A_TP_MIB_IPSEC_OFD_OUT_PKT_3 0x153
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_0 0x154
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_0 0x155
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_1 0x156
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_1 0x157
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_2 0x158
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_2 0x159
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_HI_3 0x15a
+#define A_TP_MIB_IPSEC_OFD_OUT_BYTE_LO_3 0x15b
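+/*
+ * Sketch: each *_BYTE counter above is a _HI/_LO pair of 32-bit
+ * halves per channel.  Assuming both halves have been fetched through
+ * whatever indirect TP MIB access the driver uses, the 64-bit count
+ * recomposes as:
+ */
+static inline uint64_t
+tp_mib_bytes(uint32_t hi, uint32_t lo)
+{
+	return (((uint64_t)hi << 32) | lo);
+}
+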
/* registers for module ULP_TX */
#define ULP_TX_BASE_ADDR 0x8dc0
@@ -28853,7 +36006,58 @@
#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS)
#define F_ATOMIC_FIX_DIS V_ATOMIC_FIX_DIS(1U)
+#define S_LB_LEN_SEL 28
+#define V_LB_LEN_SEL(x) ((x) << S_LB_LEN_SEL)
+#define F_LB_LEN_SEL V_LB_LEN_SEL(1U)
+
+#define S_DISABLE_TPT_CREDIT_CHK 27
+#define V_DISABLE_TPT_CREDIT_CHK(x) ((x) << S_DISABLE_TPT_CREDIT_CHK)
+#define F_DISABLE_TPT_CREDIT_CHK V_DISABLE_TPT_CREDIT_CHK(1U)
+
+#define S_REQSRC 26
+#define V_REQSRC(x) ((x) << S_REQSRC)
+#define F_REQSRC V_REQSRC(1U)
+
+#define S_ERR2UP 25
+#define V_ERR2UP(x) ((x) << S_ERR2UP)
+#define F_ERR2UP V_ERR2UP(1U)
+
+#define S_SGE_INVALIDATE_DIS 24
+#define V_SGE_INVALIDATE_DIS(x) ((x) << S_SGE_INVALIDATE_DIS)
+#define F_SGE_INVALIDATE_DIS V_SGE_INVALIDATE_DIS(1U)
+
+#define S_ROCE_ACKREQ_CTRL 23
+#define V_ROCE_ACKREQ_CTRL(x) ((x) << S_ROCE_ACKREQ_CTRL)
+#define F_ROCE_ACKREQ_CTRL V_ROCE_ACKREQ_CTRL(1U)
+
+#define S_MEM_ADDR_CTRL 21
+#define M_MEM_ADDR_CTRL 0x3U
+#define V_MEM_ADDR_CTRL(x) ((x) << S_MEM_ADDR_CTRL)
+#define G_MEM_ADDR_CTRL(x) (((x) >> S_MEM_ADDR_CTRL) & M_MEM_ADDR_CTRL)
+
+#define S_TPT_EXTENSION_MODE 20
+#define V_TPT_EXTENSION_MODE(x) ((x) << S_TPT_EXTENSION_MODE)
+#define F_TPT_EXTENSION_MODE V_TPT_EXTENSION_MODE(1U)
+
+#define S_XRC_INDICATION 19
+#define V_XRC_INDICATION(x) ((x) << S_XRC_INDICATION)
+#define F_XRC_INDICATION V_XRC_INDICATION(1U)
+
+#define S_LSO_1SEG_LEN_UPD_EN 18
+#define V_LSO_1SEG_LEN_UPD_EN(x) ((x) << S_LSO_1SEG_LEN_UPD_EN)
+#define F_LSO_1SEG_LEN_UPD_EN V_LSO_1SEG_LEN_UPD_EN(1U)
+
+#define S_PKT_ISGL_ERR_ST_EN 17
+#define V_PKT_ISGL_ERR_ST_EN(x) ((x) << S_PKT_ISGL_ERR_ST_EN)
+#define F_PKT_ISGL_ERR_ST_EN V_PKT_ISGL_ERR_ST_EN(1U)
+
#define A_ULP_TX_PERR_INJECT 0x8dc4
+
+#define S_T7_1_MEMSEL 1
+#define M_T7_1_MEMSEL 0x7fU
+#define V_T7_1_MEMSEL(x) ((x) << S_T7_1_MEMSEL)
+#define G_T7_1_MEMSEL(x) (((x) >> S_T7_1_MEMSEL) & M_T7_1_MEMSEL)
+
#define A_ULP_TX_INT_ENABLE 0x8dc8
#define S_PBL_BOUND_ERR_CH3 31
@@ -28984,8 +36188,28 @@
#define V_IMM_DATA_PERR_SET_CH0(x) ((x) << S_IMM_DATA_PERR_SET_CH0)
#define F_IMM_DATA_PERR_SET_CH0 V_IMM_DATA_PERR_SET_CH0(1U)
+#define A_ULP_TX_INT_ENABLE_1 0x8dc8
+
+#define S_TLS_DSGL_PARERR3 3
+#define V_TLS_DSGL_PARERR3(x) ((x) << S_TLS_DSGL_PARERR3)
+#define F_TLS_DSGL_PARERR3 V_TLS_DSGL_PARERR3(1U)
+
+#define S_TLS_DSGL_PARERR2 2
+#define V_TLS_DSGL_PARERR2(x) ((x) << S_TLS_DSGL_PARERR2)
+#define F_TLS_DSGL_PARERR2 V_TLS_DSGL_PARERR2(1U)
+
+#define S_TLS_DSGL_PARERR1 1
+#define V_TLS_DSGL_PARERR1(x) ((x) << S_TLS_DSGL_PARERR1)
+#define F_TLS_DSGL_PARERR1 V_TLS_DSGL_PARERR1(1U)
+
+#define S_TLS_DSGL_PARERR0 0
+#define V_TLS_DSGL_PARERR0(x) ((x) << S_TLS_DSGL_PARERR0)
+#define F_TLS_DSGL_PARERR0 V_TLS_DSGL_PARERR0(1U)
+
#define A_ULP_TX_INT_CAUSE 0x8dcc
+#define A_ULP_TX_INT_CAUSE_1 0x8dcc
#define A_ULP_TX_PERR_ENABLE 0x8dd0
+#define A_ULP_TX_PERR_ENABLE_1 0x8dd0
#define A_ULP_TX_TPT_LLIMIT 0x8dd4
#define A_ULP_TX_TPT_ULIMIT 0x8dd8
#define A_ULP_TX_PBL_LLIMIT 0x8ddc
@@ -29014,6 +36238,13 @@
#define F_TLSDISABLE V_TLSDISABLE(1U)
#define A_ULP_TX_CPL_ERR_MASK_L 0x8de8
+#define A_ULP_TX_FID_1 0x8de8
+
+#define S_FID_1 0
+#define M_FID_1 0x7ffU
+#define V_FID_1(x) ((x) << S_FID_1)
+#define G_FID_1(x) (((x) >> S_FID_1) & M_FID_1)
+
#define A_ULP_TX_CPL_ERR_MASK_H 0x8dec
#define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0
#define A_ULP_TX_CPL_ERR_VALUE_H 0x8df4
@@ -29166,6 +36397,15 @@
#define V_WRREQ_SZ(x) ((x) << S_WRREQ_SZ)
#define G_WRREQ_SZ(x) (((x) >> S_WRREQ_SZ) & M_WRREQ_SZ)
+#define S_T7_GLOBALENABLE 31
+#define V_T7_GLOBALENABLE(x) ((x) << S_T7_GLOBALENABLE)
+#define F_T7_GLOBALENABLE V_T7_GLOBALENABLE(1U)
+
+#define S_RDREQ_SZ 3
+#define M_RDREQ_SZ 0x7U
+#define V_RDREQ_SZ(x) ((x) << S_RDREQ_SZ)
+#define G_RDREQ_SZ(x) (((x) >> S_RDREQ_SZ) & M_RDREQ_SZ)
+
#define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34
#define A_ULP_TX_PERR_INJECT_2 0x8e34
@@ -29385,6 +36625,200 @@
#define A_ULP_TX_INT_CAUSE_2 0x8e80
#define A_ULP_TX_PERR_ENABLE_2 0x8e84
+#define A_ULP_TX_INT_ENABLE_3 0x8e88
+
+#define S_GF_SGE_FIFO_PARERR3 31
+#define V_GF_SGE_FIFO_PARERR3(x) ((x) << S_GF_SGE_FIFO_PARERR3)
+#define F_GF_SGE_FIFO_PARERR3 V_GF_SGE_FIFO_PARERR3(1U)
+
+#define S_GF_SGE_FIFO_PARERR2 30
+#define V_GF_SGE_FIFO_PARERR2(x) ((x) << S_GF_SGE_FIFO_PARERR2)
+#define F_GF_SGE_FIFO_PARERR2 V_GF_SGE_FIFO_PARERR2(1U)
+
+#define S_GF_SGE_FIFO_PARERR1 29
+#define V_GF_SGE_FIFO_PARERR1(x) ((x) << S_GF_SGE_FIFO_PARERR1)
+#define F_GF_SGE_FIFO_PARERR1 V_GF_SGE_FIFO_PARERR1(1U)
+
+#define S_GF_SGE_FIFO_PARERR0 28
+#define V_GF_SGE_FIFO_PARERR0(x) ((x) << S_GF_SGE_FIFO_PARERR0)
+#define F_GF_SGE_FIFO_PARERR0 V_GF_SGE_FIFO_PARERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR3 27
+#define V_DEDUPE_SGE_FIFO_PARERR3(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR3)
+#define F_DEDUPE_SGE_FIFO_PARERR3 V_DEDUPE_SGE_FIFO_PARERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR2 26
+#define V_DEDUPE_SGE_FIFO_PARERR2(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR2)
+#define F_DEDUPE_SGE_FIFO_PARERR2 V_DEDUPE_SGE_FIFO_PARERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR1 25
+#define V_DEDUPE_SGE_FIFO_PARERR1(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR1)
+#define F_DEDUPE_SGE_FIFO_PARERR1 V_DEDUPE_SGE_FIFO_PARERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_PARERR0 24
+#define V_DEDUPE_SGE_FIFO_PARERR0(x) ((x) << S_DEDUPE_SGE_FIFO_PARERR0)
+#define F_DEDUPE_SGE_FIFO_PARERR0 V_DEDUPE_SGE_FIFO_PARERR0(1U)
+
+#define S_GF3_DSGL_FIFO_PARERR 23
+#define V_GF3_DSGL_FIFO_PARERR(x) ((x) << S_GF3_DSGL_FIFO_PARERR)
+#define F_GF3_DSGL_FIFO_PARERR V_GF3_DSGL_FIFO_PARERR(1U)
+
+#define S_GF2_DSGL_FIFO_PARERR 22
+#define V_GF2_DSGL_FIFO_PARERR(x) ((x) << S_GF2_DSGL_FIFO_PARERR)
+#define F_GF2_DSGL_FIFO_PARERR V_GF2_DSGL_FIFO_PARERR(1U)
+
+#define S_GF1_DSGL_FIFO_PARERR 21
+#define V_GF1_DSGL_FIFO_PARERR(x) ((x) << S_GF1_DSGL_FIFO_PARERR)
+#define F_GF1_DSGL_FIFO_PARERR V_GF1_DSGL_FIFO_PARERR(1U)
+
+#define S_GF0_DSGL_FIFO_PARERR 20
+#define V_GF0_DSGL_FIFO_PARERR(x) ((x) << S_GF0_DSGL_FIFO_PARERR)
+#define F_GF0_DSGL_FIFO_PARERR V_GF0_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE3_DSGL_FIFO_PARERR 19
+#define V_DEDUPE3_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE3_DSGL_FIFO_PARERR)
+#define F_DEDUPE3_DSGL_FIFO_PARERR V_DEDUPE3_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE2_DSGL_FIFO_PARERR 18
+#define V_DEDUPE2_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE2_DSGL_FIFO_PARERR)
+#define F_DEDUPE2_DSGL_FIFO_PARERR V_DEDUPE2_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE1_DSGL_FIFO_PARERR 17
+#define V_DEDUPE1_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE1_DSGL_FIFO_PARERR)
+#define F_DEDUPE1_DSGL_FIFO_PARERR V_DEDUPE1_DSGL_FIFO_PARERR(1U)
+
+#define S_DEDUPE0_DSGL_FIFO_PARERR 16
+#define V_DEDUPE0_DSGL_FIFO_PARERR(x) ((x) << S_DEDUPE0_DSGL_FIFO_PARERR)
+#define F_DEDUPE0_DSGL_FIFO_PARERR V_DEDUPE0_DSGL_FIFO_PARERR(1U)
+
+#define S_XP10_SGE_FIFO_PARERR 15
+#define V_XP10_SGE_FIFO_PARERR(x) ((x) << S_XP10_SGE_FIFO_PARERR)
+#define F_XP10_SGE_FIFO_PARERR V_XP10_SGE_FIFO_PARERR(1U)
+
+#define S_DSGL_PAR_ERR 14
+#define V_DSGL_PAR_ERR(x) ((x) << S_DSGL_PAR_ERR)
+#define F_DSGL_PAR_ERR V_DSGL_PAR_ERR(1U)
+
+#define S_CDDIP_INT 13
+#define V_CDDIP_INT(x) ((x) << S_CDDIP_INT)
+#define F_CDDIP_INT V_CDDIP_INT(1U)
+
+#define S_CCEIP_INT 12
+#define V_CCEIP_INT(x) ((x) << S_CCEIP_INT)
+#define F_CCEIP_INT V_CCEIP_INT(1U)
+
+#define S_TLS_SGE_FIFO_PARERR3 11
+#define V_TLS_SGE_FIFO_PARERR3(x) ((x) << S_TLS_SGE_FIFO_PARERR3)
+#define F_TLS_SGE_FIFO_PARERR3 V_TLS_SGE_FIFO_PARERR3(1U)
+
+#define S_TLS_SGE_FIFO_PARERR2 10
+#define V_TLS_SGE_FIFO_PARERR2(x) ((x) << S_TLS_SGE_FIFO_PARERR2)
+#define F_TLS_SGE_FIFO_PARERR2 V_TLS_SGE_FIFO_PARERR2(1U)
+
+#define S_TLS_SGE_FIFO_PARERR1 9
+#define V_TLS_SGE_FIFO_PARERR1(x) ((x) << S_TLS_SGE_FIFO_PARERR1)
+#define F_TLS_SGE_FIFO_PARERR1 V_TLS_SGE_FIFO_PARERR1(1U)
+
+#define S_TLS_SGE_FIFO_PARERR0 8
+#define V_TLS_SGE_FIFO_PARERR0(x) ((x) << S_TLS_SGE_FIFO_PARERR0)
+#define F_TLS_SGE_FIFO_PARERR0 V_TLS_SGE_FIFO_PARERR0(1U)
+
+#define S_ULP2SMARBT_RSP_PERR 6
+#define V_ULP2SMARBT_RSP_PERR(x) ((x) << S_ULP2SMARBT_RSP_PERR)
+#define F_ULP2SMARBT_RSP_PERR V_ULP2SMARBT_RSP_PERR(1U)
+
+#define S_ULPTX2MA_RSP_PERR 5
+#define V_ULPTX2MA_RSP_PERR(x) ((x) << S_ULPTX2MA_RSP_PERR)
+#define F_ULPTX2MA_RSP_PERR V_ULPTX2MA_RSP_PERR(1U)
+
+#define S_PCIE2ULP_PERR3 4
+#define V_PCIE2ULP_PERR3(x) ((x) << S_PCIE2ULP_PERR3)
+#define F_PCIE2ULP_PERR3 V_PCIE2ULP_PERR3(1U)
+
+#define S_PCIE2ULP_PERR2 3
+#define V_PCIE2ULP_PERR2(x) ((x) << S_PCIE2ULP_PERR2)
+#define F_PCIE2ULP_PERR2 V_PCIE2ULP_PERR2(1U)
+
+#define S_PCIE2ULP_PERR1 2
+#define V_PCIE2ULP_PERR1(x) ((x) << S_PCIE2ULP_PERR1)
+#define F_PCIE2ULP_PERR1 V_PCIE2ULP_PERR1(1U)
+
+#define S_PCIE2ULP_PERR0 1
+#define V_PCIE2ULP_PERR0(x) ((x) << S_PCIE2ULP_PERR0)
+#define F_PCIE2ULP_PERR0 V_PCIE2ULP_PERR0(1U)
+
+#define S_CIM2ULP_PERR 0
+#define V_CIM2ULP_PERR(x) ((x) << S_CIM2ULP_PERR)
+#define F_CIM2ULP_PERR V_CIM2ULP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_3 0x8e8c
+#define A_ULP_TX_PERR_ENABLE_3 0x8e90
+#define A_ULP_TX_INT_ENABLE_4 0x8e94
+
+#define S_DMA_PAR_ERR3 28
+#define M_DMA_PAR_ERR3 0xfU
+#define V_DMA_PAR_ERR3(x) ((x) << S_DMA_PAR_ERR3)
+#define G_DMA_PAR_ERR3(x) (((x) >> S_DMA_PAR_ERR3) & M_DMA_PAR_ERR3)
+
+#define S_DMA_PAR_ERR2 24
+#define M_DMA_PAR_ERR2 0xfU
+#define V_DMA_PAR_ERR2(x) ((x) << S_DMA_PAR_ERR2)
+#define G_DMA_PAR_ERR2(x) (((x) >> S_DMA_PAR_ERR2) & M_DMA_PAR_ERR2)
+
+#define S_DMA_PAR_ERR1 20
+#define M_DMA_PAR_ERR1 0xfU
+#define V_DMA_PAR_ERR1(x) ((x) << S_DMA_PAR_ERR1)
+#define G_DMA_PAR_ERR1(x) (((x) >> S_DMA_PAR_ERR1) & M_DMA_PAR_ERR1)
+
+#define S_DMA_PAR_ERR0 16
+#define M_DMA_PAR_ERR0 0xfU
+#define V_DMA_PAR_ERR0(x) ((x) << S_DMA_PAR_ERR0)
+#define G_DMA_PAR_ERR0(x) (((x) >> S_DMA_PAR_ERR0) & M_DMA_PAR_ERR0)
+
+#define S_CORE_CMD_FIFO_LB1 12
+#define M_CORE_CMD_FIFO_LB1 0xfU
+#define V_CORE_CMD_FIFO_LB1(x) ((x) << S_CORE_CMD_FIFO_LB1)
+#define G_CORE_CMD_FIFO_LB1(x) (((x) >> S_CORE_CMD_FIFO_LB1) & M_CORE_CMD_FIFO_LB1)
+
+#define S_CORE_CMD_FIFO_LB0 8
+#define M_CORE_CMD_FIFO_LB0 0xfU
+#define V_CORE_CMD_FIFO_LB0(x) ((x) << S_CORE_CMD_FIFO_LB0)
+#define G_CORE_CMD_FIFO_LB0(x) (((x) >> S_CORE_CMD_FIFO_LB0) & M_CORE_CMD_FIFO_LB0)
+
+#define S_XP10_2_ULP_PERR 7
+#define V_XP10_2_ULP_PERR(x) ((x) << S_XP10_2_ULP_PERR)
+#define F_XP10_2_ULP_PERR V_XP10_2_ULP_PERR(1U)
+
+#define S_ULP_2_XP10_PERR 6
+#define V_ULP_2_XP10_PERR(x) ((x) << S_ULP_2_XP10_PERR)
+#define F_ULP_2_XP10_PERR V_ULP_2_XP10_PERR(1U)
+
+#define S_CMD_FIFO_LB1 5
+#define V_CMD_FIFO_LB1(x) ((x) << S_CMD_FIFO_LB1)
+#define F_CMD_FIFO_LB1 V_CMD_FIFO_LB1(1U)
+
+#define S_CMD_FIFO_LB0 4
+#define V_CMD_FIFO_LB0(x) ((x) << S_CMD_FIFO_LB0)
+#define F_CMD_FIFO_LB0 V_CMD_FIFO_LB0(1U)
+
+#define S_TF_TP_PERR 3
+#define V_TF_TP_PERR(x) ((x) << S_TF_TP_PERR)
+#define F_TF_TP_PERR V_TF_TP_PERR(1U)
+
+#define S_TF_SGE_PERR 2
+#define V_TF_SGE_PERR(x) ((x) << S_TF_SGE_PERR)
+#define F_TF_SGE_PERR V_TF_SGE_PERR(1U)
+
+#define S_TF_MEM_PERR 1
+#define V_TF_MEM_PERR(x) ((x) << S_TF_MEM_PERR)
+#define F_TF_MEM_PERR V_TF_MEM_PERR(1U)
+
+#define S_TF_MP_PERR 0
+#define V_TF_MP_PERR(x) ((x) << S_TF_MP_PERR)
+#define F_TF_MP_PERR V_TF_MP_PERR(1U)
+
+#define A_ULP_TX_INT_CAUSE_4 0x8e98
+#define A_ULP_TX_PERR_ENABLE_4 0x8e9c
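+/*
+ * Sketch of a cause-register walk, assuming these INT_CAUSE registers
+ * follow the usual write-1-to-clear convention; 'cause' is a value a
+ * hypothetical caller already read from A_ULP_TX_INT_CAUSE_3 and will
+ * write back to acknowledge.
+ */
+static inline uint32_t
+ulptx_intr3_fatal(uint32_t cause)
+{
+	uint32_t fatal = 0;
+
+	if (cause & F_DSGL_PAR_ERR)	/* parity: treat as fatal */
+		fatal |= F_DSGL_PAR_ERR;
+	/* F_CDDIP_INT / F_CCEIP_INT would be dispatched separately. */
+	return (fatal);
+}
+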
#define A_ULP_TX_SE_CNT_ERR 0x8ea0
#define S_ERR_CH3 12
@@ -29531,16 +36965,381 @@
#define A_ULP_TX_CSU_REVISION 0x8ebc
#define A_ULP_TX_LA_RDPTR_0 0x8ec0
+#define A_ULP_TX_PL2APB_INFO 0x8ec0
+
+#define S_PL2APB_BRIDGE_HUNG 27
+#define V_PL2APB_BRIDGE_HUNG(x) ((x) << S_PL2APB_BRIDGE_HUNG)
+#define F_PL2APB_BRIDGE_HUNG V_PL2APB_BRIDGE_HUNG(1U)
+
+#define S_PL2APB_BRIDGE_STATE 26
+#define V_PL2APB_BRIDGE_STATE(x) ((x) << S_PL2APB_BRIDGE_STATE)
+#define F_PL2APB_BRIDGE_STATE V_PL2APB_BRIDGE_STATE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_TYPE 25
+#define V_PL2APB_BRIDGE_HUNG_TYPE(x) ((x) << S_PL2APB_BRIDGE_HUNG_TYPE)
+#define F_PL2APB_BRIDGE_HUNG_TYPE V_PL2APB_BRIDGE_HUNG_TYPE(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ID 24
+#define V_PL2APB_BRIDGE_HUNG_ID(x) ((x) << S_PL2APB_BRIDGE_HUNG_ID)
+#define F_PL2APB_BRIDGE_HUNG_ID V_PL2APB_BRIDGE_HUNG_ID(1U)
+
+#define S_PL2APB_BRIDGE_HUNG_ADDR 0
+#define M_PL2APB_BRIDGE_HUNG_ADDR 0xfffffU
+#define V_PL2APB_BRIDGE_HUNG_ADDR(x) ((x) << S_PL2APB_BRIDGE_HUNG_ADDR)
+#define G_PL2APB_BRIDGE_HUNG_ADDR(x) (((x) >> S_PL2APB_BRIDGE_HUNG_ADDR) & M_PL2APB_BRIDGE_HUNG_ADDR)
+
#define A_ULP_TX_LA_RDDATA_0 0x8ec4
+#define A_ULP_TX_INT_ENABLE_5 0x8ec4
+
+#define S_DEDUPE_PERR3 23
+#define V_DEDUPE_PERR3(x) ((x) << S_DEDUPE_PERR3)
+#define F_DEDUPE_PERR3 V_DEDUPE_PERR3(1U)
+
+#define S_DEDUPE_PERR2 22
+#define V_DEDUPE_PERR2(x) ((x) << S_DEDUPE_PERR2)
+#define F_DEDUPE_PERR2 V_DEDUPE_PERR2(1U)
+
+#define S_DEDUPE_PERR1 21
+#define V_DEDUPE_PERR1(x) ((x) << S_DEDUPE_PERR1)
+#define F_DEDUPE_PERR1 V_DEDUPE_PERR1(1U)
+
+#define S_DEDUPE_PERR0 20
+#define V_DEDUPE_PERR0(x) ((x) << S_DEDUPE_PERR0)
+#define F_DEDUPE_PERR0 V_DEDUPE_PERR0(1U)
+
+#define S_GF_PERR3 19
+#define V_GF_PERR3(x) ((x) << S_GF_PERR3)
+#define F_GF_PERR3 V_GF_PERR3(1U)
+
+#define S_GF_PERR2 18
+#define V_GF_PERR2(x) ((x) << S_GF_PERR2)
+#define F_GF_PERR2 V_GF_PERR2(1U)
+
+#define S_GF_PERR1 17
+#define V_GF_PERR1(x) ((x) << S_GF_PERR1)
+#define F_GF_PERR1 V_GF_PERR1(1U)
+
+#define S_GF_PERR0 16
+#define V_GF_PERR0(x) ((x) << S_GF_PERR0)
+#define F_GF_PERR0 V_GF_PERR0(1U)
+
+#define S_SGE2ULP_INV_PERR 13
+#define V_SGE2ULP_INV_PERR(x) ((x) << S_SGE2ULP_INV_PERR)
+#define F_SGE2ULP_INV_PERR V_SGE2ULP_INV_PERR(1U)
+
+#define S_T7_PL_BUSPERR 12
+#define V_T7_PL_BUSPERR(x) ((x) << S_T7_PL_BUSPERR)
+#define F_T7_PL_BUSPERR V_T7_PL_BUSPERR(1U)
+
+#define S_TLSTX2ULPTX_PERR3 11
+#define V_TLSTX2ULPTX_PERR3(x) ((x) << S_TLSTX2ULPTX_PERR3)
+#define F_TLSTX2ULPTX_PERR3 V_TLSTX2ULPTX_PERR3(1U)
+
+#define S_TLSTX2ULPTX_PERR2 10
+#define V_TLSTX2ULPTX_PERR2(x) ((x) << S_TLSTX2ULPTX_PERR2)
+#define F_TLSTX2ULPTX_PERR2 V_TLSTX2ULPTX_PERR2(1U)
+
+#define S_TLSTX2ULPTX_PERR1 9
+#define V_TLSTX2ULPTX_PERR1(x) ((x) << S_TLSTX2ULPTX_PERR1)
+#define F_TLSTX2ULPTX_PERR1 V_TLSTX2ULPTX_PERR1(1U)
+
+#define S_TLSTX2ULPTX_PERR0 8
+#define V_TLSTX2ULPTX_PERR0(x) ((x) << S_TLSTX2ULPTX_PERR0)
+#define F_TLSTX2ULPTX_PERR0 V_TLSTX2ULPTX_PERR0(1U)
+
+#define S_XP10_2_ULP_PL_PERR 1
+#define V_XP10_2_ULP_PL_PERR(x) ((x) << S_XP10_2_ULP_PL_PERR)
+#define F_XP10_2_ULP_PL_PERR V_XP10_2_ULP_PL_PERR(1U)
+
+#define S_ULP_2_XP10_PL_PERR 0
+#define V_ULP_2_XP10_PL_PERR(x) ((x) << S_ULP_2_XP10_PL_PERR)
+#define F_ULP_2_XP10_PL_PERR V_ULP_2_XP10_PL_PERR(1U)
+
#define A_ULP_TX_LA_WRPTR_0 0x8ec8
+#define A_ULP_TX_INT_CAUSE_5 0x8ec8
#define A_ULP_TX_LA_RESERVED_0 0x8ecc
+#define A_ULP_TX_PERR_ENABLE_5 0x8ecc
#define A_ULP_TX_LA_RDPTR_1 0x8ed0
+#define A_ULP_TX_INT_CAUSE_6 0x8ed0
+
+#define S_DDR_HDR_FIFO_PERR_SET3 12
+#define V_DDR_HDR_FIFO_PERR_SET3(x) ((x) << S_DDR_HDR_FIFO_PERR_SET3)
+#define F_DDR_HDR_FIFO_PERR_SET3 V_DDR_HDR_FIFO_PERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET2 11
+#define V_DDR_HDR_FIFO_PERR_SET2(x) ((x) << S_DDR_HDR_FIFO_PERR_SET2)
+#define F_DDR_HDR_FIFO_PERR_SET2 V_DDR_HDR_FIFO_PERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET1 10
+#define V_DDR_HDR_FIFO_PERR_SET1(x) ((x) << S_DDR_HDR_FIFO_PERR_SET1)
+#define F_DDR_HDR_FIFO_PERR_SET1 V_DDR_HDR_FIFO_PERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_PERR_SET0 9
+#define V_DDR_HDR_FIFO_PERR_SET0(x) ((x) << S_DDR_HDR_FIFO_PERR_SET0)
+#define F_DDR_HDR_FIFO_PERR_SET0 V_DDR_HDR_FIFO_PERR_SET0(1U)
+
+#define S_PRE_MP_RSP_PERR_SET3 8
+#define V_PRE_MP_RSP_PERR_SET3(x) ((x) << S_PRE_MP_RSP_PERR_SET3)
+#define F_PRE_MP_RSP_PERR_SET3 V_PRE_MP_RSP_PERR_SET3(1U)
+
+#define S_PRE_MP_RSP_PERR_SET2 7
+#define V_PRE_MP_RSP_PERR_SET2(x) ((x) << S_PRE_MP_RSP_PERR_SET2)
+#define F_PRE_MP_RSP_PERR_SET2 V_PRE_MP_RSP_PERR_SET2(1U)
+
+#define S_PRE_MP_RSP_PERR_SET1 6
+#define V_PRE_MP_RSP_PERR_SET1(x) ((x) << S_PRE_MP_RSP_PERR_SET1)
+#define F_PRE_MP_RSP_PERR_SET1 V_PRE_MP_RSP_PERR_SET1(1U)
+
+#define S_PRE_MP_RSP_PERR_SET0 5
+#define V_PRE_MP_RSP_PERR_SET0(x) ((x) << S_PRE_MP_RSP_PERR_SET0)
+#define F_PRE_MP_RSP_PERR_SET0 V_PRE_MP_RSP_PERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET3 4
+#define V_PRE_CQE_FIFO_PERR_SET3(x) ((x) << S_PRE_CQE_FIFO_PERR_SET3)
+#define F_PRE_CQE_FIFO_PERR_SET3 V_PRE_CQE_FIFO_PERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET2 3
+#define V_PRE_CQE_FIFO_PERR_SET2(x) ((x) << S_PRE_CQE_FIFO_PERR_SET2)
+#define F_PRE_CQE_FIFO_PERR_SET2 V_PRE_CQE_FIFO_PERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET1 2
+#define V_PRE_CQE_FIFO_PERR_SET1(x) ((x) << S_PRE_CQE_FIFO_PERR_SET1)
+#define F_PRE_CQE_FIFO_PERR_SET1 V_PRE_CQE_FIFO_PERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_PERR_SET0 1
+#define V_PRE_CQE_FIFO_PERR_SET0(x) ((x) << S_PRE_CQE_FIFO_PERR_SET0)
+#define F_PRE_CQE_FIFO_PERR_SET0 V_PRE_CQE_FIFO_PERR_SET0(1U)
+
+#define S_RSP_FIFO_PERR_SET 0
+#define V_RSP_FIFO_PERR_SET(x) ((x) << S_RSP_FIFO_PERR_SET)
+#define F_RSP_FIFO_PERR_SET V_RSP_FIFO_PERR_SET(1U)
+
#define A_ULP_TX_LA_RDDATA_1 0x8ed4
+#define A_ULP_TX_INT_ENABLE_6 0x8ed4
#define A_ULP_TX_LA_WRPTR_1 0x8ed8
+#define A_ULP_TX_PERR_ENABLE_6 0x8ed8
#define A_ULP_TX_LA_RESERVED_1 0x8edc
+#define A_ULP_TX_INT_CAUSE_7 0x8edc
+
+#define S_TLS_SGE_FIFO_CORERR3 23
+#define V_TLS_SGE_FIFO_CORERR3(x) ((x) << S_TLS_SGE_FIFO_CORERR3)
+#define F_TLS_SGE_FIFO_CORERR3 V_TLS_SGE_FIFO_CORERR3(1U)
+
+#define S_TLS_SGE_FIFO_CORERR2 22
+#define V_TLS_SGE_FIFO_CORERR2(x) ((x) << S_TLS_SGE_FIFO_CORERR2)
+#define F_TLS_SGE_FIFO_CORERR2 V_TLS_SGE_FIFO_CORERR2(1U)
+
+#define S_TLS_SGE_FIFO_CORERR1 21
+#define V_TLS_SGE_FIFO_CORERR1(x) ((x) << S_TLS_SGE_FIFO_CORERR1)
+#define F_TLS_SGE_FIFO_CORERR1 V_TLS_SGE_FIFO_CORERR1(1U)
+
+#define S_TLS_SGE_FIFO_CORERR0 20
+#define V_TLS_SGE_FIFO_CORERR0(x) ((x) << S_TLS_SGE_FIFO_CORERR0)
+#define F_TLS_SGE_FIFO_CORERR0 V_TLS_SGE_FIFO_CORERR0(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET3 19
+#define V_LSO_HDR_SRAM_CERR_SET3(x) ((x) << S_LSO_HDR_SRAM_CERR_SET3)
+#define F_LSO_HDR_SRAM_CERR_SET3 V_LSO_HDR_SRAM_CERR_SET3(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET2 18
+#define V_LSO_HDR_SRAM_CERR_SET2(x) ((x) << S_LSO_HDR_SRAM_CERR_SET2)
+#define F_LSO_HDR_SRAM_CERR_SET2 V_LSO_HDR_SRAM_CERR_SET2(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET1 17
+#define V_LSO_HDR_SRAM_CERR_SET1(x) ((x) << S_LSO_HDR_SRAM_CERR_SET1)
+#define F_LSO_HDR_SRAM_CERR_SET1 V_LSO_HDR_SRAM_CERR_SET1(1U)
+
+#define S_LSO_HDR_SRAM_CERR_SET0 16
+#define V_LSO_HDR_SRAM_CERR_SET0(x) ((x) << S_LSO_HDR_SRAM_CERR_SET0)
+#define F_LSO_HDR_SRAM_CERR_SET0 V_LSO_HDR_SRAM_CERR_SET0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB1 15
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB1 V_CORE_CMD_FIFO_CERR_SET_CH3_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB1 14
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB1 V_CORE_CMD_FIFO_CERR_SET_CH2_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB1 13
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB1 V_CORE_CMD_FIFO_CERR_SET_CH1_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB1 12
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB1)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB1 V_CORE_CMD_FIFO_CERR_SET_CH0_LB1(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH3_LB0 11
+#define V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH3_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH3_LB0 V_CORE_CMD_FIFO_CERR_SET_CH3_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH2_LB0 10
+#define V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH2_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH2_LB0 V_CORE_CMD_FIFO_CERR_SET_CH2_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH1_LB0 9
+#define V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH1_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH1_LB0 V_CORE_CMD_FIFO_CERR_SET_CH1_LB0(1U)
+
+#define S_CORE_CMD_FIFO_CERR_SET_CH0_LB0 8
+#define V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(x) ((x) << S_CORE_CMD_FIFO_CERR_SET_CH0_LB0)
+#define F_CORE_CMD_FIFO_CERR_SET_CH0_LB0 V_CORE_CMD_FIFO_CERR_SET_CH0_LB0(1U)
+
+#define S_CQE_FIFO_CERR_SET3 7
+#define V_CQE_FIFO_CERR_SET3(x) ((x) << S_CQE_FIFO_CERR_SET3)
+#define F_CQE_FIFO_CERR_SET3 V_CQE_FIFO_CERR_SET3(1U)
+
+#define S_CQE_FIFO_CERR_SET2 6
+#define V_CQE_FIFO_CERR_SET2(x) ((x) << S_CQE_FIFO_CERR_SET2)
+#define F_CQE_FIFO_CERR_SET2 V_CQE_FIFO_CERR_SET2(1U)
+
+#define S_CQE_FIFO_CERR_SET1 5
+#define V_CQE_FIFO_CERR_SET1(x) ((x) << S_CQE_FIFO_CERR_SET1)
+#define F_CQE_FIFO_CERR_SET1 V_CQE_FIFO_CERR_SET1(1U)
+
+#define S_CQE_FIFO_CERR_SET0 4
+#define V_CQE_FIFO_CERR_SET0(x) ((x) << S_CQE_FIFO_CERR_SET0)
+#define F_CQE_FIFO_CERR_SET0 V_CQE_FIFO_CERR_SET0(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET3 3
+#define V_PRE_CQE_FIFO_CERR_SET3(x) ((x) << S_PRE_CQE_FIFO_CERR_SET3)
+#define F_PRE_CQE_FIFO_CERR_SET3 V_PRE_CQE_FIFO_CERR_SET3(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET2 2
+#define V_PRE_CQE_FIFO_CERR_SET2(x) ((x) << S_PRE_CQE_FIFO_CERR_SET2)
+#define F_PRE_CQE_FIFO_CERR_SET2 V_PRE_CQE_FIFO_CERR_SET2(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET1 1
+#define V_PRE_CQE_FIFO_CERR_SET1(x) ((x) << S_PRE_CQE_FIFO_CERR_SET1)
+#define F_PRE_CQE_FIFO_CERR_SET1 V_PRE_CQE_FIFO_CERR_SET1(1U)
+
+#define S_PRE_CQE_FIFO_CERR_SET0 0
+#define V_PRE_CQE_FIFO_CERR_SET0(x) ((x) << S_PRE_CQE_FIFO_CERR_SET0)
+#define F_PRE_CQE_FIFO_CERR_SET0 V_PRE_CQE_FIFO_CERR_SET0(1U)
+
#define A_ULP_TX_LA_RDPTR_2 0x8ee0
+#define A_ULP_TX_INT_ENABLE_7 0x8ee0
#define A_ULP_TX_LA_RDDATA_2 0x8ee4
+#define A_ULP_TX_INT_CAUSE_8 0x8ee4
+
+#define S_MEM_RSP_FIFO_CERR_SET3 28
+#define V_MEM_RSP_FIFO_CERR_SET3(x) ((x) << S_MEM_RSP_FIFO_CERR_SET3)
+#define F_MEM_RSP_FIFO_CERR_SET3 V_MEM_RSP_FIFO_CERR_SET3(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET2 27
+#define V_MEM_RSP_FIFO_CERR_SET2(x) ((x) << S_MEM_RSP_FIFO_CERR_SET2)
+#define F_MEM_RSP_FIFO_CERR_SET2 V_MEM_RSP_FIFO_CERR_SET2(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET1 26
+#define V_MEM_RSP_FIFO_CERR_SET1(x) ((x) << S_MEM_RSP_FIFO_CERR_SET1)
+#define F_MEM_RSP_FIFO_CERR_SET1 V_MEM_RSP_FIFO_CERR_SET1(1U)
+
+#define S_MEM_RSP_FIFO_CERR_SET0 25
+#define V_MEM_RSP_FIFO_CERR_SET0(x) ((x) << S_MEM_RSP_FIFO_CERR_SET0)
+#define F_MEM_RSP_FIFO_CERR_SET0 V_MEM_RSP_FIFO_CERR_SET0(1U)
+
+#define S_PI_SRAM_CERR_SET3 24
+#define V_PI_SRAM_CERR_SET3(x) ((x) << S_PI_SRAM_CERR_SET3)
+#define F_PI_SRAM_CERR_SET3 V_PI_SRAM_CERR_SET3(1U)
+
+#define S_PI_SRAM_CERR_SET2 23
+#define V_PI_SRAM_CERR_SET2(x) ((x) << S_PI_SRAM_CERR_SET2)
+#define F_PI_SRAM_CERR_SET2 V_PI_SRAM_CERR_SET2(1U)
+
+#define S_PI_SRAM_CERR_SET1 22
+#define V_PI_SRAM_CERR_SET1(x) ((x) << S_PI_SRAM_CERR_SET1)
+#define F_PI_SRAM_CERR_SET1 V_PI_SRAM_CERR_SET1(1U)
+
+#define S_PI_SRAM_CERR_SET0 21
+#define V_PI_SRAM_CERR_SET0(x) ((x) << S_PI_SRAM_CERR_SET0)
+#define F_PI_SRAM_CERR_SET0 V_PI_SRAM_CERR_SET0(1U)
+
+#define S_PRE_MP_RSP_CERR_SET3 20
+#define V_PRE_MP_RSP_CERR_SET3(x) ((x) << S_PRE_MP_RSP_CERR_SET3)
+#define F_PRE_MP_RSP_CERR_SET3 V_PRE_MP_RSP_CERR_SET3(1U)
+
+#define S_PRE_MP_RSP_CERR_SET2 19
+#define V_PRE_MP_RSP_CERR_SET2(x) ((x) << S_PRE_MP_RSP_CERR_SET2)
+#define F_PRE_MP_RSP_CERR_SET2 V_PRE_MP_RSP_CERR_SET2(1U)
+
+#define S_PRE_MP_RSP_CERR_SET1 18
+#define V_PRE_MP_RSP_CERR_SET1(x) ((x) << S_PRE_MP_RSP_CERR_SET1)
+#define F_PRE_MP_RSP_CERR_SET1 V_PRE_MP_RSP_CERR_SET1(1U)
+
+#define S_PRE_MP_RSP_CERR_SET0 17
+#define V_PRE_MP_RSP_CERR_SET0(x) ((x) << S_PRE_MP_RSP_CERR_SET0)
+#define F_PRE_MP_RSP_CERR_SET0 V_PRE_MP_RSP_CERR_SET0(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET3 16
+#define V_DDR_HDR_FIFO_CERR_SET3(x) ((x) << S_DDR_HDR_FIFO_CERR_SET3)
+#define F_DDR_HDR_FIFO_CERR_SET3 V_DDR_HDR_FIFO_CERR_SET3(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET2 15
+#define V_DDR_HDR_FIFO_CERR_SET2(x) ((x) << S_DDR_HDR_FIFO_CERR_SET2)
+#define F_DDR_HDR_FIFO_CERR_SET2 V_DDR_HDR_FIFO_CERR_SET2(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET1 14
+#define V_DDR_HDR_FIFO_CERR_SET1(x) ((x) << S_DDR_HDR_FIFO_CERR_SET1)
+#define F_DDR_HDR_FIFO_CERR_SET1 V_DDR_HDR_FIFO_CERR_SET1(1U)
+
+#define S_DDR_HDR_FIFO_CERR_SET0 13
+#define V_DDR_HDR_FIFO_CERR_SET0(x) ((x) << S_DDR_HDR_FIFO_CERR_SET0)
+#define F_DDR_HDR_FIFO_CERR_SET0 V_DDR_HDR_FIFO_CERR_SET0(1U)
+
+#define S_CMD_FIFO_CERR_SET3 12
+#define V_CMD_FIFO_CERR_SET3(x) ((x) << S_CMD_FIFO_CERR_SET3)
+#define F_CMD_FIFO_CERR_SET3 V_CMD_FIFO_CERR_SET3(1U)
+
+#define S_CMD_FIFO_CERR_SET2 11
+#define V_CMD_FIFO_CERR_SET2(x) ((x) << S_CMD_FIFO_CERR_SET2)
+#define F_CMD_FIFO_CERR_SET2 V_CMD_FIFO_CERR_SET2(1U)
+
+#define S_CMD_FIFO_CERR_SET1 10
+#define V_CMD_FIFO_CERR_SET1(x) ((x) << S_CMD_FIFO_CERR_SET1)
+#define F_CMD_FIFO_CERR_SET1 V_CMD_FIFO_CERR_SET1(1U)
+
+#define S_CMD_FIFO_CERR_SET0 9
+#define V_CMD_FIFO_CERR_SET0(x) ((x) << S_CMD_FIFO_CERR_SET0)
+#define F_CMD_FIFO_CERR_SET0 V_CMD_FIFO_CERR_SET0(1U)
+
+#define S_GF_SGE_FIFO_CORERR3 8
+#define V_GF_SGE_FIFO_CORERR3(x) ((x) << S_GF_SGE_FIFO_CORERR3)
+#define F_GF_SGE_FIFO_CORERR3 V_GF_SGE_FIFO_CORERR3(1U)
+
+#define S_GF_SGE_FIFO_CORERR2 7
+#define V_GF_SGE_FIFO_CORERR2(x) ((x) << S_GF_SGE_FIFO_CORERR2)
+#define F_GF_SGE_FIFO_CORERR2 V_GF_SGE_FIFO_CORERR2(1U)
+
+#define S_GF_SGE_FIFO_CORERR1 6
+#define V_GF_SGE_FIFO_CORERR1(x) ((x) << S_GF_SGE_FIFO_CORERR1)
+#define F_GF_SGE_FIFO_CORERR1 V_GF_SGE_FIFO_CORERR1(1U)
+
+#define S_GF_SGE_FIFO_CORERR0 5
+#define V_GF_SGE_FIFO_CORERR0(x) ((x) << S_GF_SGE_FIFO_CORERR0)
+#define F_GF_SGE_FIFO_CORERR0 V_GF_SGE_FIFO_CORERR0(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR3 4
+#define V_DEDUPE_SGE_FIFO_CORERR3(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR3)
+#define F_DEDUPE_SGE_FIFO_CORERR3 V_DEDUPE_SGE_FIFO_CORERR3(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR2 3
+#define V_DEDUPE_SGE_FIFO_CORERR2(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR2)
+#define F_DEDUPE_SGE_FIFO_CORERR2 V_DEDUPE_SGE_FIFO_CORERR2(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR1 2
+#define V_DEDUPE_SGE_FIFO_CORERR1(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR1)
+#define F_DEDUPE_SGE_FIFO_CORERR1 V_DEDUPE_SGE_FIFO_CORERR1(1U)
+
+#define S_DEDUPE_SGE_FIFO_CORERR0 1
+#define V_DEDUPE_SGE_FIFO_CORERR0(x) ((x) << S_DEDUPE_SGE_FIFO_CORERR0)
+#define F_DEDUPE_SGE_FIFO_CORERR0 V_DEDUPE_SGE_FIFO_CORERR0(1U)
+
+#define S_RSP_FIFO_CERR_SET 0
+#define V_RSP_FIFO_CERR_SET(x) ((x) << S_RSP_FIFO_CERR_SET)
+#define F_RSP_FIFO_CERR_SET V_RSP_FIFO_CERR_SET(1U)
+
#define A_ULP_TX_LA_WRPTR_2 0x8ee8
+#define A_ULP_TX_INT_ENABLE_8 0x8ee8
#define A_ULP_TX_LA_RESERVED_2 0x8eec
#define A_ULP_TX_LA_RDPTR_3 0x8ef0
#define A_ULP_TX_LA_RDDATA_3 0x8ef4
@@ -29671,6 +37470,97 @@
#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST)
#define F_SHOVE_LAST V_SHOVE_LAST(1U)
+#define A_ULP_TX_ACCELERATOR_CTL 0x8f90
+
+#define S_FIFO_THRESHOLD 8
+#define M_FIFO_THRESHOLD 0x1fU
+#define V_FIFO_THRESHOLD(x) ((x) << S_FIFO_THRESHOLD)
+#define G_FIFO_THRESHOLD(x) (((x) >> S_FIFO_THRESHOLD) & M_FIFO_THRESHOLD)
+
+#define S_COMPRESSION_XP10DISABLECFUSE 5
+#define V_COMPRESSION_XP10DISABLECFUSE(x) ((x) << S_COMPRESSION_XP10DISABLECFUSE)
+#define F_COMPRESSION_XP10DISABLECFUSE V_COMPRESSION_XP10DISABLECFUSE(1U)
+
+#define S_COMPRESSION_XP10DISABLE 4
+#define V_COMPRESSION_XP10DISABLE(x) ((x) << S_COMPRESSION_XP10DISABLE)
+#define F_COMPRESSION_XP10DISABLE V_COMPRESSION_XP10DISABLE(1U)
+
+#define S_DEDUPEDISABLECFUSE 3
+#define V_DEDUPEDISABLECFUSE(x) ((x) << S_DEDUPEDISABLECFUSE)
+#define F_DEDUPEDISABLECFUSE V_DEDUPEDISABLECFUSE(1U)
+
+#define S_DEDUPEDISABLE 2
+#define V_DEDUPEDISABLE(x) ((x) << S_DEDUPEDISABLE)
+#define F_DEDUPEDISABLE V_DEDUPEDISABLE(1U)
+
+#define S_GFDISABLECFUSE 1
+#define V_GFDISABLECFUSE(x) ((x) << S_GFDISABLECFUSE)
+#define F_GFDISABLECFUSE V_GFDISABLECFUSE(1U)
+
+#define S_GFDISABLE 0
+#define V_GFDISABLE(x) ((x) << S_GFDISABLE)
+#define F_GFDISABLE V_GFDISABLE(1U)
+
+#define A_ULP_TX_XP10_IND_ADDR 0x8f94
+
+#define S_XP10_CONTROL 31
+#define V_XP10_CONTROL(x) ((x) << S_XP10_CONTROL)
+#define F_XP10_CONTROL V_XP10_CONTROL(1U)
+
+#define S_XP10_ADDR 0
+#define M_XP10_ADDR 0xfffffU
+#define V_XP10_ADDR(x) ((x) << S_XP10_ADDR)
+#define G_XP10_ADDR(x) (((x) >> S_XP10_ADDR) & M_XP10_ADDR)
+
+#define A_ULP_TX_XP10_IND_DATA 0x8f98
+#define A_ULP_TX_IWARP_PMOF_OPCODES_1 0x8f9c
+
+#define S_RDMA_VERIFY_RESPONSE 24
+#define M_RDMA_VERIFY_RESPONSE 0x1fU
+#define V_RDMA_VERIFY_RESPONSE(x) ((x) << S_RDMA_VERIFY_RESPONSE)
+#define G_RDMA_VERIFY_RESPONSE(x) (((x) >> S_RDMA_VERIFY_RESPONSE) & M_RDMA_VERIFY_RESPONSE)
+
+#define S_RDMA_VERIFY_REQUEST 16
+#define M_RDMA_VERIFY_REQUEST 0x1fU
+#define V_RDMA_VERIFY_REQUEST(x) ((x) << S_RDMA_VERIFY_REQUEST)
+#define G_RDMA_VERIFY_REQUEST(x) (((x) >> S_RDMA_VERIFY_REQUEST) & M_RDMA_VERIFY_REQUEST)
+
+#define S_RDMA_FLUSH_RESPONSE 8
+#define M_RDMA_FLUSH_RESPONSE 0x1fU
+#define V_RDMA_FLUSH_RESPONSE(x) ((x) << S_RDMA_FLUSH_RESPONSE)
+#define G_RDMA_FLUSH_RESPONSE(x) (((x) >> S_RDMA_FLUSH_RESPONSE) & M_RDMA_FLUSH_RESPONSE)
+
+#define S_RDMA_FLUSH_REQUEST 0
+#define M_RDMA_FLUSH_REQUEST 0x1fU
+#define V_RDMA_FLUSH_REQUEST(x) ((x) << S_RDMA_FLUSH_REQUEST)
+#define G_RDMA_FLUSH_REQUEST(x) (((x) >> S_RDMA_FLUSH_REQUEST) & M_RDMA_FLUSH_REQUEST)
+
+#define A_ULP_TX_IWARP_PMOF_OPCODES_2 0x8fa0
+
+#define S_RDMA_SEND_WITH_SE_IMMEDIATE 24
+#define M_RDMA_SEND_WITH_SE_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_SE_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_SE_IMMEDIATE)
+#define G_RDMA_SEND_WITH_SE_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_SE_IMMEDIATE) & M_RDMA_SEND_WITH_SE_IMMEDIATE)
+
+#define S_RDMA_SEND_WITH_IMMEDIATE 16
+#define M_RDMA_SEND_WITH_IMMEDIATE 0x1fU
+#define V_RDMA_SEND_WITH_IMMEDIATE(x) ((x) << S_RDMA_SEND_WITH_IMMEDIATE)
+#define G_RDMA_SEND_WITH_IMMEDIATE(x) (((x) >> S_RDMA_SEND_WITH_IMMEDIATE) & M_RDMA_SEND_WITH_IMMEDIATE)
+
+#define S_RDMA_ATOMIC_WRITE_RESPONSE 8
+#define M_RDMA_ATOMIC_WRITE_RESPONSE 0x1fU
+#define V_RDMA_ATOMIC_WRITE_RESPONSE(x) ((x) << S_RDMA_ATOMIC_WRITE_RESPONSE)
+#define G_RDMA_ATOMIC_WRITE_RESPONSE(x) (((x) >> S_RDMA_ATOMIC_WRITE_RESPONSE) & M_RDMA_ATOMIC_WRITE_RESPONSE)
+
+#define S_RDMA_ATOMIC_WRITE_REQUEST 0
+#define M_RDMA_ATOMIC_WRITE_REQUEST 0x1fU
+#define V_RDMA_ATOMIC_WRITE_REQUEST(x) ((x) << S_RDMA_ATOMIC_WRITE_REQUEST)
+#define G_RDMA_ATOMIC_WRITE_REQUEST(x) (((x) >> S_RDMA_ATOMIC_WRITE_REQUEST) & M_RDMA_ATOMIC_WRITE_REQUEST)
+
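+/*
+ * Sketch: the four 5-bit PMOF opcode fields pack into one programming
+ * word.  The opcode values are caller-supplied placeholders, not
+ * numbers taken from any spec.
+ */
+static inline uint32_t
+ulptx_pmof_opcodes_2(uint32_t aw_req, uint32_t aw_rsp,
+    uint32_t send_imm, uint32_t send_se_imm)
+{
+	return (V_RDMA_ATOMIC_WRITE_REQUEST(aw_req & M_RDMA_ATOMIC_WRITE_REQUEST) |
+	    V_RDMA_ATOMIC_WRITE_RESPONSE(aw_rsp & M_RDMA_ATOMIC_WRITE_RESPONSE) |
+	    V_RDMA_SEND_WITH_IMMEDIATE(send_imm & M_RDMA_SEND_WITH_IMMEDIATE) |
+	    V_RDMA_SEND_WITH_SE_IMMEDIATE(send_se_imm & M_RDMA_SEND_WITH_SE_IMMEDIATE));
+}
+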
+#define A_ULP_TX_NVME_TCP_TPT_LLIMIT 0x8fa4
+#define A_ULP_TX_NVME_TCP_TPT_ULIMIT 0x8fa8
+#define A_ULP_TX_NVME_TCP_PBL_LLIMIT 0x8fac
+#define A_ULP_TX_NVME_TCP_PBL_ULIMIT 0x8fb0
#define A_ULP_TX_TLS_IND_CMD 0x8fb8
#define S_TLS_TX_REG_OFF_ADDR 0
@@ -29678,7 +37568,48 @@
#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR)
#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR)
+#define A_ULP_TX_DBG_CTL 0x8fb8
#define A_ULP_TX_TLS_IND_DATA 0x8fbc
+#define A_ULP_TX_DBG_DATA 0x8fbc
+#define A_ULP_TX_TLS_CH0_PERR_CAUSE 0xc
+
+#define S_GLUE_PERR 3
+#define V_GLUE_PERR(x) ((x) << S_GLUE_PERR)
+#define F_GLUE_PERR V_GLUE_PERR(1U)
+
+#define S_DSGL_PERR 2
+#define V_DSGL_PERR(x) ((x) << S_DSGL_PERR)
+#define F_DSGL_PERR V_DSGL_PERR(1U)
+
+#define S_SGE_PERR 1
+#define V_SGE_PERR(x) ((x) << S_SGE_PERR)
+#define F_SGE_PERR V_SGE_PERR(1U)
+
+#define S_KEX_PERR 0
+#define V_KEX_PERR(x) ((x) << S_KEX_PERR)
+#define F_KEX_PERR V_KEX_PERR(1U)
+
+#define A_ULP_TX_TLS_CH0_PERR_ENABLE 0x10
+#define A_ULP_TX_TLS_CH0_HMACCTRL_CFG 0x20
+
+#define S_HMAC_CFG6 12
+#define M_HMAC_CFG6 0x3fU
+#define V_HMAC_CFG6(x) ((x) << S_HMAC_CFG6)
+#define G_HMAC_CFG6(x) (((x) >> S_HMAC_CFG6) & M_HMAC_CFG6)
+
+#define S_HMAC_CFG5 6
+#define M_HMAC_CFG5 0x3fU
+#define V_HMAC_CFG5(x) ((x) << S_HMAC_CFG5)
+#define G_HMAC_CFG5(x) (((x) >> S_HMAC_CFG5) & M_HMAC_CFG5)
+
+#define S_HMAC_CFG4 0
+#define M_HMAC_CFG4 0x3fU
+#define V_HMAC_CFG4(x) ((x) << S_HMAC_CFG4)
+#define G_HMAC_CFG4(x) (((x) >> S_HMAC_CFG4) & M_HMAC_CFG4)
+
+#define A_ULP_TX_TLS_CH1_PERR_CAUSE 0x4c
+#define A_ULP_TX_TLS_CH1_PERR_ENABLE 0x50
+#define A_ULP_TX_TLS_CH1_HMACCTRL_CFG 0x60
/* registers for module PM_RX */
#define PM_RX_BASE_ADDR 0x8fc0
@@ -29703,6 +37634,31 @@
#define V_PREFETCH_ENABLE(x) ((x) << S_PREFETCH_ENABLE)
#define F_PREFETCH_ENABLE V_PREFETCH_ENABLE(1U)
+#define S_CACHE_HOLD 13
+#define V_CACHE_HOLD(x) ((x) << S_CACHE_HOLD)
+#define F_CACHE_HOLD V_CACHE_HOLD(1U)
+
+#define S_CACHE_INIT_DONE 12
+#define V_CACHE_INIT_DONE(x) ((x) << S_CACHE_INIT_DONE)
+#define F_CACHE_INIT_DONE V_CACHE_INIT_DONE(1U)
+
+#define S_CACHE_DEPTH 8
+#define M_CACHE_DEPTH 0xfU
+#define V_CACHE_DEPTH(x) ((x) << S_CACHE_DEPTH)
+#define G_CACHE_DEPTH(x) (((x) >> S_CACHE_DEPTH) & M_CACHE_DEPTH)
+
+#define S_CACHE_INIT 7
+#define V_CACHE_INIT(x) ((x) << S_CACHE_INIT)
+#define F_CACHE_INIT V_CACHE_INIT(1U)
+
+#define S_CACHE_SLEEP 6
+#define V_CACHE_SLEEP(x) ((x) << S_CACHE_SLEEP)
+#define F_CACHE_SLEEP V_CACHE_SLEEP(1U)
+
+#define S_CACHE_BYPASS 5
+#define V_CACHE_BYPASS(x) ((x) << S_CACHE_BYPASS)
+#define F_CACHE_BYPASS V_CACHE_BYPASS(1U)
+
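+/*
+ * Speculative sketch, not derived from documentation: the bits above
+ * suggest an init handshake (set F_CACHE_INIT, poll F_CACHE_INIT_DONE).
+ * 'rd' and 'wr' stand in for whatever 32-bit register accessors the
+ * driver provides.
+ */
+static inline int
+pmrx_cache_init(uint32_t (*rd)(uint32_t), void (*wr)(uint32_t, uint32_t),
+    uint32_t reg, int attempts)
+{
+	wr(reg, rd(reg) | F_CACHE_INIT);
+	while (attempts-- > 0) {
+		if (rd(reg) & F_CACHE_INIT_DONE)
+			return (0);
+	}
+	return (-1);		/* gave up waiting */
+}
+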
#define A_PM_RX_STAT_CONFIG 0x8fc8
#define A_PM_RX_STAT_COUNT 0x8fcc
#define A_PM_RX_STAT_LSB 0x8fd0
@@ -29723,6 +37679,11 @@
#define V_PMDBGADDR(x) ((x) << S_PMDBGADDR)
#define G_PMDBGADDR(x) (((x) >> S_PMDBGADDR) & M_PMDBGADDR)
+#define S_T7_OSPIWRBUSY_T5 21
+#define M_T7_OSPIWRBUSY_T5 0xfU
+#define V_T7_OSPIWRBUSY_T5(x) ((x) << S_T7_OSPIWRBUSY_T5)
+#define G_T7_OSPIWRBUSY_T5(x) (((x) >> S_T7_OSPIWRBUSY_T5) & M_T7_OSPIWRBUSY_T5)
+
#define A_PM_RX_STAT_MSB 0x8fd4
#define A_PM_RX_DBG_DATA 0x8fd4
#define A_PM_RX_INT_ENABLE 0x8fd8
@@ -29843,7 +37804,36 @@
#define V_SDC_ERR(x) ((x) << S_SDC_ERR)
#define F_SDC_ERR V_SDC_ERR(1U)
+#define S_MASTER_PERR 31
+#define V_MASTER_PERR(x) ((x) << S_MASTER_PERR)
+#define F_MASTER_PERR V_MASTER_PERR(1U)
+
+#define S_T7_OSPI_OVERFLOW3 30
+#define V_T7_OSPI_OVERFLOW3(x) ((x) << S_T7_OSPI_OVERFLOW3)
+#define F_T7_OSPI_OVERFLOW3 V_T7_OSPI_OVERFLOW3(1U)
+
+#define S_T7_OSPI_OVERFLOW2 29
+#define V_T7_OSPI_OVERFLOW2(x) ((x) << S_T7_OSPI_OVERFLOW2)
+#define F_T7_OSPI_OVERFLOW2 V_T7_OSPI_OVERFLOW2(1U)
+
#define A_PM_RX_INT_CAUSE 0x8fdc
+
+#define S_CACHE_SRAM_ERROR 3
+#define V_CACHE_SRAM_ERROR(x) ((x) << S_CACHE_SRAM_ERROR)
+#define F_CACHE_SRAM_ERROR V_CACHE_SRAM_ERROR(1U)
+
+#define S_CACHE_LRU_ERROR 2
+#define V_CACHE_LRU_ERROR(x) ((x) << S_CACHE_LRU_ERROR)
+#define F_CACHE_LRU_ERROR V_CACHE_LRU_ERROR(1U)
+
+#define S_CACHE_ISLAND_ERROR 1
+#define V_CACHE_ISLAND_ERROR(x) ((x) << S_CACHE_ISLAND_ERROR)
+#define F_CACHE_ISLAND_ERROR V_CACHE_ISLAND_ERROR(1U)
+
+#define S_CACHE_CTRL_ERROR 0
+#define V_CACHE_CTRL_ERROR(x) ((x) << S_CACHE_CTRL_ERROR)
+#define F_CACHE_CTRL_ERROR V_CACHE_CTRL_ERROR(1U)
+
#define A_PM_RX_ISPI_DBG_4B_DATA0 0x10000
#define A_PM_RX_ISPI_DBG_4B_DATA1 0x10001
#define A_PM_RX_ISPI_DBG_4B_DATA2 0x10002
@@ -29959,12 +37949,25 @@
#define V_CHNL0_MAX_DEFICIT_CNT(x) ((x) << S_CHNL0_MAX_DEFICIT_CNT)
#define G_CHNL0_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL0_MAX_DEFICIT_CNT) & M_CHNL0_MAX_DEFICIT_CNT)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT0 0x1001c
#define A_PM_RX_FEATURE_EN 0x1001d
#define S_PIO_CH_DEFICIT_CTL_EN_RX 0
#define V_PIO_CH_DEFICIT_CTL_EN_RX(x) ((x) << S_PIO_CH_DEFICIT_CTL_EN_RX)
#define F_PIO_CH_DEFICIT_CTL_EN_RX V_PIO_CH_DEFICIT_CTL_EN_RX(1U)
+#define A_PM_RX_PRFTCH_WRR_MAX_DEFICIT_CNT1 0x1001d
+
+#define S_CHNL3_MAX_DEFICIT_CNT 16
+#define M_CHNL3_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL3_MAX_DEFICIT_CNT(x) ((x) << S_CHNL3_MAX_DEFICIT_CNT)
+#define G_CHNL3_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL3_MAX_DEFICIT_CNT) & M_CHNL3_MAX_DEFICIT_CNT)
+
+#define S_CHNL2_MAX_DEFICIT_CNT 0
+#define M_CHNL2_MAX_DEFICIT_CNT 0xffffU
+#define V_CHNL2_MAX_DEFICIT_CNT(x) ((x) << S_CHNL2_MAX_DEFICIT_CNT)
+#define G_CHNL2_MAX_DEFICIT_CNT(x) (((x) >> S_CHNL2_MAX_DEFICIT_CNT) & M_CHNL2_MAX_DEFICIT_CNT)
+
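+/*
+ * Illustrative sketch: the _PRFTCH_WRR_MAX_DEFICIT_CNT1 register packs
+ * two independent 16-bit limits into one 32-bit word, so composing and
+ * decomposing is plain macro arithmetic:
+ *
+ *     v  = V_CHNL3_MAX_DEFICIT_CNT(hi) | V_CHNL2_MAX_DEFICIT_CNT(lo);
+ *     lo = G_CHNL2_MAX_DEFICIT_CNT(v);
+ */
+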
#define A_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x1001e
#define S_CH0_OSPI_DEFICIT_THRSHLD 0
@@ -30245,16 +38248,6 @@
#define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL)
#define G_RX_C_TXAFULL(x) (((x) >> S_RX_C_TXAFULL) & M_RX_C_TXAFULL)
-#define S_T6_RX_PCMD_DRDY 26
-#define M_T6_RX_PCMD_DRDY 0x3U
-#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY)
-#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY)
-
-#define S_T6_RX_PCMD_SRDY 24
-#define M_T6_RX_PCMD_SRDY 0x3U
-#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY)
-#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY)
-
#define A_PM_RX_DBG_STAT6 0x10027
#define S_RX_M_INTRNL_FIFO_CNT 4
@@ -30434,6 +38427,179 @@
#define V_RX_BUNDLE_LEN0(x) ((x) << S_RX_BUNDLE_LEN0)
#define G_RX_BUNDLE_LEN0(x) (((x) >> S_RX_BUNDLE_LEN0) & M_RX_BUNDLE_LEN0)
+#define A_PM_RX_INT_CAUSE_MASK_HALT_2 0x10049
+#define A_PM_RX_INT_ENABLE_2 0x10060
+
+#define S_CACHE_SRAM_ODD_CERR 12
+#define V_CACHE_SRAM_ODD_CERR(x) ((x) << S_CACHE_SRAM_ODD_CERR)
+#define F_CACHE_SRAM_ODD_CERR V_CACHE_SRAM_ODD_CERR(1U)
+
+#define S_CACHE_SRAM_EVEN_CERR 11
+#define V_CACHE_SRAM_EVEN_CERR(x) ((x) << S_CACHE_SRAM_EVEN_CERR)
+#define F_CACHE_SRAM_EVEN_CERR V_CACHE_SRAM_EVEN_CERR(1U)
+
+#define S_CACHE_LRU_LEFT_CERR 10
+#define V_CACHE_LRU_LEFT_CERR(x) ((x) << S_CACHE_LRU_LEFT_CERR)
+#define F_CACHE_LRU_LEFT_CERR V_CACHE_LRU_LEFT_CERR(1U)
+
+#define S_CACHE_LRU_RIGHT_CERR 9
+#define V_CACHE_LRU_RIGHT_CERR(x) ((x) << S_CACHE_LRU_RIGHT_CERR)
+#define F_CACHE_LRU_RIGHT_CERR V_CACHE_LRU_RIGHT_CERR(1U)
+
+#define S_CACHE_ISLAND_CERR 8
+#define V_CACHE_ISLAND_CERR(x) ((x) << S_CACHE_ISLAND_CERR)
+#define F_CACHE_ISLAND_CERR V_CACHE_ISLAND_CERR(1U)
+
+#define S_OCSPI_CERR 7
+#define V_OCSPI_CERR(x) ((x) << S_OCSPI_CERR)
+#define F_OCSPI_CERR V_OCSPI_CERR(1U)
+
+#define S_IESPI_CERR 6
+#define V_IESPI_CERR(x) ((x) << S_IESPI_CERR)
+#define F_IESPI_CERR V_IESPI_CERR(1U)
+
+#define S_OCSPI2_RX_FRAMING_ERROR 5
+#define V_OCSPI2_RX_FRAMING_ERROR(x) ((x) << S_OCSPI2_RX_FRAMING_ERROR)
+#define F_OCSPI2_RX_FRAMING_ERROR V_OCSPI2_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_RX_FRAMING_ERROR 4
+#define V_OCSPI3_RX_FRAMING_ERROR(x) ((x) << S_OCSPI3_RX_FRAMING_ERROR)
+#define F_OCSPI3_RX_FRAMING_ERROR V_OCSPI3_RX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_TX_FRAMING_ERROR 3
+#define V_OCSPI2_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_TX_FRAMING_ERROR)
+#define F_OCSPI2_TX_FRAMING_ERROR V_OCSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_TX_FRAMING_ERROR 2
+#define V_OCSPI3_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_TX_FRAMING_ERROR)
+#define F_OCSPI3_TX_FRAMING_ERROR V_OCSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI2_OFIFO2X_TX_FRAMING_ERROR V_OCSPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_OCSPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_OCSPI3_OFIFO2X_TX_FRAMING_ERROR V_OCSPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
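+/*
+ * Illustrative sketch (assumes the driver's t4_write_reg() helper): an
+ * _INT_ENABLE register is armed by OR-ing together the F_ flags of
+ * interest; the matching _INT_CAUSE register is then typically cleared
+ * by writing the latched bits back, e.g.:
+ *
+ *     t4_write_reg(sc, A_PM_RX_INT_ENABLE_2,
+ *         F_OCSPI_CERR | F_IESPI_CERR | F_CACHE_ISLAND_CERR);
+ */
+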
+#define A_PM_RX_INT_CAUSE_2 0x10061
+#define A_PM_RX_PERR_ENABLE 0x10062
+
+#define S_T7_SDC_ERR 31
+#define V_T7_SDC_ERR(x) ((x) << S_T7_SDC_ERR)
+#define F_T7_SDC_ERR V_T7_SDC_ERR(1U)
+
+#define S_T7_MA_INTF_SDC_ERR 30
+#define V_T7_MA_INTF_SDC_ERR(x) ((x) << S_T7_MA_INTF_SDC_ERR)
+#define F_T7_MA_INTF_SDC_ERR V_T7_MA_INTF_SDC_ERR(1U)
+
+#define S_E_PCMD_PERR 21
+#define V_E_PCMD_PERR(x) ((x) << S_E_PCMD_PERR)
+#define F_E_PCMD_PERR V_E_PCMD_PERR(1U)
+
+#define S_CACHE_RSP_DFIFO_PERR 20
+#define V_CACHE_RSP_DFIFO_PERR(x) ((x) << S_CACHE_RSP_DFIFO_PERR)
+#define F_CACHE_RSP_DFIFO_PERR V_CACHE_RSP_DFIFO_PERR(1U)
+
+#define S_CACHE_SRAM_ODD_PERR 19
+#define V_CACHE_SRAM_ODD_PERR(x) ((x) << S_CACHE_SRAM_ODD_PERR)
+#define F_CACHE_SRAM_ODD_PERR V_CACHE_SRAM_ODD_PERR(1U)
+
+#define S_CACHE_SRAM_EVEN_PERR 18
+#define V_CACHE_SRAM_EVEN_PERR(x) ((x) << S_CACHE_SRAM_EVEN_PERR)
+#define F_CACHE_SRAM_EVEN_PERR V_CACHE_SRAM_EVEN_PERR(1U)
+
+#define S_CACHE_RSVD_PERR 17
+#define V_CACHE_RSVD_PERR(x) ((x) << S_CACHE_RSVD_PERR)
+#define F_CACHE_RSVD_PERR V_CACHE_RSVD_PERR(1U)
+
+#define S_CACHE_LRU_LEFT_PERR 16
+#define V_CACHE_LRU_LEFT_PERR(x) ((x) << S_CACHE_LRU_LEFT_PERR)
+#define F_CACHE_LRU_LEFT_PERR V_CACHE_LRU_LEFT_PERR(1U)
+
+#define S_CACHE_LRU_RIGHT_PERR 15
+#define V_CACHE_LRU_RIGHT_PERR(x) ((x) << S_CACHE_LRU_RIGHT_PERR)
+#define F_CACHE_LRU_RIGHT_PERR V_CACHE_LRU_RIGHT_PERR(1U)
+
+#define S_CACHE_RSP_CMD_PERR 14
+#define V_CACHE_RSP_CMD_PERR(x) ((x) << S_CACHE_RSP_CMD_PERR)
+#define F_CACHE_RSP_CMD_PERR V_CACHE_RSP_CMD_PERR(1U)
+
+#define S_CACHE_SRAM_CMD_PERR 13
+#define V_CACHE_SRAM_CMD_PERR(x) ((x) << S_CACHE_SRAM_CMD_PERR)
+#define F_CACHE_SRAM_CMD_PERR V_CACHE_SRAM_CMD_PERR(1U)
+
+#define S_CACHE_MA_CMD_PERR 12
+#define V_CACHE_MA_CMD_PERR(x) ((x) << S_CACHE_MA_CMD_PERR)
+#define F_CACHE_MA_CMD_PERR V_CACHE_MA_CMD_PERR(1U)
+
+#define S_CACHE_TCAM_PERR 11
+#define V_CACHE_TCAM_PERR(x) ((x) << S_CACHE_TCAM_PERR)
+#define F_CACHE_TCAM_PERR V_CACHE_TCAM_PERR(1U)
+
+#define S_CACHE_ISLAND_PERR 10
+#define V_CACHE_ISLAND_PERR(x) ((x) << S_CACHE_ISLAND_PERR)
+#define F_CACHE_ISLAND_PERR V_CACHE_ISLAND_PERR(1U)
+
+#define S_MC_WCNT_FIFO_PERR 9
+#define V_MC_WCNT_FIFO_PERR(x) ((x) << S_MC_WCNT_FIFO_PERR)
+#define F_MC_WCNT_FIFO_PERR V_MC_WCNT_FIFO_PERR(1U)
+
+#define S_MC_WDATA_FIFO_PERR 8
+#define V_MC_WDATA_FIFO_PERR(x) ((x) << S_MC_WDATA_FIFO_PERR)
+#define F_MC_WDATA_FIFO_PERR V_MC_WDATA_FIFO_PERR(1U)
+
+#define S_MC_RCNT_FIFO_PERR 7
+#define V_MC_RCNT_FIFO_PERR(x) ((x) << S_MC_RCNT_FIFO_PERR)
+#define F_MC_RCNT_FIFO_PERR V_MC_RCNT_FIFO_PERR(1U)
+
+#define S_MC_RDATA_FIFO_PERR 6
+#define V_MC_RDATA_FIFO_PERR(x) ((x) << S_MC_RDATA_FIFO_PERR)
+#define F_MC_RDATA_FIFO_PERR V_MC_RDATA_FIFO_PERR(1U)
+
+#define S_TOKEN_FIFO_PERR 5
+#define V_TOKEN_FIFO_PERR(x) ((x) << S_TOKEN_FIFO_PERR)
+#define F_TOKEN_FIFO_PERR V_TOKEN_FIFO_PERR(1U)
+
+#define S_T7_BUNDLE_LEN_PARERR 4
+#define V_T7_BUNDLE_LEN_PARERR(x) ((x) << S_T7_BUNDLE_LEN_PARERR)
+#define F_T7_BUNDLE_LEN_PARERR V_T7_BUNDLE_LEN_PARERR(1U)
+
+#define A_PM_RX_PERR_CAUSE 0x10063
+#define A_PM_RX_EXT_CFIFO_CONFIG0 0x10070
+
+#define S_CH1_PTR_MAX 17
+#define M_CH1_PTR_MAX 0x7fffU
+#define V_CH1_PTR_MAX(x) ((x) << S_CH1_PTR_MAX)
+#define G_CH1_PTR_MAX(x) (((x) >> S_CH1_PTR_MAX) & M_CH1_PTR_MAX)
+
+#define S_CH0_PTR_MAX 1
+#define M_CH0_PTR_MAX 0x7fffU
+#define V_CH0_PTR_MAX(x) ((x) << S_CH0_PTR_MAX)
+#define G_CH0_PTR_MAX(x) (((x) >> S_CH0_PTR_MAX) & M_CH0_PTR_MAX)
+
+#define S_STROBE 0
+#define V_STROBE(x) ((x) << S_STROBE)
+#define F_STROBE V_STROBE(1U)
+
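+/*
+ * Illustrative sketch (the strobe semantics here are an assumption):
+ * CONFIG0 carries both channel pointer limits plus a strobe in bit 0,
+ * suggesting updates are composed in one word and latched via F_STROBE:
+ *
+ *     v = V_CH1_PTR_MAX(ch1) | V_CH0_PTR_MAX(ch0) | F_STROBE;
+ */
+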
+#define A_PM_RX_EXT_CFIFO_CONFIG1 0x10071
+
+#define S_CH2_PTR_MAX 1
+#define M_CH2_PTR_MAX 0x7fffU
+#define V_CH2_PTR_MAX(x) ((x) << S_CH2_PTR_MAX)
+#define G_CH2_PTR_MAX(x) (((x) >> S_CH2_PTR_MAX) & M_CH2_PTR_MAX)
+
+#define A_PM_RX_EXT_EFIFO_CONFIG0 0x10072
+#define A_PM_RX_EXT_EFIFO_CONFIG1 0x10073
+#define A_T7_PM_RX_CH0_OSPI_DEFICIT_THRSHLD 0x10074
+#define A_T7_PM_RX_CH1_OSPI_DEFICIT_THRSHLD 0x10075
+#define A_PM_RX_CH2_OSPI_DEFICIT_THRSHLD 0x10076
+#define A_PM_RX_CH3_OSPI_DEFICIT_THRSHLD 0x10077
+#define A_T7_PM_RX_FEATURE_EN 0x10078
+#define A_PM_RX_TCAM_BIST_CTRL 0x10080
+#define A_PM_RX_TCAM_BIST_CB_PASS 0x10081
+#define A_PM_RX_TCAM_BIST_CB_BUSY 0x10082
+
/* registers for module PM_TX */
#define PM_TX_BASE_ADDR 0x8fe0
@@ -30613,6 +38779,118 @@
#define V_C_PCMD_PAR_ERROR(x) ((x) << S_C_PCMD_PAR_ERROR)
#define F_C_PCMD_PAR_ERROR V_C_PCMD_PAR_ERROR(1U)
+#define S_T7_ZERO_C_CMD_ERROR 30
+#define V_T7_ZERO_C_CMD_ERROR(x) ((x) << S_T7_ZERO_C_CMD_ERROR)
+#define F_T7_ZERO_C_CMD_ERROR V_T7_ZERO_C_CMD_ERROR(1U)
+
+#define S_OESPI_COR_ERR 29
+#define V_OESPI_COR_ERR(x) ((x) << S_OESPI_COR_ERR)
+#define F_OESPI_COR_ERR V_OESPI_COR_ERR(1U)
+
+#define S_ICSPI_COR_ERR 28
+#define V_ICSPI_COR_ERR(x) ((x) << S_ICSPI_COR_ERR)
+#define F_ICSPI_COR_ERR V_ICSPI_COR_ERR(1U)
+
+#define S_ICSPI_OVFL 24
+#define V_ICSPI_OVFL(x) ((x) << S_ICSPI_OVFL)
+#define F_ICSPI_OVFL V_ICSPI_OVFL(1U)
+
+#define S_PCMD_LEN_OVFL3 23
+#define V_PCMD_LEN_OVFL3(x) ((x) << S_PCMD_LEN_OVFL3)
+#define F_PCMD_LEN_OVFL3 V_PCMD_LEN_OVFL3(1U)
+
+#define S_T7_PCMD_LEN_OVFL2 22
+#define V_T7_PCMD_LEN_OVFL2(x) ((x) << S_T7_PCMD_LEN_OVFL2)
+#define F_T7_PCMD_LEN_OVFL2 V_T7_PCMD_LEN_OVFL2(1U)
+
+#define S_T7_PCMD_LEN_OVFL1 21
+#define V_T7_PCMD_LEN_OVFL1(x) ((x) << S_T7_PCMD_LEN_OVFL1)
+#define F_T7_PCMD_LEN_OVFL1 V_T7_PCMD_LEN_OVFL1(1U)
+
+#define S_T7_PCMD_LEN_OVFL0 20
+#define V_T7_PCMD_LEN_OVFL0(x) ((x) << S_T7_PCMD_LEN_OVFL0)
+#define F_T7_PCMD_LEN_OVFL0 V_T7_PCMD_LEN_OVFL0(1U)
+
+#define S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR 19
+#define V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI0_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR 18
+#define V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI1_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR 17
+#define V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI2_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR 16
+#define V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR)
+#define F_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR V_T7_ICSPI3_FIFO2X_RX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI0_TX_FRAMING_ERROR 15
+#define V_T7_ICSPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI0_TX_FRAMING_ERROR)
+#define F_T7_ICSPI0_TX_FRAMING_ERROR V_T7_ICSPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI1_TX_FRAMING_ERROR 14
+#define V_T7_ICSPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI1_TX_FRAMING_ERROR)
+#define F_T7_ICSPI1_TX_FRAMING_ERROR V_T7_ICSPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI2_TX_FRAMING_ERROR 13
+#define V_T7_ICSPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI2_TX_FRAMING_ERROR)
+#define F_T7_ICSPI2_TX_FRAMING_ERROR V_T7_ICSPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_ICSPI3_TX_FRAMING_ERROR 12
+#define V_T7_ICSPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_ICSPI3_TX_FRAMING_ERROR)
+#define F_T7_ICSPI3_TX_FRAMING_ERROR V_T7_ICSPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_RX_FRAMING_ERROR 11
+#define V_T7_OESPI0_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_RX_FRAMING_ERROR)
+#define F_T7_OESPI0_RX_FRAMING_ERROR V_T7_OESPI0_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_RX_FRAMING_ERROR 10
+#define V_T7_OESPI1_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_RX_FRAMING_ERROR)
+#define F_T7_OESPI1_RX_FRAMING_ERROR V_T7_OESPI1_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_RX_FRAMING_ERROR 9
+#define V_T7_OESPI2_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_RX_FRAMING_ERROR)
+#define F_T7_OESPI2_RX_FRAMING_ERROR V_T7_OESPI2_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_RX_FRAMING_ERROR 8
+#define V_T7_OESPI3_RX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_RX_FRAMING_ERROR)
+#define F_T7_OESPI3_RX_FRAMING_ERROR V_T7_OESPI3_RX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_TX_FRAMING_ERROR 7
+#define V_T7_OESPI0_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_TX_FRAMING_ERROR V_T7_OESPI0_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_TX_FRAMING_ERROR 6
+#define V_T7_OESPI1_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_TX_FRAMING_ERROR V_T7_OESPI1_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_TX_FRAMING_ERROR 5
+#define V_T7_OESPI2_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_TX_FRAMING_ERROR V_T7_OESPI2_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_TX_FRAMING_ERROR 4
+#define V_T7_OESPI3_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_TX_FRAMING_ERROR V_T7_OESPI3_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR 3
+#define V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI0_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR 2
+#define V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI1_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR 1
+#define V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI2_OFIFO2X_TX_FRAMING_ERROR(1U)
+
+#define S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR 0
+#define V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(x) ((x) << S_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR)
+#define F_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR V_T7_OESPI3_OFIFO2X_TX_FRAMING_ERROR(1U)
+
#define A_PM_TX_INT_CAUSE 0x8ffc
#define S_ZERO_C_CMD_ERROR 28
@@ -30624,23 +38902,51 @@
#define F_OSPI_OR_BUNDLE_LEN_PAR_ERR V_OSPI_OR_BUNDLE_LEN_PAR_ERR(1U)
#define A_PM_TX_ISPI_DBG_4B_DATA0 0x10000
+#define A_T7_PM_TX_DBG_STAT_MSB 0x10000
#define A_PM_TX_ISPI_DBG_4B_DATA1 0x10001
+#define A_T7_PM_TX_DBG_STAT_LSB 0x10001
#define A_PM_TX_ISPI_DBG_4B_DATA2 0x10002
+#define A_T7_PM_TX_DBG_RSVD_FLIT_CNT 0x10002
#define A_PM_TX_ISPI_DBG_4B_DATA3 0x10003
+#define A_T7_PM_TX_SDC_EN 0x10003
#define A_PM_TX_ISPI_DBG_4B_DATA4 0x10004
+#define A_T7_PM_TX_INOUT_FIFO_DBG_CHNL_SEL 0x10004
#define A_PM_TX_ISPI_DBG_4B_DATA5 0x10005
+#define A_T7_PM_TX_INOUT_FIFO_DBG_WR 0x10005
#define A_PM_TX_ISPI_DBG_4B_DATA6 0x10006
+#define A_T7_PM_TX_INPUT_FIFO_STR_FWD_EN 0x10006
#define A_PM_TX_ISPI_DBG_4B_DATA7 0x10007
+#define A_T7_PM_TX_FEATURE_EN 0x10007
+
+#define S_IN_AFULL_TH 5
+#define M_IN_AFULL_TH 0x3U
+#define V_IN_AFULL_TH(x) ((x) << S_IN_AFULL_TH)
+#define G_IN_AFULL_TH(x) (((x) >> S_IN_AFULL_TH) & M_IN_AFULL_TH)
+
+#define S_PIO_FROM_CH_EN 4
+#define V_PIO_FROM_CH_EN(x) ((x) << S_PIO_FROM_CH_EN)
+#define F_PIO_FROM_CH_EN V_PIO_FROM_CH_EN(1U)
+
#define A_PM_TX_ISPI_DBG_4B_DATA8 0x10008
+#define A_T7_PM_TX_T5_PM_TX_INT_ENABLE 0x10008
#define A_PM_TX_OSPI_DBG_4B_DATA0 0x10009
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10009
#define A_PM_TX_OSPI_DBG_4B_DATA1 0x1000a
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x1000a
#define A_PM_TX_OSPI_DBG_4B_DATA2 0x1000b
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x1000b
#define A_PM_TX_OSPI_DBG_4B_DATA3 0x1000c
+#define A_T7_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x1000c
#define A_PM_TX_OSPI_DBG_4B_DATA4 0x1000d
+#define A_T7_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x1000d
#define A_PM_TX_OSPI_DBG_4B_DATA5 0x1000e
+#define A_T7_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x1000e
#define A_PM_TX_OSPI_DBG_4B_DATA6 0x1000f
+#define A_T7_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x1000f
#define A_PM_TX_OSPI_DBG_4B_DATA7 0x10010
+#define A_T7_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x10010
#define A_PM_TX_OSPI_DBG_4B_DATA8 0x10011
+#define A_T7_PM_TX_INT_CAUSE_MASK_HALT 0x10011
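+
+/*
+ * Note: the T7_ registers above reuse the 0x10000-0x10011 indirect
+ * offsets that previously decoded as the ISPI/OSPI debug data windows,
+ * which is presumably why they carry the disambiguating T7_ prefix.
+ */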
#define A_PM_TX_OSPI_DBG_4B_DATA9 0x10012
#define A_PM_TX_OSPI_DBG_4B_DATA10 0x10013
#define A_PM_TX_OSPI_DBG_4B_DATA11 0x10014
@@ -30722,6 +39028,48 @@
#define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD3 0x10026
#define A_PM_TX_CH0_OSPI_DEFICIT_THRSHLD 0x10027
#define A_PM_TX_CH1_OSPI_DEFICIT_THRSHLD 0x10028
+#define A_PM_TX_PERR_ENABLE 0x10028
+
+#define S_T7_1_OSPI_OVERFLOW3 23
+#define V_T7_1_OSPI_OVERFLOW3(x) ((x) << S_T7_1_OSPI_OVERFLOW3)
+#define F_T7_1_OSPI_OVERFLOW3 V_T7_1_OSPI_OVERFLOW3(1U)
+
+#define S_T7_1_OSPI_OVERFLOW2 22
+#define V_T7_1_OSPI_OVERFLOW2(x) ((x) << S_T7_1_OSPI_OVERFLOW2)
+#define F_T7_1_OSPI_OVERFLOW2 V_T7_1_OSPI_OVERFLOW2(1U)
+
+#define S_T7_1_OSPI_OVERFLOW1 21
+#define V_T7_1_OSPI_OVERFLOW1(x) ((x) << S_T7_1_OSPI_OVERFLOW1)
+#define F_T7_1_OSPI_OVERFLOW1 V_T7_1_OSPI_OVERFLOW1(1U)
+
+#define S_T7_1_OSPI_OVERFLOW0 20
+#define V_T7_1_OSPI_OVERFLOW0(x) ((x) << S_T7_1_OSPI_OVERFLOW0)
+#define F_T7_1_OSPI_OVERFLOW0 V_T7_1_OSPI_OVERFLOW0(1U)
+
+#define S_T7_BUNDLE_LEN_OVFL_EN 18
+#define V_T7_BUNDLE_LEN_OVFL_EN(x) ((x) << S_T7_BUNDLE_LEN_OVFL_EN)
+#define F_T7_BUNDLE_LEN_OVFL_EN V_T7_BUNDLE_LEN_OVFL_EN(1U)
+
+#define S_T7_M_INTFPERREN 17
+#define V_T7_M_INTFPERREN(x) ((x) << S_T7_M_INTFPERREN)
+#define F_T7_M_INTFPERREN V_T7_M_INTFPERREN(1U)
+
+#define S_T7_1_SDC_ERR 16
+#define V_T7_1_SDC_ERR(x) ((x) << S_T7_1_SDC_ERR)
+#define F_T7_1_SDC_ERR V_T7_1_SDC_ERR(1U)
+
+#define S_TOKEN_PAR_ERROR 5
+#define V_TOKEN_PAR_ERROR(x) ((x) << S_TOKEN_PAR_ERROR)
+#define F_TOKEN_PAR_ERROR V_TOKEN_PAR_ERROR(1U)
+
+#define S_BUNDLE_LEN_PAR_ERROR 4
+#define V_BUNDLE_LEN_PAR_ERROR(x) ((x) << S_BUNDLE_LEN_PAR_ERROR)
+#define F_BUNDLE_LEN_PAR_ERROR V_BUNDLE_LEN_PAR_ERROR(1U)
+
+#define S_C_PCMD_TOKEN_PAR_ERROR 0
+#define V_C_PCMD_TOKEN_PAR_ERROR(x) ((x) << S_C_PCMD_TOKEN_PAR_ERROR)
+#define F_C_PCMD_TOKEN_PAR_ERROR V_C_PCMD_TOKEN_PAR_ERROR(1U)
+
#define A_PM_TX_CH2_OSPI_DEFICIT_THRSHLD 0x10029
#define S_CH2_OSPI_DEFICIT_THRSHLD 0
@@ -30729,6 +39077,7 @@
#define V_CH2_OSPI_DEFICIT_THRSHLD(x) ((x) << S_CH2_OSPI_DEFICIT_THRSHLD)
#define G_CH2_OSPI_DEFICIT_THRSHLD(x) (((x) >> S_CH2_OSPI_DEFICIT_THRSHLD) & M_CH2_OSPI_DEFICIT_THRSHLD)
+#define A_PM_TX_PERR_CAUSE 0x10029
#define A_PM_TX_CH3_OSPI_DEFICIT_THRSHLD 0x1002a
#define S_CH3_OSPI_DEFICIT_THRSHLD 0
@@ -31462,6 +39811,7 @@
#define G_ADDR(x) (((x) >> S_ADDR) & M_ADDR)
#define A_MPS_PORT_TX_PAUSE_SOURCE_L 0x24
+#define A_MPS_VF_TX_MAC_DROP_PP 0x24
#define A_MPS_PORT_TX_PAUSE_SOURCE_H 0x28
#define A_MPS_PORT_PRTY_BUFFER_GROUP_MAP 0x2c
@@ -31547,6 +39897,24 @@
#define V_TXPRTY0(x) ((x) << S_TXPRTY0)
#define G_TXPRTY0(x) (((x) >> S_TXPRTY0) & M_TXPRTY0)
+#define A_MPS_PORT_PRTY_GROUP_MAP 0x34
+#define A_MPS_PORT_TRACE_MAX_CAPTURE_SIZE 0x38
+
+#define S_TX2RX 6
+#define M_TX2RX 0x7U
+#define V_TX2RX(x) ((x) << S_TX2RX)
+#define G_TX2RX(x) (((x) >> S_TX2RX) & M_TX2RX)
+
+#define S_MAC2MPS 3
+#define M_MAC2MPS 0x7U
+#define V_MAC2MPS(x) ((x) << S_MAC2MPS)
+#define G_MAC2MPS(x) (((x) >> S_MAC2MPS) & M_MAC2MPS)
+
+#define S_MPS2MAC 0
+#define M_MPS2MAC 0x7U
+#define V_MPS2MAC(x) ((x) << S_MPS2MAC)
+#define G_MPS2MAC(x) (((x) >> S_MPS2MAC) & M_MPS2MAC)
+
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_L 0x80
#define A_MPS_VF_STAT_TX_VF_BCAST_BYTES_H 0x84
#define A_MPS_VF_STAT_TX_VF_BCAST_FRAMES_L 0x88
@@ -31578,7 +39946,9 @@
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_L 0xf0
#define A_MPS_VF_STAT_RX_VF_UCAST_FRAMES_H 0xf4
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_L 0xf8
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_L 0xf8
#define A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H 0xfc
+#define A_MPS_VF_STAT_RX_VF_ERR_DROP_FRAMES_H 0xfc
#define A_MPS_PORT_RX_CTL 0x100
#define S_NO_RPLCT_M 20
@@ -31682,6 +40052,26 @@
#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC)
#define F_HASH_EN_MAC V_HASH_EN_MAC(1U)
+#define S_TRANS_ENCAP_EN 30
+#define V_TRANS_ENCAP_EN(x) ((x) << S_TRANS_ENCAP_EN)
+#define F_TRANS_ENCAP_EN V_TRANS_ENCAP_EN(1U)
+
+#define S_CRYPTO_DUMMY_PKT_CHK_EN 29
+#define V_CRYPTO_DUMMY_PKT_CHK_EN(x) ((x) << S_CRYPTO_DUMMY_PKT_CHK_EN)
+#define F_CRYPTO_DUMMY_PKT_CHK_EN V_CRYPTO_DUMMY_PKT_CHK_EN(1U)
+
+#define S_PASS_HPROM 28
+#define V_PASS_HPROM(x) ((x) << S_PASS_HPROM)
+#define F_PASS_HPROM V_PASS_HPROM(1U)
+
+#define S_PASS_PROM 27
+#define V_PASS_PROM(x) ((x) << S_PASS_PROM)
+#define F_PASS_PROM V_PASS_PROM(1U)
+
+#define S_ENCAP_ONLY_IF_OUTER_HIT 26
+#define V_ENCAP_ONLY_IF_OUTER_HIT(x) ((x) << S_ENCAP_ONLY_IF_OUTER_HIT)
+#define F_ENCAP_ONLY_IF_OUTER_HIT V_ENCAP_ONLY_IF_OUTER_HIT(1U)
+
#define A_MPS_PORT_RX_MTU 0x104
#define A_MPS_PORT_RX_PF_MAP 0x108
#define A_MPS_PORT_RX_VF_MAP0 0x10c
@@ -31924,6 +40314,23 @@
#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL)
#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL)
+#define A_MPS_PORT_MAC_RX_DROP_EN_PP 0x16c
+
+#define S_PRIO 0
+#define M_PRIO 0xffU
+#define V_PRIO(x) ((x) << S_PRIO)
+#define G_PRIO(x) (((x) >> S_PRIO) & M_PRIO)
+
+#define A_MPS_PORT_RX_INT_RSS_HASH 0x170
+#define A_MPS_PORT_RX_INT_RSS_CONTROL 0x174
+#define A_MPS_PORT_RX_CNT_DBG_CTL 0x178
+
+#define S_DBG_TYPE 0
+#define M_DBG_TYPE 0x1fU
+#define V_DBG_TYPE(x) ((x) << S_DBG_TYPE)
+#define G_DBG_TYPE(x) (((x) >> S_DBG_TYPE) & M_DBG_TYPE)
+
+#define A_MPS_PORT_RX_CNT_DBG 0x17c
#define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190
#define S_CREDIT 0
@@ -31984,6 +40391,10 @@
#define V_ON_PENDING(x) ((x) << S_ON_PENDING)
#define G_ON_PENDING(x) (((x) >> S_ON_PENDING) & M_ON_PENDING)
+#define A_MPS_PORT_TX_MAC_DROP_PP 0x1d4
+#define A_MPS_PORT_TX_LPBK_DROP_PP 0x1d8
+#define A_MPS_PORT_TX_MAC_DROP_CNT 0x1dc
+#define A_MPS_PORT_TX_LPBK_DROP_CNT 0x1e0
#define A_MPS_PORT_CLS_HASH_SRAM 0x200
#define S_VALID 20
@@ -32097,6 +40508,13 @@
#define V_TAG(x) ((x) << S_TAG)
#define G_TAG(x) (((x) >> S_TAG) & M_TAG)
+#define A_MPS_PF_TX_MAC_DROP_PP 0x2e4
+
+#define S_T7_DROPEN 0
+#define M_T7_DROPEN 0xffU
+#define V_T7_DROPEN(x) ((x) << S_T7_DROPEN)
+#define G_T7_DROPEN(x) (((x) >> S_T7_DROPEN) & M_T7_DROPEN)
+
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_L 0x300
#define A_MPS_PF_STAT_TX_PF_BCAST_BYTES_H 0x304
#define A_MPS_PORT_CLS_HASH_CTL 0x304
@@ -32112,35 +40530,9 @@
#define V_PROMISCEN(x) ((x) << S_PROMISCEN)
#define F_PROMISCEN V_PROMISCEN(1U)
-#define S_T6_MULTILISTEN 16
-#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN)
-#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U)
-
-#define S_T6_PRIORITY 13
-#define M_T6_PRIORITY 0x7U
-#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY)
-#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY)
-
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_L 0x30c
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310
#define A_MPS_PORT_CLS_BMC_MAC_ADDR_H 0x310
@@ -32156,6 +40548,7 @@
#define V_MATCHALL(x) ((x) << S_MATCHALL)
#define F_MATCHALL V_MATCHALL(1U)
+#define A_MPS_PORT_CLS_BMC_MAC0_ADDR_H 0x310
#define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314
#define A_MPS_PORT_CLS_BMC_VLAN 0x314
@@ -32167,6 +40560,7 @@
#define V_VLAN_VLD(x) ((x) << S_VLAN_VLD)
#define F_VLAN_VLD V_VLAN_VLD(1U)
+#define A_MPS_PORT_CLS_BMC_VLAN0 0x314
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_L 0x318
#define A_MPS_PORT_CLS_CTL 0x318
@@ -32218,6 +40612,18 @@
#define V_DMAC_TCAM_SEL(x) ((x) << S_DMAC_TCAM_SEL)
#define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL)
+#define S_SMAC_INDEX_EN 17
+#define V_SMAC_INDEX_EN(x) ((x) << S_SMAC_INDEX_EN)
+#define F_SMAC_INDEX_EN V_SMAC_INDEX_EN(1U)
+
+#define S_LPBK_TCAM2_HIT_PRIORITY 16
+#define V_LPBK_TCAM2_HIT_PRIORITY(x) ((x) << S_LPBK_TCAM2_HIT_PRIORITY)
+#define F_LPBK_TCAM2_HIT_PRIORITY V_LPBK_TCAM2_HIT_PRIORITY(1U)
+
+#define S_TCAM2_HIT_PRIORITY 15
+#define V_TCAM2_HIT_PRIORITY(x) ((x) << S_TCAM2_HIT_PRIORITY)
+#define F_TCAM2_HIT_PRIORITY V_TCAM2_HIT_PRIORITY(1U)
+
#define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c
#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c
@@ -32238,14 +40644,23 @@
#define F_EN2 V_EN2(1U)
#define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_L 0x324
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328
+#define A_MPS_PORT_CLS_BMC_MAC1_ADDR_H 0x328
#define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_L 0x32c
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_L 0x330
+#define A_MPS_PORT_CLS_BMC_MAC2_ADDR_H 0x330
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_BYTES_H 0x334
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_L 0x334
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_L 0x338
+#define A_MPS_PORT_CLS_BMC_MAC3_ADDR_H 0x338
#define A_MPS_PF_STAT_TX_PF_OFFLOAD_FRAMES_H 0x33c
+#define A_MPS_PORT_CLS_BMC_VLAN1 0x33c
#define A_MPS_PF_STAT_RX_PF_BYTES_L 0x340
+#define A_MPS_PORT_CLS_BMC_VLAN2 0x340
#define A_MPS_PF_STAT_RX_PF_BYTES_H 0x344
+#define A_MPS_PORT_CLS_BMC_VLAN3 0x344
#define A_MPS_PF_STAT_RX_PF_FRAMES_L 0x348
#define A_MPS_PF_STAT_RX_PF_FRAMES_H 0x34c
#define A_MPS_PF_STAT_RX_PF_BCAST_BYTES_L 0x350
@@ -32261,7 +40676,9 @@
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_L 0x378
#define A_MPS_PF_STAT_RX_PF_UCAST_FRAMES_H 0x37c
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_L 0x380
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_L 0x380
#define A_MPS_PF_STAT_RX_PF_ERR_FRAMES_H 0x384
+#define A_MPS_PF_STAT_RX_PF_ERR_DROP_FRAMES_H 0x384
#define A_MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
#define A_MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
#define A_MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
@@ -32393,6 +40810,22 @@
#define A_MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_L 0x618
#define A_MPS_PORT_STAT_RX_PORT_MAC_ERROR_H 0x61c
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_L 0x620
+#define A_MPS_PORT_STAT_RX_PRIO_0_DROP_FRAME_H 0x624
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_L 0x628
+#define A_MPS_PORT_STAT_RX_PRIO_1_DROP_FRAME_H 0x62c
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_L 0x630
+#define A_MPS_PORT_STAT_RX_PRIO_2_DROP_FRAME_H 0x634
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_L 0x638
+#define A_MPS_PORT_STAT_RX_PRIO_3_DROP_FRAME_H 0x63c
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_L 0x640
+#define A_MPS_PORT_STAT_RX_PRIO_4_DROP_FRAME_H 0x644
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_L 0x648
+#define A_MPS_PORT_STAT_RX_PRIO_5_DROP_FRAME_H 0x64c
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_L 0x650
+#define A_MPS_PORT_STAT_RX_PRIO_6_DROP_FRAME_H 0x654
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_L 0x658
+#define A_MPS_PORT_STAT_RX_PRIO_7_DROP_FRAME_H 0x65c
#define A_MPS_CMN_CTL 0x9000
#define S_DETECT8023 3
@@ -32425,6 +40858,46 @@
#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE)
#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE)
+#define S_PT1_SEL_CFG 21
+#define V_PT1_SEL_CFG(x) ((x) << S_PT1_SEL_CFG)
+#define F_PT1_SEL_CFG V_PT1_SEL_CFG(1U)
+
+#define S_BUG_42938_EN 20
+#define V_BUG_42938_EN(x) ((x) << S_BUG_42938_EN)
+#define F_BUG_42938_EN V_BUG_42938_EN(1U)
+
+#define S_NO_BYPASS_PAUSE 19
+#define V_NO_BYPASS_PAUSE(x) ((x) << S_NO_BYPASS_PAUSE)
+#define F_NO_BYPASS_PAUSE V_NO_BYPASS_PAUSE(1U)
+
+#define S_BYPASS_PAUSE 18
+#define V_BYPASS_PAUSE(x) ((x) << S_BYPASS_PAUSE)
+#define F_BYPASS_PAUSE V_BYPASS_PAUSE(1U)
+
+#define S_PBUS_EN 16
+#define M_PBUS_EN 0x3U
+#define V_PBUS_EN(x) ((x) << S_PBUS_EN)
+#define G_PBUS_EN(x) (((x) >> S_PBUS_EN) & M_PBUS_EN)
+
+#define S_INIC_EN 14
+#define M_INIC_EN 0x3U
+#define V_INIC_EN(x) ((x) << S_INIC_EN)
+#define G_INIC_EN(x) (((x) >> S_INIC_EN) & M_INIC_EN)
+
+#define S_SBA_EN 12
+#define M_SBA_EN 0x3U
+#define V_SBA_EN(x) ((x) << S_SBA_EN)
+#define G_SBA_EN(x) (((x) >> S_SBA_EN) & M_SBA_EN)
+
+#define S_BG2TP_MAP_MODE 11
+#define V_BG2TP_MAP_MODE(x) ((x) << S_BG2TP_MAP_MODE)
+#define F_BG2TP_MAP_MODE V_BG2TP_MAP_MODE(1U)
+
+#define S_MPS_LB_MODE 9
+#define M_MPS_LB_MODE 0x3U
+#define V_MPS_LB_MODE(x) ((x) << S_MPS_LB_MODE)
+#define G_MPS_LB_MODE(x) (((x) >> S_MPS_LB_MODE) & M_MPS_LB_MODE)
+
#define A_MPS_INT_ENABLE 0x9004
#define S_STATINTENB 5
@@ -32618,6 +41091,17 @@
#define A_MPS_T5_BUILD_REVISION 0x9078
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH0 0x907c
+
+#define S_VALUE_1 16
+#define M_VALUE_1 0xffffU
+#define V_VALUE_1(x) ((x) << S_VALUE_1)
+#define G_VALUE_1(x) (((x) >> S_VALUE_1) & M_VALUE_1)
+
+#define S_VALUE_0 0
+#define M_VALUE_0 0xffffU
+#define V_VALUE_0(x) ((x) << S_VALUE_0)
+#define G_VALUE_0(x) (((x) >> S_VALUE_0) & M_VALUE_0)
+
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH1 0x9080
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH2 0x9084
#define A_MPS_TX_PAUSE_DURATION_BUF_GRP_TH3 0x9088
@@ -32671,11 +41155,130 @@
#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
#define A_MPS_FPGA_BIST_CFG_P1 0x9124
-
-#define S_T6_BASEADDR 0
-#define M_T6_BASEADDR 0xffffU
-#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR)
-#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR)
+#define A_MPS_FPGA_BIST_CFG_P2 0x9128
+#define A_MPS_FPGA_BIST_CFG_P3 0x912c
+#define A_MPS_INIC_CTL 0x9130
+
+#define S_T7_RD_WRN 16
+#define V_T7_RD_WRN(x) ((x) << S_T7_RD_WRN)
+#define F_T7_RD_WRN V_T7_RD_WRN(1U)
+
+#define A_MPS_INIC_DATA 0x9134
+#define A_MPS_TP_CSIDE_MUX_CTL_P2 0x9138
+#define A_MPS_TP_CSIDE_MUX_CTL_P3 0x913c
+#define A_MPS_RED_CTL 0x9140
+
+#define S_LPBK_SHIFT_0 28
+#define M_LPBK_SHIFT_0 0xfU
+#define V_LPBK_SHIFT_0(x) ((x) << S_LPBK_SHIFT_0)
+#define G_LPBK_SHIFT_0(x) (((x) >> S_LPBK_SHIFT_0) & M_LPBK_SHIFT_0)
+
+#define S_LPBK_SHIFT_1 24
+#define M_LPBK_SHIFT_1 0xfU
+#define V_LPBK_SHIFT_1(x) ((x) << S_LPBK_SHIFT_1)
+#define G_LPBK_SHIFT_1(x) (((x) >> S_LPBK_SHIFT_1) & M_LPBK_SHIFT_1)
+
+#define S_LPBK_SHIFT_2 20
+#define M_LPBK_SHIFT_2 0xfU
+#define V_LPBK_SHIFT_2(x) ((x) << S_LPBK_SHIFT_2)
+#define G_LPBK_SHIFT_2(x) (((x) >> S_LPBK_SHIFT_2) & M_LPBK_SHIFT_2)
+
+#define S_LPBK_SHIFT_3 16
+#define M_LPBK_SHIFT_3 0xfU
+#define V_LPBK_SHIFT_3(x) ((x) << S_LPBK_SHIFT_3)
+#define G_LPBK_SHIFT_3(x) (((x) >> S_LPBK_SHIFT_3) & M_LPBK_SHIFT_3)
+
+#define S_MAC_SHIFT_0 12
+#define M_MAC_SHIFT_0 0xfU
+#define V_MAC_SHIFT_0(x) ((x) << S_MAC_SHIFT_0)
+#define G_MAC_SHIFT_0(x) (((x) >> S_MAC_SHIFT_0) & M_MAC_SHIFT_0)
+
+#define S_MAC_SHIFT_1 8
+#define M_MAC_SHIFT_1 0xfU
+#define V_MAC_SHIFT_1(x) ((x) << S_MAC_SHIFT_1)
+#define G_MAC_SHIFT_1(x) (((x) >> S_MAC_SHIFT_1) & M_MAC_SHIFT_1)
+
+#define S_MAC_SHIFT_2 4
+#define M_MAC_SHIFT_2 0xfU
+#define V_MAC_SHIFT_2(x) ((x) << S_MAC_SHIFT_2)
+#define G_MAC_SHIFT_2(x) (((x) >> S_MAC_SHIFT_2) & M_MAC_SHIFT_2)
+
+#define S_MAC_SHIFT_3 0
+#define M_MAC_SHIFT_3 0xfU
+#define V_MAC_SHIFT_3(x) ((x) << S_MAC_SHIFT_3)
+#define G_MAC_SHIFT_3(x) (((x) >> S_MAC_SHIFT_3) & M_MAC_SHIFT_3)
+
+#define A_MPS_RED_EN 0x9144
+
+#define S_LPBK_EN3 7
+#define V_LPBK_EN3(x) ((x) << S_LPBK_EN3)
+#define F_LPBK_EN3 V_LPBK_EN3(1U)
+
+#define S_LPBK_EN2 6
+#define V_LPBK_EN2(x) ((x) << S_LPBK_EN2)
+#define F_LPBK_EN2 V_LPBK_EN2(1U)
+
+#define S_LPBK_EN1 5
+#define V_LPBK_EN1(x) ((x) << S_LPBK_EN1)
+#define F_LPBK_EN1 V_LPBK_EN1(1U)
+
+#define S_LPBK_EN0 4
+#define V_LPBK_EN0(x) ((x) << S_LPBK_EN0)
+#define F_LPBK_EN0 V_LPBK_EN0(1U)
+
+#define S_MAC_EN3 3
+#define V_MAC_EN3(x) ((x) << S_MAC_EN3)
+#define F_MAC_EN3 V_MAC_EN3(1U)
+
+#define S_MAC_EN2 2
+#define V_MAC_EN2(x) ((x) << S_MAC_EN2)
+#define F_MAC_EN2 V_MAC_EN2(1U)
+
+#define S_MAC_EN1 1
+#define V_MAC_EN1(x) ((x) << S_MAC_EN1)
+#define F_MAC_EN1 V_MAC_EN1(1U)
+
+#define S_MAC_EN0 0
+#define V_MAC_EN0(x) ((x) << S_MAC_EN0)
+#define F_MAC_EN0 V_MAC_EN0(1U)
+
+#define A_MPS_MAC0_RED_DROP_CNT_H 0x9148
+#define A_MPS_MAC0_RED_DROP_CNT_L 0x914c
+#define A_MPS_MAC1_RED_DROP_CNT_H 0x9150
+#define A_MPS_MAC1_RED_DROP_CNT_L 0x9154
+#define A_MPS_MAC2_RED_DROP_CNT_H 0x9158
+#define A_MPS_MAC2_RED_DROP_CNT_L 0x915c
+#define A_MPS_MAC3_RED_DROP_CNT_H 0x9160
+#define A_MPS_MAC3_RED_DROP_CNT_L 0x9164
+#define A_MPS_LPBK0_RED_DROP_CNT_H 0x9168
+#define A_MPS_LPBK0_RED_DROP_CNT_L 0x916c
+#define A_MPS_LPBK1_RED_DROP_CNT_H 0x9170
+#define A_MPS_LPBK1_RED_DROP_CNT_L 0x9174
+#define A_MPS_LPBK2_RED_DROP_CNT_H 0x9178
+#define A_MPS_LPBK2_RED_DROP_CNT_L 0x917c
+#define A_MPS_LPBK3_RED_DROP_CNT_H 0x9180
+#define A_MPS_LPBK3_RED_DROP_CNT_L 0x9184
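+
+/*
+ * Illustrative sketch (assumes the driver's t4_read_reg() helper): the
+ * RED drop counters are 64-bit values split across _H/_L pairs, read as:
+ *
+ *     drops = ((uint64_t)t4_read_reg(sc, A_MPS_MAC0_RED_DROP_CNT_H) << 32) |
+ *         t4_read_reg(sc, A_MPS_MAC0_RED_DROP_CNT_L);
+ */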
+#define A_MPS_MAC_RED_PP_DROP_EN 0x9188
+
+#define S_T7_MAC3 24
+#define M_T7_MAC3 0xffU
+#define V_T7_MAC3(x) ((x) << S_T7_MAC3)
+#define G_T7_MAC3(x) (((x) >> S_T7_MAC3) & M_T7_MAC3)
+
+#define S_T7_MAC2 16
+#define M_T7_MAC2 0xffU
+#define V_T7_MAC2(x) ((x) << S_T7_MAC2)
+#define G_T7_MAC2(x) (((x) >> S_T7_MAC2) & M_T7_MAC2)
+
+#define S_T7_MAC1 8
+#define M_T7_MAC1 0xffU
+#define V_T7_MAC1(x) ((x) << S_T7_MAC1)
+#define G_T7_MAC1(x) (((x) >> S_T7_MAC1) & M_T7_MAC1)
+
+#define S_T7_MAC0 0
+#define M_T7_MAC0 0xffU
+#define V_T7_MAC0(x) ((x) << S_T7_MAC0)
+#define G_T7_MAC0(x) (((x) >> S_T7_MAC0) & M_T7_MAC0)
#define A_MPS_TX_PRTY_SEL 0x9400
@@ -32714,6 +41317,26 @@
#define V_NCSI_SOURCE(x) ((x) << S_NCSI_SOURCE)
#define G_NCSI_SOURCE(x) (((x) >> S_NCSI_SOURCE) & M_NCSI_SOURCE)
+#define S_T7_CH4_PRTY 16
+#define M_T7_CH4_PRTY 0x7U
+#define V_T7_CH4_PRTY(x) ((x) << S_T7_CH4_PRTY)
+#define G_T7_CH4_PRTY(x) (((x) >> S_T7_CH4_PRTY) & M_T7_CH4_PRTY)
+
+#define S_T7_CH3_PRTY 13
+#define M_T7_CH3_PRTY 0x7U
+#define V_T7_CH3_PRTY(x) ((x) << S_T7_CH3_PRTY)
+#define G_T7_CH3_PRTY(x) (((x) >> S_T7_CH3_PRTY) & M_T7_CH3_PRTY)
+
+#define S_T7_CH2_PRTY 10
+#define M_T7_CH2_PRTY 0x7U
+#define V_T7_CH2_PRTY(x) ((x) << S_T7_CH2_PRTY)
+#define G_T7_CH2_PRTY(x) (((x) >> S_T7_CH2_PRTY) & M_T7_CH2_PRTY)
+
+#define S_T7_CH1_PRTY 7
+#define M_T7_CH1_PRTY 0x7U
+#define V_T7_CH1_PRTY(x) ((x) << S_T7_CH1_PRTY)
+#define G_T7_CH1_PRTY(x) (((x) >> S_T7_CH1_PRTY) & M_T7_CH1_PRTY)
+
#define A_MPS_TX_INT_ENABLE 0x9404
#define S_PORTERR 16
@@ -32751,9 +41374,52 @@
#define V_TPFIFO(x) ((x) << S_TPFIFO)
#define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO)
+#define S_T7_PORTERR 28
+#define V_T7_PORTERR(x) ((x) << S_T7_PORTERR)
+#define F_T7_PORTERR V_T7_PORTERR(1U)
+
+#define S_T7_FRMERR 27
+#define V_T7_FRMERR(x) ((x) << S_T7_FRMERR)
+#define F_T7_FRMERR V_T7_FRMERR(1U)
+
+#define S_T7_SECNTERR 26
+#define V_T7_SECNTERR(x) ((x) << S_T7_SECNTERR)
+#define F_T7_SECNTERR V_T7_SECNTERR(1U)
+
+#define S_T7_BUBBLE 25
+#define V_T7_BUBBLE(x) ((x) << S_T7_BUBBLE)
+#define F_T7_BUBBLE V_T7_BUBBLE(1U)
+
+#define S_TXTOKENFIFO 15
+#define M_TXTOKENFIFO 0x3ffU
+#define V_TXTOKENFIFO(x) ((x) << S_TXTOKENFIFO)
+#define G_TXTOKENFIFO(x) (((x) >> S_TXTOKENFIFO) & M_TXTOKENFIFO)
+
+#define S_PERR_TP2MPS_TFIFO 13
+#define M_PERR_TP2MPS_TFIFO 0x3U
+#define V_PERR_TP2MPS_TFIFO(x) ((x) << S_PERR_TP2MPS_TFIFO)
+#define G_PERR_TP2MPS_TFIFO(x) (((x) >> S_PERR_TP2MPS_TFIFO) & M_PERR_TP2MPS_TFIFO)
+
#define A_MPS_TX_INT_CAUSE 0x9408
#define A_MPS_TX_NCSI2MPS_CNT 0x940c
#define A_MPS_TX_PERR_ENABLE 0x9410
+
+#define S_PORTERRINT 28
+#define V_PORTERRINT(x) ((x) << S_PORTERRINT)
+#define F_PORTERRINT V_PORTERRINT(1U)
+
+#define S_FRAMINGERRINT 27
+#define V_FRAMINGERRINT(x) ((x) << S_FRAMINGERRINT)
+#define F_FRAMINGERRINT V_FRAMINGERRINT(1U)
+
+#define S_SECNTERRINT 26
+#define V_SECNTERRINT(x) ((x) << S_SECNTERRINT)
+#define F_SECNTERRINT V_SECNTERRINT(1U)
+
+#define S_BUBBLEERRINT 25
+#define V_BUBBLEERRINT(x) ((x) << S_BUBBLEERRINT)
+#define F_BUBBLEERRINT V_BUBBLEERRINT(1U)
+
#define A_MPS_TX_PERR_INJECT 0x9414
#define S_MPSTXMEMSEL 1
@@ -33481,6 +42147,41 @@
#define F_TXINCH0_CGEN V_TXINCH0_CGEN(1U)
#define A_MPS_TX_CGEN_DYNAMIC 0x9470
+#define A_MPS_TX2RX_CH_MAP 0x9474
+
+#define S_ENABLELBK_CH3 3
+#define V_ENABLELBK_CH3(x) ((x) << S_ENABLELBK_CH3)
+#define F_ENABLELBK_CH3 V_ENABLELBK_CH3(1U)
+
+#define S_ENABLELBK_CH2 2
+#define V_ENABLELBK_CH2(x) ((x) << S_ENABLELBK_CH2)
+#define F_ENABLELBK_CH2 V_ENABLELBK_CH2(1U)
+
+#define S_ENABLELBK_CH1 1
+#define V_ENABLELBK_CH1(x) ((x) << S_ENABLELBK_CH1)
+#define F_ENABLELBK_CH1 V_ENABLELBK_CH1(1U)
+
+#define S_ENABLELBK_CH0 0
+#define V_ENABLELBK_CH0(x) ((x) << S_ENABLELBK_CH0)
+#define F_ENABLELBK_CH0 V_ENABLELBK_CH0(1U)
+
+#define A_MPS_TX_DBG_CNT_CTL 0x9478
+
+#define S_DBG_CNT_CTL 0
+#define M_DBG_CNT_CTL 0xffU
+#define V_DBG_CNT_CTL(x) ((x) << S_DBG_CNT_CTL)
+#define G_DBG_CNT_CTL(x) (((x) >> S_DBG_CNT_CTL) & M_DBG_CNT_CTL)
+
+#define A_MPS_TX_DBG_CNT 0x947c
+#define A_MPS_TX_INT2_ENABLE 0x9498
+#define A_MPS_TX_INT2_CAUSE 0x949c
+#define A_MPS_TX_PERR2_ENABLE 0x94a0
+#define A_MPS_TX_INT3_ENABLE 0x94a4
+#define A_MPS_TX_INT3_CAUSE 0x94a8
+#define A_MPS_TX_PERR3_ENABLE 0x94ac
+#define A_MPS_TX_INT4_ENABLE 0x94b0
+#define A_MPS_TX_INT4_CAUSE 0x94b4
+#define A_MPS_TX_PERR4_ENABLE 0x94b8
#define A_MPS_STAT_CTL 0x9600
#define S_COUNTVFINPF 1
@@ -33810,6 +42511,7 @@
#define A_MPS_TRC_RSS_HASH 0x9804
#define A_MPS_TRC_FILTER0_RSS_HASH 0x9804
+#define A_T7_MPS_TRC_PERR_INJECT 0x9804
#define A_MPS_TRC_RSS_CONTROL 0x9808
#define S_RSSCONTROL 16
@@ -33939,6 +42641,20 @@
#define V_FILTMEM(x) ((x) << S_FILTMEM)
#define G_FILTMEM(x) (((x) >> S_FILTMEM) & M_FILTMEM)
+#define S_T7_MISCPERR 16
+#define V_T7_MISCPERR(x) ((x) << S_T7_MISCPERR)
+#define F_T7_MISCPERR V_T7_MISCPERR(1U)
+
+#define S_T7_PKTFIFO 8
+#define M_T7_PKTFIFO 0xffU
+#define V_T7_PKTFIFO(x) ((x) << S_T7_PKTFIFO)
+#define G_T7_PKTFIFO(x) (((x) >> S_T7_PKTFIFO) & M_T7_PKTFIFO)
+
+#define S_T7_FILTMEM 0
+#define M_T7_FILTMEM 0xffU
+#define V_T7_FILTMEM(x) ((x) << S_T7_FILTMEM)
+#define G_T7_FILTMEM(x) (((x) >> S_T7_FILTMEM) & M_T7_FILTMEM)
+
#define A_MPS_TRC_INT_ENABLE 0x9858
#define S_TRCPLERRENB 9
@@ -33961,6 +42677,7 @@
#define A_MPS_TRC_FILTER2_RSS_HASH 0x9ff8
#define A_MPS_TRC_FILTER2_RSS_CONTROL 0x9ffc
#define A_MPS_TRC_FILTER3_RSS_HASH 0xa000
+#define A_MPS_TRC_FILTER4_MATCH 0xa000
#define A_MPS_TRC_FILTER3_RSS_CONTROL 0xa004
#define A_MPS_T5_TRC_RSS_HASH 0xa008
#define A_MPS_T5_TRC_RSS_CONTROL 0xa00c
@@ -34043,125 +42760,8 @@
#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
#define A_MPS_TRC_VF_OFF_FILTER_1 0xa014
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_2 0xa018
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c
-
-#define S_T6_TRCMPS2TP_MACONLY 22
-#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY)
-#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U)
-
-#define S_T6_TRCALLMPS2TP 21
-#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP)
-#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U)
-
-#define S_T6_TRCALLTP2MPS 20
-#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS)
-#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U)
-
-#define S_T6_TRCALLVF 19
-#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF)
-#define F_T6_TRCALLVF V_T6_TRCALLVF(1U)
-
-#define S_T6_TRC_OFLD_EN 18
-#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN)
-#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U)
-
-#define S_T6_VFFILTEN 17
-#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN)
-#define F_T6_VFFILTEN V_T6_VFFILTEN(1U)
-
-#define S_T6_VFFILTMASK 9
-#define M_T6_VFFILTMASK 0xffU
-#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK)
-#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK)
-
-#define S_T6_VFFILTVALID 8
-#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID)
-#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U)
-
-#define S_T6_VFFILTDATA 0
-#define M_T6_VFFILTDATA 0xffU
-#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA)
-#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA)
-
#define A_MPS_TRC_CGEN 0xa020
#define S_MPSTRCCGEN 0
@@ -34169,6 +42769,129 @@
#define V_MPSTRCCGEN(x) ((x) << S_MPSTRCCGEN)
#define G_MPSTRCCGEN(x) (((x) >> S_MPSTRCCGEN) & M_MPSTRCCGEN)
+#define A_MPS_TRC_FILTER4_DONT_CARE 0xa080
+#define A_MPS_TRC_FILTER5_MATCH 0xa100
+#define A_MPS_TRC_FILTER5_DONT_CARE 0xa180
+#define A_MPS_TRC_FILTER6_MATCH 0xa200
+#define A_MPS_TRC_FILTER6_DONT_CARE 0xa280
+#define A_MPS_TRC_FILTER7_MATCH 0xa300
+#define A_MPS_TRC_FILTER7_DONT_CARE 0xa380
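+
+/*
+ * Note (semantics assumed from the usual TCAM value/mask pairing): each
+ * trace filter owns a MATCH/DONT_CARE register pair spaced 0x80 apart;
+ * a byte matches when it equals MATCH in every bit position whose
+ * DONT_CARE bit is clear.
+ */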
+#define A_T7_MPS_TRC_FILTER0_RSS_HASH 0xa3f0
+#define A_T7_MPS_TRC_FILTER0_RSS_CONTROL 0xa3f4
+#define A_T7_MPS_TRC_FILTER1_RSS_HASH 0xa3f8
+#define A_T7_MPS_TRC_FILTER1_RSS_CONTROL 0xa3fc
+#define A_T7_MPS_TRC_FILTER2_RSS_HASH 0xa400
+#define A_T7_MPS_TRC_FILTER2_RSS_CONTROL 0xa404
+#define A_T7_MPS_TRC_FILTER3_RSS_HASH 0xa408
+#define A_T7_MPS_TRC_FILTER3_RSS_CONTROL 0xa40c
+#define A_MPS_TRC_FILTER4_RSS_HASH 0xa410
+#define A_MPS_TRC_FILTER4_RSS_CONTROL 0xa414
+#define A_MPS_TRC_FILTER5_RSS_HASH 0xa418
+#define A_MPS_TRC_FILTER5_RSS_CONTROL 0xa41c
+#define A_MPS_TRC_FILTER6_RSS_HASH 0xa420
+#define A_MPS_TRC_FILTER6_RSS_CONTROL 0xa424
+#define A_MPS_TRC_FILTER7_RSS_HASH 0xa428
+#define A_MPS_TRC_FILTER7_RSS_CONTROL 0xa42c
+#define A_T7_MPS_T5_TRC_RSS_HASH 0xa430
+#define A_T7_MPS_T5_TRC_RSS_CONTROL 0xa434
+#define A_T7_MPS_TRC_VF_OFF_FILTER_0 0xa438
+#define A_T7_MPS_TRC_VF_OFF_FILTER_1 0xa43c
+#define A_T7_MPS_TRC_VF_OFF_FILTER_2 0xa440
+#define A_T7_MPS_TRC_VF_OFF_FILTER_3 0xa444
+#define A_MPS_TRC_VF_OFF_FILTER_4 0xa448
+#define A_MPS_TRC_VF_OFF_FILTER_5 0xa44c
+#define A_MPS_TRC_VF_OFF_FILTER_6 0xa450
+#define A_MPS_TRC_VF_OFF_FILTER_7 0xa454
+#define A_T7_MPS_TRC_CGEN 0xa458
+
+#define S_T7_MPSTRCCGEN 0
+#define M_T7_MPSTRCCGEN 0xffU
+#define V_T7_MPSTRCCGEN(x) ((x) << S_T7_MPSTRCCGEN)
+#define G_T7_MPSTRCCGEN(x) (((x) >> S_T7_MPSTRCCGEN) & M_T7_MPSTRCCGEN)
+
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_A 0xa460
+#define A_T7_MPS_TRC_FILTER_MATCH_CTL_B 0xa480
+#define A_T7_MPS_TRC_FILTER_RUNT_CTL 0xa4a0
+#define A_T7_MPS_TRC_FILTER_DROP 0xa4c0
+#define A_T7_MPS_TRC_INT_ENABLE 0xa4e0
+
+#define S_T7_TRCPLERRENB 17
+#define V_T7_TRCPLERRENB(x) ((x) << S_T7_TRCPLERRENB)
+#define F_T7_TRCPLERRENB V_T7_TRCPLERRENB(1U)
+
+#define A_T7_MPS_TRC_INT_CAUSE 0xa4e4
+#define A_T7_MPS_TRC_TIMESTAMP_L 0xa4e8
+#define A_T7_MPS_TRC_TIMESTAMP_H 0xa4ec
+#define A_MPS_TRC_PERR_ENABLE2 0xa4f0
+
+#define S_TRC_TF_ECC 24
+#define M_TRC_TF_ECC 0xffU
+#define V_TRC_TF_ECC(x) ((x) << S_TRC_TF_ECC)
+#define G_TRC_TF_ECC(x) (((x) >> S_TRC_TF_ECC) & M_TRC_TF_ECC)
+
+#define S_MPS2MAC_CONV_TRC_CERR 22
+#define M_MPS2MAC_CONV_TRC_CERR 0x3U
+#define V_MPS2MAC_CONV_TRC_CERR(x) ((x) << S_MPS2MAC_CONV_TRC_CERR)
+#define G_MPS2MAC_CONV_TRC_CERR(x) (((x) >> S_MPS2MAC_CONV_TRC_CERR) & M_MPS2MAC_CONV_TRC_CERR)
+
+#define S_MPS2MAC_CONV_TRC 18
+#define M_MPS2MAC_CONV_TRC 0xfU
+#define V_MPS2MAC_CONV_TRC(x) ((x) << S_MPS2MAC_CONV_TRC)
+#define G_MPS2MAC_CONV_TRC(x) (((x) >> S_MPS2MAC_CONV_TRC) & M_MPS2MAC_CONV_TRC)
+
+#define S_TF0_PERR_1 17
+#define V_TF0_PERR_1(x) ((x) << S_TF0_PERR_1)
+#define F_TF0_PERR_1 V_TF0_PERR_1(1U)
+
+#define S_TF1_PERR_1 16
+#define V_TF1_PERR_1(x) ((x) << S_TF1_PERR_1)
+#define F_TF1_PERR_1 V_TF1_PERR_1(1U)
+
+#define S_TF2_PERR_1 15
+#define V_TF2_PERR_1(x) ((x) << S_TF2_PERR_1)
+#define F_TF2_PERR_1 V_TF2_PERR_1(1U)
+
+#define S_TF3_PERR_1 14
+#define V_TF3_PERR_1(x) ((x) << S_TF3_PERR_1)
+#define F_TF3_PERR_1 V_TF3_PERR_1(1U)
+
+#define S_TF4_PERR_1 13
+#define V_TF4_PERR_1(x) ((x) << S_TF4_PERR_1)
+#define F_TF4_PERR_1 V_TF4_PERR_1(1U)
+
+#define S_TF0_PERR_0 12
+#define V_TF0_PERR_0(x) ((x) << S_TF0_PERR_0)
+#define F_TF0_PERR_0 V_TF0_PERR_0(1U)
+
+#define S_TF1_PERR_0 11
+#define V_TF1_PERR_0(x) ((x) << S_TF1_PERR_0)
+#define F_TF1_PERR_0 V_TF1_PERR_0(1U)
+
+#define S_TF2_PERR_0 10
+#define V_TF2_PERR_0(x) ((x) << S_TF2_PERR_0)
+#define F_TF2_PERR_0 V_TF2_PERR_0(1U)
+
+#define S_TF3_PERR_0 9
+#define V_TF3_PERR_0(x) ((x) << S_TF3_PERR_0)
+#define F_TF3_PERR_0 V_TF3_PERR_0(1U)
+
+#define S_TF4_PERR_0 8
+#define V_TF4_PERR_0(x) ((x) << S_TF4_PERR_0)
+#define F_TF4_PERR_0 V_TF4_PERR_0(1U)
+
+#define S_PERR_TF_IN_CTL 0
+#define M_PERR_TF_IN_CTL 0xffU
+#define V_PERR_TF_IN_CTL(x) ((x) << S_PERR_TF_IN_CTL)
+#define G_PERR_TF_IN_CTL(x) (((x) >> S_PERR_TF_IN_CTL) & M_PERR_TF_IN_CTL)
+
+#define A_MPS_TRC_INT_ENABLE2 0xa4f4
+#define A_MPS_TRC_INT_CAUSE2 0xa4f8
+
+#define S_T7_TRC_TF_ECC 22
+#define M_T7_TRC_TF_ECC 0xffU
+#define V_T7_TRC_TF_ECC(x) ((x) << S_T7_TRC_TF_ECC)
+#define G_T7_TRC_TF_ECC(x) (((x) >> S_T7_TRC_TF_ECC) & M_T7_TRC_TF_ECC)
+
#define A_MPS_CLS_CTL 0xd000
#define S_MEMWRITEFAULT 4
@@ -34246,12 +42969,24 @@
#define V_MATCHSRAM(x) ((x) << S_MATCHSRAM)
#define F_MATCHSRAM V_MATCHSRAM(1U)
+#define S_CIM2MPS_INTF_PAR 4
+#define V_CIM2MPS_INTF_PAR(x) ((x) << S_CIM2MPS_INTF_PAR)
+#define F_CIM2MPS_INTF_PAR V_CIM2MPS_INTF_PAR(1U)
+
+#define S_TCAM_CRC_SRAM 3
+#define V_TCAM_CRC_SRAM(x) ((x) << S_TCAM_CRC_SRAM)
+#define F_TCAM_CRC_SRAM V_TCAM_CRC_SRAM(1U)
+
#define A_MPS_CLS_INT_ENABLE 0xd024
#define S_PLERRENB 3
#define V_PLERRENB(x) ((x) << S_PLERRENB)
#define F_PLERRENB V_PLERRENB(1U)
+#define S_T7_PLERRENB 5
+#define V_T7_PLERRENB(x) ((x) << S_T7_PLERRENB)
+#define F_T7_PLERRENB V_T7_PLERRENB(1U)
+
#define A_MPS_CLS_INT_CAUSE 0xd028
#define A_MPS_CLS_PL_TEST_DATA_L 0xd02c
#define A_MPS_CLS_PL_TEST_DATA_H 0xd030
@@ -34314,6 +43049,25 @@
#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF)
#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF)
+#define S_T7_CLS_SPARE 30
+#define M_T7_CLS_SPARE 0x3U
+#define V_T7_CLS_SPARE(x) ((x) << S_T7_CLS_SPARE)
+#define G_T7_CLS_SPARE(x) (((x) >> S_T7_CLS_SPARE) & M_T7_CLS_SPARE)
+
+#define S_T7_1_CLS_PRIORITY 27
+#define M_T7_1_CLS_PRIORITY 0x7U
+#define V_T7_1_CLS_PRIORITY(x) ((x) << S_T7_1_CLS_PRIORITY)
+#define G_T7_1_CLS_PRIORITY(x) (((x) >> S_T7_1_CLS_PRIORITY) & M_T7_1_CLS_PRIORITY)
+
+#define S_T7_1_CLS_REPLICATE 26
+#define V_T7_1_CLS_REPLICATE(x) ((x) << S_T7_1_CLS_REPLICATE)
+#define F_T7_1_CLS_REPLICATE V_T7_1_CLS_REPLICATE(1U)
+
+#define S_T7_1_CLS_INDEX 15
+#define M_T7_1_CLS_INDEX 0x7ffU
+#define V_T7_1_CLS_INDEX(x) ((x) << S_T7_1_CLS_INDEX)
+#define G_T7_1_CLS_INDEX(x) (((x) >> S_T7_1_CLS_INDEX) & M_T7_1_CLS_INDEX)
+
#define A_MPS_CLS_PL_TEST_CTL 0xd038
#define S_PLTESTCTL 0
@@ -34327,12 +43081,26 @@
#define F_PRTBMCCTL V_PRTBMCCTL(1U)
#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100
+#define A_MPS_CLS0_MATCH_CNT_TCAM 0xd100
#define A_MPS_CLS_MATCH_CNT_HASH 0xd104
+#define A_MPS_CLS0_MATCH_CNT_HASH 0xd104
#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108
+#define A_MPS_CLS0_MATCH_CNT_BCAST 0xd108
#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c
+#define A_MPS_CLS0_MATCH_CNT_BMC 0xd10c
#define A_MPS_CLS_MATCH_CNT_PROM 0xd110
+#define A_MPS_CLS0_MATCH_CNT_PROM 0xd110
#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114
+#define A_MPS_CLS0_MATCH_CNT_HPROM 0xd114
#define A_MPS_CLS_MISS_CNT 0xd118
+#define A_MPS_CLS0_MISS_CNT 0xd118
+#define A_MPS_CLS1_MATCH_CNT_TCAM 0xd11c
+#define A_MPS_CLS1_MATCH_CNT_HASH 0xd120
+#define A_MPS_CLS1_MATCH_CNT_BCAST 0xd124
+#define A_MPS_CLS1_MATCH_CNT_BMC 0xd128
+#define A_MPS_CLS1_MATCH_CNT_PROM 0xd12c
+#define A_MPS_CLS1_MATCH_CNT_HPROM 0xd130
+#define A_MPS_CLS1_MISS_CNT 0xd134
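+
+/*
+ * Note: the legacy A_MPS_CLS_MATCH_CNT_* and A_MPS_CLS_MISS_CNT names
+ * decode the same offsets as the new A_MPS_CLS0_* aliases; the second
+ * classifier instance gets its own counter block at 0xd11c-0xd134.
+ */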
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200
#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204
@@ -34428,6 +43196,15 @@
#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF)
#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF)
+#define S_T7_CLSTRCMATCH 23
+#define V_T7_CLSTRCMATCH(x) ((x) << S_T7_CLSTRCMATCH)
+#define F_T7_CLSTRCMATCH V_T7_CLSTRCMATCH(1U)
+
+#define S_T7_CLSTRCINDEX 12
+#define M_T7_CLSTRCINDEX 0x7ffU
+#define V_T7_CLSTRCINDEX(x) ((x) << S_T7_CLSTRCINDEX)
+#define G_T7_CLSTRCINDEX(x) (((x) >> S_T7_CLSTRCINDEX) & M_T7_CLSTRCINDEX)
+
#define A_MPS_CLS_VLAN_TABLE 0xdfc0
#define S_VLAN_MASK 16
@@ -34536,24 +43313,6 @@
#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD)
#define F_T6_SRAM_VLD V_T6_SRAM_VLD(1U)
-#define S_T6_REPLICATE 12
-#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE)
-#define F_T6_REPLICATE V_T6_REPLICATE(1U)
-
-#define S_T6_PF 9
-#define M_T6_PF 0x7U
-#define V_T6_PF(x) ((x) << S_T6_PF)
-#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF)
-
-#define S_T6_VF_VALID 8
-#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID)
-#define F_T6_VF_VALID V_T6_VF_VALID(1U)
-
-#define S_T6_VF 0
-#define M_T6_VF 0xffU
-#define V_T6_VF(x) ((x) << S_T6_VF)
-#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF)
-
#define A_MPS_CLS_SRAM_H 0xe004
#define S_MACPARITY1 9
@@ -34580,6 +43339,41 @@
#define V_MACPARITY2(x) ((x) << S_MACPARITY2)
#define F_MACPARITY2 V_MACPARITY2(1U)
+#define S_SRAMWRN 31
+#define V_SRAMWRN(x) ((x) << S_SRAMWRN)
+#define F_SRAMWRN V_SRAMWRN(1U)
+
+#define S_SRAMSPARE 27
+#define M_SRAMSPARE 0xfU
+#define V_SRAMSPARE(x) ((x) << S_SRAMSPARE)
+#define G_SRAMSPARE(x) (((x) >> S_SRAMSPARE) & M_SRAMSPARE)
+
+#define S_SRAMINDEX 16
+#define M_SRAMINDEX 0x7ffU
+#define V_SRAMINDEX(x) ((x) << S_SRAMINDEX)
+#define G_SRAMINDEX(x) (((x) >> S_SRAMINDEX) & M_SRAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_CTL 0xe008
+
+#define S_T7_CTLCMDTYPE 15
+#define V_T7_CTLCMDTYPE(x) ((x) << S_T7_CTLCMDTYPE)
+#define F_T7_CTLCMDTYPE V_T7_CTLCMDTYPE(1U)
+
+#define S_T7_CTLXYBITSEL 12
+#define V_T7_CTLXYBITSEL(x) ((x) << S_T7_CTLXYBITSEL)
+#define F_T7_CTLXYBITSEL V_T7_CTLXYBITSEL(1U)
+
+#define S_T7_CTLTCAMINDEX 0
+#define M_T7_CTLTCAMINDEX 0x1ffU
+#define V_T7_CTLTCAMINDEX(x) ((x) << S_T7_CTLTCAMINDEX)
+#define G_T7_CTLTCAMINDEX(x) (((x) >> S_T7_CTLTCAMINDEX) & M_T7_CTLTCAMINDEX)
+
+#define A_MPS_CLS_HASH_TCAM_DATA 0xe00c
+
+#define S_LKPTYPE 24
+#define V_LKPTYPE(x) ((x) << S_LKPTYPE)
+#define F_LKPTYPE V_LKPTYPE(1U)
+
#define A_MPS_CLS_TCAM_Y_L 0xf000
#define A_MPS_CLS_TCAM_DATA0 0xf000
#define A_MPS_CLS_TCAM_Y_H 0xf004
@@ -34648,6 +43442,16 @@
#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1)
#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1)
+#define S_T7_CTLTCAMSEL 26
+#define M_T7_CTLTCAMSEL 0x3U
+#define V_T7_CTLTCAMSEL(x) ((x) << S_T7_CTLTCAMSEL)
+#define G_T7_CTLTCAMSEL(x) (((x) >> S_T7_CTLTCAMSEL) & M_T7_CTLTCAMSEL)
+
+#define S_T7_1_CTLTCAMINDEX 17
+#define M_T7_1_CTLTCAMINDEX 0x1ffU
+#define V_T7_1_CTLTCAMINDEX(x) ((x) << S_T7_1_CTLTCAMINDEX)
+#define G_T7_1_CTLTCAMINDEX(x) (((x) >> S_T7_1_CTLTCAMINDEX) & M_T7_1_CTLTCAMINDEX)
+
#define A_MPS_CLS_TCAM_X_H 0xf00c
#define S_TCAMXH 0
@@ -34656,11 +43460,47 @@
#define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH)
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID0 0xf010
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID0 0xf014
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID0 0xf018
+#define A_MPS_CLS_TCAM0_RDATA0_REQ_ID1 0xf01c
#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020
+#define A_MPS_CLS_TCAM0_RDATA1_REQ_ID1 0xf020
#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024
+#define A_MPS_CLS_TCAM0_RDATA2_REQ_ID1 0xf024
#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID0 0xf028
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID0 0xf02c
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID0 0xf030
+#define A_MPS_CLS_TCAM1_RDATA0_REQ_ID1 0xf034
+#define A_MPS_CLS_TCAM1_RDATA1_REQ_ID1 0xf038
+#define A_MPS_CLS_TCAM1_RDATA2_REQ_ID1 0xf03c
+#define A_MPS_CLS_TCAM0_MASK_REG0 0xf040
+#define A_MPS_CLS_TCAM0_MASK_REG1 0xf044
+#define A_MPS_CLS_TCAM0_MASK_REG2 0xf048
+
+#define S_MASK_0_2 0
+#define M_MASK_0_2 0xffffU
+#define V_MASK_0_2(x) ((x) << S_MASK_0_2)
+#define G_MASK_0_2(x) (((x) >> S_MASK_0_2) & M_MASK_0_2)
+
+#define A_MPS_CLS_TCAM1_MASK_REG0 0xf04c
+#define A_MPS_CLS_TCAM1_MASK_REG1 0xf050
+#define A_MPS_CLS_TCAM1_MASK_REG2 0xf054
+
+#define S_MASK_1_2 0
+#define M_MASK_1_2 0xffffU
+#define V_MASK_1_2(x) ((x) << S_MASK_1_2)
+#define G_MASK_1_2(x) (((x) >> S_MASK_1_2) & M_MASK_1_2)
+
+#define A_MPS_CLS_TCAM_BIST_CTRL 0xf058
+#define A_MPS_CLS_TCAM_BIST_CB_PASS 0xf05c
+#define A_MPS_CLS_TCAM_BIST_CB_BUSY 0xf060
+#define A_MPS_CLS_TCAM2_MASK_REG0 0xf064
+#define A_MPS_CLS_TCAM2_MASK_REG1 0xf068
+#define A_MPS_CLS_TCAM2_MASK_REG2 0xf06c
#define A_MPS_RX_CTL 0x11000
#define S_FILT_VLAN_SEL 17
@@ -34686,6 +43526,14 @@
#define V_SNF(x) ((x) << S_SNF)
#define G_SNF(x) (((x) >> S_SNF) & M_SNF)
+#define S_HASH_TCAM_EN 19
+#define V_HASH_TCAM_EN(x) ((x) << S_HASH_TCAM_EN)
+#define F_HASH_TCAM_EN V_HASH_TCAM_EN(1U)
+
+#define S_SND_ORG_PFVF 18
+#define V_SND_ORG_PFVF(x) ((x) << S_SND_ORG_PFVF)
+#define F_SND_ORG_PFVF V_SND_ORG_PFVF(1U)
+
#define A_MPS_RX_PORT_MUX_CTL 0x11004
#define S_CTL_P3 12
@@ -34877,6 +43725,11 @@
#define V_THRESH(x) ((x) << S_THRESH)
#define G_THRESH(x) (((x) >> S_THRESH) & M_THRESH)
+#define S_T7_THRESH 0
+#define M_T7_THRESH 0xfffU
+#define V_T7_THRESH(x) ((x) << S_T7_THRESH)
+#define G_T7_THRESH(x) (((x) >> S_T7_THRESH) & M_T7_THRESH)
+
#define A_MPS_RX_LPBK_BP1 0x11060
#define A_MPS_RX_LPBK_BP2 0x11064
#define A_MPS_RX_LPBK_BP3 0x11068
@@ -34888,6 +43741,12 @@
#define G_GAP(x) (((x) >> S_GAP) & M_GAP)
#define A_MPS_RX_CHMN_CNT 0x11070
+#define A_MPS_CTL_STAT 0x11070
+
+#define S_T7_CTL 0
+#define V_T7_CTL(x) ((x) << S_T7_CTL)
+#define F_T7_CTL V_T7_CTL(1U)
+
#define A_MPS_RX_PERR_INT_CAUSE 0x11074
#define S_FF 23
@@ -34990,18 +43849,54 @@
#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+#define S_MAC_IN_FIFO_768B 30
+#define V_MAC_IN_FIFO_768B(x) ((x) << S_MAC_IN_FIFO_768B)
+#define F_MAC_IN_FIFO_768B V_MAC_IN_FIFO_768B(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_T7_1_INT_ERR_INT 29
+#define V_T7_1_INT_ERR_INT(x) ((x) << S_T7_1_INT_ERR_INT)
+#define F_T7_1_INT_ERR_INT V_T7_1_INT_ERR_INT(1U)
-#define A_MPS_RX_PERR_ENABLE 0x1107c
+#define S_FLOP_PERR 28
+#define V_FLOP_PERR(x) ((x) << S_FLOP_PERR)
+#define F_FLOP_PERR V_FLOP_PERR(1U)
-#define S_T6_INT_ERR_INT 24
-#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT)
-#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U)
+#define S_RPLC_MAP 13
+#define M_RPLC_MAP 0x1fU
+#define V_RPLC_MAP(x) ((x) << S_RPLC_MAP)
+#define G_RPLC_MAP(x) (((x) >> S_RPLC_MAP) & M_RPLC_MAP)
+
+#define S_TKN_RUNT_DROP_FIFO 12
+#define V_TKN_RUNT_DROP_FIFO(x) ((x) << S_TKN_RUNT_DROP_FIFO)
+#define F_TKN_RUNT_DROP_FIFO V_TKN_RUNT_DROP_FIFO(1U)
+
+#define S_T7_PPM3 9
+#define M_T7_PPM3 0x7U
+#define V_T7_PPM3(x) ((x) << S_T7_PPM3)
+#define G_T7_PPM3(x) (((x) >> S_T7_PPM3) & M_T7_PPM3)
+
+#define S_T7_PPM2 6
+#define M_T7_PPM2 0x7U
+#define V_T7_PPM2(x) ((x) << S_T7_PPM2)
+#define G_T7_PPM2(x) (((x) >> S_T7_PPM2) & M_T7_PPM2)
+
+#define S_T7_PPM1 3
+#define M_T7_PPM1 0x7U
+#define V_T7_PPM1(x) ((x) << S_T7_PPM1)
+#define G_T7_PPM1(x) (((x) >> S_T7_PPM1) & M_T7_PPM1)
+
+#define S_T7_PPM0 0
+#define M_T7_PPM0 0x7U
+#define V_T7_PPM0(x) ((x) << S_T7_PPM0)
+#define G_T7_PPM0(x) (((x) >> S_T7_PPM0) & M_T7_PPM0)
+
+#define A_MPS_RX_PERR_INT_ENABLE 0x11078
+
+#define S_T7_2_INT_ERR_INT 30
+#define V_T7_2_INT_ERR_INT(x) ((x) << S_T7_2_INT_ERR_INT)
+#define F_T7_2_INT_ERR_INT V_T7_2_INT_ERR_INT(1U)
+
+#define A_MPS_RX_PERR_ENABLE 0x1107c
#define A_MPS_RX_PERR_INJECT 0x11080
#define A_MPS_RX_FUNC_INT_CAUSE 0x11084
@@ -35083,8 +43978,43 @@
#define V_TH_LOW(x) ((x) << S_TH_LOW)
#define G_TH_LOW(x) (((x) >> S_TH_LOW) & M_TH_LOW)
+#define A_MPS_RX_PERR_INT_CAUSE2 0x1108c
+
+#define S_CRYPT2MPS_RX_INTF_FIFO 28
+#define M_CRYPT2MPS_RX_INTF_FIFO 0xfU
+#define V_CRYPT2MPS_RX_INTF_FIFO(x) ((x) << S_CRYPT2MPS_RX_INTF_FIFO)
+#define G_CRYPT2MPS_RX_INTF_FIFO(x) (((x) >> S_CRYPT2MPS_RX_INTF_FIFO) & M_CRYPT2MPS_RX_INTF_FIFO)
+
+#define S_INIC2MPS_TX0_PERR 27
+#define V_INIC2MPS_TX0_PERR(x) ((x) << S_INIC2MPS_TX0_PERR)
+#define F_INIC2MPS_TX0_PERR V_INIC2MPS_TX0_PERR(1U)
+
+#define S_INIC2MPS_TX1_PERR 26
+#define V_INIC2MPS_TX1_PERR(x) ((x) << S_INIC2MPS_TX1_PERR)
+#define F_INIC2MPS_TX1_PERR V_INIC2MPS_TX1_PERR(1U)
+
+#define S_XGMAC2MPS_RX0_PERR 25
+#define V_XGMAC2MPS_RX0_PERR(x) ((x) << S_XGMAC2MPS_RX0_PERR)
+#define F_XGMAC2MPS_RX0_PERR V_XGMAC2MPS_RX0_PERR(1U)
+
+#define S_XGMAC2MPS_RX1_PERR 24
+#define V_XGMAC2MPS_RX1_PERR(x) ((x) << S_XGMAC2MPS_RX1_PERR)
+#define F_XGMAC2MPS_RX1_PERR V_XGMAC2MPS_RX1_PERR(1U)
+
+#define S_MPS2CRYPTO_RX_INTF_FIFO 20
+#define M_MPS2CRYPTO_RX_INTF_FIFO 0xfU
+#define V_MPS2CRYPTO_RX_INTF_FIFO(x) ((x) << S_MPS2CRYPTO_RX_INTF_FIFO)
+#define G_MPS2CRYPTO_RX_INTF_FIFO(x) (((x) >> S_MPS2CRYPTO_RX_INTF_FIFO) & M_MPS2CRYPTO_RX_INTF_FIFO)
+
+#define S_RX_PRE_PROC_PERR 9
+#define M_RX_PRE_PROC_PERR 0x7ffU
+#define V_RX_PRE_PROC_PERR(x) ((x) << S_RX_PRE_PROC_PERR)
+#define G_RX_PRE_PROC_PERR(x) (((x) >> S_RX_PRE_PROC_PERR) & M_RX_PRE_PROC_PERR)
+
#define A_MPS_RX_PAUSE_GEN_TH_1 0x11090
+#define A_MPS_RX_PERR_INT_ENABLE2 0x11090
#define A_MPS_RX_PAUSE_GEN_TH_2 0x11094
+#define A_MPS_RX_PERR_ENABLE2 0x11094
#define A_MPS_RX_PAUSE_GEN_TH_3 0x11098
#define A_MPS_RX_REPL_CTL 0x11098
@@ -35126,10 +44056,13 @@
#define A_MPS_RX_PT_ARB1 0x110ac
#define A_MPS_RX_PT_ARB2 0x110b0
+#define A_T7_MPS_RX_PT_ARB4 0x110b0
#define A_MPS_RX_PT_ARB3 0x110b4
#define A_T6_MPS_PF_OUT_EN 0x110b4
+#define A_T7_MPS_PF_OUT_EN 0x110b4
#define A_MPS_RX_PT_ARB4 0x110b8
#define A_T6_MPS_BMC_MTU 0x110b8
+#define A_T7_MPS_BMC_MTU 0x110b8
#define A_MPS_PF_OUT_EN 0x110bc
#define S_OUTEN 0
@@ -35138,6 +44071,7 @@
#define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN)
#define A_T6_MPS_BMC_PKT_CNT 0x110bc
+#define A_T7_MPS_BMC_PKT_CNT 0x110bc
#define A_MPS_BMC_MTU 0x110c0
#define S_MTU 0
@@ -35146,6 +44080,7 @@
#define G_MTU(x) (((x) >> S_MTU) & M_MTU)
#define A_T6_MPS_BMC_BYTE_CNT 0x110c0
+#define A_T7_MPS_BMC_BYTE_CNT 0x110c0
#define A_MPS_BMC_PKT_CNT 0x110c4
#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4
@@ -35154,6 +44089,7 @@
#define V_T6_PFVF(x) ((x) << S_T6_PFVF)
#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF)
+#define A_T7_MPS_PFVF_ATRB_CTL 0x110c4
#define A_MPS_BMC_BYTE_CNT 0x110c8
#define A_T6_MPS_PFVF_ATRB 0x110c8
@@ -35161,6 +44097,12 @@
#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE)
#define F_FULL_FRAME_MODE V_FULL_FRAME_MODE(1U)
+#define A_T7_MPS_PFVF_ATRB 0x110c8
+
+#define S_EXTRACT_DEL_VLAN 31
+#define V_EXTRACT_DEL_VLAN(x) ((x) << S_EXTRACT_DEL_VLAN)
+#define F_EXTRACT_DEL_VLAN V_EXTRACT_DEL_VLAN(1U)
+
#define A_MPS_PFVF_ATRB_CTL 0x110cc
#define S_RD_WRN 31
@@ -35173,6 +44115,7 @@
#define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF)
#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc
+#define A_T7_MPS_PFVF_ATRB_FLTR0 0x110cc
#define A_MPS_PFVF_ATRB 0x110d0
#define S_ATTR_PF 28
@@ -35193,6 +44136,7 @@
#define F_ATTR_MODE V_ATTR_MODE(1U)
#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0
+#define A_T7_MPS_PFVF_ATRB_FLTR1 0x110d0
#define A_MPS_PFVF_ATRB_FLTR0 0x110d4
#define S_VLAN_EN 16
@@ -35205,36 +44149,58 @@
#define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID)
#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4
+#define A_T7_MPS_PFVF_ATRB_FLTR2 0x110d4
#define A_MPS_PFVF_ATRB_FLTR1 0x110d8
#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8
+#define A_T7_MPS_PFVF_ATRB_FLTR3 0x110d8
#define A_MPS_PFVF_ATRB_FLTR2 0x110dc
#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc
+#define A_T7_MPS_PFVF_ATRB_FLTR4 0x110dc
#define A_MPS_PFVF_ATRB_FLTR3 0x110e0
#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0
+#define A_T7_MPS_PFVF_ATRB_FLTR5 0x110e0
#define A_MPS_PFVF_ATRB_FLTR4 0x110e4
#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4
+#define A_T7_MPS_PFVF_ATRB_FLTR6 0x110e4
#define A_MPS_PFVF_ATRB_FLTR5 0x110e8
#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8
+#define A_T7_MPS_PFVF_ATRB_FLTR7 0x110e8
#define A_MPS_PFVF_ATRB_FLTR6 0x110ec
#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec
+#define A_T7_MPS_PFVF_ATRB_FLTR8 0x110ec
#define A_MPS_PFVF_ATRB_FLTR7 0x110f0
#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0
+#define A_T7_MPS_PFVF_ATRB_FLTR9 0x110f0
#define A_MPS_PFVF_ATRB_FLTR8 0x110f4
#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4
+#define A_T7_MPS_PFVF_ATRB_FLTR10 0x110f4
#define A_MPS_PFVF_ATRB_FLTR9 0x110f8
#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8
+#define A_T7_MPS_PFVF_ATRB_FLTR11 0x110f8
#define A_MPS_PFVF_ATRB_FLTR10 0x110fc
#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc
+#define A_T7_MPS_PFVF_ATRB_FLTR12 0x110fc
#define A_MPS_PFVF_ATRB_FLTR11 0x11100
#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100
+#define A_T7_MPS_PFVF_ATRB_FLTR13 0x11100
#define A_MPS_PFVF_ATRB_FLTR12 0x11104
#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104
+#define A_T7_MPS_PFVF_ATRB_FLTR14 0x11104
#define A_MPS_PFVF_ATRB_FLTR13 0x11108
#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108
+#define A_T7_MPS_PFVF_ATRB_FLTR15 0x11108
#define A_MPS_PFVF_ATRB_FLTR14 0x1110c
#define A_T6_MPS_RPLC_MAP_CTL 0x1110c
+#define A_T7_MPS_RPLC_MAP_CTL 0x1110c
+
+#define S_T7_RPLC_MAP_ADDR 0
+#define M_T7_RPLC_MAP_ADDR 0xfffU
+#define V_T7_RPLC_MAP_ADDR(x) ((x) << S_T7_RPLC_MAP_ADDR)
+#define G_T7_RPLC_MAP_ADDR(x) (((x) >> S_T7_RPLC_MAP_ADDR) & M_T7_RPLC_MAP_ADDR)
+
#define A_MPS_PFVF_ATRB_FLTR15 0x11110
#define A_T6_MPS_PF_RPLCT_MAP 0x11110
+#define A_T7_MPS_PF_RPLCT_MAP 0x11110
#define A_MPS_RPLC_MAP_CTL 0x11114
#define S_RPLC_MAP_ADDR 0
@@ -35243,6 +44209,7 @@
#define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR)
#define A_T6_MPS_VF_RPLCT_MAP0 0x11114
+#define A_T7_MPS_VF_RPLCT_MAP0 0x11114
#define A_MPS_PF_RPLCT_MAP 0x11118
#define S_PF_EN 0
@@ -35251,10 +44218,13 @@
#define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN)
#define A_T6_MPS_VF_RPLCT_MAP1 0x11118
+#define A_T7_MPS_VF_RPLCT_MAP1 0x11118
#define A_MPS_VF_RPLCT_MAP0 0x1111c
#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c
+#define A_T7_MPS_VF_RPLCT_MAP2 0x1111c
#define A_MPS_VF_RPLCT_MAP1 0x11120
#define A_T6_MPS_VF_RPLCT_MAP3 0x11120
+#define A_T7_MPS_VF_RPLCT_MAP3 0x11120
#define A_MPS_VF_RPLCT_MAP2 0x11124
#define A_MPS_VF_RPLCT_MAP3 0x11128
#define A_MPS_MEM_DBG_CTL 0x1112c
@@ -35629,9 +44599,13 @@
#define V_CONG_TH(x) ((x) << S_CONG_TH)
#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH)
+#define A_MPS_RX_LPBK_BG_PG_CNT2 0x11220
#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224
+#define A_MPS_RX_LPBK_BG_PG_CNT3 0x11224
#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11228
#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG1 0x1122c
#define A_MPS_RX_GRE_PROT_TYPE 0x11230
#define S_NVGRE_EN 9
@@ -35647,6 +44621,7 @@
#define V_GRE(x) ((x) << S_GRE)
#define G_GRE(x) (((x) >> S_GRE) & M_GRE)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11230
#define A_MPS_RX_VXLAN_TYPE 0x11234
#define S_VXLAN_EN 16
@@ -35658,6 +44633,7 @@
#define V_VXLAN(x) ((x) << S_VXLAN)
#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN)
+#define A_T7_MPS_RX_CONGESTION_THRESHOLD_BG3 0x11234
#define A_MPS_RX_GENEVE_TYPE 0x11238
#define S_GENEVE_EN 16
@@ -35669,12 +44645,14 @@
#define V_GENEVE(x) ((x) << S_GENEVE)
#define G_GENEVE(x) (((x) >> S_GENEVE) & M_GENEVE)
+#define A_T7_MPS_RX_GRE_PROT_TYPE 0x11238
#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c
#define S_T6_IVLAN_EN 16
#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN)
#define F_T6_IVLAN_EN V_T6_IVLAN_EN(1U)
+#define A_T7_MPS_RX_VXLAN_TYPE 0x1123c
#define A_MPS_RX_ENCAP_NVGRE 0x11240
#define S_ETYPE_EN 16
@@ -35686,13 +44664,9 @@
#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
+#define A_T7_MPS_RX_GENEVE_TYPE 0x11240
#define A_MPS_RX_ENCAP_GENEVE 0x11244
-
-#define S_T6_ETYPE 0
-#define M_T6_ETYPE 0xffffU
-#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE)
-#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE)
-
+#define A_T7_MPS_RX_INNER_HDR_IVLAN 0x11244
#define A_MPS_RX_TCP 0x11248
#define S_PROT_TYPE_EN 8
@@ -35704,8 +44678,11 @@
#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE)
#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE)
+#define A_T7_MPS_RX_ENCAP_NVGRE 0x11248
#define A_MPS_RX_UDP 0x1124c
+#define A_T7_MPS_RX_ENCAP_GENEVE 0x1124c
#define A_MPS_RX_PAUSE 0x11250
+#define A_T7_MPS_RX_TCP 0x11250
#define A_MPS_RX_LENGTH 0x11254
#define S_SAP_VALUE 16
@@ -35718,6 +44695,7 @@
#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE)
#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE)
+#define A_T7_MPS_RX_UDP 0x11254
#define A_MPS_RX_CTL_ORG 0x11258
#define S_CTL_VALUE 24
@@ -35730,6 +44708,7 @@
#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE)
#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE)
+#define A_T7_MPS_RX_PAUSE 0x11258
#define A_MPS_RX_IPV4 0x1125c
#define S_ETYPE_IPV4 0
@@ -35737,6 +44716,7 @@
#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4)
#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4)
+#define A_T7_MPS_RX_LENGTH 0x1125c
#define A_MPS_RX_IPV6 0x11260
#define S_ETYPE_IPV6 0
@@ -35744,6 +44724,7 @@
#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6)
#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6)
+#define A_T7_MPS_RX_CTL_ORG 0x11260
#define A_MPS_RX_TTL 0x11264
#define S_TTL_IPV4 10
@@ -35764,6 +44745,7 @@
#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6)
#define F_TTL_CHK_EN_IPV6 V_TTL_CHK_EN_IPV6(1U)
+#define A_T7_MPS_RX_IPV4 0x11264
#define A_MPS_RX_DEFAULT_VNI 0x11268
#define S_VNI 0
@@ -35771,6 +44753,7 @@
#define V_VNI(x) ((x) << S_VNI)
#define G_VNI(x) (((x) >> S_VNI) & M_VNI)
+#define A_T7_MPS_RX_IPV6 0x11268
#define A_MPS_RX_PRS_CTL 0x1126c
#define S_CTL_CHK_EN 28
@@ -35821,6 +44804,7 @@
#define V_DIP_EN(x) ((x) << S_DIP_EN)
#define F_DIP_EN V_DIP_EN(1U)
+#define A_T7_MPS_RX_TTL 0x1126c
#define A_MPS_RX_PRS_CTL_2 0x11270
#define S_EN_UDP_CSUM_CHK 4
@@ -35843,7 +44827,9 @@
#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT)
#define F_T6_IPV6_UDP_CSUM_COMPAT V_T6_IPV6_UDP_CSUM_COMPAT(1U)
+#define A_T7_MPS_RX_DEFAULT_VNI 0x11270
#define A_MPS_RX_MPS2NCSI_CNT 0x11274
+#define A_T7_MPS_RX_PRS_CTL 0x11274
#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278
#define S_T6_LEN 0
@@ -35851,38 +44837,222 @@
#define V_T6_LEN(x) ((x) << S_T6_LEN)
#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN)
+#define A_T7_MPS_RX_PRS_CTL_2 0x11278
+
+#define S_IP_EXT_HDR_EN 5
+#define V_IP_EXT_HDR_EN(x) ((x) << S_IP_EXT_HDR_EN)
+#define F_IP_EXT_HDR_EN V_IP_EXT_HDR_EN(1U)
+
#define A_MPS_RX_PAUSE_DA_H 0x1127c
+#define A_T7_MPS_RX_MPS2NCSI_CNT 0x1127c
#define A_MPS_RX_PAUSE_DA_L 0x11280
+#define A_T7_MPS_RX_MAX_TNL_HDR_LEN 0x11280
+
+#define S_MPS_TNL_HDR_LEN_MODE 9
+#define V_MPS_TNL_HDR_LEN_MODE(x) ((x) << S_MPS_TNL_HDR_LEN_MODE)
+#define F_MPS_TNL_HDR_LEN_MODE V_MPS_TNL_HDR_LEN_MODE(1U)
+
+#define S_MPS_MAX_TNL_HDR_LEN 0
+#define M_MPS_MAX_TNL_HDR_LEN 0x1ffU
+#define V_MPS_MAX_TNL_HDR_LEN(x) ((x) << S_MPS_MAX_TNL_HDR_LEN)
+#define G_MPS_MAX_TNL_HDR_LEN(x) (((x) >> S_MPS_MAX_TNL_HDR_LEN) & M_MPS_MAX_TNL_HDR_LEN)
+
#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284
+#define A_T7_MPS_RX_PAUSE_DA_H 0x11284
#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288
+#define A_T7_MPS_RX_PAUSE_DA_L 0x11288
#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC0 0x1128c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11290
#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC0 0x11294
#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11298
#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_MAC1 0x1129c
#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_MAC1 0x112a0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_MAC1 0x112a4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112ac
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112b0
#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112b4
#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b8
#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112bc
#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112c0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112c4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112cc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112d0
#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112d4
#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d8
#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc
+#define A_T7_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112dc
#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112e0
+#define A_T7_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112e4
+#define A_T7_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e8
+#define A_MPS_RX_ESP 0x112ec
+#define A_MPS_EN_LPBK_BLK_SNDR 0x112f0
+
+#define S_EN_CH3 3
+#define V_EN_CH3(x) ((x) << S_EN_CH3)
+#define F_EN_CH3 V_EN_CH3(1U)
+
+#define S_EN_CH2 2
+#define V_EN_CH2(x) ((x) << S_EN_CH2)
+#define F_EN_CH2 V_EN_CH2(1U)
+
+#define S_EN_CH1 1
+#define V_EN_CH1(x) ((x) << S_EN_CH1)
+#define F_EN_CH1 V_EN_CH1(1U)
+
+#define S_EN_CH0 0
+#define V_EN_CH0(x) ((x) << S_EN_CH0)
+#define F_EN_CH0 V_EN_CH0(1U)
+
#define A_MPS_VF_RPLCT_MAP4 0x11300
#define A_MPS_VF_RPLCT_MAP5 0x11304
#define A_MPS_VF_RPLCT_MAP6 0x11308
#define A_MPS_VF_RPLCT_MAP7 0x1130c
+#define A_MPS_RX_PERR_INT_CAUSE3 0x11310
+#define A_MPS_RX_PERR_INT_ENABLE3 0x11314
+#define A_MPS_RX_PERR_ENABLE3 0x11318
+#define A_MPS_RX_PERR_INT_CAUSE4 0x1131c
+
+#define S_CLS 20
+#define M_CLS 0x3fU
+#define V_CLS(x) ((x) << S_CLS)
+#define G_CLS(x) (((x) >> S_CLS) & M_CLS)
+
+#define S_RX_PRE_PROC 16
+#define M_RX_PRE_PROC 0xfU
+#define V_RX_PRE_PROC(x) ((x) << S_RX_PRE_PROC)
+#define G_RX_PRE_PROC(x) (((x) >> S_RX_PRE_PROC) & M_RX_PRE_PROC)
+
+#define S_PPROC3 12
+#define M_PPROC3 0xfU
+#define V_PPROC3(x) ((x) << S_PPROC3)
+#define G_PPROC3(x) (((x) >> S_PPROC3) & M_PPROC3)
+
+#define S_PPROC2 8
+#define M_PPROC2 0xfU
+#define V_PPROC2(x) ((x) << S_PPROC2)
+#define G_PPROC2(x) (((x) >> S_PPROC2) & M_PPROC2)
+
+#define S_PPROC1 4
+#define M_PPROC1 0xfU
+#define V_PPROC1(x) ((x) << S_PPROC1)
+#define G_PPROC1(x) (((x) >> S_PPROC1) & M_PPROC1)
+
+#define S_PPROC0 0
+#define M_PPROC0 0xfU
+#define V_PPROC0(x) ((x) << S_PPROC0)
+#define G_PPROC0(x) (((x) >> S_PPROC0) & M_PPROC0)
+
+#define A_MPS_RX_PERR_INT_ENABLE4 0x11320
+#define A_MPS_RX_PERR_ENABLE4 0x11324
+#define A_MPS_RX_PERR_INT_CAUSE5 0x11328
+
+#define S_MPS2CRYP_RX_FIFO 26
+#define M_MPS2CRYP_RX_FIFO 0xfU
+#define V_MPS2CRYP_RX_FIFO(x) ((x) << S_MPS2CRYP_RX_FIFO)
+#define G_MPS2CRYP_RX_FIFO(x) (((x) >> S_MPS2CRYP_RX_FIFO) & M_MPS2CRYP_RX_FIFO)
+
+#define S_RX_OUT 20
+#define M_RX_OUT 0x3fU
+#define V_RX_OUT(x) ((x) << S_RX_OUT)
+#define G_RX_OUT(x) (((x) >> S_RX_OUT) & M_RX_OUT)
+
+#define S_MEM_WRAP 0
+#define M_MEM_WRAP 0xfffffU
+#define V_MEM_WRAP(x) ((x) << S_MEM_WRAP)
+#define G_MEM_WRAP(x) (((x) >> S_MEM_WRAP) & M_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE5 0x1132c
+#define A_MPS_RX_PERR_ENABLE5 0x11330
+#define A_MPS_RX_PERR_INT_CAUSE6 0x11334
+
+#define S_MPS_RX_MEM_WRAP 0
+#define M_MPS_RX_MEM_WRAP 0x1ffffffU
+#define V_MPS_RX_MEM_WRAP(x) ((x) << S_MPS_RX_MEM_WRAP)
+#define G_MPS_RX_MEM_WRAP(x) (((x) >> S_MPS_RX_MEM_WRAP) & M_MPS_RX_MEM_WRAP)
+
+#define A_MPS_RX_PERR_INT_ENABLE6 0x11338
+#define A_MPS_RX_PERR_ENABLE6 0x1133c
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC2 0x11408
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC2 0x1140c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC2 0x11410
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC2 0x11414
+#define A_MPS_RX_CNT_NVGRE_PKT_MAC3 0x11418
+#define A_MPS_RX_CNT_VXLAN_PKT_MAC3 0x1141c
+#define A_MPS_RX_CNT_GENEVE_PKT_MAC3 0x11420
+#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC3 0x11424
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK2 0x11428
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK2 0x1142c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK2 0x11430
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK2 0x11434
+#define A_MPS_RX_CNT_NVGRE_PKT_LPBK3 0x11438
+#define A_MPS_RX_CNT_VXLAN_PKT_LPBK3 0x1143c
+#define A_MPS_RX_CNT_GENEVE_PKT_LPBK3 0x11440
+#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK3 0x11444
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP2 0x11448
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP2 0x1144c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP2 0x11450
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP2 0x11454
+#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP3 0x11458
+#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP3 0x1145c
+#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP3 0x11460
+#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP3 0x11464
+#define A_T7_MPS_RX_PT_ARB2 0x11468
+#define A_T7_MPS_RX_PT_ARB3 0x1146c
#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000
+#define A_MPS_CLS_DIP_ID_TABLE_CTL 0x12000
+
+#define S_DIP_VLD 12
+#define V_DIP_VLD(x) ((x) << S_DIP_VLD)
+#define F_DIP_VLD V_DIP_VLD(1U)
+
+#define S_DIP_TYPE 11
+#define V_DIP_TYPE(x) ((x) << S_DIP_TYPE)
+#define F_DIP_TYPE V_DIP_TYPE(1U)
+
+#define S_DIP_WRN 10
+#define V_DIP_WRN(x) ((x) << S_DIP_WRN)
+#define F_DIP_WRN V_DIP_WRN(1U)
+
+#define S_DIP_SEG 8
+#define M_DIP_SEG 0x3U
+#define V_DIP_SEG(x) ((x) << S_DIP_SEG)
+#define G_DIP_SEG(x) (((x) >> S_DIP_SEG) & M_DIP_SEG)
+
+#define S_DIP_TBL_RSVD1 5
+#define M_DIP_TBL_RSVD1 0x7U
+#define V_DIP_TBL_RSVD1(x) ((x) << S_DIP_TBL_RSVD1)
+#define G_DIP_TBL_RSVD1(x) (((x) >> S_DIP_TBL_RSVD1) & M_DIP_TBL_RSVD1)
+
+#define S_DIP_TBL_ADDR 0
+#define M_DIP_TBL_ADDR 0x1fU
+#define V_DIP_TBL_ADDR(x) ((x) << S_DIP_TBL_ADDR)
+#define G_DIP_TBL_ADDR(x) (((x) >> S_DIP_TBL_ADDR) & M_DIP_TBL_ADDR)
+
#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004
+#define A_MPS_CLS_DIP_ID_TABLE_DATA 0x12004
#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020
#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024
#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028
@@ -35892,6 +45062,226 @@
#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038
#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c
#define A_MPS_RX_HASH_LKP_TABLE 0x12060
+#define A_MPS_CLS_DROP_DMAC0_L 0x12070
+#define A_MPS_CLS_DROP_DMAC0_H 0x12074
+
+#define S_DMAC 0
+#define M_DMAC 0xffffU
+#define V_DMAC(x) ((x) << S_DMAC)
+#define G_DMAC(x) (((x) >> S_DMAC) & M_DMAC)
+
+#define A_MPS_CLS_DROP_DMAC1_L 0x12078
+#define A_MPS_CLS_DROP_DMAC1_H 0x1207c
+#define A_MPS_CLS_DROP_DMAC2_L 0x12080
+#define A_MPS_CLS_DROP_DMAC2_H 0x12084
+#define A_MPS_CLS_DROP_DMAC3_L 0x12088
+#define A_MPS_CLS_DROP_DMAC3_H 0x1208c
+#define A_MPS_CLS_DROP_DMAC4_L 0x12090
+#define A_MPS_CLS_DROP_DMAC4_H 0x12094
+#define A_MPS_CLS_DROP_DMAC5_L 0x12098
+#define A_MPS_CLS_DROP_DMAC5_H 0x1209c
+#define A_MPS_CLS_DROP_DMAC6_L 0x120a0
+#define A_MPS_CLS_DROP_DMAC6_H 0x120a4
+#define A_MPS_CLS_DROP_DMAC7_L 0x120a8
+#define A_MPS_CLS_DROP_DMAC7_H 0x120ac
+#define A_MPS_CLS_DROP_DMAC8_L 0x120b0
+#define A_MPS_CLS_DROP_DMAC8_H 0x120b4
+#define A_MPS_CLS_DROP_DMAC9_L 0x120b8
+#define A_MPS_CLS_DROP_DMAC9_H 0x120bc
+#define A_MPS_CLS_DROP_DMAC10_L 0x120c0
+#define A_MPS_CLS_DROP_DMAC10_H 0x120c4
+#define A_MPS_CLS_DROP_DMAC11_L 0x120c8
+#define A_MPS_CLS_DROP_DMAC11_H 0x120cc
+#define A_MPS_CLS_DROP_DMAC12_L 0x120d0
+#define A_MPS_CLS_DROP_DMAC12_H 0x120d4
+#define A_MPS_CLS_DROP_DMAC13_L 0x120d8
+#define A_MPS_CLS_DROP_DMAC13_H 0x120dc
+#define A_MPS_CLS_DROP_DMAC14_L 0x120e0
+#define A_MPS_CLS_DROP_DMAC14_H 0x120e4
+#define A_MPS_CLS_DROP_DMAC15_L 0x120e8
+#define A_MPS_CLS_DROP_DMAC15_H 0x120ec
+#define A_MPS_RX_ENCAP_VXLAN 0x120f0
+#define A_MPS_RX_INT_VXLAN 0x120f4
+
+#define S_INT_TYPE_EN 16
+#define V_INT_TYPE_EN(x) ((x) << S_INT_TYPE_EN)
+#define F_INT_TYPE_EN V_INT_TYPE_EN(1U)
+
+#define S_INT_TYPE 0
+#define M_INT_TYPE 0xffffU
+#define V_INT_TYPE(x) ((x) << S_INT_TYPE)
+#define G_INT_TYPE(x) (((x) >> S_INT_TYPE) & M_INT_TYPE)
+
+#define A_MPS_RX_INT_GENEVE 0x120f8
+#define A_MPS_PFVF_ATRB2 0x120fc
+
+#define S_EXTRACT_DEL_ENCAP 31
+#define V_EXTRACT_DEL_ENCAP(x) ((x) << S_EXTRACT_DEL_ENCAP)
+#define F_EXTRACT_DEL_ENCAP V_EXTRACT_DEL_ENCAP(1U)
+
+#define A_MPS_RX_TRANS_ENCAP_FLTR_CTL 0x12100
+
+#define S_TIMEOUT_FLT_CLR_EN 8
+#define V_TIMEOUT_FLT_CLR_EN(x) ((x) << S_TIMEOUT_FLT_CLR_EN)
+#define F_TIMEOUT_FLT_CLR_EN V_TIMEOUT_FLT_CLR_EN(1U)
+
+#define S_FLTR_TIMEOUT_VAL 0
+#define M_FLTR_TIMEOUT_VAL 0xffU
+#define V_FLTR_TIMEOUT_VAL(x) ((x) << S_FLTR_TIMEOUT_VAL)
+#define G_FLTR_TIMEOUT_VAL(x) (((x) >> S_FLTR_TIMEOUT_VAL) & M_FLTR_TIMEOUT_VAL)
+
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_0 0x12104
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_1 0x12108
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_2 0x1210c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0_3 0x12110
+#define A_MPS_RX_PAUSE_GEN_TH_0_4 0x12114
+#define A_MPS_RX_PAUSE_GEN_TH_0_5 0x12118
+#define A_MPS_RX_PAUSE_GEN_TH_0_6 0x1211c
+#define A_MPS_RX_PAUSE_GEN_TH_0_7 0x12120
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_0 0x12124
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_1 0x12128
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_2 0x1212c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1_3 0x12130
+#define A_MPS_RX_PAUSE_GEN_TH_1_4 0x12134
+#define A_MPS_RX_PAUSE_GEN_TH_1_5 0x12138
+#define A_MPS_RX_PAUSE_GEN_TH_1_6 0x1213c
+#define A_MPS_RX_PAUSE_GEN_TH_1_7 0x12140
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_0 0x12144
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_1 0x12148
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_2 0x1214c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2_3 0x12150
+#define A_MPS_RX_PAUSE_GEN_TH_2_4 0x12154
+#define A_MPS_RX_PAUSE_GEN_TH_2_5 0x12158
+#define A_MPS_RX_PAUSE_GEN_TH_2_6 0x1215c
+#define A_MPS_RX_PAUSE_GEN_TH_2_7 0x12160
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_0 0x12164
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_1 0x12168
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_2 0x1216c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3_3 0x12170
+#define A_MPS_RX_PAUSE_GEN_TH_3_4 0x12174
+#define A_MPS_RX_PAUSE_GEN_TH_3_5 0x12178
+#define A_MPS_RX_PAUSE_GEN_TH_3_6 0x1217c
+#define A_MPS_RX_PAUSE_GEN_TH_3_7 0x12180
+#define A_MPS_RX_DROP_0_0 0x12184
+
+#define S_DROP_TH 0
+#define M_DROP_TH 0xffffU
+#define V_DROP_TH(x) ((x) << S_DROP_TH)
+#define G_DROP_TH(x) (((x) >> S_DROP_TH) & M_DROP_TH)
+
+#define A_MPS_RX_DROP_0_1 0x12188
+#define A_MPS_RX_DROP_0_2 0x1218c
+#define A_MPS_RX_DROP_0_3 0x12190
+#define A_MPS_RX_DROP_0_4 0x12194
+#define A_MPS_RX_DROP_0_5 0x12198
+#define A_MPS_RX_DROP_0_6 0x1219c
+#define A_MPS_RX_DROP_0_7 0x121a0
+#define A_MPS_RX_DROP_1_0 0x121a4
+#define A_MPS_RX_DROP_1_1 0x121a8
+#define A_MPS_RX_DROP_1_2 0x121ac
+#define A_MPS_RX_DROP_1_3 0x121b0
+#define A_MPS_RX_DROP_1_4 0x121b4
+#define A_MPS_RX_DROP_1_5 0x121b8
+#define A_MPS_RX_DROP_1_6 0x121bc
+#define A_MPS_RX_DROP_1_7 0x121c0
+#define A_MPS_RX_DROP_2_0 0x121c4
+#define A_MPS_RX_DROP_2_1 0x121c8
+#define A_MPS_RX_DROP_2_2 0x121cc
+#define A_MPS_RX_DROP_2_3 0x121d0
+#define A_MPS_RX_DROP_2_4 0x121d4
+#define A_MPS_RX_DROP_2_5 0x121d8
+#define A_MPS_RX_DROP_2_6 0x121dc
+#define A_MPS_RX_DROP_2_7 0x121e0
+#define A_MPS_RX_DROP_3_0 0x121e4
+#define A_MPS_RX_DROP_3_1 0x121e8
+#define A_MPS_RX_DROP_3_2 0x121ec
+#define A_MPS_RX_DROP_3_3 0x121f0
+#define A_MPS_RX_DROP_3_4 0x121f4
+#define A_MPS_RX_DROP_3_5 0x121f8
+#define A_MPS_RX_DROP_3_6 0x121fc
+#define A_MPS_RX_DROP_3_7 0x12200
+#define A_MPS_RX_MAC_BG_PG_CNT0_0 0x12204
+#define A_MPS_RX_MAC_BG_PG_CNT0_1 0x12208
+#define A_MPS_RX_MAC_BG_PG_CNT0_2 0x1220c
+#define A_MPS_RX_MAC_BG_PG_CNT0_3 0x12210
+#define A_MPS_RX_MAC_BG_PG_CNT0_4 0x12214
+#define A_MPS_RX_MAC_BG_PG_CNT0_5 0x12218
+#define A_MPS_RX_MAC_BG_PG_CNT0_6 0x1221c
+#define A_MPS_RX_MAC_BG_PG_CNT0_7 0x12220
+#define A_MPS_RX_MAC_BG_PG_CNT1_0 0x12224
+#define A_MPS_RX_MAC_BG_PG_CNT1_1 0x12228
+#define A_MPS_RX_MAC_BG_PG_CNT1_2 0x1222c
+#define A_MPS_RX_MAC_BG_PG_CNT1_3 0x12230
+#define A_MPS_RX_MAC_BG_PG_CNT1_4 0x12234
+#define A_MPS_RX_MAC_BG_PG_CNT1_5 0x12238
+#define A_MPS_RX_MAC_BG_PG_CNT1_6 0x1223c
+#define A_MPS_RX_MAC_BG_PG_CNT1_7 0x12240
+#define A_MPS_RX_MAC_BG_PG_CNT2_0 0x12244
+#define A_MPS_RX_MAC_BG_PG_CNT2_1 0x12248
+#define A_MPS_RX_MAC_BG_PG_CNT2_2 0x1224c
+#define A_MPS_RX_MAC_BG_PG_CNT2_3 0x12250
+#define A_MPS_RX_MAC_BG_PG_CNT2_4 0x12254
+#define A_MPS_RX_MAC_BG_PG_CNT2_5 0x12258
+#define A_MPS_RX_MAC_BG_PG_CNT2_6 0x1225c
+#define A_MPS_RX_MAC_BG_PG_CNT2_7 0x12260
+#define A_MPS_RX_MAC_BG_PG_CNT3_0 0x12264
+#define A_MPS_RX_MAC_BG_PG_CNT3_1 0x12268
+#define A_MPS_RX_MAC_BG_PG_CNT3_2 0x1226c
+#define A_MPS_RX_MAC_BG_PG_CNT3_3 0x12270
+#define A_MPS_RX_MAC_BG_PG_CNT3_4 0x12274
+#define A_MPS_RX_MAC_BG_PG_CNT3_5 0x12278
+#define A_MPS_RX_MAC_BG_PG_CNT3_6 0x1227c
+#define A_MPS_RX_MAC_BG_PG_CNT3_7 0x12280
+#define A_T7_MPS_RX_PAUSE_GEN_TH_0 0x12284
+#define A_T7_MPS_RX_PAUSE_GEN_TH_1 0x12288
+#define A_T7_MPS_RX_PAUSE_GEN_TH_2 0x1228c
+#define A_T7_MPS_RX_PAUSE_GEN_TH_3 0x12290
+#define A_MPS_RX_BG0_IPSEC_CNT 0x12294
+#define A_MPS_RX_BG1_IPSEC_CNT 0x12298
+#define A_MPS_RX_BG2_IPSEC_CNT 0x1229c
+#define A_MPS_RX_BG3_IPSEC_CNT 0x122a0
+#define A_MPS_RX_MEM_FIFO_CONFIG0 0x122a4
+
+#define S_FIFO_CONFIG2 16
+#define M_FIFO_CONFIG2 0xffffU
+#define V_FIFO_CONFIG2(x) ((x) << S_FIFO_CONFIG2)
+#define G_FIFO_CONFIG2(x) (((x) >> S_FIFO_CONFIG2) & M_FIFO_CONFIG2)
+
+#define S_FIFO_CONFIG1 0
+#define M_FIFO_CONFIG1 0xffffU
+#define V_FIFO_CONFIG1(x) ((x) << S_FIFO_CONFIG1)
+#define G_FIFO_CONFIG1(x) (((x) >> S_FIFO_CONFIG1) & M_FIFO_CONFIG1)
+
+#define A_MPS_RX_MEM_FIFO_CONFIG1 0x122a8
+
+#define S_FIFO_CONFIG3 0
+#define M_FIFO_CONFIG3 0xffffU
+#define V_FIFO_CONFIG3(x) ((x) << S_FIFO_CONFIG3)
+#define G_FIFO_CONFIG3(x) (((x) >> S_FIFO_CONFIG3) & M_FIFO_CONFIG3)
+
+#define A_MPS_LPBK_MEM_FIFO_CONFIG0 0x122ac
+#define A_MPS_LPBK_MEM_FIFO_CONFIG1 0x122b0
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG0 0x122b4
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG1 0x122b8
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG2 0x122bc
+#define A_MPS_RX_LPBK_CONGESTION_THRESHOLD_BG3 0x122c0
+#define A_MPS_BG_PAUSE_CTL 0x122c4
+
+#define S_BG0_PAUSE_EN 3
+#define V_BG0_PAUSE_EN(x) ((x) << S_BG0_PAUSE_EN)
+#define F_BG0_PAUSE_EN V_BG0_PAUSE_EN(1U)
+
+#define S_BG1_PAUSE_EN 2
+#define V_BG1_PAUSE_EN(x) ((x) << S_BG1_PAUSE_EN)
+#define F_BG1_PAUSE_EN V_BG1_PAUSE_EN(1U)
+
+#define S_BG2_PAUSE_EN 1
+#define V_BG2_PAUSE_EN(x) ((x) << S_BG2_PAUSE_EN)
+#define F_BG2_PAUSE_EN V_BG2_PAUSE_EN(1U)
+
+#define S_BG3_PAUSE_EN 0
+#define V_BG3_PAUSE_EN(x) ((x) << S_BG3_PAUSE_EN)
+#define F_BG3_PAUSE_EN V_BG3_PAUSE_EN(1U)
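/*
 * Annotation (not part of the diff): every field in this header follows
 * the generated convention where, for a field FOO, S_FOO is the bit
 * offset, M_FOO the right-justified mask, V_FOO(x) positions a value in
 * the field, G_FOO(v) extracts it, and F_FOO is the mask of a
 * single-bit field. A minimal sketch of a read-modify-write using the
 * A_MPS_BG_PAUSE_CTL definitions above; t4_read_reg()/t4_write_reg()
 * are assumed to be the driver's usual 32-bit register accessors, and
 * mps_bg0_pause_enable() is a hypothetical helper name.
 */
static inline void
mps_bg0_pause_enable(struct adapter *sc)
{
	uint32_t v;

	v = t4_read_reg(sc, A_MPS_BG_PAUSE_CTL);
	v |= F_BG0_PAUSE_EN;	/* single-bit field at bit 3 */
	t4_write_reg(sc, A_MPS_BG_PAUSE_CTL, v);
}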
/* registers for module CPL_SWITCH */
#define CPL_SWITCH_BASE_ADDR 0x19040
@@ -35931,6 +45321,7 @@
#define V_CIM_SPLIT_ENABLE(x) ((x) << S_CIM_SPLIT_ENABLE)
#define F_CIM_SPLIT_ENABLE V_CIM_SPLIT_ENABLE(1U)
+#define A_CNTRL 0x19040
#define A_CPL_SWITCH_TBL_IDX 0x19044
#define S_SWITCH_TBL_IDX 0
@@ -35938,7 +45329,9 @@
#define V_SWITCH_TBL_IDX(x) ((x) << S_SWITCH_TBL_IDX)
#define G_SWITCH_TBL_IDX(x) (((x) >> S_SWITCH_TBL_IDX) & M_SWITCH_TBL_IDX)
+#define A_TBL_IDX 0x19044
#define A_CPL_SWITCH_TBL_DATA 0x19048
+#define A_TBL_DATA 0x19048
#define A_CPL_SWITCH_ZERO_ERROR 0x1904c
#define S_ZERO_CMD_CH1 8
@@ -35951,6 +45344,18 @@
#define V_ZERO_CMD_CH0(x) ((x) << S_ZERO_CMD_CH0)
#define G_ZERO_CMD_CH0(x) (((x) >> S_ZERO_CMD_CH0) & M_ZERO_CMD_CH0)
+#define A_ZERO_ERROR 0x1904c
+
+#define S_ZERO_CMD_CH3 24
+#define M_ZERO_CMD_CH3 0xffU
+#define V_ZERO_CMD_CH3(x) ((x) << S_ZERO_CMD_CH3)
+#define G_ZERO_CMD_CH3(x) (((x) >> S_ZERO_CMD_CH3) & M_ZERO_CMD_CH3)
+
+#define S_ZERO_CMD_CH2 16
+#define M_ZERO_CMD_CH2 0xffU
+#define V_ZERO_CMD_CH2(x) ((x) << S_ZERO_CMD_CH2)
+#define G_ZERO_CMD_CH2(x) (((x) >> S_ZERO_CMD_CH2) & M_ZERO_CMD_CH2)
+
#define A_CPL_INTR_ENABLE 0x19050
#define S_CIM_OP_MAP_PERR 5
@@ -35985,7 +45390,18 @@
#define V_PERR_CPL_128TO128_0(x) ((x) << S_PERR_CPL_128TO128_0)
#define F_PERR_CPL_128TO128_0 V_PERR_CPL_128TO128_0(1U)
+#define A_INTR_ENABLE 0x19050
+
+#define S_PERR_CPL_128TO128_3 9
+#define V_PERR_CPL_128TO128_3(x) ((x) << S_PERR_CPL_128TO128_3)
+#define F_PERR_CPL_128TO128_3 V_PERR_CPL_128TO128_3(1U)
+
+#define S_PERR_CPL_128TO128_2 8
+#define V_PERR_CPL_128TO128_2(x) ((x) << S_PERR_CPL_128TO128_2)
+#define F_PERR_CPL_128TO128_2 V_PERR_CPL_128TO128_2(1U)
+
#define A_CPL_INTR_CAUSE 0x19054
+#define A_INTR_CAUSE 0x19054
#define A_CPL_MAP_TBL_IDX 0x19058
#define S_MAP_TBL_IDX 0
@@ -35997,6 +45413,13 @@
#define V_CIM_SPLIT_OPCODE_PROGRAM(x) ((x) << S_CIM_SPLIT_OPCODE_PROGRAM)
#define F_CIM_SPLIT_OPCODE_PROGRAM V_CIM_SPLIT_OPCODE_PROGRAM(1U)
+#define A_MAP_TBL_IDX 0x19058
+
+#define S_CPL_MAP_TBL_SEL 9
+#define M_CPL_MAP_TBL_SEL 0x3U
+#define V_CPL_MAP_TBL_SEL(x) ((x) << S_CPL_MAP_TBL_SEL)
+#define G_CPL_MAP_TBL_SEL(x) (((x) >> S_CPL_MAP_TBL_SEL) & M_CPL_MAP_TBL_SEL)
+
#define A_CPL_MAP_TBL_DATA 0x1905c
#define S_MAP_TBL_DATA 0
@@ -36004,6 +45427,8 @@
#define V_MAP_TBL_DATA(x) ((x) << S_MAP_TBL_DATA)
#define G_MAP_TBL_DATA(x) (((x) >> S_MAP_TBL_DATA) & M_MAP_TBL_DATA)
+#define A_MAP_TBL_DATA 0x1905c
+
/* registers for module SMB */
#define SMB_BASE_ADDR 0x19060
@@ -36019,6 +45444,16 @@
#define V_MICROCNTCFG(x) ((x) << S_MICROCNTCFG)
#define G_MICROCNTCFG(x) (((x) >> S_MICROCNTCFG) & M_MICROCNTCFG)
+#define S_T7_MACROCNTCFG 12
+#define M_T7_MACROCNTCFG 0x1fU
+#define V_T7_MACROCNTCFG(x) ((x) << S_T7_MACROCNTCFG)
+#define G_T7_MACROCNTCFG(x) (((x) >> S_T7_MACROCNTCFG) & M_T7_MACROCNTCFG)
+
+#define S_T7_MICROCNTCFG 0
+#define M_T7_MICROCNTCFG 0xfffU
+#define V_T7_MICROCNTCFG(x) ((x) << S_T7_MICROCNTCFG)
+#define G_T7_MICROCNTCFG(x) (((x) >> S_T7_MICROCNTCFG) & M_T7_MICROCNTCFG)
+
#define A_SMB_MST_TIMEOUT_CFG 0x19064
#define S_MSTTIMEOUTCFG 0
@@ -36685,6 +46120,26 @@
#define V_UART_CLKDIV(x) ((x) << S_UART_CLKDIV)
#define G_UART_CLKDIV(x) (((x) >> S_UART_CLKDIV) & M_UART_CLKDIV)
+#define S_T7_STOPBITS 25
+#define M_T7_STOPBITS 0x3U
+#define V_T7_STOPBITS(x) ((x) << S_T7_STOPBITS)
+#define G_T7_STOPBITS(x) (((x) >> S_T7_STOPBITS) & M_T7_STOPBITS)
+
+#define S_T7_PARITY 23
+#define M_T7_PARITY 0x3U
+#define V_T7_PARITY(x) ((x) << S_T7_PARITY)
+#define G_T7_PARITY(x) (((x) >> S_T7_PARITY) & M_T7_PARITY)
+
+#define S_T7_DATABITS 19
+#define M_T7_DATABITS 0xfU
+#define V_T7_DATABITS(x) ((x) << S_T7_DATABITS)
+#define G_T7_DATABITS(x) (((x) >> S_T7_DATABITS) & M_T7_DATABITS)
+
+#define S_T7_UART_CLKDIV 0
+#define M_T7_UART_CLKDIV 0x3ffffU
+#define V_T7_UART_CLKDIV(x) ((x) << S_T7_UART_CLKDIV)
+#define G_T7_UART_CLKDIV(x) (((x) >> S_T7_UART_CLKDIV) & M_T7_UART_CLKDIV)
+
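/*
 * Annotation (illustrative only): the widened T7 UART fields above
 * compose into one configuration word via the V_ macros. The semantic
 * encodings of the data-bits/parity/stop-bits values are not given by
 * this header, so the parameters below are raw field values, and
 * t7_uart_cfg() is a hypothetical helper name.
 */
static inline uint32_t
t7_uart_cfg(u_int databits, u_int parity, u_int stopbits, u_int clkdiv)
{
	return (V_T7_DATABITS(databits) | V_T7_PARITY(parity) |
	    V_T7_STOPBITS(stopbits) | V_T7_UART_CLKDIV(clkdiv));
}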
/* registers for module PMU */
#define PMU_BASE_ADDR 0x19120
@@ -36767,6 +46222,26 @@
#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK)
#define F_PL_DIS_PRTY_CHK V_PL_DIS_PRTY_CHK(1U)
+#define S_ARM_PART_CGEN 19
+#define V_ARM_PART_CGEN(x) ((x) << S_ARM_PART_CGEN)
+#define F_ARM_PART_CGEN V_ARM_PART_CGEN(1U)
+
+#define S_CRYPTO_PART_CGEN 14
+#define V_CRYPTO_PART_CGEN(x) ((x) << S_CRYPTO_PART_CGEN)
+#define F_CRYPTO_PART_CGEN V_CRYPTO_PART_CGEN(1U)
+
+#define S_NVME_PART_CGEN 9
+#define V_NVME_PART_CGEN(x) ((x) << S_NVME_PART_CGEN)
+#define F_NVME_PART_CGEN V_NVME_PART_CGEN(1U)
+
+#define S_XP10_PART_CGEN 8
+#define V_XP10_PART_CGEN(x) ((x) << S_XP10_PART_CGEN)
+#define F_XP10_PART_CGEN V_XP10_PART_CGEN(1U)
+
+#define S_GPEX_PART_CGEN 7
+#define V_GPEX_PART_CGEN(x) ((x) << S_GPEX_PART_CGEN)
+#define F_GPEX_PART_CGEN V_GPEX_PART_CGEN(1U)
+
#define A_PMU_SLEEPMODE_WAKEUP 0x19124
#define S_HWWAKEUPEN 5
@@ -36861,6 +46336,72 @@
#define V_TDDPTAGTCB(x) ((x) << S_TDDPTAGTCB)
#define F_TDDPTAGTCB V_TDDPTAGTCB(1U)
+#define S_ISCSI_PAGE_SIZE_CHK_ENB 31
+#define V_ISCSI_PAGE_SIZE_CHK_ENB(x) ((x) << S_ISCSI_PAGE_SIZE_CHK_ENB)
+#define F_ISCSI_PAGE_SIZE_CHK_ENB V_ISCSI_PAGE_SIZE_CHK_ENB(1U)
+
+#define S_RDMA_0B_WR_OPCODE_HI 29
+#define V_RDMA_0B_WR_OPCODE_HI(x) ((x) << S_RDMA_0B_WR_OPCODE_HI)
+#define F_RDMA_0B_WR_OPCODE_HI V_RDMA_0B_WR_OPCODE_HI(1U)
+
+#define S_RDMA_IMMEDIATE_CQE 28
+#define V_RDMA_IMMEDIATE_CQE(x) ((x) << S_RDMA_IMMEDIATE_CQE)
+#define F_RDMA_IMMEDIATE_CQE V_RDMA_IMMEDIATE_CQE(1U)
+
+#define S_RDMA_ATOMIC_WR_RSP_CQE 27
+#define V_RDMA_ATOMIC_WR_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_WR_RSP_CQE)
+#define F_RDMA_ATOMIC_WR_RSP_CQE V_RDMA_ATOMIC_WR_RSP_CQE(1U)
+
+#define S_RDMA_VERIFY_RSP_FLUSH 26
+#define V_RDMA_VERIFY_RSP_FLUSH(x) ((x) << S_RDMA_VERIFY_RSP_FLUSH)
+#define F_RDMA_VERIFY_RSP_FLUSH V_RDMA_VERIFY_RSP_FLUSH(1U)
+
+#define S_RDMA_VERIFY_RSP_CQE 25
+#define V_RDMA_VERIFY_RSP_CQE(x) ((x) << S_RDMA_VERIFY_RSP_CQE)
+#define F_RDMA_VERIFY_RSP_CQE V_RDMA_VERIFY_RSP_CQE(1U)
+
+#define S_RDMA_FLUSH_RSP_CQE 24
+#define V_RDMA_FLUSH_RSP_CQE(x) ((x) << S_RDMA_FLUSH_RSP_CQE)
+#define F_RDMA_FLUSH_RSP_CQE V_RDMA_FLUSH_RSP_CQE(1U)
+
+#define S_RDMA_ATOMIC_RSP_CQE 23
+#define V_RDMA_ATOMIC_RSP_CQE(x) ((x) << S_RDMA_ATOMIC_RSP_CQE)
+#define F_RDMA_ATOMIC_RSP_CQE V_RDMA_ATOMIC_RSP_CQE(1U)
+
+#define S_T7_TPT_EXTENSION_MODE 22
+#define V_T7_TPT_EXTENSION_MODE(x) ((x) << S_T7_TPT_EXTENSION_MODE)
+#define F_T7_TPT_EXTENSION_MODE V_T7_TPT_EXTENSION_MODE(1U)
+
+#define S_NVME_TCP_DDP_VAL_EN 21
+#define V_NVME_TCP_DDP_VAL_EN(x) ((x) << S_NVME_TCP_DDP_VAL_EN)
+#define F_NVME_TCP_DDP_VAL_EN V_NVME_TCP_DDP_VAL_EN(1U)
+
+#define S_NVME_TCP_REMOVE_HDR_CRC 20
+#define V_NVME_TCP_REMOVE_HDR_CRC(x) ((x) << S_NVME_TCP_REMOVE_HDR_CRC)
+#define F_NVME_TCP_REMOVE_HDR_CRC V_NVME_TCP_REMOVE_HDR_CRC(1U)
+
+#define S_NVME_TCP_LAST_PDU_CHECK_ENB 19
+#define V_NVME_TCP_LAST_PDU_CHECK_ENB(x) ((x) << S_NVME_TCP_LAST_PDU_CHECK_ENB)
+#define F_NVME_TCP_LAST_PDU_CHECK_ENB V_NVME_TCP_LAST_PDU_CHECK_ENB(1U)
+
+#define S_NVME_TCP_OFFSET_SUBMODE 17
+#define M_NVME_TCP_OFFSET_SUBMODE 0x3U
+#define V_NVME_TCP_OFFSET_SUBMODE(x) ((x) << S_NVME_TCP_OFFSET_SUBMODE)
+#define G_NVME_TCP_OFFSET_SUBMODE(x) (((x) >> S_NVME_TCP_OFFSET_SUBMODE) & M_NVME_TCP_OFFSET_SUBMODE)
+
+#define S_NVME_TCP_OFFSET_MODE 16
+#define V_NVME_TCP_OFFSET_MODE(x) ((x) << S_NVME_TCP_OFFSET_MODE)
+#define F_NVME_TCP_OFFSET_MODE V_NVME_TCP_OFFSET_MODE(1U)
+
+#define S_QPID_CHECK_DISABLE_FOR_SEND 15
+#define V_QPID_CHECK_DISABLE_FOR_SEND(x) ((x) << S_QPID_CHECK_DISABLE_FOR_SEND)
+#define F_QPID_CHECK_DISABLE_FOR_SEND V_QPID_CHECK_DISABLE_FOR_SEND(1U)
+
+#define S_RDMA_0B_WR_OPCODE_LO 10
+#define M_RDMA_0B_WR_OPCODE_LO 0xfU
+#define V_RDMA_0B_WR_OPCODE_LO(x) ((x) << S_RDMA_0B_WR_OPCODE_LO)
+#define G_RDMA_0B_WR_OPCODE_LO(x) (((x) >> S_RDMA_0B_WR_OPCODE_LO) & M_RDMA_0B_WR_OPCODE_LO)
+
#define A_ULP_RX_INT_ENABLE 0x19154
#define S_ENABLE_CTX_1 24
@@ -36971,6 +46512,86 @@
#define V_SE_CNT_MISMATCH_0(x) ((x) << S_SE_CNT_MISMATCH_0)
#define F_SE_CNT_MISMATCH_0 V_SE_CNT_MISMATCH_0(1U)
+#define S_CERR_PCMD_FIFO_3 19
+#define V_CERR_PCMD_FIFO_3(x) ((x) << S_CERR_PCMD_FIFO_3)
+#define F_CERR_PCMD_FIFO_3 V_CERR_PCMD_FIFO_3(1U)
+
+#define S_CERR_PCMD_FIFO_2 18
+#define V_CERR_PCMD_FIFO_2(x) ((x) << S_CERR_PCMD_FIFO_2)
+#define F_CERR_PCMD_FIFO_2 V_CERR_PCMD_FIFO_2(1U)
+
+#define S_CERR_PCMD_FIFO_1 17
+#define V_CERR_PCMD_FIFO_1(x) ((x) << S_CERR_PCMD_FIFO_1)
+#define F_CERR_PCMD_FIFO_1 V_CERR_PCMD_FIFO_1(1U)
+
+#define S_CERR_PCMD_FIFO_0 16
+#define V_CERR_PCMD_FIFO_0(x) ((x) << S_CERR_PCMD_FIFO_0)
+#define F_CERR_PCMD_FIFO_0 V_CERR_PCMD_FIFO_0(1U)
+
+#define S_CERR_DATA_FIFO_3 15
+#define V_CERR_DATA_FIFO_3(x) ((x) << S_CERR_DATA_FIFO_3)
+#define F_CERR_DATA_FIFO_3 V_CERR_DATA_FIFO_3(1U)
+
+#define S_CERR_DATA_FIFO_2 14
+#define V_CERR_DATA_FIFO_2(x) ((x) << S_CERR_DATA_FIFO_2)
+#define F_CERR_DATA_FIFO_2 V_CERR_DATA_FIFO_2(1U)
+
+#define S_CERR_DATA_FIFO_1 13
+#define V_CERR_DATA_FIFO_1(x) ((x) << S_CERR_DATA_FIFO_1)
+#define F_CERR_DATA_FIFO_1 V_CERR_DATA_FIFO_1(1U)
+
+#define S_CERR_DATA_FIFO_0 12
+#define V_CERR_DATA_FIFO_0(x) ((x) << S_CERR_DATA_FIFO_0)
+#define F_CERR_DATA_FIFO_0 V_CERR_DATA_FIFO_0(1U)
+
+#define S_SE_CNT_MISMATCH_3 11
+#define V_SE_CNT_MISMATCH_3(x) ((x) << S_SE_CNT_MISMATCH_3)
+#define F_SE_CNT_MISMATCH_3 V_SE_CNT_MISMATCH_3(1U)
+
+#define S_SE_CNT_MISMATCH_2 10
+#define V_SE_CNT_MISMATCH_2(x) ((x) << S_SE_CNT_MISMATCH_2)
+#define F_SE_CNT_MISMATCH_2 V_SE_CNT_MISMATCH_2(1U)
+
+#define S_T7_SE_CNT_MISMATCH_1 9
+#define V_T7_SE_CNT_MISMATCH_1(x) ((x) << S_T7_SE_CNT_MISMATCH_1)
+#define F_T7_SE_CNT_MISMATCH_1 V_T7_SE_CNT_MISMATCH_1(1U)
+
+#define S_T7_SE_CNT_MISMATCH_0 8
+#define V_T7_SE_CNT_MISMATCH_0(x) ((x) << S_T7_SE_CNT_MISMATCH_0)
+#define F_T7_SE_CNT_MISMATCH_0 V_T7_SE_CNT_MISMATCH_0(1U)
+
+#define S_ENABLE_CTX_3 7
+#define V_ENABLE_CTX_3(x) ((x) << S_ENABLE_CTX_3)
+#define F_ENABLE_CTX_3 V_ENABLE_CTX_3(1U)
+
+#define S_ENABLE_CTX_2 6
+#define V_ENABLE_CTX_2(x) ((x) << S_ENABLE_CTX_2)
+#define F_ENABLE_CTX_2 V_ENABLE_CTX_2(1U)
+
+#define S_T7_ENABLE_CTX_1 5
+#define V_T7_ENABLE_CTX_1(x) ((x) << S_T7_ENABLE_CTX_1)
+#define F_T7_ENABLE_CTX_1 V_T7_ENABLE_CTX_1(1U)
+
+#define S_T7_ENABLE_CTX_0 4
+#define V_T7_ENABLE_CTX_0(x) ((x) << S_T7_ENABLE_CTX_0)
+#define F_T7_ENABLE_CTX_0 V_T7_ENABLE_CTX_0(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_3 3
+#define V_ENABLE_ALN_SDC_ERR_3(x) ((x) << S_ENABLE_ALN_SDC_ERR_3)
+#define F_ENABLE_ALN_SDC_ERR_3 V_ENABLE_ALN_SDC_ERR_3(1U)
+
+#define S_ENABLE_ALN_SDC_ERR_2 2
+#define V_ENABLE_ALN_SDC_ERR_2(x) ((x) << S_ENABLE_ALN_SDC_ERR_2)
+#define F_ENABLE_ALN_SDC_ERR_2 V_ENABLE_ALN_SDC_ERR_2(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_1 1
+#define V_T7_ENABLE_ALN_SDC_ERR_1(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_1)
+#define F_T7_ENABLE_ALN_SDC_ERR_1 V_T7_ENABLE_ALN_SDC_ERR_1(1U)
+
+#define S_T7_ENABLE_ALN_SDC_ERR_0 0
+#define V_T7_ENABLE_ALN_SDC_ERR_0(x) ((x) << S_T7_ENABLE_ALN_SDC_ERR_0)
+#define F_T7_ENABLE_ALN_SDC_ERR_0 V_T7_ENABLE_ALN_SDC_ERR_0(1U)
+
#define A_ULP_RX_INT_CAUSE 0x19158
#define S_CAUSE_CTX_1 24
@@ -37282,6 +46903,312 @@
#define G_ULPRX_TID(x) (((x) >> S_ULPRX_TID) & M_ULPRX_TID)
#define A_ULP_RX_CTX_ACC_CH1 0x191b0
+#define A_ULP_RX_CTX_ACC_CH2 0x191b4
+#define A_ULP_RX_CTX_ACC_CH3 0x191b8
+#define A_ULP_RX_CTL2 0x191bc
+
+#define S_PCMD3THRESHOLD 24
+#define M_PCMD3THRESHOLD 0xffU
+#define V_PCMD3THRESHOLD(x) ((x) << S_PCMD3THRESHOLD)
+#define G_PCMD3THRESHOLD(x) (((x) >> S_PCMD3THRESHOLD) & M_PCMD3THRESHOLD)
+
+#define S_PCMD2THRESHOLD 16
+#define M_PCMD2THRESHOLD 0xffU
+#define V_PCMD2THRESHOLD(x) ((x) << S_PCMD2THRESHOLD)
+#define G_PCMD2THRESHOLD(x) (((x) >> S_PCMD2THRESHOLD) & M_PCMD2THRESHOLD)
+
+#define S_T7_PCMD1THRESHOLD 8
+#define M_T7_PCMD1THRESHOLD 0xffU
+#define V_T7_PCMD1THRESHOLD(x) ((x) << S_T7_PCMD1THRESHOLD)
+#define G_T7_PCMD1THRESHOLD(x) (((x) >> S_T7_PCMD1THRESHOLD) & M_T7_PCMD1THRESHOLD)
+
+#define S_T7_PCMD0THRESHOLD 0
+#define M_T7_PCMD0THRESHOLD 0xffU
+#define V_T7_PCMD0THRESHOLD(x) ((x) << S_T7_PCMD0THRESHOLD)
+#define G_T7_PCMD0THRESHOLD(x) (((x) >> S_T7_PCMD0THRESHOLD) & M_T7_PCMD0THRESHOLD)
+
+#define A_ULP_RX_INT_ENABLE_INTERFACE 0x191c0
+
+#define S_ENABLE_ULPRX2SBT_RSPPERR 31
+#define V_ENABLE_ULPRX2SBT_RSPPERR(x) ((x) << S_ENABLE_ULPRX2SBT_RSPPERR)
+#define F_ENABLE_ULPRX2SBT_RSPPERR V_ENABLE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_ENABLE_ULPRX2MA_RSPPERR 30
+#define V_ENABLE_ULPRX2MA_RSPPERR(x) ((x) << S_ENABLE_ULPRX2MA_RSPPERR)
+#define F_ENABLE_ULPRX2MA_RSPPERR V_ENABLE_ULPRX2MA_RSPPERR(1U)
+
+#define S_ENABLE_PIO_BUS_PERR 29
+#define V_ENABLE_PIO_BUS_PERR(x) ((x) << S_ENABLE_PIO_BUS_PERR)
+#define F_ENABLE_PIO_BUS_PERR V_ENABLE_PIO_BUS_PERR(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_3 19
+#define V_ENABLE_PM2ULP_SNOOPDATA_3(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_3)
+#define F_ENABLE_PM2ULP_SNOOPDATA_3 V_ENABLE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_2 18
+#define V_ENABLE_PM2ULP_SNOOPDATA_2(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_2)
+#define F_ENABLE_PM2ULP_SNOOPDATA_2 V_ENABLE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_1 17
+#define V_ENABLE_PM2ULP_SNOOPDATA_1(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_1)
+#define F_ENABLE_PM2ULP_SNOOPDATA_1 V_ENABLE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_ENABLE_PM2ULP_SNOOPDATA_0 16
+#define V_ENABLE_PM2ULP_SNOOPDATA_0(x) ((x) << S_ENABLE_PM2ULP_SNOOPDATA_0)
+#define F_ENABLE_PM2ULP_SNOOPDATA_0 V_ENABLE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_3 15
+#define V_ENABLE_TLS2ULP_DATA_3(x) ((x) << S_ENABLE_TLS2ULP_DATA_3)
+#define F_ENABLE_TLS2ULP_DATA_3 V_ENABLE_TLS2ULP_DATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_2 14
+#define V_ENABLE_TLS2ULP_DATA_2(x) ((x) << S_ENABLE_TLS2ULP_DATA_2)
+#define F_ENABLE_TLS2ULP_DATA_2 V_ENABLE_TLS2ULP_DATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_1 13
+#define V_ENABLE_TLS2ULP_DATA_1(x) ((x) << S_ENABLE_TLS2ULP_DATA_1)
+#define F_ENABLE_TLS2ULP_DATA_1 V_ENABLE_TLS2ULP_DATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_DATA_0 12
+#define V_ENABLE_TLS2ULP_DATA_0(x) ((x) << S_ENABLE_TLS2ULP_DATA_0)
+#define F_ENABLE_TLS2ULP_DATA_0 V_ENABLE_TLS2ULP_DATA_0(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_3 11
+#define V_ENABLE_TLS2ULP_PLENDATA_3(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_3)
+#define F_ENABLE_TLS2ULP_PLENDATA_3 V_ENABLE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_2 10
+#define V_ENABLE_TLS2ULP_PLENDATA_2(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_2)
+#define F_ENABLE_TLS2ULP_PLENDATA_2 V_ENABLE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_1 9
+#define V_ENABLE_TLS2ULP_PLENDATA_1(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_1)
+#define F_ENABLE_TLS2ULP_PLENDATA_1 V_ENABLE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_ENABLE_TLS2ULP_PLENDATA_0 8
+#define V_ENABLE_TLS2ULP_PLENDATA_0(x) ((x) << S_ENABLE_TLS2ULP_PLENDATA_0)
+#define F_ENABLE_TLS2ULP_PLENDATA_0 V_ENABLE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_ENABLE_PM2ULP_DATA_3 7
+#define V_ENABLE_PM2ULP_DATA_3(x) ((x) << S_ENABLE_PM2ULP_DATA_3)
+#define F_ENABLE_PM2ULP_DATA_3 V_ENABLE_PM2ULP_DATA_3(1U)
+
+#define S_ENABLE_PM2ULP_DATA_2 6
+#define V_ENABLE_PM2ULP_DATA_2(x) ((x) << S_ENABLE_PM2ULP_DATA_2)
+#define F_ENABLE_PM2ULP_DATA_2 V_ENABLE_PM2ULP_DATA_2(1U)
+
+#define S_ENABLE_PM2ULP_DATA_1 5
+#define V_ENABLE_PM2ULP_DATA_1(x) ((x) << S_ENABLE_PM2ULP_DATA_1)
+#define F_ENABLE_PM2ULP_DATA_1 V_ENABLE_PM2ULP_DATA_1(1U)
+
+#define S_ENABLE_PM2ULP_DATA_0 4
+#define V_ENABLE_PM2ULP_DATA_0(x) ((x) << S_ENABLE_PM2ULP_DATA_0)
+#define F_ENABLE_PM2ULP_DATA_0 V_ENABLE_PM2ULP_DATA_0(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_3 3
+#define V_ENABLE_TP2ULP_PCMD_3(x) ((x) << S_ENABLE_TP2ULP_PCMD_3)
+#define F_ENABLE_TP2ULP_PCMD_3 V_ENABLE_TP2ULP_PCMD_3(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_2 2
+#define V_ENABLE_TP2ULP_PCMD_2(x) ((x) << S_ENABLE_TP2ULP_PCMD_2)
+#define F_ENABLE_TP2ULP_PCMD_2 V_ENABLE_TP2ULP_PCMD_2(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_1 1
+#define V_ENABLE_TP2ULP_PCMD_1(x) ((x) << S_ENABLE_TP2ULP_PCMD_1)
+#define F_ENABLE_TP2ULP_PCMD_1 V_ENABLE_TP2ULP_PCMD_1(1U)
+
+#define S_ENABLE_TP2ULP_PCMD_0 0
+#define V_ENABLE_TP2ULP_PCMD_0(x) ((x) << S_ENABLE_TP2ULP_PCMD_0)
+#define F_ENABLE_TP2ULP_PCMD_0 V_ENABLE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_INTERFACE 0x191c4
+
+#define S_CAUSE_ULPRX2SBT_RSPPERR 31
+#define V_CAUSE_ULPRX2SBT_RSPPERR(x) ((x) << S_CAUSE_ULPRX2SBT_RSPPERR)
+#define F_CAUSE_ULPRX2SBT_RSPPERR V_CAUSE_ULPRX2SBT_RSPPERR(1U)
+
+#define S_CAUSE_ULPRX2MA_RSPPERR 30
+#define V_CAUSE_ULPRX2MA_RSPPERR(x) ((x) << S_CAUSE_ULPRX2MA_RSPPERR)
+#define F_CAUSE_ULPRX2MA_RSPPERR V_CAUSE_ULPRX2MA_RSPPERR(1U)
+
+#define S_CAUSE_PIO_BUS_PERR 29
+#define V_CAUSE_PIO_BUS_PERR(x) ((x) << S_CAUSE_PIO_BUS_PERR)
+#define F_CAUSE_PIO_BUS_PERR V_CAUSE_PIO_BUS_PERR(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_3 19
+#define V_CAUSE_PM2ULP_SNOOPDATA_3(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_3)
+#define F_CAUSE_PM2ULP_SNOOPDATA_3 V_CAUSE_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_2 18
+#define V_CAUSE_PM2ULP_SNOOPDATA_2(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_2)
+#define F_CAUSE_PM2ULP_SNOOPDATA_2 V_CAUSE_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_1 17
+#define V_CAUSE_PM2ULP_SNOOPDATA_1(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_1)
+#define F_CAUSE_PM2ULP_SNOOPDATA_1 V_CAUSE_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_CAUSE_PM2ULP_SNOOPDATA_0 16
+#define V_CAUSE_PM2ULP_SNOOPDATA_0(x) ((x) << S_CAUSE_PM2ULP_SNOOPDATA_0)
+#define F_CAUSE_PM2ULP_SNOOPDATA_0 V_CAUSE_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_3 15
+#define V_CAUSE_TLS2ULP_DATA_3(x) ((x) << S_CAUSE_TLS2ULP_DATA_3)
+#define F_CAUSE_TLS2ULP_DATA_3 V_CAUSE_TLS2ULP_DATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_2 14
+#define V_CAUSE_TLS2ULP_DATA_2(x) ((x) << S_CAUSE_TLS2ULP_DATA_2)
+#define F_CAUSE_TLS2ULP_DATA_2 V_CAUSE_TLS2ULP_DATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_1 13
+#define V_CAUSE_TLS2ULP_DATA_1(x) ((x) << S_CAUSE_TLS2ULP_DATA_1)
+#define F_CAUSE_TLS2ULP_DATA_1 V_CAUSE_TLS2ULP_DATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_DATA_0 12
+#define V_CAUSE_TLS2ULP_DATA_0(x) ((x) << S_CAUSE_TLS2ULP_DATA_0)
+#define F_CAUSE_TLS2ULP_DATA_0 V_CAUSE_TLS2ULP_DATA_0(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_3 11
+#define V_CAUSE_TLS2ULP_PLENDATA_3(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_3)
+#define F_CAUSE_TLS2ULP_PLENDATA_3 V_CAUSE_TLS2ULP_PLENDATA_3(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_2 10
+#define V_CAUSE_TLS2ULP_PLENDATA_2(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_2)
+#define F_CAUSE_TLS2ULP_PLENDATA_2 V_CAUSE_TLS2ULP_PLENDATA_2(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_1 9
+#define V_CAUSE_TLS2ULP_PLENDATA_1(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_1)
+#define F_CAUSE_TLS2ULP_PLENDATA_1 V_CAUSE_TLS2ULP_PLENDATA_1(1U)
+
+#define S_CAUSE_TLS2ULP_PLENDATA_0 8
+#define V_CAUSE_TLS2ULP_PLENDATA_0(x) ((x) << S_CAUSE_TLS2ULP_PLENDATA_0)
+#define F_CAUSE_TLS2ULP_PLENDATA_0 V_CAUSE_TLS2ULP_PLENDATA_0(1U)
+
+#define S_CAUSE_PM2ULP_DATA_3 7
+#define V_CAUSE_PM2ULP_DATA_3(x) ((x) << S_CAUSE_PM2ULP_DATA_3)
+#define F_CAUSE_PM2ULP_DATA_3 V_CAUSE_PM2ULP_DATA_3(1U)
+
+#define S_CAUSE_PM2ULP_DATA_2 6
+#define V_CAUSE_PM2ULP_DATA_2(x) ((x) << S_CAUSE_PM2ULP_DATA_2)
+#define F_CAUSE_PM2ULP_DATA_2 V_CAUSE_PM2ULP_DATA_2(1U)
+
+#define S_CAUSE_PM2ULP_DATA_1 5
+#define V_CAUSE_PM2ULP_DATA_1(x) ((x) << S_CAUSE_PM2ULP_DATA_1)
+#define F_CAUSE_PM2ULP_DATA_1 V_CAUSE_PM2ULP_DATA_1(1U)
+
+#define S_CAUSE_PM2ULP_DATA_0 4
+#define V_CAUSE_PM2ULP_DATA_0(x) ((x) << S_CAUSE_PM2ULP_DATA_0)
+#define F_CAUSE_PM2ULP_DATA_0 V_CAUSE_PM2ULP_DATA_0(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_3 3
+#define V_CAUSE_TP2ULP_PCMD_3(x) ((x) << S_CAUSE_TP2ULP_PCMD_3)
+#define F_CAUSE_TP2ULP_PCMD_3 V_CAUSE_TP2ULP_PCMD_3(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_2 2
+#define V_CAUSE_TP2ULP_PCMD_2(x) ((x) << S_CAUSE_TP2ULP_PCMD_2)
+#define F_CAUSE_TP2ULP_PCMD_2 V_CAUSE_TP2ULP_PCMD_2(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_1 1
+#define V_CAUSE_TP2ULP_PCMD_1(x) ((x) << S_CAUSE_TP2ULP_PCMD_1)
+#define F_CAUSE_TP2ULP_PCMD_1 V_CAUSE_TP2ULP_PCMD_1(1U)
+
+#define S_CAUSE_TP2ULP_PCMD_0 0
+#define V_CAUSE_TP2ULP_PCMD_0(x) ((x) << S_CAUSE_TP2ULP_PCMD_0)
+#define F_CAUSE_TP2ULP_PCMD_0 V_CAUSE_TP2ULP_PCMD_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_INTERFACE 0x191c8
+
+#define S_PERR_ULPRX2SBT_RSPPERR 31
+#define V_PERR_ULPRX2SBT_RSPPERR(x) ((x) << S_PERR_ULPRX2SBT_RSPPERR)
+#define F_PERR_ULPRX2SBT_RSPPERR V_PERR_ULPRX2SBT_RSPPERR(1U)
+
+#define S_PERR_ULPRX2MA_RSPPERR 30
+#define V_PERR_ULPRX2MA_RSPPERR(x) ((x) << S_PERR_ULPRX2MA_RSPPERR)
+#define F_PERR_ULPRX2MA_RSPPERR V_PERR_ULPRX2MA_RSPPERR(1U)
+
+#define S_PERR_PIO_BUS_PERR 29
+#define V_PERR_PIO_BUS_PERR(x) ((x) << S_PERR_PIO_BUS_PERR)
+#define F_PERR_PIO_BUS_PERR V_PERR_PIO_BUS_PERR(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_3 19
+#define V_PERR_PM2ULP_SNOOPDATA_3(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_3)
+#define F_PERR_PM2ULP_SNOOPDATA_3 V_PERR_PM2ULP_SNOOPDATA_3(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_2 18
+#define V_PERR_PM2ULP_SNOOPDATA_2(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_2)
+#define F_PERR_PM2ULP_SNOOPDATA_2 V_PERR_PM2ULP_SNOOPDATA_2(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_1 17
+#define V_PERR_PM2ULP_SNOOPDATA_1(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_1)
+#define F_PERR_PM2ULP_SNOOPDATA_1 V_PERR_PM2ULP_SNOOPDATA_1(1U)
+
+#define S_PERR_PM2ULP_SNOOPDATA_0 16
+#define V_PERR_PM2ULP_SNOOPDATA_0(x) ((x) << S_PERR_PM2ULP_SNOOPDATA_0)
+#define F_PERR_PM2ULP_SNOOPDATA_0 V_PERR_PM2ULP_SNOOPDATA_0(1U)
+
+#define S_PERR_TLS2ULP_DATA_3 15
+#define V_PERR_TLS2ULP_DATA_3(x) ((x) << S_PERR_TLS2ULP_DATA_3)
+#define F_PERR_TLS2ULP_DATA_3 V_PERR_TLS2ULP_DATA_3(1U)
+
+#define S_PERR_TLS2ULP_DATA_2 14
+#define V_PERR_TLS2ULP_DATA_2(x) ((x) << S_PERR_TLS2ULP_DATA_2)
+#define F_PERR_TLS2ULP_DATA_2 V_PERR_TLS2ULP_DATA_2(1U)
+
+#define S_PERR_TLS2ULP_DATA_1 13
+#define V_PERR_TLS2ULP_DATA_1(x) ((x) << S_PERR_TLS2ULP_DATA_1)
+#define F_PERR_TLS2ULP_DATA_1 V_PERR_TLS2ULP_DATA_1(1U)
+
+#define S_PERR_TLS2ULP_DATA_0 12
+#define V_PERR_TLS2ULP_DATA_0(x) ((x) << S_PERR_TLS2ULP_DATA_0)
+#define F_PERR_TLS2ULP_DATA_0 V_PERR_TLS2ULP_DATA_0(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_3 11
+#define V_PERR_TLS2ULP_PLENDATA_3(x) ((x) << S_PERR_TLS2ULP_PLENDATA_3)
+#define F_PERR_TLS2ULP_PLENDATA_3 V_PERR_TLS2ULP_PLENDATA_3(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_2 10
+#define V_PERR_TLS2ULP_PLENDATA_2(x) ((x) << S_PERR_TLS2ULP_PLENDATA_2)
+#define F_PERR_TLS2ULP_PLENDATA_2 V_PERR_TLS2ULP_PLENDATA_2(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_1 9
+#define V_PERR_TLS2ULP_PLENDATA_1(x) ((x) << S_PERR_TLS2ULP_PLENDATA_1)
+#define F_PERR_TLS2ULP_PLENDATA_1 V_PERR_TLS2ULP_PLENDATA_1(1U)
+
+#define S_PERR_TLS2ULP_PLENDATA_0 8
+#define V_PERR_TLS2ULP_PLENDATA_0(x) ((x) << S_PERR_TLS2ULP_PLENDATA_0)
+#define F_PERR_TLS2ULP_PLENDATA_0 V_PERR_TLS2ULP_PLENDATA_0(1U)
+
+#define S_PERR_PM2ULP_DATA_3 7
+#define V_PERR_PM2ULP_DATA_3(x) ((x) << S_PERR_PM2ULP_DATA_3)
+#define F_PERR_PM2ULP_DATA_3 V_PERR_PM2ULP_DATA_3(1U)
+
+#define S_PERR_PM2ULP_DATA_2 6
+#define V_PERR_PM2ULP_DATA_2(x) ((x) << S_PERR_PM2ULP_DATA_2)
+#define F_PERR_PM2ULP_DATA_2 V_PERR_PM2ULP_DATA_2(1U)
+
+#define S_PERR_PM2ULP_DATA_1 5
+#define V_PERR_PM2ULP_DATA_1(x) ((x) << S_PERR_PM2ULP_DATA_1)
+#define F_PERR_PM2ULP_DATA_1 V_PERR_PM2ULP_DATA_1(1U)
+
+#define S_PERR_PM2ULP_DATA_0 4
+#define V_PERR_PM2ULP_DATA_0(x) ((x) << S_PERR_PM2ULP_DATA_0)
+#define F_PERR_PM2ULP_DATA_0 V_PERR_PM2ULP_DATA_0(1U)
+
+#define S_PERR_TP2ULP_PCMD_3 3
+#define V_PERR_TP2ULP_PCMD_3(x) ((x) << S_PERR_TP2ULP_PCMD_3)
+#define F_PERR_TP2ULP_PCMD_3 V_PERR_TP2ULP_PCMD_3(1U)
+
+#define S_PERR_TP2ULP_PCMD_2 2
+#define V_PERR_TP2ULP_PCMD_2(x) ((x) << S_PERR_TP2ULP_PCMD_2)
+#define F_PERR_TP2ULP_PCMD_2 V_PERR_TP2ULP_PCMD_2(1U)
+
+#define S_PERR_TP2ULP_PCMD_1 1
+#define V_PERR_TP2ULP_PCMD_1(x) ((x) << S_PERR_TP2ULP_PCMD_1)
+#define F_PERR_TP2ULP_PCMD_1 V_PERR_TP2ULP_PCMD_1(1U)
+
+#define S_PERR_TP2ULP_PCMD_0 0
+#define V_PERR_TP2ULP_PCMD_0(x) ((x) << S_PERR_TP2ULP_PCMD_0)
+#define F_PERR_TP2ULP_PCMD_0 V_PERR_TP2ULP_PCMD_0(1U)
+
#define A_ULP_RX_SE_CNT_ERR 0x191d0
#define A_ULP_RX_SE_CNT_CLR 0x191d4
@@ -37295,6 +47222,26 @@
#define V_CLRCHAN1(x) ((x) << S_CLRCHAN1)
#define G_CLRCHAN1(x) (((x) >> S_CLRCHAN1) & M_CLRCHAN1)
+#define S_CLRCHAN3 12
+#define M_CLRCHAN3 0xfU
+#define V_CLRCHAN3(x) ((x) << S_CLRCHAN3)
+#define G_CLRCHAN3(x) (((x) >> S_CLRCHAN3) & M_CLRCHAN3)
+
+#define S_CLRCHAN2 8
+#define M_CLRCHAN2 0xfU
+#define V_CLRCHAN2(x) ((x) << S_CLRCHAN2)
+#define G_CLRCHAN2(x) (((x) >> S_CLRCHAN2) & M_CLRCHAN2)
+
+#define S_T7_CLRCHAN1 4
+#define M_T7_CLRCHAN1 0xfU
+#define V_T7_CLRCHAN1(x) ((x) << S_T7_CLRCHAN1)
+#define G_T7_CLRCHAN1(x) (((x) >> S_T7_CLRCHAN1) & M_T7_CLRCHAN1)
+
+#define S_T7_CLRCHAN0 0
+#define M_T7_CLRCHAN0 0xfU
+#define V_T7_CLRCHAN0(x) ((x) << S_T7_CLRCHAN0)
+#define G_T7_CLRCHAN0(x) (((x) >> S_T7_CLRCHAN0) & M_T7_CLRCHAN0)
+
#define A_ULP_RX_SE_CNT_CH0 0x191d8
#define S_SOP_CNT_OUT0 28
@@ -37400,6 +47347,7 @@
#define G_SEL_L(x) (((x) >> S_SEL_L) & M_SEL_L)
#define A_ULP_RX_DBG_DATAH 0x191e4
+#define A_ULP_RX_DBG_DATA 0x191e4
#define A_ULP_RX_DBG_DATAL 0x191e8
#define A_ULP_RX_LA_CHNL 0x19238
@@ -37581,6 +47529,11 @@
#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE)
#define F_PIO_RDMA_SEND_RQE V_PIO_RDMA_SEND_RQE(1U)
+#define S_TLS_KEYSIZECONF 26
+#define M_TLS_KEYSIZECONF 0x3U
+#define V_TLS_KEYSIZECONF(x) ((x) << S_TLS_KEYSIZECONF)
+#define G_TLS_KEYSIZECONF(x) (((x) >> S_TLS_KEYSIZECONF) & M_TLS_KEYSIZECONF)
+
#define A_ULP_RX_CH0_CGEN 0x19260
#define S_BYPASS_CGEN 7
@@ -37615,7 +47568,61 @@
#define V_RDMA_DATAPATH_CGEN(x) ((x) << S_RDMA_DATAPATH_CGEN)
#define F_RDMA_DATAPATH_CGEN V_RDMA_DATAPATH_CGEN(1U)
+#define A_ULP_RX_CH_CGEN 0x19260
+
+#define S_T7_BYPASS_CGEN 28
+#define M_T7_BYPASS_CGEN 0xfU
+#define V_T7_BYPASS_CGEN(x) ((x) << S_T7_BYPASS_CGEN)
+#define G_T7_BYPASS_CGEN(x) (((x) >> S_T7_BYPASS_CGEN) & M_T7_BYPASS_CGEN)
+
+#define S_T7_TDDP_CGEN 24
+#define M_T7_TDDP_CGEN 0xfU
+#define V_T7_TDDP_CGEN(x) ((x) << S_T7_TDDP_CGEN)
+#define G_T7_TDDP_CGEN(x) (((x) >> S_T7_TDDP_CGEN) & M_T7_TDDP_CGEN)
+
+#define S_T7_ISCSI_CGEN 20
+#define M_T7_ISCSI_CGEN 0xfU
+#define V_T7_ISCSI_CGEN(x) ((x) << S_T7_ISCSI_CGEN)
+#define G_T7_ISCSI_CGEN(x) (((x) >> S_T7_ISCSI_CGEN) & M_T7_ISCSI_CGEN)
+
+#define S_T7_RDMA_CGEN 16
+#define M_T7_RDMA_CGEN 0xfU
+#define V_T7_RDMA_CGEN(x) ((x) << S_T7_RDMA_CGEN)
+#define G_T7_RDMA_CGEN(x) (((x) >> S_T7_RDMA_CGEN) & M_T7_RDMA_CGEN)
+
+#define S_T7_CHANNEL_CGEN 12
+#define M_T7_CHANNEL_CGEN 0xfU
+#define V_T7_CHANNEL_CGEN(x) ((x) << S_T7_CHANNEL_CGEN)
+#define G_T7_CHANNEL_CGEN(x) (((x) >> S_T7_CHANNEL_CGEN) & M_T7_CHANNEL_CGEN)
+
+#define S_T7_ALL_DATAPATH_CGEN 8
+#define M_T7_ALL_DATAPATH_CGEN 0xfU
+#define V_T7_ALL_DATAPATH_CGEN(x) ((x) << S_T7_ALL_DATAPATH_CGEN)
+#define G_T7_ALL_DATAPATH_CGEN(x) (((x) >> S_T7_ALL_DATAPATH_CGEN) & M_T7_ALL_DATAPATH_CGEN)
+
+#define S_T7_T10DIFF_DATAPATH_CGEN 4
+#define M_T7_T10DIFF_DATAPATH_CGEN 0xfU
+#define V_T7_T10DIFF_DATAPATH_CGEN(x) ((x) << S_T7_T10DIFF_DATAPATH_CGEN)
+#define G_T7_T10DIFF_DATAPATH_CGEN(x) (((x) >> S_T7_T10DIFF_DATAPATH_CGEN) & M_T7_T10DIFF_DATAPATH_CGEN)
+
+#define S_T7_RDMA_DATAPATH_CGEN 0
+#define M_T7_RDMA_DATAPATH_CGEN 0xfU
+#define V_T7_RDMA_DATAPATH_CGEN(x) ((x) << S_T7_RDMA_DATAPATH_CGEN)
+#define G_T7_RDMA_DATAPATH_CGEN(x) (((x) >> S_T7_RDMA_DATAPATH_CGEN) & M_T7_RDMA_DATAPATH_CGEN)
+
#define A_ULP_RX_CH1_CGEN 0x19264
+#define A_ULP_RX_CH_CGEN_1 0x19264
+
+#define S_NVME_TCP_CGEN 4
+#define M_NVME_TCP_CGEN 0xfU
+#define V_NVME_TCP_CGEN(x) ((x) << S_NVME_TCP_CGEN)
+#define G_NVME_TCP_CGEN(x) (((x) >> S_NVME_TCP_CGEN) & M_NVME_TCP_CGEN)
+
+#define S_ROCE_CGEN 0
+#define M_ROCE_CGEN 0xfU
+#define V_ROCE_CGEN(x) ((x) << S_ROCE_CGEN)
+#define G_ROCE_CGEN(x) (((x) >> S_ROCE_CGEN) & M_ROCE_CGEN)
+
#define A_ULP_RX_RFE_DISABLE 0x19268
#define S_RQE_LIM_CHECK_RFE_DISABLE 0
@@ -37742,6 +47749,30 @@
#define V_SKIP_MA_REQ_EN0(x) ((x) << S_SKIP_MA_REQ_EN0)
#define F_SKIP_MA_REQ_EN0 V_SKIP_MA_REQ_EN0(1U)
+#define S_CLEAR_CTX_ERR_CNT3 7
+#define V_CLEAR_CTX_ERR_CNT3(x) ((x) << S_CLEAR_CTX_ERR_CNT3)
+#define F_CLEAR_CTX_ERR_CNT3 V_CLEAR_CTX_ERR_CNT3(1U)
+
+#define S_CLEAR_CTX_ERR_CNT2 6
+#define V_CLEAR_CTX_ERR_CNT2(x) ((x) << S_CLEAR_CTX_ERR_CNT2)
+#define F_CLEAR_CTX_ERR_CNT2 V_CLEAR_CTX_ERR_CNT2(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT1 5
+#define V_T7_CLEAR_CTX_ERR_CNT1(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT1)
+#define F_T7_CLEAR_CTX_ERR_CNT1 V_T7_CLEAR_CTX_ERR_CNT1(1U)
+
+#define S_T7_CLEAR_CTX_ERR_CNT0 4
+#define V_T7_CLEAR_CTX_ERR_CNT0(x) ((x) << S_T7_CLEAR_CTX_ERR_CNT0)
+#define F_T7_CLEAR_CTX_ERR_CNT0 V_T7_CLEAR_CTX_ERR_CNT0(1U)
+
+#define S_SKIP_MA_REQ_EN3 3
+#define V_SKIP_MA_REQ_EN3(x) ((x) << S_SKIP_MA_REQ_EN3)
+#define F_SKIP_MA_REQ_EN3 V_SKIP_MA_REQ_EN3(1U)
+
+#define S_SKIP_MA_REQ_EN2 2
+#define V_SKIP_MA_REQ_EN2(x) ((x) << S_SKIP_MA_REQ_EN2)
+#define F_SKIP_MA_REQ_EN2 V_SKIP_MA_REQ_EN2(1U)
+
#define A_ULP_RX_CHNL0_CTX_ERROR_COUNT_PER_TID 0x19288
#define A_ULP_RX_CHNL1_CTX_ERROR_COUNT_PER_TID 0x1928c
#define A_ULP_RX_MSN_CHECK_ENABLE 0x19290
@@ -37758,6 +47789,92 @@
#define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE)
#define F_SEND_MSN_CHECK_ENABLE V_SEND_MSN_CHECK_ENABLE(1U)
+#define A_ULP_RX_SE_CNT_CH2 0x19294
+
+#define S_SOP_CNT_OUT2 28
+#define M_SOP_CNT_OUT2 0xfU
+#define V_SOP_CNT_OUT2(x) ((x) << S_SOP_CNT_OUT2)
+#define G_SOP_CNT_OUT2(x) (((x) >> S_SOP_CNT_OUT2) & M_SOP_CNT_OUT2)
+
+#define S_EOP_CNT_OUT2 24
+#define M_EOP_CNT_OUT2 0xfU
+#define V_EOP_CNT_OUT2(x) ((x) << S_EOP_CNT_OUT2)
+#define G_EOP_CNT_OUT2(x) (((x) >> S_EOP_CNT_OUT2) & M_EOP_CNT_OUT2)
+
+#define S_SOP_CNT_AL2 20
+#define M_SOP_CNT_AL2 0xfU
+#define V_SOP_CNT_AL2(x) ((x) << S_SOP_CNT_AL2)
+#define G_SOP_CNT_AL2(x) (((x) >> S_SOP_CNT_AL2) & M_SOP_CNT_AL2)
+
+#define S_EOP_CNT_AL2 16
+#define M_EOP_CNT_AL2 0xfU
+#define V_EOP_CNT_AL2(x) ((x) << S_EOP_CNT_AL2)
+#define G_EOP_CNT_AL2(x) (((x) >> S_EOP_CNT_AL2) & M_EOP_CNT_AL2)
+
+#define S_SOP_CNT_MR2 12
+#define M_SOP_CNT_MR2 0xfU
+#define V_SOP_CNT_MR2(x) ((x) << S_SOP_CNT_MR2)
+#define G_SOP_CNT_MR2(x) (((x) >> S_SOP_CNT_MR2) & M_SOP_CNT_MR2)
+
+#define S_EOP_CNT_MR2 8
+#define M_EOP_CNT_MR2 0xfU
+#define V_EOP_CNT_MR2(x) ((x) << S_EOP_CNT_MR2)
+#define G_EOP_CNT_MR2(x) (((x) >> S_EOP_CNT_MR2) & M_EOP_CNT_MR2)
+
+#define S_SOP_CNT_IN2 4
+#define M_SOP_CNT_IN2 0xfU
+#define V_SOP_CNT_IN2(x) ((x) << S_SOP_CNT_IN2)
+#define G_SOP_CNT_IN2(x) (((x) >> S_SOP_CNT_IN2) & M_SOP_CNT_IN2)
+
+#define S_EOP_CNT_IN2 0
+#define M_EOP_CNT_IN2 0xfU
+#define V_EOP_CNT_IN2(x) ((x) << S_EOP_CNT_IN2)
+#define G_EOP_CNT_IN2(x) (((x) >> S_EOP_CNT_IN2) & M_EOP_CNT_IN2)
+
+#define A_ULP_RX_SE_CNT_CH3 0x19298
+
+#define S_SOP_CNT_OUT3 28
+#define M_SOP_CNT_OUT3 0xfU
+#define V_SOP_CNT_OUT3(x) ((x) << S_SOP_CNT_OUT3)
+#define G_SOP_CNT_OUT3(x) (((x) >> S_SOP_CNT_OUT3) & M_SOP_CNT_OUT3)
+
+#define S_EOP_CNT_OUT3 24
+#define M_EOP_CNT_OUT3 0xfU
+#define V_EOP_CNT_OUT3(x) ((x) << S_EOP_CNT_OUT3)
+#define G_EOP_CNT_OUT3(x) (((x) >> S_EOP_CNT_OUT3) & M_EOP_CNT_OUT3)
+
+#define S_SOP_CNT_AL3 20
+#define M_SOP_CNT_AL3 0xfU
+#define V_SOP_CNT_AL3(x) ((x) << S_SOP_CNT_AL3)
+#define G_SOP_CNT_AL3(x) (((x) >> S_SOP_CNT_AL3) & M_SOP_CNT_AL3)
+
+#define S_EOP_CNT_AL3 16
+#define M_EOP_CNT_AL3 0xfU
+#define V_EOP_CNT_AL3(x) ((x) << S_EOP_CNT_AL3)
+#define G_EOP_CNT_AL3(x) (((x) >> S_EOP_CNT_AL3) & M_EOP_CNT_AL3)
+
+#define S_SOP_CNT_MR3 12
+#define M_SOP_CNT_MR3 0xfU
+#define V_SOP_CNT_MR3(x) ((x) << S_SOP_CNT_MR3)
+#define G_SOP_CNT_MR3(x) (((x) >> S_SOP_CNT_MR3) & M_SOP_CNT_MR3)
+
+#define S_EOP_CNT_MR3 8
+#define M_EOP_CNT_MR3 0xfU
+#define V_EOP_CNT_MR3(x) ((x) << S_EOP_CNT_MR3)
+#define G_EOP_CNT_MR3(x) (((x) >> S_EOP_CNT_MR3) & M_EOP_CNT_MR3)
+
+#define S_SOP_CNT_IN3 4
+#define M_SOP_CNT_IN3 0xfU
+#define V_SOP_CNT_IN3(x) ((x) << S_SOP_CNT_IN3)
+#define G_SOP_CNT_IN3(x) (((x) >> S_SOP_CNT_IN3) & M_SOP_CNT_IN3)
+
+#define S_EOP_CNT_IN3 0
+#define M_EOP_CNT_IN3 0xfU
+#define V_EOP_CNT_IN3(x) ((x) << S_EOP_CNT_IN3)
+#define G_EOP_CNT_IN3(x) (((x) >> S_EOP_CNT_IN3) & M_EOP_CNT_IN3)
+
+#define A_ULP_RX_CHNL2_CTX_ERROR_COUNT_PER_TID 0x1929c
+#define A_ULP_RX_CHNL3_CTX_ERROR_COUNT_PER_TID 0x192a0
#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4
#define S_TLSPPLLIMIT 6
@@ -37787,6 +47904,933 @@
#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT)
#define A_ULP_RX_TLS_CTL 0x192bc
+#define A_ULP_RX_RRQ_LLIMIT 0x192c0
+#define A_ULP_RX_RRQ_ULIMIT 0x192c4
+#define A_ULP_RX_NVME_TCP_STAG_LLIMIT 0x192c8
+#define A_ULP_RX_NVME_TCP_STAG_ULIMIT 0x192cc
+#define A_ULP_RX_NVME_TCP_RQ_LLIMIT 0x192d0
+#define A_ULP_RX_NVME_TCP_RQ_ULIMIT 0x192d4
+#define A_ULP_RX_NVME_TCP_PBL_LLIMIT 0x192d8
+#define A_ULP_RX_NVME_TCP_PBL_ULIMIT 0x192dc
+#define A_ULP_RX_NVME_TCP_MAX_LENGTH 0x192e0
+
+#define S_NVME_TCP_MAX_PLEN01 24
+#define M_NVME_TCP_MAX_PLEN01 0xffU
+#define V_NVME_TCP_MAX_PLEN01(x) ((x) << S_NVME_TCP_MAX_PLEN01)
+#define G_NVME_TCP_MAX_PLEN01(x) (((x) >> S_NVME_TCP_MAX_PLEN01) & M_NVME_TCP_MAX_PLEN01)
+
+#define S_NVME_TCP_MAX_PLEN23 16
+#define M_NVME_TCP_MAX_PLEN23 0xffU
+#define V_NVME_TCP_MAX_PLEN23(x) ((x) << S_NVME_TCP_MAX_PLEN23)
+#define G_NVME_TCP_MAX_PLEN23(x) (((x) >> S_NVME_TCP_MAX_PLEN23) & M_NVME_TCP_MAX_PLEN23)
+
+#define S_NVME_TCP_MAX_CMD_PDU_LENGTH 0
+#define M_NVME_TCP_MAX_CMD_PDU_LENGTH 0xffffU
+#define V_NVME_TCP_MAX_CMD_PDU_LENGTH(x) ((x) << S_NVME_TCP_MAX_CMD_PDU_LENGTH)
+#define G_NVME_TCP_MAX_CMD_PDU_LENGTH(x) (((x) >> S_NVME_TCP_MAX_CMD_PDU_LENGTH) & M_NVME_TCP_MAX_CMD_PDU_LENGTH)
+
+#define A_ULP_RX_NVME_TCP_IQE_SIZE 0x192e4
+#define A_ULP_RX_NVME_TCP_NEW_PDU_TYPES 0x192e8
+#define A_ULP_RX_IWARP_PMOF_OPCODES_1 0x192ec
+#define A_ULP_RX_IWARP_PMOF_OPCODES_2 0x192f0
+#define A_ULP_RX_INT_ENABLE_PCMD 0x19300
+
+#define S_ENABLE_PCMD_SFIFO_3 30
+#define V_ENABLE_PCMD_SFIFO_3(x) ((x) << S_ENABLE_PCMD_SFIFO_3)
+#define F_ENABLE_PCMD_SFIFO_3 V_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_ENABLE_PCMD_FIFO_3 29
+#define V_ENABLE_PCMD_FIFO_3(x) ((x) << S_ENABLE_PCMD_FIFO_3)
+#define F_ENABLE_PCMD_FIFO_3 V_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_3 28
+#define V_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_ENABLE_PCMD_DDP_HINT_3)
+#define F_ENABLE_PCMD_DDP_HINT_3 V_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_ENABLE_PCMD_TPT_3 27
+#define V_ENABLE_PCMD_TPT_3(x) ((x) << S_ENABLE_PCMD_TPT_3)
+#define F_ENABLE_PCMD_TPT_3 V_ENABLE_PCMD_TPT_3(1U)
+
+#define S_ENABLE_PCMD_DDP_3 26
+#define V_ENABLE_PCMD_DDP_3(x) ((x) << S_ENABLE_PCMD_DDP_3)
+#define F_ENABLE_PCMD_DDP_3 V_ENABLE_PCMD_DDP_3(1U)
+
+#define S_ENABLE_PCMD_MPAR_3 25
+#define V_ENABLE_PCMD_MPAR_3(x) ((x) << S_ENABLE_PCMD_MPAR_3)
+#define F_ENABLE_PCMD_MPAR_3 V_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_ENABLE_PCMD_MPAC_3 24
+#define V_ENABLE_PCMD_MPAC_3(x) ((x) << S_ENABLE_PCMD_MPAC_3)
+#define F_ENABLE_PCMD_MPAC_3 V_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_ENABLE_PCMD_SFIFO_2 22
+#define V_ENABLE_PCMD_SFIFO_2(x) ((x) << S_ENABLE_PCMD_SFIFO_2)
+#define F_ENABLE_PCMD_SFIFO_2 V_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_ENABLE_PCMD_FIFO_2 21
+#define V_ENABLE_PCMD_FIFO_2(x) ((x) << S_ENABLE_PCMD_FIFO_2)
+#define F_ENABLE_PCMD_FIFO_2 V_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_2 20
+#define V_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_ENABLE_PCMD_DDP_HINT_2)
+#define F_ENABLE_PCMD_DDP_HINT_2 V_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_ENABLE_PCMD_TPT_2 19
+#define V_ENABLE_PCMD_TPT_2(x) ((x) << S_ENABLE_PCMD_TPT_2)
+#define F_ENABLE_PCMD_TPT_2 V_ENABLE_PCMD_TPT_2(1U)
+
+#define S_ENABLE_PCMD_DDP_2 18
+#define V_ENABLE_PCMD_DDP_2(x) ((x) << S_ENABLE_PCMD_DDP_2)
+#define F_ENABLE_PCMD_DDP_2 V_ENABLE_PCMD_DDP_2(1U)
+
+#define S_ENABLE_PCMD_MPAR_2 17
+#define V_ENABLE_PCMD_MPAR_2(x) ((x) << S_ENABLE_PCMD_MPAR_2)
+#define F_ENABLE_PCMD_MPAR_2 V_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_ENABLE_PCMD_MPAC_2 16
+#define V_ENABLE_PCMD_MPAC_2(x) ((x) << S_ENABLE_PCMD_MPAC_2)
+#define F_ENABLE_PCMD_MPAC_2 V_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_ENABLE_PCMD_SFIFO_1 14
+#define V_ENABLE_PCMD_SFIFO_1(x) ((x) << S_ENABLE_PCMD_SFIFO_1)
+#define F_ENABLE_PCMD_SFIFO_1 V_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_ENABLE_PCMD_FIFO_1 13
+#define V_ENABLE_PCMD_FIFO_1(x) ((x) << S_ENABLE_PCMD_FIFO_1)
+#define F_ENABLE_PCMD_FIFO_1 V_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_1 12
+#define V_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_ENABLE_PCMD_DDP_HINT_1)
+#define F_ENABLE_PCMD_DDP_HINT_1 V_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_ENABLE_PCMD_TPT_1 11
+#define V_ENABLE_PCMD_TPT_1(x) ((x) << S_ENABLE_PCMD_TPT_1)
+#define F_ENABLE_PCMD_TPT_1 V_ENABLE_PCMD_TPT_1(1U)
+
+#define S_ENABLE_PCMD_DDP_1 10
+#define V_ENABLE_PCMD_DDP_1(x) ((x) << S_ENABLE_PCMD_DDP_1)
+#define F_ENABLE_PCMD_DDP_1 V_ENABLE_PCMD_DDP_1(1U)
+
+#define S_ENABLE_PCMD_MPAR_1 9
+#define V_ENABLE_PCMD_MPAR_1(x) ((x) << S_ENABLE_PCMD_MPAR_1)
+#define F_ENABLE_PCMD_MPAR_1 V_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_ENABLE_PCMD_MPAC_1 8
+#define V_ENABLE_PCMD_MPAC_1(x) ((x) << S_ENABLE_PCMD_MPAC_1)
+#define F_ENABLE_PCMD_MPAC_1 V_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_ENABLE_PCMD_SFIFO_0 6
+#define V_ENABLE_PCMD_SFIFO_0(x) ((x) << S_ENABLE_PCMD_SFIFO_0)
+#define F_ENABLE_PCMD_SFIFO_0 V_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_ENABLE_PCMD_FIFO_0 5
+#define V_ENABLE_PCMD_FIFO_0(x) ((x) << S_ENABLE_PCMD_FIFO_0)
+#define F_ENABLE_PCMD_FIFO_0 V_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_ENABLE_PCMD_DDP_HINT_0 4
+#define V_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_ENABLE_PCMD_DDP_HINT_0)
+#define F_ENABLE_PCMD_DDP_HINT_0 V_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_ENABLE_PCMD_TPT_0 3
+#define V_ENABLE_PCMD_TPT_0(x) ((x) << S_ENABLE_PCMD_TPT_0)
+#define F_ENABLE_PCMD_TPT_0 V_ENABLE_PCMD_TPT_0(1U)
+
+#define S_ENABLE_PCMD_DDP_0 2
+#define V_ENABLE_PCMD_DDP_0(x) ((x) << S_ENABLE_PCMD_DDP_0)
+#define F_ENABLE_PCMD_DDP_0 V_ENABLE_PCMD_DDP_0(1U)
+
+#define S_ENABLE_PCMD_MPAR_0 1
+#define V_ENABLE_PCMD_MPAR_0(x) ((x) << S_ENABLE_PCMD_MPAR_0)
+#define F_ENABLE_PCMD_MPAR_0 V_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_ENABLE_PCMD_MPAC_0 0
+#define V_ENABLE_PCMD_MPAC_0(x) ((x) << S_ENABLE_PCMD_MPAC_0)
+#define F_ENABLE_PCMD_MPAC_0 V_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_PCMD 0x19304
+
+#define S_CAUSE_PCMD_SFIFO_3 30
+#define V_CAUSE_PCMD_SFIFO_3(x) ((x) << S_CAUSE_PCMD_SFIFO_3)
+#define F_CAUSE_PCMD_SFIFO_3 V_CAUSE_PCMD_SFIFO_3(1U)
+
+#define S_CAUSE_PCMD_FIFO_3 29
+#define V_CAUSE_PCMD_FIFO_3(x) ((x) << S_CAUSE_PCMD_FIFO_3)
+#define F_CAUSE_PCMD_FIFO_3 V_CAUSE_PCMD_FIFO_3(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_3 28
+#define V_CAUSE_PCMD_DDP_HINT_3(x) ((x) << S_CAUSE_PCMD_DDP_HINT_3)
+#define F_CAUSE_PCMD_DDP_HINT_3 V_CAUSE_PCMD_DDP_HINT_3(1U)
+
+#define S_CAUSE_PCMD_TPT_3 27
+#define V_CAUSE_PCMD_TPT_3(x) ((x) << S_CAUSE_PCMD_TPT_3)
+#define F_CAUSE_PCMD_TPT_3 V_CAUSE_PCMD_TPT_3(1U)
+
+#define S_CAUSE_PCMD_DDP_3 26
+#define V_CAUSE_PCMD_DDP_3(x) ((x) << S_CAUSE_PCMD_DDP_3)
+#define F_CAUSE_PCMD_DDP_3 V_CAUSE_PCMD_DDP_3(1U)
+
+#define S_CAUSE_PCMD_MPAR_3 25
+#define V_CAUSE_PCMD_MPAR_3(x) ((x) << S_CAUSE_PCMD_MPAR_3)
+#define F_CAUSE_PCMD_MPAR_3 V_CAUSE_PCMD_MPAR_3(1U)
+
+#define S_CAUSE_PCMD_MPAC_3 24
+#define V_CAUSE_PCMD_MPAC_3(x) ((x) << S_CAUSE_PCMD_MPAC_3)
+#define F_CAUSE_PCMD_MPAC_3 V_CAUSE_PCMD_MPAC_3(1U)
+
+#define S_CAUSE_PCMD_SFIFO_2 22
+#define V_CAUSE_PCMD_SFIFO_2(x) ((x) << S_CAUSE_PCMD_SFIFO_2)
+#define F_CAUSE_PCMD_SFIFO_2 V_CAUSE_PCMD_SFIFO_2(1U)
+
+#define S_CAUSE_PCMD_FIFO_2 21
+#define V_CAUSE_PCMD_FIFO_2(x) ((x) << S_CAUSE_PCMD_FIFO_2)
+#define F_CAUSE_PCMD_FIFO_2 V_CAUSE_PCMD_FIFO_2(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_2 20
+#define V_CAUSE_PCMD_DDP_HINT_2(x) ((x) << S_CAUSE_PCMD_DDP_HINT_2)
+#define F_CAUSE_PCMD_DDP_HINT_2 V_CAUSE_PCMD_DDP_HINT_2(1U)
+
+#define S_CAUSE_PCMD_TPT_2 19
+#define V_CAUSE_PCMD_TPT_2(x) ((x) << S_CAUSE_PCMD_TPT_2)
+#define F_CAUSE_PCMD_TPT_2 V_CAUSE_PCMD_TPT_2(1U)
+
+#define S_CAUSE_PCMD_DDP_2 18
+#define V_CAUSE_PCMD_DDP_2(x) ((x) << S_CAUSE_PCMD_DDP_2)
+#define F_CAUSE_PCMD_DDP_2 V_CAUSE_PCMD_DDP_2(1U)
+
+#define S_CAUSE_PCMD_MPAR_2 17
+#define V_CAUSE_PCMD_MPAR_2(x) ((x) << S_CAUSE_PCMD_MPAR_2)
+#define F_CAUSE_PCMD_MPAR_2 V_CAUSE_PCMD_MPAR_2(1U)
+
+#define S_CAUSE_PCMD_MPAC_2 16
+#define V_CAUSE_PCMD_MPAC_2(x) ((x) << S_CAUSE_PCMD_MPAC_2)
+#define F_CAUSE_PCMD_MPAC_2 V_CAUSE_PCMD_MPAC_2(1U)
+
+#define S_CAUSE_PCMD_SFIFO_1 14
+#define V_CAUSE_PCMD_SFIFO_1(x) ((x) << S_CAUSE_PCMD_SFIFO_1)
+#define F_CAUSE_PCMD_SFIFO_1 V_CAUSE_PCMD_SFIFO_1(1U)
+
+#define S_CAUSE_PCMD_FIFO_1 13
+#define V_CAUSE_PCMD_FIFO_1(x) ((x) << S_CAUSE_PCMD_FIFO_1)
+#define F_CAUSE_PCMD_FIFO_1 V_CAUSE_PCMD_FIFO_1(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_1 12
+#define V_CAUSE_PCMD_DDP_HINT_1(x) ((x) << S_CAUSE_PCMD_DDP_HINT_1)
+#define F_CAUSE_PCMD_DDP_HINT_1 V_CAUSE_PCMD_DDP_HINT_1(1U)
+
+#define S_CAUSE_PCMD_TPT_1 11
+#define V_CAUSE_PCMD_TPT_1(x) ((x) << S_CAUSE_PCMD_TPT_1)
+#define F_CAUSE_PCMD_TPT_1 V_CAUSE_PCMD_TPT_1(1U)
+
+#define S_CAUSE_PCMD_DDP_1 10
+#define V_CAUSE_PCMD_DDP_1(x) ((x) << S_CAUSE_PCMD_DDP_1)
+#define F_CAUSE_PCMD_DDP_1 V_CAUSE_PCMD_DDP_1(1U)
+
+#define S_CAUSE_PCMD_MPAR_1 9
+#define V_CAUSE_PCMD_MPAR_1(x) ((x) << S_CAUSE_PCMD_MPAR_1)
+#define F_CAUSE_PCMD_MPAR_1 V_CAUSE_PCMD_MPAR_1(1U)
+
+#define S_CAUSE_PCMD_MPAC_1 8
+#define V_CAUSE_PCMD_MPAC_1(x) ((x) << S_CAUSE_PCMD_MPAC_1)
+#define F_CAUSE_PCMD_MPAC_1 V_CAUSE_PCMD_MPAC_1(1U)
+
+#define S_CAUSE_PCMD_SFIFO_0 6
+#define V_CAUSE_PCMD_SFIFO_0(x) ((x) << S_CAUSE_PCMD_SFIFO_0)
+#define F_CAUSE_PCMD_SFIFO_0 V_CAUSE_PCMD_SFIFO_0(1U)
+
+#define S_CAUSE_PCMD_FIFO_0 5
+#define V_CAUSE_PCMD_FIFO_0(x) ((x) << S_CAUSE_PCMD_FIFO_0)
+#define F_CAUSE_PCMD_FIFO_0 V_CAUSE_PCMD_FIFO_0(1U)
+
+#define S_CAUSE_PCMD_DDP_HINT_0 4
+#define V_CAUSE_PCMD_DDP_HINT_0(x) ((x) << S_CAUSE_PCMD_DDP_HINT_0)
+#define F_CAUSE_PCMD_DDP_HINT_0 V_CAUSE_PCMD_DDP_HINT_0(1U)
+
+#define S_CAUSE_PCMD_TPT_0 3
+#define V_CAUSE_PCMD_TPT_0(x) ((x) << S_CAUSE_PCMD_TPT_0)
+#define F_CAUSE_PCMD_TPT_0 V_CAUSE_PCMD_TPT_0(1U)
+
+#define S_CAUSE_PCMD_DDP_0 2
+#define V_CAUSE_PCMD_DDP_0(x) ((x) << S_CAUSE_PCMD_DDP_0)
+#define F_CAUSE_PCMD_DDP_0 V_CAUSE_PCMD_DDP_0(1U)
+
+#define S_CAUSE_PCMD_MPAR_0 1
+#define V_CAUSE_PCMD_MPAR_0(x) ((x) << S_CAUSE_PCMD_MPAR_0)
+#define F_CAUSE_PCMD_MPAR_0 V_CAUSE_PCMD_MPAR_0(1U)
+
+#define S_CAUSE_PCMD_MPAC_0 0
+#define V_CAUSE_PCMD_MPAC_0(x) ((x) << S_CAUSE_PCMD_MPAC_0)
+#define F_CAUSE_PCMD_MPAC_0 V_CAUSE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_PCMD 0x19308
+
+#define S_PERR_ENABLE_PCMD_SFIFO_3 30
+#define V_PERR_ENABLE_PCMD_SFIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_3)
+#define F_PERR_ENABLE_PCMD_SFIFO_3 V_PERR_ENABLE_PCMD_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_3 29
+#define V_PERR_ENABLE_PCMD_FIFO_3(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_3)
+#define F_PERR_ENABLE_PCMD_FIFO_3 V_PERR_ENABLE_PCMD_FIFO_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_3 28
+#define V_PERR_ENABLE_PCMD_DDP_HINT_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_3)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_3 V_PERR_ENABLE_PCMD_DDP_HINT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_3 27
+#define V_PERR_ENABLE_PCMD_TPT_3(x) ((x) << S_PERR_ENABLE_PCMD_TPT_3)
+#define F_PERR_ENABLE_PCMD_TPT_3 V_PERR_ENABLE_PCMD_TPT_3(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_3 26
+#define V_PERR_ENABLE_PCMD_DDP_3(x) ((x) << S_PERR_ENABLE_PCMD_DDP_3)
+#define F_PERR_ENABLE_PCMD_DDP_3 V_PERR_ENABLE_PCMD_DDP_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_3 25
+#define V_PERR_ENABLE_PCMD_MPAR_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_3)
+#define F_PERR_ENABLE_PCMD_MPAR_3 V_PERR_ENABLE_PCMD_MPAR_3(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_3 24
+#define V_PERR_ENABLE_PCMD_MPAC_3(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_3)
+#define F_PERR_ENABLE_PCMD_MPAC_3 V_PERR_ENABLE_PCMD_MPAC_3(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_2 22
+#define V_PERR_ENABLE_PCMD_SFIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_2)
+#define F_PERR_ENABLE_PCMD_SFIFO_2 V_PERR_ENABLE_PCMD_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_2 21
+#define V_PERR_ENABLE_PCMD_FIFO_2(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_2)
+#define F_PERR_ENABLE_PCMD_FIFO_2 V_PERR_ENABLE_PCMD_FIFO_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_2 20
+#define V_PERR_ENABLE_PCMD_DDP_HINT_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_2)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_2 V_PERR_ENABLE_PCMD_DDP_HINT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_2 19
+#define V_PERR_ENABLE_PCMD_TPT_2(x) ((x) << S_PERR_ENABLE_PCMD_TPT_2)
+#define F_PERR_ENABLE_PCMD_TPT_2 V_PERR_ENABLE_PCMD_TPT_2(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_2 18
+#define V_PERR_ENABLE_PCMD_DDP_2(x) ((x) << S_PERR_ENABLE_PCMD_DDP_2)
+#define F_PERR_ENABLE_PCMD_DDP_2 V_PERR_ENABLE_PCMD_DDP_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_2 17
+#define V_PERR_ENABLE_PCMD_MPAR_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_2)
+#define F_PERR_ENABLE_PCMD_MPAR_2 V_PERR_ENABLE_PCMD_MPAR_2(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_2 16
+#define V_PERR_ENABLE_PCMD_MPAC_2(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_2)
+#define F_PERR_ENABLE_PCMD_MPAC_2 V_PERR_ENABLE_PCMD_MPAC_2(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_1 14
+#define V_PERR_ENABLE_PCMD_SFIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_1)
+#define F_PERR_ENABLE_PCMD_SFIFO_1 V_PERR_ENABLE_PCMD_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_1 13
+#define V_PERR_ENABLE_PCMD_FIFO_1(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_1)
+#define F_PERR_ENABLE_PCMD_FIFO_1 V_PERR_ENABLE_PCMD_FIFO_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_1 12
+#define V_PERR_ENABLE_PCMD_DDP_HINT_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_1)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_1 V_PERR_ENABLE_PCMD_DDP_HINT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_1 11
+#define V_PERR_ENABLE_PCMD_TPT_1(x) ((x) << S_PERR_ENABLE_PCMD_TPT_1)
+#define F_PERR_ENABLE_PCMD_TPT_1 V_PERR_ENABLE_PCMD_TPT_1(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_1 10
+#define V_PERR_ENABLE_PCMD_DDP_1(x) ((x) << S_PERR_ENABLE_PCMD_DDP_1)
+#define F_PERR_ENABLE_PCMD_DDP_1 V_PERR_ENABLE_PCMD_DDP_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_1 9
+#define V_PERR_ENABLE_PCMD_MPAR_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_1)
+#define F_PERR_ENABLE_PCMD_MPAR_1 V_PERR_ENABLE_PCMD_MPAR_1(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_1 8
+#define V_PERR_ENABLE_PCMD_MPAC_1(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_1)
+#define F_PERR_ENABLE_PCMD_MPAC_1 V_PERR_ENABLE_PCMD_MPAC_1(1U)
+
+#define S_PERR_ENABLE_PCMD_SFIFO_0 6
+#define V_PERR_ENABLE_PCMD_SFIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_SFIFO_0)
+#define F_PERR_ENABLE_PCMD_SFIFO_0 V_PERR_ENABLE_PCMD_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_FIFO_0 5
+#define V_PERR_ENABLE_PCMD_FIFO_0(x) ((x) << S_PERR_ENABLE_PCMD_FIFO_0)
+#define F_PERR_ENABLE_PCMD_FIFO_0 V_PERR_ENABLE_PCMD_FIFO_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_HINT_0 4
+#define V_PERR_ENABLE_PCMD_DDP_HINT_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_HINT_0)
+#define F_PERR_ENABLE_PCMD_DDP_HINT_0 V_PERR_ENABLE_PCMD_DDP_HINT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_TPT_0 3
+#define V_PERR_ENABLE_PCMD_TPT_0(x) ((x) << S_PERR_ENABLE_PCMD_TPT_0)
+#define F_PERR_ENABLE_PCMD_TPT_0 V_PERR_ENABLE_PCMD_TPT_0(1U)
+
+#define S_PERR_ENABLE_PCMD_DDP_0 2
+#define V_PERR_ENABLE_PCMD_DDP_0(x) ((x) << S_PERR_ENABLE_PCMD_DDP_0)
+#define F_PERR_ENABLE_PCMD_DDP_0 V_PERR_ENABLE_PCMD_DDP_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAR_0 1
+#define V_PERR_ENABLE_PCMD_MPAR_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAR_0)
+#define F_PERR_ENABLE_PCMD_MPAR_0 V_PERR_ENABLE_PCMD_MPAR_0(1U)
+
+#define S_PERR_ENABLE_PCMD_MPAC_0 0
+#define V_PERR_ENABLE_PCMD_MPAC_0(x) ((x) << S_PERR_ENABLE_PCMD_MPAC_0)
+#define F_PERR_ENABLE_PCMD_MPAC_0 V_PERR_ENABLE_PCMD_MPAC_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_DATA 0x19310
+
+#define S_ENABLE_DATA_SNOOP_3 29
+#define V_ENABLE_DATA_SNOOP_3(x) ((x) << S_ENABLE_DATA_SNOOP_3)
+#define F_ENABLE_DATA_SNOOP_3 V_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_ENABLE_DATA_SFIFO_3 28
+#define V_ENABLE_DATA_SFIFO_3(x) ((x) << S_ENABLE_DATA_SFIFO_3)
+#define F_ENABLE_DATA_SFIFO_3 V_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_ENABLE_DATA_FIFO_3 27
+#define V_ENABLE_DATA_FIFO_3(x) ((x) << S_ENABLE_DATA_FIFO_3)
+#define F_ENABLE_DATA_FIFO_3 V_ENABLE_DATA_FIFO_3(1U)
+
+#define S_ENABLE_DATA_DDP_3 26
+#define V_ENABLE_DATA_DDP_3(x) ((x) << S_ENABLE_DATA_DDP_3)
+#define F_ENABLE_DATA_DDP_3 V_ENABLE_DATA_DDP_3(1U)
+
+#define S_ENABLE_DATA_CTX_3 25
+#define V_ENABLE_DATA_CTX_3(x) ((x) << S_ENABLE_DATA_CTX_3)
+#define F_ENABLE_DATA_CTX_3 V_ENABLE_DATA_CTX_3(1U)
+
+#define S_ENABLE_DATA_PARSER_3 24
+#define V_ENABLE_DATA_PARSER_3(x) ((x) << S_ENABLE_DATA_PARSER_3)
+#define F_ENABLE_DATA_PARSER_3 V_ENABLE_DATA_PARSER_3(1U)
+
+#define S_ENABLE_DATA_SNOOP_2 21
+#define V_ENABLE_DATA_SNOOP_2(x) ((x) << S_ENABLE_DATA_SNOOP_2)
+#define F_ENABLE_DATA_SNOOP_2 V_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_ENABLE_DATA_SFIFO_2 20
+#define V_ENABLE_DATA_SFIFO_2(x) ((x) << S_ENABLE_DATA_SFIFO_2)
+#define F_ENABLE_DATA_SFIFO_2 V_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_ENABLE_DATA_FIFO_2 19
+#define V_ENABLE_DATA_FIFO_2(x) ((x) << S_ENABLE_DATA_FIFO_2)
+#define F_ENABLE_DATA_FIFO_2 V_ENABLE_DATA_FIFO_2(1U)
+
+#define S_ENABLE_DATA_DDP_2 18
+#define V_ENABLE_DATA_DDP_2(x) ((x) << S_ENABLE_DATA_DDP_2)
+#define F_ENABLE_DATA_DDP_2 V_ENABLE_DATA_DDP_2(1U)
+
+#define S_ENABLE_DATA_CTX_2 17
+#define V_ENABLE_DATA_CTX_2(x) ((x) << S_ENABLE_DATA_CTX_2)
+#define F_ENABLE_DATA_CTX_2 V_ENABLE_DATA_CTX_2(1U)
+
+#define S_ENABLE_DATA_PARSER_2 16
+#define V_ENABLE_DATA_PARSER_2(x) ((x) << S_ENABLE_DATA_PARSER_2)
+#define F_ENABLE_DATA_PARSER_2 V_ENABLE_DATA_PARSER_2(1U)
+
+#define S_ENABLE_DATA_SNOOP_1 13
+#define V_ENABLE_DATA_SNOOP_1(x) ((x) << S_ENABLE_DATA_SNOOP_1)
+#define F_ENABLE_DATA_SNOOP_1 V_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_ENABLE_DATA_SFIFO_1 12
+#define V_ENABLE_DATA_SFIFO_1(x) ((x) << S_ENABLE_DATA_SFIFO_1)
+#define F_ENABLE_DATA_SFIFO_1 V_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_ENABLE_DATA_FIFO_1 11
+#define V_ENABLE_DATA_FIFO_1(x) ((x) << S_ENABLE_DATA_FIFO_1)
+#define F_ENABLE_DATA_FIFO_1 V_ENABLE_DATA_FIFO_1(1U)
+
+#define S_ENABLE_DATA_DDP_1 10
+#define V_ENABLE_DATA_DDP_1(x) ((x) << S_ENABLE_DATA_DDP_1)
+#define F_ENABLE_DATA_DDP_1 V_ENABLE_DATA_DDP_1(1U)
+
+#define S_ENABLE_DATA_CTX_1 9
+#define V_ENABLE_DATA_CTX_1(x) ((x) << S_ENABLE_DATA_CTX_1)
+#define F_ENABLE_DATA_CTX_1 V_ENABLE_DATA_CTX_1(1U)
+
+#define S_ENABLE_DATA_PARSER_1 8
+#define V_ENABLE_DATA_PARSER_1(x) ((x) << S_ENABLE_DATA_PARSER_1)
+#define F_ENABLE_DATA_PARSER_1 V_ENABLE_DATA_PARSER_1(1U)
+
+#define S_ENABLE_DATA_SNOOP_0 5
+#define V_ENABLE_DATA_SNOOP_0(x) ((x) << S_ENABLE_DATA_SNOOP_0)
+#define F_ENABLE_DATA_SNOOP_0 V_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_ENABLE_DATA_SFIFO_0 4
+#define V_ENABLE_DATA_SFIFO_0(x) ((x) << S_ENABLE_DATA_SFIFO_0)
+#define F_ENABLE_DATA_SFIFO_0 V_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_ENABLE_DATA_FIFO_0 3
+#define V_ENABLE_DATA_FIFO_0(x) ((x) << S_ENABLE_DATA_FIFO_0)
+#define F_ENABLE_DATA_FIFO_0 V_ENABLE_DATA_FIFO_0(1U)
+
+#define S_ENABLE_DATA_DDP_0 2
+#define V_ENABLE_DATA_DDP_0(x) ((x) << S_ENABLE_DATA_DDP_0)
+#define F_ENABLE_DATA_DDP_0 V_ENABLE_DATA_DDP_0(1U)
+
+#define S_ENABLE_DATA_CTX_0 1
+#define V_ENABLE_DATA_CTX_0(x) ((x) << S_ENABLE_DATA_CTX_0)
+#define F_ENABLE_DATA_CTX_0 V_ENABLE_DATA_CTX_0(1U)
+
+#define S_ENABLE_DATA_PARSER_0 0
+#define V_ENABLE_DATA_PARSER_0(x) ((x) << S_ENABLE_DATA_PARSER_0)
+#define F_ENABLE_DATA_PARSER_0 V_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_DATA 0x19314
+
+#define S_CAUSE_DATA_SNOOP_3 29
+#define V_CAUSE_DATA_SNOOP_3(x) ((x) << S_CAUSE_DATA_SNOOP_3)
+#define F_CAUSE_DATA_SNOOP_3 V_CAUSE_DATA_SNOOP_3(1U)
+
+#define S_CAUSE_DATA_SFIFO_3 28
+#define V_CAUSE_DATA_SFIFO_3(x) ((x) << S_CAUSE_DATA_SFIFO_3)
+#define F_CAUSE_DATA_SFIFO_3 V_CAUSE_DATA_SFIFO_3(1U)
+
+#define S_CAUSE_DATA_FIFO_3 27
+#define V_CAUSE_DATA_FIFO_3(x) ((x) << S_CAUSE_DATA_FIFO_3)
+#define F_CAUSE_DATA_FIFO_3 V_CAUSE_DATA_FIFO_3(1U)
+
+#define S_CAUSE_DATA_DDP_3 26
+#define V_CAUSE_DATA_DDP_3(x) ((x) << S_CAUSE_DATA_DDP_3)
+#define F_CAUSE_DATA_DDP_3 V_CAUSE_DATA_DDP_3(1U)
+
+#define S_CAUSE_DATA_CTX_3 25
+#define V_CAUSE_DATA_CTX_3(x) ((x) << S_CAUSE_DATA_CTX_3)
+#define F_CAUSE_DATA_CTX_3 V_CAUSE_DATA_CTX_3(1U)
+
+#define S_CAUSE_DATA_PARSER_3 24
+#define V_CAUSE_DATA_PARSER_3(x) ((x) << S_CAUSE_DATA_PARSER_3)
+#define F_CAUSE_DATA_PARSER_3 V_CAUSE_DATA_PARSER_3(1U)
+
+#define S_CAUSE_DATA_SNOOP_2 21
+#define V_CAUSE_DATA_SNOOP_2(x) ((x) << S_CAUSE_DATA_SNOOP_2)
+#define F_CAUSE_DATA_SNOOP_2 V_CAUSE_DATA_SNOOP_2(1U)
+
+#define S_CAUSE_DATA_SFIFO_2 20
+#define V_CAUSE_DATA_SFIFO_2(x) ((x) << S_CAUSE_DATA_SFIFO_2)
+#define F_CAUSE_DATA_SFIFO_2 V_CAUSE_DATA_SFIFO_2(1U)
+
+#define S_CAUSE_DATA_FIFO_2 19
+#define V_CAUSE_DATA_FIFO_2(x) ((x) << S_CAUSE_DATA_FIFO_2)
+#define F_CAUSE_DATA_FIFO_2 V_CAUSE_DATA_FIFO_2(1U)
+
+#define S_CAUSE_DATA_DDP_2 18
+#define V_CAUSE_DATA_DDP_2(x) ((x) << S_CAUSE_DATA_DDP_2)
+#define F_CAUSE_DATA_DDP_2 V_CAUSE_DATA_DDP_2(1U)
+
+#define S_CAUSE_DATA_CTX_2 17
+#define V_CAUSE_DATA_CTX_2(x) ((x) << S_CAUSE_DATA_CTX_2)
+#define F_CAUSE_DATA_CTX_2 V_CAUSE_DATA_CTX_2(1U)
+
+#define S_CAUSE_DATA_PARSER_2 16
+#define V_CAUSE_DATA_PARSER_2(x) ((x) << S_CAUSE_DATA_PARSER_2)
+#define F_CAUSE_DATA_PARSER_2 V_CAUSE_DATA_PARSER_2(1U)
+
+#define S_CAUSE_DATA_SNOOP_1 13
+#define V_CAUSE_DATA_SNOOP_1(x) ((x) << S_CAUSE_DATA_SNOOP_1)
+#define F_CAUSE_DATA_SNOOP_1 V_CAUSE_DATA_SNOOP_1(1U)
+
+#define S_CAUSE_DATA_SFIFO_1 12
+#define V_CAUSE_DATA_SFIFO_1(x) ((x) << S_CAUSE_DATA_SFIFO_1)
+#define F_CAUSE_DATA_SFIFO_1 V_CAUSE_DATA_SFIFO_1(1U)
+
+#define S_CAUSE_DATA_FIFO_1 11
+#define V_CAUSE_DATA_FIFO_1(x) ((x) << S_CAUSE_DATA_FIFO_1)
+#define F_CAUSE_DATA_FIFO_1 V_CAUSE_DATA_FIFO_1(1U)
+
+#define S_CAUSE_DATA_DDP_1 10
+#define V_CAUSE_DATA_DDP_1(x) ((x) << S_CAUSE_DATA_DDP_1)
+#define F_CAUSE_DATA_DDP_1 V_CAUSE_DATA_DDP_1(1U)
+
+#define S_CAUSE_DATA_CTX_1 9
+#define V_CAUSE_DATA_CTX_1(x) ((x) << S_CAUSE_DATA_CTX_1)
+#define F_CAUSE_DATA_CTX_1 V_CAUSE_DATA_CTX_1(1U)
+
+#define S_CAUSE_DATA_PARSER_1 8
+#define V_CAUSE_DATA_PARSER_1(x) ((x) << S_CAUSE_DATA_PARSER_1)
+#define F_CAUSE_DATA_PARSER_1 V_CAUSE_DATA_PARSER_1(1U)
+
+#define S_CAUSE_DATA_SNOOP_0 5
+#define V_CAUSE_DATA_SNOOP_0(x) ((x) << S_CAUSE_DATA_SNOOP_0)
+#define F_CAUSE_DATA_SNOOP_0 V_CAUSE_DATA_SNOOP_0(1U)
+
+#define S_CAUSE_DATA_SFIFO_0 4
+#define V_CAUSE_DATA_SFIFO_0(x) ((x) << S_CAUSE_DATA_SFIFO_0)
+#define F_CAUSE_DATA_SFIFO_0 V_CAUSE_DATA_SFIFO_0(1U)
+
+#define S_CAUSE_DATA_FIFO_0 3
+#define V_CAUSE_DATA_FIFO_0(x) ((x) << S_CAUSE_DATA_FIFO_0)
+#define F_CAUSE_DATA_FIFO_0 V_CAUSE_DATA_FIFO_0(1U)
+
+#define S_CAUSE_DATA_DDP_0 2
+#define V_CAUSE_DATA_DDP_0(x) ((x) << S_CAUSE_DATA_DDP_0)
+#define F_CAUSE_DATA_DDP_0 V_CAUSE_DATA_DDP_0(1U)
+
+#define S_CAUSE_DATA_CTX_0 1
+#define V_CAUSE_DATA_CTX_0(x) ((x) << S_CAUSE_DATA_CTX_0)
+#define F_CAUSE_DATA_CTX_0 V_CAUSE_DATA_CTX_0(1U)
+
+#define S_CAUSE_DATA_PARSER_0 0
+#define V_CAUSE_DATA_PARSER_0(x) ((x) << S_CAUSE_DATA_PARSER_0)
+#define F_CAUSE_DATA_PARSER_0 V_CAUSE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_DATA 0x19318
+
+#define S_PERR_ENABLE_DATA_SNOOP_3 29
+#define V_PERR_ENABLE_DATA_SNOOP_3(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_3)
+#define F_PERR_ENABLE_DATA_SNOOP_3 V_PERR_ENABLE_DATA_SNOOP_3(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_3 28
+#define V_PERR_ENABLE_DATA_SFIFO_3(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_3)
+#define F_PERR_ENABLE_DATA_SFIFO_3 V_PERR_ENABLE_DATA_SFIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_3 27
+#define V_PERR_ENABLE_DATA_FIFO_3(x) ((x) << S_PERR_ENABLE_DATA_FIFO_3)
+#define F_PERR_ENABLE_DATA_FIFO_3 V_PERR_ENABLE_DATA_FIFO_3(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_3 26
+#define V_PERR_ENABLE_DATA_DDP_3(x) ((x) << S_PERR_ENABLE_DATA_DDP_3)
+#define F_PERR_ENABLE_DATA_DDP_3 V_PERR_ENABLE_DATA_DDP_3(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_3 25
+#define V_PERR_ENABLE_DATA_CTX_3(x) ((x) << S_PERR_ENABLE_DATA_CTX_3)
+#define F_PERR_ENABLE_DATA_CTX_3 V_PERR_ENABLE_DATA_CTX_3(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_3 24
+#define V_PERR_ENABLE_DATA_PARSER_3(x) ((x) << S_PERR_ENABLE_DATA_PARSER_3)
+#define F_PERR_ENABLE_DATA_PARSER_3 V_PERR_ENABLE_DATA_PARSER_3(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_2 21
+#define V_PERR_ENABLE_DATA_SNOOP_2(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_2)
+#define F_PERR_ENABLE_DATA_SNOOP_2 V_PERR_ENABLE_DATA_SNOOP_2(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_2 20
+#define V_PERR_ENABLE_DATA_SFIFO_2(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_2)
+#define F_PERR_ENABLE_DATA_SFIFO_2 V_PERR_ENABLE_DATA_SFIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_2 19
+#define V_PERR_ENABLE_DATA_FIFO_2(x) ((x) << S_PERR_ENABLE_DATA_FIFO_2)
+#define F_PERR_ENABLE_DATA_FIFO_2 V_PERR_ENABLE_DATA_FIFO_2(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_2 18
+#define V_PERR_ENABLE_DATA_DDP_2(x) ((x) << S_PERR_ENABLE_DATA_DDP_2)
+#define F_PERR_ENABLE_DATA_DDP_2 V_PERR_ENABLE_DATA_DDP_2(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_2 17
+#define V_PERR_ENABLE_DATA_CTX_2(x) ((x) << S_PERR_ENABLE_DATA_CTX_2)
+#define F_PERR_ENABLE_DATA_CTX_2 V_PERR_ENABLE_DATA_CTX_2(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_2 16
+#define V_PERR_ENABLE_DATA_PARSER_2(x) ((x) << S_PERR_ENABLE_DATA_PARSER_2)
+#define F_PERR_ENABLE_DATA_PARSER_2 V_PERR_ENABLE_DATA_PARSER_2(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_1 13
+#define V_PERR_ENABLE_DATA_SNOOP_1(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_1)
+#define F_PERR_ENABLE_DATA_SNOOP_1 V_PERR_ENABLE_DATA_SNOOP_1(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_1 12
+#define V_PERR_ENABLE_DATA_SFIFO_1(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_1)
+#define F_PERR_ENABLE_DATA_SFIFO_1 V_PERR_ENABLE_DATA_SFIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_1 11
+#define V_PERR_ENABLE_DATA_FIFO_1(x) ((x) << S_PERR_ENABLE_DATA_FIFO_1)
+#define F_PERR_ENABLE_DATA_FIFO_1 V_PERR_ENABLE_DATA_FIFO_1(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_1 10
+#define V_PERR_ENABLE_DATA_DDP_1(x) ((x) << S_PERR_ENABLE_DATA_DDP_1)
+#define F_PERR_ENABLE_DATA_DDP_1 V_PERR_ENABLE_DATA_DDP_1(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_1 9
+#define V_PERR_ENABLE_DATA_CTX_1(x) ((x) << S_PERR_ENABLE_DATA_CTX_1)
+#define F_PERR_ENABLE_DATA_CTX_1 V_PERR_ENABLE_DATA_CTX_1(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_1 8
+#define V_PERR_ENABLE_DATA_PARSER_1(x) ((x) << S_PERR_ENABLE_DATA_PARSER_1)
+#define F_PERR_ENABLE_DATA_PARSER_1 V_PERR_ENABLE_DATA_PARSER_1(1U)
+
+#define S_PERR_ENABLE_DATA_SNOOP_0 5
+#define V_PERR_ENABLE_DATA_SNOOP_0(x) ((x) << S_PERR_ENABLE_DATA_SNOOP_0)
+#define F_PERR_ENABLE_DATA_SNOOP_0 V_PERR_ENABLE_DATA_SNOOP_0(1U)
+
+#define S_PERR_ENABLE_DATA_SFIFO_0 4
+#define V_PERR_ENABLE_DATA_SFIFO_0(x) ((x) << S_PERR_ENABLE_DATA_SFIFO_0)
+#define F_PERR_ENABLE_DATA_SFIFO_0 V_PERR_ENABLE_DATA_SFIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_FIFO_0 3
+#define V_PERR_ENABLE_DATA_FIFO_0(x) ((x) << S_PERR_ENABLE_DATA_FIFO_0)
+#define F_PERR_ENABLE_DATA_FIFO_0 V_PERR_ENABLE_DATA_FIFO_0(1U)
+
+#define S_PERR_ENABLE_DATA_DDP_0 2
+#define V_PERR_ENABLE_DATA_DDP_0(x) ((x) << S_PERR_ENABLE_DATA_DDP_0)
+#define F_PERR_ENABLE_DATA_DDP_0 V_PERR_ENABLE_DATA_DDP_0(1U)
+
+#define S_PERR_ENABLE_DATA_CTX_0 1
+#define V_PERR_ENABLE_DATA_CTX_0(x) ((x) << S_PERR_ENABLE_DATA_CTX_0)
+#define F_PERR_ENABLE_DATA_CTX_0 V_PERR_ENABLE_DATA_CTX_0(1U)
+
+#define S_PERR_ENABLE_DATA_PARSER_0 0
+#define V_PERR_ENABLE_DATA_PARSER_0(x) ((x) << S_PERR_ENABLE_DATA_PARSER_0)
+#define F_PERR_ENABLE_DATA_PARSER_0 V_PERR_ENABLE_DATA_PARSER_0(1U)
+
+#define A_ULP_RX_INT_ENABLE_ARB 0x19320
+
+#define S_ENABLE_ARB_PBL_PF_3 27
+#define V_ENABLE_ARB_PBL_PF_3(x) ((x) << S_ENABLE_ARB_PBL_PF_3)
+#define F_ENABLE_ARB_PBL_PF_3 V_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_ENABLE_ARB_PF_3 26
+#define V_ENABLE_ARB_PF_3(x) ((x) << S_ENABLE_ARB_PF_3)
+#define F_ENABLE_ARB_PF_3 V_ENABLE_ARB_PF_3(1U)
+
+#define S_ENABLE_ARB_TPT_PF_3 25
+#define V_ENABLE_ARB_TPT_PF_3(x) ((x) << S_ENABLE_ARB_TPT_PF_3)
+#define F_ENABLE_ARB_TPT_PF_3 V_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_ENABLE_ARB_F_3 24
+#define V_ENABLE_ARB_F_3(x) ((x) << S_ENABLE_ARB_F_3)
+#define F_ENABLE_ARB_F_3 V_ENABLE_ARB_F_3(1U)
+
+#define S_ENABLE_ARB_PBL_PF_2 19
+#define V_ENABLE_ARB_PBL_PF_2(x) ((x) << S_ENABLE_ARB_PBL_PF_2)
+#define F_ENABLE_ARB_PBL_PF_2 V_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_ENABLE_ARB_PF_2 18
+#define V_ENABLE_ARB_PF_2(x) ((x) << S_ENABLE_ARB_PF_2)
+#define F_ENABLE_ARB_PF_2 V_ENABLE_ARB_PF_2(1U)
+
+#define S_ENABLE_ARB_TPT_PF_2 17
+#define V_ENABLE_ARB_TPT_PF_2(x) ((x) << S_ENABLE_ARB_TPT_PF_2)
+#define F_ENABLE_ARB_TPT_PF_2 V_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_ENABLE_ARB_F_2 16
+#define V_ENABLE_ARB_F_2(x) ((x) << S_ENABLE_ARB_F_2)
+#define F_ENABLE_ARB_F_2 V_ENABLE_ARB_F_2(1U)
+
+#define S_ENABLE_ARB_PBL_PF_1 11
+#define V_ENABLE_ARB_PBL_PF_1(x) ((x) << S_ENABLE_ARB_PBL_PF_1)
+#define F_ENABLE_ARB_PBL_PF_1 V_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_ENABLE_ARB_PF_1 10
+#define V_ENABLE_ARB_PF_1(x) ((x) << S_ENABLE_ARB_PF_1)
+#define F_ENABLE_ARB_PF_1 V_ENABLE_ARB_PF_1(1U)
+
+#define S_ENABLE_ARB_TPT_PF_1 9
+#define V_ENABLE_ARB_TPT_PF_1(x) ((x) << S_ENABLE_ARB_TPT_PF_1)
+#define F_ENABLE_ARB_TPT_PF_1 V_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_ENABLE_ARB_F_1 8
+#define V_ENABLE_ARB_F_1(x) ((x) << S_ENABLE_ARB_F_1)
+#define F_ENABLE_ARB_F_1 V_ENABLE_ARB_F_1(1U)
+
+#define S_ENABLE_ARB_PBL_PF_0 3
+#define V_ENABLE_ARB_PBL_PF_0(x) ((x) << S_ENABLE_ARB_PBL_PF_0)
+#define F_ENABLE_ARB_PBL_PF_0 V_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_ENABLE_ARB_PF_0 2
+#define V_ENABLE_ARB_PF_0(x) ((x) << S_ENABLE_ARB_PF_0)
+#define F_ENABLE_ARB_PF_0 V_ENABLE_ARB_PF_0(1U)
+
+#define S_ENABLE_ARB_TPT_PF_0 1
+#define V_ENABLE_ARB_TPT_PF_0(x) ((x) << S_ENABLE_ARB_TPT_PF_0)
+#define F_ENABLE_ARB_TPT_PF_0 V_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_ENABLE_ARB_F_0 0
+#define V_ENABLE_ARB_F_0(x) ((x) << S_ENABLE_ARB_F_0)
+#define F_ENABLE_ARB_F_0 V_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_INT_CAUSE_ARB 0x19324
+
+#define S_CAUSE_ARB_PBL_PF_3 27
+#define V_CAUSE_ARB_PBL_PF_3(x) ((x) << S_CAUSE_ARB_PBL_PF_3)
+#define F_CAUSE_ARB_PBL_PF_3 V_CAUSE_ARB_PBL_PF_3(1U)
+
+#define S_CAUSE_ARB_PF_3 26
+#define V_CAUSE_ARB_PF_3(x) ((x) << S_CAUSE_ARB_PF_3)
+#define F_CAUSE_ARB_PF_3 V_CAUSE_ARB_PF_3(1U)
+
+#define S_CAUSE_ARB_TPT_PF_3 25
+#define V_CAUSE_ARB_TPT_PF_3(x) ((x) << S_CAUSE_ARB_TPT_PF_3)
+#define F_CAUSE_ARB_TPT_PF_3 V_CAUSE_ARB_TPT_PF_3(1U)
+
+#define S_CAUSE_ARB_F_3 24
+#define V_CAUSE_ARB_F_3(x) ((x) << S_CAUSE_ARB_F_3)
+#define F_CAUSE_ARB_F_3 V_CAUSE_ARB_F_3(1U)
+
+#define S_CAUSE_ARB_PBL_PF_2 19
+#define V_CAUSE_ARB_PBL_PF_2(x) ((x) << S_CAUSE_ARB_PBL_PF_2)
+#define F_CAUSE_ARB_PBL_PF_2 V_CAUSE_ARB_PBL_PF_2(1U)
+
+#define S_CAUSE_ARB_PF_2 18
+#define V_CAUSE_ARB_PF_2(x) ((x) << S_CAUSE_ARB_PF_2)
+#define F_CAUSE_ARB_PF_2 V_CAUSE_ARB_PF_2(1U)
+
+#define S_CAUSE_ARB_TPT_PF_2 17
+#define V_CAUSE_ARB_TPT_PF_2(x) ((x) << S_CAUSE_ARB_TPT_PF_2)
+#define F_CAUSE_ARB_TPT_PF_2 V_CAUSE_ARB_TPT_PF_2(1U)
+
+#define S_CAUSE_ARB_F_2 16
+#define V_CAUSE_ARB_F_2(x) ((x) << S_CAUSE_ARB_F_2)
+#define F_CAUSE_ARB_F_2 V_CAUSE_ARB_F_2(1U)
+
+#define S_CAUSE_ARB_PBL_PF_1 11
+#define V_CAUSE_ARB_PBL_PF_1(x) ((x) << S_CAUSE_ARB_PBL_PF_1)
+#define F_CAUSE_ARB_PBL_PF_1 V_CAUSE_ARB_PBL_PF_1(1U)
+
+#define S_CAUSE_ARB_PF_1 10
+#define V_CAUSE_ARB_PF_1(x) ((x) << S_CAUSE_ARB_PF_1)
+#define F_CAUSE_ARB_PF_1 V_CAUSE_ARB_PF_1(1U)
+
+#define S_CAUSE_ARB_TPT_PF_1 9
+#define V_CAUSE_ARB_TPT_PF_1(x) ((x) << S_CAUSE_ARB_TPT_PF_1)
+#define F_CAUSE_ARB_TPT_PF_1 V_CAUSE_ARB_TPT_PF_1(1U)
+
+#define S_CAUSE_ARB_F_1 8
+#define V_CAUSE_ARB_F_1(x) ((x) << S_CAUSE_ARB_F_1)
+#define F_CAUSE_ARB_F_1 V_CAUSE_ARB_F_1(1U)
+
+#define S_CAUSE_ARB_PBL_PF_0 3
+#define V_CAUSE_ARB_PBL_PF_0(x) ((x) << S_CAUSE_ARB_PBL_PF_0)
+#define F_CAUSE_ARB_PBL_PF_0 V_CAUSE_ARB_PBL_PF_0(1U)
+
+#define S_CAUSE_ARB_PF_0 2
+#define V_CAUSE_ARB_PF_0(x) ((x) << S_CAUSE_ARB_PF_0)
+#define F_CAUSE_ARB_PF_0 V_CAUSE_ARB_PF_0(1U)
+
+#define S_CAUSE_ARB_TPT_PF_0 1
+#define V_CAUSE_ARB_TPT_PF_0(x) ((x) << S_CAUSE_ARB_TPT_PF_0)
+#define F_CAUSE_ARB_TPT_PF_0 V_CAUSE_ARB_TPT_PF_0(1U)
+
+#define S_CAUSE_ARB_F_0 0
+#define V_CAUSE_ARB_F_0(x) ((x) << S_CAUSE_ARB_F_0)
+#define F_CAUSE_ARB_F_0 V_CAUSE_ARB_F_0(1U)
+
+#define A_ULP_RX_PERR_ENABLE_ARB 0x19328
+
+#define S_PERR_ENABLE_ARB_PBL_PF_3 27
+#define V_PERR_ENABLE_ARB_PBL_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_3)
+#define F_PERR_ENABLE_ARB_PBL_PF_3 V_PERR_ENABLE_ARB_PBL_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_PF_3 26
+#define V_PERR_ENABLE_ARB_PF_3(x) ((x) << S_PERR_ENABLE_ARB_PF_3)
+#define F_PERR_ENABLE_ARB_PF_3 V_PERR_ENABLE_ARB_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_3 25
+#define V_PERR_ENABLE_ARB_TPT_PF_3(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_3)
+#define F_PERR_ENABLE_ARB_TPT_PF_3 V_PERR_ENABLE_ARB_TPT_PF_3(1U)
+
+#define S_PERR_ENABLE_ARB_F_3 24
+#define V_PERR_ENABLE_ARB_F_3(x) ((x) << S_PERR_ENABLE_ARB_F_3)
+#define F_PERR_ENABLE_ARB_F_3 V_PERR_ENABLE_ARB_F_3(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_2 19
+#define V_PERR_ENABLE_ARB_PBL_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_2)
+#define F_PERR_ENABLE_ARB_PBL_PF_2 V_PERR_ENABLE_ARB_PBL_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_PF_2 18
+#define V_PERR_ENABLE_ARB_PF_2(x) ((x) << S_PERR_ENABLE_ARB_PF_2)
+#define F_PERR_ENABLE_ARB_PF_2 V_PERR_ENABLE_ARB_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_2 17
+#define V_PERR_ENABLE_ARB_TPT_PF_2(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_2)
+#define F_PERR_ENABLE_ARB_TPT_PF_2 V_PERR_ENABLE_ARB_TPT_PF_2(1U)
+
+#define S_PERR_ENABLE_ARB_F_2 16
+#define V_PERR_ENABLE_ARB_F_2(x) ((x) << S_PERR_ENABLE_ARB_F_2)
+#define F_PERR_ENABLE_ARB_F_2 V_PERR_ENABLE_ARB_F_2(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_1 11
+#define V_PERR_ENABLE_ARB_PBL_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_1)
+#define F_PERR_ENABLE_ARB_PBL_PF_1 V_PERR_ENABLE_ARB_PBL_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_PF_1 10
+#define V_PERR_ENABLE_ARB_PF_1(x) ((x) << S_PERR_ENABLE_ARB_PF_1)
+#define F_PERR_ENABLE_ARB_PF_1 V_PERR_ENABLE_ARB_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_1 9
+#define V_PERR_ENABLE_ARB_TPT_PF_1(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_1)
+#define F_PERR_ENABLE_ARB_TPT_PF_1 V_PERR_ENABLE_ARB_TPT_PF_1(1U)
+
+#define S_PERR_ENABLE_ARB_F_1 8
+#define V_PERR_ENABLE_ARB_F_1(x) ((x) << S_PERR_ENABLE_ARB_F_1)
+#define F_PERR_ENABLE_ARB_F_1 V_PERR_ENABLE_ARB_F_1(1U)
+
+#define S_PERR_ENABLE_ARB_PBL_PF_0 3
+#define V_PERR_ENABLE_ARB_PBL_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PBL_PF_0)
+#define F_PERR_ENABLE_ARB_PBL_PF_0 V_PERR_ENABLE_ARB_PBL_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_PF_0 2
+#define V_PERR_ENABLE_ARB_PF_0(x) ((x) << S_PERR_ENABLE_ARB_PF_0)
+#define F_PERR_ENABLE_ARB_PF_0 V_PERR_ENABLE_ARB_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_TPT_PF_0 1
+#define V_PERR_ENABLE_ARB_TPT_PF_0(x) ((x) << S_PERR_ENABLE_ARB_TPT_PF_0)
+#define F_PERR_ENABLE_ARB_TPT_PF_0 V_PERR_ENABLE_ARB_TPT_PF_0(1U)
+
+#define S_PERR_ENABLE_ARB_F_0 0
+#define V_PERR_ENABLE_ARB_F_0(x) ((x) << S_PERR_ENABLE_ARB_F_0)
+#define F_PERR_ENABLE_ARB_F_0 V_PERR_ENABLE_ARB_F_0(1U)
+
+#define A_ULP_RX_CTL1 0x19330
+
+#define S_ISCSI_CTL2 27
+#define V_ISCSI_CTL2(x) ((x) << S_ISCSI_CTL2)
+#define F_ISCSI_CTL2 V_ISCSI_CTL2(1U)
+
+#define S_ISCSI_CTL1 26
+#define V_ISCSI_CTL1(x) ((x) << S_ISCSI_CTL1)
+#define F_ISCSI_CTL1 V_ISCSI_CTL1(1U)
+
+#define S_ISCSI_CTL0 25
+#define V_ISCSI_CTL0(x) ((x) << S_ISCSI_CTL0)
+#define F_ISCSI_CTL0 V_ISCSI_CTL0(1U)
+
+#define S_NVME_TCP_DATA_ALIGNMENT 16
+#define M_NVME_TCP_DATA_ALIGNMENT 0x1ffU
+#define V_NVME_TCP_DATA_ALIGNMENT(x) ((x) << S_NVME_TCP_DATA_ALIGNMENT)
+#define G_NVME_TCP_DATA_ALIGNMENT(x) (((x) >> S_NVME_TCP_DATA_ALIGNMENT) & M_NVME_TCP_DATA_ALIGNMENT)
+
+#define S_NVME_TCP_INVLD_MSG_DIS 14
+#define M_NVME_TCP_INVLD_MSG_DIS 0x3U
+#define V_NVME_TCP_INVLD_MSG_DIS(x) ((x) << S_NVME_TCP_INVLD_MSG_DIS)
+#define G_NVME_TCP_INVLD_MSG_DIS(x) (((x) >> S_NVME_TCP_INVLD_MSG_DIS) & M_NVME_TCP_INVLD_MSG_DIS)
+
+#define S_NVME_TCP_DDP_PDU_CHK_TYPE 13
+#define V_NVME_TCP_DDP_PDU_CHK_TYPE(x) ((x) << S_NVME_TCP_DDP_PDU_CHK_TYPE)
+#define F_NVME_TCP_DDP_PDU_CHK_TYPE V_NVME_TCP_DDP_PDU_CHK_TYPE(1U)
+
+#define S_T10_CONFIG_ENB 12
+#define V_T10_CONFIG_ENB(x) ((x) << S_T10_CONFIG_ENB)
+#define F_T10_CONFIG_ENB V_T10_CONFIG_ENB(1U)
+
+#define S_NVME_TCP_COLOUR_ENB 10
+#define M_NVME_TCP_COLOUR_ENB 0x3U
+#define V_NVME_TCP_COLOUR_ENB(x) ((x) << S_NVME_TCP_COLOUR_ENB)
+#define G_NVME_TCP_COLOUR_ENB(x) (((x) >> S_NVME_TCP_COLOUR_ENB) & M_NVME_TCP_COLOUR_ENB)
+
+#define S_ROCE_SEND_RQE 8
+#define V_ROCE_SEND_RQE(x) ((x) << S_ROCE_SEND_RQE)
+#define F_ROCE_SEND_RQE V_ROCE_SEND_RQE(1U)
+
+#define S_RDMA_INVLD_MSG_DIS 6
+#define M_RDMA_INVLD_MSG_DIS 0x3U
+#define V_RDMA_INVLD_MSG_DIS(x) ((x) << S_RDMA_INVLD_MSG_DIS)
+#define G_RDMA_INVLD_MSG_DIS(x) (((x) >> S_RDMA_INVLD_MSG_DIS) & M_RDMA_INVLD_MSG_DIS)
+
+#define S_ROCE_INVLD_MSG_DIS 4
+#define M_ROCE_INVLD_MSG_DIS 0x3U
+#define V_ROCE_INVLD_MSG_DIS(x) ((x) << S_ROCE_INVLD_MSG_DIS)
+#define G_ROCE_INVLD_MSG_DIS(x) (((x) >> S_ROCE_INVLD_MSG_DIS) & M_ROCE_INVLD_MSG_DIS)
+
+#define S_T7_MEM_ADDR_CTRL 2
+#define M_T7_MEM_ADDR_CTRL 0x3U
+#define V_T7_MEM_ADDR_CTRL(x) ((x) << S_T7_MEM_ADDR_CTRL)
+#define G_T7_MEM_ADDR_CTRL(x) (((x) >> S_T7_MEM_ADDR_CTRL) & M_T7_MEM_ADDR_CTRL)
+
+#define S_ENB_32K_PDU 1
+#define V_ENB_32K_PDU(x) ((x) << S_ENB_32K_PDU)
+#define F_ENB_32K_PDU V_ENB_32K_PDU(1U)
+
+#define S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS 0
+#define V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(x) ((x) << S_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS)
+#define F_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS V_C2H_SUCCESS_WO_LAST_PDU_CHK_DIS(1U)
+
#define A_ULP_RX_TLS_IND_CMD 0x19348
#define S_TLS_RX_REG_OFF_ADDR 0
@@ -37795,6 +48839,8 @@
#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR)
#define A_ULP_RX_TLS_IND_DATA 0x1934c
+#define A_ULP_RX_TLS_CH0_HMACCTRL_CFG 0x20
+#define A_ULP_RX_TLS_CH1_HMACCTRL_CFG 0x60
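+/*
+ * The HMACCTRL_CFG values above (0x20/0x60) are not MMIO addresses;
+ * they appear to be offsets into the TLS indirect window reached
+ * through A_ULP_RX_TLS_IND_CMD/A_ULP_RX_TLS_IND_DATA.  A sketch of
+ * the assumed access pattern (V_TLS_RX_REG_OFF_ADDR follows the
+ * standard field convention):
+ *
+ *	t4_write_reg(sc, A_ULP_RX_TLS_IND_CMD,
+ *	    V_TLS_RX_REG_OFF_ADDR(A_ULP_RX_TLS_CH0_HMACCTRL_CFG));
+ *	cfg = t4_read_reg(sc, A_ULP_RX_TLS_IND_DATA);
+ */
+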
/* registers for module SF */
#define SF_BASE_ADDR 0x193f8
@@ -37815,6 +48861,39 @@
#define V_BYTECNT(x) ((x) << S_BYTECNT)
#define G_BYTECNT(x) (((x) >> S_BYTECNT) & M_BYTECNT)
+#define S_EN32BADDR 30
+#define V_EN32BADDR(x) ((x) << S_EN32BADDR)
+#define F_EN32BADDR V_EN32BADDR(1U)
+
+#define S_NUM_OF_BYTES 1
+#define M_NUM_OF_BYTES 0x3U
+#define V_NUM_OF_BYTES(x) ((x) << S_NUM_OF_BYTES)
+#define G_NUM_OF_BYTES(x) (((x) >> S_NUM_OF_BYTES) & M_NUM_OF_BYTES)
+
+#define S_QUADREADDISABLE 5
+#define V_QUADREADDISABLE(x) ((x) << S_QUADREADDISABLE)
+#define F_QUADREADDISABLE V_QUADREADDISABLE(1U)
+
+#define S_EXIT4B 6
+#define V_EXIT4B(x) ((x) << S_EXIT4B)
+#define F_EXIT4B V_EXIT4B(1U)
+
+#define S_ENTER4B 7
+#define V_ENTER4B(x) ((x) << S_ENTER4B)
+#define F_ENTER4B V_ENTER4B(1U)
+
+#define S_QUADWRENABLE 8
+#define V_QUADWRENABLE(x) ((x) << S_QUADWRENABLE)
+#define F_QUADWRENABLE V_QUADWRENABLE(1U)
+
+#define S_REGDBG_SEL 9
+#define V_REGDBG_SEL(x) ((x) << S_REGDBG_SEL)
+#define F_REGDBG_SEL V_REGDBG_SEL(1U)
+
+#define S_REGDBG_MODE 10
+#define V_REGDBG_MODE(x) ((x) << S_REGDBG_MODE)
+#define F_REGDBG_MODE V_REGDBG_MODE(1U)
+
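+/*
+ * ENTER4B/EXIT4B sit alongside the flash byte-count bits and
+ * presumably toggle 4-byte (>16 MB) serial-flash addressing, with
+ * EN32BADDR reporting the current mode.  A hedged sketch, assuming
+ * these are command bits in the SF operation register:
+ *
+ *	t4_write_reg(sc, A_SF_OP, F_ENTER4B);
+ *	if (t4_read_reg(sc, A_SF_OP) & F_EN32BADDR)
+ *		;	/* 4-byte addressing now active */
+ */
+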
/* registers for module PL */
#define PL_BASE_ADDR 0x19400
@@ -37892,21 +48971,6 @@
#define F_SWINT V_SWINT(1U)
#define A_PL_WHOAMI 0x19400
-
-#define S_T6_SOURCEPF 9
-#define M_T6_SOURCEPF 0x7U
-#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF)
-#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF)
-
-#define S_T6_ISVF 8
-#define V_T6_ISVF(x) ((x) << S_T6_ISVF)
-#define F_T6_ISVF V_T6_ISVF(1U)
-
-#define S_T6_VFID 0
-#define M_T6_VFID 0xffU
-#define V_T6_VFID(x) ((x) << S_T6_VFID)
-#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID)
-
#define A_PL_PERR_CAUSE 0x19404
#define S_UART 28
@@ -38037,6 +49101,134 @@
#define V_ANYMAC(x) ((x) << S_ANYMAC)
#define F_ANYMAC V_ANYMAC(1U)
+#define S_T7_PL_PERR_CRYPTO_KEY 31
+#define V_T7_PL_PERR_CRYPTO_KEY(x) ((x) << S_T7_PL_PERR_CRYPTO_KEY)
+#define F_T7_PL_PERR_CRYPTO_KEY V_T7_PL_PERR_CRYPTO_KEY(1U)
+
+#define S_T7_PL_PERR_CRYPTO1 30
+#define V_T7_PL_PERR_CRYPTO1(x) ((x) << S_T7_PL_PERR_CRYPTO1)
+#define F_T7_PL_PERR_CRYPTO1 V_T7_PL_PERR_CRYPTO1(1U)
+
+#define S_T7_PL_PERR_CRYPTO0 29
+#define V_T7_PL_PERR_CRYPTO0(x) ((x) << S_T7_PL_PERR_CRYPTO0)
+#define F_T7_PL_PERR_CRYPTO0 V_T7_PL_PERR_CRYPTO0(1U)
+
+#define S_T7_PL_PERR_GCACHE 28
+#define V_T7_PL_PERR_GCACHE(x) ((x) << S_T7_PL_PERR_GCACHE)
+#define F_T7_PL_PERR_GCACHE V_T7_PL_PERR_GCACHE(1U)
+
+#define S_T7_PL_PERR_ARM 27
+#define V_T7_PL_PERR_ARM(x) ((x) << S_T7_PL_PERR_ARM)
+#define F_T7_PL_PERR_ARM V_T7_PL_PERR_ARM(1U)
+
+#define S_T7_PL_PERR_ULP_TX 26
+#define V_T7_PL_PERR_ULP_TX(x) ((x) << S_T7_PL_PERR_ULP_TX)
+#define F_T7_PL_PERR_ULP_TX V_T7_PL_PERR_ULP_TX(1U)
+
+#define S_T7_PL_PERR_SGE 25
+#define V_T7_PL_PERR_SGE(x) ((x) << S_T7_PL_PERR_SGE)
+#define F_T7_PL_PERR_SGE V_T7_PL_PERR_SGE(1U)
+
+#define S_T7_PL_PERR_HMA 24
+#define V_T7_PL_PERR_HMA(x) ((x) << S_T7_PL_PERR_HMA)
+#define F_T7_PL_PERR_HMA V_T7_PL_PERR_HMA(1U)
+
+#define S_T7_PL_PERR_CPL_SWITCH 23
+#define V_T7_PL_PERR_CPL_SWITCH(x) ((x) << S_T7_PL_PERR_CPL_SWITCH)
+#define F_T7_PL_PERR_CPL_SWITCH V_T7_PL_PERR_CPL_SWITCH(1U)
+
+#define S_T7_PL_PERR_ULP_RX 22
+#define V_T7_PL_PERR_ULP_RX(x) ((x) << S_T7_PL_PERR_ULP_RX)
+#define F_T7_PL_PERR_ULP_RX V_T7_PL_PERR_ULP_RX(1U)
+
+#define S_T7_PL_PERR_PM_RX 21
+#define V_T7_PL_PERR_PM_RX(x) ((x) << S_T7_PL_PERR_PM_RX)
+#define F_T7_PL_PERR_PM_RX V_T7_PL_PERR_PM_RX(1U)
+
+#define S_T7_PL_PERR_PM_TX 20
+#define V_T7_PL_PERR_PM_TX(x) ((x) << S_T7_PL_PERR_PM_TX)
+#define F_T7_PL_PERR_PM_TX V_T7_PL_PERR_PM_TX(1U)
+
+#define S_T7_PL_PERR_MA 19
+#define V_T7_PL_PERR_MA(x) ((x) << S_T7_PL_PERR_MA)
+#define F_T7_PL_PERR_MA V_T7_PL_PERR_MA(1U)
+
+#define S_T7_PL_PERR_TP 18
+#define V_T7_PL_PERR_TP(x) ((x) << S_T7_PL_PERR_TP)
+#define F_T7_PL_PERR_TP V_T7_PL_PERR_TP(1U)
+
+#define S_T7_PL_PERR_LE 17
+#define V_T7_PL_PERR_LE(x) ((x) << S_T7_PL_PERR_LE)
+#define F_T7_PL_PERR_LE V_T7_PL_PERR_LE(1U)
+
+#define S_T7_PL_PERR_EDC1 16
+#define V_T7_PL_PERR_EDC1(x) ((x) << S_T7_PL_PERR_EDC1)
+#define F_T7_PL_PERR_EDC1 V_T7_PL_PERR_EDC1(1U)
+
+#define S_T7_PL_PERR_EDC0 15
+#define V_T7_PL_PERR_EDC0(x) ((x) << S_T7_PL_PERR_EDC0)
+#define F_T7_PL_PERR_EDC0 V_T7_PL_PERR_EDC0(1U)
+
+#define S_T7_PL_PERR_MC1 14
+#define V_T7_PL_PERR_MC1(x) ((x) << S_T7_PL_PERR_MC1)
+#define F_T7_PL_PERR_MC1 V_T7_PL_PERR_MC1(1U)
+
+#define S_T7_PL_PERR_MC0 13
+#define V_T7_PL_PERR_MC0(x) ((x) << S_T7_PL_PERR_MC0)
+#define F_T7_PL_PERR_MC0 V_T7_PL_PERR_MC0(1U)
+
+#define S_T7_PL_PERR_PCIE 12
+#define V_T7_PL_PERR_PCIE(x) ((x) << S_T7_PL_PERR_PCIE)
+#define F_T7_PL_PERR_PCIE V_T7_PL_PERR_PCIE(1U)
+
+#define S_T7_PL_PERR_UART 11
+#define V_T7_PL_PERR_UART(x) ((x) << S_T7_PL_PERR_UART)
+#define F_T7_PL_PERR_UART V_T7_PL_PERR_UART(1U)
+
+#define S_T7_PL_PERR_PMU 10
+#define V_T7_PL_PERR_PMU(x) ((x) << S_T7_PL_PERR_PMU)
+#define F_T7_PL_PERR_PMU V_T7_PL_PERR_PMU(1U)
+
+#define S_T7_PL_PERR_MAC 9
+#define V_T7_PL_PERR_MAC(x) ((x) << S_T7_PL_PERR_MAC)
+#define F_T7_PL_PERR_MAC V_T7_PL_PERR_MAC(1U)
+
+#define S_T7_PL_PERR_SMB 8
+#define V_T7_PL_PERR_SMB(x) ((x) << S_T7_PL_PERR_SMB)
+#define F_T7_PL_PERR_SMB V_T7_PL_PERR_SMB(1U)
+
+#define S_T7_PL_PERR_SF 7
+#define V_T7_PL_PERR_SF(x) ((x) << S_T7_PL_PERR_SF)
+#define F_T7_PL_PERR_SF V_T7_PL_PERR_SF(1U)
+
+#define S_T7_PL_PERR_PL 6
+#define V_T7_PL_PERR_PL(x) ((x) << S_T7_PL_PERR_PL)
+#define F_T7_PL_PERR_PL V_T7_PL_PERR_PL(1U)
+
+#define S_T7_PL_PERR_NCSI 5
+#define V_T7_PL_PERR_NCSI(x) ((x) << S_T7_PL_PERR_NCSI)
+#define F_T7_PL_PERR_NCSI V_T7_PL_PERR_NCSI(1U)
+
+#define S_T7_PL_PERR_MPS 4
+#define V_T7_PL_PERR_MPS(x) ((x) << S_T7_PL_PERR_MPS)
+#define F_T7_PL_PERR_MPS V_T7_PL_PERR_MPS(1U)
+
+#define S_T7_PL_PERR_MI 3
+#define V_T7_PL_PERR_MI(x) ((x) << S_T7_PL_PERR_MI)
+#define F_T7_PL_PERR_MI V_T7_PL_PERR_MI(1U)
+
+#define S_T7_PL_PERR_DBG 2
+#define V_T7_PL_PERR_DBG(x) ((x) << S_T7_PL_PERR_DBG)
+#define F_T7_PL_PERR_DBG V_T7_PL_PERR_DBG(1U)
+
+#define S_T7_PL_PERR_I2CM 1
+#define V_T7_PL_PERR_I2CM(x) ((x) << S_T7_PL_PERR_I2CM)
+#define F_T7_PL_PERR_I2CM V_T7_PL_PERR_I2CM(1U)
+
+#define S_T7_PL_PERR_CIM 0
+#define V_T7_PL_PERR_CIM(x) ((x) << S_T7_PL_PERR_CIM)
+#define F_T7_PL_PERR_CIM V_T7_PL_PERR_CIM(1U)
+
#define A_PL_PERR_ENABLE 0x19408
#define A_PL_INT_CAUSE 0x1940c
@@ -38064,6 +49256,78 @@
#define V_MAC0(x) ((x) << S_MAC0)
#define F_MAC0 V_MAC0(1U)
+#define S_T7_FLR 31
+#define V_T7_FLR(x) ((x) << S_T7_FLR)
+#define F_T7_FLR V_T7_FLR(1U)
+
+#define S_T7_SW_CIM 30
+#define V_T7_SW_CIM(x) ((x) << S_T7_SW_CIM)
+#define F_T7_SW_CIM V_T7_SW_CIM(1U)
+
+#define S_T7_ULP_TX 29
+#define V_T7_ULP_TX(x) ((x) << S_T7_ULP_TX)
+#define F_T7_ULP_TX V_T7_ULP_TX(1U)
+
+#define S_T7_SGE 28
+#define V_T7_SGE(x) ((x) << S_T7_SGE)
+#define F_T7_SGE V_T7_SGE(1U)
+
+#define S_T7_HMA 27
+#define V_T7_HMA(x) ((x) << S_T7_HMA)
+#define F_T7_HMA V_T7_HMA(1U)
+
+#define S_T7_CPL_SWITCH 26
+#define V_T7_CPL_SWITCH(x) ((x) << S_T7_CPL_SWITCH)
+#define F_T7_CPL_SWITCH V_T7_CPL_SWITCH(1U)
+
+#define S_T7_ULP_RX 25
+#define V_T7_ULP_RX(x) ((x) << S_T7_ULP_RX)
+#define F_T7_ULP_RX V_T7_ULP_RX(1U)
+
+#define S_T7_PM_RX 24
+#define V_T7_PM_RX(x) ((x) << S_T7_PM_RX)
+#define F_T7_PM_RX V_T7_PM_RX(1U)
+
+#define S_T7_PM_TX 23
+#define V_T7_PM_TX(x) ((x) << S_T7_PM_TX)
+#define F_T7_PM_TX V_T7_PM_TX(1U)
+
+#define S_T7_MA 22
+#define V_T7_MA(x) ((x) << S_T7_MA)
+#define F_T7_MA V_T7_MA(1U)
+
+#define S_T7_TP 21
+#define V_T7_TP(x) ((x) << S_T7_TP)
+#define F_T7_TP V_T7_TP(1U)
+
+#define S_T7_LE 20
+#define V_T7_LE(x) ((x) << S_T7_LE)
+#define F_T7_LE V_T7_LE(1U)
+
+#define S_T7_EDC1 19
+#define V_T7_EDC1(x) ((x) << S_T7_EDC1)
+#define F_T7_EDC1 V_T7_EDC1(1U)
+
+#define S_T7_EDC0 18
+#define V_T7_EDC0(x) ((x) << S_T7_EDC0)
+#define F_T7_EDC0 V_T7_EDC0(1U)
+
+#define S_T7_MC1 17
+#define V_T7_MC1(x) ((x) << S_T7_MC1)
+#define F_T7_MC1 V_T7_MC1(1U)
+
+#define S_T7_MC0 16
+#define V_T7_MC0(x) ((x) << S_T7_MC0)
+#define F_T7_MC0 V_T7_MC0(1U)
+
+#define S_T7_PCIE 15
+#define V_T7_PCIE(x) ((x) << S_T7_PCIE)
+#define F_T7_PCIE V_T7_PCIE(1U)
+
+#define S_T7_UART 14
+#define V_T7_UART(x) ((x) << S_T7_UART)
+#define F_T7_UART V_T7_UART(1U)
+
#define A_PL_INT_ENABLE 0x19410
#define A_PL_INT_MAP0 0x19414
@@ -38262,15 +49526,10 @@
#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD)
#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD)
-#define S_T6_STATECFGINITF 16
-#define M_T6_STATECFGINITF 0xffU
-#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF)
-#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF)
-
-#define S_T6_STATECFGINIT 12
-#define M_T6_STATECFGINIT 0xfU
-#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT)
-#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT)
+#define S_T6_1_STATECFGINITF 16
+#define M_T6_1_STATECFGINITF 0xffU
+#define V_T6_1_STATECFGINITF(x) ((x) << S_T6_1_STATECFGINITF)
+#define G_T6_1_STATECFGINITF(x) (((x) >> S_T6_1_STATECFGINITF) & M_T6_1_STATECFGINITF)
#define S_PHY_STATUS 10
#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS)
@@ -38285,9 +49544,9 @@
#define V_PERSTTIMEOUT_PL(x) ((x) << S_PERSTTIMEOUT_PL)
#define F_PERSTTIMEOUT_PL V_PERSTTIMEOUT_PL(1U)
-#define S_T6_LTSSMENABLE 6
-#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE)
-#define F_T6_LTSSMENABLE V_T6_LTSSMENABLE(1U)
+#define S_SPEEDMS 30
+#define V_SPEEDMS(x) ((x) << S_SPEEDMS)
+#define F_SPEEDMS V_SPEEDMS(1U)
#define A_PL_PCIE_CTL_STAT 0x19444
@@ -38382,6 +49641,37 @@
#define V_MAP0(x) ((x) << S_MAP0)
#define G_MAP0(x) (((x) >> S_MAP0) & M_MAP0)
+#define A_PL_INT_CAUSE2 0x19478
+
+#define S_CRYPTO_KEY 4
+#define V_CRYPTO_KEY(x) ((x) << S_CRYPTO_KEY)
+#define F_CRYPTO_KEY V_CRYPTO_KEY(1U)
+
+#define S_CRYPTO1 3
+#define V_CRYPTO1(x) ((x) << S_CRYPTO1)
+#define F_CRYPTO1 V_CRYPTO1(1U)
+
+#define S_CRYPTO0 2
+#define V_CRYPTO0(x) ((x) << S_CRYPTO0)
+#define F_CRYPTO0 V_CRYPTO0(1U)
+
+#define S_GCACHE 1
+#define V_GCACHE(x) ((x) << S_GCACHE)
+#define F_GCACHE V_GCACHE(1U)
+
+#define S_ARM 0
+#define V_ARM(x) ((x) << S_ARM)
+#define F_ARM V_ARM(1U)
+
+#define A_PL_INT_ENABLE2 0x1947c
+#define A_PL_ER_CMD 0x19488
+
+#define S_ER_ADDR 2
+#define M_ER_ADDR 0x3fffffffU
+#define V_ER_ADDR(x) ((x) << S_ER_ADDR)
+#define G_ER_ADDR(x) (((x) >> S_ER_ADDR) & M_ER_ADDR)
+
+#define A_PL_ER_DATA 0x1948c
#define A_PL_VF_SLICE_L 0x19490
#define S_LIMITADDR 16
@@ -38638,6 +49928,10 @@
#define V_REGION_EN(x) ((x) << S_REGION_EN)
#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN)
+#define S_CACHEBYPASS 28
+#define V_CACHEBYPASS(x) ((x) << S_CACHEBYPASS)
+#define F_CACHEBYPASS V_CACHEBYPASS(1U)
+
#define A_LE_MISC 0x19c08
#define S_CMPUNVAIL 0
@@ -38830,6 +50124,10 @@
#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE)
#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE)
+#define S_MLL_MASK 2
+#define V_MLL_MASK(x) ((x) << S_MLL_MASK)
+#define F_MLL_MASK V_MLL_MASK(1U)
+
#define A_LE_DB_INT_ENABLE 0x19c38
#define S_MSGSEL 27
@@ -39045,40 +50343,15 @@
#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR)
#define F_PIPELINEERR V_PIPELINEERR(1U)
-#define A_LE_DB_INT_CAUSE 0x19c3c
-
-#define S_T6_ACTRGNFULL 21
-#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL)
-#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U)
+#define S_CACHEINTPERR 31
+#define V_CACHEINTPERR(x) ((x) << S_CACHEINTPERR)
+#define F_CACHEINTPERR V_CACHEINTPERR(1U)
-#define S_T6_ACTCNTIPV6TZERO 20
-#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO)
-#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U)
-
-#define S_T6_ACTCNTIPV4TZERO 19
-#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO)
-#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U)
-
-#define S_T6_ACTCNTIPV6ZERO 18
-#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO)
-#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U)
-
-#define S_T6_ACTCNTIPV4ZERO 17
-#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO)
-#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U)
-
-#define S_T6_UNKNOWNCMD 3
-#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD)
-#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U)
-
-#define S_T6_LIP0 2
-#define V_T6_LIP0(x) ((x) << S_T6_LIP0)
-#define F_T6_LIP0 V_T6_LIP0(1U)
-
-#define S_T6_LIPMISS 1
-#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS)
-#define F_T6_LIPMISS V_T6_LIPMISS(1U)
+#define S_CACHESRAMPERR 30
+#define V_CACHESRAMPERR(x) ((x) << S_CACHESRAMPERR)
+#define F_CACHESRAMPERR V_CACHESRAMPERR(1U)
+#define A_LE_DB_INT_CAUSE 0x19c3c
#define A_LE_DB_INT_TID 0x19c40
#define S_INTTID 0
@@ -39287,6 +50560,14 @@
#define A_LE_DB_MASK_IPV6 0x19ca0
#define A_LE_DB_DBG_MATCH_DATA 0x19ca0
+#define A_LE_CMM_CONFIG 0x19cc0
+#define A_LE_CACHE_DBG 0x19cc4
+#define A_LE_CACHE_WR_ALL_CNT 0x19cc8
+#define A_LE_CACHE_WR_HIT_CNT 0x19ccc
+#define A_LE_CACHE_RD_ALL_CNT 0x19cd0
+#define A_LE_CACHE_RD_HIT_CNT 0x19cd4
+#define A_LE_CACHE_MC_WR_CNT 0x19cd8
+#define A_LE_CACHE_MC_RD_CNT 0x19cdc
#define A_LE_DB_REQ_RSP_CNT 0x19ce4
#define S_T4_RSPCNT 16
@@ -39309,6 +50590,14 @@
#define V_REQCNTLE(x) ((x) << S_REQCNTLE)
#define G_REQCNTLE(x) (((x) >> S_REQCNTLE) & M_REQCNTLE)
+#define A_LE_IND_ADDR 0x19ce8
+
+#define S_T7_1_ADDR 0
+#define M_T7_1_ADDR 0xffU
+#define V_T7_1_ADDR(x) ((x) << S_T7_1_ADDR)
+#define G_T7_1_ADDR(x) (((x) >> S_T7_1_ADDR) & M_T7_1_ADDR)
+
+#define A_LE_IND_DATA 0x19cec
#define A_LE_DB_DBGI_CONFIG 0x19cf0
#define S_DBGICMDPERR 31
@@ -39436,6 +50725,11 @@
#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR)
#define F_T6_HASHTBLMEMCRCERR V_T6_HASHTBLMEMCRCERR(1U)
+#define S_T7_BKCHKPERIOD 22
+#define M_T7_BKCHKPERIOD 0xffU
+#define V_T7_BKCHKPERIOD(x) ((x) << S_T7_BKCHKPERIOD)
+#define G_T7_BKCHKPERIOD(x) (((x) >> S_T7_BKCHKPERIOD) & M_T7_BKCHKPERIOD)
+
#define A_LE_SPARE 0x19cfc
#define A_LE_DB_DBGI_REQ_DATA 0x19d00
#define A_LE_DB_DBGI_REQ_MASK 0x19d50
@@ -39551,6 +50845,7 @@
#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE)
#define G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE)
+#define A_T7_LE_DB_HASH_TID_BASE 0x19df8
#define A_LE_PERR_INJECT 0x19dfc
#define S_LEMEMSEL 1
@@ -39573,6 +50868,7 @@
#define A_LE_HASH_MASK_GEN_IPV6 0x19eb0
#define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4
#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
+#define A_T7_LE_HASH_MASK_GEN_IPV6T5 0x19ec4
#define A_LE_HASH_MASK_CMP_IPV4 0x19ee0
#define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4
#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4
@@ -39677,6 +50973,9 @@
#define A_LE_TCAM_DEBUG_LA_DATA 0x19f4c
#define A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 0x19f90
#define A_LE_DB_SECOND_CMP_HASH_MASK_IPV4 0x19fa4
+#define A_LE_TCAM_BIST_CTRL 0x19fb0
+#define A_LE_TCAM_BIST_CB_PASS 0x19fb4
+#define A_LE_TCAM_BIST_CB_BUSY 0x19fbc
#define A_LE_HASH_COLLISION 0x19fc4
#define A_LE_GLOBAL_COLLISION 0x19fc8
#define A_LE_FULL_CNT_COLLISION 0x19fcc
@@ -39686,6 +50985,38 @@
#define A_LE_RSP_DEBUG_LA_DATAT5 0x19fdc
#define A_LE_RSP_DEBUG_LA_WRPTRT5 0x19fe0
#define A_LE_DEBUG_LA_SEL_DATA 0x19fe4
+#define A_LE_TCAM_NEG_CTRL0 0x0
+#define A_LE_TCAM_NEG_CTRL1 0x1
+#define A_LE_TCAM_NEG_CTRL2 0x2
+#define A_LE_TCAM_NEG_CTRL3 0x3
+#define A_LE_TCAM_NEG_CTRL4 0x4
+#define A_LE_TCAM_NEG_CTRL5 0x5
+#define A_LE_TCAM_NEG_CTRL6 0x6
+#define A_LE_TCAM_NEG_CTRL7 0x7
+#define A_LE_TCAM_NEG_CTRL8 0x8
+#define A_LE_TCAM_NEG_CTRL9 0x9
+#define A_LE_TCAM_NEG_CTRL10 0xa
+#define A_LE_TCAM_NEG_CTRL11 0xb
+#define A_LE_TCAM_NEG_CTRL12 0xc
+#define A_LE_TCAM_NEG_CTRL13 0xd
+#define A_LE_TCAM_NEG_CTRL14 0xe
+#define A_LE_TCAM_NEG_CTRL15 0xf
+#define A_LE_TCAM_NEG_CTRL16 0x10
+#define A_LE_TCAM_NEG_CTRL17 0x11
+#define A_LE_TCAM_NEG_CTRL18 0x12
+#define A_LE_TCAM_NEG_CTRL19 0x13
+#define A_LE_TCAM_NEG_CTRL20 0x14
+#define A_LE_TCAM_NEG_CTRL21 0x15
+#define A_LE_TCAM_NEG_CTRL22 0x16
+#define A_LE_TCAM_NEG_CTRL23 0x17
+#define A_LE_TCAM_NEG_CTRL24 0x18
+#define A_LE_TCAM_NEG_CTRL25 0x19
+#define A_LE_TCAM_NEG_CTRL26 0x1a
+#define A_LE_TCAM_NEG_CTRL27 0x1b
+#define A_LE_TCAM_NEG_CTRL28 0x1c
+#define A_LE_TCAM_NEG_CTRL29 0x1d
+#define A_LE_TCAM_NEG_CTRL30 0x1e
+#define A_LE_TCAM_NEG_CTRL31 0x1f
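+/*
+ * A_LE_TCAM_NEG_CTRL0..31 (0x0-0x1f) likewise look like indirect
+ * offsets rather than MMIO addresses, fitting the 8-bit address field
+ * of A_LE_IND_ADDR/A_LE_IND_DATA defined earlier.  Assumed access
+ * pattern, mirroring the TLS indirect window above:
+ *
+ *	t4_write_reg(sc, A_LE_IND_ADDR, V_T7_1_ADDR(A_LE_TCAM_NEG_CTRL5));
+ *	ctrl = t4_read_reg(sc, A_LE_IND_DATA);
+ */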
/* registers for module NCSI */
#define NCSI_BASE_ADDR 0x1a000
@@ -39735,6 +51066,10 @@
#define V_TX_BYTE_SWAP(x) ((x) << S_TX_BYTE_SWAP)
#define F_TX_BYTE_SWAP V_TX_BYTE_SWAP(1U)
+#define S_XGMAC0_EN 0
+#define V_XGMAC0_EN(x) ((x) << S_XGMAC0_EN)
+#define F_XGMAC0_EN V_XGMAC0_EN(1U)
+
#define A_NCSI_RST_CTRL 0x1a004
#define S_MAC_REF_RST 2
@@ -39991,6 +51326,10 @@
#define V_RXFIFO_PRTY_ERR(x) ((x) << S_RXFIFO_PRTY_ERR)
#define F_RXFIFO_PRTY_ERR V_RXFIFO_PRTY_ERR(1U)
+#define S_CIM2NC_PERR 9
+#define V_CIM2NC_PERR(x) ((x) << S_CIM2NC_PERR)
+#define F_CIM2NC_PERR V_CIM2NC_PERR(1U)
+
#define A_NCSI_INT_CAUSE 0x1a0d8
#define A_NCSI_STATUS 0x1a0dc
@@ -40048,6 +51387,12 @@
#define F_MCSIMELSEL V_MCSIMELSEL(1U)
#define A_NCSI_PERR_ENABLE 0x1a0f8
+#define A_NCSI_MODE_SEL 0x1a0fc
+
+#define S_XGMAC_MODE 0
+#define V_XGMAC_MODE(x) ((x) << S_XGMAC_MODE)
+#define F_XGMAC_MODE V_XGMAC_MODE(1U)
+
#define A_NCSI_MACB_NETWORK_CTRL 0x1a100
#define S_TXSNDZEROPAUSE 12
@@ -40550,6 +51895,832 @@
#define V_DESREV(x) ((x) << S_DESREV)
#define G_DESREV(x) (((x) >> S_DESREV) & M_DESREV)
+#define A_NCSI_TX_CTRL 0x1a200
+
+#define S_T7_TXEN 0
+#define V_T7_TXEN(x) ((x) << S_T7_TXEN)
+#define F_T7_TXEN V_T7_TXEN(1U)
+
+#define A_NCSI_TX_CFG 0x1a204
+#define A_NCSI_TX_PAUSE_QUANTA 0x1a208
+#define A_NCSI_RX_CTRL 0x1a20c
+#define A_NCSI_RX_CFG 0x1a210
+#define A_NCSI_RX_HASH_LOW 0x1a214
+#define A_NCSI_RX_HASH_HIGH 0x1a218
+#define A_NCSI_RX_EXACT_MATCH_LOW_1 0x1a21c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_1 0x1a220
+#define A_NCSI_RX_EXACT_MATCH_LOW_2 0x1a224
+#define A_NCSI_RX_EXACT_MATCH_HIGH_2 0x1a228
+#define A_NCSI_RX_EXACT_MATCH_LOW_3 0x1a22c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_3 0x1a230
+#define A_NCSI_RX_EXACT_MATCH_LOW_4 0x1a234
+#define A_NCSI_RX_EXACT_MATCH_HIGH_4 0x1a238
+#define A_NCSI_RX_EXACT_MATCH_LOW_5 0x1a23c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_5 0x1a240
+#define A_NCSI_RX_EXACT_MATCH_LOW_6 0x1a244
+#define A_NCSI_RX_EXACT_MATCH_HIGH_6 0x1a248
+#define A_NCSI_RX_EXACT_MATCH_LOW_7 0x1a24c
+#define A_NCSI_RX_EXACT_MATCH_HIGH_7 0x1a250
+#define A_NCSI_RX_EXACT_MATCH_LOW_8 0x1a254
+#define A_NCSI_RX_EXACT_MATCH_HIGH_8 0x1a258
+#define A_NCSI_RX_TYPE_MATCH_1 0x1a25c
+#define A_NCSI_RX_TYPE_MATCH_2 0x1a260
+#define A_NCSI_RX_TYPE_MATCH_3 0x1a264
+#define A_NCSI_RX_TYPE_MATCH_4 0x1a268
+#define A_NCSI_INT_STATUS 0x1a26c
+#define A_NCSI_XGM_INT_MASK 0x1a270
+#define A_NCSI_XGM_INT_ENABLE 0x1a274
+#define A_NCSI_XGM_INT_DISABLE 0x1a278
+#define A_NCSI_TX_PAUSE_TIMER 0x1a27c
+#define A_NCSI_STAT_CTRL 0x1a280
+#define A_NCSI_RXFIFO_CFG 0x1a284
+
+#define S_RXFIFO_EMPTY 31
+#define V_RXFIFO_EMPTY(x) ((x) << S_RXFIFO_EMPTY)
+#define F_RXFIFO_EMPTY V_RXFIFO_EMPTY(1U)
+
+#define S_RXFIFO_FULL 30
+#define V_RXFIFO_FULL(x) ((x) << S_RXFIFO_FULL)
+#define F_RXFIFO_FULL V_RXFIFO_FULL(1U)
+
+#define S_RXFIFOPAUSEHWM 17
+#define M_RXFIFOPAUSEHWM 0xfffU
+#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
+#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)
+
+#define S_RXFIFOPAUSELWM 5
+#define M_RXFIFOPAUSELWM 0xfffU
+#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
+#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)
+
+#define S_FORCEDPAUSE 4
+#define V_FORCEDPAUSE(x) ((x) << S_FORCEDPAUSE)
+#define F_FORCEDPAUSE V_FORCEDPAUSE(1U)
+
+#define S_EXTERNLOOPBACK 3
+#define V_EXTERNLOOPBACK(x) ((x) << S_EXTERNLOOPBACK)
+#define F_EXTERNLOOPBACK V_EXTERNLOOPBACK(1U)
+
+#define S_RXBYTESWAP 2
+#define V_RXBYTESWAP(x) ((x) << S_RXBYTESWAP)
+#define F_RXBYTESWAP V_RXBYTESWAP(1U)
+
+#define S_RXSTRFRWRD 1
+#define V_RXSTRFRWRD(x) ((x) << S_RXSTRFRWRD)
+#define F_RXSTRFRWRD V_RXSTRFRWRD(1U)
+
+#define S_DISERRFRAMES 0
+#define V_DISERRFRAMES(x) ((x) << S_DISERRFRAMES)
+#define F_DISERRFRAMES V_DISERRFRAMES(1U)
+
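As a concrete instance, the high and low pause watermarks in A_NCSI_RXFIFO_CFG are adjacent 12-bit fields. The sketch below composes a register value with both watermarks and reads them back; it uses only the macros defined above, and the watermark values themselves are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* Copied from the A_NCSI_RXFIFO_CFG field definitions above. */
#define S_RXFIFOPAUSEHWM 17
#define M_RXFIFOPAUSEHWM 0xfffU
#define V_RXFIFOPAUSEHWM(x) ((x) << S_RXFIFOPAUSEHWM)
#define G_RXFIFOPAUSEHWM(x) (((x) >> S_RXFIFOPAUSEHWM) & M_RXFIFOPAUSEHWM)

#define S_RXFIFOPAUSELWM 5
#define M_RXFIFOPAUSELWM 0xfffU
#define V_RXFIFOPAUSELWM(x) ((x) << S_RXFIFOPAUSELWM)
#define G_RXFIFOPAUSELWM(x) (((x) >> S_RXFIFOPAUSELWM) & M_RXFIFOPAUSELWM)

int main(void)
{
	/* Arbitrary example watermarks, in FIFO units. */
	uint32_t v = V_RXFIFOPAUSEHWM(0x700) | V_RXFIFOPAUSELWM(0x100);

	printf("HWM=0x%x LWM=0x%x\n",
	    (unsigned)G_RXFIFOPAUSEHWM(v), (unsigned)G_RXFIFOPAUSELWM(v));
	return 0;
}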
+#define A_NCSI_TXFIFO_CFG 0x1a288
+
+#define S_T7_TXFIFO_EMPTY 31
+#define V_T7_TXFIFO_EMPTY(x) ((x) << S_T7_TXFIFO_EMPTY)
+#define F_T7_TXFIFO_EMPTY V_T7_TXFIFO_EMPTY(1U)
+
+#define S_T7_TXFIFO_FULL 30
+#define V_T7_TXFIFO_FULL(x) ((x) << S_T7_TXFIFO_FULL)
+#define F_T7_TXFIFO_FULL V_T7_TXFIFO_FULL(1U)
+
+#define S_UNDERUNFIX 22
+#define V_UNDERUNFIX(x) ((x) << S_UNDERUNFIX)
+#define F_UNDERUNFIX V_UNDERUNFIX(1U)
+
+#define S_ENDROPPKT 21
+#define V_ENDROPPKT(x) ((x) << S_ENDROPPKT)
+#define F_ENDROPPKT V_ENDROPPKT(1U)
+
+#define S_TXIPG 13
+#define M_TXIPG 0xffU
+#define V_TXIPG(x) ((x) << S_TXIPG)
+#define G_TXIPG(x) (((x) >> S_TXIPG) & M_TXIPG)
+
+#define S_TXFIFOTHRESH 4
+#define M_TXFIFOTHRESH 0x1ffU
+#define V_TXFIFOTHRESH(x) ((x) << S_TXFIFOTHRESH)
+#define G_TXFIFOTHRESH(x) (((x) >> S_TXFIFOTHRESH) & M_TXFIFOTHRESH)
+
+#define S_INTERNLOOPBACK 3
+#define V_INTERNLOOPBACK(x) ((x) << S_INTERNLOOPBACK)
+#define F_INTERNLOOPBACK V_INTERNLOOPBACK(1U)
+
+#define S_TXBYTESWAP 2
+#define V_TXBYTESWAP(x) ((x) << S_TXBYTESWAP)
+#define F_TXBYTESWAP V_TXBYTESWAP(1U)
+
+#define S_DISCRC 1
+#define V_DISCRC(x) ((x) << S_DISCRC)
+#define F_DISCRC V_DISCRC(1U)
+
+#define S_DISPREAMBLE 0
+#define V_DISPREAMBLE(x) ((x) << S_DISPREAMBLE)
+#define F_DISPREAMBLE V_DISPREAMBLE(1U)
+
+#define A_NCSI_SLOW_TIMER 0x1a28c
+
+#define S_PAUSESLOWTIMEREN 31
+#define V_PAUSESLOWTIMEREN(x) ((x) << S_PAUSESLOWTIMEREN)
+#define F_PAUSESLOWTIMEREN V_PAUSESLOWTIMEREN(1U)
+
+#define S_PAUSESLOWTIMER 0
+#define M_PAUSESLOWTIMER 0xfffffU
+#define V_PAUSESLOWTIMER(x) ((x) << S_PAUSESLOWTIMER)
+#define G_PAUSESLOWTIMER(x) (((x) >> S_PAUSESLOWTIMER) & M_PAUSESLOWTIMER)
+
+#define A_NCSI_PAUSE_TIMER 0x1a290
+
+#define S_PAUSETIMER 0
+#define M_PAUSETIMER 0xfffffU
+#define V_PAUSETIMER(x) ((x) << S_PAUSETIMER)
+#define G_PAUSETIMER(x) (((x) >> S_PAUSETIMER) & M_PAUSETIMER)
+
+#define A_NCSI_XAUI_PCS_TEST 0x1a294
+
+#define S_TESTPATTERN 1
+#define M_TESTPATTERN 0x3U
+#define V_TESTPATTERN(x) ((x) << S_TESTPATTERN)
+#define G_TESTPATTERN(x) (((x) >> S_TESTPATTERN) & M_TESTPATTERN)
+
+#define S_ENTEST 0
+#define V_ENTEST(x) ((x) << S_ENTEST)
+#define F_ENTEST V_ENTEST(1U)
+
+#define A_NCSI_RGMII_CTRL 0x1a298
+
+#define S_PHALIGNFIFOTHRESH 1
+#define M_PHALIGNFIFOTHRESH 0x3U
+#define V_PHALIGNFIFOTHRESH(x) ((x) << S_PHALIGNFIFOTHRESH)
+#define G_PHALIGNFIFOTHRESH(x) (((x) >> S_PHALIGNFIFOTHRESH) & M_PHALIGNFIFOTHRESH)
+
+#define S_TXCLK90SHIFT 0
+#define V_TXCLK90SHIFT(x) ((x) << S_TXCLK90SHIFT)
+#define F_TXCLK90SHIFT V_TXCLK90SHIFT(1U)
+
+#define A_NCSI_RGMII_IMP 0x1a29c
+
+#define S_CALRESET 8
+#define V_CALRESET(x) ((x) << S_CALRESET)
+#define F_CALRESET V_CALRESET(1U)
+
+#define S_CALUPDATE 7
+#define V_CALUPDATE(x) ((x) << S_CALUPDATE)
+#define F_CALUPDATE V_CALUPDATE(1U)
+
+#define S_IMPSETUPDATE 6
+#define V_IMPSETUPDATE(x) ((x) << S_IMPSETUPDATE)
+#define F_IMPSETUPDATE V_IMPSETUPDATE(1U)
+
+#define S_RGMIIIMPPD 3
+#define M_RGMIIIMPPD 0x7U
+#define V_RGMIIIMPPD(x) ((x) << S_RGMIIIMPPD)
+#define G_RGMIIIMPPD(x) (((x) >> S_RGMIIIMPPD) & M_RGMIIIMPPD)
+
+#define S_RGMIIIMPPU 0
+#define M_RGMIIIMPPU 0x7U
+#define V_RGMIIIMPPU(x) ((x) << S_RGMIIIMPPU)
+#define G_RGMIIIMPPU(x) (((x) >> S_RGMIIIMPPU) & M_RGMIIIMPPU)
+
+#define A_NCSI_RX_MAX_PKT_SIZE 0x1a2a8
+
+#define S_RXMAXFRAMERSIZE 17
+#define M_RXMAXFRAMERSIZE 0x3fffU
+#define V_RXMAXFRAMERSIZE(x) ((x) << S_RXMAXFRAMERSIZE)
+#define G_RXMAXFRAMERSIZE(x) (((x) >> S_RXMAXFRAMERSIZE) & M_RXMAXFRAMERSIZE)
+
+#define S_RXENERRORGATHER 16
+#define V_RXENERRORGATHER(x) ((x) << S_RXENERRORGATHER)
+#define F_RXENERRORGATHER V_RXENERRORGATHER(1U)
+
+#define S_RXENSINGLEFLIT 15
+#define V_RXENSINGLEFLIT(x) ((x) << S_RXENSINGLEFLIT)
+#define F_RXENSINGLEFLIT V_RXENSINGLEFLIT(1U)
+
+#define S_RXENFRAMER 14
+#define V_RXENFRAMER(x) ((x) << S_RXENFRAMER)
+#define F_RXENFRAMER V_RXENFRAMER(1U)
+
+#define S_RXMAXPKTSIZE 0
+#define M_RXMAXPKTSIZE 0x3fffU
+#define V_RXMAXPKTSIZE(x) ((x) << S_RXMAXPKTSIZE)
+#define G_RXMAXPKTSIZE(x) (((x) >> S_RXMAXPKTSIZE) & M_RXMAXPKTSIZE)
+
+#define A_NCSI_RESET_CTRL 0x1a2ac
+
+#define S_XGMAC_STOP_EN 4
+#define V_XGMAC_STOP_EN(x) ((x) << S_XGMAC_STOP_EN)
+#define F_XGMAC_STOP_EN V_XGMAC_STOP_EN(1U)
+
+#define S_XG2G_RESET_ 3
+#define V_XG2G_RESET_(x) ((x) << S_XG2G_RESET_)
+#define F_XG2G_RESET_ V_XG2G_RESET_(1U)
+
+#define S_RGMII_RESET_ 2
+#define V_RGMII_RESET_(x) ((x) << S_RGMII_RESET_)
+#define F_RGMII_RESET_ V_RGMII_RESET_(1U)
+
+#define S_PCS_RESET_ 1
+#define V_PCS_RESET_(x) ((x) << S_PCS_RESET_)
+#define F_PCS_RESET_ V_PCS_RESET_(1U)
+
+#define S_MAC_RESET_ 0
+#define V_MAC_RESET_(x) ((x) << S_MAC_RESET_)
+#define F_MAC_RESET_ V_MAC_RESET_(1U)
+
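The trailing underscore on the A_NCSI_RESET_CTRL bit names conventionally marks active-low resets, so writing a 1 releases the corresponding block from reset. That polarity is an inference from the naming convention, not stated here. A hedged sketch, with reg_write() as a hypothetical stand-in for the driver's register write path:

#include <stdint.h>
#include <stdio.h>

/* Copied from the A_NCSI_RESET_CTRL definitions above. */
#define A_NCSI_RESET_CTRL 0x1a2ac
#define F_XGMAC_STOP_EN (1U << 4)
#define F_XG2G_RESET_   (1U << 3)
#define F_RGMII_RESET_  (1U << 2)
#define F_PCS_RESET_    (1U << 1)
#define F_MAC_RESET_    (1U << 0)

/* Hypothetical stand-in; a real driver would touch hardware here. */
static void reg_write(uint32_t addr, uint32_t val)
{
	printf("write 0x%05x <- 0x%08x\n", (unsigned)addr, (unsigned)val);
}

int main(void)
{
	/*
	 * Assuming the trailing-underscore bits are active-low, setting
	 * them releases the MAC, PCS, RGMII, and XG2G blocks from reset.
	 */
	reg_write(A_NCSI_RESET_CTRL,
	    F_XG2G_RESET_ | F_RGMII_RESET_ | F_PCS_RESET_ | F_MAC_RESET_);
	return 0;
}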
+#define A_NCSI_XAUI1G_CTRL 0x1a2b0
+
+#define S_XAUI1GLINKID 0
+#define M_XAUI1GLINKID 0x3U
+#define V_XAUI1GLINKID(x) ((x) << S_XAUI1GLINKID)
+#define G_XAUI1GLINKID(x) (((x) >> S_XAUI1GLINKID) & M_XAUI1GLINKID)
+
+#define A_NCSI_SERDES_LANE_CTRL 0x1a2b4
+
+#define S_LANEREVERSAL 8
+#define V_LANEREVERSAL(x) ((x) << S_LANEREVERSAL)
+#define F_LANEREVERSAL V_LANEREVERSAL(1U)
+
+#define S_TXPOLARITY 4
+#define M_TXPOLARITY 0xfU
+#define V_TXPOLARITY(x) ((x) << S_TXPOLARITY)
+#define G_TXPOLARITY(x) (((x) >> S_TXPOLARITY) & M_TXPOLARITY)
+
+#define S_RXPOLARITY 0
+#define M_RXPOLARITY 0xfU
+#define V_RXPOLARITY(x) ((x) << S_RXPOLARITY)
+#define G_RXPOLARITY(x) (((x) >> S_RXPOLARITY) & M_RXPOLARITY)
+
+#define A_NCSI_PORT_CFG 0x1a2b8
+
+#define S_NCSI_SAFESPEEDCHANGE 4
+#define V_NCSI_SAFESPEEDCHANGE(x) ((x) << S_NCSI_SAFESPEEDCHANGE)
+#define F_NCSI_SAFESPEEDCHANGE V_NCSI_SAFESPEEDCHANGE(1U)
+
+#define S_NCSI_CLKDIVRESET_ 3
+#define V_NCSI_CLKDIVRESET_(x) ((x) << S_NCSI_CLKDIVRESET_)
+#define F_NCSI_CLKDIVRESET_ V_NCSI_CLKDIVRESET_(1U)
+
+#define S_NCSI_PORTSPEED 1
+#define M_NCSI_PORTSPEED 0x3U
+#define V_NCSI_PORTSPEED(x) ((x) << S_NCSI_PORTSPEED)
+#define G_NCSI_PORTSPEED(x) (((x) >> S_NCSI_PORTSPEED) & M_NCSI_PORTSPEED)
+
+#define S_NCSI_ENRGMII 0
+#define V_NCSI_ENRGMII(x) ((x) << S_NCSI_ENRGMII)
+#define F_NCSI_ENRGMII V_NCSI_ENRGMII(1U)
+
+#define A_NCSI_EPIO_DATA0 0x1a2c0
+#define A_NCSI_EPIO_DATA1 0x1a2c4
+#define A_NCSI_EPIO_DATA2 0x1a2c8
+#define A_NCSI_EPIO_DATA3 0x1a2cc
+#define A_NCSI_EPIO_OP 0x1a2d0
+
+#define S_PIO_READY 31
+#define V_PIO_READY(x) ((x) << S_PIO_READY)
+#define F_PIO_READY V_PIO_READY(1U)
+
+#define S_PIO_WRRD 24
+#define V_PIO_WRRD(x) ((x) << S_PIO_WRRD)
+#define F_PIO_WRRD V_PIO_WRRD(1U)
+
+#define S_PIO_ADDRESS 0
+#define M_PIO_ADDRESS 0xffU
+#define V_PIO_ADDRESS(x) ((x) << S_PIO_ADDRESS)
+#define G_PIO_ADDRESS(x) (((x) >> S_PIO_ADDRESS) & M_PIO_ADDRESS)
+
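The layout of A_NCSI_EPIO_OP suggests an indirect access window: data staged in A_NCSI_EPIO_DATA0..3, an address plus a write/read select in the op register, and a ready flag to poll. That sequencing is inferred from the field names, not documented here; the sketch below shows only the plausible poll-and-issue shape, with reg_read()/reg_write() as hypothetical stand-ins for the driver's register accessors.

#include <stdint.h>

/* Copied from the EPIO definitions above. */
#define A_NCSI_EPIO_DATA0 0x1a2c0
#define A_NCSI_EPIO_OP    0x1a2d0
#define F_PIO_READY       (1U << 31)
#define F_PIO_WRRD        (1U << 24)
#define V_PIO_ADDRESS(x)  ((x) << 0)

/* Hypothetical stand-ins; stubs that model an idle engine. */
static void reg_write(uint32_t addr, uint32_t val) { (void)addr; (void)val; }
static uint32_t reg_read(uint32_t addr) { (void)addr; return F_PIO_READY; }

/* Stage one word, then issue a write to the given EPIO address. */
static void epio_write(uint8_t addr, uint32_t data)
{
	while (!(reg_read(A_NCSI_EPIO_OP) & F_PIO_READY))
		;	/* wait until the engine is idle */
	reg_write(A_NCSI_EPIO_DATA0, data);
	reg_write(A_NCSI_EPIO_OP, F_PIO_WRRD | V_PIO_ADDRESS(addr));
}

int main(void) { epio_write(0x10, 0xdeadbeef); return 0; }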
+#define A_NCSI_XGMAC0_INT_ENABLE 0x1a2d4
+
+#define S_XAUIPCSDECERR 24
+#define V_XAUIPCSDECERR(x) ((x) << S_XAUIPCSDECERR)
+#define F_XAUIPCSDECERR V_XAUIPCSDECERR(1U)
+
+#define S_RGMIIRXFIFOOVERFLOW 23
+#define V_RGMIIRXFIFOOVERFLOW(x) ((x) << S_RGMIIRXFIFOOVERFLOW)
+#define F_RGMIIRXFIFOOVERFLOW V_RGMIIRXFIFOOVERFLOW(1U)
+
+#define S_RGMIIRXFIFOUNDERFLOW 22
+#define V_RGMIIRXFIFOUNDERFLOW(x) ((x) << S_RGMIIRXFIFOUNDERFLOW)
+#define F_RGMIIRXFIFOUNDERFLOW V_RGMIIRXFIFOUNDERFLOW(1U)
+
+#define S_RXPKTSIZEERROR 21
+#define V_RXPKTSIZEERROR(x) ((x) << S_RXPKTSIZEERROR)
+#define F_RXPKTSIZEERROR V_RXPKTSIZEERROR(1U)
+
+#define S_WOLPATDETECTED 20
+#define V_WOLPATDETECTED(x) ((x) << S_WOLPATDETECTED)
+#define F_WOLPATDETECTED V_WOLPATDETECTED(1U)
+
+#define S_T7_TXFIFO_PRTY_ERR 17
+#define M_T7_TXFIFO_PRTY_ERR 0x7U
+#define V_T7_TXFIFO_PRTY_ERR(x) ((x) << S_T7_TXFIFO_PRTY_ERR)
+#define G_T7_TXFIFO_PRTY_ERR(x) (((x) >> S_T7_TXFIFO_PRTY_ERR) & M_T7_TXFIFO_PRTY_ERR)
+
+#define S_T7_RXFIFO_PRTY_ERR 14
+#define M_T7_RXFIFO_PRTY_ERR 0x7U
+#define V_T7_RXFIFO_PRTY_ERR(x) ((x) << S_T7_RXFIFO_PRTY_ERR)
+#define G_T7_RXFIFO_PRTY_ERR(x) (((x) >> S_T7_RXFIFO_PRTY_ERR) & M_T7_RXFIFO_PRTY_ERR)
+
+#define S_TXFIFO_UNDERRUN 13
+#define V_TXFIFO_UNDERRUN(x) ((x) << S_TXFIFO_UNDERRUN)
+#define F_TXFIFO_UNDERRUN V_TXFIFO_UNDERRUN(1U)
+
+#define S_RXFIFO_OVERFLOW 12
+#define V_RXFIFO_OVERFLOW(x) ((x) << S_RXFIFO_OVERFLOW)
+#define F_RXFIFO_OVERFLOW V_RXFIFO_OVERFLOW(1U)
+
+#define S_SERDESBISTERR 8
+#define M_SERDESBISTERR 0xfU
+#define V_SERDESBISTERR(x) ((x) << S_SERDESBISTERR)
+#define G_SERDESBISTERR(x) (((x) >> S_SERDESBISTERR) & M_SERDESBISTERR)
+
+#define S_SERDESLOWSIGCHANGE 4
+#define M_SERDESLOWSIGCHANGE 0xfU
+#define V_SERDESLOWSIGCHANGE(x) ((x) << S_SERDESLOWSIGCHANGE)
+#define G_SERDESLOWSIGCHANGE(x) (((x) >> S_SERDESLOWSIGCHANGE) & M_SERDESLOWSIGCHANGE)
+
+#define S_XAUIPCSCTCERR 3
+#define V_XAUIPCSCTCERR(x) ((x) << S_XAUIPCSCTCERR)
+#define F_XAUIPCSCTCERR V_XAUIPCSCTCERR(1U)
+
+#define S_XAUIPCSALIGNCHANGE 2
+#define V_XAUIPCSALIGNCHANGE(x) ((x) << S_XAUIPCSALIGNCHANGE)
+#define F_XAUIPCSALIGNCHANGE V_XAUIPCSALIGNCHANGE(1U)
+
+#define S_RGMIILINKSTSCHANGE 1
+#define V_RGMIILINKSTSCHANGE(x) ((x) << S_RGMIILINKSTSCHANGE)
+#define F_RGMIILINKSTSCHANGE V_RGMIILINKSTSCHANGE(1U)
+
+#define S_T7_XGM_INT 0
+#define V_T7_XGM_INT(x) ((x) << S_T7_XGM_INT)
+#define F_T7_XGM_INT V_T7_XGM_INT(1U)
+
+#define A_NCSI_XGMAC0_INT_CAUSE 0x1a2d8
+#define A_NCSI_XAUI_ACT_CTRL 0x1a2dc
+#define A_NCSI_SERDES_CTRL0 0x1a2e0
+
+#define S_INTSERLPBK3 27
+#define V_INTSERLPBK3(x) ((x) << S_INTSERLPBK3)
+#define F_INTSERLPBK3 V_INTSERLPBK3(1U)
+
+#define S_INTSERLPBK2 26
+#define V_INTSERLPBK2(x) ((x) << S_INTSERLPBK2)
+#define F_INTSERLPBK2 V_INTSERLPBK2(1U)
+
+#define S_INTSERLPBK1 25
+#define V_INTSERLPBK1(x) ((x) << S_INTSERLPBK1)
+#define F_INTSERLPBK1 V_INTSERLPBK1(1U)
+
+#define S_INTSERLPBK0 24
+#define V_INTSERLPBK0(x) ((x) << S_INTSERLPBK0)
+#define F_INTSERLPBK0 V_INTSERLPBK0(1U)
+
+#define S_RESET3 23
+#define V_RESET3(x) ((x) << S_RESET3)
+#define F_RESET3 V_RESET3(1U)
+
+#define S_RESET2 22
+#define V_RESET2(x) ((x) << S_RESET2)
+#define F_RESET2 V_RESET2(1U)
+
+#define S_RESET1 21
+#define V_RESET1(x) ((x) << S_RESET1)
+#define F_RESET1 V_RESET1(1U)
+
+#define S_RESET0 20
+#define V_RESET0(x) ((x) << S_RESET0)
+#define F_RESET0 V_RESET0(1U)
+
+#define S_PWRDN3 19
+#define V_PWRDN3(x) ((x) << S_PWRDN3)
+#define F_PWRDN3 V_PWRDN3(1U)
+
+#define S_PWRDN2 18
+#define V_PWRDN2(x) ((x) << S_PWRDN2)
+#define F_PWRDN2 V_PWRDN2(1U)
+
+#define S_PWRDN1 17
+#define V_PWRDN1(x) ((x) << S_PWRDN1)
+#define F_PWRDN1 V_PWRDN1(1U)
+
+#define S_PWRDN0 16
+#define V_PWRDN0(x) ((x) << S_PWRDN0)
+#define F_PWRDN0 V_PWRDN0(1U)
+
+#define S_RESETPLL23 15
+#define V_RESETPLL23(x) ((x) << S_RESETPLL23)
+#define F_RESETPLL23 V_RESETPLL23(1U)
+
+#define S_RESETPLL01 14
+#define V_RESETPLL01(x) ((x) << S_RESETPLL01)
+#define F_RESETPLL01 V_RESETPLL01(1U)
+
+#define S_PW23 12
+#define M_PW23 0x3U
+#define V_PW23(x) ((x) << S_PW23)
+#define G_PW23(x) (((x) >> S_PW23) & M_PW23)
+
+#define S_PW01 10
+#define M_PW01 0x3U
+#define V_PW01(x) ((x) << S_PW01)
+#define G_PW01(x) (((x) >> S_PW01) & M_PW01)
+
+#define S_DEQ 6
+#define M_DEQ 0xfU
+#define V_DEQ(x) ((x) << S_DEQ)
+#define G_DEQ(x) (((x) >> S_DEQ) & M_DEQ)
+
+#define S_DTX 2
+#define M_DTX 0xfU
+#define V_DTX(x) ((x) << S_DTX)
+#define G_DTX(x) (((x) >> S_DTX) & M_DTX)
+
+#define S_LODRV 1
+#define V_LODRV(x) ((x) << S_LODRV)
+#define F_LODRV V_LODRV(1U)
+
+#define S_HIDRV 0
+#define V_HIDRV(x) ((x) << S_HIDRV)
+#define F_HIDRV V_HIDRV(1U)
+
+#define A_NCSI_SERDES_CTRL1 0x1a2e4
+
+#define S_FMOFFSET3 19
+#define M_FMOFFSET3 0x1fU
+#define V_FMOFFSET3(x) ((x) << S_FMOFFSET3)
+#define G_FMOFFSET3(x) (((x) >> S_FMOFFSET3) & M_FMOFFSET3)
+
+#define S_FMOFFSETEN3 18
+#define V_FMOFFSETEN3(x) ((x) << S_FMOFFSETEN3)
+#define F_FMOFFSETEN3 V_FMOFFSETEN3(1U)
+
+#define S_FMOFFSET2 13
+#define M_FMOFFSET2 0x1fU
+#define V_FMOFFSET2(x) ((x) << S_FMOFFSET2)
+#define G_FMOFFSET2(x) (((x) >> S_FMOFFSET2) & M_FMOFFSET2)
+
+#define S_FMOFFSETEN2 12
+#define V_FMOFFSETEN2(x) ((x) << S_FMOFFSETEN2)
+#define F_FMOFFSETEN2 V_FMOFFSETEN2(1U)
+
+#define S_FMOFFSET1 7
+#define M_FMOFFSET1 0x1fU
+#define V_FMOFFSET1(x) ((x) << S_FMOFFSET1)
+#define G_FMOFFSET1(x) (((x) >> S_FMOFFSET1) & M_FMOFFSET1)
+
+#define S_FMOFFSETEN1 6
+#define V_FMOFFSETEN1(x) ((x) << S_FMOFFSETEN1)
+#define F_FMOFFSETEN1 V_FMOFFSETEN1(1U)
+
+#define S_FMOFFSET0 1
+#define M_FMOFFSET0 0x1fU
+#define V_FMOFFSET0(x) ((x) << S_FMOFFSET0)
+#define G_FMOFFSET0(x) (((x) >> S_FMOFFSET0) & M_FMOFFSET0)
+
+#define S_FMOFFSETEN0 0
+#define V_FMOFFSETEN0(x) ((x) << S_FMOFFSETEN0)
+#define F_FMOFFSETEN0 V_FMOFFSETEN0(1U)
+
+#define A_NCSI_SERDES_CTRL2 0x1a2e8
+
+#define S_DNIN3 11
+#define V_DNIN3(x) ((x) << S_DNIN3)
+#define F_DNIN3 V_DNIN3(1U)
+
+#define S_UPIN3 10
+#define V_UPIN3(x) ((x) << S_UPIN3)
+#define F_UPIN3 V_UPIN3(1U)
+
+#define S_RXSLAVE3 9
+#define V_RXSLAVE3(x) ((x) << S_RXSLAVE3)
+#define F_RXSLAVE3 V_RXSLAVE3(1U)
+
+#define S_DNIN2 8
+#define V_DNIN2(x) ((x) << S_DNIN2)
+#define F_DNIN2 V_DNIN2(1U)
+
+#define S_UPIN2 7
+#define V_UPIN2(x) ((x) << S_UPIN2)
+#define F_UPIN2 V_UPIN2(1U)
+
+#define S_RXSLAVE2 6
+#define V_RXSLAVE2(x) ((x) << S_RXSLAVE2)
+#define F_RXSLAVE2 V_RXSLAVE2(1U)
+
+#define S_DNIN1 5
+#define V_DNIN1(x) ((x) << S_DNIN1)
+#define F_DNIN1 V_DNIN1(1U)
+
+#define S_UPIN1 4
+#define V_UPIN1(x) ((x) << S_UPIN1)
+#define F_UPIN1 V_UPIN1(1U)
+
+#define S_RXSLAVE1 3
+#define V_RXSLAVE1(x) ((x) << S_RXSLAVE1)
+#define F_RXSLAVE1 V_RXSLAVE1(1U)
+
+#define S_DNIN0 2
+#define V_DNIN0(x) ((x) << S_DNIN0)
+#define F_DNIN0 V_DNIN0(1U)
+
+#define S_UPIN0 1
+#define V_UPIN0(x) ((x) << S_UPIN0)
+#define F_UPIN0 V_UPIN0(1U)
+
+#define S_RXSLAVE0 0
+#define V_RXSLAVE0(x) ((x) << S_RXSLAVE0)
+#define F_RXSLAVE0 V_RXSLAVE0(1U)
+
+#define A_NCSI_SERDES_CTRL3 0x1a2ec
+
+#define S_EXTBISTCHKERRCLR3 31
+#define V_EXTBISTCHKERRCLR3(x) ((x) << S_EXTBISTCHKERRCLR3)
+#define F_EXTBISTCHKERRCLR3 V_EXTBISTCHKERRCLR3(1U)
+
+#define S_EXTBISTCHKEN3 30
+#define V_EXTBISTCHKEN3(x) ((x) << S_EXTBISTCHKEN3)
+#define F_EXTBISTCHKEN3 V_EXTBISTCHKEN3(1U)
+
+#define S_EXTBISTGENEN3 29
+#define V_EXTBISTGENEN3(x) ((x) << S_EXTBISTGENEN3)
+#define F_EXTBISTGENEN3 V_EXTBISTGENEN3(1U)
+
+#define S_EXTBISTPAT3 26
+#define M_EXTBISTPAT3 0x7U
+#define V_EXTBISTPAT3(x) ((x) << S_EXTBISTPAT3)
+#define G_EXTBISTPAT3(x) (((x) >> S_EXTBISTPAT3) & M_EXTBISTPAT3)
+
+#define S_EXTPARRESET3 25
+#define V_EXTPARRESET3(x) ((x) << S_EXTPARRESET3)
+#define F_EXTPARRESET3 V_EXTPARRESET3(1U)
+
+#define S_EXTPARLPBK3 24
+#define V_EXTPARLPBK3(x) ((x) << S_EXTPARLPBK3)
+#define F_EXTPARLPBK3 V_EXTPARLPBK3(1U)
+
+#define S_EXTBISTCHKERRCLR2 23
+#define V_EXTBISTCHKERRCLR2(x) ((x) << S_EXTBISTCHKERRCLR2)
+#define F_EXTBISTCHKERRCLR2 V_EXTBISTCHKERRCLR2(1U)
+
+#define S_EXTBISTCHKEN2 22
+#define V_EXTBISTCHKEN2(x) ((x) << S_EXTBISTCHKEN2)
+#define F_EXTBISTCHKEN2 V_EXTBISTCHKEN2(1U)
+
+#define S_EXTBISTGENEN2 21
+#define V_EXTBISTGENEN2(x) ((x) << S_EXTBISTGENEN2)
+#define F_EXTBISTGENEN2 V_EXTBISTGENEN2(1U)
+
+#define S_EXTBISTPAT2 18
+#define M_EXTBISTPAT2 0x7U
+#define V_EXTBISTPAT2(x) ((x) << S_EXTBISTPAT2)
+#define G_EXTBISTPAT2(x) (((x) >> S_EXTBISTPAT2) & M_EXTBISTPAT2)
+
+#define S_EXTPARRESET2 17
+#define V_EXTPARRESET2(x) ((x) << S_EXTPARRESET2)
+#define F_EXTPARRESET2 V_EXTPARRESET2(1U)
+
+#define S_EXTPARLPBK2 16
+#define V_EXTPARLPBK2(x) ((x) << S_EXTPARLPBK2)
+#define F_EXTPARLPBK2 V_EXTPARLPBK2(1U)
+
+#define S_EXTBISTCHKERRCLR1 15
+#define V_EXTBISTCHKERRCLR1(x) ((x) << S_EXTBISTCHKERRCLR1)
+#define F_EXTBISTCHKERRCLR1 V_EXTBISTCHKERRCLR1(1U)
+
+#define S_EXTBISTCHKEN1 14
+#define V_EXTBISTCHKEN1(x) ((x) << S_EXTBISTCHKEN1)
+#define F_EXTBISTCHKEN1 V_EXTBISTCHKEN1(1U)
+
+#define S_EXTBISTGENEN1 13
+#define V_EXTBISTGENEN1(x) ((x) << S_EXTBISTGENEN1)
+#define F_EXTBISTGENEN1 V_EXTBISTGENEN1(1U)
+
+#define S_EXTBISTPAT1 10
+#define M_EXTBISTPAT1 0x7U
+#define V_EXTBISTPAT1(x) ((x) << S_EXTBISTPAT1)
+#define G_EXTBISTPAT1(x) (((x) >> S_EXTBISTPAT1) & M_EXTBISTPAT1)
+
+#define S_EXTPARRESET1 9
+#define V_EXTPARRESET1(x) ((x) << S_EXTPARRESET1)
+#define F_EXTPARRESET1 V_EXTPARRESET1(1U)
+
+#define S_EXTPARLPBK1 8
+#define V_EXTPARLPBK1(x) ((x) << S_EXTPARLPBK1)
+#define F_EXTPARLPBK1 V_EXTPARLPBK1(1U)
+
+#define S_EXTBISTCHKERRCLR0 7
+#define V_EXTBISTCHKERRCLR0(x) ((x) << S_EXTBISTCHKERRCLR0)
+#define F_EXTBISTCHKERRCLR0 V_EXTBISTCHKERRCLR0(1U)
+
+#define S_EXTBISTCHKEN0 6
+#define V_EXTBISTCHKEN0(x) ((x) << S_EXTBISTCHKEN0)
+#define F_EXTBISTCHKEN0 V_EXTBISTCHKEN0(1U)
+
+#define S_EXTBISTGENEN0 5
+#define V_EXTBISTGENEN0(x) ((x) << S_EXTBISTGENEN0)
+#define F_EXTBISTGENEN0 V_EXTBISTGENEN0(1U)
+
+#define S_EXTBISTPAT0 2
+#define M_EXTBISTPAT0 0x7U
+#define V_EXTBISTPAT0(x) ((x) << S_EXTBISTPAT0)
+#define G_EXTBISTPAT0(x) (((x) >> S_EXTBISTPAT0) & M_EXTBISTPAT0)
+
+#define S_EXTPARRESET0 1
+#define V_EXTPARRESET0(x) ((x) << S_EXTPARRESET0)
+#define F_EXTPARRESET0 V_EXTPARRESET0(1U)
+
+#define S_EXTPARLPBK0 0
+#define V_EXTPARLPBK0(x) ((x) << S_EXTPARLPBK0)
+#define F_EXTPARLPBK0 V_EXTPARLPBK0(1U)
+
+#define A_NCSI_SERDES_STAT0 0x1a2f0
+
+#define S_EXTBISTCHKERRCNT0 4
+#define M_EXTBISTCHKERRCNT0 0xffffffU
+#define V_EXTBISTCHKERRCNT0(x) ((x) << S_EXTBISTCHKERRCNT0)
+#define G_EXTBISTCHKERRCNT0(x) (((x) >> S_EXTBISTCHKERRCNT0) & M_EXTBISTCHKERRCNT0)
+
+#define S_EXTBISTCHKFMD0 3
+#define V_EXTBISTCHKFMD0(x) ((x) << S_EXTBISTCHKFMD0)
+#define F_EXTBISTCHKFMD0 V_EXTBISTCHKFMD0(1U)
+
+#define S_LOWSIGFORCEEN0 2
+#define V_LOWSIGFORCEEN0(x) ((x) << S_LOWSIGFORCEEN0)
+#define F_LOWSIGFORCEEN0 V_LOWSIGFORCEEN0(1U)
+
+#define S_LOWSIGFORCEVALUE0 1
+#define V_LOWSIGFORCEVALUE0(x) ((x) << S_LOWSIGFORCEVALUE0)
+#define F_LOWSIGFORCEVALUE0 V_LOWSIGFORCEVALUE0(1U)
+
+#define S_LOWSIG0 0
+#define V_LOWSIG0(x) ((x) << S_LOWSIG0)
+#define F_LOWSIG0 V_LOWSIG0(1U)
+
+#define A_NCSI_SERDES_STAT1 0x1a2f4
+
+#define S_EXTBISTCHKERRCNT1 4
+#define M_EXTBISTCHKERRCNT1 0xffffffU
+#define V_EXTBISTCHKERRCNT1(x) ((x) << S_EXTBISTCHKERRCNT1)
+#define G_EXTBISTCHKERRCNT1(x) (((x) >> S_EXTBISTCHKERRCNT1) & M_EXTBISTCHKERRCNT1)
+
+#define S_EXTBISTCHKFMD1 3
+#define V_EXTBISTCHKFMD1(x) ((x) << S_EXTBISTCHKFMD1)
+#define F_EXTBISTCHKFMD1 V_EXTBISTCHKFMD1(1U)
+
+#define S_LOWSIGFORCEEN1 2
+#define V_LOWSIGFORCEEN1(x) ((x) << S_LOWSIGFORCEEN1)
+#define F_LOWSIGFORCEEN1 V_LOWSIGFORCEEN1(1U)
+
+#define S_LOWSIGFORCEVALUE1 1
+#define V_LOWSIGFORCEVALUE1(x) ((x) << S_LOWSIGFORCEVALUE1)
+#define F_LOWSIGFORCEVALUE1 V_LOWSIGFORCEVALUE1(1U)
+
+#define S_LOWSIG1 0
+#define V_LOWSIG1(x) ((x) << S_LOWSIG1)
+#define F_LOWSIG1 V_LOWSIG1(1U)
+
+#define A_NCSI_SERDES_STAT2 0x1a2f8
+
+#define S_EXTBISTCHKERRCNT2 4
+#define M_EXTBISTCHKERRCNT2 0xffffffU
+#define V_EXTBISTCHKERRCNT2(x) ((x) << S_EXTBISTCHKERRCNT2)
+#define G_EXTBISTCHKERRCNT2(x) (((x) >> S_EXTBISTCHKERRCNT2) & M_EXTBISTCHKERRCNT2)
+
+#define S_EXTBISTCHKFMD2 3
+#define V_EXTBISTCHKFMD2(x) ((x) << S_EXTBISTCHKFMD2)
+#define F_EXTBISTCHKFMD2 V_EXTBISTCHKFMD2(1U)
+
+#define S_LOWSIGFORCEEN2 2
+#define V_LOWSIGFORCEEN2(x) ((x) << S_LOWSIGFORCEEN2)
+#define F_LOWSIGFORCEEN2 V_LOWSIGFORCEEN2(1U)
+
+#define S_LOWSIGFORCEVALUE2 1
+#define V_LOWSIGFORCEVALUE2(x) ((x) << S_LOWSIGFORCEVALUE2)
+#define F_LOWSIGFORCEVALUE2 V_LOWSIGFORCEVALUE2(1U)
+
+#define S_LOWSIG2 0
+#define V_LOWSIG2(x) ((x) << S_LOWSIG2)
+#define F_LOWSIG2 V_LOWSIG2(1U)
+
+#define A_NCSI_SERDES_STAT3 0x1a2fc
+
+#define S_EXTBISTCHKERRCNT3 4
+#define M_EXTBISTCHKERRCNT3 0xffffffU
+#define V_EXTBISTCHKERRCNT3(x) ((x) << S_EXTBISTCHKERRCNT3)
+#define G_EXTBISTCHKERRCNT3(x) (((x) >> S_EXTBISTCHKERRCNT3) & M_EXTBISTCHKERRCNT3)
+
+#define S_EXTBISTCHKFMD3 3
+#define V_EXTBISTCHKFMD3(x) ((x) << S_EXTBISTCHKFMD3)
+#define F_EXTBISTCHKFMD3 V_EXTBISTCHKFMD3(1U)
+
+#define S_LOWSIGFORCEEN3 2
+#define V_LOWSIGFORCEEN3(x) ((x) << S_LOWSIGFORCEEN3)
+#define F_LOWSIGFORCEEN3 V_LOWSIGFORCEEN3(1U)
+
+#define S_LOWSIGFORCEVALUE3 1
+#define V_LOWSIGFORCEVALUE3(x) ((x) << S_LOWSIGFORCEVALUE3)
+#define F_LOWSIGFORCEVALUE3 V_LOWSIGFORCEVALUE3(1U)
+
+#define S_LOWSIG3 0
+#define V_LOWSIG3(x) ((x) << S_LOWSIG3)
+#define F_LOWSIG3 V_LOWSIG3(1U)
+
+#define A_NCSI_STAT_TX_BYTE_LOW 0x1a300
+#define A_NCSI_STAT_TX_BYTE_HIGH 0x1a304
+#define A_NCSI_STAT_TX_FRAME_LOW 0x1a308
+#define A_NCSI_STAT_TX_FRAME_HIGH 0x1a30c
+#define A_NCSI_STAT_TX_BCAST 0x1a310
+#define A_NCSI_STAT_TX_MCAST 0x1a314
+#define A_NCSI_STAT_TX_PAUSE 0x1a318
+#define A_NCSI_STAT_TX_64B_FRAMES 0x1a31c
+#define A_NCSI_STAT_TX_65_127B_FRAMES 0x1a320
+#define A_NCSI_STAT_TX_128_255B_FRAMES 0x1a324
+#define A_NCSI_STAT_TX_256_511B_FRAMES 0x1a328
+#define A_NCSI_STAT_TX_512_1023B_FRAMES 0x1a32c
+#define A_NCSI_STAT_TX_1024_1518B_FRAMES 0x1a330
+#define A_NCSI_STAT_TX_1519_MAXB_FRAMES 0x1a334
+#define A_NCSI_STAT_TX_ERR_FRAMES 0x1a338
+#define A_NCSI_STAT_RX_BYTES_LOW 0x1a33c
+#define A_NCSI_STAT_RX_BYTES_HIGH 0x1a340
+#define A_NCSI_STAT_RX_FRAMES_LOW 0x1a344
+#define A_NCSI_STAT_RX_FRAMES_HIGH 0x1a348
+#define A_NCSI_STAT_RX_BCAST_FRAMES 0x1a34c
+#define A_NCSI_STAT_RX_MCAST_FRAMES 0x1a350
+#define A_NCSI_STAT_RX_PAUSE_FRAMES 0x1a354
+#define A_NCSI_STAT_RX_64B_FRAMES 0x1a358
+#define A_NCSI_STAT_RX_65_127B_FRAMES 0x1a35c
+#define A_NCSI_STAT_RX_128_255B_FRAMES 0x1a360
+#define A_NCSI_STAT_RX_256_511B_FRAMES 0x1a364
+#define A_NCSI_STAT_RX_512_1023B_FRAMES 0x1a368
+#define A_NCSI_STAT_RX_1024_1518B_FRAMES 0x1a36c
+#define A_NCSI_STAT_RX_1519_MAXB_FRAMES 0x1a370
+#define A_NCSI_STAT_RX_SHORT_FRAMES 0x1a374
+#define A_NCSI_STAT_RX_OVERSIZE_FRAMES 0x1a378
+#define A_NCSI_STAT_RX_JABBER_FRAMES 0x1a37c
+#define A_NCSI_STAT_RX_CRC_ERR_FRAMES 0x1a380
+#define A_NCSI_STAT_RX_LENGTH_ERR_FRAMES 0x1a384
+#define A_NCSI_STAT_RX_SYM_CODE_ERR_FRAMES 0x1a388
+#define A_NCSI_XAUI_PCS_ERR 0x1a398
+
+#define S_PCS_SYNCSTATUS 5
+#define M_PCS_SYNCSTATUS 0xfU
+#define V_PCS_SYNCSTATUS(x) ((x) << S_PCS_SYNCSTATUS)
+#define G_PCS_SYNCSTATUS(x) (((x) >> S_PCS_SYNCSTATUS) & M_PCS_SYNCSTATUS)
+
+#define S_PCS_CTCFIFOERR 1
+#define M_PCS_CTCFIFOERR 0xfU
+#define V_PCS_CTCFIFOERR(x) ((x) << S_PCS_CTCFIFOERR)
+#define G_PCS_CTCFIFOERR(x) (((x) >> S_PCS_CTCFIFOERR) & M_PCS_CTCFIFOERR)
+
+#define S_PCS_NOTALIGNED 0
+#define V_PCS_NOTALIGNED(x) ((x) << S_PCS_NOTALIGNED)
+#define F_PCS_NOTALIGNED V_PCS_NOTALIGNED(1U)
+
+#define A_NCSI_RGMII_STATUS 0x1a39c
+
+#define S_GMIIDUPLEX 3
+#define V_GMIIDUPLEX(x) ((x) << S_GMIIDUPLEX)
+#define F_GMIIDUPLEX V_GMIIDUPLEX(1U)
+
+#define S_GMIISPEED 1
+#define M_GMIISPEED 0x3U
+#define V_GMIISPEED(x) ((x) << S_GMIISPEED)
+#define G_GMIISPEED(x) (((x) >> S_GMIISPEED) & M_GMIISPEED)
+
+#define S_GMIILINKSTATUS 0
+#define V_GMIILINKSTATUS(x) ((x) << S_GMIILINKSTATUS)
+#define F_GMIILINKSTATUS V_GMIILINKSTATUS(1U)
+
+#define A_NCSI_WOL_STATUS 0x1a3a0
+
+#define S_T7_PATDETECTED 31
+#define V_T7_PATDETECTED(x) ((x) << S_T7_PATDETECTED)
+#define F_T7_PATDETECTED V_T7_PATDETECTED(1U)
+
+#define A_NCSI_RX_MAX_PKT_SIZE_ERR_CNT 0x1a3a4
+#define A_NCSI_TX_SPI4_SOP_EOP_CNT 0x1a3a8
+
+#define S_TXSPI4SOPCNT 16
+#define M_TXSPI4SOPCNT 0xffffU
+#define V_TXSPI4SOPCNT(x) ((x) << S_TXSPI4SOPCNT)
+#define G_TXSPI4SOPCNT(x) (((x) >> S_TXSPI4SOPCNT) & M_TXSPI4SOPCNT)
+
+#define S_TXSPI4EOPCNT 0
+#define M_TXSPI4EOPCNT 0xffffU
+#define V_TXSPI4EOPCNT(x) ((x) << S_TXSPI4EOPCNT)
+#define G_TXSPI4EOPCNT(x) (((x) >> S_TXSPI4EOPCNT) & M_TXSPI4EOPCNT)
+
+#define A_NCSI_RX_SPI4_SOP_EOP_CNT 0x1a3ac
+
+#define S_RXSPI4SOPCNT 16
+#define M_RXSPI4SOPCNT 0xffffU
+#define V_RXSPI4SOPCNT(x) ((x) << S_RXSPI4SOPCNT)
+#define G_RXSPI4SOPCNT(x) (((x) >> S_RXSPI4SOPCNT) & M_RXSPI4SOPCNT)
+
+#define S_RXSPI4EOPCNT 0
+#define M_RXSPI4EOPCNT 0xffffU
+#define V_RXSPI4EOPCNT(x) ((x) << S_RXSPI4EOPCNT)
+#define G_RXSPI4EOPCNT(x) (((x) >> S_RXSPI4EOPCNT) & M_RXSPI4EOPCNT)
+
/* registers for module XGMAC */
#define XGMAC_BASE_ADDR 0x0
@@ -44054,6 +56225,16 @@
#define V_IBQEMPTY(x) ((x) << S_IBQEMPTY)
#define G_IBQEMPTY(x) (((x) >> S_IBQEMPTY) & M_IBQEMPTY)
+#define S_T7_IBQGEN1 10
+#define M_T7_IBQGEN1 0x3fU
+#define V_T7_IBQGEN1(x) ((x) << S_T7_IBQGEN1)
+#define G_T7_IBQGEN1(x) (((x) >> S_T7_IBQGEN1) & M_T7_IBQGEN1)
+
+#define S_T7_IBQEMPTY 0
+#define M_T7_IBQEMPTY 0x3ffU
+#define V_T7_IBQEMPTY(x) ((x) << S_T7_IBQEMPTY)
+#define G_T7_IBQEMPTY(x) (((x) >> S_T7_IBQEMPTY) & M_T7_IBQEMPTY)
+
#define A_UP_OBQ_GEN 0xc
#define S_OBQGEN 6
@@ -44076,6 +56257,16 @@
#define V_T5_OBQFULL(x) ((x) << S_T5_OBQFULL)
#define G_T5_OBQFULL(x) (((x) >> S_T5_OBQFULL) & M_T5_OBQFULL)
+#define S_T7_T5_OBQGEN 16
+#define M_T7_T5_OBQGEN 0xffffU
+#define V_T7_T5_OBQGEN(x) ((x) << S_T7_T5_OBQGEN)
+#define G_T7_T5_OBQGEN(x) (((x) >> S_T7_T5_OBQGEN) & M_T7_T5_OBQGEN)
+
+#define S_T7_T5_OBQFULL 0
+#define M_T7_T5_OBQFULL 0xffffU
+#define V_T7_T5_OBQFULL(x) ((x) << S_T7_T5_OBQFULL)
+#define G_T7_T5_OBQFULL(x) (((x) >> S_T7_T5_OBQFULL) & M_T7_T5_OBQFULL)
+
#define A_UP_IBQ_0_RDADDR 0x10
#define S_QUEID 13
@@ -44088,6 +56279,13 @@
#define V_IBQRDADDR(x) ((x) << S_IBQRDADDR)
#define G_IBQRDADDR(x) (((x) >> S_IBQRDADDR) & M_IBQRDADDR)
+#define A_UP_IBQ_GEN_IPC 0x10
+
+#define S_IPCEMPTY 0
+#define M_IPCEMPTY 0x7fU
+#define V_IPCEMPTY(x) ((x) << S_IPCEMPTY)
+#define G_IPCEMPTY(x) (((x) >> S_IPCEMPTY) & M_IPCEMPTY)
+
#define A_UP_IBQ_0_WRADDR 0x14
#define S_IBQWRADDR 0
@@ -44160,10 +56358,15 @@
#define A_UP_OBQ_0_STATUS 0x78
#define A_UP_OBQ_0_PKTCNT 0x7c
#define A_UP_OBQ_1_RDADDR 0x80
+#define A_UP_NXT_FLOWADDR0 0x80
#define A_UP_OBQ_1_WRADDR 0x84
+#define A_UP_NXT_FLOWADDR1 0x84
#define A_UP_OBQ_1_STATUS 0x88
+#define A_UP_NXT_FLOWADDR2 0x88
#define A_UP_OBQ_1_PKTCNT 0x8c
+#define A_UP_NXT_FLOWADDR3 0x8c
#define A_UP_OBQ_2_RDADDR 0x90
+#define A_UP_DFT_FLOWADDR 0x90
#define A_UP_OBQ_2_WRADDR 0x94
#define A_UP_OBQ_2_STATUS 0x98
#define A_UP_OBQ_2_PKTCNT 0x9c
@@ -44176,9 +56379,33 @@
#define A_UP_OBQ_4_STATUS 0xb8
#define A_UP_OBQ_4_PKTCNT 0xbc
#define A_UP_OBQ_5_RDADDR 0xc0
+#define A_UP_MAX_SEQ_NUM 0xc0
#define A_UP_OBQ_5_WRADDR 0xc4
+#define A_UP_UNACK_SEQ_NUM 0xc4
#define A_UP_OBQ_5_STATUS 0xc8
+#define A_UP_SEARCH_SEQ_NUM 0xc8
#define A_UP_OBQ_5_PKTCNT 0xcc
+#define A_UP_SEQ_SEARCH_CTRL 0xcc
+
+#define S_FIFO_SIZE 29
+#define M_FIFO_SIZE 0x7U
+#define V_FIFO_SIZE(x) ((x) << S_FIFO_SIZE)
+#define G_FIFO_SIZE(x) (((x) >> S_FIFO_SIZE) & M_FIFO_SIZE)
+
+#define S_ROCE_MODE 28
+#define V_ROCE_MODE(x) ((x) << S_ROCE_MODE)
+#define F_ROCE_MODE V_ROCE_MODE(1U)
+
+#define S_SEQ_WR_PTR 16
+#define M_SEQ_WR_PTR 0xfffU
+#define V_SEQ_WR_PTR(x) ((x) << S_SEQ_WR_PTR)
+#define G_SEQ_WR_PTR(x) (((x) >> S_SEQ_WR_PTR) & M_SEQ_WR_PTR)
+
+#define S_SEQ_RD_PTR 0
+#define M_SEQ_RD_PTR 0xfffU
+#define V_SEQ_RD_PTR(x) ((x) << S_SEQ_RD_PTR)
+#define G_SEQ_RD_PTR(x) (((x) >> S_SEQ_RD_PTR) & M_SEQ_RD_PTR)
+
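A_UP_SEQ_SEARCH_CTRL packs a FIFO size code, a RoCE-mode flag, and 12-bit write/read pointers into one word. A small decode sketch using the macros above; the sample register value is made up.

#include <stdint.h>
#include <stdio.h>

/* Copied from the A_UP_SEQ_SEARCH_CTRL field definitions above. */
#define S_FIFO_SIZE 29
#define M_FIFO_SIZE 0x7U
#define G_FIFO_SIZE(x) (((x) >> S_FIFO_SIZE) & M_FIFO_SIZE)
#define F_ROCE_MODE (1U << 28)
#define S_SEQ_WR_PTR 16
#define M_SEQ_WR_PTR 0xfffU
#define G_SEQ_WR_PTR(x) (((x) >> S_SEQ_WR_PTR) & M_SEQ_WR_PTR)
#define S_SEQ_RD_PTR 0
#define M_SEQ_RD_PTR 0xfffU
#define G_SEQ_RD_PTR(x) (((x) >> S_SEQ_RD_PTR) & M_SEQ_RD_PTR)

int main(void)
{
	uint32_t v = 0x5012a003;	/* made-up sample value */

	printf("fifo_size=%u roce=%u wr=0x%03x rd=0x%03x\n",
	    (unsigned)G_FIFO_SIZE(v), !!(v & F_ROCE_MODE),
	    (unsigned)G_SEQ_WR_PTR(v), (unsigned)G_SEQ_RD_PTR(v));
	return 0;
}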
#define A_UP_IBQ_0_CONFIG 0xd0
#define S_QUESIZE 26
@@ -44203,6 +56430,25 @@
#define V_QUE1KEN(x) ((x) << S_QUE1KEN)
#define F_QUE1KEN V_QUE1KEN(1U)
+#define A_UP_SEQ_SEARCH_RES0 0xd0
+
+#define S_INV_SEQ 18
+#define V_INV_SEQ(x) ((x) << S_INV_SEQ)
+#define F_INV_SEQ V_INV_SEQ(1U)
+
+#define S_DUP_SEQ 17
+#define V_DUP_SEQ(x) ((x) << S_DUP_SEQ)
+#define F_DUP_SEQ V_DUP_SEQ(1U)
+
+#define S_MATCH_VLD 16
+#define V_MATCH_VLD(x) ((x) << S_MATCH_VLD)
+#define F_MATCH_VLD V_MATCH_VLD(1U)
+
+#define S_MATCH_INDEX 0
+#define M_MATCH_INDEX 0xffffU
+#define V_MATCH_INDEX(x) ((x) << S_MATCH_INDEX)
+#define G_MATCH_INDEX(x) (((x) >> S_MATCH_INDEX) & M_MATCH_INDEX)
+
#define A_UP_IBQ_0_REALADDR 0xd4
#define S_QUERDADDRWRAP 31
@@ -44218,6 +56464,7 @@
#define V_QUEMEMADDR(x) ((x) << S_QUEMEMADDR)
#define G_QUEMEMADDR(x) (((x) >> S_QUEMEMADDR) & M_QUEMEMADDR)
+#define A_UP_SEQ_SEARCH_RES1 0xd4
#define A_UP_IBQ_1_CONFIG 0xd8
#define A_UP_IBQ_1_REALADDR 0xdc
#define A_UP_IBQ_2_CONFIG 0xe0
@@ -44229,14 +56476,34 @@
#define A_UP_IBQ_5_CONFIG 0xf8
#define A_UP_IBQ_5_REALADDR 0xfc
#define A_UP_OBQ_0_CONFIG 0x100
+#define A_UP_PEER_HALT_STAT0 0x100
+
+#define S_HALTINFO 1
+#define M_HALTINFO 0x7fffffffU
+#define V_HALTINFO(x) ((x) << S_HALTINFO)
+#define G_HALTINFO(x) (((x) >> S_HALTINFO) & M_HALTINFO)
+
#define A_UP_OBQ_0_REALADDR 0x104
+#define A_UP_PEER_HALT_STAT1 0x104
#define A_UP_OBQ_1_CONFIG 0x108
+#define A_UP_PEER_HALT_STAT2 0x108
#define A_UP_OBQ_1_REALADDR 0x10c
+#define A_UP_PEER_HALT_STAT3 0x10c
#define A_UP_OBQ_2_CONFIG 0x110
+#define A_UP_PEER_HALT_STAT4 0x110
#define A_UP_OBQ_2_REALADDR 0x114
+#define A_UP_PEER_HALT_STAT5 0x114
#define A_UP_OBQ_3_CONFIG 0x118
+#define A_UP_PEER_HALT_STAT6 0x118
#define A_UP_OBQ_3_REALADDR 0x11c
+#define A_UP_PEER_HALT_STAT7 0x11c
#define A_UP_OBQ_4_CONFIG 0x120
+#define A_UP_PEER_HALT_CTL 0x120
+
+#define S_HALTREQ 0
+#define V_HALTREQ(x) ((x) << S_HALTREQ)
+#define F_HALTREQ V_HALTREQ(1U)
+
#define A_UP_OBQ_4_REALADDR 0x124
#define A_UP_OBQ_5_CONFIG 0x128
#define A_UP_OBQ_5_REALADDR 0x12c
@@ -44516,6 +56783,204 @@
#define A_UP_OBQ_6_SHADOW_REALADDR 0x3c4
#define A_UP_OBQ_7_SHADOW_CONFIG 0x3c8
#define A_UP_OBQ_7_SHADOW_REALADDR 0x3cc
+#define A_T7_UP_IBQ_0_SHADOW_RDADDR 0x400
+#define A_T7_UP_IBQ_0_SHADOW_WRADDR 0x404
+#define A_T7_UP_IBQ_0_SHADOW_STATUS 0x408
+
+#define S_T7_QUEREMFLITS 0
+#define M_T7_QUEREMFLITS 0xfffU
+#define V_T7_QUEREMFLITS(x) ((x) << S_T7_QUEREMFLITS)
+#define G_T7_QUEREMFLITS(x) (((x) >> S_T7_QUEREMFLITS) & M_T7_QUEREMFLITS)
+
+#define A_T7_UP_IBQ_0_SHADOW_PKTCNT 0x40c
+#define A_T7_UP_IBQ_1_SHADOW_RDADDR 0x410
+#define A_T7_UP_IBQ_1_SHADOW_WRADDR 0x414
+#define A_T7_UP_IBQ_1_SHADOW_STATUS 0x418
+#define A_T7_UP_IBQ_1_SHADOW_PKTCNT 0x41c
+#define A_T7_UP_IBQ_2_SHADOW_RDADDR 0x420
+#define A_T7_UP_IBQ_2_SHADOW_WRADDR 0x424
+#define A_T7_UP_IBQ_2_SHADOW_STATUS 0x428
+#define A_T7_UP_IBQ_2_SHADOW_PKTCNT 0x42c
+#define A_T7_UP_IBQ_3_SHADOW_RDADDR 0x430
+#define A_T7_UP_IBQ_3_SHADOW_WRADDR 0x434
+#define A_T7_UP_IBQ_3_SHADOW_STATUS 0x438
+#define A_T7_UP_IBQ_3_SHADOW_PKTCNT 0x43c
+#define A_T7_UP_IBQ_4_SHADOW_RDADDR 0x440
+#define A_T7_UP_IBQ_4_SHADOW_WRADDR 0x444
+#define A_T7_UP_IBQ_4_SHADOW_STATUS 0x448
+#define A_T7_UP_IBQ_4_SHADOW_PKTCNT 0x44c
+#define A_T7_UP_IBQ_5_SHADOW_RDADDR 0x450
+#define A_T7_UP_IBQ_5_SHADOW_WRADDR 0x454
+#define A_T7_UP_IBQ_5_SHADOW_STATUS 0x458
+#define A_T7_UP_IBQ_5_SHADOW_PKTCNT 0x45c
+#define A_UP_IBQ_6_SHADOW_RDADDR 0x460
+#define A_UP_IBQ_6_SHADOW_WRADDR 0x464
+#define A_UP_IBQ_6_SHADOW_STATUS 0x468
+#define A_UP_IBQ_6_SHADOW_PKTCNT 0x46c
+#define A_UP_IBQ_7_SHADOW_RDADDR 0x470
+#define A_UP_IBQ_7_SHADOW_WRADDR 0x474
+#define A_UP_IBQ_7_SHADOW_STATUS 0x478
+#define A_UP_IBQ_7_SHADOW_PKTCNT 0x47c
+#define A_UP_IBQ_8_SHADOW_RDADDR 0x480
+#define A_UP_IBQ_8_SHADOW_WRADDR 0x484
+#define A_UP_IBQ_8_SHADOW_STATUS 0x488
+#define A_UP_IBQ_8_SHADOW_PKTCNT 0x48c
+#define A_UP_IBQ_9_SHADOW_RDADDR 0x490
+#define A_UP_IBQ_9_SHADOW_WRADDR 0x494
+#define A_UP_IBQ_9_SHADOW_STATUS 0x498
+#define A_UP_IBQ_9_SHADOW_PKTCNT 0x49c
+#define A_UP_IBQ_10_SHADOW_RDADDR 0x4a0
+#define A_UP_IBQ_10_SHADOW_WRADDR 0x4a4
+#define A_UP_IBQ_10_SHADOW_STATUS 0x4a8
+#define A_UP_IBQ_10_SHADOW_PKTCNT 0x4ac
+#define A_UP_IBQ_11_SHADOW_RDADDR 0x4b0
+#define A_UP_IBQ_11_SHADOW_WRADDR 0x4b4
+#define A_UP_IBQ_11_SHADOW_STATUS 0x4b8
+#define A_UP_IBQ_11_SHADOW_PKTCNT 0x4bc
+#define A_UP_IBQ_12_SHADOW_RDADDR 0x4c0
+#define A_UP_IBQ_12_SHADOW_WRADDR 0x4c4
+#define A_UP_IBQ_12_SHADOW_STATUS 0x4c8
+#define A_UP_IBQ_12_SHADOW_PKTCNT 0x4cc
+#define A_UP_IBQ_13_SHADOW_RDADDR 0x4d0
+#define A_UP_IBQ_13_SHADOW_WRADDR 0x4d4
+#define A_UP_IBQ_13_SHADOW_STATUS 0x4d8
+#define A_UP_IBQ_13_SHADOW_PKTCNT 0x4dc
+#define A_UP_IBQ_14_SHADOW_RDADDR 0x4e0
+#define A_UP_IBQ_14_SHADOW_WRADDR 0x4e4
+#define A_UP_IBQ_14_SHADOW_STATUS 0x4e8
+#define A_UP_IBQ_14_SHADOW_PKTCNT 0x4ec
+#define A_UP_IBQ_15_SHADOW_RDADDR 0x4f0
+#define A_UP_IBQ_15_SHADOW_WRADDR 0x4f4
+#define A_UP_IBQ_15_SHADOW_STATUS 0x4f8
+#define A_UP_IBQ_15_SHADOW_PKTCNT 0x4fc
+#define A_T7_UP_IBQ_0_SHADOW_CONFIG 0x500
+#define A_T7_UP_IBQ_0_SHADOW_REALADDR 0x504
+#define A_T7_UP_IBQ_1_SHADOW_CONFIG 0x510
+#define A_T7_UP_IBQ_1_SHADOW_REALADDR 0x514
+#define A_T7_UP_IBQ_2_SHADOW_CONFIG 0x520
+#define A_T7_UP_IBQ_2_SHADOW_REALADDR 0x524
+#define A_T7_UP_IBQ_3_SHADOW_CONFIG 0x530
+#define A_T7_UP_IBQ_3_SHADOW_REALADDR 0x534
+#define A_T7_UP_IBQ_4_SHADOW_CONFIG 0x540
+#define A_T7_UP_IBQ_4_SHADOW_REALADDR 0x544
+#define A_T7_UP_IBQ_5_SHADOW_CONFIG 0x550
+#define A_T7_UP_IBQ_5_SHADOW_REALADDR 0x554
+#define A_UP_IBQ_6_SHADOW_CONFIG 0x560
+#define A_UP_IBQ_6_SHADOW_REALADDR 0x564
+#define A_UP_IBQ_7_SHADOW_CONFIG 0x570
+#define A_UP_IBQ_7_SHADOW_REALADDR 0x574
+#define A_UP_IBQ_8_SHADOW_CONFIG 0x580
+#define A_UP_IBQ_8_SHADOW_REALADDR 0x584
+#define A_UP_IBQ_9_SHADOW_CONFIG 0x590
+#define A_UP_IBQ_9_SHADOW_REALADDR 0x594
+#define A_UP_IBQ_10_SHADOW_CONFIG 0x5a0
+#define A_UP_IBQ_10_SHADOW_REALADDR 0x5a4
+#define A_UP_IBQ_11_SHADOW_CONFIG 0x5b0
+#define A_UP_IBQ_11_SHADOW_REALADDR 0x5b4
+#define A_UP_IBQ_12_SHADOW_CONFIG 0x5c0
+#define A_UP_IBQ_12_SHADOW_REALADDR 0x5c4
+#define A_UP_IBQ_13_SHADOW_CONFIG 0x5d0
+#define A_UP_IBQ_13_SHADOW_REALADDR 0x5d4
+#define A_UP_IBQ_14_SHADOW_CONFIG 0x5e0
+#define A_UP_IBQ_14_SHADOW_REALADDR 0x5e4
+#define A_UP_IBQ_15_SHADOW_CONFIG 0x5f0
+#define A_UP_IBQ_15_SHADOW_REALADDR 0x5f4
+#define A_T7_UP_OBQ_0_SHADOW_RDADDR 0x600
+#define A_T7_UP_OBQ_0_SHADOW_WRADDR 0x604
+#define A_T7_UP_OBQ_0_SHADOW_STATUS 0x608
+#define A_T7_UP_OBQ_0_SHADOW_PKTCNT 0x60c
+#define A_T7_UP_OBQ_1_SHADOW_RDADDR 0x610
+#define A_T7_UP_OBQ_1_SHADOW_WRADDR 0x614
+#define A_T7_UP_OBQ_1_SHADOW_STATUS 0x618
+#define A_T7_UP_OBQ_1_SHADOW_PKTCNT 0x61c
+#define A_T7_UP_OBQ_2_SHADOW_RDADDR 0x620
+#define A_T7_UP_OBQ_2_SHADOW_WRADDR 0x624
+#define A_T7_UP_OBQ_2_SHADOW_STATUS 0x628
+#define A_T7_UP_OBQ_2_SHADOW_PKTCNT 0x62c
+#define A_T7_UP_OBQ_3_SHADOW_RDADDR 0x630
+#define A_T7_UP_OBQ_3_SHADOW_WRADDR 0x634
+#define A_T7_UP_OBQ_3_SHADOW_STATUS 0x638
+#define A_T7_UP_OBQ_3_SHADOW_PKTCNT 0x63c
+#define A_T7_UP_OBQ_4_SHADOW_RDADDR 0x640
+#define A_T7_UP_OBQ_4_SHADOW_WRADDR 0x644
+#define A_T7_UP_OBQ_4_SHADOW_STATUS 0x648
+#define A_T7_UP_OBQ_4_SHADOW_PKTCNT 0x64c
+#define A_T7_UP_OBQ_5_SHADOW_RDADDR 0x650
+#define A_T7_UP_OBQ_5_SHADOW_WRADDR 0x654
+#define A_T7_UP_OBQ_5_SHADOW_STATUS 0x658
+#define A_T7_UP_OBQ_5_SHADOW_PKTCNT 0x65c
+#define A_T7_UP_OBQ_6_SHADOW_RDADDR 0x660
+#define A_T7_UP_OBQ_6_SHADOW_WRADDR 0x664
+#define A_T7_UP_OBQ_6_SHADOW_STATUS 0x668
+#define A_T7_UP_OBQ_6_SHADOW_PKTCNT 0x66c
+#define A_T7_UP_OBQ_7_SHADOW_RDADDR 0x670
+#define A_T7_UP_OBQ_7_SHADOW_WRADDR 0x674
+#define A_T7_UP_OBQ_7_SHADOW_STATUS 0x678
+#define A_T7_UP_OBQ_7_SHADOW_PKTCNT 0x67c
+#define A_UP_OBQ_8_SHADOW_RDADDR 0x680
+#define A_UP_OBQ_8_SHADOW_WRADDR 0x684
+#define A_UP_OBQ_8_SHADOW_STATUS 0x688
+#define A_UP_OBQ_8_SHADOW_PKTCNT 0x68c
+#define A_UP_OBQ_9_SHADOW_RDADDR 0x690
+#define A_UP_OBQ_9_SHADOW_WRADDR 0x694
+#define A_UP_OBQ_9_SHADOW_STATUS 0x698
+#define A_UP_OBQ_9_SHADOW_PKTCNT 0x69c
+#define A_UP_OBQ_10_SHADOW_RDADDR 0x6a0
+#define A_UP_OBQ_10_SHADOW_WRADDR 0x6a4
+#define A_UP_OBQ_10_SHADOW_STATUS 0x6a8
+#define A_UP_OBQ_10_SHADOW_PKTCNT 0x6ac
+#define A_UP_OBQ_11_SHADOW_RDADDR 0x6b0
+#define A_UP_OBQ_11_SHADOW_WRADDR 0x6b4
+#define A_UP_OBQ_11_SHADOW_STATUS 0x6b8
+#define A_UP_OBQ_11_SHADOW_PKTCNT 0x6bc
+#define A_UP_OBQ_12_SHADOW_RDADDR 0x6c0
+#define A_UP_OBQ_12_SHADOW_WRADDR 0x6c4
+#define A_UP_OBQ_12_SHADOW_STATUS 0x6c8
+#define A_UP_OBQ_12_SHADOW_PKTCNT 0x6cc
+#define A_UP_OBQ_13_SHADOW_RDADDR 0x6d0
+#define A_UP_OBQ_13_SHADOW_WRADDR 0x6d4
+#define A_UP_OBQ_13_SHADOW_STATUS 0x6d8
+#define A_UP_OBQ_13_SHADOW_PKTCNT 0x6dc
+#define A_UP_OBQ_14_SHADOW_RDADDR 0x6e0
+#define A_UP_OBQ_14_SHADOW_WRADDR 0x6e4
+#define A_UP_OBQ_14_SHADOW_STATUS 0x6e8
+#define A_UP_OBQ_14_SHADOW_PKTCNT 0x6ec
+#define A_UP_OBQ_15_SHADOW_RDADDR 0x6f0
+#define A_UP_OBQ_15_SHADOW_WRADDR 0x6f4
+#define A_UP_OBQ_15_SHADOW_STATUS 0x6f8
+#define A_UP_OBQ_15_SHADOW_PKTCNT 0x6fc
+#define A_T7_UP_OBQ_0_SHADOW_CONFIG 0x700
+#define A_T7_UP_OBQ_0_SHADOW_REALADDR 0x704
+#define A_T7_UP_OBQ_1_SHADOW_CONFIG 0x710
+#define A_T7_UP_OBQ_1_SHADOW_REALADDR 0x714
+#define A_T7_UP_OBQ_2_SHADOW_CONFIG 0x720
+#define A_T7_UP_OBQ_2_SHADOW_REALADDR 0x724
+#define A_T7_UP_OBQ_3_SHADOW_CONFIG 0x730
+#define A_T7_UP_OBQ_3_SHADOW_REALADDR 0x734
+#define A_T7_UP_OBQ_4_SHADOW_CONFIG 0x740
+#define A_T7_UP_OBQ_4_SHADOW_REALADDR 0x744
+#define A_T7_UP_OBQ_5_SHADOW_CONFIG 0x750
+#define A_T7_UP_OBQ_5_SHADOW_REALADDR 0x754
+#define A_T7_UP_OBQ_6_SHADOW_CONFIG 0x760
+#define A_T7_UP_OBQ_6_SHADOW_REALADDR 0x764
+#define A_T7_UP_OBQ_7_SHADOW_CONFIG 0x770
+#define A_T7_UP_OBQ_7_SHADOW_REALADDR 0x774
+#define A_UP_OBQ_8_SHADOW_CONFIG 0x780
+#define A_UP_OBQ_8_SHADOW_REALADDR 0x784
+#define A_UP_OBQ_9_SHADOW_CONFIG 0x790
+#define A_UP_OBQ_9_SHADOW_REALADDR 0x794
+#define A_UP_OBQ_10_SHADOW_CONFIG 0x7a0
+#define A_UP_OBQ_10_SHADOW_REALADDR 0x7a4
+#define A_UP_OBQ_11_SHADOW_CONFIG 0x7b0
+#define A_UP_OBQ_11_SHADOW_REALADDR 0x7b4
+#define A_UP_OBQ_12_SHADOW_CONFIG 0x7c0
+#define A_UP_OBQ_12_SHADOW_REALADDR 0x7c4
+#define A_UP_OBQ_13_SHADOW_CONFIG 0x7d0
+#define A_UP_OBQ_13_SHADOW_REALADDR 0x7d4
+#define A_UP_OBQ_14_SHADOW_CONFIG 0x7e0
+#define A_UP_OBQ_14_SHADOW_REALADDR 0x7e4
+#define A_UP_OBQ_15_SHADOW_CONFIG 0x7f0
+#define A_UP_OBQ_15_SHADOW_REALADDR 0x7f4
/* registers for module CIM_CTL */
#define CIM_CTL_BASE_ADDR 0x0
@@ -44579,17 +57044,63 @@
#define A_CIM_CTL_STATIC_PREFADDR10 0x38
#define A_CIM_CTL_STATIC_PREFADDR11 0x3c
#define A_CIM_CTL_STATIC_PREFADDR12 0x40
+#define A_CIM_CTL_SEM_CFG 0x40
+
+#define S_SEMINIT 31
+#define V_SEMINIT(x) ((x) << S_SEMINIT)
+#define F_SEMINIT V_SEMINIT(1U)
+
+#define S_NUMSEM 0
+#define M_NUMSEM 0x3ffffU
+#define V_NUMSEM(x) ((x) << S_NUMSEM)
+#define G_NUMSEM(x) (((x) >> S_NUMSEM) & M_NUMSEM)
+
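A_CIM_CTL_SEM_CFG pairs an init trigger (SEMINIT) with an 18-bit semaphore count, so composing the word is a one-liner with the macros above. A minimal sketch; the count is an arbitrary example.

#include <stdint.h>
#include <stdio.h>

/* Copied from the A_CIM_CTL_SEM_CFG field definitions above. */
#define S_SEMINIT 31
#define V_SEMINIT(x) ((x) << S_SEMINIT)
#define F_SEMINIT V_SEMINIT(1U)
#define S_NUMSEM 0
#define M_NUMSEM 0x3ffffU
#define V_NUMSEM(x) ((x) << S_NUMSEM)
#define G_NUMSEM(x) (((x) >> S_NUMSEM) & M_NUMSEM)

int main(void)
{
	uint32_t v = F_SEMINIT | V_NUMSEM(256);	/* arbitrary count */

	printf("cfg=0x%08x numsem=%u\n", (unsigned)v, (unsigned)G_NUMSEM(v));
	return 0;
}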
#define A_CIM_CTL_STATIC_PREFADDR13 0x44
+#define A_CIM_CTL_SEM_MA_CFG 0x44
+
+#define S_SEMMABASE 4
+#define M_SEMMABASE 0xfffffffU
+#define V_SEMMABASE(x) ((x) << S_SEMMABASE)
+#define G_SEMMABASE(x) (((x) >> S_SEMMABASE) & M_SEMMABASE)
+
+#define S_SEMMATHREADID 0
+#define M_SEMMATHREADID 0x7U
+#define V_SEMMATHREADID(x) ((x) << S_SEMMATHREADID)
+#define G_SEMMATHREADID(x) (((x) >> S_SEMMATHREADID) & M_SEMMATHREADID)
+
#define A_CIM_CTL_STATIC_PREFADDR14 0x48
#define A_CIM_CTL_STATIC_PREFADDR15 0x4c
#define A_CIM_CTL_STATIC_ALLOCADDR0 0x50
+#define A_CIM_CTL_LOCK_CFG 0x50
+
+#define S_NUMLOCK 0
+#define M_NUMLOCK 0x3ffffU
+#define V_NUMLOCK(x) ((x) << S_NUMLOCK)
+#define G_NUMLOCK(x) (((x) >> S_NUMLOCK) & M_NUMLOCK)
+
#define A_CIM_CTL_STATIC_ALLOCADDR1 0x54
+#define A_CIM_CTL_LOCK_MA_CFG 0x54
+
+#define S_LOCKMABASE 4
+#define M_LOCKMABASE 0xfffffffU
+#define V_LOCKMABASE(x) ((x) << S_LOCKMABASE)
+#define G_LOCKMABASE(x) (((x) >> S_LOCKMABASE) & M_LOCKMABASE)
+
+#define S_LOCKMATHREADID 0
+#define M_LOCKMATHREADID 0x7U
+#define V_LOCKMATHREADID(x) ((x) << S_LOCKMATHREADID)
+#define G_LOCKMATHREADID(x) (((x) >> S_LOCKMATHREADID) & M_LOCKMATHREADID)
+
#define A_CIM_CTL_STATIC_ALLOCADDR2 0x58
#define A_CIM_CTL_STATIC_ALLOCADDR3 0x5c
#define A_CIM_CTL_STATIC_ALLOCADDR4 0x60
+#define A_CIM_CTL_RSA_INT 0x60
#define A_CIM_CTL_STATIC_ALLOCADDR5 0x64
+#define A_CIM_CTL_RSA_BUSY 0x64
#define A_CIM_CTL_STATIC_ALLOCADDR6 0x68
+#define A_CIM_CTL_RSA_CPERR 0x68
#define A_CIM_CTL_STATIC_ALLOCADDR7 0x6c
+#define A_CIM_CTL_RSA_DPERR 0x6c
#define A_CIM_CTL_STATIC_ALLOCADDR8 0x70
#define A_CIM_CTL_STATIC_ALLOCADDR9 0x74
#define A_CIM_CTL_STATIC_ALLOCADDR10 0x78
@@ -44650,6 +57161,66 @@
#define A_CIM_CTL_GEN_TIMER3 0xd0
#define A_CIM_CTL_MAILBOX_VF_STATUS 0xe0
#define A_CIM_CTL_MAILBOX_VFN_CTL 0x100
+#define A_CIM_CTL_TID_MAP_EN 0x500
+#define A_CIM_CTL_TID_MAP_CORE 0x520
+#define A_CIM_CTL_TID_MAP_CONFIG 0x540
+
+#define S_TIDDEFCORE 4
+#define M_TIDDEFCORE 0xfU
+#define V_TIDDEFCORE(x) ((x) << S_TIDDEFCORE)
+#define G_TIDDEFCORE(x) (((x) >> S_TIDDEFCORE) & M_TIDDEFCORE)
+
+#define S_TIDVECBASE 0
+#define M_TIDVECBASE 0x7U
+#define V_TIDVECBASE(x) ((x) << S_TIDVECBASE)
+#define G_TIDVECBASE(x) (((x) >> S_TIDVECBASE) & M_TIDVECBASE)
+
+#define A_CIM_CTL_CRYPTO_KEY_DATA 0x600
+#define A_CIM_CTL_SECURE_CONFIG 0x6f8
+#define A_CIM_CTL_CRYPTO_KEY_CTRL 0x6fc
+
+#define S_CRYPTOKEYDATAREGNUM 8
+#define M_CRYPTOKEYDATAREGNUM 0xffU
+#define V_CRYPTOKEYDATAREGNUM(x) ((x) << S_CRYPTOKEYDATAREGNUM)
+#define G_CRYPTOKEYDATAREGNUM(x) (((x) >> S_CRYPTOKEYDATAREGNUM) & M_CRYPTOKEYDATAREGNUM)
+
+#define S_CRYPTOKEYSTARTBUSY 0
+#define V_CRYPTOKEYSTARTBUSY(x) ((x) << S_CRYPTOKEYSTARTBUSY)
+#define F_CRYPTOKEYSTARTBUSY V_CRYPTOKEYSTARTBUSY(1U)
+
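The CRYPTOKEYSTARTBUSY name suggests the usual start/busy handshake: software selects a key-data register number, sets the bit to kick the load, then polls until hardware clears it. That protocol is inferred from the field names alone; the sketch below shows only that shape, with reg_read()/reg_write() as hypothetical stand-ins for the driver's accessors.

#include <stdint.h>

/* Copied from the A_CIM_CTL_CRYPTO_KEY_CTRL definitions above. */
#define A_CIM_CTL_CRYPTO_KEY_CTRL 0x6fc
#define V_CRYPTOKEYDATAREGNUM(x) ((x) << 8)
#define F_CRYPTOKEYSTARTBUSY 1U

/* Hypothetical stand-ins; the stub models hardware finishing at once. */
static uint32_t shadow;
static void reg_write(uint32_t a, uint32_t v) { (void)a; shadow = v; }
static uint32_t reg_read(uint32_t a) { (void)a; return shadow & ~F_CRYPTOKEYSTARTBUSY; }

/* Kick a key load for one data register and wait for completion. */
static void crypto_key_start(uint8_t regnum)
{
	reg_write(A_CIM_CTL_CRYPTO_KEY_CTRL,
	    V_CRYPTOKEYDATAREGNUM(regnum) | F_CRYPTOKEYSTARTBUSY);
	while (reg_read(A_CIM_CTL_CRYPTO_KEY_CTRL) & F_CRYPTOKEYSTARTBUSY)
		;	/* busy-wait; hardware clears the bit when done */
}

int main(void) { crypto_key_start(3); return 0; }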
+#define A_CIM_CTL_FLOWID_OP_VALID 0x700
+#define A_CIM_CTL_FLOWID_CTL 0x720
+
+#define S_FLOWBASEADDR 8
+#define M_FLOWBASEADDR 0xffffffU
+#define V_FLOWBASEADDR(x) ((x) << S_FLOWBASEADDR)
+#define G_FLOWBASEADDR(x) (((x) >> S_FLOWBASEADDR) & M_FLOWBASEADDR)
+
+#define S_SEQSRCHALIGNCFG 4
+#define M_SEQSRCHALIGNCFG 0x3U
+#define V_SEQSRCHALIGNCFG(x) ((x) << S_SEQSRCHALIGNCFG)
+#define G_SEQSRCHALIGNCFG(x) (((x) >> S_SEQSRCHALIGNCFG) & M_SEQSRCHALIGNCFG)
+
+#define S_FLOWADDRSIZE 1
+#define M_FLOWADDRSIZE 0x3U
+#define V_FLOWADDRSIZE(x) ((x) << S_FLOWADDRSIZE)
+#define G_FLOWADDRSIZE(x) (((x) >> S_FLOWADDRSIZE) & M_FLOWADDRSIZE)
+
+#define S_FLOWIDEN 0
+#define V_FLOWIDEN(x) ((x) << S_FLOWIDEN)
+#define F_FLOWIDEN V_FLOWIDEN(1U)
+
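A_CIM_CTL_FLOWID_CTL carries a 24-bit flow base address alongside the search-alignment, address-size, and enable fields. The sketch below composes a control word from all four fields with the macros above; the operand values are arbitrary examples.

#include <stdint.h>
#include <stdio.h>

/* Copied from the A_CIM_CTL_FLOWID_CTL field definitions above. */
#define S_FLOWBASEADDR 8
#define M_FLOWBASEADDR 0xffffffU
#define V_FLOWBASEADDR(x) ((x) << S_FLOWBASEADDR)
#define G_FLOWBASEADDR(x) (((x) >> S_FLOWBASEADDR) & M_FLOWBASEADDR)
#define S_SEQSRCHALIGNCFG 4
#define V_SEQSRCHALIGNCFG(x) ((x) << S_SEQSRCHALIGNCFG)
#define S_FLOWADDRSIZE 1
#define V_FLOWADDRSIZE(x) ((x) << S_FLOWADDRSIZE)
#define F_FLOWIDEN 1U

int main(void)
{
	/* Arbitrary example operands. */
	uint32_t v = V_FLOWBASEADDR(0x1000) | V_SEQSRCHALIGNCFG(1) |
	    V_FLOWADDRSIZE(2) | F_FLOWIDEN;

	printf("ctl=0x%08x base=0x%06x\n",
	    (unsigned)v, (unsigned)G_FLOWBASEADDR(v));
	return 0;
}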
+#define A_CIM_CTL_FLOWID_MAX 0x724
+
+#define S_MAXFLOWID 0
+#define M_MAXFLOWID 0xffffffU
+#define V_MAXFLOWID(x) ((x) << S_MAXFLOWID)
+#define G_MAXFLOWID(x) (((x) >> S_MAXFLOWID) & M_MAXFLOWID)
+
+#define A_CIM_CTL_FLOWID_HINT0 0x728
+#define A_CIM_CTL_EFUSE_CTRL 0x780
+#define A_CIM_CTL_EFUSE_QOUT 0x784
+#define A_CIM_CTL_EFUSE_RFOUT 0x788
#define A_CIM_CTL_TSCH_CHNLN_CTL 0x900
#define S_TSCHNLEN 31
@@ -45001,14 +57572,19 @@
#define A_CIM_CTL_TSCH_TICK3 0xd8c
#define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90
#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90
+#define A_T7_CIM_CTL_MAILBOX_PF0_CTL 0xd90
#define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94
#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94
+#define A_T7_CIM_CTL_MAILBOX_PF1_CTL 0xd94
#define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98
#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98
+#define A_T7_CIM_CTL_MAILBOX_PF2_CTL 0xd98
#define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c
#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
+#define A_T7_CIM_CTL_MAILBOX_PF3_CTL 0xd9c
#define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0
#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0
+#define A_T7_CIM_CTL_MAILBOX_PF4_CTL 0xda0
#define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4
#define S_PF7_OWNER_PL 15
@@ -45076,6 +57652,7 @@
#define F_PF0_OWNER_UP V_PF0_OWNER_UP(1U)
#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4
+#define A_T7_CIM_CTL_MAILBOX_PF5_CTL 0xda4
#define A_CIM_CTL_PIO_MST_CONFIG 0xda8
#define S_T5_CTLRID 0
@@ -45084,15 +57661,13 @@
#define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID)
#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8
+#define A_T7_CIM_CTL_MAILBOX_PF6_CTL 0xda8
#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac
+#define A_T7_CIM_CTL_MAILBOX_PF7_CTL 0xdac
#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
+#define A_T7_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0
#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4
-
-#define S_T6_UPRID 0
-#define M_T6_UPRID 0x1ffU
-#define V_T6_UPRID(x) ((x) << S_T6_UPRID)
-#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID)
-
+#define A_T7_CIM_CTL_PIO_MST_CONFIG 0xdb4
#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00
#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04
#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08
@@ -45119,6 +57694,64 @@
#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT)
#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT)
+#define A_CIM_CTL_BREAK 0xf00
+
+#define S_XOCDMODE 8
+#define M_XOCDMODE 0xffU
+#define V_XOCDMODE(x) ((x) << S_XOCDMODE)
+#define G_XOCDMODE(x) (((x) >> S_XOCDMODE) & M_XOCDMODE)
+
+#define S_BREAKIN_CONTROL 0
+#define M_BREAKIN_CONTROL 0xffU
+#define V_BREAKIN_CONTROL(x) ((x) << S_BREAKIN_CONTROL)
+#define G_BREAKIN_CONTROL(x) (((x) >> S_BREAKIN_CONTROL) & M_BREAKIN_CONTROL)
+
+#define A_CIM_CTL_SLV_BOOT_CFG 0x4000
+
+#define S_T7_UPGEN 3
+#define M_T7_UPGEN 0x1fU
+#define V_T7_UPGEN(x) ((x) << S_T7_UPGEN)
+#define G_T7_UPGEN(x) (((x) >> S_T7_UPGEN) & M_T7_UPGEN)
+
+#define S_UPCLKEN 2
+#define V_UPCLKEN(x) ((x) << S_UPCLKEN)
+#define F_UPCLKEN V_UPCLKEN(1U)
+
+#define A_CIM_CTL_SLV_BOOT_LEN 0x4004
+#define A_CIM_CTL_SLV_ACC_INT_ENABLE 0x4008
+#define A_CIM_CTL_SLV_ACC_INT_CAUSE 0x400c
+#define A_CIM_CTL_SLV_INT_ENABLE 0x4010
+#define A_CIM_CTL_SLV_INT_CAUSE 0x4014
+#define A_CIM_CTL_SLV_PERR_ENABLE 0x4018
+#define A_CIM_CTL_SLV_PERR_CAUSE 0x401c
+#define A_CIM_CTL_SLV_ADDR_TIMEOUT 0x4028
+#define A_CIM_CTL_SLV_ADDR_ILLEGAL 0x402c
+#define A_CIM_CTL_SLV_PIO_MST_CONFIG 0x4030
+#define A_CIM_CTL_SLV_MEM_ZONE0_VA 0x4040
+#define A_CIM_CTL_SLV_MEM_ZONE0_BA 0x4044
+#define A_CIM_CTL_SLV_MEM_ZONE0_LEN 0x4048
+#define A_CIM_CTL_SLV_MEM_ZONE1_VA 0x404c
+#define A_CIM_CTL_SLV_MEM_ZONE1_BA 0x4050
+#define A_CIM_CTL_SLV_MEM_ZONE1_LEN 0x4054
+#define A_CIM_CTL_SLV_MEM_ZONE2_VA 0x4058
+#define A_CIM_CTL_SLV_MEM_ZONE2_BA 0x405c
+#define A_CIM_CTL_SLV_MEM_ZONE2_LEN 0x4060
+#define A_CIM_CTL_SLV_MEM_ZONE3_VA 0x4064
+#define A_CIM_CTL_SLV_MEM_ZONE3_BA 0x4068
+#define A_CIM_CTL_SLV_MEM_ZONE3_LEN 0x406c
+#define A_CIM_CTL_SLV_MEM_ZONE4_VA 0x4070
+#define A_CIM_CTL_SLV_MEM_ZONE4_BA 0x4074
+#define A_CIM_CTL_SLV_MEM_ZONE4_LEN 0x4078
+#define A_CIM_CTL_SLV_MEM_ZONE5_VA 0x407c
+#define A_CIM_CTL_SLV_MEM_ZONE5_BA 0x4080
+#define A_CIM_CTL_SLV_MEM_ZONE5_LEN 0x4084
+#define A_CIM_CTL_SLV_MEM_ZONE6_VA 0x4088
+#define A_CIM_CTL_SLV_MEM_ZONE6_BA 0x408c
+#define A_CIM_CTL_SLV_MEM_ZONE6_LEN 0x4090
+#define A_CIM_CTL_SLV_MEM_ZONE7_VA 0x4094
+#define A_CIM_CTL_SLV_MEM_ZONE7_BA 0x4098
+#define A_CIM_CTL_SLV_MEM_ZONE7_LEN 0x409c
+
/* registers for module MAC */
#define MAC_BASE_ADDR 0x0
@@ -46613,33 +59246,7 @@
#define F_PERR_TX_PCS1G V_PERR_TX_PCS1G(1U)
#define A_MAC_PORT_PERR_INT_CAUSE 0x8e4
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_ENABLE 0x8e8
-
-#define S_T6_PERR_PKT_RAM 31
-#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM)
-#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U)
-
-#define S_T6_PERR_MASK_RAM 30
-#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM)
-#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U)
-
-#define S_T6_PERR_CRC_RAM 29
-#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM)
-#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U)
-
#define A_MAC_PORT_PERR_INJECT 0x8ec
#define S_MEMSEL_PERR 1
@@ -47304,10 +59911,12 @@
#define A_MAC_PORT_PTP_DRIFT_ADJUST_COUNT 0x9a0
#define A_MAC_PORT_PTP_OFFSET_ADJUST_FINE 0x9a4
+#if 0
#define S_B 16
-#define CXGBE_M_B 0xffffU
+#define M_B 0xffffU
#define V_B(x) ((x) << S_B)
-#define G_B(x) (((x) >> S_B) & CXGBE_M_B)
+#define G_B(x) (((x) >> S_B) & M_B)
+#endif
#define S_A 0
#define M_A 0xffffU
@@ -48454,10 +61063,6 @@
#define V_LOW_POWER(x) ((x) << S_LOW_POWER)
#define F_LOW_POWER V_LOW_POWER(1U)
-#define S_T6_SPEED_SEL1 6
-#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1)
-#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U)
-
#define S_SPEED_SEL2 2
#define M_SPEED_SEL2 0xfU
#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2)
@@ -49016,7 +61621,7 @@
#define S_VLANTAG 0
#define CXGBE_M_VLANTAG 0xffffU
#define V_VLANTAG(x) ((x) << S_VLANTAG)
-#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG)
+#define G_VLANTAG(x) (((x) >> S_VLANTAG) & M_VLANTAG)
#define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04
#define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08
@@ -51279,75 +63884,24 @@
#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM)
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24
#define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c
#define A_MAC_PORT_AET_STATUS_1 0x2b30
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_21 0x2b34
#define A_MAC_PORT_AET_LIMITS1 0x2b38
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44
#define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c
#define A_MAC_PORT_AET_STATUS_2 0x2b50
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_22 0x2b54
#define A_MAC_PORT_AET_LIMITS2 0x2b58
#define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60
-
-#define S_T6_INIT_METH 12
-#define M_T6_INIT_METH 0xfU
-#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH)
-#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH)
-
#define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64
#define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68
#define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c
#define A_MAC_PORT_AET_STATUS_3 0x2b70
-
-#define S_T6_NEU_STATE 4
-#define M_T6_NEU_STATE 0xfU
-#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE)
-#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE)
-
-#define S_T6_CTRL_STATE 0
-#define M_T6_CTRL_STATE 0xfU
-#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE)
-#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE)
-
#define A_MAC_PORT_AET_STATUS_23 0x2b74
#define A_MAC_PORT_AET_LIMITS3 0x2b78
#define A_T6_MAC_PORT_BEAN_CTL 0x2c00
@@ -52384,103 +64938,21 @@
#define F_BSOUTP V_BSOUTP(1U)
#define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114
#define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130
#define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144
@@ -52503,12 +64975,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178
#define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184
#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188
@@ -52521,21 +64987,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c
#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKB_DCCSTEP_CTL 6
#define M_TX_LINKB_DCCSTEP_CTL 0x3U
#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL)
@@ -52553,20 +65004,9 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200
#define S_T5_RX_LINKEN 15
@@ -54442,56 +66882,15 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318
#define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c
#define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324
#define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334
#define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338
#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338
@@ -54515,12 +66914,6 @@
#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c
#define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370
#define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378
#define S_RX_LINKB_ACCCMP_RIS 11
@@ -54550,20 +66943,6 @@
#define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4
#define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8
@@ -54611,103 +66990,21 @@
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8
#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc
#define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414
#define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430
#define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444
@@ -54730,12 +67027,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478
#define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484
#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488
@@ -54748,21 +67039,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c
#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKC_DCCSTEP_CTL 6
#define M_TX_LINKC_DCCSTEP_CTL 0x3U
#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL)
@@ -54780,118 +67056,25 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514
#define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530
#define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544
@@ -54914,12 +67097,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578
#define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584
#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588
@@ -54932,21 +67109,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c
#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINKD_DCCSTEP_CTL 6
#define M_TX_LINKD_DCCSTEP_CTL 0x3U
#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL)
@@ -54964,74 +67126,22 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600
#define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618
#define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c
#define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624
#define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634
#define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638
#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638
@@ -55055,12 +67165,6 @@
#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c
#define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670
#define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678
#define S_RX_LINKC_ACCCMP_RIS 11
@@ -55090,20 +67194,6 @@
#define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4
#define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8
@@ -55154,56 +67244,15 @@
#define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718
#define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c
#define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724
#define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734
#define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738
#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738
@@ -55227,12 +67276,6 @@
#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c
#define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770
#define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 0x3774
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778
#define S_RX_LINKD_ACCCMP_RIS 11
@@ -55262,20 +67305,6 @@
#define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8
@@ -55597,103 +67626,21 @@
#define F_MACROTEST V_MACROTEST(1U)
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900
-
-#define S_T6_T5_TX_RXLOOP 5
-#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP)
-#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U)
-
-#define S_T6_T5_TX_BWSEL 2
-#define M_T6_T5_TX_BWSEL 0x3U
-#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL)
-#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904
-
-#define S_T6_ERROR 9
-#define V_T6_ERROR(x) ((x) << S_T6_ERROR)
-#define F_T6_ERROR V_T6_ERROR(1U)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918
-
-#define S_T6_CALSSTN 8
-#define M_T6_CALSSTN 0x3fU
-#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN)
-#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN)
-
-#define S_T6_CALSSTP 0
-#define M_T6_CALSSTP 0x3fU
-#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP)
-#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c
-
-#define S_T6_DRTOL 2
-#define M_T6_DRTOL 0x7U
-#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL)
-#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920
-
-#define S_T6_NXTT0 0
-#define M_T6_NXTT0 0x3fU
-#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0)
-#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928
-
-#define S_T6_NXTT2 0
-#define M_T6_NXTT2 0x3fU
-#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2)
-#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934
-
-#define S_T6_NXTPOL 0
-#define M_T6_NXTPOL 0xfU
-#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL)
-#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938
-
-#define S_T6_C0UPDT 6
-#define M_T6_C0UPDT 0x3U
-#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT)
-#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT)
-
-#define S_T6_C2UPDT 2
-#define M_T6_C2UPDT 0x3U
-#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT)
-#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT)
-
-#define S_T6_C1UPDT 0
-#define M_T6_C1UPDT 0x3U
-#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT)
-#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c
-
-#define S_T6_C0STAT 6
-#define M_T6_C0STAT 0x3U
-#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT)
-#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT)
-
-#define S_T6_C2STAT 2
-#define M_T6_C2STAT 0x3U
-#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT)
-#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT)
-
-#define S_T6_C1STAT 0
-#define M_T6_C1STAT 0x3U
-#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT)
-#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944
@@ -55716,12 +67663,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c
-
-#define S_T6_XADDR 1
-#define M_T6_XADDR 0x1fU
-#define V_T6_XADDR(x) ((x) << S_T6_XADDR)
-#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988
@@ -55734,21 +67675,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c
#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0
-#define S_T6_DCCTIMEEN 13
-#define M_T6_DCCTIMEEN 0x3U
-#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN)
-#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN)
-
-#define S_T6_DCCLOCK 11
-#define M_T6_DCCLOCK 0x3U
-#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK)
-#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK)
-
-#define S_T6_DCCOFFSET 8
-#define M_T6_DCCOFFSET 0x7U
-#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET)
-#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET)
-
#define S_TX_LINK_BCST_DCCSTEP_CTL 6
#define M_TX_LINK_BCST_DCCSTEP_CTL 0x3U
#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL)
@@ -55766,74 +67692,22 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0
-
-#define S_T6_SDOVRD 0
-#define M_T6_SDOVRD 0xffffU
-#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD)
-#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc
-
-#define S_T6_SDOVRDEN 15
-#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN)
-#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c
-
-#define S_T6_TMSCAL 8
-#define M_T6_TMSCAL 0x3U
-#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL)
-#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL)
-
-#define S_T6_APADJ 7
-#define V_T6_APADJ(x) ((x) << S_T6_APADJ)
-#define F_T6_APADJ V_T6_APADJ(1U)
-
-#define S_T6_RSEL 6
-#define V_T6_RSEL(x) ((x) << S_T6_RSEL)
-#define F_T6_RSEL V_T6_RSEL(1U)
-
-#define S_T6_PHOFFS 0
-#define M_T6_PHOFFS 0x3fU
-#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS)
-#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS)
-
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18
#define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c
#define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20
-
-#define S_T6_SPIFMT 8
-#define M_T6_SPIFMT 0xfU
-#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT)
-#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT)
-
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24
#define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c
-
-#define S_T6_WRAPSEL 15
-#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL)
-#define F_T6_WRAPSEL V_T6_WRAPSEL(1U)
-
-#define S_T6_PEAK 9
-#define M_T6_PEAK 0x1fU
-#define V_T6_PEAK(x) ((x) << S_T6_PEAK)
-#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30
-
-#define S_T6_T5VGAIN 0
-#define M_T6_T5VGAIN 0x7fU
-#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN)
-#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38
@@ -55857,12 +67731,6 @@
#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70
#define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74
-
-#define S_T6_ODEC 0
-#define M_T6_ODEC 0xfU
-#define V_T6_ODEC(x) ((x) << S_T6_ODEC)
-#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC)
-
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78
#define S_RX_LINK_BCST_ACCCMP_RIS 11
@@ -55892,20 +67760,6 @@
#define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac
-
-#define S_T6_EMMD 3
-#define M_T6_EMMD 0x3U
-#define V_T6_EMMD(x) ((x) << S_T6_EMMD)
-#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD)
-
-#define S_T6_EMBRDY 2
-#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY)
-#define F_T6_EMBRDY V_T6_EMBRDY(1U)
-
-#define S_T6_EMBUMP 1
-#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP)
-#define F_T6_EMBUMP V_T6_EMBUMP(1U)
-
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4
#define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8
@@ -56304,17 +68158,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56323,17 +68166,6 @@
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56349,17 +68181,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56368,17 +68189,6 @@
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56394,17 +68204,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56413,17 +68212,6 @@
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56439,17 +68227,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10
-
-#define S_T6_C0MAX 8
-#define M_T6_C0MAX 0x7fU
-#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX)
-#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX)
-
-#define S_T6_C0MIN 0
-#define M_T6_C0MIN 0x7fU
-#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN)
-#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20
@@ -56458,17 +68235,6 @@
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30
-
-#define S_T6_C2MAX 8
-#define M_T6_C2MAX 0x7fU
-#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX)
-#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX)
-
-#define S_T6_C2MIN 0
-#define M_T6_C2MIN 0x7fU
-#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN)
-#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN)
-
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38
#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40
@@ -56639,17 +68405,6 @@
#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08
#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c
#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10
@@ -56668,17 +68423,6 @@
#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08
#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c
#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10
@@ -56697,17 +68441,6 @@
#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08
#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c
#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10
@@ -56726,17 +68459,6 @@
#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN)
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204
-
-#define S_T6_H1OSN 13
-#define M_T6_H1OSN 0x7U
-#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN)
-#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN)
-
-#define S_T6_H1OMAG 8
-#define M_T6_H1OMAG 0x1fU
-#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG)
-#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG)
-
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c
#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210
@@ -57294,69 +69016,21 @@
#define G_BANK(x) (((x) >> S_BANK) & M_BANK)
#define A_MC_LMC_INITSEQ1 0x40148
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD1 0x4014c
#define A_MC_LMC_INITSEQ2 0x40150
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD2 0x40154
#define A_MC_LMC_INITSEQ3 0x40158
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD3 0x4015c
#define A_MC_LMC_INITSEQ4 0x40160
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD4 0x40164
#define A_MC_LMC_INITSEQ5 0x40168
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD5 0x4016c
#define A_MC_LMC_INITSEQ6 0x40170
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD6 0x40174
#define A_MC_LMC_INITSEQ7 0x40178
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD7 0x4017c
#define A_MC_UPCTL_ECCCFG 0x40180
#define A_MC_LMC_INITSEQ8 0x40180
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCTST 0x40184
#define S_ECC_TEST_MASK0 0
@@ -57367,61 +69041,19 @@
#define A_MC_LMC_CMD8 0x40184
#define A_MC_UPCTL_ECCCLR 0x40188
#define A_MC_LMC_INITSEQ9 0x40188
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_UPCTL_ECCLOG 0x4018c
#define A_MC_LMC_CMD9 0x4018c
#define A_MC_LMC_INITSEQ10 0x40190
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD10 0x40194
#define A_MC_LMC_INITSEQ11 0x40198
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD11 0x4019c
#define A_MC_LMC_INITSEQ12 0x401a0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD12 0x401a4
#define A_MC_LMC_INITSEQ13 0x401a8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD13 0x401ac
#define A_MC_LMC_INITSEQ14 0x401b0
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD14 0x401b4
#define A_MC_LMC_INITSEQ15 0x401b8
-
-#define S_T6_RANK 0
-#define M_T6_RANK 0xfU
-#define V_T6_RANK(x) ((x) << S_T6_RANK)
-#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK)
-
#define A_MC_LMC_CMD15 0x401bc
#define A_MC_UPCTL_DTUWACTL 0x40200
@@ -61990,6 +73622,11 @@
#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES)
#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES)
+#define S_DRAMREFENABLE 27
+#define M_DRAMREFENABLE 0x3U
+#define V_DRAMREFENABLE(x) ((x) << S_DRAMREFENABLE)
+#define G_DRAMREFENABLE(x) (((x) >> S_DRAMREFENABLE) & M_DRAMREFENABLE)
+
#define A_EDC_H_DBG_MA_CMD_INTF 0x50300
#define S_MCMDADDR 12
@@ -62372,12 +74009,51 @@
#define V_REFCNT(x) ((x) << S_REFCNT)
#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT)
+#define A_EDC_H_PAR_CAUSE 0x50404
+
+#define S_STG_CMDQ_PARERR_CAUSE 7
+#define V_STG_CMDQ_PARERR_CAUSE(x) ((x) << S_STG_CMDQ_PARERR_CAUSE)
+#define F_STG_CMDQ_PARERR_CAUSE V_STG_CMDQ_PARERR_CAUSE(1U)
+
+#define S_STG_WRDQ_PARERR_CAUSE 6
+#define V_STG_WRDQ_PARERR_CAUSE(x) ((x) << S_STG_WRDQ_PARERR_CAUSE)
+#define F_STG_WRDQ_PARERR_CAUSE V_STG_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_CMDQ_PARERR_CAUSE 5
+#define V_INP_CMDQ_PARERR_CAUSE(x) ((x) << S_INP_CMDQ_PARERR_CAUSE)
+#define F_INP_CMDQ_PARERR_CAUSE V_INP_CMDQ_PARERR_CAUSE(1U)
+
+#define S_INP_WRDQ_PARERR_CAUSE 4
+#define V_INP_WRDQ_PARERR_CAUSE(x) ((x) << S_INP_WRDQ_PARERR_CAUSE)
+#define F_INP_WRDQ_PARERR_CAUSE V_INP_WRDQ_PARERR_CAUSE(1U)
+
+#define S_INP_BEQ_PARERR_CAUSE 3
+#define V_INP_BEQ_PARERR_CAUSE(x) ((x) << S_INP_BEQ_PARERR_CAUSE)
+#define F_INP_BEQ_PARERR_CAUSE V_INP_BEQ_PARERR_CAUSE(1U)
+
+#define S_ECC_CE_PAR_ENABLE_CAUSE 2
+#define V_ECC_CE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_CE_PAR_ENABLE_CAUSE)
+#define F_ECC_CE_PAR_ENABLE_CAUSE V_ECC_CE_PAR_ENABLE_CAUSE(1U)
+
+#define S_ECC_UE_PAR_ENABLE_CAUSE 1
+#define V_ECC_UE_PAR_ENABLE_CAUSE(x) ((x) << S_ECC_UE_PAR_ENABLE_CAUSE)
+#define F_ECC_UE_PAR_ENABLE_CAUSE V_ECC_UE_PAR_ENABLE_CAUSE(1U)
+
+#define S_RDDQ_PARERR_CAUSE 0
+#define V_RDDQ_PARERR_CAUSE(x) ((x) << S_RDDQ_PARERR_CAUSE)
+#define F_RDDQ_PARERR_CAUSE V_RDDQ_PARERR_CAUSE(1U)
+
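The A_EDC_H_PAR_CAUSE register added above follows the cause-register idiom used throughout this header: each single-bit F_*_CAUSE flag latches one parity event. A minimal handler sketch, assuming the driver's usual t4_read_reg()/t4_write_reg() accessors, its struct adapter softc, and write-one-to-clear semantics, none of which this diff itself establishes:

/*
 * Illustrative only: decode and acknowledge EDC parity causes.
 * Accessors, softc type, and W1C behavior are assumptions.
 */
static void
edc_h_par_intr(struct adapter *sc)
{
	uint32_t cause = t4_read_reg(sc, A_EDC_H_PAR_CAUSE);

	if (cause & F_ECC_UE_PAR_ENABLE_CAUSE)
		printf("EDC: uncorrectable ECC parity error\n");
	if (cause & F_STG_CMDQ_PARERR_CAUSE)
		printf("EDC: staging cmd-queue parity error\n");

	/* Assumed W1C: write the latched bits back to clear them. */
	t4_write_reg(sc, A_EDC_H_PAR_CAUSE, cause);
}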
/* registers for module EDC_T61 */
#define EDC_T61_BASE_ADDR 0x50800
/* registers for module HMA_T6 */
#define HMA_T6_BASE_ADDR 0x51000
+#define S_T7_CLIENT_EN 0
+#define M_T7_CLIENT_EN 0x7fffU
+#define V_T7_CLIENT_EN(x) ((x) << S_T7_CLIENT_EN)
+#define G_T7_CLIENT_EN(x) (((x) >> S_T7_CLIENT_EN) & M_T7_CLIENT_EN)
+
#define S_TPH 12
#define M_TPH 0x3U
#define V_TPH(x) ((x) << S_TPH)
@@ -62398,6 +74074,14 @@
#define V_OP_MODE(x) ((x) << S_OP_MODE)
#define F_OP_MODE V_OP_MODE(1U)
+#define S_GK_ENABLE 30
+#define V_GK_ENABLE(x) ((x) << S_GK_ENABLE)
+#define F_GK_ENABLE V_GK_ENABLE(1U)
+
+#define S_DBGCNTRST 29
+#define V_DBGCNTRST(x) ((x) << S_DBGCNTRST)
+#define F_DBGCNTRST V_DBGCNTRST(1U)
+
#define A_HMA_TLB_ACCESS 0x51028
#define S_INV_ALL 29
@@ -62437,6 +74121,11 @@
#define V_REGION(x) ((x) << S_REGION)
#define G_REGION(x) (((x) >> S_REGION) & M_REGION)
+#define S_T7_VA 8
+#define M_T7_VA 0xffffffU
+#define V_T7_VA(x) ((x) << S_T7_VA)
+#define G_T7_VA(x) (((x) >> S_T7_VA) & M_T7_VA)
+
#define A_HMA_TLB_DESC_0_H 0x51030
#define A_HMA_TLB_DESC_0_L 0x51034
#define A_HMA_TLB_DESC_1_H 0x51038
@@ -62460,6 +74149,11 @@
#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN)
#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN)
+#define S_REG0MINADDR0MIN 8
+#define M_REG0MINADDR0MIN 0xffffffU
+#define V_REG0MINADDR0MIN(x) ((x) << S_REG0MINADDR0MIN)
+#define G_REG0MINADDR0MIN(x) (((x) >> S_REG0MINADDR0MIN) & M_REG0MINADDR0MIN)
+
#define A_HMA_REG0_MAX 0x51074
#define S_ADDR0_MAX 12
@@ -62467,6 +74161,11 @@
#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX)
#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX)
+#define S_REG0MAXADDR0MAX 8
+#define M_REG0MAXADDR0MAX 0xffffffU
+#define V_REG0MAXADDR0MAX(x) ((x) << S_REG0MAXADDR0MAX)
+#define G_REG0MAXADDR0MAX(x) (((x) >> S_REG0MAXADDR0MAX) & M_REG0MAXADDR0MAX)
+
#define A_HMA_REG0_MASK 0x51078
#define S_PAGE_SIZE0 12
@@ -62475,6 +74174,7 @@
#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0)
#define A_HMA_REG0_BASE 0x5107c
+#define A_HMA_REG0_BASE_LSB 0x5107c
#define A_HMA_REG1_MIN 0x51080
#define S_ADDR1_MIN 12
@@ -62482,6 +74182,11 @@
#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN)
#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN)
+#define S_REG1MINADDR1MIN 8
+#define M_REG1MINADDR1MIN 0xffffffU
+#define V_REG1MINADDR1MIN(x) ((x) << S_REG1MINADDR1MIN)
+#define G_REG1MINADDR1MIN(x) (((x) >> S_REG1MINADDR1MIN) & M_REG1MINADDR1MIN)
+
#define A_HMA_REG1_MAX 0x51084
#define S_ADDR1_MAX 12
@@ -62489,6 +74194,11 @@
#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX)
#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX)
+#define S_REG1MAXADDR1MAX 8
+#define M_REG1MAXADDR1MAX 0xffffffU
+#define V_REG1MAXADDR1MAX(x) ((x) << S_REG1MAXADDR1MAX)
+#define G_REG1MAXADDR1MAX(x) (((x) >> S_REG1MAXADDR1MAX) & M_REG1MAXADDR1MAX)
+
#define A_HMA_REG1_MASK 0x51088
#define S_PAGE_SIZE1 12
@@ -62497,6 +74207,7 @@
#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1)
#define A_HMA_REG1_BASE 0x5108c
+#define A_HMA_REG1_BASE_LSB 0x5108c
#define A_HMA_REG2_MIN 0x51090
#define S_ADDR2_MIN 12
@@ -62504,6 +74215,11 @@
#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN)
#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN)
+#define S_REG2MINADDR2MIN 8
+#define M_REG2MINADDR2MIN 0xffffffU
+#define V_REG2MINADDR2MIN(x) ((x) << S_REG2MINADDR2MIN)
+#define G_REG2MINADDR2MIN(x) (((x) >> S_REG2MINADDR2MIN) & M_REG2MINADDR2MIN)
+
#define A_HMA_REG2_MAX 0x51094
#define S_ADDR2_MAX 12
@@ -62511,6 +74227,11 @@
#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX)
#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX)
+#define S_REG2MAXADDR2MAX 8
+#define M_REG2MAXADDR2MAX 0xffffffU
+#define V_REG2MAXADDR2MAX(x) ((x) << S_REG2MAXADDR2MAX)
+#define G_REG2MAXADDR2MAX(x) (((x) >> S_REG2MAXADDR2MAX) & M_REG2MAXADDR2MAX)
+
#define A_HMA_REG2_MASK 0x51098
#define S_PAGE_SIZE2 12
@@ -62519,6 +74240,7 @@
#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2)
#define A_HMA_REG2_BASE 0x5109c
+#define A_HMA_REG2_BASE_LSB 0x5109c
#define A_HMA_REG3_MIN 0x510a0
#define S_ADDR3_MIN 12
@@ -62526,6 +74248,11 @@
#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN)
#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN)
+#define S_REG3MINADDR3MIN 8
+#define M_REG3MINADDR3MIN 0xffffffU
+#define V_REG3MINADDR3MIN(x) ((x) << S_REG3MINADDR3MIN)
+#define G_REG3MINADDR3MIN(x) (((x) >> S_REG3MINADDR3MIN) & M_REG3MINADDR3MIN)
+
#define A_HMA_REG3_MAX 0x510a4
#define S_ADDR3_MAX 12
@@ -62533,6 +74260,11 @@
#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX)
#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX)
+#define S_REG3MAXADDR3MAX 8
+#define M_REG3MAXADDR3MAX 0xffffffU
+#define V_REG3MAXADDR3MAX(x) ((x) << S_REG3MAXADDR3MAX)
+#define G_REG3MAXADDR3MAX(x) (((x) >> S_REG3MAXADDR3MAX) & M_REG3MAXADDR3MAX)
+
#define A_HMA_REG3_MASK 0x510a8
#define S_PAGE_SIZE3 12
@@ -62541,6 +74273,7 @@
#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3)
#define A_HMA_REG3_BASE 0x510ac
+#define A_HMA_REG3_BASE_LSB 0x510ac
#define A_HMA_SW_SYNC 0x510b0
#define S_ENTER_SYNC 31
@@ -62551,6 +74284,84 @@
#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC)
#define F_EXIT_SYNC V_EXIT_SYNC(1U)
+#define A_HMA_GC_MODE_SEL 0x510b4
+
+#define S_MODE_SEL 8
+#define M_MODE_SEL 0x3U
+#define V_MODE_SEL(x) ((x) << S_MODE_SEL)
+#define G_MODE_SEL(x) (((x) >> S_MODE_SEL) & M_MODE_SEL)
+
+#define S_FLUSH_REQ 4
+#define V_FLUSH_REQ(x) ((x) << S_FLUSH_REQ)
+#define F_FLUSH_REQ V_FLUSH_REQ(1U)
+
+#define S_CLEAR_REQ 0
+#define V_CLEAR_REQ(x) ((x) << S_CLEAR_REQ)
+#define F_CLEAR_REQ V_CLEAR_REQ(1U)
+
+#define A_HMA_REG0_BASE_MSB 0x510b8
+
+#define S_BASE0_MSB 0
+#define M_BASE0_MSB 0xfU
+#define V_BASE0_MSB(x) ((x) << S_BASE0_MSB)
+#define G_BASE0_MSB(x) (((x) >> S_BASE0_MSB) & M_BASE0_MSB)
+
+#define A_HMA_REG1_BASE_MSB 0x510bc
+
+#define S_BASE1_MSB 0
+#define M_BASE1_MSB 0xfU
+#define V_BASE1_MSB(x) ((x) << S_BASE1_MSB)
+#define G_BASE1_MSB(x) (((x) >> S_BASE1_MSB) & M_BASE1_MSB)
+
+#define A_HMA_REG2_BASE_MSB 0x510c0
+
+#define S_BASE2_MSB 0
+#define M_BASE2_MSB 0xfU
+#define V_BASE2_MSB(x) ((x) << S_BASE2_MSB)
+#define G_BASE2_MSB(x) (((x) >> S_BASE2_MSB) & M_BASE2_MSB)
+
+#define A_HMA_REG3_BASE_MSB 0x510c4
+
+#define S_BASE3_MSB 0
+#define M_BASE3_MSB 0xfU
+#define V_BASE3_MSB(x) ((x) << S_BASE3_MSB)
+#define G_BASE3_MSB(x) (((x) >> S_BASE3_MSB) & M_BASE3_MSB)
+
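The new A_HMA_REGn_BASE_LSB aliases and A_HMA_REGn_BASE_MSB registers, with their 4-bit BASEn_MSB fields, suggest the HMA region base addresses now exceed 32 bits. A hedged sketch of composing the full base, assuming the MSB field simply extends the 32-bit LSB register (an inference from the register names, not something the diff states):

/*
 * Illustrative only: rebuild what is presumably a 36-bit region
 * base from the LSB/MSB pair.  The layout is an assumption, as are
 * the t4_read_reg() accessor and struct adapter softc.
 */
static uint64_t
hma_reg0_base(struct adapter *sc)
{
	uint64_t lo = t4_read_reg(sc, A_HMA_REG0_BASE_LSB);
	uint64_t hi = G_BASE0_MSB(t4_read_reg(sc, A_HMA_REG0_BASE_MSB));

	return ((hi << 32) | lo);
}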
+#define A_HMA_DBG_CTL 0x51104
+#define A_HMA_DBG_DATA 0x51108
+#define A_HMA_H_BIST_CMD 0x51200
+#define A_HMA_H_BIST_CMD_ADDR 0x51204
+#define A_HMA_H_BIST_CMD_LEN 0x51208
+#define A_HMA_H_BIST_DATA_PATTERN 0x5120c
+#define A_HMA_H_BIST_USER_WDATA0 0x51210
+#define A_HMA_H_BIST_USER_WDATA1 0x51214
+#define A_HMA_H_BIST_USER_WDATA2 0x51218
+#define A_HMA_H_BIST_NUM_ERR 0x5121c
+#define A_HMA_H_BIST_ERR_FIRST_ADDR 0x51220
+#define A_HMA_H_BIST_STATUS_RDATA 0x51224
+#define A_HMA_H_BIST_CRC_SEED 0x5126c
+#define A_HMA_TABLE_LINE1_MSB 0x51270
+
+#define S_STARTA 0
+#define M_STARTA 0xfU
+#define V_STARTA(x) ((x) << S_STARTA)
+#define G_STARTA(x) (((x) >> S_STARTA) & M_STARTA)
+
+#define A_HMA_TABLE_LINE2_MSB 0x51274
+
+#define S_ENDA 0
+#define M_ENDA 0xfU
+#define V_ENDA(x) ((x) << S_ENDA)
+#define G_ENDA(x) (((x) >> S_ENDA) & M_ENDA)
+
+#define S_GK_UF_PAR_ENABLE 6
+#define V_GK_UF_PAR_ENABLE(x) ((x) << S_GK_UF_PAR_ENABLE)
+#define F_GK_UF_PAR_ENABLE V_GK_UF_PAR_ENABLE(1U)
+
+#define S_PCIEMST_PAR_ENABLE 2
+#define V_PCIEMST_PAR_ENABLE(x) ((x) << S_PCIEMST_PAR_ENABLE)
+#define F_PCIEMST_PAR_ENABLE V_PCIEMST_PAR_ENABLE(1U)
+
#define S_IDTF_INT_ENABLE 5
#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE)
#define F_IDTF_INT_ENABLE V_IDTF_INT_ENABLE(1U)
@@ -62571,6 +74382,10 @@
#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE)
#define F_MAMST_INT_ENABLE V_MAMST_INT_ENABLE(1U)
+#define S_GK_UF_INT_ENABLE 6
+#define V_GK_UF_INT_ENABLE(x) ((x) << S_GK_UF_INT_ENABLE)
+#define F_GK_UF_INT_ENABLE V_GK_UF_INT_ENABLE(1U)
+
#define S_IDTF_INT_CAUSE 5
#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE)
#define F_IDTF_INT_CAUSE V_IDTF_INT_CAUSE(1U)
@@ -62591,6 +74406,10 @@
#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE)
#define F_MAMST_INT_CAUSE V_MAMST_INT_CAUSE(1U)
+#define S_GK_UF_INT_CAUSE 6
+#define V_GK_UF_INT_CAUSE(x) ((x) << S_GK_UF_INT_CAUSE)
+#define F_GK_UF_INT_CAUSE V_GK_UF_INT_CAUSE(1U)
+
#define A_HMA_MA_MST_ERR 0x5130c
#define A_HMA_RTF_ERR 0x51310
#define A_HMA_OTF_ERR 0x51314
@@ -62904,3 +74723,12365 @@
#define M_RD_EOP_CNT 0xffU
#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT)
#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 16
+#define M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTWR_EOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 8
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_SOP_CNT)
+
+#define S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0
+#define M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT 0xffU
+#define V_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) ((x) << S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+#define G_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT(x) (((x) >> S_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT) & M_DEBUG_PCIE_SOP_EOP_CNTRD_EOP_CNT)
+
+/* registers for module MAC_T7 */
+#define MAC_T7_BASE_ADDR 0x38000
+
+#define S_T7_PORT_MAP 21
+#define M_T7_PORT_MAP 0x7U
+#define V_T7_PORT_MAP(x) ((x) << S_T7_PORT_MAP)
+#define G_T7_PORT_MAP(x) (((x) >> S_T7_PORT_MAP) & M_T7_PORT_MAP)
+
+#define S_T7_SMUX_RX_LOOP 17
+#define M_T7_SMUX_RX_LOOP 0xfU
+#define V_T7_SMUX_RX_LOOP(x) ((x) << S_T7_SMUX_RX_LOOP)
+#define G_T7_SMUX_RX_LOOP(x) (((x) >> S_T7_SMUX_RX_LOOP) & M_T7_SMUX_RX_LOOP)
+
+#define S_T7_SIGNAL_DET 15
+#define V_T7_SIGNAL_DET(x) ((x) << S_T7_SIGNAL_DET)
+#define F_T7_SIGNAL_DET V_T7_SIGNAL_DET(1U)
+
+#define S_CFG_MAC_2_MPS_FULL 13
+#define V_CFG_MAC_2_MPS_FULL(x) ((x) << S_CFG_MAC_2_MPS_FULL)
+#define F_CFG_MAC_2_MPS_FULL V_CFG_MAC_2_MPS_FULL(1U)
+
+#define S_MPS_FULL_SEL 12
+#define V_MPS_FULL_SEL(x) ((x) << S_MPS_FULL_SEL)
+#define F_MPS_FULL_SEL V_MPS_FULL_SEL(1U)
+
+#define S_T7_SMUXTXSEL 8
+#define M_T7_SMUXTXSEL 0xfU
+#define V_T7_SMUXTXSEL(x) ((x) << S_T7_SMUXTXSEL)
+#define G_T7_SMUXTXSEL(x) (((x) >> S_T7_SMUXTXSEL) & M_T7_SMUXTXSEL)
+
+#define S_T7_PORTSPEED 4
+#define M_T7_PORTSPEED 0xfU
+#define V_T7_PORTSPEED(x) ((x) << S_T7_PORTSPEED)
+#define G_T7_PORTSPEED(x) (((x) >> S_T7_PORTSPEED) & M_T7_PORTSPEED)
+
+#define S_MTIP_REG_RESET 25
+#define V_MTIP_REG_RESET(x) ((x) << S_MTIP_REG_RESET)
+#define F_MTIP_REG_RESET V_MTIP_REG_RESET(1U)
+
+#define S_RESET_REG_CLK_I 24
+#define V_RESET_REG_CLK_I(x) ((x) << S_RESET_REG_CLK_I)
+#define F_RESET_REG_CLK_I V_RESET_REG_CLK_I(1U)
+
+#define S_T7_LED1_CFG1 15
+#define M_T7_LED1_CFG1 0x7U
+#define V_T7_LED1_CFG1(x) ((x) << S_T7_LED1_CFG1)
+#define G_T7_LED1_CFG1(x) (((x) >> S_T7_LED1_CFG1) & M_T7_LED1_CFG1)
+
+#define S_T7_LED0_CFG1 12
+#define M_T7_LED0_CFG1 0x7U
+#define V_T7_LED0_CFG1(x) ((x) << S_T7_LED0_CFG1)
+#define G_T7_LED0_CFG1(x) (((x) >> S_T7_LED0_CFG1) & M_T7_LED0_CFG1)
+
+#define A_T7_MAC_PORT_MAGIC_MACID_LO 0x820
+#define A_T7_MAC_PORT_MAGIC_MACID_HI 0x824
+#define A_T7_MAC_PORT_LINK_STATUS 0x828
+
+#define S_EGR_SE_CNT_ERR 9
+#define V_EGR_SE_CNT_ERR(x) ((x) << S_EGR_SE_CNT_ERR)
+#define F_EGR_SE_CNT_ERR V_EGR_SE_CNT_ERR(1U)
+
+#define S_INGR_SE_CNT_ERR 8
+#define V_INGR_SE_CNT_ERR(x) ((x) << S_INGR_SE_CNT_ERR)
+#define F_INGR_SE_CNT_ERR V_INGR_SE_CNT_ERR(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN_100G 0x82c
+
+#define S_PERR_PCSR_FDM_3 21
+#define V_PERR_PCSR_FDM_3(x) ((x) << S_PERR_PCSR_FDM_3)
+#define F_PERR_PCSR_FDM_3 V_PERR_PCSR_FDM_3(1U)
+
+#define S_PERR_PCSR_FDM_2 20
+#define V_PERR_PCSR_FDM_2(x) ((x) << S_PERR_PCSR_FDM_2)
+#define F_PERR_PCSR_FDM_2 V_PERR_PCSR_FDM_2(1U)
+
+#define S_PERR_PCSR_FDM_1 19
+#define V_PERR_PCSR_FDM_1(x) ((x) << S_PERR_PCSR_FDM_1)
+#define F_PERR_PCSR_FDM_1 V_PERR_PCSR_FDM_1(1U)
+
+#define S_PERR_PCSR_FDM_0 18
+#define V_PERR_PCSR_FDM_0(x) ((x) << S_PERR_PCSR_FDM_0)
+#define F_PERR_PCSR_FDM_0 V_PERR_PCSR_FDM_0(1U)
+
+#define S_PERR_PCSR_FM_3 17
+#define V_PERR_PCSR_FM_3(x) ((x) << S_PERR_PCSR_FM_3)
+#define F_PERR_PCSR_FM_3 V_PERR_PCSR_FM_3(1U)
+
+#define S_PERR_PCSR_FM_2 16
+#define V_PERR_PCSR_FM_2(x) ((x) << S_PERR_PCSR_FM_2)
+#define F_PERR_PCSR_FM_2 V_PERR_PCSR_FM_2(1U)
+
+#define S_PERR_PCSR_FM_1 15
+#define V_PERR_PCSR_FM_1(x) ((x) << S_PERR_PCSR_FM_1)
+#define F_PERR_PCSR_FM_1 V_PERR_PCSR_FM_1(1U)
+
+#define S_PERR_PCSR_FM_0 14
+#define V_PERR_PCSR_FM_0(x) ((x) << S_PERR_PCSR_FM_0)
+#define F_PERR_PCSR_FM_0 V_PERR_PCSR_FM_0(1U)
+
+#define S_PERR_PCSR_DM_1 13
+#define V_PERR_PCSR_DM_1(x) ((x) << S_PERR_PCSR_DM_1)
+#define F_PERR_PCSR_DM_1 V_PERR_PCSR_DM_1(1U)
+
+#define S_PERR_PCSR_DM_0 12
+#define V_PERR_PCSR_DM_0(x) ((x) << S_PERR_PCSR_DM_0)
+#define F_PERR_PCSR_DM_0 V_PERR_PCSR_DM_0(1U)
+
+#define S_PERR_PCSR_DK_3 11
+#define V_PERR_PCSR_DK_3(x) ((x) << S_PERR_PCSR_DK_3)
+#define F_PERR_PCSR_DK_3 V_PERR_PCSR_DK_3(1U)
+
+#define S_PERR_PCSR_DK_2 10
+#define V_PERR_PCSR_DK_2(x) ((x) << S_PERR_PCSR_DK_2)
+#define F_PERR_PCSR_DK_2 V_PERR_PCSR_DK_2(1U)
+
+#define S_PERR_PCSR_DK_1 9
+#define V_PERR_PCSR_DK_1(x) ((x) << S_PERR_PCSR_DK_1)
+#define F_PERR_PCSR_DK_1 V_PERR_PCSR_DK_1(1U)
+
+#define S_PERR_PCSR_DK_0 8
+#define V_PERR_PCSR_DK_0(x) ((x) << S_PERR_PCSR_DK_0)
+#define F_PERR_PCSR_DK_0 V_PERR_PCSR_DK_0(1U)
+
+#define S_PERR_F91RO_1 7
+#define V_PERR_F91RO_1(x) ((x) << S_PERR_F91RO_1)
+#define F_PERR_F91RO_1 V_PERR_F91RO_1(1U)
+
+#define S_PERR_F91RO_0 6
+#define V_PERR_F91RO_0(x) ((x) << S_PERR_F91RO_0)
+#define F_PERR_F91RO_0 V_PERR_F91RO_0(1U)
+
+#define S_PERR_PCSR_F91DM 5
+#define V_PERR_PCSR_F91DM(x) ((x) << S_PERR_PCSR_F91DM)
+#define F_PERR_PCSR_F91DM V_PERR_PCSR_F91DM(1U)
+
+#define S_PERR_PCSR_F91TI 4
+#define V_PERR_PCSR_F91TI(x) ((x) << S_PERR_PCSR_F91TI)
+#define F_PERR_PCSR_F91TI V_PERR_PCSR_F91TI(1U)
+
+#define S_PERR_PCSR_F91TO 3
+#define V_PERR_PCSR_F91TO(x) ((x) << S_PERR_PCSR_F91TO)
+#define F_PERR_PCSR_F91TO V_PERR_PCSR_F91TO(1U)
+
+#define S_PERR_PCSR_F91M 2
+#define V_PERR_PCSR_F91M(x) ((x) << S_PERR_PCSR_F91M)
+#define F_PERR_PCSR_F91M V_PERR_PCSR_F91M(1U)
+
+#define S_PERR_PCSR_80_16_1 1
+#define V_PERR_PCSR_80_16_1(x) ((x) << S_PERR_PCSR_80_16_1)
+#define F_PERR_PCSR_80_16_1 V_PERR_PCSR_80_16_1(1U)
+
+#define S_PERR_PCSR_80_16_0 0
+#define V_PERR_PCSR_80_16_0(x) ((x) << S_PERR_PCSR_80_16_0)
+#define F_PERR_PCSR_80_16_0 V_PERR_PCSR_80_16_0(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_CAUSE_100G 0x830
+#define A_T7_MAC_PORT_PERR_ENABLE_100G 0x834
+#define A_MAC_PORT_MAC10G100G_CONFIG_0 0x838
+
+#define S_PEER_DELAY_VAL 31
+#define V_PEER_DELAY_VAL(x) ((x) << S_PEER_DELAY_VAL)
+#define F_PEER_DELAY_VAL V_PEER_DELAY_VAL(1U)
+
+#define S_PEER_DELAY 1
+#define M_PEER_DELAY 0x3fffffffU
+#define V_PEER_DELAY(x) ((x) << S_PEER_DELAY)
+#define G_PEER_DELAY(x) (((x) >> S_PEER_DELAY) & M_PEER_DELAY)
+
+#define S_MODE1S_ENA 0
+#define V_MODE1S_ENA(x) ((x) << S_MODE1S_ENA)
+#define F_MODE1S_ENA V_MODE1S_ENA(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_1 0x83c
+
+#define S_TX_STOP 25
+#define V_TX_STOP(x) ((x) << S_TX_STOP)
+#define F_TX_STOP V_TX_STOP(1U)
+
+#define S_T7_MODE1S_ENA 24
+#define V_T7_MODE1S_ENA(x) ((x) << S_T7_MODE1S_ENA)
+#define F_T7_MODE1S_ENA V_T7_MODE1S_ENA(1U)
+
+#define S_TX_TS_ID 12
+#define M_TX_TS_ID 0xfffU
+#define V_TX_TS_ID(x) ((x) << S_TX_TS_ID)
+#define G_TX_TS_ID(x) (((x) >> S_TX_TS_ID) & M_TX_TS_ID)
+
+#define S_T7_TX_LI_FAULT 11
+#define V_T7_TX_LI_FAULT(x) ((x) << S_T7_TX_LI_FAULT)
+#define F_T7_TX_LI_FAULT V_T7_TX_LI_FAULT(1U)
+
+#define S_XOFF_GEN 3
+#define M_XOFF_GEN 0xffU
+#define V_XOFF_GEN(x) ((x) << S_XOFF_GEN)
+#define G_XOFF_GEN(x) (((x) >> S_XOFF_GEN) & M_XOFF_GEN)
+
+#define S_TX_REM_FAULT 1
+#define V_TX_REM_FAULT(x) ((x) << S_TX_REM_FAULT)
+#define F_TX_REM_FAULT V_TX_REM_FAULT(1U)
+
+#define S_TX_LOC_FAULT 0
+#define V_TX_LOC_FAULT(x) ((x) << S_TX_LOC_FAULT)
+#define F_TX_LOC_FAULT V_TX_LOC_FAULT(1U)
+
+#define A_MAC_PORT_MAC10G100G_CONFIG_2 0x840
+
+#define S_FF_TX_RX_TS_NS 0
+#define M_FF_TX_RX_TS_NS 0x3fffffffU
+#define V_FF_TX_RX_TS_NS(x) ((x) << S_FF_TX_RX_TS_NS)
+#define G_FF_TX_RX_TS_NS(x) (((x) >> S_FF_TX_RX_TS_NS) & M_FF_TX_RX_TS_NS)
+
+#define A_MAC_PORT_MAC10G100G_STATUS 0x844
+
+#define S_REG_LOWP 21
+#define V_REG_LOWP(x) ((x) << S_REG_LOWP)
+#define F_REG_LOWP V_REG_LOWP(1U)
+
+#define S_LI_FAULT 20
+#define V_LI_FAULT(x) ((x) << S_LI_FAULT)
+#define F_LI_FAULT V_LI_FAULT(1U)
+
+#define S_TX_ISIDLE 19
+#define V_TX_ISIDLE(x) ((x) << S_TX_ISIDLE)
+#define F_TX_ISIDLE V_TX_ISIDLE(1U)
+
+#define S_TX_UNDERFLOW 18
+#define V_TX_UNDERFLOW(x) ((x) << S_TX_UNDERFLOW)
+#define F_TX_UNDERFLOW V_TX_UNDERFLOW(1U)
+
+#define S_T7_TX_EMPTY 17
+#define V_T7_TX_EMPTY(x) ((x) << S_T7_TX_EMPTY)
+#define F_T7_TX_EMPTY V_T7_TX_EMPTY(1U)
+
+#define S_T7_1_REM_FAULT 16
+#define V_T7_1_REM_FAULT(x) ((x) << S_T7_1_REM_FAULT)
+#define F_T7_1_REM_FAULT V_T7_1_REM_FAULT(1U)
+
+#define S_REG_TS_AVAIL 15
+#define V_REG_TS_AVAIL(x) ((x) << S_REG_TS_AVAIL)
+#define F_REG_TS_AVAIL V_REG_TS_AVAIL(1U)
+
+#define S_T7_PHY_TXENA 14
+#define V_T7_PHY_TXENA(x) ((x) << S_T7_PHY_TXENA)
+#define F_T7_PHY_TXENA V_T7_PHY_TXENA(1U)
+
+#define S_T7_PFC_MODE 13
+#define V_T7_PFC_MODE(x) ((x) << S_T7_PFC_MODE)
+#define F_T7_PFC_MODE V_T7_PFC_MODE(1U)
+
+#define S_PAUSE_ON 5
+#define M_PAUSE_ON 0xffU
+#define V_PAUSE_ON(x) ((x) << S_PAUSE_ON)
+#define G_PAUSE_ON(x) (((x) >> S_PAUSE_ON) & M_PAUSE_ON)
+
+#define S_MAC_PAUSE_EN 4
+#define V_MAC_PAUSE_EN(x) ((x) << S_MAC_PAUSE_EN)
+#define F_MAC_PAUSE_EN V_MAC_PAUSE_EN(1U)
+
+#define S_MAC_ENABLE 3
+#define V_MAC_ENABLE(x) ((x) << S_MAC_ENABLE)
+#define F_MAC_ENABLE V_MAC_ENABLE(1U)
+
+#define S_LOOP_ENA 2
+#define V_LOOP_ENA(x) ((x) << S_LOOP_ENA)
+#define F_LOOP_ENA V_LOOP_ENA(1U)
+
+#define S_LOC_FAULT 1
+#define V_LOC_FAULT(x) ((x) << S_LOC_FAULT)
+#define F_LOC_FAULT V_LOC_FAULT(1U)
+
+#define S_FF_RX_EMPTY 0
+#define V_FF_RX_EMPTY(x) ((x) << S_FF_RX_EMPTY)
+#define F_FF_RX_EMPTY V_FF_RX_EMPTY(1U)
+
+#define A_MAC_PORT_MAC_AN_STATE_STATUS0 0x848
+
+#define S_AN_VAL_AN 15
+#define V_AN_VAL_AN(x) ((x) << S_AN_VAL_AN)
+#define F_AN_VAL_AN V_AN_VAL_AN(1U)
+
+#define S_AN_TR_DIS_STATUS_AN 14
+#define V_AN_TR_DIS_STATUS_AN(x) ((x) << S_AN_TR_DIS_STATUS_AN)
+#define F_AN_TR_DIS_STATUS_AN V_AN_TR_DIS_STATUS_AN(1U)
+
+#define S_AN_STATUS_AN 13
+#define V_AN_STATUS_AN(x) ((x) << S_AN_STATUS_AN)
+#define F_AN_STATUS_AN V_AN_STATUS_AN(1U)
+
+#define S_AN_SELECT_AN 8
+#define M_AN_SELECT_AN 0x1fU
+#define V_AN_SELECT_AN(x) ((x) << S_AN_SELECT_AN)
+#define G_AN_SELECT_AN(x) (((x) >> S_AN_SELECT_AN) & M_AN_SELECT_AN)
+
+#define S_AN_RS_FEC_ENA_AN 7
+#define V_AN_RS_FEC_ENA_AN(x) ((x) << S_AN_RS_FEC_ENA_AN)
+#define F_AN_RS_FEC_ENA_AN V_AN_RS_FEC_ENA_AN(1U)
+
+#define S_AN_INT_AN 6
+#define V_AN_INT_AN(x) ((x) << S_AN_INT_AN)
+#define F_AN_INT_AN V_AN_INT_AN(1U)
+
+#define S_AN_FEC_ENA_AN 5
+#define V_AN_FEC_ENA_AN(x) ((x) << S_AN_FEC_ENA_AN)
+#define F_AN_FEC_ENA_AN V_AN_FEC_ENA_AN(1U)
+
+#define S_AN_DONE_AN 4
+#define V_AN_DONE_AN(x) ((x) << S_AN_DONE_AN)
+#define F_AN_DONE_AN V_AN_DONE_AN(1U)
+
+#define S_AN_STATE 0
+#define M_AN_STATE 0xfU
+#define V_AN_STATE(x) ((x) << S_AN_STATE)
+#define G_AN_STATE(x) (((x) >> S_AN_STATE) & M_AN_STATE)
+
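/*
 * Editor's illustrative sketch: reading back autonegotiation progress from
 * A_MAC_PORT_MAC_AN_STATE_STATUS0.  The raw 32-bit value is assumed to come
 * from whatever register accessor the driver uses; only the macros above
 * are relied on here.
 */
#include <stdint.h>
#include <stdio.h>

static void
report_an_state(uint32_t status0)
{
	/*
	 * F_AN_DONE_AN tests the done bit; G_AN_STATE() pulls the 4-bit
	 * state-machine value out of bits 3:0.
	 */
	printf("AN %s, state=%u, rs-fec=%u\n",
	    (status0 & F_AN_DONE_AN) ? "done" : "in progress",
	    G_AN_STATE(status0),
	    (status0 & F_AN_RS_FEC_ENA_AN) ? 1 : 0);
}
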
+#define A_MAC_PORT_MAC_AN_STATE_STATUS1 0x84c
+#define A_T7_MAC_PORT_EPIO_DATA0 0x850
+#define A_T7_MAC_PORT_EPIO_DATA1 0x854
+#define A_T7_MAC_PORT_EPIO_DATA2 0x858
+#define A_T7_MAC_PORT_EPIO_DATA3 0x85c
+#define A_T7_MAC_PORT_EPIO_OP 0x860
+#define A_T7_MAC_PORT_WOL_STATUS 0x864
+#define A_T7_MAC_PORT_INT_EN 0x868
+
+#define S_MAC2MPS_PERR 31
+#define V_MAC2MPS_PERR(x) ((x) << S_MAC2MPS_PERR)
+#define F_MAC2MPS_PERR V_MAC2MPS_PERR(1U)
+
+#define S_MAC_PPS_INT_EN 30
+#define V_MAC_PPS_INT_EN(x) ((x) << S_MAC_PPS_INT_EN)
+#define F_MAC_PPS_INT_EN V_MAC_PPS_INT_EN(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_EN 29
+#define V_MAC_TX_TS_AVAIL_INT_EN(x) ((x) << S_MAC_TX_TS_AVAIL_INT_EN)
+#define F_MAC_TX_TS_AVAIL_INT_EN V_MAC_TX_TS_AVAIL_INT_EN(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_EN 28
+#define V_MAC_SINGLE_ALARM_INT_EN(x) ((x) << S_MAC_SINGLE_ALARM_INT_EN)
+#define F_MAC_SINGLE_ALARM_INT_EN V_MAC_SINGLE_ALARM_INT_EN(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_EN 27
+#define V_MAC_PERIODIC_ALARM_INT_EN(x) ((x) << S_MAC_PERIODIC_ALARM_INT_EN)
+#define F_MAC_PERIODIC_ALARM_INT_EN V_MAC_PERIODIC_ALARM_INT_EN(1U)
+
+#define S_MAC_PATDETWAKE_INT_EN 26
+#define V_MAC_PATDETWAKE_INT_EN(x) ((x) << S_MAC_PATDETWAKE_INT_EN)
+#define F_MAC_PATDETWAKE_INT_EN V_MAC_PATDETWAKE_INT_EN(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_EN 25
+#define V_MAC_MAGIC_WAKE_INT_EN(x) ((x) << S_MAC_MAGIC_WAKE_INT_EN)
+#define F_MAC_MAGIC_WAKE_INT_EN V_MAC_MAGIC_WAKE_INT_EN(1U)
+
+#define S_MAC_SIGDETCHG_INT_EN 24
+#define V_MAC_SIGDETCHG_INT_EN(x) ((x) << S_MAC_SIGDETCHG_INT_EN)
+#define F_MAC_SIGDETCHG_INT_EN V_MAC_SIGDETCHG_INT_EN(1U)
+
+#define S_MAC_PCS_LINK_GOOD_EN 12
+#define V_MAC_PCS_LINK_GOOD_EN(x) ((x) << S_MAC_PCS_LINK_GOOD_EN)
+#define F_MAC_PCS_LINK_GOOD_EN V_MAC_PCS_LINK_GOOD_EN(1U)
+
+#define S_MAC_PCS_LINK_FAIL_EN 11
+#define V_MAC_PCS_LINK_FAIL_EN(x) ((x) << S_MAC_PCS_LINK_FAIL_EN)
+#define F_MAC_PCS_LINK_FAIL_EN V_MAC_PCS_LINK_FAIL_EN(1U)
+
+#define S_MAC_OVRFLOW_INT_EN 10
+#define V_MAC_OVRFLOW_INT_EN(x) ((x) << S_MAC_OVRFLOW_INT_EN)
+#define F_MAC_OVRFLOW_INT_EN V_MAC_OVRFLOW_INT_EN(1U)
+
+#define S_MAC_REM_FAULT_INT_EN 7
+#define V_MAC_REM_FAULT_INT_EN(x) ((x) << S_MAC_REM_FAULT_INT_EN)
+#define F_MAC_REM_FAULT_INT_EN V_MAC_REM_FAULT_INT_EN(1U)
+
+#define S_MAC_LOC_FAULT_INT_EN 6
+#define V_MAC_LOC_FAULT_INT_EN(x) ((x) << S_MAC_LOC_FAULT_INT_EN)
+#define F_MAC_LOC_FAULT_INT_EN V_MAC_LOC_FAULT_INT_EN(1U)
+
+#define S_MAC_LINK_DOWN_INT_EN 5
+#define V_MAC_LINK_DOWN_INT_EN(x) ((x) << S_MAC_LINK_DOWN_INT_EN)
+#define F_MAC_LINK_DOWN_INT_EN V_MAC_LINK_DOWN_INT_EN(1U)
+
+#define S_MAC_LINK_UP_INT_EN 4
+#define V_MAC_LINK_UP_INT_EN(x) ((x) << S_MAC_LINK_UP_INT_EN)
+#define F_MAC_LINK_UP_INT_EN V_MAC_LINK_UP_INT_EN(1U)
+
+#define S_MAC_AN_DONE_INT_EN 3
+#define V_MAC_AN_DONE_INT_EN(x) ((x) << S_MAC_AN_DONE_INT_EN)
+#define F_MAC_AN_DONE_INT_EN V_MAC_AN_DONE_INT_EN(1U)
+
+#define S_MAC_AN_PGRD_INT_EN 2
+#define V_MAC_AN_PGRD_INT_EN(x) ((x) << S_MAC_AN_PGRD_INT_EN)
+#define F_MAC_AN_PGRD_INT_EN V_MAC_AN_PGRD_INT_EN(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_EN 1
+#define V_MAC_TXFIFO_ERR_INT_EN(x) ((x) << S_MAC_TXFIFO_ERR_INT_EN)
+#define F_MAC_TXFIFO_ERR_INT_EN V_MAC_TXFIFO_ERR_INT_EN(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_EN 0
+#define V_MAC_RXFIFO_ERR_INT_EN(x) ((x) << S_MAC_RXFIFO_ERR_INT_EN)
+#define F_MAC_RXFIFO_ERR_INT_EN V_MAC_RXFIFO_ERR_INT_EN(1U)
+
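/*
 * Editor's illustrative sketch: the F_ single-bit macros OR together into a
 * write value for A_T7_MAC_PORT_INT_EN.  mac_port_write() below is a
 * hypothetical stand-in for the driver's real register-write routine, not
 * an actual cxgbe API.
 */
#include <stdint.h>

extern void mac_port_write(uint32_t addr, uint32_t val);	/* assumed */

static void
enable_link_and_fifo_ints(void)
{
	/* Enable link up/down and TX/RX FIFO error interrupts only. */
	mac_port_write(A_T7_MAC_PORT_INT_EN,
	    F_MAC_LINK_UP_INT_EN | F_MAC_LINK_DOWN_INT_EN |
	    F_MAC_TXFIFO_ERR_INT_EN | F_MAC_RXFIFO_ERR_INT_EN);
}
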
+#define A_T7_MAC_PORT_INT_CAUSE 0x86c
+
+#define S_MAC2MPS_PERR_CAUSE 31
+#define V_MAC2MPS_PERR_CAUSE(x) ((x) << S_MAC2MPS_PERR_CAUSE)
+#define F_MAC2MPS_PERR_CAUSE V_MAC2MPS_PERR_CAUSE(1U)
+
+#define S_MAC_PPS_INT_CAUSE 30
+#define V_MAC_PPS_INT_CAUSE(x) ((x) << S_MAC_PPS_INT_CAUSE)
+#define F_MAC_PPS_INT_CAUSE V_MAC_PPS_INT_CAUSE(1U)
+
+#define S_MAC_TX_TS_AVAIL_INT_CAUSE 29
+#define V_MAC_TX_TS_AVAIL_INT_CAUSE(x) ((x) << S_MAC_TX_TS_AVAIL_INT_CAUSE)
+#define F_MAC_TX_TS_AVAIL_INT_CAUSE V_MAC_TX_TS_AVAIL_INT_CAUSE(1U)
+
+#define S_MAC_SINGLE_ALARM_INT_CAUSE 28
+#define V_MAC_SINGLE_ALARM_INT_CAUSE(x) ((x) << S_MAC_SINGLE_ALARM_INT_CAUSE)
+#define F_MAC_SINGLE_ALARM_INT_CAUSE V_MAC_SINGLE_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PERIODIC_ALARM_INT_CAUSE 27
+#define V_MAC_PERIODIC_ALARM_INT_CAUSE(x) ((x) << S_MAC_PERIODIC_ALARM_INT_CAUSE)
+#define F_MAC_PERIODIC_ALARM_INT_CAUSE V_MAC_PERIODIC_ALARM_INT_CAUSE(1U)
+
+#define S_MAC_PATDETWAKE_INT_CAUSE 26
+#define V_MAC_PATDETWAKE_INT_CAUSE(x) ((x) << S_MAC_PATDETWAKE_INT_CAUSE)
+#define F_MAC_PATDETWAKE_INT_CAUSE V_MAC_PATDETWAKE_INT_CAUSE(1U)
+
+#define S_MAC_MAGIC_WAKE_INT_CAUSE 25
+#define V_MAC_MAGIC_WAKE_INT_CAUSE(x) ((x) << S_MAC_MAGIC_WAKE_INT_CAUSE)
+#define F_MAC_MAGIC_WAKE_INT_CAUSE V_MAC_MAGIC_WAKE_INT_CAUSE(1U)
+
+#define S_MAC_SIGDETCHG_INT_CAUSE 24
+#define V_MAC_SIGDETCHG_INT_CAUSE(x) ((x) << S_MAC_SIGDETCHG_INT_CAUSE)
+#define F_MAC_SIGDETCHG_INT_CAUSE V_MAC_SIGDETCHG_INT_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_GOOD_CAUSE 12
+#define V_MAC_PCS_LINK_GOOD_CAUSE(x) ((x) << S_MAC_PCS_LINK_GOOD_CAUSE)
+#define F_MAC_PCS_LINK_GOOD_CAUSE V_MAC_PCS_LINK_GOOD_CAUSE(1U)
+
+#define S_MAC_PCS_LINK_FAIL_CAUSE 11
+#define V_MAC_PCS_LINK_FAIL_CAUSE(x) ((x) << S_MAC_PCS_LINK_FAIL_CAUSE)
+#define F_MAC_PCS_LINK_FAIL_CAUSE V_MAC_PCS_LINK_FAIL_CAUSE(1U)
+
+#define S_MAC_OVRFLOW_INT_CAUSE 10
+#define V_MAC_OVRFLOW_INT_CAUSE(x) ((x) << S_MAC_OVRFLOW_INT_CAUSE)
+#define F_MAC_OVRFLOW_INT_CAUSE V_MAC_OVRFLOW_INT_CAUSE(1U)
+
+#define S_MAC_REM_FAULT_INT_CAUSE 7
+#define V_MAC_REM_FAULT_INT_CAUSE(x) ((x) << S_MAC_REM_FAULT_INT_CAUSE)
+#define F_MAC_REM_FAULT_INT_CAUSE V_MAC_REM_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LOC_FAULT_INT_CAUSE 6
+#define V_MAC_LOC_FAULT_INT_CAUSE(x) ((x) << S_MAC_LOC_FAULT_INT_CAUSE)
+#define F_MAC_LOC_FAULT_INT_CAUSE V_MAC_LOC_FAULT_INT_CAUSE(1U)
+
+#define S_MAC_LINK_DOWN_INT_CAUSE 5
+#define V_MAC_LINK_DOWN_INT_CAUSE(x) ((x) << S_MAC_LINK_DOWN_INT_CAUSE)
+#define F_MAC_LINK_DOWN_INT_CAUSE V_MAC_LINK_DOWN_INT_CAUSE(1U)
+
+#define S_MAC_LINK_UP_INT_CAUSE 4
+#define V_MAC_LINK_UP_INT_CAUSE(x) ((x) << S_MAC_LINK_UP_INT_CAUSE)
+#define F_MAC_LINK_UP_INT_CAUSE V_MAC_LINK_UP_INT_CAUSE(1U)
+
+#define S_MAC_AN_DONE_INT_CAUSE 3
+#define V_MAC_AN_DONE_INT_CAUSE(x) ((x) << S_MAC_AN_DONE_INT_CAUSE)
+#define F_MAC_AN_DONE_INT_CAUSE V_MAC_AN_DONE_INT_CAUSE(1U)
+
+#define S_MAC_AN_PGRD_INT_CAUSE 2
+#define V_MAC_AN_PGRD_INT_CAUSE(x) ((x) << S_MAC_AN_PGRD_INT_CAUSE)
+#define F_MAC_AN_PGRD_INT_CAUSE V_MAC_AN_PGRD_INT_CAUSE(1U)
+
+#define S_MAC_TXFIFO_ERR_INT_CAUSE 1
+#define V_MAC_TXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_TXFIFO_ERR_INT_CAUSE)
+#define F_MAC_TXFIFO_ERR_INT_CAUSE V_MAC_TXFIFO_ERR_INT_CAUSE(1U)
+
+#define S_MAC_RXFIFO_ERR_INT_CAUSE 0
+#define V_MAC_RXFIFO_ERR_INT_CAUSE(x) ((x) << S_MAC_RXFIFO_ERR_INT_CAUSE)
+#define F_MAC_RXFIFO_ERR_INT_CAUSE V_MAC_RXFIFO_ERR_INT_CAUSE(1U)
+
+#define A_T7_MAC_PORT_PERR_INT_EN 0x870
+#define A_T7_MAC_PORT_PERR_INT_CAUSE 0x874
+#define A_T7_MAC_PORT_PERR_ENABLE 0x878
+#define A_T7_MAC_PORT_PERR_INJECT 0x87c
+
+#define S_T7_MEMSEL_PERR 1
+#define M_T7_MEMSEL_PERR 0xffU
+#define V_T7_MEMSEL_PERR(x) ((x) << S_T7_MEMSEL_PERR)
+#define G_T7_MEMSEL_PERR(x) (((x) >> S_T7_MEMSEL_PERR) & M_T7_MEMSEL_PERR)
+
+#define A_T7_MAC_PORT_RUNT_FRAME 0x880
+#define A_T7_MAC_PORT_EEE_STATUS 0x884
+#define A_T7_MAC_PORT_TX_TS_ID 0x888
+
+#define S_TS_ID_MSB 3
+#define V_TS_ID_MSB(x) ((x) << S_TS_ID_MSB)
+#define F_TS_ID_MSB V_TS_ID_MSB(1U)
+
+#define A_T7_MAC_PORT_TX_TS_VAL_LO 0x88c
+#define A_T7_MAC_PORT_TX_TS_VAL_HI 0x890
+#define A_T7_MAC_PORT_EEE_CTL 0x894
+#define A_T7_MAC_PORT_EEE_TX_CTL 0x898
+#define A_T7_MAC_PORT_EEE_RX_CTL 0x89c
+#define A_T7_MAC_PORT_EEE_TX_10G_SLEEP_TIMER 0x8a0
+#define A_T7_MAC_PORT_EEE_TX_10G_QUIET_TIMER 0x8a4
+#define A_T7_MAC_PORT_EEE_TX_10G_WAKE_TIMER 0x8a8
+#define A_T7_MAC_PORT_EEE_RX_10G_QUIET_TIMER 0x8b8
+#define A_T7_MAC_PORT_EEE_RX_10G_WAKE_TIMER 0x8bc
+#define A_T7_MAC_PORT_EEE_RX_10G_WF_TIMER 0x8c0
+#define A_T7_MAC_PORT_EEE_WF_COUNT 0x8cc
+#define A_MAC_PORT_WOL_EN 0x8d0
+
+#define S_WOL_ENABLE 1
+#define V_WOL_ENABLE(x) ((x) << S_WOL_ENABLE)
+#define F_WOL_ENABLE V_WOL_ENABLE(1U)
+
+#define S_WOL_INDICATOR 0
+#define V_WOL_INDICATOR(x) ((x) << S_WOL_INDICATOR)
+#define F_WOL_INDICATOR V_WOL_INDICATOR(1U)
+
+#define A_MAC_PORT_INT_TRACE 0x8d4
+
+#define S_INTERRUPT 0
+#define M_INTERRUPT 0x7fffffffU
+#define V_INTERRUPT(x) ((x) << S_INTERRUPT)
+#define G_INTERRUPT(x) (((x) >> S_INTERRUPT) & M_INTERRUPT)
+
+#define A_MAC_PORT_TRACE_TS_LO 0x8d8
+#define A_MAC_PORT_TRACE_TS_HI 0x8dc
+#define A_MAC_PORT_MTIP_10G100G_REVISION 0x900
+
+#define S_VER_10G100G 8
+#define M_VER_10G100G 0xffU
+#define V_VER_10G100G(x) ((x) << S_VER_10G100G)
+#define G_VER_10G100G(x) (((x) >> S_VER_10G100G) & M_VER_10G100G)
+
+#define S_REV_10G100G 0
+#define M_REV_10G100G 0xffU
+#define V_REV_10G100G(x) ((x) << S_REV_10G100G)
+#define G_REV_10G100G(x) (((x) >> S_REV_10G100G) & M_REV_10G100G)
+
+#define A_MAC_PORT_MTIP_10G100G_SCRATCH 0x904
+#define A_MAC_PORT_MTIP_10G100G_COMMAND_CONFIG 0x908
+
+#define S_NO_PREAM 31
+#define V_NO_PREAM(x) ((x) << S_NO_PREAM)
+#define F_NO_PREAM V_NO_PREAM(1U)
+
+#define S_SHORT_PREAM 30
+#define V_SHORT_PREAM(x) ((x) << S_SHORT_PREAM)
+#define F_SHORT_PREAM V_SHORT_PREAM(1U)
+
+#define S_FLT_HDL_DIS 27
+#define V_FLT_HDL_DIS(x) ((x) << S_FLT_HDL_DIS)
+#define F_FLT_HDL_DIS V_FLT_HDL_DIS(1U)
+
+#define S_TX_FIFO_RESET 26
+#define V_TX_FIFO_RESET(x) ((x) << S_TX_FIFO_RESET)
+#define F_TX_FIFO_RESET V_TX_FIFO_RESET(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_0 0x90c
+#define A_MAC_PORT_MTIP_10G100G_MAC_ADDR_1 0x910
+#define A_MAC_PORT_MTIP_10G100G_FRM_LENGTH_TX_MTU 0x914
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_SECTIONS 0x91c
+
+#define S_RX10G100G_EMPTY 16
+#define M_RX10G100G_EMPTY 0xffffU
+#define V_RX10G100G_EMPTY(x) ((x) << S_RX10G100G_EMPTY)
+#define G_RX10G100G_EMPTY(x) (((x) >> S_RX10G100G_EMPTY) & M_RX10G100G_EMPTY)
+
+#define S_RX10G100G_AVAIL 0
+#define M_RX10G100G_AVAIL 0xffffU
+#define V_RX10G100G_AVAIL(x) ((x) << S_RX10G100G_AVAIL)
+#define G_RX10G100G_AVAIL(x) (((x) >> S_RX10G100G_AVAIL) & M_RX10G100G_AVAIL)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_SECTIONS 0x920
+
+#define S_TX10G100G_EMPTY 16
+#define M_TX10G100G_EMPTY 0xffffU
+#define V_TX10G100G_EMPTY(x) ((x) << S_TX10G100G_EMPTY)
+#define G_TX10G100G_EMPTY(x) (((x) >> S_TX10G100G_EMPTY) & M_TX10G100G_EMPTY)
+
+#define S_TX10G100G_AVAIL 0
+#define M_TX10G100G_AVAIL 0xffffU
+#define V_TX10G100G_AVAIL(x) ((x) << S_TX10G100G_AVAIL)
+#define G_TX10G100G_AVAIL(x) (((x) >> S_TX10G100G_AVAIL) & M_TX10G100G_AVAIL)
+
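/*
 * Editor's illustrative sketch: the FIFO "sections" registers pack two
 * 16-bit fields into one word -- EMPTY in bits 31:16 and AVAIL in bits
 * 15:0 -- so both halves decode with the G_ macros above.
 */
#include <stdint.h>
#include <stdio.h>

static void
decode_tx_fifo_sections(uint32_t val)
{
	printf("tx fifo: empty=%u avail=%u\n",
	    G_TX10G100G_EMPTY(val), G_TX10G100G_AVAIL(val));
}
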
+#define A_MAC_PORT_MTIP_10G100G_RX_FIFO_ALMOST_F_E 0x924
+#define A_MAC_PORT_MTIP_10G100G_TX_FIFO_ALMOST_F_E 0x928
+#define A_MAC_PORT_MTIP_10G100G_MDIO_CFG_STATUS 0x930
+#define A_MAC_PORT_MTIP_10G100G_MDIO_COMMAND 0x934
+#define A_MAC_PORT_MTIP_10G100G_MDIO_DATA 0x938
+#define A_MAC_PORT_MTIP_10G100G_MDIO_REGADDR 0x93c
+#define A_MAC_PORT_MTIP_10G100G_STATUS 0x940
+
+#define S_T7_TX_ISIDLE 8
+#define V_T7_TX_ISIDLE(x) ((x) << S_T7_TX_ISIDLE)
+#define F_T7_TX_ISIDLE V_T7_TX_ISIDLE(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_TX_IPG_LENGTH 0x944
+
+#define S_IPG_COMP_CNT 16
+#define M_IPG_COMP_CNT 0xffffU
+#define V_IPG_COMP_CNT(x) ((x) << S_IPG_COMP_CNT)
+#define G_IPG_COMP_CNT(x) (((x) >> S_IPG_COMP_CNT) & M_IPG_COMP_CNT)
+
+#define S_AVG_IPG_LEN 2
+#define M_AVG_IPG_LEN 0xfU
+#define V_AVG_IPG_LEN(x) ((x) << S_AVG_IPG_LEN)
+#define G_AVG_IPG_LEN(x) (((x) >> S_AVG_IPG_LEN) & M_AVG_IPG_LEN)
+
+#define S_DSBL_DIC 0
+#define V_DSBL_DIC(x) ((x) << S_DSBL_DIC)
+#define F_DSBL_DIC V_DSBL_DIC(1U)
+
+#define A_MAC_PORT_MTIP_10G100G_CRC_MODE 0x948
+#define A_MAC_PORT_MTIP_10G100G_CL01_PAUSE_QUANTA 0x954
+#define A_MAC_PORT_MTIP_10G100G_CL23_PAUSE_QUANTA 0x958
+#define A_MAC_PORT_MTIP_10G100G_CL45_PAUSE_QUANTA 0x95c
+#define A_MAC_PORT_MTIP_10G100G_CL67_PAUSE_QUANTA 0x960
+#define A_MAC_PORT_MTIP_10G100G_CL01_QUANTA_THRESH 0x964
+#define A_MAC_PORT_MTIP_10G100G_CL23_QUANTA_THRESH 0x968
+#define A_MAC_PORT_MTIP_10G100G_CL45_QUANTA_THRESH 0x96c
+#define A_MAC_PORT_MTIP_10G100G_CL67_QUANTA_THRESH 0x970
+#define A_MAC_PORT_MTIP_10G100G_RX_PAUSE_STATUS 0x974
+#define A_MAC_PORT_MTIP_10G100G_TS_TIMESTAMP 0x97c
+#define A_MAC_PORT_MTIP_10G100G_XIF_MODE 0x980
+
+#define S_RX_CNT_MODE 16
+#define V_RX_CNT_MODE(x) ((x) << S_RX_CNT_MODE)
+#define F_RX_CNT_MODE V_RX_CNT_MODE(1U)
+
+#define S_TS_UPD64_MODE 12
+#define V_TS_UPD64_MODE(x) ((x) << S_TS_UPD64_MODE)
+#define F_TS_UPD64_MODE V_TS_UPD64_MODE(1U)
+
+#define S_TS_BINARY_MODE 11
+#define V_TS_BINARY_MODE(x) ((x) << S_TS_BINARY_MODE)
+#define F_TS_BINARY_MODE V_TS_BINARY_MODE(1U)
+
+#define S_TS_DELAY_MODE 10
+#define V_TS_DELAY_MODE(x) ((x) << S_TS_DELAY_MODE)
+#define F_TS_DELAY_MODE V_TS_DELAY_MODE(1U)
+
+#define S_TS_DELTA_MODE 9
+#define V_TS_DELTA_MODE(x) ((x) << S_TS_DELTA_MODE)
+#define F_TS_DELTA_MODE V_TS_DELTA_MODE(1U)
+
+#define S_TX_MAC_RS_ERR 8
+#define V_TX_MAC_RS_ERR(x) ((x) << S_TX_MAC_RS_ERR)
+#define F_TX_MAC_RS_ERR V_TX_MAC_RS_ERR(1U)
+
+#define S_RX_PAUSE_BYPASS 6
+#define V_RX_PAUSE_BYPASS(x) ((x) << S_RX_PAUSE_BYPASS)
+#define F_RX_PAUSE_BYPASS V_RX_PAUSE_BYPASS(1U)
+
+#define S_ONE_STEP_ENA 5
+#define V_ONE_STEP_ENA(x) ((x) << S_ONE_STEP_ENA)
+#define F_ONE_STEP_ENA V_ONE_STEP_ENA(1U)
+
+#define S_PAUSETIMERX8 4
+#define V_PAUSETIMERX8(x) ((x) << S_PAUSETIMERX8)
+#define F_PAUSETIMERX8 V_PAUSETIMERX8(1U)
+
+#define S_XGMII_ENA 0
+#define V_XGMII_ENA(x) ((x) << S_XGMII_ENA)
+#define F_XGMII_ENA V_XGMII_ENA(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_1 0xa00
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_1 0xa04
+
+#define S_CR4_0_RX_LINK_STATUS 2
+#define V_CR4_0_RX_LINK_STATUS(x) ((x) << S_CR4_0_RX_LINK_STATUS)
+#define F_CR4_0_RX_LINK_STATUS V_CR4_0_RX_LINK_STATUS(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID0 0xa08
+
+#define S_CR4_0_DEVICE_ID0 0
+#define M_CR4_0_DEVICE_ID0 0xffffU
+#define V_CR4_0_DEVICE_ID0(x) ((x) << S_CR4_0_DEVICE_ID0)
+#define G_CR4_0_DEVICE_ID0(x) (((x) >> S_CR4_0_DEVICE_ID0) & M_CR4_0_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICE_ID1 0xa0c
+
+#define S_CR4_0_DEVICE_ID1 0
+#define M_CR4_0_DEVICE_ID1 0xffffU
+#define V_CR4_0_DEVICE_ID1(x) ((x) << S_CR4_0_DEVICE_ID1)
+#define G_CR4_0_DEVICE_ID1(x) (((x) >> S_CR4_0_DEVICE_ID1) & M_CR4_0_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SPEED_ABILITY 0xa10
+
+#define S_50G_CAPABLE 5
+#define V_50G_CAPABLE(x) ((x) << S_50G_CAPABLE)
+#define F_50G_CAPABLE V_50G_CAPABLE(1U)
+
+#define S_25G_CAPABLE 4
+#define V_25G_CAPABLE(x) ((x) << S_25G_CAPABLE)
+#define F_25G_CAPABLE V_25G_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG1 0xa14
+#define A_MAC_PORT_MTIP_CR4_0_DEVICES_IN_PKG2 0xa18
+#define A_MAC_PORT_MTIP_CR4_0_CONTROL_2 0xa1c
+
+#define S_T7_PCS_TYPE_SELECTION 0
+#define M_T7_PCS_TYPE_SELECTION 0xfU
+#define V_T7_PCS_TYPE_SELECTION(x) ((x) << S_T7_PCS_TYPE_SELECTION)
+#define G_T7_PCS_TYPE_SELECTION(x) (((x) >> S_T7_PCS_TYPE_SELECTION) & M_T7_PCS_TYPE_SELECTION)
+
+#define A_MAC_PORT_MTIP_CR4_0_STATUS_2 0xa20
+
+#define S_50GBASE_R_CAPABLE 8
+#define V_50GBASE_R_CAPABLE(x) ((x) << S_50GBASE_R_CAPABLE)
+#define F_50GBASE_R_CAPABLE V_50GBASE_R_CAPABLE(1U)
+
+#define S_25GBASE_R_CAPABLE 7
+#define V_25GBASE_R_CAPABLE(x) ((x) << S_25GBASE_R_CAPABLE)
+#define F_25GBASE_R_CAPABLE V_25GBASE_R_CAPABLE(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID0 0xa38
+#define A_MAC_PORT_MTIP_CR4_0_PKG_ID1 0xa3c
+#define A_MAC_PORT_MTIP_CR4_0_EEE_CTRL 0xa50
+
+#define S_50GBASE_R_FW 14
+#define V_50GBASE_R_FW(x) ((x) << S_50GBASE_R_FW)
+#define F_50GBASE_R_FW V_50GBASE_R_FW(1U)
+
+#define S_100GBASE_R_DS 13
+#define V_100GBASE_R_DS(x) ((x) << S_100GBASE_R_DS)
+#define F_100GBASE_R_DS V_100GBASE_R_DS(1U)
+
+#define S_100GBASE_R_FW 12
+#define V_100GBASE_R_FW(x) ((x) << S_100GBASE_R_FW)
+#define F_100GBASE_R_FW V_100GBASE_R_FW(1U)
+
+#define S_25GBASE_R_DS 11
+#define V_25GBASE_R_DS(x) ((x) << S_25GBASE_R_DS)
+#define F_25GBASE_R_DS V_25GBASE_R_DS(1U)
+
+#define S_25GBASE_R_FW 10
+#define V_25GBASE_R_FW(x) ((x) << S_25GBASE_R_FW)
+#define F_25GBASE_R_FW V_25GBASE_R_FW(1U)
+
+#define S_40GBASE_R_DS 9
+#define V_40GBASE_R_DS(x) ((x) << S_40GBASE_R_DS)
+#define F_40GBASE_R_DS V_40GBASE_R_DS(1U)
+
+#define S_40GBASE_R_FW 8
+#define V_40GBASE_R_FW(x) ((x) << S_40GBASE_R_FW)
+#define F_40GBASE_R_FW V_40GBASE_R_FW(1U)
+
+#define S_10GBASE_KE_EEE 6
+#define V_10GBASE_KE_EEE(x) ((x) << S_10GBASE_KE_EEE)
+#define F_10GBASE_KE_EEE V_10GBASE_KE_EEE(1U)
+
+#define S_FAST_WAKE 1
+#define M_FAST_WAKE 0x1fU
+#define V_FAST_WAKE(x) ((x) << S_FAST_WAKE)
+#define G_FAST_WAKE(x) (((x) >> S_FAST_WAKE) & M_FAST_WAKE)
+
+#define S_DEEP_SLEEP 0
+#define V_DEEP_SLEEP(x) ((x) << S_DEEP_SLEEP)
+#define F_DEEP_SLEEP V_DEEP_SLEEP(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_WAKE_ERROR_COUNTER 0xa58
+
+#define S_WAKE_ERROR_COUNTER 0
+#define M_WAKE_ERROR_COUNTER 0x1ffffU
+#define V_WAKE_ERROR_COUNTER(x) ((x) << S_WAKE_ERROR_COUNTER)
+#define G_WAKE_ERROR_COUNTER(x) (((x) >> S_WAKE_ERROR_COUNTER) & M_WAKE_ERROR_COUNTER)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_1 0xa80
+
+#define S_CR4_0_BR_BLOCK_LOCK 0
+#define V_CR4_0_BR_BLOCK_LOCK(x) ((x) << S_CR4_0_BR_BLOCK_LOCK)
+#define F_CR4_0_BR_BLOCK_LOCK V_CR4_0_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_STATUS_2 0xa84
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_0 0xa88
+
+#define S_SEED_A_0 0
+#define M_SEED_A_0 0xffffU
+#define V_SEED_A_0(x) ((x) << S_SEED_A_0)
+#define G_SEED_A_0(x) (((x) >> S_SEED_A_0) & M_SEED_A_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_1 0xa8c
+
+#define S_SEED_A_1 0
+#define M_SEED_A_1 0xffffU
+#define V_SEED_A_1(x) ((x) << S_SEED_A_1)
+#define G_SEED_A_1(x) (((x) >> S_SEED_A_1) & M_SEED_A_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_2 0xa90
+
+#define S_SEED_A_2 0
+#define M_SEED_A_2 0xffffU
+#define V_SEED_A_2(x) ((x) << S_SEED_A_2)
+#define G_SEED_A_2(x) (((x) >> S_SEED_A_2) & M_SEED_A_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_A_3 0xa94
+
+#define S_SEED_A_3 0
+#define M_SEED_A_3 0xffffU
+#define V_SEED_A_3(x) ((x) << S_SEED_A_3)
+#define G_SEED_A_3(x) (((x) >> S_SEED_A_3) & M_SEED_A_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_0 0xa98
+
+#define S_SEED_B_0 0
+#define M_SEED_B_0 0xffffU
+#define V_SEED_B_0(x) ((x) << S_SEED_B_0)
+#define G_SEED_B_0(x) (((x) >> S_SEED_B_0) & M_SEED_B_0)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_1 0xa9c
+
+#define S_SEED_B_1 0
+#define M_SEED_B_1 0xffffU
+#define V_SEED_B_1(x) ((x) << S_SEED_B_1)
+#define G_SEED_B_1(x) (((x) >> S_SEED_B_1) & M_SEED_B_1)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_2 0xaa0
+
+#define S_SEED_B_2 0
+#define M_SEED_B_2 0xffffU
+#define V_SEED_B_2(x) ((x) << S_SEED_B_2)
+#define G_SEED_B_2(x) (((x) >> S_SEED_B_2) & M_SEED_B_2)
+
+#define A_MAC_PORT_MTIP_CR4_0_SEED_B_3 0xaa4
+
+#define S_SEED_B_3 0
+#define M_SEED_B_3 0xffffU
+#define V_SEED_B_3(x) ((x) << S_SEED_B_3)
+#define G_SEED_B_3(x) (((x) >> S_SEED_B_3) & M_SEED_B_3)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_PATTERN_CONTROL 0xaa8
+
+#define S_TEST_PATTERN_40G 7
+#define V_TEST_PATTERN_40G(x) ((x) << S_TEST_PATTERN_40G)
+#define F_TEST_PATTERN_40G V_TEST_PATTERN_40G(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_BASE_R_TEST_ERR_CNT 0xaac
+#define A_MAC_PORT_MTIP_CR4_0_BER_HIGH_ORDER_CNT 0xab0
+
+#define S_BASE_R_BER_HIGH_ORDER_CNT 0
+#define M_BASE_R_BER_HIGH_ORDER_CNT 0xffffU
+#define V_BASE_R_BER_HIGH_ORDER_CNT(x) ((x) << S_BASE_R_BER_HIGH_ORDER_CNT)
+#define G_BASE_R_BER_HIGH_ORDER_CNT(x) (((x) >> S_BASE_R_BER_HIGH_ORDER_CNT) & M_BASE_R_BER_HIGH_ORDER_CNT)
+
+#define A_MAC_PORT_MTIP_CR4_0_ERR_BLK_HIGH_ORDER_CNT 0xab4
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_1 0xac8
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_2 0xacc
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_3 0xad0
+#define A_MAC_PORT_MTIP_CR4_0_MULTI_LANE_ALIGN_STATUS_4 0xad4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_0 0xad8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_1 0xadc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_2 0xae0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_3 0xae4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_4 0xae8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_5 0xaec
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_6 0xaf0
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_7 0xaf4
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_8 0xaf8
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_9 0xafc
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_10 0xb00
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_11 0xb04
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_12 0xb08
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_13 0xb0c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_14 0xb10
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_15 0xb14
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_16 0xb18
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_17 0xb1c
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_18 0xb20
+#define A_MAC_PORT_MTIP_CR4_0_BIP_ERR_CNTLANE_19 0xb24
+#define A_MAC_PORT_MTIP_CR4_0_LANE_0_MAPPING 0xb28
+#define A_MAC_PORT_MTIP_CR4_0_LANE_1_MAPPING 0xb2c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_2_MAPPING 0xb30
+#define A_MAC_PORT_MTIP_CR4_0_LANE_3_MAPPING 0xb34
+#define A_MAC_PORT_MTIP_CR4_0_LANE_4_MAPPING 0xb38
+#define A_MAC_PORT_MTIP_CR4_0_LANE_5_MAPPING 0xb3c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_6_MAPPING 0xb40
+#define A_MAC_PORT_MTIP_CR4_0_LANE_7_MAPPING 0xb44
+#define A_MAC_PORT_MTIP_CR4_0_LANE_8_MAPPING 0xb48
+#define A_MAC_PORT_MTIP_CR4_0_LANE_9_MAPPING 0xb4c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_10_MAPPING 0xb50
+#define A_MAC_PORT_MTIP_CR4_0_LANE_11_MAPPING 0xb54
+#define A_MAC_PORT_MTIP_CR4_0_LANE_12_MAPPING 0xb58
+#define A_MAC_PORT_MTIP_CR4_0_LANE_13_MAPPING 0xb5c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_14_MAPPING 0xb60
+#define A_MAC_PORT_MTIP_CR4_0_LANE_15_MAPPING 0xb64
+#define A_MAC_PORT_MTIP_CR4_0_LANE_16_MAPPING 0xb68
+#define A_MAC_PORT_MTIP_CR4_0_LANE_17_MAPPING 0xb6c
+#define A_MAC_PORT_MTIP_CR4_0_LANE_18_MAPPING 0xb70
+#define A_MAC_PORT_MTIP_CR4_0_LANE_19_MAPPING 0xb74
+#define A_MAC_PORT_MTIP_CR4_0_SCRATCH 0xb78
+#define A_MAC_PORT_MTIP_CR4_0_CORE_REVISION 0xb7c
+#define A_MAC_PORT_MTIP_CR4_0_VL_INTVL 0xb80
+
+#define S_VL_INTCL 0
+#define M_VL_INTCL 0xffffU
+#define V_VL_INTCL(x) ((x) << S_VL_INTCL)
+#define G_VL_INTCL(x) (((x) >> S_VL_INTCL) & M_VL_INTCL)
+
+#define A_MAC_PORT_MTIP_CR4_0_TX_LANE_THRESH 0xb84
+
+#define S_LANE6_LANE7 12
+#define M_LANE6_LANE7 0xfU
+#define V_LANE6_LANE7(x) ((x) << S_LANE6_LANE7)
+#define G_LANE6_LANE7(x) (((x) >> S_LANE6_LANE7) & M_LANE6_LANE7)
+
+#define S_LANE4_LANE5 8
+#define M_LANE4_LANE5 0xfU
+#define V_LANE4_LANE5(x) ((x) << S_LANE4_LANE5)
+#define G_LANE4_LANE5(x) (((x) >> S_LANE4_LANE5) & M_LANE4_LANE5)
+
+#define S_LANE2_LANE3 4
+#define M_LANE2_LANE3 0xfU
+#define V_LANE2_LANE3(x) ((x) << S_LANE2_LANE3)
+#define G_LANE2_LANE3(x) (((x) >> S_LANE2_LANE3) & M_LANE2_LANE3)
+
+#define S_LANE0_LANE1 0
+#define M_LANE0_LANE1 0xfU
+#define V_LANE0_LANE1(x) ((x) << S_LANE0_LANE1)
+#define G_LANE0_LANE1(x) (((x) >> S_LANE0_LANE1) & M_LANE0_LANE1)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_0 0xb98
+
+#define S_M1 8
+#define M_M1 0xffU
+#define V_M1(x) ((x) << S_M1)
+#define G_M1(x) (((x) >> S_M1) & M_M1)
+
+#define S_M0 0
+#define M_M0 0xffU
+#define V_M0(x) ((x) << S_M0)
+#define G_M0(x) (((x) >> S_M0) & M_M0)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL0_1 0xb9c
+
+#define S_M2 0
+#define M_M2 0xffU
+#define V_M2(x) ((x) << S_M2)
+#define G_M2(x) (((x) >> S_M2) & M_M2)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL1_0 0xba0
+#define A_MAC_PORT_MTIP_CR4_0_VL1_1 0xba4
+#define A_MAC_PORT_MTIP_CR4_0_VL2_0 0xba8
+#define A_MAC_PORT_MTIP_CR4_0_VL2_1 0xbac
+#define A_MAC_PORT_MTIP_CR4_0_VL3_0 0xbb0
+#define A_MAC_PORT_MTIP_CR4_0_VL3_1 0xbb4
+#define A_MAC_PORT_MTIP_CR4_0_PCS_MODE 0xbb8
+
+#define S_ST_DISABLE_MLD 9
+#define V_ST_DISABLE_MLD(x) ((x) << S_ST_DISABLE_MLD)
+#define F_ST_DISABLE_MLD V_ST_DISABLE_MLD(1U)
+
+#define S_ST_EN_CLAUSE49 8
+#define V_ST_EN_CLAUSE49(x) ((x) << S_ST_EN_CLAUSE49)
+#define F_ST_EN_CLAUSE49 V_ST_EN_CLAUSE49(1U)
+
+#define S_HI_BER25 2
+#define V_HI_BER25(x) ((x) << S_HI_BER25)
+#define F_HI_BER25 V_HI_BER25(1U)
+
+#define S_DISABLE_MLD 1
+#define V_DISABLE_MLD(x) ((x) << S_DISABLE_MLD)
+#define F_DISABLE_MLD V_DISABLE_MLD(1U)
+
+#define S_ENA_CLAUSE49 0
+#define V_ENA_CLAUSE49(x) ((x) << S_ENA_CLAUSE49)
+#define F_ENA_CLAUSE49 V_ENA_CLAUSE49(1U)
+
+#define A_MAC_PORT_MTIP_CR4_0_VL4_0 0xc98
+#define A_MAC_PORT_MTIP_CR4_0_VL4_1 0xc9c
+#define A_MAC_PORT_MTIP_CR4_0_VL5_0 0xca0
+#define A_MAC_PORT_MTIP_CR4_0_VL5_1 0xca4
+#define A_MAC_PORT_MTIP_CR4_0_VL6_0 0xca8
+#define A_MAC_PORT_MTIP_CR4_0_VL6_1 0xcac
+#define A_MAC_PORT_MTIP_CR4_0_VL7_0 0xcb0
+#define A_MAC_PORT_MTIP_CR4_0_VL7_1 0xcb4
+#define A_MAC_PORT_MTIP_CR4_0_VL8_0 0xcb8
+#define A_MAC_PORT_MTIP_CR4_0_VL8_1 0xcbc
+#define A_MAC_PORT_MTIP_CR4_0_VL9_0 0xcc0
+#define A_MAC_PORT_MTIP_CR4_0_VL9_1 0xcc4
+#define A_MAC_PORT_MTIP_CR4_0_VL10_0 0xcc8
+#define A_MAC_PORT_MTIP_CR4_0_VL10_1 0xccc
+#define A_MAC_PORT_MTIP_CR4_0_VL11_0 0xcd0
+#define A_MAC_PORT_MTIP_CR4_0_VL11_1 0xcd4
+#define A_MAC_PORT_MTIP_CR4_0_VL12_0 0xcd8
+#define A_MAC_PORT_MTIP_CR4_0_VL12_1 0xcdc
+#define A_MAC_PORT_MTIP_CR4_0_VL13_0 0xce0
+#define A_MAC_PORT_MTIP_CR4_0_VL13_1 0xce4
+#define A_MAC_PORT_MTIP_CR4_0_VL14_0 0xce8
+#define A_MAC_PORT_MTIP_CR4_0_VL14_1 0xcec
+#define A_MAC_PORT_MTIP_CR4_0_VL15_0 0xcf0
+#define A_MAC_PORT_MTIP_CR4_0_VL15_1 0xcf4
+#define A_MAC_PORT_MTIP_CR4_0_VL16_0 0xcf8
+#define A_MAC_PORT_MTIP_CR4_0_VL16_1 0xcfc
+#define A_MAC_PORT_MTIP_CR4_0_VL17_0 0xd00
+#define A_MAC_PORT_MTIP_CR4_0_VL17_1 0xd04
+#define A_MAC_PORT_MTIP_CR4_0_VL18_0 0xd08
+#define A_MAC_PORT_MTIP_CR4_0_VL18_1 0xd0c
+#define A_MAC_PORT_MTIP_CR4_0_VL19_0 0xd10
+#define A_MAC_PORT_MTIP_CR4_0_VL19_1 0xd14
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_1 0x1000
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_1 0x1004
+
+#define S_CR4_RX_LINK_STATUS_1 2
+#define V_CR4_RX_LINK_STATUS_1(x) ((x) << S_CR4_RX_LINK_STATUS_1)
+#define F_CR4_RX_LINK_STATUS_1 V_CR4_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID0 0x1008
+
+#define S_CR4_1_DEVICE_ID0 0
+#define M_CR4_1_DEVICE_ID0 0xffffU
+#define V_CR4_1_DEVICE_ID0(x) ((x) << S_CR4_1_DEVICE_ID0)
+#define G_CR4_1_DEVICE_ID0(x) (((x) >> S_CR4_1_DEVICE_ID0) & M_CR4_1_DEVICE_ID0)
+
+#define A_MAC_PORT_MTIP_CR4_1_DEVICE_ID1 0x100c
+
+#define S_CR4_1_DEVICE_ID1 0
+#define M_CR4_1_DEVICE_ID1 0xffffU
+#define V_CR4_1_DEVICE_ID1(x) ((x) << S_CR4_1_DEVICE_ID1)
+#define G_CR4_1_DEVICE_ID1(x) (((x) >> S_CR4_1_DEVICE_ID1) & M_CR4_1_DEVICE_ID1)
+
+#define A_MAC_PORT_MTIP_CR4_1_SPEED_ABILITY 0x1010
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG1 0x1014
+#define A_MAC_PORT_MTIP_CR4_1_DEVICES_IN_PKG2 0x1018
+#define A_MAC_PORT_MTIP_CR4_1_CONTROL_2 0x101c
+#define A_MAC_PORT_MTIP_CR4_1_STATUS_2 0x1020
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID0 0x1038
+#define A_MAC_PORT_MTIP_CR4_1_PKG_ID1 0x103c
+#define A_MAC_PORT_MTIP_CR4_1_EEE_CTRL 0x1050
+#define A_MAC_PORT_MTIP_CR4_1_WAKE_ERROR_COUNTER 0x1058
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_1 0x1080
+
+#define S_CR4_1_BR_BLOCK_LOCK 0
+#define V_CR4_1_BR_BLOCK_LOCK(x) ((x) << S_CR4_1_BR_BLOCK_LOCK)
+#define F_CR4_1_BR_BLOCK_LOCK V_CR4_1_BR_BLOCK_LOCK(1U)
+
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_STATUS_2 0x1084
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_0 0x1088
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_1 0x108c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_2 0x1090
+#define A_MAC_PORT_MTIP_CR4_1_SEED_A_3 0x1094
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_0 0x1098
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_1 0x109c
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_2 0x10a0
+#define A_MAC_PORT_MTIP_CR4_1_SEED_B_3 0x10a4
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_PATTERN_CONTROL 0x10a8
+#define A_MAC_PORT_MTIP_CR4_1_BASE_R_TEST_ERR_CNT 0x10ac
+#define A_MAC_PORT_MTIP_CR4_1_BER_HIGH_ORDER_CNT 0x10b0
+#define A_MAC_PORT_MTIP_CR4_1_ERR_BLK_HIGH_ORDER_CNT 0x10b4
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_1 0x10c8
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_2 0x10cc
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_3 0x10d0
+#define A_MAC_PORT_MTIP_CR4_1_MULTI_LANE_ALIGN_STATUS_4 0x10d4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_0 0x10d8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_1 0x10dc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_2 0x10e0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_3 0x10e4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_4 0x10e8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_5 0x10ec
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_6 0x10f0
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_7 0x10f4
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_8 0x10f8
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_9 0x10fc
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_10 0x1100
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_11 0x1104
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_12 0x1108
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_13 0x110c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_14 0x1110
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_15 0x1114
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_16 0x1118
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_17 0x111c
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_18 0x1120
+#define A_MAC_PORT_MTIP_CR4_1_BIP_ERR_CNTLANE_19 0x1124
+#define A_MAC_PORT_MTIP_CR4_1_LANE_0_MAPPING 0x1128
+#define A_MAC_PORT_MTIP_CR4_1_LANE_1_MAPPING 0x112c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_2_MAPPING 0x1130
+#define A_MAC_PORT_MTIP_CR4_1_LANE_3_MAPPING 0x1134
+#define A_MAC_PORT_MTIP_CR4_1_LANE_4_MAPPING 0x1138
+#define A_MAC_PORT_MTIP_CR4_1_LANE_5_MAPPING 0x113c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_6_MAPPING 0x1140
+#define A_MAC_PORT_MTIP_CR4_1_LANE_7_MAPPING 0x1144
+#define A_MAC_PORT_MTIP_CR4_1_LANE_8_MAPPING 0x1148
+#define A_MAC_PORT_MTIP_CR4_1_LANE_9_MAPPING 0x114c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_10_MAPPING 0x1150
+#define A_MAC_PORT_MTIP_CR4_1_LANE_11_MAPPING 0x1154
+#define A_MAC_PORT_MTIP_CR4_1_LANE_12_MAPPING 0x1158
+#define A_MAC_PORT_MTIP_CR4_1_LANE_13_MAPPING 0x115c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_14_MAPPING 0x1160
+#define A_MAC_PORT_MTIP_CR4_1_LANE_15_MAPPING 0x1164
+#define A_MAC_PORT_MTIP_CR4_1_LANE_16_MAPPING 0x1168
+#define A_MAC_PORT_MTIP_CR4_1_LANE_17_MAPPING 0x116c
+#define A_MAC_PORT_MTIP_CR4_1_LANE_18_MAPPING 0x1170
+#define A_MAC_PORT_MTIP_CR4_1_LANE_19_MAPPING 0x1174
+#define A_MAC_PORT_MTIP_CR4_1_SCRATCH 0x1178
+#define A_MAC_PORT_MTIP_CR4_1_CORE_REVISION 0x117c
+#define A_MAC_PORT_MTIP_CR4_1_VL_INTVL 0x1180
+#define A_MAC_PORT_MTIP_CR4_1_TX_LANE_THRESH 0x1184
+#define A_MAC_PORT_MTIP_CR4_1_VL0_0 0x1198
+#define A_MAC_PORT_MTIP_CR4_1_VL0_1 0x119c
+#define A_MAC_PORT_MTIP_CR4_1_VL1_0 0x11a0
+#define A_MAC_PORT_MTIP_CR4_1_VL1_1 0x11a4
+#define A_MAC_PORT_MTIP_CR4_1_VL2_0 0x11a8
+#define A_MAC_PORT_MTIP_CR4_1_VL2_1 0x11ac
+#define A_MAC_PORT_MTIP_CR4_1_VL3_0 0x11b0
+#define A_MAC_PORT_MTIP_CR4_1_VL3_1 0x11b4
+#define A_MAC_PORT_MTIP_CR4_1_PCS_MODE 0x11b8
+#define A_MAC_COMMON_CFG_0 0x38000
+
+#define S_T7_RX_POLARITY_INV 24
+#define M_T7_RX_POLARITY_INV 0xffU
+#define V_T7_RX_POLARITY_INV(x) ((x) << S_T7_RX_POLARITY_INV)
+#define G_T7_RX_POLARITY_INV(x) (((x) >> S_T7_RX_POLARITY_INV) & M_T7_RX_POLARITY_INV)
+
+#define S_T7_TX_POLARITY_INV 16
+#define M_T7_TX_POLARITY_INV 0xffU
+#define V_T7_TX_POLARITY_INV(x) ((x) << S_T7_TX_POLARITY_INV)
+#define G_T7_TX_POLARITY_INV(x) (((x) >> S_T7_TX_POLARITY_INV) & M_T7_TX_POLARITY_INV)
+
+#define S_T7_DEBUG_PORT_SEL 14
+#define M_T7_DEBUG_PORT_SEL 0x3U
+#define V_T7_DEBUG_PORT_SEL(x) ((x) << S_T7_DEBUG_PORT_SEL)
+#define G_T7_DEBUG_PORT_SEL(x) (((x) >> S_T7_DEBUG_PORT_SEL) & M_T7_DEBUG_PORT_SEL)
+
+#define S_MAC_SEPTY_CTL 8
+#define M_MAC_SEPTY_CTL 0x3fU
+#define V_MAC_SEPTY_CTL(x) ((x) << S_MAC_SEPTY_CTL)
+#define G_MAC_SEPTY_CTL(x) (((x) >> S_MAC_SEPTY_CTL) & M_MAC_SEPTY_CTL)
+
+#define S_T7_DEBUG_TX_RX_SEL 7
+#define V_T7_DEBUG_TX_RX_SEL(x) ((x) << S_T7_DEBUG_TX_RX_SEL)
+#define F_T7_DEBUG_TX_RX_SEL V_T7_DEBUG_TX_RX_SEL(1U)
+
+#define S_MAC_RDY_CTL 0
+#define M_MAC_RDY_CTL 0x3fU
+#define V_MAC_RDY_CTL(x) ((x) << S_MAC_RDY_CTL)
+#define G_MAC_RDY_CTL(x) (((x) >> S_MAC_RDY_CTL) & M_MAC_RDY_CTL)
+
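/*
 * Editor's illustrative sketch: updating a multi-bit field of
 * A_MAC_COMMON_CFG_0 read-modify-write.  Clearing with V_<F>(M_<F>) and
 * OR-ing in V_<F>(new) is the idiom these macros are built for; the shadow
 * variable stands in for the register read and write-back so no driver
 * accessor has to be assumed.
 */
#include <stdint.h>

static void
set_rx_polarity_inv(uint32_t *cfg0, uint32_t lane_mask)
{
	uint32_t v = *cfg0;	/* value read from A_MAC_COMMON_CFG_0 */

	v &= ~V_T7_RX_POLARITY_INV(M_T7_RX_POLARITY_INV);
	v |= V_T7_RX_POLARITY_INV(lane_mask & M_T7_RX_POLARITY_INV);
	*cfg0 = v;		/* value to write back */
}
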
+#define A_MAC_MTIP_RESET_CTRL_0 0x38004
+
+#define S_RESET_F91_REF_CLK_I 31
+#define V_RESET_F91_REF_CLK_I(x) ((x) << S_RESET_F91_REF_CLK_I)
+#define F_RESET_F91_REF_CLK_I V_RESET_F91_REF_CLK_I(1U)
+
+#define S_RESET_PCS000_REF_CLK_I 30
+#define V_RESET_PCS000_REF_CLK_I(x) ((x) << S_RESET_PCS000_REF_CLK_I)
+#define F_RESET_PCS000_REF_CLK_I V_RESET_PCS000_REF_CLK_I(1U)
+
+#define S_RESET_REF_CLK_I 29
+#define V_RESET_REF_CLK_I(x) ((x) << S_RESET_REF_CLK_I)
+#define F_RESET_REF_CLK_I V_RESET_REF_CLK_I(1U)
+
+#define S_RESET_SD_RX_CLK_I_0 28
+#define V_RESET_SD_RX_CLK_I_0(x) ((x) << S_RESET_SD_RX_CLK_I_0)
+#define F_RESET_SD_RX_CLK_I_0 V_RESET_SD_RX_CLK_I_0(1U)
+
+#define S_RESET_SD_RX_CLK_I_1 27
+#define V_RESET_SD_RX_CLK_I_1(x) ((x) << S_RESET_SD_RX_CLK_I_1)
+#define F_RESET_SD_RX_CLK_I_1 V_RESET_SD_RX_CLK_I_1(1U)
+
+#define S_RESET_SD_RX_CLK_I_2 26
+#define V_RESET_SD_RX_CLK_I_2(x) ((x) << S_RESET_SD_RX_CLK_I_2)
+#define F_RESET_SD_RX_CLK_I_2 V_RESET_SD_RX_CLK_I_2(1U)
+
+#define S_RESET_SD_RX_CLK_I_3 25
+#define V_RESET_SD_RX_CLK_I_3(x) ((x) << S_RESET_SD_RX_CLK_I_3)
+#define F_RESET_SD_RX_CLK_I_3 V_RESET_SD_RX_CLK_I_3(1U)
+
+#define S_RESET_SD_RX_CLK_I_4 24
+#define V_RESET_SD_RX_CLK_I_4(x) ((x) << S_RESET_SD_RX_CLK_I_4)
+#define F_RESET_SD_RX_CLK_I_4 V_RESET_SD_RX_CLK_I_4(1U)
+
+#define S_RESET_SD_RX_CLK_I_5 23
+#define V_RESET_SD_RX_CLK_I_5(x) ((x) << S_RESET_SD_RX_CLK_I_5)
+#define F_RESET_SD_RX_CLK_I_5 V_RESET_SD_RX_CLK_I_5(1U)
+
+#define S_RESET_SD_RX_CLK_I_6 22
+#define V_RESET_SD_RX_CLK_I_6(x) ((x) << S_RESET_SD_RX_CLK_I_6)
+#define F_RESET_SD_RX_CLK_I_6 V_RESET_SD_RX_CLK_I_6(1U)
+
+#define S_RESET_SD_RX_CLK_I_7 21
+#define V_RESET_SD_RX_CLK_I_7(x) ((x) << S_RESET_SD_RX_CLK_I_7)
+#define F_RESET_SD_RX_CLK_I_7 V_RESET_SD_RX_CLK_I_7(1U)
+
+#define S_RESET_SD_TX_CLK_I_0 20
+#define V_RESET_SD_TX_CLK_I_0(x) ((x) << S_RESET_SD_TX_CLK_I_0)
+#define F_RESET_SD_TX_CLK_I_0 V_RESET_SD_TX_CLK_I_0(1U)
+
+#define S_RESET_SD_TX_CLK_I_1 19
+#define V_RESET_SD_TX_CLK_I_1(x) ((x) << S_RESET_SD_TX_CLK_I_1)
+#define F_RESET_SD_TX_CLK_I_1 V_RESET_SD_TX_CLK_I_1(1U)
+
+#define S_RESET_SD_TX_CLK_I_2 18
+#define V_RESET_SD_TX_CLK_I_2(x) ((x) << S_RESET_SD_TX_CLK_I_2)
+#define F_RESET_SD_TX_CLK_I_2 V_RESET_SD_TX_CLK_I_2(1U)
+
+#define S_RESET_SD_TX_CLK_I_3 17
+#define V_RESET_SD_TX_CLK_I_3(x) ((x) << S_RESET_SD_TX_CLK_I_3)
+#define F_RESET_SD_TX_CLK_I_3 V_RESET_SD_TX_CLK_I_3(1U)
+
+#define S_RESET_SD_TX_CLK_I_4 16
+#define V_RESET_SD_TX_CLK_I_4(x) ((x) << S_RESET_SD_TX_CLK_I_4)
+#define F_RESET_SD_TX_CLK_I_4 V_RESET_SD_TX_CLK_I_4(1U)
+
+#define S_RESET_SD_TX_CLK_I_5 15
+#define V_RESET_SD_TX_CLK_I_5(x) ((x) << S_RESET_SD_TX_CLK_I_5)
+#define F_RESET_SD_TX_CLK_I_5 V_RESET_SD_TX_CLK_I_5(1U)
+
+#define S_RESET_SD_TX_CLK_I_6 14
+#define V_RESET_SD_TX_CLK_I_6(x) ((x) << S_RESET_SD_TX_CLK_I_6)
+#define F_RESET_SD_TX_CLK_I_6 V_RESET_SD_TX_CLK_I_6(1U)
+
+#define S_RESET_SD_TX_CLK_I_7 13
+#define V_RESET_SD_TX_CLK_I_7(x) ((x) << S_RESET_SD_TX_CLK_I_7)
+#define F_RESET_SD_TX_CLK_I_7 V_RESET_SD_TX_CLK_I_7(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_0 12
+#define V_RESET_XPCS_REF_CLK_I_0(x) ((x) << S_RESET_XPCS_REF_CLK_I_0)
+#define F_RESET_XPCS_REF_CLK_I_0 V_RESET_XPCS_REF_CLK_I_0(1U)
+
+#define S_RESET_XPCS_REF_CLK_I_1 11
+#define V_RESET_XPCS_REF_CLK_I_1(x) ((x) << S_RESET_XPCS_REF_CLK_I_1)
+#define F_RESET_XPCS_REF_CLK_I_1 V_RESET_XPCS_REF_CLK_I_1(1U)
+
+#define S_RESET_FF_RX_CLK_0_I 9
+#define V_RESET_FF_RX_CLK_0_I(x) ((x) << S_RESET_FF_RX_CLK_0_I)
+#define F_RESET_FF_RX_CLK_0_I V_RESET_FF_RX_CLK_0_I(1U)
+
+#define S_RESET_FF_TX_CLK_0_I 8
+#define V_RESET_FF_TX_CLK_0_I(x) ((x) << S_RESET_FF_TX_CLK_0_I)
+#define F_RESET_FF_TX_CLK_0_I V_RESET_FF_TX_CLK_0_I(1U)
+
+#define S_RESET_RXCLK_0_I 7
+#define V_RESET_RXCLK_0_I(x) ((x) << S_RESET_RXCLK_0_I)
+#define F_RESET_RXCLK_0_I V_RESET_RXCLK_0_I(1U)
+
+#define S_RESET_TXCLK_0_I 6
+#define V_RESET_TXCLK_0_I(x) ((x) << S_RESET_TXCLK_0_I)
+#define F_RESET_TXCLK_0_I V_RESET_TXCLK_0_I(1U)
+
+#define S_RESET_FF_RX_CLK_1_I 5
+#define V_RESET_FF_RX_CLK_1_I(x) ((x) << S_RESET_FF_RX_CLK_1_I)
+#define F_RESET_FF_RX_CLK_1_I V_RESET_FF_RX_CLK_1_I(1U)
+
+#define S_RESET_FF_TX_CLK_1_I 4
+#define V_RESET_FF_TX_CLK_1_I(x) ((x) << S_RESET_FF_TX_CLK_1_I)
+#define F_RESET_FF_TX_CLK_1_I V_RESET_FF_TX_CLK_1_I(1U)
+
+#define S_RESET_RXCLK_1_I 3
+#define V_RESET_RXCLK_1_I(x) ((x) << S_RESET_RXCLK_1_I)
+#define F_RESET_RXCLK_1_I V_RESET_RXCLK_1_I(1U)
+
+#define S_RESET_TXCLK_1_I 2
+#define V_RESET_TXCLK_1_I(x) ((x) << S_RESET_TXCLK_1_I)
+#define F_RESET_TXCLK_1_I V_RESET_TXCLK_1_I(1U)
+
+#define S_XGMII_CLK_RESET_0 0
+#define V_XGMII_CLK_RESET_0(x) ((x) << S_XGMII_CLK_RESET_0)
+#define F_XGMII_CLK_RESET_0 V_XGMII_CLK_RESET_0(1U)
+
+#define A_MAC_MTIP_RESET_CTRL_1 0x38008
+
+#define S_RESET_FF_RX_CLK_2_I 31
+#define V_RESET_FF_RX_CLK_2_I(x) ((x) << S_RESET_FF_RX_CLK_2_I)
+#define F_RESET_FF_RX_CLK_2_I V_RESET_FF_RX_CLK_2_I(1U)
+
+#define S_RESET_FF_TX_CLK_2_I 30
+#define V_RESET_FF_TX_CLK_2_I(x) ((x) << S_RESET_FF_TX_CLK_2_I)
+#define F_RESET_FF_TX_CLK_2_I V_RESET_FF_TX_CLK_2_I(1U)
+
+#define S_RESET_RXCLK_2_I 29
+#define V_RESET_RXCLK_2_I(x) ((x) << S_RESET_RXCLK_2_I)
+#define F_RESET_RXCLK_2_I V_RESET_RXCLK_2_I(1U)
+
+#define S_RESET_TXCLK_2_I 28
+#define V_RESET_TXCLK_2_I(x) ((x) << S_RESET_TXCLK_2_I)
+#define F_RESET_TXCLK_2_I V_RESET_TXCLK_2_I(1U)
+
+#define S_RESET_FF_RX_CLK_3_I 27
+#define V_RESET_FF_RX_CLK_3_I(x) ((x) << S_RESET_FF_RX_CLK_3_I)
+#define F_RESET_FF_RX_CLK_3_I V_RESET_FF_RX_CLK_3_I(1U)
+
+#define S_RESET_FF_TX_CLK_3_I 26
+#define V_RESET_FF_TX_CLK_3_I(x) ((x) << S_RESET_FF_TX_CLK_3_I)
+#define F_RESET_FF_TX_CLK_3_I V_RESET_FF_TX_CLK_3_I(1U)
+
+#define S_RESET_RXCLK_3_I 25
+#define V_RESET_RXCLK_3_I(x) ((x) << S_RESET_RXCLK_3_I)
+#define F_RESET_RXCLK_3_I V_RESET_RXCLK_3_I(1U)
+
+#define S_RESET_TXCLK_3_I 24
+#define V_RESET_TXCLK_3_I(x) ((x) << S_RESET_TXCLK_3_I)
+#define F_RESET_TXCLK_3_I V_RESET_TXCLK_3_I(1U)
+
+#define S_RESET_FF_RX_CLK_4_I 23
+#define V_RESET_FF_RX_CLK_4_I(x) ((x) << S_RESET_FF_RX_CLK_4_I)
+#define F_RESET_FF_RX_CLK_4_I V_RESET_FF_RX_CLK_4_I(1U)
+
+#define S_RESET_FF_TX_CLK_4_I 22
+#define V_RESET_FF_TX_CLK_4_I(x) ((x) << S_RESET_FF_TX_CLK_4_I)
+#define F_RESET_FF_TX_CLK_4_I V_RESET_FF_TX_CLK_4_I(1U)
+
+#define S_RESET_RXCLK_4_I 21
+#define V_RESET_RXCLK_4_I(x) ((x) << S_RESET_RXCLK_4_I)
+#define F_RESET_RXCLK_4_I V_RESET_RXCLK_4_I(1U)
+
+#define S_RESET_TXCLK_4_I 20
+#define V_RESET_TXCLK_4_I(x) ((x) << S_RESET_TXCLK_4_I)
+#define F_RESET_TXCLK_4_I V_RESET_TXCLK_4_I(1U)
+
+#define S_RESET_FF_RX_CLK_5_I 19
+#define V_RESET_FF_RX_CLK_5_I(x) ((x) << S_RESET_FF_RX_CLK_5_I)
+#define F_RESET_FF_RX_CLK_5_I V_RESET_FF_RX_CLK_5_I(1U)
+
+#define S_RESET_FF_TX_CLK_5_I 18
+#define V_RESET_FF_TX_CLK_5_I(x) ((x) << S_RESET_FF_TX_CLK_5_I)
+#define F_RESET_FF_TX_CLK_5_I V_RESET_FF_TX_CLK_5_I(1U)
+
+#define S_RESET_RXCLK_5_I 17
+#define V_RESET_RXCLK_5_I(x) ((x) << S_RESET_RXCLK_5_I)
+#define F_RESET_RXCLK_5_I V_RESET_RXCLK_5_I(1U)
+
+#define S_RESET_TXCLK_5_I 16
+#define V_RESET_TXCLK_5_I(x) ((x) << S_RESET_TXCLK_5_I)
+#define F_RESET_TXCLK_5_I V_RESET_TXCLK_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_0_I 15
+#define V_RESET_SD_RX_CLK_AN_0_I(x) ((x) << S_RESET_SD_RX_CLK_AN_0_I)
+#define F_RESET_SD_RX_CLK_AN_0_I V_RESET_SD_RX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_0_I 14
+#define V_RESET_SD_TX_CLK_AN_0_I(x) ((x) << S_RESET_SD_TX_CLK_AN_0_I)
+#define F_RESET_SD_TX_CLK_AN_0_I V_RESET_SD_TX_CLK_AN_0_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_1_I 13
+#define V_RESET_SD_RX_CLK_AN_1_I(x) ((x) << S_RESET_SD_RX_CLK_AN_1_I)
+#define F_RESET_SD_RX_CLK_AN_1_I V_RESET_SD_RX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_1_I 12
+#define V_RESET_SD_TX_CLK_AN_1_I(x) ((x) << S_RESET_SD_TX_CLK_AN_1_I)
+#define F_RESET_SD_TX_CLK_AN_1_I V_RESET_SD_TX_CLK_AN_1_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_2_I 11
+#define V_RESET_SD_RX_CLK_AN_2_I(x) ((x) << S_RESET_SD_RX_CLK_AN_2_I)
+#define F_RESET_SD_RX_CLK_AN_2_I V_RESET_SD_RX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_2_I 10
+#define V_RESET_SD_TX_CLK_AN_2_I(x) ((x) << S_RESET_SD_TX_CLK_AN_2_I)
+#define F_RESET_SD_TX_CLK_AN_2_I V_RESET_SD_TX_CLK_AN_2_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_3_I 9
+#define V_RESET_SD_RX_CLK_AN_3_I(x) ((x) << S_RESET_SD_RX_CLK_AN_3_I)
+#define F_RESET_SD_RX_CLK_AN_3_I V_RESET_SD_RX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_3_I 8
+#define V_RESET_SD_TX_CLK_AN_3_I(x) ((x) << S_RESET_SD_TX_CLK_AN_3_I)
+#define F_RESET_SD_TX_CLK_AN_3_I V_RESET_SD_TX_CLK_AN_3_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_4_I 7
+#define V_RESET_SD_RX_CLK_AN_4_I(x) ((x) << S_RESET_SD_RX_CLK_AN_4_I)
+#define F_RESET_SD_RX_CLK_AN_4_I V_RESET_SD_RX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_4_I 6
+#define V_RESET_SD_TX_CLK_AN_4_I(x) ((x) << S_RESET_SD_TX_CLK_AN_4_I)
+#define F_RESET_SD_TX_CLK_AN_4_I V_RESET_SD_TX_CLK_AN_4_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_5_I 5
+#define V_RESET_SD_RX_CLK_AN_5_I(x) ((x) << S_RESET_SD_RX_CLK_AN_5_I)
+#define F_RESET_SD_RX_CLK_AN_5_I V_RESET_SD_RX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_5_I 4
+#define V_RESET_SD_TX_CLK_AN_5_I(x) ((x) << S_RESET_SD_TX_CLK_AN_5_I)
+#define F_RESET_SD_TX_CLK_AN_5_I V_RESET_SD_TX_CLK_AN_5_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_6_I 3
+#define V_RESET_SD_RX_CLK_AN_6_I(x) ((x) << S_RESET_SD_RX_CLK_AN_6_I)
+#define F_RESET_SD_RX_CLK_AN_6_I V_RESET_SD_RX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_6_I 2
+#define V_RESET_SD_TX_CLK_AN_6_I(x) ((x) << S_RESET_SD_TX_CLK_AN_6_I)
+#define F_RESET_SD_TX_CLK_AN_6_I V_RESET_SD_TX_CLK_AN_6_I(1U)
+
+#define S_RESET_SD_RX_CLK_AN_7_I 1
+#define V_RESET_SD_RX_CLK_AN_7_I(x) ((x) << S_RESET_SD_RX_CLK_AN_7_I)
+#define F_RESET_SD_RX_CLK_AN_7_I V_RESET_SD_RX_CLK_AN_7_I(1U)
+
+#define S_RESET_SD_TX_CLK_AN_7_I 0
+#define V_RESET_SD_TX_CLK_AN_7_I(x) ((x) << S_RESET_SD_TX_CLK_AN_7_I)
+#define F_RESET_SD_TX_CLK_AN_7_I V_RESET_SD_TX_CLK_AN_7_I(1U)
+
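/*
 * Editor's illustrative sketch: the reset-control registers carry one bit
 * per clock domain, so a single lane's AN-domain resets assert and release
 * by setting and then clearing its pair of F_ bits.  Operating on a shadow
 * copy keeps the example free of any assumed bus accessor; any required
 * delay between assert and release is hardware-specific.
 */
#include <stdint.h>

static void
pulse_lane0_an_reset(uint32_t *ctrl1)
{
	const uint32_t bits = F_RESET_SD_RX_CLK_AN_0_I |
	    F_RESET_SD_TX_CLK_AN_0_I;

	*ctrl1 |= bits;		/* assert both AN-domain resets for lane 0 */
	/* ... delay per hardware requirements ... */
	*ctrl1 &= ~bits;	/* release */
}
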
+#define A_MAC_MTIP_RESET_CTRL_2 0x3800c
+
+#define S_RESET_SGMII_TXCLK_I_3 31
+#define V_RESET_SGMII_TXCLK_I_3(x) ((x) << S_RESET_SGMII_TXCLK_I_3)
+#define F_RESET_SGMII_TXCLK_I_3 V_RESET_SGMII_TXCLK_I_3(1U)
+
+#define S_RESET_SGMII_RXCLK_I_3 30
+#define V_RESET_SGMII_RXCLK_I_3(x) ((x) << S_RESET_SGMII_RXCLK_I_3)
+#define F_RESET_SGMII_RXCLK_I_3 V_RESET_SGMII_RXCLK_I_3(1U)
+
+#define S_RESET_SGMII_TXCLK_I_2 29
+#define V_RESET_SGMII_TXCLK_I_2(x) ((x) << S_RESET_SGMII_TXCLK_I_2)
+#define F_RESET_SGMII_TXCLK_I_2 V_RESET_SGMII_TXCLK_I_2(1U)
+
+#define S_RESET_SGMII_RXCLK_I_2 28
+#define V_RESET_SGMII_RXCLK_I_2(x) ((x) << S_RESET_SGMII_RXCLK_I_2)
+#define F_RESET_SGMII_RXCLK_I_2 V_RESET_SGMII_RXCLK_I_2(1U)
+
+#define S_RESET_SGMII_TXCLK_I_1 27
+#define V_RESET_SGMII_TXCLK_I_1(x) ((x) << S_RESET_SGMII_TXCLK_I_1)
+#define F_RESET_SGMII_TXCLK_I_1 V_RESET_SGMII_TXCLK_I_1(1U)
+
+#define S_RESET_SGMII_RXCLK_I_1 26
+#define V_RESET_SGMII_RXCLK_I_1(x) ((x) << S_RESET_SGMII_RXCLK_I_1)
+#define F_RESET_SGMII_RXCLK_I_1 V_RESET_SGMII_RXCLK_I_1(1U)
+
+#define S_RESET_SGMII_TXCLK_I_0 25
+#define V_RESET_SGMII_TXCLK_I_0(x) ((x) << S_RESET_SGMII_TXCLK_I_0)
+#define F_RESET_SGMII_TXCLK_I_0 V_RESET_SGMII_TXCLK_I_0(1U)
+
+#define S_RESET_SGMII_RXCLK_I_0 24
+#define V_RESET_SGMII_RXCLK_I_0(x) ((x) << S_RESET_SGMII_RXCLK_I_0)
+#define F_RESET_SGMII_RXCLK_I_0 V_RESET_SGMII_RXCLK_I_0(1U)
+
+#define S_MTIPSD7TXRST 23
+#define V_MTIPSD7TXRST(x) ((x) << S_MTIPSD7TXRST)
+#define F_MTIPSD7TXRST V_MTIPSD7TXRST(1U)
+
+#define S_MTIPSD6TXRST 22
+#define V_MTIPSD6TXRST(x) ((x) << S_MTIPSD6TXRST)
+#define F_MTIPSD6TXRST V_MTIPSD6TXRST(1U)
+
+#define S_MTIPSD5TXRST 21
+#define V_MTIPSD5TXRST(x) ((x) << S_MTIPSD5TXRST)
+#define F_MTIPSD5TXRST V_MTIPSD5TXRST(1U)
+
+#define S_MTIPSD4TXRST 20
+#define V_MTIPSD4TXRST(x) ((x) << S_MTIPSD4TXRST)
+#define F_MTIPSD4TXRST V_MTIPSD4TXRST(1U)
+
+#define S_T7_MTIPSD3TXRST 19
+#define V_T7_MTIPSD3TXRST(x) ((x) << S_T7_MTIPSD3TXRST)
+#define F_T7_MTIPSD3TXRST V_T7_MTIPSD3TXRST(1U)
+
+#define S_T7_MTIPSD2TXRST 18
+#define V_T7_MTIPSD2TXRST(x) ((x) << S_T7_MTIPSD2TXRST)
+#define F_T7_MTIPSD2TXRST V_T7_MTIPSD2TXRST(1U)
+
+#define S_T7_MTIPSD1TXRST 17
+#define V_T7_MTIPSD1TXRST(x) ((x) << S_T7_MTIPSD1TXRST)
+#define F_T7_MTIPSD1TXRST V_T7_MTIPSD1TXRST(1U)
+
+#define S_T7_MTIPSD0TXRST 16
+#define V_T7_MTIPSD0TXRST(x) ((x) << S_T7_MTIPSD0TXRST)
+#define F_T7_MTIPSD0TXRST V_T7_MTIPSD0TXRST(1U)
+
+#define S_MTIPSD7RXRST 15
+#define V_MTIPSD7RXRST(x) ((x) << S_MTIPSD7RXRST)
+#define F_MTIPSD7RXRST V_MTIPSD7RXRST(1U)
+
+#define S_MTIPSD6RXRST 14
+#define V_MTIPSD6RXRST(x) ((x) << S_MTIPSD6RXRST)
+#define F_MTIPSD6RXRST V_MTIPSD6RXRST(1U)
+
+#define S_MTIPSD5RXRST 13
+#define V_MTIPSD5RXRST(x) ((x) << S_MTIPSD5RXRST)
+#define F_MTIPSD5RXRST V_MTIPSD5RXRST(1U)
+
+#define S_MTIPSD4RXRST 12
+#define V_MTIPSD4RXRST(x) ((x) << S_MTIPSD4RXRST)
+#define F_MTIPSD4RXRST V_MTIPSD4RXRST(1U)
+
+#define S_T7_MTIPSD3RXRST 11
+#define V_T7_MTIPSD3RXRST(x) ((x) << S_T7_MTIPSD3RXRST)
+#define F_T7_MTIPSD3RXRST V_T7_MTIPSD3RXRST(1U)
+
+#define S_T7_MTIPSD2RXRST 10
+#define V_T7_MTIPSD2RXRST(x) ((x) << S_T7_MTIPSD2RXRST)
+#define F_T7_MTIPSD2RXRST V_T7_MTIPSD2RXRST(1U)
+
+#define S_T7_MTIPSD1RXRST 9
+#define V_T7_MTIPSD1RXRST(x) ((x) << S_T7_MTIPSD1RXRST)
+#define F_T7_MTIPSD1RXRST V_T7_MTIPSD1RXRST(1U)
+
+#define S_T7_MTIPSD0RXRST 8
+#define V_T7_MTIPSD0RXRST(x) ((x) << S_T7_MTIPSD0RXRST)
+#define F_T7_MTIPSD0RXRST V_T7_MTIPSD0RXRST(1U)
+
+#define S_RESET_REG_CLK_AN_0_I 7
+#define V_RESET_REG_CLK_AN_0_I(x) ((x) << S_RESET_REG_CLK_AN_0_I)
+#define F_RESET_REG_CLK_AN_0_I V_RESET_REG_CLK_AN_0_I(1U)
+
+#define S_RESET_REG_CLK_AN_1_I 6
+#define V_RESET_REG_CLK_AN_1_I(x) ((x) << S_RESET_REG_CLK_AN_1_I)
+#define F_RESET_REG_CLK_AN_1_I V_RESET_REG_CLK_AN_1_I(1U)
+
+#define S_RESET_REG_CLK_AN_2_I 5
+#define V_RESET_REG_CLK_AN_2_I(x) ((x) << S_RESET_REG_CLK_AN_2_I)
+#define F_RESET_REG_CLK_AN_2_I V_RESET_REG_CLK_AN_2_I(1U)
+
+#define S_RESET_REG_CLK_AN_3_I 4
+#define V_RESET_REG_CLK_AN_3_I(x) ((x) << S_RESET_REG_CLK_AN_3_I)
+#define F_RESET_REG_CLK_AN_3_I V_RESET_REG_CLK_AN_3_I(1U)
+
+#define S_RESET_REG_CLK_AN_4_I 3
+#define V_RESET_REG_CLK_AN_4_I(x) ((x) << S_RESET_REG_CLK_AN_4_I)
+#define F_RESET_REG_CLK_AN_4_I V_RESET_REG_CLK_AN_4_I(1U)
+
+#define S_RESET_REG_CLK_AN_5_I 2
+#define V_RESET_REG_CLK_AN_5_I(x) ((x) << S_RESET_REG_CLK_AN_5_I)
+#define F_RESET_REG_CLK_AN_5_I V_RESET_REG_CLK_AN_5_I(1U)
+
+#define S_RESET_REG_CLK_AN_6_I 1
+#define V_RESET_REG_CLK_AN_6_I(x) ((x) << S_RESET_REG_CLK_AN_6_I)
+#define F_RESET_REG_CLK_AN_6_I V_RESET_REG_CLK_AN_6_I(1U)
+
+#define S_RESET_REG_CLK_AN_7_I 0
+#define V_RESET_REG_CLK_AN_7_I(x) ((x) << S_RESET_REG_CLK_AN_7_I)
+#define F_RESET_REG_CLK_AN_7_I V_RESET_REG_CLK_AN_7_I(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_0 0x38010
+
+#define S_F91_REF_CLK_I_G 31
+#define V_F91_REF_CLK_I_G(x) ((x) << S_F91_REF_CLK_I_G)
+#define F_F91_REF_CLK_I_G V_F91_REF_CLK_I_G(1U)
+
+#define S_PCS000_REF_CLK_I_G 30
+#define V_PCS000_REF_CLK_I_G(x) ((x) << S_PCS000_REF_CLK_I_G)
+#define F_PCS000_REF_CLK_I_G V_PCS000_REF_CLK_I_G(1U)
+
+#define S_REF_CLK_I_G 29
+#define V_REF_CLK_I_G(x) ((x) << S_REF_CLK_I_G)
+#define F_REF_CLK_I_G V_REF_CLK_I_G(1U)
+
+#define S_SD_RX_CLK_I_0_G 28
+#define V_SD_RX_CLK_I_0_G(x) ((x) << S_SD_RX_CLK_I_0_G)
+#define F_SD_RX_CLK_I_0_G V_SD_RX_CLK_I_0_G(1U)
+
+#define S_SD_RX_CLK_I_1_G 27
+#define V_SD_RX_CLK_I_1_G(x) ((x) << S_SD_RX_CLK_I_1_G)
+#define F_SD_RX_CLK_I_1_G V_SD_RX_CLK_I_1_G(1U)
+
+#define S_SD_RX_CLK_I_2_G 26
+#define V_SD_RX_CLK_I_2_G(x) ((x) << S_SD_RX_CLK_I_2_G)
+#define F_SD_RX_CLK_I_2_G V_SD_RX_CLK_I_2_G(1U)
+
+#define S_SD_RX_CLK_I_3_G 25
+#define V_SD_RX_CLK_I_3_G(x) ((x) << S_SD_RX_CLK_I_3_G)
+#define F_SD_RX_CLK_I_3_G V_SD_RX_CLK_I_3_G(1U)
+
+#define S_SD_RX_CLK_I_4_G 24
+#define V_SD_RX_CLK_I_4_G(x) ((x) << S_SD_RX_CLK_I_4_G)
+#define F_SD_RX_CLK_I_4_G V_SD_RX_CLK_I_4_G(1U)
+
+#define S_SD_RX_CLK_I_5_G 23
+#define V_SD_RX_CLK_I_5_G(x) ((x) << S_SD_RX_CLK_I_5_G)
+#define F_SD_RX_CLK_I_5_G V_SD_RX_CLK_I_5_G(1U)
+
+#define S_SD_RX_CLK_I_6_G 22
+#define V_SD_RX_CLK_I_6_G(x) ((x) << S_SD_RX_CLK_I_6_G)
+#define F_SD_RX_CLK_I_6_G V_SD_RX_CLK_I_6_G(1U)
+
+#define S_SD_RX_CLK_I_7_G 21
+#define V_SD_RX_CLK_I_7_G(x) ((x) << S_SD_RX_CLK_I_7_G)
+#define F_SD_RX_CLK_I_7_G V_SD_RX_CLK_I_7_G(1U)
+
+#define S_SD_TX_CLK_I_0_G 20
+#define V_SD_TX_CLK_I_0_G(x) ((x) << S_SD_TX_CLK_I_0_G)
+#define F_SD_TX_CLK_I_0_G V_SD_TX_CLK_I_0_G(1U)
+
+#define S_SD_TX_CLK_I_1_G 19
+#define V_SD_TX_CLK_I_1_G(x) ((x) << S_SD_TX_CLK_I_1_G)
+#define F_SD_TX_CLK_I_1_G V_SD_TX_CLK_I_1_G(1U)
+
+#define S_SD_TX_CLK_I_2_G 18
+#define V_SD_TX_CLK_I_2_G(x) ((x) << S_SD_TX_CLK_I_2_G)
+#define F_SD_TX_CLK_I_2_G V_SD_TX_CLK_I_2_G(1U)
+
+#define S_SD_TX_CLK_I_3_G 17
+#define V_SD_TX_CLK_I_3_G(x) ((x) << S_SD_TX_CLK_I_3_G)
+#define F_SD_TX_CLK_I_3_G V_SD_TX_CLK_I_3_G(1U)
+
+#define S_SD_TX_CLK_I_4_G 16
+#define V_SD_TX_CLK_I_4_G(x) ((x) << S_SD_TX_CLK_I_4_G)
+#define F_SD_TX_CLK_I_4_G V_SD_TX_CLK_I_4_G(1U)
+
+#define S_SD_TX_CLK_I_5_G 15
+#define V_SD_TX_CLK_I_5_G(x) ((x) << S_SD_TX_CLK_I_5_G)
+#define F_SD_TX_CLK_I_5_G V_SD_TX_CLK_I_5_G(1U)
+
+#define S_SD_TX_CLK_I_6_G 14
+#define V_SD_TX_CLK_I_6_G(x) ((x) << S_SD_TX_CLK_I_6_G)
+#define F_SD_TX_CLK_I_6_G V_SD_TX_CLK_I_6_G(1U)
+
+#define S_SD_TX_CLK_I_7_G 13
+#define V_SD_TX_CLK_I_7_G(x) ((x) << S_SD_TX_CLK_I_7_G)
+#define F_SD_TX_CLK_I_7_G V_SD_TX_CLK_I_7_G(1U)
+
+#define S_XPCS_REF_CLK_I_0_G 12
+#define V_XPCS_REF_CLK_I_0_G(x) ((x) << S_XPCS_REF_CLK_I_0_G)
+#define F_XPCS_REF_CLK_I_0_G V_XPCS_REF_CLK_I_0_G(1U)
+
+#define S_XPCS_REF_CLK_I_1_G 11
+#define V_XPCS_REF_CLK_I_1_G(x) ((x) << S_XPCS_REF_CLK_I_1_G)
+#define F_XPCS_REF_CLK_I_1_G V_XPCS_REF_CLK_I_1_G(1U)
+
+#define S_REG_CLK_I_G 10
+#define V_REG_CLK_I_G(x) ((x) << S_REG_CLK_I_G)
+#define F_REG_CLK_I_G V_REG_CLK_I_G(1U)
+
+#define S_FF_RX_CLK_0_I_G 9
+#define V_FF_RX_CLK_0_I_G(x) ((x) << S_FF_RX_CLK_0_I_G)
+#define F_FF_RX_CLK_0_I_G V_FF_RX_CLK_0_I_G(1U)
+
+#define S_FF_TX_CLK_0_I_G 8
+#define V_FF_TX_CLK_0_I_G(x) ((x) << S_FF_TX_CLK_0_I_G)
+#define F_FF_TX_CLK_0_I_G V_FF_TX_CLK_0_I_G(1U)
+
+#define S_RXCLK_0_I_G 7
+#define V_RXCLK_0_I_G(x) ((x) << S_RXCLK_0_I_G)
+#define F_RXCLK_0_I_G V_RXCLK_0_I_G(1U)
+
+#define S_TXCLK_0_I_G 6
+#define V_TXCLK_0_I_G(x) ((x) << S_TXCLK_0_I_G)
+#define F_TXCLK_0_I_G V_TXCLK_0_I_G(1U)
+
+#define S_FF_RX_CLK_1_I_G 5
+#define V_FF_RX_CLK_1_I_G(x) ((x) << S_FF_RX_CLK_1_I_G)
+#define F_FF_RX_CLK_1_I_G V_FF_RX_CLK_1_I_G(1U)
+
+#define S_FF_TX_CLK_1_I_G 4
+#define V_FF_TX_CLK_1_I_G(x) ((x) << S_FF_TX_CLK_1_I_G)
+#define F_FF_TX_CLK_1_I_G V_FF_TX_CLK_1_I_G(1U)
+
+#define S_RXCLK_1_I_G 3
+#define V_RXCLK_1_I_G(x) ((x) << S_RXCLK_1_I_G)
+#define F_RXCLK_1_I_G V_RXCLK_1_I_G(1U)
+
+#define S_TXCLK_1_I_G 2
+#define V_TXCLK_1_I_G(x) ((x) << S_TXCLK_1_I_G)
+#define F_TXCLK_1_I_G V_TXCLK_1_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_1 0x38014
+
+#define S_FF_RX_CLK_2_I_G 31
+#define V_FF_RX_CLK_2_I_G(x) ((x) << S_FF_RX_CLK_2_I_G)
+#define F_FF_RX_CLK_2_I_G V_FF_RX_CLK_2_I_G(1U)
+
+#define S_FF_TX_CLK_2_I_G 30
+#define V_FF_TX_CLK_2_I_G(x) ((x) << S_FF_TX_CLK_2_I_G)
+#define F_FF_TX_CLK_2_I_G V_FF_TX_CLK_2_I_G(1U)
+
+#define S_RXCLK_2_I_G 29
+#define V_RXCLK_2_I_G(x) ((x) << S_RXCLK_2_I_G)
+#define F_RXCLK_2_I_G V_RXCLK_2_I_G(1U)
+
+#define S_TXCLK_2_I_G 28
+#define V_TXCLK_2_I_G(x) ((x) << S_TXCLK_2_I_G)
+#define F_TXCLK_2_I_G V_TXCLK_2_I_G(1U)
+
+#define S_FF_RX_CLK_3_I_G 27
+#define V_FF_RX_CLK_3_I_G(x) ((x) << S_FF_RX_CLK_3_I_G)
+#define F_FF_RX_CLK_3_I_G V_FF_RX_CLK_3_I_G(1U)
+
+#define S_FF_TX_CLK_3_I_G 26
+#define V_FF_TX_CLK_3_I_G(x) ((x) << S_FF_TX_CLK_3_I_G)
+#define F_FF_TX_CLK_3_I_G V_FF_TX_CLK_3_I_G(1U)
+
+#define S_RXCLK_3_I_G 25
+#define V_RXCLK_3_I_G(x) ((x) << S_RXCLK_3_I_G)
+#define F_RXCLK_3_I_G V_RXCLK_3_I_G(1U)
+
+#define S_TXCLK_3_I_G 24
+#define V_TXCLK_3_I_G(x) ((x) << S_TXCLK_3_I_G)
+#define F_TXCLK_3_I_G V_TXCLK_3_I_G(1U)
+
+#define S_FF_RX_CLK_4_I_G 23
+#define V_FF_RX_CLK_4_I_G(x) ((x) << S_FF_RX_CLK_4_I_G)
+#define F_FF_RX_CLK_4_I_G V_FF_RX_CLK_4_I_G(1U)
+
+#define S_FF_TX_CLK_4_I_G 22
+#define V_FF_TX_CLK_4_I_G(x) ((x) << S_FF_TX_CLK_4_I_G)
+#define F_FF_TX_CLK_4_I_G V_FF_TX_CLK_4_I_G(1U)
+
+#define S_RXCLK_4_I_G 21
+#define V_RXCLK_4_I_G(x) ((x) << S_RXCLK_4_I_G)
+#define F_RXCLK_4_I_G V_RXCLK_4_I_G(1U)
+
+#define S_TXCLK_4_I_G 20
+#define V_TXCLK_4_I_G(x) ((x) << S_TXCLK_4_I_G)
+#define F_TXCLK_4_I_G V_TXCLK_4_I_G(1U)
+
+#define S_FF_RX_CLK_5_I_G 19
+#define V_FF_RX_CLK_5_I_G(x) ((x) << S_FF_RX_CLK_5_I_G)
+#define F_FF_RX_CLK_5_I_G V_FF_RX_CLK_5_I_G(1U)
+
+#define S_FF_TX_CLK_5_I_G 18
+#define V_FF_TX_CLK_5_I_G(x) ((x) << S_FF_TX_CLK_5_I_G)
+#define F_FF_TX_CLK_5_I_G V_FF_TX_CLK_5_I_G(1U)
+
+#define S_RXCLK_5_I_G 17
+#define V_RXCLK_5_I_G(x) ((x) << S_RXCLK_5_I_G)
+#define F_RXCLK_5_I_G V_RXCLK_5_I_G(1U)
+
+#define S_TXCLK_5_I_G 16
+#define V_TXCLK_5_I_G(x) ((x) << S_TXCLK_5_I_G)
+#define F_TXCLK_5_I_G V_TXCLK_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_0_I_G 15
+#define V_SD_RX_CLK_AN_0_I_G(x) ((x) << S_SD_RX_CLK_AN_0_I_G)
+#define F_SD_RX_CLK_AN_0_I_G V_SD_RX_CLK_AN_0_I_G(1U)
+
+#define S_SD_TX_CLK_AN_0_I_G 14
+#define V_SD_TX_CLK_AN_0_I_G(x) ((x) << S_SD_TX_CLK_AN_0_I_G)
+#define F_SD_TX_CLK_AN_0_I_G V_SD_TX_CLK_AN_0_I_G(1U)
+
+#define S_SD_RX_CLK_AN_1_I_G 13
+#define V_SD_RX_CLK_AN_1_I_G(x) ((x) << S_SD_RX_CLK_AN_1_I_G)
+#define F_SD_RX_CLK_AN_1_I_G V_SD_RX_CLK_AN_1_I_G(1U)
+
+#define S_SD_TX_CLK_AN_1_I_G 12
+#define V_SD_TX_CLK_AN_1_I_G(x) ((x) << S_SD_TX_CLK_AN_1_I_G)
+#define F_SD_TX_CLK_AN_1_I_G V_SD_TX_CLK_AN_1_I_G(1U)
+
+#define S_SD_RX_CLK_AN_2_I_G 11
+#define V_SD_RX_CLK_AN_2_I_G(x) ((x) << S_SD_RX_CLK_AN_2_I_G)
+#define F_SD_RX_CLK_AN_2_I_G V_SD_RX_CLK_AN_2_I_G(1U)
+
+#define S_SD_TX_CLK_AN_2_I_G 10
+#define V_SD_TX_CLK_AN_2_I_G(x) ((x) << S_SD_TX_CLK_AN_2_I_G)
+#define F_SD_TX_CLK_AN_2_I_G V_SD_TX_CLK_AN_2_I_G(1U)
+
+#define S_SD_RX_CLK_AN_3_I_G 9
+#define V_SD_RX_CLK_AN_3_I_G(x) ((x) << S_SD_RX_CLK_AN_3_I_G)
+#define F_SD_RX_CLK_AN_3_I_G V_SD_RX_CLK_AN_3_I_G(1U)
+
+#define S_SD_TX_CLK_AN_3_I_G 8
+#define V_SD_TX_CLK_AN_3_I_G(x) ((x) << S_SD_TX_CLK_AN_3_I_G)
+#define F_SD_TX_CLK_AN_3_I_G V_SD_TX_CLK_AN_3_I_G(1U)
+
+#define S_SD_RX_CLK_AN_4_I_G 7
+#define V_SD_RX_CLK_AN_4_I_G(x) ((x) << S_SD_RX_CLK_AN_4_I_G)
+#define F_SD_RX_CLK_AN_4_I_G V_SD_RX_CLK_AN_4_I_G(1U)
+
+#define S_SD_TX_CLK_AN_4_I_G 6
+#define V_SD_TX_CLK_AN_4_I_G(x) ((x) << S_SD_TX_CLK_AN_4_I_G)
+#define F_SD_TX_CLK_AN_4_I_G V_SD_TX_CLK_AN_4_I_G(1U)
+
+#define S_SD_RX_CLK_AN_5_I_G 5
+#define V_SD_RX_CLK_AN_5_I_G(x) ((x) << S_SD_RX_CLK_AN_5_I_G)
+#define F_SD_RX_CLK_AN_5_I_G V_SD_RX_CLK_AN_5_I_G(1U)
+
+#define S_SD_TX_CLK_AN_5_I_G 4
+#define V_SD_TX_CLK_AN_5_I_G(x) ((x) << S_SD_TX_CLK_AN_5_I_G)
+#define F_SD_TX_CLK_AN_5_I_G V_SD_TX_CLK_AN_5_I_G(1U)
+
+#define S_SD_RX_CLK_AN_6_I_G 3
+#define V_SD_RX_CLK_AN_6_I_G(x) ((x) << S_SD_RX_CLK_AN_6_I_G)
+#define F_SD_RX_CLK_AN_6_I_G V_SD_RX_CLK_AN_6_I_G(1U)
+
+#define S_SD_TX_CLK_AN_6_I_G 2
+#define V_SD_TX_CLK_AN_6_I_G(x) ((x) << S_SD_TX_CLK_AN_6_I_G)
+#define F_SD_TX_CLK_AN_6_I_G V_SD_TX_CLK_AN_6_I_G(1U)
+
+#define S_SD_RX_CLK_AN_7_I_G 1
+#define V_SD_RX_CLK_AN_7_I_G(x) ((x) << S_SD_RX_CLK_AN_7_I_G)
+#define F_SD_RX_CLK_AN_7_I_G V_SD_RX_CLK_AN_7_I_G(1U)
+
+#define S_SD_TX_CLK_AN_7_I_G 0
+#define V_SD_TX_CLK_AN_7_I_G(x) ((x) << S_SD_TX_CLK_AN_7_I_G)
+#define F_SD_TX_CLK_AN_7_I_G V_SD_TX_CLK_AN_7_I_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_2 0x38018
+
+#define S_SD_RX_CLK_0_G 31
+#define V_SD_RX_CLK_0_G(x) ((x) << S_SD_RX_CLK_0_G)
+#define F_SD_RX_CLK_0_G V_SD_RX_CLK_0_G(1U)
+
+#define S_SD_RX_CLK_1_G 30
+#define V_SD_RX_CLK_1_G(x) ((x) << S_SD_RX_CLK_1_G)
+#define F_SD_RX_CLK_1_G V_SD_RX_CLK_1_G(1U)
+
+#define S_SD_RX_CLK_2_G 29
+#define V_SD_RX_CLK_2_G(x) ((x) << S_SD_RX_CLK_2_G)
+#define F_SD_RX_CLK_2_G V_SD_RX_CLK_2_G(1U)
+
+#define S_SD_RX_CLK_3_G 28
+#define V_SD_RX_CLK_3_G(x) ((x) << S_SD_RX_CLK_3_G)
+#define F_SD_RX_CLK_3_G V_SD_RX_CLK_3_G(1U)
+
+#define S_SD_RX_CLK_4_G 27
+#define V_SD_RX_CLK_4_G(x) ((x) << S_SD_RX_CLK_4_G)
+#define F_SD_RX_CLK_4_G V_SD_RX_CLK_4_G(1U)
+
+#define S_SD_RX_CLK_5_G 26
+#define V_SD_RX_CLK_5_G(x) ((x) << S_SD_RX_CLK_5_G)
+#define F_SD_RX_CLK_5_G V_SD_RX_CLK_5_G(1U)
+
+#define S_SD_RX_CLK_6_G 25
+#define V_SD_RX_CLK_6_G(x) ((x) << S_SD_RX_CLK_6_G)
+#define F_SD_RX_CLK_6_G V_SD_RX_CLK_6_G(1U)
+
+#define S_SD_RX_CLK_7_G 24
+#define V_SD_RX_CLK_7_G(x) ((x) << S_SD_RX_CLK_7_G)
+#define F_SD_RX_CLK_7_G V_SD_RX_CLK_7_G(1U)
+
+#define S_SD_TX_CLK_0_G 23
+#define V_SD_TX_CLK_0_G(x) ((x) << S_SD_TX_CLK_0_G)
+#define F_SD_TX_CLK_0_G V_SD_TX_CLK_0_G(1U)
+
+#define S_SD_TX_CLK_1_G 22
+#define V_SD_TX_CLK_1_G(x) ((x) << S_SD_TX_CLK_1_G)
+#define F_SD_TX_CLK_1_G V_SD_TX_CLK_1_G(1U)
+
+#define S_SD_TX_CLK_2_G 21
+#define V_SD_TX_CLK_2_G(x) ((x) << S_SD_TX_CLK_2_G)
+#define F_SD_TX_CLK_2_G V_SD_TX_CLK_2_G(1U)
+
+#define S_SD_TX_CLK_3_G 20
+#define V_SD_TX_CLK_3_G(x) ((x) << S_SD_TX_CLK_3_G)
+#define F_SD_TX_CLK_3_G V_SD_TX_CLK_3_G(1U)
+
+#define S_SD_TX_CLK_4_G 19
+#define V_SD_TX_CLK_4_G(x) ((x) << S_SD_TX_CLK_4_G)
+#define F_SD_TX_CLK_4_G V_SD_TX_CLK_4_G(1U)
+
+#define S_SD_TX_CLK_5_G 18
+#define V_SD_TX_CLK_5_G(x) ((x) << S_SD_TX_CLK_5_G)
+#define F_SD_TX_CLK_5_G V_SD_TX_CLK_5_G(1U)
+
+#define S_SD_TX_CLK_6_G 17
+#define V_SD_TX_CLK_6_G(x) ((x) << S_SD_TX_CLK_6_G)
+#define F_SD_TX_CLK_6_G V_SD_TX_CLK_6_G(1U)
+
+#define S_SD_TX_CLK_7_G 16
+#define V_SD_TX_CLK_7_G(x) ((x) << S_SD_TX_CLK_7_G)
+#define F_SD_TX_CLK_7_G V_SD_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_AEC_0_G 15
+#define V_SD_RX_CLK_AEC_0_G(x) ((x) << S_SD_RX_CLK_AEC_0_G)
+#define F_SD_RX_CLK_AEC_0_G V_SD_RX_CLK_AEC_0_G(1U)
+
+#define S_SD_RX_CLK_AEC_1_G 14
+#define V_SD_RX_CLK_AEC_1_G(x) ((x) << S_SD_RX_CLK_AEC_1_G)
+#define F_SD_RX_CLK_AEC_1_G V_SD_RX_CLK_AEC_1_G(1U)
+
+#define S_SD_RX_CLK_AEC_2_G 13
+#define V_SD_RX_CLK_AEC_2_G(x) ((x) << S_SD_RX_CLK_AEC_2_G)
+#define F_SD_RX_CLK_AEC_2_G V_SD_RX_CLK_AEC_2_G(1U)
+
+#define S_SD_RX_CLK_AEC_3_G 12
+#define V_SD_RX_CLK_AEC_3_G(x) ((x) << S_SD_RX_CLK_AEC_3_G)
+#define F_SD_RX_CLK_AEC_3_G V_SD_RX_CLK_AEC_3_G(1U)
+
+#define S_SD_RX_CLK_AEC_4_G 11
+#define V_SD_RX_CLK_AEC_4_G(x) ((x) << S_SD_RX_CLK_AEC_4_G)
+#define F_SD_RX_CLK_AEC_4_G V_SD_RX_CLK_AEC_4_G(1U)
+
+#define S_SD_RX_CLK_AEC_5_G 10
+#define V_SD_RX_CLK_AEC_5_G(x) ((x) << S_SD_RX_CLK_AEC_5_G)
+#define F_SD_RX_CLK_AEC_5_G V_SD_RX_CLK_AEC_5_G(1U)
+
+#define S_SD_RX_CLK_AEC_6_G 9
+#define V_SD_RX_CLK_AEC_6_G(x) ((x) << S_SD_RX_CLK_AEC_6_G)
+#define F_SD_RX_CLK_AEC_6_G V_SD_RX_CLK_AEC_6_G(1U)
+
+#define S_SD_RX_CLK_AEC_7_G 8
+#define V_SD_RX_CLK_AEC_7_G(x) ((x) << S_SD_RX_CLK_AEC_7_G)
+#define F_SD_RX_CLK_AEC_7_G V_SD_RX_CLK_AEC_7_G(1U)
+
+#define S_SD_TX_CLK_AEC_0_G 7
+#define V_SD_TX_CLK_AEC_0_G(x) ((x) << S_SD_TX_CLK_AEC_0_G)
+#define F_SD_TX_CLK_AEC_0_G V_SD_TX_CLK_AEC_0_G(1U)
+
+#define S_SD_TX_CLK_AEC_1_G 6
+#define V_SD_TX_CLK_AEC_1_G(x) ((x) << S_SD_TX_CLK_AEC_1_G)
+#define F_SD_TX_CLK_AEC_1_G V_SD_TX_CLK_AEC_1_G(1U)
+
+#define S_SD_TX_CLK_AEC_2_G 5
+#define V_SD_TX_CLK_AEC_2_G(x) ((x) << S_SD_TX_CLK_AEC_2_G)
+#define F_SD_TX_CLK_AEC_2_G V_SD_TX_CLK_AEC_2_G(1U)
+
+#define S_SD_TX_CLK_AEC_3_G 4
+#define V_SD_TX_CLK_AEC_3_G(x) ((x) << S_SD_TX_CLK_AEC_3_G)
+#define F_SD_TX_CLK_AEC_3_G V_SD_TX_CLK_AEC_3_G(1U)
+
+#define S_SD_TX_CLK_AEC_4_G 3
+#define V_SD_TX_CLK_AEC_4_G(x) ((x) << S_SD_TX_CLK_AEC_4_G)
+#define F_SD_TX_CLK_AEC_4_G V_SD_TX_CLK_AEC_4_G(1U)
+
+#define S_SD_TX_CLK_AEC_5_G 2
+#define V_SD_TX_CLK_AEC_5_G(x) ((x) << S_SD_TX_CLK_AEC_5_G)
+#define F_SD_TX_CLK_AEC_5_G V_SD_TX_CLK_AEC_5_G(1U)
+
+#define S_SD_TX_CLK_AEC_6_G 1
+#define V_SD_TX_CLK_AEC_6_G(x) ((x) << S_SD_TX_CLK_AEC_6_G)
+#define F_SD_TX_CLK_AEC_6_G V_SD_TX_CLK_AEC_6_G(1U)
+
+#define S_SD_TX_CLK_AEC_7_G 0
+#define V_SD_TX_CLK_AEC_7_G(x) ((x) << S_SD_TX_CLK_AEC_7_G)
+#define F_SD_TX_CLK_AEC_7_G V_SD_TX_CLK_AEC_7_G(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_3 0x3801c
+
+#define S_PCS_RX_CLK_0_G 31
+#define V_PCS_RX_CLK_0_G(x) ((x) << S_PCS_RX_CLK_0_G)
+#define F_PCS_RX_CLK_0_G V_PCS_RX_CLK_0_G(1U)
+
+#define S_PCS_RX_CLK_1_G 30
+#define V_PCS_RX_CLK_1_G(x) ((x) << S_PCS_RX_CLK_1_G)
+#define F_PCS_RX_CLK_1_G V_PCS_RX_CLK_1_G(1U)
+
+#define S_PCS_RX_CLK_2_G 29
+#define V_PCS_RX_CLK_2_G(x) ((x) << S_PCS_RX_CLK_2_G)
+#define F_PCS_RX_CLK_2_G V_PCS_RX_CLK_2_G(1U)
+
+#define S_PCS_RX_CLK_3_G 28
+#define V_PCS_RX_CLK_3_G(x) ((x) << S_PCS_RX_CLK_3_G)
+#define F_PCS_RX_CLK_3_G V_PCS_RX_CLK_3_G(1U)
+
+#define S_PCS_RX_CLK_4_G 27
+#define V_PCS_RX_CLK_4_G(x) ((x) << S_PCS_RX_CLK_4_G)
+#define F_PCS_RX_CLK_4_G V_PCS_RX_CLK_4_G(1U)
+
+#define S_PCS_RX_CLK_5_G 26
+#define V_PCS_RX_CLK_5_G(x) ((x) << S_PCS_RX_CLK_5_G)
+#define F_PCS_RX_CLK_5_G V_PCS_RX_CLK_5_G(1U)
+
+#define S_PCS_RX_CLK_6_G 25
+#define V_PCS_RX_CLK_6_G(x) ((x) << S_PCS_RX_CLK_6_G)
+#define F_PCS_RX_CLK_6_G V_PCS_RX_CLK_6_G(1U)
+
+#define S_PCS_RX_CLK_7_G 24
+#define V_PCS_RX_CLK_7_G(x) ((x) << S_PCS_RX_CLK_7_G)
+#define F_PCS_RX_CLK_7_G V_PCS_RX_CLK_7_G(1U)
+
+#define S_PCS_TX_CLK_0_G 23
+#define V_PCS_TX_CLK_0_G(x) ((x) << S_PCS_TX_CLK_0_G)
+#define F_PCS_TX_CLK_0_G V_PCS_TX_CLK_0_G(1U)
+
+#define S_PCS_TX_CLK_1_G 22
+#define V_PCS_TX_CLK_1_G(x) ((x) << S_PCS_TX_CLK_1_G)
+#define F_PCS_TX_CLK_1_G V_PCS_TX_CLK_1_G(1U)
+
+#define S_PCS_TX_CLK_2_G 21
+#define V_PCS_TX_CLK_2_G(x) ((x) << S_PCS_TX_CLK_2_G)
+#define F_PCS_TX_CLK_2_G V_PCS_TX_CLK_2_G(1U)
+
+#define S_PCS_TX_CLK_3_G 20
+#define V_PCS_TX_CLK_3_G(x) ((x) << S_PCS_TX_CLK_3_G)
+#define F_PCS_TX_CLK_3_G V_PCS_TX_CLK_3_G(1U)
+
+#define S_PCS_TX_CLK_4_G 19
+#define V_PCS_TX_CLK_4_G(x) ((x) << S_PCS_TX_CLK_4_G)
+#define F_PCS_TX_CLK_4_G V_PCS_TX_CLK_4_G(1U)
+
+#define S_PCS_TX_CLK_5_G 18
+#define V_PCS_TX_CLK_5_G(x) ((x) << S_PCS_TX_CLK_5_G)
+#define F_PCS_TX_CLK_5_G V_PCS_TX_CLK_5_G(1U)
+
+#define S_PCS_TX_CLK_6_G 17
+#define V_PCS_TX_CLK_6_G(x) ((x) << S_PCS_TX_CLK_6_G)
+#define F_PCS_TX_CLK_6_G V_PCS_TX_CLK_6_G(1U)
+
+#define S_PCS_TX_CLK_7_G 16
+#define V_PCS_TX_CLK_7_G(x) ((x) << S_PCS_TX_CLK_7_G)
+#define F_PCS_TX_CLK_7_G V_PCS_TX_CLK_7_G(1U)
+
+#define S_SD_RX_CLK_EN_0 15
+#define V_SD_RX_CLK_EN_0(x) ((x) << S_SD_RX_CLK_EN_0)
+#define F_SD_RX_CLK_EN_0 V_SD_RX_CLK_EN_0(1U)
+
+#define S_SD_RX_CLK_EN_1 14
+#define V_SD_RX_CLK_EN_1(x) ((x) << S_SD_RX_CLK_EN_1)
+#define F_SD_RX_CLK_EN_1 V_SD_RX_CLK_EN_1(1U)
+
+#define S_SD_RX_CLK_EN_2 13
+#define V_SD_RX_CLK_EN_2(x) ((x) << S_SD_RX_CLK_EN_2)
+#define F_SD_RX_CLK_EN_2 V_SD_RX_CLK_EN_2(1U)
+
+#define S_SD_RX_CLK_EN_3 12
+#define V_SD_RX_CLK_EN_3(x) ((x) << S_SD_RX_CLK_EN_3)
+#define F_SD_RX_CLK_EN_3 V_SD_RX_CLK_EN_3(1U)
+
+#define S_SD_RX_CLK_EN_4 11
+#define V_SD_RX_CLK_EN_4(x) ((x) << S_SD_RX_CLK_EN_4)
+#define F_SD_RX_CLK_EN_4 V_SD_RX_CLK_EN_4(1U)
+
+#define S_SD_RX_CLK_EN_5 10
+#define V_SD_RX_CLK_EN_5(x) ((x) << S_SD_RX_CLK_EN_5)
+#define F_SD_RX_CLK_EN_5 V_SD_RX_CLK_EN_5(1U)
+
+#define S_SD_RX_CLK_EN_6 9
+#define V_SD_RX_CLK_EN_6(x) ((x) << S_SD_RX_CLK_EN_6)
+#define F_SD_RX_CLK_EN_6 V_SD_RX_CLK_EN_6(1U)
+
+#define S_SD_RX_CLK_EN_7 8
+#define V_SD_RX_CLK_EN_7(x) ((x) << S_SD_RX_CLK_EN_7)
+#define F_SD_RX_CLK_EN_7 V_SD_RX_CLK_EN_7(1U)
+
+#define S_SD_TX_CLK_EN_0 7
+#define V_SD_TX_CLK_EN_0(x) ((x) << S_SD_TX_CLK_EN_0)
+#define F_SD_TX_CLK_EN_0 V_SD_TX_CLK_EN_0(1U)
+
+#define S_SD_TX_CLK_EN_1 6
+#define V_SD_TX_CLK_EN_1(x) ((x) << S_SD_TX_CLK_EN_1)
+#define F_SD_TX_CLK_EN_1 V_SD_TX_CLK_EN_1(1U)
+
+#define S_SD_TX_CLK_EN_2 5
+#define V_SD_TX_CLK_EN_2(x) ((x) << S_SD_TX_CLK_EN_2)
+#define F_SD_TX_CLK_EN_2 V_SD_TX_CLK_EN_2(1U)
+
+#define S_SD_TX_CLK_EN_3 4
+#define V_SD_TX_CLK_EN_3(x) ((x) << S_SD_TX_CLK_EN_3)
+#define F_SD_TX_CLK_EN_3 V_SD_TX_CLK_EN_3(1U)
+
+#define S_SD_TX_CLK_EN_4 3
+#define V_SD_TX_CLK_EN_4(x) ((x) << S_SD_TX_CLK_EN_4)
+#define F_SD_TX_CLK_EN_4 V_SD_TX_CLK_EN_4(1U)
+
+#define S_SD_TX_CLK_EN_5 2
+#define V_SD_TX_CLK_EN_5(x) ((x) << S_SD_TX_CLK_EN_5)
+#define F_SD_TX_CLK_EN_5 V_SD_TX_CLK_EN_5(1U)
+
+#define S_SD_TX_CLK_EN_6 1
+#define V_SD_TX_CLK_EN_6(x) ((x) << S_SD_TX_CLK_EN_6)
+#define F_SD_TX_CLK_EN_6 V_SD_TX_CLK_EN_6(1U)
+
+#define S_SD_TX_CLK_EN_7 0
+#define V_SD_TX_CLK_EN_7(x) ((x) << S_SD_TX_CLK_EN_7)
+#define F_SD_TX_CLK_EN_7 V_SD_TX_CLK_EN_7(1U)
+
+#define A_MAC_MTIP_CLK_CTRL_4 0x38020
+
+#define S_SGMII_TX_CLK_0_G 7
+#define V_SGMII_TX_CLK_0_G(x) ((x) << S_SGMII_TX_CLK_0_G)
+#define F_SGMII_TX_CLK_0_G V_SGMII_TX_CLK_0_G(1U)
+
+#define S_SGMII_TX_CLK_1_G 6
+#define V_SGMII_TX_CLK_1_G(x) ((x) << S_SGMII_TX_CLK_1_G)
+#define F_SGMII_TX_CLK_1_G V_SGMII_TX_CLK_1_G(1U)
+
+#define S_SGMII_TX_CLK_2_G 5
+#define V_SGMII_TX_CLK_2_G(x) ((x) << S_SGMII_TX_CLK_2_G)
+#define F_SGMII_TX_CLK_2_G V_SGMII_TX_CLK_2_G(1U)
+
+#define S_SGMII_TX_CLK_3_G 4
+#define V_SGMII_TX_CLK_3_G(x) ((x) << S_SGMII_TX_CLK_3_G)
+#define F_SGMII_TX_CLK_3_G V_SGMII_TX_CLK_3_G(1U)
+
+#define S_SGMII_RX_CLK_0_G 3
+#define V_SGMII_RX_CLK_0_G(x) ((x) << S_SGMII_RX_CLK_0_G)
+#define F_SGMII_RX_CLK_0_G V_SGMII_RX_CLK_0_G(1U)
+
+#define S_SGMII_RX_CLK_1_G 2
+#define V_SGMII_RX_CLK_1_G(x) ((x) << S_SGMII_RX_CLK_1_G)
+#define F_SGMII_RX_CLK_1_G V_SGMII_RX_CLK_1_G(1U)
+
+#define S_SGMII_RX_CLK_2_G 1
+#define V_SGMII_RX_CLK_2_G(x) ((x) << S_SGMII_RX_CLK_2_G)
+#define F_SGMII_RX_CLK_2_G V_SGMII_RX_CLK_2_G(1U)
+
+#define S_SGMII_RX_CLK_3_G 0
+#define V_SGMII_RX_CLK_3_G(x) ((x) << S_SGMII_RX_CLK_3_G)
+#define F_SGMII_RX_CLK_3_G V_SGMII_RX_CLK_3_G(1U)
+
+#define A_MAC_PCS_CONFIG_0 0x38024
+
+#define S_KP_MODE_IN 24
+#define M_KP_MODE_IN 0xffU
+#define V_KP_MODE_IN(x) ((x) << S_KP_MODE_IN)
+#define G_KP_MODE_IN(x) (((x) >> S_KP_MODE_IN) & M_KP_MODE_IN)
+
+#define S_FEC91_ENA_IN 16
+#define M_FEC91_ENA_IN 0xffU
+#define V_FEC91_ENA_IN(x) ((x) << S_FEC91_ENA_IN)
+#define G_FEC91_ENA_IN(x) (((x) >> S_FEC91_ENA_IN) & M_FEC91_ENA_IN)
+
+#define S_SD_8X 8
+#define M_SD_8X 0xffU
+#define V_SD_8X(x) ((x) << S_SD_8X)
+#define G_SD_8X(x) (((x) >> S_SD_8X) & M_SD_8X)
+
+#define S_SD_N2 0
+#define M_SD_N2 0xffU
+#define V_SD_N2(x) ((x) << S_SD_N2)
+#define G_SD_N2(x) (((x) >> S_SD_N2) & M_SD_N2)
+
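+/*
+ * Illustrative sketch (not part of the original change): multi-bit fields
+ * add an M_<NAME> mask and a G_<NAME> extractor on top of S_/V_.  Given a
+ * value previously read from A_MAC_PCS_CONFIG_0, this replaces the
+ * FEC91_ENA_IN byte (bits 23:16) while preserving the remaining fields:
+ */
+static inline uint32_t
+set_fec91_ena_in(uint32_t regval, uint32_t lanes)
+{
+	regval &= ~V_FEC91_ENA_IN(M_FEC91_ENA_IN);	/* clear bits 23:16 */
+	return (regval | V_FEC91_ENA_IN(lanes & M_FEC91_ENA_IN));
+}
+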
+#define A_MAC_PCS_CONFIG_1 0x38028
+
+#define S_FAST_1LANE_MODE 24
+#define M_FAST_1LANE_MODE 0xffU
+#define V_FAST_1LANE_MODE(x) ((x) << S_FAST_1LANE_MODE)
+#define G_FAST_1LANE_MODE(x) (((x) >> S_FAST_1LANE_MODE) & M_FAST_1LANE_MODE)
+
+#define S_PACER_10G 16
+#define M_PACER_10G 0xffU
+#define V_PACER_10G(x) ((x) << S_PACER_10G)
+#define G_PACER_10G(x) (((x) >> S_PACER_10G) & M_PACER_10G)
+
+#define S_PCS400_ENA_IN 14
+#define M_PCS400_ENA_IN 0x3U
+#define V_PCS400_ENA_IN(x) ((x) << S_PCS400_ENA_IN)
+#define G_PCS400_ENA_IN(x) (((x) >> S_PCS400_ENA_IN) & M_PCS400_ENA_IN)
+
+#define S_MODE40_ENA_IN4 13
+#define V_MODE40_ENA_IN4(x) ((x) << S_MODE40_ENA_IN4)
+#define F_MODE40_ENA_IN4 V_MODE40_ENA_IN4(1U)
+
+#define S_MODE40_ENA_IN0 12
+#define V_MODE40_ENA_IN0(x) ((x) << S_MODE40_ENA_IN0)
+#define F_MODE40_ENA_IN0 V_MODE40_ENA_IN0(1U)
+
+#define S_PCS100_ENA_IN6 11
+#define V_PCS100_ENA_IN6(x) ((x) << S_PCS100_ENA_IN6)
+#define F_PCS100_ENA_IN6 V_PCS100_ENA_IN6(1U)
+
+#define S_PCS100_ENA_IN4 10
+#define V_PCS100_ENA_IN4(x) ((x) << S_PCS100_ENA_IN4)
+#define F_PCS100_ENA_IN4 V_PCS100_ENA_IN4(1U)
+
+#define S_PCS100_ENA_IN2 9
+#define V_PCS100_ENA_IN2(x) ((x) << S_PCS100_ENA_IN2)
+#define F_PCS100_ENA_IN2 V_PCS100_ENA_IN2(1U)
+
+#define S_PCS100_ENA_IN0 8
+#define V_PCS100_ENA_IN0(x) ((x) << S_PCS100_ENA_IN0)
+#define F_PCS100_ENA_IN0 V_PCS100_ENA_IN0(1U)
+
+#define S_RXLAUI_ENA_IN6 7
+#define V_RXLAUI_ENA_IN6(x) ((x) << S_RXLAUI_ENA_IN6)
+#define F_RXLAUI_ENA_IN6 V_RXLAUI_ENA_IN6(1U)
+
+#define S_RXLAUI_ENA_IN4 6
+#define V_RXLAUI_ENA_IN4(x) ((x) << S_RXLAUI_ENA_IN4)
+#define F_RXLAUI_ENA_IN4 V_RXLAUI_ENA_IN4(1U)
+
+#define S_RXLAUI_ENA_IN2 5
+#define V_RXLAUI_ENA_IN2(x) ((x) << S_RXLAUI_ENA_IN2)
+#define F_RXLAUI_ENA_IN2 V_RXLAUI_ENA_IN2(1U)
+
+#define S_RXLAUI_ENA_IN0 4
+#define V_RXLAUI_ENA_IN0(x) ((x) << S_RXLAUI_ENA_IN0)
+#define F_RXLAUI_ENA_IN0 V_RXLAUI_ENA_IN0(1U)
+
+#define S_FEC91_LANE_IN6 3
+#define V_FEC91_LANE_IN6(x) ((x) << S_FEC91_LANE_IN6)
+#define F_FEC91_LANE_IN6 V_FEC91_LANE_IN6(1U)
+
+#define S_FEC91_LANE_IN4 2
+#define V_FEC91_LANE_IN4(x) ((x) << S_FEC91_LANE_IN4)
+#define F_FEC91_LANE_IN4 V_FEC91_LANE_IN4(1U)
+
+#define S_FEC91_LANE_IN2 1
+#define V_FEC91_LANE_IN2(x) ((x) << S_FEC91_LANE_IN2)
+#define F_FEC91_LANE_IN2 V_FEC91_LANE_IN2(1U)
+
+#define S_FEC91_LANE_IN0 0
+#define V_FEC91_LANE_IN0(x) ((x) << S_FEC91_LANE_IN0)
+#define F_FEC91_LANE_IN0 V_FEC91_LANE_IN0(1U)
+
+#define A_MAC_PCS_CONFIG_2 0x3802c
+
+#define S_SGPCS_EN_3 29
+#define V_SGPCS_EN_3(x) ((x) << S_SGPCS_EN_3)
+#define F_SGPCS_EN_3 V_SGPCS_EN_3(1U)
+
+#define S_SGPCS_EN_2 28
+#define V_SGPCS_EN_2(x) ((x) << S_SGPCS_EN_2)
+#define F_SGPCS_EN_2 V_SGPCS_EN_2(1U)
+
+#define S_SGPCS_EN_1 27
+#define V_SGPCS_EN_1(x) ((x) << S_SGPCS_EN_1)
+#define F_SGPCS_EN_1 V_SGPCS_EN_1(1U)
+
+#define S_SGPCS_EN_0 26
+#define V_SGPCS_EN_0(x) ((x) << S_SGPCS_EN_0)
+#define F_SGPCS_EN_0 V_SGPCS_EN_0(1U)
+
+#define S_CFG_CLOCK_RATE 22
+#define M_CFG_CLOCK_RATE 0xfU
+#define V_CFG_CLOCK_RATE(x) ((x) << S_CFG_CLOCK_RATE)
+#define G_CFG_CLOCK_RATE(x) (((x) >> S_CFG_CLOCK_RATE) & M_CFG_CLOCK_RATE)
+
+#define S_FEC_ERR_ENA 14
+#define M_FEC_ERR_ENA 0xffU
+#define V_FEC_ERR_ENA(x) ((x) << S_FEC_ERR_ENA)
+#define G_FEC_ERR_ENA(x) (((x) >> S_FEC_ERR_ENA) & M_FEC_ERR_ENA)
+
+#define S_FEC_ENA 6
+#define M_FEC_ENA 0xffU
+#define V_FEC_ENA(x) ((x) << S_FEC_ENA)
+#define G_FEC_ENA(x) (((x) >> S_FEC_ENA) & M_FEC_ENA)
+
+#define S_PCS001_TX_AM_SF 3
+#define M_PCS001_TX_AM_SF 0x7U
+#define V_PCS001_TX_AM_SF(x) ((x) << S_PCS001_TX_AM_SF)
+#define G_PCS001_TX_AM_SF(x) (((x) >> S_PCS001_TX_AM_SF) & M_PCS001_TX_AM_SF)
+
+#define S_PCS000_TX_AM_SF 0
+#define M_PCS000_TX_AM_SF 0x7U
+#define V_PCS000_TX_AM_SF(x) ((x) << S_PCS000_TX_AM_SF)
+#define G_PCS000_TX_AM_SF(x) (((x) >> S_PCS000_TX_AM_SF) & M_PCS000_TX_AM_SF)
+
+#define A_MAC_PCS_STATUS_0 0x38030
+
+#define S_PCS000_ALIGN_LOCK 30
+#define M_PCS000_ALIGN_LOCK 0x3U
+#define V_PCS000_ALIGN_LOCK(x) ((x) << S_PCS000_ALIGN_LOCK)
+#define G_PCS000_ALIGN_LOCK(x) (((x) >> S_PCS000_ALIGN_LOCK) & M_PCS000_ALIGN_LOCK)
+
+#define S_PCS000_HI_SER 28
+#define M_PCS000_HI_SER 0x3U
+#define V_PCS000_HI_SER(x) ((x) << S_PCS000_HI_SER)
+#define G_PCS000_HI_SER(x) (((x) >> S_PCS000_HI_SER) & M_PCS000_HI_SER)
+
+#define S_BER_TIMER_DONE 20
+#define M_BER_TIMER_DONE 0xffU
+#define V_BER_TIMER_DONE(x) ((x) << S_BER_TIMER_DONE)
+#define G_BER_TIMER_DONE(x) (((x) >> S_BER_TIMER_DONE) & M_BER_TIMER_DONE)
+
+#define S_T7_AMPS_LOCK 4
+#define M_T7_AMPS_LOCK 0xffffU
+#define V_T7_AMPS_LOCK(x) ((x) << S_T7_AMPS_LOCK)
+#define G_T7_AMPS_LOCK(x) (((x) >> S_T7_AMPS_LOCK) & M_T7_AMPS_LOCK)
+
+#define S_T7_ALIGN_DONE 0
+#define M_T7_ALIGN_DONE 0xfU
+#define V_T7_ALIGN_DONE(x) ((x) << S_T7_ALIGN_DONE)
+#define G_T7_ALIGN_DONE(x) (((x) >> S_T7_ALIGN_DONE) & M_T7_ALIGN_DONE)
+
+#define A_MAC_PCS_STATUS_1 0x38034
+#define A_MAC_PCS_STATUS_2 0x38038
+
+#define S_RSFEC_ALIGNED 24
+#define M_RSFEC_ALIGNED 0xffU
+#define V_RSFEC_ALIGNED(x) ((x) << S_RSFEC_ALIGNED)
+#define G_RSFEC_ALIGNED(x) (((x) >> S_RSFEC_ALIGNED) & M_RSFEC_ALIGNED)
+
+#define S_T7_FEC_LOCKED 8
+#define M_T7_FEC_LOCKED 0xffffU
+#define V_T7_FEC_LOCKED(x) ((x) << S_T7_FEC_LOCKED)
+#define G_T7_FEC_LOCKED(x) (((x) >> S_T7_FEC_LOCKED) & M_T7_FEC_LOCKED)
+
+#define S_T7_BLOCK_LOCK 0
+#define M_T7_BLOCK_LOCK 0xffU
+#define V_T7_BLOCK_LOCK(x) ((x) << S_T7_BLOCK_LOCK)
+#define G_T7_BLOCK_LOCK(x) (((x) >> S_T7_BLOCK_LOCK) & M_T7_BLOCK_LOCK)
+
+#define A_MAC_PCS_STATUS_3 0x3803c
+
+#define S_FEC_NCERR 16
+#define M_FEC_NCERR 0xffffU
+#define V_FEC_NCERR(x) ((x) << S_FEC_NCERR)
+#define G_FEC_NCERR(x) (((x) >> S_FEC_NCERR) & M_FEC_NCERR)
+
+#define S_FEC_CERR 0
+#define M_FEC_CERR 0xffffU
+#define V_FEC_CERR(x) ((x) << S_FEC_CERR)
+#define G_FEC_CERR(x) (((x) >> S_FEC_CERR) & M_FEC_CERR)
+
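+/*
+ * Illustrative sketch (not part of the original change):
+ * A_MAC_PCS_STATUS_3 packs the correctable and uncorrectable FEC error
+ * counts into one word; the G_ macros pull them apart.  "val" is assumed
+ * to hold a prior read of the register:
+ */
+static inline void
+fec_err_counts(uint32_t val, u_int *cerr, u_int *ncerr)
+{
+	*cerr = G_FEC_CERR(val);	/* bits 15:0, correctable */
+	*ncerr = G_FEC_NCERR(val);	/* bits 31:16, uncorrectable */
+}
+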
+#define A_MAC_PCS_STATUS_4 0x38040
+
+#define S_MAC1_RES_SPEED 23
+#define M_MAC1_RES_SPEED 0xffU
+#define V_MAC1_RES_SPEED(x) ((x) << S_MAC1_RES_SPEED)
+#define G_MAC1_RES_SPEED(x) (((x) >> S_MAC1_RES_SPEED) & M_MAC1_RES_SPEED)
+
+#define S_MAC0_RES_SPEED 14
+#define M_MAC0_RES_SPEED 0xffU
+#define V_MAC0_RES_SPEED(x) ((x) << S_MAC0_RES_SPEED)
+#define G_MAC0_RES_SPEED(x) (((x) >> S_MAC0_RES_SPEED) & M_MAC0_RES_SPEED)
+
+#define S_PCS400_ENA_IN_REF 12
+#define M_PCS400_ENA_IN_REF 0x3U
+#define V_PCS400_ENA_IN_REF(x) ((x) << S_PCS400_ENA_IN_REF)
+#define G_PCS400_ENA_IN_REF(x) (((x) >> S_PCS400_ENA_IN_REF) & M_PCS400_ENA_IN_REF)
+
+#define S_PCS000_DEGRADE_SER 10
+#define M_PCS000_DEGRADE_SER 0x3U
+#define V_PCS000_DEGRADE_SER(x) ((x) << S_PCS000_DEGRADE_SER)
+#define G_PCS000_DEGRADE_SER(x) (((x) >> S_PCS000_DEGRADE_SER) & M_PCS000_DEGRADE_SER)
+
+#define S_P4X_SIGNAL_OK 8
+#define M_P4X_SIGNAL_OK 0x3U
+#define V_P4X_SIGNAL_OK(x) ((x) << S_P4X_SIGNAL_OK)
+#define G_P4X_SIGNAL_OK(x) (((x) >> S_P4X_SIGNAL_OK) & M_P4X_SIGNAL_OK)
+
+#define S_MODE200_IND_REF 7
+#define V_MODE200_IND_REF(x) ((x) << S_MODE200_IND_REF)
+#define F_MODE200_IND_REF V_MODE200_IND_REF(1U)
+
+#define S_MODE200_8X26_IND_REF 6
+#define V_MODE200_8X26_IND_REF(x) ((x) << S_MODE200_8X26_IND_REF)
+#define F_MODE200_8X26_IND_REF V_MODE200_8X26_IND_REF(1U)
+
+#define S_PCS001_RX_AM_SF 3
+#define M_PCS001_RX_AM_SF 0x7U
+#define V_PCS001_RX_AM_SF(x) ((x) << S_PCS001_RX_AM_SF)
+#define G_PCS001_RX_AM_SF(x) (((x) >> S_PCS001_RX_AM_SF) & M_PCS001_RX_AM_SF)
+
+#define S_PCS000_RX_AM_SF 0
+#define M_PCS000_RX_AM_SF 0x7U
+#define V_PCS000_RX_AM_SF(x) ((x) << S_PCS000_RX_AM_SF)
+#define G_PCS000_RX_AM_SF(x) (((x) >> S_PCS000_RX_AM_SF) & M_PCS000_RX_AM_SF)
+
+#define A_MAC_PCS_STATUS_5 0x38044
+
+#define S_MAC5_RES_SPEED 24
+#define M_MAC5_RES_SPEED 0xffU
+#define V_MAC5_RES_SPEED(x) ((x) << S_MAC5_RES_SPEED)
+#define G_MAC5_RES_SPEED(x) (((x) >> S_MAC5_RES_SPEED) & M_MAC5_RES_SPEED)
+
+#define S_MAC4_RES_SPEED 16
+#define M_MAC4_RES_SPEED 0xffU
+#define V_MAC4_RES_SPEED(x) ((x) << S_MAC4_RES_SPEED)
+#define G_MAC4_RES_SPEED(x) (((x) >> S_MAC4_RES_SPEED) & M_MAC4_RES_SPEED)
+
+#define S_MAC3_RES_SPEED 8
+#define M_MAC3_RES_SPEED 0xffU
+#define V_MAC3_RES_SPEED(x) ((x) << S_MAC3_RES_SPEED)
+#define G_MAC3_RES_SPEED(x) (((x) >> S_MAC3_RES_SPEED) & M_MAC3_RES_SPEED)
+
+#define S_MAC2_RES_SPEED 0
+#define M_MAC2_RES_SPEED 0xffU
+#define V_MAC2_RES_SPEED(x) ((x) << S_MAC2_RES_SPEED)
+#define G_MAC2_RES_SPEED(x) (((x) >> S_MAC2_RES_SPEED) & M_MAC2_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_6 0x38048
+
+#define S_MARKER_INS_CNT_100_00 16
+#define M_MARKER_INS_CNT_100_00 0x7fffU
+#define V_MARKER_INS_CNT_100_00(x) ((x) << S_MARKER_INS_CNT_100_00)
+#define G_MARKER_INS_CNT_100_00(x) (((x) >> S_MARKER_INS_CNT_100_00) & M_MARKER_INS_CNT_100_00)
+
+#define S_MAC7_RES_SPEED 8
+#define M_MAC7_RES_SPEED 0xffU
+#define V_MAC7_RES_SPEED(x) ((x) << S_MAC7_RES_SPEED)
+#define G_MAC7_RES_SPEED(x) (((x) >> S_MAC7_RES_SPEED) & M_MAC7_RES_SPEED)
+
+#define S_MAC6_RES_SPEED 0
+#define M_MAC6_RES_SPEED 0xffU
+#define V_MAC6_RES_SPEED(x) ((x) << S_MAC6_RES_SPEED)
+#define G_MAC6_RES_SPEED(x) (((x) >> S_MAC6_RES_SPEED) & M_MAC6_RES_SPEED)
+
+#define A_MAC_PCS_STATUS_7 0x3804c
+
+#define S_PCS000_LINK_STATUS 30
+#define M_PCS000_LINK_STATUS 0x3U
+#define V_PCS000_LINK_STATUS(x) ((x) << S_PCS000_LINK_STATUS)
+#define G_PCS000_LINK_STATUS(x) (((x) >> S_PCS000_LINK_STATUS) & M_PCS000_LINK_STATUS)
+
+#define S_MARKER_INS_CNT_100_02 15
+#define M_MARKER_INS_CNT_100_02 0x7fffU
+#define V_MARKER_INS_CNT_100_02(x) ((x) << S_MARKER_INS_CNT_100_02)
+#define G_MARKER_INS_CNT_100_02(x) (((x) >> S_MARKER_INS_CNT_100_02) & M_MARKER_INS_CNT_100_02)
+
+#define S_MARKER_INS_CNT_100_01 0
+#define M_MARKER_INS_CNT_100_01 0x7fffU
+#define V_MARKER_INS_CNT_100_01(x) ((x) << S_MARKER_INS_CNT_100_01)
+#define G_MARKER_INS_CNT_100_01(x) (((x) >> S_MARKER_INS_CNT_100_01) & M_MARKER_INS_CNT_100_01)
+
+#define A_MAC_PCS_STATUS_8 0x38050
+
+#define S_MARKER_INS_CNT_25_1 15
+#define M_MARKER_INS_CNT_25_1 0xffffU
+#define V_MARKER_INS_CNT_25_1(x) ((x) << S_MARKER_INS_CNT_25_1)
+#define G_MARKER_INS_CNT_25_1(x) (((x) >> S_MARKER_INS_CNT_25_1) & M_MARKER_INS_CNT_25_1)
+
+#define S_MARKER_INS_CNT_100_03 0
+#define M_MARKER_INS_CNT_100_03 0x7fffU
+#define V_MARKER_INS_CNT_100_03(x) ((x) << S_MARKER_INS_CNT_100_03)
+#define G_MARKER_INS_CNT_100_03(x) (((x) >> S_MARKER_INS_CNT_100_03) & M_MARKER_INS_CNT_100_03)
+
+#define A_MAC_PCS_STATUS_9 0x38054
+
+#define S_MARKER_INS_CNT_25_5 16
+#define M_MARKER_INS_CNT_25_5 0xffffU
+#define V_MARKER_INS_CNT_25_5(x) ((x) << S_MARKER_INS_CNT_25_5)
+#define G_MARKER_INS_CNT_25_5(x) (((x) >> S_MARKER_INS_CNT_25_5) & M_MARKER_INS_CNT_25_5)
+
+#define S_MARKER_INS_CNT_25_3 0
+#define M_MARKER_INS_CNT_25_3 0xffffU
+#define V_MARKER_INS_CNT_25_3(x) ((x) << S_MARKER_INS_CNT_25_3)
+#define G_MARKER_INS_CNT_25_3(x) (((x) >> S_MARKER_INS_CNT_25_3) & M_MARKER_INS_CNT_25_3)
+
+#define A_MAC_PCS_STATUS_10 0x38058
+
+#define S_MARKER_INS_CNT_25_50_2 16
+#define M_MARKER_INS_CNT_25_50_2 0xffffU
+#define V_MARKER_INS_CNT_25_50_2(x) ((x) << S_MARKER_INS_CNT_25_50_2)
+#define G_MARKER_INS_CNT_25_50_2(x) (((x) >> S_MARKER_INS_CNT_25_50_2) & M_MARKER_INS_CNT_25_50_2)
+
+#define S_MARKER_INS_CNT_25_50_0 0
+#define M_MARKER_INS_CNT_25_50_0 0xffffU
+#define V_MARKER_INS_CNT_25_50_0(x) ((x) << S_MARKER_INS_CNT_25_50_0)
+#define G_MARKER_INS_CNT_25_50_0(x) (((x) >> S_MARKER_INS_CNT_25_50_0) & M_MARKER_INS_CNT_25_50_0)
+
+#define A_MAC_PCS_STATUS_11 0x3805c
+
+#define S_MARKER_INS_CNT_25_50_6 16
+#define M_MARKER_INS_CNT_25_50_6 0xffffU
+#define V_MARKER_INS_CNT_25_50_6(x) ((x) << S_MARKER_INS_CNT_25_50_6)
+#define G_MARKER_INS_CNT_25_50_6(x) (((x) >> S_MARKER_INS_CNT_25_50_6) & M_MARKER_INS_CNT_25_50_6)
+
+#define S_MARKER_INS_CNT_25_50_4 0
+#define M_MARKER_INS_CNT_25_50_4 0xffffU
+#define V_MARKER_INS_CNT_25_50_4(x) ((x) << S_MARKER_INS_CNT_25_50_4)
+#define G_MARKER_INS_CNT_25_50_4(x) (((x) >> S_MARKER_INS_CNT_25_50_4) & M_MARKER_INS_CNT_25_50_4)
+
+#define A_MAC_PCS_STATUS_12 0x38060
+
+#define S_T7_LINK_STATUS 24
+#define M_T7_LINK_STATUS 0xffU
+#define V_T7_LINK_STATUS(x) ((x) << S_T7_LINK_STATUS)
+#define G_T7_LINK_STATUS(x) (((x) >> S_T7_LINK_STATUS) & M_T7_LINK_STATUS)
+
+#define S_T7_HI_BER 16
+#define M_T7_HI_BER 0xffU
+#define V_T7_HI_BER(x) ((x) << S_T7_HI_BER)
+#define G_T7_HI_BER(x) (((x) >> S_T7_HI_BER) & M_T7_HI_BER)
+
+#define S_MARKER_INS_CNT_25_7 0
+#define M_MARKER_INS_CNT_25_7 0xffffU
+#define V_MARKER_INS_CNT_25_7(x) ((x) << S_MARKER_INS_CNT_25_7)
+#define G_MARKER_INS_CNT_25_7(x) (((x) >> S_MARKER_INS_CNT_25_7) & M_MARKER_INS_CNT_25_7)
+
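+/*
+ * Illustrative sketch (not part of the original change): T7_LINK_STATUS
+ * and T7_HI_BER in A_MAC_PCS_STATUS_12 are 8-bit fields that appear to
+ * carry one bit per port.  Assuming that layout, a per-port link test
+ * would be:
+ */
+static inline bool
+mac_port_link_up(uint32_t status12, u_int port)
+{
+	return ((G_T7_LINK_STATUS(status12) & (1U << port)) != 0);
+}
+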
+#define A_MAC_MAC200G400G_0_CONFIG_0 0x38064
+#define A_MAC_MAC200G400G_0_CONFIG_1 0x38068
+
+#define S_FF_TX_CRC_OVR 11
+#define V_FF_TX_CRC_OVR(x) ((x) << S_FF_TX_CRC_OVR)
+#define F_FF_TX_CRC_OVR V_FF_TX_CRC_OVR(1U)
+
+#define S_TX_SMHOLD 2
+#define V_TX_SMHOLD(x) ((x) << S_TX_SMHOLD)
+#define F_TX_SMHOLD V_TX_SMHOLD(1U)
+
+#define A_MAC_MAC200G400G_0_CONFIG_2 0x3806c
+#define A_MAC_MAC200G400G_0_CONFIG_3 0x38070
+#define A_MAC_MAC200G400G_0_CONFIG_4 0x38074
+
+#define S_FRC_DELTA 0
+#define M_FRC_DELTA 0xffffU
+#define V_FRC_DELTA(x) ((x) << S_FRC_DELTA)
+#define G_FRC_DELTA(x) (((x) >> S_FRC_DELTA) & M_FRC_DELTA)
+
+#define A_MAC_MAC200G400G_0_STATUS 0x38078
+
+#define S_T7_LOOP_ENA 4
+#define V_T7_LOOP_ENA(x) ((x) << S_T7_LOOP_ENA)
+#define F_T7_LOOP_ENA V_T7_LOOP_ENA(1U)
+
+#define S_T7_LOC_FAULT 3
+#define V_T7_LOC_FAULT(x) ((x) << S_T7_LOC_FAULT)
+#define F_T7_LOC_FAULT V_T7_LOC_FAULT(1U)
+
+#define S_FRM_DROP 2
+#define V_FRM_DROP(x) ((x) << S_FRM_DROP)
+#define F_FRM_DROP V_FRM_DROP(1U)
+
+#define S_FF_TX_CREDIT 1
+#define V_FF_TX_CREDIT(x) ((x) << S_FF_TX_CREDIT)
+#define F_FF_TX_CREDIT V_FF_TX_CREDIT(1U)
+
+#define A_MAC_MAC200G400G_1_CONFIG_0 0x3807c
+#define A_MAC_MAC200G400G_1_CONFIG_1 0x38080
+#define A_MAC_MAC200G400G_1_CONFIG_2 0x38084
+#define A_MAC_MAC200G400G_1_CONFIG_3 0x38088
+#define A_MAC_MAC200G400G_1_CONFIG_4 0x3808c
+#define A_MAC_MAC200G400G_1_STATUS 0x38090
+#define A_MAC_AN_CFG_0 0x38094
+
+#define S_T7_AN_DATA_CTL 24
+#define M_T7_AN_DATA_CTL 0xffU
+#define V_T7_AN_DATA_CTL(x) ((x) << S_T7_AN_DATA_CTL)
+#define G_T7_AN_DATA_CTL(x) (((x) >> S_T7_AN_DATA_CTL) & M_T7_AN_DATA_CTL)
+
+#define S_T7_AN_ENA 16
+#define M_T7_AN_ENA 0xffU
+#define V_T7_AN_ENA(x) ((x) << S_T7_AN_ENA)
+#define G_T7_AN_ENA(x) (((x) >> S_T7_AN_ENA) & M_T7_AN_ENA)
+
+#define A_MAC_AN_CFG_1 0x38098
+
+#define S_AN_DIS_TIMER_AN_7 7
+#define V_AN_DIS_TIMER_AN_7(x) ((x) << S_AN_DIS_TIMER_AN_7)
+#define F_AN_DIS_TIMER_AN_7 V_AN_DIS_TIMER_AN_7(1U)
+
+#define S_AN_DIS_TIMER_AN_6 6
+#define V_AN_DIS_TIMER_AN_6(x) ((x) << S_AN_DIS_TIMER_AN_6)
+#define F_AN_DIS_TIMER_AN_6 V_AN_DIS_TIMER_AN_6(1U)
+
+#define S_AN_DIS_TIMER_AN_5 5
+#define V_AN_DIS_TIMER_AN_5(x) ((x) << S_AN_DIS_TIMER_AN_5)
+#define F_AN_DIS_TIMER_AN_5 V_AN_DIS_TIMER_AN_5(1U)
+
+#define S_AN_DIS_TIMER_AN_4 4
+#define V_AN_DIS_TIMER_AN_4(x) ((x) << S_AN_DIS_TIMER_AN_4)
+#define F_AN_DIS_TIMER_AN_4 V_AN_DIS_TIMER_AN_4(1U)
+
+#define S_AN_DIS_TIMER_AN_3 3
+#define V_AN_DIS_TIMER_AN_3(x) ((x) << S_AN_DIS_TIMER_AN_3)
+#define F_AN_DIS_TIMER_AN_3 V_AN_DIS_TIMER_AN_3(1U)
+
+#define S_AN_DIS_TIMER_AN_2 2
+#define V_AN_DIS_TIMER_AN_2(x) ((x) << S_AN_DIS_TIMER_AN_2)
+#define F_AN_DIS_TIMER_AN_2 V_AN_DIS_TIMER_AN_2(1U)
+
+#define S_AN_DIS_TIMER_AN_1 1
+#define V_AN_DIS_TIMER_AN_1(x) ((x) << S_AN_DIS_TIMER_AN_1)
+#define F_AN_DIS_TIMER_AN_1 V_AN_DIS_TIMER_AN_1(1U)
+
+#define S_AN_DIS_TIMER_AN_0 0
+#define V_AN_DIS_TIMER_AN_0(x) ((x) << S_AN_DIS_TIMER_AN_0)
+#define F_AN_DIS_TIMER_AN_0 V_AN_DIS_TIMER_AN_0(1U)
+
+#define A_MAC_AN_SERDES25G_ENA 0x3809c
+
+#define S_AN_SD25_TX_ENA_7 15
+#define V_AN_SD25_TX_ENA_7(x) ((x) << S_AN_SD25_TX_ENA_7)
+#define F_AN_SD25_TX_ENA_7 V_AN_SD25_TX_ENA_7(1U)
+
+#define S_AN_SD25_TX_ENA_6 14
+#define V_AN_SD25_TX_ENA_6(x) ((x) << S_AN_SD25_TX_ENA_6)
+#define F_AN_SD25_TX_ENA_6 V_AN_SD25_TX_ENA_6(1U)
+
+#define S_AN_SD25_TX_ENA_5 13
+#define V_AN_SD25_TX_ENA_5(x) ((x) << S_AN_SD25_TX_ENA_5)
+#define F_AN_SD25_TX_ENA_5 V_AN_SD25_TX_ENA_5(1U)
+
+#define S_AN_SD25_TX_ENA_4 12
+#define V_AN_SD25_TX_ENA_4(x) ((x) << S_AN_SD25_TX_ENA_4)
+#define F_AN_SD25_TX_ENA_4 V_AN_SD25_TX_ENA_4(1U)
+
+#define S_AN_SD25_TX_ENA_3 11
+#define V_AN_SD25_TX_ENA_3(x) ((x) << S_AN_SD25_TX_ENA_3)
+#define F_AN_SD25_TX_ENA_3 V_AN_SD25_TX_ENA_3(1U)
+
+#define S_AN_SD25_TX_ENA_2 10
+#define V_AN_SD25_TX_ENA_2(x) ((x) << S_AN_SD25_TX_ENA_2)
+#define F_AN_SD25_TX_ENA_2 V_AN_SD25_TX_ENA_2(1U)
+
+#define S_AN_SD25_TX_ENA_1 9
+#define V_AN_SD25_TX_ENA_1(x) ((x) << S_AN_SD25_TX_ENA_1)
+#define F_AN_SD25_TX_ENA_1 V_AN_SD25_TX_ENA_1(1U)
+
+#define S_AN_SD25_TX_ENA_0 8
+#define V_AN_SD25_TX_ENA_0(x) ((x) << S_AN_SD25_TX_ENA_0)
+#define F_AN_SD25_TX_ENA_0 V_AN_SD25_TX_ENA_0(1U)
+
+#define S_AN_SD25_RX_ENA_7 7
+#define V_AN_SD25_RX_ENA_7(x) ((x) << S_AN_SD25_RX_ENA_7)
+#define F_AN_SD25_RX_ENA_7 V_AN_SD25_RX_ENA_7(1U)
+
+#define S_AN_SD25_RX_ENA_6 6
+#define V_AN_SD25_RX_ENA_6(x) ((x) << S_AN_SD25_RX_ENA_6)
+#define F_AN_SD25_RX_ENA_6 V_AN_SD25_RX_ENA_6(1U)
+
+#define S_AN_SD25_RX_ENA_5 5
+#define V_AN_SD25_RX_ENA_5(x) ((x) << S_AN_SD25_RX_ENA_5)
+#define F_AN_SD25_RX_ENA_5 V_AN_SD25_RX_ENA_5(1U)
+
+#define S_AN_SD25_RX_ENA_4 4
+#define V_AN_SD25_RX_ENA_4(x) ((x) << S_AN_SD25_RX_ENA_4)
+#define F_AN_SD25_RX_ENA_4 V_AN_SD25_RX_ENA_4(1U)
+
+#define S_AN_SD25_RX_ENA_3 3
+#define V_AN_SD25_RX_ENA_3(x) ((x) << S_AN_SD25_RX_ENA_3)
+#define F_AN_SD25_RX_ENA_3 V_AN_SD25_RX_ENA_3(1U)
+
+#define S_AN_SD25_RX_ENA_2 2
+#define V_AN_SD25_RX_ENA_2(x) ((x) << S_AN_SD25_RX_ENA_2)
+#define F_AN_SD25_RX_ENA_2 V_AN_SD25_RX_ENA_2(1U)
+
+#define S_AN_SD25_RX_ENA_1 1
+#define V_AN_SD25_RX_ENA_1(x) ((x) << S_AN_SD25_RX_ENA_1)
+#define F_AN_SD25_RX_ENA_1 V_AN_SD25_RX_ENA_1(1U)
+
+#define S_AN_SD25_RX_ENA_0 0
+#define V_AN_SD25_RX_ENA_0(x) ((x) << S_AN_SD25_RX_ENA_0)
+#define F_AN_SD25_RX_ENA_0 V_AN_SD25_RX_ENA_0(1U)
+
+#define A_MAC_PLL_CFG_0 0x380a0
+
+#define S_USE_RX_CDR_CLK_FOR_TX 7
+#define V_USE_RX_CDR_CLK_FOR_TX(x) ((x) << S_USE_RX_CDR_CLK_FOR_TX)
+#define F_USE_RX_CDR_CLK_FOR_TX V_USE_RX_CDR_CLK_FOR_TX(1U)
+
+#define S_HSSPLLSEL0 5
+#define M_HSSPLLSEL0 0x3U
+#define V_HSSPLLSEL0(x) ((x) << S_HSSPLLSEL0)
+#define G_HSSPLLSEL0(x) (((x) >> S_HSSPLLSEL0) & M_HSSPLLSEL0)
+
+#define S_HSSTXDIV2CLK_SEL0 3
+#define M_HSSTXDIV2CLK_SEL0 0x3U
+#define V_HSSTXDIV2CLK_SEL0(x) ((x) << S_HSSTXDIV2CLK_SEL0)
+#define G_HSSTXDIV2CLK_SEL0(x) (((x) >> S_HSSTXDIV2CLK_SEL0) & M_HSSTXDIV2CLK_SEL0)
+
+#define S_HSS_RESET0 2
+#define V_HSS_RESET0(x) ((x) << S_HSS_RESET0)
+#define F_HSS_RESET0 V_HSS_RESET0(1U)
+
+#define S_APB_RESET0 1
+#define V_APB_RESET0(x) ((x) << S_APB_RESET0)
+#define F_APB_RESET0 V_APB_RESET0(1U)
+
+#define S_HSSCLK32DIV2_RESET0 0
+#define V_HSSCLK32DIV2_RESET0(x) ((x) << S_HSSCLK32DIV2_RESET0)
+#define F_HSSCLK32DIV2_RESET0 V_HSSCLK32DIV2_RESET0(1U)
+
+#define A_MAC_PLL_CFG_1 0x380a4
+
+#define S_HSSPLLSEL1 5
+#define M_HSSPLLSEL1 0x3U
+#define V_HSSPLLSEL1(x) ((x) << S_HSSPLLSEL1)
+#define G_HSSPLLSEL1(x) (((x) >> S_HSSPLLSEL1) & M_HSSPLLSEL1)
+
+#define S_HSSTXDIV2CLK_SEL1 3
+#define M_HSSTXDIV2CLK_SEL1 0x3U
+#define V_HSSTXDIV2CLK_SEL1(x) ((x) << S_HSSTXDIV2CLK_SEL1)
+#define G_HSSTXDIV2CLK_SEL1(x) (((x) >> S_HSSTXDIV2CLK_SEL1) & M_HSSTXDIV2CLK_SEL1)
+
+#define S_HSS_RESET1 2
+#define V_HSS_RESET1(x) ((x) << S_HSS_RESET1)
+#define F_HSS_RESET1 V_HSS_RESET1(1U)
+
+#define S_APB_RESET1 1
+#define V_APB_RESET1(x) ((x) << S_APB_RESET1)
+#define F_APB_RESET1 V_APB_RESET1(1U)
+
+#define S_HSSCLK32DIV2_RESET1 0
+#define V_HSSCLK32DIV2_RESET1(x) ((x) << S_HSSCLK32DIV2_RESET1)
+#define F_HSSCLK32DIV2_RESET1 V_HSSCLK32DIV2_RESET1(1U)
+
+#define A_MAC_PLL_CFG_2 0x380a8
+
+#define S_HSSPLLSEL2 5
+#define M_HSSPLLSEL2 0x3U
+#define V_HSSPLLSEL2(x) ((x) << S_HSSPLLSEL2)
+#define G_HSSPLLSEL2(x) (((x) >> S_HSSPLLSEL2) & M_HSSPLLSEL2)
+
+#define S_HSSTXDIV2CLK_SEL2 3
+#define M_HSSTXDIV2CLK_SEL2 0x3U
+#define V_HSSTXDIV2CLK_SEL2(x) ((x) << S_HSSTXDIV2CLK_SEL2)
+#define G_HSSTXDIV2CLK_SEL2(x) (((x) >> S_HSSTXDIV2CLK_SEL2) & M_HSSTXDIV2CLK_SEL2)
+
+#define S_HSS_RESET2 2
+#define V_HSS_RESET2(x) ((x) << S_HSS_RESET2)
+#define F_HSS_RESET2 V_HSS_RESET2(1U)
+
+#define S_APB_RESET2 1
+#define V_APB_RESET2(x) ((x) << S_APB_RESET2)
+#define F_APB_RESET2 V_APB_RESET2(1U)
+
+#define S_HSSCLK32DIV2_RESET2 0
+#define V_HSSCLK32DIV2_RESET2(x) ((x) << S_HSSCLK32DIV2_RESET2)
+#define F_HSSCLK32DIV2_RESET2 V_HSSCLK32DIV2_RESET2(1U)
+
+#define A_MAC_PLL_CFG_3 0x380ac
+
+#define S_HSSPLLSEL3 5
+#define M_HSSPLLSEL3 0x3U
+#define V_HSSPLLSEL3(x) ((x) << S_HSSPLLSEL3)
+#define G_HSSPLLSEL3(x) (((x) >> S_HSSPLLSEL3) & M_HSSPLLSEL3)
+
+#define S_HSSTXDIV2CLK_SEL3 3
+#define M_HSSTXDIV2CLK_SEL3 0x3U
+#define V_HSSTXDIV2CLK_SEL3(x) ((x) << S_HSSTXDIV2CLK_SEL3)
+#define G_HSSTXDIV2CLK_SEL3(x) (((x) >> S_HSSTXDIV2CLK_SEL3) & M_HSSTXDIV2CLK_SEL3)
+
+#define S_HSS_RESET3 2
+#define V_HSS_RESET3(x) ((x) << S_HSS_RESET3)
+#define F_HSS_RESET3 V_HSS_RESET3(1U)
+
+#define S_APB_RESET3 1
+#define V_APB_RESET3(x) ((x) << S_APB_RESET3)
+#define F_APB_RESET3 V_APB_RESET3(1U)
+
+#define S_HSSCLK32DIV2_RESET3 0
+#define V_HSSCLK32DIV2_RESET3(x) ((x) << S_HSSCLK32DIV2_RESET3)
+#define F_HSSCLK32DIV2_RESET3 V_HSSCLK32DIV2_RESET3(1U)
+
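+/*
+ * Illustrative sketch (not part of the original change):
+ * A_MAC_PLL_CFG_0..3 are four per-HSS copies of the same layout at a
+ * stride of 4 bytes, so the instance address can be computed rather than
+ * switched on (the numbered field macros, e.g. HSSPLLSEL0..HSSPLLSEL3,
+ * share one layout):
+ */
+static inline uint32_t
+mac_pll_cfg_reg(u_int hss)
+{
+	return (A_MAC_PLL_CFG_0 + 4 * hss);	/* hss in 0..3 */
+}
+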
+#define A_MAC_HSS_STATUS 0x380b0
+
+#define S_TX_LANE_PLL_SEL_3 30
+#define M_TX_LANE_PLL_SEL_3 0x3U
+#define V_TX_LANE_PLL_SEL_3(x) ((x) << S_TX_LANE_PLL_SEL_3)
+#define G_TX_LANE_PLL_SEL_3(x) (((x) >> S_TX_LANE_PLL_SEL_3) & M_TX_LANE_PLL_SEL_3)
+
+#define S_TX_LANE_PLL_SEL_2 28
+#define M_TX_LANE_PLL_SEL_2 0x3U
+#define V_TX_LANE_PLL_SEL_2(x) ((x) << S_TX_LANE_PLL_SEL_2)
+#define G_TX_LANE_PLL_SEL_2(x) (((x) >> S_TX_LANE_PLL_SEL_2) & M_TX_LANE_PLL_SEL_2)
+
+#define S_TX_LANE_PLL_SEL_1 26
+#define M_TX_LANE_PLL_SEL_1 0x3U
+#define V_TX_LANE_PLL_SEL_1(x) ((x) << S_TX_LANE_PLL_SEL_1)
+#define G_TX_LANE_PLL_SEL_1(x) (((x) >> S_TX_LANE_PLL_SEL_1) & M_TX_LANE_PLL_SEL_1)
+
+#define S_TX_LANE_PLL_SEL_0 24
+#define M_TX_LANE_PLL_SEL_0 0x3U
+#define V_TX_LANE_PLL_SEL_0(x) ((x) << S_TX_LANE_PLL_SEL_0)
+#define G_TX_LANE_PLL_SEL_0(x) (((x) >> S_TX_LANE_PLL_SEL_0) & M_TX_LANE_PLL_SEL_0)
+
+#define S_HSSPLLLOCKB_HSS3 7
+#define V_HSSPLLLOCKB_HSS3(x) ((x) << S_HSSPLLLOCKB_HSS3)
+#define F_HSSPLLLOCKB_HSS3 V_HSSPLLLOCKB_HSS3(1U)
+
+#define S_HSSPLLLOCKA_HSS3 6
+#define V_HSSPLLLOCKA_HSS3(x) ((x) << S_HSSPLLLOCKA_HSS3)
+#define F_HSSPLLLOCKA_HSS3 V_HSSPLLLOCKA_HSS3(1U)
+
+#define S_HSSPLLLOCKB_HSS2 5
+#define V_HSSPLLLOCKB_HSS2(x) ((x) << S_HSSPLLLOCKB_HSS2)
+#define F_HSSPLLLOCKB_HSS2 V_HSSPLLLOCKB_HSS2(1U)
+
+#define S_HSSPLLLOCKA_HSS2 4
+#define V_HSSPLLLOCKA_HSS2(x) ((x) << S_HSSPLLLOCKA_HSS2)
+#define F_HSSPLLLOCKA_HSS2 V_HSSPLLLOCKA_HSS2(1U)
+
+#define S_HSSPLLLOCKB_HSS1 3
+#define V_HSSPLLLOCKB_HSS1(x) ((x) << S_HSSPLLLOCKB_HSS1)
+#define F_HSSPLLLOCKB_HSS1 V_HSSPLLLOCKB_HSS1(1U)
+
+#define S_HSSPLLLOCKA_HSS1 2
+#define V_HSSPLLLOCKA_HSS1(x) ((x) << S_HSSPLLLOCKA_HSS1)
+#define F_HSSPLLLOCKA_HSS1 V_HSSPLLLOCKA_HSS1(1U)
+
+#define S_HSSPLLLOCKB_HSS0 1
+#define V_HSSPLLLOCKB_HSS0(x) ((x) << S_HSSPLLLOCKB_HSS0)
+#define F_HSSPLLLOCKB_HSS0 V_HSSPLLLOCKB_HSS0(1U)
+
+#define S_HSSPLLLOCKA_HSS0 0
+#define V_HSSPLLLOCKA_HSS0(x) ((x) << S_HSSPLLLOCKA_HSS0)
+#define F_HSSPLLLOCKA_HSS0 V_HSSPLLLOCKA_HSS0(1U)
+
+#define A_MAC_HSS_SIGDET_STATUS 0x380b4
+
+#define S_HSS3_SIGDET 6
+#define M_HSS3_SIGDET 0x3U
+#define V_HSS3_SIGDET(x) ((x) << S_HSS3_SIGDET)
+#define G_HSS3_SIGDET(x) (((x) >> S_HSS3_SIGDET) & M_HSS3_SIGDET)
+
+#define S_HSS2_SIGDET 4
+#define M_HSS2_SIGDET 0x3U
+#define V_HSS2_SIGDET(x) ((x) << S_HSS2_SIGDET)
+#define G_HSS2_SIGDET(x) (((x) >> S_HSS2_SIGDET) & M_HSS2_SIGDET)
+
+#define S_HSS1_SIGDET 2
+#define M_HSS1_SIGDET 0x3U
+#define V_HSS1_SIGDET(x) ((x) << S_HSS1_SIGDET)
+#define G_HSS1_SIGDET(x) (((x) >> S_HSS1_SIGDET) & M_HSS1_SIGDET)
+
+#define S_HSS0_SIGDET 0
+#define M_HSS0_SIGDET 0x3U
+#define V_HSS0_SIGDET(x) ((x) << S_HSS0_SIGDET)
+#define G_HSS0_SIGDET(x) (((x) >> S_HSS0_SIGDET) & M_HSS0_SIGDET)
+
+#define A_MAC_FPGA_CFG_0 0x380b8
+#define A_MAC_PMD_STATUS 0x380bc
+
+#define S_SIGNAL_DETECT 0
+#define M_SIGNAL_DETECT 0xffU
+#define V_SIGNAL_DETECT(x) ((x) << S_SIGNAL_DETECT)
+#define G_SIGNAL_DETECT(x) (((x) >> S_SIGNAL_DETECT) & M_SIGNAL_DETECT)
+
+#define A_MAC_PMD_AN_CONFIG0 0x380c0
+
+#define S_AN3_RATE_SELECT 25
+#define M_AN3_RATE_SELECT 0x1fU
+#define V_AN3_RATE_SELECT(x) ((x) << S_AN3_RATE_SELECT)
+#define G_AN3_RATE_SELECT(x) (((x) >> S_AN3_RATE_SELECT) & M_AN3_RATE_SELECT)
+
+#define S_AN3_STATUS 24
+#define V_AN3_STATUS(x) ((x) << S_AN3_STATUS)
+#define F_AN3_STATUS V_AN3_STATUS(1U)
+
+#define S_AN2_RATE_SELECT 17
+#define M_AN2_RATE_SELECT 0x1fU
+#define V_AN2_RATE_SELECT(x) ((x) << S_AN2_RATE_SELECT)
+#define G_AN2_RATE_SELECT(x) (((x) >> S_AN2_RATE_SELECT) & M_AN2_RATE_SELECT)
+
+#define S_AN2_STATUS 16
+#define V_AN2_STATUS(x) ((x) << S_AN2_STATUS)
+#define F_AN2_STATUS V_AN2_STATUS(1U)
+
+#define S_AN1_RATE_SELECT 9
+#define M_AN1_RATE_SELECT 0x1fU
+#define V_AN1_RATE_SELECT(x) ((x) << S_AN1_RATE_SELECT)
+#define G_AN1_RATE_SELECT(x) (((x) >> S_AN1_RATE_SELECT) & M_AN1_RATE_SELECT)
+
+#define S_AN1_STATUS 8
+#define V_AN1_STATUS(x) ((x) << S_AN1_STATUS)
+#define F_AN1_STATUS V_AN1_STATUS(1U)
+
+#define S_AN0_RATE_SELECT 1
+#define M_AN0_RATE_SELECT 0x1fU
+#define V_AN0_RATE_SELECT(x) ((x) << S_AN0_RATE_SELECT)
+#define G_AN0_RATE_SELECT(x) (((x) >> S_AN0_RATE_SELECT) & M_AN0_RATE_SELECT)
+
+#define S_AN0_STATUS 0
+#define V_AN0_STATUS(x) ((x) << S_AN0_STATUS)
+#define F_AN0_STATUS V_AN0_STATUS(1U)
+
+#define A_MAC_PMD_AN_CONFIG1 0x380c4
+
+#define S_AN7_RATE_SELECT 25
+#define M_AN7_RATE_SELECT 0x1fU
+#define V_AN7_RATE_SELECT(x) ((x) << S_AN7_RATE_SELECT)
+#define G_AN7_RATE_SELECT(x) (((x) >> S_AN7_RATE_SELECT) & M_AN7_RATE_SELECT)
+
+#define S_AN7_STATUS 24
+#define V_AN7_STATUS(x) ((x) << S_AN7_STATUS)
+#define F_AN7_STATUS V_AN7_STATUS(1U)
+
+#define S_AN6_RATE_SELECT 17
+#define M_AN6_RATE_SELECT 0x1fU
+#define V_AN6_RATE_SELECT(x) ((x) << S_AN6_RATE_SELECT)
+#define G_AN6_RATE_SELECT(x) (((x) >> S_AN6_RATE_SELECT) & M_AN6_RATE_SELECT)
+
+#define S_AN6_STATUS 16
+#define V_AN6_STATUS(x) ((x) << S_AN6_STATUS)
+#define F_AN6_STATUS V_AN6_STATUS(1U)
+
+#define S_AN5_RATE_SELECT 9
+#define M_AN5_RATE_SELECT 0x1fU
+#define V_AN5_RATE_SELECT(x) ((x) << S_AN5_RATE_SELECT)
+#define G_AN5_RATE_SELECT(x) (((x) >> S_AN5_RATE_SELECT) & M_AN5_RATE_SELECT)
+
+#define S_AN5_STATUS 8
+#define V_AN5_STATUS(x) ((x) << S_AN5_STATUS)
+#define F_AN5_STATUS V_AN5_STATUS(1U)
+
+#define S_AN4_RATE_SELECT 1
+#define M_AN4_RATE_SELECT 0x1fU
+#define V_AN4_RATE_SELECT(x) ((x) << S_AN4_RATE_SELECT)
+#define G_AN4_RATE_SELECT(x) (((x) >> S_AN4_RATE_SELECT) & M_AN4_RATE_SELECT)
+
+#define S_AN4_STATUS 0
+#define V_AN4_STATUS(x) ((x) << S_AN4_STATUS)
+#define F_AN4_STATUS V_AN4_STATUS(1U)
+
+#define A_MAC_INT_EN_CMN 0x380c8
+
+#define S_HSS3PLL1_LOCK_LOST_INT_EN 21
+#define V_HSS3PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_EN)
+#define F_HSS3PLL1_LOCK_LOST_INT_EN V_HSS3PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL1_LOCK_INT_EN 20
+#define V_HSS3PLL1_LOCK_INT_EN(x) ((x) << S_HSS3PLL1_LOCK_INT_EN)
+#define F_HSS3PLL1_LOCK_INT_EN V_HSS3PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_EN 19
+#define V_HSS3PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_EN)
+#define F_HSS3PLL0_LOCK_LOST_INT_EN V_HSS3PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS3PLL0_LOCK_INT_EN 18
+#define V_HSS3PLL0_LOCK_INT_EN(x) ((x) << S_HSS3PLL0_LOCK_INT_EN)
+#define F_HSS3PLL0_LOCK_INT_EN V_HSS3PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_EN 17
+#define V_HSS2PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_EN)
+#define F_HSS2PLL1_LOCK_LOST_INT_EN V_HSS2PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL1_LOCK_INT_EN 16
+#define V_HSS2PLL1_LOCK_INT_EN(x) ((x) << S_HSS2PLL1_LOCK_INT_EN)
+#define F_HSS2PLL1_LOCK_INT_EN V_HSS2PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_EN 15
+#define V_HSS2PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_EN)
+#define F_HSS2PLL0_LOCK_LOST_INT_EN V_HSS2PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS2PLL0_LOCK_INT_EN 14
+#define V_HSS2PLL0_LOCK_INT_EN(x) ((x) << S_HSS2PLL0_LOCK_INT_EN)
+#define F_HSS2PLL0_LOCK_INT_EN V_HSS2PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_EN 13
+#define V_HSS1PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_EN)
+#define F_HSS1PLL1_LOCK_LOST_INT_EN V_HSS1PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL1_LOCK_INT_EN 12
+#define V_HSS1PLL1_LOCK_INT_EN(x) ((x) << S_HSS1PLL1_LOCK_INT_EN)
+#define F_HSS1PLL1_LOCK_INT_EN V_HSS1PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_EN 11
+#define V_HSS1PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_EN)
+#define F_HSS1PLL0_LOCK_LOST_INT_EN V_HSS1PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS1PLL0_LOCK_INT_EN 10
+#define V_HSS1PLL0_LOCK_INT_EN(x) ((x) << S_HSS1PLL0_LOCK_INT_EN)
+#define F_HSS1PLL0_LOCK_INT_EN V_HSS1PLL0_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_EN 9
+#define V_HSS0PLL1_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_EN)
+#define F_HSS0PLL1_LOCK_LOST_INT_EN V_HSS0PLL1_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL1_LOCK_INT_EN 8
+#define V_HSS0PLL1_LOCK_INT_EN(x) ((x) << S_HSS0PLL1_LOCK_INT_EN)
+#define F_HSS0PLL1_LOCK_INT_EN V_HSS0PLL1_LOCK_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_EN 7
+#define V_HSS0PLL0_LOCK_LOST_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_EN)
+#define F_HSS0PLL0_LOCK_LOST_INT_EN V_HSS0PLL0_LOCK_LOST_INT_EN(1U)
+
+#define S_HSS0PLL0_LOCK_INT_EN 6
+#define V_HSS0PLL0_LOCK_INT_EN(x) ((x) << S_HSS0PLL0_LOCK_INT_EN)
+#define F_HSS0PLL0_LOCK_INT_EN V_HSS0PLL0_LOCK_INT_EN(1U)
+
+#define S_FLOCK_ASSERTED 5
+#define V_FLOCK_ASSERTED(x) ((x) << S_FLOCK_ASSERTED)
+#define F_FLOCK_ASSERTED V_FLOCK_ASSERTED(1U)
+
+#define S_FLOCK_LOST 4
+#define V_FLOCK_LOST(x) ((x) << S_FLOCK_LOST)
+#define F_FLOCK_LOST V_FLOCK_LOST(1U)
+
+#define S_PHASE_LOCK_ASSERTED 3
+#define V_PHASE_LOCK_ASSERTED(x) ((x) << S_PHASE_LOCK_ASSERTED)
+#define F_PHASE_LOCK_ASSERTED V_PHASE_LOCK_ASSERTED(1U)
+
+#define S_PHASE_LOCK_LOST 2
+#define V_PHASE_LOCK_LOST(x) ((x) << S_PHASE_LOCK_LOST)
+#define F_PHASE_LOCK_LOST V_PHASE_LOCK_LOST(1U)
+
+#define S_LOCK_ASSERTED 1
+#define V_LOCK_ASSERTED(x) ((x) << S_LOCK_ASSERTED)
+#define F_LOCK_ASSERTED V_LOCK_ASSERTED(1U)
+
+#define S_LOCK_LOST 0
+#define V_LOCK_LOST(x) ((x) << S_LOCK_LOST)
+#define F_LOCK_LOST V_LOCK_LOST(1U)
+
+#define A_MAC_INT_CAUSE_CMN 0x380cc
+
+#define S_HSS3PLL1_LOCK_LOST_INT_CAUSE 21
+#define V_HSS3PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_LOST_INT_CAUSE V_HSS3PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL1_LOCK_INT_CAUSE 20
+#define V_HSS3PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL1_LOCK_INT_CAUSE)
+#define F_HSS3PLL1_LOCK_INT_CAUSE V_HSS3PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_LOST_INT_CAUSE 19
+#define V_HSS3PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_LOST_INT_CAUSE V_HSS3PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS3PLL0_LOCK_INT_CAUSE 18
+#define V_HSS3PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS3PLL0_LOCK_INT_CAUSE)
+#define F_HSS3PLL0_LOCK_INT_CAUSE V_HSS3PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_LOST_INT_CAUSE 17
+#define V_HSS2PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_LOST_INT_CAUSE V_HSS2PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL1_LOCK_INT_CAUSE 16
+#define V_HSS2PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL1_LOCK_INT_CAUSE)
+#define F_HSS2PLL1_LOCK_INT_CAUSE V_HSS2PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_LOST_INT_CAUSE 15
+#define V_HSS2PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_LOST_INT_CAUSE V_HSS2PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS2PLL0_LOCK_INT_CAUSE 14
+#define V_HSS2PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS2PLL0_LOCK_INT_CAUSE)
+#define F_HSS2PLL0_LOCK_INT_CAUSE V_HSS2PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_LOST_INT_CAUSE 13
+#define V_HSS1PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_LOST_INT_CAUSE V_HSS1PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL1_LOCK_INT_CAUSE 12
+#define V_HSS1PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL1_LOCK_INT_CAUSE)
+#define F_HSS1PLL1_LOCK_INT_CAUSE V_HSS1PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_LOST_INT_CAUSE 11
+#define V_HSS1PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_LOST_INT_CAUSE V_HSS1PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS1PLL0_LOCK_INT_CAUSE 10
+#define V_HSS1PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS1PLL0_LOCK_INT_CAUSE)
+#define F_HSS1PLL0_LOCK_INT_CAUSE V_HSS1PLL0_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_LOST_INT_CAUSE 9
+#define V_HSS0PLL1_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_LOST_INT_CAUSE V_HSS0PLL1_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL1_LOCK_INT_CAUSE 8
+#define V_HSS0PLL1_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL1_LOCK_INT_CAUSE)
+#define F_HSS0PLL1_LOCK_INT_CAUSE V_HSS0PLL1_LOCK_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_LOST_INT_CAUSE 7
+#define V_HSS0PLL0_LOCK_LOST_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_LOST_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_LOST_INT_CAUSE V_HSS0PLL0_LOCK_LOST_INT_CAUSE(1U)
+
+#define S_HSS0PLL0_LOCK_INT_CAUSE 6
+#define V_HSS0PLL0_LOCK_INT_CAUSE(x) ((x) << S_HSS0PLL0_LOCK_INT_CAUSE)
+#define F_HSS0PLL0_LOCK_INT_CAUSE V_HSS0PLL0_LOCK_INT_CAUSE(1U)
+
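+/*
+ * Illustrative sketch (not part of the original change): a typical
+ * enable/cause pair.  The handler reads A_MAC_INT_CAUSE_CMN, ignores bits
+ * not enabled in A_MAC_INT_EN_CMN and, assuming the usual write-one-to-
+ * clear semantics, writes the handled bits back.  The "rd"/"wr" function
+ * pointers are hypothetical stand-ins for the driver's register
+ * accessors:
+ */
+static inline uint32_t
+handle_mac_cmn_intr(uint32_t (*rd)(uint32_t), void (*wr)(uint32_t, uint32_t))
+{
+	uint32_t cause;
+
+	cause = rd(A_MAC_INT_CAUSE_CMN) & rd(A_MAC_INT_EN_CMN);
+	if (cause != 0)
+		wr(A_MAC_INT_CAUSE_CMN, cause);	/* ack what was handled */
+	return (cause);
+}
+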
+#define A_MAC_PERR_INT_EN_MTIP 0x380d0
+
+#define S_PERR_MAC0_TX 19
+#define V_PERR_MAC0_TX(x) ((x) << S_PERR_MAC0_TX)
+#define F_PERR_MAC0_TX V_PERR_MAC0_TX(1U)
+
+#define S_PERR_MAC1_TX 18
+#define V_PERR_MAC1_TX(x) ((x) << S_PERR_MAC1_TX)
+#define F_PERR_MAC1_TX V_PERR_MAC1_TX(1U)
+
+#define S_PERR_MAC2_TX 17
+#define V_PERR_MAC2_TX(x) ((x) << S_PERR_MAC2_TX)
+#define F_PERR_MAC2_TX V_PERR_MAC2_TX(1U)
+
+#define S_PERR_MAC3_TX 16
+#define V_PERR_MAC3_TX(x) ((x) << S_PERR_MAC3_TX)
+#define F_PERR_MAC3_TX V_PERR_MAC3_TX(1U)
+
+#define S_PERR_MAC4_TX 15
+#define V_PERR_MAC4_TX(x) ((x) << S_PERR_MAC4_TX)
+#define F_PERR_MAC4_TX V_PERR_MAC4_TX(1U)
+
+#define S_PERR_MAC5_TX 14
+#define V_PERR_MAC5_TX(x) ((x) << S_PERR_MAC5_TX)
+#define F_PERR_MAC5_TX V_PERR_MAC5_TX(1U)
+
+#define S_PERR_MAC0_RX 13
+#define V_PERR_MAC0_RX(x) ((x) << S_PERR_MAC0_RX)
+#define F_PERR_MAC0_RX V_PERR_MAC0_RX(1U)
+
+#define S_PERR_MAC1_RX 12
+#define V_PERR_MAC1_RX(x) ((x) << S_PERR_MAC1_RX)
+#define F_PERR_MAC1_RX V_PERR_MAC1_RX(1U)
+
+#define S_PERR_MAC2_RX 11
+#define V_PERR_MAC2_RX(x) ((x) << S_PERR_MAC2_RX)
+#define F_PERR_MAC2_RX V_PERR_MAC2_RX(1U)
+
+#define S_PERR_MAC3_RX 10
+#define V_PERR_MAC3_RX(x) ((x) << S_PERR_MAC3_RX)
+#define F_PERR_MAC3_RX V_PERR_MAC3_RX(1U)
+
+#define S_PERR_MAC4_RX 9
+#define V_PERR_MAC4_RX(x) ((x) << S_PERR_MAC4_RX)
+#define F_PERR_MAC4_RX V_PERR_MAC4_RX(1U)
+
+#define S_PERR_MAC5_RX 8
+#define V_PERR_MAC5_RX(x) ((x) << S_PERR_MAC5_RX)
+#define F_PERR_MAC5_RX V_PERR_MAC5_RX(1U)
+
+#define S_PERR_MAC_STAT2_RX 7
+#define V_PERR_MAC_STAT2_RX(x) ((x) << S_PERR_MAC_STAT2_RX)
+#define F_PERR_MAC_STAT2_RX V_PERR_MAC_STAT2_RX(1U)
+
+#define S_PERR_MAC_STAT3_RX 6
+#define V_PERR_MAC_STAT3_RX(x) ((x) << S_PERR_MAC_STAT3_RX)
+#define F_PERR_MAC_STAT3_RX V_PERR_MAC_STAT3_RX(1U)
+
+#define S_PERR_MAC_STAT4_RX 5
+#define V_PERR_MAC_STAT4_RX(x) ((x) << S_PERR_MAC_STAT4_RX)
+#define F_PERR_MAC_STAT4_RX V_PERR_MAC_STAT4_RX(1U)
+
+#define S_PERR_MAC_STAT5_RX 4
+#define V_PERR_MAC_STAT5_RX(x) ((x) << S_PERR_MAC_STAT5_RX)
+#define F_PERR_MAC_STAT5_RX V_PERR_MAC_STAT5_RX(1U)
+
+#define S_PERR_MAC_STAT2_TX 3
+#define V_PERR_MAC_STAT2_TX(x) ((x) << S_PERR_MAC_STAT2_TX)
+#define F_PERR_MAC_STAT2_TX V_PERR_MAC_STAT2_TX(1U)
+
+#define S_PERR_MAC_STAT3_TX 2
+#define V_PERR_MAC_STAT3_TX(x) ((x) << S_PERR_MAC_STAT3_TX)
+#define F_PERR_MAC_STAT3_TX V_PERR_MAC_STAT3_TX(1U)
+
+#define S_PERR_MAC_STAT4_TX 1
+#define V_PERR_MAC_STAT4_TX(x) ((x) << S_PERR_MAC_STAT4_TX)
+#define F_PERR_MAC_STAT4_TX V_PERR_MAC_STAT4_TX(1U)
+
+#define S_PERR_MAC_STAT5_TX 0
+#define V_PERR_MAC_STAT5_TX(x) ((x) << S_PERR_MAC_STAT5_TX)
+#define F_PERR_MAC_STAT5_TX V_PERR_MAC_STAT5_TX(1U)
+
+#define A_MAC_PERR_INT_CAUSE_MTIP 0x380d4
+
+#define S_PERR_MAC_STAT_RX 7
+#define V_PERR_MAC_STAT_RX(x) ((x) << S_PERR_MAC_STAT_RX)
+#define F_PERR_MAC_STAT_RX V_PERR_MAC_STAT_RX(1U)
+
+#define S_PERR_MAC_STAT_TX 3
+#define V_PERR_MAC_STAT_TX(x) ((x) << S_PERR_MAC_STAT_TX)
+#define F_PERR_MAC_STAT_TX V_PERR_MAC_STAT_TX(1U)
+
+#define S_PERR_MAC_STAT_CAP 2
+#define V_PERR_MAC_STAT_CAP(x) ((x) << S_PERR_MAC_STAT_CAP)
+#define F_PERR_MAC_STAT_CAP V_PERR_MAC_STAT_CAP(1U)
+
+#define A_MAC_PERR_ENABLE_MTIP 0x380d8
+#define A_MAC_PCS_1G_CONFIG_0 0x380dc
+
+#define S_SEQ_ENA_3 19
+#define V_SEQ_ENA_3(x) ((x) << S_SEQ_ENA_3)
+#define F_SEQ_ENA_3 V_SEQ_ENA_3(1U)
+
+#define S_SEQ_ENA_2 18
+#define V_SEQ_ENA_2(x) ((x) << S_SEQ_ENA_2)
+#define F_SEQ_ENA_2 V_SEQ_ENA_2(1U)
+
+#define S_SEQ_ENA_1 17
+#define V_SEQ_ENA_1(x) ((x) << S_SEQ_ENA_1)
+#define F_SEQ_ENA_1 V_SEQ_ENA_1(1U)
+
+#define S_SEQ_ENA_0 16
+#define V_SEQ_ENA_0(x) ((x) << S_SEQ_ENA_0)
+#define F_SEQ_ENA_0 V_SEQ_ENA_0(1U)
+
+#define S_TX_LANE_THRESH_3 12
+#define M_TX_LANE_THRESH_3 0xfU
+#define V_TX_LANE_THRESH_3(x) ((x) << S_TX_LANE_THRESH_3)
+#define G_TX_LANE_THRESH_3(x) (((x) >> S_TX_LANE_THRESH_3) & M_TX_LANE_THRESH_3)
+
+#define S_TX_LANE_THRESH_2 8
+#define M_TX_LANE_THRESH_2 0xfU
+#define V_TX_LANE_THRESH_2(x) ((x) << S_TX_LANE_THRESH_2)
+#define G_TX_LANE_THRESH_2(x) (((x) >> S_TX_LANE_THRESH_2) & M_TX_LANE_THRESH_2)
+
+#define S_TX_LANE_THRESH_1 4
+#define M_TX_LANE_THRESH_1 0xfU
+#define V_TX_LANE_THRESH_1(x) ((x) << S_TX_LANE_THRESH_1)
+#define G_TX_LANE_THRESH_1(x) (((x) >> S_TX_LANE_THRESH_1) & M_TX_LANE_THRESH_1)
+
+#define S_TX_LANE_THRESH_0 0
+#define M_TX_LANE_THRESH_0 0xfU
+#define V_TX_LANE_THRESH_0(x) ((x) << S_TX_LANE_THRESH_0)
+#define G_TX_LANE_THRESH_0(x) (((x) >> S_TX_LANE_THRESH_0) & M_TX_LANE_THRESH_0)
+
+#define A_MAC_PCS_1G_CONFIG_1 0x380e0
+
+#define S_TX_LANE_CKMULT_3 9
+#define M_TX_LANE_CKMULT_3 0x7U
+#define V_TX_LANE_CKMULT_3(x) ((x) << S_TX_LANE_CKMULT_3)
+#define G_TX_LANE_CKMULT_3(x) (((x) >> S_TX_LANE_CKMULT_3) & M_TX_LANE_CKMULT_3)
+
+#define S_TX_LANE_CKMULT_2 6
+#define M_TX_LANE_CKMULT_2 0x7U
+#define V_TX_LANE_CKMULT_2(x) ((x) << S_TX_LANE_CKMULT_2)
+#define G_TX_LANE_CKMULT_2(x) (((x) >> S_TX_LANE_CKMULT_2) & M_TX_LANE_CKMULT_2)
+
+#define S_TX_LANE_CKMULT_1 3
+#define M_TX_LANE_CKMULT_1 0x7U
+#define V_TX_LANE_CKMULT_1(x) ((x) << S_TX_LANE_CKMULT_1)
+#define G_TX_LANE_CKMULT_1(x) (((x) >> S_TX_LANE_CKMULT_1) & M_TX_LANE_CKMULT_1)
+
+#define S_TX_LANE_CKMULT_0 0
+#define M_TX_LANE_CKMULT_0 0x7U
+#define V_TX_LANE_CKMULT_0(x) ((x) << S_TX_LANE_CKMULT_0)
+#define G_TX_LANE_CKMULT_0(x) (((x) >> S_TX_LANE_CKMULT_0) & M_TX_LANE_CKMULT_0)
+
+#define A_MAC_PTP_TIMER_RD0_LO 0x380e4
+#define A_MAC_PTP_TIMER_RD0_HI 0x380e8
+#define A_MAC_PTP_TIMER_RD1_LO 0x380ec
+#define A_MAC_PTP_TIMER_RD1_HI 0x380f0
+#define A_MAC_PTP_TIMER_WR_LO 0x380f4
+#define A_MAC_PTP_TIMER_WR_HI 0x380f8
+#define A_MAC_PTP_TIMER_OFFSET_0 0x380fc
+#define A_MAC_PTP_TIMER_OFFSET_1 0x38100
+#define A_MAC_PTP_TIMER_OFFSET_2 0x38104
+#define A_MAC_PTP_SUM_LO 0x38108
+#define A_MAC_PTP_SUM_HI 0x3810c
+#define A_MAC_PTP_TIMER_INCR0 0x38110
+#define A_MAC_PTP_TIMER_INCR1 0x38114
+#define A_MAC_PTP_DRIFT_ADJUST_COUNT 0x38118
+#define A_MAC_PTP_OFFSET_ADJUST_FINE 0x3811c
+#define A_MAC_PTP_OFFSET_ADJUST_TOTAL 0x38120
+#define A_MAC_PTP_CFG 0x38124
+#define A_MAC_PTP_PPS 0x38128
+#define A_MAC_PTP_SINGLE_ALARM 0x3812c
+#define A_MAC_PTP_PERIODIC_ALARM 0x38130
+#define A_MAC_PTP_STATUS 0x38134
+#define A_MAC_STS_GPIO_SEL 0x38140
+
+#define S_STSOUTSEL 1
+#define V_STSOUTSEL(x) ((x) << S_STSOUTSEL)
+#define F_STSOUTSEL V_STSOUTSEL(1U)
+
+#define S_STSINSEL 0
+#define V_STSINSEL(x) ((x) << S_STSINSEL)
+#define F_STSINSEL V_STSINSEL(1U)
+
+#define A_MAC_CERR_INT_EN_MTIP 0x38150
+
+#define S_CERR_MAC0_TX 11
+#define V_CERR_MAC0_TX(x) ((x) << S_CERR_MAC0_TX)
+#define F_CERR_MAC0_TX V_CERR_MAC0_TX(1U)
+
+#define S_CERR_MAC1_TX 10
+#define V_CERR_MAC1_TX(x) ((x) << S_CERR_MAC1_TX)
+#define F_CERR_MAC1_TX V_CERR_MAC1_TX(1U)
+
+#define S_CERR_MAC2_TX 9
+#define V_CERR_MAC2_TX(x) ((x) << S_CERR_MAC2_TX)
+#define F_CERR_MAC2_TX V_CERR_MAC2_TX(1U)
+
+#define S_CERR_MAC3_TX 8
+#define V_CERR_MAC3_TX(x) ((x) << S_CERR_MAC3_TX)
+#define F_CERR_MAC3_TX V_CERR_MAC3_TX(1U)
+
+#define S_CERR_MAC4_TX 7
+#define V_CERR_MAC4_TX(x) ((x) << S_CERR_MAC4_TX)
+#define F_CERR_MAC4_TX V_CERR_MAC4_TX(1U)
+
+#define S_CERR_MAC5_TX 6
+#define V_CERR_MAC5_TX(x) ((x) << S_CERR_MAC5_TX)
+#define F_CERR_MAC5_TX V_CERR_MAC5_TX(1U)
+
+#define S_CERR_MAC0_RX 5
+#define V_CERR_MAC0_RX(x) ((x) << S_CERR_MAC0_RX)
+#define F_CERR_MAC0_RX V_CERR_MAC0_RX(1U)
+
+#define S_CERR_MAC1_RX 4
+#define V_CERR_MAC1_RX(x) ((x) << S_CERR_MAC1_RX)
+#define F_CERR_MAC1_RX V_CERR_MAC1_RX(1U)
+
+#define S_CERR_MAC2_RX 3
+#define V_CERR_MAC2_RX(x) ((x) << S_CERR_MAC2_RX)
+#define F_CERR_MAC2_RX V_CERR_MAC2_RX(1U)
+
+#define S_CERR_MAC3_RX 2
+#define V_CERR_MAC3_RX(x) ((x) << S_CERR_MAC3_RX)
+#define F_CERR_MAC3_RX V_CERR_MAC3_RX(1U)
+
+#define S_CERR_MAC4_RX 1
+#define V_CERR_MAC4_RX(x) ((x) << S_CERR_MAC4_RX)
+#define F_CERR_MAC4_RX V_CERR_MAC4_RX(1U)
+
+#define S_CERR_MAC5_RX 0
+#define V_CERR_MAC5_RX(x) ((x) << S_CERR_MAC5_RX)
+#define F_CERR_MAC5_RX V_CERR_MAC5_RX(1U)
+
+#define A_MAC_CERR_INT_CAUSE_MTIP 0x38154
+#define A_MAC_1G_PCS0_STATUS 0x38160
+
+#define S_1G_PCS0_LOOPBACK 12
+#define V_1G_PCS0_LOOPBACK(x) ((x) << S_1G_PCS0_LOOPBACK)
+#define F_1G_PCS0_LOOPBACK V_1G_PCS0_LOOPBACK(1U)
+
+#define S_1G_PCS0_LINK_STATUS 11
+#define V_1G_PCS0_LINK_STATUS(x) ((x) << S_1G_PCS0_LINK_STATUS)
+#define F_1G_PCS0_LINK_STATUS V_1G_PCS0_LINK_STATUS(1U)
+
+#define S_1G_PCS0_RX_SYNC 10
+#define V_1G_PCS0_RX_SYNC(x) ((x) << S_1G_PCS0_RX_SYNC)
+#define F_1G_PCS0_RX_SYNC V_1G_PCS0_RX_SYNC(1U)
+
+#define S_1G_PCS0_AN_DONE 9
+#define V_1G_PCS0_AN_DONE(x) ((x) << S_1G_PCS0_AN_DONE)
+#define F_1G_PCS0_AN_DONE V_1G_PCS0_AN_DONE(1U)
+
+#define S_1G_PCS0_PGRCVD 8
+#define V_1G_PCS0_PGRCVD(x) ((x) << S_1G_PCS0_PGRCVD)
+#define F_1G_PCS0_PGRCVD V_1G_PCS0_PGRCVD(1U)
+
+#define S_1G_PCS0_SPEED_SEL 6
+#define M_1G_PCS0_SPEED_SEL 0x3U
+#define V_1G_PCS0_SPEED_SEL(x) ((x) << S_1G_PCS0_SPEED_SEL)
+#define G_1G_PCS0_SPEED_SEL(x) (((x) >> S_1G_PCS0_SPEED_SEL) & M_1G_PCS0_SPEED_SEL)
+
+#define S_1G_PCS0_HALF_DUPLEX 5
+#define V_1G_PCS0_HALF_DUPLEX(x) ((x) << S_1G_PCS0_HALF_DUPLEX)
+#define F_1G_PCS0_HALF_DUPLEX V_1G_PCS0_HALF_DUPLEX(1U)
+
+#define S_1G_PCS0_TX_MODE_QUIET 4
+#define V_1G_PCS0_TX_MODE_QUIET(x) ((x) << S_1G_PCS0_TX_MODE_QUIET)
+#define F_1G_PCS0_TX_MODE_QUIET V_1G_PCS0_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_TX_LPI_ACTIVE 3
+#define V_1G_PCS0_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_TX_LPI_ACTIVE)
+#define F_1G_PCS0_TX_LPI_ACTIVE V_1G_PCS0_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_MODE_QUIET 2
+#define V_1G_PCS0_RX_MODE_QUIET(x) ((x) << S_1G_PCS0_RX_MODE_QUIET)
+#define F_1G_PCS0_RX_MODE_QUIET V_1G_PCS0_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS0_RX_LPI_ACTIVE 1
+#define V_1G_PCS0_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS0_RX_LPI_ACTIVE)
+#define F_1G_PCS0_RX_LPI_ACTIVE V_1G_PCS0_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS0_RX_WAKE_ERR 0
+#define V_1G_PCS0_RX_WAKE_ERR(x) ((x) << S_1G_PCS0_RX_WAKE_ERR)
+#define F_1G_PCS0_RX_WAKE_ERR V_1G_PCS0_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS1_STATUS 0x38164
+
+#define S_1G_PCS1_LOOPBACK 12
+#define V_1G_PCS1_LOOPBACK(x) ((x) << S_1G_PCS1_LOOPBACK)
+#define F_1G_PCS1_LOOPBACK V_1G_PCS1_LOOPBACK(1U)
+
+#define S_1G_PCS1_LINK_STATUS 11
+#define V_1G_PCS1_LINK_STATUS(x) ((x) << S_1G_PCS1_LINK_STATUS)
+#define F_1G_PCS1_LINK_STATUS V_1G_PCS1_LINK_STATUS(1U)
+
+#define S_1G_PCS1_RX_SYNC 10
+#define V_1G_PCS1_RX_SYNC(x) ((x) << S_1G_PCS1_RX_SYNC)
+#define F_1G_PCS1_RX_SYNC V_1G_PCS1_RX_SYNC(1U)
+
+#define S_1G_PCS1_AN_DONE 9
+#define V_1G_PCS1_AN_DONE(x) ((x) << S_1G_PCS1_AN_DONE)
+#define F_1G_PCS1_AN_DONE V_1G_PCS1_AN_DONE(1U)
+
+#define S_1G_PCS1_PGRCVD 8
+#define V_1G_PCS1_PGRCVD(x) ((x) << S_1G_PCS1_PGRCVD)
+#define F_1G_PCS1_PGRCVD V_1G_PCS1_PGRCVD(1U)
+
+#define S_1G_PCS1_SPEED_SEL 6
+#define M_1G_PCS1_SPEED_SEL 0x3U
+#define V_1G_PCS1_SPEED_SEL(x) ((x) << S_1G_PCS1_SPEED_SEL)
+#define G_1G_PCS1_SPEED_SEL(x) (((x) >> S_1G_PCS1_SPEED_SEL) & M_1G_PCS1_SPEED_SEL)
+
+#define S_1G_PCS1_HALF_DUPLEX 5
+#define V_1G_PCS1_HALF_DUPLEX(x) ((x) << S_1G_PCS1_HALF_DUPLEX)
+#define F_1G_PCS1_HALF_DUPLEX V_1G_PCS1_HALF_DUPLEX(1U)
+
+#define S_1G_PCS1_TX_MODE_QUIET 4
+#define V_1G_PCS1_TX_MODE_QUIET(x) ((x) << S_1G_PCS1_TX_MODE_QUIET)
+#define F_1G_PCS1_TX_MODE_QUIET V_1G_PCS1_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_TX_LPI_ACTIVE 3
+#define V_1G_PCS1_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_TX_LPI_ACTIVE)
+#define F_1G_PCS1_TX_LPI_ACTIVE V_1G_PCS1_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_MODE_QUIET 2
+#define V_1G_PCS1_RX_MODE_QUIET(x) ((x) << S_1G_PCS1_RX_MODE_QUIET)
+#define F_1G_PCS1_RX_MODE_QUIET V_1G_PCS1_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS1_RX_LPI_ACTIVE 1
+#define V_1G_PCS1_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS1_RX_LPI_ACTIVE)
+#define F_1G_PCS1_RX_LPI_ACTIVE V_1G_PCS1_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS1_RX_WAKE_ERR 0
+#define V_1G_PCS1_RX_WAKE_ERR(x) ((x) << S_1G_PCS1_RX_WAKE_ERR)
+#define F_1G_PCS1_RX_WAKE_ERR V_1G_PCS1_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS2_STATUS 0x38168
+
+#define S_1G_PCS2_LOOPBACK 12
+#define V_1G_PCS2_LOOPBACK(x) ((x) << S_1G_PCS2_LOOPBACK)
+#define F_1G_PCS2_LOOPBACK V_1G_PCS2_LOOPBACK(1U)
+
+#define S_1G_PCS2_LINK_STATUS 11
+#define V_1G_PCS2_LINK_STATUS(x) ((x) << S_1G_PCS2_LINK_STATUS)
+#define F_1G_PCS2_LINK_STATUS V_1G_PCS2_LINK_STATUS(1U)
+
+#define S_1G_PCS2_RX_SYNC 10
+#define V_1G_PCS2_RX_SYNC(x) ((x) << S_1G_PCS2_RX_SYNC)
+#define F_1G_PCS2_RX_SYNC V_1G_PCS2_RX_SYNC(1U)
+
+#define S_1G_PCS2_AN_DONE 9
+#define V_1G_PCS2_AN_DONE(x) ((x) << S_1G_PCS2_AN_DONE)
+#define F_1G_PCS2_AN_DONE V_1G_PCS2_AN_DONE(1U)
+
+#define S_1G_PCS2_PGRCVD 8
+#define V_1G_PCS2_PGRCVD(x) ((x) << S_1G_PCS2_PGRCVD)
+#define F_1G_PCS2_PGRCVD V_1G_PCS2_PGRCVD(1U)
+
+#define S_1G_PCS2_SPEED_SEL 6
+#define M_1G_PCS2_SPEED_SEL 0x3U
+#define V_1G_PCS2_SPEED_SEL(x) ((x) << S_1G_PCS2_SPEED_SEL)
+#define G_1G_PCS2_SPEED_SEL(x) (((x) >> S_1G_PCS2_SPEED_SEL) & M_1G_PCS2_SPEED_SEL)
+
+#define S_1G_PCS2_HALF_DUPLEX 5
+#define V_1G_PCS2_HALF_DUPLEX(x) ((x) << S_1G_PCS2_HALF_DUPLEX)
+#define F_1G_PCS2_HALF_DUPLEX V_1G_PCS2_HALF_DUPLEX(1U)
+
+#define S_1G_PCS2_TX_MODE_QUIET 4
+#define V_1G_PCS2_TX_MODE_QUIET(x) ((x) << S_1G_PCS2_TX_MODE_QUIET)
+#define F_1G_PCS2_TX_MODE_QUIET V_1G_PCS2_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_TX_LPI_ACTIVE 3
+#define V_1G_PCS2_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_TX_LPI_ACTIVE)
+#define F_1G_PCS2_TX_LPI_ACTIVE V_1G_PCS2_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_MODE_QUIET 2
+#define V_1G_PCS2_RX_MODE_QUIET(x) ((x) << S_1G_PCS2_RX_MODE_QUIET)
+#define F_1G_PCS2_RX_MODE_QUIET V_1G_PCS2_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS2_RX_LPI_ACTIVE 1
+#define V_1G_PCS2_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS2_RX_LPI_ACTIVE)
+#define F_1G_PCS2_RX_LPI_ACTIVE V_1G_PCS2_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS2_RX_WAKE_ERR 0
+#define V_1G_PCS2_RX_WAKE_ERR(x) ((x) << S_1G_PCS2_RX_WAKE_ERR)
+#define F_1G_PCS2_RX_WAKE_ERR V_1G_PCS2_RX_WAKE_ERR(1U)
+
+#define A_MAC_1G_PCS3_STATUS 0x3816c
+
+#define S_1G_PCS3_LOOPBACK 12
+#define V_1G_PCS3_LOOPBACK(x) ((x) << S_1G_PCS3_LOOPBACK)
+#define F_1G_PCS3_LOOPBACK V_1G_PCS3_LOOPBACK(1U)
+
+#define S_1G_PCS3_LINK_STATUS 11
+#define V_1G_PCS3_LINK_STATUS(x) ((x) << S_1G_PCS3_LINK_STATUS)
+#define F_1G_PCS3_LINK_STATUS V_1G_PCS3_LINK_STATUS(1U)
+
+#define S_1G_PCS3_RX_SYNC 10
+#define V_1G_PCS3_RX_SYNC(x) ((x) << S_1G_PCS3_RX_SYNC)
+#define F_1G_PCS3_RX_SYNC V_1G_PCS3_RX_SYNC(1U)
+
+#define S_1G_PCS3_AN_DONE 9
+#define V_1G_PCS3_AN_DONE(x) ((x) << S_1G_PCS3_AN_DONE)
+#define F_1G_PCS3_AN_DONE V_1G_PCS3_AN_DONE(1U)
+
+#define S_1G_PCS3_PGRCVD 8
+#define V_1G_PCS3_PGRCVD(x) ((x) << S_1G_PCS3_PGRCVD)
+#define F_1G_PCS3_PGRCVD V_1G_PCS3_PGRCVD(1U)
+
+#define S_1G_PCS3_SPEED_SEL 6
+#define M_1G_PCS3_SPEED_SEL 0x3U
+#define V_1G_PCS3_SPEED_SEL(x) ((x) << S_1G_PCS3_SPEED_SEL)
+#define G_1G_PCS3_SPEED_SEL(x) (((x) >> S_1G_PCS3_SPEED_SEL) & M_1G_PCS3_SPEED_SEL)
+
+#define S_1G_PCS3_HALF_DUPLEX 5
+#define V_1G_PCS3_HALF_DUPLEX(x) ((x) << S_1G_PCS3_HALF_DUPLEX)
+#define F_1G_PCS3_HALF_DUPLEX V_1G_PCS3_HALF_DUPLEX(1U)
+
+#define S_1G_PCS3_TX_MODE_QUIET 4
+#define V_1G_PCS3_TX_MODE_QUIET(x) ((x) << S_1G_PCS3_TX_MODE_QUIET)
+#define F_1G_PCS3_TX_MODE_QUIET V_1G_PCS3_TX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_TX_LPI_ACTIVE 3
+#define V_1G_PCS3_TX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_TX_LPI_ACTIVE)
+#define F_1G_PCS3_TX_LPI_ACTIVE V_1G_PCS3_TX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_MODE_QUIET 2
+#define V_1G_PCS3_RX_MODE_QUIET(x) ((x) << S_1G_PCS3_RX_MODE_QUIET)
+#define F_1G_PCS3_RX_MODE_QUIET V_1G_PCS3_RX_MODE_QUIET(1U)
+
+#define S_1G_PCS3_RX_LPI_ACTIVE 1
+#define V_1G_PCS3_RX_LPI_ACTIVE(x) ((x) << S_1G_PCS3_RX_LPI_ACTIVE)
+#define F_1G_PCS3_RX_LPI_ACTIVE V_1G_PCS3_RX_LPI_ACTIVE(1U)
+
+#define S_1G_PCS3_RX_WAKE_ERR 0
+#define V_1G_PCS3_RX_WAKE_ERR(x) ((x) << S_1G_PCS3_RX_WAKE_ERR)
+#define F_1G_PCS3_RX_WAKE_ERR V_1G_PCS3_RX_WAKE_ERR(1U)
+
+#define A_MAC_PCS_LPI_STATUS_0 0x38170
+
+#define S_TX_LPI_STATE 0
+#define M_TX_LPI_STATE 0xffffffU
+#define V_TX_LPI_STATE(x) ((x) << S_TX_LPI_STATE)
+#define G_TX_LPI_STATE(x) (((x) >> S_TX_LPI_STATE) & M_TX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_1 0x38174
+
+#define S_TX_LPI_MODE 0
+#define M_TX_LPI_MODE 0xffffU
+#define V_TX_LPI_MODE(x) ((x) << S_TX_LPI_MODE)
+#define G_TX_LPI_MODE(x) (((x) >> S_TX_LPI_MODE) & M_TX_LPI_MODE)
+
+#define A_MAC_PCS_LPI_STATUS_2 0x38178
+
+#define S_RX_LPI_MODE 24
+#define M_RX_LPI_MODE 0xffU
+#define V_RX_LPI_MODE(x) ((x) << S_RX_LPI_MODE)
+#define G_RX_LPI_MODE(x) (((x) >> S_RX_LPI_MODE) & M_RX_LPI_MODE)
+
+#define S_RX_LPI_STATE 0
+#define M_RX_LPI_STATE 0xffffffU
+#define V_RX_LPI_STATE(x) ((x) << S_RX_LPI_STATE)
+#define G_RX_LPI_STATE(x) (((x) >> S_RX_LPI_STATE) & M_RX_LPI_STATE)
+
+#define A_MAC_PCS_LPI_STATUS_3 0x3817c
+
+#define S_T7_RX_LPI_ACTIVE 0
+#define M_T7_RX_LPI_ACTIVE 0xffU
+#define V_T7_RX_LPI_ACTIVE(x) ((x) << S_T7_RX_LPI_ACTIVE)
+#define G_T7_RX_LPI_ACTIVE(x) (((x) >> S_T7_RX_LPI_ACTIVE) & M_T7_RX_LPI_ACTIVE)
+
+#define A_MAC_TX0_CLK_DIV 0x38180
+#define A_MAC_TX1_CLK_DIV 0x38184
+#define A_MAC_TX2_CLK_DIV 0x38188
+#define A_MAC_TX3_CLK_DIV 0x3818c
+#define A_MAC_TX4_CLK_DIV 0x38190
+#define A_MAC_TX5_CLK_DIV 0x38194
+#define A_MAC_TX6_CLK_DIV 0x38198
+#define A_MAC_TX7_CLK_DIV 0x3819c
+#define A_MAC_RX0_CLK_DIV 0x381a0
+#define A_MAC_RX1_CLK_DIV 0x381a4
+#define A_MAC_RX2_CLK_DIV 0x381a8
+#define A_MAC_RX3_CLK_DIV 0x381ac
+#define A_MAC_RX4_CLK_DIV 0x381b0
+#define A_MAC_RX5_CLK_DIV 0x381b4
+#define A_MAC_RX6_CLK_DIV 0x381b8
+#define A_MAC_RX7_CLK_DIV 0x381bc
+#define A_MAC_SYNC_E_CDR_LANE_SEL 0x381c0
+
+#define S_CML_MUX_SEL 11
+#define V_CML_MUX_SEL(x) ((x) << S_CML_MUX_SEL)
+#define F_CML_MUX_SEL V_CML_MUX_SEL(1U)
+
+#define S_CMOS_OUT_EN 10
+#define V_CMOS_OUT_EN(x) ((x) << S_CMOS_OUT_EN)
+#define F_CMOS_OUT_EN V_CMOS_OUT_EN(1U)
+
+#define S_CML_OUT_EN 9
+#define V_CML_OUT_EN(x) ((x) << S_CML_OUT_EN)
+#define F_CML_OUT_EN V_CML_OUT_EN(1U)
+
+#define S_LOC_FAULT_PORT_SEL 6
+#define M_LOC_FAULT_PORT_SEL 0x3U
+#define V_LOC_FAULT_PORT_SEL(x) ((x) << S_LOC_FAULT_PORT_SEL)
+#define G_LOC_FAULT_PORT_SEL(x) (((x) >> S_LOC_FAULT_PORT_SEL) & M_LOC_FAULT_PORT_SEL)
+
+#define S_TX_CDR_LANE_SEL 3
+#define M_TX_CDR_LANE_SEL 0x7U
+#define V_TX_CDR_LANE_SEL(x) ((x) << S_TX_CDR_LANE_SEL)
+#define G_TX_CDR_LANE_SEL(x) (((x) >> S_TX_CDR_LANE_SEL) & M_TX_CDR_LANE_SEL)
+
+#define S_RX_CDR_LANE_SEL 0
+#define M_RX_CDR_LANE_SEL 0x7U
+#define V_RX_CDR_LANE_SEL(x) ((x) << S_RX_CDR_LANE_SEL)
+#define G_RX_CDR_LANE_SEL(x) (((x) >> S_RX_CDR_LANE_SEL) & M_RX_CDR_LANE_SEL)
+
+#define A_MAC_DEBUG_PL_IF_1 0x381c4
+#define A_MAC_SIGNAL_DETECT_CTRL 0x381f0
+
+#define S_SIGNAL_DET_LN7 15
+#define V_SIGNAL_DET_LN7(x) ((x) << S_SIGNAL_DET_LN7)
+#define F_SIGNAL_DET_LN7 V_SIGNAL_DET_LN7(1U)
+
+#define S_SIGNAL_DET_LN6 14
+#define V_SIGNAL_DET_LN6(x) ((x) << S_SIGNAL_DET_LN6)
+#define F_SIGNAL_DET_LN6 V_SIGNAL_DET_LN6(1U)
+
+#define S_SIGNAL_DET_LN5 13
+#define V_SIGNAL_DET_LN5(x) ((x) << S_SIGNAL_DET_LN5)
+#define F_SIGNAL_DET_LN5 V_SIGNAL_DET_LN5(1U)
+
+#define S_SIGNAL_DET_LN4 12
+#define V_SIGNAL_DET_LN4(x) ((x) << S_SIGNAL_DET_LN4)
+#define F_SIGNAL_DET_LN4 V_SIGNAL_DET_LN4(1U)
+
+#define S_SIGNAL_DET_LN3 11
+#define V_SIGNAL_DET_LN3(x) ((x) << S_SIGNAL_DET_LN3)
+#define F_SIGNAL_DET_LN3 V_SIGNAL_DET_LN3(1U)
+
+#define S_SIGNAL_DET_LN2 10
+#define V_SIGNAL_DET_LN2(x) ((x) << S_SIGNAL_DET_LN2)
+#define F_SIGNAL_DET_LN2 V_SIGNAL_DET_LN2(1U)
+
+#define S_SIGNAL_DET_LN1 9
+#define V_SIGNAL_DET_LN1(x) ((x) << S_SIGNAL_DET_LN1)
+#define F_SIGNAL_DET_LN1 V_SIGNAL_DET_LN1(1U)
+
+#define S_SIGNAL_DET_LN0 8
+#define V_SIGNAL_DET_LN0(x) ((x) << S_SIGNAL_DET_LN0)
+#define F_SIGNAL_DET_LN0 V_SIGNAL_DET_LN0(1U)
+
+#define S_SIGDETCTRL_LN7 7
+#define V_SIGDETCTRL_LN7(x) ((x) << S_SIGDETCTRL_LN7)
+#define F_SIGDETCTRL_LN7 V_SIGDETCTRL_LN7(1U)
+
+#define S_SIGDETCTRL_LN6 6
+#define V_SIGDETCTRL_LN6(x) ((x) << S_SIGDETCTRL_LN6)
+#define F_SIGDETCTRL_LN6 V_SIGDETCTRL_LN6(1U)
+
+#define S_SIGDETCTRL_LN5 5
+#define V_SIGDETCTRL_LN5(x) ((x) << S_SIGDETCTRL_LN5)
+#define F_SIGDETCTRL_LN5 V_SIGDETCTRL_LN5(1U)
+
+#define S_SIGDETCTRL_LN4 4
+#define V_SIGDETCTRL_LN4(x) ((x) << S_SIGDETCTRL_LN4)
+#define F_SIGDETCTRL_LN4 V_SIGDETCTRL_LN4(1U)
+
+#define S_SIGDETCTRL_LN3 3
+#define V_SIGDETCTRL_LN3(x) ((x) << S_SIGDETCTRL_LN3)
+#define F_SIGDETCTRL_LN3 V_SIGDETCTRL_LN3(1U)
+
+#define S_SIGDETCTRL_LN2 2
+#define V_SIGDETCTRL_LN2(x) ((x) << S_SIGDETCTRL_LN2)
+#define F_SIGDETCTRL_LN2 V_SIGDETCTRL_LN2(1U)
+
+#define S_SIGDETCTRL_LN1 1
+#define V_SIGDETCTRL_LN1(x) ((x) << S_SIGDETCTRL_LN1)
+#define F_SIGDETCTRL_LN1 V_SIGDETCTRL_LN1(1U)
+
+#define S_SIGDETCTRL_LN0 0
+#define V_SIGDETCTRL_LN0(x) ((x) << S_SIGDETCTRL_LN0)
+#define F_SIGDETCTRL_LN0 V_SIGDETCTRL_LN0(1U)
+
+#define A_MAC_FPGA_STATUS_FRM_BOARD 0x381f4
+
+#define S_SFP3_RX_LOS 15
+#define V_SFP3_RX_LOS(x) ((x) << S_SFP3_RX_LOS)
+#define F_SFP3_RX_LOS V_SFP3_RX_LOS(1U)
+
+#define S_SFP3_TX_FAULT 14
+#define V_SFP3_TX_FAULT(x) ((x) << S_SFP3_TX_FAULT)
+#define F_SFP3_TX_FAULT V_SFP3_TX_FAULT(1U)
+
+#define S_SFP3_MOD_PRES 13
+#define V_SFP3_MOD_PRES(x) ((x) << S_SFP3_MOD_PRES)
+#define F_SFP3_MOD_PRES V_SFP3_MOD_PRES(1U)
+
+#define S_SFP2_RX_LOS 12
+#define V_SFP2_RX_LOS(x) ((x) << S_SFP2_RX_LOS)
+#define F_SFP2_RX_LOS V_SFP2_RX_LOS(1U)
+
+#define S_SFP2_TX_FAULT 11
+#define V_SFP2_TX_FAULT(x) ((x) << S_SFP2_TX_FAULT)
+#define F_SFP2_TX_FAULT V_SFP2_TX_FAULT(1U)
+
+#define S_SFP2_MOD_PRES 10
+#define V_SFP2_MOD_PRES(x) ((x) << S_SFP2_MOD_PRES)
+#define F_SFP2_MOD_PRES V_SFP2_MOD_PRES(1U)
+
+#define S_SFP1_RX_LOS 9
+#define V_SFP1_RX_LOS(x) ((x) << S_SFP1_RX_LOS)
+#define F_SFP1_RX_LOS V_SFP1_RX_LOS(1U)
+
+#define S_SFP1_TX_FAULT 8
+#define V_SFP1_TX_FAULT(x) ((x) << S_SFP1_TX_FAULT)
+#define F_SFP1_TX_FAULT V_SFP1_TX_FAULT(1U)
+
+#define S_SFP1_MOD_PRES 7
+#define V_SFP1_MOD_PRES(x) ((x) << S_SFP1_MOD_PRES)
+#define F_SFP1_MOD_PRES V_SFP1_MOD_PRES(1U)
+
+#define S_SFP0_RX_LOS 6
+#define V_SFP0_RX_LOS(x) ((x) << S_SFP0_RX_LOS)
+#define F_SFP0_RX_LOS V_SFP0_RX_LOS(1U)
+
+#define S_SFP0_TX_FAULT 5
+#define V_SFP0_TX_FAULT(x) ((x) << S_SFP0_TX_FAULT)
+#define F_SFP0_TX_FAULT V_SFP0_TX_FAULT(1U)
+
+#define S_SFP0_MOD_PRES 4
+#define V_SFP0_MOD_PRES(x) ((x) << S_SFP0_MOD_PRES)
+#define F_SFP0_MOD_PRES V_SFP0_MOD_PRES(1U)
+
+#define S_QSFP1_INT_L 3
+#define V_QSFP1_INT_L(x) ((x) << S_QSFP1_INT_L)
+#define F_QSFP1_INT_L V_QSFP1_INT_L(1U)
+
+#define S_QSFP1_MOD_PRES 2
+#define V_QSFP1_MOD_PRES(x) ((x) << S_QSFP1_MOD_PRES)
+#define F_QSFP1_MOD_PRES V_QSFP1_MOD_PRES(1U)
+
+#define S_QSFP0_INT_L 1
+#define V_QSFP0_INT_L(x) ((x) << S_QSFP0_INT_L)
+#define F_QSFP0_INT_L V_QSFP0_INT_L(1U)
+
+#define S_QSFP0_MOD_PRES 0
+#define V_QSFP0_MOD_PRES(x) ((x) << S_QSFP0_MOD_PRES)
+#define F_QSFP0_MOD_PRES V_QSFP0_MOD_PRES(1U)
+
+#define A_MAC_FPGA_CONTROL_TO_BOARD 0x381f8
+
+#define S_T7_1_LB_MODE 10
+#define M_T7_1_LB_MODE 0x3U
+#define V_T7_1_LB_MODE(x) ((x) << S_T7_1_LB_MODE)
+#define G_T7_1_LB_MODE(x) (((x) >> S_T7_1_LB_MODE) & M_T7_1_LB_MODE)
+
+#define S_SFP3_TX_DISABLE 9
+#define V_SFP3_TX_DISABLE(x) ((x) << S_SFP3_TX_DISABLE)
+#define F_SFP3_TX_DISABLE V_SFP3_TX_DISABLE(1U)
+
+#define S_SFP2_TX_DISABLE 8
+#define V_SFP2_TX_DISABLE(x) ((x) << S_SFP2_TX_DISABLE)
+#define F_SFP2_TX_DISABLE V_SFP2_TX_DISABLE(1U)
+
+#define S_SFP1_TX_DISABLE 7
+#define V_SFP1_TX_DISABLE(x) ((x) << S_SFP1_TX_DISABLE)
+#define F_SFP1_TX_DISABLE V_SFP1_TX_DISABLE(1U)
+
+#define S_SFP0_TX_DISABLE 6
+#define V_SFP0_TX_DISABLE(x) ((x) << S_SFP0_TX_DISABLE)
+#define F_SFP0_TX_DISABLE V_SFP0_TX_DISABLE(1U)
+
+#define S_QSFP1_LPMODE 5
+#define V_QSFP1_LPMODE(x) ((x) << S_QSFP1_LPMODE)
+#define F_QSFP1_LPMODE V_QSFP1_LPMODE(1U)
+
+#define S_QSFP1_MODSEL_L 4
+#define V_QSFP1_MODSEL_L(x) ((x) << S_QSFP1_MODSEL_L)
+#define F_QSFP1_MODSEL_L V_QSFP1_MODSEL_L(1U)
+
+#define S_QSFP1_RESET_L 3
+#define V_QSFP1_RESET_L(x) ((x) << S_QSFP1_RESET_L)
+#define F_QSFP1_RESET_L V_QSFP1_RESET_L(1U)
+
+#define S_QSFP0_LPMODE 2
+#define V_QSFP0_LPMODE(x) ((x) << S_QSFP0_LPMODE)
+#define F_QSFP0_LPMODE V_QSFP0_LPMODE(1U)
+
+#define S_QSFP0_MODSEL_L 1
+#define V_QSFP0_MODSEL_L(x) ((x) << S_QSFP0_MODSEL_L)
+#define F_QSFP0_MODSEL_L V_QSFP0_MODSEL_L(1U)
+
+#define S_QSFP0_RESET_L 0
+#define V_QSFP0_RESET_L(x) ((x) << S_QSFP0_RESET_L)
+#define F_QSFP0_RESET_L V_QSFP0_RESET_L(1U)
+
+#define A_MAC_FPGA_LINK_STATUS 0x381fc
+
+#define S_PORT3_FPGA_LINK_UP 3
+#define V_PORT3_FPGA_LINK_UP(x) ((x) << S_PORT3_FPGA_LINK_UP)
+#define F_PORT3_FPGA_LINK_UP V_PORT3_FPGA_LINK_UP(1U)
+
+#define S_PORT2_FPGA_LINK_UP 2
+#define V_PORT2_FPGA_LINK_UP(x) ((x) << S_PORT2_FPGA_LINK_UP)
+#define F_PORT2_FPGA_LINK_UP V_PORT2_FPGA_LINK_UP(1U)
+
+#define S_PORT1_FPGA_LINK_UP 1
+#define V_PORT1_FPGA_LINK_UP(x) ((x) << S_PORT1_FPGA_LINK_UP)
+#define F_PORT1_FPGA_LINK_UP V_PORT1_FPGA_LINK_UP(1U)
+
+#define S_PORT0_FPGA_LINK_UP 0
+#define V_PORT0_FPGA_LINK_UP(x) ((x) << S_PORT0_FPGA_LINK_UP)
+#define F_PORT0_FPGA_LINK_UP V_PORT0_FPGA_LINK_UP(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_REVISION 0x38200
+
+#define S_MTIP_REV_400G_0 0
+#define M_MTIP_REV_400G_0 0xffU
+#define V_MTIP_REV_400G_0(x) ((x) << S_MTIP_REV_400G_0)
+#define G_MTIP_REV_400G_0(x) (((x) >> S_MTIP_REV_400G_0) & M_MTIP_REV_400G_0)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_SCRATCH 0x38204
+#define A_MAC_MTIP_MAC400G_0_MTIP_COMMAND_CONFIG 0x38208
+
+#define S_INV_LOOP 31
+#define V_INV_LOOP(x) ((x) << S_INV_LOOP)
+#define F_INV_LOOP V_INV_LOOP(1U)
+
+#define S_TX_FLUSH_ENABLE_400G_0 22
+#define V_TX_FLUSH_ENABLE_400G_0(x) ((x) << S_TX_FLUSH_ENABLE_400G_0)
+#define F_TX_FLUSH_ENABLE_400G_0 V_TX_FLUSH_ENABLE_400G_0(1U)
+
+#define S_PHY_LOOPBACK_EN_400G 10
+#define V_PHY_LOOPBACK_EN_400G(x) ((x) << S_PHY_LOOPBACK_EN_400G)
+#define F_PHY_LOOPBACK_EN_400G V_PHY_LOOPBACK_EN_400G(1U)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_0 0x3820c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_ADDR_1 0x38210
+#define A_MAC_MTIP_MAC400G_0_MTIP_FRM_LENGTH 0x38214
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_SECTIONS 0x3821c
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_SECTIONS 0x38220
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_FIFO_ALMOST_F_E 0x38224
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_FIFO_ALMOST_F_E 0x38228
+#define A_MAC_MTIP_MAC400G_0_MTIP_HASHTABLE_LOAD 0x3822c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_STATUS 0x38240
+#define A_MAC_MTIP_MAC400G_0_MTIP_TX_IPG_LENGTH 0x38244
+
+#define S_T7_IPG 19
+#define M_T7_IPG 0x1fffU
+#define V_T7_IPG(x) ((x) << S_T7_IPG)
+#define G_T7_IPG(x) (((x) >> S_T7_IPG) & M_T7_IPG)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA 0x38254
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA 0x38258
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA 0x3825c
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA 0x38260
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38264
+
+#define S_CL1_PAUSE_QUANTA_THRESH 16
+#define M_CL1_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL1_PAUSE_QUANTA_THRESH(x) ((x) << S_CL1_PAUSE_QUANTA_THRESH)
+#define G_CL1_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL1_PAUSE_QUANTA_THRESH) & M_CL1_PAUSE_QUANTA_THRESH)
+
+#define S_CL0_PAUSE_QUANTA_THRESH 0
+#define M_CL0_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL0_PAUSE_QUANTA_THRESH(x) ((x) << S_CL0_PAUSE_QUANTA_THRESH)
+#define G_CL0_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL0_PAUSE_QUANTA_THRESH) & M_CL0_PAUSE_QUANTA_THRESH)
+
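+/*
+ * Illustrative use of the G_ extract macros (hypothetical usage, not
+ * generated content): both 16-bit thresholds packed into this register
+ * can be recovered from a single read, e.g.
+ *
+ *	v = t4_read_reg(sc, A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH);
+ *	cl0 = G_CL0_PAUSE_QUANTA_THRESH(v);
+ *	cl1 = G_CL1_PAUSE_QUANTA_THRESH(v);
+ */
+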
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38268
+
+#define S_CL3_PAUSE_QUANTA_THRESH 16
+#define M_CL3_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL3_PAUSE_QUANTA_THRESH(x) ((x) << S_CL3_PAUSE_QUANTA_THRESH)
+#define G_CL3_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL3_PAUSE_QUANTA_THRESH) & M_CL3_PAUSE_QUANTA_THRESH)
+
+#define S_CL2_PAUSE_QUANTA_THRESH 0
+#define M_CL2_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL2_PAUSE_QUANTA_THRESH(x) ((x) << S_CL2_PAUSE_QUANTA_THRESH)
+#define G_CL2_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL2_PAUSE_QUANTA_THRESH) & M_CL2_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3826c
+
+#define S_CL5_PAUSE_QUANTA_THRESH 16
+#define M_CL5_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL5_PAUSE_QUANTA_THRESH(x) ((x) << S_CL5_PAUSE_QUANTA_THRESH)
+#define G_CL5_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL5_PAUSE_QUANTA_THRESH) & M_CL5_PAUSE_QUANTA_THRESH)
+
+#define S_CL4_PAUSE_QUANTA_THRESH 0
+#define M_CL4_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL4_PAUSE_QUANTA_THRESH(x) ((x) << S_CL4_PAUSE_QUANTA_THRESH)
+#define G_CL4_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL4_PAUSE_QUANTA_THRESH) & M_CL4_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38270
+
+#define S_CL7_PAUSE_QUANTA_THRESH 16
+#define M_CL7_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL7_PAUSE_QUANTA_THRESH(x) ((x) << S_CL7_PAUSE_QUANTA_THRESH)
+#define G_CL7_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL7_PAUSE_QUANTA_THRESH) & M_CL7_PAUSE_QUANTA_THRESH)
+
+#define S_CL6_PAUSE_QUANTA_THRESH 0
+#define M_CL6_PAUSE_QUANTA_THRESH 0xffffU
+#define V_CL6_PAUSE_QUANTA_THRESH(x) ((x) << S_CL6_PAUSE_QUANTA_THRESH)
+#define G_CL6_PAUSE_QUANTA_THRESH(x) (((x) >> S_CL6_PAUSE_QUANTA_THRESH) & M_CL6_PAUSE_QUANTA_THRESH)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_RX_PAUSE_STATUS 0x38274
+
+#define S_RX_PAUSE_STATUS 0
+#define M_RX_PAUSE_STATUS 0xffU
+#define V_RX_PAUSE_STATUS(x) ((x) << S_RX_PAUSE_STATUS)
+#define G_RX_PAUSE_STATUS(x) (((x) >> S_RX_PAUSE_STATUS) & M_RX_PAUSE_STATUS)
+
+#define A_MAC_MTIP_MAC400G_0_MTIP_TS_TIMESTAMP 0x3827c
+#define A_MAC_MTIP_MAC400G_0_MTIP_XIF_MODE 0x38280
+#define A_MAC_MTIP_MAC400G_1_MTIP_REVISION 0x38300
+
+#define S_MTIP_REV_400G_1 0
+#define M_MTIP_REV_400G_1 0xffU
+#define V_MTIP_REV_400G_1(x) ((x) << S_MTIP_REV_400G_1)
+#define G_MTIP_REV_400G_1(x) (((x) >> S_MTIP_REV_400G_1) & M_MTIP_REV_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_SCRATCH 0x38304
+#define A_MAC_MTIP_MAC400G_1_MTIP_COMMAND_CONFIG 0x38308
+
+#define S_TX_FLUSH_ENABLE_400G_1 22
+#define V_TX_FLUSH_ENABLE_400G_1(x) ((x) << S_TX_FLUSH_ENABLE_400G_1)
+#define F_TX_FLUSH_ENABLE_400G_1 V_TX_FLUSH_ENABLE_400G_1(1U)
+
+#define S_PHY_LOOPBACK_EN_400G_1 10
+#define V_PHY_LOOPBACK_EN_400G_1(x) ((x) << S_PHY_LOOPBACK_EN_400G_1)
+#define F_PHY_LOOPBACK_EN_400G_1 V_PHY_LOOPBACK_EN_400G_1(1U)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_0 0x3830c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_ADDR_1 0x38310
+#define A_MAC_MTIP_MAC400G_1_MTIP_FRM_LENGTH 0x38314
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_SECTIONS 0x3831c
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_SECTIONS 0x38320
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_FIFO_ALMOST_F_E 0x38324
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_FIFO_ALMOST_F_E 0x38328
+#define A_MAC_MTIP_MAC400G_1_MTIP_HASHTABLE_LOAD 0x3832c
+
+#define S_ENABLE_MCAST_RX_400G_1 8
+#define V_ENABLE_MCAST_RX_400G_1(x) ((x) << S_ENABLE_MCAST_RX_400G_1)
+#define F_ENABLE_MCAST_RX_400G_1 V_ENABLE_MCAST_RX_400G_1(1U)
+
+#define S_HASHTABLE_ADDR_400G_1 0
+#define M_HASHTABLE_ADDR_400G_1 0x3fU
+#define V_HASHTABLE_ADDR_400G_1(x) ((x) << S_HASHTABLE_ADDR_400G_1)
+#define G_HASHTABLE_ADDR_400G_1(x) (((x) >> S_HASHTABLE_ADDR_400G_1) & M_HASHTABLE_ADDR_400G_1)
+
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_STATUS 0x38340
+#define A_MAC_MTIP_MAC400G_1_MTIP_TX_IPG_LENGTH 0x38344
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA 0x38354
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA 0x38358
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA 0x3835c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA 0x38360
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL01_PAUSE_QUANTA_THRESH 0x38364
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL23_PAUSE_QUANTA_THRESH 0x38368
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL45_PAUSE_QUANTA_THRESH 0x3836c
+#define A_MAC_MTIP_MAC400G_1_MTIP_MAC_CL67_PAUSE_QUANTA_THRESH 0x38370
+#define A_MAC_MTIP_MAC400G_1_MTIP_RX_PAUSE_STATUS 0x38374
+#define A_MAC_MTIP_MAC400G_1_MTIP_TS_TIMESTAMP 0x3837c
+#define A_MAC_MTIP_MAC400G_1_MTIP_XIF_MODE 0x38380
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_1 0x38400
+
+#define S_T7_SPEED_SELECTION 2
+#define V_T7_SPEED_SELECTION(x) ((x) << S_T7_SPEED_SELECTION)
+#define F_T7_SPEED_SELECTION V_T7_SPEED_SELECTION(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_1 0x38404
+
+#define S_400G_RX_LINK_STATUS 2
+#define V_400G_RX_LINK_STATUS(x) ((x) << S_400G_RX_LINK_STATUS)
+#define F_400G_RX_LINK_STATUS V_400G_RX_LINK_STATUS(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID0 0x38408
+
+#define S_400G_DEVICE_ID0_0 0
+#define M_400G_DEVICE_ID0_0 0xffffU
+#define V_400G_DEVICE_ID0_0(x) ((x) << S_400G_DEVICE_ID0_0)
+#define G_400G_DEVICE_ID0_0(x) (((x) >> S_400G_DEVICE_ID0_0) & M_400G_DEVICE_ID0_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICE_ID1 0x3840c
+
+#define S_400G_DEVICE_ID1_0 0
+#define M_400G_DEVICE_ID1_0 0xffffU
+#define V_400G_DEVICE_ID1_0(x) ((x) << S_400G_DEVICE_ID1_0)
+#define G_400G_DEVICE_ID1_0(x) (((x) >> S_400G_DEVICE_ID1_0) & M_400G_DEVICE_ID1_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SPEED_ABILITY 0x38410
+
+#define S_400G_CAPABLE_0 9
+#define V_400G_CAPABLE_0(x) ((x) << S_400G_CAPABLE_0)
+#define F_400G_CAPABLE_0 V_400G_CAPABLE_0(1U)
+
+#define S_200G_CAPABLE_0 8
+#define V_200G_CAPABLE_0(x) ((x) << S_200G_CAPABLE_0)
+#define F_200G_CAPABLE_0 V_200G_CAPABLE_0(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG1 0x38414
+
+#define S_DEVICE_PACKAGE 3
+#define V_DEVICE_PACKAGE(x) ((x) << S_DEVICE_PACKAGE)
+#define F_DEVICE_PACKAGE V_DEVICE_PACKAGE(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DEVICES_IN_PKG2 0x38418
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CONTROL_2 0x3841c
+
+#define S_400G_PCS_TYPE_SELECTION_0 0
+#define M_400G_PCS_TYPE_SELECTION_0 0xfU
+#define V_400G_PCS_TYPE_SELECTION_0(x) ((x) << S_400G_PCS_TYPE_SELECTION_0)
+#define G_400G_PCS_TYPE_SELECTION_0(x) (((x) >> S_400G_PCS_TYPE_SELECTION_0) & M_400G_PCS_TYPE_SELECTION_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_2 0x38420
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_STATUS_3 0x38424
+
+#define S_T7_DEVICE_PRESENT 2
+#define M_T7_DEVICE_PRESENT 0x3fffU
+#define V_T7_DEVICE_PRESENT(x) ((x) << S_T7_DEVICE_PRESENT)
+#define G_T7_DEVICE_PRESENT(x) (((x) >> S_T7_DEVICE_PRESENT) & M_T7_DEVICE_PRESENT)
+
+#define S_400GBASE_R 1
+#define V_400GBASE_R(x) ((x) << S_400GBASE_R)
+#define F_400GBASE_R V_400GBASE_R(1U)
+
+#define S_200GBASE_R 0
+#define V_200GBASE_R(x) ((x) << S_200GBASE_R)
+#define F_200GBASE_R V_200GBASE_R(1U)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID0 0x38438
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_PKG_ID1 0x3843c
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_1 0x38480
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_STATUS_2 0x38484
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_CONTROL 0x384a8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BASE_R_TEST_ERR_CNT 0x384ac
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_BER_HIGH_ORDER_CNT 0x384b0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x384b4
+
+#define S_HIGH_ORDER 15
+#define V_HIGH_ORDER(x) ((x) << S_HIGH_ORDER)
+#define F_HIGH_ORDER V_HIGH_ORDER(1U)
+
+#define S_ERROR_BLOCK_COUNTER 0
+#define M_ERROR_BLOCK_COUNTER 0x3fffU
+#define V_ERROR_BLOCK_COUNTER(x) ((x) << S_ERROR_BLOCK_COUNTER)
+#define G_ERROR_BLOCK_COUNTER(x) (((x) >> S_ERROR_BLOCK_COUNTER) & M_ERROR_BLOCK_COUNTER)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x384c8
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x384cc
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x384d0
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x384d4
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_0_MAPPING 0x384d8
+
+#define S_T7_LANE_0_MAPPING 0
+#define M_T7_LANE_0_MAPPING 0xfU
+#define V_T7_LANE_0_MAPPING(x) ((x) << S_T7_LANE_0_MAPPING)
+#define G_T7_LANE_0_MAPPING(x) (((x) >> S_T7_LANE_0_MAPPING) & M_T7_LANE_0_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_1_MAPPING 0x384dc
+
+#define S_T7_LANE_1_MAPPING 0
+#define M_T7_LANE_1_MAPPING 0xfU
+#define V_T7_LANE_1_MAPPING(x) ((x) << S_T7_LANE_1_MAPPING)
+#define G_T7_LANE_1_MAPPING(x) (((x) >> S_T7_LANE_1_MAPPING) & M_T7_LANE_1_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_2_MAPPING 0x384e0
+
+#define S_T7_LANE_2_MAPPING 0
+#define M_T7_LANE_2_MAPPING 0xfU
+#define V_T7_LANE_2_MAPPING(x) ((x) << S_T7_LANE_2_MAPPING)
+#define G_T7_LANE_2_MAPPING(x) (((x) >> S_T7_LANE_2_MAPPING) & M_T7_LANE_2_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_3_MAPPING 0x384e4
+
+#define S_T7_LANE_3_MAPPING 0
+#define M_T7_LANE_3_MAPPING 0xfU
+#define V_T7_LANE_3_MAPPING(x) ((x) << S_T7_LANE_3_MAPPING)
+#define G_T7_LANE_3_MAPPING(x) (((x) >> S_T7_LANE_3_MAPPING) & M_T7_LANE_3_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_4_MAPPING 0x384e8
+
+#define S_T7_LANE_4_MAPPING 0
+#define M_T7_LANE_4_MAPPING 0xfU
+#define V_T7_LANE_4_MAPPING(x) ((x) << S_T7_LANE_4_MAPPING)
+#define G_T7_LANE_4_MAPPING(x) (((x) >> S_T7_LANE_4_MAPPING) & M_T7_LANE_4_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_5_MAPPING 0x384ec
+
+#define S_T7_LANE_5_MAPPING 0
+#define M_T7_LANE_5_MAPPING 0xfU
+#define V_T7_LANE_5_MAPPING(x) ((x) << S_T7_LANE_5_MAPPING)
+#define G_T7_LANE_5_MAPPING(x) (((x) >> S_T7_LANE_5_MAPPING) & M_T7_LANE_5_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_6_MAPPING 0x384f0
+
+#define S_T7_LANE_6_MAPPING 0
+#define M_T7_LANE_6_MAPPING 0xfU
+#define V_T7_LANE_6_MAPPING(x) ((x) << S_T7_LANE_6_MAPPING)
+#define G_T7_LANE_6_MAPPING(x) (((x) >> S_T7_LANE_6_MAPPING) & M_T7_LANE_6_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_7_MAPPING 0x384f4
+
+#define S_T7_LANE_7_MAPPING 0
+#define M_T7_LANE_7_MAPPING 0xfU
+#define V_T7_LANE_7_MAPPING(x) ((x) << S_T7_LANE_7_MAPPING)
+#define G_T7_LANE_7_MAPPING(x) (((x) >> S_T7_LANE_7_MAPPING) & M_T7_LANE_7_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_8_MAPPING 0x384f8
+
+#define S_T7_LANE_8_MAPPING 0
+#define M_T7_LANE_8_MAPPING 0xfU
+#define V_T7_LANE_8_MAPPING(x) ((x) << S_T7_LANE_8_MAPPING)
+#define G_T7_LANE_8_MAPPING(x) (((x) >> S_T7_LANE_8_MAPPING) & M_T7_LANE_8_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_9_MAPPING 0x384fc
+
+#define S_T7_LANE_9_MAPPING 0
+#define M_T7_LANE_9_MAPPING 0xfU
+#define V_T7_LANE_9_MAPPING(x) ((x) << S_T7_LANE_9_MAPPING)
+#define G_T7_LANE_9_MAPPING(x) (((x) >> S_T7_LANE_9_MAPPING) & M_T7_LANE_9_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_10_MAPPING 0x38500
+
+#define S_T7_LANE_10_MAPPING 0
+#define M_T7_LANE_10_MAPPING 0xfU
+#define V_T7_LANE_10_MAPPING(x) ((x) << S_T7_LANE_10_MAPPING)
+#define G_T7_LANE_10_MAPPING(x) (((x) >> S_T7_LANE_10_MAPPING) & M_T7_LANE_10_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_11_MAPPING 0x38504
+
+#define S_T7_LANE_11_MAPPING 0
+#define M_T7_LANE_11_MAPPING 0xfU
+#define V_T7_LANE_11_MAPPING(x) ((x) << S_T7_LANE_11_MAPPING)
+#define G_T7_LANE_11_MAPPING(x) (((x) >> S_T7_LANE_11_MAPPING) & M_T7_LANE_11_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_12_MAPPING 0x38508
+
+#define S_T7_LANE_12_MAPPING 0
+#define M_T7_LANE_12_MAPPING 0xfU
+#define V_T7_LANE_12_MAPPING(x) ((x) << S_T7_LANE_12_MAPPING)
+#define G_T7_LANE_12_MAPPING(x) (((x) >> S_T7_LANE_12_MAPPING) & M_T7_LANE_12_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_13_MAPPING 0x3850c
+
+#define S_T7_LANE_13_MAPPING 0
+#define M_T7_LANE_13_MAPPING 0xfU
+#define V_T7_LANE_13_MAPPING(x) ((x) << S_T7_LANE_13_MAPPING)
+#define G_T7_LANE_13_MAPPING(x) (((x) >> S_T7_LANE_13_MAPPING) & M_T7_LANE_13_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_14_MAPPING 0x38510
+
+#define S_T7_LANE_14_MAPPING 0
+#define M_T7_LANE_14_MAPPING 0xfU
+#define V_T7_LANE_14_MAPPING(x) ((x) << S_T7_LANE_14_MAPPING)
+#define G_T7_LANE_14_MAPPING(x) (((x) >> S_T7_LANE_14_MAPPING) & M_T7_LANE_14_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_LANE_15_MAPPING 0x38514
+
+#define S_T7_LANE_15_MAPPING 0
+#define M_T7_LANE_15_MAPPING 0xfU
+#define V_T7_LANE_15_MAPPING(x) ((x) << S_T7_LANE_15_MAPPING)
+#define G_T7_LANE_15_MAPPING(x) (((x) >> S_T7_LANE_15_MAPPING) & M_T7_LANE_15_MAPPING)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_SCRATCH 0x38600
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CORE_REVISION 0x38604
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_CL_INTVL 0x38608
+
+#define S_T7_VL_INTVL 0
+#define M_T7_VL_INTVL 0xffffU
+#define V_T7_VL_INTVL(x) ((x) << S_T7_VL_INTVL)
+#define G_T7_VL_INTVL(x) (((x) >> S_T7_VL_INTVL) & M_T7_VL_INTVL)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH 0x3860c
+
+#define S_TX_LANE_THRESH 0
+#define M_TX_LANE_THRESH 0xfU
+#define V_TX_LANE_THRESH(x) ((x) << S_TX_LANE_THRESH)
+#define G_TX_LANE_THRESH(x) (((x) >> S_TX_LANE_THRESH) & M_TX_LANE_THRESH)
+
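+/*
+ * Sketch of a masked field update (hypothetical usage, not generated
+ * content): clear the field with its mask shifted into place before
+ * OR-ing in the new value.
+ *
+ *	v = t4_read_reg(sc, A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH);
+ *	v &= ~V_TX_LANE_THRESH(M_TX_LANE_THRESH);
+ *	v |= V_TX_LANE_THRESH(thresh);
+ *	t4_write_reg(sc, A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_LANE_THRESH, v);
+ */
+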
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_TX_CDMII_PACE 0x3861c
+
+#define S_TX_CDMII_PACE 0
+#define M_TX_CDMII_PACE 0xfU
+#define V_TX_CDMII_PACE(x) ((x) << S_TX_CDMII_PACE)
+#define G_TX_CDMII_PACE(x) (((x) >> S_TX_CDMII_PACE) & M_TX_CDMII_PACE)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_0 0x38620
+
+#define S_AM_0 0
+#define M_AM_0 0xffffU
+#define V_AM_0(x) ((x) << S_AM_0)
+#define G_AM_0(x) (((x) >> S_AM_0) & M_AM_0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_AM_1 0x38624
+
+#define S_AM_1 0
+#define M_AM_1 0xffffU
+#define V_AM_1(x) ((x) << S_AM_1)
+#define G_AM_1(x) (((x) >> S_AM_1) & M_AM_1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO0 0x38800
+
+#define S_DBGINFO0 0
+#define M_DBGINFO0 0xffffU
+#define V_DBGINFO0(x) ((x) << S_DBGINFO0)
+#define G_DBGINFO0(x) (((x) >> S_DBGINFO0) & M_DBGINFO0)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO1 0x38804
+
+#define S_DBGINFO1 0
+#define M_DBGINFO1 0xffffU
+#define V_DBGINFO1(x) ((x) << S_DBGINFO1)
+#define G_DBGINFO1(x) (((x) >> S_DBGINFO1) & M_DBGINFO1)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO2 0x38808
+
+#define S_DBGINFO2 0
+#define M_DBGINFO2 0xffffU
+#define V_DBGINFO2(x) ((x) << S_DBGINFO2)
+#define G_DBGINFO2(x) (((x) >> S_DBGINFO2) & M_DBGINFO2)
+
+#define A_MAC_MTIP_PCS400G_0_MTIP_400G_DBGINFO3 0x3880c
+
+#define S_DBGINFO3 0
+#define M_DBGINFO3 0xffffU
+#define V_DBGINFO3(x) ((x) << S_DBGINFO3)
+#define G_DBGINFO3(x) (((x) >> S_DBGINFO3) & M_DBGINFO3)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_1 0x38900
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_1 0x38904
+
+#define S_400G_RX_LINK_STATUS_1 2
+#define V_400G_RX_LINK_STATUS_1(x) ((x) << S_400G_RX_LINK_STATUS_1)
+#define F_400G_RX_LINK_STATUS_1 V_400G_RX_LINK_STATUS_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID0 0x38908
+
+#define S_400G_DEVICE_ID0_1 0
+#define M_400G_DEVICE_ID0_1 0xffffU
+#define V_400G_DEVICE_ID0_1(x) ((x) << S_400G_DEVICE_ID0_1)
+#define G_400G_DEVICE_ID0_1(x) (((x) >> S_400G_DEVICE_ID0_1) & M_400G_DEVICE_ID0_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICE_ID1 0x3890c
+
+#define S_400G_DEVICE_ID1_1 0
+#define M_400G_DEVICE_ID1_1 0xffffU
+#define V_400G_DEVICE_ID1_1(x) ((x) << S_400G_DEVICE_ID1_1)
+#define G_400G_DEVICE_ID1_1(x) (((x) >> S_400G_DEVICE_ID1_1) & M_400G_DEVICE_ID1_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SPEED_ABILITY 0x38910
+
+#define S_400G_CAPABLE_1 9
+#define V_400G_CAPABLE_1(x) ((x) << S_400G_CAPABLE_1)
+#define F_400G_CAPABLE_1 V_400G_CAPABLE_1(1U)
+
+#define S_200G_CAPABLE_1 8
+#define V_200G_CAPABLE_1(x) ((x) << S_200G_CAPABLE_1)
+#define F_200G_CAPABLE_1 V_200G_CAPABLE_1(1U)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG1 0x38914
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DEVICES_IN_PKG2 0x38918
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CONTROL_2 0x3891c
+
+#define S_400G_PCS_TYPE_SELECTION_1 0
+#define M_400G_PCS_TYPE_SELECTION_1 0xfU
+#define V_400G_PCS_TYPE_SELECTION_1(x) ((x) << S_400G_PCS_TYPE_SELECTION_1)
+#define G_400G_PCS_TYPE_SELECTION_1(x) (((x) >> S_400G_PCS_TYPE_SELECTION_1) & M_400G_PCS_TYPE_SELECTION_1)
+
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_2 0x38920
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_STATUS_3 0x38924
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID0 0x38938
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_PKG_ID1 0x3893c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_1 0x38980
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_STATUS_2 0x38984
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_CONTROL 0x389a8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BASE_R_TEST_ERR_CNT 0x389ac
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_BER_HIGH_ORDER_CNT 0x389b0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_ERR_BLK_HIGH_ORDER_CNT 0x389b4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_1 0x389c8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_2 0x389cc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_3 0x389d0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_MULTI_LANE_ALIGN_STATUS_4 0x389d4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_0_MAPPING 0x389d8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_1_MAPPING 0x389dc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_2_MAPPING 0x389e0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_3_MAPPING 0x389e4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_4_MAPPING 0x389e8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_5_MAPPING 0x389ec
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_6_MAPPING 0x389f0
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_7_MAPPING 0x389f4
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_8_MAPPING 0x389f8
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_9_MAPPING 0x389fc
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_10_MAPPING 0x38a00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_11_MAPPING 0x38a04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_12_MAPPING 0x38a08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_13_MAPPING 0x38a0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_14_MAPPING 0x38a10
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_LANE_15_MAPPING 0x38a14
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_SCRATCH 0x38b00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CORE_REVISION 0x38b04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_CL_INTVL 0x38b08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_LANE_THRESH 0x38b0c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_TX_CDMII_PACE 0x38b1c
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_0 0x38b20
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_AM_1 0x38b24
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO0 0x38d00
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO1 0x38d04
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO2 0x38d08
+#define A_MAC_MTIP_PCS400G_1_MTIP_400G_DBGINFO3 0x38d0c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_0 0x38e00
+
+#define S_TC_PAD_ALTER 10
+#define V_TC_PAD_ALTER(x) ((x) << S_TC_PAD_ALTER)
+#define F_TC_PAD_ALTER V_TC_PAD_ALTER(1U)
+
+#define S_TC_PAD_VALUE 9
+#define V_TC_PAD_VALUE(x) ((x) << S_TC_PAD_VALUE)
+#define F_TC_PAD_VALUE V_TC_PAD_VALUE(1U)
+
+#define S_KP_ENABLE 8
+#define V_KP_ENABLE(x) ((x) << S_KP_ENABLE)
+#define F_KP_ENABLE V_KP_ENABLE(1U)
+
+#define S_AM16_COPY_DIS 3
+#define V_AM16_COPY_DIS(x) ((x) << S_AM16_COPY_DIS)
+#define F_AM16_COPY_DIS V_AM16_COPY_DIS(1U)
+
+#define S_RS_FEC_DEGRADE_OPTION_ENA 2
+#define V_RS_FEC_DEGRADE_OPTION_ENA(x) ((x) << S_RS_FEC_DEGRADE_OPTION_ENA)
+#define F_RS_FEC_DEGRADE_OPTION_ENA V_RS_FEC_DEGRADE_OPTION_ENA(1U)
+
+#define A_MAC_MTIP_RS_FEC_STATUS_0_0 0x38e04
+
+#define S_FEC_STATUS_0_14 14
+#define V_FEC_STATUS_0_14(x) ((x) << S_FEC_STATUS_0_14)
+#define F_FEC_STATUS_0_14 V_FEC_STATUS_0_14(1U)
+
+#define S_FEC_STATUS_0_11 8
+#define M_FEC_STATUS_0_11 0xfU
+#define V_FEC_STATUS_0_11(x) ((x) << S_FEC_STATUS_0_11)
+#define G_FEC_STATUS_0_11(x) (((x) >> S_FEC_STATUS_0_11) & M_FEC_STATUS_0_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_0 V_RS_FEC_DEGRADE_SER_RECEIVED0_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_1 V_RS_FEC_DEGRADE_SER_RECEIVED0_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED0_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED0_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED0_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED0_2 V_RS_FEC_DEGRADE_SER_RECEIVED0_2(1U)
+
+#define S_FEC_STATUS_0_4 4
+#define V_FEC_STATUS_0_4(x) ((x) << S_FEC_STATUS_0_4)
+#define F_FEC_STATUS_0_4 V_FEC_STATUS_0_4(1U)
+
+#define S_FEC_STATUS_0_3 3
+#define V_FEC_STATUS_0_3(x) ((x) << S_FEC_STATUS_0_3)
+#define F_FEC_STATUS_0_3 V_FEC_STATUS_0_3(1U)
+
+#define S_FEC_STATUS_0_2 2
+#define V_FEC_STATUS_0_2(x) ((x) << S_FEC_STATUS_0_2)
+#define F_FEC_STATUS_0_2 V_FEC_STATUS_0_2(1U)
+
+#define S_FEC_STATUS_0_1 1
+#define V_FEC_STATUS_0_1(x) ((x) << S_FEC_STATUS_0_1)
+#define F_FEC_STATUS_0_1 V_FEC_STATUS_0_1(1U)
+
+#define S_FEC_STATUS_0_0 0
+#define V_FEC_STATUS_0_0(x) ((x) << S_FEC_STATUS_0_0)
+#define F_FEC_STATUS_0_0 V_FEC_STATUS_0_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_0 0x38e08
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_0 0x38e0c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_0 0x38e10
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_0 0x38e14
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_0 0x38e18
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_0 0x38e1c
+
+#define S_DEC_TRESH 0
+#define M_DEC_TRESH 0x3fU
+#define V_DEC_TRESH(x) ((x) << S_DEC_TRESH)
+#define G_DEC_TRESH(x) (((x) >> S_DEC_TRESH) & M_DEC_TRESH)
+
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_1 0x38e20
+#define A_MAC_MTIP_RS_FEC_STATUS_0_1 0x38e24
+
+#define S_FEC_STATUS_1_14 14
+#define V_FEC_STATUS_1_14(x) ((x) << S_FEC_STATUS_1_14)
+#define F_FEC_STATUS_1_14 V_FEC_STATUS_1_14(1U)
+
+#define S_FEC_STATUS_1_11 8
+#define M_FEC_STATUS_1_11 0xfU
+#define V_FEC_STATUS_1_11(x) ((x) << S_FEC_STATUS_1_11)
+#define G_FEC_STATUS_1_11(x) (((x) >> S_FEC_STATUS_1_11) & M_FEC_STATUS_1_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_0 V_RS_FEC_DEGRADE_SER_RECEIVED1_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_1 V_RS_FEC_DEGRADE_SER_RECEIVED1_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED1_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED1_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED1_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED1_2 V_RS_FEC_DEGRADE_SER_RECEIVED1_2(1U)
+
+#define S_FEC_STATUS_1_4 4
+#define V_FEC_STATUS_1_4(x) ((x) << S_FEC_STATUS_1_4)
+#define F_FEC_STATUS_1_4 V_FEC_STATUS_1_4(1U)
+
+#define S_FEC_STATUS_1_3 3
+#define V_FEC_STATUS_1_3(x) ((x) << S_FEC_STATUS_1_3)
+#define F_FEC_STATUS_1_3 V_FEC_STATUS_1_3(1U)
+
+#define S_FEC_STATUS_1_2 2
+#define V_FEC_STATUS_1_2(x) ((x) << S_FEC_STATUS_1_2)
+#define F_FEC_STATUS_1_2 V_FEC_STATUS_1_2(1U)
+
+#define S_FEC_STATUS_1_1 1
+#define V_FEC_STATUS_1_1(x) ((x) << S_FEC_STATUS_1_1)
+#define F_FEC_STATUS_1_1 V_FEC_STATUS_1_1(1U)
+
+#define S_FEC_STATUS_1_0 0
+#define V_FEC_STATUS_1_0(x) ((x) << S_FEC_STATUS_1_0)
+#define F_FEC_STATUS_1_0 V_FEC_STATUS_1_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_1 0x38e28
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_1 0x38e2c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_1 0x38e30
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_1 0x38e34
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_1 0x38e38
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_1 0x38e3c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_2 0x38e40
+#define A_MAC_MTIP_RS_FEC_STATUS_0_2 0x38e44
+
+#define S_FEC_STATUS_2_14 14
+#define V_FEC_STATUS_2_14(x) ((x) << S_FEC_STATUS_2_14)
+#define F_FEC_STATUS_2_14 V_FEC_STATUS_2_14(1U)
+
+#define S_FEC_STATUS_2_11 8
+#define M_FEC_STATUS_2_11 0xfU
+#define V_FEC_STATUS_2_11(x) ((x) << S_FEC_STATUS_2_11)
+#define G_FEC_STATUS_2_11(x) (((x) >> S_FEC_STATUS_2_11) & M_FEC_STATUS_2_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_0 V_RS_FEC_DEGRADE_SER_RECEIVED2_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_1 V_RS_FEC_DEGRADE_SER_RECEIVED2_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED2_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED2_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED2_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED2_2 V_RS_FEC_DEGRADE_SER_RECEIVED2_2(1U)
+
+#define S_FEC_STATUS_2_4 4
+#define V_FEC_STATUS_2_4(x) ((x) << S_FEC_STATUS_2_4)
+#define F_FEC_STATUS_2_4 V_FEC_STATUS_2_4(1U)
+
+#define S_FEC_STATUS_2_3 3
+#define V_FEC_STATUS_2_3(x) ((x) << S_FEC_STATUS_2_3)
+#define F_FEC_STATUS_2_3 V_FEC_STATUS_2_3(1U)
+
+#define S_FEC_STATUS_2_2 2
+#define V_FEC_STATUS_2_2(x) ((x) << S_FEC_STATUS_2_2)
+#define F_FEC_STATUS_2_2 V_FEC_STATUS_2_2(1U)
+
+#define S_FEC_STATUS_2_1 1
+#define V_FEC_STATUS_2_1(x) ((x) << S_FEC_STATUS_2_1)
+#define F_FEC_STATUS_2_1 V_FEC_STATUS_2_1(1U)
+
+#define S_FEC_STATUS_2_0 0
+#define V_FEC_STATUS_2_0(x) ((x) << S_FEC_STATUS_2_0)
+#define F_FEC_STATUS_2_0 V_FEC_STATUS_2_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_2 0x38e48
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_2 0x38e4c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_2 0x38e50
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_2 0x38e54
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_2 0x38e58
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_2 0x38e5c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_3 0x38e60
+#define A_MAC_MTIP_RS_FEC_STATUS_0_3 0x38e64
+
+#define S_FEC_STATUS_3_14 14
+#define V_FEC_STATUS_3_14(x) ((x) << S_FEC_STATUS_3_14)
+#define F_FEC_STATUS_3_14 V_FEC_STATUS_3_14(1U)
+
+#define S_FEC_STATUS_3_11 8
+#define M_FEC_STATUS_3_11 0xfU
+#define V_FEC_STATUS_3_11(x) ((x) << S_FEC_STATUS_3_11)
+#define G_FEC_STATUS_3_11(x) (((x) >> S_FEC_STATUS_3_11) & M_FEC_STATUS_3_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_0 V_RS_FEC_DEGRADE_SER_RECEIVED3_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_1 V_RS_FEC_DEGRADE_SER_RECEIVED3_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED3_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED3_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED3_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED3_2 V_RS_FEC_DEGRADE_SER_RECEIVED3_2(1U)
+
+#define S_FEC_STATUS_3_4 4
+#define V_FEC_STATUS_3_4(x) ((x) << S_FEC_STATUS_3_4)
+#define F_FEC_STATUS_3_4 V_FEC_STATUS_3_4(1U)
+
+#define S_FEC_STATUS_3_3 3
+#define V_FEC_STATUS_3_3(x) ((x) << S_FEC_STATUS_3_3)
+#define F_FEC_STATUS_3_3 V_FEC_STATUS_3_3(1U)
+
+#define S_FEC_STATUS_3_2 2
+#define V_FEC_STATUS_3_2(x) ((x) << S_FEC_STATUS_3_2)
+#define F_FEC_STATUS_3_2 V_FEC_STATUS_3_2(1U)
+
+#define S_FEC_STATUS_3_1 1
+#define V_FEC_STATUS_3_1(x) ((x) << S_FEC_STATUS_3_1)
+#define F_FEC_STATUS_3_1 V_FEC_STATUS_3_1(1U)
+
+#define S_FEC_STATUS_3_0 0
+#define V_FEC_STATUS_3_0(x) ((x) << S_FEC_STATUS_3_0)
+#define F_FEC_STATUS_3_0 V_FEC_STATUS_3_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_3 0x38e68
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_3 0x38e6c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_3 0x38e70
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_3 0x38e74
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_3 0x38e78
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_3 0x38e7c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_4 0x38e80
+#define A_MAC_MTIP_RS_FEC_STATUS_0_4 0x38e84
+
+#define S_FEC_STATUS_4_14 14
+#define V_FEC_STATUS_4_14(x) ((x) << S_FEC_STATUS_4_14)
+#define F_FEC_STATUS_4_14 V_FEC_STATUS_4_14(1U)
+
+#define S_FEC_STATUS_4_11 8
+#define M_FEC_STATUS_4_11 0xfU
+#define V_FEC_STATUS_4_11(x) ((x) << S_FEC_STATUS_4_11)
+#define G_FEC_STATUS_4_11(x) (((x) >> S_FEC_STATUS_4_11) & M_FEC_STATUS_4_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_0 V_RS_FEC_DEGRADE_SER_RECEIVED4_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_1 V_RS_FEC_DEGRADE_SER_RECEIVED4_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED4_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED4_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED4_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED4_2 V_RS_FEC_DEGRADE_SER_RECEIVED4_2(1U)
+
+#define S_FEC_STATUS_4_4 4
+#define V_FEC_STATUS_4_4(x) ((x) << S_FEC_STATUS_4_4)
+#define F_FEC_STATUS_4_4 V_FEC_STATUS_4_4(1U)
+
+#define S_FEC_STATUS_4_3 3
+#define V_FEC_STATUS_4_3(x) ((x) << S_FEC_STATUS_4_3)
+#define F_FEC_STATUS_4_3 V_FEC_STATUS_4_3(1U)
+
+#define S_FEC_STATUS_4_2 2
+#define V_FEC_STATUS_4_2(x) ((x) << S_FEC_STATUS_4_2)
+#define F_FEC_STATUS_4_2 V_FEC_STATUS_4_2(1U)
+
+#define S_FEC_STATUS_4_1 1
+#define V_FEC_STATUS_4_1(x) ((x) << S_FEC_STATUS_4_1)
+#define F_FEC_STATUS_4_1 V_FEC_STATUS_4_1(1U)
+
+#define S_FEC_STATUS_4_0 0
+#define V_FEC_STATUS_4_0(x) ((x) << S_FEC_STATUS_4_0)
+#define F_FEC_STATUS_4_0 V_FEC_STATUS_4_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_4 0x38e88
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_4 0x38e8c
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_4 0x38e90
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_4 0x38e94
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_4 0x38e98
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_4 0x38e9c
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_5 0x38ea0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_5 0x38ea4
+
+#define S_FEC_STATUS_5_14 14
+#define V_FEC_STATUS_5_14(x) ((x) << S_FEC_STATUS_5_14)
+#define F_FEC_STATUS_5_14 V_FEC_STATUS_5_14(1U)
+
+#define S_FEC_STATUS_5_11 8
+#define M_FEC_STATUS_5_11 0xfU
+#define V_FEC_STATUS_5_11(x) ((x) << S_FEC_STATUS_5_11)
+#define G_FEC_STATUS_5_11(x) (((x) >> S_FEC_STATUS_5_11) & M_FEC_STATUS_5_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_0 V_RS_FEC_DEGRADE_SER_RECEIVED5_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_1 V_RS_FEC_DEGRADE_SER_RECEIVED5_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED5_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED5_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED5_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED5_2 V_RS_FEC_DEGRADE_SER_RECEIVED5_2(1U)
+
+#define S_FEC_STATUS_5_4 4
+#define V_FEC_STATUS_5_4(x) ((x) << S_FEC_STATUS_5_4)
+#define F_FEC_STATUS_5_4 V_FEC_STATUS_5_4(1U)
+
+#define S_FEC_STATUS_5_3 3
+#define V_FEC_STATUS_5_3(x) ((x) << S_FEC_STATUS_5_3)
+#define F_FEC_STATUS_5_3 V_FEC_STATUS_5_3(1U)
+
+#define S_FEC_STATUS_5_2 2
+#define V_FEC_STATUS_5_2(x) ((x) << S_FEC_STATUS_5_2)
+#define F_FEC_STATUS_5_2 V_FEC_STATUS_5_2(1U)
+
+#define S_FEC_STATUS_5_1 1
+#define V_FEC_STATUS_5_1(x) ((x) << S_FEC_STATUS_5_1)
+#define F_FEC_STATUS_5_1 V_FEC_STATUS_5_1(1U)
+
+#define S_FEC_STATUS_5_0 0
+#define V_FEC_STATUS_5_0(x) ((x) << S_FEC_STATUS_5_0)
+#define F_FEC_STATUS_5_0 V_FEC_STATUS_5_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_5 0x38ea8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_5 0x38eac
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_5 0x38eb0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_5 0x38eb4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_5 0x38eb8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_5 0x38ebc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_6 0x38ec0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_6 0x38ec4
+
+#define S_FEC_STATUS_6_14 14
+#define V_FEC_STATUS_6_14(x) ((x) << S_FEC_STATUS_6_14)
+#define F_FEC_STATUS_6_14 V_FEC_STATUS_6_14(1U)
+
+#define S_FEC_STATUS_6_11 8
+#define M_FEC_STATUS_6_11 0xfU
+#define V_FEC_STATUS_6_11(x) ((x) << S_FEC_STATUS_6_11)
+#define G_FEC_STATUS_6_11(x) (((x) >> S_FEC_STATUS_6_11) & M_FEC_STATUS_6_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_0 V_RS_FEC_DEGRADE_SER_RECEIVED6_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_1 V_RS_FEC_DEGRADE_SER_RECEIVED6_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED6_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED6_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED6_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED6_2 V_RS_FEC_DEGRADE_SER_RECEIVED6_2(1U)
+
+#define S_FEC_STATUS_6_4 4
+#define V_FEC_STATUS_6_4(x) ((x) << S_FEC_STATUS_6_4)
+#define F_FEC_STATUS_6_4 V_FEC_STATUS_6_4(1U)
+
+#define S_FEC_STATUS_6_3 3
+#define V_FEC_STATUS_6_3(x) ((x) << S_FEC_STATUS_6_3)
+#define F_FEC_STATUS_6_3 V_FEC_STATUS_6_3(1U)
+
+#define S_FEC_STATUS_6_2 2
+#define V_FEC_STATUS_6_2(x) ((x) << S_FEC_STATUS_6_2)
+#define F_FEC_STATUS_6_2 V_FEC_STATUS_6_2(1U)
+
+#define S_FEC_STATUS_6_1 1
+#define V_FEC_STATUS_6_1(x) ((x) << S_FEC_STATUS_6_1)
+#define F_FEC_STATUS_6_1 V_FEC_STATUS_6_1(1U)
+
+#define S_FEC_STATUS_6_0 0
+#define V_FEC_STATUS_6_0(x) ((x) << S_FEC_STATUS_6_0)
+#define F_FEC_STATUS_6_0 V_FEC_STATUS_6_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_6 0x38ec8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_6 0x38ecc
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_6 0x38ed0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_6 0x38ed4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_6 0x38ed8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_6 0x38edc
+#define A_MAC_MTIP_RS_FEC_CONTROL_0_7 0x38ee0
+#define A_MAC_MTIP_RS_FEC_STATUS_0_7 0x38ee4
+
+#define S_FEC_STATUS_7_14 14
+#define V_FEC_STATUS_7_14(x) ((x) << S_FEC_STATUS_7_14)
+#define F_FEC_STATUS_7_14 V_FEC_STATUS_7_14(1U)
+
+#define S_FEC_STATUS_7_11 8
+#define M_FEC_STATUS_7_11 0xfU
+#define V_FEC_STATUS_7_11(x) ((x) << S_FEC_STATUS_7_11)
+#define G_FEC_STATUS_7_11(x) (((x) >> S_FEC_STATUS_7_11) & M_FEC_STATUS_7_11)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_0 7
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_0(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_0)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_0 V_RS_FEC_DEGRADE_SER_RECEIVED7_0(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_1 6
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_1(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_1)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_1 V_RS_FEC_DEGRADE_SER_RECEIVED7_1(1U)
+
+#define S_RS_FEC_DEGRADE_SER_RECEIVED7_2 5
+#define V_RS_FEC_DEGRADE_SER_RECEIVED7_2(x) ((x) << S_RS_FEC_DEGRADE_SER_RECEIVED7_2)
+#define F_RS_FEC_DEGRADE_SER_RECEIVED7_2 V_RS_FEC_DEGRADE_SER_RECEIVED7_2(1U)
+
+#define S_FEC_STATUS_7_4 4
+#define V_FEC_STATUS_7_4(x) ((x) << S_FEC_STATUS_7_4)
+#define F_FEC_STATUS_7_4 V_FEC_STATUS_7_4(1U)
+
+#define S_FEC_STATUS_7_3 3
+#define V_FEC_STATUS_7_3(x) ((x) << S_FEC_STATUS_7_3)
+#define F_FEC_STATUS_7_3 V_FEC_STATUS_7_3(1U)
+
+#define S_FEC_STATUS_7_2 2
+#define V_FEC_STATUS_7_2(x) ((x) << S_FEC_STATUS_7_2)
+#define F_FEC_STATUS_7_2 V_FEC_STATUS_7_2(1U)
+
+#define S_FEC_STATUS_7_1 1
+#define V_FEC_STATUS_7_1(x) ((x) << S_FEC_STATUS_7_1)
+#define F_FEC_STATUS_7_1 V_FEC_STATUS_7_1(1U)
+
+#define S_FEC_STATUS_7_0 0
+#define V_FEC_STATUS_7_0(x) ((x) << S_FEC_STATUS_7_0)
+#define F_FEC_STATUS_7_0 V_FEC_STATUS_7_0(1U)
+
+#define A_MAC_MTIP_RS_FEC_CCW_LO_0_7 0x38ee8
+#define A_MAC_MTIP_RS_FEC_CCW_HI_0_7 0x38eec
+#define A_MAC_MTIP_RS_FEC_NCCW_LO_0_7 0x38ef0
+#define A_MAC_MTIP_RS_FEC_NCCW_HI_0_7 0x38ef4
+#define A_MAC_MTIP_RS_FEC_LANEMAPRS_FEC_0_7 0x38ef8
+#define A_MAC_MTIP_RS_FEC_DEC_THRESH_0_7 0x38efc
+#define A_MAC_MTIP_RS_FEC_HISER_CW 0x38f00
+
+#define S_HISER_CW 0
+#define M_HISER_CW 0xffffU
+#define V_HISER_CW(x) ((x) << S_HISER_CW)
+#define G_HISER_CW(x) (((x) >> S_HISER_CW) & M_HISER_CW)
+
+#define A_MAC_MTIP_RS_FEC_HISER_THRESH 0x38f04
+
+#define S_HISER_THRESH 0
+#define M_HISER_THRESH 0xffffU
+#define V_HISER_THRESH(x) ((x) << S_HISER_THRESH)
+#define G_HISER_THRESH(x) (((x) >> S_HISER_THRESH) & M_HISER_THRESH)
+
+#define A_MAC_MTIP_RS_FEC_HISER_TIME 0x38f08
+
+#define S_HISER_TIME 0
+#define M_HISER_TIME 0xffffU
+#define V_HISER_TIME(x) ((x) << S_HISER_TIME)
+#define G_HISER_TIME(x) (((x) >> S_HISER_TIME) & M_HISER_TIME)
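+
+/*
+ * Illustrative sketch only: the HISER_* registers parameterize the
+ * high symbol-error-rate (hi-SER) detector.  Programming the threshold
+ * and window with the driver's t4_write_reg() accessor (values are
+ * examples, not recommendations) might look like:
+ *
+ *	t4_write_reg(sc, A_MAC_MTIP_RS_FEC_HISER_THRESH,
+ *	    V_HISER_THRESH(0x80));
+ *	t4_write_reg(sc, A_MAC_MTIP_RS_FEC_HISER_TIME,
+ *	    V_HISER_TIME(0x100));
+ */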
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW 0x38f10
+
+#define S_DEGRADE_SET_CW 0
+#define M_DEGRADE_SET_CW 0xffffU
+#define V_DEGRADE_SET_CW(x) ((x) << S_DEGRADE_SET_CW)
+#define G_DEGRADE_SET_CW(x) (((x) >> S_DEGRADE_SET_CW) & M_DEGRADE_SET_CW)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CW_HI 0x38f14
+
+#define S_DEGRADE_SET_CW_HI 0
+#define M_DEGRADE_SET_CW_HI 0xffffU
+#define V_DEGRADE_SET_CW_HI(x) ((x) << S_DEGRADE_SET_CW_HI)
+#define G_DEGRADE_SET_CW_HI(x) (((x) >> S_DEGRADE_SET_CW_HI) & M_DEGRADE_SET_CW_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH 0x38f18
+
+#define S_DEGRADE_SET_THRESH 0
+#define M_DEGRADE_SET_THRESH 0xffffU
+#define V_DEGRADE_SET_THRESH(x) ((x) << S_DEGRADE_SET_THRESH)
+#define G_DEGRADE_SET_THRESH(x) (((x) >> S_DEGRADE_SET_THRESH) & M_DEGRADE_SET_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_THRESH_HI 0x38f1c
+
+#define S_DEGRADE_SET_THRESH_HI 0
+#define M_DEGRADE_SET_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_THRESH_HI(x) ((x) << S_DEGRADE_SET_THRESH_HI)
+#define G_DEGRADE_SET_THRESH_HI(x) (((x) >> S_DEGRADE_SET_THRESH_HI) & M_DEGRADE_SET_THRESH_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR 0x38f20
+
+#define S_DEGRADE_SET_CLEAR 0
+#define M_DEGRADE_SET_CLEAR 0xffffU
+#define V_DEGRADE_SET_CLEAR(x) ((x) << S_DEGRADE_SET_CLEAR)
+#define G_DEGRADE_SET_CLEAR(x) (((x) >> S_DEGRADE_SET_CLEAR) & M_DEGRADE_SET_CLEAR)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_HI 0x38f24
+
+#define S_DEGRADE_SET_CLEAR_HI 0
+#define M_DEGRADE_SET_CLEAR_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_HI(x) ((x) << S_DEGRADE_SET_CLEAR_HI)
+#define G_DEGRADE_SET_CLEAR_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_HI) & M_DEGRADE_SET_CLEAR_HI)
+
+#define A_MAC_MTIP_RS_DEGRADE_CLEAR_THRESH 0x38f28
+
+#define S_DEGRADE_SET_CLEAR_THRESH 0
+#define M_DEGRADE_SET_CLEAR_THRESH 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH)
+#define G_DEGRADE_SET_CLEAR_THRESH(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH) & M_DEGRADE_SET_CLEAR_THRESH)
+
+#define A_MAC_MTIP_RS_DEGRADE_SET_CLEAR_THRESH_HI 0x38f2c
+
+#define S_DEGRADE_SET_CLEAR_THRESH_HI 0
+#define M_DEGRADE_SET_CLEAR_THRESH_HI 0xffffU
+#define V_DEGRADE_SET_CLEAR_THRESH_HI(x) ((x) << S_DEGRADE_SET_CLEAR_THRESH_HI)
+#define G_DEGRADE_SET_CLEAR_THRESH_HI(x) (((x) >> S_DEGRADE_SET_CLEAR_THRESH_HI) & M_DEGRADE_SET_CLEAR_THRESH_HI)
+
+#define A_MAC_MTIP_RS_VL0_0 0x38f80
+#define A_MAC_MTIP_RS_VL0_1 0x38f84
+#define A_MAC_MTIP_RS_VL1_0 0x38f88
+#define A_MAC_MTIP_RS_VL1_1 0x38f8c
+#define A_MAC_MTIP_RS_VL2_0 0x38f90
+#define A_MAC_MTIP_RS_VL2_1 0x38f94
+#define A_MAC_MTIP_RS_VL3_0 0x38f98
+#define A_MAC_MTIP_RS_VL3_1 0x38f9c
+#define A_MAC_MTIP_RS_VL4_0 0x38fa0
+#define A_MAC_MTIP_RS_VL4_1 0x38fa4
+#define A_MAC_MTIP_RS_VL5_0 0x38fa8
+#define A_MAC_MTIP_RS_VL5_1 0x38fac
+#define A_MAC_MTIP_RS_VL6_0 0x38fb0
+#define A_MAC_MTIP_RS_VL6_1 0x38fb4
+#define A_MAC_MTIP_RS_VL7_0 0x38fb8
+#define A_MAC_MTIP_RS_VL7_1 0x38fbc
+#define A_MAC_MTIP_RS_VL8_0 0x38fc0
+#define A_MAC_MTIP_RS_VL8_1 0x38fc4
+#define A_MAC_MTIP_RS_VL9_0 0x38fc8
+#define A_MAC_MTIP_RS_VL9_1 0x38fcc
+#define A_MAC_MTIP_RS_VL10_0 0x38fd0
+#define A_MAC_MTIP_RS_VL10_1 0x38fd4
+#define A_MAC_MTIP_RS_VL11_0 0x38fd8
+#define A_MAC_MTIP_RS_VL11_1 0x38fdc
+#define A_MAC_MTIP_RS_VL12_0 0x38fe0
+#define A_MAC_MTIP_RS_VL12_1 0x38fe4
+#define A_MAC_MTIP_RS_VL13_0 0x38fe8
+#define A_MAC_MTIP_RS_VL13_1 0x38fec
+#define A_MAC_MTIP_RS_VL14_0 0x38ff0
+#define A_MAC_MTIP_RS_VL14_1 0x38ff4
+#define A_MAC_MTIP_RS_VL15_0 0x38ff8
+#define A_MAC_MTIP_RS_VL15_1 0x38ffc
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_LO 0x39000
+#define A_MAC_MTIP_RS_FEC_SYMBLERR0_HI 0x39004
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_LO 0x39008
+#define A_MAC_MTIP_RS_FEC_SYMBLERR1_HI 0x3900c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_LO 0x39010
+#define A_MAC_MTIP_RS_FEC_SYMBLERR2_HI 0x39014
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_LO 0x39018
+#define A_MAC_MTIP_RS_FEC_SYMBLERR3_HI 0x3901c
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_LO 0x39020
+
+#define S_RS_FEC_SYMBLERR4_LO 0
+#define V_RS_FEC_SYMBLERR4_LO(x) ((x) << S_RS_FEC_SYMBLERR4_LO)
+#define F_RS_FEC_SYMBLERR4_LO V_RS_FEC_SYMBLERR4_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR4_HI 0x39024
+
+#define S_RS_FEC_SYMBLERR4_HI 0
+#define V_RS_FEC_SYMBLERR4_HI(x) ((x) << S_RS_FEC_SYMBLERR4_HI)
+#define F_RS_FEC_SYMBLERR4_HI V_RS_FEC_SYMBLERR4_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_LO 0x39028
+
+#define S_RS_FEC_SYMBLERR5_LO 0
+#define V_RS_FEC_SYMBLERR5_LO(x) ((x) << S_RS_FEC_SYMBLERR5_LO)
+#define F_RS_FEC_SYMBLERR5_LO V_RS_FEC_SYMBLERR5_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR5_HI 0x3902c
+
+#define S_RS_FEC_SYMBLERR5_HI 0
+#define V_RS_FEC_SYMBLERR5_HI(x) ((x) << S_RS_FEC_SYMBLERR5_HI)
+#define F_RS_FEC_SYMBLERR5_HI V_RS_FEC_SYMBLERR5_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_LO 0x39030
+
+#define S_RS_FEC_SYMBLERR6_LO 0
+#define V_RS_FEC_SYMBLERR6_LO(x) ((x) << S_RS_FEC_SYMBLERR6_LO)
+#define F_RS_FEC_SYMBLERR6_LO V_RS_FEC_SYMBLERR6_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR6_HI 0x39034
+
+#define S_RS_FEC_SYMBLERR6_HI 0
+#define V_RS_FEC_SYMBLERR6_HI(x) ((x) << S_RS_FEC_SYMBLERR6_HI)
+#define F_RS_FEC_SYMBLERR6_HI V_RS_FEC_SYMBLERR6_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_LO 0x39038
+
+#define S_RS_FEC_SYMBLERR7_LO 0
+#define V_RS_FEC_SYMBLERR7_LO(x) ((x) << S_RS_FEC_SYMBLERR7_LO)
+#define F_RS_FEC_SYMBLERR7_LO V_RS_FEC_SYMBLERR7_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR7_HI 0x3903c
+
+#define S_RS_FEC_SYMBLERR7_HI 0
+#define V_RS_FEC_SYMBLERR7_HI(x) ((x) << S_RS_FEC_SYMBLERR7_HI)
+#define F_RS_FEC_SYMBLERR7_HI V_RS_FEC_SYMBLERR7_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_LO 0x39040
+
+#define S_RS_FEC_SYMBLERR8_LO 0
+#define V_RS_FEC_SYMBLERR8_LO(x) ((x) << S_RS_FEC_SYMBLERR8_LO)
+#define F_RS_FEC_SYMBLERR8_LO V_RS_FEC_SYMBLERR8_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR8_HI 0x39044
+
+#define S_RS_FEC_SYMBLERR8_HI 0
+#define V_RS_FEC_SYMBLERR8_HI(x) ((x) << S_RS_FEC_SYMBLERR8_HI)
+#define F_RS_FEC_SYMBLERR8_HI V_RS_FEC_SYMBLERR8_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_LO 0x39048
+
+#define S_RS_FEC_SYMBLERR9_LO 0
+#define V_RS_FEC_SYMBLERR9_LO(x) ((x) << S_RS_FEC_SYMBLERR9_LO)
+#define F_RS_FEC_SYMBLERR9_LO V_RS_FEC_SYMBLERR9_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR9_HI 0x3904c
+
+#define S_RS_FEC_SYMBLERR9_HI 0
+#define V_RS_FEC_SYMBLERR9_HI(x) ((x) << S_RS_FEC_SYMBLERR9_HI)
+#define F_RS_FEC_SYMBLERR9_HI V_RS_FEC_SYMBLERR9_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_LO 0x39050
+
+#define S_RS_FEC_SYMBLERR10_LO 0
+#define V_RS_FEC_SYMBLERR10_LO(x) ((x) << S_RS_FEC_SYMBLERR10_LO)
+#define F_RS_FEC_SYMBLERR10_LO V_RS_FEC_SYMBLERR10_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR10_HI 0x39054
+
+#define S_RS_FEC_SYMBLERR10_HI 0
+#define V_RS_FEC_SYMBLERR10_HI(x) ((x) << S_RS_FEC_SYMBLERR10_HI)
+#define F_RS_FEC_SYMBLERR10_HI V_RS_FEC_SYMBLERR10_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_LO 0x39058
+
+#define S_RS_FEC_SYMBLERR11_LO 0
+#define V_RS_FEC_SYMBLERR11_LO(x) ((x) << S_RS_FEC_SYMBLERR11_LO)
+#define F_RS_FEC_SYMBLERR11_LO V_RS_FEC_SYMBLERR11_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR11_HI 0x3905c
+
+#define S_RS_FEC_SYMBLERR11_HI 0
+#define V_RS_FEC_SYMBLERR11_HI(x) ((x) << S_RS_FEC_SYMBLERR11_HI)
+#define F_RS_FEC_SYMBLERR11_HI V_RS_FEC_SYMBLERR11_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_LO 0x39060
+
+#define S_RS_FEC_SYMBLERR12_LO 0
+#define V_RS_FEC_SYMBLERR12_LO(x) ((x) << S_RS_FEC_SYMBLERR12_LO)
+#define F_RS_FEC_SYMBLERR12_LO V_RS_FEC_SYMBLERR12_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR12_HI 0x39064
+
+#define S_RS_FEC_SYMBLERR12_HI 0
+#define V_RS_FEC_SYMBLERR12_HI(x) ((x) << S_RS_FEC_SYMBLERR12_HI)
+#define F_RS_FEC_SYMBLERR12_HI V_RS_FEC_SYMBLERR12_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_LO 0x39068
+
+#define S_RS_FEC_SYMBLERR13_LO 0
+#define V_RS_FEC_SYMBLERR13_LO(x) ((x) << S_RS_FEC_SYMBLERR13_LO)
+#define F_RS_FEC_SYMBLERR13_LO V_RS_FEC_SYMBLERR13_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR13_HI 0x3906c
+
+#define S_RS_FEC_SYMBLERR13_HI 0
+#define V_RS_FEC_SYMBLERR13_HI(x) ((x) << S_RS_FEC_SYMBLERR13_HI)
+#define F_RS_FEC_SYMBLERR13_HI V_RS_FEC_SYMBLERR13_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_LO 0x39070
+
+#define S_RS_FEC_SYMBLERR14_LO 0
+#define V_RS_FEC_SYMBLERR14_LO(x) ((x) << S_RS_FEC_SYMBLERR14_LO)
+#define F_RS_FEC_SYMBLERR14_LO V_RS_FEC_SYMBLERR14_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR14_HI 0x39074
+
+#define S_RS_FEC_SYMBLERR14_HI 0
+#define V_RS_FEC_SYMBLERR14_HI(x) ((x) << S_RS_FEC_SYMBLERR14_HI)
+#define F_RS_FEC_SYMBLERR14_HI V_RS_FEC_SYMBLERR14_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_LO 0x39078
+
+#define S_RS_FEC_SYMBLERR15_LO 0
+#define V_RS_FEC_SYMBLERR15_LO(x) ((x) << S_RS_FEC_SYMBLERR15_LO)
+#define F_RS_FEC_SYMBLERR15_LO V_RS_FEC_SYMBLERR15_LO(1U)
+
+#define A_MAC_MTIP_RS_FEC_SYMBLERR15_HI 0x3907c
+
+#define S_RS_FEC_SYMBLERR15_HI 0
+#define V_RS_FEC_SYMBLERR15_HI(x) ((x) << S_RS_FEC_SYMBLERR15_HI)
+#define F_RS_FEC_SYMBLERR15_HI V_RS_FEC_SYMBLERR15_HI(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_CONTROL 0x39080
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_1 0x39084
+
+#define S_VENDOR_INFO_1_AMPS_LOCK 0
+#define V_VENDOR_INFO_1_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_1_AMPS_LOCK)
+#define F_VENDOR_INFO_1_AMPS_LOCK V_VENDOR_INFO_1_AMPS_LOCK(1U)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_INFO_2 0x39088
+
+#define S_VENDOR_INFO_2_AMPS_LOCK 0
+#define M_VENDOR_INFO_2_AMPS_LOCK 0xffffU
+#define V_VENDOR_INFO_2_AMPS_LOCK(x) ((x) << S_VENDOR_INFO_2_AMPS_LOCK)
+#define G_VENDOR_INFO_2_AMPS_LOCK(x) (((x) >> S_VENDOR_INFO_2_AMPS_LOCK) & M_VENDOR_INFO_2_AMPS_LOCK)
+
+#define A_MAC_MTIP_RS_FEC_VENDOR_REVISION 0x3908c
+#define A_MAC_MTIP_RS_FEC_VENDOR_ALIGN_STATUS 0x39090
+
+#define S_RS_FEC_VENDOR_ALIGN_STATUS 0
+#define M_RS_FEC_VENDOR_ALIGN_STATUS 0xffffU
+#define V_RS_FEC_VENDOR_ALIGN_STATUS(x) ((x) << S_RS_FEC_VENDOR_ALIGN_STATUS)
+#define G_RS_FEC_VENDOR_ALIGN_STATUS(x) (((x) >> S_RS_FEC_VENDOR_ALIGN_STATUS) & M_RS_FEC_VENDOR_ALIGN_STATUS)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_0 0x39100
+
+#define S_FEC74_FEC_ABILITY_0_B1 1
+#define V_FEC74_FEC_ABILITY_0_B1(x) ((x) << S_FEC74_FEC_ABILITY_0_B1)
+#define F_FEC74_FEC_ABILITY_0_B1 V_FEC74_FEC_ABILITY_0_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_0_B0 0
+#define V_FEC74_FEC_ABILITY_0_B0(x) ((x) << S_FEC74_FEC_ABILITY_0_B0)
+#define F_FEC74_FEC_ABILITY_0_B0 V_FEC74_FEC_ABILITY_0_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_0 0x39104
+
+#define S_FEC_ENABLE_ERROR_INDICATION 1
+#define V_FEC_ENABLE_ERROR_INDICATION(x) ((x) << S_FEC_ENABLE_ERROR_INDICATION)
+#define F_FEC_ENABLE_ERROR_INDICATION V_FEC_ENABLE_ERROR_INDICATION(1U)
+
+#define S_T7_FEC_ENABLE 0
+#define V_T7_FEC_ENABLE(x) ((x) << S_T7_FEC_ENABLE)
+#define F_T7_FEC_ENABLE V_T7_FEC_ENABLE(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_STATUS_0 0x39108
+
+#define S_FEC_LOCKED_1 1
+#define V_FEC_LOCKED_1(x) ((x) << S_FEC_LOCKED_1)
+#define F_FEC_LOCKED_1 V_FEC_LOCKED_1(1U)
+
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_0 0x3910c
+
+#define S_VL0_CCW_LO 0
+#define M_VL0_CCW_LO 0xffffU
+#define V_VL0_CCW_LO(x) ((x) << S_VL0_CCW_LO)
+#define G_VL0_CCW_LO(x) (((x) >> S_VL0_CCW_LO) & M_VL0_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_0 0x39110
+
+#define S_VL0_NCCW_LO 0
+#define M_VL0_NCCW_LO 0xffffU
+#define V_VL0_NCCW_LO(x) ((x) << S_VL0_NCCW_LO)
+#define G_VL0_NCCW_LO(x) (((x) >> S_VL0_NCCW_LO) & M_VL0_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_0 0x39114
+
+#define S_VL1_CCW_LO 0
+#define M_VL1_CCW_LO 0xffffU
+#define V_VL1_CCW_LO(x) ((x) << S_VL1_CCW_LO)
+#define G_VL1_CCW_LO(x) (((x) >> S_VL1_CCW_LO) & M_VL1_CCW_LO)
+
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_0 0x39118
+
+#define S_VL1_NCCW_LO 0
+#define M_VL1_NCCW_LO 0xffffU
+#define V_VL1_NCCW_LO(x) ((x) << S_VL1_NCCW_LO)
+#define G_VL1_NCCW_LO(x) (((x) >> S_VL1_NCCW_LO) & M_VL1_NCCW_LO)
+
+#define A_MAC_MTIP_FEC74_COUNTER_HI_0 0x3911c
+
+#define S_COUNTER_HI 0
+#define M_COUNTER_HI 0xffffU
+#define V_COUNTER_HI(x) ((x) << S_COUNTER_HI)
+#define G_COUNTER_HI(x) (((x) >> S_COUNTER_HI) & M_COUNTER_HI)
+
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_1 0x39120
+
+#define S_FEC74_FEC_ABILITY_1_B1 1
+#define V_FEC74_FEC_ABILITY_1_B1(x) ((x) << S_FEC74_FEC_ABILITY_1_B1)
+#define F_FEC74_FEC_ABILITY_1_B1 V_FEC74_FEC_ABILITY_1_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_1_B0 0
+#define V_FEC74_FEC_ABILITY_1_B0(x) ((x) << S_FEC74_FEC_ABILITY_1_B0)
+#define F_FEC74_FEC_ABILITY_1_B0 V_FEC74_FEC_ABILITY_1_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_1 0x39124
+#define A_MAC_MTIP_FEC74_FEC_STATUS_1 0x39128
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_1 0x3912c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_1 0x39130
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_1 0x39134
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_1 0x39138
+#define A_MAC_MTIP_FEC74_COUNTER_HI_1 0x3913c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_2 0x39140
+
+#define S_FEC74_FEC_ABILITY_2_B1 1
+#define V_FEC74_FEC_ABILITY_2_B1(x) ((x) << S_FEC74_FEC_ABILITY_2_B1)
+#define F_FEC74_FEC_ABILITY_2_B1 V_FEC74_FEC_ABILITY_2_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_2_B0 0
+#define V_FEC74_FEC_ABILITY_2_B0(x) ((x) << S_FEC74_FEC_ABILITY_2_B0)
+#define F_FEC74_FEC_ABILITY_2_B0 V_FEC74_FEC_ABILITY_2_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_2 0x39144
+#define A_MAC_MTIP_FEC74_FEC_STATUS_2 0x39148
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_2 0x3914c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_2 0x39150
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_2 0x39154
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_2 0x39158
+#define A_MAC_MTIP_FEC74_COUNTER_HI_2 0x3915c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_3 0x39160
+
+#define S_FEC74_FEC_ABILITY_3_B1 1
+#define V_FEC74_FEC_ABILITY_3_B1(x) ((x) << S_FEC74_FEC_ABILITY_3_B1)
+#define F_FEC74_FEC_ABILITY_3_B1 V_FEC74_FEC_ABILITY_3_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_3_B0 0
+#define V_FEC74_FEC_ABILITY_3_B0(x) ((x) << S_FEC74_FEC_ABILITY_3_B0)
+#define F_FEC74_FEC_ABILITY_3_B0 V_FEC74_FEC_ABILITY_3_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_3 0x39164
+#define A_MAC_MTIP_FEC74_FEC_STATUS_3 0x39168
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_3 0x3916c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_3 0x39170
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_3 0x39174
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_3 0x39178
+#define A_MAC_MTIP_FEC74_COUNTER_HI_3 0x3917c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_4 0x39180
+
+#define S_FEC74_FEC_ABILITY_4_B1 1
+#define V_FEC74_FEC_ABILITY_4_B1(x) ((x) << S_FEC74_FEC_ABILITY_4_B1)
+#define F_FEC74_FEC_ABILITY_4_B1 V_FEC74_FEC_ABILITY_4_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_4_B0 0
+#define V_FEC74_FEC_ABILITY_4_B0(x) ((x) << S_FEC74_FEC_ABILITY_4_B0)
+#define F_FEC74_FEC_ABILITY_4_B0 V_FEC74_FEC_ABILITY_4_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_4 0x39184
+#define A_MAC_MTIP_FEC74_FEC_STATUS_4 0x39188
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_4 0x3918c
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_4 0x39190
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_4 0x39194
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_4 0x39198
+#define A_MAC_MTIP_FEC74_COUNTER_HI_4 0x3919c
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_5 0x391a0
+
+#define S_FEC74_FEC_ABILITY_5_B1 1
+#define V_FEC74_FEC_ABILITY_5_B1(x) ((x) << S_FEC74_FEC_ABILITY_5_B1)
+#define F_FEC74_FEC_ABILITY_5_B1 V_FEC74_FEC_ABILITY_5_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_5_B0 0
+#define V_FEC74_FEC_ABILITY_5_B0(x) ((x) << S_FEC74_FEC_ABILITY_5_B0)
+#define F_FEC74_FEC_ABILITY_5_B0 V_FEC74_FEC_ABILITY_5_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_5 0x391a4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_5 0x391a8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_5 0x391ac
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_5 0x391b0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_5 0x391b4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_5 0x391b8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_5 0x391bc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_6 0x391c0
+
+#define S_FEC74_FEC_ABILITY_6_B1 1
+#define V_FEC74_FEC_ABILITY_6_B1(x) ((x) << S_FEC74_FEC_ABILITY_6_B1)
+#define F_FEC74_FEC_ABILITY_6_B1 V_FEC74_FEC_ABILITY_6_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_6_B0 0
+#define V_FEC74_FEC_ABILITY_6_B0(x) ((x) << S_FEC74_FEC_ABILITY_6_B0)
+#define F_FEC74_FEC_ABILITY_6_B0 V_FEC74_FEC_ABILITY_6_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_6 0x391c4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_6 0x391c8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_6 0x391cc
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_6 0x391d0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_6 0x391d4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_6 0x391d8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_6 0x391dc
+#define A_MAC_MTIP_FEC74_FEC_ABILITY_7 0x391e0
+
+#define S_FEC74_FEC_ABILITY_7_B1 1
+#define V_FEC74_FEC_ABILITY_7_B1(x) ((x) << S_FEC74_FEC_ABILITY_7_B1)
+#define F_FEC74_FEC_ABILITY_7_B1 V_FEC74_FEC_ABILITY_7_B1(1U)
+
+#define S_FEC74_FEC_ABILITY_7_B0 0
+#define V_FEC74_FEC_ABILITY_7_B0(x) ((x) << S_FEC74_FEC_ABILITY_7_B0)
+#define F_FEC74_FEC_ABILITY_7_B0 V_FEC74_FEC_ABILITY_7_B0(1U)
+
+#define A_MAC_MTIP_FEC74_FEC_CONTROL_7 0x391e4
+#define A_MAC_MTIP_FEC74_FEC_STATUS_7 0x391e8
+#define A_MAC_MTIP_FEC74_VL0_CCW_LO_7 0x391ec
+#define A_MAC_MTIP_FEC74_VL0_NCCW_LO_7 0x391f0
+#define A_MAC_MTIP_FEC74_VL1_CCW_LO_7 0x391f4
+#define A_MAC_MTIP_FEC74_VL1_NCCW_LO_7 0x391f8
+#define A_MAC_MTIP_FEC74_COUNTER_HI_7 0x391fc
+#define A_MAC_BEAN0_CTL 0x39200
+#define A_MAC_BEAN0_STATUS 0x39204
+#define A_MAC_BEAN0_ABILITY_0 0x39208
+
+#define S_BEAN0_REM_FAULT 13
+#define V_BEAN0_REM_FAULT(x) ((x) << S_BEAN0_REM_FAULT)
+#define F_BEAN0_REM_FAULT V_BEAN0_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_ABILITY_1 0x3920c
+#define A_MAC_BEAN0_ABILITY_2 0x39210
+
+#define S_BEAN0_AB_2_15_12 12
+#define M_BEAN0_AB_2_15_12 0xfU
+#define V_BEAN0_AB_2_15_12(x) ((x) << S_BEAN0_AB_2_15_12)
+#define G_BEAN0_AB_2_15_12(x) (((x) >> S_BEAN0_AB_2_15_12) & M_BEAN0_AB_2_15_12)
+
+#define S_BEAN0_AB_2_11_0 0
+#define M_BEAN0_AB_2_11_0 0xfffU
+#define V_BEAN0_AB_2_11_0(x) ((x) << S_BEAN0_AB_2_11_0)
+#define G_BEAN0_AB_2_11_0(x) (((x) >> S_BEAN0_AB_2_11_0) & M_BEAN0_AB_2_11_0)
+
+#define A_MAC_BEAN0_REM_ABILITY_0 0x39214
+
+#define S_BEAN0_ABL_REM_FAULT 13
+#define V_BEAN0_ABL_REM_FAULT(x) ((x) << S_BEAN0_ABL_REM_FAULT)
+#define F_BEAN0_ABL_REM_FAULT V_BEAN0_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN0_REM_ABILITY_1 0x39218
+#define A_MAC_BEAN0_REM_ABILITY_2 0x3921c
+
+#define S_BEAN0_REM_AB_15_12 12
+#define M_BEAN0_REM_AB_15_12 0xfU
+#define V_BEAN0_REM_AB_15_12(x) ((x) << S_BEAN0_REM_AB_15_12)
+#define G_BEAN0_REM_AB_15_12(x) (((x) >> S_BEAN0_REM_AB_15_12) & M_BEAN0_REM_AB_15_12)
+
+#define S_BEAN0_REM_AB_11_0 0
+#define M_BEAN0_REM_AB_11_0 0xfffU
+#define V_BEAN0_REM_AB_11_0(x) ((x) << S_BEAN0_REM_AB_11_0)
+#define G_BEAN0_REM_AB_11_0(x) (((x) >> S_BEAN0_REM_AB_11_0) & M_BEAN0_REM_AB_11_0)
+
+#define A_MAC_BEAN0_MS_COUNT 0x39220
+#define A_MAC_BEAN0_XNP_0 0x39224
+#define A_MAC_BEAN0_XNP_1 0x39228
+#define A_MAC_BEAN0_XNP_2 0x3922c
+#define A_MAC_LP_BEAN0_XNP_0 0x39230
+#define A_MAC_LP_BEAN0_XNP_1 0x39234
+#define A_MAC_LP_BEAN0_XNP_2 0x39238
+#define A_MAC_BEAN0_ETH_STATUS 0x3923c
+
+#define S_5GKR 15
+#define V_5GKR(x) ((x) << S_5GKR)
+#define F_5GKR V_5GKR(1U)
+
+#define S_2P5GKX 14
+#define V_2P5GKX(x) ((x) << S_2P5GKX)
+#define F_2P5GKX V_2P5GKX(1U)
+
+#define S_25G_KR 13
+#define V_25G_KR(x) ((x) << S_25G_KR)
+#define F_25G_KR V_25G_KR(1U)
+
+#define S_25G_KR_S 12
+#define V_25G_KR_S(x) ((x) << S_25G_KR_S)
+#define F_25G_KR_S V_25G_KR_S(1U)
+
+#define S_RS_FEC 7
+#define V_RS_FEC(x) ((x) << S_RS_FEC)
+#define F_RS_FEC V_RS_FEC(1U)
+
+#define S_FC_FEC 4
+#define V_FC_FEC(x) ((x) << S_FC_FEC)
+#define F_FC_FEC V_FC_FEC(1U)
+
+#define A_MAC_BEAN0_ETH_STATUS_2 0x39240
+
+#define S_RS_FEC_NEGOTIATED 6
+#define V_RS_FEC_NEGOTIATED(x) ((x) << S_RS_FEC_NEGOTIATED)
+#define F_RS_FEC_NEGOTIATED V_RS_FEC_NEGOTIATED(1U)
+
+#define S_400GKR4CR4 5
+#define V_400GKR4CR4(x) ((x) << S_400GKR4CR4)
+#define F_400GKR4CR4 V_400GKR4CR4(1U)
+
+#define S_200GKR2CR2 4
+#define V_200GKR2CR2(x) ((x) << S_200GKR2CR2)
+#define F_200GKR2CR2 V_200GKR2CR2(1U)
+
+#define S_100GKR1CR1 3
+#define V_100GKR1CR1(x) ((x) << S_100GKR1CR1)
+#define F_100GKR1CR1 V_100GKR1CR1(1U)
+
+#define S_200GKR4CR4 2
+#define V_200GKR4CR4(x) ((x) << S_200GKR4CR4)
+#define F_200GKR4CR4 V_200GKR4CR4(1U)
+
+#define S_100GKR2CR2 1
+#define V_100GKR2CR2(x) ((x) << S_100GKR2CR2)
+#define F_100GKR2CR2 V_100GKR2CR2(1U)
+
+#define S_50GKRCR 0
+#define V_50GKRCR(x) ((x) << S_50GKRCR)
+#define F_50GKRCR V_50GKRCR(1U)
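+
+/*
+ * Illustrative sketch only: once autonegotiation completes, the
+ * ETH_STATUS/ETH_STATUS_2 flags report the resolved mode.  Assuming
+ * the driver's t4_read_reg() accessor:
+ *
+ *	v = t4_read_reg(sc, A_MAC_BEAN0_ETH_STATUS_2);
+ *	if (v & F_RS_FEC_NEGOTIATED)
+ *		(the link will come up with RS-FEC enabled)
+ */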
+
+#define A_MAC_BEAN1_CTL 0x39300
+#define A_MAC_BEAN1_STATUS 0x39304
+#define A_MAC_BEAN1_ABILITY_0 0x39308
+
+#define S_BEAN1_REM_FAULT 13
+#define V_BEAN1_REM_FAULT(x) ((x) << S_BEAN1_REM_FAULT)
+#define F_BEAN1_REM_FAULT V_BEAN1_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_ABILITY_1 0x3930c
+#define A_MAC_BEAN1_ABILITY_2 0x39310
+
+#define S_BEAN1_AB_2_15_12 12
+#define M_BEAN1_AB_2_15_12 0xfU
+#define V_BEAN1_AB_2_15_12(x) ((x) << S_BEAN1_AB_2_15_12)
+#define G_BEAN1_AB_2_15_12(x) (((x) >> S_BEAN1_AB_2_15_12) & M_BEAN1_AB_2_15_12)
+
+#define S_BEAN1_AB_2_11_0 0
+#define M_BEAN1_AB_2_11_0 0xfffU
+#define V_BEAN1_AB_2_11_0(x) ((x) << S_BEAN1_AB_2_11_0)
+#define G_BEAN1_AB_2_11_0(x) (((x) >> S_BEAN1_AB_2_11_0) & M_BEAN1_AB_2_11_0)
+
+#define A_MAC_BEAN1_REM_ABILITY_0 0x39314
+
+#define S_BEAN1_ABL_REM_FAULT 13
+#define V_BEAN1_ABL_REM_FAULT(x) ((x) << S_BEAN1_ABL_REM_FAULT)
+#define F_BEAN1_ABL_REM_FAULT V_BEAN1_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN1_REM_ABILITY_1 0x39318
+#define A_MAC_BEAN1_REM_ABILITY_2 0x3931c
+
+#define S_BEAN1_REM_AB_15_12 12
+#define M_BEAN1_REM_AB_15_12 0xfU
+#define V_BEAN1_REM_AB_15_12(x) ((x) << S_BEAN1_REM_AB_15_12)
+#define G_BEAN1_REM_AB_15_12(x) (((x) >> S_BEAN1_REM_AB_15_12) & M_BEAN1_REM_AB_15_12)
+
+#define S_BEAN1_REM_AB_11_0 0
+#define M_BEAN1_REM_AB_11_0 0xfffU
+#define V_BEAN1_REM_AB_11_0(x) ((x) << S_BEAN1_REM_AB_11_0)
+#define G_BEAN1_REM_AB_11_0(x) (((x) >> S_BEAN1_REM_AB_11_0) & M_BEAN1_REM_AB_11_0)
+
+#define A_MAC_BEAN1_MS_COUNT 0x39320
+#define A_MAC_BEAN1_XNP_0 0x39324
+#define A_MAC_BEAN1_XNP_1 0x39328
+#define A_MAC_BEAN1_XNP_2 0x3932c
+#define A_MAC_LP_BEAN1_XNP_0 0x39330
+#define A_MAC_LP_BEAN1_XNP_1 0x39334
+#define A_MAC_LP_BEAN1_XNP_2 0x39338
+#define A_MAC_BEAN1_ETH_STATUS 0x3933c
+#define A_MAC_BEAN1_ETH_STATUS_2 0x39340
+#define A_MAC_BEAN2_CTL 0x39400
+#define A_MAC_BEAN2_STATUS 0x39404
+#define A_MAC_BEAN2_ABILITY_0 0x39408
+
+#define S_BEAN2_REM_FAULT 13
+#define V_BEAN2_REM_FAULT(x) ((x) << S_BEAN2_REM_FAULT)
+#define F_BEAN2_REM_FAULT V_BEAN2_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_ABILITY_1 0x3940c
+#define A_MAC_BEAN2_ABILITY_2 0x39410
+
+#define S_BEAN2_AB_2_15_12 12
+#define M_BEAN2_AB_2_15_12 0xfU
+#define V_BEAN2_AB_2_15_12(x) ((x) << S_BEAN2_AB_2_15_12)
+#define G_BEAN2_AB_2_15_12(x) (((x) >> S_BEAN2_AB_2_15_12) & M_BEAN2_AB_2_15_12)
+
+#define S_BEAN2_AB_2_11_0 0
+#define M_BEAN2_AB_2_11_0 0xfffU
+#define V_BEAN2_AB_2_11_0(x) ((x) << S_BEAN2_AB_2_11_0)
+#define G_BEAN2_AB_2_11_0(x) (((x) >> S_BEAN2_AB_2_11_0) & M_BEAN2_AB_2_11_0)
+
+#define A_MAC_BEAN2_REM_ABILITY_0 0x39414
+
+#define S_BEAN2_ABL_REM_FAULT 13
+#define V_BEAN2_ABL_REM_FAULT(x) ((x) << S_BEAN2_ABL_REM_FAULT)
+#define F_BEAN2_ABL_REM_FAULT V_BEAN2_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN2_REM_ABILITY_1 0x39418
+#define A_MAC_BEAN2_REM_ABILITY_2 0x3941c
+
+#define S_BEAN2_REM_AB_15_12 12
+#define M_BEAN2_REM_AB_15_12 0xfU
+#define V_BEAN2_REM_AB_15_12(x) ((x) << S_BEAN2_REM_AB_15_12)
+#define G_BEAN2_REM_AB_15_12(x) (((x) >> S_BEAN2_REM_AB_15_12) & M_BEAN2_REM_AB_15_12)
+
+#define S_BEAN2_REM_AB_11_0 0
+#define M_BEAN2_REM_AB_11_0 0xfffU
+#define V_BEAN2_REM_AB_11_0(x) ((x) << S_BEAN2_REM_AB_11_0)
+#define G_BEAN2_REM_AB_11_0(x) (((x) >> S_BEAN2_REM_AB_11_0) & M_BEAN2_REM_AB_11_0)
+
+#define A_MAC_BEAN2_MS_COUNT 0x39420
+#define A_MAC_BEAN2_XNP_0 0x39424
+#define A_MAC_BEAN2_XNP_1 0x39428
+#define A_MAC_BEAN2_XNP_2 0x3942c
+#define A_MAC_LP_BEAN2_XNP_0 0x39430
+#define A_MAC_LP_BEAN2_XNP_1 0x39434
+#define A_MAC_LP_BEAN2_XNP_2 0x39438
+#define A_MAC_BEAN2_ETH_STATUS 0x3943c
+#define A_MAC_BEAN2_ETH_STATUS_2 0x39440
+#define A_MAC_BEAN3_CTL 0x39500
+#define A_MAC_BEAN3_STATUS 0x39504
+#define A_MAC_BEAN3_ABILITY_0 0x39508
+
+#define S_BEAN3_REM_FAULT 13
+#define V_BEAN3_REM_FAULT(x) ((x) << S_BEAN3_REM_FAULT)
+#define F_BEAN3_REM_FAULT V_BEAN3_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_ABILITY_1 0x3950c
+#define A_MAC_BEAN3_ABILITY_2 0x39510
+
+#define S_BEAN3_AB_2_15_12 12
+#define M_BEAN3_AB_2_15_12 0xfU
+#define V_BEAN3_AB_2_15_12(x) ((x) << S_BEAN3_AB_2_15_12)
+#define G_BEAN3_AB_2_15_12(x) (((x) >> S_BEAN3_AB_2_15_12) & M_BEAN3_AB_2_15_12)
+
+#define S_BEAN3_AB_2_11_0 0
+#define M_BEAN3_AB_2_11_0 0xfffU
+#define V_BEAN3_AB_2_11_0(x) ((x) << S_BEAN3_AB_2_11_0)
+#define G_BEAN3_AB_2_11_0(x) (((x) >> S_BEAN3_AB_2_11_0) & M_BEAN3_AB_2_11_0)
+
+#define A_MAC_BEAN3_REM_ABILITY_0 0x39514
+
+#define S_BEAN3_ABL_REM_FAULT 13
+#define V_BEAN3_ABL_REM_FAULT(x) ((x) << S_BEAN3_ABL_REM_FAULT)
+#define F_BEAN3_ABL_REM_FAULT V_BEAN3_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN3_REM_ABILITY_1 0x39518
+#define A_MAC_BEAN3_REM_ABILITY_2 0x3951c
+
+#define S_BEAN3_REM_AB_15_12 12
+#define M_BEAN3_REM_AB_15_12 0xfU
+#define V_BEAN3_REM_AB_15_12(x) ((x) << S_BEAN3_REM_AB_15_12)
+#define G_BEAN3_REM_AB_15_12(x) (((x) >> S_BEAN3_REM_AB_15_12) & M_BEAN3_REM_AB_15_12)
+
+#define S_BEAN3_REM_AB_11_0 0
+#define M_BEAN3_REM_AB_11_0 0xfffU
+#define V_BEAN3_REM_AB_11_0(x) ((x) << S_BEAN3_REM_AB_11_0)
+#define G_BEAN3_REM_AB_11_0(x) (((x) >> S_BEAN3_REM_AB_11_0) & M_BEAN3_REM_AB_11_0)
+
+#define A_MAC_BEAN3_MS_COUNT 0x39520
+#define A_MAC_BEAN3_XNP_0 0x39524
+#define A_MAC_BEAN3_XNP_1 0x39528
+#define A_MAC_BEAN3_XNP_2 0x3952c
+#define A_MAC_LP_BEAN3_XNP_0 0x39530
+#define A_MAC_LP_BEAN3_XNP_1 0x39534
+#define A_MAC_LP_BEAN3_XNP_2 0x39538
+#define A_MAC_BEAN3_ETH_STATUS 0x3953c
+#define A_MAC_BEAN3_ETH_STATUS_2 0x39540
+#define A_MAC_BEAN4_CTL 0x39600
+#define A_MAC_BEAN4_STATUS 0x39604
+#define A_MAC_BEAN4_ABILITY_0 0x39608
+
+#define S_BEAN4_REM_FAULT 13
+#define V_BEAN4_REM_FAULT(x) ((x) << S_BEAN4_REM_FAULT)
+#define F_BEAN4_REM_FAULT V_BEAN4_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_ABILITY_1 0x3960c
+#define A_MAC_BEAN4_ABILITY_2 0x39610
+
+#define S_BEAN4_AB_2_15_12 12
+#define M_BEAN4_AB_2_15_12 0xfU
+#define V_BEAN4_AB_2_15_12(x) ((x) << S_BEAN4_AB_2_15_12)
+#define G_BEAN4_AB_2_15_12(x) (((x) >> S_BEAN4_AB_2_15_12) & M_BEAN4_AB_2_15_12)
+
+#define S_BEAN4_AB_2_11_0 0
+#define M_BEAN4_AB_2_11_0 0xfffU
+#define V_BEAN4_AB_2_11_0(x) ((x) << S_BEAN4_AB_2_11_0)
+#define G_BEAN4_AB_2_11_0(x) (((x) >> S_BEAN4_AB_2_11_0) & M_BEAN4_AB_2_11_0)
+
+#define A_MAC_BEAN4_REM_ABILITY_0 0x39614
+
+#define S_BEAN4_ABL_REM_FAULT 13
+#define V_BEAN4_ABL_REM_FAULT(x) ((x) << S_BEAN4_ABL_REM_FAULT)
+#define F_BEAN4_ABL_REM_FAULT V_BEAN4_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN4_REM_ABILITY_1 0x39618
+#define A_MAC_BEAN4_REM_ABILITY_2 0x3961c
+
+#define S_BEAN4_REM_AB_15_12 12
+#define M_BEAN4_REM_AB_15_12 0xfU
+#define V_BEAN4_REM_AB_15_12(x) ((x) << S_BEAN4_REM_AB_15_12)
+#define G_BEAN4_REM_AB_15_12(x) (((x) >> S_BEAN4_REM_AB_15_12) & M_BEAN4_REM_AB_15_12)
+
+#define S_BEAN4_REM_AB_11_0 0
+#define M_BEAN4_REM_AB_11_0 0xfffU
+#define V_BEAN4_REM_AB_11_0(x) ((x) << S_BEAN4_REM_AB_11_0)
+#define G_BEAN4_REM_AB_11_0(x) (((x) >> S_BEAN4_REM_AB_11_0) & M_BEAN4_REM_AB_11_0)
+
+#define A_MAC_BEAN4_MS_COUNT 0x39620
+#define A_MAC_BEAN4_XNP_0 0x39624
+#define A_MAC_BEAN4_XNP_1 0x39628
+#define A_MAC_BEAN4_XNP_2 0x3962c
+#define A_MAC_LP_BEAN4_XNP_0 0x39630
+#define A_MAC_LP_BEAN4_XNP_1 0x39634
+#define A_MAC_LP_BEAN4_XNP_2 0x39638
+#define A_MAC_BEAN4_ETH_STATUS 0x3963c
+#define A_MAC_BEAN4_ETH_STATUS_2 0x39640
+#define A_MAC_BEAN5_CTL 0x39700
+#define A_MAC_BEAN5_STATUS 0x39704
+#define A_MAC_BEAN5_ABILITY_0 0x39708
+
+#define S_BEAN5_REM_FAULT 13
+#define V_BEAN5_REM_FAULT(x) ((x) << S_BEAN5_REM_FAULT)
+#define F_BEAN5_REM_FAULT V_BEAN5_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_ABILITY_1 0x3970c
+#define A_MAC_BEAN5_ABILITY_2 0x39710
+
+#define S_BEAN5_AB_2_15_12 12
+#define M_BEAN5_AB_2_15_12 0xfU
+#define V_BEAN5_AB_2_15_12(x) ((x) << S_BEAN5_AB_2_15_12)
+#define G_BEAN5_AB_2_15_12(x) (((x) >> S_BEAN5_AB_2_15_12) & M_BEAN5_AB_2_15_12)
+
+#define S_BEAN5_AB_2_11_0 0
+#define M_BEAN5_AB_2_11_0 0xfffU
+#define V_BEAN5_AB_2_11_0(x) ((x) << S_BEAN5_AB_2_11_0)
+#define G_BEAN5_AB_2_11_0(x) (((x) >> S_BEAN5_AB_2_11_0) & M_BEAN5_AB_2_11_0)
+
+#define A_MAC_BEAN5_REM_ABILITY_0 0x39714
+
+#define S_BEAN5_ABL_REM_FAULT 13
+#define V_BEAN5_ABL_REM_FAULT(x) ((x) << S_BEAN5_ABL_REM_FAULT)
+#define F_BEAN5_ABL_REM_FAULT V_BEAN5_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN5_REM_ABILITY_1 0x39718
+#define A_MAC_BEAN5_REM_ABILITY_2 0x3971c
+
+#define S_BEAN5_REM_AB_15_12 12
+#define M_BEAN5_REM_AB_15_12 0xfU
+#define V_BEAN5_REM_AB_15_12(x) ((x) << S_BEAN5_REM_AB_15_12)
+#define G_BEAN5_REM_AB_15_12(x) (((x) >> S_BEAN5_REM_AB_15_12) & M_BEAN5_REM_AB_15_12)
+
+#define S_BEAN5_REM_AB_11_0 0
+#define M_BEAN5_REM_AB_11_0 0xfffU
+#define V_BEAN5_REM_AB_11_0(x) ((x) << S_BEAN5_REM_AB_11_0)
+#define G_BEAN5_REM_AB_11_0(x) (((x) >> S_BEAN5_REM_AB_11_0) & M_BEAN5_REM_AB_11_0)
+
+#define A_MAC_BEAN5_MS_COUNT 0x39720
+#define A_MAC_BEAN5_XNP_0 0x39724
+#define A_MAC_BEAN5_XNP_1 0x39728
+#define A_MAC_BEAN5_XNP_2 0x3972c
+#define A_MAC_LP_BEAN5_XNP_0 0x39730
+#define A_MAC_LP_BEAN5_XNP_1 0x39734
+#define A_MAC_LP_BEAN5_XNP_2 0x39738
+#define A_MAC_BEAN5_ETH_STATUS 0x3973c
+#define A_MAC_BEAN5_ETH_STATUS_2 0x39740
+#define A_MAC_BEAN6_CTL 0x39800
+#define A_MAC_BEAN6_STATUS 0x39804
+#define A_MAC_BEAN6_ABILITY_0 0x39808
+
+#define S_BEAN6_REM_FAULT 13
+#define V_BEAN6_REM_FAULT(x) ((x) << S_BEAN6_REM_FAULT)
+#define F_BEAN6_REM_FAULT V_BEAN6_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_ABILITY_1 0x3980c
+#define A_MAC_BEAN6_ABILITY_2 0x39810
+
+#define S_BEAN6_AB_2_15_12 12
+#define M_BEAN6_AB_2_15_12 0xfU
+#define V_BEAN6_AB_2_15_12(x) ((x) << S_BEAN6_AB_2_15_12)
+#define G_BEAN6_AB_2_15_12(x) (((x) >> S_BEAN6_AB_2_15_12) & M_BEAN6_AB_2_15_12)
+
+#define S_BEAN6_AB_2_11_0 0
+#define M_BEAN6_AB_2_11_0 0xfffU
+#define V_BEAN6_AB_2_11_0(x) ((x) << S_BEAN6_AB_2_11_0)
+#define G_BEAN6_AB_2_11_0(x) (((x) >> S_BEAN6_AB_2_11_0) & M_BEAN6_AB_2_11_0)
+
+#define A_MAC_BEAN6_REM_ABILITY_0 0x39814
+
+#define S_BEAN6_ABL_REM_FAULT 13
+#define V_BEAN6_ABL_REM_FAULT(x) ((x) << S_BEAN6_ABL_REM_FAULT)
+#define F_BEAN6_ABL_REM_FAULT V_BEAN6_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN6_REM_ABILITY_1 0x39818
+#define A_MAC_BEAN6_REM_ABILITY_2 0x3981c
+
+#define S_BEAN6_REM_AB_15_12 12
+#define M_BEAN6_REM_AB_15_12 0xfU
+#define V_BEAN6_REM_AB_15_12(x) ((x) << S_BEAN6_REM_AB_15_12)
+#define G_BEAN6_REM_AB_15_12(x) (((x) >> S_BEAN6_REM_AB_15_12) & M_BEAN6_REM_AB_15_12)
+
+#define S_BEAN6_REM_AB_11_0 0
+#define M_BEAN6_REM_AB_11_0 0xfffU
+#define V_BEAN6_REM_AB_11_0(x) ((x) << S_BEAN6_REM_AB_11_0)
+#define G_BEAN6_REM_AB_11_0(x) (((x) >> S_BEAN6_REM_AB_11_0) & M_BEAN6_REM_AB_11_0)
+
+#define A_MAC_BEAN6_MS_COUNT 0x39820
+#define A_MAC_BEAN6_XNP_0 0x39824
+#define A_MAC_BEAN6_XNP_1 0x39828
+#define A_MAC_BEAN6_XNP_2 0x3982c
+#define A_MAC_LP_BEAN6_XNP_0 0x39830
+#define A_MAC_LP_BEAN6_XNP_1 0x39834
+#define A_MAC_LP_BEAN6_XNP_2 0x39838
+#define A_MAC_BEAN6_ETH_STATUS 0x3983c
+#define A_MAC_BEAN6_ETH_STATUS_2 0x39840
+#define A_MAC_BEAN7_CTL 0x39900
+#define A_MAC_BEAN7_STATUS 0x39904
+#define A_MAC_BEAN7_ABILITY_0 0x39908
+
+#define S_BEAN7_REM_FAULT 13
+#define V_BEAN7_REM_FAULT(x) ((x) << S_BEAN7_REM_FAULT)
+#define F_BEAN7_REM_FAULT V_BEAN7_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_ABILITY_1 0x3990c
+#define A_MAC_BEAN7_ABILITY_2 0x39910
+
+#define S_BEAN7_AB_2_15_12 12
+#define M_BEAN7_AB_2_15_12 0xfU
+#define V_BEAN7_AB_2_15_12(x) ((x) << S_BEAN7_AB_2_15_12)
+#define G_BEAN7_AB_2_15_12(x) (((x) >> S_BEAN7_AB_2_15_12) & M_BEAN7_AB_2_15_12)
+
+#define S_BEAN7_AB_2_11_0 0
+#define M_BEAN7_AB_2_11_0 0xfffU
+#define V_BEAN7_AB_2_11_0(x) ((x) << S_BEAN7_AB_2_11_0)
+#define G_BEAN7_AB_2_11_0(x) (((x) >> S_BEAN7_AB_2_11_0) & M_BEAN7_AB_2_11_0)
+
+#define A_MAC_BEAN7_REM_ABILITY_0 0x39914
+
+#define S_BEAN7_ABL_REM_FAULT 13
+#define V_BEAN7_ABL_REM_FAULT(x) ((x) << S_BEAN7_ABL_REM_FAULT)
+#define F_BEAN7_ABL_REM_FAULT V_BEAN7_ABL_REM_FAULT(1U)
+
+#define A_MAC_BEAN7_REM_ABILITY_1 0x39918
+#define A_MAC_BEAN7_REM_ABILITY_2 0x3991c
+
+#define S_BEAN7_REM_AB_15_12 12
+#define M_BEAN7_REM_AB_15_12 0xfU
+#define V_BEAN7_REM_AB_15_12(x) ((x) << S_BEAN7_REM_AB_15_12)
+#define G_BEAN7_REM_AB_15_12(x) (((x) >> S_BEAN7_REM_AB_15_12) & M_BEAN7_REM_AB_15_12)
+
+#define S_BEAN7_REM_AB_11_0 0
+#define M_BEAN7_REM_AB_11_0 0xfffU
+#define V_BEAN7_REM_AB_11_0(x) ((x) << S_BEAN7_REM_AB_11_0)
+#define G_BEAN7_REM_AB_11_0(x) (((x) >> S_BEAN7_REM_AB_11_0) & M_BEAN7_REM_AB_11_0)
+
+#define A_MAC_BEAN7_MS_COUNT 0x39920
+#define A_MAC_BEAN7_XNP_0 0x39924
+#define A_MAC_BEAN7_XNP_1 0x39928
+#define A_MAC_BEAN7_XNP_2 0x3992c
+#define A_MAC_LP_BEAN7_XNP_0 0x39930
+#define A_MAC_LP_BEAN7_XNP_1 0x39934
+#define A_MAC_LP_BEAN7_XNP_2 0x39938
+#define A_MAC_BEAN7_ETH_STATUS 0x3993c
+#define A_MAC_BEAN7_ETH_STATUS_2 0x39940
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI 0x39a00
+#define A_MAC_MTIP_ETHERSTATS_STATN_STATUS 0x39a04
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONFIG 0x39a08
+
+#define S_T7_RESET 31
+#define V_T7_RESET(x) ((x) << S_T7_RESET)
+#define F_T7_RESET V_T7_RESET(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CONTROL 0x39a0c
+
+#define S_CMD_CLEAR_TX 31
+#define V_CMD_CLEAR_TX(x) ((x) << S_CMD_CLEAR_TX)
+#define F_CMD_CLEAR_TX V_CMD_CLEAR_TX(1U)
+
+#define S_CMD_CLEAR_RX 30
+#define V_CMD_CLEAR_RX(x) ((x) << S_CMD_CLEAR_RX)
+#define F_CMD_CLEAR_RX V_CMD_CLEAR_RX(1U)
+
+#define S_CLEAR_PRE 29
+#define V_CLEAR_PRE(x) ((x) << S_CLEAR_PRE)
+#define F_CLEAR_PRE V_CLEAR_PRE(1U)
+
+#define S_CMD_CAPTURE_TX 28
+#define V_CMD_CAPTURE_TX(x) ((x) << S_CMD_CAPTURE_TX)
+#define F_CMD_CAPTURE_TX V_CMD_CAPTURE_TX(1U)
+
+#define S_CMD_CAPTURE_RX 27
+#define V_CMD_CAPTURE_RX(x) ((x) << S_CMD_CAPTURE_RX)
+#define F_CMD_CAPTURE_RX V_CMD_CAPTURE_RX(1U)
+
+#define S_PORTMASK 0
+#define M_PORTMASK 0xffU
+#define V_PORTMASK(x) ((x) << S_PORTMASK)
+#define G_PORTMASK(x) (((x) >> S_PORTMASK) & M_PORTMASK)
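+
+/*
+ * Illustrative sketch only: CMD_CAPTURE_RX/CMD_CAPTURE_TX presumably
+ * latch the counters of the ports selected in PORTMASK so the
+ * CAPTURED_PAGE registers below read back a consistent snapshot.
+ * Assuming the driver's t4_write_reg() accessor:
+ *
+ *	t4_write_reg(sc, A_MAC_MTIP_ETHERSTATS_STATN_CONTROL,
+ *	    F_CMD_CAPTURE_RX | V_PORTMASK(1 << port));
+ */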
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_LO 0x39a10
+
+#define S_STATN_CLEARVALUE_LO 0
+#define V_STATN_CLEARVALUE_LO(x) ((x) << S_STATN_CLEARVALUE_LO)
+#define F_STATN_CLEARVALUE_LO V_STATN_CLEARVALUE_LO(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_STATN_CLEARVALUE_HI 0x39a14
+
+#define S_STATN_CLEARVALUE_HI 0
+#define V_STATN_CLEARVALUE_HI(x) ((x) << S_STATN_CLEARVALUE_HI)
+#define F_STATN_CLEARVALUE_HI V_STATN_CLEARVALUE_HI(1U)
+
+#define A_MAC_MTIP_ETHERSTATS_DATA_HI_1 0x39a1c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_0 0x39a20
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_1 0x39a24
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_2 0x39a28
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_3 0x39a2c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_4 0x39a30
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_5 0x39a34
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_6 0x39a38
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_7 0x39a3c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_8 0x39a40
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_9 0x39a44
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_10 0x39a48
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_11 0x39a4c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_12 0x39a50
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_13 0x39a54
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_14 0x39a58
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_15 0x39a5c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_16 0x39a60
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_17 0x39a64
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_18 0x39a68
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_19 0x39a6c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_20 0x39a70
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_21 0x39a74
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_22 0x39a78
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_23 0x39a7c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_24 0x39a80
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_25 0x39a84
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_26 0x39a88
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_27 0x39a8c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_28 0x39a90
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_29 0x39a94
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_30 0x39a98
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_31 0x39a9c
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_32 0x39aa0
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_33 0x39aa4
+#define A_MAC_MTIP_ETHERSTATS_CAPTURED_PAGE_34 0x39aa8
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS 0x39b00
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSRECEIVEDOK 0x39b04
+#define A_MAC_MTIP_ETHERSTATS0_AALIGNMENTERRORS 0x39b08
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESRECEIVED 0x39b0c
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMETOOLONGERRORS 0x39b10
+#define A_MAC_MTIP_ETHERSTATS0_AINRANGELENGTHERRORS 0x39b14
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESRECEIVEDOK 0x39b18
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMECHECKSEQUENCEERRORS 0x39b1c
+#define A_MAC_MTIP_ETHERSTATS0_VLANRECEIVEDOK 0x39b20
+#define A_MAC_MTIP_ETHERSTATS0_IFINERRORS_RX 0x39b24
+#define A_MAC_MTIP_ETHERSTATS0_IFINUCASTPKTS_RX 0x39b28
+#define A_MAC_MTIP_ETHERSTATS0_IFINMULTICASTPKTS_RX 0x39b2c
+#define A_MAC_MTIP_ETHERSTATS0_IFINBROADCASTPKTS_RX 0x39b30
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSDROPEVENTS_RX 0x39b34
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_RX 0x39b38
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSUNDERSIZEPKTS_RX 0x39b3c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_RX 0x39b40
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_RX 0x39b44
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_RX 0x39b48
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_RX 0x39b4c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39b50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39b54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39b58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOVERSIZEPKTS_RX 0x39b5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSJABBERS_RX 0x39b60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSFRAGMENTS_RX 0x39b64
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39b68
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39b6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39b70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39b74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39b78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39b7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39b80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39b84
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESRECEIVED_RX 0x39b88
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS 0x39b8c
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSRECEIVEDOK 0x39b90
+#define A_MAC_MTIP_ETHERSTATS1_AALIGNMENTERRORS 0x39b94
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESRECEIVED 0x39b98
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMETOOLONGERRORS 0x39b9c
+#define A_MAC_MTIP_ETHERSTATS1_AINRANGELENGTHERRORS 0x39ba0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESRECEIVEDOK 0x39ba4
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMECHECKSEQUENCEERRORS 0x39ba8
+#define A_MAC_MTIP_ETHERSTATS1_VLANRECEIVEDOK 0x39bac
+#define A_MAC_MTIP_ETHERSTATS1_IFINERRORS_RX 0x39bb0
+#define A_MAC_MTIP_ETHERSTATS1_IFINUCASTPKTS_RX 0x39bb4
+#define A_MAC_MTIP_ETHERSTATS1_IFINMULTICASTPKTS_RX 0x39bb8
+#define A_MAC_MTIP_ETHERSTATS1_IFINBROADCASTPKTS_RX 0x39bbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSDROPEVENTS_RX 0x39bc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_RX 0x39bc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSUNDERSIZEPKTS_RX 0x39bc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_RX 0x39bcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_RX 0x39bd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_RX 0x39bd4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_RX 0x39bd8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39bdc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39be0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39be4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOVERSIZEPKTS_RX 0x39be8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSJABBERS_RX 0x39bec
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSFRAGMENTS_RX 0x39bf0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39bf4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39bf8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39bfc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c00
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c04
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c08
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c0c
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c10
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESRECEIVED_RX 0x39c14
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS 0x39c18
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSRECEIVEDOK 0x39c1c
+#define A_MAC_MTIP_ETHERSTATS2_AALIGNMENTERRORS 0x39c20
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESRECEIVED 0x39c24
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMETOOLONGERRORS 0x39c28
+#define A_MAC_MTIP_ETHERSTATS2_AINRANGELENGTHERRORS 0x39c2c
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESRECEIVEDOK 0x39c30
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMECHECKSEQUENCEERRORS 0x39c34
+#define A_MAC_MTIP_ETHERSTATS2_VLANRECEIVEDOK 0x39c38
+#define A_MAC_MTIP_ETHERSTATS2_IFINERRORS_RX 0x39c3c
+#define A_MAC_MTIP_ETHERSTATS2_IFINUCASTPKTS_RX 0x39c40
+#define A_MAC_MTIP_ETHERSTATS2_IFINMULTICASTPKTS_RX 0x39c44
+#define A_MAC_MTIP_ETHERSTATS2_IFINBROADCASTPKTS_RX 0x39c48
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSDROPEVENTS_RX 0x39c4c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_RX 0x39c50
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSUNDERSIZEPKTS_RX 0x39c54
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_RX 0x39c58
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_RX 0x39c5c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_RX 0x39c60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_RX 0x39c64
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39c68
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39c6c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39c70
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOVERSIZEPKTS_RX 0x39c74
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSJABBERS_RX 0x39c78
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSFRAGMENTS_RX 0x39c7c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39c80
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39c84
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39c88
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39c8c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39c90
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39c94
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39c98
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39c9c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESRECEIVED_RX 0x39ca0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS 0x39ca4
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSRECEIVEDOK 0x39ca8
+#define A_MAC_MTIP_ETHERSTATS3_AALIGNMENTERRORS 0x39cac
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESRECEIVED 0x39cb0
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMETOOLONGERRORS 0x39cb4
+#define A_MAC_MTIP_ETHERSTATS3_AINRANGELENGTHERRORS 0x39cb8
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESRECEIVEDOK 0x39cbc
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMECHECKSEQUENCEERRORS 0x39cc0
+#define A_MAC_MTIP_ETHERSTATS3_VLANRECEIVEDOK 0x39cc4
+#define A_MAC_MTIP_ETHERSTATS3_IFINERRORS_RX 0x39cc8
+#define A_MAC_MTIP_ETHERSTATS3_IFINUCASTPKTS_RX 0x39ccc
+#define A_MAC_MTIP_ETHERSTATS3_IFINMULTICASTPKTS_RX 0x39cd0
+#define A_MAC_MTIP_ETHERSTATS3_IFINBROADCASTPKTS_RX 0x39cd4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSDROPEVENTS_RX 0x39cd8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_RX 0x39cdc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSUNDERSIZEPKTS_RX 0x39ce0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_RX 0x39ce4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_RX 0x39ce8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_RX 0x39cec
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_RX 0x39cf0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_RX 0x39cf4
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_RX 0x39cf8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_RX 0x39cfc
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOVERSIZEPKTS_RX 0x39d00
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSJABBERS_RX 0x39d04
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSFRAGMENTS_RX 0x39d08
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_0_RX 0x39d0c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_1_RX 0x39d10
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_2_RX 0x39d14
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_3_RX 0x39d18
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_4_RX 0x39d1c
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_5_RX 0x39d20
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_6_RX 0x39d24
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESRECEIVED_7_RX 0x39d28
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESRECEIVED_RX 0x39d2c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSOCTETS_TX 0x39d30
+#define A_MAC_MTIP_ETHERSTATS0_OCTETSTRANSMITTEDOK_TX 0x39d34
+#define A_MAC_MTIP_ETHERSTATS0_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39d38
+#define A_MAC_MTIP_ETHERSTATS0_AFRAMESTRANSMITTEDOK_TX 0x39d3c
+#define A_MAC_MTIP_ETHERSTATS0_VLANTRANSMITTEDOK_TX 0x39d40
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTERRORS_TX 0x39d44
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTUCASTPKTS_TX 0x39d48
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTMULTICASTPKTS_TX 0x39d4c
+#define A_MAC_MTIP_ETHERSTATS0_IFOUTBROADCASTPKTS_TX 0x39d50
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS64OCTETS_TX 0x39d54
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS65TO127OCTETS_TX 0x39d58
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS128TO255OCTETS_TX 0x39d5c
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS256TO511OCTETS_TX 0x39d60
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39d64
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39d68
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39d6c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39d70
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39d74
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39d78
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39d7c
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39d80
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39d84
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39d88
+#define A_MAC_MTIP_ETHERSTATS0_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39d8c
+#define A_MAC_MTIP_ETHERSTATS0_AMACCONTROLFRAMESTRANSMITTED_TX 0x39d90
+#define A_MAC_MTIP_ETHERSTATS0_ETHERSTATSPKTS_TX 0x39d94
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSOCTETS_TX 0x39d98
+#define A_MAC_MTIP_ETHERSTATS1_OCTETSTRANSMITTEDOK_TX 0x39d9c
+#define A_MAC_MTIP_ETHERSTATS1_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39da0
+#define A_MAC_MTIP_ETHERSTATS1_AFRAMESTRANSMITTEDOK_TX 0x39da4
+#define A_MAC_MTIP_ETHERSTATS1_VLANTRANSMITTEDOK_TX 0x39da8
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTERRORS_TX 0x39dac
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTUCASTPKTS_TX 0x39db0
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTMULTICASTPKTS_TX 0x39db4
+#define A_MAC_MTIP_ETHERSTATS1_IFOUTBROADCASTPKTS_TX 0x39db8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS64OCTETS_TX 0x39dbc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS65TO127OCTETS_TX 0x39dc0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS128TO255OCTETS_TX 0x39dc4
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS256TO511OCTETS_TX 0x39dc8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39dcc
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39dd0
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39dd4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39dd8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39ddc
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39de0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39de4
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39de8
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39dec
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39df0
+#define A_MAC_MTIP_ETHERSTATS1_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39df4
+#define A_MAC_MTIP_ETHERSTATS1_AMACCONTROLFRAMESTRANSMITTED_TX 0x39df8
+#define A_MAC_MTIP_ETHERSTATS1_ETHERSTATSPKTS_TX 0x39dfc
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSOCTETS_TX 0x39e00
+#define A_MAC_MTIP_ETHERSTATS2_OCTETSTRANSMITTEDOK_TX 0x39e04
+#define A_MAC_MTIP_ETHERSTATS2_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e08
+#define A_MAC_MTIP_ETHERSTATS2_AFRAMESTRANSMITTEDOK_TX 0x39e0c
+#define A_MAC_MTIP_ETHERSTATS2_VLANTRANSMITTEDOK_TX 0x39e10
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTERRORS_TX 0x39e14
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTUCASTPKTS_TX 0x39e18
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTMULTICASTPKTS_TX 0x39e1c
+#define A_MAC_MTIP_ETHERSTATS2_IFOUTBROADCASTPKTS_TX 0x39e20
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS64OCTETS_TX 0x39e24
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e28
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e2c
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e30
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e34
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39e38
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39e3c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39e40
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39e44
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39e48
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39e4c
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39e50
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39e54
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39e58
+#define A_MAC_MTIP_ETHERSTATS2_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39e5c
+#define A_MAC_MTIP_ETHERSTATS2_AMACCONTROLFRAMESTRANSMITTED_TX 0x39e60
+#define A_MAC_MTIP_ETHERSTATS2_ETHERSTATSPKTS_TX 0x39e64
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSOCTETS_TX 0x39e68
+#define A_MAC_MTIP_ETHERSTATS3_OCTETSTRANSMITTEDOK_TX 0x39e6c
+#define A_MAC_MTIP_ETHERSTATS3_APAUSEMACCTRLFRAMESTRANSMITTED_TX 0x39e70
+#define A_MAC_MTIP_ETHERSTATS3_AFRAMESTRANSMITTEDOK_TX 0x39e74
+#define A_MAC_MTIP_ETHERSTATS3_VLANTRANSMITTEDOK_TX 0x39e78
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTERRORS_TX 0x39e7c
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTUCASTPKTS_TX 0x39e80
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTMULTICASTPKTS_TX 0x39e84
+#define A_MAC_MTIP_ETHERSTATS3_IFOUTBROADCASTPKTS_TX 0x39e88
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS64OCTETS_TX 0x39e8c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS65TO127OCTETS_TX 0x39e90
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS128TO255OCTETS_TX 0x39e94
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS256TO511OCTETS_TX 0x39e98
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS512TO1023OCTETS_TX 0x39e9c
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1024TO1518OCTETS_TX 0x39ea0
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS1519TOMAXOCTETS_TX 0x39ea4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_0_TX 0x39ea8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_1_TX 0x39eac
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_2_TX 0x39eb0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_3_TX 0x39eb4
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_4_TX 0x39eb8
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_5_TX 0x39ebc
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_6_TX 0x39ec0
+#define A_MAC_MTIP_ETHERSTATS3_ACBFCPAUSEFRAMESTRANSMITTED_7_TX 0x39ec4
+#define A_MAC_MTIP_ETHERSTATS3_AMACCONTROLFRAMESTRANSMITTED_TX 0x39ec8
+#define A_MAC_MTIP_ETHERSTATS3_ETHERSTATSPKTS_TX 0x39ecc
+#define A_MAC_IOS_CTRL 0x3a000
+
+#define S_SUB_BLOCK_SEL 28
+#define M_SUB_BLOCK_SEL 0x7U
+#define V_SUB_BLOCK_SEL(x) ((x) << S_SUB_BLOCK_SEL)
+#define G_SUB_BLOCK_SEL(x) (((x) >> S_SUB_BLOCK_SEL) & M_SUB_BLOCK_SEL)
+
+#define S_QUAD_BROADCAST_EN 24
+#define V_QUAD_BROADCAST_EN(x) ((x) << S_QUAD_BROADCAST_EN)
+#define F_QUAD_BROADCAST_EN V_QUAD_BROADCAST_EN(1U)
+
+#define S_AUTO_INCR 20
+#define V_AUTO_INCR(x) ((x) << S_AUTO_INCR)
+#define F_AUTO_INCR V_AUTO_INCR(1U)
+
+#define S_T7_2_ADDR 0
+#define M_T7_2_ADDR 0x7ffffU
+#define V_T7_2_ADDR(x) ((x) << S_T7_2_ADDR)
+#define G_T7_2_ADDR(x) (((x) >> S_T7_2_ADDR) & M_T7_2_ADDR)
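+
+/*
+ * Illustrative sketch only: MAC_IOS_CTRL/MAC_IOS_DATA form what looks
+ * like an indirect-access window: select a sub-block and a 19-bit
+ * address in CTRL (optionally with F_AUTO_INCR for sequential
+ * accesses), then read or write through DATA.  Assuming the driver's
+ * t4_read_reg()/t4_write_reg() accessors:
+ *
+ *	t4_write_reg(sc, A_MAC_IOS_CTRL,
+ *	    V_SUB_BLOCK_SEL(blk) | V_T7_2_ADDR(addr));
+ *	val = t4_read_reg(sc, A_MAC_IOS_DATA);
+ */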
+
+#define A_MAC_IOS_DATA 0x3a004
+#define A_MAC_IOS_BGR_RST 0x3a050
+
+#define S_BGR_RSTN 0
+#define V_BGR_RSTN(x) ((x) << S_BGR_RSTN)
+#define F_BGR_RSTN V_BGR_RSTN(1U)
+
+#define A_MAC_IOS_BGR_CFG 0x3a054
+
+#define S_SOC_REFCLK_EN 0
+#define V_SOC_REFCLK_EN(x) ((x) << S_SOC_REFCLK_EN)
+#define F_SOC_REFCLK_EN V_SOC_REFCLK_EN(1U)
+
+#define A_MAC_IOS_QUAD0_CFG 0x3a058
+
+#define S_QUAD0_CH3_RSTN 5
+#define V_QUAD0_CH3_RSTN(x) ((x) << S_QUAD0_CH3_RSTN)
+#define F_QUAD0_CH3_RSTN V_QUAD0_CH3_RSTN(1U)
+
+#define S_QUAD0_CH2_RSTN 4
+#define V_QUAD0_CH2_RSTN(x) ((x) << S_QUAD0_CH2_RSTN)
+#define F_QUAD0_CH2_RSTN V_QUAD0_CH2_RSTN(1U)
+
+#define S_QUAD0_CH1_RSTN 3
+#define V_QUAD0_CH1_RSTN(x) ((x) << S_QUAD0_CH1_RSTN)
+#define F_QUAD0_CH1_RSTN V_QUAD0_CH1_RSTN(1U)
+
+#define S_QUAD0_CH0_RSTN 2
+#define V_QUAD0_CH0_RSTN(x) ((x) << S_QUAD0_CH0_RSTN)
+#define F_QUAD0_CH0_RSTN V_QUAD0_CH0_RSTN(1U)
+
+#define S_QUAD0_RSTN 1
+#define V_QUAD0_RSTN(x) ((x) << S_QUAD0_RSTN)
+#define F_QUAD0_RSTN V_QUAD0_RSTN(1U)
+
+#define S_PLL0_RSTN 0
+#define V_PLL0_RSTN(x) ((x) << S_PLL0_RSTN)
+#define F_PLL0_RSTN V_PLL0_RSTN(1U)
+
+#define A_MAC_IOS_QUAD1_CFG 0x3a05c
+
+#define S_QUAD1_CH3_RSTN 5
+#define V_QUAD1_CH3_RSTN(x) ((x) << S_QUAD1_CH3_RSTN)
+#define F_QUAD1_CH3_RSTN V_QUAD1_CH3_RSTN(1U)
+
+#define S_QUAD1_CH2_RSTN 4
+#define V_QUAD1_CH2_RSTN(x) ((x) << S_QUAD1_CH2_RSTN)
+#define F_QUAD1_CH2_RSTN V_QUAD1_CH2_RSTN(1U)
+
+#define S_QUAD1_CH1_RSTN 3
+#define V_QUAD1_CH1_RSTN(x) ((x) << S_QUAD1_CH1_RSTN)
+#define F_QUAD1_CH1_RSTN V_QUAD1_CH1_RSTN(1U)
+
+#define S_QUAD1_CH0_RSTN 2
+#define V_QUAD1_CH0_RSTN(x) ((x) << S_QUAD1_CH0_RSTN)
+#define F_QUAD1_CH0_RSTN V_QUAD1_CH0_RSTN(1U)
+
+#define S_QUAD1_RSTN 1
+#define V_QUAD1_RSTN(x) ((x) << S_QUAD1_RSTN)
+#define F_QUAD1_RSTN V_QUAD1_RSTN(1U)
+
+#define S_PLL1_RSTN 0
+#define V_PLL1_RSTN(x) ((x) << S_PLL1_RSTN)
+#define F_PLL1_RSTN V_PLL1_RSTN(1U)
+
+#define A_MAC_IOS_SCRATCHPAD0 0x3a060
+#define A_MAC_IOS_SCRATCHPAD1 0x3a064
+#define A_MAC_IOS_SCRATCHPAD2 0x3a068
+#define A_MAC_IOS_SCRATCHPAD3 0x3a06c
+
+#define S_DATA0 1
+#define M_DATA0 0x7fffffffU
+#define V_DATA0(x) ((x) << S_DATA0)
+#define G_DATA0(x) (((x) >> S_DATA0) & M_DATA0)
+
+#define S_I2C_MODE 0
+#define V_I2C_MODE(x) ((x) << S_I2C_MODE)
+#define F_I2C_MODE V_I2C_MODE(1U)
+
+#define A_MAC_IOS_BGR_DBG_COUNTER 0x3a070
+#define A_MAC_IOS_QUAD0_DBG_COUNTER 0x3a074
+#define A_MAC_IOS_PLL0_DBG_COUNTER 0x3a078
+#define A_MAC_IOS_QUAD1_DBG_COUNTER 0x3a07c
+#define A_MAC_IOS_PLL1_DBG_COUNTER 0x3a080
+#define A_MAC_IOS_DBG_CLK_CFG 0x3a084
+
+#define S_DBG_CLK_MUX_GPIO 3
+#define V_DBG_CLK_MUX_GPIO(x) ((x) << S_DBG_CLK_MUX_GPIO)
+#define F_DBG_CLK_MUX_GPIO V_DBG_CLK_MUX_GPIO(1U)
+
+#define S_DBG_CLK_MUX_SEL 0
+#define M_DBG_CLK_MUX_SEL 0x7U
+#define V_DBG_CLK_MUX_SEL(x) ((x) << S_DBG_CLK_MUX_SEL)
+#define G_DBG_CLK_MUX_SEL(x) (((x) >> S_DBG_CLK_MUX_SEL) & M_DBG_CLK_MUX_SEL)
+
+#define A_MAC_IOS_INTR_EN_QUAD0 0x3a090
+
+#define S_Q0_MAILBOX_INT_ASSERT 24
+#define V_Q0_MAILBOX_INT_ASSERT(x) ((x) << S_Q0_MAILBOX_INT_ASSERT)
+#define F_Q0_MAILBOX_INT_ASSERT V_Q0_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q0_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_3_ASSERT)
+#define F_Q0_TRAINING_FAILURE_3_ASSERT V_Q0_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q0_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_2_ASSERT)
+#define F_Q0_TRAINING_FAILURE_2_ASSERT V_Q0_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q0_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_1_ASSERT)
+#define F_Q0_TRAINING_FAILURE_1_ASSERT V_Q0_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q0_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q0_TRAINING_FAILURE_0_ASSERT)
+#define F_Q0_TRAINING_FAILURE_0_ASSERT V_Q0_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q0_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_3_ASSERT V_Q0_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q0_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_2_ASSERT V_Q0_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q0_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_1_ASSERT V_Q0_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q0_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q0_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q0_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q0_TRAINING_COMPLETE_0_ASSERT V_Q0_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_3_ASSERT 15
+#define V_Q0_AN_TX_INT_3_ASSERT(x) ((x) << S_Q0_AN_TX_INT_3_ASSERT)
+#define F_Q0_AN_TX_INT_3_ASSERT V_Q0_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_2_ASSERT 14
+#define V_Q0_AN_TX_INT_2_ASSERT(x) ((x) << S_Q0_AN_TX_INT_2_ASSERT)
+#define F_Q0_AN_TX_INT_2_ASSERT V_Q0_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_1_ASSERT 13
+#define V_Q0_AN_TX_INT_1_ASSERT(x) ((x) << S_Q0_AN_TX_INT_1_ASSERT)
+#define F_Q0_AN_TX_INT_1_ASSERT V_Q0_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q0_AN_TX_INT_0_ASSERT 12
+#define V_Q0_AN_TX_INT_0_ASSERT(x) ((x) << S_Q0_AN_TX_INT_0_ASSERT)
+#define F_Q0_AN_TX_INT_0_ASSERT V_Q0_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q0_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_3_ASSERT)
+#define F_Q0_SIGNAL_DETECT_3_ASSERT V_Q0_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q0_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_2_ASSERT)
+#define F_Q0_SIGNAL_DETECT_2_ASSERT V_Q0_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q0_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_1_ASSERT)
+#define F_Q0_SIGNAL_DETECT_1_ASSERT V_Q0_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q0_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q0_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q0_SIGNAL_DETECT_0_ASSERT)
+#define F_Q0_SIGNAL_DETECT_0_ASSERT V_Q0_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_3_ASSERT 7
+#define V_Q0_CDR_LOL_3_ASSERT(x) ((x) << S_Q0_CDR_LOL_3_ASSERT)
+#define F_Q0_CDR_LOL_3_ASSERT V_Q0_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_2_ASSERT 6
+#define V_Q0_CDR_LOL_2_ASSERT(x) ((x) << S_Q0_CDR_LOL_2_ASSERT)
+#define F_Q0_CDR_LOL_2_ASSERT V_Q0_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_1_ASSERT 5
+#define V_Q0_CDR_LOL_1_ASSERT(x) ((x) << S_Q0_CDR_LOL_1_ASSERT)
+#define F_Q0_CDR_LOL_1_ASSERT V_Q0_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q0_CDR_LOL_0_ASSERT 4
+#define V_Q0_CDR_LOL_0_ASSERT(x) ((x) << S_Q0_CDR_LOL_0_ASSERT)
+#define F_Q0_CDR_LOL_0_ASSERT V_Q0_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q0_LOS_3_ASSERT 3
+#define V_Q0_LOS_3_ASSERT(x) ((x) << S_Q0_LOS_3_ASSERT)
+#define F_Q0_LOS_3_ASSERT V_Q0_LOS_3_ASSERT(1U)
+
+#define S_Q0_LOS_2_ASSERT 2
+#define V_Q0_LOS_2_ASSERT(x) ((x) << S_Q0_LOS_2_ASSERT)
+#define F_Q0_LOS_2_ASSERT V_Q0_LOS_2_ASSERT(1U)
+
+#define S_Q0_LOS_1_ASSERT 1
+#define V_Q0_LOS_1_ASSERT(x) ((x) << S_Q0_LOS_1_ASSERT)
+#define F_Q0_LOS_1_ASSERT V_Q0_LOS_1_ASSERT(1U)
+
+#define S_Q0_LOS_0_ASSERT 0
+#define V_Q0_LOS_0_ASSERT(x) ((x) << S_Q0_LOS_0_ASSERT)
+#define F_Q0_LOS_0_ASSERT V_Q0_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD0 0x3a094
+#define A_MAC_IOS_INTR_EN_QUAD1 0x3a098
+
+#define S_Q1_MAILBOX_INT_ASSERT 24
+#define V_Q1_MAILBOX_INT_ASSERT(x) ((x) << S_Q1_MAILBOX_INT_ASSERT)
+#define F_Q1_MAILBOX_INT_ASSERT V_Q1_MAILBOX_INT_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_3_ASSERT 23
+#define V_Q1_TRAINING_FAILURE_3_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_3_ASSERT)
+#define F_Q1_TRAINING_FAILURE_3_ASSERT V_Q1_TRAINING_FAILURE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_2_ASSERT 22
+#define V_Q1_TRAINING_FAILURE_2_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_2_ASSERT)
+#define F_Q1_TRAINING_FAILURE_2_ASSERT V_Q1_TRAINING_FAILURE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_1_ASSERT 21
+#define V_Q1_TRAINING_FAILURE_1_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_1_ASSERT)
+#define F_Q1_TRAINING_FAILURE_1_ASSERT V_Q1_TRAINING_FAILURE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_FAILURE_0_ASSERT 20
+#define V_Q1_TRAINING_FAILURE_0_ASSERT(x) ((x) << S_Q1_TRAINING_FAILURE_0_ASSERT)
+#define F_Q1_TRAINING_FAILURE_0_ASSERT V_Q1_TRAINING_FAILURE_0_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_3_ASSERT 19
+#define V_Q1_TRAINING_COMPLETE_3_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_3_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_3_ASSERT V_Q1_TRAINING_COMPLETE_3_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_2_ASSERT 18
+#define V_Q1_TRAINING_COMPLETE_2_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_2_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_2_ASSERT V_Q1_TRAINING_COMPLETE_2_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_1_ASSERT 17
+#define V_Q1_TRAINING_COMPLETE_1_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_1_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_1_ASSERT V_Q1_TRAINING_COMPLETE_1_ASSERT(1U)
+
+#define S_Q1_TRAINING_COMPLETE_0_ASSERT 16
+#define V_Q1_TRAINING_COMPLETE_0_ASSERT(x) ((x) << S_Q1_TRAINING_COMPLETE_0_ASSERT)
+#define F_Q1_TRAINING_COMPLETE_0_ASSERT V_Q1_TRAINING_COMPLETE_0_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_3_ASSERT 15
+#define V_Q1_AN_TX_INT_3_ASSERT(x) ((x) << S_Q1_AN_TX_INT_3_ASSERT)
+#define F_Q1_AN_TX_INT_3_ASSERT V_Q1_AN_TX_INT_3_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_2_ASSERT 14
+#define V_Q1_AN_TX_INT_2_ASSERT(x) ((x) << S_Q1_AN_TX_INT_2_ASSERT)
+#define F_Q1_AN_TX_INT_2_ASSERT V_Q1_AN_TX_INT_2_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_1_ASSERT 13
+#define V_Q1_AN_TX_INT_1_ASSERT(x) ((x) << S_Q1_AN_TX_INT_1_ASSERT)
+#define F_Q1_AN_TX_INT_1_ASSERT V_Q1_AN_TX_INT_1_ASSERT(1U)
+
+#define S_Q1_AN_TX_INT_0_ASSERT 12
+#define V_Q1_AN_TX_INT_0_ASSERT(x) ((x) << S_Q1_AN_TX_INT_0_ASSERT)
+#define F_Q1_AN_TX_INT_0_ASSERT V_Q1_AN_TX_INT_0_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_3_ASSERT 11
+#define V_Q1_SIGNAL_DETECT_3_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_3_ASSERT)
+#define F_Q1_SIGNAL_DETECT_3_ASSERT V_Q1_SIGNAL_DETECT_3_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_2_ASSERT 10
+#define V_Q1_SIGNAL_DETECT_2_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_2_ASSERT)
+#define F_Q1_SIGNAL_DETECT_2_ASSERT V_Q1_SIGNAL_DETECT_2_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_1_ASSERT 9
+#define V_Q1_SIGNAL_DETECT_1_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_1_ASSERT)
+#define F_Q1_SIGNAL_DETECT_1_ASSERT V_Q1_SIGNAL_DETECT_1_ASSERT(1U)
+
+#define S_Q1_SIGNAL_DETECT_0_ASSERT 8
+#define V_Q1_SIGNAL_DETECT_0_ASSERT(x) ((x) << S_Q1_SIGNAL_DETECT_0_ASSERT)
+#define F_Q1_SIGNAL_DETECT_0_ASSERT V_Q1_SIGNAL_DETECT_0_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_3_ASSERT 7
+#define V_Q1_CDR_LOL_3_ASSERT(x) ((x) << S_Q1_CDR_LOL_3_ASSERT)
+#define F_Q1_CDR_LOL_3_ASSERT V_Q1_CDR_LOL_3_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_2_ASSERT 6
+#define V_Q1_CDR_LOL_2_ASSERT(x) ((x) << S_Q1_CDR_LOL_2_ASSERT)
+#define F_Q1_CDR_LOL_2_ASSERT V_Q1_CDR_LOL_2_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_1_ASSERT 5
+#define V_Q1_CDR_LOL_1_ASSERT(x) ((x) << S_Q1_CDR_LOL_1_ASSERT)
+#define F_Q1_CDR_LOL_1_ASSERT V_Q1_CDR_LOL_1_ASSERT(1U)
+
+#define S_Q1_CDR_LOL_0_ASSERT 4
+#define V_Q1_CDR_LOL_0_ASSERT(x) ((x) << S_Q1_CDR_LOL_0_ASSERT)
+#define F_Q1_CDR_LOL_0_ASSERT V_Q1_CDR_LOL_0_ASSERT(1U)
+
+#define S_Q1_LOS_3_ASSERT 3
+#define V_Q1_LOS_3_ASSERT(x) ((x) << S_Q1_LOS_3_ASSERT)
+#define F_Q1_LOS_3_ASSERT V_Q1_LOS_3_ASSERT(1U)
+
+#define S_Q1_LOS_2_ASSERT 2
+#define V_Q1_LOS_2_ASSERT(x) ((x) << S_Q1_LOS_2_ASSERT)
+#define F_Q1_LOS_2_ASSERT V_Q1_LOS_2_ASSERT(1U)
+
+#define S_Q1_LOS_1_ASSERT 1
+#define V_Q1_LOS_1_ASSERT(x) ((x) << S_Q1_LOS_1_ASSERT)
+#define F_Q1_LOS_1_ASSERT V_Q1_LOS_1_ASSERT(1U)
+
+#define S_Q1_LOS_0_ASSERT 0
+#define V_Q1_LOS_0_ASSERT(x) ((x) << S_Q1_LOS_0_ASSERT)
+#define F_Q1_LOS_0_ASSERT V_Q1_LOS_0_ASSERT(1U)
+
+#define A_MAC_IOS_INTR_CAUSE_QUAD1 0x3a09c
+#define A_MAC_MTIP_PCS_1G_0_CONTROL 0x3e000
+
+#define S_SPEED_SEL_1 13
+#define V_SPEED_SEL_1(x) ((x) << S_SPEED_SEL_1)
+#define F_SPEED_SEL_1 V_SPEED_SEL_1(1U)
+
+#define S_AUTO_NEG_ENA 12
+#define V_AUTO_NEG_ENA(x) ((x) << S_AUTO_NEG_ENA)
+#define F_AUTO_NEG_ENA V_AUTO_NEG_ENA(1U)
+
+#define S_T7_POWER_DOWN 11
+#define V_T7_POWER_DOWN(x) ((x) << S_T7_POWER_DOWN)
+#define F_T7_POWER_DOWN V_T7_POWER_DOWN(1U)
+
+#define S_RESTART_AUTO_NEG 9
+#define V_RESTART_AUTO_NEG(x) ((x) << S_RESTART_AUTO_NEG)
+#define F_RESTART_AUTO_NEG V_RESTART_AUTO_NEG(1U)
+
+#define S_SPEED_SEL_0 6
+#define V_SPEED_SEL_0(x) ((x) << S_SPEED_SEL_0)
+#define F_SPEED_SEL_0 V_SPEED_SEL_0(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_STATUS 0x3e004
+
+#define S_100BASE_T4 15
+#define V_100BASE_T4(x) ((x) << S_100BASE_T4)
+#define F_100BASE_T4 V_100BASE_T4(1U)
+
+#define S_100BASE_X_FULL_DUPLEX 14
+#define V_100BASE_X_FULL_DUPLEX(x) ((x) << S_100BASE_X_FULL_DUPLEX)
+#define F_100BASE_X_FULL_DUPLEX V_100BASE_X_FULL_DUPLEX(1U)
+
+#define S_100BASE_X_HALF_DUPLEX 13
+#define V_100BASE_X_HALF_DUPLEX(x) ((x) << S_100BASE_X_HALF_DUPLEX)
+#define F_100BASE_X_HALF_DUPLEX V_100BASE_X_HALF_DUPLEX(1U)
+
+#define S_10MBPS_FULL_DUPLEX 12
+#define V_10MBPS_FULL_DUPLEX(x) ((x) << S_10MBPS_FULL_DUPLEX)
+#define F_10MBPS_FULL_DUPLEX V_10MBPS_FULL_DUPLEX(1U)
+
+#define S_10MBPS_HALF_DUPLEX 11
+#define V_10MBPS_HALF_DUPLEX(x) ((x) << S_10MBPS_HALF_DUPLEX)
+#define F_10MBPS_HALF_DUPLEX V_10MBPS_HALF_DUPLEX(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX1 10
+#define V_100BASE_T2_HALF_DUPLEX1(x) ((x) << S_100BASE_T2_HALF_DUPLEX1)
+#define F_100BASE_T2_HALF_DUPLEX1 V_100BASE_T2_HALF_DUPLEX1(1U)
+
+#define S_100BASE_T2_HALF_DUPLEX0 9
+#define V_100BASE_T2_HALF_DUPLEX0(x) ((x) << S_100BASE_T2_HALF_DUPLEX0)
+#define F_100BASE_T2_HALF_DUPLEX0 V_100BASE_T2_HALF_DUPLEX0(1U)
+
+#define S_T7_EXTENDED_STATUS 8
+#define V_T7_EXTENDED_STATUS(x) ((x) << S_T7_EXTENDED_STATUS)
+#define F_T7_EXTENDED_STATUS V_T7_EXTENDED_STATUS(1U)
+
+#define S_AUTO_NEG_COMPLETE 5
+#define V_AUTO_NEG_COMPLETE(x) ((x) << S_AUTO_NEG_COMPLETE)
+#define F_AUTO_NEG_COMPLETE V_AUTO_NEG_COMPLETE(1U)
+
+#define S_T7_REMOTE_FAULT 4
+#define V_T7_REMOTE_FAULT(x) ((x) << S_T7_REMOTE_FAULT)
+#define F_T7_REMOTE_FAULT V_T7_REMOTE_FAULT(1U)
+
+#define S_AUTO_NEG_ABILITY 3
+#define V_AUTO_NEG_ABILITY(x) ((x) << S_AUTO_NEG_ABILITY)
+#define F_AUTO_NEG_ABILITY V_AUTO_NEG_ABILITY(1U)
+
+#define S_JABBER_DETECT 1
+#define V_JABBER_DETECT(x) ((x) << S_JABBER_DETECT)
+#define F_JABBER_DETECT V_JABBER_DETECT(1U)
+
+#define S_EXTENDED_CAPABILITY 0
+#define V_EXTENDED_CAPABILITY(x) ((x) << S_EXTENDED_CAPABILITY)
+#define F_EXTENDED_CAPABILITY V_EXTENDED_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_0 0x3e008
+#define A_MAC_MTIP_PCS_1G_0_PHY_IDENTIFIER_1 0x3e00c
+#define A_MAC_MTIP_PCS_1G_0_DEV_ABILITY 0x3e010
+
+#define S_EEE_CLOCK_STOP_ENABLE 8
+#define V_EEE_CLOCK_STOP_ENABLE(x) ((x) << S_EEE_CLOCK_STOP_ENABLE)
+#define F_EEE_CLOCK_STOP_ENABLE V_EEE_CLOCK_STOP_ENABLE(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_PARTNER_ABILITY 0x3e014
+
+#define S_COPPER_LINK_STATUS 15
+#define V_COPPER_LINK_STATUS(x) ((x) << S_COPPER_LINK_STATUS)
+#define F_COPPER_LINK_STATUS V_COPPER_LINK_STATUS(1U)
+
+#define S_COPPER_DUPLEX_STATUS 12
+#define V_COPPER_DUPLEX_STATUS(x) ((x) << S_COPPER_DUPLEX_STATUS)
+#define F_COPPER_DUPLEX_STATUS V_COPPER_DUPLEX_STATUS(1U)
+
+#define S_COPPER_SPEED 10
+#define M_COPPER_SPEED 0x3U
+#define V_COPPER_SPEED(x) ((x) << S_COPPER_SPEED)
+#define G_COPPER_SPEED(x) (((x) >> S_COPPER_SPEED) & M_COPPER_SPEED)
+
+#define S_EEE_CAPABILITY 9
+#define V_EEE_CAPABILITY(x) ((x) << S_EEE_CAPABILITY)
+#define F_EEE_CAPABILITY V_EEE_CAPABILITY(1U)
+
+#define S_EEE_CLOCK_STOP_CAPABILITY 8
+#define V_EEE_CLOCK_STOP_CAPABILITY(x) ((x) << S_EEE_CLOCK_STOP_CAPABILITY)
+#define F_EEE_CLOCK_STOP_CAPABILITY V_EEE_CLOCK_STOP_CAPABILITY(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_AN_EXPANSION 0x3e018
+#define A_MAC_MTIP_PCS_1G_0_NP_TX 0x3e01c
+#define A_MAC_MTIP_PCS_1G_0_LP_NP_RX 0x3e020
+
+#define S_T7_DATA 0
+#define M_T7_DATA 0x7ffU
+#define V_T7_DATA(x) ((x) << S_T7_DATA)
+#define G_T7_DATA(x) (((x) >> S_T7_DATA) & M_T7_DATA)
+
+#define A_MAC_MTIP_PCS_1G_0_EXTENDED_STATUS 0x3e03c
+#define A_MAC_MTIP_PCS_1G_0_SCRATCH 0x3e040
+#define A_MAC_MTIP_PCS_1G_0_REV 0x3e044
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_0 0x3e048
+
+#define S_LINK_TIMER_VAL 0
+#define M_LINK_TIMER_VAL 0xffffU
+#define V_LINK_TIMER_VAL(x) ((x) << S_LINK_TIMER_VAL)
+#define G_LINK_TIMER_VAL(x) (((x) >> S_LINK_TIMER_VAL) & M_LINK_TIMER_VAL)
+
+#define A_MAC_MTIP_PCS_1G_0_LINK_TIMER_1 0x3e04c
+
+#define S_T7_LINK_TIMER_VAL 0
+#define M_T7_LINK_TIMER_VAL 0x1fU
+#define V_T7_LINK_TIMER_VAL(x) ((x) << S_T7_LINK_TIMER_VAL)
+#define G_T7_LINK_TIMER_VAL(x) (((x) >> S_T7_LINK_TIMER_VAL) & M_T7_LINK_TIMER_VAL)
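+
+/*
+ * The 1G PCS link timer is evidently a 21-bit value split across the
+ * two LINK_TIMER registers: LINK_TIMER_0 carries the low 16 bits and
+ * LINK_TIMER_1 the upper 5 bits.
+ */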
+
+#define A_MAC_MTIP_PCS_1G_0_IF_MODE 0x3e050
+#define A_MAC_MTIP_PCS_1G_0_DEC_ERR_CNT 0x3e054
+#define A_MAC_MTIP_PCS_1G_0_VENDOR_CONTROL 0x3e058
+
+#define S_SGPCS_ENA_ST 15
+#define V_SGPCS_ENA_ST(x) ((x) << S_SGPCS_ENA_ST)
+#define F_SGPCS_ENA_ST V_SGPCS_ENA_ST(1U)
+
+#define S_T7_CFG_CLOCK_RATE 4
+#define M_T7_CFG_CLOCK_RATE 0xfU
+#define V_T7_CFG_CLOCK_RATE(x) ((x) << S_T7_CFG_CLOCK_RATE)
+#define G_T7_CFG_CLOCK_RATE(x) (((x) >> S_T7_CFG_CLOCK_RATE) & M_T7_CFG_CLOCK_RATE)
+
+#define S_SGPCS_ENA_R 0
+#define V_SGPCS_ENA_R(x) ((x) << S_SGPCS_ENA_R)
+#define F_SGPCS_ENA_R V_SGPCS_ENA_R(1U)
+
+#define A_MAC_MTIP_PCS_1G_0_SD_BIT_SLIP 0x3e05c
+
+#define S_SD_BIT_SLIP 0
+#define M_SD_BIT_SLIP 0xfU
+#define V_SD_BIT_SLIP(x) ((x) << S_SD_BIT_SLIP)
+#define G_SD_BIT_SLIP(x) (((x) >> S_SD_BIT_SLIP) & M_SD_BIT_SLIP)
+
+#define A_MAC_MTIP_PCS_1G_1_CONTROL 0x3e100
+#define A_MAC_MTIP_PCS_1G_1_STATUS 0x3e104
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_0 0x3e108
+#define A_MAC_MTIP_PCS_1G_1_PHY_IDENTIFIER_1 0x3e10c
+#define A_MAC_MTIP_PCS_1G_1_DEV_ABILITY 0x3e110
+#define A_MAC_MTIP_PCS_1G_1_PARTNER_ABILITY 0x3e114
+#define A_MAC_MTIP_PCS_1G_1_AN_EXPANSION 0x3e118
+#define A_MAC_MTIP_PCS_1G_1_NP_TX 0x3e11c
+#define A_MAC_MTIP_PCS_1G_1_LP_NP_RX 0x3e120
+#define A_MAC_MTIP_PCS_1G_1_EXTENDED_STATUS 0x3e13c
+#define A_MAC_MTIP_PCS_1G_1_SCRATCH 0x3e140
+#define A_MAC_MTIP_PCS_1G_1_REV 0x3e144
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_0 0x3e148
+#define A_MAC_MTIP_PCS_1G_1_LINK_TIMER_1 0x3e14c
+#define A_MAC_MTIP_PCS_1G_1_IF_MODE 0x3e150
+#define A_MAC_MTIP_PCS_1G_1_DEC_ERR_CNT 0x3e154
+#define A_MAC_MTIP_PCS_1G_1_VENDOR_CONTROL 0x3e158
+#define A_MAC_MTIP_PCS_1G_1_SD_BIT_SLIP 0x3e15c
+#define A_MAC_MTIP_PCS_1G_2_CONTROL 0x3e200
+#define A_MAC_MTIP_PCS_1G_2_STATUS 0x3e204
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_0 0x3e208
+#define A_MAC_MTIP_PCS_1G_2_PHY_IDENTIFIER_1 0x3e20c
+#define A_MAC_MTIP_PCS_1G_2_DEV_ABILITY 0x3e210
+#define A_MAC_MTIP_PCS_1G_2_PARTNER_ABILITY 0x3e214
+#define A_MAC_MTIP_PCS_1G_2_AN_EXPANSION 0x3e218
+#define A_MAC_MTIP_PCS_1G_2_NP_TX 0x3e21c
+#define A_MAC_MTIP_PCS_1G_2_LP_NP_RX 0x3e220
+#define A_MAC_MTIP_PCS_1G_2_EXTENDED_STATUS 0x3e23c
+#define A_MAC_MTIP_PCS_1G_2_SCRATCH 0x3e240
+#define A_MAC_MTIP_PCS_1G_2_REV 0x3e244
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_0 0x3e248
+#define A_MAC_MTIP_PCS_1G_2_LINK_TIMER_1 0x3e24c
+#define A_MAC_MTIP_PCS_1G_2_IF_MODE 0x3e250
+#define A_MAC_MTIP_PCS_1G_2_DEC_ERR_CNT 0x3e254
+#define A_MAC_MTIP_PCS_1G_2_VENDOR_CONTROL 0x3e258
+#define A_MAC_MTIP_PCS_1G_2_SD_BIT_SLIP 0x3e25c
+#define A_MAC_MTIP_PCS_1G_3_CONTROL 0x3e300
+#define A_MAC_MTIP_PCS_1G_3_STATUS 0x3e304
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_0 0x3e308
+#define A_MAC_MTIP_PCS_1G_3_PHY_IDENTIFIER_1 0x3e30c
+#define A_MAC_MTIP_PCS_1G_3_DEV_ABILITY 0x3e310
+#define A_MAC_MTIP_PCS_1G_3_PARTNER_ABILITY 0x3e314
+#define A_MAC_MTIP_PCS_1G_3_AN_EXPANSION 0x3e318
+#define A_MAC_MTIP_PCS_1G_3_NP_TX 0x3e31c
+#define A_MAC_MTIP_PCS_1G_3_LP_NP_RX 0x3e320
+#define A_MAC_MTIP_PCS_1G_3_EXTENDED_STATUS 0x3e33c
+#define A_MAC_MTIP_PCS_1G_3_SCRATCH 0x3e340
+#define A_MAC_MTIP_PCS_1G_3_REV 0x3e344
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_0 0x3e348
+#define A_MAC_MTIP_PCS_1G_3_LINK_TIMER_1 0x3e34c
+#define A_MAC_MTIP_PCS_1G_3_IF_MODE 0x3e350
+#define A_MAC_MTIP_PCS_1G_3_DEC_ERR_CNT 0x3e354
+#define A_MAC_MTIP_PCS_1G_3_VENDOR_CONTROL 0x3e358
+#define A_MAC_MTIP_PCS_1G_3_SD_BIT_SLIP 0x3e35c
+#define A_MAC_DPLL_CTRL_0 0x3f000
+
+#define S_LOCAL_FAULT_OVRD 18
+#define V_LOCAL_FAULT_OVRD(x) ((x) << S_LOCAL_FAULT_OVRD)
+#define F_LOCAL_FAULT_OVRD V_LOCAL_FAULT_OVRD(1U)
+
+#define S_LOCAL_FAULT_HOLD_EN 17
+#define V_LOCAL_FAULT_HOLD_EN(x) ((x) << S_LOCAL_FAULT_HOLD_EN)
+#define F_LOCAL_FAULT_HOLD_EN V_LOCAL_FAULT_HOLD_EN(1U)
+
+#define S_DPLL_RST 16
+#define V_DPLL_RST(x) ((x) << S_DPLL_RST)
+#define F_DPLL_RST V_DPLL_RST(1U)
+
+#define S_CNTOFFSET 0
+#define M_CNTOFFSET 0xffffU
+#define V_CNTOFFSET(x) ((x) << S_CNTOFFSET)
+#define G_CNTOFFSET(x) (((x) >> S_CNTOFFSET) & M_CNTOFFSET)
+
+#define A_MAC_DPLL_CTRL_1 0x3f004
+
+#define S_DELAYK 0
+#define M_DELAYK 0xffffffU
+#define V_DELAYK(x) ((x) << S_DELAYK)
+#define G_DELAYK(x) (((x) >> S_DELAYK) & M_DELAYK)
+
+#define A_MAC_DPLL_CTRL_2 0x3f008
+
+#define S_DIVFFB 16
+#define M_DIVFFB 0xffffU
+#define V_DIVFFB(x) ((x) << S_DIVFFB)
+#define G_DIVFFB(x) (((x) >> S_DIVFFB) & M_DIVFFB)
+
+#define S_DIVFIN 0
+#define M_DIVFIN 0xffffU
+#define V_DIVFIN(x) ((x) << S_DIVFIN)
+#define G_DIVFIN(x) (((x) >> S_DIVFIN) & M_DIVFIN)
+
+#define A_MAC_DPLL_CTRL_3 0x3f00c
+
+#define S_ISHIFT_HOLD 28
+#define M_ISHIFT_HOLD 0xfU
+#define V_ISHIFT_HOLD(x) ((x) << S_ISHIFT_HOLD)
+#define G_ISHIFT_HOLD(x) (((x) >> S_ISHIFT_HOLD) & M_ISHIFT_HOLD)
+
+#define S_ISHIFT 24
+#define M_ISHIFT 0xfU
+#define V_ISHIFT(x) ((x) << S_ISHIFT)
+#define G_ISHIFT(x) (((x) >> S_ISHIFT) & M_ISHIFT)
+
+#define S_INT_PRESET 12
+#define M_INT_PRESET 0xfffU
+#define V_INT_PRESET(x) ((x) << S_INT_PRESET)
+#define G_INT_PRESET(x) (((x) >> S_INT_PRESET) & M_INT_PRESET)
+
+#define S_FMI 4
+#define M_FMI 0xffU
+#define V_FMI(x) ((x) << S_FMI)
+#define G_FMI(x) (((x) >> S_FMI) & M_FMI)
+
+#define S_DPLL_PROGRAM 3
+#define V_DPLL_PROGRAM(x) ((x) << S_DPLL_PROGRAM)
+#define F_DPLL_PROGRAM V_DPLL_PROGRAM(1U)
+
+#define S_PRESET_EN 2
+#define V_PRESET_EN(x) ((x) << S_PRESET_EN)
+#define F_PRESET_EN V_PRESET_EN(1U)
+
+#define S_ONTARGETOV 1
+#define V_ONTARGETOV(x) ((x) << S_ONTARGETOV)
+#define F_ONTARGETOV V_ONTARGETOV(1U)
+
+#define S_FDONLY 0
+#define V_FDONLY(x) ((x) << S_FDONLY)
+#define F_FDONLY V_FDONLY(1U)
+
+#define A_MAC_DPLL_CTRL_4 0x3f010
+
+#define S_FKI 24
+#define M_FKI 0x1fU
+#define V_FKI(x) ((x) << S_FKI)
+#define G_FKI(x) (((x) >> S_FKI) & M_FKI)
+
+#define S_FRAC_PRESET 0
+#define M_FRAC_PRESET 0xffffffU
+#define V_FRAC_PRESET(x) ((x) << S_FRAC_PRESET)
+#define G_FRAC_PRESET(x) (((x) >> S_FRAC_PRESET) & M_FRAC_PRESET)
+
+#define A_MAC_DPLL_CTRL_5 0x3f014
+
+#define S_PH_STEP_CNT_HOLD 24
+#define M_PH_STEP_CNT_HOLD 0x1fU
+#define V_PH_STEP_CNT_HOLD(x) ((x) << S_PH_STEP_CNT_HOLD)
+#define G_PH_STEP_CNT_HOLD(x) (((x) >> S_PH_STEP_CNT_HOLD) & M_PH_STEP_CNT_HOLD)
+
+#define S_CFG_RESET 23
+#define V_CFG_RESET(x) ((x) << S_CFG_RESET)
+#define F_CFG_RESET V_CFG_RESET(1U)
+
+#define S_PH_STEP_CNT 16
+#define M_PH_STEP_CNT 0x1fU
+#define V_PH_STEP_CNT(x) ((x) << S_PH_STEP_CNT)
+#define G_PH_STEP_CNT(x) (((x) >> S_PH_STEP_CNT) & M_PH_STEP_CNT)
+
+#define S_OTDLY 0
+#define M_OTDLY 0xffffU
+#define V_OTDLY(x) ((x) << S_OTDLY)
+#define G_OTDLY(x) (((x) >> S_OTDLY) & M_OTDLY)
+
+#define A_MAC_DPLL_CTRL_6 0x3f018
+
+#define S_TARGETCNT 16
+#define M_TARGETCNT 0xffffU
+#define V_TARGETCNT(x) ((x) << S_TARGETCNT)
+#define G_TARGETCNT(x) (((x) >> S_TARGETCNT) & M_TARGETCNT)
+
+#define S_PKP 8
+#define M_PKP 0x1fU
+#define V_PKP(x) ((x) << S_PKP)
+#define G_PKP(x) (((x) >> S_PKP) & M_PKP)
+
+#define S_PMP 0
+#define M_PMP 0xffU
+#define V_PMP(x) ((x) << S_PMP)
+#define G_PMP(x) (((x) >> S_PMP) & M_PMP)
+
+#define A_MAC_DPLL_CTRL_7 0x3f01c
+#define A_MAC_DPLL_STATUS_0 0x3f020
+
+#define S_FRAC 0
+#define M_FRAC 0xffffffU
+#define V_FRAC(x) ((x) << S_FRAC)
+#define G_FRAC(x) (((x) >> S_FRAC) & M_FRAC)
+
+#define A_MAC_DPLL_STATUS_1 0x3f024
+
+#define S_FRAC_PD_OUT 0
+#define M_FRAC_PD_OUT 0xffffffU
+#define V_FRAC_PD_OUT(x) ((x) << S_FRAC_PD_OUT)
+#define G_FRAC_PD_OUT(x) (((x) >> S_FRAC_PD_OUT) & M_FRAC_PD_OUT)
+
+#define A_MAC_DPLL_STATUS_2 0x3f028
+
+#define S_INT 12
+#define M_INT 0xfffU
+#define V_INT(x) ((x) << S_INT)
+#define G_INT(x) (((x) >> S_INT) & M_INT)
+
+#define S_INT_PD_OUT 0
+#define M_INT_PD_OUT 0xfffU
+#define V_INT_PD_OUT(x) ((x) << S_INT_PD_OUT)
+#define G_INT_PD_OUT(x) (((x) >> S_INT_PD_OUT) & M_INT_PD_OUT)
+
+#define A_MAC_FRAC_N_PLL_CTRL_0 0x3f02c
+
+#define S_FRAC_N_DSKEWCALCNT 29
+#define M_FRAC_N_DSKEWCALCNT 0x7U
+#define V_FRAC_N_DSKEWCALCNT(x) ((x) << S_FRAC_N_DSKEWCALCNT)
+#define G_FRAC_N_DSKEWCALCNT(x) (((x) >> S_FRAC_N_DSKEWCALCNT) & M_FRAC_N_DSKEWCALCNT)
+
+#define S_PLLEN 28
+#define V_PLLEN(x) ((x) << S_PLLEN)
+#define F_PLLEN V_PLLEN(1U)
+
+#define S_T7_BYPASS 24
+#define M_T7_BYPASS 0xfU
+#define V_T7_BYPASS(x) ((x) << S_T7_BYPASS)
+#define G_T7_BYPASS(x) (((x) >> S_T7_BYPASS) & M_T7_BYPASS)
+
+#define S_POSTDIV3A 21
+#define M_POSTDIV3A 0x7U
+#define V_POSTDIV3A(x) ((x) << S_POSTDIV3A)
+#define G_POSTDIV3A(x) (((x) >> S_POSTDIV3A) & M_POSTDIV3A)
+
+#define S_POSTDIV3B 18
+#define M_POSTDIV3B 0x7U
+#define V_POSTDIV3B(x) ((x) << S_POSTDIV3B)
+#define G_POSTDIV3B(x) (((x) >> S_POSTDIV3B) & M_POSTDIV3B)
+
+#define S_POSTDIV2A 15
+#define M_POSTDIV2A 0x7U
+#define V_POSTDIV2A(x) ((x) << S_POSTDIV2A)
+#define G_POSTDIV2A(x) (((x) >> S_POSTDIV2A) & M_POSTDIV2A)
+
+#define S_POSTDIV2B 12
+#define M_POSTDIV2B 0x7U
+#define V_POSTDIV2B(x) ((x) << S_POSTDIV2B)
+#define G_POSTDIV2B(x) (((x) >> S_POSTDIV2B) & M_POSTDIV2B)
+
+#define S_POSTDIV1A 9
+#define M_POSTDIV1A 0x7U
+#define V_POSTDIV1A(x) ((x) << S_POSTDIV1A)
+#define G_POSTDIV1A(x) (((x) >> S_POSTDIV1A) & M_POSTDIV1A)
+
+#define S_POSTDIV1B 6
+#define M_POSTDIV1B 0x7U
+#define V_POSTDIV1B(x) ((x) << S_POSTDIV1B)
+#define G_POSTDIV1B(x) (((x) >> S_POSTDIV1B) & M_POSTDIV1B)
+
+#define S_POSTDIV0A 3
+#define M_POSTDIV0A 0x7U
+#define V_POSTDIV0A(x) ((x) << S_POSTDIV0A)
+#define G_POSTDIV0A(x) (((x) >> S_POSTDIV0A) & M_POSTDIV0A)
+
+#define S_POSTDIV0B 0
+#define M_POSTDIV0B 0x7U
+#define V_POSTDIV0B(x) ((x) << S_POSTDIV0B)
+#define G_POSTDIV0B(x) (((x) >> S_POSTDIV0B) & M_POSTDIV0B)
+
+#define A_MAC_FRAC_N_PLL_CTRL_1 0x3f030
+
+#define S_FRAC_N_FRAC_N_FOUTEN 28
+#define M_FRAC_N_FRAC_N_FOUTEN 0xfU
+#define V_FRAC_N_FRAC_N_FOUTEN(x) ((x) << S_FRAC_N_FRAC_N_FOUTEN)
+#define G_FRAC_N_FRAC_N_FOUTEN(x) (((x) >> S_FRAC_N_FRAC_N_FOUTEN) & M_FRAC_N_FRAC_N_FOUTEN)
+
+#define S_FRAC_N_DSKEWCALIN 16
+#define M_FRAC_N_DSKEWCALIN 0xfffU
+#define V_FRAC_N_DSKEWCALIN(x) ((x) << S_FRAC_N_DSKEWCALIN)
+#define G_FRAC_N_DSKEWCALIN(x) (((x) >> S_FRAC_N_DSKEWCALIN) & M_FRAC_N_DSKEWCALIN)
+
+#define S_FRAC_N_REFDIV 10
+#define M_FRAC_N_REFDIV 0x3fU
+#define V_FRAC_N_REFDIV(x) ((x) << S_FRAC_N_REFDIV)
+#define G_FRAC_N_REFDIV(x) (((x) >> S_FRAC_N_REFDIV) & M_FRAC_N_REFDIV)
+
+#define S_FRAC_N_DSMEN 9
+#define V_FRAC_N_DSMEN(x) ((x) << S_FRAC_N_DSMEN)
+#define F_FRAC_N_DSMEN V_FRAC_N_DSMEN(1U)
+
+#define S_FRAC_N_PLLEN 8
+#define V_FRAC_N_PLLEN(x) ((x) << S_FRAC_N_PLLEN)
+#define F_FRAC_N_PLLEN V_FRAC_N_PLLEN(1U)
+
+#define S_FRAC_N_DACEN 7
+#define V_FRAC_N_DACEN(x) ((x) << S_FRAC_N_DACEN)
+#define F_FRAC_N_DACEN V_FRAC_N_DACEN(1U)
+
+#define S_FRAC_N_POSTDIV0PRE 6
+#define V_FRAC_N_POSTDIV0PRE(x) ((x) << S_FRAC_N_POSTDIV0PRE)
+#define F_FRAC_N_POSTDIV0PRE V_FRAC_N_POSTDIV0PRE(1U)
+
+#define S_FRAC_N_DSKEWCALBYP 5
+#define V_FRAC_N_DSKEWCALBYP(x) ((x) << S_FRAC_N_DSKEWCALBYP)
+#define F_FRAC_N_DSKEWCALBYP V_FRAC_N_DSKEWCALBYP(1U)
+
+#define S_FRAC_N_DSKEWFASTCAL 4
+#define V_FRAC_N_DSKEWFASTCAL(x) ((x) << S_FRAC_N_DSKEWFASTCAL)
+#define F_FRAC_N_DSKEWFASTCAL V_FRAC_N_DSKEWFASTCAL(1U)
+
+#define S_FRAC_N_DSKEWCALEN 3
+#define V_FRAC_N_DSKEWCALEN(x) ((x) << S_FRAC_N_DSKEWCALEN)
+#define F_FRAC_N_DSKEWCALEN V_FRAC_N_DSKEWCALEN(1U)
+
+#define S_FRAC_N_FREFCMLEN 2
+#define V_FRAC_N_FREFCMLEN(x) ((x) << S_FRAC_N_FREFCMLEN)
+#define F_FRAC_N_FREFCMLEN V_FRAC_N_FREFCMLEN(1U)
+
+#define A_MAC_FRAC_N_PLL_STATUS_0 0x3f034
+
+#define S_DSKEWCALLOCK 12
+#define V_DSKEWCALLOCK(x) ((x) << S_DSKEWCALLOCK)
+#define F_DSKEWCALLOCK V_DSKEWCALLOCK(1U)
+
+#define S_DSKEWCALOUT 0
+#define M_DSKEWCALOUT 0xfffU
+#define V_DSKEWCALOUT(x) ((x) << S_DSKEWCALOUT)
+#define G_DSKEWCALOUT(x) (((x) >> S_DSKEWCALOUT) & M_DSKEWCALOUT)
+
+#define A_MAC_MTIP_PCS_STATUS_0 0x3f100
+
+#define S_XLGMII7_TX_TSU 22
+#define M_XLGMII7_TX_TSU 0x3U
+#define V_XLGMII7_TX_TSU(x) ((x) << S_XLGMII7_TX_TSU)
+#define G_XLGMII7_TX_TSU(x) (((x) >> S_XLGMII7_TX_TSU) & M_XLGMII7_TX_TSU)
+
+#define S_XLGMII6_TX_TSU 20
+#define M_XLGMII6_TX_TSU 0x3U
+#define V_XLGMII6_TX_TSU(x) ((x) << S_XLGMII6_TX_TSU)
+#define G_XLGMII6_TX_TSU(x) (((x) >> S_XLGMII6_TX_TSU) & M_XLGMII6_TX_TSU)
+
+#define S_XLGMII5_TX_TSU 18
+#define M_XLGMII5_TX_TSU 0x3U
+#define V_XLGMII5_TX_TSU(x) ((x) << S_XLGMII5_TX_TSU)
+#define G_XLGMII5_TX_TSU(x) (((x) >> S_XLGMII5_TX_TSU) & M_XLGMII5_TX_TSU)
+
+#define S_XLGMII4_TX_TSU 16
+#define M_XLGMII4_TX_TSU 0x3U
+#define V_XLGMII4_TX_TSU(x) ((x) << S_XLGMII4_TX_TSU)
+#define G_XLGMII4_TX_TSU(x) (((x) >> S_XLGMII4_TX_TSU) & M_XLGMII4_TX_TSU)
+
+#define S_XLGMII3_TX_TSU 14
+#define M_XLGMII3_TX_TSU 0x3U
+#define V_XLGMII3_TX_TSU(x) ((x) << S_XLGMII3_TX_TSU)
+#define G_XLGMII3_TX_TSU(x) (((x) >> S_XLGMII3_TX_TSU) & M_XLGMII3_TX_TSU)
+
+#define S_XLGMII2_TX_TSU 12
+#define M_XLGMII2_TX_TSU 0x3U
+#define V_XLGMII2_TX_TSU(x) ((x) << S_XLGMII2_TX_TSU)
+#define G_XLGMII2_TX_TSU(x) (((x) >> S_XLGMII2_TX_TSU) & M_XLGMII2_TX_TSU)
+
+#define S_XLGMII1_TX_TSU 10
+#define M_XLGMII1_TX_TSU 0x3U
+#define V_XLGMII1_TX_TSU(x) ((x) << S_XLGMII1_TX_TSU)
+#define G_XLGMII1_TX_TSU(x) (((x) >> S_XLGMII1_TX_TSU) & M_XLGMII1_TX_TSU)
+
+#define S_XLGMII0_TX_TSU 8
+#define M_XLGMII0_TX_TSU 0x3U
+#define V_XLGMII0_TX_TSU(x) ((x) << S_XLGMII0_TX_TSU)
+#define G_XLGMII0_TX_TSU(x) (((x) >> S_XLGMII0_TX_TSU) & M_XLGMII0_TX_TSU)
+
+#define S_CGMII3_TX_TSU 6
+#define M_CGMII3_TX_TSU 0x3U
+#define V_CGMII3_TX_TSU(x) ((x) << S_CGMII3_TX_TSU)
+#define G_CGMII3_TX_TSU(x) (((x) >> S_CGMII3_TX_TSU) & M_CGMII3_TX_TSU)
+
+#define S_CGMII2_TX_TSU 4
+#define M_CGMII2_TX_TSU 0x3U
+#define V_CGMII2_TX_TSU(x) ((x) << S_CGMII2_TX_TSU)
+#define G_CGMII2_TX_TSU(x) (((x) >> S_CGMII2_TX_TSU) & M_CGMII2_TX_TSU)
+
+#define S_CGMII1_TX_TSU 2
+#define M_CGMII1_TX_TSU 0x3U
+#define V_CGMII1_TX_TSU(x) ((x) << S_CGMII1_TX_TSU)
+#define G_CGMII1_TX_TSU(x) (((x) >> S_CGMII1_TX_TSU) & M_CGMII1_TX_TSU)
+
+#define S_CGMII0_TX_TSU 0
+#define M_CGMII0_TX_TSU 0x3U
+#define V_CGMII0_TX_TSU(x) ((x) << S_CGMII0_TX_TSU)
+#define G_CGMII0_TX_TSU(x) (((x) >> S_CGMII0_TX_TSU) & M_CGMII0_TX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_1 0x3f104
+
+#define S_CDMII1_RX_TSU 26
+#define M_CDMII1_RX_TSU 0x3U
+#define V_CDMII1_RX_TSU(x) ((x) << S_CDMII1_RX_TSU)
+#define G_CDMII1_RX_TSU(x) (((x) >> S_CDMII1_RX_TSU) & M_CDMII1_RX_TSU)
+
+#define S_CDMII0_RX_TSU 24
+#define M_CDMII0_RX_TSU 0x3U
+#define V_CDMII0_RX_TSU(x) ((x) << S_CDMII0_RX_TSU)
+#define G_CDMII0_RX_TSU(x) (((x) >> S_CDMII0_RX_TSU) & M_CDMII0_RX_TSU)
+
+#define S_XLGMII7_RX_TSU 22
+#define M_XLGMII7_RX_TSU 0x3U
+#define V_XLGMII7_RX_TSU(x) ((x) << S_XLGMII7_RX_TSU)
+#define G_XLGMII7_RX_TSU(x) (((x) >> S_XLGMII7_RX_TSU) & M_XLGMII7_RX_TSU)
+
+#define S_XLGMII6_RX_TSU 20
+#define M_XLGMII6_RX_TSU 0x3U
+#define V_XLGMII6_RX_TSU(x) ((x) << S_XLGMII6_RX_TSU)
+#define G_XLGMII6_RX_TSU(x) (((x) >> S_XLGMII6_RX_TSU) & M_XLGMII6_RX_TSU)
+
+#define S_XLGMII5_RX_TSU 18
+#define M_XLGMII5_RX_TSU 0x3U
+#define V_XLGMII5_RX_TSU(x) ((x) << S_XLGMII5_RX_TSU)
+#define G_XLGMII5_RX_TSU(x) (((x) >> S_XLGMII5_RX_TSU) & M_XLGMII5_RX_TSU)
+
+#define S_XLGMII4_RX_TSU 16
+#define M_XLGMII4_RX_TSU 0x3U
+#define V_XLGMII4_RX_TSU(x) ((x) << S_XLGMII4_RX_TSU)
+#define G_XLGMII4_RX_TSU(x) (((x) >> S_XLGMII4_RX_TSU) & M_XLGMII4_RX_TSU)
+
+#define S_XLGMII3_RX_TSU 14
+#define M_XLGMII3_RX_TSU 0x3U
+#define V_XLGMII3_RX_TSU(x) ((x) << S_XLGMII3_RX_TSU)
+#define G_XLGMII3_RX_TSU(x) (((x) >> S_XLGMII3_RX_TSU) & M_XLGMII3_RX_TSU)
+
+#define S_XLGMII2_RX_TSU 12
+#define M_XLGMII2_RX_TSU 0x3U
+#define V_XLGMII2_RX_TSU(x) ((x) << S_XLGMII2_RX_TSU)
+#define G_XLGMII2_RX_TSU(x) (((x) >> S_XLGMII2_RX_TSU) & M_XLGMII2_RX_TSU)
+
+#define S_XLGMII1_RX_TSU 10
+#define M_XLGMII1_RX_TSU 0x3U
+#define V_XLGMII1_RX_TSU(x) ((x) << S_XLGMII1_RX_TSU)
+#define G_XLGMII1_RX_TSU(x) (((x) >> S_XLGMII1_RX_TSU) & M_XLGMII1_RX_TSU)
+
+#define S_XLGMII0_RX_TSU 8
+#define M_XLGMII0_RX_TSU 0x3U
+#define V_XLGMII0_RX_TSU(x) ((x) << S_XLGMII0_RX_TSU)
+#define G_XLGMII0_RX_TSU(x) (((x) >> S_XLGMII0_RX_TSU) & M_XLGMII0_RX_TSU)
+
+#define S_CGMII3_RX_TSU 6
+#define M_CGMII3_RX_TSU 0x3U
+#define V_CGMII3_RX_TSU(x) ((x) << S_CGMII3_RX_TSU)
+#define G_CGMII3_RX_TSU(x) (((x) >> S_CGMII3_RX_TSU) & M_CGMII3_RX_TSU)
+
+#define S_CGMII2_RX_TSU 4
+#define M_CGMII2_RX_TSU 0x3U
+#define V_CGMII2_RX_TSU(x) ((x) << S_CGMII2_RX_TSU)
+#define G_CGMII2_RX_TSU(x) (((x) >> S_CGMII2_RX_TSU) & M_CGMII2_RX_TSU)
+
+#define S_CGMII1_RX_TSU 2
+#define M_CGMII1_RX_TSU 0x3U
+#define V_CGMII1_RX_TSU(x) ((x) << S_CGMII1_RX_TSU)
+#define G_CGMII1_RX_TSU(x) (((x) >> S_CGMII1_RX_TSU) & M_CGMII1_RX_TSU)
+
+#define S_CGMII0_RX_TSU 0
+#define M_CGMII0_RX_TSU 0x3U
+#define V_CGMII0_RX_TSU(x) ((x) << S_CGMII0_RX_TSU)
+#define G_CGMII0_RX_TSU(x) (((x) >> S_CGMII0_RX_TSU) & M_CGMII0_RX_TSU)
+
+#define A_MAC_MTIP_PCS_STATUS_2 0x3f108
+
+#define S_SD_BIT_SLIP_0 0
+#define M_SD_BIT_SLIP_0 0x3fffffffU
+#define V_SD_BIT_SLIP_0(x) ((x) << S_SD_BIT_SLIP_0)
+#define G_SD_BIT_SLIP_0(x) (((x) >> S_SD_BIT_SLIP_0) & M_SD_BIT_SLIP_0)
+
+#define A_MAC_MTIP_PCS_STATUS_3 0x3f10c
+
+#define S_SD_BIT_SLIP_1 0
+#define M_SD_BIT_SLIP_1 0x3ffffU
+#define V_SD_BIT_SLIP_1(x) ((x) << S_SD_BIT_SLIP_1)
+#define G_SD_BIT_SLIP_1(x) (((x) >> S_SD_BIT_SLIP_1) & M_SD_BIT_SLIP_1)
+
+#define A_MAC_MTIP_PCS_STATUS_4 0x3f110
+
+#define S_TSU_RX_SD 0
+#define M_TSU_RX_SD 0xffffU
+#define V_TSU_RX_SD(x) ((x) << S_TSU_RX_SD)
+#define G_TSU_RX_SD(x) (((x) >> S_TSU_RX_SD) & M_TSU_RX_SD)
+
+#define A_MAC_MTIP_PCS_STATUS_5 0x3f114
+
+#define S_RSFEC_XSTATS_STRB 0
+#define M_RSFEC_XSTATS_STRB 0xffffffU
+#define V_RSFEC_XSTATS_STRB(x) ((x) << S_RSFEC_XSTATS_STRB)
+#define G_RSFEC_XSTATS_STRB(x) (((x) >> S_RSFEC_XSTATS_STRB) & M_RSFEC_XSTATS_STRB)
+
+#define A_MAC_MTIP_PCS_STATUS_6 0x3f118
+#define A_MAC_MTIP_PCS_STATUS_7 0x3f11c
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_0 0x3f120
+
+#define S_TSV_XON_STB_2 24
+#define M_TSV_XON_STB_2 0xffU
+#define V_TSV_XON_STB_2(x) ((x) << S_TSV_XON_STB_2)
+#define G_TSV_XON_STB_2(x) (((x) >> S_TSV_XON_STB_2) & M_TSV_XON_STB_2)
+
+#define S_TSV_XOFF_STB_2 16
+#define M_TSV_XOFF_STB_2 0xffU
+#define V_TSV_XOFF_STB_2(x) ((x) << S_TSV_XOFF_STB_2)
+#define G_TSV_XOFF_STB_2(x) (((x) >> S_TSV_XOFF_STB_2) & M_TSV_XOFF_STB_2)
+
+#define S_RSV_XON_STB_2 8
+#define M_RSV_XON_STB_2 0xffU
+#define V_RSV_XON_STB_2(x) ((x) << S_RSV_XON_STB_2)
+#define G_RSV_XON_STB_2(x) (((x) >> S_RSV_XON_STB_2) & M_RSV_XON_STB_2)
+
+#define S_RSV_XOFF_STB_2 0
+#define M_RSV_XOFF_STB_2 0xffU
+#define V_RSV_XOFF_STB_2(x) ((x) << S_RSV_XOFF_STB_2)
+#define G_RSV_XOFF_STB_2(x) (((x) >> S_RSV_XOFF_STB_2) & M_RSV_XOFF_STB_2)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_1 0x3f124
+
+#define S_TSV_XON_STB_3 24
+#define M_TSV_XON_STB_3 0xffU
+#define V_TSV_XON_STB_3(x) ((x) << S_TSV_XON_STB_3)
+#define G_TSV_XON_STB_3(x) (((x) >> S_TSV_XON_STB_3) & M_TSV_XON_STB_3)
+
+#define S_TSV_XOFF_STB_3 16
+#define M_TSV_XOFF_STB_3 0xffU
+#define V_TSV_XOFF_STB_3(x) ((x) << S_TSV_XOFF_STB_3)
+#define G_TSV_XOFF_STB_3(x) (((x) >> S_TSV_XOFF_STB_3) & M_TSV_XOFF_STB_3)
+
+#define S_RSV_XON_STB_3 8
+#define M_RSV_XON_STB_3 0xffU
+#define V_RSV_XON_STB_3(x) ((x) << S_RSV_XON_STB_3)
+#define G_RSV_XON_STB_3(x) (((x) >> S_RSV_XON_STB_3) & M_RSV_XON_STB_3)
+
+#define S_RSV_XOFF_STB_3 0
+#define M_RSV_XOFF_STB_3 0xffU
+#define V_RSV_XOFF_STB_3(x) ((x) << S_RSV_XOFF_STB_3)
+#define G_RSV_XOFF_STB_3(x) (((x) >> S_RSV_XOFF_STB_3) & M_RSV_XOFF_STB_3)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_2 0x3f128
+
+#define S_TSV_XON_STB_4 24
+#define M_TSV_XON_STB_4 0xffU
+#define V_TSV_XON_STB_4(x) ((x) << S_TSV_XON_STB_4)
+#define G_TSV_XON_STB_4(x) (((x) >> S_TSV_XON_STB_4) & M_TSV_XON_STB_4)
+
+#define S_TSV_XOFF_STB_4 16
+#define M_TSV_XOFF_STB_4 0xffU
+#define V_TSV_XOFF_STB_4(x) ((x) << S_TSV_XOFF_STB_4)
+#define G_TSV_XOFF_STB_4(x) (((x) >> S_TSV_XOFF_STB_4) & M_TSV_XOFF_STB_4)
+
+#define S_RSV_XON_STB_4 8
+#define M_RSV_XON_STB_4 0xffU
+#define V_RSV_XON_STB_4(x) ((x) << S_RSV_XON_STB_4)
+#define G_RSV_XON_STB_4(x) (((x) >> S_RSV_XON_STB_4) & M_RSV_XON_STB_4)
+
+#define S_RSV_XOFF_STB_4 0
+#define M_RSV_XOFF_STB_4 0xffU
+#define V_RSV_XOFF_STB_4(x) ((x) << S_RSV_XOFF_STB_4)
+#define G_RSV_XOFF_STB_4(x) (((x) >> S_RSV_XOFF_STB_4) & M_RSV_XOFF_STB_4)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_3 0x3f12c
+
+#define S_TSV_XON_STB_5 24
+#define M_TSV_XON_STB_5 0xffU
+#define V_TSV_XON_STB_5(x) ((x) << S_TSV_XON_STB_5)
+#define G_TSV_XON_STB_5(x) (((x) >> S_TSV_XON_STB_5) & M_TSV_XON_STB_5)
+
+#define S_TSV_XOFF_STB_5 16
+#define M_TSV_XOFF_STB_5 0xffU
+#define V_TSV_XOFF_STB_5(x) ((x) << S_TSV_XOFF_STB_5)
+#define G_TSV_XOFF_STB_5(x) (((x) >> S_TSV_XOFF_STB_5) & M_TSV_XOFF_STB_5)
+
+#define S_RSV_XON_STB_5 8
+#define M_RSV_XON_STB_5 0xffU
+#define V_RSV_XON_STB_5(x) ((x) << S_RSV_XON_STB_5)
+#define G_RSV_XON_STB_5(x) (((x) >> S_RSV_XON_STB_5) & M_RSV_XON_STB_5)
+
+#define S_RSV_XOFF_STB_5 0
+#define M_RSV_XOFF_STB_5 0xffU
+#define V_RSV_XOFF_STB_5(x) ((x) << S_RSV_XOFF_STB_5)
+#define G_RSV_XOFF_STB_5(x) (((x) >> S_RSV_XOFF_STB_5) & M_RSV_XOFF_STB_5)
+
+#define A_MAC_MTIP_MAC_10G_100G_STATUS_4 0x3f130
+
+#define S_TX_SFD_O_5 19
+#define V_TX_SFD_O_5(x) ((x) << S_TX_SFD_O_5)
+#define F_TX_SFD_O_5 V_TX_SFD_O_5(1U)
+
+#define S_TX_SFD_O_4 18
+#define V_TX_SFD_O_4(x) ((x) << S_TX_SFD_O_4)
+#define F_TX_SFD_O_4 V_TX_SFD_O_4(1U)
+
+#define S_TX_SFD_O_3 17
+#define V_TX_SFD_O_3(x) ((x) << S_TX_SFD_O_3)
+#define F_TX_SFD_O_3 V_TX_SFD_O_3(1U)
+
+#define S_TX_SFD_O_2 16
+#define V_TX_SFD_O_2(x) ((x) << S_TX_SFD_O_2)
+#define F_TX_SFD_O_2 V_TX_SFD_O_2(1U)
+
+#define S_RX_SFD_O_5 15
+#define V_RX_SFD_O_5(x) ((x) << S_RX_SFD_O_5)
+#define F_RX_SFD_O_5 V_RX_SFD_O_5(1U)
+
+#define S_RX_SFD_O_4 14
+#define V_RX_SFD_O_4(x) ((x) << S_RX_SFD_O_4)
+#define F_RX_SFD_O_4 V_RX_SFD_O_4(1U)
+
+#define S_RX_SFD_O_3 13
+#define V_RX_SFD_O_3(x) ((x) << S_RX_SFD_O_3)
+#define F_RX_SFD_O_3 V_RX_SFD_O_3(1U)
+
+#define S_RX_SFD_O_2 12
+#define V_RX_SFD_O_2(x) ((x) << S_RX_SFD_O_2)
+#define F_RX_SFD_O_2 V_RX_SFD_O_2(1U)
+
+#define S_RX_SFD_SHIFT_O_5 11
+#define V_RX_SFD_SHIFT_O_5(x) ((x) << S_RX_SFD_SHIFT_O_5)
+#define F_RX_SFD_SHIFT_O_5 V_RX_SFD_SHIFT_O_5(1U)
+
+#define S_RX_SFD_SHIFT_O_4 10
+#define V_RX_SFD_SHIFT_O_4(x) ((x) << S_RX_SFD_SHIFT_O_4)
+#define F_RX_SFD_SHIFT_O_4 V_RX_SFD_SHIFT_O_4(1U)
+
+#define S_RX_SFD_SHIFT_O_3 9
+#define V_RX_SFD_SHIFT_O_3(x) ((x) << S_RX_SFD_SHIFT_O_3)
+#define F_RX_SFD_SHIFT_O_3 V_RX_SFD_SHIFT_O_3(1U)
+
+#define S_RX_SFD_SHIFT_O_2 8
+#define V_RX_SFD_SHIFT_O_2(x) ((x) << S_RX_SFD_SHIFT_O_2)
+#define F_RX_SFD_SHIFT_O_2 V_RX_SFD_SHIFT_O_2(1U)
+
+#define S_TX_SFD_SHIFT_O_5 7
+#define V_TX_SFD_SHIFT_O_5(x) ((x) << S_TX_SFD_SHIFT_O_5)
+#define F_TX_SFD_SHIFT_O_5 V_TX_SFD_SHIFT_O_5(1U)
+
+#define S_TX_SFD_SHIFT_O_4 6
+#define V_TX_SFD_SHIFT_O_4(x) ((x) << S_TX_SFD_SHIFT_O_4)
+#define F_TX_SFD_SHIFT_O_4 V_TX_SFD_SHIFT_O_4(1U)
+
+#define S_TX_SFD_SHIFT_O_3 5
+#define V_TX_SFD_SHIFT_O_3(x) ((x) << S_TX_SFD_SHIFT_O_3)
+#define F_TX_SFD_SHIFT_O_3 V_TX_SFD_SHIFT_O_3(1U)
+
+#define S_TX_SFD_SHIFT_O_2 4
+#define V_TX_SFD_SHIFT_O_2(x) ((x) << S_TX_SFD_SHIFT_O_2)
+#define F_TX_SFD_SHIFT_O_2 V_TX_SFD_SHIFT_O_2(1U)
+
+#define S_TS_SFD_ENA_5 3
+#define V_TS_SFD_ENA_5(x) ((x) << S_TS_SFD_ENA_5)
+#define F_TS_SFD_ENA_5 V_TS_SFD_ENA_5(1U)
+
+#define S_TS_SFD_ENA_4 2
+#define V_TS_SFD_ENA_4(x) ((x) << S_TS_SFD_ENA_4)
+#define F_TS_SFD_ENA_4 V_TS_SFD_ENA_4(1U)
+
+#define S_TS_SFD_ENA_3 1
+#define V_TS_SFD_ENA_3(x) ((x) << S_TS_SFD_ENA_3)
+#define F_TS_SFD_ENA_3 V_TS_SFD_ENA_3(1U)
+
+#define S_TS_SFD_ENA_2 0
+#define V_TS_SFD_ENA_2(x) ((x) << S_TS_SFD_ENA_2)
+#define F_TS_SFD_ENA_2 V_TS_SFD_ENA_2(1U)
+
+#define A_MAC_STS_CONFIG 0x3f200
+
+#define S_STS_ENA 30
+#define V_STS_ENA(x) ((x) << S_STS_ENA)
+#define F_STS_ENA V_STS_ENA(1U)
+
+#define S_N_PPS_ENA 29
+#define V_N_PPS_ENA(x) ((x) << S_N_PPS_ENA)
+#define F_N_PPS_ENA V_N_PPS_ENA(1U)
+
+#define S_STS_RESET 28
+#define V_STS_RESET(x) ((x) << S_STS_RESET)
+#define F_STS_RESET V_STS_RESET(1U)
+
+#define S_DEBOUNCE_CNT 0
+#define M_DEBOUNCE_CNT 0xfffffffU
+#define V_DEBOUNCE_CNT(x) ((x) << S_DEBOUNCE_CNT)
+#define G_DEBOUNCE_CNT(x) (((x) >> S_DEBOUNCE_CNT) & M_DEBOUNCE_CNT)
+
+#define A_MAC_STS_COUNTER 0x3f204
+#define A_MAC_STS_COUNT_1 0x3f208
+#define A_MAC_STS_COUNT_2 0x3f20c
+#define A_MAC_STS_N_PPS_COUNT_HI 0x3f210
+#define A_MAC_STS_N_PPS_COUNT_LO 0x3f214
+#define A_MAC_STS_N_PPS_COUNTER 0x3f218
+#define A_MAC_BGR_PQ0_FIRMWARE_COMMON_0 0x4030
+
+#define S_MAC_BGR_BGR_REG_APB_SEL 0
+#define V_MAC_BGR_BGR_REG_APB_SEL(x) ((x) << S_MAC_BGR_BGR_REG_APB_SEL)
+#define F_MAC_BGR_BGR_REG_APB_SEL V_MAC_BGR_BGR_REG_APB_SEL(1U)
+
+#define A_MAC_BGR_TOP_DIG_CTRL1_REG_LSB 0x4430
+
+#define S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS 15
+#define V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(x) ((x) << S_MAC_BGR_BGR_REFCLK_CTRL_BYPASS)
+#define F_MAC_BGR_BGR_REFCLK_CTRL_BYPASS V_MAC_BGR_BGR_REFCLK_CTRL_BYPASS(1U)
+
+#define S_MAC_BGR_BGR_COREREFCLK_SEL 14
+#define V_MAC_BGR_BGR_COREREFCLK_SEL(x) ((x) << S_MAC_BGR_BGR_COREREFCLK_SEL)
+#define F_MAC_BGR_BGR_COREREFCLK_SEL V_MAC_BGR_BGR_COREREFCLK_SEL(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_DIV 8
+#define M_MAC_BGR_BGR_TEST_CLK_DIV 0x7U
+#define V_MAC_BGR_BGR_TEST_CLK_DIV(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_DIV)
+#define G_MAC_BGR_BGR_TEST_CLK_DIV(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_DIV) & M_MAC_BGR_BGR_TEST_CLK_DIV)
+
+#define S_MAC_BGR_BGR_TEST_CLK_EN 7
+#define V_MAC_BGR_BGR_TEST_CLK_EN(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_EN)
+#define F_MAC_BGR_BGR_TEST_CLK_EN V_MAC_BGR_BGR_TEST_CLK_EN(1U)
+
+#define S_MAC_BGR_BGR_TEST_CLK_BGRSEL 5
+#define M_MAC_BGR_BGR_TEST_CLK_BGRSEL 0x3U
+#define V_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+#define G_MAC_BGR_BGR_TEST_CLK_BGRSEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_BGRSEL) & M_MAC_BGR_BGR_TEST_CLK_BGRSEL)
+
+#define S_MAC_BGR_BGR_TEST_CLK_SEL 0
+#define M_MAC_BGR_BGR_TEST_CLK_SEL 0x1fU
+#define V_MAC_BGR_BGR_TEST_CLK_SEL(x) ((x) << S_MAC_BGR_BGR_TEST_CLK_SEL)
+#define G_MAC_BGR_BGR_TEST_CLK_SEL(x) (((x) >> S_MAC_BGR_BGR_TEST_CLK_SEL) & M_MAC_BGR_BGR_TEST_CLK_SEL)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_0 0x6000
+
+#define S_MAC_BGR_BGR_REG_PRG_EN 0
+#define V_MAC_BGR_BGR_REG_PRG_EN(x) ((x) << S_MAC_BGR_BGR_REG_PRG_EN)
+#define F_MAC_BGR_BGR_REG_PRG_EN V_MAC_BGR_BGR_REG_PRG_EN(1U)
+
+#define A_MAC_BGR_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_BGR_BGR_REG_GPO 0
+#define V_MAC_BGR_BGR_REG_GPO(x) ((x) << S_MAC_BGR_BGR_REG_GPO)
+#define F_MAC_BGR_BGR_REG_GPO V_MAC_BGR_BGR_REG_GPO(1U)
+
+#define A_MAC_BGR_MGMT_SPINE_MACRO_PMA_0 0x40000
+
+#define S_MAC_BGR_CUREFCLKSEL1 0
+#define M_MAC_BGR_CUREFCLKSEL1 0x3U
+#define V_MAC_BGR_CUREFCLKSEL1(x) ((x) << S_MAC_BGR_CUREFCLKSEL1)
+#define G_MAC_BGR_CUREFCLKSEL1(x) (((x) >> S_MAC_BGR_CUREFCLKSEL1) & M_MAC_BGR_CUREFCLKSEL1)
+
+#define A_MAC_BGR_REFCLK_CONTROL_1 0x40004
+
+#define S_MAC_BGR_IM_CUREFCLKLR_EN 0
+#define V_MAC_BGR_IM_CUREFCLKLR_EN(x) ((x) << S_MAC_BGR_IM_CUREFCLKLR_EN)
+#define F_MAC_BGR_IM_CUREFCLKLR_EN V_MAC_BGR_IM_CUREFCLKLR_EN(1U)
+
+#define A_MAC_BGR_REFCLK_CONTROL_2 0x40080
+
+#define S_MAC_BGR_IM_REF_EN 0
+#define V_MAC_BGR_IM_REF_EN(x) ((x) << S_MAC_BGR_IM_REF_EN)
+#define F_MAC_BGR_IM_REF_EN V_MAC_BGR_IM_REF_EN(1U)
+
+#define A_MAC_PLL0_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL0_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL0_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL2_LOCK_STATUS)
+#define F_MAC_PLL0_PLL2_LOCK_STATUS V_MAC_PLL0_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL0_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL1_LOCK_STATUS)
+#define F_MAC_PLL0_PLL1_LOCK_STATUS V_MAC_PLL0_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL0_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL0_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL0_PLL0_LOCK_STATUS)
+#define F_MAC_PLL0_PLL0_LOCK_STATUS V_MAC_PLL0_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL0_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL0_PLL_PRG_EN 0
+#define M_MAC_PLL0_PLL_PRG_EN 0xfU
+#define V_MAC_PLL0_PLL_PRG_EN(x) ((x) << S_MAC_PLL0_PLL_PRG_EN)
+#define G_MAC_PLL0_PLL_PRG_EN(x) (((x) >> S_MAC_PLL0_PLL_PRG_EN) & M_MAC_PLL0_PLL_PRG_EN)
+
+#define A_MAC_PLL0_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL0_PMA_MACRO_SELECT 0
+#define M_MAC_PLL0_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL0_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL0_PMA_MACRO_SELECT)
+#define G_MAC_PLL0_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL0_PMA_MACRO_SELECT) & M_MAC_PLL0_PMA_MACRO_SELECT)
+
+#define A_MAC_PLL1_PLL_TOP_CUPLL_LOCK 0x4438
+
+#define S_MAC_PLL1_PLL2_LOCK_STATUS 2
+#define V_MAC_PLL1_PLL2_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL2_LOCK_STATUS)
+#define F_MAC_PLL1_PLL2_LOCK_STATUS V_MAC_PLL1_PLL2_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL1_LOCK_STATUS 1
+#define V_MAC_PLL1_PLL1_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL1_LOCK_STATUS)
+#define F_MAC_PLL1_PLL1_LOCK_STATUS V_MAC_PLL1_PLL1_LOCK_STATUS(1U)
+
+#define S_MAC_PLL1_PLL0_LOCK_STATUS 0
+#define V_MAC_PLL1_PLL0_LOCK_STATUS(x) ((x) << S_MAC_PLL1_PLL0_LOCK_STATUS)
+#define F_MAC_PLL1_PLL0_LOCK_STATUS V_MAC_PLL1_PLL0_LOCK_STATUS(1U)
+
+#define A_MAC_PLL1_PLL_PQ0_FIRMWARE_SEQ0_1 0x6020
+
+#define S_MAC_PLL1_PLL_PRG_EN 0
+#define M_MAC_PLL1_PLL_PRG_EN 0xfU
+#define V_MAC_PLL1_PLL_PRG_EN(x) ((x) << S_MAC_PLL1_PLL_PRG_EN)
+#define G_MAC_PLL1_PLL_PRG_EN(x) (((x) >> S_MAC_PLL1_PLL_PRG_EN) & M_MAC_PLL1_PLL_PRG_EN)
+
+#define A_MAC_PLL1_PLL_CMUTOP_KV16_MGMT_PLL_MACRO_SELECT_0 0x7fc00
+
+#define S_MAC_PLL1_PMA_MACRO_SELECT 0
+#define M_MAC_PLL1_PMA_MACRO_SELECT 0x3ffU
+#define V_MAC_PLL1_PMA_MACRO_SELECT(x) ((x) << S_MAC_PLL1_PMA_MACRO_SELECT)
+#define G_MAC_PLL1_PMA_MACRO_SELECT(x) (((x) >> S_MAC_PLL1_PMA_MACRO_SELECT) & M_MAC_PLL1_PMA_MACRO_SELECT)
+
+/* registers for module CRYPTO_0 */
+#define CRYPTO_0_BASE_ADDR 0x44000
+
+#define A_TLS_TX_CH_CONFIG 0x44000
+
+#define S_SMALL_LEN_THRESH 16
+#define M_SMALL_LEN_THRESH 0xffffU
+#define V_SMALL_LEN_THRESH(x) ((x) << S_SMALL_LEN_THRESH)
+#define G_SMALL_LEN_THRESH(x) (((x) >> S_SMALL_LEN_THRESH) & M_SMALL_LEN_THRESH)
+
+#define S_CIPH0_CTL_SEL 12
+#define M_CIPH0_CTL_SEL 0x7U
+#define V_CIPH0_CTL_SEL(x) ((x) << S_CIPH0_CTL_SEL)
+#define G_CIPH0_CTL_SEL(x) (((x) >> S_CIPH0_CTL_SEL) & M_CIPH0_CTL_SEL)
+
+#define S_CIPHN_CTL_SEL 9
+#define M_CIPHN_CTL_SEL 0x7U
+#define V_CIPHN_CTL_SEL(x) ((x) << S_CIPHN_CTL_SEL)
+#define G_CIPHN_CTL_SEL(x) (((x) >> S_CIPHN_CTL_SEL) & M_CIPHN_CTL_SEL)
+
+#define S_MAC_CTL_SEL 6
+#define M_MAC_CTL_SEL 0x7U
+#define V_MAC_CTL_SEL(x) ((x) << S_MAC_CTL_SEL)
+#define G_MAC_CTL_SEL(x) (((x) >> S_MAC_CTL_SEL) & M_MAC_CTL_SEL)
+
+#define S_CIPH0_XOR_SEL 5
+#define V_CIPH0_XOR_SEL(x) ((x) << S_CIPH0_XOR_SEL)
+#define F_CIPH0_XOR_SEL V_CIPH0_XOR_SEL(1U)
+
+#define S_CIPHN_XOR_SEL 4
+#define V_CIPHN_XOR_SEL(x) ((x) << S_CIPHN_XOR_SEL)
+#define F_CIPHN_XOR_SEL V_CIPHN_XOR_SEL(1U)
+
+#define S_MAC_XOR_SEL 3
+#define V_MAC_XOR_SEL(x) ((x) << S_MAC_XOR_SEL)
+#define F_MAC_XOR_SEL V_MAC_XOR_SEL(1U)
+
+#define S_CIPH0_DP_SEL 2
+#define V_CIPH0_DP_SEL(x) ((x) << S_CIPH0_DP_SEL)
+#define F_CIPH0_DP_SEL V_CIPH0_DP_SEL(1U)
+
+#define S_CIPHN_DP_SEL 1
+#define V_CIPHN_DP_SEL(x) ((x) << S_CIPHN_DP_SEL)
+#define F_CIPHN_DP_SEL V_CIPHN_DP_SEL(1U)
+
+#define S_MAC_DP_SEL 0
+#define V_MAC_DP_SEL(x) ((x) << S_MAC_DP_SEL)
+#define F_MAC_DP_SEL V_MAC_DP_SEL(1U)
+
+#define A_TLS_TX_CH_PERR_INJECT 0x44004
+#define A_TLS_TX_CH_INT_ENABLE 0x44008
+
+#define S_KEYLENERR 3
+#define V_KEYLENERR(x) ((x) << S_KEYLENERR)
+#define F_KEYLENERR V_KEYLENERR(1U)
+
+#define S_INTF1_PERR 2
+#define V_INTF1_PERR(x) ((x) << S_INTF1_PERR)
+#define F_INTF1_PERR V_INTF1_PERR(1U)
+
+#define S_INTF0_PERR 1
+#define V_INTF0_PERR(x) ((x) << S_INTF0_PERR)
+#define F_INTF0_PERR V_INTF0_PERR(1U)
+
+#define A_TLS_TX_CH_INT_CAUSE 0x4400c
+
+#define S_KEX_CERR 4
+#define V_KEX_CERR(x) ((x) << S_KEX_CERR)
+#define F_KEX_CERR V_KEX_CERR(1U)
+
+#define A_TLS_TX_CH_PERR_ENABLE 0x44010
+#define A_TLS_TX_CH_DEBUG_FLAGS 0x44014
+#define A_TLS_TX_CH_HMACCTRL_CFG 0x44020
+#define A_TLS_TX_CH_ERR_RSP_HDR 0x44024
+#define A_TLS_TX_CH_HANG_TIMEOUT 0x44028
+
+#define S_T7_TIMEOUT 0
+#define M_T7_TIMEOUT 0xffU
+#define V_T7_TIMEOUT(x) ((x) << S_T7_TIMEOUT)
+#define G_T7_TIMEOUT(x) (((x) >> S_T7_TIMEOUT) & M_T7_TIMEOUT)
+
+#define A_TLS_TX_CH_DBG_STEP_CTRL 0x44030
+
+#define S_DBG_STEP_CTRL 1
+#define V_DBG_STEP_CTRL(x) ((x) << S_DBG_STEP_CTRL)
+#define F_DBG_STEP_CTRL V_DBG_STEP_CTRL(1U)
+
+#define S_DBG_STEP_EN 0
+#define V_DBG_STEP_EN(x) ((x) << S_DBG_STEP_EN)
+#define F_DBG_STEP_EN V_DBG_STEP_EN(1U)
+
+#define A_TLS_TX_DBG_SELL_DATA 0x44714
+#define A_TLS_TX_DBG_SELH_DATA 0x44718
+#define A_TLS_TX_DBG_SEL_CTRL 0x44730
+#define A_TLS_TX_GLOBAL_CONFIG 0x447c0
+
+#define S_QUIC_EN 2
+#define V_QUIC_EN(x) ((x) << S_QUIC_EN)
+#define F_QUIC_EN V_QUIC_EN(1U)
+
+#define S_IPSEC_IDX_UPD_EN 1
+#define V_IPSEC_IDX_UPD_EN(x) ((x) << S_IPSEC_IDX_UPD_EN)
+#define F_IPSEC_IDX_UPD_EN V_IPSEC_IDX_UPD_EN(1U)
+
+#define S_IPSEC_IDX_CTL 0
+#define V_IPSEC_IDX_CTL(x) ((x) << S_IPSEC_IDX_CTL)
+#define F_IPSEC_IDX_CTL V_IPSEC_IDX_CTL(1U)
+
+#define A_TLS_TX_CGEN 0x447f0
+
+#define S_CHCGEN 0
+#define M_CHCGEN 0x3fU
+#define V_CHCGEN(x) ((x) << S_CHCGEN)
+#define G_CHCGEN(x) (((x) >> S_CHCGEN) & M_CHCGEN)
+
+#define A_TLS_TX_IND_ADDR 0x447f8
+
+#define S_T7_3_ADDR 0
+#define M_T7_3_ADDR 0xfffU
+#define V_T7_3_ADDR(x) ((x) << S_T7_3_ADDR)
+#define G_T7_3_ADDR(x) (((x) >> S_T7_3_ADDR) & M_T7_3_ADDR)
+
+#define A_TLS_TX_IND_DATA 0x447fc
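+
+/*
+ * The A_TLS_TX_CH_IND_* constants below are small offsets (0x0-0x3f)
+ * rather than addresses in the CRYPTO_0 window, so they are
+ * presumably reached indirectly: write the offset to
+ * A_TLS_TX_IND_ADDR, then access the value through A_TLS_TX_IND_DATA.
+ * A minimal read sketch, again assuming the driver's accessors:
+ *
+ *	t4_write_reg(sc, A_TLS_TX_IND_ADDR,
+ *	    V_T7_3_ADDR(A_TLS_TX_CH_IND_ING_PKT_CNT));
+ *	pkts = t4_read_reg(sc, A_TLS_TX_IND_DATA);
+ */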
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_LO 0x0
+#define A_TLS_TX_CH_IND_ING_BYTE_CNT_HI 0x1
+#define A_TLS_TX_CH_IND_ING_PKT_CNT 0x2
+#define A_TLS_TX_CH_IND_DISPATCH_PKT_CNT 0x4
+#define A_TLS_TX_CH_IND_ERROR_CNTS0 0x5
+#define A_TLS_TX_CH_IND_DEC_ERROR_CNTS 0x7
+#define A_TLS_TX_CH_IND_DBG_SPP_CFG 0x1f
+
+#define S_DIS_IF_ERR 11
+#define V_DIS_IF_ERR(x) ((x) << S_DIS_IF_ERR)
+#define F_DIS_IF_ERR V_DIS_IF_ERR(1U)
+
+#define S_DIS_ERR_MSG 10
+#define V_DIS_ERR_MSG(x) ((x) << S_DIS_ERR_MSG)
+#define F_DIS_ERR_MSG V_DIS_ERR_MSG(1U)
+
+#define S_DIS_BP_SEQF 9
+#define V_DIS_BP_SEQF(x) ((x) << S_DIS_BP_SEQF)
+#define F_DIS_BP_SEQF V_DIS_BP_SEQF(1U)
+
+#define S_DIS_BP_LENF 8
+#define V_DIS_BP_LENF(x) ((x) << S_DIS_BP_LENF)
+#define F_DIS_BP_LENF V_DIS_BP_LENF(1U)
+
+#define S_DIS_KEX_ERR 6
+#define V_DIS_KEX_ERR(x) ((x) << S_DIS_KEX_ERR)
+#define F_DIS_KEX_ERR V_DIS_KEX_ERR(1U)
+
+#define S_CLR_STS 5
+#define V_CLR_STS(x) ((x) << S_CLR_STS)
+#define F_CLR_STS V_CLR_STS(1U)
+
+#define S_TGL_CNT 4
+#define V_TGL_CNT(x) ((x) << S_TGL_CNT)
+#define F_TGL_CNT V_TGL_CNT(1U)
+
+#define S_ENB_PAZ 3
+#define V_ENB_PAZ(x) ((x) << S_ENB_PAZ)
+#define F_ENB_PAZ V_ENB_PAZ(1U)
+
+#define S_DIS_NOP 2
+#define V_DIS_NOP(x) ((x) << S_DIS_NOP)
+#define F_DIS_NOP V_DIS_NOP(1U)
+
+#define S_DIS_CPL_ERR 1
+#define V_DIS_CPL_ERR(x) ((x) << S_DIS_CPL_ERR)
+#define F_DIS_CPL_ERR V_DIS_CPL_ERR(1U)
+
+#define S_DIS_OFF_ERR 0
+#define V_DIS_OFF_ERR(x) ((x) << S_DIS_OFF_ERR)
+#define F_DIS_OFF_ERR V_DIS_OFF_ERR(1U)
+
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID0 0x20
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID1 0x21
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID2 0x22
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID3 0x23
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID4 0x24
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID5 0x25
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID6 0x26
+#define A_TLS_TX_CH_IND_DBG_SPP_PKTID7 0x27
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W0 0x28
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W1 0x29
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W2 0x2a
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_CPL_W3 0x2b
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W0 0x2c
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W1 0x2d
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W2 0x2e
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_SMD_W3 0x2f
+#define A_TLS_TX_CH_IND_DBG_SPP_SPR_ERR 0x30
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_BP 0x31
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_M 0x32
+#define A_TLS_TX_CH_IND_DBG_SPP_SFO_CTL_L 0x33
+#define A_TLS_TX_CH_IND_DBG_PKT_STAT 0x3f
+
+/* registers for module CRYPTO_1 */
+#define CRYPTO_1_BASE_ADDR 0x45000
+
+/* registers for module CRYPTO_KEY */
+#define CRYPTO_KEY_BASE_ADDR 0x46000
+
+#define A_CRYPTO_KEY_CONFIG 0x46000
+
+#define S_ESNWIN 1
+#define M_ESNWIN 0x7U
+#define V_ESNWIN(x) ((x) << S_ESNWIN)
+#define G_ESNWIN(x) (((x) >> S_ESNWIN) & M_ESNWIN)
+
+#define S_INGKEY96 0
+#define V_INGKEY96(x) ((x) << S_INGKEY96)
+#define F_INGKEY96 V_INGKEY96(1U)
+
+#define A_CRYPTO_KEY_RST 0x46004
+
+#define S_CORE1RST 1
+#define V_CORE1RST(x) ((x) << S_CORE1RST)
+#define F_CORE1RST V_CORE1RST(1U)
+
+#define S_CORE0RST 0
+#define V_CORE0RST(x) ((x) << S_CORE0RST)
+#define F_CORE0RST V_CORE0RST(1U)
+
+#define A_CRYPTO_KEY_INT_ENABLE 0x46008
+
+#define S_MA_FIFO_PERR 22
+#define V_MA_FIFO_PERR(x) ((x) << S_MA_FIFO_PERR)
+#define F_MA_FIFO_PERR V_MA_FIFO_PERR(1U)
+
+#define S_MA_RSP_PERR 21
+#define V_MA_RSP_PERR(x) ((x) << S_MA_RSP_PERR)
+#define F_MA_RSP_PERR V_MA_RSP_PERR(1U)
+
+#define S_ING_CACHE_DATA_PERR 19
+#define V_ING_CACHE_DATA_PERR(x) ((x) << S_ING_CACHE_DATA_PERR)
+#define F_ING_CACHE_DATA_PERR V_ING_CACHE_DATA_PERR(1U)
+
+#define S_ING_CACHE_TAG_PERR 18
+#define V_ING_CACHE_TAG_PERR(x) ((x) << S_ING_CACHE_TAG_PERR)
+#define F_ING_CACHE_TAG_PERR V_ING_CACHE_TAG_PERR(1U)
+
+#define S_LKP_KEY_REQ_PERR 17
+#define V_LKP_KEY_REQ_PERR(x) ((x) << S_LKP_KEY_REQ_PERR)
+#define F_LKP_KEY_REQ_PERR V_LKP_KEY_REQ_PERR(1U)
+
+#define S_LKP_CLIP_TCAM_PERR 16
+#define V_LKP_CLIP_TCAM_PERR(x) ((x) << S_LKP_CLIP_TCAM_PERR)
+#define F_LKP_CLIP_TCAM_PERR V_LKP_CLIP_TCAM_PERR(1U)
+
+#define S_LKP_MAIN_TCAM_PERR 15
+#define V_LKP_MAIN_TCAM_PERR(x) ((x) << S_LKP_MAIN_TCAM_PERR)
+#define F_LKP_MAIN_TCAM_PERR V_LKP_MAIN_TCAM_PERR(1U)
+
+#define S_EGR_KEY_REQ_PERR 14
+#define V_EGR_KEY_REQ_PERR(x) ((x) << S_EGR_KEY_REQ_PERR)
+#define F_EGR_KEY_REQ_PERR V_EGR_KEY_REQ_PERR(1U)
+
+#define S_EGR_CACHE_DATA_PERR 13
+#define V_EGR_CACHE_DATA_PERR(x) ((x) << S_EGR_CACHE_DATA_PERR)
+#define F_EGR_CACHE_DATA_PERR V_EGR_CACHE_DATA_PERR(1U)
+
+#define S_EGR_CACHE_TAG_PERR 12
+#define V_EGR_CACHE_TAG_PERR(x) ((x) << S_EGR_CACHE_TAG_PERR)
+#define F_EGR_CACHE_TAG_PERR V_EGR_CACHE_TAG_PERR(1U)
+
+#define S_CIM_PERR 11
+#define V_CIM_PERR(x) ((x) << S_CIM_PERR)
+#define F_CIM_PERR V_CIM_PERR(1U)
+
+#define S_MA_INV_RSP_TAG 10
+#define V_MA_INV_RSP_TAG(x) ((x) << S_MA_INV_RSP_TAG)
+#define F_MA_INV_RSP_TAG V_MA_INV_RSP_TAG(1U)
+
+#define S_ING_KEY_RANGE_ERR 9
+#define V_ING_KEY_RANGE_ERR(x) ((x) << S_ING_KEY_RANGE_ERR)
+#define F_ING_KEY_RANGE_ERR V_ING_KEY_RANGE_ERR(1U)
+
+#define S_ING_MFIFO_OVFL 8
+#define V_ING_MFIFO_OVFL(x) ((x) << S_ING_MFIFO_OVFL)
+#define F_ING_MFIFO_OVFL V_ING_MFIFO_OVFL(1U)
+
+#define S_LKP_REQ_OVFL 7
+#define V_LKP_REQ_OVFL(x) ((x) << S_LKP_REQ_OVFL)
+#define F_LKP_REQ_OVFL V_LKP_REQ_OVFL(1U)
+
+#define S_EOK_WAIT_ERR 6
+#define V_EOK_WAIT_ERR(x) ((x) << S_EOK_WAIT_ERR)
+#define F_EOK_WAIT_ERR V_EOK_WAIT_ERR(1U)
+
+#define S_EGR_KEY_RANGE_ERR 5
+#define V_EGR_KEY_RANGE_ERR(x) ((x) << S_EGR_KEY_RANGE_ERR)
+#define F_EGR_KEY_RANGE_ERR V_EGR_KEY_RANGE_ERR(1U)
+
+#define S_EGR_MFIFO_OVFL 4
+#define V_EGR_MFIFO_OVFL(x) ((x) << S_EGR_MFIFO_OVFL)
+#define F_EGR_MFIFO_OVFL V_EGR_MFIFO_OVFL(1U)
+
+#define S_SEQ_WRAP_HP_OVFL 3
+#define V_SEQ_WRAP_HP_OVFL(x) ((x) << S_SEQ_WRAP_HP_OVFL)
+#define F_SEQ_WRAP_HP_OVFL V_SEQ_WRAP_HP_OVFL(1U)
+
+#define S_SEQ_WRAP_LP_OVFL 2
+#define V_SEQ_WRAP_LP_OVFL(x) ((x) << S_SEQ_WRAP_LP_OVFL)
+#define F_SEQ_WRAP_LP_OVFL V_SEQ_WRAP_LP_OVFL(1U)
+
+#define S_EGR_SEQ_WRAP_HP 1
+#define V_EGR_SEQ_WRAP_HP(x) ((x) << S_EGR_SEQ_WRAP_HP)
+#define F_EGR_SEQ_WRAP_HP V_EGR_SEQ_WRAP_HP(1U)
+
+#define S_EGR_SEQ_WRAP_LP 0
+#define V_EGR_SEQ_WRAP_LP(x) ((x) << S_EGR_SEQ_WRAP_LP)
+#define F_EGR_SEQ_WRAP_LP V_EGR_SEQ_WRAP_LP(1U)
+
+#define A_CRYPTO_KEY_INT_CAUSE 0x4600c
+#define A_CRYPTO_KEY_PERR_ENABLE 0x46010
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_LP_KEY_ID 0x46018
+
+#define S_KEY_VALID 31
+#define V_KEY_VALID(x) ((x) << S_KEY_VALID)
+#define F_KEY_VALID V_KEY_VALID(1U)
+
+#define S_KEY_ID 0
+#define M_KEY_ID 0x7fffffffU
+#define V_KEY_ID(x) ((x) << S_KEY_ID)
+#define G_KEY_ID(x) (((x) >> S_KEY_ID) & M_KEY_ID)
+
+#define A_CRYPTO_KEY_EGR_SEQ_WRAP_HP_KEY_ID 0x4601c
+#define A_CRYPTO_KEY_TCAM_DATA0 0x46020
+#define A_CRYPTO_KEY_TCAM_DATA1 0x46024
+#define A_CRYPTO_KEY_TCAM_DATA2 0x46028
+#define A_CRYPTO_KEY_TCAM_DATA3 0x4602c
+#define A_CRYPTO_KEY_TCAM_CTL 0x46030
+
+#define S_SRCHMHIT 21
+#define V_SRCHMHIT(x) ((x) << S_SRCHMHIT)
+#define F_SRCHMHIT V_SRCHMHIT(1U)
+
+#define S_T7_BUSY 20
+#define V_T7_BUSY(x) ((x) << S_T7_BUSY)
+#define F_T7_BUSY V_T7_BUSY(1U)
+
+#define S_SRCHHIT 19
+#define V_SRCHHIT(x) ((x) << S_SRCHHIT)
+#define F_SRCHHIT V_SRCHHIT(1U)
+
+#define S_IPVERSION 18
+#define V_IPVERSION(x) ((x) << S_IPVERSION)
+#define F_IPVERSION V_IPVERSION(1U)
+
+#define S_BITSEL 17
+#define V_BITSEL(x) ((x) << S_BITSEL)
+#define F_BITSEL V_BITSEL(1U)
+
+#define S_TCAMSEL 16
+#define V_TCAMSEL(x) ((x) << S_TCAMSEL)
+#define F_TCAMSEL V_TCAMSEL(1U)
+
+#define S_CMDTYPE 14
+#define M_CMDTYPE 0x3U
+#define V_CMDTYPE(x) ((x) << S_CMDTYPE)
+#define G_CMDTYPE(x) (((x) >> S_CMDTYPE) & M_CMDTYPE)
+
+#define S_TCAMINDEX 0
+#define M_TCAMINDEX 0x3fffU
+#define V_TCAMINDEX(x) ((x) << S_TCAMINDEX)
+#define G_TCAMINDEX(x) (((x) >> S_TCAMINDEX) & M_TCAMINDEX)
+
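+/*
+ * Sketch of a TCAM operation through A_CRYPTO_KEY_TCAM_CTL; the
+ * CMDTYPE encodings are not spelled out in this file, so treat cmd
+ * and idx below as placeholders: load the TCAM_DATA registers, issue
+ * the command, and poll F_T7_BUSY until the engine is idle.
+ *
+ *	t4_write_reg(sc, A_CRYPTO_KEY_TCAM_CTL,
+ *	    V_CMDTYPE(cmd) | V_TCAMINDEX(idx) | F_TCAMSEL);
+ *	while (t4_read_reg(sc, A_CRYPTO_KEY_TCAM_CTL) & F_T7_BUSY)
+ *		;
+ */
+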
+#define A_CRYPTO_KEY_TCAM_CONFIG 0x46034
+
+#define S_T7_CLTCAMDEEPSLEEP_STAT 3
+#define V_T7_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_T7_CLTCAMDEEPSLEEP_STAT)
+#define F_T7_CLTCAMDEEPSLEEP_STAT V_T7_CLTCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_TCAMDEEPSLEEP_STAT 2
+#define V_T7_TCAMDEEPSLEEP_STAT(x) ((x) << S_T7_TCAMDEEPSLEEP_STAT)
+#define F_T7_TCAMDEEPSLEEP_STAT V_T7_TCAMDEEPSLEEP_STAT(1U)
+
+#define S_T7_CLTCAMDEEPSLEEP 1
+#define V_T7_CLTCAMDEEPSLEEP(x) ((x) << S_T7_CLTCAMDEEPSLEEP)
+#define F_T7_CLTCAMDEEPSLEEP V_T7_CLTCAMDEEPSLEEP(1U)
+
+#define S_T7_TCAMDEEPSLEEP 0
+#define V_T7_TCAMDEEPSLEEP(x) ((x) << S_T7_TCAMDEEPSLEEP)
+#define F_T7_TCAMDEEPSLEEP V_T7_TCAMDEEPSLEEP(1U)
+
+#define A_CRYPTO_KEY_TX_CMM_CONFIG 0x46040
+#define A_CRYPTO_KEY_TX_TNL_BASE 0x46044
+#define A_CRYPTO_KEY_TX_TRN_BASE 0x46048
+#define A_CRYPTO_KEY_TX_MAX_KEYS 0x4604c
+
+#define S_TNL_MAX 16
+#define M_TNL_MAX 0xffffU
+#define V_TNL_MAX(x) ((x) << S_TNL_MAX)
+#define G_TNL_MAX(x) (((x) >> S_TNL_MAX) & M_TNL_MAX)
+
+#define S_TRN_MAX 0
+#define M_TRN_MAX 0xffffU
+#define V_TRN_MAX(x) ((x) << S_TRN_MAX)
+#define G_TRN_MAX(x) (((x) >> S_TRN_MAX) & M_TRN_MAX)
+
+#define A_CRYPTO_KEY_TX_SEQ_STAT 0x46050
+
+#define S_ESN 24
+#define V_ESN(x) ((x) << S_ESN)
+#define F_ESN V_ESN(1U)
+
+#define S_SEQHI 20
+#define M_SEQHI 0xfU
+#define V_SEQHI(x) ((x) << S_SEQHI)
+#define G_SEQHI(x) (((x) >> S_SEQHI) & M_SEQHI)
+
+#define S_KEYID 0
+#define M_KEYID 0xfffffU
+#define V_KEYID(x) ((x) << S_KEYID)
+#define G_KEYID(x) (((x) >> S_KEYID) & M_KEYID)
+
+#define A_CRYPTO_KEY_RX_CMM_CONFIG 0x46060
+#define A_CRYPTO_KEY_RX_BASE 0x46064
+#define A_CRYPTO_KEY_RX_MAX_KEYS 0x46068
+
+#define S_MAXKEYS 0
+#define M_MAXKEYS 0xffffU
+#define V_MAXKEYS(x) ((x) << S_MAXKEYS)
+#define G_MAXKEYS(x) (((x) >> S_MAXKEYS) & M_MAXKEYS)
+
+#define A_CRYPTO_KEY_CRYPTO_REVISION 0x4606c
+#define A_CRYPTO_KEY_RX_SEQ_STAT 0x46070
+#define A_CRYPTO_KEY_TCAM_BIST_CTRL 0x46074
+#define A_CRYPTO_KEY_TCAM_BIST_CB_PASS 0x46078
+#define A_CRYPTO_KEY_TCAM_BIST_CB_BUSY 0x4607c
+#define A_CRYPTO_KEY_DBG_SEL_CTRL 0x46080
+
+#define S_SEL_OVR_EN 16
+#define V_SEL_OVR_EN(x) ((x) << S_SEL_OVR_EN)
+#define F_SEL_OVR_EN V_SEL_OVR_EN(1U)
+
+#define S_T7_1_SELH 8
+#define M_T7_1_SELH 0xffU
+#define V_T7_1_SELH(x) ((x) << S_T7_1_SELH)
+#define G_T7_1_SELH(x) (((x) >> S_T7_1_SELH) & M_T7_1_SELH)
+
+#define S_T7_1_SELL 0
+#define M_T7_1_SELL 0xffU
+#define V_T7_1_SELL(x) ((x) << S_T7_1_SELL)
+#define G_T7_1_SELL(x) (((x) >> S_T7_1_SELL) & M_T7_1_SELL)
+
+#define A_CRYPTO_KEY_DBG_SELL_DATA 0x46084
+#define A_CRYPTO_KEY_DBG_SELH_DATA 0x46088
+
+/* registers for module ARM */
+#define ARM_BASE_ADDR 0x47000
+
+#define A_ARM_CPU_POR_RST 0x47000
+
+#define S_CPUPORRSTN3 3
+#define V_CPUPORRSTN3(x) ((x) << S_CPUPORRSTN3)
+#define F_CPUPORRSTN3 V_CPUPORRSTN3(1U)
+
+#define S_CPUPORRSTN2 2
+#define V_CPUPORRSTN2(x) ((x) << S_CPUPORRSTN2)
+#define F_CPUPORRSTN2 V_CPUPORRSTN2(1U)
+
+#define S_CPUPORRSTN1 1
+#define V_CPUPORRSTN1(x) ((x) << S_CPUPORRSTN1)
+#define F_CPUPORRSTN1 V_CPUPORRSTN1(1U)
+
+#define S_CPUPORRSTN0 0
+#define V_CPUPORRSTN0(x) ((x) << S_CPUPORRSTN0)
+#define F_CPUPORRSTN0 V_CPUPORRSTN0(1U)
+
+#define A_ARM_CPU_CORE_RST 0x47004
+
+#define S_CPUCORERSTN3 3
+#define V_CPUCORERSTN3(x) ((x) << S_CPUCORERSTN3)
+#define F_CPUCORERSTN3 V_CPUCORERSTN3(1U)
+
+#define S_CPUCORERSTN2 2
+#define V_CPUCORERSTN2(x) ((x) << S_CPUCORERSTN2)
+#define F_CPUCORERSTN2 V_CPUCORERSTN2(1U)
+
+#define S_CPUCORERSTN1 1
+#define V_CPUCORERSTN1(x) ((x) << S_CPUCORERSTN1)
+#define F_CPUCORERSTN1 V_CPUCORERSTN1(1U)
+
+#define S_CPUCORERSTN0 0
+#define V_CPUCORERSTN0(x) ((x) << S_CPUCORERSTN0)
+#define F_CPUCORERSTN0 V_CPUCORERSTN0(1U)
+
+#define A_ARM_CPU_WARM_RST_REQ 0x47008
+
+#define S_CPUWARMRSTREQ3 3
+#define V_CPUWARMRSTREQ3(x) ((x) << S_CPUWARMRSTREQ3)
+#define F_CPUWARMRSTREQ3 V_CPUWARMRSTREQ3(1U)
+
+#define S_CPUWARMRSTREQ2 2
+#define V_CPUWARMRSTREQ2(x) ((x) << S_CPUWARMRSTREQ2)
+#define F_CPUWARMRSTREQ2 V_CPUWARMRSTREQ2(1U)
+
+#define S_CPUWARMRSTREQ1 1
+#define V_CPUWARMRSTREQ1(x) ((x) << S_CPUWARMRSTREQ1)
+#define F_CPUWARMRSTREQ1 V_CPUWARMRSTREQ1(1U)
+
+#define S_CPUWARMRSTREQ0 0
+#define V_CPUWARMRSTREQ0(x) ((x) << S_CPUWARMRSTREQ0)
+#define F_CPUWARMRSTREQ0 V_CPUWARMRSTREQ0(1U)
+
+#define A_ARM_CPU_L2_RST 0x4700c
+
+#define S_CPUL2RSTN 0
+#define V_CPUL2RSTN(x) ((x) << S_CPUL2RSTN)
+#define F_CPUL2RSTN V_CPUL2RSTN(1U)
+
+#define A_ARM_CPU_L2_RST_DIS 0x47010
+
+#define S_CPUL2RSTDISABLE 0
+#define V_CPUL2RSTDISABLE(x) ((x) << S_CPUL2RSTDISABLE)
+#define F_CPUL2RSTDISABLE V_CPUL2RSTDISABLE(1U)
+
+#define A_ARM_CPU_PRESET_DBG 0x47014
+
+#define S_CPUPRESETDBGN 0
+#define V_CPUPRESETDBGN(x) ((x) << S_CPUPRESETDBGN)
+#define F_CPUPRESETDBGN V_CPUPRESETDBGN(1U)
+
+#define A_ARM_PL_DMA_AW_OFFSET 0x47018
+
+#define S_PL_DMA_AW_OFFSET 0
+#define M_PL_DMA_AW_OFFSET 0x3fffffffU
+#define V_PL_DMA_AW_OFFSET(x) ((x) << S_PL_DMA_AW_OFFSET)
+#define G_PL_DMA_AW_OFFSET(x) (((x) >> S_PL_DMA_AW_OFFSET) & M_PL_DMA_AW_OFFSET)
+
+#define A_ARM_PL_DMA_AR_OFFSET 0x4701c
+
+#define S_PL_DMA_AR_OFFSET 0
+#define M_PL_DMA_AR_OFFSET 0x3fffffffU
+#define V_PL_DMA_AR_OFFSET(x) ((x) << S_PL_DMA_AR_OFFSET)
+#define G_PL_DMA_AR_OFFSET(x) (((x) >> S_PL_DMA_AR_OFFSET) & M_PL_DMA_AR_OFFSET)
+
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR0 0x47020
+#define A_ARM_CPU_RESET_VECTOR_BASE_ADDR1 0x47024
+
+#define S_CPURESETVECBA1 0
+#define M_CPURESETVECBA1 0x3ffU
+#define V_CPURESETVECBA1(x) ((x) << S_CPURESETVECBA1)
+#define G_CPURESETVECBA1(x) (((x) >> S_CPURESETVECBA1) & M_CPURESETVECBA1)
+
+#define A_ARM_CPU_PMU_EVENT 0x47028
+
+#define S_CPUPMUEVENT 0
+#define M_CPUPMUEVENT 0x1ffffffU
+#define V_CPUPMUEVENT(x) ((x) << S_CPUPMUEVENT)
+#define G_CPUPMUEVENT(x) (((x) >> S_CPUPMUEVENT) & M_CPUPMUEVENT)
+
+#define A_ARM_DMA_RST 0x4702c
+
+#define S_DMA_PL_RST_N 0
+#define V_DMA_PL_RST_N(x) ((x) << S_DMA_PL_RST_N)
+#define F_DMA_PL_RST_N V_DMA_PL_RST_N(1U)
+
+#define A_ARM_PLM_RID_CFG 0x4703c
+#define A_ARM_PLM_EROM_CFG 0x47040
+#define A_ARM_PL_ARM_HDR_CFG 0x4704c
+#define A_ARM_RC_INT_STATUS 0x4705c
+
+#define S_RC_INT_STATUS_REG 0
+#define M_RC_INT_STATUS_REG 0x3fU
+#define V_RC_INT_STATUS_REG(x) ((x) << S_RC_INT_STATUS_REG)
+#define G_RC_INT_STATUS_REG(x) (((x) >> S_RC_INT_STATUS_REG) & M_RC_INT_STATUS_REG)
+
+#define A_ARM_CPU_DBG_PWR_UP_REQ 0x47060
+
+#define S_CPUDBGPWRUPREQ3 3
+#define V_CPUDBGPWRUPREQ3(x) ((x) << S_CPUDBGPWRUPREQ3)
+#define F_CPUDBGPWRUPREQ3 V_CPUDBGPWRUPREQ3(1U)
+
+#define S_CPUDBGPWRUPREQ2 2
+#define V_CPUDBGPWRUPREQ2(x) ((x) << S_CPUDBGPWRUPREQ2)
+#define F_CPUDBGPWRUPREQ2 V_CPUDBGPWRUPREQ2(1U)
+
+#define S_CPUDBGPWRUPREQ1 1
+#define V_CPUDBGPWRUPREQ1(x) ((x) << S_CPUDBGPWRUPREQ1)
+#define F_CPUDBGPWRUPREQ1 V_CPUDBGPWRUPREQ1(1U)
+
+#define S_CPUDBGPWRUPREQ0 0
+#define V_CPUDBGPWRUPREQ0(x) ((x) << S_CPUDBGPWRUPREQ0)
+#define F_CPUDBGPWRUPREQ0 V_CPUDBGPWRUPREQ0(1U)
+
+#define A_ARM_CPU_STANDBY_WFE_WFI 0x47064
+
+#define S_CPUSTANDBYWFIL2 8
+#define V_CPUSTANDBYWFIL2(x) ((x) << S_CPUSTANDBYWFIL2)
+#define F_CPUSTANDBYWFIL2 V_CPUSTANDBYWFIL2(1U)
+
+#define S_CPUSTANDBYWFI3 7
+#define V_CPUSTANDBYWFI3(x) ((x) << S_CPUSTANDBYWFI3)
+#define F_CPUSTANDBYWFI3 V_CPUSTANDBYWFI3(1U)
+
+#define S_CPUSTANDBYWFI2 6
+#define V_CPUSTANDBYWFI2(x) ((x) << S_CPUSTANDBYWFI2)
+#define F_CPUSTANDBYWFI2 V_CPUSTANDBYWFI2(1U)
+
+#define S_CPUSTANDBYWFI1 5
+#define V_CPUSTANDBYWFI1(x) ((x) << S_CPUSTANDBYWFI1)
+#define F_CPUSTANDBYWFI1 V_CPUSTANDBYWFI1(1U)
+
+#define S_CPUSTANDBYWFI0 4
+#define V_CPUSTANDBYWFI0(x) ((x) << S_CPUSTANDBYWFI0)
+#define F_CPUSTANDBYWFI0 V_CPUSTANDBYWFI0(1U)
+
+#define S_CPUSTANDBYWFE3 3
+#define V_CPUSTANDBYWFE3(x) ((x) << S_CPUSTANDBYWFE3)
+#define F_CPUSTANDBYWFE3 V_CPUSTANDBYWFE3(1U)
+
+#define S_CPUSTANDBYWFE2 2
+#define V_CPUSTANDBYWFE2(x) ((x) << S_CPUSTANDBYWFE2)
+#define F_CPUSTANDBYWFE2 V_CPUSTANDBYWFE2(1U)
+
+#define S_CPUSTANDBYWFE1 1
+#define V_CPUSTANDBYWFE1(x) ((x) << S_CPUSTANDBYWFE1)
+#define F_CPUSTANDBYWFE1 V_CPUSTANDBYWFE1(1U)
+
+#define S_CPUSTANDBYWFE0 0
+#define V_CPUSTANDBYWFE0(x) ((x) << S_CPUSTANDBYWFE0)
+#define F_CPUSTANDBYWFE0 V_CPUSTANDBYWFE0(1U)
+
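+/*
+ * Usage sketch, with semantics assumed from the field names alone: a
+ * one-shot check that all four cores have retired into WFI, using the
+ * driver's t4_read_reg() accessor (the all-cores mask is assembled
+ * here for illustration; it is not a named constant in this file):
+ *
+ *	const uint32_t wfi_all = F_CPUSTANDBYWFI0 | F_CPUSTANDBYWFI1 |
+ *	    F_CPUSTANDBYWFI2 | F_CPUSTANDBYWFI3;
+ *
+ *	if ((t4_read_reg(sc, A_ARM_CPU_STANDBY_WFE_WFI) & wfi_all) ==
+ *	    wfi_all)
+ *		all_idle = true;
+ */
+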
+#define A_ARM_CPU_SMPEN 0x47068
+
+#define S_CPUSMPEN3 3
+#define V_CPUSMPEN3(x) ((x) << S_CPUSMPEN3)
+#define F_CPUSMPEN3 V_CPUSMPEN3(1U)
+
+#define S_CPUSMPEN2 2
+#define V_CPUSMPEN2(x) ((x) << S_CPUSMPEN2)
+#define F_CPUSMPEN2 V_CPUSMPEN2(1U)
+
+#define S_CPUSMPEN1 1
+#define V_CPUSMPEN1(x) ((x) << S_CPUSMPEN1)
+#define F_CPUSMPEN1 V_CPUSMPEN1(1U)
+
+#define S_CPUSMPEN0 0
+#define V_CPUSMPEN0(x) ((x) << S_CPUSMPEN0)
+#define F_CPUSMPEN0 V_CPUSMPEN0(1U)
+
+#define A_ARM_CPU_QACTIVE 0x4706c
+
+#define S_CPUQACTIVE3 3
+#define V_CPUQACTIVE3(x) ((x) << S_CPUQACTIVE3)
+#define F_CPUQACTIVE3 V_CPUQACTIVE3(1U)
+
+#define S_CPUQACTIVE2 2
+#define V_CPUQACTIVE2(x) ((x) << S_CPUQACTIVE2)
+#define F_CPUQACTIVE2 V_CPUQACTIVE2(1U)
+
+#define S_CPUQACTIVE1 1
+#define V_CPUQACTIVE1(x) ((x) << S_CPUQACTIVE1)
+#define F_CPUQACTIVE1 V_CPUQACTIVE1(1U)
+
+#define S_CPUQACTIVE0 0
+#define V_CPUQACTIVE0(x) ((x) << S_CPUQACTIVE0)
+#define F_CPUQACTIVE0 V_CPUQACTIVE0(1U)
+
+#define A_ARM_CPU_QREQ 0x47070
+
+#define S_CPUL2FLUSHREQ 5
+#define V_CPUL2FLUSHREQ(x) ((x) << S_CPUL2FLUSHREQ)
+#define F_CPUL2FLUSHREQ V_CPUL2FLUSHREQ(1U)
+
+#define S_CPUL2QREQN 4
+#define V_CPUL2QREQN(x) ((x) << S_CPUL2QREQN)
+#define F_CPUL2QREQN V_CPUL2QREQN(1U)
+
+#define S_CPUQREQ3N 3
+#define V_CPUQREQ3N(x) ((x) << S_CPUQREQ3N)
+#define F_CPUQREQ3N V_CPUQREQ3N(1U)
+
+#define S_CPUQREQ2N 2
+#define V_CPUQREQ2N(x) ((x) << S_CPUQREQ2N)
+#define F_CPUQREQ2N V_CPUQREQ2N(1U)
+
+#define S_CPUQREQ1N 1
+#define V_CPUQREQ1N(x) ((x) << S_CPUQREQ1N)
+#define F_CPUQREQ1N V_CPUQREQ1N(1U)
+
+#define S_CPUQREQ0N 0
+#define V_CPUQREQ0N(x) ((x) << S_CPUQREQ0N)
+#define F_CPUQREQ0N V_CPUQREQ0N(1U)
+
+#define A_ARM_CPU_QREQ_STATUS 0x47074
+
+#define S_CPUL2FLUSHDONE 10
+#define V_CPUL2FLUSHDONE(x) ((x) << S_CPUL2FLUSHDONE)
+#define F_CPUL2FLUSHDONE V_CPUL2FLUSHDONE(1U)
+
+#define S_CPUL2QDENY 9
+#define V_CPUL2QDENY(x) ((x) << S_CPUL2QDENY)
+#define F_CPUL2QDENY V_CPUL2QDENY(1U)
+
+#define S_CPUL2QACCEPTN 8
+#define V_CPUL2QACCEPTN(x) ((x) << S_CPUL2QACCEPTN)
+#define F_CPUL2QACCEPTN V_CPUL2QACCEPTN(1U)
+
+#define S_CPUQDENY3 7
+#define V_CPUQDENY3(x) ((x) << S_CPUQDENY3)
+#define F_CPUQDENY3 V_CPUQDENY3(1U)
+
+#define S_CPUQDENY2 6
+#define V_CPUQDENY2(x) ((x) << S_CPUQDENY2)
+#define F_CPUQDENY2 V_CPUQDENY2(1U)
+
+#define S_CPUQDENY1 5
+#define V_CPUQDENY1(x) ((x) << S_CPUQDENY1)
+#define F_CPUQDENY1 V_CPUQDENY1(1U)
+
+#define S_CPUQDENY0 4
+#define V_CPUQDENY0(x) ((x) << S_CPUQDENY0)
+#define F_CPUQDENY0 V_CPUQDENY0(1U)
+
+#define S_CPUQACCEPT3N 3
+#define V_CPUQACCEPT3N(x) ((x) << S_CPUQACCEPT3N)
+#define F_CPUQACCEPT3N V_CPUQACCEPT3N(1U)
+
+#define S_CPUQACCEPT2N 2
+#define V_CPUQACCEPT2N(x) ((x) << S_CPUQACCEPT2N)
+#define F_CPUQACCEPT2N V_CPUQACCEPT2N(1U)
+
+#define S_CPUQACCEPT1N 1
+#define V_CPUQACCEPT1N(x) ((x) << S_CPUQACCEPT1N)
+#define F_CPUQACCEPT1N V_CPUQACCEPT1N(1U)
+
+#define S_CPUQACCEPT0N 0
+#define V_CPUQACCEPT0N(x) ((x) << S_CPUQACCEPT0N)
+#define F_CPUQACCEPT0N V_CPUQACCEPT0N(1U)
+
+#define A_ARM_CPU_DBG_EN 0x47078
+
+#define S_CPUDBGL1RSTDISABLE 28
+#define V_CPUDBGL1RSTDISABLE(x) ((x) << S_CPUDBGL1RSTDISABLE)
+#define F_CPUDBGL1RSTDISABLE V_CPUDBGL1RSTDISABLE(1U)
+
+#define S_CPUDBGRSTREQ3 27
+#define V_CPUDBGRSTREQ3(x) ((x) << S_CPUDBGRSTREQ3)
+#define F_CPUDBGRSTREQ3 V_CPUDBGRSTREQ3(1U)
+
+#define S_CPUDBGRSTREQ2 26
+#define V_CPUDBGRSTREQ2(x) ((x) << S_CPUDBGRSTREQ2)
+#define F_CPUDBGRSTREQ2 V_CPUDBGRSTREQ2(1U)
+
+#define S_CPUDBGRSTREQ1 25
+#define V_CPUDBGRSTREQ1(x) ((x) << S_CPUDBGRSTREQ1)
+#define F_CPUDBGRSTREQ1 V_CPUDBGRSTREQ1(1U)
+
+#define S_CPUDBGRSTREQ0 24
+#define V_CPUDBGRSTREQ0(x) ((x) << S_CPUDBGRSTREQ0)
+#define F_CPUDBGRSTREQ0 V_CPUDBGRSTREQ0(1U)
+
+#define S_CPUDBGPWRDUP3 23
+#define V_CPUDBGPWRDUP3(x) ((x) << S_CPUDBGPWRDUP3)
+#define F_CPUDBGPWRDUP3 V_CPUDBGPWRDUP3(1U)
+
+#define S_CPUDBGPWRDUP2 22
+#define V_CPUDBGPWRDUP2(x) ((x) << S_CPUDBGPWRDUP2)
+#define F_CPUDBGPWRDUP2 V_CPUDBGPWRDUP2(1U)
+
+#define S_CPUDBGPWRDUP1 21
+#define V_CPUDBGPWRDUP1(x) ((x) << S_CPUDBGPWRDUP1)
+#define F_CPUDBGPWRDUP1 V_CPUDBGPWRDUP1(1U)
+
+#define S_CPUDBGPWRDUP0 20
+#define V_CPUDBGPWRDUP0(x) ((x) << S_CPUDBGPWRDUP0)
+#define F_CPUDBGPWRDUP0 V_CPUDBGPWRDUP0(1U)
+
+#define S_CPUEXTDBGREQ3 19
+#define V_CPUEXTDBGREQ3(x) ((x) << S_CPUEXTDBGREQ3)
+#define F_CPUEXTDBGREQ3 V_CPUEXTDBGREQ3(1U)
+
+#define S_CPUEXTDBGREQ2 18
+#define V_CPUEXTDBGREQ2(x) ((x) << S_CPUEXTDBGREQ2)
+#define F_CPUEXTDBGREQ2 V_CPUEXTDBGREQ2(1U)
+
+#define S_CPUEXTDBGREQ1 17
+#define V_CPUEXTDBGREQ1(x) ((x) << S_CPUEXTDBGREQ1)
+#define F_CPUEXTDBGREQ1 V_CPUEXTDBGREQ1(1U)
+
+#define S_CPUEXTDBGREQ0 16
+#define V_CPUEXTDBGREQ0(x) ((x) << S_CPUEXTDBGREQ0)
+#define F_CPUEXTDBGREQ0 V_CPUEXTDBGREQ0(1U)
+
+#define S_CPUSPNIDEN3 15
+#define V_CPUSPNIDEN3(x) ((x) << S_CPUSPNIDEN3)
+#define F_CPUSPNIDEN3 V_CPUSPNIDEN3(1U)
+
+#define S_CPUSPNIDEN2 14
+#define V_CPUSPNIDEN2(x) ((x) << S_CPUSPNIDEN2)
+#define F_CPUSPNIDEN2 V_CPUSPNIDEN2(1U)
+
+#define S_CPUSPNIDEN1 13
+#define V_CPUSPNIDEN1(x) ((x) << S_CPUSPNIDEN1)
+#define F_CPUSPNIDEN1 V_CPUSPNIDEN1(1U)
+
+#define S_CPUSPNIDEN0 12
+#define V_CPUSPNIDEN0(x) ((x) << S_CPUSPNIDEN0)
+#define F_CPUSPNIDEN0 V_CPUSPNIDEN0(1U)
+
+#define S_CPUSPDBGEN3 11
+#define V_CPUSPDBGEN3(x) ((x) << S_CPUSPDBGEN3)
+#define F_CPUSPDBGEN3 V_CPUSPDBGEN3(1U)
+
+#define S_CPUSPDBGEN2 10
+#define V_CPUSPDBGEN2(x) ((x) << S_CPUSPDBGEN2)
+#define F_CPUSPDBGEN2 V_CPUSPDBGEN2(1U)
+
+#define S_CPUSPDBGEN1 9
+#define V_CPUSPDBGEN1(x) ((x) << S_CPUSPDBGEN1)
+#define F_CPUSPDBGEN1 V_CPUSPDBGEN1(1U)
+
+#define S_CPUSPDBGEN0 8
+#define V_CPUSPDBGEN0(x) ((x) << S_CPUSPDBGEN0)
+#define F_CPUSPDBGEN0 V_CPUSPDBGEN0(1U)
+
+#define S_CPUNIDEN3 7
+#define V_CPUNIDEN3(x) ((x) << S_CPUNIDEN3)
+#define F_CPUNIDEN3 V_CPUNIDEN3(1U)
+
+#define S_CPUNIDEN2 6
+#define V_CPUNIDEN2(x) ((x) << S_CPUNIDEN2)
+#define F_CPUNIDEN2 V_CPUNIDEN2(1U)
+
+#define S_CPUNIDEN1 5
+#define V_CPUNIDEN1(x) ((x) << S_CPUNIDEN1)
+#define F_CPUNIDEN1 V_CPUNIDEN1(1U)
+
+#define S_CPUNIDEN0 4
+#define V_CPUNIDEN0(x) ((x) << S_CPUNIDEN0)
+#define F_CPUNIDEN0 V_CPUNIDEN0(1U)
+
+#define S_CPUDBGEN3 3
+#define V_CPUDBGEN3(x) ((x) << S_CPUDBGEN3)
+#define F_CPUDBGEN3 V_CPUDBGEN3(1U)
+
+#define S_CPUDBGEN2 2
+#define V_CPUDBGEN2(x) ((x) << S_CPUDBGEN2)
+#define F_CPUDBGEN2 V_CPUDBGEN2(1U)
+
+#define S_CPUDBGEN1 1
+#define V_CPUDBGEN1(x) ((x) << S_CPUDBGEN1)
+#define F_CPUDBGEN1 V_CPUDBGEN1(1U)
+
+#define S_CPUDBGEN0 0
+#define V_CPUDBGEN0(x) ((x) << S_CPUDBGEN0)
+#define F_CPUDBGEN0 V_CPUDBGEN0(1U)
+
+#define A_ARM_CPU_DBG_ACK 0x4707c
+
+#define S_CPUDBGNOPWRDWN3 11
+#define V_CPUDBGNOPWRDWN3(x) ((x) << S_CPUDBGNOPWRDWN3)
+#define F_CPUDBGNOPWRDWN3 V_CPUDBGNOPWRDWN3(1U)
+
+#define S_CPUDBGNOPWRDWN2 10
+#define V_CPUDBGNOPWRDWN2(x) ((x) << S_CPUDBGNOPWRDWN2)
+#define F_CPUDBGNOPWRDWN2 V_CPUDBGNOPWRDWN2(1U)
+
+#define S_CPUDBGNOPWRDWN1 9
+#define V_CPUDBGNOPWRDWN1(x) ((x) << S_CPUDBGNOPWRDWN1)
+#define F_CPUDBGNOPWRDWN1 V_CPUDBGNOPWRDWN1(1U)
+
+#define S_CPUDBGNOPWRDWN0 8
+#define V_CPUDBGNOPWRDWN0(x) ((x) << S_CPUDBGNOPWRDWN0)
+#define F_CPUDBGNOPWRDWN0 V_CPUDBGNOPWRDWN0(1U)
+
+#define S_CPUDGNRSTREQ3 7
+#define V_CPUDGNRSTREQ3(x) ((x) << S_CPUDGNRSTREQ3)
+#define F_CPUDGNRSTREQ3 V_CPUDGNRSTREQ3(1U)
+
+#define S_CPUDGNRSTREQ2 6
+#define V_CPUDGNRSTREQ2(x) ((x) << S_CPUDGNRSTREQ2)
+#define F_CPUDGNRSTREQ2 V_CPUDGNRSTREQ2(1U)
+
+#define S_CPUDGNRSTREQ1 5
+#define V_CPUDGNRSTREQ1(x) ((x) << S_CPUDGNRSTREQ1)
+#define F_CPUDGNRSTREQ1 V_CPUDGNRSTREQ1(1U)
+
+#define S_CPUDGNRSTREQ0 4
+#define V_CPUDGNRSTREQ0(x) ((x) << S_CPUDGNRSTREQ0)
+#define F_CPUDGNRSTREQ0 V_CPUDGNRSTREQ0(1U)
+
+#define S_CPUDBGACK3 3
+#define V_CPUDBGACK3(x) ((x) << S_CPUDBGACK3)
+#define F_CPUDBGACK3 V_CPUDBGACK3(1U)
+
+#define S_CPUDBGACK2 2
+#define V_CPUDBGACK2(x) ((x) << S_CPUDBGACK2)
+#define F_CPUDBGACK2 V_CPUDBGACK2(1U)
+
+#define S_CPUDBGACK1 1
+#define V_CPUDBGACK1(x) ((x) << S_CPUDBGACK1)
+#define F_CPUDBGACK1 V_CPUDBGACK1(1U)
+
+#define S_CPUDBGACK0 0
+#define V_CPUDBGACK0(x) ((x) << S_CPUDBGACK0)
+#define F_CPUDBGACK0 V_CPUDBGACK0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_REQ 0x47080
+
+#define S_CPUPMUSNAPSHOTREQ3 3
+#define V_CPUPMUSNAPSHOTREQ3(x) ((x) << S_CPUPMUSNAPSHOTREQ3)
+#define F_CPUPMUSNAPSHOTREQ3 V_CPUPMUSNAPSHOTREQ3(1U)
+
+#define S_CPUPMUSNAPSHOTREQ2 2
+#define V_CPUPMUSNAPSHOTREQ2(x) ((x) << S_CPUPMUSNAPSHOTREQ2)
+#define F_CPUPMUSNAPSHOTREQ2 V_CPUPMUSNAPSHOTREQ2(1U)
+
+#define S_CPUPMUSNAPSHOTREQ1 1
+#define V_CPUPMUSNAPSHOTREQ1(x) ((x) << S_CPUPMUSNAPSHOTREQ1)
+#define F_CPUPMUSNAPSHOTREQ1 V_CPUPMUSNAPSHOTREQ1(1U)
+
+#define S_CPUPMUSNAPSHOTREQ0 0
+#define V_CPUPMUSNAPSHOTREQ0(x) ((x) << S_CPUPMUSNAPSHOTREQ0)
+#define F_CPUPMUSNAPSHOTREQ0 V_CPUPMUSNAPSHOTREQ0(1U)
+
+#define A_ARM_CPU_PMU_SNAPSHOT_ACK 0x47084
+
+#define S_CPUPMUSNAPSHOTACK3 3
+#define V_CPUPMUSNAPSHOTACK3(x) ((x) << S_CPUPMUSNAPSHOTACK3)
+#define F_CPUPMUSNAPSHOTACK3 V_CPUPMUSNAPSHOTACK3(1U)
+
+#define S_CPUPMUSNAPSHOTACK2 2
+#define V_CPUPMUSNAPSHOTACK2(x) ((x) << S_CPUPMUSNAPSHOTACK2)
+#define F_CPUPMUSNAPSHOTACK2 V_CPUPMUSNAPSHOTACK2(1U)
+
+#define S_CPUPMUSNAPSHOTACK1 1
+#define V_CPUPMUSNAPSHOTACK1(x) ((x) << S_CPUPMUSNAPSHOTACK1)
+#define F_CPUPMUSNAPSHOTACK1 V_CPUPMUSNAPSHOTACK1(1U)
+
+#define S_CPUPMUSNAPSHOTACK0 0
+#define V_CPUPMUSNAPSHOTACK0(x) ((x) << S_CPUPMUSNAPSHOTACK0)
+#define F_CPUPMUSNAPSHOTACK0 V_CPUPMUSNAPSHOTACK0(1U)
+
+#define A_ARM_EMMC_CTRL 0x47088
+
+#define S_EMMC_DATA_P2 24
+#define M_EMMC_DATA_P2 0xffU
+#define V_EMMC_DATA_P2(x) ((x) << S_EMMC_DATA_P2)
+#define G_EMMC_DATA_P2(x) (((x) >> S_EMMC_DATA_P2) & M_EMMC_DATA_P2)
+
+#define S_EMMC_DATA_P1 16
+#define M_EMMC_DATA_P1 0xffU
+#define V_EMMC_DATA_P1(x) ((x) << S_EMMC_DATA_P1)
+#define G_EMMC_DATA_P1(x) (((x) >> S_EMMC_DATA_P1) & M_EMMC_DATA_P1)
+
+#define S_EMMC_CMD_P2 15
+#define V_EMMC_CMD_P2(x) ((x) << S_EMMC_CMD_P2)
+#define F_EMMC_CMD_P2 V_EMMC_CMD_P2(1U)
+
+#define S_EMMC_CMD_P1 14
+#define V_EMMC_CMD_P1(x) ((x) << S_EMMC_CMD_P1)
+#define F_EMMC_CMD_P1 V_EMMC_CMD_P1(1U)
+
+#define S_EMMC_RST_P2 13
+#define V_EMMC_RST_P2(x) ((x) << S_EMMC_RST_P2)
+#define F_EMMC_RST_P2 V_EMMC_RST_P2(1U)
+
+#define S_EMMC_RST_P1 12
+#define V_EMMC_RST_P1(x) ((x) << S_EMMC_RST_P1)
+#define F_EMMC_RST_P1 V_EMMC_RST_P1(1U)
+
+#define S_EMMC_GP_IN_P2 10
+#define M_EMMC_GP_IN_P2 0x3U
+#define V_EMMC_GP_IN_P2(x) ((x) << S_EMMC_GP_IN_P2)
+#define G_EMMC_GP_IN_P2(x) (((x) >> S_EMMC_GP_IN_P2) & M_EMMC_GP_IN_P2)
+
+#define S_EMMC_GP_IN_P1 8
+#define M_EMMC_GP_IN_P1 0x3U
+#define V_EMMC_GP_IN_P1(x) ((x) << S_EMMC_GP_IN_P1)
+#define G_EMMC_GP_IN_P1(x) (((x) >> S_EMMC_GP_IN_P1) & M_EMMC_GP_IN_P1)
+
+#define S_EMMC_CLK_SEL 0
+#define M_EMMC_CLK_SEL 0xffU
+#define V_EMMC_CLK_SEL(x) ((x) << S_EMMC_CLK_SEL)
+#define G_EMMC_CLK_SEL(x) (((x) >> S_EMMC_CLK_SEL) & M_EMMC_CLK_SEL)
+
+#define A_ARM_CPU_CFG_END_VINI_TE 0x4708c
+
+#define S_CPUSYSBARDISABLE 23
+#define V_CPUSYSBARDISABLE(x) ((x) << S_CPUSYSBARDISABLE)
+#define F_CPUSYSBARDISABLE V_CPUSYSBARDISABLE(1U)
+
+#define S_CPUBROADCACHEMAIN 22
+#define V_CPUBROADCACHEMAIN(x) ((x) << S_CPUBROADCACHEMAIN)
+#define F_CPUBROADCACHEMAIN V_CPUBROADCACHEMAIN(1U)
+
+#define S_CPUBROADOUTER 21
+#define V_CPUBROADOUTER(x) ((x) << S_CPUBROADOUTER)
+#define F_CPUBROADOUTER V_CPUBROADOUTER(1U)
+
+#define S_CPUBROADINNER 20
+#define V_CPUBROADINNER(x) ((x) << S_CPUBROADINNER)
+#define F_CPUBROADINNER V_CPUBROADINNER(1U)
+
+#define S_CPUCRYPTODISABLE3 19
+#define V_CPUCRYPTODISABLE3(x) ((x) << S_CPUCRYPTODISABLE3)
+#define F_CPUCRYPTODISABLE3 V_CPUCRYPTODISABLE3(1U)
+
+#define S_CPUCRYPTODISABLE2 18
+#define V_CPUCRYPTODISABLE2(x) ((x) << S_CPUCRYPTODISABLE2)
+#define F_CPUCRYPTODISABLE2 V_CPUCRYPTODISABLE2(1U)
+
+#define S_CPUCRYPTODISABLE1 17
+#define V_CPUCRYPTODISABLE1(x) ((x) << S_CPUCRYPTODISABLE1)
+#define F_CPUCRYPTODISABLE1 V_CPUCRYPTODISABLE1(1U)
+
+#define S_CPUCRYPTODISABLE0 16
+#define V_CPUCRYPTODISABLE0(x) ((x) << S_CPUCRYPTODISABLE0)
+#define F_CPUCRYPTODISABLE0 V_CPUCRYPTODISABLE0(1U)
+
+#define S_CPUAA64NAA323 15
+#define V_CPUAA64NAA323(x) ((x) << S_CPUAA64NAA323)
+#define F_CPUAA64NAA323 V_CPUAA64NAA323(1U)
+
+#define S_CPUAA64NAA322 14
+#define V_CPUAA64NAA322(x) ((x) << S_CPUAA64NAA322)
+#define F_CPUAA64NAA322 V_CPUAA64NAA322(1U)
+
+#define S_CPUAA64NAA321 13
+#define V_CPUAA64NAA321(x) ((x) << S_CPUAA64NAA321)
+#define F_CPUAA64NAA321 V_CPUAA64NAA321(1U)
+
+#define S_CPUAA64NAA320 12
+#define V_CPUAA64NAA320(x) ((x) << S_CPUAA64NAA320)
+#define F_CPUAA64NAA320 V_CPUAA64NAA320(1U)
+
+#define S_CPUCFGTE3 11
+#define V_CPUCFGTE3(x) ((x) << S_CPUCFGTE3)
+#define F_CPUCFGTE3 V_CPUCFGTE3(1U)
+
+#define S_CPUCFGTE2 10
+#define V_CPUCFGTE2(x) ((x) << S_CPUCFGTE2)
+#define F_CPUCFGTE2 V_CPUCFGTE2(1U)
+
+#define S_CPUCFGTE1 9
+#define V_CPUCFGTE1(x) ((x) << S_CPUCFGTE1)
+#define F_CPUCFGTE1 V_CPUCFGTE1(1U)
+
+#define S_CPUCFGTE0 8
+#define V_CPUCFGTE0(x) ((x) << S_CPUCFGTE0)
+#define F_CPUCFGTE0 V_CPUCFGTE0(1U)
+
+#define S_CPUVINIHI3 7
+#define V_CPUVINIHI3(x) ((x) << S_CPUVINIHI3)
+#define F_CPUVINIHI3 V_CPUVINIHI3(1U)
+
+#define S_CPUVINIHI2 6
+#define V_CPUVINIHI2(x) ((x) << S_CPUVINIHI2)
+#define F_CPUVINIHI2 V_CPUVINIHI2(1U)
+
+#define S_CPUVINIHI1 5
+#define V_CPUVINIHI1(x) ((x) << S_CPUVINIHI1)
+#define F_CPUVINIHI1 V_CPUVINIHI1(1U)
+
+#define S_CPUVINIHI0 4
+#define V_CPUVINIHI0(x) ((x) << S_CPUVINIHI0)
+#define F_CPUVINIHI0 V_CPUVINIHI0(1U)
+
+#define S_CPUCFGEND3 3
+#define V_CPUCFGEND3(x) ((x) << S_CPUCFGEND3)
+#define F_CPUCFGEND3 V_CPUCFGEND3(1U)
+
+#define S_CPUCFGEND2 2
+#define V_CPUCFGEND2(x) ((x) << S_CPUCFGEND2)
+#define F_CPUCFGEND2 V_CPUCFGEND2(1U)
+
+#define S_CPUCFGEND1 1
+#define V_CPUCFGEND1(x) ((x) << S_CPUCFGEND1)
+#define F_CPUCFGEND1 V_CPUCFGEND1(1U)
+
+#define S_CPUCFGEND0 0
+#define V_CPUCFGEND0(x) ((x) << S_CPUCFGEND0)
+#define F_CPUCFGEND0 V_CPUCFGEND0(1U)
+
+#define A_ARM_CPU_CP15_SDISABLE 0x47090
+
+#define S_CPUCP15SDISABLE3 3
+#define V_CPUCP15SDISABLE3(x) ((x) << S_CPUCP15SDISABLE3)
+#define F_CPUCP15SDISABLE3 V_CPUCP15SDISABLE3(1U)
+
+#define S_CPUCP15SDISABLE2 2
+#define V_CPUCP15SDISABLE2(x) ((x) << S_CPUCP15SDISABLE2)
+#define F_CPUCP15SDISABLE2 V_CPUCP15SDISABLE2(1U)
+
+#define S_CPUCP15SDISABLE1 1
+#define V_CPUCP15SDISABLE1(x) ((x) << S_CPUCP15SDISABLE1)
+#define F_CPUCP15SDISABLE1 V_CPUCP15SDISABLE1(1U)
+
+#define S_CPUCP15SDISABLE0 0
+#define V_CPUCP15SDISABLE0(x) ((x) << S_CPUCP15SDISABLE0)
+#define F_CPUCP15SDISABLE0 V_CPUCP15SDISABLE0(1U)
+
+#define A_ARM_CPU_CLUSTER_ID_AFF 0x47094
+
+#define S_CPUCLUSTERIDAFF2 8
+#define M_CPUCLUSTERIDAFF2 0xffU
+#define V_CPUCLUSTERIDAFF2(x) ((x) << S_CPUCLUSTERIDAFF2)
+#define G_CPUCLUSTERIDAFF2(x) (((x) >> S_CPUCLUSTERIDAFF2) & M_CPUCLUSTERIDAFF2)
+
+#define S_CPUCLUSTERIDAFF1 0
+#define M_CPUCLUSTERIDAFF1 0xffU
+#define V_CPUCLUSTERIDAFF1(x) ((x) << S_CPUCLUSTERIDAFF1)
+#define G_CPUCLUSTERIDAFF1(x) (((x) >> S_CPUCLUSTERIDAFF1) & M_CPUCLUSTERIDAFF1)
+
+#define A_ARM_CPU_CLK_CFG 0x47098
+
+#define S_CPUACINACTIVEM 1
+#define V_CPUACINACTIVEM(x) ((x) << S_CPUACINACTIVEM)
+#define F_CPUACINACTIVEM V_CPUACINACTIVEM(1U)
+
+#define S_CPUACLKENM 0
+#define V_CPUACLKENM(x) ((x) << S_CPUACLKENM)
+#define F_CPUACLKENM V_CPUACLKENM(1U)
+
+#define A_ARM_NVME_DB_EMU_INT_CAUSE 0x4709c
+
+#define S_INVALID_BRESP 3
+#define V_INVALID_BRESP(x) ((x) << S_INVALID_BRESP)
+#define F_INVALID_BRESP V_INVALID_BRESP(1U)
+
+#define S_DATA_LEN_OF 2
+#define V_DATA_LEN_OF(x) ((x) << S_DATA_LEN_OF)
+#define F_DATA_LEN_OF V_DATA_LEN_OF(1U)
+
+#define S_INVALID_EMU_ADDR 1
+#define V_INVALID_EMU_ADDR(x) ((x) << S_INVALID_EMU_ADDR)
+#define F_INVALID_EMU_ADDR V_INVALID_EMU_ADDR(1U)
+
+#define S_INVALID_AXI_ADDR_CFG 0
+#define V_INVALID_AXI_ADDR_CFG(x) ((x) << S_INVALID_AXI_ADDR_CFG)
+#define F_INVALID_AXI_ADDR_CFG V_INVALID_AXI_ADDR_CFG(1U)
+
+#define A_ARM_CS_RST 0x470c0
+
+#define S_ATCLKEN 9
+#define V_ATCLKEN(x) ((x) << S_ATCLKEN)
+#define F_ATCLKEN V_ATCLKEN(1U)
+
+#define S_CXAPBICRSTN 8
+#define V_CXAPBICRSTN(x) ((x) << S_CXAPBICRSTN)
+#define F_CXAPBICRSTN V_CXAPBICRSTN(1U)
+
+#define S_CSDBGEN 7
+#define V_CSDBGEN(x) ((x) << S_CSDBGEN)
+#define F_CSDBGEN V_CSDBGEN(1U)
+
+#define S_JTAGNPOTRST 6
+#define V_JTAGNPOTRST(x) ((x) << S_JTAGNPOTRST)
+#define F_JTAGNPOTRST V_JTAGNPOTRST(1U)
+
+#define S_JTAGNTRST 5
+#define V_JTAGNTRST(x) ((x) << S_JTAGNTRST)
+#define F_JTAGNTRST V_JTAGNTRST(1U)
+
+#define S_PADDR31S0 4
+#define V_PADDR31S0(x) ((x) << S_PADDR31S0)
+#define F_PADDR31S0 V_PADDR31S0(1U)
+
+#define S_CTICLKEN 3
+#define V_CTICLKEN(x) ((x) << S_CTICLKEN)
+#define F_CTICLKEN V_CTICLKEN(1U)
+
+#define S_PCLKENDBG 2
+#define V_PCLKENDBG(x) ((x) << S_PCLKENDBG)
+#define F_PCLKENDBG V_PCLKENDBG(1U)
+
+#define S_CPU_NIDEN 1
+#define V_CPU_NIDEN(x) ((x) << S_CPU_NIDEN)
+#define F_CPU_NIDEN V_CPU_NIDEN(1U)
+
+#define S_CPU_DBGEN 0
+#define V_CPU_DBGEN(x) ((x) << S_CPU_DBGEN)
+#define F_CPU_DBGEN V_CPU_DBGEN(1U)
+
+#define A_ARM_CS_ADDRL 0x470c4
+#define A_ARM_CS_ADDRH 0x470c8
+#define A_ARM_CS_DFT_CONTROL 0x470cc
+
+#define S_DFTMBISTADDR 5
+#define M_DFTMBISTADDR 0x7ffU
+#define V_DFTMBISTADDR(x) ((x) << S_DFTMBISTADDR)
+#define G_DFTMBISTADDR(x) (((x) >> S_DFTMBISTADDR) & M_DFTMBISTADDR)
+
+#define S_DFTMTESTON 3
+#define V_DFTMTESTON(x) ((x) << S_DFTMTESTON)
+#define F_DFTMTESTON V_DFTMTESTON(1U)
+
+#define S_DFTMBISTCE 2
+#define V_DFTMBISTCE(x) ((x) << S_DFTMBISTCE)
+#define F_DFTMBISTCE V_DFTMBISTCE(1U)
+
+#define S_DFTMBITWR 1
+#define V_DFTMBITWR(x) ((x) << S_DFTMBITWR)
+#define F_DFTMBITWR V_DFTMBITWR(1U)
+
+#define S_DFTSE 0
+#define V_DFTSE(x) ((x) << S_DFTSE)
+#define F_DFTSE V_DFTSE(1U)
+
+#define A_ARM_CS_DFT_IN 0x470d0
+#define A_ARM_CS_DFT_OUT 0x470d4
+#define A_ARM_CPU_EVENT_I 0x47100
+
+#define S_CPUEVENTI 0
+#define V_CPUEVENTI(x) ((x) << S_CPUEVENTI)
+#define F_CPUEVENTI V_CPUEVENTI(1U)
+
+#define A_ARM_CPU_EVENT_O 0x47104
+
+#define S_CPUEVENTO 0
+#define V_CPUEVENTO(x) ((x) << S_CPUEVENTO)
+#define F_CPUEVENTO V_CPUEVENTO(1U)
+
+#define A_ARM_CPU_CLR_EXMON_REQ 0x47108
+
+#define S_CPUCLREXMONREQ 0
+#define V_CPUCLREXMONREQ(x) ((x) << S_CPUCLREXMONREQ)
+#define F_CPUCLREXMONREQ V_CPUCLREXMONREQ(1U)
+
+#define A_ARM_CPU_CLR_EXMON_ACK 0x4710c
+
+#define S_CPUCLREXMONACK 0
+#define V_CPUCLREXMONACK(x) ((x) << S_CPUCLREXMONACK)
+#define F_CPUCLREXMONACK V_CPUCLREXMONACK(1U)
+
+#define A_ARM_UART_MSTR_RXD 0x47110
+#define A_ARM_UART_MSTR_RXC 0x47114
+
+#define S_UART_MSTR_RXC 0
+#define V_UART_MSTR_RXC(x) ((x) << S_UART_MSTR_RXC)
+#define F_UART_MSTR_RXC V_UART_MSTR_RXC(1U)
+
+#define A_ARM_UART_MSTR_TXD 0x47118
+#define A_ARM_UART_MSTR_TXC 0x4711c
+
+#define S_T7_INT 1
+#define V_T7_INT(x) ((x) << S_T7_INT)
+#define F_T7_INT V_T7_INT(1U)
+
+#define S_UART_MSTC_TXC 0
+#define V_UART_MSTC_TXC(x) ((x) << S_UART_MSTC_TXC)
+#define F_UART_MSTC_TXC V_UART_MSTC_TXC(1U)
+
+#define A_ARM_UART_SLV_SEL 0x47120
+
+#define S_UART_SLV_SEL 0
+#define V_UART_SLV_SEL(x) ((x) << S_UART_SLV_SEL)
+#define F_UART_SLV_SEL V_UART_SLV_SEL(1U)
+
+#define A_ARM_CPU_PERIPH_BASE 0x47124
+#define A_ARM_PERR_INT_ENB2 0x47128
+#define A_ARM_PERR_ENABLE2 0x4712c
+#define A_ARM_UART_CONFIG 0x47130
+#define A_ARM_UART_STAT 0x47134
+
+#define S_RSV1 6
+#define M_RSV1 0x3ffffffU
+#define V_RSV1(x) ((x) << S_RSV1)
+#define G_RSV1(x) (((x) >> S_RSV1) & M_RSV1)
+
+#define S_RXFRMERR 5
+#define V_RXFRMERR(x) ((x) << S_RXFRMERR)
+#define F_RXFRMERR V_RXFRMERR(1U)
+
+#define S_RXPARERR 4
+#define V_RXPARERR(x) ((x) << S_RXPARERR)
+#define F_RXPARERR V_RXPARERR(1U)
+
+#define S_RXOVRN 3
+#define V_RXOVRN(x) ((x) << S_RXOVRN)
+#define F_RXOVRN V_RXOVRN(1U)
+
+#define S_CTL_RXRDY 2
+#define V_CTL_RXRDY(x) ((x) << S_CTL_RXRDY)
+#define F_CTL_RXRDY V_CTL_RXRDY(1U)
+
+#define S_TXOVRN 1
+#define V_TXOVRN(x) ((x) << S_TXOVRN)
+#define F_TXOVRN V_TXOVRN(1U)
+
+#define S_CTL_TXRDY 0
+#define V_CTL_TXRDY(x) ((x) << S_CTL_TXRDY)
+#define F_CTL_TXRDY V_CTL_TXRDY(1U)
+
+#define A_ARM_UART_TX_DATA 0x47138
+
+#define S_TX_DATA 0
+#define M_TX_DATA 0xffU
+#define V_TX_DATA(x) ((x) << S_TX_DATA)
+#define G_TX_DATA(x) (((x) >> S_TX_DATA) & M_TX_DATA)
+
+#define A_ARM_UART_RX_DATA 0x4713c
+
+#define S_RX_DATA 0
+#define M_RX_DATA 0xffU
+#define V_RX_DATA(x) ((x) << S_RX_DATA)
+#define G_RX_DATA(x) (((x) >> S_RX_DATA) & M_RX_DATA)
+
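+/*
+ * Usage sketch: the CTL_TXRDY/CTL_RXRDY status bits suggest a simple
+ * polled protocol (assumed here, not taken from documentation); with
+ * the driver's t4_read_reg()/t4_write_reg() accessors it would look
+ * like:
+ *
+ *	while (!(t4_read_reg(sc, A_ARM_UART_STAT) & F_CTL_TXRDY))
+ *		;
+ *	t4_write_reg(sc, A_ARM_UART_TX_DATA, V_TX_DATA(c));
+ *
+ *	if (t4_read_reg(sc, A_ARM_UART_STAT) & F_CTL_RXRDY)
+ *		c = G_RX_DATA(t4_read_reg(sc, A_ARM_UART_RX_DATA));
+ */
+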
+#define A_ARM_UART_DBG0 0x47140
+#define A_ARM_UART_DBG1 0x47144
+#define A_ARM_UART_DBG2 0x47148
+#define A_ARM_UART_DBG3 0x4714c
+#define A_ARM_ARM_CPU_PC0 0x47150
+#define A_ARM_ARM_CPU_PC1 0x47154
+#define A_ARM_ARM_UART_INT_CAUSE 0x47158
+
+#define S_RX_FIFO_NOT_EMPTY 1
+#define V_RX_FIFO_NOT_EMPTY(x) ((x) << S_RX_FIFO_NOT_EMPTY)
+#define F_RX_FIFO_NOT_EMPTY V_RX_FIFO_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_EMPTY 0
+#define V_TX_FIFO_EMPTY(x) ((x) << S_TX_FIFO_EMPTY)
+#define F_TX_FIFO_EMPTY V_TX_FIFO_EMPTY(1U)
+
+#define A_ARM_ARM_UART_INT_EN 0x4715c
+
+#define S_RX_FIFO_INT_NOT_EMPTY 1
+#define V_RX_FIFO_INT_NOT_EMPTY(x) ((x) << S_RX_FIFO_INT_NOT_EMPTY)
+#define F_RX_FIFO_INT_NOT_EMPTY V_RX_FIFO_INT_NOT_EMPTY(1U)
+
+#define S_TX_FIFO_INT_EMPTY 0
+#define V_TX_FIFO_INT_EMPTY(x) ((x) << S_TX_FIFO_INT_EMPTY)
+#define F_TX_FIFO_INT_EMPTY V_TX_FIFO_INT_EMPTY(1U)
+
+#define A_ARM_ARM_UART_GPIO_SEL 0x47160
+
+#define S_PC_SEL 1
+#define M_PC_SEL 0x7U
+#define V_PC_SEL(x) ((x) << S_PC_SEL)
+#define G_PC_SEL(x) (((x) >> S_PC_SEL) & M_PC_SEL)
+
+#define S_UART_GPIO_SEL 0
+#define V_UART_GPIO_SEL(x) ((x) << S_UART_GPIO_SEL)
+#define F_UART_GPIO_SEL V_UART_GPIO_SEL(1U)
+
+#define A_ARM_ARM_SCRATCH_PAD0 0x47164
+#define A_ARM_ARM_SCRATCH_PAD1 0x47168
+#define A_ARM_ARM_SCRATCH_PAD2 0x4716c
+#define A_ARM_PERR_INT_CAUSE0 0x47170
+
+#define S_INIC_WRDATA_FIFO_PERR 31
+#define V_INIC_WRDATA_FIFO_PERR(x) ((x) << S_INIC_WRDATA_FIFO_PERR)
+#define F_INIC_WRDATA_FIFO_PERR V_INIC_WRDATA_FIFO_PERR(1U)
+
+#define S_INIC_RDATA_FIFO_PERR 30
+#define V_INIC_RDATA_FIFO_PERR(x) ((x) << S_INIC_RDATA_FIFO_PERR)
+#define F_INIC_RDATA_FIFO_PERR V_INIC_RDATA_FIFO_PERR(1U)
+
+#define S_MSI_MEM_PERR 29
+#define V_MSI_MEM_PERR(x) ((x) << S_MSI_MEM_PERR)
+#define F_MSI_MEM_PERR V_MSI_MEM_PERR(1U)
+
+#define S_ARM_DB_SRAM_PERR 27
+#define M_ARM_DB_SRAM_PERR 0x3U
+#define V_ARM_DB_SRAM_PERR(x) ((x) << S_ARM_DB_SRAM_PERR)
+#define G_ARM_DB_SRAM_PERR(x) (((x) >> S_ARM_DB_SRAM_PERR) & M_ARM_DB_SRAM_PERR)
+
+#define S_EMMC_FIFOPARINT 26
+#define V_EMMC_FIFOPARINT(x) ((x) << S_EMMC_FIFOPARINT)
+#define F_EMMC_FIFOPARINT V_EMMC_FIFOPARINT(1U)
+
+#define S_ICB_RAM_PERR 25
+#define V_ICB_RAM_PERR(x) ((x) << S_ICB_RAM_PERR)
+#define F_ICB_RAM_PERR V_ICB_RAM_PERR(1U)
+
+#define S_MESS2AXI4_WRFIFO_PERR 24
+#define V_MESS2AXI4_WRFIFO_PERR(x) ((x) << S_MESS2AXI4_WRFIFO_PERR)
+#define F_MESS2AXI4_WRFIFO_PERR V_MESS2AXI4_WRFIFO_PERR(1U)
+
+#define S_RC_WFIFO_OUTPERR 23
+#define V_RC_WFIFO_OUTPERR(x) ((x) << S_RC_WFIFO_OUTPERR)
+#define F_RC_WFIFO_OUTPERR V_RC_WFIFO_OUTPERR(1U)
+
+#define S_RC_SRAM_PERR 21
+#define M_RC_SRAM_PERR 0x3U
+#define V_RC_SRAM_PERR(x) ((x) << S_RC_SRAM_PERR)
+#define G_RC_SRAM_PERR(x) (((x) >> S_RC_SRAM_PERR) & M_RC_SRAM_PERR)
+
+#define S_MSI_FIFO_PAR_ERR 20
+#define V_MSI_FIFO_PAR_ERR(x) ((x) << S_MSI_FIFO_PAR_ERR)
+#define F_MSI_FIFO_PAR_ERR V_MSI_FIFO_PAR_ERR(1U)
+
+#define S_INIC2MA_INTFPERR 19
+#define V_INIC2MA_INTFPERR(x) ((x) << S_INIC2MA_INTFPERR)
+#define F_INIC2MA_INTFPERR V_INIC2MA_INTFPERR(1U)
+
+#define S_RDATAFIFO0_PERR 18
+#define V_RDATAFIFO0_PERR(x) ((x) << S_RDATAFIFO0_PERR)
+#define F_RDATAFIFO0_PERR V_RDATAFIFO0_PERR(1U)
+
+#define S_RDATAFIFO1_PERR 17
+#define V_RDATAFIFO1_PERR(x) ((x) << S_RDATAFIFO1_PERR)
+#define F_RDATAFIFO1_PERR V_RDATAFIFO1_PERR(1U)
+
+#define S_WRDATAFIFO0_PERR 16
+#define V_WRDATAFIFO0_PERR(x) ((x) << S_WRDATAFIFO0_PERR)
+#define F_WRDATAFIFO0_PERR V_WRDATAFIFO0_PERR(1U)
+
+#define S_WRDATAFIFO1_PERR 15
+#define V_WRDATAFIFO1_PERR(x) ((x) << S_WRDATAFIFO1_PERR)
+#define F_WRDATAFIFO1_PERR V_WRDATAFIFO1_PERR(1U)
+
+#define S_WR512DATAFIFO0_PERR 14
+#define V_WR512DATAFIFO0_PERR(x) ((x) << S_WR512DATAFIFO0_PERR)
+#define F_WR512DATAFIFO0_PERR V_WR512DATAFIFO0_PERR(1U)
+
+#define S_WR512DATAFIFO1_PERR 13
+#define V_WR512DATAFIFO1_PERR(x) ((x) << S_WR512DATAFIFO1_PERR)
+#define F_WR512DATAFIFO1_PERR V_WR512DATAFIFO1_PERR(1U)
+
+#define S_ROBUFF_PARERR3 12
+#define V_ROBUFF_PARERR3(x) ((x) << S_ROBUFF_PARERR3)
+#define F_ROBUFF_PARERR3 V_ROBUFF_PARERR3(1U)
+
+#define S_ROBUFF_PARERR2 11
+#define V_ROBUFF_PARERR2(x) ((x) << S_ROBUFF_PARERR2)
+#define F_ROBUFF_PARERR2 V_ROBUFF_PARERR2(1U)
+
+#define S_ROBUFF_PARERR1 10
+#define V_ROBUFF_PARERR1(x) ((x) << S_ROBUFF_PARERR1)
+#define F_ROBUFF_PARERR1 V_ROBUFF_PARERR1(1U)
+
+#define S_ROBUFF_PARERR0 9
+#define V_ROBUFF_PARERR0(x) ((x) << S_ROBUFF_PARERR0)
+#define F_ROBUFF_PARERR0 V_ROBUFF_PARERR0(1U)
+
+#define S_MA2AXI_REQDATAPARERR 8
+#define V_MA2AXI_REQDATAPARERR(x) ((x) << S_MA2AXI_REQDATAPARERR)
+#define F_MA2AXI_REQDATAPARERR V_MA2AXI_REQDATAPARERR(1U)
+
+#define S_MA2AXI_REQCTLPARERR 7
+#define V_MA2AXI_REQCTLPARERR(x) ((x) << S_MA2AXI_REQCTLPARERR)
+#define F_MA2AXI_REQCTLPARERR V_MA2AXI_REQCTLPARERR(1U)
+
+#define S_MA_RSPPERR 6
+#define V_MA_RSPPERR(x) ((x) << S_MA_RSPPERR)
+#define F_MA_RSPPERR V_MA_RSPPERR(1U)
+
+#define S_PCIE2MA_REQCTLPARERR 5
+#define V_PCIE2MA_REQCTLPARERR(x) ((x) << S_PCIE2MA_REQCTLPARERR)
+#define F_PCIE2MA_REQCTLPARERR V_PCIE2MA_REQCTLPARERR(1U)
+
+#define S_PCIE2MA_REQDATAPARERR 4
+#define V_PCIE2MA_REQDATAPARERR(x) ((x) << S_PCIE2MA_REQDATAPARERR)
+#define F_PCIE2MA_REQDATAPARERR V_PCIE2MA_REQDATAPARERR(1U)
+
+#define S_INIC2MA_REQCTLPARERR 3
+#define V_INIC2MA_REQCTLPARERR(x) ((x) << S_INIC2MA_REQCTLPARERR)
+#define F_INIC2MA_REQCTLPARERR V_INIC2MA_REQCTLPARERR(1U)
+
+#define S_INIC2MA_REQDATAPARERR 2
+#define V_INIC2MA_REQDATAPARERR(x) ((x) << S_INIC2MA_REQDATAPARERR)
+#define F_INIC2MA_REQDATAPARERR V_INIC2MA_REQDATAPARERR(1U)
+
+#define S_MA_RSPUE 1
+#define V_MA_RSPUE(x) ((x) << S_MA_RSPUE)
+#define F_MA_RSPUE V_MA_RSPUE(1U)
+
+#define S_APB2PL_RSPDATAPERR 0
+#define V_APB2PL_RSPDATAPERR(x) ((x) << S_APB2PL_RSPDATAPERR)
+#define F_APB2PL_RSPDATAPERR V_APB2PL_RSPDATAPERR(1U)
+
+#define A_ARM_PERR_INT_ENB0 0x47174
+#define A_ARM_SCRATCH_PAD3 0x47178
+
+#define S_ECO_43187 31
+#define V_ECO_43187(x) ((x) << S_ECO_43187)
+#define F_ECO_43187 V_ECO_43187(1U)
+
+#define S_TIMER_SEL 28
+#define M_TIMER_SEL 0x7U
+#define V_TIMER_SEL(x) ((x) << S_TIMER_SEL)
+#define G_TIMER_SEL(x) (((x) >> S_TIMER_SEL) & M_TIMER_SEL)
+
+#define S_TIMER 4
+#define M_TIMER 0xffffffU
+#define V_TIMER(x) ((x) << S_TIMER)
+#define G_TIMER(x) (((x) >> S_TIMER) & M_TIMER)
+
+#define S_T7_1_INT 0
+#define M_T7_1_INT 0x3U
+#define V_T7_1_INT(x) ((x) << S_T7_1_INT)
+#define G_T7_1_INT(x) (((x) >> S_T7_1_INT) & M_T7_1_INT)
+
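+/*
+ * Usage sketch: A_ARM_SCRATCH_PAD3 packs four software-defined fields
+ * into one word.  Round-tripping them is plain V_/G_ macro arithmetic
+ * (the values below are arbitrary):
+ *
+ *	uint32_t v = F_ECO_43187 | V_TIMER_SEL(3) | V_TIMER(0x1234) |
+ *	    V_T7_1_INT(1);
+ *
+ *	MPASS(G_TIMER_SEL(v) == 3);
+ *	MPASS(G_TIMER(v) == 0x1234);
+ *	MPASS(G_T7_1_INT(v) == 1);
+ */
+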
+#define A_ARM_PERR_INT_CAUSE2 0x4717c
+
+#define S_INIC_WSTRB_FIFO_PERR 31
+#define V_INIC_WSTRB_FIFO_PERR(x) ((x) << S_INIC_WSTRB_FIFO_PERR)
+#define F_INIC_WSTRB_FIFO_PERR V_INIC_WSTRB_FIFO_PERR(1U)
+
+#define S_INIC_BID_FIFO_PERR 30
+#define V_INIC_BID_FIFO_PERR(x) ((x) << S_INIC_BID_FIFO_PERR)
+#define F_INIC_BID_FIFO_PERR V_INIC_BID_FIFO_PERR(1U)
+
+#define S_CC_SRAM_PKA_PERR 29
+#define V_CC_SRAM_PKA_PERR(x) ((x) << S_CC_SRAM_PKA_PERR)
+#define F_CC_SRAM_PKA_PERR V_CC_SRAM_PKA_PERR(1U)
+
+#define S_CC_SRAM_SEC_PERR 28
+#define V_CC_SRAM_SEC_PERR(x) ((x) << S_CC_SRAM_SEC_PERR)
+#define F_CC_SRAM_SEC_PERR V_CC_SRAM_SEC_PERR(1U)
+
+#define S_MESS2AXI4_PARERR 27
+#define V_MESS2AXI4_PARERR(x) ((x) << S_MESS2AXI4_PARERR)
+#define F_MESS2AXI4_PARERR V_MESS2AXI4_PARERR(1U)
+
+#define S_CCI2INIC_INTF_PARERR 26
+#define V_CCI2INIC_INTF_PARERR(x) ((x) << S_CCI2INIC_INTF_PARERR)
+#define F_CCI2INIC_INTF_PARERR V_CCI2INIC_INTF_PARERR(1U)
+
+#define A_ARM_MA2AXI_AW_ATTR 0x47180
+
+#define S_AWLOCKR1 29
+#define V_AWLOCKR1(x) ((x) << S_AWLOCKR1)
+#define F_AWLOCKR1 V_AWLOCKR1(1U)
+
+#define S_AWCACHER1 25
+#define M_AWCACHER1 0xfU
+#define V_AWCACHER1(x) ((x) << S_AWCACHER1)
+#define G_AWCACHER1(x) (((x) >> S_AWCACHER1) & M_AWCACHER1)
+
+#define S_AWPROTR1 21
+#define M_AWPROTR1 0xfU
+#define V_AWPROTR1(x) ((x) << S_AWPROTR1)
+#define G_AWPROTR1(x) (((x) >> S_AWPROTR1) & M_AWPROTR1)
+
+#define S_AWSNOOPR1 18
+#define M_AWSNOOPR1 0x7U
+#define V_AWSNOOPR1(x) ((x) << S_AWSNOOPR1)
+#define G_AWSNOOPR1(x) (((x) >> S_AWSNOOPR1) & M_AWSNOOPR1)
+
+#define S_AWDOMAINR1 16
+#define M_AWDOMAINR1 0x3U
+#define V_AWDOMAINR1(x) ((x) << S_AWDOMAINR1)
+#define G_AWDOMAINR1(x) (((x) >> S_AWDOMAINR1) & M_AWDOMAINR1)
+
+#define S_AWLOCKR0 13
+#define V_AWLOCKR0(x) ((x) << S_AWLOCKR0)
+#define F_AWLOCKR0 V_AWLOCKR0(1U)
+
+#define S_AWCACHER0 9
+#define M_AWCACHER0 0xfU
+#define V_AWCACHER0(x) ((x) << S_AWCACHER0)
+#define G_AWCACHER0(x) (((x) >> S_AWCACHER0) & M_AWCACHER0)
+
+#define S_AWPROTR0 5
+#define M_AWPROTR0 0xfU
+#define V_AWPROTR0(x) ((x) << S_AWPROTR0)
+#define G_AWPROTR0(x) (((x) >> S_AWPROTR0) & M_AWPROTR0)
+
+#define S_AWSNOOPR0 2
+#define M_AWSNOOPR0 0x7U
+#define V_AWSNOOPR0(x) ((x) << S_AWSNOOPR0)
+#define G_AWSNOOPR0(x) (((x) >> S_AWSNOOPR0) & M_AWSNOOPR0)
+
+#define S_AWDOMAINR0 0
+#define M_AWDOMAINR0 0x3U
+#define V_AWDOMAINR0(x) ((x) << S_AWDOMAINR0)
+#define G_AWDOMAINR0(x) (((x) >> S_AWDOMAINR0) & M_AWDOMAINR0)
+
+#define A_ARM_MA2AXI_AR_ATTR 0x47184
+
+#define S_ARLOCKR1 29
+#define V_ARLOCKR1(x) ((x) << S_ARLOCKR1)
+#define F_ARLOCKR1 V_ARLOCKR1(1U)
+
+#define S_ARCACHER1 25
+#define M_ARCACHER1 0xfU
+#define V_ARCACHER1(x) ((x) << S_ARCACHER1)
+#define G_ARCACHER1(x) (((x) >> S_ARCACHER1) & M_ARCACHER1)
+
+#define S_ARPROTR1 21
+#define M_ARPROTR1 0xfU
+#define V_ARPROTR1(x) ((x) << S_ARPROTR1)
+#define G_ARPROTR1(x) (((x) >> S_ARPROTR1) & M_ARPROTR1)
+
+#define S_ARSNOOPR1 18
+#define M_ARSNOOPR1 0x7U
+#define V_ARSNOOPR1(x) ((x) << S_ARSNOOPR1)
+#define G_ARSNOOPR1(x) (((x) >> S_ARSNOOPR1) & M_ARSNOOPR1)
+
+#define S_ARDOMAINR1 16
+#define M_ARDOMAINR1 0x3U
+#define V_ARDOMAINR1(x) ((x) << S_ARDOMAINR1)
+#define G_ARDOMAINR1(x) (((x) >> S_ARDOMAINR1) & M_ARDOMAINR1)
+
+#define S_ARLOCKR0 13
+#define V_ARLOCKR0(x) ((x) << S_ARLOCKR0)
+#define F_ARLOCKR0 V_ARLOCKR0(1U)
+
+#define S_ARCACHER0 9
+#define M_ARCACHER0 0xfU
+#define V_ARCACHER0(x) ((x) << S_ARCACHER0)
+#define G_ARCACHER0(x) (((x) >> S_ARCACHER0) & M_ARCACHER0)
+
+#define S_ARPROTR0 5
+#define M_ARPROTR0 0xfU
+#define V_ARPROTR0(x) ((x) << S_ARPROTR0)
+#define G_ARPROTR0(x) (((x) >> S_ARPROTR0) & M_ARPROTR0)
+
+#define S_ARSNOOPR0 2
+#define M_ARSNOOPR0 0x7U
+#define V_ARSNOOPR0(x) ((x) << S_ARSNOOPR0)
+#define G_ARSNOOPR0(x) (((x) >> S_ARSNOOPR0) & M_ARSNOOPR0)
+
+#define S_ARDOMAINR0 0
+#define M_ARDOMAINR0 0x3U
+#define V_ARDOMAINR0(x) ((x) << S_ARDOMAINR0)
+#define G_ARDOMAINR0(x) (((x) >> S_ARDOMAINR0) & M_ARDOMAINR0)
+
+#define A_ARM_MA2AXI_SNOOP_RGN 0x47188
+
+#define S_SNOOP_END 16
+#define M_SNOOP_END 0xffffU
+#define V_SNOOP_END(x) ((x) << S_SNOOP_END)
+#define G_SNOOP_END(x) (((x) >> S_SNOOP_END) & M_SNOOP_END)
+
+#define S_SNOOP_START 0
+#define M_SNOOP_START 0xffffU
+#define V_SNOOP_START(x) ((x) << S_SNOOP_START)
+#define G_SNOOP_START(x) (((x) >> S_SNOOP_START) & M_SNOOP_START)
+
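+/*
+ * Usage sketch: the snoop region is programmed as a 16-bit start/end
+ * pair in one word; the units of the bounds are assumed (likely an
+ * address-window granule, per the register name):
+ *
+ *	t4_write_reg(sc, A_ARM_MA2AXI_SNOOP_RGN,
+ *	    V_SNOOP_END(end) | V_SNOOP_START(start));
+ */
+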
+#define A_ARM_PERIPHERAL_INT_CAUSE 0x4718c
+
+#define S_TIMER_INT 5
+#define V_TIMER_INT(x) ((x) << S_TIMER_INT)
+#define F_TIMER_INT V_TIMER_INT(1U)
+
+#define S_NVME_INT 4
+#define V_NVME_INT(x) ((x) << S_NVME_INT)
+#define F_NVME_INT V_NVME_INT(1U)
+
+#define S_EMMC_WAKEUP_INT 3
+#define V_EMMC_WAKEUP_INT(x) ((x) << S_EMMC_WAKEUP_INT)
+#define F_EMMC_WAKEUP_INT V_EMMC_WAKEUP_INT(1U)
+
+#define S_EMMC_INT 2
+#define V_EMMC_INT(x) ((x) << S_EMMC_INT)
+#define F_EMMC_INT V_EMMC_INT(1U)
+
+#define S_USB_MC_INT 1
+#define V_USB_MC_INT(x) ((x) << S_USB_MC_INT)
+#define F_USB_MC_INT V_USB_MC_INT(1U)
+
+#define S_USB_DMA_INT 0
+#define V_USB_DMA_INT(x) ((x) << S_USB_DMA_INT)
+#define F_USB_DMA_INT V_USB_DMA_INT(1U)
+
+#define A_ARM_SCRATCH_PAD4 0x47190
+
+#define S_PAD4 15
+#define M_PAD4 0x1ffffU
+#define V_PAD4(x) ((x) << S_PAD4)
+#define G_PAD4(x) (((x) >> S_PAD4) & M_PAD4)
+
+#define S_ARM_DB_CNT 0
+#define M_ARM_DB_CNT 0x7fffU
+#define V_ARM_DB_CNT(x) ((x) << S_ARM_DB_CNT)
+#define G_ARM_DB_CNT(x) (((x) >> S_ARM_DB_CNT) & M_ARM_DB_CNT)
+
+#define A_ARM_SCRATCH_PAD5 0x47194
+#define A_ARM_SCRATCH_PAD6 0x47198
+#define A_ARM_SCRATCH_PAD7 0x4719c
+#define A_ARM_NVME_DB_EMU_INDEX 0x471a0
+#define A_ARM_NVME_DB_EMU_REGION_CTL 0x471a4
+
+#define S_WINDOW_EN 4
+#define V_WINDOW_EN(x) ((x) << S_WINDOW_EN)
+#define F_WINDOW_EN V_WINDOW_EN(1U)
+
+#define S_RGN2_INT_EN 3
+#define V_RGN2_INT_EN(x) ((x) << S_RGN2_INT_EN)
+#define F_RGN2_INT_EN V_RGN2_INT_EN(1U)
+
+#define S_RGN1_INT_EN 2
+#define V_RGN1_INT_EN(x) ((x) << S_RGN1_INT_EN)
+#define F_RGN1_INT_EN V_RGN1_INT_EN(1U)
+
+#define S_QUEUE_EN 1
+#define V_QUEUE_EN(x) ((x) << S_QUEUE_EN)
+#define F_QUEUE_EN V_QUEUE_EN(1U)
+
+#define S_RGN0_INT_EN 0
+#define V_RGN0_INT_EN(x) ((x) << S_RGN0_INT_EN)
+#define F_RGN0_INT_EN V_RGN0_INT_EN(1U)
+
+#define A_ARM_NVME_DB_EMU_DEVICE_CTL 0x471a8
+
+#define S_DEVICE_SIZE 8
+#define M_DEVICE_SIZE 0xfU
+#define V_DEVICE_SIZE(x) ((x) << S_DEVICE_SIZE)
+#define G_DEVICE_SIZE(x) (((x) >> S_DEVICE_SIZE) & M_DEVICE_SIZE)
+
+#define S_RGN1_SIZE 4
+#define M_RGN1_SIZE 0xfU
+#define V_RGN1_SIZE(x) ((x) << S_RGN1_SIZE)
+#define G_RGN1_SIZE(x) (((x) >> S_RGN1_SIZE) & M_RGN1_SIZE)
+
+#define S_RGN0_SIZE 0
+#define M_RGN0_SIZE 0xfU
+#define V_RGN0_SIZE(x) ((x) << S_RGN0_SIZE)
+#define G_RGN0_SIZE(x) (((x) >> S_RGN0_SIZE) & M_RGN0_SIZE)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_START_ADDR 0x471b0
+
+#define S_T7_4_ADDR 0
+#define M_T7_4_ADDR 0xfffffffU
+#define V_T7_4_ADDR(x) ((x) << S_T7_4_ADDR)
+#define G_T7_4_ADDR(x) (((x) >> S_T7_4_ADDR) & M_T7_4_ADDR)
+
+#define A_ARM_NVME_DB_EMU_WINDOW_END_ADDR 0x471b4
+#define A_ARM_NVME_DB_EMU_QBASE_ADDR 0x471b8
+#define A_ARM_NVME_DB_EMU_QUEUE_CID 0x471bc
+
+#define S_T7_CID 0
+#define M_T7_CID 0x1ffffU
+#define V_T7_CID(x) ((x) << S_T7_CID)
+#define G_T7_CID(x) (((x) >> S_T7_CID) & M_T7_CID)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL 0x471c0
+
+#define S_INT_EN 27
+#define V_INT_EN(x) ((x) << S_INT_EN)
+#define F_INT_EN V_INT_EN(1U)
+
+#define S_THRESHOLD 10
+#define M_THRESHOLD 0x1ffffU
+#define V_THRESHOLD(x) ((x) << S_THRESHOLD)
+#define G_THRESHOLD(x) (((x) >> S_THRESHOLD) & M_THRESHOLD)
+
+#define S_T7_1_SIZE 0
+#define M_T7_1_SIZE 0x3ffU
+#define V_T7_1_SIZE(x) ((x) << S_T7_1_SIZE)
+#define G_T7_1_SIZE(x) (((x) >> S_T7_1_SIZE) & M_T7_1_SIZE)
+
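+/*
+ * Usage sketch (field semantics inferred from the names only): arming
+ * a doorbell-emulation queue would program its entry count and
+ * interrupt threshold in one QUEUE_CTL write, e.g.
+ *
+ *	t4_write_reg(sc, A_ARM_NVME_DB_EMU_QUEUE_CTL,
+ *	    F_INT_EN | V_THRESHOLD(64) | V_T7_1_SIZE(256));
+ */
+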
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_L 0x471c4
+#define A_ARM_NVME_DB_EMU_MSIX_ADDR_H 0x471c8
+#define A_ARM_NVME_DB_EMU_MSIX_OFFSET 0x471cc
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_L 0x471d0
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_ADDR_H 0x471d4
+#define A_ARM_NVME_DB_EMU_QUEUE_MSIX_OFFSET 0x471d8
+#define A_ARM_CERR_INT_CAUSE0 0x471dc
+
+#define S_WRDATA_FIFO0_CERR 31
+#define V_WRDATA_FIFO0_CERR(x) ((x) << S_WRDATA_FIFO0_CERR)
+#define F_WRDATA_FIFO0_CERR V_WRDATA_FIFO0_CERR(1U)
+
+#define S_WRDATA_FIFO1_CERR 30
+#define V_WRDATA_FIFO1_CERR(x) ((x) << S_WRDATA_FIFO1_CERR)
+#define F_WRDATA_FIFO1_CERR V_WRDATA_FIFO1_CERR(1U)
+
+#define S_WR512DATAFIFO0_CERR 29
+#define V_WR512DATAFIFO0_CERR(x) ((x) << S_WR512DATAFIFO0_CERR)
+#define F_WR512DATAFIFO0_CERR V_WR512DATAFIFO0_CERR(1U)
+
+#define S_WR512DATAFIFO1_CERR 28
+#define V_WR512DATAFIFO1_CERR(x) ((x) << S_WR512DATAFIFO1_CERR)
+#define F_WR512DATAFIFO1_CERR V_WR512DATAFIFO1_CERR(1U)
+
+#define S_RDATAFIFO0_CERR 27
+#define V_RDATAFIFO0_CERR(x) ((x) << S_RDATAFIFO0_CERR)
+#define F_RDATAFIFO0_CERR V_RDATAFIFO0_CERR(1U)
+
+#define S_RDATAFIFO1_CERR 26
+#define V_RDATAFIFO1_CERR(x) ((x) << S_RDATAFIFO1_CERR)
+#define F_RDATAFIFO1_CERR V_RDATAFIFO1_CERR(1U)
+
+#define S_ROBUFF_CORERR0 25
+#define V_ROBUFF_CORERR0(x) ((x) << S_ROBUFF_CORERR0)
+#define F_ROBUFF_CORERR0 V_ROBUFF_CORERR0(1U)
+
+#define S_ROBUFF_CORERR1 24
+#define V_ROBUFF_CORERR1(x) ((x) << S_ROBUFF_CORERR1)
+#define F_ROBUFF_CORERR1 V_ROBUFF_CORERR1(1U)
+
+#define S_ROBUFF_CORERR2 23
+#define V_ROBUFF_CORERR2(x) ((x) << S_ROBUFF_CORERR2)
+#define F_ROBUFF_CORERR2 V_ROBUFF_CORERR2(1U)
+
+#define S_ROBUFF_CORERR3 22
+#define V_ROBUFF_CORERR3(x) ((x) << S_ROBUFF_CORERR3)
+#define F_ROBUFF_CORERR3 V_ROBUFF_CORERR3(1U)
+
+#define S_MA2AXI_RSPDATACORERR 21
+#define V_MA2AXI_RSPDATACORERR(x) ((x) << S_MA2AXI_RSPDATACORERR)
+#define F_MA2AXI_RSPDATACORERR V_MA2AXI_RSPDATACORERR(1U)
+
+#define S_RC_SRAM_CERR 19
+#define M_RC_SRAM_CERR 0x3U
+#define V_RC_SRAM_CERR(x) ((x) << S_RC_SRAM_CERR)
+#define G_RC_SRAM_CERR(x) (((x) >> S_RC_SRAM_CERR) & M_RC_SRAM_CERR)
+
+#define S_RC_WFIFO_OUTCERR 18
+#define V_RC_WFIFO_OUTCERR(x) ((x) << S_RC_WFIFO_OUTCERR)
+#define F_RC_WFIFO_OUTCERR V_RC_WFIFO_OUTCERR(1U)
+
+#define S_RC_RSPFIFO_CERR 17
+#define V_RC_RSPFIFO_CERR(x) ((x) << S_RC_RSPFIFO_CERR)
+#define F_RC_RSPFIFO_CERR V_RC_RSPFIFO_CERR(1U)
+
+#define S_MSI_MEM_CERR 16
+#define V_MSI_MEM_CERR(x) ((x) << S_MSI_MEM_CERR)
+#define F_MSI_MEM_CERR V_MSI_MEM_CERR(1U)
+
+#define S_INIC_WRDATA_FIFO_CERR 15
+#define V_INIC_WRDATA_FIFO_CERR(x) ((x) << S_INIC_WRDATA_FIFO_CERR)
+#define F_INIC_WRDATA_FIFO_CERR V_INIC_WRDATA_FIFO_CERR(1U)
+
+#define S_INIC_RDATAFIFO_CERR 14
+#define V_INIC_RDATAFIFO_CERR(x) ((x) << S_INIC_RDATAFIFO_CERR)
+#define F_INIC_RDATAFIFO_CERR V_INIC_RDATAFIFO_CERR(1U)
+
+#define S_ARM_DB_SRAM_CERR 12
+#define M_ARM_DB_SRAM_CERR 0x3U
+#define V_ARM_DB_SRAM_CERR(x) ((x) << S_ARM_DB_SRAM_CERR)
+#define G_ARM_DB_SRAM_CERR(x) (((x) >> S_ARM_DB_SRAM_CERR) & M_ARM_DB_SRAM_CERR)
+
+#define S_ICB_RAM_CERR 11
+#define V_ICB_RAM_CERR(x) ((x) << S_ICB_RAM_CERR)
+#define F_ICB_RAM_CERR V_ICB_RAM_CERR(1U)
+
+#define S_CC_SRAM_PKA_CERR 10
+#define V_CC_SRAM_PKA_CERR(x) ((x) << S_CC_SRAM_PKA_CERR)
+#define F_CC_SRAM_PKA_CERR V_CC_SRAM_PKA_CERR(1U)
+
+#define S_CC_SRAM_SEC_CERR 9
+#define V_CC_SRAM_SEC_CERR(x) ((x) << S_CC_SRAM_SEC_CERR)
+#define F_CC_SRAM_SEC_CERR V_CC_SRAM_SEC_CERR(1U)
+
+#define A_ARM_NVME_DB_EMU_QUEUE_CTL_2 0x471e0
+
+#define S_INTERRUPT_CLEAR 0
+#define V_INTERRUPT_CLEAR(x) ((x) << S_INTERRUPT_CLEAR)
+#define F_INTERRUPT_CLEAR V_INTERRUPT_CLEAR(1U)
+
+#define A_ARM_PERIPHERAL_INT_ENB 0x471e4
+#define A_ARM_CERR_INT_ENB0 0x471e8
+#define A_ARM_CPU_DBG_ROM_ADDR0 0x47200
+
+#define S_CPUDBGROMADDR0 0
+#define M_CPUDBGROMADDR0 0xfffffU
+#define V_CPUDBGROMADDR0(x) ((x) << S_CPUDBGROMADDR0)
+#define G_CPUDBGROMADDR0(x) (((x) >> S_CPUDBGROMADDR0) & M_CPUDBGROMADDR0)
+
+#define A_ARM_CPU_DBG_ROM_ADDR1 0x47204
+
+#define S_CPUDBGROMADDR1 0
+#define M_CPUDBGROMADDR1 0x3ffU
+#define V_CPUDBGROMADDR1(x) ((x) << S_CPUDBGROMADDR1)
+#define G_CPUDBGROMADDR1(x) (((x) >> S_CPUDBGROMADDR1) & M_CPUDBGROMADDR1)
+
+#define A_ARM_CPU_DBG_ROM_ADDR_VALID 0x47208
+
+#define S_CPUDBGROMADDRVALID 0
+#define V_CPUDBGROMADDRVALID(x) ((x) << S_CPUDBGROMADDRVALID)
+#define F_CPUDBGROMADDRVALID V_CPUDBGROMADDRVALID(1U)
+
+#define A_ARM_PERR_ENABLE0 0x4720c
+#define A_ARM_SRAM2_WRITE_DATA3 0x47210
+#define A_ARM_SRAM2_READ_DATA3 0x4721c
+#define A_ARM_CPU_DFT_CFG 0x47220
+
+#define S_CPUMBISTREQ 11
+#define V_CPUMBISTREQ(x) ((x) << S_CPUMBISTREQ)
+#define F_CPUMBISTREQ V_CPUMBISTREQ(1U)
+
+#define S_CPUMBISTRSTN 10
+#define V_CPUMBISTRSTN(x) ((x) << S_CPUMBISTRSTN)
+#define F_CPUMBISTRSTN V_CPUMBISTRSTN(1U)
+
+#define S_CPUDFTDFTSE 9
+#define V_CPUDFTDFTSE(x) ((x) << S_CPUDFTDFTSE)
+#define F_CPUDFTDFTSE V_CPUDFTDFTSE(1U)
+
+#define S_CPUDFTRSTDISABLE 8
+#define V_CPUDFTRSTDISABLE(x) ((x) << S_CPUDFTRSTDISABLE)
+#define F_CPUDFTRSTDISABLE V_CPUDFTRSTDISABLE(1U)
+
+#define S_CPUDFTRAMDISABLE 7
+#define V_CPUDFTRAMDISABLE(x) ((x) << S_CPUDFTRAMDISABLE)
+#define F_CPUDFTRAMDISABLE V_CPUDFTRAMDISABLE(1U)
+
+#define S_CPUDFTMCPDISABLE 6
+#define V_CPUDFTMCPDISABLE(x) ((x) << S_CPUDFTMCPDISABLE)
+#define F_CPUDFTMCPDISABLE V_CPUDFTMCPDISABLE(1U)
+
+#define S_CPUDFTL2CLKDISABLE 5
+#define V_CPUDFTL2CLKDISABLE(x) ((x) << S_CPUDFTL2CLKDISABLE)
+#define F_CPUDFTL2CLKDISABLE V_CPUDFTL2CLKDISABLE(1U)
+
+#define S_CPUDFTCLKDISABLE3 4
+#define V_CPUDFTCLKDISABLE3(x) ((x) << S_CPUDFTCLKDISABLE3)
+#define F_CPUDFTCLKDISABLE3 V_CPUDFTCLKDISABLE3(1U)
+
+#define S_CPUDFTCLKDISABLE2 3
+#define V_CPUDFTCLKDISABLE2(x) ((x) << S_CPUDFTCLKDISABLE2)
+#define F_CPUDFTCLKDISABLE2 V_CPUDFTCLKDISABLE2(1U)
+
+#define S_CPUDFTCLKDISABLE1 2
+#define V_CPUDFTCLKDISABLE1(x) ((x) << S_CPUDFTCLKDISABLE1)
+#define F_CPUDFTCLKDISABLE1 V_CPUDFTCLKDISABLE1(1U)
+
+#define S_CPUDFTCLKDISABLE0 1
+#define V_CPUDFTCLKDISABLE0(x) ((x) << S_CPUDFTCLKDISABLE0)
+#define F_CPUDFTCLKDISABLE0 V_CPUDFTCLKDISABLE0(1U)
+
+#define S_CPUDFTCLKBYPASS 0
+#define V_CPUDFTCLKBYPASS(x) ((x) << S_CPUDFTCLKBYPASS)
+#define F_CPUDFTCLKBYPASS V_CPUDFTCLKBYPASS(1U)
+
+#define A_ARM_APB_CFG 0x47224
+
+#define S_APB_CFG 0
+#define M_APB_CFG 0x3ffffU
+#define V_APB_CFG(x) ((x) << S_APB_CFG)
+#define G_APB_CFG(x) (((x) >> S_APB_CFG) & M_APB_CFG)
+
+#define A_ARM_EMMC_BUFS 0x47228
+
+#define S_EMMC_BUFS_OEN 2
+#define M_EMMC_BUFS_OEN 0x3U
+#define V_EMMC_BUFS_OEN(x) ((x) << S_EMMC_BUFS_OEN)
+#define G_EMMC_BUFS_OEN(x) (((x) >> S_EMMC_BUFS_OEN) & M_EMMC_BUFS_OEN)
+
+#define S_EMMC_BUFS_I 0
+#define M_EMMC_BUFS_I 0x3U
+#define V_EMMC_BUFS_I(x) ((x) << S_EMMC_BUFS_I)
+#define G_EMMC_BUFS_I(x) (((x) >> S_EMMC_BUFS_I) & M_EMMC_BUFS_I)
+
+#define A_ARM_SWP_EN 0x4722c
+#define A_ARM_ADB_PWR_DWN_REQ_N 0x47230
+
+#define S_ADBPWRDWNREQN 0
+#define V_ADBPWRDWNREQN(x) ((x) << S_ADBPWRDWNREQN)
+#define F_ADBPWRDWNREQN V_ADBPWRDWNREQN(1U)
+
+#define A_ARM_GIC_USER 0x47238
+
+#define S_CPU_GIC_USER 0
+#define M_CPU_GIC_USER 0x7fU
+#define V_CPU_GIC_USER(x) ((x) << S_CPU_GIC_USER)
+#define G_CPU_GIC_USER(x) (((x) >> S_CPU_GIC_USER) & M_CPU_GIC_USER)
+
+#define A_ARM_DBPROC_SRAM_TH_ADDR 0x47240
+
+#define S_DBPROC_TH_ADDR 0
+#define M_DBPROC_TH_ADDR 0x1ffU
+#define V_DBPROC_TH_ADDR(x) ((x) << S_DBPROC_TH_ADDR)
+#define G_DBPROC_TH_ADDR(x) (((x) >> S_DBPROC_TH_ADDR) & M_DBPROC_TH_ADDR)
+
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA0 0x47244
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA1 0x47248
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA2 0x4724c
+#define A_ARM_DBPROC_SRAM_TH_READ_DATA3 0x47250
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA0 0x47254
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA1 0x47258
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA2 0x4725c
+#define A_ARM_DBPROC_SRAM_TH_WR_DATA3 0x47260
+#define A_ARM_SWP_EN_2 0x47264
+
+#define S_SWP_EN_2 0
+#define M_SWP_EN_2 0x3U
+#define V_SWP_EN_2(x) ((x) << S_SWP_EN_2)
+#define G_SWP_EN_2(x) (((x) >> S_SWP_EN_2) & M_SWP_EN_2)
+
+#define A_ARM_GIC_ERR 0x47268
+
+#define S_ECC_FATAL 1
+#define V_ECC_FATAL(x) ((x) << S_ECC_FATAL)
+#define F_ECC_FATAL V_ECC_FATAL(1U)
+
+#define S_AXIM_ERR 0
+#define V_AXIM_ERR(x) ((x) << S_AXIM_ERR)
+#define F_AXIM_ERR V_AXIM_ERR(1U)
+
+#define A_ARM_CPU_STAT 0x4726c
+
+#define S_CPU_L2_QACTIVE 12
+#define V_CPU_L2_QACTIVE(x) ((x) << S_CPU_L2_QACTIVE)
+#define F_CPU_L2_QACTIVE V_CPU_L2_QACTIVE(1U)
+
+#define S_WAKEUPM_O_ADB 11
+#define V_WAKEUPM_O_ADB(x) ((x) << S_WAKEUPM_O_ADB)
+#define F_WAKEUPM_O_ADB V_WAKEUPM_O_ADB(1U)
+
+#define S_PWRQACTIVEM_ADB 10
+#define V_PWRQACTIVEM_ADB(x) ((x) << S_PWRQACTIVEM_ADB)
+#define F_PWRQACTIVEM_ADB V_PWRQACTIVEM_ADB(1U)
+
+#define S_CLKQACTIVEM_ADB 9
+#define V_CLKQACTIVEM_ADB(x) ((x) << S_CLKQACTIVEM_ADB)
+#define F_CLKQACTIVEM_ADB V_CLKQACTIVEM_ADB(1U)
+
+#define S_CLKQDENYM_ADB 8
+#define V_CLKQDENYM_ADB(x) ((x) << S_CLKQDENYM_ADB)
+#define F_CLKQDENYM_ADB V_CLKQDENYM_ADB(1U)
+
+#define S_CLKQACCEPTNM_ADB 7
+#define V_CLKQACCEPTNM_ADB(x) ((x) << S_CLKQACCEPTNM_ADB)
+#define F_CLKQACCEPTNM_ADB V_CLKQACCEPTNM_ADB(1U)
+
+#define S_WAKEUPS_O_ADB 6
+#define V_WAKEUPS_O_ADB(x) ((x) << S_WAKEUPS_O_ADB)
+#define F_WAKEUPS_O_ADB V_WAKEUPS_O_ADB(1U)
+
+#define S_PWRQACTIVES_ADB 5
+#define V_PWRQACTIVES_ADB(x) ((x) << S_PWRQACTIVES_ADB)
+#define F_PWRQACTIVES_ADB V_PWRQACTIVES_ADB(1U)
+
+#define S_CLKQACTIVES_ADB 4
+#define V_CLKQACTIVES_ADB(x) ((x) << S_CLKQACTIVES_ADB)
+#define F_CLKQACTIVES_ADB V_CLKQACTIVES_ADB(1U)
+
+#define S_CLKQDENYS_ADB 3
+#define V_CLKQDENYS_ADB(x) ((x) << S_CLKQDENYS_ADB)
+#define F_CLKQDENYS_ADB V_CLKQDENYS_ADB(1U)
+
+#define S_CLKQACCEPTNS_ADB 2
+#define V_CLKQACCEPTNS_ADB(x) ((x) << S_CLKQACCEPTNS_ADB)
+#define F_CLKQACCEPTNS_ADB V_CLKQACCEPTNS_ADB(1U)
+
+#define S_PWRQDENYS_ADB 1
+#define V_PWRQDENYS_ADB(x) ((x) << S_PWRQDENYS_ADB)
+#define F_PWRQDENYS_ADB V_PWRQDENYS_ADB(1U)
+
+#define S_PWRQACCEPTNS_ADB 0
+#define V_PWRQACCEPTNS_ADB(x) ((x) << S_PWRQACCEPTNS_ADB)
+#define F_PWRQACCEPTNS_ADB V_PWRQACCEPTNS_ADB(1U)
+
+#define A_ARM_DEBUG_INT_WRITE_DATA 0x47270
+
+#define S_DEBUG_INT_WRITE_DATA 0
+#define M_DEBUG_INT_WRITE_DATA 0xfffU
+#define V_DEBUG_INT_WRITE_DATA(x) ((x) << S_DEBUG_INT_WRITE_DATA)
+#define G_DEBUG_INT_WRITE_DATA(x) (((x) >> S_DEBUG_INT_WRITE_DATA) & M_DEBUG_INT_WRITE_DATA)
+
+#define A_ARM_DEBUG_INT_STAT 0x47274
+
+#define S_DEBUG_INT_STATUS_REG 0
+#define M_DEBUG_INT_STATUS_REG 0xfffU
+#define V_DEBUG_INT_STATUS_REG(x) ((x) << S_DEBUG_INT_STATUS_REG)
+#define G_DEBUG_INT_STATUS_REG(x) (((x) >> S_DEBUG_INT_STATUS_REG) & M_DEBUG_INT_STATUS_REG)
+
+#define A_ARM_DEBUG_STAT 0x47278
+
+#define S_ARM_DEBUG_STAT 0
+#define M_ARM_DEBUG_STAT 0x3fffU
+#define V_ARM_DEBUG_STAT(x) ((x) << S_ARM_DEBUG_STAT)
+#define G_ARM_DEBUG_STAT(x) (((x) >> S_ARM_DEBUG_STAT) & M_ARM_DEBUG_STAT)
+
+#define A_ARM_SIZE_STAT 0x4727c
+
+#define S_ARM_SIZE_STAT 0
+#define M_ARM_SIZE_STAT 0x3fffffffU
+#define V_ARM_SIZE_STAT(x) ((x) << S_ARM_SIZE_STAT)
+#define G_ARM_SIZE_STAT(x) (((x) >> S_ARM_SIZE_STAT) & M_ARM_SIZE_STAT)
+
+#define A_ARM_CCI_CFG0 0x47280
+
+#define S_CCIBROADCASTCACHEMAINT 28
+#define M_CCIBROADCASTCACHEMAINT 0x7U
+#define V_CCIBROADCASTCACHEMAINT(x) ((x) << S_CCIBROADCASTCACHEMAINT)
+#define G_CCIBROADCASTCACHEMAINT(x) (((x) >> S_CCIBROADCASTCACHEMAINT) & M_CCIBROADCASTCACHEMAINT)
+
+#define S_CCISTRIPINGGRANULE 25
+#define M_CCISTRIPINGGRANULE 0x7U
+#define V_CCISTRIPINGGRANULE(x) ((x) << S_CCISTRIPINGGRANULE)
+#define G_CCISTRIPINGGRANULE(x) (((x) >> S_CCISTRIPINGGRANULE) & M_CCISTRIPINGGRANULE)
+
+#define S_CCIPERIPHBASE 0
+#define M_CCIPERIPHBASE 0x1ffffffU
+#define V_CCIPERIPHBASE(x) ((x) << S_CCIPERIPHBASE)
+#define G_CCIPERIPHBASE(x) (((x) >> S_CCIPERIPHBASE) & M_CCIPERIPHBASE)
+
+#define A_ARM_CCI_CFG1 0x47284
+
+#define S_CCIDFTRSTDISABLE 18
+#define V_CCIDFTRSTDISABLE(x) ((x) << S_CCIDFTRSTDISABLE)
+#define F_CCIDFTRSTDISABLE V_CCIDFTRSTDISABLE(1U)
+
+#define S_CCISPNIDEN 17
+#define V_CCISPNIDEN(x) ((x) << S_CCISPNIDEN)
+#define F_CCISPNIDEN V_CCISPNIDEN(1U)
+
+#define S_CCINIDEN 16
+#define V_CCINIDEN(x) ((x) << S_CCINIDEN)
+#define F_CCINIDEN V_CCINIDEN(1U)
+
+#define S_CCIACCHANNELN 11
+#define M_CCIACCHANNELN 0x1fU
+#define V_CCIACCHANNELN(x) ((x) << S_CCIACCHANNELN)
+#define G_CCIACCHANNELN(x) (((x) >> S_CCIACCHANNELN) & M_CCIACCHANNELN)
+
+#define S_CCIQOSOVERRIDE 6
+#define M_CCIQOSOVERRIDE 0x1fU
+#define V_CCIQOSOVERRIDE(x) ((x) << S_CCIQOSOVERRIDE)
+#define G_CCIQOSOVERRIDE(x) (((x) >> S_CCIQOSOVERRIDE) & M_CCIQOSOVERRIDE)
+
+#define S_CCIBUFFERABLEOVERRIDE 3
+#define M_CCIBUFFERABLEOVERRIDE 0x7U
+#define V_CCIBUFFERABLEOVERRIDE(x) ((x) << S_CCIBUFFERABLEOVERRIDE)
+#define G_CCIBUFFERABLEOVERRIDE(x) (((x) >> S_CCIBUFFERABLEOVERRIDE) & M_CCIBUFFERABLEOVERRIDE)
+
+#define S_CCIBARRIERTERMINATE 0
+#define M_CCIBARRIERTERMINATE 0x7U
+#define V_CCIBARRIERTERMINATE(x) ((x) << S_CCIBARRIERTERMINATE)
+#define G_CCIBARRIERTERMINATE(x) (((x) >> S_CCIBARRIERTERMINATE) & M_CCIBARRIERTERMINATE)
+
+#define A_ARM_CCI_CFG2 0x47288
+
+#define S_CCIADDRMAP15 30
+#define M_CCIADDRMAP15 0x3U
+#define V_CCIADDRMAP15(x) ((x) << S_CCIADDRMAP15)
+#define G_CCIADDRMAP15(x) (((x) >> S_CCIADDRMAP15) & M_CCIADDRMAP15)
+
+#define S_CCIADDRMAP14 28
+#define M_CCIADDRMAP14 0x3U
+#define V_CCIADDRMAP14(x) ((x) << S_CCIADDRMAP14)
+#define G_CCIADDRMAP14(x) (((x) >> S_CCIADDRMAP14) & M_CCIADDRMAP14)
+
+#define S_CCIADDRMAP13 26
+#define M_CCIADDRMAP13 0x3U
+#define V_CCIADDRMAP13(x) ((x) << S_CCIADDRMAP13)
+#define G_CCIADDRMAP13(x) (((x) >> S_CCIADDRMAP13) & M_CCIADDRMAP13)
+
+#define S_CCIADDRMAP12 24
+#define M_CCIADDRMAP12 0x3U
+#define V_CCIADDRMAP12(x) ((x) << S_CCIADDRMAP12)
+#define G_CCIADDRMAP12(x) (((x) >> S_CCIADDRMAP12) & M_CCIADDRMAP12)
+
+#define S_CCIADDRMAP11 22
+#define M_CCIADDRMAP11 0x3U
+#define V_CCIADDRMAP11(x) ((x) << S_CCIADDRMAP11)
+#define G_CCIADDRMAP11(x) (((x) >> S_CCIADDRMAP11) & M_CCIADDRMAP11)
+
+#define S_CCIADDRMAP10 20
+#define M_CCIADDRMAP10 0x3U
+#define V_CCIADDRMAP10(x) ((x) << S_CCIADDRMAP10)
+#define G_CCIADDRMAP10(x) (((x) >> S_CCIADDRMAP10) & M_CCIADDRMAP10)
+
+#define S_CCIADDRMAP9 18
+#define M_CCIADDRMAP9 0x3U
+#define V_CCIADDRMAP9(x) ((x) << S_CCIADDRMAP9)
+#define G_CCIADDRMAP9(x) (((x) >> S_CCIADDRMAP9) & M_CCIADDRMAP9)
+
+#define S_CCIADDRMAP8 16
+#define M_CCIADDRMAP8 0x3U
+#define V_CCIADDRMAP8(x) ((x) << S_CCIADDRMAP8)
+#define G_CCIADDRMAP8(x) (((x) >> S_CCIADDRMAP8) & M_CCIADDRMAP8)
+
+#define S_CCIADDRMAP7 14
+#define M_CCIADDRMAP7 0x3U
+#define V_CCIADDRMAP7(x) ((x) << S_CCIADDRMAP7)
+#define G_CCIADDRMAP7(x) (((x) >> S_CCIADDRMAP7) & M_CCIADDRMAP7)
+
+#define S_CCIADDRMAP6 12
+#define M_CCIADDRMAP6 0x3U
+#define V_CCIADDRMAP6(x) ((x) << S_CCIADDRMAP6)
+#define G_CCIADDRMAP6(x) (((x) >> S_CCIADDRMAP6) & M_CCIADDRMAP6)
+
+#define S_CCIADDRMAP5 10
+#define M_CCIADDRMAP5 0x3U
+#define V_CCIADDRMAP5(x) ((x) << S_CCIADDRMAP5)
+#define G_CCIADDRMAP5(x) (((x) >> S_CCIADDRMAP5) & M_CCIADDRMAP5)
+
+#define S_CCIADDRMAP4 8
+#define M_CCIADDRMAP4 0x3U
+#define V_CCIADDRMAP4(x) ((x) << S_CCIADDRMAP4)
+#define G_CCIADDRMAP4(x) (((x) >> S_CCIADDRMAP4) & M_CCIADDRMAP4)
+
+#define S_CCIADDRMAP3 6
+#define M_CCIADDRMAP3 0x3U
+#define V_CCIADDRMAP3(x) ((x) << S_CCIADDRMAP3)
+#define G_CCIADDRMAP3(x) (((x) >> S_CCIADDRMAP3) & M_CCIADDRMAP3)
+
+#define S_CCIADDRMAP2 4
+#define M_CCIADDRMAP2 0x3U
+#define V_CCIADDRMAP2(x) ((x) << S_CCIADDRMAP2)
+#define G_CCIADDRMAP2(x) (((x) >> S_CCIADDRMAP2) & M_CCIADDRMAP2)
+
+#define S_CCIADDRMAP1 2
+#define M_CCIADDRMAP1 0x3U
+#define V_CCIADDRMAP1(x) ((x) << S_CCIADDRMAP1)
+#define G_CCIADDRMAP1(x) (((x) >> S_CCIADDRMAP1) & M_CCIADDRMAP1)
+
+#define S_CCIADDRMAP0 0
+#define M_CCIADDRMAP0 0x3U
+#define V_CCIADDRMAP0(x) ((x) << S_CCIADDRMAP0)
+#define G_CCIADDRMAP0(x) (((x) >> S_CCIADDRMAP0) & M_CCIADDRMAP0)
+
+#define A_ARM_CCI_STATUS 0x4728c
+
+#define S_CCICACTIVE 6
+#define V_CCICACTIVE(x) ((x) << S_CCICACTIVE)
+#define F_CCICACTIVE V_CCICACTIVE(1U)
+
+#define S_CCICSYSACK 5
+#define V_CCICSYSACK(x) ((x) << S_CCICSYSACK)
+#define F_CCICSYSACK V_CCICSYSACK(1U)
+
+#define S_CCINEVNTCNTOVERFLOW 0
+#define M_CCINEVNTCNTOVERFLOW 0x1fU
+#define V_CCINEVNTCNTOVERFLOW(x) ((x) << S_CCINEVNTCNTOVERFLOW)
+#define G_CCINEVNTCNTOVERFLOW(x) (((x) >> S_CCINEVNTCNTOVERFLOW) & M_CCINEVNTCNTOVERFLOW)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_CFG 0x47290
+
+#define S_CCIVWREADYVN3M 20
+#define V_CCIVWREADYVN3M(x) ((x) << S_CCIVWREADYVN3M)
+#define F_CCIVWREADYVN3M V_CCIVWREADYVN3M(1U)
+
+#define S_CCIVAWREADYVN3M 19
+#define V_CCIVAWREADYVN3M(x) ((x) << S_CCIVAWREADYVN3M)
+#define F_CCIVAWREADYVN3M V_CCIVAWREADYVN3M(1U)
+
+#define S_CCIVARREADYVN3M 18
+#define V_CCIVARREADYVN3M(x) ((x) << S_CCIVARREADYVN3M)
+#define F_CCIVARREADYVN3M V_CCIVARREADYVN3M(1U)
+
+#define S_CCIVWREADYVN2M 17
+#define V_CCIVWREADYVN2M(x) ((x) << S_CCIVWREADYVN2M)
+#define F_CCIVWREADYVN2M V_CCIVWREADYVN2M(1U)
+
+#define S_CCIVAWREADYVN2M 16
+#define V_CCIVAWREADYVN2M(x) ((x) << S_CCIVAWREADYVN2M)
+#define F_CCIVAWREADYVN2M V_CCIVAWREADYVN2M(1U)
+
+#define S_CCIVARREADYVN2M 15
+#define V_CCIVARREADYVN2M(x) ((x) << S_CCIVARREADYVN2M)
+#define F_CCIVARREADYVN2M V_CCIVARREADYVN2M(1U)
+
+#define S_CCIVWREADYVN1M 14
+#define V_CCIVWREADYVN1M(x) ((x) << S_CCIVWREADYVN1M)
+#define F_CCIVWREADYVN1M V_CCIVWREADYVN1M(1U)
+
+#define S_CCIVAWREADYVN1M 13
+#define V_CCIVAWREADYVN1M(x) ((x) << S_CCIVAWREADYVN1M)
+#define F_CCIVAWREADYVN1M V_CCIVAWREADYVN1M(1U)
+
+#define S_CCIVARREADYVN1M 12
+#define V_CCIVARREADYVN1M(x) ((x) << S_CCIVARREADYVN1M)
+#define F_CCIVARREADYVN1M V_CCIVARREADYVN1M(1U)
+
+#define S_CCIVWREADYVN0M 11
+#define V_CCIVWREADYVN0M(x) ((x) << S_CCIVWREADYVN0M)
+#define F_CCIVWREADYVN0M V_CCIVWREADYVN0M(1U)
+
+#define S_CCIVAWREADYVN0M 10
+#define V_CCIVAWREADYVN0M(x) ((x) << S_CCIVAWREADYVN0M)
+#define F_CCIVAWREADYVN0M V_CCIVAWREADYVN0M(1U)
+
+#define S_CCIVARREADYVN0M 9
+#define V_CCIVARREADYVN0M(x) ((x) << S_CCIVARREADYVN0M)
+#define F_CCIVARREADYVN0M V_CCIVARREADYVN0M(1U)
+
+#define S_CCIQVNPREALLOCWM 5
+#define M_CCIQVNPREALLOCWM 0xfU
+#define V_CCIQVNPREALLOCWM(x) ((x) << S_CCIQVNPREALLOCWM)
+#define G_CCIQVNPREALLOCWM(x) (((x) >> S_CCIQVNPREALLOCWM) & M_CCIQVNPREALLOCWM)
+
+#define S_CCIQVNPREALLOCRM 1
+#define M_CCIQVNPREALLOCRM 0xfU
+#define V_CCIQVNPREALLOCRM(x) ((x) << S_CCIQVNPREALLOCRM)
+#define G_CCIQVNPREALLOCRM(x) (((x) >> S_CCIQVNPREALLOCRM) & M_CCIQVNPREALLOCRM)
+
+#define S_CCIQVNENABLEM 0
+#define V_CCIQVNENABLEM(x) ((x) << S_CCIQVNENABLEM)
+#define F_CCIQVNENABLEM V_CCIQVNENABLEM(1U)
+
+#define A_ARM_CCIM_CCI_QVN_MASTER_STATUS 0x47294
+
+#define S_CCIVWVALIDN3M 31
+#define V_CCIVWVALIDN3M(x) ((x) << S_CCIVWVALIDN3M)
+#define F_CCIVWVALIDN3M V_CCIVWVALIDN3M(1U)
+
+#define S_CCIVAWVALIDN3M 30
+#define V_CCIVAWVALIDN3M(x) ((x) << S_CCIVAWVALIDN3M)
+#define F_CCIVAWVALIDN3M V_CCIVAWVALIDN3M(1U)
+
+#define S_CCIVAWQOSN3M 29
+#define V_CCIVAWQOSN3M(x) ((x) << S_CCIVAWQOSN3M)
+#define F_CCIVAWQOSN3M V_CCIVAWQOSN3M(1U)
+
+#define S_CCIVARVALIDN3M 28
+#define V_CCIVARVALIDN3M(x) ((x) << S_CCIVARVALIDN3M)
+#define F_CCIVARVALIDN3M V_CCIVARVALIDN3M(1U)
+
+#define S_CCIVARQOSN3M 24
+#define M_CCIVARQOSN3M 0xfU
+#define V_CCIVARQOSN3M(x) ((x) << S_CCIVARQOSN3M)
+#define G_CCIVARQOSN3M(x) (((x) >> S_CCIVARQOSN3M) & M_CCIVARQOSN3M)
+
+#define S_CCIVWVALIDN2M 23
+#define V_CCIVWVALIDN2M(x) ((x) << S_CCIVWVALIDN2M)
+#define F_CCIVWVALIDN2M V_CCIVWVALIDN2M(1U)
+
+#define S_CCIVAWVALIDN2M 22
+#define V_CCIVAWVALIDN2M(x) ((x) << S_CCIVAWVALIDN2M)
+#define F_CCIVAWVALIDN2M V_CCIVAWVALIDN2M(1U)
+
+#define S_CCIVAWQOSN2M 21
+#define V_CCIVAWQOSN2M(x) ((x) << S_CCIVAWQOSN2M)
+#define F_CCIVAWQOSN2M V_CCIVAWQOSN2M(1U)
+
+#define S_CCIVARVALIDN2M 20
+#define V_CCIVARVALIDN2M(x) ((x) << S_CCIVARVALIDN2M)
+#define F_CCIVARVALIDN2M V_CCIVARVALIDN2M(1U)
+
+#define S_CCIVARQOSN2M 16
+#define M_CCIVARQOSN2M 0xfU
+#define V_CCIVARQOSN2M(x) ((x) << S_CCIVARQOSN2M)
+#define G_CCIVARQOSN2M(x) (((x) >> S_CCIVARQOSN2M) & M_CCIVARQOSN2M)
+
+#define S_CCIVWVALIDN1M 15
+#define V_CCIVWVALIDN1M(x) ((x) << S_CCIVWVALIDN1M)
+#define F_CCIVWVALIDN1M V_CCIVWVALIDN1M(1U)
+
+#define S_CCIVAWVALIDN1M 14
+#define V_CCIVAWVALIDN1M(x) ((x) << S_CCIVAWVALIDN1M)
+#define F_CCIVAWVALIDN1M V_CCIVAWVALIDN1M(1U)
+
+#define S_CCIVAWQOSN1M 13
+#define V_CCIVAWQOSN1M(x) ((x) << S_CCIVAWQOSN1M)
+#define F_CCIVAWQOSN1M V_CCIVAWQOSN1M(1U)
+
+#define S_CCIVARVALIDN1M 12
+#define V_CCIVARVALIDN1M(x) ((x) << S_CCIVARVALIDN1M)
+#define F_CCIVARVALIDN1M V_CCIVARVALIDN1M(1U)
+
+#define S_CCIVARQOSN1M 8
+#define M_CCIVARQOSN1M 0xfU
+#define V_CCIVARQOSN1M(x) ((x) << S_CCIVARQOSN1M)
+#define G_CCIVARQOSN1M(x) (((x) >> S_CCIVARQOSN1M) & M_CCIVARQOSN1M)
+
+#define S_CCIVWVALIDN0M 7
+#define V_CCIVWVALIDN0M(x) ((x) << S_CCIVWVALIDN0M)
+#define F_CCIVWVALIDN0M V_CCIVWVALIDN0M(1U)
+
+#define S_CCIVAWVALIDN0M 6
+#define V_CCIVAWVALIDN0M(x) ((x) << S_CCIVAWVALIDN0M)
+#define F_CCIVAWVALIDN0M V_CCIVAWVALIDN0M(1U)
+
+#define S_CCIVAWQOSN0M 5
+#define V_CCIVAWQOSN0M(x) ((x) << S_CCIVAWQOSN0M)
+#define F_CCIVAWQOSN0M V_CCIVAWQOSN0M(1U)
+
+#define S_CCIVARVALIDN0M 4
+#define V_CCIVARVALIDN0M(x) ((x) << S_CCIVARVALIDN0M)
+#define F_CCIVARVALIDN0M V_CCIVARVALIDN0M(1U)
+
+#define S_CCIVARQOSN0M 0
+#define M_CCIVARQOSN0M 0xfU
+#define V_CCIVARQOSN0M(x) ((x) << S_CCIVARQOSN0M)
+#define G_CCIVARQOSN0M(x) (((x) >> S_CCIVARQOSN0M) & M_CCIVARQOSN0M)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_CFG 0x472d0
+
+#define S_CCIQVNVNETS 0
+#define M_CCIQVNVNETS 0x3U
+#define V_CCIQVNVNETS(x) ((x) << S_CCIQVNVNETS)
+#define G_CCIQVNVNETS(x) (((x) >> S_CCIQVNVNETS) & M_CCIQVNVNETS)
+
+#define A_ARM_CCIS_CCI_QVN_SLAVE_STATUS 0x472d4
+
+#define S_CCIEVNTAWQOS 4
+#define M_CCIEVNTAWQOS 0xfU
+#define V_CCIEVNTAWQOS(x) ((x) << S_CCIEVNTAWQOS)
+#define G_CCIEVNTAWQOS(x) (((x) >> S_CCIEVNTAWQOS) & M_CCIEVNTAWQOS)
+
+#define S_CCIEVNTARQOS 0
+#define M_CCIEVNTARQOS 0xfU
+#define V_CCIEVNTARQOS(x) ((x) << S_CCIEVNTARQOS)
+#define G_CCIEVNTARQOS(x) (((x) >> S_CCIEVNTARQOS) & M_CCIEVNTARQOS)
+
+#define A_ARM_CCI_EVNTBUS 0x47300
+#define A_ARM_CCI_RST_N 0x47318
+
+#define S_CCIRSTN 0
+#define V_CCIRSTN(x) ((x) << S_CCIRSTN)
+#define F_CCIRSTN V_CCIRSTN(1U)
+
+#define A_ARM_CCI_CSYREQ 0x4731c
+
+#define S_CCICSYSREQ 0
+#define V_CCICSYSREQ(x) ((x) << S_CCICSYSREQ)
+#define F_CCICSYSREQ V_CCICSYSREQ(1U)
+
+#define A_ARM_CCI_TR_DEBUGS0 0x47320
+
+#define S_CCIS0RCNT 24
+#define M_CCIS0RCNT 0xffU
+#define V_CCIS0RCNT(x) ((x) << S_CCIS0RCNT)
+#define G_CCIS0RCNT(x) (((x) >> S_CCIS0RCNT) & M_CCIS0RCNT)
+
+#define S_CCIS0ARCNT 16
+#define M_CCIS0ARCNT 0xffU
+#define V_CCIS0ARCNT(x) ((x) << S_CCIS0ARCNT)
+#define G_CCIS0ARCNT(x) (((x) >> S_CCIS0ARCNT) & M_CCIS0ARCNT)
+
+#define S_CCIS0WCNT 8
+#define M_CCIS0WCNT 0xffU
+#define V_CCIS0WCNT(x) ((x) << S_CCIS0WCNT)
+#define G_CCIS0WCNT(x) (((x) >> S_CCIS0WCNT) & M_CCIS0WCNT)
+
+#define S_CCIS0AWCNT 0
+#define M_CCIS0AWCNT 0xffU
+#define V_CCIS0AWCNT(x) ((x) << S_CCIS0AWCNT)
+#define G_CCIS0AWCNT(x) (((x) >> S_CCIS0AWCNT) & M_CCIS0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS1 0x47324
+
+#define S_CCIS1RCNT 24
+#define M_CCIS1RCNT 0xffU
+#define V_CCIS1RCNT(x) ((x) << S_CCIS1RCNT)
+#define G_CCIS1RCNT(x) (((x) >> S_CCIS1RCNT) & M_CCIS1RCNT)
+
+#define S_CCIS1ARCNT 16
+#define M_CCIS1ARCNT 0xffU
+#define V_CCIS1ARCNT(x) ((x) << S_CCIS1ARCNT)
+#define G_CCIS1ARCNT(x) (((x) >> S_CCIS1ARCNT) & M_CCIS1ARCNT)
+
+#define S_CCIS1WCNT 8
+#define M_CCIS1WCNT 0xffU
+#define V_CCIS1WCNT(x) ((x) << S_CCIS1WCNT)
+#define G_CCIS1WCNT(x) (((x) >> S_CCIS1WCNT) & M_CCIS1WCNT)
+
+#define S_CCIS1AWCNT 0
+#define M_CCIS1AWCNT 0xffU
+#define V_CCIS1AWCNT(x) ((x) << S_CCIS1AWCNT)
+#define G_CCIS1AWCNT(x) (((x) >> S_CCIS1AWCNT) & M_CCIS1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS2 0x47328
+
+#define S_CCIS2RCNT 24
+#define M_CCIS2RCNT 0xffU
+#define V_CCIS2RCNT(x) ((x) << S_CCIS2RCNT)
+#define G_CCIS2RCNT(x) (((x) >> S_CCIS2RCNT) & M_CCIS2RCNT)
+
+#define S_CCIS2ARCNT 16
+#define M_CCIS2ARCNT 0xffU
+#define V_CCIS2ARCNT(x) ((x) << S_CCIS2ARCNT)
+#define G_CCIS2ARCNT(x) (((x) >> S_CCIS2ARCNT) & M_CCIS2ARCNT)
+
+#define S_CCIS2WCNT 8
+#define M_CCIS2WCNT 0xffU
+#define V_CCIS2WCNT(x) ((x) << S_CCIS2WCNT)
+#define G_CCIS2WCNT(x) (((x) >> S_CCIS2WCNT) & M_CCIS2WCNT)
+
+#define S_CCIS2AWCNT 0
+#define M_CCIS2AWCNT 0xffU
+#define V_CCIS2AWCNT(x) ((x) << S_CCIS2AWCNT)
+#define G_CCIS2AWCNT(x) (((x) >> S_CCIS2AWCNT) & M_CCIS2AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS3 0x4732c
+
+#define S_CCIS3RCNT 24
+#define M_CCIS3RCNT 0xffU
+#define V_CCIS3RCNT(x) ((x) << S_CCIS3RCNT)
+#define G_CCIS3RCNT(x) (((x) >> S_CCIS3RCNT) & M_CCIS3RCNT)
+
+#define S_CCIS3ARCNT 16
+#define M_CCIS3ARCNT 0xffU
+#define V_CCIS3ARCNT(x) ((x) << S_CCIS3ARCNT)
+#define G_CCIS3ARCNT(x) (((x) >> S_CCIS3ARCNT) & M_CCIS3ARCNT)
+
+#define S_CCIS3WCNT 8
+#define M_CCIS3WCNT 0xffU
+#define V_CCIS3WCNT(x) ((x) << S_CCIS3WCNT)
+#define G_CCIS3WCNT(x) (((x) >> S_CCIS3WCNT) & M_CCIS3WCNT)
+
+#define S_CCIS3AWCNT 0
+#define M_CCIS3AWCNT 0xffU
+#define V_CCIS3AWCNT(x) ((x) << S_CCIS3AWCNT)
+#define G_CCIS3AWCNT(x) (((x) >> S_CCIS3AWCNT) & M_CCIS3AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS4 0x47330
+
+#define S_CCIS4RCNT 24
+#define M_CCIS4RCNT 0xffU
+#define V_CCIS4RCNT(x) ((x) << S_CCIS4RCNT)
+#define G_CCIS4RCNT(x) (((x) >> S_CCIS4RCNT) & M_CCIS4RCNT)
+
+#define S_CCIS4ARCNT 16
+#define M_CCIS4ARCNT 0xffU
+#define V_CCIS4ARCNT(x) ((x) << S_CCIS4ARCNT)
+#define G_CCIS4ARCNT(x) (((x) >> S_CCIS4ARCNT) & M_CCIS4ARCNT)
+
+#define S_CCIS4WCNT 8
+#define M_CCIS4WCNT 0xffU
+#define V_CCIS4WCNT(x) ((x) << S_CCIS4WCNT)
+#define G_CCIS4WCNT(x) (((x) >> S_CCIS4WCNT) & M_CCIS4WCNT)
+
+#define S_CCIS4AWCNT 0
+#define M_CCIS4AWCNT 0xffU
+#define V_CCIS4AWCNT(x) ((x) << S_CCIS4AWCNT)
+#define G_CCIS4AWCNT(x) (((x) >> S_CCIS4AWCNT) & M_CCIS4AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGS34 0x47334
+
+#define S_CCIS4RSPCNT 24
+#define M_CCIS4RSPCNT 0xffU
+#define V_CCIS4RSPCNT(x) ((x) << S_CCIS4RSPCNT)
+#define G_CCIS4RSPCNT(x) (((x) >> S_CCIS4RSPCNT) & M_CCIS4RSPCNT)
+
+#define S_CCIS4ACCNT 16
+#define M_CCIS4ACCNT 0xffU
+#define V_CCIS4ACCNT(x) ((x) << S_CCIS4ACCNT)
+#define G_CCIS4ACCNT(x) (((x) >> S_CCIS4ACCNT) & M_CCIS4ACCNT)
+
+#define S_CCIS3RSPCNT 8
+#define M_CCIS3RSPCNT 0xffU
+#define V_CCIS3RSPCNT(x) ((x) << S_CCIS3RSPCNT)
+#define G_CCIS3RSPCNT(x) (((x) >> S_CCIS3RSPCNT) & M_CCIS3RSPCNT)
+
+#define S_CCIS3ACCNT 0
+#define M_CCIS3ACCNT 0xffU
+#define V_CCIS3ACCNT(x) ((x) << S_CCIS3ACCNT)
+#define G_CCIS3ACCNT(x) (((x) >> S_CCIS3ACCNT) & M_CCIS3ACCNT)
+
+#define A_ARM_CCI_TR_DEBUGM0 0x47338
+
+#define S_CCIM0RCNT 24
+#define M_CCIM0RCNT 0xffU
+#define V_CCIM0RCNT(x) ((x) << S_CCIM0RCNT)
+#define G_CCIM0RCNT(x) (((x) >> S_CCIM0RCNT) & M_CCIM0RCNT)
+
+#define S_CCIM0ARCNT 16
+#define M_CCIM0ARCNT 0xffU
+#define V_CCIM0ARCNT(x) ((x) << S_CCIM0ARCNT)
+#define G_CCIM0ARCNT(x) (((x) >> S_CCIM0ARCNT) & M_CCIM0ARCNT)
+
+#define S_CCIM0WCNT 8
+#define M_CCIM0WCNT 0xffU
+#define V_CCIM0WCNT(x) ((x) << S_CCIM0WCNT)
+#define G_CCIM0WCNT(x) (((x) >> S_CCIM0WCNT) & M_CCIM0WCNT)
+
+#define S_CCIM0AWCNT 0
+#define M_CCIM0AWCNT 0xffU
+#define V_CCIM0AWCNT(x) ((x) << S_CCIM0AWCNT)
+#define G_CCIM0AWCNT(x) (((x) >> S_CCIM0AWCNT) & M_CCIM0AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM1 0x4733c
+
+#define S_CCIM1RCNT 24
+#define M_CCIM1RCNT 0xffU
+#define V_CCIM1RCNT(x) ((x) << S_CCIM1RCNT)
+#define G_CCIM1RCNT(x) (((x) >> S_CCIM1RCNT) & M_CCIM1RCNT)
+
+#define S_CCIM1ARCNT 16
+#define M_CCIM1ARCNT 0xffU
+#define V_CCIM1ARCNT(x) ((x) << S_CCIM1ARCNT)
+#define G_CCIM1ARCNT(x) (((x) >> S_CCIM1ARCNT) & M_CCIM1ARCNT)
+
+#define S_CCIM1WCNT 8
+#define M_CCIM1WCNT 0xffU
+#define V_CCIM1WCNT(x) ((x) << S_CCIM1WCNT)
+#define G_CCIM1WCNT(x) (((x) >> S_CCIM1WCNT) & M_CCIM1WCNT)
+
+#define S_CCIM1AWCNT 0
+#define M_CCIM1AWCNT 0xffU
+#define V_CCIM1AWCNT(x) ((x) << S_CCIM1AWCNT)
+#define G_CCIM1AWCNT(x) (((x) >> S_CCIM1AWCNT) & M_CCIM1AWCNT)
+
+#define A_ARM_CCI_TR_DEBUGM2 0x47340
+
+#define S_CCIM2RCNT 24
+#define M_CCIM2RCNT 0xffU
+#define V_CCIM2RCNT(x) ((x) << S_CCIM2RCNT)
+#define G_CCIM2RCNT(x) (((x) >> S_CCIM2RCNT) & M_CCIM2RCNT)
+
+#define S_CCIM2ARCNT 16
+#define M_CCIM2ARCNT 0xffU
+#define V_CCIM2ARCNT(x) ((x) << S_CCIM2ARCNT)
+#define G_CCIM2ARCNT(x) (((x) >> S_CCIM2ARCNT) & M_CCIM2ARCNT)
+
+#define S_CCIM2WCNT 8
+#define M_CCIM2WCNT 0xffU
+#define V_CCIM2WCNT(x) ((x) << S_CCIM2WCNT)
+#define G_CCIM2WCNT(x) (((x) >> S_CCIM2WCNT) & M_CCIM2WCNT)
+
+#define S_CCIM2AWCNT 0
+#define M_CCIM2AWCNT 0xffU
+#define V_CCIM2AWCNT(x) ((x) << S_CCIM2AWCNT)
+#define G_CCIM2AWCNT(x) (((x) >> S_CCIM2AWCNT) & M_CCIM2AWCNT)
+
+#define A_ARM_MA_TR_DEBUG 0x47344
+
+#define S_MA1_RD_CNT 24
+#define M_MA1_RD_CNT 0xffU
+#define V_MA1_RD_CNT(x) ((x) << S_MA1_RD_CNT)
+#define G_MA1_RD_CNT(x) (((x) >> S_MA1_RD_CNT) & M_MA1_RD_CNT)
+
+#define S_MA1_WR_CNT 16
+#define M_MA1_WR_CNT 0xffU
+#define V_MA1_WR_CNT(x) ((x) << S_MA1_WR_CNT)
+#define G_MA1_WR_CNT(x) (((x) >> S_MA1_WR_CNT) & M_MA1_WR_CNT)
+
+#define S_MA0_RD_CNT 8
+#define M_MA0_RD_CNT 0xffU
+#define V_MA0_RD_CNT(x) ((x) << S_MA0_RD_CNT)
+#define G_MA0_RD_CNT(x) (((x) >> S_MA0_RD_CNT) & M_MA0_RD_CNT)
+
+#define S_MA0_WR_CNT 0
+#define M_MA0_WR_CNT 0xffU
+#define V_MA0_WR_CNT(x) ((x) << S_MA0_WR_CNT)
+#define G_MA0_WR_CNT(x) (((x) >> S_MA0_WR_CNT) & M_MA0_WR_CNT)
+
+#define A_ARM_GP_INT 0x47348
+
+#define S_GP_INT 0
+#define M_GP_INT 0xffU
+#define V_GP_INT(x) ((x) << S_GP_INT)
+#define G_GP_INT(x) (((x) >> S_GP_INT) & M_GP_INT)
+
+#define A_ARM_DMA_CFG0 0x47350
+#define A_ARM_DMA_CFG1 0x47354
+
+#define S_DMABOOTPERIPHNS 16
+#define M_DMABOOTPERIPHNS 0x3ffU
+#define V_DMABOOTPERIPHNS(x) ((x) << S_DMABOOTPERIPHNS)
+#define G_DMABOOTPERIPHNS(x) (((x) >> S_DMABOOTPERIPHNS) & M_DMABOOTPERIPHNS)
+
+#define S_DMABOOTIRQNS 4
+#define M_DMABOOTIRQNS 0x3ffU
+#define V_DMABOOTIRQNS(x) ((x) << S_DMABOOTIRQNS)
+#define G_DMABOOTIRQNS(x) (((x) >> S_DMABOOTIRQNS) & M_DMABOOTIRQNS)
+
+#define S_DMABOOTMANAGERNS 1
+#define V_DMABOOTMANAGERNS(x) ((x) << S_DMABOOTMANAGERNS)
+#define F_DMABOOTMANAGERNS V_DMABOOTMANAGERNS(1U)
+
+#define S_DMABOOTFROMPC 0
+#define V_DMABOOTFROMPC(x) ((x) << S_DMABOOTFROMPC)
+#define F_DMABOOTFROMPC V_DMABOOTFROMPC(1U)
+
+#define A_ARM_ARM_CFG0 0x47380
+
+#define S_MESSAGEBYPASS_DATA 2
+#define V_MESSAGEBYPASS_DATA(x) ((x) << S_MESSAGEBYPASS_DATA)
+#define F_MESSAGEBYPASS_DATA V_MESSAGEBYPASS_DATA(1U)
+
+#define S_MESSAGEBYPASS 1
+#define V_MESSAGEBYPASS(x) ((x) << S_MESSAGEBYPASS)
+#define F_MESSAGEBYPASS V_MESSAGEBYPASS(1U)
+
+#define S_PCIEBYPASS 0
+#define V_PCIEBYPASS(x) ((x) << S_PCIEBYPASS)
+#define F_PCIEBYPASS V_PCIEBYPASS(1U)
+
+#define A_ARM_ARM_CFG1 0x47384
+#define A_ARM_ARM_CFG2 0x47390
+#define A_ARM_PCIE_MA_ADDR_REGION0 0x47400
+
+#define S_ADDRREG0 0
+#define M_ADDRREG0 0xfffffffU
+#define V_ADDRREG0(x) ((x) << S_ADDRREG0)
+#define G_ADDRREG0(x) (((x) >> S_ADDRREG0) & M_ADDRREG0)
+
+#define A_ARM_PCIE_MA_ADDR_REGION1 0x47404
+
+#define S_ADDRREG1 0
+#define M_ADDRREG1 0xfffffffU
+#define V_ADDRREG1(x) ((x) << S_ADDRREG1)
+#define G_ADDRREG1(x) (((x) >> S_ADDRREG1) & M_ADDRREG1)
+
+#define A_ARM_PCIE_MA_ADDR_REGION2 0x47408
+
+#define S_ADDRREG2 0
+#define M_ADDRREG2 0xfffffffU
+#define V_ADDRREG2(x) ((x) << S_ADDRREG2)
+#define G_ADDRREG2(x) (((x) >> S_ADDRREG2) & M_ADDRREG2)
+
+#define A_ARM_PCIE_MA_ADDR_REGION3 0x4740c
+
+#define S_ADDRREG3 0
+#define M_ADDRREG3 0xfffffffU
+#define V_ADDRREG3(x) ((x) << S_ADDRREG3)
+#define G_ADDRREG3(x) (((x) >> S_ADDRREG3) & M_ADDRREG3)
+
+#define A_ARM_PCIE_MA_ADDR_REGION4 0x47410
+
+#define S_ADDRREG4 0
+#define M_ADDRREG4 0xfffffffU
+#define V_ADDRREG4(x) ((x) << S_ADDRREG4)
+#define G_ADDRREG4(x) (((x) >> S_ADDRREG4) & M_ADDRREG4)
+
+#define A_ARM_PCIE_MA_ADDR_REGION5 0x47414
+
+#define S_ADDRREG5 0
+#define M_ADDRREG5 0xfffffffU
+#define V_ADDRREG5(x) ((x) << S_ADDRREG5)
+#define G_ADDRREG5(x) (((x) >> S_ADDRREG5) & M_ADDRREG5)
+
+#define A_ARM_PCIE_MA_ADDR_REGION6 0x47418
+
+#define S_ADDRREG6 0
+#define M_ADDRREG6 0xfffffffU
+#define V_ADDRREG6(x) ((x) << S_ADDRREG6)
+#define G_ADDRREG6(x) (((x) >> S_ADDRREG6) & M_ADDRREG6)
+
+#define A_ARM_PCIE_MA_ADDR_REGION7 0x4741c
+
+#define S_ADDRREG7 0
+#define M_ADDRREG7 0xfffffffU
+#define V_ADDRREG7(x) ((x) << S_ADDRREG7)
+#define G_ADDRREG7(x) (((x) >> S_ADDRREG7) & M_ADDRREG7)
+
+#define A_ARM_INTERRUPT_GEN 0x47420
+
+#define S_INT_GEN 0
+#define M_INT_GEN 0x3U
+#define V_INT_GEN(x) ((x) << S_INT_GEN)
+#define G_INT_GEN(x) (((x) >> S_INT_GEN) & M_INT_GEN)
+
+#define A_ARM_INTERRUPT_CLEAR 0x47424
+
+#define S_INT_CLEAR 0
+#define M_INT_CLEAR 0x3U
+#define V_INT_CLEAR(x) ((x) << S_INT_CLEAR)
+#define G_INT_CLEAR(x) (((x) >> S_INT_CLEAR) & M_INT_CLEAR)
+
+#define A_ARM_DEBUG_STATUS_0 0x47428
+#define A_ARM_DBPROC_CONTROL 0x4742c
+
+#define S_NO_OF_INTERRUPTS 0
+#define M_NO_OF_INTERRUPTS 0x3U
+#define V_NO_OF_INTERRUPTS(x) ((x) << S_NO_OF_INTERRUPTS)
+#define G_NO_OF_INTERRUPTS(x) (((x) >> S_NO_OF_INTERRUPTS) & M_NO_OF_INTERRUPTS)
+
+#define A_ARM_PERR_INT_CAUSE1 0x47430
+
+#define S_ARWFIFO0_PERR 31
+#define V_ARWFIFO0_PERR(x) ((x) << S_ARWFIFO0_PERR)
+#define F_ARWFIFO0_PERR V_ARWFIFO0_PERR(1U)
+
+#define S_ARWFIFO1_PERR 30
+#define V_ARWFIFO1_PERR(x) ((x) << S_ARWFIFO1_PERR)
+#define F_ARWFIFO1_PERR V_ARWFIFO1_PERR(1U)
+
+#define S_ARWIDFIFO0_PERR 29
+#define V_ARWIDFIFO0_PERR(x) ((x) << S_ARWIDFIFO0_PERR)
+#define F_ARWIDFIFO0_PERR V_ARWIDFIFO0_PERR(1U)
+
+#define S_ARWIDFIFO1_PERR 28
+#define V_ARWIDFIFO1_PERR(x) ((x) << S_ARWIDFIFO1_PERR)
+#define F_ARWIDFIFO1_PERR V_ARWIDFIFO1_PERR(1U)
+
+#define S_ARIDFIFO0_PERR 27
+#define V_ARIDFIFO0_PERR(x) ((x) << S_ARIDFIFO0_PERR)
+#define F_ARIDFIFO0_PERR V_ARIDFIFO0_PERR(1U)
+
+#define S_ARIDFIFO1_PERR 26
+#define V_ARIDFIFO1_PERR(x) ((x) << S_ARIDFIFO1_PERR)
+#define F_ARIDFIFO1_PERR V_ARIDFIFO1_PERR(1U)
+
+#define S_RRSPADDR_FIFO0_PERR 25
+#define V_RRSPADDR_FIFO0_PERR(x) ((x) << S_RRSPADDR_FIFO0_PERR)
+#define F_RRSPADDR_FIFO0_PERR V_RRSPADDR_FIFO0_PERR(1U)
+
+#define S_RRSPADDR_FIFO1_PERR 24
+#define V_RRSPADDR_FIFO1_PERR(x) ((x) << S_RRSPADDR_FIFO1_PERR)
+#define F_RRSPADDR_FIFO1_PERR V_RRSPADDR_FIFO1_PERR(1U)
+
+#define S_WRSTRB_FIFO0_PERR 23
+#define V_WRSTRB_FIFO0_PERR(x) ((x) << S_WRSTRB_FIFO0_PERR)
+#define F_WRSTRB_FIFO0_PERR V_WRSTRB_FIFO0_PERR(1U)
+
+#define S_WRSTRB_FIFO1_PERR 22
+#define V_WRSTRB_FIFO1_PERR(x) ((x) << S_WRSTRB_FIFO1_PERR)
+#define F_WRSTRB_FIFO1_PERR V_WRSTRB_FIFO1_PERR(1U)
+
+#define S_MA2AXI_RSPDATAPARERR 21
+#define V_MA2AXI_RSPDATAPARERR(x) ((x) << S_MA2AXI_RSPDATAPARERR)
+#define F_MA2AXI_RSPDATAPARERR V_MA2AXI_RSPDATAPARERR(1U)
+
+#define S_MA2AXI_DATA_PAR_ERR 20
+#define V_MA2AXI_DATA_PAR_ERR(x) ((x) << S_MA2AXI_DATA_PAR_ERR)
+#define F_MA2AXI_DATA_PAR_ERR V_MA2AXI_DATA_PAR_ERR(1U)
+
+#define S_MA2AXI_WR_ORD_FIFO_PARERR 19
+#define V_MA2AXI_WR_ORD_FIFO_PARERR(x) ((x) << S_MA2AXI_WR_ORD_FIFO_PARERR)
+#define F_MA2AXI_WR_ORD_FIFO_PARERR V_MA2AXI_WR_ORD_FIFO_PARERR(1U)
+
+#define S_NVME_DB_EMU_TRACKER_FIFO_PERR 18
+#define V_NVME_DB_EMU_TRACKER_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_TRACKER_FIFO_PERR)
+#define F_NVME_DB_EMU_TRACKER_FIFO_PERR V_NVME_DB_EMU_TRACKER_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR 17
+#define V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR)
+#define F_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR V_NVME_DB_EMU_QUEUE_AW_ADDR_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR 16
+#define V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(x) ((x) << S_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR)
+#define F_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR V_NVME_DB_EMU_INTERRUPT_OFFSET_FIFO_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO0_PERR 15
+#define V_NVME_DB_EMU_ID_FIFO0_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO0_PERR)
+#define F_NVME_DB_EMU_ID_FIFO0_PERR V_NVME_DB_EMU_ID_FIFO0_PERR(1U)
+
+#define S_NVME_DB_EMU_ID_FIFO1_PERR 14
+#define V_NVME_DB_EMU_ID_FIFO1_PERR(x) ((x) << S_NVME_DB_EMU_ID_FIFO1_PERR)
+#define F_NVME_DB_EMU_ID_FIFO1_PERR V_NVME_DB_EMU_ID_FIFO1_PERR(1U)
+
+#define S_RC_ARWFIFO_PERR 13
+#define V_RC_ARWFIFO_PERR(x) ((x) << S_RC_ARWFIFO_PERR)
+#define F_RC_ARWFIFO_PERR V_RC_ARWFIFO_PERR(1U)
+
+#define S_RC_ARIDBURSTADDRFIFO_PERR 12
+#define V_RC_ARIDBURSTADDRFIFO_PERR(x) ((x) << S_RC_ARIDBURSTADDRFIFO_PERR)
+#define F_RC_ARIDBURSTADDRFIFO_PERR V_RC_ARIDBURSTADDRFIFO_PERR(1U)
+
+#define S_RC_CFG_FIFO_PERR 11
+#define V_RC_CFG_FIFO_PERR(x) ((x) << S_RC_CFG_FIFO_PERR)
+#define F_RC_CFG_FIFO_PERR V_RC_CFG_FIFO_PERR(1U)
+
+#define S_RC_RSPFIFO_PERR 10
+#define V_RC_RSPFIFO_PERR(x) ((x) << S_RC_RSPFIFO_PERR)
+#define F_RC_RSPFIFO_PERR V_RC_RSPFIFO_PERR(1U)
+
+#define S_INIC_ARIDFIFO_PERR 9
+#define V_INIC_ARIDFIFO_PERR(x) ((x) << S_INIC_ARIDFIFO_PERR)
+#define F_INIC_ARIDFIFO_PERR V_INIC_ARIDFIFO_PERR(1U)
+
+#define S_INIC_ARWFIFO_PERR 8
+#define V_INIC_ARWFIFO_PERR(x) ((x) << S_INIC_ARWFIFO_PERR)
+#define F_INIC_ARWFIFO_PERR V_INIC_ARWFIFO_PERR(1U)
+
+#define S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR 7
+#define V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2MA_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR 6
+#define V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(x) ((x) << S_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR)
+#define F_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR V_AXI2RC_128_RD_ADDR_SIZE_FIFO_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR 5
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO0_PERR(1U)
+
+#define S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR 4
+#define V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(x) ((x) << S_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR)
+#define F_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR V_ARM_MA_512B_RD_ADDR_SIZE_FIFO1_PERR(1U)
+
+#define S_ARM_MA_512B_ARB_FIFO_PERR 3
+#define V_ARM_MA_512B_ARB_FIFO_PERR(x) ((x) << S_ARM_MA_512B_ARB_FIFO_PERR)
+#define F_ARM_MA_512B_ARB_FIFO_PERR V_ARM_MA_512B_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_MA_ARB_FIFO_PERR 2
+#define V_PCIE_INIC_MA_ARB_FIFO_PERR(x) ((x) << S_PCIE_INIC_MA_ARB_FIFO_PERR)
+#define F_PCIE_INIC_MA_ARB_FIFO_PERR V_PCIE_INIC_MA_ARB_FIFO_PERR(1U)
+
+#define S_PCIE_INIC_ARB_RSPPERR 1
+#define V_PCIE_INIC_ARB_RSPPERR(x) ((x) << S_PCIE_INIC_ARB_RSPPERR)
+#define F_PCIE_INIC_ARB_RSPPERR V_PCIE_INIC_ARB_RSPPERR(1U)
+
+#define S_ITE_CACHE_PERR 0
+#define V_ITE_CACHE_PERR(x) ((x) << S_ITE_CACHE_PERR)
+#define F_ITE_CACHE_PERR V_ITE_CACHE_PERR(1U)
+
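+/*
+ * Illustrative sketch only: cause/enable pairs such as A_ARM_PERR_INT_CAUSE1
+ * and A_ARM_PERR_INT_ENB1 below are conventionally handled by reading the
+ * cause register, reporting whichever F_* bits are set, and writing the same
+ * value back to clear them.  Write-1-to-clear semantics and the handler
+ * shape are assumptions here, not taken from this change:
+ *
+ *	uint32_t cause = t4_read_reg(adap, A_ARM_PERR_INT_CAUSE1);
+ *
+ *	if (cause & F_ITE_CACHE_PERR)
+ *		device_printf(adap->dev, "ITE cache parity error\n");
+ *	t4_write_reg(adap, A_ARM_PERR_INT_CAUSE1, cause);
+ */
+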
+#define A_ARM_PERR_INT_ENB1 0x47434
+#define A_ARM_PERR_ENABLE1 0x47438
+#define A_ARM_DEBUG_STATUS_1 0x4743c
+#define A_ARM_PCIE_MA_ADDR_REGION_DST 0x47440
+
+#define S_ADDRREGDST 0
+#define M_ADDRREGDST 0x1ffU
+#define V_ADDRREGDST(x) ((x) << S_ADDRREGDST)
+#define G_ADDRREGDST(x) (((x) >> S_ADDRREGDST) & M_ADDRREGDST)
+
+#define A_ARM_ERR_INT_CAUSE0 0x47444
+
+#define S_STRB0_ERROR 31
+#define V_STRB0_ERROR(x) ((x) << S_STRB0_ERROR)
+#define F_STRB0_ERROR V_STRB0_ERROR(1U)
+
+#define S_STRB1_ERROR 30
+#define V_STRB1_ERROR(x) ((x) << S_STRB1_ERROR)
+#define F_STRB1_ERROR V_STRB1_ERROR(1U)
+
+#define S_PCIE_INIC_MA_ARB_INV_RSP_TAG 29
+#define V_PCIE_INIC_MA_ARB_INV_RSP_TAG(x) ((x) << S_PCIE_INIC_MA_ARB_INV_RSP_TAG)
+#define F_PCIE_INIC_MA_ARB_INV_RSP_TAG V_PCIE_INIC_MA_ARB_INV_RSP_TAG(1U)
+
+#define S_ERROR0_NOCMD_DATA 28
+#define V_ERROR0_NOCMD_DATA(x) ((x) << S_ERROR0_NOCMD_DATA)
+#define F_ERROR0_NOCMD_DATA V_ERROR0_NOCMD_DATA(1U)
+
+#define S_ERROR1_NOCMD_DATA 27
+#define V_ERROR1_NOCMD_DATA(x) ((x) << S_ERROR1_NOCMD_DATA)
+#define F_ERROR1_NOCMD_DATA V_ERROR1_NOCMD_DATA(1U)
+
+#define S_INIC_STRB_ERROR 26
+#define V_INIC_STRB_ERROR(x) ((x) << S_INIC_STRB_ERROR)
+#define F_INIC_STRB_ERROR V_INIC_STRB_ERROR(1U)
+
+#define A_ARM_ERR_INT_ENB0 0x47448
+#define A_ARM_DEBUG_INDEX 0x47450
+#define A_ARM_DEBUG_DATA_HIGH 0x47454
+#define A_ARM_DEBUG_DATA_LOW 0x47458
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA0 0x47500
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_BA1 0x47504
+
+#define S_BASEADDRESS 0
+#define M_BASEADDRESS 0x3U
+#define V_BASEADDRESS(x) ((x) << S_BASEADDRESS)
+#define G_BASEADDRESS(x) (((x) >> S_BASEADDRESS) & M_BASEADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG0 0x47508
+
+#define S_WATERMARK 16
+#define M_WATERMARK 0x3ffU
+#define V_WATERMARK(x) ((x) << S_WATERMARK)
+#define G_WATERMARK(x) (((x) >> S_WATERMARK) & M_WATERMARK)
+
+#define S_SIZEMAX 0
+#define M_SIZEMAX 0x3ffU
+#define V_SIZEMAX(x) ((x) << S_SIZEMAX)
+#define G_SIZEMAX(x) (((x) >> S_SIZEMAX) & M_SIZEMAX)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG1 0x4750c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG2 0x47510
+
+#define S_CPUREADADDRESS 0
+#define M_CPUREADADDRESS 0x3ffU
+#define V_CPUREADADDRESS(x) ((x) << S_CPUREADADDRESS)
+#define G_CPUREADADDRESS(x) (((x) >> S_CPUREADADDRESS) & M_CPUREADADDRESS)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG3 0x47514
+
+#define S_CPUREADADDRESSVLD 0
+#define V_CPUREADADDRESSVLD(x) ((x) << S_CPUREADADDRESSVLD)
+#define F_CPUREADADDRESSVLD V_CPUREADADDRESSVLD(1U)
+
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG4 0x47518
+#define A_ARM_APB2MSI_INTERRUPT_0_STATUS 0x47600
+#define A_ARM_APB2MSI_INTERRUPT_1_STATUS 0x47604
+#define A_ARM_APB2MSI_INTERRUPT_2_STATUS 0x47608
+#define A_ARM_APB2MSI_INTERRUPT_3_STATUS 0x4760c
+#define A_ARM_APB2MSI_INTERRUPT_0_ENABLE 0x47610
+#define A_ARM_APB2MSI_INTERRUPT_1_ENABLE 0x47614
+#define A_ARM_APB2MSI_INTERRUPT_2_ENABLE 0x47618
+#define A_ARM_APB2MSI_INTERRUPT_3_ENABLE 0x4761c
+#define A_ARM_APB2MSI_INTERRUPT_PRIORITY_LEVEL 0x47620
+
+#define S_ARM_APB2MSI_INT_PRIORITY_LEVEL 0
+#define M_ARM_APB2MSI_INT_PRIORITY_LEVEL 0x7U
+#define V_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) ((x) << S_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+#define G_ARM_APB2MSI_INT_PRIORITY_LEVEL(x) (((x) >> S_ARM_APB2MSI_INT_PRIORITY_LEVEL) & M_ARM_APB2MSI_INT_PRIORITY_LEVEL)
+
+#define A_ARM_APB2MSI_MEM_READ_ADDR 0x47624
+
+#define S_ARM_APB2MSI_MEM_READ_ADDR 0
+#define M_ARM_APB2MSI_MEM_READ_ADDR 0x7fU
+#define V_ARM_APB2MSI_MEM_READ_ADDR(x) ((x) << S_ARM_APB2MSI_MEM_READ_ADDR)
+#define G_ARM_APB2MSI_MEM_READ_ADDR(x) (((x) >> S_ARM_APB2MSI_MEM_READ_ADDR) & M_ARM_APB2MSI_MEM_READ_ADDR)
+
+#define A_ARM_MSI_MEMORY_DATA 0x47628
+#define A_ARM_MSI_MEMORY_ADDR 0x4762c
+#define A_ARM_MSG_PCIE_MESSAGE2AXI_CFG5 0x47630
+
+#define S_CONFIGDONE 0
+#define V_CONFIGDONE(x) ((x) << S_CONFIGDONE)
+#define F_CONFIGDONE V_CONFIGDONE(1U)
+
+#define A_ARM_AXI2MA_TIMERCNT 0x47640
+#define A_ARM_AXI2MA_TRTYPE 0x47644
+
+#define S_ARMMA2AXI1ARTRTYPE 3
+#define V_ARMMA2AXI1ARTRTYPE(x) ((x) << S_ARMMA2AXI1ARTRTYPE)
+#define F_ARMMA2AXI1ARTRTYPE V_ARMMA2AXI1ARTRTYPE(1U)
+
+#define S_ARMMA2AXI1AWTRTYPE 2
+#define V_ARMMA2AXI1AWTRTYPE(x) ((x) << S_ARMMA2AXI1AWTRTYPE)
+#define F_ARMMA2AXI1AWTRTYPE V_ARMMA2AXI1AWTRTYPE(1U)
+
+#define S_ARMMA2AXI0ARTRTYPE 1
+#define V_ARMMA2AXI0ARTRTYPE(x) ((x) << S_ARMMA2AXI0ARTRTYPE)
+#define F_ARMMA2AXI0ARTRTYPE V_ARMMA2AXI0ARTRTYPE(1U)
+
+#define S_ARMMA2AXI0AWTRTYPE 0
+#define V_ARMMA2AXI0AWTRTYPE(x) ((x) << S_ARMMA2AXI0AWTRTYPE)
+#define F_ARMMA2AXI0AWTRTYPE V_ARMMA2AXI0AWTRTYPE(1U)
+
+#define A_ARM_AXI2PCIE_VENDOR 0x47660
+
+#define S_T7_VENDORID 4
+#define M_T7_VENDORID 0xffffU
+#define V_T7_VENDORID(x) ((x) << S_T7_VENDORID)
+#define G_T7_VENDORID(x) (((x) >> S_T7_VENDORID) & M_T7_VENDORID)
+
+#define S_OBFFCODE 0
+#define M_OBFFCODE 0xfU
+#define V_OBFFCODE(x) ((x) << S_OBFFCODE)
+#define G_OBFFCODE(x) (((x) >> S_OBFFCODE) & M_OBFFCODE)
+
+#define A_ARM_AXI2PCIE_VENMSGHDR_DW3 0x47664
+#define A_ARM_CLUSTER_SEL 0x47668
+
+#define S_ARM_CLUSTER_SEL 0
+#define V_ARM_CLUSTER_SEL(x) ((x) << S_ARM_CLUSTER_SEL)
+#define F_ARM_CLUSTER_SEL V_ARM_CLUSTER_SEL(1U)
+
+#define A_ARM_PWRREQ_PERMIT_ADB 0x4766c
+
+#define S_PWRQ_PERMIT_DENY_SAR 1
+#define V_PWRQ_PERMIT_DENY_SAR(x) ((x) << S_PWRQ_PERMIT_DENY_SAR)
+#define F_PWRQ_PERMIT_DENY_SAR V_PWRQ_PERMIT_DENY_SAR(1U)
+
+#define S_PWRQREQNS_ADB 0
+#define V_PWRQREQNS_ADB(x) ((x) << S_PWRQREQNS_ADB)
+#define F_PWRQREQNS_ADB V_PWRQREQNS_ADB(1U)
+
+#define A_ARM_CLK_REQ_ADB 0x47670
+
+#define S_CLKQREQNS_ADB 0
+#define V_CLKQREQNS_ADB(x) ((x) << S_CLKQREQNS_ADB)
+#define F_CLKQREQNS_ADB V_CLKQREQNS_ADB(1U)
+
+#define A_ARM_WAKEUPM 0x47674
+
+#define S_DFTRSTDISABLEM_ADB 2
+#define V_DFTRSTDISABLEM_ADB(x) ((x) << S_DFTRSTDISABLEM_ADB)
+#define F_DFTRSTDISABLEM_ADB V_DFTRSTDISABLEM_ADB(1U)
+
+#define S_DFTRSTDISABLES_ADB 1
+#define V_DFTRSTDISABLES_ADB(x) ((x) << S_DFTRSTDISABLES_ADB)
+#define F_DFTRSTDISABLES_ADB V_DFTRSTDISABLES_ADB(1U)
+
+#define S_WAKEUPM_I_ADB 0
+#define V_WAKEUPM_I_ADB(x) ((x) << S_WAKEUPM_I_ADB)
+#define F_WAKEUPM_I_ADB V_WAKEUPM_I_ADB(1U)
+
+#define A_ARM_CC_APB_FILTERING 0x47678
+
+#define S_CC_DFTSCANMODE 11
+#define V_CC_DFTSCANMODE(x) ((x) << S_CC_DFTSCANMODE)
+#define F_CC_DFTSCANMODE V_CC_DFTSCANMODE(1U)
+
+#define S_CC_OTP_FILTERING_DISABLE 10
+#define V_CC_OTP_FILTERING_DISABLE(x) ((x) << S_CC_OTP_FILTERING_DISABLE)
+#define F_CC_OTP_FILTERING_DISABLE V_CC_OTP_FILTERING_DISABLE(1U)
+
+#define S_CC_APB_FILTERING 0
+#define M_CC_APB_FILTERING 0x3ffU
+#define V_CC_APB_FILTERING(x) ((x) << S_CC_APB_FILTERING)
+#define G_CC_APB_FILTERING(x) (((x) >> S_CC_APB_FILTERING) & M_CC_APB_FILTERING)
+
+#define A_ARM_DCU_EN0 0x4767c
+#define A_ARM_DCU_EN1 0x47680
+#define A_ARM_DCU_EN2 0x47684
+#define A_ARM_DCU_EN3 0x47688
+#define A_ARM_DCU_LOCK0 0x4768c
+#define A_ARM_DCU_LOCK1 0x47690
+#define A_ARM_DCU_LOCK2 0x47694
+#define A_ARM_DCU_LOCK3 0x47698
+#define A_ARM_GPPC 0x4769c
+
+#define S_CC_SEC_DEBUG_RESET 24
+#define V_CC_SEC_DEBUG_RESET(x) ((x) << S_CC_SEC_DEBUG_RESET)
+#define F_CC_SEC_DEBUG_RESET V_CC_SEC_DEBUG_RESET(1U)
+
+#define S_CC_DFTSE 23
+#define V_CC_DFTSE(x) ((x) << S_CC_DFTSE)
+#define F_CC_DFTSE V_CC_DFTSE(1U)
+
+#define S_CC_DFTCGEN 22
+#define V_CC_DFTCGEN(x) ((x) << S_CC_DFTCGEN)
+#define F_CC_DFTCGEN V_CC_DFTCGEN(1U)
+
+#define S_CC_DFTRAMHOLD 21
+#define V_CC_DFTRAMHOLD(x) ((x) << S_CC_DFTRAMHOLD)
+#define F_CC_DFTRAMHOLD V_CC_DFTRAMHOLD(1U)
+
+#define S_CC_LOCK_BITS 12
+#define M_CC_LOCK_BITS 0x1ffU
+#define V_CC_LOCK_BITS(x) ((x) << S_CC_LOCK_BITS)
+#define G_CC_LOCK_BITS(x) (((x) >> S_CC_LOCK_BITS) & M_CC_LOCK_BITS)
+
+#define S_CC_LCS_IS_VALID 11
+#define V_CC_LCS_IS_VALID(x) ((x) << S_CC_LCS_IS_VALID)
+#define F_CC_LCS_IS_VALID V_CC_LCS_IS_VALID(1U)
+
+#define S_CC_LCS 8
+#define M_CC_LCS 0x7U
+#define V_CC_LCS(x) ((x) << S_CC_LCS)
+#define G_CC_LCS(x) (((x) >> S_CC_LCS) & M_CC_LCS)
+
+#define S_CC_GPPC 0
+#define M_CC_GPPC 0xffU
+#define V_CC_GPPC(x) ((x) << S_CC_GPPC)
+#define G_CC_GPPC(x) (((x) >> S_CC_GPPC) & M_CC_GPPC)
+
+#define A_ARM_EMMC 0x47700
+
+#define S_EMMC_CARD_CLK_EN 31
+#define V_EMMC_CARD_CLK_EN(x) ((x) << S_EMMC_CARD_CLK_EN)
+#define F_EMMC_CARD_CLK_EN V_EMMC_CARD_CLK_EN(1U)
+
+#define S_EMMC_LED_CONTROL 30
+#define V_EMMC_LED_CONTROL(x) ((x) << S_EMMC_LED_CONTROL)
+#define F_EMMC_LED_CONTROL V_EMMC_LED_CONTROL(1U)
+
+#define S_EMMC_UHS1_SWVOLT_EN 29
+#define V_EMMC_UHS1_SWVOLT_EN(x) ((x) << S_EMMC_UHS1_SWVOLT_EN)
+#define F_EMMC_UHS1_SWVOLT_EN V_EMMC_UHS1_SWVOLT_EN(1U)
+
+#define S_EMMC_UHS1_DRV_STH 27
+#define M_EMMC_UHS1_DRV_STH 0x3U
+#define V_EMMC_UHS1_DRV_STH(x) ((x) << S_EMMC_UHS1_DRV_STH)
+#define G_EMMC_UHS1_DRV_STH(x) (((x) >> S_EMMC_UHS1_DRV_STH) & M_EMMC_UHS1_DRV_STH)
+
+#define S_EMMC_SD_VDD1_ON 26
+#define V_EMMC_SD_VDD1_ON(x) ((x) << S_EMMC_SD_VDD1_ON)
+#define F_EMMC_SD_VDD1_ON V_EMMC_SD_VDD1_ON(1U)
+
+#define S_EMMC_SD_VDD1_SEL 23
+#define M_EMMC_SD_VDD1_SEL 0x7U
+#define V_EMMC_SD_VDD1_SEL(x) ((x) << S_EMMC_SD_VDD1_SEL)
+#define G_EMMC_SD_VDD1_SEL(x) (((x) >> S_EMMC_SD_VDD1_SEL) & M_EMMC_SD_VDD1_SEL)
+
+#define S_EMMC_INTCLK_EN 22
+#define V_EMMC_INTCLK_EN(x) ((x) << S_EMMC_INTCLK_EN)
+#define F_EMMC_INTCLK_EN V_EMMC_INTCLK_EN(1U)
+
+#define S_EMMC_CARD_CLK_FREQ_SEL 12
+#define M_EMMC_CARD_CLK_FREQ_SEL 0x3ffU
+#define V_EMMC_CARD_CLK_FREQ_SEL(x) ((x) << S_EMMC_CARD_CLK_FREQ_SEL)
+#define G_EMMC_CARD_CLK_FREQ_SEL(x) (((x) >> S_EMMC_CARD_CLK_FREQ_SEL) & M_EMMC_CARD_CLK_FREQ_SEL)
+
+#define S_EMMC_CARD_CLK_GEN_SEL 11
+#define V_EMMC_CARD_CLK_GEN_SEL(x) ((x) << S_EMMC_CARD_CLK_GEN_SEL)
+#define F_EMMC_CARD_CLK_GEN_SEL V_EMMC_CARD_CLK_GEN_SEL(1U)
+
+#define S_EMMC_CLK2CARD_ON 10
+#define V_EMMC_CLK2CARD_ON(x) ((x) << S_EMMC_CLK2CARD_ON)
+#define F_EMMC_CLK2CARD_ON V_EMMC_CLK2CARD_ON(1U)
+
+#define S_EMMC_CARD_CLK_STABLE 9
+#define V_EMMC_CARD_CLK_STABLE(x) ((x) << S_EMMC_CARD_CLK_STABLE)
+#define F_EMMC_CARD_CLK_STABLE V_EMMC_CARD_CLK_STABLE(1U)
+
+#define S_EMMC_INT_BCLK_STABLE 8
+#define V_EMMC_INT_BCLK_STABLE(x) ((x) << S_EMMC_INT_BCLK_STABLE)
+#define F_EMMC_INT_BCLK_STABLE V_EMMC_INT_BCLK_STABLE(1U)
+
+#define S_EMMC_INT_ACLK_STABLE 7
+#define V_EMMC_INT_ACLK_STABLE(x) ((x) << S_EMMC_INT_ACLK_STABLE)
+#define F_EMMC_INT_ACLK_STABLE V_EMMC_INT_ACLK_STABLE(1U)
+
+#define S_EMMC_INT_TMCLK_STABLE 6
+#define V_EMMC_INT_TMCLK_STABLE(x) ((x) << S_EMMC_INT_TMCLK_STABLE)
+#define F_EMMC_INT_TMCLK_STABLE V_EMMC_INT_TMCLK_STABLE(1U)
+
+#define S_EMMC_HOST_REG_VOL_STABLE 5
+#define V_EMMC_HOST_REG_VOL_STABLE(x) ((x) << S_EMMC_HOST_REG_VOL_STABLE)
+#define F_EMMC_HOST_REG_VOL_STABLE V_EMMC_HOST_REG_VOL_STABLE(1U)
+
+#define S_EMMC_CARD_DETECT_N 4
+#define V_EMMC_CARD_DETECT_N(x) ((x) << S_EMMC_CARD_DETECT_N)
+#define F_EMMC_CARD_DETECT_N V_EMMC_CARD_DETECT_N(1U)
+
+#define S_EMMC_CARD_WRITE_PROT 3
+#define V_EMMC_CARD_WRITE_PROT(x) ((x) << S_EMMC_CARD_WRITE_PROT)
+#define F_EMMC_CARD_WRITE_PROT V_EMMC_CARD_WRITE_PROT(1U)
+
+#define S_EMMC_GP_IN 2
+#define V_EMMC_GP_IN(x) ((x) << S_EMMC_GP_IN)
+#define F_EMMC_GP_IN V_EMMC_GP_IN(1U)
+
+#define S_EMMC_TEST_SCAN_MODE 1
+#define V_EMMC_TEST_SCAN_MODE(x) ((x) << S_EMMC_TEST_SCAN_MODE)
+#define F_EMMC_TEST_SCAN_MODE V_EMMC_TEST_SCAN_MODE(1U)
+
+#define S_EMMC_FIFOINJDATAERR 0
+#define V_EMMC_FIFOINJDATAERR(x) ((x) << S_EMMC_FIFOINJDATAERR)
+#define F_EMMC_FIFOINJDATAERR V_EMMC_FIFOINJDATAERR(1U)
+
+#define A_ARM_WAKEUPS 0x47704
+
+#define S_WAKEUPS_I_ADB 0
+#define V_WAKEUPS_I_ADB(x) ((x) << S_WAKEUPS_I_ADB)
+#define F_WAKEUPS_I_ADB V_WAKEUPS_I_ADB(1U)
+
+#define A_ARM_CLKREQNM_ADB 0x47708
+
+#define S_CLKQREQNM_ADB 0
+#define V_CLKQREQNM_ADB(x) ((x) << S_CLKQREQNM_ADB)
+#define F_CLKQREQNM_ADB V_CLKQREQNM_ADB(1U)
+
+#define A_ARM_ATOMICDATA0_0 0x4770c
+#define A_ARM_ATOMICDATA1_0 0x47710
+#define A_ARM_NVME_DB_EMU_INT_ENABLE 0x47740
+#define A_ARM_TCAM_WRITE_DATA 0x47744
+
+#define S_TCAM_WRITE_DATA 0
+#define M_TCAM_WRITE_DATA 0x3fffffffU
+#define V_TCAM_WRITE_DATA(x) ((x) << S_TCAM_WRITE_DATA)
+#define G_TCAM_WRITE_DATA(x) (((x) >> S_TCAM_WRITE_DATA) & M_TCAM_WRITE_DATA)
+
+#define A_ARM_TCAM_WRITE_ADDR 0x47748
+
+#define S_TCAM_WRITE_ADDR 0
+#define M_TCAM_WRITE_ADDR 0x1ffU
+#define V_TCAM_WRITE_ADDR(x) ((x) << S_TCAM_WRITE_ADDR)
+#define G_TCAM_WRITE_ADDR(x) (((x) >> S_TCAM_WRITE_ADDR) & M_TCAM_WRITE_ADDR)
+
+#define A_ARM_TCAM_READ_ADDR 0x4774c
+
+#define S_TCAM_READ_ADDR 0
+#define M_TCAM_READ_ADDR 0x1ffU
+#define V_TCAM_READ_ADDR(x) ((x) << S_TCAM_READ_ADDR)
+#define G_TCAM_READ_ADDR(x) (((x) >> S_TCAM_READ_ADDR) & M_TCAM_READ_ADDR)
+
+#define A_ARM_TCAM_CTL 0x47750
+
+#define S_TCAMCBBUSY 6
+#define V_TCAMCBBUSY(x) ((x) << S_TCAMCBBUSY)
+#define F_TCAMCBBUSY V_TCAMCBBUSY(1U)
+
+#define S_TCAMCBPASS 5
+#define V_TCAMCBPASS(x) ((x) << S_TCAMCBPASS)
+#define F_TCAMCBPASS V_TCAMCBPASS(1U)
+
+#define S_TCAMCBSTART 4
+#define V_TCAMCBSTART(x) ((x) << S_TCAMCBSTART)
+#define F_TCAMCBSTART V_TCAMCBSTART(1U)
+
+#define S_TCAMRSTCB 3
+#define V_TCAMRSTCB(x) ((x) << S_TCAMRSTCB)
+#define F_TCAMRSTCB V_TCAMRSTCB(1U)
+
+#define S_TCAM_REQBITPOS 2
+#define V_TCAM_REQBITPOS(x) ((x) << S_TCAM_REQBITPOS)
+#define F_TCAM_REQBITPOS V_TCAM_REQBITPOS(1U)
+
+#define S_TCAM_WRITE 1
+#define V_TCAM_WRITE(x) ((x) << S_TCAM_WRITE)
+#define F_TCAM_WRITE V_TCAM_WRITE(1U)
+
+#define S_TCAM_ENABLE 0
+#define V_TCAM_ENABLE(x) ((x) << S_TCAM_ENABLE)
+#define F_TCAM_ENABLE V_TCAM_ENABLE(1U)
+
+#define A_ARM_TCAM_READ_DATA 0x4775c
+
+#define S_TCAM_READ_DATA 0
+#define M_TCAM_READ_DATA 0x3fffffffU
+#define V_TCAM_READ_DATA(x) ((x) << S_TCAM_READ_DATA)
+#define G_TCAM_READ_DATA(x) (((x) >> S_TCAM_READ_DATA) & M_TCAM_READ_DATA)
+
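+/*
+ * Illustrative sketch only: A_ARM_TCAM_WRITE_DATA/_ADDR and A_ARM_TCAM_CTL
+ * above form an indirect access window into the TCAM.  The exact handshake
+ * is not spelled out in this header, so the sequence below (load data and
+ * address, then strobe the write with the block enabled) is an assumption
+ * about how such windows are typically driven:
+ *
+ *	t4_write_reg(adap, A_ARM_TCAM_WRITE_DATA, V_TCAM_WRITE_DATA(data));
+ *	t4_write_reg(adap, A_ARM_TCAM_WRITE_ADDR, V_TCAM_WRITE_ADDR(idx));
+ *	t4_write_reg(adap, A_ARM_TCAM_CTL, F_TCAM_ENABLE | F_TCAM_WRITE);
+ */
+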
+#define A_ARM_SRAM1_WRITE_DATA 0x47760
+
+#define S_SRAM1_WRITE_DATA 0
+#define M_SRAM1_WRITE_DATA 0x7fffffU
+#define V_SRAM1_WRITE_DATA(x) ((x) << S_SRAM1_WRITE_DATA)
+#define G_SRAM1_WRITE_DATA(x) (((x) >> S_SRAM1_WRITE_DATA) & M_SRAM1_WRITE_DATA)
+
+#define A_ARM_SRAM1_WRITE_ADDR 0x47764
+
+#define S_SRAM1_WRITE_ADDR 0
+#define M_SRAM1_WRITE_ADDR 0x1ffU
+#define V_SRAM1_WRITE_ADDR(x) ((x) << S_SRAM1_WRITE_ADDR)
+#define G_SRAM1_WRITE_ADDR(x) (((x) >> S_SRAM1_WRITE_ADDR) & M_SRAM1_WRITE_ADDR)
+
+#define A_ARM_SRAM1_READ_ADDR 0x47768
+
+#define S_SRAM1_READ_ADDR 0
+#define M_SRAM1_READ_ADDR 0x1ffU
+#define V_SRAM1_READ_ADDR(x) ((x) << S_SRAM1_READ_ADDR)
+#define G_SRAM1_READ_ADDR(x) (((x) >> S_SRAM1_READ_ADDR) & M_SRAM1_READ_ADDR)
+
+#define A_ARM_SRAM1_CTL 0x4776c
+
+#define S_SRAM1_WRITE 1
+#define V_SRAM1_WRITE(x) ((x) << S_SRAM1_WRITE)
+#define F_SRAM1_WRITE V_SRAM1_WRITE(1U)
+
+#define S_SRAM1_ENABLE 0
+#define V_SRAM1_ENABLE(x) ((x) << S_SRAM1_ENABLE)
+#define F_SRAM1_ENABLE V_SRAM1_ENABLE(1U)
+
+#define A_ARM_SRAM1_READ_DATA 0x47770
+
+#define S_SRAM1_READ_DATA 0
+#define M_SRAM1_READ_DATA 0x7fffffU
+#define V_SRAM1_READ_DATA(x) ((x) << S_SRAM1_READ_DATA)
+#define G_SRAM1_READ_DATA(x) (((x) >> S_SRAM1_READ_DATA) & M_SRAM1_READ_DATA)
+
+#define A_ARM_SRAM2_WRITE_DATA0 0x47774
+#define A_ARM_SRAM2_WRITE_DATA1 0x47778
+#define A_ARM_SRAM2_WRITE_DATA2 0x4777c
+#define A_ARM_SRAM2_WRITE_ADDR 0x47780
+
+#define S_SRAM2_WRITE_ADDR 0
+#define M_SRAM2_WRITE_ADDR 0x1fffU
+#define V_SRAM2_WRITE_ADDR(x) ((x) << S_SRAM2_WRITE_ADDR)
+#define G_SRAM2_WRITE_ADDR(x) (((x) >> S_SRAM2_WRITE_ADDR) & M_SRAM2_WRITE_ADDR)
+
+#define A_ARM_SRAM2_READ_ADDR 0x47784
+
+#define S_SRAM2_READ_ADDR 0
+#define M_SRAM2_READ_ADDR 0x1fffU
+#define V_SRAM2_READ_ADDR(x) ((x) << S_SRAM2_READ_ADDR)
+#define G_SRAM2_READ_ADDR(x) (((x) >> S_SRAM2_READ_ADDR) & M_SRAM2_READ_ADDR)
+
+#define A_ARM_SRAM2_CTL 0x47788
+
+#define S_SRAM2_WRITE 1
+#define V_SRAM2_WRITE(x) ((x) << S_SRAM2_WRITE)
+#define F_SRAM2_WRITE V_SRAM2_WRITE(1U)
+
+#define S_SRAM2_ENABLE 0
+#define V_SRAM2_ENABLE(x) ((x) << S_SRAM2_ENABLE)
+#define F_SRAM2_ENABLE V_SRAM2_ENABLE(1U)
+
+#define A_ARM_SRAM2_READ_DATA0 0x4778c
+#define A_ARM_SRAM2_READ_DATA1 0x47790
+#define A_ARM_SRAM2_READ_DATA2 0x47794
+#define A_ARM_DBPROC_SRAM_CTL 0x47798
+
+#define S_DBPROC_RD_EN 0
+#define V_DBPROC_RD_EN(x) ((x) << S_DBPROC_RD_EN)
+#define F_DBPROC_RD_EN V_DBPROC_RD_EN(1U)
+
+#define A_ARM_DBPROC_SRAM_READ_ADDR 0x4779c
+
+#define S_DBPROC_RD_ADDR 0
+#define M_DBPROC_RD_ADDR 0x1ffU
+#define V_DBPROC_RD_ADDR(x) ((x) << S_DBPROC_RD_ADDR)
+#define G_DBPROC_RD_ADDR(x) (((x) >> S_DBPROC_RD_ADDR) & M_DBPROC_RD_ADDR)
+
+#define A_ARM_DBPROC_SRAM_READ_DATA0 0x477a0
+#define A_ARM_DBPROC_SRAM_READ_DATA1 0x477a4
+#define A_ARM_DBPROC_SRAM_READ_DATA2 0x477a8
+#define A_ARM_DBPROC_SRAM_READ_DATA3 0x477ac
+#define A_ARM_ATOMICDATA0_1 0x477b0
+#define A_ARM_ATOMICDATA1_1 0x477b4
+#define A_ARM_SPIDEN 0x477b8
+
+#define S_SPIDEN 0
+#define V_SPIDEN(x) ((x) << S_SPIDEN)
+#define F_SPIDEN V_SPIDEN(1U)
+
+#define A_ARM_RC_INT_WRITE_DATA 0x477bc
+
+#define S_RC_INT_STATUS_WRITE_DATA 0
+#define M_RC_INT_STATUS_WRITE_DATA 0x3fU
+#define V_RC_INT_STATUS_WRITE_DATA(x) ((x) << S_RC_INT_STATUS_WRITE_DATA)
+#define G_RC_INT_STATUS_WRITE_DATA(x) (((x) >> S_RC_INT_STATUS_WRITE_DATA) & M_RC_INT_STATUS_WRITE_DATA)
+
+#define A_ARM_DFT_MBI 0x477c4
+
+#define S_MBISTREQ 3
+#define V_MBISTREQ(x) ((x) << S_MBISTREQ)
+#define F_MBISTREQ V_MBISTREQ(1U)
+
+#define S_MBISTRESETN 2
+#define V_MBISTRESETN(x) ((x) << S_MBISTRESETN)
+#define F_MBISTRESETN V_MBISTRESETN(1U)
+
+#define S_DFTRAMHOLD 1
+#define V_DFTRAMHOLD(x) ((x) << S_DFTRAMHOLD)
+#define F_DFTRAMHOLD V_DFTRAMHOLD(1U)
+
+#define S_DFTCGEN 0
+#define V_DFTCGEN(x) ((x) << S_DFTCGEN)
+#define F_DFTCGEN V_DFTCGEN(1U)
+
+#define A_ARM_DBPROC_SRAM_TH_CTL 0x477c8
+
+#define S_DBPROC_TH_WR_EN 1
+#define V_DBPROC_TH_WR_EN(x) ((x) << S_DBPROC_TH_WR_EN)
+#define F_DBPROC_TH_WR_EN V_DBPROC_TH_WR_EN(1U)
+
+#define S_DBPROC_TH_RD_EN 0
+#define V_DBPROC_TH_RD_EN(x) ((x) << S_DBPROC_TH_RD_EN)
+#define F_DBPROC_TH_RD_EN V_DBPROC_TH_RD_EN(1U)
+
+#define A_ARM_MBISTACK 0x477d4
+
+#define S_MBISTACK 0
+#define V_MBISTACK(x) ((x) << S_MBISTACK)
+#define F_MBISTACK V_MBISTACK(1U)
+
+#define A_ARM_MBISTADDR 0x477d8
+
+#define S_MBISTADDR 0
+#define M_MBISTADDR 0xfffU
+#define V_MBISTADDR(x) ((x) << S_MBISTADDR)
+#define G_MBISTADDR(x) (((x) >> S_MBISTADDR) & M_MBISTADDR)
+
+#define A_ARM_MBISTREADEN 0x477dc
+
+#define S_MBISTREADEN 0
+#define V_MBISTREADEN(x) ((x) << S_MBISTREADEN)
+#define F_MBISTREADEN V_MBISTREADEN(1U)
+
+#define A_ARM_MBISTWRITEEN 0x477e0
+
+#define S_MBISTWRITEEN 0
+#define V_MBISTWRITEEN(x) ((x) << S_MBISTWRITEEN)
+#define F_MBISTWRITEEN V_MBISTWRITEEN(1U)
+
+#define A_ARM_MBISTARRAY 0x477e4
+
+#define S_MBISTARRAY 0
+#define M_MBISTARRAY 0x3U
+#define V_MBISTARRAY(x) ((x) << S_MBISTARRAY)
+#define G_MBISTARRAY(x) (((x) >> S_MBISTARRAY) & M_MBISTARRAY)
+
+#define A_ARM_MBISTCFG 0x477e8
+
+#define S_MBISTCFG 0
+#define V_MBISTCFG(x) ((x) << S_MBISTCFG)
+#define F_MBISTCFG V_MBISTCFG(1U)
+
+#define A_ARM_MBISTINDATA0 0x477ec
+#define A_ARM_MBISTINDATA1 0x477f0
+#define A_ARM_MBISTOUTDATA1 0x477f4
+#define A_ARM_MBISTOUTDATA0 0x477f8
+#define A_ARM_NVME_DB_EMU_EN 0x477fc
+
+#define S_NVME_DB_EN 0
+#define V_NVME_DB_EN(x) ((x) << S_NVME_DB_EN)
+#define F_NVME_DB_EN V_NVME_DB_EN(1U)
+
+/* registers for module MC_T70 */
+#define MC_T70_BASE_ADDR 0x48000
+
+#define A_MC_IND_ADDR 0x48000
+
+#define S_T7_AUTOINCR 30
+#define M_T7_AUTOINCR 0x3U
+#define V_T7_AUTOINCR(x) ((x) << S_T7_AUTOINCR)
+#define G_T7_AUTOINCR(x) (((x) >> S_T7_AUTOINCR) & M_T7_AUTOINCR)
+
+#define S_IND_ADDR_ADDR 0
+#define M_IND_ADDR_ADDR 0x1ffffffU
+#define V_IND_ADDR_ADDR(x) ((x) << S_IND_ADDR_ADDR)
+#define G_IND_ADDR_ADDR(x) (((x) >> S_IND_ADDR_ADDR) & M_IND_ADDR_ADDR)
+
+#define A_MC_IND_DATA 0x48004
+#define A_MC_DBG_CTL 0x48018
+#define A_MC_DBG_DATA 0x4801c
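+
+/*
+ * Illustrative sketch only: A_MC_IND_ADDR/A_MC_IND_DATA implement the usual
+ * indirect register window -- write the target offset into the address
+ * register, then read or write A_MC_IND_DATA once per word.  The T7_AUTOINCR
+ * field selects an auto-increment mode; which accesses it advances on is an
+ * assumption here:
+ *
+ *	t4_write_reg(adap, A_MC_IND_ADDR,
+ *	    V_T7_AUTOINCR(1) | V_IND_ADDR_ADDR(off));
+ *	for (i = 0; i < n; i++)
+ *		buf[i] = t4_read_reg(adap, A_MC_IND_DATA);
+ */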
+#define A_T7_MC_P_DDRPHY_RST_CTRL 0x49300
+#define A_T7_MC_P_PERFORMANCE_CTRL 0x49304
+#define A_T7_MC_P_ECC_CTRL 0x49308
+
+#define S_BISTECCHBWCTL 7
+#define M_BISTECCHBWCTL 0x3U
+#define V_BISTECCHBWCTL(x) ((x) << S_BISTECCHBWCTL)
+#define G_BISTECCHBWCTL(x) (((x) >> S_BISTECCHBWCTL) & M_BISTECCHBWCTL)
+
+#define S_BISTTESTMODE 6
+#define V_BISTTESTMODE(x) ((x) << S_BISTTESTMODE)
+#define F_BISTTESTMODE V_BISTTESTMODE(1U)
+
+#define S_RMW_CTL_CFG 4
+#define M_RMW_CTL_CFG 0x3U
+#define V_RMW_CTL_CFG(x) ((x) << S_RMW_CTL_CFG)
+#define G_RMW_CTL_CFG(x) (((x) >> S_RMW_CTL_CFG) & M_RMW_CTL_CFG)
+
+#define A_MC_P_DDRCTL_INT_ENABLE 0x4930c
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_DCH1_ENABLE 4
+#define V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_DCH1_ENABLE V_HIF_RDATA_CRC_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE 3
+#define V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_DCH1_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_DCH1_ENABLE V_HIF_RDATA_ADDR_ERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_CRC_ERR_INTR_DCH0_ENABLE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_ENABLE(1U)
+
+#define A_MC_P_DDRCTL_INT_CAUSE 0x49310
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 25
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH1_CAUSE 24
+#define V_WR_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH1_CAUSE V_WR_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE 23
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE 22
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH1_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE 21
+#define V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH1_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH1_CAUSE 20
+#define V_SWCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH1_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH1_CAUSE V_SWCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH1_CAUSE 19
+#define V_DUCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH1_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH1_CAUSE V_DUCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH1_CAUSE 18
+#define V_LCCMD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH1_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH1_CAUSE V_LCCMD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH1_CAUSE 17
+#define V_CTRLUPD_ERR_INTR_DCH1_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH1_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH1_CAUSE V_CTRLUPD_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH1_CAUSE 16
+#define V_RFM_ALERT_INTR_DCH1_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH1_CAUSE)
+#define F_RFM_ALERT_INTR_DCH1_CAUSE V_RFM_ALERT_INTR_DCH1_CAUSE(1U)
+
+#define S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 15
+#define V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_WR_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_WR_CRC_ERR_INTR_DCH0_CAUSE 14
+#define V_WR_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_WR_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_WR_CRC_ERR_INTR_DCH0_CAUSE V_WR_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE 13
+#define V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_CAPAR_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE 12
+#define V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(x) ((x) << S_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE)
+#define F_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE V_RD_CRC_ERR_MAX_REACHED_INTR_DCH0_CAUSE(1U)
+
+#define S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE 11
+#define V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(x) ((x) << S_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE)
+#define F_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE V_DERATE_TEMP_LIMIT_INTR_DCH0_CAUSE(1U)
+
+#define S_SWCMD_ERR_INTR_DCH0_CAUSE 10
+#define V_SWCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_SWCMD_ERR_INTR_DCH0_CAUSE)
+#define F_SWCMD_ERR_INTR_DCH0_CAUSE V_SWCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_DUCMD_ERR_INTR_DCH0_CAUSE 9
+#define V_DUCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_DUCMD_ERR_INTR_DCH0_CAUSE)
+#define F_DUCMD_ERR_INTR_DCH0_CAUSE V_DUCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_LCCMD_ERR_INTR_DCH0_CAUSE 8
+#define V_LCCMD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_LCCMD_ERR_INTR_DCH0_CAUSE)
+#define F_LCCMD_ERR_INTR_DCH0_CAUSE V_LCCMD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_CTRLUPD_ERR_INTR_DCH0_CAUSE 7
+#define V_CTRLUPD_ERR_INTR_DCH0_CAUSE(x) ((x) << S_CTRLUPD_ERR_INTR_DCH0_CAUSE)
+#define F_CTRLUPD_ERR_INTR_DCH0_CAUSE V_CTRLUPD_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_RFM_ALERT_INTR_DCH0_CAUSE 6
+#define V_RFM_ALERT_INTR_DCH0_CAUSE(x) ((x) << S_RFM_ALERT_INTR_DCH0_CAUSE)
+#define F_RFM_ALERT_INTR_DCH0_CAUSE V_RFM_ALERT_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE 5
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE 4
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE 3
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE 2
+#define V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_WDATA_PTR_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE 1
+#define V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_CRC_ERR_INTR_DCH0_CAUSE(1U)
+
+#define S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE 0
+#define V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(x) ((x) << S_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE)
+#define F_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE V_HIF_RDATA_ADDR_ERR_INTR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_PAR_ENABLE 0x49314
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_ENABLE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_ENABLE V_HIF_WDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_ENABLE V_DDRCTL_ECC_CE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_ENABLE V_DDRCTL_ECC_CE_PAR_DCH0_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_ENABLE V_DDRCTL_ECC_UE_PAR_DCH1_ENABLE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_ENABLE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_ENABLE V_DDRCTL_ECC_UE_PAR_DCH0_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_ENABLE 8
+#define V_WDATARAM_PARERR_DCH1_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH1_ENABLE)
+#define F_WDATARAM_PARERR_DCH1_ENABLE V_WDATARAM_PARERR_DCH1_ENABLE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_ENABLE 7
+#define V_WDATARAM_PARERR_DCH0_ENABLE(x) ((x) << S_WDATARAM_PARERR_DCH0_ENABLE)
+#define F_WDATARAM_PARERR_DCH0_ENABLE V_WDATARAM_PARERR_DCH0_ENABLE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_ENABLE 6
+#define V_BIST_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ADDR_FIFO_PARERR_ENABLE V_BIST_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_ENABLE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_ENABLE V_BIST_ERR_ADDR_FIFO_PARERR_ENABLE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_ENABLE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_ENABLE V_HIF_WDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE V_HIF_RSPDATA_Q_PARERR_DCH0_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_ENABLE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_ENABLE(1U)
+
+#define A_T7_MC_P_PAR_CAUSE 0x49318
+
+#define S_HIF_WDATA_Q_PARERR_DCH1_CAUSE 13
+#define V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH1_CAUSE V_HIF_WDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE 12
+#define V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH1_CAUSE V_DDRCTL_ECC_CE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE 11
+#define V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_CE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_CE_PAR_DCH0_CAUSE V_DDRCTL_ECC_CE_PAR_DCH0_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE 10
+#define V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH1_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH1_CAUSE V_DDRCTL_ECC_UE_PAR_DCH1_CAUSE(1U)
+
+#define S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE 9
+#define V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(x) ((x) << S_DDRCTL_ECC_UE_PAR_DCH0_CAUSE)
+#define F_DDRCTL_ECC_UE_PAR_DCH0_CAUSE V_DDRCTL_ECC_UE_PAR_DCH0_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH1_CAUSE 8
+#define V_WDATARAM_PARERR_DCH1_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH1_CAUSE)
+#define F_WDATARAM_PARERR_DCH1_CAUSE V_WDATARAM_PARERR_DCH1_CAUSE(1U)
+
+#define S_WDATARAM_PARERR_DCH0_CAUSE 7
+#define V_WDATARAM_PARERR_DCH0_CAUSE(x) ((x) << S_WDATARAM_PARERR_DCH0_CAUSE)
+#define F_WDATARAM_PARERR_DCH0_CAUSE V_WDATARAM_PARERR_DCH0_CAUSE(1U)
+
+#define S_BIST_ADDR_FIFO_PARERR_CAUSE 6
+#define V_BIST_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ADDR_FIFO_PARERR_CAUSE V_BIST_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE 5
+#define V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(x) ((x) << S_BIST_ERR_ADDR_FIFO_PARERR_CAUSE)
+#define F_BIST_ERR_ADDR_FIFO_PARERR_CAUSE V_BIST_ERR_ADDR_FIFO_PARERR_CAUSE(1U)
+
+#define S_HIF_WDATA_Q_PARERR_DCH0_CAUSE 4
+#define V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_Q_PARERR_DCH0_CAUSE V_HIF_WDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE 3
+#define V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE 2
+#define V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE)
+#define F_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE V_HIF_RSPDATA_Q_PARERR_DCH0_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE 1
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH1_CAUSE(1U)
+
+#define S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE 0
+#define V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(x) ((x) << S_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE)
+#define F_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE V_HIF_WDATA_MASK_FIFO_PARERR_DCH0_CAUSE(1U)
+
+#define A_T7_MC_P_INT_ENABLE 0x4931c
+
+#define S_DDRPHY_INT_ENABLE 4
+#define V_DDRPHY_INT_ENABLE(x) ((x) << S_DDRPHY_INT_ENABLE)
+#define F_DDRPHY_INT_ENABLE V_DDRPHY_INT_ENABLE(1U)
+
+#define S_DDRCTL_INT_ENABLE 3
+#define V_DDRCTL_INT_ENABLE(x) ((x) << S_DDRCTL_INT_ENABLE)
+#define F_DDRCTL_INT_ENABLE V_DDRCTL_INT_ENABLE(1U)
+
+#define S_T7_ECC_CE_INT_ENABLE 2
+#define V_T7_ECC_CE_INT_ENABLE(x) ((x) << S_T7_ECC_CE_INT_ENABLE)
+#define F_T7_ECC_CE_INT_ENABLE V_T7_ECC_CE_INT_ENABLE(1U)
+
+#define S_T7_ECC_UE_INT_ENABLE 1
+#define V_T7_ECC_UE_INT_ENABLE(x) ((x) << S_T7_ECC_UE_INT_ENABLE)
+#define F_T7_ECC_UE_INT_ENABLE V_T7_ECC_UE_INT_ENABLE(1U)
+
+#define A_T7_MC_P_INT_CAUSE 0x49320
+
+#define S_DDRPHY_INT_CAUSE 4
+#define V_DDRPHY_INT_CAUSE(x) ((x) << S_DDRPHY_INT_CAUSE)
+#define F_DDRPHY_INT_CAUSE V_DDRPHY_INT_CAUSE(1U)
+
+#define S_DDRCTL_INT_CAUSE 3
+#define V_DDRCTL_INT_CAUSE(x) ((x) << S_DDRCTL_INT_CAUSE)
+#define F_DDRCTL_INT_CAUSE V_DDRCTL_INT_CAUSE(1U)
+
+#define S_T7_ECC_CE_INT_CAUSE 2
+#define V_T7_ECC_CE_INT_CAUSE(x) ((x) << S_T7_ECC_CE_INT_CAUSE)
+#define F_T7_ECC_CE_INT_CAUSE V_T7_ECC_CE_INT_CAUSE(1U)
+
+#define S_T7_ECC_UE_INT_CAUSE 1
+#define V_T7_ECC_UE_INT_CAUSE(x) ((x) << S_T7_ECC_UE_INT_CAUSE)
+#define F_T7_ECC_UE_INT_CAUSE V_T7_ECC_UE_INT_CAUSE(1U)
+
+#define A_MC_P_ECC_UE_INT_ENABLE 0x49324
+
+#define S_BIST_RSP_SRAM_UERR_ENABLE 0
+#define V_BIST_RSP_SRAM_UERR_ENABLE(x) ((x) << S_BIST_RSP_SRAM_UERR_ENABLE)
+#define F_BIST_RSP_SRAM_UERR_ENABLE V_BIST_RSP_SRAM_UERR_ENABLE(1U)
+
+#define A_MC_P_ECC_UE_INT_CAUSE 0x49328
+
+#define S_BIST_RSP_SRAM_UERR_CAUSE 0
+#define V_BIST_RSP_SRAM_UERR_CAUSE(x) ((x) << S_BIST_RSP_SRAM_UERR_CAUSE)
+#define F_BIST_RSP_SRAM_UERR_CAUSE V_BIST_RSP_SRAM_UERR_CAUSE(1U)
+
+#define A_T7_MC_P_ECC_STATUS 0x4932c
+#define A_T7_MC_P_PHY_CTRL 0x49330
+#define A_T7_MC_P_STATIC_CFG_STATUS 0x49334
+
+#define S_DFIFREQRATIO 27
+#define V_DFIFREQRATIO(x) ((x) << S_DFIFREQRATIO)
+#define F_DFIFREQRATIO V_DFIFREQRATIO(1U)
+
+#define S_STATIC_DDR5_HBW_CHANNEL 3
+#define V_STATIC_DDR5_HBW_CHANNEL(x) ((x) << S_STATIC_DDR5_HBW_CHANNEL)
+#define F_STATIC_DDR5_HBW_CHANNEL V_STATIC_DDR5_HBW_CHANNEL(1U)
+
+#define S_STATIC_DDR5_HBW 2
+#define V_STATIC_DDR5_HBW(x) ((x) << S_STATIC_DDR5_HBW)
+#define F_STATIC_DDR5_HBW V_STATIC_DDR5_HBW(1U)
+
+#define S_T7_STATIC_WIDTH 1
+#define V_T7_STATIC_WIDTH(x) ((x) << S_T7_STATIC_WIDTH)
+#define F_T7_STATIC_WIDTH V_T7_STATIC_WIDTH(1U)
+
+#define A_T7_MC_P_CORE_PCTL_STAT 0x49338
+#define A_T7_MC_P_DEBUG_CNT 0x4933c
+#define A_T7_MC_CE_ERR_DATA_RDATA 0x49340
+#define A_T7_MC_UE_ERR_DATA_RDATA 0x49380
+#define A_T7_MC_CE_ADDR 0x493c0
+#define A_T7_MC_UE_ADDR 0x493c4
+#define A_T7_MC_P_DEEP_SLEEP 0x493c8
+#define A_T7_MC_P_FPGA_BONUS 0x493cc
+#define A_T7_MC_P_DEBUG_CFG 0x493d0
+#define A_T7_MC_P_DEBUG_RPT 0x493d4
+#define A_T7_MC_P_PHY_ADR_CK_EN 0x493d8
+#define A_MC_P_WDATARAM_INIT 0x493dc
+
+#define S_ENABLE_DCH1 1
+#define V_ENABLE_DCH1(x) ((x) << S_ENABLE_DCH1)
+#define F_ENABLE_DCH1 V_ENABLE_DCH1(1U)
+
+#define S_ENABLE_DCH0 0
+#define V_ENABLE_DCH0(x) ((x) << S_ENABLE_DCH0)
+#define F_ENABLE_DCH0 V_ENABLE_DCH0(1U)
+
+#define A_T7_MC_CE_ERR_ECC_DATA0 0x493e0
+#define A_T7_MC_CE_ERR_ECC_DATA1 0x493e4
+#define A_T7_MC_UE_ERR_ECC_DATA0 0x493e8
+#define A_T7_MC_UE_ERR_ECC_DATA1 0x493ec
+#define A_T7_MC_P_RMW_PRIO 0x493f0
+#define A_T7_MC_P_BIST_CMD 0x49400
+
+#define S_FIFO_ERROR_FLAG 30
+#define V_FIFO_ERROR_FLAG(x) ((x) << S_FIFO_ERROR_FLAG)
+#define F_FIFO_ERROR_FLAG V_FIFO_ERROR_FLAG(1U)
+
+#define A_T7_MC_P_BIST_CMD_ADDR 0x49404
+
+#define S_T7_VALUE 0
+#define M_T7_VALUE 0x1fffffffU
+#define V_T7_VALUE(x) ((x) << S_T7_VALUE)
+#define G_T7_VALUE(x) (((x) >> S_T7_VALUE) & M_T7_VALUE)
+
+#define A_MC_P_BIST_NUM_BURST 0x49408
+#define A_T7_MC_P_BIST_DATA_PATTERN 0x4940c
+
+#define S_DATA_TYPE 0
+#define M_DATA_TYPE 0xfU
+#define V_DATA_TYPE(x) ((x) << S_DATA_TYPE)
+#define G_DATA_TYPE(x) (((x) >> S_DATA_TYPE) & M_DATA_TYPE)
+
+#define A_T7_MC_P_BIST_CRC_SEED 0x49410
+#define A_T7_MC_P_BIST_NUM_ERR 0x49460
+#define A_MC_P_BIST_ERR_ADDR 0x49464
+
+#define S_ERROR_ADDR 0
+#define M_ERROR_ADDR 0x3fffffffU
+#define V_ERROR_ADDR(x) ((x) << S_ERROR_ADDR)
+#define G_ERROR_ADDR(x) (((x) >> S_ERROR_ADDR) & M_ERROR_ADDR)
+
+#define A_MC_P_BIST_USER_RWEDATA 0x49468
+#define A_MC_REGB_DDRC_CH0_SCHED0 0x10380
+
+#define S_OPT_VPRW_SCH 31
+#define V_OPT_VPRW_SCH(x) ((x) << S_OPT_VPRW_SCH)
+#define F_OPT_VPRW_SCH V_OPT_VPRW_SCH(1U)
+
+#define S_DIS_SPECULATIVE_ACT 30
+#define V_DIS_SPECULATIVE_ACT(x) ((x) << S_DIS_SPECULATIVE_ACT)
+#define F_DIS_SPECULATIVE_ACT V_DIS_SPECULATIVE_ACT(1U)
+
+#define S_OPT_ACT_LAT 27
+#define V_OPT_ACT_LAT(x) ((x) << S_OPT_ACT_LAT)
+#define F_OPT_ACT_LAT V_OPT_ACT_LAT(1U)
+
+#define S_LPR_NUM_ENTRIES 8
+#define M_LPR_NUM_ENTRIES 0x3fU
+#define V_LPR_NUM_ENTRIES(x) ((x) << S_LPR_NUM_ENTRIES)
+#define G_LPR_NUM_ENTRIES(x) (((x) >> S_LPR_NUM_ENTRIES) & M_LPR_NUM_ENTRIES)
+
+#define S_AUTOPRE_RMW 7
+#define V_AUTOPRE_RMW(x) ((x) << S_AUTOPRE_RMW)
+#define F_AUTOPRE_RMW V_AUTOPRE_RMW(1U)
+
+#define S_DIS_OPT_NTT_BY_PRE 6
+#define V_DIS_OPT_NTT_BY_PRE(x) ((x) << S_DIS_OPT_NTT_BY_PRE)
+#define F_DIS_OPT_NTT_BY_PRE V_DIS_OPT_NTT_BY_PRE(1U)
+
+#define S_DIS_OPT_NTT_BY_ACT 5
+#define V_DIS_OPT_NTT_BY_ACT(x) ((x) << S_DIS_OPT_NTT_BY_ACT)
+#define F_DIS_OPT_NTT_BY_ACT V_DIS_OPT_NTT_BY_ACT(1U)
+
+#define S_OPT_WRCAM_FILL_LEVEL 4
+#define V_OPT_WRCAM_FILL_LEVEL(x) ((x) << S_OPT_WRCAM_FILL_LEVEL)
+#define F_OPT_WRCAM_FILL_LEVEL V_OPT_WRCAM_FILL_LEVEL(1U)
+
+#define S_PAGECLOSE 2
+#define V_PAGECLOSE(x) ((x) << S_PAGECLOSE)
+#define F_PAGECLOSE V_PAGECLOSE(1U)
+
+#define S_PREFER_WRITE 1
+#define V_PREFER_WRITE(x) ((x) << S_PREFER_WRITE)
+#define F_PREFER_WRITE V_PREFER_WRITE(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG0 0x10600
+
+#define S_DIS_SCRUB 23
+#define V_DIS_SCRUB(x) ((x) << S_DIS_SCRUB)
+#define F_DIS_SCRUB V_DIS_SCRUB(1U)
+
+#define S_ECC_TYPE 4
+#define M_ECC_TYPE 0x3U
+#define V_ECC_TYPE(x) ((x) << S_ECC_TYPE)
+#define G_ECC_TYPE(x) (((x) >> S_ECC_TYPE) & M_ECC_TYPE)
+
+#define S_TEST_MODE 3
+#define V_TEST_MODE(x) ((x) << S_TEST_MODE)
+#define F_TEST_MODE V_TEST_MODE(1U)
+
+#define S_ECC_MODE 0
+#define M_ECC_MODE 0x7U
+#define V_ECC_MODE(x) ((x) << S_ECC_MODE)
+#define G_ECC_MODE(x) (((x) >> S_ECC_MODE) & M_ECC_MODE)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG1 0x10604
+
+#define S_DATA_POISON_BIT 1
+#define V_DATA_POISON_BIT(x) ((x) << S_DATA_POISON_BIT)
+#define F_DATA_POISON_BIT V_DATA_POISON_BIT(1U)
+
+#define S_DATA_POISON_EN 0
+#define V_DATA_POISON_EN(x) ((x) << S_DATA_POISON_EN)
+#define F_DATA_POISON_EN V_DATA_POISON_EN(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCSTAT 0x10608
+
+#define S_ECC_UNCORRECTED_ERR 16
+#define M_ECC_UNCORRECTED_ERR 0xffU
+#define V_ECC_UNCORRECTED_ERR(x) ((x) << S_ECC_UNCORRECTED_ERR)
+#define G_ECC_UNCORRECTED_ERR(x) (((x) >> S_ECC_UNCORRECTED_ERR) & M_ECC_UNCORRECTED_ERR)
+
+#define S_ECC_CORRECTED_ERR 8
+#define M_ECC_CORRECTED_ERR 0xffU
+#define V_ECC_CORRECTED_ERR(x) ((x) << S_ECC_CORRECTED_ERR)
+#define G_ECC_CORRECTED_ERR(x) (((x) >> S_ECC_CORRECTED_ERR) & M_ECC_CORRECTED_ERR)
+
+#define S_ECC_CORRECTED_BIT_NUM 0
+#define M_ECC_CORRECTED_BIT_NUM 0x7fU
+#define V_ECC_CORRECTED_BIT_NUM(x) ((x) << S_ECC_CORRECTED_BIT_NUM)
+#define G_ECC_CORRECTED_BIT_NUM(x) (((x) >> S_ECC_CORRECTED_BIT_NUM) & M_ECC_CORRECTED_BIT_NUM)
+
+#define A_MC_REGB_DDRC_CH0_ECCCTL 0x1060c
+
+#define S_ECC_UNCORRECTED_ERR_INTR_FORCE 17
+#define V_ECC_UNCORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_FORCE)
+#define F_ECC_UNCORRECTED_ERR_INTR_FORCE V_ECC_UNCORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_FORCE 16
+#define V_ECC_CORRECTED_ERR_INTR_FORCE(x) ((x) << S_ECC_CORRECTED_ERR_INTR_FORCE)
+#define F_ECC_CORRECTED_ERR_INTR_FORCE V_ECC_CORRECTED_ERR_INTR_FORCE(1U)
+
+#define S_ECC_UNCORRECTED_ERR_INTR_EN 9
+#define V_ECC_UNCORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_UNCORRECTED_ERR_INTR_EN)
+#define F_ECC_UNCORRECTED_ERR_INTR_EN V_ECC_UNCORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_CORRECTED_ERR_INTR_EN 8
+#define V_ECC_CORRECTED_ERR_INTR_EN(x) ((x) << S_ECC_CORRECTED_ERR_INTR_EN)
+#define F_ECC_CORRECTED_ERR_INTR_EN V_ECC_CORRECTED_ERR_INTR_EN(1U)
+
+#define S_ECC_UNCORR_ERR_CNT_CLR 3
+#define V_ECC_UNCORR_ERR_CNT_CLR(x) ((x) << S_ECC_UNCORR_ERR_CNT_CLR)
+#define F_ECC_UNCORR_ERR_CNT_CLR V_ECC_UNCORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_CORR_ERR_CNT_CLR 2
+#define V_ECC_CORR_ERR_CNT_CLR(x) ((x) << S_ECC_CORR_ERR_CNT_CLR)
+#define F_ECC_CORR_ERR_CNT_CLR V_ECC_CORR_ERR_CNT_CLR(1U)
+
+#define S_ECC_UNCORRECTED_ERR_CLR 1
+#define V_ECC_UNCORRECTED_ERR_CLR(x) ((x) << S_ECC_UNCORRECTED_ERR_CLR)
+#define F_ECC_UNCORRECTED_ERR_CLR V_ECC_UNCORRECTED_ERR_CLR(1U)
+
+#define S_ECC_CORRECTED_ERR_CLR 0
+#define V_ECC_CORRECTED_ERR_CLR(x) ((x) << S_ECC_CORRECTED_ERR_CLR)
+#define F_ECC_CORRECTED_ERR_CLR V_ECC_CORRECTED_ERR_CLR(1U)
+
+#define A_MC_REGB_DDRC_CH0_ECCERRCNT 0x10610
+
+#define S_ECC_UNCORR_ERR_CNT 16
+#define M_ECC_UNCORR_ERR_CNT 0xffffU
+#define V_ECC_UNCORR_ERR_CNT(x) ((x) << S_ECC_UNCORR_ERR_CNT)
+#define G_ECC_UNCORR_ERR_CNT(x) (((x) >> S_ECC_UNCORR_ERR_CNT) & M_ECC_UNCORR_ERR_CNT)
+
+#define S_ECC_CORR_ERR_CNT 0
+#define M_ECC_CORR_ERR_CNT 0xffffU
+#define V_ECC_CORR_ERR_CNT(x) ((x) << S_ECC_CORR_ERR_CNT)
+#define G_ECC_CORR_ERR_CNT(x) (((x) >> S_ECC_CORR_ERR_CNT) & M_ECC_CORR_ERR_CNT)
+
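+/*
+ * Sketch of pulling both counters out of a single ECCERRCNT read; error
+ * handling and any indirection the memory controller block may require
+ * are omitted:
+ *
+ *     uint32_t v = t4_read_reg(sc, A_MC_REGB_DDRC_CH0_ECCERRCNT);
+ *     unsigned int corr = G_ECC_CORR_ERR_CNT(v);
+ *     unsigned int uncorr = G_ECC_UNCORR_ERR_CNT(v);
+ */
+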
+#define A_MC_REGB_DDRC_CH0_ECCCADDR0 0x10614
+
+#define S_ECC_CORR_RANK 24
+#define V_ECC_CORR_RANK(x) ((x) << S_ECC_CORR_RANK)
+#define F_ECC_CORR_RANK V_ECC_CORR_RANK(1U)
+
+#define S_ECC_CORR_ROW 0
+#define M_ECC_CORR_ROW 0x3ffffU
+#define V_ECC_CORR_ROW(x) ((x) << S_ECC_CORR_ROW)
+#define G_ECC_CORR_ROW(x) (((x) >> S_ECC_CORR_ROW) & M_ECC_CORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCCADDR1 0x10618
+
+#define S_ECC_CORR_BG 24
+#define M_ECC_CORR_BG 0x7U
+#define V_ECC_CORR_BG(x) ((x) << S_ECC_CORR_BG)
+#define G_ECC_CORR_BG(x) (((x) >> S_ECC_CORR_BG) & M_ECC_CORR_BG)
+
+#define S_ECC_CORR_BANK 16
+#define M_ECC_CORR_BANK 0x3U
+#define V_ECC_CORR_BANK(x) ((x) << S_ECC_CORR_BANK)
+#define G_ECC_CORR_BANK(x) (((x) >> S_ECC_CORR_BANK) & M_ECC_CORR_BANK)
+
+#define S_ECC_CORR_COL 0
+#define M_ECC_CORR_COL 0x7ffU
+#define V_ECC_CORR_COL(x) ((x) << S_ECC_CORR_COL)
+#define G_ECC_CORR_COL(x) (((x) >> S_ECC_CORR_COL) & M_ECC_CORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCCSYN0 0x1061c
+#define A_MC_REGB_DDRC_CH0_ECCCSYN1 0x10620
+#define A_MC_REGB_DDRC_CH0_ECCCSYN2 0x10624
+
+#define S_CB_CORR_SYNDROME 16
+#define M_CB_CORR_SYNDROME 0xffU
+#define V_CB_CORR_SYNDROME(x) ((x) << S_CB_CORR_SYNDROME)
+#define G_CB_CORR_SYNDROME(x) (((x) >> S_CB_CORR_SYNDROME) & M_CB_CORR_SYNDROME)
+
+#define S_ECC_CORR_SYNDROMES_71_64 0
+#define M_ECC_CORR_SYNDROMES_71_64 0xffU
+#define V_ECC_CORR_SYNDROMES_71_64(x) ((x) << S_ECC_CORR_SYNDROMES_71_64)
+#define G_ECC_CORR_SYNDROMES_71_64(x) (((x) >> S_ECC_CORR_SYNDROMES_71_64) & M_ECC_CORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK0 0x10628
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK1 0x1062c
+#define A_MC_REGB_DDRC_CH0_ECCBITMASK2 0x10630
+
+#define S_ECC_CORR_BIT_MASK_71_64 0
+#define M_ECC_CORR_BIT_MASK_71_64 0xffU
+#define V_ECC_CORR_BIT_MASK_71_64(x) ((x) << S_ECC_CORR_BIT_MASK_71_64)
+#define G_ECC_CORR_BIT_MASK_71_64(x) (((x) >> S_ECC_CORR_BIT_MASK_71_64) & M_ECC_CORR_BIT_MASK_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR0 0x10634
+
+#define S_ECC_UNCORR_RANK 24
+#define V_ECC_UNCORR_RANK(x) ((x) << S_ECC_UNCORR_RANK)
+#define F_ECC_UNCORR_RANK V_ECC_UNCORR_RANK(1U)
+
+#define S_ECC_UNCORR_ROW 0
+#define M_ECC_UNCORR_ROW 0x3ffffU
+#define V_ECC_UNCORR_ROW(x) ((x) << S_ECC_UNCORR_ROW)
+#define G_ECC_UNCORR_ROW(x) (((x) >> S_ECC_UNCORR_ROW) & M_ECC_UNCORR_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCUADDR1 0x10638
+
+#define S_ECC_UNCORR_BG 24
+#define M_ECC_UNCORR_BG 0x7U
+#define V_ECC_UNCORR_BG(x) ((x) << S_ECC_UNCORR_BG)
+#define G_ECC_UNCORR_BG(x) (((x) >> S_ECC_UNCORR_BG) & M_ECC_UNCORR_BG)
+
+#define S_ECC_UNCORR_BANK 16
+#define M_ECC_UNCORR_BANK 0x3U
+#define V_ECC_UNCORR_BANK(x) ((x) << S_ECC_UNCORR_BANK)
+#define G_ECC_UNCORR_BANK(x) (((x) >> S_ECC_UNCORR_BANK) & M_ECC_UNCORR_BANK)
+
+#define S_ECC_UNCORR_COL 0
+#define M_ECC_UNCORR_COL 0x7ffU
+#define V_ECC_UNCORR_COL(x) ((x) << S_ECC_UNCORR_COL)
+#define G_ECC_UNCORR_COL(x) (((x) >> S_ECC_UNCORR_COL) & M_ECC_UNCORR_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCUSYN0 0x1063c
+#define A_MC_REGB_DDRC_CH0_ECCUSYN1 0x10640
+#define A_MC_REGB_DDRC_CH0_ECCUSYN2 0x10644
+
+#define S_CB_UNCORR_SYNDROME 16
+#define M_CB_UNCORR_SYNDROME 0xffU
+#define V_CB_UNCORR_SYNDROME(x) ((x) << S_CB_UNCORR_SYNDROME)
+#define G_CB_UNCORR_SYNDROME(x) (((x) >> S_CB_UNCORR_SYNDROME) & M_CB_UNCORR_SYNDROME)
+
+#define S_ECC_UNCORR_SYNDROMES_71_64 0
+#define M_ECC_UNCORR_SYNDROMES_71_64 0xffU
+#define V_ECC_UNCORR_SYNDROMES_71_64(x) ((x) << S_ECC_UNCORR_SYNDROMES_71_64)
+#define G_ECC_UNCORR_SYNDROMES_71_64(x) (((x) >> S_ECC_UNCORR_SYNDROMES_71_64) & M_ECC_UNCORR_SYNDROMES_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR0 0x10648
+
+#define S_ECC_POISON_RANK 24
+#define V_ECC_POISON_RANK(x) ((x) << S_ECC_POISON_RANK)
+#define F_ECC_POISON_RANK V_ECC_POISON_RANK(1U)
+
+#define S_ECC_POISON_COL 0
+#define M_ECC_POISON_COL 0xfffU
+#define V_ECC_POISON_COL(x) ((x) << S_ECC_POISON_COL)
+#define G_ECC_POISON_COL(x) (((x) >> S_ECC_POISON_COL) & M_ECC_POISON_COL)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONADDR1 0x1064c
+
+#define S_ECC_POISON_BG 28
+#define M_ECC_POISON_BG 0x7U
+#define V_ECC_POISON_BG(x) ((x) << S_ECC_POISON_BG)
+#define G_ECC_POISON_BG(x) (((x) >> S_ECC_POISON_BG) & M_ECC_POISON_BG)
+
+#define S_ECC_POISON_BANK 24
+#define M_ECC_POISON_BANK 0x3U
+#define V_ECC_POISON_BANK(x) ((x) << S_ECC_POISON_BANK)
+#define G_ECC_POISON_BANK(x) (((x) >> S_ECC_POISON_BANK) & M_ECC_POISON_BANK)
+
+#define S_ECC_POISON_ROW 0
+#define M_ECC_POISON_ROW 0x3ffffU
+#define V_ECC_POISON_ROW(x) ((x) << S_ECC_POISON_ROW)
+#define G_ECC_POISON_ROW(x) (((x) >> S_ECC_POISON_ROW) & M_ECC_POISON_ROW)
+
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT0 0x10658
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT1 0x1065c
+#define A_MC_REGB_DDRC_CH0_ECCPOISONPAT2 0x10660
+
+#define S_ECC_POISON_DATA_71_64 0
+#define M_ECC_POISON_DATA_71_64 0xffU
+#define V_ECC_POISON_DATA_71_64(x) ((x) << S_ECC_POISON_DATA_71_64)
+#define G_ECC_POISON_DATA_71_64(x) (((x) >> S_ECC_POISON_DATA_71_64) & M_ECC_POISON_DATA_71_64)
+
+#define A_MC_REGB_DDRC_CH0_ECCCFG2 0x10668
+
+#define S_FLIP_BIT_POS1 24
+#define M_FLIP_BIT_POS1 0x7fU
+#define V_FLIP_BIT_POS1(x) ((x) << S_FLIP_BIT_POS1)
+#define G_FLIP_BIT_POS1(x) (((x) >> S_FLIP_BIT_POS1) & M_FLIP_BIT_POS1)
+
+#define S_FLIP_BIT_POS0 16
+#define M_FLIP_BIT_POS0 0x7fU
+#define V_FLIP_BIT_POS0(x) ((x) << S_FLIP_BIT_POS0)
+#define G_FLIP_BIT_POS0(x) (((x) >> S_FLIP_BIT_POS0) & M_FLIP_BIT_POS0)
+
+#define A_MC_REGB_DDRC_CH1_ECCCTL 0x1160c
+#define A_MC_REGB_DDRC_CH1_ECCERRCNT 0x11610
+#define A_MC_REGB_DDRC_CH1_ECCCADDR0 0x11614
+#define A_MC_REGB_DDRC_CH1_ECCCADDR1 0x11618
+#define A_MC_REGB_DDRC_CH1_ECCCSYN0 0x1161c
+#define A_MC_REGB_DDRC_CH1_ECCCSYN1 0x11620
+#define A_MC_REGB_DDRC_CH1_ECCCSYN2 0x11624
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK0 0x11628
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK1 0x1162c
+#define A_MC_REGB_DDRC_CH1_ECCBITMASK2 0x11630
+#define A_MC_REGB_DDRC_CH1_ECCUADDR0 0x11634
+#define A_MC_REGB_DDRC_CH1_ECCUADDR1 0x11638
+#define A_MC_REGB_DDRC_CH1_ECCUSYN0 0x1163c
+#define A_MC_REGB_DDRC_CH1_ECCUSYN1 0x11640
+#define A_MC_REGB_DDRC_CH1_ECCUSYN2 0x11644
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTENABLE 0x20100
+
+#define S_PHYSTICKYUNLOCKEN 15
+#define V_PHYSTICKYUNLOCKEN(x) ((x) << S_PHYSTICKYUNLOCKEN)
+#define F_PHYSTICKYUNLOCKEN V_PHYSTICKYUNLOCKEN(1U)
+
+#define S_PHYBSIEN 14
+#define V_PHYBSIEN(x) ((x) << S_PHYBSIEN)
+#define F_PHYBSIEN V_PHYBSIEN(1U)
+
+#define S_PHYANIBRCVERREN 13
+#define V_PHYANIBRCVERREN(x) ((x) << S_PHYANIBRCVERREN)
+#define F_PHYANIBRCVERREN V_PHYANIBRCVERREN(1U)
+
+#define S_PHYD5ACSM1PARITYEN 12
+#define V_PHYD5ACSM1PARITYEN(x) ((x) << S_PHYD5ACSM1PARITYEN)
+#define F_PHYD5ACSM1PARITYEN V_PHYD5ACSM1PARITYEN(1U)
+
+#define S_PHYD5ACSM0PARITYEN 11
+#define V_PHYD5ACSM0PARITYEN(x) ((x) << S_PHYD5ACSM0PARITYEN)
+#define F_PHYD5ACSM0PARITYEN V_PHYD5ACSM0PARITYEN(1U)
+
+#define S_PHYRXFIFOCHECKEN 10
+#define V_PHYRXFIFOCHECKEN(x) ((x) << S_PHYRXFIFOCHECKEN)
+#define F_PHYRXFIFOCHECKEN V_PHYRXFIFOCHECKEN(1U)
+
+#define S_PHYTXPPTEN 9
+#define V_PHYTXPPTEN(x) ((x) << S_PHYTXPPTEN)
+#define F_PHYTXPPTEN V_PHYTXPPTEN(1U)
+
+#define S_PHYECCEN 8
+#define V_PHYECCEN(x) ((x) << S_PHYECCEN)
+#define F_PHYECCEN V_PHYECCEN(1U)
+
+#define S_PHYFWRESERVEDEN 3
+#define M_PHYFWRESERVEDEN 0x1fU
+#define V_PHYFWRESERVEDEN(x) ((x) << S_PHYFWRESERVEDEN)
+#define G_PHYFWRESERVEDEN(x) (((x) >> S_PHYFWRESERVEDEN) & M_PHYFWRESERVEDEN)
+
+#define S_PHYTRNGFAILEN 2
+#define V_PHYTRNGFAILEN(x) ((x) << S_PHYTRNGFAILEN)
+#define F_PHYTRNGFAILEN V_PHYTRNGFAILEN(1U)
+
+#define S_PHYINITCMPLTEN 1
+#define V_PHYINITCMPLTEN(x) ((x) << S_PHYINITCMPLTEN)
+#define F_PHYINITCMPLTEN V_PHYINITCMPLTEN(1U)
+
+#define S_PHYTRNGCMPLTEN 0
+#define V_PHYTRNGCMPLTEN(x) ((x) << S_PHYTRNGCMPLTEN)
+#define F_PHYTRNGCMPLTEN V_PHYTRNGCMPLTEN(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTFWCONTROL 0x20101
+
+#define S_PHYFWRESERVEDFW 3
+#define M_PHYFWRESERVEDFW 0x1fU
+#define V_PHYFWRESERVEDFW(x) ((x) << S_PHYFWRESERVEDFW)
+#define G_PHYFWRESERVEDFW(x) (((x) >> S_PHYFWRESERVEDFW) & M_PHYFWRESERVEDFW)
+
+#define S_PHYTRNGFAILFW 2
+#define V_PHYTRNGFAILFW(x) ((x) << S_PHYTRNGFAILFW)
+#define F_PHYTRNGFAILFW V_PHYTRNGFAILFW(1U)
+
+#define S_PHYINITCMPLTFW 1
+#define V_PHYINITCMPLTFW(x) ((x) << S_PHYINITCMPLTFW)
+#define F_PHYINITCMPLTFW V_PHYINITCMPLTFW(1U)
+
+#define S_PHYTRNGCMPLTFW 0
+#define V_PHYTRNGCMPLTFW(x) ((x) << S_PHYTRNGCMPLTFW)
+#define F_PHYTRNGCMPLTFW V_PHYTRNGCMPLTFW(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTMASK 0x20102
+
+#define S_PHYSTICKYUNLOCKMSK 15
+#define V_PHYSTICKYUNLOCKMSK(x) ((x) << S_PHYSTICKYUNLOCKMSK)
+#define F_PHYSTICKYUNLOCKMSK V_PHYSTICKYUNLOCKMSK(1U)
+
+#define S_PHYBSIMSK 14
+#define V_PHYBSIMSK(x) ((x) << S_PHYBSIMSK)
+#define F_PHYBSIMSK V_PHYBSIMSK(1U)
+
+#define S_PHYANIBRCVERRMSK 13
+#define V_PHYANIBRCVERRMSK(x) ((x) << S_PHYANIBRCVERRMSK)
+#define F_PHYANIBRCVERRMSK V_PHYANIBRCVERRMSK(1U)
+
+#define S_PHYD5ACSM1PARITYMSK 12
+#define V_PHYD5ACSM1PARITYMSK(x) ((x) << S_PHYD5ACSM1PARITYMSK)
+#define F_PHYD5ACSM1PARITYMSK V_PHYD5ACSM1PARITYMSK(1U)
+
+#define S_PHYD5ACSM0PARITYMSK 11
+#define V_PHYD5ACSM0PARITYMSK(x) ((x) << S_PHYD5ACSM0PARITYMSK)
+#define F_PHYD5ACSM0PARITYMSK V_PHYD5ACSM0PARITYMSK(1U)
+
+#define S_PHYRXFIFOCHECKMSK 10
+#define V_PHYRXFIFOCHECKMSK(x) ((x) << S_PHYRXFIFOCHECKMSK)
+#define F_PHYRXFIFOCHECKMSK V_PHYRXFIFOCHECKMSK(1U)
+
+#define S_PHYTXPPTMSK 9
+#define V_PHYTXPPTMSK(x) ((x) << S_PHYTXPPTMSK)
+#define F_PHYTXPPTMSK V_PHYTXPPTMSK(1U)
+
+#define S_PHYECCMSK 8
+#define V_PHYECCMSK(x) ((x) << S_PHYECCMSK)
+#define F_PHYECCMSK V_PHYECCMSK(1U)
+
+#define S_PHYFWRESERVEDMSK 3
+#define M_PHYFWRESERVEDMSK 0x1fU
+#define V_PHYFWRESERVEDMSK(x) ((x) << S_PHYFWRESERVEDMSK)
+#define G_PHYFWRESERVEDMSK(x) (((x) >> S_PHYFWRESERVEDMSK) & M_PHYFWRESERVEDMSK)
+
+#define S_PHYTRNGFAILMSK 2
+#define V_PHYTRNGFAILMSK(x) ((x) << S_PHYTRNGFAILMSK)
+#define F_PHYTRNGFAILMSK V_PHYTRNGFAILMSK(1U)
+
+#define S_PHYINITCMPLTMSK 1
+#define V_PHYINITCMPLTMSK(x) ((x) << S_PHYINITCMPLTMSK)
+#define F_PHYINITCMPLTMSK V_PHYINITCMPLTMSK(1U)
+
+#define S_PHYTRNGCMPLTMSK 0
+#define V_PHYTRNGCMPLTMSK(x) ((x) << S_PHYTRNGCMPLTMSK)
+#define F_PHYTRNGCMPLTMSK V_PHYTRNGCMPLTMSK(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTCLEAR 0x20103
+
+#define S_PHYSTICKYUNLOCKCLR 15
+#define V_PHYSTICKYUNLOCKCLR(x) ((x) << S_PHYSTICKYUNLOCKCLR)
+#define F_PHYSTICKYUNLOCKCLR V_PHYSTICKYUNLOCKCLR(1U)
+
+#define S_PHYBSICLR 14
+#define V_PHYBSICLR(x) ((x) << S_PHYBSICLR)
+#define F_PHYBSICLR V_PHYBSICLR(1U)
+
+#define S_PHYANIBRCVERRCLR 13
+#define V_PHYANIBRCVERRCLR(x) ((x) << S_PHYANIBRCVERRCLR)
+#define F_PHYANIBRCVERRCLR V_PHYANIBRCVERRCLR(1U)
+
+#define S_PHYD5ACSM1PARITYCLR 12
+#define V_PHYD5ACSM1PARITYCLR(x) ((x) << S_PHYD5ACSM1PARITYCLR)
+#define F_PHYD5ACSM1PARITYCLR V_PHYD5ACSM1PARITYCLR(1U)
+
+#define S_PHYD5ACSM0PARITYCLR 11
+#define V_PHYD5ACSM0PARITYCLR(x) ((x) << S_PHYD5ACSM0PARITYCLR)
+#define F_PHYD5ACSM0PARITYCLR V_PHYD5ACSM0PARITYCLR(1U)
+
+#define S_PHYRXFIFOCHECKCLR 10
+#define V_PHYRXFIFOCHECKCLR(x) ((x) << S_PHYRXFIFOCHECKCLR)
+#define F_PHYRXFIFOCHECKCLR V_PHYRXFIFOCHECKCLR(1U)
+
+#define S_PHYTXPPTCLR 9
+#define V_PHYTXPPTCLR(x) ((x) << S_PHYTXPPTCLR)
+#define F_PHYTXPPTCLR V_PHYTXPPTCLR(1U)
+
+#define S_PHYECCCLR 8
+#define V_PHYECCCLR(x) ((x) << S_PHYECCCLR)
+#define F_PHYECCCLR V_PHYECCCLR(1U)
+
+#define S_PHYFWRESERVEDCLR 3
+#define M_PHYFWRESERVEDCLR 0x1fU
+#define V_PHYFWRESERVEDCLR(x) ((x) << S_PHYFWRESERVEDCLR)
+#define G_PHYFWRESERVEDCLR(x) (((x) >> S_PHYFWRESERVEDCLR) & M_PHYFWRESERVEDCLR)
+
+#define S_PHYTRNGFAILCLR 2
+#define V_PHYTRNGFAILCLR(x) ((x) << S_PHYTRNGFAILCLR)
+#define F_PHYTRNGFAILCLR V_PHYTRNGFAILCLR(1U)
+
+#define S_PHYINITCMPLTCLR 1
+#define V_PHYINITCMPLTCLR(x) ((x) << S_PHYINITCMPLTCLR)
+#define F_PHYINITCMPLTCLR V_PHYINITCMPLTCLR(1U)
+
+#define S_PHYTRNGCMPLTCLR 0
+#define V_PHYTRNGCMPLTCLR(x) ((x) << S_PHYTRNGCMPLTCLR)
+#define F_PHYTRNGCMPLTCLR V_PHYTRNGCMPLTCLR(1U)
+
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTSTATUS 0x20104
+
+#define S_PHYSTICKYUNLOCKERR 15
+#define V_PHYSTICKYUNLOCKERR(x) ((x) << S_PHYSTICKYUNLOCKERR)
+#define F_PHYSTICKYUNLOCKERR V_PHYSTICKYUNLOCKERR(1U)
+
+#define S_PHYBSIINT 14
+#define V_PHYBSIINT(x) ((x) << S_PHYBSIINT)
+#define F_PHYBSIINT V_PHYBSIINT(1U)
+
+#define S_PHYANIBRCVERR 13
+#define V_PHYANIBRCVERR(x) ((x) << S_PHYANIBRCVERR)
+#define F_PHYANIBRCVERR V_PHYANIBRCVERR(1U)
+
+#define S_PHYD5ACSM1PARITYERR 12
+#define V_PHYD5ACSM1PARITYERR(x) ((x) << S_PHYD5ACSM1PARITYERR)
+#define F_PHYD5ACSM1PARITYERR V_PHYD5ACSM1PARITYERR(1U)
+
+#define S_PHYD5ACSM0PARITYERR 11
+#define V_PHYD5ACSM0PARITYERR(x) ((x) << S_PHYD5ACSM0PARITYERR)
+#define F_PHYD5ACSM0PARITYERR V_PHYD5ACSM0PARITYERR(1U)
+
+#define S_PHYRXFIFOCHECKERR 10
+#define V_PHYRXFIFOCHECKERR(x) ((x) << S_PHYRXFIFOCHECKERR)
+#define F_PHYRXFIFOCHECKERR V_PHYRXFIFOCHECKERR(1U)
+
+#define S_PHYRXTXPPTERR 9
+#define V_PHYRXTXPPTERR(x) ((x) << S_PHYRXTXPPTERR)
+#define F_PHYRXTXPPTERR V_PHYRXTXPPTERR(1U)
+
+#define S_PHYECCERR 8
+#define V_PHYECCERR(x) ((x) << S_PHYECCERR)
+#define F_PHYECCERR V_PHYECCERR(1U)
+
+#define S_PHYFWRESERVED 3
+#define M_PHYFWRESERVED 0x1fU
+#define V_PHYFWRESERVED(x) ((x) << S_PHYFWRESERVED)
+#define G_PHYFWRESERVED(x) (((x) >> S_PHYFWRESERVED) & M_PHYFWRESERVED)
+
+#define S_PHYTRNGFAIL 2
+#define V_PHYTRNGFAIL(x) ((x) << S_PHYTRNGFAIL)
+#define F_PHYTRNGFAIL V_PHYTRNGFAIL(1U)
+
+#define S_PHYINITCMPLT 1
+#define V_PHYINITCMPLT(x) ((x) << S_PHYINITCMPLT)
+#define F_PHYINITCMPLT V_PHYINITCMPLT(1U)
+
+#define S_PHYTRNGCMPLT 0
+#define V_PHYTRNGCMPLT(x) ((x) << S_PHYTRNGCMPLT)
+#define F_PHYTRNGCMPLT V_PHYTRNGCMPLT(1U)
+
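+/*
+ * The PHYINTERRUPT{ENABLE,FWCONTROL,MASK,CLEAR,STATUS} registers above
+ * follow a conventional enable/mask/clear/status split.  A sketch of
+ * acknowledging a PHY ECC interrupt, assuming these CSRs are reachable
+ * through the ordinary register accessors rather than an indirect window:
+ *
+ *     uint32_t v = t4_read_reg(sc,
+ *         A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTSTATUS);
+ *     if (v & F_PHYECCERR)
+ *         t4_write_reg(sc,
+ *             A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTCLEAR,
+ *             F_PHYECCCLR);
+ */
+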
+#define A_MC_DWC_DDRPHYA_MASTER0_BASE0_PHYINTERRUPTOVERRIDE 0x20107
+
+#define S_PHYINTERRUPTOVERRIDE 0
+#define M_PHYINTERRUPTOVERRIDE 0xffffU
+#define V_PHYINTERRUPTOVERRIDE(x) ((x) << S_PHYINTERRUPTOVERRIDE)
+#define G_PHYINTERRUPTOVERRIDE(x) (((x) >> S_PHYINTERRUPTOVERRIDE) & M_PHYINTERRUPTOVERRIDE)
+
+/* registers for module MC_T71 */
+#define MC_T71_BASE_ADDR 0x58000
+
+/* registers for module GCACHE */
+#define GCACHE_BASE_ADDR 0x51400
+
+#define A_GCACHE_MODE_SEL0 0x51400
+
+#define S_GC_MA_RSP 16
+#define V_GC_MA_RSP(x) ((x) << S_GC_MA_RSP)
+#define F_GC_MA_RSP V_GC_MA_RSP(1U)
+
+#define A_GCACHE_MEMZONE0_REGION1 0x51404
+
+#define S_REGION_EN1 18
+#define V_REGION_EN1(x) ((x) << S_REGION_EN1)
+#define F_REGION_EN1 V_REGION_EN1(1U)
+
+#define S_EDC_REGION1 17
+#define V_EDC_REGION1(x) ((x) << S_EDC_REGION1)
+#define F_EDC_REGION1 V_EDC_REGION1(1U)
+
+#define S_CACHE_REGION1 16
+#define V_CACHE_REGION1(x) ((x) << S_CACHE_REGION1)
+#define F_CACHE_REGION1 V_CACHE_REGION1(1U)
+
+#define S_END1 0
+#define M_END1 0xffffU
+#define V_END1(x) ((x) << S_END1)
+#define G_END1(x) (((x) >> S_END1) & M_END1)
+
+#define A_GCACHE_MEMZONE0_REGION2 0x51408
+
+#define S_REGION_EN2 18
+#define V_REGION_EN2(x) ((x) << S_REGION_EN2)
+#define F_REGION_EN2 V_REGION_EN2(1U)
+
+#define S_EDC_REGION2 17
+#define V_EDC_REGION2(x) ((x) << S_EDC_REGION2)
+#define F_EDC_REGION2 V_EDC_REGION2(1U)
+
+#define S_CACHE_REGION2 16
+#define V_CACHE_REGION2(x) ((x) << S_CACHE_REGION2)
+#define F_CACHE_REGION2 V_CACHE_REGION2(1U)
+
+#define S_END2 0
+#define M_END2 0xffffU
+#define V_END2(x) ((x) << S_END2)
+#define G_END2(x) (((x) >> S_END2) & M_END2)
+
+#define A_GCACHE_MEMZONE0_REGION3 0x5140c
+
+#define S_REGION_EN3 18
+#define V_REGION_EN3(x) ((x) << S_REGION_EN3)
+#define F_REGION_EN3 V_REGION_EN3(1U)
+
+#define S_EDC_REGION3 17
+#define V_EDC_REGION3(x) ((x) << S_EDC_REGION3)
+#define F_EDC_REGION3 V_EDC_REGION3(1U)
+
+#define S_CACHE_REGION3 16
+#define V_CACHE_REGION3(x) ((x) << S_CACHE_REGION3)
+#define F_CACHE_REGION3 V_CACHE_REGION3(1U)
+
+#define S_END3 0
+#define M_END3 0xffffU
+#define V_END3(x) ((x) << S_END3)
+#define G_END3(x) (((x) >> S_END3) & M_END3)
+
+#define A_GCACHE_MEMZONE0_REGION4 0x51410
+
+#define S_REGION_EN4 18
+#define V_REGION_EN4(x) ((x) << S_REGION_EN4)
+#define F_REGION_EN4 V_REGION_EN4(1U)
+
+#define S_EDC_REGION4 17
+#define V_EDC_REGION4(x) ((x) << S_EDC_REGION4)
+#define F_EDC_REGION4 V_EDC_REGION4(1U)
+
+#define S_CACHE_REGION4 16
+#define V_CACHE_REGION4(x) ((x) << S_CACHE_REGION4)
+#define F_CACHE_REGION4 V_CACHE_REGION4(1U)
+
+#define S_END4 0
+#define M_END4 0xffffU
+#define V_END4(x) ((x) << S_END4)
+#define G_END4(x) (((x) >> S_END4) & M_END4)
+
+#define A_GCACHE_MEMZONE0_REGION5 0x51414
+
+#define S_REGION_EN5 18
+#define V_REGION_EN5(x) ((x) << S_REGION_EN5)
+#define F_REGION_EN5 V_REGION_EN5(1U)
+
+#define S_EDC_REGION5 17
+#define V_EDC_REGION5(x) ((x) << S_EDC_REGION5)
+#define F_EDC_REGION5 V_EDC_REGION5(1U)
+
+#define S_CACHE_REGION5 16
+#define V_CACHE_REGION5(x) ((x) << S_CACHE_REGION5)
+#define F_CACHE_REGION5 V_CACHE_REGION5(1U)
+
+#define S_END5 0
+#define M_END5 0xffffU
+#define V_END5(x) ((x) << S_END5)
+#define G_END5(x) (((x) >> S_END5) & M_END5)
+
+#define A_GCACHE_MEMZONE0_REGION6 0x51418
+
+#define S_REGION_EN6 18
+#define V_REGION_EN6(x) ((x) << S_REGION_EN6)
+#define F_REGION_EN6 V_REGION_EN6(1U)
+
+#define S_EDC_REGION6 17
+#define V_EDC_REGION6(x) ((x) << S_EDC_REGION6)
+#define F_EDC_REGION6 V_EDC_REGION6(1U)
+
+#define S_CACHE_REGION6 16
+#define V_CACHE_REGION6(x) ((x) << S_CACHE_REGION6)
+#define F_CACHE_REGION6 V_CACHE_REGION6(1U)
+
+#define S_END6 0
+#define M_END6 0xffffU
+#define V_END6(x) ((x) << S_END6)
+#define G_END6(x) (((x) >> S_END6) & M_END6)
+
+#define A_GCACHE_MEMZONE0_REGION7 0x5141c
+
+#define S_REGION_EN7 18
+#define V_REGION_EN7(x) ((x) << S_REGION_EN7)
+#define F_REGION_EN7 V_REGION_EN7(1U)
+
+#define S_EDC_REGION7 17
+#define V_EDC_REGION7(x) ((x) << S_EDC_REGION7)
+#define F_EDC_REGION7 V_EDC_REGION7(1U)
+
+#define S_CACHE_REGION7 16
+#define V_CACHE_REGION7(x) ((x) << S_CACHE_REGION7)
+#define F_CACHE_REGION7 V_CACHE_REGION7(1U)
+
+#define S_END7 0
+#define M_END7 0xffffU
+#define V_END7(x) ((x) << S_END7)
+#define G_END7(x) (((x) >> S_END7) & M_END7)
+
+#define A_GCACHE_MEMZONE0_REGION8 0x51420
+
+#define S_REGION_EN8 18
+#define V_REGION_EN8(x) ((x) << S_REGION_EN8)
+#define F_REGION_EN8 V_REGION_EN8(1U)
+
+#define S_EDC_REGION8 17
+#define V_EDC_REGION8(x) ((x) << S_EDC_REGION8)
+#define F_EDC_REGION8 V_EDC_REGION8(1U)
+
+#define S_CACHE_REGION8 16
+#define V_CACHE_REGION8(x) ((x) << S_CACHE_REGION8)
+#define F_CACHE_REGION8 V_CACHE_REGION8(1U)
+
+#define S_END8 0
+#define M_END8 0xffffU
+#define V_END8(x) ((x) << S_END8)
+#define G_END8(x) (((x) >> S_END8) & M_END8)
+
+#define A_GCACHE_REG0_BASE_MSB 0x51424
+#define A_GCACHE_MEMZONE0_REGION1_MSB 0x51428
+
+#define S_START1 0
+#define M_START1 0xffffU
+#define V_START1(x) ((x) << S_START1)
+#define G_START1(x) (((x) >> S_START1) & M_START1)
+
+#define A_GCACHE_MEMZONE0_REGION2_MSB 0x5142c
+
+#define S_START2 0
+#define M_START2 0xffffU
+#define V_START2(x) ((x) << S_START2)
+#define G_START2(x) (((x) >> S_START2) & M_START2)
+
+#define A_GCACHE_MEMZONE0_REGION3_MSB 0x51430
+
+#define S_START3 0
+#define M_START3 0xffffU
+#define V_START3(x) ((x) << S_START3)
+#define G_START3(x) (((x) >> S_START3) & M_START3)
+
+#define A_GCACHE_MEMZONE0_REGION4_MSB 0x51434
+
+#define S_START4 0
+#define M_START4 0xffffU
+#define V_START4(x) ((x) << S_START4)
+#define G_START4(x) (((x) >> S_START4) & M_START4)
+
+#define A_GCACHE_MEMZONE0_REGION5_MSB 0x51438
+
+#define S_START5 0
+#define M_START5 0xffffU
+#define V_START5(x) ((x) << S_START5)
+#define G_START5(x) (((x) >> S_START5) & M_START5)
+
+#define A_GCACHE_MEMZONE0_REGION6_MSB 0x5143c
+
+#define S_START6 0
+#define M_START6 0xffffU
+#define V_START6(x) ((x) << S_START6)
+#define G_START6(x) (((x) >> S_START6) & M_START6)
+
+#define A_GCACHE_MEMZONE0_REGION7_MSB 0x51440
+
+#define S_START7 0
+#define M_START7 0xffffU
+#define V_START7(x) ((x) << S_START7)
+#define G_START7(x) (((x) >> S_START7) & M_START7)
+
+#define A_GCACHE_MEMZONE0_REGION8_MSB 0x51444
+
+#define S_START8 0
+#define M_START8 0xffffU
+#define V_START8(x) ((x) << S_START8)
+#define G_START8(x) (((x) >> S_START8) & M_START8)
+
+#define A_GCACHE_MODE_SEL1 0x51448
+#define A_GCACHE_MEMZONE1_REGION1 0x5144c
+#define A_GCACHE_MEMZONE1_REGION2 0x51450
+#define A_GCACHE_MEMZONE1_REGION3 0x51454
+#define A_GCACHE_MEMZONE1_REGION4 0x51458
+#define A_GCACHE_MEMZONE1_REGION5 0x5145c
+#define A_GCACHE_MEMZONE1_REGION6 0x51460
+#define A_GCACHE_MEMZONE1_REGION7 0x51464
+#define A_GCACHE_MEMZONE1_REGION8 0x51468
+#define A_GCACHE_MEMZONE1_REGION1_MSB 0x5146c
+#define A_GCACHE_MEMZONE1_REGION2_MSB 0x51470
+#define A_GCACHE_MEMZONE1_REGION3_MSB 0x51474
+#define A_GCACHE_MEMZONE1_REGION4_MSB 0x51478
+#define A_GCACHE_MEMZONE1_REGION5_MSB 0x5147c
+#define A_GCACHE_MEMZONE1_REGION6_MSB 0x51480
+#define A_GCACHE_MEMZONE1_REGION7_MSB 0x51484
+#define A_GCACHE_MEMZONE1_REGION8_MSB 0x51488
+#define A_GCACHE_HMA_MC1_EN 0x5148c
+
+#define S_MC1_EN 1
+#define V_MC1_EN(x) ((x) << S_MC1_EN)
+#define F_MC1_EN V_MC1_EN(1U)
+
+#define S_HMA_EN 0
+#define V_HMA_EN(x) ((x) << S_HMA_EN)
+#define F_HMA_EN V_HMA_EN(1U)
+
+#define A_GCACHE_P_BIST_CMD 0x51490
+#define A_GCACHE_P_BIST_CMD_ADDR 0x51494
+#define A_GCACHE_P_BIST_CMD_LEN 0x51498
+#define A_GCACHE_P_BIST_DATA_PATTERN 0x5149c
+#define A_GCACHE_P_BIST_USER_WDATA0 0x514a0
+#define A_GCACHE_P_BIST_USER_WDATA1 0x514a4
+#define A_GCACHE_P_BIST_USER_WDATA2 0x514a8
+#define A_GCACHE_P_BIST_NUM_ERR 0x514ac
+#define A_GCACHE_P_BIST_ERR_FIRST_ADDR 0x514b0
+#define A_GCACHE_P_BIST_STATUS_RDATA 0x514b4
+#define A_GCACHE_P_BIST_CRC_SEED 0x514fc
+#define A_GCACHE_CACHE_SIZE 0x51500
+
+#define S_HMA_2MB 1
+#define V_HMA_2MB(x) ((x) << S_HMA_2MB)
+#define F_HMA_2MB V_HMA_2MB(1U)
+
+#define S_MC0_2MB 0
+#define V_MC0_2MB(x) ((x) << S_MC0_2MB)
+#define F_MC0_2MB V_MC0_2MB(1U)
+
+#define A_GCACHE_HINT_MAPPING 0x51504
+
+#define S_CLIENT_HINT_EN 16
+#define M_CLIENT_HINT_EN 0x7fffU
+#define V_CLIENT_HINT_EN(x) ((x) << S_CLIENT_HINT_EN)
+#define G_CLIENT_HINT_EN(x) (((x) >> S_CLIENT_HINT_EN) & M_CLIENT_HINT_EN)
+
+#define S_HINT_ADDR_SPLIT_EN 8
+#define V_HINT_ADDR_SPLIT_EN(x) ((x) << S_HINT_ADDR_SPLIT_EN)
+#define F_HINT_ADDR_SPLIT_EN V_HINT_ADDR_SPLIT_EN(1U)
+
+#define S_TP_HINT_HMA_MC 2
+#define V_TP_HINT_HMA_MC(x) ((x) << S_TP_HINT_HMA_MC)
+#define F_TP_HINT_HMA_MC V_TP_HINT_HMA_MC(1U)
+
+#define S_CIM_HINT_HMA_MC 1
+#define V_CIM_HINT_HMA_MC(x) ((x) << S_CIM_HINT_HMA_MC)
+#define F_CIM_HINT_HMA_MC V_CIM_HINT_HMA_MC(1U)
+
+#define S_LE_HINT_HMA_MC 0
+#define V_LE_HINT_HMA_MC(x) ((x) << S_LE_HINT_HMA_MC)
+#define F_LE_HINT_HMA_MC V_LE_HINT_HMA_MC(1U)
+
+#define A_GCACHE_PERF_EN 0x51508
+
+#define S_PERF_CLEAR_GC1 3
+#define V_PERF_CLEAR_GC1(x) ((x) << S_PERF_CLEAR_GC1)
+#define F_PERF_CLEAR_GC1 V_PERF_CLEAR_GC1(1U)
+
+#define S_PERF_CLEAR_GC0 2
+#define V_PERF_CLEAR_GC0(x) ((x) << S_PERF_CLEAR_GC0)
+#define F_PERF_CLEAR_GC0 V_PERF_CLEAR_GC0(1U)
+
+#define S_PERF_EN_GC1 1
+#define V_PERF_EN_GC1(x) ((x) << S_PERF_EN_GC1)
+#define F_PERF_EN_GC1 V_PERF_EN_GC1(1U)
+
+#define S_PERF_EN_GC0 0
+#define V_PERF_EN_GC0(x) ((x) << S_PERF_EN_GC0)
+#define F_PERF_EN_GC0 V_PERF_EN_GC0(1U)
+
+#define A_GCACHE_PERF_GC0_RD_HIT 0x5150c
+#define A_GCACHE_PERF_GC1_RD_HIT 0x51510
+#define A_GCACHE_PERF_GC0_WR_HIT 0x51514
+#define A_GCACHE_PERF_GC1_WR_HIT 0x51518
+#define A_GCACHE_PERF_GC0_RD_MISS 0x5151c
+#define A_GCACHE_PERF_GC1_RD_MISS 0x51520
+#define A_GCACHE_PERF_GC0_WR_MISS 0x51524
+#define A_GCACHE_PERF_GC1_WR_MISS 0x51528
+#define A_GCACHE_PERF_GC0_RD_REQ 0x5152c
+#define A_GCACHE_PERF_GC1_RD_REQ 0x51530
+#define A_GCACHE_PERF_GC0_WR_REQ 0x51534
+#define A_GCACHE_PERF_GC1_WR_REQ 0x51538
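+
+/*
+ * Sketch of driving the GCACHE performance block with the definitions
+ * above: enable counting on both caches via the t4_set_reg_field helper,
+ * then sample a counter pair.  Whether the counters saturate or wrap is
+ * not implied by these definitions:
+ *
+ *     t4_set_reg_field(sc, A_GCACHE_PERF_EN,
+ *         F_PERF_EN_GC0 | F_PERF_EN_GC1, F_PERF_EN_GC0 | F_PERF_EN_GC1);
+ *     uint32_t rd_hit = t4_read_reg(sc, A_GCACHE_PERF_GC0_RD_HIT);
+ *     uint32_t rd_miss = t4_read_reg(sc, A_GCACHE_PERF_GC0_RD_MISS);
+ */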
+#define A_GCACHE_PAR_CAUSE 0x5153c
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_CAUSE V_GC1_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_CAUSE V_GC0_WQDATA_FIFO_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_PAR_CAUSE 19
+#define V_GC1_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC1_RSP_PERR_PAR_CAUSE)
+#define F_GC1_RSP_PERR_PAR_CAUSE V_GC1_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_PAR_CAUSE 18
+#define V_GC0_RSP_PERR_PAR_CAUSE(x) ((x) << S_GC0_RSP_PERR_PAR_CAUSE)
+#define F_GC0_RSP_PERR_PAR_CAUSE V_GC0_RSP_PERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_PAR_CAUSE 17
+#define V_GC1_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_UERR_PAR_CAUSE)
+#define F_GC1_LRU_UERR_PAR_CAUSE V_GC1_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_PAR_CAUSE 16
+#define V_GC0_LRU_UERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_UERR_PAR_CAUSE)
+#define F_GC0_LRU_UERR_PAR_CAUSE V_GC0_LRU_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_PAR_CAUSE 15
+#define V_GC1_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_UERR_PAR_CAUSE)
+#define F_GC1_TAG_UERR_PAR_CAUSE V_GC1_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_PAR_CAUSE 14
+#define V_GC0_TAG_UERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_UERR_PAR_CAUSE)
+#define F_GC0_TAG_UERR_PAR_CAUSE V_GC0_TAG_UERR_PAR_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_PAR_CAUSE 13
+#define V_GC1_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC1_LRU_CERR_PAR_CAUSE)
+#define F_GC1_LRU_CERR_PAR_CAUSE V_GC1_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_PAR_CAUSE 12
+#define V_GC0_LRU_CERR_PAR_CAUSE(x) ((x) << S_GC0_LRU_CERR_PAR_CAUSE)
+#define F_GC0_LRU_CERR_PAR_CAUSE V_GC0_LRU_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_PAR_CAUSE 11
+#define V_GC1_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC1_TAG_CERR_PAR_CAUSE)
+#define F_GC1_TAG_CERR_PAR_CAUSE V_GC1_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_PAR_CAUSE 10
+#define V_GC0_TAG_CERR_PAR_CAUSE(x) ((x) << S_GC0_TAG_CERR_PAR_CAUSE)
+#define F_GC0_TAG_CERR_PAR_CAUSE V_GC0_TAG_CERR_PAR_CAUSE(1U)
+
+#define S_GC1_CE_PAR_CAUSE 9
+#define V_GC1_CE_PAR_CAUSE(x) ((x) << S_GC1_CE_PAR_CAUSE)
+#define F_GC1_CE_PAR_CAUSE V_GC1_CE_PAR_CAUSE(1U)
+
+#define S_GC0_CE_PAR_CAUSE 8
+#define V_GC0_CE_PAR_CAUSE(x) ((x) << S_GC0_CE_PAR_CAUSE)
+#define F_GC0_CE_PAR_CAUSE V_GC0_CE_PAR_CAUSE(1U)
+
+#define S_GC1_UE_PAR_CAUSE 7
+#define V_GC1_UE_PAR_CAUSE(x) ((x) << S_GC1_UE_PAR_CAUSE)
+#define F_GC1_UE_PAR_CAUSE V_GC1_UE_PAR_CAUSE(1U)
+
+#define S_GC0_UE_PAR_CAUSE 6
+#define V_GC0_UE_PAR_CAUSE(x) ((x) << S_GC0_UE_PAR_CAUSE)
+#define F_GC0_UE_PAR_CAUSE V_GC0_UE_PAR_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_CAUSE 5
+#define V_GC1_CMD_PAR_CAUSE(x) ((x) << S_GC1_CMD_PAR_CAUSE)
+#define F_GC1_CMD_PAR_CAUSE V_GC1_CMD_PAR_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_CAUSE 4
+#define V_GC1_DATA_PAR_CAUSE(x) ((x) << S_GC1_DATA_PAR_CAUSE)
+#define F_GC1_DATA_PAR_CAUSE V_GC1_DATA_PAR_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_CAUSE 3
+#define V_GC0_CMD_PAR_CAUSE(x) ((x) << S_GC0_CMD_PAR_CAUSE)
+#define F_GC0_CMD_PAR_CAUSE V_GC0_CMD_PAR_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_CAUSE 2
+#define V_GC0_DATA_PAR_CAUSE(x) ((x) << S_GC0_DATA_PAR_CAUSE)
+#define F_GC0_DATA_PAR_CAUSE V_GC0_DATA_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_PAR_CAUSE 1
+#define V_ILLADDRACCESS1_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS1_PAR_CAUSE)
+#define F_ILLADDRACCESS1_PAR_CAUSE V_ILLADDRACCESS1_PAR_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_PAR_CAUSE 0
+#define V_ILLADDRACCESS0_PAR_CAUSE(x) ((x) << S_ILLADDRACCESS0_PAR_CAUSE)
+#define F_ILLADDRACCESS0_PAR_CAUSE V_ILLADDRACCESS0_PAR_CAUSE(1U)
+
+#define A_GCACHE_PAR_ENABLE 0x51540
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_PAR_ENABLE V_GC1_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_PAR_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_PAR_ENABLE V_GC0_WQDATA_FIFO_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_PAR_ENABLE 19
+#define V_GC1_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC1_RSP_PERR_PAR_ENABLE)
+#define F_GC1_RSP_PERR_PAR_ENABLE V_GC1_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_PAR_ENABLE 18
+#define V_GC0_RSP_PERR_PAR_ENABLE(x) ((x) << S_GC0_RSP_PERR_PAR_ENABLE)
+#define F_GC0_RSP_PERR_PAR_ENABLE V_GC0_RSP_PERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_PAR_ENABLE 17
+#define V_GC1_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_UERR_PAR_ENABLE)
+#define F_GC1_LRU_UERR_PAR_ENABLE V_GC1_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_PAR_ENABLE 16
+#define V_GC0_LRU_UERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_UERR_PAR_ENABLE)
+#define F_GC0_LRU_UERR_PAR_ENABLE V_GC0_LRU_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_PAR_ENABLE 15
+#define V_GC1_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_UERR_PAR_ENABLE)
+#define F_GC1_TAG_UERR_PAR_ENABLE V_GC1_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_PAR_ENABLE 14
+#define V_GC0_TAG_UERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_UERR_PAR_ENABLE)
+#define F_GC0_TAG_UERR_PAR_ENABLE V_GC0_TAG_UERR_PAR_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_PAR_ENABLE 13
+#define V_GC1_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC1_LRU_CERR_PAR_ENABLE)
+#define F_GC1_LRU_CERR_PAR_ENABLE V_GC1_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_PAR_ENABLE 12
+#define V_GC0_LRU_CERR_PAR_ENABLE(x) ((x) << S_GC0_LRU_CERR_PAR_ENABLE)
+#define F_GC0_LRU_CERR_PAR_ENABLE V_GC0_LRU_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_PAR_ENABLE 11
+#define V_GC1_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC1_TAG_CERR_PAR_ENABLE)
+#define F_GC1_TAG_CERR_PAR_ENABLE V_GC1_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_PAR_ENABLE 10
+#define V_GC0_TAG_CERR_PAR_ENABLE(x) ((x) << S_GC0_TAG_CERR_PAR_ENABLE)
+#define F_GC0_TAG_CERR_PAR_ENABLE V_GC0_TAG_CERR_PAR_ENABLE(1U)
+
+#define S_GC1_CE_PAR_ENABLE 9
+#define V_GC1_CE_PAR_ENABLE(x) ((x) << S_GC1_CE_PAR_ENABLE)
+#define F_GC1_CE_PAR_ENABLE V_GC1_CE_PAR_ENABLE(1U)
+
+#define S_GC0_CE_PAR_ENABLE 8
+#define V_GC0_CE_PAR_ENABLE(x) ((x) << S_GC0_CE_PAR_ENABLE)
+#define F_GC0_CE_PAR_ENABLE V_GC0_CE_PAR_ENABLE(1U)
+
+#define S_GC1_UE_PAR_ENABLE 7
+#define V_GC1_UE_PAR_ENABLE(x) ((x) << S_GC1_UE_PAR_ENABLE)
+#define F_GC1_UE_PAR_ENABLE V_GC1_UE_PAR_ENABLE(1U)
+
+#define S_GC0_UE_PAR_ENABLE 6
+#define V_GC0_UE_PAR_ENABLE(x) ((x) << S_GC0_UE_PAR_ENABLE)
+#define F_GC0_UE_PAR_ENABLE V_GC0_UE_PAR_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_ENABLE 5
+#define V_GC1_CMD_PAR_ENABLE(x) ((x) << S_GC1_CMD_PAR_ENABLE)
+#define F_GC1_CMD_PAR_ENABLE V_GC1_CMD_PAR_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_ENABLE 4
+#define V_GC1_DATA_PAR_ENABLE(x) ((x) << S_GC1_DATA_PAR_ENABLE)
+#define F_GC1_DATA_PAR_ENABLE V_GC1_DATA_PAR_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_ENABLE 3
+#define V_GC0_CMD_PAR_ENABLE(x) ((x) << S_GC0_CMD_PAR_ENABLE)
+#define F_GC0_CMD_PAR_ENABLE V_GC0_CMD_PAR_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_ENABLE 2
+#define V_GC0_DATA_PAR_ENABLE(x) ((x) << S_GC0_DATA_PAR_ENABLE)
+#define F_GC0_DATA_PAR_ENABLE V_GC0_DATA_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_PAR_ENABLE 1
+#define V_ILLADDRACCESS1_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS1_PAR_ENABLE)
+#define F_ILLADDRACCESS1_PAR_ENABLE V_ILLADDRACCESS1_PAR_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_PAR_ENABLE 0
+#define V_ILLADDRACCESS0_PAR_ENABLE(x) ((x) << S_ILLADDRACCESS0_PAR_ENABLE)
+#define F_ILLADDRACCESS0_PAR_ENABLE V_ILLADDRACCESS0_PAR_ENABLE(1U)
+
+#define A_GCACHE_INT_ENABLE 0x51544
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC1_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE V_GC0_SRAM_RSP_DATAQ_PERR_INT_ENABLE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_ENABLE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_ENABLE V_GC1_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_ENABLE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_ENABLE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_ENABLE V_GC0_WQDATA_FIFO_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_ENABLE(1U)
+
+#define S_GC1_RSP_PERR_INT_ENABLE 19
+#define V_GC1_RSP_PERR_INT_ENABLE(x) ((x) << S_GC1_RSP_PERR_INT_ENABLE)
+#define F_GC1_RSP_PERR_INT_ENABLE V_GC1_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC0_RSP_PERR_INT_ENABLE 18
+#define V_GC0_RSP_PERR_INT_ENABLE(x) ((x) << S_GC0_RSP_PERR_INT_ENABLE)
+#define F_GC0_RSP_PERR_INT_ENABLE V_GC0_RSP_PERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_UERR_INT_ENABLE 17
+#define V_GC1_LRU_UERR_INT_ENABLE(x) ((x) << S_GC1_LRU_UERR_INT_ENABLE)
+#define F_GC1_LRU_UERR_INT_ENABLE V_GC1_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_UERR_INT_ENABLE 16
+#define V_GC0_LRU_UERR_INT_ENABLE(x) ((x) << S_GC0_LRU_UERR_INT_ENABLE)
+#define F_GC0_LRU_UERR_INT_ENABLE V_GC0_LRU_UERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_UERR_INT_ENABLE 15
+#define V_GC1_TAG_UERR_INT_ENABLE(x) ((x) << S_GC1_TAG_UERR_INT_ENABLE)
+#define F_GC1_TAG_UERR_INT_ENABLE V_GC1_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_UERR_INT_ENABLE 14
+#define V_GC0_TAG_UERR_INT_ENABLE(x) ((x) << S_GC0_TAG_UERR_INT_ENABLE)
+#define F_GC0_TAG_UERR_INT_ENABLE V_GC0_TAG_UERR_INT_ENABLE(1U)
+
+#define S_GC1_LRU_CERR_INT_ENABLE 13
+#define V_GC1_LRU_CERR_INT_ENABLE(x) ((x) << S_GC1_LRU_CERR_INT_ENABLE)
+#define F_GC1_LRU_CERR_INT_ENABLE V_GC1_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC0_LRU_CERR_INT_ENABLE 12
+#define V_GC0_LRU_CERR_INT_ENABLE(x) ((x) << S_GC0_LRU_CERR_INT_ENABLE)
+#define F_GC0_LRU_CERR_INT_ENABLE V_GC0_LRU_CERR_INT_ENABLE(1U)
+
+#define S_GC1_TAG_CERR_INT_ENABLE 11
+#define V_GC1_TAG_CERR_INT_ENABLE(x) ((x) << S_GC1_TAG_CERR_INT_ENABLE)
+#define F_GC1_TAG_CERR_INT_ENABLE V_GC1_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC0_TAG_CERR_INT_ENABLE 10
+#define V_GC0_TAG_CERR_INT_ENABLE(x) ((x) << S_GC0_TAG_CERR_INT_ENABLE)
+#define F_GC0_TAG_CERR_INT_ENABLE V_GC0_TAG_CERR_INT_ENABLE(1U)
+
+#define S_GC1_CE_INT_ENABLE 9
+#define V_GC1_CE_INT_ENABLE(x) ((x) << S_GC1_CE_INT_ENABLE)
+#define F_GC1_CE_INT_ENABLE V_GC1_CE_INT_ENABLE(1U)
+
+#define S_GC0_CE_INT_ENABLE 8
+#define V_GC0_CE_INT_ENABLE(x) ((x) << S_GC0_CE_INT_ENABLE)
+#define F_GC0_CE_INT_ENABLE V_GC0_CE_INT_ENABLE(1U)
+
+#define S_GC1_UE_INT_ENABLE 7
+#define V_GC1_UE_INT_ENABLE(x) ((x) << S_GC1_UE_INT_ENABLE)
+#define F_GC1_UE_INT_ENABLE V_GC1_UE_INT_ENABLE(1U)
+
+#define S_GC0_UE_INT_ENABLE 6
+#define V_GC0_UE_INT_ENABLE(x) ((x) << S_GC0_UE_INT_ENABLE)
+#define F_GC0_UE_INT_ENABLE V_GC0_UE_INT_ENABLE(1U)
+
+#define S_GC1_CMD_PAR_INT_ENABLE 5
+#define V_GC1_CMD_PAR_INT_ENABLE(x) ((x) << S_GC1_CMD_PAR_INT_ENABLE)
+#define F_GC1_CMD_PAR_INT_ENABLE V_GC1_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC1_DATA_PAR_INT_ENABLE 4
+#define V_GC1_DATA_PAR_INT_ENABLE(x) ((x) << S_GC1_DATA_PAR_INT_ENABLE)
+#define F_GC1_DATA_PAR_INT_ENABLE V_GC1_DATA_PAR_INT_ENABLE(1U)
+
+#define S_GC0_CMD_PAR_INT_ENABLE 3
+#define V_GC0_CMD_PAR_INT_ENABLE(x) ((x) << S_GC0_CMD_PAR_INT_ENABLE)
+#define F_GC0_CMD_PAR_INT_ENABLE V_GC0_CMD_PAR_INT_ENABLE(1U)
+
+#define S_GC0_DATA_PAR_INT_ENABLE 2
+#define V_GC0_DATA_PAR_INT_ENABLE(x) ((x) << S_GC0_DATA_PAR_INT_ENABLE)
+#define F_GC0_DATA_PAR_INT_ENABLE V_GC0_DATA_PAR_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS1_INT_ENABLE 1
+#define V_ILLADDRACCESS1_INT_ENABLE(x) ((x) << S_ILLADDRACCESS1_INT_ENABLE)
+#define F_ILLADDRACCESS1_INT_ENABLE V_ILLADDRACCESS1_INT_ENABLE(1U)
+
+#define S_ILLADDRACCESS0_INT_ENABLE 0
+#define V_ILLADDRACCESS0_INT_ENABLE(x) ((x) << S_ILLADDRACCESS0_INT_ENABLE)
+#define F_ILLADDRACCESS0_INT_ENABLE V_ILLADDRACCESS0_INT_ENABLE(1U)
+
+#define A_GCACHE_INT_CAUSE 0x51548
+
+#define S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE 27
+#define V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC1_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE 26
+#define V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE V_GC0_SRAM_RSP_DATAQ_PERR_INT_CAUSE(1U)
+
+#define S_GC1_WQDATA_FIFO_PERR_INT_CAUSE 25
+#define V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC1_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC1_WQDATA_FIFO_PERR_INT_CAUSE V_GC1_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC0_WQDATA_FIFO_PERR_INT_CAUSE 24
+#define V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(x) ((x) << S_GC0_WQDATA_FIFO_PERR_INT_CAUSE)
+#define F_GC0_WQDATA_FIFO_PERR_INT_CAUSE V_GC0_WQDATA_FIFO_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE 23
+#define V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE 22
+#define V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 21
+#define V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC1_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE 20
+#define V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(x) ((x) << S_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE)
+#define F_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE V_GC0_SRAM_RDTAG_QUEUE_PERR_INT_CAUSE(1U)
+
+#define S_GC1_RSP_PERR_INT_CAUSE 19
+#define V_GC1_RSP_PERR_INT_CAUSE(x) ((x) << S_GC1_RSP_PERR_INT_CAUSE)
+#define F_GC1_RSP_PERR_INT_CAUSE V_GC1_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC0_RSP_PERR_INT_CAUSE 18
+#define V_GC0_RSP_PERR_INT_CAUSE(x) ((x) << S_GC0_RSP_PERR_INT_CAUSE)
+#define F_GC0_RSP_PERR_INT_CAUSE V_GC0_RSP_PERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_UERR_INT_CAUSE 17
+#define V_GC1_LRU_UERR_INT_CAUSE(x) ((x) << S_GC1_LRU_UERR_INT_CAUSE)
+#define F_GC1_LRU_UERR_INT_CAUSE V_GC1_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_UERR_INT_CAUSE 16
+#define V_GC0_LRU_UERR_INT_CAUSE(x) ((x) << S_GC0_LRU_UERR_INT_CAUSE)
+#define F_GC0_LRU_UERR_INT_CAUSE V_GC0_LRU_UERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_UERR_INT_CAUSE 15
+#define V_GC1_TAG_UERR_INT_CAUSE(x) ((x) << S_GC1_TAG_UERR_INT_CAUSE)
+#define F_GC1_TAG_UERR_INT_CAUSE V_GC1_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_UERR_INT_CAUSE 14
+#define V_GC0_TAG_UERR_INT_CAUSE(x) ((x) << S_GC0_TAG_UERR_INT_CAUSE)
+#define F_GC0_TAG_UERR_INT_CAUSE V_GC0_TAG_UERR_INT_CAUSE(1U)
+
+#define S_GC1_LRU_CERR_INT_CAUSE 13
+#define V_GC1_LRU_CERR_INT_CAUSE(x) ((x) << S_GC1_LRU_CERR_INT_CAUSE)
+#define F_GC1_LRU_CERR_INT_CAUSE V_GC1_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC0_LRU_CERR_INT_CAUSE 12
+#define V_GC0_LRU_CERR_INT_CAUSE(x) ((x) << S_GC0_LRU_CERR_INT_CAUSE)
+#define F_GC0_LRU_CERR_INT_CAUSE V_GC0_LRU_CERR_INT_CAUSE(1U)
+
+#define S_GC1_TAG_CERR_INT_CAUSE 11
+#define V_GC1_TAG_CERR_INT_CAUSE(x) ((x) << S_GC1_TAG_CERR_INT_CAUSE)
+#define F_GC1_TAG_CERR_INT_CAUSE V_GC1_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC0_TAG_CERR_INT_CAUSE 10
+#define V_GC0_TAG_CERR_INT_CAUSE(x) ((x) << S_GC0_TAG_CERR_INT_CAUSE)
+#define F_GC0_TAG_CERR_INT_CAUSE V_GC0_TAG_CERR_INT_CAUSE(1U)
+
+#define S_GC1_CE_INT_CAUSE 9
+#define V_GC1_CE_INT_CAUSE(x) ((x) << S_GC1_CE_INT_CAUSE)
+#define F_GC1_CE_INT_CAUSE V_GC1_CE_INT_CAUSE(1U)
+
+#define S_GC0_CE_INT_CAUSE 8
+#define V_GC0_CE_INT_CAUSE(x) ((x) << S_GC0_CE_INT_CAUSE)
+#define F_GC0_CE_INT_CAUSE V_GC0_CE_INT_CAUSE(1U)
+
+#define S_GC1_UE_INT_CAUSE 7
+#define V_GC1_UE_INT_CAUSE(x) ((x) << S_GC1_UE_INT_CAUSE)
+#define F_GC1_UE_INT_CAUSE V_GC1_UE_INT_CAUSE(1U)
+
+#define S_GC0_UE_INT_CAUSE 6
+#define V_GC0_UE_INT_CAUSE(x) ((x) << S_GC0_UE_INT_CAUSE)
+#define F_GC0_UE_INT_CAUSE V_GC0_UE_INT_CAUSE(1U)
+
+#define S_GC1_CMD_PAR_INT_CAUSE 5
+#define V_GC1_CMD_PAR_INT_CAUSE(x) ((x) << S_GC1_CMD_PAR_INT_CAUSE)
+#define F_GC1_CMD_PAR_INT_CAUSE V_GC1_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC1_DATA_PAR_INT_CAUSE 4
+#define V_GC1_DATA_PAR_INT_CAUSE(x) ((x) << S_GC1_DATA_PAR_INT_CAUSE)
+#define F_GC1_DATA_PAR_INT_CAUSE V_GC1_DATA_PAR_INT_CAUSE(1U)
+
+#define S_GC0_CMD_PAR_INT_CAUSE 3
+#define V_GC0_CMD_PAR_INT_CAUSE(x) ((x) << S_GC0_CMD_PAR_INT_CAUSE)
+#define F_GC0_CMD_PAR_INT_CAUSE V_GC0_CMD_PAR_INT_CAUSE(1U)
+
+#define S_GC0_DATA_PAR_INT_CAUSE 2
+#define V_GC0_DATA_PAR_INT_CAUSE(x) ((x) << S_GC0_DATA_PAR_INT_CAUSE)
+#define F_GC0_DATA_PAR_INT_CAUSE V_GC0_DATA_PAR_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS1_INT_CAUSE 1
+#define V_ILLADDRACCESS1_INT_CAUSE(x) ((x) << S_ILLADDRACCESS1_INT_CAUSE)
+#define F_ILLADDRACCESS1_INT_CAUSE V_ILLADDRACCESS1_INT_CAUSE(1U)
+
+#define S_ILLADDRACCESS0_INT_CAUSE 0
+#define V_ILLADDRACCESS0_INT_CAUSE(x) ((x) << S_ILLADDRACCESS0_INT_CAUSE)
+#define F_ILLADDRACCESS0_INT_CAUSE V_ILLADDRACCESS0_INT_CAUSE(1U)
+
+#define A_GCACHE_DBG_SEL_CTRL 0x51550
+
+#define S_DBG_SEL_CTRLSEL_OVR_EN 31
+#define V_DBG_SEL_CTRLSEL_OVR_EN(x) ((x) << S_DBG_SEL_CTRLSEL_OVR_EN)
+#define F_DBG_SEL_CTRLSEL_OVR_EN V_DBG_SEL_CTRLSEL_OVR_EN(1U)
+
+#define S_T7_DEBUG_HI 16
+#define V_T7_DEBUG_HI(x) ((x) << S_T7_DEBUG_HI)
+#define F_T7_DEBUG_HI V_T7_DEBUG_HI(1U)
+
+#define S_DBG_SEL_CTRLSELH 8
+#define M_DBG_SEL_CTRLSELH 0xffU
+#define V_DBG_SEL_CTRLSELH(x) ((x) << S_DBG_SEL_CTRLSELH)
+#define G_DBG_SEL_CTRLSELH(x) (((x) >> S_DBG_SEL_CTRLSELH) & M_DBG_SEL_CTRLSELH)
+
+#define S_DBG_SEL_CTRLSELL 0
+#define M_DBG_SEL_CTRLSELL 0xffU
+#define V_DBG_SEL_CTRLSELL(x) ((x) << S_DBG_SEL_CTRLSELL)
+#define G_DBG_SEL_CTRLSELL(x) (((x) >> S_DBG_SEL_CTRLSELL) & M_DBG_SEL_CTRLSELL)
+
+#define A_GCACHE_LOCAL_DEBUG_RPT 0x51554
+#define A_GCACHE_DBG_ILL_ACC 0x5155c
+#define A_GCACHE_DBG_ILL_ADDR0 0x51560
+#define A_GCACHE_DBG_ILL_ADDR1 0x51564
+#define A_GCACHE_GC0_DBG_ADDR_0_32 0x51568
+#define A_GCACHE_GC0_DBG_ADDR_32_32 0x5156c
+#define A_GCACHE_GC0_DBG_ADDR_64_32 0x51570
+#define A_GCACHE_GC0_DBG_ADDR_96_32 0x51574
+#define A_GCACHE_GC0_DBG_ADDR_0_64 0x51578
+#define A_GCACHE_GC0_DBG_ADDR_64_64 0x5157c
+#define A_GCACHE_GC0_DBG_ADDR_0_96 0x51580
+#define A_GCACHE_GC0_DBG_ADDR_32_96 0x51584
+#define A_GCACHE_GC1_DBG_ADDR_0_32 0x5158c
+#define A_GCACHE_GC1_DBG_ADDR_32_32 0x51590
+#define A_GCACHE_GC1_DBG_ADDR_64_32 0x51594
+#define A_GCACHE_GC1_DBG_ADDR_96_32 0x51598
+#define A_GCACHE_GC1_DBG_ADDR_0_64 0x5159c
+#define A_GCACHE_GC1_DBG_ADDR_64_64 0x515a0
+#define A_GCACHE_GC1_DBG_ADDR_0_96 0x515a4
+#define A_GCACHE_GC1_DBG_ADDR_32_96 0x515a8
+#define A_GCACHE_GC0_DBG_ADDR_32_64 0x515ac
+#define A_GCACHE_GC1_DBG_ADDR_32_64 0x515b0
+#define A_GCACHE_PERF_GC0_EVICT 0x515b4
+#define A_GCACHE_PERF_GC1_EVICT 0x515b8
+#define A_GCACHE_PERF_GC0_CE_COUNT 0x515bc
+#define A_GCACHE_PERF_GC1_CE_COUNT 0x515c0
+#define A_GCACHE_PERF_GC0_UE_COUNT 0x515c4
+#define A_GCACHE_PERF_GC1_UE_COUNT 0x515c8
+#define A_GCACHE_DBG_CTL 0x515f0
+#define A_GCACHE_DBG_DATA 0x515f4
diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h
index 830828097802..6485fa50bd08 100644
--- a/sys/dev/cxgbe/common/t4_regs_values.h
+++ b/sys/dev/cxgbe/common/t4_regs_values.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -269,6 +268,7 @@
#define X_WINDOW_SHIFT 10
#define X_PCIEOFST_SHIFT 10
+#define X_T7_MEMOFST_SHIFT 4
/*
* TP definitions.
@@ -284,6 +284,10 @@
#define S_FT_FIRST S_FCOE
#define S_FT_LAST S_FRAGMENTATION
+#define S_T7_FT_FIRST S_IPSECIDX
+#define S_T7_FT_LAST S_TCPFLAGS
+
+#define W_FT_IPSECIDX 12
#define W_FT_FCOE 1
#define W_FT_PORT 3
#define W_FT_VNIC_ID 17
@@ -294,17 +298,9 @@
#define W_FT_MACMATCH 9
#define W_FT_MPSHITTYPE 3
#define W_FT_FRAGMENTATION 1
-
-#define M_FT_FCOE ((1ULL << W_FT_FCOE) - 1)
-#define M_FT_PORT ((1ULL << W_FT_PORT) - 1)
-#define M_FT_VNIC_ID ((1ULL << W_FT_VNIC_ID) - 1)
-#define M_FT_VLAN ((1ULL << W_FT_VLAN) - 1)
-#define M_FT_TOS ((1ULL << W_FT_TOS) - 1)
-#define M_FT_PROTOCOL ((1ULL << W_FT_PROTOCOL) - 1)
-#define M_FT_ETHERTYPE ((1ULL << W_FT_ETHERTYPE) - 1)
-#define M_FT_MACMATCH ((1ULL << W_FT_MACMATCH) - 1)
-#define M_FT_MPSHITTYPE ((1ULL << W_FT_MPSHITTYPE) - 1)
-#define M_FT_FRAGMENTATION ((1ULL << W_FT_FRAGMENTATION) - 1)
+#define W_FT_ROCE 1
+#define W_FT_SYNONLY 1
+#define W_FT_TCPFLAGS 12
/*
* Some of the Compressed Filter Tuple fields have internal structure. These
@@ -327,6 +323,6 @@
#define S_FT_VNID_ID_VLD 16
#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
-#define F_FT_VNID_ID_VLD(x) V_FT_VNID_ID_VLD(1U)
+#define F_FT_VNID_ID_VLD V_FT_VNID_ID_VLD(1U)
#endif /* __T4_REGS_VALUES_H__ */
diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h
index f9631ba58418..8bff15f04e7a 100644
--- a/sys/dev/cxgbe/common/t4_tcb.h
+++ b/sys/dev/cxgbe/common/t4_tcb.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011, 2016 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2016, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -340,10 +339,9 @@
/* 1023:1020 */
#define W_TCB_ULP_EXT 31
-#define S_TCP_ULP_EXT 28
+#define S_TCB_ULP_EXT 28
#define M_TCB_ULP_EXT 0xfULL
-#define V_TCB_ULP_EXT(x) ((x) << S_TCP_ULP_EXT)
-
+#define V_TCB_ULP_EXT(x) ((x) << S_TCB_ULP_EXT)
/* 840:832 */
#define W_TCB_IRS_ULP 26
@@ -495,31 +493,31 @@
#define M_TCB_RX_DDP_BUF1_TAG 0xffffffffULL
#define V_TCB_RX_DDP_BUF1_TAG(x) ((x) << S_TCB_RX_DDP_BUF1_TAG)
-/* 855:832 */
+/* 855:832 */
#define W_TCB_RX_TLS_BUF_OFFSET 26
#define S_TCB_RX_TLS_BUF_OFFSET 0
#define M_TCB_RX_TLS_BUF_OFFSET 0xffffffULL
#define V_TCB_RX_TLS_BUF_OFFSET(x) ((x) << S_TCB_RX_TLS_BUF_OFFSET)
-/* 876:856 */
+/* 879:856 */
#define W_TCB_RX_TLS_BUF_LEN 26
#define S_TCB_RX_TLS_BUF_LEN 24
#define M_TCB_RX_TLS_BUF_LEN 0xffffffULL
#define V_TCB_RX_TLS_BUF_LEN(x) ((__u64)(x) << S_TCB_RX_TLS_BUF_LEN)
-/* 895:880 */
-#define W_TCB_RX_TLS_FLAGS 26
-#define S_TCB_RX_TLS_FLAGS 48
+/* 895:880 */
+#define W_TCB_RX_TLS_FLAGS 27
+#define S_TCB_RX_TLS_FLAGS 16
#define M_TCB_RX_TLS_FLAGS 0xffffULL
#define V_TCB_RX_TLS_FLAGS(x) ((__u64)(x) << S_TCB_RX_TLS_FLAGS)
-/* 959:896 */
-#define W_TCB_TLS_SEQ 28
-#define S_TCB_TLS_SEQ 0
-#define M_TCB_TLS_SEQ 0xffffffffffffffffULL
-#define V_TCB_TLS_SEQ(x) ((__u64)(x) << S_TCB_TLS_SEQ)
+/* 959:896 */
+#define W_TCB_RX_TLS_SEQ 28
+#define S_TCB_RX_TLS_SEQ 0
+#define M_TCB_RX_TLS_SEQ 0xffffffffffffffffULL
+#define V_TCB_RX_TLS_SEQ(x) ((__u64)(x) << S_TCB_RX_TLS_SEQ)
-/* 991:960 */
+/* 991:960 */
#define W_TCB_RX_TLS_BUF_TAG 30
#define S_TCB_RX_TLS_BUF_TAG 0
#define M_TCB_RX_TLS_BUF_TAG 0xffffffffULL
@@ -531,17 +529,113 @@
#define M_TCB_RX_TLS_KEY_TAG 0xffffffffULL
#define V_TCB_RX_TLS_KEY_TAG(x) ((x) << S_TCB_RX_TLS_KEY_TAG)
+#define S_TF_TLS_ENABLE 0
+#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+
+#define S_TF_TLS_ACTIVE 1
+#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+
+#define S_TF_TLS_CONTROL 2
+#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+
#define S_TF_TLS_KEY_SIZE 7
#define V_TF_TLS_KEY_SIZE(x) ((x) << S_TF_TLS_KEY_SIZE)
-#define S_TF_TLS_CONTROL 2
-#define V_TF_TLS_CONTROL(x) ((x) << S_TF_TLS_CONTROL)
+/* 853:832 */
+#define W_TCB_TPT_OFFSET 26
+#define S_TCB_TPT_OFFSET 0
+#define M_TCB_TPT_OFFSET 0x3fffffULL
+#define V_TCB_TPT_OFFSET(x) ((x) << S_TCB_TPT_OFFSET)
+
+/* 863:854 */
+#define W_TCB_T10_CONFIG 26
+#define S_TCB_T10_CONFIG 22
+#define M_TCB_T10_CONFIG 0x3ffULL
+#define V_TCB_T10_CONFIG(x) ((x) << S_TCB_T10_CONFIG)
+
+/* 871:864 */
+#define W_TCB_PDU_HLEN 27
+#define S_TCB_PDU_HLEN 0
+#define M_TCB_PDU_HLEN 0xffULL
+#define V_TCB_PDU_HLEN(x) ((x) << S_TCB_PDU_HLEN)
+
+/* 879:872 */
+#define W_TCB_PDU_PDO 27
+#define S_TCB_PDU_PDO 8
+#define M_TCB_PDU_PDO 0xffULL
+#define V_TCB_PDU_PDO(x) ((x) << S_TCB_PDU_PDO)
-#define S_TF_TLS_ACTIVE 1
-#define V_TF_TLS_ACTIVE(x) ((x) << S_TF_TLS_ACTIVE)
+/* 895:880 */
+#define W_TCB_N_CQ_IDX_RQ 27
+#define S_TCB_N_CQ_IDX_RQ 16
+#define M_TCB_N_CQ_IDX_RQ 0xffffULL
+#define V_TCB_N_CQ_IDX_RQ(x) ((x) << S_TCB_N_CQ_IDX_RQ)
+
+/* 900:896 */
+#define W_TCB_NVMT_PDA 28
+#define S_TCB_NVMT_PDA 0
+#define M_TCB_NVMT_PDA 0x1fULL
+#define V_TCB_NVMT_PDA(x) ((x) << S_TCB_NVMT_PDA)
+
+/* 911:901 */
+#define W_TCB_RSVD 28
+#define S_TCB_RSVD 5
+#define M_TCB_RSVD 0x7ffULL
+#define V_TCB_RSVD(x) ((x) << S_TCB_RSVD)
-#define S_TF_TLS_ENABLE 0
-#define V_TF_TLS_ENABLE(x) ((x) << S_TF_TLS_ENABLE)
+/* 927:912 */
+#define W_TCB_N_PD_ID 28
+#define S_TCB_N_PD_ID 16
+#define M_TCB_N_PD_ID 0xffffULL
+#define V_TCB_N_PD_ID(x) ((x) << S_TCB_N_PD_ID)
+
+/* 929:928 */
+#define W_TCB_CMP_IMM_SZ 29
+#define S_TCB_CMP_IMM_SZ 0
+#define M_TCB_CMP_IMM_SZ 0x3ULL
+#define V_TCB_CMP_IMM_SZ(x) ((x) << S_TCB_CMP_IMM_SZ)
+
+/* 931:930 */
+#define W_TCB_PDU_DGST_FLAGS 29
+#define S_TCB_PDU_DGST_FLAGS 2
+#define M_TCB_PDU_DGST_FLAGS 0x3ULL
+#define V_TCB_PDU_DGST_FLAGS(x) ((x) << S_TCB_PDU_DGST_FLAGS)
+
+/* 959:932 */
+#define W_TCB_RSVD1 29
+#define S_TCB_RSVD1 4
+#define M_TCB_RSVD1 0xfffffffULL
+#define V_TCB_RSVD1(x) ((x) << S_TCB_RSVD1)
+
+/* 985:960 */
+#define W_TCB_N_RQ_START 30
+#define S_TCB_N_RQ_START 0
+#define M_TCB_N_RQ_START 0x3ffffffULL
+#define V_TCB_N_RQ_START(x) ((x) << S_TCB_N_RQ_START)
+
+/* 998:986 */
+#define W_TCB_N_RQ_MSN 30
+#define S_TCB_N_RQ_MSN 26
+#define M_TCB_N_RQ_MSN 0x1fffULL
+#define V_TCB_N_RQ_MSN(x) ((__u64)(x) << S_TCB_N_RQ_MSN)
+
+/* 1002:999 */
+#define W_TCB_N_RQ_MAX_OFFSET 31
+#define S_TCB_N_RQ_MAX_OFFSET 7
+#define M_TCB_N_RQ_MAX_OFFSET 0xfULL
+#define V_TCB_N_RQ_MAX_OFFSET(x) ((x) << S_TCB_N_RQ_MAX_OFFSET)
+
+/* 1015:1003 */
+#define W_TCB_N_RQ_WRITE_PTR 31
+#define S_TCB_N_RQ_WRITE_PTR 11
+#define M_TCB_N_RQ_WRITE_PTR 0x1fffULL
+#define V_TCB_N_RQ_WRITE_PTR(x) ((x) << S_TCB_N_RQ_WRITE_PTR)
+
+/* 1023:1016 */
+#define W_TCB_N_PDU_TYPE 31
+#define S_TCB_N_PDU_TYPE 24
+#define M_TCB_N_PDU_TYPE 0xffULL
+#define V_TCB_N_PDU_TYPE(x) ((x) << S_TCB_N_PDU_TYPE)
#define S_TF_MIGRATING 0
#define V_TF_MIGRATING(x) ((x) << S_TF_MIGRATING)
@@ -549,15 +643,24 @@
#define S_TF_NON_OFFLOAD 1
#define V_TF_NON_OFFLOAD(x) ((x) << S_TF_NON_OFFLOAD)
+#define S_TF_FILTER 1
+#define V_TF_FILTER(x) ((x) << S_TF_FILTER)
+
#define S_TF_LOCK_TID 2
#define V_TF_LOCK_TID(x) ((x) << S_TF_LOCK_TID)
#define S_TF_KEEPALIVE 3
#define V_TF_KEEPALIVE(x) ((x) << S_TF_KEEPALIVE)
+#define S_TF_DROP_ENCAPS_HDR 3
+#define V_TF_DROP_ENCAPS_HDR(x) ((x) << S_TF_DROP_ENCAPS_HDR)
+
#define S_TF_DACK 4
#define V_TF_DACK(x) ((x) << S_TF_DACK)
+#define S_TF_COUNT_HITS 4
+#define V_TF_COUNT_HITS(x) ((x) << S_TF_COUNT_HITS)
+
#define S_TF_DACK_MSS 5
#define V_TF_DACK_MSS(x) ((x) << S_TF_DACK_MSS)
@@ -567,6 +670,9 @@
#define S_TF_NAGLE 7
#define V_TF_NAGLE(x) ((x) << S_TF_NAGLE)
+#define S_TF_REMOVE_VLAN 7
+#define V_TF_REMOVE_VLAN(x) ((x) << S_TF_REMOVE_VLAN)
+
#define S_TF_SSWS_DISABLED 8
#define V_TF_SSWS_DISABLED(x) ((x) << S_TF_SSWS_DISABLED)
@@ -576,15 +682,24 @@
#define S_TF_RX_FLOW_CONTROL_DISABLE 10
#define V_TF_RX_FLOW_CONTROL_DISABLE(x) ((x) << S_TF_RX_FLOW_CONTROL_DISABLE)
+#define S_TF_NAT_SEQ_CHECK 10
+#define V_TF_NAT_SEQ_CHECK(x) ((x) << S_TF_NAT_SEQ_CHECK)
+
#define S_TF_RX_CHANNEL 11
#define V_TF_RX_CHANNEL(x) ((x) << S_TF_RX_CHANNEL)
#define S_TF_TX_CHANNEL0 12
#define V_TF_TX_CHANNEL0(x) ((x) << S_TF_TX_CHANNEL0)
+#define S_TF_LPBK_TX_CHANNEL0 12
+#define V_TF_LPBK_TX_CHANNEL0(x) ((x) << S_TF_LPBK_TX_CHANNEL0)
+
#define S_TF_TX_CHANNEL1 13
#define V_TF_TX_CHANNEL1(x) ((x) << S_TF_TX_CHANNEL1)
+#define S_TF_LPBK_TX_CHANNEL1 13
+#define V_TF_LPBK_TX_CHANNEL1(x) ((x) << S_TF_LPBK_TX_CHANNEL1)
+
#define S_TF_TX_QUIESCE 14
#define V_TF_TX_QUIESCE(x) ((x) << S_TF_TX_QUIESCE)
@@ -607,6 +722,10 @@
#define M_TF_TX_QUEUE 0x7ULL
#define V_TF_TX_QUEUE(x) ((x) << S_TF_TX_QUEUE)
+#define S_TF_NAT_MODE 18
+#define M_TF_NAT_MODE 0x7ULL
+#define V_TF_NAT_MODE(x) ((x) << S_TF_NAT_MODE)
+
#define S_TF_TURBO 21
#define V_TF_TURBO(x) ((x) << S_TF_TURBO)
@@ -652,8 +771,8 @@
#define S_TF_RCV_COALESCE_HEARTBEAT 32
#define V_TF_RCV_COALESCE_HEARTBEAT(x) ((__u64)(x) << S_TF_RCV_COALESCE_HEARTBEAT)
-#define S_TF_INIT 33
-#define V_TF_INIT(x) ((__u64)(x) << S_TF_INIT)
+#define S_TF_RSS_FW 33
+#define V_TF_RSS_FW(x) ((__u64)(x) << S_TF_RSS_FW)
#define S_TF_ACTIVE_OPEN 34
#define V_TF_ACTIVE_OPEN(x) ((__u64)(x) << S_TF_ACTIVE_OPEN)
@@ -712,12 +831,21 @@
#define S_TF_RECV_SCALE 52
#define V_TF_RECV_SCALE(x) ((__u64)(x) << S_TF_RECV_SCALE)
+#define S_TF_NAT_FLAG_CHECK 52
+#define V_TF_NAT_FLAG_CHECK(x) ((__u64)(x) << S_TF_NAT_FLAG_CHECK)
+
#define S_TF_RECV_TSTMP 53
#define V_TF_RECV_TSTMP(x) ((__u64)(x) << S_TF_RECV_TSTMP)
+#define S_TF_LPBK_TX_LPBK 53
+#define V_TF_LPBK_TX_LPBK(x) ((__u64)(x) << S_TF_LPBK_TX_LPBK)
+
#define S_TF_RECV_SACK 54
#define V_TF_RECV_SACK(x) ((__u64)(x) << S_TF_RECV_SACK)
+#define S_TF_SWAP_MAC_ADDR 54
+#define V_TF_SWAP_MAC_ADDR(x) ((__u64)(x) << S_TF_SWAP_MAC_ADDR)
+
#define S_TF_PEND_CTL0 55
#define V_TF_PEND_CTL0(x) ((__u64)(x) << S_TF_PEND_CTL0)
@@ -751,6 +879,9 @@
#define S_TF_CCTRL_RFR 62
#define V_TF_CCTRL_RFR(x) ((__u64)(x) << S_TF_CCTRL_RFR)
+#define S_TF_INSERT_VLAN 62
+#define V_TF_INSERT_VLAN(x) ((__u64)(x) << S_TF_INSERT_VLAN)
+
#define S_TF_CORE_BYPASS 63
#define V_TF_CORE_BYPASS(x) ((__u64)(x) << S_TF_CORE_BYPASS)
@@ -772,6 +903,9 @@
#define S_TF_DDP_RX2TX 21
#define V_TF_DDP_RX2TX(x) ((x) << S_TF_DDP_RX2TX)
+#define S_TF_DDP_INDICATE_FLL 22
+#define V_TF_DDP_INDICATE_FLL(x) ((x) << S_TF_DDP_INDICATE_FLL)
+
#define S_TF_DDP_BUF0_VALID 24
#define V_TF_DDP_BUF0_VALID(x) ((x) << S_TF_DDP_BUF0_VALID)
diff --git a/sys/dev/cxgbe/common/t4vf_hw.c b/sys/dev/cxgbe/common/t4vf_hw.c
index 25ab3db77c72..8091eb5db2f9 100644
--- a/sys/dev/cxgbe/common/t4vf_hw.c
+++ b/sys/dev/cxgbe/common/t4vf_hw.c
@@ -139,7 +139,10 @@ int t4vf_get_sge_params(struct adapter *adapter)
* This is based on the PF from which we're instantiated.
*/
whoami = t4_read_reg(adapter, VF_PL_REG(A_PL_VF_WHOAMI));
- pf = G_SOURCEPF(whoami);
+ if (chip_id(adapter) <= CHELSIO_T5)
+ pf = G_SOURCEPF(whoami);
+ else
+ pf = G_T6_SOURCEPF(whoami);
s_hps = (S_HOSTPAGESIZEPF0 +
(S_HOSTPAGESIZEPF1 - S_HOSTPAGESIZEPF0) * pf);
@@ -426,3 +429,30 @@ int t4vf_get_vf_mac(struct adapter *adapter, unsigned int port,
return ret;
}
+
+/*
+ * t4vf_get_vf_vlan - Get the VLAN ID to be set on the VI of this VF.
+ * @adapter: The adapter
+ *
+ * Find the VLAN ID to be set on the VF's VI.  The requested VLAN ID
+ * comes from the host OS via a callback in the PF driver.
+ */
+int t4vf_get_vf_vlan(struct adapter *adapter)
+{
+ struct fw_acl_vlan_cmd cmd = {0};
+ int vlan = 0;
+ int ret = 0;
+
+ cmd.op_to_vfn = htonl(V_FW_CMD_OP(FW_ACL_VLAN_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ);
+
+ /* Note: Do not enable the ACL */
+ cmd.en_to_len16 = htonl((unsigned int)FW_LEN16(cmd));
+
+ ret = t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), &cmd);
+
+ if (!ret)
+ vlan = be16_to_cpu(cmd.vlanid[0]);
+
+ return vlan;
+}
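+
+/*
+ * Usage sketch (illustrative only; not a caller added by this
+ * change): a VF driver could fetch the PF-assigned VLAN during
+ * attach and program it into its VI:
+ *
+ *	vlan = t4vf_get_vf_vlan(adapter);
+ *	if (vlan != 0) {
+ *		// apply 'vlan' to the VF's VI
+ *	}
+ */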
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.c b/sys/dev/cxgbe/crypto/t4_crypto.c
index 4ea4ad1639eb..80e31b1159fd 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.c
+++ b/sys/dev/cxgbe/crypto/t4_crypto.c
@@ -208,6 +208,7 @@ struct ccr_softc {
counter_u64_t stats_pad_error;
counter_u64_t stats_sglist_error;
counter_u64_t stats_process_error;
+ counter_u64_t stats_pointer_error;
counter_u64_t stats_sw_fallback;
struct sysctl_ctx_list ctx;
@@ -458,8 +459,9 @@ ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
V_ULP_TXPKT_DATAMODIFY(0) |
- V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
+ V_T7_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
V_ULP_TXPKT_DEST(0) |
+ (is_t7(sc->adapter) ? V_ULP_TXPKT_CMDMORE(1) : 0) |
V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
crwr->ulptx.len = htobe32(
((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
@@ -545,7 +547,7 @@ ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
@@ -705,7 +707,7 @@ ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1006,7 +1008,7 @@ ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1293,7 +1295,7 @@ ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1645,7 +1647,7 @@ ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
crwr->sec_cpl.op_ivinsrtofst = htobe32(
V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
- V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
+ V_T7_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
@@ -1804,8 +1806,8 @@ ccr_identify(driver_t *driver, device_t parent)
sc = device_get_softc(parent);
if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
- device_find_child(parent, "ccr", -1) == NULL)
- device_add_child(parent, "ccr", -1);
+ device_find_child(parent, "ccr", DEVICE_UNIT_ANY) == NULL)
+ device_add_child(parent, "ccr", DEVICE_UNIT_ANY);
}
static int
@@ -1883,6 +1885,9 @@ ccr_sysctls(struct ccr_softc *sc)
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
CTLFLAG_RD, &sc->stats_process_error,
"Requests failed during queueing");
+ SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pointer_error",
+ CTLFLAG_RD, &sc->stats_pointer_error,
+ "Requests with a misaligned request pointer");
SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
CTLFLAG_RD, &sc->stats_sw_fallback,
"Requests processed by falling back to software");
@@ -1932,13 +1937,15 @@ ccr_init_port(struct ccr_softc *sc, int port)
"Too many ports to fit in port_mask");
/*
- * Completions for crypto requests on port 1 can sometimes
+ * Completions for crypto requests on port 1 on T6 can sometimes
* return a stale cookie value due to a firmware bug. Disable
* requests on port 1 by default on affected firmware.
*/
- if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
- port == 0)
- sc->port_mask |= 1u << port;
+ if (port != 0 && is_t6(sc->adapter) &&
+ sc->adapter->params.fw_vers < FW_VERSION32(1, 25, 4, 0))
+ return;
+
+ sc->port_mask |= 1u << port;
}
static int
@@ -1988,6 +1995,7 @@ ccr_attach(device_t dev)
sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
sc->stats_process_error = counter_u64_alloc(M_WAITOK);
+ sc->stats_pointer_error = counter_u64_alloc(M_WAITOK);
sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
ccr_sysctls(sc);
@@ -2034,6 +2042,7 @@ ccr_detach(device_t dev)
counter_u64_free(sc->stats_pad_error);
counter_u64_free(sc->stats_sglist_error);
counter_u64_free(sc->stats_process_error);
+ counter_u64_free(sc->stats_pointer_error);
counter_u64_free(sc->stats_sw_fallback);
for_each_port(sc->adapter, i) {
ccr_free_port(sc, i);
@@ -2531,6 +2540,16 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
+ /*
+	 * Request pointers with any of the low cookie bits set can't
+	 * be stored as the cookie in the CPL_FW6_PLD reply, as those
+	 * bits are used to demultiplex shared CPL_FW6_PLD handlers.
+ */
+ if (((uintptr_t)crp & CPL_FW6_COOKIE_MASK) != 0) {
+ counter_u64_add(sc->stats_pointer_error, 1);
+ error = EINVAL;
+ goto out_unlocked;
+ }
+
mtx_lock(&s->lock);
error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
@@ -2637,6 +2656,7 @@ ccr_process(device_t dev, struct cryptop *crp, int hint)
out:
mtx_unlock(&s->lock);
+out_unlocked:
if (error) {
crp->crp_etype = error;
crypto_done(crp);
@@ -2646,7 +2666,7 @@ out:
}
static int
-do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
+fw6_pld_ccr(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct ccr_softc *sc;
@@ -2661,7 +2681,7 @@ do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
else
cpl = (const void *)(rss + 1);
- crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
+ crp = (struct cryptop *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
s = crypto_get_driver_session(crp->crp_session);
status = be64toh(cpl->data[0]);
if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
@@ -2715,10 +2735,12 @@ ccr_modevent(module_t mod, int cmd, void *arg)
switch (cmd) {
case MOD_LOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, fw6_pld_ccr,
+ CPL_FW6_COOKIE_CCR);
return (0);
case MOD_UNLOAD:
- t4_register_cpl_handler(CPL_FW6_PLD, NULL);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL,
+ CPL_FW6_COOKIE_CCR);
return (0);
default:
return (EOPNOTSUPP);
@@ -2745,7 +2767,9 @@ static driver_t ccr_driver = {
sizeof(struct ccr_softc)
};
-DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, chnex, ccr_driver, ccr_modevent, NULL);
+DRIVER_MODULE(ccr, t6nex, ccr_driver, NULL, NULL);
MODULE_VERSION(ccr, 1);
MODULE_DEPEND(ccr, crypto, 1, 1, 1);
+MODULE_DEPEND(ccr, chnex, 1, 1, 1);
MODULE_DEPEND(ccr, t6nex, 1, 1, 1);
diff --git a/sys/dev/cxgbe/crypto/t4_crypto.h b/sys/dev/cxgbe/crypto/t4_crypto.h
index 452e48d20dfd..71c9ec3903ef 100644
--- a/sys/dev/cxgbe/crypto/t4_crypto.h
+++ b/sys/dev/cxgbe/crypto/t4_crypto.h
@@ -139,6 +139,7 @@ struct phys_sge_pairs {
#define SCMD_PROTO_VERSION_TLS_1_2 0
#define SCMD_PROTO_VERSION_TLS_1_1 1
#define SCMD_PROTO_VERSION_GENERIC 4
+#define SCMD_PROTO_VERSION_TLS_1_3 8
#define SCMD_CIPH_MODE_NOP 0
#define SCMD_CIPH_MODE_AES_CBC 1
diff --git a/sys/dev/cxgbe/crypto/t4_keyctx.c b/sys/dev/cxgbe/crypto/t4_keyctx.c
index 50e339ac2e05..b85e50fd6cb1 100644
--- a/sys/dev/cxgbe/crypto/t4_keyctx.c
+++ b/sys/dev/cxgbe/crypto/t4_keyctx.c
@@ -437,10 +437,16 @@ t4_tls_key_info_size(const struct ktls_session *tls)
int
t4_tls_proto_ver(const struct ktls_session *tls)
{
- if (tls->params.tls_vminor == TLS_MINOR_VER_ONE)
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_ONE:
return (SCMD_PROTO_VERSION_TLS_1_1);
- else
+ case TLS_MINOR_VER_TWO:
return (SCMD_PROTO_VERSION_TLS_1_2);
+ case TLS_MINOR_VER_THREE:
+ return (SCMD_PROTO_VERSION_TLS_1_3);
+ default:
+ __assert_unreachable();
+ }
}
int
@@ -492,6 +498,17 @@ t4_tls_hmac_ctrl(const struct ktls_session *tls)
}
static int
+tls_seqnum_ctrl(const struct ktls_session *tls)
+{
+ switch (tls->params.tls_vminor) {
+ case TLS_MINOR_VER_THREE:
+ return (0);
+ default:
+ return (3);
+ }
+}
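+
+/*
+ * Illustrative note: TLS 1.3 records carry no explicit sequence
+ * number on the wire, so explicit sequence-number handling is
+ * disabled (0) for that version, while earlier versions keep the
+ * control value (3) used for records carrying an explicit sequence
+ * number.
+ */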
+
+static int
tls_cipher_key_size(const struct ktls_session *tls)
{
switch (tls->params.cipher_key_len) {
@@ -557,7 +574,7 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
kctx->u.rxhdr.authmode_to_rxvalid =
V_TLS_KEYCTX_TX_WR_AUTHMODE(t4_tls_auth_mode(tls)) |
- V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(3) |
+ V_TLS_KEYCTX_TX_WR_SEQNUMCTRL(tls_seqnum_ctrl(tls)) |
V_TLS_KEYCTX_TX_WR_RXVALID(1);
kctx->u.rxhdr.ivpresent_to_rxmk_size =
@@ -607,7 +624,8 @@ t4_tls_key_ctx(const struct ktls_session *tls, int direction,
_Static_assert(offsetof(struct tx_keyctx_hdr, txsalt) ==
offsetof(struct rx_keyctx_hdr, rxsalt),
"salt offset mismatch");
- memcpy(kctx->u.txhdr.txsalt, tls->params.iv, SALT_SIZE);
+ memcpy(kctx->u.txhdr.txsalt, tls->params.iv,
+ tls->params.iv_len);
t4_init_gmac_hash(tls->params.cipher_key,
tls->params.cipher_key_len, hash);
} else {
@@ -665,6 +683,10 @@ t4_write_tlskey_wr(const struct ktls_session *tls, int direction, int tid,
kwr->reneg_to_write_rx = V_KEY_GET_LOC(direction == KTLS_TX ?
KEY_WRITE_TX : KEY_WRITE_RX);
+ /* We don't need to use V_T7_ULP_MEMIO_DATA_LEN in this routine. */
+ _Static_assert(V_T7_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5) ==
+ V_ULP_MEMIO_DATA_LEN(TLS_KEY_CONTEXT_SZ >> 5), "datalen mismatch");
+
/* master command */
kwr->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
V_T5_ULP_MEMIO_ORDER(1) | V_T5_ULP_MEMIO_IMM(1));
diff --git a/sys/dev/cxgbe/crypto/t6_kern_tls.c b/sys/dev/cxgbe/crypto/t6_kern_tls.c
index f374de5241f6..454b2e264a0e 100644
--- a/sys/dev/cxgbe/crypto/t6_kern_tls.c
+++ b/sys/dev/cxgbe/crypto/t6_kern_tls.c
@@ -995,7 +995,7 @@ t6_ktls_parse_pkt(struct mbuf *m)
* See if we have any TCP options or a FIN requiring a
* dedicated packet.
*/
- if ((tcp->th_flags & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
+ if ((tcp_get_flags(tcp) & TH_FIN) != 0 || ktls_has_tcp_options(tcp)) {
wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core) + roundup2(m->m_len, 16);
if (wr_len > SGE_MAX_WR_LEN) {
@@ -1167,7 +1167,8 @@ ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m,
} else {
ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
newip6 = *ip6;
- newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
@@ -1180,7 +1181,7 @@ ktls_write_tcp_options(struct sge_txq *txq, void *dst, struct mbuf *m,
/* Clear PUSH and FIN in the TCP header if present. */
tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
newtcp = *tcp;
- newtcp.th_flags &= ~(TH_PUSH | TH_FIN);
+ tcp_set_flags(&newtcp, tcp_get_flags(&newtcp) & ~(TH_PUSH | TH_FIN));
copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
/* Copy rest of packet. */
@@ -1267,7 +1268,8 @@ ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
} else {
ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
newip6 = *ip6;
- newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
@@ -1370,7 +1372,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
CTR4(KTR_CXGBE, "%s: tid %d short TLS record %u with offset %u",
__func__, tlsp->tid, (u_int)m_tls->m_epg_seqno, offset);
#endif
- if (m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) != 0) {
+ if (m_tls->m_next == NULL && (tcp_get_flags(tcp) & TH_FIN) != 0) {
txq->kern_tls_fin_short++;
#ifdef INVARIANTS
panic("%s: FIN on short TLS record", __func__);
@@ -1385,7 +1387,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
* FIN is set, then ktls_write_tcp_fin() will write out the
* last work request.
*/
- last_wr = m_tls->m_next == NULL && (tcp->th_flags & TH_FIN) == 0;
+ last_wr = m_tls->m_next == NULL && (tcp_get_flags(tcp) & TH_FIN) == 0;
/*
* The host stack may ask us to not send part of the start of
@@ -1769,7 +1771,7 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
tx_data->rsvd = htobe32(tcp_seqno + m_tls->m_epg_hdrlen + offset);
}
tx_data->flags = htobe32(F_TX_BYPASS);
- if (last_wr && tcp->th_flags & TH_PUSH)
+ if (last_wr && tcp_get_flags(tcp) & TH_PUSH)
tx_data->flags |= htobe32(F_TX_PUSH | F_TX_SHOVE);
/* Populate the TLS header */
@@ -1793,9 +1795,11 @@ ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq, void *dst,
}
if (imm_len % 16 != 0) {
- /* Zero pad to an 8-byte boundary. */
- memset(out, 0, 8 - (imm_len % 8));
- out += 8 - (imm_len % 8);
+ if (imm_len % 8 != 0) {
+ /* Zero pad to an 8-byte boundary. */
+ memset(out, 0, 8 - (imm_len % 8));
+ out += 8 - (imm_len % 8);
+ }
/*
* Insert a ULP_TX_SC_NOOP if needed so the SGL is
@@ -1909,7 +1913,8 @@ ktls_write_tcp_fin(struct sge_txq *txq, void *dst, struct mbuf *m,
} else {
ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
newip6 = *ip6;
- newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen);
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
@@ -1966,7 +1971,7 @@ t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
m->m_pkthdr.l3hlen);
pidx = eq->pidx;
- has_fin = (tcp->th_flags & TH_FIN) != 0;
+ has_fin = (tcp_get_flags(tcp) & TH_FIN) != 0;
/*
* If this TLS record has a FIN, then we will send any
@@ -1998,7 +2003,7 @@ t6_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
if (tlsp->l2te)
t4_l2t_release(tlsp->l2te);
tlsp->l2te = t4_l2t_alloc_tls(tlsp->sc, txq, dst, &ndesc,
- vlan_tag, tlsp->vi->pi->lport, eh->ether_dhost);
+ vlan_tag, tlsp->vi->pi->hw_port, eh->ether_dhost);
if (tlsp->l2te == NULL)
CXGBE_UNIMPLEMENTED("failed to allocate TLS L2TE");
if (ndesc != 0) {
diff --git a/sys/dev/cxgbe/crypto/t7_kern_tls.c b/sys/dev/cxgbe/crypto/t7_kern_tls.c
new file mode 100644
index 000000000000..217459126361
--- /dev/null
+++ b/sys/dev/cxgbe/crypto/t7_kern_tls.c
@@ -0,0 +1,2196 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Chelsio Communications
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_kern_tls.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/ktr.h>
+#include <sys/ktls.h>
+#include <sys/sglist.h>
+#include <sys/socket.h>
+#include <sys/socketvar.h>
+#include <sys/sockbuf.h>
+#include <netinet/in.h>
+#include <netinet/in_pcb.h>
+#include <netinet/ip.h>
+#include <netinet/ip6.h>
+#include <netinet/tcp_var.h>
+#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include "common/common.h"
+#include "common/t4_regs.h"
+#include "common/t4_regs_values.h"
+#include "common/t4_tcb.h"
+#include "t4_l2t.h"
+#include "t4_clip.h"
+#include "t4_mp_ring.h"
+#include "crypto/t4_crypto.h"
+
+#if defined(INET) || defined(INET6)
+
+#define TLS_HEADER_LENGTH 5
+
+struct tls_scmd {
+ __be32 seqno_numivs;
+ __be32 ivgen_hdrlen;
+};
+
+struct tlspcb {
+ struct m_snd_tag com;
+ struct vi_info *vi; /* virtual interface */
+ struct adapter *sc;
+ struct sge_txq *txq;
+
+ int tx_key_addr;
+ bool inline_key;
+ bool tls13;
+ unsigned char enc_mode;
+
+ struct tls_scmd scmd0;
+ struct tls_scmd scmd0_partial;
+ struct tls_scmd scmd0_short;
+
+ unsigned int tx_key_info_size;
+
+ uint16_t prev_mss;
+
+ /* Fields used for GCM records using GHASH state. */
+ uint16_t ghash_offset;
+ uint64_t ghash_tls_seqno;
+ char ghash[AES_GMAC_HASH_LEN];
+ bool ghash_valid;
+ bool ghash_pending;
+ bool ghash_lcb;
+ bool queue_mbufs;
+ uint8_t rx_chid;
+ uint16_t rx_qid;
+ struct mbufq pending_mbufs;
+
+ /*
+ * Only used outside of setup and teardown when using inline
+ * keys or for partial GCM mode.
+ */
+ struct tls_keyctx keyctx;
+};
+
+static void t7_tls_tag_free(struct m_snd_tag *mst);
+static int ktls_setup_keys(struct tlspcb *tlsp,
+ const struct ktls_session *tls, struct sge_txq *txq);
+
+static void *zero_buffer;
+static vm_paddr_t zero_buffer_pa;
+
+static const struct if_snd_tag_sw t7_tls_tag_sw = {
+ .snd_tag_free = t7_tls_tag_free,
+ .type = IF_SND_TAG_TYPE_TLS
+};
+
+static inline struct tlspcb *
+mst_to_tls(struct m_snd_tag *t)
+{
+ return (__containerof(t, struct tlspcb, com));
+}
+
+static struct tlspcb *
+alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, int flags)
+{
+ struct port_info *pi = vi->pi;
+ struct adapter *sc = pi->adapter;
+ struct tlspcb *tlsp;
+
+ tlsp = malloc(sizeof(*tlsp), M_CXGBE, M_ZERO | flags);
+ if (tlsp == NULL)
+ return (NULL);
+
+ m_snd_tag_init(&tlsp->com, ifp, &t7_tls_tag_sw);
+ tlsp->vi = vi;
+ tlsp->sc = sc;
+ tlsp->tx_key_addr = -1;
+ tlsp->ghash_offset = -1;
+ tlsp->rx_chid = pi->rx_chan;
+ tlsp->rx_qid = sc->sge.rxq[pi->vi->first_rxq].iq.abs_id;
+ mbufq_init(&tlsp->pending_mbufs, INT_MAX);
+
+ return (tlsp);
+}
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ const struct ktls_session *tls;
+ struct tlspcb *tlsp;
+ struct adapter *sc;
+ struct vi_info *vi;
+ struct inpcb *inp;
+ struct sge_txq *txq;
+ int error, iv_size, keyid, mac_first;
+
+ tls = params->tls.tls;
+
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE)
+ return (EPROTONOSUPPORT);
+
+ /* Sanity check values in *tls. */
+ switch (tls->params.cipher_algorithm) {
+ case CRYPTO_AES_CBC:
+ /* XXX: Explicitly ignore any provided IV. */
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+ switch (tls->params.auth_algorithm) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+ iv_size = AES_BLOCK_LEN;
+ mac_first = 1;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ switch (tls->params.cipher_key_len) {
+ case 128 / 8:
+ case 192 / 8:
+ case 256 / 8:
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ /*
+ * The IV size for TLS 1.2 is the explicit IV in the
+ * record header. For TLS 1.3 it is the size of the
+ * sequence number.
+ */
+ iv_size = 8;
+ mac_first = 0;
+ break;
+ default:
+ return (EPROTONOSUPPORT);
+ }
+
+ vi = if_getsoftc(ifp);
+ sc = vi->adapter;
+
+ tlsp = alloc_tlspcb(ifp, vi, M_WAITOK);
+
+ /*
+	 * Pointers with any of the low cookie bits set can't be
+	 * stored as the cookie in the CPL_FW6_PLD reply.
+ */
+ if (((uintptr_t)tlsp & CPL_FW6_COOKIE_MASK) != 0) {
+ error = EINVAL;
+ goto failed;
+ }
+
+ tlsp->tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
+
+ if (sc->tlst.inline_keys)
+ keyid = -1;
+ else
+ keyid = t4_alloc_tls_keyid(sc);
+ if (keyid < 0) {
+ CTR(KTR_CXGBE, "%s: %p using immediate key ctx", __func__,
+ tlsp);
+ tlsp->inline_key = true;
+ } else {
+ tlsp->tx_key_addr = keyid;
+ CTR(KTR_CXGBE, "%s: %p allocated TX key addr %#x", __func__,
+ tlsp, tlsp->tx_key_addr);
+ }
+
+ inp = params->tls.inp;
+ INP_RLOCK(inp);
+ if (inp->inp_flags & INP_DROPPED) {
+ INP_RUNLOCK(inp);
+ error = ECONNRESET;
+ goto failed;
+ }
+
+ txq = &sc->sge.txq[vi->first_txq];
+ if (inp->inp_flowtype != M_HASHTYPE_NONE)
+ txq += ((inp->inp_flowid % (vi->ntxq - vi->rsrv_noflowq)) +
+ vi->rsrv_noflowq);
+ tlsp->txq = txq;
+ INP_RUNLOCK(inp);
+
+ error = ktls_setup_keys(tlsp, tls, txq);
+ if (error)
+ goto failed;
+
+ tlsp->enc_mode = t4_tls_cipher_mode(tls);
+ tlsp->tx_key_info_size = t4_tls_key_info_size(tls);
+
+ /* The SCMD fields used when encrypting a full TLS record. */
+ if (tlsp->tls13)
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0);
+ else
+ tlsp->scmd0.seqno_numivs = V_SCMD_SEQ_NO_CTRL(3);
+ tlsp->scmd0.seqno_numivs |=
+ V_SCMD_PROTO_VERSION(t4_tls_proto_ver(tls)) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(iv_size / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0.seqno_numivs = htobe32(tlsp->scmd0.seqno_numivs);
+
+ tlsp->scmd0.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0);
+ if (tlsp->inline_key)
+ tlsp->scmd0.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * (no trailer and possibly a truncated payload).
+ */
+ tlsp->scmd0_short.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
+ V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(0);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CTR);
+ else
+ tlsp->scmd0_short.seqno_numivs |=
+ V_SCMD_CIPH_MODE(tlsp->enc_mode);
+ tlsp->scmd0_short.seqno_numivs =
+ htobe32(tlsp->scmd0_short.seqno_numivs);
+
+ tlsp->scmd0_short.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1);
+ if (tlsp->inline_key)
+ tlsp->scmd0_short.ivgen_hdrlen |= V_SCMD_KEY_CTX_INLINE(1);
+
+ /*
+ * The SCMD fields used when encrypting a short TLS record
+ * using a partial GHASH.
+ */
+ tlsp->scmd0_partial.seqno_numivs = V_SCMD_SEQ_NO_CTRL(0) |
+ V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
+ V_SCMD_ENC_DEC_CTRL(SCMD_ENCDECCTRL_ENCRYPT) |
+ V_SCMD_CIPH_AUTH_SEQ_CTRL((mac_first == 0)) |
+ V_SCMD_CIPH_MODE(tlsp->enc_mode) |
+ V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
+ V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
+ V_SCMD_IV_SIZE(AES_BLOCK_LEN / 2) | V_SCMD_NUM_IVS(1);
+ tlsp->scmd0_partial.seqno_numivs =
+ htobe32(tlsp->scmd0_partial.seqno_numivs);
+
+ tlsp->scmd0_partial.ivgen_hdrlen = V_SCMD_IV_GEN_CTRL(0) |
+ V_SCMD_TLS_FRAG_ENABLE(0) | V_SCMD_AADIVDROP(1) |
+ V_SCMD_KEY_CTX_INLINE(1);
+
+ TXQ_LOCK(txq);
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM)
+ txq->kern_tls_gcm++;
+ else
+ txq->kern_tls_cbc++;
+ TXQ_UNLOCK(txq);
+ *pt = &tlsp->com;
+ return (0);
+
+failed:
+ m_snd_tag_rele(&tlsp->com);
+ return (error);
+}
+
+static int
+ktls_setup_keys(struct tlspcb *tlsp, const struct ktls_session *tls,
+ struct sge_txq *txq)
+{
+ struct tls_key_req *kwr;
+ struct tls_keyctx *kctx;
+ void *items[1];
+ struct mbuf *m;
+ int error;
+
+ /*
+ * Store the salt and keys in the key context. For
+ * connections with an inline key, this key context is passed
+ * as immediate data in each work request. For connections
+ * storing the key in DDR, a work request is used to store a
+ * copy of the key context in DDR.
+ */
+ t4_tls_key_ctx(tls, KTLS_TX, &tlsp->keyctx);
+ if (tlsp->inline_key)
+ return (0);
+
+ /* Populate key work request. */
+ m = alloc_wr_mbuf(TLS_KEY_WR_SZ, M_NOWAIT);
+ if (m == NULL) {
+ CTR(KTR_CXGBE, "%s: %p failed to alloc WR mbuf", __func__,
+ tlsp);
+ return (ENOMEM);
+ }
+ m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
+ m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
+ kwr = mtod(m, void *);
+ memset(kwr, 0, TLS_KEY_WR_SZ);
+
+ t4_write_tlskey_wr(tls, KTLS_TX, 0, 0, tlsp->tx_key_addr, kwr);
+ kctx = (struct tls_keyctx *)(kwr + 1);
+ memcpy(kctx, &tlsp->keyctx, sizeof(*kctx));
+
+ /*
+ * Place the key work request in the transmit queue. It
+ * should be sent to the NIC before any TLS packets using this
+ * session.
+ */
+ items[0] = m;
+ error = mp_ring_enqueue(txq->r, items, 1, 1);
+ if (error)
+ m_free(m);
+ else
+ CTR(KTR_CXGBE, "%s: %p sent key WR", __func__, tlsp);
+ return (error);
+}
+
+static u_int
+ktls_base_wr_size(struct tlspcb *tlsp, bool inline_key)
+{
+ u_int wr_len;
+
+ wr_len = sizeof(struct fw_ulptx_wr); // 16
+ wr_len += sizeof(struct ulp_txpkt); // 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ wr_len += sizeof(struct cpl_tx_sec_pdu);// 32
+ if (inline_key)
+ wr_len += tlsp->tx_key_info_size;
+ else {
+ wr_len += sizeof(struct ulptx_sc_memrd);// 8
+ wr_len += sizeof(struct ulptx_idata); // 8
+ }
+ /* SplitMode CPL_RX_PHYS_DSGL here if needed. */
+ /* CPL_TX_*_LSO here if needed. */
+ wr_len += sizeof(struct cpl_tx_pkt_core);// 16
+ return (wr_len);
+}
+
+static u_int
+ktls_sgl_size(u_int nsegs)
+{
+ u_int wr_len;
+
+ /* First segment is part of ulptx_sgl. */
+ nsegs--;
+
+ wr_len = sizeof(struct ulptx_sgl);
+ wr_len += 8 * ((3 * nsegs) / 2 + (nsegs & 1));
+ return (wr_len);
+}
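+
+/*
+ * For example (illustrative): with nsegs = 3, one segment is folded
+ * into the ulptx_sgl header and the remaining two fill a single SGE
+ * pair, so the SGL occupies 16 + 8 * 3 = 40 bytes.
+ */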
+
+/*
+ * A request that doesn't need to generate the TLS trailer is a short
+ * record. For these requests, part of the TLS record payload is
+ * encrypted without invoking the MAC.
+ *
+ * Returns true if this record should be sent as a short record. In
+ * either case, the remaining outputs describe how much of the
+ * TLS record to send as input to the crypto block and the amount of
+ * crypto output to trim via SplitMode:
+ *
+ * *header_len - Number of bytes of TLS header to pass as immediate
+ * data
+ *
+ * *offset - Start offset of TLS record payload to pass as DSGL data
+ *
+ * *plen - Length of TLS record payload to pass as DSGL data
+ *
+ * *leading_waste - amount of non-packet-header bytes to drop at the
+ * start of the crypto output
+ *
+ * *trailing_waste - amount of crypto output to drop from the end
+ */
+static bool
+ktls_is_short_record(struct tlspcb *tlsp, struct mbuf *m_tls, u_int tlen,
+ u_int rlen, u_int *header_len, u_int *offset, u_int *plen,
+ u_int *leading_waste, u_int *trailing_waste, bool send_partial_ghash,
+ bool request_ghash)
+{
+ u_int new_tlen, trailer_len;
+
+ MPASS(tlen > m_tls->m_epg_hdrlen);
+
+ /*
+ * For TLS 1.3 treat the inner record type stored as the first
+ * byte of the trailer as part of the payload rather than part
+ * of the trailer.
+ */
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+
+ /*
+ * Default to sending the full record as input to the crypto
+ * engine and relying on SplitMode to drop any waste.
+ */
+ *header_len = m_tls->m_epg_hdrlen;
+ *offset = 0;
+ *plen = rlen - (m_tls->m_epg_hdrlen + trailer_len);
+ *leading_waste = mtod(m_tls, vm_offset_t);
+ *trailing_waste = rlen - tlen;
+ if (!tlsp->sc->tlst.short_records)
+ return (false);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_CBC) {
+ /*
+ * For AES-CBC we have to send input from the start of
+ * the TLS record payload that is a multiple of the
+ * block size. new_tlen rounds up tlen to the end of
+ * the containing AES block. If this last block
+ * overlaps with the trailer, send the full record to
+ * generate the MAC.
+ */
+ new_tlen = TLS_HEADER_LENGTH +
+ roundup2(tlen - TLS_HEADER_LENGTH, AES_BLOCK_LEN);
+ if (rlen - new_tlen < trailer_len)
+ return (false);
+
+ *trailing_waste = new_tlen - tlen;
+ *plen = new_tlen - m_tls->m_epg_hdrlen;
+ } else {
+ if (rlen - tlen < trailer_len ||
+ (rlen - tlen == trailer_len && request_ghash)) {
+ /*
+ * For AES-GCM we have to send the full record
+ * if the end overlaps with the trailer and a
+ * partial GHASH isn't being sent.
+ */
+ if (!send_partial_ghash)
+ return (false);
+
+ /*
+ * Will need to treat any excess trailer bytes as
+ * trailing waste. *trailing_waste is already
+ * correct.
+ */
+ } else {
+ /*
+ * We can use AES-CTR or AES-GCM in partial GHASH
+ * mode to encrypt a partial PDU.
+ *
+ * The last block can be partially encrypted
+ * without any trailing waste.
+ */
+ *trailing_waste = 0;
+ *plen = tlen - m_tls->m_epg_hdrlen;
+ }
+
+ /*
+ * If this request starts at the first byte of the
+ * payload (so the previous request sent the full TLS
+ * header as a tunnel packet) and a partial GHASH is
+ * being requested, the full TLS header must be sent
+ * as input for the GHASH.
+ */
+ if (mtod(m_tls, vm_offset_t) == m_tls->m_epg_hdrlen &&
+ request_ghash)
+ return (true);
+
+ /*
+ * In addition, we can minimize leading waste by
+ * starting encryption at the start of the closest AES
+ * block.
+ */
+ if (mtod(m_tls, vm_offset_t) >= m_tls->m_epg_hdrlen) {
+ *header_len = 0;
+ *offset = mtod(m_tls, vm_offset_t) -
+ m_tls->m_epg_hdrlen;
+ if (*offset >= *plen)
+ *offset = *plen;
+ else
+ *offset = rounddown2(*offset, AES_BLOCK_LEN);
+
+ /*
+ * If the request is just bytes from the trailer,
+ * trim the offset to the end of the payload.
+ */
+ *offset = min(*offset, *plen);
+ *plen -= *offset;
+ *leading_waste -= (m_tls->m_epg_hdrlen + *offset);
+ }
+ }
+ return (true);
+}
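+
+/*
+ * Worked example (illustrative): for an AES-CBC record with a 5-byte
+ * header, rlen = 1050, trailer_len = 21 and tlen = 600, new_tlen =
+ * 5 + roundup2(595, AES_BLOCK_LEN) = 613; rlen - new_tlen = 437 is
+ * not less than trailer_len, so the record is sent short with
+ * plen = 608 and trailing_waste = 13.
+ */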
+
+/* Size of the AES-GCM TLS AAD for a given connection. */
+static int
+ktls_gcm_aad_len(struct tlspcb *tlsp)
+{
+ return (tlsp->tls13 ? sizeof(struct tls_aead_data_13) :
+ sizeof(struct tls_aead_data));
+}
+
+static int
+ktls_wr_len(struct tlspcb *tlsp, struct mbuf *m, struct mbuf *m_tls,
+ int *nsegsp)
+{
+ const struct tls_record_layer *hdr;
+ u_int header_len, imm_len, offset, plen, rlen, tlen, wr_len;
+ u_int leading_waste, trailing_waste;
+ bool inline_key, last_ghash_frag, request_ghash, send_partial_ghash;
+ bool short_record;
+
+ M_ASSERTEXTPG(m_tls);
+
+ /*
+ * The relative offset of the last byte to send from the TLS
+ * record.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+ wr_len = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_core) +
+ roundup2(m->m_len + m_tls->m_len, 16);
+ if (wr_len > SGE_MAX_WR_LEN) {
+ CTR(KTR_CXGBE,
+ "%s: %p TLS header-only packet too long (len %d)",
+ __func__, tlsp, m->m_len + m_tls->m_len);
+ }
+
+ /* This should always be the last TLS record in a chain. */
+ MPASS(m_tls->m_next == NULL);
+ *nsegsp = 0;
+ return (wr_len);
+ }
+
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+ /*
+ * See if this request might make use of GHASH state. This
+ * errs on the side of over-budgeting the WR size.
+ */
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * Might use partial GHASH if this doesn't
+ * send the full record.
+ */
+ if (tlen < rlen) {
+ if (tlen < (rlen - trailer_len))
+ send_partial_ghash = true;
+ request_ghash = true;
+ }
+ } else {
+ send_partial_ghash = true;
+ if (tlen < rlen)
+ request_ghash = true;
+ if (tlen >= (rlen - trailer_len))
+ last_ghash_frag = true;
+ }
+ }
+
+ /*
+ * Assume not sending partial GHASH for this call to get the
+ * larger size.
+ */
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ false, request_ghash);
+
+ inline_key = send_partial_ghash || tlsp->inline_key;
+
+ /* Calculate the size of the work request. */
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash)
+ wr_len += AES_GMAC_HASH_LEN;
+
+ if (leading_waste != 0 || trailing_waste != 0) {
+ /*
+ * Partial records might require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ /* Budget for an LSO header even if we don't use it. */
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ /*
+ * Headers (including the TLS header) are always sent as
+ * immediate data. Short records include a raw AES IV as
+ * immediate data. TLS 1.3 non-short records include a
+ * placeholder for the sequence number as immediate data.
+ * Short records using a partial hash may also need to send
+ * TLS AAD. If a partial hash might be sent, assume a short
+ * record to get the larger size.
+ */
+ imm_len = m->m_len + header_len;
+ if (short_record || send_partial_ghash) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+
+ /*
+ * TLS record payload via DSGL. For partial GCM mode we
+ * might need an extra SG entry for a placeholder.
+ */
+ *nsegsp = sglist_count_mbuf_epg(m_tls, m_tls->m_epg_hdrlen + offset,
+ plen);
+ wr_len += ktls_sgl_size(*nsegsp + (last_ghash_frag ? 1 : 0));
+
+ if (request_ghash) {
+ /* AES-GCM records might return a partial hash. */
+ wr_len += sizeof(struct ulp_txpkt);
+ wr_len += sizeof(struct ulptx_idata);
+ wr_len += sizeof(struct cpl_tx_tls_ack);
+ wr_len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ wr_len = roundup2(wr_len, 16);
+ return (wr_len);
+}
+
+/* Queue the next pending packet. */
+static void
+ktls_queue_next_packet(struct tlspcb *tlsp, bool enqueue_only)
+{
+#ifdef KTR
+ struct ether_header *eh;
+ struct tcphdr *tcp;
+ tcp_seq tcp_seqno;
+#endif
+ struct mbuf *m;
+ void *items[1];
+ int rc;
+
+ TXQ_LOCK_ASSERT_OWNED(tlsp->txq);
+ KASSERT(tlsp->queue_mbufs, ("%s: mbufs not being queued for %p",
+ __func__, tlsp));
+ for (;;) {
+ m = mbufq_dequeue(&tlsp->pending_mbufs);
+ if (m == NULL) {
+ tlsp->queue_mbufs = false;
+ return;
+ }
+
+#ifdef KTR
+ eh = mtod(m, struct ether_header *);
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+#endif
+#endif
+
+ items[0] = m;
+ if (enqueue_only)
+ rc = mp_ring_enqueue_only(tlsp->txq->r, items, 1);
+ else {
+ TXQ_UNLOCK(tlsp->txq);
+ rc = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ TXQ_LOCK(tlsp->txq);
+ }
+ if (__predict_true(rc == 0))
+ return;
+
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u dropped", __func__,
+ m->m_pkthdr.len, tcp_seqno);
+ m_freem(m);
+ }
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ struct tlspcb *tlsp;
+ struct ether_header *eh;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ void *items[1];
+ int error, nsegs;
+ u_int wr_len, tot_len;
+ uint16_t eh_type;
+
+ /*
+ * Locate headers in initial mbuf.
+ *
+ * XXX: This assumes all of the headers are in the initial mbuf.
+ * Could perhaps use m_advance() like parse_pkt() if that turns
+ * out to not be true.
+ */
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ if (m->m_len <= sizeof(*eh) + sizeof(*ip)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short", __func__, tlsp);
+ return (EINVAL);
+ }
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ m->m_pkthdr.l2hlen = sizeof(*evh);
+ } else
+ m->m_pkthdr.l2hlen = sizeof(*eh);
+
+ switch (eh_type) {
+ case ETHERTYPE_IP:
+ ip = (struct ip *)(eh + 1);
+ if (ip->ip_p != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p mbuf not IPPROTO_TCP", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = ip->ip_hl * 4;
+ break;
+ case ETHERTYPE_IPV6:
+ ip6 = (struct ip6_hdr *)(eh + 1);
+ if (ip6->ip6_nxt != IPPROTO_TCP) {
+ CTR(KTR_CXGBE, "%s: %p, mbuf not IPPROTO_TCP (%u)",
+ __func__, tlsp, ip6->ip6_nxt);
+ return (EINVAL);
+ }
+ m->m_pkthdr.l3hlen = sizeof(struct ip6_hdr);
+ break;
+ default:
+ CTR(KTR_CXGBE, "%s: %p mbuf not ETHERTYPE_IP{,V6}", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ if (m->m_len < m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ sizeof(*tcp)) {
+ CTR(KTR_CXGBE, "%s: %p header mbuf too short (2)", __func__,
+ tlsp);
+ return (EINVAL);
+ }
+ tcp = (struct tcphdr *)((char *)(eh + 1) + m->m_pkthdr.l3hlen);
+ m->m_pkthdr.l4hlen = tcp->th_off * 4;
+
+ /* Bail if there is TCP payload before the TLS record. */
+ if (m->m_len != m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen +
+ m->m_pkthdr.l4hlen) {
+ CTR(KTR_CXGBE,
+ "%s: %p header mbuf bad length (%d + %d + %d != %d)",
+ __func__, tlsp, m->m_pkthdr.l2hlen, m->m_pkthdr.l3hlen,
+ m->m_pkthdr.l4hlen, m->m_len);
+ return (EINVAL);
+ }
+
+ /* Assume all headers are in 'm' for now. */
+ MPASS(m->m_next != NULL);
+ MPASS(m->m_next->m_flags & M_EXTPG);
+
+ tot_len = 0;
+
+ /*
+ * Each of the remaining mbufs in the chain should reference a
+ * TLS record.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ wr_len = ktls_wr_len(tlsp, m, m_tls, &nsegs);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p wr_len %d nsegs %d", __func__, tlsp,
+ wr_len, nsegs);
+#endif
+ if (wr_len > SGE_MAX_WR_LEN || nsegs > TX_SGL_SEGS)
+ return (EFBIG);
+ tot_len += roundup2(wr_len, EQ_ESIZE);
+
+ /*
+ * Store 'nsegs' for the first TLS record in the
+ * header mbuf's metadata.
+ */
+ if (m_tls == m->m_next)
+ set_mbuf_nsegs(m, nsegs);
+ }
+
+ MPASS(tot_len != 0);
+ set_mbuf_len16(m, tot_len / 16);
+
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ /* Defer packets beyond what has been sent so far. */
+ TXQ_LOCK(tlsp->txq);
+ if (tlsp->queue_mbufs) {
+ error = mbufq_enqueue(&tlsp->pending_mbufs, m);
+ if (error == 0) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p len16 %d nsegs %d TCP seq %u deferred",
+ __func__, tlsp, mbuf_len16(m),
+ mbuf_nsegs(m), ntohl(tcp->th_seq));
+#endif
+ }
+ TXQ_UNLOCK(tlsp->txq);
+ return (error);
+ }
+ tlsp->queue_mbufs = true;
+ TXQ_UNLOCK(tlsp->txq);
+ }
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p len16 %d nsegs %d", __func__, tlsp,
+ mbuf_len16(m), mbuf_nsegs(m));
+#endif
+ items[0] = m;
+ error = mp_ring_enqueue(tlsp->txq->r, items, 1, 256);
+ if (__predict_false(error != 0)) {
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ TXQ_LOCK(tlsp->txq);
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+ }
+ }
+ return (error);
+}
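+
+/*
+ * Note (illustrative summary): for AES-GCM sessions the code above
+ * admits one packet at a time into the txq ring; subsequent packets
+ * sit on pending_mbufs until ktls_queue_next_packet() releases the
+ * next one, keeping requests that may reuse partial GHASH state in
+ * order.
+ */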
+
+static inline bool
+needs_vlan_insertion(struct mbuf *m)
+{
+
+ M_ASSERTPKTHDR(m);
+
+ return (m->m_flags & M_VLANTAG);
+}
+
+static inline uint64_t
+pkt_ctrl1(struct sge_txq *txq, struct mbuf *m, uint16_t eh_type)
+{
+ uint64_t ctrl1;
+
+ /* Checksums are always offloaded */
+ if (eh_type == ETHERTYPE_IP) {
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ } else {
+ MPASS(m->m_pkthdr.l3hlen == sizeof(struct ip6_hdr));
+ ctrl1 = V_TXPKT_CSUM_TYPE(TX_CSUM_TCPIP6) |
+ V_T6_TXPKT_ETHHDR_LEN(m->m_pkthdr.l2hlen - ETHER_HDR_LEN) |
+ V_TXPKT_IPHDR_LEN(m->m_pkthdr.l3hlen);
+ }
+ txq->txcsum++;
+
+ /* VLAN tag insertion */
+ if (needs_vlan_insertion(m)) {
+ ctrl1 |= F_TXPKT_VLAN_VLD |
+ V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
+ txq->vlan_insertion++;
+ }
+
+ return (ctrl1);
+}
+
+static inline void *
+write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,
+ int total_len)
+{
+ struct cpl_tx_pkt_lso_core *lso;
+ uint32_t ctrl;
+
+ KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
+ m0->m_pkthdr.l4hlen > 0,
+ ("%s: mbuf %p needs TSO but missing header lengths",
+ __func__, m0));
+
+ ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
+ F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
+ V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) |
+ V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
+ V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
+ if (eh_type == ETHERTYPE_IPV6)
+ ctrl |= F_LSO_IPV6;
+
+ lso = cpl;
+ lso->lso_ctrl = htobe32(ctrl);
+ lso->ipid_ofst = htobe16(0);
+ lso->mss = htobe16(mss);
+ lso->seqno_offset = htobe32(0);
+ lso->len = htobe32(total_len);
+
+ return (lso + 1);
+}
+
+static inline void *
+write_tx_tls_ack(void *dst, u_int rx_chid, u_int hash_len, bool ghash_lcb)
+{
+ struct cpl_tx_tls_ack *cpl;
+ uint32_t flags;
+
+ flags = ghash_lcb ? F_CPL_TX_TLS_ACK_LCB : F_CPL_TX_TLS_ACK_PHASH;
+ cpl = dst;
+ cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |
+ V_T7_CPL_TX_TLS_ACK_RXCHID(rx_chid) | F_CPL_TX_TLS_ACK_ULPTXLPBK |
+ flags);
+
+ /* 32 == AckEncCpl, 16 == LCB */
+ cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));
+ cpl->Rsvd3 = 0;
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_fw6_pld(void *dst, u_int rx_chid, u_int rx_qid, u_int hash_len,
+ uint64_t cookie)
+{
+ struct rss_header *rss;
+ struct cpl_fw6_pld *cpl;
+
+ rss = dst;
+ memset(rss, 0, sizeof(*rss));
+ rss->opcode = CPL_FW6_PLD;
+ rss->qid = htobe16(rx_qid);
+ rss->channel = rx_chid;
+
+ cpl = (void *)(rss + 1);
+ memset(cpl, 0, sizeof(*cpl));
+ cpl->opcode = CPL_FW6_PLD;
+ cpl->len = htobe16(hash_len);
+ cpl->data[1] = htobe64(cookie);
+
+ return (cpl + 1);
+}
+
+static inline void *
+write_split_mode_rx_phys(void *dst, struct mbuf *m, struct mbuf *m_tls,
+ u_int crypto_hdr_len, u_int leading_waste, u_int trailing_waste)
+{
+ struct cpl_t7_rx_phys_dsgl *cpl;
+ uint16_t *len;
+ uint8_t numsge;
+
+ /* Forward first (3) and third (1) segments. */
+ numsge = 0xa;
+
+ cpl = dst;
+ cpl->ot.opcode = CPL_RX_PHYS_DSGL;
+ cpl->PhysAddrFields_lo_to_NumSGE =
+ htobe32(F_CPL_T7_RX_PHYS_DSGL_SPLITMODE |
+ V_CPL_T7_RX_PHYS_DSGL_NUMSGE(numsge));
+
+ len = (uint16_t *)(cpl->RSSCopy);
+
+ /*
+ * First segment always contains packet headers as well as
+ * transmit-related CPLs.
+ */
+ len[0] = htobe16(crypto_hdr_len);
+
+ /*
+ * Second segment is "gap" of data to drop at the front of the
+ * TLS record.
+ */
+ len[1] = htobe16(leading_waste);
+
+ /* Third segment is how much of the TLS record to send. */
+ len[2] = htobe16(m_tls->m_len);
+
+ /* Fourth segment is how much data to drop at the end. */
+ len[3] = htobe16(trailing_waste);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: forward %u skip %u forward %u skip %u",
+ __func__, be16toh(len[0]), be16toh(len[1]), be16toh(len[2]),
+ be16toh(len[3]));
+#endif
+ return (cpl + 1);
+}
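+
+/*
+ * Illustrative note: numsge = 0xa (binary 1010) above selects which
+ * of the four segments are forwarded rather than dropped: the
+ * header segment (len[0]) and the TLS payload segment (len[2]) are
+ * kept, while the leading and trailing waste segments are discarded
+ * by SplitMode.
+ */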
+
+/*
+ * If the SGL ends on an address that is not 16-byte aligned, this
+ * function will add a zero-filled flit at the end.
+ */
+static void *
+write_gl_to_buf(struct sglist *gl, caddr_t to)
+{
+ struct sglist_seg *seg;
+ __be64 *flitp;
+ struct ulptx_sgl *usgl;
+ int i, nflits, nsegs;
+
+ KASSERT(((uintptr_t)to & 0xf) == 0,
+ ("%s: SGL must start at a 16 byte boundary: %p", __func__, to));
+
+ nsegs = gl->sg_nseg;
+ MPASS(nsegs > 0);
+
+ nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2;
+ flitp = (__be64 *)to;
+ seg = &gl->sg_segs[0];
+ usgl = (void *)flitp;
+
+ usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
+ V_ULPTX_NSGE(nsegs));
+ usgl->len0 = htobe32(seg->ss_len);
+ usgl->addr0 = htobe64(seg->ss_paddr);
+ seg++;
+
+ for (i = 0; i < nsegs - 1; i++, seg++) {
+ usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len);
+ usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr);
+ }
+ if (i & 1)
+ usgl->sge[i / 2].len[1] = htobe32(0);
+ flitp += nflits;
+
+ if (nflits & 1) {
+ MPASS(((uintptr_t)flitp) & 0xf);
+ *flitp++ = 0;
+ }
+
+ MPASS((((uintptr_t)flitp) & 0xf) == 0);
+ return (flitp);
+}
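+
+/*
+ * For example (illustrative): with gl->sg_nseg = 3, nflits =
+ * (3 * 2) / 2 + (2 & 1) + 2 = 5; since nflits is odd a zero pad
+ * flit is appended, so the returned pointer advances 6 flits
+ * (48 bytes) and stays 16-byte aligned.
+ */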
+
+static inline void
+copy_to_txd(struct sge_eq *eq, const char *from, caddr_t *to, int len)
+{
+
+ MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
+ MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
+
+ if (__predict_true((uintptr_t)(*to) + len <=
+ (uintptr_t)&eq->desc[eq->sidx])) {
+ bcopy(from, *to, len);
+ (*to) += len;
+ if ((uintptr_t)(*to) == (uintptr_t)&eq->desc[eq->sidx])
+ (*to) = (caddr_t)eq->desc;
+ } else {
+ int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
+
+ bcopy(from, *to, portion);
+ from += portion;
+ portion = len - portion; /* remaining */
+ bcopy(from, (void *)eq->desc, portion);
+ (*to) = (caddr_t)eq->desc + portion;
+ }
+}
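+
+/*
+ * Illustrative note: copy_to_txd() wraps at the end of the
+ * descriptor ring, e.g. if 24 bytes remain before eq->desc[eq->sidx]
+ * and len is 40, the first 24 bytes land at the tail and the
+ * remaining 16 continue at eq->desc[0].
+ */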
+
+static int
+ktls_write_tunnel_packet(struct sge_txq *txq, void *dst, struct mbuf *m,
+ const void *src, u_int len, u_int available, tcp_seq tcp_seqno, u_int pidx,
+ uint16_t eh_type, bool last_wr)
+{
+ struct tx_sdesc *txsd;
+ struct fw_eth_tx_pkt_wr *wr;
+ struct cpl_tx_pkt_core *cpl;
+ uint32_t ctrl;
+ int len16, ndesc, pktlen;
+ struct ether_header *eh;
+ struct ip *ip, newip;
+ struct ip6_hdr *ip6, newip6;
+ struct tcphdr *tcp, newtcp;
+ caddr_t out;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+
+ wr = dst;
+ pktlen = m->m_len + len;
+ ctrl = sizeof(struct cpl_tx_pkt_core) + pktlen;
+ len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + ctrl, 16);
+ ndesc = tx_len16_to_desc(len16);
+ MPASS(ndesc <= available);
+
+ /* Firmware work request header */
+ /* TODO: Handle VF work request. */
+ wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) |
+ V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl));
+
+ ctrl = V_FW_WR_LEN16(len16);
+ wr->equiq_to_len16 = htobe32(ctrl);
+ wr->r3 = 0;
+
+ cpl = (void *)(wr + 1);
+
+ /* CPL header */
+ cpl->ctrl0 = txq->cpl_ctrl0;
+ cpl->pack = 0;
+ cpl->len = htobe16(pktlen);
+
+ out = (void *)(cpl + 1);
+
+ /* Copy over Ethernet header. */
+ eh = mtod(m, struct ether_header *);
+ copy_to_txd(&txq->eq, (caddr_t)eh, &out, m->m_pkthdr.l2hlen);
+
+ /* Fixup length in IP header and copy out. */
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip = *ip;
+ newip.ip_len = htons(pktlen - m->m_pkthdr.l2hlen);
+ copy_to_txd(&txq->eq, (caddr_t)&newip, &out, sizeof(newip));
+ if (m->m_pkthdr.l3hlen > sizeof(*ip))
+ copy_to_txd(&txq->eq, (caddr_t)(ip + 1), &out,
+ m->m_pkthdr.l3hlen - sizeof(*ip));
+ } else {
+ ip6 = (void *)((char *)eh + m->m_pkthdr.l2hlen);
+ newip6 = *ip6;
+ newip6.ip6_plen = htons(pktlen - m->m_pkthdr.l2hlen -
+ sizeof(*ip6));
+ copy_to_txd(&txq->eq, (caddr_t)&newip6, &out, sizeof(newip6));
+ MPASS(m->m_pkthdr.l3hlen == sizeof(*ip6));
+ }
+ cpl->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+
+ /* Set sequence number in TCP header. */
+ tcp = (void *)((char *)eh + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ newtcp = *tcp;
+ newtcp.th_seq = htonl(tcp_seqno);
+ copy_to_txd(&txq->eq, (caddr_t)&newtcp, &out, sizeof(newtcp));
+
+ /* Copy rest of TCP header. */
+ copy_to_txd(&txq->eq, (caddr_t)(tcp + 1), &out, m->m_len -
+ (m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + sizeof(*tcp)));
+
+ /* Copy the payload data. */
+ copy_to_txd(&txq->eq, src, &out, len);
+ txq->imm_wrs++;
+
+ txq->txpkt_wrs++;
+
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+static int
+ktls_write_tls_wr(struct tlspcb *tlsp, struct sge_txq *txq,
+ void *dst, struct mbuf *m, struct tcphdr *tcp, struct mbuf *m_tls,
+ u_int available, tcp_seq tcp_seqno, u_int pidx, uint16_t eh_type,
+ uint16_t mss)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tx_sdesc *txsd;
+ struct fw_ulptx_wr *wr;
+ struct ulp_txpkt *txpkt;
+ struct ulptx_sc_memrd *memrd;
+ struct ulptx_idata *idata;
+ struct cpl_tx_sec_pdu *sec_pdu;
+ struct cpl_tx_pkt_core *tx_pkt;
+ const struct tls_record_layer *hdr;
+ struct ip *ip;
+ struct ip6_hdr *ip6;
+ struct tcphdr *newtcp;
+ char *iv, *out;
+ u_int aad_start, aad_stop;
+ u_int auth_start, auth_stop, auth_insert;
+ u_int cipher_start, cipher_stop, iv_offset;
+ u_int header_len, offset, plen, rlen, tlen;
+ u_int imm_len, ndesc, nsegs, txpkt_lens[2], wr_len;
+ u_int cpl_len, crypto_hdr_len, post_key_context_len;
+ u_int leading_waste, trailing_waste;
+ u_short ip_len;
+ bool inline_key, ghash_lcb, last_ghash_frag, last_wr, need_lso;
+ bool request_ghash, send_partial_ghash, short_record, split_mode;
+ bool using_scratch;
+
+ MPASS(tlsp->txq == txq);
+ M_ASSERTEXTPG(m_tls);
+
+ /* Final work request for this mbuf chain? */
+ last_wr = (m_tls->m_next == NULL);
+
+ /*
+ * The offset, relative to the start of the TLS record, just past
+ * the last byte to send.
+ */
+ tlen = mtod(m_tls, vm_offset_t) + m_tls->m_len;
+ if (tlen <= m_tls->m_epg_hdrlen) {
+ /*
+ * For requests that only want to send the TLS header,
+ * send a tunnelled packet as immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p header-only TLS record %u", __func__,
+ tlsp, (u_int)m_tls->m_epg_seqno);
+#endif
+ /* This should always be the last TLS record in a chain. */
+ MPASS(last_wr);
+
+ txq->kern_tls_header++;
+
+ return (ktls_write_tunnel_packet(txq, dst, m,
+ (char *)m_tls->m_epg_hdr + mtod(m_tls, vm_offset_t),
+ m_tls->m_len, available, tcp_seqno, pidx, eh_type,
+ last_wr));
+ }
+
+ /* Locate the TLS header. */
+ hdr = (void *)m_tls->m_epg_hdr;
+ rlen = TLS_HEADER_LENGTH + ntohs(hdr->tls_length);
+
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: offset %lu len %u TCP seq %u TLS record %u",
+ __func__, mtod(m_tls, vm_offset_t), m_tls->m_len, tcp_seqno,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ /* Should this request make use of GHASH state? */
+ ghash_lcb = false;
+ last_ghash_frag = false;
+ request_ghash = false;
+ send_partial_ghash = false;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM &&
+ tlsp->sc->tlst.partial_ghash && tlsp->sc->tlst.short_records) {
+ u_int trailer_len;
+
+ trailer_len = m_tls->m_epg_trllen;
+ if (tlsp->tls13)
+ trailer_len--;
+ KASSERT(trailer_len == AES_GMAC_HASH_LEN,
+ ("invalid trailer length for AES-GCM"));
+
+ /* Is this the start of a TLS record? */
+ if (mtod(m_tls, vm_offset_t) <= m_tls->m_epg_hdrlen) {
+ /*
+ * If this is the very first TLS record, or a newer
+ * record than the last one hashed, request a
+ * partial hash, but not if we are going to send
+ * the whole record.
+ */
+ if ((tlsp->ghash_tls_seqno == 0 ||
+ tlsp->ghash_tls_seqno < m_tls->m_epg_seqno) &&
+ tlen < rlen) {
+ /*
+ * If only part or all of the trailer
+ * is missing, send a normal full
+ * record but request the hash.
+ * Otherwise, use partial GHASH mode.
+ */
+ if (tlen >= (rlen - trailer_len))
+ ghash_lcb = true;
+ else
+ send_partial_ghash = true;
+ request_ghash = true;
+ tlsp->ghash_tls_seqno = m_tls->m_epg_seqno;
+ }
+ } else if (tlsp->ghash_tls_seqno == m_tls->m_epg_seqno &&
+ tlsp->ghash_valid) {
+ /*
+ * Compute the offset of the first AES block as
+ * is done in ktls_is_short_record.
+ */
+ if (rlen - tlen < trailer_len)
+ plen = rlen - (m_tls->m_epg_hdrlen +
+ trailer_len);
+ else
+ plen = tlen - m_tls->m_epg_hdrlen;
+ offset = mtod(m_tls, vm_offset_t) - m_tls->m_epg_hdrlen;
+ if (offset >= plen)
+ offset = plen;
+ else
+ offset = rounddown2(offset, AES_BLOCK_LEN);
+ if (tlsp->ghash_offset == offset) {
+ if (offset == plen) {
+ /*
+ * Send a partial trailer as a
+ * tunnelled packet as
+ * immediate data.
+ */
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p trailer-only TLS record %u",
+ __func__, tlsp,
+ (u_int)m_tls->m_epg_seqno);
+#endif
+
+ txq->kern_tls_trailer++;
+
+ offset = mtod(m_tls, vm_offset_t) -
+ (m_tls->m_epg_hdrlen + plen);
+ KASSERT(offset <= AES_GMAC_HASH_LEN,
+ ("offset outside of trailer"));
+ return (ktls_write_tunnel_packet(txq,
+ dst, m, tlsp->ghash + offset,
+ m_tls->m_len, available, tcp_seqno,
+ pidx, eh_type, last_wr));
+ }
+
+ /*
+ * If this request sends the end of
+ * the payload, it is the last
+ * fragment.
+ */
+ if (tlen >= (rlen - trailer_len)) {
+ last_ghash_frag = true;
+ ghash_lcb = true;
+ }
+
+ /*
+ * Only use partial GCM mode (rather
+ * than an AES-CTR short record) if
+ * there is input auth data to pass to
+ * the GHASH. That is true so long as
+ * there is at least one full block of
+ * payload data, or if the remaining
+ * payload data is the final partial
+ * block.
+ */
+ if (plen - offset >= GMAC_BLOCK_LEN ||
+ last_ghash_frag) {
+ send_partial_ghash = true;
+
+ /*
+ * If this request does not send
+ * the end of the record, it is
+ * a middle request and needs
+ * to request an updated
+ * partial hash.
+ */
+ if (tlen < rlen)
+ request_ghash = true;
+ }
+ }
+ }
+ }
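+
+ /*
+ * Summary of the cases above (a reading aid, not new logic):
+ * at the start of a record that is not sent in full, request
+ * a hash (partial GHASH mode unless only trailer bytes are
+ * missing, in which case a full record is sent and the hash
+ * latched in the LCB).  On a continuation at the offset the
+ * hardware last hashed, trailer-only data goes out as a
+ * tunnelled packet; otherwise partial GHASH mode resumes and
+ * a further hash is requested unless this fragment ends the
+ * record.
+ */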
+
+ short_record = ktls_is_short_record(tlsp, m_tls, tlen, rlen,
+ &header_len, &offset, &plen, &leading_waste, &trailing_waste,
+ send_partial_ghash, request_ghash);
+
+ if (short_record) {
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE,
+ "%s: %p short TLS record %u hdr %u offs %u plen %u",
+ __func__, tlsp, (u_int)m_tls->m_epg_seqno, header_len,
+ offset, plen);
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ CTR(KTR_CXGBE, "%s: %p sending initial GHASH",
+ __func__, tlsp);
+ else
+ CTR(KTR_CXGBE, "%s: %p sending partial GHASH for offset %u%s",
+ __func__, tlsp, tlsp->ghash_offset,
+ last_ghash_frag ? ", last_frag" : "");
+ }
+#endif
+ KASSERT(send_partial_ghash || !request_ghash,
+ ("requesting but not sending partial hash for short record"));
+ } else {
+ KASSERT(!send_partial_ghash,
+ ("sending partial hash with full record"));
+ }
+
+ if (tlen < rlen && m_tls->m_next == NULL &&
+ (tcp->th_flags & TH_FIN) != 0) {
+ txq->kern_tls_fin_short++;
+#ifdef INVARIANTS
+ panic("%s: FIN on short TLS record", __func__);
+#endif
+ }
+
+ /*
+ * Use the cached value for the first record in the chain
+ * when not using partial GCM mode; ktls_parse_pkt()
+ * calculates nsegs assuming send_partial_ghash is false.
+ */
+ if (m->m_next == m_tls && !send_partial_ghash)
+ nsegs = mbuf_nsegs(m);
+ else
+ nsegs = sglist_count_mbuf_epg(m_tls,
+ m_tls->m_epg_hdrlen + offset, plen);
+
+ /* Determine if we need an LSO header. */
+ need_lso = (m_tls->m_len > mss);
+
+ /* Calculate the size of the TLS work request. */
+ inline_key = send_partial_ghash || tlsp->inline_key;
+ wr_len = ktls_base_wr_size(tlsp, inline_key);
+
+ if (send_partial_ghash) {
+ /* Inline key context includes partial hash in OPAD. */
+ wr_len += AES_GMAC_HASH_LEN;
+ }
+
+ /*
+ * SplitMode is required if there is anything we need to trim
+ * from the crypto output, either at the front or end of the
+ * record. Note that short records might not need trimming.
+ */
+ split_mode = leading_waste != 0 || trailing_waste != 0;
+ if (split_mode) {
+ /*
+ * Partial records require a SplitMode
+ * CPL_RX_PHYS_DSGL.
+ */
+ wr_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ }
+
+ if (need_lso)
+ wr_len += sizeof(struct cpl_tx_pkt_lso_core);
+
+ imm_len = m->m_len + header_len;
+ if (short_record) {
+ imm_len += AES_BLOCK_LEN;
+ if (send_partial_ghash && header_len != 0)
+ imm_len += ktls_gcm_aad_len(tlsp);
+ } else if (tlsp->tls13)
+ imm_len += sizeof(uint64_t);
+ wr_len += roundup2(imm_len, 16);
+ wr_len += ktls_sgl_size(nsegs + (last_ghash_frag ? 1 : 0));
+ wr_len = roundup2(wr_len, 16);
+ txpkt_lens[0] = wr_len - sizeof(*wr);
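+
+ /*
+ * Alignment sketch (illustrative): with 54 bytes of packet
+ * headers in m and a 5-byte TLS header, a short AES-CTR
+ * record carries imm_len = 54 + 5 + 16 = 75 bytes of
+ * immediate data, padded to 80 in the WR; the SGL and the
+ * WR itself are likewise rounded up to 16-byte multiples.
+ */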
+
+ if (request_ghash) {
+ /*
+ * Requesting the hash entails a second ULP_TX_PKT
+ * containing CPL_TX_TLS_ACK, CPL_FW6_PLD, and space
+ * for the hash.
+ */
+ txpkt_lens[1] = sizeof(struct ulp_txpkt);
+ txpkt_lens[1] += sizeof(struct ulptx_idata);
+ txpkt_lens[1] += sizeof(struct cpl_tx_tls_ack);
+ txpkt_lens[1] += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ txpkt_lens[1] += AES_GMAC_HASH_LEN;
+ wr_len += txpkt_lens[1];
+ } else
+ txpkt_lens[1] = 0;
+
+ ndesc = howmany(wr_len, EQ_ESIZE);
+ MPASS(ndesc <= available);
+
+ /*
+ * Use the per-txq scratch pad if near the end of the ring to
+ * simplify handling of wrap-around.
+ */
+ using_scratch = (eq->sidx - pidx < ndesc);
+ if (using_scratch)
+ wr = (void *)txq->ss;
+ else
+ wr = dst;
+
+ /* FW_ULPTX_WR */
+ wr->op_to_compl = htobe32(V_FW_WR_OP(FW_ULPTX_WR));
+ wr->flowid_len16 = htobe32(F_FW_ULPTX_WR_DATA |
+ V_FW_WR_LEN16(wr_len / 16));
+ wr->cookie = 0;
+
+ /* ULP_TXPKT */
+ txpkt = (void *)(wr + 1);
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_CMDMORE(request_ghash ? 1 : 0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[0], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = sizeof(struct cpl_tx_sec_pdu);
+
+ /*
+ * After the key context comes CPL_RX_PHYS_DSGL, CPL_TX_*, and
+ * immediate data containing headers. When using an inline
+ * key, these are counted as part of this ULPTX_IDATA. When
+ * reading the key from memory, these are part of a separate
+ * ULPTX_IDATA.
+ */
+ cpl_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ cpl_len += sizeof(struct cpl_tx_pkt_lso_core);
+ if (split_mode)
+ cpl_len += sizeof(struct cpl_t7_rx_phys_dsgl);
+ post_key_context_len = cpl_len + imm_len;
+
+ if (inline_key) {
+ idata->len += tlsp->tx_key_info_size + post_key_context_len;
+ if (send_partial_ghash) {
+ /* Partial GHASH in key context. */
+ idata->len += AES_GMAC_HASH_LEN;
+ }
+ }
+ idata->len = htobe32(idata->len);
+
+ /* CPL_TX_SEC_PDU */
+ sec_pdu = (void *)(idata + 1);
+
+ /*
+ * Packet headers are passed through unchanged by the crypto
+ * engine by marking them as header data in SCMD0.
+ */
+ crypto_hdr_len = m->m_len;
+
+ if (send_partial_ghash) {
+ /*
+ * For short records using a partial hash, the TLS
+ * header is counted as header data in SCMD0. TLS AAD
+ * is next (if AAD is present) followed by the AES-CTR
+ * IV. Last is the cipher region for the payload.
+ */
+ if (header_len != 0) {
+ aad_start = 1;
+ aad_stop = ktls_gcm_aad_len(tlsp);
+ } else {
+ aad_start = 0;
+ aad_stop = 0;
+ }
+ iv_offset = aad_stop + 1;
+ cipher_start = iv_offset + AES_BLOCK_LEN;
+ cipher_stop = 0;
+ if (last_ghash_frag) {
+ auth_start = cipher_start;
+ auth_stop = AES_GMAC_HASH_LEN;
+ auth_insert = auth_stop;
+ } else if (plen < GMAC_BLOCK_LEN) {
+ /*
+ * A request that sends part of the first AES
+ * block will only have AAD.
+ */
+ KASSERT(header_len != 0,
+ ("%s: partial GHASH with no auth", __func__));
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ auth_start = cipher_start;
+ auth_stop = plen % GMAC_BLOCK_LEN;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32(aad_stop + AES_BLOCK_LEN + plen +
+ (last_ghash_frag ? AES_GMAC_HASH_LEN : 0));
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_partial.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = tlsp->scmd0_partial.ivgen_hdrlen;
+ if (last_ghash_frag)
+ sec_pdu->ivgen_hdrlen |= V_SCMD_LAST_FRAG(1);
+ else
+ sec_pdu->ivgen_hdrlen |= V_SCMD_MORE_FRAGS(1);
+ sec_pdu->ivgen_hdrlen = htobe32(sec_pdu->ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_partial_ghash++;
+ } else if (short_record) {
+ /*
+ * For short records without a partial hash, the TLS
+ * header is counted as header data in SCMD0 and the
+ * IV is next, followed by a cipher region for the
+ * payload.
+ */
+ aad_start = 0;
+ aad_stop = 0;
+ iv_offset = 1;
+ auth_start = 0;
+ auth_stop = 0;
+ auth_insert = 0;
+ cipher_start = AES_BLOCK_LEN + 1;
+ cipher_stop = 0;
+
+ sec_pdu->pldlen = htobe32(AES_BLOCK_LEN + plen);
+
+ /*
+ * For short records, the TLS header is treated as
+ * header data.
+ */
+ crypto_hdr_len += header_len;
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0_short.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(
+ tlsp->scmd0_short.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ txq->kern_tls_short++;
+ } else {
+ /*
+ * AAD is TLS header. IV is after AAD for TLS < 1.3.
+ * For TLS 1.3, a placeholder for the TLS sequence
+ * number is provided as an IV before the AAD. The
+ * cipher region starts after the AAD and IV. See
+ * comments in ccr_authenc() and ccr_gmac() in
+ * t4_crypto.c regarding cipher and auth start/stop
+ * values.
+ */
+ if (tlsp->tls13) {
+ iv_offset = 1;
+ aad_start = 1 + sizeof(uint64_t);
+ aad_stop = sizeof(uint64_t) + TLS_HEADER_LENGTH;
+ cipher_start = aad_stop + 1;
+ } else {
+ aad_start = 1;
+ aad_stop = TLS_HEADER_LENGTH;
+ iv_offset = TLS_HEADER_LENGTH + 1;
+ cipher_start = m_tls->m_epg_hdrlen + 1;
+ }
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ } else {
+ cipher_stop = 0;
+ auth_start = cipher_start;
+ auth_stop = 0;
+ auth_insert = 0;
+ }
+
+ sec_pdu->pldlen = htobe32((tlsp->tls13 ? sizeof(uint64_t) : 0) +
+ m_tls->m_epg_hdrlen + plen);
+
+ /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
+ sec_pdu->seqno_numivs = tlsp->scmd0.seqno_numivs;
+ sec_pdu->ivgen_hdrlen = htobe32(tlsp->scmd0.ivgen_hdrlen |
+ V_SCMD_HDR_LEN(crypto_hdr_len));
+
+ if (split_mode)
+ txq->kern_tls_partial++;
+ else
+ txq->kern_tls_full++;
+ }
+ sec_pdu->op_ivinsrtofst = htobe32(
+ V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
+ V_CPL_TX_SEC_PDU_CPLLEN(cpl_len / 8) |
+ V_CPL_TX_SEC_PDU_PLACEHOLDER(send_partial_ghash ? 1 : 0) |
+ V_CPL_TX_SEC_PDU_IVINSRTOFST(iv_offset));
+ sec_pdu->aadstart_cipherstop_hi = htobe32(
+ V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
+ V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
+ sec_pdu->cipherstop_lo_authinsert = htobe32(
+ V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
+ V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
+
+ if (send_partial_ghash && last_ghash_frag) {
+ uint64_t aad_len, cipher_len;
+
+ aad_len = ktls_gcm_aad_len(tlsp);
+ cipher_len = rlen - (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ sec_pdu->scmd1 = htobe64(aad_len << 44 | cipher_len);
+ } else
+ sec_pdu->scmd1 = htobe64(m_tls->m_epg_seqno);
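+
+ /*
+ * The packing above suggests SCMD1 carries the GCM lengths
+ * for the final GHASH block, len(A) in bits 63:44 and
+ * len(C) below, in bytes; that field split is inferred from
+ * the shift, not from documentation.
+ */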
+
+ /* Key context */
+ out = (void *)(sec_pdu + 1);
+ if (inline_key) {
+ memcpy(out, &tlsp->keyctx, tlsp->tx_key_info_size);
+ if (send_partial_ghash) {
+ struct tls_keyctx *keyctx = (void *)out;
+
+ keyctx->u.txhdr.ctxlen++;
+ keyctx->u.txhdr.dualck_to_txvalid &= ~htobe16(
+ V_KEY_CONTEXT_MK_SIZE(M_KEY_CONTEXT_MK_SIZE));
+ keyctx->u.txhdr.dualck_to_txvalid |= htobe16(
+ F_KEY_CONTEXT_OPAD_PRESENT |
+ V_KEY_CONTEXT_MK_SIZE(0));
+ }
+ out += tlsp->tx_key_info_size;
+ if (send_partial_ghash) {
+ if (header_len != 0)
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ else
+ memcpy(out, tlsp->ghash, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+ }
+ } else {
+ /* ULPTX_SC_MEMRD to read key context. */
+ memrd = (void *)out;
+ memrd->cmd_to_len = htobe32(V_ULPTX_CMD(ULP_TX_SC_MEMRD) |
+ V_ULP_TX_SC_MORE(1) |
+ V_ULPTX_LEN16(tlsp->tx_key_info_size >> 4));
+ memrd->addr = htobe32(tlsp->tx_key_addr >> 5);
+
+ /* ULPTX_IDATA for CPL_TX_* and headers. */
+ idata = (void *)(memrd + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(post_key_context_len);
+
+ out = (void *)(idata + 1);
+ }
+
+ /* CPL_RX_PHYS_DSGL */
+ if (split_mode) {
+ crypto_hdr_len = sizeof(struct cpl_tx_pkt_core);
+ if (need_lso)
+ crypto_hdr_len += sizeof(struct cpl_tx_pkt_lso_core);
+ crypto_hdr_len += m->m_len;
+ out = write_split_mode_rx_phys(out, m, m_tls, crypto_hdr_len,
+ leading_waste, trailing_waste);
+ }
+
+ /* CPL_TX_PKT_LSO */
+ if (need_lso) {
+ out = write_lso_cpl(out, m, mss, eh_type, m->m_len +
+ m_tls->m_len);
+ txq->tso_wrs++;
+ }
+
+ /* CPL_TX_PKT_XT */
+ tx_pkt = (void *)out;
+ tx_pkt->ctrl0 = txq->cpl_ctrl0;
+ tx_pkt->ctrl1 = htobe64(pkt_ctrl1(txq, m, eh_type));
+ tx_pkt->pack = 0;
+ tx_pkt->len = htobe16(m->m_len + m_tls->m_len);
+
+ /* Copy the packet headers. */
+ out = (void *)(tx_pkt + 1);
+ memcpy(out, mtod(m, char *), m->m_len);
+
+ /* Modify the packet length in the IP header. */
+ ip_len = m->m_len + m_tls->m_len - m->m_pkthdr.l2hlen;
+ if (eh_type == ETHERTYPE_IP) {
+ ip = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip->ip_len, ip_len);
+ } else {
+ ip6 = (void *)(out + m->m_pkthdr.l2hlen);
+ be16enc(&ip6->ip6_plen, ip_len - sizeof(*ip6));
+ }
+
+ /* Modify sequence number and flags in TCP header. */
+ newtcp = (void *)(out + m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen);
+ be32enc(&newtcp->th_seq, tcp_seqno);
+ if (!last_wr)
+ newtcp->th_flags = tcp->th_flags & ~(TH_PUSH | TH_FIN);
+ out += m->m_len;
+
+ /*
+ * Insert placeholder for sequence number as IV for TLS 1.3
+ * non-short records.
+ */
+ if (tlsp->tls13 && !short_record) {
+ memset(out, 0, sizeof(uint64_t));
+ out += sizeof(uint64_t);
+ }
+
+ /* Populate the TLS header */
+ memcpy(out, m_tls->m_epg_hdr, header_len);
+ out += header_len;
+
+ /* TLS AAD for short records using a partial hash. */
+ if (send_partial_ghash && header_len != 0) {
+ if (tlsp->tls13) {
+ struct tls_aead_data_13 ad;
+
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = hdr->tls_length;
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ } else {
+ struct tls_aead_data ad;
+ uint16_t cipher_len;
+
+ cipher_len = rlen -
+ (m_tls->m_epg_hdrlen + AES_GMAC_HASH_LEN);
+ ad.seq = htobe64(m_tls->m_epg_seqno);
+ ad.type = hdr->tls_type;
+ ad.tls_vmajor = hdr->tls_vmajor;
+ ad.tls_vminor = hdr->tls_vminor;
+ ad.tls_length = htons(cipher_len);
+ memcpy(out, &ad, sizeof(ad));
+ out += sizeof(ad);
+ }
+ }
+
+ /* AES IV for a short record. */
+ if (short_record) {
+ iv = out;
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM) {
+ memcpy(iv, tlsp->keyctx.u.txhdr.txsalt, SALT_SIZE);
+ if (tlsp->tls13) {
+ uint64_t value;
+
+ value = be64dec(tlsp->keyctx.u.txhdr.txsalt +
+ 4);
+ value ^= m_tls->m_epg_seqno;
+ be64enc(iv + 4, value);
+ } else
+ memcpy(iv + 4, hdr + 1, 8);
+ if (send_partial_ghash)
+ be32enc(iv + 12, 1 + offset / AES_BLOCK_LEN);
+ else
+ be32enc(iv + 12, 2 + offset / AES_BLOCK_LEN);
+ } else
+ memcpy(iv, hdr + 1, AES_BLOCK_LEN);
+ out += AES_BLOCK_LEN;
+ }
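+
+ /*
+ * Counter note (background): GCM reserves counter block 1
+ * for the tag and starts payload encryption at counter 2,
+ * which matches the "2 + offset / AES_BLOCK_LEN" above for
+ * plain short records.  Partial-GHASH mode appears to use a
+ * counter base of 1 instead; that is inferred from this
+ * code rather than from a spec.
+ */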
+
+ if (imm_len % 16 != 0) {
+ if (imm_len % 8 != 0) {
+ /* Zero pad to an 8-byte boundary. */
+ memset(out, 0, 8 - (imm_len % 8));
+ out += 8 - (imm_len % 8);
+ }
+
+ /*
+ * Insert a ULP_TX_SC_NOOP if needed so the SGL is
+ * 16-byte aligned.
+ */
+ if (imm_len % 16 <= 8) {
+ idata = (void *)out;
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP) |
+ V_ULP_TX_SC_MORE(1));
+ idata->len = htobe32(0);
+ out = (void *)(idata + 1);
+ }
+ }
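+
+ /*
+ * Padding example (illustrative): with imm_len % 16 == 5,
+ * three zero bytes advance "out" to an 8-byte boundary and
+ * the 8-byte ULP_TX_SC_NOOP fills the rest of the 16 bytes;
+ * with imm_len % 16 == 12, four zero bytes alone reach the
+ * boundary and no NOOP is needed.
+ */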
+
+ /* SGL for record payload */
+ sglist_reset(txq->gl);
+ if (sglist_append_mbuf_epg(txq->gl, m_tls, m_tls->m_epg_hdrlen + offset,
+ plen) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist", __func__);
+#endif
+ }
+ if (last_ghash_frag) {
+ if (sglist_append_phys(txq->gl, zero_buffer_pa,
+ AES_GMAC_HASH_LEN) != 0) {
+#ifdef INVARIANTS
+ panic("%s: failed to append sglist (2)", __func__);
+#endif
+ }
+ }
+ out = write_gl_to_buf(txq->gl, out);
+
+ if (request_ghash) {
+ /* ULP_TXPKT */
+ txpkt = (void *)out;
+ txpkt->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
+ V_ULP_TXPKT_DATAMODIFY(0) |
+ V_T7_ULP_TXPKT_CHANNELID(tlsp->vi->pi->port_id) |
+ V_ULP_TXPKT_DEST(0) |
+ V_ULP_TXPKT_FID(txq->eq.cntxt_id) | V_ULP_TXPKT_RO(1));
+ txpkt->len = htobe32(howmany(txpkt_lens[1], 16));
+
+ /* ULPTX_IDATA sub-command */
+ idata = (void *)(txpkt + 1);
+ idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
+ V_ULP_TX_SC_MORE(0));
+ idata->len = sizeof(struct cpl_tx_tls_ack);
+ idata->len += sizeof(struct rss_header) +
+ sizeof(struct cpl_fw6_pld);
+ idata->len += AES_GMAC_HASH_LEN;
+ idata->len = htobe32(idata->len);
+ out = (void *)(idata + 1);
+
+ /* CPL_TX_TLS_ACK */
+ out = write_tx_tls_ack(out, tlsp->rx_chid, AES_GMAC_HASH_LEN,
+ ghash_lcb);
+
+ /* CPL_FW6_PLD */
+ out = write_fw6_pld(out, tlsp->rx_chid, tlsp->rx_qid,
+ AES_GMAC_HASH_LEN, (uintptr_t)tlsp | CPL_FW6_COOKIE_KTLS);
+
+ /* Space for partial hash. */
+ memset(out, 0, AES_GMAC_HASH_LEN);
+ out += AES_GMAC_HASH_LEN;
+
+ tlsp->ghash_pending = true;
+ tlsp->ghash_valid = false;
+ tlsp->ghash_lcb = ghash_lcb;
+ if (last_ghash_frag)
+ tlsp->ghash_offset = offset + plen;
+ else
+ tlsp->ghash_offset = rounddown2(offset + plen,
+ GMAC_BLOCK_LEN);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p requesting GHASH for offset %u",
+ __func__, tlsp, tlsp->ghash_offset);
+#endif
+ m_snd_tag_ref(&tlsp->com);
+
+ txq->kern_tls_ghash_requested++;
+ }
+
+ if (using_scratch) {
+ out = dst;
+ copy_to_txd(eq, txq->ss, &out, wr_len);
+ }
+
+ txq->kern_tls_records++;
+ txq->kern_tls_octets += m_tls->m_len;
+ if (split_mode) {
+ txq->kern_tls_splitmode++;
+ txq->kern_tls_waste += leading_waste + trailing_waste;
+ }
+ if (need_lso)
+ txq->kern_tls_lso++;
+
+ txsd = &txq->sdesc[pidx];
+ if (last_wr)
+ txsd->m = m;
+ else
+ txsd->m = NULL;
+ txsd->desc_used = ndesc;
+
+ return (ndesc);
+}
+
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ struct sge_eq *eq = &txq->eq;
+ struct tlspcb *tlsp;
+ struct tcphdr *tcp;
+ struct mbuf *m_tls;
+ struct ether_header *eh;
+ tcp_seq tcp_seqno;
+ u_int ndesc, pidx, totdesc;
+ uint16_t eh_type, mss;
+
+ TXQ_LOCK_ASSERT_OWNED(txq);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_pkthdr.snd_tag != NULL);
+ tlsp = mst_to_tls(m->m_pkthdr.snd_tag);
+
+ totdesc = 0;
+ eh = mtod(m, struct ether_header *);
+ eh_type = ntohs(eh->ether_type);
+ if (eh_type == ETHERTYPE_VLAN) {
+ struct ether_vlan_header *evh = (void *)eh;
+
+ eh_type = ntohs(evh->evl_proto);
+ }
+
+ tcp = (struct tcphdr *)((char *)eh + m->m_pkthdr.l2hlen +
+ m->m_pkthdr.l3hlen);
+ pidx = eq->pidx;
+
+ /* Determine MSS. */
+ if (m->m_pkthdr.csum_flags & CSUM_TSO) {
+ mss = m->m_pkthdr.tso_segsz;
+ tlsp->prev_mss = mss;
+ } else if (tlsp->prev_mss != 0)
+ mss = tlsp->prev_mss;
+ else
+ mss = if_getmtu(tlsp->vi->ifp) -
+ (m->m_pkthdr.l3hlen + m->m_pkthdr.l4hlen);
+
+ /* Fetch the starting TCP sequence number for this chain. */
+ tcp_seqno = ntohl(tcp->th_seq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: pkt len %d TCP seq %u", __func__, m->m_pkthdr.len,
+ tcp_seqno);
+#endif
+ KASSERT(!tlsp->ghash_pending, ("%s: GHASH pending for send", __func__));
+
+ /*
+ * Iterate over the TLS records, constructing a work request
+ * for each one.
+ */
+ for (m_tls = m->m_next; m_tls != NULL; m_tls = m_tls->m_next) {
+ MPASS(m_tls->m_flags & M_EXTPG);
+
+ ndesc = ktls_write_tls_wr(tlsp, txq, dst, m, tcp, m_tls,
+ available - totdesc, tcp_seqno, pidx, eh_type, mss);
+ totdesc += ndesc;
+ IDXINCR(pidx, ndesc, eq->sidx);
+ dst = &eq->desc[pidx];
+
+ tcp_seqno += m_tls->m_len;
+ }
+
+ /*
+ * Queue another packet if this was a GCM request that didn't
+ * request a GHASH response.
+ */
+ if (tlsp->enc_mode == SCMD_CIPH_MODE_AES_GCM && !tlsp->ghash_pending)
+ ktls_queue_next_packet(tlsp, true);
+
+ MPASS(totdesc <= available);
+ return (totdesc);
+}
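+
+/*
+ * Expected mbuf chain shape for t7_ktls_write_wr(), a sketch derived
+ * from the assertions above rather than a formal contract:
+ *
+ *   m              pkthdr mbuf holding the Ethernet/IP/TCP headers
+ *   m->m_next ...  one or more M_EXTPG mbufs, each covering a span
+ *                  of a single TLS record
+ *
+ * Each M_EXTPG mbuf becomes one work request, and tcp_seqno advances
+ * by m_tls->m_len between records.
+ */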
+
+static void
+t7_tls_tag_free(struct m_snd_tag *mst)
+{
+ struct adapter *sc;
+ struct tlspcb *tlsp;
+
+ tlsp = mst_to_tls(mst);
+ sc = tlsp->sc;
+
+ CTR2(KTR_CXGBE, "%s: %p", __func__, tlsp);
+
+ if (tlsp->tx_key_addr >= 0)
+ t4_free_tls_keyid(sc, tlsp->tx_key_addr);
+
+ KASSERT(mbufq_len(&tlsp->pending_mbufs) == 0,
+ ("%s: pending mbufs", __func__));
+
+ zfree(tlsp, M_CXGBE);
+}
+
+static int
+ktls_fw6_pld(struct sge_iq *iq, const struct rss_header *rss,
+ struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ struct tlspcb *tlsp;
+ const void *ghash;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+
+ tlsp = (struct tlspcb *)(uintptr_t)CPL_FW6_PLD_COOKIE(cpl);
+ KASSERT(cpl->data[0] == 0, ("%s: error status returned", __func__));
+
+ TXQ_LOCK(tlsp->txq);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: %p received GHASH for offset %u%s", __func__, tlsp,
+ tlsp->ghash_offset, tlsp->ghash_lcb ? " in LCB" : "");
+#endif
+ if (tlsp->ghash_lcb)
+ ghash = &cpl->data[2];
+ else
+ ghash = cpl + 1;
+ memcpy(tlsp->ghash, ghash, AES_GMAC_HASH_LEN);
+ tlsp->ghash_valid = true;
+ tlsp->ghash_pending = false;
+ tlsp->txq->kern_tls_ghash_received++;
+
+ ktls_queue_next_packet(tlsp, false);
+ TXQ_UNLOCK(tlsp->txq);
+
+ m_snd_tag_rele(&tlsp->com);
+ m_freem(m);
+ return (0);
+}
+
+void
+t7_ktls_modload(void)
+{
+ zero_buffer = malloc_aligned(AES_GMAC_HASH_LEN, AES_GMAC_HASH_LEN,
+ M_CXGBE, M_ZERO | M_WAITOK);
+ zero_buffer_pa = vtophys(zero_buffer);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, ktls_fw6_pld,
+ CPL_FW6_COOKIE_KTLS);
+}
+
+void
+t7_ktls_modunload(void)
+{
+ free(zero_buffer, M_CXGBE);
+ t4_register_shared_cpl_handler(CPL_FW6_PLD, NULL, CPL_FW6_COOKIE_KTLS);
+}
+
+#else
+
+int
+t7_tls_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
+ struct m_snd_tag **pt)
+{
+ return (ENXIO);
+}
+
+int
+t7_ktls_parse_pkt(struct mbuf *m)
+{
+ return (EINVAL);
+}
+
+int
+t7_ktls_write_wr(struct sge_txq *txq, void *dst, struct mbuf *m,
+ u_int available)
+{
+ panic("can't happen");
+}
+
+void
+t7_ktls_modload(void)
+{
+}
+
+void
+t7_ktls_modunload(void)
+{
+}
+
+#endif
diff --git a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
index b8e6eeba0280..2cd24c635325 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_flash_utils.c
@@ -32,19 +32,6 @@
#include "cudbg.h"
#include "cudbg_lib_common.h"
-enum {
- SF_ATTEMPTS = 10, /* max retries for SF operations */
-
- /* flash command opcodes */
- SF_PROG_PAGE = 2, /* program page */
- SF_WR_DISABLE = 4, /* disable writes */
- SF_RD_STATUS = 5, /* read status register */
- SF_WR_ENABLE = 6, /* enable writes */
- SF_RD_DATA_FAST = 0xb, /* read flash */
- SF_RD_ID = 0x9f, /* read ID */
- SF_ERASE_SECTOR = 0xd8, /* erase sector */
-};
-
int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size);
int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
u32 start_address);
@@ -56,10 +43,12 @@ update_skip_size(struct cudbg_flash_sec_info *sec_info, u32 size)
}
static
-void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
- int sector_nu, int avail)
+void set_sector_availability(struct adapter *adap,
+ struct cudbg_flash_sec_info *sec_info, int sector_nu, int avail)
{
- sector_nu -= CUDBG_START_SEC;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, NULL);
+
+ sector_nu -= start / SF_SEC_SIZE;
if (avail)
set_dbg_bitmap(sec_info->sec_bitmap, sector_nu);
else
@@ -68,13 +57,17 @@ void set_sector_availability(struct cudbg_flash_sec_info *sec_info,
/* This function will return empty sector available for filling */
static int
-find_empty_sec(struct cudbg_flash_sec_info *sec_info)
+find_empty_sec(struct adapter *adap, struct cudbg_flash_sec_info *sec_info)
{
int i, index, bit;
-
- for (i = CUDBG_START_SEC; i < CUDBG_SF_MAX_SECTOR; i++) {
- index = (i - CUDBG_START_SEC) / 8;
- bit = (i - CUDBG_START_SEC) % 8;
+ unsigned int len = 0;
+ int start = t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &len);
+
+ start /= SF_SEC_SIZE; /* addr -> sector */
+ len /= SF_SEC_SIZE;
+ for (i = start; i < start + len; i++) {
+ index = (i - start) / 8;
+ bit = (i - start) % 8;
if (!(sec_info->sec_bitmap[index] & (1 << bit)))
return i;
}
@@ -102,7 +95,7 @@ static void update_headers(void *handle, struct cudbg_buffer *dbg_buff,
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_hdr = sec_info->sec_data + sec_hdr_start_addr;
flash_hdr = (struct cudbg_flash_hdr *)(sec_hdr);
@@ -166,11 +159,13 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
u32 space_left;
int rc = 0;
int sec;
+ unsigned int cudbg_max_size = 0;
+ t4_flash_loc_start(adap, FLASH_LOC_CUDBG, &cudbg_max_size);
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
sec_data_size = sec_hdr_start_addr;
cudbg_init->print("\tWriting %u bytes to flash\n", cur_entity_size);
@@ -191,12 +186,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
flash_hdr = (struct cudbg_flash_hdr *)(sec_info->sec_data +
sec_hdr_start_addr);
- if (flash_hdr->data_len > CUDBG_FLASH_SIZE) {
+ if (flash_hdr->data_len > cudbg_max_size) {
rc = CUDBG_STATUS_FLASH_FULL;
goto out;
}
- space_left = CUDBG_FLASH_SIZE - flash_hdr->data_len;
+ space_left = cudbg_max_size - flash_hdr->data_len;
if (cur_entity_size > space_left) {
rc = CUDBG_STATUS_FLASH_FULL;
@@ -204,10 +199,11 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
}
while (cur_entity_size > 0) {
- sec = find_empty_sec(sec_info);
+ sec = find_empty_sec(adap, sec_info);
if (sec_info->par_sec) {
sec_data_offset = sec_info->par_sec_offset;
- set_sector_availability(sec_info, sec_info->par_sec, 0);
+ set_sector_availability(adap, sec_info,
+ sec_info->par_sec, 0);
sec_info->par_sec = 0;
sec_info->par_sec_offset = 0;
@@ -230,13 +226,12 @@ int cudbg_write_flash(void *handle, u64 timestamp, void *data,
(void *)((char *)dbg_buff->data + start_offset),
tmp_size);
- rc = write_flash(adap, sec, sec_info->sec_data,
- CUDBG_SF_SECTOR_SIZE);
+ rc = write_flash(adap, sec, sec_info->sec_data, SF_SEC_SIZE);
if (rc)
goto out;
cur_entity_size -= tmp_size;
- set_sector_availability(sec_info, sec, 1);
+ set_sector_availability(adap, sec_info, sec, 1);
start_offset += tmp_size;
}
out:
@@ -247,19 +242,14 @@ int write_flash(struct adapter *adap, u32 start_sec, void *data, u32 size)
{
unsigned int addr;
unsigned int i, n;
- unsigned int sf_sec_size;
int rc = 0;
u8 *ptr = (u8 *)data;
- sf_sec_size = adap->params.sf_size/adap->params.sf_nsec;
-
- addr = start_sec * CUDBG_SF_SECTOR_SIZE;
- i = DIV_ROUND_UP(size,/* # of sectors spanned */
- sf_sec_size);
+ addr = start_sec * SF_SEC_SIZE;
+ i = DIV_ROUND_UP(size, SF_SEC_SIZE);
- rc = t4_flash_erase_sectors(adap, start_sec,
- start_sec + i - 1);
+ rc = t4_flash_erase_sectors(adap, start_sec, start_sec + i - 1);
/*
* If size == 0 then we're simply erasing the FLASH sectors associated
* with the on-adapter OptionROM Configuration File.
@@ -337,6 +327,9 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
u32 data_offset = 0;
u32 i, j;
int rc;
+ unsigned int cudbg_len = 0;
+ int cudbg_start_sec = t4_flash_loc_start(adap, FLASH_LOC_CUDBG,
+ &cudbg_len) / SF_SEC_SIZE;
rc = t4_get_flash_params(adap);
if (rc) {
@@ -348,7 +341,7 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
total_hdr_size = data_hdr_size + sizeof(struct cudbg_flash_hdr);
- sec_hdr_start_addr = CUDBG_SF_SECTOR_SIZE - total_hdr_size;
+ sec_hdr_start_addr = SF_SEC_SIZE - total_hdr_size;
if (!data_flag) {
/* fill header */
@@ -357,14 +350,14 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
* have older filled sector also
*/
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
- rc = read_flash(adap, CUDBG_START_SEC, &flash_hdr,
+ rc = read_flash(adap, cudbg_start_sec, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
if (flash_hdr.signature == CUDBG_FL_SIGNATURE) {
sec_info->max_timestamp = flash_hdr.timestamp;
} else {
- rc = read_flash(adap, CUDBG_START_SEC + 1,
+ rc = read_flash(adap, cudbg_start_sec + 1,
&flash_hdr,
sizeof(struct cudbg_flash_hdr),
sec_hdr_start_addr);
@@ -383,8 +376,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding max sequence number because max sequenced
* sector has updated header
*/
- for (i = CUDBG_START_SEC; i <
- CUDBG_SF_MAX_SECTOR; i++) {
+ for (i = cudbg_start_sec; i < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; i++) {
memset(&flash_hdr, 0,
sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, i, &flash_hdr,
@@ -423,7 +416,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
/* finding sector sequence sorted */
for (i = 1; i <= sec_info->max_seq_no; i++) {
- for (j = CUDBG_START_SEC; j < CUDBG_SF_MAX_SECTOR; j++) {
+ for (j = cudbg_start_sec; j < cudbg_start_sec +
+ cudbg_len / SF_SEC_SIZE; j++) {
memset(&flash_hdr, 0, sizeof(struct cudbg_flash_hdr));
rc = read_flash(adap, j, &flash_hdr,
sizeof(struct cudbg_flash_hdr),
@@ -434,10 +428,8 @@ int cudbg_read_flash(void *handle, void *data, u32 size, int data_flag)
sec_info->max_timestamp ==
flash_hdr.timestamp &&
flash_hdr.sec_seq_no == i) {
- if (size + total_hdr_size >
- CUDBG_SF_SECTOR_SIZE)
- tmp_size = CUDBG_SF_SECTOR_SIZE -
- total_hdr_size;
+ if (size + total_hdr_size > SF_SEC_SIZE)
+ tmp_size = SF_SEC_SIZE - total_hdr_size;
else
tmp_size = size;
@@ -468,7 +460,7 @@ int read_flash(struct adapter *adap, u32 start_sec , void *data, u32 size,
unsigned int addr, i, n;
int rc;
u32 *ptr = (u32 *)data;
- addr = start_sec * CUDBG_SF_SECTOR_SIZE + start_address;
+ addr = start_sec * SF_SEC_SIZE + start_address;
size = size / 4;
for (i = 0; i < size; i += SF_PAGE_SIZE) {
if ((size - i) < SF_PAGE_SIZE)
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib.c b/sys/dev/cxgbe/cudbg/cudbg_lib.c
index a36c53f68223..f0273349263a 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib.c
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib.c
@@ -155,23 +155,25 @@ static int wr_entity_to_flash(void *handle, struct cudbg_buffer *dbg_buff,
u32 flash_data_offset;
u32 data_hdr_size;
int rc = -1;
+ unsigned int cudbg_len;
data_hdr_size = CUDBG_MAX_ENTITY * sizeof(struct cudbg_entity_hdr) +
sizeof(struct cudbg_hdr);
+ t4_flash_loc_start(cudbg_init->adap, FLASH_LOC_CUDBG, &cudbg_len);
- flash_data_offset = (FLASH_CUDBG_NSECS *
+ flash_data_offset = ((cudbg_len / SF_SEC_SIZE) *
(sizeof(struct cudbg_flash_hdr) +
data_hdr_size)) +
(cur_entity_data_offset - data_hdr_size);
- if (flash_data_offset > CUDBG_FLASH_SIZE) {
+ if (flash_data_offset > cudbg_len) {
update_skip_size(sec_info, cur_entity_size);
if (cudbg_init->verbose)
cudbg_init->print("Large entity skipping...\n");
return rc;
}
- remain_flash_size = CUDBG_FLASH_SIZE - flash_data_offset;
+ remain_flash_size = cudbg_len - flash_data_offset;
if (cur_entity_size > remain_flash_size) {
update_skip_size(sec_info, cur_entity_size);
@@ -1292,6 +1294,7 @@ static int collect_macstats(struct cudbg_init *pdbg_init,
mac_stats_buff->port_count = n;
for (i = 0; i < mac_stats_buff->port_count; i++)
+ /* Incorrect, should use hport instead of i */
t4_get_port_stats(padap, i, &mac_stats_buff->stats[i]);
rc = write_compression_hdr(&scratch_buff, dbg_buff);
@@ -1967,7 +1970,7 @@ static int collect_fw_devlog(struct cudbg_init *pdbg_init,
u32 offset;
int rc = 0, i;
- rc = t4_init_devlog_params(padap, 1);
+ rc = t4_init_devlog_ncores_params(padap, 1);
if (rc < 0) {
pdbg_init->print("%s(), t4_init_devlog_params failed!, rc: "\
diff --git a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
index 86390eb4399d..b6a85f436db0 100644
--- a/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
+++ b/sys/dev/cxgbe/cudbg/cudbg_lib_common.h
@@ -59,11 +59,6 @@
#include "common/t4_hw.h"
#endif
-#define CUDBG_SF_MAX_SECTOR (FLASH_CUDBG_START_SEC + FLASH_CUDBG_NSECS)
-#define CUDBG_SF_SECTOR_SIZE SF_SEC_SIZE
-#define CUDBG_START_SEC FLASH_CUDBG_START_SEC
-#define CUDBG_FLASH_SIZE FLASH_CUDBG_MAX_SIZE
-
#define CUDBG_EXT_DATA_BIT 0
#define CUDBG_EXT_DATA_VALID (1 << CUDBG_EXT_DATA_BIT)
@@ -121,7 +116,7 @@ struct cudbg_flash_sec_info {
u32 hdr_data_len; /* Total data */
u32 skip_size; /* Total size of large entities. */
u64 max_timestamp;
- char sec_data[CUDBG_SF_SECTOR_SIZE];
+ char sec_data[SF_SEC_SIZE];
u8 sec_bitmap[8];
};
diff --git a/sys/dev/cxgbe/cxgbei/cxgbei.c b/sys/dev/cxgbe/cxgbei/cxgbei.c
index 193d58f9eda4..ccca45f5f761 100644
--- a/sys/dev/cxgbe/cxgbei/cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/cxgbei.c
@@ -842,9 +842,6 @@ cxgbei_activate(struct adapter *sc)
/* per-adapter softc for iSCSI */
ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_WAITOK);
- if (ci == NULL)
- return (ENOMEM);
-
rc = cxgbei_init(sc, ci);
if (rc != 0) {
free(ci, M_CXGBE);
@@ -901,9 +898,8 @@ cxgbei_deactivate_all(struct adapter *sc, void *arg __unused)
}
static struct uld_info cxgbei_uld_info = {
- .uld_id = ULD_ISCSI,
- .activate = cxgbei_activate,
- .deactivate = cxgbei_deactivate,
+ .uld_activate = cxgbei_activate,
+ .uld_deactivate = cxgbei_deactivate,
};
static int
@@ -916,7 +912,7 @@ cxgbei_mod_load(void)
t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp);
t4_register_cpl_handler(CPL_RX_ISCSI_CMP, do_rx_iscsi_cmp);
- rc = t4_register_uld(&cxgbei_uld_info);
+ rc = t4_register_uld(&cxgbei_uld_info, ULD_ISCSI);
if (rc != 0)
return (rc);
@@ -931,7 +927,7 @@ cxgbei_mod_unload(void)
t4_iterate(cxgbei_deactivate_all, NULL);
- if (t4_unregister_uld(&cxgbei_uld_info) == EBUSY)
+ if (t4_unregister_uld(&cxgbei_uld_info, ULD_ISCSI) == EBUSY)
return (EBUSY);
t4_register_cpl_handler(CPL_ISCSI_HDR, NULL);
diff --git a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
index ab1428c06d87..9cdfd0fb9652 100644
--- a/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/icl_cxgbei.c
@@ -651,7 +651,7 @@ icl_cxgbei_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *ip,
while (len > 0) {
if (m == NULL) {
m = mb_alloc_ext_pgs(flags & ~ICL_NOCOPY,
- cxgbei_free_mext_pg);
+ cxgbei_free_mext_pg, 0);
if (__predict_false(m == NULL))
return (ENOMEM);
atomic_add_int(&icp->ref_cnt, 1);
@@ -976,42 +976,6 @@ icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
return (0);
}
-/*
- * Request/response structure used to find out the adapter offloading a socket.
- */
-struct find_ofld_adapter_rr {
- struct socket *so;
- struct adapter *sc; /* result */
-};
-
-static void
-find_offload_adapter(struct adapter *sc, void *arg)
-{
- struct find_ofld_adapter_rr *fa = arg;
- struct socket *so = fa->so;
- struct tom_data *td = sc->tom_softc;
- struct tcpcb *tp;
- struct inpcb *inp;
-
- /* Non-TCP were filtered out earlier. */
- MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
-
- if (fa->sc != NULL)
- return; /* Found already. */
-
- if (td == NULL)
- return; /* TOE not enabled on this adapter. */
-
- inp = sotoinpcb(so);
- INP_WLOCK(inp);
- if ((inp->inp_flags & INP_DROPPED) == 0) {
- tp = intotcpcb(inp);
- if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
- fa->sc = sc; /* Found. */
- }
- INP_WUNLOCK(inp);
-}
-
static bool
is_memfree(struct adapter *sc)
{
@@ -1025,46 +989,6 @@ is_memfree(struct adapter *sc)
return (true);
}
-/* XXXNP: move this to t4_tom. */
-static void
-send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
-{
- struct wrqe *wr;
- struct fw_flowc_wr *flowc;
- const u_int nparams = 1;
- u_int flowclen;
- struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
-
- flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
-
- wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
- if (wr == NULL) {
- /* XXX */
- panic("%s: allocation failure.", __func__);
- }
- flowc = wrtod(wr);
- memset(flowc, 0, wr->wr_len);
-
- flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
- V_FW_FLOWC_WR_NPARAMS(nparams));
- flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
- V_FW_WR_FLOWID(toep->tid));
-
- flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
- flowc->mnemval[0].val = htobe32(maxlen);
-
- txsd->tx_credits = howmany(flowclen, 16);
- txsd->plen = 0;
- KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
- ("%s: not enough credits (%d)", __func__, toep->tx_credits));
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
-}
-
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
@@ -1093,7 +1017,6 @@ int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
struct icl_cxgbei_conn *icc = ic_to_icc(ic);
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct inpcb *inp;
@@ -1137,15 +1060,11 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
fdrop(fp, curthread);
ICL_CONN_UNLOCK(ic);
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ icc->sc = find_offload_adapter(so);
+ if (icc->sc == NULL) {
error = EINVAL;
goto out;
}
- icc->sc = fa.sc;
max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
@@ -1203,7 +1122,7 @@ icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
toep->params.ulp_mode = ULP_MODE_ISCSI;
toep->ulpcb = icc;
- send_iscsi_flowc_wr(icc->sc, toep,
+ send_txdataplen_max_flowc_wr(icc->sc, toep,
roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
INP_WUNLOCK(inp);
@@ -1776,7 +1695,6 @@ cxgbei_limits(struct adapter *sc, void *arg)
static int
cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
{
- struct find_ofld_adapter_rr fa;
struct file *fp;
struct socket *so;
struct adapter *sc;
@@ -1799,17 +1717,13 @@ cxgbei_limits_fd(struct icl_drv_limits *idl, int fd)
return (EINVAL);
}
- /* Find the adapter offloading this socket. */
- fa.sc = NULL;
- fa.so = so;
- t4_iterate(find_offload_adapter, &fa);
- if (fa.sc == NULL) {
+ sc = find_offload_adapter(so);
+ if (sc == NULL) {
fdrop(fp, curthread);
return (ENXIO);
}
fdrop(fp, curthread);
- sc = fa.sc;
error = begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims");
if (error != 0)
return (error);
diff --git a/sys/dev/cxgbe/firmware/t4fw_interface.h b/sys/dev/cxgbe/firmware/t4fw_interface.h
index 686bb55d658b..5874f0343b03 100644
--- a/sys/dev/cxgbe/firmware/t4fw_interface.h
+++ b/sys/dev/cxgbe/firmware/t4fw_interface.h
@@ -1,6 +1,5 @@
/*-
- * Copyright (c) 2012-2017 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2012-2017, 2025 Chelsio Communications.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -67,8 +66,8 @@ enum fw_retval {
FW_FCOE_NO_XCHG = 136, /* */
FW_SCSI_RSP_ERR = 137, /* */
FW_ERR_RDEV_IMPL_LOGO = 138, /* */
- FW_SCSI_UNDER_FLOW_ERR = 139, /* */
- FW_SCSI_OVER_FLOW_ERR = 140, /* */
+ FW_SCSI_UNDER_FLOW_ERR = 139, /* */
+ FW_SCSI_OVER_FLOW_ERR = 140, /* */
FW_SCSI_DDP_ERR = 141, /* DDP error*/
FW_SCSI_TASK_ERR = 142, /* No SCSI tasks available */
FW_SCSI_IO_BLOCK = 143, /* IO is going to be blocked due to resource failure */
@@ -85,7 +84,7 @@ enum fw_memtype {
FW_MEMTYPE_FLASH = 0x4,
FW_MEMTYPE_INTERNAL = 0x5,
FW_MEMTYPE_EXTMEM1 = 0x6,
- FW_MEMTYPE_HMA = 0x7,
+ FW_MEMTYPE_HMA = 0x7,
};
/******************************************************************************
@@ -106,10 +105,14 @@ enum fw_wr_opcodes {
FW_OFLD_CONNECTION_WR = 0x2f,
FW_FLOWC_WR = 0x0a,
FW_OFLD_TX_DATA_WR = 0x0b,
+ FW_OFLD_TX_DATA_V2_WR = 0x0f,
FW_CMD_WR = 0x10,
FW_ETH_TX_PKT_VM_WR = 0x11,
FW_ETH_TX_PKTS_VM_WR = 0x12,
FW_RI_RES_WR = 0x0c,
+ FW_QP_RES_WR = FW_RI_RES_WR,
+ /* iwarp wr used from rdma kernel and user space */
+ FW_V2_NVMET_TX_DATA_WR = 0x13,
FW_RI_RDMA_WRITE_WR = 0x14,
FW_RI_SEND_WR = 0x15,
FW_RI_RDMA_READ_WR = 0x16,
@@ -118,6 +121,15 @@ enum fw_wr_opcodes {
FW_RI_FR_NSMR_WR = 0x19,
FW_RI_FR_NSMR_TPTE_WR = 0x20,
FW_RI_RDMA_WRITE_CMPL_WR = 0x21,
+ /* rocev2 wr used from rdma kernel and user space */
+ FW_RI_V2_RDMA_WRITE_WR = 0x22,
+ FW_RI_V2_SEND_WR = 0x23,
+ FW_RI_V2_RDMA_READ_WR = 0x24,
+ FW_RI_V2_BIND_MW_WR = 0x25,
+ FW_RI_V2_FR_NSMR_WR = 0x26,
+ FW_RI_V2_ATOMIC_WR = 0x27,
+ FW_NVMET_V2_FR_NSMR_WR = 0x28,
+ FW_RI_V2_INV_LSTAG_WR = 0x1e,
FW_RI_INV_LSTAG_WR = 0x1a,
FW_RI_SEND_IMMEDIATE_WR = 0x15,
FW_RI_ATOMIC_WR = 0x16,
@@ -138,10 +150,11 @@ enum fw_wr_opcodes {
FW_POFCOE_TCB_WR = 0x42,
FW_POFCOE_ULPTX_WR = 0x43,
FW_ISCSI_TX_DATA_WR = 0x45,
- FW_PTP_TX_PKT_WR = 0x46,
+ FW_PTP_TX_PKT_WR = 0x46,
FW_TLSTX_DATA_WR = 0x68,
FW_TLS_TUNNEL_OFLD_WR = 0x69,
FW_CRYPTO_LOOKASIDE_WR = 0x6d,
+ FW_CRYPTO_UPDATE_SA_WR = 0x6e,
FW_COISCSI_TGT_WR = 0x70,
FW_COISCSI_TGT_CONN_WR = 0x71,
FW_COISCSI_TGT_XMIT_WR = 0x72,
@@ -149,7 +162,8 @@ enum fw_wr_opcodes {
FW_ISNS_WR = 0x75,
FW_ISNS_XMIT_WR = 0x76,
FW_FILTER2_WR = 0x77,
- FW_LASTC2E_WR = 0x80
+ /* FW_LASTC2E_WR = 0x80 */
+ FW_LASTC2E_WR = 0xB0
};
/*
@@ -308,7 +322,7 @@ enum fw_filter_wr_cookie {
enum fw_filter_wr_nat_mode {
FW_FILTER_WR_NATMODE_NONE = 0,
- FW_FILTER_WR_NATMODE_DIP ,
+ FW_FILTER_WR_NATMODE_DIP,
FW_FILTER_WR_NATMODE_DIPDP,
FW_FILTER_WR_NATMODE_DIPDPSIP,
FW_FILTER_WR_NATMODE_DIPDPSP,
@@ -387,7 +401,7 @@ struct fw_filter2_wr {
__u8 newlip[16];
__u8 newfip[16];
__be32 natseqcheck;
- __be32 r9;
+ __be32 rocev2_qpn;
__be64 r10;
__be64 r11;
__be64 r12;
@@ -675,6 +689,19 @@ struct fw_filter2_wr {
#define G_FW_FILTER_WR_MATCHTYPEM(x) \
(((x) >> S_FW_FILTER_WR_MATCHTYPEM) & M_FW_FILTER_WR_MATCHTYPEM)
+#define S_FW_FILTER2_WR_ROCEV2 31
+#define M_FW_FILTER2_WR_ROCEV2 0x1
+#define V_FW_FILTER2_WR_ROCEV2(x) ((x) << S_FW_FILTER2_WR_ROCEV2)
+#define G_FW_FILTER2_WR_ROCEV2(x) \
+ (((x) >> S_FW_FILTER2_WR_ROCEV2) & M_FW_FILTER2_WR_ROCEV2)
+#define F_FW_FILTER2_WR_ROCEV2 V_FW_FILTER2_WR_ROCEV2(1U)
+
+#define S_FW_FILTER2_WR_QPN 0
+#define M_FW_FILTER2_WR_QPN 0xffffff
+#define V_FW_FILTER2_WR_QPN(x) ((x) << S_FW_FILTER2_WR_QPN)
+#define G_FW_FILTER2_WR_QPN(x) \
+ (((x) >> S_FW_FILTER2_WR_QPN) & M_FW_FILTER2_WR_QPN)
+
struct fw_ulptx_wr {
__be32 op_to_compl;
__be32 flowid_len16;
@@ -1034,7 +1061,10 @@ enum fw_flowc_mnem {
FW_FLOWC_MNEM_SND_SCALE = 13,
FW_FLOWC_MNEM_RCV_SCALE = 14,
FW_FLOWC_MNEM_ULP_MODE = 15,
- FW_FLOWC_MNEM_MAX = 16,
+ FW_FLOWC_MNEM_EQID = 16,
+ FW_FLOWC_MNEM_CONG_ALG = 17,
+ FW_FLOWC_MNEM_TXDATAPLEN_MIN = 18,
+ FW_FLOWC_MNEM_MAX = 19,
};
struct fw_flowc_mnemval {
@@ -1153,6 +1183,55 @@ struct fw_ofld_tx_data_wr {
#define G_FW_ISCSI_TX_DATA_WR_FLAGS_LO(x) \
(((x) >> S_FW_ISCSI_TX_DATA_WR_FLAGS_LO) & M_FW_ISCSI_TX_DATA_WR_FLAGS_LO)
+struct fw_ofld_tx_data_v2_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+};
+
+#define S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 31
+#define M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define G_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_LSODISABLE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_LSODISABLE)
+#define F_FW_OFLD_TX_DATA_V2_WR_LSODISABLE \
+ V_FW_OFLD_TX_DATA_V2_WR_LSODISABLE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 30
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLD(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 29
+#define M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE 0x1
+#define V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define G_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE) & \
+ M_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE)
+#define F_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE \
+ V_FW_OFLD_TX_DATA_V2_WR_ALIGNPLDSHOVE(1U)
+
+#define S_FW_OFLD_TX_DATA_V2_WR_FLAGS 0
+#define M_FW_OFLD_TX_DATA_V2_WR_FLAGS 0xfffffff
+#define V_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ ((x) << S_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+#define G_FW_OFLD_TX_DATA_V2_WR_FLAGS(x) \
+ (((x) >> S_FW_OFLD_TX_DATA_V2_WR_FLAGS) & M_FW_OFLD_TX_DATA_V2_WR_FLAGS)
+
struct fw_cmd_wr {
__be32 op_dma;
__be32 len16_pkd;
@@ -1218,8 +1297,15 @@ enum fw_ri_wr_opcode {
FW_RI_FAST_REGISTER = 0xd,
FW_RI_LOCAL_INV = 0xe,
#endif
+ /* Chelsio specific */
FW_RI_SGE_EC_CR_RETURN = 0xf,
FW_RI_WRITE_IMMEDIATE = FW_RI_RDMA_INIT,
+ FW_RI_SEND_IMMEDIATE = FW_RI_RDMA_INIT,
+
+ FW_RI_ROCEV2_SEND = 0x0,
+ FW_RI_ROCEV2_WRITE = 0x0,
+ FW_RI_ROCEV2_SEND_WITH_INV = 0x5,
+ FW_RI_ROCEV2_SEND_IMMEDIATE = 0xa,
};
enum fw_ri_wr_flags {
@@ -1229,7 +1315,8 @@ enum fw_ri_wr_flags {
FW_RI_READ_FENCE_FLAG = 0x08,
FW_RI_LOCAL_FENCE_FLAG = 0x10,
FW_RI_RDMA_READ_INVALIDATE = 0x20,
- FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40
+ FW_RI_RDMA_WRITE_WITH_IMMEDIATE = 0x40,
+ //FW_RI_REPLAYED_WR_FLAG = 0x80,
};
enum fw_ri_mpa_attrs {
@@ -1522,18 +1609,302 @@ struct fw_ri_cqe {
#define G_FW_RI_CQE_TYPE(x) \
(((x) >> S_FW_RI_CQE_TYPE) & M_FW_RI_CQE_TYPE)
-enum fw_ri_res_type {
+enum fw_res_type {
FW_RI_RES_TYPE_SQ,
FW_RI_RES_TYPE_RQ,
FW_RI_RES_TYPE_CQ,
FW_RI_RES_TYPE_SRQ,
+ FW_QP_RES_TYPE_SQ = FW_RI_RES_TYPE_SQ,
+ FW_QP_RES_TYPE_CQ = FW_RI_RES_TYPE_CQ,
};
-enum fw_ri_res_op {
+enum fw_res_op {
FW_RI_RES_OP_WRITE,
FW_RI_RES_OP_RESET,
+ FW_QP_RES_OP_WRITE = FW_RI_RES_OP_WRITE,
+ FW_QP_RES_OP_RESET = FW_RI_RES_OP_RESET,
+};
+
+enum fw_qp_transport_type {
+ FW_QP_TRANSPORT_TYPE_IWARP,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_UD,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_RC,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_INI,
+ FW_QP_TRANSPORT_TYPE_ROCEV2_XRC_TGT,
+ FW_QP_TRANSPORT_TYPE_NVMET,
+ FW_QP_TRANSPORT_TYPE_TOE,
+ FW_QP_TRANSPORT_TYPE_ISCSI,
+};
+
+struct fw_qp_res {
+ union fw_qp_restype {
+ struct fw_qp_res_sqrq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 eqid;
+ __be32 r4[2];
+ __be32 fetchszm_to_iqid;
+ __be32 dcaen_to_eqsize;
+ __be64 eqaddr;
+ } sqrq;
+ struct fw_qp_res_cq {
+ __u8 restype;
+ __u8 op;
+ __be16 r3;
+ __be32 iqid;
+ __be32 r4[2];
+ __be32 iqandst_to_iqandstindex;
+ __be16 iqdroprss_to_iqesize;
+ __be16 iqsize;
+ __be64 iqaddr;
+ __be32 iqns_iqro;
+ __be32 r6_lo;
+ __be64 r7;
+ } cq;
+ } u;
+};
+
+struct fw_qp_res_wr {
+ __be32 op_to_nres;
+ __be32 len16_pkd;
+ __u64 cookie;
+#ifndef C99_NOT_SUPPORTED
+ struct fw_qp_res res[0];
+#endif
};
+#define S_FW_QP_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_QP_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_QP_RES_WR_TRANSPORT_TYPE)
+#define G_FW_QP_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_QP_RES_WR_TRANSPORT_TYPE) & M_FW_QP_RES_WR_TRANSPORT_TYPE)
+
+#define S_FW_QP_RES_WR_VFN 8
+#define M_FW_QP_RES_WR_VFN 0xff
+#define V_FW_QP_RES_WR_VFN(x) ((x) << S_FW_QP_RES_WR_VFN)
+#define G_FW_QP_RES_WR_VFN(x) \
+ (((x) >> S_FW_QP_RES_WR_VFN) & M_FW_QP_RES_WR_VFN)
+
+#define S_FW_QP_RES_WR_NRES 0
+#define M_FW_QP_RES_WR_NRES 0xff
+#define V_FW_QP_RES_WR_NRES(x) ((x) << S_FW_QP_RES_WR_NRES)
+#define G_FW_QP_RES_WR_NRES(x) \
+ (((x) >> S_FW_QP_RES_WR_NRES) & M_FW_QP_RES_WR_NRES)
+
+#define S_FW_QP_RES_WR_FETCHSZM 26
+#define M_FW_QP_RES_WR_FETCHSZM 0x1
+#define V_FW_QP_RES_WR_FETCHSZM(x) ((x) << S_FW_QP_RES_WR_FETCHSZM)
+#define G_FW_QP_RES_WR_FETCHSZM(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHSZM) & M_FW_QP_RES_WR_FETCHSZM)
+#define F_FW_QP_RES_WR_FETCHSZM V_FW_QP_RES_WR_FETCHSZM(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGNS 25
+#define M_FW_QP_RES_WR_STATUSPGNS 0x1
+#define V_FW_QP_RES_WR_STATUSPGNS(x) ((x) << S_FW_QP_RES_WR_STATUSPGNS)
+#define G_FW_QP_RES_WR_STATUSPGNS(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGNS) & M_FW_QP_RES_WR_STATUSPGNS)
+#define F_FW_QP_RES_WR_STATUSPGNS V_FW_QP_RES_WR_STATUSPGNS(1U)
+
+#define S_FW_QP_RES_WR_STATUSPGRO 24
+#define M_FW_QP_RES_WR_STATUSPGRO 0x1
+#define V_FW_QP_RES_WR_STATUSPGRO(x) ((x) << S_FW_QP_RES_WR_STATUSPGRO)
+#define G_FW_QP_RES_WR_STATUSPGRO(x) \
+ (((x) >> S_FW_QP_RES_WR_STATUSPGRO) & M_FW_QP_RES_WR_STATUSPGRO)
+#define F_FW_QP_RES_WR_STATUSPGRO V_FW_QP_RES_WR_STATUSPGRO(1U)
+
+#define S_FW_QP_RES_WR_FETCHNS 23
+#define M_FW_QP_RES_WR_FETCHNS 0x1
+#define V_FW_QP_RES_WR_FETCHNS(x) ((x) << S_FW_QP_RES_WR_FETCHNS)
+#define G_FW_QP_RES_WR_FETCHNS(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHNS) & M_FW_QP_RES_WR_FETCHNS)
+#define F_FW_QP_RES_WR_FETCHNS V_FW_QP_RES_WR_FETCHNS(1U)
+
+#define S_FW_QP_RES_WR_FETCHRO 22
+#define M_FW_QP_RES_WR_FETCHRO 0x1
+#define V_FW_QP_RES_WR_FETCHRO(x) ((x) << S_FW_QP_RES_WR_FETCHRO)
+#define G_FW_QP_RES_WR_FETCHRO(x) \
+ (((x) >> S_FW_QP_RES_WR_FETCHRO) & M_FW_QP_RES_WR_FETCHRO)
+#define F_FW_QP_RES_WR_FETCHRO V_FW_QP_RES_WR_FETCHRO(1U)
+
+#define S_FW_QP_RES_WR_HOSTFCMODE 20
+#define M_FW_QP_RES_WR_HOSTFCMODE 0x3
+#define V_FW_QP_RES_WR_HOSTFCMODE(x) ((x) << S_FW_QP_RES_WR_HOSTFCMODE)
+#define G_FW_QP_RES_WR_HOSTFCMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_HOSTFCMODE) & M_FW_QP_RES_WR_HOSTFCMODE)
+
+#define S_FW_QP_RES_WR_CPRIO 19
+#define M_FW_QP_RES_WR_CPRIO 0x1
+#define V_FW_QP_RES_WR_CPRIO(x) ((x) << S_FW_QP_RES_WR_CPRIO)
+#define G_FW_QP_RES_WR_CPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_CPRIO) & M_FW_QP_RES_WR_CPRIO)
+#define F_FW_QP_RES_WR_CPRIO V_FW_QP_RES_WR_CPRIO(1U)
+
+#define S_FW_QP_RES_WR_ONCHIP 18
+#define M_FW_QP_RES_WR_ONCHIP 0x1
+#define V_FW_QP_RES_WR_ONCHIP(x) ((x) << S_FW_QP_RES_WR_ONCHIP)
+#define G_FW_QP_RES_WR_ONCHIP(x) \
+ (((x) >> S_FW_QP_RES_WR_ONCHIP) & M_FW_QP_RES_WR_ONCHIP)
+#define F_FW_QP_RES_WR_ONCHIP V_FW_QP_RES_WR_ONCHIP(1U)
+
+#define S_FW_QP_RES_WR_PCIECHN 16
+#define M_FW_QP_RES_WR_PCIECHN 0x3
+#define V_FW_QP_RES_WR_PCIECHN(x) ((x) << S_FW_QP_RES_WR_PCIECHN)
+#define G_FW_QP_RES_WR_PCIECHN(x) \
+ (((x) >> S_FW_QP_RES_WR_PCIECHN) & M_FW_QP_RES_WR_PCIECHN)
+
+#define S_FW_QP_RES_WR_IQID 0
+#define M_FW_QP_RES_WR_IQID 0xffff
+#define V_FW_QP_RES_WR_IQID(x) ((x) << S_FW_QP_RES_WR_IQID)
+#define G_FW_QP_RES_WR_IQID(x) \
+ (((x) >> S_FW_QP_RES_WR_IQID) & M_FW_QP_RES_WR_IQID)
+
+#define S_FW_QP_RES_WR_DCAEN 31
+#define M_FW_QP_RES_WR_DCAEN 0x1
+#define V_FW_QP_RES_WR_DCAEN(x) ((x) << S_FW_QP_RES_WR_DCAEN)
+#define G_FW_QP_RES_WR_DCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_DCAEN) & M_FW_QP_RES_WR_DCAEN)
+#define F_FW_QP_RES_WR_DCAEN V_FW_QP_RES_WR_DCAEN(1U)
+
+#define S_FW_QP_RES_WR_DCACPU 26
+#define M_FW_QP_RES_WR_DCACPU 0x1f
+#define V_FW_QP_RES_WR_DCACPU(x) ((x) << S_FW_QP_RES_WR_DCACPU)
+#define G_FW_QP_RES_WR_DCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_DCACPU) & M_FW_QP_RES_WR_DCACPU)
+
+#define S_FW_QP_RES_WR_FBMIN 23
+#define M_FW_QP_RES_WR_FBMIN 0x7
+#define V_FW_QP_RES_WR_FBMIN(x) ((x) << S_FW_QP_RES_WR_FBMIN)
+#define G_FW_QP_RES_WR_FBMIN(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMIN) & M_FW_QP_RES_WR_FBMIN)
+
+#define S_FW_QP_RES_WR_FBMAX 20
+#define M_FW_QP_RES_WR_FBMAX 0x7
+#define V_FW_QP_RES_WR_FBMAX(x) ((x) << S_FW_QP_RES_WR_FBMAX)
+#define G_FW_QP_RES_WR_FBMAX(x) \
+ (((x) >> S_FW_QP_RES_WR_FBMAX) & M_FW_QP_RES_WR_FBMAX)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESHO 19
+#define M_FW_QP_RES_WR_CIDXFTHRESHO 0x1
+#define V_FW_QP_RES_WR_CIDXFTHRESHO(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESHO)
+#define G_FW_QP_RES_WR_CIDXFTHRESHO(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESHO) & M_FW_QP_RES_WR_CIDXFTHRESHO)
+#define F_FW_QP_RES_WR_CIDXFTHRESHO V_FW_QP_RES_WR_CIDXFTHRESHO(1U)
+
+#define S_FW_QP_RES_WR_CIDXFTHRESH 16
+#define M_FW_QP_RES_WR_CIDXFTHRESH 0x7
+#define V_FW_QP_RES_WR_CIDXFTHRESH(x) ((x) << S_FW_QP_RES_WR_CIDXFTHRESH)
+#define G_FW_QP_RES_WR_CIDXFTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_CIDXFTHRESH) & M_FW_QP_RES_WR_CIDXFTHRESH)
+
+#define S_FW_QP_RES_WR_EQSIZE 0
+#define M_FW_QP_RES_WR_EQSIZE 0xffff
+#define V_FW_QP_RES_WR_EQSIZE(x) ((x) << S_FW_QP_RES_WR_EQSIZE)
+#define G_FW_QP_RES_WR_EQSIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_EQSIZE) & M_FW_QP_RES_WR_EQSIZE)
+
+#define S_FW_QP_RES_WR_IQANDST 15
+#define M_FW_QP_RES_WR_IQANDST 0x1
+#define V_FW_QP_RES_WR_IQANDST(x) ((x) << S_FW_QP_RES_WR_IQANDST)
+#define G_FW_QP_RES_WR_IQANDST(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDST) & M_FW_QP_RES_WR_IQANDST)
+#define F_FW_QP_RES_WR_IQANDST V_FW_QP_RES_WR_IQANDST(1U)
+
+#define S_FW_QP_RES_WR_IQANUS 14
+#define M_FW_QP_RES_WR_IQANUS 0x1
+#define V_FW_QP_RES_WR_IQANUS(x) ((x) << S_FW_QP_RES_WR_IQANUS)
+#define G_FW_QP_RES_WR_IQANUS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUS) & M_FW_QP_RES_WR_IQANUS)
+#define F_FW_QP_RES_WR_IQANUS V_FW_QP_RES_WR_IQANUS(1U)
+
+#define S_FW_QP_RES_WR_IQANUD 12
+#define M_FW_QP_RES_WR_IQANUD 0x3
+#define V_FW_QP_RES_WR_IQANUD(x) ((x) << S_FW_QP_RES_WR_IQANUD)
+#define G_FW_QP_RES_WR_IQANUD(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANUD) & M_FW_QP_RES_WR_IQANUD)
+
+#define S_FW_QP_RES_WR_IQANDSTINDEX 0
+#define M_FW_QP_RES_WR_IQANDSTINDEX 0xfff
+#define V_FW_QP_RES_WR_IQANDSTINDEX(x) ((x) << S_FW_QP_RES_WR_IQANDSTINDEX)
+#define G_FW_QP_RES_WR_IQANDSTINDEX(x) \
+ (((x) >> S_FW_QP_RES_WR_IQANDSTINDEX) & M_FW_QP_RES_WR_IQANDSTINDEX)
+
+#define S_FW_QP_RES_WR_IQDROPRSS 15
+#define M_FW_QP_RES_WR_IQDROPRSS 0x1
+#define V_FW_QP_RES_WR_IQDROPRSS(x) ((x) << S_FW_QP_RES_WR_IQDROPRSS)
+#define G_FW_QP_RES_WR_IQDROPRSS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDROPRSS) & M_FW_QP_RES_WR_IQDROPRSS)
+#define F_FW_QP_RES_WR_IQDROPRSS V_FW_QP_RES_WR_IQDROPRSS(1U)
+
+#define S_FW_QP_RES_WR_IQGTSMODE 14
+#define M_FW_QP_RES_WR_IQGTSMODE 0x1
+#define V_FW_QP_RES_WR_IQGTSMODE(x) ((x) << S_FW_QP_RES_WR_IQGTSMODE)
+#define G_FW_QP_RES_WR_IQGTSMODE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQGTSMODE) & M_FW_QP_RES_WR_IQGTSMODE)
+#define F_FW_QP_RES_WR_IQGTSMODE V_FW_QP_RES_WR_IQGTSMODE(1U)
+
+#define S_FW_QP_RES_WR_IQPCIECH 12
+#define M_FW_QP_RES_WR_IQPCIECH 0x3
+#define V_FW_QP_RES_WR_IQPCIECH(x) ((x) << S_FW_QP_RES_WR_IQPCIECH)
+#define G_FW_QP_RES_WR_IQPCIECH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQPCIECH) & M_FW_QP_RES_WR_IQPCIECH)
+
+#define S_FW_QP_RES_WR_IQDCAEN 11
+#define M_FW_QP_RES_WR_IQDCAEN 0x1
+#define V_FW_QP_RES_WR_IQDCAEN(x) ((x) << S_FW_QP_RES_WR_IQDCAEN)
+#define G_FW_QP_RES_WR_IQDCAEN(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCAEN) & M_FW_QP_RES_WR_IQDCAEN)
+#define F_FW_QP_RES_WR_IQDCAEN V_FW_QP_RES_WR_IQDCAEN(1U)
+
+#define S_FW_QP_RES_WR_IQDCACPU 6
+#define M_FW_QP_RES_WR_IQDCACPU 0x1f
+#define V_FW_QP_RES_WR_IQDCACPU(x) ((x) << S_FW_QP_RES_WR_IQDCACPU)
+#define G_FW_QP_RES_WR_IQDCACPU(x) \
+ (((x) >> S_FW_QP_RES_WR_IQDCACPU) & M_FW_QP_RES_WR_IQDCACPU)
+
+#define S_FW_QP_RES_WR_IQINTCNTTHRESH 4
+#define M_FW_QP_RES_WR_IQINTCNTTHRESH 0x3
+#define V_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ ((x) << S_FW_QP_RES_WR_IQINTCNTTHRESH)
+#define G_FW_QP_RES_WR_IQINTCNTTHRESH(x) \
+ (((x) >> S_FW_QP_RES_WR_IQINTCNTTHRESH) & M_FW_QP_RES_WR_IQINTCNTTHRESH)
+
+#define S_FW_QP_RES_WR_IQO 3
+#define M_FW_QP_RES_WR_IQO 0x1
+#define V_FW_QP_RES_WR_IQO(x) ((x) << S_FW_QP_RES_WR_IQO)
+#define G_FW_QP_RES_WR_IQO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQO) & M_FW_QP_RES_WR_IQO)
+#define F_FW_QP_RES_WR_IQO V_FW_QP_RES_WR_IQO(1U)
+
+#define S_FW_QP_RES_WR_IQCPRIO 2
+#define M_FW_QP_RES_WR_IQCPRIO 0x1
+#define V_FW_QP_RES_WR_IQCPRIO(x) ((x) << S_FW_QP_RES_WR_IQCPRIO)
+#define G_FW_QP_RES_WR_IQCPRIO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQCPRIO) & M_FW_QP_RES_WR_IQCPRIO)
+#define F_FW_QP_RES_WR_IQCPRIO V_FW_QP_RES_WR_IQCPRIO(1U)
+
+#define S_FW_QP_RES_WR_IQESIZE 0
+#define M_FW_QP_RES_WR_IQESIZE 0x3
+#define V_FW_QP_RES_WR_IQESIZE(x) ((x) << S_FW_QP_RES_WR_IQESIZE)
+#define G_FW_QP_RES_WR_IQESIZE(x) \
+ (((x) >> S_FW_QP_RES_WR_IQESIZE) & M_FW_QP_RES_WR_IQESIZE)
+
+#define S_FW_QP_RES_WR_IQNS 31
+#define M_FW_QP_RES_WR_IQNS 0x1
+#define V_FW_QP_RES_WR_IQNS(x) ((x) << S_FW_QP_RES_WR_IQNS)
+#define G_FW_QP_RES_WR_IQNS(x) \
+ (((x) >> S_FW_QP_RES_WR_IQNS) & M_FW_QP_RES_WR_IQNS)
+#define F_FW_QP_RES_WR_IQNS V_FW_QP_RES_WR_IQNS(1U)
+
+#define S_FW_QP_RES_WR_IQRO 30
+#define M_FW_QP_RES_WR_IQRO 0x1
+#define V_FW_QP_RES_WR_IQRO(x) ((x) << S_FW_QP_RES_WR_IQRO)
+#define G_FW_QP_RES_WR_IQRO(x) \
+ (((x) >> S_FW_QP_RES_WR_IQRO) & M_FW_QP_RES_WR_IQRO)
+#define F_FW_QP_RES_WR_IQRO V_FW_QP_RES_WR_IQRO(1U)
+
+
struct fw_ri_res {
union fw_ri_restype {
struct fw_ri_res_sqrq {
@@ -1586,6 +1957,13 @@ struct fw_ri_res_wr {
#endif
};
+#define S_FW_RI_RES_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_RES_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ ((x) << S_FW_RI_RES_WR_TRANSPORT_TYPE)
+#define G_FW_RI_RES_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_RES_WR_TRANSPORT_TYPE) & M_FW_RI_RES_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_RES_WR_VFN 8
#define M_FW_RI_RES_WR_VFN 0xff
#define V_FW_RI_RES_WR_VFN(x) ((x) << S_FW_RI_RES_WR_VFN)
@@ -2092,8 +2470,18 @@ enum fw_ri_init_rqeqid_srq {
FW_RI_INIT_RQEQID_SRQ = 1 << 31,
};
+enum fw_nvmet_ulpsubmode {
+ FW_NVMET_ULPSUBMODE_HCRC = 0x1<<0,
+ FW_NVMET_ULPSUBMODE_DCRC = 0x1<<1,
+ FW_NVMET_ULPSUBMODE_ING_DIR = 0x1<<2,
+ FW_NVMET_ULPSUBMODE_SRQ_ENABLE = 0x1<<3,
+ FW_NVMET_ULPSUBMODE_PER_PDU_CMP = 0x1<<4,
+ FW_NVMET_ULPSUBMODE_PI_ENABLE = 0x1<<5,
+ FW_NVMET_ULPSUBMODE_USER_MODE = 0x1<<6,
+};
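+
+/*
+ * Illustrative sketch, not from the firmware spec: these bits look like
+ * they are OR'ed into the ulpsubmode byte of fw_ri_nvmet_init below,
+ * e.g. to enable header and data digests on an NVMe queue:
+ *
+ *	init->ulpsubmode = FW_NVMET_ULPSUBMODE_HCRC |
+ *	    FW_NVMET_ULPSUBMODE_DCRC;
+ */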
+
struct fw_ri_wr {
- __be32 op_compl;
+ __be32 op_compl; /* op_to_transport_type */
__be32 flowid_len16;
__u64 cookie;
union fw_ri {
@@ -2123,6 +2511,55 @@ struct fw_ri_wr {
struct fw_ri_send_wr send;
} u;
} init;
+ struct fw_ri_rocev2_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 rocev2_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 ord_max;
+ __be32 ird_max;
+ __be32 psn_pkd;
+ __be32 epsn_pkd;
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __be32 q_key;
+ __u8 pkthdrsize;
+ __u8 r;
+ __be16 p_key;
+ //struct cpl_tx_tnl_lso tnl_lso;
+ __u8 tnl_lso[48]; /* cpl_tx_tnl_lso + cpl_tx_pkt_xt */
+#ifndef C99_NOT_SUPPORTED
+ struct fw_ri_immd pkthdr[0];
+#endif
+ } rocev2_init;
+ struct fw_ri_nvmet_init {
+ __u8 type;
+ __u8 r3[3];
+ __u8 nvmt_flags;
+ __u8 qp_caps;
+ __be16 nrqe;
+ __be32 pdid;
+ __be32 qpid;
+ __be32 sq_eqid;
+ __be32 rq_eqid;
+ __be32 scqid;
+ __be32 rcqid;
+ __be32 r4[4];
+ __be32 hwrqsize;
+ __be32 hwrqaddr;
+ __u8 ulpsubmode;
+ __u8 nvmt_pda_cmp_imm_sz;
+ __be16 r7;
+ __be32 tpt_offset_t10_config;
+ __be32 r8[2];
+ } nvmet_init;
struct fw_ri_fini {
__u8 type;
__u8 r3[7];
@@ -2137,6 +2574,12 @@ struct fw_ri_wr {
} u;
};
+#define S_FW_RI_WR_TRANSPORT_TYPE 16
+#define M_FW_RI_WR_TRANSPORT_TYPE 0x7
+#define V_FW_RI_WR_TRANSPORT_TYPE(x) ((x) << S_FW_RI_WR_TRANSPORT_TYPE)
+#define G_FW_RI_WR_TRANSPORT_TYPE(x) \
+ (((x) >> S_FW_RI_WR_TRANSPORT_TYPE) & M_FW_RI_WR_TRANSPORT_TYPE)
+
#define S_FW_RI_WR_MPAREQBIT 7
#define M_FW_RI_WR_MPAREQBIT 0x1
#define V_FW_RI_WR_MPAREQBIT(x) ((x) << S_FW_RI_WR_MPAREQBIT)
@@ -2157,6 +2600,414 @@ struct fw_ri_wr {
#define G_FW_RI_WR_P2PTYPE(x) \
(((x) >> S_FW_RI_WR_P2PTYPE) & M_FW_RI_WR_P2PTYPE)
+#define S_FW_RI_WR_PSN 0
+#define M_FW_RI_WR_PSN 0xffffff
+#define V_FW_RI_WR_PSN(x) ((x) << S_FW_RI_WR_PSN)
+#define G_FW_RI_WR_PSN(x) (((x) >> S_FW_RI_WR_PSN) & M_FW_RI_WR_PSN)
+
+#define S_FW_RI_WR_EPSN 0
+#define M_FW_RI_WR_EPSN 0xffffff
+#define V_FW_RI_WR_EPSN(x) ((x) << S_FW_RI_WR_EPSN)
+#define G_FW_RI_WR_EPSN(x) (((x) >> S_FW_RI_WR_EPSN) & M_FW_RI_WR_EPSN)
+
+#define S_FW_RI_WR_NVMT_PDA 3
+#define M_FW_RI_WR_NVMT_PDA 0x1f
+#define V_FW_RI_WR_NVMT_PDA(x) ((x) << S_FW_RI_WR_NVMT_PDA)
+#define G_FW_RI_WR_NVMT_PDA(x) \
+ (((x) >> S_FW_RI_WR_NVMT_PDA) & M_FW_RI_WR_NVMT_PDA)
+
+#define S_FW_RI_WR_CMP_IMM_SZ 1
+#define M_FW_RI_WR_CMP_IMM_SZ 0x3
+#define V_FW_RI_WR_CMP_IMM_SZ(x) ((x) << S_FW_RI_WR_CMP_IMM_SZ)
+#define G_FW_RI_WR_CMP_IMM_SZ(x) \
+ (((x) >> S_FW_RI_WR_CMP_IMM_SZ) & M_FW_RI_WR_CMP_IMM_SZ)
+
+#define S_FW_RI_WR_TPT_OFFSET 10
+#define M_FW_RI_WR_TPT_OFFSET 0x3fffff
+#define V_FW_RI_WR_TPT_OFFSET(x) ((x) << S_FW_RI_WR_TPT_OFFSET)
+#define G_FW_RI_WR_TPT_OFFSET(x) \
+ (((x) >> S_FW_RI_WR_TPT_OFFSET) & M_FW_RI_WR_TPT_OFFSET)
+
+#define S_FW_RI_WR_T10_CONFIG 0
+#define M_FW_RI_WR_T10_CONFIG 0x3ff
+#define V_FW_RI_WR_T10_CONFIG(x) ((x) << S_FW_RI_WR_T10_CONFIG)
+#define G_FW_RI_WR_T10_CONFIG(x) \
+ (((x) >> S_FW_RI_WR_T10_CONFIG) & M_FW_RI_WR_T10_CONFIG)
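+
+/*
+ * Illustrative sketch (assumption): tpt_offset_t10_config in
+ * fw_ri_nvmet_init appears to carry both of the fields above in one
+ * big-endian word:
+ *
+ *	init->tpt_offset_t10_config = htobe32(
+ *	    V_FW_RI_WR_TPT_OFFSET(tpt_off) |
+ *	    V_FW_RI_WR_T10_CONFIG(t10_cfg));
+ */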
+
+
+/******************************************************************************
+ * R o C E V 2 W O R K R E Q U E S T s
+ **************************************/
+enum fw_rocev2_wr_opcode {
+ /* RC */
+ FW_ROCEV2_RC_SEND_FIRST = 0x00,
+ FW_ROCEV2_RC_SEND_MIDDLE = 0x01,
+ FW_ROCEV2_RC_SEND_LAST = 0x02,
+ FW_ROCEV2_RC_SEND_LAST_WITH_IMMD = 0x03,
+ FW_ROCEV2_RC_SEND_ONLY = 0x04,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_IMMD = 0x05,
+ FW_ROCEV2_RC_RDMA_WRITE_FIRST = 0x06,
+ FW_ROCEV2_RC_RDMA_WRITE_MIDDLE = 0x07,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST = 0x08,
+ FW_ROCEV2_RC_RDMA_WRITE_LAST_WITH_IMMD = 0x09,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY = 0x0a,
+ FW_ROCEV2_RC_RDMA_WRITE_ONLY_WITH_IMMD = 0x0b,
+ FW_ROCEV2_RC_RDMA_READ_REQ = 0x0c,
+ FW_ROCEV2_RC_RDMA_READ_RESP_FIRST = 0x0d,
+ FW_ROCEV2_RC_RDMA_READ_RESP_MIDDLE = 0x0e,
+ FW_ROCEV2_RC_RDMA_READ_RESP_LAST = 0x0f,
+ FW_ROCEV2_RC_RDMA_READ_RESP_ONLY = 0x10,
+ FW_ROCEV2_RC_ACK = 0x11,
+ FW_ROCEV2_RC_ATOMIC_ACK = 0x12,
+ FW_ROCEV2_RC_CMP_SWAP = 0x13,
+ FW_ROCEV2_RC_FETCH_ADD = 0x14,
+ FW_ROCEV2_RC_SEND_LAST_WITH_INV = 0x16,
+ FW_ROCEV2_RC_SEND_ONLY_WITH_INV = 0x17,
+
+ /* XRC */
+ FW_ROCEV2_XRC_SEND_FIRST = 0xa0,
+ FW_ROCEV2_XRC_SEND_MIDDLE = 0xa1,
+ FW_ROCEV2_XRC_SEND_LAST = 0xa2,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_IMMD = 0xa3,
+ FW_ROCEV2_XRC_SEND_ONLY = 0xa4,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_IMMD = 0xa5,
+ FW_ROCEV2_XRC_RDMA_WRITE_FIRST = 0xa6,
+ FW_ROCEV2_XRC_RDMA_WRITE_MIDDLE = 0xa7,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST = 0xa8,
+ FW_ROCEV2_XRC_RDMA_WRITE_LAST_WITH_IMMD = 0xa9,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY = 0xaa,
+ FW_ROCEV2_XRC_RDMA_WRITE_ONLY_WITH_IMMD = 0xab,
+ FW_ROCEV2_XRC_RDMA_READ_REQ = 0xac,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_FIRST = 0xad,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_MIDDLE = 0xae,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_LAST = 0xaf,
+ FW_ROCEV2_XRC_RDMA_READ_RESP_ONLY = 0xb0,
+ FW_ROCEV2_XRC_ACK = 0xb1,
+ FW_ROCEV2_XRC_ATOMIC_ACK = 0xb2,
+ FW_ROCEV2_XRC_CMP_SWAP = 0xb3,
+ FW_ROCEV2_XRC_FETCH_ADD = 0xb4,
+ FW_ROCEV2_XRC_SEND_LAST_WITH_INV = 0xb6,
+ FW_ROCEV2_XRC_SEND_ONLY_WITH_INV = 0xb7,
+};
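+
+/*
+ * Editorial note: these opcode values appear to track the InfiniBand
+ * BTH opcode encoding (RC opcodes 0x00-0x17, XRC opcodes based at
+ * 0xa0), which would let the firmware place them in packets unchanged.
+ */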
+
+#if 0
+enum fw_rocev2_cqe_err {
+ /* TODO */
+};
+#endif
+
+struct fw_ri_v2_rdma_write_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be32 r4[2];
+ __be32 r5;
+ __be32 immd_data;
+ __be64 to_sink;
+ __be32 stag_sink;
+ __be32 plen;
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_RDMA_WRITE_WR_PSN 0
+#define M_FW_RI_V2_RDMA_WRITE_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_WRITE_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_WRITE_WR_PSN)
+#define G_FW_RI_V2_RDMA_WRITE_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_WRITE_WR_PSN) & M_FW_RI_V2_RDMA_WRITE_WR_PSN)
+
+struct fw_ri_v2_send_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 stag_inv;
+ __be32 plen;
+ __be32 sendop_psn;
+ __u8 immdlen;
+ __u8 r3[3];
+ __be32 r4;
+ /* CPL_TX_TNL_LSO, CPL_TX_PKT_XT and Eth/IP/UDP/BTH
+ * headers in UD QP case, align size to 16B */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_immd immd_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_RI_V2_SEND_WR_SENDOP 24
+#define M_FW_RI_V2_SEND_WR_SENDOP 0xff
+#define V_FW_RI_V2_SEND_WR_SENDOP(x) ((x) << S_FW_RI_V2_SEND_WR_SENDOP)
+#define G_FW_RI_V2_SEND_WR_SENDOP(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_SENDOP) & M_FW_RI_V2_SEND_WR_SENDOP)
+
+#define S_FW_RI_V2_SEND_WR_PSN 0
+#define M_FW_RI_V2_SEND_WR_PSN 0xffffff
+#define V_FW_RI_V2_SEND_WR_PSN(x) ((x) << S_FW_RI_V2_SEND_WR_PSN)
+#define G_FW_RI_V2_SEND_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_SEND_WR_PSN) & M_FW_RI_V2_SEND_WR_PSN)
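+
+/*
+ * Illustrative sketch (assumption): sendop_psn packs the 8-bit send
+ * opcode and the 24-bit PSN into a single big-endian word, e.g. for a
+ * hypothetical RC send-only work request:
+ *
+ *	wr->sendop_psn = htobe32(
+ *	    V_FW_RI_V2_SEND_WR_SENDOP(FW_ROCEV2_RC_SEND_ONLY) |
+ *	    V_FW_RI_V2_SEND_WR_PSN(psn & M_FW_RI_V2_SEND_WR_PSN));
+ */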
+
+struct fw_ri_v2_rdma_read_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 psn_pkd;
+ __be64 to_src;
+ __be32 stag_src;
+ __be32 plen;
+ struct fw_ri_isgl isgl_sink; /* RRQ, max 4 nsge in rocev2, 1 in iwarp */
+};
+
+#define S_FW_RI_V2_RDMA_READ_WR_PSN 0
+#define M_FW_RI_V2_RDMA_READ_WR_PSN 0xffffff
+#define V_FW_RI_V2_RDMA_READ_WR_PSN(x) ((x) << S_FW_RI_V2_RDMA_READ_WR_PSN)
+#define G_FW_RI_V2_RDMA_READ_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_RDMA_READ_WR_PSN) & M_FW_RI_V2_RDMA_READ_WR_PSN)
+
+struct fw_ri_v2_atomic_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2; /* set to 0 */
+ __be32 atomicop_psn;
+};
+
+#define S_FW_RI_V2_ATOMIC_WR_ATOMICOP 28
+#define M_FW_RI_V2_ATOMIC_WR_ATOMICOP 0xf
+#define V_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ ((x) << S_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+#define G_FW_RI_V2_ATOMIC_WR_ATOMICOP(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_ATOMICOP) & M_FW_RI_V2_ATOMIC_WR_ATOMICOP)
+
+#define S_FW_RI_V2_ATOMIC_WR_PSN 0
+#define M_FW_RI_V2_ATOMIC_WR_PSN 0xffffff
+#define V_FW_RI_V2_ATOMIC_WR_PSN(x) ((x) << S_FW_RI_V2_ATOMIC_WR_PSN)
+#define G_FW_RI_V2_ATOMIC_WR_PSN(x) \
+ (((x) >> S_FW_RI_V2_ATOMIC_WR_PSN) & M_FW_RI_V2_ATOMIC_WR_PSN)
+
+struct fw_ri_v2_bind_mw_wr {
+ __u8 opcode;
+ __u8 flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r5;
+ __be32 r6[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag_mr;
+ __be32 stag_mw;
+ __be32 r3;
+ __be64 len_mw;
+ __be64 va_fbo;
+ __be64 r4;
+};
+
+
+#define S_FW_RI_V2_BIND_MW_WR_QPBINDE 6
+#define M_FW_RI_V2_BIND_MW_WR_QPBINDE 0x1
+#define V_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define G_FW_RI_V2_BIND_MW_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_QPBINDE) & M_FW_RI_V2_BIND_MW_WR_QPBINDE)
+#define F_FW_RI_V2_BIND_MW_WR_QPBINDE V_FW_RI_V2_BIND_MW_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_NS 5
+#define M_FW_RI_V2_BIND_MW_WR_NS 0x1
+#define V_FW_RI_V2_BIND_MW_WR_NS(x) ((x) << S_FW_RI_V2_BIND_MW_WR_NS)
+#define G_FW_RI_V2_BIND_MW_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_NS) & M_FW_RI_V2_BIND_MW_WR_NS)
+#define F_FW_RI_V2_BIND_MW_WR_NS V_FW_RI_V2_BIND_MW_WR_NS(1U)
+
+#define S_FW_RI_V2_BIND_MW_WR_DCACPU 0
+#define M_FW_RI_V2_BIND_MW_WR_DCACPU 0x1f
+#define V_FW_RI_V2_BIND_MW_WR_DCACPU(x) ((x) << S_FW_RI_V2_BIND_MW_WR_DCACPU)
+#define G_FW_RI_V2_BIND_MW_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_BIND_MW_WR_DCACPU) & M_FW_RI_V2_BIND_MW_WR_DCACPU)
+
+struct fw_ri_v2_fr_nsmr_wr {
+ __u8 opcode;
+ __u8 v2_flags;
+ __u16 wrid;
+ __u8 r1[3];
+ __u8 len16;
+ __be32 r2;
+ __be32 r3;
+ __be32 r4[2];
+ __u8 qpbinde_to_dcacpu;
+ __u8 pgsz_shift;
+ __u8 addr_type;
+ __u8 mem_perms;
+ __be32 stag;
+ __be32 len_hi;
+ __be32 len_lo;
+ __be32 va_hi;
+ __be32 va_lo_fbo;
+};
+
+#define S_FW_RI_V2_FR_NSMR_WR_QPBINDE 6
+#define M_FW_RI_V2_FR_NSMR_WR_QPBINDE 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ ((x) << S_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define G_FW_RI_V2_FR_NSMR_WR_QPBINDE(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_QPBINDE) & M_FW_RI_V2_FR_NSMR_WR_QPBINDE)
+#define F_FW_RI_V2_FR_NSMR_WR_QPBINDE V_FW_RI_V2_FR_NSMR_WR_QPBINDE(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_NS 5
+#define M_FW_RI_V2_FR_NSMR_WR_NS 0x1
+#define V_FW_RI_V2_FR_NSMR_WR_NS(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_NS)
+#define G_FW_RI_V2_FR_NSMR_WR_NS(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_NS) & M_FW_RI_V2_FR_NSMR_WR_NS)
+#define F_FW_RI_V2_FR_NSMR_WR_NS V_FW_RI_V2_FR_NSMR_WR_NS(1U)
+
+#define S_FW_RI_V2_FR_NSMR_WR_DCACPU 0
+#define M_FW_RI_V2_FR_NSMR_WR_DCACPU 0x1f
+#define V_FW_RI_V2_FR_NSMR_WR_DCACPU(x) ((x) << S_FW_RI_V2_FR_NSMR_WR_DCACPU)
+#define G_FW_RI_V2_FR_NSMR_WR_DCACPU(x) \
+ (((x) >> S_FW_RI_V2_FR_NSMR_WR_DCACPU) & M_FW_RI_V2_FR_NSMR_WR_DCACPU)
+
+/******************************************************************************
+ * N V M E - T C P W O R K R E Q U E S T s
+ *****************************************************************************/
+
+struct fw_nvmet_v2_fr_nsmr_wr {
+ __be32 op_to_wrid;
+ __be32 flowid_len16;
+ __be32 r3;
+ __be32 r4;
+ __be32 mem_write_addr32;
+ __u8 r5;
+ __u8 imm_data_len32;
+ union {
+ __be16 dsgl_data_len32;
+ __be16 reset_mem_len32;
+ };
+ __be64 r6;
+};
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 23
+#define M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define G_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL)
+#define F_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL \
+ V_FW_NVMET_V2_FR_NSMR_WR_TPTE_PBL(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 22
+#define M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM 0x1
+#define V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define G_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM) & \
+ M_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM)
+#define F_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM \
+ V_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM(1U)
+
+#define S_FW_NVMET_V2_FR_NSMR_WR_WRID 0
+#define M_FW_NVMET_V2_FR_NSMR_WR_WRID 0xffff
+#define V_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ ((x) << S_FW_NVMET_V2_FR_NSMR_WR_WRID)
+#define G_FW_NVMET_V2_FR_NSMR_WR_WRID(x) \
+ (((x) >> S_FW_NVMET_V2_FR_NSMR_WR_WRID) & M_FW_NVMET_V2_FR_NSMR_WR_WRID)
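+
+/*
+ * Illustrative sketch (assumption): op_to_wrid would combine the usual
+ * FW_WR opcode field in the top byte with the flags and work-request
+ * id defined above:
+ *
+ *	wr->op_to_wrid = htobe32(V_FW_WR_OP(opcode) |
+ *	    F_FW_NVMET_V2_FR_NSMR_WR_RESET_MEM |
+ *	    V_FW_NVMET_V2_FR_NSMR_WR_WRID(wrid));
+ */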
+
+struct fw_v2_nvmet_tx_data_wr {
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 r4;
+ __be16 r5;
+ __be16 wrid;
+ __be32 r6;
+ __be32 seqno;
+ __be32 plen;
+ __be32 flags_hi_to_flags_lo;
+ /* optional immdlen data (fw_tx_pi_hdr, iso cpl, nvmet header etc) */
+#ifndef C99_NOT_SUPPORTED
+ union {
+ struct fw_ri_dsgl dsgl_src[0];
+ struct fw_ri_isgl isgl_src[0];
+ } u;
+#endif
+};
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 10
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI 0x3fffff
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_HI)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 9
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_ISO(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 8
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_PI(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 7
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_DCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 6
+#define M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC 0x1
+#define V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define G_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC) & \
+ M_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC)
+#define F_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC \
+ V_FW_V2_NVMET_TX_DATA_WR_ULPSUBMODE_HCRC(1U)
+
+#define S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0
+#define M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO 0x3f
+#define V_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ ((x) << S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+#define G_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO(x) \
+ (((x) >> S_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO) & \
+ M_FW_V2_NVMET_TX_DATA_WR_FLAGS_LO)
+
+
/******************************************************************************
* F O i S C S I W O R K R E Q U E S T s
*********************************************/
@@ -3827,17 +4678,17 @@ struct fw_pi_error {
(((x) >> S_FW_PI_ERROR_ERR_TYPE) & M_FW_PI_ERROR_ERR_TYPE)
struct fw_tlstx_data_wr {
- __be32 op_to_immdlen;
- __be32 flowid_len16;
- __be32 plen;
- __be32 lsodisable_to_flags;
- __be32 r5;
- __be32 ctxloc_to_exp;
- __be16 mfs;
- __be16 adjustedplen_pkd;
- __be16 expinplenmax_pkd;
- __u8 pdusinplenmax_pkd;
- __u8 r10;
+ __be32 op_to_immdlen;
+ __be32 flowid_len16;
+ __be32 plen;
+ __be32 lsodisable_to_flags;
+ __be32 r5;
+ __be32 ctxloc_to_exp;
+ __be16 mfs;
+ __be16 adjustedplen_pkd;
+ __be16 expinplenmax_pkd;
+ __u8 pdusinplenmax_pkd;
+ __u8 r10;
};
#define S_FW_TLSTX_DATA_WR_OPCODE 24
@@ -4092,6 +4943,265 @@ struct fw_tls_tunnel_ofld_wr {
__be32 r4;
};
+struct fw_crypto_update_sa_wr {
+ __u8 opcode;
+ __u8 saop_to_txrx;
+ __u8 vfn;
+ __u8 r1;
+ __u8 r2[3];
+ __u8 len16;
+ __be64 cookie;
+ __be16 r3;
+ __be16 ipsecidx;
+ __be32 SPI;
+ __be64 dip_hi;
+ __be64 dip_lo;
+ __be64 lip_hi;
+ __be64 lip_lo;
+ union fw_crypto_update_sa_sa {
+ struct egress_sa {
+ __be32 valid_SPI_hi;
+ __be32 SPI_lo_eSeqNum_hi;
+ __be32 eSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_keyID;
+ } egress;
+ struct ingress_sa {
+ __be32 valid_to_iSeqNum_hi;
+ __be32 iSeqNum_mi;
+ __be32 iSeqNum_lo_Salt_hi;
+ __be32 Salt_lo_to_IPVer;
+ } ingress;
+ } sa;
+ union fw_crypto_update_sa_key {
+ struct _aes128 {
+ __u8 key128[16];
+ __u8 H128[16];
+ __u8 rsvd[16];
+ } aes128;
+ struct _aes192 {
+ __u8 key192[24];
+ __be64 r3;
+ __u8 H192[16];
+ } aes192;
+ struct _aes256 {
+ __u8 key256[32];
+ __u8 H256[16];
+ } aes256;
+ } key;
+};
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SAOP 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_SAOP 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SAOP(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SAOP) & M_FW_CRYPTO_UPDATE_SA_WR_SAOP)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SAOP V_FW_CRYPTO_UPDATE_SA_WR_SAOP(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_TXRX 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_TXRX 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define G_FW_CRYPTO_UPDATE_SA_WR_TXRX(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_TXRX) & M_FW_CRYPTO_UPDATE_SA_WR_TXRX)
+#define F_FW_CRYPTO_UPDATE_SA_WR_TXRX V_FW_CRYPTO_UPDATE_SA_WR_TXRX(1U)
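+
+/*
+ * Illustrative sketch (assumption): saop_to_txrx is a single byte that
+ * packs the SA operation, mode and direction bits defined above, e.g.:
+ *
+ *	wr->saop_to_txrx = F_FW_CRYPTO_UPDATE_SA_WR_SAOP |
+ *	    F_FW_CRYPTO_UPDATE_SA_WR_TXRX;
+ */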
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SPI_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SPI_LO)
+#define F_FW_CRYPTO_UPDATE_SA_WR_SPI_LO V_FW_CRYPTO_UPDATE_SA_WR_SPI_LO(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI 0x7fffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 4
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE \
+ V_FW_CRYPTO_UPDATE_SA_WR_ESN_ENABLE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYID 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYID 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYID) & M_FW_CRYPTO_UPDATE_SA_WR_KEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_VALID 31
+#define M_FW_CRYPTO_UPDATE_SA_WR_VALID 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_VALID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_VALID) & M_FW_CRYPTO_UPDATE_SA_WR_VALID)
+#define F_FW_CRYPTO_UPDATE_SA_WR_VALID V_FW_CRYPTO_UPDATE_SA_WR_VALID(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 12
+#define M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID 0xfff
+#define V_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+#define G_FW_CRYPTO_UPDATE_SA_WR_EGKEYID(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_EGKEYID) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_EGKEYID)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 11
+#define M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN \
+ V_FW_CRYPTO_UPDATE_SA_WR_PADCHKEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW 0xf
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ESNWINDOW)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ISEQNUM_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI 0x7f
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_HI(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_HI) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_HI)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 7
+#define M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO 0x1ffffff
+#define V_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+#define G_FW_CRYPTO_UPDATE_SA_WR_SALT_LO(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_SALT_LO) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_SALT_LO)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 5
+#define M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_KEYLEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_KEYLEN) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_KEYLEN)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 3
+#define M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH 0x3
+#define V_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH) & \
+ M_FW_CRYPTO_UPDATE_SA_WR_ICVWIDTH)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_ESNEN 2
+#define M_FW_CRYPTO_UPDATE_SA_WR_ESNEN 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define G_FW_CRYPTO_UPDATE_SA_WR_ESNEN(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_ESNEN) & M_FW_CRYPTO_UPDATE_SA_WR_ESNEN)
+#define F_FW_CRYPTO_UPDATE_SA_WR_ESNEN V_FW_CRYPTO_UPDATE_SA_WR_ESNEN(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_MODE 1
+#define M_FW_CRYPTO_UPDATE_SA_WR_MODE 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define G_FW_CRYPTO_UPDATE_SA_WR_MODE(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_MODE) & M_FW_CRYPTO_UPDATE_SA_WR_MODE)
+#define F_FW_CRYPTO_UPDATE_SA_WR_MODE V_FW_CRYPTO_UPDATE_SA_WR_MODE(1U)
+
+#define S_FW_CRYPTO_UPDATE_SA_WR_IPVER 0
+#define M_FW_CRYPTO_UPDATE_SA_WR_IPVER 0x1
+#define V_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ ((x) << S_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define G_FW_CRYPTO_UPDATE_SA_WR_IPVER(x) \
+ (((x) >> S_FW_CRYPTO_UPDATE_SA_WR_IPVER) & M_FW_CRYPTO_UPDATE_SA_WR_IPVER)
+#define F_FW_CRYPTO_UPDATE_SA_WR_IPVER V_FW_CRYPTO_UPDATE_SA_WR_IPVER(1U)
+
/******************************************************************************
* C O M M A N D s
*********************/
@@ -4157,11 +5267,12 @@ enum fw_cmd_opcodes {
FW_FCOE_SPARAMS_CMD = 0x35,
FW_FCOE_STATS_CMD = 0x37,
FW_FCOE_FCF_CMD = 0x38,
- FW_DCB_IEEE_CMD = 0x3a,
- FW_DIAG_CMD = 0x3d,
+ FW_DCB_IEEE_CMD = 0x3a,
+ FW_DIAG_CMD = 0x3d,
FW_PTP_CMD = 0x3e,
FW_HMA_CMD = 0x3f,
- FW_LASTC2E_CMD = 0x40,
+ FW_JBOF_WIN_REG_CMD = 0x40,
+ FW_LASTC2E_CMD = 0x41,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
};
@@ -4246,7 +5357,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
FW_LDST_ADDRSPC_FUNC_I2C = 0x002A, /* legacy */
- FW_LDST_ADDRSPC_LE = 0x0030,
+ FW_LDST_ADDRSPC_LE = 0x0030,
FW_LDST_ADDRSPC_I2C = 0x0038,
FW_LDST_ADDRSPC_PCIE_CFGS = 0x0040,
FW_LDST_ADDRSPC_PCIE_DBG = 0x0041,
@@ -4665,11 +5776,17 @@ enum fw_caps_config_nic {
enum fw_caps_config_toe {
FW_CAPS_CONFIG_TOE = 0x00000001,
+ FW_CAPS_CONFIG_TOE_SENDPATH = 0x00000002,
};
enum fw_caps_config_rdma {
FW_CAPS_CONFIG_RDMA_RDDP = 0x00000001,
FW_CAPS_CONFIG_RDMA_RDMAC = 0x00000002,
+ FW_CAPS_CONFIG_RDMA_ROCEV2 = 0x00000004,
+};
+
+enum fw_caps_config_nvme {
+ FW_CAPS_CONFIG_NVME_TCP = 0x00000001,
};
enum fw_caps_config_iscsi {
@@ -4687,8 +5804,9 @@ enum fw_caps_config_iscsi {
enum fw_caps_config_crypto {
FW_CAPS_CONFIG_CRYPTO_LOOKASIDE = 0x00000001,
FW_CAPS_CONFIG_TLSKEYS = 0x00000002,
- FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004,
+ FW_CAPS_CONFIG_IPSEC_INLINE = 0x00000004, /* NIC over ipsecofld */
FW_CAPS_CONFIG_TLS_HW = 0x00000008,
 FW_CAPS_CONFIG_OFLD_OVER_IPSEC_INLINE = 0x00000010, /* ofld over ipsecofld */
};
enum fw_caps_config_fcoe {
@@ -4716,7 +5834,7 @@ struct fw_caps_config_cmd {
__be16 nbmcaps;
__be16 linkcaps;
__be16 switchcaps;
- __be16 r3;
+ __be16 nvmecaps;
__be16 niccaps;
__be16 toecaps;
__be16 rdmacaps;
@@ -4840,6 +5958,8 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_DEV_512SGL_MR = 0x30,
FW_PARAMS_PARAM_DEV_KTLS_HW = 0x31,
FW_PARAMS_PARAM_DEV_VI_ENABLE_INGRESS_AFTER_LINKUP = 0x32,
+ FW_PARAMS_PARAM_DEV_TID_QID_SEL_MASK = 0x33,
+ FW_PARAMS_PARAM_DEV_TX_TPCHMAP = 0x3A,
};
/*
@@ -4911,6 +6031,8 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+ /* There is no separate STAG/PBL START/END for nvmet; it uses the
+ * same rdma stag/pbl memory range. */
FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
@@ -4943,7 +6065,7 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_HPFILTER_START = 0x32,
FW_PARAMS_PARAM_PFVF_HPFILTER_END = 0x33,
FW_PARAMS_PARAM_PFVF_TLS_START = 0x34,
- FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
+ FW_PARAMS_PARAM_PFVF_TLS_END = 0x35,
FW_PARAMS_PARAM_PFVF_RAWF_START = 0x36,
FW_PARAMS_PARAM_PFVF_RAWF_END = 0x37,
FW_PARAMS_PARAM_PFVF_RSSKEYINFO = 0x38,
@@ -4955,6 +6077,13 @@ enum fw_params_param_pfvf {
FW_PARAMS_PARAM_PFVF_GET_SMT_START = 0x3E,
FW_PARAMS_PARAM_PFVF_GET_SMT_SIZE = 0x3F,
FW_PARAMS_PARAM_PFVF_LINK_STATE = 0x40,
+ FW_PARAMS_PARAM_PFVF_RRQ_START = 0x41,
+ FW_PARAMS_PARAM_PFVF_RRQ_END = 0x42,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_START = 0x43,
+ FW_PARAMS_PARAM_PFVF_PKTHDR_END = 0x44,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TUNNEL = 0x45,
+ FW_PARAMS_PARAM_PFVF_NIPSEC_TRANSPORT = 0x46,
+ FW_PARAMS_PARAM_PFVF_OFLD_NIPSEC_TUNNEL = 0x47,
};
/*
@@ -4984,6 +6113,19 @@ enum fw_params_param_dmaq {
FW_PARAMS_PARAM_DMAQ_FLM_DCA = 0x30
};
+#define S_T7_DMAQ_CONM_CTXT_CNGTPMODE 0
+#define M_T7_DMAQ_CONM_CTXT_CNGTPMODE 0x3
+#define V_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) ((x) << S_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+#define G_T7_DMAQ_CONM_CTXT_CNGTPMODE(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CNGTPMODE) & M_T7_DMAQ_CONM_CTXT_CNGTPMODE)
+
+#define S_T7_DMAQ_CONM_CTXT_CH_VEC 2
+#define M_T7_DMAQ_CONM_CTXT_CH_VEC 0xf
+#define V_T7_DMAQ_CONM_CTXT_CH_VEC(x) ((x) << S_T7_DMAQ_CONM_CTXT_CH_VEC)
+#define G_T7_DMAQ_CONM_CTXT_CH_VEC(x) \
+ (((x) >> S_T7_DMAQ_CONM_CTXT_CH_VEC) & M_T7_DMAQ_CONM_CTXT_CH_VEC)
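+
+/*
+ * Illustrative sketch (assumption): the T7 congestion-manager context
+ * packs the congestion-type mode in bits 1:0 with the channel vector
+ * above it:
+ *
+ *	conm = V_T7_DMAQ_CONM_CTXT_CH_VEC(ch_vec) |
+ *	    V_T7_DMAQ_CONM_CTXT_CNGTPMODE(mode);
+ */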
+
+
/*
* chnet parameters
*/
@@ -5199,7 +6341,8 @@ struct fw_pfvf_cmd {
enum fw_iq_type {
FW_IQ_TYPE_FL_INT_CAP,
FW_IQ_TYPE_NO_FL_INT_CAP,
- FW_IQ_TYPE_VF_CQ
+ FW_IQ_TYPE_VF_CQ,
+ FW_IQ_TYPE_CQ,
};
enum fw_iq_iqtype {
@@ -5787,6 +6930,12 @@ struct fw_eq_mngt_cmd {
(((x) >> S_FW_EQ_MNGT_CMD_EQSTOP) & M_FW_EQ_MNGT_CMD_EQSTOP)
#define F_FW_EQ_MNGT_CMD_EQSTOP V_FW_EQ_MNGT_CMD_EQSTOP(1U)
+#define S_FW_EQ_MNGT_CMD_COREGROUP 16
+#define M_FW_EQ_MNGT_CMD_COREGROUP 0x3f
+#define V_FW_EQ_MNGT_CMD_COREGROUP(x) ((x) << S_FW_EQ_MNGT_CMD_COREGROUP)
+#define G_FW_EQ_MNGT_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_MNGT_CMD_COREGROUP) & M_FW_EQ_MNGT_CMD_COREGROUP)
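+
+/*
+ * Illustrative sketch (assumption): a T7 driver would OR the new
+ * COREGROUP selector into the command word alongside the existing
+ * fields, e.g.:
+ *
+ *	c.cmpliqid_eqid = htobe32(V_FW_EQ_MNGT_CMD_COREGROUP(grp) |
+ *	    V_FW_EQ_MNGT_CMD_EQID(eqid));
+ */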
+
#define S_FW_EQ_MNGT_CMD_CMPLIQID 20
#define M_FW_EQ_MNGT_CMD_CMPLIQID 0xfff
#define V_FW_EQ_MNGT_CMD_CMPLIQID(x) ((x) << S_FW_EQ_MNGT_CMD_CMPLIQID)
@@ -5977,6 +7126,12 @@ struct fw_eq_eth_cmd {
(((x) >> S_FW_EQ_ETH_CMD_EQSTOP) & M_FW_EQ_ETH_CMD_EQSTOP)
#define F_FW_EQ_ETH_CMD_EQSTOP V_FW_EQ_ETH_CMD_EQSTOP(1U)
+#define S_FW_EQ_ETH_CMD_COREGROUP 16
+#define M_FW_EQ_ETH_CMD_COREGROUP 0x3f
+#define V_FW_EQ_ETH_CMD_COREGROUP(x) ((x) << S_FW_EQ_ETH_CMD_COREGROUP)
+#define G_FW_EQ_ETH_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_ETH_CMD_COREGROUP) & M_FW_EQ_ETH_CMD_COREGROUP)
+
#define S_FW_EQ_ETH_CMD_EQID 0
#define M_FW_EQ_ETH_CMD_EQID 0xfffff
#define V_FW_EQ_ETH_CMD_EQID(x) ((x) << S_FW_EQ_ETH_CMD_EQID)
@@ -6190,6 +7345,12 @@ struct fw_eq_ctrl_cmd {
(((x) >> S_FW_EQ_CTRL_CMD_EQSTOP) & M_FW_EQ_CTRL_CMD_EQSTOP)
#define F_FW_EQ_CTRL_CMD_EQSTOP V_FW_EQ_CTRL_CMD_EQSTOP(1U)
+#define S_FW_EQ_CTRL_CMD_COREGROUP 16
+#define M_FW_EQ_CTRL_CMD_COREGROUP 0x3f
+#define V_FW_EQ_CTRL_CMD_COREGROUP(x) ((x) << S_FW_EQ_CTRL_CMD_COREGROUP)
+#define G_FW_EQ_CTRL_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_CTRL_CMD_COREGROUP) & M_FW_EQ_CTRL_CMD_COREGROUP)
+
#define S_FW_EQ_CTRL_CMD_CMPLIQID 20
#define M_FW_EQ_CTRL_CMD_CMPLIQID 0xfff
#define V_FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << S_FW_EQ_CTRL_CMD_CMPLIQID)
@@ -6377,6 +7538,12 @@ struct fw_eq_ofld_cmd {
(((x) >> S_FW_EQ_OFLD_CMD_EQSTOP) & M_FW_EQ_OFLD_CMD_EQSTOP)
#define F_FW_EQ_OFLD_CMD_EQSTOP V_FW_EQ_OFLD_CMD_EQSTOP(1U)
+#define S_FW_EQ_OFLD_CMD_COREGROUP 16
+#define M_FW_EQ_OFLD_CMD_COREGROUP 0x3f
+#define V_FW_EQ_OFLD_CMD_COREGROUP(x) ((x) << S_FW_EQ_OFLD_CMD_COREGROUP)
+#define G_FW_EQ_OFLD_CMD_COREGROUP(x) \
+ (((x) >> S_FW_EQ_OFLD_CMD_COREGROUP) & M_FW_EQ_OFLD_CMD_COREGROUP)
+
#define S_FW_EQ_OFLD_CMD_EQID 0
#define M_FW_EQ_OFLD_CMD_EQID 0xfffff
#define V_FW_EQ_OFLD_CMD_EQID(x) ((x) << S_FW_EQ_OFLD_CMD_EQID)
@@ -7285,7 +8452,8 @@ fec_supported(uint32_t caps)
{
return ((caps & (FW_PORT_CAP32_SPEED_25G | FW_PORT_CAP32_SPEED_50G |
- FW_PORT_CAP32_SPEED_100G)) != 0);
+ FW_PORT_CAP32_SPEED_100G | FW_PORT_CAP32_SPEED_200G |
+ FW_PORT_CAP32_SPEED_400G)) != 0);
}
enum fw_port_action {
@@ -7799,6 +8967,8 @@ enum fw_port_type {
FW_PORT_TYPE_SFP28 = 20, /* No, 1, 25G/10G/1G */
FW_PORT_TYPE_KR_SFP28 = 21, /* No, 1, 25G/10G/1G using Backplane */
FW_PORT_TYPE_KR_XLAUI = 22, /* No, 4, 40G/10G/1G, No AN*/
+ FW_PORT_TYPE_SFP56 = 26,
+ FW_PORT_TYPE_QSFP56 = 27,
FW_PORT_TYPE_NONE = M_FW_PORT_CMD_PTYPE
};
@@ -7820,6 +8990,8 @@ enum fw_port_module_type {
FW_PORT_MOD_TYPE_TWINAX_PASSIVE = 0x4,
FW_PORT_MOD_TYPE_TWINAX_ACTIVE = 0x5,
FW_PORT_MOD_TYPE_LRM = 0x6,
+ FW_PORT_MOD_TYPE_LR_SIMPLEX = 0x7,
+ FW_PORT_MOD_TYPE_DR = 0x8,
FW_PORT_MOD_TYPE_ERROR = M_FW_PORT_CMD_MODTYPE - 3,
FW_PORT_MOD_TYPE_UNKNOWN = M_FW_PORT_CMD_MODTYPE - 2,
FW_PORT_MOD_TYPE_NOTSUPPORTED = M_FW_PORT_CMD_MODTYPE - 1,
@@ -8860,7 +10032,9 @@ struct fw_devlog_cmd {
__u8 r2[7];
__be32 memtype_devlog_memaddr16_devlog;
__be32 memsize_devlog;
- __be32 r3[2];
+ __u8 num_devlog;
+ __u8 r3[3];
+ __be32 r4;
};
#define S_FW_DEVLOG_CMD_MEMTYPE_DEVLOG 28
@@ -9784,6 +10958,45 @@ struct fw_hma_cmd {
#define G_FW_HMA_CMD_ADDR_SIZE(x) \
(((x) >> S_FW_HMA_CMD_ADDR_SIZE) & M_FW_HMA_CMD_ADDR_SIZE)
+struct fw_jbof_win_reg_cmd {
+ __be32 op_pkd;
+ __be32 alloc_to_len16;
+ __be32 window_num_pcie_params;
+ __be32 window_size;
+ __be64 bus_addr;
+ __be64 phy_address;
+};
+
+#define S_FW_JBOF_WIN_REG_CMD_ALLOC 31
+#define M_FW_JBOF_WIN_REG_CMD_ALLOC 0x1
+#define V_FW_JBOF_WIN_REG_CMD_ALLOC(x) ((x) << S_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define G_FW_JBOF_WIN_REG_CMD_ALLOC(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_ALLOC) & M_FW_JBOF_WIN_REG_CMD_ALLOC)
+#define F_FW_JBOF_WIN_REG_CMD_ALLOC V_FW_JBOF_WIN_REG_CMD_ALLOC(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_FREE 30
+#define M_FW_JBOF_WIN_REG_CMD_FREE 0x1
+#define V_FW_JBOF_WIN_REG_CMD_FREE(x) ((x) << S_FW_JBOF_WIN_REG_CMD_FREE)
+#define G_FW_JBOF_WIN_REG_CMD_FREE(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_FREE) & M_FW_JBOF_WIN_REG_CMD_FREE)
+#define F_FW_JBOF_WIN_REG_CMD_FREE V_FW_JBOF_WIN_REG_CMD_FREE(1U)
+
+#define S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 7
+#define M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM 0xf
+#define V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+#define G_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_WINDOW_NUM) & \
+ M_FW_JBOF_WIN_REG_CMD_WINDOW_NUM)
+
+#define S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0
+#define M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS 0x7f
+#define V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ ((x) << S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
+#define G_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(x) \
+ (((x) >> S_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS) & \
+ M_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS)
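+
+/*
+ * Illustrative sketch (assumption): allocating a JBOF window would set
+ * the ALLOC bit and pack the window number and PCIe parameters index:
+ *
+ *	c.alloc_to_len16 = htobe32(F_FW_JBOF_WIN_REG_CMD_ALLOC |
+ *	    V_FW_CMD_LEN16(len16));
+ *	c.window_num_pcie_params = htobe32(
+ *	    V_FW_JBOF_WIN_REG_CMD_WINDOW_NUM(win) |
+ *	    V_FW_JBOF_WIN_REG_CMD_PCIE_PARAMS(params));
+ */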
+
/******************************************************************************
* P C I E F W R E G I S T E R
**************************************/
@@ -9912,8 +11125,15 @@ enum pcie_fw_eval {
*/
#define PCIE_FW_PF_DEVLOG 7
+#define S_PCIE_FW_PF_DEVLOG_COUNT_MSB 31
+#define M_PCIE_FW_PF_DEVLOG_COUNT_MSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_MSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_MSB) & M_PCIE_FW_PF_DEVLOG_COUNT_MSB)
+
#define S_PCIE_FW_PF_DEVLOG_NENTRIES128 28
-#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0xf
+#define M_PCIE_FW_PF_DEVLOG_NENTRIES128 0x7
#define V_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
((x) << S_PCIE_FW_PF_DEVLOG_NENTRIES128)
#define G_PCIE_FW_PF_DEVLOG_NENTRIES128(x) \
@@ -9926,8 +11146,15 @@ enum pcie_fw_eval {
#define G_PCIE_FW_PF_DEVLOG_ADDR16(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_ADDR16) & M_PCIE_FW_PF_DEVLOG_ADDR16)
+#define S_PCIE_FW_PF_DEVLOG_COUNT_LSB 3
+#define M_PCIE_FW_PF_DEVLOG_COUNT_LSB 0x1
+#define V_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ ((x) << S_PCIE_FW_PF_DEVLOG_COUNT_LSB)
+#define G_PCIE_FW_PF_DEVLOG_COUNT_LSB(x) \
+ (((x) >> S_PCIE_FW_PF_DEVLOG_COUNT_LSB) & M_PCIE_FW_PF_DEVLOG_COUNT_LSB)
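+
+/*
+ * Illustrative sketch (assumption): the devlog count is split across
+ * non-adjacent register bits, so a reader reassembles the 2-bit value:
+ *
+ *	count = (G_PCIE_FW_PF_DEVLOG_COUNT_MSB(v) << 1) |
+ *	    G_PCIE_FW_PF_DEVLOG_COUNT_LSB(v);
+ */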
+
#define S_PCIE_FW_PF_DEVLOG_MEMTYPE 0
-#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0xf
+#define M_PCIE_FW_PF_DEVLOG_MEMTYPE 0x7
#define V_PCIE_FW_PF_DEVLOG_MEMTYPE(x) ((x) << S_PCIE_FW_PF_DEVLOG_MEMTYPE)
#define G_PCIE_FW_PF_DEVLOG_MEMTYPE(x) \
(((x) >> S_PCIE_FW_PF_DEVLOG_MEMTYPE) & M_PCIE_FW_PF_DEVLOG_MEMTYPE)
@@ -9967,7 +11194,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
FW_HDR_CHIP_T5,
- FW_HDR_CHIP_T6
+ FW_HDR_CHIP_T6,
+ FW_HDR_CHIP_T7
};
#define S_FW_HDR_FW_VER_MAJOR 24
@@ -10013,6 +11241,11 @@ enum {
T6FW_VERSION_MINOR = 27,
T6FW_VERSION_MICRO = 5,
T6FW_VERSION_BUILD = 0,
+
+ T7FW_VERSION_MAJOR = 2,
+ T7FW_VERSION_MINOR = 0,
+ T7FW_VERSION_MICRO = 0,
+ T7FW_VERSION_BUILD = 0,
};
enum {
@@ -10048,6 +11281,17 @@ enum {
T6FW_HDR_INTFVER_ISCSI = 0x00,
T6FW_HDR_INTFVER_FCOEPDU= 0x00,
T6FW_HDR_INTFVER_FCOE = 0x00,
+
+ /* T7
+ */
+ T7FW_HDR_INTFVER_NIC = 0x00,
+ T7FW_HDR_INTFVER_VNIC = 0x00,
+ T7FW_HDR_INTFVER_OFLD = 0x00,
+ T7FW_HDR_INTFVER_RI = 0x00,
+ T7FW_HDR_INTFVER_ISCSIPDU= 0x00,
+ T7FW_HDR_INTFVER_ISCSI = 0x00,
+ T7FW_HDR_INTFVER_FCOEPDU= 0x00,
+ T7FW_HDR_INTFVER_FCOE = 0x00,
};
#define FW_VERSION32(MAJOR, MINOR, MICRO, BUILD) ( \
@@ -10083,7 +11327,7 @@ struct fw_ephy_hdr {
enum {
FW_EPHY_HDR_MAGIC = 0x65706879,
};
-
+
struct fw_ifconf_dhcp_info {
__be32 addr;
__be32 mask;
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg.txt b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
new file mode 100644
index 000000000000..499af3675bd9
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg.txt
@@ -0,0 +1,644 @@
+# Chelsio T7 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T7-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T7 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiples of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+ # valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+ #Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+ reg[0x19250] = 0x0/0x3 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+ # HMA configuration (uncomment following lines to enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # Which regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64 # Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts per function.
+#
+# NFLIQ_EXTRA = 6 # "extra" Ingress Queues 2*NFUNCS (Firmware and
+# # Forwarded Interrupts)
+# NMSIX_EXTRA = 6 # extra interrupts 2*NFUNCS (Firmware and
+# # General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
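+#
+# Working the totals through (a sketch of the arithmetic above):
+#
+#	PF4_7_INT = 128 + 32 + 32 + 0 = 192
+#	PF0_7_INT = 32 + 192 = 224
+#	Total VFs = 4 PFs * 16 VFs/PF = 64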
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
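+
+# (pmask is read here as a per-port bitmask -- 0x1 selects port 0, 0x2 selects
+# port 1, and "all" grants access to every port. This reading is inferred from
+# the per-function assignments in this file rather than stated anywhere.)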
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+	niqflint = 170	# NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+	nexactf = 16	# (NPORTS * (no of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
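+# The capability bits can be read off from the two encoded values used in the
+# VF stanzas below (an inference from their comments, not a definitive
+# decoding): DMAQ = 0x80, VF = 0x02 and PORT = 0x04, so that
+# 0x82 = DMAQ | VF and 0x86 = DMAQ | VF | PORT.
+#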
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch.
+# The following parameters control how the buffer memory is distributed and
+# the L2 flow control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+#		(in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
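+# As a worked example of the units above: hwm = 30 corresponds to 3.0 MTUs,
+# lwm = 15 to 1.5 MTUs, and dwm = 30 to a minimum 3000-byte gap between the
+# watermarks (dwm counts in units of 100 bytes). This is a reading of the
+# stated units, not a statement of exact pause-frame behaviour.
+#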
+[port "0"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ #dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ #dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x684e23fb
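+
+# (The version/checksum pair above is presumably what the firmware uses to
+# validate this file -- another reason hand edits to it are unsafe.)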
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
new file mode 100644
index 000000000000..f06f059f4112
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_fpga.txt
@@ -0,0 +1,530 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 1000 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
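+	#	3. Entries of the form reg[address] = value/mask are taken to
+	#	   be read-modify-write: only the bits set in mask are updated
+	#	   with the corresponding bits of value, while entries without
+	#	   a mask write the whole register. (Inferred from usage below;
+	#	   not spelled out in this file.)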
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 1 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 # Crypto core reset
+ reg[0x46000] = 0xa/0xe # 16K ESH Hi Extraction window
+
+ #Tick granularities in kbps
+ tsch_ticks = 1000, 100, 10, 1
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+	#Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+	reg[0x19250] = 0x0/0x3		 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ #gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+	# HMA configuration (the following lines enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
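+	# (HMA is assumed here to stand for Host Memory Access: the regions
+	# listed above are carved out of host RAM rather than adapter memory,
+	# which is why hma_size declares how much host memory the firmware
+	# expects to be available.)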
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64			# Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts.
+#
+# NFLIQ_EXTRA = 6		# "extra" Ingress Queues 2*NFUNCS (Firmware and
+#				# Forwarded Interrupts)
+# NMSIX_EXTRA = 6		# extra interrupts 2*NFUNCS (Firmware and
+#				# General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+[function "0"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+	niqflint = 170	# NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 96 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 32 # number of clip region entries
+ nfilter = 48 # number of filter region entries
+ nserver = 48 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "1"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+	nexactf = 16	# (NPORTS * (no of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 2048
+ tp_l2t = 1020
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch.
+# The following parameters control how the buffer memory is distributed and
+# the L2 flow control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+#		(in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx, b2b # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx, b2b
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x22432d98
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
new file mode 100644
index 000000000000..0bca1c194af8
--- /dev/null
+++ b/sys/dev/cxgbe/firmware/t7fw_cfg_uwire.txt
@@ -0,0 +1,644 @@
+# Chelsio T6 Factory Default configuration file.
+#
+# Copyright (C) 2014-2015 Chelsio Communications. All rights reserved.
+#
+# DO NOT MODIFY THIS FILE UNDER ANY CIRCUMSTANCES. MODIFICATION OF THIS FILE
+# WILL RESULT IN A NON-FUNCTIONAL ADAPTER AND MAY RESULT IN PHYSICAL DAMAGE
+# TO ADAPTERS.
+
+
+# This file provides the default, power-on configuration for 2-port T6-based
+# adapters shipped from the factory. These defaults are designed to address
+# the needs of the vast majority of Terminator customers. The basic idea is to
+# have a default configuration which allows a customer to plug a Terminator
+# adapter in and have it work regardless of OS, driver or application except in
+# the most unusual and/or demanding customer applications.
+#
+# Many of the Terminator resources which are described by this configuration
+# are finite. This requires balancing the configuration/operation needs of
+# device drivers across OSes and a large number of customer applications.
+#
+# Some of the more important resources to allocate and their constraints are:
+# 1. Virtual Interfaces: 256.
+# 2. Ingress Queues with Free Lists: 1024.
+# 3. Egress Queues: 128K.
+# 4. MSI-X Vectors: 1088.
+# 5. Multi-Port Support (MPS) TCAM: 336 entries to support MAC destination
+# address matching on Ingress Packets.
+#
+# Some of the important OS/Driver resource needs are:
+# 6. Some OS Drivers will manage all resources through a single Physical
+# Function (currently PF4 but it could be any Physical Function).
+# 7. Some OS Drivers will manage different ports and functions (NIC,
+# storage, etc.) on different Physical Functions. For example, NIC
+# functions for ports 0-1 on PF0-1, FCoE on PF4, iSCSI on PF5, etc.
+#
+# Some of the customer application needs which need to be accommodated:
+# 8. Some customers will want to support large CPU count systems with
+# good scaling. Thus, we'll need to accommodate a number of
+# Ingress Queues and MSI-X Vectors to allow up to some number of CPUs
+# to be involved per port and per application function. For example,
+# in the case where all ports and application functions will be
+# managed via a single Unified PF and we want to accommodate scaling up
+# to 8 CPUs, we would want:
+#
+# 2 ports *
+# 3 application functions (NIC, FCoE, iSCSI) per port *
+# 16 Ingress Queue/MSI-X Vectors per application function
+#
+# for a total of 96 Ingress Queues and MSI-X Vectors on the Unified PF.
+# (Plus a few for Firmware Event Queues, etc.)
+#
+# 9. Some customers will want to use PCI-E SR-IOV Capability to allow Virtual
+# Machines to directly access T6 functionality via SR-IOV Virtual Functions
+# and "PCI Device Passthrough" -- this is especially true for the NIC
+# application functionality.
+#
+
+
+# Global configuration settings.
+#
+[global]
+ rss_glb_config_mode = basicvirtual
+ rss_glb_config_options = tnlmapen,hashtoeplitz,tnlalllkp
+
+ # PL_TIMEOUT register
+ pl_timeout_value = 200 # the timeout value in units of us
+
+ # The following Scatter Gather Engine (SGE) settings assume a 4KB Host
+ # Page Size and a 64B L1 Cache Line Size. It programs the
+ # EgrStatusPageSize and IngPadBoundary to 64B and the PktShift to 2.
+ # If a Master PF Driver finds itself on a machine with different
+ # parameters, then the Master PF Driver is responsible for initializing
+ # these parameters to appropriate values.
+ #
+ # Notes:
+ # 1. The Free List Buffer Sizes below are raw and the firmware will
+ # round them up to the Ingress Padding Boundary.
+ # 2. The SGE Timer Values below are expressed below in microseconds.
+ # The firmware will convert these values to Core Clock Ticks when
+ # it processes the configuration parameters.
+ #
+ reg[0x1008] = 0x40810/0x21c70 # SGE_CONTROL
+ reg[0x100c] = 0x22222222 # SGE_HOST_PAGE_SIZE
+ reg[0x10a0] = 0x01040810 # SGE_INGRESS_RX_THRESHOLD
+ reg[0x1044] = 4096 # SGE_FL_BUFFER_SIZE0
+ reg[0x1048] = 65536 # SGE_FL_BUFFER_SIZE1
+ reg[0x104c] = 1536 # SGE_FL_BUFFER_SIZE2
+ reg[0x1050] = 9024 # SGE_FL_BUFFER_SIZE3
+ reg[0x1054] = 9216 # SGE_FL_BUFFER_SIZE4
+ reg[0x1058] = 2048 # SGE_FL_BUFFER_SIZE5
+ reg[0x105c] = 128 # SGE_FL_BUFFER_SIZE6
+ reg[0x1060] = 8192 # SGE_FL_BUFFER_SIZE7
+ reg[0x1064] = 16384 # SGE_FL_BUFFER_SIZE8
+ reg[0x10a4] = 0xa000a000/0xf000f000 # SGE_DBFIFO_STATUS
+ reg[0x10a8] = 0x402000/0x402000 # SGE_DOORBELL_CONTROL
+ sge_timer_value = 5, 10, 20, 50, 100, 200 # SGE_TIMER_VALUE* in usecs
+ reg[0x10c4] = 0x20000000/0x20000000 # GK_CONTROL, enable 5th thread
+ reg[0x173c] = 0x2/0x2
+
+ reg[0x1750] = 0x01000000/0x03c00000 # RDMA_INV_Handling = 1
+ # terminate_status_en = 0
+ # DISABLE = 0
+
+ #DBQ Timer duration = 1 cclk cycle duration * (sge_dbq_timertick+1) * sge_dbq_timer
+ #SGE DBQ tick value. All timers are multiple of this value
+ sge_dbq_timertick = 50 #in usecs
+ sge_dbq_timer = 1, 2, 4, 6, 8, 10, 12, 16
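+	#With the 50 usec tick above and the multiplier list 1 through 16,
+	#the DBQ timers span roughly 50 usec to 800 usec (an illustrative
+	#reading of "all timers are multiple of this value", not a hardware
+	#guarantee)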
+
+ #CIM_QUEUE_FEATURE_DISABLE.obq_eom_enable bit needs to be set to 1 for CmdMore handling support
+ reg[0x7c4c] = 0x20/0x20
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE
+ reg[0x7d04] = 0x00010000/0x00010000
+
+ reg[0x7dc0] = 0x0e2f8849 # TP_SHIFT_CNT
+
+ reg[0x46004] = 0x3/0x3 #Crypto core reset
+
+ #Tick granularities in kbps
+ tsch_ticks = 100000, 10000, 1000, 10
+
+ # TP_VLAN_PRI_MAP to select filter tuples and enable ServerSram
+ # filter control: compact, fcoemask
+ # server sram : srvrsram
+ # filter tuples : fragmentation, mpshittype, macmatch, ethertype,
+ # protocol, tos, vlan, vnic_id, port, fcoe
+	# valid filterModes are described in the Terminator 5 Data Book
+ filterMode = fcoemask, srvrsram, ipsec, rocev2, fragmentation, mpshittype, protocol, vlan, port, fcoe
+
+ # filter tuples enforced in LE active region (equal to or subset of filterMode)
+ filterMask = protocol, ipsec, rocev2, fcoe
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP RX payload
+ tp_pmrx = 30
+
+ # TP RX payload page size
+ tp_pmrx_pagesize = 64K
+
+ # Percentage of dynamic memory (in either the EDRAM or external MEM)
+ # to use for TP TX payload
+ tp_pmtx = 50
+
+ # TP TX payload page size
+ tp_pmtx_pagesize = 64K
+
+ # TP OFLD MTUs
+ tp_mtus = 88, 256, 512, 576, 808, 1024, 1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
+
+ # enable TP_OUT_CONFIG.IPIDSPLITMODE and CRXPKTENC
+ reg[0x7d04] = 0x00010008/0x00010008
+
+ # TP_GLOBAL_CONFIG
+ reg[0x7d08] = 0x00000800/0x00000800 # set IssFromCplEnable
+
+ # TP_PC_CONFIG
+ reg[0x7d48] = 0x00000000/0x00000400 # clear EnableFLMError
+
+ # TP_PARA_REG0
+ reg[0x7d60] = 0x06000000/0x07000000 # set InitCWND to 6
+
+ # ULPRX iSCSI Page Sizes
+ reg[0x19168] = 0x04020100 # 64K, 16K, 8K and 4K
+
+ # LE_DB_CONFIG
+ reg[0x19c04] = 0x00400000/0x00440000 # LE Server SRAM Enable,
+ # LE IPv4 compression disabled
+ # LE_DB_HASH_CONFIG
+ reg[0x19c28] = 0x00800000/0x01f00000 # LE Hash bucket size 8,
+
+ # ULP_TX_CONFIG
+ reg[0x8dc0] = 0x00000104/0x02000104 # Enable ITT on PI err
+ # Enable more error msg for ...
+ # TPT error.
+ # Err2uP = 0
+
+ #ULP_RX_CTL1
+ reg[0x19330] = 0x000000f0/0x000000f0 # RDMA_Invld_Msg_Dis = 3
+ # ROCE_Invld_Msg_Dis = 3
+
+	#Enable iscsi completion moderation feature, disable rdma invalidate in ulptx
+ reg[0x1925c] = 0x000041c0/0x000031d0 # Enable offset decrement after
+ # PI extraction and before DDP.
+ # ulp insert pi source info in
+ # DIF.
+ # Enable iscsi hdr cmd mode.
+ # iscsi force cmd mode.
+ # Enable iscsi cmp mode.
+ # terminate_status_en = 0
+
+ #ULP_RX_CQE_GEN_EN
+	reg[0x19250] = 0x0/0x3		 # Terminate_msg = 0
+ # Terminate_with_err = 0
+
+ gc_disable = 3 # 3 - disable gc for hma/mc1 and mc0,
+ # 2 - disable gc for mc1/hma enable mc0,
+ # 1 - enable gc for mc1/hma disable mc0,
+ # 0 - enable gc for mc1/hma and for mc0,
+ # default gc enabled.
+
+	# HMA configuration (the following lines enable HMA)
+ hma_size = 92 # Size (in MBs) of host memory expected
+ hma_regions = iscsi,rrq,tls,ddp,pmrx,stag,pbl,rq # What all regions to place in host memory
+
+ #mc[0]=0
+ #mc[1]=0
+
+# Some "definitions" to make the rest of this a bit more readable. We support
+# 4 ports, 3 functions (NIC, FCoE and iSCSI), scaling up to 8 "CPU Queue Sets"
+# per function per port ...
+#
+# NMSIX = 1088 # available MSI-X Vectors
+# NVI = 256 # available Virtual Interfaces
+# NMPSTCAM = 336 # MPS TCAM entries
+#
+# NPORTS = 2 # ports
+# NCPUS = 16 # CPUs we want to support scalably
+# NFUNCS = 3 # functions per port (NIC, FCoE, iSCSI)
+
+# Breakdown of Virtual Interface/Queue/Interrupt resources for the "Unified
+# PF" which many OS Drivers will use to manage most or all functions.
+#
+# Each Ingress Queue can use one MSI-X interrupt but some Ingress Queues can
+# use Forwarded Interrupt Ingress Queues. For these latter, an Ingress Queue
+# would be created and the Queue ID of a Forwarded Interrupt Ingress Queue
+# will be specified as the "Ingress Queue Asynchronous Destination Index."
+# Thus, the number of MSI-X Vectors assigned to the Unified PF will be less
+# than or equal to the number of Ingress Queues ...
+#
+# NVI_NIC = 4 # NIC access to NPORTS
+# NFLIQ_NIC = 32 # NIC Ingress Queues with Free Lists
+# NETHCTRL_NIC = 32 # NIC Ethernet Control/TX Queues
+# NEQ_NIC = 64 # NIC Egress Queues (FL, ETHCTRL/TX)
+# NMPSTCAM_NIC = 16 # NIC MPS TCAM Entries (NPORTS*4)
+# NMSIX_NIC = 32 # NIC MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_OFLD = 0 # Offload uses NIC function to access ports
+# NFLIQ_OFLD = 16 # Offload Ingress Queues with Free Lists
+# NETHCTRL_OFLD = 0 # Offload Ethernet Control/TX Queues
+# NEQ_OFLD = 16 # Offload Egress Queues (FL)
+# NMPSTCAM_OFLD = 0 # Offload MPS TCAM Entries (uses NIC's)
+# NMSIX_OFLD = 16 # Offload MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_RDMA = 0 # RDMA uses NIC function to access ports
+# NFLIQ_RDMA = 4 # RDMA Ingress Queues with Free Lists
+# NETHCTRL_RDMA = 0 # RDMA Ethernet Control/TX Queues
+# NEQ_RDMA = 4 # RDMA Egress Queues (FL)
+# NMPSTCAM_RDMA = 0 # RDMA MPS TCAM Entries (uses NIC's)
+# NMSIX_RDMA = 4 # RDMA MSI-X Interrupt Vectors (FLIQ)
+#
+# NEQ_WD = 128 # Wire Direct TX Queues and FLs
+# NETHCTRL_WD = 64 # Wire Direct TX Queues
+# NFLIQ_WD = 64			# Wire Direct Ingress Queues with Free Lists
+#
+# NVI_ISCSI = 4 # ISCSI access to NPORTS
+# NFLIQ_ISCSI = 4 # ISCSI Ingress Queues with Free Lists
+# NETHCTRL_ISCSI = 0 # ISCSI Ethernet Control/TX Queues
+# NEQ_ISCSI = 4 # ISCSI Egress Queues (FL)
+# NMPSTCAM_ISCSI = 4 # ISCSI MPS TCAM Entries (NPORTS)
+# NMSIX_ISCSI = 4 # ISCSI MSI-X Interrupt Vectors (FLIQ)
+#
+# NVI_FCOE = 4 # FCOE access to NPORTS
+# NFLIQ_FCOE = 34 # FCOE Ingress Queues with Free Lists
+# NETHCTRL_FCOE = 32 # FCOE Ethernet Control/TX Queues
+# NEQ_FCOE = 66 # FCOE Egress Queues (FL)
+# NMPSTCAM_FCOE = 32 # FCOE MPS TCAM Entries (NPORTS)
+# NMSIX_FCOE = 34 # FCOE MSI-X Interrupt Vectors (FLIQ)
+
+# Two extra Ingress Queues per function for Firmware Events and Forwarded
+# Interrupts, and two extra interrupts per function for Firmware Events (or a
+# Forwarded Interrupt Queue) and General Interrupts.
+#
+# NFLIQ_EXTRA = 6		# "extra" Ingress Queues 2*NFUNCS (Firmware and
+#				# Forwarded Interrupts)
+# NMSIX_EXTRA = 6		# extra interrupts 2*NFUNCS (Firmware and
+#				# General Interrupts)
+
+# Microsoft HyperV resources. The HyperV Virtual Ingress Queues will have
+# their interrupts forwarded to another set of Forwarded Interrupt Queues.
+#
+# NVI_HYPERV = 16 # VMs we want to support
+# NVIIQ_HYPERV = 2 # Virtual Ingress Queues with Free Lists per VM
+# NFLIQ_HYPERV = 40 # VIQs + NCPUS Forwarded Interrupt Queues
+# NEQ_HYPERV = 32 # VIQs Free Lists
+# NMPSTCAM_HYPERV = 16 # MPS TCAM Entries (NVI_HYPERV)
+# NMSIX_HYPERV = 8 # NCPUS Forwarded Interrupt Queues
+
+# Adding all of the above Unified PF resource needs together: (NIC + OFLD +
+# RDMA + ISCSI + FCOE + EXTRA + HYPERV)
+#
+# NVI_UNIFIED = 28
+# NFLIQ_UNIFIED = 106
+# NETHCTRL_UNIFIED = 32
+# NEQ_UNIFIED = 124
+# NMPSTCAM_UNIFIED = 40
+#
+# The sum of all the MSI-X resources above is 74 MSI-X Vectors but we'll round
+# that up to 128 to make sure the Unified PF doesn't run out of resources.
+#
+# NMSIX_UNIFIED = 128
+#
+# The Storage PFs could need up to NPORTS*NCPUS + NMSIX_EXTRA MSI-X Vectors
+# which is 34 but they're probably safe with 32.
+#
+# NMSIX_STORAGE = 32
+
+# Note: The UnifiedPF is PF4 which doesn't have any Virtual Functions
+# associated with it. Thus, the MSI-X Vector allocations we give to the
+# UnifiedPF aren't inherited by any Virtual Functions. As a result we can
+# provision many more Virtual Functions than we can if the UnifiedPF were
+# one of PF0-1.
+#
+
+# All of the below PCI-E parameters are actually stored in various *_init.txt
+# files. We include them below essentially as comments.
+#
+# For PF0-1 we assign 8 vectors each for NIC Ingress Queues of the associated
+# ports 0-1.
+#
+# For PF4, the Unified PF, we give it an MSI-X Table Size as outlined above.
+#
+# For PF5-6 we assign enough MSI-X Vectors to support FCoE and iSCSI
+# storage applications across all four possible ports.
+#
+# Additionally, since the UnifiedPF isn't one of the per-port Physical
+# Functions, we give the UnifiedPF and the PF0-1 Physical Functions
+# different PCI Device IDs which will allow Unified and Per-Port Drivers
+# to directly select the type of Physical Function to which they wish to be
+# attached.
+#
+# Note that the actual values used for the PCI-E Intellectual Property will be
+# 1 less than those below since that's the way it "counts" things. For
+# readability, we use the number we actually mean ...
+#
+# PF0_INT = 8 # NCPUS
+# PF1_INT = 8 # NCPUS
+# PF0_3_INT = 32 # PF0_INT + PF1_INT + PF2_INT + PF3_INT
+#
+# PF4_INT = 128 # NMSIX_UNIFIED
+# PF5_INT = 32 # NMSIX_STORAGE
+# PF6_INT = 32 # NMSIX_STORAGE
+# PF7_INT = 0 # Nothing Assigned
+# PF4_7_INT = 192 # PF4_INT + PF5_INT + PF6_INT + PF7_INT
+#
+# PF0_7_INT = 224 # PF0_3_INT + PF4_7_INT
+#
+# With the above we can get 17 VFs/PF0-3 (limited by 336 MPS TCAM entries)
+# but we'll lower that to 16 to make our total 64 and a nice power of 2 ...
+#
+# NVF = 16
+
+
+# For those OSes which manage different ports on different PFs, we need
+# only enough resources to support a single port's NIC application functions
+# on PF0-3. The below assumes that we're only doing NIC with NCPUS "Queue
+# Sets" for ports 0-3. The FCoE and iSCSI functions for such OSes will be
+# managed on the "storage PFs" (see below).
+#
+
+[function "0"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port
+
+
+[function "1"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port
+
+[function "2"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x4 # access to only one port
+ pmask = 0x1 # access to only one port
+
+[function "3"]
+ nvf = 16 # NVF on this function
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 1 # 1 port
+ niqflint = 8 # NCPUS "Queue Sets"
+ nethctrl = 8 # NCPUS "Queue Sets"
+ neq = 16 # niqflint + nethctrl Egress Queues
+ nexactf = 8 # number of exact MPSTCAM MAC filters
+ cmask = all # access to all channels
+ #pmask = 0x2 # access to only one port
+
+# Some OS Drivers manage all application functions for all ports via PF4.
+# Thus we need to provide a large number of resources here. For Egress
+# Queues we need to account for both TX Queues as well as Free List Queues
+# (because the host is responsible for producing Free List Buffers for the
+# hardware to consume).
+#
+
+[function "4"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 28 # NVI_UNIFIED
+	niqflint = 170	# NFLIQ_UNIFIED + NFLIQ_WD
+ nethctrl = 224 # NETHCTRL_UNIFIED + NETHCTRL_WD
+ neq = 252 # NEQ_UNIFIED + NEQ_WD
+ nqpcq = 12288
+ nexactf = 40 # NMPSTCAM_UNIFIED
+ nrawf = 4
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nethofld = 1024 # number of user mode ethernet flow contexts
+ ncrypto_lookaside = 32
+ nclip = 320 # number of clip region entries
+ nfilter = 480 # number of filter region entries
+ nserver = 480 # number of server region entries
+ nhash = 12288 # number of hash region entries
+ nhpfilter = 64 # number of high priority filter region entries
+ #protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, ofld_sendpath
+ protocol = nic_vm, ofld, rddp, rdmac, iscsi_initiator_pdu, iscsi_target_pdu, iscsi_t10dif, tlskeys, crypto_lookaside, ipsec_inline, rocev2, nic_hashfilter, nvme_tcp
+ tp_l2t = 3072
+ tp_ddp = 2
+ tp_ddp_iscsi = 2
+ tp_tls_key = 3
+ tp_tls_mxrxsize = 33792 # 32768 + 1024, governs max rx data, pm max xfer len, rx coalesce sizes
+ tp_stag = 2
+ tp_pbl = 5
+ tp_rq = 7
+ tp_rrq = 4
+ tp_srq = 128
+ nipsec_tunnel16 = 64 # in unit of 16
+ nipsec_transport16 = 191 # in unit of 16
+
+
+# We have FCoE and iSCSI storage functions on PF5 and PF6 each of which may
+# need to have Virtual Interfaces on each of the four ports with up to NCPUS
+# "Queue Sets" each.
+#
+[function "5"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+	nexactf = 16	# (NPORTS * (no of snmc grp + 1 hw mac) + 1 anmc grp) rounded to 16.
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nserver = 16
+ nhash = 1536
+ tp_l2t = 508
+ protocol = iscsi_initiator_fofld
+ tp_ddp_iscsi = 2
+ iscsi_ntask = 2048
+ iscsi_nsess = 2048
+ iscsi_nconn_per_session = 1
+ iscsi_ninitiator_instance = 64
+
+[function "6"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NPORTS
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 66 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX) + 2 (EXTRA)
+ nexactf = 32 # NPORTS + adding 28 exact entries for FCoE
+ # which is OK since < MIN(SUM PF0..3, PF4)
+ # and we never load PF0..3 and PF4 concurrently
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nhash = 1536
+ tp_l2t = 4
+ protocol = fcoe_initiator
+ tp_ddp = 1
+ fcoe_nfcf = 16
+ fcoe_nvnp = 32
+ fcoe_nssn = 1024
+
+# Following function 7 is used by embedded ARM to communicate to
+# the firmware.
+[function "7"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 16 # number of high priority filter region entries
+ niqflint = 34 # NPORTS*NCPUS + NMSIX_EXTRA
+ nethctrl = 32 # NPORTS*NCPUS
+ neq = 64 # NPORTS*NCPUS * 2 (FL, ETHCTRL/TX)
+ nserver = 16
+ nhash = 1024
+ tp_l2t = 512
+ protocol = nic_vm, ofld, rddp, rdmac, tlskeys, ipsec_inline, rocev2, nvme_tcp
+
+# The following function, 1023, is not an actual PCIE function but is used to
+# configure and reserve firmware internal resources that come from the global
+# resource pool.
+#
+[function "1023"]
+ wx_caps = all # write/execute permissions for all commands
+ r_caps = all # read permissions for all commands
+ nvi = 4 # NVI_UNIFIED
+ cmask = all # access to all channels
+ pmask = all # access to all four ports ...
+ nexactf = 8 # NPORTS + DCBX +
+ nfilter = 16 # number of filter region entries
+ #nhpfilter = 0 # number of high priority filter region entries
+
+
+# For Virtual functions, we only allow NIC functionality and we only allow
+# access to one port (1 << PF). Note that because of limitations in the
+# Scatter Gather Engine (SGE) hardware which checks writes to VF KDOORBELL
+# and GTS registers, the number of Ingress and Egress Queues must be a power
+# of 2.
+#
+[function "0/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "1/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+[function "2/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x1 # access to only one port ...
+
+
+[function "3/*"] # NVF
+ wx_caps = 0x82 # DMAQ | VF
+ r_caps = 0x86 # DMAQ | VF | PORT
+ nvi = 1 # 1 port
+ niqflint = 6 # 2 "Queue Sets" + NXIQ
+ nethctrl = 4 # 2 "Queue Sets"
+ neq = 8 # 2 "Queue Sets" * 2
+ nexactf = 4
+ cmask = all # access to all channels
+ pmask = 0x2 # access to only one port ...
+
+# MPS features a 196608-byte ingress buffer that is used for ingress buffering
+# for packets from the wire as well as the loopback path of the L2 switch.
+# The following parameters control how the buffer memory is distributed and
+# the L2 flow control settings:
+#
+# bg_mem: %-age of mem to use for port/buffer group
+# lpbk_mem: %-age of port/bg mem to use for loopback
+# hwm: high watermark; bytes available when starting to send pause
+# frames (in units of 0.1 MTU)
+# lwm: low watermark; bytes remaining when sending 'unpause' frame
+#		(in units of 0.1 MTU)
+# dwm: minimum delta between high and low watermark (in units of 100
+# Bytes)
+#
+[port "0"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "1"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[port "2"]
+ dcb = ppp, dcbx # configure for DCB PPP and enable DCBX offload
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+
+[port "3"]
+ dcb = ppp, dcbx
+ hwm = 30
+ lwm = 15
+ dwm = 30
+ dcb_app_tlv[0] = 0x8906, ethertype, 3
+ dcb_app_tlv[1] = 0x8914, ethertype, 3
+ dcb_app_tlv[2] = 3260, socketnum, 5
+
+[fini]
+ version = 0x1425001d
+ checksum = 0x5cab62d4
+
+# Total resources used by above allocations:
+# Virtual Interfaces: 104
+# Ingress Queues/w Free Lists and Interrupts: 526
+# Egress Queues: 702
+# MPS TCAM Entries: 336
+# MSI-X Vectors: 736
+# Virtual Functions: 64
diff --git a/sys/dev/cxgbe/iw_cxgbe/cm.c b/sys/dev/cxgbe/iw_cxgbe/cm.c
index e0e48fdff9ba..d291eeeb4f40 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cm.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cm.c
@@ -1080,7 +1080,7 @@ c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
* Wake up any threads waiting in rdma_init()/rdma_fini(),
* with locks held.
*/
- if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR))
+ if (so->so_error || c4iw_stopped(&ep->com.dev->rdev))
c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
@@ -2602,6 +2602,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
+ if (__predict_false(c4iw_stopped(&dev->rdev)))
+ return -EIO;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2655,8 +2657,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
- err = EHOSTUNREACH;
- return err;
+ return -EHOSTUNREACH;
}
if (!(if_getcapenable(nh_ifp) & IFCAP_TOE) ||
@@ -2706,11 +2707,10 @@ c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
struct listen_port_info *port_info = NULL;
int rc = 0;
- CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %s", __func__, cm_id,
+ CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
backlog);
- if (c4iw_fatal_error(&dev->rdev)) {
- CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__,
- cm_id);
+ if (c4iw_stopped(&dev->rdev)) {
+ CTR2(KTR_IW_CXGBE, "%s: cm_id %p, stopped", __func__, cm_id);
return -EIO;
}
lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
@@ -2821,8 +2821,8 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
rdev = &ep->com.dev->rdev;
- if (c4iw_fatal_error(rdev)) {
- CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep,
+ if (c4iw_stopped(rdev)) {
+ CTR3(KTR_IW_CXGBE, "%s:ced1 stopped %p %s", __func__, ep,
states[ep->com.state]);
if (ep->com.state != DEAD) {
send_abort(ep);
diff --git a/sys/dev/cxgbe/iw_cxgbe/cq.c b/sys/dev/cxgbe/iw_cxgbe/cq.c
index 9339d083cae3..197f2bcf8af0 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cq.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cq.c
@@ -106,6 +106,8 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
struct wrqe *wr;
u64 cq_bar2_qoffset = 0;
+ if (__predict_false(c4iw_stopped(rdev)))
+ return -EIO;
cq->cqid = c4iw_get_cqid(rdev, uctx);
if (!cq->cqid) {
ret = -ENOMEM;
@@ -1037,6 +1039,8 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
unsigned long flag;
chp = to_c4iw_cq(ibcq);
+ if (__predict_false(c4iw_stopped(chp->cq.rdev)))
+ return -EIO;
spin_lock_irqsave(&chp->lock, flag);
t4_arm_cq(&chp->cq,
(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index fa886766e383..4610f91e96ac 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -132,26 +132,21 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->stats.rqt.total = sc->vres.rq.size;
rdev->stats.qid.total = sc->vres.qp.size;
- rc = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
+ rc = c4iw_init_resource(rdev, T4_MAX_NUM_PD);
if (rc) {
device_printf(sc->dev, "error %d initializing resources\n", rc);
goto err1;
}
- rc = c4iw_pblpool_create(rdev);
- if (rc) {
- device_printf(sc->dev, "error %d initializing pbl pool\n", rc);
- goto err2;
- }
rc = c4iw_rqtpool_create(rdev);
if (rc) {
device_printf(sc->dev, "error %d initializing rqt pool\n", rc);
- goto err3;
+ goto err2;
}
rdev->status_page = (struct t4_dev_status_page *)
__get_free_page(GFP_KERNEL);
if (!rdev->status_page) {
rc = -ENOMEM;
- goto err4;
+ goto err3;
}
rdev->status_page->qp_start = sc->vres.qp.start;
rdev->status_page->qp_size = sc->vres.qp.size;
@@ -168,15 +163,13 @@ c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
if (!rdev->free_workq) {
rc = -ENOMEM;
- goto err5;
+ goto err4;
}
return (0);
-err5:
- free_page((unsigned long)rdev->status_page);
err4:
- c4iw_rqtpool_destroy(rdev);
+ free_page((unsigned long)rdev->status_page);
err3:
- c4iw_pblpool_destroy(rdev);
+ c4iw_rqtpool_destroy(rdev);
err2:
c4iw_destroy_resource(&rdev->resource);
err1:
@@ -186,7 +179,6 @@ err1:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
free_page((unsigned long)rdev->status_page);
- c4iw_pblpool_destroy(rdev);
c4iw_rqtpool_destroy(rdev);
c4iw_destroy_resource(&rdev->resource);
}
@@ -259,13 +251,14 @@ static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);
-static void c4iw_async_event(struct adapter *);
+static int c4iw_stop(struct adapter *);
+static int c4iw_restart(struct adapter *);
static struct uld_info c4iw_uld_info = {
- .uld_id = ULD_IWARP,
- .activate = c4iw_activate,
- .deactivate = c4iw_deactivate,
- .async_event = c4iw_async_event,
+ .uld_activate = c4iw_activate,
+ .uld_deactivate = c4iw_deactivate,
+ .uld_stop = c4iw_stop,
+ .uld_restart = c4iw_restart,
};
static int
@@ -283,7 +276,7 @@ c4iw_activate(struct adapter *sc)
}
if (uld_active(sc, ULD_IWARP)) {
- KASSERT(0, ("%s: RDMA already eanbled on sc %p", __func__, sc));
+ KASSERT(0, ("%s: RDMA already enabled on sc %p", __func__, sc));
return (0);
}
@@ -326,21 +319,34 @@ c4iw_deactivate(struct adapter *sc)
return (0);
}
-static void
-c4iw_async_event(struct adapter *sc)
+static int
+c4iw_stop(struct adapter *sc)
{
struct c4iw_dev *iwsc = sc->iwarp_softc;
if (iwsc) {
struct ib_event event = {0};
- device_printf(sc->dev,
- "iWARP driver received FATAL ERROR event.\n");
- iwsc->rdev.flags |= T4_FATAL_ERROR;
+ device_printf(sc->dev, "iWARP driver stopped.\n");
+ iwsc->rdev.flags |= T4_IW_STOPPED;
event.event = IB_EVENT_DEVICE_FATAL;
event.device = &iwsc->ibdev;
ib_dispatch_event(&event);
}
+
+ return (0);
+}
+
+static int
+c4iw_restart(struct adapter *sc)
+{
+ struct c4iw_dev *iwsc = sc->iwarp_softc;
+
+ if (iwsc) {
+ device_printf(sc->dev, "iWARP driver restarted.\n");
+ iwsc->rdev.flags &= ~T4_IW_STOPPED;
+ }
+ return (0);
}
static void
@@ -379,7 +385,7 @@ c4iw_mod_load(void)
if (rc != 0)
return (rc);
- rc = t4_register_uld(&c4iw_uld_info);
+ rc = t4_register_uld(&c4iw_uld_info, ULD_IWARP);
if (rc != 0) {
c4iw_cm_term();
return (rc);
@@ -398,7 +404,7 @@ c4iw_mod_unload(void)
c4iw_cm_term();
- if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
+ if (t4_unregister_uld(&c4iw_uld_info, ULD_IWARP) == EBUSY)
return (EBUSY);
return (0);
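
The stop/restart pair above is the template every ULD now follows: quiesce and
flag the device stopped, then clear the flag on restart, with the ULD id passed
to t4_register_uld() as a separate argument instead of living in uld_info. A
hedged sketch of a hypothetical ULD hooking into the reworked interface (the
my_* names are illustrative, not part of the source):

    static int my_activate(struct adapter *sc)   { return (0); }
    static int my_deactivate(struct adapter *sc) { return (0); }
    static int my_stop(struct adapter *sc)       { return (0); } /* set a stopped flag */
    static int my_restart(struct adapter *sc)    { return (0); } /* clear the flag */

    static struct uld_info my_uld_info = {
            .uld_activate = my_activate,
            .uld_deactivate = my_deactivate,
            .uld_stop = my_stop,
            .uld_restart = my_restart,
    };

    static int
    my_mod_load(void)
    {
            /* The ULD id is now explicit at (un)registration. */
            return (t4_register_uld(&my_uld_info, ULD_IWARP));
    }

    static int
    my_mod_unload(void)
    {
            return (t4_unregister_uld(&my_uld_info, ULD_IWARP));
    }
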
diff --git a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
index 5715b7e53863..47ce10562c66 100644
--- a/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
+++ b/sys/dev/cxgbe/iw_cxgbe/iw_cxgbe.h
@@ -99,7 +99,6 @@ struct c4iw_id_table {
};
struct c4iw_resource {
- struct c4iw_id_table tpt_table;
struct c4iw_id_table qid_table;
struct c4iw_id_table pdid_table;
};
@@ -116,7 +115,7 @@ struct c4iw_dev_ucontext {
};
enum c4iw_rdev_flags {
- T4_FATAL_ERROR = (1<<0),
+ T4_IW_STOPPED = (1<<0),
T4_STATUS_PAGE_DISABLED = (1<<1),
};
@@ -167,9 +166,9 @@ struct c4iw_rdev {
struct workqueue_struct *free_workq;
};
-static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
+static inline int c4iw_stopped(struct c4iw_rdev *rdev)
{
- return rdev->flags & T4_FATAL_ERROR;
+ return rdev->flags & T4_IW_STOPPED;
}
static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
@@ -214,7 +213,7 @@ c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
int timedout = 0;
struct timeval t1, t2;
- if (c4iw_fatal_error(rdev)) {
+ if (c4iw_stopped(rdev)) {
wr_waitp->ret = -EIO;
goto out;
}
@@ -240,7 +239,7 @@ c4iw_wait_for_reply(struct c4iw_rdev *rdev, struct c4iw_wr_wait *wr_waitp,
"seconds - tid %u qpid %u\n", func,
device_get_nameunit(sc->dev), t2.tv_sec, t2.tv_usec,
hwtid, qpid);
- if (c4iw_fatal_error(rdev)) {
+ if (c4iw_stopped(rdev)) {
wr_waitp->ret = -EIO;
break;
}
@@ -904,11 +903,9 @@ int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
struct l2t_entry *l2t);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry);
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid);
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid);
int c4iw_init_ctrl_qp(struct c4iw_rdev *rdev);
-int c4iw_pblpool_create(struct c4iw_rdev *rdev);
int c4iw_rqtpool_create(struct c4iw_rdev *rdev);
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev);
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev);
void c4iw_destroy_resource(struct c4iw_resource *rscp);
int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 5b8c7391514f..ae0aa0edc17a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -56,46 +56,23 @@ mr_exceeds_hw_limits(struct c4iw_dev *dev, u64 length)
static int
_c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len,
- void *data, int wait)
+ dma_addr_t data, int wait)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_sgl *sgl;
u8 wr_len;
int ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- addr &= 0x7FFFFFF;
-
if (wait)
c4iw_init_wr_wait(&wr_wait);
- wr_len = roundup(sizeof *ulpmc + sizeof *sgl, 16);
+ wr_len = T4_WRITE_MEM_DMA_LEN;
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- (wait ? F_FW_WR_COMPL : 0));
- ulpmc->wr.wr_lo = wait ? (u64)(unsigned long)&wr_wait : 0;
- ulpmc->wr.wr_mid = cpu_to_be32(V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
- ulpmc->cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
- V_T5_ULP_MEMIO_ORDER(1) |
- V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(len>>5));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr), 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr));
-
- sgl = (struct ulptx_sgl *)(ulpmc + 1);
- sgl->cmd_nsge = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
- V_ULPTX_NSGE(1));
- sgl->len0 = cpu_to_be32(len);
- sgl->addr0 = cpu_to_be64((u64)data);
-
+ t4_write_mem_dma_wr(sc, wrtod(wr), wr_len, 0, addr, len, data,
+ wait ? (u64)(unsigned long)&wr_wait : 0);
t4_wrq_tx(sc, wr);
if (wait)
@@ -108,70 +85,32 @@ static int
_c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
struct adapter *sc = rdev->adap;
- struct ulp_mem_io *ulpmc;
- struct ulptx_idata *ulpsc;
- u8 wr_len, *to_dp, *from_dp;
+ u8 wr_len, *from_dp;
int copy_len, num_wqe, i, ret = 0;
struct c4iw_wr_wait wr_wait;
struct wrqe *wr;
- u32 cmd;
-
- cmd = cpu_to_be32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
- cmd |= cpu_to_be32(F_T5_ULP_MEMIO_IMM);
-
- addr &= 0x7FFFFFF;
CTR3(KTR_IW_CXGBE, "%s addr 0x%x len %u", __func__, addr, len);
- num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
c4iw_init_wr_wait(&wr_wait);
+ num_wqe = DIV_ROUND_UP(len, T4_MAX_INLINE_SIZE);
+ from_dp = data;
for (i = 0; i < num_wqe; i++) {
-
- copy_len = min(len, C4IW_MAX_INLINE_SIZE);
- wr_len = roundup(sizeof *ulpmc + sizeof *ulpsc +
- roundup(copy_len, T4_ULPTX_MIN_IO), 16);
+ copy_len = min(len, T4_MAX_INLINE_SIZE);
+ wr_len = T4_WRITE_MEM_INLINE_LEN(copy_len);
wr = alloc_wrqe(wr_len, &sc->sge.ctrlq[0]);
if (wr == NULL)
return -ENOMEM;
- ulpmc = wrtod(wr);
-
- memset(ulpmc, 0, wr_len);
- INIT_ULPTX_WR(ulpmc, wr_len, 0, 0);
-
- if (i == (num_wqe-1)) {
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR) |
- F_FW_WR_COMPL);
- ulpmc->wr.wr_lo =
- (__force __be64)(unsigned long) &wr_wait;
- } else
- ulpmc->wr.wr_hi = cpu_to_be32(V_FW_WR_OP(FW_ULPTX_WR));
- ulpmc->wr.wr_mid = cpu_to_be32(
- V_FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-
- ulpmc->cmd = cmd;
- ulpmc->dlen = cpu_to_be32(V_ULP_MEMIO_DATA_LEN(
- DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
- ulpmc->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(ulpmc->wr),
- 16));
- ulpmc->lock_addr = cpu_to_be32(V_ULP_MEMIO_ADDR(addr + i * 3));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = cpu_to_be32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
-
- to_dp = (u8 *)(ulpsc + 1);
- from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE;
- if (data)
- memcpy(to_dp, from_dp, copy_len);
- else
- memset(to_dp, 0, copy_len);
- if (copy_len % T4_ULPTX_MIN_IO)
- memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO -
- (copy_len % T4_ULPTX_MIN_IO));
+ t4_write_mem_inline_wr(sc, wrtod(wr), wr_len, 0, addr, copy_len,
+ from_dp, i == (num_wqe - 1) ?
+ (__force __be64)(unsigned long) &wr_wait : 0);
t4_wrq_tx(sc, wr);
- len -= C4IW_MAX_INLINE_SIZE;
- }
+ if (from_dp != NULL)
+ from_dp += T4_MAX_INLINE_SIZE;
+ addr += T4_MAX_INLINE_SIZE >> 5;
+ len -= T4_MAX_INLINE_SIZE;
+ }
ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, NULL, __func__);
return ret;
}
@@ -201,7 +140,7 @@ _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
dmalen = T4_ULPTX_MAX_DMA;
remain -= dmalen;
ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen,
- (void *)daddr, !remain);
+ daddr, !remain);
if (ret)
goto out;
addr += dmalen >> 5;
@@ -256,15 +195,15 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
u32 stag_idx;
static atomic_t key;
- if (c4iw_fatal_error(rdev))
+ if (c4iw_stopped(rdev))
return -EIO;
stag_state = stag_state > 0;
stag_idx = (*stag) >> 8;
if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
- stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
- if (!stag_idx) {
+ stag_idx = t4_stag_alloc(rdev->adap, 1);
+ if (stag_idx == T4_STAG_UNSET) {
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.fail++;
mutex_unlock(&rdev->stats.lock);
@@ -309,7 +248,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
sizeof(tpt), &tpt);
if (reset_tpt_entry) {
- c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+ t4_stag_free(rdev->adap, stag_idx, 1);
mutex_lock(&rdev->stats.lock);
rdev->stats.stag.cur -= 32;
mutex_unlock(&rdev->stats.lock);
@@ -621,6 +560,9 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
php = to_c4iw_pd(pd);
rhp = php->rhp;
+ if (__predict_false(c4iw_stopped(&rhp->rdev)))
+ return ERR_PTR(-EIO);
+
if (mr_type != IB_MR_TYPE_MEM_REG ||
max_num_sg > t4_max_fr_depth(&rhp->rdev, use_dsgl))
return ERR_PTR(-EINVAL);
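
Both write paths above advance the adapter memory address by (chunk >> 5)
because ULP_TX memory-write addresses count 32-byte units, and the inline path
splits the payload into T4_MAX_INLINE_SIZE chunks. A standalone sketch of that
loop arithmetic, with write_chunk() as a hypothetical stand-in for building and
sending one work request:

    /* Sketch: split a buffer into inline-sized writes; addr is in 32-byte units. */
    static void
    write_mem_sketch(uint32_t addr, uint32_t len, const uint8_t *data)
    {
            while (len > 0) {
                    uint32_t chunk = len < T4_MAX_INLINE_SIZE ?
                        len : T4_MAX_INLINE_SIZE;

                    write_chunk(addr, chunk, data);     /* hypothetical helper */
                    data += chunk;
                    addr += T4_MAX_INLINE_SIZE >> 5;    /* 32-byte address units */
                    len -= chunk;
            }
    }
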
diff --git a/sys/dev/cxgbe/iw_cxgbe/provider.c b/sys/dev/cxgbe/iw_cxgbe/provider.c
index 729733a040d5..511caa436969 100644
--- a/sys/dev/cxgbe/iw_cxgbe/provider.c
+++ b/sys/dev/cxgbe/iw_cxgbe/provider.c
@@ -231,6 +231,8 @@ c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
CTR4(KTR_IW_CXGBE, "%s: ibdev %p, pd %p, data %p", __func__, ibdev,
pd, udata);
rhp = (struct c4iw_dev *) ibdev;
+ if (__predict_false(c4iw_stopped(&rhp->rdev)))
+ return -EIO;
pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
if (!pdid)
return -EINVAL;
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 3aab07755101..cbf4bae00a60 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -138,6 +138,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
struct wrqe *wr;
u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;
+ if (__predict_false(c4iw_stopped(rdev)))
+ return -EIO;
+
wq->sq.qid = c4iw_get_qpid(rdev, uctx);
if (!wq->sq.qid)
return -ENOMEM;
@@ -785,6 +788,8 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
qhp = to_c4iw_qp(ibqp);
rdev = &qhp->rhp->rdev;
+ if (__predict_false(c4iw_stopped(rdev)))
+ return -EIO;
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
@@ -920,6 +925,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
u16 idx = 0;
qhp = to_c4iw_qp(ibqp);
+ if (__predict_false(c4iw_stopped(&qhp->rhp->rdev)))
+ return -EIO;
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
@@ -1319,6 +1326,8 @@ creds(struct toepcb *toep, struct inpcb *inp, size_t wrsize)
return (EINVAL);
}
txsd = &toep->txsd[toep->txsd_pidx];
+ KASSERT(howmany(wrsize, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %zu too large", __func__, howmany(wrsize, 16)));
txsd->tx_credits = howmany(wrsize, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
diff --git a/sys/dev/cxgbe/iw_cxgbe/resource.c b/sys/dev/cxgbe/iw_cxgbe/resource.c
index 644ea0c631bf..cd20f1eafdd6 100644
--- a/sys/dev/cxgbe/iw_cxgbe/resource.c
+++ b/sys/dev/cxgbe/iw_cxgbe/resource.c
@@ -59,13 +59,9 @@ static int c4iw_init_qid_table(struct c4iw_rdev *rdev)
}
/* nr_* must be power of 2 */
-int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
+int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_pdid)
{
int err = 0;
- err = c4iw_id_table_alloc(&rdev->resource.tpt_table, 0, nr_tpt, 1,
- C4IW_ID_TABLE_F_RANDOM);
- if (err)
- goto tpt_err;
err = c4iw_init_qid_table(rdev);
if (err)
goto qid_err;
@@ -77,8 +73,6 @@ int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
pdid_err:
c4iw_id_table_free(&rdev->resource.qid_table);
qid_err:
- c4iw_id_table_free(&rdev->resource.tpt_table);
- tpt_err:
return -ENOMEM;
}
@@ -243,7 +237,6 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
- c4iw_id_table_free(&rscp->tpt_table);
c4iw_id_table_free(&rscp->qid_table);
c4iw_id_table_free(&rscp->pdid_table);
}
@@ -254,12 +247,9 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
- unsigned long addr;
+ u32 addr;
- vmem_xalloc(rdev->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
- 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
- M_FIRSTFIT|M_NOWAIT, &addr);
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, (u32)addr, size);
+ addr = t4_pblpool_alloc(rdev->adap, size);
mutex_lock(&rdev->stats.lock);
if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
@@ -268,33 +258,15 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
} else
rdev->stats.pbl.fail++;
mutex_unlock(&rdev->stats.lock);
- return (u32)addr;
+ return addr;
}
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
- CTR3(KTR_IW_CXGBE, "%s addr 0x%x size %d", __func__, addr, size);
mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock);
- vmem_xfree(rdev->pbl_arena, addr, roundup(size,(1 << MIN_PBL_SHIFT)));
-}
-
-int c4iw_pblpool_create(struct c4iw_rdev *rdev)
-{
- rdev->pbl_arena = vmem_create("PBL_MEM_POOL",
- rdev->adap->vres.pbl.start,
- rdev->adap->vres.pbl.size,
- 1, 0, M_FIRSTFIT| M_NOWAIT);
- if (!rdev->pbl_arena)
- return -ENOMEM;
-
- return 0;
-}
-
-void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
-{
- vmem_destroy(rdev->pbl_arena);
+ t4_pblpool_free(rdev->adap, addr, size);
}
/* RQT Memory Manager. */
diff --git a/sys/dev/cxgbe/iw_cxgbe/t4.h b/sys/dev/cxgbe/iw_cxgbe/t4.h
index 48f85cf7965b..ffb610420640 100644
--- a/sys/dev/cxgbe/iw_cxgbe/t4.h
+++ b/sys/dev/cxgbe/iw_cxgbe/t4.h
@@ -64,7 +64,6 @@
#define T4_MAX_NUM_PD 65536
#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffffffff000 /* 4KB-8TB */
-#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
#define A_PCIE_MA_SYNC 0x30b4
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index a8b243b764c8..91a43785aaca 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/condvar.h>
+#include <sys/bitstring.h>
#define INIT_ULPTX_WRH(w, wrlen, atomic, tid) do { \
(w)->wr_hi = htonl(V_FW_WR_OP(FW_ULPTX_WR) | V_FW_WR_ATOMIC(atomic)); \
@@ -57,15 +58,6 @@
OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
} while (0)
-TAILQ_HEAD(stid_head, stid_region);
-struct listen_ctx;
-
-struct stid_region {
- TAILQ_ENTRY(stid_region) link;
- u_int used; /* # of stids used by this region */
- u_int free; /* # of contiguous stids free right after this region */
-};
-
/*
* Max # of ATIDs. The absolute HW max is larger than this but we reserve a few
* of the upper bits for use as a cookie to demux the reply.
@@ -143,14 +135,15 @@ struct tid_info {
struct mtx stid_lock __aligned(CACHE_LINE_SIZE);
struct listen_ctx **stid_tab;
+ bitstr_t *stid_bitmap;
u_int stids_in_use;
- u_int nstids_free_head; /* # of available stids at the beginning */
- struct stid_head stids;
+ bool stid_tab_stopped;
struct mtx atid_lock __aligned(CACHE_LINE_SIZE);
union aopen_entry *atid_tab;
union aopen_entry *afree;
u_int atids_in_use;
+ bool atid_alloc_stopped;
/* High priority filters and normal filters share the lock and cv. */
struct mtx ftid_lock __aligned(CACHE_LINE_SIZE);
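
With stid_region gone, server tids are tracked in a bitstring(9) bitmap, which
turns contiguous multi-stid allocation into a first-fit scan. A sketch of that
pattern using the stock sys/bitstring.h API (this is not the driver's actual
allocator, just the shape of one):

    #include <sys/bitstring.h>

    /* Sketch: first-fit allocation of n contiguous stids from a bitmap. */
    static int
    stid_alloc_sketch(bitstr_t *map, int nstids, int n)
    {
            int stid;

            bit_ffc_area(map, nstids, n, &stid); /* first run of n clear bits */
            if (stid == -1)
                    return (-1);                 /* no free range */
            bit_nset(map, stid, stid + n - 1);   /* mark the run in use */
            return (stid);
    }
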
@@ -209,12 +202,10 @@ enum {
struct adapter;
struct port_info;
struct uld_info {
- SLIST_ENTRY(uld_info) link;
- int refcount;
- int uld_id;
- int (*activate)(struct adapter *);
- int (*deactivate)(struct adapter *);
- void (*async_event)(struct adapter *);
+ int (*uld_activate)(struct adapter *);
+ int (*uld_deactivate)(struct adapter *);
+ int (*uld_stop)(struct adapter *);
+ int (*uld_restart)(struct adapter *);
};
struct tom_tunables {
@@ -238,12 +229,22 @@ struct iw_tunables {
struct tls_tunables {
int inline_keys;
- int combo_wrs;
+ union {
+ struct {
+ /* T6 only. */
+ int combo_wrs;
+ };
+ struct {
+ /* T7 only. */
+ int short_records;
+ int partial_ghash;
+ };
+ };
};
#ifdef TCP_OFFLOAD
-int t4_register_uld(struct uld_info *);
-int t4_unregister_uld(struct uld_info *);
+int t4_register_uld(struct uld_info *, int);
+int t4_unregister_uld(struct uld_info *, int);
int t4_activate_uld(struct adapter *, int);
int t4_deactivate_uld(struct adapter *, int);
int uld_active(struct adapter *, int);
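
Note that the T6-only and T7-only TLS knobs above share storage in an anonymous
union, so a consumer must branch on the chip generation before touching either
arm. A sketch of the intended access pattern (the tls pointer location and the
setup_* helpers are hypothetical):

    /* Sketch: the union arms are chip-specific; never read both. */
    if (chip_id(sc) >= CHELSIO_T7)
            setup_ghash(tls->short_records, tls->partial_ghash); /* T7 arm */
    else
            setup_combo(tls->combo_wrs);                         /* T6 arm */
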
diff --git a/sys/dev/cxgbe/osdep.h b/sys/dev/cxgbe/osdep.h
index 39675339dd2c..b8692692fd43 100644
--- a/sys/dev/cxgbe/osdep.h
+++ b/sys/dev/cxgbe/osdep.h
@@ -130,14 +130,6 @@ typedef boolean_t bool;
#define PCI_EXP_LNKSTA_NLW PCIEM_LINK_STA_WIDTH
#define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2
-static inline int
-ilog2(long x)
-{
- KASSERT(x > 0 && powerof2(x), ("%s: invalid arg %ld", __func__, x));
-
- return (flsl(x) - 1);
-}
-
static inline char *
strstrip(char *s)
{
diff --git a/sys/dev/cxgbe/t4_clip.c b/sys/dev/cxgbe/t4_clip.c
index 24f049f9dc06..e462a064847f 100644
--- a/sys/dev/cxgbe/t4_clip.c
+++ b/sys/dev/cxgbe/t4_clip.c
@@ -576,7 +576,7 @@ update_hw_clip_table(struct adapter *sc)
rc = begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4clip");
if (rc != 0)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
goto done; /* with rc = 0, we don't want to reschedule. */
while (!TAILQ_EMPTY(&sc->clip_pending)) {
ce = TAILQ_FIRST(&sc->clip_pending);
diff --git a/sys/dev/cxgbe/t4_filter.c b/sys/dev/cxgbe/t4_filter.c
index 18fa1093800f..4b583b67ba07 100644
--- a/sys/dev/cxgbe/t4_filter.c
+++ b/sys/dev/cxgbe/t4_filter.c
@@ -322,48 +322,85 @@ remove_hftid(struct adapter *sc, struct filter_entry *f)
LIST_REMOVE(f, link_tid);
}
-/*
- * Input: driver's 32b filter mode.
- * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
- */
static uint16_t
-mode_to_fconf(uint32_t mode)
+mode_to_fconf_t4(uint32_t mode)
{
uint32_t fconf = 0;
if (mode & T4_FILTER_IP_FRAGMENT)
fconf |= F_FRAGMENTATION;
-
if (mode & T4_FILTER_MPS_HIT_TYPE)
fconf |= F_MPSHITTYPE;
-
if (mode & T4_FILTER_MAC_IDX)
fconf |= F_MACMATCH;
-
if (mode & T4_FILTER_ETH_TYPE)
fconf |= F_ETHERTYPE;
-
if (mode & T4_FILTER_IP_PROTO)
fconf |= F_PROTOCOL;
-
if (mode & T4_FILTER_IP_TOS)
fconf |= F_TOS;
-
if (mode & T4_FILTER_VLAN)
fconf |= F_VLAN;
-
if (mode & T4_FILTER_VNIC)
fconf |= F_VNIC_ID;
-
if (mode & T4_FILTER_PORT)
fconf |= F_PORT;
-
if (mode & T4_FILTER_FCoE)
fconf |= F_FCOE;
return (fconf);
}
+static uint16_t
+mode_to_fconf_t7(uint32_t mode)
+{
+ uint32_t fconf = 0;
+
+ if (mode & T4_FILTER_TCPFLAGS)
+ fconf |= F_TCPFLAGS;
+ if (mode & T4_FILTER_SYNONLY)
+ fconf |= F_SYNONLY;
+ if (mode & T4_FILTER_ROCE)
+ fconf |= F_ROCE;
+ if (mode & T4_FILTER_IP_FRAGMENT)
+ fconf |= F_T7_FRAGMENTATION;
+ if (mode & T4_FILTER_MPS_HIT_TYPE)
+ fconf |= F_T7_MPSHITTYPE;
+ if (mode & T4_FILTER_MAC_IDX)
+ fconf |= F_T7_MACMATCH;
+ if (mode & T4_FILTER_ETH_TYPE)
+ fconf |= F_T7_ETHERTYPE;
+ if (mode & T4_FILTER_IP_PROTO)
+ fconf |= F_T7_PROTOCOL;
+ if (mode & T4_FILTER_IP_TOS)
+ fconf |= F_T7_TOS;
+ if (mode & T4_FILTER_VLAN)
+ fconf |= F_T7_VLAN;
+ if (mode & T4_FILTER_VNIC)
+ fconf |= F_T7_VNIC_ID;
+ if (mode & T4_FILTER_PORT)
+ fconf |= F_T7_PORT;
+ if (mode & T4_FILTER_FCoE)
+ fconf |= F_T7_FCOE;
+ if (mode & T4_FILTER_IPSECIDX)
+ fconf |= F_IPSECIDX;
+
+ return (fconf);
+}
+
+/*
+ * Input: driver's 32b filter mode.
+ * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
+ */
+static uint16_t
+mode_to_fconf(struct adapter *sc, uint32_t mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (mode_to_fconf_t7(mode));
+ else
+ return (mode_to_fconf_t4(mode));
+}
+
/*
* Input: driver's 32b filter mode.
* Returns: hardware vnic mode (ingress config) matching the input.
@@ -389,65 +426,100 @@ check_fspec_against_fconf_iconf(struct adapter *sc,
struct tp_params *tpp = &sc->params.tp;
uint32_t fconf = 0;
- if (fs->val.frag || fs->mask.frag)
- fconf |= F_FRAGMENTATION;
-
- if (fs->val.matchtype || fs->mask.matchtype)
- fconf |= F_MPSHITTYPE;
-
- if (fs->val.macidx || fs->mask.macidx)
- fconf |= F_MACMATCH;
-
- if (fs->val.ethtype || fs->mask.ethtype)
- fconf |= F_ETHERTYPE;
-
- if (fs->val.proto || fs->mask.proto)
- fconf |= F_PROTOCOL;
-
- if (fs->val.tos || fs->mask.tos)
- fconf |= F_TOS;
-
- if (fs->val.vlan_vld || fs->mask.vlan_vld)
- fconf |= F_VLAN;
-
- if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
- if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
- return (EINVAL);
- fconf |= F_VNIC_ID;
- }
-
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (fs->val.tcpflags || fs->mask.tcpflags)
+ fconf |= F_TCPFLAGS;
+ if (fs->val.synonly || fs->mask.synonly)
+ fconf |= F_SYNONLY;
+ if (fs->val.roce || fs->mask.roce)
+ fconf |= F_ROCE;
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_T7_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_T7_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_T7_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_T7_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_T7_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_T7_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_T7_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
#ifdef notyet
- if (fs->val.encap_vld || fs->mask.encap_vld) {
- if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_T7_VNIC_ID;
+ }
+#endif
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_T7_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_T7_FCOE;
+ if (fs->val.ipsecidx || fs->mask.ipsecidx)
+ fconf |= F_IPSECIDX;
+ } else {
+ if (fs->val.tcpflags || fs->mask.tcpflags ||
+ fs->val.synonly || fs->mask.synonly ||
+ fs->val.roce || fs->mask.roce ||
+ fs->val.ipsecidx || fs->mask.ipsecidx)
return (EINVAL);
- fconf |= F_VNIC_ID;
- }
+ if (fs->val.frag || fs->mask.frag)
+ fconf |= F_FRAGMENTATION;
+ if (fs->val.matchtype || fs->mask.matchtype)
+ fconf |= F_MPSHITTYPE;
+ if (fs->val.macidx || fs->mask.macidx)
+ fconf |= F_MACMATCH;
+ if (fs->val.ethtype || fs->mask.ethtype)
+ fconf |= F_ETHERTYPE;
+ if (fs->val.proto || fs->mask.proto)
+ fconf |= F_PROTOCOL;
+ if (fs->val.tos || fs->mask.tos)
+ fconf |= F_TOS;
+ if (fs->val.vlan_vld || fs->mask.vlan_vld)
+ fconf |= F_VLAN;
+ if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+ if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
+#ifdef notyet
+ if (fs->val.encap_vld || fs->mask.encap_vld) {
+ if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN);
+ return (EINVAL);
+ fconf |= F_VNIC_ID;
+ }
#endif
-
- if (fs->val.iport || fs->mask.iport)
- fconf |= F_PORT;
-
- if (fs->val.fcoe || fs->mask.fcoe)
- fconf |= F_FCOE;
-
+ if (fs->val.iport || fs->mask.iport)
+ fconf |= F_PORT;
+ if (fs->val.fcoe || fs->mask.fcoe)
+ fconf |= F_FCOE;
+ }
if ((tpp->filter_mode | fconf) != tpp->filter_mode)
return (E2BIG);
return (0);
}
-/*
- * Input: hardware filter configuration (filter mode/mask, ingress config).
- * Input: driver's 32b filter mode matching the input.
- */
static uint32_t
-fconf_to_mode(uint16_t hwmode, int vnic_mode)
+fconf_to_mode_t4(uint16_t hwmode, int vnic_mode)
{
uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
@@ -488,6 +560,69 @@ fconf_to_mode(uint16_t hwmode, int vnic_mode)
return (mode);
}
+static uint32_t
+fconf_to_mode_t7(uint16_t hwmode, int vnic_mode)
+{
+ uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
+ T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
+
+ if (hwmode & F_TCPFLAGS)
+ mode |= T4_FILTER_TCPFLAGS;
+ if (hwmode & F_SYNONLY)
+ mode |= T4_FILTER_SYNONLY;
+ if (hwmode & F_ROCE)
+ mode |= T4_FILTER_ROCE;
+ if (hwmode & F_T7_FRAGMENTATION)
+ mode |= T4_FILTER_IP_FRAGMENT;
+ if (hwmode & F_T7_MPSHITTYPE)
+ mode |= T4_FILTER_MPS_HIT_TYPE;
+ if (hwmode & F_T7_MACMATCH)
+ mode |= T4_FILTER_MAC_IDX;
+ if (hwmode & F_T7_ETHERTYPE)
+ mode |= T4_FILTER_ETH_TYPE;
+ if (hwmode & F_T7_PROTOCOL)
+ mode |= T4_FILTER_IP_PROTO;
+ if (hwmode & F_T7_TOS)
+ mode |= T4_FILTER_IP_TOS;
+ if (hwmode & F_T7_VLAN)
+ mode |= T4_FILTER_VLAN;
+ if (hwmode & F_T7_VNIC_ID)
+ mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
+ if (hwmode & F_T7_PORT)
+ mode |= T4_FILTER_PORT;
+ if (hwmode & F_T7_FCOE)
+ mode |= T4_FILTER_FCoE;
+ if (hwmode & F_IPSECIDX)
+ mode |= T4_FILTER_IPSECIDX;
+
+ switch (vnic_mode) {
+ case FW_VNIC_MODE_PF_VF:
+ mode |= T4_FILTER_IC_VNIC;
+ break;
+ case FW_VNIC_MODE_ENCAP_EN:
+ mode |= T4_FILTER_IC_ENCAP;
+ break;
+ case FW_VNIC_MODE_OUTER_VLAN:
+ default:
+ break;
+ }
+
+ return (mode);
+}
+
+/*
+ * Input: hardware filter configuration (filter mode/mask, ingress config).
+ * Output: driver's 32b filter mode matching the input.
+ */
+static inline uint32_t
+fconf_to_mode(struct adapter *sc, uint16_t hwmode, int vnic_mode)
+{
+ if (chip_id(sc) >= CHELSIO_T7)
+ return (fconf_to_mode_t7(hwmode, vnic_mode));
+ else
+ return (fconf_to_mode_t4(hwmode, vnic_mode));
+}
+
int
get_filter_mode(struct adapter *sc, uint32_t *mode)
{
@@ -499,7 +634,7 @@ get_filter_mode(struct adapter *sc, uint32_t *mode)
/* Non-zero incoming value in mode means "hashfilter mode". */
filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
- *mode = fconf_to_mode(filter_mode, tp->vnic_mode);
+ *mode = fconf_to_mode(sc, filter_mode, tp->vnic_mode);
return (0);
}
@@ -512,7 +647,7 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
uint16_t fconf;
iconf = mode_to_iconf(mode);
- fconf = mode_to_fconf(mode);
+ fconf = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
return (0); /* Nothing to do */
@@ -520,7 +655,7 @@ set_filter_mode(struct adapter *sc, uint32_t mode)
if (rc)
return (rc);
- if (hw_off_limits(sc)) {
+ if (!hw_all_ok(sc)) {
rc = ENXIO;
goto done;
}
@@ -554,7 +689,7 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
uint16_t fmask;
iconf = mode_to_iconf(mode);
- fmask = mode_to_fconf(mode);
+ fmask = mode_to_fconf(sc, mode);
if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
return (0); /* Nothing to do */
@@ -571,7 +706,7 @@ set_filter_mask(struct adapter *sc, uint32_t mode)
if (rc)
return (rc);
- if (hw_off_limits(sc)) {
+ if (!hw_all_ok(sc)) {
rc = ENXIO;
goto done;
}
@@ -602,7 +737,7 @@ get_filter_hits(struct adapter *sc, uint32_t tid)
tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
hits = 0;
else if (is_t4(sc)) {
uint64_t t;
@@ -811,71 +946,138 @@ hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
struct tp_params *tp = &sc->params.tp;
uint16_t fmask;
- *ftuple = fmask = 0;
-
/*
* Initialize each of the fields which we care about which are present
* in the Compressed Filter Tuple.
*/
- if (tp->vlan_shift >= 0 && fs->mask.vlan) {
- *ftuple |= (uint64_t)(F_FT_VLAN_VLD | fs->val.vlan) <<
- tp->vlan_shift;
- fmask |= F_VLAN;
- }
-
- if (tp->port_shift >= 0 && fs->mask.iport) {
- *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
- fmask |= F_PORT;
- }
-
- if (tp->protocol_shift >= 0 && fs->mask.proto) {
- *ftuple |= (uint64_t)fs->val.proto << tp->protocol_shift;
- fmask |= F_PROTOCOL;
- }
-
- if (tp->tos_shift >= 0 && fs->mask.tos) {
- *ftuple |= (uint64_t)(fs->val.tos) << tp->tos_shift;
- fmask |= F_TOS;
- }
-
- if (tp->vnic_shift >= 0 && fs->mask.vnic) {
- /* vnic_mode was already validated. */
- if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
- MPASS(fs->mask.pfvf_vld);
- else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
- MPASS(fs->mask.ovlan_vld);
+#define SFF(V, S) ((uint64_t)(V) << S) /* Shifted Filter Field. */
+ *ftuple = fmask = 0;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ if (tp->ipsecidx_shift >= 0 && fs->mask.ipsecidx) {
+ *ftuple |= SFF(fs->val.ipsecidx, tp->ipsecidx_shift);
+ fmask |= F_IPSECIDX;
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_T7_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_T7_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
#ifdef notyet
- else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
- MPASS(fs->mask.encap_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
#endif
- *ftuple |= ((1ULL << 16) | fs->val.vnic) << tp->vnic_shift;
- fmask |= F_VNIC_ID;
- }
-
- if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
- *ftuple |= (uint64_t)(fs->val.macidx) << tp->macmatch_shift;
- fmask |= F_MACMATCH;
- }
-
- if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
- *ftuple |= (uint64_t)(fs->val.ethtype) << tp->ethertype_shift;
- fmask |= F_ETHERTYPE;
- }
-
- if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
- *ftuple |= (uint64_t)(fs->val.matchtype) << tp->matchtype_shift;
- fmask |= F_MPSHITTYPE;
- }
-
- if (tp->frag_shift >= 0 && fs->mask.frag) {
- *ftuple |= (uint64_t)(fs->val.frag) << tp->frag_shift;
- fmask |= F_FRAGMENTATION;
- }
-
- if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
- *ftuple |= (uint64_t)(fs->val.fcoe) << tp->fcoe_shift;
- fmask |= F_FCOE;
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_T7_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_T7_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_T7_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_T7_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_T7_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_T7_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_T7_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_T7_FRAGMENTATION;
+ }
+ if (tp->roce_shift >= 0 && fs->mask.roce) {
+ *ftuple |= SFF(fs->val.roce, tp->roce_shift);
+ fmask |= F_ROCE;
+ }
+ if (tp->synonly_shift >= 0 && fs->mask.synonly) {
+ *ftuple |= SFF(fs->val.synonly, tp->synonly_shift);
+ fmask |= F_SYNONLY;
+ }
+ if (tp->tcpflags_shift >= 0 && fs->mask.tcpflags) {
+ *ftuple |= SFF(fs->val.tcpflags, tp->tcpflags_shift);
+ fmask |= F_TCPFLAGS;
+ }
+ } else {
+ if (fs->mask.ipsecidx || fs->mask.roce || fs->mask.synonly ||
+ fs->mask.tcpflags) {
+ MPASS(tp->ipsecidx_shift == -1);
+ MPASS(tp->roce_shift == -1);
+ MPASS(tp->synonly_shift == -1);
+ MPASS(tp->tcpflags_shift == -1);
+ return (EINVAL);
+ }
+ if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
+ *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
+ fmask |= F_FCOE;
+ }
+ if (tp->port_shift >= 0 && fs->mask.iport) {
+ *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
+ fmask |= F_PORT;
+ }
+ if (tp->vnic_shift >= 0 && fs->mask.vnic) {
+ /* vnic_mode was already validated. */
+ if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
+ MPASS(fs->mask.pfvf_vld);
+ else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
+ MPASS(fs->mask.ovlan_vld);
+#ifdef notyet
+ else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
+ MPASS(fs->mask.encap_vld);
+#endif
+ *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
+ fmask |= F_VNIC_ID;
+ }
+ if (tp->vlan_shift >= 0 && fs->mask.vlan) {
+ *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
+ fmask |= F_VLAN;
+ }
+ if (tp->tos_shift >= 0 && fs->mask.tos) {
+ *ftuple |= SFF(fs->val.tos, tp->tos_shift);
+ fmask |= F_TOS;
+ }
+ if (tp->protocol_shift >= 0 && fs->mask.proto) {
+ *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
+ fmask |= F_PROTOCOL;
+ }
+ if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
+ *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
+ fmask |= F_ETHERTYPE;
+ }
+ if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
+ *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
+ fmask |= F_MACMATCH;
+ }
+ if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
+ *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
+ fmask |= F_MPSHITTYPE;
+ }
+ if (tp->frag_shift >= 0 && fs->mask.frag) {
+ *ftuple |= SFF(fs->val.frag, tp->frag_shift);
+ fmask |= F_FRAGMENTATION;
+ }
}
+#undef SFF
/* A hashfilter must conform to the hardware filter mask. */
if (fmask != tp->filter_mask)
@@ -976,7 +1178,7 @@ set_filter(struct adapter *sc, struct t4_filter *t)
if (rc)
return (rc);
- if (hw_off_limits(sc)) {
+ if (!hw_all_ok(sc)) {
rc = ENXIO;
goto done;
}
@@ -1195,11 +1397,19 @@ set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
return (ENOMEM);
bzero(req, sizeof(*req));
INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
- if (no_reply == 0) {
- req->reply_ctrl = htobe16(V_QUEUENO(sc->sge.fwq.abs_id) |
- V_NO_REPLY(0));
- } else
- req->reply_ctrl = htobe16(V_NO_REPLY(1));
+ if (no_reply) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = sc->sge.fwq.abs_id;
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
req->mask = htobe64(mask);
req->val = htobe64(val);
@@ -1594,7 +1804,7 @@ static int
act_open_cpl_len16(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
howmany(sizeof (struct cpl_act_open_req), 16),
howmany(sizeof (struct cpl_act_open_req6), 16)
@@ -1607,10 +1817,14 @@ act_open_cpl_len16(struct adapter *sc, int isipv6)
howmany(sizeof (struct cpl_t6_act_open_req), 16),
howmany(sizeof (struct cpl_t6_act_open_req6), 16)
},
+ {
+ howmany(sizeof (struct cpl_t7_act_open_req), 16),
+ howmany(sizeof (struct cpl_t7_act_open_req6), 16)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
@@ -1698,40 +1912,6 @@ done:
return (rc);
}
-/* SET_TCB_FIELD sent as a ULP command looks like this */
-#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
-
-static void *
-mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
- uint64_t val, uint32_t tid, uint32_t qid)
-{
- struct ulptx_idata *ulpsc;
- struct cpl_set_tcb_field_core *req;
-
- ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
- ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = htobe32(sizeof(*req));
-
- req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
- OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htobe16(V_NO_REPLY(1) | V_QUEUENO(qid));
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
-
- ulpsc = (struct ulptx_idata *)(req + 1);
- if (LEN__SET_TCB_FIELD_ULP % 16) {
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
- ulpsc->len = htobe32(0);
- return (ulpsc + 1);
- }
- return (ulpsc);
-}
-
/* ABORT_REQ sent as a ULP command looks like this */
#define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))
@@ -1807,14 +1987,15 @@ del_hashfilter_wrlen(void)
}
static void
-mk_del_hashfilter_wr(int tid, struct work_request_hdr *wrh, int wrlen, int qid)
+mk_del_hashfilter_wr(struct adapter *sc, int tid, struct work_request_hdr *wrh,
+ int wrlen, int qid)
{
struct ulp_txpkt *ulpmc;
INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_RSS_INFO,
- V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid), tid, 0);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, tid, W_TCB_RSS_INFO,
+ V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid));
ulpmc = mk_abort_req_ulp(ulpmc, tid);
ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
}
@@ -1857,7 +2038,7 @@ del_hashfilter(struct adapter *sc, struct t4_filter *t)
goto done;
}
- mk_del_hashfilter_wr(t->idx, wr, wrlen, sc->sge.fwq.abs_id);
+ mk_del_hashfilter_wr(sc, t->idx, wr, wrlen, sc->sge.fwq.abs_id);
f->locked = 1;
f->pending = 1;
commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
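
The SFF() macro introduced above simply left-shifts each enabled field to its
chip-programmed position in the 64-bit compressed filter tuple. A worked sketch
with made-up shift values (real offsets come from the tp->*_shift fields at
runtime):

    static uint64_t
    ntuple_sketch(void)
    {
            /* The shifts (0 and 8) are illustrative, not real hardware layout. */
            const int protocol_shift = 0, vlan_shift = 8;
            uint64_t ftuple = 0;

            ftuple |= (uint64_t)IPPROTO_TCP << protocol_shift;
            /* VLAN 100 with the valid bit, as in SFF(F_FT_VLAN_VLD | vlan, shift). */
            ftuple |= (uint64_t)(F_FT_VLAN_VLD | 100) << vlan_shift;
            return (ftuple);
    }
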
diff --git a/sys/dev/cxgbe/t4_ioctl.h b/sys/dev/cxgbe/t4_ioctl.h
index ba9a17dbaddf..f7c8ee24d596 100644
--- a/sys/dev/cxgbe/t4_ioctl.h
+++ b/sys/dev/cxgbe/t4_ioctl.h
@@ -64,6 +64,7 @@ enum {
T4_SET_FILTER_MASK, /* set filter mask (hashfilter mode) */
T4_HOLD_CLIP_ADDR, /* add ref on an IP in the CLIP */
T4_RELEASE_CLIP_ADDR, /* remove ref from an IP in the CLIP */
+ T4_GET_SGE_CTXT, /* get SGE context for a queue */
};
struct t4_reg {
@@ -119,6 +120,10 @@ struct t4_i2c_data {
#define T4_FILTER_MAC_IDX 0x2000 /* MPS MAC address match index */
#define T4_FILTER_MPS_HIT_TYPE 0x4000 /* MPS match type */
#define T4_FILTER_IP_FRAGMENT 0x8000 /* IP fragment */
+#define T4_FILTER_IPSECIDX 0x10000
+#define T4_FILTER_ROCE 0x20000
+#define T4_FILTER_SYNONLY 0x40000
+#define T4_FILTER_TCPFLAGS 0x80000
/*
* T4_FILTER_VNIC's real meaning depends on the ingress config.
*/
@@ -199,6 +204,10 @@ struct t4_filter_tuple {
uint32_t vlan_vld:1; /* VLAN valid */
uint32_t ovlan_vld:1; /* outer VLAN tag valid, value in "vnic" */
uint32_t pfvf_vld:1; /* VNIC id (PF/VF) valid, value in "vnic" */
+ uint32_t roce:1;
+ uint32_t synonly:1;
+ uint32_t tcpflags:6;
+ uint32_t ipsecidx:12;
};
struct t4_filter_specification {
@@ -322,6 +331,7 @@ struct t4_sched_queue {
};
#define T4_SGE_CONTEXT_SIZE 24
+#define T7_SGE_CONTEXT_SIZE 28
enum {
SGE_CONTEXT_EGRESS,
SGE_CONTEXT_INGRESS,
@@ -335,6 +345,12 @@ struct t4_sge_context {
uint32_t data[T4_SGE_CONTEXT_SIZE / 4];
};
+struct t4_sge_ctxt {
+ uint32_t mem_id;
+ uint32_t cid;
+ uint32_t data[T7_SGE_CONTEXT_SIZE / 4];
+};
+
struct t4_mem_range {
uint32_t addr;
uint32_t len;
@@ -444,4 +460,5 @@ struct t4_clip_addr {
#define CHELSIO_T4_SET_FILTER_MASK _IOW('f', T4_SET_FILTER_MASK, uint32_t)
#define CHELSIO_T4_HOLD_CLIP_ADDR _IOW('f', T4_HOLD_CLIP_ADDR, struct t4_clip_addr)
#define CHELSIO_T4_RELEASE_CLIP_ADDR _IOW('f', T4_RELEASE_CLIP_ADDR, struct t4_clip_addr)
+#define CHELSIO_T4_GET_SGE_CTXT _IOWR('f', T4_GET_SGE_CTXT, struct t4_sge_ctxt)
#endif
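
CHELSIO_T4_GET_SGE_CTXT is a read-write ioctl: userspace fills in mem_id and
cid and the driver returns up to T7_SGE_CONTEXT_SIZE bytes of context data. A
hedged userspace sketch, assuming mem_id selects the context type and that the
adapter's nexus node is /dev/t4nex0 (both assumptions, not confirmed by this
diff):

    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include "t4_ioctl.h"

    int
    main(void)
    {
            struct t4_sge_ctxt ctxt = { .mem_id = SGE_CONTEXT_EGRESS, .cid = 0 };
            int fd = open("/dev/t4nex0", O_RDWR);   /* assumed device node */

            if (fd < 0 || ioctl(fd, CHELSIO_T4_GET_SGE_CTXT, &ctxt) != 0) {
                    perror("CHELSIO_T4_GET_SGE_CTXT");
                    return (1);
            }
            printf("context word 0: 0x%08x\n", ctxt.data[0]);
            return (0);
    }
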
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index 7b5e0cb1af4e..452ebaaf0172 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -29,8 +29,12 @@
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
+#include <sys/socket.h>
#include <sys/systm.h>
+#include <sys/iov.h>
#include <dev/pci/pcivar.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
#ifdef PCI_IOV
#include <sys/nv.h>
@@ -95,15 +99,15 @@ struct {
{0x6002, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
{0x6003, "Chelsio T6425-CR"}, /* 4 x 10/25G */
{0x6004, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
- {0x6005, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
- {0x6006, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
+ {0x6005, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */
+ {0x6006, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */
{0x6007, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
{0x6008, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
{0x6009, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
{0x600d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
{0x6010, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
{0x6011, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
- {0x6014, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
+ {0x6014, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */
{0x6015, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
/* Custom */
@@ -115,6 +119,28 @@ struct {
{0x6085, "Chelsio T6240-SO 85"},
{0x6086, "Chelsio T6225-SO-CR 86"},
{0x6087, "Chelsio T6225-CR 87"},
+}, t7iov_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7000, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7001, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7002, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7003, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7004, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7005, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7006, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7007, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7008, "Chelsio T71400"}, /* 1 x 400G, nomem */
+ {0x7009, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x700a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x700b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x700c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x700d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x700e, "Chelsio S7450-OCP3"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x700f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7010, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7011, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ {0x7080, "Custom T7"},
};
static inline uint32_t
@@ -187,6 +213,26 @@ t6iov_probe(device_t dev)
}
static int
+chiov_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ if (pci_get_vendor(dev) != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7iov_pciids); i++) {
+ if (d == t7iov_pciids[i].device) {
+ device_set_desc(dev, t7iov_pciids[i].desc);
+ device_quiet(dev);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
+static int
t4iov_attach(device_t dev)
{
struct t4iov_softc *sc;
@@ -257,6 +303,7 @@ t4iov_attach_child(device_t dev)
pf_schema = pci_iov_schema_alloc_node();
vf_schema = pci_iov_schema_alloc_node();
pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_vlan(vf_schema, "vlan", 0, 0);
error = pci_iov_attach_name(dev, pf_schema, vf_schema, "%s",
device_get_nameunit(pdev));
if (error) {
@@ -336,14 +383,15 @@ t4iov_add_vf(device_t dev, uint16_t vfnum, const struct nvlist *config)
size_t size;
int rc;
+ sc = device_get_softc(dev);
+ MPASS(sc->sc_attached);
+ MPASS(sc->sc_main != NULL);
+ adap = device_get_softc(sc->sc_main);
+
if (nvlist_exists_binary(config, "mac-addr")) {
mac = nvlist_get_binary(config, "mac-addr", &size);
bcopy(mac, ma, ETHER_ADDR_LEN);
- sc = device_get_softc(dev);
- MPASS(sc->sc_attached);
- MPASS(sc->sc_main != NULL);
- adap = device_get_softc(sc->sc_main);
if (begin_synchronized_op(adap, NULL, SLEEP_OK | INTR_OK,
"t4vfma") != 0)
return (ENXIO);
@@ -358,6 +406,29 @@ t4iov_add_vf(device_t dev, uint16_t vfnum, const struct nvlist *config)
}
}
+ if (nvlist_exists_number(config, "vlan")) {
+ uint16_t vlan = nvlist_get_number(config, "vlan");
+
+ /* We can't restrict to VID 0 */
+ if (vlan == DOT1Q_VID_NULL)
+ return (ENOTSUP);
+
+ if (vlan == VF_VLAN_TRUNK)
+ vlan = DOT1Q_VID_NULL;
+
+ if (begin_synchronized_op(adap, NULL, SLEEP_OK | INTR_OK,
+ "t4vfvl") != 0)
+ return (ENXIO);
+ rc = t4_set_vlan_acl(adap, sc->pf, vfnum + 1, vlan);
+ end_synchronized_op(adap, 0);
+ if (rc != 0) {
+ device_printf(dev,
+ "Failed to set VF%d VLAN to %d, rc = %d\n",
+ vfnum, vlan, rc);
+ return (rc);
+ }
+ }
+
return (0);
}
#endif
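
With the "vlan" schema property added above, a VF's VLAN can be requested from
iovctl(8) at VF creation time. A hypothetical /etc/iovctl.conf fragment (device
name and values illustrative):

    PF {
            device : "t6iov0";
            num_vfs : 2;
    }

    VF-0 {
            passthrough : true;
            mac-addr : "02:00:00:00:00:01";
            vlan : 100;     # tag all VF-0 traffic with VLAN 100
    }
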
@@ -431,6 +502,28 @@ static driver_t t6iov_driver = {
sizeof(struct t4iov_softc)
};
+static device_method_t chiov_methods[] = {
+ DEVMETHOD(device_probe, chiov_probe),
+ DEVMETHOD(device_attach, t4iov_attach),
+ DEVMETHOD(device_detach, t4iov_detach),
+
+#ifdef PCI_IOV
+ DEVMETHOD(pci_iov_init, t4iov_iov_init),
+ DEVMETHOD(pci_iov_uninit, t4iov_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, t4iov_add_vf),
+#endif
+
+ DEVMETHOD(t4_attach_child, t4iov_attach_child),
+ DEVMETHOD(t4_detach_child, t4iov_detach_child),
+
+ DEVMETHOD_END
+};
+
+static driver_t chiov_driver = {
+ "chiov",
+ chiov_methods,
+ sizeof(struct t4iov_softc)
+};
DRIVER_MODULE(t4iov, pci, t4iov_driver, 0, 0);
MODULE_VERSION(t4iov, 1);
@@ -439,3 +532,6 @@ MODULE_VERSION(t5iov, 1);
DRIVER_MODULE(t6iov, pci, t6iov_driver, 0, 0);
MODULE_VERSION(t6iov, 1);
+
+DRIVER_MODULE(chiov, pci, chiov_driver, 0, 0);
+MODULE_VERSION(chiov, 1);
diff --git a/sys/dev/cxgbe/t4_l2t.c b/sys/dev/cxgbe/t4_l2t.c
index 005dce826ccb..5f9c26a0f720 100644
--- a/sys/dev/cxgbe/t4_l2t.c
+++ b/sys/dev/cxgbe/t4_l2t.c
@@ -73,7 +73,8 @@ t4_alloc_l2e(struct l2t_data *d)
struct l2t_entry *end, *e, **p;
rw_assert(&d->lock, RA_WLOCKED);
-
+ if (__predict_false(d->l2t_stopped))
+ return (NULL);
if (!atomic_load_acq_int(&d->nfree))
return (NULL);
@@ -118,7 +119,7 @@ find_or_alloc_l2e(struct l2t_data *d, uint16_t vlan, uint8_t port, uint8_t *dmac
first_free = e;
} else if (e->state == L2T_STATE_SWITCHING &&
memcmp(e->dmac, dmac, ETHER_ADDR_LEN) == 0 &&
- e->vlan == vlan && e->lport == port)
+ e->vlan == vlan && e->hw_port == port)
return (e); /* Found existing entry that matches. */
}
@@ -155,7 +156,7 @@ mk_write_l2e(struct adapter *sc, struct l2t_entry *e, int sync, int reply,
INIT_TP_WR(req, 0);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, idx |
V_SYNC_WR(sync) | V_TID_QID(e->iqid)));
- req->params = htons(V_L2T_W_PORT(e->lport) | V_L2T_W_NOREPLY(!reply));
+ req->params = htons(V_L2T_W_PORT(e->hw_port) | V_L2T_W_NOREPLY(!reply));
req->l2t_idx = htons(idx);
req->vlan = htons(e->vlan);
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
@@ -226,7 +227,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
e = &d->l2tab[i];
if (e->state != L2T_STATE_TLS)
continue;
- if (e->vlan == vlan && e->lport == port &&
+ if (e->vlan == vlan && e->hw_port == port &&
e->wrq == (struct sge_wrq *)txq &&
memcmp(e->dmac, eth_addr, ETHER_ADDR_LEN) == 0) {
if (atomic_fetchadd_int(&e->refcnt, 1) == 0) {
@@ -262,7 +263,7 @@ t4_l2t_alloc_tls(struct adapter *sc, struct sge_txq *txq, void *dst,
/* Initialize the entry. */
e->state = L2T_STATE_TLS;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
e->iqid = sc->sge.fwq.abs_id;
e->wrq = (struct sge_wrq *)txq;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
@@ -291,7 +292,10 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
int rc;
rw_wlock(&d->lock);
- e = find_or_alloc_l2e(d, vlan, port, eth_addr);
+ if (__predict_false(d->l2t_stopped))
+ e = NULL;
+ else
+ e = find_or_alloc_l2e(d, vlan, port, eth_addr);
if (e) {
if (atomic_load_acq_int(&e->refcnt) == 0) {
mtx_lock(&e->lock); /* avoid race with t4_l2t_free */
@@ -299,7 +303,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e->iqid = sc->sge.fwq.abs_id;
e->state = L2T_STATE_SWITCHING;
e->vlan = vlan;
- e->lport = port;
+ e->hw_port = port;
memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
atomic_store_rel_int(&e->refcnt, 1);
atomic_subtract_int(&d->nfree, 1);
@@ -309,7 +313,7 @@ t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
e = NULL;
} else {
MPASS(e->vlan == vlan);
- MPASS(e->lport == port);
+ MPASS(e->hw_port == port);
atomic_add_int(&e->refcnt, 1);
}
}
@@ -333,6 +337,7 @@ t4_init_l2t(struct adapter *sc, int flags)
return (ENOMEM);
d->l2t_size = l2t_size;
+ d->l2t_stopped = false;
d->rover = d->l2tab;
atomic_store_rel_int(&d->nfree, l2t_size);
rw_init(&d->lock, "L2T");
@@ -353,8 +358,9 @@ t4_init_l2t(struct adapter *sc, int flags)
}
int
-t4_free_l2t(struct l2t_data *d)
+t4_free_l2t(struct adapter *sc)
{
+ struct l2t_data *d = sc->l2t;
int i;
for (i = 0; i < d->l2t_size; i++)
@@ -366,17 +372,50 @@ t4_free_l2t(struct l2t_data *d)
}
int
+t4_stop_l2t(struct adapter *sc)
+{
+ struct l2t_data *d = sc->l2t;
+
+ if (d == NULL)
+ return (0);
+ rw_wlock(&d->lock);
+ d->l2t_stopped = true;
+ rw_wunlock(&d->lock);
+
+ return (0);
+}
+
+int
+t4_restart_l2t(struct adapter *sc)
+{
+ struct l2t_data *d = sc->l2t;
+
+ if (d == NULL)
+ return (0);
+ rw_wlock(&d->lock);
+ d->l2t_stopped = false;
+ rw_wunlock(&d->lock);
+
+ return (0);
+}
+
+int
do_l2t_write_rpl(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
+ struct adapter *sc = iq->adapter;
const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
- unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid % L2T_SIZE;
+ const u_int hwidx = GET_TID(rpl) & ~(F_SYNC_WR | V_TID_QID(M_TID_QID));
+ const bool sync = GET_TID(rpl) & F_SYNC_WR;
+
+ MPASS(iq->abs_id == G_TID_QID(GET_TID(rpl)));
- if (__predict_false(rpl->status != CPL_ERR_NONE)) {
- log(LOG_ERR,
- "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
- rpl->status, idx);
+ if (__predict_false(hwidx < sc->vres.l2t.start) ||
+ __predict_false(hwidx >= sc->vres.l2t.start + sc->vres.l2t.size) ||
+ __predict_false(rpl->status != CPL_ERR_NONE)) {
+ CH_ERR(sc, "%s: hwidx %u, rpl %u, sync %u; L2T st %u, sz %u\n",
+ __func__, hwidx, rpl->status, sync, sc->vres.l2t.start,
+ sc->vres.l2t.size);
return (EINVAL);
}
@@ -449,7 +488,7 @@ sysctl_l2t(SYSCTL_HANDLER_ARGS)
" %u %2u %c %5u %s",
e->idx, ip, e->dmac[0], e->dmac[1], e->dmac[2],
e->dmac[3], e->dmac[4], e->dmac[5],
- e->vlan & 0xfff, vlan_prio(e), e->lport,
+ e->vlan & 0xfff, vlan_prio(e), e->hw_port,
l2e_state(e), atomic_load_acq_int(&e->refcnt),
e->ifp ? if_name(e->ifp) : "-");
skip:
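
The reworked do_l2t_write_rpl() recovers the hardware slot from the reply by
masking the sync flag and the embedded reply-queue id out of GET_TID(). A
sketch of that decode, mirroring the handler body with the same field macros:

    /* Sketch: the CPL_L2T_WRITE_REQ tid field multiplexes three values. */
    u_int tid   = GET_TID(rpl);
    u_int hwidx = tid & ~(F_SYNC_WR | V_TID_QID(M_TID_QID)); /* L2T slot */
    u_int qid   = G_TID_QID(tid);                            /* reply iq */
    bool  sync  = (tid & F_SYNC_WR) != 0;                    /* sync write? */
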
diff --git a/sys/dev/cxgbe/t4_l2t.h b/sys/dev/cxgbe/t4_l2t.h
index be6337a60eab..989d2d5ec8f3 100644
--- a/sys/dev/cxgbe/t4_l2t.h
+++ b/sys/dev/cxgbe/t4_l2t.h
@@ -35,8 +35,6 @@
#define V_SYNC_WR(x) ((x) << S_SYNC_WR)
#define F_SYNC_WR V_SYNC_WR(1)
-enum { L2T_SIZE = 4096 }; /* # of L2T entries */
-
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
@@ -64,8 +62,7 @@ struct l2t_entry {
uint32_t addr[4]; /* next hop IP or IPv6 address */
uint32_t iqid; /* iqid for reply to write_l2e */
struct sge_wrq *wrq; /* queue to use for write_l2e */
- if_t ifp; /* outgoing interface */
- uint16_t smt_idx; /* SMT index */
+ if_t ifp; /* outgoing interface */
uint16_t vlan; /* VLAN TCI (id: 0-11, prio: 13-15) */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
@@ -74,13 +71,14 @@ struct l2t_entry {
volatile int refcnt; /* entry reference count */
uint16_t hash; /* hash bucket the entry is on */
uint8_t ipv6; /* entry is for an IPv6 address */
- uint8_t lport; /* associated offload logical port */
+ uint8_t hw_port; /* associated hardware port idx */
uint8_t dmac[ETHER_ADDR_LEN]; /* next hop's MAC address */
};
struct l2t_data {
struct rwlock lock;
u_int l2t_size;
+ bool l2t_stopped;
volatile int nfree; /* number of free entries */
struct l2t_entry *rover;/* starting point for next allocation */
struct l2t_entry l2tab[];
@@ -88,14 +86,14 @@ struct l2t_data {
int t4_init_l2t(struct adapter *, int);
-int t4_free_l2t(struct l2t_data *);
+int t4_free_l2t(struct adapter *);
+int t4_stop_l2t(struct adapter *);
+int t4_restart_l2t(struct adapter *);
struct l2t_entry *t4_alloc_l2e(struct l2t_data *);
struct l2t_entry *t4_l2t_alloc_switching(struct adapter *, uint16_t, uint8_t,
uint8_t *);
struct l2t_entry *t4_l2t_alloc_tls(struct adapter *, struct sge_txq *,
void *, int *, uint16_t, uint8_t, uint8_t *);
-int t4_l2t_set_switching(struct adapter *, struct l2t_entry *, uint16_t,
- uint8_t, uint8_t *);
int t4_write_l2e(struct l2t_entry *, int);
int do_l2t_write_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 185cc1140486..22d2f504c257 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -45,10 +44,8 @@
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
-#include <sys/pciio.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
-#include <dev/pci/pci_private.h>
#include <sys/firmware.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
@@ -243,6 +240,45 @@ static driver_t vcc_driver = {
sizeof(struct vi_info)
};
+/* T7+ bus driver interface */
+static int ch_probe(device_t);
+static device_method_t ch_methods[] = {
+ DEVMETHOD(device_probe, ch_probe),
+ DEVMETHOD(device_attach, t4_attach),
+ DEVMETHOD(device_detach, t4_detach),
+ DEVMETHOD(device_suspend, t4_suspend),
+ DEVMETHOD(device_resume, t4_resume),
+
+ DEVMETHOD(bus_child_location, t4_child_location),
+ DEVMETHOD(bus_reset_prepare, t4_reset_prepare),
+ DEVMETHOD(bus_reset_post, t4_reset_post),
+
+ DEVMETHOD(t4_is_main_ready, t4_ready),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
+
+ DEVMETHOD_END
+};
+static driver_t ch_driver = {
+ "chnex",
+ ch_methods,
+ sizeof(struct adapter)
+};
+
+
+/* T7+ port (che) interface */
+static driver_t che_driver = {
+ "che",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
+/* T7+ VI (vche) interface */
+static driver_t vche_driver = {
+ "vche",
+ vcxgbe_methods,
+ sizeof(struct vi_info)
+};
+
/* ifnet interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(if_t, unsigned long, caddr_t);
@@ -263,7 +299,7 @@ static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
-SLIST_HEAD(, uld_info) t4_uld_list;
+struct uld_info *t4_uld_list[ULD_MAX + 1];
#endif
/*
@@ -318,15 +354,17 @@ static int t4_nofldtxq = -NOFLDTXQ;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq, CTLFLAG_RDTUN, &t4_nofldtxq, 0,
"Number of offload TX queues per port");
-#define NOFLDRXQ 2
-static int t4_nofldrxq = -NOFLDRXQ;
-SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
- "Number of offload RX queues per port");
-
#define NOFLDTXQ_VI 1
static int t4_nofldtxq_vi = -NOFLDTXQ_VI;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldtxq_vi, CTLFLAG_RDTUN, &t4_nofldtxq_vi, 0,
"Number of offload TX queues per VI");
+#endif
+
+#if defined(TCP_OFFLOAD)
+#define NOFLDRXQ 2
+static int t4_nofldrxq = -NOFLDRXQ;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq, CTLFLAG_RDTUN, &t4_nofldrxq, 0,
+ "Number of offload RX queues per port");
#define NOFLDRXQ_VI 1
static int t4_nofldrxq_vi = -NOFLDRXQ_VI;
@@ -334,12 +372,12 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, nofldrxq_vi, CTLFLAG_RDTUN, &t4_nofldrxq_vi, 0,
"Number of offload RX queues per VI");
#define TMR_IDX_OFLD 1
-int t4_tmr_idx_ofld = TMR_IDX_OFLD;
+static int t4_tmr_idx_ofld = TMR_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_timer_idx_ofld, CTLFLAG_RDTUN,
&t4_tmr_idx_ofld, 0, "Holdoff timer index for offload queues");
#define PKTC_IDX_OFLD (-1)
-int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
+static int t4_pktc_idx_ofld = PKTC_IDX_OFLD;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, holdoff_pktc_idx_ofld, CTLFLAG_RDTUN,
&t4_pktc_idx_ofld, 0, "holdoff packet counter index for offload queues");
@@ -519,6 +557,9 @@ static int t4_fec = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fec, CTLFLAG_RDTUN, &t4_fec, 0,
"Forward Error Correction (bit 0 = RS, bit 1 = BASER_RS)");
+static const char *
+t4_fec_bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2\6auto\7module";
+
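t4_fec_bits is a printf(9) %b format descriptor: the leading \20 (octal for 16) selects hexadecimal output for the raw value, and each following octal escape names a 1-based bit, the name running until the next escape. A small illustration of how such a string decodes in the kernel (a sketch; the exact output shape is approximate):

	/* Bit 1 (RS-FEC) and bit 3 (NO-FEC) set: value 0x5. */
	uint32_t fec = (1 << 0) | (1 << 2);

	printf("fec = %b\n", fec, t4_fec_bits);
	/* Prints roughly: fec = 5<RS-FEC,NO-FEC> */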
/*
* Controls when the driver sets the FORCE_FEC bit in the L1_CFG32 that it
* issues to the firmware. If the firmware doesn't support FORCE_FEC then the
@@ -570,6 +611,10 @@ static int t4_switchcaps_allowed = FW_CAPS_CONFIG_SWITCH_INGRESS |
SYSCTL_INT(_hw_cxgbe, OID_AUTO, switchcaps_allowed, CTLFLAG_RDTUN,
&t4_switchcaps_allowed, 0, "Default switch capabilities");
+static int t4_nvmecaps_allowed = 0;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, nvmecaps_allowed, CTLFLAG_RDTUN,
+ &t4_nvmecaps_allowed, 0, "Default NVMe capabilities");
+
#ifdef RATELIMIT
static int t4_niccaps_allowed = FW_CAPS_CONFIG_NIC |
FW_CAPS_CONFIG_NIC_HASHFILTER | FW_CAPS_CONFIG_NIC_ETHOFLD;
@@ -604,6 +649,11 @@ static int t5_write_combine = 0;
SYSCTL_INT(_hw_cxl, OID_AUTO, write_combine, CTLFLAG_RDTUN, &t5_write_combine,
0, "Use WC instead of UC for BAR2");
+/* From t4_sysctls: doorbells = {"\20\1UDB\2WCWR\3UDBWC\4KDB"} */
+static int t4_doorbells_allowed = 0xf;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, doorbells_allowed, CTLFLAG_RDTUN,
+ &t4_doorbells_allowed, 0, "Limit tx queues to these doorbells");
+
static int t4_num_vis = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, num_vis, CTLFLAG_RDTUN, &t4_num_vis, 0,
"Number of VIs per port");
@@ -628,6 +678,10 @@ static int t4_reset_on_fatal_err = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_on_fatal_err, CTLFLAG_RWTUN,
&t4_reset_on_fatal_err, 0, "reset adapter on fatal errors");
+static int t4_reset_method = 1;
+SYSCTL_INT(_hw_cxgbe, OID_AUTO, reset_method, CTLFLAG_RWTUN, &t4_reset_method,
+ 0, "reset method: 0 = PL_RST, 1 = PCIe secondary bus reset, 2 = PCIe link bounce");
+
static int t4_clock_gate_on_suspend = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, clock_gate_on_suspend, CTLFLAG_RWTUN,
&t4_clock_gate_on_suspend, 0, "gate the clock on suspend");
@@ -681,9 +735,10 @@ SYSCTL_INT(_hw_cxgbe, OID_AUTO, drop_pkts_with_l4_errors, CTLFLAG_RDTUN,
* TOE tunables.
*/
static int t4_cop_managed_offloading = 0;
-SYSCTL_INT(_hw_cxgbe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
+SYSCTL_INT(_hw_cxgbe_toe, OID_AUTO, cop_managed_offloading, CTLFLAG_RDTUN,
&t4_cop_managed_offloading, 0,
"COP (Connection Offload Policy) controls all TOE offload");
+TUNABLE_INT("hw.cxgbe.cop_managed_offloading", &t4_cop_managed_offloading);
#endif
#ifdef KERN_TLS
@@ -706,6 +761,14 @@ SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, inline_keys, CTLFLAG_RDTUN,
static int t4_tls_combo_wrs = 0;
SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, combo_wrs, CTLFLAG_RDTUN, &t4_tls_combo_wrs,
0, "Attempt to combine TCB field updates with TLS record work requests.");
+
+static int t4_tls_short_records = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, short_records, CTLFLAG_RDTUN,
+ &t4_tls_short_records, 0, "Use cipher-only mode for short records.");
+
+static int t4_tls_partial_ghash = 1;
+SYSCTL_INT(_hw_cxgbe_tls, OID_AUTO, partial_ghash, CTLFLAG_RDTUN,
+ &t4_tls_partial_ghash, 0, "Use partial GHASH for AES-GCM records.");
#endif
/* Functions used by VIs to obtain unique MAC addresses for each VI. */
@@ -775,6 +838,8 @@ static int t4_alloc_irq(struct adapter *, struct irq *, int rid,
static int t4_free_irq(struct adapter *, struct irq *);
static void t4_init_atid_table(struct adapter *);
static void t4_free_atid_table(struct adapter *);
+static void stop_atid_allocator(struct adapter *);
+static void restart_atid_allocator(struct adapter *);
static void get_regs(struct adapter *, struct t4_regdump *, uint8_t *);
static void vi_refresh_stats(struct vi_info *);
static void cxgbe_refresh_stats(struct vi_info *);
@@ -797,17 +862,20 @@ static int sysctl_requested_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_module_fec(SYSCTL_HANDLER_ARGS);
static int sysctl_autoneg(SYSCTL_HANDLER_ARGS);
static int sysctl_force_fec(SYSCTL_HANDLER_ARGS);
+static int sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS);
static int sysctl_temperature(SYSCTL_HANDLER_ARGS);
static int sysctl_vdd(SYSCTL_HANDLER_ARGS);
static int sysctl_reset_sensor(SYSCTL_HANDLER_ARGS);
static int sysctl_loadavg(SYSCTL_HANDLER_ARGS);
static int sysctl_cctrl(SYSCTL_HANDLER_ARGS);
-static int sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_ibq(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_obq(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS);
static int sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS);
+static int sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_cpl_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_ddp_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_tid_stats(SYSCTL_HANDLER_ARGS);
@@ -819,6 +887,7 @@ static int sysctl_linkdnrc(SYSCTL_HANDLER_ARGS);
static int sysctl_meminfo(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam(SYSCTL_HANDLER_ARGS);
static int sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS);
+static int sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS);
static int sysctl_path_mtus(SYSCTL_HANDLER_ARGS);
static int sysctl_pm_stats(SYSCTL_HANDLER_ARGS);
static int sysctl_rdma_stats(SYSCTL_HANDLER_ARGS);
@@ -843,7 +912,7 @@ static int sysctl_tp_backoff(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_tmr_idx_ofld(SYSCTL_HANDLER_ARGS);
static int sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS);
#endif
-static int get_sge_context(struct adapter *, struct t4_sge_context *);
+static int get_sge_context(struct adapter *, int, uint32_t, int, uint32_t *);
static int load_fw(struct adapter *, struct t4_data *);
static int load_cfg(struct adapter *, struct t4_data *);
static int load_boot(struct adapter *, struct t4_bootrom *);
@@ -856,10 +925,15 @@ static int read_i2c(struct adapter *, struct t4_i2c_data *);
static int clear_stats(struct adapter *, u_int);
static int hold_clip_addr(struct adapter *, struct t4_clip_addr *);
static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
+static inline int stop_adapter(struct adapter *);
+static inline void set_adapter_hwstatus(struct adapter *, const bool);
+static int stop_lld(struct adapter *);
+static inline int restart_adapter(struct adapter *);
+static int restart_lld(struct adapter *);
#ifdef TCP_OFFLOAD
-static int toe_capability(struct vi_info *, bool);
-static int t4_deactivate_all_uld(struct adapter *);
-static void t4_async_event(struct adapter *);
+static int deactivate_all_uld(struct adapter *);
+static void stop_all_uld(struct adapter *);
+static void restart_all_uld(struct adapter *);
#endif
#ifdef KERN_TLS
static int ktls_capability(struct adapter *, bool);
@@ -923,15 +997,15 @@ struct {
{0x6402, "Chelsio T6225-SO-CR"}, /* 2 x 10/25G, nomem */
{0x6403, "Chelsio T6425-CR"}, /* 4 x 10/25G */
{0x6404, "Chelsio T6425-SO-CR"}, /* 4 x 10/25G, nomem */
- {0x6405, "Chelsio T6225-OCP-SO"}, /* 2 x 10/25G, nomem */
- {0x6406, "Chelsio T62100-OCP-SO"}, /* 2 x 40/50/100G, nomem */
+ {0x6405, "Chelsio T6225-SO-OCP3"}, /* 2 x 10/25G, nomem */
+ {0x6406, "Chelsio T6225-OCP3"}, /* 2 x 10/25G */
{0x6407, "Chelsio T62100-LP-CR"}, /* 2 x 40/50/100G */
{0x6408, "Chelsio T62100-SO-CR"}, /* 2 x 40/50/100G, nomem */
{0x6409, "Chelsio T6210-BT"}, /* 2 x 10GBASE-T */
{0x640d, "Chelsio T62100-CR"}, /* 2 x 40/50/100G */
{0x6410, "Chelsio T6-DBG-100"}, /* 2 x 40/50/100G, debug */
{0x6411, "Chelsio T6225-LL-CR"}, /* 2 x 10/25G */
- {0x6414, "Chelsio T61100-OCP-SO"}, /* 1 x 40/50/100G, nomem */
+ {0x6414, "Chelsio T62100-SO-OCP3"}, /* 2 x 40/50/100G, nomem */
{0x6415, "Chelsio T6201-BT"}, /* 2 x 1000BASE-T */
/* Custom */
@@ -943,6 +1017,29 @@ struct {
{0x6485, "Custom T6240-SO"},
{0x6486, "Custom T6225-SO-CR"},
{0x6487, "Custom T6225-CR"},
+}, t7_pciids[] = {
+ {0xd000, "Chelsio Terminator 7 FPGA"}, /* T7 PE12K FPGA */
+ {0x7400, "Chelsio T72200-DBG"}, /* 2 x 200G, debug */
+ {0x7401, "Chelsio T7250"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7402, "Chelsio S7250"}, /* 2 x 10/25/50G, nomem */
+ {0x7403, "Chelsio T7450"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7404, "Chelsio S7450"}, /* 4 x 10/25/50G, nomem */
+ {0x7405, "Chelsio T72200"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7406, "Chelsio S72200"}, /* 2 x 40/100/200G, nomem */
+ {0x7407, "Chelsio T72200-FH"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7408, "Chelsio S71400"}, /* 1 x 400G, nomem */
+ {0x7409, "Chelsio S7210-BT"}, /* 2 x 10GBASE-T, nomem */
+ {0x740a, "Chelsio T7450-RC"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x740b, "Chelsio T72200-RC"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x740c, "Chelsio T72200-FH-RC"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x740d, "Chelsio S72200-OCP3"}, /* 2 x 40/100/200G OCP3 */
+ {0x740e, "Chelsio S7450-OCP3"}, /* 4 x 1/10/25/50G OCP3 */
+ {0x740f, "Chelsio S7410-BT-OCP3"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7410, "Chelsio S7210-BT-A"}, /* 2 x 10GBASE-T */
+ {0x7411, "Chelsio T7_MAYRA_7"}, /* Motherboard */
+
+ /* Custom */
+ {0x7480, "Custom T7"},
};
#ifdef TCP_OFFLOAD
@@ -1025,6 +1122,31 @@ t6_probe(device_t dev)
return (ENXIO);
}
+static int
+ch_probe(device_t dev)
+{
+ int i;
+ uint16_t v = pci_get_vendor(dev);
+ uint16_t d = pci_get_device(dev);
+ uint8_t f = pci_get_function(dev);
+
+ if (v != PCI_VENDOR_ID_CHELSIO)
+ return (ENXIO);
+
+ /* Attach only to PF0 of the FPGA */
+ if (d == 0xd000 && f != 0)
+ return (ENXIO);
+
+ for (i = 0; i < nitems(t7_pciids); i++) {
+ if (d == t7_pciids[i].device) {
+ device_set_desc(dev, t7_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+
+ return (ENXIO);
+}
+
static void
t5_attribute_workaround(device_t dev)
{
@@ -1074,6 +1196,13 @@ static const struct devnames devnames[] = {
.pf03_drv_name = "t6iov",
.vf_nexus_name = "t6vf",
.vf_ifnet_name = "ccv"
+ }, {
+ .nexus_name = "chnex",
+ .ifnet_name = "che",
+ .vi_ifnet_name = "vche",
+ .pf03_drv_name = "chiov",
+ .vf_nexus_name = "chvf",
+ .vf_ifnet_name = "chev"
}
};
@@ -1083,12 +1212,13 @@ t4_init_devnames(struct adapter *sc)
int id;
id = chip_id(sc);
- if (id >= CHELSIO_T4 && id - CHELSIO_T4 < nitems(devnames))
- sc->names = &devnames[id - CHELSIO_T4];
- else {
+ if (id < CHELSIO_T4) {
device_printf(sc->dev, "chip id %d is not supported.\n", id);
sc->names = NULL;
- }
+ } else if (id - CHELSIO_T4 < nitems(devnames))
+ sc->names = &devnames[id - CHELSIO_T4];
+ else
+ sc->names = &devnames[nitems(devnames) - 1];
}
static int
@@ -1120,7 +1250,7 @@ t4_calibration(void *arg)
sc = (struct adapter *)arg;
- KASSERT((hw_off_limits(sc) == 0), ("hw_off_limits at t4_calibration"));
+ KASSERT(hw_all_ok(sc), ("!hw_all_ok at t4_calibration"));
hw = t4_read_reg64(sc, A_SGE_TIMESTAMP_LO);
sbt = sbinuptime();
@@ -1260,6 +1390,7 @@ t4_attach(device_t dev)
goto done; /* error message displayed already */
memset(sc->chan_map, 0xff, sizeof(sc->chan_map));
+ memset(sc->port_map, 0xff, sizeof(sc->port_map));
/* Prepare the adapter for operation. */
buf = malloc(PAGE_SIZE, M_CXGBE, M_ZERO | M_WAITOK);
@@ -1292,7 +1423,7 @@ t4_attach(device_t dev)
* will work even in "recovery mode".
*/
setup_memwin(sc);
- if (t4_init_devlog_params(sc, 0) == 0)
+ if (t4_init_devlog_ncores_params(sc, 0) == 0)
fixup_devlog_params(sc);
make_dev_args_init(&mda);
mda.mda_devsw = &t4_cdevsw;
@@ -1347,6 +1478,10 @@ t4_attach(device_t dev)
if (rc != 0)
goto done; /* error message displayed already */
+ rc = t4_adj_doorbells(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
rc = t4_create_dma_tag(sc);
if (rc != 0)
goto done; /* error message displayed already */
@@ -1386,14 +1521,16 @@ t4_attach(device_t dev)
}
if (is_bt(pi->port_type))
- setbit(&sc->bt_map, pi->tx_chan);
+ setbit(&sc->bt_map, pi->hw_port);
else
- MPASS(!isset(&sc->bt_map, pi->tx_chan));
+ MPASS(!isset(&sc->bt_map, pi->hw_port));
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
device_get_nameunit(dev), i);
mtx_init(&pi->pi_lock, pi->lockname, 0, MTX_DEF);
- sc->chan_map[pi->tx_chan] = i;
+ for (j = 0; j < sc->params.tp.lb_nchan; j++)
+ sc->chan_map[pi->tx_chan + j] = i;
+ sc->port_map[pi->hw_port] = i;
/*
* The MPS counter for FCS errors doesn't work correctly on the
@@ -1403,10 +1540,8 @@ t4_attach(device_t dev)
*/
if (is_t6(sc))
pi->fcs_reg = -1;
- else {
- pi->fcs_reg = t4_port_reg(sc, pi->tx_chan,
- A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L);
- }
+ else
+ pi->fcs_reg = A_MPS_PORT_STAT_RX_PORT_CRC_ERROR_L;
pi->fcs_base = 0;
/* All VIs on this port share this media. */
@@ -1446,6 +1581,7 @@ t4_attach(device_t dev)
sc->intr_count = iaq.nirq;
s = &sc->sge;
+ s->nctrlq = max(sc->params.nports, sc->params.ncores);
s->nrxq = nports * iaq.nrxq;
s->ntxq = nports * iaq.ntxq;
if (num_vis > 1) {
@@ -1500,7 +1636,7 @@ t4_attach(device_t dev)
MPASS(s->niq <= s->iqmap_sz);
MPASS(s->neq <= s->eqmap_sz);
- s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
+ s->ctrlq = malloc(s->nctrlq * sizeof(struct sge_wrq), M_CXGBE,
M_ZERO | M_WAITOK);
s->rxq = malloc(s->nrxq * sizeof(struct sge_rxq), M_CXGBE,
M_ZERO | M_WAITOK);
@@ -1527,6 +1663,7 @@ t4_attach(device_t dev)
if (sc->vres.key.size != 0)
sc->key_map = vmem_create("T4TLS key map", sc->vres.key.start,
sc->vres.key.size, 32, 0, M_FIRSTFIT | M_WAITOK);
+ t4_init_tpt(sc);
/*
* Second pass over the ports. This time we know the number of rx and
@@ -1608,11 +1745,7 @@ t4_attach(device_t dev)
goto done;
}
- rc = bus_generic_probe(dev);
- if (rc != 0) {
- device_printf(dev, "failed to probe child drivers: %d\n", rc);
- goto done;
- }
+ bus_identify_children(dev);
/*
* Ensure thread-safe mailbox access (in debug builds).
@@ -1623,12 +1756,7 @@ t4_attach(device_t dev)
*/
sc->flags |= CHK_MBOX_ACCESS;
- rc = bus_generic_attach(dev);
- if (rc != 0) {
- device_printf(dev,
- "failed to attach all child ports: %d\n", rc);
- goto done;
- }
+ bus_attach_children(dev);
t4_calibration_start(sc);
device_printf(dev,
@@ -1755,7 +1883,7 @@ t4_detach_common(device_t dev)
sc = device_get_softc(dev);
#ifdef TCP_OFFLOAD
- rc = t4_deactivate_all_uld(sc);
+ rc = deactivate_all_uld(sc);
if (rc) {
device_printf(dev,
"failed to detach upper layer drivers: %d\n", rc);
@@ -1779,7 +1907,7 @@ t4_detach_common(device_t dev)
}
if (device_is_attached(dev)) {
- rc = bus_generic_detach(dev);
+ rc = bus_detach_children(dev);
if (rc) {
device_printf(dev,
"failed to detach child devices: %d\n", rc);
@@ -1797,8 +1925,6 @@ t4_detach_common(device_t dev)
pi = sc->port[i];
if (pi) {
t4_free_vi(sc, sc->mbox, sc->pf, 0, pi->vi[0].viid);
- if (pi->dev)
- device_delete_child(dev, pi->dev);
mtx_destroy(&pi->pi_lock);
free(pi->vi, M_CXGBE);
@@ -1830,7 +1956,7 @@ t4_detach_common(device_t dev)
sc->msix_res);
if (sc->l2t)
- t4_free_l2t(sc->l2t);
+ t4_free_l2t(sc);
if (sc->smt)
t4_free_smt(sc->smt);
t4_free_atid_table(sc);
@@ -1839,6 +1965,7 @@ t4_detach_common(device_t dev)
#endif
if (sc->key_map)
vmem_destroy(sc->key_map);
+ t4_free_tpt(sc);
#ifdef INET6
t4_destroy_clip_table(sc);
#endif
@@ -1900,57 +2027,104 @@ t4_detach_common(device_t dev)
return (0);
}
-static inline bool
-ok_to_reset(struct adapter *sc)
+static inline int
+stop_adapter(struct adapter *sc)
{
- struct tid_info *t = &sc->tids;
struct port_info *pi;
- struct vi_info *vi;
- int i, j;
- int caps = IFCAP_TOE | IFCAP_NETMAP | IFCAP_TXRTLMT;
-
- if (is_t6(sc))
- caps |= IFCAP_TXTLS;
-
- ASSERT_SYNCHRONIZED_OP(sc);
- MPASS(!(sc->flags & IS_VF));
+ int i;
+ if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED))) {
+ CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n",
+ __func__, curthread, sc->flags, sc->error_flags);
+ return (EALREADY);
+ }
+ CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread,
+ sc->flags, sc->error_flags);
+ t4_shutdown_adapter(sc);
for_each_port(sc, i) {
pi = sc->port[i];
- for_each_vi(pi, j, vi) {
- if (if_getcapenable(vi->ifp) & caps)
- return (false);
+ if (pi == NULL)
+ continue;
+ PORT_LOCK(pi);
+ if (pi->up_vis > 0 && pi->link_cfg.link_ok) {
+ /*
+ * t4_shutdown_adapter has already shut down all the
+ * PHYs but it also disables interrupts and DMA so there
+ * won't be a link interrupt. Update the state manually
+ * if the link was up previously and inform the kernel.
+ */
+ pi->link_cfg.link_ok = false;
+ t4_os_link_changed(pi);
}
+ PORT_UNLOCK(pi);
}
- if (atomic_load_int(&t->tids_in_use) > 0)
- return (false);
- if (atomic_load_int(&t->stids_in_use) > 0)
- return (false);
- if (atomic_load_int(&t->atids_in_use) > 0)
- return (false);
- if (atomic_load_int(&t->ftids_in_use) > 0)
- return (false);
- if (atomic_load_int(&t->hpftids_in_use) > 0)
- return (false);
- if (atomic_load_int(&t->etids_in_use) > 0)
- return (false);
-
- return (true);
+ return (0);
}
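stop_adapter is idempotent: atomic_testandset_int operates on a bit number rather than a mask, so the power-of-two ADAP_STOPPED flag is converted with ilog2, and a second caller sees the bit already set and backs off with EALREADY. The same pattern in isolation (kernel-style sketch; the flag name and value are hypothetical):

	#define MY_STOPPED	0x4	/* power-of-two flag bit */

	static int
	stop_once(volatile u_int *eflags)
	{
		/* testandset takes a bit number and returns the old bit. */
		if (atomic_testandset_int(eflags, ilog2(MY_STOPPED)))
			return (EALREADY);	/* already stopped */
		/* ... one-time shutdown work runs exactly once ... */
		return (0);
	}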
static inline int
-stop_adapter(struct adapter *sc)
+restart_adapter(struct adapter *sc)
{
- if (atomic_testandset_int(&sc->error_flags, ilog2(ADAP_STOPPED)))
- return (1); /* Already stopped. */
- return (t4_shutdown_adapter(sc));
+ uint32_t val;
+
+ if (!atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_STOPPED))) {
+ CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x, EALREADY\n",
+ __func__, curthread, sc->flags, sc->error_flags);
+ return (EALREADY);
+ }
+ CH_ALERT(sc, "%s from %p, flags 0x%08x,0x%08x\n", __func__, curthread,
+ sc->flags, sc->error_flags);
+
+ MPASS(hw_off_limits(sc));
+ MPASS((sc->flags & FW_OK) == 0);
+ MPASS((sc->flags & MASTER_PF) == 0);
+ MPASS(sc->reset_thread == NULL);
+
+ /*
+ * The adapter is supposed to be back on PCIE with its config space and
+ * BARs restored to their state before reset. Register access via
+ * t4_read_reg BAR0 should just work.
+ */
+ sc->reset_thread = curthread;
+ val = t4_read_reg(sc, A_PL_WHOAMI);
+ if (val == 0xffffffff || val == 0xeeeeeeee) {
+ CH_ERR(sc, "%s: device registers not readable.\n", __func__);
+ sc->reset_thread = NULL;
+ atomic_set_int(&sc->error_flags, ADAP_STOPPED);
+ return (ENXIO);
+ }
+ atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR);
+ atomic_add_int(&sc->incarnation, 1);
+ atomic_add_int(&sc->num_resets, 1);
+
+ return (0);
+}
+
+static inline void
+set_adapter_hwstatus(struct adapter *sc, const bool usable)
+{
+ if (usable) {
+ /* Must be marked reusable by the designated thread. */
+ ASSERT_SYNCHRONIZED_OP(sc);
+ MPASS(sc->reset_thread == curthread);
+ mtx_lock(&sc->reg_lock);
+ atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
+ mtx_unlock(&sc->reg_lock);
+ } else {
+ /* Mark the adapter totally off limits. */
+ begin_synchronized_op(sc, NULL, SLEEP_OK, "t4hwsts");
+ mtx_lock(&sc->reg_lock);
+ atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
+ mtx_unlock(&sc->reg_lock);
+ sc->flags &= ~(FW_OK | MASTER_PF);
+ sc->reset_thread = NULL;
+ end_synchronized_op(sc, 0);
+ }
}
static int
-t4_suspend(device_t dev)
+stop_lld(struct adapter *sc)
{
- struct adapter *sc = device_get_softc(dev);
struct port_info *pi;
struct vi_info *vi;
if_t ifp;
@@ -1965,43 +2139,26 @@ t4_suspend(device_t dev)
#endif
int rc, i, j, k;
- CH_ALERT(sc, "suspend requested\n");
-
- rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4sus");
+ /*
+ * XXX: Can there be a synch_op in progress that will hang because
+ * hardware has been stopped? We'll hang too and the solution will be
+ * to use a version of begin_synch_op that wakes up existing synch_op
+ * with errors. Maybe stop_adapter should do this wakeup?
+ *
+ * I don't think any synch_op could get stranded waiting for DMA or
+ * interrupt so I think we're okay here. Remove this comment block
+ * after testing.
+ */
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4slld");
if (rc != 0)
return (ENXIO);
- /* XXX: Can the kernel call suspend repeatedly without resume? */
- MPASS(!hw_off_limits(sc));
-
- if (!ok_to_reset(sc)) {
- /* XXX: should list what resource is preventing suspend. */
- CH_ERR(sc, "not safe to suspend.\n");
- rc = EBUSY;
- goto done;
- }
-
- /* No more DMA or interrupts. */
- stop_adapter(sc);
-
/* Quiesce all activity. */
for_each_port(sc, i) {
pi = sc->port[i];
+ if (pi == NULL)
+ continue;
pi->vxlan_tcam_entry = false;
-
- PORT_LOCK(pi);
- if (pi->up_vis > 0) {
- /*
- * t4_shutdown_adapter has already shut down all the
- * PHYs but it also disables interrupts and DMA so there
- * won't be a link interrupt. So we update the state
- * manually and inform the kernel.
- */
- pi->link_cfg.link_ok = false;
- t4_os_link_changed(pi);
- }
- PORT_UNLOCK(pi);
-
for_each_vi(pi, j, vi) {
vi->xact_addr_filt = -1;
mtx_lock(&vi->tick_mtx);
@@ -2028,7 +2185,9 @@ t4_suspend(device_t dev)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
for_each_ofld_txq(vi, k, ofld_txq) {
+ TXQ_LOCK(&ofld_txq->wrq);
ofld_txq->wrq.eq.flags &= ~EQ_HW_ALLOCATED;
+ TXQ_UNLOCK(&ofld_txq->wrq);
}
#endif
for_each_rxq(vi, k, rxq) {
@@ -2046,9 +2205,18 @@ t4_suspend(device_t dev)
if (sc->flags & FULL_INIT_DONE) {
/* Control queue */
wrq = &sc->sge.ctrlq[i];
+ TXQ_LOCK(wrq);
wrq->eq.flags &= ~EQ_HW_ALLOCATED;
+ TXQ_UNLOCK(wrq);
quiesce_wrq(wrq);
}
+
+ if (pi->flags & HAS_TRACEQ) {
+ pi->flags &= ~HAS_TRACEQ;
+ sc->traceq = -1;
+ sc->tracer_valid = 0;
+ sc->tracer_enabled = 0;
+ }
}
if (sc->flags & FULL_INIT_DONE) {
/* Firmware event queue */
@@ -2060,22 +2228,43 @@ t4_suspend(device_t dev)
callout_stop(&sc->cal_callout);
callout_drain(&sc->cal_callout);
- /* Mark the adapter totally off limits. */
- mtx_lock(&sc->reg_lock);
- atomic_set_int(&sc->error_flags, HW_OFF_LIMITS);
- sc->flags &= ~(FW_OK | MASTER_PF);
- sc->reset_thread = NULL;
- mtx_unlock(&sc->reg_lock);
-
if (t4_clock_gate_on_suspend) {
t4_set_reg_field(sc, A_PMU_PART_CG_PWRMODE, F_MA_PART_CGEN |
F_LE_PART_CGEN | F_EDC1_PART_CGEN | F_EDC0_PART_CGEN |
F_TP_PART_CGEN | F_PDP_PART_CGEN | F_SGE_PART_CGEN, 0);
}
- CH_ALERT(sc, "suspend completed.\n");
-done:
end_synchronized_op(sc, 0);
+
+ stop_atid_allocator(sc);
+ t4_stop_l2t(sc);
+
+ return (rc);
+}
+
+int
+suspend_adapter(struct adapter *sc)
+{
+ stop_adapter(sc);
+ stop_lld(sc);
+#ifdef TCP_OFFLOAD
+ stop_all_uld(sc);
+#endif
+ set_adapter_hwstatus(sc, false);
+
+ return (0);
+}
+
+static int
+t4_suspend(device_t dev)
+{
+ struct adapter *sc = device_get_softc(dev);
+ int rc;
+
+ CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread);
+ rc = suspend_adapter(sc);
+ CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread);
+
return (rc);
}
@@ -2084,6 +2273,7 @@ struct adapter_pre_reset_state {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -2115,6 +2305,7 @@ save_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
o->nbmcaps = sc->nbmcaps;
o->linkcaps = sc->linkcaps;
o->switchcaps = sc->switchcaps;
+ o->nvmecaps = sc->nvmecaps;
o->niccaps = sc->niccaps;
o->toecaps = sc->toecaps;
o->rdmacaps = sc->rdmacaps;
@@ -2153,6 +2344,7 @@ compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
COMPARE_CAPS(nbm);
COMPARE_CAPS(link);
COMPARE_CAPS(switch);
+ COMPARE_CAPS(nvme);
COMPARE_CAPS(nic);
COMPARE_CAPS(toe);
COMPARE_CAPS(rdma);
@@ -2231,9 +2423,8 @@ compare_caps_and_params(struct adapter *sc, struct adapter_pre_reset_state *o)
}
static int
-t4_resume(device_t dev)
+restart_lld(struct adapter *sc)
{
- struct adapter *sc = device_get_softc(dev);
struct adapter_pre_reset_state *old_state = NULL;
struct port_info *pi;
struct vi_info *vi;
@@ -2241,37 +2432,18 @@ t4_resume(device_t dev)
struct sge_txq *txq;
int rc, i, j, k;
- CH_ALERT(sc, "resume requested.\n");
-
- rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4res");
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rlld");
if (rc != 0)
return (ENXIO);
- MPASS(hw_off_limits(sc));
- MPASS((sc->flags & FW_OK) == 0);
- MPASS((sc->flags & MASTER_PF) == 0);
- MPASS(sc->reset_thread == NULL);
- sc->reset_thread = curthread;
-
- /* Register access is expected to work by the time we're here. */
- if (t4_read_reg(sc, A_PL_WHOAMI) == 0xffffffff) {
- CH_ERR(sc, "%s: can't read device registers\n", __func__);
- rc = ENXIO;
- goto done;
- }
-
- /* Note that HW_OFF_LIMITS is cleared a bit later. */
- atomic_clear_int(&sc->error_flags, ADAP_FATAL_ERR | ADAP_STOPPED);
/* Restore memory window. */
setup_memwin(sc);
/* Go no further if recovery mode has been requested. */
if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
- CH_ALERT(sc, "recovery mode on resume.\n");
+ CH_ALERT(sc, "%s: recovery mode during restart.\n", __func__);
rc = 0;
- mtx_lock(&sc->reg_lock);
- atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
- mtx_unlock(&sc->reg_lock);
+ set_adapter_hwstatus(sc, true);
goto done;
}
@@ -2337,9 +2509,7 @@ t4_resume(device_t dev)
* want to access the hardware too. It is safe to do so. Note that
* this thread is still in the middle of a synchronized_op.
*/
- mtx_lock(&sc->reg_lock);
- atomic_clear_int(&sc->error_flags, HW_OFF_LIMITS);
- mtx_unlock(&sc->reg_lock);
+ set_adapter_hwstatus(sc, true);
if (sc->flags & FULL_INIT_DONE) {
rc = adapter_full_init(sc);
@@ -2365,6 +2535,11 @@ t4_resume(device_t dev)
"interface: %d\n", rc);
goto done;
}
+ if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
+ sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
+ pi->flags |= HAS_TRACEQ;
+ }
ifp = vi->ifp;
if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
@@ -2428,14 +2603,37 @@ t4_resume(device_t dev)
/* Reset all calibration */
t4_calibration_start(sc);
-
done:
- if (rc == 0) {
- sc->incarnation++;
- CH_ALERT(sc, "resume completed.\n");
- }
end_synchronized_op(sc, 0);
free(old_state, M_CXGBE);
+
+ restart_atid_allocator(sc);
+ t4_restart_l2t(sc);
+
+ return (rc);
+}
+
+int
+resume_adapter(struct adapter *sc)
+{
+ restart_adapter(sc);
+ restart_lld(sc);
+#ifdef TCP_OFFLOAD
+ restart_all_uld(sc);
+#endif
+ return (0);
+}
+
+static int
+t4_resume(device_t dev)
+{
+ struct adapter *sc = device_get_softc(dev);
+ int rc;
+
+ CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread);
+ rc = resume_adapter(sc);
+ CH_ALERT(sc, "%s end (thread %p).\n", __func__, curthread);
+
return (rc);
}
@@ -2444,7 +2642,7 @@ t4_reset_prepare(device_t dev, device_t child)
{
struct adapter *sc = device_get_softc(dev);
- CH_ALERT(sc, "reset_prepare.\n");
+ CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread);
return (0);
}
@@ -2453,80 +2651,165 @@ t4_reset_post(device_t dev, device_t child)
{
struct adapter *sc = device_get_softc(dev);
- CH_ALERT(sc, "reset_post.\n");
+ CH_ALERT(sc, "%s from thread %p.\n", __func__, curthread);
return (0);
}
static int
-reset_adapter(struct adapter *sc)
+reset_adapter_with_pl_rst(struct adapter *sc)
{
- int rc, oldinc, error_flags;
+ /* This is a t4_write_reg without the hw_off_limits check. */
+ MPASS(sc->error_flags & HW_OFF_LIMITS);
+ bus_space_write_4(sc->bt, sc->bh, A_PL_RST,
+ F_PIORSTMODE | F_PIORST | F_AUTOPCIEPAUSE);
+ pause("pl_rst", 1 * hz); /* Wait 1s for reset */
+ return (0);
+}
- CH_ALERT(sc, "reset requested.\n");
+static int
+reset_adapter_with_pcie_sbr(struct adapter *sc)
+{
+ device_t pdev = device_get_parent(sc->dev);
+ device_t gpdev = device_get_parent(pdev);
+ device_t *children;
+ int rc, i, lcap, lsta, nchildren;
+ uint32_t v;
- rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst1");
+ rc = pci_find_cap(gpdev, PCIY_EXPRESS, &v);
+ if (rc != 0) {
+ CH_ERR(sc, "%s: pci_find_cap(%s, pcie) failed: %d\n", __func__,
+ device_get_nameunit(gpdev), rc);
+ return (ENOTSUP);
+ }
+ lcap = v + PCIER_LINK_CAP;
+ lsta = v + PCIER_LINK_STA;
+
+ nchildren = 0;
+ device_get_children(pdev, &children, &nchildren);
+ for (i = 0; i < nchildren; i++)
+ pci_save_state(children[i]);
+ v = pci_read_config(gpdev, PCIR_BRIDGECTL_1, 2);
+ pci_write_config(gpdev, PCIR_BRIDGECTL_1, v | PCIB_BCR_SECBUS_RESET, 2);
+ pause("pcie_sbr1", hz / 10); /* 100ms */
+ pci_write_config(gpdev, PCIR_BRIDGECTL_1, v, 2);
+ pause("pcie_sbr2", hz); /* Wait 1s before restore_state. */
+ v = pci_read_config(gpdev, lsta, 2);
+ if (pci_read_config(gpdev, lcap, 2) & PCIEM_LINK_CAP_DL_ACTIVE)
+ rc = v & PCIEM_LINK_STA_DL_ACTIVE ? 0 : ETIMEDOUT;
+ else if (v & (PCIEM_LINK_STA_TRAINING_ERROR | PCIEM_LINK_STA_TRAINING))
+ rc = ETIMEDOUT;
+ else
+ rc = 0;
if (rc != 0)
- return (EBUSY);
-
- if (hw_off_limits(sc)) {
- CH_ERR(sc, "adapter is suspended, use resume (not reset).\n");
- rc = ENXIO;
- goto done;
+ CH_ERR(sc, "%s: PCIe link is down after reset, LINK_STA 0x%x\n",
+ __func__, v);
+ else {
+ for (i = 0; i < nchildren; i++)
+ pci_restore_state(children[i]);
}
+ free(children, M_TEMP);
- if (!ok_to_reset(sc)) {
- /* XXX: should list what resource is preventing reset. */
- CH_ERR(sc, "not safe to reset.\n");
- rc = EBUSY;
- goto done;
- }
+ return (rc);
+}
-done:
- oldinc = sc->incarnation;
- end_synchronized_op(sc, 0);
- if (rc != 0)
- return (rc); /* Error logged already. */
+static int
+reset_adapter_with_pcie_link_bounce(struct adapter *sc)
+{
+ device_t pdev = device_get_parent(sc->dev);
+ device_t gpdev = device_get_parent(pdev);
+ device_t *children;
+ int rc, i, lcap, lctl, lsta, nchildren;
+ uint32_t v;
- atomic_add_int(&sc->num_resets, 1);
- mtx_lock(&Giant);
- rc = BUS_RESET_CHILD(device_get_parent(sc->dev), sc->dev, 0);
- mtx_unlock(&Giant);
+ rc = pci_find_cap(gpdev, PCIY_EXPRESS, &v);
+ if (rc != 0) {
+ CH_ERR(sc, "%s: pci_find_cap(%s, pcie) failed: %d\n", __func__,
+ device_get_nameunit(gpdev), rc);
+ return (ENOTSUP);
+ }
+ lcap = v + PCIER_LINK_CAP;
+ lctl = v + PCIER_LINK_CTL;
+ lsta = v + PCIER_LINK_STA;
+
+ nchildren = 0;
+ device_get_children(pdev, &children, &nchildren);
+ for (i = 0; i < nchildren; i++)
+ pci_save_state(children[i]);
+ v = pci_read_config(gpdev, lctl, 2);
+ pci_write_config(gpdev, lctl, v | PCIEM_LINK_CTL_LINK_DIS, 2);
+ pause("pcie_lnk1", 100 * hz / 1000); /* 100ms */
+ pci_write_config(gpdev, lctl, v | PCIEM_LINK_CTL_RETRAIN_LINK, 2);
+ pause("pcie_lnk2", hz); /* Wait 1s before restore_state. */
+ v = pci_read_config(gpdev, lsta, 2);
+ if (pci_read_config(gpdev, lcap, 2) & PCIEM_LINK_CAP_DL_ACTIVE)
+ rc = v & PCIEM_LINK_STA_DL_ACTIVE ? 0 : ETIMEDOUT;
+ else if (v & (PCIEM_LINK_STA_TRAINING_ERROR | PCIEM_LINK_STA_TRAINING))
+ rc = ETIMEDOUT;
+ else
+ rc = 0;
if (rc != 0)
- CH_ERR(sc, "bus_reset_child failed: %d.\n", rc);
+ CH_ERR(sc, "%s: PCIe link is down after reset, LINK_STA 0x%x\n",
+ __func__, v);
else {
- rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4rst2");
- if (rc != 0)
- return (EBUSY);
- error_flags = atomic_load_int(&sc->error_flags);
- if (sc->incarnation > oldinc && error_flags == 0) {
- CH_ALERT(sc, "bus_reset_child succeeded.\n");
- } else {
- CH_ERR(sc, "adapter did not reset properly, flags "
- "0x%08x, error_flags 0x%08x.\n", sc->flags,
- error_flags);
- rc = ENXIO;
- }
- end_synchronized_op(sc, 0);
+ for (i = 0; i < nchildren; i++)
+ pci_restore_state(children[i]);
}
+ free(children, M_TEMP);
return (rc);
}
+static inline int
+reset_adapter(struct adapter *sc)
+{
+ int rc;
+ const int reset_method = vm_guest == VM_GUEST_NO ? t4_reset_method : 0;
+
+ rc = suspend_adapter(sc);
+ if (rc != 0)
+ return (rc);
+
+ switch (reset_method) {
+ case 1:
+ rc = reset_adapter_with_pcie_sbr(sc);
+ break;
+ case 2:
+ rc = reset_adapter_with_pcie_link_bounce(sc);
+ break;
+ case 0:
+ default:
+ rc = reset_adapter_with_pl_rst(sc);
+ break;
+ }
+ if (rc == 0)
+ rc = resume_adapter(sc);
+ return (rc);
+}
+
static void
reset_adapter_task(void *arg, int pending)
{
- /* XXX: t4_async_event here? */
- reset_adapter(arg);
+ struct adapter *sc = arg;
+ const int flags = sc->flags;
+ const int eflags = sc->error_flags;
+ int rc;
+
+ if (pending > 1)
+ CH_ALERT(sc, "%s: pending %d\n", __func__, pending);
+ rc = reset_adapter(sc);
+ if (rc != 0) {
+ CH_ERR(sc, "adapter did not reset properly, rc = %d, "
+ "flags 0x%08x -> 0x%08x, err_flags 0x%08x -> 0x%08x.\n",
+ rc, flags, sc->flags, eflags, sc->error_flags);
+ }
}
static int
cxgbe_probe(device_t dev)
{
- char buf[128];
struct port_info *pi = device_get_softc(dev);
- snprintf(buf, sizeof(buf), "port %d", pi->port_id);
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "port %d", pi->port_id);
return (BUS_PROBE_DEFAULT);
}
@@ -2537,7 +2820,7 @@ cxgbe_probe(device_t dev)
IFCAP_HWRXTSTMP | IFCAP_MEXTPG)
#define T4_CAP_ENABLE (T4_CAP)
-static int
+static void
cxgbe_vi_attach(device_t dev, struct vi_info *vi)
{
if_t ifp;
@@ -2576,10 +2859,6 @@ cxgbe_vi_attach(device_t dev, struct vi_info *vi)
/* Allocate an ifnet and set it up */
ifp = if_alloc_dev(IFT_ETHER, dev);
- if (ifp == NULL) {
- device_printf(dev, "Cannot allocate ifnet\n");
- return (ENOMEM);
- }
vi->ifp = ifp;
if_setsoftc(ifp, vi);
@@ -2682,8 +2961,6 @@ cxgbe_vi_attach(device_t dev, struct vi_info *vi)
pa.pa_type = PFIL_TYPE_ETHERNET;
pa.pa_headname = if_name(ifp);
vi->pfil = pfil_head_register(&pa);
-
- return (0);
}
static int
@@ -2692,18 +2969,16 @@ cxgbe_attach(device_t dev)
struct port_info *pi = device_get_softc(dev);
struct adapter *sc = pi->adapter;
struct vi_info *vi;
- int i, rc;
+ int i;
sysctl_ctx_init(&pi->ctx);
- rc = cxgbe_vi_attach(dev, &pi->vi[0]);
- if (rc)
- return (rc);
+ cxgbe_vi_attach(dev, &pi->vi[0]);
for_each_vi(pi, i, vi) {
if (i == 0)
continue;
- vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, -1);
+ vi->dev = device_add_child(dev, sc->names->vi_ifnet_name, DEVICE_UNIT_ANY);
if (vi->dev == NULL) {
device_printf(dev, "failed to add VI %d\n", i);
continue;
@@ -2713,7 +2988,7 @@ cxgbe_attach(device_t dev)
cxgbe_sysctls(pi);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -2756,7 +3031,6 @@ cxgbe_detach(device_t dev)
rc = bus_generic_detach(dev);
if (rc)
return (rc);
- device_delete_children(dev);
sysctl_ctx_free(&pi->ctx);
begin_vi_detach(sc, &pi->vi[0]);
@@ -2805,7 +3079,7 @@ cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
if_setmtu(ifp, mtu);
if (vi->flags & VI_INIT_DONE) {
t4_update_fl_bufsize(ifp);
- if (!hw_off_limits(sc) &&
+ if (hw_all_ok(sc) &&
if_getdrvflags(ifp) & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MTU);
}
@@ -2817,7 +3091,7 @@ cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
if (rc)
return (rc);
- if (hw_off_limits(sc)) {
+ if (!hw_all_ok(sc)) {
rc = ENXIO;
goto fail;
}
@@ -2845,7 +3119,7 @@ cxgbe_ioctl(if_t ifp, unsigned long cmd, caddr_t data)
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4multi");
if (rc)
return (rc);
- if (!hw_off_limits(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ if (hw_all_ok(sc) && if_getdrvflags(ifp) & IFF_DRV_RUNNING)
rc = update_mac_settings(ifp, XGMAC_MCADDRS);
end_synchronized_op(sc, 0);
break;
@@ -3020,7 +3294,7 @@ fail:
rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4i2c");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else
rc = -t4_i2c_rd(sc, sc->mbox, pi->port_id, i2c.dev_addr,
@@ -3249,7 +3523,7 @@ cxgbe_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
if (is_t6(vi->pi->adapter))
error = t6_tls_tag_alloc(ifp, params, pt);
else
- error = EOPNOTSUPP;
+ error = t7_tls_tag_alloc(ifp, params, pt);
break;
}
#endif
@@ -3297,7 +3571,7 @@ cxgbe_media_change(if_t ifp)
if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
lc->requested_fc |= PAUSE_TX;
}
- if (pi->up_vis > 0 && !hw_off_limits(sc)) {
+ if (pi->up_vis > 0 && hw_all_ok(sc)) {
fixup_link_config(pi);
rc = apply_link_config(pi);
}
@@ -3376,9 +3650,12 @@ port_mword(struct port_info *pi, uint32_t speed)
case FW_PORT_TYPE_CR_QSFP:
case FW_PORT_TYPE_CR2_QSFP:
case FW_PORT_TYPE_SFP28:
+ case FW_PORT_TYPE_SFP56:
+ case FW_PORT_TYPE_QSFP56:
/* Pluggable transceiver */
switch (pi->mod_type) {
case FW_PORT_MOD_TYPE_LR:
+ case FW_PORT_MOD_TYPE_LR_SIMPLEX:
switch (speed) {
case FW_PORT_CAP32_SPEED_1G:
return (IFM_1000_LX);
@@ -3392,6 +3669,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_LR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_LR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_LR4);
}
break;
case FW_PORT_MOD_TYPE_SR:
@@ -3408,6 +3687,8 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_SR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_SR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_SR4);
}
break;
case FW_PORT_MOD_TYPE_ER:
@@ -3429,12 +3710,20 @@ port_mword(struct port_info *pi, uint32_t speed)
return (IFM_50G_CR2);
case FW_PORT_CAP32_SPEED_100G:
return (IFM_100G_CR4);
+ case FW_PORT_CAP32_SPEED_200G:
+ return (IFM_200G_CR4_PAM4);
}
break;
case FW_PORT_MOD_TYPE_LRM:
if (speed == FW_PORT_CAP32_SPEED_10G)
return (IFM_10G_LRM);
break;
+ case FW_PORT_MOD_TYPE_DR:
+ if (speed == FW_PORT_CAP32_SPEED_100G)
+ return (IFM_100G_DR);
+ if (speed == FW_PORT_CAP32_SPEED_200G)
+ return (IFM_200G_DR4);
+ break;
case FW_PORT_MOD_TYPE_NA:
MPASS(0); /* Not pluggable? */
/* fall through */
@@ -3465,7 +3754,7 @@ cxgbe_media_status(if_t ifp, struct ifmediareq *ifmr)
return;
PORT_LOCK(pi);
- if (pi->up_vis == 0 && !hw_off_limits(sc)) {
+ if (pi->up_vis == 0 && hw_all_ok(sc)) {
/*
* If all the interfaces are administratively down the firmware
* does not report transceiver changes. Refresh port info here
@@ -3499,12 +3788,10 @@ done:
static int
vcxgbe_probe(device_t dev)
{
- char buf[128];
struct vi_info *vi = device_get_softc(dev);
- snprintf(buf, sizeof(buf), "port %d vi %td", vi->pi->port_id,
+ device_set_descf(dev, "port %d vi %td", vi->pi->port_id,
vi - vi->pi->vi);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
@@ -3523,7 +3810,7 @@ alloc_extra_vi(struct adapter *sc, struct port_info *pi, struct vi_info *vi)
("%s: VI %s doesn't have a MAC func", __func__,
device_get_nameunit(vi->dev)));
func = vi_mac_funcs[index];
- rc = t4_alloc_vi_func(sc, sc->mbox, pi->tx_chan, sc->pf, 0, 1,
+ rc = t4_alloc_vi_func(sc, sc->mbox, pi->hw_port, sc->pf, 0, 1,
vi->hw_addr, &vi->rss_size, &vi->vfvld, &vi->vin, func, 0);
if (rc < 0) {
CH_ERR(vi, "failed to allocate virtual interface %d"
@@ -3579,11 +3866,8 @@ vcxgbe_attach(device_t dev)
if (rc)
return (rc);
- rc = cxgbe_vi_attach(dev, vi);
- if (rc) {
- t4_free_vi(sc, sc->mbox, sc->pf, 0, vi->viid);
- return (rc);
- }
+ cxgbe_vi_attach(dev, vi);
+
return (0);
}
@@ -3621,9 +3905,6 @@ fatal_error_task(void *arg, int pending)
struct adapter *sc = arg;
int rc;
-#ifdef TCP_OFFLOAD
- t4_async_event(sc);
-#endif
if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
dump_cim_regs(sc);
dump_cimla(sc);
@@ -3631,7 +3912,7 @@ fatal_error_task(void *arg, int pending)
}
if (t4_reset_on_fatal_err) {
- CH_ALERT(sc, "resetting on fatal error.\n");
+ CH_ALERT(sc, "resetting adapter after fatal error.\n");
rc = reset_adapter(sc);
if (rc == 0 && t4_panic_on_fatal_err) {
CH_ALERT(sc, "reset was successful, "
@@ -3764,6 +4045,18 @@ t4_map_bar_2(struct adapter *sc)
return (0);
}
+int
+t4_adj_doorbells(struct adapter *sc)
+{
+ if ((sc->doorbells & t4_doorbells_allowed) != 0) {
+ sc->doorbells &= t4_doorbells_allowed;
+ return (0);
+ }
+ CH_ERR(sc, "No usable doorbell (available = 0x%x, allowed = 0x%x).\n",
+ sc->doorbells, t4_doorbells_allowed);
+ return (EINVAL);
+}
+
struct memwin_init {
uint32_t base;
uint32_t aperture;
@@ -3787,7 +4080,7 @@ setup_memwin(struct adapter *sc)
const struct memwin_init *mw_init;
struct memwin *mw;
int i;
- uint32_t bar0;
+ uint32_t bar0, reg;
if (is_t4(sc)) {
/*
@@ -3815,9 +4108,10 @@ setup_memwin(struct adapter *sc)
mw->mw_aperture = mw_init->aperture;
mw->mw_curpos = 0;
}
- t4_write_reg(sc,
- PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i),
- (mw->mw_base + bar0) | V_BIR(0) |
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_T7_PCIE_MEM_ACCESS_BASE_WIN, i) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, i);
+ t4_write_reg(sc, reg, (mw->mw_base + bar0) | V_BIR(0) |
V_WINDOW(ilog2(mw->mw_aperture) - 10));
rw_wlock(&mw->mw_lock);
position_memwin(sc, i, mw->mw_curpos);
@@ -3825,7 +4119,7 @@ setup_memwin(struct adapter *sc)
}
/* flush */
- t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
+ t4_read_reg(sc, reg);
}
/*
@@ -3838,8 +4132,7 @@ static void
position_memwin(struct adapter *sc, int idx, uint32_t addr)
{
struct memwin *mw;
- uint32_t pf;
- uint32_t reg;
+ uint32_t pf, reg, val;
MPASS(idx >= 0 && idx < NUM_MEMWIN);
mw = &sc->memwin[idx];
@@ -3852,8 +4145,14 @@ position_memwin(struct adapter *sc, int idx, uint32_t addr)
pf = V_PFNUM(sc->pf);
mw->mw_curpos = addr & ~0x7f; /* start must be 128B aligned */
}
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
- t4_write_reg(sc, reg, mw->mw_curpos | pf);
+ if (chip_id(sc) > CHELSIO_T6) {
+ reg = PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, idx);
+ val = (mw->mw_curpos >> X_T7_MEMOFST_SHIFT) | pf;
+ } else {
+ reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, idx);
+ val = mw->mw_curpos | pf;
+ }
+ t4_write_reg(sc, reg, val);
t4_read_reg(sc, reg); /* flush */
}
@@ -3925,6 +4224,7 @@ t4_init_atid_table(struct adapter *sc)
mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF);
t->afree = t->atid_tab;
t->atids_in_use = 0;
+ t->atid_alloc_stopped = false;
for (i = 1; i < t->natids; i++)
t->atid_tab[i - 1].next = &t->atid_tab[i];
t->atid_tab[t->natids - 1].next = NULL;
@@ -3946,6 +4246,32 @@ t4_free_atid_table(struct adapter *sc)
t->atid_tab = NULL;
}
+static void
+stop_atid_allocator(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+
+ if (t->natids == 0)
+ return;
+ mtx_lock(&t->atid_lock);
+ t->atid_alloc_stopped = true;
+ mtx_unlock(&t->atid_lock);
+}
+
+static void
+restart_atid_allocator(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+
+ if (t->natids == 0)
+ return;
+ mtx_lock(&t->atid_lock);
+ KASSERT(t->atids_in_use == 0,
+ ("%s: %d atids still in use.", __func__, t->atids_in_use));
+ t->atid_alloc_stopped = false;
+ mtx_unlock(&t->atid_lock);
+}
+
int
alloc_atid(struct adapter *sc, void *ctx)
{
@@ -3953,7 +4279,7 @@ alloc_atid(struct adapter *sc, void *ctx)
int atid = -1;
mtx_lock(&t->atid_lock);
- if (t->afree) {
+ if (t->afree && !t->atid_alloc_stopped) {
union aopen_entry *p = t->afree;
atid = p - t->atid_tab;
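With atid_alloc_stopped in place, alloc_atid returns -1 while the adapter is quiesced, and restart_atid_allocator asserts that every atid was released in the interim. The intended lifecycle across a reset, as a sketch (hypothetical caller; the real sequencing lives in stop_lld/restart_lld):

	stop_atid_allocator(sc);	/* alloc_atid() now fails (-1)      */
	/* ... reset; free_atid() drains atids_in_use to zero ...        */
	restart_atid_allocator(sc);	/* KASSERTs atids_in_use == 0       */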
@@ -4259,8 +4585,27 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
iaq->nrxq_vi = t4_nrxq_vi;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
if (is_offload(sc) || is_ethoffload(sc)) {
- iaq->nofldtxq = t4_nofldtxq;
- iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ if (sc->params.tid_qid_sel_mask == 0) {
+ iaq->nofldtxq = t4_nofldtxq;
+ iaq->nofldtxq_vi = t4_nofldtxq_vi;
+ } else {
+ iaq->nofldtxq = roundup(t4_nofldtxq, sc->params.ncores);
+ iaq->nofldtxq_vi = roundup(t4_nofldtxq_vi,
+ sc->params.ncores);
+ if (iaq->nofldtxq != t4_nofldtxq)
+ device_printf(sc->dev,
+ "nofldtxq updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq, iaq->nofldtxq,
+ sc->params.ncores);
+ if (iaq->num_vis > 1 &&
+ iaq->nofldtxq_vi != t4_nofldtxq_vi)
+ device_printf(sc->dev,
+ "nofldtxq_vi updated (%d -> %d) for correct"
+ " operation with %d firmware cores.\n",
+ t4_nofldtxq_vi, iaq->nofldtxq_vi,
+ sc->params.ncores);
+ }
}
#endif
#ifdef TCP_OFFLOAD
@@ -4332,9 +4677,7 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
*/
do {
if (iaq->nrxq > 1) {
- do {
- iaq->nrxq--;
- } while (!powerof2(iaq->nrxq));
+ iaq->nrxq = rounddown_pow_of_two(iaq->nrxq - 1);
if (iaq->nnmrxq > iaq->nrxq)
iaq->nnmrxq = iaq->nrxq;
}
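The old decrement loop and the new expression agree: both yield the largest power of two strictly below the starting value, i.e. rounddown_pow_of_two(n - 1). For nrxq = 8 the loop visits 7, 6, 5 and stops at 4; rounddown_pow_of_two(7) is 4 directly. A standalone check of that equivalence (userland sketch with a local stand-in for the kernel helper):

	#include <assert.h>

	/* Largest power of two <= x, for x > 0. */
	static unsigned
	rounddown_pow_of_two(unsigned x)
	{
		while (x & (x - 1))
			x &= x - 1;	/* clear lowest set bit */
		return (x);
	}

	int
	main(void)
	{
		assert(rounddown_pow_of_two(8 - 1) == 4);
		assert(rounddown_pow_of_two(9 - 1) == 8);
		assert(rounddown_pow_of_two(5 - 1) == 4);
		return (0);
	}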
@@ -4363,6 +4706,10 @@ calculate_iaq(struct adapter *sc, struct intrs_and_queues *iaq, int itype,
if (iaq->nofldrxq > 0) {
iaq->nofldrxq = 1;
iaq->nofldtxq = 1;
+ if (sc->params.tid_qid_sel_mask == 0)
+ iaq->nofldtxq = 1;
+ else
+ iaq->nofldtxq = sc->params.ncores;
}
iaq->nnmtxq = 0;
iaq->nnmrxq = 0;
@@ -4375,9 +4722,10 @@ done:
MPASS(iaq->nirq > 0);
MPASS(iaq->nrxq > 0);
MPASS(iaq->ntxq > 0);
- if (itype == INTR_MSI) {
+ if (itype == INTR_MSI)
MPASS(powerof2(iaq->nirq));
- }
+ if (sc->params.tid_qid_sel_mask != 0)
+ MPASS(iaq->nofldtxq % sc->params.ncores == 0);
}
static int
@@ -4519,6 +4867,22 @@ struct fw_info {
.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
.intfver_fcoe = FW_INTFVER(T6, FCOE),
},
+ }, {
+ .chip = CHELSIO_T7,
+ .kld_name = "t7fw_cfg",
+ .fw_mod_name = "t7fw",
+ .fw_h = {
+ .chip = FW_HDR_CHIP_T7,
+ .fw_ver = htobe32(FW_VERSION(T7)),
+ .intfver_nic = FW_INTFVER(T7, NIC),
+ .intfver_vnic = FW_INTFVER(T7, VNIC),
+ .intfver_ofld = FW_INTFVER(T7, OFLD),
+ .intfver_ri = FW_INTFVER(T7, RI),
+ .intfver_iscsipdu = FW_INTFVER(T7, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T7, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T7, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T7, FCOE),
+ },
}
};
@@ -4840,7 +5204,7 @@ done:
static int
copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
- uint32_t mtype, uint32_t moff)
+ uint32_t mtype, uint32_t moff, u_int maxlen)
{
struct fw_info *fw_info;
const struct firmware *dcfg, *rcfg = NULL;
@@ -4892,10 +5256,10 @@ copy_cfg_file_to_card(struct adapter *sc, char *cfg_file,
cflen = rcfg->datasize & ~3;
}
- if (cflen > FLASH_CFG_MAX_SIZE) {
+ if (cflen > maxlen) {
device_printf(sc->dev,
"config file too long (%d, max allowed is %d).\n",
- cflen, FLASH_CFG_MAX_SIZE);
+ cflen, maxlen);
rc = EINVAL;
goto done;
}
@@ -4920,6 +5284,7 @@ struct caps_allowed {
uint16_t nbmcaps;
uint16_t linkcaps;
uint16_t switchcaps;
+ uint16_t nvmecaps;
uint16_t niccaps;
uint16_t toecaps;
uint16_t rdmacaps;
@@ -4947,6 +5312,8 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
int rc;
struct fw_caps_config_cmd caps;
uint32_t mtype, moff, finicsum, cfcsum, param, val;
+ unsigned int maxlen = 0;
+ const int cfg_addr = t4_flash_cfg_addr(sc, &maxlen);
rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
if (rc != 0) {
@@ -4963,7 +5330,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
caps.cfvalid_to_len16 = htobe32(FW_LEN16(caps));
} else if (strncmp(cfg_file, FLASH_CF, sizeof(t4_cfg_file)) == 0) {
mtype = FW_MEMTYPE_FLASH;
- moff = t4_flash_cfg_addr(sc);
+ moff = cfg_addr;
caps.cfvalid_to_len16 = htobe32(F_FW_CAPS_CONFIG_CMD_CFVALID |
V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
@@ -4987,7 +5354,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(moff >> 16) |
FW_LEN16(caps));
- rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff);
+ rc = copy_cfg_file_to_card(sc, cfg_file, mtype, moff, maxlen);
if (rc != 0) {
device_printf(sc->dev,
"failed to upload config file to card: %d.\n", rc);
@@ -5021,6 +5388,7 @@ apply_cfg_and_initialize(struct adapter *sc, char *cfg_file,
LIMIT_CAPS(nbm);
LIMIT_CAPS(link);
LIMIT_CAPS(switch);
+ LIMIT_CAPS(nvme);
LIMIT_CAPS(nic);
LIMIT_CAPS(toe);
LIMIT_CAPS(rdma);
@@ -5086,6 +5454,7 @@ partition_resources(struct adapter *sc)
COPY_CAPS(nbm);
COPY_CAPS(link);
COPY_CAPS(switch);
+ COPY_CAPS(nvme);
COPY_CAPS(nic);
COPY_CAPS(toe);
COPY_CAPS(rdma);
@@ -5162,7 +5531,7 @@ get_params__pre_init(struct adapter *sc)
sc->params.vpd.cclk = val[1];
/* Read device log parameters. */
- rc = -t4_init_devlog_params(sc, 1);
+ rc = -t4_init_devlog_ncores_params(sc, 1);
if (rc == 0)
fixup_devlog_params(sc);
else {
@@ -5258,9 +5627,9 @@ get_params__post_init(struct adapter *sc)
}
sc->vres.l2t.start = val[4];
sc->vres.l2t.size = val[5] - val[4] + 1;
- KASSERT(sc->vres.l2t.size <= L2T_SIZE,
- ("%s: L2 table size (%u) larger than expected (%u)",
- __func__, sc->vres.l2t.size, L2T_SIZE));
+ /* val[5] is the last hwidx and it must not collide with F_SYNC_WR */
+ if (sc->vres.l2t.size > 0)
+ MPASS(fls(val[5]) <= S_SYNC_WR);
sc->params.core_vdd = val[6];
param[0] = FW_PARAM_PFVF(IQFLINT_END);
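The KASSERT that compared against the fixed L2T_SIZE (deleted from t4_l2t.h above) is replaced by a check derived from the work-request encoding: the L2T index and the SYNC_WR flag share one field, V_SYNC_WR(x) being x << S_SYNC_WR, so the largest hwidx the firmware reports (val[5]) must occupy only bits below S_SYNC_WR. Since fls returns the 1-based position of the highest set bit, fls(val[5]) <= S_SYNC_WR is equivalent to val[5] < (1 << S_SYNC_WR); if S_SYNC_WR were 12, any hwidx up to 4095 packs cleanly while 4096 would collide with the flag. A compact sketch of the packing this protects (hypothetical helper):

	static uint32_t
	pack_l2e_word(u_int hwidx, bool sync)
	{
		MPASS(fls(hwidx) <= S_SYNC_WR);	/* index below the flag bit */
		return (V_SYNC_WR(sync) | hwidx);
	}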
@@ -5316,6 +5685,14 @@ get_params__post_init(struct adapter *sc)
}
}
+ if (sc->params.ncores > 1) {
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ param[0] = FW_PARAM_DEV(TID_QID_SEL_MASK);
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ sc->params.tid_qid_sel_mask = rc == 0 ? val[0] : 0;
+ }
+
/*
* The parameters that follow may not be available on all firmwares. We
* query them individually rather than in a compound query because old
@@ -5341,6 +5718,14 @@ get_params__post_init(struct adapter *sc)
else
sc->params.tp_ch_map = UINT32_MAX; /* Not a legal value. */
+ param[0] = FW_PARAM_DEV(TX_TPCHMAP);
+ val[0] = 0;
+ rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, param, val);
+ if (rc == 0)
+ sc->params.tx_tp_ch_map = val[0];
+ else
+ sc->params.tx_tp_ch_map = UINT32_MAX; /* Not a legal value. */
+
/*
* Determine whether the firmware supports the filter2 work request.
*/
@@ -5412,6 +5797,7 @@ get_params__post_init(struct adapter *sc)
READ_CAPS(nbmcaps);
READ_CAPS(linkcaps);
READ_CAPS(switchcaps);
+ READ_CAPS(nvmecaps);
READ_CAPS(niccaps);
READ_CAPS(toecaps);
READ_CAPS(rdmacaps);
@@ -5754,9 +6140,13 @@ set_params__post_init(struct adapter *sc)
#ifdef KERN_TLS
if (is_ktls(sc)) {
sc->tlst.inline_keys = t4_tls_inline_keys;
- sc->tlst.combo_wrs = t4_tls_combo_wrs;
- if (t4_kern_tls != 0 && is_t6(sc))
+ if (t4_kern_tls != 0 && is_t6(sc)) {
+ sc->tlst.combo_wrs = t4_tls_combo_wrs;
t6_config_kern_tls(sc, true);
+ } else {
+ sc->tlst.short_records = t4_tls_short_records;
+ sc->tlst.partial_ghash = t4_tls_partial_ghash;
+ }
}
#endif
return (0);
@@ -5768,12 +6158,9 @@ set_params__post_init(struct adapter *sc)
static void
t4_set_desc(struct adapter *sc)
{
- char buf[128];
struct adapter_params *p = &sc->params;
- snprintf(buf, sizeof(buf), "Chelsio %s", p->vpd.id);
-
- device_set_desc_copy(sc->dev, buf);
+ device_set_descf(sc->dev, "Chelsio %s", p->vpd.id);
}
static inline void
@@ -6030,25 +6417,27 @@ apply_link_config(struct port_info *pi)
if (lc->requested_fec & FEC_BASER_RS)
MPASS(lc->pcaps & FW_PORT_CAP32_FEC_BASER_RS);
#endif
- rc = -t4_link_l1cfg(sc, sc->mbox, pi->tx_chan, lc);
- if (rc != 0) {
- /* Don't complain if the VF driver gets back an EPERM. */
- if (!(sc->flags & IS_VF) || rc != FW_EPERM)
+ if (!(sc->flags & IS_VF)) {
+ rc = -t4_link_l1cfg(sc, sc->mbox, pi->hw_port, lc);
+ if (rc != 0) {
device_printf(pi->dev, "l1cfg failed: %d\n", rc);
- } else {
- /*
- * An L1_CFG will almost always result in a link-change event if
- * the link is up, and the driver will refresh the actual
- * fec/fc/etc. when the notification is processed. If the link
- * is down then the actual settings are meaningless.
- *
- * This takes care of the case where a change in the L1 settings
- * may not result in a notification.
- */
- if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
- lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
+ return (rc);
+ }
}
- return (rc);
+
+ /*
+ * An L1_CFG will almost always result in a link-change event if the
+ * link is up, and the driver will refresh the actual fec/fc/etc. when
+ * the notification is processed. If the link is down then the actual
+ * settings are meaningless.
+ *
+ * This takes care of the case where a change in the L1 settings may not
+ * result in a notification.
+ */
+ if (lc->link_ok && !(lc->requested_fc & PAUSE_AUTONEG))
+ lc->fc = lc->requested_fc & (PAUSE_TX | PAUSE_RX);
+
+ return (0);
}
#define FW_MAC_EXACT_CHUNK 7
@@ -6237,20 +6626,13 @@ int
begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
char *wmesg)
{
- int rc, pri;
+ int rc;
#ifdef WITNESS
/* the caller thinks it's ok to sleep, but is it really? */
if (flags & SLEEP_OK)
- WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
- "begin_synchronized_op");
+ WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
#endif
-
- if (INTR_OK)
- pri = PCATCH;
- else
- pri = 0;
-
ADAPTER_LOCK(sc);
for (;;) {
@@ -6269,7 +6651,8 @@ begin_synchronized_op(struct adapter *sc, struct vi_info *vi, int flags,
goto done;
}
- if (mtx_sleep(&sc->flags, &sc->sc_lock, pri, wmesg, 0)) {
+ if (mtx_sleep(&sc->flags, &sc->sc_lock,
+ flags & INTR_OK ? PCATCH : 0, wmesg, 0)) {
rc = EINTR;
goto done;
}
@@ -6396,9 +6779,7 @@ cxgbe_init_synchronized(struct vi_info *vi)
*/
if (sc->traceq < 0 && IS_MAIN_VI(vi)) {
sc->traceq = sc->sge.rxq[vi->first_rxq].iq.abs_id;
- t4_write_reg(sc, is_t4(sc) ? A_MPS_TRC_RSS_CONTROL :
- A_MPS_T5_TRC_RSS_CONTROL, V_RSSCONTROL(pi->tx_chan) |
- V_QUEUENUMBER(sc->traceq));
+ t4_set_trace_rss_control(sc, pi->tx_chan, sc->traceq);
pi->flags |= HAS_TRACEQ;
}
@@ -7001,8 +7382,22 @@ quiesce_txq(struct sge_txq *txq)
static void
quiesce_wrq(struct sge_wrq *wrq)
{
+ struct wrqe *wr;
- /* XXXTX */
+ TXQ_LOCK(wrq);
+ while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) {
+ STAILQ_REMOVE_HEAD(&wrq->wr_list, link);
+#ifdef INVARIANTS
+ wrq->nwr_pending--;
+ wrq->ndesc_needed -= howmany(wr->wr_len, EQ_ESIZE);
+#endif
+ free(wr, M_CXGBE);
+ }
+ MPASS(wrq->nwr_pending == 0);
+ MPASS(wrq->ndesc_needed == 0);
+ wrq->nwr_pending = 0;
+ wrq->ndesc_needed = 0;
+ TXQ_UNLOCK(wrq);
}
static void
@@ -7244,7 +7639,7 @@ cxgbe_refresh_stats(struct vi_info *vi)
pi = vi->pi;
sc = vi->adapter;
tnl_cong_drops = 0;
- t4_get_port_stats(sc, pi->port_id, &pi->stats);
+ t4_get_port_stats(sc, pi->hw_port, &pi->stats);
chan_map = pi->rx_e_chan_map;
while (chan_map) {
i = ffs(chan_map) - 1;
@@ -7282,6 +7677,150 @@ vi_tick(void *arg)
callout_schedule(&vi->tick, hz);
}
+/* CIM inbound queues */
+static const char *t4_ibq[CIM_NUM_IBQ] = {
+ "ibq_tp0", "ibq_tp1", "ibq_ulp", "ibq_sge0", "ibq_sge1", "ibq_ncsi"
+};
+static const char *t7_ibq[CIM_NUM_IBQ_T7] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ "ibq_sge1", "ibq_ncsi", NULL, "ibq_ipc1", "ibq_ipc2", "ibq_ipc3",
+ "ibq_ipc4", "ibq_ipc5", "ibq_ipc6", "ibq_ipc7"
+};
+static const char *t7_ibq_sec[] = {
+ "ibq_tp0", "ibq_tp1", "ibq_tp2", "ibq_tp3", "ibq_ulp", "ibq_sge0",
+ NULL, NULL, NULL, "ibq_ipc0"
+};
+
+/* CIM outbound queues */
+static const char *t4_obq[CIM_NUM_OBQ_T5] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", "obq_sge_rx_q1" /* These two are T5/T6 only */
+};
+static const char *t7_obq[CIM_NUM_OBQ_T7] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", "obq_ncsi",
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc1", "obq_ipc2", "obq_ipc3",
+ "obq_ipc4", "obq_ipc5", "obq_ipc6", "obq_ipc7"
+};
+static const char *t7_obq_sec[] = {
+ "obq_ulp0", "obq_ulp1", "obq_ulp2", "obq_ulp3", "obq_sge", NULL,
+ "obq_sge_rx_q0", NULL, NULL, "obq_ipc0"
+};
+
+static void
+cim_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *c0)
+{
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *children1;
+ int i, j, qcount;
+ char s[16];
+ const char **qname;
+
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, "cim",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM block");
+ c0 = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_U8(ctx, c0, OID_AUTO, "ncores", CTLFLAG_RD, NULL,
+ sc->params.ncores, "# of active CIM cores");
+
+ for (i = 0; i < sc->params.ncores; i++) {
+ snprintf(s, sizeof(s), "%u", i);
+ oid = SYSCTL_ADD_NODE(ctx, c0, OID_AUTO, s,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "CIM core");
+ children1 = SYSCTL_CHILDREN(oid);
+
+ /*
+ * CTLFLAG_SKIP because the misc.devlog sysctl already displays
+ * the log for all cores. Use this sysctl to get the log for a
+ * particular core only.
+ */
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "devlog",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE | CTLFLAG_SKIP,
+ sc, i, sysctl_devlog, "A", "firmware's device log");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "loadavg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_loadavg, "A",
+ "microprocessor load averages (select firmwares only)");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "qcfg",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ chip_id(sc) > CHELSIO_T6 ? sysctl_cim_qcfg_t7 : sysctl_cim_qcfg,
+ "A", "Queue configuration");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_la, "A", "Logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "ma_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
+
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, "pif_la",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, i,
+ sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
+
+ /* IBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = &t4_ibq[0];
+ qcount = nitems(t4_ibq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = &t7_ibq[0];
+ qcount = nitems(t7_ibq);
+ } else {
+ qname = &t7_ibq_sec[0];
+ qcount = nitems(t7_ibq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_ibq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_ibq, "A", NULL);
+ }
+
+ /* OBQs */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ qname = t4_obq;
+ qcount = CIM_NUM_OBQ;
+ break;
+ case CHELSIO_T5:
+ case CHELSIO_T6:
+ qname = t4_obq;
+ qcount = nitems(t4_obq);
+ break;
+ case CHELSIO_T7:
+ default:
+ if (i == 0) {
+ qname = t7_obq;
+ qcount = nitems(t7_obq);
+ } else {
+ qname = t7_obq_sec;
+ qcount = nitems(t7_obq_sec);
+ }
+ break;
+ }
+ MPASS(qcount <= sc->chip_params->cim_num_obq);
+ for (j = 0; j < qcount; j++) {
+ if (qname[j] == NULL)
+ continue;
+ SYSCTL_ADD_PROC(ctx, children1, OID_AUTO, qname[j],
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
+ (i << 16) | j, sysctl_cim_obq, "A", NULL);
+ }
+ }
+}
+
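
[Editor's note: cim_sysctls() above drives one handler across all cores
and queues by packing both indices into the sysctl's arg2 -- the CIM
core id in the high 16 bits, the queue id in the low 16.  The encoding
in isolation (illustrative helpers, not in the patch):]

	/* Mirror the (coreid << 16) | qid convention used above. */
	static inline int
	cim_q_arg2(int coreid, int qid)
	{
		return ((coreid << 16) | qid);
	}

	static inline void
	cim_q_decode(int arg2, int *coreid, int *qid)
	{
		*qid = arg2 & 0xffff;
		*coreid = arg2 >> 16;
	}
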
/*
* Should match fw_caps_config_<foo> enums in t4fw_interface.h
*/
@@ -7291,17 +7830,18 @@ static char *caps_decoder[] = {
"\20\001INGRESS\002EGRESS", /* 2: switch */
"\20\001NIC\002VM\003IDS\004UM\005UM_ISGL" /* 3: NIC */
"\006HASHFILTER\007ETHOFLD",
- "\20\001TOE", /* 4: TOE */
- "\20\001RDDP\002RDMAC", /* 5: RDMA */
+ "\20\001TOE\002SENDPATH", /* 4: TOE */
+ "\20\001RDDP\002RDMAC\003ROCEv2", /* 5: RDMA */
"\20\001INITIATOR_PDU\002TARGET_PDU" /* 6: iSCSI */
"\003INITIATOR_CNXOFLD\004TARGET_CNXOFLD"
"\005INITIATOR_SSNOFLD\006TARGET_SSNOFLD"
"\007T10DIF"
"\010INITIATOR_CMDOFLD\011TARGET_CMDOFLD",
"\20\001LOOKASIDE\002TLSKEYS\003IPSEC_INLINE" /* 7: Crypto */
- "\004TLS_HW",
+	    "\004TLS_HW\005TOE_IPSEC",
"\20\001INITIATOR\002TARGET\003CTRL_OFLD" /* 8: FCoE */
"\004PO_INITIATOR\005PO_TARGET",
+ "\20\001NVMe_TCP", /* 9: NVMe */
};
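
[Editor's note: caps_decoder[] entries are printf(9) "%b" bit strings:
the leading octal escape ("\20", i.e. 16) selects the output radix and
each subsequent escape is a 1-based bit number followed by that bit's
name.  A small illustration of how the updated TOE entry renders:]

	#include <sys/types.h>
	#include <sys/systm.h>

	static void
	show_toecaps(uint16_t toecaps)
	{
		/* toecaps == 0x3 prints: "toecaps: 3<TOE,SENDPATH>" */
		printf("toecaps: %b\n", toecaps, "\20\001TOE\002SENDPATH");
	}
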
void
@@ -7406,6 +7946,7 @@ t4_sysctls(struct adapter *sc)
SYSCTL_CAP(nbmcaps, 0, "NBM");
SYSCTL_CAP(linkcaps, 1, "link");
SYSCTL_CAP(switchcaps, 2, "switch");
+ SYSCTL_CAP(nvmecaps, 9, "NVMe");
SYSCTL_CAP(niccaps, 3, "NIC");
SYSCTL_CAP(toecaps, 4, "TCP offload");
SYSCTL_CAP(rdmacaps, 5, "RDMA");
@@ -7424,11 +7965,6 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
sysctl_reset_sensor, "I", "reset the chip's temperature sensor.");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "loadavg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_loadavg, "A",
- "microprocessor load averages (debug firmwares only)");
-
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "core_vdd",
CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0, sysctl_vdd,
"I", "core Vdd (in mV)");
@@ -7460,81 +7996,7 @@ t4_sysctls(struct adapter *sc)
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
sysctl_cctrl, "A", "congestion control");
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 0 (TP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_tp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 1,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 1 (TP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ulp",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 2,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 2 (ULP)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 3,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 3 (SGE0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_sge1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 4,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 4 (SGE1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ibq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 5,
- sysctl_cim_ibq_obq, "A", "CIM IBQ 5 (NCSI)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_la, "A", "CIM logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_ma_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_ma_la, "A", "CIM MA logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp0",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 0 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 0 (ULP0)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp1",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 1 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 1 (ULP1)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp2",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 2 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 2 (ULP2)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ulp3",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 3 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 3 (ULP3)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 4 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 4 (SGE)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_ncsi",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 5 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A", "CIM OBQ 5 (NCSI)");
-
- if (chip_id(sc) > CHELSIO_T4) {
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge0_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 6 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 6 (SGE0-RX)");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_obq_sge1_rx",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc,
- 7 + CIM_NUM_IBQ, sysctl_cim_ibq_obq, "A",
- "CIM OBQ 7 (SGE1-RX)");
- }
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_pif_la",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_pif_la, "A", "CIM PIF logic analyzer");
-
- SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cim_qcfg",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_cim_qcfg, "A", "CIM queue configuration");
+ cim_sysctls(sc, ctx, children);
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cpl_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7549,8 +8011,8 @@ t4_sysctls(struct adapter *sc)
sysctl_tid_stats, "A", "tid stats");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "devlog",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- sysctl_devlog, "A", "firmware's device log");
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, -1,
+ sysctl_devlog, "A", "firmware's device log (all cores)");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "fcoe_stats",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
@@ -7584,7 +8046,8 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mps_tcam",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
- chip_id(sc) <= CHELSIO_T5 ? sysctl_mps_tcam : sysctl_mps_tcam_t6,
+ chip_id(sc) >= CHELSIO_T7 ? sysctl_mps_tcam_t7 :
+ (chip_id(sc) >= CHELSIO_T6 ? sysctl_mps_tcam_t6 : sysctl_mps_tcam),
"A", "MPS TCAM entries");
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "path_mtus",
@@ -7656,6 +8119,14 @@ t4_sysctls(struct adapter *sc)
CTLFLAG_RW, &sc->tlst.combo_wrs, 0, "Attempt to "
"combine TCB field updates with TLS record work "
"requests.");
+ else {
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "short_records",
+ CTLFLAG_RW, &sc->tlst.short_records, 0,
+ "Use cipher-only mode for short records.");
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "partial_ghash",
+ CTLFLAG_RW, &sc->tlst.partial_ghash, 0,
+ "Use partial GHASH for AES-GCM records.");
+ }
}
#endif
@@ -7691,7 +8162,7 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_coalesce",
CTLFLAG_RW, &sc->tt.rx_coalesce, 0, "receive coalescing");
- sc->tt.tls = 0;
+ sc->tt.tls = 1;
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tls", CTLTYPE_INT |
CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, sysctl_tls, "I",
"Inline TLS allowed");
@@ -8031,86 +8502,112 @@ cxgbe_sysctls(struct port_info *pi)
&pi->tx_parse_error, 0,
"# of tx packets with invalid length or # of segments");
-#define T4_REGSTAT(name, stat, desc) \
- SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
- CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
- t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
- sysctl_handle_t4_reg64, "QU", desc)
-
-/* We get these from port_stats and they may be stale by up to 1s */
-#define T4_PORTSTAT(name, desc) \
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, #name, CTLFLAG_RD, \
- &pi->stats.name, desc)
-
- T4_REGSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
- T4_REGSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
- T4_REGSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
- T4_REGSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
- T4_REGSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
- T4_REGSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
- T4_REGSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
- T4_REGSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
- T4_REGSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
- T4_REGSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
- T4_REGSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
- T4_REGSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
- T4_REGSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
-
- T4_REGSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
- T4_REGSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
- T4_REGSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
- T4_REGSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
- T4_REGSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
- T4_REGSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
- T4_REGSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
+#define T4_LBSTAT(name, stat, desc) do { \
+ if (sc->params.tp.lb_mode) { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, pi, \
+ A_MPS_PORT_STAT_##stat##_L, \
+ sysctl_handle_t4_portstat64, "QU", desc); \
+ } else { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ t4_port_reg(sc, pi->tx_chan, A_MPS_PORT_STAT_##stat##_L), \
+ sysctl_handle_t4_reg64, "QU", desc); \
+ } \
+} while (0)
+
+ T4_LBSTAT(tx_octets, TX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(tx_frames, TX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(tx_bcast_frames, TX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(tx_mcast_frames, TX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(tx_ucast_frames, TX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(tx_error_frames, TX_PORT_ERROR, "# of error frames");
+ T4_LBSTAT(tx_frames_64, TX_PORT_64B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_65_127, TX_PORT_65B_127B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_128_255, TX_PORT_128B_255B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_256_511, TX_PORT_256B_511B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_512_1023, TX_PORT_512B_1023B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1024_1518, TX_PORT_1024B_1518B, "# of tx frames in this range");
+ T4_LBSTAT(tx_frames_1519_max, TX_PORT_1519B_MAX, "# of tx frames in this range");
+ T4_LBSTAT(tx_drop, TX_PORT_DROP, "# of dropped tx frames");
+ T4_LBSTAT(tx_pause, TX_PORT_PAUSE, "# of pause frames transmitted");
+ T4_LBSTAT(tx_ppp0, TX_PORT_PPP0, "# of PPP prio 0 frames transmitted");
+ T4_LBSTAT(tx_ppp1, TX_PORT_PPP1, "# of PPP prio 1 frames transmitted");
+ T4_LBSTAT(tx_ppp2, TX_PORT_PPP2, "# of PPP prio 2 frames transmitted");
+ T4_LBSTAT(tx_ppp3, TX_PORT_PPP3, "# of PPP prio 3 frames transmitted");
+ T4_LBSTAT(tx_ppp4, TX_PORT_PPP4, "# of PPP prio 4 frames transmitted");
+ T4_LBSTAT(tx_ppp5, TX_PORT_PPP5, "# of PPP prio 5 frames transmitted");
+ T4_LBSTAT(tx_ppp6, TX_PORT_PPP6, "# of PPP prio 6 frames transmitted");
+ T4_LBSTAT(tx_ppp7, TX_PORT_PPP7, "# of PPP prio 7 frames transmitted");
+
+ T4_LBSTAT(rx_octets, RX_PORT_BYTES, "# of octets in good frames");
+ T4_LBSTAT(rx_frames, RX_PORT_FRAMES, "total # of good frames");
+ T4_LBSTAT(rx_bcast_frames, RX_PORT_BCAST, "# of broadcast frames");
+ T4_LBSTAT(rx_mcast_frames, RX_PORT_MCAST, "# of multicast frames");
+ T4_LBSTAT(rx_ucast_frames, RX_PORT_UCAST, "# of unicast frames");
+ T4_LBSTAT(rx_too_long, RX_PORT_MTU_ERROR, "# of frames exceeding MTU");
+ T4_LBSTAT(rx_jabber, RX_PORT_MTU_CRC_ERROR, "# of jabber frames");
if (is_t6(sc)) {
- T4_PORTSTAT(rx_fcs_err,
+ /* Read from port_stats and may be stale by up to 1s */
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "rx_fcs_err",
+ CTLFLAG_RD, &pi->stats.rx_fcs_err,
"# of frames received with bad FCS since last link up");
} else {
- T4_REGSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
+ T4_LBSTAT(rx_fcs_err, RX_PORT_CRC_ERROR,
"# of frames received with bad FCS");
}
- T4_REGSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
- T4_REGSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
- T4_REGSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
- T4_REGSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
- T4_REGSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
- T4_REGSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
- T4_REGSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
- T4_REGSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
- T4_REGSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
- T4_REGSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
- T4_REGSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
- T4_REGSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
- T4_REGSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
- T4_REGSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
-
- T4_PORTSTAT(rx_ovflow0, "# drops due to buffer-group 0 overflows");
- T4_PORTSTAT(rx_ovflow1, "# drops due to buffer-group 1 overflows");
- T4_PORTSTAT(rx_ovflow2, "# drops due to buffer-group 2 overflows");
- T4_PORTSTAT(rx_ovflow3, "# drops due to buffer-group 3 overflows");
- T4_PORTSTAT(rx_trunc0, "# of buffer-group 0 truncated packets");
- T4_PORTSTAT(rx_trunc1, "# of buffer-group 1 truncated packets");
- T4_PORTSTAT(rx_trunc2, "# of buffer-group 2 truncated packets");
- T4_PORTSTAT(rx_trunc3, "# of buffer-group 3 truncated packets");
+ T4_LBSTAT(rx_len_err, RX_PORT_LEN_ERROR, "# of frames received with length error");
+ T4_LBSTAT(rx_symbol_err, RX_PORT_SYM_ERROR, "symbol errors");
+ T4_LBSTAT(rx_runt, RX_PORT_LESS_64B, "# of short frames received");
+ T4_LBSTAT(rx_frames_64, RX_PORT_64B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_65_127, RX_PORT_65B_127B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_128_255, RX_PORT_128B_255B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_256_511, RX_PORT_256B_511B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_512_1023, RX_PORT_512B_1023B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1024_1518, RX_PORT_1024B_1518B, "# of rx frames in this range");
+ T4_LBSTAT(rx_frames_1519_max, RX_PORT_1519B_MAX, "# of rx frames in this range");
+ T4_LBSTAT(rx_pause, RX_PORT_PAUSE, "# of pause frames received");
+ T4_LBSTAT(rx_ppp0, RX_PORT_PPP0, "# of PPP prio 0 frames received");
+ T4_LBSTAT(rx_ppp1, RX_PORT_PPP1, "# of PPP prio 1 frames received");
+ T4_LBSTAT(rx_ppp2, RX_PORT_PPP2, "# of PPP prio 2 frames received");
+ T4_LBSTAT(rx_ppp3, RX_PORT_PPP3, "# of PPP prio 3 frames received");
+ T4_LBSTAT(rx_ppp4, RX_PORT_PPP4, "# of PPP prio 4 frames received");
+ T4_LBSTAT(rx_ppp5, RX_PORT_PPP5, "# of PPP prio 5 frames received");
+ T4_LBSTAT(rx_ppp6, RX_PORT_PPP6, "# of PPP prio 6 frames received");
+ T4_LBSTAT(rx_ppp7, RX_PORT_PPP7, "# of PPP prio 7 frames received");
+#undef T4_LBSTAT
+
+#define T4_REGSTAT(name, stat, desc) do { \
+ SYSCTL_ADD_OID(ctx, children, OID_AUTO, #name, \
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, \
+ A_MPS_STAT_##stat##_L, sysctl_handle_t4_reg64, "QU", desc); \
+} while (0)
+ if (pi->mps_bg_map & 1) {
+ T4_REGSTAT(rx_ovflow0, RX_BG_0_MAC_DROP_FRAME,
+ "# drops due to buffer-group 0 overflows");
+ T4_REGSTAT(rx_trunc0, RX_BG_0_MAC_TRUNC_FRAME,
+ "# of buffer-group 0 truncated packets");
+ }
+ if (pi->mps_bg_map & 2) {
+ T4_REGSTAT(rx_ovflow1, RX_BG_1_MAC_DROP_FRAME,
+ "# drops due to buffer-group 1 overflows");
+ T4_REGSTAT(rx_trunc1, RX_BG_1_MAC_TRUNC_FRAME,
+ "# of buffer-group 1 truncated packets");
+ }
+ if (pi->mps_bg_map & 4) {
+ T4_REGSTAT(rx_ovflow2, RX_BG_2_MAC_DROP_FRAME,
+ "# drops due to buffer-group 2 overflows");
+ T4_REGSTAT(rx_trunc2, RX_BG_2_MAC_TRUNC_FRAME,
+ "# of buffer-group 2 truncated packets");
+ }
+ if (pi->mps_bg_map & 8) {
+ T4_REGSTAT(rx_ovflow3, RX_BG_3_MAC_DROP_FRAME,
+ "# drops due to buffer-group 3 overflows");
+ T4_REGSTAT(rx_trunc3, RX_BG_3_MAC_TRUNC_FRAME,
+ "# of buffer-group 3 truncated packets");
+ }
#undef T4_REGSTAT
-#undef T4_PORTSTAT
}
static int
@@ -8137,10 +8634,6 @@ sysctl_bitfield_8b(SYSCTL_HANDLER_ARGS)
int rc;
struct sbuf *sb;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
@@ -8158,10 +8651,6 @@ sysctl_bitfield_16b(SYSCTL_HANDLER_ARGS)
int rc;
struct sbuf *sb;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
@@ -8185,7 +8674,7 @@ sysctl_btphy(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, &pi->vi[0], SLEEP_OK | INTR_OK, "t4btt");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
/* XXX: magic numbers */
@@ -8242,7 +8731,7 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
"t4txvm");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else if (if_getdrvflags(vi->ifp) & IFF_DRV_RUNNING) {
/*
@@ -8261,14 +8750,14 @@ sysctl_tx_vm_wr(SYSCTL_HANDLER_ARGS)
vi->flags |= TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_VM_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
if (!(sc->flags & IS_VF))
npkt--;
} else {
vi->flags &= ~TX_USES_VM_WR;
if_sethwtsomaxsegcount(vi->ifp, TX_SGL_SEGS_TSO);
ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
}
for_each_txq(vi, i, txq) {
@@ -8419,10 +8908,6 @@ sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
static char *bits = "\20\1RX\2TX\3AUTO";
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
@@ -8460,7 +8945,7 @@ sysctl_pause_settings(SYSCTL_HANDLER_ARGS)
"t4PAUSE");
if (rc)
return (rc);
- if (!hw_off_limits(sc)) {
+ if (hw_all_ok(sc)) {
PORT_LOCK(pi);
lc->requested_fc = n;
fixup_link_config(pi);
@@ -8482,17 +8967,12 @@ sysctl_link_fec(SYSCTL_HANDLER_ARGS)
struct link_config *lc = &pi->link_cfg;
int rc;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD1\5RSVD2";
-
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
if (lc->link_ok)
- sbuf_printf(sb, "%b", lc->fec, bits);
+ sbuf_printf(sb, "%b", lc->fec, t4_fec_bits);
else
sbuf_printf(sb, "no link");
rc = sbuf_finish(sb);
@@ -8512,18 +8992,12 @@ sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
if (req->newptr == NULL) {
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2"
- "\5RSVD3\6auto\7module";
-
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
return (ENOMEM);
- sbuf_printf(sb, "%b", lc->requested_fec, bits);
+ sbuf_printf(sb, "%b", lc->requested_fec, t4_fec_bits);
rc = sbuf_finish(sb);
sbuf_delete(sb);
} else {
@@ -8564,7 +9038,7 @@ sysctl_requested_fec(SYSCTL_HANDLER_ARGS)
lc->requested_fec = n & (M_FW_PORT_CAP32_FEC |
FEC_MODULE);
}
- if (!hw_off_limits(sc)) {
+ if (hw_all_ok(sc)) {
fixup_link_config(pi);
if (pi->up_vis > 0) {
rc = apply_link_config(pi);
@@ -8592,11 +9066,6 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
int rc;
int8_t fec;
struct sbuf *sb;
- static char *bits = "\20\1RS-FEC\2FC-FEC\3NO-FEC\4RSVD2\5RSVD3";
-
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (sb == NULL)
@@ -8606,7 +9075,7 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
rc = EBUSY;
goto done;
}
- if (hw_off_limits(sc)) {
+ if (!hw_all_ok(sc)) {
rc = ENXIO;
goto done;
}
@@ -8624,14 +9093,15 @@ sysctl_module_fec(SYSCTL_HANDLER_ARGS)
fec = lc->fec_hint;
if (pi->mod_type == FW_PORT_MOD_TYPE_NONE ||
!fec_supported(lc->pcaps)) {
+ PORT_UNLOCK(pi);
sbuf_printf(sb, "n/a");
} else {
if (fec == 0)
fec = FEC_NONE;
- sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, bits);
+ PORT_UNLOCK(pi);
+ sbuf_printf(sb, "%b", fec & M_FW_PORT_CAP32_FEC, t4_fec_bits);
}
rc = sbuf_finish(sb);
- PORT_UNLOCK(pi);
done:
sbuf_delete(sb);
end_synchronized_op(sc, 0);
@@ -8671,7 +9141,7 @@ sysctl_autoneg(SYSCTL_HANDLER_ARGS)
goto done;
}
lc->requested_aneg = val;
- if (!hw_off_limits(sc)) {
+ if (hw_all_ok(sc)) {
fixup_link_config(pi);
if (pi->up_vis > 0)
rc = apply_link_config(pi);
@@ -8706,7 +9176,7 @@ sysctl_force_fec(SYSCTL_HANDLER_ARGS)
return (rc);
PORT_LOCK(pi);
lc->force_fec = val;
- if (!hw_off_limits(sc)) {
+ if (hw_all_ok(sc)) {
fixup_link_config(pi);
if (pi->up_vis > 0)
rc = apply_link_config(pi);
@@ -8737,6 +9207,31 @@ sysctl_handle_t4_reg64(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_handle_t4_portstat64(SYSCTL_HANDLER_ARGS)
+{
+ struct port_info *pi = arg1;
+ struct adapter *sc = pi->adapter;
+ int rc, i, reg = arg2;
+ uint64_t val;
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ val = 0;
+ for (i = 0; i < sc->params.tp.lb_nchan; i++) {
+ val += t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan + i, reg));
+ }
+ rc = 0;
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc == 0)
+ rc = sysctl_handle_64(oidp, &val, 0, req);
+ return (rc);
+}
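
[Editor's note: with lb_mode set, a port's MPS counters are striped
across lb_nchan consecutive channels, so the handler above sums the same
64-bit counter over channels tx_chan .. tx_chan + lb_nchan - 1 (e.g.
channels 2 and 3 for tx_chan = 2, lb_nchan = 2).  The accumulation on
its own, reusing the driver's helpers:]

	static uint64_t
	sum_port_stat(struct adapter *sc, u_int first_chan, u_int nchan,
	    int reg)
	{
		uint64_t total = 0;
		u_int i;

		for (i = 0; i < nchan; i++)
			total += t4_read_reg64(sc,
			    t4_port_reg(sc, first_chan + i, reg));
		return (total);
	}
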
+
+static int
sysctl_temperature(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -8746,7 +9241,7 @@ sysctl_temperature(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4temp");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
@@ -8777,7 +9272,7 @@ sysctl_vdd(SYSCTL_HANDLER_ARGS)
"t4vdd");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
@@ -8814,7 +9309,7 @@ sysctl_reset_sensor(SYSCTL_HANDLER_ARGS)
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4srst");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
@@ -8836,25 +9331,26 @@ sysctl_loadavg(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc;
uint32_t param, val;
+ uint8_t coreid = (uint8_t)arg2;
+
+ KASSERT(coreid < sc->params.ncores,
+ ("%s: bad coreid %u\n", __func__, coreid));
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4lavg");
if (rc)
return (rc);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
rc = ENXIO;
else {
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
- V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD);
+ V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_LOAD) |
+ V_FW_PARAMS_PARAM_Y(coreid);
rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
}
end_synchronized_op(sc, 0);
if (rc)
return (rc);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -8884,14 +9380,11 @@ sysctl_cctrl(SYSCTL_HANDLER_ARGS)
"0.9375"
};
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -8917,50 +9410,75 @@ done:
return (rc);
}
-static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
- "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
- "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
- "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
-};
-
static int
-sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
+sysctl_cim_ibq(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n, qid = arg2;
+ int rc, i, n, qid, coreid;
uint32_t *buf, *p;
- char *qtype;
- u_int cim_num_obq = sc->chip_params->cim_num_obq;
- KASSERT(qid >= 0 && qid < CIM_NUM_IBQ + cim_num_obq,
- ("%s: bad qid %d\n", __func__, qid));
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
- if (qid < CIM_NUM_IBQ) {
- /* inbound queue */
- qtype = "IBQ";
- n = 4 * CIM_IBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_ibq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
- } else {
- /* outbound queue */
- qtype = "OBQ";
- qid -= CIM_NUM_IBQ;
- n = 4 * cim_num_obq * CIM_OBQ_SIZE;
- buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
- mtx_lock(&sc->reg_lock);
- if (hw_off_limits(sc))
- rc = -ENXIO;
- else
- rc = t4_read_cim_obq(sc, qid, buf, n);
- mtx_unlock(&sc->reg_lock);
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_ibq,
+ ("%s: bad ibq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
+ n = 4 * CIM_IBQ_SIZE;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_ibq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
+ if (rc < 0) {
+ rc = -rc;
+ goto done;
}
+ n = rc * sizeof(uint32_t); /* rc has # of words actually read */
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL) {
+ rc = ENOMEM;
+ goto done;
+ }
+ for (i = 0, p = buf; i < n; i += 16, p += 4)
+ sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
+ p[2], p[3]);
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+done:
+ free(buf, M_CXGBE);
+ return (rc);
+}
+
+static int
+sysctl_cim_obq(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i, n, qid, coreid;
+ uint32_t *buf, *p;
+
+ qid = arg2 & 0xffff;
+ coreid = arg2 >> 16;
+
+ KASSERT(qid >= 0 && qid < sc->chip_params->cim_num_obq,
+ ("%s: bad obq qid %d\n", __func__, qid));
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
+ n = 6 * CIM_OBQ_SIZE * 4;
+ buf = malloc(n * sizeof(uint32_t), M_CXGBE, M_ZERO | M_WAITOK);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = -ENXIO;
+ else
+ rc = t4_read_cim_obq_core(sc, coreid, qid, buf, n);
+ mtx_unlock(&sc->reg_lock);
if (rc < 0) {
rc = -rc;
goto done;
@@ -8976,12 +9494,9 @@ sysctl_cim_ibq_obq(SYSCTL_HANDLER_ARGS)
rc = ENOMEM;
goto done;
}
-
- sbuf_printf(sb, "%s%d %s", qtype , qid, qname[arg2]);
for (i = 0, p = buf; i < n; i += 16, p += 4)
sbuf_printf(sb, "\n%#06x: %08x %08x %08x %08x", i, p[0], p[1],
p[2], p[3]);
-
rc = sbuf_finish(sb);
sbuf_delete(sb);
done:
@@ -9052,7 +9567,7 @@ sbuf_cim_la6(struct adapter *sc, struct sbuf *sb, uint32_t *buf, uint32_t cfg)
}
static int
-sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_cim_la(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
uint32_t cfg, *buf;
int rc;
@@ -9067,9 +9582,10 @@ sbuf_cim_la(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else {
- rc = -t4_cim_read(sc, A_UP_UP_DBG_LA_CFG, 1, &cfg);
+ rc = -t4_cim_read_core(sc, 1, coreid, A_UP_UP_DBG_LA_CFG, 1,
+ &cfg);
if (rc == 0)
- rc = -t4_cim_read_la(sc, buf, NULL);
+ rc = -t4_cim_read_la_core(sc, coreid, buf, NULL);
}
mtx_unlock(&sc->reg_lock);
if (rc == 0) {
@@ -9086,17 +9602,15 @@ static int
sysctl_cim_la(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
+ int coreid = arg2;
struct sbuf *sb;
int rc;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
- rc = sbuf_cim_la(sc, sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, coreid, sb, M_WAITOK);
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -9133,7 +9647,7 @@ dump_cimla(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_cim_la(sc, &sb, M_WAITOK);
+ rc = sbuf_cim_la(sc, 0, &sb, M_WAITOK);
if (rc == 0) {
rc = sbuf_finish(&sb);
if (rc == 0) {
@@ -9159,10 +9673,6 @@ sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
uint32_t *buf, *p;
int rc;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -9170,6 +9680,7 @@ sysctl_cim_ma_la(SYSCTL_HANDLER_ARGS)
buf = malloc(2 * CIM_MALA_SIZE * 5 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9210,10 +9721,6 @@ sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
uint32_t *buf, *p;
int rc;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -9221,6 +9728,7 @@ sysctl_cim_pif_la(SYSCTL_HANDLER_ARGS)
buf = malloc(2 * CIM_PIFLA_SIZE * 6 * sizeof(uint32_t), M_CXGBE,
M_ZERO | M_WAITOK);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9263,6 +9771,13 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
uint32_t obq_wr[2 * CIM_NUM_OBQ_T5], *wr = obq_wr;
uint32_t stat[4 * (CIM_NUM_IBQ + CIM_NUM_OBQ_T5)], *p = stat;
u_int cim_num_obq, ibq_rdaddr, obq_rdaddr, nq;
+ static const char *qname[CIM_NUM_IBQ + CIM_NUM_OBQ_T5] = {
+ "TP0", "TP1", "ULP", "SGE0", "SGE1", "NC-SI", /* ibq's */
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", /* obq's */
+ "SGE0-RX", "SGE1-RX" /* additional obq's (T5 onwards) */
+ };
+
+ MPASS(chip_id(sc) < CHELSIO_T7);
cim_num_obq = sc->chip_params->cim_num_obq;
if (is_t4(sc)) {
@@ -9290,10 +9805,6 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
if (rc)
return (rc);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
if (sb == NULL)
return (ENOMEM);
@@ -9319,6 +9830,104 @@ sysctl_cim_qcfg(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_cim_qcfg_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ u_int coreid = arg2;
+ struct sbuf *sb;
+ int rc, i;
+ u_int addr;
+ uint16_t base[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t size[CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7];
+ uint16_t thres[CIM_NUM_IBQ_T7];
+ uint32_t obq_wr[2 * CIM_NUM_OBQ_T7], *wr = obq_wr;
+ uint32_t stat[4 * (CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7)], *p = stat;
+ static const char * const qname_ibq_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "SGE1", "NC-SI",
+ "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5", "IPC6", "IPC7",
+ };
+ static const char * const qname_obq_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "NC-SI", "SGE0-RX",
+ "RSVD", "RSVD", "IPC1", "IPC2", "IPC3", "IPC4", "IPC5",
+ "IPC6", "IPC7"
+ };
+ static const char * const qname_ibq_sec_t7[] = {
+ "TP0", "TP1", "TP2", "TP3", "ULP", "SGE0", "RSVD", "RSVD",
+ "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD", "RSVD",
+ };
+ static const char * const qname_obq_sec_t7[] = {
+ "ULP0", "ULP1", "ULP2", "ULP3", "SGE", "RSVD", "SGE0-RX",
+ "RSVD", "RSVD", "IPC0", "RSVD", "RSVD", "RSVD", "RSVD",
+ "RSVD", "RSVD",
+ };
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_IBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_IBQ_T7, stat);
+ if (rc != 0)
+ goto unlock;
+
+ rc = -t4_cim_read_core(sc, 1, coreid,
+ A_T7_UP_OBQ_0_SHADOW_RDADDR, 4 * CIM_NUM_OBQ_T7,
+ &stat[4 * CIM_NUM_IBQ_T7]);
+ if (rc != 0)
+ goto unlock;
+
+ addr = A_T7_UP_OBQ_0_SHADOW_REALADDR;
+ for (i = 0; i < CIM_NUM_OBQ_T7 * 2; i++, addr += 8) {
+ rc = -t4_cim_read_core(sc, 1, coreid, addr, 1,
+ &obq_wr[i]);
+ if (rc != 0)
+ goto unlock;
+ }
+ t4_read_cimq_cfg_core(sc, coreid, base, size, thres);
+ }
+unlock:
+ mtx_unlock(&sc->reg_lock);
+ if (rc)
+ return (rc);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, PAGE_SIZE, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb,
+ " Queue Base Size Thres RdPtr WrPtr SOP EOP Avail");
+
+ for (i = 0; i < CIM_NUM_IBQ_T7; i++, p += 4) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %5u %6x %4x %4u %4u %5u",
+ coreid == 0 ? qname_ibq_t7[i] : qname_ibq_sec_t7[i],
+ base[i], size[i], thres[i], G_IBQRDADDR(p[0]) & 0xfff,
+ G_IBQWRADDR(p[1]) & 0xfff, G_QUESOPCNT(p[3]),
+ G_QUEEOPCNT(p[3]), G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ for ( ; i < CIM_NUM_IBQ_T7 + CIM_NUM_OBQ_T7; i++, p += 4, wr += 2) {
+ if (!size[i])
+ continue;
+
+ sbuf_printf(sb, "\n%7s %5x %5u %12x %4x %4u %4u %5u",
+ coreid == 0 ? qname_obq_t7[i - CIM_NUM_IBQ_T7] :
+ qname_obq_sec_t7[i - CIM_NUM_IBQ_T7],
+ base[i], size[i], G_QUERDADDR(p[0]) & 0xfff,
+ wr[0] << 1, G_QUESOPCNT(p[3]), G_QUEEOPCNT(p[3]),
+ G_T7_QUEREMFLITS(p[2]) * 16);
+ }
+
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+ return (rc);
+}
+
+static int
sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -9326,14 +9935,11 @@ sysctl_cpl_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_cpl_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9372,14 +9978,11 @@ sysctl_ddp_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_usm_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9405,14 +10008,11 @@ sysctl_tid_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_tid_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9469,18 +10069,25 @@ static const char * const devlog_facility_strings[] = {
};
static int
-sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
+sbuf_devlog(struct adapter *sc, int coreid, struct sbuf *sb, int flags)
{
int i, j, rc, nentries, first = 0;
struct devlog_params *dparams = &sc->params.devlog;
struct fw_devlog_e *buf, *e;
+ uint32_t addr, size;
uint64_t ftstamp = UINT64_MAX;
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+
if (dparams->addr == 0)
return (ENXIO);
+ size = dparams->size / sc->params.ncores;
+ addr = dparams->addr + coreid * size;
+
MPASS(flags == M_WAITOK || flags == M_NOWAIT);
- buf = malloc(dparams->size, M_CXGBE, M_ZERO | flags);
+ buf = malloc(size, M_CXGBE, M_ZERO | flags);
if (buf == NULL)
return (ENOMEM);
@@ -9488,13 +10095,12 @@ sbuf_devlog(struct adapter *sc, struct sbuf *sb, int flags)
if (hw_off_limits(sc))
rc = ENXIO;
else
- rc = read_via_memwin(sc, 1, dparams->addr, (void *)buf,
- dparams->size);
+ rc = read_via_memwin(sc, 1, addr, (void *)buf, size);
mtx_unlock(&sc->reg_lock);
if (rc != 0)
goto done;
- nentries = dparams->size / sizeof(struct fw_devlog_e);
+ nentries = size / sizeof(struct fw_devlog_e);
for (i = 0; i < nentries; i++) {
e = &buf[i];
@@ -9546,17 +10152,24 @@ static int
sysctl_devlog(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
- int rc;
+ int rc, i, coreid = arg2;
struct sbuf *sb;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
-
- rc = sbuf_devlog(sc, sb, M_WAITOK);
+ if (coreid == -1) {
+ /* -1 means all cores */
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+			if (sc->params.ncores > 1)
+ sbuf_printf(sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, sb, M_WAITOK);
+ }
+ } else {
+ KASSERT(coreid >= 0 && coreid < sc->params.ncores,
+ ("%s: bad coreid %d\n", __func__, coreid));
+ rc = sbuf_devlog(sc, coreid, sb, M_WAITOK);
+ }
if (rc == 0)
rc = sbuf_finish(sb);
sbuf_delete(sb);
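
[Editor's note: sbuf_devlog() above assumes the firmware device log is
split evenly across CIM cores, e.g. a 32 KB log on a 4-core part gives
each core an 8 KB slice at addr + coreid * 8 KB.  That arithmetic as a
stand-alone sketch:]

	static void
	devlog_slice(uint32_t addr, uint32_t size, u_int ncores,
	    u_int coreid, uint32_t *slice_addr, uint32_t *slice_size)
	{
		*slice_size = size / ncores;
		*slice_addr = addr + coreid * *slice_size;
	}
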
@@ -9566,7 +10179,7 @@ sysctl_devlog(SYSCTL_HANDLER_ARGS)
static void
dump_devlog(struct adapter *sc)
{
- int rc;
+ int rc, i;
struct sbuf sb;
if (sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND) != &sb) {
@@ -9574,13 +10187,15 @@ dump_devlog(struct adapter *sc)
device_get_nameunit(sc->dev));
return;
}
- rc = sbuf_devlog(sc, &sb, M_WAITOK);
+ for (i = rc = 0; i < sc->params.ncores && rc == 0; i++) {
+		if (sc->params.ncores > 1)
+ sbuf_printf(&sb, "=== CIM core %u ===\n", i);
+ rc = sbuf_devlog(sc, i, &sb, M_WAITOK);
+ }
if (rc == 0) {
- rc = sbuf_finish(&sb);
- if (rc == 0) {
- log(LOG_DEBUG, "%s: device log follows.\n%s",
- device_get_nameunit(sc->dev), sbuf_data(&sb));
- }
+ sbuf_finish(&sb);
+ log(LOG_DEBUG, "%s: device log follows.\n%s",
+ device_get_nameunit(sc->dev), sbuf_data(&sb));
}
sbuf_delete(&sb);
}
@@ -9594,10 +10209,7 @@ sysctl_fcoe_stats(SYSCTL_HANDLER_ARGS)
struct tp_fcoe_stats stats[MAX_NCHAN];
int i, nchan = sc->chip_params->nchan;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -9650,16 +10262,13 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
unsigned int map, kbps, ipg, mode;
unsigned int pace_tab[NTX_SCHED];
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 512, req);
if (sb == NULL)
return (ENOMEM);
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc)) {
+ mtx_unlock(&sc->reg_lock);
rc = ENXIO;
goto done;
}
@@ -9667,6 +10276,7 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
map = t4_read_reg(sc, A_TP_TX_MOD_QUEUE_REQ_MAP);
mode = G_TIMERMODE(t4_read_reg(sc, A_TP_MOD_CONFIG));
t4_read_pace_tbl(sc, pace_tab);
+ mtx_unlock(&sc->reg_lock);
sbuf_printf(sb, "Scheduler Mode Channel Rate (Kbps) "
"Class IPG (0.1 ns) Flow IPG (us)");
@@ -9692,7 +10302,6 @@ sysctl_hw_sched(SYSCTL_HANDLER_ARGS)
}
rc = sbuf_finish(sb);
done:
- mtx_unlock(&sc->reg_lock);
sbuf_delete(sb);
return (rc);
}
@@ -9715,16 +10324,13 @@ sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
"BG2FramesTrunc:", "BG3FramesTrunc:"
};
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
memset(s, 0, sizeof(s));
+ rc = 0;
for (i = 0; i < sc->chip_params->nchan; i += 2) {
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
@@ -9747,7 +10353,8 @@ sysctl_lb_stats(SYSCTL_HANDLER_ARGS)
*p0++, *p1++);
}
- rc = sbuf_finish(sb);
+ if (rc == 0)
+ rc = sbuf_finish(sb);
sbuf_delete(sb);
return (rc);
@@ -9761,9 +10368,6 @@ sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
struct link_config *lc = &pi->link_cfg;
struct sbuf *sb;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
sb = sbuf_new_for_sysctl(NULL, NULL, 64, req);
if (sb == NULL)
return (ENOMEM);
@@ -9780,16 +10384,16 @@ sysctl_linkdnrc(SYSCTL_HANDLER_ARGS)
}
struct mem_desc {
- u_int base;
- u_int limit;
+ uint64_t base;
+ uint64_t limit;
u_int idx;
};
static int
mem_desc_cmp(const void *a, const void *b)
{
- const u_int v1 = ((const struct mem_desc *)a)->base;
- const u_int v2 = ((const struct mem_desc *)b)->base;
+ const uint64_t v1 = ((const struct mem_desc *)a)->base;
+ const uint64_t v2 = ((const struct mem_desc *)b)->base;
if (v1 < v2)
return (-1);
@@ -9800,10 +10404,9 @@ mem_desc_cmp(const void *a, const void *b)
}
static void
-mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
- unsigned int to)
+mem_region_show(struct sbuf *sb, const char *name, uint64_t from, uint64_t to)
{
- unsigned int size;
+ uintmax_t size;
if (from == to)
return;
@@ -9812,8 +10415,12 @@ mem_region_show(struct sbuf *sb, const char *name, unsigned int from,
if (size == 0)
return;
- /* XXX: need humanize_number(3) in libkern for a more readable 'size' */
- sbuf_printf(sb, "%-15s %#x-%#x [%u]\n", name, from, to, size);
+ if (from > UINT32_MAX || to > UINT32_MAX)
+ sbuf_printf(sb, "%-18s 0x%012jx-0x%012jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
+ else
+ sbuf_printf(sb, "%-18s 0x%08jx-0x%08jx [%ju]\n", name,
+ (uintmax_t)from, (uintmax_t)to, size);
}
static int
@@ -9821,7 +10428,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
struct sbuf *sb;
- int rc, i, n;
+ int rc, i, n, nchan;
uint32_t lo, hi, used, free, alloc;
static const char *memory[] = {
"EDC0:", "EDC1:", "MC:", "MC0:", "MC1:", "HMA:"
@@ -9832,12 +10439,14 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
"Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
"TDDP region:", "TPT region:", "STAG region:", "RQ region:",
"RQUDP region:", "PBL region:", "TXPBL region:",
- "TLSKey region:", "DBVFIFO region:", "ULPRX state:",
- "ULPTX state:", "On-chip queues:",
+ "TLSKey region:", "RRQ region:", "NVMe STAG region:",
+ "NVMe RQ region:", "NVMe RXPBL region:", "NVMe TPT region:",
+ "NVMe TXPBL region:", "DBVFIFO region:", "ULPRX state:",
+ "ULPTX state:", "RoCE RRQ region:", "On-chip queues:",
};
struct mem_desc avail[4];
struct mem_desc mem[nitems(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ struct mem_desc *md;
rc = sysctl_wire_old_buffer(req, 0);
if (rc != 0)
@@ -9863,36 +10472,91 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
lo = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
if (lo & F_EDRAM0_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM0_BAR);
- avail[i].base = G_EDRAM0_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM0_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM0_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM0_SIZE(hi) << 20);
+ }
avail[i].idx = 0;
i++;
}
if (lo & F_EDRAM1_ENABLE) {
hi = t4_read_reg(sc, A_MA_EDRAM1_BAR);
- avail[i].base = G_EDRAM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EDRAM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EDRAM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EDRAM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EDRAM1_SIZE(hi) << 20);
+ }
avail[i].idx = 1;
i++;
}
if (lo & F_EXT_MEM_ENABLE) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
- avail[i].base = G_EXT_MEM_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM_SIZE(hi) << 20);
- avail[i].idx = is_t5(sc) ? 3 : 2; /* Call it MC0 for T5 */
+ switch (chip_id(sc)) {
+ case CHELSIO_T4:
+ case CHELSIO_T6:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM_SIZE(hi) << 20);
+ avail[i].idx = 2;
+ break;
+ case CHELSIO_T5:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T5 */
+ break;
+ default:
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY0_BAR);
+ avail[i].base = (uint64_t)G_T7_EXT_MEM0_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM0_SIZE(hi) << 20);
+ avail[i].idx = 3; /* Call it MC0 for T7+ */
+ break;
+ }
i++;
}
- if (is_t5(sc) && lo & F_EXT_MEM1_ENABLE) {
+ if (lo & F_EXT_MEM1_ENABLE && !(lo & F_MC_SPLIT)) {
+ /* Only T5 and T7+ have 2 MCs. */
+ MPASS(is_t5(sc) || chip_id(sc) >= CHELSIO_T7);
+
hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ avail[i].base = (uint64_t)G_T7_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_EXT_MEM1_SIZE(hi) << 20);
+ } else {
+ avail[i].base = (uint64_t)G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 4;
i++;
}
- if (is_t6(sc) && lo & F_HMA_MUX) {
- hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
- avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
- avail[i].limit = avail[i].base + (G_EXT_MEM1_SIZE(hi) << 20);
+ if (lo & F_HMA_MUX) {
+ /* Only T6+ have HMA. */
+ MPASS(chip_id(sc) >= CHELSIO_T6);
+
+ if (chip_id(sc) >= CHELSIO_T7) {
+ hi = t4_read_reg(sc, A_MA_HOST_MEMORY_BAR);
+ avail[i].base = (uint64_t)G_HMATARGETBASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_T7_HMA_SIZE(hi) << 20);
+ } else {
+ hi = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
+ avail[i].base = G_EXT_MEM1_BASE(hi) << 20;
+ avail[i].limit = avail[i].base +
+ (G_EXT_MEM1_SIZE(hi) << 20);
+ }
avail[i].idx = 5;
i++;
}
@@ -9901,6 +10565,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
goto done;
qsort(avail, i, sizeof(struct mem_desc), mem_desc_cmp);
+ md = &mem[0];
(md++)->base = t4_read_reg(sc, A_SGE_DBQ_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_IMSG_CTXT_BADDR);
(md++)->base = t4_read_reg(sc, A_SGE_FLM_CACHE_BADDR);
@@ -9936,22 +10601,52 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
}
md++;
-#define ulp_region(reg) \
- md->base = t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT);\
- (md++)->limit = t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT)
+#define ulp_region(reg) do {\
+ const u_int shift = chip_id(sc) >= CHELSIO_T7 ? 4 : 0; \
+ md->base = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _LLIMIT) << shift; \
+ md->limit = (uint64_t)t4_read_reg(sc, A_ULP_ ## reg ## _ULIMIT) << shift; \
+ md->limit += (1 << shift) - 1; \
+ md++; \
+ } while (0)
+
+#define hide_ulp_region() do { \
+ md->base = 0; \
+ md->idx = nitems(region); \
+ md++; \
+ } while (0)
ulp_region(RX_ISCSI);
ulp_region(RX_TDDP);
ulp_region(TX_TPT);
ulp_region(RX_STAG);
ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
+ if (chip_id(sc) < CHELSIO_T7)
+ ulp_region(RX_RQUDP);
+ else
+ hide_ulp_region();
ulp_region(RX_PBL);
ulp_region(TX_PBL);
- if (sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS) {
+ if (chip_id(sc) >= CHELSIO_T6)
ulp_region(RX_TLS_KEY);
+ else
+ hide_ulp_region();
+ if (chip_id(sc) >= CHELSIO_T7) {
+ ulp_region(RX_RRQ);
+ ulp_region(RX_NVME_TCP_STAG);
+ ulp_region(RX_NVME_TCP_RQ);
+ ulp_region(RX_NVME_TCP_PBL);
+ ulp_region(TX_NVME_TCP_TPT);
+ ulp_region(TX_NVME_TCP_PBL);
+ } else {
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
+ hide_ulp_region();
}
#undef ulp_region
+#undef hide_ulp_region
md->base = 0;
if (is_t4(sc))
@@ -9982,6 +10677,15 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
md->limit = 0;
md++;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ t4_tp_pio_read(sc, &lo, 1, A_TP_ROCE_RRQ_BASE, false);
+ md->base = lo;
+ } else {
+ md->base = 0;
+ md->idx = nitems(region);
+ }
+ md++;
+
md->base = sc->vres.ocq.start;
if (sc->vres.ocq.size)
md->limit = md->base + sc->vres.ocq.size - 1;
@@ -10014,31 +10718,41 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
mem[i].limit);
}
- sbuf_printf(sb, "\n");
lo = t4_read_reg(sc, A_CIM_SDRAM_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_SDRAM_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP RAM:", lo, hi);
+ if (hi != lo - 1) {
+ sbuf_printf(sb, "\n");
+ mem_region_show(sb, "uP RAM:", lo, hi);
+ }
lo = t4_read_reg(sc, A_CIM_EXTMEM2_BASE_ADDR);
hi = t4_read_reg(sc, A_CIM_EXTMEM2_ADDR_SIZE) + lo - 1;
- mem_region_show(sb, "uP Extmem2:", lo, hi);
+ if (hi != lo - 1)
+ mem_region_show(sb, "uP Extmem2:", lo, hi);
lo = t4_read_reg(sc, A_TP_PMM_RX_MAX_PAGE);
- for (i = 0, free = 0; i < 2; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMRXNUMCHN(lo);
+ else
+ nchan = lo & F_PMRXNUMCHN ? 2 : 1;
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREERXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_RX_CNT));
sbuf_printf(sb, "\n%u Rx pages (%u free) of size %uKiB for %u channels\n",
G_PMRXMAXPAGE(lo), free,
- t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10,
- (lo & F_PMRXNUMCHN) ? 2 : 1);
+ t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE) >> 10, nchan);
lo = t4_read_reg(sc, A_TP_PMM_TX_MAX_PAGE);
hi = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);
- for (i = 0, free = 0; i < 4; i++)
+ if (chip_id(sc) >= CHELSIO_T7)
+ nchan = 1 << G_T7_PMTXNUMCHN(lo);
+ else
+ nchan = 1 << G_PMTXNUMCHN(lo);
+ for (i = 0, free = 0; i < nchan; i++)
free += G_FREETXPAGECOUNT(t4_read_reg(sc, A_TP_FLM_FREE_TX_CNT));
sbuf_printf(sb, "%u Tx pages (%u free) of size %u%ciB for %u channels\n",
G_PMTXMAXPAGE(lo), free,
hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << G_PMTXNUMCHN(lo));
+ hi >= (1 << 20) ? 'M' : 'K', nchan);
sbuf_printf(sb, "%u p-structs (%u free)\n",
t4_read_reg(sc, A_TP_CMM_MM_MAX_PSTRUCT),
G_FREEPSTRUCTCOUNT(t4_read_reg(sc, A_TP_FLM_FREE_PS_CNT)));
@@ -10055,7 +10769,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb, "\nPort %d using %u pages out of %u allocated",
i, used, alloc);
}
@@ -10071,7 +10785,7 @@ sysctl_meminfo(SYSCTL_HANDLER_ARGS)
used = G_USED(lo);
alloc = G_ALLOC(lo);
}
- /* For T6 these are MAC buffer groups */
+ /* For T6+ these are MAC buffer groups */
sbuf_printf(sb,
"\nLoopback %d using %u pages out of %u allocated",
i, used, alloc);
@@ -10101,10 +10815,6 @@ sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
MPASS(chip_id(sc) <= CHELSIO_T5);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -10112,6 +10822,7 @@ sysctl_mps_tcam(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb,
"Idx Ethernet address Mask Vld Ports PF"
" VF Replication P0 P1 P2 P3 ML");
+ rc = 0;
for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
uint64_t tcamx, tcamy, mask;
uint32_t cls_lo, cls_hi;
@@ -10203,11 +10914,7 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
struct sbuf *sb;
int rc, i;
- MPASS(chip_id(sc) > CHELSIO_T5);
-
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
+ MPASS(chip_id(sc) == CHELSIO_T6);
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
@@ -10216,8 +10923,9 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
" IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
" Replication"
- " P0 P1 P2 P3 ML\n");
+ " P0 P1 P2 P3 ML");
+ rc = 0;
for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
uint8_t dip_hit, vlan_vld, lookup_type, port_num;
uint16_t ivlan;
@@ -10380,6 +11088,206 @@ sysctl_mps_tcam_t6(SYSCTL_HANDLER_ARGS)
}
static int
+sysctl_mps_tcam_t7(SYSCTL_HANDLER_ARGS)
+{
+ struct adapter *sc = arg1;
+ struct sbuf *sb;
+ int rc, i;
+
+ MPASS(chip_id(sc) >= CHELSIO_T7);
+
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
+ if (sb == NULL)
+ return (ENOMEM);
+
+ sbuf_printf(sb, "Idx Ethernet address Mask VNI Mask"
+ " IVLAN Vld DIP_Hit Lookup Port Vld Ports PF VF"
+ " Replication"
+ " P0 P1 P2 P3 ML");
+
+ rc = 0;
+ for (i = 0; i < sc->chip_params->mps_tcam_size; i++) {
+ uint8_t dip_hit, vlan_vld, lookup_type, port_num;
+ uint16_t ivlan;
+ uint64_t tcamx, tcamy, val, mask;
+ uint32_t cls_lo, cls_hi, ctl, data2, vnix, vniy;
+ uint8_t addr[ETHER_ADDR_LEN];
+
+ /* Read tcamy */
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+ if (chip_rev(sc) == 0) {
+ if (i < 256)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 256) | V_T7_CTLTCAMSEL(1);
+ } else {
+#if 0
+ ctl = (V_CTLREQID(1) | V_CTLCMDTYPE(0) | V_CTLXYBITSEL(0));
+#endif
+ if (i < 512)
+ ctl |= V_CTLTCAMINDEX(i) | V_T7_CTLTCAMSEL(0);
+ else if (i < 1024)
+ ctl |= V_CTLTCAMINDEX(i - 512) | V_T7_CTLTCAMSEL(1);
+ else
+ ctl |= V_CTLTCAMINDEX(i - 1024) | V_T7_CTLTCAMSEL(2);
+ }
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamy = G_DMACH(val) << 32;
+ tcamy |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ lookup_type = G_DATALKPTYPE(data2);
+ port_num = G_DATAPORTNUM(data2);
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI */
+ vniy = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ dip_hit = data2 & F_DATADIPHIT;
+ vlan_vld = 0;
+ } else {
+ vniy = 0;
+ dip_hit = 0;
+ vlan_vld = data2 & F_DATAVIDH2;
+ ivlan = G_VIDL(val);
+ }
+
+ ctl |= V_CTLXYBITSEL(1);
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ t4_write_reg(sc, A_MPS_CLS_TCAM_DATA2_CTL, ctl);
+ val = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA1_REQ_ID1);
+ tcamx = G_DMACH(val) << 32;
+ tcamx |= t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA0_REQ_ID1);
+ data2 = t4_read_reg(sc, A_MPS_CLS_TCAM0_RDATA2_REQ_ID1);
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ /* Inner header VNI mask */
+ vnix = (((data2 & F_DATAVIDH2) |
+ G_DATAVIDH1(data2)) << 16) | G_VIDL(val);
+ } else
+ vnix = 0;
+
+ if (tcamx & tcamy)
+ continue;
+ tcamxy2valmask(tcamx, tcamy, addr, &mask);
+
+ mtx_lock(&sc->reg_lock);
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else {
+ if (chip_rev(sc) == 0) {
+ cls_lo = t4_read_reg(sc, MPS_CLS_SRAM_L(i));
+ cls_hi = t4_read_reg(sc, MPS_CLS_SRAM_H(i));
+ } else {
+ t4_write_reg(sc, A_MPS_CLS_SRAM_H,
+ V_SRAMWRN(0) | V_SRAMINDEX(i));
+ cls_lo = t4_read_reg(sc, A_MPS_CLS_SRAM_L);
+ cls_hi = t4_read_reg(sc, A_MPS_CLS_SRAM_H);
+ }
+ }
+ mtx_unlock(&sc->reg_lock);
+ if (rc != 0)
+ break;
+
+ if (lookup_type && lookup_type != M_DATALKPTYPE) {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx %06x %06x - - %3c"
+ " I %4x %3c %#x%4u%4d", i, addr[0],
+ addr[1], addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask, vniy, vnix, dip_hit ? 'Y' : 'N',
+ port_num, cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ } else {
+ sbuf_printf(sb, "\n%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012jx - - ", i, addr[0], addr[1],
+ addr[2], addr[3], addr[4], addr[5],
+ (uintmax_t)mask);
+
+ if (vlan_vld)
+ sbuf_printf(sb, "%4u Y ", ivlan);
+ else
+ sbuf_printf(sb, " - N ");
+
+ sbuf_printf(sb, "- %3c %4x %3c %#x%4u%4d",
+ lookup_type ? 'I' : 'O', port_num,
+ cls_lo & F_T6_SRAM_VLD ? 'Y' : 'N',
+ G_PORTMAP(cls_hi), G_T6_PF(cls_lo),
+ cls_lo & F_T6_VF_VALID ? G_T6_VF(cls_lo) : -1);
+ }
+
+ if (cls_lo & F_T6_REPLICATE) {
+ struct fw_ldst_cmd ldst_cmd;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ htobe32(V_FW_CMD_OP(FW_LDST_CMD) |
+ F_FW_CMD_REQUEST | F_FW_CMD_READ |
+ V_FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MPS));
+ ldst_cmd.cycles_to_len16 = htobe32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.mps.rplc.fid_idx =
+ htobe16(V_FW_LDST_CMD_FID(FW_LDST_MPS_RPLC) |
+ V_FW_LDST_CMD_IDX(i));
+
+ rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK,
+ "t6mps");
+ if (rc)
+ break;
+ if (hw_off_limits(sc))
+ rc = ENXIO;
+ else
+ rc = -t4_wr_mbox(sc, sc->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+ end_synchronized_op(sc, 0);
+ if (rc != 0)
+ break;
+ else {
+ sbuf_printf(sb, " %08x %08x %08x %08x"
+ " %08x %08x %08x %08x",
+ be32toh(ldst_cmd.u.mps.rplc.rplc255_224),
+ be32toh(ldst_cmd.u.mps.rplc.rplc223_192),
+ be32toh(ldst_cmd.u.mps.rplc.rplc191_160),
+ be32toh(ldst_cmd.u.mps.rplc.rplc159_128),
+ be32toh(ldst_cmd.u.mps.rplc.rplc127_96),
+ be32toh(ldst_cmd.u.mps.rplc.rplc95_64),
+ be32toh(ldst_cmd.u.mps.rplc.rplc63_32),
+ be32toh(ldst_cmd.u.mps.rplc.rplc31_0));
+ }
+ } else
+ sbuf_printf(sb, "%72s", "");
+
+ sbuf_printf(sb, "%4u%3u%3u%3u %#x",
+ G_T6_SRAM_PRIO0(cls_lo), G_T6_SRAM_PRIO1(cls_lo),
+ G_T6_SRAM_PRIO2(cls_lo), G_T6_SRAM_PRIO3(cls_lo),
+ (cls_lo >> S_T6_MULTILISTEN0) & 0xf);
+ }
+
+ if (rc)
+ (void) sbuf_finish(sb);
+ else
+ rc = sbuf_finish(sb);
+ sbuf_delete(sb);
+
+ return (rc);
+}
+
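+
/*
 * A note on the X/Y TCAM reads in sysctl_mps_tcam_t7() above: each entry
 * is read twice, tcamy with V_CTLXYBITSEL(0) and tcamx with
 * V_CTLXYBITSEL(1), entries with overlapping x/y bits are skipped, and
 * tcamxy2valmask() turns the pair into an Ethernet address plus mask.
 * A minimal sketch of the X/Y convention this relies on (an illustration,
 * not the driver's helper): (x,y) = (0,0) is don't-care, (0,1) matches 1,
 * (1,0) matches 0, and (1,1) is an invalid entry.
 */
static int
xy_decode(uint64_t x, uint64_t y, uint64_t *val, uint64_t *mask)
{
	if (x & y)
		return (-1);	/* invalid; mirrors "if (tcamx & tcamy) continue" */
	*mask = x | y;		/* bits that participate in the match */
	*val = y;		/* required value wherever the mask is set */
	return (0);
}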
+static int
sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
{
struct adapter *sc = arg1;
@@ -10387,10 +11295,7 @@ sysctl_path_mtus(SYSCTL_HANDLER_ARGS)
int rc;
uint16_t mtus[NMTUS];
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -10423,6 +11328,7 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
int rc, i;
uint32_t tx_cnt[MAX_PM_NSTATS], rx_cnt[MAX_PM_NSTATS];
uint64_t tx_cyc[MAX_PM_NSTATS], rx_cyc[MAX_PM_NSTATS];
+ uint32_t stats[T7_PM_RX_CACHE_NSTATS];
static const char *tx_stats[MAX_PM_NSTATS] = {
"Read:", "Write bypass:", "Write mem:", "Bypass + mem:",
"Tx FIFO wait", NULL, "Tx latency"
@@ -10432,22 +11338,21 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
"Rx FIFO wait", NULL, "Rx latency"
};
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
else {
t4_pmtx_get_stats(sc, tx_cnt, tx_cyc);
t4_pmrx_get_stats(sc, rx_cnt, rx_cyc);
+ if (chip_id(sc) >= CHELSIO_T7)
+ t4_pmrx_cache_get_stats(sc, stats);
}
mtx_unlock(&sc->reg_lock);
if (rc != 0)
return (rc);
- sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
+ sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -10482,6 +11387,61 @@ sysctl_pm_stats(SYSCTL_HANDLER_ARGS)
rx_cyc[i]);
}
+ if (chip_id(sc) >= CHELSIO_T7) {
+ i = 0;
+ sbuf_printf(sb, "\n\nPM RX Cache Stats\n");
+ sbuf_printf(sb, "%-40s %u\n", "ReqWrite", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "ReqReadNoInv", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Split Request",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Split (Read Invalidate)", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Feedback Read Split (Read NoInvalidate)",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Hit", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Feedback Read Hit",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Full UnAvail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "FB Read Hit Partial Avail",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Normal Read Full Free",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "FB Read Part-avail Mul-Regions",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss FL Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Miss LRU Used",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Miss LRU-Multiple Evict", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Write Hit Increasing Islands", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n",
+ "Normal Read Island Read split", stats[i++]);
+ sbuf_printf(sb, "%-40s %u\n", "Write Overflow Eviction",
+ stats[i++]);
+ sbuf_printf(sb, "%-40s %u", "Read Overflow Eviction",
+ stats[i++]);
+ }
+
rc = sbuf_finish(sb);
sbuf_delete(sb);
@@ -10496,10 +11456,7 @@ sysctl_rdma_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_rdma_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -10530,10 +11487,7 @@ sysctl_tcp_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_tcp_stats v4, v6;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -10573,10 +11527,7 @@ sysctl_tids(SYSCTL_HANDLER_ARGS)
uint32_t x, y;
struct tid_info *t = &sc->tids;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
sb = sbuf_new_for_sysctl(NULL, NULL, 256, req);
if (sb == NULL)
return (ENOMEM);
@@ -10668,10 +11619,7 @@ sysctl_tp_err_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_err_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -10749,10 +11697,7 @@ sysctl_tnl_stats(SYSCTL_HANDLER_ARGS)
int rc;
struct tp_tnl_stats stats;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return(rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -11016,10 +11961,7 @@ sysctl_tp_la(SYSCTL_HANDLER_ARGS)
u_int i, inc;
void (*show_func)(struct sbuf *, uint64_t *, int);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -11067,10 +12009,7 @@ sysctl_tx_rate(SYSCTL_HANDLER_ARGS)
int rc;
u64 nrate[MAX_NCHAN], orate[MAX_NCHAN];
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -11113,10 +12052,7 @@ sysctl_ulprx_la(SYSCTL_HANDLER_ARGS)
uint32_t *buf, *p;
int rc, i;
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -11157,10 +12093,7 @@ sysctl_wcwr_stats(SYSCTL_HANDLER_ARGS)
MPASS(chip_id(sc) >= CHELSIO_T5);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
+ rc = 0;
mtx_lock(&sc->reg_lock);
if (hw_off_limits(sc))
rc = ENXIO;
@@ -11210,10 +12143,6 @@ sysctl_cpus(SYSCTL_HANDLER_ARGS)
if (rc != 0)
return (rc);
- rc = sysctl_wire_old_buffer(req, 0);
- if (rc != 0)
- return (rc);
-
sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
if (sb == NULL)
return (ENOMEM);
@@ -11523,15 +12452,17 @@ sysctl_holdoff_pktc_idx_ofld(SYSCTL_HANDLER_ARGS)
#endif
static int
-get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
+get_sge_context(struct adapter *sc, int mem_id, uint32_t cid, int len,
+ uint32_t *data)
{
int rc;
- if (cntxt->cid > M_CTXTQID)
+ if (len < sc->chip_params->sge_ctxt_size)
+ return (ENOBUFS);
+ if (cid > M_CTXTQID)
return (EINVAL);
-
- if (cntxt->mem_id != CTXT_EGRESS && cntxt->mem_id != CTXT_INGRESS &&
- cntxt->mem_id != CTXT_FLM && cntxt->mem_id != CTXT_CNM)
+ if (mem_id != CTXT_EGRESS && mem_id != CTXT_INGRESS &&
+ mem_id != CTXT_FLM && mem_id != CTXT_CNM)
return (EINVAL);
rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4ctxt");
@@ -11544,8 +12475,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
}
if (sc->flags & FW_OK) {
- rc = -t4_sge_ctxt_rd(sc, sc->mbox, cntxt->cid, cntxt->mem_id,
- &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd(sc, sc->mbox, cid, mem_id, data);
if (rc == 0)
goto done;
}
@@ -11554,7 +12484,7 @@ get_sge_context(struct adapter *sc, struct t4_sge_context *cntxt)
* Read via firmware failed or wasn't even attempted. Read directly via
* the backdoor.
*/
- rc = -t4_sge_ctxt_rd_bd(sc, cntxt->cid, cntxt->mem_id, &cntxt->data[0]);
+ rc = -t4_sge_ctxt_rd_bd(sc, cid, mem_id, data);
done:
end_synchronized_op(sc, 0);
return (rc);
@@ -11962,10 +12892,11 @@ clear_stats(struct adapter *sc, u_int port_id)
mtx_lock(&sc->reg_lock);
if (!hw_off_limits(sc)) {
/* MAC stats */
- t4_clr_port_stats(sc, pi->tx_chan);
+ t4_clr_port_stats(sc, pi->hw_port);
if (is_t6(sc)) {
if (pi->fcs_reg != -1)
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
else
pi->stats.rx_fcs_err = 0;
}
@@ -12028,12 +12959,21 @@ clear_stats(struct adapter *sc, u_int port_id)
txq->kern_tls_full = 0;
txq->kern_tls_octets = 0;
txq->kern_tls_waste = 0;
- txq->kern_tls_options = 0;
txq->kern_tls_header = 0;
- txq->kern_tls_fin = 0;
txq->kern_tls_fin_short = 0;
txq->kern_tls_cbc = 0;
txq->kern_tls_gcm = 0;
+ if (is_t6(sc)) {
+ txq->kern_tls_options = 0;
+ txq->kern_tls_fin = 0;
+ } else {
+ txq->kern_tls_ghash_received = 0;
+ txq->kern_tls_ghash_requested = 0;
+ txq->kern_tls_lso = 0;
+ txq->kern_tls_partial_ghash = 0;
+ txq->kern_tls_splitmode = 0;
+ txq->kern_tls_trailer = 0;
+ }
mp_ring_reset_stats(txq->r);
}
@@ -12122,32 +13062,6 @@ t4_os_find_pci_capability(struct adapter *sc, int cap)
return (pci_find_cap(sc->dev, cap, &i) == 0 ? i : 0);
}
-int
-t4_os_pci_save_state(struct adapter *sc)
-{
- device_t dev;
- struct pci_devinfo *dinfo;
-
- dev = sc->dev;
- dinfo = device_get_ivars(dev);
-
- pci_cfg_save(dev, dinfo, 0);
- return (0);
-}
-
-int
-t4_os_pci_restore_state(struct adapter *sc)
-{
- device_t dev;
- struct pci_devinfo *dinfo;
-
- dev = sc->dev;
- dinfo = device_get_ivars(dev);
-
- pci_cfg_restore(dev, dinfo);
- return (0);
-}
-
void
t4_os_portmod_changed(struct port_info *pi)
{
@@ -12155,7 +13069,8 @@ t4_os_portmod_changed(struct port_info *pi)
struct vi_info *vi;
if_t ifp;
static const char *mod_str[] = {
- NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM",
+ "LR_SIMPLEX", "DR"
};
KASSERT((pi->flags & FIXED_IFMEDIA) == 0,
@@ -12203,14 +13118,12 @@ t4_os_link_changed(struct port_info *pi)
if (is_t6(sc)) {
if (lc->link_ok) {
if (lc->speed > 25000 ||
- (lc->speed == 25000 && lc->fec == FEC_RS)) {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS);
- } else {
- pi->fcs_reg = T5_PORT_REG(pi->tx_chan,
- A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS);
- }
- pi->fcs_base = t4_read_reg64(sc, pi->fcs_reg);
+ (lc->speed == 25000 && lc->fec == FEC_RS))
+ pi->fcs_reg = A_MAC_PORT_AFRAMECHECKSEQUENCEERRORS;
+ else
+ pi->fcs_reg = A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS;
+ pi->fcs_base = t4_read_reg64(sc,
+ t4_port_reg(sc, pi->tx_chan, pi->fcs_reg));
pi->stats.rx_fcs_err = 0;
} else {
pi->fcs_reg = -1;
@@ -12222,7 +13135,7 @@ t4_os_link_changed(struct port_info *pi)
for_each_vi(pi, v, vi) {
ifp = vi->ifp;
- if (ifp == NULL)
+ if (ifp == NULL || IS_DETACHING(vi))
continue;
if (lc->link_ok) {
@@ -12343,9 +13256,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_DEL_FILTER:
rc = del_filter(sc, (struct t4_filter *)data);
break;
- case CHELSIO_T4_GET_SGE_CONTEXT:
- rc = get_sge_context(sc, (struct t4_sge_context *)data);
+ case CHELSIO_T4_GET_SGE_CONTEXT: {
+ struct t4_sge_context *ctxt = (struct t4_sge_context *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
break;
+ }
case CHELSIO_T4_LOAD_FW:
rc = load_fw(sc, (struct t4_data *)data);
break;
@@ -12391,6 +13308,13 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
case CHELSIO_T4_RELEASE_CLIP_ADDR:
rc = release_clip_addr(sc, (struct t4_clip_addr *)data);
break;
+ case CHELSIO_T4_GET_SGE_CTXT: {
+ struct t4_sge_ctxt *ctxt = (struct t4_sge_ctxt *)data;
+
+ rc = get_sge_context(sc, ctxt->mem_id, ctxt->cid,
+ sizeof(ctxt->data), &ctxt->data[0]);
+ break;
+ }
default:
rc = ENOTTY;
}
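/*
 * Userland sketch for the new CHELSIO_T4_GET_SGE_CTXT ioctl.  The field
 * names come from the handler above; the device path, header name, and
 * the rest of struct t4_sge_ctxt's layout are assumptions here.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include "t4_ioctl.h"	/* assumed location of the ioctl definitions */

static int
dump_egress_context(const char *dev, uint32_t cid)
{
	struct t4_sge_ctxt ctxt = { .mem_id = CTXT_EGRESS, .cid = cid };
	int fd, rc;

	if ((fd = open(dev, O_RDWR)) < 0)
		return (-1);
	rc = ioctl(fd, CHELSIO_T4_GET_SGE_CTXT, &ctxt);
	if (rc == 0)
		printf("egress context %u, word 0: %#x\n", cid, ctxt.data[0]);
	close(fd);
	return (rc);
}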
@@ -12399,7 +13323,7 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
}
#ifdef TCP_OFFLOAD
-static int
+int
toe_capability(struct vi_info *vi, bool enable)
{
int rc;
@@ -12410,7 +13334,7 @@ toe_capability(struct vi_info *vi, bool enable)
if (!is_offload(sc))
return (ENODEV);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
return (ENXIO);
if (enable) {
@@ -12465,6 +13389,7 @@ toe_capability(struct vi_info *vi, bool enable)
if (isset(&sc->offload_map, pi->port_id)) {
/* TOE is enabled on another VI of this port. */
+ MPASS(pi->uld_vis > 0);
pi->uld_vis++;
return (0);
}
@@ -12490,17 +13415,17 @@ toe_capability(struct vi_info *vi, bool enable)
if (!uld_active(sc, ULD_ISCSI))
(void) t4_activate_uld(sc, ULD_ISCSI);
- pi->uld_vis++;
- setbit(&sc->offload_map, pi->port_id);
+ if (pi->uld_vis++ == 0)
+ setbit(&sc->offload_map, pi->port_id);
} else {
- pi->uld_vis--;
-
- if (!isset(&sc->offload_map, pi->port_id) || pi->uld_vis > 0)
+ if ((if_getcapenable(vi->ifp) & IFCAP_TOE) == 0) {
+ /* TOE is already disabled. */
return (0);
-
- KASSERT(uld_active(sc, ULD_TOM),
- ("%s: TOM never initialized?", __func__));
- clrbit(&sc->offload_map, pi->port_id);
+ }
+ MPASS(isset(&sc->offload_map, pi->port_id));
+ MPASS(pi->uld_vis > 0);
+ if (--pi->uld_vis == 0)
+ clrbit(&sc->offload_map, pi->port_id);
}
return (0);
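/*
 * The rewritten enable/disable paths above maintain one invariant per
 * port: the port's bit in sc->offload_map is set exactly while
 * pi->uld_vis is nonzero, flipping only on the 0 to 1 and 1 to 0
 * transitions.  As a sketch:
 *
 *	MPASS(!!isset(&sc->offload_map, pi->port_id) == (pi->uld_vis > 0));
 */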
@@ -12510,82 +13435,61 @@ toe_capability(struct vi_info *vi, bool enable)
* Add an upper layer driver to the global list.
*/
int
-t4_register_uld(struct uld_info *ui)
+t4_register_uld(struct uld_info *ui, int id)
{
- int rc = 0;
- struct uld_info *u;
+ int rc;
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
sx_xlock(&t4_uld_list_lock);
- SLIST_FOREACH(u, &t4_uld_list, link) {
- if (u->uld_id == ui->uld_id) {
- rc = EEXIST;
- goto done;
- }
+ if (t4_uld_list[id] != NULL)
+ rc = EEXIST;
+ else {
+ t4_uld_list[id] = ui;
+ rc = 0;
}
-
- SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
- ui->refcount = 0;
-done:
sx_xunlock(&t4_uld_list_lock);
return (rc);
}
int
-t4_unregister_uld(struct uld_info *ui)
+t4_unregister_uld(struct uld_info *ui, int id)
{
- int rc = EINVAL;
- struct uld_info *u;
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
sx_xlock(&t4_uld_list_lock);
-
- SLIST_FOREACH(u, &t4_uld_list, link) {
- if (u == ui) {
- if (ui->refcount > 0) {
- rc = EBUSY;
- goto done;
- }
-
- SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
- rc = 0;
- goto done;
- }
- }
-done:
+ MPASS(t4_uld_list[id] == ui);
+ t4_uld_list[id] = NULL;
sx_xunlock(&t4_uld_list_lock);
- return (rc);
+ return (0);
}
int
t4_activate_uld(struct adapter *sc, int id)
{
int rc;
- struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
if (id < 0 || id > ULD_MAX)
return (EINVAL);
- rc = EAGAIN; /* kldoad the module with this ULD and try again. */
-
- sx_slock(&t4_uld_list_lock);
-
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == id) {
- if (!(sc->flags & FULL_INIT_DONE)) {
- rc = adapter_init(sc);
- if (rc != 0)
- break;
- }
- rc = ui->activate(sc);
- if (rc == 0) {
- setbit(&sc->active_ulds, id);
- ui->refcount++;
- }
- break;
- }
+ /* Adapter needs to be initialized before any ULD can be activated. */
+ if (!(sc->flags & FULL_INIT_DONE)) {
+ rc = adapter_init(sc);
+ if (rc != 0)
+ return (rc);
}
+ sx_slock(&t4_uld_list_lock);
+ if (t4_uld_list[id] == NULL)
+ rc = EAGAIN; /* load the KLD with this ULD and try again. */
+ else {
+ rc = t4_uld_list[id]->uld_activate(sc);
+ if (rc == 0)
+ setbit(&sc->active_ulds, id);
+ }
sx_sunlock(&t4_uld_list_lock);
return (rc);
@@ -12595,54 +13499,42 @@ int
t4_deactivate_uld(struct adapter *sc, int id)
{
int rc;
- struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
if (id < 0 || id > ULD_MAX)
return (EINVAL);
- rc = ENXIO;
sx_slock(&t4_uld_list_lock);
-
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == id) {
- rc = ui->deactivate(sc);
- if (rc == 0) {
- clrbit(&sc->active_ulds, id);
- ui->refcount--;
- }
- break;
- }
+ if (t4_uld_list[id] == NULL)
+ rc = ENXIO;
+ else {
+ rc = t4_uld_list[id]->uld_deactivate(sc);
+ if (rc == 0)
+ clrbit(&sc->active_ulds, id);
}
-
sx_sunlock(&t4_uld_list_lock);
return (rc);
}
static int
-t4_deactivate_all_uld(struct adapter *sc)
+deactivate_all_uld(struct adapter *sc)
{
- int rc;
- struct uld_info *ui;
+ int i, rc;
rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld");
if (rc != 0)
return (ENXIO);
-
sx_slock(&t4_uld_list_lock);
-
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (isset(&sc->active_ulds, ui->uld_id)) {
- rc = ui->deactivate(sc);
- if (rc != 0)
- break;
- clrbit(&sc->active_ulds, ui->uld_id);
- ui->refcount--;
- }
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] == NULL || !uld_active(sc, i))
+ continue;
+ rc = t4_uld_list[i]->uld_deactivate(sc);
+ if (rc != 0)
+ break;
+ clrbit(&sc->active_ulds, i);
}
-
sx_sunlock(&t4_uld_list_lock);
end_synchronized_op(sc, 0);
@@ -12650,30 +13542,48 @@ t4_deactivate_all_uld(struct adapter *sc)
}
static void
-t4_async_event(struct adapter *sc)
+stop_all_uld(struct adapter *sc)
{
- struct uld_info *ui;
+ int i;
- if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldst") != 0)
return;
sx_slock(&t4_uld_list_lock);
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == ULD_IWARP) {
- ui->async_event(sc);
- break;
- }
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] == NULL || !uld_active(sc, i) ||
+ t4_uld_list[i]->uld_stop == NULL)
+ continue;
+ (void) t4_uld_list[i]->uld_stop(sc);
+ }
+ sx_sunlock(&t4_uld_list_lock);
+ end_synchronized_op(sc, 0);
+}
+
+static void
+restart_all_uld(struct adapter *sc)
+{
+ int i;
+
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldre") != 0)
+ return;
+ sx_slock(&t4_uld_list_lock);
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] == NULL || !uld_active(sc, i) ||
+ t4_uld_list[i]->uld_restart == NULL)
+ continue;
+ (void) t4_uld_list[i]->uld_restart(sc);
}
sx_sunlock(&t4_uld_list_lock);
end_synchronized_op(sc, 0);
}
int
-uld_active(struct adapter *sc, int uld_id)
+uld_active(struct adapter *sc, int id)
{
- MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
+ MPASS(id >= 0 && id <= ULD_MAX);
- return (isset(&sc->active_ulds, uld_id));
+ return (isset(&sc->active_ulds, id));
}
#endif
@@ -12687,7 +13597,7 @@ ktls_capability(struct adapter *sc, bool enable)
return (ENODEV);
if (!is_t6(sc))
return (0);
- if (hw_off_limits(sc))
+ if (!hw_all_ok(sc))
return (ENXIO);
if (enable) {
@@ -12847,30 +13757,30 @@ tweak_tunables(void)
#ifdef DDB
static void
-t4_dump_tcb(struct adapter *sc, int tid)
+t4_dump_mem(struct adapter *sc, u_int addr, u_int len)
{
- uint32_t base, i, j, off, pf, reg, save, tcb_addr, win_pos;
+ uint32_t base, j, off, pf, reg, save, win_pos;
- reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
+ reg = chip_id(sc) > CHELSIO_T6 ?
+ PCIE_MEM_ACCESS_T7_REG(A_PCIE_MEM_ACCESS_OFFSET0, 2) :
+ PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, 2);
save = t4_read_reg(sc, reg);
base = sc->memwin[2].mw_base;
- /* Dump TCB for the tid */
- tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
- tcb_addr += tid * TCB_SIZE;
-
if (is_t4(sc)) {
pf = 0;
- win_pos = tcb_addr & ~0xf; /* start must be 16B aligned */
+ win_pos = addr & ~0xf; /* start must be 16B aligned */
} else {
pf = V_PFNUM(sc->pf);
- win_pos = tcb_addr & ~0x7f; /* start must be 128B aligned */
+ win_pos = addr & ~0x7f; /* start must be 128B aligned */
}
+ off = addr - win_pos;
+ if (chip_id(sc) > CHELSIO_T6)
+ win_pos >>= X_T7_MEMOFST_SHIFT;
t4_write_reg(sc, reg, win_pos | pf);
t4_read_reg(sc, reg);
- off = tcb_addr - win_pos;
- for (i = 0; i < 4; i++) {
+ while (len > 0 && !db_pager_quit) {
uint32_t buf[8];
for (j = 0; j < 8; j++, off += 4)
buf[j] = htonl(t4_read_reg(sc, base + off));
@@ -12878,6 +13788,10 @@ t4_dump_tcb(struct adapter *sc, int tid)
db_printf("%08x %08x %08x %08x %08x %08x %08x %08x\n",
buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6],
buf[7]);
+ if (len <= sizeof(buf))
+ len = 0;
+ else
+ len -= sizeof(buf);
}
t4_write_reg(sc, reg, save);
@@ -12885,6 +13799,17 @@ t4_dump_tcb(struct adapter *sc, int tid)
}
static void
+t4_dump_tcb(struct adapter *sc, int tid)
+{
+ uint32_t tcb_addr;
+
+ /* Dump TCB for the tid */
+ tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE);
+ tcb_addr += tid * TCB_SIZE;
+ t4_dump_mem(sc, tcb_addr, TCB_SIZE);
+}
+
+static void
t4_dump_devlog(struct adapter *sc)
{
struct devlog_params *dparams = &sc->params.devlog;
@@ -13015,6 +13940,51 @@ DB_TABLE_COMMAND_FLAGS(show_t4, tcb, db_show_t4tcb, CS_OWN)
t4_dump_tcb(device_get_softc(dev), tid);
}
+
+DB_TABLE_COMMAND_FLAGS(show_t4, memdump, db_show_memdump, CS_OWN)
+{
+ device_t dev;
+ int radix, t;
+ bool valid;
+
+ valid = false;
+ radix = db_radix;
+ db_radix = 10;
+ t = db_read_token();
+ if (t == tIDENT) {
+ dev = device_lookup_by_name(db_tok_string);
+ t = db_read_token();
+ if (t == tNUMBER) {
+ addr = db_tok_number;
+ t = db_read_token();
+ if (t == tNUMBER) {
+ count = db_tok_number;
+ valid = true;
+ }
+ }
+ }
+ db_radix = radix;
+ db_skip_to_eol();
+ if (!valid) {
+ db_printf("usage: show t4 memdump <nexus> <addr> <len>\n");
+ return;
+ }
+
+ if (dev == NULL) {
+ db_printf("device not found\n");
+ return;
+ }
+ if (addr < 0) {
+ db_printf("invalid address\n");
+ return;
+ }
+ if (count <= 0) {
+ db_printf("invalid length\n");
+ return;
+ }
+
+ t4_dump_mem(device_get_softc(dev), addr, count);
+}
#endif
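/*
 * Usage sketch for the new DDB command (the session and its output are
 * hypothetical): the nexus name, address, and length are parsed with
 * db_radix forced to 10, so both numbers are decimal.
 *
 *   db> show t4 memdump t4nex0 16384 64
 *   00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 *   00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 */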
static eventhandler_tag vxlan_start_evtag;
@@ -13165,13 +14135,13 @@ mod_event(module_t mod, int cmd, void *arg)
callout_init(&fatal_callout, 1);
#ifdef TCP_OFFLOAD
sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
- SLIST_INIT(&t4_uld_list);
#endif
#ifdef INET6
t4_clip_modload();
#endif
#ifdef KERN_TLS
t6_ktls_modload();
+ t7_ktls_modload();
#endif
t4_tracer_modload();
tweak_tunables();
@@ -13194,9 +14164,20 @@ mod_event(module_t mod, int cmd, void *arg)
case MOD_UNLOAD:
sx_xlock(&mlu);
if (--loaded == 0) {
+#ifdef TCP_OFFLOAD
+ int i;
+#endif
int tries;
taskqueue_free(reset_tq);
+
+ tries = 0;
+ while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
+ uprintf("%ju clusters with custom free routine "
+ "still is use.\n", t4_sge_extfree_refs());
+ pause("t4unload", 2 * hz);
+ }
+
sx_slock(&t4_list_lock);
if (!SLIST_EMPTY(&t4_list)) {
rc = EBUSY;
@@ -13205,20 +14186,14 @@ mod_event(module_t mod, int cmd, void *arg)
}
#ifdef TCP_OFFLOAD
sx_slock(&t4_uld_list_lock);
- if (!SLIST_EMPTY(&t4_uld_list)) {
- rc = EBUSY;
- sx_sunlock(&t4_uld_list_lock);
- sx_sunlock(&t4_list_lock);
- goto done_unload;
- }
-#endif
- tries = 0;
- while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
- uprintf("%ju clusters with custom free routine "
- "still is use.\n", t4_sge_extfree_refs());
- pause("t4unload", 2 * hz);
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] != NULL) {
+ rc = EBUSY;
+ sx_sunlock(&t4_uld_list_lock);
+ sx_sunlock(&t4_list_lock);
+ goto done_unload;
+ }
}
-#ifdef TCP_OFFLOAD
sx_sunlock(&t4_uld_list_lock);
#endif
sx_sunlock(&t4_list_lock);
@@ -13230,6 +14205,7 @@ mod_event(module_t mod, int cmd, void *arg)
vxlan_stop_evtag);
t4_tracer_modunload();
#ifdef KERN_TLS
+ t7_ktls_modunload();
t6_ktls_modunload();
#endif
#ifdef INET6
@@ -13276,6 +14252,14 @@ MODULE_DEPEND(t6nex, firmware, 1, 1, 1);
MODULE_DEPEND(t6nex, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
+DRIVER_MODULE(chnex, pci, ch_driver, mod_event, 0);
+MODULE_VERSION(chnex, 1);
+MODULE_DEPEND(chnex, crypto, 1, 1, 1);
+MODULE_DEPEND(chnex, firmware, 1, 1, 1);
+#ifdef DEV_NETMAP
+MODULE_DEPEND(chnex, netmap, 1, 1, 1);
+#endif /* DEV_NETMAP */
+
DRIVER_MODULE(cxgbe, t4nex, cxgbe_driver, 0, 0);
MODULE_VERSION(cxgbe, 1);
@@ -13285,6 +14269,9 @@ MODULE_VERSION(cxl, 1);
DRIVER_MODULE(cc, t6nex, cc_driver, 0, 0);
MODULE_VERSION(cc, 1);
+DRIVER_MODULE(che, chnex, che_driver, 0, 0);
+MODULE_VERSION(che, 1);
+
DRIVER_MODULE(vcxgbe, cxgbe, vcxgbe_driver, 0, 0);
MODULE_VERSION(vcxgbe, 1);
@@ -13293,3 +14280,6 @@ MODULE_VERSION(vcxl, 1);
DRIVER_MODULE(vcc, cc, vcc_driver, 0, 0);
MODULE_VERSION(vcc, 1);
+
+DRIVER_MODULE(vche, che, vche_driver, 0, 0);
+MODULE_VERSION(vche, 1);
diff --git a/sys/dev/cxgbe/t4_mp_ring.c b/sys/dev/cxgbe/t4_mp_ring.c
index 531fd356728e..916c363a0c2a 100644
--- a/sys/dev/cxgbe/t4_mp_ring.c
+++ b/sys/dev/cxgbe/t4_mp_ring.c
@@ -305,7 +305,6 @@ failed:
}
void
-
mp_ring_free(struct mp_ring *r)
{
int i;
@@ -472,6 +471,86 @@ mp_ring_enqueue(struct mp_ring *r, void **items, int n, int budget)
return (0);
}
+/*
+ * Enqueue n items but never drain the ring. Can be called
+ * to enqueue new items while draining the ring.
+ *
+ * Returns an errno.
+ */
+int
+mp_ring_enqueue_only(struct mp_ring *r, void **items, int n)
+{
+ union ring_state os, ns;
+ uint16_t pidx_start, pidx_stop;
+ int i;
+
+ MPASS(items != NULL);
+ MPASS(n > 0);
+
+ /*
+ * Reserve room for the new items. Our reservation, if successful, is
+ * from 'pidx_start' to 'pidx_stop'.
+ */
+ os.state = atomic_load_64(&r->state);
+
+ /* Should only be used from the drain callback. */
+ MPASS(os.flags == BUSY || os.flags == TOO_BUSY ||
+ os.flags == TAKING_OVER);
+
+ for (;;) {
+ if (__predict_false(space_available(r, os) < n)) {
+ /* Not enough room in the ring. */
+ counter_u64_add(r->dropped, n);
+ return (ENOBUFS);
+ }
+
+ /* There is room in the ring. */
+
+ ns.state = os.state;
+ ns.pidx_head = increment_idx(r, os.pidx_head, n);
+ critical_enter();
+ if (atomic_fcmpset_64(&r->state, &os.state, ns.state))
+ break;
+ critical_exit();
+ cpu_spinwait();
+ }
+
+ pidx_start = os.pidx_head;
+ pidx_stop = ns.pidx_head;
+
+ /*
+ * Wait for other producers who got in ahead of us to enqueue their
+ * items, one producer at a time. It is our turn when the ring's
+ * pidx_tail reaches the beginning of our reservation (pidx_start).
+ */
+ while (ns.pidx_tail != pidx_start) {
+ cpu_spinwait();
+ ns.state = atomic_load_64(&r->state);
+ }
+
+ /* Now it is our turn to fill up the area we reserved earlier. */
+ i = pidx_start;
+ do {
+ r->items[i] = *items++;
+ if (__predict_false(++i == r->size))
+ i = 0;
+ } while (i != pidx_stop);
+
+ /*
+ * Update the ring's pidx_tail. The release style atomic guarantees
+ * that the items are visible to any thread that sees the updated pidx.
+ */
+ os.state = atomic_load_64(&r->state);
+ do {
+ ns.state = os.state;
+ ns.pidx_tail = pidx_stop;
+ } while (atomic_fcmpset_rel_64(&r->state, &os.state, ns.state) == 0);
+ critical_exit();
+
+ counter_u64_add(r->not_consumer, 1);
+ return (0);
+}
+
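+
/*
 * mp_ring_enqueue_only() reserves space with the same fcmpset loop as
 * mp_ring_enqueue() but never takes over draining, so it is only safe
 * where the ring is already BUSY/TOO_BUSY/TAKING_OVER, i.e. from the
 * consumer side.  A minimal sketch of the intended use, a hypothetical
 * drain-side helper that puts an item back instead of dropping it:
 */
static int
requeue_one(struct mp_ring *r, void *item)
{
	/* Never drains; on ENOBUFS the item was already counted in r->dropped. */
	return (mp_ring_enqueue_only(r, &item, 1));
}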
void
mp_ring_check_drainage(struct mp_ring *r, int budget)
{
diff --git a/sys/dev/cxgbe/t4_mp_ring.h b/sys/dev/cxgbe/t4_mp_ring.h
index 949174b9056d..07b15906cd43 100644
--- a/sys/dev/cxgbe/t4_mp_ring.h
+++ b/sys/dev/cxgbe/t4_mp_ring.h
@@ -62,6 +62,7 @@ int mp_ring_alloc(struct mp_ring **, int, void *, ring_drain_t,
ring_can_drain_t, struct malloc_type *, struct mtx *, int);
void mp_ring_free(struct mp_ring *);
int mp_ring_enqueue(struct mp_ring *, void **, int, int);
+int mp_ring_enqueue_only(struct mp_ring *, void **, int);
void mp_ring_check_drainage(struct mp_ring *, int);
void mp_ring_reset_stats(struct mp_ring *);
bool mp_ring_is_idle(struct mp_ring *);
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index 0377f65acc3e..0135bec6e2c1 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -232,7 +232,7 @@ alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx)
nm_txq->nid = idx;
nm_txq->iqidx = iqidx;
nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0))
nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR));
@@ -276,7 +276,7 @@ free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
static int
alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, cong_map;
__be32 v;
struct adapter *sc = vi->adapter;
struct port_info *pi = vi->pi;
@@ -284,7 +284,6 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
struct netmap_adapter *na = NA(vi->ifp);
struct fw_iq_cmd c;
const int cong_drop = nm_cong_drop;
- const int cong_map = pi->rx_e_chan_map;
MPASS(na != NULL);
MPASS(nm_rxq->iq_desc != NULL);
@@ -314,13 +313,17 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(0) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
c.iqsize = htobe16(vi->qsize_rxq);
c.iqaddr = htobe64(nm_rxq->iq_ba);
if (cong_drop != -1) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN |
V_FW_IQ_CMD_FL0CNGCHMAP(cong_map) | F_FW_IQ_CMD_FL0CONGCIF |
F_FW_IQ_CMD_FL0CONGEN);
@@ -421,15 +424,19 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq *nm_txq)
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
- if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID)
- c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC);
- else
+ if (nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID) {
+ const int core = sc->params.ncores > 1 ?
+ nm_txq->nid % sc->params.ncores : 0;
+
+ c.alloc_to_len16 |= htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core));
+ } else
c.eqid_pkd = htobe32(V_FW_EQ_ETH_CMD_EQID(nm_txq->cntxt_id));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(vi->pi->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(sc->sge.nm_rxq[nm_txq->iqidx].iq_cntxt_id));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -1025,29 +1032,28 @@ cxgbe_nm_tx(struct adapter *sc, struct sge_nm_txq *nm_txq,
nm_txq->pidx = 0;
}
- if (npkt == 0 && npkt_remaining == 0) {
+ if (npkt + npkt_remaining == 0) {
/* All done. */
- if (lazy_tx_credit_flush == 0) {
+ if (lazy_tx_credit_flush == 0 ||
+ NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
F_FW_WR_EQUIQ);
nm_txq->equeqidx = nm_txq->pidx;
nm_txq->equiqidx = nm_txq->pidx;
+ } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
+ wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
+ nm_txq->equeqidx = nm_txq->pidx;
}
ring_nm_txq_db(sc, nm_txq);
return;
}
-
- if (NMIDXDIFF(nm_txq, equiqidx) >= nm_txq->sidx / 2) {
- wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ |
- F_FW_WR_EQUIQ);
- nm_txq->equeqidx = nm_txq->pidx;
- nm_txq->equiqidx = nm_txq->pidx;
- } else if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
- wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
- nm_txq->equeqidx = nm_txq->pidx;
- }
- if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC)
+ if (NMIDXDIFF(nm_txq, dbidx) >= 2 * SGE_MAX_WR_NDESC) {
+ if (NMIDXDIFF(nm_txq, equeqidx) >= 64) {
+ wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ);
+ nm_txq->equeqidx = nm_txq->pidx;
+ }
ring_nm_txq_db(sc, nm_txq);
+ }
}
/* Will get called again. */
diff --git a/sys/dev/cxgbe/t4_sched.c b/sys/dev/cxgbe/t4_sched.c
index 46638a086a0d..65c2720d692c 100644
--- a/sys/dev/cxgbe/t4_sched.c
+++ b/sys/dev/cxgbe/t4_sched.c
@@ -272,7 +272,7 @@ update_tx_sched(void *context, int pending)
}
rc = -t4_sched_params(sc, FW_SCHED_TYPE_PKTSCHED,
FW_SCHED_PARAMS_LEVEL_CL_RL, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, j, 0, tc->maxrate, 0,
+ tc->ratemode, pi->hw_port, j, 0, tc->maxrate, 0,
tc->pktsize, tc->burstsize, 1);
end_synchronized_op(sc, 0);
@@ -291,7 +291,7 @@ update_tx_sched(void *context, int pending)
"params: mode %d, rateunit %d, ratemode %d, "
"channel %d, minrate %d, maxrate %d, pktsize %d, "
"burstsize %d\n", j, rc, tc->mode, tc->rateunit,
- tc->ratemode, pi->tx_chan, 0, tc->maxrate,
+ tc->ratemode, pi->hw_port, 0, tc->maxrate,
tc->pktsize, tc->burstsize);
}
}
@@ -334,7 +334,7 @@ bind_txq_to_traffic_class(struct adapter *sc, struct sge_txq *txq, int idx)
goto done;
}
- tc0 = &sc->port[txq->eq.tx_chan]->sched_params->cl_rl[0];
+ tc0 = &sc->port[txq->eq.port_id]->sched_params->cl_rl[0];
if (idx != -1) {
/*
* Bind to a different class at index idx.
@@ -839,7 +839,7 @@ failed:
cst->tx_total = cst->tx_credits;
cst->plen = 0;
cst->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
/*
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index e1705ae063e2..2f9cb1a4ebb5 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2011 Chelsio Communications, Inc.
- * All rights reserved.
+ * Copyright (c) 2011, 2025 Chelsio Communications.
* Written by: Navdeep Parhar <np@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
@@ -259,17 +258,20 @@ static void free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
static void add_ofld_rxq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
struct sge_ofld_rxq *);
#endif
-static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
-static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ctrl_eq_alloc(struct adapter *, struct sge_eq *, int);
+static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
-static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
+static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
#endif
static int alloc_eq(struct adapter *, struct sge_eq *, struct sysctl_ctx_list *,
struct sysctl_oid *);
static void free_eq(struct adapter *, struct sge_eq *);
static void add_eq_sysctls(struct adapter *, struct sysctl_ctx_list *,
struct sysctl_oid *, struct sge_eq *);
-static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
+static int alloc_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *,
+ int);
static int free_eq_hwq(struct adapter *, struct vi_info *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
struct sysctl_ctx_list *, struct sysctl_oid *);
@@ -348,6 +350,7 @@ cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];
+cpl_handler_t fw6_pld_handlers[NUM_CPL_FW6_COOKIES];
void
t4_register_an_handler(an_handler_t h)
@@ -477,6 +480,21 @@ fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (fw4_ack_handlers[cookie](iq, rss, m));
}
+static int
+fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
+{
+ const struct cpl_fw6_pld *cpl;
+ uint64_t cookie;
+
+ if (m != NULL)
+ cpl = mtod(m, const void *);
+ else
+ cpl = (const void *)(rss + 1);
+ cookie = be64toh(cpl->data[1]) & CPL_FW6_COOKIE_MASK;
+
+ return (fw6_pld_handlers[cookie](iq, rss, m));
+}
+
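+
/*
 * Handlers for CPL_FW6_PLD are selected by a cookie recovered from the
 * low bits of cpl->data[1] rather than by opcode alone.  A sketch of a
 * subsystem registering one (the handler name and cookie value 1 are
 * illustrative; real users pick one of the NUM_CPL_FW6_COOKIES slots):
 */
static int
my_fw6_pld_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_fw6_pld *cpl = m != NULL ?
	    mtod(m, const void *) : (const void *)(rss + 1);

	/* ... consume cpl->data[] ... */
	if (m != NULL)
		m_freem(m);
	return (0);
}

/* At attach/load time: */
t4_register_shared_cpl_handler(CPL_FW6_PLD, my_fw6_pld_handler, 1);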
static void
t4_init_shared_cpl_handlers(void)
{
@@ -486,6 +504,7 @@ t4_init_shared_cpl_handlers(void)
t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
+ t4_register_cpl_handler(CPL_FW6_PLD, fw6_pld_handler);
}
void
@@ -494,8 +513,12 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
uintptr_t *loc;
MPASS(opcode < nitems(t4_cpl_handler));
- MPASS(cookie > CPL_COOKIE_RESERVED);
- MPASS(cookie < NUM_CPL_COOKIES);
+ if (opcode == CPL_FW6_PLD) {
+ MPASS(cookie < NUM_CPL_FW6_COOKIES);
+ } else {
+ MPASS(cookie > CPL_COOKIE_RESERVED);
+ MPASS(cookie < NUM_CPL_COOKIES);
+ }
MPASS(t4_cpl_handler[opcode] != NULL);
switch (opcode) {
@@ -514,6 +537,9 @@ t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
case CPL_FW4_ACK:
loc = (uintptr_t *)&fw4_ack_handlers[cookie];
break;
+ case CPL_FW6_PLD:
+ loc = (uintptr_t *)&fw6_pld_handlers[cookie];
+ break;
default:
MPASS(0);
return;
@@ -570,7 +596,9 @@ t4_sge_modload(void)
}
if (largest_rx_cluster != MCLBYTES &&
+#if MJUMPAGESIZE != MCLBYTES
largest_rx_cluster != MJUMPAGESIZE &&
+#endif
largest_rx_cluster != MJUM9BYTES &&
largest_rx_cluster != MJUM16BYTES) {
printf("Invalid hw.cxgbe.largest_rx_cluster value (%d),"
@@ -579,7 +607,9 @@ t4_sge_modload(void)
}
if (safest_rx_cluster != MCLBYTES &&
+#if MJUMPAGESIZE != MCLBYTES
safest_rx_cluster != MJUMPAGESIZE &&
+#endif
safest_rx_cluster != MJUM9BYTES &&
safest_rx_cluster != MJUM16BYTES) {
printf("Invalid hw.cxgbe.safest_rx_cluster value (%d),"
@@ -718,7 +748,9 @@ t4_tweak_chip_settings(struct adapter *sc)
uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
static int sw_buf_sizes[] = {
MCLBYTES,
+#if MJUMPAGESIZE != MCLBYTES
MJUMPAGESIZE,
+#endif
MJUM9BYTES,
MJUM16BYTES
};
@@ -855,7 +887,9 @@ t4_init_rx_buf_info(struct adapter *sc)
int i, j, n;
static int sw_buf_sizes[] = { /* Sorted by size */
MCLBYTES,
+#if MJUMPAGESIZE != MCLBYTES
MJUMPAGESIZE,
+#endif
MJUM9BYTES,
MJUM16BYTES
};
@@ -1056,9 +1090,9 @@ t4_setup_adapter_queues(struct adapter *sc)
*/
/*
- * Control queues, one per port.
+ * Control queues. At least one per port and per internal core.
*/
- for_each_port(sc, i) {
+ for (i = 0; i < sc->sge.nctrlq; i++) {
rc = alloc_ctrlq(sc, i);
if (rc != 0)
return (rc);
@@ -1079,7 +1113,7 @@ t4_teardown_adapter_queues(struct adapter *sc)
if (sc->sge.ctrlq != NULL) {
MPASS(!(sc->flags & IS_VF)); /* VFs don't allocate ctrlq. */
- for_each_port(sc, i)
+ for (i = 0; i < sc->sge.nctrlq; i++)
free_ctrlq(sc, i);
}
free_fwq(sc);
@@ -2086,9 +2120,17 @@ have_mbuf:
}
if (cpl->vlan_ex) {
- m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
- m0->m_flags |= M_VLANTAG;
- rxq->vlan_extraction++;
+ if (sc->flags & IS_VF && sc->vlan_id) {
+ /*
+ * HW is not set up correctly if extracted vlan_id does
+ * not match the VF's setting.
+ */
+ MPASS(be16toh(cpl->vlan) == sc->vlan_id);
+ } else {
+ m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan);
+ m0->m_flags |= M_VLANTAG;
+ rxq->vlan_extraction++;
+ }
}
if (rxq->iq.flags & IQ_RX_TIMESTAMP) {
@@ -2685,9 +2727,14 @@ restart:
#endif
#ifdef KERN_TLS
if (mst != NULL && mst->sw->type == IF_SND_TAG_TYPE_TLS) {
+ struct vi_info *vi = if_getsoftc(mst->ifp);
+
cflags |= MC_TLS;
set_mbuf_cflags(m0, cflags);
- rc = t6_ktls_parse_pkt(m0);
+ if (is_t6(vi->pi->adapter))
+ rc = t6_ktls_parse_pkt(m0);
+ else
+ rc = t7_ktls_parse_pkt(m0);
if (rc != 0)
goto fail;
return (EINPROGRESS);
@@ -2913,6 +2960,10 @@ start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie)
MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC);
EQ_LOCK(eq);
+ if (__predict_false((eq->flags & EQ_HW_ALLOCATED) == 0)) {
+ EQ_UNLOCK(eq);
+ return (NULL);
+ }
if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list))
drain_wrq_wr_list(sc, wrq);
@@ -3008,7 +3059,10 @@ commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie)
F_FW_WR_EQUEQ);
}
- ring_eq_db(wrq->adapter, eq, ndesc);
+ if (__predict_true(eq->flags & EQ_HW_ALLOCATED))
+ ring_eq_db(wrq->adapter, eq, ndesc);
+ else
+ IDXINCR(eq->dbidx, ndesc, eq->sidx);
} else {
MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc);
next->pidx = pidx;
@@ -3250,7 +3304,10 @@ skip_coalescing:
#ifdef KERN_TLS
} else if (mbuf_cflags(m0) & MC_TLS) {
ETHER_BPF_MTAP(ifp, m0);
- n = t6_ktls_write_wr(txq, wr, m0, avail);
+ if (is_t6(sc))
+ n = t6_ktls_write_wr(txq, wr, m0, avail);
+ else
+ n = t7_ktls_write_wr(txq, wr, m0, avail);
#endif
} else {
ETHER_BPF_MTAP(ifp, m0);
@@ -3391,6 +3448,7 @@ init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize,
eq->type = eqtype;
eq->port_id = port_id;
eq->tx_chan = sc->port[port_id]->tx_chan;
+ eq->hw_port = sc->port[port_id]->hw_port;
eq->iq = iq;
eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE;
strlcpy(eq->lockname, name, sizeof(eq->lockname));
@@ -3554,7 +3612,7 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
V_FW_IQ_CMD_VIID(vi->viid) |
V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT));
- c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+ c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->hw_port) |
F_FW_IQ_CMD_IQGTSMODE |
V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) |
V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4));
@@ -3562,7 +3620,13 @@ alloc_iq_fl_hwq(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl)
c.iqaddr = htobe64(iq->ba);
c.iqns_to_fl0congen = htobe32(V_FW_IQ_CMD_IQTYPE(iq->qtype));
if (iq->cong_drop != -1) {
- cong_map = iq->qtype == IQ_ETH ? pi->rx_e_chan_map : 0;
+ if (iq->qtype == IQ_ETH) {
+ if (chip_id(sc) >= CHELSIO_T7)
+ cong_map = 1 << pi->hw_port;
+ else
+ cong_map = pi->rx_e_chan_map;
+ } else
+ cong_map = 0;
c.iqns_to_fl0congen |= htobe32(F_FW_IQ_CMD_IQFLINTCONGEN);
}
@@ -3819,7 +3883,7 @@ alloc_ctrlq(struct adapter *sc, int idx)
struct sysctl_oid *oid;
struct sge_wrq *ctrlq = &sc->sge.ctrlq[idx];
- MPASS(idx < sc->params.nports);
+ MPASS(idx < sc->sge.nctrlq);
if (!(ctrlq->eq.flags & EQ_SW_ALLOCATED)) {
MPASS(!(ctrlq->eq.flags & EQ_HW_ALLOCATED));
@@ -3831,8 +3895,8 @@ alloc_ctrlq(struct adapter *sc, int idx)
snprintf(name, sizeof(name), "%s ctrlq%d",
device_get_nameunit(sc->dev), idx);
- init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, idx,
- &sc->sge.fwq, name);
+ init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE,
+ idx % sc->params.nports, &sc->sge.fwq, name);
rc = alloc_wrq(sc, NULL, ctrlq, &sc->ctx, oid);
if (rc != 0) {
CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
@@ -3844,8 +3908,10 @@ alloc_ctrlq(struct adapter *sc, int idx)
if (!(ctrlq->eq.flags & EQ_HW_ALLOCATED)) {
MPASS(ctrlq->eq.flags & EQ_SW_ALLOCATED);
+ MPASS(ctrlq->nwr_pending == 0);
+ MPASS(ctrlq->ndesc_needed == 0);
- rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq);
+ rc = alloc_eq_hwq(sc, NULL, &ctrlq->eq, idx);
if (rc != 0) {
CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
return (rc);
@@ -3913,14 +3979,19 @@ t4_sge_set_conm_context(struct adapter *sc, int cntxt_id, int cong_drop,
param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) |
V_FW_PARAMS_PARAM_YZ(cntxt_id);
- val = V_CONMCTXT_CNGTPMODE(cong_mode);
- if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
- cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
- for (i = 0, ch_map = 0; i < 4; i++) {
- if (cong_map & (1 << i))
- ch_map |= 1 << (i << cng_ch_bits_log);
+ if (chip_id(sc) >= CHELSIO_T7) {
+ val = V_T7_DMAQ_CONM_CTXT_CNGTPMODE(cong_mode) |
+ V_T7_DMAQ_CONM_CTXT_CH_VEC(cong_map);
+ } else {
+ val = V_CONMCTXT_CNGTPMODE(cong_mode);
+ if (cong_mode == X_CONMCTXT_CNGTPMODE_CHANNEL ||
+ cong_mode == X_CONMCTXT_CNGTPMODE_BOTH) {
+ for (i = 0, ch_map = 0; i < 4; i++) {
+ if (cong_map & (1 << i))
+ ch_map |= 1 << (i << cng_ch_bits_log);
+ }
+ val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
- val |= V_CONMCTXT_CNGCHMAP(ch_map);
}
rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
if (rc != 0) {
@@ -4220,9 +4291,7 @@ qsize_to_fthresh(int qsize)
{
u_int fthresh;
- while (!powerof2(qsize))
- qsize++;
- fthresh = ilog2(qsize);
+ fthresh = qsize == 0 ? 0 : order_base_2(qsize);
if (fthresh > X_CIDXFLUSHTHRESH_128)
fthresh = X_CIDXFLUSHTHRESH_128;
@@ -4230,24 +4299,26 @@ qsize_to_fthresh(int qsize)
}
static int
-ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
+ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ctrl_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.tid_qid_sel_mask != 0 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) |
V_FW_EQ_CTRL_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC |
+ V_FW_EQ_CTRL_CMD_COREGROUP(core) |
F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid));
c.physeqid_pkd = htobe32(0);
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_CTRL_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4259,8 +4330,8 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c);
if (rc != 0) {
- CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
- eq->tx_chan, rc);
+ CH_ERR(sc, "failed to create hw ctrlq for port %d: %d\n",
+ eq->port_id, rc);
return (rc);
}
@@ -4276,24 +4347,26 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
}
static int
-eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_eth_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ core = sc->params.ncores > 1 ? idx % sc->params.ncores : 0;
bzero(&c, sizeof(c));
c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
V_FW_EQ_ETH_CMD_VFN(0));
c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC |
+ V_FW_EQ_ETH_CMD_COREGROUP(core) |
F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid));
c.fetchszm_to_iqid =
htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) |
- V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
+ V_FW_EQ_ETH_CMD_PCIECHN(eq->hw_port) | F_FW_EQ_ETH_CMD_FETCHRO |
V_FW_EQ_ETH_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4321,23 +4394,44 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
+/*
+ * ncores number of uP cores
+ * nq number of queues for this VI
+ * idx queue index
+ */
+static inline int
+qidx_to_core(int ncores, int nq, int idx)
+{
+ MPASS(nq % ncores == 0);
+ MPASS(idx >= 0 && idx < nq);
+
+ return (idx * ncores / nq);
+}
+
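+
/*
 * Worked example of the mapping above, with ncores = 4 and nq = 8:
 * idx 0..7 lands on cores 0,0,1,1,2,2,3,3 (idx * ncores / nq).
 */
MPASS(qidx_to_core(4, 8, 3) == 1);	/* 3 * 4 / 8 = 1 */
MPASS(qidx_to_core(4, 8, 7) == 3);	/* 7 * 4 / 8 = 3 */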
static int
-ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq,
+ int idx)
{
- int rc, cntxt_id;
+ int rc, cntxt_id, core;
struct fw_eq_ofld_cmd c;
int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE;
+ if (sc->params.tid_qid_sel_mask != 0)
+ core = qidx_to_core(sc->params.ncores, vi->nofldtxq, idx);
+ else
+ core = 0;
+
bzero(&c, sizeof(c));
c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST |
F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) |
V_FW_EQ_OFLD_CMD_VFN(0));
c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC |
+ V_FW_EQ_OFLD_CMD_COREGROUP(core) |
F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
c.fetchszm_to_iqid =
htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
- V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) |
+ V_FW_EQ_OFLD_CMD_PCIECHN(eq->hw_port) |
F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid));
c.dcaen_to_eqsize =
htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ?
@@ -4426,7 +4520,7 @@ add_eq_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
}
static int
-alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
+alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq, int idx)
{
int rc;
@@ -4441,16 +4535,16 @@ alloc_eq_hwq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
switch (eq->type) {
case EQ_CTRL:
- rc = ctrl_eq_alloc(sc, eq);
+ rc = ctrl_eq_alloc(sc, eq, idx);
break;
case EQ_ETH:
- rc = eth_eq_alloc(sc, vi, eq);
+ rc = eth_eq_alloc(sc, vi, eq, idx);
break;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
case EQ_OFLD:
- rc = ofld_eq_alloc(sc, vi, eq);
+ rc = ofld_eq_alloc(sc, vi, eq, idx);
break;
#endif
@@ -4548,6 +4642,7 @@ free_wrq(struct adapter *sc, struct sge_wrq *wrq)
{
free_eq(sc, &wrq->eq);
MPASS(wrq->nwr_pending == 0);
+ MPASS(wrq->ndesc_needed == 0);
MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs));
MPASS(STAILQ_EMPTY(&wrq->wr_list));
bzero(wrq, sizeof(*wrq));
@@ -4629,7 +4724,7 @@ failed:
if (!(eq->flags & EQ_HW_ALLOCATED)) {
MPASS(eq->flags & EQ_SW_ALLOCATED);
- rc = alloc_eq_hwq(sc, vi, eq);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
return (rc);
@@ -4654,10 +4749,10 @@ failed:
if (vi->flags & TX_USES_VM_WR)
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan));
+ V_TXPKT_INTF(pi->hw_port));
else
txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) |
- V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) |
+ V_TXPKT_INTF(pi->hw_port) | V_TXPKT_PF(sc->pf) |
V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld));
txq->tc_idx = -1;
@@ -4764,18 +4859,46 @@ add_txq_sysctls(struct vi_info *vi, struct sysctl_ctx_list *ctx,
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_waste",
CTLFLAG_RD, &txq->kern_tls_waste,
"# of octets DMAd but not transmitted in NIC TLS records");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_options",
- CTLFLAG_RD, &txq->kern_tls_options,
- "# of NIC TLS options-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_header",
CTLFLAG_RD, &txq->kern_tls_header,
"# of NIC TLS header-only packets transmitted");
- SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin",
- CTLFLAG_RD, &txq->kern_tls_fin,
- "# of NIC TLS FIN-only packets transmitted");
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_fin_short",
CTLFLAG_RD, &txq->kern_tls_fin_short,
"# of NIC TLS padded FIN packets on short TLS records");
+ if (is_t6(sc)) {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_options", CTLFLAG_RD,
+ &txq->kern_tls_options,
+ "# of NIC TLS options-only packets transmitted");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_fin", CTLFLAG_RD, &txq->kern_tls_fin,
+ "# of NIC TLS FIN-only packets transmitted");
+ } else {
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_received", CTLFLAG_RD,
+ &txq->kern_tls_ghash_received,
+ "# of NIC TLS GHASHes received");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_ghash_requested", CTLFLAG_RD,
+ &txq->kern_tls_ghash_requested,
+ "# of NIC TLS GHASHes requested");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_lso", CTLFLAG_RD,
+ &txq->kern_tls_lso,
+ "# of NIC TLS records transmitted using LSO");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_partial_ghash", CTLFLAG_RD,
+ &txq->kern_tls_partial_ghash,
+ "# of NIC TLS records encrypted using a partial GHASH");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_splitmode", CTLFLAG_RD,
+ &txq->kern_tls_splitmode,
+ "# of NIC TLS records using SplitMode");
+ SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO,
+ "kern_tls_trailer", CTLFLAG_RD,
+ &txq->kern_tls_trailer,
+ "# of NIC TLS trailer-only packets transmitted");
+ }
SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "kern_tls_cbc",
CTLFLAG_RD, &txq->kern_tls_cbc,
"# of NIC TLS sessions using AES-CBC");
@@ -4842,7 +4965,10 @@ alloc_ofld_txq(struct vi_info *vi, struct sge_ofld_txq *ofld_txq, int idx)
}
if (!(eq->flags & EQ_HW_ALLOCATED)) {
- rc = alloc_eq_hwq(sc, vi, eq);
+ MPASS(eq->flags & EQ_SW_ALLOCATED);
+ MPASS(ofld_txq->wrq.nwr_pending == 0);
+ MPASS(ofld_txq->wrq.ndesc_needed == 0);
+ rc = alloc_eq_hwq(sc, vi, eq, idx);
if (rc != 0) {
CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
rc);
@@ -5391,7 +5517,8 @@ write_tnl_lso_cpl(void *cpl, struct mbuf *m0)
m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen +
m0->m_pkthdr.l5hlen) |
V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN));
- tnl_lso->r1 = 0;
+ tnl_lso->ipsecen_to_rocev2 = 0;
+ tnl_lso->roce_eth = 0;
/* Inner headers. */
ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN(
@@ -5478,7 +5605,8 @@ write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0)
ctrl1 |= F_TXPKT_VLAN_VLD |
V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
txq->vlan_insertion++;
- }
+ } else if (sc->vlan_id)
+ ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id);
/* CPL header */
cpl->ctrl0 = txq->cpl_ctrl0;
@@ -5979,7 +6107,8 @@ write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq)
ctrl1 |= F_TXPKT_VLAN_VLD |
V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
txq->vlan_insertion++;
- }
+ } else if (sc->vlan_id)
+ ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(sc->vlan_id);
/* CPL header */
cpl->ctrl0 = txq->cpl_ctrl0;
@@ -6554,10 +6683,11 @@ send_etid_flowc_wr(struct cxgbe_rate_tag *cst, struct port_info *pi,
V_FW_WR_FLOWID(cst->etid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects hw port and will translate to channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(cst->iqid);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
diff --git a/sys/dev/cxgbe/t4_tpt.c b/sys/dev/cxgbe/t4_tpt.c
new file mode 100644
index 000000000000..d18eabb026f1
--- /dev/null
+++ b/sys/dev/cxgbe/t4_tpt.c
@@ -0,0 +1,193 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Chelsio Communications, Inc.
+ * Written by: John Baldwin <jhb@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "common/common.h"
+
+/*
+ * Support routines to manage TPT entries used for both RDMA and NVMe
+ * offloads. This includes allocating STAG indices and managing the
+ * PBL pool.
+ */
+
+#define T4_ULPTX_MIN_IO 32
+#define T4_MAX_INLINE_SIZE 96
+#define T4_ULPTX_MAX_DMA 1024
+
+/* PBL and STAG Memory Managers. */
+
+#define MIN_PBL_SHIFT 5 /* 32B == min PBL size (4 entries) */
+
+uint32_t
+t4_pblpool_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t addr;
+
+ if (vmem_xalloc(sc->pbl_arena, roundup(size, (1 << MIN_PBL_SHIFT)),
+ 4, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_FIRSTFIT | M_NOWAIT,
+ &addr) != 0)
+ return (0);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%lx size %d", __func__, addr, size);
+#endif
+ return (addr);
+}
+
+void
+t4_pblpool_free(struct adapter *sc, uint32_t addr, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: addr 0x%x size %d", __func__, addr, size);
+#endif
+ vmem_xfree(sc->pbl_arena, addr, roundup(size, (1 << MIN_PBL_SHIFT)));
+}
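Both pool routines round the request to the 32-byte PBL granularity before handing it to vmem(9): MIN_PBL_SHIFT is 5, so the smallest allocation is one 32-byte chunk holding four 8-byte PBL entries. The rounding is plain roundup arithmetic; a compilable check:

#include <assert.h>

#define MIN_PBL_SHIFT 5			/* 32B == min PBL size (4 entries) */
#define PBL_ROUND(sz) \
	(((sz) + (1 << MIN_PBL_SHIFT) - 1) & ~((1 << MIN_PBL_SHIFT) - 1))

int
main(void)
{
	assert(PBL_ROUND(1) == 32);	/* minimum chunk */
	assert(PBL_ROUND(32) == 32);	/* already aligned */
	assert(PBL_ROUND(100) == 128);	/* 100B of PBL entries -> 4 chunks */
	return (0);
}

Note that t4_pblpool_alloc() signals failure with 0, which is unambiguous because the arena created in t4_init_tpt() below starts at sc->vres.pbl.start, presumably a nonzero adapter address.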
+
+uint32_t
+t4_stag_alloc(struct adapter *sc, int size)
+{
+ vmem_addr_t stag_idx;
+
+ if (vmem_alloc(sc->stag_arena, size, M_FIRSTFIT | M_NOWAIT,
+ &stag_idx) != 0)
+ return (T4_STAG_UNSET);
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%lx size %d", __func__, stag_idx, size);
+#endif
+ return (stag_idx);
+}
+
+void
+t4_stag_free(struct adapter *sc, uint32_t stag_idx, int size)
+{
+#ifdef VERBOSE_TRACES
+ CTR(KTR_CXGBE, "%s: idx 0x%x size %d", __func__, stag_idx, size);
+#endif
+ vmem_free(sc->stag_arena, stag_idx, size);
+}
+
+void
+t4_init_tpt(struct adapter *sc)
+{
+ if (sc->vres.pbl.size != 0)
+ sc->pbl_arena = vmem_create("PBL_MEM_POOL", sc->vres.pbl.start,
+ sc->vres.pbl.size, 1, 0, M_FIRSTFIT | M_WAITOK);
+ if (sc->vres.stag.size != 0)
+ sc->stag_arena = vmem_create("STAG", 1,
+ sc->vres.stag.size >> 5, 1, 0, M_FIRSTFIT | M_WAITOK);
+}
+
+void
+t4_free_tpt(struct adapter *sc)
+{
+ if (sc->pbl_arena != NULL)
+ vmem_destroy(sc->pbl_arena);
+ if (sc->stag_arena != NULL)
+ vmem_destroy(sc->stag_arena);
+}
+
+/*
+ * TPT support routines. TPT entries are stored in the STAG adapter
+ * memory region and are written to via ULP_TX_MEM_WRITE commands in
+ * FW_ULPTX_WR work requests.
+ */
+
+void
+t4_write_mem_dma_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, vm_paddr_t data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_sgl *sgl;
+
+ MPASS(wr_len == T4_WRITE_MEM_DMA_LEN);
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ V_T5_ULP_MEMIO_ORDER(1) |
+ V_T5_ULP_MEMIO_FID(sc->sge.ofld_rxq[0].iq.abs_id));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(len >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(len >> 5));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ sgl = (struct ulptx_sgl *)(ulpmc + 1);
+ sgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(1));
+ sgl->len0 = htobe32(len);
+ sgl->addr0 = htobe64(data);
+}
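The DMA variant describes the payload with a single ULPTX DSGL entry and encodes two lengths: dlen counts 32-byte units of adapter memory written (len >> 5), while len16 carries the WR length in 16-byte chunks past the firmware WR header (the driver additionally ORs tid << 8 into that field). A worked computation, using a stand-in value for sizeof(ulpmc->wr):

#include <stdio.h>

#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))

int
main(void)
{
	int wr_hdr = 24;	/* stand-in for sizeof(ulpmc->wr) */
	int wr_len = 112;	/* hypothetical total WR length */
	int len = 256;		/* bytes written to adapter memory */

	printf("dlen  = %d 32B units\n", len >> 5);	/* 8 */
	printf("len16 = %d 16B chunks\n",
	    DIV_ROUND_UP(wr_len - wr_hdr, 16));		/* 6 */
	return (0);
}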
+
+void
+t4_write_mem_inline_wr(struct adapter *sc, void *wr, int wr_len, int tid,
+ uint32_t addr, uint32_t len, void *data, uint64_t cookie)
+{
+ struct ulp_mem_io *ulpmc;
+ struct ulptx_idata *ulpsc;
+
+ MPASS(len > 0 && len <= T4_MAX_INLINE_SIZE);
+ MPASS(wr_len == T4_WRITE_MEM_INLINE_LEN(len));
+
+ addr &= 0x7FFFFFF;
+
+ memset(wr, 0, wr_len);
+ ulpmc = wr;
+ INIT_ULPTX_WR(ulpmc, wr_len, 0, tid);
+
+ if (cookie != 0) {
+ ulpmc->wr.wr_hi |= htobe32(F_FW_WR_COMPL);
+ ulpmc->wr.wr_lo = cookie;
+ }
+
+ ulpmc->cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE) |
+ F_T5_ULP_MEMIO_IMM);
+
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(
+ DIV_ROUND_UP(len, T4_ULPTX_MIN_IO)));
+ ulpmc->len16 = htobe32((tid << 8) |
+ DIV_ROUND_UP(wr_len - sizeof(ulpmc->wr), 16));
+ ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(addr));
+
+ ulpsc = (struct ulptx_idata *)(ulpmc + 1);
+ ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
+ ulpsc->len = htobe32(roundup(len, T4_ULPTX_MIN_IO));
+
+ if (data != NULL)
+ memcpy(ulpsc + 1, data, len);
+}
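The inline variant instead pads the immediate data to T4_ULPTX_MIN_IO (32-byte) units, capped at T4_MAX_INLINE_SIZE (96 bytes, i.e. three units); T4_WRITE_MEM_INLINE_LEN, asserted via MPASS above, is presumably the matching sizing macro defined alongside these prototypes. A compilable check that the padding never overshoots the cap:

#include <assert.h>

#define T4_ULPTX_MIN_IO		32
#define T4_MAX_INLINE_SIZE	96
#define ROUNDUP(x, y)		((((x) + (y) - 1) / (y)) * (y))

int
main(void)
{
	for (int len = 1; len <= T4_MAX_INLINE_SIZE; len++) {
		int padded = ROUNDUP(len, T4_ULPTX_MIN_IO);

		assert(padded <= T4_MAX_INLINE_SIZE);	/* fits the cap */
		assert(padded - len < T4_ULPTX_MIN_IO);	/* minimal padding */
	}
	return (0);
}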
diff --git a/sys/dev/cxgbe/t4_tracer.c b/sys/dev/cxgbe/t4_tracer.c
index d9b336c4b64a..4f8d28626bc9 100644
--- a/sys/dev/cxgbe/t4_tracer.c
+++ b/sys/dev/cxgbe/t4_tracer.c
@@ -123,9 +123,8 @@ static int
t4_cloner_match(struct if_clone *ifc, const char *name)
{
- if (strncmp(name, "t4nex", 5) != 0 &&
- strncmp(name, "t5nex", 5) != 0 &&
- strncmp(name, "t6nex", 5) != 0)
+ if (strncmp(name, "t4nex", 5) != 0 && strncmp(name, "t5nex", 5) != 0 &&
+ strncmp(name, "t6nex", 5) != 0 && strncmp(name, "chnex", 5) != 0)
return (0);
if (name[5] < '0' || name[5] > '9')
return (0);
@@ -167,11 +166,6 @@ t4_cloner_create(struct if_clone *ifc, char *name, size_t len, caddr_t params)
}
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- rc = ENOMEM;
- goto done;
- }
-
/* Note that if_xname is identical to the nexus nameunit */
if_initname(ifp, name, -1);
if_setdname(ifp, t4_cloner_name);
diff --git a/sys/dev/cxgbe/t4_vf.c b/sys/dev/cxgbe/t4_vf.c
index a4a611ed645c..89dae02e9332 100644
--- a/sys/dev/cxgbe/t4_vf.c
+++ b/sys/dev/cxgbe/t4_vf.c
@@ -105,15 +105,15 @@ struct {
{0x6802, "Chelsio T6225-SO-CR VF"}, /* 2 x 10/25G, nomem */
{0x6803, "Chelsio T6425-CR VF"}, /* 4 x 10/25G */
{0x6804, "Chelsio T6425-SO-CR VF"}, /* 4 x 10/25G, nomem */
- {0x6805, "Chelsio T6225-OCP-SO VF"}, /* 2 x 10/25G, nomem */
- {0x6806, "Chelsio T62100-OCP-SO VF"}, /* 2 x 40/50/100G, nomem */
+ {0x6805, "Chelsio T6225-SO-OCP3 VF"}, /* 2 x 10/25G, nomem */
+ {0x6806, "Chelsio T6225-OCP3 VF"}, /* 2 x 10/25G */
{0x6807, "Chelsio T62100-LP-CR VF"}, /* 2 x 40/50/100G */
{0x6808, "Chelsio T62100-SO-CR VF"}, /* 2 x 40/50/100G, nomem */
{0x6809, "Chelsio T6210-BT VF"}, /* 2 x 10GBASE-T */
{0x680d, "Chelsio T62100-CR VF"}, /* 2 x 40/50/100G */
{0x6810, "Chelsio T6-DBG-100 VF"}, /* 2 x 40/50/100G, debug */
{0x6811, "Chelsio T6225-LL-CR VF"}, /* 2 x 10/25G */
- {0x6814, "Chelsio T61100-OCP-SO VF"}, /* 1 x 40/50/100G, nomem */
+ {0x6814, "Chelsio T62100-SO-OCP3 VF"}, /* 2 x 40/50/100G, nomem */
{0x6815, "Chelsio T6201-BT VF"}, /* 2 x 1000BASE-T */
/* Custom */
@@ -125,6 +125,28 @@ struct {
{0x6885, "Chelsio T6240-SO 85 VF"},
{0x6886, "Chelsio T6225-SO-CR 86 VF"},
{0x6887, "Chelsio T6225-CR 87 VF"},
+}, t7vf_pciids[] = {
+ {0xd800, "Chelsio T7 FPGA VF"}, /* T7 PE12K FPGA */
+ {0x7800, "Chelsio T72200-DBG VF"}, /* 2 x 200G, debug */
+ {0x7801, "Chelsio T7250 VF"}, /* 2 x 10/25/50G, 1 mem */
+ {0x7802, "Chelsio S7250 VF"}, /* 2 x 10/25/50G, nomem */
+ {0x7803, "Chelsio T7450 VF"}, /* 4 x 10/25/50G, 1 mem */
+ {0x7804, "Chelsio S7450 VF"}, /* 4 x 10/25/50G, nomem */
+ {0x7805, "Chelsio T72200 VF"}, /* 2 x 40/100/200G, 1 mem */
+ {0x7806, "Chelsio S72200 VF"}, /* 2 x 40/100/200G, nomem */
+ {0x7807, "Chelsio T72200-FH VF"}, /* 2 x 40/100/200G, 2 mem */
+ {0x7808, "Chelsio T71400 VF"}, /* 1 x 400G, nomem */
+ {0x7809, "Chelsio S7210-BT VF"}, /* 2 x 10GBASE-T, nomem */
+ {0x780a, "Chelsio T7450-RC VF"}, /* 4 x 10/25/50G, 1 mem, RC */
+ {0x780b, "Chelsio T72200-RC VF"}, /* 2 x 40/100/200G, 1 mem, RC */
+ {0x780c, "Chelsio T72200-FH-RC VF"}, /* 2 x 40/100/200G, 2 mem, RC */
+ {0x780d, "Chelsio S72200-OCP3 VF"}, /* 2 x 40/100/200G OCP3 */
+ {0x780e, "Chelsio S7450-OCP3 VF"}, /* 4 x 1/20/25/50G OCP3 */
+ {0x780f, "Chelsio S7410-BT-OCP3 VF"}, /* 4 x 10GBASE-T OCP3 */
+ {0x7810, "Chelsio S7210-BT-A VF"}, /* 2 x 10GBASE-T */
+ {0x7811, "Chelsio T7_MAYRA_7 VF"}, /* Motherboard */
+
+ {0x7880, "Custom T7 VF"},
};
static d_ioctl_t t4vf_ioctl;
@@ -183,6 +205,22 @@ t6vf_probe(device_t dev)
return (ENXIO);
}
+static int
+chvf_probe(device_t dev)
+{
+ uint16_t d;
+ size_t i;
+
+ d = pci_get_device(dev);
+ for (i = 0; i < nitems(t7vf_pciids); i++) {
+ if (d == t7vf_pciids[i].device) {
+ device_set_desc(dev, t7vf_pciids[i].desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ }
+ return (ENXIO);
+}
+
#define FW_PARAM_DEV(param) \
(V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
@@ -589,6 +627,10 @@ t4vf_attach(device_t dev)
if (rc != 0)
goto done; /* error message displayed already */
+ rc = t4_adj_doorbells(sc);
+ if (rc != 0)
+ goto done; /* error message displayed already */
+
rc = t4_create_dma_tag(sc);
if (rc != 0)
goto done; /* error message displayed already */
@@ -656,6 +698,8 @@ t4vf_attach(device_t dev)
t4_os_set_hw_addr(pi, mac);
pmask &= ~(1 << p);
+ sc->vlan_id = t4vf_get_vf_vlan(sc);
+
/* No t4_link_start. */
snprintf(pi->lockname, sizeof(pi->lockname), "%sp%d",
@@ -667,7 +711,7 @@ t4vf_attach(device_t dev)
ifmedia_init(&pi->media, IFM_IMASK, cxgbe_media_change,
cxgbe_media_status);
- pi->dev = device_add_child(dev, sc->names->vf_ifnet_name, -1);
+ pi->dev = device_add_child(dev, sc->names->vf_ifnet_name, DEVICE_UNIT_ANY);
if (pi->dev == NULL) {
device_printf(dev,
"failed to add device for port %d.\n", i);
@@ -749,12 +793,7 @@ t4vf_attach(device_t dev)
goto done;
}
- rc = bus_generic_attach(dev);
- if (rc != 0) {
- device_printf(dev,
- "failed to attach all child ports: %d\n", rc);
- goto done;
- }
+ bus_attach_children(dev);
device_printf(dev,
"%d ports, %d %s interrupt%s, %d eq, %d iq\n",
@@ -955,6 +994,20 @@ static driver_t t6vf_driver = {
sizeof(struct adapter)
};
+static device_method_t chvf_methods[] = {
+ DEVMETHOD(device_probe, chvf_probe),
+ DEVMETHOD(device_attach, t4vf_attach),
+ DEVMETHOD(device_detach, t4_detach_common),
+
+ DEVMETHOD_END
+};
+
+static driver_t chvf_driver = {
+ "chvf",
+ chvf_methods,
+ sizeof(struct adapter)
+};
+
static driver_t cxgbev_driver = {
"cxgbev",
cxgbe_methods,
@@ -973,6 +1026,12 @@ static driver_t ccv_driver = {
sizeof(struct port_info)
};
+static driver_t chev_driver = {
+ "chev",
+ cxgbe_methods,
+ sizeof(struct port_info)
+};
+
DRIVER_MODULE(t4vf, pci, t4vf_driver, 0, 0);
MODULE_VERSION(t4vf, 1);
MODULE_DEPEND(t4vf, t4nex, 1, 1, 1);
@@ -985,6 +1044,10 @@ DRIVER_MODULE(t6vf, pci, t6vf_driver, 0, 0);
MODULE_VERSION(t6vf, 1);
MODULE_DEPEND(t6vf, t6nex, 1, 1, 1);
+DRIVER_MODULE(chvf, pci, chvf_driver, 0, 0);
+MODULE_VERSION(chvf, 1);
+MODULE_DEPEND(chvf, chnex, 1, 1, 1);
+
DRIVER_MODULE(cxgbev, t4vf, cxgbev_driver, 0, 0);
MODULE_VERSION(cxgbev, 1);
@@ -993,3 +1056,6 @@ MODULE_VERSION(cxlv, 1);
DRIVER_MODULE(ccv, t6vf, ccv_driver, 0, 0);
MODULE_VERSION(ccv, 1);
+
+DRIVER_MODULE(chev, chvf, chev_driver, 0, 0);
+MODULE_VERSION(chev, 1);
diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c
index 4e81f23dc267..c236ee060bc2 100644
--- a/sys/dev/cxgbe/tom/t4_connect.c
+++ b/sys/dev/cxgbe/tom/t4_connect.c
@@ -89,6 +89,12 @@ do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
INP_WLOCK(inp);
toep->tid = tid;
insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
+ if (sc->params.tid_qid_sel_mask != 0) {
+ update_tid_qid_sel(toep->vi, &toep->params, tid);
+ toep->ofld_txq = &sc->sge.ofld_txq[toep->params.txq_idx];
+ toep->ctrlq = &sc->sge.ctrlq[toep->params.ctrlq_idx];
+ }
+
if (inp->inp_flags & INP_DROPPED) {
/* socket closed by the kernel before hw told us it connected */
@@ -110,15 +116,23 @@ done:
}
void
-act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
+act_open_failure_cleanup(struct adapter *sc, struct toepcb *toep, u_int status)
{
- struct toepcb *toep = lookup_atid(sc, atid);
struct inpcb *inp = toep->inp;
struct toedev *tod = &toep->td->tod;
struct epoch_tracker et;
-
- free_atid(sc, atid);
- toep->tid = -1;
+ struct tom_data *td = sc->tom_softc;
+
+ if (toep->tid >= 0) {
+ free_atid(sc, toep->tid);
+ toep->tid = -1;
+ mtx_lock(&td->toep_list_lock);
+ if (toep->flags & TPF_IN_TOEP_LIST) {
+ toep->flags &= ~TPF_IN_TOEP_LIST;
+ TAILQ_REMOVE(&td->toep_list, toep, link);
+ }
+ mtx_unlock(&td->toep_list_lock);
+ }
CURVNET_SET(toep->vnet);
if (status != EAGAIN)
@@ -158,7 +172,7 @@ do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
release_tid(sc, GET_TID(cpl), toep->ctrlq);
rc = act_open_rpl_status_to_errno(status);
- act_open_failure_cleanup(sc, atid, rc);
+ act_open_failure_cleanup(sc, toep, rc);
return (0);
}
@@ -197,7 +211,7 @@ static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
int idx;
- static const int sz_table[3][2] = {
+ static const int sz_table[4][2] = {
{
sizeof (struct cpl_act_open_req),
sizeof (struct cpl_act_open_req6)
@@ -210,10 +224,14 @@ act_open_cpl_size(struct adapter *sc, int isipv6)
sizeof (struct cpl_t6_act_open_req),
sizeof (struct cpl_t6_act_open_req6)
},
+ {
+ sizeof (struct cpl_t7_act_open_req),
+ sizeof (struct cpl_t7_act_open_req6)
+ },
};
MPASS(chip_id(sc) >= CHELSIO_T4);
- idx = min(chip_id(sc) - CHELSIO_T4, 2);
+ idx = min(chip_id(sc) - CHELSIO_T4, 3);
return (sz_table[idx][!!isipv6]);
}
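The size table gains a fourth row for the T7 CPL formats and the clamp moves from 2 to 3, so T7 and anything newer fall through to the T7 sizes. Assuming the CHELSIO_T4..CHELSIO_T7 constants are consecutive integers (which the subtraction relies on), the row selection behaves like:

#include <assert.h>

enum { CHELSIO_T4 = 4, CHELSIO_T5, CHELSIO_T6, CHELSIO_T7 }; /* illustrative values */
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int
main(void)
{
	assert(MIN(CHELSIO_T4 - CHELSIO_T4, 3) == 0);	/* T4 row */
	assert(MIN(CHELSIO_T6 - CHELSIO_T4, 3) == 2);	/* T6 row */
	assert(MIN(CHELSIO_T7 - CHELSIO_T4, 3) == 3);	/* T7 row */
	assert(MIN(CHELSIO_T7 + 1 - CHELSIO_T4, 3) == 3); /* hypothetical T8: T7 row */
	return (0);
}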
@@ -233,6 +251,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct sockaddr *nam)
{
struct adapter *sc = tod->tod_softc;
+ struct tom_data *td;
struct toepcb *toep = NULL;
struct wrqe *wr = NULL;
if_t rt_ifp = nh->nh_ifp;
@@ -246,6 +265,7 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct offload_settings settings;
struct epoch_tracker et;
uint16_t vid = 0xfff, pcp = 0;
+ uint64_t ntuple;
INP_WLOCK_ASSERT(inp);
KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
@@ -299,10 +319,12 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
V_TID_COOKIE(CPL_COOKIE_TOM);
+ ntuple = select_ntuple(vi, toep->l2te);
if (isipv6) {
struct cpl_act_open_req6 *cpl = wrtod(wr);
struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req6 *cpl7 = (void *)cpl;
if ((inp->inp_vflag & INP_IPV6) == 0)
DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);
@@ -314,18 +336,23 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
+ break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
break;
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
@@ -347,23 +374,28 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
struct cpl_act_open_req *cpl = wrtod(wr);
struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
+ struct cpl_t7_act_open_req *cpl7 = (void *)cpl;
switch (chip_id(sc)) {
case CHELSIO_T4:
INIT_TP_WR(cpl, 0);
- cpl->params = select_ntuple(vi, toep->l2te);
+ cpl->params = htobe32((uint32_t)ntuple);
break;
case CHELSIO_T5:
INIT_TP_WR(cpl5, 0);
cpl5->iss = htobe32(tp->iss);
- cpl5->params = select_ntuple(vi, toep->l2te);
+ cpl5->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
case CHELSIO_T6:
- default:
INIT_TP_WR(cpl6, 0);
cpl6->iss = htobe32(tp->iss);
- cpl6->params = select_ntuple(vi, toep->l2te);
+ cpl6->params = htobe64(V_FILTER_TUPLE(ntuple));
break;
+ case CHELSIO_T7:
+ default:
+ INIT_TP_WR(cpl7, 0);
+ cpl7->iss = htobe32(tp->iss);
+ cpl7->params = htobe64(V_T7_FILTER_TUPLE(ntuple));
+ break;
}
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
qid_atid));
@@ -379,6 +411,12 @@ t4_connect(struct toedev *tod, struct socket *so, struct nhop_object *nh,
}
offload_socket(so, toep);
+ /* Add the TOE PCB to the active list */
+ td = toep->td;
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_INSERT_TAIL(&td->toep_list, toep, link);
+ toep->flags |= TPF_IN_TOEP_LIST;
+ mtx_unlock(&td->toep_list_lock);
NET_EPOCH_ENTER(et);
rc = t4_l2t_send(sc, wr, toep->l2te);
NET_EPOCH_EXIT(et);
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index 842e72bf8b2b..84e31efa8b58 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -127,8 +127,9 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
paramidx = 0;
FLOWC_PARAM(PFNVFN, pfvf);
- FLOWC_PARAM(CH, pi->tx_chan);
- FLOWC_PARAM(PORT, pi->tx_chan);
+ /* Firmware expects the hw port and will translate it to the channel itself. */
+ FLOWC_PARAM(CH, pi->hw_port);
+ FLOWC_PARAM(PORT, pi->hw_port);
FLOWC_PARAM(IQID, toep->ofld_rxq->iq.abs_id);
FLOWC_PARAM(SNDBUF, toep->params.sndbuf);
if (tp) {
@@ -148,6 +149,8 @@ send_flowc_wr(struct toepcb *toep, struct tcpcb *tp)
KASSERT(paramidx == nparams, ("nparams mismatch"));
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
txsd->tx_credits = howmany(flowclen, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
@@ -215,6 +218,8 @@ update_tx_rate_limit(struct adapter *sc, struct toepcb *toep, u_int Bps)
else
flowc->mnemval[0].val = htobe32(tc_idx);
+ KASSERT(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, flowclen16));
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
@@ -491,6 +496,9 @@ t4_close_conn(struct adapter *sc, struct toepcb *toep)
#define MIN_TX_CREDITS(iso) \
(MIN_OFLD_TX_CREDITS + ((iso) ? MIN_ISO_TX_CREDITS : 0))
+_Static_assert(MAX_OFLD_TX_CREDITS <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+
/* Maximum amount of immediate data we could stuff in a WR */
static inline int
max_imm_payload(int tx_credits, int iso)
@@ -612,6 +620,48 @@ write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
__func__, nsegs, start, stop));
}
+bool
+t4_push_raw_wr(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ u_int credits, plen;
+
+ INP_WLOCK_ASSERT(inp);
+ MPASS(mbuf_raw_wr(m));
+ plen = m->m_pkthdr.len;
+ credits = howmany(plen, 16);
+ if (credits > toep->tx_credits)
+ return (false);
+
+ wr = alloc_wrqe(roundup2(plen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL)
+ return (false);
+
+ m_copydata(m, 0, plen, wrtod(wr));
+ m_freem(m);
+
+ toep->tx_credits -= credits;
+ if (toep->tx_credits < MIN_OFLD_TX_CREDITS)
+ toep->flags |= TPF_TX_SUSPENDED;
+
+ KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(credits <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, credits));
+ txsd = &toep->txsd[toep->txsd_pidx];
+ txsd->plen = 0;
+ txsd->tx_credits = credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
+ return (true);
+}
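t4_push_raw_wr() charges one tx credit per 16 bytes of work request, rounded up, and suspends transmission once credits fall below the floor. A worked example of the accounting (the credit floor value here is illustrative, not the driver's real MIN_OFLD_TX_CREDITS):

#include <stdio.h>

#define HOWMANY(x, y) (((x) + (y) - 1) / (y))
#define MIN_OFLD_TX_CREDITS 9	/* illustrative floor */

int
main(void)
{
	unsigned tx_credits = 12;
	unsigned plen = 100;			/* raw WR payload bytes */
	unsigned credits = HOWMANY(plen, 16);	/* 7 credits */

	tx_credits -= credits;			/* 5 left */
	printf("consumed %u, %u left, suspend=%d\n", credits, tx_credits,
	    tx_credits < MIN_OFLD_TX_CREDITS);	/* suspend=1 */
	return (0);
}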
+
/*
* Max number of SGL entries an offload tx work request can have. This is 41
* (1 + 40) for a full 512B work request.
@@ -633,7 +683,7 @@ write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
* stalls). When drop is set this function MUST drop the bytes and wake up any
* writers.
*/
-void
+static void
t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
{
struct mbuf *sndptr, *m, *sb_sndptr;
@@ -644,6 +694,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tx_credits, shove, compl, sowwakeup;
struct ofld_tx_sdesc *txsd;
bool nomap_mbuf_seen;
@@ -688,6 +739,19 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
max_imm = max_imm_payload(tx_credits, 0);
max_nsegs = max_dsgl_nsegs(tx_credits, 0);
+ if (__predict_false((sndptr = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, sndptr)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ m = mbufq_dequeue(pduq);
+ MPASS(m == sndptr);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -703,7 +767,9 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
for (m = sndptr; m != NULL; m = m->m_next) {
int n;
- if ((m->m_flags & M_NOTAVAIL) != 0)
+ if ((m->m_flags & M_NOTREADY) != 0)
+ break;
+ if (plen + m->m_len > MAX_OFLD_TX_SDESC_PLEN)
break;
if (m->m_flags & M_EXTPG) {
#ifdef KERN_TLS
@@ -787,7 +853,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
/* nothing to send */
if (plen == 0) {
- KASSERT(m == NULL || (m->m_flags & M_NOTAVAIL) != 0,
+ KASSERT(m == NULL || (m->m_flags & M_NOTREADY) != 0,
("%s: nothing to send, but m != NULL is ready",
__func__));
break;
@@ -870,6 +936,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -880,7 +948,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
toep->txsd_avail--;
t4_l2t_send(sc, wr, toep->l2te);
- } while (m != NULL && (m->m_flags & M_NOTAVAIL) == 0);
+ } while (m != NULL && (m->m_flags & M_NOTREADY) == 0);
/* Send a FIN if requested, but only if there's no more data to send */
if (m == NULL && toep->flags & TPF_SEND_FIN)
@@ -1211,6 +1279,8 @@ t4_push_pdus(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(plen <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, plen));
txsd->plen = plen;
txsd->tx_credits = credits;
txsd++;
@@ -1240,6 +1310,35 @@ t4_push_data(struct adapter *sc, struct toepcb *toep, int drop)
t4_push_frames(sc, toep, drop);
}
+void
+t4_raw_wr_tx(struct adapter *sc, struct toepcb *toep, struct mbuf *m)
+{
+#ifdef INVARIANTS
+ struct inpcb *inp = toep->inp;
+#endif
+
+ INP_WLOCK_ASSERT(inp);
+
+ /*
+ * If there are other raw WRs enqueued, enqueue to preserve
+ * FIFO ordering.
+ */
+ if (!mbufq_empty(&toep->ulp_pduq)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ return;
+ }
+
+ /*
+ * Cannot call t4_push_data here as that will lock so_snd and
+ * some callers of this run in rx handlers with so_rcv locked.
+ * Instead, just try to transmit this WR.
+ */
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ mbufq_enqueue(&toep->ulp_pduq, m);
+ toep->flags |= TPF_TX_SUSPENDED;
+ }
+}
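The ordering rule in t4_raw_wr_tx() is subtle: even when the WR could go out immediately, it must queue behind anything already in ulp_pduq, so the emptiness check comes before the transmit attempt. A toy model of that rule:

#include <stdbool.h>
#include <stdio.h>

static int pending;		/* stand-in for mbufq_len(&ulp_pduq) */

static bool
try_send_now(int wr)		/* stand-in for t4_push_raw_wr() */
{
	(void)wr;
	return (true);		/* assume credits are available */
}

static void
submit(int wr)
{
	/* Queue-first keeps FIFO order; only an empty queue may bypass. */
	if (pending > 0 || !try_send_now(wr)) {
		pending++;
		printf("WR %d queued (%d pending)\n", wr, pending);
	} else
		printf("WR %d sent directly\n", wr);
}

int
main(void)
{
	submit(1);		/* empty queue: sent directly */
	pending = 1;		/* pretend a WR is already queued */
	submit(2);		/* must queue behind it */
	return (0);
}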
+
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
@@ -1393,6 +1492,7 @@ do_peer_close(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
case TCPS_FIN_WAIT_2:
restore_so_proto(so, inp->inp_vflag & INP_IPV6);
+ t4_pcb_detach(NULL, tp);
tcp_twstart(tp);
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
NET_EPOCH_EXIT(et);
@@ -1454,6 +1554,7 @@ do_close_con_rpl(struct sge_iq *iq, const struct rss_header *rss,
switch (tp->t_state) {
case TCPS_CLOSING: /* see TCPS_FIN_WAIT_2 in do_peer_close too */
restore_so_proto(so, inp->inp_vflag & INP_IPV6);
+ t4_pcb_detach(NULL, tp);
tcp_twstart(tp);
release:
INP_UNLOCK_ASSERT(inp); /* safe, we have a ref on the inp */
@@ -1939,35 +2040,55 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
void
-t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+write_set_tcb_field(struct adapter *sc, void *dst, struct toepcb *toep,
uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
{
- struct wrqe *wr;
- struct cpl_set_tcb_field *req;
- struct ofld_tx_sdesc *txsd;
+ struct cpl_set_tcb_field *req = dst;
MPASS((cookie & ~M_COOKIE) == 0);
if (reply) {
MPASS(cookie != CPL_COOKIE_RESERVED);
}
- wr = alloc_wrqe(sizeof(*req), wrq);
+ INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
+ if (reply == 0) {
+ req->reply_ctrl = htobe16(F_NO_REPLY);
+ } else {
+ const int qid = toep->ofld_rxq->iq.abs_id;
+ if (chip_id(sc) >= CHELSIO_T7) {
+ req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ req->reply_ctrl = htobe16(V_QUEUENO(qid) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
+ }
+ req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
+ req->mask = htobe64(mask);
+ req->val = htobe64(val);
+}
+
+void
+t4_set_tcb_field(struct adapter *sc, struct sge_wrq *wrq, struct toepcb *toep,
+ uint16_t word, uint64_t mask, uint64_t val, int reply, int cookie)
+{
+ struct wrqe *wr;
+ struct ofld_tx_sdesc *txsd;
+ const u_int len = sizeof(struct cpl_set_tcb_field);
+
+ wr = alloc_wrqe(len, wrq);
if (wr == NULL) {
/* XXX */
panic("%s: allocation failure.", __func__);
}
- req = wrtod(wr);
+ write_set_tcb_field(sc, wrtod(wr), toep, word, mask, val, reply,
+ cookie);
- INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, toep->tid);
- req->reply_ctrl = htobe16(V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- if (reply == 0)
- req->reply_ctrl |= htobe16(F_NO_REPLY);
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(cookie));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
if (wrq->eq.type == EQ_OFLD) {
txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = howmany(sizeof(*req), 16);
+ _Static_assert(howmany(len, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
+ txsd->tx_credits = howmany(len, 16);
txsd->plen = 0;
KASSERT(toep->tx_credits >= txsd->tx_credits &&
toep->txsd_avail > 0,
@@ -2124,12 +2245,7 @@ alloc_aiotx_mbuf(struct kaiocb *job, int len)
if (npages < 0)
break;
- m = mb_alloc_ext_pgs(M_WAITOK, aiotx_free_pgs);
- if (m == NULL) {
- vm_page_unhold_pages(pgs, npages);
- break;
- }
-
+ m = mb_alloc_ext_pgs(M_WAITOK, aiotx_free_pgs, M_RDONLY);
m->m_epg_1st_off = pgoff;
m->m_epg_npgs = npages;
if (npages == 1) {
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index c1d4af45fd70..35fb1061d867 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -181,7 +181,7 @@ static void
free_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb)
{
t4_free_page_pods(&drb->prsv);
- contigfree(drb->buf, drb->len, M_CXGBE);
+ free(drb->buf, M_CXGBE);
free(drb, M_CXGBE);
counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1);
free_toepcb(toep);
@@ -242,7 +242,7 @@ alloc_ddp_rcv_buffer(struct toepcb *toep, int how)
error = t4_alloc_page_pods_for_rcvbuf(&td->pr, drb);
if (error != 0) {
- contigfree(drb->buf, drb->len, M_CXGBE);
+ free(drb->buf, M_CXGBE);
free(drb, M_CXGBE);
return (NULL);
}
@@ -250,7 +250,7 @@ alloc_ddp_rcv_buffer(struct toepcb *toep, int how)
error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb);
if (error != 0) {
t4_free_page_pods(&drb->prsv);
- contigfree(drb->buf, drb->len, M_CXGBE);
+ free(drb->buf, M_CXGBE);
free(drb, M_CXGBE);
return (NULL);
}
@@ -546,37 +546,6 @@ insert_ddp_data(struct toepcb *toep, uint32_t n)
sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
static inline void *
-mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
- uint64_t word, uint64_t mask, uint64_t val)
-{
- struct ulptx_idata *ulpsc;
- struct cpl_set_tcb_field_core *req;
-
- ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
- ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = htobe32(sizeof(*req));
-
- req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
- OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
- req->reply_ctrl = htobe16(V_NO_REPLY(1) |
- V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
-
- ulpsc = (struct ulptx_idata *)(req + 1);
- if (LEN__SET_TCB_FIELD_ULP % 16) {
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
- ulpsc->len = htobe32(0);
- return (ulpsc + 1);
- }
- return (ulpsc);
-}
-
-static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
struct ulptx_idata *ulpsc;
@@ -634,21 +603,21 @@ mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
ulpmc = (struct ulp_txpkt *)(wrh + 1);
/* Write the buffer's tag */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
W_TCB_RX_DDP_BUF0_TAG + db_idx,
V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
V_TCB_RX_DDP_BUF0_TAG(prsv->prsv_tag));
/* Update the current offset in the DDP buffer and its total length */
if (db_idx == 0)
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
W_TCB_RX_DDP_BUF0_OFFSET,
V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
V_TCB_RX_DDP_BUF0_OFFSET(offset) |
V_TCB_RX_DDP_BUF0_LEN(len));
else
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
W_TCB_RX_DDP_BUF1_OFFSET,
V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
@@ -656,7 +625,7 @@ mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
V_TCB_RX_DDP_BUF1_LEN((u64)len << 32));
/* Update DDP flags */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_RX_DDP_FLAGS,
ddp_flags_mask, ddp_flags);
/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
@@ -1295,26 +1264,25 @@ set_ddp_ulp_mode(struct toepcb *toep)
* Words 26/27 are zero except for the DDP_OFF flag in
* W_TCB_RX_DDP_FLAGS (27).
*/
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
0xffffffffffffffff, (uint64_t)V_TF_DDP_OFF(1) << 32);
/* Words 28/29 are zero. */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 28,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 28,
0xffffffffffffffff, 0);
/* Words 30/31 are zero. */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
0xffffffffffffffff, 0);
/* Set the ULP mode to ULP_MODE_TCPDDP. */
toep->params.ulp_mode = ULP_MODE_TCPDDP;
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE,
- V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
- V_TCB_ULP_TYPE(ULP_MODE_TCPDDP));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
+ V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TCPDDP));
#ifdef USE_DDP_RX_FLOW_CONTROL
/* Set TF_RX_FLOW_CONTROL_DDP. */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
V_TF_RX_FLOW_CONTROL_DDP(1), V_TF_RX_FLOW_CONTROL_DDP(1));
#endif
@@ -1687,7 +1655,10 @@ t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
INIT_ULPTX_WR(ulpmc, len, 0, 0);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1817,7 +1788,7 @@ t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid,
return (0);
}
-static struct mbuf *
+struct mbuf *
alloc_raw_wr_mbuf(int len)
{
struct mbuf *m;
@@ -1874,7 +1845,10 @@ t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
ulpmc = mtod(m, struct ulp_mem_io *);
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -1954,7 +1928,10 @@ t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -2045,7 +2022,10 @@ t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
ulpmc->cmd = cmd;
- ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
+ if (chip_id(sc) >= CHELSIO_T7)
+ ulpmc->dlen = htobe32(V_T7_ULP_MEMIO_DATA_LEN(chunk >> 5));
+ else
+ ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk >> 5));
ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
@@ -2685,8 +2665,8 @@ sbcopy:
* which will keep it open and keep the TCP PCB attached until
* after the job is completed.
*/
- wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv, ps->len,
- job->aio_received, ddp_flags, ddp_flags_mask);
+ wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv,
+ job->aio_received, ps->len, ddp_flags, ddp_flags_mask);
if (wr == NULL) {
recycle_pageset(toep, ps);
aio_ddp_requeue_one(toep, job);
@@ -2852,6 +2832,14 @@ t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
return (EOPNOTSUPP);
}
+ if ((toep->ddp.flags & DDP_AIO) == 0) {
+ toep->ddp.flags |= DDP_AIO;
+ TAILQ_INIT(&toep->ddp.cached_pagesets);
+ TAILQ_INIT(&toep->ddp.aiojobq);
+ TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task,
+ toep);
+ }
+
/*
* XXX: Think about possibly returning errors for ENOTCONN,
* etc. Perhaps the caller would only queue the request
@@ -2866,14 +2854,6 @@ t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
toep->ddp.waiting_count++;
- if ((toep->ddp.flags & DDP_AIO) == 0) {
- toep->ddp.flags |= DDP_AIO;
- TAILQ_INIT(&toep->ddp.cached_pagesets);
- TAILQ_INIT(&toep->ddp.aiojobq);
- TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task,
- toep);
- }
-
/*
* Try to handle this request synchronously. If this has
* to block because the task is running, it will just bail
diff --git a/sys/dev/cxgbe/tom/t4_listen.c b/sys/dev/cxgbe/tom/t4_listen.c
index d25c161d3f8d..b879f6883f25 100644
--- a/sys/dev/cxgbe/tom/t4_listen.c
+++ b/sys/dev/cxgbe/tom/t4_listen.c
@@ -72,9 +72,9 @@
#include "tom/t4_tom.h"
/* stid services */
-static int alloc_stid(struct adapter *, struct listen_ctx *, int);
+static int alloc_stid(struct adapter *, bool, void *);
static struct listen_ctx *lookup_stid(struct adapter *, int);
-static void free_stid(struct adapter *, struct listen_ctx *);
+static void free_stid(struct adapter *, int, bool);
/* lctx services */
static struct listen_ctx *alloc_lctx(struct adapter *, struct inpcb *,
@@ -88,75 +88,206 @@ static struct inpcb *release_lctx(struct adapter *, struct listen_ctx *);
static void send_abort_rpl_synqe(struct toedev *, struct synq_entry *, int);
-static int
-alloc_stid(struct adapter *sc, struct listen_ctx *lctx, int isipv6)
+static int create_server6(struct adapter *, struct listen_ctx *);
+static int create_server(struct adapter *, struct listen_ctx *);
+
+int
+alloc_stid_tab(struct adapter *sc)
{
struct tid_info *t = &sc->tids;
- u_int stid, n, f, mask;
- struct stid_region *sr = &lctx->stid_region;
- /*
- * An IPv6 server needs 2 naturally aligned stids (1 stid = 4 cells) in
- * the TCAM. The start of the stid region is properly aligned (the chip
- * requires each region to be 128-cell aligned).
- */
- n = isipv6 ? 2 : 1;
- mask = n - 1;
- KASSERT((t->stid_base & mask) == 0 && (t->nstids & mask) == 0,
- ("%s: stid region (%u, %u) not properly aligned. n = %u",
- __func__, t->stid_base, t->nstids, n));
+ MPASS(t->nstids > 0);
+ MPASS(t->stid_tab == NULL);
+
+ t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
+ M_ZERO | M_NOWAIT);
+ if (t->stid_tab == NULL)
+ return (ENOMEM);
+ t->stid_bitmap = bit_alloc(t->nstids, M_CXGBE, M_NOWAIT);
+ if (t->stid_bitmap == NULL) {
+ free(t->stid_tab, M_CXGBE);
+ t->stid_tab = NULL;
+ return (ENOMEM);
+ }
+ mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
+ t->stids_in_use = 0;
+
+ return (0);
+}
+
+void
+free_stid_tab(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+
+ KASSERT(t->stids_in_use == 0,
+ ("%s: %d tids still in use.", __func__, t->stids_in_use));
+
+ if (mtx_initialized(&t->stid_lock))
+ mtx_destroy(&t->stid_lock);
+ free(t->stid_tab, M_CXGBE);
+ t->stid_tab = NULL;
+ free(t->stid_bitmap, M_CXGBE);
+ t->stid_bitmap = NULL;
+}
+
+void
+stop_stid_tab(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+ struct tom_data *td = sc->tom_softc;
+ struct listen_ctx *lctx;
+ struct synq_entry *synqe;
+ int i, ntids;
+
+ mtx_lock(&t->stid_lock);
+ t->stid_tab_stopped = true;
+ mtx_unlock(&t->stid_lock);
+
+ mtx_lock(&td->lctx_hash_lock);
+ for (i = 0; i <= td->listen_mask; i++) {
+ LIST_FOREACH(lctx, &td->listen_hash[i], link)
+ lctx->flags &= ~(LCTX_RPL_PENDING | LCTX_SETUP_IN_HW);
+ }
+ mtx_unlock(&td->lctx_hash_lock);
+
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_FOREACH(synqe, &td->synqe_list, link) {
+ MPASS(sc->incarnation == synqe->incarnation);
+ MPASS(synqe->tid >= 0);
+ MPASS(synqe == lookup_tid(sc, synqe->tid));
+ /* Remove tid from the lookup table immediately. */
+ CTR(KTR_CXGBE, "%s: tid %d@%d STRANDED, removed from table",
+ __func__, synqe->tid, synqe->incarnation);
+ ntids = synqe->lctx->inp->inp_vflag & INP_IPV6 ? 2 : 1;
+ remove_tid(sc, synqe->tid, ntids);
+#if 0
+ /* synqe->tid is stale now but left alone for debug. */
+ synqe->tid = -1;
+#endif
+ }
+ MPASS(TAILQ_EMPTY(&td->stranded_synqe));
+ TAILQ_CONCAT(&td->stranded_synqe, &td->synqe_list, link);
+ MPASS(TAILQ_EMPTY(&td->synqe_list));
+ mtx_unlock(&td->toep_list_lock);
+}
+
+void
+restart_stid_tab(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+ struct tom_data *td = sc->tom_softc;
+ struct listen_ctx *lctx;
+ int i;
+
+ mtx_lock(&td->lctx_hash_lock);
+ for (i = 0; i <= td->listen_mask; i++) {
+ LIST_FOREACH(lctx, &td->listen_hash[i], link) {
+ MPASS((lctx->flags & (LCTX_RPL_PENDING | LCTX_SETUP_IN_HW)) == 0);
+ lctx->flags |= LCTX_RPL_PENDING;
+ if (lctx->inp->inp_vflag & INP_IPV6)
+ create_server6(sc, lctx);
+ else
+ create_server(sc, lctx);
+ }
+ }
+ mtx_unlock(&td->lctx_hash_lock);
+
+ mtx_lock(&t->stid_lock);
+ t->stid_tab_stopped = false;
+ mtx_unlock(&t->stid_lock);
+}
+
+static int
+alloc_stid(struct adapter *sc, bool isipv6, void *ctx)
+{
+ struct tid_info *t = &sc->tids;
+ const u_int n = isipv6 ? 2 : 1;
+ int stid, pair_stid;
+ u_int i;
+ ssize_t val;
mtx_lock(&t->stid_lock);
- if (n > t->nstids - t->stids_in_use) {
+ MPASS(t->stids_in_use <= t->nstids);
+ if (n > t->nstids - t->stids_in_use || t->stid_tab_stopped) {
mtx_unlock(&t->stid_lock);
return (-1);
}
- if (t->nstids_free_head >= n) {
+ stid = -1;
+ if (isipv6) {
/*
- * This allocation will definitely succeed because the region
- * starts at a good alignment and we just checked we have enough
- * stids free.
+ * An IPv6 server needs 2 naturally aligned stids (1 stid = 4
+ * cells) in the TCAM. We know that the start of the stid
+ * region is properly aligned already (the chip requires each
+ * region to be 128-cell aligned).
*/
- f = t->nstids_free_head & mask;
- t->nstids_free_head -= n + f;
- stid = t->nstids_free_head;
- TAILQ_INSERT_HEAD(&t->stids, sr, link);
+ for (i = 0; i + 1 < t->nstids; i = roundup2(val + 1, 2)) {
+ bit_ffc_area_at(t->stid_bitmap, i, t->nstids, 2, &val);
+ if (val == -1)
+ break;
+ if ((val & 1) == 0) {
+ stid = val;
+ break;
+ }
+ }
} else {
- struct stid_region *s;
-
- stid = t->nstids_free_head;
- TAILQ_FOREACH(s, &t->stids, link) {
- stid += s->used + s->free;
- f = stid & mask;
- if (s->free >= n + f) {
- stid -= n + f;
- s->free -= n + f;
- TAILQ_INSERT_AFTER(&t->stids, s, sr, link);
- goto allocated;
+ /*
+ * An IPv4 server needs one stid without any alignment
+ * requirements. But we try extra hard to find an available
+ * stid adjacent to a used stid so that free "stid-pairs" are
+ * left intact for IPv6.
+ */
+ bit_ffc_at(t->stid_bitmap, 0, t->nstids, &val);
+ while (val != -1) {
+ if (stid == -1) {
+ /*
+ * First usable stid. Look no further if it's
+ * an ideal fit.
+ */
+ stid = val;
+ if (val & 1 || bit_test(t->stid_bitmap, val + 1))
+ break;
+ } else {
+ /*
+ * We have an unused stid already but are now
+ * looking for in-use stids because we'd prefer
+ * to grab an unused stid adjacent to one that's
+ * in use.
+ *
+ * Odd stids pair with the previous stid and
+ * even ones pair with the next stid.
+ */
+ pair_stid = val & 1 ? val - 1 : val + 1;
+ if (bit_test(t->stid_bitmap, pair_stid) == 0) {
+ stid = pair_stid;
+ break;
+ }
}
+ val = roundup2(val + 1, 2);
+ if (val >= t->nstids)
+ break;
+ bit_ffs_at(t->stid_bitmap, val, t->nstids, &val);
}
+ }
- if (__predict_false(stid != t->nstids)) {
- panic("%s: stids TAILQ (%p) corrupt."
- " At %d instead of %d at the end of the queue.",
- __func__, &t->stids, stid, t->nstids);
+ if (stid >= 0) {
+ MPASS(stid + n - 1 < t->nstids);
+ MPASS(bit_ntest(t->stid_bitmap, stid, stid + n - 1, 0));
+ bit_nset(t->stid_bitmap, stid, stid + n - 1);
+ t->stids_in_use += n;
+ t->stid_tab[stid] = ctx;
+#ifdef INVARIANTS
+ if (n == 2) {
+ MPASS((stid & 1) == 0);
+ t->stid_tab[stid + 1] = NULL;
}
-
- mtx_unlock(&t->stid_lock);
- return (-1);
+#endif
+ stid += t->stid_base;
}
-
-allocated:
- sr->used = n;
- sr->free = f;
- t->stids_in_use += n;
- t->stid_tab[stid] = lctx;
mtx_unlock(&t->stid_lock);
-
- KASSERT(((stid + t->stid_base) & mask) == 0,
- ("%s: EDOOFUS.", __func__));
- return (stid + t->stid_base);
+ return (stid);
}
static struct listen_ctx *
@@ -168,25 +299,28 @@ lookup_stid(struct adapter *sc, int stid)
}
static void
-free_stid(struct adapter *sc, struct listen_ctx *lctx)
+free_stid(struct adapter *sc, int stid, bool isipv6)
{
struct tid_info *t = &sc->tids;
- struct stid_region *sr = &lctx->stid_region;
- struct stid_region *s;
-
- KASSERT(sr->used > 0, ("%s: nonsense free (%d)", __func__, sr->used));
+ const u_int n = isipv6 ? 2 : 1;
mtx_lock(&t->stid_lock);
- s = TAILQ_PREV(sr, stid_head, link);
- if (s != NULL)
- s->free += sr->used + sr->free;
- else
- t->nstids_free_head += sr->used + sr->free;
- KASSERT(t->stids_in_use >= sr->used,
- ("%s: stids_in_use (%u) < stids being freed (%u)", __func__,
- t->stids_in_use, sr->used));
- t->stids_in_use -= sr->used;
- TAILQ_REMOVE(&t->stids, sr, link);
+ MPASS(stid >= t->stid_base);
+ stid -= t->stid_base;
+ MPASS(stid + n - 1 < t->nstids);
+ MPASS(t->stids_in_use <= t->nstids);
+ MPASS(t->stids_in_use >= n);
+ MPASS(t->stid_tab[stid] != NULL);
+#ifdef INVARIANTS
+ if (n == 2) {
+ MPASS((stid & 1) == 0);
+ MPASS(t->stid_tab[stid + 1] == NULL);
+ }
+#endif
+ MPASS(bit_ntest(t->stid_bitmap, stid, stid + n - 1, 1));
+ bit_nclear(t->stid_bitmap, stid, stid + n - 1);
+ t->stid_tab[stid] = NULL;
+ t->stids_in_use -= n;
mtx_unlock(&t->stid_lock);
}
@@ -201,13 +335,14 @@ alloc_lctx(struct adapter *sc, struct inpcb *inp, struct vi_info *vi)
if (lctx == NULL)
return (NULL);
- lctx->stid = alloc_stid(sc, lctx, inp->inp_vflag & INP_IPV6);
+ lctx->isipv6 = inp->inp_vflag & INP_IPV6;
+ lctx->stid = alloc_stid(sc, lctx->isipv6, lctx);
if (lctx->stid < 0) {
free(lctx, M_CXGBE);
return (NULL);
}
- if (inp->inp_vflag & INP_IPV6 &&
+ if (lctx->isipv6 &&
!IN6_ARE_ADDR_EQUAL(&in6addr_any, &inp->in6p_laddr)) {
lctx->ce = t4_get_clip_entry(sc, &inp->in6p_laddr, true);
if (lctx->ce == NULL) {
@@ -243,7 +378,7 @@ free_lctx(struct adapter *sc, struct listen_ctx *lctx)
if (lctx->ce)
t4_release_clip_entry(sc, lctx->ce);
- free_stid(sc, lctx);
+ free_stid(sc, lctx->stid, lctx->isipv6);
free(lctx, M_CXGBE);
return (in_pcbrele_wlocked(inp));
@@ -373,10 +508,11 @@ send_flowc_wr_synqe(struct adapter *sc, struct synq_entry *synqe)
V_FW_WR_FLOWID(synqe->tid));
flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
flowc->mnemval[0].val = htobe32(pfvf);
+ /* Firmware expects the hw port and will translate it to the channel itself. */
flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
- flowc->mnemval[1].val = htobe32(pi->tx_chan);
+ flowc->mnemval[1].val = htobe32(pi->hw_port);
flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
- flowc->mnemval[2].val = htobe32(pi->tx_chan);
+ flowc->mnemval[2].val = htobe32(pi->hw_port);
flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
flowc->mnemval[3].val = htobe32(ofld_rxq->iq.abs_id);
flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDBUF;
@@ -632,12 +768,15 @@ t4_listen_stop(struct toedev *tod, struct tcpcb *tp)
return (EINPROGRESS);
}
- destroy_server(sc, lctx);
+ if (lctx->flags & LCTX_SETUP_IN_HW)
+ destroy_server(sc, lctx);
+ else
+ inp = release_lctx(sc, lctx);
return (0);
}
static inline struct synq_entry *
-alloc_synqe(struct adapter *sc __unused, struct listen_ctx *lctx, int flags)
+alloc_synqe(struct adapter *sc, struct listen_ctx *lctx, int flags)
{
struct synq_entry *synqe;
@@ -647,6 +786,7 @@ alloc_synqe(struct adapter *sc __unused, struct listen_ctx *lctx, int flags)
synqe = malloc(sizeof(*synqe), M_CXGBE, flags);
if (__predict_true(synqe != NULL)) {
synqe->flags = TPF_SYNQE;
+ synqe->incarnation = sc->incarnation;
refcount_init(&synqe->refcnt, 1);
synqe->lctx = lctx;
hold_lctx(lctx); /* Every synqe has a ref on its lctx. */
@@ -761,8 +901,9 @@ do_pass_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
__func__, stid, status, lctx->flags);
lctx->flags &= ~LCTX_RPL_PENDING;
-
- if (status != CPL_ERR_NONE)
+ if (status == CPL_ERR_NONE)
+ lctx->flags |= LCTX_SETUP_IN_HW;
+ else
log(LOG_ERR, "listener (stid %u) failed: %d\n", stid, status);
#ifdef INVARIANTS
@@ -849,16 +990,22 @@ do_close_server_rpl(struct sge_iq *iq, const struct rss_header *rss,
static void
done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
{
+ struct tom_data *td = sc->tom_softc;
struct listen_ctx *lctx = synqe->lctx;
struct inpcb *inp = lctx->inp;
struct l2t_entry *e = &sc->l2t->l2tab[synqe->params.l2t_idx];
int ntids;
INP_WLOCK_ASSERT(inp);
- ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1;
- remove_tid(sc, synqe->tid, ntids);
- release_tid(sc, synqe->tid, lctx->ctrlq);
+ if (synqe->tid != -1) {
+ ntids = inp->inp_vflag & INP_IPV6 ? 2 : 1;
+ remove_tid(sc, synqe->tid, ntids);
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_REMOVE(&td->synqe_list, synqe, link);
+ mtx_unlock(&td->toep_list_lock);
+ release_tid(sc, synqe->tid, lctx->ctrlq);
+ }
t4_l2t_release(e);
inp = release_synqe(sc, synqe);
if (inp)
@@ -866,10 +1013,8 @@ done_with_synqe(struct adapter *sc, struct synq_entry *synqe)
}
void
-synack_failure_cleanup(struct adapter *sc, int tid)
+synack_failure_cleanup(struct adapter *sc, struct synq_entry *synqe)
{
- struct synq_entry *synqe = lookup_tid(sc, tid);
-
INP_WLOCK(synqe->lctx->inp);
done_with_synqe(sc, synqe);
}
@@ -961,6 +1106,7 @@ void
t4_offload_socket(struct toedev *tod, void *arg, struct socket *so)
{
struct adapter *sc = tod->tod_softc;
+ struct tom_data *td = sc->tom_softc;
struct synq_entry *synqe = arg;
struct inpcb *inp = sotoinpcb(so);
struct toepcb *toep = synqe->toep;
@@ -976,6 +1122,12 @@ t4_offload_socket(struct toedev *tod, void *arg, struct socket *so)
toep->flags |= TPF_CPL_PENDING;
update_tid(sc, synqe->tid, toep);
synqe->flags |= TPF_SYNQE_EXPANDED;
+ mtx_lock(&td->toep_list_lock);
+ /* Remove synqe from its list and add the TOE PCB to the active list. */
+ TAILQ_REMOVE(&td->synqe_list, synqe, link);
+ TAILQ_INSERT_TAIL(&td->toep_list, toep, link);
+ toep->flags |= TPF_IN_TOEP_LIST;
+ mtx_unlock(&td->toep_list_lock);
inp->inp_flowtype = (inp->inp_vflag & INP_IPV6) ?
M_HASHTYPE_RSS_TCP_IPV6 : M_HASHTYPE_RSS_TCP_IPV4;
inp->inp_flowid = synqe->rss_hash;
@@ -1177,6 +1329,7 @@ do_pass_accept_req(struct sge_iq *iq, const struct rss_header *rss,
struct mbuf *m)
{
struct adapter *sc = iq->adapter;
+ struct tom_data *td = sc->tom_softc;
struct toedev *tod;
const struct cpl_pass_accept_req *cpl = mtod(m, const void *);
unsigned int stid = G_PASS_OPEN_TID(be32toh(cpl->tos_stid));
@@ -1355,6 +1508,8 @@ found:
init_conn_params(vi, &settings, &inc, so, &cpl->tcpopt, e->idx,
&synqe->params);
+ if (sc->params.tid_qid_sel_mask != 0)
+ update_tid_qid_sel(vi, &synqe->params, tid);
/*
* If all goes well t4_syncache_respond will get called during
@@ -1374,15 +1529,20 @@ found:
synqe->tid = tid;
synqe->syn = m;
m = NULL;
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_INSERT_TAIL(&td->synqe_list, synqe, link);
+ mtx_unlock(&td->toep_list_lock);
if (send_synack(sc, synqe, opt0, opt2, tid) != 0) {
remove_tid(sc, tid, ntids);
m = synqe->syn;
synqe->syn = NULL;
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_REMOVE(&td->synqe_list, synqe, link);
+ mtx_unlock(&td->toep_list_lock);
NET_EPOCH_EXIT(et);
REJECT_PASS_ACCEPT_REQ(true);
}
-
CTR6(KTR_CXGBE,
"%s: stid %u, tid %u, synqe %p, opt0 %#016lx, opt2 %#08x",
__func__, stid, tid, synqe, be64toh(opt0), be32toh(opt2));
@@ -1437,7 +1597,7 @@ synqe_to_protohdrs(struct adapter *sc, struct synq_entry *synqe,
pass_accept_req_to_protohdrs(sc, synqe->syn, inc, th, &iptos);
/* modify parts to make it look like the ACK to our SYN|ACK */
- th->th_flags = TH_ACK;
+ tcp_set_flags(th, TH_ACK);
th->th_ack = synqe->iss + 1;
th->th_seq = be32toh(cpl->rcv_isn);
bzero(to, sizeof(*to));
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
index bdd03edd3a6f..bbcc1c88c3db 100644
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -61,11 +61,21 @@
static void
t4_set_tls_tcb_field(struct toepcb *toep, uint16_t word, uint64_t mask,
- uint64_t val)
+ uint64_t val, int reply, int cookie)
{
struct adapter *sc = td_adapter(toep->td);
+ struct mbuf *m;
+
+ m = alloc_raw_wr_mbuf(sizeof(struct cpl_set_tcb_field));
+ if (m == NULL) {
+ /* XXX */
+ panic("%s: out of memory", __func__);
+ }
+
+ write_set_tcb_field(sc, mtod(m, void *), toep, word, mask, val, reply,
+ cookie);
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, word, mask, val, 0, 0);
+ t4_raw_wr_tx(sc, toep, m);
}
/* TLS and DTLS common routines */
@@ -88,10 +98,9 @@ tls_tx_key(struct toepcb *toep)
static void
t4_set_rx_quiesce(struct toepcb *toep)
{
- struct adapter *sc = td_adapter(toep->td);
- t4_set_tcb_field(sc, &toep->ofld_txq->wrq, toep, W_TCB_T_FLAGS,
- V_TF_RX_QUIESCE(1), V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1),
+ V_TF_RX_QUIESCE(1), 1, CPL_COOKIE_TOM);
}
/* Clear TF_RX_QUIESCE to re-enable receive. */
@@ -99,7 +108,7 @@ static void
t4_clear_rx_quiesce(struct toepcb *toep)
{
- t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0);
+ t4_set_tls_tcb_field(toep, W_TCB_T_FLAGS, V_TF_RX_QUIESCE(1), 0, 0, 0);
}
/* TLS/DTLS content type for CPL SFO */
@@ -145,16 +154,15 @@ get_tp_plen_max(struct ktls_session *tls)
return (tls->params.max_frame_len <= 8192 ? plen : FC_TP_PLEN_MAX);
}
-/* Send request to get the key-id */
+/* Send request to save the key in on-card memory. */
static int
tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
int direction)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
struct adapter *sc = td_adapter(toep->td);
- struct ofld_tx_sdesc *txsd;
int keyid;
- struct wrqe *wr;
+ struct mbuf *m;
struct tls_key_req *kwr;
struct tls_keyctx *kctx;
@@ -173,12 +181,12 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
return (ENOSPC);
}
- wr = alloc_wrqe(TLS_KEY_WR_SZ, &toep->ofld_txq->wrq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(TLS_KEY_WR_SZ);
+ if (m == NULL) {
t4_free_tls_keyid(sc, keyid);
return (ENOMEM);
}
- kwr = wrtod(wr);
+ kwr = mtod(m, struct tls_key_req *);
memset(kwr, 0, TLS_KEY_WR_SZ);
t4_write_tlskey_wr(tls, direction, toep->tid, F_FW_WR_COMPL, keyid,
@@ -190,15 +198,7 @@ tls_program_key_id(struct toepcb *toep, struct ktls_session *tls,
tls_ofld->rx_key_addr = keyid;
t4_tls_key_ctx(tls, direction, kctx);
- txsd = &toep->txsd[toep->txsd_pidx];
- txsd->tx_credits = DIV_ROUND_UP(TLS_KEY_WR_SZ, 16);
- txsd->plen = 0;
- toep->tx_credits -= txsd->tx_credits;
- if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
- toep->txsd_pidx = 0;
- toep->txsd_avail--;
-
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
return (0);
}
@@ -207,7 +207,7 @@ int
tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
{
struct adapter *sc = td_adapter(toep->td);
- int error, explicit_iv_size, mac_first;
+ int error, iv_size, mac_first;
if (!can_tls_offload(sc))
return (EINVAL);
@@ -228,6 +228,21 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
}
}
+ /* TLS 1.1 through TLS 1.3 are currently supported. */
+ if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
+ tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
+ tls->params.tls_vminor > TLS_MINOR_VER_THREE) {
+ return (EPROTONOSUPPORT);
+ }
+
+ /* TLS 1.3 is only supported on T7+. */
+ if (tls->params.tls_vminor == TLS_MINOR_VER_THREE) {
+ if (is_t6(sc)) {
+ return (EPROTONOSUPPORT);
+ }
+ }
+
+ /* Sanity check values in *tls. */
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_CBC:
/* XXX: Explicitly ignore any provided IV. */
@@ -247,13 +262,10 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EPROTONOSUPPORT);
}
- explicit_iv_size = AES_BLOCK_LEN;
+ iv_size = AES_BLOCK_LEN;
mac_first = 1;
break;
case CRYPTO_AES_NIST_GCM_16:
- if (tls->params.iv_len != SALT_SIZE) {
- return (EINVAL);
- }
switch (tls->params.cipher_key_len) {
case 128 / 8:
case 192 / 8:
@@ -262,20 +274,19 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
default:
return (EINVAL);
}
- explicit_iv_size = 8;
+
+ /*
+ * For TLS 1.2 the IV size is the size of the explicit
+ * IV in the record header. For TLS 1.3 it is the size
+ * of the sequence number.
+ */
+ iv_size = 8;
mac_first = 0;
break;
default:
return (EPROTONOSUPPORT);
}
- /* Only TLS 1.1 and TLS 1.2 are currently supported. */
- if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
- tls->params.tls_vminor < TLS_MINOR_VER_ONE ||
- tls->params.tls_vminor > TLS_MINOR_VER_TWO) {
- return (EPROTONOSUPPORT);
- }
-
/* Bail if we already have a key. */
if (direction == KTLS_TX) {
if (toep->tls.tx_key_addr != -1)
@@ -289,6 +300,7 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
if (error)
return (error);
+ toep->tls.tls13 = tls->params.tls_vminor == TLS_MINOR_VER_THREE;
if (direction == KTLS_TX) {
toep->tls.scmd0.seqno_numivs =
(V_SCMD_SEQ_NO_CTRL(3) |
@@ -298,14 +310,14 @@ tls_alloc_ktls(struct toepcb *toep, struct ktls_session *tls, int direction)
V_SCMD_CIPH_MODE(t4_tls_cipher_mode(tls)) |
V_SCMD_AUTH_MODE(t4_tls_auth_mode(tls)) |
V_SCMD_HMAC_CTRL(t4_tls_hmac_ctrl(tls)) |
- V_SCMD_IV_SIZE(explicit_iv_size / 2));
+ V_SCMD_IV_SIZE(iv_size / 2));
toep->tls.scmd0.ivgen_hdrlen =
(V_SCMD_IV_GEN_CTRL(1) |
V_SCMD_KEY_CTX_INLINE(0) |
V_SCMD_TLS_FRAG_ENABLE(1));
- toep->tls.iv_len = explicit_iv_size;
+ toep->tls.iv_len = iv_size;
toep->tls.frag_size = tls->params.max_frame_len;
toep->tls.fcplenmax = get_tp_plen_max(tls);
toep->tls.expn_per_ulp = tls->params.tls_hlen +
@@ -352,7 +364,8 @@ tls_uninit_toep(struct toepcb *toep)
static void
write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
- unsigned int plen, unsigned int expn, uint8_t credits, int shove)
+ unsigned int plen, unsigned int expn, uint8_t credits, int shove,
+ int num_ivs)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
unsigned int len = plen + expn;
@@ -365,7 +378,7 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
txwr->plen = htobe32(len);
txwr->lsodisable_to_flags = htobe32(V_TX_ULP_MODE(ULP_MODE_TLS) |
V_TX_URG(0) | /* F_T6_TX_FORCE | */ V_TX_SHOVE(shove));
- txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(1) |
+ txwr->ctxloc_to_exp = htobe32(V_FW_TLSTX_DATA_WR_NUMIVS(num_ivs) |
V_FW_TLSTX_DATA_WR_EXP(expn) |
V_FW_TLSTX_DATA_WR_CTXLOC(TLS_SFO_WR_CONTEXTLOC_DDR) |
V_FW_TLSTX_DATA_WR_IVDSGL(0) |
@@ -381,20 +394,20 @@ write_tlstx_wr(struct fw_tlstx_data_wr *txwr, struct toepcb *toep,
static void
write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
- struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)
+ struct tls_hdr *tls_hdr, unsigned int plen, uint8_t rec_type,
+ uint64_t seqno)
{
struct tls_ofld_info *tls_ofld = &toep->tls;
int data_type, seglen;
seglen = plen;
- data_type = tls_content_type(tls_hdr->type);
+ data_type = tls_content_type(rec_type);
cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
V_CPL_TX_TLS_SFO_DATA_TYPE(data_type) |
V_CPL_TX_TLS_SFO_CPL_LEN(2) | V_CPL_TX_TLS_SFO_SEG_LEN(seglen));
cpl->pld_len = htobe32(plen);
if (data_type == CPL_TX_TLS_SFO_TYPE_CUSTOM)
- cpl->type_protover = htobe32(
- V_CPL_TX_TLS_SFO_TYPE(tls_hdr->type));
+ cpl->type_protover = htobe32(V_CPL_TX_TLS_SFO_TYPE(rec_type));
cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
V_SCMD_NUM_IVS(1));
cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
@@ -494,9 +507,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
struct tcpcb *tp = intotcpcb(inp);
struct socket *so = inp->inp_socket;
struct sockbuf *sb = &so->so_snd;
+ struct mbufq *pduq = &toep->ulp_pduq;
int tls_size, tx_credits, shove, sowwakeup;
struct ofld_tx_sdesc *txsd;
char *buf;
+ bool tls13;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
@@ -532,10 +547,23 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
return;
}
+ tls13 = toep->tls.tls13;
txsd = &toep->txsd[toep->txsd_pidx];
for (;;) {
tx_credits = min(toep->tx_credits, MAX_OFLD_TX_CREDITS);
+ if (__predict_false((m = mbufq_first(pduq)) != NULL)) {
+ if (!t4_push_raw_wr(sc, toep, m)) {
+ toep->flags |= TPF_TX_SUSPENDED;
+ return;
+ }
+
+ (void)mbufq_dequeue(pduq);
+
+ txsd = &toep->txsd[toep->txsd_pidx];
+ continue;
+ }
+
SOCKBUF_LOCK(sb);
sowwakeup = drop;
if (drop) {
@@ -563,7 +591,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
* If there is no ready data to send, wait until more
* data arrives.
*/
- if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
+ if (m == NULL || (m->m_flags & M_NOTREADY) != 0) {
if (sowwakeup)
sowwakeup_locked(so);
else
@@ -586,9 +614,11 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
sizeof(struct cpl_tx_tls_sfo) +
sizeof(struct ulptx_idata) + sizeof(struct ulptx_sc_memrd);
- /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
- MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
- wr_len += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Explicit IVs for AES-CBC and AES-GCM are <= 16. */
+ MPASS(toep->tls.iv_len <= AES_BLOCK_LEN);
+ wr_len += AES_BLOCK_LEN;
+ }
/* Account for SGL in work request length. */
nsegs = count_ext_pgs_segs(m);
@@ -614,7 +644,7 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
/* Shove if there is no additional data pending. */
shove = ((m->m_next == NULL ||
- (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
+ (m->m_next->m_flags & M_NOTREADY) != 0)) &&
(tp->t_flags & TF_MORETOCOME) == 0;
if (sb->sb_flags & SB_AUTOSIZE &&
@@ -658,8 +688,10 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
expn_size = m->m_epg_hdrlen +
m->m_epg_trllen;
tls_size = m->m_len - expn_size;
- write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove);
- write_tlstx_cpl(cpl, toep, thdr, tls_size, m->m_epg_seqno);
+ write_tlstx_wr(txwr, toep, tls_size, expn_size, credits, shove,
+ tls13 ? 0 : 1);
+ write_tlstx_cpl(cpl, toep, thdr, tls_size,
+ tls13 ? m->m_epg_record_type : thdr->type, m->m_epg_seqno);
idata = (struct ulptx_idata *)(cpl + 1);
idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
@@ -670,10 +702,12 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
V_ULPTX_LEN16(toep->tls.tx_key_info_size >> 4));
memrd->addr = htobe32(toep->tls.tx_key_addr >> 5);
- /* Copy IV. */
buf = (char *)(memrd + 1);
- memcpy(buf, thdr + 1, toep->tls.iv_len);
- buf += AES_BLOCK_LEN;
+ if (!tls13) {
+ /* Copy IV. */
+ memcpy(buf, thdr + 1, toep->tls.iv_len);
+ buf += AES_BLOCK_LEN;
+ }
write_ktlstx_sgl(buf, m, nsegs);
@@ -694,6 +728,8 @@ t4_push_ktls(struct adapter *sc, struct toepcb *toep, int drop)
toep->flags |= TPF_TX_SUSPENDED;
KASSERT(toep->txsd_avail > 0, ("%s: no txsd", __func__));
+ KASSERT(m->m_len <= MAX_OFLD_TX_SDESC_PLEN,
+ ("%s: plen %u too large", __func__, m->m_len));
txsd->plen = m->m_len;
txsd->tx_credits = credits;
txsd++;
@@ -793,8 +829,8 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
struct sockbuf *sb;
struct mbuf *tls_data;
struct tls_get_record *tgr;
- struct mbuf *control;
- int pdu_length, trailer_len;
+ struct mbuf *control, *n;
+ int pdu_length, resid, trailer_len;
#if defined(KTR) || defined(INVARIANTS)
int len;
#endif
@@ -842,7 +878,9 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
/*
* The payload of this CPL is the TLS header followed by
- * additional fields.
+ * additional fields. For TLS 1.3 the type field holds the
+ * inner record type and the length field has been updated to
+ * strip the inner record type, padding, and MAC.
*/
KASSERT(m->m_len >= sizeof(*tls_hdr_pkt),
("%s: payload too small", __func__));
@@ -854,7 +892,14 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
("%s: sequence mismatch", __func__));
}
- /* Report decryption errors as EBADMSG. */
+ /*
+ * Report decryption errors as EBADMSG.
+ *
+ * XXX: To support rekeying for TLS 1.3 this will eventually
+ * have to be updated to recrypt the data with the old key and
+ * then decrypt with the new key. Punt for now as KTLS
+ * doesn't yet support rekeying.
+ */
if ((tls_hdr_pkt->res_to_mac_error & M_TLSRX_HDR_PKT_ERROR) != 0) {
CTR4(KTR_CXGBE, "%s: tid %u TLS error %#x ddp_vld %#x",
__func__, toep->tid, tls_hdr_pkt->res_to_mac_error,
@@ -872,6 +917,33 @@ do_rx_tls_cmp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
return (0);
}
+ /* For TLS 1.3 trim the header and trailer. */
+ if (toep->tls.tls13) {
+ KASSERT(tls_data != NULL, ("%s: TLS 1.3 record without data",
+ __func__));
+ MPASS(tls_data->m_pkthdr.len == pdu_length);
+ m_adj(tls_data, sizeof(struct tls_record_layer));
+ if (tls_data->m_pkthdr.len > be16toh(tls_hdr_pkt->length))
+ tls_data->m_pkthdr.len = be16toh(tls_hdr_pkt->length);
+ resid = tls_data->m_pkthdr.len;
+ if (resid == 0) {
+ m_freem(tls_data);
+ tls_data = NULL;
+ } else {
+ for (n = tls_data;; n = n->m_next) {
+ if (n->m_len < resid) {
+ resid -= n->m_len;
+ continue;
+ }
+
+ n->m_len = resid;
+ m_freem(n->m_next);
+ n->m_next = NULL;
+ break;
+ }
+ }
+ }
+
/* Handle data received after the socket is closed. */
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
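/*
 * Illustrative aside (not part of the patch): a standalone model of
 * the trim loop above.  'struct buf_seg' is a made-up stand-in for
 * struct mbuf; the caller guarantees 0 < resid <= total chain length,
 * matching how the driver handles resid == 0 separately.
 */
#include <stdlib.h>

struct buf_seg {
	struct buf_seg *next;
	size_t len;
};

static void
chain_trim(struct buf_seg *b, size_t resid)
{
	struct buf_seg *n, *tmp;

	for (n = b;; n = n->next) {
		if (n->len < resid) {
			resid -= n->len;	/* keep whole segment */
			continue;
		}
		n->len = resid;			/* truncate this segment */
		while ((tmp = n->next) != NULL) {	/* free the rest */
			n->next = tmp->next;
			free(tmp);
		}
		break;
	}
}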
@@ -1075,69 +1147,61 @@ out:
m_freem(m);
}
-/* SET_TCB_FIELD sent as a ULP command looks like this */
-#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
-
-static inline void *
-mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
- uint64_t word, uint64_t mask, uint64_t val)
-{
- struct ulptx_idata *ulpsc;
- struct cpl_set_tcb_field_core *req;
-
- ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
- ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = htobe32(sizeof(*req));
-
- req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
- OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
- req->reply_ctrl = htobe16(V_NO_REPLY(1) |
- V_QUEUENO(toep->ofld_rxq->iq.abs_id));
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
-
- ulpsc = (struct ulptx_idata *)(req + 1);
- if (LEN__SET_TCB_FIELD_ULP % 16) {
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
- ulpsc->len = htobe32(0);
- return (ulpsc + 1);
- }
- return (ulpsc);
-}
-
/*
- * Send a work request setting multiple TCB fields to enable
- * ULP_MODE_TLS.
+ * Send a work request setting one or more TCB fields to partially or
+ * fully enable ULP_MODE_TLS.
+ *
+ * - If resid == 0, the socket buffer ends at a record boundary
+ * (it is either empty or contains only complete records). Switch
+ * to ULP_MODE_TLS (if not already) and enable TLS decryption.
+ *
+ * - If resid != 0, the socket buffer contains a partial record. In
+ * this case, switch to ULP_MODE_TLS partially and configure the TCB
+ * to pass along the remaining resid bytes undecrypted. Once they
+ * arrive, this function is called again with resid == 0 to enable
+ * TLS decryption.
*/
static void
-tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
+tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno,
+ size_t resid)
{
- struct wrqe *wr;
+ struct mbuf *m;
struct work_request_hdr *wrh;
struct ulp_txpkt *ulpmc;
int fields, key_offset, len;
- KASSERT(ulp_mode(toep) == ULP_MODE_NONE,
- ("%s: tid %d already ULP_MODE_TLS", __func__, toep->tid));
+ /*
+ * If we are already in ULP_MODE_TLS, then we should now be at
+ * a record boundary and ready to finish enabling TLS RX.
+ */
+ KASSERT(resid == 0 || ulp_mode(toep) == ULP_MODE_NONE,
+ ("%s: tid %d needs %zu more data but already ULP_MODE_TLS",
+ __func__, toep->tid, resid));
fields = 0;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* 2 writes for the overlay region */
+ fields += 2;
+ }
- /* 2 writes for the overlay region */
- fields += 2;
+ if (resid == 0) {
+ /* W_TCB_TLS_SEQ */
+ fields++;
- /* W_TCB_TLS_SEQ */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ } else {
+ /* W_TCB_PDU_LEN */
+ fields++;
- /* W_TCB_ULP_RAW */
- fields++;
+ /* W_TCB_ULP_RAW */
+ fields++;
+ }
- /* W_TCB_ULP_TYPE */
- fields ++;
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* W_TCB_ULP_TYPE */
+ fields++;
+ }
/* W_TCB_T_FLAGS */
fields++;
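/*
 * Illustrative aside (not part of the patch): the field counting
 * above collapses to two totals.  A minimal mirror of that logic,
 * with 'first_time' standing in for ulp_mode(toep) == ULP_MODE_NONE:
 */
#include <stdbool.h>

static int
tls_tcb_fields(bool first_time)
{
	int fields = 0;

	if (first_time)
		fields += 2;	/* clear overlay words 26 and 30 */
	fields += 2;		/* TLS_SEQ + ULP_RAW, or PDU_LEN + ULP_RAW */
	if (first_time)
		fields++;	/* set ULP_TYPE to ULP_MODE_TLS */
	fields++;		/* clear TF_RX_QUIESCE in T_FLAGS */
	return (fields);	/* 6 on the first call, 3 afterwards */
}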
@@ -1146,60 +1210,94 @@ tls_update_tcb(struct adapter *sc, struct toepcb *toep, uint64_t seqno)
KASSERT(len <= SGE_MAX_WR_LEN,
("%s: WR with %d TCB field updates too large", __func__, fields));
- wr = alloc_wrqe(len, toep->ctrlq);
- if (wr == NULL) {
+ m = alloc_raw_wr_mbuf(len);
+ if (m == NULL) {
/* XXX */
panic("%s: out of memory", __func__);
}
- wrh = wrtod(wr);
- INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
+ wrh = mtod(m, struct work_request_hdr *);
+ INIT_ULPTX_WRH(wrh, len, 1, toep->tid); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- /*
- * Clear the TLS overlay region: 1023:832.
- *
- * Words 26/27 are always set to zero. Words 28/29
- * contain seqno and are set when enabling TLS
- * decryption. Word 30 is zero and Word 31 contains
- * the keyid.
- */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26,
- 0xffffffffffffffff, 0);
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /*
+ * Clear the TLS overlay region: 1023:832.
+ *
+ * Words 26/27 are always set to zero. Words 28/29
+ * contain seqno and are set when enabling TLS
+ * decryption. Word 30 is zero and Word 31 contains
+ * the keyid.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
+ 0xffffffffffffffff, 0);
- /*
- * RX key tags are an index into the key portion of MA
- * memory stored as an offset from the base address in
- * units of 64 bytes.
- */
- key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30,
- 0xffffffffffffffff,
- (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
-
- CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
- toep->tid, seqno);
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_TLS_SEQ,
- V_TCB_TLS_SEQ(M_TCB_TLS_SEQ), V_TCB_TLS_SEQ(seqno));
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_RAW,
- V_TCB_ULP_RAW(M_TCB_ULP_RAW),
- V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
- V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
-
- toep->flags &= ~TPF_TLS_STARTING;
- toep->flags |= TPF_TLS_RECEIVE;
-
- /* Set the ULP mode to ULP_MODE_TLS. */
- toep->params.ulp_mode = ULP_MODE_TLS;
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE,
- V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
- V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ /*
+ * RX key tags are an index into the key portion of MA
+ * memory stored as an offset from the base address in
+ * units of 64 bytes.
+ */
+ key_offset = toep->tls.rx_key_addr - sc->vres.key.start;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
+ 0xffffffffffffffff,
+ (uint64_t)V_TCB_RX_TLS_KEY_TAG(key_offset / 64) << 32);
+ }
+
+ if (resid == 0) {
+ /*
+ * The socket buffer is empty or only contains
+ * complete TLS records: Set the sequence number and
+ * enable TLS decryption.
+ */
+ CTR3(KTR_CXGBE, "%s: tid %d enable TLS seqno %lu", __func__,
+ toep->tid, seqno);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_RX_TLS_SEQ, V_TCB_RX_TLS_SEQ(M_TCB_RX_TLS_SEQ),
+ V_TCB_RX_TLS_SEQ(seqno));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW((V_TF_TLS_KEY_SIZE(3) | V_TF_TLS_CONTROL(1) |
+ V_TF_TLS_ACTIVE(1) | V_TF_TLS_ENABLE(1))));
+
+ toep->flags &= ~TPF_TLS_STARTING;
+ toep->flags |= TPF_TLS_RECEIVE;
+ } else {
+ /*
+ * The socket buffer ends with a partial record that has a
+ * full header and is at least 6 bytes long.
+ *
+ * Set the PDU length. This treats the 'resid' bytes as a
+ * TLS PDU: the first 5 bytes are a fake header and the
+ * remainder is programmed as the PDU payload length.
+ */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_PDU_LEN, V_TCB_PDU_LEN(M_TCB_PDU_LEN),
+ V_TCB_PDU_LEN(resid - sizeof(struct tls_hdr)));
+ CTR3(KTR_CXGBE, "%s: tid %d setting PDU_LEN to %zu",
+ __func__, toep->tid, resid - sizeof(struct tls_hdr));
+
+ /* Clear all bits in ULP_RAW except for ENABLE. */
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_RAW, V_TCB_ULP_RAW(M_TCB_ULP_RAW),
+ V_TCB_ULP_RAW(V_TF_TLS_ENABLE(1)));
+
+ /* Wait for 'resid' bytes to be delivered as CPL_RX_DATA. */
+ toep->tls.rx_resid = resid;
+ }
+
+ if (ulp_mode(toep) == ULP_MODE_NONE) {
+ /* Set the ULP mode to ULP_MODE_TLS. */
+ toep->params.ulp_mode = ULP_MODE_TLS;
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
+ W_TCB_ULP_TYPE, V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
+ V_TCB_ULP_TYPE(ULP_MODE_TLS));
+ }
/* Clear TF_RX_QUIESCE. */
- ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS,
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
V_TF_RX_QUIESCE(1), 0);
- t4_wrq_tx(sc, wr);
+ t4_raw_wr_tx(sc, toep, m);
}
/*
@@ -1226,7 +1324,8 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
* size of a TLS record, re-enable receive and pause again once
* we get more data to try again.
*/
- if (!have_header || resid != 0) {
+ if (!have_header || (resid != 0 && (resid < sizeof(struct tls_hdr) ||
+ is_t6(sc)))) {
CTR(KTR_CXGBE, "%s: tid %d waiting for more data", __func__,
toep->tid);
toep->flags &= ~TPF_TLS_RX_QUIESCED;
@@ -1234,7 +1333,7 @@ tls_check_rx_sockbuf(struct adapter *sc, struct toepcb *toep,
return;
}
- tls_update_tcb(sc, toep, seqno);
+ tls_update_tcb(sc, toep, seqno, resid);
}
void
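/*
 * Illustrative aside (not part of the patch): the wait condition
 * above, restated standalone.  TLS_HDR_LEN is a stand-in for
 * sizeof(struct tls_hdr); the T6 check reflects that only newer
 * chips can take over a buffer that ends in a partial record.
 */
#include <stdbool.h>
#include <stddef.h>

#define TLS_HDR_LEN 5

static bool
must_wait_for_more_data(bool have_header, size_t resid, bool chip_is_t6)
{
	/*
	 * Wait if no full header has been seen yet, or if the partial
	 * record is too short to fake a header, or if the chip cannot
	 * do the partial-record handoff at all.
	 */
	return (!have_header ||
	    (resid != 0 && (resid < TLS_HDR_LEN || chip_is_t6)));
}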
diff --git a/sys/dev/cxgbe/tom/t4_tls.h b/sys/dev/cxgbe/tom/t4_tls.h
index 753a30890fdc..6faf946e9e3c 100644
--- a/sys/dev/cxgbe/tom/t4_tls.h
+++ b/sys/dev/cxgbe/tom/t4_tls.h
@@ -74,6 +74,7 @@ struct tls_ofld_info {
unsigned short adjusted_plen;
unsigned short expn_per_ulp;
unsigned short pdus_per_ulp;
+ bool tls13;
struct tls_scmd scmd0;
u_int iv_len;
unsigned int tx_key_info_size;
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index ac5bba75f904..53a945f8b4cc 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -89,18 +89,23 @@ static int t4_tom_modevent(module_t, int, void *);
/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);
+static int t4_tom_stop(struct adapter *);
+static int t4_tom_restart(struct adapter *);
static struct uld_info tom_uld_info = {
- .uld_id = ULD_TOM,
- .activate = t4_tom_activate,
- .deactivate = t4_tom_deactivate,
+ .uld_activate = t4_tom_activate,
+ .uld_deactivate = t4_tom_deactivate,
+ .uld_stop = t4_tom_stop,
+ .uld_restart = t4_tom_restart,
};
static void release_offload_resources(struct toepcb *);
-static int alloc_tid_tabs(struct tid_info *);
-static void free_tid_tabs(struct tid_info *);
+static void done_with_toepcb(struct toepcb *);
+static int alloc_tid_tabs(struct adapter *);
+static void free_tid_tabs(struct adapter *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);
+static void cleanup_stranded_tids(void *, int);
struct toepcb *
alloc_toepcb(struct vi_info *vi, int flags)
@@ -135,6 +140,7 @@ alloc_toepcb(struct vi_info *vi, int flags)
refcount_init(&toep->refcount, 1);
toep->td = sc->tom_softc;
+ toep->incarnation = sc->incarnation;
toep->vi = vi;
toep->tid = -1;
toep->tx_total = tx_credits;
@@ -176,7 +182,7 @@ init_toepcb(struct vi_info *vi, struct toepcb *toep)
}
toep->ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
toep->ofld_rxq = &sc->sge.ofld_rxq[cp->rxq_idx];
- toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
+ toep->ctrlq = &sc->sge.ctrlq[cp->ctrlq_idx];
tls_init_toep(toep);
MPASS(ulp_mode(toep) != ULP_MODE_TCPDDP);
@@ -250,11 +256,6 @@ offload_socket(struct socket *so, struct toepcb *toep)
toep->inp = inp;
toep->flags |= TPF_ATTACHED;
in_pcbref(inp);
-
- /* Add the TOE PCB to the active list */
- mtx_lock(&td->toep_list_lock);
- TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
- mtx_unlock(&td->toep_list_lock);
}
void
@@ -273,7 +274,6 @@ undo_offload_socket(struct socket *so)
struct inpcb *inp = sotoinpcb(so);
struct tcpcb *tp = intotcpcb(inp);
struct toepcb *toep = tp->t_toe;
- struct tom_data *td = toep->td;
struct sockbuf *sb;
INP_WLOCK_ASSERT(inp);
@@ -296,10 +296,6 @@ undo_offload_socket(struct socket *so)
toep->flags &= ~TPF_ATTACHED;
if (in_pcbrele_wlocked(inp))
panic("%s: inp freed.", __func__);
-
- mtx_lock(&td->toep_list_lock);
- TAILQ_REMOVE(&td->toep_list, toep, link);
- mtx_unlock(&td->toep_list_lock);
}
static void
@@ -311,12 +307,46 @@ release_offload_resources(struct toepcb *toep)
KASSERT(!(toep->flags & TPF_CPL_PENDING),
("%s: %p has CPL pending.", __func__, toep));
- KASSERT(!(toep->flags & TPF_ATTACHED),
- ("%s: %p is still attached.", __func__, toep));
CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
__func__, toep, tid, toep->l2te, toep->ce);
+ if (toep->l2te) {
+ t4_l2t_release(toep->l2te);
+ toep->l2te = NULL;
+ }
+ if (tid >= 0) {
+ remove_tid(sc, tid, toep->ce ? 2 : 1);
+ release_tid(sc, tid, toep->ctrlq);
+ toep->tid = -1;
+ mtx_lock(&td->toep_list_lock);
+ if (toep->flags & TPF_IN_TOEP_LIST) {
+ toep->flags &= ~TPF_IN_TOEP_LIST;
+ TAILQ_REMOVE(&td->toep_list, toep, link);
+ }
+ mtx_unlock(&td->toep_list_lock);
+ }
+ if (toep->ce) {
+ t4_release_clip_entry(sc, toep->ce);
+ toep->ce = NULL;
+ }
+ if (toep->params.tc_idx != -1)
+ t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);
+}
+
+/*
+ * Both the driver and kernel are done with the toepcb.
+ */
+static void
+done_with_toepcb(struct toepcb *toep)
+{
+ KASSERT(!(toep->flags & TPF_CPL_PENDING),
+ ("%s: %p has CPL pending.", __func__, toep));
+ KASSERT(!(toep->flags & TPF_ATTACHED),
+ ("%s: %p is still attached.", __func__, toep));
+
+ CTR(KTR_CXGBE, "%s: toep %p (0x%x)", __func__, toep, toep->flags);
+
/*
* These queues should have been emptied at approximately the same time
* that a normal connection's socket's so_snd would have been purged or
@@ -329,24 +359,10 @@ release_offload_resources(struct toepcb *toep)
ddp_assert_empty(toep);
#endif
MPASS(TAILQ_EMPTY(&toep->aiotx_jobq));
-
- if (toep->l2te)
- t4_l2t_release(toep->l2te);
-
- if (tid >= 0) {
- remove_tid(sc, tid, toep->ce ? 2 : 1);
- release_tid(sc, tid, toep->ctrlq);
- }
-
- if (toep->ce)
- t4_release_clip_entry(sc, toep->ce);
-
- if (toep->params.tc_idx != -1)
- t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->params.tc_idx);
-
- mtx_lock(&td->toep_list_lock);
- TAILQ_REMOVE(&td->toep_list, toep, link);
- mtx_unlock(&td->toep_list_lock);
+ MPASS(toep->tid == -1);
+ MPASS(toep->l2te == NULL);
+ MPASS(toep->ce == NULL);
+ MPASS((toep->flags & TPF_IN_TOEP_LIST) == 0);
free_toepcb(toep);
}
@@ -359,7 +375,7 @@ release_offload_resources(struct toepcb *toep)
* Also gets called when an offloaded active open fails and the TOM wants the
* kernel to take the TCP PCB back.
*/
-static void
+void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
@@ -392,7 +408,7 @@ t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
toep->flags &= ~TPF_ATTACHED;
if (!(toep->flags & TPF_CPL_PENDING))
- release_offload_resources(toep);
+ done_with_toepcb(toep);
}
/*
@@ -478,8 +494,15 @@ send_get_tcb(struct adapter *sc, u_int tid)
bzero(cpl, sizeof(*cpl));
INIT_TP_WR(cpl, tid);
OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_GET_TCB, tid));
- cpl->reply_ctrl = htobe16(V_REPLY_CHAN(0) |
- V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id));
+ if (chip_id(sc) >= CHELSIO_T7) {
+ cpl->reply_ctrl =
+ htobe16(V_T7_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
+ } else {
+ cpl->reply_ctrl =
+ htobe16(V_QUEUENO(sc->sge.ofld_rxq[0].iq.cntxt_id) |
+ V_REPLY_CHAN(0) | V_NO_REPLY(0));
+ }
cpl->cookie = 0xff;
commit_wrq_wr(&sc->sge.ctrlq[0], cpl, &cookie);
@@ -838,40 +861,6 @@ t4_alloc_tls_session(struct toedev *tod, struct tcpcb *tp,
}
#endif
-/* SET_TCB_FIELD sent as a ULP command looks like this */
-#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
- sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
-
-static void *
-mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, uint64_t word, uint64_t mask,
- uint64_t val, uint32_t tid)
-{
- struct ulptx_idata *ulpsc;
- struct cpl_set_tcb_field_core *req;
-
- ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
- ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
-
- ulpsc = (struct ulptx_idata *)(ulpmc + 1);
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
- ulpsc->len = htobe32(sizeof(*req));
-
- req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
- OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htobe16(V_NO_REPLY(1));
- req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
- req->mask = htobe64(mask);
- req->val = htobe64(val);
-
- ulpsc = (struct ulptx_idata *)(req + 1);
- if (LEN__SET_TCB_FIELD_ULP % 16) {
- ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
- ulpsc->len = htobe32(0);
- return (ulpsc + 1);
- }
- return (ulpsc);
-}
-
static void
send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
{
@@ -900,6 +889,8 @@ send_mss_flowc_wr(struct adapter *sc, struct toepcb *toep)
flowc->mnemval[0].val = htobe32(toep->params.emss);
txsd = &toep->txsd[toep->txsd_pidx];
+ _Static_assert(flowclen16 <= MAX_OFLD_TX_SDESC_CREDITS,
+ "MAX_OFLD_TX_SDESC_CREDITS too small");
txsd->tx_credits = flowclen16;
txsd->plen = 0;
toep->tx_credits -= txsd->tx_credits;
@@ -958,10 +949,10 @@ t4_pmtu_update(struct toedev *tod, struct tcpcb *tp, tcp_seq seq, int mtu)
}
INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */
ulpmc = (struct ulp_txpkt *)(wrh + 1);
- ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_T_MAXSEG,
- V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx), toep->tid);
- ulpmc = mk_set_tcb_field_ulp(ulpmc, W_TCB_TIMESTAMP,
- V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0, toep->tid);
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_MAXSEG,
+ V_TCB_T_MAXSEG(M_TCB_T_MAXSEG), V_TCB_T_MAXSEG(idx));
+ ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_TIMESTAMP,
+ V_TCB_TIMESTAMP(0x7FFFFULL << 11), 0);
commit_wrq_wr(toep->ctrlq, wrh, &cookie);
/* Update the software toepcb and tcpcb. */
@@ -1022,9 +1013,9 @@ final_cpl_received(struct toepcb *toep)
toep->flags &= ~(TPF_CPL_PENDING | TPF_WAITING_FOR_FINAL);
mbufq_drain(&toep->ulp_pduq);
mbufq_drain(&toep->ulp_pdu_reclaimq);
-
+ release_offload_resources(toep);
if (!(toep->flags & TPF_ATTACHED))
- release_offload_resources(toep);
+ done_with_toepcb(toep);
if (!in_pcbrele_wlocked(inp))
INP_WUNLOCK(inp);
@@ -1237,7 +1228,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;
if (tp->port_shift >= 0)
- ntuple |= (uint64_t)e->lport << tp->port_shift;
+ ntuple |= (uint64_t)e->hw_port << tp->port_shift;
if (tp->protocol_shift >= 0)
ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;
@@ -1248,10 +1239,7 @@ select_ntuple(struct vi_info *vi, struct l2t_entry *e)
tp->vnic_shift;
}
- if (is_t4(sc))
- return (htobe32((uint32_t)ntuple));
- else
- return (htobe64(V_FILTER_TUPLE(ntuple)));
+ return (ntuple);
}
/*
@@ -1342,6 +1330,9 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
*/
cp->mtu_idx = find_best_mtu_idx(sc, inc, s);
+ /* Control queue. */
+ cp->ctrlq_idx = vi->pi->port_id;
+
/* Tx queue for this connection. */
if (s->txq == QUEUE_RANDOM)
q_idx = arc4random();
@@ -1454,6 +1445,32 @@ init_conn_params(struct vi_info *vi , struct offload_settings *s,
cp->emss = 0;
}
+void
+update_tid_qid_sel(struct vi_info *vi, struct conn_params *cp, int tid)
+{
+ struct adapter *sc = vi->adapter;
+ const int mask = sc->params.tid_qid_sel_mask;
+ struct sge_ofld_txq *ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ uint32_t ngroup;
+ int g, nqpg;
+
+ cp->ctrlq_idx = ofld_txq_group(tid, mask);
+ CTR(KTR_CXGBE, "tid %u is on core %u", tid, cp->ctrlq_idx);
+ if ((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask))
+ return;
+
+ ngroup = 1 << bitcount32(mask);
+ MPASS(vi->nofldtxq % ngroup == 0);
+ g = ofld_txq_group(tid, mask);
+ nqpg = vi->nofldtxq / ngroup;
+ cp->txq_idx = vi->first_ofld_txq + g * nqpg + arc4random() % nqpg;
+#ifdef INVARIANTS
+ MPASS(cp->txq_idx < vi->first_ofld_txq + vi->nofldtxq);
+ ofld_txq = &sc->sge.ofld_txq[cp->txq_idx];
+ MPASS((ofld_txq->wrq.eq.cntxt_id & mask) == (tid & mask));
+#endif
+}
+
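/*
 * Illustrative aside (not part of the patch): a standalone model of
 * the queue re-selection math above.  It assumes ofld_txq_group(id,
 * mask) is simply (id & mask), which is what the surrounding checks
 * imply; all concrete values below are made up.
 */
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const unsigned mask = 0x3;		/* tid_qid_sel_mask */
	const unsigned nofldtxq = 8, first_ofld_txq = 0;
	unsigned tid = 0x1236;
	unsigned ngroup = 1u << __builtin_popcount(mask);
	unsigned g = tid & mask;		/* ofld_txq_group() stand-in */
	unsigned nqpg = nofldtxq / ngroup;	/* queues per group */
	unsigned txq = first_ofld_txq + g * nqpg + rand() % nqpg;

	printf("tid %#x -> group %u, txq in [%u, %u], picked %u\n",
	    tid, g, first_ofld_txq + g * nqpg,
	    first_ofld_txq + g * nqpg + nqpg - 1, txq);
	return (0);
}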
int
negative_advice(int status)
{
@@ -1464,14 +1481,15 @@ negative_advice(int status)
}
static int
-alloc_tid_tab(struct tid_info *t, int flags)
+alloc_tid_tab(struct adapter *sc)
{
+ struct tid_info *t = &sc->tids;
MPASS(t->ntids > 0);
MPASS(t->tid_tab == NULL);
t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
- M_ZERO | flags);
+ M_ZERO | M_NOWAIT);
if (t->tid_tab == NULL)
return (ENOMEM);
atomic_store_rel_int(&t->tids_in_use, 0);
@@ -1480,8 +1498,9 @@ alloc_tid_tab(struct tid_info *t, int flags)
}
static void
-free_tid_tab(struct tid_info *t)
+free_tid_tab(struct adapter *sc)
{
+ struct tid_info *t = &sc->tids;
KASSERT(t->tids_in_use == 0,
("%s: %d tids still in use.", __func__, t->tids_in_use));
@@ -1490,62 +1509,29 @@ free_tid_tab(struct tid_info *t)
t->tid_tab = NULL;
}
-static int
-alloc_stid_tab(struct tid_info *t, int flags)
-{
-
- MPASS(t->nstids > 0);
- MPASS(t->stid_tab == NULL);
-
- t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
- M_ZERO | flags);
- if (t->stid_tab == NULL)
- return (ENOMEM);
- mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
- t->stids_in_use = 0;
- TAILQ_INIT(&t->stids);
- t->nstids_free_head = t->nstids;
-
- return (0);
-}
-
static void
-free_stid_tab(struct tid_info *t)
+free_tid_tabs(struct adapter *sc)
{
-
- KASSERT(t->stids_in_use == 0,
- ("%s: %d tids still in use.", __func__, t->stids_in_use));
-
- if (mtx_initialized(&t->stid_lock))
- mtx_destroy(&t->stid_lock);
- free(t->stid_tab, M_CXGBE);
- t->stid_tab = NULL;
-}
-
-static void
-free_tid_tabs(struct tid_info *t)
-{
-
- free_tid_tab(t);
- free_stid_tab(t);
+ free_tid_tab(sc);
+ free_stid_tab(sc);
}
static int
-alloc_tid_tabs(struct tid_info *t)
+alloc_tid_tabs(struct adapter *sc)
{
int rc;
- rc = alloc_tid_tab(t, M_NOWAIT);
+ rc = alloc_tid_tab(sc);
if (rc != 0)
goto failed;
- rc = alloc_stid_tab(t, M_NOWAIT);
+ rc = alloc_stid_tab(sc);
if (rc != 0)
goto failed;
return (0);
failed:
- free_tid_tabs(t);
+ free_tid_tabs(sc);
return (rc);
}
@@ -1602,7 +1588,7 @@ free_tom_data(struct adapter *sc, struct tom_data *td)
mtx_destroy(&td->toep_list_lock);
free_tcb_history(sc, td);
- free_tid_tabs(&sc->tids);
+ free_tid_tabs(sc);
free(td, M_CXGBE);
}
@@ -1807,13 +1793,14 @@ reclaim_wr_resources(void *arg, int count)
case CPL_ACT_OPEN_REQ6:
atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
- act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
+ act_open_failure_cleanup(sc, lookup_atid(sc, atid),
+ EHOSTUNREACH);
free(wr, M_CXGBE);
break;
case CPL_PASS_ACCEPT_RPL:
tid = GET_TID(cpl);
CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
- synack_failure_cleanup(sc, tid);
+ synack_failure_cleanup(sc, lookup_tid(sc, tid));
free(wr, M_CXGBE);
break;
default:
@@ -1825,6 +1812,83 @@ reclaim_wr_resources(void *arg, int count)
}
/*
+ * Based on do_abort_req. We treat an abrupt hardware stop as a connection
+ * abort from the hardware.
+ */
+static void
+live_tid_failure_cleanup(struct adapter *sc, struct toepcb *toep, u_int status)
+{
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct epoch_tracker et;
+
+ MPASS(!(toep->flags & TPF_SYNQE));
+
+ inp = toep->inp;
+ CURVNET_SET(toep->vnet);
+ NET_EPOCH_ENTER(et); /* for tcp_close */
+ INP_WLOCK(inp);
+ tp = intotcpcb(inp);
+ toep->flags |= TPF_ABORT_SHUTDOWN;
+ if ((inp->inp_flags & INP_DROPPED) == 0) {
+ struct socket *so = inp->inp_socket;
+
+ if (so != NULL)
+ so_error_set(so, status);
+ tp = tcp_close(tp);
+ if (tp == NULL)
+ INP_WLOCK(inp); /* re-acquire */
+ }
+ final_cpl_received(toep);
+ NET_EPOCH_EXIT(et);
+ CURVNET_RESTORE();
+}
+
+static void
+cleanup_stranded_tids(void *arg, int count)
+{
+ TAILQ_HEAD(, toepcb) tlist = TAILQ_HEAD_INITIALIZER(tlist);
+ TAILQ_HEAD(, synq_entry) slist = TAILQ_HEAD_INITIALIZER(slist);
+ struct tom_data *td = arg;
+ struct adapter *sc = td_adapter(td);
+ struct toepcb *toep;
+ struct synq_entry *synqe;
+
+ /* Clean up synq entries. */
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_SWAP(&td->stranded_synqe, &slist, synq_entry, link);
+ mtx_unlock(&td->toep_list_lock);
+ while ((synqe = TAILQ_FIRST(&slist)) != NULL) {
+ TAILQ_REMOVE(&slist, synqe, link);
+ MPASS(synqe->tid >= 0); /* stale, was kept around for debug */
+ synqe->tid = -1;
+ synack_failure_cleanup(sc, synqe);
+ }
+
+ /* Clean up in-flight active opens. */
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_SWAP(&td->stranded_atids, &tlist, toepcb, link);
+ mtx_unlock(&td->toep_list_lock);
+ while ((toep = TAILQ_FIRST(&tlist)) != NULL) {
+ TAILQ_REMOVE(&tlist, toep, link);
+ MPASS(toep->tid >= 0); /* stale, was kept around for debug */
+ toep->tid = -1;
+ act_open_failure_cleanup(sc, toep, EHOSTUNREACH);
+ }
+
+ /* Clean up live connections. */
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_SWAP(&td->stranded_tids, &tlist, toepcb, link);
+ mtx_unlock(&td->toep_list_lock);
+ while ((toep = TAILQ_FIRST(&tlist)) != NULL) {
+ TAILQ_REMOVE(&tlist, toep, link);
+ MPASS(toep->tid >= 0); /* stale, was kept around for debug */
+ toep->tid = -1;
+ live_tid_failure_cleanup(sc, toep, ECONNABORTED);
+ }
+}
+
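/*
 * Illustrative aside (not part of the patch): all three loops above
 * use the same swap-then-drain pattern, which grabs the shared list
 * in O(1) while the lock is held and then walks the private copy
 * unlocked.  A standalone userland sketch using TAILQ_SWAP from
 * FreeBSD's <sys/queue.h>:
 */
#include <sys/queue.h>
#include <pthread.h>

struct item {
	TAILQ_ENTRY(item) link;
};
TAILQ_HEAD(itemq, item);

static struct itemq shared = TAILQ_HEAD_INITIALIZER(shared);
static pthread_mutex_t shared_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
drain_shared(void (*cleanup)(struct item *))
{
	struct itemq priv = TAILQ_HEAD_INITIALIZER(priv);
	struct item *it;

	pthread_mutex_lock(&shared_mtx);
	TAILQ_SWAP(&shared, &priv, item, link);	/* O(1) bulk grab */
	pthread_mutex_unlock(&shared_mtx);

	while ((it = TAILQ_FIRST(&priv)) != NULL) {
		TAILQ_REMOVE(&priv, it, link);
		cleanup(it);	/* may sleep; lock is not held */
	}
}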
+/*
* Ground control to Major TOM
* Commencing countdown, engines on
*/
@@ -1846,6 +1910,10 @@ t4_tom_activate(struct adapter *sc)
/* List of TOE PCBs and associated lock */
mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
TAILQ_INIT(&td->toep_list);
+ TAILQ_INIT(&td->synqe_list);
+ TAILQ_INIT(&td->stranded_atids);
+ TAILQ_INIT(&td->stranded_tids);
+ TASK_INIT(&td->cleanup_stranded_tids, 0, cleanup_stranded_tids, td);
/* Listen context */
mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
@@ -1858,7 +1926,7 @@ t4_tom_activate(struct adapter *sc)
TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);
/* TID tables */
- rc = alloc_tid_tabs(&sc->tids);
+ rc = alloc_tid_tabs(sc);
if (rc != 0)
goto done;
@@ -1913,23 +1981,34 @@ done:
static int
t4_tom_deactivate(struct adapter *sc)
{
- int rc = 0;
+ int rc = 0, i, v;
struct tom_data *td = sc->tom_softc;
+ struct vi_info *vi;
ASSERT_SYNCHRONIZED_OP(sc);
if (td == NULL)
return (0); /* XXX. KASSERT? */
- if (sc->offload_map != 0)
- return (EBUSY); /* at least one port has IFCAP_TOE enabled */
-
if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */
+ if (sc->offload_map != 0) {
+ for_each_port(sc, i) {
+ for_each_vi(sc->port[i], v, vi) {
+ toe_capability(vi, false);
+ if_setcapenablebit(vi->ifp, 0, IFCAP_TOE);
+ SETTOEDEV(vi->ifp, NULL);
+ }
+ }
+ MPASS(sc->offload_map == 0);
+ }
+
mtx_lock(&td->toep_list_lock);
if (!TAILQ_EMPTY(&td->toep_list))
rc = EBUSY;
+ MPASS(TAILQ_EMPTY(&td->synqe_list));
+ MPASS(TAILQ_EMPTY(&td->stranded_tids));
mtx_unlock(&td->toep_list_lock);
mtx_lock(&td->lctx_hash_lock);
@@ -1938,6 +2017,7 @@ t4_tom_deactivate(struct adapter *sc)
mtx_unlock(&td->lctx_hash_lock);
taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
+ taskqueue_drain(taskqueue_thread, &td->cleanup_stranded_tids);
mtx_lock(&td->unsent_wr_lock);
if (!STAILQ_EMPTY(&td->unsent_wr_list))
rc = EBUSY;
@@ -1952,6 +2032,182 @@ t4_tom_deactivate(struct adapter *sc)
return (rc);
}
+static void
+stop_atids(struct adapter *sc)
+{
+ struct tom_data *td = sc->tom_softc;
+ struct tid_info *t = &sc->tids;
+ struct toepcb *toep;
+ int atid;
+
+ /*
+ * Hashfilters and T6-KTLS are the only other users of atids but they're
+ * both mutually exclusive with TOE. That means t4_tom owns all the
+ * atids in the table.
+ */
+ MPASS(!is_hashfilter(sc));
+ if (is_t6(sc))
+ MPASS(!(sc->flags & KERN_TLS_ON));
+
+ /* New atids are not being allocated. */
+#ifdef INVARIANTS
+ mtx_lock(&t->atid_lock);
+ MPASS(t->atid_alloc_stopped == true);
+ mtx_unlock(&t->atid_lock);
+#endif
+
+ /*
+ * In-use atids fall in one of these two categories:
+ * a) Those waiting for L2 resolution before being submitted to
+ * hardware.
+ * b) Those that have been submitted to hardware and are awaiting
+ * replies that will never arrive because the LLD is stopped.
+ */
+ for (atid = 0; atid < t->natids; atid++) {
+ toep = lookup_atid(sc, atid);
+ if ((uintptr_t)toep >= (uintptr_t)&t->atid_tab[0] &&
+ (uintptr_t)toep < (uintptr_t)&t->atid_tab[t->natids])
+ continue;
+ if (__predict_false(toep == NULL))
+ continue;
+ MPASS(toep->tid == atid);
+ MPASS(toep->incarnation == sc->incarnation);
+ /*
+ * Take the atid out of the lookup table. toep->tid is stale
+ * after this but useful for debug.
+ */
+ CTR(KTR_CXGBE, "%s: atid %d@%d STRANDED, removed from table",
+ __func__, atid, toep->incarnation);
+ free_atid(sc, toep->tid);
+#if 0
+ toep->tid = -1;
+#endif
+ mtx_lock(&td->toep_list_lock);
+ toep->flags &= ~TPF_IN_TOEP_LIST;
+ TAILQ_REMOVE(&td->toep_list, toep, link);
+ TAILQ_INSERT_TAIL(&td->stranded_atids, toep, link);
+ mtx_unlock(&td->toep_list_lock);
+ }
+ MPASS(atomic_load_int(&t->atids_in_use) == 0);
+}
+
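/*
 * Illustrative aside (not part of the patch): the address-range test
 * above works because the atid table is assumed to be a union, where
 * a free slot stores a pointer to the next free slot (inside the
 * table) and an allocated slot stores the caller's cookie (outside
 * it).  A standalone model of the in-use test:
 */
#include <stdbool.h>
#include <stdint.h>

union atid_slot {
	union atid_slot *next_free;	/* while the slot is free */
	void *cookie;			/* while the slot is in use */
};

static bool
atid_in_use(union atid_slot *tab, int natids, int atid)
{
	uintptr_t p = (uintptr_t)tab[atid].cookie;

	return (p != 0 &&
	    (p < (uintptr_t)&tab[0] || p >= (uintptr_t)&tab[natids]));
}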
+static void
+stop_tids(struct adapter *sc)
+{
+ struct tom_data *td = sc->tom_softc;
+ struct toepcb *toep;
+#ifdef INVARIANTS
+ struct tid_info *t = &sc->tids;
+#endif
+
+ /*
+ * The LLD's offload queues are stopped so do_act_establish and
+ * do_pass_accept_req cannot run and insert tids in parallel with this
+ * thread. stop_stid_tab has also run and removed the synq entries'
+ * tids from the table. The only tids in the table are for connections
+ * at or beyond ESTABLISHED that are still waiting for the final CPL.
+ */
+ mtx_lock(&td->toep_list_lock);
+ TAILQ_FOREACH(toep, &td->toep_list, link) {
+ MPASS(sc->incarnation == toep->incarnation);
+ MPASS(toep->tid >= 0);
+ MPASS(toep == lookup_tid(sc, toep->tid));
+ /* Remove tid from the lookup table immediately. */
+ CTR(KTR_CXGBE, "%s: tid %d@%d STRANDED, removed from table",
+ __func__, toep->tid, toep->incarnation);
+ remove_tid(sc, toep->tid, toep->ce ? 2 : 1);
+#if 0
+ /* toep->tid is stale now but left alone for debug. */
+ toep->tid = -1;
+#endif
+ /* All toep in this list will get bulk moved to stranded_tids */
+ toep->flags &= ~TPF_IN_TOEP_LIST;
+ }
+ MPASS(TAILQ_EMPTY(&td->stranded_tids));
+ TAILQ_CONCAT(&td->stranded_tids, &td->toep_list, link);
+ MPASS(TAILQ_EMPTY(&td->toep_list));
+ mtx_unlock(&td->toep_list_lock);
+
+ MPASS(atomic_load_int(&t->tids_in_use) == 0);
+}
+
+/*
+ * L2T is stable because
+ * 1. stop_lld stopped all new allocations.
+ * 2. stop_lld also stopped the tx wrq so nothing is enqueueing new WRs to the
+ * queue or to l2t_entry->wr_list.
+ * 3. t4_l2t_update is ignoring all L2 updates.
+ */
+static void
+stop_tom_l2t(struct adapter *sc)
+{
+ struct l2t_data *d = sc->l2t;
+ struct tom_data *td = sc->tom_softc;
+ struct l2t_entry *e;
+ struct wrqe *wr;
+ int i;
+
+ /*
+ * This task cannot be enqueued because L2 state changes are not being
+ * processed. But if it's already scheduled or running then we need to
+ * wait for it to clean up the atids in the unsent_wr_list.
+ */
+ taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
+ MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
+
+ for (i = 0; i < d->l2t_size; i++) {
+ e = &d->l2tab[i];
+ mtx_lock(&e->lock);
+ if (e->state == L2T_STATE_VALID || e->state == L2T_STATE_STALE)
+ e->state = L2T_STATE_RESOLVING;
+ /*
+ * stop_atids is going to clean up _all_ atids in use, including
+ * those that were pending L2 resolution. Just discard the WRs.
+ */
+ while ((wr = STAILQ_FIRST(&e->wr_list)) != NULL) {
+ STAILQ_REMOVE_HEAD(&e->wr_list, link);
+ free(wr, M_CXGBE);
+ }
+ mtx_unlock(&e->lock);
+ }
+}
+
+static int
+t4_tom_stop(struct adapter *sc)
+{
+ struct tid_info *t = &sc->tids;
+ struct tom_data *td = sc->tom_softc;
+
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ stop_tom_l2t(sc);
+ if (atomic_load_int(&t->atids_in_use) > 0)
+ stop_atids(sc);
+ if (atomic_load_int(&t->stids_in_use) > 0)
+ stop_stid_tab(sc);
+ if (atomic_load_int(&t->tids_in_use) > 0)
+ stop_tids(sc);
+ taskqueue_enqueue(taskqueue_thread, &td->cleanup_stranded_tids);
+
+ /*
+ * L2T and atid_tab are restarted before t4_tom_restart so this assert
+ * is not valid in t4_tom_restart. This is the next best place for it.
+ */
+ MPASS(STAILQ_EMPTY(&td->unsent_wr_list));
+
+ return (0);
+}
+
+static int
+t4_tom_restart(struct adapter *sc)
+{
+ ASSERT_SYNCHRONIZED_OP(sc);
+
+ restart_stid_tab(sc);
+
+ return (0);
+}
+
static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{
@@ -1998,11 +2254,108 @@ t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
if (ulp_mode(toep) == ULP_MODE_TCPDDP ||
ulp_mode(toep) == ULP_MODE_NONE) {
error = t4_aio_queue_ddp(so, job);
- if (error != EOPNOTSUPP)
- return (error);
+ if (error == 0)
+ return (0);
+ else if (error != EOPNOTSUPP)
+ return (soaio_queue_generic(so, job));
}
- return (t4_aio_queue_aiotx(so, job));
+ if (t4_aio_queue_aiotx(so, job) != 0)
+ return (soaio_queue_generic(so, job));
+ else
+ return (0);
+}
+
+/*
+ * Request/response structure used to find out the adapter offloading
+ * a socket.
+ */
+struct find_offload_adapter_data {
+ struct socket *so;
+ struct adapter *sc; /* result */
+};
+
+static void
+find_offload_adapter_cb(struct adapter *sc, void *arg)
+{
+ struct find_offload_adapter_data *fa = arg;
+ struct socket *so = fa->so;
+ struct tom_data *td = sc->tom_softc;
+ struct tcpcb *tp;
+ struct inpcb *inp;
+
+ /* Non-TCP were filtered out earlier. */
+ MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);
+
+ if (fa->sc != NULL)
+ return; /* Found already. */
+
+ if (td == NULL)
+ return; /* TOE not enabled on this adapter. */
+
+ inp = sotoinpcb(so);
+ INP_WLOCK(inp);
+ if ((inp->inp_flags & INP_DROPPED) == 0) {
+ tp = intotcpcb(inp);
+ if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
+ fa->sc = sc; /* Found. */
+ }
+ INP_WUNLOCK(inp);
+}
+
+struct adapter *
+find_offload_adapter(struct socket *so)
+{
+ struct find_offload_adapter_data fa;
+
+ fa.sc = NULL;
+ fa.so = so;
+ t4_iterate(find_offload_adapter_cb, &fa);
+ return (fa.sc);
+}
+
+void
+send_txdataplen_max_flowc_wr(struct adapter *sc, struct toepcb *toep,
+ int maxlen)
+{
+ struct wrqe *wr;
+ struct fw_flowc_wr *flowc;
+ const u_int nparams = 1;
+ u_int flowclen;
+ struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+
+ CTR(KTR_CXGBE, "%s: tid %u maxlen=%d", __func__, toep->tid, maxlen);
+
+ flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);
+
+ wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
+ if (wr == NULL) {
+ /* XXX */
+ panic("%s: allocation failure.", __func__);
+ }
+ flowc = wrtod(wr);
+ memset(flowc, 0, wr->wr_len);
+
+ flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
+ V_FW_FLOWC_WR_NPARAMS(nparams));
+ flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
+ V_FW_WR_FLOWID(toep->tid));
+
+ flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
+ flowc->mnemval[0].val = htobe32(maxlen);
+
+ KASSERT(howmany(flowclen, 16) <= MAX_OFLD_TX_SDESC_CREDITS,
+ ("%s: tx_credits %u too large", __func__, howmany(flowclen, 16)));
+ txsd->tx_credits = howmany(flowclen, 16);
+ txsd->plen = 0;
+ KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
+ ("%s: not enough credits (%d)", __func__, toep->tx_credits));
+ toep->tx_credits -= txsd->tx_credits;
+ if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
+ toep->txsd_pidx = 0;
+ toep->txsd_avail--;
+
+ t4_wrq_tx(sc, wr);
}
static int
@@ -2027,18 +2380,20 @@ t4_tom_mod_load(void)
toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
toe6_protosw.pr_aio_queue = t4_aio_queue_tom;
- return (t4_register_uld(&tom_uld_info));
+ return (t4_register_uld(&tom_uld_info, ULD_TOM));
}
static void
-tom_uninit(struct adapter *sc, void *arg __unused)
+tom_uninit(struct adapter *sc, void *arg)
{
+ bool *ok_to_unload = arg;
+
if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
return;
/* Try to free resources (works only if no port has IFCAP_TOE) */
- if (uld_active(sc, ULD_TOM))
- t4_deactivate_uld(sc, ULD_TOM);
+ if (uld_active(sc, ULD_TOM) && t4_deactivate_uld(sc, ULD_TOM) != 0)
+ *ok_to_unload = false;
end_synchronized_op(sc, 0);
}
@@ -2046,9 +2401,13 @@ tom_uninit(struct adapter *sc, void *arg __unused)
static int
t4_tom_mod_unload(void)
{
- t4_iterate(tom_uninit, NULL);
+ bool ok_to_unload = true;
+
+ t4_iterate(tom_uninit, &ok_to_unload);
+ if (!ok_to_unload)
+ return (EBUSY);
- if (t4_unregister_uld(&tom_uld_info) == EBUSY)
+ if (t4_unregister_uld(&tom_uld_info, ULD_TOM) == EBUSY)
return (EBUSY);
t4_tls_mod_unload();
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index 805d8bc0d644..c8c2d432b8f1 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -76,6 +76,7 @@ enum {
TPF_TLS_RX_QUIESCING = (1 << 14), /* RX quiesced for TLS RX startup */
TPF_TLS_RX_QUIESCED = (1 << 15), /* RX quiesced for TLS RX startup */
TPF_WAITING_FOR_FINAL = (1 << 16), /* waiting for wakeup on final CPL */
+ TPF_IN_TOEP_LIST = (1 << 17), /* toep is in the main td->toep_list */
};
enum {
@@ -112,6 +113,7 @@ struct conn_params {
int8_t mtu_idx;
int8_t ulp_mode;
int8_t tx_align;
+ int8_t ctrlq_idx; /* ctrlq = &sc->sge.ctrlq[ctrlq_idx] */
int16_t txq_idx; /* ofld_txq = &sc->sge.ofld_txq[txq_idx] */
int16_t rxq_idx; /* ofld_rxq = &sc->sge.ofld_rxq[rxq_idx] */
int16_t l2t_idx;
@@ -121,10 +123,13 @@ struct conn_params {
};
struct ofld_tx_sdesc {
- uint32_t plen; /* payload length */
- uint8_t tx_credits; /* firmware tx credits (unit is 16B) */
+ uint32_t plen : 26; /* payload length */
+ uint32_t tx_credits : 6; /* firmware tx credits (unit is 16B) */
};
+#define MAX_OFLD_TX_SDESC_PLEN ((1u << 26) - 1)
+#define MAX_OFLD_TX_SDESC_CREDITS ((1u << 6) - 1)
+
struct ppod_region {
u_int pr_start;
u_int pr_len;
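/*
 * Illustrative aside (not part of the patch): a standalone check
 * that the two bit-fields above pack into a single 32-bit word on
 * common ABIs and that the new MAX_* limits match their widths.
 */
#include <assert.h>
#include <stdint.h>

struct sdesc_model {		/* mirrors ofld_tx_sdesc */
	uint32_t plen : 26;
	uint32_t tx_credits : 6;
};

int
main(void)
{
	struct sdesc_model sd;

	_Static_assert(sizeof(struct sdesc_model) == sizeof(uint32_t),
	    "fields must share one word");
	sd.plen = (1u << 26) - 1;	/* MAX_OFLD_TX_SDESC_PLEN */
	sd.tx_credits = (1u << 6) - 1;	/* MAX_OFLD_TX_SDESC_CREDITS */
	assert(sd.plen == (1u << 26) - 1);
	assert(sd.tx_credits == (1u << 6) - 1);
	return (0);
}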
@@ -210,7 +215,7 @@ struct toepcb {
struct tom_data *td;
struct inpcb *inp; /* backpointer to host stack's PCB */
u_int flags; /* miscellaneous flags */
- TAILQ_ENTRY(toepcb) link; /* toep_list */
+ TAILQ_ENTRY(toepcb) link; /* toep_list or stranded_toep_list */
int refcount;
struct vnet *vnet;
struct vi_info *vi; /* virtual interface */
@@ -220,6 +225,7 @@ struct toepcb {
struct l2t_entry *l2te; /* L2 table entry used by this connection */
struct clip_entry *ce; /* CLIP table entry used by this tid */
int tid; /* Connection identifier */
+ int incarnation; /* sc->incarnation when toepcb was allocated */
/* tx credit handling */
u_int tx_total; /* total tx WR credits (in 16B units) */
@@ -269,6 +275,7 @@ struct synq_entry {
struct listen_ctx *lctx; /* backpointer to listen ctx */
struct mbuf *syn;
int flags; /* same as toepcb's tp_flags */
+ TAILQ_ENTRY(synq_entry) link; /* synqe_list */
volatile int ok_to_respond;
volatile u_int refcnt;
int tid;
@@ -277,6 +284,7 @@ struct synq_entry {
uint32_t ts;
uint32_t rss_hash;
__be16 tcp_opt; /* from cpl_pass_establish */
+ int incarnation;
struct toepcb *toep;
struct conn_params params;
@@ -284,13 +292,14 @@ struct synq_entry {
/* listen_ctx flags */
#define LCTX_RPL_PENDING 1 /* waiting for a CPL_PASS_OPEN_RPL */
+#define LCTX_SETUP_IN_HW 2 /* stid entry is setup in hardware */
struct listen_ctx {
LIST_ENTRY(listen_ctx) link; /* listen hash linkage */
volatile int refcount;
int stid;
- struct stid_region stid_region;
int flags;
+ bool isipv6;
struct inpcb *inp; /* listening socket's inp */
struct vnet *vnet;
struct sge_wrq *ctrlq;
@@ -329,6 +338,12 @@ struct tom_data {
/* toepcb's associated with this TOE device */
struct mtx toep_list_lock;
TAILQ_HEAD(, toepcb) toep_list;
+ TAILQ_HEAD(, synq_entry) synqe_list;
+ /* List of tids left stranded because hw stopped abruptly. */
+ TAILQ_HEAD(, toepcb) stranded_atids;
+ TAILQ_HEAD(, toepcb) stranded_tids;
+ TAILQ_HEAD(, synq_entry) stranded_synqe;
+ struct task cleanup_stranded_tids;
struct mtx lctx_hash_lock;
LIST_HEAD(, listen_ctx) *listen_hash;
@@ -463,18 +478,22 @@ int select_rcv_wscale(void);
void init_conn_params(struct vi_info *, struct offload_settings *,
struct in_conninfo *, struct socket *, const struct tcp_options *, int16_t,
struct conn_params *cp);
+void update_tid_qid_sel(struct vi_info *, struct conn_params *, int);
__be64 calc_options0(struct vi_info *, struct conn_params *);
__be32 calc_options2(struct vi_info *, struct conn_params *);
uint64_t select_ntuple(struct vi_info *, struct l2t_entry *);
int negative_advice(int);
int add_tid_to_history(struct adapter *, u_int);
+struct adapter *find_offload_adapter(struct socket *);
+void send_txdataplen_max_flowc_wr(struct adapter *, struct toepcb *, int);
+void t4_pcb_detach(struct toedev *, struct tcpcb *);
/* t4_connect.c */
void t4_init_connect_cpl_handlers(void);
void t4_uninit_connect_cpl_handlers(void);
int t4_connect(struct toedev *, struct socket *, struct nhop_object *,
struct sockaddr *);
-void act_open_failure_cleanup(struct adapter *, u_int, u_int);
+void act_open_failure_cleanup(struct adapter *, struct toepcb *, u_int);
/* t4_listen.c */
void t4_init_listen_cpl_handlers(void);
@@ -489,7 +508,11 @@ int do_abort_req_synqe(struct sge_iq *, const struct rss_header *,
int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
struct mbuf *);
void t4_offload_socket(struct toedev *, void *, struct socket *);
-void synack_failure_cleanup(struct adapter *, int);
+void synack_failure_cleanup(struct adapter *, struct synq_entry *);
+int alloc_stid_tab(struct adapter *);
+void free_stid_tab(struct adapter *);
+void stop_stid_tab(struct adapter *);
+void restart_stid_tab(struct adapter *);
/* t4_cpl_io.c */
void aiotx_init_toep(struct toepcb *);
@@ -509,8 +532,11 @@ int t4_send_fin(struct toedev *, struct tcpcb *);
int t4_send_rst(struct toedev *, struct tcpcb *);
void t4_set_tcb_field(struct adapter *, struct sge_wrq *, struct toepcb *,
uint16_t, uint64_t, uint64_t, int, int);
-void t4_push_frames(struct adapter *, struct toepcb *, int);
void t4_push_pdus(struct adapter *, struct toepcb *, int);
+bool t4_push_raw_wr(struct adapter *, struct toepcb *, struct mbuf *);
+void t4_raw_wr_tx(struct adapter *, struct toepcb *, struct mbuf *);
+void write_set_tcb_field(struct adapter *, void *, struct toepcb *, uint16_t,
+ uint64_t, uint64_t, int, int);
/* t4_ddp.c */
int t4_init_ppod_region(struct ppod_region *, struct t4_range *, u_int,
@@ -536,6 +562,7 @@ int t4_aio_queue_ddp(struct socket *, struct kaiocb *);
int t4_enable_ddp_rcv(struct socket *, struct toepcb *);
void t4_ddp_mod_load(void);
void t4_ddp_mod_unload(void);
+struct mbuf *alloc_raw_wr_mbuf(int);
void ddp_assert_empty(struct toepcb *);
void ddp_uninit_toep(struct toepcb *);
void ddp_queue_toep(struct toepcb *);
@@ -559,4 +586,10 @@ int tls_tx_key(struct toepcb *);
void tls_uninit_toep(struct toepcb *);
int tls_alloc_ktls(struct toepcb *, struct ktls_session *, int);
+/* t4_tpt.c */
+uint32_t t4_pblpool_alloc(struct adapter *, int);
+void t4_pblpool_free(struct adapter *, uint32_t, int);
+int t4_pblpool_create(struct adapter *);
+void t4_pblpool_destroy(struct adapter *);
+
#endif
diff --git a/sys/dev/cxgbe/tom/t4_tom_l2t.c b/sys/dev/cxgbe/tom/t4_tom_l2t.c
index c397196d9cca..e245c2b6fd5b 100644
--- a/sys/dev/cxgbe/tom/t4_tom_l2t.c
+++ b/sys/dev/cxgbe/tom/t4_tom_l2t.c
@@ -212,20 +212,18 @@ update_entry(struct adapter *sc, struct l2t_entry *e, uint8_t *lladdr,
e->state = L2T_STATE_STALE;
- } else {
-
- if (e->state == L2T_STATE_RESOLVING ||
- e->state == L2T_STATE_FAILED ||
- memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {
+ } else if (e->state == L2T_STATE_RESOLVING ||
+ e->state == L2T_STATE_FAILED ||
+ memcmp(e->dmac, lladdr, ETHER_ADDR_LEN)) {
- /* unresolved -> resolved; or dmac changed */
+ /* unresolved -> resolved; or dmac changed */
- memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
- e->vlan = vtag;
- t4_write_l2e(e, 1);
- }
+ memcpy(e->dmac, lladdr, ETHER_ADDR_LEN);
+ e->vlan = vtag;
+ if (t4_write_l2e(e, 1) == 0)
+ e->state = L2T_STATE_VALID;
+ } else
e->state = L2T_STATE_VALID;
- }
}
static int
@@ -291,7 +289,10 @@ again:
mtx_unlock(&e->lock);
goto again;
}
- arpq_enqueue(e, wr);
+ if (!hw_all_ok(sc))
+ free(wr, M_CXGBE);
+ else
+ arpq_enqueue(e, wr);
mtx_unlock(&e->lock);
if (resolve_entry(sc, e) == EWOULDBLOCK)
@@ -318,18 +319,23 @@ do_l2t_write_rpl2(struct sge_iq *iq, const struct rss_header *rss,
{
struct adapter *sc = iq->adapter;
const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
- unsigned int tid = GET_TID(rpl);
- unsigned int idx = tid % L2T_SIZE;
+ const u_int hwidx = GET_TID(rpl) & ~(F_SYNC_WR | V_TID_QID(M_TID_QID));
+ const bool sync = GET_TID(rpl) & F_SYNC_WR;
+
+ MPASS(iq->abs_id == G_TID_QID(GET_TID(rpl)));
- if (__predict_false(rpl->status != CPL_ERR_NONE)) {
- log(LOG_ERR,
- "Unexpected L2T_WRITE_RPL (%u) for entry at hw_idx %u\n",
- rpl->status, idx);
+ if (__predict_false(hwidx < sc->vres.l2t.start) ||
+ __predict_false(hwidx >= sc->vres.l2t.start + sc->vres.l2t.size) ||
+ __predict_false(rpl->status != CPL_ERR_NONE)) {
+ CH_ERR(sc, "%s: hwidx %u, rpl %u, sync %u; L2T st %u, sz %u\n",
+ __func__, hwidx, rpl->status, sync, sc->vres.l2t.start,
+ sc->vres.l2t.size);
return (EINVAL);
}
- if (tid & F_SYNC_WR) {
- struct l2t_entry *e = &sc->l2t->l2tab[idx - sc->vres.l2t.start];
+ if (sync) {
+ const u_int idx = hwidx - sc->vres.l2t.start;
+ struct l2t_entry *e = &sc->l2t->l2tab[idx];
mtx_lock(&e->lock);
if (e->state != L2T_STATE_SWITCHING) {
@@ -355,7 +361,7 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
struct l2t_entry *e;
struct adapter *sc = pi->adapter;
struct l2t_data *d = sc->l2t;
- u_int hash, smt_idx = pi->port_id;
+ u_int hash;
uint16_t vid, pcp, vtag;
KASSERT(sa->sa_family == AF_INET || sa->sa_family == AF_INET6,
@@ -375,9 +381,12 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
hash = l2_hash(d, sa, if_getindex(ifp));
rw_wlock(&d->lock);
+ if (__predict_false(d->l2t_stopped)) {
+ e = NULL;
+ goto done;
+ }
for (e = d->l2tab[hash].first; e; e = e->next) {
- if (l2_cmp(sa, e) == 0 && e->ifp == ifp && e->vlan == vtag &&
- e->smt_idx == smt_idx) {
+ if (l2_cmp(sa, e) == 0 && e->ifp == ifp && e->vlan == vtag) {
l2t_hold(d, e);
goto done;
}
@@ -393,9 +402,8 @@ t4_l2t_get(struct port_info *pi, if_t ifp, struct sockaddr *sa)
e->state = L2T_STATE_RESOLVING;
l2_store(sa, e);
e->ifp = ifp;
- e->smt_idx = smt_idx;
e->hash = hash;
- e->lport = pi->lport;
+ e->hw_port = pi->hw_port;
e->wrq = &sc->sge.ctrlq[pi->port_id];
e->iqid = sc->sge.ofld_rxq[pi->vi[0].first_ofld_rxq].iq.abs_id;
atomic_store_rel_int(&e->refcnt, 1);
@@ -424,16 +432,20 @@ t4_l2_update(struct toedev *tod, if_t ifp, struct sockaddr *sa,
hash = l2_hash(d, sa, if_getindex(ifp));
rw_rlock(&d->lock);
+ if (__predict_false(d->l2t_stopped))
+ goto done;
for (e = d->l2tab[hash].first; e; e = e->next) {
if (l2_cmp(sa, e) == 0 && e->ifp == ifp) {
mtx_lock(&e->lock);
if (atomic_load_acq_int(&e->refcnt))
goto found;
- e->state = L2T_STATE_STALE;
+ if (e->state == L2T_STATE_VALID)
+ e->state = L2T_STATE_STALE;
mtx_unlock(&e->lock);
break;
}
}
+done:
rw_runlock(&d->lock);
/*
diff --git a/sys/dev/cyapa/cyapa.c b/sys/dev/cyapa/cyapa.c
index 307cd4d35b2e..ed755f992949 100644
--- a/sys/dev/cyapa/cyapa.c
+++ b/sys/dev/cyapa/cyapa.c
@@ -761,42 +761,60 @@ again:
/*
* Generate report
*/
- c0 = 0;
- if (delta_x < 0)
- c0 |= 0x10;
- if (delta_y < 0)
- c0 |= 0x20;
- c0 |= 0x08;
- if (but & CYAPA_FNGR_LEFT)
- c0 |= 0x01;
- if (but & CYAPA_FNGR_MIDDLE)
- c0 |= 0x04;
- if (but & CYAPA_FNGR_RIGHT)
- c0 |= 0x02;
-
- fifo_write_char(sc, &sc->rfifo, c0);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
- switch(sc->zenabled) {
- case 1:
- /* Z axis all 8 bits */
- fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
- break;
- case 2:
- /*
- * Z axis low 4 bits + 4th button and 5th button
- * (high 2 bits must be left 0). Auto-scale
- * delta_z to fit to avoid a wrong-direction
- * overflow (don't try to retain the remainder).
- */
- while (delta_z > 7 || delta_z < -8)
- delta_z >>= 1;
- c0 = (uint8_t)delta_z & 0x0F;
+ if (sc->mode.level == 1) {
+ c0 = MOUSE_SYS_SYNC;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= MOUSE_SYS_BUTTON1UP;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= MOUSE_SYS_BUTTON2UP;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= MOUSE_SYS_BUTTON3UP;
fifo_write_char(sc, &sc->rfifo, c0);
- break;
- default:
- /* basic PS/2 */
- break;
+ fifo_write_char(sc, &sc->rfifo, delta_x >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_y >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_x - (delta_x >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_y - (delta_y >> 1));
+ fifo_write_char(sc, &sc->rfifo, delta_z >> 1);
+ fifo_write_char(sc, &sc->rfifo, delta_z - (delta_z >> 1));
+ fifo_write_char(sc, &sc->rfifo, MOUSE_SYS_EXTBUTTONS);
+ } else {
+ c0 = 0;
+ if (delta_x < 0)
+ c0 |= 0x10;
+ if (delta_y < 0)
+ c0 |= 0x20;
+ c0 |= 0x08;
+ if (but & CYAPA_FNGR_LEFT)
+ c0 |= 0x01;
+ if (but & CYAPA_FNGR_MIDDLE)
+ c0 |= 0x04;
+ if (but & CYAPA_FNGR_RIGHT)
+ c0 |= 0x02;
+
+ fifo_write_char(sc, &sc->rfifo, c0);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_x);
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_y);
+ switch (sc->zenabled) {
+ case 1:
+ /* Z axis all 8 bits */
+ fifo_write_char(sc, &sc->rfifo, (uint8_t)delta_z);
+ break;
+ case 2:
+ /*
+ * Z axis low 4 bits + 4th button and 5th button
+ * (high 2 bits must be left 0). Auto-scale
+ * delta_z to fit to avoid a wrong-direction
+ * overflow (don't try to retain the remainder).
+ */
+ while (delta_z > 7 || delta_z < -8)
+ delta_z >>= 1;
+ c0 = (uint8_t)delta_z & 0x0F;
+ fifo_write_char(sc, &sc->rfifo, c0);
+ break;
+ default:
+ /* basic PS/2 */
+ break;
+ }
}
cyapa_notify(sc);
}
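/*
 * Illustrative aside (not part of the patch): the sysmouse path
 * above splits each delta into (d >> 1) and (d - (d >> 1)) so the
 * two bytes always sum back to d exactly, including for odd and
 * negative values.  A quick standalone self-check:
 */
#include <assert.h>
#include <stdlib.h>

int
main(void)
{
	int d, a, b;

	for (d = -512; d <= 512; d++) {
		a = d >> 1;		/* rounds toward minus infinity */
		b = d - a;
		assert(a + b == d);
		assert(abs(a - b) <= 1);	/* halves stay balanced */
	}
	return (0);
}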
@@ -1100,7 +1118,7 @@ again:
static void cyapafiltdetach(struct knote *);
static int cyapafilt(struct knote *, long);
-static struct filterops cyapa_filtops = {
+static const struct filterops cyapa_filtops = {
.f_isfd = 1,
.f_detach = cyapafiltdetach,
.f_event = cyapafilt
@@ -1205,6 +1223,11 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
((mousemode_t *)data)->packetsize =
MOUSE_PS2_PACKETSIZE;
break;
+ case 1:
+ ((mousemode_t *)data)->protocol = MOUSE_PROTO_SYSMOUSE;
+ ((mousemode_t *)data)->packetsize =
+ MOUSE_SYS_PACKETSIZE;
+ break;
case 2:
((mousemode_t *)data)->protocol = MOUSE_PROTO_PS2;
((mousemode_t *)data)->packetsize =
@@ -1223,7 +1246,7 @@ cyapaioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread
error = EINVAL;
break;
}
- sc->mode.level = *(int *)data ? 2 : 0;
+ sc->mode.level = *(int *)data;
sc->zenabled = sc->mode.level ? 1 : 0;
break;
diff --git a/sys/dev/dc/if_dc.c b/sys/dev/dc/if_dc.c
index ddb81d8a486c..bed74c3b6181 100644
--- a/sys/dev/dc/if_dc.c
+++ b/sys/dev/dc/if_dc.c
@@ -2380,11 +2380,6 @@ dc_attach(device_t dev)
goto fail;
ifp = sc->dc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -2544,8 +2539,6 @@ dc_detach(device_t dev)
callout_drain(&sc->dc_wdog_ch);
ether_ifdetach(ifp);
}
- if (sc->dc_miibus)
- device_delete_child(dev, sc->dc_miibus);
bus_generic_detach(dev);
if (sc->dc_intrhand)
diff --git a/sys/dev/dcons/dcons_os.c b/sys/dev/dcons/dcons_os.c
index 0d96ab51659b..4e34659fb3e8 100644
--- a/sys/dev/dcons/dcons_os.c
+++ b/sys/dev/dcons/dcons_os.c
@@ -442,7 +442,7 @@ dcons_modevent(module_t mode, int type, void *data)
dcons_detach(DCONS_GDB);
dg.buf->magic = 0;
- contigfree(dg.buf, DCONS_BUF_SIZE, M_DEVBUF);
+ free(dg.buf, M_DEVBUF);
}
/* Wait for tty deferred free callbacks to complete. */
diff --git a/sys/dev/dpaa/bman_portals.c b/sys/dev/dpaa/bman_portals.c
index 09bd9ea892ac..10c788410e1c 100644
--- a/sys/dev/dpaa/bman_portals.c
+++ b/sys/dev/dpaa/bman_portals.c
@@ -67,7 +67,8 @@ bman_portals_attach(device_t dev)
/* Set portal properties for XX_VirtToPhys() */
XX_PortalSetInfo(dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
int
diff --git a/sys/dev/dpaa/fman.c b/sys/dev/dpaa/fman.c
index 9dc4ac151789..6f63e50f3a14 100644
--- a/sys/dev/dpaa/fman.c
+++ b/sys/dev/dpaa/fman.c
@@ -473,7 +473,8 @@ fman_attach(device_t dev)
goto err;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
err:
fman_detach(dev);
diff --git a/sys/dev/dpaa/if_dtsec.c b/sys/dev/dpaa/if_dtsec.c
index b5899a340f98..a5f9955061a4 100644
--- a/sys/dev/dpaa/if_dtsec.c
+++ b/sys/dev/dpaa/if_dtsec.c
@@ -708,12 +708,6 @@ dtsec_attach(device_t dev)
/* Create network interface for upper layers */
ifp = sc->sc_ifnet = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "if_alloc() failed.\n");
- dtsec_detach(dev);
- return (ENOMEM);
- }
-
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST);
diff --git a/sys/dev/dpaa/qman_portals.c b/sys/dev/dpaa/qman_portals.c
index 67a9fd1827e4..0f00a9f1a173 100644
--- a/sys/dev/dpaa/qman_portals.c
+++ b/sys/dev/dpaa/qman_portals.c
@@ -72,7 +72,8 @@ qman_portals_attach(device_t dev)
/* Set portal properties for XX_VirtToPhys() */
XX_PortalSetInfo(dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
int
diff --git a/sys/dev/dpaa2/dpaa2_channel.c b/sys/dev/dpaa2/dpaa2_channel.c
index 87b76923a16d..654c6f2baf70 100644
--- a/sys/dev/dpaa2/dpaa2_channel.c
+++ b/sys/dev/dpaa2/dpaa2_channel.c
@@ -146,12 +146,6 @@ dpaa2_chan_setup(device_t dev, device_t iodev, device_t condev, device_t bpdev,
}
ch = malloc(sizeof(struct dpaa2_channel), M_DPAA2_CH, M_WAITOK | M_ZERO);
- if (ch == NULL) {
- device_printf(dev, "%s: malloc() failed\n", __func__);
- error = ENOMEM;
- goto fail_malloc;
- }
-
ch->ni_dev = dev;
ch->io_dev = iodev;
ch->con_dev = condev;
@@ -281,7 +275,6 @@ fail_dma_setup:
/* taskqueue_drain(ch->cleanup_tq, &ch->cleanup_task); */
/* } */
/* taskqueue_free(ch->cleanup_tq); */
-fail_malloc:
(void)DPAA2_CMD_CON_DISABLE(dev, child, DPAA2_CMD_TK(&cmd, contk));
fail_con_enable:
(void)DPAA2_CMD_CON_CLOSE(dev, child, DPAA2_CMD_TK(&cmd, contk));
diff --git a/sys/dev/dpaa2/dpaa2_mc.c b/sys/dev/dpaa2/dpaa2_mc.c
index 66867a18068c..8abfc3bfe1cc 100644
--- a/sys/dev/dpaa2/dpaa2_mc.c
+++ b/sys/dev/dpaa2/dpaa2_mc.c
@@ -268,8 +268,8 @@ dpaa2_mc_attach(device_t dev)
dpaa2_mc_detach(dev);
return (ENXIO);
}
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -281,22 +281,18 @@ dpaa2_mc_detach(device_t dev)
struct dpaa2_devinfo *dinfo = NULL;
int error;
- bus_generic_detach(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
- if (sc->rcdev)
- device_delete_child(dev, sc->rcdev);
bus_release_resources(dev, dpaa2_mc_spec, sc->res);
dinfo = device_get_ivars(dev);
if (dinfo)
free(dinfo, M_DPAA2_MC);
- error = bus_generic_detach(dev);
- if (error != 0)
- return (error);
-
- return (device_delete_children(dev));
+ return (0);
}
/*
@@ -462,8 +458,6 @@ dpaa2_mc_manage_dev(device_t mcdev, device_t dpaa2_dev, uint32_t flags)
return (EINVAL);
di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
- if (!di)
- return (ENOMEM);
di->dpaa2_dev = dpaa2_dev;
di->flags = flags;
di->owners = 0;
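Editor's note: several hunks here (and in the drm2 files below) delete NULL checks after malloc(9) calls. That is safe because M_WAITOK allocations sleep until memory is available and never return NULL, so the removed error paths were dead code:

	/* M_WAITOK can sleep but cannot fail; no NULL check is needed. */
	di = malloc(sizeof(*di), M_DPAA2_MC, M_WAITOK | M_ZERO);
	di->dpaa2_dev = dpaa2_dev;	/* safe to dereference immediately */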
diff --git a/sys/dev/dpaa2/dpaa2_mc_acpi.c b/sys/dev/dpaa2/dpaa2_mc_acpi.c
index 2007c18bec67..55c1c0d5b12e 100644
--- a/sys/dev/dpaa2/dpaa2_mc_acpi.c
+++ b/sys/dev/dpaa2/dpaa2_mc_acpi.c
@@ -46,6 +46,7 @@
#include <machine/resource.h>
#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
#include <dev/acpica/acpivar.h>
#include "acpi_bus_if.h"
@@ -56,6 +57,9 @@
#include "dpaa2_mc.h"
#include "dpaa2_mc_if.h"
+#define _COMPONENT ACPI_BUS
+ACPI_MODULE_NAME("DPAA2_MC")
+
struct dpaa2_mac_dev_softc {
int uid;
uint64_t reg;
@@ -225,7 +229,7 @@ dpaa2_mc_acpi_probe_child(ACPI_HANDLE h, device_t *dev, int level, void *arg)
if ((ad = malloc(sizeof(*ad), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
return (AE_OK);
- child = device_add_child(ctx->dev, "dpaa2_mac_dev", -1);
+ child = device_add_child(ctx->dev, "dpaa2_mac_dev", DEVICE_UNIT_ANY);
if (child == NULL) {
free(ad, M_DEVBUF);
return (AE_OK);
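Editor's note: the bare `-1` unit argument to device_add_child() is being replaced by the named constant DEVICE_UNIT_ANY throughout this batch; both ask newbus to pick the next free unit number, the constant just makes the intent explicit:

	/* Let newbus choose the unit number instead of passing a bare -1. */
	child = device_add_child(ctx->dev, "dpaa2_mac_dev", DEVICE_UNIT_ANY);
	if (child == NULL)
		return (AE_OK);	/* skip this node, keep scanning */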
diff --git a/sys/dev/dpaa2/dpaa2_mc_fdt.c b/sys/dev/dpaa2/dpaa2_mc_fdt.c
index a6babfc89ca9..a571f4cf7219 100644
--- a/sys/dev/dpaa2/dpaa2_mc_fdt.c
+++ b/sys/dev/dpaa2/dpaa2_mc_fdt.c
@@ -253,11 +253,12 @@ dpaa2_mc_fdt_attach(device_t dev)
sc->acpi_based = false;
sc->ofw_node = ofw_bus_get_node(dev);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
+
/*
* Attach the children represented in the device tree.
*/
diff --git a/sys/dev/dpaa2/dpaa2_ni.c b/sys/dev/dpaa2/dpaa2_ni.c
index a9e6aa120549..698b440376e3 100644
--- a/sys/dev/dpaa2/dpaa2_ni.c
+++ b/sys/dev/dpaa2/dpaa2_ni.c
@@ -138,8 +138,9 @@ MALLOC_DEFINE(M_DPAA2_TXB, "dpaa2_txb", "DPAA2 DMA-mapped buffer (Tx)");
#define DPNI_IRQ_LINK_CHANGED 1 /* Link state changed */
#define DPNI_IRQ_EP_CHANGED 2 /* DPAA2 endpoint dis/connected */
-/* Default maximum frame length. */
-#define DPAA2_ETH_MFL (ETHER_MAX_LEN - ETHER_CRC_LEN)
+/* Default maximum RX frame length w/o CRC. */
+#define DPAA2_ETH_MFL (ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN - \
+ ETHER_CRC_LEN)
/* Minimally supported version of the DPNI API. */
#define DPNI_VER_MAJOR 7
@@ -548,11 +549,6 @@ dpaa2_ni_attach(device_t dev)
/* Allocate network interface */
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "%s: failed to allocate network interface\n",
- __func__);
- goto err_exit;
- }
sc->ifp = ifp;
if_initname(ifp, DPAA2_NI_IFNAME, device_get_unit(sc->dev));
@@ -593,11 +589,6 @@ dpaa2_ni_attach(device_t dev)
/* Create a taskqueue thread to release new buffers to the pool. */
sc->bp_taskq = taskqueue_create(tq_name, M_WAITOK,
taskqueue_thread_enqueue, &sc->bp_taskq);
- if (sc->bp_taskq == NULL) {
- device_printf(dev, "%s: failed to allocate task queue: %s\n",
- __func__, tq_name);
- goto close_ni;
- }
taskqueue_start_threads(&sc->bp_taskq, 1, PI_NET, "%s", tq_name);
/* sc->cleanup_taskq = taskqueue_create("dpaa2_ch cleanup", M_WAITOK, */
@@ -1344,21 +1335,11 @@ dpaa2_ni_setup_tx_flow(device_t dev, struct dpaa2_ni_fq *fq)
for (uint64_t j = 0; j < DPAA2_NI_BUFS_PER_TX; j++) {
buf = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
M_WAITOK);
- if (buf == NULL) {
- device_printf(dev, "%s: malloc() failed (buf)\n",
- __func__);
- return (ENOMEM);
- }
/* Keep DMA tag and Tx ring linked to the buffer */
DPAA2_BUF_INIT_TAGOPT(buf, ch->tx_dmat, tx);
buf->sgt = malloc(sizeof(struct dpaa2_buf), M_DPAA2_TXB,
M_WAITOK);
- if (buf->sgt == NULL) {
- device_printf(dev, "%s: malloc() failed (sgt)\n",
- __func__);
- return (ENOMEM);
- }
/* Link SGT to DMA tag and back to its Tx buffer */
DPAA2_BUF_INIT_TAGOPT(buf->sgt, ch->sgt_dmat, buf);
@@ -2581,8 +2562,10 @@ dpaa2_ni_ioctl(if_t ifp, u_long c, caddr_t data)
DPNI_UNLOCK(sc);
/* Update maximum frame length. */
- error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd,
- mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
+ mtu += ETHER_HDR_LEN;
+ if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
+ mtu += ETHER_VLAN_ENCAP_LEN;
+ error = DPAA2_CMD_NI_SET_MFL(dev, child, &cmd, mtu);
if (error) {
device_printf(dev, "%s: failed to update maximum frame "
"length: error=%d\n", __func__, error);
@@ -2953,6 +2936,8 @@ dpaa2_ni_tx(struct dpaa2_ni_softc *sc, struct dpaa2_channel *ch,
KASSERT(btx->fq->chan == ch, ("%s: unexpected channel", __func__));
#endif /* INVARIANTS */
+ BPF_MTAP(sc->ifp, m);
+
error = bus_dmamap_load_mbuf_sg(buf->dmat, buf->dmap, m, segs, &nsegs,
BUS_DMA_NOWAIT);
if (__predict_false(error != 0)) {
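Editor's note: the ioctl hunk above now sizes the maximum frame length from the requested MTU plus only the headers that are actually enabled. A worked example for a standard 1500-byte MTU (ETHER_HDR_LEN is 14 bytes, ETHER_VLAN_ENCAP_LEN is 4):

	/*
	 * MTU 1500, IFCAP_VLAN_MTU enabled:
	 *   1500 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4) = 1518
	 * MTU 1500, IFCAP_VLAN_MTU disabled:
	 *   1500 + ETHER_HDR_LEN (14)                            = 1514
	 */
	mtu += ETHER_HDR_LEN;
	if (if_getcapenable(ifp) & IFCAP_VLAN_MTU)
		mtu += ETHER_VLAN_ENCAP_LEN;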
diff --git a/sys/dev/dpaa2/dpaa2_rc.c b/sys/dev/dpaa2/dpaa2_rc.c
index 49ed8944b64b..3cb2fdfeaa2e 100644
--- a/sys/dev/dpaa2/dpaa2_rc.c
+++ b/sys/dev/dpaa2/dpaa2_rc.c
@@ -114,7 +114,7 @@ dpaa2_rc_detach(device_t dev)
if (dinfo)
free(dinfo, M_DPAA2_RC);
- return (device_delete_children(dev));
+ return (0);
}
static int
@@ -2826,12 +2826,8 @@ dpaa2_rc_discover(struct dpaa2_rc_softc *sc)
dpaa2_rc_add_managed_child(sc, &cmd, &obj);
}
/* Probe and attach MC portals. */
- bus_generic_probe(rcdev);
- rc = bus_generic_attach(rcdev);
- if (rc) {
- DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd);
- return (rc);
- }
+ bus_identify_children(rcdev);
+ bus_attach_children(rcdev);
/* Add managed devices (except DPMCPs) to the resource container. */
for (uint32_t i = 0; i < obj_count; i++) {
@@ -2854,12 +2850,8 @@ dpaa2_rc_discover(struct dpaa2_rc_softc *sc)
dpaa2_rc_add_managed_child(sc, &cmd, &obj);
}
/* Probe and attach managed devices properly. */
- bus_generic_probe(rcdev);
- rc = bus_generic_attach(rcdev);
- if (rc) {
- DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd);
- return (rc);
- }
+ bus_identify_children(rcdev);
+ bus_attach_children(rcdev);
/* Add other devices to the resource container. */
for (uint32_t i = 0; i < obj_count; i++) {
@@ -2879,8 +2871,9 @@ dpaa2_rc_discover(struct dpaa2_rc_softc *sc)
DPAA2_CMD_RC_CLOSE(rcdev, child, &cmd);
/* Probe and attach the rest of devices. */
- bus_generic_probe(rcdev);
- return (bus_generic_attach(rcdev));
+ bus_identify_children(rcdev);
+ bus_attach_children(rcdev);
+ return (0);
}
/**
@@ -2912,7 +2905,7 @@ dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
}
/* Add a device for the DPAA2 object. */
- dev = device_add_child(rcdev, devclass, -1);
+ dev = device_add_child(rcdev, devclass, DEVICE_UNIT_ANY);
if (dev == NULL) {
device_printf(rcdev, "%s: failed to add a device for DPAA2 "
"object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
@@ -2986,8 +2979,8 @@ dpaa2_rc_add_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
* function is supposed to discover such managed objects in the resource
* container and add them as children to perform a proper initialization.
*
- * NOTE: It must be called together with bus_generic_probe() and
- * bus_generic_attach() before dpaa2_rc_add_child().
+ * NOTE: It must be called together with bus_identify_children() and
+ * bus_attach_children() before dpaa2_rc_add_child().
*/
static int
dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
@@ -3038,7 +3031,7 @@ dpaa2_rc_add_managed_child(struct dpaa2_rc_softc *sc, struct dpaa2_cmd *cmd,
}
/* Add a device for the DPAA2 object. */
- dev = device_add_child(rcdev, devclass, -1);
+ dev = device_add_child(rcdev, devclass, DEVICE_UNIT_ANY);
if (dev == NULL) {
device_printf(rcdev, "%s: failed to add a device for DPAA2 "
"object: type=%s, id=%u\n", __func__, dpaa2_ttos(obj->type),
diff --git a/sys/dev/dpaa2/memac_mdio_acpi.c b/sys/dev/dpaa2/memac_mdio_acpi.c
index 8040b636d06b..dc08715343e0 100644
--- a/sys/dev/dpaa2/memac_mdio_acpi.c
+++ b/sys/dev/dpaa2/memac_mdio_acpi.c
@@ -227,7 +227,7 @@ memac_mdio_acpi_probe_child(ACPI_HANDLE h, device_t *dev, int level, void *arg)
if ((ad = malloc(sizeof(*ad), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL)
return (AE_OK);
- child = device_add_child(ctx->dev, "memacphy_acpi", -1);
+ child = device_add_child(ctx->dev, "memacphy_acpi", DEVICE_UNIT_ANY);
if (child == NULL) {
free(ad, M_DEVBUF);
return (AE_OK);
@@ -262,8 +262,8 @@ memac_mdio_acpi_attach(device_t dev)
ACPI_SCAN_CHILDREN(device_get_parent(dev), dev, 1,
memac_mdio_acpi_probe_child, &ctx);
if (ctx.countok > 0) {
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
}
return (0);
diff --git a/sys/dev/dpaa2/memac_mdio_fdt.c b/sys/dev/dpaa2/memac_mdio_fdt.c
index 18643522ed8a..247a70c6545c 100644
--- a/sys/dev/dpaa2/memac_mdio_fdt.c
+++ b/sys/dev/dpaa2/memac_mdio_fdt.c
@@ -238,7 +238,7 @@ memac_mdio_fdt_attach(device_t dev)
return (error);
/* Attach the *phy* children represented in the device tree. */
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
node = ofw_bus_get_node(dev);
simplebus_init(dev, node);
diff --git a/sys/dev/drm2/drm_buffer.c b/sys/dev/drm2/drm_buffer.c
index 8a674397262e..8069f2c8c4c6 100644
--- a/sys/dev/drm2/drm_buffer.c
+++ b/sys/dev/drm2/drm_buffer.c
@@ -50,45 +50,15 @@ int drm_buffer_alloc(struct drm_buffer **buf, int size)
* variable sized */
*buf = malloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
DRM_MEM_DRIVER, M_ZERO | M_WAITOK);
-
- if (*buf == NULL) {
- DRM_ERROR("Failed to allocate drm buffer object to hold"
- " %d bytes in %d pages.\n",
- size, nr_pages);
- return -ENOMEM;
- }
-
(*buf)->size = size;
for (idx = 0; idx < nr_pages; ++idx) {
-
(*buf)->data[idx] =
malloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
DRM_MEM_DRIVER, M_WAITOK);
-
-
- if ((*buf)->data[idx] == NULL) {
- DRM_ERROR("Failed to allocate %dth page for drm"
- " buffer with %d bytes and %d pages.\n",
- idx + 1, size, nr_pages);
- goto error_out;
- }
-
}
return 0;
-
-error_out:
-
- /* Only last element can be null pointer so check for it first. */
- if ((*buf)->data[idx])
- free((*buf)->data[idx], DRM_MEM_DRIVER);
-
- for (--idx; idx >= 0; --idx)
- free((*buf)->data[idx], DRM_MEM_DRIVER);
-
- free(*buf, DRM_MEM_DRIVER);
- return -ENOMEM;
}
EXPORT_SYMBOL(drm_buffer_alloc);
diff --git a/sys/dev/drm2/drm_crtc.c b/sys/dev/drm2/drm_crtc.c
index b9415082e7a1..a163c7455773 100644
--- a/sys/dev/drm2/drm_crtc.c
+++ b/sys/dev/drm2/drm_crtc.c
@@ -662,13 +662,6 @@ int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
plane->funcs = funcs;
plane->format_types = malloc(sizeof(uint32_t) * format_count,
DRM_MEM_KMS, M_WAITOK);
- if (!plane->format_types) {
- DRM_DEBUG_KMS("out of memory when allocating plane\n");
- drm_mode_object_put(dev, &plane->base);
- ret = -ENOMEM;
- goto out;
- }
-
memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
plane->format_count = format_count;
plane->possible_crtcs = possible_crtcs;
@@ -725,8 +718,6 @@ struct drm_display_mode *drm_mode_create(struct drm_device *dev)
nmode = malloc(sizeof(struct drm_display_mode), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!nmode)
- return NULL;
if (drm_mode_object_get(dev, &nmode->base, DRM_MODE_OBJECT_MODE)) {
free(nmode, DRM_MEM_KMS);
@@ -1009,9 +1000,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
group->id_list = malloc(total_objects * sizeof(uint32_t),
DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (!group->id_list)
- return -ENOMEM;
-
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
@@ -1997,10 +1985,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
connector_set = malloc(crtc_req->count_connectors *
sizeof(struct drm_connector *),
DRM_MEM_KMS, M_WAITOK);
- if (!connector_set) {
- ret = -ENOMEM;
- goto out;
- }
for (i = 0; i < crtc_req->count_connectors; i++) {
set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
@@ -2522,11 +2506,6 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
}
clips = malloc(num_clips * sizeof(*clips), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!clips) {
- ret = -ENOMEM;
- goto out_err1;
- }
-
ret = copy_from_user(clips, clips_ptr,
num_clips * sizeof(*clips));
if (ret) {
@@ -2773,15 +2752,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
property = malloc(sizeof(struct drm_property), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!property)
- return NULL;
- if (num_values) {
+ if (num_values)
property->values = malloc(sizeof(uint64_t)*num_values, DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!property->values)
- goto fail;
- }
ret = drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY);
if (ret)
@@ -2907,9 +2881,6 @@ int drm_property_add_enum(struct drm_property *property, int index,
prop_enum = malloc(sizeof(struct drm_property_enum), DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!prop_enum)
- return -ENOMEM;
-
strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
prop_enum->value = value;
@@ -3103,9 +3074,6 @@ static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev
blob = malloc(sizeof(struct drm_property_blob)+length, DRM_MEM_KMS,
M_WAITOK | M_ZERO);
- if (!blob)
- return NULL;
-
ret = drm_mode_object_get(dev, &blob->base, DRM_MODE_OBJECT_BLOB);
if (ret) {
free(blob, DRM_MEM_KMS);
@@ -3433,10 +3401,6 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
crtc->gamma_store = malloc(gamma_size * sizeof(uint16_t) * 3,
DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (!crtc->gamma_store) {
- crtc->gamma_size = 0;
- return -ENOMEM;
- }
return 0;
}
@@ -3631,13 +3595,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
mtx_unlock(&dev->event_lock);
e = malloc(sizeof *e, DRM_MEM_KMS, M_WAITOK | M_ZERO);
- if (e == NULL) {
- mtx_lock(&dev->event_lock);
- file_priv->event_space += sizeof e->event;
- mtx_unlock(&dev->event_lock);
- goto out;
- }
-
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof e->event;
e->event.user_data = page_flip->user_data;
diff --git a/sys/dev/drm2/drm_dp_iic_helper.c b/sys/dev/drm2/drm_dp_iic_helper.c
index fd703494cd5e..444ceb1dcfb4 100644
--- a/sys/dev/drm2/drm_dp_iic_helper.c
+++ b/sys/dev/drm2/drm_dp_iic_helper.c
@@ -207,11 +207,11 @@ iic_dp_aux_attach(device_t idev)
struct iic_dp_aux_data *aux_data;
aux_data = device_get_softc(idev);
- aux_data->port = device_add_child(idev, "iicbus", -1);
+ aux_data->port = device_add_child(idev, "iicbus", DEVICE_UNIT_ANY);
if (aux_data->port == NULL)
return (ENXIO);
device_quiet(aux_data->port);
- bus_generic_attach(idev);
+ bus_attach_children(idev);
return (0);
}
diff --git a/sys/dev/drm2/drm_fb_helper.c b/sys/dev/drm2/drm_fb_helper.c
index f67cc9f60d02..1f4abd255690 100644
--- a/sys/dev/drm2/drm_fb_helper.c
+++ b/sys/dev/drm2/drm_fb_helper.c
@@ -51,7 +51,7 @@ struct vt_kms_softc {
struct task fb_mode_task;
};
-/* Call restore out of vt(9) locks. */
+/* Call restore out of vt(4) locks. */
static void
vt_restore_fbdev_mode(void *arg, int pending)
{
diff --git a/sys/dev/drm2/drm_os_freebsd.h b/sys/dev/drm2/drm_os_freebsd.h
index 0ce0dede6d73..ec1042f8f0d4 100644
--- a/sys/dev/drm2/drm_os_freebsd.h
+++ b/sys/dev/drm2/drm_os_freebsd.h
@@ -154,18 +154,11 @@ typedef void irqreturn_t;
#if !defined(__arm__)
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) || defined(__aarch64__)
-#define DRM_MSG "This code is deprecated. Install the graphics/drm-kmod pkg\n"
+#define DRM_MSG "WARNING! drm2 module is deprecated. Install the graphics/drm-kmod pkg\n"
#else
-#define DRM_MSG "This code is deprecated."
+#define DRM_MSG "WARNING! drm2 module is deprecated.\n"
#endif
-
-#define DRM_OBSOLETE(dev) \
- do { \
- device_printf(dev, "=======================================================\n"); \
- device_printf(dev, DRM_MSG); \
- device_printf(dev, "=======================================================\n"); \
- gone_in_dev(dev, 13, "drm2 drivers"); \
- } while (0)
+#define DRM_OBSOLETE(dev) gone_in_dev(dev, 13, DRM_MSG)
#endif /* __arm__ */
/* DRM_READMEMORYBARRIER() prevents reordering of reads.
@@ -234,13 +227,6 @@ typedef void irqreturn_t;
#define div_u64(n, d) ((n) / (d))
#define hweight32(i) bitcount32(i)
-static inline unsigned long
-roundup_pow_of_two(unsigned long x)
-{
-
- return (1UL << flsl(x - 1));
-}
-
/**
* ror32 - rotate a 32-bit value right
* @word: value to rotate
@@ -297,13 +283,6 @@ get_unaligned_le32(const void *p)
}
#endif
-static inline unsigned long
-ilog2(unsigned long x)
-{
-
- return (flsl(x) - 1);
-}
-
int64_t timeval_to_ns(const struct timeval *tv);
struct timeval ns_to_timeval(const int64_t nsec);
@@ -461,7 +440,6 @@ extern unsigned long drm_linux_timer_hz_mask;
#define jiffies ticks
#define jiffies_to_msecs(x) (((int64_t)(x)) * 1000 / hz)
#define msecs_to_jiffies(x) (((int64_t)(x)) * hz / 1000)
-#define timespec_to_jiffies(x) (((x)->tv_sec * 1000000 + (x)->tv_nsec) * hz / 1000000)
#define time_after(a,b) ((long)(b) - (long)(a) < 0)
#define time_after_eq(a,b) ((long)(b) - (long)(a) <= 0)
#define round_jiffies(j) ((unsigned long)(((j) + drm_linux_timer_hz_mask) & ~drm_linux_timer_hz_mask))
diff --git a/sys/dev/drm2/ttm/ttm_bo_vm.c b/sys/dev/drm2/ttm/ttm_bo_vm.c
index 4f6c66382453..a30205358540 100644
--- a/sys/dev/drm2/ttm/ttm_bo_vm.c
+++ b/sys/dev/drm2/ttm/ttm_bo_vm.c
@@ -35,7 +35,8 @@
* <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*/
-#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/pctrie.h>
#include "opt_vm.h"
#include <dev/drm2/drmP.h>
@@ -46,6 +47,7 @@
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
+#include <vm/vm_radix.h>
#define TTM_BO_VM_NUM_PREFAULT 16
@@ -100,7 +102,7 @@ static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
int prot, vm_page_t *mres)
{
-
+ struct pctrie_iter pages;
struct ttm_buffer_object *bo = vm_obj->handle;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_tt *ttm = NULL;
@@ -114,6 +116,7 @@ ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
if (*mres != NULL) {
(void)vm_page_remove(*mres);
}
+ vm_page_iter_init(&pages, vm_obj);
retry:
VM_OBJECT_WUNLOCK(vm_obj);
m = NULL;
@@ -234,10 +237,12 @@ reserve:
ttm_bo_unreserve(bo);
goto retry;
}
- m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
+ pctrie_iter_reset(&pages);
+ m1 = vm_radix_iter_lookup(&pages, OFF_TO_IDX(offset));
/* XXX This looks like it should just be vm_page_replace? */
if (m1 == NULL) {
- if (vm_page_insert(m, vm_obj, OFF_TO_IDX(offset))) {
+ if (vm_page_iter_insert(
+ m, vm_obj, OFF_TO_IDX(offset), &pages) != 0) {
vm_page_xunbusy(m);
VM_OBJECT_WUNLOCK(vm_obj);
vm_wait(vm_obj);
@@ -361,26 +366,12 @@ void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
vm_object_t vm_obj;
- vm_page_t m;
- int i;
vm_obj = cdev_pager_lookup(bo);
- if (vm_obj == NULL)
- return;
-
- VM_OBJECT_WLOCK(vm_obj);
-retry:
- for (i = 0; i < bo->num_pages; i++) {
- m = vm_page_lookup(vm_obj, i);
- if (m == NULL)
- continue;
- if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
- goto retry;
- cdev_pager_free_page(vm_obj, m);
+ if (vm_obj != NULL) {
+ cdev_mgtdev_pager_free_pages(vm_obj);
+ vm_object_deallocate(vm_obj);
}
- VM_OBJECT_WUNLOCK(vm_obj);
-
- vm_object_deallocate(vm_obj);
}
#if 0
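Editor's note: the fault-handler hunk above replaces a plain vm_page_lookup()/vm_page_insert() pair with the iterator-based VM radix API. A condensed sketch of the lookup-or-insert sequence, using only the calls that appear in the diff:

	struct pctrie_iter pages;

	vm_page_iter_init(&pages, vm_obj);	/* bind iterator to the object */
	/* ... possibly drop and retake the object lock ... */
	pctrie_iter_reset(&pages);		/* revalidate after relocking */
	m1 = vm_radix_iter_lookup(&pages, OFF_TO_IDX(offset));
	if (m1 == NULL &&
	    vm_page_iter_insert(m, vm_obj, OFF_TO_IDX(offset), &pages) != 0) {
		/* insertion failed; back off, wait, and retry */
	}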
diff --git a/sys/dev/drm2/ttm/ttm_object.c b/sys/dev/drm2/ttm/ttm_object.c
index 8c373618d7ac..31af15cf4c56 100644
--- a/sys/dev/drm2/ttm/ttm_object.c
+++ b/sys/dev/drm2/ttm/ttm_object.c
@@ -282,11 +282,6 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
return ret;
ref = malloc(sizeof(*ref), M_TTM_OBJ_REF, M_WAITOK);
- if (unlikely(ref == NULL)) {
- ttm_mem_global_free(mem_glob, sizeof(*ref));
- return -ENOMEM;
- }
-
ref->hash.key = base->hash.key;
ref->obj = base;
ref->tfile = tfile;
diff --git a/sys/dev/drm2/ttm/ttm_page_alloc.c b/sys/dev/drm2/ttm/ttm_page_alloc.c
index 7518ecb4dfd1..724ba0bfb4d8 100644
--- a/sys/dev/drm2/ttm/ttm_page_alloc.c
+++ b/sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -441,7 +441,7 @@ static int ttm_pool_get_num_unused_pages(void)
/**
* Callback for mm to request pool to reduce number of page held.
*/
-static int ttm_pool_mm_shrink(void *arg)
+static int ttm_pool_mm_shrink(void *arg, int flags __unused)
{
static unsigned int start_pool = 0;
unsigned i;
diff --git a/sys/dev/dwc/if_dwc.c b/sys/dev/dwc/if_dwc.c
index 17657045b73c..1b4b4be68747 100644
--- a/sys/dev/dwc/if_dwc.c
+++ b/sys/dev/dwc/if_dwc.c
@@ -535,7 +535,7 @@ dwc_attach(device_t dev)
sc->aal = true;
error = clk_set_assigned(dev, ofw_bus_get_node(dev));
- if (error != 0) {
+ if (error != 0 && error != ENOENT) {
device_printf(dev, "clk_set_assigned failed\n");
return (error);
}
@@ -656,10 +656,6 @@ dwc_detach(device_t dev)
ether_ifdetach(sc->ifp);
}
- if (sc->miibus != NULL) {
- device_delete_child(dev, sc->miibus);
- sc->miibus = NULL;
- }
bus_generic_detach(dev);
/* Free DMA descriptors */
diff --git a/sys/dev/dwc/if_dwc_cvitek.c b/sys/dev/dwc/if_dwc_cvitek.c
new file mode 100644
index 000000000000..fd9a844d3fd4
--- /dev/null
+++ b/sys/dev/dwc/if_dwc_cvitek.c
@@ -0,0 +1,89 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Bojan Novković <bnovkov@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/module.h>
+
+#include <net/if.h>
+
+#include <machine/bus.h>
+#include <machine/param.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/clk/clk.h>
+#include <dev/hwreset/hwreset.h>
+
+#include <dev/dwc/if_dwcvar.h>
+#include <dev/dwc/dwc1000_reg.h>
+
+#include "if_dwc_if.h"
+
+static int
+if_dwc_cvitek_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "cvitek,ethernet"))
+ return (ENXIO);
+ device_set_desc(dev, "CVITEK Ethernet Controller");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+if_dwc_cvitek_mii_clk(device_t dev)
+{
+ /*
+ * XXX: This is a hack to get the driver working on
+ * the Milk-V platform. For reference, the u-boot designware
+ * driver uses the same '150_250M' clock value, but if_dwc
+ * will not work on Milk-V hardware unless the lowest
+ * bit of the PHY register address is always set.
+ */
+ return (0x10 | GMAC_MII_CLK_150_250M_DIV102);
+}
+
+static device_method_t dwc_cvitek_methods[] = {
+ DEVMETHOD(device_probe, if_dwc_cvitek_probe),
+
+ DEVMETHOD(if_dwc_mii_clk, if_dwc_cvitek_mii_clk),
+ DEVMETHOD_END
+};
+
+extern driver_t dwc_driver;
+
+DEFINE_CLASS_1(dwc, dwc_cvitek_driver, dwc_cvitek_methods,
+ sizeof(struct dwc_softc), dwc_driver);
+DRIVER_MODULE(dwc_cvitek, simplebus, dwc_cvitek_driver, 0, 0);
+MODULE_DEPEND(dwc_cvitek, dwc, 1, 1, 1);
diff --git a/sys/dev/dwwdt/dwwdt.c b/sys/dev/dwwdt/dwwdt.c
index 89f94fff9bad..13610b3637cb 100644
--- a/sys/dev/dwwdt/dwwdt.c
+++ b/sys/dev/dwwdt/dwwdt.c
@@ -290,7 +290,8 @@ dwwdt_attach(device_t dev)
sc->sc_evtag = EVENTHANDLER_REGISTER(watchdog_list, dwwdt_event, sc, 0);
sc->sc_status = DWWDT_STOPPED;
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
err_no_freq:
clk_release(sc->sc_clk);
@@ -309,6 +310,7 @@ static int
dwwdt_detach(device_t dev)
{
struct dwwdt_softc *sc = device_get_softc(dev);
+ int error;
if (dwwdt_started(sc)) {
/*
@@ -318,6 +320,10 @@ dwwdt_detach(device_t dev)
return (EBUSY);
}
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
EVENTHANDLER_DEREGISTER(watchdog_list, sc->sc_evtag);
sc->sc_evtag = NULL;
@@ -337,7 +343,7 @@ dwwdt_detach(device_t dev)
sc->sc_mem_res);
}
- return (bus_generic_detach(dev));
+ return (0);
}
static int
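Editor's note: dwwdt_detach() now calls bus_generic_detach() up front, mirroring the dpaa2_mc change earlier in this batch: children must be torn down while the parent's resources are still intact. The resulting shape of a detach routine (driver name hypothetical):

	static int
	foo_detach(device_t dev)
	{
		int error;

		/* Detach children first; fail before touching our own state. */
		error = bus_generic_detach(dev);
		if (error != 0)
			return (error);

		/* Now it is safe to release softc resources. */
		/* ... deregister handlers, release clocks and memory ... */
		return (0);
	}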
diff --git a/sys/dev/e1000/e1000_82575.c b/sys/dev/e1000/e1000_82575.c
index fded5dc2b04e..47b8006314f8 100644
--- a/sys/dev/e1000/e1000_82575.c
+++ b/sys/dev/e1000/e1000_82575.c
@@ -1686,14 +1686,10 @@ static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
break;
}
- /* do not change link mode for 100BaseFX */
- if (dev_spec->eth_flags.e100_base_fx)
- break;
-
/* change current link mode setting */
ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
- if (hw->phy.media_type == e1000_media_type_copper)
+ if (dev_spec->sgmii_active)
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
else
ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
@@ -1701,6 +1697,9 @@ static s32 e1000_get_media_type_82575(struct e1000_hw *hw)
E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
break;
+ default:
+ DEBUGOUT("e1000_get_media_type_82575 unknown link type\n");
+ break;
}
return ret_val;
@@ -1750,24 +1749,27 @@ static s32 e1000_set_sfp_media_type_82575(struct e1000_hw *hw)
/* Check if there is some SFP module plugged and powered */
if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
- (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+ (tranceiver_type == E1000_SFF_IDENTIFIER_SFF))
dev_spec->module_plugged = true;
- if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
- hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e100_base_fx) {
- dev_spec->sgmii_active = true;
- hw->phy.media_type = e1000_media_type_internal_serdes;
- } else if (eth_flags->e1000_base_t) {
- dev_spec->sgmii_active = true;
- hw->phy.media_type = e1000_media_type_copper;
- } else {
- hw->phy.media_type = e1000_media_type_unknown;
- DEBUGOUT("PHY module has not been recognized\n");
- goto out;
- }
+ else
+ DEBUGOUT("PHY module is not SFP/SFF %x\n", tranceiver_type);
+
+ if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ DEBUGOUT("PHY module is 1000_base_lxsx\n");
+ } else if (eth_flags->e100_base_fx || eth_flags->e100_base_lx) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_internal_serdes;
+ DEBUGOUT("PHY module is 100_base_fxlx\n");
+ } else if (eth_flags->e1000_base_t) {
+ dev_spec->sgmii_active = true;
+ hw->phy.media_type = e1000_media_type_copper;
+ DEBUGOUT("PHY module is 1000_base_t\n");
} else {
hw->phy.media_type = e1000_media_type_unknown;
+ DEBUGOUT("PHY module has not been recognized\n");
}
+
ret_val = E1000_SUCCESS;
out:
/* Restore I2C interface setting */
@@ -2425,7 +2427,7 @@ static s32 e1000_validate_nvm_checksum_82580(struct e1000_hw *hw)
}
if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
- /* if chekcsums compatibility bit is set validate checksums
+ /* if checksums compatibility bit is set validate checksums
* for all 4 ports. */
eeprom_regions_count = 4;
}
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index cace8e563331..6c6cb999f29f 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -338,6 +338,8 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_ADL_I219_V16:
case E1000_DEV_ID_PCH_ADL_I219_LM17:
case E1000_DEV_ID_PCH_ADL_I219_V17:
+ case E1000_DEV_ID_PCH_ADL_I219_LM19:
+ case E1000_DEV_ID_PCH_ADL_I219_V19:
case E1000_DEV_ID_PCH_RPL_I219_LM22:
case E1000_DEV_ID_PCH_RPL_I219_V22:
case E1000_DEV_ID_PCH_RPL_I219_LM23:
@@ -346,8 +348,6 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
break;
case E1000_DEV_ID_PCH_MTP_I219_LM18:
case E1000_DEV_ID_PCH_MTP_I219_V18:
- case E1000_DEV_ID_PCH_MTP_I219_LM19:
- case E1000_DEV_ID_PCH_MTP_I219_V19:
case E1000_DEV_ID_PCH_LNL_I219_LM20:
case E1000_DEV_ID_PCH_LNL_I219_V20:
case E1000_DEV_ID_PCH_LNL_I219_LM21:
diff --git a/sys/dev/e1000/e1000_base.c b/sys/dev/e1000/e1000_base.c
index ff810821d9e9..ae44a0b91aac 100644
--- a/sys/dev/e1000/e1000_base.c
+++ b/sys/dev/e1000/e1000_base.c
@@ -137,7 +137,7 @@ void e1000_power_down_phy_copper_base(struct e1000_hw *hw)
return;
/* If the management interface is not enabled, then power down */
- if (phy->ops.check_reset_block(hw))
+ if (!(e1000_enable_mng_pass_thru(hw) || phy->ops.check_reset_block(hw)))
e1000_power_down_phy_copper(hw);
}
@@ -162,7 +162,8 @@ void e1000_rx_fifo_flush_base(struct e1000_hw *hw)
rfctl |= E1000_RFCTL_IPV6_EX_DIS;
E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
- if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+ if (hw->mac.type != e1000_82575 ||
+ !(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_RCV_TCO_EN))
return;
/* Disable all Rx queues */
diff --git a/sys/dev/e1000/e1000_defines.h b/sys/dev/e1000/e1000_defines.h
index 92f676932636..f1b1008764a1 100644
--- a/sys/dev/e1000/e1000_defines.h
+++ b/sys/dev/e1000/e1000_defines.h
@@ -130,7 +130,6 @@
#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define E1000_RXDEXT_STATERR_LB 0x00040000
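Editor's note: E1000_RXD_SPC_VLAN_MASK goes away because em_txrx.c (below) now hands the full 16-bit TCI from the descriptor's `special` field to the stack instead of pre-masking it. A sketch of the tag layout, assuming the standard 802.1Q TCI split:

	/* 802.1Q TCI: bits [15:13] PCP, [12] DEI, [11:0] VID. */
	uint16_t tci = le16toh(rxd->special);
	uint16_t vid = tci & 0x0FFF;	/* former E1000_RXD_SPC_VLAN_MASK */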
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index f17877f3e463..b4a9592cd89b 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -172,8 +172,8 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_ADL_I219_V17 0x1A1D
#define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A
#define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B
-#define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C
-#define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D
+#define E1000_DEV_ID_PCH_ADL_I219_LM19 0x550C
+#define E1000_DEV_ID_PCH_ADL_I219_V19 0x550D
#define E1000_DEV_ID_PCH_LNL_I219_LM20 0x550E
#define E1000_DEV_ID_PCH_LNL_I219_V20 0x550F
#define E1000_DEV_ID_PCH_LNL_I219_LM21 0x5510
diff --git a/sys/dev/e1000/e1000_i210.c b/sys/dev/e1000/e1000_i210.c
index c7ca56914dba..4005034d7d31 100644
--- a/sys/dev/e1000/e1000_i210.c
+++ b/sys/dev/e1000/e1000_i210.c
@@ -362,7 +362,7 @@ s32 e1000_read_invm_version(struct e1000_hw *hw,
E1000_INVM_RECORD_SIZE_IN_BYTES);
u32 buffer[E1000_INVM_SIZE];
s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u16 version = 0;
+ u16 nvm_version = 0;
DEBUGFUNC("e1000_read_invm_version");
@@ -379,14 +379,14 @@ s32 e1000_read_invm_version(struct e1000_hw *hw,
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
- version = 0;
+ nvm_version = 0;
status = E1000_SUCCESS;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ nvm_version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
status = E1000_SUCCESS;
break;
}
@@ -397,7 +397,7 @@ s32 e1000_read_invm_version(struct e1000_hw *hw,
else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
(i != 1))) {
- version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+ nvm_version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
status = E1000_SUCCESS;
break;
@@ -408,16 +408,16 @@ s32 e1000_read_invm_version(struct e1000_hw *hw,
*/
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
- version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+ nvm_version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
status = E1000_SUCCESS;
break;
}
}
if (status == E1000_SUCCESS) {
- invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+ invm_ver->invm_major = (nvm_version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
- invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+ invm_ver->invm_minor = nvm_version & E1000_INVM_MINOR_MASK;
}
/* Read Image Type */
for (i = 1; i < invm_blocks; i++) {
diff --git a/sys/dev/e1000/e1000_osdep.c b/sys/dev/e1000/e1000_osdep.c
index 8016ee352068..8b598f18cf12 100644
--- a/sys/dev/e1000/e1000_osdep.c
+++ b/sys/dev/e1000/e1000_osdep.c
@@ -34,6 +34,16 @@
#include "e1000_api.h"
+int e1000_use_pause_delay = 0;
+
+static void
+e1000_enable_pause_delay(void *use_pause_delay)
+{
+ *((int *)use_pause_delay) = 1;
+}
+
+SYSINIT(enable_pause_delay, SI_SUB_CLOCKS, SI_ORDER_ANY, e1000_enable_pause_delay, &e1000_use_pause_delay);
+
/*
* NOTE: the following routines using the e1000
* naming style are provided to the shared
diff --git a/sys/dev/e1000/e1000_osdep.h b/sys/dev/e1000/e1000_osdep.h
index bddb97afd8b9..893979025f01 100644
--- a/sys/dev/e1000/e1000_osdep.h
+++ b/sys/dev/e1000/e1000_osdep.h
@@ -79,9 +79,11 @@ ms_scale(int x) {
}
}
+extern int e1000_use_pause_delay;
+
static inline void
safe_pause_us(int x) {
- if (cold) {
+ if (!e1000_use_pause_delay) {
DELAY(x);
} else {
pause("e1000_delay", max(1, x/(1000000/hz)));
@@ -90,7 +92,7 @@ safe_pause_us(int x) {
static inline void
safe_pause_ms(int x) {
- if (cold) {
+ if (!e1000_use_pause_delay) {
DELAY(x*1000);
} else {
pause("e1000_delay", ms_scale(x));
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 0aaf32125bbe..634f48171c3e 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1037,7 +1037,7 @@ static s32 e1000_set_master_slave_mode(struct e1000_hw *hw)
break;
case e1000_ms_auto:
phy_data &= ~CR_1000T_MS_ENABLE;
- /* FALLTHROUGH */
+ break;
default:
break;
}
diff --git a/sys/dev/e1000/em_txrx.c b/sys/dev/e1000/em_txrx.c
index eec198df7466..ced8d0f41d14 100644
--- a/sys/dev/e1000/em_txrx.c
+++ b/sys/dev/e1000/em_txrx.c
@@ -43,29 +43,26 @@
/*********************************************************************
* Local Function prototypes
*********************************************************************/
-static int em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi,
- uint32_t *txd_upper, uint32_t *txd_lower);
-static int em_transmit_checksum_setup(struct e1000_softc *sc,
- if_pkt_info_t pi, uint32_t *txd_upper, uint32_t *txd_lower);
-static int em_isc_txd_encap(void *arg, if_pkt_info_t pi);
-static void em_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
-static int em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
-static void em_isc_rxd_refill(void *arg, if_rxd_update_t iru);
-static void em_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
- qidx_t pidx);
-static int em_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
- qidx_t budget);
-static int em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
-
-static void lem_isc_rxd_refill(void *arg, if_rxd_update_t iru);
-
-static int lem_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
- qidx_t budget);
-static int lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+static int em_tso_setup(struct e1000_softc *, if_pkt_info_t, uint32_t *,
+ uint32_t *);
+static int em_transmit_checksum_setup(struct e1000_softc *, if_pkt_info_t,
+ uint32_t *, uint32_t *);
+static int em_isc_txd_encap(void *, if_pkt_info_t);
+static void em_isc_txd_flush(void *, uint16_t, qidx_t);
+static int em_isc_txd_credits_update(void *, uint16_t, bool);
+static void em_isc_rxd_refill(void *, if_rxd_update_t);
+static void em_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int em_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
+static int em_isc_rxd_pkt_get(void *, if_rxd_info_t);
+
+static void lem_isc_rxd_refill(void *, if_rxd_update_t);
+
+static int lem_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
+static int lem_isc_rxd_pkt_get(void *, if_rxd_info_t);
static void em_receive_checksum(uint16_t, uint8_t, if_rxd_info_t);
-static int em_determine_rsstype(uint32_t pkt_info);
-extern int em_intr(void *arg);
+static int em_determine_rsstype(uint32_t);
+extern int em_intr(void *);
struct if_txrx em_txrx = {
.ift_txd_encap = em_isc_txd_encap,
@@ -111,16 +108,19 @@ em_dump_rs(struct e1000_softc *sc)
cur = txr->tx_rsq[rs_cidx];
status = txr->tx_base[cur].upper.fields.status;
if (!(status & E1000_TXD_STAT_DD))
- printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
+ printf("qid[%d]->tx_rsq[%d]: %d clear ",
+ qid, rs_cidx, cur);
} else {
rs_cidx = (rs_cidx-1)&(ntxd-1);
cur = txr->tx_rsq[rs_cidx];
- printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
+ printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ",
+ qid, rs_cidx, cur);
}
printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed,
txr->tx_rs_pidx);
for (i = 0; i < ntxd; i++) {
- if (txr->tx_base[i].upper.fields.status & E1000_TXD_STAT_DD)
+ if (txr->tx_base[i].upper.fields.status &
+ E1000_TXD_STAT_DD)
printf("%d set ", i);
}
printf("\n");
@@ -146,8 +146,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
hdr_len = pi->ipi_ehdrlen + pi->ipi_ip_hlen + pi->ipi_tcp_hlen;
*txd_lower = (E1000_TXD_CMD_DEXT | /* Extended descr type */
- E1000_TXD_DTYP_D | /* Data descr type */
- E1000_TXD_CMD_TSE); /* Do TSE on this packet */
+ E1000_TXD_DTYP_D | /* Data descr type */
+ E1000_TXD_CMD_TSE); /* Do TSE on this packet */
cur = pi->ipi_pidx;
TXD = (struct e1000_context_desc *)&txr->tx_base[cur];
@@ -160,7 +160,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
switch(pi->ipi_etype) {
case ETHERTYPE_IP:
/* IP and/or TCP header checksum calculation and insertion. */
- *txd_upper = (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
+ *txd_upper =
+ (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8;
TXD->lower_setup.ip_fields.ipcse =
htole16(pi->ipi_ehdrlen + pi->ipi_ip_hlen - 1);
@@ -186,7 +187,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
TXD->upper_setup.tcp_fields.tucss = pi->ipi_ehdrlen + pi->ipi_ip_hlen;
TXD->upper_setup.tcp_fields.tucse = 0;
TXD->upper_setup.tcp_fields.tucso =
- pi->ipi_ehdrlen + pi->ipi_ip_hlen + offsetof(struct tcphdr, th_sum);
+ pi->ipi_ehdrlen + pi->ipi_ip_hlen +
+ offsetof(struct tcphdr, th_sum);
/*
* Payload size per packet w/o any headers.
@@ -214,8 +216,8 @@ em_tso_setup(struct e1000_softc *sc, if_pkt_info_t pi, uint32_t *txd_upper,
if (++cur == scctx->isc_ntxd[0]) {
cur = 0;
}
- DPRINTF(iflib_get_dev(sc->ctx), "%s: pidx: %d cur: %d\n", __FUNCTION__,
- pi->ipi_pidx, cur);
+ DPRINTF(iflib_get_dev(sc->ctx), "%s: pidx: %d cur: %d\n",
+ __FUNCTION__, pi->ipi_pidx, cur);
return (cur);
}
@@ -280,8 +282,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
* ipcse - End offset for header checksum calculation.
* ipcso - Offset of place to put the checksum.
*
- * We set ipcsX values regardless of IP version to work around HW issues
- * and ipcse must be 0 for IPv6 per "PCIe GbE SDM 2.5" page 61.
+ * We set ipcsX values regardless of IP version to work around HW
+ * issues and ipcse must be 0 for IPv6 per "PCIe GbE SDM 2.5" page 61.
* IXSM controls whether it's inserted.
*/
TXD->lower_setup.ip_fields.ipcss = pi->ipi_ehdrlen;
@@ -299,7 +301,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
* tucse - End offset for payload checksum calculation.
* tucso - Offset of place to put the checksum.
*/
- if (csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) {
+ if (csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP |
+ CSUM_IP6_UDP)) {
uint8_t tucso;
*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
@@ -329,7 +332,8 @@ em_transmit_checksum_setup(struct e1000_softc *sc, if_pkt_info_t pi,
cur = 0;
}
DPRINTF(iflib_get_dev(sc->ctx),
- "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x hdr_len=%d cmd=%x\n",
+ "checksum_setup csum_flags=%x txd_upper=%x txd_lower=%x"
+ " hdr_len=%d cmd=%x\n",
csum_flags, *txd_upper, *txd_lower, hdr_len, cmd);
return (cur);
}
@@ -375,7 +379,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
i = em_tso_setup(sc, pi, &txd_upper, &txd_lower);
tso_desc = true;
} else if (csum_flags & EM_CSUM_OFFLOAD) {
- i = em_transmit_checksum_setup(sc, pi, &txd_upper, &txd_lower);
+ i = em_transmit_checksum_setup(sc, pi, &txd_upper,
+ &txd_lower);
}
if (pi->ipi_mflags & M_VLANTAG) {
@@ -417,7 +422,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
/* Now make the sentinel */
ctxd = &txr->tx_base[i];
ctxd->buffer_addr = htole64(seg_addr + seg_len);
- ctxd->lower.data = htole32(cmd | txd_lower | TSO_WORKAROUND);
+ ctxd->lower.data =
+ htole32(cmd | txd_lower | TSO_WORKAROUND);
ctxd->upper.data = htole32(txd_upper);
pidx_last = i;
if (++i == scctx->isc_ntxd[0])
@@ -432,7 +438,8 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
pidx_last = i;
if (++i == scctx->isc_ntxd[0])
i = 0;
- DPRINTF(iflib_get_dev(sc->ctx), "pidx_last=%d i=%d ntxd[0]=%d\n",
+ DPRINTF(iflib_get_dev(sc->ctx),
+ "pidx_last=%d i=%d ntxd[0]=%d\n",
pidx_last, i, scctx->isc_ntxd[0]);
}
}
@@ -452,9 +459,14 @@ em_isc_txd_encap(void *arg, if_pkt_info_t pi)
}
ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | txd_flags);
DPRINTF(iflib_get_dev(sc->ctx),
- "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n", first, pidx_last, i);
+ "tx_buffers[%d]->eop = %d ipi_new_pidx=%d\n",
+ first, pidx_last, i);
pi->ipi_new_pidx = i;
+ /* Sent data accounting for AIM */
+ txr->tx_bytes += pi->ipi_len;
+ ++txr->tx_packets;
+
return (0);
}
@@ -507,8 +519,8 @@ em_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
delta += ntxd;
MPASS(delta > 0);
DPRINTF(iflib_get_dev(sc->ctx),
- "%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
- __FUNCTION__, prev, cur, clear, delta);
+ "%s: cidx_processed=%u cur=%u clear=%d delta=%d\n",
+ __FUNCTION__, prev, cur, clear, delta);
processed += delta;
prev = cur;
@@ -669,6 +681,7 @@ lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
len = le16toh(rxd->length);
ri->iri_len += len;
+ rxr->rx_bytes += ri->iri_len;
eop = (status & E1000_RXD_STAT_EOP) != 0;
@@ -690,12 +703,14 @@ lem_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
i++;
} while (!eop);
+ rxr->rx_packets++;
+
if (scctx->isc_capenable & IFCAP_RXCSUM)
em_receive_checksum(status, errors, ri);
if (scctx->isc_capenable & IFCAP_VLAN_HWTAGGING &&
status & E1000_RXD_STAT_VP) {
- ri->iri_vtag = le16toh(rxd->special & E1000_RXD_SPC_VLAN_MASK);
+ ri->iri_vtag = le16toh(rxd->special);
ri->iri_flags |= M_VLANTAG;
}
@@ -732,6 +747,7 @@ em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
len = le16toh(rxd->wb.upper.length);
ri->iri_len += len;
+ rxr->rx_bytes += ri->iri_len;
eop = (staterr & E1000_RXD_STAT_EOP) != 0;
@@ -752,6 +768,8 @@ em_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
i++;
} while (!eop);
+ rxr->rx_packets++;
+
if (scctx->isc_capenable & IFCAP_RXCSUM)
em_receive_checksum(staterr, staterr >> 24, ri);
@@ -782,7 +800,8 @@ em_receive_checksum(uint16_t status, uint8_t errors, if_rxd_info_t ri)
return;
/* If there is a layer 3 or 4 error we are done */
- if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
+ if (__predict_false(errors & (E1000_RXD_ERR_IPE |
+ E1000_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
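Editor's note: the new tx_bytes/tx_packets and rx_bytes/rx_packets counters feed adaptive interrupt moderation (AIM) elsewhere in the driver. A sketch of how such counters are typically consumed, purely illustrative (the interval logic below is an assumption, not the driver's code):

	/*
	 * Illustrative only: derive an average packet size over one AIM
	 * interval, then reset the counters for the next interval.
	 */
	uint64_t bytes = txr->tx_bytes, pkts = txr->tx_packets;
	uint64_t avg = (pkts != 0) ? bytes / pkts : 0;

	txr->tx_bytes = 0;
	txr->tx_packets = 0;
	/* ... map 'avg' to an interrupt throttle rate ... */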
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index 690d6dfa3218..247cf9d7fed3 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -1,8 +1,9 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2001-2024, Intel Corporation
* Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
- * All rights reserved.
+ * Copyright (c) 2024 Kevin Bowling <kbowling@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,7 +38,7 @@
* Driver version:
*********************************************************************/
static const char em_driver_version[] = "7.7.8-fbsd";
-static const char igb_driver_version[] = "2.5.19-fbsd";
+static const char igb_driver_version[] = "2.5.28-fbsd";
/*********************************************************************
* PCI Device ID Table
@@ -52,73 +53,129 @@ static const char igb_driver_version[] = "2.5.19-fbsd";
static const pci_vendor_info_t em_vendor_info_array[] =
{
/* Intel(R) - lem-class legacy devices */
- PVID(0x8086, E1000_DEV_ID_82540EM, "Intel(R) Legacy PRO/1000 MT 82540EM"),
- PVID(0x8086, E1000_DEV_ID_82540EM_LOM, "Intel(R) Legacy PRO/1000 MT 82540EM (LOM)"),
- PVID(0x8086, E1000_DEV_ID_82540EP, "Intel(R) Legacy PRO/1000 MT 82540EP"),
- PVID(0x8086, E1000_DEV_ID_82540EP_LOM, "Intel(R) Legacy PRO/1000 MT 82540EP (LOM)"),
- PVID(0x8086, E1000_DEV_ID_82540EP_LP, "Intel(R) Legacy PRO/1000 MT 82540EP (Mobile)"),
-
- PVID(0x8086, E1000_DEV_ID_82541EI, "Intel(R) Legacy PRO/1000 MT 82541EI (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82541ER, "Intel(R) Legacy PRO/1000 82541ER"),
- PVID(0x8086, E1000_DEV_ID_82541ER_LOM, "Intel(R) Legacy PRO/1000 MT 82541ER"),
- PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE, "Intel(R) Legacy PRO/1000 MT 82541EI (Mobile)"),
- PVID(0x8086, E1000_DEV_ID_82541GI, "Intel(R) Legacy PRO/1000 MT 82541GI"),
- PVID(0x8086, E1000_DEV_ID_82541GI_LF, "Intel(R) Legacy PRO/1000 GT 82541PI"),
- PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE, "Intel(R) Legacy PRO/1000 MT 82541GI (Mobile)"),
-
- PVID(0x8086, E1000_DEV_ID_82542, "Intel(R) Legacy PRO/1000 82542 (Fiber)"),
-
- PVID(0x8086, E1000_DEV_ID_82543GC_FIBER, "Intel(R) Legacy PRO/1000 F 82543GC (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82543GC_COPPER, "Intel(R) Legacy PRO/1000 T 82543GC (Copper)"),
-
- PVID(0x8086, E1000_DEV_ID_82544EI_COPPER, "Intel(R) Legacy PRO/1000 XT 82544EI (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82544EI_FIBER, "Intel(R) Legacy PRO/1000 XF 82544EI (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82544GC_COPPER, "Intel(R) Legacy PRO/1000 T 82544GC (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82544GC_LOM, "Intel(R) Legacy PRO/1000 XT 82544GC (LOM)"),
-
- PVID(0x8086, E1000_DEV_ID_82545EM_COPPER, "Intel(R) Legacy PRO/1000 MT 82545EM (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82545EM_FIBER, "Intel(R) Legacy PRO/1000 MF 82545EM (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82545GM_COPPER, "Intel(R) Legacy PRO/1000 MT 82545GM (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82545GM_FIBER, "Intel(R) Legacy PRO/1000 MF 82545GM (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82545GM_SERDES, "Intel(R) Legacy PRO/1000 MB 82545GM (SERDES)"),
-
- PVID(0x8086, E1000_DEV_ID_82546EB_COPPER, "Intel(R) Legacy PRO/1000 MT 82546EB (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82546EB_FIBER, "Intel(R) Legacy PRO/1000 MF 82546EB (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, "Intel(R) Legacy PRO/1000 MT 82546EB (Quad Copper"),
- PVID(0x8086, E1000_DEV_ID_82546GB_COPPER, "Intel(R) Legacy PRO/1000 MT 82546GB (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82546GB_FIBER, "Intel(R) Legacy PRO/1000 MF 82546GB (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82546GB_SERDES, "Intel(R) Legacy PRO/1000 MB 82546GB (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82546GB_PCIE, "Intel(R) Legacy PRO/1000 P 82546GB (PCIe)"),
- PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
-
- PVID(0x8086, E1000_DEV_ID_82547EI, "Intel(R) Legacy PRO/1000 CT 82547EI"),
- PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE, "Intel(R) Legacy PRO/1000 CT 82547EI (Mobile)"),
- PVID(0x8086, E1000_DEV_ID_82547GI, "Intel(R) Legacy PRO/1000 CT 82547GI"),
+ PVID(0x8086, E1000_DEV_ID_82540EM,
+ "Intel(R) Legacy PRO/1000 MT 82540EM"),
+ PVID(0x8086, E1000_DEV_ID_82540EM_LOM,
+ "Intel(R) Legacy PRO/1000 MT 82540EM (LOM)"),
+ PVID(0x8086, E1000_DEV_ID_82540EP,
+ "Intel(R) Legacy PRO/1000 MT 82540EP"),
+ PVID(0x8086, E1000_DEV_ID_82540EP_LOM,
+ "Intel(R) Legacy PRO/1000 MT 82540EP (LOM)"),
+ PVID(0x8086, E1000_DEV_ID_82540EP_LP,
+ "Intel(R) Legacy PRO/1000 MT 82540EP (Mobile)"),
+
+ PVID(0x8086, E1000_DEV_ID_82541EI,
+ "Intel(R) Legacy PRO/1000 MT 82541EI (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82541ER,
+ "Intel(R) Legacy PRO/1000 82541ER"),
+ PVID(0x8086, E1000_DEV_ID_82541ER_LOM,
+ "Intel(R) Legacy PRO/1000 MT 82541ER"),
+ PVID(0x8086, E1000_DEV_ID_82541EI_MOBILE,
+ "Intel(R) Legacy PRO/1000 MT 82541EI (Mobile)"),
+ PVID(0x8086, E1000_DEV_ID_82541GI,
+ "Intel(R) Legacy PRO/1000 MT 82541GI"),
+ PVID(0x8086, E1000_DEV_ID_82541GI_LF,
+ "Intel(R) Legacy PRO/1000 GT 82541PI"),
+ PVID(0x8086, E1000_DEV_ID_82541GI_MOBILE,
+ "Intel(R) Legacy PRO/1000 MT 82541GI (Mobile)"),
+
+ PVID(0x8086, E1000_DEV_ID_82542,
+ "Intel(R) Legacy PRO/1000 82542 (Fiber)"),
+
+ PVID(0x8086, E1000_DEV_ID_82543GC_FIBER,
+ "Intel(R) Legacy PRO/1000 F 82543GC (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82543GC_COPPER,
+ "Intel(R) Legacy PRO/1000 T 82543GC (Copper)"),
+
+ PVID(0x8086, E1000_DEV_ID_82544EI_COPPER,
+ "Intel(R) Legacy PRO/1000 XT 82544EI (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82544EI_FIBER,
+ "Intel(R) Legacy PRO/1000 XF 82544EI (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82544GC_COPPER,
+ "Intel(R) Legacy PRO/1000 T 82544GC (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82544GC_LOM,
+ "Intel(R) Legacy PRO/1000 XT 82544GC (LOM)"),
+
+ PVID(0x8086, E1000_DEV_ID_82545EM_COPPER,
+ "Intel(R) Legacy PRO/1000 MT 82545EM (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82545EM_FIBER,
+ "Intel(R) Legacy PRO/1000 MF 82545EM (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_COPPER,
+ "Intel(R) Legacy PRO/1000 MT 82545GM (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_FIBER,
+ "Intel(R) Legacy PRO/1000 MF 82545GM (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82545GM_SERDES,
+ "Intel(R) Legacy PRO/1000 MB 82545GM (SERDES)"),
+
+ PVID(0x8086, E1000_DEV_ID_82546EB_COPPER,
+ "Intel(R) Legacy PRO/1000 MT 82546EB (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82546EB_FIBER,
+ "Intel(R) Legacy PRO/1000 MF 82546EB (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER,
+ "Intel(R) Legacy PRO/1000 MT 82546EB (Quad Copper"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_COPPER,
+ "Intel(R) Legacy PRO/1000 MT 82546GB (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_FIBER,
+ "Intel(R) Legacy PRO/1000 MF 82546GB (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_SERDES,
+ "Intel(R) Legacy PRO/1000 MB 82546GB (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_PCIE,
+ "Intel(R) Legacy PRO/1000 P 82546GB (PCIe)"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER,
+ "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3,
+ "Intel(R) Legacy PRO/1000 GT 82546GB (Quad Copper)"),
+
+ PVID(0x8086, E1000_DEV_ID_82547EI,
+ "Intel(R) Legacy PRO/1000 CT 82547EI"),
+ PVID(0x8086, E1000_DEV_ID_82547EI_MOBILE,
+ "Intel(R) Legacy PRO/1000 CT 82547EI (Mobile)"),
+ PVID(0x8086, E1000_DEV_ID_82547GI,
+ "Intel(R) Legacy PRO/1000 CT 82547GI"),
/* Intel(R) - em-class devices */
- PVID(0x8086, E1000_DEV_ID_82571EB_COPPER, "Intel(R) PRO/1000 PT 82571EB/82571GB (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_FIBER, "Intel(R) PRO/1000 PF 82571EB/82571GB (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_SERDES, "Intel(R) PRO/1000 PB 82571EB (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, "Intel(R) PRO/1000 82571EB (Dual Mezzanine)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, "Intel(R) PRO/1000 82571EB (Quad Mezzanine)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, "Intel(R) PRO/1000 PF 82571EB (Quad Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, "Intel(R) PRO/1000 PT 82571PT (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82572EI, "Intel(R) PRO/1000 PT 82572EI (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82572EI_COPPER, "Intel(R) PRO/1000 PT 82572EI (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82572EI_FIBER, "Intel(R) PRO/1000 PF 82572EI (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82572EI_SERDES, "Intel(R) PRO/1000 82572EI (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82573E, "Intel(R) PRO/1000 82573E (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82573E_IAMT, "Intel(R) PRO/1000 82573E AMT (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_COPPER,
+ "Intel(R) PRO/1000 PT 82571EB/82571GB (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_FIBER,
+ "Intel(R) PRO/1000 PF 82571EB/82571GB (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_SERDES,
+ "Intel(R) PRO/1000 PB 82571EB (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL,
+ "Intel(R) PRO/1000 82571EB (Dual Mezzanine)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD,
+ "Intel(R) PRO/1000 82571EB (Quad Mezzanine)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER,
+ "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP,
+ "Intel(R) PRO/1000 PT 82571EB/82571GB (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER,
+ "Intel(R) PRO/1000 PF 82571EB (Quad Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER,
+ "Intel(R) PRO/1000 PT 82571PT (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82572EI,
+ "Intel(R) PRO/1000 PT 82572EI (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_COPPER,
+ "Intel(R) PRO/1000 PT 82572EI (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_FIBER,
+ "Intel(R) PRO/1000 PF 82572EI (Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82572EI_SERDES,
+ "Intel(R) PRO/1000 82572EI (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82573E,
+ "Intel(R) PRO/1000 82573E (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82573E_IAMT,
+ "Intel(R) PRO/1000 82573E AMT (Copper)"),
PVID(0x8086, E1000_DEV_ID_82573L, "Intel(R) PRO/1000 82573L"),
PVID(0x8086, E1000_DEV_ID_82583V, "Intel(R) 82583V"),
- PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, "Intel(R) 80003ES2LAN (Copper)"),
- PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, "Intel(R) 80003ES2LAN (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, "Intel(R) 80003ES2LAN (Dual Copper)"),
- PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, "Intel(R) 80003ES2LAN (Dual SERDES)"),
- PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, "Intel(R) 82566MM ICH8 AMT (Mobile)"),
+ PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT,
+ "Intel(R) 80003ES2LAN (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT,
+ "Intel(R) 80003ES2LAN (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT,
+ "Intel(R) 80003ES2LAN (Dual Copper)"),
+ PVID(0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT,
+ "Intel(R) 80003ES2LAN (Dual SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT,
+ "Intel(R) 82566MM ICH8 AMT (Mobile)"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_AMT, "Intel(R) 82566DM ICH8 AMT"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_C, "Intel(R) 82566DC ICH8"),
PVID(0x8086, E1000_DEV_ID_ICH8_IFE, "Intel(R) 82562V ICH8"),
@@ -126,8 +183,10 @@ static const pci_vendor_info_t em_vendor_info_array[] =
PVID(0x8086, E1000_DEV_ID_ICH8_IFE_G, "Intel(R) 82562G ICH8"),
PVID(0x8086, E1000_DEV_ID_ICH8_IGP_M, "Intel(R) 82566MC ICH8"),
PVID(0x8086, E1000_DEV_ID_ICH8_82567V_3, "Intel(R) 82567V-3 ICH8"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, "Intel(R) 82567LM ICH9 AMT"),
- PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT, "Intel(R) 82566DM-2 ICH9 AMT"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT,
+ "Intel(R) 82567LM ICH9 AMT"),
+ PVID(0x8086, E1000_DEV_ID_ICH9_IGP_AMT,
+ "Intel(R) 82566DM-2 ICH9 AMT"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_C, "Intel(R) 82566DC-2 ICH9"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M, "Intel(R) 82567LF ICH9"),
PVID(0x8086, E1000_DEV_ID_ICH9_IGP_M_V, "Intel(R) 82567V ICH9"),
@@ -151,7 +210,8 @@ static const pci_vendor_info_t em_vendor_info_array[] =
PVID(0x8086, E1000_DEV_ID_PCH2_LV_V, "Intel(R) 82579V"),
PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_LM, "Intel(R) I217-LM LPT"),
PVID(0x8086, E1000_DEV_ID_PCH_LPT_I217_V, "Intel(R) I217-V LPT"),
- PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM, "Intel(R) I218-LM LPTLP"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_LM,
+ "Intel(R) I218-LM LPTLP"),
PVID(0x8086, E1000_DEV_ID_PCH_LPTLP_I218_V, "Intel(R) I218-V LPTLP"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_LM2, "Intel(R) I218-LM (2)"),
PVID(0x8086, E1000_DEV_ID_PCH_I218_V2, "Intel(R) I218-V (2)"),
@@ -159,57 +219,102 @@ static const pci_vendor_info_t em_vendor_info_array[] =
PVID(0x8086, E1000_DEV_ID_PCH_I218_V3, "Intel(R) I218-V (3)"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM, "Intel(R) I219-LM SPT"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V, "Intel(R) I219-V SPT"),
- PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2, "Intel(R) I219-LM SPT-H(2)"),
- PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, "Intel(R) I219-V SPT-H(2)"),
- PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3, "Intel(R) I219-LM LBG(3)"),
- PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4, "Intel(R) I219-LM SPT(4)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM2,
+ "Intel(R) I219-LM SPT-H(2)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V2,
+ "Intel(R) I219-V SPT-H(2)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3,
+ "Intel(R) I219-LM LBG(3)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4,
+ "Intel(R) I219-LM SPT(4)"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, "Intel(R) I219-V SPT(4)"),
- PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5, "Intel(R) I219-LM SPT(5)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5,
+ "Intel(R) I219-LM SPT(5)"),
PVID(0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, "Intel(R) I219-V SPT(5)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM6, "Intel(R) I219-LM CNP(6)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM6,
+ "Intel(R) I219-LM CNP(6)"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V6, "Intel(R) I219-V CNP(6)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM7, "Intel(R) I219-LM CNP(7)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_LM7,
+ "Intel(R) I219-LM CNP(7)"),
PVID(0x8086, E1000_DEV_ID_PCH_CNP_I219_V7, "Intel(R) I219-V CNP(7)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM8, "Intel(R) I219-LM ICP(8)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM8,
+ "Intel(R) I219-LM ICP(8)"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V8, "Intel(R) I219-V ICP(8)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM9, "Intel(R) I219-LM ICP(9)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_LM9,
+ "Intel(R) I219-LM ICP(9)"),
PVID(0x8086, E1000_DEV_ID_PCH_ICP_I219_V9, "Intel(R) I219-V ICP(9)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM10, "Intel(R) I219-LM CMP(10)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V10, "Intel(R) I219-V CMP(10)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM11, "Intel(R) I219-LM CMP(11)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V11, "Intel(R) I219-V CMP(11)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM12, "Intel(R) I219-LM CMP(12)"),
- PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V12, "Intel(R) I219-V CMP(12)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM13, "Intel(R) I219-LM TGP(13)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V13, "Intel(R) I219-V TGP(13)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM14, "Intel(R) I219-LM TGP(14)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V14, "Intel(R) I219-V GTP(14)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM15, "Intel(R) I219-LM TGP(15)"),
- PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V15, "Intel(R) I219-V TGP(15)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM16, "Intel(R) I219-LM ADL(16)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V16, "Intel(R) I219-V ADL(16)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM17, "Intel(R) I219-LM ADL(17)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V17, "Intel(R) I219-V ADL(17)"),
- PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_LM18, "Intel(R) I219-LM MTP(18)"),
- PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_V18, "Intel(R) I219-V MTP(18)"),
- PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_LM19, "Intel(R) I219-LM MTP(19)"),
- PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_V19, "Intel(R) I219-V MTP(19)"),
- PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM20, "Intel(R) I219-LM LNL(20)"),
- PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V20, "Intel(R) I219-V LNL(20)"),
- PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM21, "Intel(R) I219-LM LNL(21)"),
- PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V21, "Intel(R) I219-V LNL(21)"),
- PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM22, "Intel(R) I219-LM RPL(22)"),
- PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V22, "Intel(R) I219-V RPL(22)"),
- PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM23, "Intel(R) I219-LM RPL(23)"),
- PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V23, "Intel(R) I219-V RPL(23)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_LM24, "Intel(R) I219-LM ARL(24)"),
- PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_V24, "Intel(R) I219-V ARL(24)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM25, "Intel(R) I219-LM PTP(25)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V25, "Intel(R) I219-V PTP(25)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM26, "Intel(R) I219-LM PTP(26)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V26, "Intel(R) I219-V PTP(26)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM27, "Intel(R) I219-LM PTP(27)"),
- PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V27, "Intel(R) I219-V PTP(27)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM10,
+ "Intel(R) I219-LM CMP(10)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V10,
+ "Intel(R) I219-V CMP(10)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM11,
+ "Intel(R) I219-LM CMP(11)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V11,
+ "Intel(R) I219-V CMP(11)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_LM12,
+ "Intel(R) I219-LM CMP(12)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_CMP_I219_V12,
+ "Intel(R) I219-V CMP(12)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM13,
+ "Intel(R) I219-LM TGP(13)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V13,
+ "Intel(R) I219-V TGP(13)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM14,
+ "Intel(R) I219-LM TGP(14)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V14,
+ "Intel(R) I219-V TGP(14)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_LM15,
+ "Intel(R) I219-LM TGP(15)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_TGP_I219_V15,
+ "Intel(R) I219-V TGP(15)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM16,
+ "Intel(R) I219-LM ADL(16)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V16,
+ "Intel(R) I219-V ADL(16)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM17,
+ "Intel(R) I219-LM ADL(17)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V17,
+ "Intel(R) I219-V ADL(17)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_LM18,
+ "Intel(R) I219-LM MTP(18)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_MTP_I219_V18,
+ "Intel(R) I219-V MTP(18)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_LM19,
+ "Intel(R) I219-LM ADL(19)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ADL_I219_V19,
+ "Intel(R) I219-V ADL(19)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM20,
+ "Intel(R) I219-LM LNL(20)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V20,
+ "Intel(R) I219-V LNL(20)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_LM21,
+ "Intel(R) I219-LM LNL(21)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_LNL_I219_V21,
+ "Intel(R) I219-V LNL(21)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM22,
+ "Intel(R) I219-LM RPL(22)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V22,
+ "Intel(R) I219-V RPL(22)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_LM23,
+ "Intel(R) I219-LM RPL(23)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_RPL_I219_V23,
+ "Intel(R) I219-V RPL(23)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_LM24,
+ "Intel(R) I219-LM ARL(24)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_ARL_I219_V24,
+ "Intel(R) I219-V ARL(24)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM25,
+ "Intel(R) I219-LM PTP(25)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V25,
+ "Intel(R) I219-V PTP(25)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM26,
+ "Intel(R) I219-LM PTP(26)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V26,
+ "Intel(R) I219-V PTP(26)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_LM27,
+ "Intel(R) I219-LM PTP(27)"),
+ PVID(0x8086, E1000_DEV_ID_PCH_PTP_I219_V27,
+ "Intel(R) I219-V PTP(27)"),
/* required last entry */
PVID_END
};
@@ -217,45 +322,68 @@ static const pci_vendor_info_t em_vendor_info_array[] =
static const pci_vendor_info_t igb_vendor_info_array[] =
{
/* Intel(R) - igb-class devices */
- PVID(0x8086, E1000_DEV_ID_82575EB_COPPER, "Intel(R) PRO/1000 82575EB (Copper)"),
- PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, "Intel(R) PRO/1000 82575EB (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, "Intel(R) PRO/1000 VT 82575GB (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82575EB_COPPER,
+ "Intel(R) PRO/1000 82575EB (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES,
+ "Intel(R) PRO/1000 82575EB (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER,
+ "Intel(R) PRO/1000 VT 82575GB (Quad Copper)"),
PVID(0x8086, E1000_DEV_ID_82576, "Intel(R) PRO/1000 82576"),
PVID(0x8086, E1000_DEV_ID_82576_NS, "Intel(R) PRO/1000 82576NS"),
- PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES, "Intel(R) PRO/1000 82576NS (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82576_FIBER, "Intel(R) PRO/1000 EF 82576 (Dual Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82576_SERDES, "Intel(R) PRO/1000 82576 (Dual SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD, "Intel(R) PRO/1000 ET 82576 (Quad SERDES)"),
- PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER, "Intel(R) PRO/1000 ET 82576 (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, "Intel(R) PRO/1000 ET(2) 82576 (Quad Copper)"),
- PVID(0x8086, E1000_DEV_ID_82576_VF, "Intel(R) PRO/1000 82576 Virtual Function"),
- PVID(0x8086, E1000_DEV_ID_82580_COPPER, "Intel(R) I340 82580 (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82576_NS_SERDES,
+ "Intel(R) PRO/1000 82576NS (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82576_FIBER,
+ "Intel(R) PRO/1000 EF 82576 (Dual Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_82576_SERDES,
+ "Intel(R) PRO/1000 82576 (Dual SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82576_SERDES_QUAD,
+ "Intel(R) PRO/1000 ET 82576 (Quad SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER,
+ "Intel(R) PRO/1000 ET 82576 (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2,
+ "Intel(R) PRO/1000 ET(2) 82576 (Quad Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82576_VF,
+ "Intel(R) PRO/1000 82576 Virtual Function"),
+ PVID(0x8086, E1000_DEV_ID_82580_COPPER,
+ "Intel(R) I340 82580 (Copper)"),
PVID(0x8086, E1000_DEV_ID_82580_FIBER, "Intel(R) I340 82580 (Fiber)"),
- PVID(0x8086, E1000_DEV_ID_82580_SERDES, "Intel(R) I340 82580 (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_82580_SERDES,
+ "Intel(R) I340 82580 (SERDES)"),
PVID(0x8086, E1000_DEV_ID_82580_SGMII, "Intel(R) I340 82580 (SGMII)"),
- PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL, "Intel(R) I340-T2 82580 (Dual Copper)"),
- PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER, "Intel(R) I340-F4 82580 (Quad Fiber)"),
- PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES, "Intel(R) DH89XXCC (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII, "Intel(R) I347-AT4 DH89XXCC"),
+ PVID(0x8086, E1000_DEV_ID_82580_COPPER_DUAL,
+ "Intel(R) I340-T2 82580 (Dual Copper)"),
+ PVID(0x8086, E1000_DEV_ID_82580_QUAD_FIBER,
+ "Intel(R) I340-F4 82580 (Quad Fiber)"),
+ PVID(0x8086, E1000_DEV_ID_DH89XXCC_SERDES,
+ "Intel(R) DH89XXCC (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_DH89XXCC_SGMII,
+ "Intel(R) I347-AT4 DH89XXCC"),
PVID(0x8086, E1000_DEV_ID_DH89XXCC_SFP, "Intel(R) DH89XXCC (SFP)"),
- PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE, "Intel(R) DH89XXCC (Backplane)"),
+ PVID(0x8086, E1000_DEV_ID_DH89XXCC_BACKPLANE,
+ "Intel(R) DH89XXCC (Backplane)"),
PVID(0x8086, E1000_DEV_ID_I350_COPPER, "Intel(R) I350 (Copper)"),
PVID(0x8086, E1000_DEV_ID_I350_FIBER, "Intel(R) I350 (Fiber)"),
PVID(0x8086, E1000_DEV_ID_I350_SERDES, "Intel(R) I350 (SERDES)"),
PVID(0x8086, E1000_DEV_ID_I350_SGMII, "Intel(R) I350 (SGMII)"),
PVID(0x8086, E1000_DEV_ID_I350_VF, "Intel(R) I350 Virtual Function"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER, "Intel(R) I210 (Copper)"),
- PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT, "Intel(R) I210 IT (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_I210_COPPER_IT,
+ "Intel(R) I210 IT (Copper)"),
PVID(0x8086, E1000_DEV_ID_I210_COPPER_OEM1, "Intel(R) I210 (OEM)"),
- PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS, "Intel(R) I210 Flashless (Copper)"),
- PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS, "Intel(R) I210 Flashless (SERDES)"),
- PVID(0x8086, E1000_DEV_ID_I210_SGMII_FLASHLESS, "Intel(R) I210 Flashless (SGMII)"),
+ PVID(0x8086, E1000_DEV_ID_I210_COPPER_FLASHLESS,
+ "Intel(R) I210 Flashless (Copper)"),
+ PVID(0x8086, E1000_DEV_ID_I210_SERDES_FLASHLESS,
+ "Intel(R) I210 Flashless (SERDES)"),
+ PVID(0x8086, E1000_DEV_ID_I210_SGMII_FLASHLESS,
+ "Intel(R) I210 Flashless (SGMII)"),
PVID(0x8086, E1000_DEV_ID_I210_FIBER, "Intel(R) I210 (Fiber)"),
PVID(0x8086, E1000_DEV_ID_I210_SERDES, "Intel(R) I210 (SERDES)"),
PVID(0x8086, E1000_DEV_ID_I210_SGMII, "Intel(R) I210 (SGMII)"),
PVID(0x8086, E1000_DEV_ID_I211_COPPER, "Intel(R) I211 (Copper)"),
- PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS, "Intel(R) I354 (1.0 GbE Backplane)"),
- PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS, "Intel(R) I354 (2.5 GbE Backplane)"),
+ PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_1GBPS,
+ "Intel(R) I354 (1.0 GbE Backplane)"),
+ PVID(0x8086, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS,
+ "Intel(R) I354 (2.5 GbE Backplane)"),
PVID(0x8086, E1000_DEV_ID_I354_SGMII, "Intel(R) I354 (SGMII)"),
/* required last entry */
PVID_END
@@ -273,10 +401,13 @@ static int em_if_shutdown(if_ctx_t);
static int em_if_suspend(if_ctx_t);
static int em_if_resume(if_ctx_t);
-static int em_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
-static int em_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int em_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int em_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
static void em_if_queues_free(if_ctx_t);
+static uint64_t em_if_get_vf_counter(if_ctx_t, ift_counter);
static uint64_t em_if_get_counter(if_ctx_t, ift_counter);
static void em_if_init(if_ctx_t);
static void em_if_stop(if_ctx_t);
@@ -310,6 +441,7 @@ static int igb_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
static void em_if_multi_set(if_ctx_t);
static void em_if_update_admin_status(if_ctx_t);
static void em_if_debug(if_ctx_t);
+static void em_update_vf_stats_counters(struct e1000_softc *);
static void em_update_stats_counters(struct e1000_softc *);
static void em_add_hw_stats(struct e1000_softc *);
static int em_if_set_promisc(if_ctx_t, int);
@@ -329,10 +461,13 @@ static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int em_get_rs(SYSCTL_HANDLER_ARGS);
static void em_print_debug_info(struct e1000_softc *);
static int em_is_valid_ether_addr(u8 *);
+static void em_newitr(struct e1000_softc *, struct em_rx_queue *,
+ struct tx_ring *, struct rx_ring *);
static bool em_automask_tso(if_ctx_t);
+static int em_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
static void em_add_int_delay_sysctl(struct e1000_softc *, const char *,
- const char *, struct em_int_delay_info *, int, int);
+ const char *, struct em_int_delay_info *, int, int);
/* Management and WOL Support */
static void em_init_manageability(struct e1000_softc *);
static void em_release_manageability(struct e1000_softc *);
@@ -354,6 +489,7 @@ static void em_enable_vectors_82574(if_ctx_t);
static int em_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int em_sysctl_eee(SYSCTL_HANDLER_ARGS);
+static int igb_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static void em_if_led_func(if_ctx_t, int);
static int em_get_regs(SYSCTL_HANDLER_ARGS);
@@ -498,9 +634,6 @@ static driver_t igb_if_driver = {
#define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
#define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
-#define MAX_INTS_PER_SEC 8000
-#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
-
/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO 0
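As an aside on the EM_TICKS_TO_USECS/EM_USECS_TO_TICKS macros above: they
convert between microseconds and the hardware's 1.024 us delay-timer units,
rounding to nearest. A quick worked example (editor's illustration only):

	/*
	 * Illustration (not part of the diff): the +512/+500 terms
	 * round to nearest rather than truncate.
	 * EM_USECS_TO_TICKS(100) = (1000 * 100 + 512) / 1024 = 98
	 * EM_TICKS_TO_USECS(98)  = (1024 * 98 + 500) / 1000 = 100
	 */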
@@ -515,10 +648,10 @@ SYSCTL_INT(_hw_em, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV);
static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR);
-SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &em_tx_int_delay_dflt,
- 0, "Default transmit interrupt delay in usecs");
-SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &em_rx_int_delay_dflt,
- 0, "Default receive interrupt delay in usecs");
+SYSCTL_INT(_hw_em, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN,
+ &em_tx_int_delay_dflt, 0, "Default transmit interrupt delay in usecs");
+SYSCTL_INT(_hw_em, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN,
+ &em_rx_int_delay_dflt, 0, "Default receive interrupt delay in usecs");
static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV);
static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV);
@@ -530,7 +663,8 @@ SYSCTL_INT(_hw_em, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
"Default receive interrupt delay limit in usecs");
static int em_smart_pwr_down = false;
-SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &em_smart_pwr_down,
+SYSCTL_INT(_hw_em, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN,
+ &em_smart_pwr_down,
0, "Set to true to leave smart power down enabled on newer adapters");
static bool em_unsupported_tso = false;
@@ -548,9 +682,18 @@ SYSCTL_INT(_hw_em, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &eee_setting, 0,
"Enable Energy Efficient Ethernet");
/*
+ * AIM: Adaptive Interrupt Moderation,
+ * which means that the interrupt rate is varied over time based on the
+ * traffic for that interrupt vector.
+ */
+static int em_enable_aim = 1;
+SYSCTL_INT(_hw_em, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &em_enable_aim,
+ 0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");
+
+/*
** Tuneable Interrupt rate
*/
-static int em_max_interrupt_rate = 8000;
+static int em_max_interrupt_rate = EM_INTS_DEFAULT;
SYSCTL_INT(_hw_em, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&em_max_interrupt_rate, 0, "Maximum interrupts per second");
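EM_INTS_DEFAULT and EM_INTS_TO_ITR come from the driver header and are not
shown in this diff; assuming they preserve the arithmetic of the removed
DEFAULT_ITR macro (the ITR register counts 256 ns units), the default rate
maps to a register value roughly as follows:

	/*
	 * Assumed arithmetic, mirroring the removed DEFAULT_ITR:
	 * EM_INTS_TO_ITR(8000) = 1000000000 / (8000 * 256) = 488
	 * i.e. one interrupt per 488 * 256 ns ~= 125 us ~= 8000/s.
	 */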
@@ -578,7 +721,8 @@ static struct if_shared_ctx em_sctx_init = {
.isc_vendor_info = em_vendor_info_array,
.isc_driver_version = em_driver_version,
.isc_driver = &em_if_driver,
- .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
+ .isc_flags =
+ IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
@@ -605,7 +749,8 @@ static struct if_shared_ctx igb_sctx_init = {
.isc_vendor_info = igb_vendor_info_array,
.isc_driver_version = igb_driver_version,
.isc_driver = &igb_if_driver,
- .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
+ .isc_flags =
+ IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_nrxd_min = {EM_MIN_RXD},
.isc_ntxd_min = {EM_MIN_TXD},
@@ -715,15 +860,21 @@ static int em_get_regs(SYSCTL_HANDLER_ARGS)
for (j = 0; j < nrxd; j++) {
u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
- sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
+ sbuf_printf(sb, "\tReceive Descriptor Address %d: %08"
+ PRIx64 " Error:%d Length:%d\n",
+ j, rxr->rx_base[j].read.buffer_addr, staterr, length);
}
for (j = 0; j < min(ntxd, 256); j++) {
unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
- sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
- j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
- buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & E1000_TXD_STAT_DD : 0);
+ sbuf_printf(sb,
+ "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x"
+ " eop: %d DD=%d\n",
+ j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
+ buf->eop != -1 ?
+ txr->tx_base[buf->eop].upper.fields.status &
+ E1000_TXD_STAT_DD : 0);
}
}
@@ -831,16 +982,21 @@ em_if_attach_pre(if_ctx_t ctx)
child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "nvm",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
em_sysctl_nvm_info, "I", "NVM Information");
+ sc->enable_aim = em_enable_aim;
+ SYSCTL_ADD_INT(ctx_list, child, OID_AUTO, "enable_aim",
+ CTLFLAG_RW, &sc->enable_aim, 0,
+ "Interrupt Moderation (1=normal, 2=lowlatency)");
+
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
em_sysctl_print_fw_version, "A",
"Prints FW/NVM Versions");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "debug",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
em_sysctl_debug_info, "I", "Debug Information");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
@@ -855,15 +1011,42 @@ em_if_attach_pre(if_ctx_t ctx)
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
em_get_rs, "I", "Dump RS indexes");
+ if (hw->mac.type >= e1000_i350) {
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ igb_sysctl_dmac, "I", "DMA Coalesce");
+ }
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_first_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 0, em_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for first segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_middle_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 1, em_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for middle segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_last_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 2, em_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for last segment");
+
scctx->isc_tx_nsegments = EM_MAX_SCATTER;
- scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
+ scctx->isc_nrxqsets_max =
+ scctx->isc_ntxqsets_max = em_set_num_queues(ctx);
if (bootverbose)
device_printf(dev, "attach_pre capping queues at %d\n",
scctx->isc_ntxqsets_max);
if (hw->mac.type >= igb_mac_min) {
- scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
- scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
+ sizeof(union e1000_adv_tx_desc), EM_DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
+ sizeof(union e1000_adv_rx_desc), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(union e1000_adv_tx_desc);
scctx->isc_rxd_size[0] = sizeof(union e1000_adv_rx_desc);
scctx->isc_txrx = &igb_txrx;
@@ -882,8 +1065,10 @@ em_if_attach_pre(if_ctx_t ctx)
*/
scctx->isc_msix_bar = pci_msix_table_bar(dev);
} else if (hw->mac.type >= em_mac_min) {
- scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]* sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
- scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
+ sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
+ sizeof(union e1000_rx_desc_extended), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
scctx->isc_rxd_size[0] = sizeof(union e1000_rx_desc_extended);
scctx->isc_txrx = &em_txrx;
@@ -894,11 +1079,12 @@ em_if_attach_pre(if_ctx_t ctx)
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO |
CSUM_IP6_TCP | CSUM_IP6_UDP;
- /* Disable TSO on all em(4) until ring stalls can be debugged */
+ /* Disable TSO on all em(4) until ring stalls are debugged */
scctx->isc_capenable &= ~IFCAP_TSO;
/*
- * Disable TSO on SPT due to errata that downclocks DMA performance
+ * Disable TSO on SPT due to errata that downclocks DMA
+ * performance
* i218-i219 Specification Update 1.5.4.5
*/
if (hw->mac.type == e1000_pch_spt)
@@ -915,8 +1101,10 @@ em_if_attach_pre(if_ctx_t ctx)
scctx->isc_disable_msix = 1;
}
} else {
- scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
- scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
+ scctx->isc_txqsizes[0] = roundup2((scctx->isc_ntxd[0] + 1) *
+ sizeof(struct e1000_tx_desc), EM_DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2((scctx->isc_nrxd[0] + 1) *
+ sizeof(struct e1000_rx_desc), EM_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(struct e1000_tx_desc);
scctx->isc_rxd_size[0] = sizeof(struct e1000_rx_desc);
scctx->isc_txrx = &lem_txrx;
@@ -929,7 +1117,7 @@ em_if_attach_pre(if_ctx_t ctx)
scctx->isc_tx_csum_flags = CSUM_TCP | CSUM_UDP | CSUM_IP_TSO |
CSUM_IP6_TCP | CSUM_IP6_UDP;
- /* Disable TSO on all lem(4) until ring stalls can be debugged */
+ /* Disable TSO on all lem(4) until ring stalls are debugged */
scctx->isc_capenable &= ~IFCAP_TSO;
/* 82541ER doesn't do HW tagging */
@@ -940,15 +1128,18 @@ em_if_attach_pre(if_ctx_t ctx)
}
/* This is the first e1000 chip and it does not do offloads */
if (hw->mac.type == e1000_82542) {
- scctx->isc_capabilities &= ~(IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
- IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWTAGGING |
- IFCAP_VLAN_HWFILTER | IFCAP_TSO | IFCAP_VLAN_HWTSO);
+ scctx->isc_capabilities &= ~(IFCAP_HWCSUM |
+ IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM_IPV6 |
+ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER |
+ IFCAP_TSO | IFCAP_VLAN_HWTSO);
scctx->isc_capenable = scctx->isc_capabilities;
}
/* These can't do TSO for various reasons */
- if (hw->mac.type < e1000_82544 || hw->mac.type == e1000_82547 ||
+ if (hw->mac.type < e1000_82544 ||
+ hw->mac.type == e1000_82547 ||
hw->mac.type == e1000_82547_rev_2) {
- scctx->isc_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
+ scctx->isc_capabilities &=
+ ~(IFCAP_TSO | IFCAP_VLAN_HWTSO);
scctx->isc_capenable = scctx->isc_capabilities;
}
/* XXXKB: No IPv6 before this? */
@@ -956,10 +1147,14 @@ em_if_attach_pre(if_ctx_t ctx)
scctx->isc_capabilities &= ~IFCAP_HWCSUM_IPV6;
scctx->isc_capenable = scctx->isc_capabilities;
}
- /* "PCI/PCI-X SDM 4.0" page 33 (b) - FDX requirement on these chips */
- if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2)
- scctx->isc_capenable &= ~(IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM |
- IFCAP_HWCSUM_IPV6);
+ /*
+ * "PCI/PCI-X SDM 4.0" page 33 (b):
+ * FDX requirement on these chips
+ */
+ if (hw->mac.type == e1000_82547 ||
+ hw->mac.type == e1000_82547_rev_2)
+ scctx->isc_capenable &= ~(IFCAP_HWCSUM |
+ IFCAP_VLAN_HWCSUM | IFCAP_HWCSUM_IPV6);
/* INTx only */
scctx->isc_msix_bar = 0;
@@ -1006,11 +1201,9 @@ em_if_attach_pre(if_ctx_t ctx)
** FLASH read/write macros in the shared code.
*/
else if (hw->mac.type >= e1000_pch_spt) {
- sc->osdep.flash_bus_space_tag =
- sc->osdep.mem_bus_space_tag;
+ sc->osdep.flash_bus_space_tag = sc->osdep.mem_bus_space_tag;
sc->osdep.flash_bus_space_handle =
- sc->osdep.mem_bus_space_handle
- + E1000_FLASH_BASE_ADDR;
+ sc->osdep.mem_bus_space_handle + E1000_FLASH_BASE_ADDR;
}
/* Do Shared Code initialization */
@@ -1026,27 +1219,24 @@ em_if_attach_pre(if_ctx_t ctx)
e1000_get_bus_info(hw);
/* Set up some sysctls for the tunable interrupt delays */
- em_add_int_delay_sysctl(sc, "rx_int_delay",
- "receive interrupt delay in usecs", &sc->rx_int_delay,
- E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
- em_add_int_delay_sysctl(sc, "tx_int_delay",
- "transmit interrupt delay in usecs", &sc->tx_int_delay,
- E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
- em_add_int_delay_sysctl(sc, "rx_abs_int_delay",
- "receive interrupt delay limit in usecs",
- &sc->rx_abs_int_delay,
- E1000_REGISTER(hw, E1000_RADV),
- em_rx_abs_int_delay_dflt);
- em_add_int_delay_sysctl(sc, "tx_abs_int_delay",
- "transmit interrupt delay limit in usecs",
- &sc->tx_abs_int_delay,
- E1000_REGISTER(hw, E1000_TADV),
- em_tx_abs_int_delay_dflt);
- em_add_int_delay_sysctl(sc, "itr",
- "interrupt delay limit in usecs/4",
- &sc->tx_itr,
- E1000_REGISTER(hw, E1000_ITR),
- DEFAULT_ITR);
+ if (hw->mac.type < igb_mac_min) {
+ em_add_int_delay_sysctl(sc, "rx_int_delay",
+ "receive interrupt delay in usecs", &sc->rx_int_delay,
+ E1000_REGISTER(hw, E1000_RDTR), em_rx_int_delay_dflt);
+ em_add_int_delay_sysctl(sc, "tx_int_delay",
+ "transmit interrupt delay in usecs", &sc->tx_int_delay,
+ E1000_REGISTER(hw, E1000_TIDV), em_tx_int_delay_dflt);
+ }
+ if (hw->mac.type >= e1000_82540 && hw->mac.type < igb_mac_min) {
+ em_add_int_delay_sysctl(sc, "rx_abs_int_delay",
+ "receive interrupt delay limit in usecs",
+ &sc->rx_abs_int_delay,
+ E1000_REGISTER(hw, E1000_RADV), em_rx_abs_int_delay_dflt);
+ em_add_int_delay_sysctl(sc, "tx_abs_int_delay",
+ "transmit interrupt delay limit in usecs",
+ &sc->tx_abs_int_delay,
+ E1000_REGISTER(hw, E1000_TADV), em_tx_abs_int_delay_dflt);
+ }
hw->mac.autoneg = DO_AUTO_NEG;
hw->phy.autoneg_wait_to_complete = false;
@@ -1080,7 +1270,8 @@ em_if_attach_pre(if_ctx_t ctx)
sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
if (sc->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
+ device_printf(dev,
+ "Can not allocate multicast setup array\n");
error = ENOMEM;
goto err_late;
}
@@ -1090,11 +1281,14 @@ em_if_attach_pre(if_ctx_t ctx)
/* Check SOL/IDER usage */
if (e1000_check_reset_block(hw))
- device_printf(dev, "PHY reset is blocked"
- " due to SOL/IDER session.\n");
+ device_printf(dev,
+ "PHY reset is blocked due to SOL/IDER session.\n");
/* Sysctl for setting Energy Efficient Ethernet */
- hw->dev_spec.ich8lan.eee_disable = eee_setting;
+ if (hw->mac.type < igb_mac_min)
+ hw->dev_spec.ich8lan.eee_disable = eee_setting;
+ else
+ hw->dev_spec._82575.eee_disable = eee_setting;
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_control",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
em_sysctl_eee, "I", "Disable Energy Efficient Ethernet");
@@ -1123,8 +1317,8 @@ em_if_attach_pre(if_ctx_t ctx)
/* Copy the permanent MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw) < 0) {
- device_printf(dev, "EEPROM read error while reading MAC"
- " address\n");
+ device_printf(dev,
+ "EEPROM read error while reading MAC address\n");
error = EIO;
goto err_late;
}
@@ -1185,6 +1379,11 @@ em_if_attach_post(if_ctx_t ctx)
em_reset(ctx);
/* Initialize statistics */
+ if (sc->vf_ifp)
+ sc->ustats.vf_stats = (struct e1000_vf_stats){};
+ else
+ sc->ustats.stats = (struct e1000_hw_stats){};
+
em_update_stats_counters(sc);
hw->mac.get_link_status = 1;
em_if_update_admin_status(ctx);
@@ -1199,7 +1398,10 @@ em_if_attach_post(if_ctx_t ctx)
return (0);
err_late:
- /* upon attach_post() error, iflib calls _if_detach() to free resources. */
+ /*
+ * Upon em_if_attach_post() error, iflib calls em_if_detach() to
+ * free resources.
+ */
return (error);
}
@@ -1341,8 +1543,7 @@ em_if_init(if_ctx_t ctx)
INIT_DEBUGOUT("em_if_init: begin");
/* Get the latest mac address, User can use a LAA */
- bcopy(if_getlladdr(ifp), sc->hw.mac.addr,
- ETHER_ADDR_LEN);
+ bcopy(if_getlladdr(ifp), sc->hw.mac.addr, ETHER_ADDR_LEN);
/* Put the address into the Receive Address Array */
e1000_rar_set(&sc->hw, sc->hw.mac.addr, 0);
@@ -1363,7 +1564,8 @@ em_if_init(if_ctx_t ctx)
em_reset(ctx);
em_if_update_admin_status(ctx);
- for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_rs_cidx = txr->tx_rs_pidx;
@@ -1410,8 +1612,10 @@ em_if_init(if_ctx_t ctx)
E1000_WRITE_REG(&sc->hw, E1000_CTRL_EXT, tmp);
/* Set the IVAR - interrupt vector routing. */
E1000_WRITE_REG(&sc->hw, E1000_IVAR, sc->ivars);
- } else if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
+ } else if (sc->intr_type == IFLIB_INTR_MSIX) {
+ /* Set up queue routing */
igb_configure_queues(sc);
+ }
/* this clears any pending interrupts */
E1000_READ_REG(&sc->hw, E1000_ICR);
@@ -1431,6 +1635,169 @@ em_if_init(if_ctx_t ctx)
}
}
+enum itr_latency_target {
+ itr_latency_disabled = 0,
+ itr_latency_lowest = 1,
+ itr_latency_low = 2,
+ itr_latency_bulk = 3
+};
+/*********************************************************************
+ *
+ * Helper to calculate next (E)ITR value for AIM
+ *
+ *********************************************************************/
+static void
+em_newitr(struct e1000_softc *sc, struct em_rx_queue *que,
+ struct tx_ring *txr, struct rx_ring *rxr)
+{
+ struct e1000_hw *hw = &sc->hw;
+ unsigned long bytes, bytes_per_packet, packets;
+ unsigned long rxbytes, rxpackets, txbytes, txpackets;
+ u32 newitr;
+ u8 nextlatency;
+
+ rxbytes = atomic_load_long(&rxr->rx_bytes);
+ txbytes = atomic_load_long(&txr->tx_bytes);
+
+ /* Idle, do nothing */
+ if (txbytes == 0 && rxbytes == 0)
+ return;
+
+ newitr = 0;
+
+ if (sc->enable_aim) {
+ nextlatency = rxr->rx_nextlatency;
+
+ /* Use half default (4K) ITR if sub-gig */
+ if (sc->link_speed != 1000) {
+ newitr = EM_INTS_4K;
+ goto em_set_next_itr;
+ }
+ /* Want at least enough packet buffer for two frames to use AIM */
+ if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
+ newitr = em_max_interrupt_rate;
+ sc->enable_aim = 0;
+ goto em_set_next_itr;
+ }
+
+ bytes = bytes_per_packet = packets = 0;
+ /* Get largest values from the associated tx and rx ring */
+ txpackets = atomic_load_long(&txr->tx_packets);
+ if (txpackets != 0) {
+ bytes = txbytes;
+ bytes_per_packet = txbytes / txpackets;
+ packets = txpackets;
+ }
+ rxpackets = atomic_load_long(&rxr->rx_packets);
+ if (rxpackets != 0) {
+ bytes = lmax(bytes, rxbytes);
+ bytes_per_packet =
+ lmax(bytes_per_packet, rxbytes / rxpackets);
+ packets = lmax(packets, rxpackets);
+ }
+
+ /* Latency state machine */
+ switch (nextlatency) {
+ case itr_latency_disabled: /* Bootstrapping */
+ nextlatency = itr_latency_low;
+ break;
+ case itr_latency_lowest: /* 70k ints/s */
+ /* TSO and jumbo frames */
+ if (bytes_per_packet > 8000)
+ nextlatency = itr_latency_bulk;
+ else if ((packets < 5) && (bytes > 512))
+ nextlatency = itr_latency_low;
+ break;
+ case itr_latency_low: /* 20k ints/s */
+ if (bytes > 10000) {
+ /* Handle TSO */
+ if (bytes_per_packet > 8000)
+ nextlatency = itr_latency_bulk;
+ else if ((packets < 10) ||
+ (bytes_per_packet > 1200))
+ nextlatency = itr_latency_bulk;
+ else if (packets > 35)
+ nextlatency = itr_latency_lowest;
+ } else if (bytes_per_packet > 2000) {
+ nextlatency = itr_latency_bulk;
+ } else if (packets < 3 && bytes < 512) {
+ nextlatency = itr_latency_lowest;
+ }
+ break;
+ case itr_latency_bulk: /* 4k ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ nextlatency = itr_latency_low;
+ } else if (bytes < 1500)
+ nextlatency = itr_latency_low;
+ break;
+ default:
+ device_printf(sc->dev,
+ "Unexpected newitr transition %d\n", nextlatency);
+ nextlatency = itr_latency_low;
+ break;
+ }
+
+ /* Trim itr_latency_lowest for default AIM setting */
+ if (sc->enable_aim == 1 && nextlatency == itr_latency_lowest)
+ nextlatency = itr_latency_low;
+
+ /* Request new latency */
+ rxr->rx_nextlatency = nextlatency;
+ } else {
+ /* We may have toggled to AIM disabled */
+ nextlatency = itr_latency_disabled;
+ rxr->rx_nextlatency = nextlatency;
+ }
+
+ /* ITR state machine */
+ switch (nextlatency) {
+ case itr_latency_lowest:
+ newitr = EM_INTS_70K;
+ break;
+ case itr_latency_low:
+ newitr = EM_INTS_20K;
+ break;
+ case itr_latency_bulk:
+ newitr = EM_INTS_4K;
+ break;
+ case itr_latency_disabled:
+ default:
+ newitr = em_max_interrupt_rate;
+ break;
+ }
+
+em_set_next_itr:
+ if (hw->mac.type >= igb_mac_min) {
+ newitr = IGB_INTS_TO_EITR(newitr);
+
+ if (hw->mac.type == e1000_82575)
+ newitr |= newitr << 16;
+ else
+ newitr |= E1000_EITR_CNT_IGNR;
+
+ if (newitr != que->itr_setting) {
+ que->itr_setting = newitr;
+ E1000_WRITE_REG(hw, E1000_EITR(que->msix),
+ que->itr_setting);
+ }
+ } else {
+ newitr = EM_INTS_TO_ITR(newitr);
+
+ if (newitr != que->itr_setting) {
+ que->itr_setting = newitr;
+ if (hw->mac.type == e1000_82574 && que->msix) {
+ E1000_WRITE_REG(hw,
+ E1000_EITR_82574(que->msix),
+ que->itr_setting);
+ } else {
+ E1000_WRITE_REG(hw, E1000_ITR,
+ que->itr_setting);
+ }
+ }
+ }
+}
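To make the two state machines above concrete, one hedged trace (editor's
illustration; the rates follow the 70k/20k/4k notes on the case labels):

	/*
	 * Illustrative trace: gigabit link, one interrupt window moves
	 * 40 rx packets / 60000 bytes (bytes_per_packet = 1500) while
	 * in itr_latency_low.  Since bytes > 10000 and
	 * bytes_per_packet > 1200, the next state is itr_latency_bulk
	 * and (E)ITR is reprogrammed for ~4000 ints/s; a later quiet
	 * window (bytes < 1500) steps it back to itr_latency_low.
	 */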
+
/*********************************************************************
*
* Fast Legacy/MSI Combined Interrupt Service routine
@@ -1440,10 +1807,14 @@ int
em_intr(void *arg)
{
struct e1000_softc *sc = arg;
+ struct e1000_hw *hw = &sc->hw;
+ struct em_rx_queue *que = &sc->rx_queues[0];
+ struct tx_ring *txr = &sc->tx_queues[0].txr;
+ struct rx_ring *rxr = &que->rxr;
if_ctx_t ctx = sc->ctx;
u32 reg_icr;
- reg_icr = E1000_READ_REG(&sc->hw, E1000_ICR);
+ reg_icr = E1000_READ_REG(hw, E1000_ICR);
/* Hot eject? */
if (reg_icr == 0xffffffff)
@@ -1457,7 +1828,7 @@ em_intr(void *arg)
* Starting with the 82571 chip, bit 31 should be used to
* determine whether the interrupt belongs to us.
*/
- if (sc->hw.mac.type >= e1000_82571 &&
+ if (hw->mac.type >= e1000_82571 &&
(reg_icr & E1000_ICR_INT_ASSERTED) == 0)
return FILTER_STRAY;
@@ -1476,6 +1847,15 @@ em_intr(void *arg)
if (reg_icr & E1000_ICR_RXO)
sc->rx_overruns++;
+ if (hw->mac.type >= e1000_82540)
+ em_newitr(sc, que, txr, rxr);
+
+ /* Reset state */
+ txr->tx_bytes = 0;
+ txr->tx_packets = 0;
+ rxr->rx_bytes = 0;
+ rxr->rx_packets = 0;
+
return (FILTER_SCHEDULE_THREAD);
}
@@ -1528,9 +1908,20 @@ static int
em_msix_que(void *arg)
{
struct em_rx_queue *que = arg;
+ struct e1000_softc *sc = que->sc;
+ struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
+ struct rx_ring *rxr = &que->rxr;
++que->irqs;
+ em_newitr(sc, que, txr, rxr);
+
+ /* Reset state */
+ txr->tx_bytes = 0;
+ txr->tx_packets = 0;
+ rxr->rx_bytes = 0;
+ rxr->rx_packets = 0;
+
return (FILTER_SCHEDULE_THREAD);
}
@@ -1560,8 +1951,8 @@ em_msix_link(void *arg)
E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC);
E1000_WRITE_REG(&sc->hw, E1000_EIMS, sc->link_mask);
} else if (sc->hw.mac.type == e1000_82574) {
- E1000_WRITE_REG(&sc->hw, E1000_IMS, E1000_IMS_LSC |
- E1000_IMS_OTHER);
+ E1000_WRITE_REG(&sc->hw, E1000_IMS,
+ E1000_IMS_LSC | E1000_IMS_OTHER);
/*
* Because we must read the ICR for this interrupt it may
* clear other causes using autoclear, for this reason we
@@ -1704,7 +2095,8 @@ em_if_set_promisc(if_ctx_t ctx, int flags)
if (flags & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else
- mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
+ mcnt = min(if_llmaddr_count(ifp),
+ MAX_NUM_MULTICAST_ADDRESSES);
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
reg_rctl &= (~E1000_RCTL_MPE);
@@ -1753,7 +2145,7 @@ em_if_multi_set(if_ctx_t ctx)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- u8 *mta; /* Multicast array memory */
+ u8 *mta; /* Multicast array memory */
u32 reg_rctl = 0;
int mcnt = 0;
@@ -1812,7 +2204,6 @@ em_if_multi_set(if_ctx_t ctx)
static void
em_if_timer(if_ctx_t ctx, uint16_t qid)
{
-
if (qid != 0)
return;
@@ -1846,8 +2237,8 @@ em_if_update_admin_status(if_ctx_t ctx)
break;
case e1000_media_type_fiber:
e1000_check_for_link(hw);
- link_check = (E1000_READ_REG(hw, E1000_STATUS) &
- E1000_STATUS_LU);
+ link_check =
+ (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
break;
case e1000_media_type_internal_serdes:
e1000_check_for_link(hw);
@@ -1904,11 +2295,11 @@ em_if_update_admin_status(if_ctx_t ctx)
sc->flags |= IGB_MEDIA_RESET;
em_reset(ctx);
}
- /* Only do TSO on gigabit Ethernet for older chips due to errata */
+ /* Only do TSO on gigabit for older chips due to errata */
if (hw->mac.type < igb_mac_min)
automasked = em_automask_tso(ctx);
- /* Automasking resets the interface, so don't mark it up yet */
+ /* Automasking resets the interface so don't mark it up yet */
if (!automasked)
iflib_link_state_change(ctx, LINK_STATE_UP,
IF_Mbps(sc->link_speed));
@@ -1983,10 +2374,8 @@ em_identify_hardware(if_ctx_t ctx)
sc->hw.vendor_id = pci_get_vendor(dev);
sc->hw.device_id = pci_get_device(dev);
sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
- sc->hw.subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- sc->hw.subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
+ sc->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ sc->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);
/* Do Shared Code Init and Setup */
if (e1000_set_mac_type(&sc->hw)) {
@@ -2010,15 +2399,15 @@ em_allocate_pci_resources(if_ctx_t ctx)
int rid, val;
rid = PCIR_BAR(0);
- sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
+ sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
if (sc->memory == NULL) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
- sc->osdep.mem_bus_space_handle =
- rman_get_bushandle(sc->memory);
+ sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->memory);
sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
/* Only older adapters use IO mapping */
@@ -2041,8 +2430,8 @@ em_allocate_pci_resources(if_ctx_t ctx)
sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT,
&rid, RF_ACTIVE);
if (sc->ioport == NULL) {
- device_printf(dev, "Unable to allocate bus resource: "
- "ioport\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: ioport\n");
return (ENXIO);
}
sc->hw.io_base = 0;
@@ -2075,9 +2464,12 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
- error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
+ IFLIB_INTR_RXTX, em_msix_que, rx_que, rx_que->me, buf);
if (error) {
- device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
+ device_printf(iflib_get_dev(ctx),
+ "Failed to allocate que int %d err: %d",
+ i, error);
sc->rx_num_queues = i + 1;
goto fail;
}
@@ -2130,10 +2522,12 @@ em_if_msix_intr_assign(if_ctx_t ctx, int msix)
/* Link interrupt */
rid = rx_vectors + 1;
- error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN, em_msix_link, sc, 0, "aq");
+ error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN,
+ em_msix_link, sc, 0, "aq");
if (error) {
- device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register admin handler");
goto fail;
}
sc->linkvec = rx_vectors;
@@ -2183,7 +2577,8 @@ igb_configure_queues(struct e1000_softc *sc)
rx_que = &sc->rx_queues[i];
if (i & 1) {
ivar &= 0xFF00FFFF;
- ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
+ ivar |= (rx_que->msix | E1000_IVAR_VALID) <<
+ 16;
} else {
ivar &= 0xFFFFFF00;
ivar |= rx_que->msix | E1000_IVAR_VALID;
@@ -2197,10 +2592,12 @@ igb_configure_queues(struct e1000_softc *sc)
tx_que = &sc->tx_queues[i];
if (i & 1) {
ivar &= 0x00FFFFFF;
- ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
+ ivar |= (tx_que->msix | E1000_IVAR_VALID) <<
+ 24;
} else {
ivar &= 0xFFFF00FF;
- ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
+ ivar |= (tx_que->msix | E1000_IVAR_VALID) <<
+ 8;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
sc->que_mask |= tx_que->eims;
@@ -2222,7 +2619,8 @@ igb_configure_queues(struct e1000_softc *sc)
ivar |= rx_que->msix | E1000_IVAR_VALID;
} else {
ivar &= 0xFF00FFFF;
- ivar |= (rx_que->msix | E1000_IVAR_VALID) << 16;
+ ivar |= (rx_que->msix | E1000_IVAR_VALID) <<
+ 16;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
sc->que_mask |= rx_que->eims;
@@ -2234,10 +2632,12 @@ igb_configure_queues(struct e1000_softc *sc)
tx_que = &sc->tx_queues[i];
if (i < 8) {
ivar &= 0xFFFF00FF;
- ivar |= (tx_que->msix | E1000_IVAR_VALID) << 8;
+ ivar |= (tx_que->msix | E1000_IVAR_VALID) <<
+ 8;
} else {
ivar &= 0x00FFFFFF;
- ivar |= (tx_que->msix | E1000_IVAR_VALID) << 24;
+ ivar |= (tx_que->msix | E1000_IVAR_VALID) <<
+ 24;
}
E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar);
sc->que_mask |= tx_que->eims;
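The IVAR writes in this function pack four 8-bit entries into each 32-bit
register, one per interrupt cause, where an entry is the MSI-X vector ORed
with E1000_IVAR_VALID; which byte lane a queue lands in depends on the MAC,
as the masks show. A minimal sketch for the 82576 layout (assumed values,
illustration only):

	/* Illustration: route rx queue 9 to MSI-X vector 9; on 82576,
	 * queues >= 8 use byte lane 2 of the IVAR. */
	ivar &= 0xFF00FFFF;
	ivar |= (9 | E1000_IVAR_VALID) << 16;	/* ORs in 0x00890000 */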
@@ -2264,8 +2664,8 @@ igb_configure_queues(struct e1000_softc *sc)
tmp = E1000_EICR_RX_QUEUE0 << i;
tmp |= E1000_EICR_TX_QUEUE0 << i;
rx_que->eims = tmp;
- E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0),
- i, rx_que->eims);
+ E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), i,
+ rx_que->eims);
sc->que_mask |= rx_que->eims;
}
@@ -2277,18 +2677,19 @@ igb_configure_queues(struct e1000_softc *sc)
break;
}
- /* Set the starting interrupt rate */
- if (em_max_interrupt_rate > 0)
- newitr = (4000000 / em_max_interrupt_rate) & 0x7FFC;
+ /* Set the igb starting interrupt rate */
+ if (em_max_interrupt_rate > 0) {
+ newitr = IGB_INTS_TO_EITR(em_max_interrupt_rate);
- if (hw->mac.type == e1000_82575)
- newitr |= newitr << 16;
- else
- newitr |= E1000_EITR_CNT_IGNR;
+ if (hw->mac.type == e1000_82575)
+ newitr |= newitr << 16;
+ else
+ newitr |= E1000_EITR_CNT_IGNR;
- for (int i = 0; i < sc->rx_num_queues; i++) {
- rx_que = &sc->rx_queues[i];
- E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
+ for (int i = 0; i < sc->rx_num_queues; i++) {
+ rx_que = &sc->rx_queues[i];
+ E1000_WRITE_REG(hw, E1000_EITR(rx_que->msix), newitr);
+ }
}
return;
@@ -2524,11 +2925,11 @@ igb_init_dmac(struct e1000_softc *sc, u32 pba)
static void
em_flush_tx_ring(struct e1000_softc *sc)
{
- struct e1000_hw *hw = &sc->hw;
- struct tx_ring *txr = &sc->tx_queues->txr;
- struct e1000_tx_desc *txd;
- u32 tctl, txd_lower = E1000_TXD_CMD_IFCS;
- u16 size = 512;
+ struct e1000_hw *hw = &sc->hw;
+ struct tx_ring *txr = &sc->tx_queues->txr;
+ struct e1000_tx_desc *txd;
+ u32 tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ u16 size = 512;
tctl = E1000_READ_REG(hw, E1000_TCTL);
E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
@@ -2556,8 +2957,8 @@ em_flush_tx_ring(struct e1000_softc *sc)
static void
em_flush_rx_ring(struct e1000_softc *sc)
{
- struct e1000_hw *hw = &sc->hw;
- u32 rctl, rxdctl;
+ struct e1000_hw *hw = &sc->hw;
+ u32 rctl, rxdctl;
rctl = E1000_READ_REG(hw, E1000_RCTL);
E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
@@ -2569,7 +2970,8 @@ em_flush_rx_ring(struct e1000_softc *sc)
rxdctl &= 0xffffc000;
/*
* update thresholds: prefetch threshold to 31, host threshold to 1
- * and make sure the granularity is "descriptors" and not "cache lines"
+ * and make sure the granularity is "descriptors" and not
+ * "cache lines"
*/
rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl);
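For the threshold comment above, a sketch of the RXDCTL layout being
assumed (field positions recalled from the shared e1000 register
definitions; treat as illustrative):

	/*
	 * Illustrative field map: PTHRESH occupies bits 0-5 and
	 * HTHRESH bits 8-13, so 0x1F | (1 << 8) programs a prefetch
	 * threshold of 31 and a host threshold of 1, while
	 * E1000_RXDCTL_THRESH_UNIT_DESC switches the granularity
	 * from cache lines to descriptors.
	 */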
@@ -2596,15 +2998,15 @@ em_flush_desc_rings(struct e1000_softc *sc)
{
struct e1000_hw *hw = &sc->hw;
device_t dev = sc->dev;
- u16 hang_state;
- u32 fext_nvm11, tdlen;
+ u16 hang_state;
+ u32 fext_nvm11, tdlen;
/* First, disable MULR fix in FEXTNVM11 */
fext_nvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
E1000_WRITE_REG(hw, E1000_FEXTNVM11, fext_nvm11);
- /* do nothing if we're not in faulty state, or if the queue is empty */
+ /* do nothing if we're not in faulty state, or the queue is empty */
tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
hang_state = pci_read_config(dev, PCICFG_DESC_RING_STATUS, 2);
if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
@@ -2719,7 +3121,7 @@ em_reset(if_ctx_t ctx)
pba = E1000_PBA_34K;
break;
default:
- /* Remaining devices assumed to have a Packet Buffer of 64K. */
+ /* Remaining devices assumed to have a 64K Packet Buffer. */
if (hw->mac.max_frame_size > 8192)
pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */
else
@@ -2762,7 +3164,8 @@ em_reset(if_ctx_t ctx)
* response (Rx) to Ethernet PAUSE frames.
* - High water mark should allow for at least two frames to be
* received after sending an XOFF.
- * - Low water mark works best when it is very near the high water mark.
+ * - Low water mark works best when it is very near the high water
+ * mark.
* This allows the receiver to restart by sending XON when it has
* drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There
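A worked example of the watermark guidance above (the actual high/low water
assignments fall outside this hunk; the relationship shown is an assumption
consistent with the comment):

	/*
	 * Assumed relationship, for illustration:
	 * rx buffer = 40960 bytes, max_frame_size = 9216
	 * high_water = 40960 - roundup2(9216, 1024) = 31744
	 * low_water  = high_water - 1500            = 30244
	 * so XON resumes traffic once about one full frame drains.
	 */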
@@ -2875,6 +3278,9 @@ em_reset(if_ctx_t ctx)
if (hw->mac.type >= igb_mac_min)
igb_init_dmac(sc, pba);
+ /* Save the final PBA off if it needs to be used elsewhere i.e. AIM */
+ sc->pba = pba;
+
E1000_WRITE_REG(hw, E1000_VET, ETHERTYPE_VLAN);
e1000_get_phy_info(hw);
e1000_check_for_link(hw);
@@ -2889,9 +3295,9 @@ em_reset(if_ctx_t ctx)
static void
em_initialize_rss_mapping(struct e1000_softc *sc)
{
- uint8_t rss_key[4 * RSSKEYLEN];
+ uint8_t rss_key[4 * RSSKEYLEN];
uint32_t reta = 0;
- struct e1000_hw *hw = &sc->hw;
+ struct e1000_hw *hw = &sc->hw;
int i;
/*
@@ -3052,16 +3458,21 @@ em_setup_interface(if_ctx_t ctx)
if (sc->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- ifmedia_add(sc->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media,
+ IFM_ETHER | fiber_type | IFM_FDX, 0, NULL);
ifmedia_add(sc->media, IFM_ETHER | fiber_type, 0, NULL);
} else {
ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
- ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media,
+ IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
- ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media,
+ IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
if (sc->hw.phy.type != e1000_phy_ife) {
- ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
- ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(sc->media,
+ IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media,
+ IFM_ETHER | IFM_1000_T, 0, NULL);
}
}
ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -3070,7 +3481,8 @@ em_setup_interface(if_ctx_t ctx)
}
static int
-em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
+em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
@@ -3085,7 +3497,8 @@ em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs
if (!(sc->tx_queues =
(struct em_tx_queue *) malloc(sizeof(struct em_tx_queue) *
sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate queue memory\n");
return(ENOMEM);
}
@@ -3097,14 +3510,17 @@ em_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs
que->me = txr->me = i;
/* Allocate report status array */
- if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
+ if (!(txr->tx_rsq =
+ (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0],
+ M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx),
+ "failed to allocate rs_idxs memory\n");
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get the virtual and physical address of hardware queues */
txr->tx_base = (struct e1000_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
}
@@ -3119,7 +3535,8 @@ fail:
}
static int
-em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
+em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
int error = E1000_SUCCESS;
@@ -3133,7 +3550,8 @@ em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs
if (!(sc->rx_queues =
(struct em_rx_queue *) malloc(sizeof(struct em_rx_queue) *
sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate queue memory\n");
error = ENOMEM;
goto fail;
}
@@ -3145,8 +3563,9 @@ em_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs
rxr->que = que;
que->me = rxr->me = i;
- /* get the virtual and physical address of the hardware queues */
- rxr->rx_base = (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
+ /* get the virtual and physical address of hardware queues */
+ rxr->rx_base =
+ (union e1000_rx_desc_extended *)vaddrs[i*nrxqs];
rxr->rx_paddr = paddrs[i*nrxqs];
}
@@ -3219,10 +3638,8 @@ em_initialize_transmit_unit(if_ctx_t ctx)
/* Base and Len of TX Ring */
E1000_WRITE_REG(hw, E1000_TDLEN(i),
scctx->isc_ntxd[0] * sizeof(struct e1000_tx_desc));
- E1000_WRITE_REG(hw, E1000_TDBAH(i),
- (u32)(bus_addr >> 32));
- E1000_WRITE_REG(hw, E1000_TDBAL(i),
- (u32)bus_addr);
+ E1000_WRITE_REG(hw, E1000_TDBAH(i), (u32)(bus_addr >> 32));
+ E1000_WRITE_REG(hw, E1000_TDBAL(i), (u32)bus_addr);
/* Init the HEAD/TAIL indices */
E1000_WRITE_REG(hw, E1000_TDT(i), 0);
E1000_WRITE_REG(hw, E1000_TDH(i), 0);
@@ -3264,12 +3681,16 @@ em_initialize_transmit_unit(if_ctx_t ctx)
tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
}
- E1000_WRITE_REG(hw, E1000_TIPG, tipg);
- E1000_WRITE_REG(hw, E1000_TIDV, sc->tx_int_delay.value);
+ if (hw->mac.type < igb_mac_min) {
+ E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+ E1000_WRITE_REG(hw, E1000_TIDV, sc->tx_int_delay.value);
+
+ if (sc->tx_int_delay.value > 0)
+ sc->txd_cmd |= E1000_TXD_CMD_IDE;
+ }
- if(hw->mac.type >= e1000_82540)
- E1000_WRITE_REG(hw, E1000_TADV,
- sc->tx_abs_int_delay.value);
+ if (hw->mac.type >= e1000_82540)
+ E1000_WRITE_REG(hw, E1000_TADV, sc->tx_abs_int_delay.value);
if (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572) {
tarc = E1000_READ_REG(hw, E1000_TARC(0));
@@ -3294,16 +3715,13 @@ em_initialize_transmit_unit(if_ctx_t ctx)
E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
}
- if (sc->tx_int_delay.value > 0)
- sc->txd_cmd |= E1000_TXD_CMD_IDE;
-
/* Program the Transmit Control Register */
tctl = E1000_READ_REG(hw, E1000_TCTL);
tctl &= ~E1000_TCTL_CT;
tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
- if (hw->mac.type >= e1000_82571)
+ if (hw->mac.type >= e1000_82571 && hw->mac.type < igb_mac_min)
tctl |= E1000_TCTL_MULR;
/* This write will effectively turn on the transmit unit. */
@@ -3336,7 +3754,7 @@ em_initialize_receive_unit(if_ctx_t ctx)
struct e1000_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
if_t ifp = iflib_get_ifp(ctx);
- struct e1000_hw *hw = &sc->hw;
+ struct e1000_hw *hw = &sc->hw;
struct em_rx_queue *que;
int i;
uint32_t rctl, rxcsum;
@@ -3371,17 +3789,29 @@ em_initialize_receive_unit(if_ctx_t ctx)
if (!em_disable_crc_stripping)
rctl |= E1000_RCTL_SECRC;
- if (hw->mac.type >= e1000_82540) {
- E1000_WRITE_REG(hw, E1000_RADV,
- sc->rx_abs_int_delay.value);
+ /* lem/em default interrupt moderation */
+ if (hw->mac.type < igb_mac_min) {
+ if (hw->mac.type >= e1000_82540) {
+ E1000_WRITE_REG(hw, E1000_RADV,
+ sc->rx_abs_int_delay.value);
- /*
- * Set the interrupt throttling rate. Value is calculated
- * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
+ /* Set the default interrupt throttling rate */
+ E1000_WRITE_REG(hw, E1000_ITR,
+ EM_INTS_TO_ITR(em_max_interrupt_rate));
+ }
+
+ /* XXX TEMPORARY WORKAROUND: on some systems with 82573
+ * long latencies are observed, like Lenovo X60. This
+ * change eliminates the problem, but since having positive
+ * values in RDTR is a known source of problems on other
+ * platforms another solution is being sought.
*/
- E1000_WRITE_REG(hw, E1000_ITR, DEFAULT_ITR);
+ if (hw->mac.type == e1000_82573)
+ E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
+ else
+ E1000_WRITE_REG(hw, E1000_RDTR,
+ sc->rx_int_delay.value);
}
- E1000_WRITE_REG(hw, E1000_RDTR, sc->rx_int_delay.value);
if (hw->mac.type >= em_mac_min) {
uint32_t rfctl;
@@ -3396,7 +3826,7 @@ em_initialize_receive_unit(if_ctx_t ctx)
if (hw->mac.type == e1000_82574) {
for (int i = 0; i < 4; i++)
E1000_WRITE_REG(hw, E1000_EITR_82574(i),
- DEFAULT_ITR);
+ EM_INTS_TO_ITR(em_max_interrupt_rate));
/* Disable accelerated acknowledge */
rfctl |= E1000_RFCTL_ACK_DIS;
}
@@ -3431,16 +3861,6 @@ em_initialize_receive_unit(if_ctx_t ctx)
}
E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
- /*
- * XXX TEMPORARY WORKAROUND: on some systems with 82573
- * long latencies are observed, like Lenovo X60. This
- * change eliminates the problem, but since having positive
- * values in RDTR is a known source of problems on other
- * platforms another solution is being sought.
- */
- if (hw->mac.type == e1000_82573)
- E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
-
for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
/* Setup the Base and Length of the Rx Descriptor Ring */
@@ -3450,7 +3870,8 @@ em_initialize_receive_unit(if_ctx_t ctx)
#endif
E1000_WRITE_REG(hw, E1000_RDLEN(i),
- scctx->isc_nrxd[0] * sizeof(union e1000_rx_desc_extended));
+ scctx->isc_nrxd[0] *
+ sizeof(union e1000_rx_desc_extended));
E1000_WRITE_REG(hw, E1000_RDBAH(i), (u32)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i), (u32)bus_addr);
/* Setup the Head and Tail Descriptor Pointers */
@@ -3498,18 +3919,20 @@ em_initialize_receive_unit(if_ctx_t ctx)
E1000_SRRCTL_BSIZEPKT_SHIFT;
/*
- * If TX flow control is disabled and there's >1 queue defined,
- * enable DROP.
+ * If TX flow control is disabled and there's >1 queue
+ * defined, enable DROP.
*
- * This drops frames rather than hanging the RX MAC for all queues.
+ * This drops frames rather than hanging the RX MAC for all
+ * queues.
*/
if ((sc->rx_num_queues > 1) &&
(sc->fc == e1000_fc_none ||
sc->fc == e1000_fc_rx_pause)) {
srrctl |= E1000_SRRCTL_DROP_EN;
}
- /* Setup the Base and Length of the Rx Descriptor Rings */
- for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
+ /* Setup the Base and Length of the Rx Descriptor Rings */
+ for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues;
+ i++, que++) {
struct rx_ring *rxr = &que->rxr;
u64 bus_addr = rxr->rx_paddr;
u32 rxdctl;
@@ -3522,11 +3945,12 @@ em_initialize_receive_unit(if_ctx_t ctx)
#endif
E1000_WRITE_REG(hw, E1000_RDLEN(i),
- scctx->isc_nrxd[0] * sizeof(struct e1000_rx_desc));
+ scctx->isc_nrxd[0] *
+ sizeof(struct e1000_rx_desc));
E1000_WRITE_REG(hw, E1000_RDBAH(i),
- (uint32_t)(bus_addr >> 32));
+ (uint32_t)(bus_addr >> 32));
E1000_WRITE_REG(hw, E1000_RDBAL(i),
- (uint32_t)bus_addr);
+ (uint32_t)bus_addr);
E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
/* Enable this Queue */
rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
@@ -3661,15 +4085,16 @@ em_if_vlan_filter_write(struct e1000_softc *sc)
if (sc->vf_ifp)
return;
- /* Disable interrupts for lem-class devices during the filter change */
+ /* Disable interrupts for lem(4) devices during the filter change */
if (hw->mac.type < em_mac_min)
em_if_intr_disable(sc->ctx);
for (int i = 0; i < EM_VFTA_SIZE; i++)
if (sc->shadow_vfta[i] != 0) {
- /* XXXKB: incomplete VF support, we return early above */
+ /* XXXKB: incomplete VF support, we returned above */
if (sc->vf_ifp)
- e1000_vfta_set_vf(hw, sc->shadow_vfta[i], true);
+ e1000_vfta_set_vf(hw, sc->shadow_vfta[i],
+ true);
else
e1000_write_vfta(hw, i, sc->shadow_vfta[i]);
}
@@ -3687,8 +4112,8 @@ em_setup_vlan_hw_support(if_ctx_t ctx)
if_t ifp = iflib_get_ifp(ctx);
u32 reg;
- /* XXXKB: Return early if we are a VF until VF decap and filter management
- * is ready and tested.
+ /* XXXKB: Return early if we are a VF until VF decap and filter
+ * management is ready and tested.
*/
if (sc->vf_ifp)
return;
@@ -3733,6 +4158,7 @@ em_if_intr_enable(if_ctx_t ctx)
E1000_WRITE_REG(hw, EM_EIAC, sc->ims);
ims_mask |= sc->ims;
}
+
E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
E1000_WRITE_FLUSH(hw);
}
@@ -3900,8 +4326,10 @@ em_automask_tso(if_ctx_t ctx)
if_t ifp = iflib_get_ifp(ctx);
if (!em_unsupported_tso && sc->link_speed &&
- sc->link_speed != SPEED_1000 && scctx->isc_capenable & IFCAP_TSO) {
- device_printf(sc->dev, "Disabling TSO for 10/100 Ethernet.\n");
+ sc->link_speed != SPEED_1000 &&
+ scctx->isc_capenable & IFCAP_TSO) {
+ device_printf(sc->dev,
+ "Disabling TSO for 10/100 Ethernet.\n");
sc->tso_automasked = scctx->isc_capenable & IFCAP_TSO;
scctx->isc_capenable &= ~IFCAP_TSO;
if_setcapenablebit(ifp, 0, IFCAP_TSO);
@@ -4055,10 +4483,9 @@ em_enable_wakeup(if_ctx_t ctx)
device_t dev = iflib_get_dev(ctx);
if_t ifp = iflib_get_ifp(ctx);
int error = 0;
- u32 pmc, ctrl, ctrl_ext, rctl;
- u16 status;
+ u32 ctrl, ctrl_ext, rctl;
- if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(dev))
return;
/*
@@ -4115,11 +4542,8 @@ em_enable_wakeup(if_ctx_t ctx)
e1000_igp3_phy_powerdown_workaround_ich8lan(&sc->hw);
pme:
- status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
- status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
- status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
+ pci_enable_pme(dev);
return;
}
@@ -4251,118 +4675,176 @@ em_disable_aspm(struct e1000_softc *sc)
static void
em_update_stats_counters(struct e1000_softc *sc)
{
- u64 prev_xoffrxc = sc->stats.xoffrxc;
+ struct e1000_hw_stats *stats;
+ u64 prev_xoffrxc;
+
+ if (sc->vf_ifp) {
+ em_update_vf_stats_counters(sc);
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+ prev_xoffrxc = stats->xoffrxc;
if(sc->hw.phy.media_type == e1000_media_type_copper ||
(E1000_READ_REG(&sc->hw, E1000_STATUS) & E1000_STATUS_LU)) {
- sc->stats.symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
- sc->stats.sec += E1000_READ_REG(&sc->hw, E1000_SEC);
- }
- sc->stats.crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
- sc->stats.mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
- sc->stats.scc += E1000_READ_REG(&sc->hw, E1000_SCC);
- sc->stats.ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
-
- sc->stats.mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
- sc->stats.latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
- sc->stats.colc += E1000_READ_REG(&sc->hw, E1000_COLC);
- sc->stats.dc += E1000_READ_REG(&sc->hw, E1000_DC);
- sc->stats.rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
- sc->stats.xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
- sc->stats.xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
- sc->stats.xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
+ stats->symerrs += E1000_READ_REG(&sc->hw, E1000_SYMERRS);
+ stats->sec += E1000_READ_REG(&sc->hw, E1000_SEC);
+ }
+ stats->crcerrs += E1000_READ_REG(&sc->hw, E1000_CRCERRS);
+ stats->mpc += E1000_READ_REG(&sc->hw, E1000_MPC);
+ stats->scc += E1000_READ_REG(&sc->hw, E1000_SCC);
+ stats->ecol += E1000_READ_REG(&sc->hw, E1000_ECOL);
+
+ stats->mcc += E1000_READ_REG(&sc->hw, E1000_MCC);
+ stats->latecol += E1000_READ_REG(&sc->hw, E1000_LATECOL);
+ stats->colc += E1000_READ_REG(&sc->hw, E1000_COLC);
+ stats->dc += E1000_READ_REG(&sc->hw, E1000_DC);
+ stats->rlec += E1000_READ_REG(&sc->hw, E1000_RLEC);
+ stats->xonrxc += E1000_READ_REG(&sc->hw, E1000_XONRXC);
+ stats->xontxc += E1000_READ_REG(&sc->hw, E1000_XONTXC);
+ stats->xoffrxc += E1000_READ_REG(&sc->hw, E1000_XOFFRXC);
/*
** For watchdog management we need to know if we have been
** paused during the last interval, so capture that here.
*/
- if (sc->stats.xoffrxc != prev_xoffrxc)
+ if (stats->xoffrxc != prev_xoffrxc)
sc->shared->isc_pause_frames = 1;
- sc->stats.xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
- sc->stats.fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
- sc->stats.prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
- sc->stats.prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
- sc->stats.prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
- sc->stats.prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
- sc->stats.prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
- sc->stats.prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
- sc->stats.gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
- sc->stats.bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
- sc->stats.mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
- sc->stats.gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
+ stats->xofftxc += E1000_READ_REG(&sc->hw, E1000_XOFFTXC);
+ stats->fcruc += E1000_READ_REG(&sc->hw, E1000_FCRUC);
+ stats->prc64 += E1000_READ_REG(&sc->hw, E1000_PRC64);
+ stats->prc127 += E1000_READ_REG(&sc->hw, E1000_PRC127);
+ stats->prc255 += E1000_READ_REG(&sc->hw, E1000_PRC255);
+ stats->prc511 += E1000_READ_REG(&sc->hw, E1000_PRC511);
+ stats->prc1023 += E1000_READ_REG(&sc->hw, E1000_PRC1023);
+ stats->prc1522 += E1000_READ_REG(&sc->hw, E1000_PRC1522);
+ stats->gprc += E1000_READ_REG(&sc->hw, E1000_GPRC);
+ stats->bprc += E1000_READ_REG(&sc->hw, E1000_BPRC);
+ stats->mprc += E1000_READ_REG(&sc->hw, E1000_MPRC);
+ stats->gptc += E1000_READ_REG(&sc->hw, E1000_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
- sc->stats.gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
+ stats->gorc += E1000_READ_REG(&sc->hw, E1000_GORCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GORCH) << 32);
- sc->stats.gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
+ stats->gotc += E1000_READ_REG(&sc->hw, E1000_GOTCL) +
((u64)E1000_READ_REG(&sc->hw, E1000_GOTCH) << 32);
- sc->stats.rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
- sc->stats.ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
- sc->stats.rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
- sc->stats.roc += E1000_READ_REG(&sc->hw, E1000_ROC);
- sc->stats.rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
-
- sc->stats.tor += E1000_READ_REG(&sc->hw, E1000_TORH);
- sc->stats.tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
-
- sc->stats.tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
- sc->stats.tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
- sc->stats.ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
- sc->stats.ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
- sc->stats.ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
- sc->stats.ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
- sc->stats.ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
- sc->stats.ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
- sc->stats.mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
- sc->stats.bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
+ stats->rnbc += E1000_READ_REG(&sc->hw, E1000_RNBC);
+ stats->ruc += E1000_READ_REG(&sc->hw, E1000_RUC);
+ stats->rfc += E1000_READ_REG(&sc->hw, E1000_RFC);
+ stats->roc += E1000_READ_REG(&sc->hw, E1000_ROC);
+ stats->rjc += E1000_READ_REG(&sc->hw, E1000_RJC);
+
+ stats->mgprc += E1000_READ_REG(&sc->hw, E1000_MGTPRC);
+ stats->mgpdc += E1000_READ_REG(&sc->hw, E1000_MGTPDC);
+ stats->mgptc += E1000_READ_REG(&sc->hw, E1000_MGTPTC);
+
+ stats->tor += E1000_READ_REG(&sc->hw, E1000_TORH);
+ stats->tot += E1000_READ_REG(&sc->hw, E1000_TOTH);
+
+ stats->tpr += E1000_READ_REG(&sc->hw, E1000_TPR);
+ stats->tpt += E1000_READ_REG(&sc->hw, E1000_TPT);
+ stats->ptc64 += E1000_READ_REG(&sc->hw, E1000_PTC64);
+ stats->ptc127 += E1000_READ_REG(&sc->hw, E1000_PTC127);
+ stats->ptc255 += E1000_READ_REG(&sc->hw, E1000_PTC255);
+ stats->ptc511 += E1000_READ_REG(&sc->hw, E1000_PTC511);
+ stats->ptc1023 += E1000_READ_REG(&sc->hw, E1000_PTC1023);
+ stats->ptc1522 += E1000_READ_REG(&sc->hw, E1000_PTC1522);
+ stats->mptc += E1000_READ_REG(&sc->hw, E1000_MPTC);
+ stats->bptc += E1000_READ_REG(&sc->hw, E1000_BPTC);
/* Interrupt Counts */
- sc->stats.iac += E1000_READ_REG(&sc->hw, E1000_IAC);
- sc->stats.icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
- sc->stats.icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
- sc->stats.ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
- sc->stats.ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
- sc->stats.ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
- sc->stats.ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
- sc->stats.icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
- sc->stats.icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
+ stats->iac += E1000_READ_REG(&sc->hw, E1000_IAC);
+ stats->icrxptc += E1000_READ_REG(&sc->hw, E1000_ICRXPTC);
+ stats->icrxatc += E1000_READ_REG(&sc->hw, E1000_ICRXATC);
+ stats->ictxptc += E1000_READ_REG(&sc->hw, E1000_ICTXPTC);
+ stats->ictxatc += E1000_READ_REG(&sc->hw, E1000_ICTXATC);
+ stats->ictxqec += E1000_READ_REG(&sc->hw, E1000_ICTXQEC);
+ stats->ictxqmtc += E1000_READ_REG(&sc->hw, E1000_ICTXQMTC);
+ stats->icrxdmtc += E1000_READ_REG(&sc->hw, E1000_ICRXDMTC);
+ stats->icrxoc += E1000_READ_REG(&sc->hw, E1000_ICRXOC);
if (sc->hw.mac.type >= e1000_82543) {
- sc->stats.algnerrc +=
+ stats->algnerrc +=
E1000_READ_REG(&sc->hw, E1000_ALGNERRC);
- sc->stats.rxerrc +=
+ stats->rxerrc +=
E1000_READ_REG(&sc->hw, E1000_RXERRC);
- sc->stats.tncrs +=
+ stats->tncrs +=
E1000_READ_REG(&sc->hw, E1000_TNCRS);
- sc->stats.cexterr +=
+ stats->cexterr +=
E1000_READ_REG(&sc->hw, E1000_CEXTERR);
- sc->stats.tsctc +=
+ stats->tsctc +=
E1000_READ_REG(&sc->hw, E1000_TSCTC);
- sc->stats.tsctfc +=
+ stats->tsctfc +=
E1000_READ_REG(&sc->hw, E1000_TSCTFC);
}
}
+static void
+em_update_vf_stats_counters(struct e1000_softc *sc)
+{
+ struct e1000_vf_stats *stats;
+
+ if (sc->link_speed == 0)
+ return;
+
+ stats = &sc->ustats.vf_stats;
+
+ UPDATE_VF_REG(E1000_VFGPRC,
+ stats->last_gprc, stats->gprc);
+ UPDATE_VF_REG(E1000_VFGORC,
+ stats->last_gorc, stats->gorc);
+ UPDATE_VF_REG(E1000_VFGPTC,
+ stats->last_gptc, stats->gptc);
+ UPDATE_VF_REG(E1000_VFGOTC,
+ stats->last_gotc, stats->gotc);
+ UPDATE_VF_REG(E1000_VFMPRC,
+ stats->last_mprc, stats->mprc);
+}
+
+static uint64_t
+em_if_get_vf_counter(if_ctx_t ctx, ift_counter cnt)
+{
+ struct e1000_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+
+ switch (cnt) {
+ case IFCOUNTER_IERRORS:
+ return (sc->dropped_pkts);
+ case IFCOUNTER_OERRORS:
+ return (if_get_counter_default(ifp, cnt) +
+ sc->watchdog_events);
+ default:
+ return (if_get_counter_default(ifp, cnt));
+ }
+}
+
static uint64_t
em_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct e1000_softc *sc = iflib_get_softc(ctx);
+ struct e1000_hw_stats *stats;
if_t ifp = iflib_get_ifp(ctx);
+ if (sc->vf_ifp)
+ return (em_if_get_vf_counter(ctx, cnt));
+
+ stats = &sc->ustats.stats;
+
switch (cnt) {
case IFCOUNTER_COLLISIONS:
- return (sc->stats.colc);
+ return (stats->colc);
case IFCOUNTER_IERRORS:
- return (sc->dropped_pkts + sc->stats.rxerrc +
- sc->stats.crcerrs + sc->stats.algnerrc +
- sc->stats.ruc + sc->stats.roc +
- sc->stats.mpc + sc->stats.cexterr);
+ return (sc->dropped_pkts + stats->rxerrc +
+ stats->crcerrs + stats->algnerrc +
+ stats->ruc + stats->roc +
+ stats->mpc + stats->cexterr);
case IFCOUNTER_OERRORS:
- return (sc->stats.ecol + sc->stats.latecol +
- sc->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ stats->ecol + stats->latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -4398,6 +4880,58 @@ em_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
return (sysctl_handle_int(oidp, &val, 0, req));
}
+/* Per queue holdoff interrupt rate handler */
+static int
+em_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct em_rx_queue *rque;
+ struct em_tx_queue *tque;
+ struct e1000_hw *hw;
+ int error;
+ u32 reg, usec, rate;
+
+ bool tx = oidp->oid_arg2;
+
+ if (tx) {
+ tque = oidp->oid_arg1;
+ hw = &tque->sc->hw;
+ if (hw->mac.type >= igb_mac_min)
+ reg = E1000_READ_REG(hw, E1000_EITR(tque->me));
+ else if (hw->mac.type == e1000_82574 && tque->msix)
+ reg = E1000_READ_REG(hw, E1000_EITR_82574(tque->me));
+ else
+ reg = E1000_READ_REG(hw, E1000_ITR);
+ } else {
+ rque = oidp->oid_arg1;
+ hw = &rque->sc->hw;
+ if (hw->mac.type >= igb_mac_min)
+ reg = E1000_READ_REG(hw, E1000_EITR(rque->msix));
+ else if (hw->mac.type == e1000_82574 && rque->msix)
+ reg = E1000_READ_REG(hw,
+ E1000_EITR_82574(rque->msix));
+ else
+ reg = E1000_READ_REG(hw, E1000_ITR);
+ }
+
+ if (hw->mac.type < igb_mac_min) {
+ if (reg > 0)
+ rate = EM_INTS_TO_ITR(reg);
+ else
+ rate = 0;
+ } else {
+ usec = (reg & IGB_QVECTOR_MASK);
+ if (usec > 0)
+ rate = IGB_INTS_TO_EITR(usec);
+ else
+ rate = 0;
+ }
+
+ error = sysctl_handle_int(oidp, &rate, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ return (0);
+}
+
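A usage note (sysctl path assumed, not shown in this hunk): after attach, each queue gains a read-only OID along the lines of `dev.em.0.queue_rx_0.interrupt_rate`, reporting the holdoff rate decoded from ITR/EITR. The OIDs are registered CTLFLAG_RD, so the write branch after sysctl_handle_int() is effectively unreachable.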
/*
* Add sysctl variables, one per statistic, to the system.
*/
@@ -4411,7 +4945,7 @@ em_add_hw_stats(struct e1000_softc *sc)
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct e1000_hw_stats *stats = &sc->stats;
+ struct e1000_hw_stats *stats;
struct sysctl_oid *stat_node, *queue_node, *int_node;
struct sysctl_oid_list *stat_list, *queue_list, *int_list;
@@ -4421,31 +4955,31 @@ em_add_hw_stats(struct e1000_softc *sc)
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
- CTLFLAG_RD, &sc->dropped_pkts,
- "Driver dropped packets");
+ CTLFLAG_RD, &sc->dropped_pkts,
+ "Driver dropped packets");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &sc->link_irq,
- "Link MSI-X IRQ Handled");
+ CTLFLAG_RD, &sc->link_irq,
+ "Link MSI-X IRQ Handled");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
- CTLFLAG_RD, &sc->rx_overruns,
- "RX overruns");
+ CTLFLAG_RD, &sc->rx_overruns,
+ "RX overruns");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
- CTLFLAG_RD, &sc->watchdog_events,
- "Watchdog timeouts");
+ CTLFLAG_RD, &sc->watchdog_events,
+ "Watchdog timeouts");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ CTLTYPE_UINT | CTLFLAG_RD,
sc, E1000_CTRL, em_sysctl_reg_handler, "IU",
"Device Control Register");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ CTLTYPE_UINT | CTLFLAG_RD,
sc, E1000_RCTL, em_sysctl_reg_handler, "IU",
"Receiver Control Register");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
- CTLFLAG_RD, &sc->hw.fc.high_water, 0,
- "Flow Control High Watermark");
+ CTLFLAG_RD, &sc->hw.fc.high_water, 0,
+ "Flow Control High Watermark");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
- CTLFLAG_RD, &sc->hw.fc.low_water, 0,
- "Flow Control Low Watermark");
+ CTLFLAG_RD, &sc->hw.fc.low_water, 0,
+ "Flow Control Low Watermark");
for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
@@ -4454,17 +4988,22 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+ CTLTYPE_UINT | CTLFLAG_RD, tx_que,
+ true, em_sysctl_interrupt_rate_handler,
+ "IU", "Interrupt Rate");
+
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_UINT | CTLFLAG_RD, sc,
E1000_TDH(txr->me), em_sysctl_reg_handler, "IU",
"Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_UINT | CTLFLAG_RD, sc,
E1000_TDT(txr->me), em_sysctl_reg_handler, "IU",
"Transmit Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
- CTLFLAG_RD, &txr->tx_irq,
- "Queue MSI-X Transmit Interrupts");
+ CTLFLAG_RD, &txr->tx_irq,
+ "Queue MSI-X Transmit Interrupts");
}
for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) {
@@ -4474,211 +5013,255 @@ em_add_hw_stats(struct e1000_softc *sc)
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+ CTLTYPE_UINT | CTLFLAG_RD, rx_que,
+ false, em_sysctl_interrupt_rate_handler,
+ "IU", "Interrupt Rate");
+
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_UINT | CTLFLAG_RD, sc,
E1000_RDH(rxr->me), em_sysctl_reg_handler, "IU",
"Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
+ CTLTYPE_UINT | CTLFLAG_RD, sc,
E1000_RDT(rxr->me), em_sysctl_reg_handler, "IU",
"Receive Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
- CTLFLAG_RD, &rxr->rx_irq,
- "Queue MSI-X Receive Interrupts");
+ CTLFLAG_RD, &rxr->rx_irq,
+ "Queue MSI-X Receive Interrupts");
}
/* MAC stats get their own sub node */
-
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
+ /*
+ ** The VF adapter has a very limited set of stats
+ ** since it's not managing the metal, so to speak.
+ */
+ if (sc->vf_ifp) {
+ struct e1000_vf_stats *vfstats = &sc->ustats.vf_stats;
+
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
+ CTLFLAG_RD, &vfstats->gprc,
+ "Good Packets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
+ CTLFLAG_RD, &vfstats->gptc,
+ "Good Packets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
+ CTLFLAG_RD, &vfstats->gorc,
+ "Good Octets Received");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
+ CTLFLAG_RD, &vfstats->gotc,
+ "Good Octets Transmitted");
+ SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
+ CTLFLAG_RD, &vfstats->mprc,
+ "Multicast Packets Received");
+ return;
+ }
+
+ stats = &sc->ustats.stats;
+
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
- CTLFLAG_RD, &stats->ecol,
- "Excessive collisions");
+ CTLFLAG_RD, &stats->ecol,
+ "Excessive collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
- CTLFLAG_RD, &stats->scc,
- "Single collisions");
+ CTLFLAG_RD, &stats->scc,
+ "Single collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
- CTLFLAG_RD, &stats->mcc,
- "Multiple collisions");
+ CTLFLAG_RD, &stats->mcc,
+ "Multiple collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
- CTLFLAG_RD, &stats->latecol,
- "Late collisions");
+ CTLFLAG_RD, &stats->latecol,
+ "Late collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
- CTLFLAG_RD, &stats->colc,
- "Collision Count");
+ CTLFLAG_RD, &stats->colc,
+ "Collision Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
- CTLFLAG_RD, &sc->stats.symerrs,
- "Symbol Errors");
+ CTLFLAG_RD, &stats->symerrs,
+ "Symbol Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
- CTLFLAG_RD, &sc->stats.sec,
- "Sequence Errors");
+ CTLFLAG_RD, &stats->sec,
+ "Sequence Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
- CTLFLAG_RD, &sc->stats.dc,
- "Defer Count");
+ CTLFLAG_RD, &stats->dc,
+ "Defer Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
- CTLFLAG_RD, &sc->stats.mpc,
- "Missed Packets");
+ CTLFLAG_RD, &stats->mpc,
+ "Missed Packets");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
+ CTLFLAG_RD, &stats->rlec,
+ "Receive Length Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
- CTLFLAG_RD, &sc->stats.rnbc,
- "Receive No Buffers");
+ CTLFLAG_RD, &stats->rnbc,
+ "Receive No Buffers");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
- CTLFLAG_RD, &sc->stats.ruc,
- "Receive Undersize");
+ CTLFLAG_RD, &stats->ruc,
+ "Receive Undersize");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
- CTLFLAG_RD, &sc->stats.rfc,
- "Fragmented Packets Received ");
+ CTLFLAG_RD, &stats->rfc,
+ "Fragmented Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
- CTLFLAG_RD, &sc->stats.roc,
- "Oversized Packets Received");
+ CTLFLAG_RD, &stats->roc,
+ "Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
- CTLFLAG_RD, &sc->stats.rjc,
- "Recevied Jabber");
+ CTLFLAG_RD, &stats->rjc,
+ "Recevied Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
- CTLFLAG_RD, &sc->stats.rxerrc,
- "Receive Errors");
+ CTLFLAG_RD, &stats->rxerrc,
+ "Receive Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
- CTLFLAG_RD, &sc->stats.crcerrs,
- "CRC errors");
+ CTLFLAG_RD, &stats->crcerrs,
+ "CRC errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
- CTLFLAG_RD, &sc->stats.algnerrc,
- "Alignment Errors");
+ CTLFLAG_RD, &stats->algnerrc,
+ "Alignment Errors");
/* On 82575 these are collision counts */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
- CTLFLAG_RD, &sc->stats.cexterr,
- "Collision/Carrier extension errors");
+ CTLFLAG_RD, &stats->cexterr,
+ "Collision/Carrier extension errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
- CTLFLAG_RD, &sc->stats.xonrxc,
- "XON Received");
+ CTLFLAG_RD, &stats->xonrxc,
+ "XON Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
- CTLFLAG_RD, &sc->stats.xontxc,
- "XON Transmitted");
+ CTLFLAG_RD, &stats->xontxc,
+ "XON Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
- CTLFLAG_RD, &sc->stats.xoffrxc,
- "XOFF Received");
+ CTLFLAG_RD, &stats->xoffrxc,
+ "XOFF Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
- CTLFLAG_RD, &sc->stats.xofftxc,
- "XOFF Transmitted");
+ CTLFLAG_RD, &stats->xofftxc,
+ "XOFF Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
+ CTLFLAG_RD, &stats->fcruc,
+ "Unsupported Flow Control Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
+ CTLFLAG_RD, &stats->mgprc,
+ "Management Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
+ CTLFLAG_RD, &stats->mgpdc,
+ "Management Packets Dropped");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
+ CTLFLAG_RD, &stats->mgptc,
+ "Management Packets Transmitted");
/* Packet Reception Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
- CTLFLAG_RD, &sc->stats.tpr,
- "Total Packets Received ");
+ CTLFLAG_RD, &stats->tpr,
+ "Total Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
- CTLFLAG_RD, &sc->stats.gprc,
- "Good Packets Received");
+ CTLFLAG_RD, &stats->gprc,
+ "Good Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.bprc,
- "Broadcast Packets Received");
+ CTLFLAG_RD, &stats->bprc,
+ "Broadcast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
- CTLFLAG_RD, &sc->stats.mprc,
- "Multicast Packets Received");
+ CTLFLAG_RD, &stats->mprc,
+ "Multicast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
- CTLFLAG_RD, &sc->stats.prc64,
- "64 byte frames received ");
+ CTLFLAG_RD, &stats->prc64,
+ "64 byte frames received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
- CTLFLAG_RD, &sc->stats.prc127,
- "65-127 byte frames received");
+ CTLFLAG_RD, &stats->prc127,
+ "65-127 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
- CTLFLAG_RD, &sc->stats.prc255,
- "128-255 byte frames received");
+ CTLFLAG_RD, &stats->prc255,
+ "128-255 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
- CTLFLAG_RD, &sc->stats.prc511,
- "256-511 byte frames received");
+ CTLFLAG_RD, &stats->prc511,
+ "256-511 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.prc1023,
- "512-1023 byte frames received");
+ CTLFLAG_RD, &stats->prc1023,
+ "512-1023 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.prc1522,
- "1023-1522 byte frames received");
+ CTLFLAG_RD, &stats->prc1522,
+ "1023-1522 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
- CTLFLAG_RD, &sc->stats.gorc,
- "Good Octets Received");
+ CTLFLAG_RD, &stats->gorc,
+ "Good Octets Received");
/* Packet Transmission Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &sc->stats.gotc,
- "Good Octets Transmitted");
+ CTLFLAG_RD, &stats->gotc,
+ "Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
- CTLFLAG_RD, &sc->stats.tpt,
- "Total Packets Transmitted");
+ CTLFLAG_RD, &stats->tpt,
+ "Total Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &sc->stats.gptc,
- "Good Packets Transmitted");
+ CTLFLAG_RD, &stats->gptc,
+ "Good Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.bptc,
- "Broadcast Packets Transmitted");
+ CTLFLAG_RD, &stats->bptc,
+ "Broadcast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &sc->stats.mptc,
- "Multicast Packets Transmitted");
+ CTLFLAG_RD, &stats->mptc,
+ "Multicast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
- CTLFLAG_RD, &sc->stats.ptc64,
- "64 byte frames transmitted ");
+ CTLFLAG_RD, &stats->ptc64,
+ "64 byte frames transmitted ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
- CTLFLAG_RD, &sc->stats.ptc127,
- "65-127 byte frames transmitted");
+ CTLFLAG_RD, &stats->ptc127,
+ "65-127 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
- CTLFLAG_RD, &sc->stats.ptc255,
- "128-255 byte frames transmitted");
+ CTLFLAG_RD, &stats->ptc255,
+ "128-255 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
- CTLFLAG_RD, &sc->stats.ptc511,
- "256-511 byte frames transmitted");
+ CTLFLAG_RD, &stats->ptc511,
+ "256-511 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
- CTLFLAG_RD, &sc->stats.ptc1023,
- "512-1023 byte frames transmitted");
+ CTLFLAG_RD, &stats->ptc1023,
+ "512-1023 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
- CTLFLAG_RD, &sc->stats.ptc1522,
- "1024-1522 byte frames transmitted");
+ CTLFLAG_RD, &stats->ptc1522,
+ "1024-1522 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
- CTLFLAG_RD, &sc->stats.tsctc,
- "TSO Contexts Transmitted");
+ CTLFLAG_RD, &stats->tsctc,
+ "TSO Contexts Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
- CTLFLAG_RD, &sc->stats.tsctfc,
- "TSO Contexts Failed");
-
+ CTLFLAG_RD, &stats->tsctfc,
+ "TSO Contexts Failed");
/* Interrupt Stats */
-
int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
int_list = SYSCTL_CHILDREN(int_node);
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
- CTLFLAG_RD, &sc->stats.iac,
- "Interrupt Assertion Count");
+ CTLFLAG_RD, &stats->iac,
+ "Interrupt Assertion Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
- CTLFLAG_RD, &sc->stats.icrxptc,
- "Interrupt Cause Rx Pkt Timer Expire Count");
+ CTLFLAG_RD, &stats->icrxptc,
+ "Interrupt Cause Rx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
- CTLFLAG_RD, &sc->stats.icrxatc,
- "Interrupt Cause Rx Abs Timer Expire Count");
+ CTLFLAG_RD, &stats->icrxatc,
+ "Interrupt Cause Rx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
- CTLFLAG_RD, &sc->stats.ictxptc,
- "Interrupt Cause Tx Pkt Timer Expire Count");
+ CTLFLAG_RD, &stats->ictxptc,
+ "Interrupt Cause Tx Pkt Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
- CTLFLAG_RD, &sc->stats.ictxatc,
- "Interrupt Cause Tx Abs Timer Expire Count");
+ CTLFLAG_RD, &stats->ictxatc,
+ "Interrupt Cause Tx Abs Timer Expire Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
- CTLFLAG_RD, &sc->stats.ictxqec,
- "Interrupt Cause Tx Queue Empty Count");
+ CTLFLAG_RD, &stats->ictxqec,
+ "Interrupt Cause Tx Queue Empty Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
- CTLFLAG_RD, &sc->stats.ictxqmtc,
- "Interrupt Cause Tx Queue Min Thresh Count");
+ CTLFLAG_RD, &stats->ictxqmtc,
+ "Interrupt Cause Tx Queue Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
- CTLFLAG_RD, &sc->stats.icrxdmtc,
- "Interrupt Cause Rx Desc Min Thresh Count");
+ CTLFLAG_RD, &stats->icrxdmtc,
+ "Interrupt Cause Rx Desc Min Thresh Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_overrun",
- CTLFLAG_RD, &sc->stats.icrxoc,
- "Interrupt Cause Receiver Overrun Count");
+ CTLFLAG_RD, &stats->icrxoc,
+ "Interrupt Cause Receiver Overrun Count");
}
static void
@@ -4690,9 +5273,9 @@ em_fw_version_locked(if_ctx_t ctx)
uint16_t eep = 0;
/*
- * em_fw_version_locked() must run under the IFLIB_CTX_LOCK to meet the
- * NVM locking model, so we do it in em_if_attach_pre() and store the
- * info in the softc
+ * em_fw_version_locked() must run under the IFLIB_CTX_LOCK to meet
+ * the NVM locking model, so we do it in em_if_attach_pre() and store
+ * the info in the softc
*/
ASSERT_CTX_LOCK_HELD(hw);
@@ -4705,8 +5288,8 @@ em_fw_version_locked(if_ctx_t ctx)
e1000_get_fw_version(hw, fw_ver);
} else {
/*
- * Otherwise, EEPROM version should be present on (almost?) all
- * devices here
+ * Otherwise, EEPROM version should be present on (almost?)
+ * all devices here
*/
if(e1000_read_nvm(hw, NVM_VERSION, 1, &eep)) {
INIT_DEBUGOUT("can't get EEPROM version");
@@ -4730,17 +5313,18 @@ em_sbuf_fw_version(struct e1000_fw_version *fw_ver, struct sbuf *buf)
space = " ";
}
- if (fw_ver->invm_major || fw_ver->invm_minor || fw_ver->invm_img_type) {
+ if (fw_ver->invm_major || fw_ver->invm_minor ||
+ fw_ver->invm_img_type) {
sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
- space, fw_ver->invm_major, fw_ver->invm_minor,
- fw_ver->invm_img_type);
+ space, fw_ver->invm_major, fw_ver->invm_minor,
+ fw_ver->invm_img_type);
space = " ";
}
if (fw_ver->or_valid) {
sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
- space, fw_ver->or_major, fw_ver->or_build,
- fw_ver->or_patch);
+ space, fw_ver->or_major, fw_ver->or_build,
+ fw_ver->or_patch);
space = " ";
}
@@ -4872,8 +5456,6 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
return (EINVAL);
info->value = usecs;
ticks = EM_USECS_TO_TICKS(usecs);
- if (info->offset == E1000_ITR) /* units are 256ns here */
- ticks *= 4;
sc = info->sc;
@@ -4896,10 +5478,47 @@ em_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
return (0);
}
+static int
+em_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
+{
+ struct e1000_softc *sc;
+ u32 reg, val, shift;
+ int error, mask;
+
+ sc = oidp->oid_arg1;
+ switch (oidp->oid_arg2) {
+ case 0:
+ reg = E1000_DTXTCPFLGL;
+ shift = 0;
+ break;
+ case 1:
+ reg = E1000_DTXTCPFLGL;
+ shift = 16;
+ break;
+ case 2:
+ reg = E1000_DTXTCPFLGH;
+ shift = 0;
+ break;
+ default:
+ return (EINVAL);
+ }
+ val = E1000_READ_REG(&sc->hw, reg);
+ mask = (val >> shift) & 0xfff;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (mask < 0 || mask > 0xfff)
+ return (EINVAL);
+ val = (val & ~(0xfff << shift)) | (mask << shift);
+ E1000_WRITE_REG(&sc->hw, reg, val);
+ return (0);
+}
+
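The handler above is a plain read-modify-write of a 12-bit field at a variable shift; a standalone sketch of just that splice (constants invented for illustration):

/* Standalone model of the 12-bit field splice used above. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t val = 0xABCD1234;		/* pretend register contents */
	uint32_t shift = 16, mask = 0x5A5;	/* new field value */

	/* Clear bits shift..shift+11, then OR in the new field. */
	val = (val & ~(0xfffu << shift)) | (mask << shift);
	printf("%#x\n", (unsigned)val);		/* prints 0xa5a51234 */
	return (0);
}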
static void
em_add_int_delay_sysctl(struct e1000_softc *sc, const char *name,
- const char *description, struct em_int_delay_info *info,
- int offset, int value)
+ const char *description, struct em_int_delay_info *info, int offset,
+ int value)
{
info->sc = sc;
info->offset = offset;
@@ -4923,7 +5542,7 @@ em_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
int error;
static int input = 3; /* default is full */
- struct e1000_softc *sc = (struct e1000_softc *) arg1;
+ struct e1000_softc *sc = (struct e1000_softc *) arg1;
error = sysctl_handle_int(oidp, &input, 0, req);
@@ -4952,6 +5571,55 @@ em_set_flowcntl(SYSCTL_HANDLER_ARGS)
}
/*
+ * Manage DMA Coalesce:
+ * Control values:
+ * 0/1 - off/on
+ * Legal timer values are:
+ * 250,500,1000-10000 in thousands
+ */
+static int
+igb_sysctl_dmac(SYSCTL_HANDLER_ARGS)
+{
+ struct e1000_softc *sc = (struct e1000_softc *) arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &sc->dmac, 0, req);
+
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ switch (sc->dmac) {
+ case 0:
+ /* Disabling */
+ break;
+ case 1: /* Just enable and use default */
+ sc->dmac = 1000;
+ break;
+ case 250:
+ case 500:
+ case 1000:
+ case 2000:
+ case 3000:
+ case 4000:
+ case 5000:
+ case 6000:
+ case 7000:
+ case 8000:
+ case 9000:
+ case 10000:
+ /* Legal values - allow */
+ break;
+ default:
+ /* Do nothing, illegal value */
+ sc->dmac = 0;
+ return (EINVAL);
+ }
+ /* Reinit the interface */
+ em_if_init(sc->ctx);
+ return (error);
+}
+
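A usage sketch (OID name assumed): `sysctl dev.em.0.dmac=1000` would enable DMA coalescing with a 1000-unit timer; writing 1 is promoted to the 1000 default, the other listed timer values pass through unchanged, and anything else zeroes sc->dmac and fails with EINVAL. Accepted values only take effect through the em_if_init() reinitialization at the end.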
+/*
* Manage Energy Efficient Ethernet:
* Control values:
* 0/1 - enabled/disabled
@@ -4962,11 +5630,17 @@ em_sysctl_eee(SYSCTL_HANDLER_ARGS)
struct e1000_softc *sc = (struct e1000_softc *) arg1;
int error, value;
- value = sc->hw.dev_spec.ich8lan.eee_disable;
+ if (sc->hw.mac.type < igb_mac_min)
+ value = sc->hw.dev_spec.ich8lan.eee_disable;
+ else
+ value = sc->hw.dev_spec._82575.eee_disable;
error = sysctl_handle_int(oidp, &value, 0, req);
if (error || req->newptr == NULL)
return (error);
- sc->hw.dev_spec.ich8lan.eee_disable = (value != 0);
+ if (sc->hw.mac.type < igb_mac_min)
+ sc->hw.dev_spec.ich8lan.eee_disable = (value != 0);
+ else
+ sc->hw.dev_spec._82575.eee_disable = (value != 0);
em_if_init(sc->ctx);
return (0);
@@ -5041,15 +5715,15 @@ em_print_debug_info(struct e1000_softc *sc)
for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
device_printf(dev, "TX Queue %d ------\n", i);
device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
- E1000_READ_REG(&sc->hw, E1000_TDH(i)),
- E1000_READ_REG(&sc->hw, E1000_TDT(i)));
+ E1000_READ_REG(&sc->hw, E1000_TDH(i)),
+ E1000_READ_REG(&sc->hw, E1000_TDT(i)));
}
for (int j=0; j < sc->rx_num_queues; j++, rxr++) {
device_printf(dev, "RX Queue %d ------\n", j);
device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
- E1000_READ_REG(&sc->hw, E1000_RDH(j)),
- E1000_READ_REG(&sc->hw, E1000_RDT(j)));
+ E1000_READ_REG(&sc->hw, E1000_RDH(j)),
+ E1000_READ_REG(&sc->hw, E1000_RDT(j)));
}
}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index 244762e8ed0d..582e8d9c6327 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -1,8 +1,9 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2001-2024, Intel Corporation
* Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
- * All rights reserved.
+ * Copyright (c) 2024 Kevin Bowling <kbowling@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -243,9 +244,19 @@
/* Support AutoMediaDetect for Marvell M88 PHY in i354 */
#define IGB_MEDIA_RESET (1 << 0)
-/* Define the starting Interrupt rate per Queue */
-#define IGB_INTS_PER_SEC 8000
-#define IGB_DEFAULT_ITR ((1000000/IGB_INTS_PER_SEC) << 2)
+/* Define the interrupt rates and ITR helpers */
+#define EM_INTS_4K 4000
+#define EM_INTS_20K 20000
+#define EM_INTS_70K 70000
+#define EM_INTS_DEFAULT 8000
+#define EM_INTS_MULTIPLIER 256
+#define EM_ITR_DIVIDEND 1000000000
+#define EM_INTS_TO_ITR(i) (EM_ITR_DIVIDEND/((i) * EM_INTS_MULTIPLIER))
+#define IGB_EITR_DIVIDEND 1000000
+#define IGB_EITR_SHIFT 2
+#define IGB_QVECTOR_MASK 0x7FFC
+#define IGB_INTS_TO_EITR(i) (((IGB_EITR_DIVIDEND/(i)) & IGB_QVECTOR_MASK) << \
+ IGB_EITR_SHIFT)
#define IGB_LINK_ITR 2000
#define I210_LINK_DELAY 1000
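A quick standalone cross-check of the two conversions above (hosted C, not driver code; values taken from the macros):

/* Standalone check of the ITR/EITR math above; not part of the driver. */
#include <stdio.h>

#define EM_INTS_MULTIPLIER	256
#define EM_ITR_DIVIDEND	1000000000
#define EM_INTS_TO_ITR(i)	(EM_ITR_DIVIDEND / ((i) * EM_INTS_MULTIPLIER))
#define IGB_EITR_DIVIDEND	1000000
#define IGB_QVECTOR_MASK	0x7FFC
#define IGB_EITR_SHIFT		2
#define IGB_INTS_TO_EITR(i)	(((IGB_EITR_DIVIDEND / (i)) & \
				    IGB_QVECTOR_MASK) << IGB_EITR_SHIFT)

int
main(void)
{
	/* em/lem ITR counts 256 ns units: 1e9 / (8000 * 256) = 488 */
	printf("ITR  = %d\n", EM_INTS_TO_ITR(8000));
	/* igb EITR: 1e6 / 8000 = 125 us, masked to 4 us granularity
	 * (124) and shifted into bits 2..14, encoding as 496 */
	printf("EITR = %d\n", IGB_INTS_TO_EITR(8000));
	return (0);
}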
@@ -359,6 +370,19 @@
#define EM_NVM_MSIX_N_MASK (0x7 << EM_NVM_MSIX_N_SHIFT)
#define EM_NVM_MSIX_N_SHIFT 7
+/*
+ * VFs use 32-bit counter that rolls over.
+ */
+#define UPDATE_VF_REG(reg, last, cur) \
+do { \
+ u32 new = E1000_READ_REG(&sc->hw, reg); \
+ if (new < last) \
+ cur += 0x100000000LL; \
+ last = new; \
+ cur &= 0xFFFFFFFF00000000LL; \
+ cur |= new; \
+} while (0)
+
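A minimal host-side model of that rollover handling (illustrative only; no hardware access):

/* Illustrative model of UPDATE_VF_REG's 32-bit rollover handling. */
#include <stdint.h>
#include <stdio.h>

static void
update_vf_counter(uint32_t new, uint32_t *last, uint64_t *cur)
{
	if (new < *last)		/* the 32-bit register wrapped */
		*cur += 0x100000000ULL;
	*last = new;
	*cur &= 0xFFFFFFFF00000000ULL;	/* keep the accumulated epochs */
	*cur |= new;			/* splice in the current low bits */
}

int
main(void)
{
	uint32_t last = 0xFFFFFFF0;
	uint64_t cur = 0xFFFFFFF0;

	update_vf_counter(0x10, &last, &cur);	/* wrapped past 2^32 */
	printf("%#llx\n", (unsigned long long)cur);	/* 0x100000010 */
	return (0);
}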
struct e1000_softc;
struct em_int_delay_info {
@@ -383,7 +407,11 @@ struct tx_ring {
/* Interrupt resources */
void *tag;
struct resource *res;
+
+ /* Soft stats */
unsigned long tx_irq;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
/* Saved csum offloading context information */
int csum_flags;
@@ -419,6 +447,9 @@ struct rx_ring {
unsigned long rx_discarded;
unsigned long rx_packets;
unsigned long rx_bytes;
+
+ /* Next requested ITR latency */
+ u8 rx_nextlatency;
};
struct em_tx_queue {
@@ -434,6 +465,7 @@ struct em_rx_queue {
u32 me;
u32 msix;
u32 eims;
+ u32 itr_setting;
struct rx_ring rxr;
u64 irqs;
struct if_irq que_irq;
@@ -482,6 +514,7 @@ struct e1000_softc {
u32 rx_mbuf_sz;
+ int enable_aim;
/* Management and WOL features */
u32 wol;
bool has_manage;
@@ -505,6 +538,7 @@ struct e1000_softc {
u16 link_duplex;
u32 smartspeed;
u32 dmac;
+ u32 pba;
int link_mask;
int tso_automasked;
@@ -525,7 +559,11 @@ struct e1000_softc {
unsigned long rx_overruns;
unsigned long watchdog_events;
- struct e1000_hw_stats stats;
+ union {
+ struct e1000_hw_stats stats; /* !sc->vf_ifp */
+ struct e1000_vf_stats vf_stats; /* sc->vf_ifp */
+ } ustats;
+
u16 vf_ifp;
};
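Shape of the new discriminated union, abridged for illustration (field types invented):

/* Abridged sketch: vf_ifp selects which union arm is live. */
struct sketch_softc {
	union {
		struct { unsigned long colc; } stats;	 /* PF: full MAC stats */
		struct { unsigned long gprc; } vf_stats; /* VF: five counters */
	} ustats;
	unsigned short vf_ifp;	/* nonzero: use vf_stats */
};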
diff --git a/sys/dev/e1000/igb_txrx.c b/sys/dev/e1000/igb_txrx.c
index 2819150acba6..568d84807173 100644
--- a/sys/dev/e1000/igb_txrx.c
+++ b/sys/dev/e1000/igb_txrx.c
@@ -42,29 +42,27 @@
/*********************************************************************
* Local Function prototypes
*********************************************************************/
-static int igb_isc_txd_encap(void *arg, if_pkt_info_t pi);
-static void igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
-static int igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+static int igb_isc_txd_encap(void *, if_pkt_info_t);
+static void igb_isc_txd_flush(void *, uint16_t, qidx_t);
+static int igb_isc_txd_credits_update(void *, uint16_t, bool);
-static void igb_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+static void igb_isc_rxd_refill(void *, if_rxd_update_t);
-static void igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
- qidx_t pidx);
-static int igb_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
- qidx_t budget);
+static void igb_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int igb_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
static int igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
-static int igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
- uint32_t *cmd_type_len, uint32_t *olinfo_status);
-static int igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
- uint32_t *cmd_type_len, uint32_t *olinfo_status);
+static int igb_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
+ uint32_t *);
+static int igb_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
+ uint32_t *);
-static void igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
-static int igb_determine_rsstype(uint16_t pkt_info);
+static void igb_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
+static int igb_determine_rsstype(uint16_t);
-extern void igb_if_enable_intr(if_ctx_t ctx);
-extern int em_intr(void *arg);
+extern void igb_if_enable_intr(if_ctx_t);
+extern int em_intr(void *);
struct if_txrx igb_txrx = {
.ift_txd_encap = igb_isc_txd_encap,
@@ -104,14 +102,15 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
break;
default:
panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(pi->ipi_etype));
+ __func__, ntohs(pi->ipi_etype));
break;
}
- TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+ TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];
/* This is used in the transmit desc in encap */
- paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+ paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
+ pi->ipi_tcp_hlen;
/* VLAN MACLEN IPLEN */
if (pi->ipi_mflags & M_VLANTAG) {
@@ -149,8 +148,8 @@ igb_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
*
**********************************************************************/
static int
-igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
- uint32_t *olinfo_status)
+igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
+ uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
struct e1000_adv_tx_context_desc *TXD;
struct e1000_softc *sc = txr->sc;
@@ -166,7 +165,7 @@ igb_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
*olinfo_status |= pi->ipi_len << E1000_ADVTXD_PAYLEN_SHIFT;
/* Now ready a context descriptor */
- TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
+ TXD = (struct e1000_adv_tx_context_desc *)&txr->tx_base[pi->ipi_pidx];
/*
** In advanced descriptors the vlan tag must
@@ -248,8 +247,8 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
pidx_last = olinfo_status = 0;
/* Basic descriptor defines */
- cmd_type_len = (E1000_ADVTXD_DTYP_DATA |
- E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT);
+ cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
+ E1000_ADVTXD_DCMD_DEXT);
if (pi->ipi_mflags & M_VLANTAG)
cmd_type_len |= E1000_ADVTXD_DCMD_VLE;
@@ -292,15 +291,19 @@ igb_isc_txd_encap(void *arg, if_pkt_info_t pi)
txd->read.cmd_type_len |= htole32(E1000_TXD_CMD_EOP | txd_flags);
pi->ipi_new_pidx = i;
+ /* Sent data accounting for AIM */
+ txr->tx_bytes += pi->ipi_len;
+ ++txr->tx_packets;
+
return (0);
}
static void
igb_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- struct e1000_softc *sc = arg;
- struct em_tx_queue *que = &sc->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
+ struct e1000_softc *sc = arg;
+ struct em_tx_queue *que = &sc->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
E1000_WRITE_REG(&sc->hw, E1000_TDT(txr->me), pidx);
}
@@ -349,7 +352,8 @@ igb_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
if (rs_cidx == txr->tx_rs_pidx)
break;
cur = txr->tx_rsq[rs_cidx];
- status = ((union e1000_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ status = ((union e1000_adv_tx_desc *)
+ &txr->tx_base[cur])->wb.status;
} while ((status & E1000_TXD_STAT_DD));
txr->tx_rs_cidx = rs_cidx;
@@ -385,7 +389,8 @@ igb_isc_rxd_refill(void *arg, if_rxd_update_t iru)
}
static void
-igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
+igb_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ qidx_t pidx)
{
struct e1000_softc *sc = arg;
struct em_rx_queue *que = &sc->rx_queues[rxqid];
@@ -451,7 +456,8 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
MPASS ((staterr & E1000_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
- ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
+ ptype =
+ le32toh(rxd->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
ri->iri_len += len;
rxr->rx_bytes += ri->iri_len;
@@ -460,7 +466,8 @@ igb_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);
/* Make sure bad packets are discarded */
- if (eop && ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
+ if (eop &&
+ ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0)) {
sc->dropped_pkts++;
++rxr->rx_discarded;
return (EBADMSG);
@@ -522,7 +529,8 @@ igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
return;
/* If there is a layer 3 or 4 error we are done */
- if (__predict_false(errors & (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
+ if (__predict_false(errors &
+ (E1000_RXD_ERR_IPE | E1000_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
@@ -533,11 +541,13 @@ igb_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
if (__predict_true(status &
(E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) {
/* SCTP header present */
- if (__predict_false((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
+ if (__predict_false(
+ (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
(ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
- ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ ri->iri_csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}
diff --git a/sys/dev/efidev/efidev.c b/sys/dev/efidev/efidev.c
index 14712cf3c7bf..18bdaaa234f4 100644
--- a/sys/dev/efidev/efidev.c
+++ b/sys/dev/efidev/efidev.c
@@ -52,12 +52,13 @@ efidev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr,
switch (cmd) {
case EFIIOC_GET_TABLE:
{
- struct efi_get_table_ioc *egtioc =
- (struct efi_get_table_ioc *)addr;
+ struct efi_get_table_ioctl *egtioc =
+ (struct efi_get_table_ioctl *)addr;
void *buf = NULL;
- error = efi_copy_table(&egtioc->uuid, egtioc->buf ? &buf : NULL,
- egtioc->buf_len, &egtioc->table_len);
+ error = efi_copy_table(&egtioc->guid,
+ egtioc->buf != NULL ? &buf : NULL, egtioc->buf_len,
+ &egtioc->table_len);
if (error != 0 || egtioc->buf == NULL)
break;
@@ -89,7 +90,7 @@ efidev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr,
}
case EFIIOC_GET_WAKETIME:
{
- struct efi_waketime_ioc *wt = (struct efi_waketime_ioc *)addr;
+ struct efi_waketime_ioctl *wt = (struct efi_waketime_ioctl *)addr;
error = efi_get_waketime(&wt->enabled, &wt->pending,
&wt->waketime);
@@ -97,14 +98,14 @@ efidev_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t addr,
}
case EFIIOC_SET_WAKETIME:
{
- struct efi_waketime_ioc *wt = (struct efi_waketime_ioc *)addr;
+ struct efi_waketime_ioctl *wt = (struct efi_waketime_ioctl *)addr;
error = efi_set_waketime(wt->enabled, &wt->waketime);
break;
}
case EFIIOC_VAR_GET:
{
- struct efi_var_ioc *ev = (struct efi_var_ioc *)addr;
+ struct efi_var_ioctl *ev = (struct efi_var_ioctl *)addr;
void *data;
efi_char *name;
@@ -140,7 +141,7 @@ vg_out:
}
case EFIIOC_VAR_NEXT:
{
- struct efi_var_ioc *ev = (struct efi_var_ioc *)addr;
+ struct efi_var_ioctl *ev = (struct efi_var_ioctl *)addr;
efi_char *name;
name = malloc(ev->namesize, M_TEMP, M_WAITOK);
@@ -162,7 +163,7 @@ vg_out:
}
case EFIIOC_VAR_SET:
{
- struct efi_var_ioc *ev = (struct efi_var_ioc *)addr;
+ struct efi_var_ioctl *ev = (struct efi_var_ioctl *)addr;
void *data = NULL;
efi_char *name;
diff --git a/sys/dev/efidev/efirt.c b/sys/dev/efidev/efirt.c
index 2d45b4d6ac66..b55c1c191077 100644
--- a/sys/dev/efidev/efirt.c
+++ b/sys/dev/efidev/efirt.c
@@ -107,7 +107,8 @@ static int efi_status2err[25] = {
enum efi_table_type {
TYPE_ESRT = 0,
- TYPE_PROP
+ TYPE_PROP,
+ TYPE_MEMORY_ATTR
};
static int efi_enter(void);
@@ -123,11 +124,20 @@ efi_status_to_errno(efi_status status)
}
static struct mtx efi_lock;
-static SYSCTL_NODE(_hw, OID_AUTO, efi, CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL,
+SYSCTL_NODE(_hw, OID_AUTO, efi, CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL,
"EFI");
static bool efi_poweroff = true;
SYSCTL_BOOL(_hw_efi, OID_AUTO, poweroff, CTLFLAG_RWTUN, &efi_poweroff, 0,
"If true, use EFI runtime services to power off in preference to ACPI");
+extern int print_efirt_faults;
+SYSCTL_INT(_hw_efi, OID_AUTO, print_faults, CTLFLAG_RWTUN,
+ &print_efirt_faults, 0,
+ "Print fault information upon trap from EFIRT calls: "
+ "0 - never, 1 - once, 2 - always");
+extern u_long cnt_efirt_faults;
+SYSCTL_ULONG(_hw_efi, OID_AUTO, total_faults, CTLFLAG_RD,
+ &cnt_efirt_faults, 0,
+ "Total number of faults that occurred during EFIRT calls");
static bool
efi_is_in_map(struct efi_md *map, int ndesc, int descsz, vm_offset_t addr)
@@ -167,7 +177,6 @@ efi_init(void)
struct efi_map_header *efihdr;
struct efi_md *map;
struct efi_rt *rtdm;
- caddr_t kmdp;
size_t efisz;
int ndesc, rt_disabled;
@@ -197,10 +206,7 @@ efi_init(void)
printf("EFI config table is not present\n");
}
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ efihdr = (struct efi_map_header *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_MAP);
if (efihdr == NULL) {
if (bootverbose)
@@ -309,6 +315,9 @@ efi_enter(void)
fpu_kern_leave(td, NULL);
mtx_unlock(&efi_lock);
PMAP_UNLOCK(curpmap);
+ } else {
+ MPASS((td->td_pflags & TDP_EFIRT) == 0);
+ td->td_pflags |= TDP_EFIRT;
}
return (error);
}
@@ -319,17 +328,20 @@ efi_leave(void)
struct thread *td;
pmap_t curpmap;
+ td = curthread;
+ MPASS((td->td_pflags & TDP_EFIRT) != 0);
+ td->td_pflags &= ~TDP_EFIRT;
+
efi_arch_leave();
curpmap = &curproc->p_vmspace->vm_pmap;
- td = curthread;
fpu_kern_leave(td, NULL);
mtx_unlock(&efi_lock);
PMAP_UNLOCK(curpmap);
}
static int
-get_table(struct uuid *uuid, void **ptr)
+get_table(efi_guid_t *guid, void **ptr)
{
struct efi_cfgtbl *ct;
u_long count;
@@ -343,7 +355,7 @@ get_table(struct uuid *uuid, void **ptr)
count = efi_systbl->st_entries;
ct = efi_cfgtbl;
while (count--) {
- if (!bcmp(&ct->ct_uuid, uuid, sizeof(*uuid))) {
+ if (!bcmp(&ct->ct_guid, guid, sizeof(*guid))) {
*ptr = ct->ct_data;
efi_leave();
return (0);
@@ -362,13 +374,13 @@ get_table_length(enum efi_table_type type, size_t *table_len, void **taddr)
case TYPE_ESRT:
{
struct efi_esrt_table *esrt = NULL;
- struct uuid uuid = EFI_TABLE_ESRT;
+ efi_guid_t guid = EFI_TABLE_ESRT;
uint32_t fw_resource_count = 0;
size_t len = sizeof(*esrt);
int error;
void *buf;
- error = efi_get_table(&uuid, (void **)&esrt);
+ error = efi_get_table(&guid, (void **)&esrt);
if (error != 0)
return (error);
@@ -404,14 +416,14 @@ get_table_length(enum efi_table_type type, size_t *table_len, void **taddr)
}
case TYPE_PROP:
{
- struct uuid uuid = EFI_PROPERTIES_TABLE;
+ efi_guid_t guid = EFI_PROPERTIES_TABLE;
struct efi_prop_table *prop;
size_t len = sizeof(*prop);
uint32_t prop_len;
int error;
void *buf;
- error = efi_get_table(&uuid, (void **)&prop);
+ error = efi_get_table(&guid, (void **)&prop);
if (error != 0)
return (error);
@@ -434,26 +446,63 @@ get_table_length(enum efi_table_type type, size_t *table_len, void **taddr)
free(buf, M_TEMP);
return (0);
}
+ case TYPE_MEMORY_ATTR:
+ {
+ efi_guid_t guid = EFI_MEMORY_ATTRIBUTES_TABLE;
+ struct efi_memory_attribute_table *tbl_addr, *mem_addr;
+ int error;
+ void *buf;
+ size_t len = sizeof(struct efi_memory_attribute_table);
+
+ error = efi_get_table(&guid, (void **)&tbl_addr);
+ if (error)
+ return (error);
+
+ buf = malloc(len, M_TEMP, M_WAITOK);
+ error = physcopyout((vm_paddr_t)tbl_addr, buf, len);
+ if (error) {
+ free(buf, M_TEMP);
+ return (error);
+ }
+
+ mem_addr = (struct efi_memory_attribute_table *)buf;
+ if (mem_addr->version != 2) {
+ free(buf, M_TEMP);
+ return (EINVAL);
+ }
+ len += mem_addr->descriptor_size * mem_addr->num_ents;
+ if (len > EFI_TABLE_ALLOC_MAX) {
+ free(buf, M_TEMP);
+ return (ENOMEM);
+ }
+
+ *table_len = len;
+ if (taddr != NULL)
+ *taddr = tbl_addr;
+ free(buf, M_TEMP);
+ return (0);
+ }
}
return (ENOENT);
}
static int
-copy_table(struct uuid *uuid, void **buf, size_t buf_len, size_t *table_len)
+copy_table(efi_guid_t *guid, void **buf, size_t buf_len, size_t *table_len)
{
static const struct known_table {
- struct uuid uuid;
+ efi_guid_t guid;
enum efi_table_type type;
} tables[] = {
{ EFI_TABLE_ESRT, TYPE_ESRT },
- { EFI_PROPERTIES_TABLE, TYPE_PROP }
+ { EFI_PROPERTIES_TABLE, TYPE_PROP },
+ { EFI_MEMORY_ATTRIBUTES_TABLE, TYPE_MEMORY_ATTR }
};
size_t table_idx;
void *taddr;
int rc;
for (table_idx = 0; table_idx < nitems(tables); table_idx++) {
- if (!bcmp(&tables[table_idx].uuid, uuid, sizeof(*uuid)))
+ if (!bcmp(&tables[table_idx].guid, guid, sizeof(*guid)))
break;
}
@@ -484,31 +533,32 @@ efi_rt_arch_call_nofault(struct efirt_callinfo *ec)
switch (ec->ec_argcnt) {
case 0:
- ec->ec_efi_status = ((register_t (*)(void))ec->ec_fptr)();
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(void))
+ ec->ec_fptr)();
break;
case 1:
- ec->ec_efi_status = ((register_t (*)(register_t))ec->ec_fptr)
- (ec->ec_arg1);
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(register_t))
+ ec->ec_fptr)(ec->ec_arg1);
break;
case 2:
- ec->ec_efi_status = ((register_t (*)(register_t, register_t))
- ec->ec_fptr)(ec->ec_arg1, ec->ec_arg2);
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(register_t,
+ register_t))ec->ec_fptr)(ec->ec_arg1, ec->ec_arg2);
break;
case 3:
- ec->ec_efi_status = ((register_t (*)(register_t, register_t,
- register_t))ec->ec_fptr)(ec->ec_arg1, ec->ec_arg2,
- ec->ec_arg3);
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(register_t,
+ register_t, register_t))ec->ec_fptr)(ec->ec_arg1,
+ ec->ec_arg2, ec->ec_arg3);
break;
case 4:
- ec->ec_efi_status = ((register_t (*)(register_t, register_t,
- register_t, register_t))ec->ec_fptr)(ec->ec_arg1,
- ec->ec_arg2, ec->ec_arg3, ec->ec_arg4);
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(register_t,
+ register_t, register_t, register_t))ec->ec_fptr)(
+ ec->ec_arg1, ec->ec_arg2, ec->ec_arg3, ec->ec_arg4);
break;
case 5:
- ec->ec_efi_status = ((register_t (*)(register_t, register_t,
- register_t, register_t, register_t))ec->ec_fptr)(
- ec->ec_arg1, ec->ec_arg2, ec->ec_arg3, ec->ec_arg4,
- ec->ec_arg5);
+ ec->ec_efi_status = ((register_t EFIABI_ATTR (*)(register_t,
+ register_t, register_t, register_t, register_t))
+ ec->ec_fptr)(ec->ec_arg1, ec->ec_arg2, ec->ec_arg3,
+ ec->ec_arg4, ec->ec_arg5);
break;
default:
panic("efi_rt_arch_call: %d args", (int)ec->ec_argcnt);
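
The EFIABI_ATTR annotation on every cast above tells the compiler to emit the call with the EFI calling convention rather than the host's default; on amd64 that is assumed to be the Microsoft x64 ABI. A minimal sketch of the same cast, assuming an amd64 target and that EFIABI_ATTR expands to the attribute shown:

    typedef unsigned long efi_status_sketch;

    /* Call a zero-argument EFI runtime service through a void pointer. */
    static efi_status_sketch
    call_efi0(void *fptr)
    {
    	/* Without ms_abi the compiler would pass arguments SysV-style. */
    	return (((efi_status_sketch __attribute__((ms_abi)) (*)(void))
    	    fptr)());
    }
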
@@ -718,7 +768,7 @@ set_time(struct efi_tm *tm)
}
static int
-var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib,
+var_get(efi_char *name, efi_guid_t *vendor, uint32_t *attrib,
size_t *datasize, void *data)
{
struct efirt_callinfo ec;
@@ -742,7 +792,7 @@ var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib,
}
static int
-var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor)
+var_nextname(size_t *namesize, efi_char *name, efi_guid_t *vendor)
{
struct efirt_callinfo ec;
int error;
@@ -763,7 +813,7 @@ var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor)
}
static int
-var_set(efi_char *name, struct uuid *vendor, uint32_t attrib,
+var_set(efi_char *name, efi_guid_t *vendor, uint32_t attrib,
size_t datasize, void *data)
{
struct efirt_callinfo ec;
diff --git a/sys/dev/efidev/efirtc.c b/sys/dev/efidev/efirtc.c
index a7baff673c1c..69d2c0b1af9f 100644
--- a/sys/dev/efidev/efirtc.c
+++ b/sys/dev/efidev/efirtc.c
@@ -52,9 +52,9 @@ efirtc_identify(driver_t *driver, device_t parent)
/* Don't add the driver unless we have working runtime services. */
if (efi_rt_ok() != 0)
return;
- if (device_find_child(parent, "efirtc", -1) != NULL)
+ if (device_find_child(parent, "efirtc", DEVICE_UNIT_ANY) != NULL)
return;
- if (BUS_ADD_CHILD(parent, 0, "efirtc", -1) == NULL)
+ if (BUS_ADD_CHILD(parent, 0, "efirtc", DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "add child failed\n");
}
diff --git a/sys/dev/ena/ena.c b/sys/dev/ena/ena.c
index 3ff32cc9966c..af158b5aea1d 100644
--- a/sys/dev/ena/ena.c
+++ b/sys/dev/ena/ena.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,7 +148,7 @@ static int ena_ioctl(if_t, u_long, caddr_t);
static int ena_get_dev_offloads(struct ena_com_dev_get_features_ctx *);
static void ena_update_host_info(struct ena_admin_host_info *, if_t);
static void ena_update_hwassist(struct ena_adapter *);
-static int ena_setup_ifnet(device_t, struct ena_adapter *,
+static void ena_setup_ifnet(device_t, struct ena_adapter *,
struct ena_com_dev_get_features_ctx *);
static int ena_enable_wc(device_t, struct resource *);
static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
@@ -156,7 +156,7 @@ static int ena_set_queues_placement_policy(device_t, struct ena_com_dev *,
static int ena_map_llq_mem_bar(device_t, struct ena_com_dev *);
static uint32_t ena_calc_max_io_queue_num(device_t, struct ena_com_dev *,
struct ena_com_dev_get_features_ctx *);
-static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *);
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *,
+    struct ena_adapter *);
static void ena_config_host_info(struct ena_com_dev *, device_t);
static int ena_attach(device_t);
static int ena_detach(device_t);
@@ -169,6 +169,11 @@ static int ena_copy_eni_metrics(struct ena_adapter *);
static int ena_copy_srd_metrics(struct ena_adapter *);
static int ena_copy_customer_metrics(struct ena_adapter *);
static void ena_timer_service(void *);
+static enum ena_regs_reset_reason_types check_cdesc_in_tx_cq(struct ena_adapter *,
+ struct ena_ring *);
+#ifdef DEV_NETMAP
+static int ena_reinit_netmap(struct ena_adapter *adapter);
+#endif
static char ena_version[] = ENA_DEVICE_NAME ENA_DRV_MODULE_NAME
" v" ENA_DRV_MODULE_VERSION;
@@ -560,6 +565,32 @@ ena_free_rx_dma_tag(struct ena_adapter *adapter)
return (ret);
}
+int
+validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc)
+{
+ struct ena_adapter *adapter = tx_ring->adapter;
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+
+ if (unlikely(tx_req_id_rc != 0)) {
+ if (tx_req_id_rc == ENA_COM_FAULT) {
+ reset_reason = ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED;
+ ena_log(adapter->pdev, ERR,
+ "TX descriptor malformed. req_id %hu qid %hu\n",
+ req_id, tx_ring->qid);
+ } else if (tx_req_id_rc == ENA_COM_INVAL) {
+ ena_log_nm(adapter->pdev, WARN,
+ "Invalid req_id %hu in qid %hu\n",
+ req_id, tx_ring->qid);
+ counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
+ }
+
+ ena_trigger_reset(adapter, reset_reason);
+ return (EFAULT);
+ }
+
+ return (0);
+}
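
validate_tx_req_id() folds the two ena_com error codes into a reset reason and a single EFAULT result, so callers only branch once. An in-tree usage sketch (driver types assumed in scope; this mirrors the reworked ena_get_tx_req_id() further down in this commit):

    static int
    fetch_and_validate(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
        uint16_t *req_id)
    {
    	int rc;

    	rc = ena_com_tx_comp_req_id_get(io_cq, req_id);
    	if (rc == ENA_COM_TRY_AGAIN)	/* CQ empty, nothing to clean */
    		return (EAGAIN);
    	/* Maps ENA_COM_FAULT/ENA_COM_INVAL to a reset and EFAULT. */
    	return (validate_tx_req_id(tx_ring, *req_id, rc));
    }
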
+
static void
ena_release_all_tx_dmamap(struct ena_ring *tx_ring)
{
@@ -1133,6 +1164,21 @@ ena_refill_rx_bufs(struct ena_ring *rx_ring, uint32_t num)
return (i);
}
+#ifdef DEV_NETMAP
+static int
+ena_reinit_netmap(struct ena_adapter *adapter)
+{
+ int rc;
+
+ netmap_detach(adapter->ifp);
+ rc = ena_netmap_attach(adapter);
+ if (rc != 0)
+ ena_log(adapter->pdev, ERR, "netmap attach failed: %d\n", rc);
+
+ return (rc);
+}
+
+#endif /* DEV_NETMAP */
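
Every ring-reconfiguration path below follows the same pattern: reshape the IO rings first, then rebuild the netmap view with ena_reinit_netmap(). A hedged sketch of that pattern (reshape is a hypothetical callback; driver types assumed in scope):

    static int
    reshape_and_reinit(struct ena_adapter *adapter,
        void (*reshape)(struct ena_adapter *))
    {
    	reshape(adapter);	/* resize or recreate the IO rings */
    #ifdef DEV_NETMAP
    	return (ena_reinit_netmap(adapter));	/* rebuild netmap state */
    #else
    	return (0);
    #endif
    }
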
int
ena_update_buf_ring_size(struct ena_adapter *adapter,
uint32_t new_buf_ring_size)
@@ -1150,6 +1196,12 @@ ena_update_buf_ring_size(struct ena_adapter *adapter,
/* Reconfigure buf ring for all Tx rings. */
ena_free_all_io_rings_resources(adapter);
ena_init_io_rings_advanced(adapter);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
if (dev_was_up) {
/*
* If ena_up() fails, it's not because of recent buf_ring size
@@ -1167,7 +1219,12 @@ ena_update_buf_ring_size(struct ena_adapter *adapter,
adapter->buf_ring_size = old_buf_ring_size;
ena_free_all_io_rings_resources(adapter);
ena_init_io_rings_advanced(adapter);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
ENA_FLAG_SET_ATOMIC(ENA_FLAG_DEV_UP_BEFORE_RESET,
adapter);
ena_trigger_reset(adapter, ENA_REGS_RESET_OS_TRIGGER);
@@ -1195,6 +1252,12 @@ ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
/* Configure queues with new size. */
ena_init_io_rings_basic(adapter);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
if (dev_was_up) {
rc = ena_up(adapter);
if (unlikely(rc != 0)) {
@@ -1206,7 +1269,12 @@ ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
adapter->requested_tx_ring_size = old_tx_size;
adapter->requested_rx_ring_size = old_rx_size;
ena_init_io_rings_basic(adapter);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
/* And try again. */
rc = ena_up(adapter);
if (unlikely(rc != 0)) {
@@ -1330,7 +1398,12 @@ ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
ena_down(adapter);
ena_update_io_rings(adapter, new_num);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
if (dev_was_up) {
rc = ena_up(adapter);
if (unlikely(rc != 0)) {
@@ -1340,7 +1413,12 @@ ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num)
new_num, old_num);
ena_update_io_rings(adapter, old_num);
+#ifdef DEV_NETMAP
+ rc = ena_reinit_netmap(adapter);
+ if (rc != 0)
+ return (rc);
+#endif /* DEV_NETMAP */
rc = ena_up(adapter);
if (unlikely(rc != 0)) {
ena_log(adapter->pdev, ERR,
@@ -2462,7 +2540,7 @@ ena_update_hwassist(struct ena_adapter *adapter)
if_sethwassistbits(ifp, flags, 0);
}
-static int
+static void
ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
struct ena_com_dev_get_features_ctx *feat)
{
@@ -2470,10 +2548,6 @@ ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
int caps = 0;
ifp = adapter->ifp = if_gethandle(IFT_ETHER);
- if (unlikely(ifp == NULL)) {
- ena_log(pdev, ERR, "can not allocate ifnet structure\n");
- return (ENXIO);
- }
if_initname(ifp, device_get_name(pdev), device_get_unit(pdev));
if_setdev(ifp, pdev);
if_setsoftc(ifp, adapter);
@@ -2516,8 +2590,6 @@ ena_setup_ifnet(device_t pdev, struct ena_adapter *adapter,
ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);
ether_ifattach(ifp, adapter->mac_addr);
-
- return (0);
}
void
@@ -2684,27 +2756,51 @@ ena_map_llq_mem_bar(device_t pdev, struct ena_com_dev *ena_dev)
}
static inline void
-set_default_llq_configurations(struct ena_llq_configurations *llq_config,
- struct ena_admin_feature_llq_desc *llq)
+ena_set_llq_configurations(struct ena_llq_configurations *llq_config,
+ struct ena_admin_feature_llq_desc *llq, struct ena_adapter *adapter)
{
+ bool use_large_llq;
+
llq_config->llq_header_location = ENA_ADMIN_INLINE_HEADER;
llq_config->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
llq_config->llq_num_decs_before_header =
ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
- if ((llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B) !=
- 0 && ena_force_large_llq_header) {
- llq_config->llq_ring_entry_size =
- ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+
+ switch (ena_force_large_llq_header) {
+ case ENA_LLQ_HEADER_SIZE_POLICY_REGULAR:
+ use_large_llq = false;
+ break;
+ case ENA_LLQ_HEADER_SIZE_POLICY_LARGE:
+ use_large_llq = true;
+ break;
+ case ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT:
+ use_large_llq =
+ (llq->entry_size_recommended == ENA_ADMIN_LIST_ENTRY_SIZE_256B);
+ break;
+ default:
+ use_large_llq = false;
+ ena_log(adapter->pdev, WARN,
+ "force_large_llq_header should have values [0-2]\n");
+ break;
+ }
+
+ if (!(llq->entry_size_ctrl_supported & ENA_ADMIN_LIST_ENTRY_SIZE_256B))
+ use_large_llq = false;
+
+ if (use_large_llq) {
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
llq_config->llq_ring_entry_size_value = 256;
+ adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
} else {
- llq_config->llq_ring_entry_size =
- ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+ llq_config->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
llq_config->llq_ring_entry_size_value = 128;
+ adapter->llq_policy = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
}
}
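
Distilled sketch of the policy selection above: the tunable requests regular (128B), large (256B), or the device recommendation, and the device capability bit can still veto the large entry. Policy values follow the ena_llq_header_size_policy_t enum added in ena.h (REGULAR=0, LARGE=1, DEFAULT=2):

    static int
    llq_entry_size(int policy, int dev_recommends_256b, int dev_supports_256b)
    {
    	int want_256b;

    	want_256b = (policy == 1) ||			/* forced large */
    	    (policy == 2 && dev_recommends_256b);	/* device default */
    	if (!dev_supports_256b)				/* capability veto */
    		want_256b = 0;
    	return (want_256b ? 256 : 128);
    }
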
static int
-ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
+ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx,
+    struct ena_adapter *adapter)
{
struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -2754,30 +2850,34 @@ ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
max_queues->max_packet_rx_descs);
}
- /* round down to the nearest power of 2 */
- max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
- max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
-
- /*
- * When forcing large headers, we multiply the entry size by 2,
- * and therefore divide the queue size by 2, leaving the amount
- * of memory used by the queues unchanged.
- */
- if (ena_force_large_llq_header) {
- if ((llq->entry_size_ctrl_supported &
- ENA_ADMIN_LIST_ENTRY_SIZE_256B) != 0 &&
- ena_dev->tx_mem_queue_type ==
- ENA_ADMIN_PLACEMENT_POLICY_DEV) {
- max_tx_queue_size /= 2;
- ena_log(ctx->pdev, INFO,
- "Forcing large headers and decreasing maximum Tx queue size to %d\n",
- max_tx_queue_size);
+ if (adapter->llq_policy == ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+ if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+ if (llq->max_wide_llq_depth != max_tx_queue_size) {
+ if (llq->max_wide_llq_depth == 0) {
+ /*
+ * If the device does not report a maximum wide-LLQ
+ * depth, halve the queue size so the amount of
+ * memory used by the queues stays unchanged.
+ */
+ max_tx_queue_size /= 2;
+ } else {
+ max_tx_queue_size = llq->max_wide_llq_depth;
+ }
+ ena_log(ctx->pdev, INFO,
+ "Using large LLQ headers and decreasing maximum Tx queue size to %d\n",
+ max_tx_queue_size);
+ } else {
+ ena_log(ctx->pdev, INFO, "Using large LLQ headers\n");
+ }
} else {
ena_log(ctx->pdev, WARN,
- "Forcing large headers failed: LLQ is disabled or device does not support large headers\n");
+ "Using large headers failed: LLQ is disabled or device does not support large headers\n");
}
}
+ /* round down to the nearest power of 2 */
+ max_tx_queue_size = 1 << (flsl(max_tx_queue_size) - 1);
+ max_rx_queue_size = 1 << (flsl(max_rx_queue_size) - 1);
+
tx_queue_size = clamp_val(tx_queue_size, ENA_MIN_RING_SIZE,
max_tx_queue_size);
rx_queue_size = clamp_val(rx_queue_size, ENA_MIN_RING_SIZE,
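
A worked example of the wide-LLQ sizing above, as a userland sketch using the BSD flsl() from <strings.h> (illustrative numbers: a device maximum of 1000 Tx descriptors with no max_wide_llq_depth reported is halved to 500, then rounded down to 256):

    #include <strings.h>	/* flsl() */

    static unsigned long
    wide_llq_tx_size(unsigned long max_tx, unsigned long max_wide_depth)
    {
    	if (max_wide_depth == 0)
    		max_tx /= 2;	/* keep total queue memory unchanged */
    	else
    		max_tx = max_wide_depth;
    	/* round down to the nearest power of two, as the driver does */
    	return (1UL << (flsl((long)max_tx) - 1));
    }
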
@@ -2917,7 +3017,9 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
BIT(ENA_ADMIN_FATAL_ERROR) |
BIT(ENA_ADMIN_WARNING) |
BIT(ENA_ADMIN_NOTIFICATION) |
- BIT(ENA_ADMIN_KEEP_ALIVE);
+ BIT(ENA_ADMIN_KEEP_ALIVE) |
+ BIT(ENA_ADMIN_CONF_NOTIFICATIONS) |
+ BIT(ENA_ADMIN_DEVICE_REQUEST_RESET);
aenq_groups &= get_feat_ctx->aenq.supported_groups;
rc = ena_com_set_aenq_config(ena_dev, aenq_groups);
@@ -2928,7 +3030,7 @@ ena_device_init(struct ena_adapter *adapter, device_t pdev,
*wd_active = !!(aenq_groups & BIT(ENA_ADMIN_KEEP_ALIVE));
- set_default_llq_configurations(&llq_config, &get_feat_ctx->llq);
+ ena_set_llq_configurations(&llq_config, &get_feat_ctx->llq, adapter);
rc = ena_set_queues_placement_policy(pdev, ena_dev, &get_feat_ctx->llq,
&llq_config);
@@ -3008,6 +3110,7 @@ static void
check_for_missing_keep_alive(struct ena_adapter *adapter)
{
sbintime_t timestamp, time;
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
if (adapter->wd_active == 0)
return;
@@ -3019,8 +3122,10 @@ check_for_missing_keep_alive(struct ena_adapter *adapter)
time = getsbinuptime() - timestamp;
if (unlikely(time > adapter->keep_alive_timeout)) {
ena_log(adapter->pdev, ERR, "Keep alive watchdog timeout.\n");
- counter_u64_add(adapter->dev_stats.wd_expired, 1);
- ena_trigger_reset(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
+ if (ena_com_aenq_has_keep_alive(adapter->ena_dev))
+ reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
+
+ ena_trigger_reset(adapter, reset_reason);
}
}
@@ -3028,11 +3133,15 @@ check_for_missing_keep_alive(struct ena_adapter *adapter)
static void
check_for_admin_com_state(struct ena_adapter *adapter)
{
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_ADMIN_TO;
if (unlikely(ena_com_get_admin_running_state(adapter->ena_dev) == false)) {
ena_log(adapter->pdev, ERR,
"ENA admin queue is not in running state!\n");
counter_u64_add(adapter->dev_stats.admin_q_pause, 1);
- ena_trigger_reset(adapter, ENA_REGS_RESET_ADMIN_TO);
+ if (ena_com_get_missing_admin_interrupt(adapter->ena_dev))
+ reset_reason = ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT;
+
+ ena_trigger_reset(adapter, reset_reason);
}
}
@@ -3060,18 +3169,45 @@ check_for_rx_interrupt_queue(struct ena_adapter *adapter,
return (0);
}
+static enum ena_regs_reset_reason_types
+check_cdesc_in_tx_cq(struct ena_adapter *adapter,
+ struct ena_ring *tx_ring)
+{
+ device_t pdev = adapter->pdev;
+ int rc;
+ u16 req_id;
+
+ rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id);
+ /* TX CQ is empty */
+ if (rc == ENA_COM_TRY_AGAIN) {
+ ena_log(pdev, ERR,
+ "No completion descriptors found in CQ %d\n",
+ tx_ring->qid);
+ return (ENA_REGS_RESET_MISS_TX_CMPL);
+ }
+
+ /* TX CQ has cdescs */
+ ena_log(pdev, ERR,
+ "Completion descriptors found in CQ %d",
+ tx_ring->qid);
+
+ return (ENA_REGS_RESET_MISS_INTERRUPT);
+}
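
The decision above, distilled: an empty Tx CQ means the device never produced the completion descriptor, while pending descriptors mean the host never processed them, which points at a lost interrupt instead. A trivial standalone restatement:

    enum miss_cause { MISS_TX_COMPLETION, MISS_INTERRUPT };

    static enum miss_cause
    classify_tx_miss(int cq_is_empty)
    {
    	return (cq_is_empty ? MISS_TX_COMPLETION : MISS_INTERRUPT);
    }
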
+
static int
check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
struct ena_ring *tx_ring)
{
+ uint32_t missed_tx = 0, new_missed_tx = 0;
device_t pdev = adapter->pdev;
struct bintime curtime, time;
struct ena_tx_buffer *tx_buf;
int time_since_last_cleanup;
int missing_tx_comp_to;
sbintime_t time_offset;
- uint32_t missed_tx = 0;
int i, rc = 0;
+ enum ena_regs_reset_reason_types reset_reason = ENA_REGS_RESET_MISS_TX_CMPL;
+ bool cleanup_scheduled, cleanup_running;
getbinuptime(&curtime);
@@ -3113,23 +3249,37 @@ check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
"%d msecs have passed since last cleanup. Missing Tx timeout value %d msecs.\n",
tx_ring->qid, i, time_since_last_cleanup,
missing_tx_comp_to);
+ /* Count the newly detected missing TX completion. */
+ new_missed_tx++;
}
tx_buf->print_once = false;
missed_tx++;
}
}
-
+ /* Check whether this ring's missing TX completions exceed the threshold. */
if (unlikely(missed_tx > adapter->missing_tx_threshold)) {
ena_log(pdev, ERR,
"The number of lost tx completion is above the threshold "
"(%d > %d). Reset the device\n",
missed_tx, adapter->missing_tx_threshold);
- ena_trigger_reset(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ /* Set the reset flag to prevent ena_cleanup() from running */
+ ENA_FLAG_SET_ATOMIC(ENA_FLAG_TRIGGER_RESET, adapter);
+ /*
+ * Make sure that ENA_FLAG_TRIGGER_RESET is visible to ena_cleanup()
+ * and that cleanup_running is visible to
+ * check_missing_comp_in_tx_queue(), so the CQ is never accessed
+ * concurrently with check_cdesc_in_tx_cq().
+ */
+ mb();
+ cleanup_scheduled = !!(atomic_load_16(&tx_ring->que->cleanup_task.ta_pending));
+ cleanup_running = !!(atomic_load_8((&tx_ring->cleanup_running)));
+ if (!(cleanup_scheduled || cleanup_running))
+ reset_reason = check_cdesc_in_tx_cq(adapter, tx_ring);
+
+ adapter->reset_reason = reset_reason;
rc = EIO;
}
-
- counter_u64_add(tx_ring->tx_stats.missing_tx_comp, missed_tx);
+ /* Record only the newly discovered missing TX completions. */
+ counter_u64_add(tx_ring->tx_stats.missing_tx_comp, new_missed_tx);
return (rc);
}
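
The store/mb()/load sequence above pairs with the mirror-image sequence added to ena_cleanup() below; it is the classic Dekker-style handshake. A userland sketch with C11 atomics standing in for atomic_store_8() and mb() (the real watchdog also checks the taskqueue's ta_pending, omitted here):

    #include <stdatomic.h>
    #include <stdbool.h>

    atomic_bool trigger_reset, cleanup_running;

    /* Watchdog side: publish the reset flag, then check for cleanup. */
    bool
    watchdog_may_touch_cq(void)
    {
    	atomic_store(&trigger_reset, true);
    	atomic_thread_fence(memory_order_seq_cst);	/* mb() */
    	return (!atomic_load(&cleanup_running));
    }

    /* Cleanup side: publish cleanup_running, then check for a reset. */
    bool
    cleanup_may_touch_cq(void)
    {
    	atomic_store(&cleanup_running, true);
    	atomic_thread_fence(memory_order_seq_cst);	/* mb() */
    	return (!atomic_load(&trigger_reset));
    }

At least one of the two sides is then guaranteed to observe the other's flag, so the CQ is never accessed from both contexts at once.
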
@@ -3588,6 +3738,7 @@ ena_reset_task(void *arg, int pending)
ENA_LOCK_LOCK();
if (likely(ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))) {
+ ena_increment_reset_counter(adapter);
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
@@ -3706,6 +3857,8 @@ ena_attach(device_t pdev)
goto err_bus_free;
}
+ ena_dev->ena_min_poll_delay_us = ENA_ADMIN_POLL_DELAY_US;
+
/* Initially clear all the flags */
ENA_FLAG_ZERO(adapter);
@@ -3736,7 +3889,7 @@ ena_attach(device_t pdev)
/* Calculate initial and maximum IO queue number and size */
max_num_io_queues = ena_calc_max_io_queue_num(pdev, ena_dev,
&get_feat_ctx);
- rc = ena_calc_io_queue_size(&calc_queue_ctx);
+ rc = ena_calc_io_queue_size(&calc_queue_ctx, adapter);
if (unlikely((rc != 0) || (max_num_io_queues <= 0))) {
rc = EFAULT;
goto err_com_free;
@@ -3811,11 +3964,7 @@ ena_attach(device_t pdev)
ena_sysctl_add_nodes(adapter);
/* setup network interface */
- rc = ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
- if (unlikely(rc != 0)) {
- ena_log(pdev, ERR, "Error with network interface setup\n");
- goto err_customer_metrics_alloc;
- }
+ ena_setup_ifnet(pdev, adapter, &get_feat_ctx);
/* Initialize reset task queue */
TASK_INIT(&adapter->reset_task, 0, ena_reset_task, adapter);
@@ -3851,9 +4000,9 @@ ena_attach(device_t pdev)
#ifdef DEV_NETMAP
err_detach:
ether_ifdetach(adapter->ifp);
-#endif /* DEV_NETMAP */
-err_customer_metrics_alloc:
+ ifmedia_removeall(&adapter->media);
free(adapter->customer_metrics_array, M_DEVBUF);
+#endif /* DEV_NETMAP */
err_metrics_buffer_destroy:
ena_com_delete_customer_metrics_buffer(ena_dev);
err_msix_free:
@@ -3900,8 +4049,14 @@ ena_detach(device_t pdev)
return (EBUSY);
}
+ rc = bus_generic_detach(pdev);
+ if (rc != 0)
+ return (rc);
+
ether_ifdetach(adapter->ifp);
+ ifmedia_removeall(&adapter->media);
+
/* Stop timer service */
ENA_LOCK_LOCK();
ENA_TIMER_DRAIN(adapter);
@@ -3964,7 +4119,7 @@ ena_detach(device_t pdev)
free(ena_dev, M_DEVBUF);
- return (bus_generic_detach(pdev));
+ return (0);
}
/******************************************************************************
@@ -4050,11 +4205,48 @@ unimplemented_aenq_handler(void *adapter_data,
"Unknown event was received or event with unimplemented handler\n");
}
+static void
+ena_conf_notification(void *adapter_data,
+    struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+ struct ena_admin_aenq_conf_notifications_desc *desc;
+ u64 bitmap, bit;
+
+ desc = (struct ena_admin_aenq_conf_notifications_desc *)aenq_e;
+ bitmap = desc->notifications_bitmap;
+
+ if (bitmap == 0) {
+ ena_log(adapter->pdev, INFO,
+ "Empty configuration notification bitmap\n");
+ return;
+ }
+
+ for (bit = ffsll(bitmap); bit != 0; bit = ffsll(bitmap)) {
+ bit--;
+ ena_log(adapter->pdev, INFO,
+ "Sub-optimal configuration notification code: %" PRIu64 " Refer to AWS ENA documentation for additional details and mitigation options.\n",
+ bit + 1);
+ /* Clear the processed bit. */
+ bitmap &= ~(1ULL << bit);
+ }
+}
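
A standalone demonstration of the bit-scan loop above: ffsll() (from <strings.h>) returns the 1-based index of the lowest set bit, or 0 once the bitmap is exhausted:

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>

    int
    main(void)
    {
    	uint64_t bitmap = 0x05;	/* bits 0 and 2 set */
    	int bit;

    	while ((bit = ffsll(bitmap)) != 0) {
    		bit--;				/* 0-based bit position */
    		printf("notification code %d\n", bit + 1);
    		bitmap &= ~(1ULL << bit);	/* clear the processed bit */
    	}
    	return (0);
    }
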
+
+static void
+ena_admin_device_request_reset(void *adapter_data,
+    struct ena_admin_aenq_entry *aenq_e)
+{
+ struct ena_adapter *adapter = (struct ena_adapter *)adapter_data;
+
+ ena_log(adapter->pdev, WARN,
+ "The device has detected an unhealthy state, reset is requested\n");
+ ena_trigger_reset(adapter, ENA_REGS_RESET_DEVICE_REQUEST);
+}
+
static struct ena_aenq_handlers aenq_handlers = {
.handlers = {
[ENA_ADMIN_LINK_CHANGE] = ena_update_on_link_change,
[ENA_ADMIN_NOTIFICATION] = ena_notification,
[ENA_ADMIN_KEEP_ALIVE] = ena_keep_alive_wd,
+ [ENA_ADMIN_CONF_NOTIFICATIONS] = ena_conf_notification,
+ [ENA_ADMIN_DEVICE_REQUEST_RESET] = ena_admin_device_request_reset,
},
.unimplemented_handler = unimplemented_aenq_handler
};
diff --git a/sys/dev/ena/ena.h b/sys/dev/ena/ena.h
index 3e42568ba8a9..3b01605b4ba7 100644
--- a/sys/dev/ena/ena.h
+++ b/sys/dev/ena/ena.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,8 +38,8 @@
#include "ena-com/ena_eth_com.h"
#define ENA_DRV_MODULE_VER_MAJOR 2
-#define ENA_DRV_MODULE_VER_MINOR 7
-#define ENA_DRV_MODULE_VER_SUBMINOR 0
+#define ENA_DRV_MODULE_VER_MINOR 8
+#define ENA_DRV_MODULE_VER_SUBMINOR 1
#define ENA_DRV_MODULE_NAME "ena"
@@ -146,6 +146,8 @@
/* Max number of timeouted packets before device reset */
#define ENA_DEFAULT_TX_CMP_THRESHOLD (128)
+#define ENA_ADMIN_POLL_DELAY_US 100
+
/*
* Supported PCI vendor and devices IDs
*/
@@ -171,6 +173,15 @@ enum ena_flags_t {
ENA_FLAGS_NUMBER = ENA_FLAG_RSS_ACTIVE
};
+enum ena_llq_header_size_policy_t {
+ /* Policy for Regular LLQ entry size (128B) */
+ ENA_LLQ_HEADER_SIZE_POLICY_REGULAR,
+ /* Policy for Large LLQ entry size (256B) */
+ ENA_LLQ_HEADER_SIZE_POLICY_LARGE,
+ /* Policy for device recommended LLQ entry size */
+ ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT
+};
+
BITSET_DEFINE(_ena_state, ENA_FLAGS_NUMBER);
typedef struct _ena_state ena_state_t;
@@ -325,6 +336,7 @@ struct ena_ring {
};
uint8_t first_interrupt;
+ uint8_t cleanup_running;
uint16_t no_interrupt_event_cnt;
struct ena_com_rx_buf_info ena_bufs[ENA_PKT_MAX_BUFS];
@@ -381,6 +393,19 @@ struct ena_stats_dev {
counter_u64_t interface_up;
counter_u64_t interface_down;
counter_u64_t admin_q_pause;
+ counter_u64_t total_resets;
+ counter_u64_t os_trigger;
+ counter_u64_t missing_tx_cmpl;
+ counter_u64_t bad_rx_req_id;
+ counter_u64_t bad_tx_req_id;
+ counter_u64_t bad_rx_desc_num;
+ counter_u64_t invalid_state;
+ counter_u64_t missing_intr;
+ counter_u64_t tx_desc_malformed;
+ counter_u64_t rx_desc_malformed;
+ counter_u64_t missing_admin_interrupt;
+ counter_u64_t admin_to;
+ counter_u64_t device_request_reset;
};
struct ena_hw_stats {
@@ -443,6 +468,8 @@ struct ena_adapter {
uint8_t mac_addr[ETHER_ADDR_LEN];
/* mdio and phy*/
+ uint8_t llq_policy;
+
ena_state_t flags;
/* IRQ CPU affinity */
@@ -519,6 +546,33 @@ struct ena_adapter {
extern struct sx ena_global_lock;
+#define ENA_RESET_STATS_ENTRY(reset_reason, stat) \
+ [reset_reason] = { \
+ .stat_offset = offsetof(struct ena_stats_dev, stat) / sizeof(u64), \
+ .has_counter = true \
+}
+
+struct ena_reset_stats_offset {
+ int stat_offset;
+ bool has_counter;
+};
+
+static const struct ena_reset_stats_offset resets_to_stats_offset_map[ENA_REGS_RESET_LAST] = {
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_KEEP_ALIVE_TO, wd_expired),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_ADMIN_TO, admin_to),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_OS_TRIGGER, os_trigger),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_MISS_TX_CMPL, missing_tx_cmpl),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_INV_RX_REQ_ID, bad_rx_req_id),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_INV_TX_REQ_ID, bad_tx_req_id),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_TOO_MANY_RX_DESCS, bad_rx_desc_num),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_DRIVER_INVALID_STATE, invalid_state),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_MISS_INTERRUPT, missing_intr),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_TX_DESCRIPTOR_MALFORMED, tx_desc_malformed),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED, rx_desc_malformed),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_MISSING_ADMIN_INTERRUPT, missing_admin_interrupt),
+ ENA_RESET_STATS_ENTRY(ENA_REGS_RESET_DEVICE_REQUEST, device_request_reset),
+};
+
int ena_up(struct ena_adapter *adapter);
void ena_down(struct ena_adapter *adapter);
int ena_restore_device(struct ena_adapter *adapter);
@@ -531,6 +585,7 @@ int ena_update_queue_size(struct ena_adapter *adapter, uint32_t new_tx_size,
int ena_update_io_queue_nb(struct ena_adapter *adapter, uint32_t new_num);
int ena_update_base_cpu(struct ena_adapter *adapter, int new_num);
int ena_update_cpu_stride(struct ena_adapter *adapter, uint32_t new_num);
+int validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id, int tx_req_id_rc);
static inline int
ena_mbuf_count(struct mbuf *mbuf)
{
@@ -543,6 +598,23 @@ ena_mbuf_count(struct mbuf *mbuf)
}
static inline void
+ena_increment_reset_counter(struct ena_adapter *adapter)
+{
+ enum ena_regs_reset_reason_types reset_reason = adapter->reset_reason;
+ const struct ena_reset_stats_offset *ena_reset_stats_offset =
+ &resets_to_stats_offset_map[reset_reason];
+
+ if (ena_reset_stats_offset->has_counter) {
+ uint64_t *stat_ptr = (uint64_t *)&adapter->dev_stats +
+ ena_reset_stats_offset->stat_offset;
+
+ counter_u64_add((counter_u64_t)(*stat_ptr), 1);
+ }
+
+ counter_u64_add(adapter->dev_stats.total_resets, 1);
+}
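
The lookup above works because every counter in struct ena_stats_dev is pointer-sized, so offsetof()/sizeof(u64) yields an index into the struct viewed as an array of 64-bit slots. A minimal standalone model of the same trick:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    struct stats_sketch {	/* hypothetical stand-in for ena_stats_dev */
    	uint64_t wd_expired;
    	uint64_t admin_to;
    	uint64_t os_trigger;
    };

    int
    main(void)
    {
    	struct stats_sketch s = { 0, 0, 0 };
    	size_t idx = offsetof(struct stats_sketch, os_trigger) /
    	    sizeof(uint64_t);

    	((uint64_t *)&s)[idx] += 1;	/* bump the selected counter */
    	assert(s.os_trigger == 1);
    	return (0);
    }
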
+
+static inline void
ena_trigger_reset(struct ena_adapter *adapter,
enum ena_regs_reset_reason_types reset_reason)
{
diff --git a/sys/dev/ena/ena_datapath.c b/sys/dev/ena/ena_datapath.c
index 66a93bbe7a6c..ab082fa1810f 100644
--- a/sys/dev/ena/ena_datapath.c
+++ b/sys/dev/ena/ena_datapath.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -77,17 +77,24 @@ ena_cleanup(void *arg, int pending)
int qid, ena_qid;
int txc, rxc, i;
- if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
- return;
-
- ena_log_io(adapter->pdev, DBG, "MSI-X TX/RX routine\n");
-
tx_ring = que->tx_ring;
rx_ring = que->rx_ring;
qid = que->id;
ena_qid = ENA_IO_TXQ_IDX(qid);
io_cq = &adapter->ena_dev->io_cq_queues[ena_qid];
+ atomic_store_8(&tx_ring->cleanup_running, 1);
+ /*
+ * Make sure the store to cleanup_running above is visible to
+ * check_missing_comp_in_tx_queue() before ENA_FLAG_TRIGGER_RESET is
+ * read below, so the CQ is never accessed concurrently with
+ * check_cdesc_in_tx_cq().
+ */
+ mb();
+ if (unlikely(((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
+ (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))))
+ return;
+
+ ena_log_io(adapter->pdev, DBG, "MSI-X TX/RX routine\n");
+
atomic_store_8(&tx_ring->first_interrupt, 1);
atomic_store_8(&rx_ring->first_interrupt, 1);
@@ -95,7 +102,8 @@ ena_cleanup(void *arg, int pending)
rxc = ena_rx_cleanup(rx_ring);
txc = ena_tx_cleanup(tx_ring);
- if (unlikely((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0))
+ if (unlikely(((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) ||
+ (ENA_FLAG_ISSET(ENA_FLAG_TRIGGER_RESET, adapter))))
return;
if ((txc != ENA_TX_BUDGET) && (rxc != ENA_RX_BUDGET))
@@ -107,6 +115,7 @@ ena_cleanup(void *arg, int pending)
ENA_TX_IRQ_INTERVAL, true, false);
counter_u64_add(tx_ring->tx_stats.unmask_interrupt_num, 1);
ena_com_unmask_intr(io_cq, &intr_reg);
+ atomic_store_8(&tx_ring->cleanup_running, 0);
}
void
@@ -200,29 +209,22 @@ ena_get_tx_req_id(struct ena_ring *tx_ring, struct ena_com_io_cq *io_cq,
uint16_t *req_id)
{
struct ena_adapter *adapter = tx_ring->adapter;
- int rc;
+ int rc = ena_com_tx_comp_req_id_get(io_cq, req_id);
- rc = ena_com_tx_comp_req_id_get(io_cq, req_id);
- if (rc == ENA_COM_TRY_AGAIN)
+ if (unlikely(rc == ENA_COM_TRY_AGAIN))
return (EAGAIN);
- if (unlikely(rc != 0)) {
- ena_log(adapter->pdev, ERR, "Invalid req_id %hu in qid %hu\n",
+ rc = validate_tx_req_id(tx_ring, *req_id, rc);
+
+ if (unlikely(tx_ring->tx_buffer_info[*req_id].mbuf == NULL)) {
+ ena_log(adapter->pdev, ERR,
+ "tx_info doesn't have valid mbuf. req_id %hu qid %hu\n",
*req_id, tx_ring->qid);
- counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
- goto err;
+ ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
+ rc = EFAULT;
}
- if (tx_ring->tx_buffer_info[*req_id].mbuf != NULL)
- return (0);
-
- ena_log(adapter->pdev, ERR,
- "tx_info doesn't have valid mbuf. req_id %hu qid %hu\n",
- *req_id, tx_ring->qid);
-err:
- ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
-
- return (EFAULT);
+ return (rc);
}
/**
@@ -432,7 +434,9 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
req_id = ena_bufs[buf].req_id;
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(rx_info->mbuf == NULL)) {
- ena_log(pdev, ERR, "NULL mbuf in rx_info");
+ ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
+ rx_ring->qid, req_id);
+ ena_trigger_reset(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return (NULL);
}
@@ -474,7 +478,8 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
rx_info = &rx_ring->rx_buffer_info[req_id];
if (unlikely(rx_info->mbuf == NULL)) {
- ena_log(pdev, ERR, "NULL mbuf in rx_info");
+ ena_log(pdev, ERR, "NULL mbuf in rx_info. qid %u req_id %u\n",
+ rx_ring->qid, req_id);
/*
* If one of the required mbufs was not allocated yet,
* we can break there.
@@ -486,6 +491,7 @@ ena_rx_mbuf(struct ena_ring *rx_ring, struct ena_com_rx_buf_info *ena_bufs,
* with hw ring.
*/
m_freem(mbuf);
+ ena_trigger_reset(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
return (NULL);
}
@@ -606,6 +612,8 @@ ena_rx_cleanup(struct ena_ring *rx_ring)
counter_u64_add(rx_ring->rx_stats.bad_desc_num,
1);
reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ } else if (rc == ENA_COM_FAULT) {
+ reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED;
} else {
counter_u64_add(rx_ring->rx_stats.bad_req_id,
1);
diff --git a/sys/dev/ena/ena_datapath.h b/sys/dev/ena/ena_datapath.h
index 43292b5abbe9..9edf54fe502a 100644
--- a/sys/dev/ena/ena_datapath.h
+++ b/sys/dev/ena/ena_datapath.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ena/ena_netmap.c b/sys/dev/ena/ena_netmap.c
index d95f48f7380c..8a220373ec3f 100644
--- a/sys/dev/ena/ena_netmap.c
+++ b/sys/dev/ena/ena_netmap.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,6 @@ static void ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *,
struct ena_tx_buffer *);
static void ena_netmap_tx_cleanup(struct ena_netmap_ctx *);
static uint16_t ena_netmap_tx_clean_one(struct ena_netmap_ctx *, uint16_t);
-static inline int validate_tx_req_id(struct ena_ring *, uint16_t);
static int ena_netmap_rx_frames(struct ena_netmap_ctx *);
static int ena_netmap_rx_frame(struct ena_netmap_ctx *);
static int ena_netmap_rx_load_desc(struct ena_netmap_ctx *, uint16_t, int *);
@@ -578,7 +577,7 @@ ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
remaining_len = *packet_len;
delta = 0;
- __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+ __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
/*
* When the device is in LLQ mode, the driver will copy
@@ -665,7 +664,7 @@ ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
* The first segment is already counted in.
*/
while (delta > 0) {
- __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+ __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
frag_len = slot->len;
/*
@@ -723,7 +722,7 @@ ena_netmap_tx_map_slots(struct ena_netmap_ctx *ctx,
/* Map all remaining data (regular routine for non-LLQ mode) */
while (remaining_len > 0) {
- __builtin_prefetch(&ctx->slots[ctx->nm_i + 1]);
+ __builtin_prefetch(&ctx->slots[nm_next(ctx->nm_i, ctx->lim)]);
rc = ena_netmap_map_single_slot(ctx->na, slot,
adapter->tx_buf_tag, *nm_maps, &vaddr, &paddr);
@@ -784,10 +783,10 @@ ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
/* Next, retain the sockets back to the userspace */
n = nm_info->sockets_used;
while (n--) {
+ ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
ctx->slots[ctx->nm_i].buf_idx = nm_info->socket_buf_idx[n];
ctx->slots[ctx->nm_i].flags = NS_BUF_CHANGED;
nm_info->socket_buf_idx[n] = 0;
- ctx->nm_i = nm_prev(ctx->nm_i, ctx->lim);
}
nm_info->sockets_used = 0;
}
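
Both netmap fixes above are wraparound fixes: slot indices live on a ring of lim + 1 entries, so neighbors must be computed with nm_next()/nm_prev() rather than plain +1/-1. A sketch of the helpers as netmap is assumed to define them:

    static inline unsigned int
    ring_next(unsigned int i, unsigned int lim)	/* cf. nm_next() */
    {
    	return (i == lim ? 0 : i + 1);
    }

    static inline unsigned int
    ring_prev(unsigned int i, unsigned int lim)	/* cf. nm_prev() */
    {
    	return (i == 0 ? lim : i - 1);
    }
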
@@ -795,25 +794,33 @@ ena_netmap_unmap_last_socket_chain(struct ena_netmap_ctx *ctx,
static void
ena_netmap_tx_cleanup(struct ena_netmap_ctx *ctx)
{
+ struct ena_ring *tx_ring = ctx->ring;
+ int rc;
uint16_t req_id;
uint16_t total_tx_descs = 0;
ctx->nm_i = ctx->kring->nr_hwtail;
- ctx->nt = ctx->ring->next_to_clean;
+ ctx->nt = tx_ring->next_to_clean;
/* Reclaim buffers for completed transmissions */
- while (ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id) >= 0) {
- if (validate_tx_req_id(ctx->ring, req_id) != 0)
+ do {
+ rc = ena_com_tx_comp_req_id_get(ctx->io_cq, &req_id);
+ if (unlikely(rc == ENA_COM_TRY_AGAIN))
+ break;
+
+ rc = validate_tx_req_id(tx_ring, req_id, rc);
+ if (unlikely(rc != 0))
break;
+
total_tx_descs += ena_netmap_tx_clean_one(ctx, req_id);
- }
+ } while (1);
ctx->kring->nr_hwtail = ctx->nm_i;
if (total_tx_descs > 0) {
/* acknowledge completion of sent packets */
- ctx->ring->next_to_clean = ctx->nt;
- ena_com_comp_ack(ctx->ring->ena_com_io_sq, total_tx_descs);
+ tx_ring->next_to_clean = ctx->nt;
+ ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
}
}
@@ -856,23 +863,6 @@ ena_netmap_tx_clean_one(struct ena_netmap_ctx *ctx, uint16_t req_id)
return tx_info->tx_descs;
}
-static inline int
-validate_tx_req_id(struct ena_ring *tx_ring, uint16_t req_id)
-{
- struct ena_adapter *adapter = tx_ring->adapter;
-
- if (likely(req_id < tx_ring->ring_size))
- return (0);
-
- ena_log_nm(adapter->pdev, WARN, "Invalid req_id %hu in qid %hu\n",
- req_id, tx_ring->qid);
- counter_u64_add(tx_ring->tx_stats.bad_req_id, 1);
-
- ena_trigger_reset(adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
-
- return (EFAULT);
-}
-
static int
ena_netmap_rxsync(struct netmap_kring *kring, int flags)
{
@@ -948,6 +938,8 @@ ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
if (rc == ENA_COM_NO_SPACE) {
counter_u64_add(ctx->ring->rx_stats.bad_desc_num, 1);
reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
+ } else if (rc == ENA_COM_FAULT) {
+ reset_reason = ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED;
} else {
counter_u64_add(ctx->ring->rx_stats.bad_req_id, 1);
reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
@@ -972,7 +964,7 @@ ena_netmap_rx_frame(struct ena_netmap_ctx *ctx)
* It just set flag NS_MOREFRAG to all slots, then here flag of
* last slot is cleared.
*/
- ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags = NS_BUF_CHANGED;
+ ctx->slots[nm_prev(ctx->nm_i, ctx->lim)].flags &= ~NS_MOREFRAG;
if (rc != 0) {
goto rx_clear_desc;
diff --git a/sys/dev/ena/ena_netmap.h b/sys/dev/ena/ena_netmap.h
index 598fcf1f08b2..4ee8338e4009 100644
--- a/sys/dev/ena/ena_netmap.h
+++ b/sys/dev/ena/ena_netmap.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ena/ena_rss.c b/sys/dev/ena/ena_rss.c
index d90a7fbb253a..b7706e33065d 100644
--- a/sys/dev/ena/ena_rss.c
+++ b/sys/dev/ena/ena_rss.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -279,12 +279,9 @@ ena_rss_indir_init(struct ena_adapter *adapter)
struct ena_indir *indir = adapter->rss_indir;
int rc;
- if (indir == NULL) {
+ if (indir == NULL)
adapter->rss_indir = indir = malloc(sizeof(struct ena_indir),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (indir == NULL)
- return (ENOMEM);
- }
rc = ena_rss_indir_get(adapter, indir->table);
if (rc != 0) {
diff --git a/sys/dev/ena/ena_rss.h b/sys/dev/ena/ena_rss.h
index 1c1c89261b35..64dd41851fec 100644
--- a/sys/dev/ena/ena_rss.h
+++ b/sys/dev/ena/ena_rss.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/ena/ena_sysctl.c b/sys/dev/ena/ena_sysctl.c
index 5eaa3c3e76c3..38e52f9066cc 100644
--- a/sys/dev/ena/ena_sysctl.c
+++ b/sys/dev/ena/ena_sysctl.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -148,17 +148,17 @@ SYSCTL_INT(_hw_ena, OID_AUTO, enable_9k_mbufs, CTLFLAG_RDTUN,
&ena_enable_9k_mbufs, 0, "Use 9 kB mbufs for Rx descriptors");
/*
- * Force the driver to use large LLQ (Low Latency Queue) header. Defaults to
- * false. This option may be important for platforms, which often handle packet
- * headers on Tx with total header size greater than 96B, as it may
- * reduce the latency.
+ * Force the driver to use large or regular LLQ (Low Latency Queue) header size.
+ * Defaults to ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT. This option may be
+ * important for platforms that often handle packet headers on Tx with total
+ * header size greater than 96B, as it may reduce the latency.
* It also reduces the maximum Tx queue size by half, so it may cause more Tx
* packet drops.
*/
-bool ena_force_large_llq_header = false;
-SYSCTL_BOOL(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN,
+int ena_force_large_llq_header = ENA_LLQ_HEADER_SIZE_POLICY_DEFAULT;
+SYSCTL_INT(_hw_ena, OID_AUTO, force_large_llq_header, CTLFLAG_RDTUN,
&ena_force_large_llq_header, 0,
- "Increases maximum supported header size in LLQ mode to 224 bytes, while reducing the maximum Tx queue size by half.\n");
+ "Change default LLQ entry size received from the device");
int ena_rss_table_size = ENA_RX_RSS_TABLE_SIZE;
@@ -275,11 +275,36 @@ ena_sysctl_add_stats(struct ena_adapter *adapter)
&dev_stats->wd_expired, "Watchdog expiry count");
SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_up", CTLFLAG_RD,
&dev_stats->interface_up, "Network interface up count");
- SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_down",
- CTLFLAG_RD, &dev_stats->interface_down,
- "Network interface down count");
- SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "admin_q_pause",
- CTLFLAG_RD, &dev_stats->admin_q_pause, "Admin queue pauses");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "interface_down", CTLFLAG_RD,
+ &dev_stats->interface_down, "Network interface down count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "admin_q_pause", CTLFLAG_RD,
+ &dev_stats->admin_q_pause, "Admin queue pauses");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "os_trigger", CTLFLAG_RD,
+ &dev_stats->os_trigger, "OS trigger count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "missing_tx_cmpl", CTLFLAG_RD,
+ &dev_stats->missing_tx_cmpl, "Missing TX completions resets count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "bad_rx_req_id", CTLFLAG_RD,
+ &dev_stats->bad_rx_req_id, "Bad RX req id count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "bad_tx_req_id", CTLFLAG_RD,
+ &dev_stats->bad_tx_req_id, "Bad TX req id count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "bad_rx_desc_num", CTLFLAG_RD,
+ &dev_stats->bad_rx_desc_num, "Bad RX descriptors number count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "invalid_state", CTLFLAG_RD,
+ &dev_stats->invalid_state, "Driver invalid state count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "missing_intr", CTLFLAG_RD,
+ &dev_stats->missing_intr, "Missing interrupt count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "tx_desc_malformed", CTLFLAG_RD,
+ &dev_stats->tx_desc_malformed, "TX descriptors malformed count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "rx_desc_malformed", CTLFLAG_RD,
+ &dev_stats->rx_desc_malformed, "RX descriptors malformed count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "missing_admin_interrupt", CTLFLAG_RD,
+ &dev_stats->missing_admin_interrupt, "Missing admin interrupts count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "admin_to", CTLFLAG_RD,
+ &dev_stats->admin_to, "Admin queue timeouts count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "device_request_reset", CTLFLAG_RD,
+ &dev_stats->device_request_reset, "Device reset requests count");
+ SYSCTL_ADD_COUNTER_U64(ctx, child, OID_AUTO, "total_resets", CTLFLAG_RD,
+ &dev_stats->total_resets, "Total resets count");
for (i = 0; i < adapter->num_io_queues; ++i, ++tx_ring, ++rx_ring) {
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
diff --git a/sys/dev/ena/ena_sysctl.h b/sys/dev/ena/ena_sysctl.h
index 4f5834214005..9dcfa311d73e 100644
--- a/sys/dev/ena/ena_sysctl.h
+++ b/sys/dev/ena/ena_sysctl.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2015-2023 Amazon.com, Inc. or its affiliates.
+ * Copyright (c) 2015-2024 Amazon.com, Inc. or its affiliates.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -46,6 +46,6 @@ extern int ena_enable_9k_mbufs;
#define ena_mbuf_sz (ena_enable_9k_mbufs ? MJUM9BYTES : MJUMPAGESIZE)
/* Force the driver to use large LLQ (Low Latency Queue) headers. */
-extern bool ena_force_large_llq_header;
+extern int ena_force_large_llq_header;
#endif /* !(ENA_SYSCTL_H) */
diff --git a/sys/dev/enetc/enetc_hw.h b/sys/dev/enetc/enetc_hw.h
index 507c4657453d..323d5529f50a 100644
--- a/sys/dev/enetc/enetc_hw.h
+++ b/sys/dev/enetc/enetc_hw.h
@@ -9,7 +9,6 @@
#define BIT(x) (1UL << (x))
#define GENMASK(h, l) (((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h))))
-#define ilog2(x) (flsl(x) - 1)
#define PCI_VENDOR_FREESCALE 0x1957
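
A worked example for the GENMASK() macro kept above: GENMASK(h, l) builds a 32-bit mask with bits h..l set inclusive:

    #include <assert.h>

    #define GENMASK(h, l) \
    	(((~0U) - (1U << (l)) + 1) & (~0U >> (32 - 1 - (h))))

    int
    main(void)
    {
    	assert(GENMASK(3, 1) == 0xeU);		/* bits 3..1 */
    	assert(GENMASK(31, 0) == 0xffffffffU);	/* full word */
    	return (0);
    }
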
diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c
index 3c0bc4723b05..53002f9d73ce 100644
--- a/sys/dev/enetc/if_enetc.c
+++ b/sys/dev/enetc/if_enetc.c
@@ -459,8 +459,7 @@ enetc_detach(if_ctx_t ctx)
for (i = 0; i < sc->rx_num_queues; i++)
iflib_irq_free(ctx, &sc->rx_queues[i].irq);
- if (sc->miibus != NULL)
- device_delete_child(sc->dev, sc->miibus);
+ bus_generic_detach(sc->dev);
if (sc->regs != NULL)
error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
@@ -849,7 +848,7 @@ enetc_hash_vid(uint16_t vid)
bool bit;
int i;
- for (i = 0;i < 6;i++) {
+ for (i = 0; i < 6; i++) {
bit = vid & BIT(i);
bit ^= !!(vid & BIT(i + 6));
hash |= bit << i;
@@ -1021,7 +1020,7 @@ enetc_msix_intr_assign(if_ctx_t ctx, int msix)
ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR));
}
vector = 0;
- for (i = 0;i < sc->tx_num_queues; i++, vector++) {
+ for (i = 0; i < sc->tx_num_queues; i++, vector++) {
tx_queue = &sc->tx_queues[i];
snprintf(irq_name, sizeof(irq_name), "txq%d", i);
iflib_softirq_alloc_generic(ctx, &tx_queue->irq,
@@ -1131,7 +1130,7 @@ enetc_isc_txd_encap(void *data, if_pkt_info_t ipi)
}
/* Now add remaining descriptors. */
- for (;i < ipi->ipi_nsegs; i++) {
+ for (; i < ipi->ipi_nsegs; i++) {
desc = &queue->ring[pidx];
bzero(desc, sizeof(*desc));
desc->addr = segs[i].ds_addr;
@@ -1344,7 +1343,8 @@ enetc_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IERRORS:
return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR));
case IFCOUNTER_OERRORS:
- return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
+ return (if_get_counter_default(ifp, cnt) +
+ ENETC_PORT_RD8(sc, ENETC_PM0_TERR));
default:
return (if_get_counter_default(ifp, cnt));
}
diff --git a/sys/dev/enic/cq_desc.h b/sys/dev/enic/cq_desc.h
index ae8847c6d9a1..4fb8cce7212e 100644
--- a/sys/dev/enic/cq_desc.h
+++ b/sys/dev/enic/cq_desc.h
@@ -44,14 +44,6 @@ struct cq_desc {
#define CQ_DESC_COMP_NDX_BITS 12
#define CQ_DESC_COMP_NDX_MASK ((1 << CQ_DESC_COMP_NDX_BITS) - 1)
-static inline void cq_color_enc(struct cq_desc *desc, const u8 color)
-{
- if (color)
- desc->type_color |= (1 << CQ_DESC_COLOR_SHIFT);
- else
- desc->type_color &= ~(1 << CQ_DESC_COLOR_SHIFT);
-}
-
static inline void cq_desc_enc(struct cq_desc *desc,
const u8 type, const u8 color, const u16 q_number,
const u16 completed_index)
@@ -87,11 +79,4 @@ static inline void cq_desc_dec(const struct cq_desc *desc_arg,
CQ_DESC_COMP_NDX_MASK;
}
-static inline void cq_color_dec(const struct cq_desc *desc_arg, u8 *color)
-{
- volatile const struct cq_desc *desc = desc_arg;
-
- *color = (desc->type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-}
-
#endif /* _CQ_DESC_H_ */
diff --git a/sys/dev/enic/enic.h b/sys/dev/enic/enic.h
index 6d0eb8563efd..eec6de823c9d 100644
--- a/sys/dev/enic/enic.h
+++ b/sys/dev/enic/enic.h
@@ -108,13 +108,13 @@ struct vnic_res {
#define ENIC_DEFAULT_VXLAN_PORT 4789
/*
- * Interrupt 0: LSC and errors
* Interrupt 1: rx queue 0
* Interrupt 2: rx queue 1
* ...
+ * Interrupt x: LSC and errors
*/
#define ENICPMD_LSC_INTR_OFFSET 0
-#define ENICPMD_RXQ_INTR_OFFSET 1
+#define ENICPMD_RXQ_INTR_OFFSET 0
#include "vnic_devcmd.h"
@@ -152,6 +152,9 @@ struct vnic_dev {
u64 args[VNIC_DEVCMD_NARGS];
int in_reset;
struct vnic_intr_coal_timer_info intr_coal_timer_info;
+ struct devcmd2_controller *devcmd2;
+ int (*devcmd_rtn)(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait);
void *(*alloc_consistent)(void *priv, size_t size,
bus_addr_t *dma_handle, struct iflib_dma_info *res, u8 *name);
void (*free_consistent)(void *priv, size_t size, void *vaddr,
@@ -175,6 +178,28 @@ struct intr_queue {
struct enic_softc *softc;
};
+#define ENIC_MAX_LINK_SPEEDS 3
+#define ENIC_LINK_SPEED_10G 10000
+#define ENIC_LINK_SPEED_4G 4000
+#define ENIC_LINK_40G_INDEX 2
+#define ENIC_LINK_10G_INDEX 1
+#define ENIC_LINK_4G_INDEX 0
+#define ENIC_RX_COALESCE_RANGE_END 125
+#define ENIC_AIC_TS_BREAK 100
+
+struct enic_rx_coal {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+ u32 range_end;
+ u32 use_adaptive_rx_coalesce;
+};
+
+/* Store only the lower range. Higher range is given by fw. */
+struct enic_intr_mod_range {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+};
+
struct enic {
struct enic *next;
struct rte_pci_device *pdev;
@@ -228,7 +253,7 @@ struct enic {
/* interrupt vectors (len = conf_intr_count) */
struct vnic_intr *intr;
- struct intr_queue *intr_queues;;
+ struct intr_queue *intr_queues;
unsigned int intr_count; /* equals enabled interrupts (lsc + rxqs) */
@@ -267,6 +292,9 @@ struct enic {
uint64_t tx_offload_mask; /* PKT_TX flags accepted */
struct enic_softc *softc;
int port_mtu;
+ struct enic_rx_coal rx_coalesce_setting;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
};
struct enic_softc {
@@ -307,11 +335,6 @@ struct enic_softc {
/* Per-instance private data structure */
-static inline unsigned int enic_vnic_rq_count(struct enic *enic)
-{
- return enic->rq_count;
-}
-
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
return rq;
@@ -323,21 +346,6 @@ static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
}
static inline uint32_t
-enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-{
- uint32_t d = i0 + i1;
- d -= (d >= n_descriptors) ? n_descriptors : 0;
- return d;
-}
-
-static inline uint32_t
-enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
-{
- int32_t d = i1 - i0;
- return (uint32_t)((d < 0) ? ((int32_t)n_descriptors + d) : d);
-}
-
-static inline uint32_t
enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
{
idx++;
@@ -346,34 +354,14 @@ enic_ring_incr(uint32_t n_descriptors, uint32_t idx)
return idx;
}
-void enic_free_wq(void *txq);
-int enic_alloc_intr_resources(struct enic *enic);
int enic_setup_finish(struct enic *enic);
-int enic_alloc_wq(struct enic *enic, uint16_t queue_idx,
- unsigned int socket_id, uint16_t nb_desc);
void enic_start_wq(struct enic *enic, uint16_t queue_idx);
int enic_stop_wq(struct enic *enic, uint16_t queue_idx);
void enic_start_rq(struct enic *enic, uint16_t queue_idx);
-void enic_free_rq(void *rxq);
-int enic_set_vnic_res(struct enic *enic);
-int enic_init_rss_nic_cfg(struct enic *enic);
-int enic_set_rss_reta(struct enic *enic, union vnic_rss_cpu *rss_cpu);
-int enic_set_vlan_strip(struct enic *enic);
+int enic_stop_rq(struct enic *enic, uint16_t queue_idx);
+void enic_dev_disable(struct enic *enic);
int enic_enable(struct enic *enic);
int enic_disable(struct enic *enic);
-void enic_remove(struct enic *enic);
-int enic_get_link_status(struct enic *enic);
-void enic_dev_stats_clear(struct enic *enic);
-void enic_add_packet_filter(struct enic *enic);
-int enic_set_mac_address(struct enic *enic, uint8_t *mac_addr);
-int enic_del_mac_address(struct enic *enic, int mac_index);
-unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq);
-
-void enic_post_wq_index(struct vnic_wq *wq);
-int enic_probe(struct enic *enic);
-int enic_clsf_init(struct enic *enic);
-void enic_clsf_destroy(struct enic *enic);
-int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
int enic_link_update(struct enic *enic);
bool enic_use_vector_rx_handler(struct enic *enic);
void enic_fdir_info(struct enic *enic);
diff --git a/sys/dev/enic/enic_res.c b/sys/dev/enic/enic_res.c
index d264874557a0..413873ad0fb4 100644
--- a/sys/dev/enic/enic_res.c
+++ b/sys/dev/enic/enic_res.c
@@ -95,11 +95,11 @@ int enic_get_vnic_config(struct enic *enic)
dev_info(enic_get_dev(enic),
"vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x "
- "wq/rq %d/%d mtu d, max mtu:%d\n",
+ "wq/rq %d/%d mtu %d, max mtu:%d\n",
enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2],
enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5],
c->wq_desc_count, c->rq_desc_count,
- /* enic->rte_dev->data->mtu, */ enic->max_mtu);
+ c->mtu, enic->max_mtu);
dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s "
"rss %s intr mode %s type %s timer %d usec "
"loopback tag 0x%04x\n",
diff --git a/sys/dev/enic/enic_res.h b/sys/dev/enic/enic_res.h
index 1a6f3a3ca98f..82963e61a44f 100644
--- a/sys/dev/enic/enic_res.h
+++ b/sys/dev/enic/enic_res.h
@@ -67,7 +67,5 @@ int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type,
u8 ig_vlan_strip_en);
void enic_get_res_counts(struct enic *enic);
void enic_init_vnic_resources(struct enic *enic);
-int enic_alloc_vnic_resources(struct enic *);
-void enic_free_vnic_resources(struct enic *);
#endif /* _ENIC_RES_H_ */
diff --git a/sys/dev/enic/enic_txrx.c b/sys/dev/enic/enic_txrx.c
index 5a557fc7f94a..169041587d06 100644
--- a/sys/dev/enic/enic_txrx.c
+++ b/sys/dev/enic/enic_txrx.c
@@ -103,6 +103,7 @@ enic_isc_txd_encap(void *vsc, if_pkt_info_t pi)
softc = vsc;
enic = &softc->enic;
+ if_softc_ctx_t scctx = softc->scctx;
wq = &enic->wq[pi->ipi_qsidx];
nsegs = pi->ipi_nsegs;
@@ -112,6 +113,9 @@ enic_isc_txd_encap(void *vsc, if_pkt_info_t pi)
head_idx = wq->head_idx;
desc_count = wq->ring.desc_count;
+ if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
+ offload_mode |= WQ_ENET_OFFLOAD_MODE_CSUM;
+
for (i = 0; i < nsegs; i++) {
eop = 0;
cq = 0;
@@ -320,7 +324,7 @@ enic_isc_rxd_flush(void *vsc, uint16_t rxqid, uint8_t flid, qidx_t pidx)
static int
enic_legacy_intr(void *xsc)
{
- return -1;
+ return (1);
}
static inline void
@@ -375,7 +379,7 @@ enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
vnic_wq_service(&enic->wq[q_number], cq_desc,
completed_index, NULL, opaque);
- return 0;
+ return (0);
}
static void
@@ -384,7 +388,7 @@ vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
void(*buf_service)(struct vnic_rq *rq, struct cq_desc *cq_desc,
/* struct vnic_rq_buf * *buf, */ int skipped, void *opaque), void *opaque)
{
-
+ if_softc_ctx_t scctx;
if_rxd_info_t ri = (if_rxd_info_t) opaque;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -396,6 +400,8 @@ vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
int cqidx;
if_rxd_frag_t frag;
+ scctx = rq->vdev->softc->scctx;
+
cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
&type, &color, &q_number, &completed_index,
&ingress_port, &fcoe, &eop, &sop, &rss_type,
@@ -419,6 +425,11 @@ vnic_rq_service(struct vnic_rq *rq, struct cq_desc *cq_desc,
ri->iri_cidx = cqidx;
ri->iri_nfrags = 1;
ri->iri_len = bytes_written;
+
+ if ((scctx->isc_capenable & IFCAP_RXCSUM) != 0)
+ if (!csum_not_calc && (tcp_udp_csum_ok || ipv4_csum_ok)) {
+ ri->iri_csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID);
+ }
}
static int
@@ -431,7 +442,7 @@ enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
vnic_rq_service(&enic->rq[ri->iri_qsidx], cq_desc, completed_index,
VNIC_RQ_RETURN_DESC, NULL, /* enic_rq_indicate_buf, */ opaque);
- return 0;
+ return (0);
}
void
@@ -468,10 +479,8 @@ enic_stop_wq(struct enic *enic, uint16_t queue_idx)
int ret;
ret = vnic_wq_disable(&enic->wq[queue_idx]);
- if (ret)
- return ret;
- return 0;
+ return (ret);
}
void
@@ -483,3 +492,19 @@ enic_start_rq(struct enic *enic, uint16_t queue_idx)
vnic_rq_enable(rq);
enic_initial_post_rx(enic, rq);
}
+
+int
+enic_stop_rq(struct enic *enic, uint16_t queue_idx)
+{
+ int ret;
+
+ ret = vnic_rq_disable(&enic->rq[queue_idx]);
+
+ return (ret);
+}
+
+void
+enic_dev_disable(struct enic *enic)
+{
+ vnic_dev_disable(enic->vdev);
+}
diff --git a/sys/dev/enic/if_enic.c b/sys/dev/enic/if_enic.c
index dc0c0d028e20..35620fece6bf 100644
--- a/sys/dev/enic/if_enic.c
+++ b/sys/dev/enic/if_enic.c
@@ -201,11 +201,11 @@ static struct if_shared_ctx enic_sctx_init = {
* descriptor */
.isc_rx_nsegments = 1, /* One mapping per descriptor */
.isc_rx_maxsegsize = ENIC_DEFAULT_RX_MAX_PKT_SIZE,
- .isc_admin_intrcnt = 3,
+ .isc_admin_intrcnt = 2,
.isc_vendor_info = enic_vendor_info_array,
.isc_driver_version = "1",
.isc_driver = &enic_iflib_driver,
- .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ,
+ .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_SKIP_MSIX,
/*
* Number of receive queues per receive queue set, with associated
@@ -236,6 +236,99 @@ enic_register(device_t dev)
}
static int
+enic_allocate_msix(struct enic_softc *softc)
+{
+ if_ctx_t ctx;
+ if_softc_ctx_t scctx;
+ if_shared_ctx_t sctx;
+ device_t dev;
+ cpuset_t cpus;
+ int queues, vectors, requested;
+ int err = 0;
+
+ dev = softc->dev;
+ ctx = softc->ctx;
+ scctx = softc->scctx;
+ sctx = iflib_get_sctx(ctx);
+
+ if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0) {
+ device_printf(dev, "Unable to fetch CPU list\n");
+ CPU_COPY(&all_cpus, &cpus);
+ }
+
+ queues = CPU_COUNT(&cpus);
+ queues = imin(queues, scctx->isc_nrxqsets);
+ queues = imin(queues, scctx->isc_ntxqsets);
+ requested = queues * 2 + sctx->isc_admin_intrcnt;
+ scctx->isc_nrxqsets = queues;
+ scctx->isc_ntxqsets = queues;
+
+ vectors = requested;
+ if ((err = pci_alloc_msix(dev, &vectors)) != 0) {
+ device_printf(dev,
+ "failed to allocate %d MSI-X vectors, err: %d\n", requested,
+ err);
+ err = 1;
+ goto enic_allocate_msix_out;
+ }
+ if (vectors != requested) {
+ device_printf(dev,
+ "Unable to allocate sufficient MSI-X vectors "
+ "(got %d, need %d)\n", vectors, requested);
+ pci_release_msi(dev);
+ err = 1;
+ goto enic_allocate_msix_out;
+ }
+
+ device_printf(dev, "Using MSI-X interrupts with %d vectors\n",
+ vectors);
+
+ scctx->isc_intr = IFLIB_INTR_MSIX;
+ scctx->isc_vectors = vectors;
+
+enic_allocate_msix_out:
+ return (err);
+}
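
Illustrative arithmetic for the vector budget above: the queue count is clamped to the usable CPUs and to the configured rx/tx queue sets, then each queue set costs two vectors plus the admin interrupts. With 8 CPUs, 4 queue sets and isc_admin_intrcnt == 2, the driver requests 4 * 2 + 2 == 10 vectors. A standalone restatement:

    static int
    msix_budget(int ncpus, int nrxqsets, int ntxqsets, int admin_intrcnt)
    {
    	int queues = ncpus;

    	if (nrxqsets < queues)
    		queues = nrxqsets;
    	if (ntxqsets < queues)
    		queues = ntxqsets;
    	/* one RX and one TX vector per queue set, plus admin */
    	return (queues * 2 + admin_intrcnt);
    }
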
+
+static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+ {0, 0}, /* 0 - 4 Gbps */
+ {0, 3}, /* 4 - 10 Gbps */
+ {3, 6}, /* 10 - 40 Gbps */
+};
+
+static void
+enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /*
+ * 1. Read the link speed from fw.
+ * 2. Pick the default range for the speed.
+ * 3. Update it in enic->rx_coalesce_setting.
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (ENIC_LINK_SPEED_10G < speed)
+ index = ENIC_LINK_40G_INDEX;
+ else if (ENIC_LINK_SPEED_4G < speed)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
+static int
enic_attach_pre(if_ctx_t ctx)
{
if_softc_ctx_t scctx;
@@ -283,6 +376,8 @@ enic_attach_pre(if_ctx_t ctx)
ENIC_LOCK(softc);
vnic_dev_register(vdev, &softc->mem, 1);
enic->vdev = vdev;
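+ /* Pick the devcmd2 posted-command interface when available, else devcmd1. */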
+ vnic_dev_cmd_init(enic->vdev);
+
vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
vnic_dev_cmd(vdev, CMD_INIT_v1, &a0, &a1, wait);
@@ -326,6 +421,7 @@ enic_attach_pre(if_ctx_t ctx)
/* Set ingress vlan rewrite mode before vnic initialization */
- enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
+ enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
enic->ig_vlan_rewrite_mode);
if (err) {
@@ -360,8 +456,10 @@ enic_attach_pre(if_ctx_t ctx)
softc->scctx = iflib_get_softc_ctx(ctx);
scctx = softc->scctx;
scctx->isc_txrx = &enic_txrx;
- scctx->isc_capabilities = scctx->isc_capenable = 0;
+ scctx->isc_capabilities = scctx->isc_capenable = IFCAP_HWCSUM;
scctx->isc_tx_csum_flags = 0;
+ if_setmtu(softc->ifp, enic->config.mtu);
scctx->isc_max_frame_size = enic->config.mtu + ETHER_HDR_LEN + \
ETHER_CRC_LEN;
scctx->isc_nrxqsets_max = enic->conf_rq_count;
@@ -389,7 +487,6 @@ enic_attach_pre(if_ctx_t ctx)
}
scctx->isc_tx_nsegments = 31;
- scctx->isc_vectors = enic->conf_cq_count;
scctx->isc_msix_bar = -1;
ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -397,8 +494,7 @@ enic_attach_pre(if_ctx_t ctx)
ifmedia_add(softc->media, IFM_ETHER | IFM_10_FL, 0, NULL);
/*
- * Allocate the CQ here since TX is called first before RX for now
- * assume RX and TX are the same
+ * Allocate the CQ here since TX is called first before RX.
*/
if (softc->enic.cq == NULL)
softc->enic.cq = malloc(sizeof(struct vnic_cq) *
@@ -407,8 +503,6 @@ enic_attach_pre(if_ctx_t ctx)
if (softc->enic.cq == NULL)
return (ENOMEM);
- softc->enic.cq->ntxqsets = softc->enic.wq_count + softc->enic.rq_count;
-
/*
* Allocate the consistent memory for stats and counters upfront so
* both primary and secondary processes can access them.
@@ -416,12 +510,20 @@ enic_attach_pre(if_ctx_t ctx)
err = vnic_dev_alloc_stats_mem(enic->vdev);
if (err) {
dev_err(enic, "Failed to allocate cmd memory, aborting\n");
+ goto err_out_dev_close;
+ }
+
+ err = enic_allocate_msix(softc);
+ if (err) {
+ dev_err(enic, "Failed to allocate MSIX, aborting\n");
+ goto err_out_dev_close;
}
return (rc);
err_out_dev_close:
vnic_dev_close(enic->vdev);
+ vnic_dev_deinit_devcmd2(enic->vdev);
err_out_unregister:
free(softc->vdev.devcmd, M_DEVBUF);
free(softc->enic.intr_queues, M_DEVBUF);
@@ -482,9 +584,10 @@ enic_msix_intr_assign(if_ctx_t ctx, int msix)
snprintf(irq_name, sizeof(irq_name), "etxq%d:%d", i -
scctx->isc_nrxqsets, device_get_unit(softc->dev));
-
- iflib_softirq_alloc_generic(ctx, &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX, &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets, irq_name);
-
+ iflib_softirq_alloc_generic(ctx,
+ &enic->intr_queues[i].intr_irq, IFLIB_INTR_TX,
+ &enic->wq[i - scctx->isc_nrxqsets], i - scctx->isc_nrxqsets,
+ irq_name);
enic->intr[i].index = i;
enic->intr[i].vdev = enic->vdev;
@@ -567,6 +670,7 @@ enic_attach_post(if_ctx_t ctx)
enic_setup_sysctl(softc);
enic_init_vnic_resources(enic);
+ enic_set_rx_coal_setting(enic);
enic_setup_finish(enic);
ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -589,7 +693,9 @@ enic_detach(if_ctx_t ctx)
enic_free_irqs(softc);
ENIC_LOCK(softc);
+ vnic_dev_deinit(enic->vdev);
vnic_dev_close(enic->vdev);
+ vnic_dev_deinit_devcmd2(enic->vdev);
free(softc->vdev.devcmd, M_DEVBUF);
pci_disable_busmaster(softc->dev);
enic_pci_mapping_free(softc);
@@ -626,7 +732,7 @@ enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
for (q = 0; q < ntxqsets; q++) {
struct vnic_wq *wq;
struct vnic_cq *cq;
- unsigned int cq_wq;
+ unsigned int cq_wq;
wq = &softc->enic.wq[q];
cq_wq = enic_cq_wq(&softc->enic, q);
@@ -646,7 +752,6 @@ enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
wq->head_idx = 0;
wq->tail_idx = 0;
- wq->ring.size = wq->ring.desc_count * wq->ring.desc_size;
wq->ring.descs = vaddrs[q * ntxqs + 0];
wq->ring.base_addr = paddrs[q * ntxqs + 0];
@@ -659,7 +764,6 @@ enic_tx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
cq->ring.desc_count = softc->scctx->isc_ntxd[q];
cq->ring.desc_avail = cq->ring.desc_count - 1;
- cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
cq->ring.descs = vaddrs[q * ntxqs + 1];
cq->ring.base_addr = paddrs[q * ntxqs + 1];
@@ -715,7 +819,6 @@ enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
cq->ring.desc_count = softc->scctx->isc_nrxd[1];
cq->ring.desc_avail = cq->ring.desc_count - 1;
- cq->ring.size = cq->ring.desc_count * cq->ring.desc_size;
cq->ring.descs = vaddrs[q * nrxqs + 0];
cq->ring.base_addr = paddrs[q * nrxqs + 0];
@@ -731,7 +834,6 @@ enic_rx_queues_alloc(if_ctx_t ctx, caddr_t * vaddrs, uint64_t * paddrs,
rq->ring.desc_count = softc->scctx->isc_nrxd[0];
rq->ring.desc_avail = rq->ring.desc_count - 1;
- rq->ring.size = rq->ring.desc_count * rq->ring.desc_size;
rq->ring.descs = vaddrs[q * nrxqs + 1];
rq->ring.base_addr = paddrs[q * nrxqs + 1];
rq->need_initial_post = true;
@@ -807,6 +909,11 @@ enic_stop(if_ctx_t ctx)
struct enic *enic;
if_softc_ctx_t scctx;
unsigned int index;
+ struct vnic_wq *wq;
+ struct vnic_rq *rq;
+ struct vnic_cq *cq;
+ unsigned int cq_wq, cq_rq;
+
softc = iflib_get_softc(ctx);
scctx = softc->scctx;
@@ -817,15 +924,36 @@ enic_stop(if_ctx_t ctx)
softc->link_active = 0;
softc->stopped = 1;
+ enic_dev_disable(enic);
+
for (index = 0; index < scctx->isc_ntxqsets; index++) {
enic_stop_wq(enic, index);
vnic_wq_clean(&enic->wq[index]);
vnic_cq_clean(&enic->cq[enic_cq_rq(enic, index)]);
+
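+ /* Reset the software ring indices so a later enic_init() starts clean. */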
+ wq = &softc->enic.wq[index];
+ wq->ring.desc_avail = wq->ring.desc_count - 1;
+ wq->ring.last_count = wq->ring.desc_count;
+ wq->head_idx = 0;
+ wq->tail_idx = 0;
+
+ cq_wq = enic_cq_wq(&softc->enic, index);
+ cq = &softc->enic.cq[cq_wq];
+ cq->ring.desc_avail = cq->ring.desc_count - 1;
}
for (index = 0; index < scctx->isc_nrxqsets; index++) {
+ enic_stop_rq(enic, index);
vnic_rq_clean(&enic->rq[index]);
vnic_cq_clean(&enic->cq[enic_cq_wq(enic, index)]);
+
+ rq = &softc->enic.rq[index];
+ cq_rq = enic_cq_rq(&softc->enic, index);
+ cq = &softc->enic.cq[cq_rq];
+
+ cq->ring.desc_avail = cq->ring.desc_count - 1;
+ rq->ring.desc_avail = rq->ring.desc_count - 1;
+ rq->need_initial_post = true;
}
for (index = 0; index < scctx->isc_vectors; index++) {
@@ -845,6 +973,9 @@ enic_init(if_ctx_t ctx)
scctx = softc->scctx;
enic = &softc->enic;
+
+ enic_init_vnic_resources(enic);
+
for (index = 0; index < scctx->isc_ntxqsets; index++)
enic_prep_wq_for_simple_tx(&softc->enic, index);
@@ -862,6 +993,8 @@ enic_init(if_ctx_t ctx)
vnic_dev_enable_wait(enic->vdev);
ENIC_UNLOCK(softc);
+ softc->stopped = 0;
+
enic_link_status(softc);
}
@@ -942,12 +1075,14 @@ enic_mtu_set(if_ctx_t ctx, uint32_t mtu)
softc = iflib_get_softc(ctx);
enic = &softc->enic;
if (mtu > enic->port_mtu){
return (EINVAL);
}
+ enic_stop(softc->ctx);
enic->config.mtu = mtu;
scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ enic_init(softc->ctx);
return (0);
}
@@ -1026,7 +1161,6 @@ static void
enic_update_admin_status(if_ctx_t ctx)
{
struct enic_softc *softc;
-
softc = iflib_get_softc(ctx);
enic_link_status(softc);
@@ -1100,7 +1234,9 @@ enic_setup_txq_sysctl(struct vnic_wq *wq, int i, struct sysctl_ctx_list *ctx,
{
struct sysctl_oid *txsnode;
struct sysctl_oid_list *txslist;
- struct vnic_stats *stats = wq[i].vdev->stats;
+ struct vnic_stats *stats;
+
+ stats = wq[i].vdev->stats;
txsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
@@ -1136,7 +1272,9 @@ enic_setup_rxq_sysctl(struct vnic_rq *rq, int i, struct sysctl_ctx_list *ctx,
{
struct sysctl_oid *rxsnode;
struct sysctl_oid_list *rxslist;
- struct vnic_stats *stats = rq[i].vdev->stats;
+ struct vnic_stats *stats;
+
+ stats = rq[i].vdev->stats;
rxsnode = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "hstats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Host Statistics");
@@ -1357,7 +1495,7 @@ enic_dev_init(struct enic *enic)
if (vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE, ENIC_DEFAULT_VXLAN_PORT)) {
dev_err(enic, "failed to update vxlan port\n");
- return -EINVAL;
+ return (EINVAL);
}
}
return 0;
@@ -1441,7 +1579,7 @@ enic_dev_wait(struct vnic_dev *vdev, int (*start) (struct vnic_dev *, int),
return 0;
usleep(1000);
}
- return -ETIMEDOUT;
+ return (ETIMEDOUT);
}
static int
@@ -1452,7 +1590,7 @@ enic_map_bar(struct enic_softc *softc, struct enic_bar_info *bar, int bar_num,
if (bar->res != NULL) {
device_printf(softc->dev, "Bar %d already mapped\n", bar_num);
- return EDOOFUS;
+ return (EDOOFUS);
}
bar->rid = PCIR_BAR(bar_num);
@@ -1481,20 +1619,18 @@ enic_init_vnic_resources(struct enic *enic)
unsigned int rxq_interrupt_enable = 0;
unsigned int rxq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
unsigned int txq_interrupt_enable = 0;
- unsigned int txq_interrupt_offset = ENICPMD_RXQ_INTR_OFFSET;
+ unsigned int txq_interrupt_offset;
unsigned int index = 0;
unsigned int cq_idx;
if_softc_ctx_t scctx;
scctx = enic->softc->scctx;
-
rxq_interrupt_enable = 1;
- txq_interrupt_enable = 1;
+ txq_interrupt_enable = 0;
rxq_interrupt_offset = 0;
- txq_interrupt_offset = enic->intr_count - 2;
- txq_interrupt_offset = 1;
+ txq_interrupt_offset = scctx->isc_nrxqsets;
for (index = 0; index < enic->intr_count; index++) {
vnic_intr_alloc(enic->vdev, &enic->intr[index], index);
@@ -1568,7 +1704,7 @@ enic_update_packet_filter(struct enic *enic)
}
static bool
-enic_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
+enic_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
{
switch (event) {
case IFLIB_RESTART_VLAN_CONFIG:
diff --git a/sys/dev/enic/vnic_cq.c b/sys/dev/enic/vnic_cq.c
index 72de29e5a381..bd3629530a61 100644
--- a/sys/dev/enic/vnic_cq.c
+++ b/sys/dev/enic/vnic_cq.c
@@ -40,6 +40,4 @@ void vnic_cq_clean(struct vnic_cq *cq)
ENIC_BUS_WRITE_4(cq->ctrl, CQ_HEAD, 0);
ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL, 0);
ENIC_BUS_WRITE_4(cq->ctrl, CQ_TAIL_COLOR, 1);
-
- vnic_dev_clear_desc_ring(&cq->ring);
}
diff --git a/sys/dev/enic/vnic_cq.h b/sys/dev/enic/vnic_cq.h
index 26f9009612c5..7f875d57ed74 100644
--- a/sys/dev/enic/vnic_cq.h
+++ b/sys/dev/enic/vnic_cq.h
@@ -63,27 +63,22 @@ struct vnic_cq {
unsigned int to_clean;
unsigned int last_color;
unsigned int interrupt_offset;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
#ifdef ENIC_AIC
struct vnic_rx_bytes_counter pkt_size_counter;
- unsigned int cur_rx_coal_timeval;
- unsigned int tobe_rx_coal_timeval;
ktime_t prev_ts;
#endif
- int ntxqsets;
- int nrxqsets;
- int ntxqsets_start;
- int nrxqsets_start;
};
-void vnic_cq_free(struct vnic_cq *cq);
void vnic_cq_init(struct vnic_cq *cq, unsigned int flow_control_enable,
unsigned int color_enable, unsigned int cq_head, unsigned int cq_tail,
unsigned int cq_tail_color, unsigned int interrupt_enable,
unsigned int cq_entry_enable, unsigned int message_enable,
unsigned int interrupt_offset, u64 message_addr);
void vnic_cq_clean(struct vnic_cq *cq);
-int vnic_cq_mem_size(struct vnic_cq *cq, unsigned int desc_count,
- unsigned int desc_size);
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
unsigned int work_to_do,
diff --git a/sys/dev/enic/vnic_dev.c b/sys/dev/enic/vnic_dev.c
index 3425d7372e56..a8228aed69aa 100644
--- a/sys/dev/enic/vnic_dev.c
+++ b/sys/dev/enic/vnic_dev.c
@@ -44,7 +44,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
u8 type;
if (num_bars == 0)
- return -EINVAL;
+ return (EINVAL);
rh = malloc(sizeof(*rh), M_DEVBUF, M_NOWAIT | M_ZERO);
mrh = malloc(sizeof(*mrh), M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -52,7 +52,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
pr_err("vNIC BAR0 res hdr not mem-mapped\n");
free(rh, M_DEVBUF);
free(mrh, M_DEVBUF);
- return -EINVAL;
+ return (EINVAL);
}
/* Check for mgmt vnic in addition to normal vnic */
@@ -69,7 +69,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
rh->magic, rh->version);
free(rh, M_DEVBUF);
free(mrh, M_DEVBUF);
- return -EINVAL;
+ return (EINVAL);
}
}
@@ -97,6 +97,7 @@ static int vnic_dev_discover_res(struct vnic_dev *vdev,
case RES_TYPE_INTR_CTRL:
case RES_TYPE_INTR_PBA_LEGACY:
case RES_TYPE_DEVCMD:
+ case RES_TYPE_DEVCMD2:
break;
default:
ENIC_BUS_READ_REGION_4(softc, mem, r_offset, (void *)r, sizeof(*r) / 4);
@@ -167,17 +168,12 @@ unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
ring->desc_size = VNIC_ALIGN(desc_size, desc_align);
- ring->size = ring->desc_count * ring->desc_size;
- ring->size_unaligned = ring->size + ring->base_align;
+ ring->size_unaligned = ring->desc_count * ring->desc_size +
+ ring->base_align;
return ring->size_unaligned;
}
-void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring)
-{
- memset(ring->descs, 0, ring->size);
-}
-
static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
int wait)
{
@@ -189,12 +185,12 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
if (status == 0xFFFFFFFF) {
/* PCI-e target device is gone */
- return -ENODEV;
+ return (ENODEV);
}
if (status & STAT_BUSY) {
pr_err("Busy devcmd %d\n", _CMD_N(cmd));
- return -EBUSY;
+ return (EBUSY);
}
if (_CMD_DIR(cmd) & _CMD_DIR_WRITE) {
@@ -214,7 +210,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
status = ENIC_BUS_READ_4(devcmd, DEVCMD_STATUS);
if (status == 0xFFFFFFFF) {
/* PCI-e target device is gone */
- return -ENODEV;
+ return (ENODEV);
}
if (!(status & STAT_BUSY)) {
@@ -225,7 +221,7 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
pr_err("Devcmd %d failed " \
"with error code %d\n",
_CMD_N(cmd), err);
- return err;
+ return (err);
}
if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
@@ -237,7 +233,82 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
pr_err("Timedout devcmd %d\n", _CMD_N(cmd));
- return -ETIMEDOUT;
+ return (ETIMEDOUT);
+}
+
+static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
+ int wait)
+{
+ struct devcmd2_controller *dc2c = vdev->devcmd2;
+ struct devcmd2_result *result;
+ u8 color;
+ unsigned int i;
+ u32 fetch_index, new_posted;
+ int delay, err;
+ u32 posted = dc2c->posted;
+
+ fetch_index = ENIC_BUS_READ_4(dc2c->wq_ctrl, TX_FETCH_INDEX);
+ if (fetch_index == 0xFFFFFFFF)
+ return (ENODEV);
+
+ new_posted = (posted + 1) % DEVCMD2_RING_SIZE;
+
+ if (new_posted == fetch_index) {
+ device_printf(dev_from_vnic_dev(vdev),
+ "devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
+ _CMD_N(cmd), fetch_index, posted);
+ return (EBUSY);
+ }
+
+ dc2c->cmd_ring[posted].cmd = cmd;
+ dc2c->cmd_ring[posted].flags = 0;
+
+ if ((_CMD_FLAGS(cmd) & _CMD_FLAGS_NOWAIT))
+ dc2c->cmd_ring[posted].flags |= DEVCMD2_FNORESULT;
+ if (_CMD_DIR(cmd) & _CMD_DIR_WRITE)
+ for (i = 0; i < VNIC_DEVCMD_NARGS; i++)
+ dc2c->cmd_ring[posted].args[i] = vdev->args[i];
+
+ ENIC_BUS_WRITE_4(dc2c->wq_ctrl, TX_POSTED_INDEX, new_posted);
+ dc2c->posted = new_posted;
+
+ if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
+ return (0);
+
+ result = dc2c->result + dc2c->next_result;
+ color = dc2c->color;
+
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+
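+ /*
+ * Poll the results ring: the color bit flips each time the ring wraps,
+ * so a matching color marks a freshly written result for this command.
+ */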
+ for (delay = 0; delay < wait; delay++) {
+ if (result->color == color) {
+ if (result->error) {
+ err = result->error;
+ if (err != ERR_ECMDUNKNOWN ||
+ cmd != CMD_CAPABILITY)
+ device_printf(dev_from_vnic_dev(vdev),
+ "Error %d devcmd %d\n", err,
+ _CMD_N(cmd));
+ return (err);
+ }
+ if (_CMD_DIR(cmd) & _CMD_DIR_READ)
+ for (i = 0; i < VNIC_DEVCMD2_NARGS; i++)
+ vdev->args[i] = result->results[i];
+
+ return (0);
+ }
+ udelay(100);
+ }
+
+ device_printf(dev_from_vnic_dev(vdev),
+ "devcmd %d timed out\n", _CMD_N(cmd));
+
+ return (ETIMEDOUT);
}
static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
@@ -253,7 +324,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
*/
if (nargs > VNIC_DEVCMD_NARGS - 2) {
pr_err("number of args %d exceeds the maximum\n", nargs);
- return -EINVAL;
+ return (EINVAL);
}
memset(vdev->args, 0, sizeof(vdev->args));
@@ -261,9 +332,9 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
vdev->args[1] = cmd;
memcpy(&vdev->args[2], args, nargs * sizeof(args[0]));
- err = _vnic_dev_cmd(vdev, proxy_cmd, wait);
+ err = vdev->devcmd_rtn(vdev, proxy_cmd, wait);
if (err)
- return err;
+ return (err);
status = (u32)vdev->args[0];
if (status & STAT_ERROR) {
@@ -271,7 +342,7 @@ static int vnic_dev_cmd_proxy(struct vnic_dev *vdev,
if (err != ERR_ECMDUNKNOWN ||
cmd != CMD_CAPABILITY)
pr_err("Error %d proxy devcmd %d\n", err, _CMD_N(cmd));
- return err;
+ return (err);
}
memcpy(args, &vdev->args[1], nargs * sizeof(args[0]));
@@ -286,16 +357,16 @@ static int vnic_dev_cmd_no_proxy(struct vnic_dev *vdev,
if (nargs > VNIC_DEVCMD_NARGS) {
pr_err("number of args %d exceeds the maximum\n", nargs);
- return -EINVAL;
+ return (EINVAL);
}
memset(vdev->args, 0, sizeof(vdev->args));
memcpy(vdev->args, args, nargs * sizeof(args[0]));
- err = _vnic_dev_cmd(vdev, cmd, wait);
+ err = vdev->devcmd_rtn(vdev, cmd, wait);
memcpy(args, vdev->args, nargs * sizeof(args[0]));
- return err;
+ return (err);
}
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
@@ -328,7 +399,7 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
*a1 = args[1];
}
- return err;
+ return (err);
}
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
@@ -400,7 +471,7 @@ int vnic_dev_capable_filter_mode(struct vnic_dev *vdev, u32 *mode,
args[1] = 0;
err = vnic_dev_cmd_args(vdev, CMD_CAPABILITY, args, 2, 1000);
if (err)
- return err;
+ return (err);
max_level = args[1];
goto parse_max_level;
} else if (args[2] == FILTER_CAP_MODE_V1) {
@@ -479,7 +550,7 @@ int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
break;
}
- return err;
+ return (err);
}
int vnic_dev_stats_clear(struct vnic_dev *vdev)
@@ -497,7 +568,7 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats)
int rc;
if (!vdev->stats)
- return -ENOMEM;
+ return (ENOMEM);
*stats = vdev->stats;
a0 = vdev->stats_res.idi_paddr;
@@ -524,10 +595,10 @@ int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
int err;
if (num_counters > VNIC_MAX_FLOW_COUNTERS)
- return -ENOMEM;
+ return (ENOMEM);
if (period > 0 && (period < VNIC_COUNTER_DMA_MIN_PERIOD ||
num_counters == 0))
- return -EINVAL;
+ return (EINVAL);
args[0] = num_counters;
args[1] = vdev->flow_counters_res.idi_paddr;
@@ -545,7 +616,7 @@ int vnic_dev_counter_dma_cfg(struct vnic_dev *vdev, u32 period,
vdev->flow_counters_dma_active = (num_counters != 0 &&
period != 0);
- return err;
+ return (err);
}
int vnic_dev_close(struct vnic_dev *vdev)
@@ -593,7 +664,7 @@ int vnic_dev_open_done(struct vnic_dev *vdev, int *done)
err = vnic_dev_cmd(vdev, CMD_OPEN_STATUS, &a0, &a1, wait);
if (err)
- return err;
+ return (err);
*done = (a0 == 0);
@@ -611,7 +682,7 @@ int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
err = vnic_dev_cmd(vdev, CMD_GET_MAC_ADDR, &a0, &a1, wait);
if (err)
- return err;
+ return (err);
for (i = 0; i < ETH_ALEN; i++)
mac_addr[i] = ((u8 *)&a0)[i];
@@ -636,7 +707,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
if (err)
pr_err("Can't set packet filter\n");
- return err;
+ return (err);
}
int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
@@ -655,7 +726,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
- return err;
+ return (err);
}
int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
@@ -674,7 +745,7 @@ int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
addr[0], addr[1], addr[2], addr[3], addr[4], addr[5],
err);
- return err;
+ return (err);
}
int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
@@ -771,7 +842,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
vdev->notify_sz = 0;
}
- return err;
+ return (err);
}
int vnic_dev_notify_unset(struct vnic_dev *vdev)
@@ -807,7 +878,8 @@ static int vnic_dev_notify_ready(struct vnic_dev *vdev)
csum += words[i];
} while (csum != words[0]);
- return 1;
+
+ return (1);
}
int vnic_dev_init(struct vnic_dev *vdev, int arg)
@@ -923,7 +995,7 @@ int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev)
iflib_dma_alloc(softc->ctx, sizeof(struct vnic_counter_counts) * VNIC_MAX_FLOW_COUNTERS, &vdev->flow_counters_res, 0);
vdev->flow_counters = (struct vnic_counter_counts *)vdev->flow_counters_res.idi_vaddr;
vdev->flow_counters_dma_active = 0;
- return vdev->flow_counters == NULL ? -ENOMEM : 0;
+ return (vdev->flow_counters == NULL ? ENOMEM : 0);
}
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
@@ -942,6 +1014,85 @@ err_out:
return NULL;
}
+static int vnic_dev_init_devcmd1(struct vnic_dev *vdev)
+{
+ vdev->devcmd = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD, 0);
+ if (!vdev->devcmd)
+ return (ENODEV);
+ vdev->devcmd_rtn = _vnic_dev_cmd;
+
+ return (0);
+}
+
+static int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
+{
+ int err;
+ unsigned int fetch_index;
+
+ err = 0;
+
+ if (vdev->devcmd2)
+ return (0);
+
+ vdev->devcmd2 = malloc(sizeof(*vdev->devcmd2), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (vdev->devcmd2 == NULL)
+ return (ENOMEM);
+
+ vdev->devcmd2->color = 1;
+ vdev->devcmd2->result_size = DEVCMD2_RING_SIZE;
+
+ err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
+ DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+ vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
+ vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
+
+ fetch_index = ENIC_BUS_READ_4(vdev->devcmd2->wq.ctrl, TX_FETCH_INDEX);
+ if (fetch_index == 0xFFFFFFFF) {
+ err = ENODEV;
+ goto err_free_devcmd2;
+ }
+
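+ /* Resume posting at the hardware's current fetch index. */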
+ enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
+ 0);
+ vdev->devcmd2->posted = fetch_index;
+ vnic_wq_enable(&vdev->devcmd2->wq);
+
+ err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
+ DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
+ if (err)
+ goto err_free_devcmd2;
+
+ vdev->devcmd2->result = vdev->devcmd2->results_ring.descs;
+ vdev->args[0] = (u64)vdev->devcmd2->results_ring.base_addr |
+ VNIC_PADDR_TARGET;
+ vdev->args[1] = DEVCMD2_RING_SIZE;
+
+ err = _vnic_dev_cmd2(vdev, CMD_INITIALIZE_DEVCMD2, 1000);
+ if (err)
+ goto err_free_devcmd2;
+
+ vdev->devcmd_rtn = _vnic_dev_cmd2;
+
+ return (err);
+
+err_free_devcmd2:
+ if (vdev->devcmd2->wq_ctrl)
+ vnic_wq_free(&vdev->devcmd2->wq);
+ if (vdev->devcmd2->result)
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ free(vdev->devcmd2, M_DEVBUF);
+ vdev->devcmd2 = NULL;
+
+ return (err);
+}
+
/*
* vnic_dev_classifier: Add/Delete classifier entries
* @vdev: vdev of the device
@@ -1037,3 +1188,22 @@ bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
device_t dev_from_vnic_dev(struct vnic_dev *vdev) {
return (vdev->softc->dev);
}
+
+int vnic_dev_cmd_init(struct vnic_dev *vdev)
+{
+ int err;
+ void __iomem *res;
+
+ res = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (res) {
+ err = vnic_dev_init_devcmd2(vdev);
+ if (err)
+ device_printf(dev_from_vnic_dev(vdev),
+ "DEVCMD2 init failed, using DEVCMD1\n");
+ else
+ return (0);
+ }
+
+ err = vnic_dev_init_devcmd1(vdev);
+
+ return (err);
+}
diff --git a/sys/dev/enic/vnic_dev.h b/sys/dev/enic/vnic_dev.h
index f8ca29f4e175..66583f4d278d 100644
--- a/sys/dev/enic/vnic_dev.h
+++ b/sys/dev/enic/vnic_dev.h
@@ -38,6 +38,7 @@ struct vnic_dev_ring {
unsigned int desc_count;
unsigned int desc_avail;
unsigned int last_count;
+ iflib_dma_info_t ifdip;
};
struct vnic_dev_iomap_info {
@@ -68,7 +69,10 @@ unsigned long vnic_dev_get_res_type_len(struct vnic_dev *vdev,
enum vnic_res_type type);
unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size);
-void vnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
+ unsigned int desc_count, unsigned int desc_size);
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring);
int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
u64 *a0, u64 *a1, int wait);
int vnic_dev_cmd_args(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
@@ -143,7 +147,7 @@ struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
struct rte_pci_device *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_alloc_stats_mem(struct vnic_dev *vdev);
int vnic_dev_alloc_counter_mem(struct vnic_dev *vdev);
-int vnic_dev_cmd_init(struct vnic_dev *vdev, int fallback);
+int vnic_dev_cmd_init(struct vnic_dev *vdev);
int vnic_dev_get_size(void);
int vnic_dev_int13(struct vnic_dev *vdev, u64 arg, u32 op);
int vnic_dev_perbi(struct vnic_dev *vdev, u64 arg, u32 op);
@@ -164,6 +168,7 @@ bool vnic_dev_counter_alloc(struct vnic_dev *vdev, uint32_t *idx);
bool vnic_dev_counter_free(struct vnic_dev *vdev, uint32_t idx);
bool vnic_dev_counter_query(struct vnic_dev *vdev, uint32_t idx,
bool reset, uint64_t *packets, uint64_t *bytes);
+void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev);
device_t dev_from_vnic_dev(struct vnic_dev *vdev);
diff --git a/sys/dev/enic/vnic_intr.c b/sys/dev/enic/vnic_intr.c
index 38e2ea6e066b..8a6494efd5f3 100644
--- a/sys/dev/enic/vnic_intr.c
+++ b/sys/dev/enic/vnic_intr.c
@@ -21,7 +21,7 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
intr->ctrl = vnic_dev_get_res(vdev, RES_TYPE_INTR_CTRL, index);
if (!intr->ctrl) {
pr_err("Failed to hook INTR[%d].ctrl resource\n", index);
- return -EINVAL;
+ return (EINVAL);
}
return 0;
diff --git a/sys/dev/enic/vnic_intr.h b/sys/dev/enic/vnic_intr.h
index 22db66096aae..6d1e8e1cf050 100644
--- a/sys/dev/enic/vnic_intr.h
+++ b/sys/dev/enic/vnic_intr.h
@@ -76,7 +76,7 @@ static inline void vnic_intr_return_credits(struct vnic_intr *intr,
static inline unsigned int vnic_intr_credits(struct vnic_intr *intr)
{
- return ENIC_BUS_READ_4(intr->ctrl, INTR_CREDITS);
+ return (ENIC_BUS_READ_4(intr->ctrl, INTR_CREDITS));
}
static inline void vnic_intr_return_all_credits(struct vnic_intr *intr)
diff --git a/sys/dev/enic/vnic_resource.h b/sys/dev/enic/vnic_resource.h
index 184bfa7401df..d365b8d914ba 100644
--- a/sys/dev/enic/vnic_resource.h
+++ b/sys/dev/enic/vnic_resource.h
@@ -39,6 +39,7 @@ enum vnic_res_type {
RES_TYPE_MQ_RQ, /* MQ Receive queues */
RES_TYPE_MQ_CQ, /* MQ Completion queues */
RES_TYPE_DEPRECATED1, /* Old version of devcmd 2 */
+ RES_TYPE_DEPRECATED2, /* Old version of devcmd 2 */
RES_TYPE_DEVCMD2, /* Device control region */
RES_TYPE_MAX, /* Count of resource types */
};
diff --git a/sys/dev/enic/vnic_rq.c b/sys/dev/enic/vnic_rq.c
index 3720da5f9aa6..4c02347579b1 100644
--- a/sys/dev/enic/vnic_rq.c
+++ b/sys/dev/enic/vnic_rq.c
@@ -40,6 +40,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
fetch_index = 0;
}
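+ /* Always start posting from index 0, regardless of the hardware state. */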
+ fetch_index = 0;
vnic_rq_init_start(rq, cq_index,
fetch_index, fetch_index,
error_interrupt_enable,
@@ -50,7 +51,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int vnic_rq_error_status(struct vnic_rq *rq)
{
- return ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS);
+ return (ENIC_BUS_READ_4(rq->ctrl, RX_ERROR_STATUS));
}
void vnic_rq_enable(struct vnic_rq *rq)
@@ -73,7 +74,7 @@ int vnic_rq_disable(struct vnic_rq *rq)
pr_err("Failed to disable RQ[%d]\n", rq->index);
- return -ETIMEDOUT;
+ return (ETIMEDOUT);
}
void vnic_rq_clean(struct vnic_rq *rq)
@@ -92,6 +93,4 @@ void vnic_rq_clean(struct vnic_rq *rq)
}
ENIC_BUS_WRITE_4(rq->ctrl, RX_POSTED_INDEX, fetch_index);
-
- vnic_dev_clear_desc_ring(&rq->ring);
}
diff --git a/sys/dev/enic/vnic_rq.h b/sys/dev/enic/vnic_rq.h
index ae8c1fdc39bd..9e3d239809c4 100644
--- a/sys/dev/enic/vnic_rq.h
+++ b/sys/dev/enic/vnic_rq.h
@@ -133,7 +133,6 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
-void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
diff --git a/sys/dev/enic/vnic_rss.h b/sys/dev/enic/vnic_rss.h
index abd7b9f131aa..039041ece5b2 100644
--- a/sys/dev/enic/vnic_rss.h
+++ b/sys/dev/enic/vnic_rss.h
@@ -24,9 +24,4 @@ union vnic_rss_cpu {
u64 raw[32];
};
-void vnic_set_rss_key(union vnic_rss_key *rss_key, u8 *key);
-void vnic_set_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
-void vnic_get_rss_key(union vnic_rss_key *rss_key, u8 *key);
-void vnic_get_rss_cpu(union vnic_rss_cpu *rss_cpu, u8 *cpu);
-
#endif /* _VNIC_RSS_H_ */
diff --git a/sys/dev/enic/vnic_wq.c b/sys/dev/enic/vnic_wq.c
index b032df3392b2..1d3120798798 100644
--- a/sys/dev/enic/vnic_wq.c
+++ b/sys/dev/enic/vnic_wq.c
@@ -7,7 +7,103 @@
#include "vnic_dev.h"
#include "vnic_wq.h"
-void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
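+/*
+ * Allocate a DMA-coherent descriptor ring through iflib and keep the DMA
+ * handle so vnic_dev_free_desc_ring() can release it later.
+ */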
+int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev,
+ struct vnic_dev_ring *ring, unsigned int desc_count, unsigned int desc_size)
+{
+ iflib_dma_info_t ifdip;
+ int err;
+
+ if ((ifdip = malloc(sizeof(struct iflib_dma_info),
+ M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
+ device_printf(dev_from_vnic_dev(vdev),
+ "Unable to allocate DMA info memory\n");
+ return (ENOMEM);
+ }
+
+ err = iflib_dma_alloc(vdev->softc->ctx, desc_count * desc_size,
+ ifdip, 0);
+ if (err) {
+ device_printf(dev_from_vnic_dev(vdev),
+ "Unable to allocate DEVCMD2 descriptors\n");
+ err = ENOMEM;
+ goto err_out_alloc;
+ }
+
+ ring->base_addr = ifdip->idi_paddr;
+ ring->descs = ifdip->idi_vaddr;
+ ring->ifdip = ifdip;
+ ring->desc_size = desc_size;
+ ring->desc_count = desc_count;
+ ring->last_count = 0;
+ ring->desc_avail = ring->desc_count - 1;
+
+ ring->base_align = 512;
+ ring->size_unaligned = ring->desc_count * ring->desc_size +
+ ring->base_align;
+
+ return (err);
+
+err_out_alloc:
+ free(ifdip, M_DEVBUF);
+ return (err);
+}
+
+void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
+{
+ if (ring && ring->descs) {
+ iflib_dma_free(ring->ifdip);
+ free(ring->ifdip, M_DEVBUF);
+ ring->descs = NULL;
+ }
+}
+
+void vnic_wq_free(struct vnic_wq *wq)
+{
+ vnic_dev_free_desc_ring(wq->vdev, &wq->ring);
+ wq->ctrl = NULL;
+}
+
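+/*
+ * The devcmd2 work queue is controlled through the DEVCMD2 register
+ * resource rather than a regular WQ resource, so it is set up here
+ * instead of with the data queues.
+ */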
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size)
+{
+ int err;
+
+ wq->index = 0;
+ wq->vdev = vdev;
+
+ wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
+ if (!wq->ctrl)
+ return (EINVAL);
+ vnic_wq_disable(wq);
+ err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
+
+ return (err);
+}
+
+void vnic_dev_deinit_devcmd2(struct vnic_dev *vdev)
+{
+ if (vdev->devcmd2) {
+ vnic_wq_disable(&vdev->devcmd2->wq);
+ if (vdev->devcmd2->wq_ctrl)
+ vnic_wq_free(&vdev->devcmd2->wq);
+ if (vdev->devcmd2->result)
+ vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
+ free(vdev->devcmd2, M_DEVBUF);
+ vdev->devcmd2 = NULL;
+ }
+}
+
+int vnic_dev_deinit(struct vnic_dev *vdev)
+{
+ u64 a0 = 0, a1 = 0;
+ int wait = 1000;
+
+ return (vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait));
+}
+
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
@@ -33,7 +129,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- vnic_wq_init_start(wq, cq_index, 0, 0,
+ enic_wq_init_start(wq, cq_index, 0, 0,
error_interrupt_enable,
error_interrupt_offset);
wq->cq_pend = 0;
@@ -42,7 +138,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
unsigned int vnic_wq_error_status(struct vnic_wq *wq)
{
- return ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS);
+ return (ENIC_BUS_READ_4(wq->ctrl, TX_ERROR_STATUS));
}
void vnic_wq_enable(struct vnic_wq *wq)
@@ -65,7 +161,7 @@ int vnic_wq_disable(struct vnic_wq *wq)
pr_err("Failed to disable WQ[%d]\n", wq->index);
- return -ETIMEDOUT;
+ return (ETIMEDOUT);
}
void vnic_wq_clean(struct vnic_wq *wq)
@@ -84,6 +180,4 @@ void vnic_wq_clean(struct vnic_wq *wq)
ENIC_BUS_WRITE_4(wq->ctrl, TX_FETCH_INDEX, 0);
ENIC_BUS_WRITE_4(wq->ctrl, TX_POSTED_INDEX, 0);
ENIC_BUS_WRITE_4(wq->ctrl, TX_ERROR_STATUS, 0);
-
- vnic_dev_clear_desc_ring(&wq->ring);
}
diff --git a/sys/dev/enic/vnic_wq.h b/sys/dev/enic/vnic_wq.h
index c4f551de8441..9ef492adba24 100644
--- a/sys/dev/enic/vnic_wq.h
+++ b/sys/dev/enic/vnic_wq.h
@@ -61,6 +61,20 @@ struct vnic_wq {
uint64_t offloads;
};
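+/*
+ * State for the devcmd2 posted-command interface: a dedicated work queue
+ * for issuing commands and a results ring that is polled for completions.
+ */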
+struct devcmd2_controller {
+ struct vnic_res *wq_ctrl;
+ struct vnic_devcmd2 *cmd_ring;
+ struct devcmd2_result *result;
+ u16 next_result;
+ u16 result_size;
+ int color;
+ struct vnic_dev_ring results_ring;
+ struct vnic_res *results_ctrl;
+ struct vnic_wq wq;
+ u32 posted;
+};
+
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
{
/* how many does SW own? */
@@ -92,7 +106,7 @@ buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
}
void vnic_wq_free(struct vnic_wq *wq);
-void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
+void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
unsigned int fetch_index, unsigned int posted_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset);
@@ -104,5 +118,7 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq);
void vnic_wq_enable(struct vnic_wq *wq);
int vnic_wq_disable(struct vnic_wq *wq);
void vnic_wq_clean(struct vnic_wq *wq);
+int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
+ unsigned int desc_count, unsigned int desc_size);
#endif /* _VNIC_WQ_H_ */
diff --git a/sys/dev/eqos/if_eqos.c b/sys/dev/eqos/if_eqos.c
index 566f6ffd0a42..e07d6b48f723 100644
--- a/sys/dev/eqos/if_eqos.c
+++ b/sys/dev/eqos/if_eqos.c
@@ -117,7 +117,8 @@ eqos_miibus_readreg(device_t dev, int phy, int reg)
addr = sc->csr_clock_range |
(phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
(reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
- GMAC_MAC_MDIO_ADDRESS_GOC_READ | GMAC_MAC_MDIO_ADDRESS_GB;
+ GMAC_MAC_MDIO_ADDRESS_GOC_READ |
+ GMAC_MAC_MDIO_ADDRESS_GB;
WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);
DELAY(100);
@@ -150,7 +151,8 @@ eqos_miibus_writereg(device_t dev, int phy, int reg, int val)
addr = sc->csr_clock_range |
(phy << GMAC_MAC_MDIO_ADDRESS_PA_SHIFT) |
(reg << GMAC_MAC_MDIO_ADDRESS_RDA_SHIFT) |
- GMAC_MAC_MDIO_ADDRESS_GOC_WRITE | GMAC_MAC_MDIO_ADDRESS_GB;
+ GMAC_MAC_MDIO_ADDRESS_GOC_WRITE |
+ GMAC_MAC_MDIO_ADDRESS_GB;
WR4(sc, GMAC_MAC_MDIO_ADDRESS, addr);
DELAY(100);
@@ -195,7 +197,7 @@ eqos_miibus_statchg(device_t dev)
reg |= GMAC_MAC_CONFIGURATION_FES;
break;
case IFM_1000_T:
- case IFM_1000_SX:
+ case IFM_1000_SX:
reg &= ~GMAC_MAC_CONFIGURATION_PS;
reg &= ~GMAC_MAC_CONFIGURATION_FES;
break;
@@ -241,7 +243,7 @@ eqos_media_change(if_t ifp)
int error;
EQOS_LOCK(sc);
- error = mii_mediachg(device_get_softc(sc->miibus));
+ error = mii_mediachg(device_get_softc(sc->miibus));
EQOS_UNLOCK(sc);
return (error);
}
@@ -329,8 +331,8 @@ eqos_setup_rxdesc(struct eqos_softc *sc, int index, bus_addr_t paddr)
sc->rx.desc_ring[index].des1 = htole32((uint32_t)(paddr >> 32));
sc->rx.desc_ring[index].des2 = htole32(0);
bus_dmamap_sync(sc->rx.desc_tag, sc->rx.desc_map, BUS_DMASYNC_PREWRITE);
- sc->rx.desc_ring[index].des3 = htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC |
- EQOS_RDES3_BUF1V);
+ sc->rx.desc_ring[index].des3 =
+ htole32(EQOS_RDES3_OWN | EQOS_RDES3_IOC | EQOS_RDES3_BUF1V);
}
static int
@@ -370,8 +372,10 @@ eqos_enable_intr(struct eqos_softc *sc)
{
WR4(sc, GMAC_DMA_CHAN0_INTR_ENABLE,
- GMAC_DMA_CHAN0_INTR_ENABLE_NIE | GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
- GMAC_DMA_CHAN0_INTR_ENABLE_FBE | GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
+ GMAC_DMA_CHAN0_INTR_ENABLE_NIE |
+ GMAC_DMA_CHAN0_INTR_ENABLE_AIE |
+ GMAC_DMA_CHAN0_INTR_ENABLE_FBE |
+ GMAC_DMA_CHAN0_INTR_ENABLE_RIE |
GMAC_DMA_CHAN0_INTR_ENABLE_TIE);
}
@@ -437,13 +441,12 @@ eqos_setup_rxfilter(struct eqos_softc *sc)
eaddr = if_getlladdr(ifp);
val = eaddr[4] | (eaddr[5] << 8);
WR4(sc, GMAC_MAC_ADDRESS0_HIGH, val);
- val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) |
- (eaddr[3] << 24);
+ val = eaddr[0] | (eaddr[1] << 8) | (eaddr[2] << 16) | (eaddr[3] << 24);
WR4(sc, GMAC_MAC_ADDRESS0_LOW, val);
/* Multicast hash filters */
- WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[1]);
- WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[0]);
+ WR4(sc, GMAC_MAC_HASH_TABLE_REG0, hash[0]);
+ WR4(sc, GMAC_MAC_HASH_TABLE_REG1, hash[1]);
/* Packet filter config */
WR4(sc, GMAC_MAC_PACKET_FILTER, pfil);
@@ -456,7 +459,7 @@ eqos_reset(struct eqos_softc *sc)
int retry;
WR4(sc, GMAC_DMA_MODE, GMAC_DMA_MODE_SWR);
- for (retry = 2000; retry > 0; retry--) {
+ for (retry = 5000; retry > 0; retry--) {
DELAY(1000);
val = RD4(sc, GMAC_DMA_MODE);
if (!(val & GMAC_DMA_MODE_SWR))
@@ -491,7 +494,7 @@ eqos_init(void *if_softc)
struct eqos_softc *sc = if_softc;
if_t ifp = sc->ifp;
struct mii_data *mii = device_get_softc(sc->miibus);
- uint32_t val;
+ uint32_t val, mtl_tx_val, mtl_rx_val;
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
return;
@@ -508,13 +511,18 @@ eqos_init(void *if_softc)
val = RD4(sc, GMAC_DMA_CHAN0_CONTROL);
val &= ~GMAC_DMA_CHAN0_CONTROL_DSL_MASK;
val |= ((DESC_ALIGN - 16) / 8) << GMAC_DMA_CHAN0_CONTROL_DSL_SHIFT;
- val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
+ if (sc->pblx8)
+ val |= GMAC_DMA_CHAN0_CONTROL_PBLX8;
WR4(sc, GMAC_DMA_CHAN0_CONTROL, val);
val = RD4(sc, GMAC_DMA_CHAN0_TX_CONTROL);
+ if (sc->txpbl > 0)
+ val |= (sc->txpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
val |= GMAC_DMA_CHAN0_TX_CONTROL_OSP;
val |= GMAC_DMA_CHAN0_TX_CONTROL_START;
WR4(sc, GMAC_DMA_CHAN0_TX_CONTROL, val);
val = RD4(sc, GMAC_DMA_CHAN0_RX_CONTROL);
+ if (sc->rxpbl > 0)
+ val |= (sc->rxpbl << GMAC_DMA_CHAN0_TXRX_PBL_SHIFT);
val &= ~GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_MASK;
val |= (MCLBYTES << GMAC_DMA_CHAN0_RX_CONTROL_RBSZ_SHIFT);
val |= GMAC_DMA_CHAN0_RX_CONTROL_START;
@@ -527,11 +535,19 @@ eqos_init(void *if_softc)
GMAC_MMC_CONTROL_CNTPRSTLVL);
/* Configure operation modes */
+ if (sc->thresh_dma_mode) {
+ mtl_tx_val = sc->ttc;
+ mtl_rx_val = sc->rtc;
+ } else {
+ mtl_tx_val = GMAC_MTL_TXQ0_OPERATION_MODE_TSF;
+ mtl_rx_val = GMAC_MTL_RXQ0_OPERATION_MODE_RSF;
+ }
+
WR4(sc, GMAC_MTL_TXQ0_OPERATION_MODE,
- GMAC_MTL_TXQ0_OPERATION_MODE_TSF |
+ mtl_tx_val |
GMAC_MTL_TXQ0_OPERATION_MODE_TXQEN_EN);
WR4(sc, GMAC_MTL_RXQ0_OPERATION_MODE,
- GMAC_MTL_RXQ0_OPERATION_MODE_RSF |
+ mtl_rx_val |
GMAC_MTL_RXQ0_OPERATION_MODE_FEP |
GMAC_MTL_RXQ0_OPERATION_MODE_FUP);
@@ -713,8 +729,7 @@ eqos_rxintr(struct eqos_softc *sc)
if ((m = eqos_alloc_mbufcl(sc))) {
if ((error = eqos_setup_rxbuf(sc, sc->rx.head, m)))
printf("ERROR: Hole in RX ring!!\n");
- }
- else
+ } else
if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
@@ -883,12 +898,10 @@ eqos_ioctl(if_t ifp, u_long cmd, caddr_t data)
eqos_setup_rxfilter(sc);
EQOS_UNLOCK(sc);
}
- }
- else {
+ } else {
eqos_init(sc);
}
- }
- else {
+ } else {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
eqos_stop(sc);
}
@@ -995,39 +1008,55 @@ eqos_setup_dma(struct eqos_softc *sc)
int error, i;
/* Set up TX descriptor ring, descriptors, and dma maps */
- if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
- DESC_ALIGN, DESC_BOUNDARY,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR, NULL, NULL,
- TX_DESC_SIZE, 1, TX_DESC_SIZE, 0,
- NULL, NULL, &sc->tx.desc_tag))) {
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag */
+ DESC_ALIGN, DESC_BOUNDARY, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filterfunc, filterarg */
+ TX_DESC_SIZE, 1, /* maxsize, nsegs */
+ TX_DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->tx.desc_tag);
+ if (error != 0) {
device_printf(sc->dev, "could not create TX ring DMA tag\n");
return (error);
}
- if ((error = bus_dmamem_alloc(sc->tx.desc_tag,
+ error = bus_dmamem_alloc(sc->tx.desc_tag,
(void**)&sc->tx.desc_ring,
BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
- &sc->tx.desc_map))) {
+ &sc->tx.desc_map);
+ if (error != 0) {
device_printf(sc->dev,
"could not allocate TX descriptor ring.\n");
return (error);
}
- if ((error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
- sc->tx.desc_ring,
- TX_DESC_SIZE, eqos_get1paddr, &sc->tx.desc_ring_paddr, 0))) {
+
+ error = bus_dmamap_load(sc->tx.desc_tag, sc->tx.desc_map,
+ sc->tx.desc_ring, TX_DESC_SIZE,
+ eqos_get1paddr, &sc->tx.desc_ring_paddr,
+ 0);
+ if (error != 0) {
device_printf(sc->dev,
"could not load TX descriptor ring map.\n");
return (error);
}
- if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR, NULL, NULL,
- MCLBYTES*TX_MAX_SEGS, TX_MAX_SEGS,
- MCLBYTES, 0, NULL, NULL,
- &sc->tx.buf_tag))) {
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filterfunc, filterarg */
+ MCLBYTES*TX_MAX_SEGS, TX_MAX_SEGS, /* maxsize, nsegs */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->tx.buf_tag);
+ if (error != 0) {
device_printf(sc->dev, "could not create TX buffer DMA tag.\n");
return (error);
}
@@ -1042,39 +1071,54 @@ eqos_setup_dma(struct eqos_softc *sc)
}
/* Set up RX descriptor ring, descriptors, dma maps, and mbufs */
- if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
- DESC_ALIGN, DESC_BOUNDARY,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR, NULL, NULL,
- RX_DESC_SIZE, 1, RX_DESC_SIZE, 0,
- NULL, NULL, &sc->rx.desc_tag))) {
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag */
+ DESC_ALIGN, DESC_BOUNDARY, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filterfunc, filterarg */
+ RX_DESC_SIZE, 1, /* maxsize, nsegs */
+ RX_DESC_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rx.desc_tag);
+ if (error != 0) {
device_printf(sc->dev, "could not create RX ring DMA tag.\n");
return (error);
}
- if ((error = bus_dmamem_alloc(sc->rx.desc_tag,
+ error = bus_dmamem_alloc(sc->rx.desc_tag,
(void **)&sc->rx.desc_ring,
BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
- &sc->rx.desc_map))) {
+ &sc->rx.desc_map);
+ if (error != 0) {
device_printf(sc->dev,
"could not allocate RX descriptor ring.\n");
return (error);
}
- if ((error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
- sc->rx.desc_ring, RX_DESC_SIZE, eqos_get1paddr,
- &sc->rx.desc_ring_paddr, 0))) {
+ error = bus_dmamap_load(sc->rx.desc_tag, sc->rx.desc_map,
+ sc->rx.desc_ring, RX_DESC_SIZE,
+ eqos_get1paddr, &sc->rx.desc_ring_paddr,
+ 0);
+ if (error != 0) {
device_printf(sc->dev,
"could not load RX descriptor ring map.\n");
return (error);
}
- if ((error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
- BUS_SPACE_MAXADDR_32BIT,
- BUS_SPACE_MAXADDR, NULL, NULL,
- MCLBYTES, 1,
- MCLBYTES, 0, NULL, NULL,
- &sc->rx.buf_tag))) {
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev), /* Parent tag */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filterfunc, filterarg */
+ MCLBYTES, 1, /* maxsize, nsegs */
+ MCLBYTES, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->rx.buf_tag);
+ if (error != 0) {
device_printf(sc->dev, "could not create RX buf DMA tag.\n");
return (error);
}
@@ -1112,6 +1156,14 @@ eqos_attach(device_t dev)
int error;
int n;
+ /* default values */
+ sc->thresh_dma_mode = false;
+ sc->pblx8 = true;
+ sc->txpbl = 0;
+ sc->rxpbl = 0;
+ sc->ttc = 0x10;
+ sc->rtc = 0;
+
/* setup resources */
if (bus_alloc_resources(dev, eqos_spec, sc->res)) {
device_printf(dev, "Could not allocate resources\n");
@@ -1128,7 +1180,7 @@ eqos_attach(device_t dev)
GMAC_MAC_VERSION_USERVER_SHIFT;
snpsver = ver & GMAC_MAC_VERSION_SNPSVER_MASK;
- if (snpsver != 0x51) {
+ if (snpsver != 0x51 && snpsver != 0x52) {
device_printf(dev, "EQOS version 0x%02x not supported\n",
snpsver);
return (ENXIO);
@@ -1216,8 +1268,6 @@ eqos_detach(device_t dev)
ether_ifdetach(sc->ifp);
}
- if (sc->miibus)
- device_delete_child(dev, sc->miibus);
bus_generic_detach(dev);
if (sc->irq_handle)
diff --git a/sys/dev/eqos/if_eqos_reg.h b/sys/dev/eqos/if_eqos_reg.h
index f9e7f9368cf2..fe7440bd19c7 100644
--- a/sys/dev/eqos/if_eqos_reg.h
+++ b/sys/dev/eqos/if_eqos_reg.h
@@ -241,6 +241,7 @@
#define GMAC_DMA_CHAN0_RX_END_ADDR 0x1128
#define GMAC_DMA_CHAN0_TX_RING_LEN 0x112C
#define GMAC_DMA_CHAN0_RX_RING_LEN 0x1130
+#define GMAC_DMA_CHAN0_TXRX_PBL_SHIFT 16
#define GMAC_DMA_CHAN0_INTR_ENABLE 0x1134
#define GMAC_DMA_CHAN0_INTR_ENABLE_NIE (1U << 15)
#define GMAC_DMA_CHAN0_INTR_ENABLE_AIE (1U << 14)
diff --git a/sys/dev/eqos/if_eqos_starfive.c b/sys/dev/eqos/if_eqos_starfive.c
new file mode 100644
index 000000000000..62f8b3f38983
--- /dev/null
+++ b/sys/dev/eqos/if_eqos_starfive.c
@@ -0,0 +1,219 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
+ */
+
+#include "opt_platform.h"
+#include <sys/cdefs.h>
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/gpio.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <machine/bus.h>
+
+#include <net/if.h>
+#include <net/if_media.h>
+#include <dev/mii/mii.h>
+#include <dev/mii/miivar.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/hwreset/hwreset.h>
+#include <dev/regulator/regulator.h>
+
+#include <dev/eqos/if_eqos_var.h>
+
+#include "if_eqos_if.h"
+#include "gpio_if.h"
+
+#include <dev/clk/clk.h>
+
+/* JH7110's board specific code for eqos Ethernet controller driver */
+
+#define JH7110_CSR_FREQ 198000000
+
+#define WR4(sc, o, v) bus_write_4(sc->base.res[EQOS_RES_MEM], (o), (v))
+
+static const struct ofw_compat_data compat_data[] = {
+ {"starfive,jh7110-dwmac", 1},
+ { NULL, 0}
+};
+
+struct if_eqos_starfive_softc {
+ struct eqos_softc base;
+ clk_t gtx;
+ clk_t tx;
+ clk_t stmmaceth;
+ clk_t pclk;
+};
+
+static int
+if_eqos_starfive_set_speed(device_t dev, int speed)
+{
+ struct if_eqos_starfive_softc *sc = device_get_softc(dev);
+ uint64_t freq;
+ int err;
+
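+ /* The GTX clock must match the RGMII line rate: 125/25/2.5 MHz. */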
+ switch (speed) {
+ case IFM_1000_T:
+ case IFM_1000_SX:
+ freq = 125000000;
+ break;
+ case IFM_100_TX:
+ freq = 25000000;
+ break;
+ case IFM_10_T:
+ freq = 2500000;
+ break;
+ default:
+ device_printf(dev, "unsupported media %d\n", speed);
+ return (EINVAL);
+ }
+
+ clk_set_freq(sc->gtx, freq, 0);
+ err = clk_enable(sc->gtx);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->gtx));
+ }
+
+ return (0);
+}
+
+static int
+if_eqos_starfive_clk_init(device_t dev)
+{
+ struct if_eqos_starfive_softc *sc = device_get_softc(dev);
+ int err;
+
+ if (clk_get_by_ofw_name(dev, 0, "gtx", &sc->gtx) != 0) {
+ device_printf(sc->base.dev, "could not get gtx clock\n");
+ return (ENXIO);
+ }
+
+ if (clk_get_by_ofw_name(dev, 0, "tx", &sc->tx) == 0) {
+ err = clk_enable(sc->tx);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->tx));
+ }
+ }
+ if (clk_get_by_ofw_name(dev, 0, "stmmaceth", &sc->stmmaceth) == 0) {
+ err = clk_enable(sc->stmmaceth);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->stmmaceth));
+ }
+ }
+ if (clk_get_by_ofw_name(dev, 0, "pclk", &sc->pclk) == 0) {
+ err = clk_enable(sc->pclk);
+ if (err != 0) {
+ device_printf(dev, "Could not enable clock %s\n",
+ clk_get_name(sc->pclk));
+ }
+ }
+
+ return (0);
+}
+
+static int
+if_eqos_starfive_init(device_t dev)
+{
+ struct if_eqos_starfive_softc *sc = device_get_softc(dev);
+ hwreset_t rst_ahb, rst_stmmaceth;
+ phandle_t node;
+
+ node = ofw_bus_get_node(dev);
+
+ sc->base.ttc = 0x10;
+ sc->base.rtc = 0;
+
+ if (OF_hasprop(node, "snps,force_thresh_dma_mode"))
+ sc->base.thresh_dma_mode = true;
+
+ if (OF_hasprop(node, "snps,no-pbl-x8"))
+ sc->base.pblx8 = false;
+
+ if (OF_hasprop(node, "snps,txpbl")) {
+ OF_getencprop(node, "snps,txpbl", &sc->base.txpbl,
+ sizeof(sc->base.txpbl));
+ }
+ if (OF_hasprop(node, "snps,rxpbl")) {
+ OF_getencprop(node, "snps,rxpbl", &sc->base.rxpbl,
+ sizeof(sc->base.rxpbl));
+ }
+
+ if (hwreset_get_by_ofw_name(dev, 0, "ahb", &rst_ahb)) {
+ device_printf(dev, "Cannot get ahb reset\n");
+ return (ENXIO);
+ }
+ if (hwreset_assert(rst_ahb) != 0) {
+ device_printf(dev, "Cannot assert ahb reset\n");
+ return (ENXIO);
+ }
+
+ if (hwreset_get_by_ofw_name(dev, 0, "stmmaceth", &rst_stmmaceth)) {
+ device_printf(dev, "Cannot get stmmaceth reset\n");
+ return (ENXIO);
+ }
+ if (hwreset_assert(rst_stmmaceth) != 0) {
+ device_printf(dev, "Cannot assert stmmaceth reset\n");
+ return (ENXIO);
+ }
+
+ sc->base.csr_clock = JH7110_CSR_FREQ;
+ sc->base.csr_clock_range = GMAC_MAC_MDIO_ADDRESS_CR_150_250;
+
+ if (if_eqos_starfive_clk_init(dev) != 0) {
+ device_printf(dev, "Clock initialization failed\n");
+ return (ENXIO);
+ }
+ if (hwreset_deassert(rst_ahb) != 0) {
+ device_printf(dev, "Cannot deassert rst_ahb\n");
+ return (ENXIO);
+ }
+ if (hwreset_deassert(rst_stmmaceth) != 0) {
+ device_printf(dev, "Cannot deassert rst_stmmaceth\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+eqos_starfive_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "DesignWare EQOS Gigabit Ethernet for JH7110");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static device_method_t eqos_starfive_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, eqos_starfive_probe),
+
+ /* EQOS interface */
+ DEVMETHOD(if_eqos_init, if_eqos_starfive_init),
+ DEVMETHOD(if_eqos_set_speed, if_eqos_starfive_set_speed),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(eqos, eqos_starfive_driver, eqos_starfive_methods,
+ sizeof(struct if_eqos_starfive_softc), eqos_driver);
+DRIVER_MODULE(eqos_starfive, simplebus, eqos_starfive_driver, 0, 0);
diff --git a/sys/dev/eqos/if_eqos_var.h b/sys/dev/eqos/if_eqos_var.h
index c21a703747ec..892b15ba589e 100644
--- a/sys/dev/eqos/if_eqos_var.h
+++ b/sys/dev/eqos/if_eqos_var.h
@@ -85,6 +85,13 @@ struct eqos_softc {
bool link_up;
int tx_watchdog;
+ bool thresh_dma_mode;
+ bool pblx8;
+ uint32_t txpbl;
+ uint32_t rxpbl;
+ uint32_t ttc;
+ uint32_t rtc;
+
struct ifnet *ifp;
device_t miibus;
struct mtx lock;
diff --git a/sys/dev/et/if_et.c b/sys/dev/et/if_et.c
index e6d73ab3c860..399c9fa77989 100644
--- a/sys/dev/et/if_et.c
+++ b/sys/dev/et/if_et.c
@@ -231,11 +231,6 @@ et_attach(device_t dev)
callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
/*
* Initialize tunables
@@ -375,8 +370,6 @@ et_detach(device_t dev)
callout_drain(&sc->sc_tick);
}
- if (sc->sc_miibus != NULL)
- device_delete_child(dev, sc->sc_miibus);
bus_generic_detach(dev);
if (sc->sc_irq_handle != NULL)
diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_main.c b/sys/dev/etherswitch/ar40xx/ar40xx_main.c
index 757dbe23071e..d5636d26120b 100644
--- a/sys/dev/etherswitch/ar40xx/ar40xx_main.c
+++ b/sys/dev/etherswitch/ar40xx/ar40xx_main.c
@@ -254,22 +254,23 @@ static int
ar40xx_detach(device_t dev)
{
struct ar40xx_softc *sc = device_get_softc(dev);
- int i;
+ int error, i;
device_printf(sc->sc_dev, "%s: called\n", __func__);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
callout_drain(&sc->sc_phy_callout);
/* Free PHYs */
for (i = 0; i < AR40XX_NUM_PHYS; i++) {
- if (sc->sc_phys.miibus[i] != NULL)
- device_delete_child(dev, sc->sc_phys.miibus[i]);
if (sc->sc_phys.ifp[i] != NULL)
if_free(sc->sc_phys.ifp[i]);
free(sc->sc_phys.ifname[i], M_DEVBUF);
}
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
@@ -482,9 +483,9 @@ ar40xx_attach(device_t dev)
/* Attach PHYs */
ret = ar40xx_attach_phys(sc);
- ret = bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- ret = bus_generic_attach(dev);
+ bus_attach_children(dev);
/* Start timer */
callout_init_mtx(&sc->sc_phy_callout, &sc->sc_mtx, 0);
diff --git a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
index b9a308e3620d..aa02ef25ac7b 100644
--- a/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
+++ b/sys/dev/etherswitch/ar40xx/ar40xx_phy.c
@@ -200,17 +200,9 @@ ar40xx_attach_phys(struct ar40xx_softc *sc)
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < AR40XX_NUM_PHYS; phy++) {
sc->sc_phys.ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->sc_phys.ifp[phy] == NULL) {
- device_printf(sc->sc_dev,
- "PHY %d: couldn't allocate ifnet structure\n",
- phy);
- err = ENOMEM;
- break;
- }
-
- sc->sc_phys.ifp[phy]->if_softc = sc;
- sc->sc_phys.ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
+ if_setsoftc(sc->sc_phys.ifp[phy], sc);
+ if_setflagbits(sc->sc_phys.ifp[phy], IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
sc->sc_phys.ifname[phy] = malloc(strlen(name)+1, M_DEVBUF,
M_WAITOK);
bcopy(name, sc->sc_phys.ifname[phy], strlen(name)+1);
@@ -223,7 +215,7 @@ ar40xx_attach_phys(struct ar40xx_softc *sc)
device_printf(sc->sc_dev,
"%s attached to pseudo interface %s\n",
device_get_nameunit(sc->sc_phys.miibus[phy]),
- sc->sc_phys.ifp[phy]->if_xname);
+ if_name(sc->sc_phys.ifp[phy]));
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c
index 1c2bcefdfe91..c53c82f1750c 100644
--- a/sys/dev/etherswitch/arswitch/arswitch.c
+++ b/sys/dev/etherswitch/arswitch/arswitch.c
@@ -95,7 +95,7 @@ arswitch_probe(device_t dev)
{
struct arswitch_softc *sc;
uint32_t id;
- char *chipname, desc[256];
+ char *chipname;
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
@@ -132,12 +132,9 @@ arswitch_probe(device_t dev)
DPRINTF(sc, ARSWITCH_DBG_ANY, "chipname=%s, id=%08x\n", chipname, id);
if (chipname != NULL) {
- snprintf(desc, sizeof(desc),
+ device_set_descf(dev,
"Atheros %s Ethernet Switch (ver %d rev %d)",
- chipname,
- sc->chip_ver,
- sc->chip_rev);
- device_set_desc_copy(dev, desc);
+ chipname, sc->chip_ver, sc->chip_rev);
return (BUS_PROBE_DEFAULT);
}
return (ENXIO);
@@ -153,12 +150,6 @@ arswitch_attach_phys(struct arswitch_softc *sc)
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->sc_dev));
for (phy = 0; phy < sc->numphys; phy++) {
sc->ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->ifp[phy] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[phy], sc);
if_setflagbits(sc->ifp[phy], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
@@ -172,7 +163,7 @@ arswitch_attach_phys(struct arswitch_softc *sc)
#if 0
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(sc->miibus[phy]),
- sc->ifp[phy]->if_xname);
+ if_name(sc->ifp[phy]));
#endif
if (err != 0) {
device_printf(sc->sc_dev,
@@ -658,14 +649,9 @@ arswitch_attach(device_t dev)
return (err);
}
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0) {
- DPRINTF(sc, ARSWITCH_DBG_ANY,
- "%s: bus_generic_attach: err=%d\n", __func__, err);
- return (err);
- }
+ bus_attach_children(dev);
callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0);
@@ -680,13 +666,15 @@ static int
arswitch_detach(device_t dev)
{
struct arswitch_softc *sc = device_get_softc(dev);
- int i;
+ int error, i;
callout_drain(&sc->callout_tick);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
for (i=0; i < sc->numphys; i++) {
- if (sc->miibus[i] != NULL)
- device_delete_child(dev, sc->miibus[i]);
if (sc->ifp[i] != NULL)
if_free(sc->ifp[i]);
free(sc->ifname[i], M_DEVBUF);
@@ -694,7 +682,6 @@ arswitch_detach(device_t dev)
free(sc->atu.entries, M_DEVBUF);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/etherswitch/e6000sw/e6000sw.c b/sys/dev/etherswitch/e6000sw/e6000sw.c
index 95f1a2e96db6..7e9193f4ba47 100644
--- a/sys/dev/etherswitch/e6000sw/e6000sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6000sw.c
@@ -51,7 +51,7 @@
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>
#else
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#endif
#include "e6000swreg.h"
@@ -89,6 +89,7 @@ typedef struct e6000sw_softc {
device_t miibus[E6000SW_MAX_PORTS];
struct taskqueue *sc_tq;
struct timeout_task sc_tt;
+ bool is_shutdown;
int vlans[E6000SW_NUM_VLANS];
uint32_t swid;
@@ -195,17 +196,17 @@ DEFINE_CLASS_0(e6000sw, e6000sw_driver, e6000sw_methods,
sizeof(e6000sw_softc_t));
DRIVER_MODULE(e6000sw, mdio, e6000sw_driver, 0, 0);
-DRIVER_MODULE(etherswitch, e6000sw, etherswitch_driver, 0, 0);
DRIVER_MODULE(miibus, e6000sw, miibus_driver, 0, 0);
+DRIVER_MODULE_ORDERED(etherswitch, e6000sw, etherswitch_driver, 0, 0, SI_ORDER_ANY);
MODULE_DEPEND(e6000sw, mdio, 1, 1, 1);
-
+MODULE_DEPEND(e6000sw, etherswitch, 1, 1, 1);
static void
e6000sw_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "e6000sw", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "e6000sw", -1);
+ if (device_find_child(parent, "e6000sw", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "e6000sw", DEVICE_UNIT_ANY);
}
static int
@@ -216,7 +217,8 @@ e6000sw_probe(device_t dev)
#ifdef FDT
phandle_t switch_node;
#else
- int is_6190;
+ int is_6190 = 0;
+ int is_6190x = 0;
#endif
sc = device_get_softc(dev);
@@ -252,15 +254,25 @@ e6000sw_probe(device_t dev)
device_get_unit(sc->dev), "addr", &sc->sw_addr) != 0)
return (ENXIO);
if (resource_int_value(device_get_name(sc->dev),
- device_get_unit(sc->dev), "is6190", &is_6190) != 0)
+ device_get_unit(sc->dev), "is6190", &is_6190) != 0) {
/*
* Check "is8190" to keep backward compatibility with
* older setups.
*/
resource_int_value(device_get_name(sc->dev),
device_get_unit(sc->dev), "is8190", &is_6190);
+ }
+ resource_int_value(device_get_name(sc->dev),
+ device_get_unit(sc->dev), "is6190x", &is_6190x);
+ if (is_6190 != 0 && is_6190x != 0) {
+ device_printf(dev,
+ "Cannot configure conflicting variants (6190 / 6190x)\n");
+ return (ENXIO);
+ }
if (is_6190 != 0)
sc->swid = MV88E6190;
+ else if (is_6190x != 0)
+ sc->swid = MV88E6190X;
#endif
if (sc->sw_addr < 0 || sc->sw_addr > 32)
return (ENXIO);
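On non-FDT kernels the knobs read above come from device hints, so a board
with the new variant would be described along these lines in
/boot/device.hints (unit and address values are illustrative):

hint.e6000sw.0.addr=0
hint.e6000sw.0.is6190x=1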
@@ -302,6 +314,10 @@ e6000sw_probe(device_t dev)
description = "Marvell 88E6190";
sc->num_ports = 11;
break;
+ case MV88E6190X:
+ description = "Marvell 88E6190X";
+ sc->num_ports = 11;
+ break;
default:
device_printf(dev, "Unrecognized device, id 0x%x.\n", sc->swid);
return (ENXIO);
@@ -332,7 +348,7 @@ e6000sw_parse_fixed_link(e6000sw_softc_t *sc, phandle_t node, uint32_t port)
return (ENXIO);
}
if (speed == 2500 && (MVSWITCH(sc, MV88E6141) ||
- MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190)))
+ MVSWITCH(sc, MV88E6341) || MVSWITCH(sc, MV88E6190) || MVSWITCH(sc, MV88E6190X)))
sc->fixed25_mask |= (1 << port);
}
@@ -454,8 +470,6 @@ e6000sw_init_interface(e6000sw_softc_t *sc, int port)
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL)
- return (ENOMEM);
if_setsoftc(sc->ifp[port], sc);
if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
@@ -598,22 +612,26 @@ e6000sw_attach(device_t dev)
reg |= PSC_CONTROL_SPD2500;
else
reg |= PSC_CONTROL_SPD1000;
- if (MVSWITCH(sc, MV88E6190) &&
+ if ((MVSWITCH(sc, MV88E6190) ||
+ MVSWITCH(sc, MV88E6190X)) &&
e6000sw_is_fixed25port(sc, port))
reg |= PSC_CONTROL_ALT_SPD;
reg |= PSC_CONTROL_FORCED_DPX | PSC_CONTROL_FULLDPX |
PSC_CONTROL_FORCED_LINK | PSC_CONTROL_LINK_UP |
PSC_CONTROL_FORCED_SPD;
- if (!MVSWITCH(sc, MV88E6190))
+ if (!MVSWITCH(sc, MV88E6190) &&
+ !MVSWITCH(sc, MV88E6190X))
reg |= PSC_CONTROL_FORCED_FC | PSC_CONTROL_FC_ON;
if (MVSWITCH(sc, MV88E6141) ||
MVSWITCH(sc, MV88E6341) ||
- MVSWITCH(sc, MV88E6190))
+ MVSWITCH(sc, MV88E6190) ||
+ MVSWITCH(sc, MV88E6190X))
reg |= PSC_CONTROL_FORCED_EEE;
e6000sw_writereg(sc, REG_PORT(sc, port), PSC_CONTROL,
reg);
/* Power on the SERDES interfaces. */
- if (MVSWITCH(sc, MV88E6190) &&
+ if ((MVSWITCH(sc, MV88E6190) ||
+ MVSWITCH(sc, MV88E6190X)) &&
(port == 9 || port == 10)) {
if (e6000sw_is_fixed25port(sc, port))
sgmii = false;
@@ -644,14 +662,15 @@ e6000sw_attach(device_t dev)
device_printf(dev, "switch is ready.\n");
E6000SW_UNLOCK(sc);
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz);
return (0);
out_fail:
+ E6000SW_UNLOCK(sc);
e6000sw_detach(dev);
return (err);
@@ -847,19 +866,26 @@ e6000sw_writephy_locked(device_t dev, int phy, int reg, int data)
static int
e6000sw_detach(device_t dev)
{
- int phy;
+ int error, phy;
e6000sw_softc_t *sc;
sc = device_get_softc(dev);
- if (device_is_attached(dev))
- taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
+ E6000SW_LOCK(sc);
+ sc->is_shutdown = true;
+ if (sc->sc_tq != NULL) {
+ while (taskqueue_cancel_timeout(sc->sc_tq, &sc->sc_tt, NULL) != 0)
+ taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
+ }
+ E6000SW_UNLOCK(sc);
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (sc->sc_tq != NULL)
taskqueue_free(sc->sc_tq);
- device_delete_children(dev);
-
sx_destroy(&sc->sx);
for (phy = 0; phy < sc->num_ports; phy++) {
if (sc->ifp[phy] != NULL)
@@ -1376,11 +1402,17 @@ e6000sw_getvgroup(device_t dev, etherswitch_vlangroup_t *vg)
static __inline struct mii_data*
e6000sw_miiforphy(e6000sw_softc_t *sc, unsigned int phy)
{
+ device_t mii_dev;
if (!e6000sw_is_phyport(sc, phy))
return (NULL);
+ mii_dev = sc->miibus[phy];
+ if (mii_dev == NULL)
+ return (NULL);
+ if (device_get_state(mii_dev) != DS_ATTACHED)
+ return (NULL);
- return (device_get_softc(sc->miibus[phy]));
+ return (device_get_softc(mii_dev));
}
static int
@@ -1583,6 +1615,12 @@ e6000sw_tick(void *arg, int p __unused)
E6000SW_LOCK_ASSERT(sc, SA_UNLOCKED);
E6000SW_LOCK(sc);
+
+ if (sc->is_shutdown) {
+ E6000SW_UNLOCK(sc);
+ return;
+ }
+
for (port = 0; port < sc->num_ports; port++) {
/* Tick only on PHY ports */
if (!e6000sw_is_portenabled(sc, port) ||
@@ -1600,6 +1638,17 @@ e6000sw_tick(void *arg, int p __unused)
&mii->mii_media_status, &mii->mii_media_active);
LIST_FOREACH(miisc, &mii->mii_phys, mii_list) {
+			/*
+			 * Note: ifm_cur is sometimes NULL during PHY
+			 * enumeration, although that shouldn't be
+			 * happening /after/ tick runs. To work
+			 * around this while the problem is being
+			 * debugged, just NULL-check it here and
+			 * continue.
+			 */
+ if (mii->mii_media.ifm_cur == NULL)
+ continue;
+
if (IFM_INST(mii->mii_media.ifm_cur->ifm_media)
!= miisc->mii_inst)
continue;
@@ -1607,6 +1656,7 @@ e6000sw_tick(void *arg, int p __unused)
}
}
E6000SW_UNLOCK(sc);
+ taskqueue_enqueue_timeout(sc->sc_tq, &sc->sc_tt, hz);
}
static void
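The detach/tick interplay above deserves a note: the tick task now re-arms
itself with taskqueue_enqueue_timeout(), so detach must both stop new
enqueues (the is_shutdown flag, checked under the lock at the top of
e6000sw_tick()) and win the race against an instance that is already
running. The cancel-then-drain loop handles the second part. Condensed from
the detach path above, not a drop-in replacement:

E6000SW_LOCK(sc);
sc->is_shutdown = true;			/* tick bails out from now on */
if (sc->sc_tq != NULL) {
	/*
	 * taskqueue_cancel_timeout() returns non-zero while the task is
	 * still running; drain that instance and retry until the cancel
	 * sticks, at which point no further reschedule can happen.
	 */
	while (taskqueue_cancel_timeout(sc->sc_tq, &sc->sc_tt, NULL) != 0)
		taskqueue_drain_timeout(sc->sc_tq, &sc->sc_tt);
}
E6000SW_UNLOCK(sc);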
diff --git a/sys/dev/etherswitch/e6000sw/e6000swreg.h b/sys/dev/etherswitch/e6000sw/e6000swreg.h
index 7c952052a401..ec4503faeec5 100644
--- a/sys/dev/etherswitch/e6000sw/e6000swreg.h
+++ b/sys/dev/etherswitch/e6000sw/e6000swreg.h
@@ -47,6 +47,7 @@ struct atu_opt {
#define MV88E6172 0x1720
#define MV88E6176 0x1760
#define MV88E6190 0x1900
+#define MV88E6190X 0x0a00
#define MVSWITCH(_sc, id) ((_sc)->swid == (id))
#define MVSWITCH_MULTICHIP(_sc) ((_sc)->sw_addr != 0)
@@ -56,7 +57,7 @@ struct atu_opt {
*/
#define REG_GLOBAL 0x1b
#define REG_GLOBAL2 0x1c
-#define REG_PORT(_sc, p) ((MVSWITCH((_sc), MV88E6190) ? 0 : 0x10) + (p))
+#define REG_PORT(_sc, p) (((MVSWITCH((_sc), MV88E6190) || MVSWITCH((_sc), MV88E6190X)) ? 0 : 0x10) + (p))
#define REG_NUM_MAX 31
@@ -138,13 +139,13 @@ struct atu_opt {
#define VTU_DATA 7
#define VTU_DATA2 8
-#define VTU_FID_MASK(_sc) (MVSWITCH((_sc), MV88E6190) ? 0xfff : 0xff)
+#define VTU_FID_MASK(_sc) ((MVSWITCH((_sc), MV88E6190) || MVSWITCH((_sc), MV88E6190X)) ? 0xfff : 0xff)
#define VTU_FID_POLICY (1 << 12)
#define VTU_PORT_UNMODIFIED 0
#define VTU_PORT_UNTAGGED 1
#define VTU_PORT_TAGGED 2
#define VTU_PORT_DISCARD 3
-#define VTU_PPREG(_sc) (MVSWITCH((_sc), MV88E6190) ? 8 : 4)
+#define VTU_PPREG(_sc) ((MVSWITCH((_sc), MV88E6190) || MVSWITCH((_sc), MV88E6190X)) ? 8 : 4)
#define VTU_PORT(_sc, p) (((p) % VTU_PPREG(_sc)) * (16 / VTU_PPREG(_sc)))
#define VTU_PORT_MASK 3
#define VTU_BUSY (1 << 15)
@@ -174,7 +175,7 @@ struct atu_opt {
#define ATU_MAC_ADDR45 15
#define ATU_DATA_LAG (1 << 15)
-#define ATU_PORT_MASK(_sc) (MVSWITCH((_sc), MV88E6190) ? 0xfff0 : 0xff0)
+#define ATU_PORT_MASK(_sc) ((MVSWITCH((_sc), MV88E6190) || MVSWITCH((_sc), MV88E6190X)) ? 0xfff0 : 0xff0)
#define ATU_PORT_SHIFT 4
#define ATU_LAG_MASK 0xf0
#define ATU_LAG_SHIFT 4
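Every 88E6190 check in the driver and in these macros now has to test two
IDs. A possible follow-up cleanup (not part of this change) would be to
centralize the pairing in a single predicate and rewrite the macros in terms
of it:

/* Hypothetical helper; the driver currently open-codes both tests. */
static inline bool
e6000sw_is_6190_family(const e6000sw_softc_t *sc)
{

	return (MVSWITCH(sc, MV88E6190) || MVSWITCH(sc, MV88E6190X));
}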
diff --git a/sys/dev/etherswitch/e6000sw/e6060sw.c b/sys/dev/etherswitch/e6000sw/e6060sw.c
index 19744f5e44a5..0af71692091c 100644
--- a/sys/dev/etherswitch/e6000sw/e6060sw.c
+++ b/sys/dev/etherswitch/e6000sw/e6060sw.c
@@ -162,7 +162,6 @@ e6060sw_probe(device_t dev)
struct e6060sw_softc *sc;
int devid, i;
char *devname;
- char desc[80];
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
@@ -193,9 +192,8 @@ e6060sw_probe(device_t dev)
else
return (ENXIO);
- sprintf(desc, "Marvell %s MDIO switch driver at 0x%02x",
+ device_set_descf(dev, "Marvell %s MDIO switch driver at 0x%02x",
devname, sc->smi_offset);
- device_set_desc_copy(dev, desc);
return (BUS_PROBE_DEFAULT);
}
@@ -216,15 +214,9 @@ e6060sw_attach_phys(struct e6060sw_softc *sc)
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
- sc->ifp[port]->if_softc = sc;
- sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
+ if_setsoftc(sc->ifp[port], sc);
+ if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_E6060SW,
M_WAITOK | M_ZERO);
@@ -233,7 +225,7 @@ e6060sw_attach_phys(struct e6060sw_softc *sc)
BMSR_DEFCAPMASK, phy + sc->smi_offset, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
- sc->ifp[port]->if_xname);
+ if_name(sc->ifp[port]));
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
@@ -314,11 +306,9 @@ e6060sw_attach(device_t dev)
if (err != 0)
return (err);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- return (err);
+ bus_attach_children(dev);
callout_init(&sc->callout_tick, 0);
@@ -331,18 +321,20 @@ static int
e6060sw_detach(device_t dev)
{
struct e6060sw_softc *sc;
- int i, port;
+ int error, i, port;
sc = device_get_softc(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
callout_drain(&sc->callout_tick);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = e6060sw_portforphy(sc, i);
- if (sc->miibus[port] != NULL)
- device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_E6060SW);
@@ -354,7 +346,6 @@ e6060sw_detach(device_t dev)
free(sc->ifname, M_E6060SW);
free(sc->ifp, M_E6060SW);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/etherswitch/etherswitch.c b/sys/dev/etherswitch/etherswitch.c
index 74afcd259007..ba46d8b2299d 100644
--- a/sys/dev/etherswitch/etherswitch.c
+++ b/sys/dev/etherswitch/etherswitch.c
@@ -82,8 +82,8 @@ static struct cdevsw etherswitch_cdevsw = {
static void
etherswitch_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "etherswitch", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "etherswitch", -1);
+ if (device_find_child(parent, "etherswitch", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "etherswitch", DEVICE_UNIT_ANY);
}
static int
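This is the canonical shape of an identify method after the DEVICE_UNIT_ANY
conversion: the magic -1 unit number is gone, but the semantics (let the bus
pick a free unit, and add the child at most once per parent) are unchanged.
A generic sketch with a placeholder driver name:

static void
mydev_identify(driver_t *driver, device_t parent)
{

	/* Add at most one "mydev" instance under this parent. */
	if (device_find_child(parent, "mydev", DEVICE_UNIT_ANY) == NULL)
		BUS_ADD_CHILD(parent, 0, "mydev", DEVICE_UNIT_ANY);
}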
diff --git a/sys/dev/etherswitch/felix/felix.c b/sys/dev/etherswitch/felix/felix.c
index 92d654961f50..098767ee063e 100644
--- a/sys/dev/etherswitch/felix/felix.c
+++ b/sys/dev/etherswitch/felix/felix.c
@@ -240,9 +240,6 @@ felix_init_interface(felix_softc_t sc, int port)
snprintf(name, IFNAMSIZ, "%sport", device_get_nameunit(sc->dev));
sc->ports[port].ifp = if_alloc(IFT_ETHER);
- if (sc->ports[port].ifp == NULL)
- return (ENOMEM);
-
if_setsoftc(sc->ports[port].ifp, sc);
if_setflags(sc->ports[port].ifp, IFF_UP | IFF_BROADCAST | IFF_MULTICAST |
IFF_DRV_RUNNING | IFF_SIMPLEX);
@@ -466,8 +463,8 @@ felix_attach(device_t dev)
FELIX_UNLOCK(sc);
/* Allow etherswitch to attach as our child. */
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
return (0);
@@ -483,9 +480,10 @@ felix_detach(device_t dev)
int error;
int i;
- error = 0;
sc = device_get_softc(dev);
- bus_generic_detach(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mtx_lock(&sc->mtx);
callout_stop(&sc->tick_callout);
@@ -500,8 +498,6 @@ felix_detach(device_t dev)
felix_setup(sc);
for (i = 0; i < sc->info.es_nports; i++) {
- if (sc->ports[i].miibus != NULL)
- device_delete_child(dev, sc->ports[i].miibus);
if (sc->ports[i].ifp != NULL)
if_free(sc->ports[i].ifp);
if (sc->ports[i].ifname != NULL)
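The NULL checks deleted here and in the other drivers are dead code rather
than lost error handling: if_alloc() cannot fail in current FreeBSD, and
malloc(9) called with M_WAITOK sleeps until memory is available instead of
returning NULL. Hence the pattern, with illustrative names:

/* Neither call below can return NULL, so no checks are needed. */
sc->ports = malloc(sizeof(*sc->ports) * nports, M_DEVBUF,
    M_WAITOK | M_ZERO);
ifp = if_alloc(IFT_ETHER);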
diff --git a/sys/dev/etherswitch/infineon/adm6996fc.c b/sys/dev/etherswitch/infineon/adm6996fc.c
index cc67616d9fdf..fedab27c2610 100644
--- a/sys/dev/etherswitch/infineon/adm6996fc.c
+++ b/sys/dev/etherswitch/infineon/adm6996fc.c
@@ -153,7 +153,7 @@ adm6996fc_probe(device_t dev)
return (ENXIO);
}
- device_set_desc_copy(dev, "Infineon ADM6996FC/M/MX MDIO switch driver");
+ device_set_desc(dev, "Infineon ADM6996FC/M/MX MDIO switch driver");
return (BUS_PROBE_DEFAULT);
}
@@ -173,28 +173,18 @@ adm6996fc_attach_phys(struct adm6996fc_softc *sc)
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
- sc->ifp[port]->if_softc = sc;
- sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
+ if_setsoftc(sc->ifp[port], sc);
+ if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_ADM6996FC,
M_WAITOK | M_ZERO);
- if (sc->miibus[port] == NULL) {
- err = ENOMEM;
- goto failed;
- }
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
adm6996fc_ifmedia_upd, adm6996fc_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
- sc->ifp[port]->if_xname);
+ if_name(sc->ifp[port]));
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
@@ -261,12 +251,6 @@ adm6996fc_attach(device_t dev)
sc->portphy = malloc(sizeof(int) * sc->numports, M_ADM6996FC,
M_WAITOK | M_ZERO);
- if (sc->ifp == NULL || sc->ifname == NULL || sc->miibus == NULL ||
- sc->portphy == NULL) {
- err = ENOMEM;
- goto failed;
- }
-
/*
* Attach the PHYs and complete the bus enumeration.
*/
@@ -274,11 +258,9 @@ adm6996fc_attach(device_t dev)
if (err != 0)
goto failed;
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- goto failed;
+ bus_attach_children(dev);
callout_init(&sc->callout_tick, 0);
@@ -287,14 +269,10 @@ adm6996fc_attach(device_t dev)
return (0);
failed:
- if (sc->portphy != NULL)
- free(sc->portphy, M_ADM6996FC);
- if (sc->miibus != NULL)
- free(sc->miibus, M_ADM6996FC);
- if (sc->ifname != NULL)
- free(sc->ifname, M_ADM6996FC);
- if (sc->ifp != NULL)
- free(sc->ifp, M_ADM6996FC);
+ free(sc->portphy, M_ADM6996FC);
+ free(sc->miibus, M_ADM6996FC);
+ free(sc->ifname, M_ADM6996FC);
+ free(sc->ifp, M_ADM6996FC);
return (err);
}
@@ -303,18 +281,20 @@ static int
adm6996fc_detach(device_t dev)
{
struct adm6996fc_softc *sc;
- int i, port;
+ int error, i, port;
sc = device_get_softc(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
callout_drain(&sc->callout_tick);
for (i = 0; i < MII_NPHY; i++) {
if (((1 << i) & sc->phymask) == 0)
continue;
port = adm6996fc_portforphy(sc, i);
- if (sc->miibus[port] != NULL)
- device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_ADM6996FC);
@@ -326,7 +306,6 @@ adm6996fc_detach(device_t dev)
free(sc->ifname, M_ADM6996FC);
free(sc->ifp, M_ADM6996FC);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/etherswitch/ip17x/ip17x.c b/sys/dev/etherswitch/ip17x/ip17x.c
index 218c0c293662..42d3bf990c0e 100644
--- a/sys/dev/etherswitch/ip17x/ip17x.c
+++ b/sys/dev/etherswitch/ip17x/ip17x.c
@@ -83,8 +83,8 @@ static void ip17x_ifmedia_sts(if_t, struct ifmediareq *);
static void
ip17x_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "ip17x", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "ip17x", -1);
+ if (device_find_child(parent, "ip17x", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "ip17x", DEVICE_UNIT_ANY);
}
static int
@@ -152,7 +152,7 @@ ip17x_probe(device_t dev)
(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
"mii-poll", &sc->miipoll);
#endif
- device_set_desc_copy(dev, "IC+ IP17x switch driver");
+ device_set_desc(dev, "IC+ IP17x switch driver");
return (BUS_PROBE_DEFAULT);
}
@@ -172,12 +172,6 @@ ip17x_attach_phys(struct ip17x_softc *sc)
sc->phyport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[port], sc);
if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX);
@@ -263,11 +257,9 @@ ip17x_attach(device_t dev)
*/
sc->hal.ip17x_set_vlan_mode(sc, ETHERSWITCH_VLAN_PORT);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- return (err);
+ bus_attach_children(dev);
if (sc->miipoll) {
callout_init(&sc->callout_tick, 0);
@@ -282,7 +274,11 @@ static int
ip17x_detach(device_t dev)
{
struct ip17x_softc *sc;
- int i, port;
+ int error, i, port;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
if (sc->miipoll)
@@ -292,8 +288,6 @@ ip17x_detach(device_t dev)
if (((1 << i) & sc->phymask) == 0)
continue;
port = sc->phyport[i];
- if (sc->miibus[port] != NULL)
- device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->miibus[port], M_IP17X);
@@ -307,7 +301,6 @@ ip17x_detach(device_t dev)
/* Reset the switch. */
sc->hal.ip17x_reset(sc);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
@@ -556,7 +549,7 @@ ip17x_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr)
static int
ip17x_readreg(device_t dev, int addr)
{
- struct ip17x_softc *sc;
+ struct ip17x_softc *sc __diagused;
sc = device_get_softc(dev);
IP17X_LOCK_ASSERT(sc, MA_OWNED);
@@ -568,7 +561,7 @@ ip17x_readreg(device_t dev, int addr)
static int
ip17x_writereg(device_t dev, int addr, int value)
{
- struct ip17x_softc *sc;
+ struct ip17x_softc *sc __diagused;
sc = device_get_softc(dev);
IP17X_LOCK_ASSERT(sc, MA_OWNED);
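The __diagused annotation used here marks a variable that is referenced only
by assertions such as IP17X_LOCK_ASSERT(); on kernels built without
INVARIANTS the assertion compiles away and the compiler would otherwise warn
about a set-but-unused variable. In sketch form, for a hypothetical driver:

static int
foo_readreg(device_t dev, int addr)
{
	struct foo_softc *sc __diagused;	/* only the assert uses it */

	sc = device_get_softc(dev);
	FOO_LOCK_ASSERT(sc, MA_OWNED);	/* no-op without INVARIANTS */
	/* ... perform the actual register read ... */
	return (0);
}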
diff --git a/sys/dev/etherswitch/micrel/ksz8995ma.c b/sys/dev/etherswitch/micrel/ksz8995ma.c
index 43292d3b2007..cbffd5e39f49 100644
--- a/sys/dev/etherswitch/micrel/ksz8995ma.c
+++ b/sys/dev/etherswitch/micrel/ksz8995ma.c
@@ -197,7 +197,7 @@ ksz8995ma_probe(device_t dev)
return (ENXIO);
}
- device_set_desc_copy(dev, "Micrel KSZ8995MA SPI switch driver");
+ device_set_desc(dev, "Micrel KSZ8995MA SPI switch driver");
return (BUS_PROBE_DEFAULT);
}
@@ -219,28 +219,18 @@ ksz8995ma_attach_phys(struct ksz8995ma_softc *sc)
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
- sc->ifp[port]->if_softc = sc;
- sc->ifp[port]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
+ if_setsoftc(sc->ifp[port], sc);
+ if_setflagbits(sc->ifp[port], IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
if_initname(sc->ifp[port], name, port);
sc->miibus[port] = malloc(sizeof(device_t), M_KSZ8995MA,
M_WAITOK | M_ZERO);
- if (sc->miibus[port] == NULL) {
- err = ENOMEM;
- goto failed;
- }
err = mii_attach(sc->sc_dev, sc->miibus[port], sc->ifp[port],
ksz8995ma_ifmedia_upd, ksz8995ma_ifmedia_sts, \
BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0);
DPRINTF(sc->sc_dev, "%s attached to pseudo interface %s\n",
device_get_nameunit(*sc->miibus[port]),
- sc->ifp[port]->if_xname);
+ if_name(sc->ifp[port]));
if (err != 0) {
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
@@ -311,12 +301,6 @@ ksz8995ma_attach(device_t dev)
sc->portphy = malloc(sizeof(int) * sc->numports, M_KSZ8995MA,
M_WAITOK | M_ZERO);
- if (sc->ifp == NULL || sc->ifname == NULL || sc->miibus == NULL ||
- sc->portphy == NULL) {
- err = ENOMEM;
- goto failed;
- }
-
/*
* Attach the PHYs and complete the bus enumeration.
*/
@@ -324,11 +308,9 @@ ksz8995ma_attach(device_t dev)
if (err != 0)
goto failed;
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- goto failed;
+ bus_attach_children(dev);
callout_init(&sc->callout_tick, 0);
@@ -345,14 +327,10 @@ ksz8995ma_attach(device_t dev)
return (0);
failed:
- if (sc->portphy != NULL)
- free(sc->portphy, M_KSZ8995MA);
- if (sc->miibus != NULL)
- free(sc->miibus, M_KSZ8995MA);
- if (sc->ifname != NULL)
- free(sc->ifname, M_KSZ8995MA);
- if (sc->ifp != NULL)
- free(sc->ifp, M_KSZ8995MA);
+ free(sc->portphy, M_KSZ8995MA);
+ free(sc->miibus, M_KSZ8995MA);
+ free(sc->ifname, M_KSZ8995MA);
+ free(sc->ifp, M_KSZ8995MA);
return (err);
}
@@ -361,7 +339,11 @@ static int
ksz8995ma_detach(device_t dev)
{
struct ksz8995ma_softc *sc;
- int i, port;
+ int error, i, port;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
@@ -371,8 +353,6 @@ ksz8995ma_detach(device_t dev)
if (((1 << i) & sc->phymask) == 0)
continue;
port = ksz8995ma_portforphy(sc, i);
- if (sc->miibus[port] != NULL)
- device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_KSZ8995MA);
@@ -384,7 +364,6 @@ ksz8995ma_detach(device_t dev)
free(sc->ifname, M_KSZ8995MA);
free(sc->ifp, M_KSZ8995MA);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/etherswitch/miiproxy.c b/sys/dev/etherswitch/miiproxy.c
index 97d4b7f6eb67..79342a9e8e03 100644
--- a/sys/dev/etherswitch/miiproxy.c
+++ b/sys/dev/etherswitch/miiproxy.c
@@ -265,7 +265,8 @@ miiproxy_attach(device_t dev)
* The ethernet interface needs to call mii_attach_proxy() to pass
* the relevant parameters for rendezvous with the MDIO target.
*/
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -330,8 +331,8 @@ mdioproxy_rendezvous_callback(enum rendezvous_op op, struct rendezvous_entry *re
static void
mdioproxy_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, driver->name, -1) == NULL) {
- BUS_ADD_CHILD(parent, 0, driver->name, -1);
+ if (device_find_child(parent, driver->name, DEVICE_UNIT_ANY) == NULL) {
+ BUS_ADD_CHILD(parent, 0, driver->name, DEVICE_UNIT_ANY);
}
}
@@ -348,7 +349,8 @@ mdioproxy_attach(device_t dev)
{
rendezvous_register_target(dev, mdioproxy_rendezvous_callback);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -368,7 +370,6 @@ device_t
mii_attach_proxy(device_t dev)
{
struct miiproxy_softc *sc;
- int error;
const char *name;
device_t miiproxy;
@@ -380,12 +381,8 @@ mii_attach_proxy(device_t dev)
return (NULL);
}
- miiproxy = device_add_child(dev, miiproxy_driver.name, -1);
- error = bus_generic_attach(dev);
- if (error != 0) {
- device_printf(dev, "can't attach miiproxy\n");
- return (NULL);
- }
+ miiproxy = device_add_child(dev, miiproxy_driver.name, DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
sc = device_get_softc(miiproxy);
sc->parent = dev;
sc->proxy = miiproxy;
@@ -434,3 +431,4 @@ DRIVER_MODULE(mdioproxy, mdio, mdioproxy_driver, 0, 0);
DRIVER_MODULE(miibus, miiproxy, miibus_driver, 0, 0);
MODULE_VERSION(miiproxy, 1);
MODULE_DEPEND(miiproxy, miibus, 1, 1, 1);
+MODULE_DEPEND(miiproxy, mdio, 1, 1, 1);
diff --git a/sys/dev/etherswitch/mtkswitch/mtkswitch.c b/sys/dev/etherswitch/mtkswitch/mtkswitch.c
index 6e235097f189..ff7aee22398f 100644
--- a/sys/dev/etherswitch/mtkswitch/mtkswitch.c
+++ b/sys/dev/etherswitch/mtkswitch/mtkswitch.c
@@ -100,7 +100,7 @@ mtkswitch_probe(device_t dev)
bzero(sc, sizeof(*sc));
sc->sc_switchtype = switch_type;
- device_set_desc_copy(dev, "MTK Switch Driver");
+ device_set_desc(dev, "MTK Switch Driver");
return (0);
}
@@ -121,15 +121,9 @@ mtkswitch_attach_phys(struct mtkswitch_softc *sc)
continue;
}
sc->ifp[phy] = if_alloc(IFT_ETHER);
- if (sc->ifp[phy] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
- sc->ifp[phy]->if_softc = sc;
- sc->ifp[phy]->if_flags |= IFF_UP | IFF_BROADCAST |
- IFF_DRV_RUNNING | IFF_SIMPLEX;
+ if_setsoftc(sc->ifp[phy], sc);
+ if_setflagbits(sc->ifp[phy], IFF_UP | IFF_BROADCAST |
+ IFF_DRV_RUNNING | IFF_SIMPLEX, 0);
sc->ifname[phy] = malloc(strlen(name) + 1, M_DEVBUF, M_WAITOK);
bcopy(name, sc->ifname[phy], strlen(name) + 1);
if_initname(sc->ifp[phy], sc->ifname[phy],
@@ -144,7 +138,7 @@ mtkswitch_attach_phys(struct mtkswitch_softc *sc)
} else {
DPRINTF(sc->sc_dev, "%s attached to pseudo interface "
"%s\n", device_get_nameunit(sc->miibus[phy]),
- sc->ifp[phy]->if_xname);
+ if_name(sc->ifp[phy]));
}
}
return (err);
@@ -237,12 +231,9 @@ mtkswitch_attach(device_t dev)
if (err != 0)
return (err);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- DPRINTF(dev, "%s: bus_generic_attach: err=%d\n", __func__, err);
- if (err != 0)
- return (err);
+ bus_attach_children(dev);
callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0);
@@ -257,19 +248,20 @@ static int
mtkswitch_detach(device_t dev)
{
struct mtkswitch_softc *sc = device_get_softc(dev);
- int phy;
+ int error, phy;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
callout_drain(&sc->callout_tick);
for (phy = 0; phy < MTKSWITCH_MAX_PHYS; phy++) {
- if (sc->miibus[phy] != NULL)
- device_delete_child(dev, sc->miibus[phy]);
if (sc->ifp[phy] != NULL)
if_free(sc->ifp[phy]);
free(sc->ifname[phy], M_DEVBUF);
}
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/etherswitch/rtl8366/rtl8366rb.c b/sys/dev/etherswitch/rtl8366/rtl8366rb.c
index 8a74ae189284..9000061ae138 100644
--- a/sys/dev/etherswitch/rtl8366/rtl8366rb.c
+++ b/sys/dev/etherswitch/rtl8366/rtl8366rb.c
@@ -133,8 +133,8 @@ rtl8366rb_identify(driver_t *driver, device_t parent)
device_t child;
struct iicbus_ivar *devi;
- if (device_find_child(parent, "rtl8366rb", -1) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "rtl8366rb", -1);
+ if (device_find_child(parent, "rtl8366rb", DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, "rtl8366rb", DEVICE_UNIT_ANY);
devi = IICBUS_IVAR(child);
devi->addr = RTL8366_IIC_ADDR;
}
@@ -238,12 +238,6 @@ rtl8366rb_attach(device_t dev)
/* PHYs need an interface, so we generate a dummy one */
for (i = 0; i < sc->numphys; i++) {
sc->ifp[i] = if_alloc(IFT_ETHER);
- if (sc->ifp[i] == NULL) {
- device_printf(dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[i], sc);
if_setflagbits(sc->ifp[i], IFF_UP | IFF_BROADCAST | IFF_DRV_RUNNING
| IFF_SIMPLEX, 0);
@@ -260,11 +254,9 @@ rtl8366rb_attach(device_t dev)
}
}
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- return (err);
+ bus_attach_children(dev);
callout_init_mtx(&sc->callout_tick, &sc->callout_mtx, 0);
rtl8366rb_tick(sc);
@@ -276,18 +268,19 @@ static int
rtl8366rb_detach(device_t dev)
{
struct rtl8366rb_softc *sc;
- int i;
+ int error, i;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
for (i=0; i < sc->numphys; i++) {
- if (sc->miibus[i])
- device_delete_child(dev, sc->miibus[i]);
if (sc->ifp[i] != NULL)
if_free(sc->ifp[i]);
free(sc->ifname[i], M_DEVBUF);
}
- bus_generic_detach(dev);
callout_drain(&sc->callout_tick);
mtx_destroy(&sc->callout_mtx);
mtx_destroy(&sc->sc_mtx);
diff --git a/sys/dev/etherswitch/ukswitch/ukswitch.c b/sys/dev/etherswitch/ukswitch/ukswitch.c
index 6eff37bb118e..a2e30c3af8a1 100644
--- a/sys/dev/etherswitch/ukswitch/ukswitch.c
+++ b/sys/dev/etherswitch/ukswitch/ukswitch.c
@@ -106,7 +106,7 @@ ukswitch_probe(device_t dev)
sc = device_get_softc(dev);
bzero(sc, sizeof(*sc));
- device_set_desc_copy(dev, "Generic MDIO switch driver");
+ device_set_desc(dev, "Generic MDIO switch driver");
return (BUS_PROBE_DEFAULT);
}
@@ -124,12 +124,6 @@ ukswitch_attach_phys(struct ukswitch_softc *sc)
sc->ifpport[phy] = port;
sc->portphy[port] = phy;
sc->ifp[port] = if_alloc(IFT_ETHER);
- if (sc->ifp[port] == NULL) {
- device_printf(sc->sc_dev, "couldn't allocate ifnet structure\n");
- err = ENOMEM;
- break;
- }
-
if_setsoftc(sc->ifp[port], sc);
if_setflags(sc->ifp[port], IFF_UP | IFF_BROADCAST |
IFF_DRV_RUNNING | IFF_SIMPLEX);
@@ -215,11 +209,9 @@ ukswitch_attach(device_t dev)
if (err != 0)
return (err);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- err = bus_generic_attach(dev);
- if (err != 0)
- return (err);
+ bus_attach_children(dev);
callout_init(&sc->callout_tick, 0);
@@ -232,7 +224,11 @@ static int
ukswitch_detach(device_t dev)
{
struct ukswitch_softc *sc = device_get_softc(dev);
- int i, port;
+ int error, i, port;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
callout_drain(&sc->callout_tick);
@@ -240,8 +236,6 @@ ukswitch_detach(device_t dev)
if (((1 << i) & sc->phymask) == 0)
continue;
port = ukswitch_portforphy(sc, i);
- if (sc->miibus[port] != NULL)
- device_delete_child(dev, (*sc->miibus[port]));
if (sc->ifp[port] != NULL)
if_free(sc->ifp[port]);
free(sc->ifname[port], M_UKSWITCH);
@@ -253,7 +247,6 @@ ukswitch_detach(device_t dev)
free(sc->ifname, M_UKSWITCH);
free(sc->ifp, M_UKSWITCH);
- bus_generic_detach(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
@@ -514,7 +507,7 @@ ukswitch_writephy(device_t dev, int phy, int reg, int data)
static int
ukswitch_readreg(device_t dev, int addr)
{
- struct ukswitch_softc *sc;
+ struct ukswitch_softc *sc __diagused;
sc = device_get_softc(dev);
UKSWITCH_LOCK_ASSERT(sc, MA_OWNED);
@@ -526,7 +519,7 @@ ukswitch_readreg(device_t dev, int addr)
static int
ukswitch_writereg(device_t dev, int addr, int value)
{
- struct ukswitch_softc *sc;
+ struct ukswitch_softc *sc __diagused;
sc = device_get_softc(dev);
UKSWITCH_LOCK_ASSERT(sc, MA_OWNED);
diff --git a/sys/dev/evdev/cdev.c b/sys/dev/evdev/cdev.c
index c9a8258a03a9..9fe1299a0937 100644
--- a/sys/dev/evdev/cdev.c
+++ b/sys/dev/evdev/cdev.c
@@ -91,7 +91,7 @@ static struct cdevsw evdev_cdevsw = {
.d_name = "evdev",
};
-static struct filterops evdev_cdev_filterops = {
+static const struct filterops evdev_cdev_filterops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = evdev_kqdetach,
diff --git a/sys/dev/evdev/evdev.c b/sys/dev/evdev/evdev.c
index 87cdaeb91e49..e71f4f389d5c 100644
--- a/sys/dev/evdev/evdev.c
+++ b/sys/dev/evdev/evdev.c
@@ -82,7 +82,7 @@ SYSCTL_INT(_kern_evdev, OID_AUTO, rcpt_mask, CTLFLAG_RWTUN, &evdev_rcpt_mask, 0,
"Who is receiving events: bit0 - sysmouse, bit1 - kbdmux, "
"bit2 - mouse hardware, bit3 - keyboard hardware");
SYSCTL_INT(_kern_evdev, OID_AUTO, sysmouse_t_axis, CTLFLAG_RWTUN,
- &evdev_sysmouse_t_axis, 0, "Extract T-axis from 0-none, 1-ums, 2-psm");
+ &evdev_sysmouse_t_axis, 0, "Extract T-axis from 0-none, 1-ums, 2-psm, 3-wsp");
#endif
SYSCTL_NODE(_kern_evdev, OID_AUTO, input, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Evdev input devices");
diff --git a/sys/dev/evdev/evdev.h b/sys/dev/evdev/evdev.h
index 2f00d49c485d..2ee374f184cc 100644
--- a/sys/dev/evdev/evdev.h
+++ b/sys/dev/evdev/evdev.h
@@ -62,12 +62,14 @@ extern int evdev_rcpt_mask;
* 0 - do not extract horizontal wheel movement (default).
* 1 - ums(4) horizontal wheel encoding. T-axis is mapped to buttons 6 and 7
* 2 - psm(4) wheels encoding: z = 1,-1 - vert. wheel, z = 2,-2 - horiz. wheel
+ * 3 - wsp(4) horizontal and vertical encoding. T-axis is mapped to button 5.
*/
enum
{
EVDEV_SYSMOUSE_T_AXIS_NONE = 0,
EVDEV_SYSMOUSE_T_AXIS_UMS = 1,
EVDEV_SYSMOUSE_T_AXIS_PSM = 2,
+ EVDEV_SYSMOUSE_T_AXIS_WSP = 3,
};
extern int evdev_sysmouse_t_axis;
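For reference, the new mode is selected through the tunable shown earlier in
this diff: "sysctl kern.evdev.sysmouse_t_axis=3" at runtime, or the same OID
set from loader.conf (it is CTLFLAG_RWTUN), makes sysmouse extract the T-axis
using the wsp(4) encoding.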
diff --git a/sys/dev/evdev/uinput.c b/sys/dev/evdev/uinput.c
index 3bf0e91b7360..9ac9fee8a157 100644
--- a/sys/dev/evdev/uinput.c
+++ b/sys/dev/evdev/uinput.c
@@ -93,13 +93,13 @@ static struct cdevsw uinput_cdevsw = {
static struct cdev *uinput_cdev;
-static struct evdev_methods uinput_ev_methods = {
+static const struct evdev_methods uinput_ev_methods = {
.ev_open = NULL,
.ev_close = NULL,
.ev_event = uinput_ev_event,
};
-static struct filterops uinput_filterops = {
+static const struct filterops uinput_filterops = {
.f_isfd = 1,
.f_attach = NULL,
.f_detach = uinput_kqdetach,
diff --git a/sys/dev/exca/exca.c b/sys/dev/exca/exca.c
index 98e0ffdf9d18..44cb399cd49e 100644
--- a/sys/dev/exca/exca.c
+++ b/sys/dev/exca/exca.c
@@ -643,7 +643,7 @@ exca_init(struct exca_softc *sc, device_t dev,
sc->flags = 0;
sc->getb = exca_mem_getb;
sc->putb = exca_mem_putb;
- sc->pccarddev = device_add_child(dev, "pccard", -1);
+ sc->pccarddev = device_add_child(dev, "pccard", DEVICE_UNIT_ANY);
if (sc->pccarddev == NULL)
DEVPRINTF(brdev, "WARNING: cannot add pccard bus.\n");
else if (device_probe_and_attach(sc->pccarddev) != 0)
diff --git a/sys/dev/fdc/fdc.c b/sys/dev/fdc/fdc.c
index 8563b549bd07..58fcd1d5ea3d 100644
--- a/sys/dev/fdc/fdc.c
+++ b/sys/dev/fdc/fdc.c
@@ -70,6 +70,7 @@
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -77,7 +78,6 @@
#include <machine/bus.h>
#include <machine/clock.h>
-#include <machine/stdarg.h>
#include <isa/isavar.h>
#include <isa/isareg.h>
@@ -1840,7 +1840,7 @@ int
fdc_hints_probe(device_t dev)
{
const char *name, *dname;
- int i, error, dunit;
+ int i, dunit;
/*
* Probe and attach any children. We should probably detect
@@ -1853,8 +1853,7 @@ fdc_hints_probe(device_t dev)
fdc_add_child(dev, dname, dunit);
}
- if ((error = bus_generic_attach(dev)) != 0)
- return (error);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/fdc/fdc_acpi.c b/sys/dev/fdc/fdc_acpi.c
index 7f991c8f0cb3..ab3160909c1d 100644
--- a/sys/dev/fdc/fdc_acpi.c
+++ b/sys/dev/fdc/fdc_acpi.c
@@ -178,7 +178,8 @@ fdc_acpi_probe_children(device_t bus, device_t dev, void *fde)
free(ctx, M_TEMP);
/* Attach any children found during the probe. */
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static ACPI_STATUS
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 85d9061759c4..f43551c6310e 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -62,12 +62,6 @@
SYSCTL_NODE(_hw, OID_AUTO, fdt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Flattened Device Tree");
-vm_paddr_t fdt_immr_pa;
-vm_offset_t fdt_immr_va;
-vm_offset_t fdt_immr_size;
-
-struct fdt_ic_list fdt_ic_list_head = SLIST_HEAD_INITIALIZER(fdt_ic_list_head);
-
static int
fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base,
u_long *size)
@@ -200,38 +194,6 @@ fdt_get_range(phandle_t node, int range_id, u_long *base, u_long *size)
}
int
-fdt_immr_addr(vm_offset_t immr_va)
-{
- phandle_t node;
- u_long base, size;
- int r;
-
- /*
- * Try to access the SOC node directly i.e. through /aliases/.
- */
- if ((node = OF_finddevice("soc")) != -1)
- if (ofw_bus_node_is_compatible(node, "simple-bus"))
- goto moveon;
- /*
- * Find the node the long way.
- */
- if ((node = OF_finddevice("/")) == -1)
- return (ENXIO);
-
- if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0)
- return (ENXIO);
-
-moveon:
- if ((r = fdt_get_range(node, 0, &base, &size)) == 0) {
- fdt_immr_pa = base;
- fdt_immr_va = immr_va;
- fdt_immr_size = size;
- }
-
- return (r);
-}
-
-int
fdt_is_compatible_strict(phandle_t node, const char *compatible)
{
char compat[FDT_COMPAT_LEN];
@@ -303,13 +265,13 @@ fdt_parent_addr_cells(phandle_t node)
}
u_long
-fdt_data_get(void *data, int cells)
+fdt_data_get(const void *data, int cells)
{
if (cells == 1)
- return (fdt32_to_cpu(*((uint32_t *)data)));
+ return (fdt32_to_cpu(*((const uint32_t *)data)));
- return (fdt64_to_cpu(*((uint64_t *)data)));
+ return (fdt64_to_cpu(*((const uint64_t *)data)));
}
int
@@ -336,22 +298,22 @@ fdt_addrsize_cells(phandle_t node, int *addr_cells, int *size_cells)
}
int
-fdt_data_to_res(pcell_t *data, int addr_cells, int size_cells, u_long *start,
- u_long *count)
+fdt_data_to_res(const pcell_t *data, int addr_cells, int size_cells,
+ u_long *start, u_long *count)
{
/* Address portion. */
if (addr_cells > 2)
return (ERANGE);
- *start = fdt_data_get((void *)data, addr_cells);
+ *start = fdt_data_get((const void *)data, addr_cells);
data += addr_cells;
/* Size portion. */
if (size_cells > 2)
return (ERANGE);
- *count = fdt_data_get((void *)data, size_cells);
+ *count = fdt_data_get((const void *)data, size_cells);
return (0);
}
@@ -443,8 +405,9 @@ fdt_get_phyaddr(phandle_t node, device_t dev, int *phy_addr, void **phy_sc)
}
int
-fdt_get_reserved_regions(struct mem_region *mr, int *mrcnt)
+fdt_foreach_reserved_region(fdt_mem_region_cb cb, void *arg)
{
+ struct mem_region mr;
pcell_t reserve[FDT_REG_CELLS * FDT_MEM_REGIONS];
pcell_t *reservep;
phandle_t memory, root;
@@ -453,64 +416,56 @@ fdt_get_reserved_regions(struct mem_region *mr, int *mrcnt)
root = OF_finddevice("/");
memory = OF_finddevice("/memory");
- if (memory == -1) {
- rv = ENXIO;
- goto out;
- }
+ if (memory == -1)
+ return (ENXIO);
if ((rv = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
&size_cells)) != 0)
- goto out;
+ return (rv);
- if (addr_cells > 2) {
- rv = ERANGE;
- goto out;
- }
+ if (addr_cells > 2)
+ return (ERANGE);
tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
res_len = OF_getproplen(root, "memreserve");
- if (res_len <= 0 || res_len > sizeof(reserve)) {
- rv = ERANGE;
- goto out;
- }
+ if (res_len <= 0 || res_len > sizeof(reserve))
+ return (ERANGE);
- if (OF_getprop(root, "memreserve", reserve, res_len) <= 0) {
- rv = ENXIO;
- goto out;
- }
+ if (OF_getprop(root, "memreserve", reserve, res_len) <= 0)
+ return (ENXIO);
tuples = res_len / tuple_size;
reservep = (pcell_t *)&reserve;
for (i = 0; i < tuples; i++) {
+ memset(&mr, 0, sizeof(mr));
rv = fdt_data_to_res(reservep, addr_cells, size_cells,
- (u_long *)&mr[i].mr_start, (u_long *)&mr[i].mr_size);
+ (u_long *)&mr.mr_start, (u_long *)&mr.mr_size);
if (rv != 0)
- goto out;
+ return (rv);
+
+ cb(&mr, arg);
reservep += addr_cells + size_cells;
}
- *mrcnt = i;
- rv = 0;
-out:
- return (rv);
+ return (0);
}
int
-fdt_get_reserved_mem(struct mem_region *reserved, int *mreserved)
+fdt_foreach_reserved_mem(fdt_mem_region_cb cb, void *arg)
{
+ struct mem_region mr;
pcell_t reg[FDT_REG_CELLS];
phandle_t child, root;
int addr_cells, size_cells;
- int i, rv;
+ int rv;
root = OF_finddevice("/reserved-memory");
- if (root == -1) {
+ if (root == -1)
return (ENXIO);
- }
if ((rv = fdt_addrsize_cells(root, &addr_cells, &size_cells)) != 0)
return (rv);
@@ -519,7 +474,6 @@ fdt_get_reserved_mem(struct mem_region *reserved, int *mreserved)
panic("Too many address and size cells %d %d", addr_cells,
size_cells);
- i = 0;
for (child = OF_child(root); child != 0; child = OF_peer(child)) {
if (!OF_hasprop(child, "no-map"))
continue;
@@ -529,80 +483,62 @@ fdt_get_reserved_mem(struct mem_region *reserved, int *mreserved)
/* XXX: Does a no-map of a dynamic range make sense? */
continue;
+ memset(&mr, 0, sizeof(mr));
fdt_data_to_res(reg, addr_cells, size_cells,
- (u_long *)&reserved[i].mr_start,
- (u_long *)&reserved[i].mr_size);
- i++;
- }
+ (u_long *)&mr.mr_start, (u_long *)&mr.mr_size);
- *mreserved = i;
+ cb(&mr, arg);
+ }
return (0);
}
int
-fdt_get_mem_regions(struct mem_region *mr, int *mrcnt, uint64_t *memsize)
+fdt_foreach_mem_region(fdt_mem_region_cb cb, void *arg)
{
+ struct mem_region mr;
pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS];
pcell_t *regp;
phandle_t memory;
- uint64_t memory_size;
int addr_cells, size_cells;
int i, reg_len, rv, tuple_size, tuples;
memory = OF_finddevice("/memory");
- if (memory == -1) {
- rv = ENXIO;
- goto out;
- }
+ if (memory == -1)
+ return (ENXIO);
if ((rv = fdt_addrsize_cells(OF_parent(memory), &addr_cells,
&size_cells)) != 0)
- goto out;
+ return (rv);
- if (addr_cells > 2) {
- rv = ERANGE;
- goto out;
- }
+ if (addr_cells > 2)
+ return (ERANGE);
tuple_size = sizeof(pcell_t) * (addr_cells + size_cells);
reg_len = OF_getproplen(memory, "reg");
- if (reg_len <= 0 || reg_len > sizeof(reg)) {
- rv = ERANGE;
- goto out;
- }
+ if (reg_len <= 0 || reg_len > sizeof(reg))
+ return (ERANGE);
- if (OF_getprop(memory, "reg", reg, reg_len) <= 0) {
- rv = ENXIO;
- goto out;
- }
+ if (OF_getprop(memory, "reg", reg, reg_len) <= 0)
+ return (ENXIO);
- memory_size = 0;
tuples = reg_len / tuple_size;
regp = (pcell_t *)&reg;
for (i = 0; i < tuples; i++) {
+ memset(&mr, 0, sizeof(mr));
rv = fdt_data_to_res(regp, addr_cells, size_cells,
- (u_long *)&mr[i].mr_start, (u_long *)&mr[i].mr_size);
+ (u_long *)&mr.mr_start, (u_long *)&mr.mr_size);
if (rv != 0)
- goto out;
+ return (rv);
- regp += addr_cells + size_cells;
- memory_size += mr[i].mr_size;
- }
+ cb(&mr, arg);
- if (memory_size == 0) {
- rv = ERANGE;
- goto out;
+ regp += addr_cells + size_cells;
}
- *mrcnt = i;
- if (memsize != NULL)
- *memsize = memory_size;
- rv = 0;
-out:
- return (rv);
+ return (0);
}
int
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index de0bee5bd003..f597233f9771 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -36,7 +36,7 @@
#include <contrib/libfdt/libfdt_env.h>
#include <dev/ofw/ofw_bus.h>
-#define FDT_MEM_REGIONS 16
+#define FDT_MEM_REGIONS 64
#define DI_MAX_INTR_NUM 32
@@ -59,31 +59,22 @@ struct fdt_fixup_entry {
extern struct fdt_fixup_entry fdt_fixup_table[];
#endif
-extern SLIST_HEAD(fdt_ic_list, fdt_ic) fdt_ic_list_head;
-struct fdt_ic {
- SLIST_ENTRY(fdt_ic) fdt_ics;
- ihandle_t iph;
- device_t dev;
-};
-
-extern vm_paddr_t fdt_immr_pa;
-extern vm_offset_t fdt_immr_va;
-extern vm_offset_t fdt_immr_size;
-
#if defined(FDT_DTB_STATIC)
extern u_char fdt_static_dtb;
#endif
SYSCTL_DECL(_hw_fdt);
+typedef void (*fdt_mem_region_cb)(const struct mem_region *, void *);
+
int fdt_addrsize_cells(phandle_t, int *, int *);
-u_long fdt_data_get(void *, int);
-int fdt_data_to_res(pcell_t *, int, int, u_long *, u_long *);
+u_long fdt_data_get(const void *, int);
+int fdt_data_to_res(const pcell_t *, int, int, u_long *, u_long *);
phandle_t fdt_find_compatible(phandle_t, const char *, int);
phandle_t fdt_depth_search_compatible(phandle_t, const char *, int);
-int fdt_get_mem_regions(struct mem_region *, int *, uint64_t *);
-int fdt_get_reserved_mem(struct mem_region *, int *);
-int fdt_get_reserved_regions(struct mem_region *, int *);
+int fdt_foreach_mem_region(fdt_mem_region_cb, void *);
+int fdt_foreach_reserved_mem(fdt_mem_region_cb, void *);
+int fdt_foreach_reserved_region(fdt_mem_region_cb, void *);
int fdt_get_phyaddr(phandle_t, device_t, int *, void **);
int fdt_get_range(phandle_t, int, u_long *, u_long *);
int fdt_immr_addr(vm_offset_t);
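The fdt_common conversion replaces the fill-an-array API (bounded by
FDT_MEM_REGIONS) with callback iteration, so callers no longer have to size
region arrays up front. The total-size computation that fdt_get_mem_regions()
used to perform for its memsize out-parameter now belongs in a caller-side
callback. A minimal caller sketch, assuming only the prototypes declared
above:

struct memsize_ctx {
	uint64_t	total;
};

static void
memsize_cb(const struct mem_region *mr, void *arg)
{
	struct memsize_ctx *ctx = arg;

	ctx->total += mr->mr_size;
}

static uint64_t
fdt_total_memory(void)
{
	struct memsize_ctx ctx = { .total = 0 };

	if (fdt_foreach_mem_region(memsize_cb, &ctx) != 0)
		return (0);
	return (ctx.total);
}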
diff --git a/sys/dev/fdt/fdt_slicer.c b/sys/dev/fdt/fdt_slicer.c
index 92aa0af90542..50112db5cfae 100644
--- a/sys/dev/fdt/fdt_slicer.c
+++ b/sys/dev/fdt/fdt_slicer.c
@@ -45,7 +45,7 @@
static int fill_slices(device_t dev, const char *provider,
struct flash_slice *slices, int *slices_num);
-static void fdt_slicer_init(void);
+static void fdt_slicer_init(void *);
static int
fill_slices_from_node(phandle_t node, struct flash_slice *slices, int *count)
@@ -138,7 +138,7 @@ fill_slices(device_t dev, const char *provider __unused,
}
static void
-fdt_slicer_init(void)
+fdt_slicer_init(void *dummy __unused)
{
flash_register_slicer(fill_slices, FLASH_SLICES_TYPE_NAND, false);
@@ -147,7 +147,7 @@ fdt_slicer_init(void)
}
static void
-fdt_slicer_cleanup(void)
+fdt_slicer_cleanup(void *dummy __unused)
{
flash_register_slicer(NULL, FLASH_SLICES_TYPE_NAND, true);
@@ -156,12 +156,12 @@ fdt_slicer_cleanup(void)
}
/*
- * Must be initialized after GEOM classes (SI_SUB_DRIVERS/SI_ORDER_SECOND),
+ * Must be initialized after GEOM classes (SI_SUB_DRIVERS/SI_ORDER_THIRD),
* i. e. after g_init() is called, due to the use of the GEOM topology_lock
* in flash_register_slicer(). However, must be before SI_SUB_CONFIGURE.
*/
-SYSINIT(fdt_slicer, SI_SUB_DRIVERS, SI_ORDER_THIRD, fdt_slicer_init, NULL);
-SYSUNINIT(fdt_slicer, SI_SUB_DRIVERS, SI_ORDER_THIRD, fdt_slicer_cleanup, NULL);
+SYSINIT(fdt_slicer, SI_SUB_DRIVERS, SI_ORDER_FOURTH, fdt_slicer_init, NULL);
+SYSUNINIT(fdt_slicer, SI_SUB_DRIVERS, SI_ORDER_FOURTH, fdt_slicer_cleanup, NULL);
static int
mod_handler(module_t mod, int type, void *data)
@@ -178,6 +178,6 @@ static moduledata_t fdt_slicer_mod = {
"fdt_slicer", mod_handler, NULL
};
-DECLARE_MODULE(fdt_slicer, fdt_slicer_mod, SI_SUB_DRIVERS, SI_ORDER_THIRD);
-MODULE_DEPEND(fdt_slicer, g_flashmap, 0, 0, 0);
+DECLARE_MODULE(fdt_slicer, fdt_slicer_mod, SI_SUB_DRIVERS, SI_ORDER_FOURTH);
+MODULE_DEPEND(fdt_slicer, geom_flashmap, 0, 0, 0);
MODULE_VERSION(fdt_slicer, 1);
diff --git a/sys/dev/fdt/simple_mfd.c b/sys/dev/fdt/simple_mfd.c
index 5228c6998821..1c642c6ca99c 100644
--- a/sys/dev/fdt/simple_mfd.c
+++ b/sys/dev/fdt/simple_mfd.c
@@ -223,7 +223,8 @@ simple_mfd_attach(device_t dev)
return (ENXIO);
}
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c
index 37db238f2108..a301fb0f247c 100644
--- a/sys/dev/fdt/simplebus.c
+++ b/sys/dev/fdt/simplebus.c
@@ -182,19 +182,25 @@ simplebus_attach(device_t dev)
if (rv != 0)
return (rv);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
int
simplebus_detach(device_t dev)
{
struct simplebus_softc *sc;
+ int rv;
+
+ rv = bus_generic_detach(dev);
+ if (rv != 0)
+ return (rv);
sc = device_get_softc(dev);
if (sc->ranges != NULL)
free(sc->ranges, M_DEVBUF);
- return (bus_generic_detach(dev));
+ return (0);
}
void
@@ -455,9 +461,6 @@ simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid,
count = rle->count;
}
- if (type == SYS_RES_IOPORT)
- type = SYS_RES_MEMORY;
-
if (type == SYS_RES_MEMORY) {
/* Remap through ranges property */
for (j = 0; j < sc->nranges; j++) {
diff --git a/sys/dev/ffec/if_ffec.c b/sys/dev/ffec/if_ffec.c
index 1a1993c22be7..17fab283fc81 100644
--- a/sys/dev/ffec/if_ffec.c
+++ b/sys/dev/ffec/if_ffec.c
@@ -121,7 +121,8 @@ static struct ofw_compat_data compat_data[] = {
{"fsl,imx53-fec", FECTYPE_IMX53},
{"fsl,imx6q-fec", FECTYPE_IMX6 | FECFLAG_RACC | FECFLAG_GBE },
{"fsl,imx6ul-fec", FECTYPE_IMX6 | FECFLAG_RACC },
- {"fsl,imx6sx-fec", FECTYPE_IMX6 | FECFLAG_RACC },
+ {"fsl,imx6sx-fec", FECTYPE_IMX6 | FECFLAG_RACC | FECFLAG_GBE |
+ FECFLAG_AVB },
{"fsl,imx7d-fec", FECTYPE_IMX6 | FECFLAG_RACC | FECFLAG_GBE |
FECFLAG_AVB },
{"fsl,mvf600-fec", FECTYPE_MVF | FECFLAG_RACC },
@@ -967,7 +968,7 @@ ffec_get_hwaddr(struct ffec_softc *sc, uint8_t *hwaddr)
if (bootverbose) {
device_printf(sc->dev,
- "MAC address %02x:%02x:%02x:%02x:%02x:%02x:\n",
+ "MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
hwaddr[0], hwaddr[1], hwaddr[2],
hwaddr[3], hwaddr[4], hwaddr[5]);
}
diff --git a/sys/dev/filemon/filemon_wrapper.c b/sys/dev/filemon/filemon_wrapper.c
index 3095d197aca0..dabb9eea192b 100644
--- a/sys/dev/filemon/filemon_wrapper.c
+++ b/sys/dev/filemon/filemon_wrapper.c
@@ -33,12 +33,11 @@
#include <sys/filedesc.h>
#include <sys/imgact.h>
#include <sys/priv.h>
+#include <sys/stdarg.h>
#include <sys/sx.h>
#include <sys/sysent.h>
#include <sys/vnode.h>
-#include <machine/stdarg.h>
-
static void filemon_output_event(struct filemon *filemon, const char *fmt, ...)
__printflike(2, 3);
diff --git a/sys/dev/firewire/firewire.c b/sys/dev/firewire/firewire.c
index 4bb683cccfb7..a4316a636720 100644
--- a/sys/dev/firewire/firewire.c
+++ b/sys/dev/firewire/firewire.c
@@ -314,7 +314,7 @@ fw_asystart(struct fw_xfer *xfer)
static void
firewire_identify(driver_t *driver, device_t parent)
{
- BUS_ADD_CHILD(parent, 0, "firewire", -1);
+ BUS_ADD_CHILD(parent, 0, "firewire", DEVICE_UNIT_ANY);
}
static int
@@ -452,10 +452,10 @@ firewire_attach(device_t dev)
0, 0, "fw%d_probe", unit);
/* Locate our children */
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/* launch attachment of the added children */
- bus_generic_attach(dev);
+ bus_attach_children(dev);
/* bus_reset */
FW_GLOCK(fc);
diff --git a/sys/dev/firewire/firewirereg.h b/sys/dev/firewire/firewirereg.h
index 4d2d282dd22d..d17f7a15785a 100644
--- a/sys/dev/firewire/firewirereg.h
+++ b/sys/dev/firewire/firewirereg.h
@@ -293,7 +293,7 @@ extern int firewire_debug;
extern devclass_t firewire_devclass;
extern int firewire_phydma_enable;
-#define FWPRI ((PZERO + 8) | PCATCH)
+#define FWPRI (PWAIT | PCATCH)
#define CALLOUT_INIT(x) callout_init(x, 1 /* mpsafe */)
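The FWPRI change swaps a hand-rolled sleep priority (PZERO + 8) for the
standard PWAIT, keeping PCATCH so the sleeps stay interruptible by signals.
A hypothetical wait site, only to show how the macro is consumed (channel,
wmesg and timeout are made up):

error = tsleep(&sc->status, FWPRI, "fwwait", hz);
if (error == EINTR || error == ERESTART)
	return (error);		/* signal delivered thanks to PCATCH */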
diff --git a/sys/dev/firewire/fwohci_pci.c b/sys/dev/firewire/fwohci_pci.c
index f16ce1e26565..a6f9f50701f0 100644
--- a/sys/dev/firewire/fwohci_pci.c
+++ b/sys/dev/firewire/fwohci_pci.c
@@ -314,8 +314,8 @@ fwohci_pci_attach(device_t self)
}
/* probe and attach a child device(firewire) */
- bus_generic_probe(self);
- bus_generic_attach(self);
+ bus_identify_children(self);
+ bus_attach_children(self);
return 0;
}
@@ -333,11 +333,6 @@ fwohci_pci_detach(device_t self)
bus_generic_detach(self);
- if (sc->fc.bdev) {
- device_delete_child(self, sc->fc.bdev);
- sc->fc.bdev = NULL;
- }
-
/* disable interrupts that might have been switched on */
if (sc->bst && sc->bsh)
bus_space_write_4(sc->bst, sc->bsh,
diff --git a/sys/dev/firewire/if_fwe.c b/sys/dev/firewire/if_fwe.c
index 60a9806656e8..44d3425799e9 100644
--- a/sys/dev/firewire/if_fwe.c
+++ b/sys/dev/firewire/if_fwe.c
@@ -178,10 +178,6 @@ fwe_attach(device_t dev)
/* fill the rest and attach interface */
ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- return (ENOSPC);
- }
if_setsoftc(ifp, &fwe->eth_softc);
if_initname(ifp, device_get_name(dev), unit);
@@ -201,7 +197,7 @@ fwe_attach(device_t dev)
if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
FWEDEBUG(ifp, "interface created\n");
- return 0;
+ return (0);
}
static void
diff --git a/sys/dev/firewire/if_fwip.c b/sys/dev/firewire/if_fwip.c
index b698db6c9620..41143e2e59d4 100644
--- a/sys/dev/firewire/if_fwip.c
+++ b/sys/dev/firewire/if_fwip.c
@@ -153,8 +153,6 @@ fwip_attach(device_t dev)
fwip = ((struct fwip_softc *)device_get_softc(dev));
unit = device_get_unit(dev);
ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
- if (ifp == NULL)
- return (ENOSPC);
mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
/* XXX */
@@ -199,7 +197,7 @@ fwip_attach(device_t dev)
splx(s);
FWIPDEBUG(ifp, "interface created\n");
- return 0;
+ return (0);
}
static void
@@ -306,13 +304,9 @@ fwip_init(void *arg)
xferq->psize = MCLBYTES;
xferq->queued = 0;
xferq->buf = NULL;
- xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
+ xferq->bulkxfer = malloc(
sizeof(struct fw_bulkxfer) * xferq->bnchunk,
M_FWIP, M_WAITOK);
- if (xferq->bulkxfer == NULL) {
- printf("if_fwip: malloc failed\n");
- return;
- }
STAILQ_INIT(&xferq->stvalid);
STAILQ_INIT(&xferq->stfree);
STAILQ_INIT(&xferq->stdma);
diff --git a/sys/dev/firewire/sbp.c b/sys/dev/firewire/sbp.c
index 5f4ebb747057..be1e60e45e75 100644
--- a/sys/dev/firewire/sbp.c
+++ b/sys/dev/firewire/sbp.c
@@ -303,8 +303,8 @@ SBP_DEBUG(0)
printf("sbp_identify\n");
END_DEBUG
- if (device_find_child(parent, "sbp", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "sbp", -1);
+ if (device_find_child(parent, "sbp", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "sbp", DEVICE_UNIT_ANY);
}
/*
diff --git a/sys/dev/firmware/arm/scmi.c b/sys/dev/firmware/arm/scmi.c
index ef4bcbf13996..6f16b58f49bf 100644
--- a/sys/dev/firmware/arm/scmi.c
+++ b/sys/dev/firmware/arm/scmi.c
@@ -43,6 +43,9 @@
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/refcount.h>
+#include <sys/sdt.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
#include <dev/clk/clk.h>
#include <dev/fdt/simplebus.h>
@@ -52,10 +55,30 @@
#include "scmi.h"
#include "scmi_protocols.h"
+SDT_PROVIDER_DEFINE(scmi);
+SDT_PROBE_DEFINE3(scmi, func, scmi_req_alloc, req_alloc,
+ "int", "int", "int");
+SDT_PROBE_DEFINE3(scmi, func, scmi_req_free_unlocked, req_alloc,
+ "int", "int", "int");
+SDT_PROBE_DEFINE3(scmi, func, scmi_req_get, req_alloc,
+ "int", "int", "int");
+SDT_PROBE_DEFINE3(scmi, func, scmi_req_put, req_alloc,
+ "int", "int", "int");
+SDT_PROBE_DEFINE5(scmi, func, scmi_request_tx, xfer_track,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE5(scmi, entry, scmi_wait_for_response, xfer_track,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE5(scmi, exit, scmi_wait_for_response, xfer_track,
+ "int", "int", "int", "int", "int");
+SDT_PROBE_DEFINE2(scmi, func, scmi_rx_irq_callback, hdr_dump,
+ "int", "int");
+SDT_PROBE_DEFINE5(scmi, func, scmi_process_response, xfer_track,
+ "int", "int", "int", "int", "int");
+
#define SCMI_MAX_TOKEN 1024
#define SCMI_HDR_TOKEN_S 18
-#define SCMI_HDR_TOKEN_BF (0x3fff)
+#define SCMI_HDR_TOKEN_BF (0x3ff)
#define SCMI_HDR_TOKEN_M (SCMI_HDR_TOKEN_BF << SCMI_HDR_TOKEN_S)
#define SCMI_HDR_PROTOCOL_ID_S 10
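The SDT probes defined in this hunk show up in dtrace(1) as
scmi:func:<function>:<name> (plus entry/exit pairs around the wait path).
Firing one looks roughly like this in the driver, with illustrative argument
values:

SDT_PROBE3(scmi, func, scmi_req_alloc, req_alloc, SCMI_CHAN_A2P, 0, 0);

and a matching one-liner from userland would be, e.g.,
"dtrace -n 'scmi:::req_alloc { trace(arg0); }'".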
@@ -87,12 +110,21 @@
#define SCMI_MSG_TOKEN(_hdr) \
(((_hdr) & SCMI_HDR_TOKEN_M) >> SCMI_HDR_TOKEN_S)
+#define SCMI_MSG_PROTOCOL_ID(_hdr) \
+ (((_hdr) & SCMI_HDR_PROTOCOL_ID_M) >> SCMI_HDR_PROTOCOL_ID_S)
+#define SCMI_MSG_MESSAGE_ID(_hdr) \
+ (((_hdr) & SCMI_HDR_MESSAGE_ID_M) >> SCMI_HDR_MESSAGE_ID_S)
+#define SCMI_MSG_TYPE(_hdr) \
+ (((_hdr) & SCMI_HDR_TYPE_ID_M) >> SCMI_HDR_TYPE_ID_S)
struct scmi_req {
int cnt;
bool timed_out;
bool use_polling;
bool done;
+ bool is_raw;
+ device_t dev;
+ struct task tsk;
struct mtx mtx;
LIST_ENTRY(scmi_req) next;
int protocol_id;
@@ -102,6 +134,7 @@ struct scmi_req {
struct scmi_msg msg;
};
+#define tsk_to_req(t) __containerof((t), struct scmi_req, tsk)
#define buf_to_msg(b) __containerof((b), struct scmi_msg, payld)
#define msg_to_req(m) __containerof((m), struct scmi_req, msg)
#define buf_to_req(b) msg_to_req(buf_to_msg(b))
@@ -127,16 +160,21 @@ struct scmi_transport {
struct mtx mtx;
};
-static int scmi_transport_init(struct scmi_softc *);
+static void scmi_transport_configure(struct scmi_transport_desc *, phandle_t);
+static int scmi_transport_init(struct scmi_softc *, phandle_t);
static void scmi_transport_cleanup(struct scmi_softc *);
-static struct scmi_reqs_pool *scmi_reqs_pool_allocate(const int, const int);
+static void scmi_req_async_waiter(void *, int);
+static struct scmi_reqs_pool *scmi_reqs_pool_allocate(device_t, const int,
+ const int);
static void scmi_reqs_pool_free(struct scmi_reqs_pool *);
-static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
+static struct scmi_req *scmi_req_alloc(struct scmi_softc *, enum scmi_chan);
+static struct scmi_req *scmi_req_initialized_alloc(device_t, int, int);
static void scmi_req_free_unlocked(struct scmi_softc *,
- enum scmi_chan, struct scmi_req *);
+ enum scmi_chan, struct scmi_req *);
static void scmi_req_get(struct scmi_softc *, struct scmi_req *);
static void scmi_req_put(struct scmi_softc *, struct scmi_req *);
static int scmi_token_pick(struct scmi_softc *);
+static int scmi_token_reserve(struct scmi_softc *, uint16_t);
static void scmi_token_release_unlocked(struct scmi_softc *, int);
static int scmi_req_track_inflight(struct scmi_softc *,
struct scmi_req *);
@@ -146,11 +184,13 @@ static struct scmi_req *scmi_req_lookup_inflight(struct scmi_softc *, uint32_t);
static int scmi_wait_for_response(struct scmi_softc *,
struct scmi_req *, void **);
-static void scmi_process_response(struct scmi_softc *, uint32_t);
+static void scmi_process_response(struct scmi_softc *, uint32_t,
+ unsigned int);
int
scmi_attach(device_t dev)
{
+ struct sysctl_oid *sysctl_trans;
struct scmi_softc *sc;
phandle_t node;
int error;
@@ -164,17 +204,28 @@ scmi_attach(device_t dev)
simplebus_init(dev, node);
- error = scmi_transport_init(sc);
+ error = scmi_transport_init(sc, node);
if (error != 0)
return (error);
- device_printf(dev, "Transport reply timeout initialized to %dms\n",
- sc->trs_desc.reply_timo_ms);
+ device_printf(dev, "Transport - max_msg:%d max_payld_sz:%lu reply_timo_ms:%d\n",
+ SCMI_MAX_MSG(sc), SCMI_MAX_MSG_PAYLD_SIZE(sc), SCMI_MAX_MSG_TIMEOUT_MS(sc));
+
+ sc->sysctl_root = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(_hw),
+ OID_AUTO, "scmi", CTLFLAG_RD, 0, "SCMI root");
+ sysctl_trans = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(sc->sysctl_root),
+ OID_AUTO, "transport", CTLFLAG_RD, 0, "SCMI Transport properties");
+ SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_msg",
+ CTLFLAG_RD, &sc->trs_desc.max_msg, 0, "SCMI Max number of inflight messages");
+ SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_msg_size",
+ CTLFLAG_RD, &sc->trs_desc.max_payld_sz, 0, "SCMI Max message payload size");
+ SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(sysctl_trans), OID_AUTO, "max_rx_timeout_ms",
+ CTLFLAG_RD, &sc->trs_desc.reply_timo_ms, 0, "SCMI Max message RX timeout ms");
/*
* Allow devices to identify.
*/
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/*
* Now walk the OFW tree and attach top-level devices.
@@ -182,9 +233,9 @@ scmi_attach(device_t dev)
for (node = OF_child(node); node > 0; node = OF_peer(node))
simplebus_add_device(dev, node, 0, NULL, -1, NULL);
- error = bus_generic_attach(dev);
+ bus_attach_children(dev);
- return (error);
+ return (0);
}
static int
@@ -212,7 +263,7 @@ DRIVER_MODULE(scmi, simplebus, scmi_driver, 0, 0);
MODULE_VERSION(scmi, 1);
static struct scmi_reqs_pool *
-scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
+scmi_reqs_pool_allocate(device_t dev, const int max_msg, const int max_payld_sz)
{
struct scmi_reqs_pool *rp;
struct scmi_req *req;
@@ -224,6 +275,10 @@ scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
req = malloc(sizeof(*req) + max_payld_sz,
M_DEVBUF, M_ZERO | M_WAITOK);
+ req->dev = dev;
+ req->tsk.ta_context = &req->tsk;
+ req->tsk.ta_func = scmi_req_async_waiter;
+
mtx_init(&req->mtx, "req", "SCMI", MTX_SPIN);
LIST_INSERT_HEAD(&rp->head, req, next);
}
@@ -236,9 +291,9 @@ scmi_reqs_pool_allocate(const int max_msg, const int max_payld_sz)
static void
scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
{
- struct scmi_req *req;
+ struct scmi_req *req, *tmp;
- LIST_FOREACH(req, &rp->head, next) {
+ LIST_FOREACH_SAFE(req, &rp->head, next, tmp) {
mtx_destroy(&req->mtx);
free(req, M_DEVBUF);
}
@@ -247,29 +302,42 @@ scmi_reqs_pool_free(struct scmi_reqs_pool *rp)
free(rp, M_DEVBUF);
}
+static void
+scmi_transport_configure(struct scmi_transport_desc *td, phandle_t node)
+{
+ if (OF_getencprop(node, "arm,max-msg", &td->max_msg, sizeof(td->max_msg)) == -1)
+ td->max_msg = SCMI_DEF_MAX_MSG;
+
+ if (OF_getencprop(node, "arm,max-msg-size", &td->max_payld_sz,
+ sizeof(td->max_payld_sz)) == -1)
+ td->max_payld_sz = SCMI_DEF_MAX_MSG_PAYLD_SIZE;
+}
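+
+/*
+ * Illustrative only (values made up): the optional properties consumed here
+ * and in scmi_transport_init() below come from the SCMI devicetree node:
+ *
+ *	scmi {
+ *		compatible = "arm,scmi";
+ *		arm,max-msg = <20>;
+ *		arm,max-msg-size = <64>;
+ *		arm,max-rx-timeout-ms = <50>;
+ *	};
+ */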
+
static int
-scmi_transport_init(struct scmi_softc *sc)
+scmi_transport_init(struct scmi_softc *sc, phandle_t node)
{
+ struct scmi_transport_desc *td = &sc->trs_desc;
struct scmi_transport *trs;
int ret;
trs = malloc(sizeof(*trs), M_DEVBUF, M_ZERO | M_WAITOK);
+ scmi_transport_configure(td, node);
+
BIT_FILL(SCMI_MAX_TOKEN, &trs->avail_tokens);
mtx_init(&trs->mtx, "tokens", "SCMI", MTX_SPIN);
- trs->inflight_ht = hashinit(SCMI_MAX_MSG, M_DEVBUF,
- &trs->inflight_mask);
+ trs->inflight_ht = hashinit(td->max_msg, M_DEVBUF, &trs->inflight_mask);
trs->chans[SCMI_CHAN_A2P] =
- scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
+ scmi_reqs_pool_allocate(sc->dev, td->max_msg, td->max_payld_sz);
if (trs->chans[SCMI_CHAN_A2P] == NULL) {
free(trs, M_DEVBUF);
return (ENOMEM);
}
trs->chans[SCMI_CHAN_P2A] =
- scmi_reqs_pool_allocate(SCMI_MAX_MSG, SCMI_MAX_MSG_PAYLD_SIZE);
+ scmi_reqs_pool_allocate(sc->dev, td->max_msg, td->max_payld_sz);
if (trs->chans[SCMI_CHAN_P2A] == NULL) {
scmi_reqs_pool_free(trs->chans[SCMI_CHAN_A2P]);
free(trs, M_DEVBUF);
@@ -285,8 +353,13 @@ scmi_transport_init(struct scmi_softc *sc)
return (ret);
}
+ /* Use default transport timeout if not overridden by OF */
+ OF_getencprop(node, "arm,max-rx-timeout-ms", &td->reply_timo_ms,
+ sizeof(td->reply_timo_ms));
+
return (0);
}
+
static void
scmi_transport_cleanup(struct scmi_softc *sc)
{
@@ -300,6 +373,32 @@ scmi_transport_cleanup(struct scmi_softc *sc)
}
static struct scmi_req *
+scmi_req_initialized_alloc(device_t dev, int tx_payld_sz, int rx_payld_sz)
+{
+ struct scmi_softc *sc;
+ struct scmi_req *req;
+
+ sc = device_get_softc(dev);
+
+ if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE(sc) ||
+ rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE(sc)) {
+ device_printf(dev, "Unsupported payload size. Drop.\n");
+ return (NULL);
+ }
+
+ /* Pick one from free list */
+ req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
+ if (req == NULL)
+ return (NULL);
+
+ req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
+ req->msg.rx_len = rx_payld_sz ?
+ rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE(sc);
+
+ return (req);
+}
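+
+/*
+ * Worked example (sketch): a caller expecting an 8-byte reply passes
+ * rx_payld_sz = 8 and gets msg.rx_len = 8 + 2 * sizeof(uint32_t) = 16,
+ * leaving room for the returned header and status words; rx_payld_sz == 0
+ * falls back to the transport-wide SCMI_MAX_MSG_SIZE().
+ */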
+
+static struct scmi_req *
scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
{
struct scmi_reqs_pool *rp;
@@ -313,8 +412,11 @@ scmi_req_alloc(struct scmi_softc *sc, enum scmi_chan ch_idx)
}
mtx_unlock_spin(&rp->mtx);
- if (req != NULL)
+ if (req != NULL) {
refcount_init(&req->cnt, 1);
+ SDT_PROBE3(scmi, func, scmi_req_alloc, req_alloc,
+ req, refcount_load(&req->cnt), -1);
+ }
return (req);
}
@@ -329,9 +431,13 @@ scmi_req_free_unlocked(struct scmi_softc *sc, enum scmi_chan ch_idx,
mtx_lock_spin(&rp->mtx);
req->timed_out = false;
req->done = false;
+ req->is_raw = false;
refcount_init(&req->cnt, 0);
LIST_INSERT_HEAD(&rp->head, req, next);
mtx_unlock_spin(&rp->mtx);
+
+ SDT_PROBE3(scmi, func, scmi_req_free_unlocked, req_alloc,
+ req, refcount_load(&req->cnt), -1);
}
static void
@@ -346,6 +452,9 @@ scmi_req_get(struct scmi_softc *sc, struct scmi_req *req)
if (!ok)
device_printf(sc->dev, "%s() -- BAD REFCOUNT\n", __func__);
+ SDT_PROBE3(scmi, func, scmi_req_get, req_alloc,
+ req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr));
+
return;
}
@@ -354,8 +463,15 @@ scmi_req_put(struct scmi_softc *sc, struct scmi_req *req)
{
mtx_lock_spin(&req->mtx);
if (!refcount_release_if_not_last(&req->cnt)) {
- bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE);
+ req->protocol_id = 0;
+ req->message_id = 0;
+ req->token = 0;
+ req->header = 0;
+ bzero(&req->msg, sizeof(req->msg) + SCMI_MAX_MSG_PAYLD_SIZE(sc));
scmi_req_free_unlocked(sc, SCMI_CHAN_A2P, req);
+ } else {
+ SDT_PROBE3(scmi, func, scmi_req_put, req_alloc,
+ req, refcount_load(&req->cnt), SCMI_MSG_TOKEN(req->msg.hdr));
}
mtx_unlock_spin(&req->mtx);
}
@@ -373,7 +489,6 @@ scmi_token_pick(struct scmi_softc *sc)
*/
next_msg_id = sc->trs->next_id++ & SCMI_HDR_TOKEN_BF;
token = BIT_FFS_AT(SCMI_MAX_TOKEN, &sc->trs->avail_tokens, next_msg_id);
- /* TODO Account for wrap-arounds and holes */
if (token != 0)
BIT_CLR(SCMI_MAX_TOKEN, token - 1, &sc->trs->avail_tokens);
mtx_unlock_spin(&sc->trs->mtx);
@@ -389,6 +504,28 @@ scmi_token_pick(struct scmi_softc *sc)
return ((int)(token - 1));
}
+static int
+scmi_token_reserve(struct scmi_softc *sc, uint16_t candidate)
+{
+ int token = -EBUSY, retries = 3;
+
+ do {
+ mtx_lock_spin(&sc->trs->mtx);
+ if (BIT_ISSET(SCMI_MAX_TOKEN, candidate, &sc->trs->avail_tokens)) {
+ BIT_CLR(SCMI_MAX_TOKEN, candidate, &sc->trs->avail_tokens);
+ token = candidate;
+ sc->trs->next_id++;
+ }
+ mtx_unlock_spin(&sc->trs->mtx);
+ if (token == candidate || retries-- == 0)
+ break;
+
+ pause("scmi_tk_reserve", hz);
+ } while (1);
+
+ return (token);
+}
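+
+/*
+ * Note: regular requests draw a fresh token from scmi_token_pick(); raw
+ * (pre-built) messages instead must reserve the token already encoded in
+ * their header, retrying a few times in case it is still in flight.
+ */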
+
static void
scmi_token_release_unlocked(struct scmi_softc *sc, int token)
{
@@ -399,19 +536,23 @@ scmi_token_release_unlocked(struct scmi_softc *sc, int token)
static int
scmi_finalize_req(struct scmi_softc *sc, struct scmi_req *req)
{
- uint32_t header = 0;
+ if (!req->is_raw)
+ req->token = scmi_token_pick(sc);
+ else
+ req->token = scmi_token_reserve(sc, SCMI_MSG_TOKEN(req->msg.hdr));
- req->token = scmi_token_pick(sc);
if (req->token < 0)
return (EBUSY);
- header = req->message_id;
- header |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
- header |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
- header |= req->token << SCMI_HDR_TOKEN_S;
+ if (!req->is_raw) {
+ req->msg.hdr = req->message_id;
+ req->msg.hdr |= SCMI_MSG_TYPE_CMD << SCMI_HDR_MESSAGE_TYPE_S;
+ req->msg.hdr |= req->protocol_id << SCMI_HDR_PROTOCOL_ID_S;
+ req->msg.hdr |= req->token << SCMI_HDR_TOKEN_S;
+ }
- req->header = htole32(header);
- req->msg.hdr = htole32(header);
+ /* Save requested header */
+ req->header = req->msg.hdr;
return (0);
}
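+
+/*
+ * Worked example (sketch, assuming SCMI_MSG_TYPE_CMD == 0 and the type
+ * field at header bits [9:8] per the SCMI spec): protocol 0x14, message
+ * 0x6, token 1 packs to
+ *
+ *	hdr = 0x6 | (0x14 << 10) | (1 << 18) = 0x45006
+ *
+ * which the SCMI_MSG_*() accessors above decode again on the RX side.
+ */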
@@ -469,7 +610,7 @@ scmi_req_lookup_inflight(struct scmi_softc *sc, uint32_t hdr)
}
static void
-scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
+scmi_process_response(struct scmi_softc *sc, uint32_t hdr, uint32_t rx_len)
{
bool timed_out = false;
struct scmi_req *req;
@@ -482,8 +623,13 @@ scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
return;
}
+ SDT_PROBE5(scmi, func, scmi_process_response, xfer_track, req,
+ SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
+ SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out);
+
mtx_lock_spin(&req->mtx);
req->done = true;
+ req->msg.rx_len = rx_len;
if (!req->timed_out) {
/*
* Consider the case in which a polled message is picked
@@ -512,31 +658,37 @@ scmi_process_response(struct scmi_softc *sc, uint32_t hdr)
}
void
-scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr)
+scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len)
{
struct scmi_softc *sc;
sc = device_get_softc(dev);
+ SDT_PROBE2(scmi, func, scmi_rx_irq_callback, hdr_dump, hdr, rx_len);
+
if (SCMI_IS_MSG_TYPE_NOTIF(hdr) || SCMI_IS_MSG_TYPE_DRESP(hdr)) {
device_printf(dev, "DRESP/NOTIF unsupported. Drop.\n");
SCMI_CLEAR_CHANNEL(dev, chan);
return;
}
- scmi_process_response(sc, hdr);
+ scmi_process_response(sc, hdr, rx_len);
}
static int
scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
{
+ unsigned int reply_timo_ms = SCMI_MAX_MSG_TIMEOUT_MS(sc);
int ret;
+ SDT_PROBE5(scmi, entry, scmi_wait_for_response, xfer_track, req,
+ SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
+ SCMI_MSG_TOKEN(req->msg.hdr), reply_timo_ms);
+
if (req->msg.polling) {
bool needs_drop;
- ret = SCMI_POLL_MSG(sc->dev, &req->msg,
- sc->trs_desc.reply_timo_ms);
+ ret = SCMI_POLL_MSG(sc->dev, &req->msg, reply_timo_ms);
/*
* Drop reference to successfully polled req unless it had
* already also been processed on the IRQ path.
@@ -545,6 +697,7 @@ scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
*/
mtx_lock_spin(&req->mtx);
needs_drop = (ret == 0) && !req->done;
+ req->timed_out = ret != 0;
mtx_unlock_spin(&req->mtx);
if (needs_drop)
scmi_req_drop_inflight(sc, req);
@@ -554,12 +707,12 @@ scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
le32toh(req->msg.hdr), le32toh(req->header));
}
} else {
- ret = tsleep(req, 0, "scmi_wait4",
- (sc->trs_desc.reply_timo_ms * hz) / 1000);
+ ret = tsleep(req, 0, "scmi_wait4", (reply_timo_ms * hz) / 1000);
/* Check for lost wakeups since there is no associated lock */
mtx_lock_spin(&req->mtx);
if (ret != 0 && req->done)
ret = 0;
+ req->timed_out = ret != 0;
mtx_unlock_spin(&req->mtx);
}
@@ -567,17 +720,19 @@ scmi_wait_for_response(struct scmi_softc *sc, struct scmi_req *req, void **out)
SCMI_COLLECT_REPLY(sc->dev, &req->msg);
if (req->msg.payld[0] != 0)
ret = req->msg.payld[0];
- *out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
+ if (out != NULL)
+ *out = &req->msg.payld[SCMI_MSG_HDR_SIZE];
} else {
- mtx_lock_spin(&req->mtx);
- req->timed_out = true;
- mtx_unlock_spin(&req->mtx);
device_printf(sc->dev,
"Request for token 0x%X timed-out.\n", req->token);
}
SCMI_TX_COMPLETE(sc->dev, NULL);
+ SDT_PROBE5(scmi, exit, scmi_wait_for_response, xfer_track, req,
+ SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
+ SCMI_MSG_TOKEN(req->msg.hdr), req->timed_out);
+
return (ret);
}
@@ -585,27 +740,15 @@ void *
scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
int tx_payld_sz, int rx_payld_sz)
{
- struct scmi_softc *sc;
struct scmi_req *req;
- sc = device_get_softc(dev);
-
- if (tx_payld_sz > SCMI_MAX_MSG_PAYLD_SIZE ||
- rx_payld_sz > SCMI_MAX_MSG_REPLY_SIZE) {
- device_printf(dev, "Unsupported payload size. Drop.\n");
- return (NULL);
- }
-
- /* Pick one from free list */
- req = scmi_req_alloc(sc, SCMI_CHAN_A2P);
+ /* Pick a pre-built req */
+ req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
if (req == NULL)
return (NULL);
req->protocol_id = protocol_id & SCMI_HDR_PROTOCOL_ID_BF;
req->message_id = message_id & SCMI_HDR_MESSAGE_ID_BF;
- req->msg.tx_len = sizeof(req->msg.hdr) + tx_payld_sz;
- req->msg.rx_len = rx_payld_sz ?
- rx_payld_sz + 2 * sizeof(uint32_t) : SCMI_MAX_MSG_SIZE;
return (&req->msg.payld[0]);
}
@@ -622,8 +765,50 @@ scmi_buf_put(device_t dev, void *buf)
scmi_req_put(sc, req);
}
+struct scmi_msg *
+scmi_msg_get(device_t dev, int tx_payld_sz, int rx_payld_sz)
+{
+ struct scmi_req *req;
+
+ /* Pick a pre-built req */
+ req = scmi_req_initialized_alloc(dev, tx_payld_sz, rx_payld_sz);
+ if (req == NULL)
+ return (NULL);
+
+ req->is_raw = true;
+
+ return (&req->msg);
+}
+
+static void
+scmi_req_async_waiter(void *context, int pending)
+{
+ struct task *ta = context;
+ struct scmi_softc *sc;
+ struct scmi_req *req;
+
+ req = tsk_to_req(ta);
+ sc = device_get_softc(req->dev);
+ scmi_wait_for_response(sc, req, NULL);
+
+ scmi_msg_put(req->dev, &req->msg);
+}
+
+void
+scmi_msg_put(device_t dev, struct scmi_msg *msg)
+{
+ struct scmi_softc *sc;
+ struct scmi_req *req;
+
+ sc = device_get_softc(dev);
+
+ req = msg_to_req(msg);
+
+ scmi_req_put(sc, req);
+}
+
int
-scmi_request(device_t dev, void *in, void **out)
+scmi_request_tx(device_t dev, void *in)
{
struct scmi_softc *sc;
struct scmi_req *req;
@@ -638,8 +823,11 @@ scmi_request(device_t dev, void *in, void **out)
/* Set inflight and send using transport specific method - refc-2 */
error = scmi_req_track_inflight(sc, req);
- if (error != 0)
+ if (error != 0) {
+ device_printf(dev, "Failed to build req with HDR |%0X|\n",
+ req->msg.hdr);
return (error);
+ }
error = SCMI_XFER_MSG(sc->dev, &req->msg);
if (error != 0) {
@@ -647,5 +835,37 @@ scmi_request(device_t dev, void *in, void **out)
return (error);
}
+ SDT_PROBE5(scmi, func, scmi_request_tx, xfer_track, req,
+ SCMI_MSG_PROTOCOL_ID(req->msg.hdr), SCMI_MSG_MESSAGE_ID(req->msg.hdr),
+ SCMI_MSG_TOKEN(req->msg.hdr), req->msg.polling);
+
+ return (0);
+}
+
+int
+scmi_request(device_t dev, void *in, void **out)
+{
+ struct scmi_softc *sc;
+ struct scmi_req *req;
+ int error;
+
+ error = scmi_request_tx(dev, in);
+ if (error != 0)
+ return (error);
+
+ sc = device_get_softc(dev);
+ req = buf_to_req(in);
+
return (scmi_wait_for_response(sc, req, out));
}
+
+int
+scmi_msg_async_enqueue(struct scmi_msg *msg)
+{
+ struct scmi_req *req;
+
+ req = msg_to_req(msg);
+
+ return taskqueue_enqueue_flags(taskqueue_thread, &req->tsk,
+ TASKQUEUE_FAIL_IF_PENDING | TASKQUEUE_FAIL_IF_CANCELING);
+}
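+
+/*
+ * Usage sketch (illustrative, not part of this change): a protocol driver
+ * can now fire a command without sleeping in the caller:
+ *
+ *	struct scmi_msg *msg;
+ *
+ *	msg = scmi_msg_get(dev, tx_sz, rx_sz);
+ *	if (msg == NULL)
+ *		return (ENXIO);
+ *	... fill in msg->hdr and msg->payld ...
+ *	if (scmi_request_tx(dev, &msg->payld[0]) == 0)
+ *		error = scmi_msg_async_enqueue(msg);
+ */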
diff --git a/sys/dev/firmware/arm/scmi.h b/sys/dev/firmware/arm/scmi.h
index 345ae6eeb03a..fc52732bd503 100644
--- a/sys/dev/firmware/arm/scmi.h
+++ b/sys/dev/firmware/arm/scmi.h
@@ -32,12 +32,18 @@
#ifndef _ARM64_SCMI_SCMI_H_
#define _ARM64_SCMI_SCMI_H_
+#include <sys/sysctl.h>
+
#include "scmi_if.h"
-#define SCMI_MAX_MSG 32
-#define SCMI_MAX_MSG_PAYLD_SIZE 128
-#define SCMI_MAX_MSG_REPLY_SIZE (SCMI_MAX_MSG_PAYLD_SIZE - sizeof(uint32_t))
-#define SCMI_MAX_MSG_SIZE (SCMI_MAX_MSG_PAYLD_SIZE + sizeof(uint32_t))
+#define SCMI_DEF_MAX_MSG 32
+#define SCMI_DEF_MAX_MSG_PAYLD_SIZE 128
+
+#define SCMI_MAX_MSG_PAYLD_SIZE(sc) ((sc)->trs_desc.max_payld_sz + sizeof(uint32_t))
+#define SCMI_MAX_MSG_REPLY_SIZE(sc) (SCMI_MAX_MSG_PAYLD_SIZE((sc)) + sizeof(uint32_t))
+#define SCMI_MAX_MSG_SIZE(sc) (SCMI_MAX_MSG_REPLY_SIZE(sc) + sizeof(uint32_t))
+#define SCMI_MAX_MSG(sc) ((sc)->trs_desc.max_msg)
+#define SCMI_MAX_MSG_TIMEOUT_MS(sc) ((sc)->trs_desc.reply_timo_ms)
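+
+/*
+ * With the defaults above: SCMI_MAX_MSG_PAYLD_SIZE() = 128 + 4 = 132,
+ * SCMI_MAX_MSG_REPLY_SIZE() = 136 and SCMI_MAX_MSG_SIZE() = 140 bytes;
+ * each macro layers one more uint32_t on top of the transport's raw
+ * max_payld_sz.
+ */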
enum scmi_chan {
SCMI_CHAN_A2P,
@@ -47,6 +53,8 @@ enum scmi_chan {
struct scmi_transport_desc {
bool no_completion_irq;
+ unsigned int max_msg;
+ unsigned int max_payld_sz;
unsigned int reply_timo_ms;
};
@@ -58,6 +66,7 @@ struct scmi_softc {
struct mtx mtx;
struct scmi_transport_desc trs_desc;
struct scmi_transport *trs;
+ struct sysctl_oid *sysctl_root;
};
struct scmi_msg {
@@ -74,8 +83,12 @@ struct scmi_msg {
void *scmi_buf_get(device_t dev, uint8_t protocol_id, uint8_t message_id,
int tx_payd_sz, int rx_payld_sz);
void scmi_buf_put(device_t dev, void *buf);
+struct scmi_msg *scmi_msg_get(device_t dev, int tx_payld_sz, int rx_payld_sz);
+void scmi_msg_put(device_t dev, struct scmi_msg *msg);
int scmi_request(device_t dev, void *in, void **);
-void scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr);
+int scmi_request_tx(device_t dev, void *in);
+int scmi_msg_async_enqueue(struct scmi_msg *msg);
+void scmi_rx_irq_callback(device_t dev, void *chan, uint32_t hdr, uint32_t rx_len);
DECLARE_CLASS(scmi_driver);
diff --git a/sys/dev/firmware/arm/scmi_mailbox.c b/sys/dev/firmware/arm/scmi_mailbox.c
index 858b81f68845..4ea0433377b2 100644
--- a/sys/dev/firmware/arm/scmi_mailbox.c
+++ b/sys/dev/firmware/arm/scmi_mailbox.c
@@ -70,14 +70,14 @@ static void
scmi_mailbox_a2p_callback(void *arg)
{
struct scmi_mailbox_softc *sc;
- uint32_t msg_header;
+ uint32_t msg_header, rx_len;
int ret;
sc = arg;
- ret = scmi_shmem_read_msg_header(sc->a2p_dev, &msg_header);
+ ret = scmi_shmem_read_msg_header(sc->a2p_dev, &msg_header, &rx_len);
if (ret == 0)
- scmi_rx_irq_callback(sc->base.dev, sc->a2p_dev, msg_header);
+ scmi_rx_irq_callback(sc->base.dev, sc->a2p_dev, msg_header, rx_len);
}
static int
@@ -154,12 +154,12 @@ scmi_mailbox_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo_ms)
sc = device_get_softc(dev);
do {
- if (scmi_shmem_poll_msg(sc->a2p_dev, &msg->hdr))
+ if (scmi_shmem_poll_msg(sc->a2p_dev, &msg->hdr, &msg->rx_len))
break;
DELAY(SCMI_MBOX_POLL_INTERVAL_MS * 1000);
} while (tmo_loops--);
- return (tmo_loops ? 0 : 1);
+ return (tmo_loops > 0 ? 0 : ETIMEDOUT);
}
static int
@@ -171,7 +171,7 @@ scmi_mailbox_collect_reply(device_t dev, struct scmi_msg *msg)
sc = device_get_softc(dev);
ret = scmi_shmem_read_msg_payload(sc->a2p_dev,
- msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE);
+ msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE, msg->rx_len);
return (ret);
}
diff --git a/sys/dev/firmware/arm/scmi_shmem.c b/sys/dev/firmware/arm/scmi_shmem.c
index 4a5516abfb4b..32c260c8a9ad 100644
--- a/sys/dev/firmware/arm/scmi_shmem.c
+++ b/sys/dev/firmware/arm/scmi_shmem.c
@@ -247,7 +247,7 @@ scmi_shmem_clear_channel(device_t dev)
}
int
-scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header)
+scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header, unsigned int *rx_len)
{
uint32_t length, header;
@@ -256,6 +256,7 @@ scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header)
if (le32toh(length) < sizeof(header))
return (EINVAL);
+ *rx_len = le32toh(length);
/* Read header. */
scmi_shmem_read(dev, SMT_OFFSET_MSG_HEADER, &header,
SMT_SIZE_MSG_HEADER);
@@ -266,14 +267,11 @@ scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header)
}
int
-scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len)
+scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len, uint32_t rx_len)
{
- uint32_t length, payld_len;
-
- /* Read length. */
- scmi_shmem_read(dev, SMT_OFFSET_LENGTH, &length, SMT_SIZE_LENGTH);
- payld_len = le32toh(length) - SCMI_MSG_HDR_SIZE;
+ uint32_t payld_len;
+
+	payld_len = rx_len - SCMI_MSG_HDR_SIZE;
if (payld_len > buf_len) {
device_printf(dev,
"RX payload %dbytes exceeds buflen %dbytes. Truncate.\n",
@@ -296,7 +294,7 @@ scmi_shmem_tx_complete(device_t dev)
scmi_shmem_release_channel(sc);
}
-bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header)
+bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header, uint32_t *rx_len)
{
uint32_t status;
bool ret;
@@ -306,10 +304,8 @@ bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header)
ret = (status & (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE));
-
if (ret)
- scmi_shmem_read(dev, SMT_OFFSET_MSG_HEADER, msg_header,
- SMT_SIZE_MSG_HEADER);
+ scmi_shmem_read_msg_header(dev, msg_header, rx_len);
return (ret);
}
@@ -326,4 +322,4 @@ DEFINE_CLASS_1(shmem, shmem_driver, shmem_methods, sizeof(struct shmem_softc),
EARLY_DRIVER_MODULE(shmem, mmio_sram, shmem_driver, 0, 0,
BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
-MODULE_VERSION(scmi, 1);
+MODULE_VERSION(scmi_shmem, 1);
diff --git a/sys/dev/firmware/arm/scmi_shmem.h b/sys/dev/firmware/arm/scmi_shmem.h
index ed8763d5c145..bc8284502129 100644
--- a/sys/dev/firmware/arm/scmi_shmem.h
+++ b/sys/dev/firmware/arm/scmi_shmem.h
@@ -63,9 +63,9 @@ struct scmi_smt_header {
device_t scmi_shmem_get(device_t sdev, phandle_t node, int index);
int scmi_shmem_prepare_msg(device_t dev, uint8_t *msg, uint32_t tx_len,
bool polling);
-bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header);
-int scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header);
-int scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len);
+bool scmi_shmem_poll_msg(device_t dev, uint32_t *msg_header, uint32_t *rx_len);
+int scmi_shmem_read_msg_header(device_t dev, uint32_t *msg_header, uint32_t *rx_len);
+int scmi_shmem_read_msg_payload(device_t dev, uint8_t *buf, uint32_t buf_len, uint32_t rx_len);
void scmi_shmem_tx_complete(device_t);
void scmi_shmem_clear_channel(device_t);
diff --git a/sys/dev/firmware/arm/scmi_smc.c b/sys/dev/firmware/arm/scmi_smc.c
index a1621ccffa86..81c66ad7bb46 100644
--- a/sys/dev/firmware/arm/scmi_smc.c
+++ b/sys/dev/firmware/arm/scmi_smc.c
@@ -106,7 +106,7 @@ scmi_smc_xfer_msg(device_t dev, struct scmi_msg *msg)
if (ret != 0)
return (ret);
- arm_smccc_smc(sc->smc_id, 0, 0, 0, 0, 0, 0, 0, NULL);
+ arm_smccc_invoke_smc(sc->smc_id, NULL);
return (0);
}
@@ -122,7 +122,7 @@ scmi_smc_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo)
* Nothing to poll since commands are completed as soon as smc
 * returns ... but did we get back what we were polling for?
*/
- scmi_shmem_read_msg_header(sc->a2p_dev, &msg->hdr);
+ scmi_shmem_read_msg_header(sc->a2p_dev, &msg->hdr, &msg->rx_len);
return (0);
}
@@ -136,7 +136,7 @@ scmi_smc_collect_reply(device_t dev, struct scmi_msg *msg)
sc = device_get_softc(dev);
ret = scmi_shmem_read_msg_payload(sc->a2p_dev,
- msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE);
+ msg->payld, msg->rx_len - SCMI_MSG_HDR_SIZE, msg->rx_len);
return (ret);
}
diff --git a/sys/dev/firmware/arm/scmi_virtio.c b/sys/dev/firmware/arm/scmi_virtio.c
index 12cbb9ecefd5..5252ad9cf9a4 100644
--- a/sys/dev/firmware/arm/scmi_virtio.c
+++ b/sys/dev/firmware/arm/scmi_virtio.c
@@ -76,26 +76,26 @@ scmi_virtio_callback(void *msg, unsigned int len, void *priv)
}
hdr = le32toh(*((uint32_t *)msg));
- scmi_rx_irq_callback(sc->base.dev, msg, hdr);
+ scmi_rx_irq_callback(sc->base.dev, msg, hdr, len);
}
static void *
scmi_virtio_p2a_pool_init(device_t dev, unsigned int max_msg)
{
struct scmi_virtio_softc *sc;
+ unsigned int max_msg_sz;
void *pool;
uint8_t *buf;
int i;
sc = device_get_softc(dev);
+ max_msg_sz = SCMI_MAX_MSG_SIZE(&sc->base);
+ pool = mallocarray(max_msg, max_msg_sz, M_DEVBUF, M_ZERO | M_WAITOK);
- pool = mallocarray(max_msg, SCMI_MAX_MSG_SIZE, M_DEVBUF,
- M_ZERO | M_WAITOK);
-
- for (i = 0, buf = pool; i < max_msg; i++, buf += SCMI_MAX_MSG_SIZE) {
+ for (i = 0, buf = pool; i < max_msg; i++, buf += max_msg_sz) {
/* Feed platform with pre-allocated P2A buffers */
virtio_scmi_message_enqueue(sc->virtio_dev,
- VIRTIO_SCMI_CHAN_P2A, buf, 0, SCMI_MAX_MSG_SIZE);
+ VIRTIO_SCMI_CHAN_P2A, buf, 0, max_msg_sz);
}
device_printf(dev,
@@ -111,7 +111,7 @@ scmi_virtio_clear_channel(device_t dev, void *msg)
sc = device_get_softc(dev);
virtio_scmi_message_enqueue(sc->virtio_dev, VIRTIO_SCMI_CHAN_P2A,
- msg, 0, SCMI_MAX_MSG_SIZE);
+ msg, 0, SCMI_MAX_MSG_SIZE(&sc->base));
}
static int
@@ -225,7 +225,6 @@ scmi_virtio_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo_ms)
}
rx_msg = hdr_to_msg(rx_buf);
- rx_msg->rx_len = rx_len;
/* Complete the polling on any poll path */
if (rx_msg->polling)
atomic_store_rel_int(&rx_msg->poll_done, 1);
@@ -242,7 +241,7 @@ scmi_virtio_poll_msg(device_t dev, struct scmi_msg *msg, unsigned int tmo_ms)
rx_msg->hdr, rx_msg->polling);
if (!rx_msg->polling)
- scmi_rx_irq_callback(sc->base.dev, rx_msg, rx_msg->hdr);
+ scmi_rx_irq_callback(sc->base.dev, rx_msg, rx_msg->hdr, rx_len);
}
return (tmo_loops > 0 ? 0 : ETIMEDOUT);
diff --git a/sys/dev/firmware/xilinx/zynqmp_firmware.c b/sys/dev/firmware/xilinx/zynqmp_firmware.c
index 8ee6c9a21377..dfe41842c40c 100644
--- a/sys/dev/firmware/xilinx/zynqmp_firmware.c
+++ b/sys/dev/firmware/xilinx/zynqmp_firmware.c
@@ -105,7 +105,7 @@ zynqmp_call_smc(uint32_t id, uint32_t a0, uint32_t a1, uint32_t a2, uint32_t a3,
args[0] = id | PM_SIP_SVC;
args[1] = ((uint64_t)a1 << 32) | a0;
args[2] = ((uint64_t)a3 << 32) | a2;
- arm_smccc_smc(args[0], args[1], args[2], 0, 0, 0, 0, 0, &res);
+ arm_smccc_invoke_smc(args[0], args[1], args[2], &res);
if (payload != NULL) {
payload[0] = res.a0 & 0xFFFFFFFF;
payload[1] = res.a0 >> 32;
@@ -477,7 +477,8 @@ zynqmp_firmware_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t zynqmp_firmware_methods[] = {
diff --git a/sys/dev/flash/cqspi.c b/sys/dev/flash/cqspi.c
index 9332ec85477c..4774d14add0e 100644
--- a/sys/dev/flash/cqspi.c
+++ b/sys/dev/flash/cqspi.c
@@ -620,7 +620,7 @@ cqspi_delayed_attach(void *arg)
sc = arg;
cqspi_add_devices(sc->dev);
- bus_generic_attach(sc->dev);
+ bus_attach_children(sc->dev);
config_intrhook_disestablish(&sc->config_intrhook);
}
diff --git a/sys/dev/flash/flexspi/flex_spi.c b/sys/dev/flash/flexspi/flex_spi.c
index 766a1cfaa332..44246f4b1c2d 100644
--- a/sys/dev/flash/flexspi/flex_spi.c
+++ b/sys/dev/flash/flexspi/flex_spi.c
@@ -329,20 +329,20 @@ flex_spi_write_txfifo(struct flex_spi_softc *sc, uint8_t *buf, uint8_t size)
int i, ret, reg;
/* invalid the TXFIFO */
- write_reg(sc, FSPI_IPRXFCR, FSPI_IPTXFCR_CLR);
+ write_reg(sc, FSPI_IPTXFCR, FSPI_IPTXFCR_CLR);
/*
* Default value of water mark level is 8 bytes, hence in single
* read request controller can read max 8 bytes of data.
*/
for (i = 0; i < size; i += 4) {
- /* Wait for RXFIFO available */
+ /* Wait for TXFIFO available */
if (i % 8 == 0) {
ret = reg_read_poll_tout(sc, FSPI_INTR, FSPI_INTR_IPTXWE,
1, 50000, 1);
if (ret)
device_printf(sc->dev,
- "timed out waiting for FSPI_INTR_IPRXWA\n");
+ "timed out waiting for FSPI_INTR_IPTXWE\n");
}
if (size >= (i + 4))
@@ -781,12 +781,6 @@ flex_spi_attach(device_t dev)
}
sc->buf = malloc(sc->erasesize, SECTOR_BUFFER, M_WAITOK);
- if (sc->buf == NULL) {
- device_printf(sc->dev, "Unable to set up allocate internal buffer\n");
- flex_spi_detach(dev);
- return (ENOMEM);
- }
-
/* Move it to per-flash */
sc->disk = disk_alloc();
sc->disk->d_open = flex_spi_open;
diff --git a/sys/dev/flash/w25n.c b/sys/dev/flash/w25n.c
new file mode 100644
index 000000000000..c106b99434ca
--- /dev/null
+++ b/sys/dev/flash/w25n.c
@@ -0,0 +1,603 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/lock.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <geom/geom_disk.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/openfirm.h>
+#endif
+
+#include <dev/spibus/spi.h>
+#include "spibus_if.h"
+
+#include <dev/flash/w25nreg.h>
+
+#define W25N_SECTORSIZE 512
+
+struct w25n_flash_ident
+{
+ const char *name;
+ uint8_t manufacturer_id;
+ uint16_t device_id;
+ unsigned int sectorsize;
+ unsigned int sectorcount;
+ unsigned int erasesize;
+ unsigned int flags;
+};
+
+struct w25n_softc
+{
+ device_t sc_dev;
+ device_t sc_parent;
+ uint8_t sc_manufacturer_id;
+ uint16_t sc_device_id;
+ unsigned int sc_erasesize;
+ struct mtx sc_mtx;
+ struct disk *sc_disk;
+ struct proc *sc_p;
+ struct bio_queue_head sc_bio_queue;
+ unsigned int sc_flags;
+ unsigned int sc_taskstate;
+};
+
+#define TSTATE_STOPPED 0
+#define TSTATE_STOPPING 1
+#define TSTATE_RUNNING 2
+
+#define W25N_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define W25N_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define W25N_LOCK_INIT(_sc) \
+ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
+ "w25n", MTX_DEF)
+#define W25N_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx);
+#define W25N_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED);
+#define W25N_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
+
+/* disk routines */
+static int w25n_open(struct disk *dp);
+static int w25n_close(struct disk *dp);
+static int w25n_ioctl(struct disk *, u_long, void *, int, struct thread *);
+static void w25n_strategy(struct bio *bp);
+static int w25n_getattr(struct bio *bp);
+static void w25n_task(void *arg);
+
+#define FL_NONE 0x00000000
+
+static struct w25n_flash_ident flash_devices[] = {
+ { "w25n01gv", 0xef, 0xaa21, 2048, 64 * 1024, 128 * 1024, FL_NONE },
+};
+
+static int
+w25n_read_status_register(struct w25n_softc *sc, uint8_t reg,
+ uint8_t *retval)
+{
+ uint8_t txBuf[3], rxBuf[3];
+ struct spi_command cmd;
+ int err;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ txBuf[0] = CMD_READ_STATUS;
+ txBuf[1] = reg;
+ cmd.tx_cmd = txBuf;
+ cmd.rx_cmd = rxBuf;
+ cmd.rx_cmd_sz = 3;
+ cmd.tx_cmd_sz = 3;
+ err = SPIBUS_TRANSFER(sc->sc_parent, sc->sc_dev, &cmd);
+ if (err != 0)
+ return (err);
+ *retval = rxBuf[2];
+ return (0);
+}
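+
+/*
+ * Note: the transfer above is full duplex; the third command byte is a
+ * don't-care clocked out while the device shifts the register contents
+ * back, which is why the result is taken from rxBuf[2].
+ */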
+
+static int
+w25n_wait_for_device_ready(struct w25n_softc *sc)
+{
+ int err;
+ uint8_t val;
+
+ do {
+ err = w25n_read_status_register(sc, STATUS_REG_3, &val);
+ } while (err == 0 && (val & STATUS_REG_3_BUSY));
+
+ return (err);
+}
+
+static int
+w25n_set_page_address(struct w25n_softc *sc, uint16_t page_idx)
+{
+ uint8_t txBuf[4], rxBuf[4];
+ struct spi_command cmd;
+ int err;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	txBuf[0] = CMD_PAGE_DATA_READ;
+ txBuf[1] = 0; /* dummy */
+ txBuf[2] = (page_idx >> 8) & 0xff;
+ txBuf[3] = (page_idx >> 0) & 0xff;
+ cmd.tx_cmd = txBuf;
+ cmd.rx_cmd = rxBuf;
+ cmd.rx_cmd_sz = 4;
+ cmd.tx_cmd_sz = 4;
+ err = SPIBUS_TRANSFER(sc->sc_parent, sc->sc_dev, &cmd);
+ if (err != 0)
+ return (err);
+ return (0);
+}
+
+static struct w25n_flash_ident*
+w25n_get_device_ident(struct w25n_softc *sc)
+{
+ uint8_t txBuf[8], rxBuf[8];
+ struct spi_command cmd;
+ uint8_t manufacturer_id;
+ uint16_t dev_id;
+ int err, i;
+
+ memset(&cmd, 0, sizeof(cmd));
+ memset(txBuf, 0, sizeof(txBuf));
+ memset(rxBuf, 0, sizeof(rxBuf));
+
+ txBuf[0] = CMD_READ_IDENT;
+ cmd.tx_cmd = &txBuf;
+ cmd.rx_cmd = &rxBuf;
+
+ cmd.tx_cmd_sz = 5;
+ cmd.rx_cmd_sz = 5;
+ err = SPIBUS_TRANSFER(sc->sc_parent, sc->sc_dev, &cmd);
+ if (err)
+ return (NULL);
+
+ manufacturer_id = rxBuf[2];
+ dev_id = (rxBuf[3] << 8) | (rxBuf[4]);
+
+ for (i = 0; i < nitems(flash_devices); i++) {
+ if ((flash_devices[i].manufacturer_id == manufacturer_id) &&
+ (flash_devices[i].device_id == dev_id))
+			return (&flash_devices[i]);
+ }
+
+ device_printf(sc->sc_dev,
+ "Unknown SPI NAND flash device. Vendor: %02x, device id: %04x\n",
+ manufacturer_id, dev_id);
+ return (NULL);
+}
+
+static int
+w25n_write(struct w25n_softc *sc, off_t offset, caddr_t data, off_t count)
+{
+
+ return (ENXIO);
+}
+
+static int
+w25n_read(struct w25n_softc *sc, off_t offset, caddr_t data, off_t count)
+{
+ uint8_t txBuf[4], rxBuf[4];
+ struct spi_command cmd;
+ int err;
+ int read_size;
+ uint16_t page_idx;
+ uint8_t st3, ecc_status;
+
+ /*
+ * We only support reading things at multiples of the page size.
+ */
+ if (count % sc->sc_disk->d_sectorsize != 0) {
+ device_printf(sc->sc_dev, "%s: invalid count\n", __func__);
+ return (EIO);
+ }
+ if (offset % sc->sc_disk->d_sectorsize != 0) {
+ device_printf(sc->sc_dev, "%s: invalid offset\n", __func__);
+ return (EIO);
+ }
+
+	memset(&cmd, 0, sizeof(cmd));
+	page_idx = offset / sc->sc_disk->d_sectorsize;
+
+ while (count > 0) {
+ /* Wait until we're ready */
+ err = w25n_wait_for_device_ready(sc);
+ if (err != 0) {
+ device_printf(sc->sc_dev, "%s: failed to wait\n",
+ __func__);
+ return (err);
+ }
+
+ /* Issue the page change */
+ err = w25n_set_page_address(sc, page_idx);
+ if (err != 0) {
+ device_printf(sc->sc_dev, "%s: page change failed\n",
+ __func__);
+ return (err);
+ }
+
+ /* Wait until the page change has read in data */
+ err = w25n_wait_for_device_ready(sc);
+ if (err != 0) {
+ device_printf(sc->sc_dev,
+ "%s: failed to wait again\n",
+ __func__);
+ return (err);
+ }
+
+ /*
+ * Now we can issue a read command for the data
+ * in the buffer. We'll read into the data buffer
+ * until we run out of data in this page.
+ *
+ * To simplify things we're not starting at an
+ * arbitrary offset; so the column address here
+ * inside the page is 0. If we later want to support
+ * that kind of operation then we could do the math
+ * here.
+ */
+ read_size = MIN(count, sc->sc_disk->d_sectorsize);
+
+ memset(data, 0xef, read_size);
+
+ txBuf[0] = CMD_FAST_READ;
+ txBuf[1] = 0; /* column address 15:8 */
+ txBuf[2] = 0; /* column address 7:0 */
+ txBuf[3] = 0; /* dummy byte */
+ cmd.tx_cmd_sz = 4;
+ cmd.rx_cmd_sz = 4;
+ cmd.tx_cmd = txBuf;
+ cmd.rx_cmd = rxBuf;
+
+ cmd.tx_data = data;
+ cmd.rx_data = data;
+ cmd.tx_data_sz = read_size;
+ cmd.rx_data_sz = read_size;
+
+ err = SPIBUS_TRANSFER(sc->sc_parent, sc->sc_dev, &cmd);
+ if (err != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to do FAST_READ (%u)\n",
+ err);
+ return (err);
+ }
+
+ /*
+ * Now, check ECC status bits, see if we had an ECC
+ * error.
+ */
+ err = w25n_read_status_register(sc, STATUS_REG_3, &st3);
+ if (err != 0) {
+ device_printf(sc->sc_dev,
+ "%s: failed to wait again\n", __func__);
+ return (err);
+ }
+ ecc_status = (st3 >> STATUS_REG_3_ECC_STATUS_SHIFT)
+ & STATUS_REG_3_ECC_STATUS_MASK;
+ if ((ecc_status != STATUS_ECC_OK)
+ && (ecc_status != STATUS_ECC_1BIT_OK)) {
+ device_printf(sc->sc_dev,
+ "%s: ECC status failed\n", __func__);
+ return (EIO);
+ }
+
+ count -= read_size;
+ data += read_size;
+ page_idx += 1;
+ }
+
+ return (0);
+}
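+
+/*
+ * Worked example (sketch): with the 2048-byte pages advertised for the
+ * w25n01gv, an 8KiB read at offset 64KiB walks page_idx 32..35, issuing
+ * one PAGE_DATA_READ/FAST_READ pair per page.
+ */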
+
+#ifdef FDT
+static struct ofw_compat_data compat_data[] = {
+ { "spi-nand", 1 },
+ { NULL, 0 },
+};
+#endif
+
+static int
+w25n_probe(device_t dev)
+{
+#ifdef FDT
+ int i;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ /* First try to match the compatible property to the compat_data */
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 1)
+ goto found;
+
+ /*
+ * Next, try to find a compatible device using the names in the
+ * flash_devices structure
+ */
+ for (i = 0; i < nitems(flash_devices); i++)
+ if (ofw_bus_is_compatible(dev, flash_devices[i].name))
+ goto found;
+
+ return (ENXIO);
+found:
+#endif
+ device_set_desc(dev, "W25N NAND Flash Family");
+
+ return (0);
+}
+
+static int
+w25n_attach(device_t dev)
+{
+ struct w25n_softc *sc;
+ struct w25n_flash_ident *ident;
+ int err;
+ uint8_t st1, st2, st3;
+
+ sc = device_get_softc(dev);
+ sc->sc_dev = dev;
+ sc->sc_parent = device_get_parent(sc->sc_dev);
+
+ W25N_LOCK_INIT(sc);
+
+ ident = w25n_get_device_ident(sc);
+ if (ident == NULL)
+ return (ENXIO);
+
+ if ((err = w25n_wait_for_device_ready(sc)) != 0)
+ return (err);
+
+ /*
+ * Read the configuration, protection and status registers.
+ * Print them out here so the initial configuration can be checked.
+ */
+ err = w25n_read_status_register(sc, STATUS_REG_1, &st1);
+ if (err != 0)
+ return (err);
+ err = w25n_read_status_register(sc, STATUS_REG_2, &st2);
+ if (err != 0)
+ return (err);
+ err = w25n_read_status_register(sc, STATUS_REG_3, &st3);
+ if (err != 0)
+ return (err);
+
+ device_printf(sc->sc_dev,
+ "device type %s, size %dK in %d sectors of %dK, erase size %dK\n",
+ ident->name,
+ ident->sectorcount * ident->sectorsize / 1024,
+ ident->sectorcount, ident->sectorsize / 1024,
+ ident->erasesize / 1024);
+
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "status1=0x%08x, status2=0x%08x, status3=0x%08x\n",
+ st1, st2, st3);
+
+ /*
+ * For now we're only going to support parts that have
+ * device ECC enabled. Later on it may be interesting
+ * to do software driven ECC and figure out how we
+ * expose it over GEOM, but that day isn't today.
+ */
+ if ((st2 & STATUS_REG_2_ECC_EN) == 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: only ECC in HW is supported\n");
+		return (ENXIO);
+ }
+ if ((st2 & STATUS_REG_2_BUF_EN) == 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: only BUF mode is supported\n");
+		return (ENXIO);
+ }
+
+ sc->sc_flags = ident->flags;
+ sc->sc_erasesize = ident->erasesize;
+
+ sc->sc_disk = disk_alloc();
+ sc->sc_disk->d_open = w25n_open;
+ sc->sc_disk->d_close = w25n_close;
+ sc->sc_disk->d_strategy = w25n_strategy;
+ sc->sc_disk->d_getattr = w25n_getattr;
+ sc->sc_disk->d_ioctl = w25n_ioctl;
+ sc->sc_disk->d_name = "nand_flash/spi";
+ sc->sc_disk->d_drv1 = sc;
+ sc->sc_disk->d_maxsize = DFLTPHYS;
+ sc->sc_disk->d_sectorsize = ident->sectorsize;
+ sc->sc_disk->d_mediasize = ident->sectorsize * ident->sectorcount;
+ sc->sc_disk->d_stripesize = sc->sc_erasesize;
+ sc->sc_disk->d_unit = device_get_unit(sc->sc_dev);
+ sc->sc_disk->d_dump = NULL; /* NB: no dumps */
+ strlcpy(sc->sc_disk->d_descr, ident->name,
+ sizeof(sc->sc_disk->d_descr));
+
+ disk_create(sc->sc_disk, DISK_VERSION);
+ bioq_init(&sc->sc_bio_queue);
+ kproc_create(&w25n_task, sc, &sc->sc_p, 0, 0, "task: w25n flash");
+ sc->sc_taskstate = TSTATE_RUNNING;
+
+ return (0);
+}
+
+static int
+w25n_detach(device_t dev)
+{
+ struct w25n_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+ err = 0;
+
+ W25N_LOCK(sc);
+ if (sc->sc_taskstate == TSTATE_RUNNING) {
+ sc->sc_taskstate = TSTATE_STOPPING;
+ wakeup(sc);
+ while (err == 0 && sc->sc_taskstate != TSTATE_STOPPED) {
+ err = msleep(sc, &sc->sc_mtx, 0, "w25nd", hz * 3);
+ if (err != 0) {
+ sc->sc_taskstate = TSTATE_RUNNING;
+ device_printf(sc->sc_dev,
+ "Failed to stop queue task\n");
+ }
+ }
+ }
+ W25N_UNLOCK(sc);
+
+ if (err == 0 && sc->sc_taskstate == TSTATE_STOPPED) {
+ disk_destroy(sc->sc_disk);
+ bioq_flush(&sc->sc_bio_queue, NULL, ENXIO);
+ W25N_LOCK_DESTROY(sc);
+ }
+ return (err);
+}
+
+static int
+w25n_open(struct disk *dp)
+{
+ return (0);
+}
+
+static int
+w25n_close(struct disk *dp)
+{
+
+ return (0);
+}
+
+static int
+w25n_ioctl(struct disk *dp, u_long cmd, void *data, int fflag,
+ struct thread *td)
+{
+
+ return (EINVAL);
+}
+
+static void
+w25n_strategy(struct bio *bp)
+{
+ struct w25n_softc *sc;
+
+ sc = (struct w25n_softc *)bp->bio_disk->d_drv1;
+ W25N_LOCK(sc);
+ bioq_disksort(&sc->sc_bio_queue, bp);
+ wakeup(sc);
+ W25N_UNLOCK(sc);
+}
+
+static int
+w25n_getattr(struct bio *bp)
+{
+ struct w25n_softc *sc;
+ device_t dev;
+
+ if (bp->bio_disk == NULL || bp->bio_disk->d_drv1 == NULL)
+ return (ENXIO);
+
+ sc = bp->bio_disk->d_drv1;
+ dev = sc->sc_dev;
+
+ if (strcmp(bp->bio_attribute, "SPI::device") == 0) {
+ if (bp->bio_length != sizeof(dev))
+ return (EFAULT);
+ bcopy(&dev, bp->bio_data, sizeof(dev));
+ } else
+ return (-1);
+ return (0);
+}
+
+static void
+w25n_task(void *arg)
+{
+ struct w25n_softc *sc = (struct w25n_softc*)arg;
+ struct bio *bp;
+
+ for (;;) {
+ W25N_LOCK(sc);
+ do {
+ if (sc->sc_taskstate == TSTATE_STOPPING) {
+ sc->sc_taskstate = TSTATE_STOPPED;
+ W25N_UNLOCK(sc);
+ wakeup(sc);
+ kproc_exit(0);
+ }
+ bp = bioq_first(&sc->sc_bio_queue);
+ if (bp == NULL)
+ msleep(sc, &sc->sc_mtx, PRIBIO, "w25nq", 0);
+ } while (bp == NULL);
+ bioq_remove(&sc->sc_bio_queue, bp);
+ W25N_UNLOCK(sc);
+
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ bp->bio_error = w25n_read(sc, bp->bio_offset,
+ bp->bio_data, bp->bio_bcount);
+ break;
+ case BIO_WRITE:
+ bp->bio_error = w25n_write(sc, bp->bio_offset,
+ bp->bio_data, bp->bio_bcount);
+ break;
+ default:
+ bp->bio_error = EOPNOTSUPP;
+ }
+
+ biodone(bp);
+ }
+}
+
+static device_method_t w25n_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, w25n_probe),
+ DEVMETHOD(device_attach, w25n_attach),
+ DEVMETHOD(device_detach, w25n_detach),
+
+ { 0, 0 }
+};
+
+static driver_t w25n_driver = {
+ "w25n",
+ w25n_methods,
+ sizeof(struct w25n_softc),
+};
+
+DRIVER_MODULE(w25n, spibus, w25n_driver, 0, 0);
+MODULE_DEPEND(w25n, spibus, 1, 1, 1);
+#ifdef FDT
+MODULE_DEPEND(w25n, fdt_slicer, 1, 1, 1);
+SPIBUS_FDT_PNP_INFO(compat_data);
+#endif
diff --git a/sys/dev/flash/w25nreg.h b/sys/dev/flash/w25nreg.h
new file mode 100644
index 000000000000..0fa880e54b91
--- /dev/null
+++ b/sys/dev/flash/w25nreg.h
@@ -0,0 +1,85 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Adrian Chadd <adrian@FreeBSD.org>.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __W25NREG_H__
+#define __W25NREG_H__
+
+/*
+ * Commands
+ */
+#define CMD_READ_STATUS 0x05
+#define CMD_FAST_READ 0x0B
+#define CMD_PAGE_DATA_READ 0x13
+#define CMD_READ_IDENT 0x9F
+#define CMD_LAST_ECC_FAILURE 0xA9
+#define CMD_BLOCK_ERASE 0xD8
+
+/*
+ * Three status registers - 0xAx, 0xBx, 0xCx.
+ *
+ * status register 1 (0xA0) is for protection config/status
+ * status register 2 (0xB0) is for device configuration
+ * status register 3 (0xC0) is for general status
+ */
+
+#define STATUS_REG_1 0xA0
+#define STATUS_REG_1_SRP1 0x01
+#define STATUS_REG_1_WP_EN 0x02
+#define STATUS_REG_1_TOP_BOTTOM_PROT 0x04
+#define STATUS_REG_1_BP0 0x08
+#define STATUS_REG_1_BP1 0x10
+#define STATUS_REG_1_BP2 0x20
+#define STATUS_REG_1_BP3 0x40
+#define STATUS_REG_1_SRP0 0x80
+
+#define STATUS_REG_2 0xB0
+#define STATUS_REG_2_BUF_EN 0x08
+#define STATUS_REG_2_ECC_EN 0x10
+#define STATUS_REG_2_SR1_LOCK 0x20
+#define STATUS_REG_2_OTP_EN 0x40
+#define STATUS_REG_2_OTP_L 0x80
+
+#define STATUS_REG_3 0xC0
+#define STATUS_REG_3_BUSY 0x01
+#define STATUS_REG_3_WRITE_EN_LATCH 0x02
+#define STATUS_REG_3_ERASE_FAIL 0x04
+#define STATUS_REG_3_PROGRAM_FAIL 0x08
+#define STATUS_REG_3_ECC_STATUS_0 0x10
+#define STATUS_REG_3_ECC_STATUS_1 0x20
+#define STATUS_REG_3_ECC_STATUS_SHIFT 4
+#define STATUS_REG_3_ECC_STATUS_MASK 0x03
+#define STATUS_REG_3_BBM_LUT_FULL 0x40
+
+/* ECC status */
+#define STATUS_ECC_OK 0
+#define STATUS_ECC_1BIT_OK 1
+#define STATUS_ECC_2BIT_ERR 2
+#define STATUS_ECC_2BIT_ERR_MULTIPAGE 3
+
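+/*
+ * Example (mirrors the check in w25n_read()): decoding the ECC result
+ * from status register 3:
+ *
+ *	ecc = (st3 >> STATUS_REG_3_ECC_STATUS_SHIFT) &
+ *	    STATUS_REG_3_ECC_STATUS_MASK;
+ *	if (ecc != STATUS_ECC_OK && ecc != STATUS_ECC_1BIT_OK)
+ *		... uncorrectable; fail the I/O ...
+ */
+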
+#endif /* __W25NREG_H__ */
diff --git a/sys/dev/ftgpio/ftgpio.c b/sys/dev/ftgpio/ftgpio.c
index 7acfdd5b900e..68787b54bb16 100644
--- a/sys/dev/ftgpio/ftgpio.c
+++ b/sys/dev/ftgpio/ftgpio.c
@@ -398,12 +398,13 @@ ftgpio_attach(device_t dev)
FTGPIO_VERBOSE_PRINTF(sc->dev, "groups GPIO1..GPIO6 enabled\n");
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/fxp/if_fxp.c b/sys/dev/fxp/if_fxp.c
index d5c977782440..7b17b054abb9 100644
--- a/sys/dev/fxp/if_fxp.c
+++ b/sys/dev/fxp/if_fxp.c
@@ -431,7 +431,7 @@ fxp_attach(device_t dev)
uint32_t val;
uint16_t data;
u_char eaddr[ETHER_ADDR_LEN];
- int error, flags, i, pmc, prefer_iomap;
+ int error, flags, i, prefer_iomap;
error = 0;
sc = device_get_softc(dev);
@@ -443,11 +443,6 @@ fxp_attach(device_t dev)
fxp_serial_ifmedia_sts);
ifp = sc->ifp = if_gethandle(IFT_ETHER);
- if (ifp == (void *)NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
/*
* Enable bus mastering.
@@ -523,8 +518,7 @@ fxp_attach(device_t dev)
if (sc->revision >= FXP_REV_82558_A4 &&
sc->revision != FXP_REV_82559S_A) {
data = sc->eeprom[FXP_EEPROM_MAP_ID];
- if ((data & 0x20) != 0 &&
- pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0)
+ if ((data & 0x20) != 0 && pci_has_pm(sc->dev))
sc->flags |= FXP_FLAG_WOLCAP;
}
@@ -938,8 +932,6 @@ fxp_release(struct fxp_softc *sc)
FXP_LOCK_ASSERT(sc, MA_NOTOWNED);
KASSERT(sc->ih == NULL,
("fxp_release() called with intr handle still active"));
- if (sc->miibus)
- device_delete_child(sc->dev, sc->miibus);
bus_generic_detach(sc->dev);
ifmedia_removeall(&sc->sc_media);
if (sc->fxp_desc.cbl_list) {
@@ -1061,24 +1053,17 @@ fxp_suspend(device_t dev)
{
struct fxp_softc *sc = device_get_softc(dev);
if_t ifp;
- int pmc;
- uint16_t pmstat;
FXP_LOCK(sc);
ifp = sc->ifp;
- if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
- pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
- if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
- /* Request PME. */
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- sc->flags |= FXP_FLAG_WOL;
- /* Reconfigure hardware to accept magic frames. */
- if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
- fxp_init_body(sc, 0);
- }
- pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0) {
+ /* Request PME. */
+ pci_enable_pme(sc->dev);
+ sc->flags |= FXP_FLAG_WOL;
+ /* Reconfigure hardware to accept magic frames. */
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
+ fxp_init_body(sc, 0);
}
fxp_stop(sc);
@@ -1097,17 +1082,11 @@ fxp_resume(device_t dev)
{
struct fxp_softc *sc = device_get_softc(dev);
if_t ifp = sc->ifp;
- int pmc;
- uint16_t pmstat;
FXP_LOCK(sc);
- if (pci_find_cap(sc->dev, PCIY_PMG, &pmc) == 0) {
+ if (pci_has_pm(sc->dev)) {
sc->flags &= ~FXP_FLAG_WOL;
- pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2);
- /* Disable PME and clear PME status. */
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
if ((sc->flags & FXP_FLAG_WOLCAP) != 0)
CSR_WRITE_1(sc, FXP_CSR_PMDR,
CSR_READ_1(sc, FXP_CSR_PMDR));
diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c
index 91389d0dfd2c..74504a950d31 100644
--- a/sys/dev/gem/if_gem.c
+++ b/sys/dev/gem/if_gem.c
@@ -152,8 +152,6 @@ gem_attach(struct gem_softc *sc)
/* Set up ifnet structure. */
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOSPC);
sc->sc_csum_features = GEM_CSUM_FEATURES;
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(sc->sc_dev),
@@ -415,7 +413,7 @@ gem_detach(struct gem_softc *sc)
callout_drain(&sc->sc_rx_ch);
#endif
if_free(ifp);
- device_delete_child(sc->sc_dev, sc->sc_miibus);
+ bus_generic_detach(sc->sc_dev);
for (i = 0; i < GEM_NRXDESC; i++)
if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
diff --git a/sys/dev/glxiic/glxiic.c b/sys/dev/glxiic/glxiic.c
index 16ed337c543b..ef0a0e111339 100644
--- a/sys/dev/glxiic/glxiic.c
+++ b/sys/dev/glxiic/glxiic.c
@@ -284,11 +284,11 @@ glxiic_identify(driver_t *driver, device_t parent)
{
/* Prevent child from being added more than once. */
- if (device_find_child(parent, driver->name, -1) != NULL)
+ if (device_find_child(parent, driver->name, DEVICE_UNIT_ANY) != NULL)
return;
if (pci_get_devid(parent) == GLXIIC_CS5536_DEV_ID) {
- if (device_add_child(parent, driver->name, -1) == NULL)
+ if (device_add_child(parent, driver->name, DEVICE_UNIT_ANY) == NULL)
device_printf(parent, "Could not add glxiic child\n");
}
}
@@ -390,7 +390,8 @@ glxiic_attach(device_t dev)
goto out;
}
- if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL) {
+ if ((sc->iicbus = device_add_child(dev, "iicbus",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "Could not allocate iicbus instance\n");
error = ENXIO;
goto out;
@@ -408,7 +409,7 @@ glxiic_attach(device_t dev)
glxiic_smb_enable(sc, IIC_FASTEST, 0);
/* Probe and attach the iicbus when interrupts are available. */
- error = bus_delayed_attach_children(dev);
+ bus_delayed_attach_children(dev);
out:
if (error != 0) {
@@ -451,11 +452,8 @@ glxiic_detach(device_t dev)
error = bus_generic_detach(dev);
if (error != 0)
- goto out;
- if (sc->iicbus != NULL)
- error = device_delete_child(dev, sc->iicbus);
+ return (error);
-out:
callout_drain(&sc->callout);
if (sc->smb_res != NULL) {
@@ -479,7 +477,7 @@ out:
GLXIIC_LOCK_DESTROY(sc);
- return (error);
+ return (0);
}
static uint8_t
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
new file mode 100644
index 000000000000..0d2455cab399
--- /dev/null
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -0,0 +1,449 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Ahmad Khalifa <ahmadkhalifa570@gmail.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/gpio.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/gpio/acpi_gpiobusvar.h>
+#include <dev/gpio/gpiobus_internal.h>
+#include <sys/sbuf.h>
+
+#include "gpiobus_if.h"
+
+struct acpi_gpiobus_softc {
+ struct gpiobus_softc super_sc;
+ ACPI_CONNECTION_INFO handler_info;
+};
+
+struct acpi_gpiobus_ctx {
+ struct gpiobus_softc *sc;
+ ACPI_HANDLE dev_handle;
+};
+
+struct acpi_gpiobus_ivar
+{
+ struct gpiobus_ivar gpiobus;
+ ACPI_HANDLE handle;
+};
+
+uint32_t
+acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *gpio_res)
+{
+ uint32_t flags = 0;
+
+ /* Figure out pin flags */
+ if (gpio_res->ConnectionType == ACPI_RESOURCE_GPIO_TYPE_INT) {
+ switch (gpio_res->Polarity) {
+ case ACPI_ACTIVE_HIGH:
+ flags = gpio_res->Triggering == ACPI_LEVEL_SENSITIVE ?
+ GPIO_INTR_LEVEL_HIGH : GPIO_INTR_EDGE_RISING;
+ break;
+ case ACPI_ACTIVE_LOW:
+ flags = gpio_res->Triggering == ACPI_LEVEL_SENSITIVE ?
+ GPIO_INTR_LEVEL_LOW : GPIO_INTR_EDGE_FALLING;
+ break;
+ case ACPI_ACTIVE_BOTH:
+ flags = GPIO_INTR_EDGE_BOTH;
+ break;
+ }
+
+ flags |= GPIO_PIN_INPUT;
+#ifdef NOT_YET
+ /* This is not currently implemented. */
+ if (gpio_res->Shareable == ACPI_SHARED)
+ flags |= GPIO_INTR_SHAREABLE;
+#endif
+ }
+ if (gpio_res->ConnectionType == ACPI_RESOURCE_GPIO_TYPE_IO) {
+ switch (gpio_res->IoRestriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ flags |= GPIO_PIN_INPUT;
+ break;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ flags |= GPIO_PIN_OUTPUT;
+ break;
+ }
+ }
+
+ switch (gpio_res->PinConfig) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ flags |= GPIO_PIN_PULLUP;
+ break;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ flags |= GPIO_PIN_PULLDOWN;
+ break;
+ }
+
+ return (flags);
+}
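+
+/*
+ * Example (illustrative): a GpioInt resource declared as level-triggered,
+ * active-low, with a pull-up maps above to
+ * GPIO_INTR_LEVEL_LOW | GPIO_PIN_INPUT | GPIO_PIN_PULLUP.
+ */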
+
+static ACPI_STATUS
+acpi_gpiobus_enumerate_res(ACPI_RESOURCE *res, void *context)
+{
+ ACPI_RESOURCE_GPIO *gpio_res = &res->Data.Gpio;
+ struct acpi_gpiobus_ctx *ctx = context;
+ struct gpiobus_softc *super_sc = ctx->sc;
+ ACPI_HANDLE handle;
+ uint32_t flags, i;
+
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+
+ if (ACPI_FAILURE(AcpiGetHandle(ACPI_ROOT_OBJECT,
+ gpio_res->ResourceSource.StringPtr, &handle)) ||
+ handle != ctx->dev_handle)
+ return (AE_OK);
+
+ if (__predict_false(gpio_res->PinTableLength > super_sc->sc_npins)) {
+ device_printf(super_sc->sc_busdev,
+ "invalid pin table length %hu, max: %d (bad ACPI tables?)\n",
+ gpio_res->PinTableLength, super_sc->sc_npins);
+ return (AE_LIMIT);
+ }
+
+ flags = acpi_gpiobus_convflags(gpio_res);
+ for (i = 0; i < gpio_res->PinTableLength; i++) {
+ UINT16 pin = gpio_res->PinTable[i];
+
+ if (__predict_false(pin >= super_sc->sc_npins)) {
+ device_printf(super_sc->sc_busdev,
+ "invalid pin 0x%x, max: 0x%x (bad ACPI tables?)\n",
+ pin, super_sc->sc_npins - 1);
+ return (AE_LIMIT);
+ }
+
+ GPIO_PIN_SETFLAGS(super_sc->sc_dev, pin, flags &
+ ~GPIO_INTR_MASK);
+ }
+
+ return (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_gpiobus_enumerate_aei(ACPI_RESOURCE *res, void *context)
+{
+ ACPI_RESOURCE_GPIO *gpio_res = &res->Data.Gpio;
+ uint32_t *npins = context, *pins = npins + 1;
+
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+ if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return (AE_OK);
+
+ for (int i = 0; i < gpio_res->PinTableLength; i++)
+ pins[(*npins)++] = gpio_res->PinTable[i];
+ return (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_gpiobus_enumerate(ACPI_HANDLE handle, UINT32 depth, void *context,
+ void **result)
+{
+ UINT32 sta;
+
+ /*
+ * If no _STA method or if it failed, then assume that
+ * the device is present.
+ */
+ if (!ACPI_FAILURE(acpi_GetInteger(handle, "_STA", &sta)) &&
+ !ACPI_DEVICE_PRESENT(sta))
+ return (AE_OK);
+
+ if (!acpi_has_hid(handle))
+ return (AE_OK);
+
+ /* Look for GPIO resources */
+ AcpiWalkResources(handle, "_CRS", acpi_gpiobus_enumerate_res, context);
+
+ return (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_gpiobus_space_handler(UINT32 function, ACPI_PHYSICAL_ADDRESS address,
+ UINT32 length, UINT64 *value, void *context, void *region_context)
+{
+ ACPI_CONNECTION_INFO *info = context;
+ ACPI_RESOURCE_GPIO *gpio_res;
+ device_t controller;
+ ACPI_RESOURCE *res;
+ ACPI_STATUS status;
+
+ status = AcpiBufferToResource(info->Connection, info->Length, &res);
+ if (ACPI_FAILURE(status) || res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ goto err;
+
+ gpio_res = &res->Data.Gpio;
+ controller = __containerof(info, struct acpi_gpiobus_softc,
+ handler_info)->super_sc.sc_dev;
+
+ switch (function) {
+ case ACPI_WRITE:
+ if (__predict_false(
+ gpio_res->IoRestriction == ACPI_IO_RESTRICT_INPUT))
+ goto err;
+
+ for (int i = 0; i < length; i++)
+ if (GPIO_PIN_SET(controller,
+ gpio_res->PinTable[address + i], (*value & 1 << i) ?
+ GPIO_PIN_HIGH : GPIO_PIN_LOW) != 0)
+ goto err;
+ break;
+ case ACPI_READ:
+ if (__predict_false(
+ gpio_res->IoRestriction == ACPI_IO_RESTRICT_OUTPUT))
+ goto err;
+
+ /* Be defensive: start from zero even if the caller pre-clears. */
+ *value = 0;
+ for (int i = 0; i < length; i++) {
+ uint32_t v;
+
+ if (GPIO_PIN_GET(controller,
+ gpio_res->PinTable[address + i], &v) != 0)
+ goto err;
+ *value |= v << i;
+ }
+ break;
+ default:
+ goto err;
+ }
+
+ ACPI_FREE(res);
+ return (AE_OK);
+
+err:
+ ACPI_FREE(res);
+ return (AE_BAD_PARAMETER);
+}
+
+static void
+acpi_gpiobus_attach_aei(struct acpi_gpiobus_softc *sc, ACPI_HANDLE handle)
+{
+ struct acpi_gpiobus_ivar *devi;
+ ACPI_HANDLE aei_handle;
+ device_t child;
+ uint32_t *pins;
+ ACPI_STATUS status;
+ int err;
+
+ status = AcpiGetHandle(handle, "_AEI", &aei_handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* pins[0] specifies the length of the array. */
+ pins = mallocarray(sc->super_sc.sc_npins + 1,
+ sizeof(uint32_t), M_DEVBUF, M_WAITOK);
+ pins[0] = 0;
+
+ status = AcpiWalkResources(handle, "_AEI",
+ acpi_gpiobus_enumerate_aei, pins);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to enumerate AEI resources\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ child = BUS_ADD_CHILD(sc->super_sc.sc_busdev, 0, "gpio_aei",
+ DEVICE_UNIT_ANY);
+ if (child == NULL) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to add gpio_aei child\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ devi = device_get_ivars(child);
+ devi->gpiobus.npins = pins[0];
+ devi->handle = aei_handle;
+
+ err = gpiobus_alloc_ivars(&devi->gpiobus);
+ if (err != 0) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to allocate gpio_aei ivars\n");
+ device_delete_child(sc->super_sc.sc_busdev, child);
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ for (int i = 0; i < pins[0]; i++)
+ devi->gpiobus.pins[i] = pins[i + 1];
+ free(pins, M_DEVBUF);
+
+ bus_attach_children(sc->super_sc.sc_busdev);
+}
+
+static int
+acpi_gpiobus_probe(device_t dev)
+{
+ device_t controller;
+
+ if (acpi_disabled("gpiobus"))
+ return (ENXIO);
+
+ controller = device_get_parent(dev);
+ if (controller == NULL)
+ return (ENXIO);
+
+ if (acpi_get_handle(controller) == NULL)
+ return (ENXIO);
+
+ device_set_desc(dev, "GPIO bus (ACPI-hinted)");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+acpi_gpiobus_attach(device_t dev)
+{
+ struct acpi_gpiobus_softc *sc;
+ struct acpi_gpiobus_ctx ctx;
+ ACPI_HANDLE handle;
+ ACPI_STATUS status;
+ int err;
+
+ if ((err = gpiobus_attach(dev)) != 0)
+ return (err);
+
+ sc = device_get_softc(dev);
+ handle = acpi_get_handle(sc->super_sc.sc_dev);
+ if (handle == NULL) {
+ gpiobus_detach(dev);
+ return (ENXIO);
+ }
+
+ status = AcpiInstallAddressSpaceHandler(handle, ACPI_ADR_SPACE_GPIO,
+ acpi_gpiobus_space_handler, NULL, &sc->handler_info);
+
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev,
+ "Failed to install GPIO address space handler\n");
+ gpiobus_detach(dev);
+ return (ENXIO);
+ }
+
+ ctx.dev_handle = handle;
+ ctx.sc = &sc->super_sc;
+
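+ /*
+ * Walk the whole namespace and apply the GpioIo/GpioInt settings that
+ * other devices' _CRS entries request from this controller.
+ */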
+ status = AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+ ACPI_UINT32_MAX, acpi_gpiobus_enumerate, NULL, &ctx, NULL);
+
+ if (ACPI_FAILURE(status))
+ device_printf(dev, "Failed to enumerate GPIO resources\n");
+
+ /* Look for AEI child */
+ acpi_gpiobus_attach_aei(sc, handle);
+ return (0);
+}
+
+static int
+acpi_gpiobus_detach(device_t dev)
+{
+ struct gpiobus_softc *super_sc;
+ ACPI_STATUS status;
+
+ super_sc = device_get_softc(dev);
+ status = AcpiRemoveAddressSpaceHandler(
+ acpi_get_handle(super_sc->sc_dev), ACPI_ADR_SPACE_GPIO,
+ acpi_gpiobus_space_handler
+ );
+
+ if (ACPI_FAILURE(status))
+ device_printf(dev,
+ "Failed to remove GPIO address space handler\n");
+
+ return (gpiobus_detach(dev));
+}
+
+static int
+acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
+ uintptr_t *result)
+{
+ struct acpi_gpiobus_ivar *devi = device_get_ivars(child);
+
+ switch (which) {
+ case ACPI_GPIOBUS_IVAR_HANDLE:
+ *result = (uintptr_t)devi->handle;
+ break;
+ default:
+ return (gpiobus_read_ivar(dev, child, which, result));
+ }
+
+ return (0);
+}
+
+static device_t
+acpi_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct acpi_gpiobus_ivar)));
+}
+
+static int
+acpi_gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
+{
+ struct acpi_gpiobus_ivar *devi;
+ int err;
+
+ err = gpiobus_child_location(bus, child, sb);
+ if (err != 0)
+ return (err);
+
+ devi = device_get_ivars(child);
+ sbuf_printf(sb, " handle=%s", acpi_name(devi->handle));
+ return (0);
+}
+
+static device_method_t acpi_gpiobus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, acpi_gpiobus_probe),
+ DEVMETHOD(device_attach, acpi_gpiobus_attach),
+ DEVMETHOD(device_detach, acpi_gpiobus_detach),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, acpi_gpiobus_read_ivar),
+ DEVMETHOD(bus_add_child, acpi_gpiobus_add_child),
+ DEVMETHOD(bus_child_location, acpi_gpiobus_child_location),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(gpiobus, acpi_gpiobus_driver, acpi_gpiobus_methods,
+ sizeof(struct acpi_gpiobus_softc), gpiobus_driver);
+EARLY_DRIVER_MODULE(acpi_gpiobus, gpio, acpi_gpiobus_driver, NULL, NULL,
+ BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE);
+MODULE_VERSION(acpi_gpiobus, 1);
+MODULE_DEPEND(acpi_gpiobus, acpi, 1, 1, 1);
diff --git a/sys/dev/gpio/acpi_gpiobusvar.h b/sys/dev/gpio/acpi_gpiobusvar.h
new file mode 100644
index 000000000000..288e8bd0f2af
--- /dev/null
+++ b/sys/dev/gpio/acpi_gpiobusvar.h
@@ -0,0 +1,48 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Colin Percival
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __ACPI_GPIOBUS_H__
+#define __ACPI_GPIOBUS_H__
+
+#include <sys/bus.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+
+enum acpi_gpiobus_ivars {
+ ACPI_GPIOBUS_IVAR_HANDLE = 10600
+};
+
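+/*
+ * __BUS_ACCESSOR() expands this into acpi_gpiobus_get_handle() and
+ * acpi_gpiobus_set_handle() inlines for children of acpi_gpiobus.
+ */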
+#define ACPI_GPIOBUS_ACCESSOR(var, ivar, type) \
+ __BUS_ACCESSOR(acpi_gpiobus, var, ACPI_GPIOBUS, ivar, type)
+
+ACPI_GPIOBUS_ACCESSOR(handle, HANDLE, ACPI_HANDLE)
+
+#undef ACPI_GPIOBUS_ACCESSOR
+
+uint32_t acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *);
+
+#endif /* __ACPI_GPIOBUS_H__ */
diff --git a/sys/dev/gpio/bytgpio.c b/sys/dev/gpio/bytgpio.c
index b3f5f02081c5..5d685c155a03 100644
--- a/sys/dev/gpio/bytgpio.c
+++ b/sys/dev/gpio/bytgpio.c
@@ -278,6 +278,8 @@ const struct pinmap_info bytgpio_sus_pins[] = {
GPIO_PIN_MAP(40, 0)
};
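+
+/* At file scope so the ACPI_PNP_INFO() entry at the bottom can use it. */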
+static char *bytgpio_gpio_ids[] = { "INT33FC", NULL };
+
#define SUS_PINS nitems(bytgpio_sus_pins)
#define BYGPIO_PIN_REGISTER(sc, pin, r) ((sc)->sc_pinpad_map[(pin)].reg * 16 + (r))
@@ -538,12 +540,11 @@ bytgpio_pin_toggle(device_t dev, uint32_t pin)
static int
bytgpio_probe(device_t dev)
{
- static char *gpio_ids[] = { "INT33FC", NULL };
int rv;
if (acpi_disabled("gpio"))
return (ENXIO);
- rv = ACPI_ID_PROBE(device_get_parent(dev), dev, gpio_ids, NULL);
+ rv = ACPI_ID_PROBE(device_get_parent(dev), dev, bytgpio_gpio_ids, NULL);
if (rv <= 0)
device_set_desc(dev, "Intel Baytrail GPIO Controller");
return (rv);
@@ -607,7 +608,7 @@ bytgpio_attach(device_t dev)
sc->sc_pad_funcs[pin] = val & BYTGPIO_PCONF0_FUNC_MASK;
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
BYTGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -615,6 +616,7 @@ bytgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
error:
@@ -675,3 +677,4 @@ static driver_t bytgpio_driver = {
DRIVER_MODULE(bytgpio, acpi, bytgpio_driver, 0, 0);
MODULE_DEPEND(bytgpio, acpi, 1, 1, 1);
MODULE_DEPEND(bytgpio, gpiobus, 1, 1, 1);
+ACPI_PNP_INFO(bytgpio_gpio_ids);
diff --git a/sys/dev/gpio/chvgpio.c b/sys/dev/gpio/chvgpio.c
index 199ad4d6f373..3273aad9242b 100644
--- a/sys/dev/gpio/chvgpio.c
+++ b/sys/dev/gpio/chvgpio.c
@@ -441,7 +441,7 @@ chvgpio_attach(device_t dev)
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_MASK, 0);
bus_write_4(sc->sc_mem_res, CHVGPIO_INTERRUPT_STATUS, 0xffff);
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
CHVGPIO_LOCK_DESTROY(sc);
bus_release_resource(dev, SYS_RES_MEMORY,
@@ -451,6 +451,7 @@ chvgpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/dwgpio/dwgpio.c b/sys/dev/gpio/dwgpio/dwgpio.c
index 5acb99ca591e..3908113d5fd4 100644
--- a/sys/dev/gpio/dwgpio/dwgpio.c
+++ b/sys/dev/gpio/dwgpio/dwgpio.c
@@ -167,12 +167,13 @@ dwgpio_attach(device_t dev)
snprintf(sc->gpio_pins[i].gp_name, GPIOMAXNAME,
"dwgpio%d.%d", device_get_unit(dev), i);
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
mtx_destroy(&sc->sc_mtx);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gpio/dwgpio/dwgpio_bus.c b/sys/dev/gpio/dwgpio/dwgpio_bus.c
index 48de7dc327eb..7f3fc5b91f69 100644
--- a/sys/dev/gpio/dwgpio/dwgpio_bus.c
+++ b/sys/dev/gpio/dwgpio/dwgpio_bus.c
@@ -94,7 +94,7 @@ dwgpiobus_attach(device_t dev)
/*
* Allow devices to identify.
*/
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/*
* Now walk the OFW tree and attach top-level devices.
@@ -102,7 +102,8 @@ dwgpiobus_attach(device_t dev)
for (node = OF_child(node); node > 0; node = OF_peer(node))
simplebus_add_device(dev, node, 0, NULL, -1, NULL);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/gpio/gpio_if.m b/sys/dev/gpio/gpio_if.m
index 5501b2b5c0e7..0b6988ceba79 100644
--- a/sys/dev/gpio/gpio_if.m
+++ b/sys/dev/gpio/gpio_if.m
@@ -62,6 +62,22 @@ CODE {
return (0);
}
+
+ static int
+ gpio_default_get_pin_list(device_t dev, uint32_t *pin_list)
+ {
+ uint32_t maxpin;
+ int err;
+
+ err = GPIO_PIN_MAX(dev, &maxpin);
+ if (err != 0)
+ return (ENXIO);
+
+ for (int i = 0; i <= maxpin; i++)
+ pin_list[i] = i;
+
+ return (0);
+ }
};
HEADER {
@@ -185,3 +201,13 @@ METHOD int pin_config_32 {
uint32_t num_pins;
uint32_t *pin_flags;
} DEFAULT gpio_default_nosupport;
+
+#
+# Get the controller's pin numbers. pin_list is expected to be an array with
+# at least GPIO_PIN_MAX() + 1 elements. The default implementation fills it
+# with pin numbers 0 through GPIO_PIN_MAX(), inclusive; controllers with a
+# sparse pin numbering can override this to report their real pin numbers.
+#
+METHOD int get_pin_list {
+ device_t dev;
+ uint32_t *pin_list;
+} DEFAULT gpio_default_get_pin_list;
diff --git a/sys/dev/gpio/gpioaei.c b/sys/dev/gpio/gpioaei.c
new file mode 100644
index 000000000000..7b97277aaf61
--- /dev/null
+++ b/sys/dev/gpio/gpioaei.c
@@ -0,0 +1,259 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Colin Percival
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/gpio.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include "gpiobus_if.h"
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <dev/acpica/acpivar.h>
+
+#include <dev/gpio/gpiobusvar.h>
+#include <dev/gpio/acpi_gpiobusvar.h>
+
+enum gpio_aei_type {
+ ACPI_AEI_TYPE_UNKNOWN,
+ ACPI_AEI_TYPE_ELX,
+ ACPI_AEI_TYPE_EVT
+};
+
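+/* Per-pin interrupt context; one entry for each pin listed in _AEI. */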
+struct gpio_aei_ctx {
+ SLIST_ENTRY(gpio_aei_ctx) next;
+ struct resource * intr_res;
+ void * intr_cookie;
+ ACPI_HANDLE handle;
+ gpio_pin_t gpio;
+ uint32_t pin;
+ int intr_rid;
+ enum gpio_aei_type type;
+};
+
+struct gpio_aei_softc {
+ SLIST_HEAD(, gpio_aei_ctx) aei_ctx;
+ ACPI_HANDLE dev_handle;
+ device_t dev;
+};
+
+static int
+gpio_aei_probe(device_t dev)
+{
+ /* Only match when gpiobus explicitly requests a gpio_aei child. */
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static void
+gpio_aei_intr(void * arg)
+{
+ struct gpio_aei_ctx * ctx = arg;
+
+ /* Ask ACPI to run the appropriate _EVT, _Exx or _Lxx method. */
+ if (ctx->type == ACPI_AEI_TYPE_EVT)
+ acpi_SetInteger(ctx->handle, NULL, ctx->pin);
+ else
+ AcpiEvaluateObject(ctx->handle, NULL, NULL, NULL);
+}
+
+static ACPI_STATUS
+gpio_aei_enumerate(ACPI_RESOURCE * res, void * context)
+{
+ ACPI_RESOURCE_GPIO * gpio_res = &res->Data.Gpio;
+ struct gpio_aei_softc * sc = context;
+ uint32_t flags, maxpin;
+ device_t busdev;
+ int err;
+
+ /*
+ * Check that we have a GpioInt object; according to the spec,
+ * _AEI resources should always be of that type.
+ */
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+ if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return (AE_OK);
+
+ flags = acpi_gpiobus_convflags(gpio_res);
+ if (acpi_quirks & ACPI_Q_AEI_NOPULL)
+ flags &= ~GPIO_PIN_PULLUP;
+
+ err = GPIO_PIN_MAX(acpi_get_device(sc->dev_handle), &maxpin);
+ if (err != 0)
+ return (AE_ERROR);
+
+ busdev = GPIO_GET_BUS(acpi_get_device(sc->dev_handle));
+ for (int i = 0; i < gpio_res->PinTableLength; i++) {
+ struct gpio_aei_ctx * ctx;
+ uint32_t pin = gpio_res->PinTable[i];
+
+ if (__predict_false(pin > maxpin)) {
+ device_printf(sc->dev,
+ "Invalid pin 0x%x, max: 0x%x (bad ACPI tables?)\n",
+ pin, maxpin);
+ continue;
+ }
+
+ ctx = malloc(sizeof(struct gpio_aei_ctx), M_DEVBUF, M_WAITOK);
+ ctx->type = ACPI_AEI_TYPE_UNKNOWN;
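+ /*
+ * Event methods are named _EXX (edge-triggered) or _LXX
+ * (level-triggered), with XX the pin number in hex, so only pins
+ * 0-255 can have a dedicated method; other pins fall back to the
+ * catch-all _EVT method below.
+ */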
+ if (pin <= 255) {
+ char objname[5]; /* "_EXX" or "_LXX" */
+ sprintf(objname, "_%c%02X",
+ (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin);
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, objname,
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_ELX;
+ }
+
+ if (ctx->type == ACPI_AEI_TYPE_UNKNOWN) {
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, "_EVT",
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_EVT;
+ else {
+ device_printf(sc->dev,
+ "AEI Device type is unknown for pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+ }
+
+ err = gpio_pin_get_by_bus_pinnum(busdev, pin, &ctx->gpio);
+ if (err != 0) {
+ device_printf(sc->dev, "Cannot acquire pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = gpio_pin_setflags(ctx->gpio, flags & ~GPIO_INTR_MASK);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set pin flags for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->intr_rid = 0;
+ ctx->intr_res = gpio_alloc_intr_resource(sc->dev,
+ &ctx->intr_rid, RF_ACTIVE, ctx->gpio,
+ flags & GPIO_INTR_MASK);
+ if (ctx->intr_res == NULL) {
+ device_printf(sc->dev,
+ "Cannot allocate an IRQ for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = bus_setup_intr(sc->dev, ctx->intr_res, INTR_TYPE_MISC |
+ INTR_MPSAFE | INTR_EXCL | INTR_SLEEPABLE, NULL,
+ gpio_aei_intr, ctx, &ctx->intr_cookie);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set up an IRQ for pin 0x%x\n", pin);
+
+ bus_release_resource(sc->dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->pin = pin;
+ SLIST_INSERT_HEAD(&sc->aei_ctx, ctx, next);
+ }
+
+ return (AE_OK);
+}
+
+static int
+gpio_aei_attach(device_t dev)
+{
+ struct gpio_aei_softc * sc = device_get_softc(dev);
+ ACPI_HANDLE handle;
+ ACPI_STATUS status;
+
+ /* Set our device description; probe leaves it unset. */
+ device_set_desc(dev, "ACPI Event Information Device");
+
+ handle = acpi_gpiobus_get_handle(dev);
+ status = AcpiGetParent(handle, &sc->dev_handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Cannot get parent of %s\n",
+ acpi_name(handle));
+ return (ENXIO);
+ }
+
+ SLIST_INIT(&sc->aei_ctx);
+ sc->dev = dev;
+
+ status = AcpiWalkResources(sc->dev_handle, "_AEI",
+ gpio_aei_enumerate, sc);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Failed to enumerate AEI resources\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+gpio_aei_detach(device_t dev)
+{
+ struct gpio_aei_softc * sc = device_get_softc(dev);
+ struct gpio_aei_ctx * ctx, * tctx;
+
+ SLIST_FOREACH_SAFE(ctx, &sc->aei_ctx, next, tctx) {
+ bus_teardown_intr(dev, ctx->intr_res, ctx->intr_cookie);
+ bus_release_resource(dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ }
+
+ return (0);
+}
+
+static device_method_t gpio_aei_methods[] = {
+ /* Device interface. */
+ DEVMETHOD(device_probe, gpio_aei_probe),
+ DEVMETHOD(device_attach, gpio_aei_attach),
+ DEVMETHOD(device_detach, gpio_aei_detach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(gpio_aei, gpio_aei_driver, gpio_aei_methods,
+ sizeof(struct gpio_aei_softc));
+DRIVER_MODULE(gpio_aei, gpiobus, gpio_aei_driver, NULL, NULL);
+MODULE_DEPEND(gpio_aei, acpi_gpiobus, 1, 1, 1);
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index c8fee7d400c1..698b5e5fdd01 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -39,6 +39,7 @@
#include <sys/sbuf.h>
#include <dev/gpio/gpiobusvar.h>
+#include <dev/gpio/gpiobus_internal.h>
#include "gpiobus_if.h"
@@ -52,13 +53,10 @@
static void gpiobus_print_pins(struct gpiobus_ivar *, struct sbuf *);
static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int);
static int gpiobus_probe(device_t);
-static int gpiobus_attach(device_t);
-static int gpiobus_detach(device_t);
static int gpiobus_suspend(device_t);
static int gpiobus_resume(device_t);
static void gpiobus_probe_nomatch(device_t, device_t);
static int gpiobus_print_child(device_t, device_t);
-static int gpiobus_child_location(device_t, device_t, struct sbuf *);
static device_t gpiobus_add_child(device_t, u_int, const char *, int);
static void gpiobus_hinted_child(device_t, const char *, int);
@@ -111,10 +109,9 @@ gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags,
res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1,
alloc_flags);
if (res == NULL) {
- intr_free_intr_map_data((struct intr_map_data *)gpio_data);
+ intr_unmap_irq(irq);
return (NULL);
}
- rman_set_virtual(res, gpio_data);
return (res);
}
#else
@@ -215,20 +212,40 @@ gpio_pin_is_active(gpio_pin_t pin, bool *active)
return (0);
}
+/*
+ * Note that this function should only be used in cases where a
+ * pre-existing gpiobus_pin structure exists. In most cases, the
+ * gpio_pin_get_by_* functions suffice.
+ */
+int
+gpio_pin_acquire(gpio_pin_t gpio)
+{
+ device_t busdev;
+
+ KASSERT(gpio != NULL, ("GPIO pin is NULL."));
+ KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL."));
+
+ busdev = GPIO_GET_BUS(gpio->dev);
+ if (busdev == NULL)
+ return (ENXIO);
+
+ return (gpiobus_acquire_pin(busdev, gpio->pin));
+}
+
void
gpio_pin_release(gpio_pin_t gpio)
{
device_t busdev;
- if (gpio == NULL)
- return;
-
+ KASSERT(gpio != NULL, ("GPIO pin is NULL."));
KASSERT(gpio->dev != NULL, ("GPIO pin device is NULL."));
busdev = GPIO_GET_BUS(gpio->dev);
- if (busdev != NULL)
- gpiobus_release_pin(busdev, gpio->pin);
+ KASSERT(busdev != NULL, ("gpiobus dev is NULL."));
+ gpiobus_release_pin(busdev, gpio->pin);
free(gpio, M_DEVBUF);
}
@@ -295,38 +312,26 @@ gpiobus_print_pins(struct gpiobus_ivar *devi, struct sbuf *sb)
}
device_t
-gpiobus_attach_bus(device_t dev)
+gpiobus_add_bus(device_t dev)
{
device_t busdev;
- busdev = device_add_child(dev, "gpiobus", -1);
+ busdev = device_add_child(dev, "gpiobus", DEVICE_UNIT_ANY);
if (busdev == NULL)
return (NULL);
- if (device_add_child(dev, "gpioc", -1) == NULL) {
- device_delete_child(dev, busdev);
- return (NULL);
- }
#ifdef FDT
ofw_gpiobus_register_provider(dev);
#endif
- bus_generic_attach(dev);
-
return (busdev);
}
int
gpiobus_detach_bus(device_t dev)
{
- int err;
-
#ifdef FDT
ofw_gpiobus_unregister_provider(dev);
#endif
- err = bus_generic_detach(dev);
- if (err != 0)
- return (err);
-
- return (device_delete_children(dev));
+ return (bus_generic_detach(dev));
}
int
@@ -363,6 +368,37 @@ gpiobus_init_softc(device_t dev)
}
int
+gpiobus_add_gpioc(device_t dev)
+{
+ struct gpiobus_ivar *devi;
+ struct gpiobus_softc *sc;
+ device_t gpioc;
+ int err;
+
+ gpioc = BUS_ADD_CHILD(dev, 0, "gpioc", DEVICE_UNIT_ANY);
+ if (gpioc == NULL)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+ devi = device_get_ivars(gpioc);
+
+ devi->npins = sc->sc_npins;
+ err = gpiobus_alloc_ivars(devi);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ return (err);
+ }
+
+ err = GPIO_GET_PIN_LIST(sc->sc_dev, devi->pins);
+ if (err != 0) {
+ device_delete_child(dev, gpioc);
+ gpiobus_free_ivars(devi);
+ }
+
+ return (err);
+}
+
+int
gpiobus_alloc_ivars(struct gpiobus_ivar *devi)
{
@@ -393,14 +429,13 @@ gpiobus_acquire_pin(device_t bus, uint32_t pin)
sc = device_get_softc(bus);
/* Consistency check. */
if (pin >= sc->sc_npins) {
- device_printf(bus,
- "invalid pin %d, max: %d\n", pin, sc->sc_npins - 1);
- return (-1);
+ panic("%s: invalid pin %d, max: %d",
+ device_get_nameunit(bus), pin, sc->sc_npins - 1);
}
/* Mark pin as mapped and give warning if it's already mapped. */
if (sc->sc_pins[pin].mapped) {
device_printf(bus, "warning: pin %d is already mapped\n", pin);
- return (-1);
+ return (EBUSY);
}
sc->sc_pins[pin].mapped = 1;
@@ -408,7 +443,7 @@ gpiobus_acquire_pin(device_t bus, uint32_t pin)
}
/* Release mapped pin */
-int
+void
gpiobus_release_pin(device_t bus, uint32_t pin)
{
struct gpiobus_softc *sc;
@@ -416,19 +451,15 @@ gpiobus_release_pin(device_t bus, uint32_t pin)
sc = device_get_softc(bus);
/* Consistency check. */
if (pin >= sc->sc_npins) {
- device_printf(bus,
- "invalid pin %d, max=%d\n",
- pin, sc->sc_npins - 1);
- return (-1);
+ panic("%s: invalid pin %d, max: %d",
+ device_get_nameunit(bus), pin, sc->sc_npins - 1);
}
- if (!sc->sc_pins[pin].mapped) {
- device_printf(bus, "pin %d is not mapped\n", pin);
- return (-1);
- }
- sc->sc_pins[pin].mapped = 0;
+ if (!sc->sc_pins[pin].mapped)
+ panic("%s: pin %d is not mapped", device_get_nameunit(bus),
+ pin);
- return (0);
+ sc->sc_pins[pin].mapped = 0;
}
static int
@@ -443,8 +474,7 @@ gpiobus_acquire_child_pins(device_t dev, device_t child)
device_printf(child, "cannot acquire pin %d\n",
devi->pins[i]);
while (--i >= 0) {
- (void)gpiobus_release_pin(dev,
- devi->pins[i]);
+ gpiobus_release_pin(dev, devi->pins[i]);
}
gpiobus_free_ivars(devi);
return (EBUSY);
@@ -550,7 +580,7 @@ gpiobus_probe(device_t dev)
return (BUS_PROBE_GENERIC);
}
-static int
+int
gpiobus_attach(device_t dev)
{
int err;
@@ -559,45 +589,38 @@ gpiobus_attach(device_t dev)
if (err != 0)
return (err);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
+
/*
* Get parent's pins and mark them as unmapped
*/
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
/*
* Since this is not a self-enumerating bus, and since we always add
* children in attach, we have to always delete children here.
*/
-static int
+int
gpiobus_detach(device_t dev)
{
struct gpiobus_softc *sc;
- struct gpiobus_ivar *devi;
- device_t *devlist;
- int i, err, ndevs;
+ int i, err;
sc = GPIOBUS_SOFTC(dev);
KASSERT(mtx_initialized(&sc->sc_mtx),
("gpiobus mutex not initialized"));
GPIOBUS_LOCK_DESTROY(sc);
- if ((err = bus_generic_detach(dev)) != 0)
+ if ((err = bus_detach_children(dev)) != 0)
return (err);
- if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
- return (err);
- for (i = 0; i < ndevs; i++) {
- devi = GPIOBUS_IVAR(devlist[i]);
- gpiobus_free_ivars(devi);
- resource_list_free(&devi->rl);
- free(devi, M_DEVBUF);
- device_delete_child(dev, devlist[i]);
- }
- free(devlist, M_TEMP);
rman_fini(&sc->sc_intr_rman);
if (sc->sc_pins) {
for (i = 0; i < sc->sc_npins; i++) {
@@ -669,7 +692,7 @@ gpiobus_print_child(device_t dev, device_t child)
return (retval);
}
-static int
+int
gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
struct gpiobus_ivar *devi;
@@ -681,16 +704,19 @@ gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
return (0);
}
-static device_t
-gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+device_t
+gpiobus_add_child_common(device_t dev, u_int order, const char *name, int unit,
+ size_t ivars_size)
{
device_t child;
struct gpiobus_ivar *devi;
+ KASSERT(ivars_size >= sizeof(struct gpiobus_ivar),
+ ("child ivars must include gpiobus_ivar as their first member"));
child = device_add_child_ordered(dev, order, name, unit);
if (child == NULL)
return (child);
- devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
+ devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (devi == NULL) {
device_delete_child(dev, child);
return (NULL);
@@ -701,6 +727,26 @@ gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static device_t
+gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct gpiobus_ivar)));
+}
+
+static void
+gpiobus_child_deleted(device_t dev, device_t child)
+{
+ struct gpiobus_ivar *devi;
+
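+ /* Reclaim the ivars allocated in gpiobus_add_child_common(). */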
+ devi = GPIOBUS_IVAR(child);
+ if (devi == NULL)
+ return;
+ gpiobus_free_ivars(devi);
+ resource_list_free(&devi->rl);
+ free(devi, M_DEVBUF);
+}
+
static int
gpiobus_rescan(device_t dev)
{
@@ -712,7 +758,7 @@ gpiobus_rescan(device_t dev)
* hints or drivers have arrived since we last tried.
*/
bus_enumerate_hinted_children(dev);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -720,7 +766,6 @@ static void
gpiobus_hinted_child(device_t bus, const char *dname, int dunit)
{
struct gpiobus_softc *sc = GPIOBUS_SOFTC(bus);
- struct gpiobus_ivar *devi;
device_t child;
const char *pins;
int irq, pinmask;
@@ -730,19 +775,14 @@ gpiobus_hinted_child(device_t bus, const char *dname, int dunit)
}
child = BUS_ADD_CHILD(bus, 0, dname, dunit);
- devi = GPIOBUS_IVAR(child);
if (resource_int_value(dname, dunit, "pins", &pinmask) == 0) {
if (gpiobus_parse_pins(sc, child, pinmask)) {
- resource_list_free(&devi->rl);
- free(devi, M_DEVBUF);
device_delete_child(bus, child);
return;
}
}
else if (resource_string_value(dname, dunit, "pin_list", &pins) == 0) {
if (gpiobus_parse_pin_list(sc, child, pins)) {
- resource_list_free(&devi->rl);
- free(devi, M_DEVBUF);
device_delete_child(bus, child);
return;
}
@@ -754,7 +794,7 @@ gpiobus_hinted_child(device_t bus, const char *dname, int dunit)
}
}
-static int
+int
gpiobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
struct gpiobus_ivar *devi;
@@ -847,6 +887,25 @@ gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid,
end, count, flags));
}
+static int
+gpiobus_release_resource(device_t dev, device_t child, struct resource *r)
+{
+ int err;
+#ifdef INTRNG
+ u_int irq;
+
+ irq = rman_get_start(r);
+ MPASS(irq == rman_get_end(r));
+#endif
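+ /*
+ * Release the resource first; only drop the INTRNG IRQ mapping set
+ * up by gpio_alloc_intr_resource() once that succeeds.
+ */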
+ err = bus_generic_rman_release_resource(dev, child, r);
+ if (err != 0)
+ return (err);
+#ifdef INTRNG
+ intr_unmap_irq(irq);
+#endif
+ return (0);
+}
+
static struct resource_list *
gpiobus_get_resource_list(device_t bus __unused, device_t child)
{
@@ -933,7 +992,7 @@ gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags);
+ return (GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags));
}
static int
@@ -946,7 +1005,7 @@ gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps);
+ return (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps));
}
static int
@@ -959,7 +1018,7 @@ gpiobus_pin_set(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -972,7 +1031,7 @@ gpiobus_pin_get(device_t dev, device_t child, uint32_t pin,
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value);
+ return (GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value));
}
static int
@@ -984,7 +1043,57 @@ gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin)
if (pin >= devi->npins)
return (EINVAL);
- return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]);
+ return (GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]));
+}
+
+/*
+ * Verify that a child has mapped all the pins it is requesting to
+ * access and that those pins are consecutive on the controller.
+ */
+static bool
+gpiobus_pin_verify_32(struct gpiobus_ivar *devi, uint32_t first_pin,
+ uint32_t num_pins)
+{
+ if (first_pin + num_pins > devi->npins)
+ return (false);
+
+ /* Make sure the pins are consecutive. */
+ for (uint32_t pin = first_pin; pin < first_pin + num_pins - 1; pin++) {
+ if (devi->pins[pin] + 1 != devi->pins[pin + 1])
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+gpiobus_pin_access_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t clear_pins, uint32_t change_pins, uint32_t *orig_pins)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (!gpiobus_pin_verify_32(devi, first_pin, 32))
+ return (EINVAL);
+
+ return (GPIO_PIN_ACCESS_32(sc->sc_dev, devi->pins[first_pin],
+ clear_pins, change_pins, orig_pins));
+}
+
+static int
+gpiobus_pin_config_32(device_t dev, device_t child, uint32_t first_pin,
+ uint32_t num_pins, uint32_t *pin_flags)
+{
+ struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev);
+ struct gpiobus_ivar *devi = GPIOBUS_IVAR(child);
+
+ if (num_pins > 32)
+ return (EINVAL);
+ if (!gpiobus_pin_verify_32(devi, first_pin, num_pins))
+ return (EINVAL);
+
+ return (GPIO_PIN_CONFIG_32(sc->sc_dev,
+ devi->pins[first_pin], num_pins, pin_flags));
}
static int
@@ -1041,12 +1150,13 @@ static device_method_t gpiobus_methods[] = {
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource),
- DEVMETHOD(bus_release_resource, bus_generic_rman_release_resource),
+ DEVMETHOD(bus_release_resource, gpiobus_release_resource),
DEVMETHOD(bus_activate_resource, bus_generic_rman_activate_resource),
DEVMETHOD(bus_deactivate_resource, bus_generic_rman_deactivate_resource),
DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list),
DEVMETHOD(bus_get_rman, gpiobus_get_rman),
DEVMETHOD(bus_add_child, gpiobus_add_child),
+ DEVMETHOD(bus_child_deleted, gpiobus_child_deleted),
DEVMETHOD(bus_rescan, gpiobus_rescan),
DEVMETHOD(bus_probe_nomatch, gpiobus_probe_nomatch),
DEVMETHOD(bus_print_child, gpiobus_print_child),
@@ -1064,6 +1174,8 @@ static device_method_t gpiobus_methods[] = {
DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get),
DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set),
DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle),
+ DEVMETHOD(gpiobus_pin_access_32,gpiobus_pin_access_32),
+ DEVMETHOD(gpiobus_pin_config_32,gpiobus_pin_config_32),
DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname),
DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname),
diff --git a/sys/dev/gpio/gpiobus_if.m b/sys/dev/gpio/gpiobus_if.m
index 8bf29839ef4e..890738c4e809 100644
--- a/sys/dev/gpio/gpiobus_if.m
+++ b/sys/dev/gpio/gpiobus_if.m
@@ -107,6 +107,36 @@ METHOD int pin_setflags {
};
#
+# Simultaneously read and/or change up to 32 adjacent pins.
+# If the device cannot change the pins simultaneously, returns EOPNOTSUPP.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_access_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t clear_pins;
+ uint32_t change_pins;
+ uint32_t *orig_pins;
+};
+
+#
+# Simultaneously configure up to 32 adjacent pins.
+# This is intended to change the configuration of all the pins simultaneously,
+# but unlike pin_access_32, this will not fail if the hardware can't do so.
+#
+# More details about using this interface can be found in sys/gpio.h
+#
+METHOD int pin_config_32 {
+ device_t dev;
+ device_t child;
+ uint32_t first_pin;
+ uint32_t num_pins;
+ uint32_t *pin_flags;
+};
+
+#
# Get the pin name
#
METHOD int pin_getname {
diff --git a/sys/dev/altera/pio/pio.h b/sys/dev/gpio/gpiobus_internal.h
index b1244d721209..58f862343403 100644
--- a/sys/dev/altera/pio/pio.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -1,10 +1,8 @@
/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
+ * SPDX-License-Identifier: BSD-2-Clause
*
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
+ * Copyright (c) 2009 Oleksandr Tymoshenko <gonzo@freebsd.org>
+ * All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,15 +24,27 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ */
+
+#ifndef __GPIOBUS_INTERNAL_H__
+#define __GPIOBUS_INTERNAL_H__
+
+/*
+ * Functions shared between gpiobus and other bus classes that derive from it;
+ * these should not be called directly by other drivers.
*/
+int gpiobus_attach(device_t);
+int gpiobus_detach(device_t);
+int gpiobus_init_softc(device_t);
+int gpiobus_alloc_ivars(struct gpiobus_ivar *);
+void gpiobus_free_ivars(struct gpiobus_ivar *);
+int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *);
+int gpiobus_acquire_pin(device_t, uint32_t);
+void gpiobus_release_pin(device_t, uint32_t);
+int gpiobus_child_location(device_t, device_t, struct sbuf *);
+device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
+int gpiobus_add_gpioc(device_t);
-#define PIO_DATA 0x00
-#define PIO_DIR 0x04
-#define PIO_OUT(n) (1 << n)
-#define PIO_OUT_ALL 0xffffffff
-#define PIO_INT_MASK 0x08
-#define PIO_UNMASK(n) (1 << n)
-#define PIO_UNMASK_ALL 0xffffffff
-#define PIO_EDGECAPT 0x0c
-#define PIO_OUTSET 0x10
-#define PIO_OUTCLR 0x14
+extern driver_t gpiobus_driver;
+#endif
diff --git a/sys/dev/gpio/gpiobusvar.h b/sys/dev/gpio/gpiobusvar.h
index 521132fbac9d..0528efe45525 100644
--- a/sys/dev/gpio/gpiobusvar.h
+++ b/sys/dev/gpio/gpiobusvar.h
@@ -156,6 +156,8 @@ int gpio_pin_get_by_bus_pinnum(device_t _bus, uint32_t _pinnum, gpio_pin_t *_gp)
/* Acquire a pin by child and index (used by direct children of gpiobus). */
int gpio_pin_get_by_child_index(device_t _child, uint32_t _idx, gpio_pin_t *_gp);
+/* Acquire a pin from an existing gpio_pin_t. */
+int gpio_pin_acquire(gpio_pin_t gpio);
/* Release a pin acquired via any gpio_pin_get_xxx() function. */
void gpio_pin_release(gpio_pin_t gpio);
@@ -167,19 +169,8 @@ int gpio_pin_setflags(gpio_pin_t pin, uint32_t flags);
struct resource *gpio_alloc_intr_resource(device_t consumer_dev, int *rid,
u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode);
-/*
- * Functions shared between gpiobus and other bus classes that derive from it;
- * these should not be called directly by other drivers.
- */
int gpio_check_flags(uint32_t, uint32_t);
-device_t gpiobus_attach_bus(device_t);
+device_t gpiobus_add_bus(device_t);
int gpiobus_detach_bus(device_t);
-int gpiobus_init_softc(device_t);
-int gpiobus_alloc_ivars(struct gpiobus_ivar *);
-void gpiobus_free_ivars(struct gpiobus_ivar *);
-int gpiobus_acquire_pin(device_t, uint32_t);
-int gpiobus_release_pin(device_t, uint32_t);
-
-extern driver_t gpiobus_driver;
#endif /* __GPIOBUS_H__ */
diff --git a/sys/dev/gpio/gpioc.c b/sys/dev/gpio/gpioc.c
index 4ebf958d6974..6c6f79227166 100644
--- a/sys/dev/gpio/gpioc.c
+++ b/sys/dev/gpio/gpioc.c
@@ -45,7 +45,6 @@
#include <dev/gpio/gpiobusvar.h>
-#include "gpio_if.h"
#include "gpiobus_if.h"
#undef GPIOC_DEBUG
@@ -59,7 +58,7 @@
struct gpioc_softc {
device_t sc_dev; /* gpiocX dev */
- device_t sc_pdev; /* gpioX dev */
+ device_t sc_pdev; /* gpiobusX dev */
struct cdev *sc_ctl_dev; /* controller device */
int sc_unit;
int sc_npins;
@@ -69,6 +68,7 @@ struct gpioc_softc {
struct gpioc_pin_intr {
struct gpioc_softc *sc;
gpio_pin_t pin;
+ uint32_t intr_mode;
bool config_locked;
int intr_rid;
struct resource *intr_res;
@@ -112,8 +112,10 @@ struct gpioc_pin_event {
static MALLOC_DEFINE(M_GPIOC, "gpioc", "gpioc device data");
-static int gpioc_allocate_pin_intr(struct gpioc_pin_intr*, uint32_t);
-static int gpioc_release_pin_intr(struct gpioc_pin_intr*);
+static int gpioc_allocate_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*, uint32_t, uint32_t);
+static int gpioc_release_pin_intr(struct gpioc_softc*,
+ struct gpioc_pin_intr*);
static int gpioc_attach_priv_pin(struct gpioc_cdevpriv*,
struct gpioc_pin_intr*);
static int gpioc_detach_priv_pin(struct gpioc_cdevpriv*,
@@ -151,7 +153,7 @@ static struct cdevsw gpioc_cdevsw = {
.d_name = "gpioc",
};
-static struct filterops gpioc_read_filterops = {
+static const struct filterops gpioc_read_filterops = {
.f_isfd = true,
.f_attach = NULL,
.f_detach = gpioc_kqdetach,
@@ -191,27 +193,36 @@ number_of_events(struct gpioc_cdevpriv *priv)
}
static int
-gpioc_allocate_pin_intr(struct gpioc_pin_intr *intr_conf, uint32_t flags)
+gpioc_allocate_pin_intr(struct gpioc_softc *sc,
+ struct gpioc_pin_intr *intr_conf, uint32_t pin, uint32_t flags)
{
int err;
intr_conf->config_locked = true;
mtx_unlock(&intr_conf->mtx);
- intr_conf->intr_res = gpio_alloc_intr_resource(intr_conf->pin->dev,
+ MPASS(intr_conf->pin == NULL);
+ err = gpio_pin_get_by_bus_pinnum(sc->sc_pdev, pin, &intr_conf->pin);
+ if (err != 0)
+ goto error_exit;
+
+ intr_conf->intr_res = gpio_alloc_intr_resource(sc->sc_dev,
&intr_conf->intr_rid, RF_ACTIVE, intr_conf->pin, flags);
if (intr_conf->intr_res == NULL) {
err = ENXIO;
- goto error_exit;
+ goto error_pin;
}
- err = bus_setup_intr(intr_conf->pin->dev, intr_conf->intr_res,
+ err = bus_setup_intr(sc->sc_dev, intr_conf->intr_res,
INTR_TYPE_MISC | INTR_MPSAFE, NULL, gpioc_interrupt_handler,
intr_conf, &intr_conf->intr_cookie);
- if (err != 0)
- goto error_exit;
+ if (err != 0) {
+ bus_release_resource(sc->sc_dev, intr_conf->intr_res);
+ intr_conf->intr_res = NULL;
+ goto error_pin;
+ }
- intr_conf->pin->flags = flags;
+ intr_conf->intr_mode = flags;
error_exit:
mtx_lock(&intr_conf->mtx);
@@ -219,10 +230,15 @@ error_exit:
wakeup(&intr_conf->config_locked);
return (err);
+
+error_pin:
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+ goto error_exit;
}
static int
-gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
+gpioc_release_pin_intr(struct gpioc_softc *sc, struct gpioc_pin_intr *intr_conf)
{
int err;
@@ -230,8 +246,8 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
mtx_unlock(&intr_conf->mtx);
if (intr_conf->intr_cookie != NULL) {
- err = bus_teardown_intr(intr_conf->pin->dev,
- intr_conf->intr_res, intr_conf->intr_cookie);
+ err = bus_teardown_intr(sc->sc_dev, intr_conf->intr_res,
+ intr_conf->intr_cookie);
if (err != 0)
goto error_exit;
else
@@ -239,7 +255,7 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
if (intr_conf->intr_res != NULL) {
- err = bus_release_resource(intr_conf->pin->dev, SYS_RES_IRQ,
+ err = bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
intr_conf->intr_rid, intr_conf->intr_res);
if (err != 0)
goto error_exit;
@@ -249,7 +265,10 @@ gpioc_release_pin_intr(struct gpioc_pin_intr *intr_conf)
}
}
- intr_conf->pin->flags = 0;
+ gpio_pin_release(intr_conf->pin);
+ intr_conf->pin = NULL;
+
+ intr_conf->intr_mode = 0;
err = 0;
error_exit:
@@ -386,7 +405,7 @@ gpioc_get_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
struct gpioc_privs *priv_link;
uint32_t flags;
- flags = intr_conf->pin->flags;
+ flags = intr_conf->intr_mode;
if (flags == 0)
return (0);
@@ -411,7 +430,7 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
int res;
res = 0;
- if (intr_conf->pin->flags == 0 && flags == 0) {
+ if (intr_conf->intr_mode == 0 && flags == 0) {
/* No interrupt configured and none requested: Do nothing. */
return (0);
}
@@ -419,17 +438,17 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
while (intr_conf->config_locked == true)
mtx_sleep(&intr_conf->config_locked, &intr_conf->mtx, 0,
"gpicfg", 0);
- if (intr_conf->pin->flags == 0 && flags != 0) {
+ if (intr_conf->intr_mode == 0 && flags != 0) {
/*
* No interrupt is configured, but one is requested: Allocate
* and setup interrupt on the according pin.
*/
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf, pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags == flags) {
+ } else if (intr_conf->intr_mode == flags) {
/*
* Same interrupt requested as already configured: Attach the
* cdevpriv to the corresponding pin.
@@ -437,14 +456,14 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
res = 0;
- } else if (intr_conf->pin->flags != 0 && flags == 0) {
+ } else if (intr_conf->intr_mode != 0 && flags == 0) {
/*
* Interrupt configured, but none requested: Teardown and
* release the pin when no other cdevpriv is attached. Otherwise
* just detach pin and cdevpriv from each other.
*/
if (gpioc_intr_reconfig_allowed(priv, intr_conf)) {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
}
if (res == 0)
res = gpioc_detach_priv_pin(priv, intr_conf);
@@ -456,9 +475,10 @@ gpioc_set_intr_config(struct gpioc_softc *sc, struct gpioc_cdevpriv *priv,
if (!gpioc_intr_reconfig_allowed(priv, intr_conf))
res = EBUSY;
else {
- res = gpioc_release_pin_intr(intr_conf);
+ res = gpioc_release_pin_intr(sc, intr_conf);
if (res == 0)
- res = gpioc_allocate_pin_intr(intr_conf, flags);
+ res = gpioc_allocate_pin_intr(sc, intr_conf,
+ pin, flags);
if (res == 0)
res = gpioc_attach_priv_pin(priv, intr_conf);
if (res == EEXIST)
@@ -475,18 +495,16 @@ gpioc_interrupt_handler(void *arg)
{
struct gpioc_pin_intr *intr_conf;
struct gpioc_privs *privs;
- struct gpioc_softc *sc;
sbintime_t evtime;
- uint32_t pin_state;
+ bool pin_state;
intr_conf = arg;
- sc = intr_conf->sc;
/* Capture time and pin state first. */
evtime = sbinuptime();
- if (intr_conf->pin->flags & GPIO_INTR_EDGE_BOTH)
- GPIO_PIN_GET(sc->sc_pdev, intr_conf->pin->pin, &pin_state);
- else if (intr_conf->pin->flags & GPIO_INTR_EDGE_RISING)
+ if (intr_conf->intr_mode & GPIO_INTR_EDGE_BOTH)
+ gpio_pin_is_active(intr_conf->pin, &pin_state);
+ else if (intr_conf->intr_mode & GPIO_INTR_EDGE_RISING)
pin_state = true;
else
pin_state = false;
@@ -575,18 +593,11 @@ gpioc_attach(device_t dev)
sc->sc_pdev = device_get_parent(dev);
sc->sc_unit = device_get_unit(dev);
- err = GPIO_PIN_MAX(sc->sc_pdev, &sc->sc_npins);
- sc->sc_npins++; /* Number of pins is one more than max pin number. */
- if (err != 0)
- return (err);
+ sc->sc_npins = gpiobus_get_npins(dev);
sc->sc_pin_intr = malloc(sizeof(struct gpioc_pin_intr) * sc->sc_npins,
M_GPIOC, M_WAITOK | M_ZERO);
for (int i = 0; i < sc->sc_npins; i++) {
- sc->sc_pin_intr[i].pin = malloc(sizeof(struct gpiobus_pin),
- M_GPIOC, M_WAITOK | M_ZERO);
sc->sc_pin_intr[i].sc = sc;
- sc->sc_pin_intr[i].pin->pin = i;
- sc->sc_pin_intr[i].pin->dev = sc->sc_pdev;
mtx_init(&sc->sc_pin_intr[i].mtx, "gpioc pin", NULL, MTX_DEF);
SLIST_INIT(&sc->sc_pin_intr[i].privs);
}
@@ -610,20 +621,16 @@ static int
gpioc_detach(device_t dev)
{
struct gpioc_softc *sc = device_get_softc(dev);
- int err;
if (sc->sc_ctl_dev)
destroy_dev(sc->sc_ctl_dev);
for (int i = 0; i < sc->sc_npins; i++) {
mtx_destroy(&sc->sc_pin_intr[i].mtx);
- free(sc->sc_pin_intr[i].pin, M_GPIOC);
+ MPASS(sc->sc_pin_intr[i].pin == NULL);
}
free(sc->sc_pin_intr, M_GPIOC);
- if ((err = bus_generic_detach(dev)) != 0)
- return (err);
-
return (0);
}
@@ -655,7 +662,7 @@ gpioc_cdevpriv_dtor(void *data)
KASSERT(consistency == 1,
("inconsistent links between pin config and cdevpriv"));
if (gpioc_intr_reconfig_allowed(priv, pin_link->pin)) {
- gpioc_release_pin_intr(pin_link->pin);
+ gpioc_release_pin_intr(priv->sc, pin_link->pin);
}
mtx_unlock(&pin_link->pin->mtx);
SLIST_REMOVE(&priv->pins, pin_link, gpioc_pins, next);
@@ -677,19 +684,18 @@ static int
gpioc_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
struct gpioc_cdevpriv *priv;
- int err;
+ int err = 0;
priv = malloc(sizeof(*priv), M_GPIOC, M_WAITOK | M_ZERO);
priv->sc = dev->si_drv1;
- priv->report_option = GPIO_EVENT_REPORT_DETAIL;
- err = devfs_set_cdevpriv(priv, gpioc_cdevpriv_dtor);
- if (err != 0) {
- gpioc_cdevpriv_dtor(priv);
- return (err);
- }
+
mtx_init(&priv->mtx, "gpioc priv", NULL, MTX_DEF);
knlist_init_mtx(&priv->selinfo.si_note, &priv->mtx);
+ priv->async = false;
+ priv->report_option = GPIO_EVENT_REPORT_DETAIL;
+ priv->sigio = NULL;
+
/*
* Allocate a circular buffer for events. The scheme we use for summary
* reporting assumes there will always be a pair of events available to
@@ -698,10 +704,16 @@ gpioc_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
* npins isn't a horrible fifo size for that either.
*/
priv->numevents = priv->sc->sc_npins * 2;
- priv->events = malloc(priv->numevents * sizeof(struct gpio_event_detail),
+ priv->events = malloc(priv->numevents * sizeof(struct gpioc_pin_event),
M_GPIOC, M_WAITOK | M_ZERO);
- return (0);
+ priv->evidx_head = priv->evidx_tail = 0;
+ SLIST_INIT(&priv->pins);
+
+ err = devfs_set_cdevpriv(priv, gpioc_cdevpriv_dtor);
+ if (err != 0)
+ gpioc_cdevpriv_dtor(priv);
+ return (err);
}
static int
@@ -773,7 +785,6 @@ static int
gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct thread *td)
{
- device_t bus;
int max_pin, res;
struct gpioc_softc *sc = cdev->si_drv1;
struct gpioc_cdevpriv *priv;
@@ -782,32 +793,35 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
struct gpio_access_32 *a32;
struct gpio_config_32 *c32;
struct gpio_event_config *evcfg;
+ struct gpioc_pin_event *tmp;
uint32_t caps, intrflags;
- bus = GPIO_GET_BUS(sc->sc_pdev);
- if (bus == NULL)
- return (EINVAL);
switch (cmd) {
case GPIOMAXPIN:
- max_pin = -1;
- res = GPIO_PIN_MAX(sc->sc_pdev, &max_pin);
+ res = 0;
+ max_pin = sc->sc_npins - 1;
bcopy(&max_pin, arg, sizeof(max_pin));
break;
case GPIOGETCONFIG:
bcopy(arg, &pin, sizeof(pin));
dprintf("get config pin %d\n", pin.gp_pin);
- res = GPIO_PIN_GETFLAGS(sc->sc_pdev, pin.gp_pin,
+ res = GPIOBUS_PIN_GETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
&pin.gp_flags);
/* Fail early */
- if (res)
+ if (res != 0)
break;
res = devfs_get_cdevpriv((void **)&priv);
- if (res)
+ if (res != 0)
break;
pin.gp_flags |= gpioc_get_intr_config(sc, priv,
pin.gp_pin);
- GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &pin.gp_caps);
- GPIOBUS_PIN_GETNAME(bus, pin.gp_pin, pin.gp_name);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ &pin.gp_caps);
+ if (res != 0)
+ break;
+ res = GPIOBUS_PIN_GETNAME(sc->sc_pdev, pin.gp_pin, pin.gp_name);
+ if (res != 0)
+ break;
bcopy(&pin, arg, sizeof(pin));
break;
case GPIOSETCONFIG:
@@ -816,7 +830,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- res = GPIO_PIN_GETCAPS(sc->sc_pdev, pin.gp_pin, &caps);
+ res = GPIOBUS_PIN_GETCAPS(sc->sc_pdev, sc->sc_dev,
+ pin.gp_pin, &caps);
if (res != 0)
break;
res = gpio_check_flags(caps, pin.gp_flags);
@@ -842,8 +857,8 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
}
if (res != 0)
break;
- res = GPIO_PIN_SETFLAGS(sc->sc_pdev, pin.gp_pin,
- (pin.gp_flags & ~GPIO_INTR_MASK));
+ res = GPIOBUS_PIN_SETFLAGS(sc->sc_pdev, sc->sc_dev, pin.gp_pin,
+ pin.gp_flags & ~GPIO_INTR_MASK);
if (res != 0)
break;
res = gpioc_set_intr_config(sc, priv, pin.gp_pin,
@@ -851,67 +866,78 @@ gpioc_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
break;
case GPIOGET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_GET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_GET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
&req.gp_value);
- dprintf("read pin %d -> %d\n",
+ if (res != 0)
+ break;
+ dprintf("read pin %d -> %d\n",
req.gp_pin, req.gp_value);
bcopy(&req, arg, sizeof(req));
break;
case GPIOSET:
bcopy(arg, &req, sizeof(req));
- res = GPIO_PIN_SET(sc->sc_pdev, req.gp_pin,
+ res = GPIOBUS_PIN_SET(sc->sc_pdev, sc->sc_dev, req.gp_pin,
req.gp_value);
- dprintf("write pin %d -> %d\n",
+ dprintf("write pin %d -> %d\n",
req.gp_pin, req.gp_value);
break;
case GPIOTOGGLE:
bcopy(arg, &req, sizeof(req));
- dprintf("toggle pin %d\n",
+ dprintf("toggle pin %d\n",
req.gp_pin);
- res = GPIO_PIN_TOGGLE(sc->sc_pdev, req.gp_pin);
+ res = GPIOBUS_PIN_TOGGLE(sc->sc_pdev, sc->sc_dev, req.gp_pin);
break;
case GPIOSETNAME:
bcopy(arg, &pin, sizeof(pin));
dprintf("set name on pin %d\n", pin.gp_pin);
- res = GPIOBUS_PIN_SETNAME(bus, pin.gp_pin,
+ res = GPIOBUS_PIN_SETNAME(sc->sc_pdev, pin.gp_pin,
pin.gp_name);
break;
case GPIOACCESS32:
a32 = (struct gpio_access_32 *)arg;
- res = GPIO_PIN_ACCESS_32(sc->sc_pdev, a32->first_pin,
- a32->clear_pins, a32->change_pins, &a32->orig_pins);
+ res = GPIOBUS_PIN_ACCESS_32(sc->sc_pdev, sc->sc_dev,
+ a32->first_pin, a32->clear_pins, a32->change_pins,
+ &a32->orig_pins);
break;
case GPIOCONFIG32:
c32 = (struct gpio_config_32 *)arg;
- res = GPIO_PIN_CONFIG_32(sc->sc_pdev, c32->first_pin,
- c32->num_pins, c32->pin_flags);
+ res = GPIOBUS_PIN_CONFIG_32(sc->sc_pdev, sc->sc_dev,
+ c32->first_pin, c32->num_pins, c32->pin_flags);
break;
case GPIOCONFIGEVENTS:
evcfg = (struct gpio_event_config *)arg;
res = devfs_get_cdevpriv((void **)&priv);
if (res != 0)
break;
- /* If any pins have been configured, changes aren't allowed. */
- if (!SLIST_EMPTY(&priv->pins)) {
- res = EINVAL;
- break;
- }
if (evcfg->gp_report_type != GPIO_EVENT_REPORT_DETAIL &&
evcfg->gp_report_type != GPIO_EVENT_REPORT_SUMMARY) {
res = EINVAL;
break;
}
- priv->report_option = evcfg->gp_report_type;
/* Reallocate the events buffer if the user wants it bigger. */
- if (priv->report_option == GPIO_EVENT_REPORT_DETAIL &&
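+ /*
+ * Allocate the new buffer before taking the mutex; an M_WAITOK
+ * allocation may sleep, which is not allowed under priv->mtx.
+ */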
+ tmp = NULL;
+ if (evcfg->gp_report_type == GPIO_EVENT_REPORT_DETAIL &&
priv->numevents < evcfg->gp_fifo_size) {
+ tmp = malloc(evcfg->gp_fifo_size *
+ sizeof(struct gpioc_pin_event), M_GPIOC,
+ M_WAITOK | M_ZERO);
+ }
+ mtx_lock(&priv->mtx);
+ /* If any pins have been configured, changes aren't allowed. */
+ if (!SLIST_EMPTY(&priv->pins)) {
+ mtx_unlock(&priv->mtx);
+ free(tmp, M_GPIOC);
+ res = EINVAL;
+ break;
+ }
+ if (tmp != NULL) {
free(priv->events, M_GPIOC);
+ priv->events = tmp;
priv->numevents = evcfg->gp_fifo_size;
- priv->events = malloc(priv->numevents *
- sizeof(struct gpio_event_detail), M_GPIOC,
- M_WAITOK | M_ZERO);
priv->evidx_head = priv->evidx_tail = 0;
}
+ priv->report_option = evcfg->gp_report_type;
+ mtx_unlock(&priv->mtx);
break;
case FIONBIO:
/*
@@ -1045,9 +1071,6 @@ static device_method_t gpioc_methods[] = {
DEVMETHOD(device_probe, gpioc_probe),
DEVMETHOD(device_attach, gpioc_attach),
DEVMETHOD(device_detach, gpioc_detach),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD_END
};
@@ -1058,5 +1081,5 @@ driver_t gpioc_driver = {
sizeof(struct gpioc_softc)
};
-DRIVER_MODULE(gpioc, gpio, gpioc_driver, 0, 0);
+DRIVER_MODULE(gpioc, gpiobus, gpioc_driver, 0, 0);
MODULE_VERSION(gpioc, 1);
diff --git a/sys/dev/gpio/gpioiic.c b/sys/dev/gpio/gpioiic.c
index 4f24dac23e51..2197d238cf2b 100644
--- a/sys/dev/gpio/gpioiic.c
+++ b/sys/dev/gpio/gpioiic.c
@@ -320,8 +320,9 @@ gpioiic_attach(device_t dev)
#endif
/* Add the bitbang driver as our only child; it will add iicbus. */
- device_add_child(sc->dev, "iicbb", -1);
- return (bus_generic_attach(dev));
+ device_add_child(sc->dev, "iicbb", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -330,7 +331,7 @@ gpioiic_detach(device_t dev)
struct gpioiic_softc *sc = device_get_softc(dev);
int err;
- if ((err = bus_generic_detach(dev)) != 0)
+ if ((err = bus_detach_children(dev)) != 0)
return (err);
gpioiic_cleanup(sc);
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index ba53cb733971..a36c2faef379 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -55,13 +55,13 @@
device_get_nameunit((_sc)->sc_dev), "gpioled", MTX_DEF)
#define GPIOLED_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
-struct gpioled_softc
+struct gpioled_softc
{
device_t sc_dev;
device_t sc_busdev;
struct mtx sc_mtx;
struct cdev *sc_leddev;
- int sc_invert;
+ int sc_softinvert;
};
static void gpioled_control(void *, int);
@@ -69,20 +69,17 @@ static int gpioled_probe(device_t);
static int gpioled_attach(device_t);
static int gpioled_detach(device_t);
-static void
+static void
gpioled_control(void *priv, int onoff)
{
struct gpioled_softc *sc;
sc = (struct gpioled_softc *)priv;
+ if (sc->sc_softinvert)
+ onoff = !onoff;
GPIOLED_LOCK(sc);
- if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- GPIO_PIN_OUTPUT) == 0) {
- if (sc->sc_invert)
- onoff = !onoff;
- GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
- onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
- }
+ GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
GPIOLED_UNLOCK(sc);
}
@@ -95,26 +92,101 @@ gpioled_probe(device_t dev)
}
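+/*
+ * Parse the "invert" and "invmode" hints into either hardware inversion
+ * (GPIO_PIN_INVOUT in *pin_flags) or software inversion (sc_softinvert).
+ * Returns the invert setting, or -1 on error.
+ */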
static int
+gpioled_inv(device_t dev, uint32_t *pin_flags)
+{
+ struct gpioled_softc *sc;
+ int invert;
+ uint32_t pin_caps;
+
+ sc = device_get_softc(dev);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "invert", &invert))
+ invert = 0;
+
+ if (GPIOBUS_PIN_GETCAPS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ &pin_caps) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to get pin caps\n");
+ return (-1);
+ }
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags &= ~GPIO_PIN_INVOUT;
+ sc->sc_softinvert = 0;
+ if (invert) {
+ const char *invmode;
+
+ if (resource_string_value(device_get_name(dev),
+ device_get_unit(dev), "invmode", &invmode))
+ invmode = NULL;
+
+ if (invmode) {
+ if (!strcmp(invmode, "sw"))
+ sc->sc_softinvert = 1;
+ else if (!strcmp(invmode, "hw")) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else {
+ device_printf(sc->sc_dev, "hardware pin inversion not supported\n");
+ return (-1);
+ }
+ } else {
+ if (strcmp(invmode, "auto") != 0)
+ device_printf(sc->sc_dev, "invalid pin inversion mode\n");
+ invmode = NULL;
+ }
+ }
+ /*
+ * Auto inversion mode: use hardware support if available, else
+ * fall back to software emulation.
+ */
+ if (invmode == NULL) {
+ if (pin_caps & GPIO_PIN_INVOUT)
+ *pin_flags |= GPIO_PIN_INVOUT;
+ else
+ sc->sc_softinvert = 1;
+ }
+ }
+ MPASS(!invert ||
+ (((*pin_flags & GPIO_PIN_INVOUT) != 0) && !sc->sc_softinvert) ||
+ (((*pin_flags & GPIO_PIN_INVOUT) == 0) && sc->sc_softinvert));
+ return (invert);
+}
+
+static int
gpioled_attach(device_t dev)
{
struct gpioled_softc *sc;
int state;
const char *name;
+ uint32_t pin_flags;
+ int invert;
sc = device_get_softc(dev);
sc->sc_dev = dev;
sc->sc_busdev = device_get_parent(dev);
GPIOLED_LOCK_INIT(sc);
- state = 0;
-
- if (resource_string_value(device_get_name(dev),
+ if (resource_string_value(device_get_name(dev),
device_get_unit(dev), "name", &name))
name = NULL;
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "invert", &sc->sc_invert);
- resource_int_value(device_get_name(dev),
- device_get_unit(dev), "state", &state);
+
+ if (resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "state", &state))
+ state = 0;
+
+ pin_flags = GPIO_PIN_OUTPUT;
+ invert = gpioled_inv(dev, &pin_flags);
+ if (invert < 0)
+ return (ENXIO);
+ device_printf(sc->sc_dev, "state %d invert %s\n",
+ state, (invert ? (sc->sc_softinvert ? "sw" : "hw") : "no"));
+ if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
+ pin_flags) != 0) {
+ if (bootverbose)
+ device_printf(sc->sc_dev, "unable to set pin flags, %#x\n", pin_flags);
+ return (ENXIO);
+ }
sc->sc_leddev = led_create_state(gpioled_control, sc, name ? name :
device_get_nameunit(dev), state);
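The inversion knobs above come from device hints. A hypothetical /boot/device.hints fragment exercising them (the keys match those read by gpioled_inv() and gpioled_attach(); the pin mask and names are illustrative):

    hint.gpioled.0.at="gpiobus0"
    hint.gpioled.0.pins="1"         # bitmask selecting bus pin 0
    hint.gpioled.0.name="status"
    hint.gpioled.0.state="1"        # LED on at attach
    hint.gpioled.0.invert="1"
    hint.gpioled.0.invmode="auto"   # "hw", "sw", or "auto" (the default)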
diff --git a/sys/dev/gpio/gpiomdio.c b/sys/dev/gpio/gpiomdio.c
index deb9a25bd290..dc43b2783bc5 100644
--- a/sys/dev/gpio/gpiomdio.c
+++ b/sys/dev/gpio/gpiomdio.c
@@ -213,7 +213,6 @@ static device_method_t gpiomdio_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, gpiomdio_probe),
DEVMETHOD(device_attach, gpiomdio_attach),
- DEVMETHOD(device_detach, bus_generic_detach),
/* MDIO interface */
DEVMETHOD(miibus_readreg, gpiomdio_readreg),
diff --git a/sys/dev/gpio/gpiopps.c b/sys/dev/gpio/gpiopps.c
index bb8afa5e062c..82620a50a798 100644
--- a/sys/dev/gpio/gpiopps.c
+++ b/sys/dev/gpio/gpiopps.c
@@ -160,7 +160,7 @@ gpiopps_detach(device_t dev)
if (sc->ires != NULL)
bus_release_resource(dev, SYS_RES_IRQ, sc->irid, sc->ires);
if (sc->gpin != NULL)
- gpiobus_release_pin(GPIO_GET_BUS(sc->gpin->dev), sc->gpin->pin);
+ gpio_pin_release(sc->gpin);
return (0);
}
diff --git a/sys/dev/gpio/gpiospi.c b/sys/dev/gpio/gpiospi.c
index 15517f74f0b3..e9f052b5ee32 100644
--- a/sys/dev/gpio/gpiospi.c
+++ b/sys/dev/gpio/gpiospi.c
@@ -177,8 +177,9 @@ gpio_spi_attach(device_t dev)
gpio_spi_chip_deactivate(sc, -1);
- device_add_child(dev, "spibus", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -396,5 +397,5 @@ static driver_t gpio_spi_driver = {
DRIVER_MODULE(gpiospi, gpiobus, gpio_spi_driver, 0, 0);
DRIVER_MODULE(spibus, gpiospi, spibus_driver, 0, 0);
-MODULE_DEPEND(spi, gpiospi, 1, 1, 1);
-MODULE_DEPEND(gpiobus, gpiospi, 1, 1, 1);
+MODULE_DEPEND(gpiospi, spibus, 1, 1, 1);
+MODULE_DEPEND(gpiospi, gpiobus, 1, 1, 1);
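The argument order matters here because the first parameter of the macro names the module declaring the dependency:

    MODULE_DEPEND(name, mod_dependency, min_version, preferred_version, max_version);

The old lines declared spurious dependencies of spi and gpiobus on gpiospi; the corrected lines make gpiospi depend on spibus and gpiobus, which is what the kernel linker needs in order to load them first.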
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index 9173bcd1500c..da1bfbc268b8 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -36,6 +36,7 @@
#include <sys/module.h>
#include <dev/gpio/gpiobusvar.h>
+#include <dev/gpio/gpiobus_internal.h>
#include <dev/ofw/ofw_bus.h>
#include "gpiobus_if.h"
@@ -157,7 +158,7 @@ ofw_gpiobus_add_fdt_child(device_t bus, const char *drvname, phandle_t child)
/*
* Set up the GPIO child and OFW bus layer devinfo and add it to bus.
*/
- childdev = device_add_child(bus, drvname, -1);
+ childdev = device_add_child(bus, drvname, DEVICE_UNIT_ANY);
if (childdev == NULL)
return (NULL);
dinfo = ofw_gpiobus_setup_devinfo(bus, childdev, child);
@@ -425,7 +426,10 @@ ofw_gpiobus_attach(device_t dev)
err = gpiobus_init_softc(dev);
if (err != 0)
return (err);
- bus_generic_probe(dev);
+ err = gpiobus_add_gpioc(dev);
+ if (err != 0)
+ return (err);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
* Attach the children represented in the device tree.
@@ -440,7 +444,8 @@ ofw_gpiobus_attach(device_t dev)
continue;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_t
@@ -449,28 +454,22 @@ ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
device_t child;
struct ofw_gpiobus_devinfo *devi;
- child = device_add_child_ordered(dev, order, name, unit);
+ child = gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct ofw_gpiobus_devinfo));
if (child == NULL)
- return (child);
- devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (devi == NULL) {
- device_delete_child(dev, child);
- return (0);
- }
+ return (NULL);
/*
* NULL all the OFW-related parts of the ivars for non-OFW
* children.
*/
+ devi = device_get_ivars(child);
devi->opd_obdinfo.obd_node = -1;
devi->opd_obdinfo.obd_name = NULL;
devi->opd_obdinfo.obd_compat = NULL;
devi->opd_obdinfo.obd_type = NULL;
devi->opd_obdinfo.obd_model = NULL;
- device_set_ivars(child, devi);
-
return (child);
}
diff --git a/sys/dev/gpio/pl061.c b/sys/dev/gpio/pl061.c
index 76754fead635..9996b0253c7d 100644
--- a/sys/dev/gpio/pl061.c
+++ b/sys/dev/gpio/pl061.c
@@ -460,6 +460,9 @@ pl061_attach(device_t dev)
goto free_mem;
}
+ /* Mask all interrupts. They will be unmasked as needed later */
+ bus_write_1(sc->sc_mem_res, PL061_INTMASK, 0);
+
ret = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
pl061_intr, NULL, sc, &sc->sc_irq_hdlr);
if (ret) {
@@ -484,14 +487,22 @@ pl061_attach(device_t dev)
}
}
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "pl061", MTX_SPIN);
+
+ if (sc->sc_xref != 0 && !intr_pic_register(dev, sc->sc_xref)) {
+ device_printf(dev, "couldn't register PIC\n");
+ PL061_LOCK_DESTROY(sc);
+ goto free_isrc;
+ }
+
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
device_printf(dev, "couldn't attach gpio bus\n");
+ PL061_LOCK_DESTROY(sc);
goto free_isrc;
}
- mtx_init(&sc->sc_mtx, device_get_nameunit(dev), "pl061", MTX_SPIN);
-
+ bus_attach_children(dev);
return (0);
free_isrc:
@@ -500,6 +511,7 @@ free_isrc:
* for (irq = 0; irq < PL061_NUM_GPIO; irq++)
* intr_isrc_deregister(PIC_INTR_ISRC(sc, irq));
*/
+ bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_hdlr);
bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
sc->sc_irq_res);
free_pic:
@@ -546,8 +558,7 @@ static device_method_t pl061_methods[] = {
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
/* GPIO protocol */
DEVMETHOD(gpio_get_bus, pl061_get_bus),
diff --git a/sys/dev/gpio/pl061.h b/sys/dev/gpio/pl061.h
index 809a1168493d..d9fe23e502b1 100644
--- a/sys/dev/gpio/pl061.h
+++ b/sys/dev/gpio/pl061.h
@@ -46,6 +46,7 @@ struct pl061_softc {
struct resource *sc_mem_res;
struct resource *sc_irq_res;
void *sc_irq_hdlr;
+ intptr_t sc_xref;
int sc_mem_rid;
int sc_irq_rid;
struct pl061_pin_irqsrc sc_isrcs[PL061_NUM_GPIO];
diff --git a/sys/dev/gpio/pl061_acpi.c b/sys/dev/gpio/pl061_acpi.c
index f5885025083e..8e9921261e4e 100644
--- a/sys/dev/gpio/pl061_acpi.c
+++ b/sys/dev/gpio/pl061_acpi.c
@@ -67,19 +67,12 @@ pl061_acpi_probe(device_t dev)
static int
pl061_acpi_attach(device_t dev)
{
- int error;
+ struct pl061_softc *sc;
- error = pl061_attach(dev);
- if (error != 0)
- return (error);
+ sc = device_get_softc(dev);
+ sc->sc_xref = ACPI_GPIO_XREF;
- if (!intr_pic_register(dev, ACPI_GPIO_XREF)) {
- device_printf(dev, "couldn't register PIC\n");
- pl061_detach(dev);
- error = ENXIO;
- }
-
- return (error);
+ return (pl061_attach(dev));
}
static device_method_t pl061_acpi_methods[] = {
diff --git a/sys/dev/gpio/pl061_fdt.c b/sys/dev/gpio/pl061_fdt.c
index aa22298b43c6..681b3ccdfdeb 100644
--- a/sys/dev/gpio/pl061_fdt.c
+++ b/sys/dev/gpio/pl061_fdt.c
@@ -61,19 +61,12 @@ pl061_fdt_probe(device_t dev)
static int
pl061_fdt_attach(device_t dev)
{
- int error;
+ struct pl061_softc *sc;
- error = pl061_attach(dev);
- if (error != 0)
- return (error);
+ sc = device_get_softc(dev);
+ sc->sc_xref = OF_xref_from_node(ofw_bus_get_node(dev));
- if (!intr_pic_register(dev, OF_xref_from_node(ofw_bus_get_node(dev)))) {
- device_printf(dev, "couldn't register PIC\n");
- pl061_detach(dev);
- error = ENXIO;
- }
-
- return (error);
+ return (pl061_attach(dev));
}
static device_method_t pl061_fdt_methods[] = {
diff --git a/sys/dev/gpio/qoriq_gpio.c b/sys/dev/gpio/qoriq_gpio.c
index 63a9c3f857f6..d11868a23751 100644
--- a/sys/dev/gpio/qoriq_gpio.c
+++ b/sys/dev/gpio/qoriq_gpio.c
@@ -35,10 +35,10 @@
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/gpio.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/qoriq_gpio.h>
@@ -369,11 +369,6 @@ qoriq_gpio_attach(device_t dev)
for (i = 0; i <= MAXPIN; i++)
sc->sc_pins[i].gp_caps = DEFAULT_CAPS;
- sc->busdev = gpiobus_attach_bus(dev);
- if (sc->busdev == NULL) {
- qoriq_gpio_detach(dev);
- return (ENOMEM);
- }
/*
* Enable the GPIO Input Buffer for all GPIOs.
* This is safe on devices without a GPIBE register, because those
@@ -384,6 +379,13 @@ qoriq_gpio_attach(device_t dev)
OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
+ sc->busdev = gpiobus_add_bus(dev);
+ if (sc->busdev == NULL) {
+ qoriq_gpio_detach(dev);
+ return (ENOMEM);
+ }
+
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/gve/gve.h b/sys/dev/gve/gve.h
index c446199dff2d..64c2a0481817 100644
--- a/sys/dev/gve/gve.h
+++ b/sys/dev/gve/gve.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -47,12 +47,31 @@
#define GVE_TX_MAX_DESCS 4
#define GVE_TX_BUFRING_ENTRIES 4096
+#define GVE_TX_TIMEOUT_PKT_SEC 5
+#define GVE_TX_TIMEOUT_CHECK_CADENCE_SEC 5
+/*
+ * If the driver finds timed-out packets on a tx queue, it first kicks the
+ * queue and records the time. If the driver again finds a timeout on the
+ * same queue before the end of the cooldown period, only then will it reset.
+ * Thus, for a reset to be able to occur at all, the cooldown must be at
+ * least as long as the tx timeout checking cadence multiplied by the number
+ * of queues.
+ */
+#define GVE_TX_TIMEOUT_MAX_TX_QUEUES 16
+#define GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC \
+ (2 * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC * GVE_TX_TIMEOUT_MAX_TX_QUEUES)
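Concretely, with the values above the timeout service wakes every 5 s and inspects one queue per wakeup, so a given queue is re-checked at most every 5 * 16 = 80 s; the kick cooldown of 2 * 5 * 16 = 160 s therefore guarantees at least one more full check of a kicked queue before a reset can be triggered.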
+
+#define GVE_TIMESTAMP_INVALID -1
+
#define ADMINQ_SIZE PAGE_SIZE
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+#define GVE_4K_RX_BUFFER_SIZE_DQO 4096
/* Each RX bounce buffer page can fit two packet buffers. */
#define GVE_DEFAULT_RX_BUFFER_OFFSET (PAGE_SIZE / 2)
+/* PTYPEs are always 10 bits. */
+#define GVE_NUM_PTYPES 1024
+
/*
* Number of descriptors per queue page list.
* Page count AKA QPL size can be derived by dividing the number of elements in
@@ -60,8 +79,17 @@
*/
#define GVE_QPL_DIVISOR 16
+/* Ring Size Limits */
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+
static MALLOC_DEFINE(M_GVE, "gve", "gve allocations");
+_Static_assert(MCLBYTES >= GVE_DEFAULT_RX_BUFFER_SIZE,
+ "gve: bad MCLBYTES length");
+_Static_assert(MJUMPAGESIZE >= GVE_4K_RX_BUFFER_SIZE_DQO,
+ "gve: bad MJUMPAGESIZE length");
+
struct gve_dma_handle {
bus_addr_t bus_addr;
void *cpu_addr;
@@ -102,6 +130,7 @@ enum gve_queue_format {
GVE_GQI_RDA_FORMAT = 0x1,
GVE_GQI_QPL_FORMAT = 0x2,
GVE_DQO_RDA_FORMAT = 0x3,
+ GVE_DQO_QPL_FORMAT = 0x4,
};
enum gve_state_flags_bit {
@@ -223,31 +252,93 @@ struct gve_rxq_stats {
counter_u64_t rx_frag_flip_cnt;
counter_u64_t rx_frag_copy_cnt;
counter_u64_t rx_dropped_pkt_desc_err;
+ counter_u64_t rx_dropped_pkt_buf_post_fail;
counter_u64_t rx_dropped_pkt_mbuf_alloc_fail;
+ counter_u64_t rx_mbuf_dmamap_err;
+ counter_u64_t rx_mbuf_mclget_null;
};
#define NUM_RX_STATS (sizeof(struct gve_rxq_stats) / sizeof(counter_u64_t))
+union gve_rx_qpl_buf_id_dqo {
+ struct {
+ uint16_t buf_id:11; /* Index into rx->dqo.bufs */
+ uint8_t frag_num:5; /* Which frag in the QPL page */
+ };
+ uint16_t all;
+} __packed;
+_Static_assert(sizeof(union gve_rx_qpl_buf_id_dqo) == 2,
+ "gve: bad dqo qpl rx buf id length");
+
+struct gve_rx_buf_dqo {
+ union {
+ /* RDA */
+ struct {
+ struct mbuf *mbuf;
+ bus_dmamap_t dmamap;
+ uint64_t addr;
+ bool mapped;
+ };
+ /* QPL */
+ struct {
+ uint8_t num_nic_frags; /* number of pending completions */
+ uint8_t next_idx; /* index of the next frag to post */
+ /* for chaining rx->dqo.used_bufs */
+ STAILQ_ENTRY(gve_rx_buf_dqo) stailq_entry;
+ };
+ };
+ /* for chaining rx->dqo.free_bufs */
+ SLIST_ENTRY(gve_rx_buf_dqo) slist_entry;
+};
+
/* power-of-2 sized receive ring */
struct gve_rx_ring {
struct gve_ring_com com;
struct gve_dma_handle desc_ring_mem;
- struct gve_dma_handle data_ring_mem;
-
- /* accessed in the receive hot path */
- struct {
- struct gve_rx_desc *desc_ring;
- union gve_rx_data_slot *data_ring;
- struct gve_rx_slot_page_info *page_info;
-
- struct gve_rx_ctx ctx;
- struct lro_ctrl lro;
- uint8_t seq_no; /* helps traverse the descriptor ring */
- uint32_t cnt; /* free-running total number of completed packets */
- uint32_t fill_cnt; /* free-running total number of descs and buffs posted */
- uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
- struct gve_rxq_stats stats;
- } __aligned(CACHE_LINE_SIZE);
+ uint32_t cnt; /* free-running total number of completed packets */
+ uint32_t fill_cnt; /* free-running total number of descs and buffs posted */
+
+ union {
+ /* GQI-only fields */
+ struct {
+ struct gve_dma_handle data_ring_mem;
+
+ /* accessed in the GQ receive hot path */
+ struct gve_rx_desc *desc_ring;
+ union gve_rx_data_slot *data_ring;
+ struct gve_rx_slot_page_info *page_info;
+ uint32_t mask; /* masks the cnt and fill_cnt to the size of the ring */
+ uint8_t seq_no; /* helps traverse the descriptor ring */
+ };
+
+ /* DQO-only fields */
+ struct {
+ struct gve_dma_handle compl_ring_mem;
+
+ struct gve_rx_compl_desc_dqo *compl_ring;
+ struct gve_rx_desc_dqo *desc_ring;
+ struct gve_rx_buf_dqo *bufs; /* Parking place for posted buffers */
+ bus_dma_tag_t buf_dmatag; /* To dmamap posted mbufs with */
+
+ uint32_t buf_cnt; /* Size of the bufs array */
+ uint32_t mask; /* One less than the sizes of the desc and compl rings */
+ uint32_t head; /* The index at which to post the next buffer */
+ uint32_t tail; /* The index at which to receive the next compl */
+ uint8_t cur_gen_bit; /* Gets flipped on every cycle of the compl ring */
+ SLIST_HEAD(, gve_rx_buf_dqo) free_bufs;
+
+ /*
+ * Only used in QPL mode. Pages are moved here once they have no
+ * pending completions, and pages referred to by if_input-ed mbufs
+ * stay parked here until their wire count drops back to 1.
+ */
+ STAILQ_HEAD(, gve_rx_buf_dqo) used_bufs;
+ } dqo;
+ };
+
+ struct lro_ctrl lro;
+ struct gve_rx_ctx ctx;
+ struct gve_rxq_stats stats;
} __aligned(CACHE_LINE_SIZE);
@@ -267,6 +358,14 @@ struct gve_tx_fifo {
struct gve_tx_buffer_state {
struct mbuf *mbuf;
+
+ /*
+ * Time at which the xmit tq places descriptors for the mbuf's payload on a
+ * tx queue. This timestamp is invalidated when the mbuf is freed and
+ * must be checked for validity when read.
+ */
+ int64_t enqueue_time_sec;
+
struct gve_tx_iovec iov[GVE_TX_MAX_DESCS];
};
@@ -275,13 +374,50 @@ struct gve_txq_stats {
counter_u64_t tpackets;
counter_u64_t tso_packet_cnt;
counter_u64_t tx_dropped_pkt;
- counter_u64_t tx_dropped_pkt_nospace_device;
+ counter_u64_t tx_delayed_pkt_nospace_device;
counter_u64_t tx_dropped_pkt_nospace_bufring;
+ counter_u64_t tx_delayed_pkt_nospace_descring;
+ counter_u64_t tx_delayed_pkt_nospace_compring;
+ counter_u64_t tx_delayed_pkt_nospace_qpl_bufs;
+ counter_u64_t tx_delayed_pkt_tsoerr;
counter_u64_t tx_dropped_pkt_vlan;
+ counter_u64_t tx_mbuf_collapse;
+ counter_u64_t tx_mbuf_defrag;
+ counter_u64_t tx_mbuf_defrag_err;
+ counter_u64_t tx_mbuf_dmamap_enomem_err;
+ counter_u64_t tx_mbuf_dmamap_err;
+ counter_u64_t tx_timeout;
};
#define NUM_TX_STATS (sizeof(struct gve_txq_stats) / sizeof(counter_u64_t))
+struct gve_tx_pending_pkt_dqo {
+ struct mbuf *mbuf;
+
+ /*
+ * Time at which the xmit tq places descriptors for the mbuf's payload on a
+ * tx queue. This timestamp is invalidated when the mbuf is freed and
+ * must be checked for validity when read.
+ */
+ int64_t enqueue_time_sec;
+
+ union {
+ /* RDA */
+ bus_dmamap_t dmamap;
+ /* QPL */
+ struct {
+ /*
+ * A linked list of entries from qpl_bufs that served
+ * as the bounce buffer for this packet.
+ */
+ int32_t qpl_buf_head;
+ uint32_t num_qpl_bufs;
+ };
+ };
+ uint8_t state; /* the gve_packet_state enum */
+ int next; /* To chain the free_pending_pkts lists */
+};
+
/* power-of-2 sized transmit ring */
struct gve_tx_ring {
struct gve_ring_com com;
@@ -289,23 +425,134 @@ struct gve_tx_ring {
struct task xmit_task;
struct taskqueue *xmit_tq;
+ bool stopped;
+
+ /* Accessed when writing descriptors */
+ struct buf_ring *br;
+ struct mtx ring_mtx;
+
+ uint32_t req; /* free-running total number of packets written to the nic */
+ uint32_t done; /* free-running total number of completed packets */
+
+ int64_t last_kicked; /* always-valid timestamp in seconds for the last queue kick */
+
+ union {
+ /* GQI specific stuff */
+ struct {
+ union gve_tx_desc *desc_ring;
+ struct gve_tx_buffer_state *info;
+
+ struct gve_tx_fifo fifo;
+
+ uint32_t mask; /* masks the req and done to the size of the ring */
+ };
+
+ /* DQO specific stuff */
+ struct {
+ struct gve_dma_handle compl_ring_mem;
+
+ /* Accessed when writing descriptors */
+ struct {
+ union gve_tx_desc_dqo *desc_ring;
+ uint32_t desc_mask; /* masks head and tail to the size of desc_ring */
+ uint32_t desc_head; /* last desc read by NIC, cached value of hw_tx_head */
+ uint32_t desc_tail; /* last desc written by driver */
+ uint32_t last_re_idx; /* desc which last had "report event" set */
+
+ /*
+ * The head index of a singly linked list containing pending packet objects
+ * to park mbufs till the NIC sends completions. Once this list is depleted,
+ * the "_prd" suffixed producer list, grown by the completion taskqueue,
+ * is stolen.
+ */
+ int32_t free_pending_pkts_csm;
+
+ /*
+ * The head index of a singly linked list representing QPL page fragments
+ * to copy mbuf payload into for the NIC to see. Once this list is depleted,
+ * the "_prd" suffixed producer list, grown by the completion taskqueue,
+ * is stolen.
+ *
+ * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
+ */
+ int32_t free_qpl_bufs_csm;
+ uint32_t qpl_bufs_consumed; /* Allows quickly checking for buf availability */
+ uint32_t qpl_bufs_produced_cached; /* Cached value of qpl_bufs_produced */
+
+ /* DMA params for mapping Tx mbufs. Only used in RDA mode. */
+ bus_dma_tag_t buf_dmatag;
+ } __aligned(CACHE_LINE_SIZE);
+
+ /* Accessed when processing completions */
+ struct {
+ struct gve_tx_compl_desc_dqo *compl_ring;
+ uint32_t compl_mask; /* masks head to the size of compl_ring */
+ uint32_t compl_head; /* last completion read by driver */
+ uint8_t cur_gen_bit; /* NIC flips a bit on every pass */
+ uint32_t hw_tx_head; /* last desc read by NIC */
+
+ /*
+ * The completion taskqueue moves pending-packet objects to this
+ * list after freeing the mbuf. The "_prd" denotes that this is
+ * a producer list. The transmit taskqueue steals this list once
+ * its consumer list, with the "_csm" suffix, is depleted.
+ */
+ int32_t free_pending_pkts_prd;
+
+ /*
+ * The completion taskqueue moves the QPL pages corresponding to a
+ * completed packet into this list. It is only used in QPL mode.
+ * The "_prd" denotes that this is a producer list. The transmit
+ * taskqueue steals this list once its consumer list, with the "_csm"
+ * suffix, is depleted.
+ *
+ * Only used in QPL mode. int32_t because atomic_swap_16 doesn't exist.
+ */
+ int32_t free_qpl_bufs_prd;
+ uint32_t qpl_bufs_produced;
+ } __aligned(CACHE_LINE_SIZE);
+
+ /* Accessed by both the completion and xmit loops */
+ struct {
+ /* completion tags index into this array */
+ struct gve_tx_pending_pkt_dqo *pending_pkts;
+ uint16_t num_pending_pkts;
+
+ /*
+ * Represents QPL page fragments. An index into this array
+ * always denotes the same QPL page fragment. The value stored
+ * is also an index into this array and serves as a means
+ * to chain buffers into linked lists whose heads are
+ * free_qpl_bufs_prd, free_qpl_bufs_csm, or qpl_bufs_head.
+ */
+ int32_t *qpl_bufs;
+ } __aligned(CACHE_LINE_SIZE);
+ } dqo;
+ };
+ struct gve_txq_stats stats;
+} __aligned(CACHE_LINE_SIZE);
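The paired "_csm"/"_prd" lists above implement a lock-free handoff between the xmit and completion taskqueues. A minimal sketch of the consumer side, assuming atomic_swap_32 from machine/atomic.h and a hypothetical helper name (the driver's actual routine may differ):

    static int32_t
    gve_tx_pop_free_pending_pkt(struct gve_tx_ring *tx)
    {
        int32_t head = tx->dqo.free_pending_pkts_csm;

        if (head == -1) {
            /* Consumer list drained: steal the producer list whole. */
            head = (int32_t)atomic_swap_32(
                (volatile uint32_t *)&tx->dqo.free_pending_pkts_prd,
                (uint32_t)-1);
            if (head == -1)
                return (-1);    /* Nothing completed yet either. */
        }
        /* Entries chain through the 'next' index in pending_pkts. */
        tx->dqo.free_pending_pkts_csm = tx->dqo.pending_pkts[head].next;
        return (head);
    }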
- /* accessed in the transmit hot path */
- struct {
- union gve_tx_desc *desc_ring;
- struct gve_tx_buffer_state *info;
- struct buf_ring *br;
-
- struct gve_tx_fifo fifo;
- struct mtx ring_mtx;
+enum gve_packet_state {
+ /*
+ * Packet does not yet have a dmamap created.
+ * This should always be zero since state is not explicitly initialized.
+ */
+ GVE_PACKET_STATE_UNALLOCATED,
+ /* Packet has a dmamap and is in free list, available to be allocated. */
+ GVE_PACKET_STATE_FREE,
+ /* Packet is expecting a regular data completion */
+ GVE_PACKET_STATE_PENDING_DATA_COMPL,
+};
- uint32_t req; /* free-running total number of packets written to the nic */
- uint32_t done; /* free-running total number of completed packets */
- uint32_t mask; /* masks the req and done to the size of the ring */
- struct gve_txq_stats stats;
- } __aligned(CACHE_LINE_SIZE);
+struct gve_ptype {
+ uint8_t l3_type; /* `gve_l3_type` in gve_adminq.h */
+ uint8_t l4_type; /* `gve_l4_type` in gve_adminq.h */
+};
-} __aligned(CACHE_LINE_SIZE);
+struct gve_ptype_lut {
+ struct gve_ptype ptypes[GVE_NUM_PTYPES];
+};
struct gve_priv {
if_t ifp;
@@ -326,12 +573,17 @@ struct gve_priv {
uint16_t num_event_counters;
uint16_t default_num_queues;
uint16_t tx_desc_cnt;
+ uint16_t max_tx_desc_cnt;
+ uint16_t min_tx_desc_cnt;
uint16_t rx_desc_cnt;
+ uint16_t max_rx_desc_cnt;
+ uint16_t min_rx_desc_cnt;
uint16_t rx_pages_per_qpl;
uint64_t max_registered_pages;
uint64_t num_registered_pages;
uint32_t supported_features;
uint16_t max_mtu;
+ bool modify_ringsize_enabled;
struct gve_dma_handle counter_array_mem;
__be32 *counters;
@@ -339,7 +591,6 @@ struct gve_priv {
struct gve_irq_db *irq_db_indices;
enum gve_queue_format queue_format;
- struct gve_queue_page_list *qpls;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
uint32_t num_queues;
@@ -348,6 +599,8 @@ struct gve_priv {
struct gve_tx_ring *tx;
struct gve_rx_ring *rx;
+ struct gve_ptype_lut *ptype_lut_dqo;
+
/*
* Admin queue - see gve_adminq.h
* Since AQ cmds do not run in steady state, 32 bit counters suffice
@@ -370,6 +623,7 @@ struct gve_priv {
uint32_t adminq_dcfg_device_resources_cnt;
uint32_t adminq_set_driver_parameter_cnt;
uint32_t adminq_verify_driver_compatibility_cnt;
+ uint32_t adminq_get_ptype_map_cnt;
uint32_t interface_up_cnt;
uint32_t interface_down_cnt;
@@ -380,6 +634,12 @@ struct gve_priv {
struct gve_state_flags state_flags;
struct sx gve_iface_lock;
+
+ struct callout tx_timeout_service;
+ /* The index of tx queue that the timer service will check on its next invocation */
+ uint16_t check_tx_queue_idx;
+
+ uint16_t rx_buf_size_dqo;
};
static inline bool
@@ -400,39 +660,89 @@ gve_clear_state_flag(struct gve_priv *priv, int pos)
BIT_CLR_ATOMIC(GVE_NUM_STATE_FLAGS, pos, &priv->state_flags);
}
+static inline bool
+gve_is_gqi(struct gve_priv *priv)
+{
+ return (priv->queue_format == GVE_GQI_QPL_FORMAT);
+}
+
+static inline bool
+gve_is_qpl(struct gve_priv *priv)
+{
+ return (priv->queue_format == GVE_GQI_QPL_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT);
+}
+
+static inline bool
+gve_is_4k_rx_buf(struct gve_priv *priv)
+{
+ return (priv->rx_buf_size_dqo == GVE_4K_RX_BUFFER_SIZE_DQO);
+}
+
+static inline bus_size_t
+gve_rx_dqo_mbuf_segment_size(struct gve_priv *priv)
+{
+ return (gve_is_4k_rx_buf(priv) ? MJUMPAGESIZE : MCLBYTES);
+}
+
/* Defined in gve_main.c */
void gve_schedule_reset(struct gve_priv *priv);
+int gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
+int gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt);
+int gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx);
/* Register access functions defined in gve_utils.c */
uint32_t gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset);
void gve_reg_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
void gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
+void gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val);
/* QPL (Queue Page List) functions defined in gve_qpl.c */
-int gve_alloc_qpls(struct gve_priv *priv);
-void gve_free_qpls(struct gve_priv *priv);
+struct gve_queue_page_list *gve_alloc_qpl(struct gve_priv *priv, uint32_t id,
+ int npages, bool single_kva);
+void gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl);
int gve_register_qpls(struct gve_priv *priv);
int gve_unregister_qpls(struct gve_priv *priv);
+void gve_mextadd_free(struct mbuf *mbuf);
/* TX functions defined in gve_tx.c */
-int gve_alloc_tx_rings(struct gve_priv *priv);
-void gve_free_tx_rings(struct gve_priv *priv);
+int gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_tx_rings(struct gve_priv *priv);
int gve_destroy_tx_rings(struct gve_priv *priv);
+int gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx);
int gve_tx_intr(void *arg);
int gve_xmit_ifp(if_t ifp, struct mbuf *mbuf);
void gve_qflush(if_t ifp);
void gve_xmit_tq(void *arg, int pending);
void gve_tx_cleanup_tq(void *arg, int pending);
+/* TX functions defined in gve_tx_dqo.c */
+int gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i);
+void gve_tx_free_ring_dqo(struct gve_priv *priv, int i);
+void gve_clear_tx_ring_dqo(struct gve_priv *priv, int i);
+int gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx);
+int gve_tx_intr_dqo(void *arg);
+int gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr);
+int gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf);
+void gve_tx_cleanup_tq_dqo(void *arg, int pending);
+
/* RX functions defined in gve_rx.c */
-int gve_alloc_rx_rings(struct gve_priv *priv);
-void gve_free_rx_rings(struct gve_priv *priv);
+int gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
+void gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx);
int gve_create_rx_rings(struct gve_priv *priv);
int gve_destroy_rx_rings(struct gve_priv *priv);
int gve_rx_intr(void *arg);
void gve_rx_cleanup_tq(void *arg, int pending);
+/* RX functions defined in gve_rx_dqo.c */
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, int i);
+void gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx);
+void gve_clear_rx_ring_dqo(struct gve_priv *priv, int i);
+int gve_rx_intr_dqo(void *arg);
+void gve_rx_cleanup_tq_dqo(void *arg, int pending);
+
/* DMA functions defined in gve_utils.c */
int gve_dma_alloc_coherent(struct gve_priv *priv, int size, int align,
struct gve_dma_handle *dma);
@@ -447,7 +757,17 @@ int gve_alloc_irqs(struct gve_priv *priv);
void gve_unmask_all_queue_irqs(struct gve_priv *priv);
void gve_mask_all_queue_irqs(struct gve_priv *priv);
-/* Systcl functions defined in gve_sysctl.c*/
+/* Miscellaneous functions defined in gve_utils.c */
+void gve_invalidate_timestamp(int64_t *timestamp_sec);
+int64_t gve_seconds_since(int64_t *timestamp_sec);
+void gve_set_timestamp(int64_t *timestamp_sec);
+bool gve_timestamp_valid(int64_t *timestamp_sec);
+
+/* Sysctl functions defined in gve_sysctl.c */
+extern bool gve_disable_hw_lro;
+extern bool gve_allow_4k_rx_buffers;
+extern char gve_queue_format[8];
+extern char gve_version[8];
void gve_setup_sysctl(struct gve_priv *priv);
void gve_accum_stats(struct gve_priv *priv, uint64_t *rpackets,
uint64_t *rbytes, uint64_t *rx_dropped_pkt, uint64_t *tpackets,
diff --git a/sys/dev/gve/gve_adminq.c b/sys/dev/gve/gve_adminq.c
index 3c332607ebd4..9b59570a2af4 100644
--- a/sys/dev/gve/gve_adminq.c
+++ b/sys/dev/gve/gve_adminq.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -57,6 +57,9 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
struct gve_device_option *option,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_modify_ring **dev_op_modify_ring,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
uint32_t req_feat_mask = be32toh(option->required_features_mask);
@@ -85,6 +88,68 @@ void gve_parse_device_option(struct gve_priv *priv,
*dev_op_gqi_qpl = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_DQO_RDA:
+ if (option_length < sizeof(**dev_op_dqo_rda) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_dqo_rda)) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "DQO RDA");
+ }
+ *dev_op_dqo_rda = (void *)(option + 1);
+ break;
+
+ case GVE_DEV_OPT_ID_DQO_QPL:
+ if (option_length < sizeof(**dev_op_dqo_qpl) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_dqo_qpl)) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "DQO QPL");
+ }
+ *dev_op_dqo_qpl = (void *)(option + 1);
+ break;
+
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < (sizeof(**dev_op_modify_ring) -
+ sizeof(struct gve_ring_size_bound)) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ device_printf(priv->dev, GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Modify Ring");
+ }
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* Min ring size included; set the minimum ring size. */
+ if (option_length == sizeof(**dev_op_modify_ring)) {
+ priv->min_rx_desc_cnt = max(
+ be16toh((*dev_op_modify_ring)->min_ring_size.rx),
+ GVE_DEFAULT_MIN_RX_RING_SIZE);
+ priv->min_tx_desc_cnt = max(
+ be16toh((*dev_op_modify_ring)->min_ring_size.tx),
+ GVE_DEFAULT_MIN_TX_RING_SIZE);
+ }
+ break;
+
case GVE_DEV_OPT_ID_JUMBO_FRAMES:
if (option_length < sizeof(**dev_op_jumbo_frames) ||
req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
@@ -117,6 +182,9 @@ static int
gve_process_device_options(struct gve_priv *priv,
struct gve_device_descriptor *descriptor,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
+ struct gve_device_option_modify_ring **dev_op_modify_ring,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
char *desc_end = (char *)descriptor + be16toh(descriptor->total_length);
@@ -130,12 +198,16 @@ gve_process_device_options(struct gve_priv *priv,
if ((char *)(dev_opt + 1) > desc_end ||
(char *)(dev_opt + 1) + be16toh(dev_opt->option_length) > desc_end) {
device_printf(priv->dev,
- "options exceed device_descriptor's total length.\n");
+ "options exceed device descriptor's total length.\n");
return (EINVAL);
}
gve_parse_device_option(priv, descriptor, dev_opt,
- dev_op_gqi_qpl, dev_op_jumbo_frames);
+ dev_op_gqi_qpl,
+ dev_op_dqo_rda,
+ dev_op_dqo_qpl,
+ dev_op_modify_ring,
+ dev_op_jumbo_frames);
dev_opt = (void *)((char *)(dev_opt + 1) + be16toh(dev_opt->option_length));
}
@@ -221,16 +293,38 @@ gve_adminq_create_rx_queue(struct gve_priv *priv, uint32_t queue_index)
cmd.opcode = htobe32(GVE_ADMINQ_CREATE_RX_QUEUE);
cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = htobe32(queue_index),
- .index = htobe32(queue_index),
.ntfy_id = htobe32(rx->com.ntfy_id),
.queue_resources_addr = htobe64(qres_dma->bus_addr),
- .rx_desc_ring_addr = htobe64(rx->desc_ring_mem.bus_addr),
- .rx_data_ring_addr = htobe64(rx->data_ring_mem.bus_addr),
- .queue_page_list_id = htobe32((rx->com.qpl)->id),
.rx_ring_size = htobe16(priv->rx_desc_cnt),
- .packet_buffer_size = htobe16(GVE_DEFAULT_RX_BUFFER_SIZE),
};
+ if (gve_is_gqi(priv)) {
+ cmd.create_rx_queue.rx_desc_ring_addr =
+ htobe64(rx->desc_ring_mem.bus_addr);
+ cmd.create_rx_queue.rx_data_ring_addr =
+ htobe64(rx->data_ring_mem.bus_addr);
+ cmd.create_rx_queue.index =
+ htobe32(queue_index);
+ cmd.create_rx_queue.queue_page_list_id =
+ htobe32((rx->com.qpl)->id);
+ cmd.create_rx_queue.packet_buffer_size =
+ htobe16(GVE_DEFAULT_RX_BUFFER_SIZE);
+ } else {
+ cmd.create_rx_queue.queue_page_list_id =
+ htobe32(GVE_RAW_ADDRESSING_QPL_ID);
+ cmd.create_rx_queue.rx_desc_ring_addr =
+ htobe64(rx->dqo.compl_ring_mem.bus_addr);
+ cmd.create_rx_queue.rx_data_ring_addr =
+ htobe64(rx->desc_ring_mem.bus_addr);
+ cmd.create_rx_queue.rx_buff_ring_size =
+ htobe16(priv->rx_desc_cnt);
+ cmd.create_rx_queue.enable_rsc =
+ !!((if_getcapenable(priv->ifp) & IFCAP_LRO) &&
+ !gve_disable_hw_lro);
+ cmd.create_rx_queue.packet_buffer_size =
+ htobe16(priv->rx_buf_size_dqo);
+ }
+
return (gve_adminq_execute_cmd(priv, &cmd));
}
@@ -272,11 +366,21 @@ gve_adminq_create_tx_queue(struct gve_priv *priv, uint32_t queue_index)
.queue_id = htobe32(queue_index),
.queue_resources_addr = htobe64(qres_dma->bus_addr),
.tx_ring_addr = htobe64(tx->desc_ring_mem.bus_addr),
- .queue_page_list_id = htobe32((tx->com.qpl)->id),
.ntfy_id = htobe32(tx->com.ntfy_id),
.tx_ring_size = htobe16(priv->tx_desc_cnt),
};
+ if (gve_is_gqi(priv)) {
+ cmd.create_tx_queue.queue_page_list_id =
+ htobe32((tx->com.qpl)->id);
+ } else {
+ cmd.create_tx_queue.queue_page_list_id =
+ htobe32(GVE_RAW_ADDRESSING_QPL_ID);
+ cmd.create_tx_queue.tx_comp_ring_addr =
+ htobe64(tx->dqo.compl_ring_mem.bus_addr);
+ cmd.create_tx_queue.tx_comp_ring_size =
+ htobe16(priv->tx_desc_cnt);
+ }
return (gve_adminq_execute_cmd(priv, &cmd));
}
@@ -320,8 +424,18 @@ gve_adminq_set_mtu(struct gve_priv *priv, uint32_t mtu) {
static void
gve_enable_supported_features(struct gve_priv *priv,
uint32_t supported_features_mask,
+ const struct gve_device_option_modify_ring *dev_op_modify_ring,
const struct gve_device_option_jumbo_frames *dev_op_jumbo_frames)
{
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ if (bootverbose)
+ device_printf(priv->dev, "MODIFY RING device option enabled.\n");
+ priv->modify_ringsize_enabled = true;
+ priv->max_rx_desc_cnt = be16toh(dev_op_modify_ring->max_ring_size.rx);
+ priv->max_tx_desc_cnt = be16toh(dev_op_modify_ring->max_ring_size.tx);
+ }
+
if (dev_op_jumbo_frames &&
(supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
if (bootverbose)
@@ -338,6 +452,9 @@ gve_adminq_describe_device(struct gve_priv *priv)
struct gve_device_descriptor *desc;
struct gve_dma_handle desc_mem;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+ struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
+ struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
uint32_t supported_features_mask = 0;
int rc;
@@ -366,12 +483,40 @@ gve_adminq_describe_device(struct gve_priv *priv)
bus_dmamap_sync(desc_mem.tag, desc_mem.map, BUS_DMASYNC_POSTREAD);
- rc = gve_process_device_options(priv, desc, &dev_op_gqi_qpl,
+ /* Default min in case device options don't have min values */
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+
+ rc = gve_process_device_options(priv, desc,
+ &dev_op_gqi_qpl,
+ &dev_op_dqo_rda,
+ &dev_op_dqo_qpl,
+ &dev_op_modify_ring,
&dev_op_jumbo_frames);
if (rc != 0)
goto free_device_descriptor;
- if (dev_op_gqi_qpl != NULL) {
+ if (dev_op_dqo_rda != NULL) {
+ snprintf(gve_queue_format, sizeof(gve_queue_format),
+ "%s", "DQO RDA");
+ priv->queue_format = GVE_DQO_RDA_FORMAT;
+ supported_features_mask = be32toh(
+ dev_op_dqo_rda->supported_features_mask);
+ if (bootverbose)
+ device_printf(priv->dev,
+ "Driver is running with DQO RDA queue format.\n");
+ } else if (dev_op_dqo_qpl != NULL) {
+ snprintf(gve_queue_format, sizeof(gve_queue_format),
+ "%s", "DQO QPL");
+ priv->queue_format = GVE_DQO_QPL_FORMAT;
+ supported_features_mask = be32toh(
+ dev_op_dqo_qpl->supported_features_mask);
+ if (bootverbose)
+ device_printf(priv->dev,
+ "Driver is running with DQO QPL queue format.\n");
+ } else if (dev_op_gqi_qpl != NULL) {
+ snprintf(gve_queue_format, sizeof(gve_queue_format),
+ "%s", "GQI QPL");
priv->queue_format = GVE_GQI_QPL_FORMAT;
supported_features_mask = be32toh(
dev_op_gqi_qpl->supported_features_mask);
@@ -380,7 +525,7 @@ gve_adminq_describe_device(struct gve_priv *priv)
"Driver is running with GQI QPL queue format.\n");
} else {
device_printf(priv->dev, "No compatible queue formats\n");
- rc = (EINVAL);
+ rc = EINVAL;
goto free_device_descriptor;
}
@@ -394,8 +539,12 @@ gve_adminq_describe_device(struct gve_priv *priv)
priv->default_num_queues = be16toh(desc->default_num_queues);
priv->supported_features = supported_features_mask;
+ /* Default max to current in case modify ring size option is disabled */
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+
gve_enable_supported_features(priv, supported_features_mask,
- dev_op_jumbo_frames);
+ dev_op_modify_ring, dev_op_jumbo_frames);
for (i = 0; i < ETHER_ADDR_LEN; i++)
priv->mac[i] = desc->mac[i];
@@ -507,6 +656,41 @@ gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
}
int
+gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+ struct gve_ptype_lut *ptype_lut_dqo)
+{
+ struct gve_adminq_command aq_cmd = (struct gve_adminq_command){};
+ struct gve_ptype_map *ptype_map;
+ struct gve_dma_handle dma;
+ int err = 0;
+ int i;
+
+ err = gve_dma_alloc_coherent(priv, sizeof(*ptype_map), PAGE_SIZE, &dma);
+ if (err)
+ return (err);
+ ptype_map = dma.cpu_addr;
+
+ aq_cmd.opcode = htobe32(GVE_ADMINQ_GET_PTYPE_MAP);
+ aq_cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
+ .ptype_map_len = htobe64(sizeof(*ptype_map)),
+ .ptype_map_addr = htobe64(dma.bus_addr),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &aq_cmd);
+ if (err)
+ goto err;
+
+ /* Populate ptype_lut_dqo. */
+ for (i = 0; i < GVE_NUM_PTYPES; i++) {
+ ptype_lut_dqo->ptypes[i].l3_type = ptype_map->ptypes[i].l3_type;
+ ptype_lut_dqo->ptypes[i].l4_type = ptype_map->ptypes[i].l4_type;
+ }
+err:
+ gve_dma_free_coherent(&dma);
+ return (err);
+}
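A plausible attach-time caller for the new admin command, under the assumption that the LUT is only needed for the DQO formats (the surrounding error handling is illustrative, not the driver's exact code):

    /* Hypothetical usage; only DQO queue formats need the ptype LUT. */
    if (!gve_is_gqi(priv)) {
        priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo),
            M_GVE, M_WAITOK | M_ZERO);
        err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
        if (err != 0) {
            free(priv->ptype_lut_dqo, M_GVE);
            priv->ptype_lut_dqo = NULL;
            return (err);
        }
    }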
+
+int
gve_adminq_alloc(struct gve_priv *priv)
{
int rc;
@@ -543,6 +727,7 @@ gve_adminq_alloc(struct gve_priv *priv)
priv->adminq_destroy_rx_queue_cnt = 0;
priv->adminq_dcfg_device_resources_cnt = 0;
priv->adminq_set_driver_parameter_cnt = 0;
+ priv->adminq_get_ptype_map_cnt = 0;
gve_reg_bar_write_4(priv, GVE_REG_ADMINQ_ADDR,
priv->adminq_bus_addr / ADMINQ_SIZE);
@@ -772,6 +957,10 @@ gve_adminq_issue_cmd(struct gve_priv *priv, struct gve_adminq_command *cmd_orig)
priv->adminq_verify_driver_compatibility_cnt++;
break;
+ case GVE_ADMINQ_GET_PTYPE_MAP:
+ priv->adminq_get_ptype_map_cnt++;
+ break;
+
default:
device_printf(priv->dev, "Unknown AQ command opcode %d\n", opcode);
}
diff --git a/sys/dev/gve/gve_adminq.h b/sys/dev/gve/gve_adminq.h
index 5923e5f353d1..531a844f7d90 100644
--- a/sys/dev/gve/gve_adminq.h
+++ b/sys/dev/gve/gve_adminq.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -137,18 +137,37 @@ _Static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4,
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
+ __be16 tx_comp_ring_entries;
+ __be16 rx_buff_ring_entries;
};
-_Static_assert(sizeof(struct gve_device_option_dqo_rda) == 4,
+_Static_assert(sizeof(struct gve_device_option_dqo_rda) == 8,
+ "gve: bad admin queue struct length");
+
+struct gve_device_option_dqo_qpl {
+ __be32 supported_features_mask;
+ __be16 tx_comp_ring_entries;
+ __be16 rx_buff_ring_entries;
+};
+
+_Static_assert(sizeof(struct gve_device_option_dqo_qpl) == 8,
+ "gve: bad admin queue struct length");
+
+struct gve_ring_size_bound {
+ __be16 rx;
+ __be16 tx;
+};
+
+_Static_assert(sizeof(struct gve_ring_size_bound) == 4,
"gve: bad admin queue struct length");
struct gve_device_option_modify_ring {
__be32 supported_features_mask;
- __be16 max_rx_ring_size;
- __be16 max_tx_ring_size;
+ struct gve_ring_size_bound max_ring_size;
+ struct gve_ring_size_bound min_ring_size;
};
-_Static_assert(sizeof(struct gve_device_option_modify_ring) == 8,
+_Static_assert(sizeof(struct gve_device_option_modify_ring) == 12,
"gve: bad admin queue struct length");
struct gve_device_option_jumbo_frames {
@@ -166,6 +185,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
@@ -180,6 +200,7 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
};
@@ -194,9 +215,8 @@ enum gve_sup_feature_mask {
enum gve_driver_capability {
gve_driver_capability_gqi_qpl = 0,
gve_driver_capability_gqi_rda = 1,
- gve_driver_capability_dqo_qpl = 2, /* reserved for future use */
+ gve_driver_capability_dqo_qpl = 2,
gve_driver_capability_dqo_rda = 3,
- gve_driver_capability_alt_miss_compl = 4,
};
#define GVE_CAP1(a) BIT((int) a)
@@ -209,7 +229,10 @@ enum gve_driver_capability {
* Only a few bits (as shown in `gve_driver_compatibility`) are currently
* defined. The rest are reserved for future use.
*/
-#define GVE_DRIVER_CAPABILITY_FLAGS1 (GVE_CAP1(gve_driver_capability_gqi_qpl))
+#define GVE_DRIVER_CAPABILITY_FLAGS1 \
+ (GVE_CAP1(gve_driver_capability_gqi_qpl) | \
+ GVE_CAP1(gve_driver_capability_dqo_qpl) | \
+ GVE_CAP1(gve_driver_capability_dqo_rda))
#define GVE_DRIVER_CAPABILITY_FLAGS2 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
@@ -282,6 +305,8 @@ struct gve_adminq_create_tx_queue {
_Static_assert(sizeof(struct gve_adminq_create_tx_queue) == 48,
"gve: bad admin queue struct length");
+#define GVE_RAW_ADDRESSING_QPL_ID 0xFFFFFFFF
+
struct gve_adminq_create_rx_queue {
__be32 queue_id;
__be32 index;
@@ -352,6 +377,24 @@ struct stats {
_Static_assert(sizeof(struct stats) == 16,
"gve: bad admin queue struct length");
+/*
+ * Control-path PTYPE definitions; they match the data-path types.
+ */
+struct gve_ptype_entry {
+ uint8_t l3_type;
+ uint8_t l4_type;
+};
+
+struct gve_ptype_map {
+ struct gve_ptype_entry ptypes[1 << 10]; /* PTYPEs are always 10 bits. */
+};
+
+struct gve_adminq_get_ptype_map {
+ __be64 ptype_map_len;
+ __be64 ptype_map_addr;
+};
+
struct gve_adminq_command {
__be32 opcode;
__be32 status;
@@ -368,6 +411,7 @@ struct gve_adminq_command {
struct gve_adminq_set_driver_parameter set_driver_param;
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
+ struct gve_adminq_get_ptype_map get_ptype_map;
uint8_t reserved[56];
};
};
@@ -375,6 +419,24 @@ struct gve_adminq_command {
_Static_assert(sizeof(struct gve_adminq_command) == 64,
"gve: bad admin queue struct length");
+enum gve_l3_type {
+ /* Must be zero so zero initialized LUT is unknown. */
+ GVE_L3_TYPE_UNKNOWN = 0,
+ GVE_L3_TYPE_OTHER,
+ GVE_L3_TYPE_IPV4,
+ GVE_L3_TYPE_IPV6,
+};
+
+enum gve_l4_type {
+ /* Must be zero so zero initialized LUT is unknown. */
+ GVE_L4_TYPE_UNKNOWN = 0,
+ GVE_L4_TYPE_OTHER,
+ GVE_L4_TYPE_TCP,
+ GVE_L4_TYPE_UDP,
+ GVE_L4_TYPE_ICMP,
+ GVE_L4_TYPE_SCTP,
+};
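A hedged sketch of how the LUT filled by gve_adminq_get_ptype_map_dqo() might be consulted on the RX completion path (the helper name is hypothetical; gve_rx_compl_desc_dqo comes from gve_dqo.h):

    static bool
    gve_rx_is_tcp_dqo(const struct gve_priv *priv,
        const struct gve_rx_compl_desc_dqo *desc)
    {
        /* packet_type is the 10-bit index into the ptype LUT. */
        struct gve_ptype pt = priv->ptype_lut_dqo->ptypes[desc->packet_type];

        return (pt.l4_type == GVE_L4_TYPE_TCP);
    }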
+
int gve_adminq_create_rx_queues(struct gve_priv *priv, uint32_t num_queues);
int gve_adminq_create_tx_queues(struct gve_priv *priv, uint32_t num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, uint32_t num_queues);
@@ -387,8 +449,10 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv);
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
void gve_release_adminq(struct gve_priv *priv);
int gve_adminq_register_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl);
+ struct gve_queue_page_list *qpl);
int gve_adminq_unregister_page_list(struct gve_priv *priv, uint32_t page_list_id);
int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
- uint64_t driver_info_len, vm_paddr_t driver_info_addr);
+ uint64_t driver_info_len, vm_paddr_t driver_info_addr);
+int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
+ struct gve_ptype_lut *ptype_lut);
#endif /* _GVE_AQ_H_ */
diff --git a/sys/dev/gve/gve_desc.h b/sys/dev/gve/gve_desc.h
index 5f09cc8b77b8..48c4ac27596b 100644
--- a/sys/dev/gve/gve_desc.h
+++ b/sys/dev/gve/gve_desc.h
@@ -130,10 +130,10 @@ union gve_rx_data_slot {
__be64 addr;
};
-/* GVE Recive Packet Descriptor Seq No */
+/* GVE Receive Packet Descriptor Seq No */
#define GVE_SEQNO(x) (be16toh(x) & 0x7)
-/* GVE Recive Packet Descriptor Flags */
+/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) htobe16(1 << (3 + (x)))
#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
diff --git a/sys/dev/gve/gve_dqo.h b/sys/dev/gve/gve_dqo.h
new file mode 100644
index 000000000000..542f8ff7d888
--- /dev/null
+++ b/sys/dev/gve/gve_dqo.h
@@ -0,0 +1,337 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2024 Google LLC
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* GVE DQO Descriptor formats */
+
+#ifndef _GVE_DESC_DQO_H_
+#define _GVE_DESC_DQO_H_
+
+#include "gve_plat.h"
+
+#define GVE_ITR_ENABLE_BIT_DQO BIT(0)
+#define GVE_ITR_NO_UPDATE_DQO (3 << 3)
+#define GVE_ITR_INTERVAL_DQO_SHIFT 5
+#define GVE_ITR_INTERVAL_DQO_MASK ((1 << 12) - 1)
+#define GVE_TX_IRQ_RATELIMIT_US_DQO 50
+#define GVE_RX_IRQ_RATELIMIT_US_DQO 20
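A minimal sketch of the interrupt-moderation doorbell value these ITR defines imply, assuming the interval field occupies the bits selected by the shift and mask (the helper is hypothetical, mirroring how the Linux gve driver builds the value):

    static inline uint32_t
    gve_itr_val_dqo(uint32_t interval_us)
    {
        return (GVE_ITR_ENABLE_BIT_DQO |
            ((interval_us & GVE_ITR_INTERVAL_DQO_MASK) <<
            GVE_ITR_INTERVAL_DQO_SHIFT));
    }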
+
+#define GVE_TX_MAX_HDR_SIZE_DQO 255
+#define GVE_TX_MIN_TSO_MSS_DQO 88
+
+/*
+ * Ringing the doorbell too often can hurt performance.
+ *
+ * HW requires this value to be at least 8.
+ */
+#define GVE_RX_BUF_THRESH_DQO 32
+
+/*
+ * Start dropping RX fragments if at least this many
+ * buffers cannot be posted to the NIC.
+ */
+#define GVE_RX_DQO_MIN_PENDING_BUFS 128
+
+/*
+ * gve_rx_qpl_buf_id_dqo's 11 bit wide buf_id field limits the total
+ * number of pages per QPL to 2048.
+ */
+#define GVE_RX_NUM_QPL_PAGES_DQO 2048
+
+/* 2K TX buffers for DQO-QPL */
+#define GVE_TX_BUF_SHIFT_DQO 11
+#define GVE_TX_BUF_SIZE_DQO BIT(GVE_TX_BUF_SHIFT_DQO)
+#define GVE_TX_BUFS_PER_PAGE_DQO (PAGE_SIZE >> GVE_TX_BUF_SHIFT_DQO)
+
+#define GVE_TX_NUM_QPL_PAGES_DQO 512
+
+/* Basic TX descriptor (DTYPE 0x0C) */
+struct gve_tx_pkt_desc_dqo {
+ __le64 buf_addr;
+
+ /* Must be GVE_TX_PKT_DESC_DTYPE_DQO (0xc) */
+ uint8_t dtype:5;
+
+ /* Denotes the last descriptor of a packet. */
+ uint8_t end_of_packet:1;
+ uint8_t checksum_offload_enable:1;
+
+ /* If set, will generate a descriptor completion for this descriptor. */
+ uint8_t report_event:1;
+ uint8_t reserved0;
+ __le16 reserved1;
+
+ /* The TX completion for this packet will contain this tag. */
+ __le16 compl_tag;
+ uint16_t buf_size:14;
+ uint16_t reserved2:2;
+} __packed;
+_Static_assert(sizeof(struct gve_tx_pkt_desc_dqo) == 16,
+ "gve: bad dqo desc struct length");
+
+#define GVE_TX_PKT_DESC_DTYPE_DQO 0xc
+
+/*
+ * Maximum number of data descriptors allowed per packet, or per TSO segment.
+ */
+#define GVE_TX_MAX_DATA_DESCS_DQO 10
+#define GVE_TX_MAX_BUF_SIZE_DQO ((16 * 1024) - 1)
+#define GVE_TSO_MAXSIZE_DQO IP_MAXPACKET
+
+_Static_assert(GVE_TX_MAX_BUF_SIZE_DQO * GVE_TX_MAX_DATA_DESCS_DQO >=
+ GVE_TSO_MAXSIZE_DQO,
+ "gve: bad tso parameters");
+
+/*
+ * "report_event" on TX packet descriptors may only be reported on the last
+ * descriptor of a TX packet, and they must be spaced apart with at least this
+ * value.
+ */
+#define GVE_TX_MIN_RE_INTERVAL 32
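One way the spacing rule could be enforced when writing packet descriptors, using the desc_tail/last_re_idx/desc_mask bookkeeping added to struct gve_tx_ring in gve.h (a sketch, not necessarily the driver's exact logic):

    /* Request a descriptor completion at most every GVE_TX_MIN_RE_INTERVAL. */
    if (((tx->dqo.desc_tail - tx->dqo.last_re_idx) &
        tx->dqo.desc_mask) >= GVE_TX_MIN_RE_INTERVAL) {
        pkt_desc->report_event = 1;
        tx->dqo.last_re_idx = tx->dqo.desc_tail;
    }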
+
+struct gve_tx_context_cmd_dtype {
+ uint8_t dtype:5;
+ uint8_t tso:1;
+ uint8_t reserved1:2;
+ uint8_t reserved2;
+};
+
+_Static_assert(sizeof(struct gve_tx_context_cmd_dtype) == 2,
+ "gve: bad dqo desc struct length");
+
+/*
+ * TX Native TSO Context DTYPE (0x05)
+ *
+ * "flex" fields allow the driver to send additional packet context to HW.
+ */
+struct gve_tx_tso_context_desc_dqo {
+ /* The L4 payload bytes that should be segmented. */
+ uint32_t tso_total_len:24;
+ uint32_t flex10:8;
+
+ /* Max segment size in TSO excluding headers. */
+ uint16_t mss:14;
+ uint16_t reserved:2;
+
+ uint8_t header_len; /* Header length to use for TSO offload */
+ uint8_t flex11;
+ struct gve_tx_context_cmd_dtype cmd_dtype;
+ uint8_t flex0;
+ uint8_t flex5;
+ uint8_t flex6;
+ uint8_t flex7;
+ uint8_t flex8;
+ uint8_t flex9;
+} __packed;
+_Static_assert(sizeof(struct gve_tx_tso_context_desc_dqo) == 16,
+ "gve: bad dqo desc struct length");
+
+#define GVE_TX_TSO_CTX_DESC_DTYPE_DQO 0x5
+
+/* General context descriptor for sending metadata. */
+struct gve_tx_general_context_desc_dqo {
+ uint8_t flex4;
+ uint8_t flex5;
+ uint8_t flex6;
+ uint8_t flex7;
+ uint8_t flex8;
+ uint8_t flex9;
+ uint8_t flex10;
+ uint8_t flex11;
+ struct gve_tx_context_cmd_dtype cmd_dtype;
+ uint16_t reserved;
+ uint8_t flex0;
+ uint8_t flex1;
+ uint8_t flex2;
+ uint8_t flex3;
+} __packed;
+_Static_assert(sizeof(struct gve_tx_general_context_desc_dqo) == 16,
+ "gve: bad dqo desc struct length");
+
+#define GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO 0x4
+
+/*
+ * Logical structure of metadata which is packed into context descriptor flex
+ * fields.
+ */
+struct gve_tx_metadata_dqo {
+ union {
+ struct {
+ uint8_t version;
+
+ /*
+ * A zero value means no l4_hash was associated with the
+ * mbuf.
+ */
+ uint16_t path_hash:15;
+
+ /*
+ * Should be set to 1 if the flow associated with the
+ * mbuf had a rehash from the TCP stack.
+ */
+ uint16_t rehash_event:1;
+ } __packed;
+ uint8_t bytes[12];
+ };
+} __packed;
+_Static_assert(sizeof(struct gve_tx_metadata_dqo) == 12,
+ "gve: bad dqo desc struct length");
+
+#define GVE_TX_METADATA_VERSION_DQO 0
+
+/* Used to access the generation bit within a TX completion descriptor. */
+#define GVE_TX_DESC_DQO_GEN_BYTE_OFFSET 1
+#define GVE_TX_DESC_DQO_GEN_BIT_MASK 0x80
+
+/* TX completion descriptor */
+struct gve_tx_compl_desc_dqo {
+ /*
+ * For types 0-4 this is the TX queue ID associated with this
+ * completion.
+ */
+ uint16_t id:11;
+
+ /* See: GVE_COMPL_TYPE_DQO* */
+ uint16_t type:3;
+ uint16_t reserved0:1;
+
+ /* Flipped by HW to notify the descriptor is populated. */
+ uint16_t generation:1;
+ union {
+ /*
+ * For descriptor completions, this is the last index fetched
+ * by HW + 1.
+ */
+ __le16 tx_head;
+
+ /*
+ * For packet completions, this is the completion tag set on the
+ * TX packet descriptors.
+ */
+ __le16 completion_tag;
+ };
+ __le32 reserved1;
+} __packed;
+_Static_assert(sizeof(struct gve_tx_compl_desc_dqo) == 8,
+ "gve: bad dqo desc struct length");
+
+union gve_tx_desc_dqo {
+ struct gve_tx_pkt_desc_dqo pkt;
+ struct gve_tx_tso_context_desc_dqo tso_ctx;
+ struct gve_tx_general_context_desc_dqo general_ctx;
+};
+
+#define GVE_COMPL_TYPE_DQO_PKT 0x2 /* Packet completion */
+#define GVE_COMPL_TYPE_DQO_DESC 0x4 /* Descriptor completion */
+
+/* Descriptor to post buffers to HW on buffer queue. */
+struct gve_rx_desc_dqo {
+ __le16 buf_id; /* ID returned in Rx completion descriptor */
+ __le16 reserved0;
+ __le32 reserved1;
+ __le64 buf_addr; /* DMA address of the buffer */
+ __le64 header_buf_addr;
+ __le64 reserved2;
+} __packed;
+_Static_assert(sizeof(struct gve_rx_desc_dqo) == 32,
+ "gve: bad dqo desc struct length");
+
+/* Used to access the generation bit within an RX completion descriptor. */
+#define GVE_RX_DESC_DQO_GEN_BYTE_OFFSET 5
+#define GVE_RX_DESC_DQO_GEN_BIT_MASK 0x40
+
+/* Descriptor for HW to notify SW of new packets received on RX queue. */
+struct gve_rx_compl_desc_dqo {
+ /* Must be 1 */
+ uint8_t rxdid:4;
+ uint8_t reserved0:4;
+
+ /* Packet originated from this system rather than the network. */
+ uint8_t loopback:1;
+ /*
+ * Set when an IPv6 packet contains a destination options header or routing
+ * header.
+ */
+ uint8_t ipv6_ex_add:1;
+ /* Invalid packet was received. */
+ uint8_t rx_error:1;
+ uint8_t reserved1:5;
+
+ uint16_t packet_type:10;
+ uint16_t ip_hdr_err:1;
+ uint16_t udp_len_err:1;
+ uint16_t raw_cs_invalid:1;
+ uint16_t reserved2:3;
+
+ uint16_t packet_len:14;
+ /* Flipped by HW to notify the descriptor is populated. */
+ uint16_t generation:1;
+ /* Should be zero. */
+ uint16_t buffer_queue_id:1;
+
+ uint16_t header_len:10;
+ uint16_t rsc:1;
+ uint16_t split_header:1;
+ uint16_t reserved3:4;
+
+ uint8_t descriptor_done:1;
+ uint8_t end_of_packet:1;
+ uint8_t header_buffer_overflow:1;
+ uint8_t l3_l4_processed:1;
+ uint8_t csum_ip_err:1;
+ uint8_t csum_l4_err:1;
+ uint8_t csum_external_ip_err:1;
+ uint8_t csum_external_udp_err:1;
+
+ uint8_t status_error1;
+
+ __le16 reserved5;
+ __le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
+
+ union {
+ /* Packet checksum. */
+ __le16 raw_cs;
+ /* Segment length for RSC packets. */
+ __le16 rsc_seg_len;
+ };
+ __le32 hash;
+ __le32 reserved6;
+ __le64 reserved7;
+} __packed;
+
+_Static_assert(sizeof(struct gve_rx_compl_desc_dqo) == 32,
+ "gve: bad dqo desc struct length");
+
+static inline uint8_t
+gve_get_dq_num_frags_in_page(struct gve_priv *priv)
+{
+ return (PAGE_SIZE / priv->rx_buf_size_dqo);
+}
+#endif /* _GVE_DESC_DQO_H_ */
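With 4 KiB pages, gve_get_dq_num_frags_in_page() evaluates to 2 for the default 2 KiB buffers (GVE_DEFAULT_RX_BUFFER_SIZE) and to 1 when gve_allow_4k_rx_buffers selects GVE_4K_RX_BUFFER_SIZE_DQO, matching the "two packet buffers per bounce page" note in gve.h.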
diff --git a/sys/dev/gve/gve_main.c b/sys/dev/gve/gve_main.c
index cd7849778bce..10197a8e15f8 100644
--- a/sys/dev/gve/gve_main.c
+++ b/sys/dev/gve/gve_main.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -30,11 +30,12 @@
*/
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
-#define GVE_DRIVER_VERSION "GVE-FBSD-1.0.1\n"
+#define GVE_DRIVER_VERSION "GVE-FBSD-1.3.4\n"
#define GVE_VERSION_MAJOR 1
-#define GVE_VERSION_MINOR 0
-#define GVE_VERSION_SUB 1
+#define GVE_VERSION_MINOR 3
+#define GVE_VERSION_SUB 5
#define GVE_DEFAULT_RX_COPYBREAK 256
@@ -49,6 +50,9 @@ static struct gve_dev {
struct sx gve_global_lock;
+static void gve_start_tx_timeout_service(struct gve_priv *priv);
+static void gve_stop_tx_timeout_service(struct gve_priv *priv);
+
static int
gve_verify_driver_compatibility(struct gve_priv *priv)
{
@@ -98,6 +102,72 @@ gve_verify_driver_compatibility(struct gve_priv *priv)
return (err);
}
+static void
+gve_handle_tx_timeout(struct gve_priv *priv, struct gve_tx_ring *tx,
+ int num_timeout_pkts)
+{
+ int64_t time_since_last_kick;
+
+ counter_u64_add_protected(tx->stats.tx_timeout, 1);
+
+ /* last_kicked is never GVE_TIMESTAMP_INVALID so we can skip checking */
+ time_since_last_kick = gve_seconds_since(&tx->last_kicked);
+
+ /* Try kicking first in case the timeout is due to a missed interrupt */
+ if (time_since_last_kick > GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC) {
+ device_printf(priv->dev,
+ "Found %d timed out packet(s) on txq%d, kicking it for completions\n",
+ num_timeout_pkts, tx->com.id);
+ gve_set_timestamp(&tx->last_kicked);
+ taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
+ } else {
+ device_printf(priv->dev,
+		    "Found %d timed out packet(s) on txq%d; last kick was %jd sec ago, within the %d sec cooldown. Resetting device\n",
+ num_timeout_pkts, tx->com.id,
+ (intmax_t)time_since_last_kick,
+ GVE_TX_TIMEOUT_KICK_COOLDOWN_SEC);
+ gve_schedule_reset(priv);
+ }
+}
+
+static void
+gve_tx_timeout_service_callback(void *data)
+{
+ struct gve_priv *priv = (struct gve_priv *)data;
+ struct gve_tx_ring *tx;
+ uint16_t num_timeout_pkts;
+
+ tx = &priv->tx[priv->check_tx_queue_idx];
+
+ num_timeout_pkts = gve_is_gqi(priv) ?
+ gve_check_tx_timeout_gqi(priv, tx) :
+ gve_check_tx_timeout_dqo(priv, tx);
+ if (num_timeout_pkts)
+ gve_handle_tx_timeout(priv, tx, num_timeout_pkts);
+
+ priv->check_tx_queue_idx = (priv->check_tx_queue_idx + 1) %
+ priv->tx_cfg.num_queues;
+ callout_reset_sbt(&priv->tx_timeout_service,
+ SBT_1S * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC, 0,
+ gve_tx_timeout_service_callback, (void *)priv, 0);
+}
+
+static void
+gve_start_tx_timeout_service(struct gve_priv *priv)
+{
+ priv->check_tx_queue_idx = 0;
+ callout_init(&priv->tx_timeout_service, true);
+ callout_reset_sbt(&priv->tx_timeout_service,
+ SBT_1S * GVE_TX_TIMEOUT_CHECK_CADENCE_SEC, 0,
+ gve_tx_timeout_service_callback, (void *)priv, 0);
+}
+
+static void
+gve_stop_tx_timeout_service(struct gve_priv *priv)
+{
+ callout_drain(&priv->tx_timeout_service);
+}
+
static int
gve_up(struct gve_priv *priv)
{
@@ -124,9 +194,11 @@ gve_up(struct gve_priv *priv)
if (if_getcapenable(ifp) & IFCAP_TSO6)
if_sethwassistbits(ifp, CSUM_IP6_TSO, 0);
- err = gve_register_qpls(priv);
- if (err != 0)
- goto reset;
+ if (gve_is_qpl(priv)) {
+ err = gve_register_qpls(priv);
+ if (err != 0)
+ goto reset;
+ }
err = gve_create_rx_rings(priv);
if (err != 0)
@@ -146,6 +218,9 @@ gve_up(struct gve_priv *priv)
gve_unmask_all_queue_irqs(priv);
gve_set_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
priv->interface_up_cnt++;
+
+ gve_start_tx_timeout_service(priv);
+
return (0);
reset:
@@ -161,6 +236,8 @@ gve_down(struct gve_priv *priv)
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP))
return;
+ gve_stop_tx_timeout_service(priv);
+
if (gve_get_state_flag(priv, GVE_STATE_FLAG_LINK_UP)) {
if_link_state_change(priv->ifp, LINK_STATE_DOWN);
gve_clear_state_flag(priv, GVE_STATE_FLAG_LINK_UP);
@@ -174,10 +251,13 @@ gve_down(struct gve_priv *priv)
if (gve_destroy_tx_rings(priv) != 0)
goto reset;
- if (gve_unregister_qpls(priv) != 0)
- goto reset;
+ if (gve_is_qpl(priv)) {
+ if (gve_unregister_qpls(priv) != 0)
+ goto reset;
+ }
- gve_mask_all_queue_irqs(priv);
+ if (gve_is_gqi(priv))
+ gve_mask_all_queue_irqs(priv);
gve_clear_state_flag(priv, GVE_STATE_FLAG_QUEUES_UP);
priv->interface_down_cnt++;
return;
@@ -186,10 +266,143 @@ reset:
gve_schedule_reset(priv);
}
+int
+gve_adjust_rx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+ int err;
+
+ GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+ gve_down(priv);
+
+ if (new_queue_cnt < priv->rx_cfg.num_queues) {
+ /*
+ * Freeing a ring still preserves its ntfy_id,
+ * which is needed if we create the ring again.
+ */
+ gve_free_rx_rings(priv, new_queue_cnt, priv->rx_cfg.num_queues);
+ } else {
+ err = gve_alloc_rx_rings(priv, priv->rx_cfg.num_queues, new_queue_cnt);
+ if (err != 0) {
+			device_printf(priv->dev,
+			    "Failed to allocate new queues\n");
+			/* Failed to allocate rings, start back up with old ones */
+			gve_up(priv);
+			return (err);
+		}
+ }
+ priv->rx_cfg.num_queues = new_queue_cnt;
+
+ err = gve_up(priv);
+ if (err != 0)
+ gve_schedule_reset(priv);
+
+ return (err);
+}
+
+int
+gve_adjust_tx_queues(struct gve_priv *priv, uint16_t new_queue_cnt)
+{
+ int err;
+
+ GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+ gve_down(priv);
+
+ if (new_queue_cnt < priv->tx_cfg.num_queues) {
+ /*
+ * Freeing a ring still preserves its ntfy_id,
+ * which is needed if we create the ring again.
+ */
+ gve_free_tx_rings(priv, new_queue_cnt, priv->tx_cfg.num_queues);
+ } else {
+ err = gve_alloc_tx_rings(priv, priv->tx_cfg.num_queues, new_queue_cnt);
+ if (err != 0) {
+			device_printf(priv->dev,
+			    "Failed to allocate new queues\n");
+			/* Failed to allocate rings, start back up with old ones */
+			gve_up(priv);
+			return (err);
+		}
+ }
+ priv->tx_cfg.num_queues = new_queue_cnt;
+
+ err = gve_up(priv);
+ if (err != 0)
+ gve_schedule_reset(priv);
+
+ return (err);
+}
+
+int
+gve_adjust_ring_sizes(struct gve_priv *priv, uint16_t new_desc_cnt, bool is_rx)
+{
+ int err;
+ uint16_t prev_desc_cnt;
+
+ GVE_IFACE_LOCK_ASSERT(priv->gve_iface_lock);
+
+ gve_down(priv);
+
+ if (is_rx) {
+ gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ prev_desc_cnt = priv->rx_desc_cnt;
+ priv->rx_desc_cnt = new_desc_cnt;
+ err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ if (err != 0) {
+ device_printf(priv->dev,
+			    "Failed to allocate rings. Trying to start back up with previous ring size.\n");
+ priv->rx_desc_cnt = prev_desc_cnt;
+ err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ }
+ } else {
+ gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+ prev_desc_cnt = priv->tx_desc_cnt;
+ priv->tx_desc_cnt = new_desc_cnt;
+ err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+ if (err != 0) {
+ device_printf(priv->dev,
+			    "Failed to allocate rings. Trying to start back up with previous ring size.\n");
+ priv->tx_desc_cnt = prev_desc_cnt;
+ err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+ }
+ }
+
+ if (err != 0) {
+		device_printf(priv->dev,
+		    "Failed to allocate rings! Cannot start device back up!\n");
+ return (err);
+ }
+
+ err = gve_up(priv);
+ if (err != 0) {
+ gve_schedule_reset(priv);
+ return (err);
+ }
+
+ return (0);
+}
+
+static int
+gve_get_dqo_rx_buf_size(struct gve_priv *priv, uint16_t mtu)
+{
+ /*
+ * Use 4k buffers only if mode is DQ, 4k buffers flag is on,
+ * and either hw LRO is enabled or mtu is greater than 2048
+ */
+ if (!gve_is_gqi(priv) && gve_allow_4k_rx_buffers &&
+ (!gve_disable_hw_lro || mtu > GVE_DEFAULT_RX_BUFFER_SIZE))
+ return (GVE_4K_RX_BUFFER_SIZE_DQO);
+
+ return (GVE_DEFAULT_RX_BUFFER_SIZE);
+}
+
static int
gve_set_mtu(if_t ifp, uint32_t new_mtu)
{
struct gve_priv *priv = if_getsoftc(ifp);
+ const uint32_t max_problem_range = 8227;
+ const uint32_t min_problem_range = 7822;
+ uint16_t new_rx_buf_size = gve_get_dqo_rx_buf_size(priv, new_mtu);
int err;
if ((new_mtu > priv->max_mtu) || (new_mtu < ETHERMIN)) {
@@ -198,11 +411,32 @@ gve_set_mtu(if_t ifp, uint32_t new_mtu)
return (EINVAL);
}
+ /*
+ * When hardware LRO is enabled in DQ mode, MTUs within the range
+ * [7822, 8227] trigger hardware issues which cause a drastic drop
+ * in throughput.
+ */
+ if (!gve_is_gqi(priv) && !gve_disable_hw_lro &&
+ new_mtu >= min_problem_range && new_mtu <= max_problem_range &&
+ new_rx_buf_size != GVE_4K_RX_BUFFER_SIZE_DQO) {
+ device_printf(priv->dev,
+		    "Cannot set MTU to %d within the range [%d, %d] while HW LRO is enabled and not using 4k RX Buffers\n",
+ new_mtu, min_problem_range, max_problem_range);
+ return (EINVAL);
+ }
+
err = gve_adminq_set_mtu(priv, new_mtu);
if (err == 0) {
if (bootverbose)
device_printf(priv->dev, "MTU set to %d\n", new_mtu);
if_setmtu(ifp, new_mtu);
+ /* Need to re-alloc RX queues if RX buffer size changed */
+ if (!gve_is_gqi(priv) &&
+ new_rx_buf_size != priv->rx_buf_size_dqo) {
+ gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ priv->rx_buf_size_dqo = new_rx_buf_size;
+ gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ }
} else {
device_printf(priv->dev, "Failed to set MTU to %d\n", new_mtu);
}
@@ -352,18 +586,13 @@ gve_get_counter(if_t ifp, ift_counter cnt)
}
}
-static int
+static void
gve_setup_ifnet(device_t dev, struct gve_priv *priv)
{
int caps = 0;
if_t ifp;
ifp = priv->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(priv->dev, "Failed to allocate ifnet struct\n");
- return (ENXIO);
- }
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setsoftc(ifp, priv);
if_setdev(ifp, dev);
@@ -372,6 +601,18 @@ gve_setup_ifnet(device_t dev, struct gve_priv *priv)
if_settransmitfn(ifp, gve_xmit_ifp);
if_setqflushfn(ifp, gve_qflush);
+ /*
+ * Set TSO limits, must match the arguments to bus_dma_tag_create
+ * when creating tx->dqo.buf_dmatag. Only applies to the RDA mode
+ * because in QPL we copy the entire packet into the bounce buffer
+ * and thus it does not matter how fragmented the mbuf is.
+ */
+ if (!gve_is_gqi(priv) && !gve_is_qpl(priv)) {
+ if_sethwtsomaxsegcount(ifp, GVE_TX_MAX_DATA_DESCS_DQO);
+ if_sethwtsomaxsegsize(ifp, GVE_TX_MAX_BUF_SIZE_DQO);
+ }
+ if_sethwtsomax(ifp, GVE_TSO_MAXSIZE_DQO);
+
#if __FreeBSD_version >= 1400086
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
#else
@@ -401,8 +642,6 @@ gve_setup_ifnet(device_t dev, struct gve_priv *priv)
ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);
-
- return (0);
}
static int
@@ -454,9 +693,14 @@ static void
gve_free_rings(struct gve_priv *priv)
{
gve_free_irqs(priv);
- gve_free_tx_rings(priv);
- gve_free_rx_rings(priv);
- gve_free_qpls(priv);
+
+ gve_free_tx_rings(priv, 0, priv->tx_cfg.num_queues);
+ free(priv->tx, M_GVE);
+ priv->tx = NULL;
+
+ gve_free_rx_rings(priv, 0, priv->rx_cfg.num_queues);
+ free(priv->rx, M_GVE);
+ priv->rx = NULL;
}
static int
@@ -464,15 +708,15 @@ gve_alloc_rings(struct gve_priv *priv)
{
int err;
- err = gve_alloc_qpls(priv);
- if (err != 0)
- goto abort;
-
- err = gve_alloc_rx_rings(priv);
+ priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.max_queues,
+ M_GVE, M_WAITOK | M_ZERO);
+ err = gve_alloc_rx_rings(priv, 0, priv->rx_cfg.num_queues);
if (err != 0)
goto abort;
- err = gve_alloc_tx_rings(priv);
+ priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.max_queues,
+ M_GVE, M_WAITOK | M_ZERO);
+ err = gve_alloc_tx_rings(priv, 0, priv->tx_cfg.num_queues);
if (err != 0)
goto abort;
@@ -488,7 +732,7 @@ abort:
}
static void
-gve_deconfigure_resources(struct gve_priv *priv)
+gve_deconfigure_and_free_device_resources(struct gve_priv *priv)
{
int err;
@@ -506,10 +750,15 @@ gve_deconfigure_resources(struct gve_priv *priv)
gve_free_irq_db_array(priv);
gve_free_counter_array(priv);
+
+ if (priv->ptype_lut_dqo) {
+ free(priv->ptype_lut_dqo, M_GVE);
+ priv->ptype_lut_dqo = NULL;
+ }
}
static int
-gve_configure_resources(struct gve_priv *priv)
+gve_alloc_and_configure_device_resources(struct gve_priv *priv)
{
int err;
@@ -532,13 +781,25 @@ gve_configure_resources(struct gve_priv *priv)
goto abort;
}
+ if (!gve_is_gqi(priv)) {
+ priv->ptype_lut_dqo = malloc(sizeof(*priv->ptype_lut_dqo), M_GVE,
+ M_WAITOK | M_ZERO);
+
+ err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+ if (err != 0) {
+ device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
+ err);
+ goto abort;
+ }
+ }
+
gve_set_state_flag(priv, GVE_STATE_FLAG_RESOURCES_OK);
if (bootverbose)
device_printf(priv->dev, "Configured device resources\n");
return (0);
abort:
- gve_deconfigure_resources(priv);
+ gve_deconfigure_and_free_device_resources(priv);
return (err);
}
@@ -557,7 +818,7 @@ gve_set_queue_cnts(struct gve_priv *priv)
priv->rx_cfg.num_queues);
}
- priv->num_queues = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues;
+ priv->num_queues = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues;
priv->mgmt_msix_idx = priv->num_queues;
}
@@ -603,7 +864,7 @@ static void
gve_destroy(struct gve_priv *priv)
{
gve_down(priv);
- gve_deconfigure_resources(priv);
+ gve_deconfigure_and_free_device_resources(priv);
gve_release_adminq(priv);
}
@@ -616,9 +877,21 @@ gve_restore(struct gve_priv *priv)
if (err != 0)
goto abort;
- err = gve_configure_resources(priv);
- if (err != 0)
+ err = gve_adminq_configure_device_resources(priv);
+ if (err != 0) {
+ device_printf(priv->dev, "Failed to configure device resources: err=%d\n",
+ err);
+		err = ENXIO;
goto abort;
+ }
+ if (!gve_is_gqi(priv)) {
+ err = gve_adminq_get_ptype_map_dqo(priv, priv->ptype_lut_dqo);
+ if (err != 0) {
+ device_printf(priv->dev, "Failed to configure ptype lut: err=%d\n",
+ err);
+ goto abort;
+ }
+ }
err = gve_up(priv);
if (err != 0)
@@ -632,6 +905,25 @@ abort:
}
static void
+gve_clear_device_resources(struct gve_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_event_counters; i++)
+ priv->counters[i] = 0;
+ bus_dmamap_sync(priv->counter_array_mem.tag, priv->counter_array_mem.map,
+ BUS_DMASYNC_PREWRITE);
+
+ for (i = 0; i < priv->num_queues; i++)
+ priv->irq_db_indices[i] = (struct gve_irq_db){};
+ bus_dmamap_sync(priv->irqs_db_mem.tag, priv->irqs_db_mem.map,
+ BUS_DMASYNC_PREWRITE);
+
+ if (priv->ptype_lut_dqo)
+ *priv->ptype_lut_dqo = (struct gve_ptype_lut){0};
+}
+
+static void
gve_handle_reset(struct gve_priv *priv)
{
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_DO_RESET))
@@ -662,6 +954,8 @@ gve_handle_reset(struct gve_priv *priv)
gve_clear_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK);
gve_down(priv);
+ gve_clear_device_resources(priv);
+
gve_restore(priv);
GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
@@ -749,6 +1043,9 @@ gve_attach(device_t dev)
int rid;
int err;
+ snprintf(gve_version, sizeof(gve_version), "%d.%d.%d",
+ GVE_VERSION_MAJOR, GVE_VERSION_MINOR, GVE_VERSION_SUB);
+
priv = device_get_softc(dev);
priv->dev = dev;
GVE_IFACE_LOCK_INIT(priv->gve_iface_lock);
@@ -786,17 +1083,16 @@ gve_attach(device_t dev)
if (err != 0)
goto abort;
- err = gve_configure_resources(priv);
+ err = gve_alloc_and_configure_device_resources(priv);
if (err != 0)
goto abort;
+ priv->rx_buf_size_dqo = gve_get_dqo_rx_buf_size(priv, priv->max_mtu);
err = gve_alloc_rings(priv);
if (err != 0)
goto abort;
- err = gve_setup_ifnet(dev, priv);
- if (err != 0)
- goto abort;
+ gve_setup_ifnet(dev, priv);
priv->rx_copybreak = GVE_DEFAULT_RX_COPYBREAK;
@@ -817,7 +1113,7 @@ gve_attach(device_t dev)
abort:
gve_free_rings(priv);
- gve_deconfigure_resources(priv);
+ gve_deconfigure_and_free_device_resources(priv);
gve_release_adminq(priv);
gve_free_sys_res_mem(priv);
GVE_IFACE_LOCK_DESTROY(priv->gve_iface_lock);
@@ -829,6 +1125,11 @@ gve_detach(device_t dev)
{
struct gve_priv *priv = device_get_softc(dev);
if_t ifp = priv->ifp;
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
ether_ifdetach(ifp);
@@ -845,7 +1146,7 @@ gve_detach(device_t dev)
taskqueue_free(priv->service_tq);
if_free(ifp);
- return (bus_generic_detach(dev));
+ return (0);
}
static device_method_t gve_methods[] = {
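
Both gve_attach() and gve_set_mtu() above size DQO RX buffers through gve_get_dqo_rx_buf_size(): 4K buffers are picked only outside GQI mode, with the 4k tunable on, and with either hardware LRO enabled or an MTU too large for the default 2K buffer. A standalone sketch of that policy follows; the free-standing signature and the literal 2048/4096 values mirror GVE_DEFAULT_RX_BUFFER_SIZE and GVE_4K_RX_BUFFER_SIZE_DQO as assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEFAULT_RX_BUF_SIZE	2048	/* GVE_DEFAULT_RX_BUFFER_SIZE */
#define RX_BUF_SIZE_4K		4096	/* GVE_4K_RX_BUFFER_SIZE_DQO */

static uint16_t
dqo_rx_buf_size(bool is_gqi, bool allow_4k, bool hw_lro_disabled,
    uint16_t mtu)
{
	if (!is_gqi && allow_4k &&
	    (!hw_lro_disabled || mtu > DEFAULT_RX_BUF_SIZE))
		return (RX_BUF_SIZE_4K);
	return (DEFAULT_RX_BUF_SIZE);
}

int
main(void)
{
	/* DQO, 4K allowed, HW LRO on: 4K buffers. */
	printf("%u\n", dqo_rx_buf_size(false, true, false, 1500));
	/* DQO, 4K allowed, HW LRO off, jumbo MTU: still 4K. */
	printf("%u\n", dqo_rx_buf_size(false, true, true, 9000));
	/* GQI never uses 4K buffers. */
	printf("%u\n", dqo_rx_buf_size(true, true, false, 9000));
	return (0);
}
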
diff --git a/sys/dev/gve/gve_plat.h b/sys/dev/gve/gve_plat.h
index ad6bc1c92b36..3185656c5e04 100644
--- a/sys/dev/gve/gve_plat.h
+++ b/sys/dev/gve/gve_plat.h
@@ -85,6 +85,9 @@
typedef uint16_t __be16;
typedef uint32_t __be32;
typedef uint64_t __be64;
+typedef uint16_t __le16;
+typedef uint32_t __le32;
+typedef uint64_t __le64;
#define BIT(nr) (1UL << (nr))
#define FBSD_VERSION_MAJOR (__FreeBSD_version / 100000)
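
The little-endian typedefs added here are plain aliases that only document wire byte order; they perform no conversion themselves, which is why the DQO code converts at each access with htole16()/le16toh() and friends from <sys/endian.h>. A small userspace sketch of the convention; the put_buf_id/get_buf_id helpers are hypothetical names for illustration.

#include <sys/endian.h>		/* htole16()/le16toh() on FreeBSD */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t __le16;	/* alias only, as in gve_plat.h */

static __le16
put_buf_id(uint16_t host_id)
{
	return (htole16(host_id));	/* host order -> wire format */
}

static uint16_t
get_buf_id(__le16 wire_id)
{
	return (le16toh(wire_id));	/* wire format -> host order */
}

int
main(void)
{
	__le16 wire = put_buf_id(0x1234);

	printf("wire=0x%04x host=0x%04x\n", wire, get_buf_id(wire));
	return (0);
}
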
diff --git a/sys/dev/gve/gve_qpl.c b/sys/dev/gve/gve_qpl.c
index 3c6d9af6feee..0e7098dcd4a1 100644
--- a/sys/dev/gve/gve_qpl.c
+++ b/sys/dev/gve/gve_qpl.c
@@ -32,31 +32,13 @@
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
static MALLOC_DEFINE(M_GVE_QPL, "gve qpl", "gve qpl allocations");
-static uint32_t
-gve_num_tx_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return (0);
-
- return (priv->tx_cfg.max_queues);
-}
-
-static uint32_t
-gve_num_rx_qpls(struct gve_priv *priv)
-{
- if (priv->queue_format != GVE_GQI_QPL_FORMAT)
- return (0);
-
- return (priv->rx_cfg.max_queues);
-}
-
-static void
-gve_free_qpl(struct gve_priv *priv, uint32_t id)
+void
+gve_free_qpl(struct gve_priv *priv, struct gve_queue_page_list *qpl)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
int i;
for (i = 0; i < qpl->num_dmas; i++) {
@@ -91,12 +73,14 @@ gve_free_qpl(struct gve_priv *priv, uint32_t id)
if (qpl->dmas != NULL)
free(qpl->dmas, M_GVE_QPL);
+
+ free(qpl, M_GVE_QPL);
}
-static int
+struct gve_queue_page_list *
gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
{
- struct gve_queue_page_list *qpl = &priv->qpls[id];
+ struct gve_queue_page_list *qpl;
int err;
int i;
@@ -104,9 +88,12 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
device_printf(priv->dev, "Reached max number of registered pages %ju > %ju\n",
(uintmax_t)npages + priv->num_registered_pages,
(uintmax_t)priv->max_registered_pages);
- return (EINVAL);
+ return (NULL);
}
+ qpl = malloc(sizeof(struct gve_queue_page_list), M_GVE_QPL,
+ M_WAITOK | M_ZERO);
+
qpl->id = id;
qpl->num_pages = 0;
qpl->num_dmas = 0;
@@ -162,123 +149,111 @@ gve_alloc_qpl(struct gve_priv *priv, uint32_t id, int npages, bool single_kva)
priv->num_registered_pages++;
}
- return (0);
+ return (qpl);
abort:
- gve_free_qpl(priv, id);
- return (err);
+ gve_free_qpl(priv, qpl);
+ return (NULL);
}
-void
-gve_free_qpls(struct gve_priv *priv)
-{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
- int i;
-
- if (num_qpls == 0)
- return;
-
- if (priv->qpls != NULL) {
- for (i = 0; i < num_qpls; i++)
- gve_free_qpl(priv, i);
- free(priv->qpls, M_GVE_QPL);
- }
-}
-
-int gve_alloc_qpls(struct gve_priv *priv)
+int
+gve_register_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
+ struct gve_ring_com *com;
+ struct gve_tx_ring *tx;
+ struct gve_rx_ring *rx;
int err;
int i;
- if (num_qpls == 0)
+ if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
return (0);
- priv->qpls = malloc(num_qpls * sizeof(*priv->qpls), M_GVE_QPL,
- M_WAITOK | M_ZERO);
-
- for (i = 0; i < gve_num_tx_qpls(priv); i++) {
- err = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
- /*single_kva=*/true);
- if (err != 0)
- goto abort;
- }
-
- for (; i < num_qpls; i++) {
- err = gve_alloc_qpl(priv, i, priv->rx_desc_cnt, /*single_kva=*/false);
- if (err != 0)
- goto abort;
- }
-
- return (0);
-
-abort:
- gve_free_qpls(priv);
- return (err);
-}
-
-static int
-gve_unregister_n_qpls(struct gve_priv *priv, int n)
-{
- int err;
- int i;
-
- for (i = 0; i < n; i++) {
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ /* Register TX qpls */
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ tx = &priv->tx[i];
+ com = &tx->com;
+ err = gve_adminq_register_page_list(priv, com->qpl);
if (err != 0) {
device_printf(priv->dev,
- "Failed to unregister qpl %d, err: %d\n",
- priv->qpls[i].id, err);
+ "Failed to register qpl %d, err: %d\n",
+ com->qpl->id, err);
+ /* Caller schedules a reset when this fails */
+ return (err);
}
}
- if (err != 0)
- return (err);
-
- return (0);
-}
-
-int
-gve_register_qpls(struct gve_priv *priv)
-{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
- int err;
- int i;
-
- if (gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
- return (0);
-
- for (i = 0; i < num_qpls; i++) {
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ /* Register RX qpls */
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ rx = &priv->rx[i];
+ com = &rx->com;
+ err = gve_adminq_register_page_list(priv, com->qpl);
if (err != 0) {
device_printf(priv->dev,
"Failed to register qpl %d, err: %d\n",
- priv->qpls[i].id, err);
- goto abort;
+ com->qpl->id, err);
+ /* Caller schedules a reset when this fails */
+ return (err);
}
}
-
gve_set_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
return (0);
-
-abort:
- gve_unregister_n_qpls(priv, i);
- return (err);
}
int
gve_unregister_qpls(struct gve_priv *priv)
{
- int num_qpls = gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv);
int err;
+ int i;
+ struct gve_ring_com *com;
+ struct gve_tx_ring *tx;
+ struct gve_rx_ring *rx;
if (!gve_get_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK))
return (0);
- err = gve_unregister_n_qpls(priv, num_qpls);
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ tx = &priv->tx[i];
+ com = &tx->com;
+ err = gve_adminq_unregister_page_list(priv, com->qpl->id);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to unregister qpl %d, err: %d\n",
+ com->qpl->id, err);
+ }
+ }
+
+ for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ rx = &priv->rx[i];
+ com = &rx->com;
+ err = gve_adminq_unregister_page_list(priv, com->qpl->id);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to unregister qpl %d, err: %d\n",
+ com->qpl->id, err);
+ }
+ }
+
if (err != 0)
return (err);
gve_clear_state_flag(priv, GVE_STATE_FLAG_QPLREG_OK);
return (0);
}
+
+void
+gve_mextadd_free(struct mbuf *mbuf)
+{
+ vm_page_t page = (vm_page_t)mbuf->m_ext.ext_arg1;
+ vm_offset_t va = (vm_offset_t)mbuf->m_ext.ext_arg2;
+
+ /*
+ * Free the page only if this is the last ref.
+ * The interface might no longer exist by the time
+ * this callback is called, see gve_free_qpl.
+ */
+ if (__predict_false(vm_page_unwire_noq(page))) {
+ pmap_qremove(va, 1);
+ kva_free(va, PAGE_SIZE);
+ vm_page_free(page);
+ }
+}
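
gve_mextadd_free() above encodes a wire-count protocol: each mbuf fragment attached to a QPL page holds one wire on that page, and the page is released only when the last reference drops, which lets pages outlive an ifconfig down or even the interface itself. A userspace model of that lifecycle follows; fake_page and the reference choreography are illustrative assumptions, not kernel API.

/* Model of the last-reference-frees rule behind gve_mextadd_free(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_page {
	atomic_uint wire_count;
};

static void
page_wire(struct fake_page *p)
{
	atomic_fetch_add(&p->wire_count, 1);
}

/* Free only when the final wire drops, as the real callback does. */
static void
ext_free(struct fake_page *p)
{
	if (atomic_fetch_sub(&p->wire_count, 1) == 1) {
		printf("last ref dropped: freeing page\n");
		free(p);
	}
}

int
main(void)
{
	struct fake_page *p = malloc(sizeof(*p));

	atomic_init(&p->wire_count, 0);
	page_wire(p);		/* the QPL's own reference */
	page_wire(p);		/* mbuf #1 attaches a fragment */
	page_wire(p);		/* mbuf #2 attaches a fragment */

	ext_free(p);		/* stack frees mbuf #1: page survives */
	ext_free(p);		/* stack frees mbuf #2: page survives */
	ext_free(p);		/* QPL teardown drops the last wire */
	return (0);
}
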
diff --git a/sys/dev/gve/gve_rx.c b/sys/dev/gve/gve_rx.c
index 9be96cf1ee3a..de64375ac4f3 100644
--- a/sys/dev/gve/gve_rx.c
+++ b/sys/dev/gve/gve_rx.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -30,16 +30,14 @@
*/
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
static void
-gve_rx_free_ring(struct gve_priv *priv, int i)
+gve_rx_free_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_rx_ring *rx = &priv->rx[i];
struct gve_ring_com *com = &rx->com;
- /* Safe to call even if never allocated */
- gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
-
if (rx->page_info != NULL) {
free(rx->page_info, M_GVE);
rx->page_info = NULL;
@@ -55,6 +53,26 @@ gve_rx_free_ring(struct gve_priv *priv, int i)
rx->desc_ring = NULL;
}
+ if (com->qpl != NULL) {
+ gve_free_qpl(priv, com->qpl);
+ com->qpl = NULL;
+ }
+}
+
+static void
+gve_rx_free_ring(struct gve_priv *priv, int i)
+{
+ struct gve_rx_ring *rx = &priv->rx[i];
+ struct gve_ring_com *com = &rx->com;
+
+ /* Safe to call even if never allocated */
+ gve_free_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, i);
+ else
+ gve_rx_free_ring_dqo(priv, i);
+
if (com->q_resources != NULL) {
gve_dma_free_coherent(&com->q_resources_mem);
com->q_resources = NULL;
@@ -83,55 +101,82 @@ gve_prefill_rx_slots(struct gve_rx_ring *rx)
}
static int
-gve_rx_alloc_ring(struct gve_priv *priv, int i)
+gve_rx_alloc_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_rx_ring *rx = &priv->rx[i];
struct gve_ring_com *com = &rx->com;
int err;
- com->priv = priv;
- com->id = i;
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(struct gve_rx_desc) * priv->rx_desc_cnt,
+ CACHE_LINE_SIZE, &rx->desc_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc desc ring for rx ring %d", i);
+ goto abort;
+ }
rx->mask = priv->rx_pages_per_qpl - 1;
+ rx->desc_ring = rx->desc_ring_mem.cpu_addr;
- com->qpl = &priv->qpls[priv->tx_cfg.max_queues + i];
+ com->qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues,
+ priv->rx_desc_cnt, /*single_kva=*/false);
if (com->qpl == NULL) {
- device_printf(priv->dev, "No QPL left for rx ring %d", i);
- return (ENOMEM);
+ device_printf(priv->dev,
+ "Failed to alloc QPL for rx ring %d", i);
+ err = ENOMEM;
+ goto abort;
}
- rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info), M_GVE,
- M_WAITOK | M_ZERO);
+ rx->page_info = malloc(priv->rx_desc_cnt * sizeof(*rx->page_info),
+ M_GVE, M_WAITOK | M_ZERO);
+
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(union gve_rx_data_slot) * priv->rx_desc_cnt,
+ CACHE_LINE_SIZE, &rx->data_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc data ring for rx ring %d", i);
+ goto abort;
+ }
+ rx->data_ring = rx->data_ring_mem.cpu_addr;
+
+ gve_prefill_rx_slots(rx);
+ return (0);
+
+abort:
+ gve_rx_free_ring_gqi(priv, i);
+ return (err);
+}
+
+static int
+gve_rx_alloc_ring(struct gve_priv *priv, int i)
+{
+ struct gve_rx_ring *rx = &priv->rx[i];
+ struct gve_ring_com *com = &rx->com;
+ int err;
+
+ com->priv = priv;
+ com->id = i;
gve_alloc_counters((counter_u64_t *)&rx->stats, NUM_RX_STATS);
err = gve_dma_alloc_coherent(priv, sizeof(struct gve_queue_resources),
PAGE_SIZE, &com->q_resources_mem);
if (err != 0) {
- device_printf(priv->dev, "Failed to alloc queue resources for rx ring %d", i);
+ device_printf(priv->dev,
+ "Failed to alloc queue resources for rx ring %d", i);
goto abort;
}
com->q_resources = com->q_resources_mem.cpu_addr;
- err = gve_dma_alloc_coherent(priv,
- sizeof(struct gve_rx_desc) * priv->rx_desc_cnt,
- CACHE_LINE_SIZE, &rx->desc_ring_mem);
- if (err != 0) {
- device_printf(priv->dev, "Failed to alloc desc ring for rx ring %d", i);
- goto abort;
- }
- rx->desc_ring = rx->desc_ring_mem.cpu_addr;
-
- err = gve_dma_alloc_coherent(priv,
- sizeof(union gve_rx_data_slot) * priv->rx_desc_cnt,
- CACHE_LINE_SIZE, &rx->data_ring_mem);
- if (err != 0) {
- device_printf(priv->dev, "Failed to alloc data ring for rx ring %d", i);
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, i);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, i);
+ if (err != 0)
goto abort;
- }
- rx->data_ring = rx->data_ring_mem.cpu_addr;
- gve_prefill_rx_slots(rx);
return (0);
abort:
@@ -140,38 +185,32 @@ abort:
}
int
-gve_alloc_rx_rings(struct gve_priv *priv)
+gve_alloc_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
- int err = 0;
int i;
+ int err;
- priv->rx = malloc(sizeof(struct gve_rx_ring) * priv->rx_cfg.num_queues,
- M_GVE, M_WAITOK | M_ZERO);
+ KASSERT(priv->rx != NULL, ("priv->rx is NULL!"));
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
+ for (i = start_idx; i < stop_idx; i++) {
err = gve_rx_alloc_ring(priv, i);
if (err != 0)
goto free_rings;
}
return (0);
-
free_rings:
- while (i--)
- gve_rx_free_ring(priv, i);
- free(priv->rx, M_GVE);
+ gve_free_rx_rings(priv, start_idx, i);
return (err);
}
void
-gve_free_rx_rings(struct gve_priv *priv)
+gve_free_rx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
int i;
- for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (i = start_idx; i < stop_idx; i++)
gve_rx_free_ring(priv, i);
-
- free(priv->rx, M_GVE);
}
static void
@@ -217,6 +256,11 @@ gve_clear_rx_ring(struct gve_priv *priv, int i)
{
struct gve_rx_ring *rx = &priv->rx[i];
+ if (!gve_is_gqi(priv)) {
+ gve_clear_rx_ring_dqo(priv, i);
+ return;
+ }
+
rx->seq_no = 1;
rx->cnt = 0;
rx->fill_cnt = 0;
@@ -238,14 +282,21 @@ gve_start_rx_ring(struct gve_priv *priv, int i)
rx->lro.ifp = priv->ifp;
}
- NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx);
+ if (gve_is_gqi(priv))
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq, rx);
+ else
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_rx_cleanup_tq_dqo, rx);
com->cleanup_tq = taskqueue_create_fast("gve rx", M_WAITOK,
taskqueue_thread_enqueue, &com->cleanup_tq);
taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET,
"%s rxq %d", device_get_nameunit(priv->dev), i);
- gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
+ if (gve_is_gqi(priv)) {
+ /* GQ RX bufs are prefilled at ring alloc time */
+ gve_db_bar_write_4(priv, com->db_offset, rx->fill_cnt);
+ } else
+ gve_rx_prefill_buffers_dqo(rx);
}
int
@@ -362,24 +413,6 @@ gve_set_rss_type(__be16 flag, struct mbuf *mbuf)
}
static void
-gve_mextadd_free(struct mbuf *mbuf)
-{
- vm_page_t page = (vm_page_t)mbuf->m_ext.ext_arg1;
- vm_offset_t va = (vm_offset_t)mbuf->m_ext.ext_arg2;
-
- /*
- * Free the page only if this is the last ref.
- * The interface might no longer exist by the time
- * this callback is called, see gve_free_qpl.
- */
- if (__predict_false(vm_page_unwire_noq(page))) {
- pmap_qremove(va, 1);
- kva_free(va, PAGE_SIZE);
- vm_page_free(page);
- }
-}
-
-static void
gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
const __be64 offset = htobe64(GVE_DEFAULT_RX_BUFFER_OFFSET);
@@ -676,7 +709,7 @@ gve_rx_cleanup_tq(void *arg, int pending)
* interrupt but they will still be handled by the enqueue below.
* Fragments received after the barrier WILL trigger an interrupt.
*/
- mb();
+ atomic_thread_fence_seq_cst();
if (gve_rx_work_pending(rx)) {
gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
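
The hunk above swaps the legacy mb() for the equivalent atomic_thread_fence_seq_cst(). The shape being protected is store-then-load: unmask the interrupt (the doorbell write), fence, then re-check for work that raced in before the unmask took effect. A userspace sketch of that shape with C11 atomics; the two flags stand in for the doorbell register and the NIC-written descriptors.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool irq_unmasked;
static atomic_bool work_pending;

/* Returns true if cleanup may stop; false if it must run another pass. */
static bool
finish_cleanup(void)
{
	/* 1: allow future interrupts (the doorbell write in the driver). */
	atomic_store_explicit(&irq_unmasked, true, memory_order_relaxed);

	/* 2: full fence so the store cannot be reordered past the load. */
	atomic_thread_fence(memory_order_seq_cst);

	/* 3: re-check for fragments that arrived before the unmask. */
	return (!atomic_load_explicit(&work_pending, memory_order_relaxed));
}

int
main(void)
{
	atomic_init(&irq_unmasked, false);
	atomic_init(&work_pending, false);
	return (finish_cleanup() ? 0 : 1);
}
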
diff --git a/sys/dev/gve/gve_rx_dqo.c b/sys/dev/gve/gve_rx_dqo.c
new file mode 100644
index 000000000000..cf914913da09
--- /dev/null
+++ b/sys/dev/gve/gve_rx_dqo.c
@@ -0,0 +1,1035 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2024 Google LLC
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "gve.h"
+#include "gve_adminq.h"
+#include "gve_dqo.h"
+
+static void
+gve_free_rx_mbufs_dqo(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_dqo *buf;
+ int i;
+
+ if (gve_is_qpl(rx->com.priv))
+ return;
+
+ for (i = 0; i < rx->dqo.buf_cnt; i++) {
+ buf = &rx->dqo.bufs[i];
+ if (!buf->mbuf)
+ continue;
+
+ bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);
+ m_freem(buf->mbuf);
+ buf->mbuf = NULL;
+ }
+}
+
+void
+gve_rx_free_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_rx_ring *rx = &priv->rx[i];
+ struct gve_ring_com *com = &rx->com;
+ int j;
+
+ if (rx->dqo.compl_ring != NULL) {
+ gve_dma_free_coherent(&rx->dqo.compl_ring_mem);
+ rx->dqo.compl_ring = NULL;
+ }
+
+ if (rx->dqo.desc_ring != NULL) {
+ gve_dma_free_coherent(&rx->desc_ring_mem);
+ rx->dqo.desc_ring = NULL;
+ }
+
+ if (rx->dqo.bufs != NULL) {
+ gve_free_rx_mbufs_dqo(rx);
+
+ if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag) {
+ for (j = 0; j < rx->dqo.buf_cnt; j++)
+ if (rx->dqo.bufs[j].mapped)
+ bus_dmamap_destroy(rx->dqo.buf_dmatag,
+ rx->dqo.bufs[j].dmamap);
+ }
+
+ free(rx->dqo.bufs, M_GVE);
+ rx->dqo.bufs = NULL;
+ }
+
+ if (!gve_is_qpl(priv) && rx->dqo.buf_dmatag)
+ bus_dma_tag_destroy(rx->dqo.buf_dmatag);
+
+ if (com->qpl != NULL) {
+ gve_free_qpl(priv, com->qpl);
+ com->qpl = NULL;
+ }
+}
+
+int
+gve_rx_alloc_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_rx_ring *rx = &priv->rx[i];
+ int err;
+ int j;
+
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(struct gve_rx_desc_dqo) * priv->rx_desc_cnt,
+ CACHE_LINE_SIZE, &rx->desc_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc desc ring for rx ring %d", i);
+ goto abort;
+ }
+ rx->dqo.desc_ring = rx->desc_ring_mem.cpu_addr;
+ rx->dqo.mask = priv->rx_desc_cnt - 1;
+
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(struct gve_rx_compl_desc_dqo) * priv->rx_desc_cnt,
+ CACHE_LINE_SIZE, &rx->dqo.compl_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc compl ring for rx ring %d", i);
+ goto abort;
+ }
+ rx->dqo.compl_ring = rx->dqo.compl_ring_mem.cpu_addr;
+ rx->dqo.mask = priv->rx_desc_cnt - 1;
+
+ rx->dqo.buf_cnt = gve_is_qpl(priv) ? GVE_RX_NUM_QPL_PAGES_DQO :
+ priv->rx_desc_cnt;
+ rx->dqo.bufs = malloc(rx->dqo.buf_cnt * sizeof(struct gve_rx_buf_dqo),
+ M_GVE, M_WAITOK | M_ZERO);
+
+ if (gve_is_qpl(priv)) {
+ rx->com.qpl = gve_alloc_qpl(priv, i + priv->tx_cfg.max_queues,
+ GVE_RX_NUM_QPL_PAGES_DQO, /*single_kva=*/false);
+ if (rx->com.qpl == NULL) {
+ device_printf(priv->dev,
+ "Failed to alloc QPL for rx ring %d", i);
+ err = ENOMEM;
+ goto abort;
+ }
+ return (0);
+ }
+
+ bus_size_t max_seg_size = gve_rx_dqo_mbuf_segment_size(priv);
+
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(priv->dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ max_seg_size, /* maxsize */
+ 1, /* nsegments */
+ max_seg_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &rx->dqo.buf_dmatag);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "%s: bus_dma_tag_create failed: %d\n",
+ __func__, err);
+ goto abort;
+ }
+
+ for (j = 0; j < rx->dqo.buf_cnt; j++) {
+ err = bus_dmamap_create(rx->dqo.buf_dmatag, 0,
+ &rx->dqo.bufs[j].dmamap);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "err in creating rx buf dmamap %d: %d",
+ j, err);
+ goto abort;
+ }
+ rx->dqo.bufs[j].mapped = true;
+ }
+
+ return (0);
+
+abort:
+ gve_rx_free_ring_dqo(priv, i);
+ return (err);
+}
+
+static void
+gve_rx_clear_desc_ring_dqo(struct gve_rx_ring *rx)
+{
+ struct gve_ring_com *com = &rx->com;
+ int entries;
+ int i;
+
+ entries = com->priv->rx_desc_cnt;
+ for (i = 0; i < entries; i++)
+ rx->dqo.desc_ring[i] = (struct gve_rx_desc_dqo){};
+
+ bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
+ BUS_DMASYNC_PREWRITE);
+}
+
+static void
+gve_rx_clear_compl_ring_dqo(struct gve_rx_ring *rx)
+{
+ struct gve_ring_com *com = &rx->com;
+ int i;
+
+ for (i = 0; i < com->priv->rx_desc_cnt; i++)
+ rx->dqo.compl_ring[i] = (struct gve_rx_compl_desc_dqo){};
+
+ bus_dmamap_sync(rx->dqo.compl_ring_mem.tag, rx->dqo.compl_ring_mem.map,
+ BUS_DMASYNC_PREWRITE);
+}
+
+void
+gve_clear_rx_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_rx_ring *rx = &priv->rx[i];
+ int j;
+
+ rx->fill_cnt = 0;
+ rx->cnt = 0;
+ rx->dqo.mask = priv->rx_desc_cnt - 1;
+ rx->dqo.head = 0;
+ rx->dqo.tail = 0;
+ rx->dqo.cur_gen_bit = 0;
+
+ gve_rx_clear_desc_ring_dqo(rx);
+ gve_rx_clear_compl_ring_dqo(rx);
+
+ gve_free_rx_mbufs_dqo(rx);
+
+ if (gve_is_qpl(priv)) {
+ SLIST_INIT(&rx->dqo.free_bufs);
+ STAILQ_INIT(&rx->dqo.used_bufs);
+
+ for (j = 0; j < rx->dqo.buf_cnt; j++) {
+ struct gve_rx_buf_dqo *buf = &rx->dqo.bufs[j];
+
+ vm_page_t page = rx->com.qpl->pages[buf - rx->dqo.bufs];
+ u_int ref_count = atomic_load_int(&page->ref_count);
+
+ /*
+ * An ifconfig down+up might see pages still in flight
+ * from the previous innings.
+ */
+ if (VPRC_WIRE_COUNT(ref_count) == 1)
+ SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
+ buf, slist_entry);
+ else
+ STAILQ_INSERT_TAIL(&rx->dqo.used_bufs,
+ buf, stailq_entry);
+
+ buf->num_nic_frags = 0;
+ buf->next_idx = 0;
+ }
+ } else {
+ SLIST_INIT(&rx->dqo.free_bufs);
+ for (j = 0; j < rx->dqo.buf_cnt; j++)
+ SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
+ &rx->dqo.bufs[j], slist_entry);
+ }
+}
+
+int
+gve_rx_intr_dqo(void *arg)
+{
+ struct gve_rx_ring *rx = arg;
+ struct gve_priv *priv = rx->com.priv;
+ struct gve_ring_com *com = &rx->com;
+
+ if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
+ return (FILTER_STRAY);
+
+ /* Interrupts are automatically masked */
+ taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task);
+ return (FILTER_HANDLED);
+}
+
+static void
+gve_rx_advance_head_dqo(struct gve_rx_ring *rx)
+{
+ rx->dqo.head = (rx->dqo.head + 1) & rx->dqo.mask;
+ rx->fill_cnt++; /* rx->fill_cnt is just a sysctl counter */
+
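+	/*
+	 * Write the doorbell only once every GVE_RX_BUF_THRESH_DQO posted
+	 * buffers, batching the MMIO writes.
+	 */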
+ if ((rx->dqo.head & (GVE_RX_BUF_THRESH_DQO - 1)) == 0) {
+ bus_dmamap_sync(rx->desc_ring_mem.tag, rx->desc_ring_mem.map,
+ BUS_DMASYNC_PREWRITE);
+ gve_db_bar_dqo_write_4(rx->com.priv, rx->com.db_offset,
+ rx->dqo.head);
+ }
+}
+
+static void
+gve_rx_post_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
+{
+ struct gve_rx_desc_dqo *desc;
+
+ bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
+ BUS_DMASYNC_PREREAD);
+
+ desc = &rx->dqo.desc_ring[rx->dqo.head];
+ desc->buf_id = htole16(buf - rx->dqo.bufs);
+ desc->buf_addr = htole64(buf->addr);
+
+ gve_rx_advance_head_dqo(rx);
+}
+
+static int
+gve_rx_post_new_mbuf_dqo(struct gve_rx_ring *rx, int how)
+{
+ struct gve_rx_buf_dqo *buf;
+ bus_dma_segment_t segs[1];
+ int nsegs;
+ int err;
+
+ buf = SLIST_FIRST(&rx->dqo.free_bufs);
+ if (__predict_false(!buf)) {
+ device_printf(rx->com.priv->dev,
+ "Unexpected empty free bufs list\n");
+ return (ENOBUFS);
+ }
+ SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);
+
+ bus_size_t segment_size = gve_rx_dqo_mbuf_segment_size(rx->com.priv);
+ buf->mbuf = m_getjcl(how, MT_DATA, M_PKTHDR, segment_size);
+ if (__predict_false(!buf->mbuf)) {
+ err = ENOMEM;
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_mbuf_mclget_null, 1);
+ counter_exit();
+ goto abort_with_buf;
+ }
+ buf->mbuf->m_len = segment_size;
+
+ err = bus_dmamap_load_mbuf_sg(rx->dqo.buf_dmatag, buf->dmamap,
+ buf->mbuf, segs, &nsegs, BUS_DMA_NOWAIT);
+	if (__predict_false(err != 0)) {
+		counter_enter();
+		counter_u64_add_protected(rx->stats.rx_mbuf_dmamap_err, 1);
+		counter_exit();
+		goto abort_with_mbuf;
+	}
+	KASSERT(nsegs == 1, ("dma segs for a cluster mbuf is not 1"));
+ buf->addr = segs[0].ds_addr;
+
+ gve_rx_post_buf_dqo(rx, buf);
+ return (0);
+
+abort_with_mbuf:
+ m_freem(buf->mbuf);
+ buf->mbuf = NULL;
+abort_with_buf:
+ SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);
+ return (err);
+}
+
+static struct gve_dma_handle *
+gve_get_page_dma_handle(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf)
+{
+ return (&(rx->com.qpl->dmas[buf - rx->dqo.bufs]));
+}
+
+static void
+gve_rx_post_qpl_buf_dqo(struct gve_rx_ring *rx, struct gve_rx_buf_dqo *buf,
+ uint8_t frag_num)
+{
+ struct gve_rx_desc_dqo *desc = &rx->dqo.desc_ring[rx->dqo.head];
+ union gve_rx_qpl_buf_id_dqo composed_id;
+ struct gve_dma_handle *page_dma_handle;
+
+ composed_id.buf_id = buf - rx->dqo.bufs;
+ composed_id.frag_num = frag_num;
+ desc->buf_id = htole16(composed_id.all);
+
+ page_dma_handle = gve_get_page_dma_handle(rx, buf);
+ bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map,
+ BUS_DMASYNC_PREREAD);
+ desc->buf_addr = htole64(page_dma_handle->bus_addr +
+ frag_num * rx->com.priv->rx_buf_size_dqo);
+
+ buf->num_nic_frags++;
+ gve_rx_advance_head_dqo(rx);
+}
+
+static void
+gve_rx_maybe_extract_from_used_bufs(struct gve_rx_ring *rx, bool just_one)
+{
+ struct gve_rx_buf_dqo *hol_blocker = NULL;
+ struct gve_rx_buf_dqo *buf;
+ u_int ref_count;
+ vm_page_t page;
+
+ while (true) {
+ buf = STAILQ_FIRST(&rx->dqo.used_bufs);
+ if (__predict_false(buf == NULL))
+ break;
+
+ page = rx->com.qpl->pages[buf - rx->dqo.bufs];
+ ref_count = atomic_load_int(&page->ref_count);
+
+ if (VPRC_WIRE_COUNT(ref_count) != 1) {
+ /* Account for one head-of-line blocker */
+ if (hol_blocker != NULL)
+ break;
+ hol_blocker = buf;
+ STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
+ stailq_entry);
+ continue;
+ }
+
+ STAILQ_REMOVE_HEAD(&rx->dqo.used_bufs,
+ stailq_entry);
+ SLIST_INSERT_HEAD(&rx->dqo.free_bufs,
+ buf, slist_entry);
+ if (just_one)
+ break;
+ }
+
+ if (hol_blocker != NULL)
+ STAILQ_INSERT_HEAD(&rx->dqo.used_bufs,
+ hol_blocker, stailq_entry);
+}
+
+static int
+gve_rx_post_new_dqo_qpl_buf(struct gve_rx_ring *rx)
+{
+ struct gve_rx_buf_dqo *buf;
+
+ buf = SLIST_FIRST(&rx->dqo.free_bufs);
+ if (__predict_false(buf == NULL)) {
+ gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/true);
+ buf = SLIST_FIRST(&rx->dqo.free_bufs);
+ if (__predict_false(buf == NULL))
+ return (ENOBUFS);
+ }
+
+ gve_rx_post_qpl_buf_dqo(rx, buf, buf->next_idx);
+ if (buf->next_idx == gve_get_dq_num_frags_in_page(rx->com.priv) - 1)
+ buf->next_idx = 0;
+ else
+ buf->next_idx++;
+
+ /*
+ * We have posted all the frags in this buf to the NIC.
+ * - buf will enter used_bufs once the last completion arrives.
+	 * - It will re-enter free_bufs in gve_rx_maybe_extract_from_used_bufs
+ * when its wire count drops back to 1.
+ */
+ if (buf->next_idx == 0)
+ SLIST_REMOVE_HEAD(&rx->dqo.free_bufs, slist_entry);
+ return (0);
+}
+
+static void
+gve_rx_post_buffers_dqo(struct gve_rx_ring *rx, int how)
+{
+ uint32_t num_pending_bufs;
+ uint32_t num_to_post;
+ uint32_t i;
+ int err;
+
+ num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
+ num_to_post = rx->dqo.mask - num_pending_bufs;
+
+ for (i = 0; i < num_to_post; i++) {
+ if (gve_is_qpl(rx->com.priv))
+ err = gve_rx_post_new_dqo_qpl_buf(rx);
+ else
+ err = gve_rx_post_new_mbuf_dqo(rx, how);
+ if (err)
+ break;
+ }
+}
+
+void
+gve_rx_prefill_buffers_dqo(struct gve_rx_ring *rx)
+{
+ gve_rx_post_buffers_dqo(rx, M_WAITOK);
+}
+
+static void
+gve_rx_set_hashtype_dqo(struct mbuf *mbuf, struct gve_ptype *ptype, bool *is_tcp)
+{
+ switch (ptype->l3_type) {
+ case GVE_L3_TYPE_IPV4:
+ switch (ptype->l4_type) {
+ case GVE_L4_TYPE_TCP:
+ *is_tcp = true;
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV4);
+ break;
+ case GVE_L4_TYPE_UDP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV4);
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV4);
+ }
+ break;
+ case GVE_L3_TYPE_IPV6:
+ switch (ptype->l4_type) {
+ case GVE_L4_TYPE_TCP:
+ *is_tcp = true;
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_TCP_IPV6);
+ break;
+ case GVE_L4_TYPE_UDP:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_UDP_IPV6);
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_RSS_IPV6);
+ }
+ break;
+ default:
+ M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE_HASH);
+ }
+}
+
+static void
+gve_rx_set_csum_flags_dqo(struct mbuf *mbuf,
+ struct gve_rx_compl_desc_dqo *desc,
+ struct gve_ptype *ptype)
+{
+ /* HW did not identify and process L3 and L4 headers. */
+ if (__predict_false(!desc->l3_l4_processed))
+ return;
+
+ if (ptype->l3_type == GVE_L3_TYPE_IPV4) {
+ if (__predict_false(desc->csum_ip_err ||
+ desc->csum_external_ip_err))
+ return;
+ } else if (ptype->l3_type == GVE_L3_TYPE_IPV6) {
+ /* Checksum should be skipped if this flag is set. */
+ if (__predict_false(desc->ipv6_ex_add))
+ return;
+ }
+
+ if (__predict_false(desc->csum_l4_err))
+ return;
+
+ switch (ptype->l4_type) {
+ case GVE_L4_TYPE_TCP:
+ case GVE_L4_TYPE_UDP:
+ case GVE_L4_TYPE_ICMP:
+ case GVE_L4_TYPE_SCTP:
+ mbuf->m_pkthdr.csum_flags = CSUM_IP_CHECKED |
+ CSUM_IP_VALID |
+ CSUM_DATA_VALID |
+ CSUM_PSEUDO_HDR;
+ mbuf->m_pkthdr.csum_data = 0xffff;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+gve_rx_input_mbuf_dqo(struct gve_rx_ring *rx,
+ struct gve_rx_compl_desc_dqo *compl_desc)
+{
+ struct mbuf *mbuf = rx->ctx.mbuf_head;
+ if_t ifp = rx->com.priv->ifp;
+ struct gve_ptype *ptype;
+ bool do_if_input = true;
+ bool is_tcp = false;
+
+ ptype = &rx->com.priv->ptype_lut_dqo->ptypes[compl_desc->packet_type];
+ gve_rx_set_hashtype_dqo(mbuf, ptype, &is_tcp);
+ mbuf->m_pkthdr.flowid = le32toh(compl_desc->hash);
+ gve_rx_set_csum_flags_dqo(mbuf, compl_desc, ptype);
+
+ mbuf->m_pkthdr.rcvif = ifp;
+ mbuf->m_pkthdr.len = rx->ctx.total_size;
+
+ if (((if_getcapenable(rx->com.priv->ifp) & IFCAP_LRO) != 0) &&
+ is_tcp &&
+ (rx->lro.lro_cnt != 0) &&
+ (tcp_lro_rx(&rx->lro, mbuf, 0) == 0))
+ do_if_input = false;
+
+ if (do_if_input)
+ if_input(ifp, mbuf);
+
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rbytes, rx->ctx.total_size);
+ counter_u64_add_protected(rx->stats.rpackets, 1);
+ counter_exit();
+
+ rx->ctx = (struct gve_rx_ctx){};
+}
+
+static int
+gve_rx_copybreak_dqo(struct gve_rx_ring *rx, void *va,
+ struct gve_rx_compl_desc_dqo *compl_desc, uint16_t frag_len)
+{
+ struct mbuf *mbuf;
+
+ mbuf = m_get2(frag_len, M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (__predict_false(mbuf == NULL))
+ return (ENOMEM);
+
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_copybreak_cnt, 1);
+ counter_exit();
+
+ m_copyback(mbuf, 0, frag_len, va);
+ mbuf->m_len = frag_len;
+
+ rx->ctx.mbuf_head = mbuf;
+ rx->ctx.mbuf_tail = mbuf;
+ rx->ctx.total_size += frag_len;
+
+ gve_rx_input_mbuf_dqo(rx, compl_desc);
+ return (0);
+}
+
+static void
+gve_rx_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_compl_desc_dqo *compl_desc,
+ int *work_done)
+{
+ bool is_last_frag = compl_desc->end_of_packet != 0;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ struct gve_rx_buf_dqo *buf;
+ uint32_t num_pending_bufs;
+ uint16_t frag_len;
+ uint16_t buf_id;
+ int err;
+
+ buf_id = le16toh(compl_desc->buf_id);
+ if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
+ device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
+ buf_id, rx->com.id);
+ gve_schedule_reset(priv);
+ goto drop_frag_clear_ctx;
+ }
+ buf = &rx->dqo.bufs[buf_id];
+ if (__predict_false(buf->mbuf == NULL)) {
+ device_printf(priv->dev, "Spurious completion for buf id %d on rxq %d, issuing reset\n",
+ buf_id, rx->com.id);
+ gve_schedule_reset(priv);
+ goto drop_frag_clear_ctx;
+ }
+
+ if (__predict_false(ctx->drop_pkt))
+ goto drop_frag;
+
+ if (__predict_false(compl_desc->rx_error)) {
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
+ counter_exit();
+ goto drop_frag;
+ }
+
+ bus_dmamap_sync(rx->dqo.buf_dmatag, buf->dmamap,
+ BUS_DMASYNC_POSTREAD);
+
+ frag_len = compl_desc->packet_len;
+ if (frag_len <= priv->rx_copybreak && !ctx->mbuf_head && is_last_frag) {
+ err = gve_rx_copybreak_dqo(rx, mtod(buf->mbuf, char*),
+ compl_desc, frag_len);
+ if (__predict_false(err != 0))
+ goto drop_frag;
+ (*work_done)++;
+ gve_rx_post_buf_dqo(rx, buf);
+ return;
+ }
+
+ /*
+ * Although buffer completions may arrive out of order, buffer
+ * descriptors are consumed by the NIC in order. That is, the
+ * buffer at desc_ring[tail] might not be the buffer we got the
+ * completion compl_ring[tail] for: but we know that desc_ring[tail]
+ * has already been read by the NIC.
+ */
+ num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
+
+ /*
+ * For every fragment received, try to post a new buffer.
+ *
+ * Failures are okay but only so long as the number of outstanding
+ * buffers is above a threshold.
+ *
+ * Beyond that we drop new packets to reuse their buffers.
+ * Without ensuring a minimum number of buffers for the NIC to
+ * put packets in, we run the risk of getting the queue stuck
+ * for good.
+ */
+ err = gve_rx_post_new_mbuf_dqo(rx, M_NOWAIT);
+ if (__predict_false(err != 0 &&
+ num_pending_bufs <= GVE_RX_DQO_MIN_PENDING_BUFS)) {
+ counter_enter();
+ counter_u64_add_protected(
+ rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
+ counter_exit();
+ goto drop_frag;
+ }
+
+ buf->mbuf->m_len = frag_len;
+ ctx->total_size += frag_len;
+ if (ctx->mbuf_tail == NULL) {
+ ctx->mbuf_head = buf->mbuf;
+ ctx->mbuf_tail = buf->mbuf;
+ } else {
+ buf->mbuf->m_flags &= ~M_PKTHDR;
+ ctx->mbuf_tail->m_next = buf->mbuf;
+ ctx->mbuf_tail = buf->mbuf;
+ }
+
+ /*
+ * Disassociate the mbuf from buf and surrender buf to the free list to
+ * be used by a future mbuf.
+ */
+ bus_dmamap_unload(rx->dqo.buf_dmatag, buf->dmamap);
+ buf->mbuf = NULL;
+ buf->addr = 0;
+ SLIST_INSERT_HEAD(&rx->dqo.free_bufs, buf, slist_entry);
+
+ if (is_last_frag) {
+ gve_rx_input_mbuf_dqo(rx, compl_desc);
+ (*work_done)++;
+ }
+ return;
+
+drop_frag:
+ /* Clear the earlier frags if there were any */
+ m_freem(ctx->mbuf_head);
+ rx->ctx = (struct gve_rx_ctx){};
+ /* Drop the rest of the pkt if there are more frags */
+ ctx->drop_pkt = true;
+ /* Reuse the dropped frag's buffer */
+ gve_rx_post_buf_dqo(rx, buf);
+
+ if (is_last_frag)
+ goto drop_frag_clear_ctx;
+ return;
+
+drop_frag_clear_ctx:
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
+ counter_exit();
+ m_freem(ctx->mbuf_head);
+ rx->ctx = (struct gve_rx_ctx){};
+}
+
+static void *
+gve_get_cpu_addr_for_qpl_buf(struct gve_rx_ring *rx,
+ struct gve_rx_buf_dqo *buf, uint8_t buf_frag_num)
+{
+ int page_idx = buf - rx->dqo.bufs;
+ void *va = rx->com.qpl->dmas[page_idx].cpu_addr;
+
+ va = (char *)va + (buf_frag_num * rx->com.priv->rx_buf_size_dqo);
+ return (va);
+}
+
+static int
+gve_rx_add_clmbuf_to_ctx(struct gve_rx_ring *rx,
+ struct gve_rx_ctx *ctx, struct gve_rx_buf_dqo *buf,
+ uint8_t buf_frag_num, uint16_t frag_len)
+{
+ void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);
+ struct mbuf *mbuf;
+ bus_size_t segment_size = gve_rx_dqo_mbuf_segment_size(rx->com.priv);
+
+ if (ctx->mbuf_tail == NULL) {
+ mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, segment_size);
+ if (mbuf == NULL)
+ return (ENOMEM);
+ ctx->mbuf_head = mbuf;
+ ctx->mbuf_tail = mbuf;
+ } else {
+ mbuf = m_getjcl(M_NOWAIT, MT_DATA, 0, segment_size);
+ if (mbuf == NULL)
+ return (ENOMEM);
+ ctx->mbuf_tail->m_next = mbuf;
+ ctx->mbuf_tail = mbuf;
+ }
+
+ mbuf->m_len = frag_len;
+ ctx->total_size += frag_len;
+
+ m_copyback(mbuf, 0, frag_len, va);
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_frag_copy_cnt, 1);
+ counter_exit();
+ return (0);
+}
+
+static int
+gve_rx_add_extmbuf_to_ctx(struct gve_rx_ring *rx,
+ struct gve_rx_ctx *ctx, struct gve_rx_buf_dqo *buf,
+ uint8_t buf_frag_num, uint16_t frag_len)
+{
+ struct mbuf *mbuf;
+ void *page_addr;
+ vm_page_t page;
+ int page_idx;
+ void *va;
+
+ if (ctx->mbuf_tail == NULL) {
+ mbuf = m_gethdr(M_NOWAIT, MT_DATA);
+ if (mbuf == NULL)
+ return (ENOMEM);
+ ctx->mbuf_head = mbuf;
+ ctx->mbuf_tail = mbuf;
+ } else {
+ mbuf = m_get(M_NOWAIT, MT_DATA);
+ if (mbuf == NULL)
+ return (ENOMEM);
+ ctx->mbuf_tail->m_next = mbuf;
+ ctx->mbuf_tail = mbuf;
+ }
+
+ mbuf->m_len = frag_len;
+ ctx->total_size += frag_len;
+
+ page_idx = buf - rx->dqo.bufs;
+ page = rx->com.qpl->pages[page_idx];
+ page_addr = rx->com.qpl->dmas[page_idx].cpu_addr;
+ va = (char *)page_addr + (buf_frag_num * rx->com.priv->rx_buf_size_dqo);
+
+ /*
+ * Grab an extra ref to the page so that gve_mextadd_free
+ * does not end up freeing the page while the interface exists.
+ */
+ vm_page_wire(page);
+
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_frag_flip_cnt, 1);
+ counter_exit();
+
+ MEXTADD(mbuf, va, frag_len,
+ gve_mextadd_free, page, page_addr,
+ 0, EXT_NET_DRV);
+ return (0);
+}
+
+static void
+gve_rx_dqo_qpl(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_compl_desc_dqo *compl_desc,
+ int *work_done)
+{
+ bool is_last_frag = compl_desc->end_of_packet != 0;
+ union gve_rx_qpl_buf_id_dqo composed_id;
+ struct gve_dma_handle *page_dma_handle;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ struct gve_rx_buf_dqo *buf;
+ uint32_t num_pending_bufs;
+ uint8_t buf_frag_num;
+ uint16_t frag_len;
+ uint16_t buf_id;
+ int err;
+
+ composed_id.all = le16toh(compl_desc->buf_id);
+ buf_id = composed_id.buf_id;
+ buf_frag_num = composed_id.frag_num;
+
+ if (__predict_false(buf_id >= rx->dqo.buf_cnt)) {
+ device_printf(priv->dev, "Invalid rx buf id %d on rxq %d, issuing reset\n",
+ buf_id, rx->com.id);
+ gve_schedule_reset(priv);
+ goto drop_frag_clear_ctx;
+ }
+ buf = &rx->dqo.bufs[buf_id];
+ if (__predict_false(buf->num_nic_frags == 0 ||
+ buf_frag_num > gve_get_dq_num_frags_in_page(priv) - 1)) {
+ device_printf(priv->dev, "Spurious compl for buf id %d on rxq %d "
+ "with buf_frag_num %d and num_nic_frags %d, issuing reset\n",
+ buf_id, rx->com.id, buf_frag_num, buf->num_nic_frags);
+ gve_schedule_reset(priv);
+ goto drop_frag_clear_ctx;
+ }
+
+ buf->num_nic_frags--;
+
+ if (__predict_false(ctx->drop_pkt))
+ goto drop_frag;
+
+ if (__predict_false(compl_desc->rx_error)) {
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_dropped_pkt_desc_err, 1);
+ counter_exit();
+ goto drop_frag;
+ }
+
+ page_dma_handle = gve_get_page_dma_handle(rx, buf);
+ bus_dmamap_sync(page_dma_handle->tag, page_dma_handle->map,
+ BUS_DMASYNC_POSTREAD);
+
+ frag_len = compl_desc->packet_len;
+ if (frag_len <= priv->rx_copybreak && !ctx->mbuf_head && is_last_frag) {
+ void *va = gve_get_cpu_addr_for_qpl_buf(rx, buf, buf_frag_num);
+
+ err = gve_rx_copybreak_dqo(rx, va, compl_desc, frag_len);
+ if (__predict_false(err != 0))
+ goto drop_frag;
+ (*work_done)++;
+ gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
+ return;
+ }
+
+ num_pending_bufs = (rx->dqo.head - rx->dqo.tail) & rx->dqo.mask;
+ err = gve_rx_post_new_dqo_qpl_buf(rx);
+ if (__predict_false(err != 0 &&
+ num_pending_bufs <= GVE_RX_DQO_MIN_PENDING_BUFS)) {
+ /*
+ * Resort to copying this fragment into a cluster mbuf
+ * when the above threshold is breached and repost the
+ * incoming buffer. If we cannot find cluster mbufs,
+ * just drop the packet (to repost its buffer).
+ */
+ err = gve_rx_add_clmbuf_to_ctx(rx, ctx, buf,
+ buf_frag_num, frag_len);
+ if (err != 0) {
+ counter_enter();
+ counter_u64_add_protected(
+ rx->stats.rx_dropped_pkt_buf_post_fail, 1);
+ counter_exit();
+ goto drop_frag;
+ }
+ gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
+ } else {
+ err = gve_rx_add_extmbuf_to_ctx(rx, ctx, buf,
+ buf_frag_num, frag_len);
+ if (__predict_false(err != 0)) {
+ counter_enter();
+ counter_u64_add_protected(
+ rx->stats.rx_dropped_pkt_mbuf_alloc_fail, 1);
+ counter_exit();
+ goto drop_frag;
+ }
+ }
+
+ /*
+ * Both the counts need to be checked.
+ *
+ * num_nic_frags == 0 implies no pending completions
+ * but not all frags may have yet been posted.
+ *
+ * next_idx == 0 implies all frags have been posted
+ * but there might be pending completions.
+ */
+ if (buf->num_nic_frags == 0 && buf->next_idx == 0)
+ STAILQ_INSERT_TAIL(&rx->dqo.used_bufs, buf, stailq_entry);
+
+ if (is_last_frag) {
+ gve_rx_input_mbuf_dqo(rx, compl_desc);
+ (*work_done)++;
+ }
+ return;
+
+drop_frag:
+ /* Clear the earlier frags if there were any */
+ m_freem(ctx->mbuf_head);
+ rx->ctx = (struct gve_rx_ctx){};
+ /* Drop the rest of the pkt if there are more frags */
+ ctx->drop_pkt = true;
+ /* Reuse the dropped frag's buffer */
+ gve_rx_post_qpl_buf_dqo(rx, buf, buf_frag_num);
+
+ if (is_last_frag)
+ goto drop_frag_clear_ctx;
+ return;
+
+drop_frag_clear_ctx:
+ counter_enter();
+ counter_u64_add_protected(rx->stats.rx_dropped_pkt, 1);
+ counter_exit();
+ m_freem(ctx->mbuf_head);
+ rx->ctx = (struct gve_rx_ctx){};
+}
+
+static uint8_t
+gve_rx_get_gen_bit(uint8_t *desc)
+{
+ uint8_t byte;
+
+ /*
+ * Prevent generation bit from being read after the rest of the
+ * descriptor.
+ */
+ byte = atomic_load_acq_8(desc + GVE_RX_DESC_DQO_GEN_BYTE_OFFSET);
+ return ((byte & GVE_RX_DESC_DQO_GEN_BIT_MASK) != 0);
+}
+
+static bool
+gve_rx_cleanup_dqo(struct gve_priv *priv, struct gve_rx_ring *rx, int budget)
+{
+ struct gve_rx_compl_desc_dqo *compl_desc;
+ uint32_t work_done = 0;
+
+ NET_EPOCH_ASSERT();
+
+ while (work_done < budget) {
+ bus_dmamap_sync(rx->dqo.compl_ring_mem.tag,
+ rx->dqo.compl_ring_mem.map,
+ BUS_DMASYNC_POSTREAD);
+
+ compl_desc = &rx->dqo.compl_ring[rx->dqo.tail];
+ if (gve_rx_get_gen_bit((uint8_t *)compl_desc) ==
+ rx->dqo.cur_gen_bit)
+ break;
+
+ rx->cnt++;
+ rx->dqo.tail = (rx->dqo.tail + 1) & rx->dqo.mask;
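+		/* Expected generation flips each time the tail wraps to 0. */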
+ rx->dqo.cur_gen_bit ^= (rx->dqo.tail == 0);
+
+ if (gve_is_qpl(priv))
+ gve_rx_dqo_qpl(priv, rx, compl_desc, &work_done);
+ else
+ gve_rx_dqo(priv, rx, compl_desc, &work_done);
+ }
+
+ if (work_done != 0)
+ tcp_lro_flush_all(&rx->lro);
+
+ gve_rx_post_buffers_dqo(rx, M_NOWAIT);
+ if (gve_is_qpl(priv))
+ gve_rx_maybe_extract_from_used_bufs(rx, /*just_one=*/false);
+ return (work_done == budget);
+}
+
+void
+gve_rx_cleanup_tq_dqo(void *arg, int pending)
+{
+ struct gve_rx_ring *rx = arg;
+ struct gve_priv *priv = rx->com.priv;
+
+ if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
+ return;
+
+ if (gve_rx_cleanup_dqo(priv, rx, /*budget=*/64)) {
+ taskqueue_enqueue(rx->com.cleanup_tq, &rx->com.cleanup_task);
+ return;
+ }
+
+ gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,
+ GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
+}
diff --git a/sys/dev/gve/gve_sysctl.c b/sys/dev/gve/gve_sysctl.c
index 924654f62adc..a3874cc921ee 100644
--- a/sys/dev/gve/gve_sysctl.c
+++ b/sys/dev/gve/gve_sysctl.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -30,6 +30,25 @@
*/
#include "gve.h"
+static SYSCTL_NODE(_hw, OID_AUTO, gve, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
+ "GVE driver parameters");
+
+bool gve_disable_hw_lro = false;
+SYSCTL_BOOL(_hw_gve, OID_AUTO, disable_hw_lro, CTLFLAG_RDTUN,
+ &gve_disable_hw_lro, 0, "Controls if hardware LRO is used");
+
+bool gve_allow_4k_rx_buffers = false;
+SYSCTL_BOOL(_hw_gve, OID_AUTO, allow_4k_rx_buffers, CTLFLAG_RDTUN,
+ &gve_allow_4k_rx_buffers, 0, "Controls if 4K RX Buffers are allowed");
+
+char gve_queue_format[8];
+SYSCTL_STRING(_hw_gve, OID_AUTO, queue_format, CTLFLAG_RD,
+ &gve_queue_format, 0, "Queue format being used by the iface");
+
+char gve_version[8];
+SYSCTL_STRING(_hw_gve, OID_AUTO, driver_version, CTLFLAG_RD,
+ &gve_version, 0, "Driver version");
+
static void
gve_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct gve_rx_ring *rxq)
@@ -69,9 +88,21 @@ gve_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
&stats->rx_dropped_pkt_desc_err,
"Packets dropped due to descriptor error");
SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
+ "rx_dropped_pkt_buf_post_fail", CTLFLAG_RD,
+ &stats->rx_dropped_pkt_buf_post_fail,
+ "Packets dropped due to failure to post enough buffers");
+ SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
"rx_dropped_pkt_mbuf_alloc_fail", CTLFLAG_RD,
&stats->rx_dropped_pkt_mbuf_alloc_fail,
"Packets dropped due to failed mbuf allocation");
+ SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
+ "rx_mbuf_dmamap_err", CTLFLAG_RD,
+ &stats->rx_mbuf_dmamap_err,
+ "Number of rx mbufs which could not be dma mapped");
+ SYSCTL_ADD_COUNTER_U64(ctx, list, OID_AUTO,
+ "rx_mbuf_mclget_null", CTLFLAG_RD,
+ &stats->rx_mbuf_mclget_null,
+ "Number of times when there were no cluster mbufs");
SYSCTL_ADD_U32(ctx, list, OID_AUTO,
"rx_completed_desc", CTLFLAG_RD,
&rxq->cnt, 0, "Number of descriptors completed");
@@ -113,9 +144,9 @@ gve_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
"tx_bytes", CTLFLAG_RD,
&stats->tbytes, "Bytes transmitted");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
- "tx_dropped_pkt_nospace_device", CTLFLAG_RD,
- &stats->tx_dropped_pkt_nospace_device,
- "Packets dropped due to no space in device");
+ "tx_delayed_pkt_nospace_device", CTLFLAG_RD,
+ &stats->tx_delayed_pkt_nospace_device,
+ "Packets delayed due to no space in device");
SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
"tx_dropped_pkt_nospace_bufring", CTLFLAG_RD,
&stats->tx_dropped_pkt_nospace_bufring,
@@ -124,6 +155,46 @@ gve_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
"tx_dropped_pkt_vlan", CTLFLAG_RD,
&stats->tx_dropped_pkt_vlan,
"Dropped VLAN packets");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_delayed_pkt_nospace_descring", CTLFLAG_RD,
+ &stats->tx_delayed_pkt_nospace_descring,
+ "Packets delayed due to no space in desc ring");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_delayed_pkt_nospace_compring", CTLFLAG_RD,
+ &stats->tx_delayed_pkt_nospace_compring,
+ "Packets delayed due to no space in comp ring");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_delayed_pkt_nospace_qpl_bufs", CTLFLAG_RD,
+ &stats->tx_delayed_pkt_nospace_qpl_bufs,
+ "Packets delayed due to not enough qpl bufs");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_delayed_pkt_tsoerr", CTLFLAG_RD,
+ &stats->tx_delayed_pkt_tsoerr,
+ "TSO packets delayed due to err in prep errors");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_mbuf_collapse", CTLFLAG_RD,
+ &stats->tx_mbuf_collapse,
+ "tx mbufs that had to be collapsed");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_mbuf_defrag", CTLFLAG_RD,
+ &stats->tx_mbuf_defrag,
+ "tx mbufs that had to be defragged");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_mbuf_defrag_err", CTLFLAG_RD,
+ &stats->tx_mbuf_defrag_err,
+ "tx mbufs that failed defrag");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_mbuf_dmamap_enomem_err", CTLFLAG_RD,
+ &stats->tx_mbuf_dmamap_enomem_err,
+ "tx mbufs that could not be dma-mapped due to low mem");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_mbuf_dmamap_err", CTLFLAG_RD,
+ &stats->tx_mbuf_dmamap_err,
+ "tx mbufs that could not be dma-mapped");
+ SYSCTL_ADD_COUNTER_U64(ctx, tx_list, OID_AUTO,
+ "tx_timeout", CTLFLAG_RD,
+ &stats->tx_timeout,
+ "detections of timed out packets on tx queues");
}
static void
@@ -185,6 +256,9 @@ gve_setup_adminq_stat_sysctl(struct sysctl_ctx_list *ctx,
SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_destroy_rx_queue_cnt",
CTLFLAG_RD, &priv->adminq_destroy_rx_queue_cnt, 0,
"adminq_destroy_rx_queue_cnt");
+ SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO, "adminq_get_ptype_map_cnt",
+ CTLFLAG_RD, &priv->adminq_get_ptype_map_cnt, 0,
+ "adminq_get_ptype_map_cnt");
SYSCTL_ADD_U32(ctx, admin_list, OID_AUTO,
"adminq_dcfg_device_resources_cnt", CTLFLAG_RD,
&priv->adminq_dcfg_device_resources_cnt, 0,
@@ -219,6 +293,175 @@ gve_setup_main_stat_sysctl(struct sysctl_ctx_list *ctx,
&priv->reset_cnt, 0, "Times reset");
}
+static int
+gve_check_num_queues(struct gve_priv *priv, int val, bool is_rx)
+{
+ if (val < 1) {
+ device_printf(priv->dev,
+ "Requested num queues (%u) must be a positive integer\n", val);
+ return (EINVAL);
+ }
+
+ if (val > (is_rx ? priv->rx_cfg.max_queues : priv->tx_cfg.max_queues)) {
+ device_printf(priv->dev,
+ "Requested num queues (%u) is too large\n", val);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static int
+gve_sysctl_num_tx_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ int val;
+ int err;
+
+ val = priv->tx_cfg.num_queues;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_num_queues(priv, val, /*is_rx=*/false);
+ if (err != 0)
+ return (err);
+
+ if (val != priv->tx_cfg.num_queues) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ err = gve_adjust_tx_queues(priv, val);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ }
+
+ return (err);
+}
+
+static int
+gve_sysctl_num_rx_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ int val;
+ int err;
+
+ val = priv->rx_cfg.num_queues;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_num_queues(priv, val, /*is_rx=*/true);
+ if (err != 0)
+ return (err);
+
+ if (val != priv->rx_cfg.num_queues) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ err = gve_adjust_rx_queues(priv, val);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ }
+
+ return (err);
+}
+
+static int
+gve_check_ring_size(struct gve_priv *priv, int val, bool is_rx)
+{
+ if (!powerof2(val) || val == 0) {
+ device_printf(priv->dev,
+ "Requested ring size (%u) must be a power of 2\n", val);
+ return (EINVAL);
+ }
+
+ if (val < (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt)) {
+ device_printf(priv->dev,
+ "Requested ring size (%u) cannot be less than %d\n", val,
+ (is_rx ? priv->min_rx_desc_cnt : priv->min_tx_desc_cnt));
+ return (EINVAL);
+ }
+
+ if (val > (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt)) {
+ device_printf(priv->dev,
+ "Requested ring size (%u) cannot be greater than %d\n", val,
+ (is_rx ? priv->max_rx_desc_cnt : priv->max_tx_desc_cnt));
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static int
+gve_sysctl_tx_ring_size(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ int val;
+ int err;
+
+ val = priv->tx_desc_cnt;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_ring_size(priv, val, /*is_rx=*/false);
+ if (err != 0)
+ return (err);
+
+ if (val != priv->tx_desc_cnt) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/false);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ }
+
+ return (err);
+}
+
+static int
+gve_sysctl_rx_ring_size(SYSCTL_HANDLER_ARGS)
+{
+ struct gve_priv *priv = arg1;
+ int val;
+ int err;
+
+ val = priv->rx_desc_cnt;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == NULL)
+ return (err);
+
+ err = gve_check_ring_size(priv, val, /*is_rx=*/true);
+ if (err != 0)
+ return (err);
+
+ if (val != priv->rx_desc_cnt) {
+ GVE_IFACE_LOCK_LOCK(priv->gve_iface_lock);
+ err = gve_adjust_ring_sizes(priv, val, /*is_rx=*/true);
+ GVE_IFACE_LOCK_UNLOCK(priv->gve_iface_lock);
+ }
+
+ return (err);
+}
+
+static void
+gve_setup_sysctl_writables(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child, struct gve_priv *priv)
+{
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_tx_queues",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_num_tx_queues, "I", "Number of TX queues");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "num_rx_queues",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_num_rx_queues, "I", "Number of RX queues");
+
+ if (priv->modify_ringsize_enabled) {
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_ring_size",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_tx_ring_size, "I", "TX ring size");
+
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_ring_size",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
+ gve_sysctl_rx_ring_size, "I", "RX ring size");
+ }
+}
+
void gve_setup_sysctl(struct gve_priv *priv)
{
device_t dev;
@@ -234,6 +477,7 @@ void gve_setup_sysctl(struct gve_priv *priv)
gve_setup_queue_stat_sysctl(ctx, child, priv);
gve_setup_adminq_stat_sysctl(ctx, child, priv);
gve_setup_main_stat_sysctl(ctx, child, priv);
+ gve_setup_sysctl_writables(ctx, child, priv);
}
void
diff --git a/sys/dev/gve/gve_tx.c b/sys/dev/gve/gve_tx.c
index 1e62e1226be1..84e3a4c4eb9f 100644
--- a/sys/dev/gve/gve_tx.c
+++ b/sys/dev/gve/gve_tx.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -30,6 +30,7 @@
*/
#include "gve.h"
#include "gve_adminq.h"
+#include "gve_dqo.h"
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -48,61 +49,112 @@ gve_tx_fifo_init(struct gve_priv *priv, struct gve_tx_ring *tx)
}
static void
-gve_tx_free_ring(struct gve_priv *priv, int i)
+gve_tx_free_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
- /* Safe to call even if never alloced */
- gve_free_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS);
-
- if (tx->br != NULL) {
- buf_ring_free(tx->br, M_DEVBUF);
- tx->br = NULL;
+ if (tx->desc_ring != NULL) {
+ gve_dma_free_coherent(&tx->desc_ring_mem);
+ tx->desc_ring = NULL;
}
- if (mtx_initialized(&tx->ring_mtx))
- mtx_destroy(&tx->ring_mtx);
-
if (tx->info != NULL) {
free(tx->info, M_GVE);
tx->info = NULL;
}
- if (tx->desc_ring != NULL) {
- gve_dma_free_coherent(&tx->desc_ring_mem);
- tx->desc_ring = NULL;
+ if (com->qpl != NULL) {
+ gve_free_qpl(priv, com->qpl);
+ com->qpl = NULL;
}
+}
+
+static void
+gve_tx_free_ring(struct gve_priv *priv, int i)
+{
+ struct gve_tx_ring *tx = &priv->tx[i];
+ struct gve_ring_com *com = &tx->com;
+
+ /* Safe to call even if never alloced */
+ gve_free_counters((counter_u64_t *)&tx->stats, NUM_TX_STATS);
+
+ if (mtx_initialized(&tx->ring_mtx))
+ mtx_destroy(&tx->ring_mtx);
if (com->q_resources != NULL) {
gve_dma_free_coherent(&com->q_resources_mem);
com->q_resources = NULL;
}
+
+ if (tx->br != NULL) {
+ buf_ring_free(tx->br, M_DEVBUF);
+ tx->br = NULL;
+ }
+
+ if (gve_is_gqi(priv))
+ gve_tx_free_ring_gqi(priv, i);
+ else
+ gve_tx_free_ring_dqo(priv, i);
}
static int
-gve_tx_alloc_ring(struct gve_priv *priv, int i)
+gve_tx_alloc_ring_gqi(struct gve_priv *priv, int i)
{
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
- char mtx_name[16];
int err;
- com->priv = priv;
- com->id = i;
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(union gve_tx_desc) * priv->tx_desc_cnt,
+ CACHE_LINE_SIZE, &tx->desc_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc desc ring for tx ring %d", i);
+ goto abort;
+ }
+ tx->desc_ring = tx->desc_ring_mem.cpu_addr;
- com->qpl = &priv->qpls[i];
+ com->qpl = gve_alloc_qpl(priv, i, priv->tx_desc_cnt / GVE_QPL_DIVISOR,
+ /*single_kva=*/true);
if (com->qpl == NULL) {
- device_printf(priv->dev, "No QPL left for tx ring %d\n", i);
- return (ENOMEM);
+ device_printf(priv->dev,
+ "Failed to alloc QPL for tx ring %d\n", i);
+ err = ENOMEM;
+ goto abort;
}
err = gve_tx_fifo_init(priv, tx);
if (err != 0)
goto abort;
- tx->info = malloc(sizeof(struct gve_tx_buffer_state) * priv->tx_desc_cnt,
+ tx->info = malloc(
+ sizeof(struct gve_tx_buffer_state) * priv->tx_desc_cnt,
M_GVE, M_WAITOK | M_ZERO);
+ return (0);
+
+abort:
+ gve_tx_free_ring_gqi(priv, i);
+ return (err);
+}
+
+static int
+gve_tx_alloc_ring(struct gve_priv *priv, int i)
+{
+ struct gve_tx_ring *tx = &priv->tx[i];
+ struct gve_ring_com *com = &tx->com;
+ char mtx_name[16];
+ int err;
+
+ com->priv = priv;
+ com->id = i;
+
+ if (gve_is_gqi(priv))
+ err = gve_tx_alloc_ring_gqi(priv, i);
+ else
+ err = gve_tx_alloc_ring_dqo(priv, i);
+ if (err != 0)
+ goto abort;
sprintf(mtx_name, "gvetx%d", i);
mtx_init(&tx->ring_mtx, mtx_name, NULL, MTX_DEF);
@@ -115,19 +167,13 @@ gve_tx_alloc_ring(struct gve_priv *priv, int i)
err = gve_dma_alloc_coherent(priv, sizeof(struct gve_queue_resources),
PAGE_SIZE, &com->q_resources_mem);
if (err != 0) {
- device_printf(priv->dev, "Failed to alloc queue resources for tx ring %d", i);
+ device_printf(priv->dev,
+ "Failed to alloc queue resources for tx ring %d", i);
goto abort;
}
com->q_resources = com->q_resources_mem.cpu_addr;
- err = gve_dma_alloc_coherent(priv,
- sizeof(union gve_tx_desc) * priv->tx_desc_cnt,
- CACHE_LINE_SIZE, &tx->desc_ring_mem);
- if (err != 0) {
- device_printf(priv->dev, "Failed to alloc desc ring for tx ring %d", i);
- goto abort;
- }
- tx->desc_ring = tx->desc_ring_mem.cpu_addr;
+ tx->last_kicked = 0;
return (0);
@@ -137,39 +183,32 @@ abort:
}
int
-gve_alloc_tx_rings(struct gve_priv *priv)
+gve_alloc_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
- int err = 0;
int i;
+ int err;
- priv->tx = malloc(sizeof(struct gve_tx_ring) * priv->tx_cfg.num_queues,
- M_GVE, M_WAITOK | M_ZERO);
+ KASSERT(priv->tx != NULL, ("priv->tx is NULL!"));
- for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ for (i = start_idx; i < stop_idx; i++) {
err = gve_tx_alloc_ring(priv, i);
if (err != 0)
goto free_rings;
-
}
return (0);
-
free_rings:
- while (i--)
- gve_tx_free_ring(priv, i);
- free(priv->tx, M_GVE);
+ gve_free_tx_rings(priv, start_idx, i);
return (err);
}
void
-gve_free_tx_rings(struct gve_priv *priv)
+gve_free_tx_rings(struct gve_priv *priv, uint16_t start_idx, uint16_t stop_idx)
{
int i;
- for (i = 0; i < priv->tx_cfg.num_queues; i++)
+ for (i = start_idx; i < stop_idx; i++)
gve_tx_free_ring(priv, i);
-
- free(priv->tx, M_GVE);
}
static void
@@ -181,6 +220,7 @@ gve_tx_clear_desc_ring(struct gve_tx_ring *tx)
for (i = 0; i < com->priv->tx_desc_cnt; i++) {
tx->desc_ring[i] = (union gve_tx_desc){};
tx->info[i] = (struct gve_tx_buffer_state){};
+ gve_invalidate_timestamp(&tx->info[i].enqueue_time_sec);
}
bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
@@ -209,7 +249,11 @@ gve_start_tx_ring(struct gve_priv *priv, int i)
struct gve_tx_ring *tx = &priv->tx[i];
struct gve_ring_com *com = &tx->com;
- NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx);
+ atomic_store_bool(&tx->stopped, false);
+ if (gve_is_gqi(priv))
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq, tx);
+ else
+ NET_TASK_INIT(&com->cleanup_task, 0, gve_tx_cleanup_tq_dqo, tx);
com->cleanup_tq = taskqueue_create_fast("gve tx", M_WAITOK,
taskqueue_thread_enqueue, &com->cleanup_tq);
taskqueue_start_threads(&com->cleanup_tq, 1, PI_NET, "%s txq %d",
@@ -233,8 +277,12 @@ gve_create_tx_rings(struct gve_priv *priv)
if (gve_get_state_flag(priv, GVE_STATE_FLAG_TX_RINGS_OK))
return (0);
- for (i = 0; i < priv->tx_cfg.num_queues; i++)
- gve_clear_tx_ring(priv, i);
+ for (i = 0; i < priv->tx_cfg.num_queues; i++) {
+ if (gve_is_gqi(priv))
+ gve_clear_tx_ring(priv, i);
+ else
+ gve_clear_tx_ring_dqo(priv, i);
+ }
err = gve_adminq_create_tx_queues(priv, priv->tx_cfg.num_queues);
if (err != 0)
@@ -300,6 +348,30 @@ gve_destroy_tx_rings(struct gve_priv *priv)
}
int
+gve_check_tx_timeout_gqi(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ struct gve_tx_buffer_state *info;
+ uint32_t pkt_idx;
+ int num_timeouts;
+
+ num_timeouts = 0;
+
+ for (pkt_idx = 0; pkt_idx < priv->tx_desc_cnt; pkt_idx++) {
+ info = &tx->info[pkt_idx];
+
+ if (!gve_timestamp_valid(&info->enqueue_time_sec))
+ continue;
+
+ if (__predict_false(
+ gve_seconds_since(&info->enqueue_time_sec) >
+ GVE_TX_TIMEOUT_PKT_SEC))
+ num_timeouts += 1;
+ }
+
+ return (num_timeouts);
+}
+
+int
gve_tx_intr(void *arg)
{
struct gve_tx_ring *tx = arg;
@@ -351,7 +423,10 @@ gve_tx_cleanup_tq(void *arg, int pending)
if (mbuf == NULL)
continue;
+ gve_invalidate_timestamp(&info->enqueue_time_sec);
+
info->mbuf = NULL;
+
counter_enter();
counter_u64_add_protected(tx->stats.tbytes, mbuf->m_pkthdr.len);
counter_u64_add_protected(tx->stats.tpackets, 1);
@@ -375,7 +450,7 @@ gve_tx_cleanup_tq(void *arg, int pending)
* interrupt but they will still be handled by the enqueue below.
* Completions born after the barrier WILL trigger an interrupt.
*/
- mb();
+ atomic_thread_fence_seq_cst();
nic_done = gve_tx_load_event_counter(priv, tx);
todo = nic_done - tx->done;
@@ -383,6 +458,11 @@ gve_tx_cleanup_tq(void *arg, int pending)
gve_db_bar_write_4(priv, tx->com.irq_db_offset, GVE_IRQ_MASK);
taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
}
+
+ if (atomic_load_bool(&tx->stopped) && space_freed) {
+ atomic_store_bool(&tx->stopped, false);
+ taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
+ }
}
static void
@@ -627,8 +707,7 @@ gve_xmit(struct gve_tx_ring *tx, struct mbuf *mbuf)
bytes_required = gve_fifo_bytes_required(tx, first_seg_len, pkt_len);
if (__predict_false(!gve_can_tx(tx, bytes_required))) {
counter_enter();
- counter_u64_add_protected(tx->stats.tx_dropped_pkt_nospace_device, 1);
- counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1);
+ counter_u64_add_protected(tx->stats.tx_delayed_pkt_nospace_device, 1);
counter_exit();
return (ENOBUFS);
}
@@ -636,6 +715,8 @@ gve_xmit(struct gve_tx_ring *tx, struct mbuf *mbuf)
/* So that the cleanup taskqueue can free the mbuf eventually. */
info->mbuf = mbuf;
+ gve_set_timestamp(&info->enqueue_time_sec);
+
/*
* We don't want to split the header, so if necessary, pad to the end
* of the fifo and then put the header at the beginning of the fifo.
@@ -689,19 +770,86 @@ gve_xmit(struct gve_tx_ring *tx, struct mbuf *mbuf)
return (0);
}
+static int
+gve_xmit_mbuf(struct gve_tx_ring *tx,
+ struct mbuf **mbuf)
+{
+ if (gve_is_gqi(tx->com.priv))
+ return (gve_xmit(tx, *mbuf));
+
+ if (gve_is_qpl(tx->com.priv))
+ return (gve_xmit_dqo_qpl(tx, *mbuf));
+
+ /*
+ * gve_xmit_dqo might attempt to defrag the mbuf chain.
+ * The reference is passed in so that in the case of
+ * errors, the new mbuf chain is what's put back on the br.
+ */
+ return (gve_xmit_dqo(tx, mbuf));
+}
+
+/*
+ * Has the side-effect of stopping the xmit queue by setting tx->stopped.
+ */
+static int
+gve_xmit_retry_enobuf_mbuf(struct gve_tx_ring *tx,
+ struct mbuf **mbuf)
+{
+ int err;
+
+ atomic_store_bool(&tx->stopped, true);
+
+ /*
+ * Room made in the queue BEFORE the barrier will be seen by the
+ * gve_xmit_mbuf retry below.
+ *
+ * If room is made in the queue AFTER the barrier, the cleanup tq
+ * iteration creating the room will either see a tx->stopped value
+ * of 0 or the 1 we just wrote:
+ *
+ * If it sees a 1, then it would enqueue the xmit tq. Enqueue
+ * implies a retry on the waiting pkt.
+ *
+ * If it sees a 0, then that implies a previous iteration overwrote
+ * our 1, and that iteration would enqueue the xmit tq. Enqueue
+ * implies a retry on the waiting pkt.
+ */
+ atomic_thread_fence_seq_cst();
+
+ err = gve_xmit_mbuf(tx, mbuf);
+ if (err == 0)
+ atomic_store_bool(&tx->stopped, false);
+
+ return (err);
+}
+
static void
gve_xmit_br(struct gve_tx_ring *tx)
{
struct gve_priv *priv = tx->com.priv;
struct ifnet *ifp = priv->ifp;
struct mbuf *mbuf;
+ int err;
while ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 &&
(mbuf = drbr_peek(ifp, tx->br)) != NULL) {
+ err = gve_xmit_mbuf(tx, &mbuf);
- if (__predict_false(gve_xmit(tx, mbuf) != 0)) {
- drbr_putback(ifp, tx->br, mbuf);
- taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
+ /*
+ * We need to stop this taskqueue when we can't xmit the pkt due
+ * to lack of space in the NIC ring (ENOBUFS). The retry exists
+ * to guard against a TOCTTOU bug that could end up freezing the
+ * queue forever.
+ */
+ if (__predict_false(mbuf != NULL && err == ENOBUFS))
+ err = gve_xmit_retry_enobuf_mbuf(tx, &mbuf);
+
+ if (__predict_false(err != 0 && mbuf != NULL)) {
+ if (err == EINVAL) {
+ drbr_advance(ifp, tx->br);
+ m_freem(mbuf);
+ } else
+ drbr_putback(ifp, tx->br, mbuf);
break;
}
@@ -710,7 +858,12 @@ gve_xmit_br(struct gve_tx_ring *tx)
bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
BUS_DMASYNC_PREWRITE);
- gve_db_bar_write_4(priv, tx->com.db_offset, tx->req);
+
+ if (gve_is_gqi(priv))
+ gve_db_bar_write_4(priv, tx->com.db_offset, tx->req);
+ else
+ gve_db_bar_dqo_write_4(priv, tx->com.db_offset,
+ tx->dqo.desc_tail);
}
}
@@ -763,7 +916,8 @@ gve_xmit_ifp(if_t ifp, struct mbuf *mbuf)
is_br_empty = drbr_empty(ifp, tx->br);
err = drbr_enqueue(ifp, tx->br, mbuf);
if (__predict_false(err != 0)) {
- taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
+ if (!atomic_load_bool(&tx->stopped))
+ taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
counter_enter();
counter_u64_add_protected(tx->stats.tx_dropped_pkt_nospace_bufring, 1);
counter_u64_add_protected(tx->stats.tx_dropped_pkt, 1);
@@ -778,9 +932,8 @@ gve_xmit_ifp(if_t ifp, struct mbuf *mbuf)
if (is_br_empty && (GVE_RING_TRYLOCK(tx) != 0)) {
gve_xmit_br(tx);
GVE_RING_UNLOCK(tx);
- } else {
+ } else if (!atomic_load_bool(&tx->stopped))
taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
- }
return (0);
}
diff --git a/sys/dev/gve/gve_tx_dqo.c b/sys/dev/gve/gve_tx_dqo.c
new file mode 100644
index 000000000000..551a7e308d19
--- /dev/null
+++ b/sys/dev/gve/gve_tx_dqo.c
@@ -0,0 +1,1149 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2024 Google LLC
+ *
+ * Redistribution and use in source and binary forms, with or without modification,
+ * are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "opt_inet6.h"
+
+#include "gve.h"
+#include "gve_dqo.h"
+
+static void
+gve_unmap_packet(struct gve_tx_ring *tx,
+ struct gve_tx_pending_pkt_dqo *pending_pkt)
+{
+ bus_dmamap_sync(tx->dqo.buf_dmatag, pending_pkt->dmamap,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(tx->dqo.buf_dmatag, pending_pkt->dmamap);
+}
+
+static void
+gve_clear_qpl_pending_pkt(struct gve_tx_pending_pkt_dqo *pending_pkt)
+{
+ pending_pkt->qpl_buf_head = -1;
+ pending_pkt->num_qpl_bufs = 0;
+}
+
+static void
+gve_free_tx_mbufs_dqo(struct gve_tx_ring *tx)
+{
+ struct gve_tx_pending_pkt_dqo *pending_pkt;
+ int i;
+
+ for (i = 0; i < tx->dqo.num_pending_pkts; i++) {
+ pending_pkt = &tx->dqo.pending_pkts[i];
+ if (!pending_pkt->mbuf)
+ continue;
+
+ if (gve_is_qpl(tx->com.priv))
+ gve_clear_qpl_pending_pkt(pending_pkt);
+ else
+ gve_unmap_packet(tx, pending_pkt);
+
+ m_freem(pending_pkt->mbuf);
+ pending_pkt->mbuf = NULL;
+ }
+}
+
+void
+gve_tx_free_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_tx_ring *tx = &priv->tx[i];
+ struct gve_ring_com *com = &tx->com;
+ int j;
+
+ if (tx->dqo.desc_ring != NULL) {
+ gve_dma_free_coherent(&tx->desc_ring_mem);
+ tx->dqo.desc_ring = NULL;
+ }
+
+ if (tx->dqo.compl_ring != NULL) {
+ gve_dma_free_coherent(&tx->dqo.compl_ring_mem);
+ tx->dqo.compl_ring = NULL;
+ }
+
+ if (tx->dqo.pending_pkts != NULL) {
+ gve_free_tx_mbufs_dqo(tx);
+
+ if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag) {
+ for (j = 0; j < tx->dqo.num_pending_pkts; j++)
+ if (tx->dqo.pending_pkts[j].state !=
+ GVE_PACKET_STATE_UNALLOCATED)
+ bus_dmamap_destroy(tx->dqo.buf_dmatag,
+ tx->dqo.pending_pkts[j].dmamap);
+ }
+
+ free(tx->dqo.pending_pkts, M_GVE);
+ tx->dqo.pending_pkts = NULL;
+ }
+
+ if (!gve_is_qpl(priv) && tx->dqo.buf_dmatag)
+ bus_dma_tag_destroy(tx->dqo.buf_dmatag);
+
+ if (gve_is_qpl(priv) && tx->dqo.qpl_bufs != NULL) {
+ free(tx->dqo.qpl_bufs, M_GVE);
+ tx->dqo.qpl_bufs = NULL;
+ }
+
+ if (com->qpl != NULL) {
+ gve_free_qpl(priv, com->qpl);
+ com->qpl = NULL;
+ }
+}
+
+static int
+gve_tx_alloc_rda_fields_dqo(struct gve_tx_ring *tx)
+{
+ struct gve_priv *priv = tx->com.priv;
+ int err;
+ int j;
+
+ /*
+ * DMA tag for mapping Tx mbufs
+ * The maxsize, nsegments, and maxsegsize params should match
+ * the if_sethwtso* arguments in gve_setup_ifnet in gve_main.c.
+ */
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(priv->dev), /* parent */
+ 1, 0, /* alignment, bounds */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ GVE_TSO_MAXSIZE_DQO, /* maxsize */
+ GVE_TX_MAX_DATA_DESCS_DQO, /* nsegments */
+ GVE_TX_MAX_BUF_SIZE_DQO, /* maxsegsize */
+ BUS_DMA_ALLOCNOW, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockarg */
+ &tx->dqo.buf_dmatag);
+ if (err != 0) {
+ device_printf(priv->dev, "%s: bus_dma_tag_create failed: %d\n",
+ __func__, err);
+ return (err);
+ }
+
+ for (j = 0; j < tx->dqo.num_pending_pkts; j++) {
+ err = bus_dmamap_create(tx->dqo.buf_dmatag, 0,
+ &tx->dqo.pending_pkts[j].dmamap);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "err in creating pending pkt dmamap %d: %d",
+ j, err);
+ return (err);
+ }
+ tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE;
+ }
+
+ return (0);
+}
+
+int
+gve_tx_alloc_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_tx_ring *tx = &priv->tx[i];
+ uint16_t num_pending_pkts;
+ int err;
+
+ /* Descriptor ring */
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(union gve_tx_desc_dqo) * priv->tx_desc_cnt,
+ CACHE_LINE_SIZE, &tx->desc_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc desc ring for tx ring %d", i);
+ goto abort;
+ }
+ tx->dqo.desc_ring = tx->desc_ring_mem.cpu_addr;
+
+ /* Completion ring */
+ err = gve_dma_alloc_coherent(priv,
+ sizeof(struct gve_tx_compl_desc_dqo) * priv->tx_desc_cnt,
+ CACHE_LINE_SIZE, &tx->dqo.compl_ring_mem);
+ if (err != 0) {
+ device_printf(priv->dev,
+ "Failed to alloc compl ring for tx ring %d", i);
+ goto abort;
+ }
+ tx->dqo.compl_ring = tx->dqo.compl_ring_mem.cpu_addr;
+
+ /*
+ * pending_pkts array
+ *
+ * The max number of pending packets determines the maximum number of
+ * descriptors which may be written to the completion queue.
+ *
+ * We must set the number small enough to make sure we never overrun the
+ * completion queue.
+ */
+ num_pending_pkts = priv->tx_desc_cnt;
+ /*
+ * Reserve space for descriptor completions, which will be reported at
+ * most every GVE_TX_MIN_RE_INTERVAL packets.
+ */
+ num_pending_pkts -= num_pending_pkts / GVE_TX_MIN_RE_INTERVAL;
+
+ tx->dqo.num_pending_pkts = num_pending_pkts;
+ tx->dqo.pending_pkts = malloc(
+ sizeof(struct gve_tx_pending_pkt_dqo) * num_pending_pkts,
+ M_GVE, M_WAITOK | M_ZERO);
+
+ if (gve_is_qpl(priv)) {
+ int qpl_buf_cnt;
+
+ tx->com.qpl = gve_alloc_qpl(priv, i, GVE_TX_NUM_QPL_PAGES_DQO,
+ /*single_kva*/false);
+ if (tx->com.qpl == NULL) {
+ device_printf(priv->dev,
+ "Failed to alloc QPL for tx ring %d", i);
+ err = ENOMEM;
+ goto abort;
+ }
+
+ qpl_buf_cnt = GVE_TX_BUFS_PER_PAGE_DQO *
+ tx->com.qpl->num_pages;
+
+ tx->dqo.qpl_bufs = malloc(
+ sizeof(*tx->dqo.qpl_bufs) * qpl_buf_cnt,
+ M_GVE, M_WAITOK | M_ZERO);
+ } else {
+ err = gve_tx_alloc_rda_fields_dqo(tx);
+ if (err != 0)
+ goto abort;
+ }
+
+ return (0);
+
+abort:
+ gve_tx_free_ring_dqo(priv, i);
+ return (err);
+}
+
+static void
+gve_extract_tx_metadata_dqo(const struct mbuf *mbuf,
+ struct gve_tx_metadata_dqo *metadata)
+{
+ uint32_t hash = mbuf->m_pkthdr.flowid;
+ uint16_t path_hash;
+
+ metadata->version = GVE_TX_METADATA_VERSION_DQO;
+ if (hash) {
+ path_hash = hash ^ (hash >> 16);
+
+ path_hash &= (1 << 15) - 1;
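+ /* Avoid a path hash of zero by remapping it to all-ones. */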
+ if (__predict_false(path_hash == 0))
+ path_hash = ~path_hash;
+
+ metadata->path_hash = path_hash;
+ }
+}
+
+static void
+gve_tx_fill_pkt_desc_dqo(struct gve_tx_ring *tx,
+ uint32_t *desc_idx, uint32_t len, uint64_t addr,
+ int16_t compl_tag, bool eop, bool csum_enabled)
+{
+ while (len > 0) {
+ struct gve_tx_pkt_desc_dqo *desc =
+ &tx->dqo.desc_ring[*desc_idx].pkt;
+ uint32_t cur_len = MIN(len, GVE_TX_MAX_BUF_SIZE_DQO);
+ bool cur_eop = eop && cur_len == len;
+
+ *desc = (struct gve_tx_pkt_desc_dqo){
+ .buf_addr = htole64(addr),
+ .dtype = GVE_TX_PKT_DESC_DTYPE_DQO,
+ .end_of_packet = cur_eop,
+ .checksum_offload_enable = csum_enabled,
+ .compl_tag = htole16(compl_tag),
+ .buf_size = cur_len,
+ };
+
+ addr += cur_len;
+ len -= cur_len;
+ *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
+ }
+}
+
+static void
+gve_tx_fill_tso_ctx_desc(struct gve_tx_tso_context_desc_dqo *desc,
+ const struct mbuf *mbuf, const struct gve_tx_metadata_dqo *metadata,
+ int header_len)
+{
+ *desc = (struct gve_tx_tso_context_desc_dqo){
+ .header_len = header_len,
+ .cmd_dtype = {
+ .dtype = GVE_TX_TSO_CTX_DESC_DTYPE_DQO,
+ .tso = 1,
+ },
+ .flex0 = metadata->bytes[0],
+ .flex5 = metadata->bytes[5],
+ .flex6 = metadata->bytes[6],
+ .flex7 = metadata->bytes[7],
+ .flex8 = metadata->bytes[8],
+ .flex9 = metadata->bytes[9],
+ .flex10 = metadata->bytes[10],
+ .flex11 = metadata->bytes[11],
+ };
+ desc->tso_total_len = mbuf->m_pkthdr.len - header_len;
+ desc->mss = mbuf->m_pkthdr.tso_segsz;
+}
+
+static void
+gve_tx_fill_general_ctx_desc(struct gve_tx_general_context_desc_dqo *desc,
+ const struct gve_tx_metadata_dqo *metadata)
+{
+ *desc = (struct gve_tx_general_context_desc_dqo){
+ .flex0 = metadata->bytes[0],
+ .flex1 = metadata->bytes[1],
+ .flex2 = metadata->bytes[2],
+ .flex3 = metadata->bytes[3],
+ .flex4 = metadata->bytes[4],
+ .flex5 = metadata->bytes[5],
+ .flex6 = metadata->bytes[6],
+ .flex7 = metadata->bytes[7],
+ .flex8 = metadata->bytes[8],
+ .flex9 = metadata->bytes[9],
+ .flex10 = metadata->bytes[10],
+ .flex11 = metadata->bytes[11],
+ .cmd_dtype = {.dtype = GVE_TX_GENERAL_CTX_DESC_DTYPE_DQO},
+ };
+}
+
+#define PULLUP_HDR(m, len) \
+do { \
+ if (__predict_false((m)->m_len < (len))) { \
+ (m) = m_pullup((m), (len)); \
+ if ((m) == NULL) \
+ return (EINVAL); \
+ } \
+} while (0)
+
+static int
+gve_prep_tso(struct mbuf *mbuf, int *header_len)
+{
+ uint8_t l3_off, l4_off = 0;
+ struct ether_header *eh;
+ struct tcphdr *th;
+ u_short csum;
+
+ PULLUP_HDR(mbuf, sizeof(*eh));
+ eh = mtod(mbuf, struct ether_header *);
+ KASSERT(ntohs(eh->ether_type) != ETHERTYPE_VLAN,
+ ("VLAN-tagged packets not supported"));
+ l3_off = ETHER_HDR_LEN;
+
+#ifdef INET6
+ if (ntohs(eh->ether_type) == ETHERTYPE_IPV6) {
+ struct ip6_hdr *ip6;
+
+ PULLUP_HDR(mbuf, l3_off + sizeof(*ip6));
+ ip6 = (struct ip6_hdr *)(mtodo(mbuf, l3_off));
+ l4_off = l3_off + sizeof(struct ip6_hdr);
+ csum = in6_cksum_pseudo(ip6, /*len=*/0, IPPROTO_TCP,
+ /*csum=*/0);
+ } else
+#endif
+ if (ntohs(eh->ether_type) == ETHERTYPE_IP) {
+ struct ip *ip;
+
+ PULLUP_HDR(mbuf, l3_off + sizeof(*ip));
+ ip = (struct ip *)(mtodo(mbuf, l3_off));
+ l4_off = l3_off + (ip->ip_hl << 2);
+ csum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr,
+ htons(IPPROTO_TCP));
+ }
+
+ PULLUP_HDR(mbuf, l4_off + sizeof(struct tcphdr));
+ th = (struct tcphdr *)(mtodo(mbuf, l4_off));
+ *header_len = l4_off + (th->th_off << 2);
+
+ /*
+ * Hardware requires the th->th_sum to not include the TCP payload,
+ * hence we recompute the csum with it excluded.
+ */
+ th->th_sum = csum;
+
+ return (0);
+}
+
+static int
+gve_tx_fill_ctx_descs(struct gve_tx_ring *tx, struct mbuf *mbuf,
+ bool is_tso, uint32_t *desc_idx)
+{
+ struct gve_tx_general_context_desc_dqo *gen_desc;
+ struct gve_tx_tso_context_desc_dqo *tso_desc;
+ struct gve_tx_metadata_dqo metadata;
+ int header_len;
+ int err;
+
+ metadata = (struct gve_tx_metadata_dqo){0};
+ gve_extract_tx_metadata_dqo(mbuf, &metadata);
+
+ if (is_tso) {
+ err = gve_prep_tso(mbuf, &header_len);
+ if (__predict_false(err)) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_delayed_pkt_tsoerr, 1);
+ counter_exit();
+ return (err);
+ }
+
+ tso_desc = &tx->dqo.desc_ring[*desc_idx].tso_ctx;
+ gve_tx_fill_tso_ctx_desc(tso_desc, mbuf, &metadata, header_len);
+
+ *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
+ counter_enter();
+ counter_u64_add_protected(tx->stats.tso_packet_cnt, 1);
+ counter_exit();
+ }
+
+ gen_desc = &tx->dqo.desc_ring[*desc_idx].general_ctx;
+ gve_tx_fill_general_ctx_desc(gen_desc, &metadata);
+ *desc_idx = (*desc_idx + 1) & tx->dqo.desc_mask;
+ return (0);
+}
+
+static int
+gve_map_mbuf_dqo(struct gve_tx_ring *tx,
+ struct mbuf **mbuf, bus_dmamap_t dmamap,
+ bus_dma_segment_t *segs, int *nsegs, int attempt)
+{
+ struct mbuf *m_new = NULL;
+ int err;
+
+ err = bus_dmamap_load_mbuf_sg(tx->dqo.buf_dmatag, dmamap,
+ *mbuf, segs, nsegs, BUS_DMA_NOWAIT);
+
+ switch (err) {
+ case __predict_true(0):
+ break;
+ case EFBIG:
+ if (__predict_false(attempt > 0))
+ goto abort;
+
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_mbuf_collapse, 1);
+ counter_exit();
+
+ /* Try m_collapse before m_defrag */
+ m_new = m_collapse(*mbuf, M_NOWAIT,
+ GVE_TX_MAX_DATA_DESCS_DQO);
+ if (m_new == NULL) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_mbuf_defrag, 1);
+ counter_exit();
+ m_new = m_defrag(*mbuf, M_NOWAIT);
+ }
+
+ if (__predict_false(m_new == NULL)) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_mbuf_defrag_err, 1);
+ counter_exit();
+
+ m_freem(*mbuf);
+ *mbuf = NULL;
+ err = ENOMEM;
+ goto abort;
+ } else {
+ *mbuf = m_new;
+ return (gve_map_mbuf_dqo(tx, mbuf, dmamap,
+ segs, nsegs, ++attempt));
+ }
+ case ENOMEM:
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_mbuf_dmamap_enomem_err, 1);
+ counter_exit();
+ goto abort;
+ default:
+ goto abort;
+ }
+
+ return (0);
+
+abort:
+ counter_enter();
+ counter_u64_add_protected(tx->stats.tx_mbuf_dmamap_err, 1);
+ counter_exit();
+ return (err);
+}
+
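+/*
+ * Note: at most desc_mask (ring size - 1) slots are ever reported free,
+ * so one slot stays empty and head == tail always means an empty ring.
+ */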
+static uint32_t
+num_avail_desc_ring_slots(const struct gve_tx_ring *tx)
+{
+ uint32_t num_used = (tx->dqo.desc_tail - tx->dqo.desc_head) &
+ tx->dqo.desc_mask;
+
+ return (tx->dqo.desc_mask - num_used);
+}
+
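+/*
+ * Free pending packets live on two lock-free singly-linked lists: a
+ * consumer list owned by the xmit path and a producer list appended to
+ * by the completion path. When the consumer list runs dry, the entire
+ * producer list is claimed with one atomic swap.
+ */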
+static struct gve_tx_pending_pkt_dqo *
+gve_alloc_pending_packet(struct gve_tx_ring *tx)
+{
+ int32_t index = tx->dqo.free_pending_pkts_csm;
+ struct gve_tx_pending_pkt_dqo *pending_pkt;
+
+ /*
+ * No pending packets available in the consumer list,
+ * try to steal the producer list.
+ */
+ if (__predict_false(index == -1)) {
+ tx->dqo.free_pending_pkts_csm = atomic_swap_32(
+ &tx->dqo.free_pending_pkts_prd, -1);
+
+ index = tx->dqo.free_pending_pkts_csm;
+ if (__predict_false(index == -1))
+ return (NULL);
+ }
+
+ pending_pkt = &tx->dqo.pending_pkts[index];
+
+ /* Remove pending_pkt from the consumer list */
+ tx->dqo.free_pending_pkts_csm = pending_pkt->next;
+ pending_pkt->state = GVE_PACKET_STATE_PENDING_DATA_COMPL;
+
+ gve_set_timestamp(&pending_pkt->enqueue_time_sec);
+
+ return (pending_pkt);
+}
+
+static void
+gve_free_pending_packet(struct gve_tx_ring *tx,
+ struct gve_tx_pending_pkt_dqo *pending_pkt)
+{
+ int index = pending_pkt - tx->dqo.pending_pkts;
+ int32_t old_head;
+
+ pending_pkt->state = GVE_PACKET_STATE_FREE;
+
+ gve_invalidate_timestamp(&pending_pkt->enqueue_time_sec);
+
+ /* Add pending_pkt to the producer list */
+ while (true) {
+ old_head = atomic_load_acq_32(&tx->dqo.free_pending_pkts_prd);
+
+ pending_pkt->next = old_head;
+ if (atomic_cmpset_32(&tx->dqo.free_pending_pkts_prd,
+ old_head, index))
+ break;
+ }
+}
+
+/*
+ * Has the side-effect of retrieving the value of the last desc index
+ * processed by the NIC. hw_tx_head is written to by the completions-processing
+ * taskqueue upon receiving descriptor-completions.
+ */
+static bool
+gve_tx_has_desc_room_dqo(struct gve_tx_ring *tx, int needed_descs)
+{
+ if (needed_descs <= num_avail_desc_ring_slots(tx))
+ return (true);
+
+ tx->dqo.desc_head = atomic_load_acq_32(&tx->dqo.hw_tx_head);
+ if (needed_descs > num_avail_desc_ring_slots(tx)) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_delayed_pkt_nospace_descring, 1);
+ counter_exit();
+ return (false);
+ }
+
+ return (true);
+}
+
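+/*
+ * Marks the last descriptor of the packet with report_event, but only if
+ * at least GVE_TX_MIN_RE_INTERVAL descriptors have passed since the last
+ * requested completion, per the HW-enforced minimum interval.
+ */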
+static void
+gve_tx_request_desc_compl(struct gve_tx_ring *tx, uint32_t desc_idx)
+{
+ uint32_t last_report_event_interval;
+ uint32_t last_desc_idx;
+
+ last_desc_idx = (desc_idx - 1) & tx->dqo.desc_mask;
+ last_report_event_interval =
+ (last_desc_idx - tx->dqo.last_re_idx) & tx->dqo.desc_mask;
+
+ if (__predict_false(last_report_event_interval >=
+ GVE_TX_MIN_RE_INTERVAL)) {
+ tx->dqo.desc_ring[last_desc_idx].pkt.report_event = true;
+ tx->dqo.last_re_idx = last_desc_idx;
+ }
+}
+
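+/*
+ * Checks the cached producer count first and only refreshes it from the
+ * atomic (updated by the cleanup path) when the cache looks insufficient.
+ */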
+static bool
+gve_tx_have_enough_qpl_bufs(struct gve_tx_ring *tx, int num_bufs)
+{
+ uint32_t available = tx->dqo.qpl_bufs_produced_cached -
+ tx->dqo.qpl_bufs_consumed;
+
+ if (__predict_true(available >= num_bufs))
+ return (true);
+
+ tx->dqo.qpl_bufs_produced_cached = atomic_load_acq_32(
+ &tx->dqo.qpl_bufs_produced);
+ available = tx->dqo.qpl_bufs_produced_cached -
+ tx->dqo.qpl_bufs_consumed;
+
+ if (__predict_true(available >= num_bufs))
+ return (true);
+ return (false);
+}
+
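+/*
+ * QPL buffers are recycled with the same consumer/producer free-list
+ * scheme used for pending packets above.
+ */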
+static int32_t
+gve_tx_alloc_qpl_buf(struct gve_tx_ring *tx)
+{
+ int32_t buf = tx->dqo.free_qpl_bufs_csm;
+
+ if (__predict_false(buf == -1)) {
+ tx->dqo.free_qpl_bufs_csm = atomic_swap_32(
+ &tx->dqo.free_qpl_bufs_prd, -1);
+ buf = tx->dqo.free_qpl_bufs_csm;
+ if (__predict_false(buf == -1))
+ return (-1);
+ }
+
+ tx->dqo.free_qpl_bufs_csm = tx->dqo.qpl_bufs[buf];
+ tx->dqo.qpl_bufs_consumed++;
+ return (buf);
+}
+
+/*
+ * Tx buffer i corresponds to
+ * qpl_page_id = i / GVE_TX_BUFS_PER_PAGE_DQO
+ * qpl_page_offset = (i % GVE_TX_BUFS_PER_PAGE_DQO) * GVE_TX_BUF_SIZE_DQO
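+ * e.g., if GVE_TX_BUFS_PER_PAGE_DQO were 2 (illustrative only), buffer 5
+ * would land in page 2 at offset GVE_TX_BUF_SIZE_DQO.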
+ */
+static void
+gve_tx_buf_get_addr_dqo(struct gve_tx_ring *tx,
+ int32_t index, void **va, bus_addr_t *dma_addr)
+{
+ int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
+ int offset = (index & (GVE_TX_BUFS_PER_PAGE_DQO - 1)) <<
+ GVE_TX_BUF_SHIFT_DQO;
+
+ *va = (char *)tx->com.qpl->dmas[page_id].cpu_addr + offset;
+ *dma_addr = tx->com.qpl->dmas[page_id].bus_addr + offset;
+}
+
+static struct gve_dma_handle *
+gve_get_page_dma_handle(struct gve_tx_ring *tx, int32_t index)
+{
+ int page_id = index >> (PAGE_SHIFT - GVE_TX_BUF_SHIFT_DQO);
+
+ return (&tx->com.qpl->dmas[page_id]);
+}
+
+static void
+gve_tx_copy_mbuf_and_write_pkt_descs(struct gve_tx_ring *tx,
+ struct mbuf *mbuf, struct gve_tx_pending_pkt_dqo *pkt,
+ bool csum_enabled, int16_t completion_tag,
+ uint32_t *desc_idx)
+{
+ int32_t pkt_len = mbuf->m_pkthdr.len;
+ struct gve_dma_handle *dma;
+ uint32_t copy_offset = 0;
+ int32_t prev_buf = -1;
+ uint32_t copy_len;
+ bus_addr_t addr;
+ int32_t buf;
+ void *va;
+
+ MPASS(pkt->num_qpl_bufs == 0);
+ MPASS(pkt->qpl_buf_head == -1);
+
+ while (copy_offset < pkt_len) {
+ buf = gve_tx_alloc_qpl_buf(tx);
+ /* We already checked for availability */
+ MPASS(buf != -1);
+
+ gve_tx_buf_get_addr_dqo(tx, buf, &va, &addr);
+ copy_len = MIN(GVE_TX_BUF_SIZE_DQO, pkt_len - copy_offset);
+ m_copydata(mbuf, copy_offset, copy_len, va);
+ copy_offset += copy_len;
+
+ dma = gve_get_page_dma_handle(tx, buf);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+
+ gve_tx_fill_pkt_desc_dqo(tx, desc_idx,
+ copy_len, addr, completion_tag,
+ /*eop=*/copy_offset == pkt_len,
+ csum_enabled);
+
+ /* Link all the qpl bufs for a packet */
+ if (prev_buf == -1)
+ pkt->qpl_buf_head = buf;
+ else
+ tx->dqo.qpl_bufs[prev_buf] = buf;
+
+ prev_buf = buf;
+ pkt->num_qpl_bufs++;
+ }
+
+ tx->dqo.qpl_bufs[buf] = -1;
+}
+
+int
+gve_xmit_dqo_qpl(struct gve_tx_ring *tx, struct mbuf *mbuf)
+{
+ uint32_t desc_idx = tx->dqo.desc_tail;
+ struct gve_tx_pending_pkt_dqo *pkt;
+ int total_descs_needed;
+ int16_t completion_tag;
+ bool has_csum_flag;
+ int csum_flags;
+ bool is_tso;
+ int nsegs;
+ int err;
+
+ csum_flags = mbuf->m_pkthdr.csum_flags;
+ has_csum_flag = csum_flags & (CSUM_TCP | CSUM_UDP |
+ CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_TSO);
+ is_tso = csum_flags & CSUM_TSO;
+
+ nsegs = howmany(mbuf->m_pkthdr.len, GVE_TX_BUF_SIZE_DQO);
+ /* Check if we have enough room in the desc ring */
+ total_descs_needed = 1 + /* general_ctx_desc */
+ nsegs + /* pkt_desc */
+ (is_tso ? 1 : 0); /* tso_ctx_desc */
+ if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed)))
+ return (ENOBUFS);
+
+ if (!gve_tx_have_enough_qpl_bufs(tx, nsegs)) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_delayed_pkt_nospace_qpl_bufs, 1);
+ counter_exit();
+ return (ENOBUFS);
+ }
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (pkt == NULL) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_delayed_pkt_nospace_compring, 1);
+ counter_exit();
+ return (ENOBUFS);
+ }
+ completion_tag = pkt - tx->dqo.pending_pkts;
+ pkt->mbuf = mbuf;
+
+ err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx);
+ if (err)
+ goto abort;
+
+ gve_tx_copy_mbuf_and_write_pkt_descs(tx, mbuf, pkt,
+ has_csum_flag, completion_tag, &desc_idx);
+
+ /* Remember the index of the last desc written */
+ tx->dqo.desc_tail = desc_idx;
+
+ /*
+ * Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+ gve_tx_request_desc_compl(tx, desc_idx);
+
+ tx->req += total_descs_needed; /* tx->req is just a sysctl counter */
+ return (0);
+
+abort:
+ pkt->mbuf = NULL;
+ gve_free_pending_packet(tx, pkt);
+ return (err);
+}
+
+int
+gve_xmit_dqo(struct gve_tx_ring *tx, struct mbuf **mbuf_ptr)
+{
+ bus_dma_segment_t segs[GVE_TX_MAX_DATA_DESCS_DQO];
+ uint32_t desc_idx = tx->dqo.desc_tail;
+ struct gve_tx_pending_pkt_dqo *pkt;
+ struct mbuf *mbuf = *mbuf_ptr;
+ int total_descs_needed;
+ int16_t completion_tag;
+ bool has_csum_flag;
+ int csum_flags;
+ bool is_tso;
+ int nsegs;
+ int err;
+ int i;
+
+ csum_flags = mbuf->m_pkthdr.csum_flags;
+ has_csum_flag = csum_flags & (CSUM_TCP | CSUM_UDP |
+ CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_TSO);
+ is_tso = csum_flags & CSUM_TSO;
+
+ /*
+ * This mbuf might end up needing more than 1 pkt desc.
+ * The actual number, `nsegs`, is known only after the
+ * expensive gve_map_mbuf_dqo call. The check below exists
+ * to fail early when the desc ring is truly full.
+ */
+ total_descs_needed = 1 + /* general_ctx_desc */
+ 1 + /* pkt_desc */
+ (is_tso ? 1 : 0); /* tso_ctx_desc */
+ if (__predict_false(!gve_tx_has_desc_room_dqo(tx, total_descs_needed)))
+ return (ENOBUFS);
+
+ pkt = gve_alloc_pending_packet(tx);
+ if (pkt == NULL) {
+ counter_enter();
+ counter_u64_add_protected(
+ tx->stats.tx_delayed_pkt_nospace_compring, 1);
+ counter_exit();
+ return (ENOBUFS);
+ }
+ completion_tag = pkt - tx->dqo.pending_pkts;
+
+ err = gve_map_mbuf_dqo(tx, mbuf_ptr, pkt->dmamap,
+ segs, &nsegs, /*attempt=*/0);
+ if (err)
+ goto abort;
+ mbuf = *mbuf_ptr; /* gve_map_mbuf_dqo might replace the mbuf chain */
+ pkt->mbuf = mbuf;
+
+ total_descs_needed = 1 + /* general_ctx_desc */
+ nsegs + /* pkt_desc */
+ (is_tso ? 1 : 0); /* tso_ctx_desc */
+ if (__predict_false(
+ !gve_tx_has_desc_room_dqo(tx, total_descs_needed))) {
+ err = ENOBUFS;
+ goto abort_with_dma;
+ }
+
+ err = gve_tx_fill_ctx_descs(tx, mbuf, is_tso, &desc_idx);
+ if (err)
+ goto abort_with_dma;
+
+ bus_dmamap_sync(tx->dqo.buf_dmatag, pkt->dmamap, BUS_DMASYNC_PREWRITE);
+ for (i = 0; i < nsegs; i++) {
+ gve_tx_fill_pkt_desc_dqo(tx, &desc_idx,
+ segs[i].ds_len, segs[i].ds_addr,
+ completion_tag, /*eop=*/i == (nsegs - 1),
+ has_csum_flag);
+ }
+
+ /* Remember the index of the last desc written */
+ tx->dqo.desc_tail = desc_idx;
+
+ /*
+ * Request a descriptor completion on the last descriptor of the
+ * packet if we are allowed to by the HW enforced interval.
+ */
+ gve_tx_request_desc_compl(tx, desc_idx);
+
+ tx->req += total_descs_needed; /* tx->req is just a sysctl counter */
+ return (0);
+
+abort_with_dma:
+ gve_unmap_packet(tx, pkt);
+abort:
+ pkt->mbuf = NULL;
+ gve_free_pending_packet(tx, pkt);
+ return (err);
+}
+
+static void
+gve_reap_qpl_bufs_dqo(struct gve_tx_ring *tx,
+ struct gve_tx_pending_pkt_dqo *pkt)
+{
+ int32_t buf = pkt->qpl_buf_head;
+ struct gve_dma_handle *dma;
+ int32_t qpl_buf_tail;
+ int32_t old_head;
+ int i;
+
+ for (i = 0; i < pkt->num_qpl_bufs; i++) {
+ dma = gve_get_page_dma_handle(tx, buf);
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTWRITE);
+ qpl_buf_tail = buf;
+ buf = tx->dqo.qpl_bufs[buf];
+ }
+ MPASS(buf == -1);
+ buf = qpl_buf_tail;
+
+ while (true) {
+ old_head = atomic_load_32(&tx->dqo.free_qpl_bufs_prd);
+ tx->dqo.qpl_bufs[buf] = old_head;
+
+ /*
+ * The "rel" ensures that the update to dqo.free_qpl_bufs_prd
+ * is visible only after the linked list from this pkt is
+ * attached above to old_head.
+ */
+ if (atomic_cmpset_rel_32(&tx->dqo.free_qpl_bufs_prd,
+ old_head, pkt->qpl_buf_head))
+ break;
+ }
+ /*
+ * The "rel" ensures that the update to dqo.qpl_bufs_produced is
+ * visible only after the update to dqo.free_qpl_bufs_prd above.
+ */
+ atomic_add_rel_32(&tx->dqo.qpl_bufs_produced, pkt->num_qpl_bufs);
+
+ gve_clear_qpl_pending_pkt(pkt);
+}
+
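+/*
+ * Returns the byte count of the completed packet, or 0 if the completion
+ * tag is invalid or not pending a data completion.
+ */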
+static uint64_t
+gve_handle_packet_completion(struct gve_priv *priv,
+ struct gve_tx_ring *tx, uint16_t compl_tag)
+{
+ struct gve_tx_pending_pkt_dqo *pending_pkt;
+ int32_t pkt_len;
+
+ if (__predict_false(compl_tag >= tx->dqo.num_pending_pkts)) {
+ device_printf(priv->dev, "Invalid TX completion tag: %d\n",
+ compl_tag);
+ return (0);
+ }
+
+ pending_pkt = &tx->dqo.pending_pkts[compl_tag];
+
+ /* Packet is allocated but not pending data completion. */
+ if (__predict_false(pending_pkt->state !=
+ GVE_PACKET_STATE_PENDING_DATA_COMPL)) {
+ device_printf(priv->dev,
+ "No pending data completion: %d\n", compl_tag);
+ return (0);
+ }
+
+ pkt_len = pending_pkt->mbuf->m_pkthdr.len;
+
+ if (gve_is_qpl(priv))
+ gve_reap_qpl_bufs_dqo(tx, pending_pkt);
+ else
+ gve_unmap_packet(tx, pending_pkt);
+
+ m_freem(pending_pkt->mbuf);
+ pending_pkt->mbuf = NULL;
+ gve_free_pending_packet(tx, pending_pkt);
+ return (pkt_len);
+}
+
+int
+gve_check_tx_timeout_dqo(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ struct gve_tx_pending_pkt_dqo *pending_pkt;
+ int num_timeouts;
+ uint16_t pkt_idx;
+
+ num_timeouts = 0;
+ for (pkt_idx = 0; pkt_idx < tx->dqo.num_pending_pkts; pkt_idx++) {
+ pending_pkt = &tx->dqo.pending_pkts[pkt_idx];
+
+ if (!gve_timestamp_valid(&pending_pkt->enqueue_time_sec))
+ continue;
+
+ if (__predict_false(
+ gve_seconds_since(&pending_pkt->enqueue_time_sec) >
+ GVE_TX_TIMEOUT_PKT_SEC))
+ num_timeouts += 1;
+ }
+
+ return (num_timeouts);
+}
+
+int
+gve_tx_intr_dqo(void *arg)
+{
+ struct gve_tx_ring *tx = arg;
+ struct gve_priv *priv = tx->com.priv;
+ struct gve_ring_com *com = &tx->com;
+
+ if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
+ return (FILTER_STRAY);
+
+ /* Interrupts are automatically masked */
+ taskqueue_enqueue(com->cleanup_tq, &com->cleanup_task);
+ return (FILTER_HANDLED);
+}
+
+static void
+gve_tx_clear_desc_ring_dqo(struct gve_tx_ring *tx)
+{
+ struct gve_ring_com *com = &tx->com;
+ int i;
+
+ for (i = 0; i < com->priv->tx_desc_cnt; i++)
+ tx->dqo.desc_ring[i] = (union gve_tx_desc_dqo){};
+
+ bus_dmamap_sync(tx->desc_ring_mem.tag, tx->desc_ring_mem.map,
+ BUS_DMASYNC_PREWRITE);
+}
+
+static void
+gve_tx_clear_compl_ring_dqo(struct gve_tx_ring *tx)
+{
+ struct gve_ring_com *com = &tx->com;
+ int entries;
+ int i;
+
+ entries = com->priv->tx_desc_cnt;
+ for (i = 0; i < entries; i++)
+ tx->dqo.compl_ring[i] = (struct gve_tx_compl_desc_dqo){};
+
+ bus_dmamap_sync(tx->dqo.compl_ring_mem.tag, tx->dqo.compl_ring_mem.map,
+ BUS_DMASYNC_PREWRITE);
+}
+
+void
+gve_clear_tx_ring_dqo(struct gve_priv *priv, int i)
+{
+ struct gve_tx_ring *tx = &priv->tx[i];
+ int j;
+
+ tx->dqo.desc_head = 0;
+ tx->dqo.desc_tail = 0;
+ tx->dqo.desc_mask = priv->tx_desc_cnt - 1;
+ tx->dqo.last_re_idx = 0;
+
+ tx->dqo.compl_head = 0;
+ tx->dqo.compl_mask = priv->tx_desc_cnt - 1;
+ atomic_store_32(&tx->dqo.hw_tx_head, 0);
+ tx->dqo.cur_gen_bit = 0;
+
+ gve_free_tx_mbufs_dqo(tx);
+
+ for (j = 0; j < tx->dqo.num_pending_pkts; j++) {
+ if (gve_is_qpl(tx->com.priv))
+ gve_clear_qpl_pending_pkt(&tx->dqo.pending_pkts[j]);
+ gve_invalidate_timestamp(
+ &tx->dqo.pending_pkts[j].enqueue_time_sec);
+ tx->dqo.pending_pkts[j].next =
+ (j == tx->dqo.num_pending_pkts - 1) ? -1 : j + 1;
+ tx->dqo.pending_pkts[j].state = GVE_PACKET_STATE_FREE;
+ }
+ tx->dqo.free_pending_pkts_csm = 0;
+ atomic_store_rel_32(&tx->dqo.free_pending_pkts_prd, -1);
+
+ if (gve_is_qpl(priv)) {
+ int qpl_buf_cnt = GVE_TX_BUFS_PER_PAGE_DQO *
+ tx->com.qpl->num_pages;
+
+ for (j = 0; j < qpl_buf_cnt - 1; j++)
+ tx->dqo.qpl_bufs[j] = j + 1;
+ tx->dqo.qpl_bufs[j] = -1;
+
+ tx->dqo.free_qpl_bufs_csm = 0;
+ atomic_store_32(&tx->dqo.free_qpl_bufs_prd, -1);
+ atomic_store_32(&tx->dqo.qpl_bufs_produced, qpl_buf_cnt);
+ tx->dqo.qpl_bufs_produced_cached = qpl_buf_cnt;
+ tx->dqo.qpl_bufs_consumed = 0;
+ }
+
+ gve_tx_clear_desc_ring_dqo(tx);
+ gve_tx_clear_compl_ring_dqo(tx);
+}
+
+static uint8_t
+gve_tx_get_gen_bit(uint8_t *desc)
+{
+ uint8_t byte;
+
+ /*
+ * Prevent generation bit from being read after the rest of the
+ * descriptor.
+ */
+ byte = atomic_load_acq_8(desc + GVE_TX_DESC_DQO_GEN_BYTE_OFFSET);
+ return ((byte & GVE_TX_DESC_DQO_GEN_BIT_MASK) != 0);
+}
+
+static bool
+gve_tx_cleanup_dqo(struct gve_priv *priv, struct gve_tx_ring *tx, int budget)
+{
+ struct gve_tx_compl_desc_dqo *compl_desc;
+ uint64_t bytes_done = 0;
+ uint64_t pkts_done = 0;
+ uint16_t compl_tag;
+ int work_done = 0;
+ uint16_t tx_head;
+ uint16_t type;
+
+ while (work_done < budget) {
+ bus_dmamap_sync(tx->dqo.compl_ring_mem.tag,
+ tx->dqo.compl_ring_mem.map,
+ BUS_DMASYNC_POSTREAD);
+
+ compl_desc = &tx->dqo.compl_ring[tx->dqo.compl_head];
+ if (gve_tx_get_gen_bit((uint8_t *)compl_desc) ==
+ tx->dqo.cur_gen_bit)
+ break;
+
+ type = compl_desc->type;
+ if (type == GVE_COMPL_TYPE_DQO_DESC) {
+ /* This is the last descriptor fetched by HW plus one */
+ tx_head = le16toh(compl_desc->tx_head);
+ atomic_store_rel_32(&tx->dqo.hw_tx_head, tx_head);
+ } else if (type == GVE_COMPL_TYPE_DQO_PKT) {
+ compl_tag = le16toh(compl_desc->completion_tag);
+ bytes_done += gve_handle_packet_completion(priv,
+ tx, compl_tag);
+ pkts_done++;
+ }
+
+ tx->dqo.compl_head = (tx->dqo.compl_head + 1) &
+ tx->dqo.compl_mask;
+ /* Flip the generation bit when we wrap around */
+ tx->dqo.cur_gen_bit ^= tx->dqo.compl_head == 0;
+ work_done++;
+ }
+
+ /*
+ * Waking the xmit taskqueue has to occur after room has been made in
+ * the queue.
+ */
+ atomic_thread_fence_seq_cst();
+ if (atomic_load_bool(&tx->stopped) && work_done) {
+ atomic_store_bool(&tx->stopped, false);
+ taskqueue_enqueue(tx->xmit_tq, &tx->xmit_task);
+ }
+
+ tx->done += work_done; /* tx->done is just a sysctl counter */
+ counter_enter();
+ counter_u64_add_protected(tx->stats.tbytes, bytes_done);
+ counter_u64_add_protected(tx->stats.tpackets, pkts_done);
+ counter_exit();
+
+ return (work_done == budget);
+}
+
+void
+gve_tx_cleanup_tq_dqo(void *arg, int pending)
+{
+ struct gve_tx_ring *tx = arg;
+ struct gve_priv *priv = tx->com.priv;
+
+ if (__predict_false((if_getdrvflags(priv->ifp) & IFF_DRV_RUNNING) == 0))
+ return;
+
+ if (gve_tx_cleanup_dqo(priv, tx, /*budget=*/1024)) {
+ taskqueue_enqueue(tx->com.cleanup_tq, &tx->com.cleanup_task);
+ return;
+ }
+
+ gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset,
+ GVE_ITR_NO_UPDATE_DQO | GVE_ITR_ENABLE_BIT_DQO);
+}
diff --git a/sys/dev/gve/gve_utils.c b/sys/dev/gve/gve_utils.c
index c05488770dbd..707b8f039d88 100644
--- a/sys/dev/gve/gve_utils.c
+++ b/sys/dev/gve/gve_utils.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2023 Google LLC
+ * Copyright (c) 2023-2024 Google LLC
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
@@ -29,6 +29,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "gve.h"
+#include "gve_dqo.h"
uint32_t
gve_reg_bar_read_4(struct gve_priv *priv, bus_size_t offset)
@@ -49,6 +50,12 @@ gve_db_bar_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
}
void
+gve_db_bar_dqo_write_4(struct gve_priv *priv, bus_size_t offset, uint32_t val)
+{
+ bus_write_4(priv->db_bar, offset, val);
+}
+
+void
gve_alloc_counters(counter_u64_t *stat, int num_stats)
{
int i;
@@ -227,7 +234,7 @@ gve_free_irqs(struct gve_priv *priv)
return;
}
- num_irqs = priv->tx_cfg.num_queues + priv->rx_cfg.num_queues + 1;
+ num_irqs = priv->tx_cfg.max_queues + priv->rx_cfg.max_queues + 1;
for (i = 0; i < num_irqs; i++) {
irq = &priv->irq_tbl[i];
@@ -261,8 +268,8 @@ gve_free_irqs(struct gve_priv *priv)
int
gve_alloc_irqs(struct gve_priv *priv)
{
- int num_tx = priv->tx_cfg.num_queues;
- int num_rx = priv->rx_cfg.num_queues;
+ int num_tx = priv->tx_cfg.max_queues;
+ int num_rx = priv->rx_cfg.max_queues;
int req_nvecs = num_tx + num_rx + 1;
int got_nvecs = req_nvecs;
struct gve_irq *irq;
@@ -307,7 +314,8 @@ gve_alloc_irqs(struct gve_priv *priv)
}
err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
- gve_tx_intr, NULL, &priv->tx[i], &irq->cookie);
+ gve_is_gqi(priv) ? gve_tx_intr : gve_tx_intr_dqo, NULL,
+ &priv->tx[i], &irq->cookie);
if (err != 0) {
device_printf(priv->dev, "Failed to setup irq %d for Tx queue %d, "
"err: %d\n", rid, i, err);
@@ -334,7 +342,8 @@ gve_alloc_irqs(struct gve_priv *priv)
}
err = bus_setup_intr(priv->dev, irq->res, INTR_TYPE_NET | INTR_MPSAFE,
- gve_rx_intr, NULL, &priv->rx[j], &irq->cookie);
+ gve_is_gqi(priv) ? gve_rx_intr : gve_rx_intr_dqo, NULL,
+ &priv->rx[j], &irq->cookie);
if (err != 0) {
device_printf(priv->dev, "Failed to setup irq %d for Rx queue %d, "
"err: %d\n", rid, j, err);
@@ -374,6 +383,24 @@ abort:
return (err);
}
+/*
+ * Builds register value to write to DQO IRQ doorbell to enable with specified
+ * ITR interval.
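+ * e.g., interval_us = 20 programs 10 into the interval field (2 us units).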
+ */
+static uint32_t
+gve_setup_itr_interval_dqo(uint32_t interval_us)
+{
+ uint32_t result = GVE_ITR_ENABLE_BIT_DQO;
+
+ /* Interval has 2us granularity. */
+ interval_us >>= 1;
+
+ interval_us &= GVE_ITR_INTERVAL_DQO_MASK;
+ result |= (interval_us << GVE_ITR_INTERVAL_DQO_SHIFT);
+
+ return (result);
+}
+
void
gve_unmask_all_queue_irqs(struct gve_priv *priv)
{
@@ -383,11 +410,20 @@ gve_unmask_all_queue_irqs(struct gve_priv *priv)
for (idx = 0; idx < priv->tx_cfg.num_queues; idx++) {
tx = &priv->tx[idx];
- gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
+ if (gve_is_gqi(priv))
+ gve_db_bar_write_4(priv, tx->com.irq_db_offset, 0);
+ else
+ gve_db_bar_dqo_write_4(priv, tx->com.irq_db_offset,
+ gve_setup_itr_interval_dqo(GVE_TX_IRQ_RATELIMIT_US_DQO));
}
+
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
rx = &priv->rx[idx];
- gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
+ if (gve_is_gqi(priv))
+ gve_db_bar_write_4(priv, rx->com.irq_db_offset, 0);
+ else
+ gve_db_bar_dqo_write_4(priv, rx->com.irq_db_offset,
+ gve_setup_itr_interval_dqo(GVE_RX_IRQ_RATELIMIT_US_DQO));
}
}
@@ -403,3 +439,46 @@ gve_mask_all_queue_irqs(struct gve_priv *priv)
gve_db_bar_write_4(priv, rx->com.irq_db_offset, GVE_IRQ_MASK);
}
}
+
+/*
+ * In some cases, such as tracking timeout events, we must mark a timestamp as
+ * invalid when we do not want to consider its value. Such timestamps must be
+ * checked for validity before reading them.
+ */
+void
+gve_invalidate_timestamp(int64_t *timestamp_sec)
+{
+ atomic_store_64(timestamp_sec, GVE_TIMESTAMP_INVALID);
+}
+
+/*
+ * Returns 0 if the timestamp is invalid, otherwise returns the elapsed seconds
+ * since the timestamp was set.
+ */
+int64_t
+gve_seconds_since(int64_t *timestamp_sec)
+{
+ struct bintime curr_time;
+ int64_t enqueued_time;
+
+ getbintime(&curr_time);
+ enqueued_time = atomic_load_64(timestamp_sec);
+ if (enqueued_time == GVE_TIMESTAMP_INVALID)
+ return (0);
+ return ((int64_t)(curr_time.sec - enqueued_time));
+}
+
+void
+gve_set_timestamp(int64_t *timestamp_sec)
+{
+ struct bintime curr_time;
+
+ getbintime(&curr_time);
+ atomic_store_64(timestamp_sec, curr_time.sec);
+}
+
+bool
+gve_timestamp_valid(int64_t *timestamp_sec)
+{
+ return (atomic_load_64(timestamp_sec) != GVE_TIMESTAMP_INVALID);
+}
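
The four timestamp helpers above are meant to be used together: a producer
stamps a queue when it hands work to the device, a watchdog turns the stamp
into an age, and the completion path invalidates it. A minimal sketch of that
pattern, assuming a hypothetical last_kick_sec field on the Tx ring and an
illustrative 5-second threshold (neither is part of this change):

	/* Producer path: remember when work was last posted. */
	gve_set_timestamp(&tx->last_kick_sec);

	/* Watchdog: a valid but old stamp suggests a stuck queue. */
	if (gve_timestamp_valid(&tx->last_kick_sec) &&
	    gve_seconds_since(&tx->last_kick_sec) > 5 /* illustrative */)
		device_printf(priv->dev, "Tx queue appears stuck\n");

	/* Completion path: stop considering the stamp. */
	gve_invalidate_timestamp(&tx->last_kick_sec);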
diff --git a/sys/dev/hid/hid.c b/sys/dev/hid/hid.c
index 4b5d4a81b51e..453c37d806fc 100644
--- a/sys/dev/hid/hid.c
+++ b/sys/dev/hid/hid.c
@@ -69,7 +69,7 @@ hid_test_quirk_t *hid_test_quirk_p = &hid_test_quirk_w;
#define MAXLOCCNT 2048
struct hid_pos_data {
- int32_t rid;
+ uint32_t rid;
uint32_t pos;
};
@@ -79,9 +79,9 @@ struct hid_data {
const uint8_t *p;
struct hid_item cur[MAXPUSH];
struct hid_pos_data last_pos[MAXID];
- int32_t usages_min[MAXUSAGE];
- int32_t usages_max[MAXUSAGE];
- int32_t usage_last; /* last seen usage */
+ uint32_t usages_min[MAXUSAGE];
+ uint32_t usages_max[MAXUSAGE];
+ uint32_t usage_last; /* last seen usage */
uint32_t loc_size; /* last seen size */
uint32_t loc_count; /* last seen count */
uint32_t ncount; /* end usage item count */
@@ -117,7 +117,7 @@ hid_clear_local(struct hid_item *c)
}
static void
-hid_switch_rid(struct hid_data *s, struct hid_item *c, int32_t next_rID)
+hid_switch_rid(struct hid_data *s, struct hid_item *c, uint32_t next_rID)
{
uint8_t i;
@@ -242,6 +242,7 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
uint32_t oldpos;
int32_t mask;
int32_t dval;
+ uint32_t uval;
if (s == NULL)
return (0);
@@ -253,10 +254,10 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
if (s->icount < s->ncount) {
/* get current usage */
if (s->iusage < s->nusage) {
- dval = s->usages_min[s->iusage] + s->ousage;
- c->usage = dval;
- s->usage_last = dval;
- if (dval == s->usages_max[s->iusage]) {
+ uval = s->usages_min[s->iusage] + s->ousage;
+ c->usage = uval;
+ s->usage_last = uval;
+ if (uval == s->usages_max[s->iusage]) {
s->iusage ++;
s->ousage = 0;
} else {
@@ -264,7 +265,7 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
}
} else {
DPRINTFN(1, "Using last usage\n");
- dval = s->usage_last;
+ uval = s->usage_last;
}
c->nusages = 1;
/* array type HID item may have multiple usages */
@@ -318,28 +319,32 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
}
switch (bSize) {
case 0:
- dval = 0;
+ uval = 0;
+ dval = uval;
mask = 0;
break;
case 1:
- dval = (int8_t)hid_get_byte(s, 1);
+ uval = hid_get_byte(s, 1);
+ dval = (int8_t)uval;
mask = 0xFF;
break;
case 2:
- dval = hid_get_byte(s, 1);
- dval |= hid_get_byte(s, 1) << 8;
- dval = (int16_t)dval;
+ uval = hid_get_byte(s, 1);
+ uval |= hid_get_byte(s, 1) << 8;
+ dval = (int16_t)uval;
mask = 0xFFFF;
break;
case 4:
- dval = hid_get_byte(s, 1);
- dval |= hid_get_byte(s, 1) << 8;
- dval |= hid_get_byte(s, 1) << 16;
- dval |= hid_get_byte(s, 1) << 24;
+ uval = hid_get_byte(s, 1);
+ uval |= hid_get_byte(s, 1) << 8;
+ uval |= hid_get_byte(s, 1) << 16;
+ uval |= hid_get_byte(s, 1) << 24;
+ dval = uval;
mask = 0xFFFFFFFF;
break;
default:
- dval = hid_get_byte(s, bSize);
+ uval = hid_get_byte(s, bSize);
+ dval = uval;
DPRINTFN(0, "bad length %u (data=0x%02x)\n",
bSize, dval);
continue;
@@ -351,7 +356,7 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
case 8: /* Input */
c->kind = hid_input;
ret:
- c->flags = dval;
+ c->flags = uval;
c->loc.count = s->loc_count;
c->loc.size = s->loc_size;
@@ -381,7 +386,7 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
goto ret;
case 10: /* Collection */
c->kind = hid_collection;
- c->collection = dval;
+ c->collection = uval;
c->collevel++;
c->usage = s->usage_last;
c->nusages = 1;
@@ -407,7 +412,7 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
case 1: /* Global */
switch (bTag) {
case 0:
- c->_usage_page = dval << 16;
+ c->_usage_page = uval << 16;
break;
case 1:
c->logical_minimum = dval;
@@ -422,21 +427,21 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
c->physical_maximum = dval;
break;
case 5:
- c->unit_exponent = dval;
+ c->unit_exponent = uval;
break;
case 6:
- c->unit = dval;
+ c->unit = uval;
break;
case 7:
/* mask because value is unsigned */
- s->loc_size = dval & mask;
+ s->loc_size = uval & mask;
break;
case 8:
- hid_switch_rid(s, c, dval & mask);
+ hid_switch_rid(s, c, uval & mask);
break;
case 9:
/* mask because value is unsigned */
- s->loc_count = dval & mask;
+ s->loc_count = uval & mask;
break;
case 10: /* Push */
/* stop parsing, if invalid push level */
@@ -479,14 +484,14 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
switch (bTag) {
case 0:
if (bSize != 4)
- dval = (dval & mask) | c->_usage_page;
+ uval = (uval & mask) | c->_usage_page;
/* set last usage, in case of a collection */
- s->usage_last = dval;
+ s->usage_last = uval;
if (s->nusage < MAXUSAGE) {
- s->usages_min[s->nusage] = dval;
- s->usages_max[s->nusage] = dval;
+ s->usages_min[s->nusage] = uval;
+ s->usages_max[s->nusage] = uval;
s->nusage ++;
} else {
DPRINTFN(0, "max usage reached\n");
@@ -499,16 +504,16 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
s->susage |= 1;
if (bSize != 4)
- dval = (dval & mask) | c->_usage_page;
- c->usage_minimum = dval;
+ uval = (uval & mask) | c->_usage_page;
+ c->usage_minimum = uval;
goto check_set;
case 2:
s->susage |= 2;
if (bSize != 4)
- dval = (dval & mask) | c->_usage_page;
- c->usage_maximum = dval;
+ uval = (uval & mask) | c->_usage_page;
+ c->usage_maximum = uval;
check_set:
if (s->susage != 3)
@@ -529,25 +534,25 @@ hid_get_item(struct hid_data *s, struct hid_item *h)
s->susage = 0;
break;
case 3:
- c->designator_index = dval;
+ c->designator_index = uval;
break;
case 4:
- c->designator_minimum = dval;
+ c->designator_minimum = uval;
break;
case 5:
- c->designator_maximum = dval;
+ c->designator_maximum = uval;
break;
case 7:
- c->string_index = dval;
+ c->string_index = uval;
break;
case 8:
- c->string_minimum = dval;
+ c->string_minimum = uval;
break;
case 9:
- c->string_maximum = dval;
+ c->string_maximum = uval;
break;
case 10:
- c->set_delimiter = dval;
+ c->set_delimiter = uval;
break;
default:
DPRINTFN(0, "Local bTag=%d\n", bTag);
diff --git a/sys/dev/hid/hid.h b/sys/dev/hid/hid.h
index 09fad96c9559..e56f8ffe772b 100644
--- a/sys/dev/hid/hid.h
+++ b/sys/dev/hid/hid.h
@@ -57,6 +57,7 @@
#define HUP_SCALE 0x008c
#define HUP_CAMERA_CONTROL 0x0090
#define HUP_ARCADE 0x0091
+#define HUP_FIDO 0xf1d0
#define HUP_MICROSOFT 0xff00
/* Usages, generic desktop */
@@ -161,6 +162,9 @@
#define HUC_HEADPHONE 0x0005
#define HUC_AC_PAN 0x0238
+/* Usages, FIDO */
+#define HUF_U2FHID 0x0001
+
#define HID_USAGE2(p,u) (((p) << 16) | (u))
#define HID_GET_USAGE(u) ((u) & 0xffff)
#define HID_GET_USAGE_PAGE(u) (((u) >> 16) & 0xffff)
@@ -233,31 +237,31 @@ struct hid_location {
struct hid_item {
/* Global */
- int32_t _usage_page;
+ uint32_t _usage_page;
int32_t logical_minimum;
int32_t logical_maximum;
int32_t physical_minimum;
int32_t physical_maximum;
- int32_t unit_exponent;
- int32_t unit;
- int32_t report_ID;
+ uint32_t unit_exponent;
+ uint32_t unit;
+ uint32_t report_ID;
/* Local */
int nusages;
union {
- int32_t usage;
- int32_t usages[HID_ITEM_MAXUSAGE];
+ uint32_t usage;
+ uint32_t usages[HID_ITEM_MAXUSAGE];
};
- int32_t usage_minimum;
- int32_t usage_maximum;
- int32_t designator_index;
- int32_t designator_minimum;
- int32_t designator_maximum;
- int32_t string_index;
- int32_t string_minimum;
- int32_t string_maximum;
- int32_t set_delimiter;
+ uint32_t usage_minimum;
+ uint32_t usage_maximum;
+ uint32_t designator_index;
+ uint32_t designator_minimum;
+ uint32_t designator_maximum;
+ uint32_t string_index;
+ uint32_t string_minimum;
+ uint32_t string_maximum;
+ uint32_t set_delimiter;
/* Misc */
- int32_t collection;
+ uint32_t collection;
int collevel;
enum hid_kind kind;
uint32_t flags;
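
The widening from int32_t to uint32_t above is not cosmetic: HID_USAGE2()
shifts the usage page into the upper 16 bits, and with the new HUP_FIDO page
(0xf1d0) that shift lands in the sign bit of a signed field. A standalone
userland illustration of the failure mode, re-declaring the relevant macros
from this header so the snippet is self-contained:

	#include <stdint.h>
	#include <stdio.h>

	#define HID_USAGE2(p,u)	(((p) << 16) | (u))
	#define HUP_FIDO	0xf1d0
	#define HUF_U2FHID	0x0001

	int
	main(void)
	{
		uint32_t u = HID_USAGE2((uint32_t)HUP_FIDO, HUF_U2FHID);

		/*
		 * Prints f1d00001. Stored in an int32_t, the same bit
		 * pattern reads back as a negative number, so comparisons
		 * against usage_minimum/usage_maximum would misbehave.
		 */
		printf("%08x\n", u);
		return (0);
	}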
diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c
index 99bfd7715c24..683449fca49c 100644
--- a/sys/dev/hid/hidbus.c
+++ b/sys/dev/hid/hidbus.c
@@ -65,7 +65,7 @@ struct hidbus_ivars {
struct mtx *mtx; /* child intr mtx */
hid_intr_t *intr_handler; /* executed under mtx*/
void *intr_ctx;
- unsigned int refcnt; /* protected by mtx */
+ bool active; /* protected by mtx */
struct epoch_context epoch_ctx;
CK_STAILQ_ENTRY(hidbus_ivars) link;
};
@@ -226,7 +226,7 @@ hidbus_enumerate_children(device_t dev, const void* data, hid_size_t len)
while (hid_get_item(hd, &hi)) {
if (hi.kind != hid_collection || hi.collevel != 1)
continue;
- child = BUS_ADD_CHILD(dev, 0, NULL, -1);
+ child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "Could not add HID device\n");
continue;
@@ -267,19 +267,17 @@ hidbus_attach_children(device_t dev)
* attach twice in that case.
*/
sc->nest++;
- bus_generic_probe(dev);
+ bus_identify_children(dev);
sc->nest--;
if (sc->nest != 0)
return (0);
if (hid_is_keyboard(sc->rdesc.data, sc->rdesc.len) != 0)
- error = bus_generic_attach(dev);
+ bus_attach_children(dev);
else
- error = bus_delayed_attach_children(dev);
- if (error != 0)
- device_printf(dev, "failed to attach child: error %d\n", error);
+ bus_delayed_attach_children(dev);
- return (error);
+ return (0);
}
static int
@@ -299,8 +297,7 @@ hidbus_detach_children(device_t dev)
if (is_bus) {
/* If hidbus is passed, delete all children. */
- bus_generic_detach(bus);
- device_delete_children(bus);
+ error = bus_generic_detach(bus);
} else {
/*
* If hidbus child is passed, delete all hidbus children
@@ -401,7 +398,7 @@ hidbus_child_detached(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
tlc->mtx = &sc->mtx;
tlc->intr_handler = NULL;
tlc->flags &= ~HIDBUS_FLAG_CAN_POLL;
@@ -426,7 +423,7 @@ hidbus_child_deleted(device_t bus, device_t child)
struct hidbus_ivars *tlc = device_get_ivars(child);
sx_xlock(&sc->sx);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link);
sx_unlock(&sc->sx);
epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx);
@@ -525,14 +522,12 @@ hidbus_set_desc(device_t child, const char *suffix)
struct hidbus_softc *sc = device_get_softc(bus);
struct hid_device_info *devinfo = device_get_ivars(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- char buf[80];
/* Do not add NULL suffix or if device name already contains it. */
if (suffix != NULL && strcasestr(devinfo->name, suffix) == NULL &&
- (sc->nauto > 1 || (tlc->flags & HIDBUS_FLAG_AUTOCHILD) == 0)) {
- snprintf(buf, sizeof(buf), "%s %s", devinfo->name, suffix);
- device_set_desc_copy(child, buf);
- } else
+ (sc->nauto > 1 || (tlc->flags & HIDBUS_FLAG_AUTOCHILD) == 0))
+ device_set_descf(child, "%s %s", devinfo->name, suffix);
+ else
device_set_desc(child, devinfo->name);
}
@@ -577,7 +572,7 @@ hidbus_intr(void *context, void *buf, hid_size_t len)
if (!HID_IN_POLLING_MODE())
epoch_enter_preempt(INPUT_EPOCH, &et);
CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc->refcnt == 0 || tlc->intr_handler == NULL)
+ if (!tlc->active || tlc->intr_handler == NULL)
continue;
if (HID_IN_POLLING_MODE()) {
if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0)
@@ -604,24 +599,17 @@ hidbus_set_intr(device_t child, hid_intr_t *handler, void *context)
static int
hidbus_intr_start(device_t bus, device_t child)
{
- MPASS(bus = device_get_parent(child));
+ MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
- struct hidbus_ivars *tlc;
- bool refcnted = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- refcnted |= (tlc->refcnt != 0);
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- ++tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- }
- error = refcnted ? 0 : hid_intr_start(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = true;
+ mtx_unlock(ivar->mtx);
+ error = hid_intr_start(bus);
sx_unlock(&sc->sx);
return (error);
@@ -630,25 +618,21 @@ hidbus_intr_start(device_t bus, device_t child)
static int
hidbus_intr_stop(device_t bus, device_t child)
{
- MPASS(bus = device_get_parent(child));
+ MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
struct hidbus_ivars *tlc;
- bool refcnted = false;
+ bool active = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- MPASS(tlc->refcnt != 0);
- --tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- refcnted |= (tlc->refcnt != 0);
- }
- error = refcnted ? 0 : hid_intr_stop(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = false;
+ mtx_unlock(ivar->mtx);
+ CK_STAILQ_FOREACH(tlc, &sc->tlcs, link)
+ active |= tlc->active;
+ error = active ? 0 : hid_intr_stop(bus);
sx_unlock(&sc->sx);
return (error);
diff --git a/sys/dev/hid/hidquirk.h b/sys/dev/hid/hidquirk.h
index 4f8b8acbe201..f6fa9f88c6c9 100644
--- a/sys/dev/hid/hidquirk.h
+++ b/sys/dev/hid/hidquirk.h
@@ -50,6 +50,7 @@
HQ(IS_XBOX360GP), /* device is XBox 360 GamePad */ \
HQ(NOWRITE), /* device does not support writes */ \
HQ(IICHID_SAMPLING), /* IIC backend runs in sampling mode */ \
+ HQ(NO_READAHEAD), /* Disable interrupt after one report */\
\
/* Various quirks */ \
HQ(HID_IGNORE), /* device should be ignored by hid class */ \
diff --git a/sys/dev/hid/hidraw.c b/sys/dev/hid/hidraw.c
index 6a05b633cfc8..4855843cd265 100644
--- a/sys/dev/hid/hidraw.c
+++ b/sys/dev/hid/hidraw.c
@@ -3,7 +3,7 @@
*
* Copyright (c) 1998 The NetBSD Foundation, Inc.
* All rights reserved.
- * Copyright (c) 2020 Vladimir Kondratyev <wulf@FreeBSD.org>
+ * Copyright (c) 2020, 2025 Vladimir Kondratyev <wulf@FreeBSD.org>
*
* This code is derived from software contributed to The NetBSD Foundation
* by Lennart Augustsson (lennart@augustsson.net) at
@@ -85,6 +85,12 @@ SYSCTL_INT(_hw_hid_hidraw, OID_AUTO, debug, CTLFLAG_RWTUN,
free((buf), M_DEVBUF); \
}
+#ifdef HIDRAW_MAKE_UHID_ALIAS
+#define HIDRAW_NAME "uhid"
+#else
+#define HIDRAW_NAME "hidraw"
+#endif
+
struct hidraw_softc {
device_t sc_dev; /* base device */
@@ -172,7 +178,7 @@ static int hidraw_kqread(struct knote *, long);
static void hidraw_kqdetach(struct knote *);
static void hidraw_notify(struct hidraw_softc *);
-static struct filterops hidraw_filterops_read = {
+static const struct filterops hidraw_filterops_read = {
.f_isfd = 1,
.f_detach = hidraw_kqdetach,
.f_event = hidraw_kqread,
@@ -183,8 +189,8 @@ hidraw_identify(driver_t *driver, device_t parent)
{
device_t child;
- if (device_find_child(parent, "hidraw", -1) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "hidraw",
+ if (device_find_child(parent, HIDRAW_NAME, DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, HIDRAW_NAME,
device_get_unit(parent));
if (child != NULL)
hidbus_set_index(child, HIDRAW_INDEX);
@@ -570,8 +576,10 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
struct hidraw_devinfo *hd;
const char *devname;
uint32_t size;
+ hid_size_t actsize;
int id, len;
int error = 0;
+ uint8_t reptype;
DPRINTFN(2, "cmd=%lx\n", cmd);
@@ -747,16 +755,16 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
}
size = MIN(hgd->hgd_maxlen, size);
buf = HIDRAW_LOCAL_ALLOC(local_buf, size);
- error = hid_get_report(sc->sc_dev, buf, size, NULL,
+ actsize = 0;
+ error = hid_get_report(sc->sc_dev, buf, size, &actsize,
hgd->hgd_report_type, id);
if (!error)
- error = copyout(buf, hgd->hgd_data, size);
+ error = copyout(buf, hgd->hgd_data, actsize);
HIDRAW_LOCAL_FREE(local_buf, buf);
+ hgd->hgd_actlen = actsize;
#ifdef COMPAT_FREEBSD32
- /*
- * HIDRAW_GET_REPORT is declared _IOWR, but hgd is not written
- * so we don't call update_hgd32().
- */
+ if (hgd32 != NULL)
+ update_hgd32(hgd, hgd32);
#endif
return (error);
@@ -827,6 +835,9 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
*/
if (size >= HID_MAX_DESCRIPTOR_SIZE)
return (EINVAL);
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.uhid = false;
+ mtx_unlock(&sc->sc_mtx);
buf = HIDRAW_LOCAL_ALLOC(local_buf, size);
error = hid_get_rdesc(sc->sc_dev, buf, size);
if (error == 0) {
@@ -859,6 +870,8 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
return (0);
case HIDIOCSFEATURE(0):
+ case HIDIOCSINPUT(0):
+ case HIDIOCSOUTPUT(0):
if (!(sc->sc_fflags & FWRITE))
return (EPERM);
if (len < 2)
@@ -868,10 +881,27 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
addr = (uint8_t *)addr + 1;
len--;
}
- return (hid_set_report(sc->sc_dev, addr, len,
- HID_FEATURE_REPORT, id));
+ switch (IOCBASECMD(cmd)) {
+ case HIDIOCSFEATURE(0):
+ reptype = HID_FEATURE_REPORT;
+ break;
+ case HIDIOCSINPUT(0):
+ reptype = HID_INPUT_REPORT;
+ break;
+ case HIDIOCSOUTPUT(0):
+ reptype = HID_OUTPUT_REPORT;
+ break;
+ default:
+ panic("Invalid report type");
+ }
+ error = hid_set_report(sc->sc_dev, addr, len, reptype, id);
+ if (error == 0)
+ td->td_retval[0] = IOCPARM_LEN(cmd);
+ return (error);
case HIDIOCGFEATURE(0):
+ case HIDIOCGINPUT(0):
+ case HIDIOCGOUTPUT(0):
if (!(sc->sc_fflags & FREAD))
return (EPERM);
if (len < 2)
@@ -881,8 +911,27 @@ hidraw_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
addr = (uint8_t *)addr + 1;
len--;
}
- return (hid_get_report(sc->sc_dev, addr, len, NULL,
- HID_FEATURE_REPORT, id));
+ switch (IOCBASECMD(cmd)) {
+ case HIDIOCGFEATURE(0):
+ reptype = HID_FEATURE_REPORT;
+ break;
+ case HIDIOCGINPUT(0):
+ reptype = HID_INPUT_REPORT;
+ break;
+ case HIDIOCGOUTPUT(0):
+ reptype = HID_OUTPUT_REPORT;
+ break;
+ default:
+ panic("Invalid report type");
+ }
+ error = hid_get_report(sc->sc_dev, addr, len, &actsize,
+ reptype, id);
+ if (error == 0) {
+ if (id == 0)
+ actsize++;
+ td->td_retval[0] = actsize;
+ }
+ return (error);
case HIDIOCGRAWUNIQ(0):
strlcpy(addr, sc->sc_hw->serial, len);
@@ -1007,7 +1056,7 @@ static device_method_t hidraw_methods[] = {
};
static driver_t hidraw_driver = {
- "hidraw",
+ HIDRAW_NAME,
hidraw_methods,
sizeof(struct hidraw_softc)
};
diff --git a/sys/dev/hid/hidraw.h b/sys/dev/hid/hidraw.h
index 4095ddb388bb..41aaf285fac3 100644
--- a/sys/dev/hid/hidraw.h
+++ b/sys/dev/hid/hidraw.h
@@ -92,5 +92,9 @@ struct hidraw_devinfo {
#define HIDIOCSFEATURE(len) _IOC(IOC_IN, 'U', 35, len)
#define HIDIOCGFEATURE(len) _IOC(IOC_INOUT, 'U', 36, len)
#define HIDIOCGRAWUNIQ(len) _IOC(IOC_OUT, 'U', 37, len)
+#define HIDIOCSINPUT(len) _IOC(IOC_IN, 'U', 38, len)
+#define HIDIOCGINPUT(len) _IOC(IOC_INOUT, 'U', 39, len)
+#define HIDIOCSOUTPUT(len) _IOC(IOC_IN, 'U', 40, len)
+#define HIDIOCGOUTPUT(len) _IOC(IOC_INOUT, 'U', 41, len)
#endif /* _HID_HIDRAW_H */
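
The new request pairs follow the HIDIOCSFEATURE/HIDIOCGFEATURE conventions:
the first byte of the user buffer selects the report ID and, on success, the
handler returns the transferred length via td_retval. A hedged userland
sketch that fetches an input report through HIDIOCGINPUT; the device node and
the 65-byte buffer are assumptions for illustration only:

	#include <sys/ioctl.h>
	#include <dev/hid/hidraw.h>
	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>

	int
	main(void)
	{
		unsigned char buf[65];	/* report ID byte plus payload */
		int fd, len;

		fd = open("/dev/hidraw0", O_RDWR);	/* example node */
		if (fd < 0)
			err(1, "open");

		memset(buf, 0, sizeof(buf));
		buf[0] = 0;	/* report ID; 0 if the device uses none */
		len = ioctl(fd, HIDIOCGINPUT(sizeof(buf)), buf);
		if (len < 0)
			err(1, "HIDIOCGINPUT");
		printf("read %d bytes\n", len);
		return (0);
	}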
diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c
index 5eff7557bc42..6255c42d3b62 100644
--- a/sys/dev/hid/hkbd.c
+++ b/sys/dev/hid/hkbd.c
@@ -95,14 +95,16 @@
#ifdef HID_DEBUG
static int hkbd_debug = 0;
+#endif
static int hkbd_no_leds = 0;
static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard");
+#ifdef HID_DEBUG
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN,
&hkbd_debug, 0, "Debug level");
+#endif
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&hkbd_no_leds, 0, "Disables setting of keyboard leds");
-#endif
#define INPUT_EPOCH global_epoch_preempt
@@ -1596,8 +1598,16 @@ hkbd_ioctl_locked(keyboard_t *kbd, u_long cmd, caddr_t arg)
sc->sc_state &= ~LOCK_MASK;
sc->sc_state |= *(int *)arg;
- /* set LEDs and quit */
- return (hkbd_ioctl_locked(kbd, KDSETLED, arg));
+ /*
+ * Attempt to set the keyboard LEDs and intentionally ignore the
+ * return value. Some hypervisors/emulators (e.g., QEMU and
+ * Parallels, at least at the time of writing) fail when the LEDs
+ * are set, and propagating that failure can prevent kbdmux from
+ * attaching the keyboard, which in turn may block the console
+ * from accessing it.
+ */
+ (void)hkbd_ioctl_locked(kbd, KDSETLED, arg);
+ return (0);
case KDSETREPEAT: /* set keyboard repeat rate (new
* interface) */
@@ -1766,10 +1776,8 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_LOCK_ASSERT();
DPRINTF("leds=0x%02x\n", leds);
-#ifdef HID_DEBUG
if (hkbd_no_leds)
return (0);
-#endif
memset(sc->sc_buffer, 0, HKBD_BUFFER_SIZE);
@@ -1820,6 +1828,7 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_UNLOCK();
error = hid_write(sc->sc_dev, buf, len);
SYSCONS_LOCK();
+ DPRINTF("error %d", error);
return (error);
}
diff --git a/sys/dev/hid/hms.c b/sys/dev/hid/hms.c
index 0ac1b86c0735..dcb679407cca 100644
--- a/sys/dev/hid/hms.c
+++ b/sys/dev/hid/hms.c
@@ -122,6 +122,7 @@ struct hms_softc {
hid_size_t isize;
uint32_t drift_cnt;
uint32_t drift_thresh;
+ struct hid_location wheel_loc;
#endif
};
@@ -131,6 +132,7 @@ hms_intr(void *context, void *buf, hid_size_t len)
{
struct hidmap *hm = context;
struct hms_softc *sc = device_get_softc(hm->dev);
+ int32_t wheel;
if (len > sc->isize)
len = sc->isize;
@@ -140,8 +142,18 @@ hms_intr(void *context, void *buf, hid_size_t len)
* to return last report data in sampling mode even after touch has
* been ended. That results in cursor drift. Filter out such a
* reports through comparing with previous one.
+ *
+ * However, this would also drop consecutive mouse wheel events,
+ * because unlike cursor movement the wheel always moves by the
+ * same amount. So skip the check when there is wheel movement.
*/
- if (len == sc->last_irsize && memcmp(buf, sc->last_ir, len) == 0) {
+ if (sc->wheel_loc.size != 0)
+ wheel = hid_get_data(buf, len, &sc->wheel_loc);
+ else
+ wheel = 0;
+
+ if (len == sc->last_irsize && memcmp(buf, sc->last_ir, len) == 0 &&
+ wheel == 0) {
sc->drift_cnt++;
if (sc->drift_thresh != 0 && sc->drift_cnt >= sc->drift_thresh)
return;
@@ -285,9 +297,25 @@ hms_attach(device_t dev)
/* Count number of input usages of variable type mapped to buttons */
for (hi = sc->hm.hid_items;
hi < sc->hm.hid_items + sc->hm.nhid_items;
- hi++)
+ hi++) {
if (hi->type == HIDMAP_TYPE_VARIABLE && hi->evtype == EV_KEY)
nbuttons++;
+#ifdef IICHID_SAMPLING
+ /*
+ * Make note of which part of the report descriptor is the wheel.
+ */
+ if (hi->type == HIDMAP_TYPE_VARIABLE &&
+ hi->evtype == EV_REL && hi->code == REL_WHEEL) {
+ sc->wheel_loc = hi->loc;
+ /*
+ * Account for the leading Report ID byte
+ * if it is a multi-report device.
+ */
+ if (hi->id != 0)
+ sc->wheel_loc.pos += 8;
+ }
+#endif
+ }
/* announce information about the mouse */
device_printf(dev, "%d buttons and [%s%s%s%s%s] coordinates ID=%u\n",
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 217585a7948b..a9d0295fb121 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -102,6 +102,7 @@ struct ietp_softc {
device_t dev;
struct evdev_dev *evdev;
+ bool open;
uint8_t report_id;
hid_size_t report_len;
@@ -198,17 +199,32 @@ static const struct hid_device_id ietp_iic_devs[] = {
IETP_IIC_DEV("ELAN1000"),
};
-static uint8_t const ietp_dummy_rdesc[] = {
+static uint8_t const ietp_dummy_rdesc_lo[] = {
0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
0x09, HUG_MOUSE, /* Usage (Mouse) */
0xA1, 0x01, /* Collection (Application) */
0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
0x95, IETP_REPORT_LEN_LO, /* Report Count (IETP_REPORT_LEN_LO) */
0x75, 0x08, /* Report Size (8) */
0x81, 0x02, /* Input (Data,Var,Abs) */
0xC0, /* End Collection */
};
+static uint8_t const ietp_dummy_rdesc_hi[] = {
+ 0x05, HUP_GENERIC_DESKTOP, /* Usage Page (Generic Desktop Ctrls) */
+ 0x09, HUG_MOUSE, /* Usage (Mouse) */
+ 0xA1, 0x01, /* Collection (Application) */
+ 0x09, 0x01, /* Usage (0x01) */
+ 0x15, 0x00, /* Logical Minimum (0) */
+ 0x26, 0xFF, 0x00, /* Logical Maximum (255) */
+ 0x95, IETP_REPORT_LEN_HI, /* Report Count (IETP_REPORT_LEN_HI) */
+ 0x75, 0x08, /* Report Size (8) */
+ 0x81, 0x02, /* Input (Data,Var,Abs) */
+ 0xC0, /* End Collection */
+};
+
static const struct evdev_methods ietp_evdev_methods = {
.ev_open = &ietp_ev_open,
.ev_close = &ietp_ev_close,
@@ -217,13 +233,25 @@ static const struct evdev_methods ietp_evdev_methods = {
static int
ietp_ev_open(struct evdev_dev *evdev)
{
- return (hid_intr_start(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_start(sc->dev);
+ if (error == 0)
+ sc->open = true;
+ return (error);
}
static int
ietp_ev_close(struct evdev_dev *evdev)
{
- return (hid_intr_stop(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_stop(sc->dev);
+ if (error == 0)
+ sc->open = false;
+ return (error);
}
static int
@@ -275,7 +303,7 @@ ietp_attach(struct ietp_softc *sc)
evdev_set_id(sc->evdev, hw->idBus, hw->idVendor, hw->idProduct,
hw->idVersion);
evdev_set_serial(sc->evdev, hw->serial);
- evdev_set_methods(sc->evdev, sc->dev, &ietp_evdev_methods);
+ evdev_set_methods(sc->evdev, sc, &ietp_evdev_methods);
evdev_set_flag(sc->evdev, EVDEV_FLAG_MT_STCOMPAT);
evdev_set_flag(sc->evdev, EVDEV_FLAG_EXT_EPOCH); /* hidbus child */
@@ -420,28 +448,38 @@ ietp_res2dpmm(uint8_t res, bool hi_precision)
static void
ietp_iic_identify(driver_t *driver, device_t parent)
{
- void *d_ptr;
- hid_size_t d_len;
- int isize;
- uint8_t iid;
+ device_t iichid = device_get_parent(parent);
+ static const uint16_t reg = IETP_PATTERN;
+ uint16_t addr = iicbus_get_addr(iichid) << 1;
+ uint8_t resp[2];
+ uint8_t cmd[2] = { reg & 0xff, (reg >> 8) & 0xff };
+ struct iic_msg msgs[2] = {
+ { addr, IIC_M_WR | IIC_M_NOSTOP, sizeof(cmd), cmd },
+ { addr, IIC_M_RD, sizeof(resp), resp },
+ };
+ struct iic_rdwr_data ird = { msgs, nitems(msgs) };
+ uint8_t pattern;
if (HIDBUS_LOOKUP_ID(parent, ietp_iic_devs) == NULL)
return;
- if (hid_get_report_descr(parent, &d_ptr, &d_len) != 0)
+
+ if (device_get_devclass(iichid) != devclass_find("iichid"))
return;
- /*
- * Some Elantech trackpads have a mangled HID report descriptor, which
- * reads as having an incorrect input size (i.e. < IETP_REPORT_LEN_LO).
- * If the input size is incorrect, load a dummy report descriptor.
- */
+ DPRINTF("Read reg 0x%04x with size %zu\n", reg, sizeof(resp));
- isize = hid_report_size_max(d_ptr, d_len, hid_input, &iid);
- if (isize >= IETP_REPORT_LEN_LO)
+ if (hid_ioctl(parent, I2CRDWR, (uintptr_t)&ird) != 0)
return;
- hid_set_report_descr(parent, ietp_dummy_rdesc,
- sizeof(ietp_dummy_rdesc));
+ DPRINTF("Response: %*D\n", (int)size(resp), resp, " ");
+
+ pattern = (resp[0] == 0xFF && resp[1] == 0xFF) ? 0 : resp[1];
+ if (pattern >= 0x02)
+ hid_set_report_descr(parent, ietp_dummy_rdesc_hi,
+ sizeof(ietp_dummy_rdesc_hi));
+ else
+ hid_set_report_descr(parent, ietp_dummy_rdesc_lo,
+ sizeof(ietp_dummy_rdesc_lo));
}
static int
@@ -584,11 +622,13 @@ ietp_iic_set_absolute_mode(device_t dev, bool enable)
* Some ASUS touchpads need to be powered on to enter absolute mode.
*/
require_wakeup = false;
- for (i = 0; i < nitems(special_fw); i++) {
- if (sc->ic_type == special_fw[i].ic_type &&
- sc->product_id == special_fw[i].product_id) {
- require_wakeup = true;
- break;
+ if (!sc->open) {
+ for (i = 0; i < nitems(special_fw); i++) {
+ if (sc->ic_type == special_fw[i].ic_type &&
+ sc->product_id == special_fw[i].product_id) {
+ require_wakeup = true;
+ break;
+ }
}
}
diff --git a/sys/dev/hid/ps4dshock.c b/sys/dev/hid/ps4dshock.c
index 537f70cadec0..d3c6b3eeadf9 100644
--- a/sys/dev/hid/ps4dshock.c
+++ b/sys/dev/hid/ps4dshock.c
@@ -771,18 +771,34 @@ static const struct hidmap_item ps4dsmtp_map[] = {
static const struct hid_device_id ps4dshock_devs[] = {
{ HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x9cc),
HID_TLC(HUP_GENERIC_DESKTOP, HUG_GAME_PAD) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x5c4),
+ HID_TLC(HUP_GENERIC_DESKTOP, HUG_GAME_PAD) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0xba0),
+ HID_TLC(HUP_GENERIC_DESKTOP, HUG_GAME_PAD) },
};
static const struct hid_device_id ps4dsacc_devs[] = {
{ HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x9cc),
HID_TLC(HUP_GENERIC_DESKTOP, HUG_MULTIAXIS_CNTROLLER) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x5c4),
+ HID_TLC(HUP_GENERIC_DESKTOP, HUG_MULTIAXIS_CNTROLLER) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0xba0),
+ HID_TLC(HUP_GENERIC_DESKTOP, HUG_MULTIAXIS_CNTROLLER) },
};
static const struct hid_device_id ps4dshead_devs[] = {
{ HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x9cc),
HID_TLC(HUP_CONSUMER, HUC_HEADPHONE) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x5c4),
+ HID_TLC(HUP_CONSUMER, HUC_HEADPHONE) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0xba0),
+ HID_TLC(HUP_CONSUMER, HUC_HEADPHONE) },
};
static const struct hid_device_id ps4dsmtp_devs[] = {
{ HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x9cc),
HID_TLC(HUP_DIGITIZERS, HUD_TOUCHPAD) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0x5c4),
+ HID_TLC(HUP_DIGITIZERS, HUD_TOUCHPAD) },
+ { HID_BVP(BUS_USB, USB_VENDOR_SONY, 0xba0),
+ HID_TLC(HUP_DIGITIZERS, HUD_TOUCHPAD) },
};
static int
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
new file mode 100644
index 000000000000..08f1a5ceedba
--- /dev/null
+++ b/sys/dev/hid/u2f.c
@@ -0,0 +1,603 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#ifdef COMPAT_FREEBSD32
+#include <sys/abi_compat.h>
+#endif
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
+#include <sys/uio.h>
+
+#include <dev/evdev/input.h>
+
+#define HID_DEBUG_VAR u2f_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include <dev/hid/hidquirk.h>
+
+#include <dev/usb/usb_ioctl.h>
+
+#ifdef HID_DEBUG
+static int u2f_debug = 0;
+static SYSCTL_NODE(_hw_hid, OID_AUTO, u2f, CTLFLAG_RW, 0,
+ "FIDO/U2F authenticator");
+SYSCTL_INT(_hw_hid_u2f, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &u2f_debug, 0, "Debug level");
+#endif
+
+#define U2F_MAX_REPORT_SIZE 64
+
+/* A match on these entries will load u2f */
+static const struct hid_device_id u2f_devs[] = {
+ { HID_BUS(BUS_USB), HID_TLC(HUP_FIDO, HUF_U2FHID) },
+};
+
+struct u2f_softc {
+ device_t sc_dev; /* base device */
+ struct cdev *dev;
+
+ struct mtx sc_mtx; /* hidbus private mutex */
+ struct task sc_kqtask; /* kqueue task */
+ void *sc_rdesc;
+ hid_size_t sc_rdesc_size;
+ hid_size_t sc_isize;
+ hid_size_t sc_osize;
+ struct selinfo sc_rsel;
+ struct { /* driver state */
+ bool open:1; /* device is open */
+ bool aslp:1; /* waiting for device data in read() */
+ bool sel:1; /* waiting for device data in poll() */
+ bool data:1; /* input report is stored in sc_buf */
+ int reserved:28;
+ } sc_state;
+ int sc_fflags; /* access mode for open lifetime */
+
+ uint8_t sc_buf[U2F_MAX_REPORT_SIZE];
+};
+
+static d_open_t u2f_open;
+static d_read_t u2f_read;
+static d_write_t u2f_write;
+static d_ioctl_t u2f_ioctl;
+static d_poll_t u2f_poll;
+static d_kqfilter_t u2f_kqfilter;
+
+static d_priv_dtor_t u2f_dtor;
+
+static struct cdevsw u2f_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = u2f_open,
+ .d_read = u2f_read,
+ .d_write = u2f_write,
+ .d_ioctl = u2f_ioctl,
+ .d_poll = u2f_poll,
+ .d_kqfilter = u2f_kqfilter,
+ .d_name = "u2f",
+};
+
+static hid_intr_t u2f_intr;
+
+static device_probe_t u2f_probe;
+static device_attach_t u2f_attach;
+static device_detach_t u2f_detach;
+
+static void u2f_kqtask(void *context, int pending);
+static int u2f_kqread(struct knote *, long);
+static void u2f_kqdetach(struct knote *);
+static void u2f_notify(struct u2f_softc *);
+
+static const struct filterops u2f_filterops_read = {
+ .f_isfd = 1,
+ .f_detach = u2f_kqdetach,
+ .f_event = u2f_kqread,
+};
+
+static int
+u2f_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, u2f_devs);
+ if (error != 0)
+ return (error);
+
+ hidbus_set_desc(dev, "Authenticator");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+u2f_attach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+ struct hid_device_info *hw = __DECONST(struct hid_device_info *,
+ hid_get_device_info(dev));
+ struct make_dev_args mda;
+ int error;
+
+ sc->sc_dev = dev;
+
+ error = hid_get_report_descr(dev, &sc->sc_rdesc, &sc->sc_rdesc_size);
+ if (error != 0)
+ return (ENXIO);
+ sc->sc_isize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_input, NULL);
+ if (sc->sc_isize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Input report size too large. Truncate.\n");
+ sc->sc_isize = U2F_MAX_REPORT_SIZE;
+ }
+ sc->sc_osize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_output, NULL);
+ if (sc->sc_osize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Output report size too large. Truncate.\n");
+ sc->sc_osize = U2F_MAX_REPORT_SIZE;
+ }
+
+ mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
+ knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+ TASK_INIT(&sc->sc_kqtask, 0, u2f_kqtask, sc);
+
+ make_dev_args_init(&mda);
+ mda.mda_flags = MAKEDEV_WAITOK;
+ mda.mda_devsw = &u2f_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_U2F;
+ mda.mda_mode = 0660;
+ mda.mda_si_drv1 = sc;
+
+ error = make_dev_s(&mda, &sc->dev, "u2f/%d", device_get_unit(dev));
+ if (error) {
+ device_printf(dev, "Can not create character device\n");
+ u2f_detach(dev);
+ return (error);
+ }
+#ifndef U2F_DROP_UHID_ALIAS
+ (void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
+#endif
+
+ hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD);
+
+ hidbus_set_lock(dev, &sc->sc_mtx);
+ hidbus_set_intr(dev, u2f_intr, sc);
+
+ return (0);
+}
+
+static int
+u2f_detach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+
+ DPRINTF("sc=%p\n", sc);
+
+ if (sc->dev != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ sc->dev->si_drv1 = NULL;
+ /* Wake everyone */
+ u2f_notify(sc);
+ mtx_unlock(&sc->sc_mtx);
+ destroy_dev(sc->dev);
+ }
+
+ taskqueue_drain(taskqueue_thread, &sc->sc_kqtask);
+ hid_intr_stop(sc->sc_dev);
+
+ knlist_clear(&sc->sc_rsel.si_note, 0);
+ knlist_destroy(&sc->sc_rsel.si_note);
+ seldrain(&sc->sc_rsel);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static void
+u2f_intr(void *context, void *buf, hid_size_t len)
+{
+ struct u2f_softc *sc = context;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ DPRINTFN(5, "len=%d\n", len);
+ DPRINTFN(5, "data = %*D\n", len, buf, " ");
+
+ if (sc->sc_state.data)
+ return;
+
+ if (len > sc->sc_isize)
+ len = sc->sc_isize;
+
+ bcopy(buf, sc->sc_buf, len);
+
+ /* Make sure we don't process old data */
+ if (len < sc->sc_isize)
+ bzero(sc->sc_buf + len, sc->sc_isize - len);
+
+ sc->sc_state.data = true;
+
+ u2f_notify(sc);
+}
+
+static int
+u2f_open(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ DPRINTF("sc=%p\n", sc);
+
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.open) {
+ mtx_unlock(&sc->sc_mtx);
+ return (EBUSY);
+ }
+ sc->sc_state.open = true;
+ mtx_unlock(&sc->sc_mtx);
+
+ error = devfs_set_cdevpriv(sc, u2f_dtor);
+ if (error != 0) {
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+ return (error);
+ }
+
+ /* Set up interrupt pipe. */
+ sc->sc_state.data = false;
+ sc->sc_fflags = flag;
+
+ return (0);
+}
+
+static void
+u2f_dtor(void *data)
+{
+ struct u2f_softc *sc = data;
+
+#ifdef NOT_YET
+ /* Disable interrupts. */
+ hid_intr_stop(sc->sc_dev);
+#endif
+
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+}
+
+static int
+u2f_read(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ size_t length = 0;
+ int error = 0;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+
+ mtx_lock(&sc->sc_mtx);
+ if (dev->si_drv1 == NULL) {
+ error = EIO;
+ goto exit;
+ }
+
+ while (!sc->sc_state.data) {
+ if (flag & O_NONBLOCK) {
+ error = EWOULDBLOCK;
+ goto exit;
+ }
+ sc->sc_state.aslp = true;
+ DPRINTFN(5, "sleep on %p\n", &sc->sc_buf);
+ error = mtx_sleep(&sc->sc_buf, &sc->sc_mtx, PZERO | PCATCH,
+ "u2frd", 0);
+ DPRINTFN(5, "woke, error=%d\n", error);
+ if (dev->si_drv1 == NULL)
+ error = EIO;
+ if (error) {
+ sc->sc_state.aslp = false;
+ goto exit;
+ }
+ }
+
+ if (sc->sc_state.data && uio->uio_resid > 0) {
+ length = min(uio->uio_resid, sc->sc_isize);
+ memcpy(buf, sc->sc_buf, length);
+ sc->sc_state.data = false;
+ }
+exit:
+ mtx_unlock(&sc->sc_mtx);
+ if (length != 0) {
+ /* Copy the data to the user process. */
+ DPRINTFN(5, "got %lu chars\n", (u_long)length);
+ error = uiomove(buf, length, uio);
+ }
+
+ return (error);
+}
+
+static int
+u2f_write(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (uio->uio_resid != sc->sc_osize)
+ return (EINVAL);
+ error = uiomove(buf, uio->uio_resid, uio);
+ if (error == 0)
+ error = hid_write(sc->sc_dev, buf, sc->sc_osize);
+
+ return (error);
+}
+
+#ifdef COMPAT_FREEBSD32
+static void
+update_ugd32(const struct usb_gen_descriptor *ugd,
+ struct usb_gen_descriptor32 *ugd32)
+{
+ /* Don't update hgd_data pointer */
+ CP(*ugd, *ugd32, ugd_lang_id);
+ CP(*ugd, *ugd32, ugd_maxlen);
+ CP(*ugd, *ugd32, ugd_actlen);
+ CP(*ugd, *ugd32, ugd_offset);
+ CP(*ugd, *ugd32, ugd_config_index);
+ CP(*ugd, *ugd32, ugd_string_index);
+ CP(*ugd, *ugd32, ugd_iface_index);
+ CP(*ugd, *ugd32, ugd_altif_index);
+ CP(*ugd, *ugd32, ugd_endpt_index);
+ CP(*ugd, *ugd32, ugd_report_type);
+ /* Don't update reserved */
+}
+#endif
+
+static int
+u2f_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ struct usb_gen_descriptor local_ugd;
+ struct usb_gen_descriptor32 *ugd32 = NULL;
+#endif
+ struct u2f_softc *sc = dev->si_drv1;
+ struct usb_gen_descriptor *ugd = (struct usb_gen_descriptor *)addr;
+ uint32_t size;
+
+ DPRINTFN(2, "cmd=%lx\n", cmd);
+
+ if (sc == NULL)
+ return (EIO);
+
+#ifdef COMPAT_FREEBSD32
+ switch (cmd) {
+ case USB_GET_REPORT_DESC32:
+ cmd = _IOC_NEWTYPE(cmd, struct usb_gen_descriptor);
+ ugd32 = (struct usb_gen_descriptor32 *)addr;
+ ugd = &local_ugd;
+ PTRIN_CP(*ugd32, *ugd, ugd_data);
+ CP(*ugd32, *ugd, ugd_lang_id);
+ CP(*ugd32, *ugd, ugd_maxlen);
+ CP(*ugd32, *ugd, ugd_actlen);
+ CP(*ugd32, *ugd, ugd_offset);
+ CP(*ugd32, *ugd, ugd_config_index);
+ CP(*ugd32, *ugd, ugd_string_index);
+ CP(*ugd32, *ugd, ugd_iface_index);
+ CP(*ugd32, *ugd, ugd_altif_index);
+ CP(*ugd32, *ugd, ugd_endpt_index);
+ CP(*ugd32, *ugd, ugd_report_type);
+ /* Don't copy reserved */
+ break;
+ }
+#endif
+
+ /* fixed-length ioctls handling */
+ switch (cmd) {
+ case FIONBIO:
+ /* All handled in the upper FS layer. */
+ return (0);
+
+ case USB_GET_REPORT_DESC:
+ size = MIN(sc->sc_rdesc_size, ugd->ugd_maxlen);
+ ugd->ugd_actlen = size;
+#ifdef COMPAT_FREEBSD32
+ if (ugd32 != NULL)
+ update_ugd32(ugd, ugd32);
+#endif
+ if (ugd->ugd_data == NULL)
+ return (0); /* descriptor length only */
+
+ return (copyout(sc->sc_rdesc, ugd->ugd_data, size));
+
+ case USB_GET_DEVICEINFO:
+ return (hid_ioctl(
+ sc->sc_dev, USB_GET_DEVICEINFO, (uintptr_t)addr));
+ }
+
+ return (EINVAL);
+}
+
+static int
+u2f_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int revents = 0;
+ bool start_intr = false;
+
+ if (sc == NULL)
+ return (POLLHUP);
+
+ if (events & (POLLOUT | POLLWRNORM) && (sc->sc_fflags & FWRITE))
+ revents |= events & (POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM) && (sc->sc_fflags & FREAD)) {
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.data)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ sc->sc_state.sel = true;
+ start_intr = true;
+ selrecord(td, &sc->sc_rsel);
+ }
+ mtx_unlock(&sc->sc_mtx);
+ if (start_intr)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (revents);
+}
+
+static int
+u2f_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ if (sc->sc_fflags & FREAD) {
+ kn->kn_fop = &u2f_filterops_read;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (EINVAL);
+ }
+ kn->kn_hook = sc;
+
+ knlist_add(&sc->sc_rsel.si_note, kn, 0);
+ return (0);
+}
+
+static void
+u2f_kqtask(void *context, int pending)
+{
+ struct u2f_softc *sc = context;
+
+ hid_intr_start(sc->sc_dev);
+}
+
+static int
+u2f_kqread(struct knote *kn, long hint)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+ int ret;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->dev->si_drv1 == NULL) {
+ kn->kn_flags |= EV_EOF;
+ ret = 1;
+ } else {
+ ret = sc->sc_state.data ? 1 : 0;
+ if (!sc->sc_state.data)
+ taskqueue_enqueue(taskqueue_thread, &sc->sc_kqtask);
+ }
+
+ return (ret);
+}
+
+static void
+u2f_kqdetach(struct knote *kn)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->sc_rsel.si_note, kn, 0);
+}
+
+static void
+u2f_notify(struct u2f_softc *sc)
+{
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_state.aslp) {
+ sc->sc_state.aslp = false;
+ DPRINTFN(5, "waking %p\n", &sc->sc_buf);
+ wakeup(&sc->sc_buf);
+ }
+ if (sc->sc_state.sel) {
+ sc->sc_state.sel = false;
+ selwakeuppri(&sc->sc_rsel, PZERO);
+ }
+ KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
+}
+
+static device_method_t u2f_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, u2f_probe),
+ DEVMETHOD(device_attach, u2f_attach),
+ DEVMETHOD(device_detach, u2f_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t u2f_driver = {
+#ifdef U2F_DROP_UHID_ALIAS
+ "uf2",
+#else
+ "uhid",
+#endif
+ u2f_methods,
+ sizeof(struct u2f_softc)
+};
+
+DRIVER_MODULE(u2f, hidbus, u2f_driver, NULL, NULL);
+MODULE_DEPEND(u2f, hidbus, 1, 1, 1);
+MODULE_DEPEND(u2f, hid, 1, 1, 1);
+MODULE_VERSION(u2f, 1);
+HID_PNP_INFO(u2f_devs);
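
For completeness, the character-device contract the new driver implements:
u2f_write() accepts exactly one output report (uio_resid must equal the
descriptor-derived sc_osize, 64 bytes for a standard U2F HID token), and
u2f_read() blocks until one input report arrives unless O_NONBLOCK is set. A
hedged userland sketch against the new node; the zeroed frame stands in for a
real U2FHID-layer request and is purely illustrative:

	#include <err.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int
	main(void)
	{
		unsigned char req[64], resp[64];
		ssize_t n;
		int fd;

		fd = open("/dev/u2f/0", O_RDWR);
		if (fd < 0)
			err(1, "open");

		memset(req, 0, sizeof(req));	/* real frames come from u2fhid */
		if (write(fd, req, sizeof(req)) != sizeof(req))
			err(1, "write");

		n = read(fd, resp, sizeof(resp));	/* blocks for one report */
		if (n < 0)
			err(1, "read");
		printf("read %zd bytes\n", n);
		return (0);
	}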
diff --git a/sys/dev/hifn/hifn7751.c b/sys/dev/hifn/hifn7751.c
index 23cf019b93a6..2e7545779b09 100644
--- a/sys/dev/hifn/hifn7751.c
+++ b/sys/dev/hifn/hifn7751.c
@@ -637,8 +637,6 @@ hifn_detach(device_t dev)
crypto_unregister_all(sc->sc_cid);
- bus_generic_detach(dev); /*XXX should be no children, right? */
-
bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
/* XXX don't store rid */
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
diff --git a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
index 32d3e465c569..e086a1554940 100644
--- a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
+++ b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c
@@ -94,9 +94,6 @@ static int hpt_attach(device_t dev)
size = him->get_adapter_size(&pci_id);
hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
- if (!hba->ldm_adapter.him_handle)
- return ENXIO;
-
hba->pcidev = dev;
hba->pciaddr.tree = 0;
hba->pciaddr.bus = pci_get_bus(dev);
@@ -114,10 +111,6 @@ static int hpt_attach(device_t dev)
if (!ldm_register_adapter(&hba->ldm_adapter)) {
size = ldm_get_vbus_size();
vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
- if (!vbus_ext) {
- free(hba->ldm_adapter.him_handle, M_DEVBUF);
- return ENXIO;
- }
memset(vbus_ext, 0, sizeof(VBUS_EXT));
vbus_ext->ext_type = EXT_TYPE_VBUS;
ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
@@ -146,9 +139,9 @@ static __inline void *__get_free_pages(int order)
M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}
-static __inline void free_pages(void *p, int order)
+static __inline void free_pages(void *p)
{
- contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
+ free(p, M_DEVBUF);
}
static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
@@ -168,7 +161,6 @@ static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
f->tag, f->count, f->size, f->count*f->size));
for (i=0; i<f->count; i++) {
p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
- if (!p) return (ENXIO);
*p = f->head;
f->head = p;
}
@@ -230,7 +222,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
for (i=0; i<os_max_cache_pages; i++) {
p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
HPT_ASSERT(p);
- free_pages(p, 0);
+ free_pages(p);
}
for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
@@ -244,7 +236,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
while ((p=freelist_get_dma(f, &bus))) {
if (order)
- free_pages(p, order);
+ free_pages(p);
else {
/* can't free immediately since other blocks in this page may still be in the list */
if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
@@ -254,7 +246,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
}
while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
- free_pages(p, 0);
+ free_pages(p);
}
static int hpt_init_vbus(PVBUS_EXT vbus_ext)
@@ -1109,10 +1101,6 @@ static void hpt_final_init(void *dummy)
for (i=0; i<os_max_queue_comm; i++) {
POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
- if (!ext) {
- os_printk("Can't alloc cmdext(%d)", i);
- return ;
- }
ext->vbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1327,18 +1315,13 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
hpt_do_ioctl(&ioctl_args);
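
The free_pages() signature change here (and the matching one in hptnr below)
tracks the retirement of contigfree(9): memory obtained from contigmalloc(9)
is now released with plain free(9), which looks up the allocation size
itself, so callers no longer need to carry the size or page order. The
pairing in isolation, as a minimal sketch using the same parameters as
__get_free_pages() above:

	#include <sys/param.h>
	#include <sys/malloc.h>
	#include <sys/systm.h>

	static void *
	alloc_dma_page(void)
	{
		/* One physically contiguous, page-aligned page. */
		return (contigmalloc(PAGE_SIZE, M_DEVBUF, M_WAITOK,
		    BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR,
		    PAGE_SIZE, 0));
	}

	static void
	free_dma_page(void *p)
	{
		free(p, M_DEVBUF);	/* size argument no longer needed */
	}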
diff --git a/sys/dev/hpt27xx/hptintf.h b/sys/dev/hpt27xx/hptintf.h
index 558b479ec2ee..eb8105ec5666 100644
--- a/sys/dev/hpt27xx/hptintf.h
+++ b/sys/dev/hpt27xx/hptintf.h
@@ -155,8 +155,8 @@ typedef HPT_U32 DEVICEID;
#define ARRAY_FLAG_NEED_AUTOREBUILD 0x00000080 /* auto-rebuild should start */
#define ARRAY_FLAG_VERIFYING 0x00000100 /* is being verified */
#define ARRAY_FLAG_INITIALIZING 0x00000200 /* is being initialized */
-#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* tranform in progress */
-#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need tranform */
+#define ARRAY_FLAG_TRANSFORMING 0x00000400 /* transform in progress */
+#define ARRAY_FLAG_NEEDTRANSFORM 0x00000800 /* array need transform */
#define ARRAY_FLAG_NEEDINITIALIZING 0x00001000 /* the array's initialization hasn't finished*/
#define ARRAY_FLAG_BROKEN_REDUNDANT 0x00002000 /* broken but redundant (raid6) */
#define ARRAY_FLAG_RAID15PLUS 0x80000000 /* display this RAID 1 as RAID 1.5 */
@@ -2018,7 +2018,7 @@ DEVICEID hpt_create_transform_v2(DEVICEID idArray, PCREATE_ARRAY_PARAMS_V3 destI
#endif
/* hpt_step_transform
- * move a block in a tranform progress.
+ * move a block in a transform progress.
* This function is called by mid-layer, not GUI (which uses set_array_state instead).
* Version compatibility: v2.0.0.0 or later
* Parameters:
diff --git a/sys/dev/hpt27xx/os_bsd.h b/sys/dev/hpt27xx/os_bsd.h
index b92cd06e9f06..96c62ae0910f 100644
--- a/sys/dev/hpt27xx/os_bsd.h
+++ b/sys/dev/hpt27xx/os_bsd.h
@@ -39,6 +39,7 @@
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
+#include <sys/stdarg.h>
#include <sys/systm.h>
#include <sys/stat.h>
@@ -59,7 +60,6 @@
#include <machine/resource.h>
#include <machine/pci_cfgreg.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <sys/rman.h>
#include <vm/vm.h>
diff --git a/sys/dev/hptiop/hptiop.c b/sys/dev/hptiop/hptiop.c
index 547957c30ef1..b7b885a7c732 100644
--- a/sys/dev/hptiop/hptiop.c
+++ b/sys/dev/hptiop/hptiop.c
@@ -31,6 +31,7 @@
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>
+#include <sys/stdarg.h>
#include <sys/stat.h>
#include <sys/malloc.h>
@@ -49,7 +50,6 @@
#include <machine/resource.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <sys/rman.h>
#include <vm/vm.h>
@@ -1798,7 +1798,6 @@ static int hptiop_probe(device_t dev)
{
struct hpt_iop_hba *hba;
u_int32_t id;
- static char buf[256];
int sas = 0;
struct hptiop_adapter_ops *ops;
@@ -1851,9 +1850,8 @@ static int hptiop_probe(device_t dev)
pci_get_bus(dev), pci_get_slot(dev),
pci_get_function(dev), pci_get_irq(dev));
- sprintf(buf, "RocketRAID %x %s Controller\n",
- id, sas ? "SAS" : "SATA");
- device_set_desc_copy(dev, buf);
+ device_set_descf(dev, "RocketRAID %x %s Controller",
+ id, sas ? "SAS" : "SATA");
hba = (struct hpt_iop_hba *)device_get_softc(dev);
bzero(hba, sizeof(struct hpt_iop_hba));
diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c
index 4a014ce17f4e..f3d58f285b39 100644
--- a/sys/dev/hptmv/entry.c
+++ b/sys/dev/hptmv/entry.c
@@ -430,7 +430,7 @@ static void device_change(IAL_ADAPTER_T *pAdapter , MV_U8 channelIndex, int plug
if(pVDev->pParent)
{
int iMember;
- for(iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++)
+ for (iMember = 0; iMember < pVDev->pParent->u.array.bArnMember; iMember++)
if((PVDevice)pVDev->pParent->u.array.pMember[iMember] == pVDev)
pVDev->pParent->u.array.pMember[iMember] = NULL;
pVDev->pParent = NULL;
@@ -914,7 +914,7 @@ hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter)
{
MV_ERROR("RR18xx[%d]: Error in Request Quueues Alignment\n",
pAdapter->mvSataAdapter.adapterId);
- contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF);
+ free(pAdapter->requestsArrayBaseAddr, M_DEVBUF);
return -1;
}
/* response queues */
@@ -924,7 +924,7 @@ hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter)
{
MV_ERROR("RR18xx[%d]: Failed to allocate memory for EDMA response"
" queues\n", pAdapter->mvSataAdapter.adapterId);
- contigfree(pAdapter->requestsArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF);
+ free(pAdapter->requestsArrayBaseAddr, M_DEVBUF);
return -1;
}
pAdapter->responsesArrayBaseDmaAddr = fOsPhysicalAddress(pAdapter->responsesArrayBaseAddr);
@@ -941,8 +941,8 @@ hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter)
{
MV_ERROR("RR18xx[%d]: Error in Response Queues Alignment\n",
pAdapter->mvSataAdapter.adapterId);
- contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF);
- contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF);
+ free(pAdapter->requestsArrayBaseAddr, M_DEVBUF);
+ free(pAdapter->responsesArrayBaseAddr, M_DEVBUF);
return -1;
}
return 0;
@@ -951,8 +951,8 @@ hptmv_allocate_edma_queues(IAL_ADAPTER_T *pAdapter)
static void
hptmv_free_edma_queues(IAL_ADAPTER_T *pAdapter)
{
- contigfree(pAdapter->requestsArrayBaseAddr, REQUESTS_ARRAY_SIZE, M_DEVBUF);
- contigfree(pAdapter->responsesArrayBaseAddr, RESPONSES_ARRAY_SIZE, M_DEVBUF);
+ free(pAdapter->requestsArrayBaseAddr, M_DEVBUF);
+ free(pAdapter->responsesArrayBaseAddr, M_DEVBUF);
}
static PVOID
@@ -984,7 +984,7 @@ fRegisterVdevice(IAL_ADAPTER_T *pAdapter)
PVBus pVBus;
int i,j;
- for(i=0;i<MV_SATA_CHANNELS_NUM;i++) {
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) {
pPhysical = &(pAdapter->VDevices[i]);
pLogical = pPhysical;
while (pLogical->pParent) pLogical = pLogical->pParent;
@@ -1027,8 +1027,7 @@ GetSpareDisk(_VBUS_ARG PVDevice pArray)
PVDevice pVDevice, pFind = NULL;
int i;
- for(i=0;i<MV_SATA_CHANNELS_NUM;i++)
- {
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++) {
pVDevice = &pAdapter->VDevices[i];
if(!pVDevice)
continue;
@@ -1356,7 +1355,7 @@ unregister:
goto unregister;
}
- for (i=0; i<MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) {
+ for (i = 0; i < MAX_COMMAND_BLOCKS_FOR_EACH_VBUS; i++) {
FreeCommand(_VBUS_P &(pAdapter->pCommandBlocks[i]));
}
@@ -1370,7 +1369,7 @@ unregister:
memset((void *)pAdapter->pbus_dmamap, 0, sizeof(struct _BUS_DMAMAP) * MAX_QUEUE_COMM);
pAdapter->pbus_dmamap_list = 0;
- for (i=0; i < MAX_QUEUE_COMM; i++) {
+ for (i = 0; i < MAX_QUEUE_COMM; i++) {
PBUS_DMAMAP pmap = &(pAdapter->pbus_dmamap[i]);
pmap->pAdapter = pAdapter;
dmamap_put(pmap);
@@ -1398,7 +1397,7 @@ unregister:
pAdapter->prdTableAlignedAddr = (PUCHAR)(((ULONG_PTR)pAdapter->prdTableAddr + 0x1f) & ~(ULONG_PTR)0x1fL);
{
PUCHAR PRDTable = pAdapter->prdTableAlignedAddr;
- for (i=0; i<PRD_TABLES_FOR_VBUS; i++)
+ for (i = 0; i < PRD_TABLES_FOR_VBUS; i++)
{
/* KdPrint(("i=%d,pAdapter->pFreePRDLink=%p\n",i,pAdapter->pFreePRDLink)); */
FreePRDTable(pAdapter, PRDTable);
@@ -1447,7 +1446,7 @@ unregister:
}
#ifdef SUPPORT_ARRAY
- for(i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) {
+ for (i = MAX_ARRAY_DEVICE - 1; i >= 0; i--) {
pVDev = ArrayTables(i);
mArFreeArrayTable(pVDev);
}
@@ -1467,7 +1466,7 @@ unregister:
_vbus_p->nInstances = 1;
fRegisterVdevice(pAdapter);
- for (channel=0;channel<MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
pVDev = _vbus_p->pVDevice[channel];
if (pVDev && pVDev->vf_online)
fCheckBootable(pVDev);
@@ -1567,7 +1566,7 @@ fResetActiveCommands(PVBus _vbus_p)
{
MV_SATA_ADAPTER *pMvSataAdapter = &((IAL_ADAPTER_T *)_vbus_p->OsExt)->mvSataAdapter;
MV_U8 channel;
- for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
if (pMvSataAdapter->sataChannel[channel] && pMvSataAdapter->sataChannel[channel]->outstandingCommands)
MvSataResetChannel(pMvSataAdapter,channel);
}
@@ -1590,7 +1589,7 @@ check_cmds:
dataxfer_poll();
xor_poll();
#endif
- for (channel=0;channel< MV_SATA_CHANNELS_NUM;channel++) {
+ for (channel = 0; channel < MV_SATA_CHANNELS_NUM; channel++) {
pMvSataChannel = pMvSataAdapter->sataChannel[channel];
if (pMvSataChannel && pMvSataChannel->outstandingCommands)
{
@@ -1716,7 +1715,7 @@ fDeviceSendCommand(_VBUS_ARG PCommand pCmd)
MV_BOOLEAN is48bit;
MV_U8 channel;
- int i=0;
+ int i = 0;
DECLARE_BUFFER(FPSCAT_GATH, tmpSg);
@@ -2141,7 +2140,7 @@ FlushAdapter(IAL_ADAPTER_T *pAdapter)
hpt_printk(("flush all devices\n"));
/* flush all devices */
- for (i=0; i<MAX_VDEVICE_PER_VBUS; i++) {
+ for (i = 0; i < MAX_VDEVICE_PER_VBUS; i++) {
PVDevice pVDev = pAdapter->VBus.pVDevice[i];
if(pVDev) fFlushVDev(pVDev);
}
@@ -2174,7 +2173,7 @@ Check_Idle_Call(IAL_ADAPTER_T *pAdapter)
{
int i;
PVDevice pArray;
- for(i = 0; i < MAX_ARRAY_PER_VBUS; i++){
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
continue;
else if (pArray->u.array.rf_auto_rebuild) {
@@ -2378,7 +2377,7 @@ hpt_free_ccb(union ccb **ccb_Q, union ccb *ccb)
static void hpt_worker_thread(void)
{
- for(;;) {
+ for (;;) {
mtx_lock(&DpcQueue_Lock);
while (DpcQueue_First!=DpcQueue_Last) {
ST_HPT_DPC p;
@@ -2418,7 +2417,7 @@ static void hpt_worker_thread(void)
mtx_lock(&pAdapter->lock);
_vbus_p = &pAdapter->VBus;
- for (i=0;i<MAX_ARRAY_PER_VBUS;i++)
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
{
if ((pArray=ArrayTables(i))->u.array.dArStamp==0)
continue;
@@ -2472,7 +2471,7 @@ launch_worker_thread(void)
int i;
PVDevice pVDev;
- for(i = 0; i < MAX_ARRAY_PER_VBUS; i++)
+ for (i = 0; i < MAX_ARRAY_PER_VBUS; i++)
if ((pVDev=ArrayTables(i))->u.array.dArStamp==0)
continue;
else{
@@ -2950,13 +2949,13 @@ void
void
os_free_page(_VBUS_ARG void *p)
{
- contigfree(p, 0x1000, M_DEVBUF);
+ free(p, M_DEVBUF);
}
void
os_free_dma_page(_VBUS_ARG void *p)
{
- contigfree(p, 0x1000, M_DEVBUF);
+ free(p, M_DEVBUF);
}
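The two hunks above rely on free(9) now accepting memory obtained from contigmalloc(9), which removed the need for contigfree(9) and its explicit size argument. A minimal sketch of the pairing (the allocation parameters are illustrative):

	void *p;

	/* One physically contiguous 4 KiB page below BUS_SPACE_MAXADDR. */
	p = contigmalloc(0x1000, M_DEVBUF, M_WAITOK, 0ul,
	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
	/* ... use the page ... */
	free(p, M_DEVBUF);	/* malloc(9) recovers the size itself */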
void
diff --git a/sys/dev/hptmv/gui_lib.c b/sys/dev/hptmv/gui_lib.c
index d78fdcca69d2..f11044db733a 100644
--- a/sys/dev/hptmv/gui_lib.c
+++ b/sys/dev/hptmv/gui_lib.c
@@ -86,8 +86,7 @@ check_VDevice_valid(PVDevice p)
while(pAdapter != NULL)
{
_vbus_p = &pAdapter->VBus;
- for (i=0;i<MAX_ARRAY_PER_VBUS;i++)
- {
+	for (i = 0; i < MAX_ARRAY_PER_VBUS; i++) {
pVDevice=ArrayTables(i);
if ((pVDevice->u.array.dArStamp != 0) && (pVDevice == p))
return 0;
@@ -244,9 +243,9 @@ static void get_array_info(PVDevice pVDevice, PHPT_ARRAY_INFO pArrayInfo)
if(pVDevice->u.array.pMember[i] != NULL)
pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);
- for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS; i++)
+ for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS; i++)
pArrayInfo->Members[i] = INVALID_DEVICEID;
- }
+}
static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo)
{
@@ -266,7 +265,7 @@ static void get_array_info_v2(PVDevice pVDevice, PHPT_ARRAY_INFO_V2 pArrayInfo)
if(pVDevice->u.array.pMember[i] != NULL)
pArrayInfo->Members[pArrayInfo->nDisk++] = VDEV_TO_ID(pVDevice->u.array.pMember[i]);
- for(i=pArrayInfo->nDisk; i<MAX_ARRAY_MEMBERS_V2; i++)
+ for (i = pArrayInfo->nDisk; i < MAX_ARRAY_MEMBERS_V2; i++)
pArrayInfo->Members[i] = INVALID_DEVICEID;
}
#endif
@@ -461,8 +460,7 @@ found:
pInfo->IoPort = 0;
pInfo->ControlPort = 0;
- for (i=0; i<2 ;i++)
- {
+ for (i = 0; i < 2; i++) {
pInfo->Devices[i] = (DEVICEID)INVALID_DEVICEID;
}
diff --git a/sys/dev/hptmv/hptproc.c b/sys/dev/hptmv/hptproc.c
index 8f9fffdc5af1..328750d9034c 100644
--- a/sys/dev/hptmv/hptproc.c
+++ b/sys/dev/hptmv/hptproc.c
@@ -32,8 +32,8 @@
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
-#include <machine/stdarg.h>
#ifndef __KERNEL__
#define __KERNEL__
@@ -107,7 +107,7 @@ hpt_set_asc_info(IAL_ADAPTER_T *pAdapter, char *buffer,int length)
return -EINVAL;
}
- for (i=0;i<MV_SATA_CHANNELS_NUM;i++)
+ for (i = 0; i < MV_SATA_CHANNELS_NUM; i++)
if(i == ichan)
goto rebuild;
diff --git a/sys/dev/hptnr/hptnr_osm_bsd.c b/sys/dev/hptnr/hptnr_osm_bsd.c
index 00774bf9be4a..7426873964fb 100644
--- a/sys/dev/hptnr/hptnr_osm_bsd.c
+++ b/sys/dev/hptnr/hptnr_osm_bsd.c
@@ -143,9 +143,9 @@ static __inline void *__get_free_pages(int order)
M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}
-static __inline void free_pages(void *p, int order)
+static __inline void free_pages(void *p)
{
- contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
+ free(p, M_DEVBUF);
}
static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
@@ -165,7 +165,6 @@ static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
f->tag, f->count, f->size, f->count*f->size));
for (i=0; i<f->count; i++) {
p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
- if (!p) return (ENXIO);
*p = f->head;
f->head = p;
}
@@ -227,7 +226,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
for (i=0; i<os_max_cache_pages; i++) {
p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
HPT_ASSERT(p);
- free_pages(p, 0);
+ free_pages(p);
}
for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
@@ -241,7 +240,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
while ((p=freelist_get_dma(f, &bus))) {
if (order)
- free_pages(p, order);
+ free_pages(p);
else {
/* can't free immediately since other blocks in this page may still be in the list */
if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
@@ -251,7 +250,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
}
while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
- free_pages(p, 0);
+ free_pages(p);
}
static int hpt_init_vbus(PVBUS_EXT vbus_ext)
@@ -1389,10 +1388,6 @@ static void hpt_final_init(void *dummy)
for (i=0; i<os_max_queue_comm; i++) {
POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
- if (!ext) {
- os_printk("Can't alloc cmdext(%d)", i);
- return ;
- }
ext->vbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1610,19 +1605,14 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
-
+
hpt_do_ioctl(&ioctl_args);
if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
diff --git a/sys/dev/hptnr/os_bsd.h b/sys/dev/hptnr/os_bsd.h
index 75ac04323d57..d262980721d0 100644
--- a/sys/dev/hptnr/os_bsd.h
+++ b/sys/dev/hptnr/os_bsd.h
@@ -41,6 +41,7 @@
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
+#include <sys/stdarg.h>
#include <sys/systm.h>
#include <sys/stat.h>
@@ -61,7 +62,6 @@
#include <machine/resource.h>
#include <machine/pci_cfgreg.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <sys/rman.h>
#include <vm/vm.h>
diff --git a/sys/dev/hptrr/hptrr_osm_bsd.c b/sys/dev/hptrr/hptrr_osm_bsd.c
index 4208dd620e37..78a051d54cf8 100644
--- a/sys/dev/hptrr/hptrr_osm_bsd.c
+++ b/sys/dev/hptrr/hptrr_osm_bsd.c
@@ -144,9 +144,9 @@ static __inline void *__get_free_pages(int order)
M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
}
-static __inline void free_pages(void *p, int order)
+static __inline void free_pages(void *p)
{
- contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
+ free(p, M_DEVBUF);
}
static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
@@ -228,7 +228,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
for (i=0; i<os_max_cache_pages; i++) {
p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
HPT_ASSERT(p);
- free_pages(p, 0);
+ free_pages(p);
}
for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
@@ -242,7 +242,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
while ((p=freelist_get_dma(f, &bus))) {
if (order)
- free_pages(p, order);
+ free_pages(p);
else {
/* can't free immediately since other blocks in this page may still be in the list */
if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
@@ -252,7 +252,7 @@ static void hpt_free_mem(PVBUS_EXT vbus_ext)
}
while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
- free_pages(p, 0);
+ free_pages(p);
}
static int hpt_init_vbus(PVBUS_EXT vbus_ext)
@@ -463,7 +463,7 @@ static void os_cmddone(PCOMMAND pCmd)
ccb->ccb_h.status = CAM_BUSY;
break;
default:
- ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
+ ccb->ccb_h.status = CAM_AUTOSENSE_FAIL;
break;
}
@@ -557,7 +557,7 @@ static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
ccb->ccb_h.target_id >= osm_max_targets ||
(ccb->ccb_h.flags & CAM_CDB_PHYS))
{
- ccb->ccb_h.status = CAM_TID_INVALID;
+ ccb->ccb_h.status = CAM_SEL_TIMEOUT;
xpt_done(ccb);
return;
}
@@ -1032,10 +1032,6 @@ static void hpt_final_init(void *dummy)
for (i=0; i<os_max_queue_comm; i++) {
POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
- if (!ext) {
- os_printk("Can't alloc cmdext(%d)", i);
- return ;
- }
ext->vbus_ext = vbus_ext;
ext->next = vbus_ext->cmdext_list;
vbus_ext->cmdext_list = ext;
@@ -1252,19 +1248,14 @@ static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, stru
if (ioctl_args.nInBufferSize) {
ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
- if (!ioctl_args.lpInBuffer)
- goto invalid;
if (copyin((void*)piop->lpInBuffer,
ioctl_args.lpInBuffer, piop->nInBufferSize))
goto invalid;
}
- if (ioctl_args.nOutBufferSize) {
+ if (ioctl_args.nOutBufferSize)
ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ioctl_args.lpOutBuffer)
- goto invalid;
- }
-
+
hpt_do_ioctl(&ioctl_args);
if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
diff --git a/sys/dev/hptrr/os_bsd.h b/sys/dev/hptrr/os_bsd.h
index b351a13cba3c..927c73ab47f8 100644
--- a/sys/dev/hptrr/os_bsd.h
+++ b/sys/dev/hptrr/os_bsd.h
@@ -43,6 +43,7 @@
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
+#include <sys/stdarg.h>
#include <sys/systm.h>
#include <sys/stat.h>
@@ -62,7 +63,6 @@
#include <machine/resource.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <sys/rman.h>
#include <vm/vm.h>
diff --git a/sys/dev/hwpmc/hwpmc_arm64.c b/sys/dev/hwpmc/hwpmc_arm64.c
index af8d25b098c4..310e43065716 100644
--- a/sys/dev/hwpmc/hwpmc_arm64.c
+++ b/sys/dev/hwpmc/hwpmc_arm64.c
@@ -34,10 +34,12 @@
#include <machine/pmc_mdep.h>
#include <machine/cpu.h>
+#include <machine/machdep.h>
#include "opt_acpi.h"
static int arm64_npmcs;
+static bool arm64_64bit_events __read_mostly = false;
struct arm64_event_code_map {
enum pmc_event pe_ev;
@@ -112,7 +114,7 @@ arm64_counter_disable(unsigned int pmc)
/*
* Performance Monitors Control Register
*/
-static uint32_t
+static uint64_t
arm64_pmcr_read(void)
{
uint32_t reg;
@@ -123,7 +125,7 @@ arm64_pmcr_read(void)
}
static void
-arm64_pmcr_write(uint32_t reg)
+arm64_pmcr_write(uint64_t reg)
{
WRITE_SPECIALREG(pmcr_el0, reg);
@@ -134,7 +136,7 @@ arm64_pmcr_write(uint32_t reg)
/*
* Performance Count Register N
*/
-static uint32_t
+static uint64_t
arm64_pmcn_read(unsigned int pmc)
{
@@ -148,7 +150,7 @@ arm64_pmcn_read(unsigned int pmc)
}
static void
-arm64_pmcn_write(unsigned int pmc, uint32_t reg)
+arm64_pmcn_write(unsigned int pmc, uint64_t reg)
{
KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));
@@ -163,7 +165,7 @@ static int
arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
const struct pmc_op_pmcallocate *a)
{
- uint32_t config;
+ uint64_t config;
enum pmc_event pe;
KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
@@ -186,10 +188,18 @@ arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
switch (a->pm_caps & (PMC_CAP_SYSTEM | PMC_CAP_USER)) {
case PMC_CAP_SYSTEM:
+ /* Exclude EL0 */
config |= PMEVTYPER_U;
+ if (in_vhe()) {
+ /* If in VHE we need to include EL2 and exclude EL1 */
+ config |= PMEVTYPER_NSH | PMEVTYPER_P;
+ }
break;
case PMC_CAP_USER:
+ /* Exclude EL1 */
config |= PMEVTYPER_P;
+ /* Exclude EL2 */
+ config &= ~PMEVTYPER_NSH;
break;
default:
/*
@@ -197,11 +207,16 @@ arm64_allocate_pmc(int cpu, int ri, struct pmc *pm,
* (default setting) or if both flags are specified
* (user explicitly requested both qualifiers).
*/
+ if (in_vhe()) {
+ /* If in VHE we need to include EL2 */
+ config |= PMEVTYPER_NSH;
+ }
break;
}
pm->pm_md.pm_arm64.pm_arm64_evsel = config;
- PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%x", ri, config);
+ PMCDBG2(MDP, ALL, 2, "arm64-allocate ri=%d -> config=0x%lx", ri,
+ config);
return (0);
}
@@ -233,7 +248,15 @@ arm64_read_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t *v)
/* Reread counter in case we raced. */
tmp = arm64_pmcn_read(ri);
}
- tmp += 0x100000000llu * pm->pm_pcpu_state[cpu].pps_overflowcnt;
+ /*
+	 * If the counter is 32-bit, increment the upper bits of the counter.
+	 * If it is 64-bit, there is nothing to do, as tmp is already
+	 * 64-bit.
+ */
+ if (!arm64_64bit_events) {
+ tmp &= 0xffffffffu;
+ tmp += (uint64_t)pm->pm_pcpu_state[cpu].pps_overflowcnt << 32;
+ }
intr_restore(s);
PMCDBG2(MDP, REA, 2, "arm64-read id=%d -> %jd", ri, tmp);
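The reconstruction above can be summarized in one helper, a sketch under the assumption of a 32-bit hardware counter widened by a software overflow count (the helper name is ours, not part of the driver):

	static uint64_t
	widen_counter(uint32_t hw, uint32_t overflowcnt)
	{
		/* The overflow count supplies bits 63:32, hardware bits 31:0. */
		return (((uint64_t)overflowcnt << 32) | hw);
	}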
@@ -267,7 +290,10 @@ arm64_write_pmc(int cpu, int ri, struct pmc *pm, pmc_value_t v)
PMCDBG3(MDP, WRI, 1, "arm64-write cpu=%d ri=%d v=%jx", cpu, ri, v);
- pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
+ if (!arm64_64bit_events) {
+ pm->pm_pcpu_state[cpu].pps_overflowcnt = v >> 32;
+ v &= 0xffffffffu;
+ }
arm64_pmcn_write(ri, v);
return (0);
@@ -299,7 +325,7 @@ arm64_config_pmc(int cpu, int ri, struct pmc *pm)
static int
arm64_start_pmc(int cpu, int ri, struct pmc *pm)
{
- uint32_t config;
+ uint64_t config;
config = pm->pm_md.pm_arm64.pm_arm64_evsel;
@@ -475,9 +501,10 @@ arm64_pcpu_init(struct pmc_mdep *md, int cpu)
WRITE_SPECIALREG(pmcntenclr_el0, 0xffffffff);
WRITE_SPECIALREG(pmintenclr_el1, 0xffffffff);
- /* Enable unit */
- pmcr = arm64_pmcr_read();
- pmcr |= PMCR_E;
+ /* Enable unit with a useful default state */
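+	/* LC/LP: 64-bit cycle/event counters; C/P: reset counters; E: enable. */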
+ pmcr = PMCR_LC | PMCR_C | PMCR_P | PMCR_E;
+ if (arm64_64bit_events)
+ pmcr |= PMCR_LP;
arm64_pmcr_write(pmcr);
return (0);
@@ -486,7 +513,7 @@ arm64_pcpu_init(struct pmc_mdep *md, int cpu)
static int
arm64_pcpu_fini(struct pmc_mdep *md, int cpu)
{
- uint32_t pmcr;
+ uint64_t pmcr;
PMCDBG0(MDP, INI, 1, "arm64-pcpu-fini");
@@ -507,13 +534,14 @@ pmc_arm64_initialize(void)
struct pmc_mdep *pmc_mdep;
struct pmc_classdep *pcd;
int classes, idcode, impcode;
- int reg;
+ uint64_t dfr;
+ uint64_t pmcr;
uint64_t midr;
- reg = arm64_pmcr_read();
- arm64_npmcs = (reg & PMCR_N_MASK) >> PMCR_N_SHIFT;
- impcode = (reg & PMCR_IMP_MASK) >> PMCR_IMP_SHIFT;
- idcode = (reg & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
+ pmcr = arm64_pmcr_read();
+ arm64_npmcs = (pmcr & PMCR_N_MASK) >> PMCR_N_SHIFT;
+ impcode = (pmcr & PMCR_IMP_MASK) >> PMCR_IMP_SHIFT;
+ idcode = (pmcr & PMCR_IDCODE_MASK) >> PMCR_IDCODE_SHIFT;
PMCDBG1(MDP, INI, 1, "arm64-init npmcs=%d", arm64_npmcs);
@@ -529,6 +557,12 @@ pmc_arm64_initialize(void)
midr &= ~(CPU_VAR_MASK | CPU_REV_MASK);
snprintf(pmc_cpuid, sizeof(pmc_cpuid), "0x%016lx", midr);
+ /* Check if we have 64-bit counters */
+ if (get_kernel_reg(ID_AA64DFR0_EL1, &dfr)) {
+ if (ID_AA64DFR0_PMUVer_VAL(dfr) >= ID_AA64DFR0_PMUVer_3_5)
+ arm64_64bit_events = true;
+ }
+
/*
* Allocate space for pointers to PMC HW descriptors and for
* the MDEP structure used by MI code.
@@ -576,7 +610,7 @@ pmc_arm64_initialize(void)
pcd->pcd_class = PMC_CLASS_ARMV8;
pcd->pcd_num = arm64_npmcs;
pcd->pcd_ri = pmc_mdep->pmd_npmc;
- pcd->pcd_width = 32;
+ pcd->pcd_width = 64;
pcd->pcd_allocate_pmc = arm64_allocate_pmc;
pcd->pcd_config_pmc = arm64_config_pmc;
diff --git a/sys/dev/hwpmc/hwpmc_arm64.h b/sys/dev/hwpmc/hwpmc_arm64.h
index e1f605a0371b..97909d8e7c92 100644
--- a/sys/dev/hwpmc/hwpmc_arm64.h
+++ b/sys/dev/hwpmc/hwpmc_arm64.h
@@ -42,7 +42,7 @@
#ifdef _KERNEL
/* MD extension for 'struct pmc' */
struct pmc_md_arm64_pmc {
- uint32_t pm_arm64_evsel;
+ uint64_t pm_arm64_evsel;
};
#endif /* _KERNEL */
#endif /* _DEV_HWPMC_ARMV8_H_ */
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index bf224ded126f..83784b93718e 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -1051,7 +1051,7 @@ core_intr(struct trapframe *tf)
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
if (found_interrupt)
- lapic_reenable_pmc();
+ lapic_reenable_pcint();
return (found_interrupt);
}
@@ -1150,7 +1150,7 @@ core2_intr(struct trapframe *tf)
counter_u64_add(pmc_stats.pm_intr_ignored, 1);
if (found_interrupt)
- lapic_reenable_pmc();
+ lapic_reenable_pcint();
/*
* Reenable all non-stalled PMCs.
diff --git a/sys/dev/hwpmc/hwpmc_logging.c b/sys/dev/hwpmc/hwpmc_logging.c
index 6394d7a9cdad..8fd7ef06a977 100644
--- a/sys/dev/hwpmc/hwpmc_logging.c
+++ b/sys/dev/hwpmc/hwpmc_logging.c
@@ -93,88 +93,93 @@ SYSCTL_INT(_kern_hwpmc, OID_AUTO, nbuffers_pcpu, CTLFLAG_RDTUN,
static struct mtx pmc_kthread_mtx; /* sleep lock */
-#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \
- (D)->plb_fence = ((char *) (buf)) + 1024*pmclog_buffer_size; \
- (D)->plb_base = (D)->plb_ptr = ((char *) (buf)); \
- (D)->plb_domain = domain; \
- } while (0)
+#define PMCLOG_INIT_BUFFER_DESCRIPTOR(D, buf, domain) do { \
+ (D)->plb_fence = ((char *)(buf)) + 1024 * pmclog_buffer_size; \
+ (D)->plb_base = (D)->plb_ptr = ((char *)(buf)); \
+ (D)->plb_domain = domain; \
+} while (0)
-#define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \
- (D)->plb_ptr = (D)->plb_base; \
- } while (0)
+#define PMCLOG_RESET_BUFFER_DESCRIPTOR(D) do { \
+ (D)->plb_ptr = (D)->plb_base; \
+} while (0)
/*
* Log file record constructors.
*/
-#define _PMCLOG_TO_HEADER(T,L) \
+#define _PMCLOG_TO_HEADER(T, L) \
((PMCLOG_HEADER_MAGIC << 24) | (T << 16) | ((L) & 0xFFFF))
/* reserve LEN bytes of space and initialize the entry header */
-#define _PMCLOG_RESERVE_SAFE(PO,TYPE,LEN,ACTION, TSC) do { \
- uint32_t *_le; \
- int _len = roundup((LEN), sizeof(uint32_t)); \
- struct pmclog_header *ph; \
- if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
- ACTION; \
- } \
- ph = (struct pmclog_header *)_le; \
- ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
- ph->pl_tsc = (TSC); \
- _le += sizeof(*ph)/4 /* skip over timestamp */
+#define _PMCLOG_RESERVE_SAFE(PO, TYPE, LEN, ACTION, TSC) do { \
+ uint32_t *_le; \
+ int _len = roundup((LEN), sizeof(uint32_t)); \
+ struct pmclog_header *ph; \
+ \
+ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
+ ACTION; \
+ } \
+ ph = (struct pmclog_header *)_le; \
+	ph->pl_header = _PMCLOG_TO_HEADER(TYPE, _len);			\
+ ph->pl_tsc = (TSC); \
+ _le += sizeof(*ph) / 4 /* skip over timestamp */
/* reserve LEN bytes of space and initialize the entry header */
-#define _PMCLOG_RESERVE(PO,TYPE,LEN,ACTION) do { \
- uint32_t *_le; \
- int _len = roundup((LEN), sizeof(uint32_t)); \
- uint64_t tsc; \
- struct pmclog_header *ph; \
- tsc = pmc_rdtsc(); \
- spinlock_enter(); \
- if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
- spinlock_exit(); \
- ACTION; \
- } \
- ph = (struct pmclog_header *)_le; \
- ph->pl_header =_PMCLOG_TO_HEADER(TYPE,_len); \
- ph->pl_tsc = tsc; \
- _le += sizeof(*ph)/4 /* skip over timestamp */
-
-
-
-#define PMCLOG_RESERVE_SAFE(P,T,L,TSC) _PMCLOG_RESERVE_SAFE(P,T,L,return,TSC)
-#define PMCLOG_RESERVE(P,T,L) _PMCLOG_RESERVE(P,T,L,return)
-#define PMCLOG_RESERVE_WITH_ERROR(P,T,L) _PMCLOG_RESERVE(P,T,L, \
- error=ENOMEM;goto error)
+#define _PMCLOG_RESERVE(PO, TYPE, LEN, ACTION) do { \
+ uint32_t *_le; \
+ int _len = roundup((LEN), sizeof(uint32_t)); \
+ uint64_t tsc; \
+ struct pmclog_header *ph; \
+ \
+ tsc = pmc_rdtsc(); \
+ spinlock_enter(); \
+ if ((_le = pmclog_reserve((PO), _len)) == NULL) { \
+ spinlock_exit(); \
+ ACTION; \
+ } \
+ ph = (struct pmclog_header *)_le; \
+	ph->pl_header = _PMCLOG_TO_HEADER(TYPE, _len);			\
+ ph->pl_tsc = tsc; \
+ _le += sizeof(*ph) / 4 /* skip over timestamp */
+
+#define PMCLOG_RESERVE_SAFE(P, T, L, TSC) \
+ _PMCLOG_RESERVE_SAFE(P, T, L, return, TSC)
+#define PMCLOG_RESERVE(P, T, L)					\
+ _PMCLOG_RESERVE(P, T, L, return)
+#define PMCLOG_RESERVE_WITH_ERROR(P, T, L) \
+ _PMCLOG_RESERVE(P, T, L, error = ENOMEM; goto error)
#define PMCLOG_EMIT32(V) do { *_le++ = (V); } while (0)
#define PMCLOG_EMIT64(V) do { \
- *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \
- *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \
- } while (0)
+ *_le++ = (uint32_t) ((V) & 0xFFFFFFFF); \
+ *_le++ = (uint32_t) (((V) >> 32) & 0xFFFFFFFF); \
+} while (0)
/* Emit a string. Caution: does NOT update _le, so needs to be last */
-#define PMCLOG_EMITSTRING(S,L) do { bcopy((S), _le, (L)); } while (0)
-#define PMCLOG_EMITNULLSTRING(L) do { bzero(_le, (L)); } while (0)
-
-#define PMCLOG_DESPATCH_SAFE(PO) \
- pmclog_release((PO)); \
- } while (0)
-
-#define PMCLOG_DESPATCH_SCHED_LOCK(PO) \
- pmclog_release_flags((PO), 0); \
- } while (0)
-
-#define PMCLOG_DESPATCH(PO) \
- pmclog_release((PO)); \
- spinlock_exit(); \
- } while (0)
-
-#define PMCLOG_DESPATCH_SYNC(PO) \
- pmclog_schedule_io((PO), 1); \
- spinlock_exit(); \
- } while (0)
-
+#define PMCLOG_EMITSTRING(S, L) do {					\
+ bcopy((S), _le, (L)); \
+} while (0)
+#define PMCLOG_EMITNULLSTRING(L) do { \
+ bzero(_le, (L)); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SAFE(PO) \
+ pmclog_release((PO)); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SCHED_LOCK(PO) \
+ pmclog_release_flags((PO), 0); \
+} while (0)
+
+#define PMCLOG_DESPATCH(PO) \
+ pmclog_release((PO)); \
+ spinlock_exit(); \
+} while (0)
+
+#define PMCLOG_DESPATCH_SYNC(PO) \
+ pmclog_schedule_io((PO), 1); \
+ spinlock_exit(); \
+} while (0)
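The open "do {" in each _PMCLOG_RESERVE* macro is intentionally left unbalanced: the matching PMCLOG_DESPATCH* macro supplies the closing "} while (0)". A usage sketch (the record type and payload here are illustrative, not an existing log emitter):

	static void
	pmclog_emit_example(struct pmc_owner *po, uint64_t val)
	{
		/* Opens "do {" and reserves header + payload space. */
		PMCLOG_RESERVE(po, PMCLOG_TYPE_EXAMPLE,
		    sizeof(struct pmclog_header) + sizeof(uint64_t));
		PMCLOG_EMIT64(val);
		PMCLOG_DESPATCH(po);	/* closes "} while (0)" */
	}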
#define TSDELTA 4
/*
diff --git a/sys/dev/hwpmc/hwpmc_mod.c b/sys/dev/hwpmc/hwpmc_mod.c
index 9b85c989dc96..a6a6ae68996c 100644
--- a/sys/dev/hwpmc/hwpmc_mod.c
+++ b/sys/dev/hwpmc/hwpmc_mod.c
@@ -210,7 +210,7 @@ static int pmc_attach_one_process(struct proc *p, struct pmc *pm);
static bool pmc_can_allocate_row(int ri, enum pmc_mode mode);
static bool pmc_can_allocate_rowindex(struct proc *p, unsigned int ri,
int cpu);
-static int pmc_can_attach(struct pmc *pm, struct proc *p);
+static bool pmc_can_attach(struct pmc *pm, struct proc *p);
static void pmc_capture_user_callchain(int cpu, int soft,
struct trapframe *tf);
static void pmc_cleanup(void);
@@ -1029,19 +1029,19 @@ pmc_unlink_target_process(struct pmc *pm, struct pmc_process *pp)
* Check if PMC 'pm' may be attached to target process 't'.
*/
-static int
+static bool
pmc_can_attach(struct pmc *pm, struct proc *t)
{
struct proc *o; /* pmc owner */
struct ucred *oc, *tc; /* owner, target credentials */
- int decline_attach, i;
+ bool decline_attach;
/*
* A PMC's owner can always attach that PMC to itself.
*/
if ((o = pm->pm_owner->po_owner) == t)
- return 0;
+ return (true);
PROC_LOCK(o);
oc = o->p_ucred;
@@ -1066,18 +1066,17 @@ pmc_can_attach(struct pmc *pm, struct proc *t)
* Every one of the target's group ids, must be in the owner's
* group list.
*/
- for (i = 0; !decline_attach && i < tc->cr_ngroups; i++)
+ for (int i = 0; !decline_attach && i < tc->cr_ngroups; i++)
decline_attach = !groupmember(tc->cr_groups[i], oc);
-
- /* check the read and saved gids too */
- if (decline_attach == 0)
- decline_attach = !groupmember(tc->cr_rgid, oc) ||
+ if (!decline_attach)
+ decline_attach = !groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
!groupmember(tc->cr_svgid, oc);
crfree(tc);
crfree(oc);
- return !decline_attach;
+ return (!decline_attach);
}
/*
@@ -1412,7 +1411,7 @@ pmc_process_exec(struct thread *td, struct pmckern_procexec *pk)
*/
for (ri = 0; ri < md->pmd_npmc; ri++) {
if ((pm = pp->pp_pmcs[ri].pp_pmc) != NULL) {
- if (pmc_can_attach(pm, td->td_proc) != 0) {
+ if (pmc_can_attach(pm, td->td_proc)) {
pmc_detach_one_process(td->td_proc, pm,
PMC_FLAG_NONE);
}
diff --git a/sys/dev/hwpmc/hwpmc_x86.c b/sys/dev/hwpmc/hwpmc_x86.c
index 1d04a6610674..2903c25ef5c9 100644
--- a/sys/dev/hwpmc/hwpmc_x86.c
+++ b/sys/dev/hwpmc/hwpmc_x86.c
@@ -230,7 +230,7 @@ struct pmc_mdep *
pmc_md_initialize(void)
{
int i;
- struct pmc_mdep *md;
+ struct pmc_mdep *md = NULL;
/* determine the CPU kind */
if (cpu_vendor_id == CPU_VENDOR_AMD ||
@@ -238,11 +238,13 @@ pmc_md_initialize(void)
md = pmc_amd_initialize();
else if (cpu_vendor_id == CPU_VENDOR_INTEL)
md = pmc_intel_initialize();
- else
+
+ if (md == NULL)
return (NULL);
+ nmi_register_handler(md->pmd_intr);
/* disallow sampling if we do not have an LAPIC */
- if (md != NULL && !lapic_enable_pmc())
+ if (!lapic_enable_pcint())
for (i = 0; i < md->pmd_nclass; i++) {
if (i == PMC_CLASS_INDEX_SOFT)
continue;
@@ -255,8 +257,10 @@ pmc_md_initialize(void)
void
pmc_md_finalize(struct pmc_mdep *md)
{
-
- lapic_disable_pmc();
+ if (md != NULL) {
+ lapic_disable_pcint();
+ nmi_remove_handler(md->pmd_intr);
+ }
if (cpu_vendor_id == CPU_VENDOR_AMD ||
cpu_vendor_id == CPU_VENDOR_HYGON)
pmc_amd_finalize(md);
diff --git a/sys/dev/hwpmc/pmu_dmc620.c b/sys/dev/hwpmc/pmu_dmc620.c
index 42e5dfdbf154..c33e5264f7de 100644
--- a/sys/dev/hwpmc/pmu_dmc620.c
+++ b/sys/dev/hwpmc/pmu_dmc620.c
@@ -68,7 +68,7 @@ struct pmu_dmc620_softc {
#define RD4(sc, r) bus_read_4((sc)->sc_res[0], (r))
#define WR4(sc, r, v) bus_write_4((sc)->sc_res[0], (r), (v))
-#define MD4(sc, r, c, s) WR4((sc), (r), RD4((sc), (r)) & ~(c) | (s))
+#define MD4(sc, r, c, s) WR4((sc), (r), (RD4((sc), (r)) & ~(c)) | (s))
#define CD2MD4(sc, u, r, c, s) MD4((sc), DMC620_CLKDIV2_REG((u), (r)), (c), (s))
#define CMD4(sc, u, r, c, s) MD4((sc), DMC620_CLK_REG((u), (r)), (c), (s))
diff --git a/sys/dev/hwt/hwt.c b/sys/dev/hwt/hwt.c
new file mode 100644
index 000000000000..c476e6031ba8
--- /dev/null
+++ b/sys/dev/hwt/hwt.c
@@ -0,0 +1,242 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Hardware Tracing framework.
+ *
+ * The framework manages hardware tracing units that collect information
+ * about software execution and store it as events in a highly compressed
+ * format in DRAM. The events cover control flow changes of a program
+ * (whether branches were taken or not), exceptions taken, timing information,
+ * cycles elapsed, and more. That allows us to restore the entire program flow
+ * of a given application without performance impact.
+ *
+ * Design overview.
+ *
+ * The framework provides character devices for mmap(2) and ioctl(2) system
+ * calls to allow the user to manage CPU (hardware) tracing units.
+ *
+ * /dev/hwt:
+ * .ioctl:
+ * hwt_ioctl():
+ * a) HWT_IOC_ALLOC
+ * Allocates kernel tracing context CTX based on requested mode
+ *          Allocates a kernel tracing context (CTX) based on the requested
+ *          mode of operation. Verifies the information that comes with the
+ *          request (pid, cpus) and allocates a unique ID for the context.
+ *
+ * /dev/hwt_%d[_%d], ident[, thread_id]
+ * .mmap
+ * Maps tracing buffers of the corresponding thread to userspace.
+ * .ioctl
+ * hwt_thread_ioctl():
+ * a) HWT_IOC_START
+ * Enables tracing unit for a given context.
+ * b) HWT_IOC_RECORD_GET
+ * Transfers (small) record entries collected during program
+ *          execution for a given context to userspace, such as the mapping
+ *          tables of executables and dynamic libraries, the interpreter,
+ *          kernel mappings, TIDs of threads created, etc.
+ * c) HWT_IOC_SET_CONFIG
+ *          Allows the user to specify backend-specific configuration of the
+ * trace unit.
+ * d) HWT_IOC_WAKEUP
+ * Wakes up a thread that is currently sleeping.
+ * e) HWT_IOC_BUFPTR_GET
+ *          Transfers the current hardware pointer in the filling buffer
+ *          to userspace.
+ * f) HWT_IOC_SVC_BUF
+ *          To avoid data loss, userspace may notify the kernel that it has
+ *          copied out the given buffer, so the kernel is free to overwrite it.
+ *
+ * HWT context lifecycle in THREAD mode of operation:
+ * 1. User invokes HWT_IOC_ALLOC ioctl with information about the pid to trace
+ *    and the size of the buffers to allocate for the trace data (a condensed
+ *    userspace sketch of this sequence follows the comment).
+ *    Some architectures support several tracing units, so the user also
+ *    provides the backend name to use for this context, e.g. "coresight".
+ * 2. Kernel allocates the context and looks up the proc for the given pid.
+ *    Then it creates the first hwt_thread in the context and allocates trace
+ *    buffers for it. Immediately, the kernel initializes the tracing backend.
+ *    Kernel creates a character device and returns the unique identifier of
+ *    the trace context to the user.
+ * 3. To manage the new context, user opens the character device created.
+ *    User invokes the HWT_IOC_START ioctl and the kernel marks the context
+ *    as RUNNING. At this point any HWT hook invocation by the scheduler
+ *    enables/disables tracing for threads associated with the context
+ *    (threads of the proc). Any new thread created in the target proc
+ *    invokes the corresponding hooks in the HWT framework, so that a new
+ *    hwt_thread and its buffers are allocated and a character device for
+ *    mmap(2) is created on the fly.
+ * 4. User issues HWT_IOC_RECORD_GET ioctl to fetch information about mapping
+ *    tables and threads created during application startup.
+ * 5. User mmaps the tracing buffers of each thread to userspace (using the
+ *    /dev/hwt_%d_%d % (ident, thread_id) character devices).
+ * 6. User can repeat step 4 if an expected thread has not yet been created
+ *    during target application execution.
+ * 7. User issues HWT_IOC_BUFPTR_GET ioctl to get the current filling level of
+ *    the hardware buffer of a given thread.
+ * 8. User invokes the trace decoder library to process the available data and
+ *    see the results in human-readable form.
+ * 9. User repeats step 7 as needed.
+ *
+ * HWT context lifecycle in CPU mode of operation:
+ * 1. User invokes HWT_IOC_ALLOC ioctl providing a set of CPUs to trace within
+ *    a single CTX.
+ * 2. Kernel verifies the set of CPUs and allocates the tracing context,
+ *    creating a buffer for each CPU.
+ *    Kernel creates a character device for every CPU provided in the request.
+ *    Kernel initializes the tracing backend.
+ * 3. User opens the character devices of interest to map the buffers to
+ *    userspace. User can start tracing by invoking HWT_IOC_START on any of
+ *    the character devices within the context; the entire context will be
+ *    marked as RUNNING.
+ * 4. The rest is similar to the THREAD mode.
+ *
+ */
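A condensed userspace sketch of the THREAD-mode sequence above. The ioctl names come from the framework itself; the request-structure fields and the source of `ident' and `tid' are assumptions for illustration:

	struct hwt_alloc al = {
		.pid = target_pid,		/* proc to trace */
		.bufsize = 4 * 1024 * 1024,	/* per-thread trace buffer */
		.backend_name = "coresight",	/* platform tracing unit */
	};
	char path[64];
	int fd, ctx_fd;

	fd = open("/dev/hwt", O_RDWR);
	ioctl(fd, HWT_IOC_ALLOC, &al);			/* steps 1-2 */
	snprintf(path, sizeof(path), "/dev/hwt_%d_%d", ident, tid);
	ctx_fd = open(path, O_RDWR);			/* step 3 */
	ioctl(ctx_fd, HWT_IOC_START, NULL);		/* context -> RUNNING */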
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_record.h>
+#include <dev/hwt/hwt_ioctl.h>
+#include <dev/hwt/hwt_hook.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static eventhandler_tag hwt_exit_tag;
+static struct cdev *hwt_cdev;
+static struct cdevsw hwt_cdevsw = {
+ .d_version = D_VERSION,
+ .d_name = "hwt",
+ .d_mmap_single = NULL,
+ .d_ioctl = hwt_ioctl
+};
+
+static void
+hwt_process_exit(void *arg __unused, struct proc *p)
+{
+ struct hwt_owner *ho;
+
+ /* Stop HWTs associated with exiting owner, if any. */
+ ho = hwt_ownerhash_lookup(p);
+ if (ho)
+ hwt_owner_shutdown(ho);
+}
+
+static int
+hwt_load(void)
+{
+ struct make_dev_args args;
+ int error;
+
+ make_dev_args_init(&args);
+ args.mda_devsw = &hwt_cdevsw;
+ args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ args.mda_uid = UID_ROOT;
+ args.mda_gid = GID_WHEEL;
+ args.mda_mode = 0660;
+ args.mda_si_drv1 = NULL;
+
+ hwt_backend_load();
+ hwt_ctx_load();
+ hwt_contexthash_load();
+ hwt_ownerhash_load();
+ hwt_record_load();
+
+ error = make_dev_s(&args, &hwt_cdev, "hwt");
+ if (error != 0)
+ return (error);
+
+ hwt_exit_tag = EVENTHANDLER_REGISTER(process_exit, hwt_process_exit,
+ NULL, EVENTHANDLER_PRI_ANY);
+
+ hwt_hook_load();
+
+ return (0);
+}
+
+static int
+hwt_unload(void)
+{
+
+ hwt_hook_unload();
+ EVENTHANDLER_DEREGISTER(process_exit, hwt_exit_tag);
+ destroy_dev(hwt_cdev);
+ hwt_record_unload();
+ hwt_ownerhash_unload();
+ hwt_contexthash_unload();
+ hwt_ctx_unload();
+ hwt_backend_unload();
+
+ return (0);
+}
+
+static int
+hwt_modevent(module_t mod, int type, void *data)
+{
+ int error;
+
+ switch (type) {
+ case MOD_LOAD:
+ error = hwt_load();
+ break;
+ case MOD_UNLOAD:
+ error = hwt_unload();
+ break;
+ default:
+ error = 0;
+ break;
+ }
+
+ return (error);
+}
+
+static moduledata_t hwt_mod = {
+ "hwt",
+ hwt_modevent,
+ NULL
+};
+
+DECLARE_MODULE(hwt, hwt_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(hwt, 1);
diff --git a/sys/dev/hwt/hwt_backend.c b/sys/dev/hwt/hwt_backend.c
new file mode 100644
index 000000000000..1ba5db0d3d09
--- /dev/null
+++ b/sys/dev/hwt/hwt_backend.c
@@ -0,0 +1,289 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Hardware Trace (HWT) framework. */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_backend.h>
+
+#define HWT_BACKEND_DEBUG
+#undef HWT_BACKEND_DEBUG
+
+#ifdef HWT_BACKEND_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static struct mtx hwt_backend_mtx;
+
+struct hwt_backend_entry {
+ struct hwt_backend *backend;
+ LIST_ENTRY(hwt_backend_entry) next;
+};
+
+static LIST_HEAD(, hwt_backend_entry) hwt_backends;
+
+static MALLOC_DEFINE(M_HWT_BACKEND, "hwt_backend", "HWT backend");
+
+int
+hwt_backend_init(struct hwt_context *ctx)
+{
+ int error;
+
+ dprintf("%s\n", __func__);
+
+ error = ctx->hwt_backend->ops->hwt_backend_init(ctx);
+
+ return (error);
+}
+
+void
+hwt_backend_deinit(struct hwt_context *ctx)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_deinit(ctx);
+}
+
+int
+hwt_backend_configure(struct hwt_context *ctx, int cpu_id, int thread_id)
+{
+ int error;
+
+ dprintf("%s\n", __func__);
+
+ error = ctx->hwt_backend->ops->hwt_backend_configure(ctx, cpu_id,
+ thread_id);
+
+ return (error);
+}
+
+void
+hwt_backend_enable(struct hwt_context *ctx, int cpu_id)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_enable(ctx, cpu_id);
+}
+
+void
+hwt_backend_disable(struct hwt_context *ctx, int cpu_id)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_disable(ctx, cpu_id);
+}
+
+void
+hwt_backend_enable_smp(struct hwt_context *ctx)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_enable_smp(ctx);
+}
+
+void
+hwt_backend_disable_smp(struct hwt_context *ctx)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_disable_smp(ctx);
+}
+
+void __unused
+hwt_backend_dump(struct hwt_context *ctx, int cpu_id)
+{
+
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_dump(cpu_id);
+}
+
+int
+hwt_backend_read(struct hwt_context *ctx, struct hwt_vm *vm, int *ident,
+ vm_offset_t *offset, uint64_t *data)
+{
+ int error;
+
+ dprintf("%s\n", __func__);
+
+ error = ctx->hwt_backend->ops->hwt_backend_read(vm, ident,
+ offset, data);
+
+ return (error);
+}
+
+struct hwt_backend *
+hwt_backend_lookup(const char *name)
+{
+ struct hwt_backend_entry *entry;
+ struct hwt_backend *backend;
+
+ HWT_BACKEND_LOCK();
+ LIST_FOREACH(entry, &hwt_backends, next) {
+ backend = entry->backend;
+ if (strcmp(backend->name, name) == 0) {
+ HWT_BACKEND_UNLOCK();
+ return (backend);
+ }
+ }
+ HWT_BACKEND_UNLOCK();
+
+ return (NULL);
+}
+
+int
+hwt_backend_register(struct hwt_backend *backend)
+{
+ struct hwt_backend_entry *entry;
+
+ if (backend == NULL ||
+ backend->name == NULL ||
+ backend->ops == NULL)
+ return (EINVAL);
+
+ entry = malloc(sizeof(struct hwt_backend_entry), M_HWT_BACKEND,
+ M_WAITOK | M_ZERO);
+ entry->backend = backend;
+
+ HWT_BACKEND_LOCK();
+ LIST_INSERT_HEAD(&hwt_backends, entry, next);
+ HWT_BACKEND_UNLOCK();
+
+ return (0);
+}
+
+int
+hwt_backend_unregister(struct hwt_backend *backend)
+{
+ struct hwt_backend_entry *entry, *tmp;
+
+ if (backend == NULL)
+ return (EINVAL);
+
+ /* TODO: check if not in use */
+
+ HWT_BACKEND_LOCK();
+ LIST_FOREACH_SAFE(entry, &hwt_backends, next, tmp) {
+ if (entry->backend == backend) {
+ LIST_REMOVE(entry, next);
+ HWT_BACKEND_UNLOCK();
+ free(entry, M_HWT_BACKEND);
+ return (0);
+ }
+ }
+ HWT_BACKEND_UNLOCK();
+
+ return (ENOENT);
+}
+
+void
+hwt_backend_load(void)
+{
+
+ mtx_init(&hwt_backend_mtx, "hwt backend", NULL, MTX_DEF);
+ LIST_INIT(&hwt_backends);
+}
+
+void
+hwt_backend_unload(void)
+{
+
+ /* TODO: ensure all unregistered */
+
+ mtx_destroy(&hwt_backend_mtx);
+}
+
+void
+hwt_backend_stop(struct hwt_context *ctx)
+{
+ dprintf("%s\n", __func__);
+
+ ctx->hwt_backend->ops->hwt_backend_stop(ctx);
+}
+
+int
+hwt_backend_svc_buf(struct hwt_context *ctx, void *data, size_t data_size,
+ int data_version)
+{
+ int error;
+
+ dprintf("%s\n", __func__);
+
+ error = ctx->hwt_backend->ops->hwt_backend_svc_buf(ctx, data, data_size,
+ data_version);
+
+ return (error);
+}
+
+int
+hwt_backend_thread_alloc(struct hwt_context *ctx, struct hwt_thread *thr)
+{
+ int error;
+
+ dprintf("%s\n", __func__);
+
+ if (ctx->hwt_backend->ops->hwt_backend_thread_alloc == NULL)
+ return (0);
+ KASSERT(thr->private == NULL,
+ ("%s: thread private data is not NULL\n", __func__));
+ error = ctx->hwt_backend->ops->hwt_backend_thread_alloc(thr);
+
+ return (error);
+}
+
+void
+hwt_backend_thread_free(struct hwt_thread *thr)
+{
+ dprintf("%s\n", __func__);
+
+ if (thr->backend->ops->hwt_backend_thread_free == NULL)
+ return;
+ KASSERT(thr->private != NULL,
+ ("%s: thread private data is NULL\n", __func__));
+ thr->backend->ops->hwt_backend_thread_free(thr);
+
+ return;
+}
diff --git a/sys/dev/hwt/hwt_backend.h b/sys/dev/hwt/hwt_backend.h
new file mode 100644
index 000000000000..3b6c9442a7a6
--- /dev/null
+++ b/sys/dev/hwt/hwt_backend.h
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_BACKEND_H_
+#define _DEV_HWT_HWT_BACKEND_H_
+
+struct hwt_backend_ops {
+ int (*hwt_backend_init)(struct hwt_context *);
+ int (*hwt_backend_deinit)(struct hwt_context *);
+ int (*hwt_backend_configure)(struct hwt_context *, int cpu_id,
+ int thread_id);
+ int (*hwt_backend_svc_buf)(struct hwt_context *, void *data,
+ size_t data_size, int data_version);
+ void (*hwt_backend_enable)(struct hwt_context *, int cpu_id);
+ void (*hwt_backend_disable)(struct hwt_context *, int cpu_id);
+ int (*hwt_backend_read)(struct hwt_vm *, int *ident,
+ vm_offset_t *offset, uint64_t *data);
+ void (*hwt_backend_stop)(struct hwt_context *);
+ /* For backends that are tied to local CPU registers */
+ int (*hwt_backend_enable_smp)(struct hwt_context *);
+ int (*hwt_backend_disable_smp)(struct hwt_context *);
+ /* Allocation and initialization of backend-specific thread data. */
+ int (*hwt_backend_thread_alloc)(struct hwt_thread *);
+ void (*hwt_backend_thread_free)(struct hwt_thread *);
+ /* Debugging only. */
+ void (*hwt_backend_dump)(int cpu_id);
+};
+
+struct hwt_backend {
+ const char *name;
+ struct hwt_backend_ops *ops;
+ /* buffers require kernel virtual addresses */
+ bool kva_req;
+};
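A minimal sketch of wiring a backend into this table. The "foo" backend and its ops are hypothetical; judging by the wrappers in hwt_backend.c, only hwt_backend_thread_alloc/_free are NULL-checked and may be omitted:

	static struct hwt_backend_ops foo_ops = {
		.hwt_backend_init = foo_init,
		.hwt_backend_deinit = foo_deinit,
		.hwt_backend_configure = foo_configure,
		.hwt_backend_enable = foo_enable,
		.hwt_backend_disable = foo_disable,
		.hwt_backend_read = foo_read,
		.hwt_backend_stop = foo_stop,
	};

	static struct hwt_backend foo_backend = {
		.name = "foo",
		.ops = &foo_ops,
		.kva_req = true,	/* trace buffers need kernel VAs */
	};

	error = hwt_backend_register(&foo_backend);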
+
+int hwt_backend_init(struct hwt_context *ctx);
+void hwt_backend_deinit(struct hwt_context *ctx);
+int hwt_backend_configure(struct hwt_context *ctx, int cpu_id, int thread_id);
+void hwt_backend_enable(struct hwt_context *ctx, int cpu_id);
+void hwt_backend_disable(struct hwt_context *ctx, int cpu_id);
+void hwt_backend_enable_smp(struct hwt_context *ctx);
+void hwt_backend_disable_smp(struct hwt_context *ctx);
+void hwt_backend_dump(struct hwt_context *ctx, int cpu_id);
+int hwt_backend_read(struct hwt_context *ctx, struct hwt_vm *vm, int *ident,
+ vm_offset_t *offset, uint64_t *data);
+int hwt_backend_register(struct hwt_backend *);
+int hwt_backend_unregister(struct hwt_backend *);
+void hwt_backend_stop(struct hwt_context *);
+int hwt_backend_svc_buf(struct hwt_context *ctx, void *data, size_t data_size,
+ int data_version);
+struct hwt_backend *hwt_backend_lookup(const char *name);
+int hwt_backend_thread_alloc(struct hwt_context *ctx, struct hwt_thread *);
+void hwt_backend_thread_free(struct hwt_thread *);
+
+void hwt_backend_load(void);
+void hwt_backend_unload(void);
+
+#define HWT_BACKEND_LOCK() mtx_lock(&hwt_backend_mtx)
+#define HWT_BACKEND_UNLOCK() mtx_unlock(&hwt_backend_mtx)
+
+#endif /* !_DEV_HWT_HWT_BACKEND_H_ */
+
diff --git a/sys/dev/hwt/hwt_config.c b/sys/dev/hwt/hwt_config.c
new file mode 100644
index 000000000000..30688e7fc76b
--- /dev/null
+++ b/sys/dev/hwt/hwt_config.c
@@ -0,0 +1,108 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/lock.h>
+#include <sys/hwt.h>
+
+#include <vm/vm.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_record.h>
+
+#define HWT_MAXCONFIGSIZE PAGE_SIZE
+
+#define HWT_CONFIG_DEBUG
+#undef HWT_CONFIG_DEBUG
+
+#ifdef HWT_CONFIG_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_CONFIG, "hwt_config", "HWT config");
+
+int
+hwt_config_set(struct thread *td, struct hwt_context *ctx,
+ struct hwt_set_config *sconf)
+{
+ size_t config_size;
+ void *old_config;
+ void *config;
+ int error;
+
+ config_size = sconf->config_size;
+ if (config_size == 0)
+ return (0);
+
+ if (config_size > HWT_MAXCONFIGSIZE)
+ return (EFBIG);
+
+ config = malloc(config_size, M_HWT_CONFIG, M_WAITOK | M_ZERO);
+
+ error = copyin(sconf->config, config, config_size);
+ if (error) {
+ free(config, M_HWT_CONFIG);
+ return (error);
+ }
+
+ HWT_CTX_LOCK(ctx);
+ old_config = ctx->config;
+ ctx->config = config;
+ ctx->config_size = sconf->config_size;
+ ctx->config_version = sconf->config_version;
+ HWT_CTX_UNLOCK(ctx);
+
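+	/* Free the displaced config only after dropping the ctx spin lock. */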
+ if (old_config != NULL)
+ free(old_config, M_HWT_CONFIG);
+
+ return (error);
+}
+
+void
+hwt_config_free(struct hwt_context *ctx)
+{
+
+ if (ctx->config == NULL)
+ return;
+
+ free(ctx->config, M_HWT_CONFIG);
+
+ ctx->config = NULL;
+}
diff --git a/sys/dev/hwt/hwt_config.h b/sys/dev/hwt/hwt_config.h
new file mode 100644
index 000000000000..47485583063c
--- /dev/null
+++ b/sys/dev/hwt/hwt_config.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_CONFIG_H_
+#define _DEV_HWT_HWT_CONFIG_H_
+
+int hwt_config_set(struct thread *td, struct hwt_context *ctx,
+ struct hwt_set_config *sconf);
+void hwt_config_free(struct hwt_context *ctx);
+
+#endif /* !_DEV_HWT_HWT_CONFIG_H_ */
diff --git a/sys/dev/hwt/hwt_context.c b/sys/dev/hwt/hwt_context.c
new file mode 100644
index 000000000000..9af76cffc928
--- /dev/null
+++ b/sys/dev/hwt/hwt_context.c
@@ -0,0 +1,201 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bitstring.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/rwlock.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_vm.h>
+#include <dev/hwt/hwt_cpu.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_CTX, "hwt_ctx", "Hardware Trace");
+
+static bitstr_t *ident_set;
+static int ident_set_size;
+static struct mtx ident_set_mutex;
+
+static int
+hwt_ctx_ident_alloc(int *new_ident)
+{
+
+ mtx_lock(&ident_set_mutex);
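+	/* bit_ffc(3) yields the lowest clear bit (free ident), or -1 if full. */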
+ bit_ffc(ident_set, ident_set_size, new_ident);
+ if (*new_ident == -1) {
+ mtx_unlock(&ident_set_mutex);
+ return (ENOMEM);
+ }
+ bit_set(ident_set, *new_ident);
+ mtx_unlock(&ident_set_mutex);
+
+ return (0);
+}
+
+static void
+hwt_ctx_ident_free(int ident)
+{
+
+ mtx_lock(&ident_set_mutex);
+ bit_clear(ident_set, ident);
+ mtx_unlock(&ident_set_mutex);
+}
+
+int
+hwt_ctx_alloc(struct hwt_context **ctx0)
+{
+ struct hwt_context *ctx;
+ int error;
+
+ ctx = malloc(sizeof(struct hwt_context), M_HWT_CTX, M_WAITOK | M_ZERO);
+
+ TAILQ_INIT(&ctx->records);
+ TAILQ_INIT(&ctx->threads);
+ TAILQ_INIT(&ctx->cpus);
+ mtx_init(&ctx->mtx, "ctx", NULL, MTX_SPIN);
+ mtx_init(&ctx->rec_mtx, "ctx_rec", NULL, MTX_DEF);
+ refcount_init(&ctx->refcnt, 0);
+
+ error = hwt_ctx_ident_alloc(&ctx->ident);
+	if (error) {
+		printf("could not allocate ident bit str\n");
+		/* Do not leak the half-constructed context. */
+		mtx_destroy(&ctx->rec_mtx);
+		mtx_destroy(&ctx->mtx);
+		free(ctx, M_HWT_CTX);
+		return (error);
+	}
+
+ *ctx0 = ctx;
+
+ return (0);
+}
+
+static void
+hwt_ctx_free_cpus(struct hwt_context *ctx)
+{
+ struct hwt_cpu *cpu;
+
+ do {
+ HWT_CTX_LOCK(ctx);
+ cpu = TAILQ_FIRST(&ctx->cpus);
+ if (cpu)
+ TAILQ_REMOVE(&ctx->cpus, cpu, next);
+ HWT_CTX_UNLOCK(ctx);
+
+ if (cpu == NULL)
+ break;
+
+ /* TODO: move vm_free() to cpu_free()? */
+ hwt_vm_free(cpu->vm);
+ hwt_cpu_free(cpu);
+ } while (1);
+}
+
+static void
+hwt_ctx_free_threads(struct hwt_context *ctx)
+{
+ struct hwt_thread *thr;
+
+ dprintf("%s: remove threads\n", __func__);
+
+ do {
+ HWT_CTX_LOCK(ctx);
+ thr = TAILQ_FIRST(&ctx->threads);
+ if (thr)
+ TAILQ_REMOVE(&ctx->threads, thr, next);
+ HWT_CTX_UNLOCK(ctx);
+
+ if (thr == NULL)
+ break;
+
+ HWT_THR_LOCK(thr);
+ /* TODO: check if thr is sleeping before waking it up. */
+ wakeup(thr);
+ HWT_THR_UNLOCK(thr);
+
+ if (refcount_release(&thr->refcnt))
+ hwt_thread_free(thr);
+ } while (1);
+}
+
+void
+hwt_ctx_free(struct hwt_context *ctx)
+{
+
+ if (ctx->mode == HWT_MODE_CPU)
+ hwt_ctx_free_cpus(ctx);
+ else
+ hwt_ctx_free_threads(ctx);
+
+ hwt_config_free(ctx);
+ hwt_ctx_ident_free(ctx->ident);
+ free(ctx, M_HWT_CTX);
+}
+
+void
+hwt_ctx_put(struct hwt_context *ctx)
+{
+
+ refcount_release(&ctx->refcnt);
+}
+
+void
+hwt_ctx_load(void)
+{
+
+ ident_set_size = (1 << 8);
+ ident_set = bit_alloc(ident_set_size, M_HWT_CTX, M_WAITOK);
+ mtx_init(&ident_set_mutex, "ident set", NULL, MTX_DEF);
+}
+
+void
+hwt_ctx_unload(void)
+{
+
+ mtx_destroy(&ident_set_mutex);
+ free(ident_set, M_HWT_CTX);
+}
diff --git a/sys/dev/hwt/hwt_context.h b/sys/dev/hwt/hwt_context.h
new file mode 100644
index 000000000000..cafb197ae348
--- /dev/null
+++ b/sys/dev/hwt/hwt_context.h
@@ -0,0 +1,86 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_CONTEXT_H_
+#define _DEV_HWT_HWT_CONTEXT_H_
+
+enum hwt_ctx_state {
+ CTX_STATE_STOPPED,
+ CTX_STATE_RUNNING,
+};
+
+struct hwt_context {
+ TAILQ_HEAD(, hwt_record_entry) records;
+
+ LIST_ENTRY(hwt_context) next_hch; /* Entry in contexthash. */
+ LIST_ENTRY(hwt_context) next_hwts; /* Entry in ho->hwts. */
+
+ int mode;
+ int ident;
+
+ int kqueue_fd;
+ struct thread *hwt_td;
+
+ /* CPU mode. */
+ cpuset_t cpu_map;
+ TAILQ_HEAD(, hwt_cpu) cpus;
+
+ /* Thread mode. */
+ struct proc *proc; /* Target proc. */
+ pid_t pid; /* Target pid. */
+ TAILQ_HEAD(, hwt_thread) threads;
+ int thread_counter;
+ int pause_on_mmap;
+
+ size_t bufsize; /* Trace bufsize for each vm.*/
+
+ void *config;
+ size_t config_size;
+ int config_version;
+
+ struct hwt_owner *hwt_owner;
+ struct hwt_backend *hwt_backend;
+
+ struct mtx mtx;
+ struct mtx rec_mtx;
+ enum hwt_ctx_state state;
+ int refcnt;
+};
+
+#define HWT_CTX_LOCK(ctx) mtx_lock_spin(&(ctx)->mtx)
+#define HWT_CTX_UNLOCK(ctx) mtx_unlock_spin(&(ctx)->mtx)
+#define HWT_CTX_ASSERT_LOCKED(ctx) mtx_assert(&(ctx)->mtx, MA_OWNED)
+
+int hwt_ctx_alloc(struct hwt_context **ctx0);
+void hwt_ctx_free(struct hwt_context *ctx);
+void hwt_ctx_put(struct hwt_context *ctx);
+
+void hwt_ctx_load(void);
+void hwt_ctx_unload(void);
+
+#endif /* !_DEV_HWT_HWT_CONTEXT_H_ */
diff --git a/sys/dev/hwt/hwt_contexthash.c b/sys/dev/hwt/hwt_contexthash.c
new file mode 100644
index 000000000000..5682b7d38e5e
--- /dev/null
+++ b/sys/dev/hwt/hwt_contexthash.c
@@ -0,0 +1,134 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+#define HWT_CONTEXTHASH_SIZE 1024
+
+static MALLOC_DEFINE(M_HWT_CONTEXTHASH, "hwt_chash", "Hardware Trace");
+
+/*
+ * Hash function. Discard the lower 2 bits of the pointer since
+ * these are always zero for our uses. The hash multiplier is
+ * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
+ */
+
+#define _HWT_HM 11400714819323198486u /* hash multiplier */
+#define HWT_HASH_PTR(P, M) ((((unsigned long) (P) >> 2) * _HWT_HM) & (M))
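+
+/*
+ * For example, with HWT_CONTEXTHASH_SIZE = 1024 the mask returned by
+ * hashinit() is 1023, so this multiplicative (Fibonacci) hash keeps the
+ * low 10 bits of ((p >> 2) * _HWT_HM) as the bucket index.
+ */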
+
+static struct mtx hwt_contexthash_mtx;
+static u_long hwt_contexthashmask;
+static LIST_HEAD(hwt_contexthash, hwt_context) *hwt_contexthash;
+
+/*
+ * For use by hwt_switch_in/out() and hwt_record() only.
+ * This function returns with the ctx refcount held.
+ */
+struct hwt_context *
+hwt_contexthash_lookup(struct proc *p)
+{
+ struct hwt_contexthash *hch;
+ struct hwt_context *ctx;
+ int hindex;
+
+ hindex = HWT_HASH_PTR(p, hwt_contexthashmask);
+ hch = &hwt_contexthash[hindex];
+
+ HWT_CTXHASH_LOCK();
+ LIST_FOREACH(ctx, hch, next_hch) {
+ if (ctx->proc == p) {
+ refcount_acquire(&ctx->refcnt);
+ HWT_CTXHASH_UNLOCK();
+ return (ctx);
+ }
+ }
+ HWT_CTXHASH_UNLOCK();
+
+ return (NULL);
+}
+
+void
+hwt_contexthash_insert(struct hwt_context *ctx)
+{
+ struct hwt_contexthash *hch;
+ int hindex;
+
+ hindex = HWT_HASH_PTR(ctx->proc, hwt_contexthashmask);
+ hch = &hwt_contexthash[hindex];
+
+ HWT_CTXHASH_LOCK();
+ LIST_INSERT_HEAD(hch, ctx, next_hch);
+ HWT_CTXHASH_UNLOCK();
+}
+
+void
+hwt_contexthash_remove(struct hwt_context *ctx)
+{
+
+ HWT_CTXHASH_LOCK();
+ LIST_REMOVE(ctx, next_hch);
+ HWT_CTXHASH_UNLOCK();
+}
+
+void
+hwt_contexthash_load(void)
+{
+
+ hwt_contexthash = hashinit(HWT_CONTEXTHASH_SIZE, M_HWT_CONTEXTHASH,
+ &hwt_contexthashmask);
+ mtx_init(&hwt_contexthash_mtx, "hwt ctx hash", "hwt ctx", MTX_SPIN);
+}
+
+void
+hwt_contexthash_unload(void)
+{
+
+ mtx_destroy(&hwt_contexthash_mtx);
+ hashdestroy(hwt_contexthash, M_HWT_CONTEXTHASH, hwt_contexthashmask);
+}
diff --git a/sys/dev/hwt/hwt_contexthash.h b/sys/dev/hwt/hwt_contexthash.h
new file mode 100644
index 000000000000..c3ab7acd2a74
--- /dev/null
+++ b/sys/dev/hwt/hwt_contexthash.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_CONTEXTHASH_H_
+#define _DEV_HWT_HWT_CONTEXTHASH_H_
+
+struct hwt_context * hwt_contexthash_lookup(struct proc *p);
+void hwt_contexthash_insert(struct hwt_context *ctx);
+void hwt_contexthash_remove(struct hwt_context *ctx);
+
+void hwt_contexthash_load(void);
+void hwt_contexthash_unload(void);
+
+#define HWT_CTXHASH_LOCK() mtx_lock_spin(&hwt_contexthash_mtx)
+#define HWT_CTXHASH_UNLOCK() mtx_unlock_spin(&hwt_contexthash_mtx)
+
+#endif /* !_DEV_HWT_HWT_CONTEXTHASH_H_ */
diff --git a/sys/dev/hwt/hwt_cpu.c b/sys/dev/hwt/hwt_cpu.c
new file mode 100644
index 000000000000..7d38eb082e65
--- /dev/null
+++ b/sys/dev/hwt/hwt_cpu.c
@@ -0,0 +1,115 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/hwt.h>
+
+#include <vm/vm.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_record.h>
+#include <dev/hwt/hwt_cpu.h>
+
+#define HWT_CPU_DEBUG
+#undef HWT_CPU_DEBUG
+
+#ifdef HWT_CPU_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_CPU, "hwt_cpu", "HWT cpu");
+
+struct hwt_cpu *
+hwt_cpu_alloc(void)
+{
+ struct hwt_cpu *cpu;
+
+ cpu = malloc(sizeof(struct hwt_cpu), M_HWT_CPU, M_WAITOK | M_ZERO);
+
+ return (cpu);
+}
+
+void
+hwt_cpu_free(struct hwt_cpu *cpu)
+{
+
+ free(cpu, M_HWT_CPU);
+}
+
+struct hwt_cpu *
+hwt_cpu_first(struct hwt_context *ctx)
+{
+ struct hwt_cpu *cpu;
+
+ HWT_CTX_ASSERT_LOCKED(ctx);
+
+ cpu = TAILQ_FIRST(&ctx->cpus);
+
+ KASSERT(cpu != NULL, ("cpu is NULL"));
+
+ return (cpu);
+}
+
+struct hwt_cpu *
+hwt_cpu_get(struct hwt_context *ctx, int cpu_id)
+{
+	struct hwt_cpu *cpu;
+
+	HWT_CTX_ASSERT_LOCKED(ctx);
+
+	TAILQ_FOREACH(cpu, &ctx->cpus, next) {
+		if (cpu->cpu_id == cpu_id)
+			return (cpu);
+	}
+
+	return (NULL);
+}
+
+void
+hwt_cpu_insert(struct hwt_context *ctx, struct hwt_cpu *cpu)
+{
+
+ HWT_CTX_ASSERT_LOCKED(ctx);
+
+ TAILQ_INSERT_TAIL(&ctx->cpus, cpu, next);
+}
diff --git a/sys/dev/hwt/hwt_cpu.h b/sys/dev/hwt/hwt_cpu.h
new file mode 100644
index 000000000000..92b89229b6e4
--- /dev/null
+++ b/sys/dev/hwt/hwt_cpu.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_CPU_H_
+#define _DEV_HWT_HWT_CPU_H_
+
+struct hwt_cpu {
+ int cpu_id;
+ struct hwt_vm *vm;
+ TAILQ_ENTRY(hwt_cpu) next;
+};
+
+struct hwt_cpu * hwt_cpu_alloc(void);
+void hwt_cpu_free(struct hwt_cpu *cpu);
+
+struct hwt_cpu * hwt_cpu_first(struct hwt_context *ctx);
+struct hwt_cpu * hwt_cpu_get(struct hwt_context *ctx, int cpu_id);
+void hwt_cpu_insert(struct hwt_context *ctx, struct hwt_cpu *cpu);
+
+#endif /* !_DEV_HWT_HWT_CPU_H_ */
diff --git a/sys/dev/hwt/hwt_hook.c b/sys/dev/hwt/hwt_hook.c
new file mode 100644
index 000000000000..258279b14f20
--- /dev/null
+++ b/sys/dev/hwt/hwt_hook.c
@@ -0,0 +1,323 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Hardware Trace (HWT) framework. */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/refcount.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_record.h>
+#include <dev/hwt/hwt_vm.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static void
+hwt_switch_in(struct thread *td)
+{
+ struct hwt_context *ctx;
+ struct hwt_thread *thr;
+ struct proc *p;
+ int cpu_id;
+
+ p = td->td_proc;
+
+ cpu_id = PCPU_GET(cpuid);
+
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL)
+ return;
+
+ if (ctx->state != CTX_STATE_RUNNING) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+
+ thr = hwt_thread_lookup(ctx, td);
+ if (thr == NULL) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+
+ dprintf("%s: thr %p index %d tid %d on cpu_id %d\n", __func__, thr,
+ thr->thread_id, td->td_tid, cpu_id);
+
+ hwt_backend_configure(ctx, cpu_id, thr->thread_id);
+ hwt_backend_enable(ctx, cpu_id);
+
+ hwt_ctx_put(ctx);
+}
+
+static void
+hwt_switch_out(struct thread *td)
+{
+ struct hwt_context *ctx;
+ struct hwt_thread *thr;
+ struct proc *p;
+ int cpu_id;
+
+ p = td->td_proc;
+
+ cpu_id = PCPU_GET(cpuid);
+
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL)
+ return;
+
+ if (ctx->state != CTX_STATE_RUNNING) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+ thr = hwt_thread_lookup(ctx, td);
+ if (thr == NULL) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+
+ dprintf("%s: thr %p index %d tid %d on cpu_id %d\n", __func__, thr,
+ thr->thread_id, td->td_tid, cpu_id);
+
+ hwt_backend_disable(ctx, cpu_id);
+
+ hwt_ctx_put(ctx);
+}
+
+static void
+hwt_hook_thread_exit(struct thread *td)
+{
+ struct hwt_context *ctx;
+ struct hwt_thread *thr;
+ struct proc *p;
+ int cpu_id;
+
+ p = td->td_proc;
+
+ cpu_id = PCPU_GET(cpuid);
+
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL)
+ return;
+
+ thr = hwt_thread_lookup(ctx, td);
+ if (thr == NULL) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+
+ thr->state = HWT_THREAD_STATE_EXITED;
+
+ dprintf("%s: thr %p index %d tid %d on cpu_id %d\n", __func__, thr,
+ thr->thread_id, td->td_tid, cpu_id);
+
+ if (ctx->state == CTX_STATE_RUNNING)
+ hwt_backend_disable(ctx, cpu_id);
+
+ hwt_ctx_put(ctx);
+}
+
+static void
+hwt_hook_mmap(struct thread *td)
+{
+ struct hwt_context *ctx;
+ struct hwt_thread *thr;
+ struct proc *p;
+ int pause;
+
+ p = td->td_proc;
+
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL)
+ return;
+
+	/* The ctx could be in any state at this point. */
+
+ pause = ctx->pause_on_mmap ? 1 : 0;
+
+ thr = hwt_thread_lookup(ctx, td);
+ if (thr == NULL) {
+ hwt_ctx_put(ctx);
+ return;
+ }
+
+ /*
+	 * msleep(9) atomically releases the mtx lock, so take a refcount
+	 * to ensure that thr is not destroyed.
+	 * It cannot be destroyed before this point, as we are holding the
+	 * ctx refcount.
+ */
+ refcount_acquire(&thr->refcnt);
+ hwt_ctx_put(ctx);
+
+ if (pause) {
+ HWT_THR_LOCK(thr);
+ msleep(thr, &thr->mtx, PCATCH, "hwt-mmap", 0);
+ HWT_THR_UNLOCK(thr);
+ }
+
+ if (refcount_release(&thr->refcnt))
+ hwt_thread_free(thr);
+}
+
+static int
+hwt_hook_thread_create(struct thread *td)
+{
+ struct hwt_record_entry *entry;
+ struct hwt_context *ctx;
+ struct hwt_thread *thr;
+ char path[MAXPATHLEN];
+ size_t bufsize;
+ struct proc *p;
+ int thread_id, kva_req;
+ int error;
+
+ p = td->td_proc;
+
+ /* Step 1. Get CTX and collect information needed. */
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL)
+ return (ENXIO);
+ thread_id = atomic_fetchadd_int(&ctx->thread_counter, 1);
+ bufsize = ctx->bufsize;
+ kva_req = ctx->hwt_backend->kva_req;
+	snprintf(path, MAXPATHLEN, "hwt_%d_%d", ctx->ident, thread_id);
+ hwt_ctx_put(ctx);
+
+ /* Step 2. Allocate some memory without holding ctx ref. */
+ error = hwt_thread_alloc(&thr, path, bufsize, kva_req);
+ if (error) {
+ printf("%s: could not allocate thread, error %d\n",
+ __func__, error);
+ return (error);
+ }
+
+ entry = hwt_record_entry_alloc();
+ entry->record_type = HWT_RECORD_THREAD_CREATE;
+ entry->thread_id = thread_id;
+
+ /* Step 3. Get CTX once again. */
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL) {
+ hwt_record_entry_free(entry);
+ hwt_thread_free(thr);
+		/* The stale ctx->thread_counter increment is harmless. */
+ return (ENXIO);
+ }
+ /* Allocate backend-specific thread data. */
+ error = hwt_backend_thread_alloc(ctx, thr);
+	if (error != 0) {
+		dprintf("%s: failed to allocate backend thread data\n",
+		    __func__);
+		hwt_record_entry_free(entry);
+		hwt_thread_free(thr);
+		hwt_ctx_put(ctx);
+		return (error);
+	}
+
+ thr->vm->ctx = ctx;
+ thr->ctx = ctx;
+ thr->backend = ctx->hwt_backend;
+ thr->thread_id = thread_id;
+ thr->td = td;
+
+ HWT_CTX_LOCK(ctx);
+ hwt_thread_insert(ctx, thr, entry);
+ HWT_CTX_UNLOCK(ctx);
+
+ /* Notify userspace. */
+ hwt_record_wakeup(ctx);
+
+ hwt_ctx_put(ctx);
+
+ return (0);
+}
+
+static void
+hwt_hook_handler(struct thread *td, int func, void *arg)
+{
+ struct proc *p;
+
+ p = td->td_proc;
+ if ((p->p_flag2 & P2_HWT) == 0)
+ return;
+
+ switch (func) {
+ case HWT_SWITCH_IN:
+ hwt_switch_in(td);
+ break;
+ case HWT_SWITCH_OUT:
+ hwt_switch_out(td);
+ break;
+ case HWT_THREAD_CREATE:
+ hwt_hook_thread_create(td);
+ break;
+ case HWT_THREAD_SET_NAME:
+ /* TODO. */
+ break;
+ case HWT_THREAD_EXIT:
+ hwt_hook_thread_exit(td);
+ break;
+ case HWT_EXEC:
+ case HWT_MMAP:
+ hwt_record_td(td, arg, M_WAITOK | M_ZERO);
+ hwt_hook_mmap(td);
+ break;
+ case HWT_RECORD:
+ hwt_record_td(td, arg, M_WAITOK | M_ZERO);
+ break;
+	}
+}
+
+void
+hwt_hook_load(void)
+{
+
+ hwt_hook = hwt_hook_handler;
+}
+
+void
+hwt_hook_unload(void)
+{
+
+ hwt_hook = NULL;
+}
diff --git a/sys/dev/hwt/hwt_hook.h b/sys/dev/hwt/hwt_hook.h
new file mode 100644
index 000000000000..a8eccba3ec43
--- /dev/null
+++ b/sys/dev/hwt/hwt_hook.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_HOOK_H_
+#define _DEV_HWT_HWT_HOOK_H_
+
+#include <sys/hwt_record.h>
+
+#define HWT_SWITCH_IN 0
+#define HWT_SWITCH_OUT 1
+#define HWT_THREAD_EXIT 2
+#define HWT_THREAD_CREATE 3
+#define HWT_THREAD_SET_NAME 4
+#define HWT_RECORD 5
+#define HWT_MMAP 6
+#define HWT_EXEC 7
+
+#define HWT_CALL_HOOK(td, func, arg) \
+do { \
+ if (hwt_hook != NULL) \
+ (hwt_hook)((td), (func), (arg)); \
+} while (0)
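+
+/*
+ * Illustrative call site (a sketch, not part of this header): the
+ * scheduler would invoke the hook on a context switch as, e.g.,
+ *
+ *	HWT_CALL_HOOK(td, HWT_SWITCH_IN, NULL);
+ */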
+
+#define HWT_HOOK_INSTALLED (hwt_hook != NULL)
+
+extern void (*hwt_hook)(struct thread *td, int func, void *arg);
+
+void hwt_hook_load(void);
+void hwt_hook_unload(void);
+
+#endif /* !_DEV_HWT_HWT_HOOK_H_ */
diff --git a/sys/dev/hwt/hwt_intr.h b/sys/dev/hwt/hwt_intr.h
new file mode 100644
index 000000000000..e601969f001c
--- /dev/null
+++ b/sys/dev/hwt/hwt_intr.h
@@ -0,0 +1,33 @@
+/*-
+ * Copyright (c) 2023-2025 Bojan Novković <bnovkov@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_INTR_H_
+#define _DEV_HWT_HWT_INTR_H_
+
+#include <machine/frame.h>
+
+extern int (*hwt_intr)(struct trapframe *tf);
+
+#endif /* !_DEV_HWT_HWT_INTR_H_ */
diff --git a/sys/dev/hwt/hwt_ioctl.c b/sys/dev/hwt/hwt_ioctl.c
new file mode 100644
index 000000000000..184c7e72f986
--- /dev/null
+++ b/sys/dev/hwt/hwt_ioctl.c
@@ -0,0 +1,444 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Hardware Trace (HWT) framework. */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/rwlock.h>
+#include <sys/smp.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_cpu.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_record.h>
+#include <dev/hwt/hwt_ioctl.h>
+#include <dev/hwt/hwt_vm.h>
+
+#define HWT_IOCTL_DEBUG
+#undef HWT_IOCTL_DEBUG
+
+#ifdef HWT_IOCTL_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+/* No real reason for this limit; it is just a sanity check. */
+#define HWT_MAXBUFSIZE (32UL * 1024 * 1024 * 1024) /* 32 GB */
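+/* bufsize must also be a multiple of PAGE_SIZE; see hwt_ioctl_alloc(). */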
+
+static MALLOC_DEFINE(M_HWT_IOCTL, "hwt_ioctl", "Hardware Trace");
+
+/*
+ * Check if owner process *o can trace target process *t.
+ */
+
+static int
+hwt_priv_check(struct proc *o, struct proc *t)
+{
+ struct ucred *oc, *tc;
+ int error;
+ int i;
+
+ PROC_LOCK(o);
+ oc = o->p_ucred;
+ crhold(oc);
+ PROC_UNLOCK(o);
+
+ PROC_LOCK_ASSERT(t, MA_OWNED);
+ tc = t->p_ucred;
+ crhold(tc);
+
+ error = 0;
+
+ /*
+ * The effective uid of the HWT owner should match at least one
+ * of the effective / real / saved uids of the target process.
+ */
+
+ if (oc->cr_uid != tc->cr_uid &&
+ oc->cr_uid != tc->cr_svuid &&
+ oc->cr_uid != tc->cr_ruid) {
+ error = EPERM;
+ goto done;
+ }
+
+ /*
+ * Everyone of the target's group ids must be in the owner's
+ * group list.
+ */
+ for (i = 0; i < tc->cr_ngroups; i++)
+ if (!groupmember(tc->cr_groups[i], oc)) {
+ error = EPERM;
+ goto done;
+ }
+ if (!groupmember(tc->cr_gid, oc) ||
+ !groupmember(tc->cr_rgid, oc) ||
+ !groupmember(tc->cr_svgid, oc)) {
+ error = EPERM;
+ goto done;
+ }
+
+done:
+ crfree(tc);
+ crfree(oc);
+
+ return (error);
+}
+
+static int
+hwt_ioctl_alloc_mode_thread(struct thread *td, struct hwt_owner *ho,
+ struct hwt_backend *backend, struct hwt_alloc *halloc)
+{
+ struct thread **threads, *td1;
+ struct hwt_record_entry *entry;
+ struct hwt_context *ctx, *ctx1;
+ struct hwt_thread *thr;
+ char path[MAXPATHLEN];
+ struct proc *p;
+ int thread_id;
+ int error;
+ int cnt;
+ int i;
+
+	/* Check if the owner has this pid configured already. */
+ ctx = hwt_owner_lookup_ctx(ho, halloc->pid);
+ if (ctx)
+ return (EEXIST);
+
+ /* Allocate a new HWT context. */
+ error = hwt_ctx_alloc(&ctx);
+ if (error)
+ return (error);
+ ctx->bufsize = halloc->bufsize;
+ ctx->pid = halloc->pid;
+ ctx->hwt_backend = backend;
+ ctx->hwt_owner = ho;
+ ctx->mode = HWT_MODE_THREAD;
+ ctx->hwt_td = td;
+ ctx->kqueue_fd = halloc->kqueue_fd;
+
+ error = copyout(&ctx->ident, halloc->ident, sizeof(int));
+ if (error) {
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ /* Now get the victim proc. */
+ p = pfind(halloc->pid);
+ if (p == NULL) {
+ hwt_ctx_free(ctx);
+ return (ENXIO);
+ }
+
+ /* Ensure we can trace it. */
+ error = hwt_priv_check(td->td_proc, p);
+ if (error) {
+ PROC_UNLOCK(p);
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ /* Ensure it is not being traced already. */
+ ctx1 = hwt_contexthash_lookup(p);
+ if (ctx1) {
+ refcount_release(&ctx1->refcnt);
+ PROC_UNLOCK(p);
+ hwt_ctx_free(ctx);
+ return (EEXIST);
+ }
+
+ /* Allocate hwt threads and buffers. */
+
+ cnt = 0;
+
+ FOREACH_THREAD_IN_PROC(p, td1) {
+ cnt += 1;
+ }
+
+ KASSERT(cnt > 0, ("no threads"));
+
+ threads = malloc(sizeof(struct thread *) * cnt, M_HWT_IOCTL,
+ M_NOWAIT | M_ZERO);
+ if (threads == NULL) {
+ PROC_UNLOCK(p);
+ hwt_ctx_free(ctx);
+ return (ENOMEM);
+ }
+
+ i = 0;
+
+ FOREACH_THREAD_IN_PROC(p, td1) {
+ threads[i++] = td1;
+ }
+
+ ctx->proc = p;
+ PROC_UNLOCK(p);
+
+ for (i = 0; i < cnt; i++) {
+ thread_id = atomic_fetchadd_int(&ctx->thread_counter, 1);
+		snprintf(path, MAXPATHLEN, "hwt_%d_%d", ctx->ident, thread_id);
+
+ error = hwt_thread_alloc(&thr, path, ctx->bufsize,
+ ctx->hwt_backend->kva_req);
+ if (error) {
+ free(threads, M_HWT_IOCTL);
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+ /* Allocate backend-specific thread data. */
+ error = hwt_backend_thread_alloc(ctx, thr);
+		if (error != 0) {
+			dprintf("%s: failed to allocate thread backend data\n",
+			    __func__);
+			hwt_thread_free(thr);
+			free(threads, M_HWT_IOCTL);
+			hwt_ctx_free(ctx);
+			return (error);
+		}
+
+ /*
+ * Insert a THREAD_CREATE record so userspace picks up
+ * the thread's tracing buffers.
+ */
+ entry = hwt_record_entry_alloc();
+ entry->record_type = HWT_RECORD_THREAD_CREATE;
+ entry->thread_id = thread_id;
+
+ thr->vm->ctx = ctx;
+ thr->td = threads[i];
+ thr->ctx = ctx;
+ thr->backend = ctx->hwt_backend;
+ thr->thread_id = thread_id;
+
+ HWT_CTX_LOCK(ctx);
+ hwt_thread_insert(ctx, thr, entry);
+ HWT_CTX_UNLOCK(ctx);
+ }
+
+ free(threads, M_HWT_IOCTL);
+
+ error = hwt_backend_init(ctx);
+ if (error) {
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ /* hwt_owner_insert_ctx? */
+ mtx_lock(&ho->mtx);
+ LIST_INSERT_HEAD(&ho->hwts, ctx, next_hwts);
+ mtx_unlock(&ho->mtx);
+
+ /*
+ * Hooks are now in action after this, but the ctx is not in RUNNING
+ * state.
+ */
+ hwt_contexthash_insert(ctx);
+
+ p = pfind(halloc->pid);
+ if (p) {
+ p->p_flag2 |= P2_HWT;
+ PROC_UNLOCK(p);
+ }
+
+ return (0);
+}
+
+static int
+hwt_ioctl_alloc_mode_cpu(struct thread *td, struct hwt_owner *ho,
+ struct hwt_backend *backend, struct hwt_alloc *halloc)
+{
+ struct hwt_context *ctx;
+ struct hwt_cpu *cpu;
+ struct hwt_vm *vm;
+ char path[MAXPATHLEN];
+ size_t cpusetsize;
+ cpuset_t cpu_map;
+ int cpu_count = 0;
+ int cpu_id;
+ int error;
+
+ CPU_ZERO(&cpu_map);
+ cpusetsize = min(halloc->cpusetsize, sizeof(cpuset_t));
+ error = copyin(halloc->cpu_map, &cpu_map, cpusetsize);
+ if (error)
+ return (error);
+
+ CPU_FOREACH_ISSET(cpu_id, &cpu_map) {
+#ifdef SMP
+ /* Ensure CPU is not halted. */
+ if (CPU_ISSET(cpu_id, &hlt_cpus_mask))
+ return (ENXIO);
+#endif
+#if 0
+		/* TODO: Check if the owner has this cpu configured already. */
+ ctx = hwt_owner_lookup_ctx_by_cpu(ho, halloc->cpu);
+ if (ctx)
+ return (EEXIST);
+#endif
+
+ cpu_count++;
+ }
+
+ if (cpu_count == 0)
+ return (ENODEV);
+
+ /* Allocate a new HWT context. */
+ error = hwt_ctx_alloc(&ctx);
+ if (error)
+ return (error);
+ ctx->bufsize = halloc->bufsize;
+ ctx->hwt_backend = backend;
+ ctx->hwt_owner = ho;
+ ctx->mode = HWT_MODE_CPU;
+ ctx->cpu_map = cpu_map;
+ ctx->hwt_td = td;
+ ctx->kqueue_fd = halloc->kqueue_fd;
+
+ error = copyout(&ctx->ident, halloc->ident, sizeof(int));
+ if (error) {
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ CPU_FOREACH_ISSET(cpu_id, &cpu_map) {
+		snprintf(path, MAXPATHLEN, "hwt_%d_%d", ctx->ident, cpu_id);
+ error = hwt_vm_alloc(ctx->bufsize, ctx->hwt_backend->kva_req,
+ path, &vm);
+ if (error) {
+ /* TODO: remove all allocated cpus. */
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ cpu = hwt_cpu_alloc();
+ cpu->cpu_id = cpu_id;
+ cpu->vm = vm;
+
+ vm->cpu = cpu;
+ vm->ctx = ctx;
+
+ HWT_CTX_LOCK(ctx);
+ hwt_cpu_insert(ctx, cpu);
+ HWT_CTX_UNLOCK(ctx);
+ }
+
+ error = hwt_backend_init(ctx);
+ if (error) {
+ /* TODO: remove all allocated cpus. */
+ hwt_ctx_free(ctx);
+ return (error);
+ }
+
+ /* hwt_owner_insert_ctx? */
+ mtx_lock(&ho->mtx);
+ LIST_INSERT_HEAD(&ho->hwts, ctx, next_hwts);
+ mtx_unlock(&ho->mtx);
+
+ hwt_record_kernel_objects(ctx);
+
+ return (0);
+}
+
+static int
+hwt_ioctl_alloc(struct thread *td, struct hwt_alloc *halloc)
+{
+ char backend_name[HWT_BACKEND_MAXNAMELEN];
+ struct hwt_backend *backend;
+ struct hwt_owner *ho;
+ int error;
+
+ if (halloc->bufsize > HWT_MAXBUFSIZE)
+ return (EINVAL);
+ if (halloc->bufsize % PAGE_SIZE)
+ return (EINVAL);
+ if (halloc->backend_name == NULL)
+ return (EINVAL);
+
+ error = copyinstr(halloc->backend_name, (void *)backend_name,
+ HWT_BACKEND_MAXNAMELEN, NULL);
+ if (error)
+ return (error);
+
+ backend = hwt_backend_lookup(backend_name);
+ if (backend == NULL)
+ return (ENODEV);
+
+ /* First get the owner. */
+ ho = hwt_ownerhash_lookup(td->td_proc);
+ if (ho == NULL) {
+ /* Create a new owner. */
+ ho = hwt_owner_alloc(td->td_proc);
+ if (ho == NULL)
+ return (ENOMEM);
+ hwt_ownerhash_insert(ho);
+ }
+
+ switch (halloc->mode) {
+ case HWT_MODE_THREAD:
+ error = hwt_ioctl_alloc_mode_thread(td, ho, backend, halloc);
+ break;
+ case HWT_MODE_CPU:
+ error = hwt_ioctl_alloc_mode_cpu(td, ho, backend, halloc);
+ break;
+ default:
+ error = ENXIO;
+	}
+
+ return (error);
+}
+
+int
+hwt_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+ int error;
+
+ switch (cmd) {
+ case HWT_IOC_ALLOC:
+ /* Allocate HWT context. */
+ error = hwt_ioctl_alloc(td, (struct hwt_alloc *)addr);
+ return (error);
+ default:
+ return (ENXIO);
+	}
+}
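+
+/*
+ * Illustrative userspace usage, a sketch only: the exact struct hwt_alloc
+ * layout and the device path live in sys/hwt.h and are assumed here.
+ *
+ *	struct hwt_alloc al = { 0 };
+ *	int ident;
+ *	int fd = open("/dev/hwt", O_RDWR);	(assumed device path)
+ *
+ *	al.mode = HWT_MODE_THREAD;
+ *	al.pid = target_pid;
+ *	al.bufsize = 16 * 1024 * 1024;		(must be page-aligned)
+ *	al.backend_name = "coresight";		(assumed backend name)
+ *	al.ident = &ident;
+ *	al.kqueue_fd = kq;
+ *	ioctl(fd, HWT_IOC_ALLOC, &al);
+ */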
diff --git a/sys/dev/beri/virtio/virtio_mmio_platform.h b/sys/dev/hwt/hwt_ioctl.h
index e043f60c98e2..ce4270dc0d44 100644
--- a/sys/dev/beri/virtio/virtio_mmio_platform.h
+++ b/sys/dev/hwt/hwt_ioctl.h
@@ -1,10 +1,8 @@
/*-
- * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
- * All rights reserved.
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
*
- * This software was developed by SRI International and the University of
- * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
- * ("CTSRD"), as part of the DARPA CRASH research programme.
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,8 +26,10 @@
* SUCH DAMAGE.
*/
-#define Q_NOTIFY 0x01
-#define Q_PFN 0x02
-#define Q_INTR 0x04
-#define Q_SEL 0x08
-#define Q_NOTIFY1 0x10
+#ifndef _DEV_HWT_HWT_IOCTL_H_
+#define _DEV_HWT_HWT_IOCTL_H_
+
+int hwt_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td);
+
+#endif /* !_DEV_HWT_HWT_IOCTL_H_ */
diff --git a/sys/dev/hwt/hwt_owner.c b/sys/dev/hwt/hwt_owner.c
new file mode 100644
index 000000000000..3c82040578de
--- /dev/null
+++ b/sys/dev/hwt/hwt_owner.c
@@ -0,0 +1,157 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/rwlock.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_cpu.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_vm.h>
+#include <dev/hwt/hwt_record.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_OWNER, "hwt_owner", "Hardware Trace");
+
+struct hwt_context *
+hwt_owner_lookup_ctx(struct hwt_owner *ho, pid_t pid)
+{
+ struct hwt_context *ctx;
+
+ mtx_lock(&ho->mtx);
+ LIST_FOREACH(ctx, &ho->hwts, next_hwts) {
+ if (ctx->pid == pid) {
+ mtx_unlock(&ho->mtx);
+ return (ctx);
+ }
+ }
+ mtx_unlock(&ho->mtx);
+
+ return (NULL);
+}
+
+#if 0
+struct hwt_context *
+hwt_owner_lookup_ctx_by_cpu(struct hwt_owner *ho, int cpu)
+{
+ struct hwt_context *ctx;
+
+ mtx_lock(&ho->mtx);
+ LIST_FOREACH(ctx, &ho->hwts, next_hwts) {
+ if (ctx->cpu == cpu) {
+ mtx_unlock(&ho->mtx);
+ return (ctx);
+ }
+ }
+ mtx_unlock(&ho->mtx);
+
+ return (NULL);
+}
+#endif
+
+struct hwt_owner *
+hwt_owner_alloc(struct proc *p)
+{
+ struct hwt_owner *ho;
+
+ ho = malloc(sizeof(struct hwt_owner), M_HWT_OWNER,
+ M_WAITOK | M_ZERO);
+ ho->p = p;
+
+ LIST_INIT(&ho->hwts);
+ mtx_init(&ho->mtx, "hwts", NULL, MTX_DEF);
+
+ return (ho);
+}
+
+void
+hwt_owner_shutdown(struct hwt_owner *ho)
+{
+ struct hwt_context *ctx;
+
+ dprintf("%s: stopping hwt owner\n", __func__);
+
+ while (1) {
+ mtx_lock(&ho->mtx);
+ ctx = LIST_FIRST(&ho->hwts);
+ if (ctx)
+ LIST_REMOVE(ctx, next_hwts);
+ mtx_unlock(&ho->mtx);
+
+ if (ctx == NULL)
+ break;
+
+ if (ctx->mode == HWT_MODE_THREAD)
+ hwt_contexthash_remove(ctx);
+
+ /*
+		 * A hook could still be dealing with this ctx at this point.
+ */
+
+ HWT_CTX_LOCK(ctx);
+		ctx->state = CTX_STATE_STOPPED;
+ HWT_CTX_UNLOCK(ctx);
+
+		/* Wait for any in-flight hook invocations to complete. */
+ while (refcount_load(&ctx->refcnt) > 0)
+ continue;
+
+ /*
+		 * Note that a thread could still be sleeping in msleep(9).
+ */
+
+ hwt_backend_deinit(ctx);
+ hwt_record_free_all(ctx);
+ hwt_ctx_free(ctx);
+ }
+
+ hwt_ownerhash_remove(ho);
+ free(ho, M_HWT_OWNER);
+}
diff --git a/sys/dev/hwt/hwt_owner.h b/sys/dev/hwt/hwt_owner.h
new file mode 100644
index 000000000000..2ac569a55050
--- /dev/null
+++ b/sys/dev/hwt/hwt_owner.h
@@ -0,0 +1,45 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_OWNER_H_
+#define _DEV_HWT_HWT_OWNER_H_
+
+struct hwt_owner {
+ struct proc *p;
+ struct mtx mtx; /* Protects hwts. */
+ LIST_HEAD(, hwt_context) hwts; /* Owned HWTs. */
+ LIST_ENTRY(hwt_owner) next; /* Entry in hwt owner hash. */
+};
+
+struct hwt_context * hwt_owner_lookup_ctx(struct hwt_owner *ho, pid_t pid);
+struct hwt_owner * hwt_owner_alloc(struct proc *p);
+void hwt_owner_shutdown(struct hwt_owner *ho);
+struct hwt_context * hwt_owner_lookup_ctx_by_cpu(struct hwt_owner *ho, int cpu);
+
+#endif /* !_DEV_HWT_HWT_OWNER_H_ */
diff --git a/sys/dev/hwt/hwt_ownerhash.c b/sys/dev/hwt/hwt_ownerhash.c
new file mode 100644
index 000000000000..7c9e2232bac4
--- /dev/null
+++ b/sys/dev/hwt/hwt_ownerhash.c
@@ -0,0 +1,141 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/mutex.h>
+#include <sys/rwlock.h>
+#include <sys/hwt.h>
+
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+
+#define HWT_DEBUG
+#undef HWT_DEBUG
+
+#ifdef HWT_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+#define HWT_OWNERHASH_SIZE 1024
+
+static MALLOC_DEFINE(M_HWT_OWNERHASH, "hwt_ohash", "Hardware Trace");
+
+/*
+ * Hash function. Discard the lower 2 bits of the pointer since
+ * these are always zero for our uses. The hash multiplier is
+ * round((2^LONG_BIT) * ((sqrt(5)-1)/2)).
+ */
+
+#define _HWT_HM 11400714819323198486u /* hash multiplier */
+#define HWT_HASH_PTR(P, M) ((((unsigned long) (P) >> 2) * _HWT_HM) & (M))
+
+static struct mtx hwt_ownerhash_mtx;
+static u_long hwt_ownerhashmask;
+static LIST_HEAD(hwt_ownerhash, hwt_owner) *hwt_ownerhash;
+
+struct hwt_owner *
+hwt_ownerhash_lookup(struct proc *p)
+{
+ struct hwt_ownerhash *hoh;
+ struct hwt_owner *ho;
+ int hindex;
+
+ hindex = HWT_HASH_PTR(p, hwt_ownerhashmask);
+ hoh = &hwt_ownerhash[hindex];
+
+ HWT_OWNERHASH_LOCK();
+ LIST_FOREACH(ho, hoh, next) {
+ if (ho->p == p) {
+ HWT_OWNERHASH_UNLOCK();
+ return (ho);
+ }
+ }
+ HWT_OWNERHASH_UNLOCK();
+
+ return (NULL);
+}
+
+void
+hwt_ownerhash_insert(struct hwt_owner *ho)
+{
+ struct hwt_ownerhash *hoh;
+ int hindex;
+
+ hindex = HWT_HASH_PTR(ho->p, hwt_ownerhashmask);
+ hoh = &hwt_ownerhash[hindex];
+
+ HWT_OWNERHASH_LOCK();
+ LIST_INSERT_HEAD(hoh, ho, next);
+ HWT_OWNERHASH_UNLOCK();
+}
+
+void
+hwt_ownerhash_remove(struct hwt_owner *ho)
+{
+
+	/* Remove the owner from the hash; the caller frees it. */
+ HWT_OWNERHASH_LOCK();
+ LIST_REMOVE(ho, next);
+ HWT_OWNERHASH_UNLOCK();
+}
+
+void
+hwt_ownerhash_load(void)
+{
+
+ hwt_ownerhash = hashinit(HWT_OWNERHASH_SIZE, M_HWT_OWNERHASH,
+ &hwt_ownerhashmask);
+ mtx_init(&hwt_ownerhash_mtx, "hwt-owner-hash", "hwt-owner", MTX_DEF);
+}
+
+void
+hwt_ownerhash_unload(void)
+{
+ struct hwt_ownerhash *hoh;
+ struct hwt_owner *ho, *tmp;
+
+ HWT_OWNERHASH_LOCK();
+ for (hoh = hwt_ownerhash;
+ hoh <= &hwt_ownerhash[hwt_ownerhashmask];
+ hoh++) {
+ LIST_FOREACH_SAFE(ho, hoh, next, tmp) {
+			/* TODO: module is in use? */
+ }
+ }
+ HWT_OWNERHASH_UNLOCK();
+
+ mtx_destroy(&hwt_ownerhash_mtx);
+ hashdestroy(hwt_ownerhash, M_HWT_OWNERHASH, hwt_ownerhashmask);
+}
diff --git a/sys/dev/hwt/hwt_ownerhash.h b/sys/dev/hwt/hwt_ownerhash.h
new file mode 100644
index 000000000000..4a7bc958d0f7
--- /dev/null
+++ b/sys/dev/hwt/hwt_ownerhash.h
@@ -0,0 +1,42 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_OWNERHASH_H_
+#define _DEV_HWT_HWT_OWNERHASH_H_
+
+struct hwt_owner * hwt_ownerhash_lookup(struct proc *p);
+void hwt_ownerhash_insert(struct hwt_owner *ho);
+void hwt_ownerhash_remove(struct hwt_owner *ho);
+
+void hwt_ownerhash_load(void);
+void hwt_ownerhash_unload(void);
+
+#define HWT_OWNERHASH_LOCK() mtx_lock(&hwt_ownerhash_mtx)
+#define HWT_OWNERHASH_UNLOCK() mtx_unlock(&hwt_ownerhash_mtx)
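+
+/*
+ * Unlike the context hash (a spin mutex), the owner hash is protected by
+ * a default, sleepable mutex; see hwt_ownerhash_load().
+ */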
+
+#endif /* !_DEV_HWT_HWT_OWNERHASH_H_ */
diff --git a/sys/dev/hwt/hwt_record.c b/sys/dev/hwt/hwt_record.c
new file mode 100644
index 000000000000..850ea6f8c5be
--- /dev/null
+++ b/sys/dev/hwt/hwt_record.c
@@ -0,0 +1,302 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/hwt.h>
+#include <sys/linker.h>
+#include <sys/pmckern.h> /* linker_hwpmc_list_objects */
+
+#include <vm/vm.h>
+#include <vm/uma.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_record.h>
+
+#define HWT_RECORD_DEBUG
+#undef HWT_RECORD_DEBUG
+
+#ifdef HWT_RECORD_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_RECORD, "hwt_record", "Hardware Trace");
+static uma_zone_t record_zone = NULL;
+
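+/*
+ * Deep-copy a record entry. Path-carrying records get their own copy of
+ * fullpath, since the source entry is owned by the caller.
+ */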
+static struct hwt_record_entry *
+hwt_record_clone(struct hwt_record_entry *ent, int flags)
+{
+ struct hwt_record_entry *entry;
+
+ entry = uma_zalloc(record_zone, flags);
+ if (entry == NULL)
+ return (NULL);
+ memcpy(entry, ent, sizeof(struct hwt_record_entry));
+ switch (ent->record_type) {
+ case HWT_RECORD_MMAP:
+ case HWT_RECORD_EXECUTABLE:
+ case HWT_RECORD_KERNEL:
+ entry->fullpath = strdup(ent->fullpath, M_HWT_RECORD);
+ break;
+ default:
+ break;
+ }
+
+ return (entry);
+}
+
+static void
+hwt_record_to_user(struct hwt_record_entry *ent,
+ struct hwt_record_user_entry *usr)
+{
+ usr->record_type = ent->record_type;
+ switch (ent->record_type) {
+ case HWT_RECORD_MMAP:
+ case HWT_RECORD_EXECUTABLE:
+ case HWT_RECORD_KERNEL:
+ usr->addr = ent->addr;
+ usr->baseaddr = ent->baseaddr;
+		strlcpy(usr->fullpath, ent->fullpath, MAXPATHLEN);
+ break;
+ case HWT_RECORD_BUFFER:
+ usr->buf_id = ent->buf_id;
+ usr->curpage = ent->curpage;
+ usr->offset = ent->offset;
+ break;
+ case HWT_RECORD_THREAD_CREATE:
+ case HWT_RECORD_THREAD_SET_NAME:
+ usr->thread_id = ent->thread_id;
+ break;
+ default:
+ break;
+ }
+}
+
+void
+hwt_record_load(void)
+{
+ record_zone = uma_zcreate("HWT records",
+ sizeof(struct hwt_record_entry), NULL, NULL, NULL, NULL, 0, 0);
+}
+
+void
+hwt_record_unload(void)
+{
+ uma_zdestroy(record_zone);
+}
+
+void
+hwt_record_ctx(struct hwt_context *ctx, struct hwt_record_entry *ent, int flags)
+{
+ struct hwt_record_entry *entry;
+
+ KASSERT(ent != NULL, ("ent is NULL"));
+ entry = hwt_record_clone(ent, flags);
+ if (entry == NULL) {
+ /* XXX: Not sure what to do here other than logging an error. */
+ return;
+ }
+
+ HWT_CTX_LOCK(ctx);
+ TAILQ_INSERT_TAIL(&ctx->records, entry, next);
+ HWT_CTX_UNLOCK(ctx);
+ hwt_record_wakeup(ctx);
+}
+
+void
+hwt_record_td(struct thread *td, struct hwt_record_entry *ent, int flags)
+{
+ struct hwt_record_entry *entry;
+ struct hwt_context *ctx;
+ struct proc *p;
+
+ p = td->td_proc;
+
+ KASSERT(ent != NULL, ("ent is NULL"));
+ entry = hwt_record_clone(ent, flags);
+ if (entry == NULL) {
+ /* XXX: Not sure what to do here other than logging an error. */
+ return;
+ }
+ ctx = hwt_contexthash_lookup(p);
+ if (ctx == NULL) {
+ hwt_record_entry_free(entry);
+ return;
+ }
+ HWT_CTX_LOCK(ctx);
+ TAILQ_INSERT_TAIL(&ctx->records, entry, next);
+ HWT_CTX_UNLOCK(ctx);
+ hwt_record_wakeup(ctx);
+
+ hwt_ctx_put(ctx);
+}
+
+struct hwt_record_entry *
+hwt_record_entry_alloc(void)
+{
+ return (uma_zalloc(record_zone, M_WAITOK | M_ZERO));
+}
+
+void
+hwt_record_entry_free(struct hwt_record_entry *entry)
+{
+
+ switch (entry->record_type) {
+ case HWT_RECORD_MMAP:
+ case HWT_RECORD_EXECUTABLE:
+ case HWT_RECORD_KERNEL:
+ free(entry->fullpath, M_HWT_RECORD);
+ break;
+ default:
+ break;
+ }
+
+ uma_zfree(record_zone, entry);
+}
+
+static int
+hwt_record_grab(struct hwt_context *ctx,
+ struct hwt_record_user_entry *user_entry, int nitems_req, int wait)
+{
+ struct hwt_record_entry *entry;
+ int i;
+
+ if (wait) {
+ mtx_lock(&ctx->rec_mtx);
+ if (TAILQ_FIRST(&ctx->records) == NULL) {
+ /* Wait until we have new records. */
+ msleep(ctx, &ctx->rec_mtx, PCATCH, "recsnd", 0);
+ }
+ mtx_unlock(&ctx->rec_mtx);
+ }
+
+ for (i = 0; i < nitems_req; i++) {
+ HWT_CTX_LOCK(ctx);
+ entry = TAILQ_FIRST(&ctx->records);
+ if (entry)
+ TAILQ_REMOVE_HEAD(&ctx->records, next);
+ HWT_CTX_UNLOCK(ctx);
+
+ if (entry == NULL)
+ break;
+ hwt_record_to_user(entry, &user_entry[i]);
+ hwt_record_entry_free(entry);
+ }
+
+ return (i);
+}
+
+void
+hwt_record_free_all(struct hwt_context *ctx)
+{
+ struct hwt_record_entry *entry;
+
+ while (1) {
+ HWT_CTX_LOCK(ctx);
+ entry = TAILQ_FIRST(&ctx->records);
+ if (entry)
+ TAILQ_REMOVE_HEAD(&ctx->records, next);
+ HWT_CTX_UNLOCK(ctx);
+
+ if (entry == NULL)
+ break;
+
+ hwt_record_entry_free(entry);
+ }
+}
+
+int
+hwt_record_send(struct hwt_context *ctx, struct hwt_record_get *record_get)
+{
+ struct hwt_record_user_entry *user_entry;
+ int nitems_req;
+ int error;
+ int i;
+
+ nitems_req = 0;
+
+ error = copyin(record_get->nentries, &nitems_req, sizeof(int));
+ if (error)
+ return (error);
+
+ if (nitems_req < 1 || nitems_req > 1024)
+ return (ENXIO);
+
+ user_entry = malloc(sizeof(struct hwt_record_user_entry) * nitems_req,
+ M_HWT_RECORD, M_WAITOK | M_ZERO);
+
+ i = hwt_record_grab(ctx, user_entry, nitems_req, record_get->wait);
+ if (i > 0)
+ error = copyout(user_entry, record_get->records,
+ sizeof(struct hwt_record_user_entry) * i);
+
+ if (error == 0)
+ error = copyout(&i, record_get->nentries, sizeof(int));
+
+ free(user_entry, M_HWT_RECORD);
+
+ return (error);
+}
+
+void
+hwt_record_kernel_objects(struct hwt_context *ctx)
+{
+ struct hwt_record_entry *entry;
+ struct pmckern_map_in *kobase;
+ int i;
+
+ kobase = linker_hwpmc_list_objects();
+ for (i = 0; kobase[i].pm_file != NULL; i++) {
+ entry = hwt_record_entry_alloc();
+ entry->record_type = HWT_RECORD_KERNEL;
+ entry->fullpath = strdup(kobase[i].pm_file, M_HWT_RECORD);
+ entry->addr = kobase[i].pm_address;
+
+ HWT_CTX_LOCK(ctx);
+ TAILQ_INSERT_HEAD(&ctx->records, entry, next);
+ HWT_CTX_UNLOCK(ctx);
+ }
+ free(kobase, M_LINKER);
+}
+
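+/*
+ * Wake up a reader sleeping in hwt_record_grab(); the wait channel is
+ * the ctx pointer itself, matching the msleep(9) call there.
+ */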
+void
+hwt_record_wakeup(struct hwt_context *ctx)
+{
+ wakeup(ctx);
+}
diff --git a/sys/dev/hwt/hwt_record.h b/sys/dev/hwt/hwt_record.h
new file mode 100644
index 000000000000..3f347ca67d54
--- /dev/null
+++ b/sys/dev/hwt/hwt_record.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_RECORD_H_
+#define _DEV_HWT_HWT_RECORD_H_
+
+struct hwt_record_get;
+
+void hwt_record_load(void);
+void hwt_record_unload(void);
+
+int hwt_record_send(struct hwt_context *ctx, struct hwt_record_get *record_get);
+void hwt_record_td(struct thread *td, struct hwt_record_entry *ent, int flags);
+void hwt_record_ctx(struct hwt_context *ctx, struct hwt_record_entry *ent,
+ int flags);
+struct hwt_record_entry * hwt_record_entry_alloc(void);
+void hwt_record_entry_free(struct hwt_record_entry *entry);
+void hwt_record_kernel_objects(struct hwt_context *ctx);
+void hwt_record_free_all(struct hwt_context *ctx);
+void hwt_record_wakeup(struct hwt_context *ctx);
+
+#endif /* !_DEV_HWT_HWT_RECORD_H_ */
diff --git a/sys/dev/hwt/hwt_thread.c b/sys/dev/hwt/hwt_thread.c
new file mode 100644
index 000000000000..827c068a681f
--- /dev/null
+++ b/sys/dev/hwt/hwt_thread.c
@@ -0,0 +1,162 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/refcount.h>
+#include <sys/rwlock.h>
+#include <sys/hwt.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_vm.h>
+#include <dev/hwt/hwt_record.h>
+
+#define HWT_THREAD_DEBUG
+#undef HWT_THREAD_DEBUG
+
+#ifdef HWT_THREAD_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_THREAD, "hwt_thread", "Hardware Trace");
+
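+/*
+ * Return the first thread on the context's thread list. The list is
+ * expected to be non-empty.
+ */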
+struct hwt_thread *
+hwt_thread_first(struct hwt_context *ctx)
+{
+ struct hwt_thread *thr;
+
+ HWT_CTX_ASSERT_LOCKED(ctx);
+
+ thr = TAILQ_FIRST(&ctx->threads);
+
+ KASSERT(thr != NULL, ("thr is NULL"));
+
+ return (thr);
+}
+
+/*
+ * For use by hwt_switch_in/out() only.
+ */
+struct hwt_thread *
+hwt_thread_lookup(struct hwt_context *ctx, struct thread *td)
+{
+ struct hwt_thread *thr;
+
+ /* Caller of this func holds ctx refcnt right here. */
+
+ HWT_CTX_LOCK(ctx);
+ TAILQ_FOREACH(thr, &ctx->threads, next) {
+ if (thr->td == td) {
+ HWT_CTX_UNLOCK(ctx);
+ return (thr);
+ }
+ }
+ HWT_CTX_UNLOCK(ctx);
+
+ /*
+ * We get here only if the hook on thread creation failed to allocate
+ * a hwt_thread for this thread.
+ */
+
+ return (NULL);
+}
+
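+/*
+ * Allocate a thread descriptor together with its trace buffer and the
+ * backing character device named by 'path'.
+ */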
+int
+hwt_thread_alloc(struct hwt_thread **thr0, char *path, size_t bufsize,
+ int kva_req)
+{
+ struct hwt_thread *thr;
+ struct hwt_vm *vm;
+ int error;
+
+ error = hwt_vm_alloc(bufsize, kva_req, path, &vm);
+ if (error)
+ return (error);
+
+ thr = malloc(sizeof(struct hwt_thread), M_HWT_THREAD,
+ M_WAITOK | M_ZERO);
+ thr->vm = vm;
+
+ mtx_init(&thr->mtx, "thr", NULL, MTX_DEF);
+
+ refcount_init(&thr->refcnt, 1);
+
+ vm->thr = thr;
+
+ *thr0 = thr;
+
+ return (0);
+}
+
+void
+hwt_thread_free(struct hwt_thread *thr)
+{
+
+ hwt_vm_free(thr->vm);
+ /* Free private backend data, if any. */
+ if (thr->private != NULL)
+ hwt_backend_thread_free(thr);
+ free(thr, M_HWT_THREAD);
+}
+
+/*
+ * Insert a new thread and a thread-creation record into the context,
+ * notifying userspace about the newly created thread.
+ */
+void
+hwt_thread_insert(struct hwt_context *ctx, struct hwt_thread *thr,
+ struct hwt_record_entry *entry)
+{
+
+ HWT_CTX_ASSERT_LOCKED(ctx);
+ TAILQ_INSERT_TAIL(&ctx->threads, thr, next);
+ TAILQ_INSERT_TAIL(&ctx->records, entry, next);
+}
diff --git a/sys/dev/sound/midi/sequencer.h b/sys/dev/hwt/hwt_thread.h
index 22ea0ae6c1b6..ccc29aeb3494 100644
--- a/sys/dev/sound/midi/sequencer.h
+++ b/sys/dev/hwt/hwt_thread.h
@@ -1,9 +1,8 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
*
- * Copyright (c) 2003 Mathew Kanner
- * Copyright (c) 1999 Seigo Tanimura
- * All rights reserved.
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,63 +26,39 @@
* SUCH DAMAGE.
*/
-/*
- * Include file for the midi sequence driver.
- */
-
-#ifndef _SEQUENCER_H_
-#define _SEQUENCER_H_
-
-#define NSEQ_MAX 16
-
-/*
- * many variables should be reduced to a range. Here define a macro
- */
-
-#define RANGE(var, low, high) (var) = \
-((var)<(low)?(low) : (var)>(high)?(high) : (var))
-
-#ifdef _KERNEL
-
-void seq_timer(void *arg);
-
-SYSCTL_DECL(_hw_midi_seq);
-
-extern int seq_debug;
-
-#define SEQ_DEBUG(y, x) \
- do { \
- if (seq_debug >= y) { \
- (x); \
- } \
- } while (0)
-
-SYSCTL_DECL(_hw_midi);
-
-#endif /* _KERNEL */
-
-#define SYNTHPROP_MIDI 1
-#define SYNTHPROP_SYNTH 2
-#define SYNTHPROP_RX 4
-#define SYNTHPROP_TX 8
-
-struct _midi_cmdtab {
- int cmd;
- char *name;
+#ifndef _DEV_HWT_HWT_THREAD_H_
+#define _DEV_HWT_HWT_THREAD_H_
+
+struct hwt_record_entry;
+
+struct hwt_thread {
+ struct hwt_vm *vm;
+ struct hwt_context *ctx;
+ struct hwt_backend *backend;
+ struct thread *td;
+ TAILQ_ENTRY(hwt_thread) next;
+ int thread_id;
+ int state;
+#define HWT_THREAD_STATE_EXITED (1 << 0)
+ struct mtx mtx;
+ u_int refcnt;
+ int cpu_id; /* last cpu_id */
+ void *private; /* backend-specific private data */
};
-typedef struct _midi_cmdtab midi_cmdtab;
-extern midi_cmdtab cmdtab_seqevent[];
-extern midi_cmdtab cmdtab_seqioctl[];
-extern midi_cmdtab cmdtab_timer[];
-extern midi_cmdtab cmdtab_seqcv[];
-extern midi_cmdtab cmdtab_seqccmn[];
-char *midi_cmdname(int cmd, midi_cmdtab * tab);
+/* Thread allocation. */
+int hwt_thread_alloc(struct hwt_thread **thr0, char *path, size_t bufsize,
+ int kva_req);
+void hwt_thread_free(struct hwt_thread *thr);
-enum {
- MORE,
- TIMERARMED,
- QUEUEFULL
-};
+/* Thread list mgt. */
+void hwt_thread_insert(struct hwt_context *ctx, struct hwt_thread *thr,
+ struct hwt_record_entry *entry);
+struct hwt_thread * hwt_thread_first(struct hwt_context *ctx);
+struct hwt_thread * hwt_thread_lookup(struct hwt_context *ctx,
+ struct thread *td);
+
+#define HWT_THR_LOCK(thr) mtx_lock(&(thr)->mtx)
+#define HWT_THR_UNLOCK(thr) mtx_unlock(&(thr)->mtx)
+#define HWT_THR_ASSERT_LOCKED(thr) mtx_assert(&(thr)->mtx, MA_OWNED)
-#endif
+#endif /* !_DEV_HWT_HWT_THREAD_H_ */
diff --git a/sys/dev/hwt/hwt_vm.c b/sys/dev/hwt/hwt_vm.c
new file mode 100644
index 000000000000..6c55e218dcec
--- /dev/null
+++ b/sys/dev/hwt/hwt_vm.c
@@ -0,0 +1,503 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/ioccom.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/refcount.h>
+#include <sys/rwlock.h>
+#include <sys/hwt.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_phys.h>
+
+#include <dev/hwt/hwt_hook.h>
+#include <dev/hwt/hwt_context.h>
+#include <dev/hwt/hwt_contexthash.h>
+#include <dev/hwt/hwt_config.h>
+#include <dev/hwt/hwt_cpu.h>
+#include <dev/hwt/hwt_owner.h>
+#include <dev/hwt/hwt_ownerhash.h>
+#include <dev/hwt/hwt_thread.h>
+#include <dev/hwt/hwt_backend.h>
+#include <dev/hwt/hwt_vm.h>
+#include <dev/hwt/hwt_record.h>
+
+#define HWT_VM_DEBUG
+#undef HWT_VM_DEBUG
+
+#ifdef HWT_VM_DEBUG
+#define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__)
+#else
+#define dprintf(fmt, ...)
+#endif
+
+static MALLOC_DEFINE(M_HWT_VM, "hwt_vm", "Hardware Trace");
+
+static int
+hwt_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
+ int prot, vm_page_t *mres)
+{
+
+ return (0);
+}
+
+static int
+hwt_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
+ vm_ooffset_t foff, struct ucred *cred, u_short *color)
+{
+
+ *color = 0;
+
+ return (0);
+}
+
+static void
+hwt_vm_dtor(void *handle)
+{
+
+}
+
+static struct cdev_pager_ops hwt_vm_pager_ops = {
+ .cdev_pg_fault = hwt_vm_fault,
+ .cdev_pg_ctor = hwt_vm_ctor,
+ .cdev_pg_dtor = hwt_vm_dtor
+};
+
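+/*
+ * Allocate wired pages for the trace buffer, insert them into the
+ * pager-backed VM object and, if requested, map them into the kernel
+ * address space.
+ */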
+static int
+hwt_vm_alloc_pages(struct hwt_vm *vm, int kva_req)
+{
+ vm_paddr_t low, high, boundary;
+ vm_memattr_t memattr;
+#ifdef __aarch64__
+ uintptr_t va;
+#endif
+ int alignment;
+ vm_page_t m;
+ int pflags;
+ int tries;
+ int i;
+
+ alignment = PAGE_SIZE;
+ low = 0;
+ high = -1UL;
+ boundary = 0;
+ pflags = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO;
+ memattr = VM_MEMATTR_DEVICE;
+
+ if (kva_req) {
+ vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE);
+ if (!vm->kvaddr)
+ return (ENOMEM);
+ }
+
+ vm->obj = cdev_pager_allocate(vm, OBJT_MGTDEVICE,
+ &hwt_vm_pager_ops, vm->npages * PAGE_SIZE, PROT_READ, 0,
+ curthread->td_ucred);
+
+ for (i = 0; i < vm->npages; i++) {
+ tries = 0;
+retry:
+ m = vm_page_alloc_noobj_contig(pflags, 1, low, high,
+ alignment, boundary, memattr);
+ if (m == NULL) {
+ if (tries < 3) {
+ if (!vm_page_reclaim_contig(pflags, 1, low,
+ high, alignment, boundary))
+ vm_wait(NULL);
+ tries++;
+ goto retry;
+ }
+
+ return (ENOMEM);
+ }
+
+#if 0
+ /* TODO: could not clean device memory on arm64. */
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+#endif
+
+#ifdef __aarch64__
+ va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ cpu_dcache_wb_range((void *)va, PAGE_SIZE);
+#endif
+
+ m->valid = VM_PAGE_BITS_ALL;
+ m->oflags &= ~VPO_UNMANAGED;
+ m->flags |= PG_FICTITIOUS;
+ vm->pages[i] = m;
+
+ VM_OBJECT_WLOCK(vm->obj);
+ vm_page_insert(m, vm->obj, i);
+ if (kva_req)
+ pmap_qenter(vm->kvaddr + i * PAGE_SIZE, &m, 1);
+ VM_OBJECT_WUNLOCK(vm->obj);
+ }
+
+ return (0);
+}
+
+static int
+hwt_vm_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
+{
+
+ dprintf("%s\n", __func__);
+
+ return (0);
+}
+
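+/*
+ * Export the trace buffer object for a userspace mapping. Only a
+ * read-only mapping at offset zero is accepted.
+ */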
+static int
+hwt_vm_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
+ vm_size_t mapsize, struct vm_object **objp, int nprot)
+{
+ struct hwt_vm *vm;
+
+ vm = cdev->si_drv1;
+
+ if (nprot != PROT_READ || *offset != 0)
+ return (ENXIO);
+
+ vm_object_reference(vm->obj);
+ *objp = vm->obj;
+
+ return (0);
+}
+
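+/*
+ * Configure and enable the tracing backend on every CPU in the
+ * context's CPU map, skipping halted CPUs.
+ */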
+static void
+hwt_vm_start_cpu_mode(struct hwt_context *ctx)
+{
+ cpuset_t enable_cpus;
+ int cpu_id;
+
+ CPU_ZERO(&enable_cpus);
+
+ CPU_FOREACH_ISSET(cpu_id, &ctx->cpu_map) {
+#ifdef SMP
+ /* Ensure CPU is not halted. */
+ if (CPU_ISSET(cpu_id, &hlt_cpus_mask))
+ continue;
+#endif
+
+ hwt_backend_configure(ctx, cpu_id, cpu_id);
+
+ CPU_SET(cpu_id, &enable_cpus);
+ }
+
+ if (ctx->hwt_backend->ops->hwt_backend_enable_smp == NULL) {
+ CPU_FOREACH_ISSET(cpu_id, &enable_cpus)
+ hwt_backend_enable(ctx, cpu_id);
+ } else {
+ /* Some backends require enabling all CPUs at once. */
+ hwt_backend_enable_smp(ctx);
+ }
+}
+
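+/*
+ * Buffer device ioctl handler. Only the registered owner of the
+ * context may start or stop tracing, fetch records or query the
+ * current buffer pointer.
+ */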
+static int
+hwt_vm_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+ struct hwt_record_get *rget;
+ struct hwt_set_config *sconf;
+ struct hwt_bufptr_get *ptr_get;
+ struct hwt_svc_buf *sbuf;
+
+ struct hwt_context *ctx;
+ struct hwt_vm *vm;
+ struct hwt_owner *ho;
+
+ vm_offset_t offset;
+ int ident;
+ int error;
+ uint64_t data = 0;
+ void *data2;
+ size_t data_size;
+ int data_version;
+
+ vm = dev->si_drv1;
+ KASSERT(vm != NULL, ("si_drv1 is NULL"));
+
+ ctx = vm->ctx;
+
+ /* Ensure process is registered owner of this HWT. */
+ ho = hwt_ownerhash_lookup(td->td_proc);
+ if (ho == NULL)
+ return (ENXIO);
+
+ if (ctx->hwt_owner != ho)
+ return (EPERM);
+
+ switch (cmd) {
+ case HWT_IOC_START:
+ dprintf("%s: start tracing\n", __func__);
+
+ HWT_CTX_LOCK(ctx);
+ if (ctx->state == CTX_STATE_RUNNING) {
+ /* Already running? */
+ HWT_CTX_UNLOCK(ctx);
+ return (ENXIO);
+ }
+ ctx->state = CTX_STATE_RUNNING;
+ HWT_CTX_UNLOCK(ctx);
+
+ if (ctx->mode == HWT_MODE_CPU)
+ hwt_vm_start_cpu_mode(ctx);
+ else {
+ /*
+ * The tracing backend will be configured and
+ * enabled during hook invocation; see hwt_hook.c.
+ */
+ }
+
+ break;
+
+ case HWT_IOC_STOP:
+ if (ctx->state == CTX_STATE_STOPPED)
+ return (ENXIO);
+ hwt_backend_stop(ctx);
+ ctx->state = CTX_STATE_STOPPED;
+ break;
+
+ case HWT_IOC_RECORD_GET:
+ rget = (struct hwt_record_get *)addr;
+ error = hwt_record_send(ctx, rget);
+ if (error)
+ return (error);
+ break;
+
+ case HWT_IOC_SET_CONFIG:
+ if (ctx->state == CTX_STATE_RUNNING) {
+ return (ENXIO);
+ }
+ sconf = (struct hwt_set_config *)addr;
+ error = hwt_config_set(td, ctx, sconf);
+ if (error)
+ return (error);
+ ctx->pause_on_mmap = sconf->pause_on_mmap ? 1 : 0;
+ break;
+
+ case HWT_IOC_WAKEUP:
+
+ if (ctx->mode == HWT_MODE_CPU)
+ return (ENXIO);
+
+ KASSERT(vm->thr != NULL, ("thr is NULL"));
+
+ wakeup(vm->thr);
+
+ break;
+
+ case HWT_IOC_BUFPTR_GET:
+ ptr_get = (struct hwt_bufptr_get *)addr;
+
+ error = hwt_backend_read(ctx, vm, &ident, &offset, &data);
+ if (error)
+ return (error);
+
+ if (ptr_get->ident)
+ error = copyout(&ident, ptr_get->ident, sizeof(int));
+ if (error)
+ return (error);
+
+ if (ptr_get->offset)
+ error = copyout(&offset, ptr_get->offset,
+ sizeof(vm_offset_t));
+ if (error)
+ return (error);
+
+ if (ptr_get->data)
+ error = copyout(&data, ptr_get->data, sizeof(uint64_t));
+ if (error)
+ return (error);
+
+ break;
+
+ case HWT_IOC_SVC_BUF:
+ if (ctx->state == CTX_STATE_STOPPED) {
+ return (ENXIO);
+ }
+
+ sbuf = (struct hwt_svc_buf *)addr;
+ data_size = sbuf->data_size;
+ data_version = sbuf->data_version;
+
+ if (data_size == 0 || data_size > PAGE_SIZE)
+ return (EINVAL);
+
+ data2 = malloc(data_size, M_HWT_VM, M_WAITOK | M_ZERO);
+ error = copyin(sbuf->data, data2, data_size);
+ if (error) {
+ free(data2, M_HWT_VM);
+ return (error);
+ }
+
+ error = hwt_backend_svc_buf(ctx, data2, data_size, data_version);
+ if (error) {
+ free(data2, M_HWT_VM);
+ return (error);
+ }
+
+ free(data2, M_HWT_VM);
+ break;
+
+ default:
+ break;
+ }
+
+ return (0);
+}
+
+static struct cdevsw hwt_vm_cdevsw = {
+ .d_version = D_VERSION,
+ .d_name = "hwt",
+ .d_open = hwt_vm_open,
+ .d_mmap_single = hwt_vm_mmap_single,
+ .d_ioctl = hwt_vm_ioctl,
+};
+
+static int
+hwt_vm_create_cdev(struct hwt_vm *vm, char *path)
+{
+ struct make_dev_args args;
+ int error;
+
+ dprintf("%s: path %s\n", __func__, path);
+
+ make_dev_args_init(&args);
+ args.mda_devsw = &hwt_vm_cdevsw;
+ args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ args.mda_uid = UID_ROOT;
+ args.mda_gid = GID_WHEEL;
+ args.mda_mode = 0660;
+ args.mda_si_drv1 = vm;
+
+ error = make_dev_s(&args, &vm->cdev, "%s", path);
+ if (error != 0)
+ return (error);
+
+ return (0);
+}
+
+static int
+hwt_vm_alloc_buffers(struct hwt_vm *vm, int kva_req)
+{
+ int error;
+
+ vm->pages = malloc(sizeof(struct vm_page *) * vm->npages,
+ M_HWT_VM, M_WAITOK | M_ZERO);
+
+ error = hwt_vm_alloc_pages(vm, kva_req);
+ if (error) {
+ printf("%s: could not alloc pages\n", __func__);
+ return (error);
+ }
+
+ return (0);
+}
+
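+/*
+ * Unmap, unwire and free the trace buffer pages, then drop the
+ * backing pager object.
+ */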
+static void
+hwt_vm_destroy_buffers(struct hwt_vm *vm)
+{
+ vm_page_t m;
+ int i;
+
+ if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) {
+ pmap_qremove(vm->kvaddr, vm->npages);
+ kva_free(vm->kvaddr, vm->npages * PAGE_SIZE);
+ }
+ VM_OBJECT_WLOCK(vm->obj);
+ for (i = 0; i < vm->npages; i++) {
+ m = vm->pages[i];
+ if (m == NULL)
+ break;
+
+ vm_page_busy_acquire(m, 0);
+ cdev_pager_free_page(vm->obj, m);
+ m->flags &= ~PG_FICTITIOUS;
+ vm_page_unwire_noq(m);
+ vm_page_free(m);
+
+ }
+ vm_pager_deallocate(vm->obj);
+ VM_OBJECT_WUNLOCK(vm->obj);
+
+ free(vm->pages, M_HWT_VM);
+}
+
+void
+hwt_vm_free(struct hwt_vm *vm)
+{
+
+ dprintf("%s\n", __func__);
+
+ if (vm->cdev)
+ destroy_dev_sched(vm->cdev);
+ hwt_vm_destroy_buffers(vm);
+ free(vm, M_HWT_VM);
+}
+
+int
+hwt_vm_alloc(size_t bufsize, int kva_req, char *path, struct hwt_vm **vm0)
+{
+ struct hwt_vm *vm;
+ int error;
+
+ vm = malloc(sizeof(struct hwt_vm), M_HWT_VM, M_WAITOK | M_ZERO);
+ vm->npages = bufsize / PAGE_SIZE;
+
+ error = hwt_vm_alloc_buffers(vm, kva_req);
+ if (error) {
+ free(vm, M_HWT_VM);
+ return (error);
+ }
+
+ error = hwt_vm_create_cdev(vm, path);
+ if (error) {
+ hwt_vm_free(vm);
+ return (error);
+ }
+
+ *vm0 = vm;
+
+ return (0);
+}
diff --git a/sys/dev/hwt/hwt_vm.h b/sys/dev/hwt/hwt_vm.h
new file mode 100644
index 000000000000..5002bd43e093
--- /dev/null
+++ b/sys/dev/hwt/hwt_vm.h
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2023-2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * This work was supported by Innovate UK project 105694, "Digital Security
+ * by Design (DSbD) Technology Platform Prototype".
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_HWT_HWT_VM_H_
+#define _DEV_HWT_HWT_VM_H_
+
+struct hwt_vm {
+ vm_page_t *pages;
+ int npages;
+ vm_object_t obj;
+ vm_offset_t kvaddr;
+ struct cdev *cdev;
+
+ struct hwt_context *ctx;
+ struct hwt_cpu *cpu; /* cpu mode only. */
+ struct hwt_thread *thr; /* thr mode only. */
+};
+
+int hwt_vm_alloc(size_t bufsize, int kva_req, char *path, struct hwt_vm **vm0);
+void hwt_vm_free(struct hwt_vm *vm);
+
+#endif /* !_DEV_HWT_HWT_VM_H_ */
diff --git a/sys/dev/hyperv/hvsock/hv_sock.c b/sys/dev/hyperv/hvsock/hv_sock.c
index 8072765f2d5b..5a69eaa2b47b 100644
--- a/sys/dev/hyperv/hvsock/hv_sock.c
+++ b/sys/dev/hyperv/hvsock/hv_sock.c
@@ -1461,7 +1461,7 @@ hvsock_open_conn_passive(struct vmbus_channel *chan, struct socket *so,
}
/*
- * Create a new socket. This will call pru_attach to complete
+ * Create a new socket. This will call pr_attach() to complete
* the socket initialization and put the new socket onto
* listening socket's sol_incomp list, waiting to be promoted
* to sol_comp list.
diff --git a/sys/dev/hyperv/input/hv_hid.c b/sys/dev/hyperv/input/hv_hid.c
index b8fc9605bf67..ec68581d63a8 100644
--- a/sys/dev/hyperv/input/hv_hid.c
+++ b/sys/dev/hyperv/input/hv_hid.c
@@ -436,16 +436,14 @@ hv_hid_attach(device_t dev)
ret = ENODEV;
goto out;
}
- child = device_add_child(sc->dev, "hidbus", -1);
+ child = device_add_child(sc->dev, "hidbus", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "failed to add hidbus\n");
ret = ENOMEM;
goto out;
}
device_set_ivars(child, &sc->hdi);
- ret = bus_generic_attach(dev);
- if (ret != 0)
- device_printf(sc->dev, "failed to attach hidbus\n");
+ bus_attach_children(dev);
out:
if (ret != 0)
hv_hid_detach(dev);
@@ -459,7 +457,7 @@ hv_hid_detach(device_t dev)
int ret;
sc = device_get_softc(dev);
- ret = device_delete_children(dev);
+ ret = bus_generic_detach(dev);
if (ret != 0)
return (ret);
if (sc->hs_xact_ctx != NULL)
diff --git a/sys/dev/hyperv/netvsc/if_hn.c b/sys/dev/hyperv/netvsc/if_hn.c
index 9f51f5b32199..ab7671025107 100644
--- a/sys/dev/hyperv/netvsc/if_hn.c
+++ b/sys/dev/hyperv/netvsc/if_hn.c
@@ -898,7 +898,7 @@ hn_check_tcpsyn(struct mbuf *m_head, int *tcpsyn)
PULLUP_HDR(m_head, ehlen + iphlen + sizeof(*th));
th = mtodo(m_head, ehlen + iphlen);
- if (th->th_flags & TH_SYN)
+ if (tcp_get_flags(th) & TH_SYN)
*tcpsyn = 1;
return (m_head);
}
@@ -2355,7 +2355,7 @@ hn_attach(device_t dev)
}
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rsc_switch",
- CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_rsc_sysctl, "A",
+ CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_rsc_sysctl, "I",
"switch to rsc");
/*
@@ -4523,24 +4523,22 @@ static int
hn_rsc_sysctl(SYSCTL_HANDLER_ARGS)
{
struct hn_softc *sc = arg1;
- uint32_t mtu;
+ int rsc_ctrl, mtu;
int error;
- HN_LOCK(sc);
- error = hn_rndis_get_mtu(sc, &mtu);
- if (error) {
- if_printf(sc->hn_ifp, "failed to get mtu\n");
- goto back;
- }
- error = SYSCTL_OUT(req, &(sc->hn_rsc_ctrl), sizeof(sc->hn_rsc_ctrl));
+
+ rsc_ctrl = sc->hn_rsc_ctrl;
+ error = sysctl_handle_int(oidp, &rsc_ctrl, 0, req);
if (error || req->newptr == NULL)
- goto back;
+ return (error);
+
+ if (sc->hn_rsc_ctrl != rsc_ctrl) {
+ HN_LOCK(sc);
+ sc->hn_rsc_ctrl = rsc_ctrl;
+ mtu = if_getmtu(sc->hn_ifp);
+ error = hn_rndis_reconf_offload(sc, mtu);
+ HN_UNLOCK(sc);
+ }
- error = SYSCTL_IN(req, &(sc->hn_rsc_ctrl), sizeof(sc->hn_rsc_ctrl));
- if (error)
- goto back;
- error = hn_rndis_reconf_offload(sc, mtu);
-back:
- HN_UNLOCK(sc);
return (error);
}
#ifndef RSS
@@ -5131,7 +5129,7 @@ hn_destroy_rx_data(struct hn_softc *sc)
if (sc->hn_rxbuf != NULL) {
if ((sc->hn_flags & HN_FLAG_RXBUF_REF) == 0)
- contigfree(sc->hn_rxbuf, HN_RXBUF_SIZE, M_DEVBUF);
+ free(sc->hn_rxbuf, M_DEVBUF);
else
device_printf(sc->hn_dev, "RXBUF is referenced\n");
sc->hn_rxbuf = NULL;
@@ -5146,8 +5144,7 @@ hn_destroy_rx_data(struct hn_softc *sc)
if (rxr->hn_br == NULL)
continue;
if ((rxr->hn_rx_flags & HN_RX_FLAG_BR_REF) == 0) {
- contigfree(rxr->hn_br, HN_TXBR_SIZE + HN_RXBR_SIZE,
- M_DEVBUF);
+ free(rxr->hn_br, M_DEVBUF);
} else {
device_printf(sc->hn_dev,
"%dth channel bufring is referenced", i);
@@ -5649,7 +5646,7 @@ hn_destroy_tx_data(struct hn_softc *sc)
if (sc->hn_chim != NULL) {
if ((sc->hn_flags & HN_FLAG_CHIM_REF) == 0) {
- contigfree(sc->hn_chim, HN_CHIM_SIZE, M_DEVBUF);
+ free(sc->hn_chim, M_DEVBUF);
} else {
device_printf(sc->hn_dev,
"chimney sending buffer is referenced");
diff --git a/sys/dev/hyperv/pcib/vmbus_pcib.c b/sys/dev/hyperv/pcib/vmbus_pcib.c
index f6237535cce3..7b755e5f9c63 100644
--- a/sys/dev/hyperv/pcib/vmbus_pcib.c
+++ b/sys/dev/hyperv/pcib/vmbus_pcib.c
@@ -25,7 +25,6 @@
*/
#include <sys/cdefs.h>
-#ifdef NEW_PCIB
#include "opt_acpi.h"
#include <sys/param.h>
@@ -1565,14 +1564,14 @@ vmbus_pcib_attach(device_t dev)
vmbus_pcib_prepopulate_bars(hbus);
- hbus->pci_bus = device_add_child(dev, "pci", -1);
+ hbus->pci_bus = device_add_child(dev, "pci", DEVICE_UNIT_ANY);
if (!hbus->pci_bus) {
device_printf(dev, "failed to create pci bus\n");
ret = ENXIO;
goto vmbus_close;
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
hbus->state = hv_pcibus_installed;
@@ -2042,5 +2041,3 @@ DEFINE_CLASS_0(pcib, vmbus_pcib_driver, vmbus_pcib_methods,
DRIVER_MODULE(vmbus_pcib, vmbus, vmbus_pcib_driver, 0, 0);
MODULE_DEPEND(vmbus_pcib, vmbus, 1, 1, 1);
MODULE_DEPEND(vmbus_pcib, pci, 1, 1, 1);
-
-#endif /* NEW_PCIB */
diff --git a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
index eeec169baac5..29a88e76a579 100644
--- a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
@@ -954,13 +954,18 @@ storvsc_init_requests(device_t dev)
bus_get_dma_tag(dev), /* parent */
1, /* alignment */
PAGE_SIZE, /* boundary */
+#if defined(__i386__) && defined(PAE)
+ BUS_SPACE_MAXADDR_48BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR_48BIT, /* highaddr */
+#else
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
+#endif
NULL, NULL, /* filter, filterarg */
STORVSC_DATA_SIZE_MAX, /* maxsize */
STORVSC_DATA_SEGCNT_MAX, /* nsegments */
STORVSC_DATA_SEGSZ_MAX, /* maxsegsize */
- 0, /* flags */
+ BUS_DMA_KEEP_PG_OFFSET, /* flags */
NULL, /* lockfunc */
NULL, /* lockfuncarg */
&sc->storvsc_req_dtag);
@@ -1828,7 +1833,6 @@ storvsc_xferbuf_prepare(void *arg, bus_dma_segment_t *segs, int nsegs, int error
for (i = 0; i < nsegs; i++) {
#ifdef INVARIANTS
-#if !defined(__aarch64__)
if (nsegs > 1) {
if (i == 0) {
KASSERT((segs[i].ds_addr & PAGE_MASK) +
@@ -1849,7 +1853,6 @@ storvsc_xferbuf_prepare(void *arg, bus_dma_segment_t *segs, int nsegs, int error
}
}
#endif
-#endif
prplist->gpa_page[i] = atop(segs[i].ds_addr);
}
reqp->prp_cnt = nsegs;
diff --git a/sys/dev/hyperv/vmbus/aarch64/hyperv_machdep.c b/sys/dev/hyperv/vmbus/aarch64/hyperv_machdep.c
index dbb6aac2de31..e808cc081535 100644
--- a/sys/dev/hyperv/vmbus/aarch64/hyperv_machdep.c
+++ b/sys/dev/hyperv/vmbus/aarch64/hyperv_machdep.c
@@ -50,10 +50,10 @@
void
arm_hv_set_vreg(u32 msr, u64 value)
{
- arm_smccc_hvc(HV_FUNC_ID,
+ arm_smccc_invoke_hvc(HV_FUNC_ID,
HVCALL_SET_VP_REGISTERS | HV_HYPERCALL_FAST_BIT |
HV_HYPERCALL_REP_COMP_1,
- HV_PARTITION_ID_SELF, HV_VP_INDEX_SELF, msr, 0, value, 0, NULL);
+ HV_PARTITION_ID_SELF, HV_VP_INDEX_SELF, msr, 0, value, NULL);
}
void
@@ -95,8 +95,7 @@ hypercall_md(volatile void *hc_addr, uint64_t in_val, uint64_t in_paddr,
{
struct arm_smccc_res res;
- arm_smccc_hvc(HV_FUNC_ID, in_val, in_paddr, out_paddr, 0, 0, 0, 0,
- &res);
+ arm_smccc_invoke_hvc(HV_FUNC_ID, in_val, in_paddr, out_paddr, &res);
return (res.a0);
}
diff --git a/sys/dev/hyperv/vmbus/hyperv.c b/sys/dev/hyperv/vmbus/hyperv.c
index e0e85a022090..1f85203146d0 100644
--- a/sys/dev/hyperv/vmbus/hyperv.c
+++ b/sys/dev/hyperv/vmbus/hyperv.c
@@ -35,6 +35,7 @@
#include <sys/malloc.h>
#include <sys/systm.h>
#include <sys/timetc.h>
+#include <sys/cpuset.h>
#include <vm/vm.h>
#include <vm/vm_extern.h>
@@ -50,6 +51,7 @@
#include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
#include <dev/hyperv/vmbus/x86/hyperv_reg.h>
#endif
+#include <dev/hyperv/vmbus/vmbus_var.h>
#include <dev/hyperv/vmbus/hyperv_common_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
@@ -72,10 +74,12 @@
MSR_HV_GUESTID_OSID_FREEBSD | \
MSR_HV_GUESTID_OSTYPE_FREEBSD)
+
static bool hyperv_identify(void);
static void hypercall_memfree(void);
static struct hypercall_ctx hypercall_context;
+
uint64_t
hypercall_post_message(bus_addr_t msg_paddr)
{
@@ -90,6 +94,65 @@ hypercall_signal_event(bus_addr_t monprm_paddr)
HYPERCALL_SIGNAL_EVENT, monprm_paddr, 0);
}
+static inline int
+hv_result(uint64_t status)
+{
+ return (status & HV_HYPERCALL_RESULT_MASK);
+}
+
+static inline bool
+hv_result_success(uint64_t status)
+{
+ return (hv_result(status) == HV_STATUS_SUCCESS);
+}
+
+static inline unsigned int
+hv_repcomp(uint64_t status)
+{
+ /* Bits [43:32] of status carry the 'reps completed' count. */
+ return ((status & HV_HYPERCALL_REP_COMP_MASK) >>
+ HV_HYPERCALL_REP_COMP_OFFSET);
+}
+
+/*
+ * Rep hypercalls. Callers of this function are supposed to ensure that
+ * rep_count and varhead_size comply with the Hyper-V hypercall definition.
+ */
+uint64_t
+hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, uint16_t varhead_size,
+ uint64_t input, uint64_t output)
+{
+ uint64_t control = code;
+ uint64_t status;
+ uint16_t rep_comp;
+
+ control |= (uint64_t)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
+ control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+
+ do {
+ status = hypercall_do_md(control, input, output);
+ if (!hv_result_success(status))
+ return status;
+
+ rep_comp = hv_repcomp(status);
+
+ control &= ~HV_HYPERCALL_REP_START_MASK;
+ control |= (uint64_t)rep_comp << HV_HYPERCALL_REP_START_OFFSET;
+
+ } while (rep_comp < rep_count);
+ if (hv_result_success(status))
+ return HV_STATUS_SUCCESS;
+
+ return status;
+}
+
+uint64_t
+hypercall_do_md(uint64_t input_val, uint64_t input_addr, uint64_t out_addr)
+{
+ uint64_t phys_inaddr, phys_outaddr;
+ phys_inaddr = input_addr ? vtophys(input_addr) : 0;
+ phys_outaddr = out_addr ? vtophys(out_addr) : 0;
+ return hypercall_md(hypercall_context.hc_addr,
+ input_val, phys_inaddr, phys_outaddr);
+}
+
int
hyperv_guid2str(const struct hyperv_guid *guid, char *buf, size_t sz)
{
diff --git a/sys/dev/hyperv/vmbus/hyperv_mmu.c b/sys/dev/hyperv/vmbus/hyperv_mmu.c
new file mode 100644
index 000000000000..8e982974161c
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/hyperv_mmu.c
@@ -0,0 +1,308 @@
+/*-
+ * Copyright (c) 2009-2012,2016-2024 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/sbuf.h>
+#include <sys/smp.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/kdb.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <dev/hyperv/vmbus/x86/hyperv_machdep.h>
+#include <dev/hyperv/vmbus/x86/hyperv_reg.h>
+#include <dev/hyperv/include/hyperv.h>
+#include <dev/hyperv/vmbus/hyperv_var.h>
+#include <dev/hyperv/vmbus/vmbus_reg.h>
+#include <dev/hyperv/vmbus/vmbus_var.h>
+#include <dev/hyperv/vmbus/hyperv_common_reg.h>
+#include "hyperv_mmu.h"
+
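+/*
+ * Pack the [start, end) VA range into the hypercall GVA list. Each
+ * entry carries a page-aligned address whose low 12 bits encode the
+ * number of additional pages to flush. Returns the number of entries
+ * used.
+ */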
+static inline int
+fill_gva_list(uint64_t gva_list[], unsigned long start, unsigned long end)
+{
+ int gva_n = 0;
+ unsigned long cur = start, diff;
+
+ do {
+ diff = end > cur ? end - cur : 0;
+
+ gva_list[gva_n] = cur;
+ /*
+ * Lower 12 bits encode the number of additional
+ * pages to flush (in addition to the 'cur' page).
+ */
+ if (diff >= HV_TLB_FLUSH_UNIT) {
+ gva_list[gva_n] |= PAGE_MASK;
+ cur += HV_TLB_FLUSH_UNIT;
+ } else if (diff) {
+ gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+ cur = end;
+ }
+
+ gva_n++;
+
+ } while (cur < end);
+
+ return gva_n;
+}
+
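+/*
+ * Translate a cpuset into a Hyper-V sparse VP set. Returns the number
+ * of banks used, 0 if the set cannot be represented, or -1 if a CPU
+ * has no vCPU id assigned yet.
+ */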
+inline int
+hv_cpumask_to_vpset(struct hv_vpset *vpset, const cpuset_t *cpus,
+ struct vmbus_softc *sc)
+{
+ int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+ int max_vcpu_bank = hv_max_vp_index / HV_VCPUS_PER_SPARSE_BANK;
+
+ /*
+ * vpset.valid_bank_mask can represent up to
+ * HV_MAX_SPARSE_VCPU_BANKS banks
+ */
+ if (max_vcpu_bank >= HV_MAX_SPARSE_VCPU_BANKS)
+ return 0;
+
+ /*
+ * Clear all banks up to the maximum possible bank: hv_tlb_flush_ex
+ * structs are not cleared between calls, so we would otherwise risk
+ * flushing unneeded vCPUs.
+ */
+ for (vcpu_bank = 0; vcpu_bank <= max_vcpu_bank; vcpu_bank++)
+ vpset->bank_contents[vcpu_bank] = 0;
+
+ /*
+ * Some banks may end up being empty but this is acceptable.
+ */
+ CPU_FOREACH_ISSET(cpu, cpus) {
+ vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu);
+ if (vcpu == -1)
+ return -1;
+ vcpu_bank = vcpu / HV_VCPUS_PER_SPARSE_BANK;
+ vcpu_offset = vcpu % HV_VCPUS_PER_SPARSE_BANK;
+ set_bit(vcpu_offset, (unsigned long *)
+ &vpset->bank_contents[vcpu_bank]);
+ if (vcpu_bank >= nr_bank)
+ nr_bank = vcpu_bank + 1;
+ }
+ vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
+ return nr_bank;
+}
+
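+/*
+ * Hypercall-based TLB shootdown. Build the flush request in this
+ * CPU's preallocated page and invalidate remote TLBs via Hyper-V,
+ * falling back to the native IPI path whenever the hypercall cannot
+ * be used or fails.
+ */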
+void
+hv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
+ enum invl_op_codes op, struct vmbus_softc *sc, smp_invl_local_cb_t curcpu_cb)
+{
+ cpuset_t tmp_mask, mask;
+ struct hyperv_tlb_flush *flush;
+ int cpu, vcpu;
+ int max_gvas, gva_n;
+ uint64_t status = 0;
+ uint64_t cr3;
+
+ /*
+ * Hyper-V doesn't handle cache invalidation; let the system handle it.
+ */
+ if (op == INVL_OP_CACHE)
+ return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2,
+ curcpu_cb, op);
+
+ flush = *VMBUS_PCPU_PTR(sc, cpu_mem, curcpu);
+ if (flush == NULL)
+ return smp_targeted_tlb_shootdown_native(pmap, addr1, addr2,
+ curcpu_cb, op);
+ /*
+ * It is not necessary to signal other CPUs while booting or
+ * when in the debugger.
+ */
+ if (__predict_false(kdb_active || KERNEL_PANICKED() || !smp_started))
+ goto local_cb;
+
+ KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
+
+ /*
+ * Make a stable copy of the set of CPUs on which the pmap is active.
+ * See if we have to interrupt other CPUs.
+ */
+ CPU_COPY(pmap_invalidate_cpu_mask(pmap), &tmp_mask);
+ CPU_COPY(pmap_invalidate_cpu_mask(pmap), &mask);
+ CPU_CLR(curcpu, &tmp_mask);
+ if (CPU_EMPTY(&tmp_mask))
+ goto local_cb;
+
+ /*
+ * Initiator must have interrupts enabled, which prevents
+ * non-invalidation IPIs that take smp_ipi_mtx spinlock,
+ * from deadlocking with us. On the other hand, preemption
+ * must be disabled to pin initiator to the instance of the
+ * pcpu pc_smp_tlb data and scoreboard line.
+ */
+ KASSERT((read_rflags() & PSL_I) != 0,
+ ("hv_tlb_flush: interrupts disabled"));
+ critical_enter();
+ flush->processor_mask = 0;
+ cr3 = pmap->pm_cr3;
+
+ if (op == INVL_OP_TLB || op == INVL_OP_TLB_INVPCID ||
+ op == INVL_OP_TLB_INVPCID_PTI || op == INVL_OP_TLB_PCID) {
+ flush->address_space = 0;
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ } else {
+ flush->address_space = cr3;
+ flush->address_space &= ~CR3_PCID_MASK;
+ flush->flags = 0;
+ }
+ if (CPU_CMP(&mask, &all_cpus) == 0) {
+ flush->flags |= HV_FLUSH_ALL_PROCESSORS;
+ } else {
+ if (CPU_FLS(&mask) < mp_ncpus && CPU_FLS(&mask) >= 64)
+ goto do_ex_hypercall;
+
+ CPU_FOREACH_ISSET(cpu, &mask) {
+ vcpu = VMBUS_PCPU_GET(sc, vcpuid, cpu);
+ if (vcpu >= 64)
+ goto do_ex_hypercall;
+
+ set_bit(vcpu, &flush->processor_mask);
+ }
+ if (!flush->processor_mask)
+ goto native;
+ }
+ max_gvas = (PAGE_SIZE - sizeof(*flush)) / sizeof(flush->gva_list[0]);
+ if (addr2 == 0) {
+ flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+ status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+ (uint64_t)flush, (uint64_t)NULL);
+ } else if (addr2 && ((addr2 - addr1) / HV_TLB_FLUSH_UNIT) > max_gvas) {
+ status = hypercall_do_md(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE,
+ (uint64_t)flush, (uint64_t)NULL);
+ } else {
+ gva_n = fill_gva_list(flush->gva_list, addr1, addr2);
+
+ status = hv_do_rep_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
+ gva_n, 0, (uint64_t)flush, (uint64_t)NULL);
+
+ }
+ if (status)
+ goto native;
+ sched_unpin();
+ critical_exit();
+ return;
+
+local_cb:
+ critical_enter();
+ curcpu_cb(pmap, addr1, addr2);
+ sched_unpin();
+ critical_exit();
+ return;
+do_ex_hypercall:
+ status = hv_flush_tlb_others_ex(pmap, addr1, addr2, mask, op, sc);
+ if (status)
+ goto native;
+ sched_unpin();
+ critical_exit();
+ return;
+native:
+ critical_exit();
+ return smp_targeted_tlb_shootdown_native(pmap, addr1,
+ addr2, curcpu_cb, op);
+}
+
+uint64_t
+hv_flush_tlb_others_ex(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
+ const cpuset_t mask, enum invl_op_codes op, struct vmbus_softc *sc)
+{
+ int nr_bank = 0, max_gvas, gva_n;
+ struct hv_tlb_flush_ex *flush;
+ uint64_t status = 0;
+ uint64_t cr3;
+
+ if (*VMBUS_PCPU_PTR(sc, cpu_mem, curcpu) == NULL)
+ return EINVAL;
+ flush = *VMBUS_PCPU_PTR(sc, cpu_mem, curcpu);
+
+ if (!(hyperv_recommends & HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
+ return EINVAL;
+
+ cr3 = pmap->pm_cr3;
+ if (op == INVL_OP_TLB) {
+ flush->address_space = 0;
+ flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
+ } else {
+ flush->address_space = cr3;
+ flush->address_space &= ~CR3_PCID_MASK;
+ flush->flags = 0;
+ }
+
+ flush->hv_vp_set.valid_bank_mask = 0;
+
+ flush->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
+ nr_bank = hv_cpumask_to_vpset(&flush->hv_vp_set, &mask, sc);
+ if (nr_bank < 0)
+ return EINVAL;
+
+ /*
+ * We can flush at most max_gvas ranges with one hypercall. Flush the
+ * whole address space if we were asked to do more.
+ */
+ max_gvas = (PAGE_SIZE - sizeof(*flush) - nr_bank *
+ sizeof(flush->hv_vp_set.bank_contents[0])) /
+ sizeof(flush->hv_vp_set.bank_contents[0]);
+
+ if (addr2 == 0) {
+ flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY;
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+ 0, nr_bank, (uint64_t)flush, (uint64_t)NULL);
+ } else if (addr2 &&
+ ((addr2 - addr1)/HV_TLB_FLUSH_UNIT) > max_gvas) {
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
+ 0, nr_bank, (uint64_t)flush, (uint64_t)NULL);
+ } else {
+ gva_n = fill_gva_list(&flush->hv_vp_set.bank_contents[nr_bank],
+ addr1, addr2);
+ status = hv_do_rep_hypercall(
+ HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX,
+ gva_n, nr_bank, (uint64_t)flush, (uint64_t)NULL);
+ }
+ return status;
+}
diff --git a/sys/dev/hyperv/vmbus/hyperv_mmu.h b/sys/dev/hyperv/vmbus/hyperv_mmu.h
new file mode 100644
index 000000000000..e62948d74181
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/hyperv_mmu.h
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2009-2012,2016-2024 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HYPERV_MMU_H_
+#define _HYPERV_MMU_H_
+
+#include "vmbus_var.h"
+
+#define HV_VCPUS_PER_SPARSE_BANK (64)
+#define HV_MAX_SPARSE_VCPU_BANKS (64)
+
+
+struct hyperv_tlb_flush {
+ uint64_t address_space;
+ uint64_t flags;
+ uint64_t processor_mask;
+ uint64_t gva_list[];
+}__packed;
+
+struct hv_vpset {
+ uint64_t format;
+ uint64_t valid_bank_mask;
+ uint64_t bank_contents[];
+} __packed;
+
+struct hv_tlb_flush_ex {
+ uint64_t address_space;
+ uint64_t flags;
+ struct hv_vpset hv_vp_set;
+} __packed;
+
+#endif /* !_HYPERV_MMU_H_ */
diff --git a/sys/dev/hyperv/vmbus/hyperv_var.h b/sys/dev/hyperv/vmbus/hyperv_var.h
index 67f6cc4ef706..62cce9026ab0 100644
--- a/sys/dev/hyperv/vmbus/hyperv_var.h
+++ b/sys/dev/hyperv/vmbus/hyperv_var.h
@@ -33,7 +33,18 @@ struct hypercall_ctx {
void *hc_addr;
vm_paddr_t hc_paddr;
};
+
uint64_t hypercall_post_message(bus_addr_t msg_paddr);
uint64_t hypercall_signal_event(bus_addr_t monprm_paddr);
+uint64_t hypercall_do_md(uint64_t input, uint64_t in_addr,
+ uint64_t out_addr);
+struct hv_vpset;
+struct vmbus_softc;
+uint64_t
+hv_do_rep_hypercall(uint16_t code, uint16_t rep_count, uint16_t varhead_size,
+ uint64_t input, uint64_t output);
+int
+hv_cpumask_to_vpset(struct hv_vpset *vpset, const cpuset_t *cpus,
+ struct vmbus_softc *sc);
#endif /* !_HYPERV_VAR_H_ */
diff --git a/sys/dev/hyperv/vmbus/vmbus.c b/sys/dev/hyperv/vmbus/vmbus.c
index 3cc210a5003c..115d4af599ee 100644
--- a/sys/dev/hyperv/vmbus/vmbus.c
+++ b/sys/dev/hyperv/vmbus/vmbus.c
@@ -139,6 +139,10 @@ static void vmbus_event_proc_dummy(struct vmbus_softc *,
int);
static bus_dma_tag_t vmbus_get_dma_tag(device_t parent, device_t child);
static struct vmbus_softc *vmbus_sc;
+#if defined(__x86_64__)
+static int vmbus_alloc_cpu_mem(struct vmbus_softc *sc);
+static void vmbus_free_cpu_mem(struct vmbus_softc *sc);
+#endif
SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
"Hyper-V vmbus");
@@ -146,6 +150,13 @@ SYSCTL_NODE(_hw, OID_AUTO, vmbus, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
static int vmbus_pin_evttask = 1;
SYSCTL_INT(_hw_vmbus, OID_AUTO, pin_evttask, CTLFLAG_RDTUN,
&vmbus_pin_evttask, 0, "Pin event tasks to their respective CPU");
+
+#if defined(__x86_64__)
+static int hv_tlb_hcall = 1;
+SYSCTL_INT(_hw_vmbus, OID_AUTO, tlb_hcall, CTLFLAG_RDTUN,
+ &hv_tlb_hcall, 0, "Use Hyper-V hypercall for TLB flush");
+#endif
+
uint32_t vmbus_current_version;
static const uint32_t vmbus_version[] = {
@@ -208,6 +219,8 @@ static driver_t vmbus_driver = {
sizeof(struct vmbus_softc)
};
+uint32_t hv_max_vp_index;
+
DRIVER_MODULE(vmbus, pcib, vmbus_driver, NULL, NULL);
DRIVER_MODULE(vmbus, acpi_syscontainer, vmbus_driver, NULL, NULL);
@@ -546,8 +559,8 @@ vmbus_scan(struct vmbus_softc *sc)
/*
* Identify, probe and attach for non-channel devices.
*/
- bus_generic_probe(sc->vmbus_dev);
- bus_generic_attach(sc->vmbus_dev);
+ bus_identify_children(sc->vmbus_dev);
+ bus_attach_children(sc->vmbus_dev);
/*
* This taskqueue serializes vmbus devices' attach and detach
@@ -748,6 +761,9 @@ vmbus_synic_setup(void *xsc)
VMBUS_PCPU_GET(sc, vcpuid, cpu) = 0;
}
+ if (VMBUS_PCPU_GET(sc, vcpuid, cpu) > hv_max_vp_index)
+ hv_max_vp_index = VMBUS_PCPU_GET(sc, vcpuid, cpu);
+
/*
* Setup the SynIC message.
*/
@@ -786,6 +802,16 @@ vmbus_synic_setup(void *xsc)
WRMSR(MSR_HV_SCONTROL, val);
}
+#if defined(__x86_64__)
+void
+hyperv_vm_tlb_flush(pmap_t pmap, vm_offset_t addr1, vm_offset_t addr2,
+ smp_invl_local_cb_t curcpu_cb, enum invl_op_codes op)
+{
+ struct vmbus_softc *sc = vmbus_get_softc();
+ return hv_vm_tlb_flush(pmap, addr1, addr2, op, sc, curcpu_cb);
+}
+#endif /*__x86_64__*/
+
static void
vmbus_synic_teardown(void *arg)
{
@@ -874,29 +900,27 @@ vmbus_dma_free(struct vmbus_softc *sc)
int cpu;
if (sc->vmbus_evtflags != NULL) {
- contigfree(sc->vmbus_evtflags, PAGE_SIZE, M_DEVBUF);
+ free(sc->vmbus_evtflags, M_DEVBUF);
sc->vmbus_evtflags = NULL;
sc->vmbus_rx_evtflags = NULL;
sc->vmbus_tx_evtflags = NULL;
}
if (sc->vmbus_mnf1 != NULL) {
- contigfree(sc->vmbus_mnf1, PAGE_SIZE, M_DEVBUF);
+ free(sc->vmbus_mnf1, M_DEVBUF);
sc->vmbus_mnf1 = NULL;
}
if (sc->vmbus_mnf2 != NULL) {
- contigfree(sc->vmbus_mnf2, sizeof(struct vmbus_mnf), M_DEVBUF);
+ free(sc->vmbus_mnf2, M_DEVBUF);
sc->vmbus_mnf2 = NULL;
}
CPU_FOREACH(cpu) {
if (VMBUS_PCPU_GET(sc, message, cpu) != NULL) {
- contigfree(VMBUS_PCPU_GET(sc, message, cpu), PAGE_SIZE,
- M_DEVBUF);
+ free(VMBUS_PCPU_GET(sc, message, cpu), M_DEVBUF);
VMBUS_PCPU_GET(sc, message, cpu) = NULL;
}
if (VMBUS_PCPU_GET(sc, event_flags, cpu) != NULL) {
- contigfree(VMBUS_PCPU_GET(sc, event_flags, cpu),
- PAGE_SIZE, M_DEVBUF);
+ free(VMBUS_PCPU_GET(sc, event_flags, cpu), M_DEVBUF);
VMBUS_PCPU_GET(sc, event_flags, cpu) = NULL;
}
}
@@ -989,7 +1013,8 @@ vmbus_add_child(struct vmbus_channel *chan)
device_t parent = sc->vmbus_dev;
bus_topo_lock();
- chan->ch_dev = device_add_child(parent, NULL, -1);
+
+ chan->ch_dev = device_add_child(parent, NULL, DEVICE_UNIT_ANY);
if (chan->ch_dev == NULL) {
bus_topo_unlock();
device_printf(parent, "device_add_child for chan%u failed\n",
@@ -1043,15 +1068,12 @@ vmbus_alloc_resource(device_t dev, device_t child, int type, int *rid,
device_t parent = device_get_parent(dev);
struct resource *res;
-#ifdef NEW_PCIB
if (type == SYS_RES_MEMORY) {
struct vmbus_softc *sc = device_get_softc(dev);
res = pcib_host_res_alloc(&sc->vmbus_mmio_res, child, type,
rid, start, end, count, flags);
- } else
-#endif
- {
+ } else {
res = BUS_ALLOC_RESOURCE(parent, child, type, rid, start,
end, count, flags);
}
@@ -1132,7 +1154,6 @@ vmbus_get_eventtq_method(device_t bus, device_t dev __unused, int cpu)
return (VMBUS_PCPU_GET(sc, event_tq, cpu));
}
-#ifdef NEW_PCIB
#define VTPM_BASE_ADDR 0xfed40000
#define FOUR_GB (1ULL << 32)
@@ -1284,18 +1305,14 @@ vmbus_fb_mmio_res(device_t dev)
#endif /* aarch64 */
rman_res_t fb_start, fb_end, fb_count;
int fb_height, fb_width;
- caddr_t kmdp;
struct vmbus_softc *sc = device_get_softc(dev);
int rid = 0;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- efifb = (struct efi_fb *)preload_search_info(kmdp,
+ efifb = (struct efi_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_FB);
#if !defined(__aarch64__)
- vbefb = (struct vbe_fb *)preload_search_info(kmdp,
+ vbefb = (struct vbe_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_VBE_FB);
#endif /* aarch64 */
if (efifb != NULL) {
@@ -1349,7 +1366,6 @@ vmbus_free_mmio_res(device_t dev)
if (hv_fb_res)
hv_fb_res = NULL;
}
-#endif /* NEW_PCIB */
static void
vmbus_identify(driver_t *driver, device_t parent)
@@ -1358,7 +1374,7 @@ vmbus_identify(driver_t *driver, device_t parent)
if (device_get_unit(parent) != 0 || vm_guest != VM_GUEST_HV ||
(hyperv_features & CPUID_HV_MSR_SYNIC) == 0)
return;
- device_add_child(parent, "vmbus", -1);
+ device_add_child(parent, "vmbus", DEVICE_UNIT_ANY);
}
static int
@@ -1373,6 +1389,42 @@ vmbus_probe(device_t dev)
return (BUS_PROBE_DEFAULT);
}
+#if defined(__x86_64__)
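+/*
+ * Preallocate one page per CPU to hold the input of the TLB-flush
+ * hypercall.
+ */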
+static int
+vmbus_alloc_cpu_mem(struct vmbus_softc *sc)
+{
+ int cpu;
+
+ CPU_FOREACH(cpu) {
+ void **hv_cpu_mem;
+
+ hv_cpu_mem = VMBUS_PCPU_PTR(sc, cpu_mem, cpu);
+ *hv_cpu_mem = contigmalloc(PAGE_SIZE, M_DEVBUF,
+ M_NOWAIT | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0);
+
+ if (*hv_cpu_mem == NULL)
+ return ENOMEM;
+ }
+
+ return 0;
+}
+
+static void
+vmbus_free_cpu_mem(struct vmbus_softc *sc)
+{
+ int cpu;
+
+ CPU_FOREACH(cpu) {
+ void **hv_cpu_mem;
+ hv_cpu_mem = VMBUS_PCPU_PTR(sc, cpu_mem, cpu);
+ if (*hv_cpu_mem != NULL) {
+ free(*hv_cpu_mem, M_DEVBUF);
+ *hv_cpu_mem = NULL;
+ }
+ }
+}
+#endif
+
/**
* @brief Main vmbus driver initialization routine.
*
@@ -1398,10 +1450,8 @@ vmbus_doattach(struct vmbus_softc *sc)
if (sc->vmbus_flags & VMBUS_FLAG_ATTACHED)
return (0);
-#ifdef NEW_PCIB
vmbus_get_mmio_res(sc->vmbus_dev);
vmbus_fb_mmio_res(sc->vmbus_dev);
-#endif
sc->vmbus_flags |= VMBUS_FLAG_ATTACHED;
@@ -1462,6 +1512,25 @@ vmbus_doattach(struct vmbus_softc *sc)
if (ret != 0)
goto cleanup;
+#if defined(__x86_64__)
+ /*
+ * Allocate per-CPU memory for the TLB flush hypercall.
+ */
+ if (hv_tlb_hcall) {
+ ret = vmbus_alloc_cpu_mem(sc);
+ if (ret != 0) {
+ hv_tlb_hcall = 0;
+ if (bootverbose)
+ device_printf(sc->vmbus_dev,
+ "cannot alloc contig memory for "
+ "cpu_mem, use system provided "
+ "tlb flush call.\n");
+
+ vmbus_free_cpu_mem(sc);
+ }
+ }
+#endif
+
/*
* Setup SynIC.
*/
@@ -1470,6 +1539,11 @@ vmbus_doattach(struct vmbus_softc *sc)
smp_rendezvous(NULL, vmbus_synic_setup, NULL, sc);
sc->vmbus_flags |= VMBUS_FLAG_SYNIC;
+#if defined(__x86_64__)
+ if (hv_tlb_hcall)
+ smp_targeted_tlb_shootdown = &hyperv_vm_tlb_flush;
+#endif
+
/*
* Initialize vmbus, e.g. connect to Hypervisor.
*/
@@ -1573,6 +1647,16 @@ vmbus_detach(device_t dev)
smp_rendezvous(NULL, vmbus_synic_teardown, NULL, NULL);
}
+#if defined(__x86_64__)
+ /*
+ * Restore the native TLB flush call.
+ */
+ if (hv_tlb_hcall) {
+ smp_targeted_tlb_shootdown = &smp_targeted_tlb_shootdown_native;
+ vmbus_free_cpu_mem(sc);
+ }
+#endif
+
vmbus_intr_teardown(sc);
vmbus_dma_free(sc);
@@ -1585,9 +1669,7 @@ vmbus_detach(device_t dev)
mtx_destroy(&sc->vmbus_prichan_lock);
mtx_destroy(&sc->vmbus_chan_lock);
-#ifdef NEW_PCIB
vmbus_free_mmio_res(dev);
-#endif
#if defined(__aarch64__)
bus_release_resource(device_get_parent(dev), SYS_RES_IRQ, sc->vector,
diff --git a/sys/dev/hyperv/vmbus/vmbus_chan.c b/sys/dev/hyperv/vmbus/vmbus_chan.c
index 0922470d4672..7ea60a499c72 100644
--- a/sys/dev/hyperv/vmbus/vmbus_chan.c
+++ b/sys/dev/hyperv/vmbus/vmbus_chan.c
@@ -34,11 +34,11 @@
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <machine/atomic.h>
-#include <machine/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -341,8 +341,7 @@ vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
* Allocate the TX+RX bufrings.
*/
KASSERT(chan->ch_bufring == NULL, ("bufrings are allocated"));
- chan->ch_bufring_size = txbr_size + rxbr_size;
- chan->ch_bufring = contigmalloc(chan->ch_bufring_size, M_DEVBUF,
+ chan->ch_bufring = contigmalloc(txbr_size + rxbr_size, M_DEVBUF,
M_WAITOK | M_ZERO, 0ul, ~0ul, PAGE_SIZE, 0);
if (chan->ch_bufring == NULL) {
vmbus_chan_printf(chan, "bufring allocation failed\n");
@@ -368,8 +367,7 @@ vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
"leak %d bytes memory\n", chan->ch_id,
txbr_size + rxbr_size);
} else {
- contigfree(chan->ch_bufring, chan->ch_bufring_size,
- M_DEVBUF);
+ free(chan->ch_bufring, M_DEVBUF);
}
chan->ch_bufring = NULL;
}
@@ -939,7 +937,7 @@ disconnect:
* Destroy the TX+RX bufrings.
*/
if (chan->ch_bufring != NULL) {
- contigfree(chan->ch_bufring, chan->ch_bufring_size, M_DEVBUF);
+ free(chan->ch_bufring, M_DEVBUF);
chan->ch_bufring = NULL;
}
return (error);
@@ -1557,7 +1555,7 @@ vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
continue;
flags = atomic_swap_long(&event_flags[f], 0);
- chid_base = f << VMBUS_EVTFLAG_SHIFT;
+ chid_base = f * VMBUS_EVTFLAG_LEN;
while ((chid_ofs = ffsl(flags)) != 0) {
struct vmbus_channel *chan;
@@ -1601,7 +1599,7 @@ vmbus_event_proc_compat(struct vmbus_softc *sc, int cpu)
eventf = VMBUS_PCPU_GET(sc, event_flags, cpu) + VMBUS_SINT_MESSAGE;
if (atomic_testandclear_long(&eventf->evt_flags[0], 0)) {
vmbus_event_flags_proc(sc, sc->vmbus_rx_evtflags,
- VMBUS_CHAN_MAX_COMPAT >> VMBUS_EVTFLAG_SHIFT);
+ VMBUS_CHAN_MAX_COMPAT / VMBUS_EVTFLAG_LEN);
}
}
@@ -1679,7 +1677,7 @@ vmbus_chan_free(struct vmbus_channel *chan)
KASSERT(chan->ch_poll_intvl == 0, ("chan%u: polling is activated",
chan->ch_id));
- contigfree(chan->ch_monprm, sizeof(struct hyperv_mon_param), M_DEVBUF);
+ free(chan->ch_monprm, M_DEVBUF);
mtx_destroy(&chan->ch_subchan_lock);
sx_destroy(&chan->ch_orphan_lock);
vmbus_rxbr_deinit(&chan->ch_rxbr);
@@ -1905,7 +1903,7 @@ vmbus_chan_msgproc_choffer(struct vmbus_softc *sc,
* Setup event flag.
*/
chan->ch_evtflag =
- &sc->vmbus_tx_evtflags[chan->ch_id >> VMBUS_EVTFLAG_SHIFT];
+ &sc->vmbus_tx_evtflags[chan->ch_id / VMBUS_EVTFLAG_LEN];
chan->ch_evtflag_mask = 1UL << (chan->ch_id & VMBUS_EVTFLAG_MASK);
/*
diff --git a/sys/dev/hyperv/vmbus/vmbus_chanvar.h b/sys/dev/hyperv/vmbus/vmbus_chanvar.h
index e88ada2dd274..c02ec24c70a1 100644
--- a/sys/dev/hyperv/vmbus/vmbus_chanvar.h
+++ b/sys/dev/hyperv/vmbus/vmbus_chanvar.h
@@ -123,7 +123,6 @@ struct vmbus_channel {
struct vmbus_channel *ch_prichan; /* owner primary chan */
void *ch_bufring; /* TX+RX bufrings */
- size_t ch_bufring_size;
uint32_t ch_bufring_gpadl;
struct task ch_attach_task; /* run in ch_mgmt_tq */
diff --git a/sys/dev/hyperv/vmbus/vmbus_et.c b/sys/dev/hyperv/vmbus/vmbus_et.c
index 4ff011cfd77e..33eb94daacd3 100644
--- a/sys/dev/hyperv/vmbus/vmbus_et.c
+++ b/sys/dev/hyperv/vmbus/vmbus_et.c
@@ -127,12 +127,12 @@ static void
vmbus_et_identify(driver_t *driver, device_t parent)
{
if (device_get_unit(parent) != 0 ||
- device_find_child(parent, VMBUS_ET_NAME, -1) != NULL ||
+ device_find_child(parent, VMBUS_ET_NAME, DEVICE_UNIT_ANY) != NULL ||
(hyperv_features & CPUID_HV_ET_MASK) != CPUID_HV_ET_MASK ||
hyperv_tc64 == NULL)
return;
- device_add_child(parent, VMBUS_ET_NAME, -1);
+ device_add_child(parent, VMBUS_ET_NAME, DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/hyperv/vmbus/vmbus_reg.h b/sys/dev/hyperv/vmbus/vmbus_reg.h
index 4aa729475b5d..76cdca0ebeb2 100644
--- a/sys/dev/hyperv/vmbus/vmbus_reg.h
+++ b/sys/dev/hyperv/vmbus/vmbus_reg.h
@@ -60,16 +60,10 @@ CTASSERT(sizeof(struct vmbus_message) == VMBUS_MSG_SIZE);
* Hyper-V SynIC event flags
*/
-#ifdef __LP64__
-#define VMBUS_EVTFLAGS_MAX 32
-#define VMBUS_EVTFLAG_SHIFT 6
-#else
-#define VMBUS_EVTFLAGS_MAX 64
-#define VMBUS_EVTFLAG_SHIFT 5
-#endif
-#define VMBUS_EVTFLAG_LEN (1 << VMBUS_EVTFLAG_SHIFT)
+#define VMBUS_EVTFLAG_LEN (sizeof(u_long) * 8)
#define VMBUS_EVTFLAG_MASK (VMBUS_EVTFLAG_LEN - 1)
#define VMBUS_EVTFLAGS_SIZE 256
+#define VMBUS_EVTFLAGS_MAX (VMBUS_EVTFLAGS_SIZE / sizeof(u_long))
struct vmbus_evtflags {
u_long evt_flags[VMBUS_EVTFLAGS_MAX];
diff --git a/sys/dev/hyperv/vmbus/vmbus_var.h b/sys/dev/hyperv/vmbus/vmbus_var.h
index 023d27c52cea..cadcaa45aae5 100644
--- a/sys/dev/hyperv/vmbus/vmbus_var.h
+++ b/sys/dev/hyperv/vmbus/vmbus_var.h
@@ -32,6 +32,11 @@
#include <sys/taskqueue.h>
#include <sys/rman.h>
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_param.h>
+#include <vm/pmap.h>
+
#include <dev/pci/pcivar.h>
#include <dev/pci/pcib_private.h>
@@ -69,6 +74,9 @@ struct vmbus_pcpu_data {
uint32_t vcpuid; /* virtual cpuid */
int event_flags_cnt;/* # of event flags */
struct vmbus_evtflags *event_flags; /* event flags from host */
+#if defined(__x86_64__)
+ void *cpu_mem; /* For Hyper-V tlb hypercall */
+#endif
/* Rarely used fields */
struct taskqueue *event_tq; /* event taskq */
@@ -119,10 +127,8 @@ struct vmbus_softc {
struct intr_config_hook vmbus_intrhook;
-#ifdef NEW_PCIB
/* The list of usable MMIO ranges for PCIe pass-through */
struct pcib_host_resources vmbus_mmio_res;
-#endif
#if defined(__aarch64__)
struct resource *ires;
@@ -137,6 +143,40 @@ struct vmbus_softc {
#define VMBUS_PCPU_GET(sc, field, cpu) (sc)->vmbus_pcpu[(cpu)].field
#define VMBUS_PCPU_PTR(sc, field, cpu) &(sc)->vmbus_pcpu[(cpu)].field
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE 0x0002
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
+#define HV_FLUSH_ALL_PROCESSORS BIT(0)
+#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
+#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
+#define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE)
+
+
+#define BIT(n) (1ULL << (n))
+#define BITS_PER_LONG (sizeof(long) * NBBY)
+#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define set_bit(i, a) \
+ atomic_set_long(&((volatile unsigned long *)(a))[BIT_WORD(i)], BIT_MASK(i))
+
+#define GENMASK_ULL(h, l) (((~0ULL) >> (64 - (h) - 1)) & ((~0ULL) << (l)))
+
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST 0x0003
+#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX 0x0014
+#define HYPERV_X64_EX_PROCESSOR_MASKS_RECOMMENDED BIT(11)
+#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
+#define HV_STATUS_SUCCESS 0
+#define HV_HYPERCALL_REP_COMP_MASK GENMASK_ULL(43, 32)
+#define HV_HYPERCALL_REP_COMP_OFFSET 32
+
+#define HV_HYPERCALL_VARHEAD_OFFSET 17
+
+#define HV_HYPERCALL_REP_START_MASK GENMASK_ULL(59, 48)
+#define HV_HYPERCALL_REP_START_OFFSET 48
+
+enum HV_GENERIC_SET_FORMAT {
+ HV_GENERIC_SET_SPARSE_4K,
+ HV_GENERIC_SET_ALL,
+};
struct vmbus_channel;
struct trapframe;
@@ -176,4 +216,17 @@ void vmbus_synic_setup1(void *xsc);
void vmbus_synic_teardown1(void);
int vmbus_setup_intr1(struct vmbus_softc *sc);
void vmbus_intr_teardown1(struct vmbus_softc *sc);
+
+extern uint32_t hv_max_vp_index;
+
+
+#if defined(__x86_64__)
+void hyperv_vm_tlb_flush(pmap_t, vm_offset_t,
+ vm_offset_t, smp_invl_local_cb_t, enum invl_op_codes);
+uint64_t hv_flush_tlb_others_ex(pmap_t, vm_offset_t, vm_offset_t,
+ cpuset_t, enum invl_op_codes, struct vmbus_softc *);
+void hv_vm_tlb_flush(pmap_t, vm_offset_t, vm_offset_t,
+ enum invl_op_codes, struct vmbus_softc *,
+ smp_invl_local_cb_t);
+#endif /* __x86_64__ */
#endif /* !_VMBUS_VAR_H_ */
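
Note: the Linux-style bit helpers above map a flat bit index onto an array
of longs: BIT_WORD() selects the array element, BIT_MASK() the bit within
it, and set_bit() applies the mask atomically. A minimal sketch of their
semantics:

	unsigned long bank[2] = { 0, 0 };

	set_bit(3, bank);			/* bank[0] |= 1UL << 3 */
	set_bit(BITS_PER_LONG + 1, bank);	/* bank[1] |= 1UL << 1 */
	/* GENMASK_ULL(15, 0) == 0xFFFFULL, i.e. HV_HYPERCALL_RESULT_MASK */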
diff --git a/sys/dev/hyperv/vmbus/vmbus_xact.c b/sys/dev/hyperv/vmbus/vmbus_xact.c
index f5f766f2c2fc..eb221ef92b2c 100644
--- a/sys/dev/hyperv/vmbus/vmbus_xact.c
+++ b/sys/dev/hyperv/vmbus/vmbus_xact.c
@@ -104,7 +104,7 @@ static void
vmbus_xact_free(struct vmbus_xact *xact)
{
- contigfree(xact->x_req, xact->x_ctx->xc_req_size, M_DEVBUF);
+ free(xact->x_req, M_DEVBUF);
free(xact->x_resp0, M_DEVBUF);
if (xact->x_priv != NULL)
free(xact->x_priv, M_DEVBUF);
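
Note: the contigfree() -> free() conversions here and in vmbus_chan.c track
the retirement of contigfree(9): free(9) recognizes memory obtained from
contigmalloc(9), so callers no longer have to carry the allocation size
(hence the ch_bufring_size field dropped above). A minimal sketch of the
resulting pairing, with placeholder address bounds:

	buf = contigmalloc(size, M_DEVBUF, M_WAITOK, 0ul, ~0ul, PAGE_SIZE, 0);
	/* ... use buf ... */
	free(buf, M_DEVBUF);	/* no size argument required anymore */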
diff --git a/sys/dev/iavf/iavf_iflib.h b/sys/dev/iavf/iavf_iflib.h
index ec083d66a209..83891f9ed520 100644
--- a/sys/dev/iavf/iavf_iflib.h
+++ b/sys/dev/iavf/iavf_iflib.h
@@ -52,6 +52,7 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
+#include <sys/stdarg.h>
#include <sys/syslog.h>
#include <net/if.h>
@@ -94,7 +95,6 @@
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
-#include <machine/stdarg.h>
#include <net/ethernet.h>
#include <net/iflib.h>
#include "ifdi_if.h"
diff --git a/sys/dev/iavf/iavf_lib.c b/sys/dev/iavf/iavf_lib.c
index 883a722b3a03..433d31904ea4 100644
--- a/sys/dev/iavf/iavf_lib.c
+++ b/sys/dev/iavf/iavf_lib.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -511,7 +511,7 @@ iavf_get_vsi_res_from_vf_res(struct iavf_sc *sc)
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
/* XXX: We only use the first VSI we find */
- if (sc->vf_res->vsi_res[i].vsi_type == IAVF_VSI_SRIOV)
+ if (sc->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
sc->vsi_res = &sc->vf_res->vsi_res[i];
}
if (!sc->vsi_res) {
@@ -1464,30 +1464,6 @@ iavf_mark_del_vlan_filter(struct iavf_sc *sc, u16 vtag)
}
/**
- * iavf_update_msix_devinfo - Fix MSIX values for pci_msix_count()
- * @dev: pointer to kernel device
- *
- * Fix cached MSI-X control register information. This is a workaround
- * for an issue where VFs spawned in non-passthrough mode on FreeBSD
- * will have their PCI information cached before the PF driver
- * finishes updating their PCI information.
- *
- * @pre Must be called before pci_msix_count()
- */
-void
-iavf_update_msix_devinfo(device_t dev)
-{
- struct pci_devinfo *dinfo;
- u32 msix_ctrl;
-
- dinfo = (struct pci_devinfo *)device_get_ivars(dev);
- /* We can hardcode this offset since we know the device */
- msix_ctrl = pci_read_config(dev, 0x70 + PCIR_MSIX_CTRL, 2);
- dinfo->cfg.msix.msix_ctrl = msix_ctrl;
- dinfo->cfg.msix.msix_msgnum = (msix_ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
-}
-
-/**
* iavf_disable_queues_with_retries - Send PF multiple DISABLE_QUEUES messages
* @sc: device softc
*
diff --git a/sys/dev/iavf/iavf_lib.h b/sys/dev/iavf/iavf_lib.h
index f3ccd9f0c52f..48c0f4560e5a 100644
--- a/sys/dev/iavf/iavf_lib.h
+++ b/sys/dev/iavf/iavf_lib.h
@@ -40,7 +40,7 @@
#define _IAVF_LIB_H_
#include <sys/malloc.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#ifdef RSS
#include <net/rss_config.h>
@@ -474,7 +474,6 @@ struct iavf_mac_filter *
u64 iavf_baudrate_from_link_speed(struct iavf_sc *sc);
void iavf_add_vlan_filter(struct iavf_sc *sc, u16 vtag);
int iavf_mark_del_vlan_filter(struct iavf_sc *sc, u16 vtag);
-void iavf_update_msix_devinfo(device_t dev);
void iavf_disable_queues_with_retries(struct iavf_sc *);
int iavf_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
diff --git a/sys/dev/iavf/iavf_osdep.c b/sys/dev/iavf/iavf_osdep.c
index 1c3aefe253a1..b81fded48926 100644
--- a/sys/dev/iavf/iavf_osdep.c
+++ b/sys/dev/iavf/iavf_osdep.c
@@ -37,7 +37,7 @@
* independent layer for sharing code between drivers on different operating
* systems.
*/
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include "iavf_iflib.h"
diff --git a/sys/dev/iavf/if_iavf_iflib.c b/sys/dev/iavf/if_iavf_iflib.c
index 714d34eeebe5..e4dd3b1e59a4 100644
--- a/sys/dev/iavf/if_iavf_iflib.c
+++ b/sys/dev/iavf/if_iavf_iflib.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2021, Intel Corporation
+/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -370,7 +370,7 @@ iavf_if_attach_pre(if_ctx_t ctx)
}
scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
* sizeof(union iavf_32byte_rx_desc), DBA_ALIGN);
- scctx->isc_msix_bar = PCIR_BAR(IAVF_MSIX_BAR);
+ scctx->isc_msix_bar = pci_msix_table_bar(dev);
scctx->isc_tx_nsegments = IAVF_MAX_TX_SEGS;
scctx->isc_tx_tso_segments_max = IAVF_MAX_TSO_SEGS;
scctx->isc_tx_tso_size_max = IAVF_TSO_SIZE;
@@ -379,9 +379,6 @@ iavf_if_attach_pre(if_ctx_t ctx)
scctx->isc_capabilities = scctx->isc_capenable = IAVF_CAPS;
scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
- /* Update OS cache of MSIX control register values */
- iavf_update_msix_devinfo(dev);
-
return (0);
err_vc_tq:
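
Note: instead of the hardcoded IAVF_MSIX_BAR, the MSI-X table BAR is now
read from the device's MSI-X capability via pci_msix_table_bar(9); the
iavf_update_msix_devinfo() workaround is dropped alongside it, presumably
because the stale-cache issue it papered over is handled elsewhere. A
minimal sketch, assuming an attached iavf device_t:

	int bar = pci_msix_table_bar(dev);	/* e.g. PCIR_BAR(3) */
	int nvec = pci_msix_count(dev);		/* vectors from the MSI-X cap */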
diff --git a/sys/dev/ice/ice_adminq_cmd.h b/sys/dev/ice/ice_adminq_cmd.h
index 70b56144faf2..6225abc0f38b 100644
--- a/sys/dev/ice/ice_adminq_cmd.h
+++ b/sys/dev/ice/ice_adminq_cmd.h
@@ -187,7 +187,7 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_ROCEV2_LAG 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
-
+#define ICE_AQC_CAPS_NEXT_CLUSTER_ID 0x0096
u8 major_ver;
u8 minor_ver;
/* Number of resources described by this capability */
@@ -320,7 +320,12 @@ struct ice_aqc_set_port_params {
(0x3F << ICE_AQC_SET_P_PARAMS_LOGI_PORT_ID_S)
#define ICE_AQC_SET_P_PARAMS_IS_LOGI_PORT BIT(14)
#define ICE_AQC_SET_P_PARAMS_SWID_VALID BIT(15)
- u8 reserved[10];
+ u8 lb_mode;
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID BIT(2)
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL 0x00
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NO 0x01
+#define ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_HIGH 0x02
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -1389,7 +1394,18 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
+#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5)
+#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6)
+#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7)
+#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8)
+#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9)
+#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12)
+#define ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC BIT_ULL(13)
+#define ICE_PHY_TYPE_HIGH_200G_AUI8 BIT_ULL(14)
+#define ICE_PHY_TYPE_HIGH_400GBASE_FR8 BIT_ULL(15)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 15
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1541,11 +1557,14 @@ struct ice_aqc_get_link_status {
enum ice_get_link_status_data_version {
ICE_GET_LINK_STATUS_DATA_V1 = 1,
+ ICE_GET_LINK_STATUS_DATA_V2 = 2,
};
#define ICE_GET_LINK_STATUS_DATALEN_V1 32
+#define ICE_GET_LINK_STATUS_DATALEN_V2 56
/* Get link status response data structure, also used for Link Status Event */
+#pragma pack(1)
struct ice_aqc_get_link_status_data {
u8 topo_media_conflict;
#define ICE_AQ_LINK_TOPO_CONFLICT BIT(0)
@@ -1618,7 +1637,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_PWR_QSFP_CLASS_3 2
#define ICE_AQ_LINK_PWR_QSFP_CLASS_4 3
__le16 link_speed;
-#define ICE_AQ_LINK_SPEED_M 0x7FF
+#define ICE_AQ_LINK_SPEED_M 0xFFF
#define ICE_AQ_LINK_SPEED_10MB BIT(0)
#define ICE_AQ_LINK_SPEED_100MB BIT(1)
#define ICE_AQ_LINK_SPEED_1000MB BIT(2)
@@ -1630,12 +1649,37 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
#define ICE_AQ_LINK_SPEED_50GB BIT(9)
#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_200GB BIT(11)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
- __le32 reserved3; /* Aligns next field to 8-byte boundary */
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
__le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
+#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved[5];
};
+#pragma pack()
+
/* Set event mask command (direct 0x0613) */
struct ice_aqc_set_event_mask {
u8 lport_num;
@@ -1793,14 +1837,46 @@ struct ice_aqc_dnl_call_command {
u8 ctx; /* Used in command, reserved in response */
u8 reserved;
__le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
__le32 reserved1;
__le32 addr_high;
__le32 addr_low;
};
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_resp {
+	/* Equalization value can be negative */
+ int val;
+ __le32 reserved[3];
+};
+
/* DNL call command/response buffer (indirect 0x0682) */
struct ice_aqc_dnl_call {
- __le32 stores[4];
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_resp txrx_equa_resp;
+ } sto;
};
/* Used for both commands:
@@ -1902,8 +1978,8 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
/* Used to decode the handle field */
#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
-#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_LOM 0
+#define ICE_AQC_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ BIT(9)
#define ICE_AQC_LINK_TOPO_HANDLE_NODE_S 0
/* In case of a Mezzanine type */
#define ICE_AQC_LINK_TOPO_HANDLE_MEZZ_NODE_M \
@@ -1919,7 +1995,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
-#define ICE_ACQ_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -2054,6 +2130,7 @@ struct ice_aqc_get_port_options_elem {
#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+#define ICE_AQC_PORT_OPT_MAX_LANE_200G 8
u8 global_scid[2];
u8 phy_scid[2];
u8 pf2port_cid[2];
@@ -2201,6 +2278,29 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
#define ICE_AQC_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_PTR_OFFSET 0xD8
+#define ICE_AQC_NVM_SDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_PTR_M MAKEMASK(0x7FFF, 0)
+#define ICE_AQC_NVM_SDP_CFG_PTR_TYPE_M BIT(15)
+#define ICE_AQC_NVM_SDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_SEC_LEN_LEN 2 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_DATA_LEN 14 /* In Bytes */
+#define ICE_AQC_NVM_SDP_CFG_MAX_SECTION_SIZE 7
+#define ICE_AQC_NVM_SDP_CFG_PIN_SIZE 10
+#define ICE_AQC_NVM_SDP_CFG_PIN_OFFSET 6
+#define ICE_AQC_NVM_SDP_CFG_PIN_MASK MAKEMASK(0x3FF, \
+ ICE_AQC_NVM_SDP_CFG_PIN_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_CHAN_OFFSET 4
+#define ICE_AQC_NVM_SDP_CFG_CHAN_MASK MAKEMASK(0x3, \
+ ICE_AQC_NVM_SDP_CFG_CHAN_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_DIR_OFFSET 3
+#define ICE_AQC_NVM_SDP_CFG_DIR_MASK MAKEMASK(0x1, \
+ ICE_AQC_NVM_SDP_CFG_DIR_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_SDP_NUM_OFFSET 0
+#define ICE_AQC_NVM_SDP_CFG_SDP_NUM_MASK MAKEMASK(0x7, \
+ ICE_AQC_NVM_SDP_CFG_SDP_NUM_OFFSET)
+#define ICE_AQC_NVM_SDP_CFG_NA_PIN_MASK MAKEMASK(0x1, 15)
+
#define ICE_AQC_NVM_MINSREV_MOD_ID 0x130
#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
#define ICE_AQC_NVM_CMPO_MOD_ID 0x153
@@ -2265,6 +2365,29 @@ struct ice_aqc_nvm_checksum {
u8 rsvd2[12];
};
+/* Used for NVM Sanitization command - 0x070C */
+struct ice_aqc_nvm_sanitization {
+ u8 cmd_flags;
+#define ICE_AQ_NVM_SANITIZE_REQ_READ 0
+#define ICE_AQ_NVM_SANITIZE_REQ_OPERATE BIT(0)
+
+#define ICE_AQ_NVM_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define ICE_AQ_NVM_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define ICE_AQ_NVM_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define ICE_AQ_NVM_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define ICE_AQ_NVM_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
/*
* Send to PF command (indirect 0x0801) ID is only used by PF
*
@@ -2560,6 +2683,15 @@ struct ice_aqc_get_set_rss_lut {
__le32 addr_low;
};
+/* Sideband Control Interface Commands */
+/* Neighbor Device Request (indirect 0x0C00); also used for the response. */
+struct ice_aqc_neigh_dev_req {
+ __le16 sb_data_len;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Add Tx LAN Queues (indirect 0x0C30) */
struct ice_aqc_add_txqs {
u8 num_qgrps;
@@ -2812,19 +2944,33 @@ struct ice_aqc_event_lan_overflow {
/* Debug Dump Internal Data (indirect 0xFF08) */
struct ice_aqc_debug_dump_internals {
__le16 cluster_id; /* Expresses next cluster ID in response */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW 0
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED 2
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES 3
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL_E810 1
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED_E810 2
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES_E810 3
/* EMP_DRAM only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM 4
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK 5
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_EMP_DRAM_E810 4
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK_E810 5
/* AUX_REGS only dumpable in device debug mode */
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS 6
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB 7
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P 8
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG 9
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 21
-#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS 22
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_AUX_REGS_E810 6
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB_E810 7
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P_E810 8
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E810 9
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE_E810 21
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_MNG_TRANSACTIONS_E810 22
+
+/* Start cluster to discover first available cluster */
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_START_ALL 0
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E830 100
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_ACL_E830 101
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_TXSCHED_E830 102
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_PROFILES_E830 103
+/* EMP_DRAM only dumpable in device debug mode */
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_LINK_E830 105
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_DCB_E830 107
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_L2P_E830 108
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E830 109
+#define ICE_AQC_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE_E830 121
__le16 table_id; /* Used only for non-memory clusters */
__le32 idx; /* In table entries for tables, in bytes for memory */
__le32 addr_high;
@@ -3076,6 +3222,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_cfg nvm_cfg;
struct ice_aqc_nvm_checksum nvm_checksum;
+ struct ice_aqc_nvm_sanitization sanitization;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_read_write_alt_direct read_write_alt_direct;
struct ice_aqc_read_write_alt_indirect read_write_alt_indirect;
@@ -3095,6 +3242,7 @@ struct ice_aq_desc {
struct ice_aqc_lldp_filter_ctrl lldp_filter_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
+ struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
struct ice_aqc_move_txqs move_txqs;
@@ -3330,6 +3478,7 @@ enum ice_adminq_opc {
ice_aqc_opc_nvm_update_empr = 0x0709,
ice_aqc_opc_nvm_pkg_data = 0x070A,
ice_aqc_opc_nvm_pass_component_tbl = 0x070B,
+ ice_aqc_opc_nvm_sanitization = 0x070C,
/* PF/VF mailbox commands */
ice_mbx_opc_send_msg_to_pf = 0x0801,
@@ -3360,6 +3509,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_rss_lut = 0x0B03,
ice_aqc_opc_get_rss_key = 0x0B04,
ice_aqc_opc_get_rss_lut = 0x0B05,
+ /* Sideband Control Interface commands */
+ ice_aqc_opc_neighbour_device_request = 0x0C00,
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
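
Note: two details in the header changes above are easy to miss:
ICE_AQ_LINK_SPEED_M widens from 0x7FF to 0xFFF so the new 200 Gb/s bit
(BIT(11)) survives masking, and the link-status structure is packed to
match the longer 56-byte V2 wire layout. A decode sketch with a
hypothetical raw value:

	__le16 raw = CPU_TO_LE16(0x0800);	/* hypothetical: bit 11 set */
	u16 speed = LE16_TO_CPU(raw) & ICE_AQ_LINK_SPEED_M;

	if (speed & ICE_AQ_LINK_SPEED_200GB)
		;	/* 200G link; the old 0x7FF mask would have dropped it */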
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index 499ee41228c3..a623f810c101 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -198,7 +198,7 @@ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
@@ -237,7 +237,7 @@ ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
@@ -445,10 +445,10 @@ ice_bitmap_set(ice_bitmap_t *dst, u16 pos, u16 num_bits)
* Note that this function assumes it is operating on a bitmap declared using
* ice_declare_bitmap.
*/
-static inline int
+static inline u16
ice_bitmap_hweight(ice_bitmap_t *bm, u16 size)
{
- int count = 0;
+ u16 count = 0;
u16 bit = 0;
while (size > (bit = ice_find_next_bit(bm, size, bit))) {
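
Note: ice_bitmap_hweight() now returns u16 to match its u16 size argument;
a population count over at most 65535 bits always fits. An equivalent
standalone sketch (not the driver's implementation, which walks set bits
with ice_find_next_bit()):

	static inline u16
	hweight_sketch(const unsigned long *bm, u16 nbits)
	{
		u16 bit, count = 0;

		for (bit = 0; bit < nbits; bit++)
			if (bm[bit / (sizeof(long) * 8)] &
			    (1UL << (bit % (sizeof(long) * 8))))
				count++;
		return (count);
	}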
diff --git a/sys/dev/ice/ice_common.c b/sys/dev/ice/ice_common.c
index ef487bcfd0f4..b895f661bc46 100644
--- a/sys/dev/ice/ice_common.c
+++ b/sys/dev/ice/ice_common.c
@@ -32,7 +32,6 @@
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
-
#include "ice_flow.h"
#include "ice_switch.h"
@@ -111,6 +110,17 @@ static const char * const ice_link_mode_str_high[] = {
ice_arr_elem_idx(2, "100G_CAUI2"),
ice_arr_elem_idx(3, "100G_AUI2_AOC_ACC"),
ice_arr_elem_idx(4, "100G_AUI2"),
+ ice_arr_elem_idx(5, "200G_CR4_PAM4"),
+ ice_arr_elem_idx(6, "200G_SR4"),
+ ice_arr_elem_idx(7, "200G_FR4"),
+ ice_arr_elem_idx(8, "200G_LR4"),
+ ice_arr_elem_idx(9, "200G_DR4"),
+ ice_arr_elem_idx(10, "200G_KR4_PAM4"),
+ ice_arr_elem_idx(11, "200G_AUI4_AOC_ACC"),
+ ice_arr_elem_idx(12, "200G_AUI4"),
+ ice_arr_elem_idx(13, "200G_AUI8_AOC_ACC"),
+ ice_arr_elem_idx(14, "200G_AUI8"),
+ ice_arr_elem_idx(15, "400GBASE_FR8"),
};
/**
@@ -151,7 +161,7 @@ ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
* This function sets the MAC type of the adapter based on the
* vendor ID and device ID stored in the HW structure.
*/
-enum ice_status ice_set_mac_type(struct ice_hw *hw)
+int ice_set_mac_type(struct ice_hw *hw)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -188,13 +198,52 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823C_SGMII:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ hw->mac_type = ICE_MAC_GENERIC_3K_E825;
+ break;
+ case ICE_DEV_ID_E830_BACKPLANE:
+ case ICE_DEV_ID_E830_QSFP56:
+ case ICE_DEV_ID_E830_SFP:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_L_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_L_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_L_SFP:
+ case ICE_DEV_ID_E835CC_BACKPLANE:
+ case ICE_DEV_ID_E835CC_QSFP56:
+ case ICE_DEV_ID_E835CC_SFP:
+ case ICE_DEV_ID_E835C_BACKPLANE:
+ case ICE_DEV_ID_E835C_QSFP:
+ case ICE_DEV_ID_E835C_SFP:
+ case ICE_DEV_ID_E835_L_BACKPLANE:
+ case ICE_DEV_ID_E835_L_QSFP:
+ case ICE_DEV_ID_E835_L_SFP:
+ hw->mac_type = ICE_MAC_E830;
+ break;
default:
hw->mac_type = ICE_MAC_UNKNOWN;
break;
}
ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
- return ICE_SUCCESS;
+ return 0;
+}
+
+/**
+ * ice_is_generic_mac
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if mac_type is one of the ICE_MAC_GENERIC* variants, false if not
+ */
+bool ice_is_generic_mac(struct ice_hw *hw)
+{
+ return (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->mac_type == ICE_MAC_GENERIC_3K ||
+ hw->mac_type == ICE_MAC_GENERIC_3K_E825);
}
/**
@@ -223,7 +272,7 @@ bool ice_is_e810t(struct ice_hw *hw)
case ICE_SUBDEV_ID_E810T2:
case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T4:
- case ICE_SUBDEV_ID_E810T5:
+ case ICE_SUBDEV_ID_E810T6:
case ICE_SUBDEV_ID_E810T7:
return true;
}
@@ -231,8 +280,8 @@ bool ice_is_e810t(struct ice_hw *hw)
case ICE_DEV_ID_E810C_QSFP:
switch (hw->subsystem_device_id) {
case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
case ICE_SUBDEV_ID_E810T5:
- case ICE_SUBDEV_ID_E810T6:
return true;
}
break;
@@ -244,6 +293,17 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e830
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E830 based, false if not.
+ */
+bool ice_is_e830(struct ice_hw *hw)
+{
+ return hw->mac_type == ICE_MAC_E830;
+}
+
+/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
@@ -269,13 +329,32 @@ bool ice_is_e823(struct ice_hw *hw)
}
/**
+ * ice_is_e825c
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E825-C based, false if not.
+ */
+bool ice_is_e825c(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E825C_BACKPLANE:
+ case ICE_DEV_ID_E825C_QSFP:
+ case ICE_DEV_ID_E825C_SFP:
+ case ICE_DEV_ID_E825C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
* Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
* configuration, flow director filters, etc.).
*/
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
+int ice_clear_pf_cfg(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -299,14 +378,14 @@ enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
* ice_discover_dev_caps is expected to be called before this function is
* called.
*/
-enum ice_status
+int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_manage_mac_read_resp *resp;
struct ice_aqc_manage_mac_read *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 flags;
u8 i;
@@ -340,7 +419,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
ETH_ALEN, ICE_NONDMA_TO_NONDMA);
break;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -401,16 +480,21 @@ static void ice_set_media_type(struct ice_port_info *pi)
* type is FIBER
*/
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
- ICE_MEDIA_OPT_PHY_TYPE_LOW_M, 0) ||
- (phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M &&
- phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M))
+ ICE_MEDIA_OPT_PHY_TYPE_LOW_M,
+ ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) ||
+ ((phy_type_low & ICE_MEDIA_OPT_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_OPT_PHY_TYPE_HIGH_M) &&
+ (phy_type_low & ICE_MEDIA_C2M_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
*media_type = ICE_MEDIA_FIBER;
/* else if PHY types are only DA, or DA and C2C, then media type DA */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
- ICE_MEDIA_DAC_PHY_TYPE_LOW_M, 0) ||
- (phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M &&
- (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
- phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
+ ICE_MEDIA_DAC_PHY_TYPE_LOW_M,
+ ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) ||
+ ((phy_type_low & ICE_MEDIA_DAC_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_DAC_PHY_TYPE_HIGH_M) &&
+ (phy_type_low & ICE_MEDIA_C2C_PHY_TYPE_LOW_M ||
+ phy_type_high & ICE_MEDIA_C2C_PHY_TYPE_HIGH_M)))
*media_type = ICE_MEDIA_DA;
/* else if PHY types are only C2M or only C2C, then media is AUI */
else if (ice_phy_maps_to_media(phy_type_low, phy_type_high,
@@ -435,7 +519,7 @@ static void ice_set_media_type(struct ice_port_info *pi)
*
* Returns the various PHY capabilities supported on the Port (0x0600)
*/
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *pcaps,
struct ice_sq_cd *cd)
@@ -443,9 +527,9 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
- enum ice_status status;
const char *prefix;
struct ice_hw *hw;
+ int status;
cmd = &desc.params.get_phy;
@@ -510,7 +594,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
- if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
+ if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
@@ -525,81 +609,65 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
}
/**
- * ice_aq_get_netlist_node
- * @hw: pointer to the hw struct
- * @cmd: get_link_topo AQ structure
- * @node_part_number: output node part number if node found
- * @node_handle: output node handle parameter if node found
+ * ice_aq_get_phy_equalization - read a serdes equalizer value from
+ * firmware using an admin queue command
+ * @hw: pointer to the HW struct
+ * @data_in: the serdes equalization parameter requested
+ * @op_code: operation code selecting Tx or Rx equalization
+ * @serdes_num: the serdes number
+ * @output: pointer to the caller-supplied buffer to return the serdes equalizer value
+ *
+ * Returns 0 on success,
+ * non-zero status on error
*/
-enum ice_status
-ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
- u8 *node_part_number, u16 *node_handle)
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf;
struct ice_aq_desc desc;
+ int err = 0;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- desc.params.get_link_topo = *cmd;
+ if (!hw || !output)
+ return (ICE_ERR_PARAM);
- if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
- return ICE_ERR_NOT_SUPPORTED;
+ memset(&buf, 0, sizeof(buf));
+ buf.sto.txrx_equa_reqs.data_in = CPU_TO_LE16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ CPU_TO_LE16(op_code | (serdes_num & 0xF));
- if (node_handle)
- *node_handle =
- LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
- if (node_part_number)
- *node_part_number = desc.params.get_link_topo.node_part_num;
+ cmd = &desc.params.dnl_call;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_RD |
+ ICE_AQ_FLAG_SI);
+ desc.datalen = CPU_TO_LE16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = CPU_TO_LE16(ICE_AQC_ACT_ID_DNL);
+ cmd->ctx = 0;
- return ICE_SUCCESS;
+ err = ice_aq_send_cmd(hw, &desc, &buf,
+ sizeof(struct ice_aqc_dnl_call), NULL);
+ if (!err)
+ *output = buf.sto.txrx_equa_resp.val;
+
+ return err;
}
-#define MAX_NETLIST_SIZE 10
+#define ice_get_link_status_data_ver(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ ICE_GET_LINK_STATUS_DATA_V2 : ICE_GET_LINK_STATUS_DATA_V1)
+
/**
- * ice_find_netlist_node
- * @hw: pointer to the hw struct
- * @node_type_ctx: type of netlist node to look for
- * @node_part_number: node part number to look for
- * @node_handle: output parameter if node found - optional
+ * ice_get_link_status_datalen
+ * @hw: pointer to the HW struct
*
- * Find and return the node handle for a given node type and part number in the
- * netlist. When found ICE_SUCCESS is returned, ICE_ERR_DOES_NOT_EXIST
- * otherwise. If node_handle provided, it would be set to found node handle.
+ * Returns the Get Link Status data length for this device.
*/
-enum ice_status
-ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
- u16 *node_handle)
+static u16 ice_get_link_status_datalen(struct ice_hw *hw)
{
- struct ice_aqc_get_link_topo cmd;
- u8 rec_node_part_number;
- u16 rec_node_handle;
- u8 idx;
-
- for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
- enum ice_status status;
-
- memset(&cmd, 0, sizeof(cmd));
-
- cmd.addr.topo_params.node_type_ctx =
- (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
- cmd.addr.topo_params.index = idx;
-
- status = ice_aq_get_netlist_node(hw, &cmd,
- &rec_node_part_number,
- &rec_node_handle);
- if (status)
- return status;
-
- if (rec_node_part_number == node_part_number) {
- if (node_handle)
- *node_handle = rec_node_handle;
- return ICE_SUCCESS;
- }
- }
-
- return ICE_ERR_DOES_NOT_EXIST;
+ return (ice_get_link_status_data_ver(hw) ==
+ ICE_GET_LINK_STATUS_DATA_V1) ? ICE_GET_LINK_STATUS_DATALEN_V1 :
+ ICE_GET_LINK_STATUS_DATALEN_V2;
}
-#define ice_get_link_status_datalen(hw) ICE_GET_LINK_STATUS_DATALEN_V1
-
/**
* ice_aq_get_link_info
* @pi: port information structure
@@ -609,7 +677,7 @@ ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
*
* Get Link Status (0x607). Returns the link status of the adapter.
*/
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd)
{
@@ -619,9 +687,9 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_fc_info *hw_fc_info;
bool tx_pause, rx_pause;
struct ice_aq_desc desc;
- enum ice_status status;
struct ice_hw *hw;
u16 cmd_flags;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -639,7 +707,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
status = ice_aq_send_cmd(hw, &desc, &link_data,
ice_get_link_status_datalen(hw), cd);
- if (status != ICE_SUCCESS)
+ if (status)
return status;
/* save off old link status information */
@@ -696,7 +764,7 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
/* flag cleared so calling functions don't call AQ again */
pi->phy.get_link_info = false;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -721,17 +789,28 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* Also, because we are operating on transmit timer and fc
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
+#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
- /* Retrieve the transmit timer */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
- tx_timer_val = val &
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
- cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+ if ((hw)->mac_type == ICE_MAC_E830) {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E830_PRTMAC_CL01_PAUSE_QUANTA);
+ tx_timer_val = val & E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
- /* Retrieve the fc threshold */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
- fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E830_PRTMAC_CL01_QUANTA_THRESH);
+ fc_thres_val = val & E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M;
+ } else {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(E800_IDX_OF_LFC));
+ tx_timer_val = val &
+ E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
+ cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(E800_IDX_OF_LFC));
+ fc_thres_val = val & E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
+ }
cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
}
@@ -745,7 +824,7 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
*
* Set MAC configuration (0x0603)
*/
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd)
{
@@ -772,10 +851,10 @@ ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
*/
-static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
+int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
struct ice_switch_info *sw;
- enum ice_status status;
+ int status;
hw->switch_info = (struct ice_switch_info *)
ice_malloc(hw, sizeof(*hw->switch_info));
@@ -793,7 +872,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
ice_free(hw, hw->switch_info);
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -866,7 +945,7 @@ ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
* ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
* @hw: pointer to the HW struct
*/
-static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
+void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
}
@@ -911,7 +990,7 @@ void ice_print_rollback_msg(struct ice_hw *hw)
orom = &hw->flash.orom;
nvm = &hw->flash.nvm;
- SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
+ (void)SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
nvm->major, nvm->minor, nvm->eetrack, orom->major,
orom->build, orom->patch);
ice_warn(hw,
@@ -934,12 +1013,12 @@ void ice_set_umac_shared(struct ice_hw *hw)
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw(struct ice_hw *hw)
+int ice_init_hw(struct ice_hw *hw)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
u16 mac_buf_len;
void *mac_buf;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -957,6 +1036,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
return status;
ice_get_itr_intrl_gran(hw);
+ hw->fw_vsi_num = ICE_DFLT_VSI_INVAL;
+
status = ice_create_all_ctrlq(hw);
if (status)
goto err_unroll_cqinit;
@@ -987,9 +1068,11 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
ice_print_rollback_msg(hw);
- status = ice_clear_pf_cfg(hw);
- if (status)
- goto err_unroll_cqinit;
+ if (!hw->skip_clear_pf) {
+ status = ice_clear_pf_cfg(hw);
+ if (status)
+ goto err_unroll_cqinit;
+ }
ice_clear_pxe_mode(hw);
@@ -1005,6 +1088,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->loopback_mode = ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_NORMAL;
+
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1088,7 +1173,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_fltr_mgmt_struct;
ice_init_lock(&hw->tnl_lock);
- return ICE_SUCCESS;
+ return 0;
err_unroll_fltr_mgmt_struct:
ice_cleanup_fltr_mgmt_struct(hw);
@@ -1135,7 +1220,7 @@ void ice_deinit_hw(struct ice_hw *hw)
* ice_check_reset - Check to see if a global reset is complete
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_check_reset(struct ice_hw *hw)
+int ice_check_reset(struct ice_hw *hw)
{
u32 cnt, reg = 0, grst_timeout, uld_mask, reset_wait_cnt;
@@ -1187,7 +1272,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1197,7 +1282,7 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
* If a global reset has been triggered, this function checks
* for its completion and then issues the PF reset
*/
-static enum ice_status ice_pf_reset(struct ice_hw *hw)
+static int ice_pf_reset(struct ice_hw *hw)
{
u32 cnt, reg, reset_wait_cnt, cfg_lock_timeout;
@@ -1212,7 +1297,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
if (ice_check_reset(hw))
return ICE_ERR_RESET_FAILED;
- return ICE_SUCCESS;
+ return 0;
}
/* Reset the PF */
@@ -1240,7 +1325,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1255,7 +1340,7 @@ static enum ice_status ice_pf_reset(struct ice_hw *hw)
* This has to be cleared using ice_clear_pxe_mode again, once the AQ
* interface has been restored in the rebuild flow.
*/
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
u32 val = 0;
@@ -1290,7 +1375,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
*
* Copies rxq context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
@@ -1310,7 +1395,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
*((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1321,7 +1406,7 @@ ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
*
* Copies rxq context from HW register space to dense structure
*/
-static enum ice_status
+static int
ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
u8 i;
@@ -1341,7 +1426,7 @@ ice_copy_rxq_ctx_from_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i, *ctx);
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Rx Queue Context */
@@ -1380,7 +1465,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* it to HW register space and enables the hardware to prefetch descriptors
* instead of only fetching them on demand
*/
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
@@ -1404,12 +1489,12 @@ ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
* Read rxq context from HW register space and then converts it from dense
* structure to sparse
*/
-enum ice_status
+int
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index)
{
u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
- enum ice_status status;
+ int status;
if (!rlan_ctx)
return ICE_ERR_BAD_PTR;
@@ -1428,7 +1513,7 @@ ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
*
* Clears rxq context in HW register space
*/
-enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
+int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
u8 i;
@@ -1439,7 +1524,7 @@ enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Queue Context used for set Tx config by ice_aqc_opc_add_txqs,
@@ -1486,7 +1571,7 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*
* Copies Tx completion queue context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
u32 tx_cmpltnq_index)
{
@@ -1507,7 +1592,7 @@ ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
*((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Completion Queue Context */
@@ -1535,7 +1620,7 @@ static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
* Converts completion queue context from sparse to dense structure and then
* writes it to HW register space
*/
-enum ice_status
+int
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
u32 tx_cmpltnq_index)
@@ -1553,7 +1638,7 @@ ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
*
* Clears Tx completion queue context in HW register space
*/
-enum ice_status
+int
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
u8 i;
@@ -1565,7 +1650,7 @@ ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1576,7 +1661,7 @@ ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
*
* Copies doorbell queue context from dense structure to HW register space
*/
-static enum ice_status
+static int
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
u32 tx_drbell_q_index)
{
@@ -1597,7 +1682,7 @@ ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
*((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
}
- return ICE_SUCCESS;
+ return 0;
}
/* LAN Tx Doorbell Queue Context info */
@@ -1626,7 +1711,7 @@ static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
* Converts doorbell queue context from sparse to dense structure and then
* writes it to HW register space
*/
-enum ice_status
+int
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index)
@@ -1645,7 +1730,7 @@ ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
*
* Clears doorbell queue context in HW register space
*/
-enum ice_status
+int
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
u8 i;
@@ -1657,7 +1742,130 @@ ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
- return ICE_SUCCESS;
+ return 0;
+}
+
+/* Sideband Queue command wrappers */
+
+/**
+ * ice_get_sbq - returns the right control queue to use for sideband
+ * @hw: pointer to the hardware structure
+ */
+static struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
+{
+ if (!ice_is_generic_mac(hw))
+ return &hw->adminq;
+ return &hw->sbq;
+}
+
+/**
+ * ice_sbq_send_cmd - send Sideband Queue command to Sideband Queue
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd(hw, ice_get_sbq(hw), (struct ice_aq_desc *)desc,
+ buf, buf_size, cd);
+}
+
+/**
+ * ice_sbq_send_cmd_nolock - send Sideband Queue command to Sideband Queue
+ * but do not lock sq_lock
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ * @cd: pointer to command details structure
+ */
+static int
+ice_sbq_send_cmd_nolock(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
+ void *buf, u16 buf_size, struct ice_sq_cd *cd)
+{
+ return ice_sq_send_cmd_nolock(hw, ice_get_sbq(hw),
+ (struct ice_aq_desc *)desc, buf,
+ buf_size, cd);
+}
+
+/**
+ * ice_sbq_rw_reg_lp - Fill Sideband Queue command, with lock parameter
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ * @flag: flag to fill desc structure
+ * @lock: true to lock the sq_lock (the usual case); false if the sq_lock has
+ * already been locked at a higher level
+ */
+int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
+ u16 flag, bool lock)
+{
+ struct ice_sbq_cmd_desc desc = {0};
+ struct ice_sbq_msg_req msg = {0};
+ u16 msg_len;
+ int status;
+
+ msg_len = sizeof(msg);
+
+ msg.dest_dev = in->dest_dev;
+ msg.opcode = in->opcode;
+ msg.flags = ICE_SBQ_MSG_FLAGS;
+ msg.sbe_fbe = ICE_SBQ_MSG_SBE_FBE;
+ msg.msg_addr_low = CPU_TO_LE16(in->msg_addr_low);
+ msg.msg_addr_high = CPU_TO_LE32(in->msg_addr_high);
+
+ if (in->opcode)
+ msg.data = CPU_TO_LE32(in->data);
+ else
+ /* data read comes back in completion, so shorten the struct by
+ * sizeof(msg.data)
+ */
+ msg_len -= sizeof(msg.data);
+
+ desc.flags = CPU_TO_LE16(flag);
+ desc.opcode = CPU_TO_LE16(ice_sbq_opc_neigh_dev_req);
+ desc.param0.cmd_len = CPU_TO_LE16(msg_len);
+ if (lock)
+ status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
+ else
+ status = ice_sbq_send_cmd_nolock(hw, &desc, &msg, msg_len,
+ NULL);
+ if (!status && !in->opcode)
+ in->data = LE32_TO_CPU
+ (((struct ice_sbq_msg_cmpl *)&msg)->data);
+ return status;
+}
+
+/**
+ * ice_sbq_rw_reg - Fill Sideband Queue command
+ * @hw: pointer to the HW struct
+ * @in: message info to be filled in descriptor
+ * @flag: flag to fill desc structure
+ */
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag)
+{
+ return ice_sbq_rw_reg_lp(hw, in, flag, true);
+}
+
+/**
+ * ice_sbq_lock - Lock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_lock(struct ice_hw *hw)
+{
+ ice_acquire_lock(&ice_get_sbq(hw)->sq_lock);
+}
+
+/**
+ * ice_sbq_unlock - Unlock the sideband queue's sq_lock
+ * @hw: pointer to the HW struct
+ */
+void ice_sbq_unlock(struct ice_hw *hw)
+{
+ ice_release_lock(&ice_get_sbq(hw)->sq_lock);
}
/* FW Admin Queue command wrappers */
@@ -1702,17 +1910,17 @@ static bool ice_should_retry_sq_send_cmd(u16 opcode)
* Retry sending the FW Admin Queue command, multiple times, to the FW Admin
* Queue if the EBUSY AQ error is returned.
*/
-static enum ice_status
+static int
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc_cpy;
- enum ice_status status;
bool is_cmd_for_retry;
u8 *buf_cpy = NULL;
u8 idx = 0;
u16 opcode;
+ int status;
opcode = LE16_TO_CPU(desc->opcode);
is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
@@ -1732,7 +1940,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
do {
status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);
- if (!is_cmd_for_retry || status == ICE_SUCCESS ||
+ if (!is_cmd_for_retry || !status ||
hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
break;
@@ -1763,7 +1971,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
*
* Helper function to send FW Admin Queue commands to the FW Admin Queue.
*/
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
@@ -1777,11 +1985,11 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
*
* Get the firmware version (0x0001) from the admin queue commands
*/
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
struct ice_aqc_get_ver *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
resp = &desc.params.get_ver;
@@ -1812,7 +2020,7 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
*
* Send the driver version (0x0002) to the firmware
*/
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd)
{
@@ -1849,7 +2057,7 @@ ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
* Tell the Firmware that we're shutting down the AdminQ and whether
* or not the driver is unloading as well (0x0003).
*/
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
struct ice_aqc_q_shutdown *cmd;
struct ice_aq_desc desc;
@@ -1876,8 +2084,8 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* Requests common resource using the admin queue commands (0x0008).
* When attempting to acquire the Global Config Lock, the driver can
* learn of three states:
- * 1) ICE_SUCCESS - acquired lock, and can perform download package
- * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
+ * 1) 0 - acquired lock, and can perform download package
+ * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
* 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
* successfully downloaded the package; the driver does
* not have to download the package and can continue
@@ -1890,14 +2098,14 @@ enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
* will likely get an error propagated back to it indicating the Download
* Package, Update Package or the Release Resource AQ commands timed out.
*/
-static enum ice_status
+static int
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
struct ice_sq_cd *cd)
{
struct ice_aqc_req_res *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1927,7 +2135,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
*timeout = LE32_TO_CPU(cmd_resp->timeout);
- return ICE_SUCCESS;
+ return 0;
} else if (LE16_TO_CPU(cmd_resp->status) ==
ICE_AQ_RES_GLBL_IN_PROG) {
*timeout = LE32_TO_CPU(cmd_resp->timeout);
@@ -1961,7 +2169,7 @@ ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
*
* release common resource using the admin queue commands (0x0009)
*/
-static enum ice_status
+static int
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
struct ice_sq_cd *cd)
{
@@ -1989,14 +2197,14 @@ ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
*
* This function will attempt to acquire the ownership of a resource.
*/
-enum ice_status
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS 10
u32 delay = ICE_RES_POLLING_DELAY_MS;
u32 time_left = timeout;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2050,8 +2258,8 @@ ice_acquire_res_exit:
*/
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
- enum ice_status status;
u32 total_delay = 0;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2079,7 +2287,7 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
*
* Helper function to allocate/free resources using the admin queue commands
*/
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
@@ -2114,12 +2322,12 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* @btm: allocate from bottom
* @res: pointer to array that will receive the resources
*/
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -2153,11 +2361,11 @@ ice_alloc_res_exit:
* @num: number of resources
* @res: pointer to array that contains the resources to free
*/
-enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
+int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(buf, elem, num);
buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -2216,10 +2424,10 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
u8 i;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %u\n", prefix,
caps->led_pin_num);
else
- ice_info(hw, "%s: led_pin_num = %d\n", prefix,
+ ice_info(hw, "%s: led_pin_num = %u\n", prefix,
caps->led_pin_num);
for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
@@ -2227,10 +2435,10 @@ ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
continue;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = %u\n",
prefix, i, caps->led[i]);
else
- ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
+ ice_info(hw, "%s: led[%u] = %u\n", prefix, i,
caps->led[i]);
}
}
@@ -2249,10 +2457,10 @@ ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
u8 i;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %u\n", prefix,
caps->sdp_pin_num);
else
- ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
+ ice_info(hw, "%s: sdp_pin_num = %u\n", prefix,
caps->sdp_pin_num);
for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
@@ -2260,10 +2468,10 @@ ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
continue;
if (dbg)
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = %u\n",
prefix, i, caps->sdp[i]);
else
- ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
+ ice_info(hw, "%s: sdp[%u] = %u\n", prefix,
i, caps->sdp[i]);
}
}
@@ -2294,86 +2502,86 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
switch (cap) {
case ICE_AQC_CAPS_SWITCHING_MODE:
caps->switching_mode = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %u\n", prefix,
caps->switching_mode);
break;
case ICE_AQC_CAPS_MANAGEABILITY_MODE:
caps->mgmt_mode = number;
caps->mgmt_protocols_mctp = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %u\n", prefix,
caps->mgmt_mode);
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %u\n", prefix,
caps->mgmt_protocols_mctp);
break;
case ICE_AQC_CAPS_OS2BMC:
caps->os2bmc = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
+ ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %u\n", prefix, caps->os2bmc);
break;
case ICE_AQC_CAPS_VALID_FUNCTIONS:
caps->valid_functions = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = 0x%x\n", prefix,
caps->valid_functions);
break;
case ICE_AQC_CAPS_SRIOV:
caps->sr_iov_1_1 = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %u\n", prefix,
caps->sr_iov_1_1);
break;
case ICE_AQC_CAPS_VMDQ:
caps->vmdq = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %d\n", prefix, caps->vmdq);
+ ice_debug(hw, ICE_DBG_INIT, "%s: vmdq = %u\n", prefix, caps->vmdq);
break;
case ICE_AQC_CAPS_802_1QBG:
caps->evb_802_1_qbg = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %u\n", prefix, number);
break;
case ICE_AQC_CAPS_802_1BR:
caps->evb_802_1_qbh = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
+ ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %u\n", prefix, number);
break;
case ICE_AQC_CAPS_DCB:
caps->dcb = (number == 1);
caps->active_tc_bitmap = logical_id;
caps->maxtc = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
- ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %u\n", prefix, caps->dcb);
+ ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = 0x%x\n", prefix,
caps->active_tc_bitmap);
- ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
+ ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %u\n", prefix, caps->maxtc);
break;
case ICE_AQC_CAPS_ISCSI:
caps->iscsi = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
+ ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %u\n", prefix, caps->iscsi);
break;
case ICE_AQC_CAPS_RSS:
caps->rss_table_size = number;
caps->rss_table_entry_width = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %u\n", prefix,
caps->rss_table_size);
- ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %u\n", prefix,
caps->rss_table_entry_width);
break;
case ICE_AQC_CAPS_RXQS:
caps->num_rxq = number;
caps->rxq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %u\n", prefix,
caps->num_rxq);
- ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %u\n", prefix,
caps->rxq_first_id);
break;
case ICE_AQC_CAPS_TXQS:
caps->num_txq = number;
caps->txq_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %u\n", prefix,
caps->num_txq);
- ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %u\n", prefix,
caps->txq_first_id);
break;
case ICE_AQC_CAPS_MSIX:
caps->num_msix_vectors = number;
caps->msix_vector_first_id = phys_id;
- ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %u\n", prefix,
caps->num_msix_vectors);
- ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %u\n", prefix,
caps->msix_vector_first_id);
break;
case ICE_AQC_CAPS_NVM_MGMT:
@@ -2400,30 +2608,30 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
break;
case ICE_AQC_CAPS_CEM:
caps->mgmt_cem = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %u\n", prefix,
caps->mgmt_cem);
break;
case ICE_AQC_CAPS_IWARP:
caps->iwarp = (number == 1);
- ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %d\n", prefix, caps->iwarp);
+ ice_debug(hw, ICE_DBG_INIT, "%s: iwarp = %u\n", prefix, caps->iwarp);
break;
case ICE_AQC_CAPS_ROCEV2_LAG:
caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
- ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
prefix, caps->roce_lag);
break;
case ICE_AQC_CAPS_LED:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
caps->led[phys_id] = true;
caps->led_pin_num++;
- ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
+ ice_debug(hw, ICE_DBG_INIT, "%s: led[%u] = 1\n", prefix, phys_id);
}
break;
case ICE_AQC_CAPS_SDP:
if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
caps->sdp[phys_id] = true;
caps->sdp_pin_num++;
- ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%u] = 1\n", prefix, phys_id);
}
break;
case ICE_AQC_CAPS_WR_CSR_PROT:
@@ -2439,16 +2647,16 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->acpi_prog_mthd = !!(phys_id &
ICE_ACPI_PROG_MTHD_M);
caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
- ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %u\n", prefix,
caps->num_wol_proxy_fltr);
- ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
+ ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %u\n", prefix,
caps->wol_proxy_vsi_seid);
- ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: apm_wol_support = %u\n",
prefix, caps->apm_wol_support);
break;
case ICE_AQC_CAPS_MAX_MTU:
caps->max_mtu = number;
- ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %u\n",
prefix, caps->max_mtu);
break;
case ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE:
@@ -2482,15 +2690,15 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
caps->ext_topo_dev_img_ver_schema[index] =
(phys_id & ICE_EXT_TOPO_DEV_IMG_VER_SCHEMA) != 0;
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_ver_high[%d] = %d\n",
+ "%s: ext_topo_dev_img_ver_high[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_ver_high[index]);
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_ver_low[%d] = %d\n",
+ "%s: ext_topo_dev_img_ver_low[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_ver_low[index]);
ice_debug(hw, ICE_DBG_INIT,
- "%s: ext_topo_dev_img_part_num[%d] = %d\n",
+ "%s: ext_topo_dev_img_part_num[%d] = %u\n",
prefix, index,
caps->ext_topo_dev_img_part_num[index]);
ice_debug(hw, ICE_DBG_INIT,
@@ -2520,6 +2728,11 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: orom_recovery_update = %d\n",
prefix, caps->orom_recovery_update);
break;
+ case ICE_AQC_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ ice_debug(hw, ICE_DBG_INIT, "%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2546,7 +2759,7 @@ ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
if (hw->dev_caps.num_funcs > 4) {
/* Max 4 TCs per port */
caps->maxtc = 4;
- ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
+ ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %u (based on #ports)\n",
caps->maxtc);
if (caps->iwarp) {
ice_debug(hw, ICE_DBG_INIT, "forcing RDMA off\n");
@@ -2578,9 +2791,9 @@ ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
func_p->num_allocd_vfs = number;
func_p->vf_base_id = logical_id;
- ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %u\n",
func_p->num_allocd_vfs);
- ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %u\n",
func_p->vf_base_id);
}
@@ -2597,9 +2810,9 @@ ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
struct ice_aqc_list_caps_elem *cap)
{
func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
- ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %u\n",
LE32_TO_CPU(cap->number));
- ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %u\n",
func_p->guar_num_vsi);
}
@@ -2672,7 +2885,7 @@ ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_funcs = ice_hweight32(number);
- ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %u\n",
dev_p->num_funcs);
}
@@ -2692,7 +2905,7 @@ ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_vfs_exposed = number;
- ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %u\n",
dev_p->num_vfs_exposed);
}
@@ -2711,7 +2924,7 @@ ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
u32 number = LE32_TO_CPU(cap->number);
dev_p->num_vsi_allocd_to_host = number;
- ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %u\n",
dev_p->num_vsi_allocd_to_host);
}
@@ -2730,15 +2943,15 @@ ice_parse_nac_topo_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
dev_p->nac_topo.mode = LE32_TO_CPU(cap->number);
dev_p->nac_topo.id = LE32_TO_CPU(cap->phys_id) & ICE_NAC_TOPO_ID_M;
- ice_info(hw, "PF is configured in %s mode with IP instance ID %d\n",
- (dev_p->nac_topo.mode == 0) ? "primary" : "secondary",
- dev_p->nac_topo.id);
+ ice_info(hw, "PF is configured in %s mode with IP instance ID %u\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
!!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
- ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %u\n",
dev_p->nac_topo.id);
}
@@ -2813,7 +3026,7 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
default:
/* Don't list common capabilities as unknown */
if (!found)
- ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%u]: 0x%x\n",
i, cap);
break;
}
@@ -2826,6 +3039,81 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node handle and its part number via the get_link_topo
+ * admin queue command.
+ */
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return ICE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+#define MAX_NETLIST_SIZE 10	/* upper bound on netlist node indices scanned */
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Scan the netlist for a node handle of the given node type and part number.
+ *
+ * If node_handle is non-NULL it will be modified on function exit. It is only
+ * valid if the function returns zero, and should be ignored on any non-zero
+ * return value.
+ *
+ * Returns: 0 if the node is found, ICE_ERR_DOES_NOT_EXIST if no handle was
+ * found, and an error code on failure to access the AQ.
+ */
+int
+ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
+ u16 *node_handle)
+{
+ u8 idx;
+
+ for (idx = 0; idx < MAX_NETLIST_SIZE; idx++) {
+ struct ice_aqc_get_link_topo cmd;
+ u8 rec_node_part_number;
+ int status;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << ICE_AQC_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number)
+ return 0;
+ }
+
+ return ICE_ERR_DOES_NOT_EXIST;
+}
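A minimal usage sketch for the new scan helper; the node type and part number here are hypothetical placeholders for constants from ice_adminq_cmd.h:

	u16 handle;
	int err;

	err = ice_find_netlist_node(hw, EXAMPLE_NODE_TYPE_CTX,
				    EXAMPLE_NODE_PART_NUM, &handle);
	if (err == ICE_ERR_DOES_NOT_EXIST)
		return 0;	/* node simply absent */
	if (err)
		return err;	/* AQ access failed */
	/* handle is valid only on this zero-return path */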
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -2844,13 +3132,13 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
* buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
* firmware could return) to avoid this.
*/
-static enum ice_status
+static int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aqc_list_caps *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_cap;
@@ -2875,12 +3163,12 @@ ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
* Read the device capabilities and extract them into the dev_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
if (!cbuf)
@@ -2909,12 +3197,12 @@ ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
* Read the function capabilities and extract them into the func_caps structure
* for later use.
*/
-static enum ice_status
+static int
ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
{
- enum ice_status status;
u32 cap_count = 0;
void *cbuf;
+ int status;
cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
if (!cbuf)
@@ -3002,9 +3290,9 @@ void ice_set_safe_mode_caps(struct ice_hw *hw)
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_caps(struct ice_hw *hw)
+int ice_get_caps(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
status = ice_discover_dev_caps(hw, &hw->dev_caps);
if (status)
@@ -3022,7 +3310,7 @@ enum ice_status ice_get_caps(struct ice_hw *hw)
*
* This function is used to write the MAC address to the NVM (0x0108).
*/
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd)
{
@@ -3044,7 +3332,7 @@ ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
*
* Tell the firmware that the driver is taking over from PXE (0x0110).
*/
-static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
+static int ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -3078,7 +3366,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
*
* Set Physical port parameters (0x0203)
*/
-enum ice_status
+int
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd)
@@ -3091,6 +3379,8 @@ ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
cmd = &desc.params.set_port_params;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+ cmd->lb_mode = pi->loopback_mode |
+ ICE_AQC_SET_P_PARAMS_LOOPBACK_MODE_VALID;
cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
if (save_bad_pac)
cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
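With the new lb_mode field, a caller selects the port loopback mode before applying port parameters. A sketch, assuming the LOOPBACK_MODE_* values added to ice_common.h later in this patch; the remaining arguments are illustrative defaults:

	pi->loopback_mode = LOOPBACK_MODE_NO;	/* or LOOPBACK_MODE_HIGH */
	status = ice_aq_set_port_params(pi, 0, false, false, false, NULL);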
@@ -3238,6 +3528,18 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI8:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
@@ -3311,12 +3613,12 @@ ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
* mode as the PF may not have the privilege to set some of the PHY Config
* parameters. This status will be indicated by the command response (0x0601).
*/
-enum ice_status
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!cfg)
return ICE_ERR_PARAM;
@@ -3349,7 +3651,7 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
- status = ICE_SUCCESS;
+ status = 0;
if (!status)
pi->phy.curr_user_phy_cfg = *cfg;
@@ -3361,10 +3663,10 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
* ice_update_link_info - update status of the HW network link
* @pi: port info structure of the interested logical port
*/
-enum ice_status ice_update_link_info(struct ice_port_info *pi)
+int ice_update_link_info(struct ice_port_info *pi)
{
struct ice_link_status *li;
- enum ice_status status;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -3388,7 +3690,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL);
- if (status == ICE_SUCCESS)
+ if (!status)
ice_memcpy(li->module_type, &pcaps->module_type,
sizeof(li->module_type),
ICE_NONDMA_TO_NONDMA);
@@ -3488,7 +3790,7 @@ enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
* @cfg: PHY configuration data to set FC mode
* @req_mode: FC mode to configure
*/
-static enum ice_status
+static int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode req_mode)
{
@@ -3501,7 +3803,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
case ICE_FC_AUTO:
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
+ int status;
pcaps = (struct ice_aqc_get_phy_caps_data *)
ice_malloc(pi->hw, sizeof(*pcaps));
@@ -3548,7 +3850,7 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
cache_data.data.curr_user_fc_req = req_mode;
ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3559,13 +3861,13 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
*
* Set the requested flow control mode.
*/
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || !aq_failures)
return ICE_ERR_BAD_PTR;
@@ -3620,7 +3922,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
for (retry_count = 0; retry_count < retry_max; retry_count++) {
status = ice_update_link_info(pi);
- if (status == ICE_SUCCESS)
+ if (!status)
break;
ice_msec_delay(100, true);
@@ -3706,13 +4008,13 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*/
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec)
{
struct ice_aqc_get_phy_caps_data *pcaps;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw;
+ int status = 0;
if (!pi || !cfg)
return ICE_ERR_BAD_PTR;
@@ -3802,10 +4104,10 @@ out:
* The variable link_up is invalid if status is non-zero. As a
* result of this call, link status reporting becomes enabled.
*/
-enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
+int ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
struct ice_phy_info *phy_info;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!pi || !link_up)
return ICE_ERR_PARAM;
@@ -3833,11 +4135,11 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
*
* Sets up the link and restarts the Auto-Negotiation over the link.
*/
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_AQ_ERROR;
+ int status = ICE_ERR_AQ_ERROR;
struct ice_aqc_restart_an *cmd;
struct ice_aq_desc desc;
@@ -3861,7 +4163,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
else
pi->phy.curr_user_phy_cfg.caps &= ~ICE_AQC_PHY_EN_LINK;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3873,7 +4175,7 @@ ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
*
* Set event mask (0x0613)
*/
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd)
{
@@ -3898,7 +4200,7 @@ ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
*
* Enable/disable loopback on a given port
*/
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
struct ice_aqc_set_mac_lb *cmd;
@@ -3921,7 +4223,7 @@ ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
*
* Set LED value for the given port (0x06e9)
*/
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd)
{
@@ -3956,14 +4258,14 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
*
* Read/Write SFF EEPROM (0x06EE)
*/
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd)
{
struct ice_aqc_sff_eeprom *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || (mem_addr & 0xff00))
return ICE_ERR_PARAM;
@@ -3996,7 +4298,7 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
* Program Topology Device NVM (0x06F2)
*
*/
-enum ice_status
+int
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
struct ice_sq_cd *cd)
@@ -4025,7 +4327,7 @@ ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
* Read Topology Device NVM (0x06F3)
*
*/
-enum ice_status
+int
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
u32 start_address, u8 *data, u8 data_size,
@@ -4033,7 +4335,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
{
struct ice_aqc_read_topo_dev_nvm *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data || data_size == 0 ||
data_size > ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
@@ -4054,7 +4356,7 @@ ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
ice_memcpy(data, cmd->data_read, data_size, ICE_NONDMA_TO_NONDMA);
- return ICE_SUCCESS;
+ return 0;
}
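Since each command transfers at most ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE bytes (enforced above), larger regions are read in a loop over start_address. A sketch, assuming region_len is a multiple of the chunk size and the other names come from the caller:

	u32 off;

	for (off = 0; off < region_len;
	     off += ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE) {
		status = ice_aq_read_topo_dev_nvm(hw, &topo_params,
		    start_address + off, data + off,
		    ICE_AQC_READ_TOPO_DEV_NVM_DATA_READ_SIZE, NULL);
		if (status)
			break;
	}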
static u16 ice_lut_type_to_size(u16 lut_type)
@@ -4114,13 +4416,13 @@ int ice_lut_size_to_type(int lut_size)
*
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
-static enum ice_status
+static int
__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
{
u16 flags, vsi_id, lut_type, lut_size, glob_lut_idx = 0, vsi_handle;
struct ice_aqc_get_set_rss_lut *cmd_resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 *lut;
if (!params)
@@ -4176,7 +4478,7 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params
*
* get the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
{
return __ice_aq_get_set_rss_lut(hw, get_params, false);
@@ -4189,7 +4491,7 @@ ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_
*
* set the RSS lookup table, PF or VSI type
*/
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
{
return __ice_aq_get_set_rss_lut(hw, set_params, true);
@@ -4204,8 +4506,7 @@ ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_
*
* get (0x0B04) or set (0x0B02) the RSS key per VSI
*/
-static enum
-ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
+static int __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *key,
bool set)
{
@@ -4238,7 +4539,7 @@ ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
*
* get the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *key)
{
@@ -4257,7 +4558,7 @@ ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
*
* set the RSS key per VSI
*/
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys)
{
@@ -4289,7 +4590,7 @@ ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
* Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
* flow.
*/
-enum ice_status
+int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd)
@@ -4339,7 +4640,7 @@ ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
*
* Disable LAN Tx queue (0x0C31)
*/
-static enum ice_status
+static int
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
@@ -4348,7 +4649,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
struct ice_aqc_dis_txq_item *item;
struct ice_aqc_dis_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 i, sz = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -4442,7 +4743,7 @@ do_aq:
*
* Move / Reconfigure Tx LAN queues (0x0C32)
*/
-enum ice_status
+int
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
bool is_tc_change, bool subseq_call, bool flush_pipe,
u8 timeout, u32 *blocked_cgds,
@@ -4451,7 +4752,7 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
{
struct ice_aqc_move_txqs *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.move_txqs;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
@@ -4506,7 +4807,7 @@ ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
*
* Add Tx RDMA Qsets (0x0C33)
*/
-enum ice_status
+int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
struct ice_aqc_add_rdma_qset_data *qset_list,
u16 buf_size, struct ice_sq_cd *cd)
@@ -4578,13 +4879,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
dest_byte &= ~mask; /* get the bits not changing */
dest_byte |= src_byte; /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4621,13 +4922,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4672,13 +4973,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4723,13 +5024,13 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the target bit string */
dest = dest_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
/* put it all back */
- ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
}
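All four writers share a read-modify-write pattern so neighbouring fields in the packed context survive; the direction-flag changes simply reflect that both buffers are plain host memory. A self-contained sketch of the single-byte case:

	/*
	 * Insert a width-bit value at bit offset lsb within one byte,
	 * preserving the surrounding bits (assumes lsb + width <= 8).
	 */
	static inline void pack_bits_u8(u8 *dest, u8 val, u8 lsb, u8 width)
	{
		u8 mask = (u8)(((1u << width) - 1) << lsb);

		*dest = (*dest & ~mask) | ((u8)(val << lsb) & mask);
	}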
/**
@@ -4739,7 +5040,7 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to memory for the packed structure
* @ce_info: a description of the structure to be transformed
*/
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info)
{
@@ -4773,7 +5074,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
}
}
- return ICE_SUCCESS;
+ return 0;
}
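A usage sketch for ice_set_ctx(): serializing a Tx LAN queue context into the packed image written to hardware, using the ice_tlan_ctx_info field map declared in ice_common.h. The buffer size macro is a placeholder for the real constant in the ice headers:

	u8 ctx_buf[EXAMPLE_TXQ_CTX_SZ] = { 0 };	/* size macro assumed */
	struct ice_tlan_ctx tlan_ctx = { 0 };
	int status;

	/* ... fill tlan_ctx fields ... */
	status = ice_set_ctx(hw, (u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);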
/**
@@ -4792,7 +5093,7 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
*
* Get internal FW/HW data (0xFF08) for debug purposes.
*/
-enum ice_status
+int
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_cluster, u16 *ret_next_table,
@@ -4800,7 +5101,7 @@ ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
{
struct ice_aqc_debug_dump_internals *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.debug_dump;
@@ -4852,7 +5153,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
dest_byte &= mask;
@@ -4862,7 +5163,7 @@ ice_read_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4889,7 +5190,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_word, src, sizeof(src_word), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -4905,7 +5206,7 @@ ice_read_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4940,7 +5241,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -4956,7 +5257,7 @@ ice_read_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -4991,7 +5292,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
/* get the current bits from the src bit string */
src = src_ctx + (ce_info->lsb / 8);
- ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_NONDMA_TO_NONDMA);
/* the data in the memory is stored as little endian so mask it
* correctly
@@ -5007,7 +5308,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
target = dest_ctx + ce_info->offset;
/* put it back in the struct */
- ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
+ ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_NONDMA);
}
/**
@@ -5016,7 +5317,7 @@ ice_read_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @dest_ctx: pointer to a generic non-packed context structure
* @ce_info: a description of the structure to be read from
*/
-enum ice_status
+int
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
int f;
@@ -5041,7 +5342,7 @@ ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5081,7 +5382,7 @@ ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
*
* This function adds one LAN queue
*/
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -5089,8 +5390,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_sched_node *parent;
struct ice_q_ctx *q_ctx;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
@@ -5149,7 +5450,7 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
LE16_TO_CPU(buf->txqs[0].txq_id),
hw->adminq.sq_last_status);
@@ -5186,15 +5487,15 @@ ena_txq_exit:
*
* This function removes queues and their corresponding nodes in SW DB
*/
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handles, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_aqc_dis_txq_item *qg_list;
struct ice_q_ctx *q_ctx;
+ int status = ICE_ERR_DOES_NOT_EXIST;
struct ice_hw *hw;
u16 i, buf_size;
@@ -5244,7 +5545,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
vmvf_num, cd);
- if (status != ICE_SUCCESS)
+ if (status)
break;
ice_free_sched_node(pi, node);
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
@@ -5264,11 +5565,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
*
* This function adds/updates the VSI queues per TC.
*/
-static enum ice_status
+static int
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *maxqs, u8 owner)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
@@ -5303,7 +5604,7 @@ ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds/updates the VSI LAN queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_lanqs)
{
@@ -5320,7 +5621,7 @@ ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds/updates the VSI RDMA queues per TC.
*/
-enum ice_status
+int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs)
{
@@ -5339,16 +5640,16 @@ ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
*
* This function adds RDMA qset
*/
-enum ice_status
+int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid)
{
struct ice_aqc_txsched_elem_data node = { 0 };
struct ice_aqc_add_rdma_qset_data *buf;
struct ice_sched_node *parent;
- enum ice_status status;
struct ice_hw *hw;
u16 i, buf_size;
+ int status;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return ICE_ERR_CFG;
@@ -5389,7 +5690,7 @@ ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
}
status = ice_aq_add_rdma_qsets(hw, 1, buf, buf_size, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_RDMA, "add RDMA qset failed\n");
goto rdma_error_exit;
}
@@ -5415,13 +5716,13 @@ rdma_error_exit:
* @qset_teid: TEID of qset node
* @q_id: list of queue IDs being disabled
*/
-enum ice_status
+int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
struct ice_aqc_dis_txq_item *qg_list;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw;
+ int status = 0;
u16 qg_size;
int i;
@@ -5473,14 +5774,14 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
*
* Get sensor reading (0x0632)
*/
-enum ice_status
+int
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sensor_reading *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!data)
return ICE_ERR_PARAM;
@@ -5519,10 +5820,10 @@ static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
*
* Initializes required config data for VSI, FD, ACL, and RSS before replay.
*/
-enum ice_status
+int
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
{
- enum ice_status status;
+ int status;
u8 i;
/* Delete old entries from replay filter list head if there is any */
@@ -5551,11 +5852,11 @@ ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
* Restore all VSI configuration after reset. It is required to call this
* function with the main VSI first.
*/
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_switch_info *sw = hw->switch_info;
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
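The comment above prescribes an ordering; a sketch of a post-reset replay sequence, with main_vsi_handle and the remaining-handles loop left illustrative:

	status = ice_replay_pre_init(hw, hw->switch_info);
	if (!status)
		status = ice_replay_vsi(hw, main_vsi_handle);	/* main VSI first */
	/* ... then ice_replay_vsi() for each remaining handle ... */
	ice_replay_post(hw);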
@@ -5728,13 +6029,13 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
* Write one or two dwords to the alternate structure. Fields are indicated
* by 'reg_addr0' and 'reg_addr1' register numbers.
*/
-enum ice_status
+int
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1)
{
struct ice_aqc_read_write_alt_direct *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.read_write_alt_direct;
@@ -5761,13 +6062,13 @@ ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
* by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
* is not passed, only the register at 'reg_addr0' is read.
*/
-enum ice_status
+int
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1)
{
struct ice_aqc_read_write_alt_direct *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.read_write_alt_direct;
@@ -5780,7 +6081,7 @@ ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status == ICE_SUCCESS) {
+ if (!status) {
*reg_val0 = LE32_TO_CPU(cmd->dword0_value);
if (reg_val1)
@@ -5798,12 +6099,12 @@ ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
*
* Indicates to the FW that alternate structures have been changed.
*/
-enum ice_status
+int
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
struct ice_aqc_done_alt_write *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.done_alt_write;
@@ -5828,10 +6129,10 @@ ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
* Clear the alternate structures of the port from which the function
* is called.
*/
-enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
+int ice_aq_alternate_clear(struct ice_hw *hw)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
@@ -5848,19 +6149,19 @@ enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
*
* This function queries HW element information
*/
-enum ice_status
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf)
{
u16 buf_size, num_elem_ret = 0;
- enum ice_status status;
+ int status;
buf_size = sizeof(*buf);
ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
buf->node_teid = CPU_TO_LE32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
- if (status != ICE_SUCCESS || num_elem_ret != 1)
+ if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
@@ -5877,7 +6178,7 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
u32 fw_mode;
/* check the current FW mode */
- fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
+ fw_mode = rd32(hw, GL_MNG_FWSM) & E800_GL_MNG_FWSM_FW_MODES_M;
if (fw_mode & ICE_FW_MODE_DBG_M)
return ICE_FW_MODE_DBG;
else if (fw_mode & ICE_FW_MODE_REC_M)
@@ -5895,13 +6196,13 @@ enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
*
* Get the current status of persistent LLDP
*/
-enum ice_status
+int
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status ret;
__le32 raw_data;
u32 data, mask;
+ int ret;
if (!lldp_status)
return ICE_ERR_BAD_PTR;
@@ -5935,14 +6236,14 @@ ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
*
* Get the default status of persistent LLDP
*/
-enum ice_status
+int
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
struct ice_port_info *pi = hw->port_info;
u32 data, mask, loc_data, loc_data_tmp;
- enum ice_status ret;
__le16 loc_raw_data;
__le32 raw_data;
+ int ret;
if (!lldp_status)
return ICE_ERR_BAD_PTR;
@@ -6014,15 +6315,15 @@ exit:
*
* Read I2C (0x06E2)
*/
-enum ice_status
+int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd)
{
struct ice_aq_desc desc = { 0 };
struct ice_aqc_i2c *cmd;
- enum ice_status status;
u8 data_size;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_i2c);
cmd = &desc.params.read_write_i2c;
@@ -6064,7 +6365,7 @@ ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*
* Write I2C (0x06E3)
*/
-enum ice_status
+int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd)
@@ -6105,7 +6406,7 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
*
* Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
*/
-enum ice_status
+int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd)
{
@@ -6132,13 +6433,13 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
* Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
* the topology
*/
-enum ice_status
+int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd)
{
struct ice_aqc_gpio *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
cmd = &desc.params.read_write_gpio;
@@ -6150,7 +6451,7 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
return status;
*value = !!cmd->gpio_val;
- return ICE_SUCCESS;
+ return 0;
}
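A sketch pairing the two GPIO helpers: drive a topology pin high, then read it back. ctrl_handle and pin_idx are placeholders a caller would obtain from the link topology:

	bool val;
	int status;

	status = ice_aq_set_gpio(hw, ctrl_handle, pin_idx, true, NULL);
	if (!status)
		status = ice_aq_get_gpio(hw, ctrl_handle, pin_idx, &val, NULL);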
/**
@@ -6223,13 +6524,13 @@ bool ice_fw_supports_link_override(struct ice_hw *hw)
*
* Gets the link default override for a port
*/
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi)
{
u16 i, tlv, tlv_len, tlv_start, buf, offset;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
@@ -6336,7 +6637,7 @@ bool ice_is_fw_health_report_supported(struct ice_hw *hw)
* Configure the health status event types that the firmware will send to this
* PF. The supported event types are: PF-specific, all PFs, and global
*/
-enum ice_status
+int
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd)
{
@@ -6369,7 +6670,7 @@ ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
*
* Calls Get Port Options AQC (0x06ea) and verifies result.
*/
-enum ice_status
+int
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
@@ -6378,7 +6679,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
{
struct ice_aqc_get_port_options *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u8 i;
/* options buffer shall be able to hold max returned options */
@@ -6393,7 +6694,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
status = ice_aq_send_cmd(hw, &desc, options,
*option_count * sizeof(*options), NULL);
- if (status != ICE_SUCCESS)
+ if (status)
return status;
/* verify direct FW response & set output parameters */
@@ -6428,7 +6729,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
options[i].pmd, options[i].max_lane_speed);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -6441,7 +6742,7 @@ ice_aq_get_port_options(struct ice_hw *hw,
*
* Calls Set Port Options AQC (0x06eb).
*/
-enum ice_status
+int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option)
{
@@ -6472,7 +6773,7 @@ ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
*
* Set the LLDP MIB. (0x0A08)
*/
-enum ice_status
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
@@ -6515,7 +6816,7 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
* @vsi_num: absolute HW index for VSI
* @add: boolean for if adding or removing a filter
*/
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
{
struct ice_aqc_lldp_filter_ctrl *cmd;
@@ -6539,7 +6840,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
* ice_lldp_execute_pending_mib - execute LLDP pending MIB request
* @hw: pointer to HW struct
*/
-enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw)
+int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
struct ice_aq_desc desc;
@@ -6580,6 +6881,7 @@ static const u32 ice_aq_to_link_speed[] = {
ICE_LINK_SPEED_40000MBPS,
ICE_LINK_SPEED_50000MBPS,
ICE_LINK_SPEED_100000MBPS, /* BIT(10) */
+ ICE_LINK_SPEED_200000MBPS,
};
/**
@@ -6604,6 +6906,8 @@ u32 ice_get_link_speed(u16 index)
*/
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw)
{
+ if (ice_is_e830(hw))
+ return true;
return ice_is_fw_min_ver(hw, ICE_FW_VER_BRANCH_E810,
ICE_FW_FEC_DIS_AUTO_MAJ,
ICE_FW_FEC_DIS_AUTO_MIN,
diff --git a/sys/dev/ice/ice_common.h b/sys/dev/ice/ice_common.h
index 3abfba874b9c..7bd9de0c94e6 100644
--- a/sys/dev/ice/ice_common.h
+++ b/sys/dev/ice/ice_common.h
@@ -41,6 +41,9 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+/* Port loopback modes written to lb_mode by ice_aq_set_port_params() */
+#define LOOPBACK_MODE_NO 0
+#define LOOPBACK_MODE_HIGH 2
+
enum ice_fw_modes {
ICE_FW_MODE_NORMAL,
ICE_FW_MODE_DBG,
@@ -51,49 +54,55 @@ enum ice_fw_modes {
void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq);
bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq);
+int ice_init_fltr_mgmt_struct(struct ice_hw *hw);
+void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw);
void ice_set_umac_shared(struct ice_hw *hw);
-enum ice_status ice_init_hw(struct ice_hw *hw);
+int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
-enum ice_status ice_check_reset(struct ice_hw *hw);
-enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req);
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw);
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw);
+int ice_check_reset(struct ice_hw *hw);
+int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
+int ice_create_all_ctrlq(struct ice_hw *hw);
+int ice_init_all_ctrlq(struct ice_hw *hw);
void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending);
-enum ice_status
+int
ice_get_link_status(struct ice_port_info *pi, bool *link_up);
-enum ice_status ice_update_link_info(struct ice_port_info *pi);
-enum ice_status
+int ice_update_link_info(struct ice_port_info *pi);
+int
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
enum ice_aq_res_access_type access, u32 timeout);
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res);
-enum ice_status
+int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
-enum ice_status
+int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-enum ice_status
+int
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
-enum ice_status
+int
+ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
+ struct ice_aq_desc *desc, void *buf, u16 buf_size,
+ struct ice_sq_cd *cd);
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
void ice_clear_pxe_mode(struct ice_hw *hw);
-enum ice_status ice_get_caps(struct ice_hw *hw);
+int ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_get_internal_data(struct ice_hw *hw, u16 cluster_id, u16 table_id,
u32 start, void *buf, u16 buf_size, u16 *ret_buf_size,
u16 *ret_next_cluster, u16 *ret_next_table,
u32 *ret_next_index, struct ice_sq_cd *cd);
-enum ice_status ice_set_mac_type(struct ice_hw *hw);
+int ice_set_mac_type(struct ice_hw *hw);
/* Define a macro that will align a pointer to point to the next memory address
* that falls on the given power of 2 (i.e., 2, 4, 8, 16, 32, 64...). For
@@ -108,108 +117,110 @@ enum ice_status ice_set_mac_type(struct ice_hw *hw);
#define ice_arr_elem_idx(idx, val) [(idx)] = (val)
-enum ice_status
+int
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status
+int
ice_read_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
-enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
-enum ice_status
+int ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index);
+int
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index);
-enum ice_status
+int
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
u32 tx_cmpltnq_index);
-enum ice_status
+int
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index);
-enum ice_status
+int
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
u32 tx_drbell_q_index);
int ice_lut_size_to_type(int lut_size);
-enum ice_status
+int
ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params);
-enum ice_status
+int
ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params);
-enum ice_status
+int
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
struct ice_aqc_get_set_rss_keys *keys);
-enum ice_status
+int
ice_aq_add_lan_txq(struct ice_hw *hw, u8 count,
struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
bool is_tc_change, bool subseq_call, bool flush_pipe,
u8 timeout, u32 *blocked_cgds,
struct ice_aqc_move_txqs_data *buf, u16 buf_size,
u8 *txqs_moved, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps,
struct ice_aqc_add_rdma_qset_data *qset_list,
u16 buf_size, struct ice_sq_cd *cd);
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq);
-enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
+int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading);
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
-enum ice_status
+int
ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
const struct ice_ctx_ele *ce_info);
-enum ice_status
+int
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
-enum ice_status
+int
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
-enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
+int ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
bool save_bad_pac, bool pad_short_pac, bool double_vlan,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
u8 *node_part_number, u16 *node_handle);
-enum ice_status
+int
ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx, u8 node_part_number,
u16 *node_handle);
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
u16 link_speeds_bitmap);
-enum ice_status
+int
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
struct ice_sq_cd *cd);
-enum ice_status ice_clear_pf_cfg(struct ice_hw *hw);
-enum ice_status
+int ice_clear_pf_cfg(struct ice_hw *hw);
+int
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd);
bool ice_fw_supports_link_override(struct ice_hw *hw);
bool ice_fw_supports_fec_dis_auto(struct ice_hw *hw);
-enum ice_status
+int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
-enum ice_status
+int
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
bool
@@ -219,84 +230,89 @@ void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
-enum ice_status
+int
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fec_mode fec);
-enum ice_status
+int
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, bool auto_drop,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
struct ice_link_status *link, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
u32 ice_get_link_speed(u16 index);
-enum ice_status
+int
ice_aq_prog_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_read_topo_dev_nvm(struct ice_hw *hw,
struct ice_aqc_link_topo_params *topo_params,
u32 start_address, u8 *buf, u8 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_port_options(struct ice_hw *hw,
struct ice_aqc_get_port_options_elem *options,
u8 *option_count, u8 lport, bool lport_valid,
u8 *active_option_idx, bool *active_option_valid,
u8 *pending_option_idx, bool *pending_option_valid);
-enum ice_status
+int
ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
u8 new_option);
-enum ice_status
+int
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data);
-enum ice_status
+int
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data);
-enum ice_status
+int
ice_cfg_vsi_rdma(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_rdmaqs);
-enum ice_status
+int
ice_ena_vsi_rdma_qset(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 *rdma_qset, u16 num_qsets, u32 *qset_teid);
-enum ice_status
+int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id);
-enum ice_status
+int
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
u16 *q_handle, u16 *q_ids, u32 *q_teids,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
u16 *max_lanqs);
-enum ice_status
+int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw);
-enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
+int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-enum ice_status
+int ice_sbq_rw_reg_lp(struct ice_hw *hw, struct ice_sbq_msg_input *in,
+ u16 flag, bool lock);
+void ice_sbq_lock(struct ice_hw *hw);
+void ice_sbq_unlock(struct ice_hw *hw);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
+int
ice_aq_get_sensor_reading(struct ice_hw *hw, u8 sensor, u8 format,
struct ice_aqc_get_sensor_reading_resp *data,
struct ice_sq_cd *cd);
@@ -311,50 +327,53 @@ ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
struct ice_eth_stats *cur_stats);
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw);
void ice_print_rollback_msg(struct ice_hw *hw);
+bool ice_is_generic_mac(struct ice_hw *hw);
bool ice_is_e810(struct ice_hw *hw);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e830(struct ice_hw *hw);
+bool ice_is_e825c(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
u32 reg_addr1, u32 reg_val1);
-enum ice_status
+int
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
-enum ice_status
+int
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode,
bool *reset_needed);
-enum ice_status ice_aq_alternate_clear(struct ice_hw *hw);
-enum ice_status
+int ice_aq_alternate_clear(struct ice_hw *hw);
+int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
-enum ice_status
+int
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
-enum ice_status
+int
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status);
-enum ice_status
+int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
-enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
+int ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist);
+int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
-enum ice_status
+int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
-enum ice_status ice_lldp_execute_pending_mib(struct ice_hw *hw);
-enum ice_status
+int ice_lldp_execute_pending_mib(struct ice_hw *hw);
+int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, const u8 *data,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_health_status_config(struct ice_hw *hw, u8 event_source,
struct ice_sq_cd *cd);
bool ice_is_fw_health_report_supported(struct ice_hw *hw);
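
[Editorial sketch] Taken together, the prototype changes above move the shared code from the enum ice_status return type to plain int: 0 replaces ICE_SUCCESS, and the ICE_ERR_* codes remain nonzero. A minimal caller-side sketch of the new convention (ice_example_caller is hypothetical; ice_clear_pf_cfg is declared above):

	int
	ice_example_caller(struct ice_hw *hw)
	{
		int status;	/* was: enum ice_status status; */

		status = ice_clear_pf_cfg(hw);
		if (status)	/* was: if (status != ICE_SUCCESS) */
			return (status);

		return (0);	/* was: return (ICE_SUCCESS); */
	}
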
diff --git a/sys/dev/ice/ice_common_txrx.h b/sys/dev/ice/ice_common_txrx.h
index 865c1d27da44..5bab344ecd83 100644
--- a/sys/dev/ice/ice_common_txrx.h
+++ b/sys/dev/ice/ice_common_txrx.h
@@ -169,7 +169,7 @@ ice_tso_setup(struct ice_tx_queue *txq, if_pkt_info_t pi)
txd->qw1 = htole64(type_cmd_tso_mss);
txd->tunneling_params = htole32(0);
- txq->tso++;
+ txq->stats.tso++;
return ((idx + 1) & (txq->desc_count-1));
}
diff --git a/sys/dev/ice/ice_controlq.c b/sys/dev/ice/ice_controlq.c
index 8aa2a7f765a2..e1a6b0fb5662 100644
--- a/sys/dev/ice/ice_controlq.c
+++ b/sys/dev/ice/ice_controlq.c
@@ -82,6 +82,21 @@ static void ice_mailbox_init_regs(struct ice_hw *hw)
}
/**
+ * ice_sb_init_regs - Initialize Sideband registers
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the alloc_sq and alloc_rq functions have already been called
+ */
+static void ice_sb_init_regs(struct ice_hw *hw)
+{
+ struct ice_ctl_q_info *cq = &hw->sbq;
+
+ ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
+
+ ICE_CQ_INIT_REGS(cq, PF_SB);
+}
+
+/**
* ice_check_sq_alive
* @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue
@@ -104,7 +119,7 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);
@@ -113,7 +128,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (!cq->sq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -121,7 +136,7 @@ ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);
@@ -129,7 +144,7 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
cq->rq.desc_buf.va = ice_alloc_dma_mem(hw, &cq->rq.desc_buf, size);
if (!cq->rq.desc_buf.va)
return ICE_ERR_NO_MEMORY;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -150,7 +165,7 @@ static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -195,7 +210,7 @@ ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
desc->params.generic.param0 = 0;
desc->params.generic.param1 = 0;
}
- return ICE_SUCCESS;
+ return 0;
unwind_alloc_rq_bufs:
/* don't try to free the one that failed... */
@@ -214,7 +229,7 @@ unwind_alloc_rq_bufs:
* @hw: pointer to the hardware structure
* @cq: pointer to the specific Control queue
*/
-static enum ice_status
+static int
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
int i;
@@ -235,7 +250,7 @@ ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
if (!bi->va)
goto unwind_alloc_sq_bufs;
}
- return ICE_SUCCESS;
+ return 0;
unwind_alloc_sq_bufs:
/* don't try to free the one that failed... */
@@ -249,7 +264,7 @@ unwind_alloc_sq_bufs:
return ICE_ERR_NO_MEMORY;
}
-static enum ice_status
+static int
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
/* Clear Head and Tail */
@@ -265,7 +280,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
if (rd32(hw, ring->bal) != ICE_LO_DWORD(ring->desc_buf.pa))
return ICE_ERR_AQ_ERROR;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -275,7 +290,7 @@ ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
*
* Configure base address and length registers for the transmit queue
*/
-static enum ice_status
+static int
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
@@ -288,10 +303,10 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
*
* Configure base address and length registers for the receive (event queue)
*/
-static enum ice_status
+static int
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status status;
+ int status;
status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
if (status)
@@ -300,7 +315,7 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));
- return ICE_SUCCESS;
+ return 0;
}
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
@@ -332,9 +347,9 @@ do { \
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -394,9 +409,9 @@ init_ctrlq_exit:
* Do *NOT* hold the lock when calling this as the memory allocation routines
* called are not going to be atomic context safe
*/
-static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+static int ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -449,10 +464,10 @@ init_ctrlq_exit:
*
* The main shutdown routine for the Control Transmit Queue
*/
-static enum ice_status
+static int
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = ICE_SUCCESS;
+ int ret_code = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -491,24 +506,27 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
ice_warn(hw, "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
ice_info(hw, "The driver for the device detected a newer version (%u.%u) of the NVM image than expected (%u.%u). Please install the most recent version of the network driver.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
} else {
/* Major API version is older than expected, log a warning */
ice_info(hw, "The driver for the device detected an older version (%u.%u) of the NVM image than expected (%u.%u). Please update the NVM image.\n",
hw->api_maj_ver, hw->api_min_ver,
- EXP_FW_API_VER_MAJOR, EXP_FW_API_VER_MINOR);
+ exp_fw_api_ver_major, exp_fw_api_ver_minor);
}
return true;
}
@@ -520,10 +538,10 @@ static bool ice_aq_ver_check(struct ice_hw *hw)
*
* The main shutdown routine for the Control Receive Queue
*/
-static enum ice_status
+static int
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
- enum ice_status ret_code = ICE_SUCCESS;
+ int ret_code = 0;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -570,10 +588,10 @@ void ice_idle_aq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* ice_init_check_adminq - Check version for Admin Queue to know if it's alive
* @hw: pointer to the hardware structure
*/
-static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
+static int ice_init_check_adminq(struct ice_hw *hw)
{
struct ice_ctl_q_info *cq = &hw->adminq;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -586,7 +604,7 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
goto init_ctrlq_free_rq;
}
- return ICE_SUCCESS;
+ return 0;
init_ctrlq_free_rq:
ice_shutdown_rq(hw, cq);
@@ -608,10 +626,10 @@ init_ctrlq_free_rq:
*
* NOTE: this function does not initialize the controlq locks
*/
-static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
struct ice_ctl_q_info *cq;
- enum ice_status ret_code;
+ int ret_code;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -620,6 +638,10 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
ice_adminq_init_regs(hw);
cq = &hw->adminq;
break;
+ case ICE_CTL_Q_SB:
+ ice_sb_init_regs(hw);
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
ice_mailbox_init_regs(hw);
cq = &hw->mailboxq;
@@ -649,7 +671,7 @@ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
goto init_ctrlq_free_sq;
/* success! */
- return ICE_SUCCESS;
+ return 0;
init_ctrlq_free_sq:
ice_shutdown_sq(hw, cq);
@@ -657,6 +679,18 @@ init_ctrlq_free_sq:
}
/**
+ * ice_is_sbq_supported - is the sideband queue supported
+ * @hw: pointer to the hardware structure
+ *
+ * Returns true if the sideband control queue interface is
+ * supported for the device, false otherwise
+ */
+static bool ice_is_sbq_supported(struct ice_hw *hw)
+{
+ return ice_is_generic_mac(hw);
+}
+
+/**
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
@@ -678,6 +712,9 @@ ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
if (ice_check_sq_alive(hw, cq))
ice_aq_q_shutdown(hw, unloading);
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
break;
@@ -703,6 +740,9 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
/* Shutdown FW admin queue */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
+ /* Shutdown PHY Sideband */
+ if (ice_is_sbq_supported(hw))
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
@@ -720,10 +760,10 @@ void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
*
* NOTE: this function does not initialize the controlq locks.
*/
-enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
+int ice_init_all_ctrlq(struct ice_hw *hw)
{
- enum ice_status status;
u32 retry = 0;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -744,6 +784,15 @@ enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
if (status)
return status;
+ /* sideband control queue (SBQ) interface is not supported on some
+ * devices. Initialize if supported; otherwise fall back to the
+ * admin queue interface
+ */
+ if (ice_is_sbq_supported(hw)) {
+ status = ice_init_ctrlq(hw, ICE_CTL_Q_SB);
+ if (status)
+ return status;
+ }
/* Init Mailbox queue */
return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}
@@ -776,9 +825,11 @@ static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
* driver needs to re-initialize control queues at run time it should call
* ice_init_all_ctrlq instead.
*/
-enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
+int ice_create_all_ctrlq(struct ice_hw *hw)
{
ice_init_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_init_ctrlq_locks(&hw->sbq);
ice_init_ctrlq_locks(&hw->mailboxq);
return ice_init_all_ctrlq(hw);
@@ -811,6 +862,8 @@ void ice_destroy_all_ctrlq(struct ice_hw *hw)
ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
+ if (ice_is_sbq_supported(hw))
+ ice_destroy_ctrlq_locks(&hw->sbq);
ice_destroy_ctrlq_locks(&hw->mailboxq);
}
@@ -826,16 +879,35 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
struct ice_ctl_q_ring *sq = &cq->sq;
u16 ntc = sq->next_to_clean;
struct ice_aq_desc *desc;
+ u32 head;
desc = ICE_CTL_Q_DESC(*sq, ntc);
- while (rd32(hw, cq->sq.head) != ntc) {
- ice_debug(hw, ICE_DBG_AQ_MSG, "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
+ head = rd32(hw, sq->head);
+ if (head >= sq->count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Read head value (%d) exceeds allowed range.\n",
+ head);
+ return 0;
+ }
+
+ while (head != ntc) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "ntc %d head %d.\n",
+ ntc, head);
ice_memset(desc, 0, sizeof(*desc), ICE_DMA_MEM);
ntc++;
if (ntc == sq->count)
ntc = 0;
desc = ICE_CTL_Q_DESC(*sq, ntc);
+
+ head = rd32(hw, sq->head);
+ if (head >= sq->count) {
+ ice_debug(hw, ICE_DBG_AQ_MSG,
+ "Read head value (%d) exceeds allowed range.\n",
+ head);
+ return 0;
+ }
}
sq->next_to_clean = ntc;
@@ -858,6 +930,8 @@ static const char *ice_ctl_q_str(enum ice_ctl_q qtype)
return "AQ";
case ICE_CTL_Q_MAILBOX:
return "MBXQ";
+ case ICE_CTL_Q_SB:
+ return "SBQ";
default:
return "Unrecognized CQ";
}
@@ -944,7 +1018,7 @@ bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* command into a descriptor, bumps the send queue tail, waits for the command
* to complete, captures status and data for the command, etc.
*/
-static enum ice_status
+int
ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -952,8 +1026,8 @@ ice_sq_send_cmd_nolock(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_dma_mem *dma_buf = NULL;
struct ice_aq_desc *desc_on_ring;
bool cmd_completed = false;
- enum ice_status status = ICE_SUCCESS;
u32 total_delay = 0;
+ int status = 0;
u16 retval = 0;
u32 val = 0;
@@ -1121,12 +1195,12 @@ sq_send_command_error:
* on the queue, bumps the tail, waits for processing of the command, captures
* command status and results, etc.
*/
-enum ice_status
+int
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_aq_desc *desc, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
/* if reset is in progress return a soft error */
if (hw->reset_ongoing)
@@ -1165,15 +1239,15 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
* contains contents of the message, and 'pending' contains the number of
* events left to process.
*/
-enum ice_status
+int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
struct ice_rq_event_info *e, u16 *pending)
{
u16 ntc = cq->rq.next_to_clean;
enum ice_aq_err rq_last_status;
- enum ice_status ret_code = ICE_SUCCESS;
struct ice_aq_desc *desc;
struct ice_dma_mem *bi;
+ int ret_code = 0;
u16 desc_idx;
u16 datalen;
u16 flags;
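
[Editorial sketch] The control queue rework above introduces a third queue type, ICE_CTL_Q_SB, initialized only when ice_is_sbq_supported() (a wrapper around ice_is_generic_mac()) reports support, and shut down alongside the admin and mailbox queues. Below is a hypothetical locked sideband access built on the helpers declared in ice_common.h; using ICE_AQ_FLAG_RD as the message flag, and the bool argument meaning "take the lock internally", are both assumptions:

	static int
	ice_sbq_read_example(struct ice_hw *hw, struct ice_sbq_msg_input *msg)
	{
		int status;

		/* The SBQ only exists on MACs that ice_is_generic_mac() accepts */
		if (!ice_is_generic_mac(hw))
			return (ICE_ERR_NOT_SUPPORTED);

		/* Hold the queue lock across the access; lock=false asks the
		 * low-level helper not to take it again (assumed semantics). */
		ice_sbq_lock(hw);
		status = ice_sbq_rw_reg_lp(hw, msg, ICE_AQ_FLAG_RD, false);
		ice_sbq_unlock(hw);

		return (status);
	}
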
diff --git a/sys/dev/ice/ice_controlq.h b/sys/dev/ice/ice_controlq.h
index d48d53a37161..0604ebed250e 100644
--- a/sys/dev/ice/ice_controlq.h
+++ b/sys/dev/ice/ice_controlq.h
@@ -37,6 +37,7 @@
/* Maximum buffer lengths for all control queue types */
#define ICE_AQ_MAX_BUF_LEN 4096
#define ICE_MBXQ_MAX_BUF_LEN 4096
+#define ICE_SBQ_MAX_BUF_LEN 512
#define ICE_CTL_Q_DESC(R, i) \
(&(((struct ice_aq_desc *)((R).desc_buf.va))[i]))
@@ -48,15 +49,32 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_BRANCH_E830 0x00
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_BRANCH_E810 0x00
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_BRANCH_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_BRANCH_E830 : \
+ EXP_FW_API_VER_BRANCH_E810)
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
ICE_CTL_Q_UNKNOWN = 0,
ICE_CTL_Q_ADMIN,
ICE_CTL_Q_MAILBOX,
+ ICE_CTL_Q_SB,
};
/* Control Queue timeout settings - max delay 1s */
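
[Editorial sketch] With the per-MAC split above, the expected firmware API version is no longer a single constant: by the values defined here, an E830 MAC is checked against 1.7 while all other MACs keep 1.5, and ice_aq_ver_check() consumes the _BY_MAC() macros. A simplified predicate in the spirit of that check (ice_example_api_ver_ok is hypothetical and deliberately omits the warning-only paths of the real function):

	static bool
	ice_example_api_ver_ok(struct ice_hw *hw)
	{
		u8 exp_maj = EXP_FW_API_VER_MAJOR_BY_MAC(hw);	/* 0x01 either way */
		u8 exp_min = EXP_FW_API_VER_MINOR_BY_MAC(hw);	/* 0x07 E830, 0x05 others */

		/* Same major, and minor within the +/-2 tolerance used above */
		return (hw->api_maj_ver == exp_maj &&
		    hw->api_min_ver <= (u8)(exp_min + 2) &&
		    (u8)(hw->api_min_ver + 2) >= exp_min);
	}
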
diff --git a/sys/dev/ice/ice_dcb.c b/sys/dev/ice/ice_dcb.c
index a06117f90aad..98da42783fe0 100644
--- a/sys/dev/ice/ice_dcb.c
+++ b/sys/dev/ice/ice_dcb.c
@@ -46,14 +46,14 @@
*
* Requests the complete LLDP MIB (entire packet). (0x0A00)
*/
-enum ice_status
+int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_get_mib *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_get_mib;
@@ -88,7 +88,7 @@ ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
* Enable or Disable posting of an event on ARQ when LLDP MIB
* associated with the interface changes (0x0A01)
*/
-enum ice_status
+int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd)
{
@@ -130,14 +130,14 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
* Delete the specified TLV from LLDP Local MIB for the given bridge type.
* The firmware places the entire LLDP MIB in the response buffer. (0x0A04)
*/
-enum ice_status
+int
ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_add_delete_tlv *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (tlv_len == 0)
return ICE_ERR_PARAM;
@@ -179,14 +179,14 @@ ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
* Firmware will place the complete LLDP MIB in response buffer with the
* updated TLV. (0x0A03)
*/
-enum ice_status
+int
ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_update_tlv *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_update_tlv;
@@ -221,7 +221,7 @@ ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
*
* Stop or Shutdown the embedded LLDP Agent (0x0A05)
*/
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd)
{
@@ -249,7 +249,7 @@ ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
*
* Start the embedded LLDP Agent on all ports. (0x0A06)
*/
-enum ice_status
+int
ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_start *cmd;
@@ -730,11 +730,11 @@ ice_parse_org_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
*
* Parse DCB configuration from the LLDPDU
*/
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
+int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
{
struct ice_lldp_org_tlv *tlv;
- enum ice_status ret = ICE_SUCCESS;
u16 offset = 0;
+ int ret = 0;
u16 typelen;
u16 type;
u16 len;
@@ -780,12 +780,12 @@ enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
*
* Query DCB configuration from the firmware
*/
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg)
{
- enum ice_status ret;
u8 *lldpmib;
+ int ret;
/* Allocate the LLDPDU */
lldpmib = (u8 *)ice_malloc(hw, ICE_LLDPDU_SIZE);
@@ -795,7 +795,7 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
ret = ice_aq_get_lldp_mib(hw, bridgetype, mib_type, (void *)lldpmib,
ICE_LLDPDU_SIZE, NULL, NULL, NULL);
- if (ret == ICE_SUCCESS)
+ if (!ret)
/* Parse LLDP MIB to get DCB configuration */
ret = ice_lldp_to_dcb_cfg(lldpmib, dcbcfg);
@@ -815,13 +815,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
* This sends out request/release to ignore PFC condition for a TC.
* It will return the TCs for which PFC is currently ignored. (0x0301)
*/
-enum ice_status
+int
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd)
{
struct ice_aqc_pfc_ignore *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.pfc_ignore;
@@ -851,17 +851,17 @@ ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
* @cd: pointer to command details structure or NULL
*
* Start/Stop the embedded dcbx Agent. In case that this wrapper function
- * returns ICE_SUCCESS, caller will need to check if FW returns back the same
+ * returns 0, the caller must check whether FW reports the same
* value as stated in dcbx_agent_status, and react accordingly. (0x0A09)
*/
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_stop_start_specific_agent *cmd;
enum ice_adminq_opc opcode;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.lldp_agent_ctrl;
@@ -876,7 +876,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*dcbx_agent_status = false;
- if (status == ICE_SUCCESS &&
+ if (!status &&
cmd->command == ICE_AQC_START_STOP_AGENT_START_DCBX)
*dcbx_agent_status = true;
@@ -891,7 +891,7 @@ ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
*
* Get CEE DCBX mode operational configuration from firmware (0x0A07)
*/
-enum ice_status
+int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd)
@@ -912,12 +912,12 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
* This will return an indication if DSCP-based PFC or VLAN-based PFC
* is enabled. (0x0302)
*/
-enum ice_status
+int
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.set_query_pfc_mode;
@@ -940,12 +940,12 @@ ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd)
* This AQ call configures the PFC mode to DSCP-based PFC or VLAN
* -based PFC (0x0303)
*/
-enum ice_status
+int
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
{
struct ice_aqc_set_query_pfc_mode *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
return ICE_ERR_PARAM;
@@ -968,7 +968,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
if (cmd->pfc_mode != pfc_mode)
return ICE_ERR_NOT_SUPPORTED;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -980,7 +980,7 @@ ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
* This AQ command will tell FW if it will apply or not apply the default DCB
* configuration when link up (0x0306).
*/
-enum ice_status
+int
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd)
{
@@ -1126,11 +1126,11 @@ ice_cee_to_dcb_cfg(struct ice_aqc_get_cee_dcb_cfg_resp *cee_cfg,
*
* Get IEEE or CEE mode DCB configuration from the Firmware
*/
-STATIC enum ice_status
+STATIC int
ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
{
struct ice_dcbx_cfg *dcbx_cfg = NULL;
- enum ice_status ret;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
@@ -1154,7 +1154,7 @@ ice_get_ieee_or_cee_dcb_cfg(struct ice_port_info *pi, u8 dcbx_mode)
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID, dcbx_cfg);
/* Don't treat ENOENT as an error for Remote MIBs */
if (pi->hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
- ret = ICE_SUCCESS;
+ ret = 0;
out:
return ret;
@@ -1166,17 +1166,17 @@ out:
*
* Get DCB configuration from the Firmware
*/
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi)
+int ice_get_dcb_cfg(struct ice_port_info *pi)
{
struct ice_aqc_get_cee_dcb_cfg_resp cee_cfg;
struct ice_dcbx_cfg *dcbx_cfg;
- enum ice_status ret;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
ret = ice_aq_get_cee_dcb_cfg(pi->hw, &cee_cfg, NULL);
- if (ret == ICE_SUCCESS) {
+ if (!ret) {
/* CEE mode */
ret = ice_get_ieee_or_cee_dcb_cfg(pi, ICE_DCBX_MODE_CEE);
ice_cee_to_dcb_cfg(&cee_cfg, pi);
@@ -1234,10 +1234,10 @@ void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
*
* Update DCB configuration from the Firmware
*/
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret = ICE_SUCCESS;
+ int ret = 0;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
@@ -1276,10 +1276,10 @@ enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change)
*
* Configure (disable/enable) MIB
*/
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib)
{
struct ice_qos_cfg *qos_cfg = &hw->port_info->qos_cfg;
- enum ice_status ret;
+ int ret;
if (!hw->func_caps.common_cap.dcb)
return ICE_ERR_NOT_SUPPORTED;
@@ -1728,13 +1728,13 @@ void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg)
*
* Set DCB configuration to the Firmware
*/
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
+int ice_set_dcb_cfg(struct ice_port_info *pi)
{
u8 mib_type, *lldpmib = NULL;
struct ice_dcbx_cfg *dcbcfg;
- enum ice_status ret;
struct ice_hw *hw;
u16 miblen;
+ int ret;
if (!pi)
return ICE_ERR_PARAM;
@@ -1770,14 +1770,14 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
*
* query current port ETS configuration
*/
-enum ice_status
+int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_query_port_ets *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (!pi || !pi->root)
return ICE_ERR_PARAM;
@@ -1796,14 +1796,14 @@ ice_aq_query_port_ets(struct ice_port_info *pi,
*
* update the SW DB with the new TC changes
*/
-enum ice_status
+int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf)
{
struct ice_sched_node *node, *tc_node;
struct ice_aqc_txsched_elem_data elem;
- enum ice_status status = ICE_SUCCESS;
u32 teid1, teid2;
+ int status = 0;
u8 i, j;
if (!pi)
@@ -1864,12 +1864,12 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
* query current port ETS configuration and update the
* SW DB with the TC changes
*/
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_aq_query_port_ets(pi, buf, buf_size, cd);
diff --git a/sys/dev/ice/ice_dcb.h b/sys/dev/ice/ice_dcb.h
index ec2200afe200..373b0313cb6b 100644
--- a/sys/dev/ice/ice_dcb.h
+++ b/sys/dev/ice/ice_dcb.h
@@ -215,64 +215,64 @@ struct ice_dcbx_variables {
u32 deftsaassignment;
};
-enum ice_status
+int
ice_aq_get_lldp_mib(struct ice_hw *hw, u8 bridge_type, u8 mib_type, void *buf,
u16 buf_size, u16 *local_len, u16 *remote_len,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_add_delete_lldp_tlv(struct ice_hw *hw, u8 bridge_type, bool add_lldp_tlv,
void *buf, u16 buf_size, u16 tlv_len, u16 *mib_len,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_update_lldp_tlv(struct ice_hw *hw, u8 bridge_type, void *buf,
u16 buf_size, u16 old_len, u16 new_len, u16 offset,
u16 *mib_len, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_dcb_ignore_pfc(struct ice_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
struct ice_aqc_get_cee_dcb_cfg_resp *buff,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_query_pfc_mode(struct ice_hw *hw, u8 *pfcmode_ret, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_dcb_parameters(struct ice_hw *hw, bool dcb_enable,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
-enum ice_status ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
+int ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg);
u8 ice_get_dcbx_status(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
-enum ice_status ice_get_dcb_cfg(struct ice_port_info *pi);
-enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
+int ice_get_dcb_cfg(struct ice_port_info *pi);
+int ice_set_dcb_cfg(struct ice_port_info *pi);
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event);
-enum ice_status ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
+int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
void ice_dcb_cfg_to_lldp(u8 *lldpmib, u16 *miblen, struct ice_dcbx_cfg *dcbcfg);
-enum ice_status
+int
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
-enum ice_status
+int
ice_aq_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf);
-enum ice_status
+int
ice_aq_stop_lldp(struct ice_hw *hw, bool shutdown_lldp_agent, bool persist,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_start_stop_dcbx(struct ice_hw *hw, bool start_dcbx_agent,
bool *dcbx_agent_status, struct ice_sq_cd *cd);
-enum ice_status ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
-enum ice_status
+int ice_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_mib);
+int
ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
struct ice_sq_cd *cd);
#endif /* _ICE_DCB_H_ */
diff --git a/sys/dev/ice/ice_ddp_common.c b/sys/dev/ice/ice_ddp_common.c
index a1573f5ea998..dfc50cc1f966 100644
--- a/sys/dev/ice/ice_ddp_common.c
+++ b/sys/dev/ice/ice_ddp_common.c
@@ -46,14 +46,14 @@
*
* Download Package (0x0C40)
*/
-static enum ice_status
+static int
ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, bool last_buf, u32 *error_offset,
u32 *error_info, struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -91,7 +91,7 @@ ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Upload Section (0x0C41)
*/
-enum ice_status
+int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd)
{
@@ -115,14 +115,14 @@ ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
*
* Update Package (0x0C42)
*/
-static enum ice_status
+static int
ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf, u16 buf_size,
bool last_buf, u32 *error_offset, u32 *error_info,
struct ice_sq_cd *cd)
{
struct ice_aqc_download_pkg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
if (error_offset)
*error_offset = 0;
@@ -255,10 +255,10 @@ ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
*/
-enum ice_status
+int
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u32 i;
for (i = 0; i < count; i++) {
@@ -287,10 +287,10 @@ ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
*
* Obtains change lock and updates package.
*/
-enum ice_status
+int
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- enum ice_status status;
+ int status;
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
@@ -394,8 +394,8 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
return ICE_DDP_PKG_SUCCESS;
for (i = 0; i < count; i++) {
- enum ice_status status;
bool last = false;
+ int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -430,7 +430,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
*
* Get Package Info List (0x0C43)
*/
-static enum ice_status
+static int
ice_aq_get_pkg_info_list(struct ice_hw *hw,
struct ice_aqc_get_pkg_info_resp *pkg_info,
u16 buf_size, struct ice_sq_cd *cd)
@@ -443,21 +443,6 @@ ice_aq_get_pkg_info_list(struct ice_hw *hw,
}
/**
- * ice_has_signing_seg - determine if package has a signing segment
- * @hw: pointer to the hardware structure
- * @pkg_hdr: pointer to the driver's package hdr
- */
-static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
-{
- struct ice_generic_seg_hdr *seg_hdr;
-
- seg_hdr = (struct ice_generic_seg_hdr *)
- ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
-
- return seg_hdr ? true : false;
-}
-
-/**
* ice_get_pkg_segment_id - get correct package segment id, based on device
* @mac_type: MAC type of the device
*/
@@ -466,6 +451,9 @@ static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
u32 seg_id;
switch (mac_type) {
+ case ICE_MAC_E830:
+ seg_id = SEGMENT_TYPE_ICE_E830;
+ break;
case ICE_MAC_GENERIC:
case ICE_MAC_GENERIC_3K:
case ICE_MAC_GENERIC_3K_E825:
@@ -486,6 +474,9 @@ static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
u32 sign_type;
switch (mac_type) {
+ case ICE_MAC_E830:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
+ break;
case ICE_MAC_GENERIC_3K:
sign_type = SEGMENT_SIGN_TYPE_RSA3K;
break;
@@ -535,12 +526,13 @@ ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
* @idx: segment index
* @start: starting buffer
* @count: buffer count
+ * @last_seg: last segment being downloaded
*
* Note: idx must reference a ICE segment
*/
static enum ice_ddp_state
ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
- u32 idx, u32 start, u32 count)
+ u32 idx, u32 start, u32 count, bool last_seg)
{
struct ice_buf_table *bufs;
enum ice_ddp_state state;
@@ -558,7 +550,7 @@ ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
return ICE_DDP_PKG_ERR;
state = ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
- true);
+ last_seg);
return state;
}
@@ -577,9 +569,11 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
{
enum ice_ddp_state state;
struct ice_sign_seg *seg;
+ bool last_seg = true;
u32 conf_idx;
u32 start;
u32 count;
+ u32 flags;
seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
if (!seg) {
@@ -590,13 +584,25 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
conf_idx = LE32_TO_CPU(seg->signed_seg_idx);
start = LE32_TO_CPU(seg->signed_buf_start);
count = LE32_TO_CPU(seg->signed_buf_count);
+ flags = LE32_TO_CPU(seg->flags);
+
+ if (flags & ICE_SIGN_SEG_FLAGS_VALID)
+ last_seg = !!(flags & ICE_SIGN_SEG_FLAGS_LAST);
state = ice_download_pkg_sig_seg(hw, seg);
if (state)
goto exit;
+ if (count == 0) {
+ /* this is a "Reference Signature Segment" and download should
+ * be only for the buffers in the signature segment (and not
+ * the hardware configuration segment)
+ */
+ goto exit;
+ }
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
- count);
+ count, last_seg);
exit:
return state;
@@ -633,7 +639,7 @@ static enum ice_ddp_state
ice_post_dwnld_pkg_actions(struct ice_hw *hw)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- enum ice_status status;
+ int status;
status = ice_set_vlan_mode(hw);
if (status) {
@@ -655,7 +661,7 @@ ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
- enum ice_status status;
+ int status;
u32 i;
ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
@@ -701,8 +707,8 @@ static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- enum ice_status status;
struct ice_buf_hdr *bh;
+ int status;
if (!bufs || !count)
return ICE_DDP_PKG_ERR;
@@ -779,7 +785,7 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
{
enum ice_ddp_state state;
- if (hw->pkg_has_signing_seg)
+ if (ice_match_signing_seg(pkg_hdr, hw->pkg_seg_id, hw->pkg_sign_type))
state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
else
state = ice_download_pkg_without_sig_seg(hw, ice_seg);
@@ -804,7 +810,6 @@ ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
if (!pkg_hdr)
return ICE_DDP_PKG_ERR;
- hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
ice_get_signing_req(hw);
ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
@@ -988,7 +993,7 @@ ice_enum_labels(struct ice_seg *ice_seg, u32 type, struct ice_pkg_enum *state,
* The ice_seg parameter must not be NULL since the first call to
* ice_enum_labels requires a pointer to an actual ice_seg structure.
*/
-enum ice_status
+int
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
u16 *value)
{
@@ -1005,7 +1010,7 @@ ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
label_name = ice_enum_labels(ice_seg, type, &state, &val);
if (label_name && !strcmp(label_name, name)) {
*value = val;
- return ICE_SUCCESS;
+ return 0;
}
ice_seg = NULL;
@@ -1100,7 +1105,6 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
(pkg_ver->major == ICE_PKG_SUPP_VER_MAJ &&
pkg_ver->minor < ICE_PKG_SUPP_VER_MNR))
return ICE_DDP_PKG_FILE_VERSION_TOO_LOW;
-
return ICE_DDP_PKG_SUCCESS;
}
@@ -1245,7 +1249,7 @@ static int ice_get_prof_index_max(struct ice_hw *hw)
hw->switch_info->max_used_prof_index = max_prof_index;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1269,11 +1273,8 @@ ice_get_ddp_pkg_state(struct ice_hw *hw, bool already_loaded)
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
return ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED;
- } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
- hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
} else {
- return ICE_DDP_PKG_ERR;
+ return ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED;
}
}
@@ -1340,12 +1341,6 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
if (state)
return state;
- /* For packages with signing segments, must be a matching segment */
- if (hw->pkg_has_signing_seg)
- if (!ice_match_signing_seg(pkg, hw->pkg_seg_id,
- hw->pkg_sign_type))
- return ICE_DDP_PKG_ERR;
-
/* before downloading the package, check package version for
* compatibility with driver
*/
@@ -1579,7 +1574,7 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
* NOTE: The caller of the function is responsible for freeing the memory
* allocated for every list entry.
*/
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list)
{
@@ -1638,7 +1633,7 @@ ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_warn(hw, "Required profiles not found in currently loaded DDP package");
return ICE_ERR_CFG;
}
- return ICE_SUCCESS;
+ return 0;
err:
LIST_FOR_EACH_ENTRY_SAFE(fvl, tmp, fv_list, ice_sw_fv_list_entry,
@@ -1717,7 +1712,7 @@ void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld)
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-enum ice_status
+int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1742,7 +1737,7 @@ ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count)
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1849,7 +1844,7 @@ ice_pkg_buf_alloc_single_section_err:
* result in some wasted space in the buffer.
* Note: all package contents must be in Little Endian form.
*/
-enum ice_status
+int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
{
struct ice_buf_hdr *buf;
@@ -1874,7 +1869,7 @@ ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count)
FLEX_ARRAY_SIZE(buf, section_entry, count);
buf->data_end = CPU_TO_LE16(data_end);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2193,7 +2188,7 @@ ice_boost_tcam_handler(u32 sect_type, void *section, u32 index, u32 *offset)
* if it is found. The ice_seg parameter must not be NULL since the first call
* to ice_pkg_enum_entry requires a pointer to an actual ice_segment structure.
*/
-static enum ice_status
+static int
ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
struct ice_boost_tcam_entry **entry)
{
@@ -2212,7 +2207,7 @@ ice_find_boost_entry(struct ice_seg *ice_seg, u16 addr,
ice_boost_tcam_handler);
if (tcam && LE16_TO_CPU(tcam->addr) == addr) {
*entry = tcam;
- return ICE_SUCCESS;
+ return 0;
}
ice_seg = NULL;
@@ -2275,18 +2270,18 @@ void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg)
* or writing of the package. When attempting to obtain write access, the
* caller must check for the following two return values:
*
- * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * 0 - Means the caller has acquired the global config lock
* and can perform writing of the package.
* ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
* package or has found that no update was necessary; in
* this case, the caller can just skip performing any
* update of the package.
*/
-enum ice_status
+int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access)
{
- enum ice_status status;
+ int status;
status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
ICE_GLOBAL_CFG_LOCK_TIMEOUT);
@@ -2315,7 +2310,7 @@ void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -2334,6 +2329,22 @@ void ice_release_change_lock(struct ice_hw *hw)
}
/**
+ * ice_is_get_tx_sched_new_format - check Tx scheduler get API format
+ * @hw: pointer to the HW struct
+ *
+ * Determines if the new format for the Tx scheduler get API is supported
+ */
+static bool
+ice_is_get_tx_sched_new_format(struct ice_hw *hw)
+{
+ if (ice_is_e830(hw))
+ return true;
+ if (ice_is_e825c(hw))
+ return true;
+ return false;
+}
+
+/**
* ice_get_set_tx_topo - get or set tx topology
* @hw: pointer to the HW struct
* @buf: pointer to tx topology buffer
@@ -2344,13 +2355,13 @@ void ice_release_change_lock(struct ice_hw *hw)
*
* The function will get or set tx topology
*/
-static enum ice_status
+static int
ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
struct ice_sq_cd *cd, u8 *flags, bool set)
{
struct ice_aqc_get_set_tx_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_set_tx_topo;
if (set) {
@@ -2360,11 +2371,16 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
if (buf)
cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
} else {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+
+ if (!ice_is_get_tx_sched_new_format(hw))
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
}
- desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
if (status)
return status;
@@ -2372,7 +2388,7 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
if (!set && flags)
*flags = desc.params.get_set_tx_topo.set_flags;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2384,7 +2400,7 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
* The function will apply the new Tx topology from the package buffer
* if available.
*/
-enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
{
u8 *current_topo, *new_topo = NULL;
struct ice_run_time_cfg_seg *seg;
@@ -2392,8 +2408,8 @@ enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
struct ice_pkg_hdr *pkg_hdr;
enum ice_ddp_state state;
u16 i, size = 0, offset;
- enum ice_status status;
u32 reg = 0;
+ int status;
u8 flags;
if (!buf || !len)
@@ -2514,7 +2530,7 @@ update_topo:
/* Reset is in progress, re-init the hw again */
ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. layer topology might be applied already\n");
ice_check_reset(hw);
- return ICE_SUCCESS;
+ return 0;
}
/* set new topology */
@@ -2531,5 +2547,5 @@ update_topo:
/* CORER will clear the global lock, so no explicit call
* required for release
*/
- return ICE_SUCCESS;
+ return 0;
}
diff --git a/sys/dev/ice/ice_ddp_common.h b/sys/dev/ice/ice_ddp_common.h
index 9305dc83520d..a7b717c3e15e 100644
--- a/sys/dev/ice/ice_ddp_common.h
+++ b/sys/dev/ice/ice_ddp_common.h
@@ -134,6 +134,7 @@ struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE_E810 0x00000010
#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_E830 0x00000017
#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
@@ -203,7 +204,10 @@ struct ice_sign_seg {
__le32 signed_seg_idx;
__le32 signed_buf_start;
__le32 signed_buf_count;
-#define ICE_SIGN_SEG_RESERVED_COUNT 44
+#define ICE_SIGN_SEG_FLAGS_VALID 0x80000000
+#define ICE_SIGN_SEG_FLAGS_LAST 0x00000001
+ __le32 flags;
+#define ICE_SIGN_SEG_RESERVED_COUNT 40
u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
struct ice_buf_table buf_tbl;
};
@@ -413,26 +417,26 @@ struct ice_pkg_enum {
struct ice_hw;
-enum ice_status
+int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
void *
ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
-enum ice_status
+int
ice_pkg_buf_reserve_section(struct ice_buf_build *bld, u16 count);
-enum ice_status
+int
ice_get_sw_fv_list(struct ice_hw *hw, struct ice_prot_lkup_ext *lkups,
ice_bitmap_t *bm, struct LIST_HEAD_TYPE *fv_list);
-enum ice_status
+int
ice_pkg_buf_unreserve_section(struct ice_buf_build *bld, u16 count);
u16 ice_pkg_buf_get_free_space(struct ice_buf_build *bld);
u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
-enum ice_status
+int
ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
-enum ice_status
+int
ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
void ice_release_global_cfg_lock(struct ice_hw *hw);
struct ice_generic_seg_hdr *
@@ -444,7 +448,7 @@ enum ice_ddp_state
ice_get_pkg_info(struct ice_hw *hw);
void ice_init_pkg_hints(struct ice_hw *hw, struct ice_seg *ice_seg);
struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg);
-enum ice_status
+int
ice_acquire_global_cfg_lock(struct ice_hw *hw,
enum ice_aq_res_access_type access);
@@ -473,6 +477,6 @@ ice_pkg_buf_alloc_single_section(struct ice_hw *hw, u32 type, u16 size,
struct ice_buf *ice_pkg_buf(struct ice_buf_build *bld);
void ice_pkg_buf_free(struct ice_hw *hw, struct ice_buf_build *bld);
-enum ice_status ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
-#endif /* _ICE_DDP_COMMON_H_ */
+#endif /* _ICE_DDP_H_ */
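
[Editorial sketch] The flags word added to struct ice_sign_seg above is carved out of the reserved area (ICE_SIGN_SEG_RESERVED_COUNT shrinks from 44 to 40, so the segment layout size is unchanged), and ice_dwnld_sign_and_cfg_segs() uses it to decide whether the signed configuration segment is the last one downloaded. A condensed sketch of that decode (ice_example_sign_seg_is_last is hypothetical):

	static bool
	ice_example_sign_seg_is_last(struct ice_sign_seg *seg)
	{
		u32 flags = LE32_TO_CPU(seg->flags);

		/* Packages that predate the flags word never set VALID;
		 * treat the segment as last, matching the old behavior. */
		if (!(flags & ICE_SIGN_SEG_FLAGS_VALID))
			return (true);

		return ((flags & ICE_SIGN_SEG_FLAGS_LAST) != 0);
	}
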
diff --git a/sys/dev/ice/ice_devids.h b/sys/dev/ice/ice_devids.h
index b5cbbfda6a3b..74712c61ae8e 100644
--- a/sys/dev/ice/ice_devids.h
+++ b/sys/dev/ice/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright (c) 2023, Intel Corporation
+/* Copyright (c) 2024, Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -44,6 +44,42 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
+/* Intel(R) Ethernet Controller E830-L for backplane */
+#define ICE_DEV_ID_E830_L_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
+/* Intel(R) Ethernet Controller E830-L for QSFP */
+#define ICE_DEV_ID_E830_L_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-L for SFP */
+#define ICE_DEV_ID_E830_L_SFP 0x12DE
+/* Intel(R) Ethernet Controller E835-CC for backplane */
+#define ICE_DEV_ID_E835CC_BACKPLANE 0x1248
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835CC_QSFP56 0x1249
+/* Intel(R) Ethernet Controller E835-CC for SFP */
+#define ICE_DEV_ID_E835CC_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-C for backplane */
+#define ICE_DEV_ID_E835C_BACKPLANE 0x1261
+/* Intel(R) Ethernet Controller E835-C for QSFP */
+#define ICE_DEV_ID_E835C_QSFP 0x1262
+/* Intel(R) Ethernet Controller E835-C for SFP */
+#define ICE_DEV_ID_E835C_SFP 0x1263
+/* Intel(R) Ethernet Controller E835-L for backplane */
+#define ICE_DEV_ID_E835_L_BACKPLANE 0x1265
+/* Intel(R) Ethernet Controller E835-L for QSFP */
+#define ICE_DEV_ID_E835_L_QSFP 0x1266
+/* Intel(R) Ethernet Controller E835-L for SFP */
+#define ICE_DEV_ID_E835_L_SFP 0x1267
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
@@ -52,11 +88,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
-#define ICE_SUBDEV_ID_E810T3 0x02E9
-#define ICE_SUBDEV_ID_E810T4 0x02EA
-#define ICE_SUBDEV_ID_E810T5 0x0010
-#define ICE_SUBDEV_ID_E810T6 0x0012
-#define ICE_SUBDEV_ID_E810T7 0x0011
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
@@ -91,4 +127,12 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Connection E825-C for backplane */
+#define ICE_DEV_ID_E825C_BACKPLANE 0x579C
+/* Intel(R) Ethernet Connection E825-C for QSFP */
+#define ICE_DEV_ID_E825C_QSFP 0x579D
+/* Intel(R) Ethernet Connection E825-C for SFP */
+#define ICE_DEV_ID_E825C_SFP 0x579E
+/* Intel(R) Ethernet Connection E825-C 1GbE */
+#define ICE_DEV_ID_E825C_SGMII 0x579F
#endif /* _ICE_DEVIDS_H_ */
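All of the new E830, E835, and E825-C identifiers are ordinary PCI device IDs under Intel's 0x8086 vendor ID. A hedged sketch of matching one family outside the probe table (device_t and the pci_get_*() accessors come from <sys/bus.h> and <dev/pci/pcivar.h>; the helper itself is hypothetical):

static bool
example_is_e825c(device_t dev)
{
	if (pci_get_vendor(dev) != 0x8086)	/* ICE_INTEL_VENDOR_ID */
		return (false);
	switch (pci_get_device(dev)) {
	case ICE_DEV_ID_E825C_BACKPLANE:	/* 0x579C */
	case ICE_DEV_ID_E825C_QSFP:		/* 0x579D */
	case ICE_DEV_ID_E825C_SFP:		/* 0x579E */
	case ICE_DEV_ID_E825C_SGMII:		/* 0x579F */
		return (true);
	default:
		return (false);
	}
}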
diff --git a/sys/dev/ice/ice_drv_info.h b/sys/dev/ice/ice_drv_info.h
index 8e1200e08a64..46965f4124bc 100644
--- a/sys/dev/ice/ice_drv_info.h
+++ b/sys/dev/ice/ice_drv_info.h
@@ -62,16 +62,16 @@
* @var ice_rc_version
* @brief driver release candidate version number
*/
-const char ice_driver_version[] = "1.39.13-k";
+const char ice_driver_version[] = "1.43.3-k";
const uint8_t ice_major_version = 1;
-const uint8_t ice_minor_version = 39;
-const uint8_t ice_patch_version = 13;
+const uint8_t ice_minor_version = 43;
+const uint8_t ice_patch_version = 3;
const uint8_t ice_rc_version = 0;
#define PVIDV(vendor, devid, name) \
- PVID(vendor, devid, name " - 1.39.13-k")
+ PVID(vendor, devid, name " - 1.43.3-k")
#define PVIDV_OEM(vendor, devid, svid, sdevid, revid, name) \
- PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.39.13-k")
+ PVID_OEM(vendor, devid, svid, sdevid, revid, name " - 1.43.3-k")
/**
* @var ice_vendor_info_array
@@ -190,6 +190,79 @@ static const pci_vendor_info_t ice_vendor_info_array[] = {
"Intel(R) Ethernet Network Adapter E810-XXV-2 for OCP 3.0"),
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E810_XXV_SFP,
"Intel(R) Ethernet Controller E810-XXV for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-CC for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E830-C-Q2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E830-CC-Q1 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
+ "Intel(R) Ethernet Connection E830-CC for QSFP56"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_QSFP,
+ "Intel(R) Ethernet Connection E830-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830C_SFP,
+ "Intel(R) Ethernet Connection E830-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_BACKPLANE,
+ "Intel(R) Ethernet Connection E830-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_QSFP,
+ "Intel(R) Ethernet Connection E830-L for QSFP"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E830-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E830-XXV-2"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_L_SFP,
+ "Intel(R) Ethernet Connection E830-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-CC for backplane"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E835-C-Q2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0002, 0,
+ "Intel(R) Ethernet Network Adapter E835-C-Q2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E835-CC-Q1"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E835-CC-Q1 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_QSFP56,
+ "Intel(R) Ethernet Connection E835-CC for QSFP56"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0001, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-2 for OCP 3.0"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0003, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-2"),
+ PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ ICE_INTEL_VENDOR_ID, 0x0004, 0,
+ "Intel(R) Ethernet Network Adapter E835-XXV-4 for OCP 3.0"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835CC_SFP,
+ "Intel(R) Ethernet Connection E835-CC for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_QSFP,
+ "Intel(R) Ethernet Connection E835-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835C_SFP,
+ "Intel(R) Ethernet Connection E835-C for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_BACKPLANE,
+ "Intel(R) Ethernet Connection E835-L for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_QSFP,
+ "Intel(R) Ethernet Connection E835-L for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E835_L_SFP,
+ "Intel(R) Ethernet Connection E835-L for SFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_BACKPLANE,
+ "Intel(R) Ethernet Connection E825-C for backplane"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_QSFP,
+ "Intel(R) Ethernet Connection E825-C for QSFP"),
+ PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E825C_SFP,
+ "Intel(R) Ethernet Connection E825-C for SFP"),
PVID_END
};
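Note the ordering inside the array: for a device ID with OEM-branded variants (for example ICE_DEV_ID_E830_QSFP56), the PVIDV_OEM entries carrying specific subvendor/subdevice pairs precede the catch-all PVIDV entry, so the table, matched in order, reports the branded adapter name first. A schematic illustration of what one pair of entries carries (the expansion is illustrative only; the real PVID/PVID_OEM macros live in the iflib PCI layer):

/* Schematic expansion only -- not the literal macro output. */
PVIDV_OEM(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
    ICE_INTEL_VENDOR_ID, 0x0004, 0, "...E830-CC-Q1 for OCP 3.0")
/* ~ matches 8086:12D2 with subsystem 8086:0004, any revision;
 *   description ends in " - 1.43.3-k" via PVIDV_OEM */
PVIDV(ICE_INTEL_VENDOR_ID, ICE_DEV_ID_E830_QSFP56,
    "Intel(R) Ethernet Connection E830-CC for QSFP56")
/* ~ matches 8086:12D2 with any subsystem -- must come last */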
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
index 03b8c63af291..5b23757b1c98 100644
--- a/sys/dev/ice/ice_features.h
+++ b/sys/dev/ice/ice_features.h
@@ -71,6 +71,8 @@ enum feat_list {
ICE_FEATURE_TX_BALANCE,
ICE_FEATURE_DUAL_NAC,
ICE_FEATURE_TEMP_SENSOR,
+ ICE_FEATURE_NEXT_CLUSTER_ID,
+ ICE_FEATURE_PHY_STATISTICS,
/* Must be last entry */
ICE_FEATURE_COUNT
};
@@ -89,6 +91,9 @@ enum feat_list {
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
+#ifndef PCI_IOV
+ ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
+#endif
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif
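ice_disable_unsupported_features() now also strips the SR-IOV bit when the kernel is built without PCI_IOV, mirroring the existing netmap gate. A hedged sketch of why a single init-time clear suffices (ice_is_bit_set() appears later in this diff; the softc field name feat_cap is an assumption):

/* Illustrative check; sc->feat_cap is an assumed field name. */
static inline bool
example_feature_usable(struct ice_softc *sc, enum feat_list f)
{
	/*
	 * Bits cleared at init by ice_disable_unsupported_features()
	 * are never set again, so one bitmap test answers the question.
	 */
	return (ice_is_bit_set(sc->feat_cap, f));
}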
diff --git a/sys/dev/ice/ice_flex_pipe.c b/sys/dev/ice/ice_flex_pipe.c
index f103e2aa6e71..683e23483b0a 100644
--- a/sys/dev/ice/ice_flex_pipe.c
+++ b/sys/dev/ice/ice_flex_pipe.c
@@ -193,7 +193,7 @@ void ice_add_tunnel_hint(struct ice_hw *hw, char *label_name, u16 val)
* ------------------------------
* Result: key: b01 10 11 11 00 00
*/
-static enum ice_status
+static int
ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
u8 *key_inv)
{
@@ -237,7 +237,7 @@ ice_gen_key_word(u8 val, u8 valid, u8 dont_care, u8 nvr_mtch, u8 *key,
in_key_inv >>= 1;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -295,7 +295,7 @@ static bool ice_bits_max_set(const u8 *mask, u16 size, u16 max)
* dc == NULL --> dc mask is all 0's (no don't care bits)
* nm == NULL --> nm mask is all 0's (no never match bits)
*/
-static enum ice_status
+static int
ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
u16 len)
{
@@ -324,7 +324,7 @@ ice_set_key(u8 *key, u16 size, u8 *val, u8 *upd, u8 *dc, u8 *nm, u16 off,
key + off + i, key + half_size + off + i))
return ICE_ERR_CFG;
- return ICE_SUCCESS;
+ return 0;
}
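ice_gen_key_word() and ice_set_key() above build TCAM key material in which each logical bit occupies a (key, key_inv) pair, per the "b01 10 11 ..." example in the preceding comment. A hedged decoder for that encoding (this is one consistent reading; which physical bit is key versus key_inv is an assumption here):

enum key_bit { MATCH_0, MATCH_1, DONT_CARE, NEVER_MATCH };

static enum key_bit
example_decode_key_bit(bool key, bool key_inv)
{
	if (key && !key_inv)
		return (MATCH_1);	/* "b10": always match a 1 */
	if (!key && key_inv)
		return (MATCH_0);	/* "b01": always match a 0 */
	if (key && key_inv)
		return (DONT_CARE);	/* "b11": always matches   */
	return (NEVER_MATCH);		/* "b00": never matches    */
}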
/**
@@ -461,19 +461,19 @@ ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
* creating a package buffer with the tunnel info and issuing an update package
* command.
*/
-enum ice_status
+int
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = ICE_ERR_MAX_LIMIT;
u16 index;
ice_acquire_lock(&hw->tnl_lock);
if (ice_tunnel_port_in_use_hlpr(hw, port, &index)) {
hw->tnl.tbl[index].ref++;
- status = ICE_SUCCESS;
+ status = 0;
goto ice_create_tunnel_end;
}
@@ -548,11 +548,11 @@ ice_create_tunnel_end:
* targeting the specific updates requested and then performing an update
* package.
*/
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
+int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
{
struct ice_boost_tcam_section *sect_rx, *sect_tx;
- enum ice_status status = ICE_ERR_MAX_LIMIT;
struct ice_buf_build *bld;
+ int status = ICE_ERR_MAX_LIMIT;
u16 count = 0;
u16 index;
u16 size;
@@ -563,7 +563,7 @@ enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all)
if (!all && ice_tunnel_port_in_use_hlpr(hw, port, &index))
if (hw->tnl.tbl[index].ref > 1) {
hw->tnl.tbl[index].ref--;
- status = ICE_SUCCESS;
+ status = 0;
goto ice_destroy_tunnel_end;
}
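Both tunnel hunks implement reference counting on hw->tnl.tbl: only the first creation and the final destruction of a given port actually build and issue a boost-TCAM update package. A hedged call sequence (the type argument is whatever enum ice_tunnel_type value the caller uses; error checks elided):

/* Illustrative sequence for one UDP port; error handling elided. */
(void)ice_create_tunnel(hw, type, port);   /* programs TCAM, ref = 1 */
(void)ice_create_tunnel(hw, type, port);   /* port in use: ref = 2   */
(void)ice_destroy_tunnel(hw, port, false); /* ref = 1, no HW write   */
(void)ice_destroy_tunnel(hw, port, false); /* last ref: entry freed  */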
@@ -649,9 +649,9 @@ ice_destroy_tunnel_end:
*
* Replays all tunnels
*/
-enum ice_status ice_replay_tunnels(struct ice_hw *hw)
+int ice_replay_tunnels(struct ice_hw *hw)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u16 i;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -699,7 +699,7 @@ enum ice_status ice_replay_tunnels(struct ice_hw *hw)
* @prot: variable to receive the protocol ID
* @off: variable to receive the protocol offset
*/
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off)
{
@@ -716,7 +716,7 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
*prot = fv_ext[fv_idx].prot_id;
*off = fv_ext[fv_idx].off;
- return ICE_SUCCESS;
+ return 0;
}
/* PTG Management */
@@ -729,11 +729,11 @@ ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
* This function will update the XLT1 hardware table to reflect the new
* packet type group configuration.
*/
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
+int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
{
struct ice_xlt1_section *sect;
struct ice_buf_build *bld;
- enum ice_status status;
+ int status;
u16 index;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT1),
@@ -766,14 +766,14 @@ enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk)
* PTG ID that contains it through the PTG parameter, with the value of
 * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
*/
-static enum ice_status
+static int
ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
{
if (ptype >= ICE_XLT1_CNT || !ptg)
return ICE_ERR_PARAM;
*ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -825,7 +825,7 @@ void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg)
* This function will remove the ptype from the specific PTG, and move it to
* the default PTG (ICE_DEFAULT_PTG).
*/
-static enum ice_status
+static int
ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
struct ice_ptg_ptype **ch;
@@ -857,7 +857,7 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -872,11 +872,11 @@ ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
* a destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
* default PTG.
*/
-static enum ice_status
+static int
ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
{
- enum ice_status status;
u8 original_ptg;
+ int status;
if (ptype > ICE_XLT1_CNT - 1)
return ICE_ERR_PARAM;
@@ -890,7 +890,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
/* Is ptype already in the correct PTG? */
if (original_ptg == ptg)
- return ICE_SUCCESS;
+ return 0;
/* Remove from original PTG and move back to the default PTG */
if (original_ptg != ICE_DEFAULT_PTG)
@@ -898,7 +898,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
/* Moving to default PTG? Then we're done with this request */
if (ptg == ICE_DEFAULT_PTG)
- return ICE_SUCCESS;
+ return 0;
/* Add ptype to PTG at beginning of list */
hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
@@ -909,7 +909,7 @@ ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
hw->blk[blk].xlt1.t[ptype] = ptg;
- return ICE_SUCCESS;
+ return 0;
}
/* Block / table size info */
@@ -1016,13 +1016,13 @@ ice_match_prop_lst(struct LIST_HEAD_TYPE *list1, struct LIST_HEAD_TYPE *list2)
* This function will update the XLT2 hardware table with the input VSI
* group configuration.
*/
-static enum ice_status
+static int
ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
u16 vsig)
{
struct ice_xlt2_section *sect;
struct ice_buf_build *bld;
- enum ice_status status;
+ int status;
bld = ice_pkg_buf_alloc_single_section(hw, ice_sect_id(blk, ICE_XLT2),
ice_struct_size(sect, value, 1),
@@ -1049,14 +1049,14 @@ ice_vsig_update_xlt2_sect(struct ice_hw *hw, enum ice_block blk, u16 vsi,
* This function will update the XLT2 hardware table with the input VSI
* group configuration of used vsis.
*/
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
+int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
{
u16 vsi;
for (vsi = 0; vsi < ICE_MAX_VSI; vsi++) {
/* update only vsis that have been changed */
if (hw->blk[blk].xlt2.vsis[vsi].changed) {
- enum ice_status status;
+ int status;
u16 vsig;
vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
@@ -1068,7 +1068,7 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1081,7 +1081,7 @@ enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk)
* This function will lookup the VSI entry in the XLT2 list and return
 * the VSI group it's associated with.
*/
-enum ice_status
+int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
{
if (!vsig || vsi >= ICE_MAX_VSI)
@@ -1093,7 +1093,7 @@ ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
*/
*vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
- return ICE_SUCCESS;
+ return 0;
}
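Throughout the XLT2 code a VSIG handle packs a table index and the owning PF: ICE_VSIG_VALUE(i, hw->pf_id) composes it, and masking with ICE_VSIG_IDX_M recovers the index. A hedged two-line view (the exact field widths are not visible in this diff and are assumed):

u16 idx = vsig & ICE_VSIG_IDX_M;		/* low bits: vsig_tbl index */
u16 handle = ICE_VSIG_VALUE(idx, hw->pf_id);	/* upper bits: owning PF    */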
/**
@@ -1150,7 +1150,7 @@ static u16 ice_vsig_alloc(struct ice_hw *hw, enum ice_block blk)
* for, the list must match exactly, including the order in which the
* characteristics are listed.
*/
-static enum ice_status
+static int
ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *chs, u16 *vsig)
{
@@ -1161,7 +1161,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
if (xlt2->vsig_tbl[i].in_use &&
ice_match_prop_lst(chs, &xlt2->vsig_tbl[i].prop_lst)) {
*vsig = ICE_VSIG_VALUE(i, hw->pf_id);
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -1176,7 +1176,7 @@ ice_find_dup_props_vsig(struct ice_hw *hw, enum ice_block blk,
* The function will remove all VSIs associated with the input VSIG and move
* them to the DEFAULT_VSIG and mark the VSIG available.
*/
-static enum ice_status
+static int
ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
{
struct ice_vsig_prof *dtmp, *del;
@@ -1224,7 +1224,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
*/
INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1237,7 +1237,7 @@ ice_vsig_free(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* The function will remove the input VSI from its VSI group and move it
* to the DEFAULT_VSIG.
*/
-static enum ice_status
+static int
ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
@@ -1253,7 +1253,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* entry already in default VSIG, don't have to remove */
if (idx == ICE_DEFAULT_VSIG)
- return ICE_SUCCESS;
+ return 0;
vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
if (!(*vsi_head))
@@ -1280,7 +1280,7 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
vsi_cur->changed = 1;
vsi_cur->next_vsi = NULL;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1295,12 +1295,12 @@ ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* move the entry to the DEFAULT_VSIG, update the original VSIG and
* then move entry to the new VSIG.
*/
-static enum ice_status
+static int
ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_vsig_vsi *tmp;
- enum ice_status status;
u16 orig_vsig, idx;
+ int status;
idx = vsig & ICE_VSIG_IDX_M;
@@ -1320,7 +1320,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
/* no update required if vsigs match */
if (orig_vsig == vsig)
- return ICE_SUCCESS;
+ return 0;
if (orig_vsig != ICE_DEFAULT_VSIG) {
/* remove entry from orig_vsig and add to default VSIG */
@@ -1330,7 +1330,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
}
if (idx == ICE_DEFAULT_VSIG)
- return ICE_SUCCESS;
+ return 0;
/* Create VSI entry and add VSIG and prop_mask values */
hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
@@ -1343,7 +1343,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
hw->blk[blk].xlt2.t[vsi] = vsig;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1353,7 +1353,7 @@ ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* @fv: field vector to search for
* @prof_id: receives the profile ID
*/
-static enum ice_status
+static int
ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_fv_word *fv, u8 *prof_id)
{
@@ -1368,7 +1368,7 @@ ice_find_prof_id(struct ice_hw *hw, enum ice_block blk,
continue;
*prof_id = i;
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -1424,7 +1424,7 @@ static bool ice_tcam_ent_rsrc_type(enum ice_block blk, u16 *rsrc_type)
* This function allocates a new entry in a Profile ID TCAM for a specific
* block.
*/
-static enum ice_status
+static int
ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
u16 *tcam_idx)
{
@@ -1444,7 +1444,7 @@ ice_alloc_tcam_ent(struct ice_hw *hw, enum ice_block blk, bool btm,
*
* This function frees an entry in a Profile ID TCAM for a specific block.
*/
-static enum ice_status
+static int
ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
{
u16 res_type;
@@ -1464,12 +1464,12 @@ ice_free_tcam_ent(struct ice_hw *hw, enum ice_block blk, u16 tcam_idx)
* This function allocates a new profile ID, which also corresponds to a Field
* Vector (Extraction Sequence) entry.
*/
-static enum ice_status
+static int
ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
{
- enum ice_status status;
u16 res_type;
u16 get_prof;
+ int status;
if (!ice_prof_id_rsrc_type(blk, &res_type))
return ICE_ERR_PARAM;
@@ -1489,7 +1489,7 @@ ice_alloc_prof_id(struct ice_hw *hw, enum ice_block blk, u8 *prof_id)
*
* This function frees a profile ID, which also corresponds to a Field Vector.
*/
-static enum ice_status
+static int
ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
u16 tmp_prof_id = (u16)prof_id;
@@ -1507,7 +1507,7 @@ ice_free_prof_id(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to increment the reference count
*/
-static enum ice_status
+static int
ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
@@ -1515,7 +1515,7 @@ ice_prof_inc_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
hw->blk[blk].es.ref_count[prof_id]++;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1548,7 +1548,7 @@ ice_write_es(struct ice_hw *hw, enum ice_block blk, u8 prof_id,
* @blk: the block from which to free the profile ID
* @prof_id: the profile ID for which to decrement the reference count
*/
-static enum ice_status
+static int
ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
{
if (prof_id > hw->blk[blk].es.count)
@@ -1561,7 +1561,7 @@ ice_prof_dec_ref(struct ice_hw *hw, enum ice_block blk, u8 prof_id)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/* Block / table section IDs */
@@ -1811,7 +1811,7 @@ void ice_init_flow_profs(struct ice_hw *hw, u8 blk_idx)
* ice_init_hw_tbls - init hardware table memory
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+int ice_init_hw_tbls(struct ice_hw *hw)
{
u8 i;
@@ -1916,7 +1916,7 @@ enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
goto err;
}
- return ICE_SUCCESS;
+ return 0;
err:
ice_free_hw_tbls(hw);
@@ -2143,7 +2143,7 @@ void ice_clear_hw_tbls(struct ice_hw *hw)
* @nm_msk: never match mask
* @key: output of profile ID key
*/
-static enum ice_status
+static int
ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
u8 cdid, u16 flags, u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ], u8 nm_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -2199,7 +2199,7 @@ ice_prof_gen_key(struct ice_hw *hw, enum ice_block blk, u8 ptg, u16 vsig,
* @dc_msk: don't care mask
* @nm_msk: never match mask
*/
-static enum ice_status
+static int
ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 prof_id, u8 ptg, u16 vsig, u8 cdid, u16 flags,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ],
@@ -2207,7 +2207,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ])
{
struct ice_prof_tcam_entry;
- enum ice_status status;
+ int status;
status = ice_prof_gen_key(hw, blk, ptg, vsig, cdid, flags, vl_msk,
dc_msk, nm_msk, hw->blk[blk].prof.t[idx].key);
@@ -2226,7 +2226,7 @@ ice_tcam_write_entry(struct ice_hw *hw, enum ice_block blk, u16 idx,
* @vsig: VSIG to query
* @refs: pointer to variable to receive the reference count
*/
-static enum ice_status
+static int
ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
{
u16 idx = vsig & ICE_VSIG_IDX_M;
@@ -2243,7 +2243,7 @@ ice_vsig_get_ref(struct ice_hw *hw, enum ice_block blk, u16 vsig, u16 *refs)
ptr = ptr->next_vsi;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2276,7 +2276,7 @@ ice_has_prof_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl)
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
@@ -2307,7 +2307,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
ICE_NONDMA_TO_NONDMA);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2317,7 +2317,7 @@ ice_prof_bld_es(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
struct ice_buf_build *bld, struct LIST_HEAD_TYPE *chgs)
{
@@ -2348,7 +2348,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
ICE_NONDMA_TO_NONDMA);
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2357,7 +2357,7 @@ ice_prof_bld_tcam(struct ice_hw *hw, enum ice_block blk,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
struct LIST_HEAD_TYPE *chgs)
{
@@ -2383,7 +2383,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
p->value[0] = tmp->ptg;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2392,7 +2392,7 @@ ice_prof_bld_xlt1(enum ice_block blk, struct ice_buf_build *bld,
* @bld: the update package buffer build to add to
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
struct LIST_HEAD_TYPE *chgs)
{
@@ -2425,7 +2425,7 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2434,18 +2434,18 @@ ice_prof_bld_xlt2(enum ice_block blk, struct ice_buf_build *bld,
* @blk: hardware block
* @chgs: the list of changes to make in hardware
*/
-static enum ice_status
+static int
ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *chgs)
{
struct ice_buf_build *b;
struct ice_chs_chg *tmp;
- enum ice_status status;
u16 pkg_sects;
u16 xlt1 = 0;
u16 xlt2 = 0;
u16 tcam = 0;
u16 es = 0;
+ int status;
u16 sects;
/* count number of sections we need */
@@ -2472,7 +2472,7 @@ ice_upd_prof_hw(struct ice_hw *hw, enum ice_block blk,
sects = xlt1 + xlt2 + tcam + es;
if (!sects)
- return ICE_SUCCESS;
+ return 0;
/* Build update package buffer */
b = ice_pkg_buf_alloc(hw);
@@ -2541,13 +2541,13 @@ error_tmp:
* it will not be written until the first call to ice_add_flow that specifies
* the ID value used here.
*/
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, struct ice_fv_word *es)
{
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
struct ice_prof_map *prof;
- enum ice_status status;
+ int status;
u8 prof_id;
u16 ptype;
@@ -2602,7 +2602,7 @@ ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
}
LIST_ADD(&prof->list, &hw->blk[blk].es.prof_map);
- status = ICE_SUCCESS;
+ status = 0;
err_ice_add_prof:
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2640,17 +2640,17 @@ ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id)
* @id: profile tracking ID
* @cntxt: context
*/
-enum ice_status
+int
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
if (entry) {
entry->context = cntxt;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -2663,17 +2663,17 @@ ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt)
* @id: profile tracking ID
* @cntxt: pointer to variable to receive the context
*/
-enum ice_status
+int
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *entry;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
entry = ice_search_prof_id(hw, blk, id);
if (entry) {
*cntxt = entry->context;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -2704,14 +2704,14 @@ ice_vsig_prof_id_count(struct ice_hw *hw, enum ice_block blk, u16 vsig)
* @blk: hardware block
* @idx: the index to release
*/
-static enum ice_status
+static int
ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
{
/* Masks to invoke a never match entry */
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFE, 0xFF, 0xFF, 0xFF, 0xFF };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x01, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status;
+ int status;
/* write the TCAM entry */
status = ice_tcam_write_entry(hw, blk, idx, 0, 0, 0, 0, 0, vl_msk,
@@ -2731,11 +2731,11 @@ ice_rel_tcam_idx(struct ice_hw *hw, enum ice_block blk, u16 idx)
* @blk: hardware block
* @prof: pointer to profile structure to remove
*/
-static enum ice_status
+static int
ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
struct ice_vsig_prof *prof)
{
- enum ice_status status;
+ int status;
u16 i;
for (i = 0; i < prof->tcam_count; i++)
@@ -2747,7 +2747,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
return ICE_ERR_HW_TABLE;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2757,7 +2757,7 @@ ice_rem_prof_id(struct ice_hw *hw, enum ice_block blk,
* @vsig: the VSIG to remove
* @chg: the change list
*/
-static enum ice_status
+static int
ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
@@ -2769,7 +2769,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
LIST_FOR_EACH_ENTRY_SAFE(d, t,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list) {
- enum ice_status status;
+ int status;
status = ice_rem_prof_id(hw, blk, d);
if (status)
@@ -2814,7 +2814,7 @@ ice_rem_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @hdl: profile handle indicating which profile to remove
* @chg: list to receive a record of changes
*/
-static enum ice_status
+static int
ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
@@ -2825,7 +2825,7 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst,
ice_vsig_prof, list)
if (p->profile_cookie == hdl) {
- enum ice_status status;
+ int status;
if (ice_vsig_prof_id_count(hw, blk, vsig) == 1)
/* this is the last profile, remove the VSIG */
@@ -2848,12 +2848,12 @@ ice_rem_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
* @blk: hardware block
* @id: profile tracking ID
*/
-static enum ice_status
+static int
ice_rem_flow_all(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_chs_chg *del, *tmp;
- enum ice_status status;
struct LIST_HEAD_TYPE chg;
+ int status;
u16 i;
INIT_LIST_HEAD(&chg);
@@ -2889,10 +2889,10 @@ err_ice_rem_flow_all:
* previously created through ice_add_prof. If any existing entries
* are associated with this profile, they will be removed as well.
*/
-enum ice_status ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
+int ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id)
{
struct ice_prof_map *pmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2925,13 +2925,13 @@ err_ice_rem_prof:
* @hdl: profile handle
* @chg: change list
*/
-static enum ice_status
+static int
ice_get_prof(struct ice_hw *hw, enum ice_block blk, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_chs_chg *p;
+ int status = 0;
u16 i;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -2979,7 +2979,7 @@ err_ice_get_prof:
*
* This routine makes a copy of the list of profiles in the specified VSIG.
*/
-static enum ice_status
+static int
ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *lst)
{
@@ -2999,7 +2999,7 @@ ice_get_profs_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig,
LIST_ADD_TAIL(&p->list, lst);
}
- return ICE_SUCCESS;
+ return 0;
err_ice_get_profs_vsig:
LIST_FOR_EACH_ENTRY_SAFE(ent1, ent2, lst, ice_vsig_prof, list) {
@@ -3017,13 +3017,13 @@ err_ice_get_profs_vsig:
* @lst: the list to be added to
* @hdl: profile handle of entry to add
*/
-static enum ice_status
+static int
ice_add_prof_to_lst(struct ice_hw *hw, enum ice_block blk,
struct LIST_HEAD_TYPE *lst, u64 hdl)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *p;
+ int status = 0;
u16 i;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
@@ -3064,13 +3064,13 @@ err_ice_add_prof_to_lst:
* @vsig: the VSIG to move the VSI to
* @chg: the change list
*/
-static enum ice_status
+static int
ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 orig_vsig;
+ int status;
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
@@ -3092,7 +3092,7 @@ ice_move_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig,
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3124,13 +3124,13 @@ ice_rem_chg_tcam_ent(struct ice_hw *hw, u16 idx, struct LIST_HEAD_TYPE *chg)
*
* This function appends an enable or disable TCAM entry in the change log
*/
-static enum ice_status
+static int
ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
u16 vsig, struct ice_tcam_inf *tcam,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
+ int status;
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
@@ -3178,7 +3178,7 @@ ice_prof_tcam_ena_dis(struct ice_hw *hw, enum ice_block blk, bool enable,
/* log change */
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
err_ice_prof_tcam_ena_dis:
ice_free(hw, p);
@@ -3192,13 +3192,13 @@ err_ice_prof_tcam_ena_dis:
* @vsig: the VSIG for which to adjust profile priorities
* @chg: the change list
*/
-static enum ice_status
+static int
ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
struct LIST_HEAD_TYPE *chg)
{
ice_declare_bitmap(ptgs_used, ICE_XLT1_CNT);
- enum ice_status status = ICE_SUCCESS;
struct ice_vsig_prof *t;
+ int status = 0;
u16 idx;
ice_zero_bitmap(ptgs_used, ICE_XLT1_CNT);
@@ -3265,7 +3265,7 @@ ice_adj_prof_priorities(struct ice_hw *hw, enum ice_block blk, u16 vsig,
* @rev: true to add entries to the end of the list
* @chg: the change list
*/
-static enum ice_status
+static int
ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
bool rev, struct LIST_HEAD_TYPE *chg)
{
@@ -3273,11 +3273,11 @@ ice_add_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsig, u64 hdl,
u8 vl_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
u8 dc_msk[ICE_TCAM_KEY_VAL_SZ] = { 0xFF, 0xFF, 0x00, 0x00, 0x00 };
u8 nm_msk[ICE_TCAM_KEY_VAL_SZ] = { 0x00, 0x00, 0x00, 0x00, 0x00 };
- enum ice_status status = ICE_SUCCESS;
struct ice_prof_map *map;
struct ice_vsig_prof *t;
struct ice_chs_chg *p;
u16 vsig_idx, i;
+ int status = 0;
/* Error, if this VSIG already has this profile */
if (ice_has_prof_vsig(hw, blk, vsig, hdl))
@@ -3371,13 +3371,13 @@ err_ice_add_prof_id_vsig:
* @hdl: the profile handle of the profile that will be added to the VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
struct LIST_HEAD_TYPE *chg)
{
- enum ice_status status;
struct ice_chs_chg *p;
u16 new_vsig;
+ int status;
p = (struct ice_chs_chg *)ice_malloc(hw, sizeof(*p));
if (!p)
@@ -3404,7 +3404,7 @@ ice_create_prof_id_vsig(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl,
LIST_ADD(&p->list_entry, chg);
- return ICE_SUCCESS;
+ return 0;
err_ice_create_prof_id_vsig:
/* let caller clean up the change list */
@@ -3421,13 +3421,13 @@ err_ice_create_prof_id_vsig:
* @new_vsig: return of new VSIG
* @chg: the change list
*/
-static enum ice_status
+static int
ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
struct LIST_HEAD_TYPE *lst, u16 *new_vsig,
struct LIST_HEAD_TYPE *chg)
{
struct ice_vsig_prof *t;
- enum ice_status status;
+ int status;
u16 vsig;
vsig = ice_vsig_alloc(hw, blk);
@@ -3448,7 +3448,7 @@ ice_create_vsig_from_lst(struct ice_hw *hw, enum ice_block blk, u16 vsi,
*new_vsig = vsig;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3462,8 +3462,8 @@ static bool
ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
{
struct ice_vsig_prof *t;
- enum ice_status status;
struct LIST_HEAD_TYPE lst;
+ int status;
INIT_LIST_HEAD(&lst);
@@ -3479,7 +3479,7 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
LIST_DEL(&t->list);
ice_free(hw, t);
- return status == ICE_SUCCESS;
+ return !status;
}
/**
@@ -3496,12 +3496,12 @@ ice_find_prof_vsig(struct ice_hw *hw, enum ice_block blk, u64 hdl, u16 *vsig)
* save time in generating a new VSIG and TCAMs till a match is
* found and subsequent rollback when a matching VSIG is found.
*/
-enum ice_status
+int
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
{
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE chg;
- enum ice_status status;
+ int status;
/* if target VSIG is default the move is invalid */
if ((vsig & ICE_VSIG_IDX_M) == ICE_DEFAULT_VSIG)
@@ -3534,14 +3534,14 @@ ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE union_lst;
- enum ice_status status;
struct LIST_HEAD_TYPE chg;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&union_lst);
@@ -3681,21 +3681,21 @@ err_ice_add_prof_id_flow:
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be enabled.
*/
-enum ice_status
+int
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
u16 i;
for (i = 0; i < count; i++) {
- enum ice_status status;
+ int status;
status = ice_add_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3704,7 +3704,7 @@ ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
* @lst: list to remove the profile from
* @hdl: the profile handle indicating the profile to remove
*/
-static enum ice_status
+static int
ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
{
struct ice_vsig_prof *ent, *tmp;
@@ -3713,7 +3713,7 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
if (ent->profile_cookie == hdl) {
LIST_DEL(&ent->list);
ice_free(hw, ent);
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_DOES_NOT_EXIST;
@@ -3730,13 +3730,13 @@ ice_rem_prof_from_list(struct ice_hw *hw, struct LIST_HEAD_TYPE *lst, u64 hdl)
* profile indicated by the ID parameter for the VSIs specified in the VSI
* array. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl)
{
struct ice_vsig_prof *tmp1, *del1;
struct ice_chs_chg *tmp, *del;
struct LIST_HEAD_TYPE chg, copy;
- enum ice_status status;
+ int status;
u16 vsig;
INIT_LIST_HEAD(&copy);
@@ -3864,19 +3864,19 @@ err_ice_rem_prof_id_flow:
 * using ice_add_flow. The ID value will indicate which profile will be
* removed. Once successfully called, the flow will be disabled.
*/
-enum ice_status
+int
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id)
{
u16 i;
for (i = 0; i < count; i++) {
- enum ice_status status;
+ int status;
status = ice_rem_prof_id_flow(hw, blk, vsi[i], id);
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
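The ice_add_flow()/ice_rem_flow() wrappers above simply iterate the VSI array and stop at the first failure, leaving earlier VSIs attached; callers wanting all-or-nothing behavior must roll back themselves. A hedged usage sketch (the VSI numbers and the RSS block choice are illustrative; nitems() is the standard FreeBSD macro):

u16 vsis[] = { 3, 5, 8 };	/* illustrative VSI numbers */
int status;

status = ice_add_flow(hw, ICE_BLK_RSS, vsis, nitems(vsis), prof_id);
if (status)	/* partial attach is possible: undo what succeeded */
	(void)ice_rem_flow(hw, ICE_BLK_RSS, vsis, nitems(vsis), prof_id);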
diff --git a/sys/dev/ice/ice_flex_pipe.h b/sys/dev/ice/ice_flex_pipe.h
index 559905e7fb59..3351b2601e5e 100644
--- a/sys/dev/ice/ice_flex_pipe.h
+++ b/sys/dev/ice/ice_flex_pipe.h
@@ -34,10 +34,10 @@
#include "ice_type.h"
-enum ice_status
+int
ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
u8 *prot, u16 *off);
-enum ice_status
+int
ice_find_label_value(struct ice_seg *ice_seg, char const *name, u32 type,
u16 *value);
void
@@ -45,54 +45,54 @@ ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
ice_bitmap_t *bm);
void
ice_init_prof_result_bm(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, enum ice_tunnel_type type,
u16 *port);
-enum ice_status
+int
ice_create_tunnel(struct ice_hw *hw, enum ice_tunnel_type type, u16 port);
-enum ice_status ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
+int ice_destroy_tunnel(struct ice_hw *hw, u16 port, bool all);
bool ice_tunnel_port_in_use(struct ice_hw *hw, u16 port, u16 *index);
bool
ice_tunnel_get_type(struct ice_hw *hw, u16 port, enum ice_tunnel_type *type);
-enum ice_status ice_replay_tunnels(struct ice_hw *hw);
+int ice_replay_tunnels(struct ice_hw *hw);
/* XLT1/PType group functions */
-enum ice_status ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
+int ice_ptg_update_xlt1(struct ice_hw *hw, enum ice_block blk);
void ice_ptg_free(struct ice_hw *hw, enum ice_block blk, u8 ptg);
/* XLT2/VSI group functions */
-enum ice_status ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
-enum ice_status
+int ice_vsig_update_xlt2(struct ice_hw *hw, enum ice_block blk);
+int
ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig);
-enum ice_status
+int
ice_add_prof(struct ice_hw *hw, enum ice_block blk, u64 id,
ice_bitmap_t *ptypes, struct ice_fv_word *es);
struct ice_prof_map *
ice_search_prof_id(struct ice_hw *hw, enum ice_block blk, u64 id);
-enum ice_status
+int
ice_add_vsi_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig);
-enum ice_status
+int
ice_add_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_rem_prof_id_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi, u64 hdl);
-enum ice_status
+int
ice_set_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 cntxt);
-enum ice_status
+int
ice_get_prof_context(struct ice_hw *hw, enum ice_block blk, u64 id, u64 *cntxt);
-enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+int ice_init_hw_tbls(struct ice_hw *hw);
void ice_fill_blk_tbls(struct ice_hw *hw);
void ice_clear_hw_tbls(struct ice_hw *hw);
void ice_free_hw_tbls(struct ice_hw *hw);
-enum ice_status
+int
ice_add_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
-enum ice_status
+int
ice_rem_flow(struct ice_hw *hw, enum ice_block blk, u16 vsi[], u8 count,
u64 id);
-enum ice_status
+int
ice_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 id);
void ice_fill_blk_tbls(struct ice_hw *hw);
diff --git a/sys/dev/ice/ice_flow.c b/sys/dev/ice/ice_flow.c
index c04f86445767..a475833aef60 100644
--- a/sys/dev/ice/ice_flow.c
+++ b/sys/dev/ice/ice_flow.c
@@ -417,8 +417,7 @@ struct ice_flow_prof_params {
* @segs: array of one or more packet segments that describe the flow
* @segs_cnt: number of packet segments provided
*/
-static enum ice_status
-ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
+static int ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
{
u8 i;
@@ -434,7 +433,7 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -444,7 +443,7 @@ ice_flow_val_hdrs(struct ice_flow_seg_info *segs, u8 segs_cnt)
* This function identifies the packet types associated with the protocol
* headers being present in packet segments of the specified flow profile.
*/
-static enum ice_status
+static int
ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
{
struct ice_flow_prof *prof;
@@ -544,10 +543,10 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
}
}
- return ICE_SUCCESS;
+ return 0;
}
-/**
+/*
* ice_flow_xtract_fld - Create an extraction sequence entry for the given field
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
@@ -558,7 +557,7 @@ ice_flow_proc_seg_hdrs(struct ice_flow_prof_params *params)
* field. It then allocates one or more extraction sequence entries for the
 * given field, and fills the entries with protocol ID and offset information.
*/
-static enum ice_status
+static int
ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
u8 seg, enum ice_flow_field fld)
{
@@ -704,7 +703,7 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
off += ICE_FLOW_FV_EXTRACT_SZ;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -715,11 +714,11 @@ ice_flow_xtract_fld(struct ice_hw *hw, struct ice_flow_prof_params *params,
* This function iterates through all matched fields in the given segments, and
* creates an extraction sequence for the fields.
*/
-static enum ice_status
+static int
ice_flow_create_xtrct_seq(struct ice_hw *hw,
struct ice_flow_prof_params *params)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
for (i = 0; i < params->prof->segs_cnt; i++) {
@@ -744,10 +743,10 @@ ice_flow_create_xtrct_seq(struct ice_hw *hw,
* @hw: pointer to the HW struct
* @params: information about the flow to be processed
*/
-static enum ice_status
+static int
ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
{
- enum ice_status status;
+ int status;
status = ice_flow_proc_seg_hdrs(params);
if (status)
@@ -759,7 +758,7 @@ ice_flow_proc_segs(struct ice_hw *hw, struct ice_flow_prof_params *params)
switch (params->blk) {
case ICE_BLK_RSS:
- status = ICE_SUCCESS;
+ status = 0;
break;
default:
return ICE_ERR_NOT_IMPL;
@@ -869,18 +868,18 @@ ice_flow_find_prof_id(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
* @prof_id: the profile ID handle
* @hw_prof_id: pointer to variable to receive the HW profile ID
*/
-enum ice_status
+int
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof_id)
{
- enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
struct ice_prof_map *map;
+ int status = ICE_ERR_DOES_NOT_EXIST;
ice_acquire_lock(&hw->blk[blk].es.prof_map_lock);
map = ice_search_prof_id(hw, blk, prof_id);
if (map) {
*hw_prof_id = map->prof_id;
- status = ICE_SUCCESS;
+ status = 0;
}
ice_release_lock(&hw->blk[blk].es.prof_map_lock);
return status;
@@ -900,7 +899,7 @@ ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
enum ice_flow_dir dir, u64 prof_id,
struct ice_flow_seg_info *segs, u8 segs_cnt,
@@ -908,7 +907,7 @@ ice_flow_add_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof **prof)
{
struct ice_flow_prof_params *params;
- enum ice_status status;
+ int status;
u8 i;
if (!prof || (acts_cnt && !acts))
@@ -976,11 +975,11 @@ free_params:
*
* Assumption: the caller has acquired the lock to the profile list
*/
-static enum ice_status
+static int
ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof)
{
- enum ice_status status;
+ int status;
/* Remove all hardware profiles associated with this flow profile */
status = ice_rem_prof(hw, blk, prof->id);
@@ -1003,11 +1002,11 @@ ice_flow_rem_prof_sync(struct ice_hw *hw, enum ice_block blk,
* be added has the same characteristics as the VSIG and will
* thereby have access to all resources added to that VSIG.
*/
-enum ice_status
+int
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || blk >= ICE_BLK_COUNT)
return ICE_ERR_PARAM;
@@ -1030,11 +1029,11 @@ ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_bit_set(prof->vsis, vsi_handle)) {
status = ice_add_prof_id_flow(hw, blk,
@@ -1061,11 +1060,11 @@ ice_flow_assoc_prof(struct ice_hw *hw, enum ice_block blk,
* Assumption: the caller has acquired the lock to the profile list
* and the software VSI handle has been validated
*/
-static enum ice_status
+static int
ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
struct ice_flow_prof *prof, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (ice_is_bit_set(prof->vsis, vsi_handle)) {
status = ice_rem_prof_id_flow(hw, blk,
@@ -1094,13 +1093,13 @@ ice_flow_disassoc_prof(struct ice_hw *hw, enum ice_block blk,
* @acts_cnt: number of default actions
* @prof: stores the returned flow profile added
*/
-static enum ice_status
+static int
ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
u64 prof_id, struct ice_flow_seg_info *segs, u8 segs_cnt,
struct ice_flow_action *acts, u8 acts_cnt,
struct ice_flow_prof **prof)
{
- enum ice_status status;
+ int status;
if (segs_cnt > ICE_FLOW_SEG_MAX)
return ICE_ERR_MAX_LIMIT;
@@ -1133,11 +1132,11 @@ ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
* @blk: the block for which the flow profile is to be removed
* @prof_id: unique ID of the flow profile to be removed
*/
-static enum ice_status
+static int
ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id)
{
struct ice_flow_prof *prof;
- enum ice_status status;
+ int status;
ice_acquire_lock(&hw->fl_profs_locks[blk]);
@@ -1275,7 +1274,7 @@ ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
* header value to set flow field segment for further use in flow
* profile entry or removal.
*/
-static enum ice_status
+static int
ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
const struct ice_rss_hash_cfg *cfg)
{
@@ -1323,7 +1322,7 @@ ice_flow_set_rss_seg_info(struct ice_flow_seg_info *segs, u8 seg_cnt,
if (val && !ice_is_pow2(val))
return ICE_ERR_CFG;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1360,18 +1359,18 @@ void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle)
* the VSI from that profile. If the flow profile has no VSIs it will
* be removed.
*/
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *p, *t;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u16 vsig;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
if (LIST_EMPTY(&hw->fl_profs[blk]))
- return ICE_SUCCESS;
+ return 0;
ice_acquire_lock(&hw->rss_locks);
LIST_FOR_EACH_ENTRY_SAFE(p, t, &hw->fl_profs[blk], ice_flow_prof,
@@ -1477,7 +1476,7 @@ ice_rem_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
{
enum ice_rss_cfg_hdr_type hdr_type;
@@ -1497,7 +1496,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
r->hash.addl_hdrs == prof->segs[prof->segs_cnt - 1].hdrs &&
r->hash.hdr_type == hdr_type) {
ice_set_bit(vsi_handle, r->vsis);
- return ICE_SUCCESS;
+ return 0;
}
rss_cfg = (struct ice_rss_cfg *)ice_malloc(hw, sizeof(*rss_cfg));
@@ -1512,7 +1511,7 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
LIST_ADD_TAIL(&rss_cfg->l_entry, &hw->rss_list_head);
- return ICE_SUCCESS;
+ return 0;
}
#define ICE_FLOW_PROF_HASH_S 0
@@ -1545,15 +1544,15 @@ ice_add_rss_list(struct ice_hw *hw, u16 vsi_handle, struct ice_flow_prof *prof)
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_add_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_prof *prof = NULL;
struct ice_flow_seg_info *segs;
- enum ice_status status;
u8 segs_cnt;
+ int status;
if (cfg->symm)
return ICE_ERR_PARAM;
@@ -1657,12 +1656,12 @@ exit:
* the input fields to hash on, the flow type and use the VSI number to add
* a flow entry to the profile.
*/
-enum ice_status
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
@@ -1695,15 +1694,15 @@ ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
*
* Assumption: lock has already been acquired for RSS list
*/
-static enum ice_status
+static int
ice_rem_rss_cfg_sync(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
const enum ice_block blk = ICE_BLK_RSS;
struct ice_flow_seg_info *segs;
struct ice_flow_prof *prof;
- enum ice_status status;
u8 segs_cnt;
+ int status;
segs_cnt = (cfg->hdr_type == ICE_RSS_OUTER_HEADERS) ?
ICE_FLOW_SEG_SINGLE :
@@ -1755,12 +1754,12 @@ out:
* removed. Calls are made to underlying flow apis which will in
* turn build or update buffers for RSS XLT1 section.
*/
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg)
{
struct ice_rss_hash_cfg local_cfg;
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle) || !cfg ||
cfg->hdr_type > ICE_RSS_ANY_HEADERS ||
@@ -1827,11 +1826,10 @@ ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
* message, convert it to ICE-compatible values, and configure RSS flow
* profiles.
*/
-enum ice_status
-ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
+int ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_rss_hash_cfg hcfg;
+ int status = 0;
u64 hash_flds;
if (avf_hash == ICE_AVF_FLOW_FIELD_INVALID ||
@@ -1920,10 +1918,10 @@ ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 avf_hash)
* @hw: pointer to the hardware structure
* @vsi_handle: software VSI handle
*/
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle)
{
- enum ice_status status = ICE_SUCCESS;
struct ice_rss_cfg *r;
+ int status = 0;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
diff --git a/sys/dev/ice/ice_flow.h b/sys/dev/ice/ice_flow.h
index 31c369c144e0..45952245d533 100644
--- a/sys/dev/ice/ice_flow.h
+++ b/sys/dev/ice/ice_flow.h
@@ -330,24 +330,24 @@ struct ice_flow_action {
u64
ice_flow_find_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir,
struct ice_flow_seg_info *segs, u8 segs_cnt);
-enum ice_status
+int
ice_flow_assoc_vsig_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi_handle,
u16 vsig);
-enum ice_status
+int
ice_flow_get_hw_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
u8 *hw_prof);
void
ice_flow_set_fld_prefix(struct ice_flow_seg_info *seg, enum ice_flow_field fld,
u16 val_loc, u16 prefix_loc, u8 prefix_sz);
void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_avf_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u64 hashed_flds);
-enum ice_status ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle);
+int
ice_add_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
-enum ice_status
+int
ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle,
const struct ice_rss_hash_cfg *cfg);
u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs);
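A hedged usage sketch for the RSS API above. The hash_flds field name and the ICE_FLOW_SEG_HDR_*/ICE_FLOW_HASH_* macros are assumed from elsewhere in ice_flow.h; hdr_type and symm do appear in this diff, and ice_add_rss_cfg_sync() shown earlier rejects symmetric configurations with ICE_ERR_PARAM:

/* hash_flds and the ICE_FLOW_* macros below are assumed names. */
struct ice_rss_hash_cfg cfg = {
	.addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
	.hash_flds = ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT,
	.hdr_type  = ICE_RSS_OUTER_HEADERS,	/* single segment */
	.symm      = false,	/* ice_add_rss_cfg_sync() rejects true */
};
int status;

status = ice_add_rss_cfg(hw, vsi_handle, &cfg);
if (status)	/* nonzero ICE_ERR_* value; 0 on success */
	printf("RSS config failed: %d\n", status);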
diff --git a/sys/dev/ice/ice_fw_logging.c b/sys/dev/ice/ice_fw_logging.c
index a8caf65aca6d..16a9ab6823bf 100644
--- a/sys/dev/ice/ice_fw_logging.c
+++ b/sys/dev/ice/ice_fw_logging.c
@@ -48,7 +48,7 @@ SDT_PROVIDER_DEFINE(ice_fwlog);
/*
* SDT DTrace probe fired when a firmware log message is received over the
- * AdminQ. It passes the buffer of the firwmare log message along with its
+ * AdminQ. It passes the buffer of the firmware log message along with its
* length in bytes to the DTrace framework.
*/
SDT_PROBE_DEFINE2(ice_fwlog, , , message, "uint8_t *", "int");
@@ -79,7 +79,7 @@ static int ice_sysctl_fwlog_module_log_severity(SYSCTL_HANDLER_ARGS);
static int
ice_reconfig_fw_log(struct ice_softc *sc, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
ice_fwlog_init(&sc->hw, cfg);
@@ -223,7 +223,7 @@ ice_sysctl_fwlog_register(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_fwlog_cfg *cfg = &sc->hw.fwlog_cfg;
- enum ice_status status;
+ int status;
int error;
u8 enabled;
@@ -359,23 +359,6 @@ ice_add_fw_logging_tunables(struct ice_softc *sc, struct sysctl_oid *parent)
"Firmware Logging");
fwlog_list = SYSCTL_CHILDREN(fwlog_node);
- cfg->log_resolution = 10;
- SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution",
- ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
- 0, ice_sysctl_fwlog_log_resolution,
- "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION);
-
- cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
- SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en",
- ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
- ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options,
- "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA);
-
- SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en",
- ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
- ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options,
- "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA);
-
SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "on_load",
ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
ICE_FWLOG_OPTION_REGISTER_ON_INIT, ice_sysctl_fwlog_set_cfg_options,
@@ -386,23 +369,43 @@ ice_add_fw_logging_tunables(struct ice_softc *sc, struct sysctl_oid *parent)
0, ice_sysctl_fwlog_register,
"CU", ICE_SYSCTL_HELP_FWLOG_REGISTER);
- module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity",
- ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
- "Level of log output");
-
- module_list = SYSCTL_CHILDREN(module_node);
-
- for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
- /* Setup some defaults */
- cfg->module_entries[i].module_id = i;
- cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE;
- module = (enum ice_aqc_fw_logging_mod)i;
+ hw->pf_id = ice_get_pf_id(hw);
+ if (hw->pf_id == 0) {
+ module_node = SYSCTL_ADD_NODE(ctx, fwlog_list, OID_AUTO, "severity",
+ ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
+ "Level of log output");
+
+ module_list = SYSCTL_CHILDREN(module_node);
+
+ for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
+ /* Setup some defaults */
+ cfg->module_entries[i].module_id = i;
+ cfg->module_entries[i].log_level = ICE_FWLOG_LEVEL_NONE;
+ module = (enum ice_aqc_fw_logging_mod)i;
+
+ SYSCTL_ADD_PROC(ctx, module_list,
+ OID_AUTO, ice_fw_module_str(module),
+ ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc,
+ module, ice_sysctl_fwlog_module_log_severity,
+ "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY);
+ }
- SYSCTL_ADD_PROC(ctx, module_list,
- OID_AUTO, ice_fw_module_str(module),
- ICE_CTLFLAG_DEBUG | CTLTYPE_STRING | CTLFLAG_RWTUN, sc,
- module, ice_sysctl_fwlog_module_log_severity,
- "A", ICE_SYSCTL_HELP_FWLOG_MODULE_SEVERITY);
+ cfg->log_resolution = 10;
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "log_resolution",
+ ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ 0, ice_sysctl_fwlog_log_resolution,
+ "CU", ICE_SYSCTL_HELP_FWLOG_LOG_RESOLUTION);
+
+ cfg->options |= ICE_FWLOG_OPTION_ARQ_ENA;
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "arq_en",
+ ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ ICE_FWLOG_OPTION_ARQ_ENA, ice_sysctl_fwlog_set_cfg_options,
+ "CU", ICE_SYSCTL_HELP_FWLOG_ARQ_ENA);
+
+ SYSCTL_ADD_PROC(ctx, fwlog_list, OID_AUTO, "uart_en",
+ ICE_CTLFLAG_DEBUG | CTLTYPE_U8 | CTLFLAG_RWTUN, sc,
+ ICE_FWLOG_OPTION_UART_ENA, ice_sysctl_fwlog_set_cfg_options,
+ "CU", ICE_SYSCTL_HELP_FWLOG_UART_ENA);
}
}
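The reshuffled hunk above creates the device-global firmware-logging sysctls (severity, log_resolution, arq_en, uart_en) only on physical function 0, since the firmware log configuration is shared by every PF on one device. The shape of the pattern, as a sketch (the helper name is hypothetical):

    /* Per-PF knobs stay unconditional; global knobs belong to PF 0. */
    hw->pf_id = ice_get_pf_id(hw);
    if (hw->pf_id == 0)
            register_global_fwlog_sysctls(sc);  /* hypothetical helper */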
diff --git a/sys/dev/ice/ice_fwlog.c b/sys/dev/ice/ice_fwlog.c
index c3c6d9101627..07ca94ee003d 100644
--- a/sys/dev/ice/ice_fwlog.c
+++ b/sys/dev/ice/ice_fwlog.c
@@ -29,6 +29,7 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "ice_osdep.h"
#include "ice_common.h"
#include "ice_fwlog.h"
@@ -120,7 +121,7 @@ static bool valid_cfg(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
* ice_init_hw(). Firmware logging will be configured based on these settings
* and also the PF will be registered on init.
*/
-enum ice_status
+int
ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
if (!valid_cfg(hw, cfg))
@@ -128,7 +129,7 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
cache_cfg(hw, cfg);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -139,14 +140,14 @@ ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
* @options: options from ice_fwlog_cfg->options structure
* @log_resolution: logging resolution
*/
-static enum ice_status
+static int
ice_aq_fwlog_set(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
u16 num_entries, u16 options, u16 log_resolution)
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
u16 i;
fw_modules = (struct ice_aqc_fw_log_cfg_resp *)
@@ -208,10 +209,10 @@ bool ice_fwlog_supported(struct ice_hw *hw)
* ice_fwlog_register. Note, that ice_fwlog_register does not need to be called
* for init.
*/
-enum ice_status
+int
ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -268,13 +269,13 @@ update_cached_entries(struct ice_hw *hw, struct ice_fwlog_module_entry *entries,
* Only the entries passed in will be affected. All other firmware logging
* settings will be unaffected.
*/
-enum ice_status
+int
ice_fwlog_update_modules(struct ice_hw *hw,
struct ice_fwlog_module_entry *entries,
u16 num_entries)
{
struct ice_fwlog_cfg *cfg;
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -305,7 +306,7 @@ status_out:
* @hw: pointer to the HW structure
* @reg: true to register and false to unregister
*/
-static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
+static int ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
{
struct ice_aq_desc desc;
@@ -324,9 +325,9 @@ static enum ice_status ice_aq_fwlog_register(struct ice_hw *hw, bool reg)
* After this call the PF will start to receive firmware logging based on the
* configuration set in ice_fwlog_set.
*/
-enum ice_status ice_fwlog_register(struct ice_hw *hw)
+int ice_fwlog_register(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -344,9 +345,9 @@ enum ice_status ice_fwlog_register(struct ice_hw *hw)
* ice_fwlog_unregister - Unregister the PF from firmware logging
* @hw: pointer to the HW structure
*/
-enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
+int ice_fwlog_unregister(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -365,14 +366,14 @@ enum ice_status ice_fwlog_unregister(struct ice_hw *hw)
* @hw: pointer to the HW structure
* @cfg: firmware logging configuration to populate
*/
-static enum ice_status
+static int
ice_aq_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
struct ice_aqc_fw_log_cfg_resp *fw_modules;
struct ice_aqc_fw_log *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
u16 i, module_id_cnt;
+ int status;
void *buf;
ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
@@ -438,7 +439,7 @@ status_out:
void ice_fwlog_set_support_ena(struct ice_hw *hw)
{
struct ice_fwlog_cfg *cfg;
- enum ice_status status;
+ int status;
hw->fwlog_support_ena = false;
@@ -465,10 +466,10 @@ void ice_fwlog_set_support_ena(struct ice_hw *hw)
* @hw: pointer to the HW structure
* @cfg: config to populate based on current firmware logging settings
*/
-enum ice_status
+int
ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
{
- enum ice_status status;
+ int status;
if (!ice_fwlog_supported(hw))
return ICE_ERR_NOT_SUPPORTED;
@@ -482,7 +483,7 @@ ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg)
cache_cfg(hw, cfg);
- return ICE_SUCCESS;
+ return 0;
}
/**
diff --git a/sys/dev/ice/ice_fwlog.h b/sys/dev/ice/ice_fwlog.h
index 41e41de1e670..4cf5b678049d 100644
--- a/sys/dev/ice/ice_fwlog.h
+++ b/sys/dev/ice/ice_fwlog.h
@@ -76,15 +76,15 @@ struct ice_fwlog_cfg {
void ice_fwlog_set_support_ena(struct ice_hw *hw);
bool ice_fwlog_supported(struct ice_hw *hw);
-enum ice_status ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
-enum ice_status
+int ice_fwlog_init(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_set(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int ice_fwlog_get(struct ice_hw *hw, struct ice_fwlog_cfg *cfg);
+int
ice_fwlog_update_modules(struct ice_hw *hw,
struct ice_fwlog_module_entry *entries,
u16 num_entries);
-enum ice_status ice_fwlog_register(struct ice_hw *hw);
-enum ice_status ice_fwlog_unregister(struct ice_hw *hw);
+int ice_fwlog_register(struct ice_hw *hw);
+int ice_fwlog_unregister(struct ice_hw *hw);
void
ice_fwlog_event_dump(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
#endif /* _ICE_FWLOG_H_ */
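Taken together, the converted prototypes describe the firmware-logging life cycle; a usage sketch built only from the declarations above, with error handling abbreviated and hw/cfg assumed already initialized:

    int err;

    /* Validate and cache the configuration, push it to firmware, then
     * register this PF to receive log events over the AdminQ. */
    if ((err = ice_fwlog_init(hw, cfg)) != 0)
            return (err);
    if ((err = ice_fwlog_set(hw, cfg)) != 0)
            return (err);
    return (ice_fwlog_register(hw));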
diff --git a/sys/dev/ice/ice_hw_autogen.h b/sys/dev/ice/ice_hw_autogen.h
index 8e59ebc76835..3f2778d91a4b 100644
--- a/sys/dev/ice/ice_hw_autogen.h
+++ b/sys/dev/ice/ice_hw_autogen.h
@@ -34,6 +34,26 @@
#ifndef _ICE_HW_AUTOGEN_H_
#define _ICE_HW_AUTOGEN_H_
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE)
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S)
+#define PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S)
+#define PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M : E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE(_i) (0x000FD000 + ((_i) * 64)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_MAX_INDEX 7
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_START_S 0
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_START_M MAKEMASK(0x3F, 0)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_END_S 6
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_END_M MAKEMASK(0x3F, 6)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_TYPE_S 12
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_TYPE_M MAKEMASK(0x3, 12)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_NUM_S 14
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_VM_VF_NUM_M MAKEMASK(0x3FF, 14)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_PF_NUM_S 24
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_PF_NUM_M MAKEMASK(0x7, 24)
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_ENABLE_S 31
+#define E830_GLTCLAN_TSYN_REG_RANGE_ENFORCE_ENABLE_M BIT(31)
#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
#define GL_HICR 0x00082040
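The new *_BY_MAC(hw) macros above pick the E800 or E830 register (or field mask) at run time from hw->mac_type, so shared code needs no compile-time split. A hedged sketch of the intended use, assuming the driver's usual rd32/wr32 register accessors:

    /* Enable TX pause on whichever MAC generation this device has. */
    u32 reg = PRTMAC_CTL_TX_PAUSE_ENABLE_BY_MAC(hw);
    u32 val = rd32(hw, reg);

    val |= PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M_BY_MAC(hw);
    wr32(hw, reg, val);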
@@ -43,6 +63,7 @@
#define GLNVM_FLA 0x000B6108
#define GL_HIDA_MAX_INDEX 15
#define GL_HIBA_MAX_INDEX 1023
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_RDPU_CNTRL 0x00052054 /* Reset Source: CORER */
#define GL_RDPU_CNTRL_RX_PAD_EN_S 0
#define GL_RDPU_CNTRL_RX_PAD_EN_M BIT(0)
@@ -57,9 +78,15 @@
#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_S 10
#define GL_RDPU_CNTRL_PE_ACK_REQ_PM_TH_M MAKEMASK(0x3F, 10)
#define GL_RDPU_CNTRL_REQ_WB_PM_TH_S 16
-#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16)
-#define GL_RDPU_CNTRL_ECO_S 21
-#define GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21)
+#define GL_RDPU_CNTRL_REQ_WB_PM_TH_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M : E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M)
+#define E800_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x1F, 16)
+#define E830_GL_RDPU_CNTRL_REQ_WB_PM_TH_M MAKEMASK(0x3F, 16)
+#define GL_RDPU_CNTRL_ECO_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_S : E800_GL_RDPU_CNTRL_ECO_S)
+#define E800_GL_RDPU_CNTRL_ECO_S 21
+#define E830_GL_RDPU_CNTRL_ECO_S 23
+#define GL_RDPU_CNTRL_ECO_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_RDPU_CNTRL_ECO_M : E800_GL_RDPU_CNTRL_ECO_M)
+#define E800_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x7FF, 21)
+#define E830_GL_RDPU_CNTRL_ECO_M MAKEMASK(0x1FF, 23)
#define MSIX_PBA(_i) (0x00008000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: FLR */
#define MSIX_PBA_MAX_INDEX 2
#define MSIX_PBA_PENBIT_S 0
@@ -456,9 +483,11 @@
#define PF0INT_OICR_CPM_PAGE_QUEUE_S 1
#define PF0INT_OICR_CPM_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_CPM_PAGE_RSV1_S 2
-#define PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_CPM_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_CPM_PAGE_RSV1_M : E800_PF0INT_OICR_CPM_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_CPM_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_CPM_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_CPM_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_CPM_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_CPM_PAGE_TSYN_EVNT_S 12
@@ -520,9 +549,11 @@
#define PF0INT_OICR_HLP_PAGE_QUEUE_S 1
#define PF0INT_OICR_HLP_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_HLP_PAGE_RSV1_S 2
-#define PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_HLP_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_HLP_PAGE_RSV1_M : E800_PF0INT_OICR_HLP_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_HLP_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_HLP_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_HLP_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_HLP_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_HLP_PAGE_TSYN_EVNT_S 12
@@ -569,9 +600,11 @@
#define PF0INT_OICR_PSM_PAGE_QUEUE_S 1
#define PF0INT_OICR_PSM_PAGE_QUEUE_M BIT(1)
#define PF0INT_OICR_PSM_PAGE_RSV1_S 2
-#define PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_PSM_PAGE_HH_COMP_S 10
-#define PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_PAGE_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_PAGE_RSV1_M : E800_PF0INT_OICR_PSM_PAGE_RSV1_M)
+#define E800_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_PSM_PAGE_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_S 10
+#define E800_PF0INT_OICR_PSM_PAGE_HH_COMP_M BIT(10)
#define PF0INT_OICR_PSM_PAGE_TSYN_TX_S 11
#define PF0INT_OICR_PSM_PAGE_TSYN_TX_M BIT(11)
#define PF0INT_OICR_PSM_PAGE_TSYN_EVNT_S 12
@@ -620,10 +653,10 @@
#define QTX_COMM_DBELL_PAGE_MAX_INDEX 16383
#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_S 0
#define QTX_COMM_DBELL_PAGE_QTX_COMM_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
-#define QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */
-#define QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255
-#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0
-#define QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE(_DBLQ) (0x02F00000 + ((_DBLQ) * 4096)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_MAX_INDEX 255
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_S 0
+#define E800_QTX_COMM_DBLQ_DBELL_PAGE_TAIL_M MAKEMASK(0x1FFF, 0)
#define VSI_MBX_ARQBAH(_VSI) (0x02000018 + ((_VSI) * 4096)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSI_MBX_ARQBAH_MAX_INDEX 767
#define VSI_MBX_ARQBAH_ARQBAH_S 0
@@ -2026,18 +2059,18 @@
#define GLTPB_WB_RL_PERIOD_M MAKEMASK(0xFFFF, 0)
#define GLTPB_WB_RL_EN_S 16
#define GLTPB_WB_RL_EN_M BIT(16)
-#define PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */
-#define PRTDCB_FCCFG_TFCE_S 3
-#define PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3)
-#define PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */
-#define PRTDCB_FCRTV_FC_REFRESH_TH_S 0
-#define PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0)
-#define PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */
-#define PRTDCB_FCTTVN_MAX_INDEX 3
-#define PRTDCB_FCTTVN_TTV_2N_S 0
-#define PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0)
-#define PRTDCB_FCTTVN_TTV_2N_P1_S 16
-#define PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16)
+#define E800_PRTDCB_FCCFG 0x001E4640 /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCCFG_TFCE_S 3
+#define E800_PRTDCB_FCCFG_TFCE_M MAKEMASK(0x3, 3)
+#define E800_PRTDCB_FCRTV 0x001E4600 /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCRTV_FC_REFRESH_TH_S 0
+#define E800_PRTDCB_FCRTV_FC_REFRESH_TH_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: GLOBR */
+#define E800_PRTDCB_FCTTVN_MAX_INDEX 3
+#define E800_PRTDCB_FCTTVN_TTV_2N_S 0
+#define E800_PRTDCB_FCTTVN_TTV_2N_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTDCB_FCTTVN_TTV_2N_P1_S 16
+#define E800_PRTDCB_FCTTVN_TTV_2N_P1_M MAKEMASK(0xFFFF, 16)
#define PRTDCB_GENC 0x00083000 /* Reset Source: CORER */
#define PRTDCB_GENC_NUMTC_S 2
#define PRTDCB_GENC_NUMTC_M MAKEMASK(0xF, 2)
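Registers above that exist only on E800-family parts are renamed with an E800_ prefix rather than gaining a _BY_MAC selector; callers are expected to guard them explicitly. A minimal sketch (the refresh_th value is assumed for illustration):

    /* E800-only flow-control refresh threshold, absent on E830. */
    if (hw->mac_type != ICE_MAC_E830)
            wr32(hw, E800_PRTDCB_FCRTV, refresh_th);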
@@ -2403,214 +2436,222 @@
#define TPB_WB_RL_TC_STAT_MAX_INDEX 31
#define TPB_WB_RL_TC_STAT_BUCKET_S 0
#define TPB_WB_RL_TC_STAT_BUCKET_M MAKEMASK(0x1FFFF, 0)
-#define GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2
-#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
-#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
-#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
-#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
-#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30
-#define GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
-#define GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
-#define GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
-#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0
-#define GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2
-#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
-#define GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_CDMD_L1SEL(_i) (0x00210054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CDMD_L1SEL_MAX_INDEX 2
+#define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_S 0
+#define E800_GL_ACLEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_S 8
+#define E800_GL_ACLEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_S 16
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_S 24
+#define E800_GL_ACLEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_S 30
+#define E800_GL_ACLEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR(_i) (0x00210084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_S 0
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_OFF_M MAKEMASK(0x7, 0)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_S 8
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_LINE_IDX_M MAKEMASK(0x7, 8)
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_CTLTBL_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_CTLTBL_L2DATA(_i) (0x00210090 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_CTLTBL_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_DFLT_L2PRFL(_i) (0x00210138 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_DFLT_L2PRFL_MAX_INDEX 2
+#define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_S 0
+#define E800_GL_ACLEXT_DFLT_L2PRFL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
#define GL_ACLEXT_DFLT_L2PRFL_ACL(_i) (0x00393800 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_ACLEXT_DFLT_L2PRFL_ACL_MAX_INDEX 2
#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_S 0
#define GL_ACLEXT_DFLT_L2PRFL_ACL_DFLT_PRFL_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
-#define GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
-#define GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2
-#define GL_ACLEXT_FLGS_L1TBL_LSB_S 0
-#define GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FLGS_L1TBL_MSB_S 16
-#define GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
-#define GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
-#define GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
-#define GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_FORCE_PID_MAX_INDEX 2
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31
-#define GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
-#define GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
-#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_K2N_L2DATA_DATA0_S 0
-#define GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_K2N_L2DATA_DATA1_S 8
-#define GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_K2N_L2DATA_DATA2_S 16
-#define GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_K2N_L2DATA_DATA3_S 24
-#define GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_PMASK0_MAX_INDEX 2
-#define GL_ACLEXT_L2_PMASK0_BITMASK_S 0
-#define GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_PMASK1_MAX_INDEX 2
-#define GL_ACLEXT_L2_PMASK1_BITMASK_S 0
-#define GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
-#define GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_TMASK0_MAX_INDEX 2
-#define GL_ACLEXT_L2_TMASK0_BITMASK_S 0
-#define GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2_TMASK1_MAX_INDEX 2
-#define GL_ACLEXT_L2_TMASK1_BITMASK_S 0
-#define GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2BMP0_3_MAX_INDEX 2
-#define GL_ACLEXT_L2BMP0_3_BMP0_S 0
-#define GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP0_3_BMP1_S 8
-#define GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_L2BMP0_3_BMP2_S 16
-#define GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_L2BMP0_3_BMP3_S 24
-#define GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2BMP4_7_MAX_INDEX 2
-#define GL_ACLEXT_L2BMP4_7_BMP4_S 0
-#define GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_L2BMP4_7_BMP5_S 8
-#define GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_L2BMP4_7_BMP6_S 16
-#define GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_L2BMP4_7_BMP7_S 24
-#define GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_L2PRTMOD_MAX_INDEX 2
-#define GL_ACLEXT_L2PRTMOD_XLT1_S 0
-#define GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
-#define GL_ACLEXT_L2PRTMOD_XLT2_S 8
-#define GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
-#define GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
-#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_N2N_L2DATA_DATA0_S 0
-#define GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_N2N_L2DATA_DATA1_S 8
-#define GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
-#define GL_ACLEXT_N2N_L2DATA_DATA2_S 16
-#define GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
-#define GL_ACLEXT_N2N_L2DATA_DATA3_S 24
-#define GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
-#define GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2
-#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
-#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2
-#define GL_ACLEXT_P2P_L1DATA_DATA_S 0
-#define GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2
-#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
-#define GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
-#define GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_PLVL_SEL_MAX_INDEX 2
-#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0
-#define GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
-#define GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
-#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0
-#define GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2
-#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0
-#define GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
-#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT0_L1DATA_DATA_S 0
-#define GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
-#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT1_L2DATA_DATA_S 0
-#define GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2
-#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0
-#define GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
-#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31
-#define GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
-#define GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
-#define GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2
-#define GL_ACLEXT_XLT2_L2DATA_DATA_S 0
-#define GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1(_i) (0x0021006C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_S 0
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_S 16
+#define E800_GL_ACLEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3(_i) (0x00210078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_S 0
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_S 16
+#define E800_GL_ACLEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E800_GL_ACLEXT_FLGS_L1TBL(_i) (0x00210060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FLGS_L1TBL_MAX_INDEX 2
+#define E800_GL_ACLEXT_FLGS_L1TBL_LSB_S 0
+#define E800_GL_ACLEXT_FLGS_L1TBL_LSB_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_FLGS_L1TBL_MSB_S 16
+#define E800_GL_ACLEXT_FLGS_L1TBL_MSB_M MAKEMASK(0xFFFF, 16)
+#define E800_GL_ACLEXT_FORCE_L1CDID(_i) (0x00210018 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FORCE_L1CDID_MAX_INDEX 2
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_S 0
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_M MAKEMASK(0xF, 0)
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_S 31
+#define E800_GL_ACLEXT_FORCE_L1CDID_STATIC_CDID_EN_M BIT(31)
+#define E800_GL_ACLEXT_FORCE_PID(_i) (0x00210000 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_FORCE_PID_MAX_INDEX 2
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_S 0
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_S 31
+#define E800_GL_ACLEXT_FORCE_PID_STATIC_PID_EN_M BIT(31)
+#define E800_GL_ACLEXT_K2N_L2ADDR(_i) (0x00210144 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_K2N_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_K2N_L2ADDR_LINE_IDX_M MAKEMASK(0x7F, 0)
+#define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_K2N_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_K2N_L2DATA(_i) (0x00210150 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_K2N_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA0_S 0
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA1_S 8
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA2_S 16
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA3_S 24
+#define E800_GL_ACLEXT_K2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2_PMASK0(_i) (0x002100FC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_PMASK0_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_PMASK0_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_PMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_L2_PMASK1(_i) (0x00210108 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_PMASK1_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_PMASK1_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_PMASK1_BITMASK_M MAKEMASK(0xFFFF, 0)
+#define E800_GL_ACLEXT_L2_TMASK0(_i) (0x00210498 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_TMASK0_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_TMASK0_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_TMASK0_BITMASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_L2_TMASK1(_i) (0x002104A4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2_TMASK1_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2_TMASK1_BITMASK_S 0
+#define E800_GL_ACLEXT_L2_TMASK1_BITMASK_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP0_3(_i) (0x002100A8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2BMP0_3_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2BMP0_3_BMP0_S 0
+#define E800_GL_ACLEXT_L2BMP0_3_BMP0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP1_S 8
+#define E800_GL_ACLEXT_L2BMP0_3_BMP1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP2_S 16
+#define E800_GL_ACLEXT_L2BMP0_3_BMP2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_L2BMP0_3_BMP3_S 24
+#define E800_GL_ACLEXT_L2BMP0_3_BMP3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2BMP4_7(_i) (0x002100B4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2BMP4_7_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2BMP4_7_BMP4_S 0
+#define E800_GL_ACLEXT_L2BMP4_7_BMP4_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP5_S 8
+#define E800_GL_ACLEXT_L2BMP4_7_BMP5_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP6_S 16
+#define E800_GL_ACLEXT_L2BMP4_7_BMP6_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_L2BMP4_7_BMP7_S 24
+#define E800_GL_ACLEXT_L2BMP4_7_BMP7_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_L2PRTMOD(_i) (0x0021009C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_L2PRTMOD_MAX_INDEX 2
+#define E800_GL_ACLEXT_L2PRTMOD_XLT1_S 0
+#define E800_GL_ACLEXT_L2PRTMOD_XLT1_M MAKEMASK(0x3, 0)
+#define E800_GL_ACLEXT_L2PRTMOD_XLT2_S 8
+#define E800_GL_ACLEXT_L2PRTMOD_XLT2_M MAKEMASK(0x3, 8)
+#define E800_GL_ACLEXT_N2N_L2ADDR(_i) (0x0021015C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_N2N_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_N2N_L2ADDR_LINE_IDX_M MAKEMASK(0x3F, 0)
+#define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_N2N_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_N2N_L2DATA(_i) (0x00210168 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_N2N_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA0_S 0
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA0_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA1_S 8
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA1_M MAKEMASK(0xFF, 8)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA2_S 16
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA2_M MAKEMASK(0xFF, 16)
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA3_S 24
+#define E800_GL_ACLEXT_N2N_L2DATA_DATA3_M MAKEMASK(0xFF, 24)
+#define E800_GL_ACLEXT_P2P_L1ADDR(_i) (0x00210024 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_P2P_L1ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_P2P_L1ADDR_LINE_IDX_M BIT(0)
+#define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_P2P_L1ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_P2P_L1DATA(_i) (0x00210030 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_P2P_L1DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_P2P_L1DATA_DATA_S 0
+#define E800_GL_ACLEXT_P2P_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_PID_L2GKTYPE(_i) (0x002100F0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_PID_L2GKTYPE_MAX_INDEX 2
+#define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_S 0
+#define E800_GL_ACLEXT_PID_L2GKTYPE_PID_GKTYPE_M MAKEMASK(0x3, 0)
+#define E800_GL_ACLEXT_PLVL_SEL(_i) (0x0021000C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_PLVL_SEL_MAX_INDEX 2
+#define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_S 0
+#define E800_GL_ACLEXT_PLVL_SEL_PLVL_SEL_M BIT(0)
+#define E800_GL_ACLEXT_TCAM_L2ADDR(_i) (0x00210114 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_TCAM_L2ADDR_LINE_IDX_M MAKEMASK(0x3FF, 0)
+#define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_TCAM_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_TCAM_L2DATALSB(_i) (0x00210120 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_S 0
+#define E800_GL_ACLEXT_TCAM_L2DATALSB_DATALSB_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB(_i) (0x0021012C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_MAX_INDEX 2
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_S 0
+#define E800_GL_ACLEXT_TCAM_L2DATAMSB_DATAMSB_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_XLT0_L1ADDR(_i) (0x0021003C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT0_L1ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT0_L1ADDR_LINE_IDX_M MAKEMASK(0xFF, 0)
+#define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT0_L1ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT0_L1DATA(_i) (0x00210048 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT0_L1DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT0_L1DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT0_L1DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_XLT1_L2ADDR(_i) (0x002100C0 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT1_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT1_L2ADDR_LINE_IDX_M MAKEMASK(0x7FF, 0)
+#define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT1_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT1_L2DATA(_i) (0x002100CC + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT1_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT1_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT1_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GL_ACLEXT_XLT2_L2ADDR(_i) (0x002100D8 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT2_L2ADDR_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_S 0
+#define E800_GL_ACLEXT_XLT2_L2ADDR_LINE_IDX_M MAKEMASK(0x1FF, 0)
+#define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_S 31
+#define E800_GL_ACLEXT_XLT2_L2ADDR_AUTO_INC_M BIT(31)
+#define E800_GL_ACLEXT_XLT2_L2DATA(_i) (0x002100E4 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
+#define E800_GL_ACLEXT_XLT2_L2DATA_MAX_INDEX 2
+#define E800_GL_ACLEXT_XLT2_L2DATA_DATA_S 0
+#define E800_GL_ACLEXT_XLT2_L2DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
#define GL_PREEXT_CDMD_L1SEL(_i) (0x0020F054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_CDMD_L1SEL_MAX_INDEX 2
#define GL_PREEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PREEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E830_GL_PREEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0)
#define GL_PREEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PREEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E830_GL_PREEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8)
#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E830_GL_PREEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16)
#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M)
+#define E800_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E830_GL_PREEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24)
#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_S 30
#define GL_PREEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
#define GL_PREEXT_CTLTBL_L2ADDR(_i) (0x0020F084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
@@ -2632,15 +2673,23 @@
#define GL_PREEXT_FLGS_L1SEL0_1(_i) (0x0020F06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1SEL0_1_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M)
+#define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0)
#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M)
+#define E800_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PREEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16)
#define GL_PREEXT_FLGS_L1SEL2_3(_i) (0x0020F078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1SEL2_3_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M)
+#define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0)
#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PREEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M)
+#define E800_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PREEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16)
#define GL_PREEXT_FLGS_L1TBL(_i) (0x0020F060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PREEXT_FLGS_L1TBL_MAX_INDEX 2
#define GL_PREEXT_FLGS_L1TBL_LSB_S 0
@@ -2798,13 +2847,21 @@
#define GL_PSTEXT_CDMD_L1SEL(_i) (0x0020E054 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_CDMD_L1SEL_MAX_INDEX 2
#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_S 0
-#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define GL_PSTEXT_CDMD_L1SEL_RX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x1F, 0)
+#define E830_GL_PSTEXT_CDMD_L1SEL_RX_SEL_M MAKEMASK(0x3F, 0)
#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_S 8
-#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define GL_PSTEXT_CDMD_L1SEL_TX_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x1F, 8)
+#define E830_GL_PSTEXT_CDMD_L1SEL_TX_SEL_M MAKEMASK(0x3F, 8)
#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_S 16
-#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x1F, 16)
+#define E830_GL_PSTEXT_CDMD_L1SEL_AUX0_SEL_M MAKEMASK(0x3F, 16)
#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_S 24
-#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M : E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M)
+#define E800_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x1F, 24)
+#define E830_GL_PSTEXT_CDMD_L1SEL_AUX1_SEL_M MAKEMASK(0x3F, 24)
#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_S 30
#define GL_PSTEXT_CDMD_L1SEL_BIDIR_ENA_M MAKEMASK(0x3, 30)
#define GL_PSTEXT_CTLTBL_L2ADDR(_i) (0x0020E084 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
@@ -2834,15 +2891,23 @@
#define GL_PSTEXT_FLGS_L1SEL0_1(_i) (0x0020E06C + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1SEL0_1_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_S 0
-#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS0_M MAKEMASK(0x3FF, 0)
#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_S 16
-#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M : E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PSTEXT_FLGS_L1SEL0_1_FLS1_M MAKEMASK(0x3FF, 16)
#define GL_PSTEXT_FLGS_L1SEL2_3(_i) (0x0020E078 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1SEL2_3_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_S 0
-#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x1FF, 0)
+#define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS2_M MAKEMASK(0x3FF, 0)
#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_S 16
-#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M : E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M)
+#define E800_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x1FF, 16)
+#define E830_GL_PSTEXT_FLGS_L1SEL2_3_FLS3_M MAKEMASK(0x3FF, 16)
#define GL_PSTEXT_FLGS_L1TBL(_i) (0x0020E060 + ((_i) * 4)) /* _i=0...2 */ /* Reset Source: CORER */
#define GL_PSTEXT_FLGS_L1TBL_MAX_INDEX 2
#define GL_PSTEXT_FLGS_L1TBL_LSB_S 0
@@ -4424,11 +4489,11 @@
#define GLTPB_100G_MAC_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_S 16
#define GLTPB_100G_MAC_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
-#define GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */
-#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0
-#define GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
-#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16
-#define GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E800_GLTPB_100G_RPB_FC_THRESH 0x0009963C /* Reset Source: CORER */
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_S 0
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_S 16
+#define E800_GLTPB_100G_RPB_FC_THRESH_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
#define GLTPB_PACING_10G 0x000994E4 /* Reset Source: CORER */
#define GLTPB_PACING_10G_N_S 0
#define GLTPB_PACING_10G_N_M MAKEMASK(0xFF, 0)
@@ -4484,8 +4549,8 @@
#define GL_UFUSE_SOC_SOC_TYPE_M BIT(10)
#define GL_UFUSE_SOC_BTS_MODE_S 11
#define GL_UFUSE_SOC_BTS_MODE_M BIT(11)
-#define GL_UFUSE_SOC_SPARE_FUSES_S 12
-#define GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12)
+#define E800_GL_UFUSE_SOC_SPARE_FUSES_S 12
+#define E800_GL_UFUSE_SOC_SPARE_FUSES_M MAKEMASK(0xF, 12)
#define EMPINT_GPIO_ENA 0x000880C0 /* Reset Source: POR */
#define EMPINT_GPIO_ENA_GPIO0_ENA_S 0
#define EMPINT_GPIO_ENA_GPIO0_ENA_M BIT(0)
@@ -4572,7 +4637,9 @@
#define GLINT_TSYN_PFMSTR_PF_MASTER_M MAKEMASK(0x7, 0)
#define GLINT_TSYN_PHY 0x0016CC50 /* Reset Source: CORER */
#define GLINT_TSYN_PHY_PHY_INDX_S 0
-#define GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define GLINT_TSYN_PHY_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLINT_TSYN_PHY_PHY_INDX_M : E800_GLINT_TSYN_PHY_PHY_INDX_M)
+#define E800_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define E830_GLINT_TSYN_PHY_PHY_INDX_M MAKEMASK(0xFF, 0)
#define GLINT_VECT2FUNC(_INT) (0x00162000 + ((_INT) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define GLINT_VECT2FUNC_MAX_INDEX 2047
#define GLINT_VECT2FUNC_VF_NUM_S 0
@@ -4632,9 +4699,11 @@
#define PF0INT_OICR_CPM_QUEUE_S 1
#define PF0INT_OICR_CPM_QUEUE_M BIT(1)
#define PF0INT_OICR_CPM_RSV1_S 2
-#define PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_CPM_HH_COMP_S 10
-#define PF0INT_OICR_CPM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_CPM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_CPM_RSV1_M : E800_PF0INT_OICR_CPM_RSV1_M)
+#define E800_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_CPM_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_CPM_HH_COMP_S 10
+#define E800_PF0INT_OICR_CPM_HH_COMP_M BIT(10)
#define PF0INT_OICR_CPM_TSYN_TX_S 11
#define PF0INT_OICR_CPM_TSYN_TX_M BIT(11)
#define PF0INT_OICR_CPM_TSYN_EVNT_S 12
@@ -4723,9 +4792,11 @@
#define PF0INT_OICR_HLP_QUEUE_S 1
#define PF0INT_OICR_HLP_QUEUE_M BIT(1)
#define PF0INT_OICR_HLP_RSV1_S 2
-#define PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_HLP_HH_COMP_S 10
-#define PF0INT_OICR_HLP_HH_COMP_M BIT(10)
+#define PF0INT_OICR_HLP_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_HLP_RSV1_M : E800_PF0INT_OICR_HLP_RSV1_M)
+#define E800_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_HLP_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_HLP_HH_COMP_S 10
+#define E800_PF0INT_OICR_HLP_HH_COMP_M BIT(10)
#define PF0INT_OICR_HLP_TSYN_TX_S 11
#define PF0INT_OICR_HLP_TSYN_TX_M BIT(11)
#define PF0INT_OICR_HLP_TSYN_EVNT_S 12
@@ -4772,9 +4843,11 @@
#define PF0INT_OICR_PSM_QUEUE_S 1
#define PF0INT_OICR_PSM_QUEUE_M BIT(1)
#define PF0INT_OICR_PSM_RSV1_S 2
-#define PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2)
-#define PF0INT_OICR_PSM_HH_COMP_S 10
-#define PF0INT_OICR_PSM_HH_COMP_M BIT(10)
+#define PF0INT_OICR_PSM_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PF0INT_OICR_PSM_RSV1_M : E800_PF0INT_OICR_PSM_RSV1_M)
+#define E800_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PF0INT_OICR_PSM_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PF0INT_OICR_PSM_HH_COMP_S 10
+#define E800_PF0INT_OICR_PSM_HH_COMP_M BIT(10)
#define PF0INT_OICR_PSM_TSYN_TX_S 11
#define PF0INT_OICR_PSM_TSYN_TX_M BIT(11)
#define PF0INT_OICR_PSM_TSYN_EVNT_S 12
@@ -4895,9 +4968,11 @@
#define PFINT_OICR_QUEUE_S 1
#define PFINT_OICR_QUEUE_M BIT(1)
#define PFINT_OICR_RSV1_S 2
-#define PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2)
-#define PFINT_OICR_HH_COMP_S 10
-#define PFINT_OICR_HH_COMP_M BIT(10)
+#define PFINT_OICR_RSV1_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFINT_OICR_RSV1_M : E800_PFINT_OICR_RSV1_M)
+#define E800_PFINT_OICR_RSV1_M MAKEMASK(0xFF, 2)
+#define E830_PFINT_OICR_RSV1_M MAKEMASK(0x3F, 2)
+#define E800_PFINT_OICR_HH_COMP_S 10
+#define E800_PFINT_OICR_HH_COMP_M BIT(10)
#define PFINT_OICR_TSYN_TX_S 11
#define PFINT_OICR_TSYN_TX_M BIT(11)
#define PFINT_OICR_TSYN_EVNT_S 12
@@ -4963,7 +5038,9 @@
#define PFINT_SB_CTL_INTEVENT_M BIT(31)
#define PFINT_TSYN_MSK 0x0016C980 /* Reset Source: CORER */
#define PFINT_TSYN_MSK_PHY_INDX_S 0
-#define PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define PFINT_TSYN_MSK_PHY_INDX_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFINT_TSYN_MSK_PHY_INDX_M : E800_PFINT_TSYN_MSK_PHY_INDX_M)
+#define E800_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0x1F, 0)
+#define E830_PFINT_TSYN_MSK_PHY_INDX_M MAKEMASK(0xFF, 0)
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define QINT_RQCTL_MAX_INDEX 2047
#define QINT_RQCTL_MSIX_INDX_S 0
@@ -5230,76 +5307,96 @@
#define VSILAN_QTABLE_QINDEX_0_M MAKEMASK(0x7FF, 0)
#define VSILAN_QTABLE_QINDEX_1_S 16
#define VSILAN_QTABLE_QINDEX_1_M MAKEMASK(0x7FF, 16)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0
-#define PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
-#define PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0
-#define PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0
-#define PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
-#define PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */
-#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0
-#define PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E31C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E34C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E35C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E36C0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_M BIT(0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3220 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3240 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E3180 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3280 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E32A0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_S 0x001E3C40 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_S 0
+#define E800_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_PRTMAC_HSEC_CTL_RX_QUANTA_SHIFT_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E31A0 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32)) /* _i=0...8 */ /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M MAKEMASK(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E3960 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E3980 /* Reset Source: GLOBR */
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_S 0
+#define E800_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_M MAKEMASK(0xFFFF, 0)
+#define PRTMAC_LINK_DOWN_COUNTER_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_LINK_DOWN_COUNTER : E800_PRTMAC_LINK_DOWN_COUNTER)
+#define E800_PRTMAC_LINK_DOWN_COUNTER 0x001E47C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_LINK_DOWN_COUNTER 0x001E2460 /* Reset Source: GLOBR */
#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_S 0
#define PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
-#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_ENABLE_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) : E800_PRTMAC_MD_OVRRIDE_ENABLE(_i))
+#define E800_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E3C60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define E830_PRTMAC_MD_OVRRIDE_ENABLE(_i) (0x001E2500 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX)
+#define E800_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 7
+#define E830_PRTMAC_MD_OVRRIDE_ENABLE_MAX_INDEX 1
#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_S 0
#define PRTMAC_MD_OVRRIDE_ENABLE_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
-#define PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
-#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7
+#define PRTMAC_MD_OVRRIDE_VAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_VAL(_i) : E800_PRTMAC_MD_OVRRIDE_VAL(_i))
+#define E800_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E3D60 + ((_i) * 32)) /* _i=0...7 */ /* Reset Source: GLOBR */
+#define E830_PRTMAC_MD_OVRRIDE_VAL(_i) (0x001E2600 + ((_i) * 32)) /* _i=0...1 */ /* Reset Source: GLOBR */
+#define PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX : E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX)
+#define E800_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 7
+#define E830_PRTMAC_MD_OVRRIDE_VAL_MAX_INDEX 1
#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_S 0
#define PRTMAC_MD_OVRRIDE_VAL_PRTMAC_MD_OVRRIDE_ENABLE_M MAKEMASK(0xFFFFFFFF, 0)
#define PRTMAC_RX_CNT_MRKR 0x001E48E0 /* Reset Source: GLOBR */
#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_S 0
#define PRTMAC_RX_CNT_MRKR_RX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */
+#define PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT : E800_PRTMAC_RX_PKT_DRP_CNT)
+#define E800_PRTMAC_RX_PKT_DRP_CNT 0x001E3C20 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_PKT_DRP_CNT 0x001E2420 /* Reset Source: GLOBR */
#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_S 0
-#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16
-#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16)
+#define PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_PKT_DRP_CNT_M MAKEMASK(0xFFF, 0)
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 16
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S 28
+#define PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M : E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M)
+#define E800_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M MAKEMASK(0xF, 28)
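A minimal usage sketch for the _BY_MAC() selectors above (not part of this patch): the register address, the field shift, and the field mask can all differ between E800 and E830, so a read extracts the field through the paired selectors. rd32() is assumed to be the driver's usual register accessor; the helper name is hypothetical.

static u32
ice_get_rx_mkr_pkt_drops(struct ice_hw *hw)
{
	/* Pick the E800 or E830 register address at run time. */
	u32 reg = rd32(hw, PRTMAC_RX_PKT_DRP_CNT_BY_MAC(hw));

	/* Shift and mask also move between MACs (16/0xFFFF vs 28/0xF). */
	return ((reg & PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_M_BY_MAC(hw)) >>
	    PRTMAC_RX_PKT_DRP_CNT_RX_MKR_PKT_DRP_CNT_S_BY_MAC(hw));
}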
#define PRTMAC_TX_CNT_MRKR 0x001E48C0 /* Reset Source: GLOBR */
#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_S 0
#define PRTMAC_TX_CNT_MRKR_TX_CNT_MRKR_M MAKEMASK(0xFFFF, 0)
-#define PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */
+#define PRTMAC_TX_LNK_UP_CNT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTMAC_TX_LNK_UP_CNT : E800_PRTMAC_TX_LNK_UP_CNT)
+#define E800_PRTMAC_TX_LNK_UP_CNT 0x001E4840 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_LNK_UP_CNT 0x001E2480 /* Reset Source: GLOBR */
#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_S 0
#define PRTMAC_TX_LNK_UP_CNT_TX_LINK_UP_CNT_M MAKEMASK(0xFFFF, 0)
#define GL_MDCK_CFG1_TX_PQM 0x002D2DF4 /* Reset Source: CORER */
@@ -5360,8 +5457,8 @@
#define GL_MDCK_EN_TX_PQM_ILLEGAL_VF_QNUM_M BIT(24)
#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_S 25
#define GL_MDCK_EN_TX_PQM_QTAIL_GT_RING_LENGTH_M BIT(25)
-#define GL_MDCK_EN_TX_PQM_RSVD_S 26
-#define GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26)
+#define E800_GL_MDCK_EN_TX_PQM_RSVD_S 26
+#define E800_GL_MDCK_EN_TX_PQM_RSVD_M MAKEMASK(0x3F, 26)
#define GL_MDCK_RX 0x0029422C /* Reset Source: CORER */
#define GL_MDCK_RX_DESC_ADDR_S 0
#define GL_MDCK_RX_DESC_ADDR_M BIT(0)
@@ -5470,17 +5567,24 @@
#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
#define GL_FWRESETCNT_FWRESETCNT_S 0
#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */
+#define GL_MNG_FW_RAM_STAT_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FW_RAM_STAT : E800_GL_MNG_FW_RAM_STAT)
+#define E800_GL_MNG_FW_RAM_STAT 0x0008309C /* Reset Source: POR */
+#define E830_GL_MNG_FW_RAM_STAT 0x000830F4 /* Reset Source: POR */
#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_S 0
#define GL_MNG_FW_RAM_STAT_FW_RAM_RST_STAT_M BIT(0)
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_S 1
#define GL_MNG_FW_RAM_STAT_MNG_MEM_ECC_ERR_M BIT(1)
#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
-#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GL_MNG_FWSM_FW_MODES_S 0
-#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
-#define GL_MNG_FWSM_RSV0_S 3
-#define GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3)
+#define GL_MNG_FWSM_FW_MODES_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_FW_MODES_M : E800_GL_MNG_FWSM_FW_MODES_M)
+#define E800_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define E830_GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x3, 0)
+#define GL_MNG_FWSM_RSV0_S_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_RSV0_S : E800_GL_MNG_FWSM_RSV0_S)
+#define E800_GL_MNG_FWSM_RSV0_S 3
+#define E830_GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_FWSM_RSV0_M : E800_GL_MNG_FWSM_RSV0_M)
+#define E800_GL_MNG_FWSM_RSV0_M MAKEMASK(0x7F, 3)
+#define E830_GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
#define GL_MNG_FWSM_RSV1_S 11
@@ -5504,12 +5608,20 @@
#define GL_MNG_HWARB_CTRL 0x000B6130 /* Reset Source: POR */
#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_S 0
#define GL_MNG_HWARB_CTRL_NCSI_ARB_EN_M BIT(0)
-#define GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
-#define GL_MNG_SHA_EXTEND_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND(_i) : E800_GL_MNG_SHA_EXTEND(_i))
+#define E800_GL_MNG_SHA_EXTEND(_i) (0x00083120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_SHA_EXTEND(_i) (0x00083340 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_MAX_INDEX)
+#define E800_GL_MNG_SHA_EXTEND_MAX_INDEX 7
+#define E830_GL_MNG_SHA_EXTEND_MAX_INDEX 11
#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_S 0
#define GL_MNG_SHA_EXTEND_GL_MNG_SHA_EXTEND_M MAKEMASK(0xFFFFFFFF, 0)
-#define GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
-#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7
+#define GL_MNG_SHA_EXTEND_ROM_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM(_i) : E800_GL_MNG_SHA_EXTEND_ROM(_i))
+#define E800_GL_MNG_SHA_EXTEND_ROM(_i) (0x00083160 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_SHA_EXTEND_ROM(_i) (0x000832C0 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define GL_MNG_SHA_EXTEND_ROM_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX : E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX)
+#define E800_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 7
+#define E830_GL_MNG_SHA_EXTEND_ROM_MAX_INDEX 11
#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_S 0
#define GL_MNG_SHA_EXTEND_ROM_GL_MNG_SHA_EXTEND_ROM_M MAKEMASK(0xFFFFFFFF, 0)
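A sketch of the indexed form, assuming the driver's rd32() accessor and ice_debug() logging; the helper itself is hypothetical. Arrays whose element count differs per MAC (8 words on E800, 12 on E830) are walked with the *_MAX_INDEX_BY_MAC bound and the per-element *_BY_MAC address macro.

static void
ice_dump_sha_extend(struct ice_hw *hw)
{
	u32 i;

	/* 0..7 on E800, 0..11 on E830; the bound tracks the MAC type. */
	for (i = 0; i <= GL_MNG_SHA_EXTEND_MAX_INDEX_BY_MAC(hw); i++)
		ice_debug(hw, ICE_DBG_INIT, "SHA_EXTEND[%u] = 0x%08x\n",
		    i, rd32(hw, GL_MNG_SHA_EXTEND_BY_MAC(hw, i)));
}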
#define GL_MNG_SHA_EXTEND_STATUS 0x00083148 /* Reset Source: EMPR */
@@ -5908,8 +6020,8 @@
#define GLPCI_CAPSUP 0x0009DE8C /* Reset Source: PCIR */
#define GLPCI_CAPSUP_PCIE_VER_S 0
#define GLPCI_CAPSUP_PCIE_VER_M BIT(0)
-#define GLPCI_CAPSUP_RESERVED_2_S 1
-#define GLPCI_CAPSUP_RESERVED_2_M BIT(1)
+#define E800_GLPCI_CAPSUP_RESERVED_2_S 1
+#define E800_GLPCI_CAPSUP_RESERVED_2_M BIT(1)
#define GLPCI_CAPSUP_LTR_EN_S 2
#define GLPCI_CAPSUP_LTR_EN_M BIT(2)
#define GLPCI_CAPSUP_TPH_EN_S 3
@@ -6359,9 +6471,9 @@
#define PFPE_MRTEIDXMASK 0x0050A300 /* Reset Source: PFR */
#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
#define PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
-#define PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */
-#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
-#define PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_PFPE_RCVUNEXPECTEDERROR 0x0050A380 /* Reset Source: PFR */
+#define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define E800_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define PFPE_TCPNOWTIMER 0x0050A280 /* Reset Source: PFR */
#define PFPE_TCPNOWTIMER_TCP_NOW_S 0
#define PFPE_TCPNOWTIMER_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
@@ -6430,10 +6542,10 @@
#define VFPE_IPCONFIG0_USEENTIREIDRANGE_M BIT(16)
#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_S 17
#define VFPE_IPCONFIG0_UDP_SRC_PORT_MASK_EN_M BIT(17)
-#define VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
-#define VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255
-#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
-#define VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00509C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define E800_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 255
+#define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_S 0
+#define E800_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define VFPE_TCPNOWTIMER(_VF) (0x00509400 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define VFPE_TCPNOWTIMER_MAX_INDEX 255
#define VFPE_TCPNOWTIMER_TCP_NOW_S 0
@@ -7137,15 +7249,21 @@
#define GLRPB_DHW(_i) (0x000AC000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DHW_MAX_INDEX 15
#define GLRPB_DHW_DHW_TCN_S 0
-#define GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DHW_DHW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DHW_DHW_TCN_M : E800_GLRPB_DHW_DHW_TCN_M)
+#define E800_GLRPB_DHW_DHW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DHW_DHW_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DLW(_i) (0x000AC044 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DLW_MAX_INDEX 15
#define GLRPB_DLW_DLW_TCN_S 0
-#define GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DLW_DLW_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DLW_DLW_TCN_M : E800_GLRPB_DLW_DLW_TCN_M)
+#define E800_GLRPB_DLW_DLW_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DLW_DLW_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DPS(_i) (0x000AC084 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: CORER */
#define GLRPB_DPS_MAX_INDEX 15
#define GLRPB_DPS_DPS_TCN_S 0
-#define GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_DPS_DPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_DPS_DPS_TCN_M : E800_GLRPB_DPS_DPS_TCN_M)
+#define E800_GLRPB_DPS_DPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_DPS_DPS_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_DSI_EN 0x000AC324 /* Reset Source: CORER */
#define GLRPB_DSI_EN_DSI_EN_S 0
#define GLRPB_DSI_EN_DSI_EN_M BIT(0)
@@ -7154,15 +7272,21 @@
#define GLRPB_SHW(_i) (0x000AC120 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SHW_MAX_INDEX 7
#define GLRPB_SHW_SHW_S 0
-#define GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SHW_SHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SHW_SHW_M : E800_GLRPB_SHW_SHW_M)
+#define E800_GLRPB_SHW_SHW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SHW_SHW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_SLW(_i) (0x000AC140 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SLW_MAX_INDEX 7
#define GLRPB_SLW_SLW_S 0
-#define GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SLW_SLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SLW_SLW_M : E800_GLRPB_SLW_SLW_M)
+#define E800_GLRPB_SLW_SLW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SLW_SLW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_SPS(_i) (0x000AC0C4 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLRPB_SPS_MAX_INDEX 7
#define GLRPB_SPS_SPS_TCN_S 0
-#define GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_SPS_SPS_TCN_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_SPS_SPS_TCN_M : E800_GLRPB_SPS_SPS_TCN_M)
+#define E800_GLRPB_SPS_SPS_TCN_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_SPS_SPS_TCN_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_TC_CFG(_i) (0x000AC2A4 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TC_CFG_MAX_INDEX 31
#define GLRPB_TC_CFG_D_POOL_S 0
@@ -7172,11 +7296,15 @@
#define GLRPB_TCHW(_i) (0x000AC330 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TCHW_MAX_INDEX 31
#define GLRPB_TCHW_TCHW_S 0
-#define GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TCHW_TCHW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_TCHW_TCHW_M : E800_GLRPB_TCHW_TCHW_M)
+#define E800_GLRPB_TCHW_TCHW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_TCHW_TCHW_M MAKEMASK(0x3FFFFF, 0)
#define GLRPB_TCLW(_i) (0x000AC3B0 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
#define GLRPB_TCLW_MAX_INDEX 31
#define GLRPB_TCLW_TCLW_S 0
-#define GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define GLRPB_TCLW_TCLW_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLRPB_TCLW_TCLW_M : E800_GLRPB_TCLW_TCLW_M)
+#define E800_GLRPB_TCLW_TCLW_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRPB_TCLW_TCLW_M MAKEMASK(0x3FFFFF, 0)
#define GLQF_APBVT(_i) (0x00450000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
#define GLQF_APBVT_MAX_INDEX 2047
#define GLQF_APBVT_APBVT_S 0
@@ -7189,9 +7317,13 @@
#define GLQF_FD_CLSN1_HITLBCNT_M MAKEMASK(0xFFFFFFFF, 0)
#define GLQF_FD_CNT 0x00460018 /* Reset Source: CORER */
#define GLQF_FD_CNT_FD_GCNT_S 0
-#define GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_GCNT_M : E800_GLQF_FD_CNT_FD_GCNT_M)
+#define E800_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define GLQF_FD_CNT_FD_BCNT_S 16
-#define GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_CNT_FD_BCNT_M : E800_GLQF_FD_CNT_FD_BCNT_M)
+#define E800_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_GLQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define GLQF_FD_CTL 0x00460000 /* Reset Source: CORER */
#define GLQF_FD_CTL_FDLONG_S 0
#define GLQF_FD_CTL_FDLONG_M MAKEMASK(0xF, 0)
@@ -7201,12 +7333,18 @@
#define GLQF_FD_CTL_FLT_ADDR_REPORT_M BIT(5)
#define GLQF_FD_SIZE 0x00460010 /* Reset Source: CORER */
#define GLQF_FD_SIZE_FD_GSIZE_S 0
-#define GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_SIZE_FD_GSIZE_M : E800_GLQF_FD_SIZE_FD_GSIZE_M)
+#define E800_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define GLQF_FD_SIZE_FD_BSIZE_S 16
-#define GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define GLQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FD_SIZE_FD_BSIZE_M : E800_GLQF_FD_SIZE_FD_BSIZE_M)
+#define E800_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define E830_GLQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define GLQF_FDCNT_0 0x00460020 /* Reset Source: CORER */
#define GLQF_FDCNT_0_BUCKETCNT_S 0
-#define GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0)
+#define GLQF_FDCNT_0_BUCKETCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_GLQF_FDCNT_0_BUCKETCNT_M : E800_GLQF_FDCNT_0_BUCKETCNT_M)
+#define E800_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_FDCNT_0_BUCKETCNT_M MAKEMASK(0xFFFF, 0)
#define GLQF_FDCNT_0_CNT_NOT_VLD_S 31
#define GLQF_FDCNT_0_CNT_NOT_VLD_M BIT(31)
#define GLQF_FDEVICTENA(_i) (0x00452000 + ((_i) * 4)) /* _i=0...3 */ /* Reset Source: CORER */
@@ -7430,22 +7568,34 @@
#define GLQF_PROF2TC_REGION_7_M MAKEMASK(0x7, 29)
#define PFQF_FD_CNT 0x00460180 /* Reset Source: CORER */
#define PFQF_FD_CNT_FD_GCNT_S 0
-#define PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_CNT_FD_GCNT_M : E800_PFQF_FD_CNT_FD_GCNT_M)
+#define E800_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_CNT_FD_BCNT_S 16
-#define PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_CNT_FD_BCNT_M : E800_PFQF_FD_CNT_FD_BCNT_M)
+#define E800_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define PFQF_FD_ENA 0x0043A000 /* Reset Source: CORER */
#define PFQF_FD_ENA_FD_ENA_S 0
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100 /* Reset Source: CORER */
#define PFQF_FD_SIZE_FD_GSIZE_S 0
-#define PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_GSIZE_M : E800_PFQF_FD_SIZE_FD_GSIZE_M)
+#define E800_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_SIZE_FD_BSIZE_S 16
-#define PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SIZE_FD_BSIZE_M : E800_PFQF_FD_SIZE_FD_BSIZE_M)
+#define E800_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define PFQF_FD_SUBTRACT 0x00460200 /* Reset Source: CORER */
#define PFQF_FD_SUBTRACT_FD_GCNT_S 0
-#define PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define PFQF_FD_SUBTRACT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SUBTRACT_FD_GCNT_M : E800_PFQF_FD_SUBTRACT_FD_GCNT_M)
+#define E800_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_FD_SUBTRACT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define PFQF_FD_SUBTRACT_FD_BCNT_S 16
-#define PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define PFQF_FD_SUBTRACT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_PFQF_FD_SUBTRACT_FD_BCNT_M : E800_PFQF_FD_SUBTRACT_FD_BCNT_M)
+#define E800_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0x7FFF, 16)
+#define E830_PFQF_FD_SUBTRACT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define PFQF_HLUT(_i) (0x00430000 + ((_i) * 64)) /* _i=0...511 */ /* Reset Source: CORER */
#define PFQF_HLUT_MAX_INDEX 511
#define PFQF_HLUT_LUT0_S 0
@@ -7673,20 +7823,20 @@
#define GLPRT_AORCL_AORCL_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_BPRCH(_i) (0x00381384 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPRCH_MAX_INDEX 7
-#define GLPRT_BPRCH_UPRCH_S 0
-#define GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define E800_GLPRT_BPRCH_UPRCH_S 0
+#define E800_GLPRT_BPRCH_UPRCH_M MAKEMASK(0xFF, 0)
#define GLPRT_BPRCL(_i) (0x00381380 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPRCL_MAX_INDEX 7
-#define GLPRT_BPRCL_UPRCH_S 0
-#define GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_BPRCL_UPRCH_S 0
+#define E800_GLPRT_BPRCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_BPTCH(_i) (0x00381244 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPTCH_MAX_INDEX 7
-#define GLPRT_BPTCH_UPRCH_S 0
-#define GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
+#define E800_GLPRT_BPTCH_UPRCH_S 0
+#define E800_GLPRT_BPTCH_UPRCH_M MAKEMASK(0xFF, 0)
#define GLPRT_BPTCL(_i) (0x00381240 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_BPTCL_MAX_INDEX 7
-#define GLPRT_BPTCL_UPRCH_S 0
-#define GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_BPTCL_UPRCH_S 0
+#define E800_GLPRT_BPTCL_UPRCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLPRT_CRCERRS(_i) (0x00380100 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_CRCERRS_MAX_INDEX 7
#define GLPRT_CRCERRS_CRCERRS_S 0
@@ -8001,8 +8151,8 @@
#define GLPRT_UPTCH_UPTCH_M MAKEMASK(0xFF, 0)
#define GLPRT_UPTCL(_i) (0x003811C0 + ((_i) * 8)) /* _i=0...7 */ /* Reset Source: CORER */
#define GLPRT_UPTCL_MAX_INDEX 7
-#define GLPRT_UPTCL_VUPTCH_S 0
-#define GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E800_GLPRT_UPTCL_VUPTCH_S 0
+#define E800_GLPRT_UPTCL_VUPTCH_M MAKEMASK(0xFFFFFFFF, 0)
#define GLSTAT_ACL_CNT_0_H(_i) (0x00388004 + ((_i) * 8)) /* _i=0...511 */ /* Reset Source: CORER */
#define GLSTAT_ACL_CNT_0_H_MAX_INDEX 511
#define GLSTAT_ACL_CNT_0_H_CNT_MSB_S 0
@@ -8897,9 +9047,13 @@
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
#define VSIQF_FD_CNT_MAX_INDEX 767
#define VSIQF_FD_CNT_FD_GCNT_S 0
-#define VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_CNT_FD_GCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_GCNT_M : E800_VSIQF_FD_CNT_FD_GCNT_M)
+#define E800_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0x3FFF, 0)
+#define E830_VSIQF_FD_CNT_FD_GCNT_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
-#define VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_FD_CNT_FD_BCNT_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_CNT_FD_BCNT_M : E800_VSIQF_FD_CNT_FD_BCNT_M)
+#define E800_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0x3FFF, 16)
+#define E830_VSIQF_FD_CNT_FD_BCNT_M MAKEMASK(0xFFFF, 16)
#define VSIQF_FD_CTL1(_VSI) (0x00411000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_CTL1_MAX_INDEX 767
#define VSIQF_FD_CTL1_FLT_ENA_S 0
@@ -8923,9 +9077,13 @@
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_FD_SIZE_MAX_INDEX 767
#define VSIQF_FD_SIZE_FD_GSIZE_S 0
-#define VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
+#define VSIQF_FD_SIZE_FD_GSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_GSIZE_M : E800_VSIQF_FD_SIZE_FD_GSIZE_M)
+#define E800_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0x3FFF, 0)
+#define E830_VSIQF_FD_SIZE_FD_GSIZE_M MAKEMASK(0xFFFF, 0)
#define VSIQF_FD_SIZE_FD_BSIZE_S 16
-#define VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
+#define VSIQF_FD_SIZE_FD_BSIZE_M_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VSIQF_FD_SIZE_FD_BSIZE_M : E800_VSIQF_FD_SIZE_FD_BSIZE_M)
+#define E800_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0x3FFF, 16)
+#define E830_VSIQF_FD_SIZE_FD_BSIZE_M MAKEMASK(0xFFFF, 16)
#define VSIQF_HASH_CTL(_VSI) (0x0040D000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
#define VSIQF_HASH_CTL_MAX_INDEX 767
#define VSIQF_HASH_CTL_HASH_LUT_SEL_S 0
@@ -9049,7 +9207,9 @@
#define PFPM_WUS_FLX7_M BIT(23)
#define PFPM_WUS_FW_RST_WK_S 31
#define PFPM_WUS_FW_RST_WK_M BIT(31)
-#define PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAH_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAH(_i) : E800_PRTPM_SAH(_i))
+#define E800_PRTPM_SAH(_i) (0x001E3BA0 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define E830_PRTPM_SAH(_i) (0x001E2380 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAH_MAX_INDEX 3
#define PRTPM_SAH_PFPM_SAH_S 0
#define PRTPM_SAH_PFPM_SAH_M MAKEMASK(0xFFFF, 0)
@@ -9059,7 +9219,9 @@
#define PRTPM_SAH_MC_MAG_EN_M BIT(30)
#define PRTPM_SAH_AV_S 31
#define PRTPM_SAH_AV_M BIT(31)
-#define PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define PRTPM_SAL_BY_MAC(hw, _i) ((hw)->mac_type == ICE_MAC_E830 ? E830_PRTPM_SAL(_i) : E800_PRTPM_SAL(_i))
+#define E800_PRTPM_SAL(_i) (0x001E3B20 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
+#define E830_PRTPM_SAL(_i) (0x001E2300 + ((_i) * 32)) /* _i=0...3 */ /* Reset Source: PFR */
#define PRTPM_SAL_MAX_INDEX 3
#define PRTPM_SAL_PFPM_SAL_S 0
#define PRTPM_SAL_PFPM_SAL_M MAKEMASK(0xFFFFFFFF, 0)
@@ -9072,7 +9234,9 @@
#define GLPE_CQM_FUNC_INVALIDATE_VM_VF_TYPE_M MAKEMASK(0x3, 13)
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_S 31
#define GLPE_CQM_FUNC_INVALIDATE_ENABLE_M BIT(31)
-#define VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
+#define VFPE_MRTEIDXMASK_BY_MAC(hw, _VF) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFPE_MRTEIDXMASK(_VF) : E800_VFPE_MRTEIDXMASK)
+#define E800_VFPE_MRTEIDXMASK 0x00009000 /* Reset Source: PFR */
+#define E830_VFPE_MRTEIDXMASK(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_S 0
#define VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
#define GLTSYN_HH_DLAY 0x0008881C /* Reset Source: CORER */
@@ -9175,8 +9339,12 @@
#define VFINT_ITR0_MAX_INDEX 2
#define VFINT_ITR0_INTERVAL_S 0
#define VFINT_ITR0_INTERVAL_M MAKEMASK(0xFFF, 0)
-#define VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
-#define VFINT_ITRN_MAX_INDEX 2
+#define VFINT_ITRN_BY_MAC(hw, _i, _j) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN(_i, _j) : E800_VFINT_ITRN(_i, _j))
+#define E800_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 12)) /* _i=0...2, _j=0...63 */ /* Reset Source: CORER */
+#define E830_VFINT_ITRN(_i, _j) (0x00002800 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...2 */ /* Reset Source: CORER */
+#define VFINT_ITRN_MAX_INDEX_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? E830_VFINT_ITRN_MAX_INDEX : E800_VFINT_ITRN_MAX_INDEX)
+#define E800_VFINT_ITRN_MAX_INDEX 2
+#define E830_VFINT_ITRN_MAX_INDEX 15
#define VFINT_ITRN_INTERVAL_S 0
#define VFINT_ITRN_INTERVAL_M MAKEMASK(0xFFF, 0)
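A hedged sketch of the two-index form; wr32() is assumed to be the driver's usual write accessor and the helper is hypothetical. Note that the stride and the index semantics differ between E800 and E830 (see the per-MAC comments above), so callers must bound the indices with VFINT_ITRN_MAX_INDEX_BY_MAC(hw) for the MAC in use.

static void
ice_vf_write_itrn(struct ice_hw *hw, u32 i, u32 j, u32 interval)
{
	/* Both indices pass straight through to the per-MAC macro. */
	wr32(hw, VFINT_ITRN_BY_MAC(hw, i, j),
	    interval & VFINT_ITRN_INTERVAL_M);
}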
#define QRX_TAIL1(_QRX) (0x00002000 + ((_QRX) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
@@ -9471,13 +9639,13 @@
#define VFPE_IPCONFIG01_USEENTIREIDRANGE_M BIT(16)
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_S 17
#define VFPE_IPCONFIG01_UDP_SRC_PORT_MASK_EN_M BIT(17)
-#define VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
-#define VFPE_MRTEIDXMASK1_MAX_INDEX 255
-#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
-#define VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
-#define VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
-#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
-#define VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
+#define E800_VFPE_MRTEIDXMASK1(_VF) (0x00509800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: PFR */
+#define E800_VFPE_MRTEIDXMASK1_MAX_INDEX 255
+#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_S 0
+#define E800_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_M MAKEMASK(0x1F, 0)
+#define E800_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset Source: VFR */
+#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_S 0
+#define E800_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_M MAKEMASK(0xFFFFFF, 0)
#define VFPE_TCPNOWTIMER1 0x0000A800 /* Reset Source: VFR */
#define VFPE_TCPNOWTIMER1_TCP_NOW_S 0
#define VFPE_TCPNOWTIMER1_TCP_NOW_M MAKEMASK(0xFFFFFFFF, 0)
@@ -9486,5 +9654,1646 @@
#define VFPE_WQEALLOC1_PEQPID_M MAKEMASK(0x3FFFF, 0)
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_S 20
#define VFPE_WQEALLOC1_WQE_DESC_INDEX_M MAKEMASK(0xFFF, 20)
+#define E830_GL_QRX_CONTEXT_CTL 0x00296640 /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_S 0
+#define E830_GL_QRX_CONTEXT_CTL_QUEUE_ID_M MAKEMASK(0xFFF, 0)
+#define E830_GL_QRX_CONTEXT_CTL_CMD_S 16
+#define E830_GL_QRX_CONTEXT_CTL_CMD_M MAKEMASK(0x7, 16)
+#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_S 19
+#define E830_GL_QRX_CONTEXT_CTL_CMD_EXEC_M BIT(19)
+#define E830_GL_QRX_CONTEXT_DATA(_i) (0x00296620 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_DATA_MAX_INDEX 7
+#define E830_GL_QRX_CONTEXT_DATA_DATA_S 0
+#define E830_GL_QRX_CONTEXT_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_QRX_CONTEXT_STAT 0x00296644 /* Reset Source: CORER */
+#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_S 0
+#define E830_GL_QRX_CONTEXT_STAT_CMD_IN_PROG_M BIT(0)
+#define E830_GL_RCB_INTERNAL(_i) (0x00122600 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RCB_INTERNAL_MAX_INDEX 63
+#define E830_GL_RCB_INTERNAL_INTERNAL_S 0
+#define E830_GL_RCB_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_RLAN_INTERNAL(_i) (0x00296700 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RLAN_INTERNAL_MAX_INDEX 63
+#define E830_GL_RLAN_INTERNAL_INTERNAL_S 0
+#define E830_GL_RLAN_INTERNAL_INTERNAL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS 0x002D30F0 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_S 0
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_DBLQ_M MAKEMASK(0xFF, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_S 8
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_FDBL_M MAKEMASK(0xFF, 8)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_S 16
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 16)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS 0x002D30F4 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_S 0
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_DBLQ_M MAKEMASK(0x3F, 0)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_S 6
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_FDBL_M MAKEMASK(0x3F, 6)
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_S 12
+#define E830_GLPQMDBL_PQMDBL_IN_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 12)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS 0x002D30F8 /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_S 0
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_DBLQ_FDBL_M MAKEMASK(0xFF, 0)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_S 8
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_MAX_CREDITS_TXT_M MAKEMASK(0xFF, 8)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS 0x002D30FC /* Reset Source: CORER */
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_S 0
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_DBLQ_FDBL_M MAKEMASK(0x3F, 0)
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_S 6
+#define E830_GLPQMDBL_PQMDBL_OUT_WRR_WEIGHTS_TXT_M MAKEMASK(0x3F, 6)
+#define E830_GLQTX_TXTIME_DBELL_LSB(_DBQM) (0x002E0000 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB(_DBQM) (0x002E0004 + ((_DBQM) * 8)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_S 18
+#define E830_GLTCLAN_CQ_CNTX2_SRC_VSI_M MAKEMASK(0x3FF, 18)
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS 0x002D320C /* Reset Source: CORER */
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_S 0
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_DBL_M MAKEMASK(0xFF, 0)
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_S 8
+#define E830_GLTXTIME_DBL_COMP_WRR_MAX_CREDITS_COMP_M MAKEMASK(0xFF, 8)
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS 0x002D3210 /* Reset Source: CORER */
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_S 0
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_DBL_M MAKEMASK(0x3F, 0)
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_S 6
+#define E830_GLTXTIME_DBL_COMP_WRR_WEIGHTS_COMP_M MAKEMASK(0x3F, 6)
+#define E830_GLTXTIME_FETCH_PROFILE(_i, _j) (0x002D3500 + ((_i) * 4 + (_j) * 64)) /* _i=0...15, _j=0...15 */ /* Reset Source: CORER */
+#define E830_GLTXTIME_FETCH_PROFILE_MAX_INDEX 15
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_S 0
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_TS_DESC_M MAKEMASK(0x1FF, 0)
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_S 9
+#define E830_GLTXTIME_FETCH_PROFILE_FETCH_FIFO_TRESH_M MAKEMASK(0x7F, 9)
+#define E830_GLTXTIME_OUTST_REQ_CNTL 0x002D3214 /* Reset Source: CORER */
+#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_S 0
+#define E830_GLTXTIME_OUTST_REQ_CNTL_THRESHOLD_M MAKEMASK(0x3FF, 0)
+#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_S 10
+#define E830_GLTXTIME_OUTST_REQ_CNTL_SNAPSHOT_M MAKEMASK(0x3FF, 10)
+#define E830_GLTXTIME_QTX_CNTX_CTL 0x002D3204 /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_S 0
+#define E830_GLTXTIME_QTX_CNTX_CTL_QUEUE_ID_M MAKEMASK(0x7FF, 0)
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_S 16
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_M MAKEMASK(0x7, 16)
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_S 19
+#define E830_GLTXTIME_QTX_CNTX_CTL_CMD_EXEC_M BIT(19)
+#define E830_GLTXTIME_QTX_CNTX_DATA(_i) (0x002D3104 + ((_i) * 4)) /* _i=0...6 */ /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_DATA_MAX_INDEX 6
+#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_S 0
+#define E830_GLTXTIME_QTX_CNTX_DATA_DATA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTXTIME_QTX_CNTX_STAT 0x002D3208 /* Reset Source: CORER */
+#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_S 0
+#define E830_GLTXTIME_QTX_CNTX_STAT_CMD_IN_PROG_M BIT(0)
+#define E830_GLTXTIME_TS_CFG 0x002D3100 /* Reset Source: CORER */
+#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_S 0
+#define E830_GLTXTIME_TS_CFG_TXTIME_ENABLE_M BIT(0)
+#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_S 2
+#define E830_GLTXTIME_TS_CFG_STORAGE_MODE_M MAKEMASK(0x7, 2)
+#define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_S 5
+#define E830_GLTXTIME_TS_CFG_PIPE_LATENCY_STATIC_M MAKEMASK(0x1FFF, 5)
+#define E830_MBX_PF_DEC_ERR 0x00234100 /* Reset Source: CORER */
+#define E830_MBX_PF_DEC_ERR_DEC_ERR_S 0
+#define E830_MBX_PF_DEC_ERR_DEC_ERR_M BIT(0)
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH 0x00234000 /* Reset Source: CORER */
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_S 0
+#define E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH_TRESH_M MAKEMASK(0x3FF, 0)
+#define E830_MBX_VF_DEC_TRIG(_VF) (0x00233800 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_MBX_VF_DEC_TRIG_MAX_INDEX 255
+#define E830_MBX_VF_DEC_TRIG_DEC_S 0
+#define E830_MBX_VF_DEC_TRIG_DEC_M MAKEMASK(0x3FF, 0)
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(_VF) (0x00233000 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MAX_INDEX 255
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_S 0
+#define E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT_MSGS_M MAKEMASK(0x3FF, 0)
+#define E830_GLRCB_AG_ARBITER_CONFIG 0x00122500 /* Reset Source: CORER */
+#define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_S 0
+#define E830_GLRCB_AG_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0xFFFFF, 0)
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG 0x00122518 /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_S 0
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_CREDIT_MAX_M MAKEMASK(0x7F, 0)
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_S 7
+#define E830_GLRCB_AG_DCB_ARBITER_CONFIG_STRICT_WRR_M BIT(7)
+#define E830_GLRCB_AG_DCB_NODE_CONFIG(_i) (0x00122510 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_MAX_INDEX 1
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_S 0
+#define E830_GLRCB_AG_DCB_NODE_CONFIG_BWSHARE_M MAKEMASK(0xF, 0)
+#define E830_GLRCB_AG_DCB_NODE_STATE(_i) (0x00122508 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_DCB_NODE_STATE_MAX_INDEX 1
+#define E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_S 0
+#define E830_GLRCB_AG_DCB_NODE_STATE_CREDITS_M MAKEMASK(0xFF, 0)
+#define E830_GLRCB_AG_NODE_CONFIG(_i) (0x001224E0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_NODE_CONFIG_MAX_INDEX 7
+#define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_S 0
+#define E830_GLRCB_AG_NODE_CONFIG_BWSHARE_M MAKEMASK(0x7F, 0)
+#define E830_GLRCB_AG_NODE_STATE(_i) (0x001224C0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLRCB_AG_NODE_STATE_MAX_INDEX 7
+#define E830_GLRCB_AG_NODE_STATE_CREDITS_S 0
+#define E830_GLRCB_AG_NODE_STATE_CREDITS_M MAKEMASK(0xFFFFF, 0)
+#define E830_PRT_AG_PORT_FC_MAP 0x00122520 /* Reset Source: CORER */
+#define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_S 0
+#define E830_PRT_AG_PORT_FC_MAP_AG_BITMAP_M MAKEMASK(0xFF, 0)
+#define E830_GL_FW_LOGS_CTL 0x000827F8 /* Reset Source: POR */
+#define E830_GL_FW_LOGS_CTL_PAGE_SELECT_S 0
+#define E830_GL_FW_LOGS_CTL_PAGE_SELECT_M MAKEMASK(0x3FF, 0)
+#define E830_GL_FW_LOGS_STS 0x000827FC /* Reset Source: POR */
+#define E830_GL_FW_LOGS_STS_MAX_PAGE_S 0
+#define E830_GL_FW_LOGS_STS_MAX_PAGE_M MAKEMASK(0x3FF, 0)
+#define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_S 31
+#define E830_GL_FW_LOGS_STS_FW_LOGS_ENA_M BIT(31)
+#define E830_GL_RTCTL 0x000827F0 /* Reset Source: POR */
+#define E830_GL_RTCTL_RTCTL_S 0
+#define E830_GL_RTCTL_RTCTL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_RTCTM 0x000827F4 /* Reset Source: POR */
+#define E830_GL_RTCTM_RTCTM_S 0
+#define E830_GL_RTCTM_RTCTM_M MAKEMASK(0xFFFF, 0)
+#define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_S 3
+#define E830_GLGEN_RTRIG_EMPR_WO_GLOBR_M BIT(3)
+#define E830_GLPE_TSCD_NUM_PQS 0x0051E2FC /* Reset Source: CORER */
+#define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_S 0
+#define E830_GLPE_TSCD_NUM_PQS_NUM_PQS_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH2 0x0009972C /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT4_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH2_PORT5_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH3 0x00099730 /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT6_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH3_PORT7_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PORT_TIMER_SEL(_i) (0x00088BE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_PORT_TIMER_SEL_MAX_INDEX 7
+#define E830_PORT_TIMER_SEL_TIMER_SEL_S 0
+#define E830_PORT_TIMER_SEL_TIMER_SEL_M BIT(0)
+#define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_S 22
+#define E830_GL_RDPU_CNTRL_CHECKSUM_COMPLETE_INV_M BIT(22)
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT 0x001E2280 /* Reset Source: GLOBR */
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_S 0
+#define E830_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_PRTMAC_SHORT_PAC_DROP_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTTSYN_TXTIME_H(_i) (0x001E5800 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */
+#define E830_PRTTSYN_TXTIME_H_MAX_INDEX 63
+#define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_S 0
+#define E830_PRTTSYN_TXTIME_H_TX_TIMESTAMP_HIGH_M MAKEMASK(0xFF, 0)
+#define E830_PRTTSYN_TXTIME_L(_i) (0x001E5000 + ((_i) * 32)) /* _i=0...63 */ /* Reset Source: GLOBR */
+#define E830_PRTTSYN_TXTIME_L_MAX_INDEX 63
+#define E830_PRTTSYN_TXTIME_L_TX_VALID_S 0
+#define E830_PRTTSYN_TXTIME_L_TX_VALID_M BIT(0)
+#define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_S 1
+#define E830_PRTTSYN_TXTIME_L_TX_TIMESTAMP_LOW_M MAKEMASK(0x7FFFFFFF, 1)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN 0x000FD200 /* Reset Source: CORER */
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 0
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(0)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_S 1
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PF_TSYN_PKT_RANGE_VIOLATION_M BIT(1)
+#define E830_GL_MDET_RX_FIFO 0x00296840 /* Reset Source: CORER */
+#define E830_GL_MDET_RX_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_RX_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_RX_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_RX_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_RX_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_RX_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_RX_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_RX_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_RX_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_RX_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_RX_FIFO_VALID_S 21
+#define E830_GL_MDET_RX_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_RX_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_RX_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_RX_PF_CNT(_i) (0x00296800 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_RX_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_RX_PF_CNT_CNT_S 0
+#define E830_GL_MDET_RX_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_RX_VF(_i) (0x00296820 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_RX_VF_MAX_INDEX 7
+#define E830_GL_MDET_RX_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_RX_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_PQM_FIFO 0x002D4B00 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_PQM_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_PQM_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_PQM_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_PQM_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_PQM_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_PQM_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_PQM_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_PQM_PF_CNT(_i) (0x002D4AC0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_PQM_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_PQM_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_PQM_VF(_i) (0x002D4AE0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_PQM_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_PQM_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TCLAN_FIFO 0x000FCFD0 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_TCLAN_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_TCLAN_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_TCLAN_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_TCLAN_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_TCLAN_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_TCLAN_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_TCLAN_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_TCLAN_PF_CNT(_i) (0x000FCF90 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_TCLAN_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TCLAN_VF(_i) (0x000FCFB0 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TCLAN_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_TCLAN_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TDPU_FIFO 0x00049D80 /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_TX_TDPU_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_TX_TDPU_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_TX_TDPU_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_TX_TDPU_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_TX_TDPU_FIFO_VALID_S 21
+#define E830_GL_MDET_TX_TDPU_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_TX_TDPU_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_TX_TDPU_PF_CNT(_i) (0x00049D40 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_S 0
+#define E830_GL_MDET_TX_TDPU_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_TX_TDPU_VF(_i) (0x00049D60 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_TX_TDPU_VF_MAX_INDEX 7
+#define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_TX_TDPU_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH(_i) (0x00083400 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_MAX_INDEX 11
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_S 0
+#define E830_GL_MNG_ECDSA_PUBKEY_HIGH_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW(_i) (0x00083300 + ((_i) * 4)) /* _i=0...11 */ /* Reset Source: EMPR */
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_MAX_INDEX 11
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_S 0
+#define E830_GL_MNG_ECDSA_PUBKEY_LOW_GL_MNG_ECDSA_PUBKEY_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_PPRS_RX_SIZE_CTRL_0(_i) (0x00084900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_0_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_1(_i) (0x00085900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_1_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_2(_i) (0x00086900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_2_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_PPRS_RX_SIZE_CTRL_3(_i) (0x00087900 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_INDEX 1
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_S 16
+#define E830_GL_PPRS_RX_SIZE_CTRL_3_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP 0x00200740 /* Reset Source: CORER */
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00200744 /* Reset Source: CORER */
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24
+#define E830_GL_RPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_PROT_ID_MAP(_i) (0x00200800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GL_RPRS_PROT_ID_MAP_MAX_INDEX 255
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_S 0
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_S 8
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_S 16
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16)
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_S 24
+#define E830_GL_RPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL(_i) (0x00201000 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M MAKEMASK(0x3, 10)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28)
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30
+#define E830_GL_RPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL 0x00200748 /* Reset Source: CORER */
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4)
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5
+#define E830_GL_RPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP 0x00203A04 /* Reset Source: CORER */
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_S 0
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_S 8
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV4_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_S 16
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_S 24
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_IP_IPV6_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP 0x00203A08 /* Reset Source: CORER */
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_S 0
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_S 8
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_TCP_PROT_ID_1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_S 16
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_0_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_S 24
+#define E830_GL_TPRS_CSUM_PROT_ID_CFG_UDP_TCP_UDP_PROT_ID_1_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_PROT_ID_MAP(_i) (0x00202200 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GL_TPRS_PROT_ID_MAP_MAX_INDEX 255
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_S 0
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID0_M MAKEMASK(0xFF, 0)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_S 8
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID1_M MAKEMASK(0xFF, 8)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_S 16
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID2_M MAKEMASK(0xFF, 16)
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_S 24
+#define E830_GL_TPRS_PROT_ID_MAP_PROT_ID3_M MAKEMASK(0xFF, 24)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL(_i) (0x00202A00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_MAX_INDEX 63
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_S 0
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_0_M MAKEMASK(0x3, 0)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_S 2
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_1_M MAKEMASK(0x3, 2)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_S 4
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_2_M MAKEMASK(0x3, 4)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_S 6
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_3_M MAKEMASK(0x3, 6)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_S 8
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_4_M MAKEMASK(0x3, 8)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_S 10
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_5_M MAKEMASK(0x3, 10)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_S 12
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_6_M MAKEMASK(0x3, 12)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_S 14
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_7_M MAKEMASK(0x3, 14)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_S 16
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_8_M MAKEMASK(0x3, 16)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_S 18
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_9_M MAKEMASK(0x3, 18)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_S 20
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_10_M MAKEMASK(0x3, 20)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_S 22
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_11_M MAKEMASK(0x3, 22)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_S 24
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_12_M MAKEMASK(0x3, 24)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_S 26
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_13_M MAKEMASK(0x3, 26)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_S 28
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_14_M MAKEMASK(0x3, 28)
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_S 30
+#define E830_GL_TPRS_PROT_ID_MAP_PRFL_PTYPE_PRFL_15_M MAKEMASK(0x3, 30)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL 0x00203A00 /* Reset Source: CORER */
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_S 0
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_0_EN_M BIT(0)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_S 1
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_UDP_LEN_1_EN_M BIT(1)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_S 2
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_0_M BIT(2)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_S 3
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_LEN_1_M BIT(3)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_S 4
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_0_M BIT(4)
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_S 5
+#define E830_GL_TPRS_VALIDATE_CHECKS_CTL_VALIDATE_L3_L4_COHERENT_1_M BIT(5)
+#define E830_PRT_TDPU_TX_SIZE_CTRL 0x00049D20 /* Reset Source: CORER */
+#define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_S 16
+#define E830_PRT_TDPU_TX_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL 0x00099740 /* Reset Source: CORER */
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_S 16
+#define E830_PRT_TPB_RX_LB_SIZE_CTRL_MAX_HEADER_SIZE_M MAKEMASK(0x3FF, 16)
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE(_DBQM) (0x04000008 + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE(_DBQM) (0x0400000C + ((_DBQM) * 4096)) /* _i=0...16383 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_MAX_INDEX 16383
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB_PAGE_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_S 8
+#define E830_PF0INT_OICR_PSM_PAGE_PTM_COMP_M BIT(8)
+#define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_S 9
+#define E830_PF0INT_OICR_PSM_PAGE_PQM_DBL_TO_M BIT(9)
+#define E830_PF0INT_OICR_PSM_PAGE_RSV5_S 10
+#define E830_PF0INT_OICR_PSM_PAGE_RSV5_M BIT(10)
+#define E830_GL_HIBA(_i) (0x00081000 + ((_i) * 4)) /* _i=0...1023 */ /* Reset Source: EMPR */
+#define E830_GL_HIBA_MAX_INDEX 1023
+#define E830_GL_HIBA_GL_HIBA_S 0
+#define E830_GL_HIBA_GL_HIBA_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_HICR 0x00082040 /* Reset Source: EMPR */
+#define E830_GL_HICR_C_S 1
+#define E830_GL_HICR_C_M BIT(1)
+#define E830_GL_HICR_SV_S 2
+#define E830_GL_HICR_SV_M BIT(2)
+#define E830_GL_HICR_EV_S 3
+#define E830_GL_HICR_EV_M BIT(3)
+#define E830_GL_HICR_EN 0x00082044 /* Reset Source: EMPR */
+#define E830_GL_HICR_EN_EN_S 0
+#define E830_GL_HICR_EN_EN_M BIT(0)
+#define E830_GL_HIDA(_i) (0x00082000 + ((_i) * 4)) /* _i=0...15 */ /* Reset Source: EMPR */
+#define E830_GL_HIDA_MAX_INDEX 15
+#define E830_GL_HIDA_GL_HIDB_S 0
+#define E830_GL_HIDA_GL_HIDB_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_0_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_1_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_2_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_3_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_4_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_5_SPARE_M MAKEMASK(0xF, 18)
+#define E830_GLFLXP_RXDID_FLX_WRD_6(_i) (0x0045CE00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_6_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_6_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_6_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_6_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_6_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_6_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GLFLXP_RXDID_FLX_WRD_7(_i) (0x0045CF00 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_7_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_7_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_7_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_7_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_7_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_7_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GLFLXP_RXDID_FLX_WRD_8(_i) (0x0045D500 + ((_i) * 4)) /* _i=0...63 */ /* Reset Source: CORER */
+#define E830_GLFLXP_RXDID_FLX_WRD_8_MAX_INDEX 63
+#define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_S 0
+#define E830_GLFLXP_RXDID_FLX_WRD_8_PROT_MDID_M MAKEMASK(0xFF, 0)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_S 8
+#define E830_GLFLXP_RXDID_FLX_WRD_8_EXTRACTION_OFFSET_M MAKEMASK(0x3FF, 8)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_S 18
+#define E830_GLFLXP_RXDID_FLX_WRD_8_L2TAG_OVRD_EN_M BIT(18)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_S 19
+#define E830_GLFLXP_RXDID_FLX_WRD_8_SPARE_M MAKEMASK(0x7, 19)
+#define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_S 30
+#define E830_GLFLXP_RXDID_FLX_WRD_8_RXDID_OPCODE_M MAKEMASK(0x3, 30)
+#define E830_GL_FW_LOGS(_i) (0x00082800 + ((_i) * 4)) /* _i=0...255 */ /* Reset Source: POR */
+#define E830_GL_FW_LOGS_MAX_INDEX 255
+#define E830_GL_FW_LOGS_GL_FW_LOGS_S 0
+#define E830_GL_FW_LOGS_GL_FW_LOGS_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_FWSTS_FWABS_S 10
+#define E830_GL_FWSTS_FWABS_M MAKEMASK(0x3, 10)
+#define E830_GL_FWSTS_FW_FAILOVER_TRIG_S 12
+#define E830_GL_FWSTS_FW_FAILOVER_TRIG_M BIT(12)
+#define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_S 19
+#define E830_GLGEN_RSTAT_EMPR_WO_GLOBR_CNT_M MAKEMASK(0x3, 19)
+#define E830_GLGEN_RSTAT_EMPR_TYPE_S 21
+#define E830_GLGEN_RSTAT_EMPR_TYPE_M BIT(21)
+#define E830_GLPCI_PLATFORM_INFO 0x0009DDC4 /* Reset Source: POR */
+#define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_S 0
+#define E830_GLPCI_PLATFORM_INFO_PLATFORM_TYPE_M MAKEMASK(0xFF, 0)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_S 21
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_FROM_Q_NOT_ALLOWED_M BIT(21)
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_S 22
+#define E830_GL_MDCK_TDAT_TCLAN_TSYN_PKT_RANGE_VIOLATION_M BIT(22)
+#define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_S 23
+#define E830_GL_MDCK_TDAT_TCLAN_DESC_TYPE_ACL_DTYPE_NOT_ALLOWED_M BIT(23)
+#define E830_GL_TPB_LOCAL_TOPO 0x000996F4 /* Reset Source: CORER */
+#define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_S 0
+#define E830_GL_TPB_LOCAL_TOPO_ALLOW_TOPO_OVERRIDE_M BIT(0)
+#define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_S 1
+#define E830_GL_TPB_LOCAL_TOPO_TOPO_VAL_M MAKEMASK(0x3, 1)
+#define E830_GL_TPB_PM_RESET 0x000996F0 /* Reset Source: CORER */
+#define E830_GL_TPB_PM_RESET_MAC_PM_RESET_S 0
+#define E830_GL_TPB_PM_RESET_MAC_PM_RESET_M BIT(0)
+#define E830_GL_TPB_PM_RESET_RPB_PM_RESET_S 1
+#define E830_GL_TPB_PM_RESET_RPB_PM_RESET_M BIT(1)
+#define E830_GLTPB_100G_MAC_FC_THRESH1 0x00099724 /* Reset Source: CORER */
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_S 0
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_S 16
+#define E830_GLTPB_100G_MAC_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH0 0x0009963C /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT0_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH0_PORT1_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GLTPB_100G_RPB_FC_THRESH1 0x00099728 /* Reset Source: CORER */
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_S 0
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT2_FC_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_S 16
+#define E830_GLTPB_100G_RPB_FC_THRESH1_PORT3_FC_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_S 12
+#define E830_GL_UFUSE_SOC_MAX_PORT_SPEED_M MAKEMASK(0xFFFF, 12)
+#define E830_PF0INT_OICR_PSM_PTM_COMP_S 8
+#define E830_PF0INT_OICR_PSM_PTM_COMP_M BIT(8)
+#define E830_PF0INT_OICR_PSM_PQM_DBL_TO_S 9
+#define E830_PF0INT_OICR_PSM_PQM_DBL_TO_M BIT(9)
+#define E830_PF0INT_OICR_PSM_RSV5_S 10
+#define E830_PF0INT_OICR_PSM_RSV5_M BIT(10)
+#define E830_PFINT_OICR_PTM_COMP_S 8
+#define E830_PFINT_OICR_PTM_COMP_M BIT(8)
+#define E830_PFINT_OICR_PQM_DBL_TO_S 9
+#define E830_PFINT_OICR_PQM_DBL_TO_M BIT(9)
+#define E830_PFINT_OICR_RSV5_S 10
+#define E830_PFINT_OICR_RSV5_M BIT(10)
+#define E830_QRX_CTRL_IDE_S 27
+#define E830_QRX_CTRL_IDE_M BIT(27)
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA 0x001E3854 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH 0x001E3864 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA 0x001E3858 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH 0x001E3868 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA 0x001E385C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH 0x001E386C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA 0x001E3860 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_200G_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH 0x001E3870 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16
+#define E830_PRTMAC_200G_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_COMMAND_CONFIG 0x001E3808 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_S 0
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ENA_M BIT(0)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_S 1
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ENA_M BIT(1)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_S 4
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PROMIS_EN_M BIT(4)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_S 5
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAD_EN_M BIT(5)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_S 6
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CRC_FWD_M BIT(6)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_S 7
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_FWD_M BIT(7)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_S 8
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_S 9
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_S 10
+#define E830_PRTMAC_200G_COMMAND_CONFIG_LOOPBACK_EN_M BIT(10)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_S 11
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_PAD_EN_M BIT(11)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_S 12
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SW_RESET_M BIT(12)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_S 13
+#define E830_PRTMAC_200G_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_S 14
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_ERR_DISC_M BIT(14)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_S 15
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PHY_TXENA_M BIT(15)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_S 16
+#define E830_PRTMAC_200G_COMMAND_CONFIG_SEND_IDLE_M BIT(16)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_S 17
+#define E830_PRTMAC_200G_COMMAND_CONFIG_NO_LGTH_CHECK_M BIT(17)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_S 19
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PFC_MODE_M BIT(19)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20
+#define E830_PRTMAC_200G_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_S 21
+#define E830_PRTMAC_200G_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_S 22
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FLUSH_M BIT(22)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_S 25
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_S 26
+#define E830_PRTMAC_200G_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_S 27
+#define E830_PRTMAC_200G_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27)
+#define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_S 31
+#define E830_PRTMAC_200G_COMMAND_CONFIG_INV_LOOP_M BIT(31)
+#define E830_PRTMAC_200G_CRC_INV_M 0x001E384C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_S 0
+#define E830_PRTMAC_200G_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_FRM_LENGTH 0x001E3814 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_S 0
+#define E830_PRTMAC_200G_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_S 16
+#define E830_PRTMAC_200G_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_HASHTABLE_LOAD 0x001E382C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_S 0
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_HASH_TABLE_ADDR_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_S 8
+#define E830_PRTMAC_200G_HASHTABLE_LOAD_MCAST_EN_M BIT(8)
+#define E830_PRTMAC_200G_MAC_ADDR_0 0x001E380C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_S 0
+#define E830_PRTMAC_200G_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_MAC_ADDR_1 0x001E3810 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_S 0
+#define E830_PRTMAC_200G_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS 0x001E3830 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_S 0
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6)
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7
+#define E830_PRTMAC_200G_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7)
+#define E830_PRTMAC_200G_MDIO_COMMAND 0x001E3834 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_S 0
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_S 16
+#define E830_PRTMAC_200G_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_S 31
+#define E830_PRTMAC_200G_MDIO_COMMAND_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_200G_MDIO_DATA 0x001E3838 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_S 0
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_S 16
+#define E830_PRTMAC_200G_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_S 31
+#define E830_PRTMAC_200G_MDIO_DATA_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_200G_MDIO_REGADDR 0x001E383C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_S 0
+#define E830_PRTMAC_200G_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_REVISION 0x001E3800 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_REVISION_CORE_REVISION_S 0
+#define E830_PRTMAC_200G_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_200G_REVISION_CORE_VERSION_S 8
+#define E830_PRTMAC_200G_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_S 16
+#define E830_PRTMAC_200G_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS 0x001E3874 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0
+#define E830_PRTMAC_200G_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_200G_SCRATCH 0x001E3804 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_SCRATCH_SCRATCH_S 0
+#define E830_PRTMAC_200G_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_STATUS 0x001E3840 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_S 0
+#define E830_PRTMAC_200G_STATUS_RX_LOC_FAULT_M BIT(0)
+#define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_S 1
+#define E830_PRTMAC_200G_STATUS_RX_REM_FAULT_M BIT(1)
+#define E830_PRTMAC_200G_STATUS_PHY_LOS_S 2
+#define E830_PRTMAC_200G_STATUS_PHY_LOS_M BIT(2)
+#define E830_PRTMAC_200G_STATUS_TS_AVAIL_S 3
+#define E830_PRTMAC_200G_STATUS_TS_AVAIL_M BIT(3)
+#define E830_PRTMAC_200G_STATUS_RESERVED_5_S 4
+#define E830_PRTMAC_200G_STATUS_RESERVED_5_M BIT(4)
+#define E830_PRTMAC_200G_STATUS_TX_EMPTY_S 5
+#define E830_PRTMAC_200G_STATUS_TX_EMPTY_M BIT(5)
+#define E830_PRTMAC_200G_STATUS_RX_EMPTY_S 6
+#define E830_PRTMAC_200G_STATUS_RX_EMPTY_M BIT(6)
+#define E830_PRTMAC_200G_STATUS_RESERVED1_S 7
+#define E830_PRTMAC_200G_STATUS_RESERVED1_M BIT(7)
+#define E830_PRTMAC_200G_STATUS_TX_ISIDLE_S 8
+#define E830_PRTMAC_200G_STATUS_TX_ISIDLE_M BIT(8)
+#define E830_PRTMAC_200G_STATUS_RESERVED2_S 9
+#define E830_PRTMAC_200G_STATUS_RESERVED2_M MAKEMASK(0x7FFFFF, 9)
+#define E830_PRTMAC_200G_TS_TIMESTAMP 0x001E387C /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_S 0
+#define E830_PRTMAC_200G_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS 0x001E3820 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16
+#define E830_PRTMAC_200G_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_200G_TX_IPG_LENGTH 0x001E3844 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_S 0
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x7F, 0)
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_S 19
+#define E830_PRTMAC_200G_TX_IPG_LENGTH_IPG_COMP_12_0_M MAKEMASK(0x1FFF, 19)
+#define E830_PRTMAC_200G_XIF_MODE 0x001E3880 /* Reset Source: GLOBR */
+#define E830_PRTMAC_200G_XIF_MODE_RESERVED_1_S 0
+#define E830_PRTMAC_200G_XIF_MODE_RESERVED_1_M MAKEMASK(0x1F, 0)
+#define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_S 5
+#define E830_PRTMAC_200G_XIF_MODE_ONE_STEP_ENA_M BIT(5)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_S 17
+#define E830_PRTMAC_200G_XIF_MODE_PFC_PULSE_MODE_M BIT(17)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_S 18
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_MODE_M BIT(18)
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_S 19
+#define E830_PRTMAC_200G_XIF_MODE_PFC_LP_16PRI_M BIT(19)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0 0x001E3C00 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_0_APPROVED_SW_ADDR_MAC_100G_0_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1 0x001E3C20 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_1_APPROVED_SW_ADDR_MAC_100G_1_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2 0x001E3C40 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_2_APPROVED_SW_ADDR_MAC_100G_2_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3 0x001E3C60 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_100G_3_APPROVED_SW_ADDR_MAC_100G_3_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0 0x001E3C80 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_0_APPROVED_SW_ADDR_MAC_200G_0_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1 0x001E3CA0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_1_APPROVED_SW_ADDR_MAC_200G_1_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2 0x001E3CC0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_2_APPROVED_SW_ADDR_MAC_200G_2_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3 0x001E3CE0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_S 0
+#define E830_PRTMAC_APPROVED_SW_ADDR_MAC_200G_3_APPROVED_SW_ADDR_MAC_200G_3_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_CF_GEN_STATUS 0x001E33C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_S 0
+#define E830_PRTMAC_CF_GEN_STATUS_CF_GEN_SENT_M BIT(0)
+#define E830_PRTMAC_CL01_PAUSE_QUANTA 0x001E32A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL0_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL01_PAUSE_QUANTA_CL1_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL01_QUANTA_THRESH 0x001E3320 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL0_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL01_QUANTA_THRESH_CL1_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL23_PAUSE_QUANTA 0x001E32C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL2_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL23_PAUSE_QUANTA_CL3_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL23_QUANTA_THRESH 0x001E3340 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL2_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL23_QUANTA_THRESH_CL3_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL45_PAUSE_QUANTA 0x001E32E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL4_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL45_PAUSE_QUANTA_CL5_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL45_QUANTA_THRESH 0x001E3360 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL4_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL45_QUANTA_THRESH_CL5_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL67_PAUSE_QUANTA 0x001E3300 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_S 0
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL6_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_S 16
+#define E830_PRTMAC_CL67_PAUSE_QUANTA_CL7_PAUSE_QUANTA_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_CL67_QUANTA_THRESH 0x001E3380 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_S 0
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL6_QUANTA_THRESH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_S 16
+#define E830_PRTMAC_CL67_QUANTA_THRESH_CL7_QUANTA_THRESH_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_COMMAND_CONFIG 0x001E3040 /* Reset Source: GLOBR */
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_S 0
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ENA_M BIT(0)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_S 1
+#define E830_PRTMAC_COMMAND_CONFIG_RX_ENA_M BIT(1)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED1_S 3
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED1_M BIT(3)
+#define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_S 4
+#define E830_PRTMAC_COMMAND_CONFIG_PROMIS_EN_M BIT(4)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_S 5
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED2_M BIT(5)
+#define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_S 6
+#define E830_PRTMAC_COMMAND_CONFIG_CRC_FWD_M BIT(6)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_S 7
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_FWD_M BIT(7)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_S 8
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_IGNORE_M BIT(8)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_S 9
+#define E830_PRTMAC_COMMAND_CONFIG_TX_ADDR_INS_M BIT(9)
+#define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_S 10
+#define E830_PRTMAC_COMMAND_CONFIG_LOOP_ENA_M BIT(10)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_S 11
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAD_EN_M BIT(11)
+#define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_S 12
+#define E830_PRTMAC_COMMAND_CONFIG_SW_RESET_M BIT(12)
+#define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_S 13
+#define E830_PRTMAC_COMMAND_CONFIG_CNTL_FRM_ENA_M BIT(13)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_S 14
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED3_M BIT(14)
+#define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_S 15
+#define E830_PRTMAC_COMMAND_CONFIG_PHY_TXENA_M BIT(15)
+#define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__S 16
+#define E830_PRTMAC_COMMAND_CONFIG_FORCE_SEND__M BIT(16)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_S 17
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED4_M BIT(17)
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_S 18
+#define E830_PRTMAC_COMMAND_CONFIG_RESERVED5_M BIT(18)
+#define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_S 19
+#define E830_PRTMAC_COMMAND_CONFIG_PFC_MODE_M BIT(19)
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_S 20
+#define E830_PRTMAC_COMMAND_CONFIG_PAUSE_PFC_COMP_M BIT(20)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_S 21
+#define E830_PRTMAC_COMMAND_CONFIG_RX_SFD_ANY_M BIT(21)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_S 22
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FLUSH_M BIT(22)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_S 23
+#define E830_PRTMAC_COMMAND_CONFIG_TX_LOWP_ENA_M BIT(23)
+#define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_S 24
+#define E830_PRTMAC_COMMAND_CONFIG_REG_LOWP_RXEMPTY_M BIT(24)
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_S 25
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_TX_STOP_M BIT(25)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_S 26
+#define E830_PRTMAC_COMMAND_CONFIG_TX_FIFO_RESET_M BIT(26)
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_S 27
+#define E830_PRTMAC_COMMAND_CONFIG_FLT_HDL_DIS_M BIT(27)
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_S 28
+#define E830_PRTMAC_COMMAND_CONFIG_TX_PAUSE_DIS_M BIT(28)
+#define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_S 29
+#define E830_PRTMAC_COMMAND_CONFIG_RX_PAUSE_DIS_M BIT(29)
+#define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_S 30
+#define E830_PRTMAC_COMMAND_CONFIG_SHORT_PREAM_M BIT(30)
+#define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_S 31
+#define E830_PRTMAC_COMMAND_CONFIG_NO_PREAM_M BIT(31)
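(Aside, not part of the generated header or this patch: the _S/_M pairs above follow the header's usual convention of a field shift plus a MAKEMASK()/BIT() mask over the full register word. A minimal read-modify-write sketch for one such field, assuming rd32()/wr32() accessors as in the surrounding ice(4) driver; the function name and field choice are illustrative only:

	/*
	 * Hypothetical helper: enable or disable the transmitter by
	 * updating the TX_ENA field of PRTMAC_COMMAND_CONFIG.  The
	 * mask clears the field; since TX_ENA is a single bit, setting
	 * the mask bit is equivalent to (1 << ..._TX_ENA_S).
	 */
	static inline void
	e830_prtmac_set_tx_ena(struct ice_hw *hw, bool ena)
	{
		u32 val;

		val = rd32(hw, E830_PRTMAC_COMMAND_CONFIG);
		val &= ~E830_PRTMAC_COMMAND_CONFIG_TX_ENA_M;
		if (ena)
			val |= E830_PRTMAC_COMMAND_CONFIG_TX_ENA_M;
		wr32(hw, E830_PRTMAC_COMMAND_CONFIG, val);
	}

Multi-bit fields are handled the same way, shifting the new value by the field's _S constant and masking with _M before the write.)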
+#define E830_PRTMAC_CRC_INV_M 0x001E3260 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_S 0
+#define E830_PRTMAC_CRC_INV_MASK_CRC_INV_MASK_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_CRC_MODE 0x001E3240 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_S 16
+#define E830_PRTMAC_CRC_MODE_DISABLE_RX_CRC_CHECKING_M BIT(16)
+#define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_S 18
+#define E830_PRTMAC_CRC_MODE_ONE_BYTE_CRC_M BIT(18)
+#define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_S 19
+#define E830_PRTMAC_CRC_MODE_TWO_BYTES_CRC_M BIT(19)
+#define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_S 20
+#define E830_PRTMAC_CRC_MODE_ZERO_BYTE_CRC_M BIT(20)
+#define E830_PRTMAC_CSR_TIMEOUT_CFG 0x001E3D00 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_S 0
+#define E830_PRTMAC_CSR_TIMEOUT_CFG_CSR_TIMEOUT_EN_M BIT(0)
+#define E830_PRTMAC_CTL_RX_CFG 0x001E2160 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_S 0
+#define E830_PRTMAC_CTL_RX_CFG_SUB_CRC_STAT_M BIT(0)
+#define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_S 1
+#define E830_PRTMAC_CTL_RX_CFG_FRM_DROP_FOR_STAT_MODE_M MAKEMASK(0x3, 1)
+#define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_S 3
+#define E830_PRTMAC_CTL_RX_CFG_MAC_PAC_AFULL_TRSH_M MAKEMASK(0x7, 3)
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE 0x001E2180 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_S 0
+#define E830_PRTMAC_CTL_RX_PAUSE_ENABLE_RX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE 0x001E21A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_S 0
+#define E830_PRTMAC_CTL_TX_PAUSE_ENABLE_TX_PAUSE_ENABLE_M MAKEMASK(0x1FF, 0)
+#define E830_PRTMAC_FRM_LENGTH 0x001E30A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_S 0
+#define E830_PRTMAC_FRM_LENGTH_FRM_LENGTH_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_FRM_LENGTH_TX_MTU_S 16
+#define E830_PRTMAC_FRM_LENGTH_TX_MTU_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_MAC_ADDR_0 0x001E3060 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_S 0
+#define E830_PRTMAC_MAC_ADDR_0_MAC_ADDR_0_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_MAC_ADDR_1 0x001E3080 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_S 0
+#define E830_PRTMAC_MAC_ADDR_1_MAC_ADDR_1_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_CFG_STATUS 0x001E3180 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_S 0
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_BUSY_M BIT(0)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_S 1
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_RD_ERR_M BIT(1)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_S 2
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_HOLD_TIME_M MAKEMASK(0x7, 2)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_S 5
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_DIS_PREAMBLE_M BIT(5)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_S 6
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLS_45_EN_M BIT(6)
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_S 7
+#define E830_PRTMAC_MDIO_CFG_STATUS_MDIO_CLK_DIVISOR_M MAKEMASK(0x1FF, 7)
+#define E830_PRTMAC_MDIO_COMMAND 0x001E31A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_S 0
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_COMMAND_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_S 16
+#define E830_PRTMAC_MDIO_COMMAND_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_S 31
+#define E830_PRTMAC_MDIO_COMMAND_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_MDIO_DATA 0x001E31C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_DATA_MDIO_DATA_S 0
+#define E830_PRTMAC_MDIO_DATA_MDIO_DATA_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_MDIO_DATA_RESERVED_2_S 16
+#define E830_PRTMAC_MDIO_DATA_RESERVED_2_M MAKEMASK(0x7FFF, 16)
+#define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_S 31
+#define E830_PRTMAC_MDIO_DATA_MDIO_BUSY_M BIT(31)
+#define E830_PRTMAC_MDIO_REGADDR 0x001E31E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_S 0
+#define E830_PRTMAC_MDIO_REGADDR_MDIO_REGADDR_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_REVISION 0x001E3000 /* Reset Source: GLOBR */
+#define E830_PRTMAC_REVISION_CORE_REVISION_S 0
+#define E830_PRTMAC_REVISION_CORE_REVISION_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_REVISION_CORE_VERSION_S 8
+#define E830_PRTMAC_REVISION_CORE_VERSION_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_REVISION_CUSTOMER_VERSION_S 16
+#define E830_PRTMAC_REVISION_CUSTOMER_VERSION_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT 0x001E24C0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_S 0
+#define E830_PRTMAC_RX_OFLOW_PKT_DRP_BSOP_CNT_RX_OFLOW_PKT_DRP_BSOP_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_RX_PAUSE_STATUS 0x001E33A0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_S 0
+#define E830_PRTMAC_RX_PAUSE_STATUS_RX_PAUSE_STATUS_M MAKEMASK(0xFF, 0)
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_S 12
+#define E830_PRTMAC_RX_PKT_DRP_CNT_RX_OFLOW_PKT_DRP_CNT_M MAKEMASK(0xFFFF, 12)
+#define E830_PRTMAC_SCRATCH 0x001E3020 /* Reset Source: GLOBR */
+#define E830_PRTMAC_SCRATCH_SCRATCH_S 0
+#define E830_PRTMAC_SCRATCH_SCRATCH_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_STATUS 0x001E3200 /* Reset Source: GLOBR */
+#define E830_PRTMAC_STATUS_RX_LOC_FAULT_S 0
+#define E830_PRTMAC_STATUS_RX_LOC_FAULT_M BIT(0)
+#define E830_PRTMAC_STATUS_RX_REM_FAULT_S 1
+#define E830_PRTMAC_STATUS_RX_REM_FAULT_M BIT(1)
+#define E830_PRTMAC_STATUS_PHY_LOS_S 2
+#define E830_PRTMAC_STATUS_PHY_LOS_M BIT(2)
+#define E830_PRTMAC_STATUS_TS_AVAIL_S 3
+#define E830_PRTMAC_STATUS_TS_AVAIL_M BIT(3)
+#define E830_PRTMAC_STATUS_RX_LOWP_S 4
+#define E830_PRTMAC_STATUS_RX_LOWP_M BIT(4)
+#define E830_PRTMAC_STATUS_TX_EMPTY_S 5
+#define E830_PRTMAC_STATUS_TX_EMPTY_M BIT(5)
+#define E830_PRTMAC_STATUS_RX_EMPTY_S 6
+#define E830_PRTMAC_STATUS_RX_EMPTY_M BIT(6)
+#define E830_PRTMAC_STATUS_RX_LINT_FAULT_S 7
+#define E830_PRTMAC_STATUS_RX_LINT_FAULT_M BIT(7)
+#define E830_PRTMAC_STATUS_TX_ISIDLE_S 8
+#define E830_PRTMAC_STATUS_TX_ISIDLE_M BIT(8)
+#define E830_PRTMAC_STATUS_RESERVED_10_S 9
+#define E830_PRTMAC_STATUS_RESERVED_10_M MAKEMASK(0x7FFFFF, 9)
+#define E830_PRTMAC_STATUS_SPARE 0x001E2740 /* Reset Source: GLOBR */
+#define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_S 0
+#define E830_PRTMAC_STATUS_SPARE_DFD_STATUS_SPARE_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_RX_PCS_LATENCY 0x001E2220 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_S 0
+#define E830_PRTMAC_TS_RX_PCS_LATENCY_TS_RX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TS_TIMESTAMP 0x001E33E0 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_S 0
+#define E830_PRTMAC_TS_TIMESTAMP_TS_TIMESTAMP_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_MEM_VALID_H 0x001E2020 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_S 0
+#define E830_PRTMAC_TS_TX_MEM_VALID_H_TIMESTAMP_TX_VALID_ARR_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_MEM_VALID_L 0x001E2000 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_S 0
+#define E830_PRTMAC_TS_TX_MEM_VALID_L_TIMESTAMP_TX_VALID_ARR_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PRTMAC_TS_TX_PCS_LATENCY 0x001E2200 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_S 0
+#define E830_PRTMAC_TS_TX_PCS_LATENCY_TS_TX_PCS_LATENCY_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TX_FIFO_SECTIONS 0x001E3100 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_S 0
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_AVAIL_THRESHOLD_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_S 16
+#define E830_PRTMAC_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_THRESHOLD_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_TX_IPG_LENGTH 0x001E3220 /* Reset Source: GLOBR */
+#define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_S 0
+#define E830_PRTMAC_TX_IPG_LENGTH_AVG_IPG_LEN_M MAKEMASK(0x3F, 0)
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_S 8
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_23_16_M MAKEMASK(0xFF, 8)
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_S 16
+#define E830_PRTMAC_TX_IPG_LENGTH_IPG_COMP_15_0_M MAKEMASK(0xFFFF, 16)
+#define E830_PRTMAC_USER_TX_PAUSE_CNT 0x001E2760 /* Reset Source: GLOBR */
+#define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_S 0
+#define E830_PRTMAC_USER_TX_PAUSE_CNT_USER_TX_PAUSE_CNT_M MAKEMASK(0xFFFF, 0)
+#define E830_PRTMAC_XIF_MODE 0x001E3400 /* Reset Source: GLOBR */
+#define E830_PRTMAC_XIF_MODE_XGMII_ENA_S 0
+#define E830_PRTMAC_XIF_MODE_XGMII_ENA_M BIT(0)
+#define E830_PRTMAC_XIF_MODE_RESERVED_2_S 1
+#define E830_PRTMAC_XIF_MODE_RESERVED_2_M MAKEMASK(0x7, 1)
+#define E830_PRTMAC_XIF_MODE_PAUSETIMERX8_S 4
+#define E830_PRTMAC_XIF_MODE_PAUSETIMERX8_M BIT(4)
+#define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_S 5
+#define E830_PRTMAC_XIF_MODE_ONE_STEP_ENA_M BIT(5)
+#define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_S 6
+#define E830_PRTMAC_XIF_MODE_RX_PAUSE_BYPASS_M BIT(6)
+#define E830_PRTMAC_XIF_MODE_RESERVED1_S 7
+#define E830_PRTMAC_XIF_MODE_RESERVED1_M BIT(7)
+#define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_S 8
+#define E830_PRTMAC_XIF_MODE_TX_MAC_RS_ERR_M BIT(8)
+#define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_S 9
+#define E830_PRTMAC_XIF_MODE_TS_DELTA_MODE_M BIT(9)
+#define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_S 10
+#define E830_PRTMAC_XIF_MODE_TS_DELAY_MODE_M BIT(10)
+#define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_S 11
+#define E830_PRTMAC_XIF_MODE_TS_BINARY_MODE_M BIT(11)
+#define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_S 12
+#define E830_PRTMAC_XIF_MODE_TS_UPD64_MODE_M BIT(12)
+#define E830_PRTMAC_XIF_MODE_RESERVED2_S 13
+#define E830_PRTMAC_XIF_MODE_RESERVED2_M MAKEMASK(0x7, 13)
+#define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_S 16
+#define E830_PRTMAC_XIF_MODE_RX_CNT_MODE_M BIT(16)
+#define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_S 17
+#define E830_PRTMAC_XIF_MODE_PFC_PULSE_MODE_M BIT(17)
+#define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_S 18
+#define E830_PRTMAC_XIF_MODE_PFC_LP_MODE_M BIT(18)
+#define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_S 19
+#define E830_PRTMAC_XIF_MODE_PFC_LP_16PRI_M BIT(19)
+#define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_S 20
+#define E830_PRTMAC_XIF_MODE_TS_SFD_ENA_M BIT(20)
+#define E830_PRTMAC_XIF_MODE_RESERVED3_S 21
+#define E830_PRTMAC_XIF_MODE_RESERVED3_M MAKEMASK(0x7FF, 21)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF 0x001E2700 /* Reset Source: GLOBR */
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_S 0
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF0_M MAKEMASK(0xF, 0)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_S 4
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF1_M MAKEMASK(0xF, 4)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_S 8
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF2_M MAKEMASK(0xF, 8)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_S 12
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF3_M MAKEMASK(0xF, 12)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_S 16
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF4_M MAKEMASK(0xF, 16)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_S 20
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF5_M MAKEMASK(0xF, 20)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_S 24
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF6_M MAKEMASK(0xF, 24)
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_S 28
+#define E830_PRTPM_DFD_WOL_CNTR_PER_PF_PF7_M MAKEMASK(0xF, 28)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_S 28
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SW_ABOVE_HW_TAIL_M BIT(28)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_S 29
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_SAME_TAIL_M BIT(29)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_S 30
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_TAIL_GE_QLEN_M BIT(30)
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_S 31
+#define E830_GL_MDCK_EN_TX_PQM_TXT_MAL_UR_M BIT(31)
+#define E830_GL_MDET_HIF_UR_FIFO 0x00096844 /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_S 0
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_NUM_M MAKEMASK(0x3FF, 0)
+#define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_S 10
+#define E830_GL_MDET_HIF_UR_FIFO_PF_NUM_M MAKEMASK(0x7, 10)
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_S 13
+#define E830_GL_MDET_HIF_UR_FIFO_FUNC_TYPE_M MAKEMASK(0x3, 13)
+#define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_S 15
+#define E830_GL_MDET_HIF_UR_FIFO_MAL_TYPE_M MAKEMASK(0x1F, 15)
+#define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_S 20
+#define E830_GL_MDET_HIF_UR_FIFO_FIFO_FULL_M BIT(20)
+#define E830_GL_MDET_HIF_UR_FIFO_VALID_S 21
+#define E830_GL_MDET_HIF_UR_FIFO_VALID_M BIT(21)
+#define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_S 24
+#define E830_GL_MDET_HIF_UR_FIFO_EVENT_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GL_MDET_HIF_UR_PF_CNT(_i) (0x00096804 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_PF_CNT_MAX_INDEX 7
+#define E830_GL_MDET_HIF_UR_PF_CNT_CNT_S 0
+#define E830_GL_MDET_HIF_UR_PF_CNT_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GL_MDET_HIF_UR_VF(_i) (0x00096824 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GL_MDET_HIF_UR_VF_MAX_INDEX 7
+#define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_S 0
+#define E830_GL_MDET_HIF_UR_VF_VF_MAL_EVENT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PF_MDET_HIF_UR 0x00096880 /* Reset Source: CORER */
+#define E830_PF_MDET_HIF_UR_VALID_S 0
+#define E830_PF_MDET_HIF_UR_VALID_M BIT(0)
+#define E830_VM_MDET_TX_TCLAN(_i) (0x000FC348 + ((_i) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define E830_VM_MDET_TX_TCLAN_MAX_INDEX 767
+#define E830_VM_MDET_TX_TCLAN_VALID_S 0
+#define E830_VM_MDET_TX_TCLAN_VALID_M BIT(0)
+#define E830_VP_MDET_HIF_UR(_VF) (0x00096C00 + ((_VF) * 4)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_VP_MDET_HIF_UR_MAX_INDEX 255
+#define E830_VP_MDET_HIF_UR_VALID_S 0
+#define E830_VP_MDET_HIF_UR_VALID_M BIT(0)
+#define E830_GLNVM_FLA_GLOBAL_LOCKED_S 7
+#define E830_GLNVM_FLA_GLOBAL_LOCKED_M BIT(7)
+#define E830_DMA_AGENT_AT0 0x000BE268 /* Reset Source: PCIR */
+#define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_S 0
+#define E830_DMA_AGENT_AT0_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0)
+#define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_S 2
+#define E830_DMA_AGENT_AT0_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2)
+#define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_S 4
+#define E830_DMA_AGENT_AT0_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4)
+#define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_S 6
+#define E830_DMA_AGENT_AT0_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6)
+#define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_S 8
+#define E830_DMA_AGENT_AT0_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8)
+#define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_S 10
+#define E830_DMA_AGENT_AT0_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10)
+#define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_S 12
+#define E830_DMA_AGENT_AT0_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12)
+#define E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_S 14
+#define E830_DMA_AGENT_AT0_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14)
+#define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_S 16
+#define E830_DMA_AGENT_AT0_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16)
+#define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_S 18
+#define E830_DMA_AGENT_AT0_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18)
+#define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_S 20
+#define E830_DMA_AGENT_AT0_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20)
+#define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_S 22
+#define E830_DMA_AGENT_AT0_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22)
+#define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_S 24
+#define E830_DMA_AGENT_AT0_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24)
+#define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_S 26
+#define E830_DMA_AGENT_AT0_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26)
+#define E830_DMA_AGENT_AT1 0x000BE26C /* Reset Source: PCIR */
+#define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_S 0
+#define E830_DMA_AGENT_AT1_RLAN_PASID_SELECTED_M MAKEMASK(0x3, 0)
+#define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_S 2
+#define E830_DMA_AGENT_AT1_TCLAN_PASID_SELECTED_M MAKEMASK(0x3, 2)
+#define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_S 4
+#define E830_DMA_AGENT_AT1_PQM_DBL_PASID_SELECTED_M MAKEMASK(0x3, 4)
+#define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_S 6
+#define E830_DMA_AGENT_AT1_PQM_DESC_PASID_SELECTED_M MAKEMASK(0x3, 6)
+#define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_S 8
+#define E830_DMA_AGENT_AT1_PQM_TS_DESC_PASID_SELECTED_M MAKEMASK(0x3, 8)
+#define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_S 10
+#define E830_DMA_AGENT_AT1_RDPU_PASID_SELECTED_M MAKEMASK(0x3, 10)
+#define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_S 12
+#define E830_DMA_AGENT_AT1_TDPU_PASID_SELECTED_M MAKEMASK(0x3, 12)
+#define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_S 14
+#define E830_DMA_AGENT_AT1_MBX_PASID_SELECTED_M MAKEMASK(0x3, 14)
+#define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_S 16
+#define E830_DMA_AGENT_AT1_MNG_PASID_SELECTED_M MAKEMASK(0x3, 16)
+#define E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_S 18
+#define E830_DMA_AGENT_AT1_TEP_PMAT_PASID_SELECTED_M MAKEMASK(0x3, 18)
+#define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_S 20
+#define E830_DMA_AGENT_AT1_RX_PE_PASID_SELECTED_M MAKEMASK(0x3, 20)
+#define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_S 22
+#define E830_DMA_AGENT_AT1_TX_PE_PASID_SELECTED_M MAKEMASK(0x3, 22)
+#define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_S 24
+#define E830_DMA_AGENT_AT1_PEPMAT_PASID_SELECTED_M MAKEMASK(0x3, 24)
+#define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_S 26
+#define E830_DMA_AGENT_AT1_FPMAT_PASID_SELECTED_M MAKEMASK(0x3, 26)
+#define E830_GLPCI_CAPSUP_DOE_EN_S 1
+#define E830_GLPCI_CAPSUP_DOE_EN_M BIT(1)
+#define E830_GLPCI_CAPSUP_GEN5_EXT_EN_S 12
+#define E830_GLPCI_CAPSUP_GEN5_EXT_EN_M BIT(12)
+#define E830_GLPCI_CAPSUP_PTM_EN_S 13
+#define E830_GLPCI_CAPSUP_PTM_EN_M BIT(13)
+#define E830_GLPCI_CAPSUP_SNPS_RAS_EN_S 14
+#define E830_GLPCI_CAPSUP_SNPS_RAS_EN_M BIT(14)
+#define E830_GLPCI_CAPSUP_SIOV_EN_S 15
+#define E830_GLPCI_CAPSUP_SIOV_EN_M BIT(15)
+#define E830_GLPCI_CAPSUP_PTM_VSEC_EN_S 22
+#define E830_GLPCI_CAPSUP_PTM_VSEC_EN_M BIT(22)
+#define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_S 23
+#define E830_GLPCI_CAPSUP_SNPS_RAS_PROT_EN_M BIT(23)
+#define E830_GLPCI_DOE_BUSY_STATUS 0x0009DF70 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_S 0
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_REQ_M BIT(0)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_S 1
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_EMPR_M BIT(1)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_S 2
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_PCIER_M BIT(2)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_S 3
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FLR_M BIT(3)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_S 4
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_CFG_ABORT_M BIT(4)
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_S 5
+#define E830_GLPCI_DOE_BUSY_STATUS_BUSY_FW_M BIT(5)
+#define E830_GLPCI_DOE_CFG 0x0009DF54 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_CFG_ENABLE_S 0
+#define E830_GLPCI_DOE_CFG_ENABLE_M BIT(0)
+#define E830_GLPCI_DOE_CFG_ITR_SUPPORT_S 1
+#define E830_GLPCI_DOE_CFG_ITR_SUPPORT_M BIT(1)
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_S 2
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_PIOSF_EP_BIT_M BIT(2)
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_S 3
+#define E830_GLPCI_DOE_CFG_POISON_CFGWR_SBIOSF_AER_MSG_M BIT(3)
+#define E830_GLPCI_DOE_CFG_MSIX_VECTOR_S 8
+#define E830_GLPCI_DOE_CFG_MSIX_VECTOR_M MAKEMASK(0x7FF, 8)
+#define E830_GLPCI_DOE_CTRL 0x0009DF60 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_S 0
+#define E830_GLPCI_DOE_CTRL_BUSY_FW_SET_M BIT(0)
+#define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_S 1
+#define E830_GLPCI_DOE_CTRL_DOE_CFG_ERR_SET_M BIT(1)
+#define E830_GLPCI_DOE_DBG 0x0009DF6C /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_DBG_CFG_BUSY_S 0
+#define E830_GLPCI_DOE_DBG_CFG_BUSY_M BIT(0)
+#define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_S 1
+#define E830_GLPCI_DOE_DBG_CFG_DATA_OBJECT_READY_M BIT(1)
+#define E830_GLPCI_DOE_DBG_CFG_ERROR_S 2
+#define E830_GLPCI_DOE_DBG_CFG_ERROR_M BIT(2)
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_S 3
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_ENABLE_M BIT(3)
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_S 4
+#define E830_GLPCI_DOE_DBG_CFG_INTERRUPT_STATUS_M BIT(4)
+#define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_S 8
+#define E830_GLPCI_DOE_DBG_REQ_BUF_SW_WR_PTR_M MAKEMASK(0x1FF, 8)
+#define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_S 20
+#define E830_GLPCI_DOE_DBG_RESP_BUF_SW_RD_PTR_M MAKEMASK(0x1FF, 20)
+#define E830_GLPCI_DOE_ERR_EN 0x0009DF64 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_S 0
+#define E830_GLPCI_DOE_ERR_EN_RD_REQ_BUF_ECC_ERR_EN_M BIT(0)
+#define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_S 1
+#define E830_GLPCI_DOE_ERR_EN_RD_RESP_BUF_ECC_ERR_EN_M BIT(1)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_S 2
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_CFG_POISONED_EN_M BIT(2)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_S 3
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_EN_M BIT(3)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_S 4
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_REQ_EN_M BIT(4)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_S 5
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_EN_M BIT(5)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_S 6
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_FW_EN_M BIT(6)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_S 7
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_OVERFLOW_EN_M BIT(7)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_S 8
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_REQ_BUF_EMPTY_EN_M BIT(8)
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_S 9
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_READY_LOW_EN_M BIT(9)
+#define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_S 10
+#define E830_GLPCI_DOE_ERR_EN_SW_REQ_DURING_MNG_RST_EN_M BIT(10)
+#define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_S 11
+#define E830_GLPCI_DOE_ERR_EN_FW_SET_ERROR_EN_M BIT(11)
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_S 12
+#define E830_GLPCI_DOE_ERR_EN_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(12)
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_S 13
+#define E830_GLPCI_DOE_ERR_EN_SW_GO_ON_BUSY_DUE_ABORT_EN_M BIT(13)
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_S 14
+#define E830_GLPCI_DOE_ERR_EN_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_EN_M BIT(14)
+#define E830_GLPCI_DOE_ERR_STATUS 0x0009DF68 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_S 0
+#define E830_GLPCI_DOE_ERR_STATUS_RD_REQ_BUF_ECC_ERR_M BIT(0)
+#define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_S 1
+#define E830_GLPCI_DOE_ERR_STATUS_RD_RESP_BUF_ECC_ERR_M BIT(1)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_S 2
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_CFG_POISONED_M BIT(2)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_S 3
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_REQ_M BIT(3)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_S 4
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_REQ_M BIT(4)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_S 5
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_FW_M BIT(5)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_S 6
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_FW_M BIT(6)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_S 7
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_OVERFLOW_M BIT(7)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_S 8
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_REQ_BUF_EMPTY_M BIT(8)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_S 9
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_READY_LOW_M BIT(9)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_S 10
+#define E830_GLPCI_DOE_ERR_STATUS_SW_REQ_DURING_MNG_RST_M BIT(10)
+#define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_S 11
+#define E830_GLPCI_DOE_ERR_STATUS_FW_SET_ERROR_M BIT(11)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_S 12
+#define E830_GLPCI_DOE_ERR_STATUS_SW_WR_REQ_BUF_ON_BUSY_DUE_ABORT_M BIT(12)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_S 13
+#define E830_GLPCI_DOE_ERR_STATUS_SW_GO_ON_BUSY_DUE_ABORT_M BIT(13)
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_S 14
+#define E830_GLPCI_DOE_ERR_STATUS_SW_RD_RESP_BUF_ON_BUSY_DUE_ABORT_M BIT(14)
+#define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_S 24
+#define E830_GLPCI_DOE_ERR_STATUS_CFG_ERR_IDX_M MAKEMASK(0x1F, 24)
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS 0x0009DF58 /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_S 0
+#define E830_GLPCI_DOE_REQ_MSG_NUM_DWS_GLPCI_DOE_REQ_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_DOE_RESP 0x0009DF5C /* Reset Source: PCIR */
+#define E830_GLPCI_DOE_RESP_MSG_NUM_DWS_S 0
+#define E830_GLPCI_DOE_RESP_MSG_NUM_DWS_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_DOE_RESP_READY_SET_S 16
+#define E830_GLPCI_DOE_RESP_READY_SET_M BIT(16)
+#define E830_GLPCI_ERR_DBG 0x0009DF84 /* Reset Source: PCIR */
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_S 0
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_FULL_DROP_CTR_M MAKEMASK(0x3, 0)
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_S 2
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_SM_M BIT(2)
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_S 3
+#define E830_GLPCI_ERR_DBG_PCIE2SB_AER_MSG_FIFO_NUM_ENTRIES_M MAKEMASK(0x7, 3)
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_S 6
+#define E830_GLPCI_ERR_DBG_ERR_MIFO_NUM_ENTRIES_M MAKEMASK(0xF, 6)
+#define E830_GLPCI_NPQ_CFG_HIGH_TO_S 20
+#define E830_GLPCI_NPQ_CFG_HIGH_TO_M BIT(20)
+#define E830_GLPCI_NPQ_CFG_INC_150MS_TO_S 21
+#define E830_GLPCI_NPQ_CFG_INC_150MS_TO_M BIT(21)
+#define E830_GLPCI_PUSH_PQM_CTRL 0x0009DF74 /* Reset Source: POR */
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_S 0
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_LEGACY_RANGE_EN_M BIT(0)
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_S 1
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_TXTIME_RANGE_EN_M BIT(1)
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_S 2
+#define E830_GLPCI_PUSH_PQM_CTRL_PF_4K_RANGE_EN_M BIT(2)
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_S 3
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_LEGACY_RANGE_EN_M BIT(3)
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_S 4
+#define E830_GLPCI_PUSH_PQM_CTRL_VF_TXTIME_RANGE_EN_M BIT(4)
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_S 8
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_VAL_M MAKEMASK(0xF, 8)
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_S 12
+#define E830_GLPCI_PUSH_PQM_CTRL_PUSH_PQM_IF_TO_DIS_M BIT(12)
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_S 16
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_2DWS_ONE_CHUNK_EN_M BIT(16)
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_S 17
+#define E830_GLPCI_PUSH_PQM_CTRL_RD_COMP_LEN_1DW_ON_XLR_M BIT(17)
+#define E830_GLPCI_PUSH_PQM_DBG 0x0009DF7C /* Reset Source: PCIR */
+#define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_S 0
+#define E830_GLPCI_PUSH_PQM_DBG_EVENTS_CTR_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_S 8
+#define E830_GLPCI_PUSH_PQM_DBG_DROP_CTR_M MAKEMASK(0xFF, 8)
+#define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_S 16
+#define E830_GLPCI_PUSH_PQM_DBG_ASYNC_FIFO_USED_SPACE_M MAKEMASK(0xF, 16)
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_S 20
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_USED_SPACE_M MAKEMASK(0x1F, 20)
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_S 25
+#define E830_GLPCI_PUSH_PQM_DBG_CDT_FIFO_PUSH_WHEN_FULL_ERR_M BIT(25)
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS 0x0009DF78 /* Reset Source: PCIR */
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_S 0
+#define E830_GLPCI_PUSH_PQM_IF_TO_STATUS_GLPCI_PUSH_PQM_IF_TO_STATUS_M BIT(0)
+#define E830_GLPCI_RDPU_CMD_DBG 0x000BE264 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_S 0
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU0_CMD_POP_CNT_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_S 8
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU1_CMD_POP_CNT_M MAKEMASK(0xFF, 8)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_S 16
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU2_CMD_POP_CNT_M MAKEMASK(0xFF, 16)
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_S 24
+#define E830_GLPCI_RDPU_CMD_DBG_RDPU3_CMD_POP_CNT_M MAKEMASK(0xFF, 24)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0 0x000BE25C /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_S 0
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU0_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_S 16
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG0_RDPU1_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1 0x000BE260 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_S 0
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU2_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 0)
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_S 16
+#define E830_GLPCI_RDPU_CMD_FIFO_DBG1_RDPU3_CMD_NUM_ENTRIES_M MAKEMASK(0x1FF, 16)
+#define E830_GLPCI_RDPU_TAG 0x000BE258 /* Reset Source: PCIR */
+#define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_S 0
+#define E830_GLPCI_RDPU_TAG_OVERRIDE_DELAY_M MAKEMASK(0xFF, 0)
+#define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_S 8
+#define E830_GLPCI_RDPU_TAG_EXPECTED_TAG_M MAKEMASK(0x3FF, 8)
+#define E830_GLPCI_SB_AER_MSG_OUT 0x0009DF80 /* Reset Source: PCIR */
+#define E830_GLPCI_SB_AER_MSG_OUT_EN_S 0
+#define E830_GLPCI_SB_AER_MSG_OUT_EN_M BIT(0)
+#define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_S 1
+#define E830_GLPCI_SB_AER_MSG_OUT_ANF_SET_EN_M BIT(1)
+#define E830_PF_FUNC_RID_HOST_S 16
+#define E830_PF_FUNC_RID_HOST_M MAKEMASK(0x3, 16)
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI(_i) (0x00553004 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_MAX_INDEX 127
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_S 0
+#define E830_GLPES_PFRXNPECNMARKEDPKTSHI_RXNPECNMARKEDPKTSHI_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO(_i) (0x00553000 + ((_i) * 8)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_MAX_INDEX 127
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_S 0
+#define E830_GLPES_PFRXNPECNMARKEDPKTSLO_RXNPECNMARKEDPKTSLO_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPES_PFRXRPCNPHANDLED(_i) (0x00552C00 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXRPCNPHANDLED_MAX_INDEX 127
+#define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_S 0
+#define E830_GLPES_PFRXRPCNPHANDLED_RXRPCNPHANDLED_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPES_PFRXRPCNPIGNORED(_i) (0x00552800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFRXRPCNPIGNORED_MAX_INDEX 127
+#define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_S 0
+#define E830_GLPES_PFRXRPCNPIGNORED_RXRPCNPIGNORED_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLPES_PFTXNPCNPSENT(_i) (0x00553800 + ((_i) * 4)) /* _i=0...127 */ /* Reset Source: CORER */
+#define E830_GLPES_PFTXNPCNPSENT_MAX_INDEX 127
+#define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_S 0
+#define E830_GLPES_PFTXNPCNPSENT_TXNPCNPSENT_M MAKEMASK(0xFFFFFF, 0)
+#define E830_GLQF_FLAT_HLUT(_i) (0x004C0000 + ((_i) * 4)) /* _i=0...8191 */ /* Reset Source: CORER */
+#define E830_GLQF_FLAT_HLUT_MAX_INDEX 8191
+#define E830_GLQF_FLAT_HLUT_LUT0_S 0
+#define E830_GLQF_FLAT_HLUT_LUT0_M MAKEMASK(0xFF, 0)
+#define E830_GLQF_FLAT_HLUT_LUT1_S 8
+#define E830_GLQF_FLAT_HLUT_LUT1_M MAKEMASK(0xFF, 8)
+#define E830_GLQF_FLAT_HLUT_LUT2_S 16
+#define E830_GLQF_FLAT_HLUT_LUT2_M MAKEMASK(0xFF, 16)
+#define E830_GLQF_FLAT_HLUT_LUT3_S 24
+#define E830_GLQF_FLAT_HLUT_LUT3_M MAKEMASK(0xFF, 24)
+#define E830_GLQF_QGRP_CNTX(_i) (0x00490000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define E830_GLQF_QGRP_CNTX_MAX_INDEX 2047
+#define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_S 0
+#define E830_GLQF_QGRP_CNTX_QG_LUT_BASE_M MAKEMASK(0x7FFF, 0)
+#define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_S 16
+#define E830_GLQF_QGRP_CNTX_QG_LUT_SIZE_M MAKEMASK(0xF, 16)
+#define E830_GLQF_QGRP_CNTX_VSI_S 20
+#define E830_GLQF_QGRP_CNTX_VSI_M MAKEMASK(0x3FF, 20)
+#define E830_GLQF_QGRP_PF_OWNER(_i) (0x00484000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset Source: CORER */
+#define E830_GLQF_QGRP_PF_OWNER_MAX_INDEX 2047
+#define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_S 0
+#define E830_GLQF_QGRP_PF_OWNER_OWNER_PF_M MAKEMASK(0x7, 0)
+#define E830_PFQF_LUT_ALLOC 0x0048E000 /* Reset Source: CORER */
+#define E830_PFQF_LUT_ALLOC_LUT_BASE_S 0
+#define E830_PFQF_LUT_ALLOC_LUT_BASE_M MAKEMASK(0x7FFF, 0)
+#define E830_PFQF_LUT_ALLOC_LUT_SIZE_S 16
+#define E830_PFQF_LUT_ALLOC_LUT_SIZE_M MAKEMASK(0xF, 16)
+#define E830_VSIQF_DEF_QGRP(_VSI) (0x00486000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSIQF_DEF_QGRP_MAX_INDEX 767
+#define E830_VSIQF_DEF_QGRP_DEF_QGRP_S 0
+#define E830_VSIQF_DEF_QGRP_DEF_QGRP_M MAKEMASK(0x7FF, 0)
+#define E830_GLPRT_BPRCH_BPRCH_S 0
+#define E830_GLPRT_BPRCH_BPRCH_M MAKEMASK(0xFF, 0)
+#define E830_GLPRT_BPRCL_BPRCL_S 0
+#define E830_GLPRT_BPRCL_BPRCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPRT_BPTCH_BPTCH_S 0
+#define E830_GLPRT_BPTCH_BPTCH_M MAKEMASK(0xFF, 0)
+#define E830_GLPRT_BPTCL_BPTCL_S 0
+#define E830_GLPRT_BPTCL_BPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPRT_UPTCL_UPTCL_S 0
+#define E830_GLPRT_UPTCL_UPTCL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPTM_ART_CTL 0x00088B50 /* Reset Source: POR */
+#define E830_GLPTM_ART_CTL_ACTIVE_S 0
+#define E830_GLPTM_ART_CTL_ACTIVE_M BIT(0)
+#define E830_GLPTM_ART_CTL_TIME_OUT_S 1
+#define E830_GLPTM_ART_CTL_TIME_OUT_M BIT(1)
+#define E830_GLPTM_ART_CTL_PTM_READY_S 2
+#define E830_GLPTM_ART_CTL_PTM_READY_M BIT(2)
+#define E830_GLPTM_ART_CTL_PTM_AUTO_S 3
+#define E830_GLPTM_ART_CTL_PTM_AUTO_M BIT(3)
+#define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_S 4
+#define E830_GLPTM_ART_CTL_PTM_AUTO_LATCH_M BIT(4)
+#define E830_GLPTM_ART_CTL_LATCH_PTP_T1_S 5
+#define E830_GLPTM_ART_CTL_LATCH_PTP_T1_M BIT(5)
+#define E830_GLPTM_ART_CTL_AUTO_POURSE_S 6
+#define E830_GLPTM_ART_CTL_AUTO_POURSE_M BIT(6)
+#define E830_GLPTM_ART_TIME_H 0x00088B54 /* Reset Source: POR */
+#define E830_GLPTM_ART_TIME_H_ART_TIME_H_S 0
+#define E830_GLPTM_ART_TIME_H_ART_TIME_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLPTM_ART_TIME_L 0x00088B58 /* Reset Source: POR */
+#define E830_GLPTM_ART_TIME_L_ART_TIME_L_S 0
+#define E830_GLPTM_ART_TIME_L_ART_TIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_PTMTIME_H(_i) (0x00088B48 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLTSYN_PTMTIME_H_MAX_INDEX 1
+#define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_S 0
+#define E830_GLTSYN_PTMTIME_H_TSYNEVNT_H_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_PTMTIME_L(_i) (0x00088B40 + ((_i) * 4)) /* _i=0...1 */ /* Reset Source: CORER */
+#define E830_GLTSYN_PTMTIME_L_MAX_INDEX 1
+#define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_S 0
+#define E830_GLTSYN_PTMTIME_L_TSYNEVNT_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_0_AL 0x0008A004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_1_AL 0x0008B004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_0_AL 0x0008A000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_0_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_1_AL 0x0008B000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_1_AL_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_PFPTM_SEM 0x00088B00 /* Reset Source: PFR */
+#define E830_PFPTM_SEM_BUSY_S 0
+#define E830_PFPTM_SEM_BUSY_M BIT(0)
+#define E830_PFPTM_SEM_PF_OWNER_S 4
+#define E830_PFPTM_SEM_PF_OWNER_M MAKEMASK(0x7, 4)
+#define E830_VSI_PASID_1(_VSI) (0x00094000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_PASID_1_MAX_INDEX 767
+#define E830_VSI_PASID_1_PASID_S 0
+#define E830_VSI_PASID_1_PASID_M MAKEMASK(0xFFFFF, 0)
+#define E830_VSI_PASID_1_EN_S 31
+#define E830_VSI_PASID_1_EN_M BIT(31)
+#define E830_VSI_PASID_2(_VSI) (0x00095000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_PASID_2_MAX_INDEX 767
+#define E830_VSI_PASID_2_PASID_S 0
+#define E830_VSI_PASID_2_PASID_M MAKEMASK(0xFFFFF, 0)
+#define E830_VSI_PASID_2_EN_S 31
+#define E830_VSI_PASID_2_EN_M BIT(31)
+#define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_S 15
+#define E830_GLPE_CQM_FUNC_INVALIDATE_PMF_ID_M MAKEMASK(0x3F, 15)
+#define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_S 29
+#define E830_GLPE_CQM_FUNC_INVALIDATE_INVALIDATE_TYPE_M MAKEMASK(0x3, 29)
+#define E830_VFPE_MRTEIDXMASK_MAX_INDEX 255
+#define E830_VSIQF_QGRP_CFG(_VSI) (0x00492000 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: CORER */
+#define E830_VSIQF_QGRP_CFG_MAX_INDEX 767
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_S 0
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_ENABLE_M BIT(0)
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_S 1
+#define E830_VSIQF_QGRP_CFG_VSI_QGRP_GEN_INDEX_M MAKEMASK(0x7, 1)
+#define E830_GLDCB_RTC_BLOCKED 0x0012274C /* Reset Source: CORER */
+#define E830_GLDCB_RTC_BLOCKED_BLOCKED_S 0
+#define E830_GLDCB_RTC_BLOCKED_BLOCKED_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCID 0x00122900 /* Reset Source: CORER */
+#define E830_GLDCB_RTCID_IMM_DROP_TC_S 0
+#define E830_GLDCB_RTCID_IMM_DROP_TC_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTI_CDS_SET 0x00122748 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_S 0
+#define E830_GLDCB_RTCTI_CDS_SET_CDS_SET_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTQ_PD(_i) (0x00122700 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_PD_MAX_INDEX 7
+#define E830_GLDCB_RTCTQ_PD_RXQNUM_S 0
+#define E830_GLDCB_RTCTQ_PD_RXQNUM_M MAKEMASK(0x7FF, 0)
+#define E830_GLDCB_RTCTQ_PD_IS_PF_Q_S 16
+#define E830_GLDCB_RTCTQ_PD_IS_PF_Q_M BIT(16)
+#define E830_GLDCB_RTCTQ_SET 0x00122750 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_S 0
+#define E830_GLDCB_RTCTQ_SET_RTCTQ_VALID_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLDCB_RTCTQ_STICKY_EN 0x00122754 /* Reset Source: CORER */
+#define E830_GLDCB_RTCTQ_STICKY_EN_EN_S 0
+#define E830_GLDCB_RTCTQ_STICKY_EN_EN_M BIT(0)
+#define E830_GLDCB_RTCTS_PD(_i) (0x00122720 + ((_i) * 4)) /* _i=0...7 */ /* Reset Source: CORER */
+#define E830_GLDCB_RTCTS_PD_MAX_INDEX 7
+#define E830_GLDCB_RTCTS_PD_PFCTIMER_S 0
+#define E830_GLDCB_RTCTS_PD_PFCTIMER_M MAKEMASK(0x3FFF, 0)
+#define E830_GLRPB_TC_TOTAL_PC(_i) (0x000ACD00 + ((_i) * 4)) /* _i=0...31 */ /* Reset Source: CORER */
+#define E830_GLRPB_TC_TOTAL_PC_MAX_INDEX 31
+#define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_S 0
+#define E830_GLRPB_TC_TOTAL_PC_BYTE_CNT_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_VFINT_ITRN_64(_i, _j) (0x00002C00 + ((_i) * 4 + (_j) * 256)) /* _i=0...63, _j=0...2 */ /* Reset Source: CORER */
+#define E830_VFINT_ITRN_64_MAX_INDEX 63
+#define E830_VFINT_ITRN_64_INTERVAL_S 0
+#define E830_VFINT_ITRN_64_INTERVAL_M MAKEMASK(0xFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_LSB1(_DBQM) (0x0000D000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_LSB1_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_LSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_DBELL_MSB1(_DBQM) (0x0000D004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_DBELL_MSB1_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_DBELL_MSB1_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB(_DBQM) (0x00040000 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_LARGE_DBELL_LSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB(_DBQM) (0x00040004 + ((_DBQM) * 8)) /* _i=0...255 */ /* Reset Source: CORER */
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_MAX_INDEX 255
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_S 0
+#define E830_GLQTX_TXTIME_LARGE_DBELL_MSB_QTX_TXTIME_DBELL_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_0_AL1 0x00003004 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_H_1_AL1 0x0000300C /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_H_1_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_0_AL1 0x00003000 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_0_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_GLTSYN_TIME_L_1_AL1 0x00003008 /* Reset Source: CORER */
+#define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_S 0
+#define E830_GLTSYN_TIME_L_1_AL1_TSYNTIME_L_M MAKEMASK(0xFFFFFFFF, 0)
+#define E830_VSI_VSI2F_LEM(_VSI) (0x006100A0 + ((_VSI) * 4)) /* _i=0...767 */ /* Reset Source: PFR */
+#define E830_VSI_VSI2F_LEM_MAX_INDEX 767
+#define E830_VSI_VSI2F_LEM_VFVMNUMBER_S 0
+#define E830_VSI_VSI2F_LEM_VFVMNUMBER_M MAKEMASK(0x3FF, 0)
+#define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_S 10
+#define E830_VSI_VSI2F_LEM_FUNCTIONTYPE_M MAKEMASK(0x3, 10)
+#define E830_VSI_VSI2F_LEM_PFNUMBER_S 12
+#define E830_VSI_VSI2F_LEM_PFNUMBER_M MAKEMASK(0x7, 12)
+#define E830_VSI_VSI2F_LEM_BUFFERNUMBER_S 16
+#define E830_VSI_VSI2F_LEM_BUFFERNUMBER_M MAKEMASK(0x7, 16)
+#define E830_VSI_VSI2F_LEM_VSI_NUMBER_S 20
+#define E830_VSI_VSI2F_LEM_VSI_NUMBER_M MAKEMASK(0x3FF, 20)
+#define E830_VSI_VSI2F_LEM_VSI_ENABLE_S 31
+#define E830_VSI_VSI2F_LEM_VSI_ENABLE_M BIT(31)
#endif /* !_ICE_HW_AUTOGEN_H_ */
diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h
index 4ac5fffe5b7e..e1d5307a9516 100644
--- a/sys/dev/ice/ice_iflib.h
+++ b/sys/dev/ice/ice_iflib.h
@@ -139,6 +139,9 @@ struct ice_irq_vector {
* @tc: traffic class queue belongs to
* @q_handle: qidx in tc; used in TXQ enable functions
*
+ * ice_iov.c requires the following parameters (when PCI_IOV is defined):
+ * @itr_idx: ITR index to use for this queue
+ *
* Other parameters may be iflib driver specific
*/
struct ice_tx_queue {
@@ -146,7 +149,6 @@ struct ice_tx_queue {
struct ice_tx_desc *tx_base;
bus_addr_t tx_paddr;
struct tx_stats stats;
- u64 tso;
u16 desc_count;
u32 tail;
struct ice_irq_vector *irqv;
@@ -154,6 +156,9 @@ struct ice_tx_queue {
u32 me;
u16 q_handle;
u8 tc;
+#ifdef PCI_IOV
+ u8 itr_idx;
+#endif
/* descriptor writeback status */
qidx_t *tx_rsq;
@@ -176,6 +181,9 @@ struct ice_tx_queue {
* @stats: queue statistics
* @tc: traffic class queue belongs to
*
+ * ice_iov.c requires the following parameters (when PCI_IOV is defined):
+ * @itr_idx: ITR index to use for this queue
+ *
* Other parameters may be iflib driver specific
*/
struct ice_rx_queue {
@@ -188,6 +196,9 @@ struct ice_rx_queue {
struct ice_irq_vector *irqv;
u32 me;
u8 tc;
+#ifdef PCI_IOV
+ u8 itr_idx;
+#endif
struct if_irq que_irq;
};
@@ -333,6 +344,10 @@ struct ice_softc {
ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
+#ifdef PCI_IOV
+ struct ice_vf *vfs;
+ u16 num_vfs;
+#endif
struct ice_resmgr os_imgr;
/* For mirror interface */
struct ice_mirr_if *mirr_if;
diff --git a/sys/dev/ice/ice_iov.c b/sys/dev/ice/ice_iov.c
new file mode 100644
index 000000000000..c5a3e1060e44
--- /dev/null
+++ b/sys/dev/ice/ice_iov.c
@@ -0,0 +1,1856 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file ice_iov.c
+ * @brief Virtualization support functions
+ *
+ * Contains functions for enabling and managing PCIe virtual function devices,
+ * including enabling new VFs, and managing VFs over the virtchnl interface.
+ */
+
+#include "ice_iov.h"
+
+static struct ice_vf *ice_iov_get_vf(struct ice_softc *sc, int vf_num);
+static void ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf);
+static void ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf,
+ bool trigger_vflr);
+static void ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf);
+
+static void ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static bool ice_vc_isvalid_ring_len(u16 ring_len);
+static void ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf);
+static void ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats,
+ struct virtchnl_eth_stats *vstats);
+static void ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static enum virtchnl_status_code ice_iov_err_to_virt_err(int ice_err);
+static int ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr);
+
+/**
+ * ice_iov_attach - Initialize SR-IOV PF host support
+ * @sc: device softc structure
+ *
+ * Initialize SR-IOV PF host support at the end of the driver attach process.
+ *
+ * @pre Must be called from sleepable context (calls malloc() w/ M_WAITOK)
+ *
+ * @returns 0 if successful, or
+ * - ENOMEM if there is no memory for the PF/VF schemas or iov device
+ * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV
+ * version as the kernel
+ * - ENOENT if the device doesn't have the SR-IOV capability
+ */
+int
+ice_iov_attach(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+ nvlist_t *pf_schema, *vf_schema;
+ int error;
+
+ pf_schema = pci_iov_schema_alloc_node();
+ vf_schema = pci_iov_schema_alloc_node();
+
+ pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+ IOV_SCHEMA_HASDEFAULT, TRUE);
+ pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_uint16(vf_schema, "num-queues",
+ IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_QUEUES);
+ pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi",
+ IOV_SCHEMA_HASDEFAULT, ICE_INVALID_MIRROR_VSI);
+ pci_iov_schema_add_uint16(vf_schema, "max-vlan-allowed",
+ IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_VLAN_LIMIT);
+ pci_iov_schema_add_uint16(vf_schema, "max-mac-filters",
+ IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_FILTER_LIMIT);
+
+ error = pci_iov_attach(dev, pf_schema, vf_schema);
+ if (error != 0) {
+ device_printf(dev,
+ "pci_iov_attach failed (error=%s)\n",
+ ice_err_str(error));
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+ } else
+ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+
+ return (error);
+}
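+
+/*
+ * Example (illustrative sketch only, not part of the driver): the schema
+ * parameters registered above correspond to per-VF settings in
+ * iovctl.conf(5). Assuming a hypothetical PF instance named "ice0", a
+ * minimal configuration creating two VFs might look like:
+ *
+ *	PF {
+ *		device : "ice0";
+ *		num_vfs : 2;
+ *	}
+ *
+ *	VF-0 {
+ *		num-queues : 4;
+ *		mac-addr : "02:00:00:00:00:01";
+ *		allow-set-mac : true;
+ *	}
+ */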
+
+/**
+ * ice_iov_detach - Teardown SR-IOV PF host support
+ * @sc: device softc structure
+ *
+ * Teardown SR-IOV PF host support at the start of the driver detach process.
+ *
+ * @returns 0 if successful or IOV support hasn't been setup, or
+ * - EBUSY if VFs still exist
+ */
+int
+ice_iov_detach(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+ int error;
+
+ error = pci_iov_detach(dev);
+ if (error != 0) {
+ device_printf(dev,
+ "pci_iov_detach failed (error=%s)\n",
+ ice_err_str(error));
+ }
+
+ return (error);
+}
+
+/**
+ * ice_iov_init - Called by the OS before the first VF is created.
+ * @sc: device softc structure
+ * @num_vfs: number of VFs to setup resources for
+ * @params: configuration parameters for the PF
+ *
+ * @returns 0 if successful or an error code on failure
+ */
+int
+ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params __unused)
+{
+ /* Allocate array of VFs, for tracking */
+ sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT |
+ M_ZERO);
+ if (sc->vfs == NULL)
+ return (ENOMEM);
+
+ /* Initialize each VF with basic information */
+ for (int i = 0; i < num_vfs; i++)
+ sc->vfs[i].vf_num = i;
+
+ /* Save off number of configured VFs */
+ sc->num_vfs = num_vfs;
+
+ return (0);
+}
+
+/**
+ * ice_iov_get_vf - Get pointer to VF at given index
+ * @sc: device softc structure
+ * @vf_num: Index of VF to retrieve
+ *
+ * @remark will trigger an assertion failure if vf_num is not in the
+ * range of allocated VFs
+ *
+ * @returns a pointer to the VF structure at the given index
+ */
+static struct ice_vf *
+ice_iov_get_vf(struct ice_softc *sc, int vf_num)
+{
+ MPASS(vf_num < sc->num_vfs);
+
+ return &sc->vfs[vf_num];
+}
+
+/**
+ * ice_iov_add_vf - Called by the OS for each VF to create
+ * @sc: device softc structure
+ * @vfnum: index of VF to configure
+ * @params: configuration parameters for the VF
+ *
+ * @returns 0 if successful or an error code on failure
+ */
+int
+ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params)
+{
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ device_t dev = sc->dev;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int vf_num_queues;
+ const void *mac;
+ size_t size;
+ int error;
+ int i;
+
+ vf = ice_iov_get_vf(sc, vfnum);
+ vf->vf_flags = VF_FLAG_ENABLED;
+
+ /* This VF needs at least one VSI */
+ vsi = ice_alloc_vsi(sc, ICE_VSI_VF);
+ if (vsi == NULL)
+ return (ENOMEM);
+ vf->vsi = vsi;
+ vsi->vf_num = vfnum;
+
+ vf_num_queues = nvlist_get_number(params, "num-queues");
+ /* Validate and clamp value if invalid */
+ if (vf_num_queues < 1 || vf_num_queues > ICE_MAX_SCATTERED_QUEUES)
+ device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
+ vf_num_queues, vf->vf_num);
+ if (vf_num_queues < 1) {
+ device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
+ vf_num_queues = 1;
+ } else if (vf_num_queues > ICE_MAX_SCATTERED_QUEUES) {
+ device_printf(dev, "Setting VF %d num-queues to %d\n",
+ vf->vf_num, ICE_MAX_SCATTERED_QUEUES);
+ vf_num_queues = ICE_MAX_SCATTERED_QUEUES;
+ }
+ vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
+
+ /* Reserve VF queue allocation from PF queues */
+ ice_alloc_vsi_qmap(vsi, vf_num_queues, vf_num_queues);
+ vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues;
+
+ /* Assign Tx queues from PF space */
+ error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
+ vsi->num_tx_queues);
+ if (error) {
+ device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n",
+ ice_err_str(error));
+ goto release_vsi;
+ }
+
+ /* Assign Rx queues from PF space */
+ error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
+ vsi->num_rx_queues);
+ if (error) {
+ device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n",
+ ice_err_str(error));
+ goto release_vsi;
+ }
+
+ vsi->max_frame_size = ICE_MAX_FRAME_SIZE;
+
+ /* Allocate queue structure memory */
+ vsi->tx_queues = (struct ice_tx_queue *)
+ malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE,
+ M_NOWAIT | M_ZERO);
+ if (!vsi->tx_queues) {
+ device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n",
+ vfnum);
+ error = ENOMEM;
+ goto release_vsi;
+ }
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
+ txq->me = i;
+ txq->vsi = vsi;
+ }
+
+ /* Allocate queue structure memory */
+ vsi->rx_queues = (struct ice_rx_queue *)
+ malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE,
+ M_NOWAIT | M_ZERO);
+ if (!vsi->rx_queues) {
+ device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n",
+ vfnum);
+ error = ENOMEM;
+ goto free_txqs;
+ }
+ for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) {
+ rxq->me = i;
+ rxq->vsi = vsi;
+ }
+
+ /* Allocate space to store the IRQ vector data */
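+	/*
+	 * One interrupt vector per queue pair, plus one extra; the extra
+	 * vector is presumably for the VF's mailbox/other-cause interrupt
+	 * (an assumption here; it matches the max_vectors value reported
+	 * to the VF in ice_vc_get_vf_res_msg()).
+	 */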
+ vf->num_irq_vectors = vf_num_queues + 1;
+ vf->tx_irqvs = (struct ice_irq_vector *)
+ malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
+ M_ICE, M_NOWAIT);
+ if (!vf->tx_irqvs) {
+ device_printf(sc->dev,
+ "Unable to allocate TX irqv memory for VF-%d's %d vectors\n",
+ vfnum, vf->num_irq_vectors);
+ error = ENOMEM;
+ goto free_rxqs;
+ }
+ vf->rx_irqvs = (struct ice_irq_vector *)
+ malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
+ M_ICE, M_NOWAIT);
+ if (!vf->rx_irqvs) {
+ device_printf(sc->dev,
+ "Unable to allocate RX irqv memory for VF-%d's %d vectors\n",
+ vfnum, vf->num_irq_vectors);
+ error = ENOMEM;
+ goto free_txirqvs;
+ }
+
+ /* Assign VF interrupts from PF space */
+ if (!(vf->vf_imap =
+ (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors,
+ M_ICE, M_NOWAIT))) {
+ device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum);
+ error = ENOMEM;
+ goto free_rxirqvs;
+ }
+ error = ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors);
+ if (error) {
+ device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n",
+ vfnum, ice_err_str(error));
+ goto free_imap;
+ }
+
+ if (nvlist_exists_binary(params, "mac-addr")) {
+ mac = nvlist_get_binary(params, "mac-addr", &size);
+ memcpy(vf->mac, mac, ETHER_ADDR_LEN);
+
+ if (nvlist_get_bool(params, "allow-set-mac"))
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+ } else
+ /*
+ * If the administrator has not specified a MAC address then
+ * we must allow the VF to choose one.
+ */
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+ if (nvlist_get_bool(params, "mac-anti-spoof"))
+ vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+ if (nvlist_get_bool(params, "allow-promisc"))
+ vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+ vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi");
+
+ vf->vlan_limit = nvlist_get_number(params, "max-vlan-allowed");
+ vf->mac_filter_limit = nvlist_get_number(params, "max-mac-filters");
+
+ vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+ /* Create and setup VSI in HW */
+ error = ice_initialize_vsi(vsi);
+ if (error) {
+ device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n",
+ vfnum, ice_err_str(error));
+ goto release_imap;
+ }
+
+ /* Add the broadcast address */
+ error = ice_add_vsi_mac_filter(vsi, broadcastaddr);
+ if (error) {
+ device_printf(sc->dev, "Unable to add broadcast filter VF %d VSI: %s\n",
+ vfnum, ice_err_str(error));
+ goto release_imap;
+ }
+
+ ice_iov_ready_vf(sc, vf);
+
+ return (0);
+
+release_imap:
+ ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap,
+ vf->num_irq_vectors);
+free_imap:
+ free(vf->vf_imap, M_ICE);
+ vf->vf_imap = NULL;
+free_rxirqvs:
+ free(vf->rx_irqvs, M_ICE);
+ vf->rx_irqvs = NULL;
+free_txirqvs:
+ free(vf->tx_irqvs, M_ICE);
+ vf->tx_irqvs = NULL;
+free_rxqs:
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+free_txqs:
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+release_vsi:
+ ice_release_vsi(vsi);
+ vf->vsi = NULL;
+ return (error);
+}
+
+/**
+ * ice_iov_uninit - Called by the OS when VFs are destroyed
+ * @sc: device softc structure
+ */
+void
+ice_iov_uninit(struct ice_softc *sc)
+{
+ struct ice_vf *vf;
+ struct ice_vsi *vsi;
+
+ /* Release per-VF resources */
+ for (int i = 0; i < sc->num_vfs; i++) {
+ vf = &sc->vfs[i];
+ vsi = vf->vsi;
+
+ /* Free VF interrupt reservation */
+ if (vf->vf_imap) {
+ free(vf->vf_imap, M_ICE);
+ vf->vf_imap = NULL;
+ }
+
+ /* Free queue interrupt mapping trackers */
+ if (vf->tx_irqvs) {
+ free(vf->tx_irqvs, M_ICE);
+ vf->tx_irqvs = NULL;
+ }
+ if (vf->rx_irqvs) {
+ free(vf->rx_irqvs, M_ICE);
+ vf->rx_irqvs = NULL;
+ }
+
+ if (!vsi)
+ continue;
+
+ /* Free VSI queues */
+ if (vsi->tx_queues) {
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ }
+ if (vsi->rx_queues) {
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+ }
+
+ ice_release_vsi(vsi);
+ vf->vsi = NULL;
+ }
+
+ /* Release memory used for VF tracking */
+ if (sc->vfs) {
+ free(sc->vfs, M_ICE);
+ sc->vfs = NULL;
+ }
+ sc->num_vfs = 0;
+}
+
+/**
+ * ice_iov_handle_vflr - Process VFLR event
+ * @sc: device softc structure
+ *
+ * Identifies which VFs have been reset and re-configures
+ * them.
+ */
+void
+ice_iov_handle_vflr(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct ice_vf *vf;
+ u32 reg, reg_idx, bit_idx;
+
+ for (int i = 0; i < sc->num_vfs; i++) {
+ vf = &sc->vfs[i];
+
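+		/*
+		 * Each VF owns one bit in the GLGEN_VFLRSTAT register
+		 * array, indexed by its absolute VF number (vf_base_id +
+		 * vf_num): 32 bits per register, so the quotient selects
+		 * the register and the remainder the bit within it.
+		 */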
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_num) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_num) % 32;
+ reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
+ if (reg & BIT(bit_idx))
+ ice_reset_vf(sc, vf, false);
+ }
+}
+
+/**
+ * ice_iov_ready_vf - Setup VF interrupts and mark it as ready
+ * @sc: device softc structure
+ * @vf: driver's VF structure for the VF to update
+ *
+ * Clears the VF reset triggering bit, sets up the PF<->VF interrupt
+ * mapping and marks the VF as active in the HW so that the VF
+ * driver can use it.
+ */
+static void
+ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf)
+{
+ struct ice_hw *hw = &sc->hw;
+ u32 reg;
+
+ /* Clear the triggering bit */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);
+
+ /* Setup VF interrupt allocation and mapping */
+ ice_iov_setup_intr_mapping(sc, vf);
+
+ /* Indicate to the VF that reset is done */
+ wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_reset_vf - Perform a hardware reset (VFR) on a VF
+ * @sc: device softc structure
+ * @vf: driver's VF structure for VF to be reset
+ * @trigger_vflr: whether to trigger a new reset or only handle an already
+ * executed reset
+ *
+ * Performs a VFR for the given VF. This function busy waits until the
+ * reset completes in the HW, notifies the VF that the reset is done
+ * by setting a bit in a HW register, then returns.
+ *
+ * @remark This also sets up the PF<->VF interrupt mapping and allocations in
+ * the hardware after the hardware reset is finished, via
+ * ice_iov_setup_intr_mapping()
+ */
+static void
+ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf, bool trigger_vflr)
+{
+ u16 global_vf_num, reg_idx, bit_idx;
+ struct ice_hw *hw = &sc->hw;
+ int status;
+ u32 reg;
+ int i;
+
+ global_vf_num = vf->vf_num + hw->func_caps.vf_base_id;
+
+ if (trigger_vflr) {
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);
+ }
+
+ /* clear the VFLR bit for the VF in a GLGEN_VFLRSTAT register */
+ reg_idx = (global_vf_num) / 32;
+ bit_idx = (global_vf_num) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ /* Wait until there are no pending PCI transactions */
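+	/*
+	 * PF_PCI_CIAA/PF_PCI_CIAD act as an indirect address/data pair
+	 * into the VF's PCI config space (an assumption based on their use
+	 * here): the write below selects the PCIe Device Status register
+	 * for this VF, and PF_PCI_CIAD is then polled until the Transaction
+	 * Pending bit clears, indicating the VF has quiesced.
+	 */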
+ wr32(hw, PF_PCI_CIAA,
+ ICE_PCIE_DEV_STATUS | (global_vf_num << PF_PCI_CIAA_VF_NUM_S));
+
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ if (!(reg & PCIEM_STA_TRANSACTION_PND))
+ break;
+
+ DELAY(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
+ if (i == ICE_PCI_CIAD_WAIT_COUNT)
+ device_printf(sc->dev,
+ "VF-%d PCI transactions stuck\n", vf->vf_num);
+
+ /* Disable TX queues, which is required during VF reset */
+ status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_num, NULL);
+ if (status)
+ device_printf(sc->dev,
+ "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+
+ /* Then check for the VF reset to finish in HW */
+ for (i = 0; i < ICE_VPGEN_VFRSTAT_WAIT_COUNT; i++) {
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num));
+ if ((reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ DELAY(ICE_VPGEN_VFRSTAT_WAIT_DELAY_US);
+ }
+ if (i == ICE_VPGEN_VFRSTAT_WAIT_COUNT)
+ device_printf(sc->dev,
+ "VF-%d Reset is stuck\n", vf->vf_num);
+
+ ice_iov_ready_vf(sc, vf);
+}
+
+/**
+ * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a message from the VF listing its supported capabilities, and
+ * replies to the VF with information about what resources the PF has
+ * allocated for the VF.
+ *
+ * @remark This always replies to the VF with a success status; it does not
+ * fail. It's up to the VF driver to reject or complain about the PF's response.
+ */
+static void
+ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vf_resource *vf_res;
+ struct virtchnl_vsi_resource *vsi_res;
+ u16 vf_res_len;
+ u32 vf_caps;
+
+	/* XXX: Only one VSI per VF is supported, so this size doesn't need adjusting */
+ vf_res_len = sizeof(struct virtchnl_vf_resource);
+ vf_res = (struct virtchnl_vf_resource *)malloc(vf_res_len, M_ICE,
+ M_WAITOK | M_ZERO);
+
+ vf_res->num_vsis = 1;
+ vf_res->num_queue_pairs = vf->vsi->num_tx_queues;
+ vf_res->max_vectors = vf_res->num_queue_pairs + 1;
+
+ vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE;
+ vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vf_res->max_mtu = 0;
+
+ vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS;
+ if (msg_buf != NULL) {
+ vf_caps = *((u32 *)(msg_buf));
+
+ if (vf_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+
+ if (vf_caps & VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_WB_ON_ITR;
+ }
+
+ vsi_res = &vf_res->vsi_res[0];
+ vsi_res->vsi_id = vf->vsi->idx;
+ vsi_res->num_queue_pairs = vf->vsi->num_tx_queues;
+ vsi_res->vsi_type = VIRTCHNL_VSI_SRIOV;
+ vsi_res->qset_handle = 0;
+ if (!ETHER_IS_ZERO(vf->mac))
+ memcpy(vsi_res->default_mac_addr, vf->mac, ETHER_ADDR_LEN);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)vf_res, vf_res_len, NULL);
+
+ free(vf_res, M_ICE);
+}
+
+/**
+ * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a version message from the VF, and responds to the VF with
+ * the version number that the PF will use.
+ *
+ * @remark This always replies to the VF with a success status; it does not
+ * fail.
+ */
+static void
+ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct virtchnl_version_info *recv_vf_version;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+
+ recv_vf_version = (struct virtchnl_version_info *)msg_buf;
+
+ /* VFs running the 1.0 API expect to get 1.0 back */
+ if (VF_IS_V10(recv_vf_version)) {
+ vf->version.major = 1;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ } else {
+ vf->version.major = VIRTCHNL_VERSION_MAJOR;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR;
+
+ if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
+ (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
+ device_printf(dev,
+ "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
+ __func__, vf->vf_num,
+ recv_vf_version->major, recv_vf_version->minor,
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
+ }
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version),
+ NULL);
+}
+
+/**
+ * ice_vf_validate_mac - Validate MAC address before adding it
+ * @vf: VF tracking structure
+ * @addr: MAC address to validate
+ *
+ * Validate a MAC address before adding it to a VF during the handling
+ * of a VIRTCHNL_OP_ADD_ETH_ADDR operation. Notably, this also checks if
+ * the VF is allowed to set its own arbitrary MAC addresses.
+ *
+ * @returns 0 if the MAC address is valid for the given VF
+ */
+static int
+ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr)
+{
+
+ if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
+ return (EINVAL);
+
+ /*
+ * If the VF is not allowed to change its MAC address, don't let it
+ * set a MAC filter for an address that is not a multicast address and
+ * is not its assigned MAC.
+ */
+ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+ !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN)))
+ return (EPERM);
+
+ return (0);
+}
+
+/**
+ * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a list of MAC addresses from the VF and adds those addresses
+ * to the VSI's filter list.
+ */
+static void
+ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct ice_hw *hw = &sc->hw;
+ u16 added_addr_cnt = 0;
+ int error = 0;
+
+ addr_list = (struct virtchnl_ether_addr_list *)msg_buf;
+
+ if (addr_list->num_elements >
+ (vf->mac_filter_limit - vf->mac_filter_cnt)) {
+ v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto done;
+ }
+
+ for (int i = 0; i < addr_list->num_elements; i++) {
+ u8 *addr = addr_list->list[i].addr;
+
+ /* The type flag is currently ignored; every MAC address is
+ * treated as the LEGACY type
+ */
+
+ error = ice_vf_validate_mac(vf, addr);
+ if (error == EPERM) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ } else if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ }
+
+ error = ice_add_vsi_mac_filter(vf->vsi, addr);
+ if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Error adding MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ }
+ /* Don't count VF's MAC against its MAC filter limit */
+ if (memcmp(addr, vf->mac, ETHER_ADDR_LEN))
+ added_addr_cnt++;
+ }
+
+ vf->mac_filter_cnt += added_addr_cnt;
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a list of MAC addresses from the VF and removes those addresses
+ * from the VSI's filter list.
+ */
+static void
+ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct ice_hw *hw = &sc->hw;
+ u16 deleted_addr_cnt = 0;
+ int error = 0;
+
+ addr_list = (struct virtchnl_ether_addr_list *)msg_buf;
+
+ for (int i = 0; i < addr_list->num_elements; i++) {
+ error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr);
+ if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Error removing MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ }
+ /* Don't count VF's MAC against its MAC filter limit */
+ if (memcmp(addr_list->list[i].addr, vf->mac, ETHER_ADDR_LEN))
+ deleted_addr_cnt++;
+ }
+
+ if (deleted_addr_cnt >= vf->mac_filter_cnt)
+ vf->mac_filter_cnt = 0;
+ else
+ vf->mac_filter_cnt -= deleted_addr_cnt;
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Adds the VLANs in msg_buf to the VF's VLAN filter list.
+ */
+static void
+ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vlan_filter_list *vlan_list;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;
+
+ if (vlan_list->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vlan_list->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ if (vlan_list->num_elements > (vf->vlan_limit - vf->vlan_cnt)) {
+ v_status = VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ goto done;
+ }
+
+ status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id,
+ vlan_list->num_elements);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+ vf->vlan_cnt += vlan_list->num_elements;
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Removes the VLANs in msg_buf from the VF's VLAN filter list.
+ */
+static void
+ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vlan_filter_list *vlan_list;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;
+
+ if (vlan_list->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vlan_list->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id,
+ vlan_list->num_elements);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+ if (vlan_list->num_elements >= vf->vlan_cnt)
+ vf->vlan_cnt = 0;
+ else
+ vf->vlan_cnt -= vlan_list->num_elements;
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_isvalid_ring_len - Check whether a descriptor ring length is valid
+ * @ring_len: length of ring
+ *
+ * Check whether a ring size value is valid.
+ *
+ * @returns true if the given ring size is valid
+ */
+static bool
+ice_vc_isvalid_ring_len(u16 ring_len)
+{
+ return (ring_len >= ICE_MIN_DESC_COUNT &&
+ ring_len <= ICE_MAX_DESC_COUNT &&
+ !(ring_len % ICE_DESC_COUNT_INCR));
+}
+
+/**
+ * ice_vc_cfg_vsi_qs_msg - Handle VIRTCHNL_OP_CONFIG_VSI_QUEUES msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ */
+static void
+ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ device_t dev = sc->dev;
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vsi_queue_config_info *vqci;
+ struct virtchnl_queue_pair_info *vqpi;
+ enum virtchnl_status_code status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ int i, error = 0;
+
+ vqci = (struct virtchnl_vsi_queue_config_info *)msg_buf;
+
+ if (vqci->num_queue_pairs > vf->vsi->num_tx_queues &&
+ vqci->num_queue_pairs > vf->vsi->num_rx_queues) {
+ status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ ice_vsi_disable_tx(vf->vsi);
+ ice_control_all_rx_queues(vf->vsi, false);
+
+ /*
+ * Clear TX and RX queues config in case VF
+ * requests different number of queues.
+ */
+ for (i = 0; i < vsi->num_tx_queues; i++) {
+ txq = &vsi->tx_queues[i];
+
+ txq->desc_count = 0;
+ txq->tx_paddr = 0;
+ txq->tc = 0;
+ }
+
+ for (i = 0; i < vsi->num_rx_queues; i++) {
+ rxq = &vsi->rx_queues[i];
+
+ rxq->desc_count = 0;
+ rxq->rx_paddr = 0;
+ }
+
+ vqpi = vqci->qpair;
+ for (i = 0; i < vqci->num_queue_pairs; i++, vqpi++) {
+ /* Initial parameter validation */
+ if (vqpi->txq.vsi_id != vf->vsi->idx ||
+ vqpi->rxq.vsi_id != vf->vsi->idx ||
+ vqpi->txq.queue_id != vqpi->rxq.queue_id ||
+ vqpi->txq.headwb_enabled ||
+ vqpi->rxq.splithdr_enabled ||
+ vqpi->rxq.crc_disable ||
+ !(ice_vc_isvalid_ring_len(vqpi->txq.ring_len)) ||
+ !(ice_vc_isvalid_ring_len(vqpi->rxq.ring_len))) {
+ status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ /* Copy parameters into VF's queue/VSI structs */
+ txq = &vsi->tx_queues[vqpi->txq.queue_id];
+
+ txq->desc_count = vqpi->txq.ring_len;
+ txq->tx_paddr = vqpi->txq.dma_ring_addr;
+ txq->q_handle = vqpi->txq.queue_id;
+ txq->tc = 0;
+
+ rxq = &vsi->rx_queues[vqpi->rxq.queue_id];
+
+ rxq->desc_count = vqpi->rxq.ring_len;
+ rxq->rx_paddr = vqpi->rxq.dma_ring_addr;
+ vsi->mbuf_sz = vqpi->rxq.databuffer_size;
+ }
+
+ /* Configure TX queues in HW */
+ error = ice_cfg_vsi_for_tx(vsi);
+ if (error) {
+ device_printf(dev,
+ "VF-%d: Unable to configure VSI for Tx: %s\n",
+ vf->vf_num, ice_err_str(error));
+ status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ goto done;
+ }
+
+ /* Configure RX queues in HW */
+ error = ice_cfg_vsi_for_rx(vsi);
+ if (error) {
+ device_printf(dev,
+ "VF-%d: Unable to configure VSI for Rx: %s\n",
+ vf->vf_num, ice_err_str(error));
+ status = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ ice_vsi_disable_tx(vsi);
+ goto done;
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_cfg_rss_key_msg - Handle VIRTCHNL_OP_CONFIG_RSS_KEY msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Sets the RSS key for the given VF, using the contents of msg_buf.
+ */
+static void
+ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_aqc_get_set_rss_keys keydata =
+ { .standard_rss_key = {0}, .extended_hash_key = {0} };
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_rss_key *vrk;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ vrk = (struct virtchnl_rss_key *)msg_buf;
+
+ if (vrk->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vrk->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ if ((vrk->key_len >
+ (ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE +
+ ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE)) ||
+ vrk->key_len == 0) {
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
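+	/*
+	 * The virtchnl key buffer holds the standard RSS key followed by
+	 * the extended hash key, which is why the length check above bounds
+	 * key_len by the sum of the two sizes; a single copy fills both
+	 * members of keydata.
+	 */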
+ memcpy(&keydata, vrk->key, vrk->key_len);
+
+ status = ice_aq_set_rss_key(hw, vsi->idx, &keydata);
+ if (status) {
+ device_printf(sc->dev,
+ "ice_aq_set_rss_key status %s, error %s\n",
+ ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_KEY,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_cfg_rss_lut_msg - Handle VIRTCHNL_OP_CONFIG_RSS_LUT msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Adds the LUT from the VF in msg_buf to the PF via an admin queue call.
+ */
+static void
+ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_rss_lut *vrl;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_aq_get_set_rss_lut_params lut_params = {};
+ struct ice_vsi *vsi = vf->vsi;
+
+ vrl = (struct virtchnl_rss_lut *)msg_buf;
+
+ if (vrl->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vrl->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ if (vrl->lut_entries > ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ lut_params.vsi_handle = vsi->idx;
+ lut_params.lut_size = vsi->rss_table_size;
+ lut_params.lut_type = vsi->rss_lut_type;
+ lut_params.lut = vrl->lut;
+ lut_params.global_lut_id = 0;
+
+ status = ice_aq_set_rss_lut(hw, &lut_params);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Cannot set RSS lut, err %s aq_err %s\n",
+ vf->vf_num, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_RSS_LUT,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_set_rss_hena_msg - Handle VIRTCHNL_OP_SET_RSS_HENA msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Adds the VF's hena (hash enable) bits as flow types to the PF's RSS flow
+ * type list.
+ */
+static void
+ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_rss_hena *vrh;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ MPASS(vsi != NULL);
+
+ vrh = (struct virtchnl_rss_hena *)msg_buf;
+
+ /*
+	 * Remove the existing configuration to make sure only the requested
+	 * config is applied, and to allow VFs to disable RSS completely.
+ */
+ status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
+ if (vrh->hena) {
+ /*
+		 * A failure to remove the old config is not fatal when a new
+		 * one is requested. Warn about it, but try to apply the new
+		 * config anyway.
+ */
+ if (status)
+ device_printf(sc->dev,
+ "ice_rem_vsi_rss_cfg status %s, error %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ status = ice_add_avf_rss_cfg(hw, vsi->idx, vrh->hena);
+ if (status)
+ device_printf(sc->dev,
+ "ice_add_avf_rss_cfg status %s, error %s\n",
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
+ v_status = ice_iov_err_to_virt_err(status);
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_SET_RSS_HENA,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_enable_queues_msg - Handle VIRTCHNL_OP_ENABLE_QUEUES msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Enables VF queues selected in msg_buf for Tx/Rx traffic.
+ *
+ * @remark Only actually operates on Rx queues; Tx queues are enabled in
+ * the CONFIG_VSI_QUEUES message handler.
+ */
+static void
+ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_queue_select *vqs;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+ int bit, error = 0;
+
+ vqs = (struct virtchnl_queue_select *)msg_buf;
+
+ if (vqs->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ __func__, vf->vf_num, vsi->idx, vqs->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ if (!vqs->rx_queues && !vqs->tx_queues) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message queue masks are empty\n",
+ __func__, vf->vf_num);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ /* Validate rx_queue mask */
+ bit = fls(vqs->rx_queues);
+ if (bit > vsi->num_rx_queues) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message's rx_queues map (0x%08x) has invalid bit set (%d)\n",
+ __func__, vf->vf_num, vqs->rx_queues, bit);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ /* Tx ring enable is handled in an earlier message. */
+ for_each_set_bit(bit, &vqs->rx_queues, 32) {
+ error = ice_control_rx_queue(vsi, bit, true);
+ if (error) {
+ device_printf(sc->dev,
+ "Unable to enable Rx ring %d for receive: %s\n",
+ bit, ice_err_str(error));
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ENABLE_QUEUES,
+ v_status, NULL, 0, NULL);
+}
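+
+/*
+ * Editor's sketch, not part of this commit: the fls() test above rejects
+ * any mask whose highest set bit is beyond the VF's queue range; fls() is
+ * 1-based, so with 4 Rx queues a mask of 0x000f passes (fls == 4) while
+ * 0x0010 fails (fls == 5). A hypothetical standalone form of the check:
+ */
+#if 0
+static bool
+example_rxq_mask_is_valid(u32 rxq_mask, int num_rx_queues)
+{
+	return (fls(rxq_mask) <= num_rx_queues);
+}
+#endif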
+
+/**
+ * ice_vc_disable_queues_msg - Handle VIRTCHNL_OP_DISABLE_QUEUES msg
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Disables all VF queues for the VF's VSI.
+ *
+ * @remark Unlike the ENABLE_QUEUES handler, this operates on both
+ * Tx and Rx queues.
+ */
+static void
+ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf __unused)
+{
+ struct ice_hw *hw = &sc->hw;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+ int error = 0;
+
+ error = ice_control_all_rx_queues(vsi, false);
+ if (error) {
+ device_printf(sc->dev,
+ "Unable to disable Rx rings for transmit: %s\n",
+ ice_err_str(error));
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ error = ice_vsi_disable_tx(vsi);
+ if (error) {
+ /* Already prints an error message */
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DISABLE_QUEUES,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_cfg_irq_map_msg - Handle VIRTCHNL_OP_CFG_IRQ_MAP msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Configures the interrupt vectors described in the message in msg_buf. The
+ * VF needs to send this message during init, so that queues can be allowed
+ * to generate interrupts.
+ */
+static void
+ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+#define ICE_VIRTCHNL_QUEUE_MAP_SIZE 16
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_irq_map_info *vimi;
+ struct virtchnl_vector_map *vvm;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+ u16 vector;
+
+ vimi = (struct virtchnl_irq_map_info *)msg_buf;
+
+ if (vimi->num_vectors > vf->num_irq_vectors) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message has more vectors (%d) than configured for VF (%d)\n",
+ __func__, vf->vf_num, vimi->num_vectors, vf->num_irq_vectors);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ vvm = vimi->vecmap;
+ /* Save off information from message */
+ for (int i = 0; i < vimi->num_vectors; i++, vvm++) {
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ int bit;
+
+ if (vvm->vsi_id != vf->vsi->idx) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message's VSI ID (%d) does not match VF's (%d) for vector %d\n",
+ __func__, vf->vf_num, vvm->vsi_id, vf->vsi->idx, i);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ /* vvm->vector_id is relative to VF space */
+ vector = vvm->vector_id;
+
+ if (vector >= vf->num_irq_vectors) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message's vector ID (%d) is greater than VF's max ID (%d)\n",
+ __func__, vf->vf_num, vector, vf->num_irq_vectors - 1);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ /* The Misc/Admin Queue vector doesn't need mapping */
+ if (vector == 0)
+ continue;
+
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &vvm->txq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
+ if (bit >= vsi->num_tx_queues) {
+ device_printf(sc->dev,
+ "%s: VF-%d: txq map has invalid bit set\n",
+ __func__, vf->vf_num);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ vf->tx_irqvs[vector].me = vector;
+
+ txq = &vsi->tx_queues[bit];
+ txq->irqv = &vf->tx_irqvs[vector];
+ txq->itr_idx = vvm->txitr_idx;
+ }
+ /* coverity[address_of] */
+ for_each_set_bit(bit, &vvm->rxq_map, ICE_VIRTCHNL_QUEUE_MAP_SIZE) {
+ if (bit >= vsi->num_rx_queues) {
+ device_printf(sc->dev,
+ "%s: VF-%d: rxq map has invalid bit set\n",
+ __func__, vf->vf_num);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+ vf->rx_irqvs[vector].me = vector;
+
+ rxq = &vsi->rx_queues[bit];
+ rxq->irqv = &vf->rx_irqvs[vector];
+ rxq->itr_idx = vvm->rxitr_idx;
+ }
+ }
+
+ /* Write to T/RQCTL registers to actually map vectors to queues */
+ for (int i = 0; i < vf->vsi->num_rx_queues; i++)
+ if (vsi->rx_queues[i].irqv != NULL)
+ ice_configure_rxq_interrupt(hw, vsi->rx_qmap[i],
+ vsi->rx_queues[i].irqv->me, vsi->rx_queues[i].itr_idx);
+
+ for (int i = 0; i < vf->vsi->num_tx_queues; i++)
+ if (vsi->tx_queues[i].irqv != NULL)
+ ice_configure_txq_interrupt(hw, vsi->tx_qmap[i],
+ vsi->tx_queues[i].irqv->me, vsi->tx_queues[i].itr_idx);
+
+ ice_flush(hw);
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ v_status, NULL, 0, NULL);
+}
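+
+/*
+ * Editor's note, not part of this commit: vector 0 is skipped above because
+ * it is reserved for the VF's mailbox/AdminQ interrupt, so only vectors
+ * 1..num_irq_vectors-1 carry queue traffic. As a worked example, a vecmap
+ * entry with vector_id 1 and txq_map 0x3 attaches Tx queues 0 and 1 to VF
+ * vector 1, and the T/RQCTL writes that follow commit that mapping to
+ * hardware.
+ */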
+
+/**
+ * ice_eth_stats_to_virtchnl_eth_stats - Convert stats for virtchnl
+ * @istats: VSI stats from HW to convert
+ * @vstats: stats struct to copy to
+ *
+ * This function copies all known stats in struct virtchnl_eth_stats from the
+ * input struct ice_eth_stats to an output struct virtchnl_eth_stats.
+ *
+ * @remark These two structure types currently have the same definition up to
+ * the size of struct virtchnl_eth_stats (on FreeBSD), but that could change
+ * in the future.
+ */
+static void
+ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats,
+ struct virtchnl_eth_stats *vstats)
+{
+ vstats->rx_bytes = istats->rx_bytes;
+ vstats->rx_unicast = istats->rx_unicast;
+ vstats->rx_multicast = istats->rx_multicast;
+ vstats->rx_broadcast = istats->rx_broadcast;
+ vstats->rx_discards = istats->rx_discards;
+ vstats->rx_unknown_protocol = istats->rx_unknown_protocol;
+ vstats->tx_bytes = istats->tx_bytes;
+ vstats->tx_unicast = istats->tx_unicast;
+ vstats->tx_multicast = istats->tx_multicast;
+ vstats->tx_broadcast = istats->tx_broadcast;
+ vstats->tx_discards = istats->tx_discards;
+ vstats->tx_errors = istats->tx_errors;
+}
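+
+/*
+ * Editor's sketch, not part of this commit: the layout assumption in the
+ * remark above could be guarded at compile time; the CTASSERT lines below
+ * are hypothetical examples, not checks the driver actually carries.
+ */
+#if 0
+CTASSERT(offsetof(struct virtchnl_eth_stats, rx_bytes) ==
+    offsetof(struct ice_eth_stats, rx_bytes));
+CTASSERT(offsetof(struct virtchnl_eth_stats, tx_errors) ==
+    offsetof(struct ice_eth_stats, tx_errors));
+#endif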
+
+/**
+ * ice_vc_get_stats_msg - Handle VIRTCHNL_OP_GET_STATS msg
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Updates the VF's VSI stats and sends those stats back to the VF.
+ */
+static void
+ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct virtchnl_queue_select *vqs;
+ struct virtchnl_eth_stats stats;
+ struct ice_vsi *vsi = vf->vsi;
+ struct ice_hw *hw = &sc->hw;
+
+ vqs = (struct virtchnl_queue_select *)msg_buf;
+
+ if (vqs->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "%s: VF-%d: message has invalid VSI ID %d (VF has VSI ID %d)\n",
+ __func__, vf->vf_num, vqs->vsi_id, vsi->idx);
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS,
+ VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL);
+		return;
+	}
+
+ ice_update_vsi_hw_stats(vf->vsi);
+ ice_eth_stats_to_virtchnl_eth_stats(&vsi->hw_stats.cur, &stats);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_STATS,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&stats,
+ sizeof(struct virtchnl_eth_stats), NULL);
+}
+
+/**
+ * ice_vc_cfg_promisc_mode_msg - Handle VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Configures the promiscuous modes for the given VSI in msg_buf.
+ */
+static void
+ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_promisc_info *vpi;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ int status = 0;
+ struct ice_vsi *vsi = vf->vsi;
+ ice_declare_bitmap(old_promisc_mask, ICE_PROMISC_MAX);
+ ice_declare_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
+ ice_declare_bitmap(clear_promisc_mask, ICE_PROMISC_MAX);
+ ice_declare_bitmap(set_promisc_mask, ICE_PROMISC_MAX);
+ ice_declare_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
+ u16 vid;
+
+ vpi = (struct virtchnl_promisc_info *)msg_buf;
+
+ /* Check to see if VF has permission to configure promiscuous mode */
+ if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
+ device_printf(sc->dev,
+ "VF-%d: attempted to configure promiscuous mode\n",
+ vf->vf_num);
+ /* Don't reply to VF with an error */
+ goto done;
+ }
+
+ if (vpi->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vpi->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ if (vpi->flags & ~ICE_VIRTCHNL_VALID_PROMISC_FLAGS) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid promiscuous flags set (valid 0x%02x, got 0x%02x)\n",
+ vf->vf_num, ICE_VIRTCHNL_VALID_PROMISC_FLAGS,
+ vpi->flags);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+	}
+
+ ice_zero_bitmap(req_promisc_mask, ICE_PROMISC_MAX);
+ /* Convert virtchnl flags to ice AQ promiscuous mode flags */
+ if (vpi->flags & FLAG_VF_UNICAST_PROMISC) {
+ ice_set_bit(ICE_PROMISC_UCAST_TX, req_promisc_mask);
+ ice_set_bit(ICE_PROMISC_UCAST_RX, req_promisc_mask);
+ }
+ if (vpi->flags & FLAG_VF_MULTICAST_PROMISC) {
+ ice_set_bit(ICE_PROMISC_MCAST_TX, req_promisc_mask);
+ ice_set_bit(ICE_PROMISC_MCAST_RX, req_promisc_mask);
+ }
+
+ status = ice_get_vsi_promisc(hw, vsi->idx, old_promisc_mask, &vid);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failed to get promiscuous mode mask for VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx,
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+ /* Figure out what got added and what got removed */
+ ice_zero_bitmap(old_req_xor_mask, ICE_PROMISC_MAX);
+ ice_xor_bitmap(old_req_xor_mask, old_promisc_mask, req_promisc_mask, ICE_PROMISC_MAX);
+ ice_and_bitmap(clear_promisc_mask, old_req_xor_mask, old_promisc_mask, ICE_PROMISC_MAX);
+ ice_and_bitmap(set_promisc_mask, old_req_xor_mask, req_promisc_mask, ICE_PROMISC_MAX);
+
+ if (ice_is_any_bit_set(clear_promisc_mask, ICE_PROMISC_MAX)) {
+ status = ice_clear_vsi_promisc(hw, vsi->idx,
+ clear_promisc_mask, 0);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failed to clear promiscuous mode for VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx,
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+ }
+
+ if (ice_is_any_bit_set(set_promisc_mask, ICE_PROMISC_MAX)) {
+ status = ice_set_vsi_promisc(hw, vsi->idx, set_promisc_mask, 0);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failed to set promiscuous mode for VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx,
+ ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ v_status, NULL, 0, NULL);
+}
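+
+/*
+ * Editor's note, not part of this commit: the three bitmap operations above
+ * compute a symmetric set difference. With plain integers:
+ *
+ *	old   = 0b0011 (unicast promisc currently on)
+ *	req   = 0b1100 (only multicast promisc requested)
+ *	xor   = old ^ req  = 0b1111
+ *	clear = xor & old  = 0b0011  (on now, no longer wanted)
+ *	set   = xor & req  = 0b1100  (wanted, not on yet)
+ */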
+
+/**
+ * ice_vc_notify_all_vfs_link_state - Notify all VFs of PF link state
+ * @sc: device private structure
+ *
+ * Sends a message to each VF with the current state of the PF's link.
+ * For more details, @see ice_vc_notify_vf_link_state.
+ */
+void
+ice_vc_notify_all_vfs_link_state(struct ice_softc *sc)
+{
+ for (int i = 0; i < sc->num_vfs; i++)
+ ice_vc_notify_vf_link_state(sc, &sc->vfs[i]);
+}
+
+/**
+ * ice_vc_notify_vf_link_state - Notify VF of PF link state
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ *
+ * Sends an event message to the specified VF with information about
+ * the current link state from the PF's port. This includes whether
+ * link is up or down, and the link speed in 100Mbps units.
+ */
+static void
+ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf)
+{
+ struct virtchnl_pf_event event = {};
+ struct ice_hw *hw = &sc->hw;
+
+ event.event = VIRTCHNL_EVENT_LINK_CHANGE;
+ event.severity = PF_EVENT_SEVERITY_INFO;
+ event.event_data.link_event_adv.link_status = sc->link_up;
+ event.event_data.link_event_adv.link_speed =
+ (u32)ice_conv_link_speed_to_virtchnl(true,
+ hw->port_info->phy.link_info.link_speed);
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_EVENT,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&event, sizeof(event), NULL);
+}
+
+/**
+ * ice_vc_handle_vf_msg - Handle a message from a VF
+ * @sc: device private structure
+ * @event: event received from the HW MBX queue
+ *
+ * Called whenever an event is received from a VF on the HW mailbox queue.
+ * Responsible for handling these messages as well as responding to the
+ * VF afterwards, depending on the received message type.
+ */
+void
+ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct ice_vf *vf;
+ int err = 0;
+
+ u32 v_opcode = event->desc.cookie_high;
+ u16 v_id = event->desc.retval;
+ u8 *msg = event->msg_buf;
+ u16 msglen = event->msg_len;
+
+ if (v_id >= sc->num_vfs) {
+ device_printf(dev, "%s: Received msg from invalid VF-%d: opcode %d, len %d\n",
+ __func__, v_id, v_opcode, msglen);
+ return;
+ }
+
+ vf = &sc->vfs[v_id];
+
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->version, v_opcode, msg, msglen);
+ if (err) {
+ device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
+ __func__, vf->vf_num, v_opcode, msglen, err);
+ ice_aq_send_msg_to_vf(hw, v_id, v_opcode, VIRTCHNL_STATUS_ERR_PARAM, NULL, 0, NULL);
+ return;
+ }
+
+ switch (v_opcode) {
+ case VIRTCHNL_OP_VERSION:
+ ice_vc_version_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_RESET_VF:
+ ice_reset_vf(sc, vf, true);
+ break;
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ ice_vc_get_vf_res_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_ETH_ADDR:
+ ice_vc_add_eth_addr_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_ETH_ADDR:
+ ice_vc_del_eth_addr_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_VLAN:
+ ice_vc_add_vlan_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_VLAN:
+ ice_vc_del_vlan_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ice_vc_cfg_vsi_qs_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ice_vc_cfg_rss_key_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ice_vc_cfg_rss_lut_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_SET_RSS_HENA:
+ ice_vc_set_rss_hena_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_ENABLE_QUEUES:
+ ice_vc_enable_queues_msg(sc, vf, msg);
+ ice_vc_notify_vf_link_state(sc, vf);
+ break;
+ case VIRTCHNL_OP_DISABLE_QUEUES:
+ ice_vc_disable_queues_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ice_vc_cfg_irq_map_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_GET_STATS:
+ ice_vc_get_stats_msg(sc, vf, msg);
+ break;
+ case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ice_vc_cfg_promisc_mode_msg(sc, vf, msg);
+ break;
+ default:
+ device_printf(dev, "%s: Received unknown msg from VF-%d: opcode %d, len %d\n",
+ __func__, vf->vf_num, v_opcode, msglen);
+ ice_aq_send_msg_to_vf(hw, v_id, v_opcode,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED, NULL, 0, NULL);
+ break;
+ }
+}
+
+/**
+ * ice_iov_setup_intr_mapping - Setup interrupt config for a VF
+ * @sc: device softc structure
+ * @vf: driver's VF structure for VF to be configured
+ *
+ * Before a VF can be used, and after a VF reset, the PF must configure
+ * the VF's interrupt allocation registers. This includes allocating
+ * interrupts from the PF's interrupt pool to the VF using the
+ * VPINT_ALLOC(_PCI) registers, and setting up a mapping from PF vectors
+ * to VF vectors in GLINT_VECT2FUNC.
+ *
+ * This also sets up the queue allocation registers and maps the mailbox
+ * interrupt for the VF.
+ */
+static void
+ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct ice_vsi *vsi = vf->vsi;
+ u16 v;
+
+ /* Calculate indices for register ops below */
+ u16 vf_first_irq_idx = vf->vf_imap[0];
+ u16 vf_last_irq_idx = (vf_first_irq_idx + vf->num_irq_vectors) - 1;
+ u16 abs_vf_first_irq_idx = hw->func_caps.common_cap.msix_vector_first_id +
+ vf_first_irq_idx;
+ u16 abs_vf_last_irq_idx = (abs_vf_first_irq_idx + vf->num_irq_vectors) - 1;
+ u16 abs_vf_num = vf->vf_num + hw->func_caps.vf_base_id;
+
+ /* Map out VF interrupt allocation in global device space. Both
+ * VPINT_ALLOC and VPINT_ALLOC_PCI use the same values.
+ */
+ wr32(hw, VPINT_ALLOC(vf->vf_num),
+ (((abs_vf_first_irq_idx << VPINT_ALLOC_FIRST_S) & VPINT_ALLOC_FIRST_M) |
+ ((abs_vf_last_irq_idx << VPINT_ALLOC_LAST_S) & VPINT_ALLOC_LAST_M) |
+ VPINT_ALLOC_VALID_M));
+ wr32(hw, VPINT_ALLOC_PCI(vf->vf_num),
+ (((abs_vf_first_irq_idx << VPINT_ALLOC_PCI_FIRST_S) & VPINT_ALLOC_PCI_FIRST_M) |
+ ((abs_vf_last_irq_idx << VPINT_ALLOC_PCI_LAST_S) & VPINT_ALLOC_PCI_LAST_M) |
+ VPINT_ALLOC_PCI_VALID_M));
+
+ /* Create inverse mapping of vectors to PF/VF combinations */
+ for (v = vf_first_irq_idx; v <= vf_last_irq_idx; v++)
+ {
+ wr32(hw, GLINT_VECT2FUNC(v),
+ (((abs_vf_num << GLINT_VECT2FUNC_VF_NUM_S) & GLINT_VECT2FUNC_VF_NUM_M) |
+ ((hw->pf_id << GLINT_VECT2FUNC_PF_NUM_S) & GLINT_VECT2FUNC_PF_NUM_M)));
+ }
+
+ /* Map mailbox interrupt to MSI-X index 0. Disable ITR for it, too. */
+ wr32(hw, VPINT_MBX_CTL(abs_vf_num),
+ ((0 << VPINT_MBX_CTL_MSIX_INDX_S) & VPINT_MBX_CTL_MSIX_INDX_M) |
+ ((0x3 << VPINT_MBX_CTL_ITR_INDX_S) & VPINT_MBX_CTL_ITR_INDX_M) |
+ VPINT_MBX_CTL_CAUSE_ENA_M);
+
+ /* Mark the TX queue mapping registers as valid */
+ wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_num), VPLAN_TXQ_MAPENA_TX_ENA_M);
+
+ /* Indicate to HW that VF has scattered queue allocation */
+ wr32(hw, VPLAN_TX_QBASE(vf->vf_num), VPLAN_TX_QBASE_VFQTABLE_ENA_M);
+ for (int i = 0; i < vsi->num_tx_queues; i++) {
+ wr32(hw, VPLAN_TX_QTABLE(i, vf->vf_num),
+ (vsi->tx_qmap[i] << VPLAN_TX_QTABLE_QINDEX_S) & VPLAN_TX_QTABLE_QINDEX_M);
+ }
+
+ /* Mark the RX queue mapping registers as valid */
+ wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_num), VPLAN_RXQ_MAPENA_RX_ENA_M);
+ wr32(hw, VPLAN_RX_QBASE(vf->vf_num), VPLAN_RX_QBASE_VFQTABLE_ENA_M);
+ for (int i = 0; i < vsi->num_rx_queues; i++) {
+ wr32(hw, VPLAN_RX_QTABLE(i, vf->vf_num),
+ (vsi->rx_qmap[i] << VPLAN_RX_QTABLE_QINDEX_S) & VPLAN_RX_QTABLE_QINDEX_M);
+ }
+}
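+
+/*
+ * Editor's sketch, not part of this commit: every register write above uses
+ * the same shift-and-mask idiom to place a value into a named field. A
+ * hypothetical helper macro makes the pattern explicit (ICE_SET_FIELD is
+ * not a macro the driver defines):
+ */
+#if 0
+#define ICE_SET_FIELD(val, field)	(((val) << field##_S) & field##_M)
+
+	wr32(hw, VPINT_ALLOC(vf->vf_num),
+	    ICE_SET_FIELD(abs_vf_first_irq_idx, VPINT_ALLOC_FIRST) |
+	    ICE_SET_FIELD(abs_vf_last_irq_idx, VPINT_ALLOC_LAST) |
+	    VPINT_ALLOC_VALID_M);
+#endif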
+
+/**
+ * ice_iov_err_to_virt_err - translate ice errors into virtchnl errors
+ * @ice_err: status returned from ice function
+ */
+static enum virtchnl_status_code
+ice_iov_err_to_virt_err(int ice_err)
+{
+ switch (ice_err) {
+ case 0:
+ return VIRTCHNL_STATUS_SUCCESS;
+ case ICE_ERR_BAD_PTR:
+ case ICE_ERR_INVAL_SIZE:
+ case ICE_ERR_DEVICE_NOT_SUPPORTED:
+ case ICE_ERR_PARAM:
+ case ICE_ERR_CFG:
+ return VIRTCHNL_STATUS_ERR_PARAM;
+ case ICE_ERR_NO_MEMORY:
+ return VIRTCHNL_STATUS_ERR_NO_MEMORY;
+ case ICE_ERR_NOT_READY:
+ case ICE_ERR_RESET_FAILED:
+ case ICE_ERR_FW_API_VER:
+ case ICE_ERR_AQ_ERROR:
+ case ICE_ERR_AQ_TIMEOUT:
+ case ICE_ERR_AQ_FULL:
+ case ICE_ERR_AQ_NO_WORK:
+ case ICE_ERR_AQ_EMPTY:
+ return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
+ default:
+ return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ }
+}
diff --git a/sys/dev/ice/ice_iov.h b/sys/dev/ice/ice_iov.h
new file mode 100644
index 000000000000..c4fb3e932e3f
--- /dev/null
+++ b/sys/dev/ice/ice_iov.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file ice_iov.h
+ * @brief header for IOV functionality
+ *
+ * This header includes definitions used to implement device Virtual Functions
+ * for the ice driver.
+ */
+
+#ifndef _ICE_IOV_H_
+#define _ICE_IOV_H_
+
+#include <sys/types.h>
+#include <sys/bus.h>
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#include <sys/param.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcireg.h>
+
+#include <dev/pci/pci_iov.h>
+
+#include "ice_iflib.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * @enum ice_vf_flags
+ * @brief VF state flags
+ *
+ * Used to indicate the status of a PF's VF, as well as what each VF is
+ * capable of. Intended to be modified only using atomic operations, so
+ * they can be read and modified in places that aren't locked.
+ *
+ * Used in struct ice_vf's vf_flags field.
+ */
+enum ice_vf_flags {
+ VF_FLAG_ENABLED = BIT(0),
+ VF_FLAG_SET_MAC_CAP = BIT(1),
+ VF_FLAG_VLAN_CAP = BIT(2),
+ VF_FLAG_PROMISC_CAP = BIT(3),
+ VF_FLAG_MAC_ANTI_SPOOF = BIT(4),
+};
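+
+/*
+ * Editor's sketch, not part of this commit: per the remark above, flag
+ * updates are expected to use atomic(9) operations so that readers need
+ * no lock, e.g.:
+ */
+#if 0
+	atomic_set_32(&vf->vf_flags, VF_FLAG_ENABLED);
+	if (atomic_load_acq_32(&vf->vf_flags) & VF_FLAG_PROMISC_CAP) {
+		/* VF is allowed to request promiscuous mode */
+	}
+	atomic_clear_32(&vf->vf_flags, VF_FLAG_ENABLED);
+#endif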
+
+/**
+ * @struct ice_vf
+ * @brief PF's VF software context
+ *
+ * Represents the state and options for a VF spawned from a PF.
+ */
+struct ice_vf {
+ struct ice_vsi *vsi;
+ u32 vf_flags;
+
+ u8 mac[ETHER_ADDR_LEN];
+ u16 vf_num;
+ struct virtchnl_version_info version;
+
+ u16 mac_filter_limit;
+ u16 mac_filter_cnt;
+ u16 vlan_limit;
+ u16 vlan_cnt;
+
+ u16 num_irq_vectors;
+ u16 *vf_imap;
+ struct ice_irq_vector *tx_irqvs;
+ struct ice_irq_vector *rx_irqvs;
+};
+
+#define ICE_PCIE_DEV_STATUS 0xAA
+
+#define ICE_PCI_CIAD_WAIT_COUNT 100
+#define ICE_PCI_CIAD_WAIT_DELAY_US 1
+#define ICE_VPGEN_VFRSTAT_WAIT_COUNT 100
+#define ICE_VPGEN_VFRSTAT_WAIT_DELAY_US 20
+
+#define ICE_VIRTCHNL_VALID_PROMISC_FLAGS (FLAG_VF_UNICAST_PROMISC | \
+ FLAG_VF_MULTICAST_PROMISC)
+
+#define ICE_DEFAULT_VF_VLAN_LIMIT 64
+#define ICE_DEFAULT_VF_FILTER_LIMIT 16
+
+int ice_iov_attach(struct ice_softc *sc);
+int ice_iov_detach(struct ice_softc *sc);
+
+int ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params);
+int ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params);
+void ice_iov_uninit(struct ice_softc *sc);
+
+void ice_iov_handle_vflr(struct ice_softc *sc);
+
+void ice_vc_handle_vf_msg(struct ice_softc *sc, struct ice_rq_event_info *event);
+void ice_vc_notify_all_vfs_link_state(struct ice_softc *sc);
+
+#endif /* _ICE_IOV_H_ */
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 860958bffbaf..eedacdab0216 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -630,7 +630,7 @@ enum ice_rxdid {
ICE_RXDID_LAST = 63,
};
-/* Recceive Flex descriptor Dword Index */
+/* Receive Flex descriptor Dword Index */
enum ice_flex_word {
ICE_RX_FLEX_DWORD_0 = 0,
ICE_RX_FLEX_DWORD_1,
@@ -948,10 +948,9 @@ struct ice_tx_ctx_desc {
__le64 qw1;
};
-#define ICE_TX_GCS_DESC_START 0 /* 7 BITS */
-#define ICE_TX_GCS_DESC_OFFSET 7 /* 4 BITS */
-#define ICE_TX_GCS_DESC_TYPE 11 /* 2 BITS */
-#define ICE_TX_GCS_DESC_ENA 13 /* 1 BIT */
+#define ICE_TX_GCS_DESC_START 0 /* 8 BITS */
+#define ICE_TX_GCS_DESC_OFFSET 8 /* 4 BITS */
+#define ICE_TX_GCS_DESC_TYPE 12 /* 3 BITS */
#define ICE_TXD_CTX_QW1_DTYPE_S 0
#define ICE_TXD_CTX_QW1_DTYPE_M (0xFUL << ICE_TXD_CTX_QW1_DTYPE_S)
@@ -2375,4 +2374,5 @@ static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
#define ICE_LINK_SPEED_40000MBPS 40000
#define ICE_LINK_SPEED_50000MBPS 50000
#define ICE_LINK_SPEED_100000MBPS 100000
+#define ICE_LINK_SPEED_200000MBPS 200000
#endif /* _ICE_LAN_TX_RX_H_ */
diff --git a/sys/dev/ice/ice_lib.c b/sys/dev/ice/ice_lib.c
index 659412450fce..8b6349f686eb 100644
--- a/sys/dev/ice/ice_lib.c
+++ b/sys/dev/ice/ice_lib.c
@@ -42,6 +42,9 @@
#include "ice_lib.h"
#include "ice_iflib.h"
+#ifdef PCI_IOV
+#include "ice_iov.h"
+#endif
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/resource.h>
@@ -79,6 +82,7 @@ static void ice_process_link_event(struct ice_softc *sc, struct ice_rq_event_inf
static void ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
struct ice_rq_event_info *event);
static void ice_nvm_version_str(struct ice_hw *hw, struct sbuf *buf);
+static void ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors);
static void ice_active_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static void ice_os_pkg_version_str(struct ice_hw *hw, struct sbuf *buf);
static bool ice_filter_is_mcast(struct ice_vsi *vsi, struct ice_fltr_info *info);
@@ -181,7 +185,22 @@ static bool ice_dscp_is_mapped(struct ice_dcbx_cfg *dcbcfg);
static void ice_start_dcbx_agent(struct ice_softc *sc);
static u16 ice_fw_debug_dump_print_cluster(struct ice_softc *sc,
struct sbuf *sbuf, u16 cluster_id);
+static void ice_fw_debug_dump_print_clusters(struct ice_softc *sc,
+ struct sbuf *sbuf);
static void ice_remove_vsi_mirroring(struct ice_vsi *vsi);
+static int ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization *ptr);
+static int ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id,
+ u32 reg_offset, u16 *output);
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ice_fec_stats_to_sysctl *fec_stats);
+static bool ice_is_serdes_muxed(struct ice_hw *hw);
+static int ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed);
+static int ice_update_port_topology(u8 lport,
+ struct ice_port_topology *port_topology,
+ bool is_muxed);
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology);
static int ice_module_init(void);
static int ice_module_exit(void);
@@ -231,6 +250,7 @@ static int ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS);
+static int ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS);
static int ice_sysctl_pfc_config(SYSCTL_HANDLER_ARGS);
@@ -313,6 +333,10 @@ ice_set_ctrlq_len(struct ice_hw *hw)
hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
+ hw->sbq.num_rq_entries = ICE_SBQ_LEN;
+ hw->sbq.num_sq_entries = ICE_SBQ_LEN;
+ hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
+ hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;
}
/**
@@ -426,31 +450,21 @@ ice_setup_pf_vsi(struct ice_softc *sc)
* all queues for this VSI are not yet assigned an index and thus,
* not ready for use.
*
- * Returns an error code on failure.
*/
-int
+void
ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues)
{
- struct ice_softc *sc = vsi->sc;
int i;
MPASS(max_tx_queues > 0);
MPASS(max_rx_queues > 0);
/* Allocate Tx queue mapping memory */
- if (!(vsi->tx_qmap =
- (u16 *) malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK))) {
- device_printf(sc->dev, "Unable to allocate Tx qmap memory\n");
- return (ENOMEM);
- }
+ vsi->tx_qmap = malloc(sizeof(u16) * max_tx_queues, M_ICE, M_WAITOK);
/* Allocate Rx queue mapping memory */
- if (!(vsi->rx_qmap =
- (u16 *) malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK))) {
- device_printf(sc->dev, "Unable to allocate Rx qmap memory\n");
- goto free_tx_qmap;
- }
+ vsi->rx_qmap = malloc(sizeof(u16) * max_rx_queues, M_ICE, M_WAITOK);
/* Mark every queue map as invalid to start with */
for (i = 0; i < max_tx_queues; i++) {
@@ -459,14 +473,6 @@ ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
for (i = 0; i < max_rx_queues; i++) {
vsi->rx_qmap[i] = ICE_INVALID_RES_IDX;
}
-
- return 0;
-
-free_tx_qmap:
- free(vsi->tx_qmap, M_ICE);
- vsi->tx_qmap = NULL;
-
- return (ENOMEM);
}
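
/*
 * Editor's note, not part of this commit: with M_WAITOK, malloc(9) sleeps
 * until memory is available and cannot return NULL, which is why the NULL
 * checks and the unwind path were dropped above. An overflow-checked
 * variant could use mallocarray(9), e.g. (illustrative only):
 *
 *	vsi->tx_qmap = mallocarray(max_tx_queues, sizeof(u16), M_ICE,
 *	    M_WAITOK);
 */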
/**
@@ -646,7 +652,7 @@ ice_setup_vsi_mirroring(struct ice_vsi *vsi)
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u16 rule_id, dest_vsi;
u16 count = 1;
@@ -692,7 +698,7 @@ static void
ice_remove_vsi_mirroring(struct ice_vsi *vsi)
{
struct ice_hw *hw = &vsi->sc->hw;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
bool keep_alloc = false;
if (vsi->rule_mir_ingress != ICE_INVAL_MIRROR_RULE_ID)
@@ -702,7 +708,7 @@ ice_remove_vsi_mirroring(struct ice_vsi *vsi)
device_printf(vsi->sc->dev, "Could not remove mirror VSI ingress rule, err %s aq_err %s\n",
ice_status_str(status), ice_aq_str(hw->adminq.sq_last_status));
- status = ICE_SUCCESS;
+ status = 0;
if (vsi->rule_mir_egress != ICE_INVAL_MIRROR_RULE_ID)
status = ice_aq_delete_mir_rule(hw, vsi->rule_mir_egress, keep_alloc, NULL);
@@ -727,7 +733,7 @@ ice_initialize_vsi(struct ice_vsi *vsi)
struct ice_vsi_ctx ctx = { 0 };
struct ice_hw *hw = &vsi->sc->hw;
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- enum ice_status status;
+ int status;
int err;
/* For now, we only have code supporting PF VSIs */
@@ -738,6 +744,12 @@ ice_initialize_vsi(struct ice_vsi *vsi)
case ICE_VSI_VMDQ2:
ctx.flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
+#ifdef PCI_IOV
+ case ICE_VSI_VF:
+ ctx.flags = ICE_AQ_VSI_TYPE_VF;
+ ctx.vf_num = vsi->vf_num;
+ break;
+#endif
default:
return (ENODEV);
}
@@ -801,7 +813,7 @@ ice_deinit_vsi(struct ice_vsi *vsi)
struct ice_vsi_ctx ctx = { 0 };
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* Assert that the VSI pointer matches in the list */
MPASS(vsi == sc->all_vsi[vsi->idx]);
@@ -881,6 +893,8 @@ uint64_t
ice_aq_speed_to_rate(struct ice_port_info *pi)
{
switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ return IF_Gbps(200);
case ICE_AQ_LINK_SPEED_100GB:
return IF_Gbps(100);
case ICE_AQ_LINK_SPEED_50GB:
@@ -919,6 +933,8 @@ static const char *
ice_aq_speed_to_str(struct ice_port_info *pi)
{
switch (pi->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ return "200 Gbps";
case ICE_AQ_LINK_SPEED_100GB:
return "100 Gbps";
case ICE_AQ_LINK_SPEED_50GB:
@@ -1116,6 +1132,26 @@ ice_get_phy_type_high(uint64_t phy_type_high)
return IFM_100G_AUI2_AC;
case ICE_PHY_TYPE_HIGH_100G_AUI2:
return IFM_100G_AUI2;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ return IFM_200G_CR4_PAM4;
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ return IFM_200G_SR4;
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ return IFM_200G_FR4;
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ return IFM_200G_LR4;
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ return IFM_200G_DR4;
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ return IFM_200G_KR4_PAM4;
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ return IFM_200G_AUI4_AC;
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ return IFM_200G_AUI4;
+ case ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC:
+ return IFM_200G_AUI8_AC;
+ case ICE_PHY_TYPE_HIGH_200G_AUI8:
+ return IFM_200G_AUI8;
default:
return IFM_UNKNOWN;
}
@@ -1210,7 +1246,17 @@ ice_phy_types_to_max_rate(struct ice_port_info *pi)
IF_Gbps(100ULL),
IF_Gbps(100ULL),
IF_Gbps(100ULL),
- IF_Gbps(100ULL)
+ IF_Gbps(100ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
+ IF_Gbps(200ULL),
};
/* coverity[address_of] */
@@ -1244,12 +1290,12 @@ ice_phy_types_to_max_rate(struct ice_port_info *pi)
* @pre this function must be protected from being called while another thread
* is accessing the ifmedia types.
*/
-enum ice_status
+int
ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
{
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
struct ice_port_info *pi = sc->hw.port_info;
- enum ice_status status;
+ int status;
uint64_t phy_low, phy_high;
int bit;
@@ -1266,7 +1312,7 @@ ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(sc->dev,
"%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -1323,7 +1369,7 @@ ice_add_media_types(struct ice_softc *sc, struct ifmedia *media)
ifmedia_add(media, IFM_ETHER | IFM_AUTO, 0, NULL);
ifmedia_set(media, IFM_ETHER | IFM_AUTO);
- return (ICE_SUCCESS);
+ return (0);
}
/**
@@ -1570,6 +1616,12 @@ ice_setup_tx_ctx(struct ice_tx_queue *txq, struct ice_tlan_ctx *tlan_ctx, u16 pf
case ICE_VSI_VMDQ2:
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
break;
+#ifdef PCI_IOV
+ case ICE_VSI_VF:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
+ tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_num;
+ break;
+#endif
default:
return (ENODEV);
}
@@ -1607,7 +1659,7 @@ ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
struct ice_aqc_add_tx_qgrp *qg;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int i;
int err = 0;
u16 qg_size, pf_q;
@@ -1623,6 +1675,10 @@ ice_cfg_vsi_for_tx(struct ice_vsi *vsi)
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_tx_queue *txq = &vsi->tx_queues[i];
+ /* Last configured queue */
+ if (txq->desc_count == 0)
+ break;
+
pf_q = vsi->tx_qmap[txq->me];
qg->txqs[0].txq_id = htole16(pf_q);
@@ -1672,7 +1728,7 @@ ice_setup_rx_ctx(struct ice_rx_queue *rxq)
struct ice_vsi *vsi = rxq->vsi;
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u32 rxdid = ICE_RXDID_FLEX_NIC;
u32 regval;
u16 pf_q;
@@ -1751,6 +1807,10 @@ ice_cfg_vsi_for_rx(struct ice_vsi *vsi)
for (i = 0; i < vsi->num_rx_queues; i++) {
MPASS(vsi->mbuf_sz > 0);
+ /* Last configured queue */
+ if (vsi->rx_queues[i].desc_count == 0)
+ break;
+
err = ice_setup_rx_ctx(&vsi->rx_queues[i]);
if (err)
return err;
@@ -1954,7 +2014,7 @@ ice_add_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
struct ice_list_head mac_addr_list;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&mac_addr_list);
@@ -2024,7 +2084,7 @@ ice_remove_vsi_mac_filter(struct ice_vsi *vsi, const u8 *addr)
struct ice_list_head mac_addr_list;
struct ice_hw *hw = &vsi->sc->hw;
device_t dev = vsi->sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&mac_addr_list);
@@ -2150,7 +2210,7 @@ ice_process_link_event(struct ice_softc *sc,
struct ice_port_info *pi = sc->hw.port_info;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
/* Sanity check that the data length isn't too small */
MPASS(le16toh(e->desc.datalen) >= ICE_GET_LINK_STATUS_DATALEN_V1);
@@ -2185,7 +2245,7 @@ ice_process_link_event(struct ice_softc *sc,
if (!(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
if (!ice_testandset_state(&sc->state, ICE_STATE_NO_MEDIA)) {
status = ice_aq_set_link_restart_an(pi, false, NULL);
- if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
+ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -2220,6 +2280,11 @@ ice_process_ctrlq_event(struct ice_softc *sc, const char *qname,
case ice_aqc_opc_get_link_status:
ice_process_link_event(sc, event);
break;
+#ifdef PCI_IOV
+ case ice_mbx_opc_send_msg_to_pf:
+ ice_vc_handle_vf_msg(sc, event);
+ break;
+#endif
case ice_aqc_opc_fw_logs_event:
ice_handle_fw_log_event(sc, &event->desc, event->msg_buf);
break;
@@ -2255,7 +2320,7 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
struct ice_rq_event_info event = { { 0 } };
struct ice_hw *hw = &sc->hw;
struct ice_ctl_q_info *cq;
- enum ice_status status;
+ int status;
const char *qname;
int loop = 0;
@@ -2264,6 +2329,10 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
cq = &hw->adminq;
qname = "Admin";
break;
+ case ICE_CTL_Q_SB:
+ cq = &hw->sbq;
+ qname = "Sideband";
+ break;
case ICE_CTL_Q_MAILBOX:
cq = &hw->mailboxq;
qname = "Mailbox";
@@ -2295,14 +2364,9 @@ ice_process_ctrlq(struct ice_softc *sc, enum ice_ctl_q q_type, u16 *pending)
if (status == ICE_ERR_AQ_NO_WORK)
break;
if (status) {
- if (q_type == ICE_CTL_Q_ADMIN)
- device_printf(sc->dev,
- "%s Receive Queue event error %s\n",
- qname, ice_status_str(status));
- else
- device_printf(sc->dev,
- "%s Receive Queue event error %s\n",
- qname, ice_status_str(status));
+ device_printf(sc->dev,
+ "%s Receive Queue event error %s\n",
+ qname, ice_status_str(status));
free(event.msg_buf, M_ICE);
return (EIO);
}
@@ -2497,6 +2561,22 @@ ice_print_nvm_version(struct ice_softc *sc)
}
/**
+ * ice_update_port_oversize - Update port oversize stats
+ * @sc: device private structure
+ * @rx_errors: VSI error drops
+ *
+ * Sums ERROR_CNT from the GLV_REPC VSI register and the rx_oversize stats
+ * counter into the rx_roc_error soft stat.
+ */
+static void
+ice_update_port_oversize(struct ice_softc *sc, u64 rx_errors)
+{
+ struct ice_hw_port_stats *cur_ps;
+ cur_ps = &sc->stats.cur;
+
+ sc->soft_stats.rx_roc_error = rx_errors + cur_ps->rx_oversize;
+}
+
+/**
* ice_update_vsi_hw_stats - Update VSI-specific ethernet statistics counters
* @vsi: the VSI to be updated
*
@@ -2540,7 +2620,7 @@ ice_update_vsi_hw_stats(struct ice_vsi *vsi)
ice_stat_update_repc(hw, vsi->idx, vsi->hw_stats.offsets_loaded,
cur_es);
-
+ ice_update_port_oversize(vsi->sc, cur_es->rx_errors);
#undef ICE_VSI_STAT40
#undef ICE_VSI_STAT32
@@ -2722,7 +2802,7 @@ ice_sysctl_pba_number(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
u8 pba_string[32] = "";
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(arg2);
@@ -2914,7 +2994,17 @@ static const uint16_t phy_link_speeds[] = {
ICE_AQ_LINK_SPEED_100GB,
ICE_AQ_LINK_SPEED_100GB,
ICE_AQ_LINK_SPEED_100GB,
- ICE_AQ_LINK_SPEED_100GB
+ ICE_AQ_LINK_SPEED_100GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
+ ICE_AQ_LINK_SPEED_200GB,
};
#define ICE_SYSCTL_HELP_ADVERTISE_SPEED \
@@ -2932,6 +3022,7 @@ static const uint16_t phy_link_speeds[] = {
"\n\t 0x100 - 40G" \
"\n\t 0x200 - 50G" \
"\n\t 0x400 - 100G" \
+"\n\t 0x800 - 200G" \
"\n\t0x8000 - Unknown" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@@ -3015,6 +3106,17 @@ static const uint16_t phy_link_speeds[] = {
ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
+#define ICE_PHYS_200GB \
+ (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_SR4 | \
+ ICE_PHY_TYPE_HIGH_200G_FR4 | \
+ ICE_PHY_TYPE_HIGH_200G_LR4 | \
+ ICE_PHY_TYPE_HIGH_200G_DR4 | \
+ ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8)
/**
* ice_aq_phy_types_to_link_speeds - Convert the PHY Types to speeds
@@ -3081,6 +3183,8 @@ ice_sysctl_speeds_to_aq_phy_types(u16 sysctl_speeds, u64 *phy_type_low,
*phy_type_low |= ICE_PHYS_100GB_LOW;
*phy_type_high |= ICE_PHYS_100GB_HIGH;
}
+ if (sysctl_speeds & ICE_AQ_LINK_SPEED_200GB)
+ *phy_type_high |= ICE_PHYS_200GB;
}
/**
@@ -3121,7 +3225,7 @@ ice_intersect_phy_types_and_speeds(struct ice_softc *sc,
"DFLT" };
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
u16 report_speeds, temp_speeds;
u8 report_type;
bool apply_speed_filter = false;
@@ -3150,7 +3254,7 @@ ice_intersect_phy_types_and_speeds(struct ice_softc *sc,
apply_speed_filter = true;
status = ice_aq_get_phy_caps(pi, false, phy_data->report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(sc->dev,
"%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
__func__, report_types[report_type],
@@ -3223,7 +3327,7 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
if ((ret) || (req->newptr == NULL))
return (ret);
- if (sysctl_speeds > 0x7FF) {
+ if (sysctl_speeds > ICE_SYSCTL_SPEEDS_VALID_RANGE) {
device_printf(dev,
"%s: \"%u\" is outside of the range of acceptable values.\n",
__func__, sysctl_speeds);
@@ -3232,7 +3336,8 @@ ice_sysctl_advertise_speed(SYSCTL_HANDLER_ARGS)
pi->phy.curr_user_speed_req = sysctl_speeds;
- if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) && !sc->link_up)
+ if (!ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) &&
+ !sc->link_up && !(if_getflags(sc->ifp) & IFF_UP))
return 0;
/* Apply settings requested by user */
@@ -3373,7 +3478,7 @@ ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
enum ice_fc_mode old_mode, new_mode;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret, fc_num;
bool mode_set = false;
struct sbuf buf;
@@ -3387,7 +3492,7 @@ ice_sysctl_fc_config(SYSCTL_HANDLER_ARGS)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3530,7 +3635,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
struct ice_aqc_set_phy_cfg_data cfg = { 0 };
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
uint64_t types;
int ret;
@@ -3541,7 +3646,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3567,7 +3672,7 @@ __ice_sysctl_phy_type_handler(SYSCTL_HANDLER_ARGS, bool is_phy_type_high)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
status = ice_aq_set_phy_cfg(hw, hw->port_info, &cfg, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_set_phy_cfg failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3630,7 +3735,7 @@ ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi = hw->port_info;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -3643,7 +3748,7 @@ ice_sysctl_phy_caps(SYSCTL_HANDLER_ARGS, u8 report_mode)
return (ESHUTDOWN);
status = ice_aq_get_phy_caps(pi, true, report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3729,7 +3834,7 @@ ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
struct ice_aqc_get_link_status *resp;
struct ice_aq_desc desc;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -3750,7 +3855,7 @@ ice_sysctl_phy_link_status(SYSCTL_HANDLER_ARGS)
resp->lport_num = pi->lport;
status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_send_cmd failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -3780,7 +3885,7 @@ ice_sysctl_fw_cur_lldp_persist_status(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
u32 lldp_state;
@@ -3821,7 +3926,7 @@ ice_sysctl_fw_dflt_lldp_persist_status(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
u32 lldp_state;
@@ -3886,7 +3991,7 @@ ice_sysctl_fw_lldp_agent(SYSCTL_HANDLER_ARGS)
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int ret;
u32 old_state;
u8 fw_lldp_enabled;
@@ -4022,7 +4127,7 @@ ice_sysctl_ets_min_rate(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -4132,7 +4237,7 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -4175,7 +4280,8 @@ ice_sysctl_up2tc_map(SYSCTL_HANDLER_ARGS)
if (!hw->port_info->qos_cfg.is_sw_lldp)
return (EPERM);
- ret = ice_ets_str_to_tbl(up2tc_user_buf, new_up2tc, 7);
+ ret = ice_ets_str_to_tbl(up2tc_user_buf, new_up2tc,
+ ICE_MAX_TRAFFIC_CLASS - 1);
if (ret) {
device_printf(dev, "%s: Could not parse input priority assignment table: %s\n",
__func__, up2tc_user_buf);
@@ -4220,7 +4326,7 @@ ice_config_pfc(struct ice_softc *sc, u8 new_mode)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
pi = hw->port_info;
local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -4344,7 +4450,7 @@ ice_sysctl_pfc_mode(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 user_pfc_mode, aq_pfc_mode;
int ret;
@@ -4583,7 +4689,7 @@ ice_add_device_sysctls(struct ice_softc *sc)
hw_node = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "hw", CTLFLAG_RD,
NULL, "Port Hardware Statistics");
- ice_add_sysctls_mac_stats(ctx, hw_node, &sc->stats.cur);
+ ice_add_sysctls_mac_stats(ctx, hw_node, sc);
/* Add the main PF VSI stats now. Other VSIs will add their own stats
* during creation
@@ -4835,7 +4941,6 @@ ice_sysctl_rx_errors_stat(SYSCTL_HANDLER_ARGS)
stat += hs->rx_fragments;
stat += hs->rx_oversize;
stat += hs->rx_jabber;
- stat += hs->rx_len_errors;
stat += hs->crc_errors;
stat += hs->illegal_bytes;
@@ -5075,17 +5180,18 @@ ice_add_sysctls_mac_pfc_stats(struct sysctl_ctx_list *ctx,
* ice_add_sysctls_mac_stats - Add sysctls for global MAC statistics
* @ctx: the sysctl ctx to use
* @parent: parent node to add the sysctls under
- * @stats: the hw ports stat structure to pull values from
+ * @sc: device private structure
*
* Add global MAC statistics sysctls.
*/
void
ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
- struct ice_hw_port_stats *stats)
+ struct ice_softc *sc)
{
struct sysctl_oid *mac_node;
struct sysctl_oid_list *parent_list, *mac_list;
+ struct ice_hw_port_stats *stats = &sc->stats.cur;
parent_list = SYSCTL_CHILDREN(parent);
@@ -5110,9 +5216,7 @@ ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
{&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
{&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
{&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
- {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
{&stats->rx_jabber, "rx_jabber", "Received Jabber"},
- {&stats->rx_len_errors, "rx_length_errors", "Receive Length Errors"},
{&stats->eth.rx_discards, "rx_discards",
"Discarded Rx Packets by Port (shortage of storage space)"},
/* Packet Transmission Stats */
@@ -5145,6 +5249,11 @@ ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
entry->description);
entry++;
}
+ /* Port oversize packet stats */
+ SYSCTL_ADD_U64(ctx, mac_list, OID_AUTO, "rx_oversized",
+ CTLFLAG_RD | CTLFLAG_STATS, &sc->soft_stats.rx_roc_error,
+ 0, "Oversized packets received");
+
}
/**
@@ -5186,6 +5295,9 @@ ice_configure_misc_interrupts(struct ice_softc *sc)
/* Associate the Mailbox interrupt with ITR 0, and enable it */
wr32(hw, PFINT_MBX_CTL, PFINT_MBX_CTL_CAUSE_ENA_M);
+ /* Associate the SB Queue interrupt with ITR 0, and enable it */
+ wr32(hw, PFINT_SB_CTL, PFINT_SB_CTL_CAUSE_ENA_M);
+
/* Associate the AdminQ interrupt with ITR 0, and enable it */
wr32(hw, PFINT_FW_CTL, PFINT_FW_CTL_CAUSE_ENA_M);
}
@@ -5331,7 +5443,7 @@ ice_sync_multicast_filters(struct ice_softc *sc)
struct ice_fltr_mgmt_list_entry *itr;
struct ice_mcast_sync_data data = {};
struct ice_list_head *rules, remove_list;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&data.add_list);
@@ -5417,13 +5529,13 @@ free_filter_lists:
*
* Programs HW filters so that the given VSI will receive the specified VLANs.
*/
-enum ice_status
+int
ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry *vlan_entries;
- enum ice_status status;
+ int status;
MPASS(length > 0);
@@ -5468,7 +5580,7 @@ done:
*
* Programs a HW filter so that the given VSI will receive the specified VLAN.
*/
-enum ice_status
+int
ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
return ice_add_vlan_hw_filters(vsi, &vid, 1);
@@ -5482,13 +5594,13 @@ ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
*
* Removes previously programmed HW filters for the specified VSI.
*/
-enum ice_status
+int
ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid, u16 length)
{
struct ice_hw *hw = &vsi->sc->hw;
struct ice_list_head vlan_list;
struct ice_fltr_list_entry *vlan_entries;
- enum ice_status status;
+ int status;
MPASS(length > 0);
@@ -5533,7 +5645,7 @@ done:
*
* Removes a previously programmed HW filter for the specified VSI.
*/
-enum ice_status
+int
ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid)
{
return ice_remove_vlan_hw_filters(vsi, &vid, 1);
@@ -6141,35 +6253,36 @@ ice_sysctl_dump_state_flags(SYSCTL_HANDLER_ARGS)
}
#define ICE_SYSCTL_DEBUG_MASK_HELP \
-"\nSelect debug statements to print to kernel messages" \
+"\nSelect debug statements to print to kernel message log" \
"\nFlags:" \
-"\n\t 0x1 - Function Tracing" \
-"\n\t 0x2 - Driver Initialization" \
-"\n\t 0x4 - Release" \
-"\n\t 0x8 - FW Logging" \
-"\n\t 0x10 - Link" \
-"\n\t 0x20 - PHY" \
-"\n\t 0x40 - Queue Context" \
-"\n\t 0x80 - NVM" \
-"\n\t 0x100 - LAN" \
-"\n\t 0x200 - Flow" \
-"\n\t 0x400 - DCB" \
-"\n\t 0x800 - Diagnostics" \
-"\n\t 0x1000 - Flow Director" \
-"\n\t 0x2000 - Switch" \
-"\n\t 0x4000 - Scheduler" \
-"\n\t 0x8000 - RDMA" \
-"\n\t 0x10000 - DDP Package" \
-"\n\t 0x20000 - Resources" \
-"\n\t 0x40000 - ACL" \
-"\n\t 0x80000 - PTP" \
-"\n\t 0x100000 - Admin Queue messages" \
-"\n\t 0x200000 - Admin Queue descriptors" \
-"\n\t 0x400000 - Admin Queue descriptor buffers" \
-"\n\t 0x800000 - Admin Queue commands" \
-"\n\t 0x1000000 - Parser" \
-"\n\t ..." \
-"\n\t 0x8000000 - (Reserved for user)" \
+"\n\t 0x1 - Function Tracing" \
+"\n\t 0x2 - Driver Initialization" \
+"\n\t 0x4 - Release" \
+"\n\t 0x8 - FW Logging" \
+"\n\t 0x10 - Link" \
+"\n\t 0x20 - PHY" \
+"\n\t 0x40 - Queue Context" \
+"\n\t 0x80 - NVM" \
+"\n\t 0x100 - LAN" \
+"\n\t 0x200 - Flow" \
+"\n\t 0x400 - DCB" \
+"\n\t 0x800 - Diagnostics" \
+"\n\t 0x1000 - Flow Director" \
+"\n\t 0x2000 - Switch" \
+"\n\t 0x4000 - Scheduler" \
+"\n\t 0x8000 - RDMA" \
+"\n\t 0x10000 - DDP Package" \
+"\n\t 0x20000 - Resources" \
+"\n\t 0x40000 - ACL" \
+"\n\t 0x80000 - PTP" \
+"\n\t ..." \
+"\n\t 0x1000000 - Admin Queue messages" \
+"\n\t 0x2000000 - Admin Queue descriptors" \
+"\n\t 0x4000000 - Admin Queue descriptor buffers" \
+"\n\t 0x8000000 - Admin Queue commands" \
+"\n\t 0x10000000 - Parser" \
+"\n\t ..." \
+"\n\t 0x80000000 - (Reserved for user)" \
"\n\t" \
"\nUse \"sysctl -x\" to view flags properly."
@@ -6262,7 +6375,7 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
{
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
enum ice_reset_req reset_type = ICE_RESET_INVAL;
const char *reset_message;
int ret;
@@ -6367,15 +6480,16 @@ ice_sysctl_request_reset(SYSCTL_HANDLER_ARGS)
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_CLUSTER_SETTING \
"\nSelect clusters to dump with \"dump\" sysctl" \
"\nFlags:" \
+"\n\t 0 - All clusters (default)" \
"\n\t 0x1 - Switch" \
"\n\t 0x2 - ACL" \
"\n\t 0x4 - Tx Scheduler" \
-"\n\t 0x8 - Profile Configuration" \
+"\n\t 0x8 - Profile Configuration" \
"\n\t 0x20 - Link" \
"\n\t 0x80 - DCB" \
"\n\t 0x100 - L2P" \
-"\n\t 0x400000 - Manageability Transactions" \
-"\n\t" \
+"\n\t 0x400000 - Manageability Transactions (excluding E830)" \
+"\n" \
"\nUse \"sysctl -x\" to view flags properly."
/**
@@ -6409,7 +6523,13 @@ ice_sysctl_fw_debug_dump_cluster_setting(SYSCTL_HANDLER_ARGS)
if ((ret) || (req->newptr == NULL))
return (ret);
- if (clusters & ~(ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK)) {
+ u32 valid_cluster_mask;
+ if (ice_is_e830(&sc->hw))
+ valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830;
+ else
+ valid_cluster_mask = ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810;
+
+ if (clusters & ~(valid_cluster_mask)) {
device_printf(dev,
"%s: ERROR: Incorrect settings requested\n",
__func__);
@@ -6444,7 +6564,7 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
device_t dev = sc->dev;
u16 data_buf_size = ICE_AQ_MAX_BUF_LEN;
const u8 reserved_buf[8] = {};
- enum ice_status status;
+ int status;
int counter = 0;
u8 *data_buf;
@@ -6515,7 +6635,13 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
/* Adjust loop variables */
memset(data_buf, 0, data_buf_size);
bool same_table_next = (table_id == ret_next_table);
- bool last_table_next = (ret_next_table == 0xff || ret_next_table == 0xffff);
+ bool last_table_next;
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_NEXT_CLUSTER_ID))
+ last_table_next =
+ (ret_next_table == 0xffff);
+ else
+ last_table_next =
+ (ret_next_table == 0xff || ret_next_table == 0xffff);
bool last_offset_next = (ret_next_index == 0xffffffff || ret_next_index == 0);
if ((!same_table_next && !last_offset_next) ||
@@ -6551,8 +6677,59 @@ ice_fw_debug_dump_print_cluster(struct ice_softc *sc, struct sbuf *sbuf, u16 clu
return ret_next_cluster;
}
+/**
+ * ice_fw_debug_dump_print_clusters - Print data from FW clusters to sbuf
+ * @sc: the device softc
+ * @sbuf: initialized sbuf to print data to
+ *
+ * Handles dumping all of the requested clusters to the indicated sbuf. The
+ * clusters to dump are determined by the value in the
+ * fw_debug_dump_cluster_mask field of the sc argument.
+ *
+ * @remark Only intended to be used by the sysctl handler
+ * ice_sysctl_fw_debug_dump_do_dump
+ */
+static void
+ice_fw_debug_dump_print_clusters(struct ice_softc *sc, struct sbuf *sbuf)
+{
+ u16 next_cluster_id, max_cluster_id, start_cluster_id;
+ u32 cluster_mask = sc->fw_debug_dump_cluster_mask;
+ struct ice_hw *hw = &sc->hw;
+ int bit;
+
+ ice_debug(hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
+
+ if (ice_is_e830(hw)) {
+ max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E830;
+ start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E830;
+ } else {
+ max_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG_E810;
+ start_cluster_id = ICE_AQC_DBG_DUMP_CLUSTER_ID_SW_E810;
+ }
+
+ if (cluster_mask != 0) {
+ for_each_set_bit(bit, &cluster_mask,
+ sizeof(cluster_mask) * BITS_PER_BYTE) {
+ ice_fw_debug_dump_print_cluster(sc, sbuf,
+ bit + start_cluster_id);
+ }
+ } else {
+ next_cluster_id = start_cluster_id;
+
+ /* We don't support QUEUE_MNG and FULL_CSR_SPACE */
+ do {
+ next_cluster_id =
+ ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
+ } while ((next_cluster_id != 0) &&
+ (next_cluster_id < max_cluster_id));
+ }
+}
+
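Note that the bits in fw_debug_dump_cluster_mask are interpreted relative to start_cluster_id, so bit 0 selects the first (Switch) cluster on both E810 and E830 even though their absolute cluster IDs differ.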
#define ICE_SYSCTL_HELP_FW_DEBUG_DUMP_DO_DUMP \
-"\nWrite 1 to output a FW debug dump containing the clusters specified by the \"clusters\" sysctl" \
+"\nWrite 1 to output a FW debug dump containing the clusters specified by the" \
+"\n\"clusters\" sysctl." \
+"\n" \
"\nThe \"-b\" flag must be used in order to dump this data as binary data because" \
"\nthis data is opaque and not a string."
@@ -6583,7 +6760,7 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
device_t dev = sc->dev;
struct sbuf *sbuf;
- int bit, ret;
+ int ret;
UNREFERENCED_PARAMETER(arg2);
@@ -6652,19 +6829,7 @@ ice_sysctl_fw_debug_dump_do_dump(SYSCTL_HANDLER_ARGS)
sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
- ice_debug(&sc->hw, ICE_DBG_DIAG, "%s: Debug Dump running...\n", __func__);
-
- if (sc->fw_debug_dump_cluster_mask) {
- for_each_set_bit(bit, &sc->fw_debug_dump_cluster_mask,
- sizeof(sc->fw_debug_dump_cluster_mask) * 8)
- ice_fw_debug_dump_print_cluster(sc, sbuf, bit);
- } else {
- u16 next_cluster_id = 0;
- /* We don't support QUEUE_MNG and FULL_CSR_SPACE */
- do {
- next_cluster_id = ice_fw_debug_dump_print_cluster(sc, sbuf, next_cluster_id);
- } while (next_cluster_id != 0 && next_cluster_id < ICE_AQC_DBG_DUMP_CLUSTER_ID_QUEUE_MNG);
- }
+ ice_fw_debug_dump_print_clusters(sc, sbuf);
sbuf_finish(sbuf);
sbuf_delete(sbuf);
@@ -6795,6 +6960,13 @@ ice_add_debug_sysctls(struct ice_softc *sc)
ice_sysctl_negotiated_fc, "A",
"Current Negotiated Flow Control mode");
+ if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_PHY_STATISTICS)) {
+ SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "phy_statistics",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, ice_sysctl_dump_phy_stats, "A",
+ "Dumps PHY statistics from firmware");
+ }
+
SYSCTL_ADD_PROC(ctx, debug_list, OID_AUTO, "local_dcbx_cfg",
CTLTYPE_STRING | CTLFLAG_RD, sc, ICE_AQ_LLDP_MIB_LOCAL,
ice_sysctl_dump_dcbx_cfg, "A",
@@ -6813,6 +6985,10 @@ ice_add_debug_sysctls(struct ice_softc *sc)
sc, 0, ice_sysctl_query_port_ets, "A",
"Prints selected output from Query Port ETS AQ command");
+ SYSCTL_ADD_U64(ctx, debug_list, OID_AUTO, "rx_length_errors",
+ CTLFLAG_RD | CTLFLAG_STATS, &sc->stats.cur.rx_len_errors, 0,
+ "Receive Length Errors (SNAP packets)");
+
sw_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "switch",
ICE_CTLFLAG_DEBUG | CTLFLAG_RD, NULL,
"Switch Configuration");
@@ -6866,7 +7042,7 @@ ice_vsi_disable_tx(struct ice_vsi *vsi)
{
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u32 *q_teids;
u16 *q_ids, *q_handles;
size_t q_teids_size, q_ids_size, q_handles_size;
@@ -7092,6 +7268,7 @@ ice_add_txq_sysctls(struct ice_tx_queue *txq)
{ &txq->stats.tx_packets, "tx_packets", "Queue Packets Transmitted" },
{ &txq->stats.tx_bytes, "tx_bytes", "Queue Bytes Transmitted" },
{ &txq->stats.mss_too_small, "mss_too_small", "TSO sends with an MSS less than 64" },
+ { &txq->stats.tso, "tso", "TSO packets" },
{ 0, 0, 0 }
};
@@ -7203,7 +7380,7 @@ ice_set_rss_key(struct ice_vsi *vsi)
struct ice_aqc_get_set_rss_keys keydata = { .standard_rss_key = {0} };
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/*
* If the RSS kernel interface is disabled, this will return the
@@ -7238,7 +7415,7 @@ ice_set_rss_flow_flds(struct ice_vsi *vsi)
struct ice_hw *hw = &sc->hw;
struct ice_rss_hash_cfg rss_cfg = { 0, 0, ICE_RSS_ANY_HEADERS, false };
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u_int rss_hash_config;
rss_hash_config = rss_gethashconfig();
@@ -7324,7 +7501,7 @@ ice_set_rss_lut(struct ice_vsi *vsi)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct ice_aq_get_set_rss_lut_params lut_params;
- enum ice_status status;
+ int status;
int i, err = 0;
u8 *lut;
@@ -7534,14 +7711,14 @@ ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status)
* ice_deinit_hw(). This allows the firmware reference to be immediately
* released using firmware_put.
*/
-enum ice_status
+int
ice_load_pkg_file(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_ddp_state state;
const struct firmware *pkg;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 cached_layer_count;
u8 *buf_copy;
@@ -7566,7 +7743,7 @@ ice_load_pkg_file(struct ice_softc *sc)
status = ice_cfg_tx_topo(&sc->hw, buf_copy, pkg->datasize);
free(buf_copy, M_ICE);
/* Success indicates a change was made */
- if (status == ICE_SUCCESS) {
+ if (!status) {
/* 9 -> 5 */
if (cached_layer_count == 9)
device_printf(dev,
@@ -7580,6 +7757,12 @@ ice_load_pkg_file(struct ice_softc *sc)
/* Status is ICE_ERR_CFG when DDP does not support transmit balancing */
device_printf(dev,
"DDP package does not support transmit balancing feature - please update to the latest DDP package and try again\n");
+ } else if (status == ICE_ERR_ALREADY_EXISTS) {
+ /* Requested config already loaded */
+ } else if (status == ICE_ERR_AQ_ERROR) {
+ device_printf(dev,
+ "Error configuring transmit balancing: %s\n",
+ ice_status_str(status));
}
}
@@ -7630,12 +7813,13 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_IERRORS:
return (hs->crc_errors + hs->illegal_bytes +
hs->mac_local_faults + hs->mac_remote_faults +
- hs->rx_len_errors + hs->rx_undersize +
- hs->rx_oversize + hs->rx_fragments + hs->rx_jabber);
+ hs->rx_undersize + hs->rx_oversize + hs->rx_fragments +
+ hs->rx_jabber);
case IFCOUNTER_OPACKETS:
return (es->tx_unicast + es->tx_multicast + es->tx_broadcast);
case IFCOUNTER_OERRORS:
- return (es->tx_errors);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ es->tx_errors);
case IFCOUNTER_COLLISIONS:
return (0);
case IFCOUNTER_IBYTES:
@@ -7649,7 +7833,8 @@ ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter)
case IFCOUNTER_IQDROPS:
return (es->rx_discards);
case IFCOUNTER_OQDROPS:
- return (hs->tx_dropped_link_down);
+ return (if_get_counter_default(vsi->sc->ifp, counter) +
+ hs->tx_dropped_link_down);
case IFCOUNTER_NOPROTO:
return (es->rx_unknown_protocol);
default:
@@ -7689,7 +7874,7 @@ int
ice_replay_all_vsi_cfg(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
int i;
for (i = 0 ; i < sc->num_available_vsi; i++) {
@@ -7730,7 +7915,7 @@ ice_clean_vsi_rss_cfg(struct ice_vsi *vsi)
struct ice_softc *sc = vsi->sc;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
status = ice_rem_vsi_rss_cfg(hw, vsi->idx);
if (status)
@@ -7779,7 +7964,7 @@ static const char *
ice_requested_fec_mode(struct ice_port_info *pi)
{
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
- enum ice_status status;
+ int status;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
@@ -7889,7 +8074,7 @@ ice_update_laa_mac(struct ice_softc *sc)
{
const u8 *lladdr = (const u8 *)if_getlladdr(sc->ifp);
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* If the address is the same, then there is nothing to update */
if (!memcmp(lladdr, hw->port_info->mac.lan_addr, ETHER_ADDR_LEN))
@@ -7918,7 +8103,9 @@ ice_update_laa_mac(struct ice_softc *sc)
* @sc: device softc
*
* This will potentially print out a warning message if bus bandwidth
- * is insufficient for full-speed operation.
+ * is insufficient for full-speed operation. This will not print out anything
+ * for E82x devices since those are in SoCs, do not report valid PCIe info,
+ * and cannot be moved to a different slot.
*
* This should only be called once, during the attach process, after
* hw->port_info has been filled out with port link topology information
@@ -7932,6 +8119,9 @@ ice_get_and_print_bus_info(struct ice_softc *sc)
u16 pci_link_status;
int offset;
+ if (!ice_is_e810(hw) && !ice_is_e830(hw))
+ return;
+
pci_find_cap(dev, PCIY_EXPRESS, &offset);
pci_link_status = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
@@ -7954,7 +8144,7 @@ ice_get_and_print_bus_info(struct ice_softc *sc)
* a 64-bit baudrate.
* @speed: enum value to convert
*
- * This only goes up to PCIE Gen 4.
+ * This only goes up to PCIE Gen 5.
*/
static uint64_t
ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed)
@@ -7971,6 +8161,8 @@ ice_pcie_bus_speed_to_rate(enum ice_pcie_bus_speed speed)
return IF_Gbps(8);
case ice_pcie_speed_16_0GT:
return IF_Gbps(16);
+ case ice_pcie_speed_32_0GT:
+ return IF_Gbps(32);
case ice_pcie_speed_unknown:
default:
return 0;
@@ -8029,10 +8221,12 @@ ice_pcie_bandwidth_check(struct ice_softc *sc)
pcie_width = ice_pcie_lnk_width_to_int(hw->bus.width);
/*
- * If 2x100, clamp ports to 1 -- 2nd port is intended for
- * failover.
+ * If 2x100 on E810 or 2x200 on E830, clamp ports to 1 -- 2nd port is
+ * intended for failover.
*/
- if (port_speed == IF_Gbps(100))
+ if ((port_speed >= IF_Gbps(100)) &&
+ ((port_speed == IF_Gbps(100) && ice_is_e810(hw)) ||
+ (port_speed == IF_Gbps(200) && ice_is_e830(hw))))
num_ports = 1;
return !!((num_ports * port_speed) > pcie_speed * pcie_width);
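As a sketch of the arithmetic (using ice_pcie_bus_speed_to_rate() above, which tracks the raw GT/s figures and ignores encoding overhead): a single 100 Gb/s port behind a Gen3 x8 link compares 100 > 8 * 8 = 64 and reports a bottleneck, while the same port on a Gen4 x16 link compares 100 > 16 * 16 = 256, which is false, so no warning is printed.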
@@ -8046,18 +8240,19 @@ ice_pcie_bandwidth_check(struct ice_softc *sc)
static void
ice_print_bus_link_data(device_t dev, struct ice_hw *hw)
{
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ice_pcie_speed_32_0GT) ? "32.0GT/s" :
+ (hw->bus.speed == ice_pcie_speed_16_0GT) ? "16.0GT/s" :
(hw->bus.speed == ice_pcie_speed_8_0GT) ? "8.0GT/s" :
(hw->bus.speed == ice_pcie_speed_5_0GT) ? "5.0GT/s" :
(hw->bus.speed == ice_pcie_speed_2_5GT) ? "2.5GT/s" : "Unknown"),
- (hw->bus.width == ice_pcie_lnk_x32) ? "Width x32" :
- (hw->bus.width == ice_pcie_lnk_x16) ? "Width x16" :
- (hw->bus.width == ice_pcie_lnk_x12) ? "Width x12" :
- (hw->bus.width == ice_pcie_lnk_x8) ? "Width x8" :
- (hw->bus.width == ice_pcie_lnk_x4) ? "Width x4" :
- (hw->bus.width == ice_pcie_lnk_x2) ? "Width x2" :
- (hw->bus.width == ice_pcie_lnk_x1) ? "Width x1" : "Width Unknown");
+ (hw->bus.width == ice_pcie_lnk_x32) ? "x32" :
+ (hw->bus.width == ice_pcie_lnk_x16) ? "x16" :
+ (hw->bus.width == ice_pcie_lnk_x12) ? "x12" :
+ (hw->bus.width == ice_pcie_lnk_x8) ? "x8" :
+ (hw->bus.width == ice_pcie_lnk_x4) ? "x4" :
+ (hw->bus.width == ice_pcie_lnk_x2) ? "x2" :
+ (hw->bus.width == ice_pcie_lnk_x1) ? "x1" : "Unknown");
}
/**
@@ -8098,6 +8293,7 @@ ice_set_pci_link_status_data(struct ice_hw *hw, u16 link_status)
case ice_pcie_speed_5_0GT:
case ice_pcie_speed_8_0GT:
case ice_pcie_speed_16_0GT:
+ case ice_pcie_speed_32_0GT:
hw->bus.speed = (enum ice_pcie_bus_speed)reg;
break;
default:
@@ -8118,7 +8314,7 @@ int
ice_init_link_events(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
u16 wanted_events;
/* Set the bits for the events that we want to be notified by */
@@ -8147,6 +8343,11 @@ ice_init_link_events(struct ice_softc *sc)
return (0);
}
+#ifndef GL_MDET_TX_TCLAN
+/* Temporarily use this redefinition until the definition is fixed */
+#define GL_MDET_TX_TCLAN E800_GL_MDET_TX_TCLAN
+#define PF_MDET_TX_TCLAN E800_PF_MDET_TX_TCLAN
+#endif /* !defined(GL_MDET_TX_TCLAN) */
/**
* ice_handle_mdd_event - Handle possibly malicious events
* @sc: the device softc
@@ -8274,7 +8475,7 @@ ice_start_dcbx_agent(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
bool dcbx_agent_status;
- enum ice_status status;
+ int status;
hw->port_info->qos_cfg.dcbx_status = ice_get_dcbx_status(hw);
@@ -8310,7 +8511,7 @@ ice_init_dcb_setup(struct ice_softc *sc)
struct ice_dcbx_cfg *local_dcbx_cfg;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 pfcmode_ret;
/* Don't do anything if DCB isn't supported */
@@ -8686,7 +8887,7 @@ ice_pf_vsi_cfg_tc(struct ice_softc *sc, u8 tc_map)
struct ice_hw *hw = &sc->hw;
struct ice_vsi_ctx ctx = { 0 };
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 num_tcs = 0;
int i = 0;
@@ -8876,7 +9077,7 @@ ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
pi = sc->hw.port_info;
local_dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
@@ -8914,7 +9115,7 @@ ice_do_dcb_reconfig(struct ice_softc *sc, bool pending_mib)
/* Query ETS configuration and update SW Tx scheduler info */
status = ice_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Query Port ETS AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -8950,7 +9151,7 @@ ice_handle_mib_change_event(struct ice_softc *sc, struct ice_rq_event_info *even
device_t dev = sc->dev;
struct ice_hw *hw = &sc->hw;
bool needs_reconfig, mib_is_pending;
- enum ice_status status;
+ int status;
u8 mib_type, bridge_type;
ASSERT_CFG_LOCKED(sc);
@@ -9032,7 +9233,7 @@ ice_send_version(struct ice_softc *sc)
struct ice_driver_ver driver_version = {0};
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
driver_version.major_ver = ice_major_version;
driver_version.minor_ver = ice_minor_version;
@@ -9128,7 +9329,7 @@ ice_cfg_pf_ethertype_filters(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err = 0;
INIT_LIST_HEAD(&ethertype_list);
@@ -9186,7 +9387,7 @@ ice_add_rx_lldp_filter(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err;
u16 vsi_num;
@@ -9254,7 +9455,7 @@ ice_del_rx_lldp_filter(struct ice_softc *sc)
struct ice_vsi *vsi = &sc->pf_vsi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
int err;
u16 vsi_num;
@@ -9324,15 +9525,28 @@ ice_init_link_configuration(struct ice_softc *sc)
struct ice_port_info *pi = sc->hw.port_info;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status, retry_count = 0;
+retry:
pi->phy.get_link_info = true;
status = ice_get_link_status(pi, &sc->link_up);
- if (status != ICE_SUCCESS) {
- device_printf(dev,
- "%s: ice_get_link_status failed; status %s, aq_err %s\n",
- __func__, ice_status_str(status),
- ice_aq_str(hw->adminq.sq_last_status));
+
+ if (status) {
+ if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN) {
+ retry_count++;
+ ice_debug(hw, ICE_DBG_LINK,
+ "%s: ice_get_link_status failed with EAGAIN, attempt %d\n",
+ __func__, retry_count);
+ if (retry_count < ICE_LINK_AQ_MAX_RETRIES) {
+ ice_msec_pause(ICE_LINK_RETRY_DELAY);
+ goto retry;
+ }
+ } else {
+ device_printf(dev,
+ "%s: ice_get_link_status failed; status %s, aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+ }
return;
}
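Given ICE_LINK_AQ_MAX_RETRIES (10) and ICE_LINK_RETRY_DELAY (17 ms) defined in ice_lib.h, the worst case here is ten Get Link Status attempts separated by nine 17 ms pauses, roughly 150 ms, after which the EAGAIN path gives up with only an ICE_DBG_LINK debug message.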
@@ -9350,7 +9564,7 @@ ice_init_link_configuration(struct ice_softc *sc)
*/
ice_set_state(&sc->state, ICE_STATE_NO_MEDIA);
status = ice_aq_set_link_restart_an(pi, false, NULL);
- if (status != ICE_SUCCESS && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
+ if (status && hw->adminq.sq_last_status != ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: ice_aq_set_link_restart_an: status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9490,7 +9704,7 @@ ice_apply_saved_fec_req_to_cfg(struct ice_softc *sc,
struct ice_aqc_set_phy_cfg_data *cfg)
{
struct ice_port_info *pi = sc->hw.port_info;
- enum ice_status status;
+ int status;
cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
status = ice_cfg_phy_fec(pi, cfg, pi->phy.curr_user_fec_req);
@@ -9556,7 +9770,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
u64 phy_low, phy_high;
- enum ice_status status;
+ int status;
enum ice_fec_mode dflt_fec_mode;
u16 dflt_user_speed;
@@ -9567,7 +9781,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
&pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps (ACTIVE) failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9606,7 +9820,7 @@ ice_apply_saved_phy_cfg(struct ice_softc *sc, u8 settings)
cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;
status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
/* Don't indicate failure if there's no media in the port.
* The settings have been saved and will apply when media
* is inserted.
@@ -9667,7 +9881,7 @@ ice_set_link_management_mode(struct ice_softc *sc)
struct ice_port_info *pi = sc->hw.port_info;
device_t dev = sc->dev;
struct ice_link_default_override_tlv tlv = { 0 };
- enum ice_status status;
+ int status;
/* Port must be in strict mode if FW version is below a certain
* version. (i.e. Don't set lenient mode features)
@@ -9676,7 +9890,7 @@ ice_set_link_management_mode(struct ice_softc *sc)
return;
status = ice_get_link_default_override(&tlv, pi);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_get_link_default_override failed; status %s, aq_err %s\n",
__func__, ice_status_str(status),
@@ -9727,7 +9941,7 @@ ice_set_link(struct ice_softc *sc, bool enabled)
{
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
if (ice_driver_is_detaching(sc))
return;
@@ -9739,7 +9953,7 @@ ice_set_link(struct ice_softc *sc, bool enabled)
ice_apply_saved_phy_cfg(sc, ICE_APPLY_LS_FEC_FC);
else {
status = ice_aq_set_link_restart_an(hw->port_info, false, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
device_printf(dev,
"%s: Link control not enabled in current device mode\n",
@@ -9773,14 +9987,14 @@ ice_init_saved_phy_cfg(struct ice_softc *sc)
struct ice_aqc_get_phy_caps_data pcaps = { 0 };
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u64 phy_low, phy_high;
u8 report_mode = ICE_AQC_REPORT_TOPO_CAP_MEDIA;
if (ice_is_bit_set(sc->feat_en, ICE_FEATURE_LINK_MGMT_VER_2))
report_mode = ICE_AQC_REPORT_DFLT_CFG;
status = ice_aq_get_phy_caps(pi, false, report_mode, &pcaps, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"%s: ice_aq_get_phy_caps (%s) failed; status %s, aq_err %s\n",
__func__,
@@ -9868,7 +10082,7 @@ ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
size_t ifd_len = ifd->ifd_len, malloc_len;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
u8 *nvm_buffer;
int err;
@@ -9947,7 +10161,7 @@ ice_handle_nvm_access_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
/* Convert private status to an error code for proper ioctl response */
switch (status) {
- case ICE_SUCCESS:
+ case 0:
err = (0);
break;
case ICE_ERR_NO_MEMORY:
@@ -9984,7 +10198,7 @@ ice_read_sff_eeprom(struct ice_softc *sc, u16 dev_addr, u16 offset, u8* data, u1
{
struct ice_hw *hw = &sc->hw;
int ret = 0, retries = 0;
- enum ice_status status;
+ int status;
if (length > 16)
return (EINVAL);
@@ -10167,6 +10381,13 @@ ice_alloc_intr_tracking(struct ice_softc *sc)
device_t dev = sc->dev;
int err;
+ if (hw->func_caps.common_cap.num_msix_vectors > ICE_MAX_MSIX_VECTORS) {
+ device_printf(dev, "%s: Invalid num_msix_vectors value (%u) received from FW.\n",
+ __func__,
+ hw->func_caps.common_cap.num_msix_vectors);
+ return (EINVAL);
+ }
+
/* Initialize the interrupt allocation manager */
err = ice_resmgr_init_contig_only(&sc->dev_imgr,
hw->func_caps.common_cap.num_msix_vectors);
@@ -10281,7 +10502,8 @@ ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type)
if (module == IS_QSFP)
speed_mask = ~((u16)ICE_AQ_LINK_SPEED_10GB - 1);
}
- if (report_speeds & ICE_AQ_LINK_SPEED_100GB)
+ if ((report_speeds & ICE_AQ_LINK_SPEED_100GB) ||
+ (report_speeds & ICE_AQ_LINK_SPEED_200GB))
speed_mask = ~((u16)ICE_AQ_LINK_SPEED_25GB - 1);
return (report_speeds & speed_mask);
}
@@ -10296,11 +10518,11 @@ ice_apply_supported_speed_filter(u16 report_speeds, u8 mod_type)
void
ice_init_health_events(struct ice_softc *sc)
{
- enum ice_status status;
+ int status;
u8 health_mask;
if ((!ice_is_bit_set(sc->feat_cap, ICE_FEATURE_HEALTH_STATUS)) ||
- (!sc->enable_health_events))
+ (!sc->enable_health_events))
return;
health_mask = ICE_AQC_HEALTH_STATUS_SET_PF_SPECIFIC_MASK |
@@ -10422,7 +10644,8 @@ ice_print_health_status_string(device_t dev,
device_printf(dev, "Possible Solution: Change the module or use Intel(R) Ethernet Port Configuration Tool to configure the port option to match the current module speed.\n");
break;
case ICE_AQC_HEALTH_STATUS_ERR_PARALLEL_FAULT:
- device_printf(dev, "A parallel fault was detected.\n");
+ device_printf(dev, "All configured link modes were attempted but failed to establish link.\n");
+ device_printf(dev, "The device will restart the process to establish link.\n");
device_printf(dev, "Possible Solution: Check link partner connection and configuration.\n");
break;
case ICE_AQC_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED:
@@ -10508,7 +10731,7 @@ ice_set_default_local_lldp_mib(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
/* Set Local MIB can disrupt flow control settings for
* non-DCB-supported devices.
@@ -10585,7 +10808,7 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
u8 maxtcs, dcbx_status, is_sw_lldp;
UNREFERENCED_PARAMETER(oidp);
@@ -10620,7 +10843,7 @@ ice_sysctl_dump_dcbx_cfg(SYSCTL_HANDLER_ARGS)
}
status = ice_aq_get_cee_dcb_cfg(hw, &cee_cfg, NULL);
- if (status == ICE_SUCCESS)
+ if (!status)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_CEE;
else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
dcbcfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
@@ -10703,7 +10926,7 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(oidp);
UNREFERENCED_PARAMETER(arg2);
@@ -10715,7 +10938,7 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
ctx.vsi_num = ice_get_hw_vsi_num(hw, sc->pf_vsi.idx);
status = ice_aq_get_vsi_params(hw, &ctx, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Get VSI AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -10762,6 +10985,520 @@ ice_sysctl_dump_vsi_cfg(SYSCTL_HANDLER_ARGS)
}
/**
+ * ice_get_tx_rx_equalizations -- read serdes tx rx equalization params
+ * @hw: pointer to the HW struct
+ * @serdes_num: represents the serdes number
+ * @ptr: structure to hold all serdes parameters for the given serdes
+ *
+ * Returns all serdes equalization parameters supported for the given
+ * serdes number.
+ */
+static int
+ice_get_tx_rx_equalizations(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization *ptr)
+{
+ int err = 0;
+
+ if (!ptr)
+ return (EOPNOTSUPP);
+
+#define ICE_GET_PHY_EQUALIZATION(equ, dir, value) \
+ ice_aq_get_phy_equalization(hw, equ, dir, serdes_num, &(ptr->value))
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE1,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_PRE2,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_POST1,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFLF,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bflf);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_BFHF,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_bfhf);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_RX_EQU_DRATE,
+ ICE_AQC_OP_CODE_RX_EQU, rx_equalization_drate);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE1,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE2,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_PRE3,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_pre3);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_ATTEN,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_atten);
+ if (err)
+ return err;
+
+ err = ICE_GET_PHY_EQUALIZATION(ICE_AQC_TX_EQU_POST1,
+ ICE_AQC_OP_CODE_TX_EQU, tx_equalization_post1);
+ if (err)
+ return err;
+
+ return (0);
+}
+
+/**
+ * ice_fec_counter_read - reads FEC stats from PHY
+ * @hw: pointer to the HW struct
+ * @receiver_id: PCS quad at register level
+ * @reg_offset: register for the current request
+ * @output: pointer to the caller-supplied buffer to return requested fec stats
+ *
+ * Returns FEC stats from the PHY.
+ */
+static int
+ice_fec_counter_read(struct ice_hw *hw, u32 receiver_id, u32 reg_offset,
+ u16 *output)
+{
+ u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ int err = 0;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_addr_low = ICE_LO_WORD(reg_offset);
+ msg.msg_addr_high = ICE_LO_DWORD(receiver_id);
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err) {
+ return err;
+ }
+ *output = ICE_LO_WORD(msg.data);
+ return (0);
+}
+
+/**
+ * ice_get_port_fec_stats - returns FEC correctable and uncorrectable stats per PCS quad and PCS port
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold fec statistics for given port
+ *
+ * Returns FEC stats for the given port.
+ */
+static int
+ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ice_fec_stats_to_sysctl *fec_stats)
+{
+ u32 uncorr_low_reg = 0, uncorr_high_reg = 0;
+ u16 uncorr_low_val = 0, uncorr_high_val = 0;
+ u32 corr_low_reg = 0, corr_high_reg = 0;
+ u16 corr_low_val = 0, corr_high_val = 0;
+ u32 receiver_id = 0;
+ int err;
+
+ switch (pcs_port) {
+ case 0:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT0;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT0;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT0;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT0;
+ break;
+ case 1:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT1;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT1;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT1;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT1;
+ break;
+ case 2:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT2;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT2;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT2;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT2;
+ break;
+ case 3:
+ corr_low_reg = ICE_RS_FEC_CORR_LOW_REG_PORT3;
+ corr_high_reg = ICE_RS_FEC_CORR_HIGH_REG_PORT3;
+ uncorr_low_reg = ICE_RS_FEC_UNCORR_LOW_REG_PORT3;
+ uncorr_high_reg = ICE_RS_FEC_UNCORR_HIGH_REG_PORT3;
+ break;
+ default:
+ return (EINVAL);
+ }
+ if (pcs_quad == 0)
+ receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS0; /* MTIP PCS Quad 0 -FEC */
+ else if (pcs_quad == 1)
+ receiver_id = ICE_RS_FEC_RECEIVER_ID_PCS1; /* MTIP PCS Quad 1 -FEC */
+ else
+ return (EINVAL);
+
+ err = ice_fec_counter_read(hw, receiver_id, corr_low_reg,
+ &corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, corr_high_reg,
+ &corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, uncorr_low_reg,
+ &uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_fec_counter_read(hw, receiver_id, uncorr_high_reg,
+ &uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->fec_corr_cnt_low = corr_low_val;
+ fec_stats->fec_corr_cnt_high = corr_high_val;
+ fec_stats->fec_uncorr_cnt_low = uncorr_low_val;
+ fec_stats->fec_uncorr_cnt_high = uncorr_high_val;
+
+ return (0);
+}
+
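Each counter is returned as two 16-bit halves; a minimal sketch of how a caller reassembles them into 32-bit totals, matching the assembly done in ice_sysctl_dump_phy_stats() below:

	u32 corr_cnt = ((u32)fec_stats->fec_corr_cnt_high << 16) |
	    fec_stats->fec_corr_cnt_low;
	u32 uncorr_cnt = ((u32)fec_stats->fec_uncorr_cnt_high << 16) |
	    fec_stats->fec_uncorr_cnt_low;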
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Returns true when the serdes is muxed in hardware,
+ * false when it is not.
+ */
+static bool
+ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ return (rd32(hw, 0xB81E0) & 0x4);
+}
+
+/**
+ * ice_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ */
+static int
+ice_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ u8 active_idx, pending_idx;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count,
+ lport, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+
+ if (status || active_idx >= ICE_AQC_PORT_OPT_MAX) {
+ ice_debug(hw, ICE_DBG_PHY, "Port split read err: %d\n", status);
+ return (EIO);
+ }
+
+ if (active_valid) {
+ ice_debug(hw, ICE_DBG_PHY, "Active idx: %d\n", active_idx);
+ } else {
+ ice_debug(hw, ICE_DBG_PHY, "No valid Active option\n");
+ return (EINVAL);
+ }
+ *max_speed = options[active_idx].max_lane_speed;
+
+ return (0);
+}
+
+/**
+ * ice_update_port_topology - update port topology
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ * @is_muxed: serdes is muxed in hardware
+ */
+static int
+ice_update_port_topology(u8 lport, struct ice_port_topology *port_topology,
+ bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed == true)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed == true)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return (EINVAL);
+ }
+ return 0;
+}
+
+/**
+ * ice_get_port_topology - returns physical topology
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Returns the physical component associated with the Port like pcsquad, pcsport, serdesnumber
+ */
+static int
+ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd;
+ bool is_muxed = false;
+ u8 cage_type = 0;
+ u16 node_handle;
+ u8 ctx = 0;
+ int err;
+
+ if (!hw || !port_topology)
+ return (EINVAL);
+
+ if (hw->device_id >= ICE_DEV_ID_E810_XXV_BACKPLANE) {
+ port_topology->serdes_lane_count = 1;
+ if (lport == 0) {
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ } else if (lport == 1) {
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 1;
+ } else {
+ return (EINVAL);
+ }
+ return (0);
+ }
+
+ memset(&cmd, 0, sizeof(cmd));
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+ cmd.addr.topo_params.index = 0;
+ cmd.addr.topo_params.lport_num = 0;
+ cmd.addr.topo_params.lport_num_valid = 0;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return (EINVAL);
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ err = ice_update_port_topology(lport, port_topology, is_muxed);
+ if (err)
+ return err;
+
+ if (cage_type == 0x11 || /* SFP */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_get_maxspeed(hw, port_topology->primary_serdes_lane,
+ &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_M)
+ device_printf(ice_hw_to_dev(hw),
+ "%s: WARNING: reported max_lane_speed is N/A\n",
+ __func__);
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+ } else
+ return (EINVAL);
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: Port Topology (lport %d):\n",
+ __func__, lport);
+ ice_debug(hw, ICE_DBG_PHY, "serdes lane count %d\n",
+ port_topology->serdes_lane_count);
+ ice_debug(hw, ICE_DBG_PHY, "pcs quad select %d\n",
+ port_topology->pcs_quad_select);
+ ice_debug(hw, ICE_DBG_PHY, "pcs port %d\n",
+ port_topology->pcs_port);
+ ice_debug(hw, ICE_DBG_PHY, "primary serdes lane %d\n",
+ port_topology->primary_serdes_lane);
+
+ return (0);
+}
+
+/**
+ * ice_sysctl_dump_phy_stats - print PHY stats
+ * @oidp: sysctl oid structure
+ * @arg1: pointer to private data structure
+ * @arg2: unused
+ * @req: sysctl request pointer
+ */
+static int
+ice_sysctl_dump_phy_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct ice_regdump_to_sysctl ice_prv_regs_buf = {};
+ struct ice_softc *sc = (struct ice_softc *)arg1;
+ struct ice_port_topology port_topology;
+ struct ice_hw *hw = &sc->hw;
+ struct ice_port_info *pi;
+ device_t dev = sc->dev;
+ u8 serdes_num = 0;
+ unsigned int i;
+ int err = 0;
+ struct sbuf *sbuf;
+
+ pi = hw->port_info;
+
+ if (!pi) {
+ device_printf(dev, "Port info structure is null\n");
+ return (EINVAL);
+ }
+
+ UNREFERENCED_PARAMETER(oidp);
+ UNREFERENCED_PARAMETER(arg2);
+ UNREFERENCED_PARAMETER(req);
+
+ if (ice_driver_is_detaching(sc))
+ return (ESHUTDOWN);
+
+ if (ice_get_port_topology(hw, pi->lport, &port_topology) != 0) {
+ device_printf(dev,
+ "Extended register dump failed for Lport %d\n",
+ pi->lport);
+ return (EIO);
+ }
+
+ if (port_topology.serdes_lane_count > ICE_MAX_SERDES_LANE_COUNT) {
+ device_printf(dev,
+ "Extended register dump failed: Lport %d Serdes count %d\n",
+ pi->lport,
+ port_topology.serdes_lane_count);
+ return (EINVAL);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equalizations(hw, serdes_num,
+ &(ice_prv_regs_buf.equalization[i]));
+ if (err) {
+ device_printf(dev,
+ "Serdes equalization get failed Lport %d Serdes %d Err %d\n",
+			    pi->lport, serdes_num, err);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+ return (EIO);
+ }
+ sbuf_printf(sbuf, "\nSerdes lane: %d\n", i);
+ sbuf_printf(sbuf, "RX PRE1 = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_pre1);
+ sbuf_printf(sbuf, "RX PRE2 = %d\n",
+ (s16)ice_prv_regs_buf.equalization[i].rx_equalization_pre2);
+ sbuf_printf(sbuf, "RX POST1 = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_post1);
+ sbuf_printf(sbuf, "RX BFLF = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_bflf);
+ sbuf_printf(sbuf, "RX BFHF = %d\n",
+ ice_prv_regs_buf.equalization[i].rx_equalization_bfhf);
+ sbuf_printf(sbuf, "RX DRATE = %d\n",
+ (s16)ice_prv_regs_buf.equalization[i].rx_equalization_drate);
+ sbuf_printf(sbuf, "TX PRE1 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre1);
+ sbuf_printf(sbuf, "TX PRE2 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre2);
+ sbuf_printf(sbuf, "TX PRE3 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_pre3);
+ sbuf_printf(sbuf, "TX POST1 = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_post1);
+ sbuf_printf(sbuf, "TX ATTEN = %d\n",
+ ice_prv_regs_buf.equalization[i].tx_equalization_atten);
+ }
+
+	/* Get FEC correctable and uncorrectable counters */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port,
+ &(ice_prv_regs_buf.stats));
+ if (err) {
+ device_printf(dev, "failed to get FEC stats Lport %d Err %d\n",
+ pi->lport, err);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+ return (EIO);
+ }
+
+ sbuf_printf(sbuf, "\nRS FEC Corrected codeword count = %d\n",
+ ((u32)ice_prv_regs_buf.stats.fec_corr_cnt_high << 16) |
+ ice_prv_regs_buf.stats.fec_corr_cnt_low);
+ sbuf_printf(sbuf, "RS FEC Uncorrected codeword count = %d\n",
+ ((u32)ice_prv_regs_buf.stats.fec_uncorr_cnt_high << 16) |
+ ice_prv_regs_buf.stats.fec_uncorr_cnt_low);
+
+ /* Finish */
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return (0);
+}
+
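Assuming the debug list is rooted at dev.ice.<unit>.debug (the node itself is not shown in this hunk), the handler registered as "phy_statistics" above can be exercised with:

	sysctl dev.ice.0.debug.phy_statistics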
+/**
* ice_ets_str_to_tbl - Parse string into ETS table
* @str: input string to parse
* @table: output eight values used for ETS values
@@ -10844,7 +11581,7 @@ ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
device_t dev = sc->dev;
struct sbuf *sbuf;
- enum ice_status status;
+ int status;
int i = 0;
UNREFERENCED_PARAMETER(oidp);
@@ -10856,7 +11593,7 @@ ice_sysctl_query_port_ets(SYSCTL_HANDLER_ARGS)
pi = hw->port_info;
status = ice_aq_query_port_ets(pi, &port_ets, sizeof(port_ets), NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Query Port ETS AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
@@ -10915,7 +11652,7 @@ ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS)
struct ice_port_info *pi;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
struct sbuf *sbuf;
int ret;
@@ -10962,7 +11699,8 @@ ice_sysctl_dscp2tc_map(SYSCTL_HANDLER_ARGS)
* needs to be done for ETS settings, so this function can be re-used
* for that purpose.
*/
- ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg, 8);
+ ret = ice_ets_str_to_tbl(dscp_user_buf, new_dscp_table_seg,
+ ICE_MAX_TRAFFIC_CLASS - 1);
if (ret) {
device_printf(dev, "%s: Could not parse input DSCP2TC table: %s\n",
__func__, dscp_user_buf);
@@ -11000,7 +11738,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct ice_debug_dump_cmd *ddc;
- enum ice_status status;
+ int status;
int err = 0;
/* Returned arguments from the Admin Queue */
@@ -11101,7 +11839,7 @@ ice_handle_debug_dump_ioctl(struct ice_softc *sc, struct ifdrv *ifd)
aq_error:
/* Convert private status to an error code for proper ioctl response */
switch (status) {
- case ICE_SUCCESS:
+ case 0:
err = (0);
break;
case ICE_ERR_NO_MEMORY:
@@ -11199,7 +11937,7 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
struct ice_softc *sc = (struct ice_softc *)arg1;
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
UNREFERENCED_PARAMETER(oidp);
UNREFERENCED_PARAMETER(arg2);
@@ -11209,7 +11947,7 @@ ice_sysctl_temperature(SYSCTL_HANDLER_ARGS)
status = ice_aq_get_sensor_reading(hw, ICE_AQC_INT_TEMP_SENSOR,
ICE_AQC_INT_TEMP_FORMAT, &resp, NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
device_printf(dev,
"Get Sensor Reading AQ call failed, err %s aq_err %s\n",
ice_status_str(status),
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index cfd848d370bb..640bdf8fed7b 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -40,6 +40,9 @@
#ifndef _ICE_LIB_H_
#define _ICE_LIB_H_
+/* include kernel options first */
+#include "ice_opts.h"
+
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/rman.h>
@@ -155,6 +158,7 @@ struct ice_bar_info {
#define ICE_MAX_TSO_HDR_SEGS 3
#define ICE_MSIX_BAR 3
+#define ICE_MAX_MSIX_VECTORS (GLINT_DYN_CTL_MAX_INDEX + 1)
#define ICE_DEFAULT_DESC_COUNT 1024
#define ICE_MAX_DESC_COUNT 8160
@@ -287,6 +291,12 @@ struct ice_bar_info {
#define ICE_APPLY_FEC_FC (ICE_APPLY_FEC | ICE_APPLY_FC)
#define ICE_APPLY_LS_FEC_FC (ICE_APPLY_LS_FEC | ICE_APPLY_FC)
+/*
+ * Mask of valid flags that can be used as an input for the
+ * advertise_speed sysctl.
+ */
+#define ICE_SYSCTL_SPEEDS_VALID_RANGE 0xFFF
+
/**
* @enum ice_dyn_idx_t
* @brief Dynamic Control ITR indexes
@@ -303,7 +313,7 @@ enum ice_dyn_idx_t {
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
-/* By convenction ITR0 is used for RX, and ITR1 is used for TX */
+/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
@@ -313,6 +323,28 @@ enum ice_dyn_idx_t {
#define ICE_DFLT_TX_ITR 50
#define ICE_DFLT_RX_ITR 50
+/* RS FEC register values */
+#define ICE_RS_FEC_REG_SHIFT 2
+#define ICE_RS_FEC_RECV_ID_SHIFT 4
+#define ICE_RS_FEC_CORR_LOW_REG_PORT0 (0x02 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT0 (0x03 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT0 (0x04 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT0 (0x05 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT1 (0x42 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT1 (0x43 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT1 (0x44 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT1 (0x45 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT2 (0x4A << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT2 (0x4B << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT2 (0x4C << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT2 (0x4D << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_LOW_REG_PORT3 (0x52 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_CORR_HIGH_REG_PORT3 (0x53 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_LOW_REG_PORT3 (0x54 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_UNCORR_HIGH_REG_PORT3 (0x55 << ICE_RS_FEC_REG_SHIFT)
+#define ICE_RS_FEC_RECEIVER_ID_PCS0 (0x33 << ICE_RS_FEC_RECV_ID_SHIFT)
+#define ICE_RS_FEC_RECEIVER_ID_PCS1 (0x34 << ICE_RS_FEC_RECV_ID_SHIFT)
+
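Worked out, the shifts place these constants at their hardware offsets: ICE_RS_FEC_CORR_LOW_REG_PORT0 is 0x02 << 2 = 0x08, and ICE_RS_FEC_RECEIVER_ID_PCS0 is 0x33 << 4 = 0x330.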
/**
* ice_itr_to_reg - Convert an ITR setting into its register equivalent
* @hw: The device HW structure
@@ -367,6 +399,16 @@ enum ice_rx_dtype {
#define ICE_I2C_MAX_RETRIES 10
/*
+ * The Get Link Status AQ command and other link commands can return
+ * EAGAIN, indicating that the FW Link Management engine is busy.
+ * Define the number of times that the driver should retry sending these
+ * commands and the amount of time it should wait between those retries
+ * (in milliseconds) here.
+ */
+#define ICE_LINK_AQ_MAX_RETRIES 10
+#define ICE_LINK_RETRY_DELAY 17
+
+/*
* The Start LLDP Agent AQ command will fail if it's sent too soon after
* the LLDP agent is stopped. The period between the stop and start
* commands must currently be at least 2 seconds.
@@ -374,10 +416,11 @@ enum ice_rx_dtype {
#define ICE_START_LLDP_RETRY_WAIT (2 * hz)
/*
- * Only certain cluster IDs are valid for the FW debug dump functionality,
- * so define a mask of those here.
+ * Only certain clusters are valid for certain devices for the FW debug dump
+ * functionality, so define masks of those here.
*/
-#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK 0x4001AF
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E810 0x4001AF
+#define ICE_FW_DEBUG_DUMP_VALID_CLUSTER_MASK_E830 0x1AF
struct ice_softc;
@@ -428,6 +471,7 @@ struct tx_stats {
u64 tx_bytes;
u64 tx_packets;
u64 mss_too_small;
+ u64 tso;
u64 cso[ICE_CSO_STAT_TX_COUNT];
};
@@ -485,6 +529,9 @@ struct ice_pf_sw_stats {
/* # of detected MDD events for Tx and Rx */
u32 tx_mdd_count;
u32 rx_mdd_count;
+
+	u64 rx_roc_error;	/* port oversize packet stats: error_cnt
+				 * from GLV_REPC VSI register + RxOversize */
};
/**
@@ -564,6 +611,10 @@ struct ice_vsi {
u16 mirror_src_vsi;
u16 rule_mir_ingress;
u16 rule_mir_egress;
+
+#ifdef PCI_IOV
+ u8 vf_num; /* Index of owning VF, if applicable */
+#endif
};
/**
@@ -581,6 +632,58 @@ struct ice_debug_dump_cmd {
};
/**
+ * @struct ice_serdes_equalization
+ * @brief serdes equalization info
+ */
+struct ice_serdes_equalization {
+ int rx_equalization_pre1;
+ int rx_equalization_pre2;
+ int rx_equalization_post1;
+ int rx_equalization_bflf;
+ int rx_equalization_bfhf;
+ int rx_equalization_drate;
+ int tx_equalization_pre1;
+ int tx_equalization_pre2;
+ int tx_equalization_pre3;
+ int tx_equalization_atten;
+ int tx_equalization_post1;
+};
+
+/**
+ * @struct ice_fec_stats_to_sysctl
+ * @brief FEC stats register value of port
+ */
+struct ice_fec_stats_to_sysctl {
+ u16 fec_corr_cnt_low;
+ u16 fec_corr_cnt_high;
+ u16 fec_uncorr_cnt_low;
+ u16 fec_uncorr_cnt_high;
+};
+
+#define ICE_MAX_SERDES_LANE_COUNT 4
+
+/**
+ * @struct ice_regdump_to_sysctl
+ * @brief PHY stats of port
+ */
+struct ice_regdump_to_sysctl {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization equalization[ICE_MAX_SERDES_LANE_COUNT];
+ struct ice_fec_stats_to_sysctl stats;
+};
+
+/**
+ * @struct ice_port_topology
+ * @brief Port topology for a given lport, i.e. serdes mapping, PCS quad, MAC port, cage
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
+/**
* @enum ice_state
* @brief Driver state flags
*
@@ -612,6 +715,7 @@ enum ice_state {
ICE_STATE_FIRST_INIT_LINK,
ICE_STATE_DO_CREATE_MIRR_INTFC,
ICE_STATE_DO_DESTROY_MIRR_INTFC,
+ ICE_STATE_PHY_FW_INIT_PENDING,
/* This entry must be last */
ICE_STATE_LAST,
};
@@ -713,7 +817,7 @@ struct ice_str_buf {
};
struct ice_str_buf _ice_aq_str(enum ice_aq_err aq_err);
-struct ice_str_buf _ice_status_str(enum ice_status status);
+struct ice_str_buf _ice_status_str(int status);
struct ice_str_buf _ice_err_str(int err);
struct ice_str_buf _ice_fltr_flag_str(u16 flag);
struct ice_str_buf _ice_log_sev_str(u8 log_level);
@@ -830,7 +934,7 @@ void ice_free_bar(device_t dev, struct ice_bar_info *bar);
void ice_set_ctrlq_len(struct ice_hw *hw);
void ice_release_vsi(struct ice_vsi *vsi);
struct ice_vsi *ice_alloc_vsi(struct ice_softc *sc, enum ice_vsi_type type);
-int ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
+void ice_alloc_vsi_qmap(struct ice_vsi *vsi, const int max_tx_queues,
const int max_rx_queues);
void ice_free_vsi_qmaps(struct ice_vsi *vsi);
int ice_initialize_vsi(struct ice_vsi *vsi);
@@ -838,7 +942,7 @@ void ice_deinit_vsi(struct ice_vsi *vsi);
uint64_t ice_aq_speed_to_rate(struct ice_port_info *pi);
int ice_get_phy_type_low(uint64_t phy_type_low);
int ice_get_phy_type_high(uint64_t phy_type_high);
-enum ice_status ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
+int ice_add_media_types(struct ice_softc *sc, struct ifmedia *media);
void ice_configure_rxq_interrupt(struct ice_hw *hw, u16 rxqid, u16 vector, u8 itr_idx);
void ice_configure_all_rxq_interrupts(struct ice_vsi *vsi);
void ice_configure_txq_interrupt(struct ice_hw *hw, u16 txqid, u16 vector, u8 itr_idx);
@@ -864,15 +968,15 @@ void ice_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
void ice_add_vsi_sysctls(struct ice_vsi *vsi);
void ice_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid *parent,
- struct ice_hw_port_stats *stats);
+ struct ice_softc *sc);
void ice_configure_misc_interrupts(struct ice_softc *sc);
int ice_sync_multicast_filters(struct ice_softc *sc);
-enum ice_status ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
+int ice_add_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
-enum ice_status ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
-enum ice_status ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
+int ice_add_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+int ice_remove_vlan_hw_filters(struct ice_vsi *vsi, u16 *vid,
u16 length);
-enum ice_status ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
+int ice_remove_vlan_hw_filter(struct ice_vsi *vsi, u16 vid);
void ice_add_vsi_tunables(struct ice_vsi *vsi, struct sysctl_oid *parent);
void ice_del_vsi_sysctl_ctx(struct ice_vsi *vsi);
void ice_add_device_tunables(struct ice_softc *sc);
@@ -887,7 +991,7 @@ void ice_add_txq_sysctls(struct ice_tx_queue *txq);
void ice_add_rxq_sysctls(struct ice_rx_queue *rxq);
int ice_config_rss(struct ice_vsi *vsi);
void ice_clean_all_vsi_rss_cfg(struct ice_softc *sc);
-enum ice_status ice_load_pkg_file(struct ice_softc *sc);
+int ice_load_pkg_file(struct ice_softc *sc);
void ice_log_pkg_init(struct ice_softc *sc, enum ice_ddp_state pkg_status);
uint64_t ice_get_ifnet_counter(struct ice_vsi *vsi, ift_counter counter);
void ice_save_pci_info(struct ice_hw *hw, device_t dev);
diff --git a/sys/dev/ice/ice_nvm.c b/sys/dev/ice/ice_nvm.c
index 5234cb265f9b..ff30adfe8fa7 100644
--- a/sys/dev/ice/ice_nvm.c
+++ b/sys/dev/ice/ice_nvm.c
@@ -46,7 +46,7 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-enum ice_status
+int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd)
@@ -92,14 +92,14 @@ ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
* Returns a status code on failure. Note that the data pointer may be
* partially updated if some reads succeed before a failure.
*/
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram)
{
- enum ice_status status;
u32 inlen = *length;
u32 bytes_read = 0;
bool last_cmd;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -157,7 +157,7 @@ ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
*
* Update the NVM using the admin queue commands (0x0703)
*/
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd)
@@ -198,12 +198,11 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
*
* Erase the NVM sector using the admin queue commands (0x0702)
*/
-enum ice_status
-ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
+int ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
- enum ice_status status;
+ int status;
__le16 len;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -240,13 +239,13 @@ ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd)
*
* Reads single or multiple feature/field ID and data (0x0704)
*/
-enum ice_status
+int
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd)
{
struct ice_aqc_nvm_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -275,7 +274,7 @@ ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
*
* Writes single or multiple feature/field ID and data (0x0705)
*/
-enum ice_status
+int
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd)
{
@@ -301,7 +300,7 @@ ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
* @offset: offset in words from module start
* @words: number of words to access
*/
-static enum ice_status
+static int
ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
{
if ((offset + words) > hw->flash.sr_words) {
@@ -323,7 +322,7 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -334,11 +333,11 @@ ice_check_sr_access_params(struct ice_hw *hw, u32 offset, u16 words)
*
* Reads one 16 bit word from the Shadow RAM using ice_read_flat_nvm.
*/
-enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
{
u32 bytes = sizeof(u16);
- enum ice_status status;
__le16 data_local;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -352,7 +351,7 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
return status;
*data = LE16_TO_CPU(data_local);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -365,11 +364,11 @@ enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
*
* Writes a 16 bit words buffer to the Shadow RAM using the admin command.
*/
-static enum ice_status
+static int
ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
bool last_command)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -391,11 +390,11 @@ ice_write_sr_aq(struct ice_hw *hw, u32 offset, u16 words, __le16 *data,
* Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
* taken before reading the buffer and later released.
*/
-static enum ice_status
+static int
ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
u32 bytes = *words * 2, i;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -421,12 +420,11 @@ ice_read_sr_buf_aq(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
*
* This function will request NVM ownership.
*/
-enum ice_status
-ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
+int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
if (hw->flash.blank_nvm_mode)
- return ICE_SUCCESS;
+ return 0;
return ice_acquire_res(hw, ICE_NVM_RES_ID, access, ICE_NVM_TIMEOUT);
}
@@ -532,11 +530,11 @@ static u32 ice_get_flash_bank_offset(struct ice_hw *hw, enum ice_bank_select ban
* hw->flash.banks data being setup by ice_determine_active_flash_banks()
* during initialization.
*/
-static enum ice_status
+static int
ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
u32 offset, u8 *data, u32 length)
{
- enum ice_status status;
+ int status;
u32 start;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -569,11 +567,11 @@ ice_read_flash_module(struct ice_hw *hw, enum ice_bank_select bank, u16 module,
* Read the specified word from the active NVM module. This includes the CSS
* header at the start of the NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_NVM_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -592,13 +590,13 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
* Read the CSS header length from the NVM CSS header and add the Authentication
* header size, and then convert to words.
*/
-static enum ice_status
+static int
ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
u32 *hdr_len)
{
u16 hdr_len_l, hdr_len_h;
- enum ice_status status;
u32 hdr_len_dword;
+ int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
&hdr_len_l);
@@ -616,7 +614,7 @@ ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
*hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -629,11 +627,11 @@ ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
*/
-static enum ice_status
+static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
u32 hdr_len;
+ int status;
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
@@ -655,11 +653,11 @@ ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
* Note that unlike the NVM module, the CSS data is stored at the end of the
* module instead of at the beginning.
*/
-static enum ice_status
+static int
ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -678,11 +676,11 @@ ice_read_orom_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u
*
* Read a word from the specified netlist bank.
*/
-static enum ice_status
+static int
ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- enum ice_status status;
__le16 data_local;
+ int status;
status = ice_read_flash_module(hw, bank, ICE_SR_NETLIST_BANK_PTR, offset * sizeof(u16),
(_FORCE_ u8 *)&data_local, sizeof(u16));
@@ -700,9 +698,9 @@ ice_read_netlist_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset
*
* Reads one 16 bit word from the Shadow RAM using ice_read_sr_word_aq.
*/
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -713,6 +711,8 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
return status;
}
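
The acquire/read/release ownership pattern that ice_read_sr_word wraps recurs throughout this file. As a minimal sketch using only functions declared in ice_nvm.h, equivalent in shape to ice_read_sr_word itself:

static int
read_sr_word_with_ownership(struct ice_hw *hw, u16 offset, u16 *data)
{
	int status;

	/* Take the NVM semaphore for reading */
	status = ice_acquire_nvm(hw, ICE_RES_READ);
	if (status)
		return status;

	/* Perform the access while holding ownership */
	status = ice_read_sr_word_aq(hw, offset, data);

	/* Always release, whether or not the read succeeded */
	ice_release_nvm(hw);
	return status;
}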
+#define check_add_overflow __builtin_add_overflow
+
/**
* ice_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
* @hw: pointer to hardware structure
@@ -724,56 +724,71 @@ enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data)
* Area (PFA) and returns the TLV pointer and length. The caller can
* use these to read the variable length TLV value.
*/
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- enum ice_status status;
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Preserved Field Array pointer.\n");
return status;
}
status = ice_read_sr_word(hw, pfa_ptr, &pfa_len);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
- /* Starting with first TLV after PFA length, iterate through the list
+
+ if (check_add_overflow(pfa_ptr, (u16)(pfa_len - 1), &max_tlv)) {
+ ice_debug(hw, ICE_DBG_INIT, "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return ICE_ERR_INVAL_SIZE;
+ }
+
+ /* The Preserved Fields Area contains a sequence of TLVs which define
+ * its contents. The PFA length includes all of the TLVs, plus its
+ * initial length word itself, *and* one final word at the end of all
+ * of the TLVs.
+ *
+ * Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
/* Read TLV type */
- status = ice_read_sr_word(hw, next_tlv, &tlv_sub_module_type);
- if (status != ICE_SUCCESS) {
+ status = ice_read_sr_word(hw, (u16)next_tlv,
+ &tlv_sub_module_type);
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV type.\n");
break;
}
/* Read TLV length */
- status = ice_read_sr_word(hw, next_tlv + 1, &tlv_len);
- if (status != ICE_SUCCESS) {
+ status = ice_read_sr_word(hw, (u16)(next_tlv + 1), &tlv_len);
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read TLV length.\n");
break;
}
if (tlv_sub_module_type == module_type) {
if (tlv_len) {
- *module_tlv = next_tlv;
+ *module_tlv = (u16)next_tlv;
*module_tlv_len = tlv_len;
- return ICE_SUCCESS;
+ return 0;
}
return ICE_ERR_INVAL_SIZE;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, (u16)2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ ice_debug(hw, ICE_DBG_INIT, "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return ICE_ERR_INVAL_SIZE;
+ }
}
/* Module does not exist */
return ICE_ERR_DOES_NOT_EXIST;
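
check_add_overflow above aliases the GCC/Clang builtin, which computes the infinite-precision sum and reports whether it fits the result type. A standalone sketch of why the 16-bit guard matters, with made-up pfa_ptr/pfa_len values:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint16_t pfa_ptr = 0xff00, pfa_len = 0x0200, max_tlv;

	/* 0xff00 + 0x01ff = 0x100ff, which wraps to 0x00ff in 16 bits.
	 * An unchecked add would produce an upper bound below pfa_ptr,
	 * so the TLV walk would terminate at once or scan the wrong
	 * region; the builtin reports the wrap instead.
	 */
	if (__builtin_add_overflow(pfa_ptr, (uint16_t)(pfa_len - 1),
	    &max_tlv))
		printf("overflow detected, PFA length rejected\n");
	else
		printf("max_tlv = 0x%04x\n", max_tlv);
	return (0);
}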
@@ -787,24 +802,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
*
* Reads the part number string from the NVM.
*/
-enum ice_status
-ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
+int ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
{
u16 pba_tlv, pba_tlv_len;
- enum ice_status status;
u16 pba_word, pba_size;
+ int status;
u16 i;
status = ice_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
ICE_SR_PBA_BLOCK_PTR);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block TLV.\n");
return status;
}
/* pba_size is the next word */
status = ice_read_sr_word(hw, (pba_tlv + 2), &pba_size);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Section size.\n");
return status;
}
@@ -825,7 +839,7 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
for (i = 0; i < pba_size; i++) {
status = ice_read_sr_word(hw, (pba_tlv + 2 + 1) + i, &pba_word);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read PBA Block word %d.\n", i);
return status;
}
@@ -847,10 +861,10 @@ ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size)
* Read the security revision out of the CSS header of the active NVM module
* bank.
*/
-static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
+static int ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
- enum ice_status status;
u16 srev_l, srev_h;
+ int status;
status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_SREV_L, &srev_l);
if (status)
@@ -862,7 +876,7 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
*srev = srev_h << 16 | srev_l;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -874,11 +888,11 @@ static enum ice_status ice_get_nvm_srev(struct ice_hw *hw, enum ice_bank_select
* Read the NVM EETRACK ID and map version of the main NVM image bank, filling
* in the NVM info structure.
*/
-static enum ice_status
+static int
ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nvm_info *nvm)
{
u16 eetrack_lo, eetrack_hi, ver;
- enum ice_status status;
+ int status;
status = ice_read_nvm_sr_copy(hw, bank, ICE_SR_NVM_DEV_STARTER_VER, &ver);
if (status) {
@@ -906,7 +920,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
if (status)
ice_debug(hw, ICE_DBG_NVM, "Failed to read NVM security revision.\n");
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -918,7 +932,7 @@ ice_get_nvm_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_nv
* inactive NVM bank. Used to access version data for a pending update that
* has not yet been activated.
*/
-enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
+int ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm)
{
return ice_get_nvm_ver_info(hw, ICE_INACTIVE_FLASH_BANK, nvm);
}
@@ -932,13 +946,13 @@ enum ice_status ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info
* Read the security revision out of the CSS header of the active OROM module
* bank.
*/
-static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
+static int ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select bank, u32 *srev)
{
u32 orom_size_word = hw->flash.banks.orom_size / 2;
- enum ice_status status;
u16 srev_l, srev_h;
u32 css_start;
u32 hdr_len;
+ int status;
status = ice_get_nvm_css_hdr_len(hw, bank, &hdr_len);
if (status)
@@ -964,7 +978,7 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select
*srev = srev_h << 16 | srev_l;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -976,13 +990,14 @@ static enum ice_status ice_get_orom_srev(struct ice_hw *hw, enum ice_bank_select
* Searches through the Option ROM flash contents to locate the CIVD data for
* the image.
*/
-static enum ice_status
+static int
ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_orom_civd_info *civd)
{
- u8 *orom_data;
- enum ice_status status;
+ struct ice_orom_civd_info civd_data_section;
+ int status;
u32 offset;
+ u32 tmp;
/* The CIVD section is located in the Option ROM aligned to 512 bytes.
* The first 4 bytes must contain the ASCII characters "$CIV".
@@ -993,38 +1008,37 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
* usually somewhere in the middle of the bank. We need to scan the
* Option ROM bank to locate it.
*
- * It's significantly faster to read the entire Option ROM up front
- * using the maximum page size, than to read each possible location
- * with a separate firmware command.
*/
- orom_data = (u8 *)ice_calloc(hw, hw->flash.banks.orom_size, sizeof(u8));
- if (!orom_data)
- return ICE_ERR_NO_MEMORY;
-
- status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR, 0,
- orom_data, hw->flash.banks.orom_size);
- if (status) {
- ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
- goto exit_error;
- }
/* Scan the memory buffer to locate the CIVD data section */
for (offset = 0; (offset + 512) <= hw->flash.banks.orom_size; offset += 512) {
- struct ice_orom_civd_info *tmp;
u8 sum = 0, i;
- tmp = (struct ice_orom_civd_info *)&orom_data[offset];
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&tmp, sizeof(tmp));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read Option ROM data\n");
+ return status;
+ }
/* Skip forward until we find a matching signature */
- if (memcmp("$CIV", tmp->signature, sizeof(tmp->signature)) != 0)
+ if (memcmp("$CIV", &tmp, sizeof(tmp)) != 0)
continue;
ice_debug(hw, ICE_DBG_NVM, "Found CIVD section at offset %u\n",
offset);
+ status = ice_read_flash_module(hw, bank, ICE_SR_1ST_OROM_BANK_PTR,
+ offset, (u8 *)&civd_data_section,
+ sizeof(civd_data_section));
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Unable to read CIVD data\n");
+ goto exit_error;
+ }
+
/* Verify that the simple checksum is zero */
- for (i = 0; i < sizeof(*tmp); i++)
- sum += ((u8 *)tmp)[i];
+ for (i = 0; i < sizeof(civd_data_section); i++)
+ sum += ((u8 *)&civd_data_section)[i];
if (sum) {
ice_debug(hw, ICE_DBG_NVM, "Found CIVD data with invalid checksum of %u\n",
@@ -1033,16 +1047,15 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
goto exit_error;
}
- *civd = *tmp;
- ice_free(hw, orom_data);
- return ICE_SUCCESS;
+ *civd = civd_data_section;
+
+ return 0;
}
status = ICE_ERR_NVM;
ice_debug(hw, ICE_DBG_NVM, "Unable to locate CIVD data within the Option ROM\n");
exit_error:
- ice_free(hw, orom_data);
return status;
}
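
The rework above trades one large heap-buffered read of the whole Option ROM for a small read at each 512-byte boundary. The scan pattern in isolation, with read_at() as a hypothetical stand-in for ice_read_flash_module():

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for ice_read_flash_module() */
int read_at(uint32_t offset, void *buf, uint32_t len);

int
find_civd_offset(uint32_t bank_size, uint32_t *civd_offset)
{
	uint32_t off, sig;

	for (off = 0; (off + 512) <= bank_size; off += 512) {
		/* Probe only the 4-byte signature at each boundary */
		if (read_at(off, &sig, sizeof(sig)))
			return (-1);		/* read failure */
		if (memcmp(&sig, "$CIV", sizeof(sig)) != 0)
			continue;		/* no signature here */
		*civd_offset = off;
		return (0);
	}
	return (-2);				/* not found */
}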
@@ -1055,12 +1068,12 @@ exit_error:
* Read Option ROM version and security revision from the Option ROM flash
* section.
*/
-static enum ice_status
+static int
ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_orom_info *orom)
{
struct ice_orom_civd_info civd;
- enum ice_status status;
u32 combo_ver;
+ int status;
status = ice_get_orom_civd_data(hw, bank, &civd);
if (status) {
@@ -1080,7 +1093,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1092,7 +1105,7 @@ ice_get_orom_ver_info(struct ice_hw *hw, enum ice_bank_select bank, struct ice_o
* section of flash. Used to access version data for a pending update that has
* not yet been activated.
*/
-enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
+int ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom)
{
return ice_get_orom_ver_info(hw, ICE_INACTIVE_FLASH_BANK, orom);
}
@@ -1107,13 +1120,13 @@ enum ice_status ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_inf
* Topology section to find the Netlist ID block and extract the relevant
* information into the netlist version structure.
*/
-static enum ice_status
+static int
ice_get_netlist_info(struct ice_hw *hw, enum ice_bank_select bank,
struct ice_netlist_info *netlist)
{
u16 module_id, length, node_count, i;
- enum ice_status status;
u16 *id_blk;
+ int status;
status = ice_read_netlist_module(hw, bank, ICE_NETLIST_TYPE_OFFSET, &module_id);
if (status)
@@ -1181,7 +1194,7 @@ exit_error:
*
* Get the netlist version information
*/
-enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_ACTIVE_FLASH_BANK, netlist);
}
@@ -1195,7 +1208,7 @@ enum ice_status ice_get_netlist_ver_info(struct ice_hw *hw, struct ice_netlist_i
* extract version data of a pending flash update in order to display the
* version data.
*/
-enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
+int ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist)
{
return ice_get_netlist_info(hw, ICE_INACTIVE_FLASH_BANK, netlist);
}
@@ -1208,10 +1221,10 @@ enum ice_status ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netli
* the actual size is smaller. Use bisection to determine the accessible size
* of flash memory.
*/
-static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
+static int ice_discover_flash_size(struct ice_hw *hw)
{
u32 min_size = 0, max_size = ICE_AQC_NVM_MAX_OFFSET + 1;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1229,7 +1242,7 @@ static enum ice_status ice_discover_flash_size(struct ice_hw *hw)
hw->adminq.sq_last_status == ICE_AQ_RC_EINVAL) {
ice_debug(hw, ICE_DBG_NVM, "%s: New upper bound of %u bytes\n",
__func__, offset);
- status = ICE_SUCCESS;
+ status = 0;
max_size = offset;
} else if (!status) {
ice_debug(hw, ICE_DBG_NVM, "%s: New lower bound of %u bytes\n",
@@ -1265,10 +1278,9 @@ err_read_flat_nvm:
* sector size by using the highest bit. The reported pointer value will be in
* bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
+static int ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -1281,7 +1293,7 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
else
*pointer = value * 2;
- return ICE_SUCCESS;
+ return 0;
}
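
Only the word-offset branch of ice_read_sr_pointer is visible in this hunk; the other branch treats values with the top bit set as 4 KB sector counts. A sketch of the full decoding, with the flag name and bit position assumed rather than taken from the register headers:

#include <stdint.h>

/* Assumed name for the bit-15 "value is in 4 KB sectors" flag */
#define SR_PTR_4KB_UNITS	(1u << 15)

static uint32_t
decode_sr_pointer(uint16_t value)
{
	if (value & SR_PTR_4KB_UNITS)
		/* Sector count: clear the flag, scale to bytes */
		return ((uint32_t)(value & ~SR_PTR_4KB_UNITS) * 4 * 1024);
	/* Word offset: each word is two bytes */
	return ((uint32_t)value * 2);
}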
/**
@@ -1297,10 +1309,9 @@ ice_read_sr_pointer(struct ice_hw *hw, u16 offset, u32 *pointer)
* Each area size word is specified in 4KB sector units. This function reports
* the size in bytes, intended for flat NVM reads.
*/
-static enum ice_status
-ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
+static int ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
{
- enum ice_status status;
+ int status;
u16 value;
status = ice_read_sr_word(hw, offset, &value);
@@ -1310,7 +1321,7 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
/* Area sizes are always specified in 4KB units */
*size = value * 4 * 1024;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1323,12 +1334,11 @@ ice_read_sr_area_size(struct ice_hw *hw, u16 offset, u32 *size)
* structure for later use in order to calculate the correct offset to read
* from the active module.
*/
-static enum ice_status
-ice_determine_active_flash_banks(struct ice_hw *hw)
+static int ice_determine_active_flash_banks(struct ice_hw *hw)
{
struct ice_bank_info *banks = &hw->flash.banks;
- enum ice_status status;
u16 ctrl_word;
+ int status;
status = ice_read_sr_word(hw, ICE_SR_NVM_CTRL_WORD, &ctrl_word);
if (status) {
@@ -1393,7 +1403,7 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1403,12 +1413,12 @@ ice_determine_active_flash_banks(struct ice_hw *hw)
* This function reads and populates NVM settings such as Shadow RAM size,
* max_timeout, and blank_nvm_mode
*/
-enum ice_status ice_init_nvm(struct ice_hw *hw)
+int ice_init_nvm(struct ice_hw *hw)
{
struct ice_flash_info *flash = &hw->flash;
- enum ice_status status;
u32 fla, gens_stat;
u8 sr_size;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1459,7 +1469,7 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
if (status)
ice_debug(hw, ICE_DBG_INIT, "Failed to read netlist info.\n");
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1473,10 +1483,10 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
* method. The buffer read is preceded by taking NVM ownership and is
* followed by its release.
*/
-enum ice_status
+int
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
{
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (!status) {
@@ -1498,7 +1508,7 @@ ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data)
* reception) by the caller. To commit the SR to NVM, the update checksum
* function should be called.
*/
-enum ice_status
+int
__ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
{
__le16 data_local = CPU_TO_LE16(*data);
@@ -1521,11 +1531,11 @@ __ice_write_sr_word(struct ice_hw *hw, u32 offset, const u16 *data)
* on ARQ completion event reception by the caller. To commit the SR to NVM,
* the update checksum function should be called.
*/
-enum ice_status
+int
__ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
{
- enum ice_status status;
__le16 *data_local;
+ int status;
void *vmem;
u32 i;
@@ -1559,12 +1569,12 @@ __ice_write_sr_buf(struct ice_hw *hw, u32 offset, u16 words, const u16 *data)
* is customer specific and unknown. Therefore, this function skips the whole
* maximum possible size of the VPD area (1 kB).
*/
-static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
+static int ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
- enum ice_status status = ICE_SUCCESS;
u16 pcie_alt_module = 0;
u16 checksum_local = 0;
u16 vpd_module;
+ int status = 0;
void *vmem;
u16 *data;
u16 i;
@@ -1596,7 +1606,7 @@ static enum ice_status ice_calc_sr_checksum(struct ice_hw *hw, u16 *checksum)
u16 words = ICE_SR_SECTOR_SIZE_IN_WORDS;
status = ice_read_sr_buf_aq(hw, i, &words, data);
- if (status != ICE_SUCCESS)
+ if (status)
goto ice_calc_sr_checksum_exit;
}
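
The Shadow RAM software checksum is defined so that the 16-bit sum of every word, the checksum word included, lands on a fixed base constant (ICE_SR_SW_CHECKSUM_BASE in the driver headers). A self-contained sketch of the computation; the 0xBABA value is an assumption here:

#include <stdint.h>

#define SW_CHECKSUM_BASE	0xBABAu	/* assumed base constant value */

static uint16_t
sr_checksum(const uint16_t *words, uint32_t count)
{
	uint16_t sum = 0;
	uint32_t i;

	/* Sum every word except the checksum slot itself */
	for (i = 0; i < count; i++)
		sum += words[i];

	/* The stored checksum makes the grand total equal the base,
	 * so validation can simply recompute and compare.
	 */
	return ((uint16_t)(SW_CHECKSUM_BASE - sum));
}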
@@ -1630,11 +1640,11 @@ ice_calc_sr_checksum_exit:
* on ARQ completion event reception by caller.
* This function will commit SR to NVM.
*/
-enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
+int ice_update_sr_checksum(struct ice_hw *hw)
{
- enum ice_status status;
__le16 le_sum;
u16 checksum;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1655,11 +1665,11 @@ enum ice_status ice_update_sr_checksum(struct ice_hw *hw)
* Performs checksum calculation and validates the Shadow RAM SW checksum.
* If the caller does not need the checksum value, the pointer can be NULL.
*/
-enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
+int ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
{
- enum ice_status status;
u16 checksum_local;
u16 checksum_sr;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1694,11 +1704,11 @@ enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum)
*
* Verify NVM PFA checksum validity (0x0706)
*/
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
+int ice_nvm_validate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1725,11 +1735,11 @@ enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw)
*
* Recalculate NVM PFA checksum (0x0706)
*/
-enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
+int ice_nvm_recalculate_checksum(struct ice_hw *hw)
{
struct ice_aqc_nvm_checksum *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
status = ice_acquire_nvm(hw, ICE_RES_READ);
if (status)
@@ -1767,12 +1777,11 @@ enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw)
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-enum ice_status
-ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
- enum ice_status err;
+ int err;
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
@@ -1795,11 +1804,11 @@ ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
* Read the Minimum Security Revision TLV and extract the revision values from
* the flash image into a readable structure for processing.
*/
-enum ice_status
+int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
struct ice_aqc_nvm_minsrev data;
- enum ice_status status;
+ int status;
u16 valid;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1840,7 +1849,7 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
minsrevs->orom_valid = true;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1853,11 +1862,11 @@ ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
* fields to determine what update is being requested. If the valid bit is not
* set for that module, then the associated minsrev will be left as is.
*/
-enum ice_status
+int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs)
{
struct ice_aqc_nvm_minsrev data;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1911,7 +1920,7 @@ exit_release_res:
* Fill in the data section of the NVM access request with a copy of the NVM
* features structure.
*/
-enum ice_status
+int
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
@@ -1932,7 +1941,7 @@ ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
data->drv_features.size = sizeof(struct ice_nvm_features);
data->drv_features.features[0] = ICE_NVM_FEATURES_0_REG_ACCESS;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1977,7 +1986,7 @@ u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd)
* register offset. First validates that the module and flags are correct, and
* then ensures that the register offset is one of the accepted registers.
*/
-static enum ice_status
+static int
ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
{
u32 module, flags, offset;
@@ -2005,18 +2014,18 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
case GLNVM_GENS:
case GLNVM_FLA:
case PF_FUNC_RID:
- return ICE_SUCCESS;
+ return 0;
default:
break;
}
for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
if (offset == (u32)GL_HIDA(i))
- return ICE_SUCCESS;
+ return 0;
for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
if (offset == (u32)GL_HIBA(i))
- return ICE_SUCCESS;
+ return 0;
/* All other register offsets are not valid */
return ICE_ERR_OUT_OF_RANGE;
@@ -2030,11 +2039,11 @@ ice_validate_nvm_rw_reg(struct ice_nvm_access_cmd *cmd)
*
* Process an NVM access request to read a register.
*/
-enum ice_status
+int
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2052,7 +2061,7 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
/* Read the register and store the contents in the data field */
data->regval = rd32(hw, cmd->offset);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2063,11 +2072,11 @@ ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
*
* Process an NVM access request to write a register.
*/
-enum ice_status
+int
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2077,21 +2086,24 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
return status;
/* Reject requests to write to read-only registers */
- switch (cmd->offset) {
- case GL_HICR_EN:
- case GLGEN_RSTAT:
- return ICE_ERR_OUT_OF_RANGE;
- default:
- break;
+ if (hw->mac_type == ICE_MAC_E830) {
+ if (cmd->offset == E830_GL_HICR_EN)
+ return ICE_ERR_OUT_OF_RANGE;
+ } else {
+ if (cmd->offset == GL_HICR_EN)
+ return ICE_ERR_OUT_OF_RANGE;
}
+ if (cmd->offset == GLGEN_RSTAT)
+ return ICE_ERR_OUT_OF_RANGE;
+
ice_debug(hw, ICE_DBG_NVM, "NVM access: writing register %08x with value %08x\n",
cmd->offset, data->regval);
/* Write the data field to the specified register */
wr32(hw, cmd->offset, data->regval);
- return ICE_SUCCESS;
+ return 0;
}
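
The rewritten guard handles GL_HICR_EN living at a different offset on E830 parts. The same generation-specific filtering in isolation, with placeholder offsets rather than the real register map:

#include <stdbool.h>
#include <stdint.h>

enum mac_type { MAC_PRE_E830, MAC_E830 };

/* Placeholder offsets; the real values come from the register headers */
#define HICR_EN_LEGACY	0x00091000u
#define HICR_EN_E830	0x00082000u
#define RSTAT_REG	0x000b8188u

static bool
reg_is_read_only(enum mac_type mac, uint32_t offset)
{
	/* GL_HICR_EN moved on E830, so the check is generation-specific */
	if (mac == MAC_E830) {
		if (offset == HICR_EN_E830)
			return (true);
	} else if (offset == HICR_EN_LEGACY) {
		return (true);
	}

	/* The global reset status register is read-only everywhere */
	return (offset == RSTAT_REG);
}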
/**
@@ -2107,7 +2119,7 @@ ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
* For valid commands, perform the necessary function, copying the data into
* the provided data buffer.
*/
-enum ice_status
+int
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data)
{
@@ -2146,3 +2158,59 @@ ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
}
}
+/**
+ * ice_nvm_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using AQ command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ice_nvm_sanitize_operate(struct ice_hw *hw)
+{
+ s32 status;
+ u8 values;
+
+ u8 cmd_flags = ICE_AQ_NVM_SANITIZE_REQ_OPERATE |
+ ICE_AQ_NVM_SANITIZE_OPERATE_SUBJECT_CLEAR;
+
+ status = ice_nvm_sanitize(hw, cmd_flags, &values);
+ if (status)
+ return status;
+ if ((!(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & ICE_AQ_NVM_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return ICE_ERR_AQ_ERROR;
+
+ return ICE_SUCCESS;
+}
+
+/**
+ * ice_nvm_sanitize - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using AQ command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ice_nvm_sanitize(struct ice_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ice_aqc_nvm_sanitization *cmd;
+ struct ice_aq_desc desc;
+ s32 status;
+
+ cmd = &desc.params.sanitization;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
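
A hypothetical caller sketch for the new sanitize entry point, reusing the driver's logging convention; nothing here is prescribed by the diff beyond the ice_nvm_sanitize_operate signature:

static void
example_nvm_sanitize(struct ice_hw *hw)
{
	s32 status;

	/* Issues AQ command 0x070C and checks the completion flags */
	status = ice_nvm_sanitize_operate(hw);
	if (status)
		ice_debug(hw, ICE_DBG_NVM,
			  "NVM sanitize failed, err %d\n", (int)status);
	else
		ice_debug(hw, ICE_DBG_NVM, "NVM user data cleared\n");
}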
diff --git a/sys/dev/ice/ice_nvm.h b/sys/dev/ice/ice_nvm.h
index f43381c10ac5..310e17260d12 100644
--- a/sys/dev/ice/ice_nvm.h
+++ b/sys/dev/ice/ice_nvm.h
@@ -96,65 +96,67 @@ union ice_nvm_access_data {
u32 ice_nvm_access_get_module(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_flags(struct ice_nvm_access_cmd *cmd);
u32 ice_nvm_access_get_adapter(struct ice_nvm_access_cmd *cmd);
-enum ice_status
+int
ice_nvm_access_read(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_nvm_access_write(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_nvm_access_get_features(struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_handle_nvm_access(struct ice_hw *hw, struct ice_nvm_access_cmd *cmd,
union ice_nvm_access_data *data);
-enum ice_status
+int
ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
void *data, bool last_command, bool read_shadow_ram,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
-enum ice_status
+int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type);
-enum ice_status
+int
ice_get_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
-enum ice_status
+int
ice_update_nvm_minsrevs(struct ice_hw *hw, struct ice_minsrev_info *minsrevs);
-enum ice_status
+int
ice_get_inactive_orom_ver(struct ice_hw *hw, struct ice_orom_info *orom);
-enum ice_status
+int
ice_get_inactive_nvm_ver(struct ice_hw *hw, struct ice_nvm_info *nvm);
-enum ice_status
+int
ice_get_inactive_netlist_ver(struct ice_hw *hw, struct ice_netlist_info *netlist);
-enum ice_status
+int
ice_read_pba_string(struct ice_hw *hw, u8 *pba_num, u32 pba_num_size);
-enum ice_status ice_init_nvm(struct ice_hw *hw);
-enum ice_status ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
-enum ice_status
+int ice_init_nvm(struct ice_hw *hw);
+int ice_read_sr_word(struct ice_hw *hw, u16 offset, u16 *data);
+int ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data);
+int
ice_read_sr_buf(struct ice_hw *hw, u16 offset, u16 *words, u16 *data);
-enum ice_status
+int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
u16 length, void *data, bool last_command, u8 command_flags,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_read_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, u16 field_id, void *data,
u16 buf_size, u16 *elem_count, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_write_nvm_cfg(struct ice_hw *hw, u8 cmd_flags, void *data, u16 buf_size,
u16 elem_count, struct ice_sq_cd *cd);
-enum ice_status ice_update_sr_checksum(struct ice_hw *hw);
-enum ice_status ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
-enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
-enum ice_status ice_nvm_recalculate_checksum(struct ice_hw *hw);
-enum ice_status
+int ice_update_sr_checksum(struct ice_hw *hw);
+int ice_validate_sr_checksum(struct ice_hw *hw, u16 *checksum);
+int ice_nvm_validate_checksum(struct ice_hw *hw);
+int ice_nvm_recalculate_checksum(struct ice_hw *hw);
+int
ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
+s32 ice_nvm_sanitize_operate(struct ice_hw *hw);
+s32 ice_nvm_sanitize(struct ice_hw *hw, u8 cmd_flags, u8 *values);
#endif /* _ICE_NVM_H_ */
diff --git a/sys/dev/ice/ice_osdep.c b/sys/dev/ice/ice_osdep.c
index 88b4984c290f..bb3d4ef98040 100644
--- a/sys/dev/ice/ice_osdep.c
+++ b/sys/dev/ice/ice_osdep.c
@@ -40,7 +40,7 @@
#include "ice_common.h"
#include "ice_iflib.h"
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <sys/time.h>
/**
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index 300d61bfb5d9..b90c25e6c427 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -143,7 +143,7 @@ enum ice_prot_id {
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
- ICE_PROT_META_ID = 255, /* when offset == metaddata */
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/sys/dev/ice/ice_rdma.c b/sys/dev/ice/ice_rdma.c
index 3fe12cec7adc..79bc675b570d 100644
--- a/sys/dev/ice/ice_rdma.c
+++ b/sys/dev/ice/ice_rdma.c
@@ -168,7 +168,7 @@ ice_rdma_qset_register_request(struct ice_rdma_peer *peer, struct ice_rdma_qset_
struct ice_vsi *vsi = NULL;
struct ice_dcbx_cfg *dcbx_cfg;
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
int count, i, ret = 0;
uint32_t *qset_teid;
uint16_t *qs_handle;
diff --git a/sys/dev/ice/ice_rss.h b/sys/dev/ice/ice_rss.h
index ede1fad5abe7..df485f4b1f5a 100644
--- a/sys/dev/ice/ice_rss.h
+++ b/sys/dev/ice/ice_rss.h
@@ -68,14 +68,6 @@ CTASSERT(ICE_AQC_GET_SET_RSS_KEY_DATA_RSS_KEY_SIZE >= RSS_KEYSIZE);
#define RSS_HASHTYPE_RSS_UDP_IPV6 (1 << 9) /* IPv6 UDP 4-tuple */
#define RSS_HASHTYPE_RSS_UDP_IPV6_EX (1 << 10) /* IPv6 UDP 4-tuple + ext hdrs */
-#define ICE_DEFAULT_RSS_HASH_CONFIG \
- ((u_int)(RSS_HASHTYPE_RSS_IPV4 | \
- RSS_HASHTYPE_RSS_TCP_IPV4 | \
- RSS_HASHTYPE_RSS_UDP_IPV4 | \
- RSS_HASHTYPE_RSS_IPV6 | \
- RSS_HASHTYPE_RSS_TCP_IPV6 | \
- RSS_HASHTYPE_RSS_UDP_IPV6))
-
#define rss_getkey(key) ice_get_default_rss_key(key)
#define rss_getnumbuckets() (mp_ncpus)
#define rss_get_indirection_to_bucket(index) (index)
@@ -113,4 +105,12 @@ rss_hash2bucket(uint32_t hash_val, uint32_t hash_type, uint32_t *bucket_id)
#endif /* !RSS */
+#define ICE_DEFAULT_RSS_HASH_CONFIG \
+ ((u_int)(RSS_HASHTYPE_RSS_IPV4 | \
+ RSS_HASHTYPE_RSS_TCP_IPV4 | \
+ RSS_HASHTYPE_RSS_UDP_IPV4 | \
+ RSS_HASHTYPE_RSS_IPV6 | \
+ RSS_HASHTYPE_RSS_TCP_IPV6 | \
+ RSS_HASHTYPE_RSS_UDP_IPV6))
+
#endif /* _ICE_COMMON_COMPAT_H_ */
diff --git a/sys/dev/ice/ice_sbq_cmd.h b/sys/dev/ice/ice_sbq_cmd.h
new file mode 100644
index 000000000000..3f8f38c32186
--- /dev/null
+++ b/sys/dev/ice/ice_sbq_cmd.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2024, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ICE_SBQ_CMD_H_
+#define _ICE_SBQ_CMD_H_
+
+/* This header file defines the Sideband Queue commands, error codes and
+ * descriptor format. It is shared between Firmware and Software.
+ */
+
+/* Sideband Queue command structure and opcodes */
+enum ice_sbq_opc {
+ /* Sideband Queue commands */
+ ice_sbq_opc_neigh_dev_req = 0x0C00,
+ ice_sbq_opc_neigh_dev_ev = 0x0C01
+};
+
+/* Sideband Queue descriptor. Indirect command
+ * and non-posted
+ */
+struct ice_sbq_cmd_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+
+ /* Opaque message data */
+ __le32 cookie_high;
+ __le32 cookie_low;
+
+ union {
+ __le16 cmd_len;
+ __le16 cmpl_len;
+ } param0;
+
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_sbq_evt_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 cmd_retval;
+ u8 data[24];
+};
+
+enum ice_sbq_msg_dev {
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06
+};
+
+enum ice_sbq_msg_opcode {
+ ice_sbq_msg_rd = 0x00,
+ ice_sbq_msg_wr = 0x01
+};
+
+#define ICE_SBQ_MSG_FLAGS 0x40
+#define ICE_SBQ_MSG_SBE_FBE 0x0F
+
+struct ice_sbq_msg_req {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ u8 sbe_fbe;
+ u8 func_id;
+ __le16 msg_addr_low;
+ __le32 msg_addr_high;
+ __le32 data;
+};
+
+struct ice_sbq_msg_cmpl {
+ u8 dest_dev;
+ u8 src_dev;
+ u8 opcode;
+ u8 flags;
+ __le32 data;
+};
+
+/* Internal struct */
+struct ice_sbq_msg_input {
+ u8 dest_dev;
+ u8 opcode;
+ u16 msg_addr_low;
+ u32 msg_addr_high;
+ u32 data;
+};
+#endif /* _ICE_SBQ_CMD_H_ */
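
A hypothetical sketch of filling an ice_sbq_msg_input for a register read on the cgu sideband device; the function that actually transmits this struct is not part of this header, so only the message layout is exercised:

static struct ice_sbq_msg_input
example_cgu_read_msg(u16 addr_low, u32 addr_high)
{
	struct ice_sbq_msg_input msg = {
		.dest_dev = cgu,		/* enum ice_sbq_msg_dev */
		.opcode = ice_sbq_msg_rd,	/* read request */
		.msg_addr_low = addr_low,
		.msg_addr_high = addr_high,
		.data = 0,			/* unused for reads */
	};

	return (msg);
}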
diff --git a/sys/dev/ice/ice_sched.c b/sys/dev/ice/ice_sched.c
index cd0d7de62b33..d57733dbfa7e 100644
--- a/sys/dev/ice/ice_sched.c
+++ b/sys/dev/ice/ice_sched.c
@@ -39,7 +39,7 @@
* This function inserts the root node of the scheduling tree topology
* into the SW DB.
*/
-static enum ice_status
+static int
ice_sched_add_root_node(struct ice_port_info *pi,
struct ice_aqc_txsched_elem_data *info)
{
@@ -62,9 +62,9 @@ ice_sched_add_root_node(struct ice_port_info *pi,
return ICE_ERR_NO_MEMORY;
}
- ice_memcpy(&root->info, info, sizeof(*info), ICE_DMA_TO_NONDMA);
+ ice_memcpy(&root->info, info, sizeof(*info), ICE_NONDMA_TO_NONDMA);
pi->root = root;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -83,6 +83,9 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
{
u16 i;
+ if (!start_node)
+ return NULL;
+
/* The TEID is the same as that of the start_node */
if (ICE_TXSCHED_GET_NODE_TEID(start_node) == teid)
return start_node;
@@ -123,14 +126,14 @@ ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid)
*
* This function sends a scheduling elements cmd (cmd_opc)
*/
-static enum ice_status
+static int
ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
u16 elems_req, void *buf, u16 buf_size,
u16 *elems_resp, struct ice_sq_cd *cd)
{
struct ice_aqc_sched_elem_cmd *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.sched_elem_cmd;
ice_fill_dflt_direct_cmd_desc(&desc, cmd_opc);
@@ -154,7 +157,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -173,7 +176,7 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* This function inserts a scheduler node into the SW DB.
*/
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node)
@@ -181,8 +184,8 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data elem;
struct ice_sched_node *parent;
struct ice_sched_node *node;
- enum ice_status status;
struct ice_hw *hw;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -226,7 +229,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
node->tx_sched_layer = layer;
parent->children[parent->num_children++] = node;
node->info = elem;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -240,7 +243,7 @@ ice_sched_add_node(struct ice_port_info *pi, u8 layer,
*
* Delete scheduling elements (0x040F)
*/
-static enum ice_status
+static int
ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_delete_elem *buf, u16 buf_size,
u16 *grps_del, struct ice_sq_cd *cd)
@@ -259,14 +262,14 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* This function removes nodes from HW
*/
-static enum ice_status
+static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u16 num_nodes, u32 *node_teids)
{
struct ice_aqc_delete_elem *buf;
u16 i, num_groups_removed = 0;
- enum ice_status status;
u16 buf_size;
+ int status;
buf_size = ice_struct_size(buf, teid, num_nodes);
buf = (struct ice_aqc_delete_elem *)ice_malloc(hw, buf_size);
@@ -280,7 +283,7 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
- if (status != ICE_SUCCESS || num_groups_removed != 1)
+ if (status || num_groups_removed != 1)
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
@@ -400,14 +403,14 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
*
* Get default scheduler topology (0x0400)
*/
-static enum ice_status
+static int
ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
struct ice_aqc_get_topo_elem *buf, u16 buf_size,
u8 *num_branches, struct ice_sq_cd *cd)
{
struct ice_aqc_get_topo *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.get_topo;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_dflt_topo);
@@ -430,7 +433,7 @@ ice_aq_get_dflt_topo(struct ice_hw *hw, u8 lport,
*
* Add scheduling elements (0x0401)
*/
-static enum ice_status
+static int
ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_add_elem *buf, u16 buf_size,
u16 *grps_added, struct ice_sq_cd *cd)
@@ -451,7 +454,7 @@ ice_aq_add_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Configure scheduling elements (0x0403)
*/
-static enum ice_status
+static int
ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_cfgd, struct ice_sq_cd *cd)
@@ -472,7 +475,7 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
*
* Move scheduling elements (0x0408)
*/
-enum ice_status
+int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd)
@@ -493,7 +496,7 @@ ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
*
* Suspend scheduling elements (0x0409)
*/
-static enum ice_status
+static int
ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -513,7 +516,7 @@ ice_aq_suspend_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Resume scheduling elements (0x040A)
*/
-static enum ice_status
+static int
ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
u16 buf_size, u16 *elems_ret, struct ice_sq_cd *cd)
{
@@ -531,7 +534,7 @@ ice_aq_resume_sched_elems(struct ice_hw *hw, u16 elems_req, __le32 *buf,
*
* Query scheduler resource allocation (0x0412)
*/
-static enum ice_status
+static int
ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
struct ice_aqc_query_txsched_res_resp *buf,
struct ice_sq_cd *cd)
@@ -551,13 +554,13 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static enum ice_status
+static int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
u16 i, buf_size, num_elem_ret = 0;
- enum ice_status status;
__le32 *buf;
+ int status;
buf_size = sizeof(*buf) * num_nodes;
buf = (__le32 *)ice_malloc(hw, buf_size);
@@ -575,7 +578,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
status = ice_aq_resume_sched_elems(hw, num_nodes, buf,
buf_size, &num_elem_ret,
NULL);
- if (status != ICE_SUCCESS || num_elem_ret != num_nodes)
+ if (status || num_elem_ret != num_nodes)
ice_debug(hw, ICE_DBG_SCHED, "suspend/resume failed\n");
ice_free(hw, buf);
@@ -589,7 +592,7 @@ ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -605,7 +608,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
if (!vsi_ctx->lan_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/* num queues are increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_lan_q_entries[tc]) {
@@ -621,7 +624,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -631,7 +634,7 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
* @tc: TC number
* @new_numqs: number of queues
*/
-static enum ice_status
+static int
ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
@@ -647,7 +650,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
if (!vsi_ctx->rdma_q_ctx[tc])
return ICE_ERR_NO_MEMORY;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/* num queues are increased, update the queue contexts */
if (new_numqs > vsi_ctx->num_rdma_q_entries[tc]) {
@@ -663,7 +666,7 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
vsi_ctx->rdma_q_ctx[tc] = q_ctx;
vsi_ctx->num_rdma_q_entries[tc] = new_numqs;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -678,14 +681,14 @@ ice_alloc_rdma_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
*
* RL profile function to add, query, or remove profile(s)
*/
-static enum ice_status
+static int
ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
u16 num_profiles, struct ice_aqc_rl_profile_elem *buf,
u16 buf_size, u16 *num_processed, struct ice_sq_cd *cd)
{
struct ice_aqc_rl_profile *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.rl_profile;
@@ -709,7 +712,7 @@ ice_aq_rl_profile(struct ice_hw *hw, enum ice_adminq_opc opcode,
*
* Add RL profile (0x0410)
*/
-static enum ice_status
+static int
ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_added, struct ice_sq_cd *cd)
@@ -728,7 +731,7 @@ ice_aq_add_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Query RL profile (0x0411)
*/
-enum ice_status
+int
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -748,7 +751,7 @@ ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
*
* Remove RL profile (0x0415)
*/
-static enum ice_status
+static int
ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
u16 *num_profiles_removed, struct ice_sq_cd *cd)
@@ -767,14 +770,14 @@ ice_aq_remove_rl_profile(struct ice_hw *hw, u16 num_profiles,
* its associated parameters from the HW DB, and locally. The caller needs to
* hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_del_rl_profile(struct ice_hw *hw,
struct ice_aqc_rl_profile_info *rl_info)
{
struct ice_aqc_rl_profile_elem *buf;
u16 num_profiles_removed;
- enum ice_status status;
u16 num_profiles = 1;
+ int status;
if (rl_info->prof_id_ref != 0)
return ICE_ERR_IN_USE;
@@ -810,7 +813,7 @@ static void ice_sched_clear_rl_prof(struct ice_port_info *pi)
LIST_FOR_EACH_ENTRY_SAFE(rl_prof_elem, rl_prof_tmp,
&hw->rl_prof_list[ln],
ice_aqc_rl_profile_info, list_entry) {
- enum ice_status status;
+ int status;
rl_prof_elem->prof_id_ref = 0;
status = ice_sched_del_rl_profile(hw, rl_prof_elem);
@@ -923,7 +926,7 @@ void ice_sched_cleanup_all(struct ice_hw *hw)
*
* Configure Node Attributes (0x0417)
*/
-enum ice_status
+int
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -950,7 +953,7 @@ ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
*
* Configure L2 Node CGD (0x0414)
*/
-enum ice_status
+int
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf,
u16 buf_size, struct ice_sq_cd *cd)
@@ -979,7 +982,7 @@ ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_l2_nodes,
*
* This function adds nodes to HW as well as to the SW DB for a given layer
*/
-enum ice_status
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid,
@@ -988,8 +991,8 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *prev, *new_node;
struct ice_aqc_add_elem *buf;
u16 i, num_groups_added = 0;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u16 buf_size;
u32 teid;
@@ -1019,7 +1022,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
status = ice_aq_add_sched_elems(hw, 1, buf, buf_size,
&num_groups_added, NULL);
- if (status != ICE_SUCCESS || num_groups_added != 1) {
+ if (status || num_groups_added != 1) {
ice_debug(hw, ICE_DBG_SCHED, "add node failed FW Error %d\n",
hw->adminq.sq_last_status);
ice_free(hw, buf);
@@ -1034,7 +1037,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
else
status = ice_sched_add_node(pi, layer, &buf->generic[i], NULL);
- if (status != ICE_SUCCESS) {
+ if (status) {
ice_debug(hw, ICE_DBG_SCHED, "add nodes in SW DB failed status =%d\n",
status);
break;
@@ -1083,7 +1086,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
*
* Add nodes into specific hw layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1095,7 +1098,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
if (!num_nodes)
- return ICE_SUCCESS;
+ return 0;
if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM;
@@ -1127,7 +1130,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function adds nodes to a given layer.
*/
-static enum ice_status
+static int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1136,7 +1139,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u32 temp;
*num_nodes_added = 0;
@@ -1147,7 +1150,7 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
- if (status == ICE_SUCCESS)
+ if (!status)
*num_nodes_added += num_added;
/* added more nodes than requested? */
if (*num_nodes_added > num_nodes) {
@@ -1157,10 +1160,10 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
break;
}
/* break if all the nodes are added successfully */
- if (status == ICE_SUCCESS && (*num_nodes_added == num_nodes))
+ if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
- if (status != ICE_SUCCESS && status != ICE_ERR_MAX_LIMIT)
+ if (status && status != ICE_ERR_MAX_LIMIT)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
@@ -1255,7 +1258,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
}
if (node && node->info.data.elem_type == ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = LE32_TO_CPU(node->info.node_teid);
- enum ice_status status;
+ int status;
/* remove the default leaf node */
status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
@@ -1301,13 +1304,13 @@ static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
* resources, default topology created by firmware and storing the information
* in SW DB.
*/
-enum ice_status ice_sched_init_port(struct ice_port_info *pi)
+int ice_sched_init_port(struct ice_port_info *pi)
{
struct ice_aqc_get_topo_elem *buf;
- enum ice_status status;
struct ice_hw *hw;
u8 num_branches;
u16 num_elems;
+ int status;
u8 i, j;
if (!pi)
@@ -1430,12 +1433,12 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid)
*
* query FW for allocated scheduler resources and store in HW struct
*/
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
+int ice_sched_query_res_alloc(struct ice_hw *hw)
{
struct ice_aqc_query_txsched_res_resp *buf;
- enum ice_status status = ICE_SUCCESS;
__le16 max_sibl;
- u8 i;
+ int status = 0;
+ u16 i;
if (hw->layer_info)
return status;
@@ -1721,12 +1724,12 @@ ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
static bool ice_sched_check_node(struct ice_hw *hw, struct ice_sched_node *node)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u32 node_teid;
+ int status;
node_teid = LE32_TO_CPU(node->info.node_teid);
status = ice_sched_query_elem(hw, node_teid, &buf);
- if (status != ICE_SUCCESS)
+ if (status)
return false;
if (memcmp(&buf, &node->info, sizeof(buf))) {
@@ -1777,7 +1780,7 @@ ice_sched_calc_vsi_child_nodes(struct ice_hw *hw, u16 num_qs, u16 *num_nodes)
* This function adds the VSI child nodes to the tree. It gets called for
* LAN and RDMA separately.
*/
-static enum ice_status
+static int
ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes,
u8 owner)
@@ -1792,7 +1795,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
- enum ice_status status;
+ int status;
if (!parent)
return ICE_ERR_CFG;
@@ -1801,7 +1804,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
num_nodes[i],
&first_node_teid,
&num_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -1820,7 +1823,7 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1882,7 +1885,7 @@ ice_sched_calc_vsi_support_nodes(struct ice_port_info *pi,
* This function adds the VSI supported nodes into the Tx tree, including the
* VSI, its parent, and intermediate nodes in the layers below
*/
-static enum ice_status
+static int
ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *tc_node, u16 *num_nodes)
{
@@ -1896,13 +1899,13 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
- enum ice_status status;
+ int status;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
&num_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_added)
+ if (status || num_nodes[i] != num_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -1921,7 +1924,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
parent->vsi_handle = vsi_handle;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -1932,7 +1935,7 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
*
* This function adds a new VSI into scheduler tree
*/
-static enum ice_status
+static int
ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
{
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
@@ -1960,7 +1963,7 @@ ice_sched_add_vsi_to_topo(struct ice_port_info *pi, u16 vsi_handle, u8 tc)
*
* This function updates the VSI child nodes based on the number of queues
*/
-static enum ice_status
+static int
ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u16 new_numqs, u8 owner)
{
@@ -1968,8 +1971,8 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
struct ice_sched_node *vsi_node;
struct ice_sched_node *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u16 prev_numqs;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2019,7 +2022,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
else
vsi_ctx->sched.max_rdmaq[tc] = new_numqs;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2035,14 +2038,14 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
* enabled and the VSI is in a suspended state, then resume it. If the TC is
* disabled, then suspend the VSI if it is not already suspended.
*/
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable)
{
struct ice_sched_node *vsi_node, *tc_node;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
ice_debug(pi->hw, ICE_DBG_SCHED, "add/config VSI %d\n", vsi_handle);
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2160,11 +2163,11 @@ static bool ice_sched_is_leaf_node_present(struct ice_sched_node *node)
* This function removes the VSI and its LAN or RDMA child nodes from the
* scheduler tree.
*/
-static enum ice_status
+static int
ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_vsi_ctx *vsi_ctx;
+ int status = ICE_ERR_PARAM;
u8 i;
ice_debug(pi->hw, ICE_DBG_SCHED, "removing VSI %d\n", vsi_handle);
@@ -2217,7 +2220,7 @@ ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner)
else
vsi_ctx->sched.max_rdmaq[i] = 0;
}
- status = ICE_SUCCESS;
+ status = 0;
exit_sched_rm_vsi_cfg:
ice_release_lock(&pi->sched_lock);
@@ -2232,7 +2235,7 @@ exit_sched_rm_vsi_cfg:
* This function clears the VSI and its LAN children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN);
}
@@ -2245,7 +2248,7 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
* This function clears the VSI and its RDMA children nodes from scheduler tree
* for all TCs.
*/
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
}
@@ -2285,7 +2288,7 @@ bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node)
* This function retrieves the tree topology from the firmware for a given
* node TEID to the root node.
*/
-enum ice_status
+int
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd)
@@ -2405,15 +2408,15 @@ ice_sched_update_parent(struct ice_sched_node *new_parent,
*
* This function moves the child nodes to a given parent.
*/
-enum ice_status
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
struct ice_aqc_move_elem *buf;
struct ice_sched_node *node;
- enum ice_status status = ICE_SUCCESS;
u16 i, grps_movd = 0;
struct ice_hw *hw;
+ int status = 0;
u16 buf_len;
hw = pi->hw;
@@ -2468,16 +2471,16 @@ move_err_exit:
* This function moves a VSI to an aggregator node or its subtree.
* Intermediate nodes may be created if required.
*/
-static enum ice_status
+static int
ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
u8 tc)
{
struct ice_sched_node *vsi_node, *agg_node, *tc_node, *parent;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
u32 first_node_teid, vsi_teid;
- enum ice_status status;
u16 num_nodes_added;
u8 aggl, vsil, i;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -2493,7 +2496,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
/* Is this VSI already part of given aggregator? */
if (ice_sched_find_node_in_subtree(pi->hw, agg_node, vsi_node))
- return ICE_SUCCESS;
+ return 0;
aggl = ice_sched_get_agg_layer(pi->hw);
vsil = ice_sched_get_vsi_layer(pi->hw);
@@ -2518,7 +2521,7 @@ ice_sched_move_vsi_to_agg(struct ice_port_info *pi, u16 vsi_handle, u32 agg_id,
num_nodes[i],
&first_node_teid,
&num_nodes_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -2550,14 +2553,14 @@ move_nodes:
* aggregator VSI info based on the passed-in boolean parameter rm_vsi_info. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_move_all_vsi_to_dflt_agg(struct ice_port_info *pi,
struct ice_sched_agg_info *agg_info, u8 tc,
bool rm_vsi_info)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_agg_vsi_info *tmp;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
LIST_FOR_EACH_ENTRY_SAFE(agg_vsi_info, tmp, &agg_info->agg_vsi_list,
ice_sched_agg_vsi_info, list_entry) {
@@ -2614,7 +2617,7 @@ ice_sched_is_agg_inuse(struct ice_port_info *pi, struct ice_sched_node *node)
* This function removes the aggregator node and intermediate nodes, if any,
* from the given TC
*/
-static enum ice_status
+static int
ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *tc_node, *agg_node;
@@ -2648,7 +2651,7 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
}
ice_free_sched_node(pi, agg_node);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2662,11 +2665,11 @@ ice_sched_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* the aggregator configuration completely for the requested TC. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_rm_agg_cfg_tc(struct ice_port_info *pi, struct ice_sched_agg_info *agg_info,
u8 tc, bool rm_vsi_info)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
/* If nothing to remove - return success */
if (!ice_is_tc_ena(agg_info->tc_bitmap[0], tc))
@@ -2695,7 +2698,7 @@ exit_rm_agg_cfg_tc:
* Save aggregator TC bitmap. This function needs to be called with scheduler
* lock held.
*/
-static enum ice_status
+static int
ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
ice_bitmap_t *tc_bitmap)
{
@@ -2706,7 +2709,7 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
return ICE_ERR_PARAM;
ice_cp_bitmap(agg_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2718,15 +2721,15 @@ ice_save_agg_tc_bitmap(struct ice_port_info *pi, u32 agg_id,
* This function creates an aggregator node and intermediate nodes if required
* for the given TC
*/
-static enum ice_status
+static int
ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
struct ice_sched_node *parent, *agg_node, *tc_node;
u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
u32 first_node_teid;
u16 num_nodes_added;
+ int status = 0;
u8 i, aggl;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -2772,7 +2775,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
num_nodes[i],
&first_node_teid,
&num_nodes_added);
- if (status != ICE_SUCCESS || num_nodes[i] != num_nodes_added)
+ if (status || num_nodes[i] != num_nodes_added)
return ICE_ERR_CFG;
/* The newly added node can be a new parent for the next
@@ -2789,7 +2792,7 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
}
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2808,13 +2811,13 @@ ice_sched_add_agg_cfg(struct ice_port_info *pi, u32 agg_id, u8 tc)
* resources and remove aggregator ID.
* This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, ice_bitmap_t *tc_bitmap)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
agg_info = ice_get_agg_info(hw, agg_id);
@@ -2870,12 +2873,12 @@ ice_sched_cfg_agg(struct ice_port_info *pi, u32 agg_id,
*
* This function configures aggregator node(s).
*/
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id, enum ice_agg_type agg_type,
u8 tc_bitmap)
{
ice_bitmap_t bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_cfg_agg(pi, agg_id, agg_type,
@@ -2943,7 +2946,7 @@ ice_get_vsi_agg_info(struct ice_hw *hw, u16 vsi_handle)
* Save VSI to aggregator TC bitmap. This function needs to be called with
* the scheduler lock held.
*/
-static enum ice_status
+static int
ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
ice_bitmap_t *tc_bitmap)
{
@@ -2959,7 +2962,7 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
return ICE_ERR_PARAM;
ice_cp_bitmap(agg_vsi_info->replay_tc_bitmap, tc_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2973,14 +2976,14 @@ ice_save_agg_vsi_tc_bitmap(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* already associated with the aggregator node, then no operation is performed on
* the tree. This function needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_assoc_vsi_to_agg(struct ice_port_info *pi, u32 agg_id,
u16 vsi_handle, ice_bitmap_t *tc_bitmap)
{
struct ice_sched_agg_vsi_info *agg_vsi_info, *old_agg_vsi_info = NULL;
struct ice_sched_agg_info *agg_info, *old_agg_info;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -3071,14 +3074,14 @@ static void ice_sched_rm_unused_rl_prof(struct ice_hw *hw)
* returns success or error on config sched element failure. The caller
* needs to hold scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_aqc_txsched_elem_data *info)
{
struct ice_aqc_txsched_elem_data buf;
- enum ice_status status;
u16 elem_cfgd = 0;
u16 num_elems = 1;
+ int status;
buf = *info;
/* For TC nodes, CIR config is not supported */
@@ -3116,13 +3119,13 @@ ice_sched_update_elem(struct ice_hw *hw, struct ice_sched_node *node,
*
* This function configures node element's BW allocation.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 bw_alloc)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -3150,12 +3153,12 @@ ice_sched_cfg_node_bw_alloc(struct ice_hw *hw, struct ice_sched_node *node,
*
* Move or associate VSI to a new or default aggregator node.
*/
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap)
{
ice_bitmap_t bitmap = tc_bitmap;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_assoc_vsi_to_agg(pi, agg_id, vsi_handle,
@@ -3175,10 +3178,10 @@ ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
* This function removes the aggregator reference to VSI and deletes aggregator ID
* info. It removes the aggregator configuration completely.
*/
-enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
+int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
ice_acquire_lock(&pi->sched_lock);
@@ -3257,7 +3260,7 @@ ice_set_clear_eir_bw_alloc(struct ice_bw_type_info *bw_t_info, u16 bw_alloc)
*
* Save BW alloc information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -3280,7 +3283,7 @@ ice_sched_save_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3350,7 +3353,7 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
*
* Save BW information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3374,7 +3377,7 @@ ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3402,7 +3405,7 @@ static void ice_set_clear_prio(struct ice_bw_type_info *bw_t_info, u8 prio)
*
* Save priority information of VSI type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 prio)
{
@@ -3416,7 +3419,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return ICE_ERR_PARAM;
ice_set_clear_prio(&vsi_ctx->sched.bw_t_info[tc], prio);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3429,7 +3432,7 @@ ice_sched_save_vsi_prio(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* Save BW alloc information of AGG type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -3450,7 +3453,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3463,7 +3466,7 @@ ice_sched_save_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 tc,
*
* Save BW information of AGG type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -3487,7 +3490,7 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3501,11 +3504,11 @@ ice_sched_save_agg_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
* This function configures BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
@@ -3528,11 +3531,11 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function configures default BW limit of VSI scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
ICE_AGG_TYPE_VSI,
@@ -3558,11 +3561,11 @@ ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function applies BW limit to aggregator scheduling node based on TC
* information.
*/
-enum ice_status
+int
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
tc, rl_type, bw);
@@ -3584,11 +3587,11 @@ ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* This function applies default BW limit to aggregator scheduling node based
* on TC information.
*/
-enum ice_status
+int
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type)
{
- enum ice_status status;
+ int status;
status = ice_sched_set_node_bw_lmt_per_tc(pi, agg_id, ICE_AGG_TYPE_AGG,
tc, rl_type,
@@ -3613,7 +3616,7 @@ ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* Configure shared rate limiter (SRL) of all VSI type nodes across all traffic
* classes for VSI matching handle.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
@@ -3629,7 +3632,7 @@ ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
* This function removes the shared rate limiter (SRL) of all VSI type nodes
* across all traffic classes for VSI matching handle.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
return ice_sched_set_vsi_bw_shared_lmt(pi, vsi_handle,
@@ -3649,7 +3652,7 @@ ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle)
* This function configures the shared rate limiter (SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw)
{
@@ -3665,7 +3668,7 @@ ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
* This function removes the shared rate limiter (SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
{
return ice_sched_set_agg_bw_shared_lmt(pi, agg_id, ICE_SCHED_DFLT_BW,
@@ -3685,7 +3688,7 @@ ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id)
* This function configures the shared rate limiter (SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
@@ -3702,7 +3705,7 @@ ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
* This function removes the shared rate limiter (SRL) of all aggregator type
* nodes across all traffic classes for aggregator matching agg_id.
*/
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
{
return ice_sched_set_agg_bw_shared_lmt_per_tc(pi, agg_id, tc,
@@ -3721,11 +3724,11 @@ ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc)
* This function configures the queue node priority (Sibling Priority) of the
* passed-in VSI's queue(s) for a given traffic class (TC).
*/
-enum ice_status
+int
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio)
{
- enum ice_status status = ICE_ERR_PARAM;
+ int status = ICE_ERR_PARAM;
u16 i;
ice_acquire_lock(&pi->sched_lock);
@@ -3761,17 +3764,17 @@ ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
* This function configures the node priority (Sibling Priority) of the
* passed-in VSIs for a given traffic class (TC) of an Aggregator ID.
*/
-enum ice_status
+int
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
u16 num_vsis, u16 *vsi_handle_arr,
u8 *node_prio, u8 tc)
{
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_sched_node *tc_node, *agg_node;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
struct ice_hw *hw = pi->hw;
+ int status = ICE_ERR_PARAM;
u16 i;
ice_acquire_lock(&pi->sched_lock);
@@ -3848,11 +3851,11 @@ exit_agg_priority_per_tc:
* This function configures the BW allocation of the passed-in VSI's
* node(s) for the enabled traffic class.
*/
-enum ice_status
+int
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -3900,14 +3903,14 @@ ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
* This function configures the BW allocation of the passed-in aggregator for
* the enabled traffic class(es).
*/
-enum ice_status
+int
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc)
{
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
- enum ice_status status = ICE_SUCCESS;
struct ice_hw *hw = pi->hw;
+ int status = 0;
u8 tc;
ice_acquire_lock(&pi->sched_lock);
@@ -4002,12 +4005,12 @@ static u16 ice_sched_calc_wakeup(struct ice_hw *hw, s32 bw)
*
* This function converts the BW to profile structure format.
*/
-static enum ice_status
+static int
ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
struct ice_aqc_rl_profile_elem *profile)
{
- enum ice_status status = ICE_ERR_PARAM;
s64 bytes_per_sec, ts_rate, mv_tmp;
+ int status = ICE_ERR_PARAM;
bool found = false;
s32 encode = 0;
s64 mv = 0;
@@ -4052,7 +4055,7 @@ ice_sched_bw_to_rl_profile(struct ice_hw *hw, u32 bw,
profile->rl_multiply = CPU_TO_LE16(mv);
profile->wake_up_calc = CPU_TO_LE16(wm);
profile->rl_encode = CPU_TO_LE16(encode);
- status = ICE_SUCCESS;
+ status = 0;
} else {
status = ICE_ERR_DOES_NOT_EXIST;
}
@@ -4080,8 +4083,8 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
u16 profiles_added = 0, num_profiles = 1;
struct ice_aqc_rl_profile_elem *buf;
- enum ice_status status;
u8 profile_type;
+ int status;
if (!hw || layer_num >= hw->num_tx_sched_layers)
return NULL;
@@ -4114,7 +4117,7 @@ ice_sched_add_rl_profile(struct ice_hw *hw, enum ice_rl_type rl_type,
return NULL;
status = ice_sched_bw_to_rl_profile(hw, bw, &rl_prof_elem->profile);
- if (status != ICE_SUCCESS)
+ if (status)
goto exit_add_rl_prof;
rl_prof_elem->bw = bw;
@@ -4149,7 +4152,7 @@ exit_add_rl_prof:
*
* This function configures node element's BW limit.
*/
-static enum ice_status
+static int
ice_sched_cfg_node_bw_lmt(struct ice_hw *hw, struct ice_sched_node *node,
enum ice_rl_type rl_type, u16 rl_prof_id)
{
@@ -4293,12 +4296,12 @@ ice_sched_get_srl_node(struct ice_sched_node *node, u8 srl_layer)
* 'profile_type' and profile ID as 'profile_id'. The caller needs to hold
* scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
u16 profile_id)
{
struct ice_aqc_rl_profile_info *rl_prof_elem;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!hw || layer_num >= hw->num_tx_sched_layers)
return ICE_ERR_PARAM;
@@ -4319,7 +4322,7 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
break;
}
if (status == ICE_ERR_IN_USE)
- status = ICE_SUCCESS;
+ status = 0;
return status;
}
@@ -4334,15 +4337,15 @@ ice_sched_rm_rl_profile(struct ice_hw *hw, u8 layer_num, u8 profile_type,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type, u8 layer_num)
{
- enum ice_status status;
struct ice_hw *hw;
u8 profile_type;
u16 rl_prof_id;
+ int status;
u16 old_id;
hw = pi->hw;
@@ -4373,7 +4376,7 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
/* Remove stale RL profile ID */
if (old_id == ICE_SCHED_DFLT_RL_PROF_ID ||
old_id == ICE_SCHED_INVAL_PROF_ID)
- return ICE_SUCCESS;
+ return 0;
return ice_sched_rm_rl_profile(hw, layer_num, profile_type, old_id);
}
@@ -4390,14 +4393,14 @@ ice_sched_set_node_bw_dflt(struct ice_port_info *pi,
* node's RL profile ID of type CIR, EIR, or SRL, and removes old profile
* ID from local database. The caller needs to hold scheduler lock.
*/
-enum ice_status
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num)
{
struct ice_aqc_rl_profile_info *rl_prof_info;
- enum ice_status status = ICE_ERR_PARAM;
struct ice_hw *hw = pi->hw;
u16 old_id, rl_prof_id;
+ int status = ICE_ERR_PARAM;
rl_prof_info = ice_sched_add_rl_profile(hw, rl_type, bw, layer_num);
if (!rl_prof_info)
@@ -4419,7 +4422,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
/* Check for old ID removal */
if ((old_id == ICE_SCHED_DFLT_RL_PROF_ID && rl_type != ICE_SHARED_BW) ||
old_id == ICE_SCHED_INVAL_PROF_ID || old_id == rl_prof_id)
- return ICE_SUCCESS;
+ return 0;
return ice_sched_rm_rl_profile(hw, layer_num,
rl_prof_info->profile.flags &
@@ -4434,7 +4437,7 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
*
* This function sets the priority of a node among its siblings.
*/
-enum ice_status
+int
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority)
{
@@ -4459,7 +4462,7 @@ ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *nod
*
* This function sets weight of the node for WFQ algorithm.
*/
-enum ice_status
+int
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node, u16 weight)
{
struct ice_aqc_txsched_elem_data buf;
@@ -4491,7 +4494,7 @@ ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node,
* NOTE: Caller provides the correct SRL node in case of shared profile
* settings.
*/
-enum ice_status
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4524,7 +4527,7 @@ ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
* type CIR, EIR, or SRL to default. This function needs to be called
* with the scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
struct ice_sched_node *node,
enum ice_rl_type rl_type)
@@ -4542,7 +4545,7 @@ ice_sched_set_node_bw_dflt_lmt(struct ice_port_info *pi,
* behalf of the requested node (first argument). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
{
/* SRL profiles are not available on all layers. Check if the
@@ -4555,7 +4558,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
node->num_children == 1) ||
((sel_layer == node->tx_sched_layer - 1) &&
(node->parent && node->parent->num_children == 1)))
- return ICE_SUCCESS;
+ return 0;
return ICE_ERR_CFG;
}
@@ -4568,7 +4571,7 @@ ice_sched_validate_srl_node(struct ice_sched_node *node, u8 sel_layer)
*
* Save BW information of queue type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
{
switch (rl_type) {
@@ -4584,7 +4587,7 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -4598,13 +4601,13 @@ ice_sched_save_q_bw(struct ice_q_ctx *q_ctx, enum ice_rl_type rl_type, u32 bw)
*
* This function sets BW limit of queue scheduling node.
*/
-static enum ice_status
+static int
ice_sched_set_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
struct ice_q_ctx *q_ctx;
+ int status = ICE_ERR_PARAM;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -4661,7 +4664,7 @@ exit_q_bw_lmt:
*
* This function configures BW limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw)
{
@@ -4679,7 +4682,7 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
*
* This function configures BW default limit of queue scheduling node.
*/
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type)
{
@@ -4697,7 +4700,7 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
* This function saves the modified values of bandwidth settings for later
* replay purposes (restore) after reset.
*/
-static enum ice_status
+static int
ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4716,9 +4719,12 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
+#define ICE_SCHED_GENERIC_STRICT_MODE BIT(4)
+#define ICE_SCHED_GENERIC_PRIO_S 1
+
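These two new macros feed the generic-field update that this patch adds to
ice_sched_set_tc_node_bw_lmt() below; a sketch of the resulting encoding
(the meaning of the remaining bits is an assumption):

        /* priority sits above bit 0, strict-mode scheduling is bit 4;
         * e.g. for TC 3:
         *   (3 << ICE_SCHED_GENERIC_PRIO_S) | ICE_SCHED_GENERIC_STRICT_MODE
         *   = (3 << 1) | BIT(4) = 0x06 | 0x10 = 0x16
         */
        data->generic = (tc << ICE_SCHED_GENERIC_PRIO_S) |
                        ICE_SCHED_GENERIC_STRICT_MODE;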
/**
* ice_sched_set_tc_node_bw_lmt - sets TC node BW limit
* @pi: port information structure
@@ -4728,12 +4734,14 @@ ice_sched_save_tc_node_bw(struct ice_port_info *pi, u8 tc,
*
* This function configures bandwidth limit of TC node.
*/
-static enum ice_status
+static int
ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
+ struct ice_aqc_txsched_elem_data buf;
+ struct ice_aqc_txsched_elem *data;
struct ice_sched_node *tc_node;
+ int status = ICE_ERR_PARAM;
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return status;
@@ -4741,6 +4749,17 @@ ice_sched_set_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
goto exit_set_tc_node_bw;
+
+ /* update node's generic field */
+ buf = tc_node->info;
+ data = &buf.data;
+ data->valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
+ data->generic = (tc << ICE_SCHED_GENERIC_PRIO_S) |
+ ICE_SCHED_GENERIC_STRICT_MODE;
+ status = ice_sched_update_elem(pi->hw, tc_node, &buf);
+ if (status)
+ goto exit_set_tc_node_bw;
+
if (bw == ICE_SCHED_DFLT_BW)
status = ice_sched_set_node_bw_dflt_lmt(pi, tc_node, rl_type);
else
@@ -4763,7 +4782,7 @@ exit_set_tc_node_bw:
* This function configures BW limit of TC node.
* Note: The minimum guaranteed reservation is done via DCBX.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
@@ -4778,7 +4797,7 @@ ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
*
* This function configures BW default limit of TC node.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type)
{
@@ -4794,7 +4813,7 @@ ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
*
* Save BW alloc information of TC type node for post replay use.
*/
-static enum ice_status
+static int
ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u16 bw_alloc)
{
@@ -4812,7 +4831,7 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
default:
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -4826,12 +4845,12 @@ ice_sched_save_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
* changed settings for replay purposes, and returns success if it succeeds
* in modifying the bandwidth alloc setting.
*/
-static enum ice_status
+static int
ice_sched_set_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *tc_node;
+ int status = ICE_ERR_PARAM;
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return status;
@@ -4860,7 +4879,7 @@ exit_set_tc_node_bw_alloc:
* This function configures BW allocation of TC node.
* Note: The minimum guaranteed reservation is done via DCBX.
*/
-enum ice_status
+int
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc)
{
@@ -4876,11 +4895,11 @@ ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
* and sets node's BW limit to default. This function needs to be
* called with the scheduler lock held.
*/
-enum ice_status
+int
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle)
{
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!ice_is_vsi_valid(pi->hw, vsi_handle))
@@ -4992,13 +5011,13 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status = ICE_ERR_PARAM;
struct ice_sched_node *node;
+ int status = ICE_ERR_PARAM;
if (!pi)
return status;
@@ -5031,7 +5050,7 @@ exit_set_node_bw_lmt_per_tc:
* different from the VSI node layer on all TC(s). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
{
u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
@@ -5044,7 +5063,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
ice_for_each_traffic_class(tc) {
struct ice_sched_node *tc_node, *vsi_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
- enum ice_status status;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -5070,7 +5089,7 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5086,12 +5105,12 @@ ice_sched_validate_vsi_srl_node(struct ice_port_info *pi, u16 vsi_handle)
* class, and saves those values for later use for replay purposes. The
* caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
@@ -5118,13 +5137,13 @@ ice_sched_set_save_vsi_srl_node_bw(struct ice_port_info *pi, u16 vsi_handle,
* is passed, it removes the corresponding bw from the node. The caller
* holds scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *vsi_node, *cfg_node;
- enum ice_status status;
u8 layer_num;
+ int status;
tc_node = ice_sched_get_tc_node(pi, tc);
if (!tc_node)
@@ -5172,11 +5191,11 @@ ice_sched_set_vsi_node_srl_per_tc(struct ice_port_info *pi, u16 vsi_handle,
* classes for VSI matching handle. When BW value of ICE_SCHED_DFLT_BW is
* passed, it removes those value(s) from the node.
*/
-enum ice_status
+int
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!pi)
@@ -5222,13 +5241,13 @@ exit_set_vsi_bw_shared_lmt:
* different from the AGG node layer on all TC(s). This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
{
u8 sel_layer = ICE_SCHED_INVAL_LAYER_NUM;
struct ice_sched_agg_info *agg_info;
bool agg_id_present = false;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
LIST_FOR_EACH_ENTRY(agg_info, &pi->hw->agg_list, ice_sched_agg_info,
@@ -5277,13 +5296,13 @@ ice_sched_validate_agg_srl_node(struct ice_port_info *pi, u32 agg_id)
*
* This function validates aggregator id. Caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
{
struct ice_sched_agg_info *agg_info;
struct ice_sched_agg_info *tmp;
bool agg_id_present = false;
- enum ice_status status;
+ int status;
status = ice_sched_validate_agg_srl_node(pi, agg_id);
if (status)
@@ -5299,7 +5318,7 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
if (!agg_id_present)
return ICE_ERR_PARAM;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -5315,12 +5334,12 @@ ice_sched_validate_agg_id(struct ice_port_info *pi, u32 agg_id)
* requested traffic class, and saves those values for later use for
* replay purposes. The caller holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
struct ice_sched_node *srl_node,
enum ice_rl_type rl_type, u32 bw)
{
- enum ice_status status;
+ int status;
if (bw == ICE_SCHED_DFLT_BW) {
status = ice_sched_set_node_bw_dflt_lmt(pi, srl_node, rl_type);
@@ -5347,13 +5366,13 @@ ice_sched_set_save_agg_srl_node_bw(struct ice_port_info *pi, u32 agg_id, u8 tc,
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node. Caller
* holds the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw, u32 shared_bw)
{
struct ice_sched_node *tc_node, *agg_node, *cfg_node;
enum ice_rl_type rl_type = ICE_SHARED_BW;
- enum ice_status status = ICE_ERR_CFG;
+ int status = ICE_ERR_CFG;
u8 layer_num;
tc_node = ice_sched_get_tc_node(pi, tc);
@@ -5402,11 +5421,11 @@ ice_sched_set_agg_node_srl_per_tc(struct ice_port_info *pi, u32 agg_id,
* BW value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the
* node(s).
*/
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id,
u32 min_bw, u32 max_bw, u32 shared_bw)
{
- enum ice_status status;
+ int status;
u8 tc;
if (!pi)
@@ -5454,12 +5473,12 @@ exit_agg_bw_shared_lmt:
* node for a given traffic class for aggregator matching agg_id. When BW
* value of ICE_SCHED_DFLT_BW is passed, it removes SRL from the node.
*/
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw)
{
- enum ice_status status;
+ int status;
if (!pi)
return ICE_ERR_PARAM;
@@ -5485,14 +5504,14 @@ exit_agg_bw_shared_lmt_per_tc:
* This function configures node element's sibling priority only. This
* function needs to be called with scheduler lock held.
*/
-enum ice_status
+int
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
+ int status;
if (!hw)
return ICE_ERR_PARAM;
@@ -5518,7 +5537,7 @@ ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
* burst size value is used for future rate limit calls. It doesn't change the
* existing or previously created RL profiles.
*/
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
{
u16 burst_size_to_prog;
@@ -5547,7 +5566,7 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
burst_size_to_prog |= (u16)(bytes / 1024);
}
hw->max_burst_size = burst_size_to_prog;
- return ICE_SUCCESS;
+ return 0;
}
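A hedged usage sketch of the converted API (the 8 KB value and the debug
message are illustrative only; only the 1 KB-granularity branch of the
encoding is visible in the hunk above):

        int err;

        err = ice_cfg_rl_burst_size(hw, 8 * 1024);  /* program an 8 KB burst */
        if (err)
                ice_debug(hw, ICE_DBG_SCHED, "burst size rejected: %d\n", err);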
/**
@@ -5559,13 +5578,13 @@ enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes)
* This function configures node element's priority value. It
* needs to be called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
u8 priority)
{
struct ice_aqc_txsched_elem_data buf;
struct ice_aqc_txsched_elem *data;
- enum ice_status status;
+ int status;
buf = node->info;
data = &buf.data;
@@ -5586,18 +5605,18 @@ ice_sched_replay_node_prio(struct ice_hw *hw, struct ice_sched_node *node,
* This function restores node's BW from bw_t_info. The caller needs
* to hold the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
struct ice_bw_type_info *bw_t_info)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status = ICE_ERR_PARAM;
+ int status = ICE_ERR_PARAM;
u16 bw_alloc;
if (!node)
return status;
if (!ice_is_any_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_CNT))
- return ICE_SUCCESS;
+ return 0;
if (ice_is_bit_set(bw_t_info->bw_t_bitmap, ICE_BW_TYPE_PRIO)) {
status = ice_sched_replay_node_prio(hw, node,
bw_t_info->generic);
@@ -5644,11 +5663,11 @@ ice_sched_replay_node_bw(struct ice_hw *hw, struct ice_sched_node *node,
* This function re-creates aggregator type nodes. The caller needs to hold
* the scheduler lock.
*/
-static enum ice_status
+static int
ice_sched_replay_agg_bw(struct ice_hw *hw, struct ice_sched_agg_info *agg_info)
{
struct ice_sched_node *tc_node, *agg_node;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!agg_info)
@@ -5721,7 +5740,7 @@ void ice_sched_replay_agg(struct ice_hw *hw)
ICE_MAX_TRAFFIC_CLASS)) {
ice_declare_bitmap(replay_bitmap,
ICE_MAX_TRAFFIC_CLASS);
- enum ice_status status;
+ int status;
ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
ice_sched_get_ena_tc_bitmap(pi,
@@ -5777,9 +5796,9 @@ void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw)
*
* Replay root node BW settings.
*/
-enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
+int ice_sched_replay_root_node_bw(struct ice_port_info *pi)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!pi->hw)
return ICE_ERR_PARAM;
@@ -5797,9 +5816,9 @@ enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi)
*
* This function replays TC nodes.
*/
-enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
+int ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
{
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
if (!pi->hw)
@@ -5829,7 +5848,7 @@ enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi)
* This function replays VSI type nodes' bandwidth. This function needs to be
* called with scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *tc_bitmap)
{
@@ -5837,7 +5856,7 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
struct ice_port_info *pi = hw->port_info;
struct ice_bw_type_info *bw_t_info;
struct ice_vsi_ctx *vsi_ctx;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 tc;
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -5869,24 +5888,24 @@ ice_sched_replay_vsi_bw(struct ice_hw *hw, u16 vsi_handle,
* their node bandwidth information. This function needs to be called with
* scheduler lock held.
*/
-static enum ice_status
+static int
ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
ice_declare_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
struct ice_sched_agg_vsi_info *agg_vsi_info;
struct ice_port_info *pi = hw->port_info;
struct ice_sched_agg_info *agg_info;
- enum ice_status status;
+ int status;
ice_zero_bitmap(replay_bitmap, ICE_MAX_TRAFFIC_CLASS);
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
agg_info = ice_get_vsi_agg_info(hw, vsi_handle);
if (!agg_info)
- return ICE_SUCCESS; /* Not present in list - default Agg case */
+ return 0; /* Not present in list - default Agg case */
agg_vsi_info = ice_get_agg_vsi_info(agg_info, vsi_handle);
if (!agg_vsi_info)
- return ICE_SUCCESS; /* Not present in list - default Agg case */
+ return 0; /* Not present in list - default Agg case */
ice_sched_get_ena_tc_bitmap(pi, agg_info->replay_tc_bitmap,
replay_bitmap);
/* Replay aggregator node associated to vsi_handle */
@@ -5920,10 +5939,10 @@ ice_sched_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays the association of a VSI to aggregator type nodes, and
* node bandwidth information.
*/
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_port_info *pi = hw->port_info;
- enum ice_status status;
+ int status;
ice_acquire_lock(&pi->sched_lock);
status = ice_sched_replay_vsi_agg(hw, vsi_handle);
@@ -5939,7 +5958,7 @@ enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle)
* This function replays queue type node bandwidth. This function needs to be
* called with scheduler lock held.
*/
-enum ice_status
+int
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx)
{
struct ice_sched_node *q_node;
diff --git a/sys/dev/ice/ice_sched.h b/sys/dev/ice/ice_sched.h
index 490a7719a960..db79e9e23621 100644
--- a/sys/dev/ice/ice_sched.h
+++ b/sys/dev/ice/ice_sched.h
@@ -110,54 +110,54 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
-enum ice_status
+int
ice_aq_query_rl_profile(struct ice_hw *hw, u16 num_profiles,
struct ice_aqc_rl_profile_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_cfg_node_attr(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_node_attr_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_cfg_l2_node_cgd(struct ice_hw *hw, u16 num_nodes,
struct ice_aqc_cfg_l2_node_cgd_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
struct ice_aqc_move_elem *buf, u16 buf_size,
u16 *grps_movd, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_sched_set_node_bw_lmt(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
enum ice_rl_type rl_type, u32 bw, u8 layer_num);
-enum ice_status
+int
ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, u16 num_nodes,
u16 *num_nodes_added, u32 *first_node_teid,
struct ice_sched_node **prealloc_node);
-enum ice_status
+int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list);
-enum ice_status
+int
ice_sched_set_node_priority(struct ice_port_info *pi, struct ice_sched_node *node,
u16 priority);
-enum ice_status
+int
ice_sched_set_node_weight(struct ice_port_info *pi, struct ice_sched_node *node,
u16 weight);
-enum ice_status ice_sched_init_port(struct ice_port_info *pi);
-enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
+int ice_sched_init_port(struct ice_port_info *pi);
+int ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_get_psm_clk_freq(struct ice_hw *hw);
/* Functions to cleanup scheduler SW DB */
@@ -170,7 +170,7 @@ struct ice_sched_node *ice_sched_get_node(struct ice_port_info *pi, u32 teid);
struct ice_sched_node *
ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid);
/* Add a scheduling node into SW DB for given info */
-enum ice_status
+int
ice_sched_add_node(struct ice_port_info *pi, u8 layer,
struct ice_aqc_txsched_elem_data *info,
struct ice_sched_node *prealloc_node);
@@ -182,112 +182,112 @@ struct ice_sched_node *ice_sched_get_tc_node(struct ice_port_info *pi, u8 tc);
struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner);
-enum ice_status
+int
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
-enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+int ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
struct ice_sched_node *
ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u16 vsi_handle);
bool ice_sched_is_tree_balanced(struct ice_hw *hw, struct ice_sched_node *node);
-enum ice_status
+int
ice_aq_query_node_to_root(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf, u16 buf_size,
struct ice_sq_cd *cd);
/* Tx scheduler rate limiter functions */
-enum ice_status
+int
ice_cfg_agg(struct ice_port_info *pi, u32 agg_id,
enum ice_agg_type agg_type, u8 tc_bitmap);
-enum ice_status
+int
ice_move_vsi_to_agg(struct ice_port_info *pi, u32 agg_id, u16 vsi_handle,
u8 tc_bitmap);
-enum ice_status ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
-enum ice_status
+int ice_rm_agg_cfg(struct ice_port_info *pi, u32 agg_id);
+int
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_tc_node_bw_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_tc_node_bw_dflt_lmt(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_agg_bw_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_cfg_agg_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
enum ice_rl_type rl_type);
-enum ice_status
+int
ice_cfg_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_vsi_bw_no_shared_lmt(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt(struct ice_port_info *pi, u32 agg_id);
-enum ice_status
+int
ice_cfg_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id, u8 tc,
u32 min_bw, u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_cfg_agg_bw_no_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc);
-enum ice_status
+int
ice_cfg_vsi_q_priority(struct ice_port_info *pi, u16 num_qs, u32 *q_ids,
u8 *q_prio);
-enum ice_status
+int
ice_cfg_vsi_bw_alloc(struct ice_port_info *pi, u16 vsi_handle, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
-enum ice_status
+int
ice_cfg_agg_vsi_priority_per_tc(struct ice_port_info *pi, u32 agg_id,
u16 num_vsis, u16 *vsi_handle_arr,
u8 *node_prio, u8 tc);
-enum ice_status
+int
ice_cfg_agg_bw_alloc(struct ice_port_info *pi, u32 agg_id, u8 ena_tcmap,
enum ice_rl_type rl_type, u8 *bw_alloc);
bool
ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
struct ice_sched_node *node);
-enum ice_status
+int
ice_sched_set_agg_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle);
-enum ice_status
+int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw);
-enum ice_status
+int
ice_sched_set_vsi_bw_shared_lmt(struct ice_port_info *pi, u16 vsi_handle,
u32 min_bw, u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt(struct ice_port_info *pi, u32 agg_id, u32 min_bw,
u32 max_bw, u32 shared_bw);
-enum ice_status
+int
ice_sched_set_agg_bw_shared_lmt_per_tc(struct ice_port_info *pi, u32 agg_id,
u8 tc, u32 min_bw, u32 max_bw,
u32 shared_bw);
-enum ice_status
+int
ice_sched_cfg_sibl_node_prio(struct ice_port_info *pi,
struct ice_sched_node *node, u8 priority);
-enum ice_status
+int
ice_cfg_tc_node_bw_alloc(struct ice_port_info *pi, u8 tc,
enum ice_rl_type rl_type, u8 bw_alloc);
-enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
-enum ice_status ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
-enum ice_status ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status ice_sched_replay_root_node_bw(struct ice_port_info *pi);
-enum ice_status ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
+int ice_sched_replay_tc_node_bw(struct ice_port_info *pi);
+int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
+int ice_sched_replay_root_node_bw(struct ice_port_info *pi);
+int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/sys/dev/ice/ice_strings.c b/sys/dev/ice/ice_strings.c
index b341b2815fea..1b377a1bf518 100644
--- a/sys/dev/ice/ice_strings.c
+++ b/sys/dev/ice/ice_strings.c
@@ -179,13 +179,13 @@ _ice_aq_str(enum ice_aq_err aq_err)
* Otherwise, use the scratch space to format the status code into a number.
*/
struct ice_str_buf
-_ice_status_str(enum ice_status status)
+_ice_status_str(int status)
{
struct ice_str_buf buf = { .str = "" };
const char *str = NULL;
switch (status) {
- case ICE_SUCCESS:
+ case 0:
str = "OK";
break;
case ICE_ERR_PARAM:
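A usage sketch of the int-based formatter, assuming ice_status_str() is the
usual wrapper around _ice_status_str() that yields the embedded string (the
dev handle is illustrative):

        int status = ice_aq_update_vsi(hw, vsi_ctx, NULL);

        if (status)
                device_printf(dev, "update VSI AQ call failed: %s\n",
                    ice_status_str(status));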
@@ -1052,6 +1052,8 @@ ice_state_to_str(enum ice_state state)
return "DO_CREATE_MIRR_INTFC";
case ICE_STATE_DO_DESTROY_MIRR_INTFC:
return "DO_DESTROY_MIRR_INTFC";
+ case ICE_STATE_PHY_FW_INIT_PENDING:
+ return "PHY_FW_INIT_PENDING";
case ICE_STATE_LAST:
return NULL;
}
diff --git a/sys/dev/ice/ice_switch.c b/sys/dev/ice/ice_switch.c
index e02390e10ef8..1880d6abdd26 100644
--- a/sys/dev/ice/ice_switch.c
+++ b/sys/dev/ice/ice_switch.c
@@ -72,7 +72,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle);
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
-enum ice_status
+int
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
{
struct ice_sw_recipe *recps;
@@ -93,7 +93,7 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
*recp_list = recps;
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -121,14 +121,14 @@ ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list)
* in the response buffer. The caller of this function is expected to use
* *num_elems while parsing the response buffer.
*/
-static enum ice_status
+static int
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
u16 buf_size, u16 *req_desc, u16 *num_elems,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_sw_cfg *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
cmd = &desc.params.get_sw_conf;
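A sketch of how a caller might drive this command, assuming req_desc acts
as a firmware-maintained resume token (suggested by the parameter names;
the loop itself is not part of this patch):

        u16 req_desc = 0, num_elems;
        int status;

        do {
                status = ice_aq_get_sw_cfg(hw, buf, buf_size, &req_desc,
                                           &num_elems, NULL);
                if (status)
                        break;
                /* parse num_elems entries out of buf here */
        } while (req_desc);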
@@ -149,10 +149,10 @@ ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp_elem *buf,
* @shared_res: true to allocate as a shared resource and false to allocate as a dedicated resource
* @global_lut_id: output parameter for the RSS global LUT's ID
*/
-enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
+int ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
+ int status;
u16 buf_len;
buf_len = ice_struct_size(sw_buf, elem, 1);
@@ -184,11 +184,11 @@ ice_alloc_global_lut_exit:
* @hw: pointer to the HW struct
* @global_lut_id: ID of the RSS global LUT to free
*/
-enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
+int ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
u16 buf_len, num_elems = 1;
- enum ice_status status;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, num_elems);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
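The allocation pattern these helpers repeat, sketched from the visible
calls (the ICE_ERR_NO_MEMORY return and ice_free() cleanup are assumptions
drawn from the driver's conventions):

        buf_len = ice_struct_size(sw_buf, elem, num_elems);
        sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
        if (!sw_buf)
                return ICE_ERR_NO_MEMORY;

        /* ... fill sw_buf->elem[0 .. num_elems - 1], issue the AQ call ... */

        ice_free(hw, sw_buf);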
@@ -218,14 +218,14 @@ enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id)
*
* allocates switch resources (SWID and VEB counter) (0x0208)
*/
-enum ice_status
+int
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *sw_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -302,10 +302,10 @@ ice_alloc_sw_exit:
* releasing other resources even after it encounters an error.
* The error code returned is the last error it encountered.
*/
-enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
+int ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
- enum ice_status status, ret_status;
+ int status, ret_status;
u16 buf_len;
buf_len = ice_struct_size(sw_buf, elem, 1);
@@ -364,14 +364,14 @@ enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
*
* Add a VSI context to the hardware (0x0210)
*/
-enum ice_status
+int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *res;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
res = &desc.params.add_update_free_vsi_res;
@@ -408,14 +408,14 @@ ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware (0x0213)
*/
-enum ice_status
+int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -443,14 +443,14 @@ ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware (0x0211)
*/
-enum ice_status
+int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_update_free_vsi_resp *resp;
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.add_update_free_vsi_res;
@@ -528,7 +528,7 @@ ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
* @hw: pointer to the HW struct
* @vsi_handle: VSI handle
*/
-static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
+void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
{
struct ice_vsi_ctx *vsi;
u8 i;
@@ -590,12 +590,12 @@ void ice_clear_all_vsi_ctx(struct ice_hw *hw)
* If this function gets called after reset for existing VSIs then update
* with the new HW VSI number in the corresponding VSI handle list entry.
*/
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_vsi_ctx *tmp_vsi_ctx;
- enum ice_status status;
+ int status;
if (vsi_handle >= ICE_MAX_VSI)
return ICE_ERR_PARAM;
@@ -619,7 +619,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -632,11 +632,11 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Free VSI context info from hardware as well as from VSI handle list
*/
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd)
{
- enum ice_status status;
+ int status;
if (!ice_is_vsi_valid(hw, vsi_handle))
return ICE_ERR_PARAM;
@@ -656,7 +656,7 @@ ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
*
* Update VSI context in the hardware
*/
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
@@ -672,11 +672,11 @@ ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
* @vsi_handle: VSI SW index
* @enable: boolean for enable/disable
*/
-enum ice_status
+int
ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
{
struct ice_vsi_ctx *ctx, *cached_ctx;
- enum ice_status status;
+ int status;
cached_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!cached_ctx)
@@ -715,14 +715,14 @@ ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable)
*
* Get VSI context info from hardware (0x0212)
*/
-enum ice_status
+int
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd)
{
struct ice_aqc_add_get_update_free_vsi *cmd;
struct ice_aqc_get_vsi_resp *resp;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
cmd = &desc.params.vsi_cmd;
resp = &desc.params.get_vsi_resp;
@@ -756,16 +756,16 @@ ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
*
* Add/Update Mirror Rule (0x260).
*/
-enum ice_status
+int
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
u16 count, struct ice_mir_rule_buf *mr_buf,
struct ice_sq_cd *cd, u16 *rule_id)
{
struct ice_aqc_add_update_mir_rule *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
__le16 *mr_list = NULL;
u16 buf_size = 0;
+ int status;
switch (rule_type) {
case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
@@ -854,7 +854,7 @@ ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
*
* Delete Mirror Rule (0x261).
*/
-enum ice_status
+int
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
struct ice_sq_cd *cd)
{
@@ -886,15 +886,15 @@ ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
*
* allocates or free a VSI list resource
*/
-static enum ice_status
+static int
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
struct ice_aqc_res_elem *vsi_ele;
- enum ice_status status;
u16 buf_len;
+ int status;
buf_len = ice_struct_size(sw_buf, elem, 1);
sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
@@ -945,7 +945,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Sets the storm control configuration (0x0280)
*/
-enum ice_status
+int
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
u32 ctl_bitmask)
{
@@ -972,12 +972,12 @@ ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
*
* Gets the storm control configuration (0x0281)
*/
-enum ice_status
+int
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
u32 *ctl_bitmask)
{
- enum ice_status status;
struct ice_aq_desc desc;
+ int status;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
@@ -1009,12 +1009,12 @@ ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -1064,13 +1064,13 @@ ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
/**
* ice_get_initial_sw_cfg - Get initial port and default VSI data
* @hw: pointer to the hardware structure
*/
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
+int ice_get_initial_sw_cfg(struct ice_hw *hw)
{
struct ice_aqc_get_sw_cfg_resp_elem *rbuf;
- enum ice_status status;
u8 num_total_ports;
u16 req_desc = 0;
u16 num_elems;
+ int status;
u8 j = 0;
u16 i;
@@ -1117,6 +1117,12 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
ICE_AQC_GET_SW_CONF_RESP_TYPE_S);
switch (res_type) {
+ case ICE_AQC_GET_SW_CONF_RESP_VSI:
+ if (hw->fw_vsi_num != ICE_DFLT_VSI_INVAL)
+ ice_debug(hw, ICE_DBG_SW, "fw_vsi_num %d -> %d\n",
+ hw->fw_vsi_num, vsi_port_num);
+ hw->fw_vsi_num = vsi_port_num;
+ break;
case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
if (j == num_total_ports) {
@@ -1191,8 +1197,10 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
(fi->lkup_type == ICE_SW_LKUP_MAC &&
!IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
- !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
- fi->lan_en = true;
+ !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr))) {
+ if (!fi->fltVeb_en)
+ fi->lan_en = true;
+ }
} else {
fi->lan_en = true;
}
@@ -1355,7 +1363,7 @@ ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
* Create a large action to hold software marker and update the switch rule
* entry pointed by m_ent with newly created large action
*/
-static enum ice_status
+static int
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 sw_marker, u16 l_id)
{
@@ -1367,9 +1375,9 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* 3. GENERIC VALUE action to hold the software marker ID
*/
const u16 num_lg_acts = 3;
- enum ice_status status;
u16 lg_act_size;
u16 rules_size;
+ int status;
u32 act;
u16 id;
@@ -1456,19 +1464,19 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
* @counter_id: VLAN counter ID returned as part of allocate resource
* @l_id: large action resource ID
*/
-static enum ice_status
+static int
ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
u16 counter_id, u16 l_id)
{
struct ice_sw_rule_lkup_rx_tx *rx_tx;
struct ice_sw_rule_lg_act *lg_act;
- enum ice_status status;
/* 2 actions will be added while adding a large action counter */
const int num_acts = 2;
u16 lg_act_size;
u16 rules_size;
u16 f_rule_id;
u32 act;
+ int status;
u16 id;
if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
@@ -1583,15 +1591,15 @@ ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* Call AQ command to add a new switch rule or update existing switch rule
* using the given VSI list ID
*/
-static enum ice_status
+static int
ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
enum ice_sw_lkup_type lkup_type)
{
struct ice_sw_rule_vsi_list *s_rule;
- enum ice_status status;
u16 s_rule_size;
u16 rule_type;
+ int status;
int i;
if (!num_vsi)
@@ -1646,11 +1654,11 @@ exit:
* @vsi_list_id: stores the ID of the VSI list to be created
* @lkup_type: switch rule filter's lookup type
*/
-static enum ice_status
+static int
ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
{
- enum ice_status status;
+ int status;
status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
ice_aqc_opc_alloc_res);
@@ -1673,13 +1681,13 @@ ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
* to the corresponding filter management list to track this switch rule
* and VSI mapping
*/
-static enum ice_status
+static int
ice_create_pkt_fwd_rule(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
struct ice_sw_rule_lkup_rx_tx *s_rule;
- enum ice_status status;
+ int status;
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
@@ -1734,11 +1742,11 @@ ice_create_pkt_fwd_rule_exit:
* Call AQ command to update a previously created switch rule with a
* VSI list ID
*/
-static enum ice_status
+static int
ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
- enum ice_status status;
+ int status;
s_rule = (struct ice_sw_rule_lkup_rx_tx *)
ice_malloc(hw, ice_struct_size(s_rule, hdr_data,
@@ -1766,13 +1774,14 @@ ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
*
* Updates unicast switch filter rules based on VEB/VEPA mode
*/
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = ICE_SUCCESS;
- struct ice_switch_info *sw = NULL;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
+ struct ice_switch_info *sw;
+ int status = 0;
+
sw = hw->switch_info;
rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
@@ -1824,14 +1833,15 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Add the new VSI to the previously created VSI list set
* using the update switch rule command
*/
-static enum ice_status
+static int
ice_add_update_vsi_list(struct ice_hw *hw,
struct ice_fltr_mgmt_list_entry *m_entry,
struct ice_fltr_info *cur_fltr,
struct ice_fltr_info *new_fltr)
{
- enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id = 0;
+ int status = 0;
+
if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
return ICE_ERR_NOT_IMPL;
@@ -1851,7 +1861,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle_arr[2];
/* A rule already exists with the new VSI being added */
- if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
+ if (cur_fltr->vsi_handle == new_fltr->vsi_handle)
return ICE_ERR_ALREADY_EXISTS;
vsi_handle_arr[0] = cur_fltr->vsi_handle;
@@ -1899,7 +1909,7 @@ ice_add_update_vsi_list(struct ice_hw *hw,
/* A rule already exists with the new VSI being added */
if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
- return ICE_SUCCESS;
+ return ICE_ERR_ALREADY_EXISTS;
/* Update the previously created VSI list set with
* the new VSI ID passed in
@@ -2008,14 +2018,14 @@ ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
*
* Adds or updates the rule lists for a given recipe
*/
-static enum ice_status
+static int
ice_add_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
u8 lport, struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_info *new_fltr, *cur_fltr;
struct ice_fltr_mgmt_list_entry *m_entry;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM;
@@ -2058,7 +2068,7 @@ exit_add_rule_internal:
* The VSI list should be emptied before this function is called to remove the
* VSI list.
*/
-static enum ice_status
+static int
ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
enum ice_sw_lkup_type lkup_type)
{
@@ -2076,13 +2086,13 @@ ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
* @fm_list: filter management entry for which the VSI list management needs to
* be done
*/
-static enum ice_status
+static int
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *fm_list)
{
enum ice_sw_lkup_type lkup_type;
- enum ice_status status = ICE_SUCCESS;
u16 vsi_list_id;
+ int status = 0;
if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
fm_list->vsi_count == 0)
@@ -2163,14 +2173,14 @@ ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
* @recp_list: recipe list for which the rule needs to be removed
* @f_entry: rule entry containing filter information
*/
-static enum ice_status
+static int
ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
struct ice_fltr_mgmt_list_entry *list_elem;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
bool remove_rule = false;
+ int status = 0;
u16 vsi_handle;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
@@ -2180,6 +2190,7 @@ ice_remove_rule_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
rule_lock = &recp_list->filt_rule_lock;
ice_acquire_lock(rule_lock);
+
list_elem = ice_find_rule_entry(&recp_list->filt_rules,
&f_entry->fltr_info);
if (!list_elem) {
@@ -2259,14 +2270,14 @@ exit:
* information for all resource types. Each resource type is an
* ice_aqc_get_res_resp_elem structure.
*/
-enum ice_status
+int
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_get_res_alloc *resp;
- enum ice_status status;
struct ice_aq_desc desc;
+ int status;
if (!buf)
return ICE_ERR_BAD_PTR;
@@ -2296,14 +2307,14 @@ ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
* @desc_id: input - first desc ID to start; output - next desc ID
* @cd: pointer to command details structure or NULL
*/
-enum ice_status
+int
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd)
{
struct ice_aqc_get_allocd_res_desc *cmd;
struct ice_aq_desc desc;
- enum ice_status status;
+ int status;
ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
@@ -2342,7 +2353,7 @@ ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
* check for duplicates in this case, removing duplicates from a given
* list should be taken care of in the caller of this function.
*/
-static enum ice_status
+static int
ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct ice_switch_info *sw, u8 lport)
{
@@ -2352,8 +2363,8 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct LIST_HEAD_TYPE *rule_head;
u16 total_elem_left, s_rule_size;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
u16 num_unicast = 0;
+ int status = 0;
u8 elem_sent;
s_rule = NULL;
@@ -2403,7 +2414,7 @@ ice_add_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
ice_acquire_lock(rule_lock);
/* Exit if no suitable entries were found for adding bulk switch rule */
if (!num_unicast) {
- status = ICE_SUCCESS;
+ status = 0;
goto ice_add_mac_exit;
}
@@ -2493,7 +2504,7 @@ ice_add_mac_exit:
*
* Function to add a MAC rule for the logical port from the HW struct
*/
-enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+int ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
if (!m_list || !hw)
return ICE_ERR_PARAM;
@@ -2508,7 +2519,7 @@ enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
* @recp_list: recipe list for which rule has to be added
* @f_entry: filter entry containing one VLAN information
*/
-static enum ice_status
+static int
ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
struct ice_fltr_list_entry *f_entry)
{
@@ -2517,7 +2528,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_sw_recipe *recp_list,
enum ice_sw_lkup_type lkup_type;
u16 vsi_list_id = 0, vsi_handle;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
return ICE_ERR_PARAM;
@@ -2662,7 +2673,7 @@ exit:
* @v_list: list of VLAN entries and forwarding information
* @sw: pointer to the switch info struct for which the function adds the rule
*/
-static enum ice_status
+static int
ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
struct ice_switch_info *sw)
{
@@ -2680,7 +2691,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
if (v_list_itr->status)
return v_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2690,7 +2701,7 @@ ice_add_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
*
* Function to add a VLAN rule for the logical port from the HW struct
*/
-enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
+int ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
{
if (!v_list || !hw)
return ICE_ERR_PARAM;
@@ -2709,7 +2720,7 @@ enum ice_status ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/
-static enum ice_status
+static int
ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
struct ice_switch_info *sw, u8 lport)
{
@@ -2733,7 +2744,7 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
if (em_list_itr->status)
return em_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2743,7 +2754,7 @@ ice_add_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
*
* Function to add an ethertype rule for the logical port from the HW struct
*/
-enum ice_status
+int
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
{
if (!em_list || !hw)
@@ -2759,7 +2770,7 @@ ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
* @em_list: list of ethertype or ethertype MAC entries
* @sw: pointer to the switch info struct for which the function adds the rule
*/
-static enum ice_status
+static int
ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
struct ice_switch_info *sw)
{
@@ -2782,7 +2793,7 @@ ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
if (em_list_itr->status)
return em_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2791,7 +2802,7 @@ ice_remove_eth_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list,
* @em_list: list of ethertype and forwarding information
*
*/
-enum ice_status
+int
ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
{
if (!em_list || !hw)
@@ -2808,7 +2819,7 @@ ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
* Get resource type for a large action depending on the number
* of single actions that it contains.
*/
-static enum ice_status
+static int
ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
{
if (!res_type)
@@ -2835,7 +2846,7 @@ ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
return ICE_ERR_PARAM;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -2844,12 +2855,12 @@ ice_get_lg_act_aqc_res_type(u16 *res_type, int num_acts)
* @l_id: large action ID to fill it in
* @num_acts: number of actions to hold with a large action entry
*/
-static enum ice_status
+static int
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
struct ice_aqc_alloc_free_res_elem *sw_buf;
- enum ice_status status;
u16 buf_len, res_type;
+ int status;
if (!l_id)
return ICE_ERR_BAD_PTR;
@@ -2925,7 +2936,7 @@ void ice_rem_all_sw_rules_info(struct ice_hw *hw)
* add filter rule to set/unset given VSI as default VSI for the switch
* (represented by swid)
*/
-enum ice_status
+int
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction)
{
@@ -2933,9 +2944,10 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
struct ice_sw_recipe *recp_list = NULL;
struct ice_fltr_info f_info;
struct ice_hw *hw = pi->hw;
- enum ice_status status;
u8 lport = pi->lport;
u16 hw_vsi_id;
+ int status;
+
recp_list = &pi->hw->switch_info->recp_list[ICE_SW_LKUP_DFLT];
if (!ice_is_vsi_valid(hw, vsi_handle))
@@ -3051,7 +3063,7 @@ ice_find_ucast_rule_entry(struct LIST_HEAD_TYPE *list_head,
* the entries passed into m_list were added previously. It will not attempt to
* do a partial remove of entries that were found.
*/
-static enum ice_status
+static int
ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
struct ice_sw_recipe *recp_list)
{
@@ -3095,7 +3107,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
if (list_itr->status)
return list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3104,7 +3116,7 @@ ice_remove_mac_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list,
* @m_list: list of MAC addresses and forwarding information
*
*/
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
+int ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
struct ice_sw_recipe *recp_list;
@@ -3118,7 +3130,7 @@ enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
* @v_list: list of VLAN entries and forwarding information
* @recp_list: list from which the function removes the VLAN
*/
-static enum ice_status
+static int
ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
struct ice_sw_recipe *recp_list)
{
@@ -3135,7 +3147,7 @@ ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
if (v_list_itr->status)
return v_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3144,7 +3156,7 @@ ice_remove_vlan_rule(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list,
* @v_list: list of VLAN and forwarding information
*
*/
-enum ice_status
+int
ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
{
struct ice_sw_recipe *recp_list;
@@ -3185,7 +3197,7 @@ ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
* fltr_info.fwd_id fields. These are set such that later logic can
* extract which VSI to remove the fltr from, and pass on that information.
*/
-static enum ice_status
+static int
ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct LIST_HEAD_TYPE *vsi_list_head,
struct ice_fltr_info *fi)
@@ -3212,7 +3224,7 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
LIST_ADD(&tmp->list_entry, vsi_list_head);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3228,13 +3240,13 @@ ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
* Note that this means all entries in vsi_list_head must be explicitly
* deallocated by the caller when done with list.
*/
-static enum ice_status
+static int
ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
struct LIST_HEAD_TYPE *lkup_list_head,
struct LIST_HEAD_TYPE *vsi_list_head)
{
struct ice_fltr_mgmt_list_entry *fm_entry;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
/* check to make sure VSI ID is valid and within boundary */
if (!ice_is_vsi_valid(hw, vsi_handle))
@@ -3307,7 +3319,7 @@ static void ice_determine_promisc_mask(struct ice_fltr_info *fi,
* @sw: pointer to the switch info struct for which the function adds the rule
* @lkup: switch rule filter lookup type
*/
-static enum ice_status
+static int
_ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid,
struct ice_switch_info *sw, enum ice_sw_lkup_type lkup)
@@ -3343,7 +3355,7 @@ _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
}
ice_release_lock(rule_lock);
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3353,7 +3365,7 @@ _ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
*/
-enum ice_status
+int
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid)
{
@@ -3371,7 +3383,7 @@ ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
* @promisc_mask: pointer to mask to be filled in
* @vid: VLAN ID of promisc VLAN VSI
*/
-enum ice_status
+int
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid)
{
@@ -3389,7 +3401,7 @@ ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle,
* @recp_id: recipe ID for which the rule needs to be removed
* @v_list: list of promisc entries
*/
-static enum ice_status
+static int
ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
struct LIST_HEAD_TYPE *v_list)
{
@@ -3404,7 +3416,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
if (v_list_itr->status)
return v_list_itr->status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
@@ -3415,7 +3427,7 @@ ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
* @vid: VLAN ID to clear VLAN promiscuous
* @sw: pointer to the switch info struct for which the function adds the rule
*/
-static enum ice_status
+static int
_ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid,
struct ice_switch_info *sw)
@@ -3427,7 +3439,7 @@ _ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
struct ice_fltr_mgmt_list_entry *itr;
struct LIST_HEAD_TYPE *rule_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 recipe_id;
if (!ice_is_vsi_valid(hw, vsi_handle))
@@ -3495,7 +3507,7 @@ free_fltr_list:
* @promisc_mask: pointer to mask of promiscuous config bits to clear
* @vid: VLAN ID to clear VLAN promiscuous
*/
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid)
{
@@ -3515,7 +3527,7 @@ ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
* @lport: logical port number to configure promisc mode
* @sw: pointer to the switch info struct for which the function adds the rule
*/
-static enum ice_status
+static int
_ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid, u8 lport,
struct ice_switch_info *sw)
@@ -3523,9 +3535,9 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
ice_declare_bitmap(p_mask, ICE_PROMISC_MAX);
struct ice_fltr_list_entry f_list_entry;
- struct ice_fltr_info new_fltr;
- enum ice_status status = ICE_SUCCESS;
bool is_tx_fltr, is_rx_lb_fltr;
+ struct ice_fltr_info new_fltr;
+ int status = 0;
u16 hw_vsi_id;
int pkt_type;
u8 recipe_id;
@@ -3632,7 +3644,7 @@ _ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
status = ice_add_rule_internal(hw, recp_list, lport,
&f_list_entry);
- if (status != ICE_SUCCESS)
+ if (status)
goto set_promisc_exit;
}
@@ -3647,7 +3659,7 @@ set_promisc_exit:
* @promisc_mask: pointer to mask of promiscuous config bits
* @vid: VLAN ID to set VLAN promiscuous
*/
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid)
{
@@ -3670,7 +3682,7 @@ ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-static enum ice_status
+static int
_ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc,
u8 lport, struct ice_switch_info *sw)
@@ -3679,7 +3691,7 @@ _ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
struct LIST_HEAD_TYPE vsi_list_head;
struct LIST_HEAD_TYPE *vlan_head;
struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
u16 vlan_id;
INIT_LIST_HEAD(&vsi_list_head);
@@ -3732,7 +3744,7 @@ free_fltr_list:
*
* Configure VSI with all associated VLANs to given promiscuous mode(s)
*/
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc)
{
@@ -3761,7 +3773,7 @@ ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
struct LIST_HEAD_TYPE *rule_head;
struct ice_fltr_list_entry *tmp;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status status;
+ int status;
INIT_LIST_HEAD(&remove_list_head);
rule_lock = &recp_list[lkup].filt_rule_lock;
@@ -3855,13 +3867,13 @@ void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
* @num_items: number of entries requested for FD resource type
* @counter_id: counter index returned by AQ call
*/
-static enum ice_status
+static int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Allocate resource */
buf_len = ice_struct_size(buf, elem, 1);
@@ -3893,13 +3905,13 @@ exit:
* @num_items: number of entries to be freed for FD resource type
* @counter_id: counter ID resource which needs to be freed
*/
-static enum ice_status
+static int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
struct ice_aqc_alloc_free_res_elem *buf;
- enum ice_status status;
u16 buf_len;
+ int status;
/* Free resource */
buf_len = ice_struct_size(buf, elem, 1);
@@ -3926,7 +3938,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
* @hw: pointer to the hardware structure
* @counter_id: returns counter index
*/
-enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
+int ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
{
return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
@@ -3938,7 +3950,7 @@ enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
* @hw: pointer to the hardware structure
* @counter_id: counter index to be freed
*/
-enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
+int ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
{
return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
@@ -3951,7 +3963,7 @@ enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
* @f_info: filter info structure containing the MAC filter information
* @sw_marker: sw marker to tag the Rx descriptor with
*/
-enum ice_status
+int
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
u16 sw_marker)
{
@@ -3960,9 +3972,9 @@ ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
struct ice_sw_recipe *recp_list;
struct LIST_HEAD_TYPE l_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status ret;
bool entry_exists;
u16 lg_act_id;
+ int ret;
if (f_info->fltr_act != ICE_FWD_TO_VSI)
return ICE_ERR_PARAM;
@@ -4047,7 +4059,7 @@ exit_error:
* @f_info: pointer to filter info structure containing the MAC filter
* information
*/
-enum ice_status
+int
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
struct ice_fltr_mgmt_list_entry *m_entry;
@@ -4055,10 +4067,10 @@ ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
struct ice_sw_recipe *recp_list;
struct LIST_HEAD_TYPE l_head;
struct ice_lock *rule_lock; /* Lock to protect filter rule list */
- enum ice_status ret;
bool entry_exist;
u16 counter_id;
u16 lg_act_id;
+ int ret;
if (f_info->fltr_act != ICE_FWD_TO_VSI)
return ICE_ERR_PARAM;
@@ -4146,14 +4158,14 @@ exit_error:
* @list_head: list for which filters needs to be replayed
* @recp_id: Recipe ID for which rules need to be replayed
*/
-static enum ice_status
+static int
ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = ICE_SUCCESS;
struct ice_sw_recipe *recp_list;
u8 lport = hw->port_info->lport;
struct LIST_HEAD_TYPE l_head;
+ int status = 0;
if (LIST_EMPTY(list_head))
return status;
@@ -4177,7 +4189,7 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
status = ice_add_rule_internal(hw, recp_list, lport,
&f_entry);
- if (status != ICE_SUCCESS)
+ if (status)
goto end;
continue;
}
@@ -4200,7 +4212,7 @@ ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
status = ice_add_rule_internal(hw, recp_list,
lport,
&f_entry);
- if (status != ICE_SUCCESS)
+ if (status)
goto end;
}
}
@@ -4217,10 +4229,10 @@ end:
* NOTE: This function does not clean up partially added filters on error.
* It is up to caller of the function to issue a reset or fail early.
*/
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
+int ice_replay_all_fltr(struct ice_hw *hw)
{
struct ice_switch_info *sw = hw->switch_info;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
@@ -4245,14 +4257,14 @@ enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
* Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
* It is required to pass valid VSI handle.
*/
-static enum ice_status
+static int
ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
struct ice_switch_info *sw, u16 vsi_handle, u8 recp_id,
struct LIST_HEAD_TYPE *list_head)
{
struct ice_fltr_mgmt_list_entry *itr;
- enum ice_status status = ICE_SUCCESS;
struct ice_sw_recipe *recp_list;
+ int status = 0;
u16 hw_vsi_id;
if (LIST_EMPTY(list_head))
@@ -4273,15 +4285,13 @@ ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
status = ice_add_rule_internal(hw, recp_list,
pi->lport,
&f_entry);
- if (status != ICE_SUCCESS)
+ if (status)
goto end;
continue;
}
if (!itr->vsi_list_info ||
!ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
continue;
- /* Clearing it so that the logic can add it back */
- ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
f_entry.fltr_info.vsi_handle = vsi_handle;
f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* update the src in case it is VSI num */
@@ -4293,7 +4303,7 @@ ice_replay_vsi_fltr(struct ice_hw *hw, struct ice_port_info *pi,
status = ice_add_rule_internal(hw, recp_list,
pi->lport,
&f_entry);
- if (status != ICE_SUCCESS)
+ if (status)
goto end;
}
end:
@@ -4308,12 +4318,12 @@ end:
*
* Replays filters for requested VSI via vsi_handle.
*/
-enum ice_status
+int
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle)
{
struct ice_switch_info *sw = NULL;
- enum ice_status status = ICE_SUCCESS;
+ int status = 0;
u8 i;
sw = hw->switch_info;
@@ -4326,11 +4336,11 @@ ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
if (!sw->recp_list[i].adv_rule)
status = ice_replay_vsi_fltr(hw, pi, sw, vsi_handle, i,
head);
- if (status != ICE_SUCCESS)
+ if (status)
return status;
}
- return ICE_SUCCESS;
+ return 0;
}
/**
diff --git a/sys/dev/ice/ice_switch.h b/sys/dev/ice/ice_switch.h
index 60d3dfdf2fb7..53b4a997628c 100644
--- a/sys/dev/ice/ice_switch.h
+++ b/sys/dev/ice/ice_switch.h
@@ -129,6 +129,7 @@ struct ice_fltr_info {
union {
struct {
u8 mac_addr[ETH_ALEN];
+ u16 sw_id;
} mac;
struct {
u8 mac_addr[ETH_ALEN];
@@ -138,6 +139,7 @@ struct ice_fltr_info {
u16 vlan_id;
u16 tpid;
u8 tpid_valid;
+ u16 sw_id;
} vlan;
/* Set lkup_type as ICE_SW_LKUP_ETHERTYPE
* if just using ethertype as filter. Set lkup_type as
@@ -175,6 +177,7 @@ struct ice_fltr_info {
/* Rule creations populate these indicators basing on the switch type */
u8 lb_en; /* Indicate if packet can be looped back */
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
+ u8 fltVeb_en; /* Indicate if VSI is connected to floating VEB */
};
struct ice_adv_lkup_elem {
@@ -346,7 +349,7 @@ struct ice_vsi_list_map_info {
struct ice_fltr_list_entry {
struct LIST_ENTRY_TYPE list_entry;
- enum ice_status status;
+ int status;
struct ice_fltr_info fltr_info;
};
@@ -430,13 +433,13 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
u16 *pkt_len,
const struct ice_dummy_pkt_offsets **offsets);
-enum ice_status
+int
ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
struct ice_sw_rule_lkup_rx_tx *s_rule,
const u8 *dummy_pkt, u16 pkt_len,
const struct ice_dummy_pkt_offsets *offsets);
-enum ice_status
+int
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid);
@@ -445,7 +448,7 @@ ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
u16 lkups_cnt, u16 recp_id,
struct ice_adv_rule_info *rinfo);
-enum ice_status
+int
ice_adv_add_update_vsi_list(struct ice_hw *hw,
struct ice_adv_fltr_mgmt_list_entry *m_entry,
struct ice_adv_rule_info *cur_fltr,
@@ -456,123 +459,123 @@ ice_find_vsi_list_entry(struct ice_sw_recipe *recp_list, u16 vsi_handle,
u16 *vsi_list_id);
/* VSI related commands */
-enum ice_status
+int
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
bool keep_vsi_alloc, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle);
+void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle);
void ice_clear_all_vsi_ctx(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
u16 count, struct ice_mir_rule_buf *mr_buf,
struct ice_sq_cd *cd, u16 *rule_id);
-enum ice_status
+int
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
u32 *ctl_bitmask);
-enum ice_status
+int
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
u32 ctl_bitmask);
/* Switch config */
-enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw);
-
-enum ice_status
+int ice_get_initial_sw_cfg(struct ice_hw *hw);
+int
ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id);
-enum ice_status
+int
ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id);
-enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
-enum ice_status ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id);
-enum ice_status ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id);
-enum ice_status
+int ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
+int ice_alloc_rss_global_lut(struct ice_hw *hw, bool shared_res, u16 *global_lut_id);
+int ice_free_rss_global_lut(struct ice_hw *hw, u16 global_lut_id);
+int
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
u16 *counter_id);
-enum ice_status
+int
ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id);
-enum ice_status
+int
ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries,
struct ice_aqc_get_res_resp_elem *buf, u16 buf_size,
struct ice_sq_cd *cd);
-enum ice_status
+int
ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
struct ice_aqc_res_elem *buf, u16 buf_size, u16 res_type,
bool res_shared, u16 *desc_id, struct ice_sq_cd *cd);
-enum ice_status
+int
ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list);
-enum ice_status ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
+int ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list);
void ice_rem_all_sw_rules_info(struct ice_hw *hw);
-enum ice_status ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
-enum ice_status ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
-enum ice_status
+int ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
+int ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_lst);
+int
ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
-enum ice_status
+int
ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list);
-enum ice_status
+int
ice_cfg_iwarp_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
-enum ice_status
+int
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
u16 sw_marker);
-enum ice_status
+int
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
/* Promisc/defport setup for VSIs */
-enum ice_status
+int
ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
u8 direction);
bool ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
bool *rule_exists);
-enum ice_status
+int
ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid);
-enum ice_status
+int
ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 vid);
-enum ice_status
+int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, bool rm_vlan_promisc);
/* Get VSIs Promisc/defport settings */
-enum ice_status
+int
ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid);
-enum ice_status
+int
ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle,
ice_bitmap_t *promisc_mask, u16 *vid);
-enum ice_status ice_replay_all_fltr(struct ice_hw *hw);
+int ice_replay_all_fltr(struct ice_hw *hw);
-enum ice_status
+int
ice_init_def_sw_recp(struct ice_hw *hw, struct ice_sw_recipe **recp_list);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle);
-enum ice_status
+int
ice_replay_vsi_all_fltr(struct ice_hw *hw, struct ice_port_info *pi,
u16 vsi_handle);
void ice_rm_sw_replay_rule_info(struct ice_hw *hw, struct ice_switch_info *sw);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
-enum ice_status
+int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
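With every prototype above now returning int and 0 meaning success, callers can use plain truth tests. A minimal sketch of the resulting idiom follows; example_add_filters is hypothetical and assumes the ICE_ERR_* codes stay nonzero.

/* Hedged sketch of the post-conversion error-handling idiom. */
static int
example_add_filters(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
	int status;

	status = ice_add_mac(hw, m_list);
	if (status)		/* any nonzero value is an error */
		return status;

	return 0;
}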
diff --git a/sys/dev/ice/ice_type.h b/sys/dev/ice/ice_type.h
index 70312a28e4e4..da535459aec6 100644
--- a/sys/dev/ice/ice_type.h
+++ b/sys/dev/ice/ice_type.h
@@ -43,6 +43,7 @@
#include "ice_controlq.h"
#include "ice_flex_type.h"
#include "ice_protocol_type.h"
+#include "ice_sbq_cmd.h"
#include "ice_vlan_mode.h"
#include "ice_fwlog.h"
@@ -224,6 +225,7 @@ enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_VF,
ICE_MAC_E810,
+ ICE_MAC_E830,
ICE_MAC_GENERIC,
ICE_MAC_GENERIC_3K,
ICE_MAC_GENERIC_3K_E825,
@@ -257,7 +259,9 @@ enum ice_media_type {
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC)
#define ICE_MEDIA_C2M_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC | \
- ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC)
+ ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8_AOC_ACC)
#define ICE_MEDIA_OPT_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_SX | \
ICE_PHY_TYPE_LOW_1000BASE_LX | \
@@ -277,6 +281,12 @@ enum ice_media_type {
ICE_PHY_TYPE_LOW_50GBASE_FR | \
ICE_PHY_TYPE_LOW_100GBASE_DR)
+#define ICE_MEDIA_OPT_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_200G_SR4 | \
+ ICE_PHY_TYPE_HIGH_200G_LR4 | \
+ ICE_PHY_TYPE_HIGH_200G_FR4 | \
+ ICE_PHY_TYPE_HIGH_200G_DR4 | \
+ ICE_PHY_TYPE_HIGH_400GBASE_FR8)
+
#define ICE_MEDIA_BP_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_1000BASE_KX | \
ICE_PHY_TYPE_LOW_2500BASE_KX | \
ICE_PHY_TYPE_LOW_5GBASE_KR | \
@@ -290,7 +300,8 @@ enum ice_media_type {
ICE_PHY_TYPE_LOW_100GBASE_KR4 | \
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4)
-#define ICE_MEDIA_BP_PHY_TYPE_HIGH_M ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4
+#define ICE_MEDIA_BP_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_KR4_PAM4)
#define ICE_MEDIA_DAC_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_10G_SFI_DA | \
ICE_PHY_TYPE_LOW_25GBASE_CR | \
@@ -303,6 +314,8 @@ enum ice_media_type {
ICE_PHY_TYPE_LOW_50GBASE_CP | \
ICE_PHY_TYPE_LOW_100GBASE_CP2)
+#define ICE_MEDIA_DAC_PHY_TYPE_HIGH_M ICE_PHY_TYPE_HIGH_200G_CR4_PAM4
+
#define ICE_MEDIA_C2C_PHY_TYPE_LOW_M (ICE_PHY_TYPE_LOW_100M_SGMII | \
ICE_PHY_TYPE_LOW_1G_SGMII | \
ICE_PHY_TYPE_LOW_2500BASE_X | \
@@ -316,7 +329,9 @@ enum ice_media_type {
ICE_PHY_TYPE_LOW_100G_AUI4)
#define ICE_MEDIA_C2C_PHY_TYPE_HIGH_M (ICE_PHY_TYPE_HIGH_100G_CAUI2 | \
- ICE_PHY_TYPE_HIGH_100G_AUI2)
+ ICE_PHY_TYPE_HIGH_100G_AUI2 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI8)
/* Software VSI types. */
enum ice_vsi_type {
@@ -504,6 +519,7 @@ struct ice_hw_common_caps {
bool dyn_flattening_en;
/* Support for OROM update in Recovery Mode */
bool orom_recovery_update;
+ bool next_cluster_id_support;
};
#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
@@ -557,7 +573,8 @@ enum ice_pcie_bus_speed {
ice_pcie_speed_2_5GT = 0x14,
ice_pcie_speed_5_0GT = 0x15,
ice_pcie_speed_8_0GT = 0x16,
- ice_pcie_speed_16_0GT = 0x17
+ ice_pcie_speed_16_0GT = 0x17,
+ ice_pcie_speed_32_0GT = 0x18,
};
/* PCI bus widths */
@@ -938,6 +955,7 @@ struct ice_port_info {
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
+ u8 loopback_mode;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
@@ -1044,6 +1062,7 @@ enum ice_phy_model {
ICE_PHY_UNSUP = -1,
ICE_PHY_E810 = 1,
ICE_PHY_E822,
+ ICE_PHY_E830,
};
/* Port hardware description */
@@ -1061,6 +1080,7 @@ struct ice_hw {
u64 debug_mask; /* BITMAP for debug mask */
enum ice_mac_type mac_type;
+ u16 fw_vsi_num;
/* pci info */
u16 device_id;
u16 vendor_id;
@@ -1097,6 +1117,7 @@ struct ice_hw {
/* Control Queue info */
struct ice_ctl_q_info adminq;
+ struct ice_ctl_q_info sbq;
struct ice_ctl_q_info mailboxq;
u8 api_branch; /* API branch version */
u8 api_maj_ver; /* API major version */
@@ -1146,7 +1167,6 @@ struct ice_hw {
u32 pkg_seg_id;
u32 pkg_sign_type;
u32 active_track_id;
- u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -1180,6 +1200,7 @@ struct ice_hw {
u8 dvm_ena;
bool subscribable_recipes_supported;
+ bool skip_clear_pf;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@@ -1456,4 +1477,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_AUTO_DROP_MAJ 1
#define ICE_FW_API_AUTO_DROP_MIN 4
+static inline bool
+ice_is_nac_dual(struct ice_hw *hw)
+{
+ return !!(hw->dev_caps.nac_topo.mode & ICE_NAC_TOPO_DUAL_M);
+}
#endif /* _ICE_TYPE_H_ */
diff --git a/sys/dev/ice/ice_vf_mbx.c b/sys/dev/ice/ice_vf_mbx.c
new file mode 100644
index 000000000000..387a1c6739a6
--- /dev/null
+++ b/sys/dev/ice/ice_vf_mbx.c
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "ice_common.h"
+#include "ice_hw_autogen.h"
+#include "ice_vf_mbx.h"
+
+/**
+ * ice_aq_send_msg_to_vf
+ * @hw: pointer to the hardware structure
+ * @vfid: VF ID to send msg
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send a message to the VF driver (0x0802) using the mailbox
+ * queue; the message is sent asynchronously via the
+ * ice_sq_send_cmd() function.
+ */
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_pf_vf_msg *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_vf);
+
+ cmd = &desc.params.virt;
+ cmd->id = CPU_TO_LE32(vfid);
+
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+
+ if (msglen)
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
+
+/**
+ * ice_aq_send_msg_to_pf
+ * @hw: pointer to the hardware structure
+ * @v_opcode: opcodes for VF-PF communication
+ * @v_retval: return error code
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ * @cd: pointer to command details
+ *
+ * Send message to PF driver using mailbox queue. By default, this
+ * message is sent asynchronously, i.e. ice_sq_send_cmd()
+ * does not wait for completion before returning.
+ */
+int
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+ int v_retval, u8 *msg, u16 msglen,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_mbx_opc_send_msg_to_pf);
+ desc.cookie_high = CPU_TO_LE32(v_opcode);
+ desc.cookie_low = CPU_TO_LE32(v_retval);
+
+ if (msglen)
+ desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
+
+ return ice_sq_send_cmd(hw, &hw->mailboxq, &desc, msg, msglen, cd);
+}
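As a usage illustration, a PF-side acknowledgement with an empty payload might look like the sketch below; example_ack_vf and the opcode choice are hypothetical.

/* Hedged usage sketch: acknowledge a VF request with no payload. */
static void
example_ack_vf(struct ice_hw *hw, u16 vf_id)
{
	int err;

	err = ice_aq_send_msg_to_vf(hw, vf_id, VIRTCHNL_OP_VERSION,
				    0 /* v_retval: success */, NULL, 0, NULL);
	if (err)
		ice_debug(hw, ICE_DBG_SW, "mbx send to VF %u failed: %d\n",
			  vf_id, err);
}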
+
+static const u32 ice_legacy_aq_to_vc_speed[] = {
+ VIRTCHNL_LINK_SPEED_100MB, /* BIT(0) */
+ VIRTCHNL_LINK_SPEED_100MB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_1GB,
+ VIRTCHNL_LINK_SPEED_10GB,
+ VIRTCHNL_LINK_SPEED_20GB,
+ VIRTCHNL_LINK_SPEED_25GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+ VIRTCHNL_LINK_SPEED_40GB,
+};
+
+/**
+ * ice_conv_link_speed_to_virtchnl
+ * @adv_link_support: determines the format of the returned link speed
+ * @link_speed: variable containing the link_speed to be converted
+ *
+ * Convert link speed supported by HW to link speed supported by virtchnl.
+ * If adv_link_support is true, then return link speed in Mbps. Else return
+ * link speed as a VIRTCHNL_LINK_SPEED_* cast to a u32. Note that the caller
+ * needs to cast back to an enum virtchnl_link_speed in the case where
+ * adv_link_support is false, but when adv_link_support is true the caller can
+ * expect the speed in Mbps.
+ */
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed)
+{
+ /* convert a BIT() value into an array index */
+ u16 index = (u16)(ice_fls(link_speed) - 1);
+
+ if (adv_link_support)
+ return ice_get_link_speed(index);
+ else if (index < ARRAY_SIZE(ice_legacy_aq_to_vc_speed))
+ /* Virtchnl speeds are not defined for every speed supported in
+ * the hardware. To maintain compatibility with older AVF
+ * drivers, while reporting the speed the new speed values are
+ * resolved to the closest known virtchnl speeds
+ */
+ return ice_legacy_aq_to_vc_speed[index];
+
+ return VIRTCHNL_LINK_SPEED_UNKNOWN;
+}
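To make the two return formats concrete, a caller might branch as in this sketch; report_speed_mbps() and report_legacy_speed() are hypothetical helpers.

/* Hedged usage sketch for the dual return format described above. */
static void
example_report_speed(struct ice_hw *hw, bool adv_link_support, u16 link_speed)
{
	u32 speed = ice_conv_link_speed_to_virtchnl(adv_link_support,
						    link_speed);

	if (adv_link_support)
		report_speed_mbps(hw, speed);	/* value is in Mbps */
	else
		report_legacy_speed(hw, (enum virtchnl_link_speed)speed);
}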
+
+/* The mailbox overflow detection algorithm helps to check if there
+ * is a possibility of a malicious VF transmitting too many MBX messages to the
+ * PF.
+ * 1. The mailbox snapshot structure, ice_mbx_snapshot, is initialized during
+ * driver initialization in ice_init_hw() using ice_mbx_init_snapshot().
+ * The struct ice_mbx_snapshot helps to track and traverse a static window of
+ * messages within the mailbox queue while looking for a malicious VF.
+ *
+ * 2. When the caller starts processing its mailbox queue in response to an
+ * interrupt, the structure ice_mbx_snapshot is expected to be cleared before
+ * the algorithm can be run for the first time for that interrupt. This
+ * requires calling ice_mbx_reset_snapshot() as well as calling
+ * ice_mbx_reset_vf_info() for each VF tracking structure.
+ *
+ * 3. For every message read by the caller from the MBX Queue, the caller must
+ * call the detection algorithm's entry function ice_mbx_vf_state_handler().
+ * Before every call to ice_mbx_vf_state_handler() the struct ice_mbx_data is
+ * filled as it is required to be passed to the algorithm.
+ *
+ * 4. Every time a message is read from the MBX queue, a tracking structure
+ * for the VF must be passed to the state handler. The boolean output
+ * report_malvf from ice_mbx_vf_state_handler() serves as an indicator to the
+ * caller whether it must report this VF as malicious or not.
+ *
+ * 5. When a VF is identified to be malicious, the caller can send a message
+ * to the system administrator.
+ *
+ * 6. The PF is responsible for maintaining the struct ice_mbx_vf_info
+ * structure for each VF. The PF should clear the VF tracking structure if the
+ * VF is reset. When a VF is shut down and brought back up, we then
+ * assume that the new VF is not malicious and will report it again if
+ * malicious behavior is detected again.
+ *
+ * 7. The function ice_mbx_reset_snapshot() is called to reset the information
+ * in ice_mbx_snapshot for every new mailbox interrupt handled.
+ */
+#define ICE_RQ_DATA_MASK(rq_data) ((rq_data) & PF_MBX_ARQH_ARQH_M)
+/* Using the highest value for an unsigned 16-bit value 0xFFFF to indicate that
+ * the max messages check must be ignored in the algorithm
+ */
+#define ICE_IGNORE_MAX_MSG_CNT 0xFFFF
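A hedged sketch of the per-interrupt caller flow from steps 2-5 above; pending_msg(), fill_mbx_data() and warn_admin() are hypothetical stand-ins, and the state-handler signature is inferred from the description rather than copied from a header.

static void
example_service_mailbox(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
{
	/* Step 2: start each interrupt with a fresh snapshot. */
	ice_mbx_reset_snapshot(&hw->mbx_snapshot);

	while (pending_msg(hw)) {
		struct ice_mbx_data mbx_data;
		bool report_malvf = false;

		/* Step 3: describe the queue state for the algorithm. */
		fill_mbx_data(hw, &mbx_data);

		/* Step 4: run the detection handler for this message. */
		ice_mbx_vf_state_handler(hw, &mbx_data, vf_info,
					 &report_malvf);

		/* Step 5: surface malicious VFs to the administrator. */
		if (report_malvf)
			warn_admin(vf_info);
	}
}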
+
+/**
+ * ice_mbx_reset_snapshot - Initialize mailbox snapshot structure
+ * @snap: pointer to the mailbox snapshot
+ */
+static void ice_mbx_reset_snapshot(struct ice_mbx_snapshot *snap)
+{
+ struct ice_mbx_vf_info *vf_info;
+
+ /* Clear mbx_buf in the mailbox snapshot structure and set the
+ * mailbox snapshot state to a new capture.
+ */
+ ice_memset(&snap->mbx_buf, 0, sizeof(snap->mbx_buf), ICE_NONDMA_MEM);
+ snap->mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+
+ /* Reset message counts for all VFs to zero */
+ LIST_FOR_EACH_ENTRY(vf_info, &snap->mbx_vf, ice_mbx_vf_info, list_entry)
+ vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_traverse - Pass through mailbox snapshot
+ * @hw: pointer to the HW struct
+ * @new_state: new algorithm state
+ *
+ * Traverse the mailbox static snapshot without checking
+ * for malicious VFs.
+ */
+static void
+ice_mbx_traverse(struct ice_hw *hw,
+ enum ice_mbx_snapshot_state *new_state)
+{
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ u32 num_iterations;
+
+ snap_buf = &hw->mbx_snapshot.mbx_buf;
+
+ /* As the mailbox buffer is circular, apply a mask
+ * to the incremented iteration count.
+ */
+ num_iterations = ICE_RQ_DATA_MASK(++snap_buf->num_iterations);
+
+ /* Check either of the below conditions to exit snapshot traversal:
+ * Condition-1: The number of iterations in the mailbox equals the
+ * mailbox head, indicating that we have reached the end of the
+ * static snapshot.
+ * Condition-2: Unless the maximum number of messages serviced in the
+ * mailbox for a given interrupt is the ignore sentinel
+ * (ICE_IGNORE_MAX_MSG_CNT), the number of messages processed is
+ * greater than or equal to the maximum number of mailbox entries
+ * serviced in the current work item.
+ */
+ if (num_iterations == snap_buf->head ||
+ (snap_buf->max_num_msgs_mbx < ICE_IGNORE_MAX_MSG_CNT &&
+ ++snap_buf->num_msg_proc >= snap_buf->max_num_msgs_mbx))
+ *new_state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;
+}
+
+/**
+ * ice_mbx_detect_malvf - Detect malicious VF in snapshot
+ * @hw: pointer to the HW struct
+ * @vf_info: mailbox tracking structure for a VF
+ * @new_state: new algorithm state
+ * @is_malvf: boolean output to indicate if VF is malicious
+ *
+ * This function tracks the number of asynchronous messages
+ * sent per VF and marks the VF as malicious if it exceeds
+ * the permissible number of messages to send.
+ */
+static int
+ice_mbx_detect_malvf(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info,
+ enum ice_mbx_snapshot_state *new_state,
+ bool *is_malvf)
+{
+ /* increment the message count for this VF */
+ vf_info->msg_count++;
+
+ if (vf_info->msg_count >= ICE_ASYNC_VF_MSG_THRESHOLD)
+ *is_malvf = true;
+
+ /* continue to iterate through the mailbox snapshot */
+ ice_mbx_traverse(hw, new_state);
+
+ return 0;
+}
+
+/**
+ * ice_e830_mbx_vf_dec_trig - Decrements the VF mailbox queue counter
+ * @hw: pointer to the HW struct
+ * @event: pointer to the control queue receive event
+ *
+ * This function triggers a decrement of the counter
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT when the driver replenishes
+ * the buffers at the PF mailbox queue.
+ */
+void ice_e830_mbx_vf_dec_trig(struct ice_hw *hw,
+ struct ice_rq_event_info *event)
+{
+ u16 vfid = LE16_TO_CPU(event->desc.retval);
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vfid), 1);
+}
+
+/**
+ * ice_mbx_vf_clear_cnt_e830 - Clear the VF mailbox queue count
+ * @hw: pointer to the HW struct
+ * @vf_id: VF ID in the PF space
+ *
+ * This function clears the counter MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT, and should
+ * be called when a VF is created and on VF reset.
+ */
+void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id)
+{
+ u32 reg = rd32(hw, E830_MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT(vf_id));
+
+ wr32(hw, E830_MBX_VF_DEC_TRIG(vf_id), reg);
+}
+
+/**
+ * ice_mbx_vf_state_handler - Handle states of the overflow algorithm
+ * @hw: pointer to the HW struct
+ * @mbx_data: pointer to structure containing mailbox data
+ * @vf_info: mailbox tracking structure for the VF in question
+ * @report_malvf: boolean output to indicate whether VF should be reported
+ *
+ * The function serves as an entry point for the malicious VF
+ * detection algorithm by handling the different states and state
+ * transitions of the algorithm:
+ * New snapshot: This state is entered when creating a new static
+ * snapshot. The data from any previous mailbox snapshot is
+ * cleared and a new capture of the mailbox head and tail is
+ * logged. This will be the new static snapshot to detect
+ * asynchronous messages sent by VFs. On capturing the snapshot,
+ * and depending on whether the number of pending messages in that
+ * snapshot exceeds the watermark value, the state machine enters
+ * the traverse or detect state.
+ * Traverse: If the pending message count is below the watermark,
+ * iterate through the snapshot without any action on the VF.
+ * Detect: If the pending message count exceeds the watermark,
+ * traverse the static snapshot and look for a malicious VF.
+ */
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+ struct ice_mbx_snap_buffer_data *snap_buf;
+ struct ice_ctl_q_info *cq = &hw->mailboxq;
+ enum ice_mbx_snapshot_state new_state;
+ int status = 0;
+ bool is_malvf = false;
+
+ if (!report_malvf || !mbx_data || !vf_info)
+ return ICE_ERR_BAD_PTR;
+
+ *report_malvf = false;
+
+ /* When entering the mailbox state machine, assume that the VF
+ * is not malicious until detected.
+ */
+ /* Check that the maximum number of messages allowed to be processed
+ * while servicing the current interrupt is greater than the defined
+ * AVF message threshold.
+ */
+ if (mbx_data->max_num_msgs_mbx <= ICE_ASYNC_VF_MSG_THRESHOLD)
+ return ICE_ERR_INVAL_SIZE;
+
+ /* The watermark value should not be less than the threshold limit
+ * set for the number of asynchronous messages a VF can send to the
+ * mailbox, nor should it be greater than the maximum number of
+ * messages in the mailbox serviced in the current interrupt.
+ */
+ if (mbx_data->async_watermark_val < ICE_ASYNC_VF_MSG_THRESHOLD ||
+ mbx_data->async_watermark_val > mbx_data->max_num_msgs_mbx)
+ return ICE_ERR_PARAM;
+
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ snap_buf = &snap->mbx_buf;
+
+ switch (snap_buf->state) {
+ case ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT:
+ /* Clear any previously held data in mailbox snapshot structure. */
+ ice_mbx_reset_snapshot(snap);
+
+ /* Collect the pending ARQ count, number of messages processed and
+ * the maximum number of messages allowed to be processed from the
+ * Mailbox for current interrupt.
+ */
+ snap_buf->num_pending_arq = mbx_data->num_pending_arq;
+ snap_buf->num_msg_proc = mbx_data->num_msg_proc;
+ snap_buf->max_num_msgs_mbx = mbx_data->max_num_msgs_mbx;
+
+ /* Capture a new static snapshot of the mailbox by logging the
+ * head and tail of the snapshot, and set num_iterations to the
+ * tail value to mark the start of the iteration through it.
+ */
+ snap_buf->head = ICE_RQ_DATA_MASK(cq->rq.next_to_clean +
+ mbx_data->num_pending_arq);
+ snap_buf->tail = ICE_RQ_DATA_MASK(cq->rq.next_to_clean - 1);
+ snap_buf->num_iterations = snap_buf->tail;
+
+ /* The number of pending ARQ messages returned by
+ * ice_clean_rq_elem() is the difference between the head and
+ * tail of the mailbox queue. Comparing this value against the
+ * watermark helps to check if we potentially have malicious VFs.
+ */
+ if (snap_buf->num_pending_arq >=
+ mbx_data->async_watermark_val) {
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+ } else {
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ }
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_TRAVERSE:
+ new_state = ICE_MAL_VF_DETECT_STATE_TRAVERSE;
+ ice_mbx_traverse(hw, &new_state);
+ break;
+
+ case ICE_MAL_VF_DETECT_STATE_DETECT:
+ new_state = ICE_MAL_VF_DETECT_STATE_DETECT;
+ status = ice_mbx_detect_malvf(hw, vf_info, &new_state, &is_malvf);
+ break;
+
+ default:
+ new_state = ICE_MAL_VF_DETECT_STATE_INVALID;
+ status = ICE_ERR_CFG;
+ }
+
+ snap_buf->state = new_state;
+
+ /* Only report a VF as malicious the first time we detect it */
+ if (is_malvf && !vf_info->malicious) {
+ vf_info->malicious = 1;
+ *report_malvf = true;
+ }
+
+ return status;
+}
+
+/**
+ * ice_mbx_clear_malvf - Clear VF mailbox info
+ * @vf_info: the mailbox tracking structure for a VF
+ *
+ * In case of a VF reset, this function shall be called to clear the VF's
+ * current mailbox tracking state.
+ */
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info)
+{
+ vf_info->malicious = 0;
+ vf_info->msg_count = 0;
+}
+
+/**
+ * ice_mbx_init_vf_info - Initialize a new VF mailbox tracking info
+ * @hw: pointer to the hardware structure
+ * @vf_info: the mailbox tracking info structure for a VF
+ *
+ * Initialize a VF mailbox tracking info structure and insert it into the
+ * snapshot list.
+ *
+ * If you remove the VF, you must also delete the associated VF info structure
+ * from the linked list.
+ */
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ ice_mbx_clear_malvf(vf_info);
+ LIST_ADD(&vf_info->list_entry, &snap->mbx_vf);
+}
+
+/**
+ * ice_mbx_init_snapshot - Initialize mailbox snapshot data
+ * @hw: pointer to the hardware structure
+ *
+ * Clear the mailbox snapshot structure and initialize the VF mailbox list.
+ */
+void ice_mbx_init_snapshot(struct ice_hw *hw)
+{
+ struct ice_mbx_snapshot *snap = &hw->mbx_snapshot;
+
+ INIT_LIST_HEAD(&snap->mbx_vf);
+ ice_mbx_reset_snapshot(snap);
+}
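+
+/* Illustrative sketch (not compiled into the driver): the expected lifecycle
+ * of these helpers, following the algorithm description above. The vf
+ * pointer and its mbx_info member are hypothetical caller-side names, and
+ * the list-removal helper is assumed.
+ */
+#if 0
+	ice_mbx_init_snapshot(hw);		 /* once, during ice_init_hw() */
+	ice_mbx_init_vf_info(hw, &vf->mbx_info); /* when each VF is created */
+	ice_mbx_clear_malvf(&vf->mbx_info);	 /* on VF reset */
+	LIST_DEL(&vf->mbx_info.list_entry);	 /* on VF removal (helper assumed) */
+#endif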
diff --git a/sys/dev/ice/ice_vf_mbx.h b/sys/dev/ice/ice_vf_mbx.h
new file mode 100644
index 000000000000..3b185ac89c11
--- /dev/null
+++ b/sys/dev/ice/ice_vf_mbx.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ICE_VF_MBX_H_
+#define _ICE_VF_MBX_H_
+
+#include "ice_type.h"
+#include "ice_controlq.h"
+
+/* Define the mailbox message threshold as 63 asynchronous
+ * pending messages. Normal VF functionality does not require
+ * sending more than 63 asynchronous pending messages.
+ *
+ * The threshold value should also be used to initialize the
+ * MBX_VF_IN_FLIGHT_MSGS_AT_PF_CNT register.
+ */
+#define ICE_ASYNC_VF_MSG_THRESHOLD 63
+
+int
+ice_aq_send_msg_to_pf(struct ice_hw *hw, enum virtchnl_ops v_opcode,
+ int v_retval, u8 *msg, u16 msglen,
+ struct ice_sq_cd *cd);
+int
+ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
+ u8 *msg, u16 msglen, struct ice_sq_cd *cd);
+
+u32 ice_conv_link_speed_to_virtchnl(bool adv_link_support, u16 link_speed);
+
+void ice_e830_mbx_vf_dec_trig(struct ice_hw *hw,
+ struct ice_rq_event_info *event);
+void ice_mbx_vf_clear_cnt_e830(struct ice_hw *hw, u16 vf_id);
+int
+ice_mbx_vf_state_handler(struct ice_hw *hw, struct ice_mbx_data *mbx_data,
+ struct ice_mbx_vf_info *vf_info, bool *report_malvf);
+void ice_mbx_clear_malvf(struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_vf_info(struct ice_hw *hw, struct ice_mbx_vf_info *vf_info);
+void ice_mbx_init_snapshot(struct ice_hw *hw);
+#endif /* _ICE_VF_MBX_H_ */
diff --git a/sys/dev/ice/ice_vlan_mode.c b/sys/dev/ice/ice_vlan_mode.c
index e0c6d7897607..d3429651ce11 100644
--- a/sys/dev/ice/ice_vlan_mode.c
+++ b/sys/dev/ice/ice_vlan_mode.c
@@ -37,13 +37,13 @@
* @hw: pointer to the HW struct
* @dvm: output variable to determine if DDP supports DVM(true) or SVM(false)
*/
-static enum ice_status
+static int
ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
{
u16 meta_init_size = sizeof(struct ice_meta_init_section);
struct ice_meta_init_section *sect;
struct ice_buf_build *bld;
- enum ice_status status;
+ int status;
/* if anything fails, we assume there is no DVM support */
*dvm = false;
@@ -88,7 +88,7 @@ ice_pkg_get_supported_vlan_mode(struct ice_hw *hw, bool *dvm)
*
* Get VLAN Mode Parameters (0x020D)
*/
-static enum ice_status
+static int
ice_aq_get_vlan_mode(struct ice_hw *hw,
struct ice_aqc_get_vlan_mode *get_params)
{
@@ -118,7 +118,7 @@ ice_aq_get_vlan_mode(struct ice_hw *hw,
static bool ice_aq_is_dvm_ena(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_params = { 0 };
- enum ice_status status;
+ int status;
status = ice_aq_get_vlan_mode(hw, &get_params);
if (status) {
@@ -163,7 +163,7 @@ static void ice_cache_vlan_mode(struct ice_hw *hw)
*/
static bool ice_pkg_supports_dvm(struct ice_hw *hw)
{
- enum ice_status status;
+ int status;
bool pkg_supports_dvm;
status = ice_pkg_get_supported_vlan_mode(hw, &pkg_supports_dvm);
@@ -183,7 +183,7 @@ static bool ice_pkg_supports_dvm(struct ice_hw *hw)
static bool ice_fw_supports_dvm(struct ice_hw *hw)
{
struct ice_aqc_get_vlan_mode get_vlan_mode = { 0 };
- enum ice_status status;
+ int status;
/* If firmware returns success, then it supports DVM, else it only
* supports SVM
@@ -230,7 +230,7 @@ static bool ice_is_dvm_supported(struct ice_hw *hw)
*
* Set VLAN Mode Parameters (0x020C)
*/
-static enum ice_status
+static int
ice_aq_set_vlan_mode(struct ice_hw *hw,
struct ice_aqc_set_vlan_mode *set_params)
{
@@ -265,10 +265,10 @@ ice_aq_set_vlan_mode(struct ice_hw *hw,
* ice_set_svm - set single VLAN mode
* @hw: pointer to the HW structure
*/
-static enum ice_status ice_set_svm(struct ice_hw *hw)
+static int ice_set_svm(struct ice_hw *hw)
{
struct ice_aqc_set_vlan_mode *set_params;
- enum ice_status status;
+ int status;
status = ice_aq_set_port_params(hw->port_info, 0, false, false, false, NULL);
if (status) {
@@ -298,10 +298,10 @@ static enum ice_status ice_set_svm(struct ice_hw *hw)
* ice_set_vlan_mode
* @hw: pointer to the HW structure
*/
-enum ice_status ice_set_vlan_mode(struct ice_hw *hw)
+int ice_set_vlan_mode(struct ice_hw *hw)
{
if (!ice_is_dvm_supported(hw))
- return ICE_SUCCESS;
+ return 0;
return ice_set_svm(hw);
}
diff --git a/sys/dev/ice/ice_vlan_mode.h b/sys/dev/ice/ice_vlan_mode.h
index 8b1a56c98a3d..14d132775e3a 100644
--- a/sys/dev/ice/ice_vlan_mode.h
+++ b/sys/dev/ice/ice_vlan_mode.h
@@ -37,7 +37,7 @@
struct ice_hw;
bool ice_is_dvm_ena(struct ice_hw *hw);
-enum ice_status ice_set_vlan_mode(struct ice_hw *hw);
+int ice_set_vlan_mode(struct ice_hw *hw);
void ice_post_pkg_dwnld_vlan_mode_cfg(struct ice_hw *hw);
#endif /* _ICE_VLAN_MODE_H */
diff --git a/sys/dev/ice/if_ice_iflib.c b/sys/dev/ice/if_ice_iflib.c
index 4e451bf3fb55..1469d2916465 100644
--- a/sys/dev/ice/if_ice_iflib.c
+++ b/sys/dev/ice/if_ice_iflib.c
@@ -42,6 +42,9 @@
#include "ice_drv_info.h"
#include "ice_switch.h"
#include "ice_sched.h"
+#ifdef PCI_IOV
+#include "ice_iov.h"
+#endif
#include <sys/module.h>
#include <sys/sockio.h>
@@ -84,6 +87,13 @@ static int ice_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req);
static int ice_if_suspend(if_ctx_t ctx);
static int ice_if_resume(if_ctx_t ctx);
static bool ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
+static void ice_init_link(struct ice_softc *sc);
+#ifdef PCI_IOV
+static int ice_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params);
+static void ice_if_iov_uninit(if_ctx_t ctx);
+static int ice_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params);
+static void ice_if_vflr_handle(if_ctx_t ctx);
+#endif
static int ice_setup_mirror_vsi(struct ice_mirr_if *mif);
static int ice_wire_mirror_intrs(struct ice_mirr_if *mif);
static void ice_free_irqvs_subif(struct ice_mirr_if *mif);
@@ -157,6 +167,11 @@ static device_method_t ice_methods[] = {
DEVMETHOD(device_shutdown, iflib_device_shutdown),
DEVMETHOD(device_suspend, iflib_device_suspend),
DEVMETHOD(device_resume, iflib_device_resume),
+#ifdef PCI_IOV
+ DEVMETHOD(pci_iov_init, iflib_device_iov_init),
+ DEVMETHOD(pci_iov_uninit, iflib_device_iov_uninit),
+ DEVMETHOD(pci_iov_add_vf, iflib_device_iov_add_vf),
+#endif
DEVMETHOD_END
};
@@ -197,6 +212,12 @@ static device_method_t ice_iflib_methods[] = {
DEVMETHOD(ifdi_suspend, ice_if_suspend),
DEVMETHOD(ifdi_resume, ice_if_resume),
DEVMETHOD(ifdi_needs_restart, ice_if_needs_restart),
+#ifdef PCI_IOV
+ DEVMETHOD(ifdi_iov_vf_add, ice_if_iov_vf_add),
+ DEVMETHOD(ifdi_iov_init, ice_if_iov_init),
+ DEVMETHOD(ifdi_iov_uninit, ice_if_iov_uninit),
+ DEVMETHOD(ifdi_vflr_handle, ice_if_vflr_handle),
+#endif
DEVMETHOD_END
};
@@ -458,7 +479,7 @@ ice_if_attach_pre(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
enum ice_fw_modes fw_mode;
- enum ice_status status;
+ int status;
if_softc_ctx_t scctx;
struct ice_hw *hw;
device_t dev;
@@ -472,6 +493,7 @@ ice_if_attach_pre(if_ctx_t ctx)
sc->media = iflib_get_media(ctx);
sc->sctx = iflib_get_sctx(ctx);
sc->iflib_ctx_lock = iflib_ctx_lock_get(ctx);
+ sc->ifp = iflib_get_ifp(ctx);
dev = sc->dev = iflib_get_dev(ctx);
scctx = sc->scctx = iflib_get_softc_ctx(ctx);
@@ -551,7 +573,7 @@ reinit_hw:
* of the hardware
*/
err = ice_load_pkg_file(sc);
- if (err == ICE_SUCCESS) {
+ if (!err) {
ice_deinit_hw(hw);
goto reinit_hw;
}
@@ -631,12 +653,8 @@ reinit_hw:
*/
ice_setup_pf_vsi(sc);
- err = ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
+ ice_alloc_vsi_qmap(&sc->pf_vsi, scctx->isc_ntxqsets_max,
scctx->isc_nrxqsets_max);
- if (err) {
- device_printf(dev, "Unable to allocate VSI Queue maps\n");
- goto free_main_vsi;
- }
/* Allocate MSI-X vectors (due to isc_flags IFLIB_SKIP_MSIX) */
err = ice_allocate_msix(sc);
@@ -713,7 +731,7 @@ static void
ice_update_link_status(struct ice_softc *sc, bool update_media)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* Never report link up when in recovery mode */
if (ice_test_state(&sc->state, ICE_STATE_RECOVERY_MODE))
@@ -735,6 +753,9 @@ ice_update_link_status(struct ice_softc *sc, bool update_media)
iflib_link_state_change(sc->ctx, LINK_STATE_DOWN, 0);
ice_rdma_link_change(sc, LINK_STATE_DOWN, 0);
}
+#ifdef PCI_IOV
+ ice_vc_notify_all_vfs_link_state(sc);
+#endif
update_media = true;
}
@@ -761,7 +782,7 @@ ice_if_attach_post(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- enum ice_status status;
+ int status;
int err;
ASSERT_CTX_LOCKED(sc);
@@ -776,8 +797,6 @@ ice_if_attach_post(if_ctx_t ctx)
* handler is called, so wait until attach_post to setup the
* isc_max_frame_size.
*/
-
- sc->ifp = ifp;
sc->scctx->isc_max_frame_size = if_getmtu(ifp) +
ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN;
@@ -830,17 +849,24 @@ ice_if_attach_post(if_ctx_t ctx)
* was previously in DSCP PFC mode.
*/
status = ice_aq_set_pfc_mode(&sc->hw, ICE_AQC_PFC_VLAN_BASED_PFC, NULL);
- if (status != ICE_SUCCESS)
+ if (status)
device_printf(sc->dev, "Setting pfc mode failed, status %s\n", ice_status_str(status));
ice_add_device_sysctls(sc);
+#ifdef PCI_IOV
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV)) {
+ err = ice_iov_attach(sc);
+ if (err == ENOMEM)
+ return (err);
+ }
+#endif /* PCI_IOV */
+
/* Get DCBX/LLDP state and start DCBX agent */
ice_init_dcb_setup(sc);
- /* Setup link configuration parameters */
- ice_init_link_configuration(sc);
- ice_update_link_status(sc, true);
+ /* Setup link, if PHY FW is ready */
+ ice_init_link(sc);
/* Configure interrupt causes for the administrative interrupt */
ice_configure_misc_interrupts(sc);
@@ -939,7 +965,7 @@ ice_if_detach(if_ctx_t ctx)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
struct ice_vsi *vsi = &sc->pf_vsi;
- enum ice_status status;
+ int status;
int i;
ASSERT_CTX_LOCKED(sc);
@@ -958,6 +984,11 @@ ice_if_detach(if_ctx_t ctx)
ice_destroy_mirror_interface(sc);
ice_rdma_pf_detach(sc);
+#ifdef PCI_IOV
+ if (ice_is_bit_set(sc->feat_cap, ICE_FEATURE_SRIOV))
+ ice_iov_detach(sc);
+#endif /* PCI_IOV */
+
/* Free allocated media types */
ifmedia_removeall(sc->media);
@@ -1681,6 +1712,11 @@ ice_if_msix_intr_assign(if_ctx_t ctx, int msix)
/* For future interrupt assignments */
sc->last_rid = rid + sc->irdma_vectors;
+#ifdef PCI_IOV
+ /* Create soft IRQ for handling VF resets */
+ iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_IOV, sc, 0, "iov");
+#endif
+
return (0);
fail:
for (; i >= 0; i--, vector--)
@@ -1860,7 +1896,7 @@ ice_if_promisc_set(if_ctx_t ctx, int flags)
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
- enum ice_status status;
+ int status;
bool promisc_enable = flags & IFF_PROMISC;
bool multi_enable = flags & IFF_ALLMULTI;
ice_declare_bitmap(promisc_mask, ICE_PROMISC_MAX);
@@ -2141,12 +2177,24 @@ ice_poll_for_media_avail(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
struct ice_port_info *pi = hw->port_info;
+ /* E830 only: There is no interrupt signaling when the PHY FW has
+ * finished loading, so poll for the status in the media task here
+ * if it was previously detected to still be loading.
+ */
+ if (ice_is_e830(hw) &&
+ ice_test_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING)) {
+ if (rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M)
+ ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
+ else
+ return;
+ }
+
if (ice_test_state(&sc->state, ICE_STATE_NO_MEDIA)) {
pi->phy.get_link_info = true;
ice_get_link_status(pi, &sc->link_up);
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
- enum ice_status status;
+ int status;
/* Re-enable link and re-apply user link settings */
if (ice_test_state(&sc->state, ICE_STATE_LINK_ACTIVE_ON_DOWN) ||
@@ -2270,7 +2318,12 @@ ice_transition_recovery_mode(struct ice_softc *sc)
ice_rdma_pf_detach(sc);
ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
+#ifdef PCI_IOV
+ if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
+ ice_iov_detach(sc);
+#else
ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+#endif /* PCI_IOV */
ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_vsi_del_txqs_ctx(vsi);
@@ -2318,7 +2371,12 @@ ice_transition_safe_mode(struct ice_softc *sc)
ice_rdma_pf_detach(sc);
ice_clear_bit(ICE_FEATURE_RDMA, sc->feat_cap);
+#ifdef PCI_IOV
+ if (ice_test_and_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en))
+ ice_iov_detach(sc);
+#else
ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+#endif /* PCI_IOV */
ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_cap);
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
@@ -2386,6 +2444,12 @@ ice_if_update_admin_status(if_ctx_t ctx)
if (pending > 0)
reschedule = true;
+ if (ice_is_generic_mac(&sc->hw)) {
+ ice_process_ctrlq(sc, ICE_CTL_Q_SB, &pending);
+ if (pending > 0)
+ reschedule = true;
+ }
+
ice_process_ctrlq(sc, ICE_CTL_Q_MAILBOX, &pending);
if (pending > 0)
reschedule = true;
@@ -2397,6 +2461,15 @@ ice_if_update_admin_status(if_ctx_t ctx)
/* Check and update link status */
ice_update_link_status(sc, false);
+#ifdef PCI_IOV
+ /*
+ * Schedule VFs' reset handler after global resets
+ * and other events were processed.
+ */
+ if (ice_testandclear_state(&sc->state, ICE_STATE_VFLR_PENDING))
+ iflib_iov_intr_deferred(ctx);
+#endif
+
/*
* If there are still messages to process, we need to reschedule
* ourselves. Otherwise, we can just re-enable the interrupt. We'll be
@@ -2564,7 +2637,7 @@ ice_rebuild(struct ice_softc *sc)
struct ice_hw *hw = &sc->hw;
device_t dev = sc->dev;
enum ice_ddp_state pkg_state;
- enum ice_status status;
+ int status;
int err;
sc->rebuild_ticks = ticks;
@@ -2602,7 +2675,9 @@ ice_rebuild(struct ice_softc *sc)
}
/* Re-enable FW logging. Keep going even if this fails */
- status = ice_fwlog_set(hw, &hw->fwlog_cfg);
+ status = ICE_SUCCESS;
+ if (hw->pf_id == 0)
+ status = ice_fwlog_set(hw, &hw->fwlog_cfg);
if (!status) {
/*
* We should have the most updated cached copy of the
@@ -2702,11 +2777,11 @@ ice_rebuild(struct ice_softc *sc)
if (hw->port_info->qos_cfg.is_sw_lldp)
ice_add_rx_lldp_filter(sc);
- /* Refresh link status */
+ /* Apply previous link settings and refresh link status, if PHY
+ * FW is ready.
+ */
ice_clear_state(&sc->state, ICE_STATE_LINK_STATUS_REPORTED);
- sc->hw.port_info->phy.get_link_info = true;
- ice_get_link_status(sc->hw.port_info, &sc->link_up);
- ice_update_link_status(sc, true);
+ ice_init_link(sc);
/* RDMA interface will be restarted by the stack re-init */
@@ -2726,7 +2801,7 @@ ice_rebuild(struct ice_softc *sc)
goto err_deinit_pf_vsi;
}
- log(LOG_INFO, "%s: device rebuild successful\n", sc->ifp->if_xname);
+ log(LOG_INFO, "%s: device rebuild successful\n", if_name(sc->ifp));
/* In order to completely restore device functionality, the iflib core
* needs to be reset. We need to request an iflib reset. Additionally,
@@ -2774,7 +2849,7 @@ static void
ice_handle_reset_event(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
device_t dev = sc->dev;
/* When a CORER, GLOBR, or EMPR is about to happen, the hardware will
@@ -2792,12 +2867,23 @@ ice_handle_reset_event(struct ice_softc *sc)
* resetting.
*/
IFLIB_CTX_UNLOCK(sc);
+
+#define ICE_EMPR_ADDL_WAIT_MSEC_SLOW 20000
+ if ((ice_is_e830(hw) || ice_is_e825c(hw)) &&
+ (((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_RESET_TYPE_M) >>
+ GLGEN_RSTAT_RESET_TYPE_S) == ICE_RESET_EMPR))
+ ice_msec_pause(ICE_EMPR_ADDL_WAIT_MSEC_SLOW);
+
status = ice_check_reset(hw);
IFLIB_CTX_LOCK(sc);
if (status) {
device_printf(dev, "Device never came out of reset, err %s\n",
ice_status_str(status));
+
ice_set_state(&sc->state, ICE_STATE_RESET_FAILED);
+ ice_clear_state(&sc->state, ICE_STATE_RESET_PFR_REQ);
+ ice_clear_state(&sc->state, ICE_STATE_PREPARED_FOR_RESET);
+ device_printf(dev, "Reset failed; please reload the device driver\n");
return;
}
@@ -2829,7 +2915,7 @@ static void
ice_handle_pf_reset_request(struct ice_softc *sc)
{
struct ice_hw *hw = &sc->hw;
- enum ice_status status;
+ int status;
/* Check for PF reset requests */
if (!ice_testandclear_state(&sc->state, ICE_STATE_RESET_PFR_REQ))
@@ -2880,7 +2966,13 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_HAS_PBA, sc->feat_cap);
ice_set_bit(ICE_FEATURE_DCB, sc->feat_cap);
ice_set_bit(ICE_FEATURE_TX_BALANCE, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_cap);
+ if (ice_is_e810(hw))
+ ice_set_bit(ICE_FEATURE_PHY_STATISTICS, sc->feat_en);
+
+ if (ice_is_e825c(hw))
+ ice_set_bit(ICE_FEATURE_DUAL_NAC, sc->feat_cap);
/* Disable features due to hardware limitations... */
if (!hw->func_caps.common_cap.rss_table_size)
ice_clear_bit(ICE_FEATURE_RSS, sc->feat_cap);
@@ -2915,6 +3007,12 @@ ice_init_device_features(struct ice_softc *sc)
ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_cap);
ice_set_bit(ICE_FEATURE_TEMP_SENSOR, sc->feat_en);
}
+
+ if (hw->func_caps.common_cap.next_cluster_id_support ||
+ hw->dev_caps.common_cap.next_cluster_id_support) {
+ ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_cap);
+ ice_set_bit(ICE_FEATURE_NEXT_CLUSTER_ID, sc->feat_en);
+ }
}
/**
@@ -2961,7 +3059,7 @@ static void
ice_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
- enum ice_status status;
+ int status;
ASSERT_CTX_LOCKED(sc);
@@ -2991,7 +3089,7 @@ static void
ice_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
- enum ice_status status;
+ int status;
ASSERT_CTX_LOCKED(sc);
@@ -3279,6 +3377,110 @@ ice_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event)
}
}
+/**
+ * ice_init_link - Do link configuration and link status reporting
+ * @sc: driver private structure
+ *
+ * Contains an extra check that skips link config when an E830 device
+ * has the "FW_LOADING"/"PHYBUSY" bit set in GL_MNG_FWSM.
+ */
+static void
+ice_init_link(struct ice_softc *sc)
+{
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+
+ /* Check if FW is ready before setting up link; defer setup to the
+ * admin task if it isn't.
+ */
+ if (ice_is_e830(hw) &&
+ (rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M)) {
+ ice_set_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
+ device_printf(dev,
+ "Link initialization is blocked by PHY FW initialization.\n");
+ device_printf(dev,
+ "Link initialization will continue after PHY FW initialization completes.\n");
+ /* Do not access PHY config while PHY FW is busy initializing */
+ } else {
+ ice_clear_state(&sc->state, ICE_STATE_PHY_FW_INIT_PENDING);
+ ice_init_link_configuration(sc);
+ ice_update_link_status(sc, true);
+ }
+
+}
+
+#ifdef PCI_IOV
+/**
+ * ice_if_iov_init - iov init handler for iflib
+ * @ctx: iflib context pointer
+ * @num_vfs: number of VFs to create
+ * @params: configuration parameters for the PF
+ *
+ * Configure the driver for SR-IOV mode. Used to set up resources such as
+ * memory before any VFs are created.
+ *
+ * @remark This is a wrapper for ice_iov_init
+ */
+static int
+ice_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ return ice_iov_init(sc, num_vfs, params);
+}
+
+/**
+ * ice_if_iov_uninit - iov uninit handler for iflib
+ * @ctx: iflib context pointer
+ *
+ * Destroys VFs and frees their memory and resources.
+ *
+ * @remark This is a wrapper for ice_iov_uninit
+ */
+static void
+ice_if_iov_uninit(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ ice_iov_uninit(sc);
+}
+
+/**
+ * ice_if_iov_vf_add - iov add vf handler for iflib
+ * @ctx: iflib context pointer
+ * @vfnum: index of VF to configure
+ * @params: configuration parameters for the VF
+ *
+ * Sets up the VF given by the vfnum index. This is called by the OS
+ * for each VF created by the PF driver after it is spawned.
+ *
+ * @remark This is a wrapper for ice_iov_vf_add
+ */
+static int
+ice_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+
+ return ice_iov_add_vf(sc, vfnum, params);
+}
+
+/**
+ * ice_if_vflr_handle - iov VFLR handler
+ * @ctx: iflib context pointer
+ *
+ * Performs the necessary teardown or setup required for a VF after
+ * a VFLR is initiated.
+ *
+ * @remark This is a wrapper for ice_iov_handle_vflr
+ */
+static void
+ice_if_vflr_handle(if_ctx_t ctx)
+{
+ struct ice_softc *sc = (struct ice_softc *)iflib_get_softc(ctx);
+ ice_iov_handle_vflr(sc);
+}
+#endif /* PCI_IOV */
+
extern struct if_txrx ice_subif_txrx;
/**
@@ -3518,12 +3720,7 @@ ice_setup_mirror_vsi(struct ice_mirr_if *mif)
mif->vsi = vsi;
/* Reserve VSI queue allocation from PF queues */
- ret = ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES);
- if (ret) {
- device_printf(dev, "%s: Unable to allocate mirror VSI queue maps (%d queues): %s\n",
- __func__, ICE_DEFAULT_VF_QUEUES, ice_err_str(ret));
- goto release_vsi;
- }
+ ice_alloc_vsi_qmap(vsi, ICE_DEFAULT_VF_QUEUES, ICE_DEFAULT_VF_QUEUES);
vsi->num_tx_queues = vsi->num_rx_queues = ICE_DEFAULT_VF_QUEUES;
/* Assign Tx queues from PF space */
diff --git a/sys/dev/ice/virtchnl.h b/sys/dev/ice/virtchnl.h
index d7a1d3f254bf..8af63296560e 100644
--- a/sys/dev/ice/virtchnl.h
+++ b/sys/dev/ice/virtchnl.h
@@ -2042,6 +2042,34 @@ struct virtchnl_quanta_cfg {
VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg);
+/*
+ * VIRTCHNL_OP_HQOS_READ_TREE
+ * VIRTCHNL_OP_HQOS_ELEM_ADD
+ * VIRTCHNL_OP_HQOS_ELEM_DEL
+ * VIRTCHNL_OP_HQOS_ELEM_BW_SET
+ * List of TC and queue HW QoS values
+ */
+struct virtchnl_hqos_cfg {
+#define VIRTCHNL_HQOS_ELEM_TYPE_NODE 0
+#define VIRTCHNL_HQOS_ELEM_TYPE_LEAF 1
+ u8 node_type;
+ u8 pad[7];
+ u32 teid;
+ u32 parent_teid;
+ u64 tx_max;
+ u64 tx_share;
+ u32 tx_priority;
+ u32 tx_weight;
+};
+VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_hqos_cfg);
+
+struct virtchnl_hqos_cfg_list {
+ u16 num_elem;
+ u8 pad[6];
+ struct virtchnl_hqos_cfg cfg[1];
+};
+VIRTCHNL_CHECK_STRUCT_LEN(48, virtchnl_hqos_cfg_list);
+
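+/* Illustrative sketch (not compiled): virtchnl_hqos_cfg_list uses the common
+ * virtchnl trailing-array pattern, so the wire size of a message carrying
+ * n_elem entries would be computed along these lines (and must stay within
+ * the u16 message-size limit noted below).
+ */
+#if 0
+static u16
+example_hqos_cfg_list_size(u16 n_elem)
+{
+	return sizeof(struct virtchnl_hqos_cfg_list) +
+	    (n_elem - 1) * sizeof(struct virtchnl_hqos_cfg);
+}
+#endif
+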
/* Since VF messages are limited by u16 size, precalculate the maximum possible
* values of nested elements in virtchnl structures that virtual channel can
* possibly handle in a single message.
diff --git a/sys/dev/ichiic/ig4_acpi.c b/sys/dev/ichiic/ig4_acpi.c
index 3f370ae7abb9..1275790a4412 100644
--- a/sys/dev/ichiic/ig4_acpi.c
+++ b/sys/dev/ichiic/ig4_acpi.c
@@ -55,6 +55,7 @@ static char *ig4iic_ids[] = {
"INT33C3",
"INT3432",
"INT3433",
+ "INT3446",
"80860F41",
"808622C1",
"AMDI0510",
diff --git a/sys/dev/ichiic/ig4_iic.c b/sys/dev/ichiic/ig4_iic.c
index 652d5a084f8b..cd88b28a2d52 100644
--- a/sys/dev/ichiic/ig4_iic.c
+++ b/sys/dev/ichiic/ig4_iic.c
@@ -72,7 +72,7 @@
#include <dev/ichiic/ig4_reg.h>
#include <dev/ichiic/ig4_var.h>
-#define DO_POLL(sc) (cold || kdb_active || SCHEDULER_STOPPED() || sc->poll)
+#define DO_POLL(sc) (cold || kdb_active || SCHEDULER_STOPPED())
/*
* tLOW, tHIGH periods of the SCL clock and maximal falling time of both
@@ -720,14 +720,11 @@ ig4iic_callback(device_t dev, int index, caddr_t data)
if ((how & IIC_WAIT) == 0) {
if (sx_try_xlock(&sc->call_lock) == 0)
error = IIC_EBUSBSY;
- else
- sc->poll = true;
} else
sx_xlock(&sc->call_lock);
break;
case IIC_RELEASE_BUS:
- sc->poll = false;
sx_unlock(&sc->call_lock);
break;
@@ -1045,7 +1042,7 @@ ig4iic_attach(ig4iic_softc_t *sc)
goto done;
ig4iic_get_fifo(sc);
- sc->iicbus = device_add_child(sc->dev, "iicbus", -1);
+ sc->iicbus = device_add_child(sc->dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus == NULL) {
device_printf(sc->dev, "iicbus driver not found\n");
error = ENXIO;
@@ -1069,11 +1066,7 @@ ig4iic_attach(ig4iic_softc_t *sc)
"Unable to setup irq: error %d\n", error);
}
- error = bus_generic_attach(sc->dev);
- if (error) {
- device_printf(sc->dev,
- "failed to attach child: error %d\n", error);
- }
+ bus_attach_children(sc->dev);
done:
return (error);
@@ -1084,13 +1077,9 @@ ig4iic_detach(ig4iic_softc_t *sc)
{
int error;
- if (device_is_attached(sc->dev)) {
- error = bus_generic_detach(sc->dev);
- if (error)
- return (error);
- }
- if (sc->iicbus)
- device_delete_child(sc->dev, sc->iicbus);
+ error = bus_generic_detach(sc->dev);
+ if (error)
+ return (error);
if (sc->intr_handle)
bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_handle);
diff --git a/sys/dev/ichiic/ig4_pci.c b/sys/dev/ichiic/ig4_pci.c
index 9f12e713cdeb..3a49e220e335 100644
--- a/sys/dev/ichiic/ig4_pci.c
+++ b/sys/dev/ichiic/ig4_pci.c
@@ -148,6 +148,12 @@ static int ig4iic_pci_detach(device_t dev);
#define PCI_CHIP_GEMINILAKE_I2C_5 0x31b68086
#define PCI_CHIP_GEMINILAKE_I2C_6 0x31b88086
#define PCI_CHIP_GEMINILAKE_I2C_7 0x31ba8086
+#define PCI_CHIP_JASPERLAKE_I2C_0 0x4de88086
+#define PCI_CHIP_JASPERLAKE_I2C_1 0x4de98086
+#define PCI_CHIP_JASPERLAKE_I2C_2 0x4dea8086
+#define PCI_CHIP_JASPERLAKE_I2C_3 0x4deb8086
+#define PCI_CHIP_JASPERLAKE_I2C_4 0x4dc58086
+#define PCI_CHIP_JASPERLAKE_I2C_5 0x4dc68086
#define PCI_CHIP_ALDERLAKE_P_I2C_0 0x51e88086
#define PCI_CHIP_ALDERLAKE_P_I2C_1 0x51e98086
#define PCI_CHIP_ALDERLAKE_P_I2C_2 0x51ea8086
@@ -168,6 +174,24 @@ static int ig4iic_pci_detach(device_t dev);
#define PCI_CHIP_ALDERLAKE_M_I2C_3 0x54eb8086
#define PCI_CHIP_ALDERLAKE_M_I2C_4 0x54c58086
#define PCI_CHIP_ALDERLAKE_M_I2C_5 0x54c68086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_0 0x7a4c8086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_1 0x7a4d8086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_2 0x7a4e8086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_3 0x7a4f8086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_4 0x7a7c8086
+#define PCI_CHIP_RAPTORLAKE_S_I2C_5 0x7a7d8086
+#define PCI_CHIP_METEORLAKE_M_I2C_0 0x7e788086
+#define PCI_CHIP_METEORLAKE_M_I2C_1 0x7e798086
+#define PCI_CHIP_METEORLAKE_M_I2C_2 0x7e508086
+#define PCI_CHIP_METEORLAKE_M_I2C_3 0x7e518086
+#define PCI_CHIP_METEORLAKE_M_I2C_4 0x7e7a8086
+#define PCI_CHIP_METEORLAKE_M_I2C_5 0x7e7b8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_0 0x77788086
+#define PCI_CHIP_ARROWLAKE_U_I2C_1 0x77798086
+#define PCI_CHIP_ARROWLAKE_U_I2C_2 0x777a8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_3 0x777b8086
+#define PCI_CHIP_ARROWLAKE_U_I2C_4 0x77508086
+#define PCI_CHIP_ARROWLAKE_U_I2C_5 0x77518086
struct ig4iic_pci_device {
uint32_t devid;
@@ -260,6 +284,12 @@ static struct ig4iic_pci_device ig4iic_pci_devices[] = {
{ PCI_CHIP_GEMINILAKE_I2C_5, "Intel Gemini Lake I2C Controller-5", IG4_GEMINILAKE},
{ PCI_CHIP_GEMINILAKE_I2C_6, "Intel Gemini Lake I2C Controller-6", IG4_GEMINILAKE},
{ PCI_CHIP_GEMINILAKE_I2C_7, "Intel Gemini Lake I2C Controller-7", IG4_GEMINILAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_0, "Intel Jasper Lake I2C Controller-0", IG4_TIGERLAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_1, "Intel Jasper Lake I2C Controller-1", IG4_TIGERLAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_2, "Intel Jasper Lake I2C Controller-2", IG4_TIGERLAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_3, "Intel Jasper Lake I2C Controller-3", IG4_TIGERLAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_4, "Intel Jasper Lake I2C Controller-4", IG4_TIGERLAKE},
+ { PCI_CHIP_JASPERLAKE_I2C_5, "Intel Jasper Lake I2C Controller-5", IG4_TIGERLAKE},
{ PCI_CHIP_ALDERLAKE_P_I2C_0, "Intel Alder Lake-P I2C Controller-0", IG4_TIGERLAKE},
{ PCI_CHIP_ALDERLAKE_P_I2C_1, "Intel Alder Lake-P I2C Controller-1", IG4_TIGERLAKE},
{ PCI_CHIP_ALDERLAKE_P_I2C_2, "Intel Alder Lake-P I2C Controller-2", IG4_TIGERLAKE},
@@ -280,6 +310,24 @@ static struct ig4iic_pci_device ig4iic_pci_devices[] = {
{ PCI_CHIP_ALDERLAKE_M_I2C_3, "Intel Alder Lake-M I2C Controller-3", IG4_TIGERLAKE},
{ PCI_CHIP_ALDERLAKE_M_I2C_4, "Intel Alder Lake-M I2C Controller-4", IG4_TIGERLAKE},
{ PCI_CHIP_ALDERLAKE_M_I2C_5, "Intel Alder Lake-M I2C Controller-5", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_0, "Intel Raptor Lake-S I2C Controller-0", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_1, "Intel Raptor Lake-S I2C Controller-1", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_2, "Intel Raptor Lake-S I2C Controller-2", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_3, "Intel Raptor Lake-S I2C Controller-3", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_4, "Intel Raptor Lake-S I2C Controller-4", IG4_TIGERLAKE},
+ { PCI_CHIP_RAPTORLAKE_S_I2C_5, "Intel Raptor Lake-S I2C Controller-5", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_0, "Intel Meteor Lake-M I2C Controller-0", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_1, "Intel Meteor Lake-M I2C Controller-1", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_2, "Intel Meteor Lake-M I2C Controller-2", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_3, "Intel Meteor Lake-M I2C Controller-3", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_4, "Intel Meteor Lake-M I2C Controller-4", IG4_TIGERLAKE},
+ { PCI_CHIP_METEORLAKE_M_I2C_5, "Intel Meteor Lake-M I2C Controller-5", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_0, "Intel Arrow Lake-H/U I2C Controller-0", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_1, "Intel Arrow Lake-H/U I2C Controller-1", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_2, "Intel Arrow Lake-H/U I2C Controller-2", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_3, "Intel Arrow Lake-H/U I2C Controller-3", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_4, "Intel Arrow Lake-H/U I2C Controller-4", IG4_TIGERLAKE},
+ { PCI_CHIP_ARROWLAKE_U_I2C_5, "Intel Arrow Lake-H/U I2C Controller-5", IG4_TIGERLAKE},
};
static int
diff --git a/sys/dev/ichiic/ig4_var.h b/sys/dev/ichiic/ig4_var.h
index 989cf23779a2..0d000ab34c1d 100644
--- a/sys/dev/ichiic/ig4_var.h
+++ b/sys/dev/ichiic/ig4_var.h
@@ -93,7 +93,6 @@ struct ig4iic_softc {
bool platform_attached : 1;
bool use_10bit : 1;
bool slave_valid : 1;
- bool poll: 1;
/*
* Locking semantics:
diff --git a/sys/dev/ichsmb/ichsmb.c b/sys/dev/ichsmb/ichsmb.c
index 94fc03d2bc67..c5e9e2f1b9ed 100644
--- a/sys/dev/ichsmb/ichsmb.c
+++ b/sys/dev/ichsmb/ichsmb.c
@@ -112,7 +112,8 @@ ichsmb_attach(device_t dev)
mtx_init(&sc->mutex, device_get_nameunit(dev), "ichsmb", MTX_DEF);
/* Add child: an instance of the "smbus" device */
- if ((sc->smb = device_add_child(dev, DRIVER_SMBUS, -1)) == NULL) {
+ if ((sc->smb = device_add_child(dev, DRIVER_SMBUS,
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "no \"%s\" child found\n", DRIVER_SMBUS);
error = ENXIO;
goto fail;
@@ -130,7 +131,8 @@ ichsmb_attach(device_t dev)
}
/* Attach children when interrupts are available */
- return (bus_delayed_attach_children(dev));
+ bus_delayed_attach_children(dev);
+ return (0);
fail:
mtx_destroy(&sc->mutex);
return (error);
@@ -695,7 +697,6 @@ ichsmb_detach(device_t dev)
error = bus_generic_detach(dev);
if (error)
return (error);
- device_delete_child(dev, sc->smb);
ichsmb_release_resources(sc);
mtx_destroy(&sc->mutex);
diff --git a/sys/dev/ichsmb/ichsmb_pci.c b/sys/dev/ichsmb/ichsmb_pci.c
index 675a15daaf1b..e4d87fe1fed2 100644
--- a/sys/dev/ichsmb/ichsmb_pci.c
+++ b/sys/dev/ichsmb/ichsmb_pci.c
@@ -107,12 +107,16 @@
#define ID_COMETLAKE2 0x06a3
#define ID_TIGERLAKE 0xa0a3
#define ID_TIGERLAKE2 0x43a3
+#define ID_ELKHARTLAKE 0x4b23
#define ID_GEMINILAKE 0x31d4
#define ID_CEDARFORK 0x18df
#define ID_ICELAKE 0x34a3
#define ID_ALDERLAKE 0x7aa3
#define ID_ALDERLAKE2 0x51a3
#define ID_ALDERLAKE3 0x54a3
+#define ID_METEORLAKE 0x7e22
+#define ID_METEORLAKE2 0x7f23
+#define ID_METEORLAKE3 0xae22
static const struct pci_device_table ichsmb_devices[] = {
{ PCI_DEV(PCI_VENDOR_INTEL, ID_82801AA),
@@ -203,6 +207,8 @@ static const struct pci_device_table ichsmb_devices[] = {
PCI_DESCR("Intel Tiger Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_TIGERLAKE2),
PCI_DESCR("Intel Tiger Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_ELKHARTLAKE),
+ PCI_DESCR("Intel Elkhart Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_GEMINILAKE),
PCI_DESCR("Intel Gemini Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_CEDARFORK),
@@ -215,6 +221,12 @@ static const struct pci_device_table ichsmb_devices[] = {
PCI_DESCR("Intel Alder Lake SMBus controller") },
{ PCI_DEV(PCI_VENDOR_INTEL, ID_ALDERLAKE3),
PCI_DESCR("Intel Alder Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_METEORLAKE),
+ PCI_DESCR("Intel Meteor Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_METEORLAKE2),
+ PCI_DESCR("Intel Meteor Lake SMBus controller") },
+ { PCI_DEV(PCI_VENDOR_INTEL, ID_METEORLAKE3),
+ PCI_DESCR("Intel Meteor Lake SMBus controller") },
};
/* Internal functions */
diff --git a/sys/dev/ichwd/i6300esbwd.c b/sys/dev/ichwd/i6300esbwd.c
new file mode 100644
index 000000000000..03d504a350aa
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.c
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * Reference: Intel 6300ESB Controller Hub Datasheet Section 16
+ */
+
+#include <sys/param.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+#include <sys/errno.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <machine/resource.h>
+#include <sys/watchdog.h>
+
+#include <dev/pci/pcireg.h>
+
+#include <dev/ichwd/ichwd.h>
+#include <dev/ichwd/i6300esbwd.h>
+
+#include <x86/pci_cfgreg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+struct i6300esbwd_softc {
+ device_t dev;
+ int res_id;
+ struct resource *res;
+ eventhandler_tag ev_tag;
+ bool locked;
+};
+
+static const struct i6300esbwd_pci_id {
+ uint16_t id;
+ const char *name;
+} i6300esbwd_pci_devices[] = {
+ { DEVICEID_6300ESB_2, "6300ESB Watchdog Timer" },
+};
+
+static uint16_t __unused
+i6300esbwd_cfg_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_CONFIG_REG, 2));
+}
+
+static void
+i6300esbwd_cfg_write(struct i6300esbwd_softc *sc, uint16_t val)
+{
+ pci_write_config(sc->dev, WDT_CONFIG_REG, val, 2);
+}
+
+static uint8_t
+i6300esbwd_lock_read(struct i6300esbwd_softc *sc)
+{
+ return (pci_read_config(sc->dev, WDT_LOCK_REG, 1));
+}
+
+static void
+i6300esbwd_lock_write(struct i6300esbwd_softc *sc, uint8_t val)
+{
+ pci_write_config(sc->dev, WDT_LOCK_REG, val, 1);
+}
+
+/*
+ * According to Intel 6300ESB I/O Controller Hub Datasheet Section 16.5.2,
+ * the registers should be unlocked before being modified. The way to
+ * unlock them is to write 0x80 and then 0x86 to the reload register.
+ */
+static void
+i6300esbwd_unlock_res(struct i6300esbwd_softc *sc)
+{
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_1_VAL);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_UNLOCK_SEQ_2_VAL);
+}
+
+static int
+i6300esbwd_sysctl_locked(SYSCTL_HANDLER_ARGS)
+{
+ struct i6300esbwd_softc *sc = (struct i6300esbwd_softc *)arg1;
+ int error;
+ int result;
+
+ result = sc->locked;
+ error = sysctl_handle_int(oidp, &result, 0, req);
+
+ if (error || !req->newptr)
+ return (error);
+
+ if (result == 1 && !sc->locked) {
+ i6300esbwd_lock_write(sc, i6300esbwd_lock_read(sc) | WDT_LOCK);
+ sc->locked = true;
+ }
+
+ return (0);
+}
+
+static void
+i6300esbwd_event(void *arg, unsigned int cmd, int *error)
+{
+ struct i6300esbwd_softc *sc = arg;
+ uint32_t timeout;
+ uint16_t regval;
+
+ cmd &= WD_INTERVAL;
+ if (cmd != 0 &&
+ (cmd < WD_TO_1MS || (cmd - WD_TO_1MS) >= WDT_PRELOAD_BIT)) {
+ *error = EINVAL;
+ return;
+ }
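+ /* watchdog(9) interval codes are powers of two in nanoseconds;
+ * subtracting WD_TO_1MS yields a power-of-two preload count for the
+ * requested timeout. The shift is only meaningful for nonzero cmd;
+ * the disable path below never uses it.
+ */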
+ timeout = 1 << (cmd - WD_TO_1MS);
+
+ /* reset the timer in case a timeout is about to occur */
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!cmd) {
+ /*
+ * When the lock is enabled, we are unable to overwrite the
+ * LOCK register.
+ */
+ if (sc->locked)
+ *error = EPERM;
+ else
+ i6300esbwd_lock_write(sc,
+ i6300esbwd_lock_read(sc) & ~WDT_ENABLE);
+ return;
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_1_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_4(sc->res, WDT_PRELOAD_2_REG, timeout);
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD);
+
+ if (!sc->locked) {
+ i6300esbwd_lock_write(sc, WDT_ENABLE);
+ regval = i6300esbwd_lock_read(sc);
+ sc->locked = regval & WDT_LOCK;
+ }
+}
+
+static int
+i6300esbwd_probe(device_t dev)
+{
+ const struct i6300esbwd_pci_id *pci_id;
+ uint16_t pci_dev_id;
+ int err = ENXIO;
+
+ if (pci_get_vendor(dev) != VENDORID_INTEL)
+ goto end;
+
+ pci_dev_id = pci_get_device(dev);
+ for (pci_id = i6300esbwd_pci_devices;
+ pci_id < i6300esbwd_pci_devices + nitems(i6300esbwd_pci_devices);
+ ++pci_id) {
+ if (pci_id->id == pci_dev_id) {
+ device_set_desc(dev, pci_id->name);
+ err = BUS_PROBE_DEFAULT;
+ break;
+ }
+ }
+
+end:
+ return (err);
+}
+
+static int
+i6300esbwd_attach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+ uint16_t regval;
+
+ sc->dev = dev;
+ sc->res_id = PCIR_BAR(0);
+ sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->res_id,
+ RF_ACTIVE);
+ if (sc->res == NULL) {
+ device_printf(dev, "unable to map memory region\n");
+ return (ENXIO);
+ }
+
+ i6300esbwd_cfg_write(sc, WDT_INT_TYPE_DISABLED_VAL);
+ regval = i6300esbwd_lock_read(sc);
+ if (regval & WDT_LOCK)
+ sc->locked = true;
+ else {
+ sc->locked = false;
+ i6300esbwd_lock_write(sc, WDT_TOUT_CNF_WT_MODE);
+ }
+
+ i6300esbwd_unlock_res(sc);
+ bus_write_2(sc->res, WDT_RELOAD_REG, WDT_RELOAD | WDT_TIMEOUT);
+
+ sc->ev_tag = EVENTHANDLER_REGISTER(watchdog_list, i6300esbwd_event, sc,
+ 0);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "locked",
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ i6300esbwd_sysctl_locked, "I",
+ "Lock the timer so that we cannot disable it");
+
+ return (0);
+}
+
+static int
+i6300esbwd_detach(device_t dev)
+{
+ struct i6300esbwd_softc *sc = device_get_softc(dev);
+
+ if (sc->ev_tag)
+ EVENTHANDLER_DEREGISTER(watchdog_list, sc->ev_tag);
+
+ if (sc->res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->res_id, sc->res);
+
+ return (0);
+}
+
+static device_method_t i6300esbwd_methods[] = {
+ DEVMETHOD(device_probe, i6300esbwd_probe),
+ DEVMETHOD(device_attach, i6300esbwd_attach),
+ DEVMETHOD(device_detach, i6300esbwd_detach),
+ DEVMETHOD(device_shutdown, i6300esbwd_detach),
+ DEVMETHOD_END
+};
+
+static driver_t i6300esbwd_driver = {
+ "i6300esbwd",
+ i6300esbwd_methods,
+ sizeof(struct i6300esbwd_softc),
+};
+
+DRIVER_MODULE(i6300esbwd, pci, i6300esbwd_driver, NULL, NULL);
diff --git a/sys/dev/ichwd/i6300esbwd.h b/sys/dev/ichwd/i6300esbwd.h
new file mode 100644
index 000000000000..39ed5d5a84f6
--- /dev/null
+++ b/sys/dev/ichwd/i6300esbwd.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef _I6300ESBWD_H_
+#define _I6300ESBWD_H_
+
+#define WDT_CONFIG_REG 0x60
+#define WDT_LOCK_REG 0x68
+
+#define WDT_PRELOAD_1_REG 0x00
+#define WDT_PRELOAD_2_REG 0x04
+#define WDT_INTR_REG 0x08
+#define WDT_RELOAD_REG 0x0C
+
+/* For config register */
+#define WDT_OUTPUT_EN (0x1 << 5)
+#define WDT_PRE_SEL (0x1 << 2)
+#define WDT_INT_TYPE_BITS (0x3)
+#define WDT_INT_TYPE_IRQ_VAL (0x0)
+#define WDT_INT_TYPE_RES_VAL (0x1)
+#define WDT_INT_TYPE_SMI_VAL (0x2)
+#define WDT_INT_TYPE_DISABLED_VAL (0x3)
+
+/* For lock register */
+#define WDT_TOUT_CNF_WT_MODE (0x0 << 2)
+#define WDT_TOUT_CNF_FR_MODE (0x1 << 2)
+#define WDT_ENABLE (0x02)
+#define WDT_LOCK (0x01)
+
+/* For preload 1/2 registers */
+#define WDT_PRELOAD_BIT 20
+#define WDT_PRELOAD_BITS ((0x1 << WDT_PRELOAD_BIT) - 1)
+
+/* For interrupt register */
+#define WDT_INTR_ACT (0x01 << 0)
+
+/* For reload register */
+#define WDT_TIMEOUT (0x01 << 9)
+#define WDT_RELOAD (0x01 << 8)
+#define WDT_UNLOCK_SEQ_1_VAL 0x80
+#define WDT_UNLOCK_SEQ_2_VAL 0x86
+
+#endif /* _I6300ESBWD_H_ */
diff --git a/sys/dev/ichwd/ichwd.c b/sys/dev/ichwd/ichwd.c
index cade2cc4fb45..5481553cc175 100644
--- a/sys/dev/ichwd/ichwd.c
+++ b/sys/dev/ichwd/ichwd.c
@@ -90,7 +90,7 @@ static struct ichwd_device ichwd_devices[] = {
{ DEVICEID_82801E, "Intel 82801E watchdog timer", 5, 1 },
{ DEVICEID_82801EB, "Intel 82801EB watchdog timer", 5, 1 },
{ DEVICEID_82801EBR, "Intel 82801EB/ER watchdog timer", 5, 1 },
- { DEVICEID_6300ESB, "Intel 6300ESB watchdog timer", 5, 1 },
+ { DEVICEID_6300ESB_1, "Intel 6300ESB watchdog timer", 5, 1 },
{ DEVICEID_82801FBR, "Intel 82801FB/FR watchdog timer", 6, 2 },
{ DEVICEID_ICH6M, "Intel ICH6M watchdog timer", 6, 2 },
{ DEVICEID_ICH6W, "Intel ICH6W watchdog timer", 6, 2 },
diff --git a/sys/dev/ichwd/ichwd.h b/sys/dev/ichwd/ichwd.h
index 90fda08b74c1..72d0ca1cd6aa 100644
--- a/sys/dev/ichwd/ichwd.h
+++ b/sys/dev/ichwd/ichwd.h
@@ -151,7 +151,8 @@ struct ichwd_softc {
#define DEVICEID_82801E 0x2450
#define DEVICEID_82801EB 0x24dc
#define DEVICEID_82801EBR 0x24d0
-#define DEVICEID_6300ESB 0x25a1
+#define DEVICEID_6300ESB_1 0x25a1
+#define DEVICEID_6300ESB_2 0x25ab
#define DEVICEID_82801FBR 0x2640
#define DEVICEID_ICH6M 0x2641
#define DEVICEID_ICH6W 0x2642
diff --git a/sys/dev/ida/ida.c b/sys/dev/ida/ida.c
index 390d07d010d6..fa135b7fc9ed 100644
--- a/sys/dev/ida/ida.c
+++ b/sys/dev/ida/ida.c
@@ -301,7 +301,7 @@ ida_setup(struct ida_softc *ida)
mtx_unlock(&ida->lock);
for (i = 0; i < cinfo.num_drvs; i++) {
- child = device_add_child(ida->dev, /*"idad"*/NULL, -1);
+ child = device_add_child(ida->dev, /*"idad"*/NULL, DEVICE_UNIT_ANY);
if (child != NULL)
device_set_ivars(child, (void *)(intptr_t)i);
}
@@ -333,7 +333,7 @@ ida_startup(void *arg)
config_intrhook_disestablish(&ida->ich);
bus_topo_lock();
- bus_generic_attach(ida->dev);
+ bus_attach_children(ida->dev);
bus_topo_unlock();
}
@@ -348,9 +348,6 @@ ida_detach(device_t dev)
error = bus_generic_detach(dev);
if (error)
return (error);
- error = device_delete_children(dev);
- if (error)
- return (error);
/*
* XXX
diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
index bfa33c82f7ba..f199a128c783 100644
--- a/sys/dev/igc/if_igc.c
+++ b/sys/dev/igc/if_igc.c
@@ -1,9 +1,9 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2001-2024, Intel Corporation
* Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
- * All rights reserved.
- * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
+ * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -49,22 +49,38 @@
static const pci_vendor_info_t igc_vendor_info_array[] =
{
/* Intel(R) PRO/1000 Network Connection - igc */
- PVID(0x8086, IGC_DEV_ID_I225_LM, "Intel(R) Ethernet Controller I225-LM"),
- PVID(0x8086, IGC_DEV_ID_I225_V, "Intel(R) Ethernet Controller I225-V"),
- PVID(0x8086, IGC_DEV_ID_I225_K, "Intel(R) Ethernet Controller I225-K"),
- PVID(0x8086, IGC_DEV_ID_I225_I, "Intel(R) Ethernet Controller I225-I"),
- PVID(0x8086, IGC_DEV_ID_I220_V, "Intel(R) Ethernet Controller I220-V"),
- PVID(0x8086, IGC_DEV_ID_I225_K2, "Intel(R) Ethernet Controller I225-K(2)"),
- PVID(0x8086, IGC_DEV_ID_I225_LMVP, "Intel(R) Ethernet Controller I225-LMvP(2)"),
- PVID(0x8086, IGC_DEV_ID_I226_K, "Intel(R) Ethernet Controller I226-K"),
- PVID(0x8086, IGC_DEV_ID_I226_LMVP, "Intel(R) Ethernet Controller I226-LMvP"),
- PVID(0x8086, IGC_DEV_ID_I225_IT, "Intel(R) Ethernet Controller I225-IT(2)"),
- PVID(0x8086, IGC_DEV_ID_I226_LM, "Intel(R) Ethernet Controller I226-LM"),
- PVID(0x8086, IGC_DEV_ID_I226_V, "Intel(R) Ethernet Controller I226-V"),
- PVID(0x8086, IGC_DEV_ID_I226_IT, "Intel(R) Ethernet Controller I226-IT"),
- PVID(0x8086, IGC_DEV_ID_I221_V, "Intel(R) Ethernet Controller I221-V"),
- PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM, "Intel(R) Ethernet Controller I226(blankNVM)"),
- PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM, "Intel(R) Ethernet Controller I225(blankNVM)"),
+ PVID(0x8086, IGC_DEV_ID_I225_LM,
+ "Intel(R) Ethernet Controller I225-LM"),
+ PVID(0x8086, IGC_DEV_ID_I225_V,
+ "Intel(R) Ethernet Controller I225-V"),
+ PVID(0x8086, IGC_DEV_ID_I225_K,
+ "Intel(R) Ethernet Controller I225-K"),
+ PVID(0x8086, IGC_DEV_ID_I225_I,
+ "Intel(R) Ethernet Controller I225-IT"),
+ PVID(0x8086, IGC_DEV_ID_I220_V,
+ "Intel(R) Ethernet Controller I220-V"),
+ PVID(0x8086, IGC_DEV_ID_I225_K2,
+ "Intel(R) Ethernet Controller I225-K(2)"),
+ PVID(0x8086, IGC_DEV_ID_I225_LMVP,
+ "Intel(R) Ethernet Controller I225-LMvP(2)"),
+ PVID(0x8086, IGC_DEV_ID_I226_K,
+ "Intel(R) Ethernet Controller I226-K"),
+ PVID(0x8086, IGC_DEV_ID_I226_LMVP,
+ "Intel(R) Ethernet Controller I226-LMvP"),
+ PVID(0x8086, IGC_DEV_ID_I225_IT,
+ "Intel(R) Ethernet Controller I225-IT(2)"),
+ PVID(0x8086, IGC_DEV_ID_I226_LM,
+ "Intel(R) Ethernet Controller I226-LM"),
+ PVID(0x8086, IGC_DEV_ID_I226_V,
+ "Intel(R) Ethernet Controller I226-V"),
+ PVID(0x8086, IGC_DEV_ID_I226_IT,
+ "Intel(R) Ethernet Controller I226-IT"),
+ PVID(0x8086, IGC_DEV_ID_I221_V,
+ "Intel(R) Ethernet Controller I221-V"),
+ PVID(0x8086, IGC_DEV_ID_I226_BLANK_NVM,
+ "Intel(R) Ethernet Controller I226(blankNVM)"),
+ PVID(0x8086, IGC_DEV_ID_I225_BLANK_NVM,
+ "Intel(R) Ethernet Controller I225(blankNVM)"),
/* required last entry */
PVID_END
};
@@ -72,65 +88,71 @@ static const pci_vendor_info_t igc_vendor_info_array[] =
/*********************************************************************
* Function prototypes
*********************************************************************/
-static void *igc_register(device_t dev);
-static int igc_if_attach_pre(if_ctx_t ctx);
-static int igc_if_attach_post(if_ctx_t ctx);
-static int igc_if_detach(if_ctx_t ctx);
-static int igc_if_shutdown(if_ctx_t ctx);
-static int igc_if_suspend(if_ctx_t ctx);
-static int igc_if_resume(if_ctx_t ctx);
-
-static int igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets);
-static int igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets);
-static void igc_if_queues_free(if_ctx_t ctx);
+static void *igc_register(device_t);
+static int igc_if_attach_pre(if_ctx_t);
+static int igc_if_attach_post(if_ctx_t);
+static int igc_if_detach(if_ctx_t);
+static int igc_if_shutdown(if_ctx_t);
+static int igc_if_suspend(if_ctx_t);
+static int igc_if_resume(if_ctx_t);
+
+static int igc_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int igc_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static void igc_if_queues_free(if_ctx_t);
static uint64_t igc_if_get_counter(if_ctx_t, ift_counter);
-static void igc_if_init(if_ctx_t ctx);
-static void igc_if_stop(if_ctx_t ctx);
+static void igc_if_init(if_ctx_t);
+static void igc_if_stop(if_ctx_t);
static void igc_if_media_status(if_ctx_t, struct ifmediareq *);
-static int igc_if_media_change(if_ctx_t ctx);
-static int igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu);
-static void igc_if_timer(if_ctx_t ctx, uint16_t qid);
-static void igc_if_watchdog_reset(if_ctx_t ctx);
-static bool igc_if_needs_restart(if_ctx_t ctx, enum iflib_restart_event event);
-
-static void igc_identify_hardware(if_ctx_t ctx);
-static int igc_allocate_pci_resources(if_ctx_t ctx);
-static void igc_free_pci_resources(if_ctx_t ctx);
-static void igc_reset(if_ctx_t ctx);
-static int igc_setup_interface(if_ctx_t ctx);
-static int igc_setup_msix(if_ctx_t ctx);
-
-static void igc_initialize_transmit_unit(if_ctx_t ctx);
-static void igc_initialize_receive_unit(if_ctx_t ctx);
-
-static void igc_if_intr_enable(if_ctx_t ctx);
-static void igc_if_intr_disable(if_ctx_t ctx);
-static int igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid);
-static int igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid);
-static void igc_if_multi_set(if_ctx_t ctx);
-static void igc_if_update_admin_status(if_ctx_t ctx);
-static void igc_if_debug(if_ctx_t ctx);
-static void igc_update_stats_counters(struct igc_adapter *);
-static void igc_add_hw_stats(struct igc_adapter *adapter);
-static int igc_if_set_promisc(if_ctx_t ctx, int flags);
-static void igc_setup_vlan_hw_support(if_ctx_t ctx);
+static int igc_if_media_change(if_ctx_t);
+static int igc_if_mtu_set(if_ctx_t, uint32_t);
+static void igc_if_timer(if_ctx_t, uint16_t);
+static void igc_if_watchdog_reset(if_ctx_t);
+static bool igc_if_needs_restart(if_ctx_t, enum iflib_restart_event);
+
+static void igc_identify_hardware(if_ctx_t);
+static int igc_allocate_pci_resources(if_ctx_t);
+static void igc_free_pci_resources(if_ctx_t);
+static void igc_reset(if_ctx_t);
+static int igc_setup_interface(if_ctx_t);
+static int igc_setup_msix(if_ctx_t);
+
+static void igc_initialize_transmit_unit(if_ctx_t);
+static void igc_initialize_receive_unit(if_ctx_t);
+
+static void igc_if_intr_enable(if_ctx_t);
+static void igc_if_intr_disable(if_ctx_t);
+static int igc_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
+static int igc_if_tx_queue_intr_enable(if_ctx_t, uint16_t);
+static void igc_if_multi_set(if_ctx_t);
+static void igc_if_update_admin_status(if_ctx_t);
+static void igc_if_debug(if_ctx_t);
+static void igc_update_stats_counters(struct igc_softc *);
+static void igc_add_hw_stats(struct igc_softc *);
+static int igc_if_set_promisc(if_ctx_t, int);
+static void igc_setup_vlan_hw_support(if_ctx_t);
+static void igc_fw_version(struct igc_softc *);
+static void igc_sbuf_fw_version(struct igc_fw_version *, struct sbuf *);
+static void igc_print_fw_version(struct igc_softc *);
+static int igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS);
-static void igc_print_nvm_info(struct igc_adapter *);
+static void igc_print_nvm_info(struct igc_softc *);
static int igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS);
static int igc_get_rs(SYSCTL_HANDLER_ARGS);
-static void igc_print_debug_info(struct igc_adapter *);
+static void igc_print_debug_info(struct igc_softc *);
static int igc_is_valid_ether_addr(u8 *);
-static int igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS);
-static void igc_add_int_delay_sysctl(struct igc_adapter *, const char *,
- const char *, struct igc_int_delay_info *, int, int);
+static void igc_neweitr(struct igc_softc *, struct igc_rx_queue *,
+ struct tx_ring *, struct rx_ring *);
+static int igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
/* Management and WOL Support */
-static void igc_get_hw_control(struct igc_adapter *);
-static void igc_release_hw_control(struct igc_adapter *);
-static void igc_get_wakeup(if_ctx_t ctx);
-static void igc_enable_wakeup(if_ctx_t ctx);
+static void igc_get_hw_control(struct igc_softc *);
+static void igc_release_hw_control(struct igc_softc *);
+static void igc_get_wakeup(if_ctx_t);
+static void igc_enable_wakeup(if_ctx_t);
-int igc_intr(void *arg);
+int igc_intr(void *);
/* MSI-X handlers */
static int igc_if_msix_intr_assign(if_ctx_t, int);
@@ -138,11 +160,12 @@ static int igc_msix_link(void *);
static void igc_handle_link(void *context);
static int igc_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int igc_sysctl_dmac(SYSCTL_HANDLER_ARGS);
static int igc_sysctl_eee(SYSCTL_HANDLER_ARGS);
static int igc_get_regs(SYSCTL_HANDLER_ARGS);
-static void igc_configure_queues(struct igc_adapter *adapter);
+static void igc_configure_queues(struct igc_softc *);
/*********************************************************************
@@ -161,7 +184,7 @@ static device_method_t igc_methods[] = {
};
static driver_t igc_driver = {
- "igc", igc_methods, sizeof(struct igc_adapter),
+ "igc", igc_methods, sizeof(struct igc_softc),
};
DRIVER_MODULE(igc, pci, igc_driver, 0, 0);
@@ -204,19 +227,13 @@ static device_method_t igc_if_methods[] = {
};
static driver_t igc_if_driver = {
- "igc_if", igc_if_methods, sizeof(struct igc_adapter)
+ "igc_if", igc_if_methods, sizeof(struct igc_softc)
};
/*********************************************************************
* Tunable default values.
*********************************************************************/
-#define IGC_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000)
-#define IGC_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024)
-
-#define MAX_INTS_PER_SEC 8000
-#define DEFAULT_ITR (1000000000/(MAX_INTS_PER_SEC * 256))
-
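/*
 * Editor's note -- a sketch, not part of the diff: the deleted DEFAULT_ITR
 * macro encodes the legacy ITR register's 256 ns granularity, so the
 * 8000 ints/s ceiling became 1000000000 / (8000 * 256) = 488 register
 * units, i.e. roughly 125 us between interrupts. The replacement code
 * converts rates through IGC_INTS_TO_EITR() instead; see the sketch near
 * igc_configure_queues() below.
 */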
/* Allow common code without TSO */
#ifndef CSUM_TSO
#define CSUM_TSO 0
@@ -229,28 +246,13 @@ static int igc_disable_crc_stripping = 0;
SYSCTL_INT(_hw_igc, OID_AUTO, disable_crc_stripping, CTLFLAG_RDTUN,
&igc_disable_crc_stripping, 0, "Disable CRC Stripping");
-static int igc_tx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TIDV_VAL);
-static int igc_rx_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RDTR_VAL);
-SYSCTL_INT(_hw_igc, OID_AUTO, tx_int_delay, CTLFLAG_RDTUN, &igc_tx_int_delay_dflt,
- 0, "Default transmit interrupt delay in usecs");
-SYSCTL_INT(_hw_igc, OID_AUTO, rx_int_delay, CTLFLAG_RDTUN, &igc_rx_int_delay_dflt,
- 0, "Default receive interrupt delay in usecs");
-
-static int igc_tx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_TADV_VAL);
-static int igc_rx_abs_int_delay_dflt = IGC_TICKS_TO_USECS(IGC_RADV_VAL);
-SYSCTL_INT(_hw_igc, OID_AUTO, tx_abs_int_delay, CTLFLAG_RDTUN,
- &igc_tx_abs_int_delay_dflt, 0,
- "Default transmit interrupt delay limit in usecs");
-SYSCTL_INT(_hw_igc, OID_AUTO, rx_abs_int_delay, CTLFLAG_RDTUN,
- &igc_rx_abs_int_delay_dflt, 0,
- "Default receive interrupt delay limit in usecs");
-
static int igc_smart_pwr_down = false;
-SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN, &igc_smart_pwr_down,
+SYSCTL_INT(_hw_igc, OID_AUTO, smart_pwr_down, CTLFLAG_RDTUN,
+ &igc_smart_pwr_down,
0, "Set to true to leave smart power down enabled on newer adapters");
/* Controls whether promiscuous also shows bad packets */
-static int igc_debug_sbp = true;
+static int igc_debug_sbp = false;
SYSCTL_INT(_hw_igc, OID_AUTO, sbp, CTLFLAG_RDTUN, &igc_debug_sbp, 0,
"Show bad packets in promiscuous mode");
@@ -260,9 +262,18 @@ SYSCTL_INT(_hw_igc, OID_AUTO, eee_setting, CTLFLAG_RDTUN, &igc_eee_setting, 0,
"Enable Energy Efficient Ethernet");
/*
+ * AIM: Adaptive Interrupt Moderation
+ * The interrupt rate is varied over time based on the traffic
+ * for that interrupt vector.
+ */
+static int igc_enable_aim = 1;
+SYSCTL_INT(_hw_igc, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &igc_enable_aim,
+ 0, "Enable adaptive interrupt moderation (1=normal, 2=lowlatency)");
+
+/*
** Tuneable Interrupt rate
*/
-static int igc_max_interrupt_rate = 20000;
+static int igc_max_interrupt_rate = IGC_INTS_DEFAULT;
SYSCTL_INT(_hw_igc, OID_AUTO, max_interrupt_rate, CTLFLAG_RDTUN,
&igc_max_interrupt_rate, 0, "Maximum interrupts per second");
@@ -285,7 +296,8 @@ static struct if_shared_ctx igc_sctx_init = {
.isc_vendor_info = igc_vendor_info_array,
.isc_driver_version = "1",
.isc_driver = &igc_if_driver,
- .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
+ .isc_flags =
+ IFLIB_NEED_SCRATCH | IFLIB_TSO_INIT_IP | IFLIB_NEED_ZERO_CSUM,
.isc_nrxd_min = {IGC_MIN_RXD},
.isc_ntxd_min = {IGC_MIN_TXD},
@@ -304,8 +316,8 @@ static struct if_shared_ctx igc_sctx_init = {
static int igc_get_regs(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter = (struct igc_adapter *)arg1;
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = (struct igc_softc *)arg1;
+ struct igc_hw *hw = &sc->hw;
struct sbuf *sb;
u32 *regs_buff;
int rc;
@@ -381,7 +393,7 @@ static int igc_get_regs(SYSCTL_HANDLER_ARGS)
#ifdef DUMP_DESCS
{
- if_softc_ctx_t scctx = adapter->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct rx_ring *rxr = &rx_que->rxr;
struct tx_ring *txr = &tx_que->txr;
int ntxd = scctx->isc_ntxd[0];
@@ -391,15 +403,20 @@ static int igc_get_regs(SYSCTL_HANDLER_ARGS)
for (j = 0; j < nrxd; j++) {
u32 staterr = le32toh(rxr->rx_base[j].wb.upper.status_error);
u32 length = le32toh(rxr->rx_base[j].wb.upper.length);
- sbuf_printf(sb, "\tReceive Descriptor Address %d: %08" PRIx64 " Error:%d Length:%d\n", j, rxr->rx_base[j].read.buffer_addr, staterr, length);
+ sbuf_printf(sb, "\tReceive Descriptor Address %d: %08"
+ PRIx64 " Error:%d Length:%d\n",
+ j, rxr->rx_base[j].read.buffer_addr, staterr, length);
}
for (j = 0; j < min(ntxd, 256); j++) {
unsigned int *ptr = (unsigned int *)&txr->tx_base[j];
- sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x [3]: %08x eop: %d DD=%d\n",
- j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
- buf->eop != -1 ? txr->tx_base[buf->eop].upper.fields.status & IGC_TXD_STAT_DD : 0);
+ sbuf_printf(sb, "\tTXD[%03d] [0]: %08x [1]: %08x [2]: %08x"
+ "[3]: %08x eop: %d DD=%d\n",
+ j, ptr[0], ptr[1], ptr[2], ptr[3], buf->eop,
+ buf->eop != -1 ?
+ txr->tx_base[buf->eop].upper.fields.status &
+ IGC_TXD_STAT_DD : 0);
}
}
@@ -443,7 +460,7 @@ igc_set_num_queues(if_ctx_t ctx)
static int
igc_if_attach_pre(if_ctx_t ctx)
{
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
if_softc_ctx_t scctx;
device_t dev;
struct igc_hw *hw;
@@ -451,53 +468,96 @@ igc_if_attach_pre(if_ctx_t ctx)
INIT_DEBUGOUT("igc_if_attach_pre: begin");
dev = iflib_get_dev(ctx);
- adapter = iflib_get_softc(ctx);
+ sc = iflib_get_softc(ctx);
- adapter->ctx = adapter->osdep.ctx = ctx;
- adapter->dev = adapter->osdep.dev = dev;
- scctx = adapter->shared = iflib_get_softc_ctx(ctx);
- adapter->media = iflib_get_media(ctx);
- hw = &adapter->hw;
+ sc->ctx = sc->osdep.ctx = ctx;
+ sc->dev = sc->osdep.dev = dev;
+ scctx = sc->shared = iflib_get_softc_ctx(ctx);
+ sc->media = iflib_get_media(ctx);
+ hw = &sc->hw;
/* SYSCTL stuff */
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "nvm", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
- adapter, 0, igc_sysctl_nvm_info, "I", "NVM Information");
+ sc, 0, igc_sysctl_nvm_info, "I", "NVM Information");
+
+ sc->enable_aim = igc_enable_aim;
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "enable_aim", CTLFLAG_RW,
+ &sc->enable_aim, 0,
+ "Interrupt Moderation (1=normal, 2=lowlatency)");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+ sc, 0, igc_sysctl_print_fw_version, "A",
+ "Prints FW/NVM Versions");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
- adapter, 0, igc_sysctl_debug_info, "I", "Debug Information");
+ sc, 0, igc_sysctl_debug_info, "I", "Debug Information");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
- adapter, 0, igc_set_flowcntl, "I", "Flow Control");
+ sc, 0, igc_set_flowcntl, "I", "Flow Control");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "reg_dump",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter, 0,
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
igc_get_regs, "A", "Dump Registers");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "rs_dump",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, adapter, 0,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
igc_get_rs, "I", "Dump RS indexes");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "dmac",
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ igc_sysctl_dmac, "I", "DMA Coalesce");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "tso_tcp_flags_mask_first_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 0, igc_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for first segment");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "tso_tcp_flags_mask_middle_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 1, igc_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for middle segment");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "tso_tcp_flags_mask_last_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 2, igc_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for last segment");
+
/* Determine hardware and mac info */
igc_identify_hardware(ctx);
scctx->isc_tx_nsegments = IGC_MAX_SCATTER;
- scctx->isc_nrxqsets_max = scctx->isc_ntxqsets_max = igc_set_num_queues(ctx);
+ scctx->isc_nrxqsets_max =
+ scctx->isc_ntxqsets_max = igc_set_num_queues(ctx);
if (bootverbose)
device_printf(dev, "attach_pre capping queues at %d\n",
scctx->isc_ntxqsets_max);
- scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
- scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] *
+ sizeof(union igc_adv_tx_desc), IGC_DBA_ALIGN);
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] *
+ sizeof(union igc_adv_rx_desc), IGC_DBA_ALIGN);
scctx->isc_txd_size[0] = sizeof(union igc_adv_tx_desc);
scctx->isc_rxd_size[0] = sizeof(union igc_adv_rx_desc);
scctx->isc_txrx = &igc_txrx;
@@ -536,29 +596,6 @@ igc_if_attach_pre(if_ctx_t ctx)
igc_setup_msix(ctx);
igc_get_bus_info(hw);
- /* Set up some sysctls for the tunable interrupt delays */
- igc_add_int_delay_sysctl(adapter, "rx_int_delay",
- "receive interrupt delay in usecs", &adapter->rx_int_delay,
- IGC_REGISTER(hw, IGC_RDTR), igc_rx_int_delay_dflt);
- igc_add_int_delay_sysctl(adapter, "tx_int_delay",
- "transmit interrupt delay in usecs", &adapter->tx_int_delay,
- IGC_REGISTER(hw, IGC_TIDV), igc_tx_int_delay_dflt);
- igc_add_int_delay_sysctl(adapter, "rx_abs_int_delay",
- "receive interrupt delay limit in usecs",
- &adapter->rx_abs_int_delay,
- IGC_REGISTER(hw, IGC_RADV),
- igc_rx_abs_int_delay_dflt);
- igc_add_int_delay_sysctl(adapter, "tx_abs_int_delay",
- "transmit interrupt delay limit in usecs",
- &adapter->tx_abs_int_delay,
- IGC_REGISTER(hw, IGC_TADV),
- igc_tx_abs_int_delay_dflt);
- igc_add_int_delay_sysctl(adapter, "itr",
- "interrupt delay limit in usecs/4",
- &adapter->tx_itr,
- IGC_REGISTER(hw, IGC_ITR),
- DEFAULT_ITR);
-
hw->mac.autoneg = DO_AUTO_NEG;
hw->phy.autoneg_wait_to_complete = false;
hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
@@ -572,14 +609,15 @@ igc_if_attach_pre(if_ctx_t ctx)
* Set the frame limits assuming
* standard ethernet sized frames.
*/
- scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
+ scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE;
/* Allocate multicast array memory. */
- adapter->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
+ sc->mta = malloc(sizeof(u8) * ETHER_ADDR_LEN *
MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT);
- if (adapter->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
+ if (sc->mta == NULL) {
+ device_printf(dev,
+ "Can not allocate multicast setup array\n");
error = ENOMEM;
goto err_late;
}
@@ -590,12 +628,12 @@ igc_if_attach_pre(if_ctx_t ctx)
" due to SOL/IDER session.\n");
/* Sysctl for setting Energy Efficient Ethernet */
- adapter->hw.dev_spec._i225.eee_disable = igc_eee_setting;
+ sc->hw.dev_spec._i225.eee_disable = igc_eee_setting;
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, "eee_control",
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
- adapter, 0, igc_sysctl_eee, "I",
+ sc, 0, igc_sysctl_eee, "I",
"Disable Energy Efficient Ethernet");
/*
@@ -623,7 +661,7 @@ igc_if_attach_pre(if_ctx_t ctx)
/* Copy the permanent MAC address out of the EEPROM */
if (igc_read_mac_addr(hw) < 0) {
device_printf(dev, "EEPROM read error while reading MAC"
- " address\n");
+ " address\n");
error = EIO;
goto err_late;
}
@@ -634,6 +672,11 @@ igc_if_attach_pre(if_ctx_t ctx)
goto err_late;
}
+ /* Save the EEPROM/NVM versions */
+ igc_fw_version(sc);
+
+ igc_print_fw_version(sc);
+
/*
* Get Wake-on-Lan and Management info for later use
*/
@@ -641,7 +684,7 @@ igc_if_attach_pre(if_ctx_t ctx)
/* Enable only WOL MAGIC by default */
scctx->isc_capenable &= ~IFCAP_WOL;
- if (adapter->wol != 0)
+ if (sc->wol != 0)
scctx->isc_capenable |= IFCAP_WOL_MAGIC;
iflib_set_mac(ctx, hw->mac.addr);
@@ -649,10 +692,10 @@ igc_if_attach_pre(if_ctx_t ctx)
return (0);
err_late:
- igc_release_hw_control(adapter);
+ igc_release_hw_control(sc);
err_pci:
igc_free_pci_resources(ctx);
- free(adapter->mta, M_DEVBUF);
+ free(sc->mta, M_DEVBUF);
return (error);
}
@@ -660,8 +703,8 @@ err_pci:
static int
igc_if_attach_post(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
int error = 0;
/* Setup OS specific network interface */
@@ -673,23 +716,23 @@ igc_if_attach_post(if_ctx_t ctx)
igc_reset(ctx);
/* Initialize statistics */
- igc_update_stats_counters(adapter);
+ igc_update_stats_counters(sc);
hw->mac.get_link_status = true;
igc_if_update_admin_status(ctx);
- igc_add_hw_stats(adapter);
+ igc_add_hw_stats(sc);
/* the driver can now take control from firmware */
- igc_get_hw_control(adapter);
+ igc_get_hw_control(sc);
INIT_DEBUGOUT("igc_if_attach_post: end");
return (error);
err_late:
- igc_release_hw_control(adapter);
+ igc_release_hw_control(sc);
igc_free_pci_resources(ctx);
igc_if_queues_free(ctx);
- free(adapter->mta, M_DEVBUF);
+ free(sc->mta, M_DEVBUF);
return (error);
}
@@ -706,13 +749,13 @@ err_late:
static int
igc_if_detach(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
INIT_DEBUGOUT("igc_if_detach: begin");
- igc_phy_hw_reset(&adapter->hw);
+ igc_phy_hw_reset(&sc->hw);
- igc_release_hw_control(adapter);
+ igc_release_hw_control(sc);
igc_free_pci_resources(ctx);
return (0);
@@ -736,9 +779,9 @@ igc_if_shutdown(if_ctx_t ctx)
static int
igc_if_suspend(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
- igc_release_hw_control(adapter);
+ igc_release_hw_control(sc);
igc_enable_wakeup(ctx);
return (0);
}
@@ -755,10 +798,10 @@ static int
igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
int max_frame_size;
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = iflib_get_softc_ctx(ctx);
- IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
+ IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
/* 9K Jumbo Frame size */
max_frame_size = 9234;
@@ -767,7 +810,7 @@ igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
return (EINVAL);
}
- scctx->isc_max_frame_size = adapter->hw.mac.max_frame_size =
+ scctx->isc_max_frame_size = sc->hw.mac.max_frame_size =
mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
return (0);
}
@@ -784,8 +827,8 @@ igc_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
static void
igc_if_init(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = adapter->shared;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
if_t ifp = iflib_get_ifp(ctx);
struct igc_tx_queue *tx_que;
int i;
@@ -793,17 +836,18 @@ igc_if_init(if_ctx_t ctx)
INIT_DEBUGOUT("igc_if_init: begin");
/* Get the latest mac address, User can use a LAA */
- bcopy(if_getlladdr(ifp), adapter->hw.mac.addr,
+ bcopy(if_getlladdr(ifp), sc->hw.mac.addr,
ETHER_ADDR_LEN);
/* Put the address into the Receive Address Array */
- igc_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+ igc_rar_set(&sc->hw, sc->hw.mac.addr, 0);
/* Initialize the hardware */
igc_reset(ctx);
igc_if_update_admin_status(ctx);
- for (i = 0, tx_que = adapter->tx_queues; i < adapter->tx_num_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->tx_num_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txr->tx_rs_cidx = txr->tx_rs_pidx;
@@ -817,7 +861,7 @@ igc_if_init(if_ctx_t ctx)
}
/* Setup VLAN support, basic and offload if available */
- IGC_WRITE_REG(&adapter->hw, IGC_VET, ETHERTYPE_VLAN);
+ IGC_WRITE_REG(&sc->hw, IGC_VET, ETHERTYPE_VLAN);
/* Prepare transmit descriptors and buffers */
igc_initialize_transmit_unit(ctx);
@@ -825,7 +869,7 @@ igc_if_init(if_ctx_t ctx)
/* Setup Multicast table */
igc_if_multi_set(ctx);
- adapter->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
+ sc->rx_mbuf_sz = iflib_get_rx_mbuf_sz(ctx);
igc_initialize_receive_unit(ctx);
/* Set up VLAN support */
@@ -833,20 +877,164 @@ igc_if_init(if_ctx_t ctx)
/* Don't lose promiscuous settings */
igc_if_set_promisc(ctx, if_getflags(ifp));
- igc_clear_hw_cntrs_base_generic(&adapter->hw);
+ igc_clear_hw_cntrs_base_generic(&sc->hw);
- if (adapter->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
- igc_configure_queues(adapter);
+ if (sc->intr_type == IFLIB_INTR_MSIX) /* Set up queue routing */
+ igc_configure_queues(sc);
/* this clears any pending interrupts */
- IGC_READ_REG(&adapter->hw, IGC_ICR);
- IGC_WRITE_REG(&adapter->hw, IGC_ICS, IGC_ICS_LSC);
+ IGC_READ_REG(&sc->hw, IGC_ICR);
+ IGC_WRITE_REG(&sc->hw, IGC_ICS, IGC_ICS_LSC);
/* the driver can now take control from firmware */
- igc_get_hw_control(adapter);
+ igc_get_hw_control(sc);
/* Set Energy Efficient Ethernet */
- igc_set_eee_i225(&adapter->hw, true, true, true);
+ igc_set_eee_i225(&sc->hw, true, true, true);
+}
+
+enum eitr_latency_target {
+ eitr_latency_disabled = 0,
+ eitr_latency_lowest = 1,
+ eitr_latency_low = 2,
+ eitr_latency_bulk = 3
+};
+/*********************************************************************
+ *
+ * Helper to calculate next EITR value for AIM
+ *
+ *********************************************************************/
+static void
+igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
+ struct tx_ring *txr, struct rx_ring *rxr)
+{
+ struct igc_hw *hw = &sc->hw;
+ unsigned long bytes, bytes_per_packet, packets;
+ unsigned long rxbytes, rxpackets, txbytes, txpackets;
+ u32 neweitr;
+ u8 nextlatency;
+
+ rxbytes = atomic_load_long(&rxr->rx_bytes);
+ txbytes = atomic_load_long(&txr->tx_bytes);
+
+ /* Idle, do nothing */
+ if (txbytes == 0 && rxbytes == 0)
+ return;
+
+ neweitr = 0;
+
+ if (sc->enable_aim) {
+ nextlatency = rxr->rx_nextlatency;
+
+ /* Use half default (4K) ITR if sub-gig */
+ if (sc->link_speed < 1000) {
+ neweitr = IGC_INTS_4K;
+ goto igc_set_next_eitr;
+ }
+ /* Want at least enough packet buffer for two frames before using AIM */
+ if (sc->shared->isc_max_frame_size * 2 > (sc->pba << 10)) {
+ neweitr = igc_max_interrupt_rate;
+ sc->enable_aim = 0;
+ goto igc_set_next_eitr;
+ }
+
+ bytes = bytes_per_packet = packets = 0;
+ /* Get largest values from the associated tx and rx ring */
+ txpackets = atomic_load_long(&txr->tx_packets);
+ if (txpackets != 0) {
+ bytes = txbytes;
+ bytes_per_packet = txbytes / txpackets;
+ packets = txpackets;
+ }
+ rxpackets = atomic_load_long(&rxr->rx_packets);
+ if (rxpackets != 0) {
+ bytes = lmax(bytes, rxbytes);
+ bytes_per_packet =
+ lmax(bytes_per_packet, rxbytes / rxpackets);
+ packets = lmax(packets, rxpackets);
+ }
+
+ /* Latency state machine */
+ switch (nextlatency) {
+ case eitr_latency_disabled: /* Bootstrapping */
+ nextlatency = eitr_latency_low;
+ break;
+ case eitr_latency_lowest: /* 70k ints/s */
+ /* TSO and jumbo frames */
+ if (bytes_per_packet > 8000)
+ nextlatency = eitr_latency_bulk;
+ else if ((packets < 5) && (bytes > 512))
+ nextlatency = eitr_latency_low;
+ break;
+ case eitr_latency_low: /* 20k ints/s */
+ if (bytes > 10000) {
+ /* Handle TSO */
+ if (bytes_per_packet > 8000)
+ nextlatency = eitr_latency_bulk;
+ else if ((packets < 10) ||
+ (bytes_per_packet > 1200))
+ nextlatency = eitr_latency_bulk;
+ else if (packets > 35)
+ nextlatency = eitr_latency_lowest;
+ } else if (bytes_per_packet > 2000) {
+ nextlatency = eitr_latency_bulk;
+ } else if (packets < 3 && bytes < 512) {
+ nextlatency = eitr_latency_lowest;
+ }
+ break;
+ case eitr_latency_bulk: /* 4k ints/s */
+ if (bytes > 25000) {
+ if (packets > 35)
+ nextlatency = eitr_latency_low;
+ } else if (bytes < 1500)
+ nextlatency = eitr_latency_low;
+ break;
+ default:
+ nextlatency = eitr_latency_low;
+ device_printf(sc->dev,
+ "Unexpected neweitr transition %d\n",
+ nextlatency);
+ break;
+ }
+
+ /* Trim eitr_latency_lowest for default AIM setting */
+ if (sc->enable_aim == 1 && nextlatency == eitr_latency_lowest)
+ nextlatency = eitr_latency_low;
+
+ /* Request new latency */
+ rxr->rx_nextlatency = nextlatency;
+ } else {
+ /* We may have toggled to AIM disabled */
+ nextlatency = eitr_latency_disabled;
+ rxr->rx_nextlatency = nextlatency;
+ }
+
+ /* ITR state machine */
+ switch (nextlatency) {
+ case eitr_latency_lowest:
+ neweitr = IGC_INTS_70K;
+ break;
+ case eitr_latency_low:
+ neweitr = IGC_INTS_20K;
+ break;
+ case eitr_latency_bulk:
+ neweitr = IGC_INTS_4K;
+ break;
+ case eitr_latency_disabled:
+ default:
+ neweitr = igc_max_interrupt_rate;
+ break;
+ }
+
+igc_set_next_eitr:
+ neweitr = IGC_INTS_TO_EITR(neweitr);
+
+ neweitr |= IGC_EITR_CNT_IGNR;
+
+ if (neweitr != que->eitr_setting) {
+ que->eitr_setting = neweitr;
+ IGC_WRITE_REG(hw, IGC_EITR(que->msix), que->eitr_setting);
+ }
}
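/*
 * Editor's sketch, not part of the diff: the first switch above classifies
 * the interval's traffic, the second maps the class to a target interrupt
 * rate. A minimal userspace model of that mapping, with the 70k/20k/4k
 * rates taken from the in-code comments and 8000 ints/s assumed for the
 * static default (IGC_INTS_DEFAULT in the driver headers):
 */
#include <stdio.h>

enum latency { DISABLED, LOWEST, LOW, BULK };

static unsigned int
aim_target_rate(enum latency lat)
{
	switch (lat) {
	case LOWEST:
		return (70000);	/* many small packets: favor latency */
	case LOW:
		return (20000);	/* mixed traffic */
	case BULK:
		return (4000);	/* TSO/jumbo bulk: favor throughput */
	case DISABLED:
	default:
		return (8000);	/* AIM off: static default rate, assumed */
	}
}

int
main(void)
{
	/* A TSO-heavy interval (bytes_per_packet > 8000) lands in BULK. */
	printf("bulk: %u ints/s\n", aim_target_rate(BULK));
	/* A trickle of tiny packets (< 3 pkts, < 512 bytes) lands in LOWEST. */
	printf("lowest: %u ints/s\n", aim_target_rate(LOWEST));
	return (0);
}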
/*********************************************************************
@@ -857,11 +1045,15 @@ igc_if_init(if_ctx_t ctx)
int
igc_intr(void *arg)
{
- struct igc_adapter *adapter = arg;
- if_ctx_t ctx = adapter->ctx;
+ struct igc_softc *sc = arg;
+ struct igc_hw *hw = &sc->hw;
+ struct igc_rx_queue *que = &sc->rx_queues[0];
+ struct tx_ring *txr = &sc->tx_queues[0].txr;
+ struct rx_ring *rxr = &que->rxr;
+ if_ctx_t ctx = sc->ctx;
u32 reg_icr;
- reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR);
+ reg_icr = IGC_READ_REG(hw, IGC_ICR);
/* Hot eject? */
if (reg_icr == 0xffffffff)
@@ -887,7 +1079,15 @@ igc_intr(void *arg)
igc_handle_link(ctx);
if (reg_icr & IGC_ICR_RXO)
- adapter->rx_overruns++;
+ sc->rx_overruns++;
+
+ igc_neweitr(sc, que, txr, rxr);
+
+ /* Reset state */
+ txr->tx_bytes = 0;
+ txr->tx_packets = 0;
+ rxr->rx_bytes = 0;
+ rxr->rx_packets = 0;
return (FILTER_SCHEDULE_THREAD);
}
@@ -895,20 +1095,20 @@ igc_intr(void *arg)
static int
igc_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_rx_queue *rxq = &adapter->rx_queues[rxqid];
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_rx_queue *rxq = &sc->rx_queues[rxqid];
- IGC_WRITE_REG(&adapter->hw, IGC_EIMS, rxq->eims);
+ IGC_WRITE_REG(&sc->hw, IGC_EIMS, rxq->eims);
return (0);
}
static int
igc_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_tx_queue *txq = &adapter->tx_queues[txqid];
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_tx_queue *txq = &sc->tx_queues[txqid];
- IGC_WRITE_REG(&adapter->hw, IGC_EIMS, txq->eims);
+ IGC_WRITE_REG(&sc->hw, IGC_EIMS, txq->eims);
return (0);
}
@@ -921,9 +1121,20 @@ static int
igc_msix_que(void *arg)
{
struct igc_rx_queue *que = arg;
+ struct igc_softc *sc = que->sc;
+ struct tx_ring *txr = &sc->tx_queues[que->msix].txr;
+ struct rx_ring *rxr = &que->rxr;
++que->irqs;
+ igc_neweitr(sc, que, txr, rxr);
+
+ /* Reset state */
+ txr->tx_bytes = 0;
+ txr->tx_packets = 0;
+ rxr->rx_bytes = 0;
+ rxr->rx_packets = 0;
+
return (FILTER_SCHEDULE_THREAD);
}
@@ -935,22 +1146,22 @@ igc_msix_que(void *arg)
static int
igc_msix_link(void *arg)
{
- struct igc_adapter *adapter = arg;
+ struct igc_softc *sc = arg;
u32 reg_icr;
- ++adapter->link_irq;
- MPASS(adapter->hw.back != NULL);
- reg_icr = IGC_READ_REG(&adapter->hw, IGC_ICR);
+ ++sc->link_irq;
+ MPASS(sc->hw.back != NULL);
+ reg_icr = IGC_READ_REG(&sc->hw, IGC_ICR);
if (reg_icr & IGC_ICR_RXO)
- adapter->rx_overruns++;
+ sc->rx_overruns++;
if (reg_icr & (IGC_ICR_RXSEQ | IGC_ICR_LSC)) {
- igc_handle_link(adapter->ctx);
+ igc_handle_link(sc->ctx);
}
- IGC_WRITE_REG(&adapter->hw, IGC_IMS, IGC_IMS_LSC);
- IGC_WRITE_REG(&adapter->hw, IGC_EIMS, adapter->link_mask);
+ IGC_WRITE_REG(&sc->hw, IGC_IMS, IGC_IMS_LSC);
+ IGC_WRITE_REG(&sc->hw, IGC_EIMS, sc->link_mask);
return (FILTER_HANDLED);
}
@@ -959,9 +1170,9 @@ static void
igc_handle_link(void *context)
{
if_ctx_t ctx = context;
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
- adapter->hw.mac.get_link_status = true;
+ sc->hw.mac.get_link_status = true;
iflib_admin_intr_deferred(ctx);
}
@@ -976,7 +1187,7 @@ igc_handle_link(void *context)
static void
igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
INIT_DEBUGOUT("igc_if_media_status: begin");
@@ -985,28 +1196,28 @@ igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
ifmr->ifm_status = IFM_AVALID;
ifmr->ifm_active = IFM_ETHER;
- if (!adapter->link_active) {
+ if (!sc->link_active) {
return;
}
ifmr->ifm_status |= IFM_ACTIVE;
- switch (adapter->link_speed) {
+ switch (sc->link_speed) {
case 10:
ifmr->ifm_active |= IFM_10_T;
break;
case 100:
ifmr->ifm_active |= IFM_100_TX;
- break;
+ break;
case 1000:
ifmr->ifm_active |= IFM_1000_T;
break;
case 2500:
- ifmr->ifm_active |= IFM_2500_T;
- break;
+ ifmr->ifm_active |= IFM_2500_T;
+ break;
}
- if (adapter->link_duplex == FULL_DUPLEX)
+ if (sc->link_duplex == FULL_DUPLEX)
ifmr->ifm_active |= IFM_FDX;
else
ifmr->ifm_active |= IFM_HDX;
@@ -1023,7 +1234,7 @@ igc_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
static int
igc_if_media_change(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
struct ifmedia *ifm = iflib_get_media(ctx);
INIT_DEBUGOUT("igc_if_media_change: begin");
@@ -1031,32 +1242,32 @@ igc_if_media_change(if_ctx_t ctx)
if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
return (EINVAL);
- adapter->hw.mac.autoneg = DO_AUTO_NEG;
+ sc->hw.mac.autoneg = DO_AUTO_NEG;
switch (IFM_SUBTYPE(ifm->ifm_media)) {
case IFM_AUTO:
- adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ sc->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+ break;
+ case IFM_2500_T:
+ sc->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
break;
- case IFM_2500_T:
- adapter->hw.phy.autoneg_advertised = ADVERTISE_2500_FULL;
- break;
case IFM_1000_T:
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
- adapter->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
+ sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
else
- adapter->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
+ sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
break;
case IFM_10_T:
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
- adapter->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
+ sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
else
- adapter->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
+ sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
break;
default:
- device_printf(adapter->dev, "Unsupported media type\n");
+ device_printf(sc->dev, "Unsupported media type\n");
}
igc_if_init(ctx);
@@ -1067,12 +1278,12 @@ igc_if_media_change(if_ctx_t ctx)
static int
igc_if_set_promisc(if_ctx_t ctx, int flags)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
u32 reg_rctl;
int mcnt = 0;
- reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+ reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
reg_rctl &= ~(IGC_RCTL_SBP | IGC_RCTL_UPE);
if (flags & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
@@ -1082,18 +1293,18 @@ igc_if_set_promisc(if_ctx_t ctx, int flags)
/* Don't disable if in MAX groups */
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
reg_rctl &= (~IGC_RCTL_MPE);
- IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
if (flags & IFF_PROMISC) {
reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
/* Turn this on if you want to see bad packets */
if (igc_debug_sbp)
reg_rctl |= IGC_RCTL_SBP;
- IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
} else if (flags & IFF_ALLMULTI) {
reg_rctl |= IGC_RCTL_MPE;
reg_rctl &= ~IGC_RCTL_UPE;
- IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}
return (0);
}
@@ -1121,20 +1332,20 @@ igc_copy_maddr(void *arg, struct sockaddr_dl *sdl, u_int idx)
static void
igc_if_multi_set(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- u8 *mta; /* Multicast array memory */
+ u8 *mta; /* Multicast array memory */
u32 reg_rctl = 0;
int mcnt = 0;
IOCTL_DEBUGOUT("igc_set_multi: begin");
- mta = adapter->mta;
+ mta = sc->mta;
bzero(mta, sizeof(u8) * ETHER_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES);
mcnt = if_foreach_llmaddr(ifp, igc_copy_maddr, mta);
- reg_rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+ reg_rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
if (if_getflags(ifp) & IFF_PROMISC) {
reg_rctl |= (IGC_RCTL_UPE | IGC_RCTL_MPE);
@@ -1142,16 +1353,16 @@ igc_if_multi_set(if_ctx_t ctx)
if (igc_debug_sbp)
reg_rctl |= IGC_RCTL_SBP;
} else if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES ||
- if_getflags(ifp) & IFF_ALLMULTI) {
- reg_rctl |= IGC_RCTL_MPE;
+ if_getflags(ifp) & IFF_ALLMULTI) {
+ reg_rctl |= IGC_RCTL_MPE;
reg_rctl &= ~IGC_RCTL_UPE;
- } else
+ } else
reg_rctl &= ~(IGC_RCTL_UPE | IGC_RCTL_MPE);
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
- igc_update_mc_addr_list(&adapter->hw, mta, mcnt);
+ igc_update_mc_addr_list(&sc->hw, mta, mcnt);
- IGC_WRITE_REG(&adapter->hw, IGC_RCTL, reg_rctl);
+ IGC_WRITE_REG(&sc->hw, IGC_RCTL, reg_rctl);
}
/*********************************************************************
@@ -1175,8 +1386,8 @@ igc_if_timer(if_ctx_t ctx, uint16_t qid)
static void
igc_if_update_admin_status(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
device_t dev = iflib_get_dev(ctx);
u32 link_check, thstat, ctrl;
@@ -1200,36 +1411,36 @@ igc_if_update_admin_status(if_ctx_t ctx)
}
/* Now check for a transition */
- if (link_check && (adapter->link_active == 0)) {
- igc_get_speed_and_duplex(hw, &adapter->link_speed,
- &adapter->link_duplex);
+ if (link_check && (sc->link_active == 0)) {
+ igc_get_speed_and_duplex(hw, &sc->link_speed,
+ &sc->link_duplex);
if (bootverbose)
device_printf(dev, "Link is up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
+ sc->link_speed,
+ ((sc->link_duplex == FULL_DUPLEX) ?
"Full Duplex" : "Half Duplex"));
- adapter->link_active = 1;
+ sc->link_active = 1;
iflib_link_state_change(ctx, LINK_STATE_UP,
- IF_Mbps(adapter->link_speed));
- } else if (!link_check && (adapter->link_active == 1)) {
- adapter->link_speed = 0;
- adapter->link_duplex = 0;
- adapter->link_active = 0;
+ IF_Mbps(sc->link_speed));
+ } else if (!link_check && (sc->link_active == 1)) {
+ sc->link_speed = 0;
+ sc->link_duplex = 0;
+ sc->link_active = 0;
iflib_link_state_change(ctx, LINK_STATE_DOWN, 0);
}
- igc_update_stats_counters(adapter);
+ igc_update_stats_counters(sc);
}
static void
igc_if_watchdog_reset(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
/*
* Just count the event; iflib(4) will already trigger a
* sufficient reset of the controller.
*/
- adapter->watchdog_events++;
+ sc->watchdog_events++;
}
/*********************************************************************
@@ -1241,12 +1452,12 @@ igc_if_watchdog_reset(if_ctx_t ctx)
static void
igc_if_stop(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
INIT_DEBUGOUT("igc_if_stop: begin");
- igc_reset_hw(&adapter->hw);
- IGC_WRITE_REG(&adapter->hw, IGC_WUC, 0);
+ igc_reset_hw(&sc->hw);
+ IGC_WRITE_REG(&sc->hw, IGC_WUC, 0);
}
/*********************************************************************
@@ -1258,22 +1469,22 @@ static void
igc_identify_hardware(if_ctx_t ctx)
{
device_t dev = iflib_get_dev(ctx);
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
/* Make sure our PCI config space has the necessary stuff set */
- adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ sc->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
/* Save off the information about this board */
- adapter->hw.vendor_id = pci_get_vendor(dev);
- adapter->hw.device_id = pci_get_device(dev);
- adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
- adapter->hw.subsystem_vendor_id =
+ sc->hw.vendor_id = pci_get_vendor(dev);
+ sc->hw.device_id = pci_get_device(dev);
+ sc->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ sc->hw.subsystem_vendor_id =
pci_read_config(dev, PCIR_SUBVEND_0, 2);
- adapter->hw.subsystem_device_id =
+ sc->hw.subsystem_device_id =
pci_read_config(dev, PCIR_SUBDEV_0, 2);
/* Do Shared Code Init and Setup */
- if (igc_set_mac_type(&adapter->hw)) {
+ if (igc_set_mac_type(&sc->hw)) {
device_printf(dev, "Setup init failure\n");
return;
}
@@ -1282,23 +1493,24 @@ igc_identify_hardware(if_ctx_t ctx)
static int
igc_allocate_pci_resources(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
int rid;
rid = PCIR_BAR(0);
- adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ sc->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
- if (adapter->memory == NULL) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ if (sc->memory == NULL) {
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
- adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory);
- adapter->osdep.mem_bus_space_handle =
- rman_get_bushandle(adapter->memory);
- adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle;
+ sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->memory);
+ sc->osdep.mem_bus_space_handle =
+ rman_get_bushandle(sc->memory);
+ sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
- adapter->hw.back = &adapter->osdep;
+ sc->hw.back = &sc->osdep;
return (0);
}
@@ -1311,20 +1523,23 @@ igc_allocate_pci_resources(if_ctx_t ctx)
static int
igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_rx_queue *rx_que = adapter->rx_queues;
- struct igc_tx_queue *tx_que = adapter->tx_queues;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_rx_queue *rx_que = sc->rx_queues;
+ struct igc_tx_queue *tx_que = sc->tx_queues;
int error, rid, i, vector = 0, rx_vectors;
char buf[16];
/* First set up ring resources */
- for (i = 0; i < adapter->rx_num_queues; i++, rx_que++, vector++) {
+ for (i = 0; i < sc->rx_num_queues; i++, rx_que++, vector++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
- error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
+ error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
+ IFLIB_INTR_RXTX, igc_msix_que, rx_que, rx_que->me, buf);
if (error) {
- device_printf(iflib_get_dev(ctx), "Failed to allocate que int %d err: %d", i, error);
- adapter->rx_num_queues = i + 1;
+ device_printf(iflib_get_dev(ctx),
+ "Failed to allocate que int %d err: %d",
+ i, error);
+ sc->rx_num_queues = i + 1;
goto fail;
}
@@ -1341,14 +1556,14 @@ igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
rx_vectors = vector;
vector = 0;
- for (i = 0; i < adapter->tx_num_queues; i++, tx_que++, vector++) {
+ for (i = 0; i < sc->tx_num_queues; i++, tx_que++, vector++) {
snprintf(buf, sizeof(buf), "txq%d", i);
- tx_que = &adapter->tx_queues[i];
+ tx_que = &sc->tx_queues[i];
iflib_softirq_alloc_generic(ctx,
- &adapter->rx_queues[i % adapter->rx_num_queues].que_irq,
+ &sc->rx_queues[i % sc->rx_num_queues].que_irq,
IFLIB_INTR_TX, tx_que, tx_que->me, buf);
- tx_que->msix = (vector % adapter->rx_num_queues);
+ tx_que->msix = (vector % sc->rx_num_queues);
/*
* Set the bit to enable interrupt
@@ -1361,26 +1576,28 @@ igc_if_msix_intr_assign(if_ctx_t ctx, int msix)
/* Link interrupt */
rid = rx_vectors + 1;
- error = iflib_irq_alloc_generic(ctx, &adapter->irq, rid, IFLIB_INTR_ADMIN, igc_msix_link, adapter, 0, "aq");
+ error = iflib_irq_alloc_generic(ctx, &sc->irq, rid, IFLIB_INTR_ADMIN,
+ igc_msix_link, sc, 0, "aq");
if (error) {
- device_printf(iflib_get_dev(ctx), "Failed to register admin handler");
+ device_printf(iflib_get_dev(ctx),
+ "Failed to register admin handler");
goto fail;
}
- adapter->linkvec = rx_vectors;
+ sc->linkvec = rx_vectors;
return (0);
fail:
- iflib_irq_free(ctx, &adapter->irq);
- rx_que = adapter->rx_queues;
- for (int i = 0; i < adapter->rx_num_queues; i++, rx_que++)
+ iflib_irq_free(ctx, &sc->irq);
+ rx_que = sc->rx_queues;
+ for (int i = 0; i < sc->rx_num_queues; i++, rx_que++)
iflib_irq_free(ctx, &rx_que->que_irq);
return (error);
}
static void
-igc_configure_queues(struct igc_adapter *adapter)
+igc_configure_queues(struct igc_softc *sc)
{
- struct igc_hw *hw = &adapter->hw;
+ struct igc_hw *hw = &sc->hw;
struct igc_rx_queue *rx_que;
struct igc_tx_queue *tx_que;
u32 ivar = 0, newitr = 0;
@@ -1392,10 +1609,10 @@ igc_configure_queues(struct igc_adapter *adapter)
/* Turn on MSI-X */
/* RX entries */
- for (int i = 0; i < adapter->rx_num_queues; i++) {
+ for (int i = 0; i < sc->rx_num_queues; i++) {
u32 index = i >> 1;
ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
- rx_que = &adapter->rx_queues[i];
+ rx_que = &sc->rx_queues[i];
if (i & 1) {
ivar &= 0xFF00FFFF;
ivar |= (rx_que->msix | IGC_IVAR_VALID) << 16;
@@ -1406,10 +1623,10 @@ igc_configure_queues(struct igc_adapter *adapter)
IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
}
/* TX entries */
- for (int i = 0; i < adapter->tx_num_queues; i++) {
+ for (int i = 0; i < sc->tx_num_queues; i++) {
u32 index = i >> 1;
ivar = IGC_READ_REG_ARRAY(hw, IGC_IVAR0, index);
- tx_que = &adapter->tx_queues[i];
+ tx_que = &sc->tx_queues[i];
if (i & 1) {
ivar &= 0x00FFFFFF;
ivar |= (tx_que->msix | IGC_IVAR_VALID) << 24;
@@ -1418,22 +1635,22 @@ igc_configure_queues(struct igc_adapter *adapter)
ivar |= (tx_que->msix | IGC_IVAR_VALID) << 8;
}
IGC_WRITE_REG_ARRAY(hw, IGC_IVAR0, index, ivar);
- adapter->que_mask |= tx_que->eims;
+ sc->que_mask |= tx_que->eims;
}
/* And for the link interrupt */
- ivar = (adapter->linkvec | IGC_IVAR_VALID) << 8;
- adapter->link_mask = 1 << adapter->linkvec;
+ ivar = (sc->linkvec | IGC_IVAR_VALID) << 8;
+ sc->link_mask = 1 << sc->linkvec;
IGC_WRITE_REG(hw, IGC_IVAR_MISC, ivar);
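/*
 * Editor's sketch, not part of the diff: each 32-bit IVAR0 entry holds four
 * 8-bit vector slots, so queue pair (2n, 2n+1) shares IVAR0[n]. From the
 * shifts above -- TX-even << 8, RX-odd << 16, TX-odd << 24, and presumably
 * RX-even << 0 in the branch outside this hunk -- the layout is:
 *
 *	byte 0: RX queue 2n	byte 1: TX queue 2n
 *	byte 2: RX queue 2n+1	byte 3: TX queue 2n+1
 *
 * A model of the RX side, assuming IGC_IVAR_VALID is the 0x80 slot-valid
 * bit as on other igb-family parts:
 */
static unsigned int
ivar_set_rx(unsigned int ivar, int queue, unsigned int msix_vector)
{
	int shift = (queue & 1) ? 16 : 0;	/* odd queue -> byte 2 */

	ivar &= ~(0xFFu << shift);		/* clear the slot */
	return (ivar | ((msix_vector | 0x80u) << shift));
}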
/* Set the starting interrupt rate */
if (igc_max_interrupt_rate > 0)
- newitr = (4000000 / igc_max_interrupt_rate) & 0x7FFC;
+ newitr = IGC_INTS_TO_EITR(igc_max_interrupt_rate);
newitr |= IGC_EITR_CNT_IGNR;
- for (int i = 0; i < adapter->rx_num_queues; i++) {
- rx_que = &adapter->rx_queues[i];
+ for (int i = 0; i < sc->rx_num_queues; i++) {
+ rx_que = &sc->rx_queues[i];
IGC_WRITE_REG(hw, IGC_EITR(rx_que->msix), newitr);
}
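/*
 * Editor's sketch, not part of the diff: the deleted open-coded line above
 * shows the arithmetic IGC_INTS_TO_EITR() presumably wraps -- the EITR
 * interval field occupies bits 2..14, hence the & 0x7FFC. Assuming the
 * macro keeps that formula:
 */
#include <assert.h>

static unsigned int
ints_to_eitr(unsigned int ints_per_sec)
{
	/* Interval in EITR units, clipped to the bits-2..14 field. */
	return ((4000000u / ints_per_sec) & 0x7FFCu);
}

static void
ints_to_eitr_examples(void)
{
	assert(ints_to_eitr(8000) == 500);	/* assumed default rate */
	assert(ints_to_eitr(20000) == 200);	/* eitr_latency_low */
	assert(ints_to_eitr(4000) == 1000);	/* eitr_latency_bulk */
}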
@@ -1443,34 +1660,34 @@ igc_configure_queues(struct igc_adapter *adapter)
static void
igc_free_pci_resources(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_rx_queue *que = adapter->rx_queues;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_rx_queue *que = sc->rx_queues;
device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
- if (adapter->intr_type == IFLIB_INTR_MSIX)
- iflib_irq_free(ctx, &adapter->irq);
+ if (sc->intr_type == IFLIB_INTR_MSIX)
+ iflib_irq_free(ctx, &sc->irq);
- for (int i = 0; i < adapter->rx_num_queues; i++, que++) {
+ for (int i = 0; i < sc->rx_num_queues; i++, que++) {
iflib_irq_free(ctx, &que->que_irq);
}
- if (adapter->memory != NULL) {
+ if (sc->memory != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
- rman_get_rid(adapter->memory), adapter->memory);
- adapter->memory = NULL;
+ rman_get_rid(sc->memory), sc->memory);
+ sc->memory = NULL;
}
- if (adapter->flash != NULL) {
+ if (sc->flash != NULL) {
bus_release_resource(dev, SYS_RES_MEMORY,
- rman_get_rid(adapter->flash), adapter->flash);
- adapter->flash = NULL;
+ rman_get_rid(sc->flash), sc->flash);
+ sc->flash = NULL;
}
- if (adapter->ioport != NULL) {
+ if (sc->ioport != NULL) {
bus_release_resource(dev, SYS_RES_IOPORT,
- rman_get_rid(adapter->ioport), adapter->ioport);
- adapter->ioport = NULL;
+ rman_get_rid(sc->ioport), sc->ioport);
+ sc->ioport = NULL;
}
}
@@ -1487,18 +1704,18 @@ igc_setup_msix(if_ctx_t ctx)
*
**********************************************************************/
static void
-igc_init_dmac(struct igc_adapter *adapter, u32 pba)
+igc_init_dmac(struct igc_softc *sc, u32 pba)
{
- device_t dev = adapter->dev;
- struct igc_hw *hw = &adapter->hw;
- u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
- u16 hwm;
- u16 max_frame_size;
- int status;
+ device_t dev = sc->dev;
+ struct igc_hw *hw = &sc->hw;
+ u32 dmac, reg = ~IGC_DMACR_DMAC_EN;
+ u16 hwm;
+ u16 max_frame_size;
+ int status;
- max_frame_size = adapter->shared->isc_max_frame_size;
+ max_frame_size = sc->shared->isc_max_frame_size;
- if (adapter->dmac == 0) { /* Disabling it */
+ if (sc->dmac == 0) { /* Disabling it */
IGC_WRITE_REG(hw, IGC_DMACR, reg);
return;
} else
@@ -1536,9 +1753,9 @@ igc_init_dmac(struct igc_adapter *adapter, u32 pba)
status = IGC_READ_REG(hw, IGC_STATUS);
if ((status & IGC_STATUS_2P5_SKU) &&
(!(status & IGC_STATUS_2P5_SKU_OVER)))
- reg |= ((adapter->dmac * 5) >> 6);
+ reg |= ((sc->dmac * 5) >> 6);
else
- reg |= (adapter->dmac >> 5);
+ reg |= (sc->dmac >> 5);
IGC_WRITE_REG(hw, IGC_DMACR, reg);
@@ -1574,21 +1791,21 @@ igc_init_dmac(struct igc_adapter *adapter, u32 pba)
/*********************************************************************
*
* Initialize the hardware to a configuration as specified by the
- * adapter structure.
+ * softc structure.
*
**********************************************************************/
static void
igc_reset(if_ctx_t ctx)
{
device_t dev = iflib_get_dev(ctx);
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
u32 rx_buffer_size;
u32 pba;
INIT_DEBUGOUT("igc_reset: begin");
/* Let the firmware know the OS is in control */
- igc_get_hw_control(adapter);
+ igc_get_hw_control(sc);
/*
* Packet Buffer Allocation (PBA)
@@ -1604,7 +1821,8 @@ igc_reset(if_ctx_t ctx)
* response (Rx) to Ethernet PAUSE frames.
* - High water mark should allow for at least two frames to be
* received after sending an XOFF.
- * - Low water mark works best when it is very near the high water mark.
+ * - Low water mark works best when it is very near the high water
+ * mark.
* This allows the receiver to restart by sending XON when it has
* drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There
@@ -1615,12 +1833,12 @@ igc_reset(if_ctx_t ctx)
*/
rx_buffer_size = (pba & 0xffff) << 10;
hw->fc.high_water = rx_buffer_size -
- roundup2(adapter->hw.mac.max_frame_size, 1024);
+ roundup2(sc->hw.mac.max_frame_size, 1024);
/* 16-byte granularity */
hw->fc.low_water = hw->fc.high_water - 16;
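/*
 * Editor's worked example, with an assumed 34 KB packet buffer for
 * illustration: rx_buffer_size = 34 << 10 = 34816 bytes; for a standard
 * 1522-byte max frame, roundup2(1522, 1024) = 2048, so high_water = 32768
 * and low_water = 32752 -- XOFF fires with two full frames of headroom and
 * XON almost as soon as the buffer starts draining. Standalone check:
 */
#include <stdio.h>

#define ROUNDUP2(x, y) (((x) + ((y) - 1)) & ~((y) - 1))	/* y: power of 2 */

int
main(void)
{
	unsigned int pba_kb = 34;		/* assumed example value */
	unsigned int max_frame = 1522;		/* ETHERMTU + hdr + FCS */
	unsigned int rx_buf = pba_kb << 10;
	unsigned int high = rx_buf - ROUNDUP2(max_frame, 1024);
	unsigned int low = high - 16;

	printf("high_water=%u low_water=%u\n", high, low);	/* 32768 32752 */
	return (0);
}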
- if (adapter->fc) /* locally set flow control value? */
- hw->fc.requested_mode = adapter->fc;
+ if (sc->fc) /* locally set flow control value? */
+ hw->fc.requested_mode = sc->fc;
else
hw->fc.requested_mode = igc_fc_full;
@@ -1639,7 +1857,10 @@ igc_reset(if_ctx_t ctx)
}
/* Setup DMA Coalescing */
- igc_init_dmac(adapter, pba);
+ igc_init_dmac(sc, pba);
+
+ /* Save off the final PBA in case it is needed elsewhere, e.g. AIM */
+ sc->pba = pba;
IGC_WRITE_REG(hw, IGC_VET, ETHERTYPE_VLAN);
igc_get_phy_info(hw);
@@ -1653,9 +1874,9 @@ igc_reset(if_ctx_t ctx)
#define RSSKEYLEN 10
static void
-igc_initialize_rss_mapping(struct igc_adapter *adapter)
+igc_initialize_rss_mapping(struct igc_softc *sc)
{
- struct igc_hw *hw = &adapter->hw;
+ struct igc_hw *hw = &sc->hw;
int i;
int queue_id;
u32 reta;
@@ -1692,9 +1913,9 @@ igc_initialize_rss_mapping(struct igc_adapter *adapter)
* the case so we don't go out of bounds
* indexing arrays and such.
*/
- queue_id = queue_id % adapter->rx_num_queues;
+ queue_id = queue_id % sc->rx_num_queues;
#else
- queue_id = (i % adapter->rx_num_queues);
+ queue_id = (i % sc->rx_num_queues);
#endif
/* Adjust if required */
queue_id = queue_id << shift;
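/*
 * Editor's note, not part of the diff: the modulo above spreads the
 * (conventionally 128) RETA slots round-robin across the active RX queues,
 * so the table simply wraps when there are fewer queues than slots; "shift"
 * is assumed to position the queue number within the per-entry field. A
 * one-line model of the wrap:
 */
static unsigned char
reta_slot_queue(int slot, int nqueues)
{
	return ((unsigned char)(slot % nqueues));	/* 0 .. nqueues-1 */
}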
@@ -1752,13 +1973,13 @@ static int
igc_setup_interface(if_ctx_t ctx)
{
if_t ifp = iflib_get_ifp(ctx);
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = adapter->shared;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
INIT_DEBUGOUT("igc_setup_interface: begin");
/* Single Queue */
- if (adapter->tx_num_queues == 1) {
+ if (sc->tx_num_queues == 1) {
if_setsendqlen(ifp, scctx->isc_ntxd[0] - 1);
if_setsendqready(ifp);
}
@@ -1767,62 +1988,66 @@ igc_setup_interface(if_ctx_t ctx)
* Specify the media types supported by this adapter and register
* callbacks to update media and link information
*/
- ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
- ifmedia_add(adapter->media, IFM_ETHER | IFM_2500_T, 0, NULL);
-
- ifmedia_add(adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(adapter->media, IFM_ETHER | IFM_AUTO);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_10_T, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
+
+ ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO);
return (0);
}
static int
-igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets)
+igc_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int ntxqs, int ntxqsets)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = adapter->shared;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
int error = IGC_SUCCESS;
struct igc_tx_queue *que;
int i, j;
- MPASS(adapter->tx_num_queues > 0);
- MPASS(adapter->tx_num_queues == ntxqsets);
+ MPASS(sc->tx_num_queues > 0);
+ MPASS(sc->tx_num_queues == ntxqsets);
/* First allocate the top level queue structs */
- if (!(adapter->tx_queues =
+ if (!(sc->tx_queues =
(struct igc_tx_queue *) malloc(sizeof(struct igc_tx_queue) *
- adapter->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ sc->tx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate queue memory\n");
return(ENOMEM);
}
- for (i = 0, que = adapter->tx_queues; i < adapter->tx_num_queues; i++, que++) {
+ for (i = 0, que = sc->tx_queues; i < sc->tx_num_queues; i++, que++) {
/* Set up some basics */
struct tx_ring *txr = &que->txr;
- txr->adapter = que->adapter = adapter;
+ txr->sc = que->sc = sc;
que->me = txr->me = i;
/* Allocate report status array */
- if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "failed to allocate rs_idxs memory\n");
+ if (!(txr->tx_rsq = (qidx_t *) malloc(sizeof(qidx_t) *
+ scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx),
+ "failed to allocate rs_idxs memory\n");
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
txr->tx_base = (struct igc_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
}
if (bootverbose)
device_printf(iflib_get_dev(ctx),
- "allocated for %d tx_queues\n", adapter->tx_num_queues);
+ "allocated for %d tx_queues\n", sc->tx_num_queues);
return (0);
fail:
igc_if_queues_free(ctx);
@@ -1830,40 +2055,42 @@ fail:
}
static int
-igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets)
+igc_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
+ int nrxqs, int nrxqsets)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
int error = IGC_SUCCESS;
struct igc_rx_queue *que;
int i;
- MPASS(adapter->rx_num_queues > 0);
- MPASS(adapter->rx_num_queues == nrxqsets);
+ MPASS(sc->rx_num_queues > 0);
+ MPASS(sc->rx_num_queues == nrxqsets);
/* First allocate the top level queue structs */
- if (!(adapter->rx_queues =
+ if (!(sc->rx_queues =
(struct igc_rx_queue *) malloc(sizeof(struct igc_rx_queue) *
- adapter->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(iflib_get_dev(ctx), "Unable to allocate queue memory\n");
+ sc->rx_num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(iflib_get_dev(ctx),
+ "Unable to allocate queue memory\n");
error = ENOMEM;
goto fail;
}
- for (i = 0, que = adapter->rx_queues; i < nrxqsets; i++, que++) {
+ for (i = 0, que = sc->rx_queues; i < nrxqsets; i++, que++) {
/* Set up some basics */
struct rx_ring *rxr = &que->rxr;
- rxr->adapter = que->adapter = adapter;
+ rxr->sc = que->sc = sc;
rxr->que = que;
que->me = rxr->me = i;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
rxr->rx_base = (union igc_rx_desc_extended *)vaddrs[i*nrxqs];
rxr->rx_paddr = paddrs[i*nrxqs];
}
if (bootverbose)
device_printf(iflib_get_dev(ctx),
- "allocated for %d rx_queues\n", adapter->rx_num_queues);
+ "allocated for %d rx_queues\n", sc->rx_num_queues);
return (0);
fail:
@@ -1874,12 +2101,12 @@ fail:
static void
igc_if_queues_free(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_tx_queue *tx_que = adapter->tx_queues;
- struct igc_rx_queue *rx_que = adapter->rx_queues;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_tx_queue *tx_que = sc->tx_queues;
+ struct igc_rx_queue *rx_que = sc->rx_queues;
if (tx_que != NULL) {
- for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
if (txr->tx_rsq == NULL)
break;
@@ -1887,19 +2114,17 @@ igc_if_queues_free(if_ctx_t ctx)
free(txr->tx_rsq, M_DEVBUF);
txr->tx_rsq = NULL;
}
- free(adapter->tx_queues, M_DEVBUF);
- adapter->tx_queues = NULL;
+ free(sc->tx_queues, M_DEVBUF);
+ sc->tx_queues = NULL;
}
if (rx_que != NULL) {
- free(adapter->rx_queues, M_DEVBUF);
- adapter->rx_queues = NULL;
+ free(sc->rx_queues, M_DEVBUF);
+ sc->rx_queues = NULL;
}
- igc_release_hw_control(adapter);
-
- if (adapter->mta != NULL) {
- free(adapter->mta, M_DEVBUF);
+ if (sc->mta != NULL) {
+ free(sc->mta, M_DEVBUF);
}
}
@@ -1911,20 +2136,20 @@ igc_if_queues_free(if_ctx_t ctx)
static void
igc_initialize_transmit_unit(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = adapter->shared;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
struct igc_tx_queue *que;
struct tx_ring *txr;
- struct igc_hw *hw = &adapter->hw;
+ struct igc_hw *hw = &sc->hw;
u32 tctl, txdctl = 0;
INIT_DEBUGOUT("igc_initialize_transmit_unit: begin");
- for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
+ for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
u64 bus_addr;
caddr_t offp, endp;
- que = &adapter->tx_queues[i];
+ que = &sc->tx_queues[i];
txr = &que->txr;
bus_addr = txr->tx_paddr;
@@ -1945,8 +2170,8 @@ igc_initialize_transmit_unit(if_ctx_t ctx)
IGC_WRITE_REG(hw, IGC_TDH(i), 0);
HW_DEBUGOUT2("Base = %x, Length = %x\n",
- IGC_READ_REG(&adapter->hw, IGC_TDBAL(i)),
- IGC_READ_REG(&adapter->hw, IGC_TDLEN(i)));
+ IGC_READ_REG(&sc->hw, IGC_TDBAL(i)),
+ IGC_READ_REG(&sc->hw, IGC_TDLEN(i)));
txdctl = 0; /* clear txdctl */
txdctl |= 0x1f; /* PTHRESH */
@@ -1960,13 +2185,13 @@ igc_initialize_transmit_unit(if_ctx_t ctx)
}
/* Program the Transmit Control Register */
- tctl = IGC_READ_REG(&adapter->hw, IGC_TCTL);
+ tctl = IGC_READ_REG(&sc->hw, IGC_TCTL);
tctl &= ~IGC_TCTL_CT;
tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
- (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
+ (IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
/* This write will effectively turn on the transmit unit. */
- IGC_WRITE_REG(&adapter->hw, IGC_TCTL, tctl);
+ IGC_WRITE_REG(&sc->hw, IGC_TCTL, tctl);
}
/*********************************************************************
@@ -1979,10 +2204,10 @@ igc_initialize_transmit_unit(if_ctx_t ctx)
static void
igc_initialize_receive_unit(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = adapter->shared;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
if_t ifp = iflib_get_ifp(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_hw *hw = &sc->hw;
struct igc_rx_queue *que;
int i;
u32 psize, rctl, rxcsum, srrctl = 0;
@@ -2015,40 +2240,34 @@ igc_initialize_receive_unit(if_ctx_t ctx)
if (!igc_disable_crc_stripping)
rctl |= IGC_RCTL_SECRC;
- /*
- * Set the interrupt throttling rate. Value is calculated
- * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
- */
- IGC_WRITE_REG(hw, IGC_ITR, DEFAULT_ITR);
-
rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
if (if_getcapenable(ifp) & IFCAP_RXCSUM) {
rxcsum |= IGC_RXCSUM_CRCOFL;
- if (adapter->tx_num_queues > 1)
+ if (sc->tx_num_queues > 1)
rxcsum |= IGC_RXCSUM_PCSD;
else
rxcsum |= IGC_RXCSUM_IPPCSE;
} else {
- if (adapter->tx_num_queues > 1)
+ if (sc->tx_num_queues > 1)
rxcsum |= IGC_RXCSUM_PCSD;
else
rxcsum &= ~IGC_RXCSUM_TUOFL;
}
IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
- if (adapter->rx_num_queues > 1)
- igc_initialize_rss_mapping(adapter);
+ if (sc->rx_num_queues > 1)
+ igc_initialize_rss_mapping(sc);
if (if_getmtu(ifp) > ETHERMTU) {
psize = scctx->isc_max_frame_size;
/* are we on a vlan? */
if (if_vlantrunkinuse(ifp))
psize += VLAN_TAG_SIZE;
- IGC_WRITE_REG(&adapter->hw, IGC_RLPML, psize);
+ IGC_WRITE_REG(&sc->hw, IGC_RLPML, psize);
}
/* Set maximum packet buffer len */
- srrctl |= (adapter->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
+ srrctl |= (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
IGC_SRRCTL_BSIZEPKT_SHIFT;
/* srrctl above overrides this but set the register to a sane value */
rctl |= IGC_RCTL_SZ_2048;
@@ -2059,14 +2278,14 @@ igc_initialize_receive_unit(if_ctx_t ctx)
*
* This drops frames rather than hanging the RX MAC for all queues.
*/
- if ((adapter->rx_num_queues > 1) &&
- (adapter->fc == igc_fc_none ||
- adapter->fc == igc_fc_rx_pause)) {
+ if ((sc->rx_num_queues > 1) &&
+ (sc->fc == igc_fc_none ||
+ sc->fc == igc_fc_rx_pause)) {
srrctl |= IGC_SRRCTL_DROP_EN;
}
/* Setup the Base and Length of the Rx Descriptor Rings */
- for (i = 0, que = adapter->rx_queues; i < adapter->rx_num_queues; i++, que++) {
+ for (i = 0, que = sc->rx_queues; i < sc->rx_num_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
u64 bus_addr = rxr->rx_paddr;
u32 rxdctl;
@@ -2079,11 +2298,9 @@ igc_initialize_receive_unit(if_ctx_t ctx)
#endif
IGC_WRITE_REG(hw, IGC_RDLEN(i),
- scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
- IGC_WRITE_REG(hw, IGC_RDBAH(i),
- (uint32_t)(bus_addr >> 32));
- IGC_WRITE_REG(hw, IGC_RDBAL(i),
- (uint32_t)bus_addr);
+ scctx->isc_nrxd[0] * sizeof(struct igc_rx_desc));
+ IGC_WRITE_REG(hw, IGC_RDBAH(i), (uint32_t)(bus_addr >> 32));
+ IGC_WRITE_REG(hw, IGC_RDBAL(i), (uint32_t)bus_addr);
IGC_WRITE_REG(hw, IGC_SRRCTL(i), srrctl);
/* Setup the Head and Tail Descriptor Pointers */
IGC_WRITE_REG(hw, IGC_RDH(i), 0);
@@ -2110,8 +2327,8 @@ igc_initialize_receive_unit(if_ctx_t ctx)
static void
igc_setup_vlan_hw_support(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
struct ifnet *ifp = iflib_get_ifp(ctx);
u32 reg;
@@ -2132,12 +2349,12 @@ igc_setup_vlan_hw_support(if_ctx_t ctx)
static void
igc_if_intr_enable(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
u32 mask;
- if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
- mask = (adapter->que_mask | adapter->link_mask);
+ if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
+ mask = (sc->que_mask | sc->link_mask);
IGC_WRITE_REG(hw, IGC_EIAC, mask);
IGC_WRITE_REG(hw, IGC_EIAM, mask);
IGC_WRITE_REG(hw, IGC_EIMS, mask);
@@ -2150,10 +2367,10 @@ igc_if_intr_enable(if_ctx_t ctx)
static void
igc_if_intr_disable(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
- struct igc_hw *hw = &adapter->hw;
+ struct igc_softc *sc = iflib_get_softc(ctx);
+ struct igc_hw *hw = &sc->hw;
- if (__predict_true(adapter->intr_type == IFLIB_INTR_MSIX)) {
+ if (__predict_true(sc->intr_type == IFLIB_INTR_MSIX)) {
IGC_WRITE_REG(hw, IGC_EIMC, 0xffffffff);
IGC_WRITE_REG(hw, IGC_EIAC, 0);
}
@@ -2168,15 +2385,15 @@ igc_if_intr_disable(if_ctx_t ctx)
* this means that the network i/f is open.
*/
static void
-igc_get_hw_control(struct igc_adapter *adapter)
+igc_get_hw_control(struct igc_softc *sc)
{
u32 ctrl_ext;
- if (adapter->vf_ifp)
+ if (sc->vf_ifp)
return;
- ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT);
- IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT,
+ ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
+ IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
ctrl_ext | IGC_CTRL_EXT_DRV_LOAD);
}
@@ -2187,12 +2404,12 @@ igc_get_hw_control(struct igc_adapter *adapter)
* f/w this means that the network i/f is closed.
*/
static void
-igc_release_hw_control(struct igc_adapter *adapter)
+igc_release_hw_control(struct igc_softc *sc)
{
u32 ctrl_ext;
- ctrl_ext = IGC_READ_REG(&adapter->hw, IGC_CTRL_EXT);
- IGC_WRITE_REG(&adapter->hw, IGC_CTRL_EXT,
+ ctrl_ext = IGC_READ_REG(&sc->hw, IGC_CTRL_EXT);
+ IGC_WRITE_REG(&sc->hw, IGC_CTRL_EXT,
ctrl_ext & ~IGC_CTRL_EXT_DRV_LOAD);
return;
}
@@ -2217,14 +2434,14 @@ igc_is_valid_ether_addr(u8 *addr)
static void
igc_get_wakeup(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
u16 eeprom_data = 0, apme_mask;
apme_mask = IGC_WUC_APME;
- eeprom_data = IGC_READ_REG(&adapter->hw, IGC_WUC);
+ eeprom_data = IGC_READ_REG(&sc->hw, IGC_WUC);
if (eeprom_data & apme_mask)
- adapter->wol = IGC_WUFC_LNKC;
+ sc->wol = IGC_WUFC_LNKC;
}
@@ -2234,14 +2451,13 @@ igc_get_wakeup(if_ctx_t ctx)
static void
igc_enable_wakeup(if_ctx_t ctx)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
device_t dev = iflib_get_dev(ctx);
if_t ifp = iflib_get_ifp(ctx);
int error = 0;
- u32 pmc, ctrl, rctl;
- u16 status;
+ u32 ctrl, rctl;
- if (pci_find_cap(dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(dev))
return;
/*
@@ -2249,37 +2465,34 @@ igc_enable_wakeup(if_ctx_t ctx)
* is set with all bits on by default.
*/
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) == 0)
- adapter->wol &= ~IGC_WUFC_MAG;
+ sc->wol &= ~IGC_WUFC_MAG;
if ((if_getcapenable(ifp) & IFCAP_WOL_UCAST) == 0)
- adapter->wol &= ~IGC_WUFC_EX;
+ sc->wol &= ~IGC_WUFC_EX;
if ((if_getcapenable(ifp) & IFCAP_WOL_MCAST) == 0)
- adapter->wol &= ~IGC_WUFC_MC;
+ sc->wol &= ~IGC_WUFC_MC;
else {
- rctl = IGC_READ_REG(&adapter->hw, IGC_RCTL);
+ rctl = IGC_READ_REG(&sc->hw, IGC_RCTL);
rctl |= IGC_RCTL_MPE;
- IGC_WRITE_REG(&adapter->hw, IGC_RCTL, rctl);
+ IGC_WRITE_REG(&sc->hw, IGC_RCTL, rctl);
}
- if (!(adapter->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
+ if (!(sc->wol & (IGC_WUFC_EX | IGC_WUFC_MAG | IGC_WUFC_MC)))
goto pme;
/* Advertise the wakeup capability */
- ctrl = IGC_READ_REG(&adapter->hw, IGC_CTRL);
+ ctrl = IGC_READ_REG(&sc->hw, IGC_CTRL);
ctrl |= IGC_CTRL_ADVD3WUC;
- IGC_WRITE_REG(&adapter->hw, IGC_CTRL, ctrl);
+ IGC_WRITE_REG(&sc->hw, IGC_CTRL, ctrl);
/* Enable wakeup by the MAC */
- IGC_WRITE_REG(&adapter->hw, IGC_WUC, IGC_WUC_PME_EN);
- IGC_WRITE_REG(&adapter->hw, IGC_WUFC, adapter->wol);
+ IGC_WRITE_REG(&sc->hw, IGC_WUC, IGC_WUC_PME_EN);
+ IGC_WRITE_REG(&sc->hw, IGC_WUFC, sc->wol);
pme:
- status = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
- status &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if (!error && (if_getcapenable(ifp) & IFCAP_WOL))
- status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(dev, pmc + PCIR_POWER_STATUS, status, 2);
+ pci_enable_pme(dev);
return;
}
@@ -2290,100 +2503,104 @@ pme:
*
**********************************************************************/
static void
-igc_update_stats_counters(struct igc_adapter *adapter)
-{
- u64 prev_xoffrxc = adapter->stats.xoffrxc;
-
- adapter->stats.crcerrs += IGC_READ_REG(&adapter->hw, IGC_CRCERRS);
- adapter->stats.mpc += IGC_READ_REG(&adapter->hw, IGC_MPC);
- adapter->stats.scc += IGC_READ_REG(&adapter->hw, IGC_SCC);
- adapter->stats.ecol += IGC_READ_REG(&adapter->hw, IGC_ECOL);
-
- adapter->stats.mcc += IGC_READ_REG(&adapter->hw, IGC_MCC);
- adapter->stats.latecol += IGC_READ_REG(&adapter->hw, IGC_LATECOL);
- adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_COLC);
- adapter->stats.colc += IGC_READ_REG(&adapter->hw, IGC_RERC);
- adapter->stats.dc += IGC_READ_REG(&adapter->hw, IGC_DC);
- adapter->stats.rlec += IGC_READ_REG(&adapter->hw, IGC_RLEC);
- adapter->stats.xonrxc += IGC_READ_REG(&adapter->hw, IGC_XONRXC);
- adapter->stats.xontxc += IGC_READ_REG(&adapter->hw, IGC_XONTXC);
- adapter->stats.xoffrxc += IGC_READ_REG(&adapter->hw, IGC_XOFFRXC);
+igc_update_stats_counters(struct igc_softc *sc)
+{
+ u64 prev_xoffrxc = sc->stats.xoffrxc;
+
+ sc->stats.crcerrs += IGC_READ_REG(&sc->hw, IGC_CRCERRS);
+ sc->stats.mpc += IGC_READ_REG(&sc->hw, IGC_MPC);
+ sc->stats.scc += IGC_READ_REG(&sc->hw, IGC_SCC);
+ sc->stats.ecol += IGC_READ_REG(&sc->hw, IGC_ECOL);
+
+ sc->stats.mcc += IGC_READ_REG(&sc->hw, IGC_MCC);
+ sc->stats.latecol += IGC_READ_REG(&sc->hw, IGC_LATECOL);
+ sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_COLC);
+ sc->stats.colc += IGC_READ_REG(&sc->hw, IGC_RERC);
+ sc->stats.dc += IGC_READ_REG(&sc->hw, IGC_DC);
+ sc->stats.rlec += IGC_READ_REG(&sc->hw, IGC_RLEC);
+ sc->stats.xonrxc += IGC_READ_REG(&sc->hw, IGC_XONRXC);
+ sc->stats.xontxc += IGC_READ_REG(&sc->hw, IGC_XONTXC);
+ sc->stats.xoffrxc += IGC_READ_REG(&sc->hw, IGC_XOFFRXC);
/*
* For watchdog management we need to know if we have been
* paused during the last interval, so capture that here.
*/
- if (adapter->stats.xoffrxc != prev_xoffrxc)
- adapter->shared->isc_pause_frames = 1;
- adapter->stats.xofftxc += IGC_READ_REG(&adapter->hw, IGC_XOFFTXC);
- adapter->stats.fcruc += IGC_READ_REG(&adapter->hw, IGC_FCRUC);
- adapter->stats.prc64 += IGC_READ_REG(&adapter->hw, IGC_PRC64);
- adapter->stats.prc127 += IGC_READ_REG(&adapter->hw, IGC_PRC127);
- adapter->stats.prc255 += IGC_READ_REG(&adapter->hw, IGC_PRC255);
- adapter->stats.prc511 += IGC_READ_REG(&adapter->hw, IGC_PRC511);
- adapter->stats.prc1023 += IGC_READ_REG(&adapter->hw, IGC_PRC1023);
- adapter->stats.prc1522 += IGC_READ_REG(&adapter->hw, IGC_PRC1522);
- adapter->stats.tlpic += IGC_READ_REG(&adapter->hw, IGC_TLPIC);
- adapter->stats.rlpic += IGC_READ_REG(&adapter->hw, IGC_RLPIC);
- adapter->stats.gprc += IGC_READ_REG(&adapter->hw, IGC_GPRC);
- adapter->stats.bprc += IGC_READ_REG(&adapter->hw, IGC_BPRC);
- adapter->stats.mprc += IGC_READ_REG(&adapter->hw, IGC_MPRC);
- adapter->stats.gptc += IGC_READ_REG(&adapter->hw, IGC_GPTC);
+ if (sc->stats.xoffrxc != prev_xoffrxc)
+ sc->shared->isc_pause_frames = 1;
+ sc->stats.xofftxc += IGC_READ_REG(&sc->hw, IGC_XOFFTXC);
+ sc->stats.fcruc += IGC_READ_REG(&sc->hw, IGC_FCRUC);
+ sc->stats.prc64 += IGC_READ_REG(&sc->hw, IGC_PRC64);
+ sc->stats.prc127 += IGC_READ_REG(&sc->hw, IGC_PRC127);
+ sc->stats.prc255 += IGC_READ_REG(&sc->hw, IGC_PRC255);
+ sc->stats.prc511 += IGC_READ_REG(&sc->hw, IGC_PRC511);
+ sc->stats.prc1023 += IGC_READ_REG(&sc->hw, IGC_PRC1023);
+ sc->stats.prc1522 += IGC_READ_REG(&sc->hw, IGC_PRC1522);
+ sc->stats.tlpic += IGC_READ_REG(&sc->hw, IGC_TLPIC);
+ sc->stats.rlpic += IGC_READ_REG(&sc->hw, IGC_RLPIC);
+ sc->stats.gprc += IGC_READ_REG(&sc->hw, IGC_GPRC);
+ sc->stats.bprc += IGC_READ_REG(&sc->hw, IGC_BPRC);
+ sc->stats.mprc += IGC_READ_REG(&sc->hw, IGC_MPRC);
+ sc->stats.gptc += IGC_READ_REG(&sc->hw, IGC_GPTC);
/* For the 64-bit byte counters the low dword must be read first. */
/* Both registers clear on the read of the high dword */
- adapter->stats.gorc += IGC_READ_REG(&adapter->hw, IGC_GORCL) +
- ((u64)IGC_READ_REG(&adapter->hw, IGC_GORCH) << 32);
- adapter->stats.gotc += IGC_READ_REG(&adapter->hw, IGC_GOTCL) +
- ((u64)IGC_READ_REG(&adapter->hw, IGC_GOTCH) << 32);
-
- adapter->stats.rnbc += IGC_READ_REG(&adapter->hw, IGC_RNBC);
- adapter->stats.ruc += IGC_READ_REG(&adapter->hw, IGC_RUC);
- adapter->stats.rfc += IGC_READ_REG(&adapter->hw, IGC_RFC);
- adapter->stats.roc += IGC_READ_REG(&adapter->hw, IGC_ROC);
- adapter->stats.rjc += IGC_READ_REG(&adapter->hw, IGC_RJC);
-
- adapter->stats.tor += IGC_READ_REG(&adapter->hw, IGC_TORH);
- adapter->stats.tot += IGC_READ_REG(&adapter->hw, IGC_TOTH);
-
- adapter->stats.tpr += IGC_READ_REG(&adapter->hw, IGC_TPR);
- adapter->stats.tpt += IGC_READ_REG(&adapter->hw, IGC_TPT);
- adapter->stats.ptc64 += IGC_READ_REG(&adapter->hw, IGC_PTC64);
- adapter->stats.ptc127 += IGC_READ_REG(&adapter->hw, IGC_PTC127);
- adapter->stats.ptc255 += IGC_READ_REG(&adapter->hw, IGC_PTC255);
- adapter->stats.ptc511 += IGC_READ_REG(&adapter->hw, IGC_PTC511);
- adapter->stats.ptc1023 += IGC_READ_REG(&adapter->hw, IGC_PTC1023);
- adapter->stats.ptc1522 += IGC_READ_REG(&adapter->hw, IGC_PTC1522);
- adapter->stats.mptc += IGC_READ_REG(&adapter->hw, IGC_MPTC);
- adapter->stats.bptc += IGC_READ_REG(&adapter->hw, IGC_BPTC);
+ sc->stats.gorc += IGC_READ_REG(&sc->hw, IGC_GORCL) +
+ ((u64)IGC_READ_REG(&sc->hw, IGC_GORCH) << 32);
+ sc->stats.gotc += IGC_READ_REG(&sc->hw, IGC_GOTCL) +
+ ((u64)IGC_READ_REG(&sc->hw, IGC_GOTCH) << 32);
+
+ sc->stats.rnbc += IGC_READ_REG(&sc->hw, IGC_RNBC);
+ sc->stats.ruc += IGC_READ_REG(&sc->hw, IGC_RUC);
+ sc->stats.rfc += IGC_READ_REG(&sc->hw, IGC_RFC);
+ sc->stats.roc += IGC_READ_REG(&sc->hw, IGC_ROC);
+ sc->stats.rjc += IGC_READ_REG(&sc->hw, IGC_RJC);
+
+ sc->stats.mgprc += IGC_READ_REG(&sc->hw, IGC_MGTPRC);
+ sc->stats.mgpdc += IGC_READ_REG(&sc->hw, IGC_MGTPDC);
+ sc->stats.mgptc += IGC_READ_REG(&sc->hw, IGC_MGTPTC);
+
+ sc->stats.tor += IGC_READ_REG(&sc->hw, IGC_TORH);
+ sc->stats.tot += IGC_READ_REG(&sc->hw, IGC_TOTH);
+
+ sc->stats.tpr += IGC_READ_REG(&sc->hw, IGC_TPR);
+ sc->stats.tpt += IGC_READ_REG(&sc->hw, IGC_TPT);
+ sc->stats.ptc64 += IGC_READ_REG(&sc->hw, IGC_PTC64);
+ sc->stats.ptc127 += IGC_READ_REG(&sc->hw, IGC_PTC127);
+ sc->stats.ptc255 += IGC_READ_REG(&sc->hw, IGC_PTC255);
+ sc->stats.ptc511 += IGC_READ_REG(&sc->hw, IGC_PTC511);
+ sc->stats.ptc1023 += IGC_READ_REG(&sc->hw, IGC_PTC1023);
+ sc->stats.ptc1522 += IGC_READ_REG(&sc->hw, IGC_PTC1522);
+ sc->stats.mptc += IGC_READ_REG(&sc->hw, IGC_MPTC);
+ sc->stats.bptc += IGC_READ_REG(&sc->hw, IGC_BPTC);
/* Interrupt Counts */
- adapter->stats.iac += IGC_READ_REG(&adapter->hw, IGC_IAC);
- adapter->stats.rxdmtc += IGC_READ_REG(&adapter->hw, IGC_RXDMTC);
+ sc->stats.iac += IGC_READ_REG(&sc->hw, IGC_IAC);
+ sc->stats.rxdmtc += IGC_READ_REG(&sc->hw, IGC_RXDMTC);
- adapter->stats.algnerrc += IGC_READ_REG(&adapter->hw, IGC_ALGNERRC);
- adapter->stats.tncrs += IGC_READ_REG(&adapter->hw, IGC_TNCRS);
- adapter->stats.htdpmc += IGC_READ_REG(&adapter->hw, IGC_HTDPMC);
- adapter->stats.tsctc += IGC_READ_REG(&adapter->hw, IGC_TSCTC);
+ sc->stats.algnerrc += IGC_READ_REG(&sc->hw, IGC_ALGNERRC);
+ sc->stats.tncrs += IGC_READ_REG(&sc->hw, IGC_TNCRS);
+ sc->stats.htdpmc += IGC_READ_REG(&sc->hw, IGC_HTDPMC);
+ sc->stats.tsctc += IGC_READ_REG(&sc->hw, IGC_TSCTC);
}
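
[The low-dword-first rule in the comment above matters because the hardware latches the 64-bit pair on the low read and clears both registers on the high read. A hedged standalone sketch of the composition, with the MMIO access stubbed out (fake_read() and its values are invented for illustration, not driver symbols):

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for IGC_READ_REG(); the real driver does an MMIO read. */
	static uint32_t
	fake_read(int high)
	{
		return (high ? 0x1 : 0x89abcdef);
	}

	int
	main(void)
	{
		/* Read low first; the high read clears the hardware pair. */
		uint64_t octets = fake_read(0);
		octets |= (uint64_t)fake_read(1) << 32;
		printf("gorc += 0x%llx\n", (unsigned long long)octets);
		return (0);
	}
]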
static uint64_t
igc_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
- struct igc_adapter *adapter = iflib_get_softc(ctx);
+ struct igc_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_COLLISIONS:
- return (adapter->stats.colc);
+ return (sc->stats.colc);
case IFCOUNTER_IERRORS:
- return (adapter->dropped_pkts + adapter->stats.rxerrc +
- adapter->stats.crcerrs + adapter->stats.algnerrc +
- adapter->stats.ruc + adapter->stats.roc +
- adapter->stats.mpc + adapter->stats.htdpmc);
+ return (sc->dropped_pkts + sc->stats.rxerrc +
+ sc->stats.crcerrs + sc->stats.algnerrc +
+ sc->stats.ruc + sc->stats.roc +
+ sc->stats.mpc + sc->stats.htdpmc);
case IFCOUNTER_OERRORS:
- return (adapter->stats.ecol + adapter->stats.latecol +
- adapter->watchdog_events);
+ return (if_get_counter_default(ifp, cnt) +
+ sc->stats.ecol + sc->stats.latecol + sc->watchdog_events);
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -2411,28 +2628,62 @@ igc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
static int
igc_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
u_int val;
- adapter = oidp->oid_arg1;
- val = IGC_READ_REG(&adapter->hw, oidp->oid_arg2);
+ sc = oidp->oid_arg1;
+ val = IGC_READ_REG(&sc->hw, oidp->oid_arg2);
return (sysctl_handle_int(oidp, &val, 0, req));
}
+/* Per-queue holdoff interrupt rate handler */
+static int
+igc_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_rx_queue *rque;
+ struct igc_tx_queue *tque;
+ struct igc_hw *hw;
+ int error;
+ u32 reg, usec, rate;
+
+ bool tx = oidp->oid_arg2;
+
+ if (tx) {
+ tque = oidp->oid_arg1;
+ hw = &tque->sc->hw;
+ reg = IGC_READ_REG(hw, IGC_EITR(tque->me));
+ } else {
+ rque = oidp->oid_arg1;
+ hw = &rque->sc->hw;
+ reg = IGC_READ_REG(hw, IGC_EITR(rque->msix));
+ }
+
+ usec = (reg & IGC_QVECTOR_MASK);
+ if (usec > 0)
+ rate = IGC_INTS_TO_EITR(usec);
+ else
+ rate = 0;
+
+ error = sysctl_handle_int(oidp, &rate, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return 0;
+}
+
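
[As an aside, the EITR arithmetic above round-trips through one macro: IGC_INTS_TO_EITR(), added to if_igc.h later in this change, converts a target interrupt rate into the register image, and applying it again to the masked field recovers an approximation of the rate. A minimal userland sketch, with the IGC_* constants copied out so it compiles standalone (illustrative only, not part of the driver):

	#include <stdint.h>
	#include <stdio.h>

	#define EITR_DIVIDEND	1000000	/* one second, in microseconds */
	#define EITR_SHIFT	2	/* interval field starts at bit 2 */
	#define QVECTOR_MASK	0x7FFC	/* bits 2..14 carry the interval */

	/* Same shape as IGC_INTS_TO_EITR(): rate -> register image. */
	static uint32_t
	ints_to_eitr(uint32_t i)
	{
		return (((EITR_DIVIDEND / i) & QVECTOR_MASK) << EITR_SHIFT);
	}

	int
	main(void)
	{
		uint32_t reg = ints_to_eitr(8000);	/* IGC_INTS_DEFAULT */
		uint32_t field = reg & QVECTOR_MASK;	/* as the handler reads it */

		/* Prints reg=0x01f0 field=496 rate=8064, close to the target. */
		printf("reg=0x%04x field=%u rate=%u\n", reg, field,
		    field ? ints_to_eitr(field) : 0);
		return (0);
	}
]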
/*
* Add sysctl variables, one per statistic, to the system.
*/
static void
-igc_add_hw_stats(struct igc_adapter *adapter)
+igc_add_hw_stats(struct igc_softc *sc)
{
- device_t dev = iflib_get_dev(adapter->ctx);
- struct igc_tx_queue *tx_que = adapter->tx_queues;
- struct igc_rx_queue *rx_que = adapter->rx_queues;
+ device_t dev = iflib_get_dev(sc->ctx);
+ struct igc_tx_queue *tx_que = sc->tx_queues;
+ struct igc_rx_queue *rx_que = sc->rx_queues;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct igc_hw_stats *stats = &adapter->stats;
+ struct igc_hw_stats *stats = &sc->stats;
struct sysctl_oid *stat_node, *queue_node, *int_node;
struct sysctl_oid_list *stat_list, *queue_list, *int_list;
@@ -2442,228 +2693,339 @@ igc_add_hw_stats(struct igc_adapter *adapter)
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
- CTLFLAG_RD, &adapter->dropped_pkts,
- "Driver dropped packets");
+ CTLFLAG_RD, &sc->dropped_pkts,
+ "Driver dropped packets");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
- CTLFLAG_RD, &adapter->link_irq,
- "Link MSI-X IRQ Handled");
+ CTLFLAG_RD, &sc->link_irq,
+ "Link MSI-X IRQ Handled");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
- CTLFLAG_RD, &adapter->rx_overruns,
- "RX overruns");
+ CTLFLAG_RD, &sc->rx_overruns,
+ "RX overruns");
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
- CTLFLAG_RD, &adapter->watchdog_events,
- "Watchdog timeouts");
+ CTLFLAG_RD, &sc->watchdog_events,
+ "Watchdog timeouts");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "device_control",
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
- adapter, IGC_CTRL, igc_sysctl_reg_handler, "IU",
+ sc, IGC_CTRL, igc_sysctl_reg_handler, "IU",
"Device Control Register");
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_control",
CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
- adapter, IGC_RCTL, igc_sysctl_reg_handler, "IU",
+ sc, IGC_RCTL, igc_sysctl_reg_handler, "IU",
"Receiver Control Register");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
- CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
- "Flow Control High Watermark");
+ CTLFLAG_RD, &sc->hw.fc.high_water, 0,
+ "Flow Control High Watermark");
SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
- CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
- "Flow Control Low Watermark");
+ CTLFLAG_RD, &sc->hw.fc.low_water, 0,
+ "Flow Control Low Watermark");
- for (int i = 0; i < adapter->tx_num_queues; i++, tx_que++) {
+ for (int i = 0; i < sc->tx_num_queues; i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_tx_%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "TX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+ CTLTYPE_UINT | CTLFLAG_RD, tx_que,
+ true, igc_sysctl_interrupt_rate_handler, "IU",
+ "Interrupt Rate");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
IGC_TDH(txr->me), igc_sysctl_reg_handler, "IU",
"Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
IGC_TDT(txr->me), igc_sysctl_reg_handler, "IU",
"Transmit Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "tx_irq",
- CTLFLAG_RD, &txr->tx_irq,
- "Queue MSI-X Transmit Interrupts");
+ CTLFLAG_RD, &txr->tx_irq,
+ "Queue MSI-X Transmit Interrupts");
}
- for (int j = 0; j < adapter->rx_num_queues; j++, rx_que++) {
+ for (int j = 0; j < sc->rx_num_queues; j++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue_rx_%d", j);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "RX Queue Name");
queue_list = SYSCTL_CHILDREN(queue_node);
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
+ CTLTYPE_UINT | CTLFLAG_RD, rx_que,
+ false, igc_sysctl_interrupt_rate_handler, "IU",
+ "Interrupt Rate");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
IGC_RDH(rxr->me), igc_sysctl_reg_handler, "IU",
"Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, adapter,
+ CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc,
IGC_RDT(rxr->me), igc_sysctl_reg_handler, "IU",
"Receive Descriptor Tail");
SYSCTL_ADD_ULONG(ctx, queue_list, OID_AUTO, "rx_irq",
- CTLFLAG_RD, &rxr->rx_irq,
- "Queue MSI-X Receive Interrupts");
+ CTLFLAG_RD, &rxr->rx_irq,
+ "Queue MSI-X Receive Interrupts");
}
/* MAC stats get their own sub node */
-
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "excess_coll",
- CTLFLAG_RD, &stats->ecol,
- "Excessive collisions");
+ CTLFLAG_RD, &stats->ecol,
+ "Excessive collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "single_coll",
- CTLFLAG_RD, &stats->scc,
- "Single collisions");
+ CTLFLAG_RD, &stats->scc,
+ "Single collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
- CTLFLAG_RD, &stats->mcc,
- "Multiple collisions");
+ CTLFLAG_RD, &stats->mcc,
+ "Multiple collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "late_coll",
- CTLFLAG_RD, &stats->latecol,
- "Late collisions");
+ CTLFLAG_RD, &stats->latecol,
+ "Late collisions");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "collision_count",
- CTLFLAG_RD, &stats->colc,
- "Collision Count");
+ CTLFLAG_RD, &stats->colc,
+ "Collision Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
- CTLFLAG_RD, &adapter->stats.symerrs,
- "Symbol Errors");
+ CTLFLAG_RD, &sc->stats.symerrs,
+ "Symbol Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
- CTLFLAG_RD, &adapter->stats.sec,
- "Sequence Errors");
+ CTLFLAG_RD, &sc->stats.sec,
+ "Sequence Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "defer_count",
- CTLFLAG_RD, &adapter->stats.dc,
- "Defer Count");
+ CTLFLAG_RD, &sc->stats.dc,
+ "Defer Count");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "missed_packets",
- CTLFLAG_RD, &adapter->stats.mpc,
- "Missed Packets");
+ CTLFLAG_RD, &sc->stats.mpc,
+ "Missed Packets");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_length_errors",
+ CTLFLAG_RD, &sc->stats.rlec,
+ "Receive Length Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
- CTLFLAG_RD, &adapter->stats.rnbc,
- "Receive No Buffers");
+ CTLFLAG_RD, &sc->stats.rnbc,
+ "Receive No Buffers");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
- CTLFLAG_RD, &adapter->stats.ruc,
- "Receive Undersize");
+ CTLFLAG_RD, &sc->stats.ruc,
+ "Receive Undersize");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
- CTLFLAG_RD, &adapter->stats.rfc,
- "Fragmented Packets Received ");
+ CTLFLAG_RD, &sc->stats.rfc,
+ "Fragmented Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
- CTLFLAG_RD, &adapter->stats.roc,
- "Oversized Packets Received");
+ CTLFLAG_RD, &sc->stats.roc,
+ "Oversized Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
- CTLFLAG_RD, &adapter->stats.rjc,
- "Recevied Jabber");
+ CTLFLAG_RD, &sc->stats.rjc,
+ "Recevied Jabber");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "recv_errs",
- CTLFLAG_RD, &adapter->stats.rxerrc,
- "Receive Errors");
+ CTLFLAG_RD, &sc->stats.rxerrc,
+ "Receive Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "crc_errs",
- CTLFLAG_RD, &adapter->stats.crcerrs,
- "CRC errors");
+ CTLFLAG_RD, &sc->stats.crcerrs,
+ "CRC errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
- CTLFLAG_RD, &adapter->stats.algnerrc,
- "Alignment Errors");
+ CTLFLAG_RD, &sc->stats.algnerrc,
+ "Alignment Errors");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
- CTLFLAG_RD, &adapter->stats.xonrxc,
- "XON Received");
+ CTLFLAG_RD, &sc->stats.xonrxc,
+ "XON Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xon_txd",
- CTLFLAG_RD, &adapter->stats.xontxc,
- "XON Transmitted");
+ CTLFLAG_RD, &sc->stats.xontxc,
+ "XON Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
- CTLFLAG_RD, &adapter->stats.xoffrxc,
- "XOFF Received");
+ CTLFLAG_RD, &sc->stats.xoffrxc,
+ "XOFF Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
- CTLFLAG_RD, &adapter->stats.xofftxc,
- "XOFF Transmitted");
+ CTLFLAG_RD, &sc->stats.xofftxc,
+ "XOFF Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "unsupported_fc_recvd",
+ CTLFLAG_RD, &sc->stats.fcruc,
+ "Unsupported Flow Control Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_recvd",
+ CTLFLAG_RD, &sc->stats.mgprc,
+ "Management Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_drop",
+ CTLFLAG_RD, &sc->stats.mgpdc,
+ "Management Packets Dropped");
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mgmt_pkts_txd",
+ CTLFLAG_RD, &sc->stats.mgptc,
+ "Management Packets Transmitted");
/* Packet Reception Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
- CTLFLAG_RD, &adapter->stats.tpr,
- "Total Packets Received ");
+ CTLFLAG_RD, &sc->stats.tpr,
+ "Total Packets Received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
- CTLFLAG_RD, &adapter->stats.gprc,
- "Good Packets Received");
+ CTLFLAG_RD, &sc->stats.gprc,
+ "Good Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
- CTLFLAG_RD, &adapter->stats.bprc,
- "Broadcast Packets Received");
+ CTLFLAG_RD, &sc->stats.bprc,
+ "Broadcast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
- CTLFLAG_RD, &adapter->stats.mprc,
- "Multicast Packets Received");
+ CTLFLAG_RD, &sc->stats.mprc,
+ "Multicast Packets Received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
- CTLFLAG_RD, &adapter->stats.prc64,
- "64 byte frames received ");
+ CTLFLAG_RD, &sc->stats.prc64,
+ "64 byte frames received ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
- CTLFLAG_RD, &adapter->stats.prc127,
- "65-127 byte frames received");
+ CTLFLAG_RD, &sc->stats.prc127,
+ "65-127 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
- CTLFLAG_RD, &adapter->stats.prc255,
- "128-255 byte frames received");
+ CTLFLAG_RD, &sc->stats.prc255,
+ "128-255 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
- CTLFLAG_RD, &adapter->stats.prc511,
- "256-511 byte frames received");
+ CTLFLAG_RD, &sc->stats.prc511,
+ "256-511 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
- CTLFLAG_RD, &adapter->stats.prc1023,
- "512-1023 byte frames received");
+ CTLFLAG_RD, &sc->stats.prc1023,
+ "512-1023 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
- CTLFLAG_RD, &adapter->stats.prc1522,
- "1023-1522 byte frames received");
+ CTLFLAG_RD, &sc->stats.prc1522,
+ "1023-1522 byte frames received");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
- CTLFLAG_RD, &adapter->stats.gorc,
- "Good Octets Received");
+ CTLFLAG_RD, &sc->stats.gorc,
+ "Good Octets Received");
/* Packet Transmission Stats */
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
- CTLFLAG_RD, &adapter->stats.gotc,
- "Good Octets Transmitted");
+ CTLFLAG_RD, &sc->stats.gotc,
+ "Good Octets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
- CTLFLAG_RD, &adapter->stats.tpt,
- "Total Packets Transmitted");
+ CTLFLAG_RD, &sc->stats.tpt,
+ "Total Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
- CTLFLAG_RD, &adapter->stats.gptc,
- "Good Packets Transmitted");
+ CTLFLAG_RD, &sc->stats.gptc,
+ "Good Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
- CTLFLAG_RD, &adapter->stats.bptc,
- "Broadcast Packets Transmitted");
+ CTLFLAG_RD, &sc->stats.bptc,
+ "Broadcast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
- CTLFLAG_RD, &adapter->stats.mptc,
- "Multicast Packets Transmitted");
+ CTLFLAG_RD, &sc->stats.mptc,
+ "Multicast Packets Transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
- CTLFLAG_RD, &adapter->stats.ptc64,
- "64 byte frames transmitted ");
+ CTLFLAG_RD, &sc->stats.ptc64,
+ "64 byte frames transmitted ");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
- CTLFLAG_RD, &adapter->stats.ptc127,
- "65-127 byte frames transmitted");
+ CTLFLAG_RD, &sc->stats.ptc127,
+ "65-127 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
- CTLFLAG_RD, &adapter->stats.ptc255,
- "128-255 byte frames transmitted");
+ CTLFLAG_RD, &sc->stats.ptc255,
+ "128-255 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
- CTLFLAG_RD, &adapter->stats.ptc511,
- "256-511 byte frames transmitted");
+ CTLFLAG_RD, &sc->stats.ptc511,
+ "256-511 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
- CTLFLAG_RD, &adapter->stats.ptc1023,
- "512-1023 byte frames transmitted");
+ CTLFLAG_RD, &sc->stats.ptc1023,
+ "512-1023 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
- CTLFLAG_RD, &adapter->stats.ptc1522,
- "1024-1522 byte frames transmitted");
+ CTLFLAG_RD, &sc->stats.ptc1522,
+ "1024-1522 byte frames transmitted");
SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, "tso_txd",
- CTLFLAG_RD, &adapter->stats.tsctc,
- "TSO Contexts Transmitted");
+ CTLFLAG_RD, &sc->stats.tsctc,
+ "TSO Contexts Transmitted");
/* Interrupt Stats */
-
int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Interrupt Statistics");
int_list = SYSCTL_CHILDREN(int_node);
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "asserts",
- CTLFLAG_RD, &adapter->stats.iac,
- "Interrupt Assertion Count");
+ CTLFLAG_RD, &sc->stats.iac,
+ "Interrupt Assertion Count");
SYSCTL_ADD_UQUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
- CTLFLAG_RD, &adapter->stats.rxdmtc,
- "Rx Desc Min Thresh Count");
+ CTLFLAG_RD, &sc->stats.rxdmtc,
+ "Rx Desc Min Thresh Count");
+}
+
+static void
+igc_fw_version(struct igc_softc *sc)
+{
+ struct igc_hw *hw = &sc->hw;
+ struct igc_fw_version *fw_ver = &sc->fw_ver;
+
+ *fw_ver = (struct igc_fw_version){0};
+
+ igc_get_fw_version(hw, fw_ver);
+}
+
+static void
+igc_sbuf_fw_version(struct igc_fw_version *fw_ver, struct sbuf *buf)
+{
+ const char *space = "";
+
+ if (fw_ver->eep_major || fw_ver->eep_minor || fw_ver->eep_build) {
+ sbuf_printf(buf, "EEPROM V%d.%d-%d", fw_ver->eep_major,
+ fw_ver->eep_minor, fw_ver->eep_build);
+ space = " ";
+ }
+
+ if (fw_ver->invm_major || fw_ver->invm_minor ||
+ fw_ver->invm_img_type) {
+ sbuf_printf(buf, "%sNVM V%d.%d imgtype%d",
+ space, fw_ver->invm_major, fw_ver->invm_minor,
+ fw_ver->invm_img_type);
+ space = " ";
+ }
+
+ if (fw_ver->or_valid) {
+ sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
+ space, fw_ver->or_major, fw_ver->or_build,
+ fw_ver->or_patch);
+ space = " ";
+ }
+
+ if (fw_ver->etrack_id)
+ sbuf_printf(buf, "%seTrack 0x%08x", space, fw_ver->etrack_id);
+}
+
+static void
+igc_print_fw_version(struct igc_softc *sc)
+{
+ device_t dev = sc->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_auto();
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return;
+ }
+
+ igc_sbuf_fw_version(&sc->fw_ver, buf);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ else if (sbuf_len(buf))
+ device_printf(dev, "%s\n", sbuf_data(buf));
+
+ sbuf_delete(buf);
+}
+
+static int
+igc_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_softc *sc = (struct igc_softc *)arg1;
+ device_t dev = sc->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ igc_sbuf_fw_version(&sc->fw_ver, buf);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+
+ return (0);
}
/**********************************************************************
@@ -2676,7 +3038,7 @@ igc_add_hw_stats(struct igc_adapter *adapter)
static int
igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter = (struct igc_adapter *)arg1;
+ struct igc_softc *sc = (struct igc_softc *)arg1;
int error;
int result;
@@ -2692,13 +3054,13 @@ igc_sysctl_nvm_info(SYSCTL_HANDLER_ARGS)
* the screen.
*/
if (result == 1)
- igc_print_nvm_info(adapter);
+ igc_print_nvm_info(sc);
return (error);
}
static void
-igc_print_nvm_info(struct igc_adapter *adapter)
+igc_print_nvm_info(struct igc_softc *sc)
{
u16 eeprom_data;
int i, j, row = 0;
@@ -2711,67 +3073,49 @@ igc_print_nvm_info(struct igc_adapter *adapter)
j = 0; ++row;
printf("\n0x00%x0 ",row);
}
- igc_read_nvm(&adapter->hw, i, 1, &eeprom_data);
+ igc_read_nvm(&sc->hw, i, 1, &eeprom_data);
printf("%04x ", eeprom_data);
}
printf("\n");
}
static int
-igc_sysctl_int_delay(SYSCTL_HANDLER_ARGS)
+igc_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
{
- struct igc_int_delay_info *info;
- struct igc_adapter *adapter;
- u32 regval;
- int error, usecs, ticks;
+ struct igc_softc *sc;
+ u32 reg, val, shift;
+ int error, mask;
- info = (struct igc_int_delay_info *) arg1;
- usecs = info->value;
- error = sysctl_handle_int(oidp, &usecs, 0, req);
- if (error != 0 || req->newptr == NULL)
- return (error);
- if (usecs < 0 || usecs > IGC_TICKS_TO_USECS(65535))
- return (EINVAL);
- info->value = usecs;
- ticks = IGC_USECS_TO_TICKS(usecs);
- if (info->offset == IGC_ITR) /* units are 256ns here */
- ticks *= 4;
-
- adapter = info->adapter;
-
- regval = IGC_READ_OFFSET(&adapter->hw, info->offset);
- regval = (regval & ~0xffff) | (ticks & 0xffff);
- /* Handle a few special cases. */
- switch (info->offset) {
- case IGC_RDTR:
+ sc = oidp->oid_arg1;
+ switch (oidp->oid_arg2) {
+ case 0:
+ reg = IGC_DTXTCPFLGL;
+ shift = 0;
break;
- case IGC_TIDV:
- if (ticks == 0) {
- adapter->txd_cmd &= ~IGC_TXD_CMD_IDE;
- /* Don't write 0 into the TIDV register. */
- regval++;
- } else
- adapter->txd_cmd |= IGC_TXD_CMD_IDE;
+ case 1:
+ reg = IGC_DTXTCPFLGL;
+ shift = 16;
+ break;
+ case 2:
+ reg = IGC_DTXTCPFLGH;
+ shift = 0;
+ break;
+ default:
+ return (EINVAL);
break;
}
- IGC_WRITE_OFFSET(&adapter->hw, info->offset, regval);
+ val = IGC_READ_REG(&sc->hw, reg);
+ mask = (val >> shift) & 0xfff;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (mask < 0 || mask > 0xfff)
+ return (EINVAL);
+ val = (val & ~(0xfff << shift)) | (mask << shift);
+ IGC_WRITE_REG(&sc->hw, reg, val);
return (0);
}
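
[For clarity, the handler above is a plain read-modify-write of a 12-bit field inside DTXTCPFLGL/DTXTCPFLGH. A minimal sketch of that field update, detached from the register access (names here are ad hoc, not driver symbols):

	#include <stdint.h>
	#include <stdio.h>

	/* Replace the 12-bit field at 'shift' in 'reg' with 'mask'. */
	static uint32_t
	set_flags_field(uint32_t reg, unsigned int shift, uint32_t mask)
	{
		reg &= ~((uint32_t)0xfff << shift);	/* clear the old field */
		reg |= (mask & 0xfff) << shift;		/* insert the new one */
		return (reg);
	}

	int
	main(void)
	{
		/* Program 0x5 into the upper half, as oid_arg2 == 1 does. */
		printf("0x%08x\n", set_flags_field(0xffffffff, 16, 0x5));
		return (0);
	}
]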
-static void
-igc_add_int_delay_sysctl(struct igc_adapter *adapter, const char *name,
- const char *description, struct igc_int_delay_info *info,
- int offset, int value)
-{
- info->adapter = adapter;
- info->offset = offset;
- info->value = value;
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)),
- OID_AUTO, name, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
- info, 0, igc_sysctl_int_delay, "I", description);
-}
-
/*
* Set flow control using sysctl:
* Flow control values:
@@ -2785,14 +3129,14 @@ igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
{
int error;
static int input = 3; /* default is full */
- struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+ struct igc_softc *sc = (struct igc_softc *) arg1;
error = sysctl_handle_int(oidp, &input, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
- if (input == adapter->fc) /* no change? */
+ if (input == sc->fc) /* no change? */
return (error);
switch (input) {
@@ -2800,16 +3144,65 @@ igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
case igc_fc_tx_pause:
case igc_fc_full:
case igc_fc_none:
- adapter->hw.fc.requested_mode = input;
- adapter->fc = input;
+ sc->hw.fc.requested_mode = input;
+ sc->fc = input;
break;
default:
/* Do nothing */
return (error);
}
- adapter->hw.fc.current_mode = adapter->hw.fc.requested_mode;
- igc_force_mac_fc(&adapter->hw);
+ sc->hw.fc.current_mode = sc->hw.fc.requested_mode;
+ igc_force_mac_fc(&sc->hw);
+ return (error);
+}
+
+/*
+ * Manage DMA Coalesce:
+ * Control values:
+ * 0/1 - off/on
+ * Legal timer values are:
+ * 250, 500, and 1000-10000 in increments of 1000
+ */
+static int
+igc_sysctl_dmac(SYSCTL_HANDLER_ARGS)
+{
+ struct igc_softc *sc = (struct igc_softc *) arg1;
+ int error;
+
+ error = sysctl_handle_int(oidp, &sc->dmac, 0, req);
+
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ switch (sc->dmac) {
+ case 0:
+ /* Disabling */
+ break;
+ case 1: /* Just enable and use default */
+ sc->dmac = 1000;
+ break;
+ case 250:
+ case 500:
+ case 1000:
+ case 2000:
+ case 3000:
+ case 4000:
+ case 5000:
+ case 6000:
+ case 7000:
+ case 8000:
+ case 9000:
+ case 10000:
+ /* Legal values - allow */
+ break;
+ default:
+ /* Do nothing, illegal value */
+ sc->dmac = 0;
+ return (EINVAL);
+ }
+ /* Reinit the interface */
+ igc_if_init(sc->ctx);
return (error);
}
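
[Once this handler is attached to an OID (the SYSCTL_ADD_PROC call is outside this hunk, so the OID name below is an assumption), setting it to a listed value, e.g. something like sysctl dev.igc.0.dmac=250, would enable DMA coalescing with a timer value of 250 and reinitialize the interface; 1 selects the 1000 default, 0 disables coalescing, and any other value is rejected with EINVAL.]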
@@ -2821,16 +3214,16 @@ igc_set_flowcntl(SYSCTL_HANDLER_ARGS)
static int
igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+ struct igc_softc *sc = (struct igc_softc *) arg1;
int error, value;
- value = adapter->hw.dev_spec._i225.eee_disable;
+ value = sc->hw.dev_spec._i225.eee_disable;
error = sysctl_handle_int(oidp, &value, 0, req);
if (error || req->newptr == NULL)
return (error);
- adapter->hw.dev_spec._i225.eee_disable = (value != 0);
- igc_if_init(adapter->ctx);
+ sc->hw.dev_spec._i225.eee_disable = (value != 0);
+ igc_if_init(sc->ctx);
return (0);
}
@@ -2838,7 +3231,7 @@ igc_sysctl_eee(SYSCTL_HANDLER_ARGS)
static int
igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
int error;
int result;
@@ -2849,8 +3242,8 @@ igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
return (error);
if (result == 1) {
- adapter = (struct igc_adapter *) arg1;
- igc_print_debug_info(adapter);
+ sc = (struct igc_softc *) arg1;
+ igc_print_debug_info(sc);
}
return (error);
@@ -2859,7 +3252,7 @@ igc_sysctl_debug_info(SYSCTL_HANDLER_ARGS)
static int
igc_get_rs(SYSCTL_HANDLER_ARGS)
{
- struct igc_adapter *adapter = (struct igc_adapter *) arg1;
+ struct igc_softc *sc = (struct igc_softc *) arg1;
int error;
int result;
@@ -2868,7 +3261,7 @@ igc_get_rs(SYSCTL_HANDLER_ARGS)
if (error || !req->newptr || result != 1)
return (error);
- igc_dump_rs(adapter);
+ igc_dump_rs(sc);
return (error);
}
@@ -2884,12 +3277,12 @@ igc_if_debug(if_ctx_t ctx)
* needed for debugging a problem. -jfv
*/
static void
-igc_print_debug_info(struct igc_adapter *adapter)
+igc_print_debug_info(struct igc_softc *sc)
{
- device_t dev = iflib_get_dev(adapter->ctx);
- if_t ifp = iflib_get_ifp(adapter->ctx);
- struct tx_ring *txr = &adapter->tx_queues->txr;
- struct rx_ring *rxr = &adapter->rx_queues->rxr;
+ device_t dev = iflib_get_dev(sc->ctx);
+ if_t ifp = iflib_get_ifp(sc->ctx);
+ struct tx_ring *txr = &sc->tx_queues->txr;
+ struct rx_ring *rxr = &sc->rx_queues->rxr;
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
printf("Interface is RUNNING ");
@@ -2901,17 +3294,17 @@ igc_print_debug_info(struct igc_adapter *adapter)
else
printf("and ACTIVE\n");
- for (int i = 0; i < adapter->tx_num_queues; i++, txr++) {
+ for (int i = 0; i < sc->tx_num_queues; i++, txr++) {
device_printf(dev, "TX Queue %d ------\n", i);
device_printf(dev, "hw tdh = %d, hw tdt = %d\n",
- IGC_READ_REG(&adapter->hw, IGC_TDH(i)),
- IGC_READ_REG(&adapter->hw, IGC_TDT(i)));
+ IGC_READ_REG(&sc->hw, IGC_TDH(i)),
+ IGC_READ_REG(&sc->hw, IGC_TDT(i)));
}
- for (int j=0; j < adapter->rx_num_queues; j++, rxr++) {
+ for (int j=0; j < sc->rx_num_queues; j++, rxr++) {
device_printf(dev, "RX Queue %d ------\n", j);
device_printf(dev, "hw rdh = %d, hw rdt = %d\n",
- IGC_READ_REG(&adapter->hw, IGC_RDH(j)),
- IGC_READ_REG(&adapter->hw, IGC_RDT(j)));
+ IGC_READ_REG(&sc->hw, IGC_RDH(j)),
+ IGC_READ_REG(&sc->hw, IGC_RDT(j)));
}
}
diff --git a/sys/dev/igc/if_igc.h b/sys/dev/igc/if_igc.h
index 0c22ce9f76f4..236a16c4add3 100644
--- a/sys/dev/igc/if_igc.h
+++ b/sys/dev/igc/if_igc.h
@@ -1,9 +1,9 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
+ * Copyright (c) 2001-2024, Intel Corporation
* Copyright (c) 2016 Nicole Graziano <nicole@nextbsd.org>
- * All rights reserved.
- * Copyright (c) 2021 Rubicon Communications, LLC (Netgate)
+ * Copyright (c) 2021-2024 Rubicon Communications, LLC (Netgate)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -132,65 +132,6 @@
#define IGC_MAX_RXD 4096
/*
- * IGC_TIDV_VAL - Transmit Interrupt Delay Value
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- * This value delays the generation of transmit interrupts in units of
- * 1.024 microseconds. Transmit interrupt reduction can improve CPU
- * efficiency if properly tuned for specific network traffic. If the
- * system is reporting dropped transmits, this value may be set too high
- * causing the driver to run out of available transmit descriptors.
- */
-#define IGC_TIDV_VAL 64
-
-/*
- * IGC_TADV_VAL - Transmit Absolute Interrupt Delay Value
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- * This value, in units of 1.024 microseconds, limits the delay in which a
- * transmit interrupt is generated. Useful only if IGC_TIDV is non-zero,
- * this value ensures that an interrupt is generated after the initial
- * packet is sent on the wire within the set amount of time. Proper tuning,
- * along with IGC_TIDV_VAL, may improve traffic throughput in specific
- * network conditions.
- */
-#define IGC_TADV_VAL 64
-
-/*
- * IGC_RDTR_VAL - Receive Interrupt Delay Timer (Packet Timer)
- * Valid Range: 0-65535 (0=off)
- * Default Value: 0
- * This value delays the generation of receive interrupts in units of 1.024
- * microseconds. Receive interrupt reduction can improve CPU efficiency if
- * properly tuned for specific network traffic. Increasing this value adds
- * extra latency to frame reception and can end up decreasing the throughput
- * of TCP traffic. If the system is reporting dropped receives, this value
- * may be set too high, causing the driver to run out of available receive
- * descriptors.
- *
- * CAUTION: When setting IGC_RDTR to a value other than 0, adapters
- * may hang (stop transmitting) under certain network conditions.
- * If this occurs a WATCHDOG message is logged in the system
- * event log. In addition, the controller is automatically reset,
- * restoring the network connection. To eliminate the potential
- * for the hang ensure that IGC_RDTR is set to 0.
- */
-#define IGC_RDTR_VAL 0
-
-/*
- * Receive Interrupt Absolute Delay Timer
- * Valid Range: 0-65535 (0=off)
- * Default Value: 64
- * This value, in units of 1.024 microseconds, limits the delay in which a
- * receive interrupt is generated. Useful only if IGC_RDTR is non-zero,
- * this value ensures that an interrupt is generated after the initial
- * packet is received within the set amount of time. Proper tuning,
- * along with IGC_RDTR, may improve traffic throughput in specific network
- * conditions.
- */
-#define IGC_RADV_VAL 64
-
-/*
* This parameter controls whether or not autonegotiation is enabled.
* 0 - Disable autonegotiation
* 1 - Enable autonegotiation
@@ -222,6 +163,17 @@
#define IGC_TX_PTHRESH 8
#define IGC_TX_HTHRESH 1
+/* Define the interrupt rates and EITR helpers */
+#define IGC_INTS_4K 4000
+#define IGC_INTS_20K 20000
+#define IGC_INTS_70K 70000
+#define IGC_INTS_DEFAULT 8000
+#define IGC_EITR_DIVIDEND 1000000
+#define IGC_EITR_SHIFT 2
+#define IGC_QVECTOR_MASK 0x7FFC
+#define IGC_INTS_TO_EITR(i) (((IGC_EITR_DIVIDEND/i) & IGC_QVECTOR_MASK) << \
+ IGC_EITR_SHIFT)
+
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary. This will
@@ -254,10 +206,10 @@
CSUM_IP_SCTP | CSUM_IP6_UDP | CSUM_IP6_TCP | \
CSUM_IP6_SCTP) /* Offload bits in mbuf flag */
-struct igc_adapter;
+struct igc_softc;
struct igc_int_delay_info {
- struct igc_adapter *adapter; /* Back-pointer to the adapter struct */
+ struct igc_softc *sc; /* Back-pointer to the softc struct */
int offset; /* Register offset to read/write */
int value; /* Current value in usecs */
};
@@ -266,9 +218,9 @@ struct igc_int_delay_info {
* The transmit ring, one per tx queue
*/
struct tx_ring {
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
struct igc_tx_desc *tx_base;
- uint64_t tx_paddr;
+ uint64_t tx_paddr;
qidx_t *tx_rsq;
uint8_t me;
qidx_t tx_rs_cidx;
@@ -277,7 +229,12 @@ struct tx_ring {
/* Interrupt resources */
void *tag;
struct resource *res;
- unsigned long tx_irq;
+
+ /* Soft stats */
+ unsigned long tx_irq;
+ unsigned long tx_packets;
+ unsigned long tx_bytes;
+
/* Saved csum offloading context information */
int csum_flags;
@@ -296,7 +253,7 @@ struct tx_ring {
* The Receive ring, one per rx queue
*/
struct rx_ring {
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
struct igc_rx_queue *que;
u32 me;
u32 payload;
@@ -312,28 +269,32 @@ struct rx_ring {
unsigned long rx_discarded;
unsigned long rx_packets;
unsigned long rx_bytes;
+
+ /* Next requested EITR latency */
+ u8 rx_nextlatency;
};
struct igc_tx_queue {
- struct igc_adapter *adapter;
- u32 msix;
- u32 eims; /* This queue's EIMS bit */
- u32 me;
- struct tx_ring txr;
+ struct igc_softc *sc;
+ u32 msix;
+ u32 eims; /* This queue's EIMS bit */
+ u32 me;
+ struct tx_ring txr;
};
struct igc_rx_queue {
- struct igc_adapter *adapter;
+ struct igc_softc *sc;
u32 me;
u32 msix;
u32 eims;
+ u32 eitr_setting;
struct rx_ring rxr;
u64 irqs;
struct if_irq que_irq;
};
-/* Our adapter structure */
-struct igc_adapter {
+/* Our softc structure */
+struct igc_softc {
if_t ifp;
struct igc_hw hw;
@@ -374,6 +335,8 @@ struct igc_adapter {
u32 rx_mbuf_sz;
+ int enable_aim;
+
/* Management and WOL features */
u32 wol;
@@ -387,10 +350,13 @@ struct igc_adapter {
u16 link_duplex;
u32 smartspeed;
u32 dmac;
+ u32 pba;
int link_mask;
u64 que_mask;
+ struct igc_fw_version fw_ver;
+
struct igc_int_delay_info tx_int_delay;
struct igc_int_delay_info tx_abs_int_delay;
struct igc_int_delay_info rx_int_delay;
@@ -407,7 +373,7 @@ struct igc_adapter {
u16 vf_ifp;
};
-void igc_dump_rs(struct igc_adapter *);
+void igc_dump_rs(struct igc_softc *);
#define IGC_RSSRK_SIZE 4
#define IGC_RSSRK_VAL(key, i) (key[(i) * IGC_RSSRK_SIZE] | \
diff --git a/sys/dev/igc/igc_api.c b/sys/dev/igc/igc_api.c
index 9e91e7a4c73f..da499274fca4 100644
--- a/sys/dev/igc/igc_api.c
+++ b/sys/dev/igc/igc_api.c
@@ -109,6 +109,8 @@ s32 igc_set_mac_type(struct igc_hw *hw)
case IGC_DEV_ID_I220_V:
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I226_K:
+ case IGC_DEV_ID_I226_LMVP:
case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I226_LM:
case IGC_DEV_ID_I226_V:
diff --git a/sys/dev/igc/igc_defines.h b/sys/dev/igc/igc_defines.h
index 1701918c3a9c..09f75fe2787e 100644
--- a/sys/dev/igc/igc_defines.h
+++ b/sys/dev/igc/igc_defines.h
@@ -96,7 +96,6 @@
#define IGC_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */
#define IGC_RXD_ERR_IPE 0x40 /* IP Checksum Error */
#define IGC_RXD_ERR_RXE 0x80 /* Rx Data Error */
-#define IGC_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
#define IGC_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */
#define IGC_RXDEXT_STATERR_LB 0x00040000
@@ -967,11 +966,31 @@
#define IGC_FLASH_UPDATES 2000
/* NVM Word Offsets */
-#define NVM_COMPAT 0x0003
-#define NVM_ID_LED_SETTINGS 0x0004
-#define NVM_FUTURE_INIT_WORD1 0x0019
-#define NVM_COMPAT_VALID_CSUM 0x0001
+#define NVM_COMPAT 0x0003
+#define NVM_ID_LED_SETTINGS 0x0004
+#define NVM_VERSION 0x0005
+#define NVM_FUTURE_INIT_WORD1 0x0019
+#define NVM_COMPAT_VALID_CSUM 0x0001
#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040
+#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
+#define NVM_COMB_VER_OFF 0x0083
+#define NVM_COMB_VER_PTR 0x003d
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
#define NVM_INIT_CONTROL2_REG 0x000F
#define NVM_INIT_CONTROL3_PORT_B 0x0014
diff --git a/sys/dev/igc/igc_nvm.c b/sys/dev/igc/igc_nvm.c
index d86e04ffa0dc..b476a5fdbeac 100644
--- a/sys/dev/igc/igc_nvm.c
+++ b/sys/dev/igc/igc_nvm.c
@@ -716,4 +716,85 @@ static void igc_reload_nvm_generic(struct igc_hw *hw)
IGC_WRITE_FLUSH(hw);
}
+/**
+ * igc_get_fw_version - Get firmware version information
+ * @hw: pointer to the HW structure
+ * @fw_vers: pointer to output version structure
+ *
+ * Unsupported or not-present features return 0 in the version structure
+ **/
+void igc_get_fw_version(struct igc_hw *hw, struct igc_fw_version *fw_vers)
+{
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
+
+ memset(fw_vers, 0, sizeof(struct igc_fw_version));
+ /*
+ * basic eeprom version numbers, bits used vary by part and by tool
+ * used to create the nvm images. Check which data format we have.
+ */
+ switch (hw->mac.type) {
+ case igc_i225:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ /* find combo image version */
+ hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+ if (comb_offset && comb_offset != NVM_VER_INVALID) {
+ hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset + 1,
+ 1, &comb_verh);
+ hw->nvm.ops.read(hw, NVM_COMB_VER_OFF + comb_offset,
+ 1, &comb_verl);
+
+ /* get Option ROM version if it exists and is valid */
+ if (comb_verh && comb_verl &&
+ comb_verh != NVM_VER_INVALID &&
+ comb_verl != NVM_VER_INVALID) {
+ fw_vers->or_valid = true;
+ fw_vers->or_major = comb_verl >>
+ NVM_COMB_VER_SHFT;
+ fw_vers->or_build = (comb_verl <<
+ NVM_COMB_VER_SHFT) |
+ (comb_verh >>
+ NVM_COMB_VER_SHFT);
+ fw_vers->or_patch = comb_verh &
+ NVM_COMB_VER_MASK;
+ }
+ }
+ break;
+ default:
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images */
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value from packed hex to decimal before assigning to
+ * output struct. Val to be converted will not be higher than 99, per tool
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
+ } else if ((etrack_test & NVM_ETRACK_VALID) == 0) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) |
+ eeprom_verl;
+ }
+}
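
[The digit-wise arithmetic above undoes the tools' packed-hex encoding of the minor number (e.g. 0x23 means "minor 23"). A standalone check of the same steps, with 0x23 chosen arbitrarily:

	#include <stdint.h>
	#include <stdio.h>

	int
	main(void)
	{
		uint16_t eeprom_verl = 0x23;	/* packed-hex "minor 23" */
		uint8_t q = eeprom_verl / 16;	/* NVM_HEX_CONV: tens digit, 2 */
		uint8_t hval = q * 10;		/* NVM_HEX_TENS: 20 */
		uint8_t rem = eeprom_verl % 16;	/* ones digit, 3 */

		printf("eep_minor = %u\n", hval + rem);	/* prints 23 */
		return (0);
	}
]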
diff --git a/sys/dev/igc/igc_nvm.h b/sys/dev/igc/igc_nvm.h
index eae0db4b484b..b4b602af6595 100644
--- a/sys/dev/igc/igc_nvm.h
+++ b/sys/dev/igc/igc_nvm.h
@@ -7,6 +7,22 @@
#ifndef _IGC_NVM_H_
#define _IGC_NVM_H_
+struct igc_fw_version {
+ u32 etrack_id;
+ u16 eep_major;
+ u16 eep_minor;
+ u16 eep_build;
+
+ u8 invm_major;
+ u8 invm_minor;
+ u8 invm_img_type;
+
+ bool or_valid;
+ u16 or_major;
+ u16 or_build;
+ u16 or_patch;
+};
+
void igc_init_nvm_ops_generic(struct igc_hw *hw);
s32 igc_null_read_nvm(struct igc_hw *hw, u16 a, u16 b, u16 *c);
void igc_null_nvm_generic(struct igc_hw *hw);
@@ -26,5 +42,7 @@ s32 igc_write_nvm_spi(struct igc_hw *hw, u16 offset, u16 words,
u16 *data);
s32 igc_update_nvm_checksum_generic(struct igc_hw *hw);
void igc_release_nvm_generic(struct igc_hw *hw);
+void igc_get_fw_version(struct igc_hw *hw,
+ struct igc_fw_version *fw_vers);
#endif
diff --git a/sys/dev/igc/igc_regs.h b/sys/dev/igc/igc_regs.h
index f4ded07ce6ab..17fa89e492e8 100644
--- a/sys/dev/igc/igc_regs.h
+++ b/sys/dev/igc/igc_regs.h
@@ -27,7 +27,6 @@
#define IGC_CONNSW 0x00034 /* Copper/Fiber switch control - RW */
#define IGC_VET 0x00038 /* VLAN Ether Type - RW */
#define IGC_ICR 0x01500 /* Intr Cause Read - RC/W1C */
-#define IGC_ITR 0x000C4 /* Interrupt Throttling Rate - RW */
#define IGC_ICS 0x01504 /* Intr Cause Set - WO */
#define IGC_IMS 0x01508 /* Intr Mask Set/Read - RW */
#define IGC_IMC 0x0150C /* Intr Mask Clear - WO */
@@ -78,8 +77,6 @@
#define IGC_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
/* Split and Replication Rx Control - RW */
#define IGC_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
-#define IGC_RDTR 0x02820 /* Rx Delay Timer - RW */
-#define IGC_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
/* Shadow Ram Write Register - RW */
#define IGC_SRWR 0x12018
#define IGC_EEC_REG 0x12010
@@ -148,8 +145,8 @@
#define IGC_FFVT_REG(_i) (0x09800 + ((_i) * 8))
#define IGC_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
#define IGC_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
-#define IGC_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
-#define IGC_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define IGC_DTXTCPFLGL 0x0359C /* DMA Tx Control flag low - RW */
+#define IGC_DTXTCPFLGH 0x035A0 /* DMA Tx Control flag high - RW */
/* Statistics Register Descriptions */
#define IGC_CRCERRS 0x04000 /* CRC Error Count - R/clr */
#define IGC_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
diff --git a/sys/dev/igc/igc_txrx.c b/sys/dev/igc/igc_txrx.c
index 7601513a709e..92ba81c79c58 100644
--- a/sys/dev/igc/igc_txrx.c
+++ b/sys/dev/igc/igc_txrx.c
@@ -44,29 +44,27 @@
/*********************************************************************
* Local Function prototypes
*********************************************************************/
-static int igc_isc_txd_encap(void *arg, if_pkt_info_t pi);
-static void igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx);
-static int igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear);
+static int igc_isc_txd_encap(void *, if_pkt_info_t);
+static void igc_isc_txd_flush(void *, uint16_t, qidx_t);
+static int igc_isc_txd_credits_update(void *, uint16_t, bool);
-static void igc_isc_rxd_refill(void *arg, if_rxd_update_t iru);
+static void igc_isc_rxd_refill(void *, if_rxd_update_t);
-static void igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
- qidx_t pidx);
-static int igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx,
- qidx_t budget);
+static void igc_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t);
+static int igc_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t);
-static int igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri);
+static int igc_isc_rxd_pkt_get(void *, if_rxd_info_t);
-static int igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
- uint32_t *cmd_type_len, uint32_t *olinfo_status);
-static int igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi,
- uint32_t *cmd_type_len, uint32_t *olinfo_status);
+static int igc_tx_ctx_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
+ uint32_t *);
+static int igc_tso_setup(struct tx_ring *, if_pkt_info_t, uint32_t *,
+ uint32_t *);
-static void igc_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype);
-static int igc_determine_rsstype(uint16_t pkt_info);
+static void igc_rx_checksum(uint32_t, if_rxd_info_t, uint32_t);
+static int igc_determine_rsstype(uint16_t);
-extern void igc_if_enable_intr(if_ctx_t ctx);
-extern int igc_intr(void *arg);
+extern void igc_if_enable_intr(if_ctx_t);
+extern int igc_intr(void *);
struct if_txrx igc_txrx = {
.ift_txd_encap = igc_isc_txd_encap,
@@ -80,9 +78,9 @@ struct if_txrx igc_txrx = {
};
void
-igc_dump_rs(struct igc_adapter *adapter)
+igc_dump_rs(struct igc_softc *sc)
{
- if_softc_ctx_t scctx = adapter->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct igc_tx_queue *que;
struct tx_ring *txr;
qidx_t i, ntxd, qid, cur;
@@ -91,23 +89,27 @@ igc_dump_rs(struct igc_adapter *adapter)
printf("\n");
ntxd = scctx->isc_ntxd[0];
- for (qid = 0; qid < adapter->tx_num_queues; qid++) {
- que = &adapter->tx_queues[qid];
+ for (qid = 0; qid < sc->tx_num_queues; qid++) {
+ que = &sc->tx_queues[qid];
txr = &que->txr;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx != txr->tx_rs_pidx) {
cur = txr->tx_rsq[rs_cidx];
status = txr->tx_base[cur].upper.fields.status;
if (!(status & IGC_TXD_STAT_DD))
- printf("qid[%d]->tx_rsq[%d]: %d clear ", qid, rs_cidx, cur);
+ printf("qid[%d]->tx_rsq[%d]: %d clear ",
+ qid, rs_cidx, cur);
} else {
rs_cidx = (rs_cidx-1)&(ntxd-1);
cur = txr->tx_rsq[rs_cidx];
- printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ", qid, rs_cidx, cur);
+ printf("qid[%d]->tx_rsq[rs_cidx-1=%d]: %d ",
+ qid, rs_cidx, cur);
}
- printf("cidx_prev=%d rs_pidx=%d ",txr->tx_cidx_processed, txr->tx_rs_pidx);
+		printf("cidx_prev=%d rs_pidx=%d ", txr->tx_cidx_processed,
+ txr->tx_rs_pidx);
for (i = 0; i < ntxd; i++) {
- if (txr->tx_base[i].upper.fields.status & IGC_TXD_STAT_DD)
+ if (txr->tx_base[i].upper.fields.status &
+ IGC_TXD_STAT_DD)
printf("%d set ", i);
}
printf("\n");
@@ -140,14 +142,15 @@ igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
break;
default:
panic("%s: CSUM_TSO but no supported IP version (0x%04x)",
- __func__, ntohs(pi->ipi_etype));
+ __func__, ntohs(pi->ipi_etype));
break;
}
TXD = (struct igc_adv_tx_context_desc *) &txr->tx_base[pi->ipi_pidx];
/* This is used in the transmit desc in encap */
- paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
+ paylen = pi->ipi_len - pi->ipi_ehdrlen - pi->ipi_ip_hlen -
+ pi->ipi_tcp_hlen;
/* VLAN MACLEN IPLEN */
if (pi->ipi_mflags & M_VLANTAG) {
@@ -182,8 +185,8 @@ igc_tso_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
*
**********************************************************************/
static int
-igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
- uint32_t *olinfo_status)
+igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi,
+ uint32_t *cmd_type_len, uint32_t *olinfo_status)
{
struct igc_adv_tx_context_desc *TXD;
uint32_t vlan_macip_lens, type_tucmd_mlhl;
@@ -263,7 +266,7 @@ igc_tx_ctx_setup(struct tx_ring *txr, if_pkt_info_t pi, uint32_t *cmd_type_len,
static int
igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct igc_adapter *sc = arg;
+ struct igc_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct igc_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
struct tx_ring *txr = &que->txr;
@@ -277,7 +280,7 @@ igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
pidx_last = olinfo_status = 0;
/* Basic descriptor defines */
cmd_type_len = (IGC_ADVTXD_DTYP_DATA |
- IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT);
+ IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT);
if (pi->ipi_mflags & M_VLANTAG)
cmd_type_len |= IGC_ADVTXD_DCMD_VLE;
@@ -316,25 +319,29 @@ igc_isc_txd_encap(void *arg, if_pkt_info_t pi)
txd->read.cmd_type_len |= htole32(IGC_ADVTXD_DCMD_EOP | txd_flags);
pi->ipi_new_pidx = i;
+ /* Sent data accounting for AIM */
+ txr->tx_bytes += pi->ipi_len;
+ ++txr->tx_packets;
+
return (0);
}
static void
igc_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- struct igc_adapter *adapter = arg;
- struct igc_tx_queue *que = &adapter->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
+ struct igc_softc *sc = arg;
+ struct igc_tx_queue *que = &sc->tx_queues[txqid];
+ struct tx_ring *txr = &que->txr;
- IGC_WRITE_REG(&adapter->hw, IGC_TDT(txr->me), pidx);
+ IGC_WRITE_REG(&sc->hw, IGC_TDT(txr->me), pidx);
}
static int
igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
- struct igc_adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
- struct igc_tx_queue *que = &adapter->tx_queues[txqid];
+ struct igc_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct igc_tx_queue *que = &sc->tx_queues[txqid];
struct tx_ring *txr = &que->txr;
qidx_t processed = 0;
@@ -368,12 +375,13 @@ igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
MPASS(delta > 0);
processed += delta;
- prev = cur;
+ prev = cur;
rs_cidx = (rs_cidx + 1) & (ntxd-1);
- if (rs_cidx == txr->tx_rs_pidx)
+ if (rs_cidx == txr->tx_rs_pidx)
break;
cur = txr->tx_rsq[rs_cidx];
- status = ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
+ status =
+ ((union igc_adv_tx_desc *)&txr->tx_base[cur])->wb.status;
} while ((status & IGC_TXD_STAT_DD));
txr->tx_rs_cidx = rs_cidx;
@@ -384,7 +392,7 @@ igc_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
static void
igc_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
- struct igc_adapter *sc = arg;
+ struct igc_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
uint16_t rxqid = iru->iru_qsidx;
struct igc_rx_queue *que = &sc->rx_queues[rxqid];
@@ -409,9 +417,10 @@ igc_isc_rxd_refill(void *arg, if_rxd_update_t iru)
}
static void
-igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
+igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused,
+ qidx_t pidx)
{
- struct igc_adapter *sc = arg;
+ struct igc_softc *sc = arg;
struct igc_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
@@ -421,7 +430,7 @@ igc_isc_rxd_flush(void *arg, uint16_t rxqid, uint8_t flid __unused, qidx_t pidx)
static int
igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
{
- struct igc_adapter *sc = arg;
+ struct igc_softc *sc = arg;
if_softc_ctx_t scctx = sc->shared;
struct igc_rx_queue *que = &sc->rx_queues[rxqid];
struct rx_ring *rxr = &que->rxr;
@@ -453,9 +462,9 @@ igc_isc_rxd_available(void *arg, uint16_t rxqid, qidx_t idx, qidx_t budget)
static int
igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct igc_adapter *adapter = arg;
- if_softc_ctx_t scctx = adapter->shared;
- struct igc_rx_queue *que = &adapter->rx_queues[ri->iri_qsidx];
+ struct igc_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct igc_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
struct rx_ring *rxr = &que->rxr;
union igc_adv_rx_desc *rxd;
@@ -475,7 +484,8 @@ igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
MPASS ((staterr & IGC_RXD_STAT_DD) != 0);
len = le16toh(rxd->wb.upper.length);
- ptype = le32toh(rxd->wb.lower.lo_dword.data) & IGC_PKTTYPE_MASK;
+ ptype =
+ le32toh(rxd->wb.lower.lo_dword.data) & IGC_PKTTYPE_MASK;
ri->iri_len += len;
rxr->rx_bytes += ri->iri_len;
@@ -485,7 +495,7 @@ igc_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Make sure bad packets are discarded */
if (eop && ((staterr & IGC_RXDEXT_STATERR_RXE) != 0)) {
- adapter->dropped_pkts++;
+ sc->dropped_pkts++;
++rxr->rx_discarded;
return (EBADMSG);
}
@@ -556,7 +566,8 @@ igc_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
(ptype & IGC_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
- ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ ri->iri_csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}
diff --git a/sys/dev/iicbus/acpi_iicbus.c b/sys/dev/iicbus/acpi_iicbus.c
index 4b61387b0ed6..873a96b04823 100644
--- a/sys/dev/iicbus/acpi_iicbus.c
+++ b/sys/dev/iicbus/acpi_iicbus.c
@@ -75,7 +75,7 @@ struct acpi_iicbus_ivars {
ACPI_HANDLE handle;
};
-static int install_space_handler = 0;
+static int install_space_handler = 1;
TUNABLE_INT("hw.iicbus.enable_acpi_space_handler", &install_space_handler);
static inline bool
@@ -512,7 +512,7 @@ acpi_iicbus_enumerate_child(ACPI_HANDLE handle, UINT32 level,
return (AE_OK);
}
- child = BUS_ADD_CHILD(iicbus, 0, NULL, -1);
+ child = BUS_ADD_CHILD(iicbus, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(iicbus, "add child failed\n");
return (AE_OK);
diff --git a/sys/dev/iicbus/adc/ads111x.c b/sys/dev/iicbus/adc/ads111x.c
index dd223f69bb5f..21924627cc68 100644
--- a/sys/dev/iicbus/adc/ads111x.c
+++ b/sys/dev/iicbus/adc/ads111x.c
@@ -559,6 +559,8 @@ ads111x_attach(device_t dev)
return (err);
}
+ sx_init(&sc->lock, "ads111x");
+
/* Add the sysctl handler to set the chip configuration register. */
ctx = device_get_sysctl_ctx(dev);
tree = device_get_sysctl_tree(dev);
@@ -575,8 +577,6 @@ ads111x_attach(device_t dev)
/* Set up channels based on metadata or default config. */
ads111x_add_channels(sc);
- sx_init(&sc->lock, "ads111x");
-
return (0);
}
diff --git a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c
index 61f4975c10de..2e7950dab9c1 100644
--- a/sys/dev/iicbus/controller/cadence/cdnc_i2c.c
+++ b/sys/dev/iicbus/controller/cadence/cdnc_i2c.c
@@ -42,11 +42,11 @@
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
@@ -612,12 +612,13 @@ cdnc_i2c_attach(device_t dev)
return (err);
}
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
cdnc_i2c_add_sysctls(dev);
/* Probe and attach iicbus when interrupts work. */
- return (bus_delayed_attach_children(dev));
+ bus_delayed_attach_children(dev);
+ return (0);
}
static int
@@ -625,18 +626,13 @@ cdnc_i2c_detach(device_t dev)
{
struct cdnc_i2c_softc *sc = device_get_softc(dev);
- if (device_is_attached(dev))
- bus_generic_detach(dev);
+ bus_generic_detach(dev);
if (sc->ref_clk != NULL) {
clk_release(sc->ref_clk);
sc->ref_clk = NULL;
}
- /* Delete iic bus. */
- if (sc->iicbus)
- device_delete_child(dev, sc->iicbus);
-
/* Disable hardware. */
if (sc->mem_res != NULL) {
sc->cfg_reg_shadow = 0;
diff --git a/sys/dev/iicbus/controller/opencores/iicoc_fdt.c b/sys/dev/iicbus/controller/opencores/iicoc_fdt.c
index 649027038659..2423d2b87272 100644
--- a/sys/dev/iicbus/controller/opencores/iicoc_fdt.c
+++ b/sys/dev/iicbus/controller/opencores/iicoc_fdt.c
@@ -119,7 +119,7 @@ iicoc_attach(device_t dev)
sc->i2cfreq = XLP_I2C_FREQ;
iicoc_init(dev);
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus == NULL) {
device_printf(dev, "Could not allocate iicbus instance.\n");
error = ENXIO;
@@ -127,7 +127,7 @@ iicoc_attach(device_t dev)
}
/* Probe and attach the iicbus when interrupts are available. */
- config_intrhook_oneshot((ich_func_t)bus_generic_attach, dev);
+ bus_delayed_attach_children(dev);
return (0);
diff --git a/sys/dev/iicbus/controller/opencores/iicoc_pci.c b/sys/dev/iicbus/controller/opencores/iicoc_pci.c
index a1ed7e9cbc68..f01b4514936e 100644
--- a/sys/dev/iicbus/controller/opencores/iicoc_pci.c
+++ b/sys/dev/iicbus/controller/opencores/iicoc_pci.c
@@ -50,7 +50,6 @@ iicoc_detach(device_t dev)
struct iicoc_softc *sc;
sc = device_get_softc(dev);
- device_delete_children(dev);
bus_generic_detach(dev);
bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res);
mtx_destroy(&sc->sc_mtx);
@@ -80,7 +79,7 @@ iicoc_attach(device_t dev)
return (-1);
}
iicoc_init(dev);
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus == NULL) {
device_printf(dev, "Could not allocate iicbus instance.\n");
bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
@@ -88,7 +87,7 @@ iicoc_attach(device_t dev)
mtx_destroy(&sc->sc_mtx);
return (-1);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/iicbus/controller/qcom/geni_iic.c b/sys/dev/iicbus/controller/qcom/geni_iic.c
new file mode 100644
index 000000000000..f53fc1d3f1cd
--- /dev/null
+++ b/sys/dev/iicbus/controller/qcom/geni_iic.c
@@ -0,0 +1,608 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Poul-Henning Kamp <phk@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * QualComm GENI I2C controller
+ *
+ * The GENI is actually a multi-protocol serial controller, so a lot of
+ * this can probably be shared if we ever get to those protocols.
+ *
+ * The best open "documentation" of the hardware is the Linux device driver
+ * from which much was learned, and we tip our hat to the authors of it.
+ */
+
+#include <sys/cdefs.h>
+
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/endian.h>
+#include <sys/time.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+#include <sys/sx.h>
+#include <sys/bus.h>
+
+#include <machine/bus.h>
+#include <sys/rman.h>
+
+#include <dev/iicbus/iicbus.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/iicbus/controller/qcom/geni_iic_var.h>
+
+#define GENI_ALL_REGISTERS(THIS_MACRO) \
+ THIS_MACRO(GENI_FORCE_DEFAULT_REG, 0x020) \
+ THIS_MACRO(GENI_OUTPUT_CTRL, 0x024) \
+ THIS_MACRO(GENI_STATUS, 0x040) \
+ THIS_MACRO(GENI_SER_M_CLK_CFG, 0x048) \
+ THIS_MACRO(GENI_SER_S_CLK_CFG, 0x04c) \
+ THIS_MACRO(GENI_IF_DISABLE_RO, 0x064) \
+ THIS_MACRO(GENI_FW_REVISION_RO, 0x068) \
+ THIS_MACRO(GENI_CLK_SEL, 0x07c) \
+ THIS_MACRO(GENI_CFG_SEQ_START, 0x084) \
+ THIS_MACRO(GENI_BYTE_GRANULARITY, 0x254) \
+ THIS_MACRO(GENI_DMA_MODE_EN, 0x258) \
+ THIS_MACRO(GENI_TX_PACKING_CFG0, 0x260) \
+ THIS_MACRO(GENI_TX_PACKING_CFG1, 0x264) \
+ THIS_MACRO(GENI_I2C_TX_TRANS_LEN, 0x26c) \
+ THIS_MACRO(GENI_I2C_RX_TRANS_LEN, 0x270) \
+ THIS_MACRO(GENI_I2C_SCL_COUNTERS, 0x278) \
+ THIS_MACRO(GENI_RX_PACKING_CFG0, 0x284) \
+ THIS_MACRO(GENI_RX_PACKING_CFG1, 0x288) \
+ THIS_MACRO(GENI_M_CMD0, 0x600) \
+ THIS_MACRO(GENI_M_CMD_CTRL_REG, 0x604) \
+ THIS_MACRO(GENI_M_IRQ_STATUS, 0x610) \
+ THIS_MACRO(GENI_M_IRQ_EN, 0x614) \
+ THIS_MACRO(GENI_M_IRQ_CLEAR, 0x618) \
+ THIS_MACRO(GENI_M_IRQ_EN_SET, 0x61c) \
+ THIS_MACRO(GENI_M_IRQ_EN_CLEAR, 0x620) \
+ THIS_MACRO(GENI_S_CMD0, 0x630) \
+ THIS_MACRO(GENI_S_CMD_CTRL_REG, 0x634) \
+ THIS_MACRO(GENI_S_IRQ_STATUS, 0x640) \
+ THIS_MACRO(GENI_S_IRQ_EN, 0x644) \
+ THIS_MACRO(GENI_S_IRQ_CLEAR, 0x648) \
+ THIS_MACRO(GENI_S_IRQ_EN_SET, 0x64c) \
+ THIS_MACRO(GENI_S_IRQ_EN_CLEAR, 0x650) \
+ THIS_MACRO(GENI_TX_FIFOn, 0x700) \
+ THIS_MACRO(GENI_RX_FIFOn, 0x780) \
+ THIS_MACRO(GENI_TX_FIFO_STATUS, 0x800) \
+ THIS_MACRO(GENI_RX_FIFO_STATUS, 0x804) \
+ THIS_MACRO(GENI_TX_WATERMARK_REG, 0x80c) \
+ THIS_MACRO(GENI_RX_WATERMARK_REG, 0x810) \
+ THIS_MACRO(GENI_RX_RFR_WATERMARK_REG, 0x814) \
+ THIS_MACRO(GENI_IOS, 0x908) \
+ THIS_MACRO(GENI_M_GP_LENGTH, 0x910) \
+ THIS_MACRO(GENI_S_GP_LENGTH, 0x914) \
+ THIS_MACRO(GENI_DMA_TX_IRQ_STAT, 0xc40) \
+ THIS_MACRO(GENI_DMA_TX_IRQ_CLR, 0xc44) \
+ THIS_MACRO(GENI_DMA_TX_IRQ_EN, 0xc48) \
+ THIS_MACRO(GENI_DMA_TX_IRQ_EN_CLR, 0xc4c) \
+ THIS_MACRO(GENI_DMA_TX_IRQ_EN_SET, 0xc50) \
+ THIS_MACRO(GENI_DMA_TX_FSM_RST, 0xc58) \
+ THIS_MACRO(GENI_DMA_RX_IRQ_STAT, 0xd40) \
+ THIS_MACRO(GENI_DMA_RX_IRQ_CLR, 0xd44) \
+ THIS_MACRO(GENI_DMA_RX_IRQ_EN, 0xd48) \
+ THIS_MACRO(GENI_DMA_RX_IRQ_EN_CLR, 0xd4c) \
+ THIS_MACRO(GENI_DMA_RX_IRQ_EN_SET, 0xd50) \
+ THIS_MACRO(GENI_DMA_RX_LEN_IN, 0xd54) \
+ THIS_MACRO(GENI_DMA_RX_FSM_RST, 0xd58) \
+ THIS_MACRO(GENI_IRQ_EN, 0xe1c) \
+ THIS_MACRO(GENI_HW_PARAM_0, 0xe24) \
+ THIS_MACRO(GENI_HW_PARAM_1, 0xe28)
+
+enum geni_registers {
+#define ITER_MACRO(name, offset) name = offset,
+ GENI_ALL_REGISTERS(ITER_MACRO)
+#undef ITER_MACRO
+};
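+
+// The GENI_ALL_REGISTERS() X-macro is expanded twice: in the enum above
+// to define the register offsets, and in geni_dump_regs() below to print
+// them all.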
+
+#define RD(sc, reg) bus_read_4((sc)->regs_res, reg)
+#define WR(sc, reg, val) bus_write_4((sc)->regs_res, reg, val)
+
+static void
+geni_dump_regs(geniiic_softc_t *sc)
+{
+ device_printf(sc->dev, "Register Dump\n");
+#define DUMP_MACRO(name, offset) \
+ device_printf(sc->dev, \
+ " %08x %04x " #name "\n", \
+ RD(sc, offset), offset);
+ GENI_ALL_REGISTERS(DUMP_MACRO)
+#undef DUMP_MACRO
+}
+
+static unsigned geniiic_debug_units = 0;
+
+static SYSCTL_NODE(_hw, OID_AUTO, geniiic, CTLFLAG_RW, 0, "GENI I2C");
+SYSCTL_UINT(_hw_geniiic, OID_AUTO, debug_units, CTLFLAG_RWTUN,
+ &geniiic_debug_units, 1, "Bitmask of units to debug");
+
+static driver_filter_t geniiic_intr;
+
+static int
+geniiic_intr(void *cookie)
+{
+ uint32_t m_status, rx_fifo_status;
+ int retval = FILTER_STRAY;
+ geniiic_softc_t *sc = cookie;
+
+ mtx_lock_spin(&sc->intr_lock);
+ m_status = RD(sc, GENI_M_IRQ_STATUS);
+
+ rx_fifo_status = RD(sc, GENI_RX_FIFO_STATUS);
+ if (sc->rx_buf != NULL && rx_fifo_status & 0x3f) {
+
+		// Bytes in the complete FIFO words; all but the last
+		// word are full, 4 bytes each.
+		unsigned gotlen = ((rx_fifo_status & 0x3f) - 1) * 4;
+
+		// Plus the valid bytes in the last FIFO word.
+		// (The field is 3 bits; 0 is taken to mean a full word.)
+		unsigned lastw = (rx_fifo_status >> 28) & 0x7;
+		gotlen += (lastw == 0) ? 4 : lastw;
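+		// Worked example: a status of 0x30000003 is 3 FIFO words
+		// with 3 valid bytes in the last one, so gotlen is
+		// (3 - 1) * 4 + 3 = 11 bytes (assuming the Linux geni
+		// layout: word count in bits 0-5, last-word valid bytes
+		// in bits 28-30).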
+
+ unsigned cnt;
+ for (cnt = 0; cnt < (rx_fifo_status & 0x3f); cnt++) {
+ uint32_t data = RD(sc, GENI_RX_FIFOn);
+ unsigned u;
+ for (u = 0; u < 4 && sc->rx_len && gotlen; u++) {
+ *sc->rx_buf++ = data & 0xff;
+ data >>= 8;
+ sc->rx_len--;
+ gotlen--;
+ }
+ }
+ }
+ if (m_status & (1<<26)) {
+ WR(sc, GENI_M_IRQ_CLEAR, (1<<26));
+ retval = FILTER_HANDLED;
+ }
+
+ if (m_status & (1<<0)) {
+ sc->rx_complete = true;
+ WR(sc, GENI_M_IRQ_EN_CLEAR, (1<<0));
+ WR(sc, GENI_M_IRQ_EN_CLEAR, (1<<26));
+ WR(sc, GENI_M_IRQ_CLEAR, (1<<0));
+ wakeup(sc);
+ retval = FILTER_HANDLED;
+ }
+ sc->cmd_status = m_status;
+
+ if (sc->rx_buf == NULL) {
+ device_printf(sc->dev,
+ "Interrupt m_stat %x rx_fifo_status %x retval %d\n",
+ m_status, rx_fifo_status, retval);
+ WR(sc, GENI_M_IRQ_EN, 0);
+ WR(sc, GENI_M_IRQ_CLEAR, m_status);
+ device_printf(sc->dev,
+ "Interrupt M_IRQ_STATUS 0x%x M_IRQ_EN 0x%x\n",
+ RD(sc, GENI_M_IRQ_STATUS), RD(sc, GENI_M_IRQ_EN));
+ device_printf(sc->dev,
+ "Interrupt S_IRQ_STATUS 0x%x S_IRQ_EN 0x%x\n",
+ RD(sc, GENI_S_IRQ_STATUS), RD(sc, GENI_S_IRQ_EN));
+ device_printf(sc->dev,
+ "Interrupt DMA_TX_IRQ_STAT 0x%x DMA_RX_IRQ_STAT 0x%x\n",
+ RD(sc, GENI_DMA_TX_IRQ_STAT), RD(sc, GENI_DMA_RX_IRQ_STAT));
+ device_printf(sc->dev,
+ "Interrupt DMA_TX_IRQ_EN 0x%x DMA_RX_IRQ_EN 0x%x\n",
+ RD(sc, GENI_DMA_TX_IRQ_EN), RD(sc, GENI_DMA_RX_IRQ_EN));
+ WR(sc, GENI_DMA_TX_IRQ_EN_CLR, RD(sc, GENI_DMA_TX_IRQ_STAT));
+ WR(sc, GENI_DMA_TX_IRQ_CLR, RD(sc, GENI_DMA_TX_IRQ_STAT));
+ WR(sc, GENI_DMA_RX_IRQ_EN_CLR, RD(sc, GENI_DMA_RX_IRQ_STAT));
+ WR(sc, GENI_DMA_RX_IRQ_CLR, RD(sc, GENI_DMA_RX_IRQ_STAT));
+ }
+ mtx_unlock_spin(&sc->intr_lock);
+	return (retval);
+}
+
+static int
+geniiic_wait_m_ireq(geniiic_softc_t *sc, uint32_t bits)
+{
+ uint32_t status;
+ int timeout;
+
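+	// Poll up to ~100ms (10000 iterations x 10us) for any requested bit.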
+ for (timeout = 0; timeout < 10000; timeout++) {
+ status = RD(sc, GENI_M_IRQ_STATUS);
+ if (status & bits) {
+ return (0);
+ }
+ DELAY(10);
+ }
+ return (IIC_ETIMEOUT);
+}
+
+static int
+geniiic_read(geniiic_softc_t *sc,
+ uint8_t slave, uint8_t *buf, uint16_t len, bool nonfinal)
+{
+ uint32_t cmd, istatus;
+
+ istatus = RD(sc, GENI_M_IRQ_STATUS);
+ WR(sc, GENI_M_IRQ_CLEAR, istatus);
+
+ sc->rx_complete = false;
+ sc->rx_fifo = false;
+ sc->rx_buf = buf;
+ sc->rx_len = len;
+ WR(sc, GENI_I2C_RX_TRANS_LEN, len);
+
+ // GENI_M_CMD0_OPCODE_I2C_READ << M_OPCODE_SHFT
+ cmd = (0x2 << 27);
+
+ // GENI_M_CMD0_SLV_ADDR_SHIFT
+ cmd |= slave << 9;
+
+ if (nonfinal) {
+ // GENI_M_CMD0_STOP_STRETCH
+ cmd |= (1<<2);
+ }
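+	// Example: a final (no STOP_STRETCH) read from slave 0x50 gives
+	// cmd = (0x2 << 27) | (0x50 << 9) = 0x1000a000.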
+ WR(sc, GENI_RX_WATERMARK_REG, sc->rx_fifo_size - 4);
+
+ // CMD_DONE, RX_FIFO_WATERMARK
+ WR(sc, GENI_M_IRQ_EN, (1<<0) | (1<<26));
+
+ // M_IRQ
+ WR(sc, GENI_IRQ_EN, (1<<2));
+
+ WR(sc, GENI_M_CMD0, cmd);
+
+ mtx_lock_spin(&sc->intr_lock);
+ sc->rx_fifo = false;
+ unsigned msec;
+ for (msec = 0; msec < 100; msec++) {
+ msleep_spin_sbt(sc, &sc->intr_lock,
+ "geniwait", SBT_1MS, SBT_1MS / 10, 0);
+ if (sc->rx_complete)
+ break;
+ }
+ if (msec > sc->worst) {
+ device_printf(sc->dev,
+		    "worst-case rx wait %u -> %u msec\n", sc->worst, msec);
+ if (msec != 100)
+ sc->worst = msec;
+ }
+
+ if (!sc->rx_complete) {
+ // S_GENI_CMD_CANCEL
+ WR(sc, GENI_M_CMD_CTRL_REG, (1<<2));
+
+ WR(sc, GENI_IRQ_EN, 0);
+ device_printf(sc->dev,
+		    "Incomplete read (residual 0x%x)\n", sc->rx_len);
+ }
+
+ sc->rx_buf = NULL;
+ len = sc->rx_len;
+ sc->rx_len = 0;
+
+ mtx_unlock_spin(&sc->intr_lock);
+
+#define COMPLAIN(about) \
+ device_printf(sc->dev, \
+ "read " about " slave=0x%x len=0x%x, cmd=0x%x cmd_status=0x%x\n", \
+ slave, len, cmd, sc->cmd_status \
+ )
+
+ if (geniiic_debug_units) {
+ unsigned unit = device_get_unit(sc->dev);
+ if (unit < 32 && geniiic_debug_units & (1<<unit) && len == 0) {
+ COMPLAIN("OK");
+			return (IIC_NOERR);
+ }
+ }
+ if (len == 0)
+		return (IIC_NOERR);
+
+ if (sc->cmd_status & (1<<10)) {
+ COMPLAIN("ESTATUS");
+		return (IIC_ESTATUS);
+ }
+ if (len) {
+ COMPLAIN("EUNDERFLOW");
+		return (IIC_EUNDERFLOW);
+ }
+ COMPLAIN("EBUSERR");
+ return (IIC_EBUSERR);
+#undef COMPLAIN
+}
+
+static int
+geniiic_write(geniiic_softc_t *sc,
+ uint8_t slave, uint8_t *buf, uint16_t len, bool nonfinal)
+{
+ uint32_t status, data, cmd;
+ int timeout, error;
+
+ status = RD(sc, GENI_M_IRQ_STATUS);
+ WR(sc, GENI_M_IRQ_CLEAR, status);
+
+ WR(sc, GENI_I2C_TX_TRANS_LEN, len);
+
+ // GENI_M_CMD0_OPCODE_I2C_WRITE << M_OPCODE_SHFT
+ cmd = (0x1 << 27);
+
+ // GENI_M_CMD0_SLV_ADDR_SHIFT
+ cmd |= slave << 9;
+
+ if (nonfinal) {
+ // GENI_M_CMD0_STOP_STRETCH
+ cmd |= (1<<2);
+ }
+ WR(sc, GENI_M_CMD0, cmd);
+	for (timeout = 0; len > 0 && timeout < 100; timeout++) {
+ status = RD(sc, GENI_TX_FIFO_STATUS);
+ if (status < 16) {
+ data = 0;
+ if (len) { data |= *buf << 0; buf++; len--; }
+ if (len) { data |= *buf << 8; buf++; len--; }
+ if (len) { data |= *buf << 16; buf++; len--; }
+ if (len) { data |= *buf << 24; buf++; len--; }
+ WR(sc, GENI_TX_FIFOn, data);
+ } else {
+ DELAY(10);
+ }
+ }
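+	// FIFO words are packed little-endian: the bytes {0x11, 0x22, 0x33}
+	// go out as the single word 0x00332211.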
+
+ // GENI_M_IRQ_CMD_DONE
+ error = geniiic_wait_m_ireq(sc, 1);
+
+ if (len == 0 && error == 0)
+		return (IIC_NOERR);
+ device_printf(sc->dev,
+ "write ERR len=%d, error=%d cmd=0x%x\n", len, error, cmd);
+ return (IIC_EBUSERR);
+}
+
+static void
+geniiic_dumpmsg(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
+{
+ unsigned u;
+
+ device_printf(dev, "transfer:\n");
+ for (u = 0; u < nmsgs; u++) {
+ device_printf(dev,
+ " [%d] slave=0x%x, flags=0x%x len=0x%x buf=%p\n",
+ u, msgs[u].slave, msgs[u].flags, msgs[u].len, msgs[u].buf
+ );
+ }
+}
+
+int
+geniiic_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+ unsigned u;
+ int error;
+
+ if (sc->nfail > 4) {
+ pause_sbt("geniic_fail", SBT_1S * 5, SBT_1S, 0);
+ return (IIC_ERESOURCE);
+ }
+
+ sx_xlock(&sc->real_bus_lock);
+
+ if (geniiic_debug_units) {
+ unsigned unit = device_get_unit(dev);
+ if (unit < 32 && geniiic_debug_units & (1<<unit)) {
+ geniiic_dumpmsg(dev, msgs, nmsgs);
+ }
+ }
+
+ error = 0;
+ for (u = 0; u < nmsgs; u++) {
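+		// A non-final message with IIC_M_NOSTOP is issued with the
+		// STOP_STRETCH bit in M_CMD0 (see geniiic_read/geniiic_write),
+		// which suppresses the STOP condition between messages.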
+ bool nonfinal =
+ (u < nmsgs - 1) && (msgs[u].flags & IIC_M_NOSTOP);
+ unsigned slave = msgs[u].slave >> 1;
+ if (msgs[u].flags & IIC_M_RD) {
+ error = geniiic_read(sc,
+ slave, msgs[u].buf, msgs[u].len, nonfinal);
+ } else {
+ error = geniiic_write(sc,
+ slave, msgs[u].buf, msgs[u].len, nonfinal);
+ }
+ }
+	if (error) {
+		device_printf(dev, "transfer error %d\n", error);
+		geniiic_dumpmsg(dev, msgs, nmsgs);
+		geniiic_reset(dev, 0, 0, NULL);
+		sc->nfail++;
+	} else {
+		sc->nfail = 0;
+	}
+ sx_xunlock(&sc->real_bus_lock);
+ return (error);
+}
+
+int
+geniiic_reset(device_t dev, u_char speed, u_char addr, u_char *oldaddr)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+ unsigned u;
+
+ device_printf(dev, "reset\n");
+ WR(sc, GENI_M_IRQ_EN, 0);
+ WR(sc, GENI_M_IRQ_CLEAR, ~0);
+ WR(sc, GENI_DMA_TX_IRQ_EN_CLR, ~0);
+ WR(sc, GENI_DMA_TX_IRQ_CLR, ~0);
+ WR(sc, GENI_DMA_RX_IRQ_EN_CLR, ~0);
+ WR(sc, GENI_DMA_RX_IRQ_CLR, ~0);
+
+ // S_GENI_CMD_ABORT
+ WR(sc, GENI_M_CMD_CTRL_REG, (1<<1));
+
+ WR(sc, GENI_DMA_RX_FSM_RST, 1);
+ for (u = 0; u < 1000; u++) {
+ if (RD(sc, GENI_DMA_RX_IRQ_STAT) & 0x8)
+ break;
+ DELAY(10);
+ }
+ if (u > 0)
+ device_printf(dev, "RXRESET time %u\n", u);
+ WR(sc, GENI_DMA_TX_FSM_RST, 1);
+ for (u = 0; u < 1000; u++) {
+ if (RD(sc, GENI_DMA_TX_IRQ_STAT) & 0x8)
+ break;
+ DELAY(10);
+ }
+ if (u > 0)
+ device_printf(dev, "TXRESET time %u\n", u);
+ return (0);
+}
+
+int
+geniiic_callback(device_t dev, int index, caddr_t data)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+ int error = 0;
+
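+	// XXX: short-circuited; the IIC_REQUEST_BUS/IIC_RELEASE_BUS
+	// handling below is currently unreachable.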
+	return (0);
+ switch (index) {
+ case IIC_REQUEST_BUS:
+ if (sx_try_xlock(&sc->bus_lock) == 0)
+ error = IIC_EBUSBSY;
+ else
+ sc->bus_locked = true;
+ break;
+
+ case IIC_RELEASE_BUS:
+ if (!sc->bus_locked) {
+ device_printf(dev, "Unlocking unlocked bus\n");
+ }
+ sc->bus_locked = false;
+ sx_xunlock(&sc->bus_lock);
+ break;
+
+ default:
+ device_printf(dev, "callback unknown %d\n", index);
+ error = errno2iic(EINVAL);
+ }
+
+ return (error);
+}
+
+int
+geniiic_attach(geniiic_softc_t *sc)
+{
+ int error = 0;
+
+ if (bootverbose)
+ geni_dump_regs(sc);
+ mtx_init(&sc->intr_lock, "geniiic intr lock", NULL, MTX_SPIN);
+ sx_init(&sc->real_bus_lock, "geniiic real bus lock");
+ sx_init(&sc->bus_lock, "geniiic bus lock");
+
+ sc->rx_fifo_size = (RD(sc, GENI_HW_PARAM_1) >> 16) & 0x3f;
+	device_printf(sc->dev, "RX FIFO size 0x%x\n", sc->rx_fifo_size);
+
+ // We might want to set/check the following registers:
+ // GENI_BYTE_GRANULARITY (0x00000000)
+ // GENI_TX_PACKING_CFG0 (0x0007f8fe)
+	// GENI_TX_PACKING_CFG1    (0x000ffefe)
+ // GENI_RX_PACKING_CFG0 (0x0007f8fe)
+	// GENI_RX_PACKING_CFG1    (0x000ffefe)
+
+ sc->iicbus = device_add_child(sc->dev, "iicbus", DEVICE_UNIT_ANY);
+ if (sc->iicbus == NULL) {
+		device_printf(sc->dev, "could not allocate iicbus instance\n");
+		return (ENXIO);
+ }
+
+ error = bus_setup_intr(sc->dev,
+ sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE,
+ geniiic_intr, NULL, sc, &sc->intr_handle);
+ if (error) {
+ device_printf(sc->dev,
+ "Unable to setup irq: error %d\n", error);
+ }
+
+ bus_attach_children(sc->dev);
+ return (error);
+}
+
+int
+geniiic_detach(geniiic_softc_t *sc)
+{
+ int error = 0;
+
+ error = bus_generic_detach(sc->dev);
+ if (error)
+ return (error);
+
+ WR(sc, GENI_M_IRQ_EN, 0);
+
+ if (sc->intr_handle) {
+ bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_handle);
+ }
+
+ sx_xlock(&sc->bus_lock);
+ sx_xlock(&sc->real_bus_lock);
+
+ geniiic_reset(sc->dev, 0, 0, NULL);
+ sc->iicbus = NULL;
+ sc->intr_handle = NULL;
+
+ sx_xunlock(&sc->real_bus_lock);
+ sx_xunlock(&sc->bus_lock);
+
+ sx_destroy(&sc->real_bus_lock);
+ sx_destroy(&sc->bus_lock);
+
+ mtx_destroy(&sc->intr_lock);
+ return (error);
+}
+
+int
+geniiic_suspend(geniiic_softc_t *sc)
+{
+ int error;
+
+ device_printf(sc->dev, "suspend method is NO-OP (good luck!)\n");
+
+ error = bus_generic_suspend(sc->dev);
+
+ return (error);
+}
+
+int
+geniiic_resume(geniiic_softc_t *sc)
+{
+ int error;
+
+ device_printf(sc->dev, "resume method is NO-OP (good luck!)\n");
+
+ error = bus_generic_resume(sc->dev);
+
+ return (error);
+}
+
+DRIVER_MODULE(iicbus, geniiic, iicbus_driver, NULL, NULL);
+DRIVER_MODULE(acpi_iicbus, geniiic, acpi_iicbus_driver, NULL, NULL);
+MODULE_DEPEND(geniiic, iicbus, IICBUS_MINVER, IICBUS_PREFVER, IICBUS_MAXVER);
+MODULE_VERSION(geniiic, 1);
diff --git a/sys/dev/iicbus/controller/qcom/geni_iic_acpi.c b/sys/dev/iicbus/controller/qcom/geni_iic_acpi.c
new file mode 100644
index 000000000000..2105071f5609
--- /dev/null
+++ b/sys/dev/iicbus/controller/qcom/geni_iic_acpi.c
@@ -0,0 +1,189 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Poul-Henning Kamp <phk@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+#include "opt_acpi.h"
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+
+#include <dev/acpica/acpivar.h>
+#include <dev/iicbus/iiconf.h>
+
+#include <dev/iicbus/controller/qcom/geni_iic_var.h>
+
+static int geniiic_acpi_probe(device_t dev);
+static int geniiic_acpi_attach(device_t dev);
+static int geniiic_acpi_detach(device_t dev);
+
+static char *geniiic_ids[] = {
+ "QCOM0C10",
+ NULL
+};
+
+static int
+geniiic_acpi_probe(device_t dev)
+{
+ int rv;
+
+ if (acpi_disabled("geniiic"))
+ return (ENXIO);
+ rv = ACPI_ID_PROBE(device_get_parent(dev), dev, geniiic_ids, NULL);
+ if (rv > 0)
+ return (rv);
+
+ device_set_desc(dev, "Qualcomm GENI I2C Controller");
+ return (rv);
+}
+
+static int
+geniiic_acpi_attach(device_t dev)
+{
+ geniiic_softc_t *sc;
+ char *str;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ sc->dev = dev;
+ error = ACPI_ID_PROBE(device_get_parent(dev), dev, geniiic_ids, &str);
+ if (error > 0)
+ return (error);
+
+ sc->regs_rid = 0;
+ sc->regs_res = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE);
+ if (sc->regs_res == NULL) {
+ device_printf(dev, "unable to map registers\n");
+ geniiic_acpi_detach(dev);
+ return (ENXIO);
+ }
+ sc->intr_rid = 0;
+ sc->intr_res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &sc->intr_rid, RF_SHAREABLE | RF_ACTIVE);
+ if (sc->intr_res == NULL) {
+ device_printf(dev, "unable to map interrupt\n");
+ geniiic_acpi_detach(dev);
+ return (ENXIO);
+ }
+ sc->platform_attached = true;
+
+ error = geniiic_attach(sc);
+ if (error)
+ geniiic_acpi_detach(dev);
+
+ return (error);
+}
+
+static int
+geniiic_acpi_detach(device_t dev)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+ int error;
+
+ if (sc->platform_attached) {
+ error = geniiic_detach(sc);
+ if (error)
+ return (error);
+ sc->platform_attached = false;
+ }
+
+ if (sc->intr_res) {
+ bus_release_resource(dev, SYS_RES_IRQ,
+ sc->intr_rid, sc->intr_res);
+ sc->intr_res = NULL;
+ }
+ if (sc->regs_res) {
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ sc->regs_rid, sc->regs_res);
+ sc->regs_res = NULL;
+ }
+
+ return (0);
+}
+
+static int
+geniiic_acpi_suspend(device_t dev)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+
+ return (geniiic_suspend(sc));
+}
+
+static int
+geniiic_acpi_resume(device_t dev)
+{
+ geniiic_softc_t *sc = device_get_softc(dev);
+
+ return (geniiic_resume(sc));
+}
+
+static device_method_t geniiic_acpi_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, geniiic_acpi_probe),
+ DEVMETHOD(device_attach, geniiic_acpi_attach),
+ DEVMETHOD(device_detach, geniiic_acpi_detach),
+ DEVMETHOD(device_suspend, geniiic_acpi_suspend),
+ DEVMETHOD(device_resume, geniiic_acpi_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
+
+ /* iicbus interface */
+ DEVMETHOD(iicbus_transfer, geniiic_transfer),
+ DEVMETHOD(iicbus_reset, geniiic_reset),
+ DEVMETHOD(iicbus_callback, geniiic_callback),
+
+ DEVMETHOD_END
+};
+
+static driver_t geniiic_acpi_driver = {
+ "geniiic",
+ geniiic_acpi_methods,
+ sizeof(struct geniiic_softc),
+};
+
+DRIVER_MODULE_ORDERED(geniiic, acpi, geniiic_acpi_driver, 0, 0, SI_ORDER_ANY);
+MODULE_DEPEND(geniiic, acpi, 1, 1, 1);
+ACPI_PNP_INFO(geniiic_ids);
diff --git a/sys/dev/iicbus/controller/qcom/geni_iic_var.h b/sys/dev/iicbus/controller/qcom/geni_iic_var.h
new file mode 100644
index 000000000000..9ce8200c6fe5
--- /dev/null
+++ b/sys/dev/iicbus/controller/qcom/geni_iic_var.h
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Poul-Henning Kamp <phk@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef _GENIIIC_GENI_VAR_H_
+#define _GENIIIC_GENI_VAR_H_
+
+#include "bus_if.h"
+#include "device_if.h"
+#include "iicbus_if.h"
+
+struct geniiic_softc {
+ device_t dev;
+ device_t iicbus;
+ struct resource *regs_res;
+ int regs_rid;
+ struct resource *intr_res;
+ int intr_rid;
+ void *intr_handle;
+ int intr_type;
+ uint32_t intr_mask;
+
+ bool bus_locked;
+
+ bool platform_attached;
+
+ int nfail;
+ unsigned worst;
+
+ unsigned rx_fifo_size;
+ bool rx_complete;
+ bool rx_fifo;
+ uint8_t *rx_buf;
+ unsigned rx_len;
+ uint32_t cmd_status;
+
+ // Protect access to the bus
+ struct sx bus_lock;
+ struct sx real_bus_lock;
+
+ // Coordinate with interrupt routine
+ struct mtx intr_lock;
+};
+
+typedef struct geniiic_softc geniiic_softc_t;
+
+int geniiic_attach(geniiic_softc_t *sc);
+int geniiic_detach(geniiic_softc_t *sc);
+int geniiic_suspend(geniiic_softc_t *sc);
+int geniiic_resume(geniiic_softc_t *sc);
+
+extern iicbus_transfer_t geniiic_transfer;
+extern iicbus_reset_t geniiic_reset;
+extern iicbus_callback_t geniiic_callback;
+
+#endif /* _GENIIIC_GENI_VAR_H_ */
diff --git a/sys/dev/iicbus/controller/rockchip/rk_i2c.c b/sys/dev/iicbus/controller/rockchip/rk_i2c.c
index 4a431649de49..9317adbcfd98 100644
--- a/sys/dev/iicbus/controller/rockchip/rk_i2c.c
+++ b/sys/dev/iicbus/controller/rockchip/rk_i2c.c
@@ -281,13 +281,26 @@ rk_i2c_send_stop(struct rk_i2c_softc *sc)
{
uint32_t reg;
- RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STOPIEN);
+ if (!(sc->msg->flags & IIC_M_NOSTOP)) {
+ RK_I2C_WRITE(sc, RK_I2C_IEN, RK_I2C_IEN_STOPIEN);
- sc->state = STATE_STOP;
+ sc->state = STATE_STOP;
- reg = RK_I2C_READ(sc, RK_I2C_CON);
- reg |= RK_I2C_CON_STOP;
- RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg |= RK_I2C_CON_STOP;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+ } else {
+ /*
+		 * Do not actually set the stop bit; instead, set up the
+		 * conditions to emulate a repeated start by clearing all
+		 * transfer state.
+ */
+ sc->state = STATE_IDLE;
+ sc->transfer_done = 1;
+
+ reg = RK_I2C_READ(sc, RK_I2C_CON);
+ reg &= ~RK_I2C_CON_CTRL_MASK;
+ RK_I2C_WRITE(sc, RK_I2C_CON, reg);
+ }
}
static void
@@ -350,9 +363,9 @@ rk_i2c_intr_locked(struct rk_i2c_softc *sc)
case STATE_READ:
rk_i2c_drain_rx(sc);
- if (sc->cnt == sc->msg->len)
+ if (sc->cnt == sc->msg->len) {
rk_i2c_send_stop(sc);
- else {
+ } else {
sc->mode = RK_I2C_CON_MODE_RX;
reg = RK_I2C_READ(sc, RK_I2C_CON) & \
~RK_I2C_CON_CTRL_MASK;
@@ -369,7 +382,6 @@ rk_i2c_intr_locked(struct rk_i2c_softc *sc)
RK_I2C_WRITE(sc, RK_I2C_CON, reg);
RK_I2C_WRITE(sc, RK_I2C_MRXCNT, transfer_len);
}
-
break;
case STATE_WRITE:
if (sc->cnt < sc->msg->len) {
@@ -378,12 +390,10 @@ rk_i2c_intr_locked(struct rk_i2c_softc *sc)
RK_I2C_IEN_NAKRCVIEN);
transfer_len = rk_i2c_fill_tx(sc);
RK_I2C_WRITE(sc, RK_I2C_MTXCNT, transfer_len);
- break;
- } else if (!(sc->msg->flags & IIC_M_NOSTOP)) {
+ } else {
rk_i2c_send_stop(sc);
- break;
}
- /* passthru */
+ break;
case STATE_STOP:
/* Disable stop bit */
reg = RK_I2C_READ(sc, RK_I2C_CON);
@@ -515,7 +525,7 @@ rk_i2c_transfer(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
if (nmsgs - i >= 2 && msgs[i].len < 4 &&
msgs[i].flags == (IIC_M_WR | IIC_M_NOSTOP) &&
- msgs[i + 1].flags == IIC_M_RD &&
+ (msgs[i + 1].flags & IIC_M_RD) == IIC_M_RD &&
(msgs[i].slave & ~LSB) == (msgs[i + 1].slave & ~LSB)) {
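+		/*
+		 * Merge a short write immediately followed by a read from
+		 * the same slave into one combined register-read (RRX)
+		 * transaction.
+		 */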
sc->mode = RK_I2C_CON_MODE_RRX;
@@ -648,14 +658,14 @@ rk_i2c_attach(device_t dev)
}
}
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus == NULL) {
device_printf(dev, "cannot add iicbus child device\n");
error = ENXIO;
goto fail;
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
@@ -676,10 +686,6 @@ rk_i2c_detach(device_t dev)
if ((error = bus_generic_detach(dev)) != 0)
return (error);
- if (sc->iicbus != NULL)
- if ((error = device_delete_child(dev, sc->iicbus)) != 0)
- return (error);
-
if (sc->sclk != NULL)
clk_release(sc->sclk);
if (sc->pclk != NULL)
diff --git a/sys/dev/iicbus/controller/twsi/twsi.c b/sys/dev/iicbus/controller/twsi/twsi.c
index 55d12b6ee5ae..46704e1eab65 100644
--- a/sys/dev/iicbus/controller/twsi/twsi.c
+++ b/sys/dev/iicbus/controller/twsi/twsi.c
@@ -812,12 +812,13 @@ twsi_attach(device_t dev)
&sc->debug, 0, "Set debug level (zero to disable)");
/* Attach the iicbus. */
- if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL) {
+ if ((sc->iicbus = device_add_child(dev, "iicbus",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "could not allocate iicbus instance\n");
twsi_detach(dev);
return (ENXIO);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
config_intrhook_oneshot(twsi_intr_start, dev);
@@ -835,10 +836,6 @@ twsi_detach(device_t dev)
if ((rv = bus_generic_detach(dev)) != 0)
return (rv);
- if (sc->iicbus != NULL)
- if ((rv = device_delete_child(dev, sc->iicbus)) != 0)
- return (rv);
-
if (sc->intrhand != NULL)
bus_teardown_intr(sc->dev, sc->res[1], sc->intrhand);
diff --git a/sys/dev/iicbus/controller/vybrid/vf_i2c.c b/sys/dev/iicbus/controller/vybrid/vf_i2c.c
index d3c3664af78c..4735a95cf5cd 100644
--- a/sys/dev/iicbus/controller/vybrid/vf_i2c.c
+++ b/sys/dev/iicbus/controller/vybrid/vf_i2c.c
@@ -184,7 +184,7 @@ vf_i2c_attach_common(device_t dev)
mtx_unlock(&sc->mutex);
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus == NULL) {
device_printf(dev, "could not add iicbus child");
@@ -193,7 +193,7 @@ vf_i2c_attach_common(device_t dev)
return (ENXIO);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -207,23 +207,17 @@ i2c_detach(device_t dev)
sc = device_get_softc(dev);
vf_i2c_dbg(sc, "i2c detach\n");
- mtx_lock(&sc->mutex);
-
- if (sc->freq == 0) {
- vf_i2c_dbg(sc, "Writing 0x00 to clock divider register\n");
- WRITE1(sc, I2C_IBFD, 0x00);
- }
-
error = bus_generic_detach(dev);
if (error != 0) {
device_printf(dev, "cannot detach child devices.\n");
return (error);
}
- error = device_delete_child(dev, sc->iicbus);
- if (error != 0) {
- device_printf(dev, "could not delete iicbus child.\n");
- return (error);
+ mtx_lock(&sc->mutex);
+
+ if (sc->freq == 0) {
+ vf_i2c_dbg(sc, "Writing 0x00 to clock divider register\n");
+ WRITE1(sc, I2C_IBFD, 0x00);
}
bus_release_resources(dev, i2c_spec, sc->res);
diff --git a/sys/dev/iicbus/gpio/pcf8574.c b/sys/dev/iicbus/gpio/pcf8574.c
index ab6e2bc07d1f..bf60dec67557 100644
--- a/sys/dev/iicbus/gpio/pcf8574.c
+++ b/sys/dev/iicbus/gpio/pcf8574.c
@@ -142,12 +142,13 @@ pcf8574_attach(device_t dev)
(void)pcf8574_write(sc, 0xff);
sx_init(&sc->lock, "pcf8574");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
sx_destroy(&sc->lock);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
@@ -158,9 +159,7 @@ pcf8574_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
sx_destroy(&sc->lock);
return (0);
}
diff --git a/sys/dev/iicbus/gpio/tca64xx.c b/sys/dev/iicbus/gpio/tca64xx.c
index 3b3bca9936f1..ab8fedd3f8fd 100644
--- a/sys/dev/iicbus/gpio/tca64xx.c
+++ b/sys/dev/iicbus/gpio/tca64xx.c
@@ -261,14 +261,13 @@ tca64xx_attach(device_t dev)
sc->addr = iicbus_get_addr(dev);
mtx_init(&sc->mtx, "tca64xx gpio", "gpio", MTX_DEF);
- sc->busdev = gpiobus_attach_bus(dev);
+ OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "Could not create busdev child\n");
return (ENXIO);
}
- OF_device_register_xref(OF_xref_from_node(ofw_bus_get_node(dev)), dev);
-
#ifdef DEBUG
switch (sc->chip) {
case TCA6416_TYPE:
@@ -282,6 +281,7 @@ tca64xx_attach(device_t dev)
}
#endif
+ bus_attach_children(dev);
return (0);
}
@@ -292,9 +292,7 @@ tca64xx_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->busdev != NULL)
- gpiobus_detach_bus(sc->busdev);
-
+ gpiobus_detach_bus(dev);
mtx_destroy(&sc->mtx);
return (0);
diff --git a/sys/dev/iicbus/if_ic.c b/sys/dev/iicbus/if_ic.c
index 52ab5afb9c4e..caca35a3d22b 100644
--- a/sys/dev/iicbus/if_ic.c
+++ b/sys/dev/iicbus/if_ic.c
@@ -162,8 +162,6 @@ icattach(device_t dev)
if_t ifp;
ifp = sc->ic_ifp = if_alloc(IFT_PARA);
- if (ifp == NULL)
- return (ENOSPC);
mtx_init(&sc->ic_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
MTX_DEF);
diff --git a/sys/dev/iicbus/iic.c b/sys/dev/iicbus/iic.c
index 8c9dbb6bc145..3b7d603005aa 100644
--- a/sys/dev/iicbus/iic.c
+++ b/sys/dev/iicbus/iic.c
@@ -133,8 +133,8 @@ static void
iic_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "iic", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "iic", -1);
+ if (device_find_child(parent, "iic", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "iic", DEVICE_UNIT_ANY);
}
static int
@@ -239,7 +239,8 @@ iicuio_move(struct iic_cdevpriv *priv, struct uio *uio, int last)
num_bytes = MIN(uio->uio_resid, sizeof(buffer));
transferred_bytes = 0;
- if (uio->uio_rw == UIO_WRITE) {
+ switch (uio->uio_rw) {
+ case UIO_WRITE:
error = uiomove(buffer, num_bytes, uio);
while ((error == 0) && (transferred_bytes < num_bytes)) {
@@ -248,13 +249,14 @@ iicuio_move(struct iic_cdevpriv *priv, struct uio *uio, int last)
num_bytes - transferred_bytes, &written_bytes, 0);
transferred_bytes += written_bytes;
}
-
- } else if (uio->uio_rw == UIO_READ) {
+ break;
+ case UIO_READ:
error = iicbus_read(parent, buffer,
num_bytes, &transferred_bytes,
((uio->uio_resid <= sizeof(buffer)) ? last : 0), 0);
if (error == 0)
error = uiomove(buffer, transferred_bytes, uio);
+ break;
}
}
@@ -290,10 +292,14 @@ iicuio(struct cdev *dev, struct uio *uio, int ioflag)
return (error);
}
- if (uio->uio_rw == UIO_READ)
+ switch (uio->uio_rw) {
+ case UIO_READ:
addr = priv->addr | LSB;
- else
+ break;
+ case UIO_WRITE:
addr = priv->addr & ~LSB;
+ break;
+ }
error = iicbus_start(parent, addr, 0);
if (error != 0)
diff --git a/sys/dev/iicbus/iicbb.c b/sys/dev/iicbus/iicbb.c
index 5d6ac48d4543..5f6423135f46 100644
--- a/sys/dev/iicbus/iicbb.c
+++ b/sys/dev/iicbus/iicbb.c
@@ -80,7 +80,6 @@ struct iicbb_softc {
static int iicbb_attach(device_t);
static void iicbb_child_detached(device_t, device_t);
-static int iicbb_detach(device_t);
static int iicbb_print_child(device_t, device_t);
static int iicbb_probe(device_t);
@@ -101,7 +100,7 @@ static device_method_t iicbb_methods[] = {
/* device interface */
DEVMETHOD(device_probe, iicbb_probe),
DEVMETHOD(device_attach, iicbb_attach),
- DEVMETHOD(device_detach, iicbb_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
/* bus interface */
DEVMETHOD(bus_child_detached, iicbb_child_detached),
@@ -144,7 +143,7 @@ iicbb_attach(device_t dev)
{
struct iicbb_softc *sc = (struct iicbb_softc *)device_get_softc(dev);
- sc->iicbus = device_add_child(dev, "iicbus", -1);
+ sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (!sc->iicbus)
return (ENXIO);
@@ -164,17 +163,7 @@ iicbb_attach(device_t dev)
"io_latency", CTLFLAG_RWTUN, &sc->io_latency,
0, "Estimate of pin toggling latency, microseconds");
- bus_generic_attach(dev);
- return (0);
-}
-
-static int
-iicbb_detach(device_t dev)
-{
-
- bus_generic_detach(dev);
- device_delete_children(dev);
-
+ bus_attach_children(dev);
return (0);
}
@@ -342,7 +331,7 @@ iicbb_getack(device_t dev)
{
struct iicbb_softc *sc = device_get_softc(dev);
int noack, err;
- int t;
+ int t = 0;
/* Release SDA so that the slave can drive it. */
err = iicbb_clockin(dev, 1);
@@ -352,12 +341,13 @@ iicbb_getack(device_t dev)
}
/* Sample SDA until ACK (low) or udelay runs out. */
- for (t = 0; t < sc->udelay; t++) {
+ do {
noack = I2C_GETSDA(dev);
if (!noack)
break;
DELAY(1);
- }
+ t++;
+	} while (t < sc->udelay);
DELAY(sc->udelay - t);
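+	/*
+	 * The DELAY above tops the sampling loop up to a full udelay, so
+	 * the SCL-high period stays constant regardless of when ACK came.
+	 */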
iicbb_clockout(dev);
diff --git a/sys/dev/iicbus/iicbus.c b/sys/dev/iicbus/iicbus.c
index 33fb5c875c50..0894ddddb8e8 100644
--- a/sys/dev/iicbus/iicbus.c
+++ b/sys/dev/iicbus/iicbus.c
@@ -125,9 +125,9 @@ iicbus_attach_common(device_t dev, u_int bus_freq)
}
printf("\n");
#endif
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -144,7 +144,7 @@ iicbus_detach(device_t dev)
struct iicbus_softc *sc = IICBUS_SOFTC(dev);
int err;
- if ((err = device_delete_children(dev)) != 0)
+ if ((err = bus_generic_detach(dev)) != 0)
return (err);
iicbus_reset(dev, IIC_FASTEST, 0, NULL);
mtx_destroy(&sc->lock);
@@ -249,6 +249,18 @@ iicbus_add_child(device_t dev, u_int order, const char *name, int unit)
}
static void
+iicbus_child_deleted(device_t dev, device_t child)
+{
+ struct iicbus_ivar *devi;
+
+ devi = device_get_ivars(child);
+ if (devi == NULL)
+ return;
+ resource_list_free(&devi->rl);
+ free(devi, M_DEVBUF);
+}
+
+static void
iicbus_hinted_child(device_t bus, const char *dname, int dunit)
{
device_t child;
@@ -360,6 +372,7 @@ static device_method_t iicbus_methods[] = {
DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
DEVMETHOD(bus_get_resource_list, iicbus_get_resource_list),
DEVMETHOD(bus_add_child, iicbus_add_child),
+ DEVMETHOD(bus_child_deleted, iicbus_child_deleted),
DEVMETHOD(bus_print_child, iicbus_print_child),
DEVMETHOD(bus_probe_nomatch, iicbus_probe_nomatch),
DEVMETHOD(bus_read_ivar, iicbus_read_ivar),
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index ff443afbf30a..5ca3f1b84e48 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -39,6 +39,7 @@
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
+#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -97,6 +98,8 @@ enum {
#define I2C_HID_POWER_ON 0x0
#define I2C_HID_POWER_OFF 0x1
+#define IICHID_RESET_TIMEOUT 5 /* seconds */
+
/*
* Since interrupt resource acquisition is not always possible (in case of GPIO
* interrupts) iichid now supports a sampling_mode.
@@ -155,6 +158,7 @@ enum iichid_powerstate_how {
*/
struct iichid_softc {
device_t dev;
+ struct mtx mtx;
bool probe_done;
int probe_result;
@@ -189,6 +193,7 @@ struct iichid_softc {
bool open; /* iicbus lock */
bool suspend; /* iicbus lock */
bool power_on; /* iicbus lock */
+ bool reset_acked; /* iichid mtx */
};
static device_probe_t iichid_probe;
@@ -270,42 +275,36 @@ iichid_cmd_read(struct iichid_softc* sc, void *buf, iichid_size_t maxlen,
* 6.1.3 - Retrieval of Input Reports
* DEVICE returns the length (2 Bytes) and the entire Input Report.
*/
- uint8_t actbuf[2] = { 0, 0 };
- /* Read actual input report length. */
+
+	memset(buf, 0xaa, 2);	/* In case nothing gets read. */
struct iic_msg msgs[] = {
- { sc->addr, IIC_M_RD | IIC_M_NOSTOP, sizeof(actbuf), actbuf },
+ { sc->addr, IIC_M_RD, maxlen, buf },
};
- uint16_t actlen;
int error;
error = iicbus_transfer(sc->dev, msgs, nitems(msgs));
if (error != 0)
return (error);
- actlen = actbuf[0] | actbuf[1] << 8;
- if (actlen <= 2 || actlen == 0xFFFF || maxlen == 0) {
- /* Read and discard 1 byte to send I2C STOP condition. */
- msgs[0] = (struct iic_msg)
- { sc->addr, IIC_M_RD | IIC_M_NOSTART, 1, actbuf };
- actlen = 0;
- } else {
- actlen -= 2;
- if (actlen > maxlen) {
- DPRINTF(sc, "input report too big. requested=%d "
- "received=%d\n", maxlen, actlen);
- actlen = maxlen;
+ DPRINTFN(sc, 5, "%*D\n", msgs[0].len, msgs[0].buf, " ");
+
+ uint16_t actlen = le16dec(buf);
+
+ if (actlen == 0) {
+ if (!sc->reset_acked) {
+ mtx_lock(&sc->mtx);
+ sc->reset_acked = true;
+ wakeup(&sc->reset_acked);
+ mtx_unlock(&sc->mtx);
}
- /* Read input report itself. */
- msgs[0] = (struct iic_msg)
- { sc->addr, IIC_M_RD | IIC_M_NOSTART, actlen, buf };
}
- error = iicbus_transfer(sc->dev, msgs, 1);
- if (error == 0 && actual_len != NULL)
+ if (actlen <= 2 || actlen > maxlen) {
+ actlen = 0;
+ }
+ if (actual_len != NULL) {
*actual_len = actlen;
-
- DPRINTFN(sc, 5,
- "%*D - %*D\n", 2, actbuf, " ", msgs[0].len, msgs[0].buf, " ");
+ }
return (error);
}
@@ -541,7 +540,7 @@ iichid_sampling_task(void *context, int pending)
error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
if (error == 0) {
if (actual > 0) {
- sc->intr_handler(sc->intr_ctx, sc->intr_buf, actual);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual - 2);
sc->missing_samples = 0;
if (sc->dup_size != actual ||
memcmp(sc->dup_buf, sc->intr_buf, actual) != 0) {
@@ -552,7 +551,7 @@ iichid_sampling_task(void *context, int pending)
++sc->dup_samples;
} else {
if (++sc->missing_samples == 1)
- sc->intr_handler(sc->intr_ctx, sc->intr_buf, 0);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, 0);
sc->dup_samples = 0;
}
} else
@@ -579,7 +578,7 @@ iichid_intr(void *context)
{
struct iichid_softc *sc;
device_t parent;
- iichid_size_t maxlen, actual;
+ iichid_size_t actual;
int error;
sc = context;
@@ -601,13 +600,14 @@ iichid_intr(void *context)
* (to ON) before any other command. As some hardware requires reads to
* acknowledge interrupts we fetch only length header and discard it.
*/
- maxlen = sc->power_on ? sc->intr_bufsize : 0;
- error = iichid_cmd_read(sc, sc->intr_buf, maxlen, &actual);
+ THREAD_SLEEPING_OK();
+ error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
+ THREAD_NO_SLEEPING();
if (error == 0) {
- if (sc->power_on) {
+ if (sc->power_on && sc->open) {
if (actual != 0)
- sc->intr_handler(sc->intr_ctx, sc->intr_buf,
- actual);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2,
+ actual - 2);
else
DPRINTF(sc, "no data received\n");
}
@@ -809,42 +809,50 @@ iichid_intr_setup(device_t dev, device_t child __unused, hid_intr_t intr,
void *context, struct hid_rdesc_info *rdesc)
{
struct iichid_softc *sc;
+ device_t parent;
if (intr == NULL)
return;
sc = device_get_softc(dev);
/*
- * Do not rely on wMaxInputLength, as some devices may set it to
- * a wrong length. Find the longest input report in report descriptor.
+	 * Start with wMaxInputLength to follow the HID-over-I2C spec. Then,
+	 * if a semi-HID device like ietp(4) has requested a different input
+	 * buffer size by overloading the report descriptor, use the longest
+	 * input report found there instead, and add two for the length field.
*/
- rdesc->rdsize = rdesc->isize;
+ rdesc->rdsize = rdesc->rdsize == 0 ?
+ le16toh(sc->desc.wMaxInputLength) - 2 : rdesc->isize;
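+	/* e.g. a wMaxInputLength of 16 means a 2-byte length header plus up
+	 * to 14 bytes of report data: rdsize 14, intr_bufsize 16 below. */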
/* Write and get/set_report sizes are limited by I2C-HID protocol. */
rdesc->grsize = rdesc->srsize = IICHID_SIZE_MAX;
rdesc->wrsize = IICHID_SIZE_MAX;
+ parent = device_get_parent(sc->dev);
+ iicbus_request_bus(parent, sc->dev, IIC_WAIT);
+
sc->intr_handler = intr;
sc->intr_ctx = context;
- sc->intr_buf = malloc(rdesc->rdsize, M_DEVBUF, M_WAITOK | M_ZERO);
- sc->intr_bufsize = rdesc->rdsize;
+ sc->intr_bufsize = rdesc->rdsize + 2;
+ sc->intr_buf = realloc(sc->intr_buf, sc->intr_bufsize,
+ M_DEVBUF, M_WAITOK | M_ZERO);
#ifdef IICHID_SAMPLING
- sc->dup_buf = malloc(rdesc->rdsize, M_DEVBUF, M_WAITOK | M_ZERO);
+ sc->dup_buf = realloc(sc->dup_buf, sc->intr_bufsize,
+ M_DEVBUF, M_WAITOK | M_ZERO);
taskqueue_start_threads(&sc->taskqueue, 1, PI_TTY,
"%s taskq", device_get_nameunit(sc->dev));
#endif
+ iicbus_release_bus(parent, sc->dev);
}
static void
iichid_intr_unsetup(device_t dev, device_t child __unused)
{
+#ifdef IICHID_SAMPLING
struct iichid_softc *sc;
sc = device_get_softc(dev);
-#ifdef IICHID_SAMPLING
taskqueue_drain_all(sc->taskqueue);
- free(sc->dup_buf, M_DEVBUF);
#endif
- free(sc->intr_buf, M_DEVBUF);
}
static int
@@ -854,7 +862,8 @@ iichid_intr_start(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
DPRINTF(sc, "iichid device open\n");
- iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
+ if (!sc->open)
+ iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
return (0);
}
@@ -887,7 +896,7 @@ iichid_intr_poll(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
error = iichid_cmd_read(sc, sc->intr_buf, sc->intr_bufsize, &actual);
if (error == 0 && actual != 0)
- sc->intr_handler(sc->intr_ctx, sc->intr_buf, actual);
+ sc->intr_handler(sc->intr_ctx, sc->intr_buf + 2, actual - 2);
}
/*
@@ -914,6 +923,7 @@ iichid_read(device_t dev, device_t child __unused, void *buf,
{
struct iichid_softc *sc;
device_t parent;
+ uint8_t *tmpbuf;
int error;
if (maxlen > IICHID_SIZE_MAX)
@@ -922,8 +932,12 @@ iichid_read(device_t dev, device_t child __unused, void *buf,
parent = device_get_parent(sc->dev);
error = iicbus_request_bus(parent, sc->dev, IIC_WAIT);
if (error == 0) {
- error = iichid_cmd_read(sc, buf, maxlen, actlen);
+ tmpbuf = malloc(maxlen + 2, M_DEVBUF, M_WAITOK | M_ZERO);
+ error = iichid_cmd_read(sc, tmpbuf, maxlen + 2, actlen);
iicbus_release_bus(parent, sc->dev);
+ if (*actlen > 0)
+ memcpy(buf, tmpbuf + 2, *actlen);
+ free(tmpbuf, M_DEVBUF);
}
return (iic2errno(error));
}
@@ -1037,7 +1051,6 @@ iichid_probe(device_t dev)
{
struct iichid_softc *sc;
ACPI_HANDLE handle;
- char buf[80];
uint16_t config_reg;
int error, reg;
@@ -1081,7 +1094,8 @@ iichid_probe(device_t dev)
}
if (le16toh(sc->desc.wHIDDescLength) != 30 ||
- le16toh(sc->desc.bcdVersion) != 0x100) {
+ le16toh(sc->desc.bcdVersion) != 0x100 ||
+ le16toh(sc->desc.wMaxInputLength) < 2) {
DPRINTF(sc, "HID descriptor is broken\n");
return (ENXIO);
}
@@ -1097,10 +1111,8 @@ iichid_probe(device_t dev)
sc->probe_result = BUS_PROBE_DEFAULT;
done:
- if (sc->probe_result <= BUS_PROBE_SPECIFIC) {
- snprintf(buf, sizeof(buf), "%s I2C HID device", sc->hw.name);
- device_set_desc_copy(dev, buf);
- }
+ if (sc->probe_result <= BUS_PROBE_SPECIFIC)
+ device_set_descf(dev, "%s I2C HID device", sc->hw.name);
return (sc->probe_result);
}
@@ -1117,21 +1129,11 @@ iichid_attach(device_t dev)
device_printf(dev, "failed to power on: %d\n", error);
return (ENXIO);
}
- /*
- * Windows driver sleeps for 1ms between the SET_POWER and RESET
- * commands. So we too as some devices may depend on this.
- */
- pause("iichid", (hz + 999) / 1000);
-
- error = iichid_reset(sc);
- if (error) {
- device_printf(dev, "failed to reset hardware: %d\n", error);
- error = ENXIO;
- goto done;
- }
-
sc->power_on = true;
+ mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+ sc->intr_bufsize = le16toh(sc->desc.wMaxInputLength) - 2;
+ sc->intr_buf = malloc(sc->intr_bufsize, M_DEVBUF, M_WAITOK | M_ZERO);
TASK_INIT(&sc->suspend_task, 0, iichid_suspend_task, sc);
#ifdef IICHID_SAMPLING
sc->taskqueue = taskqueue_create_fast("iichid_tq", M_WAITOK | M_ZERO,
@@ -1142,6 +1144,7 @@ iichid_attach(device_t dev)
sc->sampling_rate_slow = -1;
sc->sampling_rate_fast = IICHID_SAMPLING_RATE_FAST;
sc->sampling_hysteresis = IICHID_SAMPLING_HYSTERESIS;
+ sc->dup_buf = malloc(sc->intr_bufsize, M_DEVBUF, M_WAITOK | M_ZERO);
#endif
sc->irq_rid = 0;
@@ -1157,13 +1160,14 @@ iichid_attach(device_t dev)
if (sc->irq_res == NULL || error != 0) {
#ifdef IICHID_SAMPLING
device_printf(sc->dev,
- "Interrupt setup failed. Fallback to sampling\n");
+ "Using sampling mode\n");
sc->sampling_rate_slow = IICHID_SAMPLING_RATE_SLOW;
#else
device_printf(sc->dev, "Interrupt setup failed\n");
if (sc->irq_res != NULL)
bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid,
sc->irq_res);
+ iichid_detach(dev);
error = ENXIO;
goto done;
#endif
@@ -1186,14 +1190,42 @@ iichid_attach(device_t dev)
&sc->sampling_hysteresis, 0,
"number of missing samples before enabling of slow mode");
hid_add_dynamic_quirk(&sc->hw, HQ_IICHID_SAMPLING);
+#endif /* IICHID_SAMPLING */
+
+ /*
+ * The Windows driver sleeps for 1ms between the SET_POWER and RESET
+ * commands. Do the same, as some devices may depend on this.
+ */
+ pause("iichid", (hz + 999) / 1000);
+
+ error = iichid_reset(sc);
+ if (error) {
+ device_printf(dev, "failed to reset hardware: %d\n", error);
+ iichid_detach(dev);
+ error = ENXIO;
+ goto done;
+ }
+ /* Wait for RESET response */
+#ifdef IICHID_SAMPLING
if (sc->sampling_rate_slow >= 0) {
pause("iichid", (hz + 999) / 1000);
- (void)iichid_cmd_read(sc, NULL, 0, NULL);
- }
+ (void)iichid_cmd_read(sc, sc->intr_buf, 0, NULL);
+ } else
#endif /* IICHID_SAMPLING */
+ {
+ mtx_lock(&sc->mtx);
+ if (!sc->reset_acked && !cold) {
+ error = mtx_sleep(&sc->reset_acked, &sc->mtx, 0,
+ "iichid_reset", hz * IICHID_RESET_TIMEOUT);
+ if (error != 0)
+ device_printf(sc->dev,
+ "Reset timeout expired\n");
+ }
+ mtx_unlock(&sc->mtx);
+ }
- child = device_add_child(dev, "hidbus", -1);
+ child = device_add_child(dev, "hidbus", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev, "Could not add I2C device\n");
iichid_detach(dev);
@@ -1202,14 +1234,15 @@ iichid_attach(device_t dev)
}
device_set_ivars(child, &sc->hw);
- error = bus_generic_attach(dev);
- if (error) {
- device_printf(dev, "failed to attach child: error %d\n", error);
- iichid_detach(dev);
- }
+ bus_attach_children(dev);
+ error = 0;
done:
- (void)iichid_set_power(sc, I2C_HID_POWER_OFF);
- sc->power_on = false;
+ iicbus_request_bus(device_get_parent(dev), dev, IIC_WAIT);
+ if (!sc->open) {
+ (void)iichid_set_power(sc, I2C_HID_POWER_OFF);
+ sc->power_on = false;
+ }
+ iicbus_release_bus(device_get_parent(dev), dev);
return (error);
}
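
The mtx_sleep() above waits on &sc->reset_acked, whose matching wakeup is not visible in this hunk. A plausible producer side, hypothetical but implied by the wait, would run in the interrupt path once the zero-length reset acknowledgement arrives:

	mtx_lock(&sc->mtx);
	sc->reset_acked = true;
	wakeup(&sc->reset_acked);	/* release the attach-time sleeper */
	mtx_unlock(&sc->mtx);
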
@@ -1220,7 +1253,7 @@ iichid_detach(device_t dev)
int error;
sc = device_get_softc(dev);
- error = device_delete_children(dev);
+ error = bus_generic_detach(dev);
if (error)
return (error);
iichid_teardown_interrupt(sc);
@@ -1231,7 +1264,10 @@ iichid_detach(device_t dev)
if (sc->taskqueue != NULL)
taskqueue_free(sc->taskqueue);
sc->taskqueue = NULL;
+ free(sc->dup_buf, M_DEVBUF);
#endif
+ free(sc->intr_buf, M_DEVBUF);
+ mtx_destroy(&sc->mtx);
return (0);
}
diff --git a/sys/dev/iicbus/iiconf.h b/sys/dev/iicbus/iiconf.h
index ccf1661bba7f..2fe95c0e9f6d 100644
--- a/sys/dev/iicbus/iiconf.h
+++ b/sys/dev/iicbus/iiconf.h
@@ -32,7 +32,7 @@
#include <dev/iicbus/iic.h>
-#define IICPRI (PZERO+8) /* XXX sleep/wakeup queue priority */
+#define IICPRI (PWAIT) /* XXX sleep/wakeup queue priority */
#define LSB 0x1
diff --git a/sys/dev/iicbus/iicsmb.c b/sys/dev/iicbus/iicsmb.c
index 308884e609ec..e03e789dc05d 100644
--- a/sys/dev/iicbus/iicsmb.c
+++ b/sys/dev/iicbus/iicsmb.c
@@ -134,8 +134,8 @@ static void
iicsmb_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "iicsmb", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "iicsmb", -1);
+ if (device_find_child(parent, "iicsmb", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "iicsmb", DEVICE_UNIT_ANY);
}
static int
@@ -152,10 +152,10 @@ iicsmb_attach(device_t dev)
mtx_init(&sc->lock, "iicsmb", NULL, MTX_DEF);
- sc->smbus = device_add_child(dev, "smbus", -1);
+ sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
/* probe and attach the smbus */
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -166,7 +166,6 @@ iicsmb_detach(device_t dev)
struct iicsmb_softc *sc = (struct iicsmb_softc *)device_get_softc(dev);
bus_generic_detach(dev);
- device_delete_children(dev);
mtx_destroy(&sc->lock);
return (0);
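
The bus_generic_attach() -> bus_attach_children() and device_delete_children() removals recur throughout this diff. The new idiom, sketched below for a hypothetical mybus(4), relies on bus_attach_children() returning nothing and on bus_generic_detach() deleting the children it detaches:

static int
mybus_attach(device_t dev)
{
	device_add_child(dev, "child", DEVICE_UNIT_ANY);
	bus_attach_children(dev);		/* void: no error to propagate */
	return (0);
}

static int
mybus_detach(device_t dev)
{
	int error;

	error = bus_generic_detach(dev);	/* detaches and deletes children */
	if (error != 0)
		return (error);
	/* tear down softc resources here */
	return (0);
}
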
diff --git a/sys/dev/iicbus/mux/iic_gpiomux.c b/sys/dev/iicbus/mux/iic_gpiomux.c
index c4dbe4066bd0..929b0f961af4 100644
--- a/sys/dev/iicbus/mux/iic_gpiomux.c
+++ b/sys/dev/iicbus/mux/iic_gpiomux.c
@@ -212,7 +212,7 @@ gpiomux_attach(device_t dev)
/* Init the core driver, have it add our child downstream buses. */
if ((err = iicmux_attach(dev, busdev, numchannels)) == 0)
- bus_generic_attach(dev);
+ bus_attach_children(dev);
errexit:
diff --git a/sys/dev/iicbus/mux/iicmux.c b/sys/dev/iicbus/mux/iicmux.c
index 5eafc8a5a21b..96164719577c 100644
--- a/sys/dev/iicbus/mux/iicmux.c
+++ b/sys/dev/iicbus/mux/iicmux.c
@@ -279,7 +279,7 @@ iicmux_attach_children(struct iicmux_softc *sc)
idx, sc->numbuses);
continue;
}
- sc->childdevs[idx] = device_add_child(sc->dev, "iicbus", -1);
+ sc->childdevs[idx] = device_add_child(sc->dev, "iicbus", DEVICE_UNIT_ANY);
sc->childnodes[idx] = child;
if (sc->maxbus < (int)idx)
sc->maxbus = idx;
@@ -295,7 +295,7 @@ iicmux_attach_children(struct iicmux_softc *sc)
* Add an iicbus child for every downstream bus supported by the mux.
*/
for (i = 0; i < sc->numbuses; ++i) {
- sc->childdevs[i] = device_add_child(sc->dev, "iicbus", -1);
+ sc->childdevs[i] = device_add_child(sc->dev, "iicbus", DEVICE_UNIT_ANY);
sc->maxbus = i;
}
diff --git a/sys/dev/iicbus/mux/ltc430x.c b/sys/dev/iicbus/mux/ltc430x.c
index 984f79223ee5..3dc410da0152 100644
--- a/sys/dev/iicbus/mux/ltc430x.c
+++ b/sys/dev/iicbus/mux/ltc430x.c
@@ -211,7 +211,7 @@ ltc430x_attach(device_t dev)
* the probe and attach code of any child iicbus instances it added.
*/
if ((err = iicmux_attach(dev, device_get_parent(dev), numchan)) == 0)
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (err);
}
diff --git a/sys/dev/iicbus/mux/pca9547.c b/sys/dev/iicbus/mux/pca9547.c
index b76f8d132b43..654a81e6f3e1 100644
--- a/sys/dev/iicbus/mux/pca9547.c
+++ b/sys/dev/iicbus/mux/pca9547.c
@@ -120,7 +120,7 @@ pca9547_attach(device_t dev)
rv = iicmux_attach(sc->dev, device_get_parent(dev), 8);
if (rv != 0)
return (rv);
- rv = bus_generic_attach(dev);
+ bus_attach_children(dev);
return (rv);
}
diff --git a/sys/dev/iicbus/mux/pca954x.c b/sys/dev/iicbus/mux/pca954x.c
index befa0db39172..1925f6c90e83 100644
--- a/sys/dev/iicbus/mux/pca954x.c
+++ b/sys/dev/iicbus/mux/pca954x.c
@@ -214,7 +214,7 @@ pca954x_attach(device_t dev)
sc->descr = descr = pca954x_find_chip(dev);
error = iicmux_attach(dev, device_get_parent(dev), descr->numchannels);
if (error == 0)
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (error);
}
diff --git a/sys/dev/iicbus/ofw_iicbus.c b/sys/dev/iicbus/ofw_iicbus.c
index 977865abdba0..84ddcbbdd3d5 100644
--- a/sys/dev/iicbus/ofw_iicbus.c
+++ b/sys/dev/iicbus/ofw_iicbus.c
@@ -127,7 +127,7 @@ ofw_iicbus_attach(device_t dev)
iicbus_reset(dev, IIC_FASTEST, 0, NULL);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
@@ -185,7 +185,7 @@ ofw_iicbus_attach(device_t dev)
continue;
}
- childdev = device_add_child(dev, NULL, -1);
+ childdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
resource_list_init(&dinfo->opd_dinfo.rl);
ofw_bus_intr_to_rl(childdev, child,
&dinfo->opd_dinfo.rl, NULL);
@@ -194,7 +194,8 @@ ofw_iicbus_attach(device_t dev)
/* Register bus */
OF_device_register_xref(OF_xref_from_node(node), dev);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_t
diff --git a/sys/dev/iicbus/pmic/act8846.c b/sys/dev/iicbus/pmic/act8846.c
index 5e166247f79b..6e5d85fdc6b0 100644
--- a/sys/dev/iicbus/pmic/act8846.c
+++ b/sys/dev/iicbus/pmic/act8846.c
@@ -215,7 +215,8 @@ act8846_attach(device_t dev)
if (rv != 0)
goto fail;
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
LOCK_DESTROY(sc);
@@ -226,11 +227,16 @@ static int
act8846_detach(device_t dev)
{
struct act8846_softc *sc;
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
sc = device_get_softc(dev);
LOCK_DESTROY(sc);
- return (bus_generic_detach(dev));
+ return (0);
}
static device_method_t act8846_methods[] = {
diff --git a/sys/dev/iicbus/rtc/ds1307.c b/sys/dev/iicbus/rtc/ds1307.c
index e8aa6a258edc..67a753bb3edb 100644
--- a/sys/dev/iicbus/rtc/ds1307.c
+++ b/sys/dev/iicbus/rtc/ds1307.c
@@ -346,7 +346,7 @@ mark_epson_time_valid(struct ds1307_softc *sc)
if (error) {
device_printf(dev, "%s cannot read Control 2 register: %d\n",
__func__, error);
- return (false);
+ return (error);
}
control_mask = (RX8035_CTRL_2_PON | RX8035_CTRL_2_XSTP | RX8035_CTRL_2_VDET);
@@ -356,9 +356,9 @@ mark_epson_time_valid(struct ds1307_softc *sc)
if (error) {
device_printf(dev, "%s cannot write to Control 2 register: %d\n",
__func__, error);
- return (false);
+ return (error);
}
- return (true);
+ return (0);
}
static bool is_dev_time_valid(struct ds1307_softc *sc)
diff --git a/sys/dev/iicbus/rtc/ds3231.c b/sys/dev/iicbus/rtc/ds3231.c
index 23e88eb7ecd3..eca408aab7cb 100644
--- a/sys/dev/iicbus/rtc/ds3231.c
+++ b/sys/dev/iicbus/rtc/ds3231.c
@@ -450,7 +450,7 @@ ds3231_start(void *xdev)
SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "temp_conv",
CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, 0,
ds3231_conv_sysctl, "IU",
- "DS3231 start a new temperature converstion");
+ "DS3231 start a new temperature conversion");
SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "bbsqw",
CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, sc, 0,
ds3231_bbsqw_sysctl, "IU",
diff --git a/sys/dev/imcsmb/imcsmb.c b/sys/dev/imcsmb/imcsmb.c
index fe766e1e1893..50280ad1b0f7 100644
--- a/sys/dev/imcsmb/imcsmb.c
+++ b/sys/dev/imcsmb/imcsmb.c
@@ -53,7 +53,6 @@
/* Device methods */
static int imcsmb_attach(device_t dev);
-static int imcsmb_detach(device_t dev);
static int imcsmb_probe(device_t dev);
/* SMBus methods */
@@ -90,7 +89,7 @@ imcsmb_attach(device_t dev)
sc->regs = device_get_ivars(dev);
/* Create the smbus child */
- sc->smbus = device_add_child(dev, "smbus", -1);
+ sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (sc->smbus == NULL) {
/* Nothing has been allocated, so there's no cleanup. */
device_printf(dev, "Child smbus not added\n");
@@ -99,39 +98,14 @@ imcsmb_attach(device_t dev)
}
/* Attach the smbus child. */
- if ((rc = bus_generic_attach(dev)) != 0) {
- device_printf(dev, "Failed to attach smbus: %d\n", rc);
- }
+ bus_attach_children(dev);
+ rc = 0;
out:
return (rc);
}
/**
- * device_detach() method. attach() didn't do any allocations, so all that's
- * needed here is to free up any downstream drivers and children.
- *
- * @author Joe Kloss
- *
- * @param[in] dev
- * Device being detached.
- */
-static int
-imcsmb_detach(device_t dev)
-{
- int rc;
-
- /* Detach any attached drivers */
- rc = bus_generic_detach(dev);
- if (rc == 0) {
- /* Remove all children */
- rc = device_delete_children(dev);
- }
-
- return (rc);
-}
-
-/**
* device_probe() method. All the actual probing was done by the imcsmb_pci
* parent, so just report success.
*
@@ -523,7 +497,7 @@ out:
static device_method_t imcsmb_methods[] = {
/* Device interface */
DEVMETHOD(device_attach, imcsmb_attach),
- DEVMETHOD(device_detach, imcsmb_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_probe, imcsmb_probe),
/* smbus methods */
diff --git a/sys/dev/imcsmb/imcsmb_pci.c b/sys/dev/imcsmb/imcsmb_pci.c
index e4dd10d6dd1e..6e9e601989b1 100644
--- a/sys/dev/imcsmb/imcsmb_pci.c
+++ b/sys/dev/imcsmb/imcsmb_pci.c
@@ -146,7 +146,6 @@ static struct imcsmb_pci_device {
/* Device methods. */
static int imcsmb_pci_attach(device_t dev);
-static int imcsmb_pci_detach(device_t dev);
static int imcsmb_pci_probe(device_t dev);
/**
@@ -174,7 +173,7 @@ imcsmb_pci_attach(device_t dev)
/* Create the imcsmbX children */
for (unit = 0; unit < 2; unit++) {
- child = device_add_child(dev, "imcsmb", -1);
+ child = device_add_child(dev, "imcsmb", DEVICE_UNIT_ANY);
if (child == NULL) {
/* Nothing has been allocated, so there's no cleanup. */
device_printf(dev, "Child imcsmb not added\n");
@@ -188,40 +187,14 @@ imcsmb_pci_attach(device_t dev)
}
/* Attach the imcsmbX children. */
- if ((rc = bus_generic_attach(dev)) != 0) {
- device_printf(dev, "failed to attach children: %d\n", rc);
- goto out;
- }
+ bus_attach_children(dev);
+ rc = 0;
out:
return (rc);
}
/**
- * device_detach() method. attach() didn't do any allocations, so all that's
- * needed here is to free up any downstream drivers and children.
- *
- * @author Joe Kloss
- *
- * @param[in] dev
- * Device being detached.
- */
-static int
-imcsmb_pci_detach(device_t dev)
-{
- int rc;
-
- /* Detach any attached drivers */
- rc = bus_generic_detach(dev);
- if (rc == 0) {
- /* Remove all children */
- rc = device_delete_children(dev);
- }
-
- return (rc);
-}
-
-/**
* device_probe() method. Look for the right PCI vendor/device IDs.
*
* @author Joe Kloss, rpokala
@@ -320,7 +293,7 @@ imcsmb_pci_request_bus(device_t dev)
static device_method_t imcsmb_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_attach, imcsmb_pci_attach),
- DEVMETHOD(device_detach, imcsmb_pci_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_probe, imcsmb_pci_probe),
DEVMETHOD_END
diff --git a/sys/dev/intel/spi.c b/sys/dev/intel/spi.c
index 3bcbd8fbd4f4..9a0d4305227d 100644
--- a/sys/dev/intel/spi.c
+++ b/sys/dev/intel/spi.c
@@ -527,9 +527,10 @@ intelspi_attach(device_t dev)
intelspi_init(sc);
- device_add_child(dev, "spibus", -1);
+ device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
- return (bus_delayed_attach_children(dev));
+ bus_delayed_attach_children(dev);
+ return (0);
error:
INTELSPI_LOCK_DESTROY(sc);
@@ -549,9 +550,14 @@ int
intelspi_detach(device_t dev)
{
struct intelspi_softc *sc;
+ int error;
sc = device_get_softc(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
INTELSPI_LOCK_DESTROY(sc);
if (sc->sc_irq_ih)
@@ -565,7 +571,7 @@ intelspi_detach(device_t dev)
bus_release_resource(dev, SYS_RES_IRQ,
sc->sc_irq_rid, sc->sc_irq_res);
- return (device_delete_children(dev));
+ return (0);
}
int
diff --git a/sys/dev/intpm/intpm.c b/sys/dev/intpm/intpm.c
index 90994f8053f6..fd3838bbcc14 100644
--- a/sys/dev/intpm/intpm.c
+++ b/sys/dev/intpm/intpm.c
@@ -55,6 +55,7 @@ struct intsmb_softc {
int cfg_irq9;
int sb8xx;
int poll;
+ int type;
struct mtx lock;
};
@@ -135,28 +136,44 @@ sb8xx_attach(device_t dev)
struct resource *res;
uint32_t devid;
uint8_t revid;
- uint16_t addr;
+ uint32_t addr;
int rid;
int rc;
bool enabled;
sc = device_get_softc(dev);
+ devid = pci_get_devid(dev);
+ revid = pci_get_revid(dev);
+
+ /*
+ * Comment from Linux i2c-piix4.c:
+ *
+ * cd6h/cd7h port I/O accesses can be disabled on AMD processors
+ * w/ SMBus PCI revision ID 0x51 or greater. MMIO is supported on
+ * the same processors and is the recommended access method.
+ */
+ if (devid == AMDCZ_SMBUS_DEVID && revid >= AMDCZ51_SMBUS_REVID) {
+ sc->type = SYS_RES_MEMORY;
+ addr = AMDFCH41_MMIO_ADDR + AMDFCH41_MMIO_PM_OFF;
+ } else {
+ sc->type = SYS_RES_IOPORT;
+ addr = AMDSB_PMIO_INDEX;
+ }
+
rid = 0;
- rc = bus_set_resource(dev, SYS_RES_IOPORT, rid, AMDSB_PMIO_INDEX,
+ rc = bus_set_resource(dev, sc->type, rid, addr,
AMDSB_PMIO_WIDTH);
if (rc != 0) {
device_printf(dev, "bus_set_resource for PM IO failed\n");
return (ENXIO);
}
- res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
+ res = bus_alloc_resource_any(dev, sc->type, &rid,
RF_ACTIVE);
if (res == NULL) {
device_printf(dev, "bus_alloc_resource for PM IO failed\n");
return (ENXIO);
}
- devid = pci_get_devid(dev);
- revid = pci_get_revid(dev);
if (devid == AMDSB_SMBUS_DEVID ||
(devid == AMDFCH_SMBUS_DEVID && revid < AMDFCH41_SMBUS_REVID) ||
(devid == AMDCZ_SMBUS_DEVID && revid < AMDCZ49_SMBUS_REVID)) {
@@ -165,6 +182,10 @@ sb8xx_attach(device_t dev)
addr |= amd_pmio_read(res, AMDSB8_PM_SMBUS_EN);
enabled = (addr & AMDSB8_SMBUS_EN) != 0;
addr &= AMDSB8_SMBUS_ADDR_MASK;
+ } else if (devid == AMDCZ_SMBUS_DEVID && revid >= AMDCZ51_SMBUS_REVID) {
+ addr = bus_read_1(res, AMDFCH41_PM_DECODE_EN0);
+ enabled = (addr & AMDFCH41_SMBUS_EN) != 0;
+ addr = AMDFCH41_MMIO_ADDR + AMDFCH41_MMIO_SMBUS_OFF;
} else {
addr = amd_pmio_read(res, AMDFCH41_PM_DECODE_EN0);
enabled = (addr & AMDFCH41_SMBUS_EN) != 0;
@@ -172,8 +193,8 @@ sb8xx_attach(device_t dev)
addr <<= 8;
}
- bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
- bus_delete_resource(dev, SYS_RES_IOPORT, rid);
+ bus_release_resource(dev, sc->type, rid, res);
+ bus_delete_resource(dev, sc->type, rid);
if (!enabled) {
device_printf(dev, "SB8xx/SB9xx/FCH SMBus not enabled\n");
@@ -181,13 +202,13 @@ sb8xx_attach(device_t dev)
}
sc->io_rid = 0;
- rc = bus_set_resource(dev, SYS_RES_IOPORT, sc->io_rid, addr,
+ rc = bus_set_resource(dev, sc->type, sc->io_rid, addr,
AMDSB_SMBIO_WIDTH);
if (rc != 0) {
device_printf(dev, "bus_set_resource for SMBus IO failed\n");
return (ENXIO);
}
- sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
+ sc->io_res = bus_alloc_resource_any(dev, sc->type, &sc->io_rid,
RF_ACTIVE);
if (sc->io_res == NULL) {
device_printf(dev, "Could not allocate I/O space\n");
@@ -202,14 +223,13 @@ intsmb_release_resources(device_t dev)
{
struct intsmb_softc *sc = device_get_softc(dev);
- if (sc->smbus)
- device_delete_child(dev, sc->smbus);
+ device_delete_children(dev);
if (sc->irq_hand)
bus_teardown_intr(dev, sc->irq_res, sc->irq_hand);
if (sc->irq_res)
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
if (sc->io_res)
- bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid,
+ bus_release_resource(dev, sc->type, sc->io_rid,
sc->io_res);
mtx_destroy(&sc->lock);
}
@@ -227,6 +247,7 @@ intsmb_attach(device_t dev)
mtx_init(&sc->lock, device_get_nameunit(dev), "intsmb", MTX_DEF);
sc->cfg_irq9 = 0;
+ sc->type = SYS_RES_IOPORT;
switch (pci_get_devid(dev)) {
#ifndef NO_CHANGE_PCICONF
case 0x71138086: /* Intel 82371AB */
@@ -255,7 +276,7 @@ intsmb_attach(device_t dev)
}
sc->io_rid = PCI_BASE_ADDR_SMB;
- sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
+ sc->io_res = bus_alloc_resource_any(dev, sc->type, &sc->io_rid,
RF_ACTIVE);
if (sc->io_res == NULL) {
device_printf(dev, "Could not allocate I/O space\n");
@@ -326,7 +347,7 @@ intsmb_attach(device_t dev)
no_intr:
sc->isbusy = 0;
- sc->smbus = device_add_child(dev, "smbus", -1);
+ sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (sc->smbus == NULL) {
device_printf(dev, "failed to add smbus child\n");
error = ENXIO;
@@ -858,7 +879,7 @@ intsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf)
if (nread != 0 && nread <= SMBBLOCKTRANS_MAX) {
*count = nread;
for (i = 0; i < nread; i++)
- bus_read_1(sc->io_res, PIIX4_SMBBLKDAT);
+ buf[i] = bus_read_1(sc->io_res, PIIX4_SMBBLKDAT);
} else
error = SMB_EBUSERR;
}
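
The access-method selection added above amounts to a small decision rule; a sketch with a hypothetical helper, using the constants from the hunk:

static int
smb_res_type(uint32_t devid, uint8_t revid, uint32_t *addr)
{
	/* Newer AMD FCH parts (SMBus revision >= 0x51) use MMIO... */
	if (devid == AMDCZ_SMBUS_DEVID && revid >= AMDCZ51_SMBUS_REVID) {
		*addr = AMDFCH41_MMIO_ADDR + AMDFCH41_MMIO_PM_OFF;
		return (SYS_RES_MEMORY);
	}
	/* ...older ones are reached via the cd6h/cd7h indexed ports. */
	*addr = AMDSB_PMIO_INDEX;
	return (SYS_RES_IOPORT);
}
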
diff --git a/sys/dev/ioat/ioat.c b/sys/dev/ioat/ioat.c
index e38b20c005f1..c658eeb4fe06 100644
--- a/sys/dev/ioat/ioat.c
+++ b/sys/dev/ioat/ioat.c
@@ -44,6 +44,7 @@
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
@@ -51,7 +52,6 @@
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#ifdef DDB
#include <ddb/ddb.h>
diff --git a/sys/dev/ioat/ioat_test.c b/sys/dev/ioat/ioat_test.c
index 9c081e0fb0b9..958297f65a56 100644
--- a/sys/dev/ioat/ioat_test.c
+++ b/sys/dev/ioat/ioat_test.c
@@ -35,12 +35,12 @@
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
@@ -82,17 +82,11 @@ static void _ioat_test_log(int verbosity, const char *fmt, ...);
static void
ioat_test_transaction_destroy(struct test_transaction *tx)
{
- struct ioat_test *test;
int i;
- test = tx->test;
-
for (i = 0; i < IOAT_MAX_BUFS; i++) {
if (tx->buf[i] != NULL) {
- if (test->testkind == IOAT_TEST_DMA_8K)
- free(tx->buf[i], M_IOAT_TEST);
- else
- contigfree(tx->buf[i], tx->length, M_IOAT_TEST);
+ free(tx->buf[i], M_IOAT_TEST);
tx->buf[i] = NULL;
}
}
diff --git a/sys/dev/iommu/busdma_iommu.c b/sys/dev/iommu/busdma_iommu.c
index d870e2af3984..668ccf056463 100644
--- a/sys/dev/iommu/busdma_iommu.c
+++ b/sys/dev/iommu/busdma_iommu.c
@@ -114,8 +114,8 @@ iommu_bus_dma_is_dev_disabled(int domain, int bus, int slot, int func)
* domain, and must collectively be assigned to use either IOMMU or
* bounce mapping.
*/
-device_t
-iommu_get_requester(device_t dev, uint16_t *rid)
+int
+iommu_get_requester(device_t dev, device_t *requesterp, uint16_t *rid)
{
devclass_t pci_class;
device_t l, pci, pcib, pcip, pcibp, requester;
@@ -126,6 +126,13 @@ iommu_get_requester(device_t dev, uint16_t *rid)
pci_class = devclass_find("pci");
l = requester = dev;
+ pci = device_get_parent(dev);
+ if (pci == NULL || device_get_devclass(pci) != pci_class) {
+ *rid = 0; /* XXXKIB: Could be ACPI HID */
+ *requesterp = NULL;
+ return (ENOTTY);
+ }
+
*rid = pci_get_rid(dev);
/*
@@ -135,16 +142,39 @@ iommu_get_requester(device_t dev, uint16_t *rid)
*/
for (;;) {
pci = device_get_parent(l);
- KASSERT(pci != NULL, ("iommu_get_requester(%s): NULL parent "
- "for %s", device_get_name(dev), device_get_name(l)));
- KASSERT(device_get_devclass(pci) == pci_class,
- ("iommu_get_requester(%s): non-pci parent %s for %s",
- device_get_name(dev), device_get_name(pci),
- device_get_name(l)));
+ if (pci == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL parent for %s\n",
+ device_get_name(dev), device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
+ if (device_get_devclass(pci) != pci_class) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): non-pci parent %s for %s\n",
+ device_get_name(dev), device_get_name(pci),
+ device_get_name(l));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
pcib = device_get_parent(pci);
- KASSERT(pcib != NULL, ("iommu_get_requester(%s): NULL bridge "
- "for %s", device_get_name(dev), device_get_name(pci)));
+ if (pcib == NULL) {
+ if (bootverbose) {
+ printf(
+ "iommu_get_requester(%s): NULL bridge for %s\n",
+ device_get_name(dev), device_get_name(pci));
+ }
+ *rid = 0;
+ *requesterp = NULL;
+ return (ENXIO);
+ }
/*
* The parent of our "bridge" isn't another PCI bus,
@@ -223,7 +253,8 @@ iommu_get_requester(device_t dev, uint16_t *rid)
}
}
}
- return (requester);
+ *requesterp = requester;
+ return (0);
}
struct iommu_ctx *
@@ -231,10 +262,13 @@ iommu_instantiate_ctx(struct iommu_unit *unit, device_t dev, bool rmrr)
{
device_t requester;
struct iommu_ctx *ctx;
+ int error;
bool disabled;
uint16_t rid;
- requester = iommu_get_requester(dev, &rid);
+ error = iommu_get_requester(dev, &requester, &rid);
+ if (error != 0)
+ return (NULL);
/*
* If the user requested the IOMMU disabled for the device, we
@@ -278,11 +312,7 @@ iommu_get_dev_ctx(device_t dev)
if (!unit->dma_enabled)
return (NULL);
-#if defined(__amd64__) || defined(__i386__)
- dmar_quirks_pre_use(unit);
- dmar_instantiate_rmrr_ctxs(unit);
-#endif
-
+ iommu_unit_pre_instantiate_ctx(unit);
return (iommu_instantiate_ctx(unit, dev, false));
}
@@ -395,6 +425,8 @@ static int
iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
{
struct bus_dma_tag_iommu *dmat;
+ struct iommu_unit *iommu;
+ struct iommu_ctx *ctx;
int error;
error = 0;
@@ -405,8 +437,12 @@ iommu_bus_dma_tag_destroy(bus_dma_tag_t dmat1)
error = EBUSY;
goto out;
}
- if (dmat == dmat->ctx->tag)
- iommu_free_ctx(dmat->ctx);
+ ctx = dmat->ctx;
+ if (dmat == ctx->tag) {
+ iommu = ctx->domain->iommu;
+ IOMMU_LOCK(iommu);
+ iommu_free_ctx_locked(iommu, dmat->ctx);
+ }
free(dmat->segments, M_IOMMU_DMAMAP);
free(dmat, M_DEVBUF);
}
@@ -963,10 +999,14 @@ iommu_init_busdma(struct iommu_unit *unit)
{
int error;
- unit->dma_enabled = 1;
+ unit->dma_enabled = 0;
error = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
if (error == 0) /* compatibility */
TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
+ SYSCTL_ADD_INT(&unit->sysctl_ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(unit->dev)),
+ OID_AUTO, "dma", CTLFLAG_RD, &unit->dma_enabled, 0,
+ "DMA ops enabled");
TAILQ_INIT(&unit->delayed_maps);
TASK_INIT(&unit->dmamap_load_task, 0, iommu_bus_task_dmamap, unit);
unit->delayed_taskqueue = taskqueue_create("iommu", M_WAITOK,
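
TUNABLE_INT_FETCH() returns non-zero when the kenv knob exists, so the compatibility branch above consults the legacy hw.dmar.dma name only when hw.iommu.dma was not set. An equivalent sketch of the fetch logic:

	int found;

	unit->dma_enabled = 0;			/* default from the hunk above */
	found = TUNABLE_INT_FETCH("hw.iommu.dma", &unit->dma_enabled);
	if (!found)
		(void)TUNABLE_INT_FETCH("hw.dmar.dma", &unit->dma_enabled);
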
diff --git a/sys/dev/iommu/iommu.h b/sys/dev/iommu/iommu.h
index 157f4c62423f..55044042c5d2 100644
--- a/sys/dev/iommu/iommu.h
+++ b/sys/dev/iommu/iommu.h
@@ -31,7 +31,11 @@
#ifndef _DEV_IOMMU_IOMMU_H_
#define _DEV_IOMMU_IOMMU_H_
+#include <sys/_task.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
#include <dev/iommu/iommu_types.h>
+#include <dev/pci/pcireg.h>
struct bus_dma_tag_common;
struct iommu_map_entry;
@@ -61,12 +65,14 @@ struct iommu_map_entry {
RB_ENTRY(iommu_map_entry) rb_entry; /* Links for domain entries */
struct iommu_domain *domain;
struct iommu_qi_genseq gseq;
+ struct spglist pgtbl_free;
};
struct iommu_unit {
struct mtx lock;
device_t dev;
int unit;
+ struct sysctl_ctx_list sysctl_ctx;
int dma_enabled;
@@ -85,10 +91,10 @@ struct iommu_unit {
};
struct iommu_domain_map_ops {
- int (*map)(struct iommu_domain *domain, iommu_gaddr_t base,
- iommu_gaddr_t size, vm_page_t *ma, uint64_t pflags, int flags);
- int (*unmap)(struct iommu_domain *domain, iommu_gaddr_t base,
- iommu_gaddr_t size, int flags);
+ int (*map)(struct iommu_domain *domain, struct iommu_map_entry *entry,
+ vm_page_t *ma, uint64_t pflags, int flags);
+ int (*unmap)(struct iommu_domain *domain, struct iommu_map_entry *entry,
+ int flags);
};
/*
@@ -115,11 +121,14 @@ struct iommu_domain {
iommu_gaddr_t msi_base; /* (d) Arch-specific */
vm_paddr_t msi_phys; /* (d) Arch-specific */
u_int flags; /* (u) */
+ LIST_HEAD(, iommu_ctx) contexts;/* (u) */
};
struct iommu_ctx {
struct iommu_domain *domain; /* (c) */
struct bus_dma_tag_iommu *tag; /* (c) Root tag */
+ LIST_ENTRY(iommu_ctx) link; /* (u) Member in the domain list */
+ u_int refs; /* (u) References from tags */
u_long loads; /* atomic updates, for stat only */
u_long unloads; /* same */
u_int flags; /* (u) */
@@ -158,9 +167,10 @@ void iommu_domain_unload_entry(struct iommu_map_entry *entry, bool free,
void iommu_domain_unload(struct iommu_domain *domain,
struct iommu_map_entries_tailq *entries, bool cansleep);
+void iommu_unit_pre_instantiate_ctx(struct iommu_unit *iommu);
struct iommu_ctx *iommu_instantiate_ctx(struct iommu_unit *iommu,
device_t dev, bool rmrr);
-device_t iommu_get_requester(device_t dev, uint16_t *rid);
+int iommu_get_requester(device_t dev, device_t *requester, uint16_t *rid);
int iommu_init_busdma(struct iommu_unit *unit);
void iommu_fini_busdma(struct iommu_unit *unit);
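
The map/unmap ops above now take the map entry rather than a base/size pair; a backend implementing the new signature would derive both from the entry, as in this hypothetical sketch:

static int
mymmu_map(struct iommu_domain *domain, struct iommu_map_entry *entry,
    vm_page_t *ma, uint64_t pflags, int flags)
{
	iommu_gaddr_t base = entry->start;
	iommu_gaddr_t size = entry->end - entry->start;

	/* ...program the translation for [base, base + size) from ma... */
	return (0);
}
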
diff --git a/sys/dev/iommu/iommu_gas.c b/sys/dev/iommu/iommu_gas.c
index 4b6141b981da..80e37341b3dc 100644
--- a/sys/dev/iommu/iommu_gas.c
+++ b/sys/dev/iommu/iommu_gas.c
@@ -77,7 +77,7 @@ static int iommu_check_free;
#endif
static void
-intel_gas_init(void)
+intel_gas_init(void *dummy __unused)
{
iommu_map_entry_zone = uma_zcreate("IOMMU_MAP_ENTRY",
@@ -96,9 +96,12 @@ iommu_gas_alloc_entry(struct iommu_domain *domain, u_int flags)
res = uma_zalloc(iommu_map_entry_zone, ((flags & IOMMU_PGF_WAITOK) !=
0 ? M_WAITOK : M_NOWAIT) | M_ZERO);
- if (res != NULL && domain != NULL) {
- res->domain = domain;
- atomic_add_int(&domain->entries_cnt, 1);
+ if (res != NULL) {
+ SLIST_INIT(&res->pgtbl_free);
+ if (domain != NULL) {
+ res->domain = domain;
+ atomic_add_int(&domain->entries_cnt, 1);
+ }
}
return (res);
}
@@ -107,7 +110,12 @@ void
iommu_gas_free_entry(struct iommu_map_entry *entry)
{
struct iommu_domain *domain;
+ int n __unused;
+ n = vm_page_free_pages_toq(&entry->pgtbl_free, false);
+#if defined(__i386__) || defined(__amd64__)
+ atomic_subtract_int(&iommu_tbl_pagecnt, n);
+#endif
domain = entry->domain;
if (domain != NULL)
atomic_subtract_int(&domain->entries_cnt, 1);
@@ -826,8 +834,7 @@ iommu_gas_map(struct iommu_domain *domain,
entry->flags |= eflags;
IOMMU_DOMAIN_UNLOCK(domain);
- error = domain->ops->map(domain, entry->start,
- entry->end - entry->start, ma, eflags,
+ error = domain->ops->map(domain, entry, ma, eflags,
((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
if (error == ENOMEM) {
iommu_domain_unload_entry(entry, true,
@@ -868,9 +875,14 @@ iommu_gas_map_region(struct iommu_domain *domain, struct iommu_map_entry *entry,
if (entry->end == entry->start)
return (0);
- error = domain->ops->map(domain, entry->start,
- entry->end - entry->start, ma + OFF_TO_IDX(start - entry->start),
- eflags, ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
+ /*
+ * iommu_gas_alloc_region() might clipped the entry start and
+ * end positions. Adjust the beginning of the ma array to map
+ * the pages at the requested relative positions.
+ */
+ error = domain->ops->map(domain, entry,
+ ma + OFF_TO_IDX(start - entry->start), eflags,
+ ((flags & IOMMU_MF_CANWAIT) != 0 ? IOMMU_PGF_WAITOK : 0));
if (error == ENOMEM) {
iommu_domain_unload_entry(entry, false,
(flags & IOMMU_MF_CANWAIT) != 0);
@@ -979,8 +991,7 @@ iommu_unmap_msi(struct iommu_ctx *ctx)
if (entry == NULL)
return;
- domain->ops->unmap(domain, entry->start, entry->end -
- entry->start, IOMMU_PGF_WAITOK);
+ domain->ops->unmap(domain, entry, IOMMU_PGF_WAITOK);
iommu_gas_free_space(entry);
diff --git a/sys/dev/ipmi/ipmi.c b/sys/dev/ipmi/ipmi.c
index efb8a7e7669b..5f759017441c 100644
--- a/sys/dev/ipmi/ipmi.c
+++ b/sys/dev/ipmi/ipmi.c
@@ -360,7 +360,7 @@ ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
kreq->ir_request[req->msg.data_len + 7] =
ipmi_ipmb_checksum(&kreq->ir_request[4],
req->msg.data_len + 3);
- error = ipmi_submit_driver_request(sc, kreq, MAX_TIMEOUT);
+ error = ipmi_submit_driver_request(sc, kreq);
if (error != 0)
return (error);
@@ -565,11 +565,10 @@ ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
/* Perform an internal driver request. */
int
-ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
- int timo)
+ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
- return (sc->ipmi_driver_request(sc, req, timo));
+ return (sc->ipmi_driver_request(sc, req));
}
/*
@@ -636,7 +635,7 @@ ipmi_reset_watchdog(struct ipmi_softc *sc)
IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_RESET_WDOG, 0, 0);
- error = ipmi_submit_driver_request(sc, req, 0);
+ error = ipmi_submit_driver_request(sc, req);
if (error) {
device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
} else if (req->ir_compcode == 0x80) {
@@ -677,7 +676,7 @@ ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
req->ir_request[4] = 0;
req->ir_request[5] = 0;
}
- error = ipmi_submit_driver_request(sc, req, 0);
+ error = ipmi_submit_driver_request(sc, req);
if (error) {
device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
} else if (req->ir_compcode != 0) {
@@ -812,7 +811,7 @@ ipmi_power_cycle(void *arg, int howto)
IPMI_CHASSIS_CONTROL, 1, 0);
req->ir_request[0] = IPMI_CC_POWER_CYCLE;
- ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
+ ipmi_submit_driver_request(sc, req);
if (req->ir_error != 0 || req->ir_compcode != 0) {
device_printf(sc->ipmi_dev, "Power cycling via IPMI failed code %#x %#x\n",
@@ -859,7 +858,7 @@ ipmi_startup(void *arg)
IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_DEVICE_ID, 0, 15);
- error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
+ error = ipmi_submit_driver_request(sc, req);
if (error == EWOULDBLOCK) {
device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
return;
@@ -888,7 +887,7 @@ ipmi_startup(void *arg)
IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_CLEAR_FLAGS, 1, 0);
- ipmi_submit_driver_request(sc, req, 0);
+ ipmi_submit_driver_request(sc, req);
/* XXX: Magic numbers */
if (req->ir_compcode == 0xc0) {
@@ -903,7 +902,7 @@ ipmi_startup(void *arg)
IPMI_GET_CHANNEL_INFO, 1, 0);
req->ir_request[0] = i;
- error = ipmi_submit_driver_request(sc, req, 0);
+ error = ipmi_submit_driver_request(sc, req);
if (error != 0 || req->ir_compcode != 0)
break;
@@ -918,7 +917,7 @@ ipmi_startup(void *arg)
IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
IPMI_GET_WDOG, 0, 0);
- error = ipmi_submit_driver_request(sc, req, 0);
+ error = ipmi_submit_driver_request(sc, req);
if (error == 0 && req->ir_compcode == 0x00) {
device_printf(dev, "Attached watchdog\n");
diff --git a/sys/dev/ipmi/ipmi_bt.c b/sys/dev/ipmi/ipmi_bt.c
index 2e92bdb0699e..c13397abd253 100644
--- a/sys/dev/ipmi/ipmi_bt.c
+++ b/sys/dev/ipmi/ipmi_bt.c
@@ -85,7 +85,7 @@
#define BT_IM_BMC_HWRST (1L << 7)
static int bt_polled_request(struct ipmi_softc *, struct ipmi_request *);
-static int bt_driver_request(struct ipmi_softc *, struct ipmi_request *, int);
+static int bt_driver_request(struct ipmi_softc *, struct ipmi_request *);
static int bt_wait(struct ipmi_softc *, uint8_t, uint8_t);
static int bt_reset(struct ipmi_softc *);
@@ -247,7 +247,7 @@ bt_loop(void *arg)
IPMI_LOCK(sc);
while ((req = ipmi_dequeue_request(sc)) != NULL) {
IPMI_UNLOCK(sc);
- (void)bt_driver_request(sc, req, 0);
+ (void)bt_driver_request(sc, req);
IPMI_LOCK(sc);
sc->ipmi_bt_seq++;
ipmi_complete_request(sc, req);
@@ -265,7 +265,7 @@ bt_startup(struct ipmi_softc *sc)
}
static int
-bt_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo __unused)
+bt_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
int i, ok;
diff --git a/sys/dev/ipmi/ipmi_isa.c b/sys/dev/ipmi/ipmi_isa.c
index 432c63b327ee..0c74307db00d 100644
--- a/sys/dev/ipmi/ipmi_isa.c
+++ b/sys/dev/ipmi/ipmi_isa.c
@@ -57,7 +57,7 @@ ipmi_isa_identify(driver_t *driver, device_t parent)
uint32_t devid;
if (ipmi_smbios_identify(&info) && info.iface_type != SSIF_MODE &&
- device_find_child(parent, "ipmi", -1) == NULL) {
+ device_find_child(parent, "ipmi", DEVICE_UNIT_ANY) == NULL) {
/*
* XXX: Hack alert. On some broken systems, the IPMI
* interface is described via SMBIOS, but the actual
@@ -70,7 +70,7 @@ ipmi_isa_identify(driver_t *driver, device_t parent)
if (devid != 0xffffffff &&
ipmi_pci_match(devid & 0xffff, devid >> 16) != NULL)
return;
- BUS_ADD_CHILD(parent, 0, "ipmi", -1);
+ BUS_ADD_CHILD(parent, 0, "ipmi", DEVICE_UNIT_ANY);
}
}
diff --git a/sys/dev/ipmi/ipmi_kcs.c b/sys/dev/ipmi/ipmi_kcs.c
index 3f1d84d708ce..be8e6664b717 100644
--- a/sys/dev/ipmi/ipmi_kcs.c
+++ b/sys/dev/ipmi/ipmi_kcs.c
@@ -488,13 +488,13 @@ kcs_startup(struct ipmi_softc *sc)
}
static int
-kcs_driver_request_queue(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+kcs_driver_request_queue(struct ipmi_softc *sc, struct ipmi_request *req)
{
int error;
IPMI_LOCK(sc);
ipmi_polled_enqueue_request_highpri(sc, req);
- error = msleep(req, &sc->ipmi_requests_lock, 0, "ipmireq", timo);
+ error = msleep(req, &sc->ipmi_requests_lock, 0, "ipmireq", 0);
if (error == 0)
error = req->ir_error;
IPMI_UNLOCK(sc);
@@ -517,13 +517,13 @@ kcs_driver_request_poll(struct ipmi_softc *sc, struct ipmi_request *req)
}
static int
-kcs_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+kcs_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
if (KERNEL_PANICKED() || dumping)
return (kcs_driver_request_poll(sc, req));
else
- return (kcs_driver_request_queue(sc, req, timo));
+ return (kcs_driver_request_queue(sc, req));
}
diff --git a/sys/dev/ipmi/ipmi_opal.c b/sys/dev/ipmi/ipmi_opal.c
index 084caba45184..0393dd92cc53 100644
--- a/sys/dev/ipmi/ipmi_opal.c
+++ b/sys/dev/ipmi/ipmi_opal.c
@@ -93,6 +93,7 @@ opal_ipmi_recv(struct opal_ipmi_softc *sc, uint64_t *msg_len, int timo)
opal_call(OPAL_POLL_EVENTS, NULL);
err = opal_call(OPAL_IPMI_RECV, sc->sc_interface,
vtophys(sc->sc_msg), vtophys(msg_len));
+ *msg_len = be64toh(*msg_len);
if (err != OPAL_EMPTY)
break;
@@ -248,15 +249,14 @@ opal_ipmi_startup(struct ipmi_softc *sc)
}
static int
-opal_ipmi_driver_request(struct ipmi_softc *isc, struct ipmi_request *req,
- int timo)
+opal_ipmi_driver_request(struct ipmi_softc *isc, struct ipmi_request *req)
{
struct opal_ipmi_softc *sc = (struct opal_ipmi_softc *)isc;
int i, err;
for (i = 0; i < 3; i++) {
IPMI_LOCK(&sc->ipmi);
- err = opal_ipmi_polled_request(sc, req, timo);
+ err = opal_ipmi_polled_request(sc, req, 0);
IPMI_UNLOCK(&sc->ipmi);
if (err == 0)
break;
diff --git a/sys/dev/ipmi/ipmi_smbios.c b/sys/dev/ipmi/ipmi_smbios.c
index b30328e3e616..29aa74127041 100644
--- a/sys/dev/ipmi/ipmi_smbios.c
+++ b/sys/dev/ipmi/ipmi_smbios.c
@@ -150,7 +150,7 @@ static void
ipmi_smbios_probe(struct ipmi_get_info *info)
{
#ifdef ARCH_MAY_USE_EFI
- struct uuid efi_smbios;
+ efi_guid_t efi_smbios = EFI_TABLE_SMBIOS;
void *addr_efi;
#endif
struct smbios_eps *header;
@@ -161,15 +161,16 @@ ipmi_smbios_probe(struct ipmi_get_info *info)
bzero(info, sizeof(struct ipmi_get_info));
#ifdef ARCH_MAY_USE_EFI
- efi_smbios = (struct uuid)EFI_TABLE_SMBIOS;
if (!efi_get_table(&efi_smbios, &addr_efi))
addr = (vm_paddr_t)addr_efi;
#endif
+#if defined(__amd64__) || defined(__i386__)
if (addr == 0)
/* Find the SMBIOS table header. */
addr = bios_sigsearch(SMBIOS_START, SMBIOS_SIG, SMBIOS_LEN,
SMBIOS_STEP, SMBIOS_OFF);
+#endif
if (addr == 0)
return;
@@ -190,8 +191,8 @@ ipmi_smbios_probe(struct ipmi_get_info *info)
/* Now map the actual table and walk it looking for an IPMI entry. */
table = pmap_mapbios(header->structure_table_address,
header->structure_table_length);
- smbios_walk_table(table, header->number_structures, smbios_ipmi_info,
- info);
+ smbios_walk_table(table, header->number_structures,
+ header->structure_table_length, smbios_ipmi_info, info);
/* Unmap everything. */
pmap_unmapbios(table, header->structure_table_length);
diff --git a/sys/dev/ipmi/ipmi_smbus.c b/sys/dev/ipmi/ipmi_smbus.c
index 59b8dd76675a..9516b3dfa487 100644
--- a/sys/dev/ipmi/ipmi_smbus.c
+++ b/sys/dev/ipmi/ipmi_smbus.c
@@ -59,8 +59,8 @@ ipmi_smbus_identify(driver_t *driver, device_t parent)
struct ipmi_get_info info;
if (ipmi_smbios_identify(&info) && info.iface_type == SSIF_MODE &&
- device_find_child(parent, "ipmi", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "ipmi", -1);
+ device_find_child(parent, "ipmi", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "ipmi", DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/ipmi/ipmi_smic.c b/sys/dev/ipmi/ipmi_smic.c
index 1bcede44f920..0a80562db7dc 100644
--- a/sys/dev/ipmi/ipmi_smic.c
+++ b/sys/dev/ipmi/ipmi_smic.c
@@ -388,7 +388,7 @@ smic_startup(struct ipmi_softc *sc)
}
static int
-smic_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+smic_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
int i, ok;
diff --git a/sys/dev/ipmi/ipmi_ssif.c b/sys/dev/ipmi/ipmi_ssif.c
index 0c22d35421ef..c83cccc75123 100644
--- a/sys/dev/ipmi/ipmi_ssif.c
+++ b/sys/dev/ipmi/ipmi_ssif.c
@@ -359,15 +359,14 @@ ssif_startup(struct ipmi_softc *sc)
}
static int
-ssif_driver_request(struct ipmi_softc *sc, struct ipmi_request *req, int timo)
+ssif_driver_request(struct ipmi_softc *sc, struct ipmi_request *req)
{
int error;
IPMI_LOCK(sc);
error = ipmi_polled_enqueue_request(sc, req);
if (error == 0)
- error = msleep(req, &sc->ipmi_requests_lock, 0, "ipmireq",
- timo);
+ error = msleep(req, &sc->ipmi_requests_lock, 0, "ipmireq", 0);
if (error == 0)
error = req->ir_error;
IPMI_UNLOCK(sc);
diff --git a/sys/dev/ipmi/ipmivars.h b/sys/dev/ipmi/ipmivars.h
index 17227c2053db..6ab8b128b820 100644
--- a/sys/dev/ipmi/ipmivars.h
+++ b/sys/dev/ipmi/ipmivars.h
@@ -133,7 +133,7 @@ struct ipmi_softc {
driver_intr_t *ipmi_intr;
int (*ipmi_startup)(struct ipmi_softc *);
int (*ipmi_enqueue_request)(struct ipmi_softc *, struct ipmi_request *);
- int (*ipmi_driver_request)(struct ipmi_softc *, struct ipmi_request *, int);
+ int (*ipmi_driver_request)(struct ipmi_softc *, struct ipmi_request *);
};
#define ipmi_ssif_smbus_address _iface.ssif.smbus_address
@@ -247,8 +247,7 @@ struct ipmi_request *ipmi_dequeue_request(struct ipmi_softc *);
void ipmi_free_request(struct ipmi_request *);
int ipmi_polled_enqueue_request(struct ipmi_softc *, struct ipmi_request *);
int ipmi_polled_enqueue_request_highpri(struct ipmi_softc *, struct ipmi_request *);
-int ipmi_submit_driver_request(struct ipmi_softc *, struct ipmi_request *,
- int);
+int ipmi_submit_driver_request(struct ipmi_softc *, struct ipmi_request *);
/* Identify BMC interface via SMBIOS. */
int ipmi_smbios_identify(struct ipmi_get_info *);
diff --git a/sys/dev/ips/ips.c b/sys/dev/ips/ips.c
index 3ed9aa5f2504..7a73aee2ef49 100644
--- a/sys/dev/ips/ips.c
+++ b/sys/dev/ips/ips.c
@@ -269,29 +269,17 @@ static int ips_diskdev_init(ips_softc_t *sc)
ips_diskdev_statename(sc->drives[i].state));
if(sc->drives[i].state == IPS_LD_OKAY ||
sc->drives[i].state == IPS_LD_DEGRADED){
- sc->diskdev[i] = device_add_child(sc->dev, NULL, -1);
+ sc->diskdev[i] = device_add_child(sc->dev, NULL, DEVICE_UNIT_ANY);
device_set_ivars(sc->diskdev[i],(void *)(uintptr_t) i);
}
}
- if(bus_generic_attach(sc->dev)){
- device_printf(sc->dev, "Attaching bus failed\n");
- }
+ bus_attach_children(sc->dev);
return 0;
}
static int ips_diskdev_free(ips_softc_t *sc)
{
- int i;
- int error = 0;
- for(i = 0; i < IPS_MAX_NUM_DRIVES; i++){
- if(sc->diskdev[i]) {
- error = device_delete_child(sc->dev, sc->diskdev[i]);
- if(error)
- return error;
- }
- }
- bus_generic_detach(sc->dev);
- return 0;
+ return (bus_generic_detach(sc->dev));
}
/* ips_timeout is periodically called to make sure no commands sent
diff --git a/sys/dev/ipw/if_ipw.c b/sys/dev/ipw/if_ipw.c
index 051f046d26ad..9db562669487 100644
--- a/sys/dev/ipw/if_ipw.c
+++ b/sys/dev/ipw/if_ipw.c
@@ -283,6 +283,8 @@ ipw_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i supported */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = ipw_read_prom_word(sc, IPW_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val >> 8;
@@ -838,8 +840,8 @@ ipw_media_status(if_t ifp, struct ifmediareq *imr)
struct ipw_softc *sc = ic->ic_softc;
/* read current transmission rate from adapter */
- vap->iv_bss->ni_txrate = ipw_cvtrate(
- ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf);
+ ieee80211_node_set_txrate_dot11rate(vap->iv_bss,
+ ipw_cvtrate(ipw_read_table1(sc, IPW_INFO_CURRENT_TX_RATE) & 0xf));
ieee80211_media_status(ifp, imr);
}
@@ -1119,7 +1121,7 @@ ipw_fix_channel(struct ipw_softc *sc, struct mbuf *m)
wh = mtod(m, struct ieee80211_frame *);
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
+ if (!IEEE80211_IS_MGMT(wh))
return;
subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
@@ -1557,6 +1559,7 @@ ipw_tx_start(struct ipw_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/irdma/irdma_cm.c b/sys/dev/irdma/irdma_cm.c
index 167e3c67390c..d4d4f328fb43 100644
--- a/sys/dev/irdma/irdma_cm.c
+++ b/sys/dev/irdma/irdma_cm.c
@@ -395,25 +395,25 @@ irdma_form_ah_cm_frame(struct irdma_cm_node *cm_node,
if (flags & SET_ACK) {
cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
tcph->th_ack = htonl(cm_node->tcp_cntxt.loc_ack_num);
- tcph->th_flags |= TH_ACK;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_ACK);
} else {
tcph->th_ack = 0;
}
if (flags & SET_SYN) {
cm_node->tcp_cntxt.loc_seq_num++;
- tcph->th_flags |= TH_SYN;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_SYN);
} else {
cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
}
if (flags & SET_FIN) {
cm_node->tcp_cntxt.loc_seq_num++;
- tcph->th_flags |= TH_FIN;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_FIN);
}
if (flags & SET_RST)
- tcph->th_flags |= TH_RST;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_RST);
tcph->th_off = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
sqbuf->tcphlen = tcph->th_off << 2;
@@ -582,25 +582,25 @@ irdma_form_uda_cm_frame(struct irdma_cm_node *cm_node,
if (flags & SET_ACK) {
cm_node->tcp_cntxt.loc_ack_num = cm_node->tcp_cntxt.rcv_nxt;
tcph->th_ack = htonl(cm_node->tcp_cntxt.loc_ack_num);
- tcph->th_flags |= TH_ACK;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_ACK);
} else {
tcph->th_ack = 0;
}
if (flags & SET_SYN) {
cm_node->tcp_cntxt.loc_seq_num++;
- tcph->th_flags |= TH_SYN;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_SYN);
} else {
cm_node->tcp_cntxt.loc_seq_num += hdr_len + pd_len;
}
if (flags & SET_FIN) {
cm_node->tcp_cntxt.loc_seq_num++;
- tcph->th_flags |= TH_FIN;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_FIN);
}
if (flags & SET_RST)
- tcph->th_flags |= TH_RST;
+ tcp_set_flags(tcph, tcp_get_flags(tcph) | TH_RST);
tcph->th_off = (u16)((sizeof(*tcph) + opts_len + 3) >> 2);
sqbuf->tcphlen = tcph->th_off << 2;
@@ -796,7 +796,7 @@ irdma_handle_tcp_options(struct irdma_cm_node *cm_node,
if (optionsize) {
ret = irdma_process_options(cm_node, optionsloc, optionsize,
- (u32)tcph->th_flags & TH_SYN);
+ (u32)tcp_get_flags(tcph) & TH_SYN);
if (ret) {
irdma_debug(&cm_node->iwdev->rf->sc_dev, IRDMA_DEBUG_CM,
"Node %p, Sending Reset\n", cm_node);
@@ -1316,7 +1316,7 @@ irdma_cm_timer_tick(struct timer_list *t)
struct irdma_timer_entry *send_entry, *close_entry;
struct list_head *list_core_temp;
struct list_head *list_node;
- struct irdma_cm_core *cm_core = from_timer(cm_core, t, tcp_timer);
+ struct irdma_cm_core *cm_core = timer_container_of(cm_core, t, tcp_timer);
struct irdma_sc_vsi *vsi;
u32 settimer = 0;
unsigned long timetosend;
@@ -2767,16 +2767,16 @@ irdma_process_pkt(struct irdma_cm_node *cm_node,
u32 fin_set = 0;
int err;
- if (tcph->th_flags & TH_RST) {
+ if (tcp_get_flags(tcph) & TH_RST) {
pkt_type = IRDMA_PKT_TYPE_RST;
- } else if (tcph->th_flags & TH_SYN) {
+ } else if (tcp_get_flags(tcph) & TH_SYN) {
pkt_type = IRDMA_PKT_TYPE_SYN;
- if (tcph->th_flags & TH_ACK)
+ if (tcp_get_flags(tcph) & TH_ACK)
pkt_type = IRDMA_PKT_TYPE_SYNACK;
- } else if (tcph->th_flags & TH_ACK) {
+ } else if (tcp_get_flags(tcph) & TH_ACK) {
pkt_type = IRDMA_PKT_TYPE_ACK;
}
- if (tcph->th_flags & TH_FIN)
+ if (tcp_get_flags(tcph) & TH_FIN)
fin_set = 1;
switch (pkt_type) {
@@ -3067,7 +3067,7 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
/*
* Only type of packet accepted are for the PASSIVE open (syn only)
*/
- if (!(tcph->th_flags & TH_SYN) || tcph->th_flags & TH_ACK)
+ if (!(tcp_get_flags(tcph) & TH_SYN) || tcp_get_flags(tcph) & TH_ACK)
return;
listener = irdma_find_listener(cm_core,
@@ -3093,7 +3093,7 @@ irdma_receive_ilq(struct irdma_sc_vsi *vsi, struct irdma_puda_buf *rbuf)
return;
}
- if (!(tcph->th_flags & (TH_RST | TH_FIN))) {
+ if (!(tcp_get_flags(tcph) & (TH_RST | TH_FIN))) {
cm_node->state = IRDMA_CM_STATE_LISTENING;
} else {
irdma_rem_ref_cm_node(cm_node);
diff --git a/sys/dev/irdma/irdma_ctrl.c b/sys/dev/irdma/irdma_ctrl.c
index dc42b15392c5..79ed14a60670 100644
--- a/sys/dev/irdma/irdma_ctrl.c
+++ b/sys/dev/irdma/irdma_ctrl.c
@@ -4909,7 +4909,7 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
struct irdma_virt_mem virt_mem;
u32 i, mem_size;
u32 qpwanted, mrwanted, pblewanted;
- u32 powerof2, hte;
+ u32 hte;
u32 sd_needed;
u32 sd_diff;
u32 loop_count = 0;
@@ -4938,12 +4938,8 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->sd_table.sd_cnt, max_sds);
qpwanted = min(qp_count, hmc_info->hmc_obj[IRDMA_HMC_IW_QP].max_cnt);
-
- powerof2 = 1;
- while (powerof2 <= qpwanted)
- powerof2 *= 2;
- powerof2 /= 2;
- qpwanted = powerof2;
+ if (qpwanted != 0)
+ qpwanted = rounddown_pow_of_two(qpwanted);
mrwanted = hmc_info->hmc_obj[IRDMA_HMC_IW_MR].max_cnt;
pblewanted = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].max_cnt;
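
rounddown_pow_of_two()/roundup_pow_of_two() replace the open-coded power-of-two loops in this function; their assumed semantics (Linux-compatibility helpers, undefined for zero, hence the qpwanted != 0 guard above) can be sketched as:

static inline uint32_t
rounddown_pow2(uint32_t x)	/* sketch only; x must be non-zero */
{
	while (!powerof2(x))
		x &= x - 1;	/* clear the lowest set bit */
	return (x);
}
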
@@ -4986,11 +4982,9 @@ irdma_cfg_fpm_val(struct irdma_sc_dev *dev, u32 qp_count)
hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt = mrwanted;
hte = round_up(qpwanted + hmc_info->hmc_obj[IRDMA_HMC_IW_FSIMC].cnt, 512);
- powerof2 = 1;
- while (powerof2 < hte)
- powerof2 *= 2;
+ hte = roundup_pow_of_two(hte);
hmc_info->hmc_obj[IRDMA_HMC_IW_HTE].cnt =
- powerof2 * hmc_fpm_misc->ht_multiplier;
+ hte * hmc_fpm_misc->ht_multiplier;
if (dev->hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1)
cfg_fpm_value_gen_1(dev, hmc_info, qpwanted);
else
diff --git a/sys/dev/irdma/irdma_utils.c b/sys/dev/irdma/irdma_utils.c
index 5fc37022981f..038f1980082b 100644
--- a/sys/dev/irdma/irdma_utils.c
+++ b/sys/dev/irdma/irdma_utils.c
@@ -876,7 +876,7 @@ irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
static void
irdma_terminate_timeout(struct timer_list *t)
{
- struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
+ struct irdma_qp *iwqp = timer_container_of(iwqp, t, terminate_timer);
struct irdma_sc_qp *qp = &iwqp->sc_qp;
irdma_terminate_done(qp, 1);
@@ -1528,7 +1528,7 @@ static void
irdma_hw_stats_timeout(struct timer_list *t)
{
struct irdma_vsi_pestat *pf_devstat =
- from_timer(pf_devstat, t, stats_timer);
+ timer_container_of(pf_devstat, t, stats_timer);
struct irdma_sc_vsi *sc_vsi = pf_devstat->vsi;
if (sc_vsi->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
diff --git a/sys/dev/isci/isci.c b/sys/dev/isci/isci.c
index 2b5d4301f6b2..1dede4e45b4c 100644
--- a/sys/dev/isci/isci.c
+++ b/sys/dev/isci/isci.c
@@ -235,7 +235,7 @@ isci_detach(device_t device)
sci_pool_get(controller->unmap_buffer_pool, unmap_buffer);
if (unmap_buffer == NULL)
break;
- contigfree(unmap_buffer, PAGE_SIZE, M_ISCI);
+ free(unmap_buffer, M_ISCI);
}
}
diff --git a/sys/dev/isci/isci_logger.c b/sys/dev/isci/isci_logger.c
index 62685d52faf9..b5653603a04e 100644
--- a/sys/dev/isci/isci_logger.c
+++ b/sys/dev/isci/isci_logger.c
@@ -37,7 +37,7 @@
#include <dev/isci/scil/scic_user_callback.h>
#include <dev/isci/scil/sci_logger.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <sys/time.h>
#define ERROR_LEVEL 0
diff --git a/sys/dev/isci/scil/intel_sata.h b/sys/dev/isci/scil/intel_sata.h
index 4cf4adf03e07..fdad5be9b083 100644
--- a/sys/dev/isci/scil/intel_sata.h
+++ b/sys/dev/isci/scil/intel_sata.h
@@ -61,7 +61,7 @@
*
* @brief This file defines all of the SATA related constants, enumerations,
* and types. Please note that this file does not necessarily contain
- * an exhaustive list of all contants and commands.
+ * an exhaustive list of all constants and commands.
*/
/**
diff --git a/sys/dev/isci/scil/sati_util.c b/sys/dev/isci/scil/sati_util.c
index 22c3111fe9ca..300ecbd5367b 100644
--- a/sys/dev/isci/scil/sati_util.c
+++ b/sys/dev/isci/scil/sati_util.c
@@ -70,7 +70,7 @@
/**
* @brief This method will set the data direction, protocol, and transfer
- * kength for an ATA non-data command.
+ * length for an ATA non-data command.
*
* @pre It is expected that the user will use this method for setting these
* values in a non-data ATA command construct.
diff --git a/sys/dev/isci/scil/sci_abstract_list.c b/sys/dev/isci/scil/sci_abstract_list.c
index 7c02e0b17156..1948043e2aed 100644
--- a/sys/dev/isci/scil/sci_abstract_list.c
+++ b/sys/dev/isci/scil/sci_abstract_list.c
@@ -230,13 +230,6 @@ void sci_abstract_list_print(
while (alElement_p != NULL)
{
-#ifdef UNIT_TEST_DEBUG
- /* Check to see if we found the object for which we are searching. */
- printf("ITEM next_p 0x%x prev_p 0x%x obj_p 0x%x, 0x%x\n",
- alElement_p->next_p,
- alElement_p->previous_p,
- (U32*) (alElement_p->object_p));
-#endif
alElement_p = alElement_p->next_p;
}
}
diff --git a/sys/dev/isci/scil/scif_sas_smp_remote_device.c b/sys/dev/isci/scil/scif_sas_smp_remote_device.c
index d6055adc13f9..c72402f66889 100644
--- a/sys/dev/isci/scil/scif_sas_smp_remote_device.c
+++ b/sys/dev/isci/scil/scif_sas_smp_remote_device.c
@@ -194,7 +194,7 @@ SCI_STATUS scif_sas_smp_remote_device_decode_smp_response(
//if Core set the status of this io to be RETRY_REQUIRED, we should
//retry the IO without even decode the response.
- if (completion_status == SCI_FAILURE_RETRY_REQUIRED)
+ if (completion_status == SCI_IO_FAILURE_RETRY_REQUIRED)
{
scif_sas_smp_remote_device_continue_current_activity(
fw_device, fw_request, SCI_FAILURE_RETRY_REQUIRED
diff --git a/sys/dev/iscsi/icl_soft.c b/sys/dev/iscsi/icl_soft.c
index 832ff8135ec5..812793a9fba3 100644
--- a/sys/dev/iscsi/icl_soft.c
+++ b/sys/dev/iscsi/icl_soft.c
@@ -1139,7 +1139,7 @@ icl_soft_conn_pdu_append_bio(struct icl_conn *ic, struct icl_pdu *request,
while (len > 0) {
if (m == NULL) {
m = mb_alloc_ext_pgs(flags & ~ICL_NOCOPY,
- icl_soft_free_mext_pg);
+ icl_soft_free_mext_pg, 0);
if (__predict_false(m == NULL))
return (ENOMEM);
atomic_add_int(&isp->ref_cnt, 1);
diff --git a/sys/dev/iser/iser_verbs.c b/sys/dev/iser/iser_verbs.c
index f5f057b961ef..f0c7e524ccf0 100644
--- a/sys/dev/iser/iser_verbs.c
+++ b/sys/dev/iser/iser_verbs.c
@@ -212,8 +212,6 @@ iser_create_device_ib_res(struct iser_device *device)
device->comps = malloc(device->comps_used * sizeof(*device->comps),
M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (!device->comps)
- goto comps_err;
max_cqe = min(ISER_MAX_CQ_LEN, ib_dev->attrs.max_cqe);
@@ -280,7 +278,6 @@ cq_err:
ib_dealloc_pd(device->pd);
pd_err:
free(device->comps, M_ISER_VERBS);
-comps_err:
ISER_ERR("failed to allocate an IB resource");
return (1);
}
@@ -343,11 +340,6 @@ iser_create_fastreg_desc(struct ib_device *ib_device, struct ib_pd *pd)
int ret;
desc = malloc(sizeof(*desc), M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (!desc) {
- ISER_ERR("Failed to allocate a new fastreg descriptor");
- return (NULL);
- }
-
ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc);
if (ret) {
ISER_ERR("failed to allocate reg_resources");
@@ -509,9 +501,6 @@ iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = malloc(sizeof *device, M_ISER_VERBS, M_WAITOK | M_ZERO);
- if (device == NULL)
- goto out;
-
/* assign this device to the device */
device->ib_device = cma_id->device;
/* init the device and link it into ig device list */
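
All three hunks above delete the same dead pattern: in FreeBSD, malloc(9) called with M_WAITOK sleeps until the allocation succeeds and cannot return NULL, so the failure branches (and the comps_err label they jumped to) were unreachable. The surviving idiom is simply:

    /* M_WAITOK may sleep but never fails, so no NULL check is needed. */
    device = malloc(sizeof(*device), M_ISER_VERBS, M_WAITOK | M_ZERO);
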
diff --git a/sys/dev/isl/isl.c b/sys/dev/isl/isl.c
index 558fc88975fa..6a0d406aeeda 100644
--- a/sys/dev/isl/isl.c
+++ b/sys/dev/isl/isl.c
@@ -149,7 +149,7 @@ static void
isl_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "asl", -1)) {
+ if (device_find_child(parent, "asl", DEVICE_UNIT_ANY)) {
if (bootverbose)
printf("asl: device(s) already created\n");
return;
@@ -157,7 +157,7 @@ isl_identify(driver_t *driver, device_t parent)
/* Check if we can communicate to our slave. */
if (init_device(dev, 0x88, 1) == 0)
- BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "isl", -1);
+ BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "isl", DEVICE_UNIT_ANY);
}
#endif
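
Both isl(4) hunks are part of the tree-wide replacement of the magic unit number -1 with the named constant DEVICE_UNIT_ANY in the newbus child-creation calls. A minimal sketch of the resulting identify-method idiom (hypothetical "foo" driver, not from this patch):

    static void
    foo_identify(driver_t *driver, device_t parent)
    {
            /* Bail out if an instance of this driver already exists. */
            if (device_find_child(parent, "foo", DEVICE_UNIT_ANY) != NULL)
                    return;
            /* Let newbus pick the unit number rather than hard-coding -1. */
            BUS_ADD_CHILD(parent, 0, "foo", DEVICE_UNIT_ANY);
    }
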
diff --git a/sys/dev/ismt/ismt.c b/sys/dev/ismt/ismt.c
index 3f3c7bdf26c8..5e6b7c8ebf18 100644
--- a/sys/dev/ismt/ismt.c
+++ b/sys/dev/ismt/ismt.c
@@ -532,8 +532,6 @@ ismt_detach(device_t dev)
if (error)
return (error);
- device_delete_child(dev, sc->smbdev);
-
if (sc->intr_handle != NULL) {
bus_teardown_intr(dev, sc->intr_res, sc->intr_handle);
sc->intr_handle = NULL;
@@ -588,7 +586,8 @@ ismt_attach(device_t dev)
sc->pcidev = dev;
pci_enable_busmaster(dev);
- if ((sc->smbdev = device_add_child(dev, "smbus", -1)) == NULL) {
+ if ((sc->smbdev = device_add_child(dev, "smbus",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "no smbus child found\n");
err = ENXIO;
goto fail;
@@ -607,11 +606,7 @@ ismt_attach(device_t dev)
sc->mmio_handle = rman_get_bushandle(sc->mmio_res);
/* Attach "smbus" child */
- if ((err = bus_generic_attach(dev)) != 0) {
- device_printf(dev, "failed to attach child: %d\n", err);
- err = ENXIO;
- goto fail;
- }
+ bus_attach_children(dev);
bus_dma_tag_create(bus_get_dma_tag(dev), 4, PAGE_SIZE,
BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
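
Besides the DEVICE_UNIT_ANY conversion, ismt(4) moves to the newer child-attachment flow: bus_attach_children() attaches whatever children were added with device_add_child() and, as used above, leaves no error status for the caller to translate, so the old bus_generic_attach() failure path disappears; likewise the explicit device_delete_child() in ismt_detach() is dropped in favor of generic child reaping. Condensed, the attach side becomes (sketch only, details elided):

    sc->smbdev = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
    if (sc->smbdev == NULL)
            return (ENXIO);
    /* ... map registers, set up interrupts ... */
    bus_attach_children(dev);   /* replaces error-checked bus_generic_attach() */
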
diff --git a/sys/dev/isp/isp.c b/sys/dev/isp/isp.c
index b53171add5f5..14d8147c3562 100644
--- a/sys/dev/isp/isp.c
+++ b/sys/dev/isp/isp.c
@@ -457,7 +457,10 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
if (IS_27XX(isp)) {
switch (isp_load_risc(isp, 0)) {
case ISP_ABORTED:
- /* download ispfw(4) as it's newer than flash */
+ /*
+ * download ispfw(4) as it's newer than flash, or
+ * the user requested it.
+ */
dodnld = 1;
break;
case ISP_SUCCESS:
@@ -1539,7 +1542,15 @@ isp_getpdb(ispsoftc_t *isp, int chan, uint16_t id, isp_pdb_t *pdb)
chan, id, pdb->portid, un.bill.pdb_flags,
un.bill.pdb_curstate, un.bill.pdb_laststate);
- if (un.bill.pdb_curstate < PDB2400_STATE_PLOGI_DONE || un.bill.pdb_curstate > PDB2400_STATE_LOGGED_IN) {
+ /*
+ * XXX KDM this is broken for NVMe. Need to determine whether this
+ * is an NVMe target, and if so, check the NVMe status bits. We are
+ * probably missing more bits for proper NVMe support, though.
+ */
+ if (((un.bill.pdb_curstate & PDB2400_STATE_FCP_MASK) <
+ PDB2400_STATE_PLOGI_DONE)
+ || ((un.bill.pdb_curstate & PDB2400_STATE_FCP_MASK) >
+ PDB2400_STATE_LOGGED_IN)) {
mbs.param[0] = MBOX_NOT_LOGGED_IN;
return (mbs.param[0]);
}
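
The new masking exists because, as the ispmbox.h hunk further down documents, 9.x firmware packs the NVMe login state into the high nibble of pdb_curstate and the FCP state into the low nibble, so the old unmasked range check misreads NVMe-capable ports. A sketch of how the two nibbles separate (illustrative only; per the XXX above, the NVMe bits are not yet acted upon):

    uint8_t fcp_state, nvme_state;

    fcp_state = un.bill.pdb_curstate & PDB2400_STATE_FCP_MASK;
    nvme_state = (un.bill.pdb_curstate >> PDB2400_STATE_NVME_SHIFT) &
        PDB2400_STATE_FCP_MASK;
    /* Only the FCP nibble is validated today. */
    if (fcp_state < PDB2400_STATE_PLOGI_DONE ||
        fcp_state > PDB2400_STATE_LOGGED_IN)
            return (MBOX_NOT_LOGGED_IN);
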
@@ -3088,6 +3099,7 @@ isp_control(ispsoftc_t *isp, ispctl_t ctl, ...)
if (ab->abrt_nphdl == ISP24XX_ABRT_OKAY)
return (0);
isp_prt(isp, ISP_LOGWARN, "Chan %d handle %d abort returned 0x%x", chan, tgt, ab->abrt_nphdl);
+ break;
}
case ISPCTL_FCLINK_TEST:
{
@@ -5214,7 +5226,20 @@ isp_load_risc_flash(ispsoftc_t *isp, uint32_t *srisc_addr, uint32_t faddr)
/* If ispfw(4) is loaded compare versions and use the newest */
if (isp->isp_osinfo.ispfw != NULL) {
+ int ispfw_newer = 0;
+
if (ISP_FW_NEWER_THANX(fcp->fw_ispfwrev, fcp->fw_flashrev)) {
+ ispfw_newer = 1;
+ }
+
+ if (isp->isp_confopts & ISP_CFG_FWLOAD_FORCE) {
+ isp_prt(isp, ISP_LOGCONFIG,
+ "Loading RISC with %s ispfw(4) firmware %s",
+ (ispfw_newer == 0) ? "older" : "newer",
+ "because fwload_force is set");
+ return (ISP_ABORTED);
+ }
+ if (ispfw_newer != 0) {
isp_prt(isp, ISP_LOGCONFIG,
"Loading RISC with newer ispfw(4) firmware");
return (ISP_ABORTED);
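
ISP_CFG_FWLOAD_FORCE is driven by a per-unit hint read via resource_int_value() in the isp_pci.c hunk below, so the usual loader(8) device-hint syntax applies; an assumed example forcing unit 0 to prefer ispfw(4) even when the flash image is newer:

    hint.isp.0.fwload_force="1"

Combining it with fwload_disable is contradictory, which the new warning in isp_pci.c calls out.
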
diff --git a/sys/dev/isp/isp_freebsd.c b/sys/dev/isp/isp_freebsd.c
index fdf4c0eb4e1f..b496eae1b466 100644
--- a/sys/dev/isp/isp_freebsd.c
+++ b/sys/dev/isp/isp_freebsd.c
@@ -626,8 +626,9 @@ isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
*/
#ifdef ISP_TARGET_MODE
static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
-static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
+static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t, void *);
static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
+static atio_private_data_t *isp_find_atpd_ccb(ispsoftc_t *, int, uint32_t, void *);
static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
@@ -715,7 +716,7 @@ isp_tmcmd_restart(ispsoftc_t *isp)
}
static atio_private_data_t *
-isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
+isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb)
{
struct isp_fc *fc = ISP_FC_PC(isp, chan);
atio_private_data_t *atp;
@@ -723,6 +724,7 @@ isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
atp = LIST_FIRST(&fc->atfree);
if (atp) {
LIST_REMOVE(atp, next);
+ atp->ccb = ccb;
atp->tag = tag;
LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
}
@@ -742,6 +744,23 @@ isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
return (NULL);
}
+/*
+ * Similar to the above, but in addition to the tag it matches the opaque
+ * CCB pointer.  It can be used when the tag alone may already have been reused.
+ */
+static atio_private_data_t *
+isp_find_atpd_ccb(ispsoftc_t *isp, int chan, uint32_t tag, void *ccb)
+{
+ struct isp_fc *fc = ISP_FC_PC(isp, chan);
+ atio_private_data_t *atp;
+
+ LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
+ if (atp->tag == tag && atp->ccb == ccb)
+ return (atp);
+ }
+ return (NULL);
+}
+
static void
isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
{
@@ -1378,7 +1397,7 @@ isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
*/
goto noresrc;
}
- atp = isp_get_atpd(isp, chan, aep->at_rxid);
+ atp = isp_get_atpd(isp, chan, aep->at_rxid, atiop);
if (atp == NULL) {
isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
@@ -1733,32 +1752,8 @@ isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_
/*
* This case is for a responding to an ABTS frame
*/
- if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
-
- /*
- * Overload nt_need_ack here to mark whether we've terminated the associated command.
- */
- if (mp->nt_need_ack) {
- abts_t *abts = (abts_t *)mp->nt_lreserved;
-
- ISP_MEMZERO(cto, sizeof (ct7_entry_t));
- isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
- cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
- cto->ct_header.rqs_entry_count = 1;
- cto->ct_nphdl = mp->nt_nphdl;
- cto->ct_rxid = abts->abts_rxid_task;
- cto->ct_iid_lo = mp->nt_sid;
- cto->ct_iid_hi = mp->nt_sid >> 16;
- cto->ct_oxid = abts->abts_ox_id;
- cto->ct_vpidx = mp->nt_channel;
- cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
- if (isp_send_entry(isp, cto)) {
- return (ENOMEM);
- }
- mp->nt_need_ack = 0;
- }
- return (isp_acknak_abts(isp, mp->nt_lreserved, 0));
- }
+ if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD)
+ return (isp_acknak_abts(isp, mp->nt_lreserved, (rsp == 0) ? 0 : EINVAL));
/*
* General purpose acknowledgement
@@ -1890,37 +1885,25 @@ bad:
}
}
+/*
+ * Clean aborted commands pending restart
+ */
static void
isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
{
- struct isp_fc *fc = ISP_FC_PC(isp, chan);
- atio_private_data_t *atp;
inot_private_data_t *ntp, *tmp;
uint32_t this_tag_id;
- /*
- * First, clean any commands pending restart
- */
STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
+ STAILQ_REMOVE(&tptr->restart_queue, ntp,
+ inot_private_data, next);
isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
ECMD_TERMINATE, 0);
isp_put_ntpd(isp, chan, ntp);
- STAILQ_REMOVE(&tptr->restart_queue, ntp,
- inot_private_data, next);
}
}
-
- /*
- * Now mark other ones dead as well.
- */
- for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
- if (atp->lun != tptr->ts_lun)
- continue;
- if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
- atp->dead = 1;
- }
}
#endif
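
Note the reordering inside the STAILQ_FOREACH_SAFE loop above: the element is now unlinked from restart_queue before isp_endcmd()/isp_put_ntpd() recycle it, since the queue(3) macros still read the element's linkage during removal. The general safe-removal idiom (hypothetical names):

    STAILQ_FOREACH_SAFE(np, &head, next, tmp) {
            if (should_drop(np)) {
                    /* Unlink first ... */
                    STAILQ_REMOVE(&head, np, node_type, next);
                    /* ... then it is safe to free or recycle. */
                    free_node(np);
            }
    }
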
@@ -2283,6 +2266,25 @@ isp_kthread(void *arg)
}
#ifdef ISP_TARGET_MODE
+static int
+isp_abort_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
+{
+ uint8_t storage[QENTRY_LEN];
+ ct7_entry_t *cto = (ct7_entry_t *) storage;
+
+ ISP_MEMZERO(cto, sizeof (ct7_entry_t));
+ cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
+ cto->ct_header.rqs_entry_count = 1;
+ cto->ct_nphdl = atp->nphdl;
+ cto->ct_vpidx = chan;
+ cto->ct_iid_lo = atp->sid;
+ cto->ct_iid_hi = atp->sid >> 16;
+ cto->ct_rxid = atp->tag;
+ cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
+ cto->ct_oxid = atp->oxid;
+ return (isp_send_entry(isp, cto));
+}
+
static void
isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
{
@@ -2308,30 +2310,16 @@ isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
}
/* Search for the ATIO among running. */
- atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
+ atp = isp_find_atpd_ccb(isp, XS_CHANNEL(accb), accb->atio.tag_id, accb);
if (atp != NULL) {
- /* Send TERMINATE to firmware. */
- if (!atp->dead) {
- uint8_t storage[QENTRY_LEN];
- ct7_entry_t *cto = (ct7_entry_t *) storage;
-
- ISP_MEMZERO(cto, sizeof (ct7_entry_t));
- cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
- cto->ct_header.rqs_entry_count = 1;
- cto->ct_nphdl = atp->nphdl;
- cto->ct_rxid = atp->tag;
- cto->ct_iid_lo = atp->sid;
- cto->ct_iid_hi = atp->sid >> 16;
- cto->ct_oxid = atp->oxid;
- cto->ct_vpidx = XS_CHANNEL(accb);
- cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
- isp_send_entry(isp, cto);
+ if (isp_abort_atpd(isp, XS_CHANNEL(accb), atp)) {
+ ccb->ccb_h.status = CAM_UA_ABORT;
+ return;
}
isp_put_atpd(isp, XS_CHANNEL(accb), atp);
- ccb->ccb_h.status = CAM_REQ_CMP;
- } else {
- ccb->ccb_h.status = CAM_UA_ABORT;
}
+
+ ccb->ccb_h.status = CAM_REQ_CMP;
}
static void
@@ -2504,6 +2492,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
}
case XPT_NOTIFY_ACKNOWLEDGE: /* notify ack */
{
+ atio_private_data_t *atp;
inot_private_data_t *ntp;
/*
@@ -2522,8 +2511,24 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
xpt_done(ccb);
break;
}
- if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
- (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
+
+ /*
+ * Target should abort all affected tasks before ACK-ing INOT,
+ * but if/since it doesn't, add this hack to allow tag reuse.
+ * We cannot do it if some CTIOs are in progress, or we won't
+ * handle the completions.  In such a case just block new ones.
+ */
+ uint32_t rsp = (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0;
+ if (ntp->nt.nt_ncode == NT_ABORT_TASK && (rsp & 0xff) == 0 &&
+ (atp = isp_find_atpd(isp, XS_CHANNEL(ccb), ccb->cna2.seq_id)) != NULL) {
+ if (atp->ctcnt == 0 &&
+ isp_abort_atpd(isp, XS_CHANNEL(ccb), atp) == 0)
+ isp_put_atpd(isp, XS_CHANNEL(ccb), atp);
+ else
+ atp->dead = 1;
+ }
+
+ if (isp_handle_platform_target_notify_ack(isp, &ntp->nt, rsp)) {
cam_freeze_devq(ccb->ccb_h.path);
cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
ccb->ccb_h.status &= ~CAM_STATUS_MASK;
diff --git a/sys/dev/isp/isp_freebsd.h b/sys/dev/isp/isp_freebsd.h
index bd5bba92c0a9..f557ad1ff5fb 100644
--- a/sys/dev/isp/isp_freebsd.h
+++ b/sys/dev/isp/isp_freebsd.h
@@ -42,6 +42,7 @@
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
@@ -50,7 +51,6 @@
#include <machine/bus.h>
#include <machine/cpu.h>
-#include <machine/stdarg.h>
#include <cam/cam.h>
#include <cam/cam_debug.h>
@@ -89,10 +89,11 @@ void isp_put_ecmd(struct ispsoftc *, isp_ecmd_t *);
#include <dev/isp/isp_target.h>
typedef struct atio_private_data {
LIST_ENTRY(atio_private_data) next;
+ void * ccb;
+ uint32_t tag; /* typically f/w RX_ID */
uint32_t orig_datalen;
uint32_t bytes_xfered;
uint32_t bytes_in_transit;
- uint32_t tag; /* typically f/w RX_ID */
lun_id_t lun;
uint32_t nphdl;
uint32_t sid;
@@ -102,9 +103,8 @@ typedef struct atio_private_data {
uint16_t word3; /* PRLI word3 params */
uint16_t ctcnt; /* number of CTIOs currently active */
uint8_t seqno; /* CTIO sequence number */
- uint32_t
- srr_notify_rcvd : 1,
- cdb0 : 8,
+ uint8_t cdb0;
+ uint16_t srr_notify_rcvd : 1,
sendst : 1,
dead : 1,
tattr : 3,
diff --git a/sys/dev/isp/isp_pci.c b/sys/dev/isp/isp_pci.c
index e7f9d4b77e38..e8fd7b3cf571 100644
--- a/sys/dev/isp/isp_pci.c
+++ b/sys/dev/isp/isp_pci.c
@@ -291,6 +291,15 @@ isp_get_generic_options(device_t dev, ispsoftc_t *isp)
isp->isp_confopts |= ISP_CFG_NORELOAD;
}
tval = 0;
+ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_force", &tval) == 0 && tval != 0) {
+ isp->isp_confopts |= ISP_CFG_FWLOAD_FORCE;
+ }
+ if ((isp->isp_confopts & (ISP_CFG_NORELOAD|ISP_CFG_FWLOAD_FORCE)) ==
+ (ISP_CFG_NORELOAD|ISP_CFG_FWLOAD_FORCE)) {
+ device_printf(dev, "WARNING: both fwload_disable and "
+ "fwload_force set, ispfw(4) loading disabled\n");
+ }
+ tval = 0;
if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
isp->isp_confopts |= ISP_CFG_NONVRAM;
}
diff --git a/sys/dev/isp/isp_target.c b/sys/dev/isp/isp_target.c
index 081524aff53c..9d8f8e2a3766 100644
--- a/sys/dev/isp/isp_target.c
+++ b/sys/dev/isp/isp_target.c
@@ -481,10 +481,14 @@ isp_acknak_abts(ispsoftc_t *isp, void *arg, int errno)
ISP_MEMCPY(rsp, abts, QENTRY_LEN);
rsp->abts_rsp_header.rqs_entry_type = RQSTYPE_ABTS_RSP;
+ isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS of 0x%x being %s'd",
+ rsp->abts_rsp_rxid_abts, rsp->abts_rsp_rxid_task,
+ (errno == 0) ? "BA_ACC" : "BA_RJT");
+ rsp->abts_rsp_r_ctl = (errno == 0) ? BA_ACC : BA_RJT;
+
/*
* Swap destination and source for response.
*/
- rsp->abts_rsp_r_ctl = BA_ACC;
tmpw = rsp->abts_rsp_did_lo;
tmpb = rsp->abts_rsp_did_hi;
rsp->abts_rsp_did_lo = rsp->abts_rsp_sid_lo;
@@ -505,15 +509,14 @@ isp_acknak_abts(ispsoftc_t *isp, void *arg, int errno)
rx_id = rsp->abts_rsp_rx_id;
ox_id = rsp->abts_rsp_ox_id;
ISP_MEMZERO(&rsp->abts_rsp_payload.ba_acc, sizeof (rsp->abts_rsp_payload.ba_acc));
- isp_prt(isp, ISP_LOGTINFO, "[0x%x] ABTS of 0x%x being BA_ACC'd", rsp->abts_rsp_rxid_abts, rsp->abts_rsp_rxid_task);
rsp->abts_rsp_payload.ba_acc.aborted_rx_id = rx_id;
rsp->abts_rsp_payload.ba_acc.aborted_ox_id = ox_id;
rsp->abts_rsp_payload.ba_acc.high_seq_cnt = 0xffff;
} else {
- ISP_MEMZERO(&rsp->abts_rsp_payload.ba_rjt, sizeof (rsp->abts_rsp_payload.ba_acc));
+ ISP_MEMZERO(&rsp->abts_rsp_payload.ba_rjt, sizeof (rsp->abts_rsp_payload.ba_rjt));
switch (errno) {
case ENOMEM:
- rsp->abts_rsp_payload.ba_rjt.reason = 5; /* Logical Unit Busy */
+ rsp->abts_rsp_payload.ba_rjt.reason = 5; /* Logical busy */
break;
default:
rsp->abts_rsp_payload.ba_rjt.reason = 9; /* Unable to perform command request */
diff --git a/sys/dev/isp/ispmbox.h b/sys/dev/isp/ispmbox.h
index c6ac9d9ce971..978ed4dc1638 100644
--- a/sys/dev/isp/ispmbox.h
+++ b/sys/dev/isp/ispmbox.h
@@ -890,6 +890,10 @@ typedef struct {
#define PDB2400_CLASS2 0x0010
#define PDB2400_ADDR_VALID 0x0002
+/*
+ * For NVMe, the state is the high nibble. For FCP, the state is the low
+ * nibble. This appears to have changed with the 9.x firmware.
+ */
#define PDB2400_STATE_PLOGI_PEND 0x03
#define PDB2400_STATE_PLOGI_DONE 0x04
#define PDB2400_STATE_PRLI_PEND 0x05
@@ -897,6 +901,8 @@ typedef struct {
#define PDB2400_STATE_PORT_UNAVAIL 0x07
#define PDB2400_STATE_PRLO_PEND 0x09
#define PDB2400_STATE_LOGO_PEND 0x0B
+#define PDB2400_STATE_FCP_MASK 0x0f
+#define PDB2400_STATE_NVME_SHIFT 4
/*
* Common elements from the above two structures that are actually useful to us.
diff --git a/sys/dev/isp/ispvar.h b/sys/dev/isp/ispvar.h
index 6c3430246b29..abb712a395c1 100644
--- a/sys/dev/isp/ispvar.h
+++ b/sys/dev/isp/ispvar.h
@@ -612,6 +612,7 @@ struct ispsoftc {
#define ISP_CFG_16GB 0x8000 /* force 16Gb connection (26XX only) */
#define ISP_CFG_32GB 0x10000 /* force 32Gb connection (27XX only) */
#define ISP_CFG_64GB 0x20000 /* force 64Gb connection (28XX only) */
+#define ISP_CFG_FWLOAD_FORCE 0x40000 /* Prefer ispfw(4) even if older */
/*
* For each channel, the outer layers should know what role that channel
diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c
index 4d71550f512e..26b8037186a6 100644
--- a/sys/dev/iwi/if_iwi.c
+++ b/sys/dev/iwi/if_iwi.c
@@ -371,6 +371,8 @@ iwi_attach(device_t dev)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/* read MAC address from EEPROM */
val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0);
ic->ic_macaddr[0] = val & 0xff;
@@ -927,8 +929,8 @@ iwi_media_status(if_t ifp, struct ifmediareq *imr)
/* read current transmission rate from adapter */
ni = ieee80211_ref_node(vap->iv_bss);
- ni->ni_txrate =
- iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE));
+ ieee80211_node_set_txrate_dot11rate(ni,
+ iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE)));
ieee80211_free_node(ni);
ieee80211_media_status(ifp, imr);
}
@@ -1834,6 +1836,8 @@ iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
} else
staid = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
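
The ni_txrate churn here, and in iwm(4)/iwn(4) below, migrates drivers from touching ni->ni_txrate directly to the net80211 accessor pair, keeping the node's current dot11 rate code behind one interface. The pattern, reduced to its two halves (sketch, generic driver context assumed):

    /* Publish the rate the hardware reports it is currently using. */
    ieee80211_node_set_txrate_dot11rate(ni, dot11rate);

    /* Later, read it back for rate control or TX descriptor setup. */
    dot11rate = ieee80211_node_get_txrate_dot11rate(ni);
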
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index c8eb46ed9795..6840c6a4d00a 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -197,32 +197,60 @@ _Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
#define IWM_NUM_2GHZ_CHANNELS 14
#define IWM_N_HW_ADDR_MASK 0xF
-/*
- * XXX For now, there's simply a fixed set of rate table entries
- * that are populated.
- */
const struct iwm_rate {
- uint8_t rate;
+ uint16_t rate;
uint8_t plcp;
+ uint8_t ht_plcp;
} iwm_rates[] = {
- { 2, IWM_RATE_1M_PLCP },
- { 4, IWM_RATE_2M_PLCP },
- { 11, IWM_RATE_5M_PLCP },
- { 22, IWM_RATE_11M_PLCP },
- { 12, IWM_RATE_6M_PLCP },
- { 18, IWM_RATE_9M_PLCP },
- { 24, IWM_RATE_12M_PLCP },
- { 36, IWM_RATE_18M_PLCP },
- { 48, IWM_RATE_24M_PLCP },
- { 72, IWM_RATE_36M_PLCP },
- { 96, IWM_RATE_48M_PLCP },
- { 108, IWM_RATE_54M_PLCP },
+ /* Legacy */ /* HT */
+ { 2, IWM_RATE_1M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
+ { 4, IWM_RATE_2M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
+ { 11, IWM_RATE_5M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
+ { 22, IWM_RATE_11M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
+ { 12, IWM_RATE_6M_PLCP, IWM_RATE_HT_SISO_MCS_0_PLCP },
+ { 18, IWM_RATE_9M_PLCP, IWM_RATE_HT_SISO_MCS_INV_PLCP },
+ { 24, IWM_RATE_12M_PLCP, IWM_RATE_HT_SISO_MCS_1_PLCP },
+ { 26, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_8_PLCP },
+ { 36, IWM_RATE_18M_PLCP, IWM_RATE_HT_SISO_MCS_2_PLCP },
+ { 48, IWM_RATE_24M_PLCP, IWM_RATE_HT_SISO_MCS_3_PLCP },
+ { 52, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_9_PLCP },
+ { 72, IWM_RATE_36M_PLCP, IWM_RATE_HT_SISO_MCS_4_PLCP },
+ { 78, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_10_PLCP },
+ { 96, IWM_RATE_48M_PLCP, IWM_RATE_HT_SISO_MCS_5_PLCP },
+ { 104, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_11_PLCP },
+ { 108, IWM_RATE_54M_PLCP, IWM_RATE_HT_SISO_MCS_6_PLCP },
+ { 128, IWM_RATE_INVM_PLCP, IWM_RATE_HT_SISO_MCS_7_PLCP },
+ { 156, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_12_PLCP },
+ { 208, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_13_PLCP },
+ { 234, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_14_PLCP },
+ { 260, IWM_RATE_INVM_PLCP, IWM_RATE_HT_MIMO2_MCS_15_PLCP },
};
#define IWM_RIDX_CCK 0
#define IWM_RIDX_OFDM 4
#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
+#define IWM_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
+
+/* Convert an MCS index into an iwm_rates[] index. */
+const int iwm_mcs2ridx[] = {
+ IWM_RATE_MCS_0_INDEX,
+ IWM_RATE_MCS_1_INDEX,
+ IWM_RATE_MCS_2_INDEX,
+ IWM_RATE_MCS_3_INDEX,
+ IWM_RATE_MCS_4_INDEX,
+ IWM_RATE_MCS_5_INDEX,
+ IWM_RATE_MCS_6_INDEX,
+ IWM_RATE_MCS_7_INDEX,
+ IWM_RATE_MCS_8_INDEX,
+ IWM_RATE_MCS_9_INDEX,
+ IWM_RATE_MCS_10_INDEX,
+ IWM_RATE_MCS_11_INDEX,
+ IWM_RATE_MCS_12_INDEX,
+ IWM_RATE_MCS_13_INDEX,
+ IWM_RATE_MCS_14_INDEX,
+ IWM_RATE_MCS_15_INDEX,
+};
struct iwm_nvm_section {
uint16_t length;
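
The rate column holds dot11 rate codes in 500 kb/s units (2 = 1 Mb/s, 108 = 54 Mb/s, 260 = 130 Mb/s for HT MCS 15), which is why the field widens from uint8_t to uint16_t once the MIMO2 rows exceed 255. The reverse lookup the driver performs through iwm_rate2ridx() amounts to (stand-alone sketch, not the literal code):

    static int
    dot11rate_to_ridx(uint16_t dot11rate)
    {
            int i;

            for (i = 0; i <= IWM_RIDX_MAX; i++) {
                    if (iwm_rates[i].rate == dot11rate)
                            return (i);     /* index into iwm_rates[] */
            }
            return (-1);                    /* unknown rate code */
    }
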
@@ -2192,7 +2220,8 @@ iwm_parse_nvm_data(struct iwm_softc *sc,
sku = iwm_get_sku(sc, nvm_sw, phy_sku);
data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
- data->sku_cap_11n_enable = 0;
+ data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
+ data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
@@ -3416,7 +3445,7 @@ iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
struct ieee80211_node *ni = &in->in_ni;
struct ieee80211vap *vap = ni->ni_vap;
int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
- int new_rate, cur_rate = vap->iv_bss->ni_txrate;
+ int new_rate, cur_rate;
boolean_t rate_matched;
uint8_t tx_resp_rate;
@@ -3434,6 +3463,7 @@ iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
le32toh(tx_resp->initial_rate),
(int) le16toh(tx_resp->wireless_media_time));
+ cur_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
/* For rate control, ignore frames sent at different initial rate */
@@ -3472,11 +3502,11 @@ iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
if (rate_matched) {
ieee80211_ratectl_tx_complete(ni, txs);
- int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
- new_rate = vap->iv_bss->ni_txrate;
+ ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
+ new_rate = ieee80211_node_get_txrate_dot11rate(vap->iv_bss);
if (new_rate != 0 && new_rate != cur_rate) {
struct iwm_node *in = IWM_NODE(vap->iv_bss);
- iwm_setrates(sc, in, rix);
+ iwm_setrates(sc, in, new_rate);
iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
}
}
@@ -3666,7 +3696,8 @@ iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
} else {
/* for data frames, use RS table */
IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
- ridx = iwm_rate2ridx(sc, ni->ni_txrate);
+ ridx = iwm_rate2ridx(sc,
+ ieee80211_node_get_txrate_dot11rate(ni));
if (ridx == -1)
ridx = 0;
@@ -3742,6 +3773,10 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX && do software encryption. */
@@ -4239,7 +4274,7 @@ iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
static void
-iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
+iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int dot11rate)
{
struct ieee80211_node *ni = &in->in_ni;
struct iwm_lq_cmd *lq = &in->in_lq;
@@ -4247,8 +4282,27 @@ iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
int nrates = rs->rs_nrates;
int i, ridx, tab = 0;
// int txant = 0;
+ int rix;
- KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
+ /*
+ * Look up the rate index for the given legacy rate from
+ * the rs_rates table. Default to the lowest rate if it's
+ * not found (which is obviously hugely problematic.)
+ */
+ rix = -1;
+ for (i = 0; i < nrates; i++) {
+ int rate = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ if (rate == dot11rate) {
+ rix = i;
+ break;
+ }
+ }
+ if (rix < 0) {
+ device_printf(sc->sc_dev,
+ "%s: failed to lookup dot11rate (%d)\n",
+ __func__, dot11rate);
+ rix = 0;
+ }
if (nrates > nitems(lq->rs_table)) {
device_printf(sc->sc_dev,
@@ -4528,8 +4582,9 @@ iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
iwm_enable_beacon_filter(sc, ivp);
iwm_power_update_mac(sc);
iwm_update_quotas(sc, ivp);
- int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
- iwm_setrates(sc, in, rix);
+ ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
+ iwm_setrates(sc, in,
+ ieee80211_node_get_txrate_dot11rate(&in->in_ni));
if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
device_printf(sc->sc_dev,
@@ -6091,7 +6146,8 @@ iwm_attach(device_t dev)
// IEEE80211_C_BGSCAN /* capable of bg scanning */
;
/* Advertise full-offload scanning */
- ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
sc->sc_phyctxt[i].id = i;
sc->sc_phyctxt[i].color = 0;
diff --git a/sys/dev/iwm/if_iwmreg.h b/sys/dev/iwm/if_iwmreg.h
index d9c7bb7a89f9..296d07bc4b31 100644
--- a/sys/dev/iwm/if_iwmreg.h
+++ b/sys/dev/iwm/if_iwmreg.h
@@ -4481,9 +4481,7 @@ struct iwm_beacon_filter_cmd {
#define IWM_RATE_HT_SISO_MCS_9_PLCP IWM_RATE_HT_SISO_MCS_INV_PLCP
/*
- * These serve as indexes into
- * struct iwm_rate_info fw_rate_idx_to_plcp[IWM_RATE_COUNT];
- * TODO: avoid overlap between legacy and HT rates
+ * These serve as indexes into the struct iwm_rate iwm_rates[] table.
*/
enum {
IWM_RATE_1M_INDEX = 0,
@@ -4500,28 +4498,34 @@ enum {
IWM_RATE_9M_INDEX,
IWM_RATE_12M_INDEX,
IWM_RATE_MCS_1_INDEX = IWM_RATE_12M_INDEX,
+ IWM_RATE_MCS_8_INDEX,
+ IWM_FIRST_HT_MIMO2_RATE = IWM_RATE_MCS_8_INDEX,
IWM_RATE_18M_INDEX,
IWM_RATE_MCS_2_INDEX = IWM_RATE_18M_INDEX,
IWM_RATE_24M_INDEX,
IWM_RATE_MCS_3_INDEX = IWM_RATE_24M_INDEX,
+ IWM_RATE_MCS_9_INDEX,
IWM_RATE_36M_INDEX,
IWM_RATE_MCS_4_INDEX = IWM_RATE_36M_INDEX,
+ IWM_RATE_MCS_10_INDEX,
IWM_RATE_48M_INDEX,
IWM_RATE_MCS_5_INDEX = IWM_RATE_48M_INDEX,
+ IWM_RATE_MCS_11_INDEX,
IWM_RATE_54M_INDEX,
IWM_RATE_MCS_6_INDEX = IWM_RATE_54M_INDEX,
IWM_LAST_NON_HT_RATE = IWM_RATE_54M_INDEX,
- IWM_RATE_60M_INDEX,
- IWM_RATE_MCS_7_INDEX = IWM_RATE_60M_INDEX,
- IWM_LAST_HT_RATE = IWM_RATE_MCS_7_INDEX,
- IWM_RATE_MCS_8_INDEX,
- IWM_RATE_MCS_9_INDEX,
+ IWM_RATE_MCS_7_INDEX,
+ IWM_LAST_HT_SISO_RATE = IWM_RATE_MCS_7_INDEX,
+ IWM_RATE_MCS_12_INDEX,
+ IWM_RATE_MCS_13_INDEX,
+ IWM_RATE_MCS_14_INDEX,
+ IWM_RATE_MCS_15_INDEX,
+ IWM_LAST_HT_RATE = IWM_RATE_MCS_15_INDEX,
IWM_LAST_VHT_RATE = IWM_RATE_MCS_9_INDEX,
IWM_RATE_COUNT_LEGACY = IWM_LAST_NON_HT_RATE + 1,
- IWM_RATE_COUNT = IWM_LAST_VHT_RATE + 1,
+ IWM_RATE_COUNT = IWM_LAST_HT_RATE + 1,
};
-
#define IWM_RATE_BIT_MSK(r) (1 << (IWM_RATE_##r##M_INDEX))
/* fw API values for legacy bit rates, both OFDM and CCK */
diff --git a/sys/dev/iwm/if_iwmvar.h b/sys/dev/iwm/if_iwmvar.h
index 63201e5b3120..f31057c07b9b 100644
--- a/sys/dev/iwm/if_iwmvar.h
+++ b/sys/dev/iwm/if_iwmvar.h
@@ -200,6 +200,7 @@ struct iwm_nvm_data {
int sku_cap_11n_enable;
int sku_cap_amt_enable;
int sku_cap_ipan_enable;
+ int sku_cap_mimo_disable;
uint8_t radio_cfg_type;
uint8_t radio_cfg_step;
diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c
index 3b1d879914b6..a949103f20d4 100644
--- a/sys/dev/iwn/if_iwn.c
+++ b/sys/dev/iwn/if_iwn.c
@@ -584,6 +584,11 @@ iwn_attach(device_t dev)
| IEEE80211_C_PMGT /* Station-side power mgmt */
;
+ /* Driver / firmware assigned sequence numbers */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't originate null data frames in net80211 */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+
/* Read MAC address, channels, etc from EEPROM. */
if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) {
device_printf(dev, "could not read EEPROM, error %d\n",
@@ -2813,22 +2818,17 @@ iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni,
plcp = IEEE80211_RV(rate) | IWN_RFLAG_MCS;
/*
- * XXX the following should only occur if both
- * the local configuration _and_ the remote node
- * advertise these capabilities. Thus this code
- * may need fixing!
- */
-
- /*
* Set the channel width and guard interval.
+ *
+ * Take into account the local configuration and
+ * the node/peer advertised abilities.
*/
if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
plcp |= IWN_RFLAG_HT40;
- if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40)
+ if (ieee80211_ht_check_tx_shortgi_40(ni))
plcp |= IWN_RFLAG_SGI;
- } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
+ } else if (ieee80211_ht_check_tx_shortgi_20(ni))
plcp |= IWN_RFLAG_SGI;
- }
/*
* Ensure the selected rate matches the link quality
@@ -4485,7 +4485,7 @@ iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni,
/*
* Figure out if we're using 11n or not here.
*/
- if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0)
+ if (ieee80211_ht_check_tx_ht(ni))
is_11n = 1;
else
is_11n = 0;
@@ -4575,13 +4575,16 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
else {
/* XXX pass pktlen */
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
/*
* XXX TODO: Group addressed frames aren't aggregated and must
* go to the normal non-aggregation queue, and have a NONQOS TID
* assigned from net80211.
+ *
+ * TODO: same with NULL QOS frames, which we shouldn't be sending
+ * anyway ourselves (and should stub out / warn / etc.)
*/
ac = M_WME_GETAC(m);
@@ -4594,6 +4597,10 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
ac = *(int *)tap->txa_private;
}
+ /* Only assign if not A-MPDU; the A-MPDU TX path will do its own */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
/* Retrieve key for TX. */
@@ -4624,9 +4631,7 @@ iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
IEEE80211_QOS_ACKPOLICY_NOACK)
flags |= IWN_TX_NEED_ACK;
}
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
+ if (IEEE80211_IS_CTL_BAR(wh))
flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
@@ -5365,7 +5370,7 @@ iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
* 11n _and_ we have some 11n rates, or don't
* try.
*/
- if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) {
+ if (ieee80211_ht_check_tx_ht(ni)) {
rs = (struct ieee80211_rateset *) &ni->ni_htrates;
is_11n = 1;
} else {
@@ -7532,7 +7537,7 @@ iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
break;
}
if (qid == sc->ntxqs) {
- DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n",
+ DPRINTF(sc, IWN_DEBUG_XMIT, "%s: no free aggregation queue\n",
__func__);
return 0;
}
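
iwi(4), iwm(4) and iwn(4) all gain the same two-part contract in this batch: set IEEE80211_FEXT_SEQNO_OFFLOAD at attach time so net80211 stops assigning sequence numbers itself, then have the driver TX path number every frame it originates that is not an A-MPDU MPDU (the A-MPDU TX path numbers its own). Condensed from the hunks above:

    /* attach: driver/firmware owns sequence number assignment */
    ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;

    /* per-frame TX, non-A-MPDU case only */
    if ((m->m_flags & M_AMPDU_MPDU) == 0)
            ieee80211_output_seqno_assign(ni, -1, m);
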
diff --git a/sys/dev/iwx/if_iwx.c b/sys/dev/iwx/if_iwx.c
new file mode 100644
index 000000000000..04ed09f04604
--- /dev/null
+++ b/sys/dev/iwx/if_iwx.c
@@ -0,0 +1,11065 @@
+/*-
+ * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
+ */
+
+/* $OpenBSD: if_iwx.c,v 1.175 2023/07/05 15:07:28 stsp Exp $ */
+
+/*
+ *
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ */
+
+/*-
+ * Copyright (c) 2024 Future Crew, LLC
+ * Author: Mikhail Pchelin <misha@FreeBSD.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
+ * Author: Stefan Sperling <stsp@openbsd.org>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ * Copyright (c) 2017, 2019, 2020 Stefan Sperling <stsp@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rman.h>
+#include <sys/rwlock.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/linker.h>
+#include <sys/firmware.h>
+#include <sys/epoch.h>
+#include <sys/kdb.h>
+
+#include <machine/bus.h>
+#include <machine/endian.h>
+#include <machine/resource.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <net/bpf.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+
+#include <netinet/in.h>
+#include <netinet/if_ether.h>
+
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_regdomain.h>
+#include <net80211/ieee80211_ratectl.h>
+#include <net80211/ieee80211_vht.h>
+
+int iwx_himark = 224;
+int iwx_lomark = 192;
+
+#define IWX_FBSD_RSP_V3 3
+#define IWX_FBSD_RSP_V4 4
+
+#define DEVNAME(_sc) (device_get_nameunit((_sc)->sc_dev))
+#define IC2IFP(ic) (((struct ieee80211vap *)TAILQ_FIRST(&(ic)->ic_vaps))->iv_ifp)
+
+#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
+#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
+
+#include <dev/iwx/if_iwxreg.h>
+#include <dev/iwx/if_iwxvar.h>
+
+#include <dev/iwx/if_iwx_debug.h>
+
+#define PCI_CFG_RETRY_TIMEOUT 0x41
+
+#define PCI_VENDOR_INTEL 0x8086
+#define PCI_PRODUCT_INTEL_WL_22500_1 0x2723 /* Wi-Fi 6 AX200 */
+#define PCI_PRODUCT_INTEL_WL_22500_2 0x02f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_3 0xa0f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_4 0x34f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_5 0x06f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_6 0x43f0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_7 0x3df0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_8 0x4df0 /* Wi-Fi 6 AX201 */
+#define PCI_PRODUCT_INTEL_WL_22500_9 0x2725 /* Wi-Fi 6 AX210 */
+#define PCI_PRODUCT_INTEL_WL_22500_10 0x2726 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_11 0x51f0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_12 0x7a70 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_13 0x7af0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_14 0x7e40 /* Wi-Fi 6 AX210 */
+#define PCI_PRODUCT_INTEL_WL_22500_15 0x7f70 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_16 0x54f0 /* Wi-Fi 6 AX211 */
+#define PCI_PRODUCT_INTEL_WL_22500_17 0x51f1 /* Wi-Fi 6 AX211 */
+
+static const struct iwx_devices {
+ uint16_t device;
+ char *name;
+} iwx_devices[] = {
+ { PCI_PRODUCT_INTEL_WL_22500_1, "Wi-Fi 6 AX200" },
+ { PCI_PRODUCT_INTEL_WL_22500_2, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_3, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_4, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_5, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_6, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_7, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_8, "Wi-Fi 6 AX201" },
+ { PCI_PRODUCT_INTEL_WL_22500_9, "Wi-Fi 6 AX210" },
+ { PCI_PRODUCT_INTEL_WL_22500_10, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_11, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_12, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_13, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_14, "Wi-Fi 6 AX210" },
+ { PCI_PRODUCT_INTEL_WL_22500_15, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_16, "Wi-Fi 6 AX211" },
+ { PCI_PRODUCT_INTEL_WL_22500_17, "Wi-Fi 6 AX211" },
+};
+
+static const uint8_t iwx_nvm_channels_8000[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+
+static const uint8_t iwx_nvm_channels_uhb[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181,
+ /* 6-7 GHz */
+ 1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61, 65, 69,
+ 73, 77, 81, 85, 89, 93, 97, 101, 105, 109, 113, 117, 121, 125, 129,
+ 133, 137, 141, 145, 149, 153, 157, 161, 165, 169, 173, 177, 181, 185,
+ 189, 193, 197, 201, 205, 209, 213, 217, 221, 225, 229, 233
+};
+
+#define IWX_NUM_2GHZ_CHANNELS 14
+#define IWX_NUM_5GHZ_CHANNELS 37
+
+const struct iwx_rate {
+ uint16_t rate;
+ uint8_t plcp;
+ uint8_t ht_plcp;
+} iwx_rates[] = {
+ /* Legacy */ /* HT */
+ { 2, IWX_RATE_1M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
+ { 4, IWX_RATE_2M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
+ { 11, IWX_RATE_5M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
+ { 22, IWX_RATE_11M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
+ { 12, IWX_RATE_6M_PLCP, IWX_RATE_HT_SISO_MCS_0_PLCP },
+ { 18, IWX_RATE_9M_PLCP, IWX_RATE_HT_SISO_MCS_INV_PLCP },
+ { 24, IWX_RATE_12M_PLCP, IWX_RATE_HT_SISO_MCS_1_PLCP },
+ { 26, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_8_PLCP },
+ { 36, IWX_RATE_18M_PLCP, IWX_RATE_HT_SISO_MCS_2_PLCP },
+ { 48, IWX_RATE_24M_PLCP, IWX_RATE_HT_SISO_MCS_3_PLCP },
+ { 52, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_9_PLCP },
+ { 72, IWX_RATE_36M_PLCP, IWX_RATE_HT_SISO_MCS_4_PLCP },
+ { 78, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_10_PLCP },
+ { 96, IWX_RATE_48M_PLCP, IWX_RATE_HT_SISO_MCS_5_PLCP },
+ { 104, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_11_PLCP },
+ { 108, IWX_RATE_54M_PLCP, IWX_RATE_HT_SISO_MCS_6_PLCP },
+ { 128, IWX_RATE_INVM_PLCP, IWX_RATE_HT_SISO_MCS_7_PLCP },
+ { 156, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_12_PLCP },
+ { 208, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_13_PLCP },
+ { 234, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_14_PLCP },
+ { 260, IWX_RATE_INVM_PLCP, IWX_RATE_HT_MIMO2_MCS_15_PLCP },
+};
+#define IWX_RIDX_CCK 0
+#define IWX_RIDX_OFDM 4
+#define IWX_RIDX_MAX (nitems(iwx_rates)-1)
+#define IWX_RIDX_IS_CCK(_i_) ((_i_) < IWX_RIDX_OFDM)
+#define IWX_RIDX_IS_OFDM(_i_) ((_i_) >= IWX_RIDX_OFDM)
+#define IWX_RVAL_IS_OFDM(_i_) ((_i_) >= 12 && (_i_) != 22)
+
+/* Convert an MCS index into an iwx_rates[] index. */
+const int iwx_mcs2ridx[] = {
+ IWX_RATE_MCS_0_INDEX,
+ IWX_RATE_MCS_1_INDEX,
+ IWX_RATE_MCS_2_INDEX,
+ IWX_RATE_MCS_3_INDEX,
+ IWX_RATE_MCS_4_INDEX,
+ IWX_RATE_MCS_5_INDEX,
+ IWX_RATE_MCS_6_INDEX,
+ IWX_RATE_MCS_7_INDEX,
+ IWX_RATE_MCS_8_INDEX,
+ IWX_RATE_MCS_9_INDEX,
+ IWX_RATE_MCS_10_INDEX,
+ IWX_RATE_MCS_11_INDEX,
+ IWX_RATE_MCS_12_INDEX,
+ IWX_RATE_MCS_13_INDEX,
+ IWX_RATE_MCS_14_INDEX,
+ IWX_RATE_MCS_15_INDEX,
+};
+
+static uint8_t iwx_lookup_cmd_ver(struct iwx_softc *, uint8_t, uint8_t);
+static uint8_t iwx_lookup_notif_ver(struct iwx_softc *, uint8_t, uint8_t);
+static int iwx_store_cscheme(struct iwx_softc *, const uint8_t *, size_t);
+#if 0
+static int iwx_alloc_fw_monitor_block(struct iwx_softc *, uint8_t, uint8_t);
+static int iwx_alloc_fw_monitor(struct iwx_softc *, uint8_t);
+#endif
+static int iwx_apply_debug_destination(struct iwx_softc *);
+static void iwx_set_ltr(struct iwx_softc *);
+static int iwx_ctxt_info_init(struct iwx_softc *, const struct iwx_fw_sects *);
+static int iwx_ctxt_info_gen3_init(struct iwx_softc *,
+ const struct iwx_fw_sects *);
+static void iwx_ctxt_info_free_fw_img(struct iwx_softc *);
+static void iwx_ctxt_info_free_paging(struct iwx_softc *);
+static int iwx_init_fw_sec(struct iwx_softc *, const struct iwx_fw_sects *,
+ struct iwx_context_info_dram *);
+static void iwx_fw_version_str(char *, size_t, uint32_t, uint32_t, uint32_t);
+static int iwx_firmware_store_section(struct iwx_softc *, enum iwx_ucode_type,
+ const uint8_t *, size_t);
+static int iwx_set_default_calib(struct iwx_softc *, const void *);
+static void iwx_fw_info_free(struct iwx_fw_info *);
+static int iwx_read_firmware(struct iwx_softc *);
+static uint32_t iwx_prph_addr_mask(struct iwx_softc *);
+static uint32_t iwx_read_prph_unlocked(struct iwx_softc *, uint32_t);
+static uint32_t iwx_read_prph(struct iwx_softc *, uint32_t);
+static void iwx_write_prph_unlocked(struct iwx_softc *, uint32_t, uint32_t);
+static void iwx_write_prph(struct iwx_softc *, uint32_t, uint32_t);
+static uint32_t iwx_read_umac_prph(struct iwx_softc *, uint32_t);
+static void iwx_write_umac_prph(struct iwx_softc *, uint32_t, uint32_t);
+static int iwx_read_mem(struct iwx_softc *, uint32_t, void *, int);
+static int iwx_poll_bit(struct iwx_softc *, int, uint32_t, uint32_t, int);
+static int iwx_nic_lock(struct iwx_softc *);
+static void iwx_nic_assert_locked(struct iwx_softc *);
+static void iwx_nic_unlock(struct iwx_softc *);
+static int iwx_set_bits_mask_prph(struct iwx_softc *, uint32_t, uint32_t,
+ uint32_t);
+static int iwx_set_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+static int iwx_clear_bits_prph(struct iwx_softc *, uint32_t, uint32_t);
+static void iwx_dma_map_addr(void *, bus_dma_segment_t *, int, int);
+static int iwx_dma_contig_alloc(bus_dma_tag_t, struct iwx_dma_info *,
+ bus_size_t, bus_size_t);
+static void iwx_dma_contig_free(struct iwx_dma_info *);
+static int iwx_alloc_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static void iwx_disable_rx_dma(struct iwx_softc *);
+static void iwx_reset_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static void iwx_free_rx_ring(struct iwx_softc *, struct iwx_rx_ring *);
+static int iwx_alloc_tx_ring(struct iwx_softc *, struct iwx_tx_ring *, int);
+static void iwx_reset_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_free_tx_ring(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_enable_rfkill_int(struct iwx_softc *);
+static int iwx_check_rfkill(struct iwx_softc *);
+static void iwx_enable_interrupts(struct iwx_softc *);
+static void iwx_enable_fwload_interrupt(struct iwx_softc *);
+#if 0
+static void iwx_restore_interrupts(struct iwx_softc *);
+#endif
+static void iwx_disable_interrupts(struct iwx_softc *);
+static void iwx_ict_reset(struct iwx_softc *);
+static int iwx_set_hw_ready(struct iwx_softc *);
+static int iwx_prepare_card_hw(struct iwx_softc *);
+static int iwx_force_power_gating(struct iwx_softc *);
+static void iwx_apm_config(struct iwx_softc *);
+static int iwx_apm_init(struct iwx_softc *);
+static void iwx_apm_stop(struct iwx_softc *);
+static int iwx_allow_mcast(struct iwx_softc *);
+static void iwx_init_msix_hw(struct iwx_softc *);
+static void iwx_conf_msix_hw(struct iwx_softc *, int);
+static int iwx_clear_persistence_bit(struct iwx_softc *);
+static int iwx_start_hw(struct iwx_softc *);
+static void iwx_stop_device(struct iwx_softc *);
+static void iwx_nic_config(struct iwx_softc *);
+static int iwx_nic_rx_init(struct iwx_softc *);
+static int iwx_nic_init(struct iwx_softc *);
+static int iwx_enable_txq(struct iwx_softc *, int, int, int, int);
+static int iwx_disable_txq(struct iwx_softc *sc, int, int, uint8_t);
+static void iwx_post_alive(struct iwx_softc *);
+static int iwx_schedule_session_protection(struct iwx_softc *,
+ struct iwx_node *, uint32_t);
+static void iwx_unprotect_session(struct iwx_softc *, struct iwx_node *);
+static void iwx_init_channel_map(struct ieee80211com *, int, int *,
+ struct ieee80211_channel[]);
+static int iwx_mimo_enabled(struct iwx_softc *);
+static void iwx_init_reorder_buffer(struct iwx_reorder_buffer *, uint16_t,
+ uint16_t);
+static void iwx_clear_reorder_buffer(struct iwx_softc *, struct iwx_rxba_data *);
+static void iwx_sta_rx_agg(struct iwx_softc *, struct ieee80211_node *, uint8_t,
+ uint16_t, uint16_t, int, int);
+static void iwx_sta_tx_agg_start(struct iwx_softc *,
+ struct ieee80211_node *, uint8_t);
+static void iwx_ba_rx_task(void *, int);
+static void iwx_ba_tx_task(void *, int);
+static void iwx_set_mac_addr_from_csr(struct iwx_softc *, struct iwx_nvm_data *);
+static int iwx_is_valid_mac_addr(const uint8_t *);
+static void iwx_flip_hw_address(uint32_t, uint32_t, uint8_t *);
+static int iwx_nvm_get(struct iwx_softc *);
+static int iwx_load_firmware(struct iwx_softc *);
+static int iwx_start_fw(struct iwx_softc *);
+static int iwx_pnvm_handle_section(struct iwx_softc *, const uint8_t *, size_t);
+static int iwx_pnvm_parse(struct iwx_softc *, const uint8_t *, size_t);
+static void iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *);
+static int iwx_load_pnvm(struct iwx_softc *);
+static int iwx_send_tx_ant_cfg(struct iwx_softc *, uint8_t);
+static int iwx_send_phy_cfg_cmd(struct iwx_softc *);
+static int iwx_load_ucode_wait_alive(struct iwx_softc *);
+static int iwx_send_dqa_cmd(struct iwx_softc *);
+static int iwx_run_init_mvm_ucode(struct iwx_softc *, int);
+static int iwx_config_ltr(struct iwx_softc *);
+static void iwx_update_rx_desc(struct iwx_softc *, struct iwx_rx_ring *, int, bus_dma_segment_t *);
+static int iwx_rx_addbuf(struct iwx_softc *, int, int);
+static int iwx_rxmq_get_signal_strength(struct iwx_softc *, struct iwx_rx_mpdu_desc *);
+static void iwx_rx_rx_phy_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static int iwx_get_noise(const struct iwx_statistics_rx_non_phy *);
+static int iwx_rx_hwdecrypt(struct iwx_softc *, struct mbuf *, uint32_t);
+#if 0
+int iwx_ccmp_decap(struct iwx_softc *, struct mbuf *,
+ struct ieee80211_node *, struct ieee80211_rxinfo *);
+#endif
+static void iwx_rx_frame(struct iwx_softc *, struct mbuf *, int, uint32_t,
+ int, int, uint32_t, uint8_t);
+static void iwx_clear_tx_desc(struct iwx_softc *, struct iwx_tx_ring *, int);
+static void iwx_txd_done(struct iwx_softc *, struct iwx_tx_ring *,
+ struct iwx_tx_data *);
+static void iwx_txq_advance(struct iwx_softc *, struct iwx_tx_ring *, uint16_t);
+static void iwx_rx_tx_cmd(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static void iwx_clear_oactive(struct iwx_softc *, struct iwx_tx_ring *);
+static void iwx_rx_bmiss(struct iwx_softc *, struct iwx_rx_packet *,
+ struct iwx_rx_data *);
+static int iwx_binding_cmd(struct iwx_softc *, struct iwx_node *, uint32_t);
+static uint8_t iwx_get_vht_ctrl_pos(struct ieee80211com *, struct ieee80211_channel *);
+static int iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *,
+ struct iwx_phy_ctxt *, uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
+#if 0
+static int iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t, uint32_t, uint8_t, uint8_t, int);
+#endif
+static int iwx_phy_ctxt_cmd(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t, uint32_t, uint32_t, uint8_t, uint8_t);
+static int iwx_send_cmd(struct iwx_softc *, struct iwx_host_cmd *);
+static int iwx_send_cmd_pdu(struct iwx_softc *, uint32_t, uint32_t, uint16_t,
+ const void *);
+static int iwx_send_cmd_status(struct iwx_softc *, struct iwx_host_cmd *,
+ uint32_t *);
+static int iwx_send_cmd_pdu_status(struct iwx_softc *, uint32_t, uint16_t,
+ const void *, uint32_t *);
+static void iwx_free_resp(struct iwx_softc *, struct iwx_host_cmd *);
+static void iwx_cmd_done(struct iwx_softc *, int, int, int);
+static uint32_t iwx_fw_rateidx_ofdm(uint8_t);
+static uint32_t iwx_fw_rateidx_cck(uint8_t);
+static const struct iwx_rate *iwx_tx_fill_cmd(struct iwx_softc *,
+ struct iwx_node *, struct ieee80211_frame *, uint16_t *, uint32_t *,
+ struct mbuf *);
+static void iwx_tx_update_byte_tbl(struct iwx_softc *, struct iwx_tx_ring *, int,
+ uint16_t, uint16_t);
+static int iwx_tx(struct iwx_softc *, struct mbuf *,
+ struct ieee80211_node *);
+static int iwx_flush_sta_tids(struct iwx_softc *, int, uint16_t);
+static int iwx_drain_sta(struct iwx_softc *sc, struct iwx_node *, int);
+static int iwx_flush_sta(struct iwx_softc *, struct iwx_node *);
+static int iwx_beacon_filter_send_cmd(struct iwx_softc *,
+ struct iwx_beacon_filter_cmd *);
+static int iwx_update_beacon_abort(struct iwx_softc *, struct iwx_node *,
+ int);
+static void iwx_power_build_cmd(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_power_cmd *);
+static int iwx_power_mac_update_mode(struct iwx_softc *, struct iwx_node *);
+static int iwx_power_update_device(struct iwx_softc *);
+#if 0
+static int iwx_enable_beacon_filter(struct iwx_softc *, struct iwx_node *);
+#endif
+static int iwx_disable_beacon_filter(struct iwx_softc *);
+static int iwx_add_sta_cmd(struct iwx_softc *, struct iwx_node *, int);
+static int iwx_rm_sta_cmd(struct iwx_softc *, struct iwx_node *);
+static int iwx_rm_sta(struct iwx_softc *, struct iwx_node *);
+static int iwx_fill_probe_req(struct iwx_softc *,
+ struct iwx_scan_probe_req *);
+static int iwx_config_umac_scan_reduced(struct iwx_softc *);
+static uint16_t iwx_scan_umac_flags_v2(struct iwx_softc *, int);
+static void iwx_scan_umac_dwell_v10(struct iwx_softc *,
+ struct iwx_scan_general_params_v10 *, int);
+static void iwx_scan_umac_fill_general_p_v10(struct iwx_softc *,
+ struct iwx_scan_general_params_v10 *, uint16_t, int);
+static void iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *,
+ struct iwx_scan_channel_params_v6 *, uint32_t, int);
+static int iwx_umac_scan_v14(struct iwx_softc *, int);
+static void iwx_mcc_update(struct iwx_softc *, struct iwx_mcc_chub_notif *);
+static uint8_t iwx_ridx2rate(struct ieee80211_rateset *, int);
+static int iwx_rval2ridx(int);
+static void iwx_ack_rates(struct iwx_softc *, struct iwx_node *, int *,
+ int *);
+static void iwx_mac_ctxt_cmd_common(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_ctx_cmd *, uint32_t);
+static void iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *, struct iwx_node *,
+ struct iwx_mac_data_sta *, int);
+static int iwx_mac_ctxt_cmd(struct iwx_softc *, struct iwx_node *,
+ uint32_t, int);
+static int iwx_clear_statistics(struct iwx_softc *);
+static int iwx_scan(struct iwx_softc *);
+static int iwx_bgscan(struct ieee80211com *);
+static int iwx_enable_mgmt_queue(struct iwx_softc *);
+static int iwx_disable_mgmt_queue(struct iwx_softc *);
+static int iwx_rs_rval2idx(uint8_t);
+static uint16_t iwx_rs_ht_rates(struct iwx_softc *, struct ieee80211_node *,
+ int);
+static uint16_t iwx_rs_vht_rates(struct iwx_softc *, struct ieee80211_node *, int);
+static int iwx_rs_init_v3(struct iwx_softc *, struct iwx_node *);
+static int iwx_rs_init_v4(struct iwx_softc *, struct iwx_node *);
+static int iwx_rs_init(struct iwx_softc *, struct iwx_node *);
+static int iwx_phy_send_rlc(struct iwx_softc *, struct iwx_phy_ctxt *,
+ uint8_t, uint8_t);
+static int iwx_phy_ctxt_update(struct iwx_softc *, struct iwx_phy_ctxt *,
+ struct ieee80211_channel *, uint8_t, uint8_t, uint32_t, uint8_t,
+ uint8_t);
+static int iwx_auth(struct ieee80211vap *, struct iwx_softc *);
+static int iwx_deauth(struct iwx_softc *);
+static int iwx_run(struct ieee80211vap *, struct iwx_softc *);
+static int iwx_run_stop(struct iwx_softc *);
+static struct ieee80211_node * iwx_node_alloc(struct ieee80211vap *,
+ const uint8_t[IEEE80211_ADDR_LEN]);
+#if 0
+int iwx_set_key(struct ieee80211com *, struct ieee80211_node *,
+ struct ieee80211_key *);
+void iwx_setkey_task(void *);
+void iwx_delete_key(struct ieee80211com *,
+ struct ieee80211_node *, struct ieee80211_key *);
+#endif
+static int iwx_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static void iwx_endscan(struct iwx_softc *);
+static void iwx_fill_sf_command(struct iwx_softc *, struct iwx_sf_cfg_cmd *,
+ struct ieee80211_node *);
+static int iwx_sf_config(struct iwx_softc *, int);
+static int iwx_send_bt_init_conf(struct iwx_softc *);
+static int iwx_send_soc_conf(struct iwx_softc *);
+static int iwx_send_update_mcc_cmd(struct iwx_softc *, const char *);
+static int iwx_send_temp_report_ths_cmd(struct iwx_softc *);
+static int iwx_init_hw(struct iwx_softc *);
+static int iwx_init(struct iwx_softc *);
+static void iwx_stop(struct iwx_softc *);
+static void iwx_watchdog(void *);
+static const char *iwx_desc_lookup(uint32_t);
+static void iwx_nic_error(struct iwx_softc *);
+static void iwx_dump_driver_status(struct iwx_softc *);
+static void iwx_nic_umac_error(struct iwx_softc *);
+static void iwx_rx_mpdu_mq(struct iwx_softc *, struct mbuf *, void *, size_t);
+static int iwx_rx_pkt_valid(struct iwx_rx_packet *);
+static void iwx_rx_pkt(struct iwx_softc *, struct iwx_rx_data *,
+ struct mbuf *);
+static void iwx_notif_intr(struct iwx_softc *);
+#if 0
+/* XXX-THJ - I don't have hardware for this */
+static int iwx_intr(void *);
+#endif
+static void iwx_intr_msix(void *);
+static int iwx_preinit(struct iwx_softc *);
+static void iwx_attach_hook(void *);
+static const struct iwx_device_cfg *iwx_find_device_cfg(struct iwx_softc *);
+static int iwx_probe(device_t);
+static int iwx_attach(device_t);
+static int iwx_detach(device_t);
+
+/* FreeBSD specific glue */
+u_int8_t etherbroadcastaddr[ETHER_ADDR_LEN] =
+ { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+u_int8_t etheranyaddr[ETHER_ADDR_LEN] =
+ { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+
+#if IWX_DEBUG
+#define DPRINTF(x) do { if (sc->sc_debug == IWX_DEBUG_ANY) { printf x; } } while (0)
+#else
+#define DPRINTF(x) do { ; } while (0)
+#endif
+
+/* FreeBSD specific functions */
+static struct ieee80211vap * iwx_vap_create(struct ieee80211com *,
+ const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
+ const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
+static void iwx_vap_delete(struct ieee80211vap *);
+static void iwx_parent(struct ieee80211com *);
+static void iwx_scan_start(struct ieee80211com *);
+static void iwx_scan_end(struct ieee80211com *);
+static void iwx_update_mcast(struct ieee80211com *ic);
+static void iwx_scan_curchan(struct ieee80211_scan_state *, unsigned long);
+static void iwx_scan_mindwell(struct ieee80211_scan_state *);
+static void iwx_set_channel(struct ieee80211com *);
+static void iwx_endscan_cb(void *, int);
+static int iwx_wme_update(struct ieee80211com *);
+static int iwx_raw_xmit(struct ieee80211_node *, struct mbuf *,
+ const struct ieee80211_bpf_params *);
+static int iwx_transmit(struct ieee80211com *, struct mbuf *);
+static void iwx_start(struct iwx_softc *);
+static int iwx_ampdu_rx_start(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int, int, int);
+static void iwx_ampdu_rx_stop(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+static int iwx_addba_request(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+static int iwx_addba_response(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+static void iwx_key_update_begin(struct ieee80211vap *);
+static void iwx_key_update_end(struct ieee80211vap *);
+static int iwx_key_alloc(struct ieee80211vap *, struct ieee80211_key *,
+ ieee80211_keyix *, ieee80211_keyix *);
+static int iwx_key_set(struct ieee80211vap *, const struct ieee80211_key *);
+static int iwx_key_delete(struct ieee80211vap *,
+ const struct ieee80211_key *);
+static int iwx_suspend(device_t);
+static int iwx_resume(device_t);
+static void iwx_radiotap_attach(struct iwx_softc *);
+
+/* OpenBSD compat defines */
+#define IEEE80211_HTOP0_SCO_SCN 0
+#define IEEE80211_VHTOP0_CHAN_WIDTH_HT 0
+#define IEEE80211_VHTOP0_CHAN_WIDTH_80 1
+
+#define IEEE80211_HT_RATESET_SISO 0
+#define IEEE80211_HT_RATESET_MIMO2 2
+
+const struct ieee80211_rateset ieee80211_std_rateset_11a =
+ { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
+
+const struct ieee80211_rateset ieee80211_std_rateset_11b =
+ { 4, { 2, 4, 11, 22 } };
+
+const struct ieee80211_rateset ieee80211_std_rateset_11g =
+ { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
+
+inline int
+ieee80211_has_addr4(const struct ieee80211_frame *wh)
+{
+ return (wh->i_fc[1] & IEEE80211_FC1_DIR_MASK) ==
+ IEEE80211_FC1_DIR_DSTODS;
+}
+
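+/*
+ * Look up the command version the firmware advertised for the given
+ * group/command pair via its CMD_VERSIONS TLV. Returns
+ * IWX_FW_CMD_VER_UNKNOWN if the pair was not listed.
+ */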
+static uint8_t
+iwx_lookup_cmd_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
+{
+ const struct iwx_fw_cmd_version *entry;
+ int i;
+
+ for (i = 0; i < sc->n_cmd_versions; i++) {
+ entry = &sc->cmd_versions[i];
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->cmd_ver;
+ }
+
+ return IWX_FW_CMD_VER_UNKNOWN;
+}
+
+uint8_t
+iwx_lookup_notif_ver(struct iwx_softc *sc, uint8_t grp, uint8_t cmd)
+{
+ const struct iwx_fw_cmd_version *entry;
+ int i;
+
+ for (i = 0; i < sc->n_cmd_versions; i++) {
+ entry = &sc->cmd_versions[i];
+ if (entry->group == grp && entry->cmd == cmd)
+ return entry->notif_ver;
+ }
+
+ return IWX_FW_CMD_VER_UNKNOWN;
+}
+
+static int
+iwx_store_cscheme(struct iwx_softc *sc, const uint8_t *data, size_t dlen)
+{
+ const struct iwx_fw_cscheme_list *l = (const void *)data;
+
+ if (dlen < sizeof(*l) ||
+ dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
+ return EINVAL;
+
+ /* we don't actually store anything for now, always use s/w crypto */
+
+ return 0;
+}
+
+static int
+iwx_ctxt_info_alloc_dma(struct iwx_softc *sc,
+ const struct iwx_fw_onesect *sec, struct iwx_dma_info *dram)
+{
+ int err = iwx_dma_contig_alloc(sc->sc_dmat, dram, sec->fws_len, 1);
+ if (err) {
+ printf("%s: could not allocate context info DMA memory\n",
+ DEVNAME(sc));
+ return err;
+ }
+
+ memcpy(dram->vaddr, sec->fws_data, sec->fws_len);
+
+ return 0;
+}
+
+static void
+iwx_ctxt_info_free_paging(struct iwx_softc *sc)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i;
+
+ if (!dram->paging)
+ return;
+
+ /* free paging */
+ for (i = 0; i < dram->paging_cnt; i++)
+ iwx_dma_contig_free(&dram->paging[i]);
+
+ free(dram->paging, M_DEVBUF);
+ dram->paging_cnt = 0;
+ dram->paging = NULL;
+}
+
+static int
+iwx_get_num_sections(const struct iwx_fw_sects *fws, int start)
+{
+ int i = 0;
+
+ while (start < fws->fw_count &&
+ fws->fw_sect[start].fws_devoff != IWX_CPU1_CPU2_SEPARATOR_SECTION &&
+ fws->fw_sect[start].fws_devoff != IWX_PAGING_SEPARATOR_SECTION) {
+ start++;
+ i++;
+ }
+
+ return i;
+}
+
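+/*
+ * Count consecutive firmware sections starting at 'start'. The lmac,
+ * umac, and paging groups are delimited by separator markers in the
+ * section list.
+ */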
+static int
+iwx_init_fw_sec(struct iwx_softc *sc, const struct iwx_fw_sects *fws,
+ struct iwx_context_info_dram *ctxt_dram)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i, ret, fw_cnt = 0;
+
+ KASSERT(dram->paging == NULL, ("iwx_init_fw_sec"));
+
+ dram->lmac_cnt = iwx_get_num_sections(fws, 0);
+ /* add 1 due to separator */
+ dram->umac_cnt = iwx_get_num_sections(fws, dram->lmac_cnt + 1);
+ /* add 2 due to separators */
+ dram->paging_cnt = iwx_get_num_sections(fws,
+ dram->lmac_cnt + dram->umac_cnt + 2);
+
+ IWX_UNLOCK(sc);
+ dram->fw = mallocarray(dram->umac_cnt + dram->lmac_cnt,
+ sizeof(*dram->fw), M_DEVBUF, M_ZERO | M_NOWAIT);
+ if (!dram->fw) {
+ printf("%s: could not allocate memory for firmware sections\n",
+ DEVNAME(sc));
+ IWX_LOCK(sc);
+ return ENOMEM;
+ }
+
+ dram->paging = mallocarray(dram->paging_cnt, sizeof(*dram->paging),
+ M_DEVBUF, M_ZERO | M_WAITOK);
+ IWX_LOCK(sc);
+ if (!dram->paging) {
+ printf("%s: could not allocate memory for firmware paging\n",
+ DEVNAME(sc));
+ return ENOMEM;
+ }
+
+ /* initialize lmac sections */
+ for (i = 0; i < dram->lmac_cnt; i++) {
+ ret = iwx_ctxt_info_alloc_dma(sc, &fws->fw_sect[i],
+ &dram->fw[fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->lmac_img[i] =
+ htole64(dram->fw[fw_cnt].paddr);
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: firmware LMAC section %d at 0x%llx size %lld\n",
+ __func__, i,
+ (unsigned long long)dram->fw[fw_cnt].paddr,
+ (unsigned long long)dram->fw[fw_cnt].size);
+ fw_cnt++;
+ }
+
+ /* initialize umac sections */
+ for (i = 0; i < dram->umac_cnt; i++) {
+ /* access FW with +1 to make up for lmac separator */
+ ret = iwx_ctxt_info_alloc_dma(sc,
+ &fws->fw_sect[fw_cnt + 1], &dram->fw[fw_cnt]);
+ if (ret)
+ return ret;
+ ctxt_dram->umac_img[i] =
+ htole64(dram->fw[fw_cnt].paddr);
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: firmware UMAC section %d at 0x%llx size %lld\n",
+ __func__, i,
+ (unsigned long long)dram->fw[fw_cnt].paddr,
+ (unsigned long long)dram->fw[fw_cnt].size);
+ fw_cnt++;
+ }
+
+ /*
+ * Initialize paging.
+ * Paging memory is not stored in dram->fw alongside the umac and
+ * lmac sections because its lifetime differs: firmware section
+ * memory may be released once the firmware reports alive, whereas
+ * paging memory can only be freed when the device goes down.
+ * The firmware image is therefore indexed differently here: fw_cnt
+ * no longer advances, so the loop counter is added to it instead.
+ */
+ for (i = 0; i < dram->paging_cnt; i++) {
+ /* access FW with +2 to make up for lmac & umac separators */
+ int fw_idx = fw_cnt + i + 2;
+
+ ret = iwx_ctxt_info_alloc_dma(sc,
+ &fws->fw_sect[fw_idx], &dram->paging[i]);
+ if (ret)
+ return ret;
+
+ ctxt_dram->virtual_img[i] = htole64(dram->paging[i].paddr);
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: firmware paging section %d at 0x%llx size %lld\n",
+ __func__, i,
+ (unsigned long long)dram->paging[i].paddr,
+ (unsigned long long)dram->paging[i].size);
+ }
+
+ return 0;
+}
+
+static void
+iwx_fw_version_str(char *buf, size_t bufsize,
+ uint32_t major, uint32_t minor, uint32_t api)
+{
+ /*
+ * Starting with major version 35 the Linux driver prints the minor
+ * version in hexadecimal.
+ */
+ if (major >= 35)
+ snprintf(buf, bufsize, "%u.%08x.%u", major, minor, api);
+ else
+ snprintf(buf, bufsize, "%u.%u.%u", major, minor, api);
+}
+#if 0
+static int
+iwx_alloc_fw_monitor_block(struct iwx_softc *sc, uint8_t max_power,
+ uint8_t min_power)
+{
+ struct iwx_dma_info *fw_mon = &sc->fw_mon;
+ uint32_t size = 0;
+ uint8_t power;
+ int err;
+
+ if (fw_mon->size)
+ return 0;
+
+ for (power = max_power; power >= min_power; power--) {
+ size = (1 << power);
+
+ err = iwx_dma_contig_alloc(sc->sc_dmat, fw_mon, size, 0);
+ if (err)
+ continue;
+
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: allocated 0x%08x bytes for firmware monitor.\n",
+ DEVNAME(sc), size);
+ break;
+ }
+
+ if (err) {
+ fw_mon->size = 0;
+ return err;
+ }
+
+ if (power != max_power)
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: Sorry - debug buffer is only %luK while you requested %luK\n",
+ DEVNAME(sc), (unsigned long)(1 << (power - 10)),
+ (unsigned long)(1 << (max_power - 10)));
+
+ return 0;
+}
+
+static int
+iwx_alloc_fw_monitor(struct iwx_softc *sc, uint8_t max_power)
+{
+ if (!max_power) {
+ /* default max_power is maximum */
+ max_power = 26;
+ } else {
+ max_power += 11;
+ }
+
+ if (max_power > 26) {
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: External buffer size for monitor is too big %d, "
+ "check the FW TLV\n", DEVNAME(sc), max_power);
+ return 0;
+ }
+
+ if (sc->fw_mon.size)
+ return 0;
+
+ return iwx_alloc_fw_monitor_block(sc, max_power, 11);
+}
+#endif
+
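+/*
+ * Apply the firmware debug destination TLV, if one was found. The
+ * register-programming code is currently compiled out, so this is
+ * effectively a stub that reports success.
+ */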
+static int
+iwx_apply_debug_destination(struct iwx_softc *sc)
+{
+#if 0
+ struct iwx_fw_dbg_dest_tlv_v1 *dest_v1;
+ int i, err;
+ uint8_t mon_mode, size_power, base_shift, end_shift;
+ uint32_t base_reg, end_reg;
+
+ dest_v1 = sc->sc_fw.dbg_dest_tlv_v1;
+ mon_mode = dest_v1->monitor_mode;
+ size_power = dest_v1->size_power;
+ base_reg = le32toh(dest_v1->base_reg);
+ end_reg = le32toh(dest_v1->end_reg);
+ base_shift = dest_v1->base_shift;
+ end_shift = dest_v1->end_shift;
+
+ DPRINTF(("%s: applying debug destination %d\n", DEVNAME(sc), mon_mode));
+
+ if (mon_mode == EXTERNAL_MODE) {
+ err = iwx_alloc_fw_monitor(sc, size_power);
+ if (err)
+ return err;
+ }
+
+ if (!iwx_nic_lock(sc))
+ return EBUSY;
+
+ for (i = 0; i < sc->sc_fw.n_dest_reg; i++) {
+ uint32_t addr, val;
+ uint8_t op;
+
+ addr = le32toh(dest_v1->reg_ops[i].addr);
+ val = le32toh(dest_v1->reg_ops[i].val);
+ op = dest_v1->reg_ops[i].op;
+
+ DPRINTF(("%s: op=%u addr=%u val=%u\n", __func__, op, addr, val));
+ switch (op) {
+ case CSR_ASSIGN:
+ IWX_WRITE(sc, addr, val);
+ break;
+ case CSR_SETBIT:
+ IWX_SETBITS(sc, addr, (1 << val));
+ break;
+ case CSR_CLEARBIT:
+ IWX_CLRBITS(sc, addr, (1 << val));
+ break;
+ case PRPH_ASSIGN:
+ iwx_write_prph(sc, addr, val);
+ break;
+ case PRPH_SETBIT:
+ err = iwx_set_bits_prph(sc, addr, (1 << val));
+ if (err)
+ return err;
+ break;
+ case PRPH_CLEARBIT:
+ err = iwx_clear_bits_prph(sc, addr, (1 << val));
+ if (err)
+ return err;
+ break;
+ case PRPH_BLOCKBIT:
+ if (iwx_read_prph(sc, addr) & (1 << val))
+ goto monitor;
+ break;
+ default:
+ DPRINTF(("%s: FW debug - unknown OP %d\n",
+ DEVNAME(sc), op));
+ break;
+ }
+ }
+
+monitor:
+ if (mon_mode == EXTERNAL_MODE && sc->fw_mon.size) {
+ iwx_write_prph(sc, le32toh(base_reg),
+ sc->fw_mon.paddr >> base_shift);
+ iwx_write_prph(sc, end_reg,
+ (sc->fw_mon.paddr + sc->fw_mon.size - 256)
+ >> end_shift);
+ }
+
+ iwx_nic_unlock(sc);
+ return 0;
+#else
+ return 0;
+#endif
+}
+
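+/*
+ * Program a conservative PCIe Latency Tolerance Reporting (LTR) value
+ * of roughly 250 usec for the boot process; firmware lowers it later.
+ */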
+static void
+iwx_set_ltr(struct iwx_softc *sc)
+{
+ uint32_t ltr_val = IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
+ ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
+ IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT) &
+ IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK) |
+ ((250 << IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT) &
+ IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK) |
+ IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
+ ((IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC <<
+ IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT) &
+ IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK) |
+ (250 & IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL);
+
+ /*
+ * To workaround hardware latency issues during the boot process,
+ * initialize the LTR to ~250 usec (see ltr_val above).
+ * The firmware initializes this again later (to a smaller value).
+ */
+ if (!sc->sc_integrated) {
+ IWX_WRITE(sc, IWX_CSR_LTR_LONG_VAL_AD, ltr_val);
+ } else if (sc->sc_integrated &&
+ sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
+ iwx_write_prph(sc, IWX_HPM_MAC_LTR_CSR,
+ IWX_HPM_MAC_LRT_ENABLE_ALL);
+ iwx_write_prph(sc, IWX_HPM_UMAC_LTR, ltr_val);
+ }
+}
+
+int
+iwx_ctxt_info_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
+{
+ struct iwx_context_info *ctxt_info;
+ struct iwx_context_info_rbd_cfg *rx_cfg;
+ uint32_t control_flags = 0;
+ uint64_t paddr;
+ int err;
+
+ ctxt_info = sc->ctxt_info_dma.vaddr;
+ memset(ctxt_info, 0, sizeof(*ctxt_info));
+
+ ctxt_info->version.version = 0;
+ ctxt_info->version.mac_id =
+ htole16((uint16_t)IWX_READ(sc, IWX_CSR_HW_REV));
+ /* size is in DWs */
+ ctxt_info->version.size = htole16(sizeof(*ctxt_info) / 4);
+
+ KASSERT(IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) < 0xF,
+ ("IWX_RX_QUEUE_CB_SIZE does not fit in the 4-bit RB CB size field"));
+
+ control_flags = IWX_CTXT_INFO_TFD_FORMAT_LONG |
+ (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE) <<
+ IWX_CTXT_INFO_RB_CB_SIZE_POS) |
+ (IWX_CTXT_INFO_RB_SIZE_4K << IWX_CTXT_INFO_RB_SIZE_POS);
+ ctxt_info->control.control_flags = htole32(control_flags);
+
+ /* initialize RX default queue */
+ rx_cfg = &ctxt_info->rbd_cfg;
+ rx_cfg->free_rbd_addr = htole64(sc->rxq.free_desc_dma.paddr);
+ rx_cfg->used_rbd_addr = htole64(sc->rxq.used_desc_dma.paddr);
+ rx_cfg->status_wr_ptr = htole64(sc->rxq.stat_dma.paddr);
+
+ /* initialize TX command queue */
+ ctxt_info->hcmd_cfg.cmd_queue_addr =
+ htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
+ ctxt_info->hcmd_cfg.cmd_queue_size =
+ IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
+
+ /* allocate ucode sections in dram and set addresses */
+ err = iwx_init_fw_sec(sc, fws, &ctxt_info->dram);
+ if (err) {
+ iwx_ctxt_info_free_fw_img(sc);
+ return err;
+ }
+
+ /* Configure debug, if exists */
+ if (sc->sc_fw.dbg_dest_tlv_v1) {
+#if 1
+ err = iwx_apply_debug_destination(sc);
+ if (err) {
+ iwx_ctxt_info_free_fw_img(sc);
+ return err;
+ }
+#endif
+ }
+
+ /*
+ * Write the context info DMA base address. The device expects a
+ * 64-bit address but a simple bus_space_write_8 to this register
+ * won't work on some devices, such as the AX201.
+ */
+ paddr = sc->ctxt_info_dma.paddr;
+ IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA, paddr & 0xffffffff);
+ IWX_WRITE(sc, IWX_CSR_CTXT_INFO_BA + 4, paddr >> 32);
+
+ /* kick FW self load */
+ if (!iwx_nic_lock(sc)) {
+ iwx_ctxt_info_free_fw_img(sc);
+ return EBUSY;
+ }
+
+ iwx_set_ltr(sc);
+ iwx_write_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
+ iwx_nic_unlock(sc);
+
+ /* Context info will be released upon alive or failure to get one */
+
+ return 0;
+}
+
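+/*
+ * Gen3 variant of context info setup, used by AX210 and later devices.
+ * These devices boot via an image loader (IML) shipped in the firmware
+ * file and take their configuration from a PRPH scratch area rather
+ * than the legacy context info layout.
+ */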
+static int
+iwx_ctxt_info_gen3_init(struct iwx_softc *sc, const struct iwx_fw_sects *fws)
+{
+ struct iwx_context_info_gen3 *ctxt_info_gen3;
+ struct iwx_prph_scratch *prph_scratch;
+ struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+ uint16_t cb_size;
+ uint32_t control_flags, scratch_size;
+ uint64_t paddr;
+ int err;
+
+ if (sc->sc_fw.iml == NULL || sc->sc_fw.iml_len == 0) {
+ printf("%s: no image loader found in firmware file\n",
+ DEVNAME(sc));
+ iwx_ctxt_info_free_fw_img(sc);
+ return EINVAL;
+ }
+
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->iml_dma,
+ sc->sc_fw.iml_len, 1);
+ if (err) {
+ printf("%s: could not allocate DMA memory for "
+ "firmware image loader\n", DEVNAME(sc));
+ iwx_ctxt_info_free_fw_img(sc);
+ return ENOMEM;
+ }
+
+ prph_scratch = sc->prph_scratch_dma.vaddr;
+ memset(prph_scratch, 0, sizeof(*prph_scratch));
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+ prph_sc_ctrl->version.version = 0;
+ prph_sc_ctrl->version.mac_id = htole16(IWX_READ(sc, IWX_CSR_HW_REV));
+ prph_sc_ctrl->version.size = htole16(sizeof(*prph_scratch) / 4);
+
+ control_flags = IWX_PRPH_SCRATCH_RB_SIZE_4K |
+ IWX_PRPH_SCRATCH_MTR_MODE |
+ (IWX_PRPH_MTR_FORMAT_256B & IWX_PRPH_SCRATCH_MTR_FORMAT);
+ if (sc->sc_imr_enabled)
+ control_flags |= IWX_PRPH_SCRATCH_IMR_DEBUG_EN;
+ prph_sc_ctrl->control.control_flags = htole32(control_flags);
+
+ /* initialize RX default queue */
+ prph_sc_ctrl->rbd_cfg.free_rbd_addr =
+ htole64(sc->rxq.free_desc_dma.paddr);
+
+ /* allocate ucode sections in dram and set addresses */
+ err = iwx_init_fw_sec(sc, fws, &prph_scratch->dram);
+ if (err) {
+ iwx_dma_contig_free(&sc->iml_dma);
+ iwx_ctxt_info_free_fw_img(sc);
+ return err;
+ }
+
+ ctxt_info_gen3 = sc->ctxt_info_dma.vaddr;
+ memset(ctxt_info_gen3, 0, sizeof(*ctxt_info_gen3));
+ ctxt_info_gen3->prph_info_base_addr = htole64(sc->prph_info_dma.paddr);
+ ctxt_info_gen3->prph_scratch_base_addr =
+ htole64(sc->prph_scratch_dma.paddr);
+ scratch_size = sizeof(*prph_scratch);
+ ctxt_info_gen3->prph_scratch_size = htole32(scratch_size);
+ ctxt_info_gen3->cr_head_idx_arr_base_addr =
+ htole64(sc->rxq.stat_dma.paddr);
+ ctxt_info_gen3->tr_tail_idx_arr_base_addr =
+ htole64(sc->prph_info_dma.paddr + PAGE_SIZE / 2);
+ ctxt_info_gen3->cr_tail_idx_arr_base_addr =
+ htole64(sc->prph_info_dma.paddr + 3 * PAGE_SIZE / 4);
+ ctxt_info_gen3->mtr_base_addr =
+ htole64(sc->txq[IWX_DQA_CMD_QUEUE].desc_dma.paddr);
+ ctxt_info_gen3->mcr_base_addr = htole64(sc->rxq.used_desc_dma.paddr);
+ cb_size = IWX_TFD_QUEUE_CB_SIZE(IWX_TX_RING_COUNT);
+ ctxt_info_gen3->mtr_size = htole16(cb_size);
+ cb_size = IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE);
+ ctxt_info_gen3->mcr_size = htole16(cb_size);
+
+ memcpy(sc->iml_dma.vaddr, sc->sc_fw.iml, sc->sc_fw.iml_len);
+
+ paddr = sc->ctxt_info_dma.paddr;
+ IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR, paddr & 0xffffffff);
+ IWX_WRITE(sc, IWX_CSR_CTXT_INFO_ADDR + 4, paddr >> 32);
+
+ paddr = sc->iml_dma.paddr;
+ IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR, paddr & 0xffffffff);
+ IWX_WRITE(sc, IWX_CSR_IML_DATA_ADDR + 4, paddr >> 32);
+ IWX_WRITE(sc, IWX_CSR_IML_SIZE_ADDR, sc->sc_fw.iml_len);
+
+ IWX_SETBITS(sc, IWX_CSR_CTXT_INFO_BOOT_CTRL,
+ IWX_CSR_AUTO_FUNC_BOOT_ENA);
+
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s:%d kicking fw to get going\n", __func__, __LINE__);
+
+ /* kick FW self load */
+ if (!iwx_nic_lock(sc)) {
+ iwx_dma_contig_free(&sc->iml_dma);
+ iwx_ctxt_info_free_fw_img(sc);
+ return EBUSY;
+ }
+ iwx_set_ltr(sc);
+ iwx_write_umac_prph(sc, IWX_UREG_CPU_INIT_RUN, 1);
+ iwx_nic_unlock(sc);
+
+ /* Context info will be released upon alive or failure to get one */
+ return 0;
+}
+
+static void
+iwx_ctxt_info_free_fw_img(struct iwx_softc *sc)
+{
+ struct iwx_self_init_dram *dram = &sc->init_dram;
+ int i;
+
+ if (!dram->fw)
+ return;
+
+ for (i = 0; i < dram->lmac_cnt + dram->umac_cnt; i++)
+ iwx_dma_contig_free(&dram->fw[i]);
+
+ free(dram->fw, M_DEVBUF);
+ dram->lmac_cnt = 0;
+ dram->umac_cnt = 0;
+ dram->fw = NULL;
+}
+
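+/*
+ * Record one firmware section from the ucode file. Each section blob
+ * starts with a 32-bit device load offset followed by the raw data;
+ * only pointers into the firmware image are stored, not copies.
+ */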
+static int
+iwx_firmware_store_section(struct iwx_softc *sc, enum iwx_ucode_type type,
+ const uint8_t *data, size_t dlen)
+{
+ struct iwx_fw_sects *fws;
+ struct iwx_fw_onesect *fwone;
+
+ if (type >= IWX_UCODE_TYPE_MAX)
+ return EINVAL;
+ if (dlen < sizeof(uint32_t))
+ return EINVAL;
+
+ fws = &sc->sc_fw.fw_sects[type];
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: ucode type %d section %d\n", DEVNAME(sc), type, fws->fw_count);
+ if (fws->fw_count >= IWX_UCODE_SECT_MAX)
+ return EINVAL;
+
+ fwone = &fws->fw_sect[fws->fw_count];
+
+ /* first 32bit are device load offset */
+ memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
+
+ /* rest is data */
+ fwone->fws_data = data + sizeof(uint32_t);
+ fwone->fws_len = dlen - sizeof(uint32_t);
+
+ fws->fw_count++;
+ fws->fw_totlen += fwone->fws_len;
+
+ return 0;
+}
+
+#define IWX_DEFAULT_SCAN_CHANNELS 40
+/* Newer firmware might support more channels. Raise this value if needed. */
+#define IWX_MAX_SCAN_CHANNELS 67 /* as of iwx-cc-a0-62 firmware */
+
+struct iwx_tlv_calib_data {
+ uint32_t ucode_type;
+ struct iwx_tlv_calib_ctrl calib;
+} __packed;
+
+static int
+iwx_set_default_calib(struct iwx_softc *sc, const void *data)
+{
+ const struct iwx_tlv_calib_data *def_calib = data;
+ uint32_t ucode_type = le32toh(def_calib->ucode_type);
+
+ if (ucode_type >= IWX_UCODE_TYPE_MAX)
+ return EINVAL;
+
+ sc->sc_default_calib[ucode_type].flow_trigger =
+ def_calib->calib.flow_trigger;
+ sc->sc_default_calib[ucode_type].event_trigger =
+ def_calib->calib.event_trigger;
+
+ return 0;
+}
+
+static void
+iwx_fw_info_free(struct iwx_fw_info *fw)
+{
+ free(fw->fw_rawdata, M_DEVBUF);
+ fw->fw_rawdata = NULL;
+ fw->fw_rawsize = 0;
+ /* don't touch fw->fw_status */
+ memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
+ free(fw->iml, M_DEVBUF);
+ fw->iml = NULL;
+ fw->iml_len = 0;
+}
+
+#define IWX_FW_ADDR_CACHE_CONTROL 0xC0000000
+
+static int
+iwx_read_firmware(struct iwx_softc *sc)
+{
+ struct iwx_fw_info *fw = &sc->sc_fw;
+ const struct iwx_tlv_ucode_header *uhdr;
+ struct iwx_ucode_tlv tlv;
+ uint32_t tlv_type;
+ const uint8_t *data;
+ int err = 0;
+ size_t len;
+ const struct firmware *fwp;
+
+ if (fw->fw_status == IWX_FW_STATUS_DONE)
+ return 0;
+
+ fw->fw_status = IWX_FW_STATUS_INPROGRESS;
+ fwp = firmware_get(sc->sc_fwname);
+ sc->sc_fwp = fwp;
+
+ if (fwp == NULL) {
+ printf("%s: could not read firmware %s\n",
+ DEVNAME(sc), sc->sc_fwname);
+ err = ENOENT;
+ goto out;
+ }
+
+ IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s:%d %s: using firmware %s\n",
+ __func__, __LINE__, DEVNAME(sc), sc->sc_fwname);
+
+ sc->sc_capaflags = 0;
+ sc->sc_capa_n_scan_channels = IWX_DEFAULT_SCAN_CHANNELS;
+ memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
+ memset(sc->sc_ucode_api, 0, sizeof(sc->sc_ucode_api));
+ sc->n_cmd_versions = 0;
+
+ uhdr = (const void *)(fwp->data);
+ if (*(const uint32_t *)fwp->data != 0
+ || le32toh(uhdr->magic) != IWX_TLV_UCODE_MAGIC) {
+ printf("%s: invalid firmware %s\n",
+ DEVNAME(sc), sc->sc_fwname);
+ err = EINVAL;
+ goto out;
+ }
+
+ iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
+ IWX_UCODE_MAJOR(le32toh(uhdr->ver)),
+ IWX_UCODE_MINOR(le32toh(uhdr->ver)),
+ IWX_UCODE_API(le32toh(uhdr->ver)));
+
+ data = uhdr->data;
+ len = fwp->datasize - sizeof(*uhdr);
+
+ while (len >= sizeof(tlv)) {
+ size_t tlv_len;
+ const void *tlv_data;
+
+ memcpy(&tlv, data, sizeof(tlv));
+ tlv_len = le32toh(tlv.length);
+ tlv_type = le32toh(tlv.type);
+
+ len -= sizeof(tlv);
+ data += sizeof(tlv);
+ tlv_data = data;
+
+ if (len < tlv_len) {
+ printf("%s: firmware too short: %zu bytes\n",
+ DEVNAME(sc), len);
+ err = EINVAL;
+ goto parse_out;
+ }
+
+ switch (tlv_type) {
+ case IWX_UCODE_TLV_PROBE_MAX_LEN:
+ if (tlv_len < sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capa_max_probe_len
+ = le32toh(*(const uint32_t *)tlv_data);
+ if (sc->sc_capa_max_probe_len >
+ IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ break;
+ case IWX_UCODE_TLV_PAN:
+ if (tlv_len) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capaflags |= IWX_UCODE_TLV_FLAGS_PAN;
+ break;
+ case IWX_UCODE_TLV_FLAGS:
+ if (tlv_len < sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ /*
+ * Apparently there can be many flags, but Linux driver
+ * parses only the first one, and so do we.
+ *
+ * XXX: why does this override IWX_UCODE_TLV_PAN?
+ * Intentional or a bug? Observations from
+ * current firmware file:
+ * 1) TLV_PAN is parsed first
+ * 2) TLV_FLAGS contains TLV_FLAGS_PAN
+ * ==> this resets TLV_PAN to itself... hnnnk
+ */
+ sc->sc_capaflags = le32toh(*(const uint32_t *)tlv_data);
+ break;
+ case IWX_UCODE_TLV_CSCHEME:
+ err = iwx_store_cscheme(sc, tlv_data, tlv_len);
+ if (err)
+ goto parse_out;
+ break;
+ case IWX_UCODE_TLV_NUM_OF_CPU: {
+ uint32_t num_cpu;
+ if (tlv_len != sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ num_cpu = le32toh(*(const uint32_t *)tlv_data);
+ if (num_cpu < 1 || num_cpu > 2) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ break;
+ }
+ case IWX_UCODE_TLV_SEC_RT:
+ err = iwx_firmware_store_section(sc,
+ IWX_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
+ if (err)
+ goto parse_out;
+ break;
+ case IWX_UCODE_TLV_SEC_INIT:
+ err = iwx_firmware_store_section(sc,
+ IWX_UCODE_TYPE_INIT, tlv_data, tlv_len);
+ if (err)
+ goto parse_out;
+ break;
+ case IWX_UCODE_TLV_SEC_WOWLAN:
+ err = iwx_firmware_store_section(sc,
+ IWX_UCODE_TYPE_WOW, tlv_data, tlv_len);
+ if (err)
+ goto parse_out;
+ break;
+ case IWX_UCODE_TLV_DEF_CALIB:
+ if (tlv_len != sizeof(struct iwx_tlv_calib_data)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ err = iwx_set_default_calib(sc, tlv_data);
+ if (err)
+ goto parse_out;
+ break;
+ case IWX_UCODE_TLV_PHY_SKU:
+ if (tlv_len != sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_fw_phy_config = le32toh(*(const uint32_t *)tlv_data);
+ break;
+
+ case IWX_UCODE_TLV_API_CHANGES_SET: {
+ const struct iwx_ucode_api *api;
+ int idx, i;
+ if (tlv_len != sizeof(*api)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ api = (const struct iwx_ucode_api *)tlv_data;
+ idx = le32toh(api->api_index);
+ if (idx >= howmany(IWX_NUM_UCODE_TLV_API, 32)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ for (i = 0; i < 32; i++) {
+ if ((le32toh(api->api_flags) & (1 << i)) == 0)
+ continue;
+ setbit(sc->sc_ucode_api, i + (32 * idx));
+ }
+ break;
+ }
+
+ case IWX_UCODE_TLV_ENABLED_CAPABILITIES: {
+ const struct iwx_ucode_capa *capa;
+ int idx, i;
+ if (tlv_len != sizeof(*capa)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ capa = (const struct iwx_ucode_capa *)tlv_data;
+ idx = le32toh(capa->api_index);
+ if (idx >= howmany(IWX_NUM_UCODE_TLV_CAPA, 32)) {
+ goto parse_out;
+ }
+ for (i = 0; i < 32; i++) {
+ if ((le32toh(capa->api_capa) & (1 << i)) == 0)
+ continue;
+ setbit(sc->sc_enabled_capa, i + (32 * idx));
+ }
+ break;
+ }
+
+ case IWX_UCODE_TLV_SDIO_ADMA_ADDR:
+ case IWX_UCODE_TLV_FW_GSCAN_CAPA:
+ /* ignore, not used by current driver */
+ break;
+
+ case IWX_UCODE_TLV_SEC_RT_USNIFFER:
+ err = iwx_firmware_store_section(sc,
+ IWX_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
+ tlv_len);
+ if (err)
+ goto parse_out;
+ break;
+
+ case IWX_UCODE_TLV_PAGING:
+ if (tlv_len != sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ break;
+
+ case IWX_UCODE_TLV_N_SCAN_CHANNELS:
+ if (tlv_len != sizeof(uint32_t)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capa_n_scan_channels =
+ le32toh(*(const uint32_t *)tlv_data);
+ if (sc->sc_capa_n_scan_channels > IWX_MAX_SCAN_CHANNELS) {
+ err = ERANGE;
+ goto parse_out;
+ }
+ break;
+
+ case IWX_UCODE_TLV_FW_VERSION:
+ if (tlv_len != sizeof(uint32_t) * 3) {
+ err = EINVAL;
+ goto parse_out;
+ }
+
+ iwx_fw_version_str(sc->sc_fwver, sizeof(sc->sc_fwver),
+ le32toh(((const uint32_t *)tlv_data)[0]),
+ le32toh(((const uint32_t *)tlv_data)[1]),
+ le32toh(((const uint32_t *)tlv_data)[2]));
+ break;
+
+ case IWX_UCODE_TLV_FW_DBG_DEST: {
+ const struct iwx_fw_dbg_dest_tlv_v1 *dest_v1 = NULL;
+
+ fw->dbg_dest_ver = (const uint8_t *)tlv_data;
+ if (*fw->dbg_dest_ver != 0) {
+ err = EINVAL;
+ goto parse_out;
+ }
+
+ if (fw->dbg_dest_tlv_init)
+ break;
+ fw->dbg_dest_tlv_init = true;
+
+ dest_v1 = (const void *)tlv_data;
+ fw->dbg_dest_tlv_v1 = dest_v1;
+ fw->n_dest_reg = tlv_len -
+ offsetof(struct iwx_fw_dbg_dest_tlv_v1, reg_ops);
+ fw->n_dest_reg /= sizeof(dest_v1->reg_ops[0]);
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: found debug dest; n_dest_reg=%d\n",
+ __func__, fw->n_dest_reg);
+ break;
+ }
+
+ case IWX_UCODE_TLV_FW_DBG_CONF: {
+ const struct iwx_fw_dbg_conf_tlv *conf = (const void *)tlv_data;
+
+ if (!fw->dbg_dest_tlv_init ||
+ conf->id >= nitems(fw->dbg_conf_tlv) ||
+ fw->dbg_conf_tlv[conf->id] != NULL)
+ break;
+
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "Found debug configuration: %d\n", conf->id);
+ fw->dbg_conf_tlv[conf->id] = conf;
+ fw->dbg_conf_tlv_len[conf->id] = tlv_len;
+ break;
+ }
+
+ case IWX_UCODE_TLV_UMAC_DEBUG_ADDRS: {
+ const struct iwx_umac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
+
+ if (tlv_len != sizeof(*dbg_ptrs)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+ break;
+ sc->sc_uc.uc_umac_error_event_table =
+ le32toh(dbg_ptrs->error_info_addr) &
+ ~IWX_FW_ADDR_CACHE_CONTROL;
+ sc->sc_uc.error_event_table_tlv_status |=
+ IWX_ERROR_EVENT_TABLE_UMAC;
+ break;
+ }
+
+ case IWX_UCODE_TLV_LMAC_DEBUG_ADDRS: {
+ const struct iwx_lmac_debug_addrs *dbg_ptrs =
+ (const void *)tlv_data;
+
+ if (tlv_len != sizeof(*dbg_ptrs)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ if (sc->sc_device_family < IWX_DEVICE_FAMILY_22000)
+ break;
+ sc->sc_uc.uc_lmac_error_event_table[0] =
+ le32toh(dbg_ptrs->error_event_table_ptr) &
+ ~IWX_FW_ADDR_CACHE_CONTROL;
+ sc->sc_uc.error_event_table_tlv_status |=
+ IWX_ERROR_EVENT_TABLE_LMAC1;
+ break;
+ }
+
+ case IWX_UCODE_TLV_FW_MEM_SEG:
+ break;
+
+ case IWX_UCODE_TLV_IML:
+ if (sc->sc_fw.iml != NULL) {
+ free(fw->iml, M_DEVBUF);
+ fw->iml_len = 0;
+ }
+ sc->sc_fw.iml = malloc(tlv_len, M_DEVBUF,
+ M_WAITOK | M_ZERO);
+ if (sc->sc_fw.iml == NULL) {
+ err = ENOMEM;
+ goto parse_out;
+ }
+ memcpy(sc->sc_fw.iml, tlv_data, tlv_len);
+ sc->sc_fw.iml_len = tlv_len;
+ break;
+
+ case IWX_UCODE_TLV_CMD_VERSIONS:
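+ /*
+ * Some firmware images apparently carry trailing padding in
+ * this TLV; round tlv_len down to a whole number of entries.
+ */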
+ if (tlv_len % sizeof(struct iwx_fw_cmd_version)) {
+ tlv_len /= sizeof(struct iwx_fw_cmd_version);
+ tlv_len *= sizeof(struct iwx_fw_cmd_version);
+ }
+ if (sc->n_cmd_versions != 0) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ if (tlv_len > sizeof(sc->cmd_versions)) {
+ err = EINVAL;
+ goto parse_out;
+ }
+ memcpy(&sc->cmd_versions[0], tlv_data, tlv_len);
+ sc->n_cmd_versions = tlv_len / sizeof(struct iwx_fw_cmd_version);
+ break;
+
+ case IWX_UCODE_TLV_FW_RECOVERY_INFO:
+ break;
+
+ case IWX_UCODE_TLV_FW_FSEQ_VERSION:
+ case IWX_UCODE_TLV_PHY_INTEGRATION_VERSION:
+ case IWX_UCODE_TLV_FW_NUM_STATIONS:
+ case IWX_UCODE_TLV_FW_NUM_BEACONS:
+ break;
+
+ /* undocumented TLVs found in iwx-cc-a0-46 image */
+ case 58:
+ case 0x1000003:
+ case 0x1000004:
+ break;
+
+ /* undocumented TLVs found in iwx-cc-a0-48 image */
+ case 0x1000000:
+ case 0x1000002:
+ break;
+
+ case IWX_UCODE_TLV_TYPE_DEBUG_INFO:
+ case IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION:
+ case IWX_UCODE_TLV_TYPE_HCMD:
+ case IWX_UCODE_TLV_TYPE_REGIONS:
+ case IWX_UCODE_TLV_TYPE_TRIGGERS:
+ case IWX_UCODE_TLV_TYPE_CONF_SET:
+ case IWX_UCODE_TLV_SEC_TABLE_ADDR:
+ case IWX_UCODE_TLV_D3_KEK_KCK_ADDR:
+ case IWX_UCODE_TLV_CURRENT_PC:
+ break;
+
+ /* undocumented TLV found in iwx-cc-a0-67 image */
+ case 0x100000b:
+ break;
+
+ /* undocumented TLV found in iwx-ty-a0-gf-a0-73 image */
+ case 0x101:
+ break;
+
+ /* undocumented TLV found in iwx-ty-a0-gf-a0-77 image */
+ case 0x100000c:
+ break;
+
+ /* undocumented TLV found in iwx-ty-a0-gf-a0-89 image */
+ case 69:
+ break;
+
+ default:
+ err = EINVAL;
+ goto parse_out;
+ }
+
+ /*
+ * Check for size_t overflow and ignore missing padding at
+ * end of firmware file.
+ */
+ if (roundup(tlv_len, 4) > len)
+ break;
+
+ len -= roundup(tlv_len, 4);
+ data += roundup(tlv_len, 4);
+ }
+
+ KASSERT(err == 0, ("unhandled fw parse error"));
+
+parse_out:
+ if (err) {
+ printf("%s: firmware parse error %d, "
+ "section type %d\n", DEVNAME(sc), err, tlv_type);
+ }
+
+out:
+ if (err) {
+ fw->fw_status = IWX_FW_STATUS_NONE;
+ if (fw->fw_rawdata != NULL)
+ iwx_fw_info_free(fw);
+ } else
+ fw->fw_status = IWX_FW_STATUS_DONE;
+ return err;
+}
+
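+/*
+ * Periphery (PRPH) registers are not mapped directly; they are reached
+ * indirectly through the HBUS_TARG_PRPH address/data window registers.
+ * The width of the valid PRPH address range depends on device family.
+ */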
+static uint32_t
+iwx_prph_addr_mask(struct iwx_softc *sc)
+{
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ return 0x00ffffff;
+ else
+ return 0x000fffff;
+}
+
+static uint32_t
+iwx_read_prph_unlocked(struct iwx_softc *sc, uint32_t addr)
+{
+ uint32_t mask = iwx_prph_addr_mask(sc);
+ IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_RADDR, ((addr & mask) | (3 << 24)));
+ IWX_BARRIER_READ_WRITE(sc);
+ return IWX_READ(sc, IWX_HBUS_TARG_PRPH_RDAT);
+}
+
+uint32_t
+iwx_read_prph(struct iwx_softc *sc, uint32_t addr)
+{
+ iwx_nic_assert_locked(sc);
+ return iwx_read_prph_unlocked(sc, addr);
+}
+
+static void
+iwx_write_prph_unlocked(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+ uint32_t mask = iwx_prph_addr_mask(sc);
+ IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WADDR, ((addr & mask) | (3 << 24)));
+ IWX_BARRIER_WRITE(sc);
+ IWX_WRITE(sc, IWX_HBUS_TARG_PRPH_WDAT, val);
+}
+
+static void
+iwx_write_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+ iwx_nic_assert_locked(sc);
+ iwx_write_prph_unlocked(sc, addr, val);
+}
+
+static uint32_t
+iwx_read_umac_prph(struct iwx_softc *sc, uint32_t addr)
+{
+ return iwx_read_prph(sc, addr + sc->sc_umac_prph_offset);
+}
+
+static void
+iwx_write_umac_prph(struct iwx_softc *sc, uint32_t addr, uint32_t val)
+{
+ iwx_write_prph(sc, addr + sc->sc_umac_prph_offset, val);
+}
+
+static int
+iwx_read_mem(struct iwx_softc *sc, uint32_t addr, void *buf, int dwords)
+{
+ int offs, err = 0;
+ uint32_t *vals = buf;
+
+ if (iwx_nic_lock(sc)) {
+ IWX_WRITE(sc, IWX_HBUS_TARG_MEM_RADDR, addr);
+ for (offs = 0; offs < dwords; offs++)
+ vals[offs] = le32toh(IWX_READ(sc, IWX_HBUS_TARG_MEM_RDAT));
+ iwx_nic_unlock(sc);
+ } else {
+ err = EBUSY;
+ }
+ return err;
+}
+
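+/*
+ * Poll a CSR register until the masked bits take the expected value,
+ * checking every 10 microseconds. 'timo' is the timeout in
+ * microseconds. Returns 1 on success, 0 on timeout.
+ */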
+static int
+iwx_poll_bit(struct iwx_softc *sc, int reg, uint32_t bits, uint32_t mask,
+ int timo)
+{
+ for (;;) {
+ if ((IWX_READ(sc, reg) & mask) == (bits & mask)) {
+ return 1;
+ }
+ if (timo < 10) {
+ return 0;
+ }
+ timo -= 10;
+ DELAY(10);
+ }
+}
+
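+/*
+ * Request MAC access by setting MAC_ACCESS_REQ in GP_CNTRL and waiting
+ * for the MAC clock to become ready. Calls may nest; sc_nic_locks
+ * counts how many times the lock has been taken. Returns 1 on success.
+ */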
+static int
+iwx_nic_lock(struct iwx_softc *sc)
+{
+ if (sc->sc_nic_locks > 0) {
+ iwx_nic_assert_locked(sc);
+ sc->sc_nic_locks++;
+ return 1; /* already locked */
+ }
+
+ IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ DELAY(2);
+
+ if (iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
+ | IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 150000)) {
+ sc->sc_nic_locks++;
+ return 1;
+ }
+
+ printf("%s: acquiring device failed\n", DEVNAME(sc));
+ return 0;
+}
+
+static void
+iwx_nic_assert_locked(struct iwx_softc *sc)
+{
+ if (sc->sc_nic_locks <= 0)
+ panic("%s: nic locks counter %d", DEVNAME(sc), sc->sc_nic_locks);
+}
+
+static void
+iwx_nic_unlock(struct iwx_softc *sc)
+{
+ if (sc->sc_nic_locks > 0) {
+ if (--sc->sc_nic_locks == 0)
+ IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ } else
+ printf("%s: NIC already unlocked\n", DEVNAME(sc));
+}
+
+static int
+iwx_set_bits_mask_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits,
+ uint32_t mask)
+{
+ uint32_t val;
+
+ if (iwx_nic_lock(sc)) {
+ val = iwx_read_prph(sc, reg) & mask;
+ val |= bits;
+ iwx_write_prph(sc, reg, val);
+ iwx_nic_unlock(sc);
+ return 0;
+ }
+ return EBUSY;
+}
+
+static int
+iwx_set_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+ return iwx_set_bits_mask_prph(sc, reg, bits, ~0);
+}
+
+static int
+iwx_clear_bits_prph(struct iwx_softc *sc, uint32_t reg, uint32_t bits)
+{
+ return iwx_set_bits_mask_prph(sc, reg, 0, ~bits);
+}
+
+static void
+iwx_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ if (error != 0)
+ return;
+ KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
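+/*
+ * Allocate a physically contiguous, coherent DMA buffer in the 32-bit
+ * address space, mapped as a single segment, and record its bus
+ * address in dma->paddr.
+ */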
+static int
+iwx_dma_contig_alloc(bus_dma_tag_t tag, struct iwx_dma_info *dma,
+ bus_size_t size, bus_size_t alignment)
+{
+ int error;
+
+ dma->tag = NULL;
+ dma->map = NULL;
+ dma->size = size;
+ dma->vaddr = NULL;
+
+ error = bus_dma_tag_create(tag, alignment,
+ 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size,
+ 1, size, 0, NULL, NULL, &dma->tag);
+ if (error != 0)
+ goto fail;
+
+ error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr,
+ BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map);
+ if (error != 0)
+ goto fail;
+
+ error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size,
+ iwx_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT);
+ if (error != 0) {
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+ dma->vaddr = NULL;
+ goto fail;
+ }
+
+ bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
+
+ return 0;
+
+fail:
+ iwx_dma_contig_free(dma);
+ return error;
+}
+
+static void
+iwx_dma_contig_free(struct iwx_dma_info *dma)
+{
+ if (dma->vaddr != NULL) {
+ bus_dmamap_sync(dma->tag, dma->map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(dma->tag, dma->map);
+ bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
+ dma->vaddr = NULL;
+ }
+ if (dma->tag != NULL) {
+ bus_dma_tag_destroy(dma->tag);
+ dma->tag = NULL;
+ }
+}
+
+static int
+iwx_alloc_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+ bus_size_t size;
+ int i, err;
+
+ ring->cur = 0;
+
+ /* Allocate RX descriptors (256-byte aligned). */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ size = sizeof(struct iwx_rx_transfer_desc);
+ else
+ size = sizeof(uint64_t);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma,
+ size * IWX_RX_MQ_RING_COUNT, 256);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate RX ring DMA memory\n");
+ goto fail;
+ }
+ ring->desc = ring->free_desc_dma.vaddr;
+
+ /* Allocate RX status area (16-byte aligned). */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ size = sizeof(uint16_t);
+ else
+ size = sizeof(*ring->stat);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma, size, 16);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate RX status DMA memory\n");
+ goto fail;
+ }
+ ring->stat = ring->stat_dma.vaddr;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ size = sizeof(struct iwx_rx_completion_desc);
+ else
+ size = sizeof(uint32_t);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
+ size * IWX_RX_MQ_RING_COUNT, 256);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate RX ring DMA memory\n");
+ goto fail;
+ }
+
+ err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR, NULL, NULL, IWX_RBUF_SIZE, 1, IWX_RBUF_SIZE,
+ 0, NULL, NULL, &ring->data_dmat);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not create RX buf DMA tag\n");
+ goto fail;
+ }
+
+ for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
+ struct iwx_rx_data *data = &ring->data[i];
+
+ memset(data, 0, sizeof(*data));
+ err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not create RX buf DMA map\n");
+ goto fail;
+ }
+
+ err = iwx_rx_addbuf(sc, IWX_RBUF_SIZE, i);
+ if (err)
+ goto fail;
+ }
+ return 0;
+
+fail: iwx_free_rx_ring(sc, ring);
+ return err;
+}
+
+static void
+iwx_disable_rx_dma(struct iwx_softc *sc)
+{
+ int ntries;
+
+ if (iwx_nic_lock(sc)) {
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ iwx_write_umac_prph(sc, IWX_RFH_RXF_DMA_CFG_GEN3, 0);
+ else
+ iwx_write_prph(sc, IWX_RFH_RXF_DMA_CFG, 0);
+ for (ntries = 0; ntries < 1000; ntries++) {
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ if (iwx_read_umac_prph(sc,
+ IWX_RFH_GEN_STATUS_GEN3) & IWX_RXF_DMA_IDLE)
+ break;
+ } else {
+ if (iwx_read_prph(sc, IWX_RFH_GEN_STATUS) &
+ IWX_RXF_DMA_IDLE)
+ break;
+ }
+ DELAY(10);
+ }
+ iwx_nic_unlock(sc);
+ }
+}
+
+static void
+iwx_reset_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+ ring->cur = 0;
+ bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ uint16_t *status = sc->rxq.stat_dma.vaddr;
+ *status = 0;
+ } else
+ memset(ring->stat, 0, sizeof(*ring->stat));
+ bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map,
+ BUS_DMASYNC_POSTWRITE);
+}
+
+static void
+iwx_free_rx_ring(struct iwx_softc *sc, struct iwx_rx_ring *ring)
+{
+ int i;
+
+ iwx_dma_contig_free(&ring->free_desc_dma);
+ iwx_dma_contig_free(&ring->stat_dma);
+ iwx_dma_contig_free(&ring->used_desc_dma);
+
+ for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++) {
+ struct iwx_rx_data *data = &ring->data[i];
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ if (data->map != NULL) {
+ bus_dmamap_destroy(ring->data_dmat, data->map);
+ data->map = NULL;
+ }
+ }
+ if (ring->data_dmat != NULL) {
+ bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
+}
+
+static int
+iwx_alloc_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring, int qid)
+{
+ bus_addr_t paddr;
+ bus_size_t size;
+ int i, err;
+ size_t bc_tbl_size;
+ bus_size_t bc_align;
+ size_t mapsize;
+
+ ring->qid = qid;
+ ring->queued = 0;
+ ring->cur = 0;
+ ring->cur_hw = 0;
+ ring->tail = 0;
+ ring->tail_hw = 0;
+
+ /* Allocate TX descriptors (256-byte aligned). */
+ size = IWX_TX_RING_COUNT * sizeof(struct iwx_tfh_tfd);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate TX ring DMA memory\n");
+ goto fail;
+ }
+ ring->desc = ring->desc_dma.vaddr;
+
+ /*
+ * The hardware supports up to 512 Tx rings, which is more than we
+ * currently need.
+ *
+ * In DQA mode we use 1 command queue + 1 default queue for
+ * management, control, and non-QoS data frames.
+ * The command is queue sc->txq[0], our default queue is sc->txq[1].
+ *
+ * Tx aggregation requires additional queues, one queue per TID for
+ * which aggregation is enabled. We map TID 0-7 to sc->txq[2:9].
+ * Firmware may assign its own internal IDs for these queues
+ * depending on which TID gets aggregation enabled first.
+ * The driver maintains a table mapping driver-side queue IDs
+ * to firmware-side queue IDs.
+ */
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ bc_tbl_size = sizeof(struct iwx_gen3_bc_tbl_entry) *
+ IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ bc_align = 128;
+ } else {
+ bc_tbl_size = sizeof(struct iwx_agn_scd_bc_tbl);
+ bc_align = 64;
+ }
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->bc_tbl, bc_tbl_size,
+ bc_align);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate byte count table DMA memory\n");
+ goto fail;
+ }
+
+ size = IWX_TX_RING_COUNT * sizeof(struct iwx_device_cmd);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size,
+ IWX_FIRST_TB_SIZE_ALIGN);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not allocate cmd DMA memory\n");
+ goto fail;
+ }
+ ring->cmd = ring->cmd_dma.vaddr;
+
+ /* FW commands may require more mapped space than packets. */
+ if (qid == IWX_DQA_CMD_QUEUE)
+ mapsize = (sizeof(struct iwx_cmd_header) +
+ IWX_MAX_CMD_PAYLOAD_SIZE);
+ else
+ mapsize = MCLBYTES;
+ err = bus_dma_tag_create(sc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT,
+ BUS_SPACE_MAXADDR, NULL, NULL, mapsize, IWX_TFH_NUM_TBS - 2,
+ mapsize, 0, NULL, NULL, &ring->data_dmat);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not create TX buf DMA tag\n");
+ goto fail;
+ }
+
+ paddr = ring->cmd_dma.paddr;
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ data->cmd_paddr = paddr;
+ paddr += sizeof(struct iwx_device_cmd);
+
+ err = bus_dmamap_create(ring->data_dmat, 0, &data->map);
+ if (err) {
+ device_printf(sc->sc_dev,
+ "could not create TX buf DMA map\n");
+ goto fail;
+ }
+ }
+ KASSERT(paddr == ring->cmd_dma.paddr + size, ("bad paddr in txr alloc"));
+ return 0;
+
+fail:
+ return err;
+}
+
+static void
+iwx_reset_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ }
+
+ /* Clear byte count table. */
+ memset(ring->bc_tbl.vaddr, 0, ring->bc_tbl.size);
+
+ /* Clear TX descriptors. */
+ memset(ring->desc, 0, ring->desc_dma.size);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ sc->qfullmsk &= ~(1 << ring->qid);
+ sc->qenablemsk &= ~(1 << ring->qid);
+ for (i = 0; i < nitems(sc->aggqid); i++) {
+ if (sc->aggqid[i] == ring->qid) {
+ sc->aggqid[i] = 0;
+ break;
+ }
+ }
+ ring->queued = 0;
+ ring->cur = 0;
+ ring->cur_hw = 0;
+ ring->tail = 0;
+ ring->tail_hw = 0;
+ ring->tid = 0;
+}
+
+static void
+iwx_free_tx_ring(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ int i;
+
+ iwx_dma_contig_free(&ring->desc_dma);
+ iwx_dma_contig_free(&ring->cmd_dma);
+ iwx_dma_contig_free(&ring->bc_tbl);
+
+ for (i = 0; i < IWX_TX_RING_COUNT; i++) {
+ struct iwx_tx_data *data = &ring->data[i];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ if (data->map != NULL) {
+ bus_dmamap_destroy(ring->data_dmat, data->map);
+ data->map = NULL;
+ }
+ }
+ if (ring->data_dmat != NULL) {
+ bus_dma_tag_destroy(ring->data_dmat);
+ ring->data_dmat = NULL;
+ }
+}
+
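+/*
+ * Unmask only the RF-kill interrupt cause so that switch events are
+ * still delivered while the device is otherwise quiesced.
+ */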
+static void
+iwx_enable_rfkill_int(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ sc->sc_intmask = IWX_CSR_INT_BIT_RF_KILL;
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+ } else {
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ sc->sc_fh_init_mask);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ ~IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL);
+ sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL;
+ }
+
+ IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN);
+}
+
+static int
+iwx_check_rfkill(struct iwx_softc *sc)
+{
+ uint32_t v;
+ int rv;
+
+ /*
+ * "documentation" is not really helpful here:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ *
+ * But apparently when it's off, it's on ...
+ */
+ v = IWX_READ(sc, IWX_CSR_GP_CNTRL);
+ rv = (v & IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
+ if (rv) {
+ sc->sc_flags |= IWX_FLAG_RFKILL;
+ } else {
+ sc->sc_flags &= ~IWX_FLAG_RFKILL;
+ }
+
+ return rv;
+}
+
+static void
+iwx_enable_interrupts(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ sc->sc_intmask = IWX_CSR_INI_SET_MASK;
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+ } else {
+ /*
+ * sc_fh_mask and sc_hw_mask track the currently unmasked causes.
+ * Unlike MSI, an MSI-X cause is enabled while its mask bit is clear.
+ */
+ sc->sc_hw_mask = sc->sc_hw_init_mask;
+ sc->sc_fh_mask = sc->sc_fh_init_mask;
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ ~sc->sc_fh_mask);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ ~sc->sc_hw_mask);
+ }
+}
+
+static void
+iwx_enable_fwload_interrupt(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ sc->sc_intmask = IWX_CSR_INT_BIT_ALIVE | IWX_CSR_INT_BIT_FH_RX;
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+ } else {
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ ~IWX_MSIX_HW_INT_CAUSES_REG_ALIVE);
+ sc->sc_hw_mask = IWX_MSIX_HW_INT_CAUSES_REG_ALIVE;
+ /*
+ * Leave all the FH causes enabled to get the ALIVE
+ * notification.
+ */
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ ~sc->sc_fh_init_mask);
+ sc->sc_fh_mask = sc->sc_fh_init_mask;
+ }
+}
+
+#if 0
+static void
+iwx_restore_interrupts(struct iwx_softc *sc)
+{
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, sc->sc_intmask);
+}
+#endif
+
+static void
+iwx_disable_interrupts(struct iwx_softc *sc)
+{
+ if (!sc->sc_msix) {
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+ /* acknowledge all interrupts */
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+ IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
+ } else {
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ sc->sc_fh_init_mask);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ sc->sc_hw_init_mask);
+ }
+}
+
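+/*
+ * Reset the interrupt cause table (ICT), a DMA region into which the
+ * device writes interrupt causes, and point the hardware back at it.
+ */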
+static void
+iwx_ict_reset(struct iwx_softc *sc)
+{
+ iwx_disable_interrupts(sc);
+
+ memset(sc->ict_dma.vaddr, 0, IWX_ICT_SIZE);
+ sc->ict_cur = 0;
+
+ /* Set physical address of ICT (4KB aligned). */
+ IWX_WRITE(sc, IWX_CSR_DRAM_INT_TBL_REG,
+ IWX_CSR_DRAM_INT_TBL_ENABLE
+ | IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK
+ | IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER
+ | sc->ict_dma.paddr >> IWX_ICT_PADDR_SHIFT);
+
+ /* Switch to ICT interrupt mode in driver. */
+ sc->sc_flags |= IWX_FLAG_USE_ICT;
+
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+ iwx_enable_interrupts(sc);
+}
+
+#define IWX_HW_READY_TIMEOUT 50
+static int
+iwx_set_hw_ready(struct iwx_softc *sc)
+{
+ int ready;
+
+ IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ ready = iwx_poll_bit(sc, IWX_CSR_HW_IF_CONFIG_REG,
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ IWX_HW_READY_TIMEOUT);
+ if (ready)
+ IWX_SETBITS(sc, IWX_CSR_MBOX_SET_REG,
+ IWX_CSR_MBOX_SET_REG_OS_ALIVE);
+
+ DPRINTF(("%s: ready=%d\n", __func__, ready));
+ return ready;
+}
+#undef IWX_HW_READY_TIMEOUT
+
+static int
+iwx_prepare_card_hw(struct iwx_softc *sc)
+{
+ int t = 0;
+ int ntries;
+
+ if (iwx_set_hw_ready(sc))
+ return 0;
+
+ IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+ IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ DELAY(1000);
+
+ for (ntries = 0; ntries < 10; ntries++) {
+ /* If HW is not ready, prepare the conditions to check again */
+ IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+ IWX_CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ do {
+ if (iwx_set_hw_ready(sc))
+ return 0;
+ DELAY(200);
+ t += 200;
+ } while (t < 150000);
+ DELAY(25000);
+ }
+
+ return ETIMEDOUT;
+}
+
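+/*
+ * Toggle HPM power gating: briefly force the chip active, enable the
+ * power gating and sleep bits, then release the force bit. Used on
+ * integrated 22000-family devices during startup.
+ */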
+static int
+iwx_force_power_gating(struct iwx_softc *sc)
+{
+ int err;
+
+ err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
+ IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ if (err)
+ return err;
+ DELAY(20);
+ err = iwx_set_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
+ IWX_HPM_HIPM_GEN_CFG_CR_PG_EN |
+ IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN);
+ if (err)
+ return err;
+ DELAY(20);
+ err = iwx_clear_bits_prph(sc, IWX_HPM_HIPM_GEN_CFG,
+ IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+ return err;
+}
+
+static void
+iwx_apm_config(struct iwx_softc *sc)
+{
+ uint16_t lctl, cap;
+ int pcie_ptr;
+ int error;
+
+ /*
+ * L0S states have been found to be unstable with our devices
+ * and in newer hardware they are not officially supported at
+ * all, so we must always set the L0S_DISABLED bit.
+ */
+ IWX_SETBITS(sc, IWX_CSR_GIO_REG, IWX_CSR_GIO_REG_VAL_L0S_DISABLED);
+
+ error = pci_find_cap(sc->sc_dev, PCIY_EXPRESS, &pcie_ptr);
+ if (error != 0) {
+ printf("can't fill pcie_ptr\n");
+ return;
+ }
+
+ lctl = pci_read_config(sc->sc_dev, pcie_ptr + PCIER_LINK_CTL,
+ sizeof(lctl));
+#define PCI_PCIE_LCSR_ASPM_L0S 0x00000001
+ sc->sc_pm_support = !(lctl & PCI_PCIE_LCSR_ASPM_L0S);
+#define PCI_PCIE_DCSR2 0x28
+ cap = pci_read_config(sc->sc_dev, pcie_ptr + PCI_PCIE_DCSR2,
+ sizeof(cap));
+#define PCI_PCIE_DCSR2_LTREN 0x00000400
+ sc->sc_ltr_enabled = (cap & PCI_PCIE_DCSR2_LTREN) ? 1 : 0;
+#define PCI_PCIE_LCSR_ASPM_L1 0x00000002
+ DPRINTF(("%s: L1 %sabled - LTR %sabled\n",
+ DEVNAME(sc),
+ (lctl & PCI_PCIE_LCSR_ASPM_L1) ? "En" : "Dis",
+ sc->sc_ltr_enabled ? "En" : "Dis"));
+#undef PCI_PCIE_LCSR_ASPM_L0S
+#undef PCI_PCIE_DCSR2
+#undef PCI_PCIE_DCSR2_LTREN
+#undef PCI_PCIE_LCSR_ASPM_L1
+}
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * e.g. after platform boot or shutdown.
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int
+iwx_apm_init(struct iwx_softc *sc)
+{
+ int err = 0;
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ IWX_SETBITS(sc, IWX_CSR_GIO_CHICKEN_BITS,
+ IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ IWX_SETBITS(sc, IWX_CSR_DBG_HPET_MEM_REG, IWX_CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ */
+ IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ iwx_apm_config(sc);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ IWX_SETBITS(sc, IWX_CSR_GP_CNTRL, IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. iwx_write_prph()
+ * and accesses to uCode SRAM.
+ */
+ if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
+ printf("%s: timeout waiting for clock stabilization\n",
+ DEVNAME(sc));
+ err = ETIMEDOUT;
+ goto out;
+ }
+ out:
+ if (err)
+ printf("%s: apm init error %d\n", DEVNAME(sc), err);
+ return err;
+}
+
+static void
+iwx_apm_stop(struct iwx_softc *sc)
+{
+ IWX_SETBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+ IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ IWX_SETBITS(sc, IWX_CSR_HW_IF_CONFIG_REG,
+ IWX_CSR_HW_IF_CONFIG_REG_PREPARE |
+ IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME);
+ DELAY(1000);
+ IWX_CLRBITS(sc, IWX_CSR_DBG_LINK_PWR_MGMT_REG,
+ IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED);
+ DELAY(5000);
+
+ /* stop device's busmaster DMA activity */
+ IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ if (!iwx_poll_bit(sc, IWX_CSR_RESET,
+ IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
+ printf("%s: timeout waiting for bus master\n", DEVNAME(sc));
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+
+static void
+iwx_init_msix_hw(struct iwx_softc *sc)
+{
+ iwx_conf_msix_hw(sc, 0);
+
+ if (!sc->sc_msix)
+ return;
+
+ sc->sc_fh_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_FH_INT_MASK_AD);
+ sc->sc_fh_mask = sc->sc_fh_init_mask;
+ sc->sc_hw_init_mask = ~IWX_READ(sc, IWX_CSR_MSIX_HW_INT_MASK_AD);
+ sc->sc_hw_mask = sc->sc_hw_init_mask;
+}
+
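+/*
+ * Route all MSI-X interrupt causes to vector 0 and unmask them. When
+ * running with plain MSI this only switches the hardware, which
+ * defaults to MSI-X on newer chips, back into MSI mode.
+ */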
+static void
+iwx_conf_msix_hw(struct iwx_softc *sc, int stopped)
+{
+ int vector = 0;
+
+ if (!sc->sc_msix) {
+ /* Newer chips default to MSIX. */
+ if (!stopped && iwx_nic_lock(sc)) {
+ iwx_write_umac_prph(sc, IWX_UREG_CHICK,
+ IWX_UREG_CHICK_MSI_ENABLE);
+ iwx_nic_unlock(sc);
+ }
+ return;
+ }
+
+ if (!stopped && iwx_nic_lock(sc)) {
+ iwx_write_umac_prph(sc, IWX_UREG_CHICK,
+ IWX_UREG_CHICK_MSIX_ENABLE);
+ iwx_nic_unlock(sc);
+ }
+
+ /* Disable all interrupts */
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_MASK_AD, ~0);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_MASK_AD, ~0);
+
+ /* Map fallback-queue (command/mgmt) to a single vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(0),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ /* Map RSS queue (data) to the same vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_RX_IVAR(1),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+ /* Enable the interrupt causes for both RX queues */
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ IWX_MSIX_FH_INT_CAUSES_Q0 | IWX_MSIX_FH_INT_CAUSES_Q1);
+
+ /* Map non-RX causes to the same vector */
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_S2D),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_FH_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_ALIVE),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_WAKEUP),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_CT_KILL),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_RF_KILL),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_PERIODIC),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SW_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_SCD),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_FH_TX),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HW_ERR),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+ IWX_WRITE_1(sc, IWX_CSR_MSIX_IVAR(IWX_MSIX_IVAR_CAUSE_REG_HAP),
+ vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+
+	/* Enable the non-RX interrupt causes */
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_FH_INT_MASK_AD,
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM |
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM |
+ IWX_MSIX_FH_INT_CAUSES_S2D |
+ IWX_MSIX_FH_INT_CAUSES_FH_ERR);
+ IWX_CLRBITS(sc, IWX_CSR_MSIX_HW_INT_MASK_AD,
+ IWX_MSIX_HW_INT_CAUSES_REG_ALIVE |
+ IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP |
+ IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE |
+ IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL |
+ IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL |
+ IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC |
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR |
+ IWX_MSIX_HW_INT_CAUSES_REG_SCD |
+ IWX_MSIX_HW_INT_CAUSES_REG_FH_TX |
+ IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR |
+ IWX_MSIX_HW_INT_CAUSES_REG_HAP);
+}
+
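+/*
+ * On 22000-family devices a set persistence bit in the HPM debug
+ * register prevents a full device reset. Clear it, unless the
+ * write-protection bit denies us access.
+ */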
+static int
+iwx_clear_persistence_bit(struct iwx_softc *sc)
+{
+ uint32_t hpm, wprot;
+
+ hpm = iwx_read_prph_unlocked(sc, IWX_HPM_DEBUG);
+ if (hpm != 0xa5a5a5a0 && (hpm & IWX_PERSISTENCE_BIT)) {
+ wprot = iwx_read_prph_unlocked(sc, IWX_PREG_PRPH_WPROT_22000);
+ if (wprot & IWX_PREG_WFPM_ACCESS) {
+ printf("%s: cannot clear persistence bit\n",
+ DEVNAME(sc));
+ return EPERM;
+ }
+ iwx_write_prph_unlocked(sc, IWX_HPM_DEBUG,
+ hpm & ~IWX_PERSISTENCE_BIT);
+ }
+
+ return 0;
+}
+
+static int
+iwx_start_hw(struct iwx_softc *sc)
+{
+ int err;
+
+ err = iwx_prepare_card_hw(sc);
+ if (err)
+ return err;
+
+ if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000) {
+ err = iwx_clear_persistence_bit(sc);
+ if (err)
+ return err;
+ }
+
+ /* Reset the entire device */
+ IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
+ DELAY(5000);
+
+ if (sc->sc_device_family == IWX_DEVICE_FAMILY_22000 &&
+ sc->sc_integrated) {
+ IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ DELAY(20);
+ if (!iwx_poll_bit(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
+ printf("%s: timeout waiting for clock stabilization\n",
+ DEVNAME(sc));
+ return ETIMEDOUT;
+ }
+
+ err = iwx_force_power_gating(sc);
+ if (err)
+ return err;
+
+ /* Reset the entire device */
+ IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
+ DELAY(5000);
+ }
+
+ err = iwx_apm_init(sc);
+ if (err)
+ return err;
+
+ iwx_init_msix_hw(sc);
+
+ iwx_enable_rfkill_int(sc);
+ iwx_check_rfkill(sc);
+
+ return 0;
+}
+
+static void
+iwx_stop_device(struct iwx_softc *sc)
+{
+ int i;
+
+ iwx_disable_interrupts(sc);
+ sc->sc_flags &= ~IWX_FLAG_USE_ICT;
+
+ iwx_disable_rx_dma(sc);
+ iwx_reset_rx_ring(sc, &sc->rxq);
+ for (i = 0; i < nitems(sc->txq); i++)
+ iwx_reset_tx_ring(sc, &sc->txq[i]);
+#if 0
+ /* XXX-THJ: Tidy up BA state on stop */
+ for (i = 0; i < IEEE80211_NUM_TID; i++) {
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+ ieee80211_delba_request(ic, ni, 0, 1, i);
+ }
+#endif
+ /* Make sure (redundant) we've released our request to stay awake */
+ IWX_CLRBITS(sc, IWX_CSR_GP_CNTRL,
+ IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (sc->sc_nic_locks > 0)
+ printf("%s: %d active NIC locks forcefully cleared\n",
+ DEVNAME(sc), sc->sc_nic_locks);
+ sc->sc_nic_locks = 0;
+
+ /* Stop the device, and put it in low power state */
+ iwx_apm_stop(sc);
+
+ /* Reset the on-board processor. */
+ IWX_SETBITS(sc, IWX_CSR_RESET, IWX_CSR_RESET_REG_FLAG_SW_RESET);
+ DELAY(5000);
+
+ /*
+ * Upon stop, the IVAR table gets erased, so msi-x won't
+ * work. This causes a bug in RF-KILL flows, since the interrupt
+ * that enables radio won't fire on the correct irq, and the
+ * driver won't be able to handle the interrupt.
+ * Configure the IVAR table again after reset.
+ */
+ iwx_conf_msix_hw(sc, 1);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * Clear the interrupt again.
+ */
+ iwx_disable_interrupts(sc);
+
+ /* Even though we stop the HW we still want the RF kill interrupt. */
+ iwx_enable_rfkill_int(sc);
+ iwx_check_rfkill(sc);
+
+ iwx_prepare_card_hw(sc);
+
+ iwx_ctxt_info_free_paging(sc);
+ iwx_dma_contig_free(&sc->pnvm_dma);
+}
+
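+/*
+ * Program MAC step/dash and radio (PHY) type/step/dash values, derived
+ * from the hardware revision and the firmware PHY configuration, into
+ * the hardware interface configuration register.
+ */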
+static void
+iwx_nic_config(struct iwx_softc *sc)
+{
+ uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
+ uint32_t mask, val, reg_val = 0;
+
+ radio_cfg_type = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_TYPE) >>
+ IWX_FW_PHY_CFG_RADIO_TYPE_POS;
+ radio_cfg_step = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_STEP) >>
+ IWX_FW_PHY_CFG_RADIO_STEP_POS;
+ radio_cfg_dash = (sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RADIO_DASH) >>
+ IWX_FW_PHY_CFG_RADIO_DASH_POS;
+
+ reg_val |= IWX_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
+ IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
+ reg_val |= IWX_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
+ IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
+
+ /* radio configuration */
+ reg_val |= radio_cfg_type << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
+ reg_val |= radio_cfg_step << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
+ reg_val |= radio_cfg_dash << IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
+
+ mask = IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH |
+ IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP |
+ IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP |
+ IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH |
+ IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE |
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI;
+
+ val = IWX_READ(sc, IWX_CSR_HW_IF_CONFIG_REG);
+ val &= ~mask;
+ val |= reg_val;
+ IWX_WRITE(sc, IWX_CSR_HW_IF_CONFIG_REG, val);
+}
+
+static int
+iwx_nic_rx_init(struct iwx_softc *sc)
+{
+ IWX_WRITE_1(sc, IWX_CSR_INT_COALESCING, IWX_HOST_INT_TIMEOUT_DEF);
+
+ /*
+ * We don't configure the RFH; the firmware will do that.
+ * Rx descriptors are set when firmware sends an ALIVE interrupt.
+ */
+ return 0;
+}
+
+static int
+iwx_nic_init(struct iwx_softc *sc)
+{
+ int err;
+
+ iwx_apm_init(sc);
+ if (sc->sc_device_family < IWX_DEVICE_FAMILY_AX210)
+ iwx_nic_config(sc);
+
+ err = iwx_nic_rx_init(sc);
+ if (err)
+ return err;
+
+ IWX_SETBITS(sc, IWX_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
+
+ return 0;
+}
+
+/* Map ieee80211_edca_ac categories to firmware Tx FIFO. */
+const uint8_t iwx_ac_to_tx_fifo[] = {
+ IWX_GEN2_EDCA_TX_FIFO_BE,
+ IWX_GEN2_EDCA_TX_FIFO_BK,
+ IWX_GEN2_EDCA_TX_FIFO_VI,
+ IWX_GEN2_EDCA_TX_FIFO_VO,
+};
+
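+/*
+ * Enable a Tx queue in firmware. Older firmware takes a
+ * iwx_tx_queue_cfg_cmd (version 0) with the DMA addresses inline;
+ * version 3 firmware uses the IWX_SCD_QUEUE_ADD operation of
+ * iwx_scd_queue_cfg_cmd. Either way the firmware replies with the
+ * assigned queue number and write pointer, which must match our
+ * static queue assignment.
+ */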
+static int
+iwx_enable_txq(struct iwx_softc *sc, int sta_id, int qid, int tid,
+ int num_slots)
+{
+ struct iwx_rx_packet *pkt;
+ struct iwx_tx_queue_cfg_rsp *resp;
+ struct iwx_tx_queue_cfg_cmd cmd_v0;
+ struct iwx_scd_queue_cfg_cmd cmd_v3;
+ struct iwx_host_cmd hcmd = {
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+ };
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ int err, fwqid, cmd_ver;
+ uint32_t wr_idx;
+ size_t resp_len;
+
+ DPRINTF(("%s: tid=%i\n", __func__, tid));
+ DPRINTF(("%s: qid=%i\n", __func__, qid));
+ iwx_reset_tx_ring(sc, ring);
+
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
+ memset(&cmd_v0, 0, sizeof(cmd_v0));
+ cmd_v0.sta_id = sta_id;
+ cmd_v0.tid = tid;
+ cmd_v0.flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd_v0.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
+ cmd_v0.byte_cnt_addr = htole64(ring->bc_tbl.paddr);
+ cmd_v0.tfdq_addr = htole64(ring->desc_dma.paddr);
+ hcmd.id = IWX_SCD_QUEUE_CFG;
+ hcmd.data[0] = &cmd_v0;
+ hcmd.len[0] = sizeof(cmd_v0);
+ } else if (cmd_ver == 3) {
+ memset(&cmd_v3, 0, sizeof(cmd_v3));
+ cmd_v3.operation = htole32(IWX_SCD_QUEUE_ADD);
+ cmd_v3.u.add.tfdq_dram_addr = htole64(ring->desc_dma.paddr);
+ cmd_v3.u.add.bc_dram_addr = htole64(ring->bc_tbl.paddr);
+ cmd_v3.u.add.cb_size = htole32(IWX_TFD_QUEUE_CB_SIZE(num_slots));
+ cmd_v3.u.add.flags = htole32(0);
+ cmd_v3.u.add.sta_mask = htole32(1 << sta_id);
+ cmd_v3.u.add.tid = tid;
+ hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ hcmd.data[0] = &cmd_v3;
+ hcmd.len[0] = sizeof(cmd_v3);
+ } else {
+ printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
+ DEVNAME(sc), cmd_ver);
+ return ENOTSUP;
+ }
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ if (resp_len != sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+ fwqid = le16toh(resp->queue_number);
+ wr_idx = le16toh(resp->write_pointer);
+
+ /* Unlike iwlwifi, we do not support dynamic queue ID assignment. */
+ if (fwqid != qid) {
+ DPRINTF(("%s: === fwqid != qid\n", __func__));
+ err = EIO;
+ goto out;
+ }
+
+ if (wr_idx != ring->cur_hw) {
+ DPRINTF(("%s: === (wr_idx != ring->cur_hw)\n", __func__));
+ err = EIO;
+ goto out;
+ }
+
+ sc->qenablemsk |= (1 << qid);
+ ring->tid = tid;
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
+static int
+iwx_disable_txq(struct iwx_softc *sc, int sta_id, int qid, uint8_t tid)
+{
+ struct iwx_rx_packet *pkt;
+ struct iwx_tx_queue_cfg_rsp *resp;
+ struct iwx_tx_queue_cfg_cmd cmd_v0;
+ struct iwx_scd_queue_cfg_cmd cmd_v3;
+ struct iwx_host_cmd hcmd = {
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+ };
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ int err, cmd_ver;
+
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN) {
+ memset(&cmd_v0, 0, sizeof(cmd_v0));
+ cmd_v0.sta_id = sta_id;
+ cmd_v0.tid = tid;
+ cmd_v0.flags = htole16(0); /* clear "queue enabled" flag */
+ cmd_v0.cb_size = htole32(0);
+ cmd_v0.byte_cnt_addr = htole64(0);
+ cmd_v0.tfdq_addr = htole64(0);
+ hcmd.id = IWX_SCD_QUEUE_CFG;
+ hcmd.data[0] = &cmd_v0;
+ hcmd.len[0] = sizeof(cmd_v0);
+ } else if (cmd_ver == 3) {
+ memset(&cmd_v3, 0, sizeof(cmd_v3));
+ cmd_v3.operation = htole32(IWX_SCD_QUEUE_REMOVE);
+ cmd_v3.u.remove.sta_mask = htole32(1 << sta_id);
+ cmd_v3.u.remove.tid = tid;
+ hcmd.id = IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ hcmd.data[0] = &cmd_v3;
+ hcmd.len[0] = sizeof(cmd_v3);
+ } else {
+ printf("%s: unsupported SCD_QUEUE_CFG command version %d\n",
+ DEVNAME(sc), cmd_ver);
+ return ENOTSUP;
+ }
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ sc->qenablemsk &= ~(1 << qid);
+ iwx_reset_tx_ring(sc, ring);
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
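+/*
+ * Final setup after the firmware's ALIVE notification: reset the ICT
+ * interrupt table and derive the rate_n_flags format version from the
+ * Tx command version the firmware advertises (versions above 6 use
+ * the version 2 rate_n_flags layout).
+ */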
+static void
+iwx_post_alive(struct iwx_softc *sc)
+{
+ int txcmd_ver;
+
+ iwx_ict_reset(sc);
+
+	txcmd_ver = iwx_lookup_notif_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
+ if (txcmd_ver != IWX_FW_CMD_VER_UNKNOWN && txcmd_ver > 6)
+ sc->sc_rate_n_flags_version = 2;
+ else
+ sc->sc_rate_n_flags_version = 1;
+
+ txcmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_TX_CMD);
+}
+
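+/*
+ * Ask the firmware to reserve time on the channel ("session
+ * protection") for the given duration in time units (TU), e.g. to
+ * allow an association exchange to complete undisturbed.
+ */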
+static int
+iwx_schedule_session_protection(struct iwx_softc *sc, struct iwx_node *in,
+ uint32_t duration_tu)
+{
+
+ struct iwx_session_prot_cmd cmd = {
+ .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color)),
+ .action = htole32(IWX_FW_CTXT_ACTION_ADD),
+ .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = htole32(duration_tu),
+ };
+ uint32_t cmd_id;
+ int err;
+
+ cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
+ err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+ if (!err)
+ sc->sc_flags |= IWX_FLAG_TE_ACTIVE;
+ return err;
+}
+
+static void
+iwx_unprotect_session(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct iwx_session_prot_cmd cmd = {
+ .id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color)),
+ .action = htole32(IWX_FW_CTXT_ACTION_REMOVE),
+ .conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC),
+ .duration_tu = 0,
+ };
+ uint32_t cmd_id;
+
+ /* Do nothing if the time event has already ended. */
+ if ((sc->sc_flags & IWX_FLAG_TE_ACTIVE) == 0)
+ return;
+
+ cmd_id = iwx_cmd_id(IWX_SESSION_PROTECTION_CMD, IWX_MAC_CONF_GROUP, 0);
+ if (iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd) == 0)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+}
+
+/*
+ * NVM read access and content parsing. We do not support
+ * external NVM or writing NVM.
+ */
+
+static uint8_t
+iwx_fw_valid_tx_ant(struct iwx_softc *sc)
+{
+ uint8_t tx_ant;
+
+ tx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_TX_CHAIN)
+ >> IWX_FW_PHY_CFG_TX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_tx_ant)
+ tx_ant &= sc->sc_nvm.valid_tx_ant;
+
+ return tx_ant;
+}
+
+static uint8_t
+iwx_fw_valid_rx_ant(struct iwx_softc *sc)
+{
+ uint8_t rx_ant;
+
+ rx_ant = ((sc->sc_fw_phy_config & IWX_FW_PHY_CFG_RX_CHAIN)
+ >> IWX_FW_PHY_CFG_RX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_rx_ant)
+ rx_ant &= sc->sc_nvm.valid_rx_ant;
+
+ return rx_ant;
+}
+
+static void
+iwx_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
+ struct ieee80211_channel chans[])
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_nvm_data *data = &sc->sc_nvm;
+ uint8_t bands[IEEE80211_MODE_BYTES];
+ const uint8_t *nvm_channels;
+ uint32_t ch_flags;
+ int ch_idx, nchan;
+
+ if (sc->sc_uhb_supported) {
+ nchan = nitems(iwx_nvm_channels_uhb);
+ nvm_channels = iwx_nvm_channels_uhb;
+ } else {
+ nchan = nitems(iwx_nvm_channels_8000);
+ nvm_channels = iwx_nvm_channels_8000;
+ }
+
+	/* 2.4GHz; 1-13: 11b/g channels. */
+ if (!data->sku_cap_band_24GHz_enable)
+ goto band_5;
+
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11B);
+ setbit(bands, IEEE80211_MODE_11G);
+ setbit(bands, IEEE80211_MODE_11NG);
+ for (ch_idx = 0;
+ ch_idx < IWX_NUM_2GHZ_CHANNELS && ch_idx < nchan;
+ ch_idx++) {
+
+ uint32_t nflags = 0;
+ int cflags = 0;
+
+ if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4) {
+ ch_flags = le32_to_cpup(
+ sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
+ } else {
+ ch_flags = le16_to_cpup(
+ sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
+ }
+ if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
+ continue;
+
+ if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
+ cflags |= NET80211_CBW_FLAG_HT40;
+
+ /* XXX-BZ nflags RADAR/DFS/INDOOR */
+
+ /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
+ nvm_channels[ch_idx],
+ ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_B),
+ /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
+ nflags, bands, cflags);
+ }
+
+band_5:
+	/* 5GHz */
+	if (!data->sku_cap_band_52GHz_enable)
+		goto band_6;
+
+	memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11A);
+ setbit(bands, IEEE80211_MODE_11NA);
+ setbit(bands, IEEE80211_MODE_VHT_5GHZ);
+
+ for (ch_idx = IWX_NUM_2GHZ_CHANNELS;
+ ch_idx < (IWX_NUM_2GHZ_CHANNELS + IWX_NUM_5GHZ_CHANNELS) && ch_idx < nchan;
+ ch_idx++) {
+ uint32_t nflags = 0;
+ int cflags = 0;
+
+ if (sc->sc_rsp_vers == IWX_FBSD_RSP_V4)
+ ch_flags = le32_to_cpup(
+ sc->sc_rsp_info.rsp_v4.regulatory.channel_profile + ch_idx);
+ else
+ ch_flags = le16_to_cpup(
+ sc->sc_rsp_info.rsp_v3.regulatory.channel_profile + ch_idx);
+
+ if ((ch_flags & IWX_NVM_CHANNEL_VALID) == 0)
+ continue;
+
+ if ((ch_flags & IWX_NVM_CHANNEL_40MHZ) != 0)
+ cflags |= NET80211_CBW_FLAG_HT40;
+ if ((ch_flags & IWX_NVM_CHANNEL_80MHZ) != 0)
+ cflags |= NET80211_CBW_FLAG_VHT80;
+ if ((ch_flags & IWX_NVM_CHANNEL_160MHZ) != 0)
+ cflags |= NET80211_CBW_FLAG_VHT160;
+
+ /* XXX-BZ nflags RADAR/DFS/INDOOR */
+
+ /* error = */ ieee80211_add_channel_cbw(chans, maxchans, nchans,
+ nvm_channels[ch_idx],
+ ieee80211_ieee2mhz(nvm_channels[ch_idx], IEEE80211_CHAN_A),
+ /* max_power IWL_DEFAULT_MAX_TX_POWER */ 22,
+ nflags, bands, cflags);
+ }
+band_6:
+ /* 6GHz one day ... */
+ return;
+}
+
+static int
+iwx_mimo_enabled(struct iwx_softc *sc)
+{
+
+ return !sc->sc_nvm.sku_cap_mimo_disable;
+}
+
+static void
+iwx_init_reorder_buffer(struct iwx_reorder_buffer *reorder_buf,
+ uint16_t ssn, uint16_t buf_size)
+{
+ reorder_buf->head_sn = ssn;
+ reorder_buf->num_stored = 0;
+ reorder_buf->buf_size = buf_size;
+ reorder_buf->last_amsdu = 0;
+ reorder_buf->last_sub_index = 0;
+ reorder_buf->removed = 0;
+ reorder_buf->valid = 0;
+ reorder_buf->consec_oldsn_drops = 0;
+ reorder_buf->consec_oldsn_ampdu_gp2 = 0;
+ reorder_buf->consec_oldsn_prev_drop = 0;
+}
+
+static void
+iwx_clear_reorder_buffer(struct iwx_softc *sc, struct iwx_rxba_data *rxba)
+{
+ struct iwx_reorder_buffer *reorder_buf = &rxba->reorder_buf;
+
+ reorder_buf->removed = 1;
+ rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+}
+
+#define IWX_MAX_RX_BA_SESSIONS 16
+
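+/* Look up an active Rx block ack session by traffic identifier (TID). */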
+static struct iwx_rxba_data *
+iwx_find_rxba_data(struct iwx_softc *sc, uint8_t tid)
+{
+ int i;
+
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ if (sc->sc_rxba_data[i].baid ==
+ IWX_RX_REORDER_DATA_INVALID_BAID)
+ continue;
+ if (sc->sc_rxba_data[i].tid == tid)
+ return &sc->sc_rxba_data[i];
+ }
+
+ return NULL;
+}
+
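+/*
+ * Configure an Rx block ack session in firmware, either allocating a
+ * new block ack ID (BAID) or removing an existing one. The layout of
+ * the removal command depends on the advertised command version.
+ */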
+static int
+iwx_sta_rx_agg_baid_cfg_cmd(struct iwx_softc *sc, struct ieee80211_node *ni,
+ uint8_t tid, uint16_t ssn, uint16_t winsize, int timeout_val, int start,
+ uint8_t *baid)
+{
+ struct iwx_rx_baid_cfg_cmd cmd;
+ uint32_t new_baid = 0;
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (start) {
+ cmd.action = IWX_RX_BAID_ACTION_ADD;
+ cmd.alloc.sta_id_mask = htole32(1 << IWX_STATION_ID);
+ cmd.alloc.tid = tid;
+ cmd.alloc.ssn = htole16(ssn);
+ cmd.alloc.win_size = htole16(winsize);
+ } else {
+ struct iwx_rxba_data *rxba;
+
+ rxba = iwx_find_rxba_data(sc, tid);
+ if (rxba == NULL)
+ return ENOENT;
+ *baid = rxba->baid;
+
+ cmd.action = IWX_RX_BAID_ACTION_REMOVE;
+ if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RX_BAID_ALLOCATION_CONFIG_CMD) == 1) {
+ cmd.remove_v1.baid = rxba->baid;
+ } else {
+ cmd.remove.sta_id_mask = htole32(1 << IWX_STATION_ID);
+ cmd.remove.tid = tid;
+ }
+ }
+
+ err = iwx_send_cmd_pdu_status(sc, IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_RX_BAID_ALLOCATION_CONFIG_CMD), sizeof(cmd), &cmd, &new_baid);
+ if (err)
+ return err;
+
+ if (start) {
+ if (new_baid >= nitems(sc->sc_rxba_data))
+ return ERANGE;
+ *baid = new_baid;
+ }
+
+ return 0;
+}
+
+static void
+iwx_sta_rx_agg(struct iwx_softc *sc, struct ieee80211_node *ni, uint8_t tid,
+ uint16_t ssn, uint16_t winsize, int timeout_val, int start)
+{
+ int err;
+ struct iwx_rxba_data *rxba = NULL;
+ uint8_t baid = 0;
+
+ if (start && sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS) {
+ return;
+ }
+
+ if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) {
+ err = iwx_sta_rx_agg_baid_cfg_cmd(sc, ni, tid, ssn, winsize,
+ timeout_val, start, &baid);
+ } else {
+ panic("sta_rx_agg unsupported hw");
+ }
+ if (err) {
+ DPRINTF(("%s: iwx_sta_rx_agg_sta err=%i\n", __func__, err));
+ return;
+ } else
+ DPRINTF(("%s: iwx_sta_rx_agg_sta success\n", __func__));
+
+ rxba = &sc->sc_rxba_data[baid];
+
+ /* Deaggregation is done in hardware. */
+ if (start) {
+ if (rxba->baid != IWX_RX_REORDER_DATA_INVALID_BAID) {
+ return;
+ }
+ rxba->sta_id = IWX_STATION_ID;
+ rxba->tid = tid;
+ rxba->baid = baid;
+ rxba->timeout = timeout_val;
+ getmicrouptime(&rxba->last_rx);
+ iwx_init_reorder_buffer(&rxba->reorder_buf, ssn,
+ winsize);
+ if (timeout_val != 0) {
+ DPRINTF(("%s: timeout_val != 0\n", __func__));
+ return;
+ }
+ } else
+ iwx_clear_reorder_buffer(sc, rxba);
+
+ if (start) {
+ sc->sc_rx_ba_sessions++;
+ } else if (sc->sc_rx_ba_sessions > 0)
+ sc->sc_rx_ba_sessions--;
+}
+
+static void
+iwx_sta_tx_agg_start(struct iwx_softc *sc, struct ieee80211_node *ni,
+ uint8_t tid)
+{
+ int err, qid;
+
+ qid = sc->aggqid[tid];
+ if (qid == 0) {
+ /* Firmware should pick the next unused Tx queue. */
+ qid = fls(sc->qenablemsk);
+ }
+
+ DPRINTF(("%s: qid=%i\n", __func__, qid));
+
+ /*
+ * Simply enable the queue.
+ * Firmware handles Tx Ba session setup and teardown.
+ */
+ if ((sc->qenablemsk & (1 << qid)) == 0) {
+ if (!iwx_nic_lock(sc)) {
+ return;
+ }
+ err = iwx_enable_txq(sc, IWX_STATION_ID, qid, tid,
+ IWX_TX_RING_COUNT);
+ iwx_nic_unlock(sc);
+ if (err) {
+ printf("%s: could not enable Tx queue %d "
+ "(error %d)\n", DEVNAME(sc), qid, err);
+ return;
+ }
+ }
+ ni->ni_tx_ampdu[tid].txa_flags = IEEE80211_AGGR_RUNNING;
+ DPRINTF(("%s: will set sc->aggqid[%i]=%i\n", __func__, tid, qid));
+ sc->aggqid[tid] = qid;
+}
+
+static void
+iwx_ba_rx_task(void *arg, int npending __unused)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ int tid;
+
+ IWX_LOCK(sc);
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ break;
+ if (sc->ba_rx.start_tidmask & (1 << tid)) {
+ struct iwx_rx_ba *ba = &sc->ni_rx_ba[tid];
+ DPRINTF(("%s: ba->ba_flags=%x\n", __func__,
+ ba->ba_flags));
+ if (ba->ba_flags == IWX_BA_DONE) {
+ DPRINTF(("%s: ampdu for tid %i already added\n",
+ __func__, tid));
+ break;
+ }
+
+ DPRINTF(("%s: ampdu rx start for tid %i\n", __func__,
+ tid));
+ iwx_sta_rx_agg(sc, ni, tid, ba->ba_winstart,
+ ba->ba_winsize, ba->ba_timeout_val, 1);
+ sc->ba_rx.start_tidmask &= ~(1 << tid);
+ ba->ba_flags = IWX_BA_DONE;
+ } else if (sc->ba_rx.stop_tidmask & (1 << tid)) {
+ iwx_sta_rx_agg(sc, ni, tid, 0, 0, 0, 0);
+ sc->ba_rx.stop_tidmask &= ~(1 << tid);
+ }
+ }
+ IWX_UNLOCK(sc);
+}
+
+static void
+iwx_ba_tx_task(void *arg, int npending __unused)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ int tid;
+
+ IWX_LOCK(sc);
+ for (tid = 0; tid < IWX_MAX_TID_COUNT; tid++) {
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ break;
+ if (sc->ba_tx.start_tidmask & (1 << tid)) {
+ DPRINTF(("%s: ampdu tx start for tid %i\n", __func__,
+ tid));
+ iwx_sta_tx_agg_start(sc, ni, tid);
+ sc->ba_tx.start_tidmask &= ~(1 << tid);
+ sc->sc_flags |= IWX_FLAG_AMPDUTX;
+ }
+ }
+
+ IWX_UNLOCK(sc);
+}
+
+static void
+iwx_set_mac_addr_from_csr(struct iwx_softc *sc, struct iwx_nvm_data *data)
+{
+ uint32_t mac_addr0, mac_addr1;
+
+ memset(data->hw_addr, 0, sizeof(data->hw_addr));
+
+ if (!iwx_nic_lock(sc))
+ return;
+
+ mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_STRAP(sc)));
+ mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_STRAP(sc)));
+
+ iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+
+ /* If OEM fused a valid address, use it instead of the one in OTP. */
+ if (iwx_is_valid_mac_addr(data->hw_addr)) {
+ iwx_nic_unlock(sc);
+ return;
+ }
+
+ mac_addr0 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR0_OTP(sc)));
+ mac_addr1 = htole32(IWX_READ(sc, IWX_CSR_MAC_ADDR1_OTP(sc)));
+
+ iwx_flip_hw_address(mac_addr0, mac_addr1, data->hw_addr);
+
+ iwx_nic_unlock(sc);
+}
+
+static int
+iwx_is_valid_mac_addr(const uint8_t *addr)
+{
+ static const uint8_t reserved_mac[] = {
+ 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+ };
+
+ return (memcmp(reserved_mac, addr, ETHER_ADDR_LEN) != 0 &&
+ memcmp(etherbroadcastaddr, addr, sizeof(etherbroadcastaddr)) != 0 &&
+ memcmp(etheranyaddr, addr, sizeof(etheranyaddr)) != 0 &&
+ !ETHER_IS_MULTICAST(addr));
+}
+
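+/*
+ * The MAC address is read from two 32-bit registers which hold the
+ * bytes in reversed order; flip them into canonical wire format.
+ */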
+static void
+iwx_flip_hw_address(uint32_t mac_addr0, uint32_t mac_addr1, uint8_t *dest)
+{
+ const uint8_t *hw_addr;
+
+ hw_addr = (const uint8_t *)&mac_addr0;
+ dest[0] = hw_addr[3];
+ dest[1] = hw_addr[2];
+ dest[2] = hw_addr[1];
+ dest[3] = hw_addr[0];
+
+ hw_addr = (const uint8_t *)&mac_addr1;
+ dest[4] = hw_addr[1];
+ dest[5] = hw_addr[0];
+}
+
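+/*
+ * Fetch NVM content via the firmware's NVM_GET_INFO command and fill
+ * in our NVM data structure: the MAC address (read from CSRs), the
+ * MAC/PHY SKU capability flags, and the regulatory information.
+ */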
+static int
+iwx_nvm_get(struct iwx_softc *sc)
+{
+ struct iwx_nvm_get_info cmd = {};
+ struct iwx_nvm_data *nvm = &sc->sc_nvm;
+ struct iwx_host_cmd hcmd = {
+ .flags = IWX_CMD_WANT_RESP | IWX_CMD_SEND_IN_RFKILL,
+ .data = { &cmd, },
+ .len = { sizeof(cmd) },
+ .id = IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_GET_INFO)
+ };
+ int err = 0;
+ uint32_t mac_flags;
+ /*
+ * All the values in iwx_nvm_get_info_rsp v4 are the same as
+ * in v3, except for the channel profile part of the
+ * regulatory. So we can just access the new struct, with the
+ * exception of the latter.
+ */
+ struct iwx_nvm_get_info_rsp *rsp;
+ struct iwx_nvm_get_info_rsp_v3 *rsp_v3;
+ int v4 = isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REGULATORY_NVM_INFO);
+ size_t resp_len = v4 ? sizeof(*rsp) : sizeof(*rsp_v3);
+
+ hcmd.resp_pkt_len = sizeof(struct iwx_rx_packet) + resp_len;
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err) {
+ printf("%s: failed to send cmd (error %d)", __func__, err);
+ return err;
+ }
+
+ if (iwx_rx_packet_payload_len(hcmd.resp_pkt) != resp_len) {
+ printf("%s: iwx_rx_packet_payload_len=%d\n", __func__,
+ iwx_rx_packet_payload_len(hcmd.resp_pkt));
+ printf("%s: resp_len=%zu\n", __func__, resp_len);
+ err = EIO;
+ goto out;
+ }
+
+ memset(nvm, 0, sizeof(*nvm));
+
+ iwx_set_mac_addr_from_csr(sc, nvm);
+ if (!iwx_is_valid_mac_addr(nvm->hw_addr)) {
+ printf("%s: no valid mac address was found\n", DEVNAME(sc));
+ err = EINVAL;
+ goto out;
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+
+ /* Initialize general data */
+ nvm->nvm_version = le16toh(rsp->general.nvm_version);
+ nvm->n_hw_addrs = rsp->general.n_hw_addrs;
+
+ /* Initialize MAC sku data */
+ mac_flags = le32toh(rsp->mac_sku.mac_sku_flags);
+ nvm->sku_cap_11ac_enable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
+ nvm->sku_cap_11n_enable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
+ nvm->sku_cap_11ax_enable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
+ nvm->sku_cap_band_24GHz_enable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
+ nvm->sku_cap_band_52GHz_enable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED);
+ nvm->sku_cap_mimo_disable =
+ !!(mac_flags & IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED);
+
+ /* Initialize PHY sku data */
+ nvm->valid_tx_ant = (uint8_t)le32toh(rsp->phy_sku.tx_chains);
+ nvm->valid_rx_ant = (uint8_t)le32toh(rsp->phy_sku.rx_chains);
+
+ if (le32toh(rsp->regulatory.lar_enabled) &&
+ isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_SUPPORT)) {
+ nvm->lar_enabled = 1;
+ }
+
+ memcpy(&sc->sc_rsp_info, rsp, resp_len);
+ if (v4) {
+ sc->sc_rsp_vers = IWX_FBSD_RSP_V4;
+ } else {
+ sc->sc_rsp_vers = IWX_FBSD_RSP_V3;
+ }
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
+static int
+iwx_load_firmware(struct iwx_softc *sc)
+{
+ struct iwx_fw_sects *fws;
+ int err;
+
+	IWX_ASSERT_LOCKED(sc);
+
+ sc->sc_uc.uc_intr = 0;
+ sc->sc_uc.uc_ok = 0;
+
+ fws = &sc->sc_fw.fw_sects[IWX_UCODE_TYPE_REGULAR];
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ err = iwx_ctxt_info_gen3_init(sc, fws);
+ else
+ err = iwx_ctxt_info_init(sc, fws);
+ if (err) {
+ printf("%s: could not init context info\n", DEVNAME(sc));
+ return err;
+ }
+
+ /* wait for the firmware to load */
+ err = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwxuc", hz);
+ if (err || !sc->sc_uc.uc_ok) {
+ printf("%s: firmware upload failed, %d\n", DEVNAME(sc), err);
+ iwx_ctxt_info_free_paging(sc);
+ }
+
+ iwx_dma_contig_free(&sc->iml_dma);
+ iwx_ctxt_info_free_fw_img(sc);
+
+ if (!sc->sc_uc.uc_ok)
+ return EINVAL;
+
+ return err;
+}
+
+static int
+iwx_start_fw(struct iwx_softc *sc)
+{
+ int err;
+
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+
+ iwx_disable_interrupts(sc);
+
+ /* make sure rfkill handshake bits are cleared */
+ IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR, IWX_CSR_UCODE_SW_BIT_RFKILL);
+ IWX_WRITE(sc, IWX_CSR_UCODE_DRV_GP1_CLR,
+ IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable firmware load interrupt */
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+
+ err = iwx_nic_init(sc);
+ if (err) {
+ printf("%s: unable to init nic\n", DEVNAME(sc));
+ return err;
+ }
+
+ iwx_enable_fwload_interrupt(sc);
+
+ return iwx_load_firmware(sc);
+}
+
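+/*
+ * Parse one PNVM image section: match the embedded MAC/RF identifiers
+ * against this device, concatenate the matching data chunks, and load
+ * the result into DMA memory for use by the firmware.
+ */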
+static int
+iwx_pnvm_handle_section(struct iwx_softc *sc, const uint8_t *data,
+ size_t len)
+{
+ const struct iwx_ucode_tlv *tlv;
+ uint32_t sha1 = 0;
+ uint16_t mac_type = 0, rf_id = 0;
+ uint8_t *pnvm_data = NULL, *tmp;
+ int hw_match = 0;
+ uint32_t size = 0;
+ int err;
+
+ while (len >= sizeof(*tlv)) {
+ uint32_t tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32toh(tlv->length);
+ tlv_type = le32toh(tlv->type);
+
+ if (len < tlv_len) {
+ printf("%s: invalid TLV len: %zd/%u\n",
+ DEVNAME(sc), len, tlv_len);
+ err = EINVAL;
+ goto out;
+ }
+
+ data += sizeof(*tlv);
+
+ switch (tlv_type) {
+ case IWX_UCODE_TLV_PNVM_VERSION:
+ if (tlv_len < sizeof(uint32_t))
+ break;
+
+ sha1 = le32_to_cpup((const uint32_t *)data);
+ break;
+ case IWX_UCODE_TLV_HW_TYPE:
+ if (tlv_len < 2 * sizeof(uint16_t))
+ break;
+
+ if (hw_match)
+ break;
+
+ mac_type = le16_to_cpup((const uint16_t *)data);
+ rf_id = le16_to_cpup((const uint16_t *)(data +
+ sizeof(uint16_t)));
+
+ if (mac_type == IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev) &&
+ rf_id == IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id))
+ hw_match = 1;
+ break;
+ case IWX_UCODE_TLV_SEC_RT: {
+ const struct iwx_pnvm_section *section;
+ uint32_t data_len;
+
+ section = (const void *)data;
+ data_len = tlv_len - sizeof(*section);
+
+ /* TODO: remove, this is a deprecated separator */
+ if (le32_to_cpup((const uint32_t *)data) == 0xddddeeee)
+ break;
+
+			/* M_WAITOK allocations never return NULL. */
+			tmp = malloc(size + data_len, M_DEVBUF,
+			    M_WAITOK | M_ZERO);
+			/*
+			 * On the first pass pnvm_data is NULL and size is 0,
+			 * so this memcpy() copies nothing.
+			 */
+ memcpy(tmp, pnvm_data, size);
+ memcpy(tmp + size, section->data, data_len);
+ free(pnvm_data, M_DEVBUF);
+ pnvm_data = tmp;
+ size += data_len;
+ break;
+ }
+ case IWX_UCODE_TLV_PNVM_SKU:
+ /* New PNVM section started, stop parsing. */
+ goto done;
+ default:
+ break;
+ }
+
+ if (roundup(tlv_len, 4) > len)
+ break;
+ len -= roundup(tlv_len, 4);
+ data += roundup(tlv_len, 4);
+ }
+done:
+ if (!hw_match || size == 0) {
+ err = ENOENT;
+ goto out;
+ }
+
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->pnvm_dma, size, 1);
+ if (err) {
+ printf("%s: could not allocate DMA memory for PNVM\n",
+ DEVNAME(sc));
+ err = ENOMEM;
+ goto out;
+ }
+ memcpy(sc->pnvm_dma.vaddr, pnvm_data, size);
+ iwx_ctxt_info_gen3_set_pnvm(sc);
+ sc->sc_pnvm_ver = sha1;
+out:
+ free(pnvm_data, M_DEVBUF);
+ return err;
+}
+
+static int
+iwx_pnvm_parse(struct iwx_softc *sc, const uint8_t *data, size_t len)
+{
+ const struct iwx_ucode_tlv *tlv;
+
+ while (len >= sizeof(*tlv)) {
+ uint32_t tlv_len, tlv_type;
+
+ len -= sizeof(*tlv);
+ tlv = (const void *)data;
+
+ tlv_len = le32toh(tlv->length);
+ tlv_type = le32toh(tlv->type);
+
+ if (len < tlv_len || roundup(tlv_len, 4) > len)
+ return EINVAL;
+
+ if (tlv_type == IWX_UCODE_TLV_PNVM_SKU) {
+ const struct iwx_sku_id *sku_id =
+ (const void *)(data + sizeof(*tlv));
+
+ data += sizeof(*tlv) + roundup(tlv_len, 4);
+ len -= roundup(tlv_len, 4);
+
+ if (sc->sc_sku_id[0] == le32toh(sku_id->data[0]) &&
+ sc->sc_sku_id[1] == le32toh(sku_id->data[1]) &&
+ sc->sc_sku_id[2] == le32toh(sku_id->data[2]) &&
+ iwx_pnvm_handle_section(sc, data, len) == 0)
+ return 0;
+ } else {
+ data += sizeof(*tlv) + roundup(tlv_len, 4);
+ len -= roundup(tlv_len, 4);
+ }
+ }
+
+ return ENOENT;
+}
+
+/* Make AX210 firmware loading context point at PNVM image in DMA memory. */
+static void
+iwx_ctxt_info_gen3_set_pnvm(struct iwx_softc *sc)
+{
+ struct iwx_prph_scratch *prph_scratch;
+ struct iwx_prph_scratch_ctrl_cfg *prph_sc_ctrl;
+
+ prph_scratch = sc->prph_scratch_dma.vaddr;
+ prph_sc_ctrl = &prph_scratch->ctrl_cfg;
+
+ prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = htole64(sc->pnvm_dma.paddr);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size = htole32(sc->pnvm_dma.size);
+
+ bus_dmamap_sync(sc->sc_dmat, sc->pnvm_dma.map, BUS_DMASYNC_PREWRITE);
+}
+
+/*
+ * Load platform-NVM (non-volatile-memory) data from the filesystem.
+ * This data apparently contains regulatory information and affects device
+ * channel configuration.
+ * The SKU of AX210 devices tells us which PNVM file section is needed.
+ * Pre-AX210 devices store NVM data onboard.
+ */
+static int
+iwx_load_pnvm(struct iwx_softc *sc)
+{
+ const int wait_flags = IWX_PNVM_COMPLETE;
+ int err = 0;
+ const struct firmware *pnvm;
+
+ if (sc->sc_sku_id[0] == 0 &&
+ sc->sc_sku_id[1] == 0 &&
+ sc->sc_sku_id[2] == 0)
+ return 0;
+
+ if (sc->sc_pnvm_name) {
+ if (sc->pnvm_dma.vaddr == NULL) {
+ IWX_UNLOCK(sc);
+ pnvm = firmware_get(sc->sc_pnvm_name);
+ if (pnvm == NULL) {
+ printf("%s: could not read %s (error %d)\n",
+ DEVNAME(sc), sc->sc_pnvm_name, err);
+ IWX_LOCK(sc);
+ return EINVAL;
+ }
+ sc->sc_pnvm = pnvm;
+
+ err = iwx_pnvm_parse(sc, pnvm->data, pnvm->datasize);
+ IWX_LOCK(sc);
+ if (err && err != ENOENT) {
+ return EINVAL;
+ }
+ } else
+ iwx_ctxt_info_gen3_set_pnvm(sc);
+ }
+
+ if (!iwx_nic_lock(sc)) {
+ return EBUSY;
+ }
+
+ /*
+ * If we don't have a platform NVM file simply ask firmware
+ * to proceed without it.
+ */
+
+ iwx_write_umac_prph(sc, IWX_UREG_DOORBELL_TO_ISR6,
+ IWX_UREG_DOORBELL_TO_ISR6_PNVM);
+
+ /* Wait for the pnvm complete notification from firmware. */
+ while ((sc->sc_init_complete & wait_flags) != wait_flags) {
+ err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
+ if (err)
+ break;
+ }
+
+ iwx_nic_unlock(sc);
+
+ return err;
+}
+
+static int
+iwx_send_tx_ant_cfg(struct iwx_softc *sc, uint8_t valid_tx_ant)
+{
+ struct iwx_tx_ant_cfg_cmd tx_ant_cmd = {
+ .valid = htole32(valid_tx_ant),
+ };
+
+ return iwx_send_cmd_pdu(sc, IWX_TX_ANT_CONFIGURATION_CMD,
+ 0, sizeof(tx_ant_cmd), &tx_ant_cmd);
+}
+
+static int
+iwx_send_phy_cfg_cmd(struct iwx_softc *sc)
+{
+ struct iwx_phy_cfg_cmd phy_cfg_cmd;
+
+ phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
+ phy_cfg_cmd.calib_control.event_trigger =
+ sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].event_trigger;
+ phy_cfg_cmd.calib_control.flow_trigger =
+ sc->sc_default_calib[IWX_UCODE_TYPE_REGULAR].flow_trigger;
+
+ return iwx_send_cmd_pdu(sc, IWX_PHY_CONFIGURATION_CMD, 0,
+ sizeof(phy_cfg_cmd), &phy_cfg_cmd);
+}
+
+static int
+iwx_send_dqa_cmd(struct iwx_softc *sc)
+{
+ struct iwx_dqa_enable_cmd dqa_cmd = {
+ .cmd_queue = htole32(IWX_DQA_CMD_QUEUE),
+ };
+ uint32_t cmd_id;
+
+ cmd_id = iwx_cmd_id(IWX_DQA_ENABLE_CMD, IWX_DATA_PATH_GROUP, 0);
+ return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
+}
+
+static int
+iwx_load_ucode_wait_alive(struct iwx_softc *sc)
+{
+ int err;
+
+ IWX_UNLOCK(sc);
+ err = iwx_read_firmware(sc);
+ IWX_LOCK(sc);
+ if (err)
+ return err;
+
+ err = iwx_start_fw(sc);
+ if (err)
+ return err;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ err = iwx_load_pnvm(sc);
+ if (err)
+ return err;
+ }
+
+ iwx_post_alive(sc);
+
+ return 0;
+}
+
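+/*
+ * Run the firmware in its initialization flow: load the ucode, send
+ * the extended init configuration, complete NVM access, and wait for
+ * the firmware's init complete notification. Optionally read the NVM
+ * contents afterwards.
+ */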
+static int
+iwx_run_init_mvm_ucode(struct iwx_softc *sc, int readnvm)
+{
+ const int wait_flags = IWX_INIT_COMPLETE;
+ struct iwx_nvm_access_complete_cmd nvm_complete = {};
+ struct iwx_init_extended_cfg_cmd init_cfg = {
+ .init_flags = htole32(IWX_INIT_NVM),
+ };
+
+ int err;
+
+ if ((sc->sc_flags & IWX_FLAG_RFKILL) && !readnvm) {
+ printf("%s: radio is disabled by hardware switch\n",
+ DEVNAME(sc));
+ return EPERM;
+ }
+
+ sc->sc_init_complete = 0;
+ err = iwx_load_ucode_wait_alive(sc);
+ if (err) {
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: failed to load init firmware\n", DEVNAME(sc));
+ return err;
+ } else {
+ IWX_DPRINTF(sc, IWX_DEBUG_FIRMWARE_TLV,
+ "%s: successfully loaded init firmware\n", __func__);
+ }
+
+ /*
+ * Send init config command to mark that we are sending NVM
+ * access commands
+ */
+ err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+ IWX_INIT_EXTENDED_CFG_CMD), 0, sizeof(init_cfg), &init_cfg);
+ if (err) {
+ printf("%s: IWX_INIT_EXTENDED_CFG_CMD error=%d\n", __func__,
+ err);
+ return err;
+ }
+
+ err = iwx_send_cmd_pdu(sc, IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_ACCESS_COMPLETE), 0, sizeof(nvm_complete), &nvm_complete);
+ if (err) {
+ return err;
+ }
+
+ /* Wait for the init complete notification from the firmware. */
+ while ((sc->sc_init_complete & wait_flags) != wait_flags) {
+ err = msleep(&sc->sc_init_complete, &sc->sc_mtx, 0, "iwxinit", 2 * hz);
+ if (err) {
+ DPRINTF(("%s: will return err=%d\n", __func__, err));
+ return err;
+ } else {
+ DPRINTF(("%s: sc_init_complete == IWX_INIT_COMPLETE\n",
+ __func__));
+ }
+ }
+
+ if (readnvm) {
+ err = iwx_nvm_get(sc);
+ DPRINTF(("%s: err=%d\n", __func__, err));
+ if (err) {
+ printf("%s: failed to read nvm (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ } else {
+ DPRINTF(("%s: successfully read nvm\n", DEVNAME(sc)));
+ }
+ IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
+ }
+ return 0;
+}
+
+static int
+iwx_config_ltr(struct iwx_softc *sc)
+{
+ struct iwx_ltr_config_cmd cmd = {
+ .flags = htole32(IWX_LTR_CFG_FLAG_FEATURE_ENABLE),
+ };
+
+ if (!sc->sc_ltr_enabled)
+ return 0;
+
+ return iwx_send_cmd_pdu(sc, IWX_LTR_CONFIG, 0, sizeof(cmd), &cmd);
+}
+
+static void
+iwx_update_rx_desc(struct iwx_softc *sc, struct iwx_rx_ring *ring, int idx,
+ bus_dma_segment_t *seg)
+{
+ struct iwx_rx_data *data = &ring->data[idx];
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ struct iwx_rx_transfer_desc *desc = ring->desc;
+ desc[idx].rbid = htole16(idx & 0xffff);
+		desc[idx].addr = htole64(seg->ds_addr);
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+		((uint64_t *)ring->desc)[idx] = htole64(seg->ds_addr);
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_PREWRITE);
+ }
+}
+
+static int
+iwx_rx_addbuf(struct iwx_softc *sc, int size, int idx)
+{
+ struct iwx_rx_ring *ring = &sc->rxq;
+ struct iwx_rx_data *data = &ring->data[idx];
+ struct mbuf *m;
+ int err;
+ int fatal = 0;
+ bus_dma_segment_t seg;
+ int nsegs;
+
+ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
+ if (m == NULL)
+ return ENOBUFS;
+
+ if (data->m != NULL) {
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ fatal = 1;
+ }
+
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+ err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, &seg,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (err) {
+ /* XXX */
+ if (fatal)
+ panic("could not load RX mbuf");
+ m_freem(m);
+ return err;
+ }
+ data->m = m;
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
+
+ /* Update RX descriptor. */
+ iwx_update_rx_desc(sc, ring, idx, &seg);
+ return 0;
+}
+
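+/*
+ * Convert the per-antenna energy readings to dBm (0 means unavailable
+ * and maps to -256) and return the strongest one.
+ */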
+static int
+iwx_rxmq_get_signal_strength(struct iwx_softc *sc,
+ struct iwx_rx_mpdu_desc *desc)
+{
+ int energy_a, energy_b;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ energy_a = desc->v3.energy_a;
+ energy_b = desc->v3.energy_b;
+ } else {
+ energy_a = desc->v1.energy_a;
+ energy_b = desc->v1.energy_b;
+ }
+ energy_a = energy_a ? -energy_a : -256;
+ energy_b = energy_b ? -energy_b : -256;
+ return MAX(energy_a, energy_b);
+}
+
+static int
+iwx_rxmq_get_chains(struct iwx_softc *sc,
+ struct iwx_rx_mpdu_desc *desc)
+{
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ return ((desc->v3.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
+ IWX_RATE_MCS_ANT_POS);
+ else
+ return ((desc->v1.rate_n_flags & IWX_RATE_MCS_ANT_AB_MSK) >>
+ IWX_RATE_MCS_ANT_POS);
+}
+
+static void
+iwx_rx_rx_phy_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+ struct iwx_rx_data *data)
+{
+ struct iwx_rx_phy_info *phy_info = (void *)pkt->data;
+ struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
+ int qid = cmd_hdr->qid;
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
+ memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
+}
+
+/*
+ * Retrieve the average noise (in dBm) among receivers.
+ */
+static int
+iwx_get_noise(const struct iwx_statistics_rx_non_phy *stats)
+{
+ int i, total, nbant, noise;
+
+ total = nbant = noise = 0;
+ for (i = 0; i < 3; i++) {
+ noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
+ if (noise) {
+ total += noise;
+ nbant++;
+ }
+ }
+
+ /* There should be at least one antenna but check anyway. */
+ return (nbant == 0) ? -127 : (total / nbant) - 107;
+}
+
+#if 0
+int
+iwx_ccmp_decap(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+ struct ieee80211_rxinfo *rxi)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_key *k;
+ struct ieee80211_frame *wh;
+ uint64_t pn, *prsc;
+ uint8_t *ivp;
+ uint8_t tid;
+ int hdrlen, hasqos;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ hdrlen = ieee80211_get_hdrlen(wh);
+ ivp = (uint8_t *)wh + hdrlen;
+
+ /* find key for decryption */
+ k = ieee80211_get_rxkey(ic, m, ni);
+ if (k == NULL || k->k_cipher != IEEE80211_CIPHER_CCMP)
+ return 1;
+
+	/* Check that the ExtIV bit is set. */
+ if (!(ivp[3] & IEEE80211_WEP_EXTIV))
+ return 1;
+
+ hasqos = ieee80211_has_qos(wh);
+ tid = hasqos ? ieee80211_get_qos(wh) & IEEE80211_QOS_TID : 0;
+ prsc = &k->k_rsc[tid];
+
+ /* Extract the 48-bit PN from the CCMP header. */
+ pn = (uint64_t)ivp[0] |
+ (uint64_t)ivp[1] << 8 |
+ (uint64_t)ivp[4] << 16 |
+ (uint64_t)ivp[5] << 24 |
+ (uint64_t)ivp[6] << 32 |
+ (uint64_t)ivp[7] << 40;
+ if (rxi->rxi_flags & IEEE80211_RXI_HWDEC_SAME_PN) {
+ if (pn < *prsc) {
+ ic->ic_stats.is_ccmp_replays++;
+ return 1;
+ }
+ } else if (pn <= *prsc) {
+ ic->ic_stats.is_ccmp_replays++;
+ return 1;
+ }
+ /* Last seen packet number is updated in ieee80211_inputm(). */
+
+ /*
+ * Some firmware versions strip the MIC, and some don't. It is not
+ * clear which of the capability flags could tell us what to expect.
+ * For now, keep things simple and just leave the MIC in place if
+ * it is present.
+ *
+ * The IV will be stripped by ieee80211_inputm().
+ */
+ return 0;
+}
+#endif
+
+static int
+iwx_rx_hwdecrypt(struct iwx_softc *sc, struct mbuf *m, uint32_t rx_pkt_status)
+{
+ struct ieee80211_frame *wh;
+ int ret = 0;
+ uint8_t type, subtype;
+
+ wh = mtod(m, struct ieee80211_frame *);
+
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL) {
+ return 0;
+ }
+
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ if (IEEE80211_QOS_HAS_SEQ(wh) && (subtype & IEEE80211_FC0_SUBTYPE_NODATA)) {
+ return 0;
+ }
+
+ if (((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) !=
+ IEEE80211_FC0_TYPE_CTL)
+ && (wh->i_fc[1] & IEEE80211_FC1_PROTECTED)) {
+ if ((rx_pkt_status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) !=
+ IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC\n", __func__));
+ ret = 1;
+ goto out;
+ }
+ /* Check whether decryption was successful or not. */
+ if ((rx_pkt_status &
+ (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
+ IWX_RX_MPDU_RES_STATUS_MIC_OK)) !=
+ (IWX_RX_MPDU_RES_STATUS_DEC_DONE |
+ IWX_RX_MPDU_RES_STATUS_MIC_OK)) {
+ DPRINTF(("%s: not IWX_RX_MPDU_RES_STATUS_MIC_OK\n", __func__));
+ ret = 1;
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
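+/*
+ * Pass a received frame up to net80211: resolve the channel index to
+ * a channel, look up the receiving node, and attach radiotap data if
+ * a sniffer is active.
+ */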
+static void
+iwx_rx_frame(struct iwx_softc *sc, struct mbuf *m, int chanidx,
+ uint32_t rx_pkt_status, int is_shortpre, int rate_n_flags,
+ uint32_t device_timestamp, uint8_t rssi)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+
+ /*
+	 * We need to turn the hardware-provided channel index into a channel
+	 * number and then find it in our ic_channels array.
+ */
+ if (chanidx < 0 || chanidx >= nitems(ic->ic_channels)) {
+ /*
+ * OpenBSD points this at the ibss chan, which it defaults to
+ * channel 1 and then never touches again. Skip a step.
+ */
+ printf("iwx: %s:%d controlling chanidx to 1 (%d)\n", __func__, __LINE__, chanidx);
+ chanidx = 1;
+ }
+
+ int channel = chanidx;
+ for (int i = 0; i < ic->ic_nchans; i++) {
+ if (ic->ic_channels[i].ic_ieee == channel) {
+ chanidx = i;
+ }
+ }
+ ic->ic_curchan = &ic->ic_channels[chanidx];
+
+ wh = mtod(m, struct ieee80211_frame *);
+ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
+
+#if 0 /* XXX hw decrypt */
+ if ((rxi->rxi_flags & IEEE80211_RXI_HWDEC) &&
+ iwx_ccmp_decap(sc, m, ni, rxi) != 0) {
+ m_freem(m);
+ ieee80211_release_node(ic, ni);
+ return;
+ }
+#endif
+ if (ieee80211_radiotap_active_vap(vap)) {
+ struct iwx_rx_radiotap_header *tap = &sc->sc_rxtap;
+ uint16_t chan_flags;
+ int have_legacy_rate = 1;
+ uint8_t mcs, rate;
+
+ tap->wr_flags = 0;
+ if (is_shortpre)
+ tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+ tap->wr_chan_freq =
+ htole16(ic->ic_channels[chanidx].ic_freq);
+ chan_flags = ic->ic_channels[chanidx].ic_flags;
+#if 0
+ if (ic->ic_curmode != IEEE80211_MODE_11N &&
+ ic->ic_curmode != IEEE80211_MODE_11AC) {
+ chan_flags &= ~IEEE80211_CHAN_HT;
+ chan_flags &= ~IEEE80211_CHAN_40MHZ;
+ }
+ if (ic->ic_curmode != IEEE80211_MODE_11AC)
+ chan_flags &= ~IEEE80211_CHAN_VHT;
+#else
+ chan_flags &= ~IEEE80211_CHAN_HT;
+#endif
+ tap->wr_chan_flags = htole16(chan_flags);
+ tap->wr_dbm_antsignal = rssi;
+ tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
+ tap->wr_tsft = device_timestamp;
+
+ if (sc->sc_rate_n_flags_version >= 2) {
+ uint32_t mod_type = (rate_n_flags &
+ IWX_RATE_MCS_MOD_TYPE_MSK);
+ const struct ieee80211_rateset *rs = NULL;
+ uint32_t ridx;
+ have_legacy_rate = (mod_type == IWX_RATE_MCS_CCK_MSK ||
+ mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK);
+ mcs = (rate_n_flags & IWX_RATE_HT_MCS_CODE_MSK);
+ ridx = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK);
+ if (mod_type == IWX_RATE_MCS_CCK_MSK)
+ rs = &ieee80211_std_rateset_11b;
+ else if (mod_type == IWX_RATE_MCS_LEGACY_OFDM_MSK)
+ rs = &ieee80211_std_rateset_11a;
+ if (rs && ridx < rs->rs_nrates) {
+ rate = (rs->rs_rates[ridx] &
+ IEEE80211_RATE_VAL);
+ } else
+ rate = 0;
+ } else {
+ have_legacy_rate = ((rate_n_flags &
+ (IWX_RATE_MCS_HT_MSK_V1 |
+ IWX_RATE_MCS_VHT_MSK_V1)) == 0);
+ mcs = (rate_n_flags &
+ (IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 |
+ IWX_RATE_HT_MCS_NSS_MSK_V1));
+ rate = (rate_n_flags & IWX_RATE_LEGACY_RATE_MSK_V1);
+ }
+ if (!have_legacy_rate) {
+ tap->wr_rate = (0x80 | mcs);
+ } else {
+ switch (rate) {
+ /* CCK rates. */
+ case 10: tap->wr_rate = 2; break;
+ case 20: tap->wr_rate = 4; break;
+ case 55: tap->wr_rate = 11; break;
+ case 110: tap->wr_rate = 22; break;
+ /* OFDM rates. */
+ case 0xd: tap->wr_rate = 12; break;
+ case 0xf: tap->wr_rate = 18; break;
+ case 0x5: tap->wr_rate = 24; break;
+ case 0x7: tap->wr_rate = 36; break;
+ case 0x9: tap->wr_rate = 48; break;
+ case 0xb: tap->wr_rate = 72; break;
+ case 0x1: tap->wr_rate = 96; break;
+ case 0x3: tap->wr_rate = 108; break;
+ /* Unknown rate: should not happen. */
+ default: tap->wr_rate = 0;
+ }
+			// XXX hack - this needs rebasing with the new rate stuff anyway
+ tap->wr_rate = rate;
+ }
+ }
+
+ IWX_UNLOCK(sc);
+ if (ni == NULL) {
+ if (ieee80211_input_mimo_all(ic, m) == -1)
+ printf("%s:%d input_all returned -1\n", __func__, __LINE__);
+	} else {
+		if (ieee80211_input_mimo(ni, m) == -1)
+			printf("%s:%d input_mimo returned -1\n",
+			    __func__, __LINE__);
+ ieee80211_free_node(ni);
+ }
+ IWX_LOCK(sc);
+}
+
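+/*
+ * Process one received MPDU from the multi-queue Rx path: validate
+ * the descriptor, strip the header padding, verify hardware
+ * decryption, fill in net80211 rx parameters, and hand the frame to
+ * iwx_rx_frame().
+ */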
+static void
+iwx_rx_mpdu_mq(struct iwx_softc *sc, struct mbuf *m, void *pktdata,
+ size_t maxlen)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct ieee80211_key *k;
+ struct ieee80211_rx_stats rxs;
+ struct iwx_rx_mpdu_desc *desc;
+ uint32_t len, hdrlen, rate_n_flags, device_timestamp;
+ int rssi;
+ uint8_t chanidx;
+ uint16_t phy_info;
+ size_t desc_size;
+ int pad = 0;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ desc_size = sizeof(*desc);
+ else
+ desc_size = IWX_RX_DESC_SIZE_V1;
+
+ if (maxlen < desc_size) {
+ m_freem(m);
+ return; /* drop */
+ }
+
+ desc = (struct iwx_rx_mpdu_desc *)pktdata;
+
+ if (!(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_CRC_OK)) ||
+ !(desc->status & htole16(IWX_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
+ printf("%s: Bad CRC or FIFO: 0x%08X\n", __func__, desc->status);
+ m_freem(m);
+ return; /* drop */
+ }
+
+ len = le16toh(desc->mpdu_len);
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ /* Allow control frames in monitor mode. */
+ if (len < sizeof(struct ieee80211_frame_cts)) {
+ m_freem(m);
+ return;
+ }
+
+ } else if (len < sizeof(struct ieee80211_frame)) {
+ m_freem(m);
+ return;
+ }
+ if (len > maxlen - desc_size) {
+ m_freem(m);
+ return;
+ }
+
+	/* Cast to char * so we do not do arithmetic on a void pointer. */
+ m->m_data = (char *)pktdata + desc_size;
+ m->m_pkthdr.len = m->m_len = len;
+
+ /* Account for padding following the frame header. */
+ if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_PAD) {
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (type == IEEE80211_FC0_TYPE_CTL) {
+ switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
+ case IEEE80211_FC0_SUBTYPE_CTS:
+ hdrlen = sizeof(struct ieee80211_frame_cts);
+ break;
+ case IEEE80211_FC0_SUBTYPE_ACK:
+ hdrlen = sizeof(struct ieee80211_frame_ack);
+ break;
+ default:
+ hdrlen = sizeof(struct ieee80211_frame_min);
+ break;
+ }
+ } else
+ hdrlen = ieee80211_hdrsize(wh);
+
+ if ((le16toh(desc->status) &
+ IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ // CCMP header length
+ hdrlen += 8;
+ }
+
+ memmove(m->m_data + 2, m->m_data, hdrlen);
+ m_adj(m, 2);
+
+ }
+
+ if ((le16toh(desc->status) &
+ IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+ IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC) {
+ pad = 1;
+ }
+
+	/*
+	 * Hardware de-aggregates A-MSDUs and copies the same MAC header
+	 * in place for each subframe. But it leaves the 'A-MSDU present'
+	 * bit set in the frame header. We need to clear this bit ourselves.
+	 * (XXX This workaround is not required on AX200/AX201 devices that
+	 * have been tested by me, but it's unclear when this problem was
+	 * fixed in the hardware. It definitely affects the 9k generation.
+	 * Leaving this in place for now since some 9k/AX200 hybrids seem
+	 * to exist that we may eventually add support for.)
+	 *
+	 * And we must allow the same CCMP PN for subframes following the
+	 * first subframe. Otherwise they would be discarded as replays.
+	 */
+ if (desc->mac_flags2 & IWX_RX_MPDU_MFLG2_AMSDU) {
+ DPRINTF(("%s: === IWX_RX_MPDU_MFLG2_AMSDU\n", __func__));
+// struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+// uint8_t subframe_idx = (desc->amsdu_info &
+// IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK);
+// if (subframe_idx > 0)
+// rxi.rxi_flags |= IEEE80211_RXI_HWDEC_SAME_PN;
+// if (ieee80211_has_qos(wh) && ieee80211_has_addr4(wh) &&
+// m->m_len >= sizeof(struct ieee80211_qosframe_addr4)) {
+// struct ieee80211_qosframe_addr4 *qwh4 = mtod(m,
+// struct ieee80211_qosframe_addr4 *);
+// qwh4->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+// } else if (ieee80211_has_qos(wh) &&
+// m->m_len >= sizeof(struct ieee80211_qosframe)) {
+// struct ieee80211_qosframe *qwh = mtod(m,
+// struct ieee80211_qosframe *);
+// qwh->i_qos[0] &= htole16(~IEEE80211_QOS_AMSDU);
+// }
+ }
+
+ /*
+ * Verify decryption before duplicate detection. The latter uses
+ * the TID supplied in QoS frame headers and this TID is implicitly
+ * verified as part of the CCMP nonce.
+ */
+ k = ieee80211_crypto_get_txkey(ni, m);
+ if (k != NULL &&
+ (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) &&
+ iwx_rx_hwdecrypt(sc, m, le16toh(desc->status)/*, &rxi*/)) {
+ DPRINTF(("%s: iwx_rx_hwdecrypt failed\n", __func__));
+ m_freem(m);
+ return;
+ }
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ rate_n_flags = le32toh(desc->v3.rate_n_flags);
+ chanidx = desc->v3.channel;
+ device_timestamp = le32toh(desc->v3.gp2_on_air_rise);
+ } else {
+ rate_n_flags = le32toh(desc->v1.rate_n_flags);
+ chanidx = desc->v1.channel;
+ device_timestamp = le32toh(desc->v1.gp2_on_air_rise);
+ }
+
+ phy_info = le16toh(desc->phy_info);
+
+ rssi = iwx_rxmq_get_signal_strength(sc, desc);
+ rssi = (0 - IWX_MIN_DBM) + rssi; /* normalize */
+ rssi = MIN(rssi, (IWX_MAX_DBM - IWX_MIN_DBM)); /* clip to max. 100% */
+
+ memset(&rxs, 0, sizeof(rxs));
+ rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
+ rxs.r_flags |= IEEE80211_R_BAND;
+ rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
+ rxs.r_flags |= IEEE80211_R_TSF32 | IEEE80211_R_TSF_START;
+
+ rxs.c_ieee = chanidx;
+ rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
+ chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
+ rxs.c_band = chanidx <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
+ rxs.c_rx_tsf = device_timestamp;
+ rxs.c_chain = iwx_rxmq_get_chains(sc, desc);
+ if (rxs.c_chain != 0)
+ rxs.r_flags |= IEEE80211_R_C_CHAIN;
+
+ /* rssi is in 1/2db units */
+ rxs.c_rssi = rssi * 2;
+ rxs.c_nf = sc->sc_noise;
+
+ if (pad) {
+ rxs.c_pktflags |= IEEE80211_RX_F_DECRYPTED;
+ rxs.c_pktflags |= IEEE80211_RX_F_IV_STRIP;
+ }
+
+	if (ieee80211_add_rx_params(m, &rxs) == 0) {
+		printf("%s: ieee80211_add_rx_params failed\n", __func__);
+		m_freem(m);
+		return;
+	}
+
+#if 0
+ if (iwx_rx_reorder(sc, m, chanidx, desc,
+ (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, &rxi, ml))
+ return;
+#endif
+
+ if (pad) {
+#define TRIM 8
+ struct ieee80211_frame *wh = mtod(m, struct ieee80211_frame *);
+ hdrlen = ieee80211_hdrsize(wh);
+ memmove(m->m_data + TRIM, m->m_data, hdrlen);
+ m_adj(m, TRIM);
+#undef TRIM
+ }
+
+ iwx_rx_frame(sc, m, chanidx, le16toh(desc->status),
+ (phy_info & IWX_RX_MPDU_PHY_SHORT_PREAMBLE),
+ rate_n_flags, device_timestamp, rssi);
+}
+
+static void
+iwx_clear_tx_desc(struct iwx_softc *sc, struct iwx_tx_ring *ring, int idx)
+{
+ struct iwx_tfh_tfd *desc = &ring->desc[idx];
+ uint8_t num_tbs = le16toh(desc->num_tbs) & 0x1f;
+ int i;
+
+ /* First TB is never cleared - it is bidirectional DMA data. */
+ for (i = 1; i < num_tbs; i++) {
+ struct iwx_tfh_tb *tb = &desc->tbs[i];
+ memset(tb, 0, sizeof(*tb));
+ }
+ desc->num_tbs = htole16(1);
+
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+}
+
+static void
+iwx_txd_done(struct iwx_softc *sc, struct iwx_tx_ring *ring,
+ struct iwx_tx_data *txd)
+{
+ bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, txd->map);
+
+ ieee80211_tx_complete(&txd->in->in_ni, txd->m, 0);
+ txd->m = NULL;
+ txd->in = NULL;
+}
+
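+/*
+ * Reclaim Tx descriptors up to (but not including) the given hardware
+ * index, clearing descriptors and completing their mbufs.
+ */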
+static void
+iwx_txq_advance(struct iwx_softc *sc, struct iwx_tx_ring *ring, uint16_t idx)
+{
+ struct iwx_tx_data *txd;
+
+ while (ring->tail_hw != idx) {
+ txd = &ring->data[ring->tail];
+ if (txd->m != NULL) {
+ iwx_clear_tx_desc(sc, ring, ring->tail);
+ iwx_tx_update_byte_tbl(sc, ring, ring->tail, 0, 0);
+ iwx_txd_done(sc, ring, txd);
+ ring->queued--;
+ if (ring->queued < 0)
+ panic("caught negative queue count");
+ }
+ ring->tail = (ring->tail + 1) % IWX_TX_RING_COUNT;
+ ring->tail_hw = (ring->tail_hw + 1) % sc->max_tfd_queue_size;
+ }
+}
+
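+/*
+ * Handle a Tx response from firmware: update interface counters for
+ * the completed frame and reclaim descriptors up to the reported SSN,
+ * which on this hardware is a Tx ring index, not a sequence number.
+ */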
+static void
+iwx_rx_tx_cmd(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+ struct iwx_rx_data *data)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ struct iwx_cmd_header *cmd_hdr = &pkt->hdr;
+ int qid = cmd_hdr->qid, status, txfail;
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ struct iwx_tx_resp *tx_resp = (void *)pkt->data;
+ uint32_t ssn;
+ uint32_t len = iwx_rx_packet_len(pkt);
+ int idx = cmd_hdr->idx;
+ struct iwx_tx_data *txd = &ring->data[idx];
+ struct mbuf *m = txd->m;
+
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD);
+
+ /* Sanity checks. */
+ if (sizeof(*tx_resp) > len)
+ return;
+ if (qid < IWX_FIRST_AGG_TX_QUEUE && tx_resp->frame_count > 1)
+ return;
+ if (qid >= IWX_FIRST_AGG_TX_QUEUE && sizeof(*tx_resp) + sizeof(ssn) +
+ tx_resp->frame_count * sizeof(tx_resp->status) > len)
+ return;
+
+ sc->sc_tx_timer[qid] = 0;
+
+ if (tx_resp->frame_count > 1) /* A-MPDU */
+ return;
+
+ status = le16toh(tx_resp->status.status) & IWX_TX_STATUS_MSK;
+ txfail = (status != IWX_TX_STATUS_SUCCESS &&
+ status != IWX_TX_STATUS_DIRECT_DONE);
+
+#ifdef __not_yet__
+ /* TODO: Replace accounting below with ieee80211_tx_complete() */
+ ieee80211_tx_complete(&in->in_ni, m, txfail);
+#else
+ if (txfail)
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ else {
+ if_inc_counter(ifp, IFCOUNTER_OBYTES, m->m_pkthdr.len);
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+ if (m->m_flags & M_MCAST)
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
+ }
+#endif
+ /*
+ * On hardware supported by iwx(4) the SSN counter corresponds
+ * to a Tx ring index rather than a sequence number.
+ * Frames up to this index (non-inclusive) can now be freed.
+ */
+ memcpy(&ssn, &tx_resp->status + tx_resp->frame_count, sizeof(ssn));
+ ssn = le32toh(ssn);
+ if (ssn < sc->max_tfd_queue_size) {
+ iwx_txq_advance(sc, ring, ssn);
+ iwx_clear_oactive(sc, ring);
+ }
+}
+
+static void
+iwx_clear_oactive(struct iwx_softc *sc, struct iwx_tx_ring *ring)
+{
+ IWX_ASSERT_LOCKED(sc);
+
+ if (ring->queued < iwx_lomark) {
+ sc->qfullmsk &= ~(1 << ring->qid);
+ if (sc->qfullmsk == 0 /* && ifq_is_oactive(&ifp->if_snd) */) {
+ /*
+ * Well, we're in interrupt context, but then again
+ * I guess net80211 does all sorts of stunts in
+ * interrupt context, so maybe this is no biggie.
+ */
+ iwx_start(sc);
+ }
+ }
+}
+
+static void
+iwx_rx_compressed_ba(struct iwx_softc *sc, struct iwx_rx_packet *pkt)
+{
+ struct iwx_compressed_ba_notif *ba_res = (void *)pkt->data;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_tx_ring *ring;
+ uint16_t i, tfd_cnt, ra_tid_cnt, idx;
+ int qid;
+
+// if (ic->ic_state != IEEE80211_S_RUN)
+// return;
+
+ if (iwx_rx_packet_payload_len(pkt) < sizeof(*ba_res))
+ return;
+
+ if (ba_res->sta_id != IWX_STATION_ID)
+ return;
+
+ in = (void *)ni;
+
+ tfd_cnt = le16toh(ba_res->tfd_cnt);
+ ra_tid_cnt = le16toh(ba_res->ra_tid_cnt);
+ if (!tfd_cnt || iwx_rx_packet_payload_len(pkt) < (sizeof(*ba_res) +
+ sizeof(ba_res->ra_tid[0]) * ra_tid_cnt +
+ sizeof(ba_res->tfd[0]) * tfd_cnt))
+ return;
+
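+	/*
+	 * Each TFD entry in the BA notification names a (TID, queue) pair
+	 * and the ring index the firmware has released; reclaim frames up
+	 * to that index on the matching aggregation queue.
+	 */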
+ for (i = 0; i < tfd_cnt; i++) {
+ struct iwx_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
+ uint8_t tid;
+
+ tid = ba_tfd->tid;
+ if (tid >= nitems(sc->aggqid))
+ continue;
+
+ qid = sc->aggqid[tid];
+		if (qid != le16toh(ba_tfd->q_num))
+ continue;
+
+ ring = &sc->txq[qid];
+
+#if 0
+ ba = &ni->ni_tx_ba[tid];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+#endif
+ idx = le16toh(ba_tfd->tfd_index);
+ sc->sc_tx_timer[qid] = 0;
+ iwx_txq_advance(sc, ring, idx);
+ iwx_clear_oactive(sc, ring);
+ }
+}
+
+static void
+iwx_rx_bmiss(struct iwx_softc *sc, struct iwx_rx_packet *pkt,
+ struct iwx_rx_data *data)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_missed_beacons_notif *mbn = (void *)pkt->data;
+ uint32_t missed;
+
+ if ((ic->ic_opmode != IEEE80211_M_STA) ||
+ (vap->iv_state != IEEE80211_S_RUN))
+ return;
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: mac_id=%u, cmslrx=%u, cmb=%u, neb=%d, nrb=%u\n",
+ __func__,
+ le32toh(mbn->mac_id),
+ le32toh(mbn->consec_missed_beacons_since_last_rx),
+ le32toh(mbn->consec_missed_beacons),
+ le32toh(mbn->num_expected_beacons),
+ le32toh(mbn->num_recvd_beacons));
+
+ missed = le32toh(mbn->consec_missed_beacons_since_last_rx);
+ if (missed > vap->iv_bmissthreshold) {
+ ieee80211_beacon_miss(ic);
+ }
+}
+
+static int
+iwx_binding_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action)
+{
+ struct iwx_binding_cmd cmd;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ struct iwx_phy_ctxt *phyctxt = ivp->phy_ctxt;
+ uint32_t mac_id = IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color);
+ int i, err, active = (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE);
+ uint32_t status;
+
+ if (action == IWX_FW_CTXT_ACTION_ADD && active)
+ panic("binding already added");
+ if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
+ panic("binding already removed");
+
+ if (phyctxt == NULL) /* XXX race with iwx_stop() */
+ return EINVAL;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.id_and_color
+ = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+ cmd.action = htole32(action);
+ cmd.phy = htole32(IWX_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
+
+ cmd.macs[0] = htole32(mac_id);
+ for (i = 1; i < IWX_MAX_MACS_IN_BINDING; i++)
+ cmd.macs[i] = htole32(IWX_FW_CTXT_INVALID);
+
+ if (IEEE80211_IS_CHAN_2GHZ(phyctxt->channel) ||
+ !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
+ cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
+
+ status = 0;
+ err = iwx_send_cmd_pdu_status(sc, IWX_BINDING_CONTEXT_CMD, sizeof(cmd),
+ &cmd, &status);
+ if (err == 0 && status != 0)
+ err = EIO;
+
+ return err;
+}
+
+static uint8_t
+iwx_get_vht_ctrl_pos(struct ieee80211com *ic, struct ieee80211_channel *chan)
+{
+ int ctlchan = ieee80211_chan2ieee(ic, chan);
+ int midpoint = chan->ic_vht_ch_freq1;
+
+	/*
+	 * The firmware only checks the control channel position when
+	 * operating in HT/VHT with a channel width wider than 20 MHz.
+	 * Return this value as the default:
+	 */
+ uint8_t pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+
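+	/*
+	 * An 80 MHz channel spans four 20 MHz subchannels whose centers
+	 * lie at +/-10 and +/-30 MHz from the 80 MHz center frequency,
+	 * i.e. at offsets of -6, -2, +2 and +6 channel numbers (5 MHz
+	 * per channel number).
+	 */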
+ switch (ctlchan - midpoint) {
+ case -6:
+ pos = IWX_PHY_VHT_CTRL_POS_2_BELOW;
+ break;
+ case -2:
+ pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ break;
+ case 2:
+ pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
+ break;
+ case 6:
+ pos = IWX_PHY_VHT_CTRL_POS_2_ABOVE;
+ break;
+ default:
+ break;
+ }
+
+ return pos;
+}
+
+static int
+iwx_phy_ctxt_cmd_uhb_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
+ uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
+ uint8_t vht_chan_width, int cmdver)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_phy_context_cmd_uhb cmd;
+ uint8_t active_cnt, idle_cnt;
+ struct ieee80211_channel *chan = ctxt->channel;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd.action = htole32(action);
+
+ if (IEEE80211_IS_CHAN_2GHZ(chan) ||
+ !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
+ cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
+
+ cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
+ IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
+ cmd.ci.channel = htole32(ieee80211_chan2ieee(ic, chan));
+
+ if (IEEE80211_IS_CHAN_VHT80(chan)) {
+ cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
+ } else if (IEEE80211_IS_CHAN_HT40(chan)) {
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
+ if (IEEE80211_IS_CHAN_HT40D(chan))
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
+ else
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ } else {
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ }
+
+ if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RLC_CONFIG_CMD) != 2) {
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+ cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
+ IWX_PHY_RX_CHAIN_VALID_POS);
+ cmd.rxchain_info |= htole32(idle_cnt <<
+ IWX_PHY_RX_CHAIN_CNT_POS);
+ cmd.rxchain_info |= htole32(active_cnt <<
+ IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
+ }
+
+ return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+
+#if 0
+int
+iwx_phy_ctxt_cmd_v3_v4(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
+ uint8_t chains_static, uint8_t chains_dynamic, uint32_t action, uint8_t sco,
+ uint8_t vht_chan_width, int cmdver)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_phy_context_cmd cmd;
+ uint8_t active_cnt, idle_cnt;
+ struct ieee80211_channel *chan = ctxt->channel;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(ctxt->id,
+ ctxt->color));
+ cmd.action = htole32(action);
+
+ if (IEEE80211_IS_CHAN_2GHZ(ctxt->channel) ||
+ !isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CDB_SUPPORT))
+ cmd.lmac_id = htole32(IWX_LMAC_24G_INDEX);
+ else
+ cmd.lmac_id = htole32(IWX_LMAC_5G_INDEX);
+
+ cmd.ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
+ IWX_PHY_BAND_24 : IWX_PHY_BAND_5;
+ cmd.ci.channel = ieee80211_chan2ieee(ic, chan);
+ if (vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80) {
+ cmd.ci.ctrl_pos = iwx_get_vht_ctrl_pos(ic, chan);
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE80;
+ } else if (chan->ic_flags & IEEE80211_CHAN_40MHZ) {
+ if (sco == IEEE80211_HTOP0_SCO_SCA) {
+ /* secondary chan above -> control chan below */
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
+ } else if (sco == IEEE80211_HTOP0_SCO_SCB) {
+ /* secondary chan below -> control chan above */
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_ABOVE;
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE40;
+ } else {
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ }
+ } else {
+ cmd.ci.width = IWX_PHY_VHT_CHANNEL_MODE20;
+ cmd.ci.ctrl_pos = IWX_PHY_VHT_CTRL_POS_1_BELOW;
+ }
+
+ if (cmdver < 4 && iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RLC_CONFIG_CMD) != 2) {
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+ cmd.rxchain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
+ IWX_PHY_RX_CHAIN_VALID_POS);
+ cmd.rxchain_info |= htole32(idle_cnt <<
+ IWX_PHY_RX_CHAIN_CNT_POS);
+ cmd.rxchain_info |= htole32(active_cnt <<
+ IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
+ }
+
+ return iwx_send_cmd_pdu(sc, IWX_PHY_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+#endif
+
+static int
+iwx_phy_ctxt_cmd(struct iwx_softc *sc, struct iwx_phy_ctxt *ctxt,
+ uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
+ uint32_t apply_time, uint8_t sco, uint8_t vht_chan_width)
+{
+ int cmdver;
+
+ cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_PHY_CONTEXT_CMD);
+ if (cmdver != 3 && cmdver != 4) {
+ printf("%s: firmware does not support phy-context-cmd v3/v4\n",
+ DEVNAME(sc));
+ return ENOTSUP;
+ }
+
+ /*
+ * Intel increased the size of the fw_channel_info struct and neglected
+ * to bump the phy_context_cmd struct, which contains an fw_channel_info
+ * member in the middle.
+ * To keep things simple we use a separate function to handle the larger
+ * variant of the phy context command.
+ */
+ if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS)) {
+ return iwx_phy_ctxt_cmd_uhb_v3_v4(sc, ctxt, chains_static,
+ chains_dynamic, action, sco, vht_chan_width, cmdver);
+ } else
+ panic("Unsupported old hardware contact thj@");
+
+#if 0
+ return iwx_phy_ctxt_cmd_v3_v4(sc, ctxt, chains_static, chains_dynamic,
+ action, sco, vht_chan_width, cmdver);
+#endif
+}
+
+static int
+iwx_send_cmd(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+ struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
+ struct iwx_tfh_tfd *desc;
+ struct iwx_tx_data *txdata;
+ struct iwx_device_cmd *cmd;
+ struct mbuf *m;
+ bus_addr_t paddr;
+ uint64_t addr;
+ int err = 0, i, paylen, off/*, s*/;
+ int idx, code, async, group_id;
+ size_t hdrlen, datasz;
+ uint8_t *data;
+ int generation = sc->sc_generation;
+ bus_dma_segment_t seg[10];
+ int nsegs;
+
+ code = hcmd->id;
+ async = hcmd->flags & IWX_CMD_ASYNC;
+ idx = ring->cur;
+
+ for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
+ paylen += hcmd->len[i];
+ }
+
+ /* If this command waits for a response, allocate response buffer. */
+ hcmd->resp_pkt = NULL;
+ if (hcmd->flags & IWX_CMD_WANT_RESP) {
+ uint8_t *resp_buf;
+ KASSERT(!async, ("async command want response"));
+ KASSERT(hcmd->resp_pkt_len >= sizeof(struct iwx_rx_packet),
+ ("wrong pkt len 1"));
+ KASSERT(hcmd->resp_pkt_len <= IWX_CMD_RESP_MAX,
+ ("wrong pkt len 2"));
+ if (sc->sc_cmd_resp_pkt[idx] != NULL)
+ return ENOSPC;
+ resp_buf = malloc(hcmd->resp_pkt_len, M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (resp_buf == NULL)
+ return ENOMEM;
+ sc->sc_cmd_resp_pkt[idx] = resp_buf;
+ sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
+ } else {
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ }
+
+ desc = &ring->desc[idx];
+ txdata = &ring->data[idx];
+
+ /*
+ * XXX Intel inside (tm)
+ * Firmware API versions >= 50 reject old-style commands in
+ * group 0 with a "BAD_COMMAND" firmware error. We must pretend
+ * that such commands were in the LONG_GROUP instead in order
+ * for firmware to accept them.
+ */
+ if (iwx_cmd_groupid(code) == 0) {
+ code = IWX_WIDE_ID(IWX_LONG_GROUP, code);
+ txdata->flags |= IWX_TXDATA_FLAG_CMD_IS_NARROW;
+ } else
+ txdata->flags &= ~IWX_TXDATA_FLAG_CMD_IS_NARROW;
+
+ group_id = iwx_cmd_groupid(code);
+
+ hdrlen = sizeof(cmd->hdr_wide);
+ datasz = sizeof(cmd->data_wide);
+
+ if (paylen > datasz) {
+ /* Command is too large to fit in pre-allocated space. */
+ size_t totlen = hdrlen + paylen;
+ if (paylen > IWX_MAX_CMD_PAYLOAD_SIZE) {
+ printf("%s: firmware command too long (%zd bytes)\n",
+ DEVNAME(sc), totlen);
+ err = EINVAL;
+ goto out;
+ }
+ if (totlen > IWX_RBUF_SIZE)
+ panic("totlen > IWX_RBUF_SIZE");
+ m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWX_RBUF_SIZE);
+ if (m == NULL) {
+ printf("%s: could not get fw cmd mbuf (%i bytes)\n",
+ DEVNAME(sc), IWX_RBUF_SIZE);
+ err = ENOMEM;
+ goto out;
+ }
+ m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
+		err = bus_dmamap_load_mbuf_sg(ring->data_dmat, txdata->map, m,
+		    seg, &nsegs, BUS_DMA_NOWAIT);
+		if (err) {
+			printf("%s: could not load fw cmd mbuf (%zu bytes)\n",
+			    DEVNAME(sc), totlen);
+			m_freem(m);
+			goto out;
+		}
+		/* nsegs is only valid after a successful load. */
+		if (nsegs > nitems(seg))
+			panic("nsegs > nitems(seg)");
+		DPRINTF(("%s: nsegs=%i\n", __func__, nsegs));
+ txdata->m = m; /* mbuf will be freed in iwx_cmd_done() */
+ cmd = mtod(m, struct iwx_device_cmd *);
+ paddr = seg[0].ds_addr;
+ } else {
+ cmd = &ring->cmd[idx];
+ paddr = txdata->cmd_paddr;
+ }
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->hdr_wide.opcode = iwx_cmd_opcode(code);
+ cmd->hdr_wide.group_id = group_id;
+ cmd->hdr_wide.qid = ring->qid;
+ cmd->hdr_wide.idx = idx;
+ cmd->hdr_wide.length = htole16(paylen);
+ cmd->hdr_wide.version = iwx_cmd_version(code);
+ data = cmd->data_wide;
+
+ for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
+ if (hcmd->len[i] == 0)
+ continue;
+ memcpy(data + off, hcmd->data[i], hcmd->len[i]);
+ off += hcmd->len[i];
+ }
+ KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
+
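+	/*
+	 * Split the command across transfer buffers: TB0 holds at most
+	 * IWX_FIRST_TB_SIZE bytes, any remainder goes into a second TB
+	 * pointing at the rest of the same DMA buffer.
+	 */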
+ desc->tbs[0].tb_len = htole16(MIN(hdrlen + paylen, IWX_FIRST_TB_SIZE));
+ addr = htole64(paddr);
+ memcpy(&desc->tbs[0].addr, &addr, sizeof(addr));
+ if (hdrlen + paylen > IWX_FIRST_TB_SIZE) {
+ DPRINTF(("%s: hdrlen=%zu paylen=%d\n", __func__, hdrlen,
+ paylen));
+ desc->tbs[1].tb_len = htole16(hdrlen + paylen -
+ IWX_FIRST_TB_SIZE);
+ addr = htole64(paddr + IWX_FIRST_TB_SIZE);
+ memcpy(&desc->tbs[1].addr, &addr, sizeof(addr));
+ desc->num_tbs = htole16(2);
+ } else
+ desc->num_tbs = htole16(1);
+
+ if (paylen > datasz) {
+ bus_dmamap_sync(ring->data_dmat, txdata->map,
+ BUS_DMASYNC_PREWRITE);
+ } else {
+ bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ }
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+
+ /* Kick command ring. */
+ ring->queued++;
+ ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
+ ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
+ DPRINTF(("%s: ring->cur_hw=%i\n", __func__, ring->cur_hw));
+ IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
+
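+	/*
+	 * For synchronous commands, sleep on the descriptor address;
+	 * iwx_cmd_done() issues the matching wakeup() once the firmware
+	 * acknowledges the command.
+	 */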
+ if (!async) {
+ err = msleep(desc, &sc->sc_mtx, PCATCH, "iwxcmd", hz);
+ if (err == 0) {
+ /* if hardware is no longer up, return error */
+ if (generation != sc->sc_generation) {
+ err = ENXIO;
+ goto out;
+ }
+
+ /* Response buffer will be freed in iwx_free_resp(). */
+ hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ } else if (generation == sc->sc_generation) {
+ free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ }
+ }
+out:
+ return err;
+}
+
+static int
+iwx_send_cmd_pdu(struct iwx_softc *sc, uint32_t id, uint32_t flags,
+ uint16_t len, const void *data)
+{
+ struct iwx_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ .flags = flags,
+ };
+
+ return iwx_send_cmd(sc, &cmd);
+}
+
+static int
+iwx_send_cmd_status(struct iwx_softc *sc, struct iwx_host_cmd *cmd,
+ uint32_t *status)
+{
+ struct iwx_rx_packet *pkt;
+ struct iwx_cmd_response *resp;
+ int err, resp_len;
+
+ KASSERT(((cmd->flags & IWX_CMD_WANT_RESP) == 0), ("IWX_CMD_WANT_RESP"));
+ cmd->flags |= IWX_CMD_WANT_RESP;
+ cmd->resp_pkt_len = sizeof(*pkt) + sizeof(*resp);
+
+ err = iwx_send_cmd(sc, cmd);
+ if (err)
+ return err;
+
+ pkt = cmd->resp_pkt;
+ if (pkt == NULL || (pkt->hdr.flags & IWX_CMD_FAILED_MSK))
+ return EIO;
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ if (resp_len != sizeof(*resp)) {
+ iwx_free_resp(sc, cmd);
+ return EIO;
+ }
+
+ resp = (void *)pkt->data;
+ *status = le32toh(resp->status);
+ iwx_free_resp(sc, cmd);
+ return err;
+}
+
+static int
+iwx_send_cmd_pdu_status(struct iwx_softc *sc, uint32_t id, uint16_t len,
+ const void *data, uint32_t *status)
+{
+ struct iwx_host_cmd cmd = {
+ .id = id,
+ .len = { len, },
+ .data = { data, },
+ };
+
+ return iwx_send_cmd_status(sc, &cmd, status);
+}
+
+static void
+iwx_free_resp(struct iwx_softc *sc, struct iwx_host_cmd *hcmd)
+{
+ KASSERT((hcmd->flags & (IWX_CMD_WANT_RESP)) == IWX_CMD_WANT_RESP,
+ ("hcmd flags !IWX_CMD_WANT_RESP"));
+ free(hcmd->resp_pkt, M_DEVBUF);
+ hcmd->resp_pkt = NULL;
+}
+
+static void
+iwx_cmd_done(struct iwx_softc *sc, int qid, int idx, int code)
+{
+ struct iwx_tx_ring *ring = &sc->txq[IWX_DQA_CMD_QUEUE];
+ struct iwx_tx_data *data;
+
+ if (qid != IWX_DQA_CMD_QUEUE) {
+ return; /* Not a command ack. */
+ }
+
+ data = &ring->data[idx];
+
+ if (data->m != NULL) {
+ bus_dmamap_sync(ring->data_dmat, data->map,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->data_dmat, data->map);
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ wakeup(&ring->desc[idx]);
+
+ DPRINTF(("%s: command 0x%x done\n", __func__, code));
+ if (ring->queued == 0) {
+ DPRINTF(("%s: unexpected firmware response to command 0x%x\n",
+ DEVNAME(sc), code));
+ } else if (ring->queued > 0)
+ ring->queued--;
+}
+
+static uint32_t
+iwx_fw_rateidx_ofdm(uint8_t rval)
+{
+ /* Firmware expects indices which match our 11a rate set. */
+ const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11a;
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
+ return i;
+ }
+
+ return 0;
+}
+
+static uint32_t
+iwx_fw_rateidx_cck(uint8_t rval)
+{
+ /* Firmware expects indices which match our 11b rate set. */
+ const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11b;
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
+ return i;
+ }
+
+ return 0;
+}
+
+static int
+iwx_min_basic_rate(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct ieee80211_channel *c = ni->ni_chan;
+ int i, min, rval;
+
+ min = -1;
+
+ if (c == IEEE80211_CHAN_ANYC) {
+ printf("%s: channel is IEEE80211_CHAN_ANYC\n", __func__);
+ return -1;
+ }
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) == 0)
+ continue;
+ rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
+ if (min == -1)
+ min = rval;
+ else if (rval < min)
+ min = rval;
+ }
+
+	/* Default to 1 Mbit/s on 2GHz and 6 Mbit/s on 5GHz (500 kb/s units). */
+ if (min == -1)
+ min = IEEE80211_IS_CHAN_2GHZ(c) ? 2 : 12;
+
+ return min;
+}
+
+/*
+ * Determine the Tx command flags and Tx rate+flags to use.
+ * Return the selected Tx rate.
+ */
+static const struct iwx_rate *
+iwx_tx_fill_cmd(struct iwx_softc *sc, struct iwx_node *in,
+ struct ieee80211_frame *wh, uint16_t *flags, uint32_t *rate_n_flags,
+ struct mbuf *m)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ const struct iwx_rate *rinfo = NULL;
+ int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ int ridx = iwx_min_basic_rate(ic);
+ int min_ridx, rate_flags;
+ uint8_t rval;
+
+	/* The node is being torn down and no channel is set; nothing to send. */
+ if (ridx == -1)
+ return NULL;
+
+ min_ridx = iwx_rval2ridx(ridx);
+
+ *flags = 0;
+
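+	/*
+	 * Rate selection: multicast and non-data frames are pinned to a
+	 * basic rate via IWX_TX_FLAGS_CMD_RATE; for data frames the
+	 * firmware's rate scaling picks the rate.
+	 */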
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ type != IEEE80211_FC0_TYPE_DATA) {
+ /* for non-data, use the lowest supported rate */
+ ridx = min_ridx;
+ *flags |= IWX_TX_FLAGS_CMD_RATE;
+ } else if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ /* TODO: VHT - the ridx / rate array doesn't have VHT rates yet */
+ ridx = iwx_min_basic_rate(ic);
+ } else if (ni->ni_flags & IEEE80211_NODE_HT) {
+ ridx = iwx_mcs2ridx[ieee80211_node_get_txrate_dot11rate(ni)
+ & ~IEEE80211_RATE_MCS];
+ } else {
+ rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
+ & IEEE80211_RATE_VAL);
+ ridx = iwx_rval2ridx(rval);
+ if (ridx < min_ridx)
+ ridx = min_ridx;
+ }
+
+ if (m->m_flags & M_EAPOL)
+ *flags |= IWX_TX_FLAGS_HIGH_PRI;
+
+ rinfo = &iwx_rates[ridx];
+
+ /*
+ * Do not fill rate_n_flags if firmware controls the Tx rate.
+ * For data frames we rely on Tx rate scaling in firmware by default.
+ */
+ if ((*flags & IWX_TX_FLAGS_CMD_RATE) == 0) {
+ *rate_n_flags = 0;
+ return rinfo;
+ }
+
+ /*
+ * Forcing a CCK/OFDM legacy rate is important for management frames.
+ * Association will only succeed if we do this correctly.
+ */
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,"%s%d:: min_ridx=%i\n", __func__, __LINE__, min_ridx);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: ridx=%i\n", __func__, __LINE__, ridx);
+ rate_flags = IWX_RATE_MCS_ANT_A_MSK;
+ if (IWX_RIDX_IS_CCK(ridx)) {
+ if (sc->sc_rate_n_flags_version >= 2)
+ rate_flags |= IWX_RATE_MCS_CCK_MSK;
+ else
+ rate_flags |= IWX_RATE_MCS_CCK_MSK_V1;
+ } else if (sc->sc_rate_n_flags_version >= 2)
+ rate_flags |= IWX_RATE_MCS_LEGACY_OFDM_MSK;
+
+ rval = (rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]
+ & IEEE80211_RATE_VAL);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d: rval=%i dot11 %d\n", __func__, __LINE__,
+ rval, rs->rs_rates[ieee80211_node_get_txrate_dot11rate(ni)]);
+
+ if (sc->sc_rate_n_flags_version >= 2) {
+ if (rate_flags & IWX_RATE_MCS_LEGACY_OFDM_MSK) {
+ rate_flags |= (iwx_fw_rateidx_ofdm(rval) &
+ IWX_RATE_LEGACY_RATE_MSK);
+ } else {
+ rate_flags |= (iwx_fw_rateidx_cck(rval) &
+ IWX_RATE_LEGACY_RATE_MSK);
+ }
+ } else
+ rate_flags |= rinfo->plcp;
+
+ *rate_n_flags = rate_flags;
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d flags=0x%x\n",
+ __func__, __LINE__,*flags);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d rate_n_flags=0x%x\n",
+ __func__, __LINE__, *rate_n_flags);
+
+ if (sc->sc_debug & IWX_DEBUG_TXRATE)
+ print_ratenflags(__func__, __LINE__,
+ *rate_n_flags, sc->sc_rate_n_flags_version);
+
+ return rinfo;
+}
+
+static void
+iwx_tx_update_byte_tbl(struct iwx_softc *sc, struct iwx_tx_ring *txq,
+ int idx, uint16_t byte_cnt, uint16_t num_tbs)
+{
+ uint8_t filled_tfd_size, num_fetch_chunks;
+ uint16_t len = byte_cnt;
+ uint16_t bc_ent;
+
+ filled_tfd_size = offsetof(struct iwx_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwx_tfh_tb);
+	/*
+	 * filled_tfd_size contains the number of filled bytes in the TFD.
+	 * Dividing it by 64 gives the number of chunks to fetch to SRAM:
+	 * 0 for one chunk, 1 for two, and so on.
+	 * If, for example, the TFD contains only 3 TBs then 32 bytes of
+	 * the TFD are used, and only one chunk of 64 bytes should be
+	 * fetched.
+	 */
+ num_fetch_chunks = howmany(filled_tfd_size, 64) - 1;
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ struct iwx_gen3_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.vaddr;
+ /* Starting from AX210, the HW expects bytes */
+ bc_ent = htole16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
+ } else {
+ struct iwx_agn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.vaddr;
+ /* Before AX210, the HW expects DW */
+ len = howmany(len, 4);
+ bc_ent = htole16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+
+ bus_dmamap_sync(sc->sc_dmat, txq->bc_tbl.map, BUS_DMASYNC_PREWRITE);
+}
+
+static int
+iwx_tx(struct iwx_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = (void *)ni;
+ struct iwx_tx_ring *ring;
+ struct iwx_tx_data *data;
+ struct iwx_tfh_tfd *desc;
+ struct iwx_device_cmd *cmd;
+ struct ieee80211_frame *wh;
+ struct ieee80211_key *k = NULL;
+ const struct iwx_rate *rinfo;
+ uint64_t paddr;
+ u_int hdrlen;
+ uint32_t rate_n_flags;
+ uint16_t num_tbs, flags, offload_assist = 0;
+ uint8_t type, subtype;
+ int i, totlen, err, pad, qid;
+#define IWM_MAX_SCATTER 20
+ bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
+ int nsegs;
+ struct mbuf *m1;
+ size_t txcmd_size;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ wh = mtod(m, struct ieee80211_frame *);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
+ hdrlen = ieee80211_anyhdrsize(wh);
+
+ qid = sc->first_data_qid;
+
+ /* Put QoS frames on the data queue which maps to their TID. */
+ if (IEEE80211_QOS_HAS_SEQ(wh) && (sc->sc_flags & IWX_FLAG_AMPDUTX)) {
+ uint16_t qos = ieee80211_gettid(wh);
+ uint8_t tid = qos & IEEE80211_QOS_TID;
+#if 0
+ /*
+ * XXX-THJ: TODO when we enable ba we need to manage the
+ * mappings
+ */
+ struct ieee80211_tx_ba *ba;
+ ba = &ni->ni_tx_ba[tid];
+
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ type == IEEE80211_FC0_TYPE_DATA &&
+ subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+ subtype != IEEE80211_FC0_SUBTYPE_BAR &&
+ sc->aggqid[tid] != 0 /*&&
+ ba->ba_state == IEEE80211_BA_AGREED*/) {
+ qid = sc->aggqid[tid];
+#else
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ type == IEEE80211_FC0_TYPE_DATA &&
+ subtype != IEEE80211_FC0_SUBTYPE_NODATA &&
+ sc->aggqid[tid] != 0) {
+ qid = sc->aggqid[tid];
+#endif
+ }
+ }
+
+ ring = &sc->txq[qid];
+ desc = &ring->desc[ring->cur];
+ memset(desc, 0, sizeof(*desc));
+ data = &ring->data[ring->cur];
+
+ cmd = &ring->cmd[ring->cur];
+ cmd->hdr.code = IWX_TX_CMD;
+ cmd->hdr.flags = 0;
+ cmd->hdr.qid = ring->qid;
+ cmd->hdr.idx = ring->cur;
+
+ rinfo = iwx_tx_fill_cmd(sc, in, wh, &flags, &rate_n_flags, m);
+ if (rinfo == NULL)
+ return EINVAL;
+
+ /* Offloaded sequence number assignment; non-AMPDU case */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+	/*
+	 * Encrypt: CCMP goes via the direct HW path; TKIP/WEP are
+	 * encapsulated in software, OpenBSD-style, for now. Do this
+	 * before radiotap so the TX key is known when the radiotap
+	 * header is filled in.
+	 */
+	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+		k = ieee80211_crypto_get_txkey(ni, m);
+		if (k == NULL) {
+			printf("%s: could not get TX key\n", __func__);
+			m_freem(m);
+			return (ENOBUFS);
+		} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+			k->wk_keytsc++;
+		} else {
+			k->wk_cipher->ic_encap(k, m);
+
+			/* 802.11 headers may have moved */
+			wh = mtod(m, struct ieee80211_frame *);
+			flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
+		}
+	} else
+		flags |= IWX_TX_FLAGS_ENCRYPT_DIS;
+
+	/* Radiotap */
+	if (ieee80211_radiotap_active_vap(vap)) {
+		struct iwx_tx_radiotap_header *tap = &sc->sc_txtap;
+
+		tap->wt_flags = 0;
+		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
+		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
+		tap->wt_rate = rinfo->rate;
+		if (k != NULL)
+			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
+		ieee80211_radiotap_tx(vap, m);
+	}
+
+ totlen = m->m_pkthdr.len;
+
+ if (hdrlen & 3) {
+ /* First segment length must be a multiple of 4. */
+ pad = 4 - (hdrlen & 3);
+ offload_assist |= IWX_TX_CMD_OFFLD_PAD;
+ } else
+ pad = 0;
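+	/*
+	 * The pad bytes requested via IWX_TX_CMD_OFFLD_PAD are accounted
+	 * for in the length of the second TB below.
+	 */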
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ struct iwx_tx_cmd_gen3 *tx = (void *)cmd->data;
+ memset(tx, 0, sizeof(*tx));
+ tx->len = htole16(totlen);
+ tx->offload_assist = htole32(offload_assist);
+ tx->flags = htole16(flags);
+ tx->rate_n_flags = htole32(rate_n_flags);
+ memcpy(tx->hdr, wh, hdrlen);
+ txcmd_size = sizeof(*tx);
+ } else {
+ struct iwx_tx_cmd_gen2 *tx = (void *)cmd->data;
+ memset(tx, 0, sizeof(*tx));
+ tx->len = htole16(totlen);
+ tx->offload_assist = htole16(offload_assist);
+ tx->flags = htole32(flags);
+ tx->rate_n_flags = htole32(rate_n_flags);
+ memcpy(tx->hdr, wh, hdrlen);
+ txcmd_size = sizeof(*tx);
+ }
+
+ /* Trim 802.11 header. */
+ m_adj(m, hdrlen);
+
+ err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (err && err != EFBIG) {
+ printf("%s: can't map mbuf (error %d)\n", DEVNAME(sc), err);
+ m_freem(m);
+ return err;
+ }
+ if (err) {
+ /* Too many DMA segments, linearize mbuf. */
+ m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
+ if (m1 == NULL) {
+ printf("%s: could not defrag mbufs\n", __func__);
+ m_freem(m);
+ return (ENOBUFS);
+ }
+ m = m1;
+ err = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
+ segs, &nsegs, BUS_DMA_NOWAIT);
+ if (err) {
+ printf("%s: can't map mbuf (error %d)\n", __func__,
+ err);
+ m_freem(m);
+ return (err);
+ }
+ }
+ data->m = m;
+ data->in = in;
+
+ /* Fill TX descriptor. */
+ num_tbs = 2 + nsegs;
+ desc->num_tbs = htole16(num_tbs);
+
+ desc->tbs[0].tb_len = htole16(IWX_FIRST_TB_SIZE);
+ paddr = htole64(data->cmd_paddr);
+ memcpy(&desc->tbs[0].addr, &paddr, sizeof(paddr));
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[0].tb_len)) >> 32)
+		DPRINTF(("%s: TB0 crosses 32bit boundary\n", __func__));
+ desc->tbs[1].tb_len = htole16(sizeof(struct iwx_cmd_header) +
+ txcmd_size + hdrlen + pad - IWX_FIRST_TB_SIZE);
+ paddr = htole64(data->cmd_paddr + IWX_FIRST_TB_SIZE);
+ memcpy(&desc->tbs[1].addr, &paddr, sizeof(paddr));
+
+	if (data->cmd_paddr >> 32 != (data->cmd_paddr + le16toh(desc->tbs[1].tb_len)) >> 32)
+		DPRINTF(("%s: TB1 crosses 32bit boundary\n", __func__));
+
+ /* Other DMA segments are for data payload. */
+ for (i = 0; i < nsegs; i++) {
+ seg = &segs[i];
+ desc->tbs[i + 2].tb_len = htole16(seg->ds_len);
+ paddr = htole64(seg->ds_addr);
+ memcpy(&desc->tbs[i + 2].addr, &paddr, sizeof(paddr));
+		if (seg->ds_addr >> 32 != (seg->ds_addr + le16toh(desc->tbs[i + 2].tb_len)) >> 32)
+			DPRINTF(("%s: TB%d crosses 32bit boundary\n", __func__, i + 2));
+ }
+
+ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
+ BUS_DMASYNC_PREWRITE);
+
+ iwx_tx_update_byte_tbl(sc, ring, ring->cur, totlen, num_tbs);
+
+ /* Kick TX ring. */
+ ring->cur = (ring->cur + 1) % IWX_TX_RING_COUNT;
+ ring->cur_hw = (ring->cur_hw + 1) % sc->max_tfd_queue_size;
+ IWX_WRITE(sc, IWX_HBUS_TARG_WRPTR, ring->qid << 16 | ring->cur_hw);
+
+ /* Mark TX ring as full if we reach a certain threshold. */
+ if (++ring->queued > iwx_himark) {
+ sc->qfullmsk |= 1 << ring->qid;
+ }
+
+ sc->sc_tx_timer[ring->qid] = 15;
+
+ return 0;
+}
+
+static int
+iwx_flush_sta_tids(struct iwx_softc *sc, int sta_id, uint16_t tids)
+{
+ struct iwx_rx_packet *pkt;
+ struct iwx_tx_path_flush_cmd_rsp *resp;
+ struct iwx_tx_path_flush_cmd flush_cmd = {
+ .sta_id = htole32(sta_id),
+ .tid_mask = htole16(tids),
+ };
+ struct iwx_host_cmd hcmd = {
+ .id = IWX_TXPATH_FLUSH,
+ .len = { sizeof(flush_cmd), },
+ .data = { &flush_cmd, },
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(*pkt) + sizeof(*resp),
+ };
+ int err, resp_len, i, num_flushed_queues;
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ /* Some firmware versions don't provide a response. */
+ if (resp_len == 0)
+ goto out;
+ else if (resp_len != sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+
+ if (le16toh(resp->sta_id) != sta_id) {
+ err = EIO;
+ goto out;
+ }
+
+ num_flushed_queues = le16toh(resp->num_flushed_queues);
+ if (num_flushed_queues > IWX_TX_FLUSH_QUEUE_RSP) {
+ err = EIO;
+ goto out;
+ }
+
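+	/*
+	 * The response reports, per flushed queue, the ring index at which
+	 * the firmware stopped. Reclaim all frames before that index.
+	 */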
+ for (i = 0; i < num_flushed_queues; i++) {
+ struct iwx_flush_queue_info *queue_info = &resp->queues[i];
+ uint16_t tid = le16toh(queue_info->tid);
+ uint16_t read_after = le16toh(queue_info->read_after_flush);
+ uint16_t qid = le16toh(queue_info->queue_num);
+ struct iwx_tx_ring *txq;
+
+ if (qid >= nitems(sc->txq))
+ continue;
+
+ txq = &sc->txq[qid];
+ if (tid != txq->tid)
+ continue;
+
+ iwx_txq_advance(sc, txq, read_after);
+ }
+out:
+ iwx_free_resp(sc, &hcmd);
+ return err;
+}
+
+#define IWX_FLUSH_WAIT_MS 2000
+
+static int
+iwx_drain_sta(struct iwx_softc *sc, struct iwx_node* in, int drain)
+{
+ struct iwx_add_sta_cmd cmd;
+ int err;
+ uint32_t status;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.mac_id_n_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ cmd.sta_id = IWX_STATION_ID;
+ cmd.add_modify = IWX_STA_MODE_MODIFY;
+ cmd.station_flags = drain ? htole32(IWX_STA_FLG_DRAIN_FLOW) : 0;
+ cmd.station_flags_msk = htole32(IWX_STA_FLG_DRAIN_FLOW);
+
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA,
+ sizeof(cmd), &cmd, &status);
+ if (err) {
+ printf("%s: could not update sta (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ switch (status & IWX_ADD_STA_STATUS_MASK) {
+ case IWX_ADD_STA_SUCCESS:
+ break;
+ default:
+ err = EIO;
+ printf("%s: Couldn't %s draining for station\n",
+ DEVNAME(sc), drain ? "enable" : "disable");
+ break;
+ }
+
+ return err;
+}
+
+static int
+iwx_flush_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ sc->sc_flags |= IWX_FLAG_TXFLUSH;
+
+ err = iwx_drain_sta(sc, in, 1);
+ if (err)
+ goto done;
+
+ err = iwx_flush_sta_tids(sc, IWX_STATION_ID, 0xffff);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ goto done;
+ }
+
+	/*
+	 * XXX-THJ: iwx_wait_tx_queues_empty was here, but it was a no-op in
+	 * the FreeBSD driver and has since been replaced in OpenBSD.
+	 */
+
+ err = iwx_drain_sta(sc, in, 0);
+done:
+ sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
+ return err;
+}
+
+#define IWX_POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static int
+iwx_beacon_filter_send_cmd(struct iwx_softc *sc,
+ struct iwx_beacon_filter_cmd *cmd)
+{
+ return iwx_send_cmd_pdu(sc, IWX_REPLY_BEACON_FILTERING_CMD,
+ 0, sizeof(struct iwx_beacon_filter_cmd), cmd);
+}
+
+static int
+iwx_update_beacon_abort(struct iwx_softc *sc, struct iwx_node *in, int enable)
+{
+ struct iwx_beacon_filter_cmd cmd = {
+ IWX_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = htole32(1),
+ .ba_enable_beacon_abort = htole32(enable),
+ };
+
+ if (!sc->sc_bf.bf_enabled)
+ return 0;
+
+ sc->sc_bf.ba_enabled = enable;
+ return iwx_beacon_filter_send_cmd(sc, &cmd);
+}
+
+static void
+iwx_power_build_cmd(struct iwx_softc *sc, struct iwx_node *in,
+ struct iwx_mac_power_cmd *cmd)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ int dtim_period, dtim_msec, keep_alive;
+
+ cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ if (vap->iv_dtim_period)
+ dtim_period = vap->iv_dtim_period;
+ else
+ dtim_period = 1;
+
+ /*
+ * Regardless of power management state the driver must set
+ * keep alive period. FW will use it for sending keep alive NDPs
+ * immediately after association. Check that keep alive period
+ * is at least 3 * DTIM.
+ */
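+	/*
+	 * Note: ni_intval is in TU (1.024 ms); treating one TU as one
+	 * millisecond is close enough for this bound.
+	 */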
+ dtim_msec = dtim_period * ni->ni_intval;
+ keep_alive = MAX(3 * dtim_msec, 1000 * IWX_POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = roundup(keep_alive, 1000) / 1000;
+ cmd->keep_alive_seconds = htole16(keep_alive);
+
+ if (ic->ic_opmode != IEEE80211_M_MONITOR)
+ cmd->flags = htole16(IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+}
+
+static int
+iwx_power_mac_update_mode(struct iwx_softc *sc, struct iwx_node *in)
+{
+ int err;
+ int ba_enable;
+ struct iwx_mac_power_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwx_power_build_cmd(sc, in, &cmd);
+
+ err = iwx_send_cmd_pdu(sc, IWX_MAC_PM_POWER_TABLE, 0,
+ sizeof(cmd), &cmd);
+ if (err != 0)
+ return err;
+
+ ba_enable = !!(cmd.flags &
+ htole16(IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+ return iwx_update_beacon_abort(sc, in, ba_enable);
+}
+
+static int
+iwx_power_update_device(struct iwx_softc *sc)
+{
+ struct iwx_device_power_cmd cmd = { };
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ if (ic->ic_opmode != IEEE80211_M_MONITOR)
+ cmd.flags = htole16(IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+ return iwx_send_cmd_pdu(sc,
+ IWX_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
+}
+#if 0
+static int
+iwx_enable_beacon_filter(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct iwx_beacon_filter_cmd cmd = {
+ IWX_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = htole32(1),
+ .ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled),
+ };
+ int err;
+
+ err = iwx_beacon_filter_send_cmd(sc, &cmd);
+ if (err == 0)
+ sc->sc_bf.bf_enabled = 1;
+
+ return err;
+}
+#endif
+static int
+iwx_disable_beacon_filter(struct iwx_softc *sc)
+{
+ struct iwx_beacon_filter_cmd cmd;
+ int err;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ err = iwx_beacon_filter_send_cmd(sc, &cmd);
+ if (err == 0)
+ sc->sc_bf.bf_enabled = 0;
+
+ return err;
+}
+
+static int
+iwx_add_sta_cmd(struct iwx_softc *sc, struct iwx_node *in, int update)
+{
+ struct iwx_add_sta_cmd add_sta_cmd;
+ int err, i;
+ uint32_t status, aggsize;
+ const uint32_t max_aggsize = (IWX_STA_FLG_MAX_AGG_SIZE_64K >>
+ IWX_STA_FLG_MAX_AGG_SIZE_SHIFT);
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+
+ if (!update && (sc->sc_flags & IWX_FLAG_STA_ACTIVE))
+ panic("STA already added");
+
+ memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ add_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
+ add_sta_cmd.station_type = IWX_STA_GENERAL_PURPOSE;
+ } else {
+ add_sta_cmd.sta_id = IWX_STATION_ID;
+ add_sta_cmd.station_type = IWX_STA_LINK;
+ }
+ add_sta_cmd.mac_id_n_color
+ = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
+ if (!update) {
+ if (ic->ic_opmode == IEEE80211_M_MONITOR)
+ IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+ etheranyaddr);
+ else
+ IEEE80211_ADDR_COPY(&add_sta_cmd.addr,
+ in->in_macaddr);
+ }
+ DPRINTF(("%s: add_sta_cmd.addr=%s\n", __func__,
+ ether_sprintf(add_sta_cmd.addr)));
+ add_sta_cmd.add_modify = update ? 1 : 0;
+ add_sta_cmd.station_flags_msk
+ |= htole32(IWX_STA_FLG_FAT_EN_MSK | IWX_STA_FLG_MIMO_EN_MSK);
+
+ if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
+ add_sta_cmd.station_flags_msk
+ |= htole32(IWX_STA_FLG_MAX_AGG_SIZE_MSK |
+ IWX_STA_FLG_AGG_MPDU_DENS_MSK);
+
+ if (iwx_mimo_enabled(sc)) {
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ add_sta_cmd.station_flags |=
+ htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
+ } else {
+ int hasmimo = 0;
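+				/*
+				 * HT MCS 8-15 require a second spatial
+				 * stream; if the peer advertises any such
+				 * rate it can receive 2x2 MIMO.
+				 */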
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ if (htrs->rs_rates[i] > 7) {
+ hasmimo = 1;
+ break;
+ }
+ }
+ if (hasmimo) {
+ add_sta_cmd.station_flags |=
+ htole32(IWX_STA_FLG_MIMO_EN_MIMO2);
+ }
+ }
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_HT &&
+ IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+ add_sta_cmd.station_flags |= htole32(
+ IWX_STA_FLG_FAT_EN_40MHZ);
+ }
+
+
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ if (IEEE80211_IS_CHAN_VHT80(ni->ni_chan)) {
+ add_sta_cmd.station_flags |= htole32(
+ IWX_STA_FLG_FAT_EN_80MHZ);
+ }
+			/* XXX-misha: TODO: get real A-MPDU size */
+ aggsize = max_aggsize;
+ } else {
+ aggsize = _IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
+ IEEE80211_HTCAP_MAXRXAMPDU);
+ }
+
+ if (aggsize > max_aggsize)
+ aggsize = max_aggsize;
+ add_sta_cmd.station_flags |= htole32((aggsize <<
+ IWX_STA_FLG_MAX_AGG_SIZE_SHIFT) &
+ IWX_STA_FLG_MAX_AGG_SIZE_MSK);
+
+ switch (_IEEE80211_MASKSHIFT(le16toh(ni->ni_htparam),
+ IEEE80211_HTCAP_MPDUDENSITY)) {
+ case IEEE80211_HTCAP_MPDUDENSITY_2:
+ add_sta_cmd.station_flags
+ |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_2US);
+ break;
+ case IEEE80211_HTCAP_MPDUDENSITY_4:
+ add_sta_cmd.station_flags
+ |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_4US);
+ break;
+ case IEEE80211_HTCAP_MPDUDENSITY_8:
+ add_sta_cmd.station_flags
+ |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_8US);
+ break;
+ case IEEE80211_HTCAP_MPDUDENSITY_16:
+ add_sta_cmd.station_flags
+ |= htole32(IWX_STA_FLG_AGG_MPDU_DENS_16US);
+ break;
+ default:
+ break;
+ }
+ }
+
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA, sizeof(add_sta_cmd),
+ &add_sta_cmd, &status);
+ if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+ err = EIO;
+
+ return err;
+}
+
+static int
+iwx_rm_sta_cmd(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_rm_sta_cmd rm_sta_cmd;
+ int err;
+
+ if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
+ panic("sta already removed");
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ if (ic->ic_opmode == IEEE80211_M_MONITOR)
+ rm_sta_cmd.sta_id = IWX_MONITOR_STA_ID;
+ else
+ rm_sta_cmd.sta_id = IWX_STATION_ID;
+
+ err = iwx_send_cmd_pdu(sc, IWX_REMOVE_STA, 0, sizeof(rm_sta_cmd),
+ &rm_sta_cmd);
+
+ return err;
+}
+
+static int
+iwx_rm_sta(struct iwx_softc *sc, struct iwx_node *in)
+{
+ int err, i, cmd_ver;
+
+ err = iwx_flush_sta(sc, in);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /*
+ * New SCD_QUEUE_CONFIG API requires explicit queue removal
+ * before a station gets removed.
+ */
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ if (cmd_ver != 0 && cmd_ver != IWX_FW_CMD_VER_UNKNOWN) {
+ err = iwx_disable_mgmt_queue(sc);
+ if (err)
+ return err;
+ for (i = IWX_FIRST_AGG_TX_QUEUE;
+ i < IWX_LAST_AGG_TX_QUEUE; i++) {
+ struct iwx_tx_ring *ring = &sc->txq[i];
+ if ((sc->qenablemsk & (1 << i)) == 0)
+ continue;
+ err = iwx_disable_txq(sc, IWX_STATION_ID,
+ ring->qid, ring->tid);
+ if (err) {
+ printf("%s: could not disable Tx queue %d "
+ "(error %d)\n", DEVNAME(sc), ring->qid,
+ err);
+ return err;
+ }
+ }
+ }
+
+ err = iwx_rm_sta_cmd(sc, in);
+ if (err) {
+ printf("%s: could not remove STA (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ in->in_flags = 0;
+
+ sc->sc_rx_ba_sessions = 0;
+ sc->ba_rx.start_tidmask = 0;
+ sc->ba_rx.stop_tidmask = 0;
+ memset(sc->aggqid, 0, sizeof(sc->aggqid));
+ sc->ba_tx.start_tidmask = 0;
+ sc->ba_tx.stop_tidmask = 0;
+ for (i = IWX_FIRST_AGG_TX_QUEUE; i < IWX_LAST_AGG_TX_QUEUE; i++)
+ sc->qenablemsk &= ~(1 << i);
+
+#if 0
+ for (i = 0; i < IEEE80211_NUM_TID; i++) {
+ struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[i];
+ if (ba->ba_state != IEEE80211_BA_AGREED)
+ continue;
+ ieee80211_delba_request(ic, ni, 0, 1, i);
+ }
+#endif
+ /* Clear ampdu rx state (GOS-1525) */
+ for (i = 0; i < IWX_MAX_TID_COUNT; i++) {
+ struct iwx_rx_ba *ba = &sc->ni_rx_ba[i];
+ ba->ba_flags = 0;
+ }
+
+ return 0;
+}
+
+static uint8_t
+iwx_umac_scan_fill_channels(struct iwx_softc *sc,
+ struct iwx_scan_channel_cfg_umac *chan, size_t chan_nitems,
+ int n_ssids, uint32_t channel_cfg_flags)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ struct ieee80211_channel *c;
+ uint8_t nchan;
+ int j;
+
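+	/*
+	 * Translate the net80211 scan channel list into the firmware's
+	 * channel config entries; the v2 layout adds an explicit band field.
+	 */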
+ for (nchan = j = 0;
+ j < ss->ss_last &&
+ nchan < sc->sc_capa_n_scan_channels;
+ j++) {
+ uint8_t channel_num;
+
+ c = ss->ss_chans[j];
+ channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
+ if (isset(sc->sc_ucode_api,
+ IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER)) {
+ chan->v2.channel_num = channel_num;
+ if (IEEE80211_IS_CHAN_2GHZ(c))
+ chan->v2.band = IWX_PHY_BAND_24;
+ else
+ chan->v2.band = IWX_PHY_BAND_5;
+ chan->v2.iter_count = 1;
+ chan->v2.iter_interval = 0;
+ } else {
+ chan->v1.channel_num = channel_num;
+ chan->v1.iter_count = 1;
+ chan->v1.iter_interval = htole16(0);
+ }
+ chan->flags |= htole32(channel_cfg_flags);
+ chan++;
+ nchan++;
+ }
+
+ return nchan;
+}
+
+static int
+iwx_fill_probe_req(struct iwx_softc *sc, struct iwx_scan_probe_req *preq)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
+ struct ieee80211_rateset *rs;
+ size_t remain = sizeof(preq->buf);
+ uint8_t *frm, *pos;
+
+ memset(preq, 0, sizeof(*preq));
+
+ if (remain < sizeof(*wh) + 2)
+ return ENOBUFS;
+
+ /*
+ * Build a probe request frame. Most of the following code is a
+ * copy & paste of what is done in net80211.
+ */
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+ IEEE80211_FC0_SUBTYPE_PROBE_REQ;
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
+ *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
+ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
+
+ frm = (uint8_t *)(wh + 1);
+ *frm++ = IEEE80211_ELEMID_SSID;
+ *frm++ = 0;
+ /* hardware inserts SSID */
+
+ /* Tell the firmware where the MAC header is. */
+ preq->mac_header.offset = 0;
+ preq->mac_header.len = htole16(frm - (uint8_t *)wh);
+ remain -= frm - (uint8_t *)wh;
+
+ /* Fill in 2GHz IEs and tell firmware where they are. */
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+ if (remain < 4 + rs->rs_nrates)
+ return ENOBUFS;
+ } else if (remain < 2 + rs->rs_nrates)
+ return ENOBUFS;
+ preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+ frm = ieee80211_add_rates(frm, rs);
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+ frm = ieee80211_add_xrates(frm, rs);
+ remain -= frm - pos;
+
+ if (isset(sc->sc_enabled_capa,
+ IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
+ if (remain < 3)
+ return ENOBUFS;
+ *frm++ = IEEE80211_ELEMID_DSPARMS;
+ *frm++ = 1;
+ *frm++ = 0;
+ remain -= 3;
+ }
+ preq->band_data[0].len = htole16(frm - pos);
+
+ if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
+ /* Fill in 5GHz IEs. */
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+ if (remain < 4 + rs->rs_nrates)
+ return ENOBUFS;
+ } else if (remain < 2 + rs->rs_nrates)
+ return ENOBUFS;
+ preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+ frm = ieee80211_add_rates(frm, rs);
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+ frm = ieee80211_add_xrates(frm, rs);
+ preq->band_data[1].len = htole16(frm - pos);
+ remain -= frm - pos;
+		if (vap->iv_vht_flags & IEEE80211_FVHT_VHT) {
+			uint8_t *vht_start = frm;
+
+			if (remain < 14)
+				return ENOBUFS;
+			frm = ieee80211_add_vhtcap(frm, vap->iv_bss);
+			/* Subtract only the VHT IE; the rates were already counted. */
+			remain -= frm - vht_start;
+			preq->band_data[1].len = htole16(frm - pos);
+		}
+ }
+
+ /* Send 11n IEs on both 2GHz and 5GHz bands. */
+ preq->common_data.offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+ if (vap->iv_flags_ht & IEEE80211_FHT_HT) {
+ if (remain < 28)
+ return ENOBUFS;
+ frm = ieee80211_add_htcap(frm, vap->iv_bss);
+ /* XXX add WME info? */
+ remain -= frm - pos;
+ }
+
+ preq->common_data.len = htole16(frm - pos);
+
+ return 0;
+}
+
+static int
+iwx_config_umac_scan_reduced(struct iwx_softc *sc)
+{
+ struct iwx_scan_config scan_cfg;
+ struct iwx_host_cmd hcmd = {
+ .id = iwx_cmd_id(IWX_SCAN_CFG_CMD, IWX_LONG_GROUP, 0),
+ .len[0] = sizeof(scan_cfg),
+ .data[0] = &scan_cfg,
+ .flags = 0,
+ };
+ int cmdver;
+
+ if (!isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG)) {
+ printf("%s: firmware does not support reduced scan config\n",
+ DEVNAME(sc));
+ return ENOTSUP;
+ }
+
+ memset(&scan_cfg, 0, sizeof(scan_cfg));
+
+ /*
+ * SCAN_CFG version >= 5 implies that the broadcast
+ * STA ID field is deprecated.
+ */
+ cmdver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP, IWX_SCAN_CFG_CMD);
+ if (cmdver == IWX_FW_CMD_VER_UNKNOWN || cmdver < 5)
+ scan_cfg.bcast_sta_id = 0xff;
+
+ scan_cfg.tx_chains = htole32(iwx_fw_valid_tx_ant(sc));
+ scan_cfg.rx_chains = htole32(iwx_fw_valid_rx_ant(sc));
+
+ return iwx_send_cmd(sc, &hcmd);
+}
+
+static uint16_t
+iwx_scan_umac_flags_v2(struct iwx_softc *sc, int bgscan)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ uint16_t flags = 0;
+
+ if (ss->ss_nssid == 0) {
+ DPRINTF(("%s: Passive scan started\n", __func__));
+ flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+ }
+
+ flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL;
+ flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE;
+ flags |= IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL;
+
+ return flags;
+}
+
+#define IWX_SCAN_DWELL_ACTIVE 10
+#define IWX_SCAN_DWELL_PASSIVE 110
+
+/* adaptive dwell max budget time [TU] for full scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN 300
+/* adaptive dwell max budget time [TU] for directed scan */
+#define IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN 100
+/* adaptive dwell default high band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_HB_N_APS 8
+/* adaptive dwell default low band APs number */
+#define IWX_SCAN_ADWELL_DEFAULT_LB_N_APS 2
+/* adaptive dwell default APs number in social channels (1, 6, 11) */
+#define IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL 10
+/* adaptive dwell number of APs override for p2p friendly GO channels */
+#define IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY 10
+/* adaptive dwell number of APs override for social channels */
+#define IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS 2
+
+static void
+iwx_scan_umac_dwell_v10(struct iwx_softc *sc,
+ struct iwx_scan_general_params_v10 *general_params, int bgscan)
+{
+ uint32_t suspend_time, max_out_time;
+ uint8_t active_dwell, passive_dwell;
+
+ active_dwell = IWX_SCAN_DWELL_ACTIVE;
+ passive_dwell = IWX_SCAN_DWELL_PASSIVE;
+
+ general_params->adwell_default_social_chn =
+ IWX_SCAN_ADWELL_DEFAULT_N_APS_SOCIAL;
+ general_params->adwell_default_2g = IWX_SCAN_ADWELL_DEFAULT_LB_N_APS;
+ general_params->adwell_default_5g = IWX_SCAN_ADWELL_DEFAULT_HB_N_APS;
+
+ if (bgscan)
+ general_params->adwell_max_budget =
+ htole16(IWX_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
+ else
+ general_params->adwell_max_budget =
+ htole16(IWX_SCAN_ADWELL_MAX_BUDGET_FULL_SCAN);
+
+ general_params->scan_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
+	/* Keep host order here; the stores below apply htole32() once. */
+	if (bgscan) {
+		max_out_time = 120;
+		suspend_time = 120;
+	} else {
+		max_out_time = 0;
+		suspend_time = 0;
+	}
+ general_params->max_out_of_time[IWX_SCAN_LB_LMAC_IDX] =
+ htole32(max_out_time);
+ general_params->suspend_time[IWX_SCAN_LB_LMAC_IDX] =
+ htole32(suspend_time);
+ general_params->max_out_of_time[IWX_SCAN_HB_LMAC_IDX] =
+ htole32(max_out_time);
+ general_params->suspend_time[IWX_SCAN_HB_LMAC_IDX] =
+ htole32(suspend_time);
+
+ general_params->active_dwell[IWX_SCAN_LB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[IWX_SCAN_LB_LMAC_IDX] = passive_dwell;
+ general_params->active_dwell[IWX_SCAN_HB_LMAC_IDX] = active_dwell;
+ general_params->passive_dwell[IWX_SCAN_HB_LMAC_IDX] = passive_dwell;
+}
+
+static void
+iwx_scan_umac_fill_general_p_v10(struct iwx_softc *sc,
+ struct iwx_scan_general_params_v10 *gp, uint16_t gen_flags, int bgscan)
+{
+ iwx_scan_umac_dwell_v10(sc, gp, bgscan);
+
+ gp->flags = htole16(gen_flags);
+
+ if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1)
+ gp->num_of_fragments[IWX_SCAN_LB_LMAC_IDX] = 3;
+ if (gen_flags & IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
+ gp->num_of_fragments[IWX_SCAN_HB_LMAC_IDX] = 3;
+
+ gp->scan_start_mac_id = 0;
+}
+
+static void
+iwx_scan_umac_fill_ch_p_v6(struct iwx_softc *sc,
+ struct iwx_scan_channel_params_v6 *cp, uint32_t channel_cfg_flags,
+ int n_ssid)
+{
+ cp->flags = IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER;
+
+ cp->count = iwx_umac_scan_fill_channels(sc, cp->channel_config,
+ nitems(cp->channel_config), n_ssid, channel_cfg_flags);
+
+ cp->n_aps_override[0] = IWX_SCAN_ADWELL_N_APS_GO_FRIENDLY;
+ cp->n_aps_override[1] = IWX_SCAN_ADWELL_N_APS_SOCIAL_CHS;
+}
+
+static int
+iwx_umac_scan_v14(struct iwx_softc *sc, int bgscan)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_scan_state *ss = ic->ic_scan;
+ struct iwx_host_cmd hcmd = {
+ .id = iwx_cmd_id(IWX_SCAN_REQ_UMAC, IWX_LONG_GROUP, 0),
+ .len = { 0, },
+ .data = { NULL, },
+ .flags = 0,
+ };
+ struct iwx_scan_req_umac_v14 *cmd = &sc->sc_umac_v14_cmd;
+ struct iwx_scan_req_params_v14 *scan_p;
+ int err, async = bgscan, n_ssid = 0;
+ uint16_t gen_flags;
+ uint32_t bitmap_ssid = 0;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ bzero(cmd, sizeof(struct iwx_scan_req_umac_v14));
+
+ scan_p = &cmd->scan_params;
+
+ cmd->ooc_priority = htole32(IWX_SCAN_PRIORITY_EXT_6);
+ cmd->uid = htole32(0);
+
+ gen_flags = iwx_scan_umac_flags_v2(sc, bgscan);
+ iwx_scan_umac_fill_general_p_v10(sc, &scan_p->general_params,
+ gen_flags, bgscan);
+
+ scan_p->periodic_params.schedule[0].interval = htole16(0);
+ scan_p->periodic_params.schedule[0].iter_count = 1;
+
+ err = iwx_fill_probe_req(sc, &scan_p->probe_params.preq);
+ if (err) {
+ printf("%s: iwx_fill_probe_req failed (error %d)\n", __func__,
+ err);
+ return err;
+ }
+
+	for (int i = 0; i < ss->ss_nssid; i++) {
+		scan_p->probe_params.direct_scan[i].id = IEEE80211_ELEMID_SSID;
+		scan_p->probe_params.direct_scan[i].len =
+		    MIN(ss->ss_ssid[i].len, IEEE80211_NWID_LEN);
+		memcpy(scan_p->probe_params.direct_scan[i].ssid,
+		    ss->ss_ssid[i].ssid,
+		    scan_p->probe_params.direct_scan[i].len);
+		DPRINTF(("%s: active scan started for ssid %.*s\n", __func__,
+		    (int)scan_p->probe_params.direct_scan[i].len,
+		    ss->ss_ssid[i].ssid));
+		n_ssid++;
+		bitmap_ssid |= (1 << i);
+	}
+ DPRINTF(("%s: bitmap_ssid=0x%x\n", __func__, bitmap_ssid));
+
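+	/*
+	 * bitmap_ssid marks which direct-scan SSIDs apply on each channel;
+	 * it is passed down as the per-channel config flags.
+	 */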
+ iwx_scan_umac_fill_ch_p_v6(sc, &scan_p->channel_params, bitmap_ssid,
+ n_ssid);
+
+ hcmd.len[0] = sizeof(*cmd);
+ hcmd.data[0] = (void *)cmd;
+ hcmd.flags |= async ? IWX_CMD_ASYNC : 0;
+
+ err = iwx_send_cmd(sc, &hcmd);
+ return err;
+}
+
+static void
+iwx_mcc_update(struct iwx_softc *sc, struct iwx_mcc_chub_notif *notif)
+{
+ char alpha2[3];
+
+ snprintf(alpha2, sizeof(alpha2), "%c%c",
+ (le16toh(notif->mcc) & 0xff00) >> 8, le16toh(notif->mcc) & 0xff);
+
+ IWX_DPRINTF(sc, IWX_DEBUG_FW, "%s: firmware has detected regulatory domain '%s' "
+ "(0x%x)\n", DEVNAME(sc), alpha2, le16toh(notif->mcc));
+
+ /* TODO: Schedule a task to send MCC_UPDATE_CMD? */
+}
+
+uint8_t
+iwx_ridx2rate(struct ieee80211_rateset *rs, int ridx)
+{
+ int i;
+ uint8_t rval;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
+ if (rval == iwx_rates[ridx].rate)
+ return rs->rs_rates[i];
+ }
+
+ return 0;
+}
+
+static int
+iwx_rval2ridx(int rval)
+{
+ int ridx;
+
+ for (ridx = 0; ridx < nitems(iwx_rates); ridx++) {
+ if (iwx_rates[ridx].plcp == IWX_RATE_INVM_PLCP)
+ continue;
+ if (rval == iwx_rates[ridx].rate)
+ break;
+ }
+
+ return ridx;
+}
+
+static void
+iwx_ack_rates(struct iwx_softc *sc, struct iwx_node *in, int *cck_rates,
+ int *ofdm_rates)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ int lowest_present_ofdm = -1;
+ int lowest_present_cck = -1;
+ uint8_t cck = 0;
+ uint8_t ofdm = 0;
+ int i;
+
+ if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
+ IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
+ for (i = IWX_FIRST_CCK_RATE; i < IWX_FIRST_OFDM_RATE; i++) {
+ if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+ continue;
+ cck |= (1 << i);
+ if (lowest_present_cck == -1 || lowest_present_cck > i)
+ lowest_present_cck = i;
+ }
+ }
+ for (i = IWX_FIRST_OFDM_RATE; i <= IWX_LAST_NON_HT_RATE; i++) {
+ if ((iwx_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
+ continue;
+ ofdm |= (1 << (i - IWX_FIRST_OFDM_RATE));
+ if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
+ lowest_present_ofdm = i;
+ }
+
+ /*
+ * Now we've got the basic rates as bitmaps in the ofdm and cck
+ * variables. This isn't sufficient though, as there might not
+ * be all the right rates in the bitmap. E.g. if the only basic
+ * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
+ * and 6 Mbps because the 802.11-2007 standard says in 9.6:
+ *
+ * [...] a STA responding to a received frame shall transmit
+ * its Control Response frame [...] at the highest rate in the
+ * BSSBasicRateSet parameter that is less than or equal to the
+ * rate of the immediately previous frame in the frame exchange
+ * sequence ([...]) and that is of the same modulation class
+ * ([...]) as the received frame. If no rate contained in the
+ * BSSBasicRateSet parameter meets these conditions, then the
+ * control frame sent in response to a received frame shall be
+ * transmitted at the highest mandatory rate of the PHY that is
+ * less than or equal to the rate of the received frame, and
+ * that is of the same modulation class as the received frame.
+ *
+ * As a consequence, we need to add all mandatory rates that are
+ * lower than all of the basic rates to these bitmaps.
+ */
+
+ if (IWX_RATE_24M_INDEX < lowest_present_ofdm)
+ ofdm |= IWX_RATE_BIT_MSK(24) >> IWX_FIRST_OFDM_RATE;
+ if (IWX_RATE_12M_INDEX < lowest_present_ofdm)
+ ofdm |= IWX_RATE_BIT_MSK(12) >> IWX_FIRST_OFDM_RATE;
+ /* 6M already there or needed so always add */
+ ofdm |= IWX_RATE_BIT_MSK(6) >> IWX_FIRST_OFDM_RATE;
+
+ /*
+ * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
+ * Note, however:
+ * - if no CCK rates are basic, it must be ERP since there must
+ * be some basic rates at all, so they're OFDM => ERP PHY
+ * (or we're in 5 GHz, and the cck bitmap will never be used)
+ * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
+ * - if 5.5M is basic, 1M and 2M are mandatory
+ * - if 2M is basic, 1M is mandatory
+ * - if 1M is basic, that's the only valid ACK rate.
+ * As a consequence, it's not as complicated as it sounds, just add
+ * any lower rates to the ACK rate bitmap.
+ */
+ if (IWX_RATE_11M_INDEX < lowest_present_cck)
+ cck |= IWX_RATE_BIT_MSK(11) >> IWX_FIRST_CCK_RATE;
+ if (IWX_RATE_5M_INDEX < lowest_present_cck)
+ cck |= IWX_RATE_BIT_MSK(5) >> IWX_FIRST_CCK_RATE;
+ if (IWX_RATE_2M_INDEX < lowest_present_cck)
+ cck |= IWX_RATE_BIT_MSK(2) >> IWX_FIRST_CCK_RATE;
+ /* 1M already there or needed so always add */
+ cck |= IWX_RATE_BIT_MSK(1) >> IWX_FIRST_CCK_RATE;
+
+ *cck_rates = cck;
+ *ofdm_rates = ofdm;
+}
+
+static void
+iwx_mac_ctxt_cmd_common(struct iwx_softc *sc, struct iwx_node *in,
+ struct iwx_mac_ctx_cmd *cmd, uint32_t action)
+{
+#define IWX_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ int cck_ack_rates, ofdm_ack_rates;
+
+ cmd->id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(in->in_id,
+ in->in_color));
+ cmd->action = htole32(action);
+
+ if (action == IWX_FW_CTXT_ACTION_REMOVE)
+ return;
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR)
+ cmd->mac_type = htole32(IWX_FW_MAC_TYPE_LISTENER);
+ else if (ic->ic_opmode == IEEE80211_M_STA)
+ cmd->mac_type = htole32(IWX_FW_MAC_TYPE_BSS_STA);
+ else
+ panic("unsupported operating mode %d", ic->ic_opmode);
+ cmd->tsf_id = htole32(IWX_TSF_ID_A);
+
+ IEEE80211_ADDR_COPY(cmd->node_addr, vap->iv_myaddr);
+ DPRINTF(("%s: cmd->node_addr=%s\n", __func__,
+ ether_sprintf(cmd->node_addr)));
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ IEEE80211_ADDR_COPY(cmd->bssid_addr, etherbroadcastaddr);
+ return;
+ }
+
+ IEEE80211_ADDR_COPY(cmd->bssid_addr, in->in_macaddr);
+ DPRINTF(("%s: cmd->bssid_addr=%s\n", __func__,
+ ether_sprintf(cmd->bssid_addr)));
+ iwx_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
+ cmd->cck_rates = htole32(cck_ack_rates);
+ cmd->ofdm_rates = htole32(ofdm_ack_rates);
+
+ cmd->cck_short_preamble
+ = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ ? IWX_MAC_FLG_SHORT_PREAMBLE : 0);
+ cmd->short_slot
+ = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
+ ? IWX_MAC_FLG_SHORT_SLOT : 0);
+
+ struct chanAccParams chp;
+ ieee80211_wme_vap_getparams(vap, &chp);
+
+ for (int i = 0; i < WME_NUM_AC; i++) {
+ int txf = iwx_ac_to_tx_fifo[i];
+ cmd->ac[txf].cw_min = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmin);
+ cmd->ac[txf].cw_max = IWX_EXP2(chp.cap_wmeParams[i].wmep_logcwmax);
+ cmd->ac[txf].aifsn = chp.cap_wmeParams[i].wmep_aifsn;
+ cmd->ac[txf].fifos_mask = (1 << txf);
+ cmd->ac[txf].edca_txop = htole16(chp.cap_wmeParams[i].wmep_txopLimit * 32);
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_QOS) {
+ DPRINTF(("%s: === IEEE80211_NODE_QOS\n", __func__));
+ cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_UPDATE_EDCA);
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ switch (vap->iv_curhtprotmode) {
+ case IEEE80211_HTINFO_OPMODE_PURE:
+ break;
+ case IEEE80211_HTINFO_OPMODE_PROTOPT:
+ case IEEE80211_HTINFO_OPMODE_MIXED:
+ cmd->protection_flags |=
+ htole32(IWX_MAC_PROT_FLG_HT_PROT |
+ IWX_MAC_PROT_FLG_FAT_PROT);
+ break;
+ case IEEE80211_HTINFO_OPMODE_HT20PR:
+ if (in->in_phyctxt &&
+ (in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_ABOVE ||
+ in->in_phyctxt->sco == IEEE80211_HTINFO_2NDCHAN_BELOW)) {
+ cmd->protection_flags |=
+ htole32(IWX_MAC_PROT_FLG_HT_PROT |
+ IWX_MAC_PROT_FLG_FAT_PROT);
+ }
+ break;
+ default:
+ break;
+ }
+ cmd->qos_flags |= htole32(IWX_MAC_QOS_FLG_TGN);
+ DPRINTF(("%s: === IWX_MAC_QOS_FLG_TGN\n", __func__));
+ }
+
+ if (ic->ic_flags & IEEE80211_F_USEPROT)
+ cmd->protection_flags |= htole32(IWX_MAC_PROT_FLG_TGG_PROTECT);
+ cmd->filter_flags = htole32(IWX_MAC_FILTER_ACCEPT_GRP);
+#undef IWX_EXP2
+}
+
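+/*
+ * EDCA conversion example (illustrative): net80211 stores contention
+ * windows as exponents, so a wmep_logcwmin of 4 becomes
+ * cw_min = IWX_EXP2(4) = 15 slots. TXOP limits are kept in units of
+ * 32 microseconds, so a wmep_txopLimit of 94 is sent to the firmware
+ * as edca_txop = 94 * 32 = 3008 (presumably microseconds).
+ */
+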
+static void
+iwx_mac_ctxt_cmd_fill_sta(struct iwx_softc *sc, struct iwx_node *in,
+ struct iwx_mac_data_sta *sta, int assoc)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint32_t dtim_off;
+ uint64_t tsf;
+ int dtim_period;
+
+ dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
+ tsf = le64toh(ni->ni_tstamp.tsf);
+ dtim_period = vap->iv_dtim_period;
+
+ sta->is_assoc = htole32(assoc);
+
+ if (assoc) {
+ sta->dtim_time = htole32(tsf + dtim_off);
+ sta->dtim_tsf = htole64(tsf + dtim_off);
+ // XXX: unset in iwm
+ sta->assoc_beacon_arrive_time = 0;
+ }
+ sta->bi = htole32(ni->ni_intval);
+ sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
+ sta->data_policy = htole32(0);
+ sta->listen_interval = htole32(10);
+ sta->assoc_id = htole32(ni->ni_associd);
+}
+
+static int
+iwx_mac_ctxt_cmd(struct iwx_softc *sc, struct iwx_node *in, uint32_t action,
+ int assoc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_mac_ctx_cmd cmd;
+ int active = (sc->sc_flags & IWX_FLAG_MAC_ACTIVE);
+
+ if (action == IWX_FW_CTXT_ACTION_ADD && active)
+ panic("MAC already added");
+ if (action == IWX_FW_CTXT_ACTION_REMOVE && !active)
+ panic("MAC already removed");
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ iwx_mac_ctxt_cmd_common(sc, in, &cmd, action);
+
+ if (action == IWX_FW_CTXT_ACTION_REMOVE) {
+ return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0,
+ sizeof(cmd), &cmd);
+ }
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_PROMISC |
+ IWX_MAC_FILTER_IN_CONTROL_AND_MGMT |
+ IWX_MAC_FILTER_ACCEPT_GRP |
+ IWX_MAC_FILTER_IN_BEACON |
+ IWX_MAC_FILTER_IN_PROBE_REQUEST |
+ IWX_MAC_FILTER_IN_CRC32);
+ // XXX: dtim period is in vap
+ } else if (!assoc || !ni->ni_associd /*|| !ni->ni_dtimperiod*/) {
+ /*
+ * Allow beacons to pass through as long as we are not
+ * associated or we do not have dtim period information.
+ */
+ cmd.filter_flags |= htole32(IWX_MAC_FILTER_IN_BEACON);
+ }
+ iwx_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
+ return iwx_send_cmd_pdu(sc, IWX_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
+}
+
+static int
+iwx_clear_statistics(struct iwx_softc *sc)
+{
+ struct iwx_statistics_cmd scmd = {
+ .flags = htole32(IWX_STATISTICS_FLG_CLEAR)
+ };
+ struct iwx_host_cmd cmd = {
+ .id = IWX_STATISTICS_CMD,
+ .len[0] = sizeof(scmd),
+ .data[0] = &scmd,
+ .flags = IWX_CMD_WANT_RESP,
+ .resp_pkt_len = sizeof(struct iwx_notif_statistics),
+ };
+ int err;
+
+ err = iwx_send_cmd(sc, &cmd);
+ if (err)
+ return err;
+
+ iwx_free_resp(sc, &cmd);
+ return 0;
+}
+
+static int
+iwx_scan(struct iwx_softc *sc)
+{
+ int err;
+
+ err = iwx_umac_scan_v14(sc, 0);
+ if (err) {
+ printf("%s: could not initiate scan\n", DEVNAME(sc));
+ return err;
+ }
+ return 0;
+}
+
+static int
+iwx_bgscan(struct ieee80211com *ic)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ int err;
+
+ err = iwx_umac_scan_v14(sc, 1);
+ if (err) {
+ printf("%s: could not initiate scan\n", DEVNAME(sc));
+ return err;
+ }
+ return 0;
+}
+
+static int
+iwx_enable_mgmt_queue(struct iwx_softc *sc)
+{
+ int err;
+
+ sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
+
+ /*
+ * Non-QoS frames use the "MGMT" TID and queue.
+ * Other TIDs and data queues are reserved for QoS data frames.
+ */
+ err = iwx_enable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
+ IWX_MGMT_TID, IWX_TX_RING_COUNT);
+ if (err) {
+ printf("%s: could not enable Tx queue %d (error %d)\n",
+ DEVNAME(sc), sc->first_data_qid, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+iwx_disable_mgmt_queue(struct iwx_softc *sc)
+{
+ int err, cmd_ver;
+
+ /* Explicit removal is only required with old SCD_QUEUE_CFG command. */
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD);
+ if (cmd_ver == 0 || cmd_ver == IWX_FW_CMD_VER_UNKNOWN)
+ return 0;
+
+ sc->first_data_qid = IWX_DQA_CMD_QUEUE + 1;
+
+ err = iwx_disable_txq(sc, IWX_STATION_ID, sc->first_data_qid,
+ IWX_MGMT_TID);
+ if (err) {
+ printf("%s: could not disable Tx queue %d (error %d)\n",
+ DEVNAME(sc), sc->first_data_qid, err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+iwx_rs_rval2idx(uint8_t rval)
+{
+ /* Firmware expects indices which match our 11g rate set. */
+ const struct ieee80211_rateset *rs = &ieee80211_std_rateset_11g;
+ int i;
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rval)
+ return i;
+ }
+
+ return -1;
+}
+
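+/*
+ * Example (illustrative): net80211 rate values are in 500 kbps units,
+ * so OFDM 6 Mbps arrives as rval = 12. In the standard 11g rate set
+ * { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } that matches
+ * index 4, which is the index the firmware expects for 6 Mbps.
+ */
+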
+static uint16_t
+iwx_rs_ht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int rsidx)
+{
+ uint16_t htrates = 0;
+ struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+ int i;
+
+ if (rsidx == IEEE80211_HT_RATESET_SISO) {
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ if (htrs->rs_rates[i] <= 7)
+ htrates |= (1 << htrs->rs_rates[i]);
+ }
+ } else if (rsidx == IEEE80211_HT_RATESET_MIMO2) {
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ if (htrs->rs_rates[i] > 7 && htrs->rs_rates[i] <= 15)
+ htrates |= (1 << (htrs->rs_rates[i] - 8));
+ }
+ } else
+ panic(("iwx_rs_ht_rates"));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE,
+ "%s:%d rsidx=%i htrates=0x%x\n", __func__, __LINE__, rsidx, htrates);
+
+ return htrates;
+}
+
+uint16_t
+iwx_rs_vht_rates(struct iwx_softc *sc, struct ieee80211_node *ni, int num_ss)
+{
+ uint16_t rx_mcs;
+ int max_mcs = -1;
+#define IEEE80211_VHT_MCS_FOR_SS_MASK(n) (0x3 << (2*((n)-1)))
+#define IEEE80211_VHT_MCS_FOR_SS_SHIFT(n) (2*((n)-1))
+ rx_mcs = (ni->ni_vht_mcsinfo.tx_mcs_map &
+ IEEE80211_VHT_MCS_FOR_SS_MASK(num_ss)) >>
+ IEEE80211_VHT_MCS_FOR_SS_SHIFT(num_ss);
+
+ switch (rx_mcs) {
+ case IEEE80211_VHT_MCS_NOT_SUPPORTED:
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_7:
+ max_mcs = 7;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_8:
+ max_mcs = 8;
+ break;
+ case IEEE80211_VHT_MCS_SUPPORT_0_9:
+ /* Disable VHT MCS 9 for 20MHz-only stations. */
+ if ((ni->ni_htcap & IEEE80211_HTCAP_CHWIDTH40) == 0)
+ max_mcs = 8;
+ else
+ max_mcs = 9;
+ break;
+ default:
+ /* Should not happen; the values above cover the possible range. */
+ panic("invalid VHT Rx MCS value %u", rx_mcs);
+ }
+
+ return ((1 << (max_mcs + 1)) - 1);
+}
+
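+/*
+ * Worked example (illustrative): the VHT MCS map packs two bits per
+ * spatial stream, so for num_ss = 2 the mask is 0x3 << 2 with a shift
+ * of 2. A peer reporting IEEE80211_VHT_MCS_SUPPORT_0_9 on a
+ * 40MHz-capable channel yields max_mcs = 9 and a return value of
+ * (1 << 10) - 1 = 0x3ff, i.e. MCS 0-9 enabled.
+ */
+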
+static int
+iwx_rs_init_v3(struct iwx_softc *sc, struct iwx_node *in)
+{
+#if 1
+ panic("iwx: Trying to init rate set on untested version");
+#else
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct iwx_tlc_config_cmd_v3 cfg_cmd;
+ uint32_t cmd_id;
+ int i;
+ size_t cmd_size = sizeof(cfg_cmd);
+
+ memset(&cfg_cmd, 0, sizeof(cfg_cmd));
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ int idx = iwx_rs_rval2idx(rval);
+ if (idx == -1)
+ return EINVAL;
+ cfg_cmd.non_ht_rates |= (1 << idx);
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 1));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 2));
+ } else if (ni->ni_flags & IEEE80211_NODE_HT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_SISO));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_MIMO2));
+ } else
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
+
+ cfg_cmd.sta_id = IWX_STATION_ID;
+ if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
+ in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ else
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+ cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
+ if (ni->ni_flags & IEEE80211_NODE_VHT)
+ cfg_cmd.max_mpdu_len = htole16(3895);
+ else
+ cfg_cmd.max_mpdu_len = htole16(3839);
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ if (ieee80211_node_supports_ht_sgi20(ni)) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_20MHZ);
+ }
+ if (ieee80211_node_supports_ht_sgi40(ni)) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_40MHZ);
+ }
+ }
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
+ ieee80211_node_supports_vht_sgi80(ni))
+ cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
+
+ cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
+ return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
+#endif
+}
+
+static int
+iwx_rs_init_v4(struct iwx_softc *sc, struct iwx_node *in)
+{
+ struct ieee80211_node *ni = &in->in_ni;
+ struct ieee80211_rateset *rs = &ni->ni_rates;
+ struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+ struct iwx_tlc_config_cmd_v4 cfg_cmd;
+ uint32_t cmd_id;
+ int i;
+ int sgi80 = 0;
+ size_t cmd_size = sizeof(cfg_cmd);
+
+ memset(&cfg_cmd, 0, sizeof(cfg_cmd));
+
+ for (i = 0; i < rs->rs_nrates; i++) {
+ uint8_t rval = rs->rs_rates[i] & IEEE80211_RATE_VAL;
+ int idx = iwx_rs_rval2idx(rval);
+ if (idx == -1)
+ return EINVAL;
+ cfg_cmd.non_ht_rates |= (1 << idx);
+ }
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ DPRINTF(("%s: htrate=%i\n", __func__, htrs->rs_rates[i]));
+ }
+
+ if (ni->ni_flags & IEEE80211_NODE_VHT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_VHT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 1));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_vht_rates(sc, ni, 2));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
+ } else if (ni->ni_flags & IEEE80211_NODE_HT) {
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_HT;
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_SISO));
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80] =
+ htole16(iwx_rs_ht_rates(sc, ni,
+ IEEE80211_HT_RATESET_MIMO2));
+
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d SISO=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_1][IWX_TLC_MCS_PER_BW_80]);
+ IWX_DPRINTF(sc, IWX_DEBUG_TXRATE, "%s:%d MIMO2=0x%x\n",
+ __func__, __LINE__,
+ cfg_cmd.ht_rates[IWX_TLC_NSS_2][IWX_TLC_MCS_PER_BW_80]);
+ } else
+ cfg_cmd.mode = IWX_TLC_MNG_MODE_NON_HT;
+
+ cfg_cmd.sta_id = IWX_STATION_ID;
+#if 0
+ if (in->in_phyctxt->vht_chan_width == IEEE80211_VHTOP0_CHAN_WIDTH_80)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ else if (in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCA ||
+ in->in_phyctxt->sco == IEEE80211_HTOP0_SCO_SCB)
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ else
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+#endif
+ if (IEEE80211_IS_CHAN_VHT80(in->in_ni.ni_chan)) {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_80MHZ;
+ } else if (IEEE80211_IS_CHAN_HT40(in->in_ni.ni_chan)) {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_40MHZ;
+ } else {
+ cfg_cmd.max_ch_width = IWX_TLC_MNG_CH_WIDTH_20MHZ;
+ }
+
+ cfg_cmd.chains = IWX_TLC_MNG_CHAIN_A_MSK | IWX_TLC_MNG_CHAIN_B_MSK;
+ if (ni->ni_flags & IEEE80211_NODE_VHT)
+ cfg_cmd.max_mpdu_len = htole16(3895);
+ else
+ cfg_cmd.max_mpdu_len = htole16(3839);
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_20MHZ);
+ }
+ if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) {
+ cfg_cmd.sgi_ch_width_supp |= (1 <<
+ IWX_TLC_MNG_CH_WIDTH_40MHZ);
+ }
+ }
+ sgi80 = _IEEE80211_MASKSHIFT(ni->ni_vhtcap,
+ IEEE80211_VHTCAP_SHORT_GI_80);
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) && sgi80) {
+ cfg_cmd.sgi_ch_width_supp |= (1 << IWX_TLC_MNG_CH_WIDTH_80MHZ);
+ }
+
+ cmd_id = iwx_cmd_id(IWX_TLC_MNG_CONFIG_CMD, IWX_DATA_PATH_GROUP, 0);
+ return iwx_send_cmd_pdu(sc, cmd_id, IWX_CMD_ASYNC, cmd_size, &cfg_cmd);
+}
+
+static int
+iwx_rs_init(struct iwx_softc *sc, struct iwx_node *in)
+{
+ int cmd_ver;
+
+ cmd_ver = iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_TLC_MNG_CONFIG_CMD);
+ if (cmd_ver == 4)
+ return iwx_rs_init_v4(sc, in);
+ else
+ return iwx_rs_init_v3(sc, in);
+}
+
+/**
+ * @brief Turn the given TX rate control notification into an ieee80211_node_txrate
+ *
+ * This populates the given node's TX rate from the firmware rate
+ * control notification.
+ *
+ * @param sc driver softc
+ * @param notif firmware notification
+ * @param ni the ieee80211_node to update
+ * @returns true if updated, false if not
+ */
+static bool
+iwx_rs_update_node_txrate(struct iwx_softc *sc,
+ const struct iwx_tlc_update_notif *notif, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ /* XXX TODO: create an inline function in if_iwxreg.h? */
+ static const int cck_idx_to_rate[] = { 2, 4, 11, 22, 2, 2, 2, 2 };
+ static const int ofdm_idx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108 };
+
+ uint32_t rate_n_flags;
+ uint32_t type;
+
+ /* Extract the rate and command version */
+ rate_n_flags = le32toh(notif->rate);
+
+ if (sc->sc_rate_n_flags_version != 2) {
+ net80211_ic_printf(ic,
+ "%s: unsupported rate_n_flags version (%d)\n",
+ __func__,
+ sc->sc_rate_n_flags_version);
+ return (false);
+ }
+
+ if (sc->sc_debug & IWX_DEBUG_TXRATE)
+ print_ratenflags(__func__, __LINE__,
+ rate_n_flags, sc->sc_rate_n_flags_version);
+
+ type = (rate_n_flags & IWX_RATE_MCS_MOD_TYPE_MSK);
+ switch (type) {
+ case IWX_RATE_MCS_CCK_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ cck_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_LEGACY_OFDM_MSK:
+ ieee80211_node_set_txrate_dot11rate(ni,
+ ofdm_idx_to_rate[rate_n_flags & IWX_RATE_LEGACY_RATE_MSK]);
+ return (true);
+ case IWX_RATE_MCS_HT_MSK:
+ /*
+ * TODO: the current API doesn't include channel width
+ * and other flags, so we can't accurately store them yet!
+ *
+ * channel width: (flags & IWX_RATE_MCS_CHAN_WIDTH_MSK)
+ * >> IWX_RATE_MCS_CHAN_WIDTH_POS)
+ * LDPC: (flags & (1 << 16))
+ */
+ ieee80211_node_set_txrate_ht_mcsrate(ni,
+ IWX_RATE_HT_MCS_INDEX(rate_n_flags));
+ return (true);
+ case IWX_RATE_MCS_VHT_MSK:
+ /* TODO: same comment on channel width, etc above */
+ ieee80211_node_set_txrate_vht_rate(ni,
+ IWX_RATE_VHT_MCS_CODE(rate_n_flags),
+ IWX_RATE_VHT_MCS_NSS(rate_n_flags));
+ return (true);
+ default:
+ net80211_ic_printf(ic,
+ "%s: unsupported chosen rate type in "
+ "IWX_RATE_MCS_MOD_TYPE (%d)\n", __func__,
+ type >> IWX_RATE_MCS_MOD_TYPE_POS);
+ return (false);
+ }
+
+ /* Default: if we get here, we didn't successfully update anything */
+ return (false);
+}
+
+/**
+ * @brief Process a firmware rate control update and update net80211.
+ *
+ * Since firmware is doing rate control, this just needs to update
+ * the txrate in the ieee80211_node entry.
+ */
+static void
+iwx_rs_update(struct iwx_softc *sc, struct iwx_tlc_update_notif *notif)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ /* XXX TODO: get a node ref! */
+ struct ieee80211_node *ni = (void *)vap->iv_bss;
+
+ /*
+ * For now the iwx driver only supports a single vdev with a single
+ * node; it doesn't yet support ibss/hostap/multiple vdevs.
+ */
+ if (notif->sta_id != IWX_STATION_ID ||
+ (le32toh(notif->flags) & IWX_TLC_NOTIF_FLAG_RATE) == 0)
+ return;
+
+ iwx_rs_update_node_txrate(sc, notif, ni);
+}
+
+static int
+iwx_phy_send_rlc(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
+ uint8_t chains_static, uint8_t chains_dynamic)
+{
+ struct iwx_rlc_config_cmd cmd;
+ uint32_t cmd_id;
+ uint8_t active_cnt, idle_cnt;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ idle_cnt = chains_static;
+ active_cnt = chains_dynamic;
+
+ cmd.phy_id = htole32(phyctxt->id);
+ cmd.rlc.rx_chain_info = htole32(iwx_fw_valid_rx_ant(sc) <<
+ IWX_PHY_RX_CHAIN_VALID_POS);
+ cmd.rlc.rx_chain_info |= htole32(idle_cnt << IWX_PHY_RX_CHAIN_CNT_POS);
+ cmd.rlc.rx_chain_info |= htole32(active_cnt <<
+ IWX_PHY_RX_CHAIN_MIMO_CNT_POS);
+
+ cmd_id = iwx_cmd_id(IWX_RLC_CONFIG_CMD, IWX_DATA_PATH_GROUP, 2);
+ return iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+}
+
+static int
+iwx_phy_ctxt_update(struct iwx_softc *sc, struct iwx_phy_ctxt *phyctxt,
+ struct ieee80211_channel *chan, uint8_t chains_static,
+ uint8_t chains_dynamic, uint32_t apply_time, uint8_t sco,
+ uint8_t vht_chan_width)
+{
+ uint16_t band_flags = (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+ int err;
+
+ if (chan == IEEE80211_CHAN_ANYC) {
+ printf("%s: GOS-3833: IEEE80211_CHAN_ANYC triggered\n",
+ DEVNAME(sc));
+ return EIO;
+ }
+
+ if (isset(sc->sc_enabled_capa,
+ IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) &&
+ (phyctxt->channel->ic_flags & band_flags) !=
+ (chan->ic_flags & band_flags)) {
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_REMOVE, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not remove PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ phyctxt->channel = chan;
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_ADD, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not add PHY context "
+ "(error %d)\n", DEVNAME(sc), err);
+ return err;
+ }
+ } else {
+ phyctxt->channel = chan;
+ err = iwx_phy_ctxt_cmd(sc, phyctxt, chains_static,
+ chains_dynamic, IWX_FW_CTXT_ACTION_MODIFY, apply_time, sco,
+ vht_chan_width);
+ if (err) {
+ printf("%s: could not update PHY context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ }
+
+ phyctxt->sco = sco;
+ phyctxt->vht_chan_width = vht_chan_width;
+
+ DPRINTF(("%s: phyctxt->channel->ic_ieee=%d\n", __func__,
+ phyctxt->channel->ic_ieee));
+ DPRINTF(("%s: phyctxt->sco=%d\n", __func__, phyctxt->sco));
+ DPRINTF(("%s: phyctxt->vht_chan_width=%d\n", __func__,
+ phyctxt->vht_chan_width));
+
+ if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RLC_CONFIG_CMD) == 2)
+ return iwx_phy_send_rlc(sc, phyctxt,
+ chains_static, chains_dynamic);
+
+ return 0;
+}
+
+static int
+iwx_auth(struct ieee80211vap *vap, struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in;
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ struct ieee80211_node *ni;
+ uint32_t duration;
+ int generation = sc->sc_generation, err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ ni = ieee80211_ref_node(vap->iv_bss);
+ in = IWX_NODE(ni);
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ ic->ic_bsschan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+ } else {
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ in->in_ni.ni_chan, 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+ }
+ ivp->phy_ctxt = &sc->sc_phyctxt[0];
+ IEEE80211_ADDR_COPY(in->in_macaddr, in->in_ni.ni_macaddr);
+ DPRINTF(("%s: in-in_macaddr=%s\n", __func__,
+ ether_sprintf(in->in_macaddr)));
+
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD, 0);
+ if (err) {
+ printf("%s: could not add MAC context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags |= IWX_FLAG_MAC_ACTIVE;
+
+ err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_ADD);
+ if (err) {
+ printf("%s: could not add binding (error %d)\n",
+ DEVNAME(sc), err);
+ goto rm_mac_ctxt;
+ }
+ sc->sc_flags |= IWX_FLAG_BINDING_ACTIVE;
+
+ err = iwx_add_sta_cmd(sc, in, 0);
+ if (err) {
+ printf("%s: could not add sta (error %d)\n",
+ DEVNAME(sc), err);
+ goto rm_binding;
+ }
+ sc->sc_flags |= IWX_FLAG_STA_ACTIVE;
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR) {
+ err = iwx_enable_txq(sc, IWX_MONITOR_STA_ID,
+ IWX_DQA_INJECT_MONITOR_QUEUE, IWX_MGMT_TID,
+ IWX_TX_RING_COUNT);
+ if (err)
+ goto rm_sta;
+ return 0;
+ }
+
+ err = iwx_enable_mgmt_queue(sc);
+ if (err)
+ goto rm_sta;
+
+ err = iwx_clear_statistics(sc);
+ if (err)
+ goto rm_mgmt_queue;
+
+ /*
+ * Prevent the FW from wandering off channel during association
+ * by "protecting" the session with a time event.
+ */
+ if (in->in_ni.ni_intval)
+ duration = in->in_ni.ni_intval * 9;
+ else
+ duration = 900;
+ return iwx_schedule_session_protection(sc, in, duration);
+
+rm_mgmt_queue:
+ if (generation == sc->sc_generation)
+ iwx_disable_mgmt_queue(sc);
+rm_sta:
+ if (generation == sc->sc_generation) {
+ iwx_rm_sta_cmd(sc, in);
+ sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+ }
+rm_binding:
+ if (generation == sc->sc_generation) {
+ iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+ sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+ }
+rm_mac_ctxt:
+ if (generation == sc->sc_generation) {
+ iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+ sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+ }
+ return err;
+}
+
+static int
+iwx_deauth(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ iwx_unprotect_session(sc, in);
+
+ if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+ err = iwx_rm_sta(sc, in);
+ if (err)
+ return err;
+ sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+ }
+
+ if (sc->sc_flags & IWX_FLAG_BINDING_ACTIVE) {
+ err = iwx_binding_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE);
+ if (err) {
+ printf("%s: could not remove binding (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+ }
+
+ DPRINTF(("%s: IWX_FLAG_MAC_ACTIVE=%d\n", __func__, sc->sc_flags &
+ IWX_FLAG_MAC_ACTIVE));
+ if (sc->sc_flags & IWX_FLAG_MAC_ACTIVE) {
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_REMOVE, 0);
+ if (err) {
+ printf("%s: could not remove MAC context (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+ sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+ }
+
+ /* Move unused PHY context to a default channel. */
+ // TODO: enabled in OpenBSD, but it gets in the way of AUTH->AUTH transitions.
+ err = iwx_phy_ctxt_update(sc, &sc->sc_phyctxt[0],
+ &ic->ic_channels[1], 1, 1, 0, IEEE80211_HTOP0_SCO_SCN,
+ IEEE80211_VHTOP0_CHAN_WIDTH_HT);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int
+iwx_run(struct ieee80211vap *vap, struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ int err;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ uint8_t chains = iwx_mimo_enabled(sc) ? 2 : 1;
+ uint8_t sco, vht_chan_width;
+ sco = IEEE80211_HTOP0_SCO_SCN;
+ if ((ni->ni_flags & IEEE80211_NODE_VHT) &&
+ IEEE80211_IS_CHAN_VHT80(ni->ni_chan))
+ vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_80;
+ else
+ vht_chan_width = IEEE80211_VHTOP0_CHAN_WIDTH_HT;
+ err = iwx_phy_ctxt_update(sc, ivp->phy_ctxt,
+ ivp->phy_ctxt->channel, chains, chains,
+ 0, sco, vht_chan_width);
+ if (err) {
+ printf("%s: failed to update PHY\n", DEVNAME(sc));
+ return err;
+ }
+ }
+
+ /* Update STA again to apply HT and VHT settings. */
+ err = iwx_add_sta_cmd(sc, in, 1);
+ if (err) {
+ printf("%s: could not update STA (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /* We have now been assigned an associd by the AP. */
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 1);
+ if (err) {
+ printf("%s: failed to update MAC\n", DEVNAME(sc));
+ return err;
+ }
+
+ err = iwx_sf_config(sc, IWX_SF_FULL_ON);
+ if (err) {
+ printf("%s: could not set sf full on (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ err = iwx_allow_mcast(sc);
+ if (err) {
+ printf("%s: could not allow mcast (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ err = iwx_power_update_device(sc);
+ if (err) {
+ printf("%s: could not send power command (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+#ifdef notyet
+ /*
+ * Disabled for now. Default beacon filter settings
+ * prevent net80211 from getting ERP and HT protection
+ * updates from beacons.
+ */
+ err = iwx_enable_beacon_filter(sc, in);
+ if (err) {
+ printf("%s: could not enable beacon filter\n",
+ DEVNAME(sc));
+ return err;
+ }
+#endif
+ err = iwx_power_mac_update_mode(sc, in);
+ if (err) {
+ printf("%s: could not update MAC power (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ if (ic->ic_opmode == IEEE80211_M_MONITOR)
+ return 0;
+
+ err = iwx_rs_init(sc, in);
+ if (err) {
+ printf("%s: could not init rate scaling (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+iwx_run_stop(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct ieee80211_node *ni = &in->in_ni;
+ int err, i;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ err = iwx_flush_sta(sc, in);
+ if (err) {
+ printf("%s: could not flush Tx path (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /*
+ * Stop Rx BA sessions now. We cannot rely on the BA task
+ * for this when moving out of RUN state since it runs in a
+ * separate thread.
+ * Note that in->in_ni (struct ieee80211_node) already represents
+ * our new access point in case we are roaming between APs.
+ * This means we cannot rely on struct ieee80211_node to tell
+ * us which BA sessions exist.
+ */
+ // TODO agg
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ if (rxba->baid == IWX_RX_REORDER_DATA_INVALID_BAID)
+ continue;
+ iwx_sta_rx_agg(sc, ni, rxba->tid, 0, 0, 0, 0);
+ }
+
+ err = iwx_sf_config(sc, IWX_SF_INIT_OFF);
+ if (err)
+ return err;
+
+ err = iwx_disable_beacon_filter(sc);
+ if (err) {
+ printf("%s: could not disable beacon filter (error %d)\n",
+ DEVNAME(sc), err);
+ return err;
+ }
+
+ /* Mark station as disassociated. */
+ err = iwx_mac_ctxt_cmd(sc, in, IWX_FW_CTXT_ACTION_MODIFY, 0);
+ if (err) {
+ printf("%s: failed to update MAC\n", DEVNAME(sc));
+ return err;
+ }
+
+ return 0;
+}
+
+static struct ieee80211_node *
+iwx_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ return malloc(sizeof (struct iwx_node), M_80211_NODE,
+ M_NOWAIT | M_ZERO);
+}
+
+#if 0
+int
+iwx_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_node *in = (void *)ni;
+ struct iwx_setkey_task_arg *a;
+ int err;
+
+ if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
+ /* Fallback to software crypto for other ciphers. */
+ err = ieee80211_set_key(ic, ni, k);
+ if (!err && in != NULL && (k->k_flags & IEEE80211_KEY_GROUP))
+ in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
+ return err;
+ }
+
+ if (sc->setkey_nkeys >= nitems(sc->setkey_arg))
+ return ENOSPC;
+
+ a = &sc->setkey_arg[sc->setkey_cur];
+ a->sta_id = IWX_STATION_ID;
+ a->ni = ni;
+ a->k = k;
+ sc->setkey_cur = (sc->setkey_cur + 1) % nitems(sc->setkey_arg);
+ sc->setkey_nkeys++;
+ iwx_add_task(sc, systq, &sc->setkey_task);
+ return EBUSY;
+}
+
+int
+iwx_add_sta_key(struct iwx_softc *sc, int sta_id, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_node *in = (void *)ni;
+ struct iwx_add_sta_key_cmd cmd;
+ uint32_t status;
+ const int want_keymask = (IWX_NODE_FLAG_HAVE_PAIRWISE_KEY |
+ IWX_NODE_FLAG_HAVE_GROUP_KEY);
+ int err;
+
+ /*
+ * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
+ * Currently we only implement station mode where 'ni' is always
+ * ic->ic_bss so there is no need to validate arguments beyond this:
+ */
+ KASSERT(ni == ic->ic_bss);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
+ IWX_STA_KEY_FLG_WEP_KEY_MAP |
+ ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
+ IWX_STA_KEY_FLG_KEYID_MSK));
+ if (k->k_flags & IEEE80211_KEY_GROUP) {
+ cmd.common.key_offset = 1;
+ cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
+ } else
+ cmd.common.key_offset = 0;
+
+ memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
+ cmd.common.sta_id = sta_id;
+
+ cmd.transmit_seq_cnt = htole64(k->k_tsc);
+
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
+ &status);
+ if (sc->sc_flags & IWX_FLAG_SHUTDOWN)
+ return ECANCELED;
+ if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+ err = EIO;
+ if (err) {
+ IEEE80211_SEND_MGMT(ic, ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
+ IEEE80211_REASON_AUTH_LEAVE);
+ ieee80211_new_state(ic, IEEE80211_S_SCAN, -1);
+ return err;
+ }
+
+ if (k->k_flags & IEEE80211_KEY_GROUP)
+ in->in_flags |= IWX_NODE_FLAG_HAVE_GROUP_KEY;
+ else
+ in->in_flags |= IWX_NODE_FLAG_HAVE_PAIRWISE_KEY;
+
+ if ((in->in_flags & want_keymask) == want_keymask) {
+ DPRINTF(("marking port %s valid\n",
+ ether_sprintf(ni->ni_macaddr)));
+ ni->ni_port_valid = 1;
+ ieee80211_set_link_state(ic, LINK_STATE_UP);
+ }
+
+ return 0;
+}
+
+void
+iwx_setkey_task(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct iwx_setkey_task_arg *a;
+ int err = 0, s = splnet();
+
+ while (sc->setkey_nkeys > 0) {
+ if (err || (sc->sc_flags & IWX_FLAG_SHUTDOWN))
+ break;
+ a = &sc->setkey_arg[sc->setkey_tail];
+ err = iwx_add_sta_key(sc, a->sta_id, a->ni, a->k);
+ a->sta_id = 0;
+ a->ni = NULL;
+ a->k = NULL;
+ sc->setkey_tail = (sc->setkey_tail + 1) %
+ nitems(sc->setkey_arg);
+ sc->setkey_nkeys--;
+ }
+
+ refcnt_rele_wake(&sc->task_refs);
+ splx(s);
+}
+
+void
+iwx_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
+ struct ieee80211_key *k)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_add_sta_key_cmd cmd;
+
+ if (k->k_cipher != IEEE80211_CIPHER_CCMP) {
+ /* Fallback to software crypto for other ciphers. */
+ ieee80211_delete_key(ic, ni, k);
+ return;
+ }
+
+ if ((sc->sc_flags & IWX_FLAG_STA_ACTIVE) == 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.common.key_flags = htole16(IWX_STA_KEY_NOT_VALID |
+ IWX_STA_KEY_FLG_NO_ENC | IWX_STA_KEY_FLG_WEP_KEY_MAP |
+ ((k->k_id << IWX_STA_KEY_FLG_KEYID_POS) &
+ IWX_STA_KEY_FLG_KEYID_MSK));
+ memcpy(cmd.common.key, k->k_key, MIN(sizeof(cmd.common.key), k->k_len));
+ if (k->k_flags & IEEE80211_KEY_GROUP)
+ cmd.common.key_offset = 1;
+ else
+ cmd.common.key_offset = 0;
+ cmd.common.sta_id = IWX_STATION_ID;
+
+ iwx_send_cmd_pdu(sc, IWX_ADD_STA_KEY, IWX_CMD_ASYNC, sizeof(cmd), &cmd);
+}
+#endif
+
+static int
+iwx_newstate_sub(struct ieee80211vap *vap, enum ieee80211_state nstate)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct iwx_softc *sc = ic->ic_softc;
+ enum ieee80211_state ostate = vap->iv_state;
+ int err = 0;
+
+ IWX_LOCK(sc);
+
+ if (nstate <= ostate || nstate > IEEE80211_S_RUN) {
+ switch (ostate) {
+ case IEEE80211_S_RUN:
+ err = iwx_run_stop(sc);
+ if (err)
+ goto out;
+ /* FALLTHROUGH */
+ case IEEE80211_S_ASSOC:
+ case IEEE80211_S_AUTH:
+ if (nstate <= IEEE80211_S_AUTH) {
+ err = iwx_deauth(sc);
+ if (err)
+ goto out;
+ }
+ /* FALLTHROUGH */
+ case IEEE80211_S_SCAN:
+ case IEEE80211_S_INIT:
+ default:
+ break;
+ }
+//
+// /* Die now if iwx_stop() was called while we were sleeping. */
+// if (sc->sc_flags & IWX_FLAG_SHUTDOWN) {
+// refcnt_rele_wake(&sc->task_refs);
+// splx(s);
+// return;
+// }
+ }
+
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ break;
+
+ case IEEE80211_S_SCAN:
+ break;
+
+ case IEEE80211_S_AUTH:
+ err = iwx_auth(vap, sc);
+ break;
+
+ case IEEE80211_S_ASSOC:
+ break;
+
+ case IEEE80211_S_RUN:
+ err = iwx_run(vap, sc);
+ break;
+ default:
+ break;
+ }
+
+out:
+ IWX_UNLOCK(sc);
+
+ return (err);
+}
+
+static int
+iwx_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ struct iwx_vap *ivp = IWX_VAP(vap);
+ struct ieee80211com *ic = vap->iv_ic;
+ enum ieee80211_state ostate = vap->iv_state;
+ int err;
+
+ /*
+ * Prevent attempts to transition towards the same state, unless
+ * we are scanning in which case a SCAN -> SCAN transition
+ * triggers another scan iteration. And AUTH -> AUTH is needed
+ * to support band-steering.
+ */
+ if (ostate == nstate && nstate != IEEE80211_S_SCAN &&
+ nstate != IEEE80211_S_AUTH)
+ return 0;
+ IEEE80211_UNLOCK(ic);
+ err = iwx_newstate_sub(vap, nstate);
+ IEEE80211_LOCK(ic);
+ if (err == 0)
+ err = ivp->iv_newstate(vap, nstate, arg);
+
+ return (err);
+}
+
+static void
+iwx_endscan(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if ((sc->sc_flags & (IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN)) == 0)
+ return;
+
+ sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
+
+ ieee80211_scan_done(vap);
+ wakeup(&vap->iv_state); /* wake up iwx_newstate */
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration
+ */
+static const uint32_t
+iwx_sf_full_timeout_def[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+ {
+ htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWX_SF_AGG_UNICAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWX_SF_MCAST_AGING_TIMER_DEF),
+ htole32(IWX_SF_MCAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWX_SF_BA_AGING_TIMER_DEF),
+ htole32(IWX_SF_BA_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWX_SF_TX_RE_AGING_TIMER_DEF),
+ htole32(IWX_SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const uint32_t
+iwx_sf_full_timeout[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES] = {
+ {
+ htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER),
+ htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWX_SF_AGG_UNICAST_AGING_TIMER),
+ htole32(IWX_SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWX_SF_MCAST_AGING_TIMER),
+ htole32(IWX_SF_MCAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWX_SF_BA_AGING_TIMER),
+ htole32(IWX_SF_BA_IDLE_TIMER)
+ },
+ {
+ htole32(IWX_SF_TX_RE_AGING_TIMER),
+ htole32(IWX_SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void
+iwx_fill_sf_command(struct iwx_softc *sc, struct iwx_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_node *ni)
+{
+ int i, j, watermark;
+
+ sf_cmd->watermark[IWX_SF_LONG_DELAY_ON] = htole32(IWX_SF_W_MARK_SCAN);
+
+ /*
+ * If we are in the association flow, check the antenna configuration
+ * capabilities of the AP station and choose the watermark accordingly.
+ */
+ if (ni) {
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ struct ieee80211_htrateset *htrs = &ni->ni_htrates;
+ int hasmimo = 0;
+ for (i = 0; i < htrs->rs_nrates; i++) {
+ if (htrs->rs_rates[i] > 7) {
+ hasmimo = 1;
+ break;
+ }
+ }
+ if (hasmimo)
+ watermark = IWX_SF_W_MARK_MIMO2;
+ else
+ watermark = IWX_SF_W_MARK_SISO;
+ } else {
+ watermark = IWX_SF_W_MARK_LEGACY;
+ }
+ } else {
+ /* Default watermark value for unassociated mode. */
+ watermark = IWX_SF_W_MARK_MIMO2;
+ }
+ sf_cmd->watermark[IWX_SF_FULL_ON] = htole32(watermark);
+
+ for (i = 0; i < IWX_SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < IWX_SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ htole32(IWX_SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+
+ if (ni) {
+ memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout,
+ sizeof(iwx_sf_full_timeout));
+ } else {
+ memcpy(sf_cmd->full_on_timeouts, iwx_sf_full_timeout_def,
+ sizeof(iwx_sf_full_timeout_def));
+ }
+}
+
+static int
+iwx_sf_config(struct iwx_softc *sc, int new_state)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct iwx_sf_cfg_cmd sf_cmd = {
+ .state = htole32(new_state),
+ };
+ int err = 0;
+
+ switch (new_state) {
+ case IWX_SF_UNINIT:
+ case IWX_SF_INIT_OFF:
+ iwx_fill_sf_command(sc, &sf_cmd, NULL);
+ break;
+ case IWX_SF_FULL_ON:
+ iwx_fill_sf_command(sc, &sf_cmd, ni);
+ break;
+ default:
+ return EINVAL;
+ }
+
+ err = iwx_send_cmd_pdu(sc, IWX_REPLY_SF_CFG_CMD, IWX_CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ return err;
+}
+
+static int
+iwx_send_bt_init_conf(struct iwx_softc *sc)
+{
+ struct iwx_bt_coex_cmd bt_cmd;
+
+ bzero(&bt_cmd, sizeof(struct iwx_bt_coex_cmd));
+
+ bt_cmd.mode = htole32(IWX_BT_COEX_NW);
+ bt_cmd.enabled_modules |= BT_COEX_SYNC2SCO_ENABLED;
+ bt_cmd.enabled_modules |= BT_COEX_HIGH_BAND_RET;
+
+ return iwx_send_cmd_pdu(sc, IWX_BT_CONFIG, 0, sizeof(bt_cmd),
+ &bt_cmd);
+}
+
+static int
+iwx_send_soc_conf(struct iwx_softc *sc)
+{
+ struct iwx_soc_configuration_cmd cmd;
+ int err;
+ uint32_t cmd_id, flags = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ /*
+ * In VER_1 of this command, the discrete value is considered
+ * an integer; in VER_2, it's a bitmask. Since we have only 2
+ * values in VER_1, this is backwards-compatible with VER_2,
+ * as long as we don't set any other flag bits.
+ */
+ if (!sc->sc_integrated) { /* VER_1 */
+ flags = IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE;
+ } else { /* VER_2 */
+ uint8_t scan_cmd_ver;
+ if (sc->sc_ltr_delay != IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE)
+ flags |= (sc->sc_ltr_delay &
+ IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK);
+ scan_cmd_ver = iwx_lookup_cmd_ver(sc, IWX_LONG_GROUP,
+ IWX_SCAN_REQ_UMAC);
+ if (scan_cmd_ver != IWX_FW_CMD_VER_UNKNOWN &&
+ scan_cmd_ver >= 2 && sc->sc_low_latency_xtal)
+ flags |= IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY;
+ }
+ cmd.flags = htole32(flags);
+
+ cmd.latency = htole32(sc->sc_xtal_latency);
+
+ cmd_id = iwx_cmd_id(IWX_SOC_CONFIGURATION_CMD, IWX_SYSTEM_GROUP, 0);
+ err = iwx_send_cmd_pdu(sc, cmd_id, 0, sizeof(cmd), &cmd);
+ if (err)
+ printf("%s: failed to set soc latency: %d\n", DEVNAME(sc), err);
+ return err;
+}
+
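+/*
+ * Encoding sketch for the command above (illustrative): in VER_1 the
+ * flags word is effectively a boolean, either
+ * IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE or 0. In VER_2 the same word is a
+ * bitmask: an integrated part ORs its LTR delay code (masked by
+ * IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK) with, optionally,
+ * IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY. Since VER_1 only ever sends
+ * 0 or DISCRETE, a VER_2 parser still reads it correctly.
+ */
+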
+static int
+iwx_send_update_mcc_cmd(struct iwx_softc *sc, const char *alpha2)
+{
+ struct iwx_mcc_update_cmd mcc_cmd;
+ struct iwx_host_cmd hcmd = {
+ .id = IWX_MCC_UPDATE_CMD,
+ .flags = IWX_CMD_WANT_RESP,
+ .data = { &mcc_cmd },
+ };
+ struct iwx_rx_packet *pkt;
+ struct iwx_mcc_update_resp *resp;
+ size_t resp_len;
+ int err;
+
+ memset(&mcc_cmd, 0, sizeof(mcc_cmd));
+ mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
+ if (isset(sc->sc_ucode_api, IWX_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC))
+ mcc_cmd.source_id = IWX_MCC_SOURCE_GET_CURRENT;
+ else
+ mcc_cmd.source_id = IWX_MCC_SOURCE_OLD_FW;
+
+ hcmd.len[0] = sizeof(struct iwx_mcc_update_cmd);
+ hcmd.resp_pkt_len = IWX_CMD_RESP_MAX;
+
+ err = iwx_send_cmd(sc, &hcmd);
+ if (err)
+ return err;
+
+ pkt = hcmd.resp_pkt;
+ if (!pkt || (pkt->hdr.flags & IWX_CMD_FAILED_MSK)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp_len = iwx_rx_packet_payload_len(pkt);
+ if (resp_len < sizeof(*resp)) {
+ err = EIO;
+ goto out;
+ }
+
+ resp = (void *)pkt->data;
+ if (resp_len != sizeof(*resp) +
+ resp->n_channels * sizeof(resp->channels[0])) {
+ err = EIO;
+ goto out;
+ }
+
+ DPRINTF(("MCC status=0x%x mcc=0x%x cap=0x%x time=0x%x geo_info=0x%x source_id=0x%d n_channels=%u\n",
+ resp->status, resp->mcc, resp->cap, resp->time, resp->geo_info, resp->source_id, resp->n_channels));
+
+out:
+ iwx_free_resp(sc, &hcmd);
+
+ return err;
+}
+
+static int
+iwx_send_temp_report_ths_cmd(struct iwx_softc *sc)
+{
+ struct iwx_temp_report_ths_cmd cmd;
+ int err;
+
+ /*
+ * In order to give responsibility for critical-temperature-kill
+ * and TX backoff to FW we need to send an empty temperature
+ * reporting command at init time.
+ */
+ memset(&cmd, 0, sizeof(cmd));
+
+ err = iwx_send_cmd_pdu(sc,
+ IWX_WIDE_ID(IWX_PHY_OPS_GROUP, IWX_TEMP_REPORTING_THRESHOLDS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (err)
+ printf("%s: TEMP_REPORT_THS_CMD command failed (error %d)\n",
+ DEVNAME(sc), err);
+
+ return err;
+}
+
+static int
+iwx_init_hw(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ int err = 0, i;
+
+ err = iwx_run_init_mvm_ucode(sc, 0);
+ if (err)
+ return err;
+
+ if (!iwx_nic_lock(sc))
+ return EBUSY;
+
+ err = iwx_send_tx_ant_cfg(sc, iwx_fw_valid_tx_ant(sc));
+ if (err) {
+ printf("%s: could not init tx ant config (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+
+ if (sc->sc_tx_with_siso_diversity) {
+ err = iwx_send_phy_cfg_cmd(sc);
+ if (err) {
+ printf("%s: could not send phy config (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+ }
+
+ err = iwx_send_bt_init_conf(sc);
+ if (err) {
+ printf("%s: could not init bt coex (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+
+ err = iwx_send_soc_conf(sc);
+ if (err) {
+ printf("%s: iwx_send_soc_conf failed\n", __func__);
+ goto err;
+ }
+
+ if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_DQA_SUPPORT)) {
+ printf("%s: === IWX_UCODE_TLV_CAPA_DQA_SUPPORT\n", __func__);
+ err = iwx_send_dqa_cmd(sc);
+ if (err) {
+ printf("%s: IWX_UCODE_TLV_CAPA_DQA_SUPPORT "
+ "failed (error %d)\n", __func__, err);
+ goto err;
+ }
+ }
+ // TODO phyctxt
+ for (i = 0; i < IWX_NUM_PHY_CTX; i++) {
+ /*
+ * The channel used here isn't relevant as it's
+ * going to be overwritten in the other flows.
+ * For now use the first channel we have.
+ */
+ sc->sc_phyctxt[i].id = i;
+ sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
+ err = iwx_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
+ IWX_FW_CTXT_ACTION_ADD, 0, 0, 0);
+ if (err) {
+ printf("%s: could not add phy context %d (error %d)\n",
+ DEVNAME(sc), i, err);
+ goto err;
+ }
+ if (iwx_lookup_cmd_ver(sc, IWX_DATA_PATH_GROUP,
+ IWX_RLC_CONFIG_CMD) == 2) {
+ err = iwx_phy_send_rlc(sc, &sc->sc_phyctxt[i], 1, 1);
+ if (err) {
+ printf("%s: could not configure RLC for PHY "
+ "%d (error %d)\n", DEVNAME(sc), i, err);
+ goto err;
+ }
+ }
+ }
+
+ err = iwx_config_ltr(sc);
+ if (err) {
+ printf("%s: PCIe LTR configuration failed (error %d)\n",
+ DEVNAME(sc), err);
+ }
+
+ if (isset(sc->sc_enabled_capa, IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW)) {
+ err = iwx_send_temp_report_ths_cmd(sc);
+ if (err) {
+ printf("%s: iwx_send_temp_report_ths_cmd failed\n",
+ __func__);
+ goto err;
+ }
+ }
+
+ err = iwx_power_update_device(sc);
+ if (err) {
+ printf("%s: could not send power command (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+
+ if (sc->sc_nvm.lar_enabled) {
+ err = iwx_send_update_mcc_cmd(sc, "ZZ");
+ if (err) {
+ printf("%s: could not init LAR (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+ }
+
+ err = iwx_config_umac_scan_reduced(sc);
+ if (err) {
+ printf("%s: could not configure scan (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+
+ err = iwx_disable_beacon_filter(sc);
+ if (err) {
+ printf("%s: could not disable beacon filter (error %d)\n",
+ DEVNAME(sc), err);
+ goto err;
+ }
+
+err:
+ iwx_nic_unlock(sc);
+ return err;
+}
+
+/* Allow multicast from our BSSID. */
+static int
+iwx_allow_mcast(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_node *in = IWX_NODE(vap->iv_bss);
+ struct iwx_mcast_filter_cmd *cmd;
+ size_t size;
+ int err;
+
+ size = roundup(sizeof(*cmd), 4);
+ cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (cmd == NULL)
+ return ENOMEM;
+ cmd->filter_own = 1;
+ cmd->port_id = 0;
+ cmd->count = 0;
+ cmd->pass_all = 1;
+ IEEE80211_ADDR_COPY(cmd->bssid, in->in_macaddr);
+
+ err = iwx_send_cmd_pdu(sc, IWX_MCAST_FILTER_CMD,
+ 0, size, cmd);
+ free(cmd, M_DEVBUF);
+ return err;
+}
+
+static int
+iwx_init(struct iwx_softc *sc)
+{
+ int err, generation;
+
+ generation = ++sc->sc_generation;
+ iwx_preinit(sc);
+
+ err = iwx_start_hw(sc);
+ if (err) {
+ printf("%s: iwx_start_hw failed\n", __func__);
+ return err;
+ }
+
+ err = iwx_init_hw(sc);
+ if (err) {
+ if (generation == sc->sc_generation)
+ iwx_stop_device(sc);
+ printf("%s: iwx_init_hw failed (error %d)\n", __func__, err);
+ return err;
+ }
+
+ sc->sc_flags |= IWX_FLAG_HW_INITED;
+ callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
+
+ return 0;
+}
+
+static void
+iwx_start(struct iwx_softc *sc)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+
+ IWX_ASSERT_LOCKED(sc);
+
+ while (sc->qfullmsk == 0 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (iwx_tx(sc, m, ni) != 0) {
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ continue;
+ }
+ }
+}
+
+static void
+iwx_stop(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_vap *ivp = IWX_VAP(vap);
+
+ iwx_stop_device(sc);
+
+ /* Reset soft state. */
+ sc->sc_generation++;
+ ivp->phy_ctxt = NULL;
+
+ sc->sc_flags &= ~(IWX_FLAG_SCANNING | IWX_FLAG_BGSCAN);
+ sc->sc_flags &= ~IWX_FLAG_MAC_ACTIVE;
+ sc->sc_flags &= ~IWX_FLAG_BINDING_ACTIVE;
+ sc->sc_flags &= ~IWX_FLAG_STA_ACTIVE;
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+ sc->sc_flags &= ~IWX_FLAG_HW_ERR;
+ sc->sc_flags &= ~IWX_FLAG_SHUTDOWN;
+ sc->sc_flags &= ~IWX_FLAG_TXFLUSH;
+
+ sc->sc_rx_ba_sessions = 0;
+ sc->ba_rx.start_tidmask = 0;
+ sc->ba_rx.stop_tidmask = 0;
+ memset(sc->aggqid, 0, sizeof(sc->aggqid));
+ sc->ba_tx.start_tidmask = 0;
+ sc->ba_tx.stop_tidmask = 0;
+}
+
+static void
+iwx_watchdog(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ int i;
+
+ /*
+ * We maintain a separate timer for each Tx queue because
+ * Tx aggregation queues can get "stuck" while other queues
+ * keep working. The Linux driver uses a similar workaround.
+ */
+ for (i = 0; i < nitems(sc->sc_tx_timer); i++) {
+ if (sc->sc_tx_timer[i] > 0) {
+ if (--sc->sc_tx_timer[i] == 0) {
+ printf("%s: device timeout\n", DEVNAME(sc));
+
+ iwx_nic_error(sc);
+ iwx_dump_driver_status(sc);
+ ieee80211_restart_all(ic);
+ return;
+ }
+ }
+ }
+ callout_reset(&sc->watchdog_to, hz, iwx_watchdog, sc);
+}
+
+/*
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with uint32_t-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwx_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t trm_hw_status0; /* TRM HW status */
+ uint32_t trm_hw_status1; /* TRM HW status */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t bcon_time; /* beacon timer */
+ uint32_t tsf_low; /* network timestamp function timer */
+ uint32_t tsf_hi; /* network timestamp function timer */
+ uint32_t gp1; /* GP1 timer register */
+ uint32_t gp2; /* GP2 timer register */
+ uint32_t fw_rev_type; /* firmware revision type */
+ uint32_t major; /* uCode version major */
+ uint32_t minor; /* uCode version minor */
+ uint32_t hw_ver; /* HW Silicon version */
+ uint32_t brd_ver; /* HW board version */
+ uint32_t log_pc; /* log program counter */
+ uint32_t frame_ptr; /* frame pointer */
+ uint32_t stack_ptr; /* stack pointer */
+ uint32_t hcmd; /* last host command header */
+ uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
+ uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
+ uint32_t wait_event; /* wait event() caller address */
+ uint32_t l2p_control; /* L2pControlField */
+ uint32_t l2p_duration; /* L2pDurationField */
+ uint32_t l2p_mhvalid; /* L2pMhValidBits */
+ uint32_t l2p_addr_match; /* L2pAddrMatchStat */
+ uint32_t lmpm_pmg_sel; /* indicates which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ uint32_t u_timestamp; /* date and time of the compilation */
+ uint32_t flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
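+/*
+ * Note on the layout rule above (illustrative): the table is fetched
+ * with word-sized reads, e.g. iwx_read_mem(sc, base, &table,
+ * sizeof(table)/sizeof(uint32_t)) below, where the length argument
+ * counts 32-bit words rather than bytes. Any member narrower than a
+ * word would need to be packed into one aligned word to survive the
+ * copy, which is why everything here is uint32_t.
+ */
+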
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwx_umac_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t blink1; /* branch link */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t umac_major;
+ uint32_t umac_minor;
+ uint32_t frame_pointer; /* core register 27*/
+ uint32_t stack_pointer; /* core register 28 */
+ uint32_t cmd_header; /* latest host cmd sent to UMAC */
+ uint32_t nic_isr_pref; /* ISR status register */
+} __packed;
+
+#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
+#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
+
+static void
+iwx_nic_umac_error(struct iwx_softc *sc)
+{
+ struct iwx_umac_error_event_table table;
+ uint32_t base;
+
+ base = sc->sc_uc.uc_umac_error_event_table;
+
+ if (base < 0x400000) {
+ printf("%s: Invalid error log pointer 0x%08x\n",
+ DEVNAME(sc), base);
+ return;
+ }
+
+ if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+ printf("%s: reading errlog failed\n", DEVNAME(sc));
+ return;
+ }
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ printf("%s: Start UMAC Error Log Dump:\n", DEVNAME(sc));
+ printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+ sc->sc_flags, table.valid);
+ }
+
+ printf("%s: 0x%08X | %s\n", DEVNAME(sc), table.error_id,
+ iwx_desc_lookup(table.error_id));
+ printf("%s: 0x%08X | umac branchlink1\n", DEVNAME(sc), table.blink1);
+ printf("%s: 0x%08X | umac branchlink2\n", DEVNAME(sc), table.blink2);
+ printf("%s: 0x%08X | umac interruptlink1\n", DEVNAME(sc), table.ilink1);
+ printf("%s: 0x%08X | umac interruptlink2\n", DEVNAME(sc), table.ilink2);
+ printf("%s: 0x%08X | umac data1\n", DEVNAME(sc), table.data1);
+ printf("%s: 0x%08X | umac data2\n", DEVNAME(sc), table.data2);
+ printf("%s: 0x%08X | umac data3\n", DEVNAME(sc), table.data3);
+ printf("%s: 0x%08X | umac major\n", DEVNAME(sc), table.umac_major);
+ printf("%s: 0x%08X | umac minor\n", DEVNAME(sc), table.umac_minor);
+ printf("%s: 0x%08X | frame pointer\n", DEVNAME(sc),
+ table.frame_pointer);
+ printf("%s: 0x%08X | stack pointer\n", DEVNAME(sc),
+ table.stack_pointer);
+ printf("%s: 0x%08X | last host cmd\n", DEVNAME(sc), table.cmd_header);
+ printf("%s: 0x%08X | isr status reg\n", DEVNAME(sc),
+ table.nic_isr_pref);
+}
+
+#define IWX_FW_SYSASSERT_CPU_MASK 0xf0000000
+static struct {
+ const char *name;
+ uint8_t num;
+} advanced_lookup[] = {
+ { "NMI_INTERRUPT_WDG", 0x34 },
+ { "SYSASSERT", 0x35 },
+ { "UCODE_VERSION_MISMATCH", 0x37 },
+ { "BAD_COMMAND", 0x38 },
+ { "BAD_COMMAND", 0x39 },
+ { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
+ { "FATAL_ERROR", 0x3D },
+ { "NMI_TRM_HW_ERR", 0x46 },
+ { "NMI_INTERRUPT_TRM", 0x4C },
+ { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
+ { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
+ { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
+ { "NMI_INTERRUPT_HOST", 0x66 },
+ { "NMI_INTERRUPT_LMAC_FATAL", 0x70 },
+ { "NMI_INTERRUPT_UMAC_FATAL", 0x71 },
+ { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 },
+ { "NMI_INTERRUPT_ACTION_PT", 0x7C },
+ { "NMI_INTERRUPT_UNKNOWN", 0x84 },
+ { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
+ { "ADVANCED_SYSASSERT", 0 },
+};
+
+static const char *
+iwx_desc_lookup(uint32_t num)
+{
+ int i;
+
+ for (i = 0; i < nitems(advanced_lookup) - 1; i++)
+ if (advanced_lookup[i].num ==
+ (num & ~IWX_FW_SYSASSERT_CPU_MASK))
+ return advanced_lookup[i].name;
+
+ /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
+ return advanced_lookup[i].name;
+}
+
+/*
+ * Support for dumping the error log seemed like a good idea ...
+ * but it's mostly hex junk and the only sensible thing is the
+ * hw/ucode revision (which we know anyway). Since it's here,
+ * I'll just leave it in, just in case e.g. the Intel guys want to
+ * help us decipher some "ADVANCED_SYSASSERT" later.
+ */
+static void
+iwx_nic_error(struct iwx_softc *sc)
+{
+ struct iwx_error_event_table table;
+ uint32_t base;
+
+ printf("%s: dumping device error log\n", DEVNAME(sc));
+ printf("%s: GOS-3758: 1\n", __func__);
+ base = sc->sc_uc.uc_lmac_error_event_table[0];
+ printf("%s: GOS-3758: 2\n", __func__);
+ if (base < 0x400000) {
+ printf("%s: Invalid error log pointer 0x%08x\n",
+ DEVNAME(sc), base);
+ return;
+ }
+
+ printf("%s: GOS-3758: 3\n", __func__);
+ if (iwx_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+ printf("%s: reading errlog failed\n", DEVNAME(sc));
+ return;
+ }
+
+ printf("%s: GOS-3758: 4\n", __func__);
+ if (!table.valid) {
+ printf("%s: errlog not found, skipping\n", DEVNAME(sc));
+ return;
+ }
+
+ printf("%s: GOS-3758: 5\n", __func__);
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ printf("%s: Start Error Log Dump:\n", DEVNAME(sc));
+ printf("%s: Status: 0x%x, count: %d\n", DEVNAME(sc),
+ sc->sc_flags, table.valid);
+ }
+
+ printf("%s: GOS-3758: 6\n", __func__);
+ printf("%s: 0x%08X | %-28s\n", DEVNAME(sc), table.error_id,
+ iwx_desc_lookup(table.error_id));
+ printf("%s: %08X | trm_hw_status0\n", DEVNAME(sc),
+ table.trm_hw_status0);
+ printf("%s: %08X | trm_hw_status1\n", DEVNAME(sc),
+ table.trm_hw_status1);
+ printf("%s: %08X | branchlink2\n", DEVNAME(sc), table.blink2);
+ printf("%s: %08X | interruptlink1\n", DEVNAME(sc), table.ilink1);
+ printf("%s: %08X | interruptlink2\n", DEVNAME(sc), table.ilink2);
+ printf("%s: %08X | data1\n", DEVNAME(sc), table.data1);
+ printf("%s: %08X | data2\n", DEVNAME(sc), table.data2);
+ printf("%s: %08X | data3\n", DEVNAME(sc), table.data3);
+ printf("%s: %08X | beacon time\n", DEVNAME(sc), table.bcon_time);
+ printf("%s: %08X | tsf low\n", DEVNAME(sc), table.tsf_low);
+ printf("%s: %08X | tsf hi\n", DEVNAME(sc), table.tsf_hi);
+ printf("%s: %08X | time gp1\n", DEVNAME(sc), table.gp1);
+ printf("%s: %08X | time gp2\n", DEVNAME(sc), table.gp2);
+ printf("%s: %08X | uCode revision type\n", DEVNAME(sc),
+ table.fw_rev_type);
+ printf("%s: %08X | uCode version major\n", DEVNAME(sc),
+ table.major);
+ printf("%s: %08X | uCode version minor\n", DEVNAME(sc),
+ table.minor);
+ printf("%s: %08X | hw version\n", DEVNAME(sc), table.hw_ver);
+ printf("%s: %08X | board version\n", DEVNAME(sc), table.brd_ver);
+ printf("%s: %08X | hcmd\n", DEVNAME(sc), table.hcmd);
+ printf("%s: %08X | isr0\n", DEVNAME(sc), table.isr0);
+ printf("%s: %08X | isr1\n", DEVNAME(sc), table.isr1);
+ printf("%s: %08X | isr2\n", DEVNAME(sc), table.isr2);
+ printf("%s: %08X | isr3\n", DEVNAME(sc), table.isr3);
+ printf("%s: %08X | isr4\n", DEVNAME(sc), table.isr4);
+ printf("%s: %08X | last cmd Id\n", DEVNAME(sc), table.last_cmd_id);
+ printf("%s: %08X | wait_event\n", DEVNAME(sc), table.wait_event);
+ printf("%s: %08X | l2p_control\n", DEVNAME(sc), table.l2p_control);
+ printf("%s: %08X | l2p_duration\n", DEVNAME(sc), table.l2p_duration);
+ printf("%s: %08X | l2p_mhvalid\n", DEVNAME(sc), table.l2p_mhvalid);
+ printf("%s: %08X | l2p_addr_match\n", DEVNAME(sc), table.l2p_addr_match);
+ printf("%s: %08X | lmpm_pmg_sel\n", DEVNAME(sc), table.lmpm_pmg_sel);
+ printf("%s: %08X | timestamp\n", DEVNAME(sc), table.u_timestamp);
+ printf("%s: %08X | flow_handler\n", DEVNAME(sc), table.flow_handler);
+
+ if (sc->sc_uc.uc_umac_error_event_table)
+ iwx_nic_umac_error(sc);
+}
+
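+/*
+ * Print the driver's view of the TX/RX rings and the current 802.11
+ * state. Used alongside iwx_nic_error() when handling a fatal
+ * firmware error.
+ */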
+static void
+iwx_dump_driver_status(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ enum ieee80211_state state = vap->iv_state;
+ int i;
+
+ printf("driver status:\n");
+ for (i = 0; i < nitems(sc->txq); i++) {
+ struct iwx_tx_ring *ring = &sc->txq[i];
+ printf(" tx ring %2d: qid=%-2d cur=%-3d "
+ "cur_hw=%-3d queued=%-3d\n",
+ i, ring->qid, ring->cur, ring->cur_hw,
+ ring->queued);
+ }
+ printf(" rx ring: cur=%d\n", sc->rxq.cur);
+ printf(" 802.11 state %s\n", ieee80211_state_name[state]);
+}
+
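+/*
+ * Sync the RX buffer for reading and point _var_ at the payload which
+ * follows the packet header. Assumes 'sc' and 'data' are in scope at
+ * the call site.
+ */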
+#define SYNC_RESP_STRUCT(_var_, _pkt_) \
+do { \
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); \
+ _var_ = (void *)((_pkt_)+1); \
+} while (/*CONSTCOND*/0)
+
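+/*
+ * A packet is considered valid unless its qid/idx/code fields are all
+ * zero or its length field carries the IWX_FH_RSCSR_FRAME_INVALID
+ * marker.
+ */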
+static int
+iwx_rx_pkt_valid(struct iwx_rx_packet *pkt)
+{
+ int qid, idx, code;
+
+ qid = pkt->hdr.qid & ~0x80;
+ idx = pkt->hdr.idx;
+ code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
+
+ return (!(qid == 0 && idx == 0 && code == 0) &&
+ pkt->len_n_flags != htole32(IWX_FH_RSCSR_FRAME_INVALID));
+}
+
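+/*
+ * Process all firmware notifications packed into a single RX buffer.
+ * Packets follow each other at IWX_FH_RSCSR_FRAME_ALIGN boundaries;
+ * AX210 and later devices ship only one packet per buffer, so the
+ * loop runs at most once there.
+ */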
+static void
+iwx_rx_pkt(struct iwx_softc *sc, struct iwx_rx_data *data, struct mbuf *ml)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct iwx_rx_packet *pkt, *nextpkt;
+ uint32_t offset = 0, nextoff = 0, nmpdu = 0, len;
+ struct mbuf *m0, *m;
+ const size_t minsz = sizeof(pkt->len_n_flags) + sizeof(pkt->hdr);
+ int qid, idx, code, handled = 1;
+
+ m0 = data->m;
+ while (m0 && offset + minsz < IWX_RBUF_SIZE) {
+ pkt = (struct iwx_rx_packet *)(m0->m_data + offset);
+ qid = pkt->hdr.qid;
+ idx = pkt->hdr.idx;
+ code = IWX_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
+
+ if (!iwx_rx_pkt_valid(pkt))
+ break;
+
+ /*
+ * XXX Intel inside (tm)
+ * Any commands in the LONG_GROUP could actually be in the
+ * LEGACY group. Firmware API versions >= 50 reject commands
+ * in group 0, forcing us to use this hack.
+ */
+ if (iwx_cmd_groupid(code) == IWX_LONG_GROUP) {
+ struct iwx_tx_ring *ring = &sc->txq[qid];
+ struct iwx_tx_data *txdata = &ring->data[idx];
+ if (txdata->flags & IWX_TXDATA_FLAG_CMD_IS_NARROW)
+ code = iwx_cmd_opcode(code);
+ }
+
+ len = sizeof(pkt->len_n_flags) + iwx_rx_packet_len(pkt);
+ if (len < minsz || len > (IWX_RBUF_SIZE - offset))
+ break;
+
+ /* First MPDU in this buffer: replenish the RX ring before m0 is handed up. */
+ if (code == IWX_REPLY_RX_MPDU_CMD && ++nmpdu == 1) {
+ /* Take mbuf m0 off the RX ring. */
+ if (iwx_rx_addbuf(sc, IWX_RBUF_SIZE, sc->rxq.cur)) {
+ break;
+ }
+ KASSERT((data->m != m0), ("%s: data->m != m0", __func__));
+ }
+
+ switch (code) {
+ case IWX_REPLY_RX_PHY_CMD:
+ /* XXX-THJ: I've not managed to hit this path in testing */
+ iwx_rx_rx_phy_cmd(sc, pkt, data);
+ break;
+
+ case IWX_REPLY_RX_MPDU_CMD: {
+ size_t maxlen = IWX_RBUF_SIZE - offset - minsz;
+ nextoff = offset +
+ roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+ nextpkt = (struct iwx_rx_packet *)
+ (m0->m_data + nextoff);
+ /* AX210 devices ship only one packet per Rx buffer. */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210 ||
+ nextoff + minsz >= IWX_RBUF_SIZE ||
+ !iwx_rx_pkt_valid(nextpkt)) {
+ /* No need to copy last frame in buffer. */
+ if (offset > 0)
+ m_adj(m0, offset);
+ iwx_rx_mpdu_mq(sc, m0, pkt->data, maxlen);
+ m0 = NULL; /* stack owns m0 now; abort loop */
+ } else {
+ /*
+ * Create an mbuf which points to the current
+ * packet. Always copy from offset zero to
+ * preserve m_pkthdr.
+ */
+ m = m_copym(m0, 0, M_COPYALL, M_NOWAIT);
+ if (m == NULL) {
+ m_freem(m0);
+ m0 = NULL;
+ break;
+ }
+ m_adj(m, offset);
+ iwx_rx_mpdu_mq(sc, m, pkt->data, maxlen);
+ }
+ break;
+ }
+
+// case IWX_BAR_FRAME_RELEASE:
+// iwx_rx_bar_frame_release(sc, pkt, ml);
+// break;
+//
+ case IWX_TX_CMD:
+ iwx_rx_tx_cmd(sc, pkt, data);
+ break;
+
+ case IWX_BA_NOTIF:
+ iwx_rx_compressed_ba(sc, pkt);
+ break;
+
+ case IWX_MISSED_BEACONS_NOTIFICATION:
+ IWX_DPRINTF(sc, IWX_DEBUG_BEACON,
+ "%s: IWX_MISSED_BEACONS_NOTIFICATION\n",
+ __func__);
+ iwx_rx_bmiss(sc, pkt, data);
+ break;
+
+ case IWX_MFUART_LOAD_NOTIFICATION:
+ break;
+
+ case IWX_ALIVE: {
+ struct iwx_alive_resp_v4 *resp4;
+ struct iwx_alive_resp_v5 *resp5;
+ struct iwx_alive_resp_v6 *resp6;
+
+ DPRINTF(("%s: firmware alive\n", __func__));
+ sc->sc_uc.uc_ok = 0;
+
+ /*
+ * For v5 and above, we can check the version, for older
+ * versions we need to check the size.
+ */
+ if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
+ IWX_ALIVE) == 6) {
+ SYNC_RESP_STRUCT(resp6, pkt);
+ if (iwx_rx_packet_payload_len(pkt) !=
+ sizeof(*resp6)) {
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp6->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp6->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp6->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp6->umac_data.dbg_ptrs.error_info_addr);
+ sc->sc_sku_id[0] =
+ le32toh(resp6->sku_id.data[0]);
+ sc->sc_sku_id[1] =
+ le32toh(resp6->sku_id.data[1]);
+ sc->sc_sku_id[2] =
+ le32toh(resp6->sku_id.data[2]);
+ if (resp6->status == IWX_ALIVE_STATUS_OK) {
+ sc->sc_uc.uc_ok = 1;
+ }
+ } else if (iwx_lookup_notif_ver(sc, IWX_LEGACY_GROUP,
+ IWX_ALIVE) == 5) {
+ SYNC_RESP_STRUCT(resp5, pkt);
+ if (iwx_rx_packet_payload_len(pkt) !=
+ sizeof(*resp5)) {
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp5->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp5->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp5->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp5->umac_data.dbg_ptrs.error_info_addr);
+ sc->sc_sku_id[0] =
+ le32toh(resp5->sku_id.data[0]);
+ sc->sc_sku_id[1] =
+ le32toh(resp5->sku_id.data[1]);
+ sc->sc_sku_id[2] =
+ le32toh(resp5->sku_id.data[2]);
+ if (resp5->status == IWX_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ } else if (iwx_rx_packet_payload_len(pkt) == sizeof(*resp4)) {
+ SYNC_RESP_STRUCT(resp4, pkt);
+ sc->sc_uc.uc_lmac_error_event_table[0] = le32toh(
+ resp4->lmac_data[0].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_lmac_error_event_table[1] = le32toh(
+ resp4->lmac_data[1].dbg_ptrs.error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table = le32toh(
+ resp4->lmac_data[0].dbg_ptrs.log_event_table_ptr);
+ sc->sc_uc.uc_umac_error_event_table = le32toh(
+ resp4->umac_data.dbg_ptrs.error_info_addr);
+ if (resp4->status == IWX_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ } else
+ printf("unknown payload version");
+
+ sc->sc_uc.uc_intr = 1;
+ wakeup(&sc->sc_uc);
+ break;
+ }
+
+ case IWX_STATISTICS_NOTIFICATION: {
+ struct iwx_notif_statistics *stats;
+ SYNC_RESP_STRUCT(stats, pkt);
+ memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
+ sc->sc_noise = iwx_get_noise(&stats->rx.general);
+ break;
+ }
+
+ case IWX_DTS_MEASUREMENT_NOTIFICATION:
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_DTS_MEASUREMENT_NOTIF_WIDE):
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_TEMP_REPORTING_THRESHOLDS_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_PHY_OPS_GROUP,
+ IWX_CT_KILL_NOTIFICATION): {
+ struct iwx_ct_kill_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ printf("%s: device at critical temperature (%u degC), "
+ "stopping device\n",
+ DEVNAME(sc), le16toh(notif->temperature));
+ sc->sc_flags |= IWX_FLAG_HW_ERR;
+ ieee80211_restart_all(ic);
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_SCD_QUEUE_CONFIG_CMD):
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_RX_BAID_ALLOCATION_CONFIG_CMD):
+ case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
+ IWX_SESSION_PROTECTION_CMD):
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_GET_INFO):
+ case IWX_ADD_STA_KEY:
+ case IWX_PHY_CONFIGURATION_CMD:
+ case IWX_TX_ANT_CONFIGURATION_CMD:
+ case IWX_ADD_STA:
+ case IWX_MAC_CONTEXT_CMD:
+ case IWX_REPLY_SF_CFG_CMD:
+ case IWX_POWER_TABLE_CMD:
+ case IWX_LTR_CONFIG:
+ case IWX_PHY_CONTEXT_CMD:
+ case IWX_BINDING_CONTEXT_CMD:
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_CFG_CMD):
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_REQ_UMAC):
+ case IWX_WIDE_ID(IWX_LONG_GROUP, IWX_SCAN_ABORT_UMAC):
+ case IWX_REPLY_BEACON_FILTERING_CMD:
+ case IWX_MAC_PM_POWER_TABLE:
+ case IWX_TIME_QUOTA_CMD:
+ case IWX_REMOVE_STA:
+ case IWX_TXPATH_FLUSH:
+ case IWX_BT_CONFIG:
+ case IWX_MCC_UPDATE_CMD:
+ case IWX_TIME_EVENT_CMD:
+ case IWX_STATISTICS_CMD:
+ case IWX_SCD_QUEUE_CFG: {
+ size_t pkt_len;
+
+ if (sc->sc_cmd_resp_pkt[idx] == NULL)
+ break;
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+
+ pkt_len = sizeof(pkt->len_n_flags) +
+ iwx_rx_packet_len(pkt);
+
+ if ((pkt->hdr.flags & IWX_CMD_FAILED_MSK) ||
+ pkt_len < sizeof(*pkt) ||
+ pkt_len > sc->sc_cmd_resp_len[idx]) {
+ free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF);
+ sc->sc_cmd_resp_pkt[idx] = NULL;
+ break;
+ }
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+ memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
+ break;
+ }
+
+ case IWX_INIT_COMPLETE_NOTIF:
+ sc->sc_init_complete |= IWX_INIT_COMPLETE;
+ wakeup(&sc->sc_init_complete);
+ break;
+
+ case IWX_SCAN_COMPLETE_UMAC: {
+ DPRINTF(("%s: >>> IWX_SCAN_COMPLETE_UMAC\n", __func__));
+ struct iwx_umac_scan_complete *notif __unused;
+ SYNC_RESP_STRUCT(notif, pkt);
+ DPRINTF(("%s: scan complete notif->status=%d\n", __func__,
+ notif->status));
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
+ iwx_endscan(sc);
+ break;
+ }
+
+ case IWX_SCAN_ITERATION_COMPLETE_UMAC: {
+ DPRINTF(("%s: >>> IWX_SCAN_ITERATION_COMPLETE_UMAC\n",
+ __func__));
+ struct iwx_umac_scan_iter_complete_notif *notif __unused;
+ SYNC_RESP_STRUCT(notif, pkt);
+ DPRINTF(("%s: iter scan complete notif->status=%d\n", __func__,
+ notif->status));
+ iwx_endscan(sc);
+ break;
+ }
+
+ case IWX_MCC_CHUB_UPDATE_CMD: {
+ struct iwx_mcc_chub_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ iwx_mcc_update(sc, notif);
+ break;
+ }
+
+ case IWX_REPLY_ERROR: {
+ struct iwx_error_resp *resp;
+ SYNC_RESP_STRUCT(resp, pkt);
+ printf("%s: firmware error 0x%x, cmd 0x%x\n",
+ DEVNAME(sc), le32toh(resp->error_type),
+ resp->cmd_id);
+ break;
+ }
+
+ case IWX_TIME_EVENT_NOTIFICATION: {
+ struct iwx_time_event_notif *notif;
+ uint32_t action;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ if (sc->sc_time_event_uid != le32toh(notif->unique_id))
+ break;
+ action = le32toh(notif->action);
+ if (action & IWX_TE_V2_NOTIF_HOST_EVENT_END)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_MAC_CONF_GROUP,
+ IWX_SESSION_PROTECTION_NOTIF): {
+ struct iwx_session_prot_notif *notif;
+ uint32_t status, start, conf_id;
+
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ status = le32toh(notif->status);
+ start = le32toh(notif->start);
+ conf_id = le32toh(notif->conf_id);
+ /* Check for end of successful PROTECT_CONF_ASSOC. */
+ if (status == 1 && start == 0 &&
+ conf_id == IWX_SESSION_PROTECT_CONF_ASSOC)
+ sc->sc_flags &= ~IWX_FLAG_TE_ACTIVE;
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP,
+ IWX_FSEQ_VER_MISMATCH_NOTIFICATION):
+ break;
+
+ /*
+ * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
+ * messages. Just ignore them for now.
+ */
+ case IWX_DEBUG_LOG_MSG:
+ break;
+
+ case IWX_MCAST_FILTER_CMD:
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_DQA_ENABLE_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_SOC_CONFIGURATION_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_SYSTEM_GROUP, IWX_INIT_EXTENDED_CFG_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_NVM_ACCESS_COMPLETE):
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RX_NO_DATA_NOTIF):
+ break; /* happens in monitor mode; ignore for now */
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_TLC_MNG_CONFIG_CMD):
+ break;
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP,
+ IWX_TLC_MNG_UPDATE_NOTIF): {
+ struct iwx_tlc_update_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ if (iwx_rx_packet_payload_len(pkt) == sizeof(*notif))
+ iwx_rs_update(sc, notif);
+ break;
+ }
+
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, IWX_RLC_CONFIG_CMD):
+ break;
+
+ /* undocumented notification from iwx-ty-a0-gf-a0-77 image */
+ case IWX_WIDE_ID(IWX_DATA_PATH_GROUP, 0xf8):
+ break;
+
+ case IWX_WIDE_ID(IWX_REGULATORY_AND_NVM_GROUP,
+ IWX_PNVM_INIT_COMPLETE):
+ DPRINTF(("%s: IWX_PNVM_INIT_COMPLETE\n", __func__));
+ sc->sc_init_complete |= IWX_PNVM_COMPLETE;
+ wakeup(&sc->sc_init_complete);
+ break;
+
+ default:
+ handled = 0;
+ /* XXX wulf: Get rid of bluetooth-related spam */
+ if ((code == 0xc2 && pkt->len_n_flags == 0x0000000c) ||
+ (code == 0xce && pkt->len_n_flags == 0x2000002c))
+ break;
+ printf("%s: unhandled firmware response 0x%x/0x%x "
+ "rx ring %d[%d]\n",
+ DEVNAME(sc), code, pkt->len_n_flags,
+ (qid & ~0x80), idx);
+ break;
+ }
+
+ /*
+ * uCode sets bit 0x80 when it originates the notification,
+ * i.e. when the notification is not a direct response to a
+ * command sent by the driver.
+ * For example, uCode issues IWX_REPLY_RX when it sends a
+ * received frame to the driver.
+ */
+ if (handled && !(qid & (1 << 7))) {
+ iwx_cmd_done(sc, qid, idx, code);
+ }
+
+ offset += roundup(len, IWX_FH_RSCSR_FRAME_ALIGN);
+
+ /* AX210 devices ship only one packet per Rx buffer. */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ break;
+ }
+
+ if (m0 && m0 != data->m)
+ m_freem(m0);
+}
+
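+/*
+ * Drain the RX ring up to the index the hardware has most recently
+ * closed, handing each buffer to iwx_rx_pkt(), then write the last
+ * processed index back to the device.
+ */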
+static void
+iwx_notif_intr(struct iwx_softc *sc)
+{
+ struct mbuf m;
+ uint16_t hw;
+
+ bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
+ BUS_DMASYNC_POSTREAD);
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ uint16_t *status = sc->rxq.stat_dma.vaddr;
+ hw = le16toh(*status) & 0xfff;
+ } else
+ hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
+ hw &= (IWX_RX_MQ_RING_COUNT - 1);
+ while (sc->rxq.cur != hw) {
+ struct iwx_rx_data *data = &sc->rxq.data[sc->rxq.cur];
+
+ bus_dmamap_sync(sc->rxq.data_dmat, data->map,
+ BUS_DMASYNC_POSTREAD);
+
+ iwx_rx_pkt(sc, data, &m);
+ sc->rxq.cur = (sc->rxq.cur + 1) % IWX_RX_MQ_RING_COUNT;
+ }
+
+ /*
+ * Tell the firmware what we have processed. The hardware appears
+ * to misbehave unless this write is aligned down to a multiple of 8.
+ */
+ hw = (hw == 0) ? IWX_RX_MQ_RING_COUNT - 1 : hw - 1;
+ IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, hw & ~7);
+}
+
+#if 0
+int
+iwx_intr(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ifnet *ifp = IC2IFP(ic);
+ int r1, r2, rv = 0;
+
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+
+ if (sc->sc_flags & IWX_FLAG_USE_ICT) {
+ uint32_t *ict = sc->ict_dma.vaddr;
+ int tmp;
+
+ tmp = htole32(ict[sc->ict_cur]);
+ if (!tmp)
+ goto out_ena;
+
+ /*
+ * ok, there was something. keep plowing until we have all.
+ */
+ r1 = r2 = 0;
+ while (tmp) {
+ r1 |= tmp;
+ ict[sc->ict_cur] = 0;
+ sc->ict_cur = (sc->ict_cur+1) % IWX_ICT_COUNT;
+ tmp = htole32(ict[sc->ict_cur]);
+ }
+
+ /* this is where the fun begins. don't ask */
+ if (r1 == 0xffffffff)
+ r1 = 0;
+
+ /* i am not expected to understand this */
+ if (r1 & 0xc0000)
+ r1 |= 0x8000;
+ r1 = (0xff & r1) | ((0xff00 & r1) << 16);
+ } else {
+ r1 = IWX_READ(sc, IWX_CSR_INT);
+ if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
+ goto out;
+ r2 = IWX_READ(sc, IWX_CSR_FH_INT_STATUS);
+ }
+ if (r1 == 0 && r2 == 0) {
+ goto out_ena;
+ }
+
+ IWX_WRITE(sc, IWX_CSR_INT, r1 | ~sc->sc_intmask);
+
+ if (r1 & IWX_CSR_INT_BIT_ALIVE) {
+#if 0
+ int i;
+ /* Firmware has now configured the RFH. */
+ for (i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+ iwx_update_rx_desc(sc, &sc->rxq, i);
+#endif
+ IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+ }
+
+
+ if (r1 & IWX_CSR_INT_BIT_RF_KILL) {
+ iwx_check_rfkill(sc);
+ rv = 1;
+ goto out_ena;
+ }
+
+ if (r1 & IWX_CSR_INT_BIT_SW_ERR) {
+ if (ifp->if_flags & IFF_DEBUG) {
+ iwx_nic_error(sc);
+ iwx_dump_driver_status(sc);
+ }
+ printf("%s: fatal firmware error\n", DEVNAME(sc));
+ ieee80211_restart_all(ic);
+ rv = 1;
+ goto out;
+
+ }
+
+ if (r1 & IWX_CSR_INT_BIT_HW_ERR) {
+ printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+ iwx_stop(sc);
+ rv = 1;
+ goto out;
+ }
+
+ /* firmware chunk loaded */
+ if (r1 & IWX_CSR_INT_BIT_FH_TX) {
+ IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_TX_MASK);
+
+ sc->sc_fw_chunk_done = 1;
+ wakeup(&sc->sc_fw);
+ }
+
+ if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX |
+ IWX_CSR_INT_BIT_RX_PERIODIC)) {
+ if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX)) {
+ IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, IWX_CSR_FH_INT_RX_MASK);
+ }
+ if (r1 & IWX_CSR_INT_BIT_RX_PERIODIC) {
+ IWX_WRITE(sc, IWX_CSR_INT, IWX_CSR_INT_BIT_RX_PERIODIC);
+ }
+
+ /* Disable periodic interrupt; we use it as just a one-shot. */
+ IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG, IWX_CSR_INT_PERIODIC_DIS);
+
+ /*
+ * Enable periodic interrupt in 8 msec only if we received
+ * real RX interrupt (instead of just periodic int), to catch
+ * any dangling Rx interrupt. If it was just the periodic
+ * interrupt, there was no dangling Rx activity, and no need
+ * to extend the periodic interrupt; one-shot is enough.
+ */
+ if (r1 & (IWX_CSR_INT_BIT_FH_RX | IWX_CSR_INT_BIT_SW_RX))
+ IWX_WRITE_1(sc, IWX_CSR_INT_PERIODIC_REG,
+ IWX_CSR_INT_PERIODIC_ENA);
+
+ iwx_notif_intr(sc);
+ }
+
+ rv = 1;
+
+ out_ena:
+ iwx_restore_interrupts(sc);
+ out:
+ return rv;
+}
+#endif
+
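+/*
+ * MSI-X interrupt handler. The FH and HW interrupt cause registers
+ * are read and acknowledged up front, the individual causes are
+ * dispatched, and the per-vector automask bit is cleared on the way
+ * out so the device can raise the next interrupt.
+ */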
+static void
+iwx_intr_msix(void *arg)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint32_t inta_fh, inta_hw;
+ int vector = 0;
+
+ IWX_LOCK(sc);
+
+ inta_fh = IWX_READ(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = IWX_READ(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD);
+ IWX_WRITE(sc, IWX_CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ IWX_WRITE(sc, IWX_CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ inta_fh &= sc->sc_fh_mask;
+ inta_hw &= sc->sc_hw_mask;
+
+ if (inta_fh & IWX_MSIX_FH_INT_CAUSES_Q0 ||
+ inta_fh & IWX_MSIX_FH_INT_CAUSES_Q1) {
+ iwx_notif_intr(sc);
+ }
+
+ /* firmware chunk loaded */
+ if (inta_fh & IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
+ sc->sc_fw_chunk_done = 1;
+ wakeup(&sc->sc_fw);
+ }
+
+ if ((inta_fh & IWX_MSIX_FH_INT_CAUSES_FH_ERR) ||
+ (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
+ (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+ if (sc->sc_debug) {
+ iwx_nic_error(sc);
+ iwx_dump_driver_status(sc);
+ }
+ printf("%s: fatal firmware error\n", DEVNAME(sc));
+ ieee80211_restart_all(ic);
+ goto out;
+ }
+
+ if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL) {
+ iwx_check_rfkill(sc);
+ }
+
+ if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR) {
+ printf("%s: hardware error, stopping device \n", DEVNAME(sc));
+ sc->sc_flags |= IWX_FLAG_HW_ERR;
+ iwx_stop(sc);
+ goto out;
+ }
+
+ if (inta_hw & IWX_MSIX_HW_INT_CAUSES_REG_ALIVE) {
+ IWX_DPRINTF(sc, IWX_DEBUG_TRACE,
+ "%s:%d WARNING: Skipping rx desc update\n",
+ __func__, __LINE__);
+#if 0
+ /*
+ * XXX-THJ: we don't have the dma segment handy. This is hacked
+ * out in the fc release, return to it if we ever get this
+ * warning.
+ */
+ /* Firmware has now configured the RFH. */
+ for (int i = 0; i < IWX_RX_MQ_RING_COUNT; i++)
+ iwx_update_rx_desc(sc, &sc->rxq, i);
+#endif
+ IWX_WRITE(sc, IWX_RFH_Q0_FRBDCB_WIDX_TRG, 8);
+ }
+
+ /*
+ * Before raising the interrupt the HW masks it to prevent nesting,
+ * by setting the corresponding bit in the automask register. After
+ * the interrupt has been handled it must be re-enabled by clearing
+ * that bit. The register is write-1-clear (W1C): a bit is cleared
+ * by writing 1 to it.
+ */
+ IWX_WRITE(sc, IWX_CSR_MSIX_AUTOMASK_ST_AD, 1 << vector);
+out:
+ IWX_UNLOCK(sc);
+ return;
+}
+
+/*
+ * The device info table below contains device-specific config overrides.
+ * The most important parameter derived from this table is the name of the
+ * firmware image to load.
+ *
+ * The Linux iwlwifi driver uses an "old" and a "new" device info table.
+ * The "old" table matches devices based on PCI vendor/product IDs only.
+ * The "new" table extends this with various device parameters derived
+ * from MAC type and RF type.
+ *
+ * In iwlwifi "old" and "new" tables share the same array, where "old"
+ * entries contain dummy values for data defined only for "new" entries.
+ * As of 2022, Linux developers are still in the process of moving entries
+ * from "old" to "new" style and it looks like this effort has stalled in
+ * in some work-in-progress state for quite a while. Linux commits moving
+ * entries from "old" to "new" have at times been reverted due to regressions.
+ * Part of this complexity comes from iwlwifi supporting both iwm(4) and iwx(4)
+ * devices in the same driver.
+ *
+ * Our table below contains mostly "new" entries declared in iwlwifi
+ * with the _IWL_DEV_INFO() macro (with a leading underscore).
+ * Other devices are matched based on PCI vendor/product ID as usual,
+ * unless matching specific PCI subsystem vendor/product IDs is required.
+ *
+ * Some "old"-style entries are required to identify the firmware image to use.
+ * Others might be used to print a specific marketing name into Linux dmesg,
+ * but we can't be sure whether the corresponding devices would be matched
+ * correctly in the absence of their entries. So we include them just in case.
+ */
+
+struct iwx_dev_info {
+ uint16_t device;
+ uint16_t subdevice;
+ uint16_t mac_type;
+ uint16_t rf_type;
+ uint8_t mac_step;
+ uint8_t rf_id;
+ uint8_t no_160;
+ uint8_t cores;
+ uint8_t cdb;
+ uint8_t jacket;
+ const struct iwx_device_cfg *cfg;
+};
+
+#define _IWX_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
+ _rf_id, _no_160, _cores, _cdb, _jacket, _cfg) \
+ { .device = (_device), .subdevice = (_subdevice), .cfg = &(_cfg), \
+ .mac_type = _mac_type, .rf_type = _rf_type, \
+ .no_160 = _no_160, .cores = _cores, .rf_id = _rf_id, \
+ .mac_step = _mac_step, .cdb = _cdb, .jacket = _jacket }
+
+#define IWX_DEV_INFO(_device, _subdevice, _cfg) \
+ _IWX_DEV_INFO(_device, _subdevice, IWX_CFG_ANY, IWX_CFG_ANY, \
+ IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_ANY, \
+ IWX_CFG_ANY, IWX_CFG_ANY, _cfg)
+
+/*
+ * When adding entries to this table keep in mind that entries must
+ * be listed in the same order as in the Linux driver. Code walks this
+ * table backwards and uses the first matching entry it finds.
+ * Device firmware must be available in fw_update(8).
+ */
+static const struct iwx_dev_info iwx_dev_info_table[] = {
+ /* So with HR */
+ IWX_DEV_INFO(0x2725, 0x0090, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x0020, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x2020, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x0024, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x0310, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x0510, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x0A10, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0xE020, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0xE024, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x4020, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x6020, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x6024, iwx_2ax_cfg_ty_gf_a0),
+ IWX_DEV_INFO(0x2725, 0x1673, iwx_2ax_cfg_ty_gf_a0), /* killer_1675w */
+ IWX_DEV_INFO(0x2725, 0x1674, iwx_2ax_cfg_ty_gf_a0), /* killer_1675x */
+ IWX_DEV_INFO(0x51f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+ IWX_DEV_INFO(0x51f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+ IWX_DEV_INFO(0x51f1, 0x1691, iwx_2ax_cfg_so_gf4_a0),
+ IWX_DEV_INFO(0x51f1, 0x1692, iwx_2ax_cfg_so_gf4_a0),
+ IWX_DEV_INFO(0x54f0, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+ IWX_DEV_INFO(0x54f0, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+ IWX_DEV_INFO(0x7a70, 0x0090, iwx_2ax_cfg_so_gf_a0_long),
+ IWX_DEV_INFO(0x7a70, 0x0098, iwx_2ax_cfg_so_gf_a0_long),
+ IWX_DEV_INFO(0x7a70, 0x00b0, iwx_2ax_cfg_so_gf4_a0_long),
+ IWX_DEV_INFO(0x7a70, 0x0310, iwx_2ax_cfg_so_gf_a0_long),
+ IWX_DEV_INFO(0x7a70, 0x0510, iwx_2ax_cfg_so_gf_a0_long),
+ IWX_DEV_INFO(0x7a70, 0x0a10, iwx_2ax_cfg_so_gf_a0_long),
+ IWX_DEV_INFO(0x7af0, 0x0090, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x7af0, 0x0098, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x7af0, 0x00b0, iwx_2ax_cfg_so_gf4_a0),
+ IWX_DEV_INFO(0x7a70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+ IWX_DEV_INFO(0x7a70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+ IWX_DEV_INFO(0x7af0, 0x0310, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x7af0, 0x0510, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x7af0, 0x0a10, iwx_2ax_cfg_so_gf_a0),
+ IWX_DEV_INFO(0x7f70, 0x1691, iwx_2ax_cfg_so_gf4_a0), /* killer_1690s */
+ IWX_DEV_INFO(0x7f70, 0x1692, iwx_2ax_cfg_so_gf4_a0), /* killer_1690i */
+
+ /* So with GF2 */
+ IWX_DEV_INFO(0x2726, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x2726, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+ IWX_DEV_INFO(0x51f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x51f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+ IWX_DEV_INFO(0x54f0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x54f0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+ IWX_DEV_INFO(0x7a70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x7a70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+ IWX_DEV_INFO(0x7af0, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x7af0, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+ IWX_DEV_INFO(0x7f70, 0x1671, iwx_2ax_cfg_so_gf_a0), /* killer_1675s */
+ IWX_DEV_INFO(0x7f70, 0x1672, iwx_2ax_cfg_so_gf_a0), /* killer_1675i */
+
+ /* Qu with Jf, C step */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9461_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* iwl9461 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9462 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_qu_c0_jf_b0_cfg), /* 9560 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY,
+ iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550s */
+ _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY,
+ iwx_9560_qu_c0_jf_b0_cfg), /* 9560_killer_1550i */
+
+ /* QuZ with Jf */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9461 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_9560_quz_a0_jf_b0_cfg), /* 9462 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, 0x1551,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY,
+ iwx_9560_quz_a0_jf_b0_cfg), /* killer_1550s */
+ _IWX_DEV_INFO(IWX_CFG_ANY, 0x1552,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY,
+ iwx_9560_quz_a0_jf_b0_cfg), /* 9560_killer_1550i */
+
+ /* Qu with Hr, B step */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
+ IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+ IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_qu_b0_hr1_b0), /* AX101 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_B_STEP,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_qu_b0_hr_b0), /* AX203 */
+
+ /* Qu with Hr, C step */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+ IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_qu_c0_hr1_b0), /* AX101 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_qu_c0_hr_b0), /* AX203 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QU, IWX_SILICON_C_STEP,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_qu_c0_hr_b0), /* AX201 */
+
+ /* QuZ with Hr */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+ IWX_CFG_ANY, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_quz_a0_hr1_b0), /* AX101 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_QUZ, IWX_SILICON_B_STEP,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_quz_a0_hr_b0), /* AX203 */
+
+ /* SoF with JF2 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
+
+ /* SoF with JF */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_name */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
+
+ /* So with Hr */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* AX203 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* ax101 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* ax201 */
+
+ /* So-F with Hr */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_NO_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* AX203 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR1, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* AX101 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_HR2, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_cfg_so_a0_hr_b0), /* AX201 */
+
+ /* So-F with GF */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_2ax_cfg_so_gf_a0), /* AX211 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SOF, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
+ iwx_2ax_cfg_so_gf4_a0), /* AX411 */
+
+ /* So with GF */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_NO_CDB, IWX_CFG_ANY,
+ iwx_2ax_cfg_so_gf_a0), /* AX211 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_GF, IWX_CFG_ANY,
+ IWX_CFG_160, IWX_CFG_ANY, IWX_CFG_CDB, IWX_CFG_ANY,
+ iwx_2ax_cfg_so_gf4_a0), /* AX411 */
+
+ /* So with JF2 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF2, IWX_CFG_RF_ID_JF,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9560 */
+
+ /* So with JF */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9461_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462_160 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* iwl9461 */
+ _IWX_DEV_INFO(IWX_CFG_ANY, IWX_CFG_ANY,
+ IWX_CFG_MAC_TYPE_SO, IWX_CFG_ANY,
+ IWX_CFG_RF_TYPE_JF1, IWX_CFG_RF_ID_JF1_DIV,
+ IWX_CFG_NO_160, IWX_CFG_CORES_BT, IWX_CFG_NO_CDB,
+ IWX_CFG_ANY, iwx_2ax_cfg_so_jf_b0), /* 9462 */
+};
+
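+/*
+ * One-time hardware bring-up: prepare and start the hardware, run the
+ * init firmware once, then stop the device again. On the first
+ * successful run this prints version information and the MAC address,
+ * and disables the 5GHz band if the NVM reports no support for it.
+ */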
+static int
+iwx_preinit(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ int err;
+
+ err = iwx_prepare_card_hw(sc);
+ if (err) {
+ printf("%s: could not initialize hardware\n", DEVNAME(sc));
+ return err;
+ }
+
+ if (sc->attached) {
+ return 0;
+ }
+
+ err = iwx_start_hw(sc);
+ if (err) {
+ printf("%s: could not initialize hardware\n", DEVNAME(sc));
+ return err;
+ }
+
+ err = iwx_run_init_mvm_ucode(sc, 1);
+ iwx_stop_device(sc);
+ if (err) {
+ printf("%s: failed to stop device\n", DEVNAME(sc));
+ return err;
+ }
+
+ /* Print version info and MAC address on first successful fw load. */
+ sc->attached = 1;
+ if (sc->sc_pnvm_ver) {
+ printf("%s: hw rev 0x%x, fw %s, pnvm %08x, "
+ "address %s\n",
+ DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
+ sc->sc_fwver, sc->sc_pnvm_ver,
+ ether_sprintf(sc->sc_nvm.hw_addr));
+ } else {
+ printf("%s: hw rev 0x%x, fw %s, address %s\n",
+ DEVNAME(sc), sc->sc_hw_rev & IWX_CSR_HW_REV_TYPE_MSK,
+ sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
+ }
+
+ /* not all hardware can do 5GHz band */
+ if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
+ memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
+ sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
+
+ return 0;
+}
+
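+/*
+ * Deferred attach, run from a config intrhook: perform the first
+ * firmware load via iwx_preinit() and, on success, complete the
+ * net80211 attachment and install the driver's method overrides.
+ */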
+static void
+iwx_attach_hook(void *self)
+{
+ struct iwx_softc *sc = (void *)self;
+ struct ieee80211com *ic = &sc->sc_ic;
+ int err;
+
+ IWX_LOCK(sc);
+ err = iwx_preinit(sc);
+ IWX_UNLOCK(sc);
+ if (err != 0)
+ goto out;
+
+ iwx_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+ ic->ic_channels);
+
+ ieee80211_ifattach(ic);
+ ic->ic_vap_create = iwx_vap_create;
+ ic->ic_vap_delete = iwx_vap_delete;
+ ic->ic_raw_xmit = iwx_raw_xmit;
+ ic->ic_node_alloc = iwx_node_alloc;
+ ic->ic_scan_start = iwx_scan_start;
+ ic->ic_scan_end = iwx_scan_end;
+ ic->ic_update_mcast = iwx_update_mcast;
+ ic->ic_getradiocaps = iwx_init_channel_map;
+
+ ic->ic_set_channel = iwx_set_channel;
+ ic->ic_scan_curchan = iwx_scan_curchan;
+ ic->ic_scan_mindwell = iwx_scan_mindwell;
+ ic->ic_wme.wme_update = iwx_wme_update;
+ ic->ic_parent = iwx_parent;
+ ic->ic_transmit = iwx_transmit;
+
+ sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start;
+ ic->ic_ampdu_rx_start = iwx_ampdu_rx_start;
+ sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop;
+ ic->ic_ampdu_rx_stop = iwx_ampdu_rx_stop;
+
+ sc->sc_addba_request = ic->ic_addba_request;
+ ic->ic_addba_request = iwx_addba_request;
+ sc->sc_addba_response = ic->ic_addba_response;
+ ic->ic_addba_response = iwx_addba_response;
+
+ iwx_radiotap_attach(sc);
+ ieee80211_announce(ic);
+out:
+ config_intrhook_disestablish(&sc->sc_preinit_hook);
+}
+
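+/*
+ * Match this device against the config table above. MAC and RF
+ * parameters are derived from the CSR hardware revision and RF id
+ * registers; rf_id, no_160 and cores are decoded from the PCI
+ * subsystem device id. The table is walked backwards so that more
+ * specific entries take precedence.
+ */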
+const struct iwx_device_cfg *
+iwx_find_device_cfg(struct iwx_softc *sc)
+{
+ uint16_t sdev_id, mac_type, rf_type;
+ uint8_t mac_step, cdb, jacket, rf_id, no_160, cores;
+ int i;
+
+ sdev_id = pci_get_subdevice(sc->sc_dev);
+ mac_type = IWX_CSR_HW_REV_TYPE(sc->sc_hw_rev);
+ mac_step = IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2);
+ rf_type = IWX_CSR_HW_RFID_TYPE(sc->sc_hw_rf_id);
+ cdb = IWX_CSR_HW_RFID_IS_CDB(sc->sc_hw_rf_id);
+ jacket = IWX_CSR_HW_RFID_IS_JACKET(sc->sc_hw_rf_id);
+
+ rf_id = IWX_SUBDEVICE_RF_ID(sdev_id);
+ no_160 = IWX_SUBDEVICE_NO_160(sdev_id);
+ cores = IWX_SUBDEVICE_CORES(sdev_id);
+
+ for (i = nitems(iwx_dev_info_table) - 1; i >= 0; i--) {
+ const struct iwx_dev_info *dev_info = &iwx_dev_info_table[i];
+
+ if (dev_info->device != (uint16_t)IWX_CFG_ANY &&
+ dev_info->device != sc->sc_pid)
+ continue;
+
+ if (dev_info->subdevice != (uint16_t)IWX_CFG_ANY &&
+ dev_info->subdevice != sdev_id)
+ continue;
+
+ if (dev_info->mac_type != (uint16_t)IWX_CFG_ANY &&
+ dev_info->mac_type != mac_type)
+ continue;
+
+ if (dev_info->mac_step != (uint8_t)IWX_CFG_ANY &&
+ dev_info->mac_step != mac_step)
+ continue;
+
+ if (dev_info->rf_type != (uint16_t)IWX_CFG_ANY &&
+ dev_info->rf_type != rf_type)
+ continue;
+
+ if (dev_info->cdb != (uint8_t)IWX_CFG_ANY &&
+ dev_info->cdb != cdb)
+ continue;
+
+ if (dev_info->jacket != (uint8_t)IWX_CFG_ANY &&
+ dev_info->jacket != jacket)
+ continue;
+
+ if (dev_info->rf_id != (uint8_t)IWX_CFG_ANY &&
+ dev_info->rf_id != rf_id)
+ continue;
+
+ if (dev_info->no_160 != (uint8_t)IWX_CFG_ANY &&
+ dev_info->no_160 != no_160)
+ continue;
+
+ if (dev_info->cores != (uint8_t)IWX_CFG_ANY &&
+ dev_info->cores != cores)
+ continue;
+
+ return dev_info->cfg;
+ }
+
+ return NULL;
+}
+
+static int
+iwx_probe(device_t dev)
+{
+ int i;
+
+ for (i = 0; i < nitems(iwx_devices); i++) {
+ if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
+ pci_get_device(dev) == iwx_devices[i].device) {
+ device_set_desc(dev, iwx_devices[i].name);
+
+ /*
+ * Due to significant existing deployments using
+ * iwlwifi, lower the priority of iwx.
+ *
+ * This inverts the advice in bus.h, where drivers
+ * supporting newer hardware should return
+ * BUS_PROBE_DEFAULT and drivers for older devices
+ * return BUS_PROBE_LOW_PRIORITY.
+ */
+ return (BUS_PROBE_LOW_PRIORITY);
+ }
+ }
+
+ return (ENXIO);
+}
+
+static int
+iwx_attach(device_t dev)
+{
+ struct iwx_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = &sc->sc_ic;
+ const struct iwx_device_cfg *cfg;
+ int err;
+ int txq_i, i, j;
+ size_t ctxt_info_size;
+ int rid;
+ int count;
+ int error;
+
+ sc->sc_dev = dev;
+ sc->sc_pid = pci_get_device(dev);
+ sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
+
+ TASK_INIT(&sc->sc_es_task, 0, iwx_endscan_cb, sc);
+ IWX_LOCK_INIT(sc);
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
+ TASK_INIT(&sc->ba_rx_task, 0, iwx_ba_rx_task, sc);
+ TASK_INIT(&sc->ba_tx_task, 0, iwx_ba_tx_task, sc);
+ sc->sc_tq = taskqueue_create("iwx_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &sc->sc_tq);
+ error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwx_taskq");
+ if (error != 0) {
+ device_printf(dev, "can't start taskq thread, error %d\n",
+ error);
+ return (ENXIO);
+ }
+
+ pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off);
+ if (sc->sc_cap_off == 0) {
+ device_printf(dev, "PCIe capability structure not found!\n");
+ return (ENXIO);
+ }
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
+
+ if (pci_msix_count(dev)) {
+ sc->sc_msix = 1;
+ } else {
+ device_printf(dev, "no MSI-X found\n");
+ return (ENXIO);
+ }
+
+ pci_enable_busmaster(dev);
+ rid = PCIR_BAR(0);
+ sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (sc->sc_mem == NULL) {
+ device_printf(sc->sc_dev, "can't map mem space\n");
+ return (ENXIO);
+ }
+ sc->sc_st = rman_get_bustag(sc->sc_mem);
+ sc->sc_sh = rman_get_bushandle(sc->sc_mem);
+
+ count = 1;
+ rid = 0;
+ if (pci_alloc_msix(dev, &count) == 0)
+ rid = 1;
+ DPRINTF(("%s: count=%d\n", __func__, count));
+ sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
+ (rid != 0 ? 0 : RF_SHAREABLE));
+ if (sc->sc_irq == NULL) {
+ device_printf(dev, "can't map interrupt\n");
+ return (ENXIO);
+ }
+ error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
+ NULL, iwx_intr_msix, sc, &sc->sc_ih);
+ if (error != 0) {
+ device_printf(dev, "can't establish interrupt\n");
+ return (ENXIO);
+ }
+
+ /* Clear pending interrupts. */
+ IWX_WRITE(sc, IWX_CSR_INT_MASK, 0);
+ IWX_WRITE(sc, IWX_CSR_INT, ~0);
+ IWX_WRITE(sc, IWX_CSR_FH_INT_STATUS, ~0);
+
+ sc->sc_hw_rev = IWX_READ(sc, IWX_CSR_HW_REV);
+ DPRINTF(("%s: sc->sc_hw_rev=0x%x\n", __func__, sc->sc_hw_rev));
+ sc->sc_hw_rf_id = IWX_READ(sc, IWX_CSR_HW_RF_ID);
+ DPRINTF(("%s: sc->sc_hw_rf_id=0x%x\n", __func__, sc->sc_hw_rf_id));
+
+ /*
+ * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+ * changed, and the revision step now also includes bits 0-1 (there is
+ * no more "dash" value). To keep hw_rev backwards compatible, we store
+ * it in the old format.
+ */
+ sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
+ (IWX_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
+
+ switch (sc->sc_pid) {
+ case PCI_PRODUCT_INTEL_WL_22500_1:
+ sc->sc_fwname = IWX_CC_A_FW;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+ sc->sc_integrated = 0;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
+ sc->sc_low_latency_xtal = 0;
+ sc->sc_xtal_latency = 0;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_2:
+ case PCI_PRODUCT_INTEL_WL_22500_5:
+ /* These devices should be QuZ only. */
+ if (sc->sc_hw_rev != IWX_CSR_HW_REV_TYPE_QUZ) {
+ device_printf(dev, "unsupported AX201 adapter\n");
+ return (ENXIO);
+ }
+ sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+ sc->sc_integrated = 1;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
+ sc->sc_low_latency_xtal = 0;
+ sc->sc_xtal_latency = 500;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_3:
+ if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+ sc->sc_fwname = IWX_QU_C_HR_B_FW;
+ else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+ sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+ else
+ sc->sc_fwname = IWX_QU_B_HR_B_FW;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+ sc->sc_integrated = 1;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_200;
+ sc->sc_low_latency_xtal = 0;
+ sc->sc_xtal_latency = 500;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_4:
+ case PCI_PRODUCT_INTEL_WL_22500_7:
+ case PCI_PRODUCT_INTEL_WL_22500_8:
+ if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+ sc->sc_fwname = IWX_QU_C_HR_B_FW;
+ else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+ sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+ else
+ sc->sc_fwname = IWX_QU_B_HR_B_FW;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+ sc->sc_integrated = 1;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820;
+ sc->sc_low_latency_xtal = 0;
+ sc->sc_xtal_latency = 1820;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_6:
+ if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QU_C0)
+ sc->sc_fwname = IWX_QU_C_HR_B_FW;
+ else if (sc->sc_hw_rev == IWX_CSR_HW_REV_TYPE_QUZ)
+ sc->sc_fwname = IWX_QUZ_A_HR_B_FW;
+ else
+ sc->sc_fwname = IWX_QU_B_HR_B_FW;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_22000;
+ sc->sc_integrated = 1;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
+ sc->sc_low_latency_xtal = 1;
+ sc->sc_xtal_latency = 12000;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_9:
+ case PCI_PRODUCT_INTEL_WL_22500_10:
+ case PCI_PRODUCT_INTEL_WL_22500_11:
+ case PCI_PRODUCT_INTEL_WL_22500_13:
+ /* _14 is an MA device, not yet supported */
+ case PCI_PRODUCT_INTEL_WL_22500_15:
+ case PCI_PRODUCT_INTEL_WL_22500_16:
+ sc->sc_fwname = IWX_SO_A_GF_A_FW;
+ sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
+ sc->sc_integrated = 0;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE;
+ sc->sc_low_latency_xtal = 0;
+ sc->sc_xtal_latency = 0;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 1;
+ break;
+ case PCI_PRODUCT_INTEL_WL_22500_12:
+ case PCI_PRODUCT_INTEL_WL_22500_17:
+ sc->sc_fwname = IWX_SO_A_GF_A_FW;
+ sc->sc_pnvm_name = IWX_SO_A_GF_A_PNVM;
+ sc->sc_device_family = IWX_DEVICE_FAMILY_AX210;
+ sc->sc_integrated = 1;
+ sc->sc_ltr_delay = IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500;
+ sc->sc_low_latency_xtal = 1;
+ sc->sc_xtal_latency = 12000;
+ sc->sc_tx_with_siso_diversity = 0;
+ sc->sc_uhb_supported = 0;
+ sc->sc_imr_enabled = 1;
+ break;
+ default:
+ device_printf(dev, "unknown adapter type\n");
+ return (ENXIO);
+ }
+
+ cfg = iwx_find_device_cfg(sc);
+ DPRINTF(("%s: cfg=%p\n", __func__, cfg));
+ if (cfg) {
+ sc->sc_fwname = cfg->fw_name;
+ sc->sc_pnvm_name = cfg->pnvm_name;
+ sc->sc_tx_with_siso_diversity = cfg->tx_with_siso_diversity;
+ sc->sc_uhb_supported = cfg->uhb_supported;
+ if (cfg->xtal_latency) {
+ sc->sc_xtal_latency = cfg->xtal_latency;
+ sc->sc_low_latency_xtal = cfg->low_latency_xtal;
+ }
+ }
+
+ sc->mac_addr_from_csr = 0x380; /* differs on BZ hw generation */
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ sc->sc_umac_prph_offset = 0x300000;
+ sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX_GEN3;
+ } else
+ sc->max_tfd_queue_size = IWX_TFD_QUEUE_SIZE_MAX;
+
+ /* Allocate DMA memory for loading firmware. */
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210)
+ ctxt_info_size = sizeof(struct iwx_context_info_gen3);
+ else
+ ctxt_info_size = sizeof(struct iwx_context_info);
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ctxt_info_dma,
+ ctxt_info_size, 1);
+ if (err) {
+ device_printf(dev,
+ "could not allocate memory for loading firmware\n");
+ return (ENXIO);
+ }
+
+ if (sc->sc_device_family >= IWX_DEVICE_FAMILY_AX210) {
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_scratch_dma,
+ sizeof(struct iwx_prph_scratch), 1);
+ if (err) {
+ device_printf(dev,
+ "could not allocate prph scratch memory\n");
+ goto fail1;
+ }
+
+ /*
+ * Allocate prph information. The driver itself doesn't use
+ * this; the second half of the page gives the device dummy
+ * TR/CR tail pointers. That shouldn't be necessary since we
+ * never consume the data, but the hardware reads and writes
+ * there regardless and must not be left with a NULL pointer.
+ */
+ KASSERT((sizeof(struct iwx_prph_info) < PAGE_SIZE / 2),
+ ("iwx_prph_info has wrong size"));
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->prph_info_dma,
+ PAGE_SIZE, 1);
+ if (err) {
+ device_printf(dev,
+ "could not allocate prph info memory\n");
+ goto fail1;
+ }
+ }
+
+ /* Allocate interrupt cause table (ICT).*/
+ err = iwx_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
+ IWX_ICT_SIZE, 1<<IWX_ICT_PADDR_SHIFT);
+ if (err) {
+ device_printf(dev, "could not allocate ICT table\n");
+ goto fail1;
+ }
+
+ for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
+ err = iwx_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
+ if (err) {
+ device_printf(dev, "could not allocate TX ring %d\n",
+ txq_i);
+ goto fail4;
+ }
+ }
+
+ err = iwx_alloc_rx_ring(sc, &sc->rxq);
+ if (err) {
+ device_printf(sc->sc_dev, "could not allocate RX ring\n");
+ goto fail4;
+ }
+
+#ifdef IWX_DEBUG
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
+ CTLFLAG_RWTUN, &sc->sc_debug, 0, "bitmask to control debugging");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "himark",
+ CTLFLAG_RW, &iwx_himark, 0, "queues high watermark");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "lomark",
+ CTLFLAG_RW, &iwx_lomark, 0, "queues low watermark");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "qfullmsk",
+ CTLFLAG_RD, &sc->qfullmsk, 0, "queue fullmask");
+
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue0",
+ CTLFLAG_RD, &sc->txq[0].queued, 0, "queue 0");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue1",
+ CTLFLAG_RD, &sc->txq[1].queued, 0, "queue 1");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue2",
+ CTLFLAG_RD, &sc->txq[2].queued, 0, "queue 2");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue3",
+ CTLFLAG_RD, &sc->txq[3].queued, 0, "queue 3");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue4",
+ CTLFLAG_RD, &sc->txq[4].queued, 0, "queue 4");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue5",
+ CTLFLAG_RD, &sc->txq[5].queued, 0, "queue 5");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue6",
+ CTLFLAG_RD, &sc->txq[6].queued, 0, "queue 6");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "queue7",
+ CTLFLAG_RD, &sc->txq[7].queued, 0, "queue 7");
+#endif
+ ic->ic_softc = sc;
+ ic->ic_name = device_get_nameunit(sc->sc_dev);
+ ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
+ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
+
+ /* Set device capabilities. */
+ ic->ic_caps =
+ IEEE80211_C_STA |
+ IEEE80211_C_MONITOR |
+ IEEE80211_C_WPA | /* WPA/RSN */
+ IEEE80211_C_WME |
+ IEEE80211_C_PMGT |
+ IEEE80211_C_SHSLOT | /* short slot time supported */
+ IEEE80211_C_SHPREAMBLE | /* short preamble supported */
+ IEEE80211_C_BGSCAN /* capable of bg scanning */
+ ;
+ ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+ /* Don't send null data frames; let firmware do it */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+
+ ic->ic_txstream = 2;
+ ic->ic_rxstream = 2;
+ ic->ic_htcaps |= IEEE80211_HTC_HT
+ | IEEE80211_HTCAP_SMPS_OFF
+ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */
+ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
+ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/
+ | IEEE80211_HTC_AMPDU /* tx A-MPDU */
+// | IEEE80211_HTC_RX_AMSDU_AMPDU /* TODO: hw reorder */
+ | IEEE80211_HTCAP_MAXAMSDU_3839; /* max A-MSDU length */
+
+ ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM;
+
+ /*
+ * XXX: setupcurchan() expects vhtcaps to be non-zero
+ * https://bugs.freebsd.org/274156
+ */
+ ic->ic_vht_cap.vht_cap_info |= IEEE80211_VHTCAP_MAX_MPDU_LENGTH_3895
+ | IEEE80211_VHTCAP_SHORT_GI_80
+ | 3 << IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK_S
+ | IEEE80211_VHTCAP_RX_ANTENNA_PATTERN
+ | IEEE80211_VHTCAP_TX_ANTENNA_PATTERN;
+
+ ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
+ int mcsmap = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
+ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
+ IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+ ic->ic_vht_cap.supp_mcs.tx_mcs_map = htole16(mcsmap);
+ ic->ic_vht_cap.supp_mcs.rx_mcs_map = htole16(mcsmap);
+
+ callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0);
+ for (i = 0; i < nitems(sc->sc_rxba_data); i++) {
+ struct iwx_rxba_data *rxba = &sc->sc_rxba_data[i];
+ rxba->baid = IWX_RX_REORDER_DATA_INVALID_BAID;
+ rxba->sc = sc;
+ for (j = 0; j < nitems(rxba->entries); j++)
+ mbufq_init(&rxba->entries[j].frames, ifqmaxlen);
+ }
+
+ sc->sc_preinit_hook.ich_func = iwx_attach_hook;
+ sc->sc_preinit_hook.ich_arg = sc;
+ if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
+ device_printf(dev,
+ "config_intrhook_establish failed\n");
+ goto fail4;
+ }
+
+ return (0);
+
+fail4:
+ while (--txq_i >= 0)
+ iwx_free_tx_ring(sc, &sc->txq[txq_i]);
+ iwx_free_rx_ring(sc, &sc->rxq);
+ if (sc->ict_dma.vaddr != NULL)
+ iwx_dma_contig_free(&sc->ict_dma);
+
+fail1:
+ iwx_dma_contig_free(&sc->ctxt_info_dma);
+ iwx_dma_contig_free(&sc->prph_scratch_dma);
+ iwx_dma_contig_free(&sc->prph_info_dma);
+ return (ENXIO);
+}
+
+static int
+iwx_detach(device_t dev)
+{
+ struct iwx_softc *sc = device_get_softc(dev);
+ int txq_i;
+
+ iwx_stop_device(sc);
+
+ taskqueue_drain_all(sc->sc_tq);
+ taskqueue_free(sc->sc_tq);
+
+ ieee80211_ifdetach(&sc->sc_ic);
+
+ callout_drain(&sc->watchdog_to);
+
+ for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++)
+ iwx_free_tx_ring(sc, &sc->txq[txq_i]);
+ iwx_free_rx_ring(sc, &sc->rxq);
+
+ if (sc->sc_fwp != NULL) {
+ firmware_put(sc->sc_fwp, FIRMWARE_UNLOAD);
+ sc->sc_fwp = NULL;
+ }
+
+ if (sc->sc_pnvm != NULL) {
+ firmware_put(sc->sc_pnvm, FIRMWARE_UNLOAD);
+ sc->sc_pnvm = NULL;
+ }
+
+ if (sc->sc_irq != NULL) {
+ bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
+ bus_release_resource(dev, SYS_RES_IRQ,
+ rman_get_rid(sc->sc_irq), sc->sc_irq);
+ pci_release_msi(dev);
+ }
+ if (sc->sc_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(sc->sc_mem), sc->sc_mem);
+
+ IWX_LOCK_DESTROY(sc);
+
+ return (0);
+}
+
+static void
+iwx_radiotap_attach(struct iwx_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
+ "->%s begin\n", __func__);
+
+ ieee80211_radiotap_attach(ic,
+ &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
+ IWX_TX_RADIOTAP_PRESENT,
+ &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
+ IWX_RX_RADIOTAP_PRESENT);
+
+ IWX_DPRINTF(sc, IWX_DEBUG_RESET | IWX_DEBUG_TRACE,
+ "->%s end\n", __func__);
+}
+
+struct ieee80211vap *
+iwx_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
+ enum ieee80211_opmode opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct iwx_vap *ivp;
+ struct ieee80211vap *vap;
+
+ if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
+ return NULL;
+ ivp = malloc(sizeof(struct iwx_vap), M_80211_VAP, M_WAITOK | M_ZERO);
+ vap = &ivp->iv_vap;
+ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
+ vap->iv_bmissthreshold = 10; /* override default */
+ /* Override with driver methods. */
+ ivp->iv_newstate = vap->iv_newstate;
+ vap->iv_newstate = iwx_newstate;
+
+ ivp->id = IWX_DEFAULT_MACID;
+ ivp->color = IWX_DEFAULT_COLOR;
+
+ ivp->have_wme = TRUE;
+ ivp->ps_disabled = FALSE;
+
+ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
+ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4;
+
+ /* h/w crypto support */
+ vap->iv_key_alloc = iwx_key_alloc;
+ vap->iv_key_delete = iwx_key_delete;
+ vap->iv_key_set = iwx_key_set;
+ vap->iv_key_update_begin = iwx_key_update_begin;
+ vap->iv_key_update_end = iwx_key_update_end;
+
+ ieee80211_ratectl_init(vap);
+ /* Complete setup. */
+ ieee80211_vap_attach(vap, ieee80211_media_change,
+ ieee80211_media_status, mac);
+ ic->ic_opmode = opmode;
+
+ return vap;
+}
+
+static void
+iwx_vap_delete(struct ieee80211vap *vap)
+{
+ struct iwx_vap *ivp = IWX_VAP(vap);
+
+ ieee80211_ratectl_deinit(vap);
+ ieee80211_vap_detach(vap);
+ free(ivp, M_80211_VAP);
+}
+
+static void
+iwx_parent(struct ieee80211com *ic)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ IWX_LOCK(sc);
+
+ if (sc->sc_flags & IWX_FLAG_HW_INITED) {
+ iwx_stop(sc);
+ sc->sc_flags &= ~IWX_FLAG_HW_INITED;
+ } else {
+ iwx_init(sc);
+ ieee80211_start_all(ic);
+ }
+ IWX_UNLOCK(sc);
+}
+
+static int
+iwx_suspend(device_t dev)
+{
+ struct iwx_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ if (sc->sc_flags & IWX_FLAG_HW_INITED) {
+ ieee80211_suspend_all(ic);
+
+ iwx_stop(sc);
+ sc->sc_flags &= ~IWX_FLAG_HW_INITED;
+ }
+ return (0);
+}
+
+static int
+iwx_resume(device_t dev)
+{
+ struct iwx_softc *sc = device_get_softc(dev);
+ struct ieee80211com *ic = &sc->sc_ic;
+ int err;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
+
+ IWX_LOCK(sc);
+
+ err = iwx_init(sc);
+ if (err) {
+ iwx_stop_device(sc);
+ IWX_UNLOCK(sc);
+ return err;
+ }
+
+ IWX_UNLOCK(sc);
+
+ ieee80211_resume_all(ic);
+ return (0);
+}
+
+static void
+iwx_scan_start(struct ieee80211com *ic)
+{
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwx_softc *sc = ic->ic_softc;
+ int err;
+
+ IWX_LOCK(sc);
+ if ((ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) == 0)
+ err = iwx_scan(sc);
+ else
+ err = iwx_bgscan(ic);
+ IWX_UNLOCK(sc);
+ if (err)
+ ieee80211_cancel_scan(vap);
+
+ return;
+}
+
+static void
+iwx_update_mcast(struct ieee80211com *ic)
+{
+}
+
+static void
+iwx_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
+{
+}
+
+static void
+iwx_scan_mindwell(struct ieee80211_scan_state *ss)
+{
+}
+
+static void
+iwx_scan_end(struct ieee80211com *ic)
+{
+ iwx_endscan(ic->ic_softc);
+}
+
+static void
+iwx_set_channel(struct ieee80211com *ic)
+{
+#if 0
+ struct iwx_softc *sc = ic->ic_softc;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+	IWX_DPRINTF(sc, IWX_DEBUG_NI, "%s:%d NOT IMPLEMENTED\n",
+	    __func__, __LINE__);
+ iwx_phy_ctxt_task((void *)sc);
+#endif
+}
+
+static void
+iwx_endscan_cb(void *arg, int pending)
+{
+ struct iwx_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ DPRINTF(("scan ended\n"));
+ ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
+}
+
+static int
+iwx_wme_update(struct ieee80211com *ic)
+{
+ return 0;
+}
+
+static int
+iwx_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct iwx_softc *sc = ic->ic_softc;
+ int err;
+
+ IWX_LOCK(sc);
+ if (sc->sc_flags & IWX_FLAG_STA_ACTIVE) {
+ err = iwx_tx(sc, m, ni);
+ IWX_UNLOCK(sc);
+ return err;
+ } else {
+ IWX_UNLOCK(sc);
+ return EIO;
+ }
+}
+
+static int
+iwx_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct iwx_softc *sc = ic->ic_softc;
+ int error;
+
+	/* TODO: mbufq_enqueue in iwm */
+	/* TODO: dequeue in iwm_start, counters, locking */
+ IWX_LOCK(sc);
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ IWX_UNLOCK(sc);
+ return (error);
+ }
+
+ iwx_start(sc);
+ IWX_UNLOCK(sc);
+ return (0);
+}
+
+static int
+iwx_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap,
+ int baparamset, int batimeout, int baseqctl)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct iwx_softc *sc = ic->ic_softc;
+ int tid;
+
+ tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+ sc->ni_rx_ba[tid].ba_winstart =
+ _IEEE80211_MASKSHIFT(le16toh(baseqctl), IEEE80211_BASEQ_START);
+ sc->ni_rx_ba[tid].ba_winsize =
+ _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_BUFSIZ);
+ sc->ni_rx_ba[tid].ba_timeout_val = batimeout;
+
+ if (sc->sc_rx_ba_sessions >= IWX_MAX_RX_BA_SESSIONS ||
+ tid >= IWX_MAX_TID_COUNT)
+ return ENOSPC;
+
+ if (sc->ba_rx.start_tidmask & (1 << tid)) {
+ DPRINTF(("%s: tid %d already added\n", __func__, tid));
+ return EBUSY;
+ }
+ DPRINTF(("%s: sc->ba_rx.start_tidmask=%x\n", __func__, sc->ba_rx.start_tidmask));
+
+ sc->ba_rx.start_tidmask |= (1 << tid);
+ DPRINTF(("%s: tid=%i\n", __func__, tid));
+ DPRINTF(("%s: ba_winstart=%i\n", __func__, sc->ni_rx_ba[tid].ba_winstart));
+ DPRINTF(("%s: ba_winsize=%i\n", __func__, sc->ni_rx_ba[tid].ba_winsize));
+ DPRINTF(("%s: ba_timeout_val=%i\n", __func__, sc->ni_rx_ba[tid].ba_timeout_val));
+
+ taskqueue_enqueue(sc->sc_tq, &sc->ba_rx_task);
+
+ // TODO:misha move to ba_task (serialize)
+ sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl);
+
+ return (0);
+}
+
+static void
+iwx_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap)
+{
+ return;
+}
+
+static int
+iwx_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int dialogtoken, int baparamset, int batimeout)
+{
+ struct iwx_softc *sc = ni->ni_ic->ic_softc;
+ int tid;
+
+ tid = _IEEE80211_MASKSHIFT(le16toh(baparamset), IEEE80211_BAPS_TID);
+ DPRINTF(("%s: tid=%i\n", __func__, tid));
+ sc->ba_tx.start_tidmask |= (1 << tid);
+ taskqueue_enqueue(sc->sc_tq, &sc->ba_tx_task);
+ return 0;
+}
+
+static int
+iwx_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap,
+ int code, int baparamset, int batimeout)
+{
+ return 0;
+}
+
+static void
+iwx_key_update_begin(struct ieee80211vap *vap)
+{
+ return;
+}
+
+static void
+iwx_key_update_end(struct ieee80211vap *vap)
+{
+ return;
+}
+
+static int
+iwx_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
+ ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
+{
+
+ if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_AES_CCM) {
+ return 1;
+ }
+ if (!(&vap->iv_nw_keys[0] <= k &&
+ k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ /*
+ * Not in the global key table, the driver should handle this
+ * by allocating a slot in the h/w key table/cache. In
+ * lieu of that return key slot 0 for any unicast key
+ * request. We disallow the request if this is a group key.
+ * This default policy does the right thing for legacy hardware
+ * with a 4 key table. It also handles devices that pass
+ * packets through untouched when marked with the WEP bit
+ * and key index 0.
+ */
+ if (k->wk_flags & IEEE80211_KEY_GROUP)
+ return 0;
+ *keyix = 0; /* NB: use key index 0 for ucast key */
+ } else {
+ *keyix = ieee80211_crypto_get_key_wepidx(vap, k);
+ }
+ *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */
+ return 1;
+}
+
+static int
+iwx_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct iwx_softc *sc = ic->ic_softc;
+ struct iwx_add_sta_key_cmd cmd;
+ uint32_t status;
+ int err;
+ int id;
+
+ if (k->wk_cipher->ic_cipher != IEEE80211_CIPHER_AES_CCM) {
+ return 1;
+ }
+
+ IWX_LOCK(sc);
+ /*
+ * Keys are stored in 'ni' so 'k' is valid if 'ni' is valid.
+ * Currently we only implement station mode where 'ni' is always
+ * ic->ic_bss so there is no need to validate arguments beyond this:
+ */
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ DPRINTF(("%s: adding group key\n", __func__));
+ } else {
+ DPRINTF(("%s: adding key\n", __func__));
+ }
+ if (k >= &vap->iv_nw_keys[0] &&
+ k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])
+ id = (k - vap->iv_nw_keys);
+ else
+		id = 0;
+ DPRINTF(("%s: setting keyid=%i\n", __func__, id));
+ cmd.common.key_flags = htole16(IWX_STA_KEY_FLG_CCM |
+ IWX_STA_KEY_FLG_WEP_KEY_MAP |
+ ((id << IWX_STA_KEY_FLG_KEYID_POS) &
+ IWX_STA_KEY_FLG_KEYID_MSK));
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ cmd.common.key_offset = 1;
+ cmd.common.key_flags |= htole16(IWX_STA_KEY_MULTICAST);
+ } else {
+ cmd.common.key_offset = 0;
+ }
+ memcpy(cmd.common.key, k->wk_key, MIN(sizeof(cmd.common.key),
+ k->wk_keylen));
+ DPRINTF(("%s: wk_keylen=%i\n", __func__, k->wk_keylen));
+	for (int i = 0; i < k->wk_keylen; i++) {
+ DPRINTF(("%s: key[%d]=%x\n", __func__, i, k->wk_key[i]));
+ }
+ cmd.common.sta_id = IWX_STATION_ID;
+
+ cmd.transmit_seq_cnt = htole64(k->wk_keytsc);
+	DPRINTF(("%s: k->wk_keytsc=%ju\n", __func__,
+	    (uintmax_t)k->wk_keytsc));
+
+ status = IWX_ADD_STA_SUCCESS;
+ err = iwx_send_cmd_pdu_status(sc, IWX_ADD_STA_KEY, sizeof(cmd), &cmd,
+ &status);
+ if (!err && (status & IWX_ADD_STA_STATUS_MASK) != IWX_ADD_STA_SUCCESS)
+ err = EIO;
+ if (err) {
+ printf("%s: can't set wpa2 keys (error %d)\n", __func__, err);
+ IWX_UNLOCK(sc);
+ return err;
+ } else
+ DPRINTF(("%s: key added successfully\n", __func__));
+ IWX_UNLOCK(sc);
+ return 1;
+}
+
+static int
+iwx_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
+{
+ return 1;
+}
+
+static device_method_t iwx_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, iwx_probe),
+ DEVMETHOD(device_attach, iwx_attach),
+ DEVMETHOD(device_detach, iwx_detach),
+ DEVMETHOD(device_suspend, iwx_suspend),
+ DEVMETHOD(device_resume, iwx_resume),
+
+ DEVMETHOD_END
+};
+
+static driver_t iwx_pci_driver = {
+ "iwx",
+ iwx_pci_methods,
+ sizeof (struct iwx_softc)
+};
+
+DRIVER_MODULE(iwx, pci, iwx_pci_driver, NULL, NULL);
+MODULE_PNP_INFO("U16:device;D:#;T:vendor=0x8086", pci, iwx_pci_driver,
+ iwx_devices, nitems(iwx_devices));
+MODULE_DEPEND(iwx, firmware, 1, 1, 1);
+MODULE_DEPEND(iwx, pci, 1, 1, 1);
+MODULE_DEPEND(iwx, wlan, 1, 1, 1);
diff --git a/sys/dev/iwx/if_iwx_debug.c b/sys/dev/iwx/if_iwx_debug.c
new file mode 100644
index 000000000000..0c6658094282
--- /dev/null
+++ b/sys/dev/iwx/if_iwx_debug.c
@@ -0,0 +1,370 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <net/ethernet.h>
+
+#include <net80211/ieee80211.h>
+
+#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
+
+#include <dev/iwx/if_iwxreg.h>
+#include <dev/iwx/if_iwx_debug.h>
+
+static int print_codes[][2] = {
+#if 0
+ for example:
+ IWX_LEGACY_GROUP, IWX_ADD_STA_KEY,
+ IWX_LEGACY_GROUP, IWX_SCD_QUEUE_CONFIG_CMD,
+ IWX_LEGACY_GROUP, IWX_ADD_STA,
+ IWX_LEGACY_GROUP, IWX_REMOVE_STA,
+#endif
+};
+
+struct opcode_label {
+ uint8_t opcode;
+ const char *label;
+};
+
+static struct opcode_label command_group[] = {
+ { 0x0, "IWX_LEGACY_GROUP"},
+ { 0x1, "IWX_LONG_GROUP"},
+ { 0x2, "IWX_SYSTEM_GROUP"},
+ { 0x3, "IWX_MAC_CONF_GROUP"},
+ { 0x4, "IWX_PHY_OPS_GROUP"},
+ { 0x5, "IWX_DATA_PATH_GROUP"},
+ { 0xb, "IWX_PROT_OFFLOAD_GROUP"},
+ { 0xc, "IWX_REGULATORY_AND_NVM_GROUP"},
+ { 0, NULL }
+};
+
+static struct opcode_label legacy_opcodes[] = {
+ { 0xc0, "IWX_REPLY_RX_PHY_CMD" },
+ { 0xc1, "IWX_REPLY_RX_MPDU_CMD" },
+ { 0xc2, "IWX_BAR_FRAME_RELEASE" },
+ { 0xc3, "IWX_FRAME_RELEASE" },
+ { 0xc5, "IWX_BA_NOTIF" },
+ { 0x62, "IWX_TEMPERATURE_NOTIFICATION" },
+ { 0xc8, "IWX_MCC_UPDATE_CMD" },
+ { 0xc9, "IWX_MCC_CHUB_UPDATE_CMD" },
+ { 0x65, "IWX_CALIBRATION_CFG_CMD" },
+ { 0x66, "IWX_CALIBRATION_RES_NOTIFICATION" },
+ { 0x67, "IWX_CALIBRATION_COMPLETE_NOTIFICATION" },
+ { 0x68, "IWX_RADIO_VERSION_NOTIFICATION" },
+ { 0x00, "IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE" },
+ { 0x01, "IWX_SOC_CONFIGURATION_CMD" },
+ { 0x02, "IWX_REPLY_ERROR" },
+ { 0x03, "IWX_CTDP_CONFIG_CMD" },
+ { 0x04, "IWX_INIT_COMPLETE_NOTIF" },
+ { 0x05, "IWX_SESSION_PROTECTION_CMD" },
+ { 0x5d, "IWX_BT_COEX_CI" },
+ { 0x07, "IWX_FW_ERROR_RECOVERY_CMD" },
+ { 0x08, "IWX_RLC_CONFIG_CMD" },
+ { 0xd0, "IWX_MCAST_FILTER_CMD" },
+ { 0xd1, "IWX_REPLY_SF_CFG_CMD" },
+ { 0xd2, "IWX_REPLY_BEACON_FILTERING_CMD" },
+ { 0xd3, "IWX_D3_CONFIG_CMD" },
+ { 0xd4, "IWX_PROT_OFFLOAD_CONFIG_CMD" },
+ { 0xd5, "IWX_OFFLOADS_QUERY_CMD" },
+ { 0xd6, "IWX_REMOTE_WAKE_CONFIG_CMD" },
+ { 0x77, "IWX_POWER_TABLE_CMD" },
+ { 0x78, "IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION" },
+ { 0xcc, "IWX_BT_COEX_PRIO_TABLE" },
+ { 0xcd, "IWX_BT_COEX_PROT_ENV" },
+ { 0xce, "IWX_BT_PROFILE_NOTIFICATION" },
+ { 0x6a, "IWX_PHY_CONFIGURATION_CMD" },
+ { 0x16, "IWX_RX_BAID_ALLOCATION_CONFIG_CMD" },
+ { 0x17, "IWX_ADD_STA_KEY" },
+ { 0x18, "IWX_ADD_STA" },
+ { 0x19, "IWX_REMOVE_STA" },
+ { 0xe0, "IWX_WOWLAN_PATTERNS" },
+ { 0xe1, "IWX_WOWLAN_CONFIGURATION" },
+ { 0xe2, "IWX_WOWLAN_TSC_RSC_PARAM" },
+ { 0xe3, "IWX_WOWLAN_TKIP_PARAM" },
+ { 0xe4, "IWX_WOWLAN_KEK_KCK_MATERIAL" },
+ { 0xe5, "IWX_WOWLAN_GET_STATUSES" },
+ { 0xe6, "IWX_WOWLAN_TX_POWER_PER_DB" },
+ { 0x0f, "IWX_SCAN_COMPLETE_UMAC" },
+ { 0x88, "IWX_NVM_ACCESS_CMD" },
+ { 0x20, "IWX_WEP_KEY" },
+ { 0xdc, "IWX_CMD_DTS_MEASUREMENT_TRIGGER" },
+ { 0xdd, "IWX_DTS_MEASUREMENT_NOTIFICATION" },
+ { 0x28, "IWX_MAC_CONTEXT_CMD" },
+ { 0x29, "IWX_TIME_EVENT_CMD" },
+ { 0x01, "IWX_ALIVE" },
+ { 0xf0, "IWX_REPLY_DEBUG_CMD" },
+ { 0x90, "IWX_BEACON_NOTIFICATION" },
+ { 0xf5, "IWX_RX_NO_DATA_NOTIF" },
+ { 0x08, "IWX_PHY_CONTEXT_CMD" },
+ { 0x91, "IWX_BEACON_TEMPLATE_CMD" },
+ { 0xf6, "IWX_THERMAL_DUAL_CHAIN_REQUEST" },
+ { 0x09, "IWX_DBG_CFG" },
+ { 0xf7, "IWX_DEBUG_LOG_MSG" },
+ { 0x1c, "IWX_TX_CMD" },
+ { 0x1d, "IWX_SCD_QUEUE_CFG" },
+ { 0x1e, "IWX_TXPATH_FLUSH" },
+ { 0x1f, "IWX_MGMT_MCAST_KEY" },
+ { 0x98, "IWX_TX_ANT_CONFIGURATION_CMD" },
+ { 0xee, "IWX_LTR_CONFIG" },
+ { 0x8e, "IWX_SET_CALIB_DEFAULT_CMD" },
+ { 0xFE, "IWX_CT_KILL_NOTIFICATION" },
+ { 0xFF, "IWX_DTS_MEASUREMENT_NOTIF_WIDE" },
+ { 0x2a, "IWX_TIME_EVENT_NOTIFICATION" },
+ { 0x2b, "IWX_BINDING_CONTEXT_CMD" },
+ { 0x2c, "IWX_TIME_QUOTA_CMD" },
+ { 0x2d, "IWX_NON_QOS_TX_COUNTER_CMD" },
+ { 0xa0, "IWX_CARD_STATE_CMD" },
+ { 0xa1, "IWX_CARD_STATE_NOTIFICATION" },
+ { 0xa2, "IWX_MISSED_BEACONS_NOTIFICATION" },
+ { 0x0c, "IWX_SCAN_CFG_CMD" },
+ { 0x0d, "IWX_SCAN_REQ_UMAC" },
+ { 0xfb, "IWX_SESSION_PROTECTION_NOTIF" },
+ { 0x0e, "IWX_SCAN_ABORT_UMAC" },
+ { 0xfe, "IWX_PNVM_INIT_COMPLETE" },
+ { 0xa9, "IWX_MAC_PM_POWER_TABLE" },
+ { 0xff, "IWX_FSEQ_VER_MISMATCH_NOTIFICATION | IWX_REPLY_MAX" },
+ { 0x9b, "IWX_BT_CONFIG" },
+ { 0x9c, "IWX_STATISTICS_CMD" },
+ { 0x9d, "IWX_STATISTICS_NOTIFICATION" },
+ { 0x9f, "IWX_REDUCE_TX_POWER_CMD" },
+ { 0xb1, "IWX_MFUART_LOAD_NOTIFICATION" },
+ { 0xb5, "IWX_SCAN_ITERATION_COMPLETE_UMAC" },
+ { 0x54, "IWX_NET_DETECT_CONFIG_CMD" },
+ { 0x56, "IWX_NET_DETECT_PROFILES_QUERY_CMD" },
+ { 0x57, "IWX_NET_DETECT_PROFILES_CMD" },
+ { 0x58, "IWX_NET_DETECT_HOTSPOTS_CMD" },
+ { 0x59, "IWX_NET_DETECT_HOTSPOTS_QUERY_CMD" },
+ { 0, NULL }
+};
+
+/* SYSTEM_GROUP group subcommand IDs */
+static struct opcode_label system_opcodes[] = {
+ { 0x00, "IWX_SHARED_MEM_CFG_CMD" },
+ { 0x01, "IWX_SOC_CONFIGURATION_CMD" },
+ { 0x03, "IWX_INIT_EXTENDED_CFG_CMD" },
+ { 0x07, "IWX_FW_ERROR_RECOVERY_CMD" },
+ { 0xff, "IWX_FSEQ_VER_MISMATCH_NOTIFICATION | IWX_REPLY_MAX" },
+ { 0, NULL }
+};
+
+/* MAC_CONF group subcommand IDs */
+static struct opcode_label macconf_opcodes[] = {
+ { 0x05, "IWX_SESSION_PROTECTION_CMD" },
+ { 0xfb, "IWX_SESSION_PROTECTION_NOTIF" },
+ { 0, NULL }
+};
+
+/* DATA_PATH group subcommand IDs */
+static struct opcode_label data_opcodes[] = {
+ { 0x00, "IWX_DQA_ENABLE_CMD" },
+ { 0x08, "IWX_RLC_CONFIG_CMD" },
+ { 0x0f, "IWX_TLC_MNG_CONFIG_CMD" },
+ { 0x16, "IWX_RX_BAID_ALLOCATION_CONFIG_CMD" },
+ { 0x17, "IWX_SCD_QUEUE_CONFIG_CMD" },
+ { 0xf5, "IWX_RX_NO_DATA_NOTIF" },
+ { 0xf6, "IWX_THERMAL_DUAL_CHAIN_REQUEST" },
+ { 0xf7, "IWX_TLC_MNG_UPDATE_NOTIF" },
+ { 0, NULL }
+};
+
+/* REGULATORY_AND_NVM group subcommand IDs */
+static struct opcode_label reg_opcodes[] = {
+ { 0x00, "IWX_NVM_ACCESS_COMPLETE" },
+	{ 0x02, "IWX_NVM_GET_INFO" },
+ { 0xfe, "IWX_PNVM_INIT_COMPLETE" },
+ { 0, NULL }
+};
+
+/* PHY_OPS subcommand IDs */
+static struct opcode_label phyops_opcodes[] = {
+ {0x00, "IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE"},
+ {0x03, "IWX_CTDP_CONFIG_CMD"},
+ {0x04, "IWX_TEMP_REPORTING_THRESHOLDS_CMD"},
+ {0xFE, "IWX_CT_KILL_NOTIFICATION"},
+ {0xFF, "IWX_DTS_MEASUREMENT_NOTIF_WIDE"},
+	{ 0, NULL }	/* sentinel: get_label() stops at a NULL label */
+};
+
+static const char *
+get_label(struct opcode_label *table, uint8_t opcode)
+{
+ struct opcode_label *op = table;
+	while (op->label != NULL) {
+ if (op->opcode == opcode)
+ return op->label;
+ op++;
+ }
+ return "NOT FOUND IN TABLE";
+}
+
+static struct opcode_label *
+get_table(uint8_t group)
+{
+	switch (group) {
+	case IWX_LEGACY_GROUP:
+	case IWX_LONG_GROUP:
+		return legacy_opcodes;
+	case IWX_SYSTEM_GROUP:
+		return system_opcodes;
+	case IWX_MAC_CONF_GROUP:
+		return macconf_opcodes;
+	case IWX_DATA_PATH_GROUP:
+		return data_opcodes;
+	case IWX_REGULATORY_AND_NVM_GROUP:
+		return reg_opcodes;
+	case IWX_PHY_OPS_GROUP:
+		return phyops_opcodes;
+	case IWX_PROT_OFFLOAD_GROUP:
+		break;
+	}
+ return NULL;
+}
+
+void
+print_opcode(const char *func, int line, uint32_t code)
+{
+ int print = 0;
+ uint8_t opcode = iwx_cmd_opcode(code);
+ uint8_t group = iwx_cmd_groupid(code);
+
+ struct opcode_label *table = get_table(group);
+ if (table == NULL) {
+		printf("Couldn't find opcode table for 0x%08x\n", code);
+ return;
+ }
+
+ for (int i = 0; i < nitems(print_codes); i++)
+ if (print_codes[i][0] == group && print_codes[i][1] == opcode)
+ print = 1;
+
+ if (print) {
+ printf("%s:%d \t%s\t%s\t(0x%08x)\n", func, line,
+ get_label(command_group, group),
+ get_label(table, opcode), code);
+ }
+}
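+
+/*
+ * For illustration: with the usual iwlwifi command encoding (group id in
+ * bits 8-15, opcode in bits 0-7, which iwx_cmd_groupid() and
+ * iwx_cmd_opcode() are expected to extract), a code of 0x0517 decodes as
+ * IWX_DATA_PATH_GROUP / IWX_SCD_QUEUE_CONFIG_CMD; adding that pair to
+ * print_codes[] above makes print_opcode() log each such command.
+ */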
+
+void
+print_ratenflags(const char *func, int line, uint32_t flags, int ver)
+{
+ printf("%s:%d\n\t flags 0x%08x ", func, line, flags);
+
+ if (ver >= 2) {
+ printf(" rate_n_flags version 2\n");
+
+ uint32_t type = (flags & IWX_RATE_MCS_MOD_TYPE_MSK) >> IWX_RATE_MCS_MOD_TYPE_POS;
+
+		switch (type) {
+ case 0:
+ printf("\t(0) Legacy CCK: ");
+			switch (flags & IWX_RATE_LEGACY_RATE_MSK) {
+ case 0:
+ printf("(0) 0xa - 1 Mbps\n");
+ break;
+ case 1:
+ printf("(1) 0x14 - 2 Mbps\n");
+ break;
+ case 2:
+ printf("(2) 0x37 - 5.5 Mbps\n");
+ break;
+ case 3:
+				printf("(3) 0x6e - 11 Mbps\n");
+ break;
+ }
+ break;
+ case 1:
+ printf("\t(1) Legacy OFDM \n");
+			switch (flags & IWX_RATE_LEGACY_RATE_MSK) {
+ case 0:
+ printf("(0) 6 Mbps\n");
+ break;
+ case 1:
+ printf("(1) 9 Mbps\n");
+ break;
+ case 2:
+ printf("(2) 12 Mbps\n");
+ break;
+ case 3:
+ printf("(3) 18 Mbps\n");
+ break;
+ case 4:
+ printf("(4) 24 Mbps\n");
+ break;
+ case 5:
+ printf("(5) 36 Mbps\n");
+ break;
+ case 6:
+ printf("(6) 48 Mbps\n");
+ break;
+ case 7:
+ printf("(7) 54 Mbps\n");
+ break;
+ }
+ break;
+ case 2:
+ printf("\t(2) High-throughput (HT)\n");
+ break;
+ case 3:
+ printf("\t(3) Very High-throughput (VHT) \n");
+ break;
+ case 4:
+ printf("\t(4) High-efficiency (HE)\n");
+ break;
+ case 5:
+ printf("\t(5) Extremely High-throughput (EHT)\n");
+ break;
+ default:
+ printf("invalid\n");
+ }
+
+ /* Not a legacy rate. */
+ if (type > 1) {
+ printf("\tMCS %d ", IWX_RATE_HT_MCS_INDEX(flags));
+			switch ((flags & IWX_RATE_MCS_CHAN_WIDTH_MSK) >>
+			    IWX_RATE_MCS_CHAN_WIDTH_POS) {
+ case 0:
+ printf("20MHz ");
+ break;
+ case 1:
+ printf("40MHz ");
+ break;
+ case 2:
+ printf("80MHz ");
+ break;
+ case 3:
+ printf("160MHz ");
+ break;
+ case 4:
+ printf("320MHz ");
+ break;
+			}
+ printf("antennas: (%s|%s) ",
+ flags & (1 << 14) ? "A" : " ",
+ flags & (1 << 15) ? "B" : " ");
+ if (flags & (1 << 16))
+ printf("ldpc ");
+ printf("\n");
+ }
+ } else {
+		printf("%s:%d rate_n_flags versions < 2 not implemented\n",
+		    func, line);
+ }
+}
diff --git a/sys/dev/iwx/if_iwx_debug.h b/sys/dev/iwx/if_iwx_debug.h
new file mode 100644
index 000000000000..0079a7e7e753
--- /dev/null
+++ b/sys/dev/iwx/if_iwx_debug.h
@@ -0,0 +1,59 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2015 Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Tom Jones <thj@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ */
+
+#ifndef __IF_IWX_DEBUG_H__
+#define __IF_IWX_DEBUG_H__
+
+#ifdef IWX_DEBUG
+enum {
+ IWX_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
+ IWX_DEBUG_RECV = 0x00000002, /* basic recv operation */
+ IWX_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */
+ IWX_DEBUG_TXPOW = 0x00000008, /* tx power processing */
+ IWX_DEBUG_RESET = 0x00000010, /* reset processing */
+ IWX_DEBUG_OPS = 0x00000020, /* iwx_ops processing */
+ IWX_DEBUG_BEACON = 0x00000040, /* beacon handling */
+ IWX_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */
+ IWX_DEBUG_INTR = 0x00000100, /* ISR */
+ IWX_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */
+ IWX_DEBUG_NODE = 0x00000400, /* node management */
+ IWX_DEBUG_LED = 0x00000800, /* led management */
+ IWX_DEBUG_CMD = 0x00001000, /* cmd submission */
+ IWX_DEBUG_TXRATE = 0x00002000, /* TX rate debugging */
+ IWX_DEBUG_PWRSAVE = 0x00004000, /* Power save operations */
+ IWX_DEBUG_SCAN = 0x00008000, /* Scan related operations */
+ IWX_DEBUG_STATS = 0x00010000, /* Statistics updates */
+ IWX_DEBUG_FIRMWARE_TLV = 0x00020000, /* Firmware TLV parsing */
+ IWX_DEBUG_TRANS = 0x00040000, /* Transport layer (eg PCIe) */
+ IWX_DEBUG_EEPROM = 0x00080000, /* EEPROM/channel information */
+ IWX_DEBUG_TEMP = 0x00100000, /* Thermal Sensor handling */
+ IWX_DEBUG_FW = 0x00200000, /* Firmware management */
+ IWX_DEBUG_LAR = 0x00400000, /* Location Aware Regulatory */
+ IWX_DEBUG_TE = 0x00800000, /* Time Event handling */
+ /* 0x0n000000 are available */
+ IWX_DEBUG_NI = 0x10000000, /* Not Implemented */
+ IWX_DEBUG_REGISTER = 0x20000000, /* print chipset register */
+ IWX_DEBUG_TRACE = 0x40000000, /* Print begin and start driver function */
+ IWX_DEBUG_FATAL = 0x80000000, /* fatal errors */
+ IWX_DEBUG_ANY = 0xffffffff
+};
+
+#define IWX_DPRINTF(sc, m, fmt, ...) do { \
+ if (sc->sc_debug & (m)) \
+ device_printf(sc->sc_dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#else
+#define IWX_DPRINTF(sc, m, fmt, ...) do { (void) sc; } while (0)
+#endif
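+
+/*
+ * Typical usage, assuming sc_debug is wired to a debug sysctl or tunable
+ * elsewhere in the driver:
+ *
+ *	IWX_DPRINTF(sc, IWX_DEBUG_SCAN | IWX_DEBUG_TRACE,
+ *	    "%s: starting scan\n", __func__);
+ */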
+
+void print_opcode(const char *, int, uint32_t);
+void print_ratenflags(const char *, int, uint32_t, int);
+
+#endif /* __IF_IWX_DEBUG_H__ */
diff --git a/sys/dev/iwx/if_iwxreg.h b/sys/dev/iwx/if_iwxreg.h
new file mode 100644
index 000000000000..f3d1f078b48e
--- /dev/null
+++ b/sys/dev/iwx/if_iwxreg.h
@@ -0,0 +1,7926 @@
+/*-
+ * SPDX-License-Identifier: GPL-2.0-only OR BSD-3-Clause
+ */
+
+/* $OpenBSD: if_iwxreg.h,v 1.51 2023/03/06 11:18:37 stsp Exp $ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+
+/* maximum number of DRAM map entries supported by FW */
+#define IWX_MAX_DRAM_ENTRY 64
+#define IWX_CSR_CTXT_INFO_BA 0x40
+
+/**
+ * enum iwx_context_info_flags - Context information control flags
+ * @IWX_CTXT_INFO_AUTO_FUNC_INIT: If set, FW will not wait before interrupting
+ * the init done for driver command that configures several system modes
+ * @IWX_CTXT_INFO_EARLY_DEBUG: enable early debug
+ * @IWX_CTXT_INFO_ENABLE_CDMP: enable core dump
+ * @IWX_CTXT_INFO_RB_CB_SIZE_POS: position of the RBD Cyclic Buffer Size
+ * exponent, the actual size is 2**value, valid sizes are 8-2048.
+ * The value is four bits long. Maximum valid exponent is 12
+ * @IWX_CTXT_INFO_TFD_FORMAT_LONG: use long TFD Format (the
+ * default is short format - not supported by the driver)
+ * @IWX_CTXT_INFO_RB_SIZE_POS: RB size position
+ * (values are IWX_CTXT_INFO_RB_SIZE_*K)
+ * @IWX_CTXT_INFO_RB_SIZE_1K: Value for 1K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_2K: Value for 2K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_4K: Value for 4K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_8K: Value for 8K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_12K: Value for 12K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_16K: Value for 16K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_20K: Value for 20K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_24K: Value for 24K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_28K: Value for 28K RB size
+ * @IWX_CTXT_INFO_RB_SIZE_32K: Value for 32K RB size
+ */
+enum iwx_context_info_flags {
+ IWX_CTXT_INFO_AUTO_FUNC_INIT = (1 << 0),
+ IWX_CTXT_INFO_EARLY_DEBUG = (1 << 1),
+ IWX_CTXT_INFO_ENABLE_CDMP = (1 << 2),
+ IWX_CTXT_INFO_RB_CB_SIZE_POS = 4,
+ IWX_CTXT_INFO_TFD_FORMAT_LONG = (1 << 8),
+ IWX_CTXT_INFO_RB_SIZE_POS = 9,
+ IWX_CTXT_INFO_RB_SIZE_1K = 0x1,
+ IWX_CTXT_INFO_RB_SIZE_2K = 0x2,
+ IWX_CTXT_INFO_RB_SIZE_4K = 0x4,
+ IWX_CTXT_INFO_RB_SIZE_8K = 0x8,
+ IWX_CTXT_INFO_RB_SIZE_12K = 0x9,
+ IWX_CTXT_INFO_RB_SIZE_16K = 0xa,
+ IWX_CTXT_INFO_RB_SIZE_20K = 0xb,
+ IWX_CTXT_INFO_RB_SIZE_24K = 0xc,
+ IWX_CTXT_INFO_RB_SIZE_28K = 0xd,
+ IWX_CTXT_INFO_RB_SIZE_32K = 0xe,
+};
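+
+/*
+ * For illustration: a 512-entry RX ring corresponds to an exponent of 9
+ * (2**9 == 512), so control_flags would carry
+ * (9 << IWX_CTXT_INFO_RB_CB_SIZE_POS) alongside one of the RB size
+ * values above.
+ */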
+
+/*
+ * struct iwx_context_info_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: context information version id
+ * @size: the size of the context information in DWs
+ */
+struct iwx_context_info_version {
+ uint16_t mac_id;
+ uint16_t version;
+ uint16_t size;
+ uint16_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_control - version structure
+ * @control_flags: context information flags see &enum iwx_context_info_flags
+ */
+struct iwx_context_info_control {
+ uint32_t control_flags;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_dram - images DRAM map
+ * each entry in the map represents a DRAM chunk of up to 32 KB
+ * @umac_img: UMAC image DRAM map
+ * @lmac_img: LMAC image DRAM map
+ * @virtual_img: paged image DRAM map
+ */
+struct iwx_context_info_dram {
+ uint64_t umac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t lmac_img[IWX_MAX_DRAM_ENTRY];
+ uint64_t virtual_img[IWX_MAX_DRAM_ENTRY];
+} __packed;
+
+/*
+ * struct iwx_context_info_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @used_rbd_addr: default queue used RB CB base address
+ * @status_wr_ptr: default queue used RB status write pointer
+ */
+struct iwx_context_info_rbd_cfg {
+ uint64_t free_rbd_addr;
+ uint64_t used_rbd_addr;
+ uint64_t status_wr_ptr;
+} __packed;
+
+/*
+ * struct iwx_context_info_hcmd_cfg - command queue configuration
+ * @cmd_queue_addr: address of command queue
+ * @cmd_queue_size: number of entries
+ */
+struct iwx_context_info_hcmd_cfg {
+ uint64_t cmd_queue_addr;
+ uint8_t cmd_queue_size;
+ uint8_t reserved[7];
+} __packed;
+
+/*
+ * struct iwx_context_info_dump_cfg - Core Dump configuration
+ * @core_dump_addr: core dump (debug DRAM address) start address
+ * @core_dump_size: size, in DWs
+ */
+struct iwx_context_info_dump_cfg {
+ uint64_t core_dump_addr;
+ uint32_t core_dump_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_pnvm_cfg - platform NVM data configuration
+ * @platform_nvm_addr: Platform NVM data start address
+ * @platform_nvm_size: size in DWs
+ */
+struct iwx_context_info_pnvm_cfg {
+ uint64_t platform_nvm_addr;
+ uint32_t platform_nvm_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info_early_dbg_cfg - early debug configuration for
+ * dumping DRAM addresses
+ * @early_debug_addr: early debug start address
+ * @early_debug_size: size in DWs
+ */
+struct iwx_context_info_early_dbg_cfg {
+ uint64_t early_debug_addr;
+ uint32_t early_debug_size;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * struct iwx_context_info - device INIT configuration
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @rbd_cfg: default RX queue configuration
+ * @hcmd_cfg: command queue configuration
+ * @dump_cfg: core dump data
+ * @edbg_cfg: early debug configuration
+ * @pnvm_cfg: platform nvm configuration
+ * @dram: firmware image addresses in DRAM
+ */
+struct iwx_context_info {
+ struct iwx_context_info_version version;
+ struct iwx_context_info_control control;
+ uint64_t reserved0;
+ struct iwx_context_info_rbd_cfg rbd_cfg;
+ struct iwx_context_info_hcmd_cfg hcmd_cfg;
+ uint32_t reserved1[4];
+ struct iwx_context_info_dump_cfg dump_cfg;
+ struct iwx_context_info_early_dbg_cfg edbg_cfg;
+ struct iwx_context_info_pnvm_cfg pnvm_cfg;
+ uint32_t reserved2[16];
+ struct iwx_context_info_dram dram;
+ uint32_t reserved3[16];
+} __packed;
+
+
+/*
+ * Context info definitions for AX210 devices.
+ */
+
+#define IWX_CSR_CTXT_INFO_BOOT_CTRL 0x0
+#define IWX_CSR_CTXT_INFO_ADDR 0x118
+#define IWX_CSR_IML_DATA_ADDR 0x120
+#define IWX_CSR_IML_SIZE_ADDR 0x128
+#define IWX_CSR_IML_RESP_ADDR 0x12c
+
+/* Set bit for enabling automatic function boot */
+#define IWX_CSR_AUTO_FUNC_BOOT_ENA (1 << 1)
+/* Set bit for initiating function boot */
+#define IWX_CSR_AUTO_FUNC_INIT (1 << 7)
+
+/**
+ * iwx_prph_scratch_mtr_format - tfd size configuration
+ * @IWX_PRPH_MTR_FORMAT_16B: 16 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_32B: 32 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_64B: 64 bit tfd
+ * @IWX_PRPH_MTR_FORMAT_256B: 256 bit tfd
+ */
+#define IWX_PRPH_MTR_FORMAT_16B 0x0
+#define IWX_PRPH_MTR_FORMAT_32B 0x40000
+#define IWX_PRPH_MTR_FORMAT_64B 0x80000
+#define IWX_PRPH_MTR_FORMAT_256B 0xC0000
+
+/**
+ * iwx_prph_scratch_flags - PRPH scratch control flags
+ * @IWX_PRPH_SCRATCH_IMR_DEBUG_EN: IMR support for debug
+ * @IWX_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
+ * in hwm config.
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
+ * multicomm.
+ * @IWX_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
+ * @IWX_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K)
+ * @IWX_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
+ * completion descriptor, 1 for responses (legacy)
+ * @IWX_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd.
+ * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
+ * 3: 256 bit.
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_MASK: RB size full information, ignored
+ * by older firmware versions, so set IWX_PRPH_SCRATCH_RB_SIZE_4K
+ * appropriately; use the below values for this.
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
+ * @IWX_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ */
+#define IWX_PRPH_SCRATCH_IMR_DEBUG_EN (1 << 1)
+#define IWX_PRPH_SCRATCH_EARLY_DEBUG_EN (1 << 4)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_DRAM (1 << 8)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_INTERNAL (1 << 9)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER (1 << 10)
+#define IWX_PRPH_SCRATCH_EDBG_DEST_TB22DTF (1 << 11)
+#define IWX_PRPH_SCRATCH_RB_SIZE_4K (1 << 16)
+#define IWX_PRPH_SCRATCH_MTR_MODE (1 << 17)
+#define IWX_PRPH_SCRATCH_MTR_FORMAT ((1 << 18) | (1 << 19))
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_MASK (0xf << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_8K (8 << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_12K (9 << 20)
+#define IWX_PRPH_SCRATCH_RB_SIZE_EXT_16K (10 << 20)
+
+/*
+ * struct iwx_prph_scratch_version - version structure
+ * @mac_id: SKU and revision id
+ * @version: prph scratch information version id
+ * @size: the size of the context information in DWs
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_version {
+ uint16_t mac_id;
+ uint16_t version;
+ uint16_t size;
+ uint16_t reserved;
+} __packed; /* PERIPH_SCRATCH_VERSION_S */
+
+/*
+ * struct iwx_prph_scratch_control - control structure
+ * @control_flags: context information flags see &iwx_prph_scratch_flags
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_control {
+ uint32_t control_flags;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_CONTROL_S */
+
+/*
+ * struct iwx_prph_scratch_pnvm_cfg - ror config
+ * @pnvm_base_addr: PNVM start address
+ * @pnvm_size: PNVM size in DWs
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_pnvm_cfg {
+ uint64_t pnvm_base_addr;
+ uint32_t pnvm_size;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_PNVM_CFG_S */
+
+struct iwx_pnvm_section {
+ uint32_t offset;
+ const uint8_t data[];
+} __packed;
+
+/*
+ * struct iwx_prph_scratch_hwm_cfg - hwm config
+ * @hwm_base_addr: hwm start address
+ * @hwm_size: hwm size in DWs
+ * @debug_token_config: debug preset
+ */
+struct iwx_prph_scratch_hwm_cfg {
+ uint64_t hwm_base_addr;
+ uint32_t hwm_size;
+ uint32_t debug_token_config;
+} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_rbd_cfg - RBDs configuration
+ * @free_rbd_addr: default queue free RB CB base address
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch_rbd_cfg {
+ uint64_t free_rbd_addr;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_uefi_cfg - prph scratch reduce power table
+ * @base_addr: reduce power table address
+ * @size: table size in dwords
+ */
+struct iwx_prph_scratch_uefi_cfg {
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t reserved;
+} __packed; /* PERIPH_SCRATCH_UEFI_CFG_S */
+
+/*
+ * struct iwx_prph_scratch_ctrl_cfg - prph scratch ctrl and config
+ * @version: version information of context info and HW
+ * @control: control flags of FH configurations
+ * @pnvm_cfg: ror configuration
+ * @hwm_cfg: hwm configuration
+ * @rbd_cfg: default RX queue configuration
+ */
+struct iwx_prph_scratch_ctrl_cfg {
+ struct iwx_prph_scratch_version version;
+ struct iwx_prph_scratch_control control;
+ struct iwx_prph_scratch_pnvm_cfg pnvm_cfg;
+ struct iwx_prph_scratch_hwm_cfg hwm_cfg;
+ struct iwx_prph_scratch_rbd_cfg rbd_cfg;
+ struct iwx_prph_scratch_uefi_cfg reduce_power_cfg;
+} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
+
+/*
+ * struct iwx_prph_scratch - peripheral scratch mapping
+ * @ctrl_cfg: control and configuration of prph scratch
+ * @dram: firmware images addresses in DRAM
+ * @reserved: reserved
+ */
+struct iwx_prph_scratch {
+ struct iwx_prph_scratch_ctrl_cfg ctrl_cfg;
+ uint32_t reserved[12];
+ struct iwx_context_info_dram dram;
+} __packed; /* PERIPH_SCRATCH_S */
+
+/*
+ * struct iwx_prph_info - peripheral information
+ * @boot_stage_mirror: reflects the value in the Boot Stage CSR register
+ * @ipc_status_mirror: reflects the value in the IPC Status CSR register
+ * @sleep_notif: indicates the peripheral sleep status
+ * @reserved: reserved
+ */
+struct iwx_prph_info {
+ uint32_t boot_stage_mirror;
+ uint32_t ipc_status_mirror;
+ uint32_t sleep_notif;
+ uint32_t reserved;
+} __packed; /* PERIPH_INFO_S */
+
+/*
+ * struct iwx_context_info_gen3 - device INIT configuration
+ * @version: version of the context information
+ * @size: size of context information in DWs
+ * @config: context in which the peripheral would execute - a subset of
+ * capability csr register published by the peripheral
+ * @prph_info_base_addr: the peripheral information structure start address
+ * @cr_head_idx_arr_base_addr: the completion ring head index array
+ * start address
+ * @tr_tail_idx_arr_base_addr: the transfer ring tail index array
+ * start address
+ * @cr_tail_idx_arr_base_addr: the completion ring tail index array
+ * start address
+ * @tr_head_idx_arr_base_addr: the transfer ring head index array
+ * start address
+ * @cr_idx_arr_size: number of entries in the completion ring index array
+ * @tr_idx_arr_size: number of entries in the transfer ring index array
+ * @mtr_base_addr: the message transfer ring start address
+ * @mcr_base_addr: the message completion ring start address
+ * @mtr_size: number of entries which the message transfer ring can hold
+ * @mcr_size: number of entries which the message completion ring can hold
+ * @mtr_doorbell_vec: the doorbell vector associated with the message
+ * transfer ring
+ * @mcr_doorbell_vec: the doorbell vector associated with the message
+ * completion ring
+ * @mtr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a transfer descriptor in the message transfer ring
+ * @mcr_msi_vec: the MSI which shall be generated by the peripheral after
+ * completing a completion descriptor in the message completion ring
+ * @mtr_opt_header_size: the size of the optional header in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mtr_opt_footer_size: the size of the optional footer in the transfer
+ * descriptor associated with the message transfer ring in DWs
+ * @mcr_opt_header_size: the size of the optional header in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @mcr_opt_footer_size: the size of the optional footer in the completion
+ * descriptor associated with the message completion ring in DWs
+ * @msg_rings_ctrl_flags: message rings control flags
+ * @prph_info_msi_vec: the MSI which shall be generated by the peripheral
+ * after updating the Peripheral Information structure
+ * @prph_scratch_base_addr: the peripheral scratch structure start address
+ * @prph_scratch_size: the size of the peripheral scratch structure in DWs
+ * @reserved: reserved
+ */
+struct iwx_context_info_gen3 {
+ uint16_t version;
+ uint16_t size;
+ uint32_t config;
+ uint64_t prph_info_base_addr;
+ uint64_t cr_head_idx_arr_base_addr;
+ uint64_t tr_tail_idx_arr_base_addr;
+ uint64_t cr_tail_idx_arr_base_addr;
+ uint64_t tr_head_idx_arr_base_addr;
+ uint16_t cr_idx_arr_size;
+ uint16_t tr_idx_arr_size;
+ uint64_t mtr_base_addr;
+ uint64_t mcr_base_addr;
+ uint16_t mtr_size;
+ uint16_t mcr_size;
+ uint16_t mtr_doorbell_vec;
+ uint16_t mcr_doorbell_vec;
+ uint16_t mtr_msi_vec;
+ uint16_t mcr_msi_vec;
+ uint8_t mtr_opt_header_size;
+ uint8_t mtr_opt_footer_size;
+ uint8_t mcr_opt_header_size;
+ uint8_t mcr_opt_footer_size;
+ uint16_t msg_rings_ctrl_flags;
+ uint16_t prph_info_msi_vec;
+ uint64_t prph_scratch_base_addr;
+ uint32_t prph_scratch_size;
+ uint32_t reserved;
+} __packed; /* IPC_CONTEXT_INFO_S */
+
+#define IWX_MGMT_TID 15
+
+#define IWX_MQ_RX_TABLE_SIZE 512
+
+/* cb size is the exponent */
+#define IWX_RX_QUEUE_CB_SIZE(x) ((sizeof(x) <= 4) ? (fls(x) - 1) : (flsl(x) - 1))
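+
+/*
+ * For illustration: the hardware is handed the log2 exponent of the table
+ * size rather than the size itself, so the 512-entry default above yields
+ * fls(512) - 1 == 9. A minimal sketch (hypothetical helper, not part of
+ * this driver):
+ */
+#if 0
+static inline int
+iwx_example_rx_cb_size(void)
+{
+	/* 512 entries -> exponent 9, as the RX queue config expects. */
+	return (IWX_RX_QUEUE_CB_SIZE(IWX_MQ_RX_TABLE_SIZE));
+}
+#endif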
+
+/*
+ * CSR (control and status registers)
+ *
+ * CSR registers are mapped directly into PCI bus space, and are accessible
+ * whenever platform supplies power to device, even when device is in
+ * low power states due to driver-invoked device resets
+ * (e.g. IWX_CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
+ *
+ * Use iwl_write32() and iwl_read32() family to access these registers;
+ * these provide simple PCI bus access, without waking up the MAC.
+ * Do not use iwl_write_direct32() family for these registers;
+ * no need to "grab nic access" via IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
+ * The MAC (uCode processor, etc.) does not need to be powered up for accessing
+ * the CSR registers.
+ *
+ * NOTE: Device does need to be awake in order to read this memory
+ * via IWX_CSR_EEPROM and IWX_CSR_OTP registers
+ */
+#define IWX_CSR_HW_IF_CONFIG_REG (0x000) /* hardware interface config */
+#define IWX_CSR_INT_COALESCING (0x004) /* accum ints, 32-usec units */
+#define IWX_CSR_INT (0x008) /* host interrupt status/ack */
+#define IWX_CSR_INT_MASK (0x00c) /* host interrupt enable */
+#define IWX_CSR_FH_INT_STATUS (0x010) /* busmaster int status/ack*/
+#define IWX_CSR_GPIO_IN (0x018) /* read external chip pins */
+#define IWX_CSR_RESET (0x020) /* busmaster enable, NMI, etc*/
+#define IWX_CSR_GP_CNTRL (0x024)
+
+/* 2nd byte of IWX_CSR_INT_COALESCING, not accessible via iwl_write32()! */
+#define IWX_CSR_INT_PERIODIC_REG (0x005)
+
+/*
+ * Hardware revision info
+ * Bit fields:
+ * 31-16: Reserved
+ * 15-4: Type of device: see IWX_CSR_HW_REV_TYPE_xxx definitions
+ * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
+ * 1-0: "Dash" (-) value, as in A-1, etc.
+ */
+#define IWX_CSR_HW_REV (0x028)
+
+/*
+ * RF ID revision info
+ * Bit fields:
+ * 31:24: Reserved (set to 0x0)
+ * 23:12: Type
+ * 11:8: Step (A - 0x0, B - 0x1, etc)
+ * 7:4: Dash
+ * 3:0: Flavor
+ */
+#define IWX_CSR_HW_RF_ID (0x09c)
+
+
+#define IWX_CSR_GIO_REG (0x03C)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox registers.
+ * SET/CLR registers set/clear bit(s) if "1" is written.
+ */
+#define IWX_CSR_UCODE_DRV_GP1 (0x054)
+#define IWX_CSR_UCODE_DRV_GP1_SET (0x058)
+#define IWX_CSR_UCODE_DRV_GP1_CLR (0x05c)
+#define IWX_CSR_UCODE_DRV_GP2 (0x060)
+
+#define IWX_CSR_MBOX_SET_REG (0x088)
+#define IWX_CSR_MBOX_SET_REG_OS_ALIVE 0x20
+
+#define IWX_CSR_DRAM_INT_TBL_REG (0x0A0)
+#define IWX_CSR_MAC_SHADOW_REG_CTRL (0x0A8) /* 6000 and up */
+
+/* LTR control */
+#define IWX_CSR_LTR_LONG_VAL_AD (0x0d4)
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ 0x80000000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_MASK 0x1c000000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE_SHIFT 24
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_MASK 0x03ff0000
+#define IWX_CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL_SHIFT 16
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_REQ 0x00008000
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_MASK 0x00001c00
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_SCALE_SHIFT 8
+#define IWX_CSR_LTR_LONG_VAL_AD_SNOOP_VAL 0x000003ff
+#define IWX_CSR_LTR_LONG_VAL_AD_SCALE_USEC 2
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS (0x100)
+
+#define IWX_CSR_DBG_HPET_MEM_REG (0x240)
+#define IWX_CSR_DBG_LINK_PWR_MGMT_REG (0x250)
+
+/* Bits for IWX_CSR_HW_IF_CONFIG_REG */
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x000000C0)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE (0x00000C00)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH (0x00003000)
+#define IWX_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP (0x0000C000)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH (0)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP (2)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_BOARD_VER (6)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE (10)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH (12)
+#define IWX_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP (14)
+
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define IWX_CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define IWX_CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define IWX_CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
+#define IWX_CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
+
+#define IWX_CSR_INT_PERIODIC_DIS	(0x00) /* disable periodic int */
+#define IWX_CSR_INT_PERIODIC_ENA	(0xFF) /* 255*32 usec ~ 8 msec */
+
+/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
+ * acknowledged (reset) by host writing "1" to flagged bits. */
+#define IWX_CSR_INT_BIT_FH_RX (1U << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define IWX_CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define IWX_CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define IWX_CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define IWX_CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define IWX_CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define IWX_CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define IWX_CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define IWX_CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses */
+#define IWX_CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define IWX_CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+
+#define IWX_CSR_INI_SET_MASK (IWX_CSR_INT_BIT_FH_RX | \
+ IWX_CSR_INT_BIT_HW_ERR | \
+ IWX_CSR_INT_BIT_FH_TX | \
+ IWX_CSR_INT_BIT_SW_ERR | \
+ IWX_CSR_INT_BIT_RF_KILL | \
+ IWX_CSR_INT_BIT_SW_RX | \
+ IWX_CSR_INT_BIT_WAKEUP | \
+ IWX_CSR_INT_BIT_ALIVE | \
+ IWX_CSR_INT_BIT_RX_PERIODIC)
+
+/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
+#define IWX_CSR_FH_INT_BIT_ERR (1U << 31) /* Error */
+#define IWX_CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define IWX_CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define IWX_CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+
+#define IWX_CSR_FH_INT_RX_MASK (IWX_CSR_FH_INT_BIT_HI_PRIOR | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_RX_CHNL0)
+
+#define IWX_CSR_FH_INT_TX_MASK (IWX_CSR_FH_INT_BIT_TX_CHNL1 | \
+ IWX_CSR_FH_INT_BIT_TX_CHNL0)
+
+/**
+ * struct iwx_rx_transfer_desc - transfer descriptor AX210
+ * @addr: ptr to free buffer start address
+ * @rbid: unique tag of the buffer
+ * @reserved: reserved
+ */
+struct iwx_rx_transfer_desc {
+ uint16_t rbid;
+ uint16_t reserved[3];
+ uint64_t addr;
+};
+
+#define IWX_RX_CD_FLAGS_FRAGMENTED (1 << 0)
+
+/**
+ * struct iwx_rx_completion_desc - completion descriptor AX210
+ * @reserved1: reserved
+ * @rbid: unique tag of the received buffer
+ * @flags: flags (0: fragmented, all others: reserved)
+ * @reserved2: reserved
+ */
+struct iwx_rx_completion_desc {
+ uint32_t reserved1;
+ uint16_t rbid;
+ uint8_t flags;
+ uint8_t reserved2[25];
+};
+
+/* RESET */
+#define IWX_CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
+#define IWX_CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
+#define IWX_CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
+#define IWX_CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
+#define IWX_CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
+#define IWX_CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
+
+/*
+ * GP (general purpose) CONTROL REGISTER
+ * Bit fields:
+ * 27: HW_RF_KILL_SW
+ * Indicates state of (platform's) hardware RF-Kill switch
+ * 26-24: POWER_SAVE_TYPE
+ * Indicates current power-saving mode:
+ * 000 -- No power saving
+ * 001 -- MAC power-down
+ * 010 -- PHY (radio) power-down
+ * 011 -- Error
+ * 9-6: SYS_CONFIG
+ * Indicates current system configuration, reflecting pins on chip
+ * as forced high/low by device circuit board.
+ * 4: GOING_TO_SLEEP
+ * Indicates MAC is entering a power-saving sleep power-down.
+ * Not a good time to access device-internal resources.
+ * 3: MAC_ACCESS_REQ
+ * Host sets this to request and maintain MAC wakeup, to allow host
+ * access to device-internal resources. Host must wait for
+ * MAC_CLOCK_READY (and !GOING_TO_SLEEP) before accessing non-CSR
+ * device registers.
+ * 2: INIT_DONE
+ * Host sets this to put device into fully operational D0 power mode.
+ * Host resets this after SW_RESET to put device into low power mode.
+ * 0: MAC_CLOCK_READY
+ * Indicates MAC (ucode processor, etc.) is powered up and can run.
+ * Internal resources are accessible.
+ * NOTE: This does not indicate that the processor is actually running.
+ * NOTE: This does not indicate that device has completed
+ * init or post-power-down restore of internal SRAM memory.
+ * Use IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP as indication that
+ * SRAM is restored and uCode is in normal operation mode.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ * NOTE: After device reset, this bit remains "0" until host sets
+ * INIT_DONE
+ */
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
+
+#define IWX_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
+
+#define IWX_CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_RFKILL_WAKE_L1A_EN (0x04000000)
+#define IWX_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
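+
+/*
+ * A minimal sketch of the MAC_ACCESS_REQ handshake described above;
+ * IWX_SETBITS() and IWX_READ() are hypothetical stand-ins for the
+ * driver's register accessors, and the polling is simplified (no
+ * timeout, no GOING_TO_SLEEP check):
+ */
+#if 0
+	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	while ((IWX_READ(sc, IWX_CSR_GP_CNTRL) &
+	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) == 0)
+		DELAY(10);	/* MAC_CLOCK_READY gates non-CSR access */
+#endif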
+
+
+/* HW REV */
+#define IWX_CSR_HW_REV_DASH(_val) (((_val) & 0x0000003) >> 0)
+#define IWX_CSR_HW_REV_STEP(_val) (((_val) & 0x000000C) >> 2)
+#define IWX_CSR_HW_REV_TYPE(_val) (((_val) & 0x000FFF0) >> 4)
+
+#define IWX_CSR_HW_REV_TYPE_MSK (0x000FFF0)
+#define IWX_CSR_HW_REV_TYPE_QU_B0 (0x0000334)
+#define IWX_CSR_HW_REV_TYPE_QU_C0 (0x0000338)
+#define IWX_CSR_HW_REV_TYPE_QUZ (0x0000354)
+#define IWX_CSR_HW_REV_TYPE_SO (0x0000370)
+#define IWX_CSR_HW_REV_TYPE_TY (0x0000420)
+
+/* HW RFID */
+#define IWX_CSR_HW_RFID_FLAVOR(_val) (((_val) & 0x000000F) >> 0)
+#define IWX_CSR_HW_RFID_DASH(_val) (((_val) & 0x00000F0) >> 4)
+#define IWX_CSR_HW_RFID_STEP(_val) (((_val) & 0x0000F00) >> 8)
+#define IWX_CSR_HW_RFID_TYPE(_val) (((_val) & 0x0FFF000) >> 12)
+#define IWX_CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
+#define IWX_CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
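+
+/*
+ * For illustration, a hypothetical raw IWX_CSR_HW_RF_ID value of
+ * 0x00123456 decodes with the macros above as type 0x123, step 0x4,
+ * dash 0x5 and flavor 0x6, with the CDB and jacket bits (28 and 29)
+ * clear.
+ */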
+
+/* CSR GIO */
+#define IWX_CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002)
+
+/*
+ * UCODE-DRIVER GP (general purpose) mailbox register 1
+ * Host driver and uCode write and/or read this register to communicate with
+ * each other.
+ * Bit fields:
+ * 4: UCODE_DISABLE
+ * Host sets this to request permanent halt of uCode, same as
+ * sending CARD_STATE command with "halt" bit set.
+ * 3: CT_KILL_EXIT
+ * Host sets this to request exit from CT_KILL state, i.e. host thinks
+ * device temperature is low enough to continue normal operation.
+ * 2: CMD_BLOCKED
+ * Host sets this during RF KILL power-down sequence (HW, SW, CT KILL)
+ * to release uCode to clear all Tx and command queues, enter
+ * unassociated mode, and power down.
+ * NOTE: Some devices also use HBUS_TARG_MBX_C register for this bit.
+ * 1: SW_BIT_RFKILL
+ * Host sets this when issuing CARD_STATE command to request
+ * device sleep.
+ * 0: MAC_SLEEP
+ * uCode sets this when preparing a power-saving power-down.
+ * uCode resets this when power-up is complete and SRAM is sane.
+ * NOTE: device saves internal SRAM data to host when powering down,
+ * and must restore this data after powering back up.
+ * MAC_SLEEP is the best indication that restore is complete.
+ * Later devices (5xxx/6xxx/1xxx) use non-volatile SRAM, and
+ * do not need to save/restore it.
+ */
+#define IWX_CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
+#define IWX_CSR_UCODE_SW_BIT_RFKILL (0x00000002)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
+#define IWX_CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
+#define IWX_CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE (0x00000020)
+
+/* GIO Chicken Bits (PCI Express bus link power management) */
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
+#define IWX_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
+
+/* HPET MEM debug */
+#define IWX_CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
+
+/* DRAM INT TABLE */
+#define IWX_CSR_DRAM_INT_TBL_ENABLE (1U << 31)
+#define IWX_CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
+#define IWX_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
+
+/* 22000 configuration registers */
+
+/*
+ * TFH Configuration register.
+ *
+ * BIT fields:
+ *
+ * Bits 3:0:
+ * Define the maximum number of pending read requests.
+ * Maximum configuration value allowed is 0xC
+ * Bits 9:8:
+ * Define the maximum transfer size. (64 / 128 / 256)
+ * Bit 10:
+ * When bit is set and transfer size is set to 128B, the TFH will enable
+ * reading chunks of more than 64B only if the read address is aligned to 128B.
+ * In case of DRAM read address which is not aligned to 128B, the TFH will
+ * enable transfer size which doesn't cross 64B DRAM address boundary.
+*/
+#define IWX_TFH_TRANSFER_MODE (0x1F40)
+#define IWX_TFH_TRANSFER_MAX_PENDING_REQ 0xc
+#define IWX_TFH_CHUNK_SIZE_128 (1 << 8)
+#define IWX_TFH_CHUNK_SPLIT_MODE (1 << 10)
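+
+/*
+ * For illustration, these bits are typically OR'd into a single transfer
+ * mode word (a sketch; iwx_write_prph() is assumed to be the driver's
+ * peripheral register writer):
+ *
+ *	iwx_write_prph(sc, IWX_TFH_TRANSFER_MODE,
+ *	    IWX_TFH_TRANSFER_MAX_PENDING_REQ |
+ *	    IWX_TFH_CHUNK_SIZE_128 | IWX_TFH_CHUNK_SPLIT_MODE);
+ */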
+
+/*
+ * Defines the offset address in dwords referring from the beginning of the
+ * Tx CMD which will be updated in DRAM.
+ * Note that the TFH offset address for Tx CMD update is always referring to
+ * the start of the TFD first TB.
+ * In case of a DRAM Tx CMD update the TFH will update PN and Key ID
+ */
+#define IWX_TFH_TXCMD_UPDATE_CFG (0x1F48)
+
+/*
+ * Controls TX DMA operation
+ *
+ * BIT fields:
+ *
+ * Bits 31:30: Enable the SRAM DMA channel.
+ * Turning on bit 31 will kick the SRAM2DRAM DMA.
+ * Note that the sram2dram may be enabled only after configuring the DRAM and
+ * SRAM addresses registers and the byte count register.
+ * Bits 25:24: Defines the interrupt target upon dram2sram transfer done. When
+ * set to 1 - interrupt is sent to the driver
+ * Bit 0: Indicates the snoop configuration
+*/
+#define IWX_TFH_SRV_DMA_CHNL0_CTRL (0x1F60)
+#define IWX_TFH_SRV_DMA_SNOOP (1 << 0)
+#define IWX_TFH_SRV_DMA_TO_DRIVER (1 << 24)
+#define IWX_TFH_SRV_DMA_START (1U << 31)
+
+/* Defines the DMA SRAM write start address to transfer a data block */
+#define IWX_TFH_SRV_DMA_CHNL0_SRAM_ADDR (0x1F64)
+
+/* Defines the 64bits DRAM start address to read the DMA data block from */
+#define IWX_TFH_SRV_DMA_CHNL0_DRAM_ADDR (0x1F68)
+
+/*
+ * Defines the number of bytes to transfer from DRAM to SRAM.
+ * Note that this register may be configured with non-dword aligned size.
+ */
+#define IWX_TFH_SRV_DMA_CHNL0_BC (0x1F70)
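+
+/*
+ * Illustrative programming sequence for the channel-0 service DMA
+ * (DRAM to SRAM): addresses and byte count must be set before the
+ * channel is kicked, per the control register description above.
+ * The reg_write32()/reg_write64() accessors are placeholder
+ * assumptions for this sketch, not real driver helpers.
+ */
+static inline void
+iwx_example_srv_dma(void (*reg_write32)(uint32_t, uint32_t),
+    void (*reg_write64)(uint32_t, uint64_t),
+    uint64_t dram_addr, uint32_t sram_addr, uint32_t nbytes)
+{
+	reg_write64(IWX_TFH_SRV_DMA_CHNL0_DRAM_ADDR, dram_addr);
+	reg_write32(IWX_TFH_SRV_DMA_CHNL0_SRAM_ADDR, sram_addr);
+	reg_write32(IWX_TFH_SRV_DMA_CHNL0_BC, nbytes);
+	reg_write32(IWX_TFH_SRV_DMA_CHNL0_CTRL,
+	    IWX_TFH_SRV_DMA_SNOOP | IWX_TFH_SRV_DMA_TO_DRIVER |
+	    IWX_TFH_SRV_DMA_START);
+}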
+
+/* 9000 rx series registers */
+
+#define IWX_RFH_Q0_FRBDCB_BA_LSB 0xA08000 /* 64 bit address */
+#define IWX_RFH_Q_FRBDCB_BA_LSB(q) (IWX_RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_FRBDCB_WIDX 0xA08080
+#define IWX_RFH_Q_FRBDCB_WIDX(q) (IWX_RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define IWX_RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define IWX_RFH_Q_FRBDCB_WIDX_TRG(q) (IWX_RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
+/* Read index table */
+#define IWX_RFH_Q0_FRBDCB_RIDX 0xA080C0
+#define IWX_RFH_Q_FRBDCB_RIDX(q) (IWX_RFH_Q0_FRBDCB_RIDX + (q) * 4)
+/* Used list table */
+#define IWX_RFH_Q0_URBDCB_BA_LSB 0xA08100 /* 64 bit address */
+#define IWX_RFH_Q_URBDCB_BA_LSB(q) (IWX_RFH_Q0_URBDCB_BA_LSB + (q) * 8)
+/* Write index table */
+#define IWX_RFH_Q0_URBDCB_WIDX 0xA08180
+#define IWX_RFH_Q_URBDCB_WIDX(q) (IWX_RFH_Q0_URBDCB_WIDX + (q) * 4)
+#define IWX_RFH_Q0_URBDCB_VAID 0xA081C0
+#define IWX_RFH_Q_URBDCB_VAID(q) (IWX_RFH_Q0_URBDCB_VAID + (q) * 4)
+/* stts */
+#define IWX_RFH_Q0_URBD_STTS_WPTR_LSB 0xA08200 /* 64 bit address */
+#define IWX_RFH_Q_URBD_STTS_WPTR_LSB(q) (IWX_RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
+
+#define IWX_RFH_Q0_ORB_WPTR_LSB 0xA08280
+#define IWX_RFH_Q_ORB_WPTR_LSB(q) (IWX_RFH_Q0_ORB_WPTR_LSB + (q) * 8)
+#define IWX_RFH_RBDBUF_RBD0_LSB 0xA08300
+#define IWX_RFH_RBDBUF_RBD_LSB(q) (IWX_RFH_RBDBUF_RBD0_LSB + (q) * 8)
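+
+/*
+ * The per-queue macros above stride from the queue-0 base address:
+ * 64-bit base-address registers are 8 bytes apart, 32-bit index
+ * registers 4 bytes apart. A compile-time sketch of the arithmetic
+ * (C11 _Static_assert assumed available):
+ */
+_Static_assert(IWX_RFH_Q_FRBDCB_WIDX(2) == 0xA08088,
+    "per-queue write-index registers stride by 4 bytes");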
+
+/**
+ * RFH Status Register
+ *
+ * Bit fields:
+ *
+ * Bit 29: RBD_FETCH_IDLE
+ * This status flag is set by the RFH when there is no active RBD fetch from
+ * DRAM.
+ * Once the RFH RBD controller starts fetching (or when there is a pending
+ * RBD read response from DRAM), this flag is immediately turned off.
+ *
+ * Bit 30: SRAM_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * SRAM to DRAM.
+ * Once the SRAM to DRAM DMA is active, this flag is immediately turned off.
+ *
+ * Bit 31: RXF_DMA_IDLE
+ * This status flag is set by the RFH when there is no active transaction from
+ * RXF to DRAM.
+ * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
+ */
+#define IWX_RFH_GEN_STATUS 0xA09808
+#define IWX_RFH_GEN_STATUS_GEN3 0xA07824
+#define IWX_RBD_FETCH_IDLE (1 << 29)
+#define IWX_SRAM_DMA_IDLE (1 << 30)
+#define IWX_RXF_DMA_IDLE (1U << 31)
+
+/* DMA configuration */
+#define IWX_RFH_RXF_DMA_CFG 0xA09820
+#define IWX_RFH_RXF_DMA_CFG_GEN3 0xA07880
+/* RB size */
+#define IWX_RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
+#define IWX_RFH_RXF_DMA_RB_SIZE_POS 16
+#define IWX_RFH_RXF_DMA_RB_SIZE_1K (0x1 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_2K (0x2 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_4K (0x4 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_8K (0x8 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_12K (0x9 << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_16K (0xA << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_20K (0xB << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_24K (0xC << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_28K (0xD << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RB_SIZE_32K (0xE << IWX_RFH_RXF_DMA_RB_SIZE_POS)
+/* RB Circular Buffer size: defines the table sizes in RBD units */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_MASK (0x00F00000) /* bits 20-23 */
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_POS 20
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_8 (0x3 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_16 (0x4 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_32 (0x5 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_64 (0x6 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_128 (0x7 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_256 (0x8 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_512 (0x9 << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_1024 (0xA << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_RBDCB_SIZE_2048 (0xB << IWX_RFH_RXF_DMA_RBDCB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_MASK (0x03000000) /* bit 24-25 */
+#define IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS 24
+#define IWX_RFH_RXF_DMA_MIN_RB_4_8 (3 << IWX_RFH_RXF_DMA_MIN_RB_SIZE_POS)
+#define IWX_RFH_RXF_DMA_DROP_TOO_LARGE_MASK (0x04000000) /* bit 26 */
+#define IWX_RFH_RXF_DMA_SINGLE_FRAME_MASK (0x20000000) /* bit 29 */
+#define IWX_RFH_DMA_EN_MASK (0xC0000000) /* bits 30-31*/
+#define IWX_RFH_DMA_EN_ENABLE_VAL (1U << 31)
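+
+/*
+ * Illustrative composition of an IWX_RFH_RXF_DMA_CFG value for 4 KB
+ * receive buffers in a 512-entry RBD circular buffer with the DMA
+ * enabled; a sketch only, real drivers derive these from their RX
+ * configuration.
+ */
+static inline uint32_t
+iwx_example_rxf_dma_cfg(void)
+{
+	return (IWX_RFH_RXF_DMA_RB_SIZE_4K |
+	    IWX_RFH_RXF_DMA_RBDCB_SIZE_512 |
+	    IWX_RFH_RXF_DMA_MIN_RB_4_8 |
+	    IWX_RFH_DMA_EN_ENABLE_VAL);
+}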
+
+#define IWX_RFH_RXF_RXQ_ACTIVE 0xA0980C
+
+#define IWX_RFH_GEN_CFG 0xA09800
+#define IWX_RFH_GEN_CFG_SERVICE_DMA_SNOOP (1 << 0)
+#define IWX_RFH_GEN_CFG_RFH_DMA_SNOOP (1 << 1)
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_128 0x00000010
+#define IWX_RFH_GEN_CFG_RB_CHUNK_SIZE_64 0x00000000
+/* the driver assumes everywhere that the default RXQ is 0 */
+#define IWX_RFH_GEN_CFG_DEFAULT_RXQ_NUM 0xF00
+
+/* end of 9000 rx series registers */
+
+/*
+ * This register is written by driver and is read by uCode during boot flow.
+ * Note this address is cleared after MAC reset.
+ */
+#define IWX_UREG_UCODE_LOAD_STATUS (0xa05c40)
+#define IWX_UREG_CPU_INIT_RUN (0xa05c44)
+
+/*
+ * HBUS (Host-side Bus)
+ *
+ * HBUS registers are mapped directly into PCI bus space, but are used
+ * to indirectly access the device's internal memory or registers that
+ * may be powered down.
+ *
+ * Use iwl_write_direct32()/iwl_read_direct32() family for these registers;
+ * host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
+ * to make sure the MAC (uCode processor, etc.) is powered up for accessing
+ * internal resources.
+ *
+ * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * these provide only simple PCI bus access, without waking up the MAC.
+ */
+#define IWX_HBUS_BASE (0x400)
+
+/*
+ * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
+ * structures, error log, event log, verifying uCode load).
+ * First write to address register, then read from or write to data register
+ * to complete the job. Once the address register is set up, accesses to
+ * data registers auto-increment the address by one dword.
+ * Bit usage for address registers (read or write):
+ * 0-31: memory address within device
+ */
+#define IWX_HBUS_TARG_MEM_RADDR (IWX_HBUS_BASE+0x00c)
+#define IWX_HBUS_TARG_MEM_WADDR (IWX_HBUS_BASE+0x010)
+#define IWX_HBUS_TARG_MEM_WDAT (IWX_HBUS_BASE+0x018)
+#define IWX_HBUS_TARG_MEM_RDAT (IWX_HBUS_BASE+0x01c)
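+
+/*
+ * Sketch of the auto-incrementing indirect SRAM read sequence described
+ * above. The reg_read32()/reg_write32() accessors are placeholders
+ * assumed for illustration (not the driver's real helpers), and NIC
+ * access must already have been grabbed as noted above.
+ */
+static inline void
+iwx_example_read_mem(uint32_t (*reg_read32)(uint32_t),
+    void (*reg_write32)(uint32_t, uint32_t),
+    uint32_t addr, uint32_t *buf, int ndwords)
+{
+	int i;
+
+	reg_write32(IWX_HBUS_TARG_MEM_RADDR, addr);
+	for (i = 0; i < ndwords; i++)
+		buf[i] = reg_read32(IWX_HBUS_TARG_MEM_RDAT);
+}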
+
+/*
+ * Registers for accessing device's internal peripheral registers
+ * (e.g. SCD, BSM, etc.). First write to address register,
+ * then read from or write to data register to complete the job.
+ * Bit usage for address registers (read or write):
+ * 0-15: register address (offset) within device
+ * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
+ */
+#define IWX_HBUS_TARG_PRPH_WADDR (IWX_HBUS_BASE+0x044)
+#define IWX_HBUS_TARG_PRPH_RADDR (IWX_HBUS_BASE+0x048)
+#define IWX_HBUS_TARG_PRPH_WDAT (IWX_HBUS_BASE+0x04c)
+#define IWX_HBUS_TARG_PRPH_RDAT (IWX_HBUS_BASE+0x050)
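+
+/*
+ * Sketch of an indirect peripheral register write per the bit layout
+ * above: bits 24-25 carry (#bytes - 1), so 3 selects a full dword.
+ * reg_write32() is again a placeholder assumption.
+ */
+static inline void
+iwx_example_write_prph(void (*reg_write32)(uint32_t, uint32_t),
+    uint32_t addr, uint32_t val)
+{
+	reg_write32(IWX_HBUS_TARG_PRPH_WADDR,
+	    ((addr & 0x000FFFFF) | (3 << 24)));
+	reg_write32(IWX_HBUS_TARG_PRPH_WDAT, val);
+}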
+
+/* enable the ID buf for read */
+#define IWX_WFPM_PS_CTL_CLR 0xa0300c
+#define IWX_WFMP_MAC_ADDR_0 0xa03080
+#define IWX_WFMP_MAC_ADDR_1 0xa03084
+#define IWX_LMPM_PMG_EN 0xa01cec
+#define IWX_RADIO_REG_SYS_MANUAL_DFT_0 0xad4078
+#define IWX_RFIC_REG_RD 0xad0470
+#define IWX_WFPM_CTRL_REG 0xa03030
+#define IWX_WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK 0x08000000
+#define IWX_ENABLE_WFPM 0x80000000
+
+#define IWX_AUX_MISC_MASTER1_EN 0xa20818
+#define IWX_AUX_MISC_MASTER1_EN_SBE_MSK 0x1
+#define IWX_AUX_MISC_MASTER1_SMPHR_STATUS 0xa20800
+#define IWX_RSA_ENABLE 0xa24b08
+#define IWX_PREG_AUX_BUS_WPROT_0 0xa04cc0
+#define IWX_PREG_PRPH_WPROT_9000 0xa04ce0
+#define IWX_PREG_PRPH_WPROT_22000 0xa04d00
+#define IWX_SB_CFG_OVERRIDE_ADDR 0xa26c78
+#define IWX_SB_CFG_OVERRIDE_ENABLE 0x8000
+#define IWX_SB_CFG_BASE_OVERRIDE 0xa20000
+#define IWX_SB_MODIFY_CFG_FLAG 0xa03088
+#define IWX_UMAG_SB_CPU_1_STATUS 0xa038c0
+#define IWX_UMAG_SB_CPU_2_STATUS 0xa038c4
+
+#define IWX_UREG_CHICK 0xa05c00
+#define IWX_UREG_CHICK_MSI_ENABLE (1 << 24)
+#define IWX_UREG_CHICK_MSIX_ENABLE (1 << 25)
+
+#define IWX_HPM_DEBUG 0xa03440
+#define IWX_PERSISTENCE_BIT (1 << 12)
+#define IWX_PREG_WFPM_ACCESS (1 << 12)
+
+#define IWX_HPM_HIPM_GEN_CFG 0xa03458
+#define IWX_HPM_HIPM_GEN_CFG_CR_PG_EN (1 << 0)
+#define IWX_HPM_HIPM_GEN_CFG_CR_SLP_EN (1 << 1)
+#define IWX_HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE (1 << 10)
+
+#define IWX_UREG_DOORBELL_TO_ISR6 0xa05c04
+#define IWX_UREG_DOORBELL_TO_ISR6_NMI_BIT (1 << 0)
+#define IWX_UREG_DOORBELL_TO_ISR6_RESET_HANDSHAKE ((1 << 0) | (1 << 1))
+#define IWX_UREG_DOORBELL_TO_ISR6_SUSPEND (1 << 18)
+#define IWX_UREG_DOORBELL_TO_ISR6_RESUME (1 << 19)
+#define IWX_UREG_DOORBELL_TO_ISR6_PNVM (1 << 20)
+
+/* LTR control (Qu only) */
+#define IWX_HPM_MAC_LTR_CSR 0xa0348c
+#define IWX_HPM_MAC_LRT_ENABLE_ALL 0xf
+/* also uses CSR_LTR_* for values */
+#define IWX_HPM_UMAC_LTR 0xa03480
+
+/*
+ * Per-Tx-queue write pointer (index, really!)
+ * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Bit usage:
+ * 0-7: queue write index
+ * 11-8: queue selector
+ */
+#define IWX_HBUS_TARG_WRPTR (IWX_HBUS_BASE+0x060)
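+
+/*
+ * Per the bit layout above, a write-pointer update combines the queue
+ * selector (bits 8-11) with the write index (bits 0-7); a sketch only:
+ */
+static inline uint32_t
+iwx_example_wrptr_val(uint32_t qid, uint32_t idx)
+{
+	return ((qid << 8) | (idx & 0xff));
+}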
+
+/**********************************************************
+ * CSR values
+ **********************************************************/
+/*
+ * Host interrupt timeout value, used when setting the interrupt
+ * coalescing timer. The CSR_INT_COALESCING register is 8 bits wide,
+ * in units of 32 usecs.
+ *
+ * The default interrupt coalescing timer is 64 x 32 = 2048 usecs.
+ */
+#define IWX_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWX_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWX_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWX_HOST_INT_OPER_MODE (1U << 31)
+
+/*****************************************************************************
+ * MSIX related registers *
+ *****************************************************************************/
+
+#define IWX_CSR_MSIX_BASE (0x2000)
+#define IWX_CSR_MSIX_FH_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x800)
+#define IWX_CSR_MSIX_FH_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x804)
+#define IWX_CSR_MSIX_HW_INT_CAUSES_AD (IWX_CSR_MSIX_BASE + 0x808)
+#define IWX_CSR_MSIX_HW_INT_MASK_AD (IWX_CSR_MSIX_BASE + 0x80C)
+#define IWX_CSR_MSIX_AUTOMASK_ST_AD (IWX_CSR_MSIX_BASE + 0x810)
+#define IWX_CSR_MSIX_RX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x880)
+#define IWX_CSR_MSIX_IVAR_AD_REG (IWX_CSR_MSIX_BASE + 0x890)
+#define IWX_CSR_MSIX_PENDING_PBA_AD (IWX_CSR_MSIX_BASE + 0x1000)
+#define IWX_CSR_MSIX_RX_IVAR(cause) (IWX_CSR_MSIX_RX_IVAR_AD_REG + (cause))
+#define IWX_CSR_MSIX_IVAR(cause) (IWX_CSR_MSIX_IVAR_AD_REG + (cause))
+
+/*
+ * Causes for the FH register interrupts
+ */
+enum msix_fh_int_causes {
+ IWX_MSIX_FH_INT_CAUSES_Q0 = (1 << 0),
+ IWX_MSIX_FH_INT_CAUSES_Q1 = (1 << 1),
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH0_NUM = (1 << 16),
+ IWX_MSIX_FH_INT_CAUSES_D2S_CH1_NUM = (1 << 17),
+ IWX_MSIX_FH_INT_CAUSES_S2D = (1 << 19),
+ IWX_MSIX_FH_INT_CAUSES_FH_ERR = (1 << 21),
+};
+
+/*
+ * Causes for the HW register interrupts
+ */
+enum msix_hw_int_causes {
+ IWX_MSIX_HW_INT_CAUSES_REG_ALIVE = (1 << 0),
+ IWX_MSIX_HW_INT_CAUSES_REG_WAKEUP = (1 << 1),
+ IWX_MSIX_HW_INT_CAUSES_REG_RESET_DONE = (1 << 2),
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = (1 << 5),
+ IWX_MSIX_HW_INT_CAUSES_REG_CT_KILL = (1 << 6),
+ IWX_MSIX_HW_INT_CAUSES_REG_RF_KILL = (1 << 7),
+ IWX_MSIX_HW_INT_CAUSES_REG_PERIODIC = (1 << 8),
+ IWX_MSIX_HW_INT_CAUSES_REG_SW_ERR = (1 << 25),
+ IWX_MSIX_HW_INT_CAUSES_REG_SCD = (1 << 26),
+ IWX_MSIX_HW_INT_CAUSES_REG_FH_TX = (1 << 27),
+ IWX_MSIX_HW_INT_CAUSES_REG_HW_ERR = (1 << 29),
+ IWX_MSIX_HW_INT_CAUSES_REG_HAP = (1 << 30),
+};
+
+/*
+ * Registers to map causes to vectors
+ */
+enum msix_ivar_for_cause {
+ IWX_MSIX_IVAR_CAUSE_D2S_CH0_NUM = 0x0,
+ IWX_MSIX_IVAR_CAUSE_D2S_CH1_NUM = 0x1,
+ IWX_MSIX_IVAR_CAUSE_S2D = 0x3,
+ IWX_MSIX_IVAR_CAUSE_FH_ERR = 0x5,
+ IWX_MSIX_IVAR_CAUSE_REG_ALIVE = 0x10,
+ IWX_MSIX_IVAR_CAUSE_REG_WAKEUP = 0x11,
+ IWX_MSIX_IVAR_CAUSE_REG_RESET_DONE = 0x12,
+ IWX_MSIX_IVAR_CAUSE_REG_CT_KILL = 0x16,
+ IWX_MSIX_IVAR_CAUSE_REG_RF_KILL = 0x17,
+ IWX_MSIX_IVAR_CAUSE_REG_PERIODIC = 0x18,
+ IWX_MSIX_IVAR_CAUSE_REG_SW_ERR = 0x29,
+ IWX_MSIX_IVAR_CAUSE_REG_SCD = 0x2a,
+ IWX_MSIX_IVAR_CAUSE_REG_FH_TX = 0x2b,
+ IWX_MSIX_IVAR_CAUSE_REG_HW_ERR = 0x2d,
+ IWX_MSIX_IVAR_CAUSE_REG_HAP = 0x2e,
+};
+
+#define IWX_MSIX_AUTO_CLEAR_CAUSE (0 << 7)
+#define IWX_MSIX_NON_AUTO_CLEAR_CAUSE (1 << 7)
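+
+/*
+ * Sketch of binding an interrupt cause to an MSI-X vector: the IVAR
+ * table holds one byte per cause, with bit 7 selecting non-auto-clear
+ * behaviour. The reg_write8() accessor is assumed for illustration.
+ */
+static inline void
+iwx_example_map_msix_cause(void (*reg_write8)(uint32_t, uint8_t),
+    int cause, uint8_t vector)
+{
+	reg_write8(IWX_CSR_MSIX_IVAR(cause),
+	    vector | IWX_MSIX_NON_AUTO_CLEAR_CAUSE);
+}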
+
+#define IWX_CSR_ADDR_BASE(sc) ((sc)->mac_addr_from_csr)
+#define IWX_CSR_MAC_ADDR0_OTP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x00)
+#define IWX_CSR_MAC_ADDR1_OTP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x04)
+#define IWX_CSR_MAC_ADDR0_STRAP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x08)
+#define IWX_CSR_MAC_ADDR1_STRAP(sc) (IWX_CSR_ADDR_BASE(sc) + 0x0c)
+
+/**
+ * uCode API flags
+ * @IWX_UCODE_TLV_FLAGS_PAN: This is PAN capable microcode; this previously
+ * was a separate TLV but moved here to save space.
+ * @IWX_UCODE_TLV_FLAGS_NEWSCAN: new uCode scan behaviour on hidden SSID,
+ * treats good CRC threshold as a boolean
+ * @IWX_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
+ * @IWX_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
+ * @IWX_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWX_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWX_UCODE_TLV_FLAGS_SHORT_BL: 16 entries of black list instead of 64 in scan
+ * offload profile config command.
+ * @IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID: not sending a probe with the SSID element
+ * from the probe request template.
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL: new NS offload (small version)
+ * @IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
+ * single bound interface).
+ * @IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWX_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
+ * @IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWX_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWX_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ *
+ */
+#define IWX_UCODE_TLV_FLAGS_PAN (1 << 0)
+#define IWX_UCODE_TLV_FLAGS_NEWSCAN (1 << 1)
+#define IWX_UCODE_TLV_FLAGS_MFP (1 << 2)
+#define IWX_UCODE_TLV_FLAGS_P2P (1 << 3)
+#define IWX_UCODE_TLV_FLAGS_DW_BC_TABLE (1 << 4)
+#define IWX_UCODE_TLV_FLAGS_SHORT_BL (1 << 7)
+#define IWX_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS (1 << 10)
+#define IWX_UCODE_TLV_FLAGS_NO_BASIC_SSID (1 << 12)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL (1 << 15)
+#define IWX_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE (1 << 16)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS (1 << 21)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM (1 << 22)
+#define IWX_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM (1 << 23)
+#define IWX_UCODE_TLV_FLAGS_UAPSD_SUPPORT (1 << 24)
+#define IWX_UCODE_TLV_FLAGS_EBS_SUPPORT (1 << 25)
+#define IWX_UCODE_TLV_FLAGS_P2P_PS_UAPSD (1 << 26)
+#define IWX_UCODE_TLV_FLAGS_BCAST_FILTERING (1 << 29)
+#define IWX_UCODE_TLV_FLAGS_GO_UAPSD (1 << 30)
+#define IWX_UCODE_TLV_FLAGS_LTE_COEX (1U << 31)
+
+#define IWX_UCODE_TLV_FLAG_BITS \
+ "\020\1PAN\2NEWSCAN\3MFP\4P2P\5DW_BC_TABLE\6NEWBT_COEX\7PM_CMD\10SHORT_BL\11RX_ENERGY\12TIME_EVENT_V2\13D3_6_IPV6\14BF_UPDATED\15NO_BASIC_SSID\17D3_CONTINUITY\20NEW_NSOFFL_S\21NEW_NSOFFL_L\22SCHED_SCAN\24STA_KEY_CMD\25DEVICE_PS_CMD\26P2P_PS\27P2P_PS_DCM\30P2P_PS_SCM\31UAPSD_SUPPORT\32EBS\33P2P_PS_UAPSD\36BCAST_FILTERING\37GO_UAPSD\40LTE_COEX"
+
+/**
+ * uCode TLV api
+ * @IWX_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ * @IWX_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWX_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
+ * @IWX_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWX_UCODE_TLV_API_NEW_VERSION: new versioning format
+ * @IWX_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ * (command version 3) that supports per-chain limits
+ * @IWX_UCODE_TLV_API_SCAN_TSF_REPORT: Scan start time reported in scan
+ * iteration complete notification, and the timestamp reported for RX
+ * received during scan, are reported in TSF of the mac specified in the
+ * scan request.
+ * @IWX_UCODE_TLV_API_TKIP_MIC_KEYS: This ucode supports version 2 of
+ * ADD_MODIFY_STA_KEY_API_S_VER_2.
+ * @IWX_UCODE_TLV_API_STA_TYPE: This ucode supports station type assignment.
+ * @IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
+ * @IWX_UCODE_TLV_API_NEW_RX_STATS: should new RX STATISTICS API be used
+ * @IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ * SCAN_CONFIG_DB_CMD_API_S.
+ *
+ * @IWX_NUM_UCODE_TLV_API: number of bits used
+ */
+#define IWX_UCODE_TLV_API_FRAGMENTED_SCAN 8
+#define IWX_UCODE_TLV_API_WIFI_MCC_UPDATE 9
+#define IWX_UCODE_TLV_API_WIDE_CMD_HDR 14
+#define IWX_UCODE_TLV_API_LQ_SS_PARAMS 18
+#define IWX_UCODE_TLV_API_NEW_VERSION 20
+#define IWX_UCODE_TLV_API_EXT_SCAN_PRIORITY 24
+#define IWX_UCODE_TLV_API_TX_POWER_CHAIN 27
+#define IWX_UCODE_TLV_API_SCAN_TSF_REPORT 28
+#define IWX_UCODE_TLV_API_TKIP_MIC_KEYS 29
+#define IWX_UCODE_TLV_API_STA_TYPE 30
+#define IWX_UCODE_TLV_API_NAN2_VER2 31
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL 32
+#define IWX_UCODE_TLV_API_NEW_RX_STATS 35
+#define IWX_UCODE_TLV_API_WOWLAN_KEY_MATERIAL 36
+#define IWX_UCODE_TLV_API_QUOTA_LOW_LATENCY 38
+#define IWX_UCODE_TLV_API_DEPRECATE_TTAK 41
+#define IWX_UCODE_TLV_API_ADAPTIVE_DWELL_V2 42
+#define IWX_UCODE_TLV_API_NAN_NOTIF_V2 43
+#define IWX_UCODE_TLV_API_FRAG_EBS 44
+#define IWX_UCODE_TLV_API_REDUCE_TX_POWER 45
+#define IWX_UCODE_TLV_API_SHORT_BEACON_NOTIF 46
+#define IWX_UCODE_TLV_API_BEACON_FILTER_V4 47
+#define IWX_UCODE_TLV_API_REGULATORY_NVM_INFO 48
+#define IWX_UCODE_TLV_API_FTM_NEW_RANGE_REQ 49
+#define IWX_UCODE_TLV_API_SCAN_OFFLOAD_CHANS 50
+#define IWX_UCODE_TLV_API_MBSSID_HE 52
+#define IWX_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE 53
+#define IWX_UCODE_TLV_API_FTM_RTT_ACCURACY 54
+#define IWX_UCODE_TLV_API_SAR_TABLE_VER 55
+#define IWX_UCODE_TLV_API_REDUCED_SCAN_CONFIG 56
+#define IWX_UCODE_TLV_API_ADWELL_HB_DEF_N_AP 57
+#define IWX_UCODE_TLV_API_SCAN_EXT_CHAN_VER 58
+#define IWX_UCODE_TLV_API_BAND_IN_RX_DATA 59
+#define IWX_NUM_UCODE_TLV_API 128
+
+#define IWX_UCODE_TLV_API_BITS \
+ "\020\10FRAGMENTED_SCAN\11WIFI_MCC_UPDATE\16WIDE_CMD_HDR\22LQ_SS_PARAMS\30EXT_SCAN_PRIO\33TX_POWER_CHAIN\35TKIP_MIC_KEYS"
+
+/**
+ * uCode capabilities
+ * @IWX_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWX_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWX_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWX_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
+ * @IWX_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWX_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWX_UCODE_TLV_CAPA_DC2DC_SUPPORT: supports DC2DC Command
+ * @IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT: supports 2G coex Command
+ * @IWX_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement to
+ * IWX_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWX_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWX_UCODE_TLV_CAPA_NAN_SUPPORT: supports NAN
+ * @IWX_UCODE_TLV_CAPA_UMAC_UPLOAD: supports upload mode in umac (1=supported,
+ * 0=no support)
+ * @IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS: firmware supports ultra high band
+ * (6 GHz).
+ * @IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ * antenna the beacon should be transmitted
+ * @IWX_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
+ * @IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWX_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
+ * @IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWX_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWX_UCODE_TLV_CAPA_LMAC_UPLOAD: supports upload mode in lmac (1=supported,
+ * 0=no support)
+ *
+ * @IWX_NUM_UCODE_TLV_CAPA: number of bits used
+ */
+#define IWX_UCODE_TLV_CAPA_D0I3_SUPPORT 0
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT 1
+#define IWX_UCODE_TLV_CAPA_UMAC_SCAN 2
+#define IWX_UCODE_TLV_CAPA_BEAMFORMER 3
+#define IWX_UCODE_TLV_CAPA_TOF_SUPPORT 5
+#define IWX_UCODE_TLV_CAPA_TDLS_SUPPORT 6
+#define IWX_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT 8
+#define IWX_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT 9
+#define IWX_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT 10
+#define IWX_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT 11
+#define IWX_UCODE_TLV_CAPA_DQA_SUPPORT 12
+#define IWX_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH 13
+#define IWX_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG 17
+#define IWX_UCODE_TLV_CAPA_HOTSPOT_SUPPORT 18
+#define IWX_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT 19
+#define IWX_UCODE_TLV_CAPA_2G_COEX_SUPPORT 20
+#define IWX_UCODE_TLV_CAPA_CSUM_SUPPORT 21
+#define IWX_UCODE_TLV_CAPA_RADIO_BEACON_STATS 22
+#define IWX_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD 26
+#define IWX_UCODE_TLV_CAPA_BT_COEX_PLCR 28
+#define IWX_UCODE_TLV_CAPA_LAR_MULTI_MCC 29
+#define IWX_UCODE_TLV_CAPA_BT_COEX_RRC 30
+#define IWX_UCODE_TLV_CAPA_GSCAN_SUPPORT 31
+#define IWX_UCODE_TLV_CAPA_NAN_SUPPORT 34
+#define IWX_UCODE_TLV_CAPA_UMAC_UPLOAD 35
+#define IWX_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT 37
+#define IWX_UCODE_TLV_CAPA_STA_PM_NOTIF 38
+#define IWX_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT 39
+#define IWX_UCODE_TLV_CAPA_CDB_SUPPORT 40
+#define IWX_UCODE_TLV_CAPA_D0I3_END_FIRST 41
+#define IWX_UCODE_TLV_CAPA_TLC_OFFLOAD 43
+#define IWX_UCODE_TLV_CAPA_DYNAMIC_QUOTA 44
+#define IWX_UCODE_TLV_CAPA_COEX_SCHEMA_2 45
+#define IWX_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD 46
+#define IWX_UCODE_TLV_CAPA_FTM_CALIBRATED 47
+#define IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS 48
+#define IWX_UCODE_TLV_CAPA_CS_MODIFY 49
+#define IWX_UCODE_TLV_CAPA_SET_LTR_GEN2 50
+#define IWX_UCODE_TLV_CAPA_SET_PPAG 52
+#define IWX_UCODE_TLV_CAPA_TAS_CFG 53
+#define IWX_UCODE_TLV_CAPA_SESSION_PROT_CMD 54
+#define IWX_UCODE_TLV_CAPA_PROTECTED_TWT 56
+#define IWX_UCODE_TLV_CAPA_FW_RESET_HANDSHAKE 57
+#define IWX_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN 58
+#define IWX_UCODE_TLV_CAPA_BAID_ML_SUPPORT 63
+#define IWX_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE 64
+#define IWX_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS 65
+#define IWX_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT 67
+#define IWX_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT 68
+#define IWX_UCODE_TLV_CAPA_CSA_AND_TBTT_OFFLOAD 70
+#define IWX_UCODE_TLV_CAPA_BEACON_ANT_SELECTION 71
+#define IWX_UCODE_TLV_CAPA_BEACON_STORING 72
+#define IWX_UCODE_TLV_CAPA_LAR_SUPPORT_V3 73
+#define IWX_UCODE_TLV_CAPA_CT_KILL_BY_FW 74
+#define IWX_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT 75
+#define IWX_UCODE_TLV_CAPA_CTDP_SUPPORT 76
+#define IWX_UCODE_TLV_CAPA_USNIFFER_UNIFIED 77
+#define IWX_UCODE_TLV_CAPA_LMAC_UPLOAD 79
+#define IWX_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG 80
+#define IWX_UCODE_TLV_CAPA_LQM_SUPPORT 81
+#define IWX_UCODE_TLV_CAPA_TX_POWER_ACK 84
+#define IWX_UCODE_TLV_CAPA_D3_DEBUG 87
+#define IWX_UCODE_TLV_CAPA_LED_CMD_SUPPORT 88
+#define IWX_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT 89
+#define IWX_UCODE_TLV_CAPA_CSI_REPORTING 90
+#define IWX_UCODE_TLV_CAPA_CSI_REPORTING_V2 91
+#define IWX_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP 92
+#define IWX_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP 93
+#define IWX_UCODE_TLV_CAPA_MLME_OFFLOAD 96
+#define IWX_UCODE_TLV_CAPA_BIGTK_SUPPORT 100
+#define IWX_UCODE_TLV_CAPA_RFIM_SUPPORT 102
+
+#define IWX_NUM_UCODE_TLV_CAPA 128
+
+/*
+ * For 16.0 uCode and above, there is no differentiation between sections,
+ * just an offset to the HW address.
+ */
+#define IWX_CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define IWX_PAGING_SEPARATOR_SECTION 0xAAAABBBB
+
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IWX_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IWX_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IWX_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IWX_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
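+
+/*
+ * Example decode of the packed version word: 0x4A030EC2 is major 74,
+ * minor 3, API 14, serial 194. A sketch assuming kernel printf(9):
+ */
+static inline void
+iwx_example_print_ucode_ver(uint32_t ver)
+{
+	printf("uCode %u.%u API %u serial %u\n",
+	    IWX_UCODE_MAJOR(ver), IWX_UCODE_MINOR(ver),
+	    IWX_UCODE_API(ver), IWX_UCODE_SERIAL(ver));
+}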
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwx_tlv_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+#define IWX_FW_PHY_CFG_RADIO_TYPE_POS 0
+#define IWX_FW_PHY_CFG_RADIO_TYPE (0x3 << IWX_FW_PHY_CFG_RADIO_TYPE_POS)
+#define IWX_FW_PHY_CFG_RADIO_STEP_POS 2
+#define IWX_FW_PHY_CFG_RADIO_STEP (0x3 << IWX_FW_PHY_CFG_RADIO_STEP_POS)
+#define IWX_FW_PHY_CFG_RADIO_DASH_POS 4
+#define IWX_FW_PHY_CFG_RADIO_DASH (0x3 << IWX_FW_PHY_CFG_RADIO_DASH_POS)
+#define IWX_FW_PHY_CFG_TX_CHAIN_POS 16
+#define IWX_FW_PHY_CFG_TX_CHAIN (0xf << IWX_FW_PHY_CFG_TX_CHAIN_POS)
+#define IWX_FW_PHY_CFG_RX_CHAIN_POS 20
+#define IWX_FW_PHY_CFG_RX_CHAIN (0xf << IWX_FW_PHY_CFG_RX_CHAIN_POS)
+
+/**
+ * struct iwx_fw_cipher_scheme - a cipher scheme supported by FW.
+ * @cipher: a cipher suite selector
+ * @flags: cipher scheme flags (currently reserved for a future use)
+ * @hdr_len: a size of MPDU security header
+ * @pn_len: a size of PN
+ * @pn_off: an offset of pn from the beginning of the security header
+ * @key_idx_off: an offset of key index byte in the security header
+ * @key_idx_mask: a bit mask of key_idx bits
+ * @key_idx_shift: bit shift needed to get key_idx
+ * @mic_len: mic length in bytes
+ * @hw_cipher: a HW cipher index used in host commands
+ */
+struct iwx_fw_cipher_scheme {
+ uint32_t cipher;
+ uint8_t flags;
+ uint8_t hdr_len;
+ uint8_t pn_len;
+ uint8_t pn_off;
+ uint8_t key_idx_off;
+ uint8_t key_idx_mask;
+ uint8_t key_idx_shift;
+ uint8_t mic_len;
+ uint8_t hw_cipher;
+} __packed;
+
+/**
+ * struct iwx_fw_cscheme_list - a cipher scheme list
+ * @size: a number of entries
+ * @cs: cipher scheme entries
+ */
+struct iwx_fw_cscheme_list {
+ uint8_t size;
+ struct iwx_fw_cipher_scheme cs[];
+} __packed;
+
+/* v1/v2 uCode file layout */
+struct iwx_ucode_header {
+ uint32_t ver; /* major/minor/API/serial */
+ union {
+ struct {
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v1;
+ struct {
+ uint32_t build; /* build number */
+ uint32_t inst_size; /* bytes of runtime code */
+ uint32_t data_size; /* bytes of runtime data */
+ uint32_t init_size; /* bytes of init code */
+ uint32_t init_data_size; /* bytes of init data */
+ uint32_t boot_size; /* bytes of bootstrap code */
+ uint8_t data[0]; /* in same order as sizes */
+ } v2;
+ } u;
+};
+
+/*
+ * new TLV uCode file layout
+ *
+ * The new TLV file format contains TLVs, that each specify
+ * some piece of data.
+ */
+
+#define IWX_UCODE_TLV_INVALID 0 /* unused */
+#define IWX_UCODE_TLV_INST 1
+#define IWX_UCODE_TLV_DATA 2
+#define IWX_UCODE_TLV_INIT 3
+#define IWX_UCODE_TLV_INIT_DATA 4
+#define IWX_UCODE_TLV_BOOT 5
+#define IWX_UCODE_TLV_PROBE_MAX_LEN 6 /* a uint32_t value */
+#define IWX_UCODE_TLV_PAN 7
+#define IWX_UCODE_TLV_RUNT_EVTLOG_PTR 8
+#define IWX_UCODE_TLV_RUNT_EVTLOG_SIZE 9
+#define IWX_UCODE_TLV_RUNT_ERRLOG_PTR 10
+#define IWX_UCODE_TLV_INIT_EVTLOG_PTR 11
+#define IWX_UCODE_TLV_INIT_EVTLOG_SIZE 12
+#define IWX_UCODE_TLV_INIT_ERRLOG_PTR 13
+#define IWX_UCODE_TLV_ENHANCE_SENS_TBL 14
+#define IWX_UCODE_TLV_PHY_CALIBRATION_SIZE 15
+#define IWX_UCODE_TLV_WOWLAN_INST 16
+#define IWX_UCODE_TLV_WOWLAN_DATA 17
+#define IWX_UCODE_TLV_FLAGS 18
+#define IWX_UCODE_TLV_SEC_RT 19
+#define IWX_UCODE_TLV_SEC_INIT 20
+#define IWX_UCODE_TLV_SEC_WOWLAN 21
+#define IWX_UCODE_TLV_DEF_CALIB 22
+#define IWX_UCODE_TLV_PHY_SKU 23
+#define IWX_UCODE_TLV_SECURE_SEC_RT 24
+#define IWX_UCODE_TLV_SECURE_SEC_INIT 25
+#define IWX_UCODE_TLV_SECURE_SEC_WOWLAN 26
+#define IWX_UCODE_TLV_NUM_OF_CPU 27
+#define IWX_UCODE_TLV_CSCHEME 28
+#define IWX_UCODE_TLV_API_CHANGES_SET 29
+#define IWX_UCODE_TLV_ENABLED_CAPABILITIES 30
+#define IWX_UCODE_TLV_N_SCAN_CHANNELS 31
+#define IWX_UCODE_TLV_PAGING 32
+#define IWX_UCODE_TLV_SEC_RT_USNIFFER 34
+#define IWX_UCODE_TLV_SDIO_ADMA_ADDR 35
+#define IWX_UCODE_TLV_FW_VERSION 36
+#define IWX_UCODE_TLV_FW_DBG_DEST 38
+#define IWX_UCODE_TLV_FW_DBG_CONF 39
+#define IWX_UCODE_TLV_FW_DBG_TRIGGER 40
+#define IWX_UCODE_TLV_CMD_VERSIONS 48
+#define IWX_UCODE_TLV_FW_GSCAN_CAPA 50
+#define IWX_UCODE_TLV_FW_MEM_SEG 51
+#define IWX_UCODE_TLV_IML 52
+#define IWX_UCODE_TLV_FW_FMAC_API_VERSION 53
+#define IWX_UCODE_TLV_UMAC_DEBUG_ADDRS 54
+#define IWX_UCODE_TLV_LMAC_DEBUG_ADDRS 55
+#define IWX_UCODE_TLV_FW_RECOVERY_INFO 57
+#define IWX_UCODE_TLV_HW_TYPE 58
+#define IWX_UCODE_TLV_FW_FMAC_RECOVERY_INFO 59
+#define IWX_UCODE_TLV_FW_FSEQ_VERSION 60
+#define IWX_UCODE_TLV_PHY_INTEGRATION_VERSION 61
+#define IWX_UCODE_TLV_PNVM_VERSION 62
+#define IWX_UCODE_TLV_PNVM_SKU 64
+
+#define IWX_UCODE_TLV_SEC_TABLE_ADDR 66
+#define IWX_UCODE_TLV_D3_KEK_KCK_ADDR 67
+#define IWX_UCODE_TLV_CURRENT_PC 68
+
+#define IWX_UCODE_TLV_CONST_BASE 0x100
+#define IWX_UCODE_TLV_FW_NUM_STATIONS (IWX_UCODE_TLV_CONST_BASE + 0)
+#define IWX_UCODE_TLV_FW_NUM_BEACONS (IWX_UCODE_TLV_CONST_BASE + 2)
+
+#define IWX_UCODE_TLV_DEBUG_BASE 0x1000005
+#define IWX_UCODE_TLV_TYPE_DEBUG_INFO (IWX_UCODE_TLV_DEBUG_BASE + 0)
+#define IWX_UCODE_TLV_TYPE_BUFFER_ALLOCATION (IWX_UCODE_TLV_DEBUG_BASE + 1)
+#define IWX_UCODE_TLV_TYPE_HCMD (IWX_UCODE_TLV_DEBUG_BASE + 2)
+#define IWX_UCODE_TLV_TYPE_REGIONS (IWX_UCODE_TLV_DEBUG_BASE + 3)
+#define IWX_UCODE_TLV_TYPE_TRIGGERS (IWX_UCODE_TLV_DEBUG_BASE + 4)
+#define IWX_UCODE_TLV_TYPE_CONF_SET (IWX_UCODE_TLV_DEBUG_BASE + 5)
+#define IWX_UCODE_TLV_DEBUG_MAX IWX_UCODE_TLV_TYPE_CONF_SET
+
+
+struct iwx_ucode_tlv {
+ uint32_t type; /* see above */
+ uint32_t length; /* not including type/length fields */
+ uint8_t data[0];
+};
+
+struct iwx_ucode_api {
+ uint32_t api_index;
+ uint32_t api_flags;
+} __packed;
+
+struct iwx_ucode_capa {
+ uint32_t api_index;
+ uint32_t api_capa;
+} __packed;
+
+#define IWX_TLV_UCODE_MAGIC 0x0a4c5749
+
+struct iwx_tlv_ucode_header {
+ /*
+ * The TLV style ucode header is distinguished from
+ * the v1/v2 style header by first four bytes being
+ * zero, as such is an invalid combination of
+ * major/minor/API/serial versions.
+ */
+ uint32_t zero;
+ uint32_t magic;
+ uint8_t human_readable[64];
+ uint32_t ver; /* major/minor/API/serial */
+ uint32_t build;
+ uint64_t ignore;
+ /*
+ * The data contained herein has a TLV layout,
+ * see above for the TLV header and types.
+ * Note that each TLV is padded to a length
+ * that is a multiple of 4 for alignment.
+ */
+ uint8_t data[0];
+};
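+
+/*
+ * Sketch of walking the TLV data that follows the header above. Each
+ * TLV is padded to a 4-byte boundary, and type/length are little-endian;
+ * le32toh() and roundup2() (sys/endian.h, sys/param.h) are assumed
+ * available. Real parsing must also validate the zero/magic fields
+ * first and bounds-check against the firmware file size.
+ */
+static inline void
+iwx_example_walk_tlvs(const uint8_t *data, size_t len)
+{
+	while (len >= sizeof(struct iwx_ucode_tlv)) {
+		const struct iwx_ucode_tlv *tlv = (const void *)data;
+		size_t step = sizeof(*tlv) + roundup2(le32toh(tlv->length), 4);
+
+		if (step > len)
+			break;	/* truncated TLV */
+		/* dispatch on le32toh(tlv->type) here */
+		data += step;
+		len -= step;
+	}
+}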
+
+/*
+ * Registers in this file are internal, not PCI bus memory mapped.
+ * Driver accesses these via IWX_HBUS_TARG_PRPH_* registers.
+ */
+#define IWX_PRPH_BASE (0x00000)
+#define IWX_PRPH_END (0xFFFFF)
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * struct iwx_rb_status - receive buffer status
+ * host memory mapped FH registers
+ * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the index of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the index of the RX Frame
+ * which was transferred
+ */
+struct iwx_rb_status {
+ uint16_t closed_rb_num;
+ uint16_t closed_fr_num;
+ uint16_t finished_rb_num;
+ uint16_t finished_fr_nam;
+ uint32_t unused;
+} __packed;
+
+
+#define IWX_TFD_QUEUE_SIZE_MAX (256)
+#define IWX_TFD_QUEUE_SIZE_MAX_GEN3 (65536)
+/* cb size is the exponent - 3 */
+#define IWX_TFD_QUEUE_CB_SIZE(x) (IWX_RX_QUEUE_CB_SIZE(x) - 3)
+#define IWX_TFD_QUEUE_SIZE_BC_DUP (64)
+#define IWX_TFD_QUEUE_BC_SIZE (IWX_TFD_QUEUE_SIZE_MAX + \
+ IWX_TFD_QUEUE_SIZE_BC_DUP)
+#define IWX_TFD_QUEUE_BC_SIZE_GEN3_AX210 1024
+#define IWX_TFD_QUEUE_BC_SIZE_GEN3_BZ (1024 * 4)
+#define IWX_TFH_NUM_TBS 25
+
+/**
+ * struct iwx_tfh_tb - transmit buffer descriptor within a transmit frame
+ * descriptor
+ *
+ * This structure contains the DMA address and length of one transmit buffer.
+ *
+ * @tb_len: length of the tx buffer
+ * @addr: 64-bit DMA address
+ */
+struct iwx_tfh_tb {
+ uint16_t tb_len;
+ uint64_t addr;
+} __packed;
+
+/**
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * a contiguous block of 256 TFDs.
+ * For pre-22000 HW this is 256 x 128 bytes-per-TFD = 32 KBytes.
+ * For 22000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes.
+ *
+ * Each TFD contains pointer/size information for up to 25 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+
+/**
+ * struct iwx_tfh_tfd - Transmit Frame Descriptor (TFD)
+ * @num_tbs: bits 0-4 - number of active tbs
+ * bits 5-15 - reserved
+ * @tbs: transmit frame buffer descriptors, up to IWX_TFH_NUM_TBS entries
+ * @__pad: padding
+ */
+struct iwx_tfh_tfd {
+ uint16_t num_tbs;
+ struct iwx_tfh_tb tbs[IWX_TFH_NUM_TBS];
+ uint32_t __pad;
+} __packed;
+
+/* Fixed (non-configurable) rx data from phy */
+
+/**
+ * struct iwx_agn_schedq_bc_tbl scheduler byte count table
+ * base physical address provided by IWX_SCD_DRAM_BASE_ADDR
+ * @tfd_offset 0-11 - tx command byte count
+ * 12-13 - number of 64 byte chunks
+ * 14-15 - reserved
+ */
+struct iwx_agn_scd_bc_tbl {
+ uint16_t tfd_offset[IWX_TFD_QUEUE_BC_SIZE];
+} __packed;
+
+/**
+ * struct iwx_gen3_bc_tbl_entry scheduler byte count table entry gen3
+ * For AX210 and up, the table no longer needs to be contiguous in memory.
+ * @tfd_offset: 0-13 - tx command byte count
+ * 14-15 - number of 64 byte chunks
+ */
+struct iwx_gen3_bc_tbl_entry {
+ uint16_t tfd_offset;
+} __packed;
+
+/**
+ * DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in iwlwifi
+ * to allow dynamic allocation of queues on-demand, rather than allocate them
+ * statically ahead of time. Ideally, we would like to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ */
+
+/* static DQA Tx queue numbers */
+#define IWX_DQA_CMD_QUEUE 0
+#define IWX_DQA_AUX_QUEUE 1
+
+#define IWX_DQA_INJECT_MONITOR_QUEUE 2 /* used in monitor mode only */
+#define IWX_DQA_MGMT_QUEUE 1 /* default queue other modes */
+
+/* Reserve 8 DQA Tx queues for QoS data frames. */
+#define IWX_MAX_TID_COUNT 8
+#define IWX_FIRST_AGG_TX_QUEUE (IWX_DQA_MGMT_QUEUE + 1)
+#define IWX_LAST_AGG_TX_QUEUE (IWX_FIRST_AGG_TX_QUEUE + IWX_MAX_TID_COUNT - 1)
+#define IWX_NUM_TX_QUEUES (IWX_LAST_AGG_TX_QUEUE + 1)
+
+/**
+ * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * can keep track of at one time when creating block-ack chains of frames.
+ * Note that "64" matches the number of ack bits in a block-ack packet.
+ */
+#define IWX_FRAME_LIMIT 64
+
+#define IWX_TX_FIFO_BK 0
+#define IWX_TX_FIFO_BE 1
+#define IWX_TX_FIFO_VI 2
+#define IWX_TX_FIFO_VO 3
+#define IWX_TX_FIFO_MCAST 5
+#define IWX_TX_FIFO_CMD 7
+
+enum iwx_gen2_tx_fifo {
+ IWX_GEN2_TX_FIFO_CMD = 0,
+ IWX_GEN2_EDCA_TX_FIFO_BK,
+ IWX_GEN2_EDCA_TX_FIFO_BE,
+ IWX_GEN2_EDCA_TX_FIFO_VI,
+ IWX_GEN2_EDCA_TX_FIFO_VO,
+ IWX_GEN2_TRIG_TX_FIFO_BK,
+ IWX_GEN2_TRIG_TX_FIFO_BE,
+ IWX_GEN2_TRIG_TX_FIFO_VI,
+ IWX_GEN2_TRIG_TX_FIFO_VO,
+};
+
+/**
+ * TXQ config options
+ * @TX_QUEUE_CFG_ENABLE_QUEUE: enable a queue
+ * @TX_QUEUE_CFG_TFD_SHORT_FORMAT: use short TFD format
+ */
+#define IWX_TX_QUEUE_CFG_ENABLE_QUEUE (1 << 0)
+#define IWX_TX_QUEUE_CFG_TFD_SHORT_FORMAT (1 << 1)
+
+#define IWX_DEFAULT_QUEUE_SIZE IWX_TFD_QUEUE_SIZE_MAX
+
+/**
+ * struct iwx_tx_queue_cfg_cmd - txq hw scheduler config command
+ * @sta_id: station id
+ * @tid: tid of the queue
+ * @flags: see &enum iwl_tx_queue_cfg_actions
+ * @cb_size: size of TFD cyclic buffer. Value is exponent - 3.
+ * Minimum value 0 (8 TFDs), maximum value 5 (256 TFDs)
+ * @byte_cnt_addr: address of byte count table
+ * @tfdq_addr: address of TFD circular buffer
+ */
+struct iwx_tx_queue_cfg_cmd {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint16_t flags;
+ uint32_t cb_size;
+ uint64_t byte_cnt_addr;
+ uint64_t tfdq_addr;
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_tx_queue_cfg_rsp - response to txq hw scheduler config
+ * @queue_number: queue number assigned to this RA/TID
+ * @flags: set on failure
+ * @write_pointer: initial value for write pointer
+ * @reserved: reserved
+ */
+struct iwx_tx_queue_cfg_rsp {
+ uint16_t queue_number;
+ uint16_t flags;
+ uint16_t write_pointer;
+ uint16_t reserved;
+} __packed; /* TX_QUEUE_CFG_RSP_API_S_VER_2 */
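+
+/*
+ * Sketch of filling the scheduler config command above for a 256-TFD
+ * queue: cb_size is log2(queue size) - 3, so 256 TFDs encode as 5.
+ * Little-endian byte order (htole16/htole32/htole64) is assumed, as
+ * for other host commands; illustrative only.
+ */
+static inline void
+iwx_example_fill_txq_cfg(struct iwx_tx_queue_cfg_cmd *cmd,
+    uint8_t sta_id, uint8_t tid, uint64_t bc_addr, uint64_t tfdq_addr)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->sta_id = sta_id;
+	cmd->tid = tid;
+	cmd->flags = htole16(IWX_TX_QUEUE_CFG_ENABLE_QUEUE);
+	cmd->cb_size = htole32(5);	/* log2(256) - 3 */
+	cmd->byte_cnt_addr = htole64(bc_addr);
+	cmd->tfdq_addr = htole64(tfdq_addr);
+}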
+
+
+/*
+ * Commands
+ */
+#define IWX_ALIVE 0x1
+#define IWX_REPLY_ERROR 0x2
+#define IWX_INIT_COMPLETE_NOTIF 0x4
+
+/* PHY context commands */
+#define IWX_PHY_CONTEXT_CMD 0x8
+#define IWX_DBG_CFG 0x9
+
+/* UMAC scan commands */
+#define IWX_SCAN_ITERATION_COMPLETE_UMAC 0xb5
+#define IWX_SCAN_CFG_CMD 0xc
+#define IWX_SCAN_REQ_UMAC 0xd
+#define IWX_SCAN_ABORT_UMAC 0xe
+#define IWX_SCAN_COMPLETE_UMAC 0xf
+
+/* station table */
+#define IWX_ADD_STA_KEY 0x17
+#define IWX_ADD_STA 0x18
+#define IWX_REMOVE_STA 0x19
+
+/* TX */
+#define IWX_TX_CMD 0x1c
+#define IWX_TXPATH_FLUSH 0x1e
+#define IWX_MGMT_MCAST_KEY 0x1f
+
+/* scheduler config */
+#define IWX_SCD_QUEUE_CFG 0x1d
+
+/* global key */
+#define IWX_WEP_KEY 0x20
+
+/* MAC and Binding commands */
+#define IWX_MAC_CONTEXT_CMD 0x28
+#define IWX_TIME_EVENT_CMD 0x29 /* both CMD and response */
+#define IWX_TIME_EVENT_NOTIFICATION 0x2a
+#define IWX_BINDING_CONTEXT_CMD 0x2b
+#define IWX_TIME_QUOTA_CMD 0x2c
+#define IWX_NON_QOS_TX_COUNTER_CMD 0x2d
+
+/* Calibration */
+#define IWX_TEMPERATURE_NOTIFICATION 0x62
+#define IWX_CALIBRATION_CFG_CMD 0x65
+#define IWX_CALIBRATION_RES_NOTIFICATION 0x66
+#define IWX_CALIBRATION_COMPLETE_NOTIFICATION 0x67
+#define IWX_RADIO_VERSION_NOTIFICATION 0x68
+
+/* Phy */
+#define IWX_PHY_CONFIGURATION_CMD 0x6a
+
+/* Power - legacy power table command */
+#define IWX_POWER_TABLE_CMD 0x77
+#define IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION 0x78
+#define IWX_LTR_CONFIG 0xee
+
+/* NVM */
+#define IWX_NVM_ACCESS_CMD 0x88
+
+#define IWX_SET_CALIB_DEFAULT_CMD 0x8e
+
+#define IWX_BEACON_NOTIFICATION 0x90
+#define IWX_BEACON_TEMPLATE_CMD 0x91
+#define IWX_TX_ANT_CONFIGURATION_CMD 0x98
+#define IWX_BT_CONFIG 0x9b
+#define IWX_STATISTICS_CMD 0x9c
+#define IWX_STATISTICS_NOTIFICATION 0x9d
+#define IWX_REDUCE_TX_POWER_CMD 0x9f
+
+/* RF-KILL commands and notifications */
+#define IWX_CARD_STATE_CMD 0xa0
+#define IWX_CARD_STATE_NOTIFICATION 0xa1
+
+#define IWX_MISSED_BEACONS_NOTIFICATION 0xa2
+
+#define IWX_MFUART_LOAD_NOTIFICATION 0xb1
+
+/* Power - new power table command */
+#define IWX_MAC_PM_POWER_TABLE 0xa9
+
+#define IWX_REPLY_RX_PHY_CMD 0xc0
+#define IWX_REPLY_RX_MPDU_CMD 0xc1
+#define IWX_BAR_FRAME_RELEASE 0xc2
+#define IWX_FRAME_RELEASE 0xc3
+#define IWX_BA_NOTIF 0xc5
+
+/* Location Aware Regulatory */
+#define IWX_MCC_UPDATE_CMD 0xc8
+#define IWX_MCC_CHUB_UPDATE_CMD 0xc9
+
+/* BT Coex */
+#define IWX_BT_COEX_PRIO_TABLE 0xcc
+#define IWX_BT_COEX_PROT_ENV 0xcd
+#define IWX_BT_PROFILE_NOTIFICATION 0xce
+#define IWX_BT_COEX_CI 0x5d
+
+#define IWX_REPLY_SF_CFG_CMD 0xd1
+#define IWX_REPLY_BEACON_FILTERING_CMD 0xd2
+
+/* DTS measurements */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER 0xdc
+#define IWX_DTS_MEASUREMENT_NOTIFICATION 0xdd
+
+#define IWX_REPLY_DEBUG_CMD 0xf0
+#define IWX_DEBUG_LOG_MSG 0xf7
+
+#define IWX_MCAST_FILTER_CMD 0xd0
+
+/* D3 commands/notifications */
+#define IWX_D3_CONFIG_CMD 0xd3
+#define IWX_PROT_OFFLOAD_CONFIG_CMD 0xd4
+#define IWX_OFFLOADS_QUERY_CMD 0xd5
+#define IWX_REMOTE_WAKE_CONFIG_CMD 0xd6
+
+/* for WoWLAN in particular */
+#define IWX_WOWLAN_PATTERNS 0xe0
+#define IWX_WOWLAN_CONFIGURATION 0xe1
+#define IWX_WOWLAN_TSC_RSC_PARAM 0xe2
+#define IWX_WOWLAN_TKIP_PARAM 0xe3
+#define IWX_WOWLAN_KEK_KCK_MATERIAL 0xe4
+#define IWX_WOWLAN_GET_STATUSES 0xe5
+#define IWX_WOWLAN_TX_POWER_PER_DB 0xe6
+
+/* and for NetDetect */
+#define IWX_NET_DETECT_CONFIG_CMD 0x54
+#define IWX_NET_DETECT_PROFILES_QUERY_CMD 0x56
+#define IWX_NET_DETECT_PROFILES_CMD 0x57
+#define IWX_NET_DETECT_HOTSPOTS_CMD 0x58
+#define IWX_NET_DETECT_HOTSPOTS_QUERY_CMD 0x59
+
+/* system group command IDs */
+#define IWX_FSEQ_VER_MISMATCH_NOTIFICATION 0xff
+
+#define IWX_REPLY_MAX 0xff
+
+/* PHY_OPS subcommand IDs */
+#define IWX_CMD_DTS_MEASUREMENT_TRIGGER_WIDE 0x0
+#define IWX_CTDP_CONFIG_CMD 0x03
+#define IWX_TEMP_REPORTING_THRESHOLDS_CMD 0x04
+#define IWX_CT_KILL_NOTIFICATION 0xFE
+#define IWX_DTS_MEASUREMENT_NOTIF_WIDE 0xFF
+
+/* command groups */
+#define IWX_LEGACY_GROUP 0x0
+#define IWX_LONG_GROUP 0x1
+#define IWX_SYSTEM_GROUP 0x2
+#define IWX_MAC_CONF_GROUP 0x3
+#define IWX_PHY_OPS_GROUP 0x4
+#define IWX_DATA_PATH_GROUP 0x5
+#define IWX_PROT_OFFLOAD_GROUP 0xb
+#define IWX_REGULATORY_AND_NVM_GROUP 0xc
+
+/* SYSTEM_GROUP group subcommand IDs */
+
+#define IWX_SHARED_MEM_CFG_CMD 0x00
+#define IWX_SOC_CONFIGURATION_CMD 0x01
+#define IWX_INIT_EXTENDED_CFG_CMD 0x03
+#define IWX_FW_ERROR_RECOVERY_CMD 0x07
+
+/* MAC_CONF group subcommand IDs */
+#define IWX_SESSION_PROTECTION_CMD 0x05
+#define IWX_SESSION_PROTECTION_NOTIF 0xfb
+
+/* DATA_PATH group subcommand IDs */
+#define IWX_DQA_ENABLE_CMD 0x00
+#define IWX_RLC_CONFIG_CMD 0x08
+#define IWX_TLC_MNG_CONFIG_CMD 0x0f
+#define IWX_RX_BAID_ALLOCATION_CONFIG_CMD 0x16
+#define IWX_SCD_QUEUE_CONFIG_CMD 0x17
+#define IWX_RX_NO_DATA_NOTIF 0xf5
+#define IWX_TLC_MNG_UPDATE_NOTIF 0xf7
+
+/* REGULATORY_AND_NVM group subcommand IDs */
+#define IWX_NVM_ACCESS_COMPLETE 0x00
+#define IWX_NVM_GET_INFO 0x02
+#define IWX_PNVM_INIT_COMPLETE 0xfe
+
+/*
+ * struct iwx_dqa_enable_cmd
+ * @cmd_queue: the TXQ number of the command queue
+ */
+struct iwx_dqa_enable_cmd {
+ uint32_t cmd_queue;
+} __packed; /* DQA_CONTROL_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_cmd_response - generic response struct for most commands
+ * @status: status of the command asked, changes for each one
+ */
+struct iwx_cmd_response {
+ uint32_t status;
+};
+
+/*
+ * struct iwx_tx_ant_cfg_cmd
+ * @valid: valid antenna configuration
+ */
+struct iwx_tx_ant_cfg_cmd {
+ uint32_t valid;
+} __packed;
+
+/*
+ * Calibration control struct.
+ * Sent as part of the phy configuration command.
+ * @flow_trigger: bitmap for which calibrations to perform according to
+ * flow triggers.
+ * @event_trigger: bitmap for which calibrations to perform according to
+ * event triggers.
+ */
+struct iwx_calib_ctrl {
+ uint32_t flow_trigger;
+ uint32_t event_trigger;
+} __packed;
+
+/* This defines the bitmap of various calibrations to enable in both
+ * init ucode and runtime ucode through IWX_CALIBRATION_CFG_CMD.
+ */
+#define IWX_CALIB_CFG_XTAL_IDX (1 << 0)
+#define IWX_CALIB_CFG_TEMPERATURE_IDX (1 << 1)
+#define IWX_CALIB_CFG_VOLTAGE_READ_IDX (1 << 2)
+#define IWX_CALIB_CFG_PAPD_IDX (1 << 3)
+#define IWX_CALIB_CFG_TX_PWR_IDX (1 << 4)
+#define IWX_CALIB_CFG_DC_IDX (1 << 5)
+#define IWX_CALIB_CFG_BB_FILTER_IDX (1 << 6)
+#define IWX_CALIB_CFG_LO_LEAKAGE_IDX (1 << 7)
+#define IWX_CALIB_CFG_TX_IQ_IDX (1 << 8)
+#define IWX_CALIB_CFG_TX_IQ_SKEW_IDX (1 << 9)
+#define IWX_CALIB_CFG_RX_IQ_IDX (1 << 10)
+#define IWX_CALIB_CFG_RX_IQ_SKEW_IDX (1 << 11)
+#define IWX_CALIB_CFG_SENSITIVITY_IDX (1 << 12)
+#define IWX_CALIB_CFG_CHAIN_NOISE_IDX (1 << 13)
+#define IWX_CALIB_CFG_DISCONNECTED_ANT_IDX (1 << 14)
+#define IWX_CALIB_CFG_ANT_COUPLING_IDX (1 << 15)
+#define IWX_CALIB_CFG_DAC_IDX (1 << 16)
+#define IWX_CALIB_CFG_ABS_IDX (1 << 17)
+#define IWX_CALIB_CFG_AGC_IDX (1 << 18)
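+
+/*
+ * Example flow-trigger bitmap enabling crystal, temperature, and TX
+ * power calibrations for struct iwx_calib_ctrl above; the chosen
+ * values are illustrative only.
+ */
+static inline uint32_t
+iwx_example_calib_flow_triggers(void)
+{
+	return (IWX_CALIB_CFG_XTAL_IDX | IWX_CALIB_CFG_TEMPERATURE_IDX |
+	    IWX_CALIB_CFG_TX_PWR_IDX);
+}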
+
+/*
+ * Phy configuration command.
+ */
+struct iwx_phy_cfg_cmd {
+ uint32_t phy_cfg;
+ struct iwx_calib_ctrl calib_control;
+} __packed;
+
+#define IWX_PHY_CFG_RADIO_TYPE ((1 << 0) | (1 << 1))
+#define IWX_PHY_CFG_RADIO_STEP ((1 << 2) | (1 << 3))
+#define IWX_PHY_CFG_RADIO_DASH ((1 << 4) | (1 << 5))
+#define IWX_PHY_CFG_PRODUCT_NUMBER ((1 << 6) | (1 << 7))
+#define IWX_PHY_CFG_TX_CHAIN_A (1 << 8)
+#define IWX_PHY_CFG_TX_CHAIN_B (1 << 9)
+#define IWX_PHY_CFG_TX_CHAIN_C (1 << 10)
+#define IWX_PHY_CFG_RX_CHAIN_A (1 << 12)
+#define IWX_PHY_CFG_RX_CHAIN_B (1 << 13)
+#define IWX_PHY_CFG_RX_CHAIN_C (1 << 14)
+
+#define IWX_MAX_DTS_TRIPS 8
+
+/**
+ * struct iwx_ct_kill_notif - CT-kill entry notification
+ *
+ * @temperature: the current temperature in celsius
+ * @reserved: reserved
+ */
+struct iwx_ct_kill_notif {
+ uint16_t temperature;
+ uint16_t reserved;
+} __packed; /* GRP_PHY_CT_KILL_NTF */
+
+/**
+ * struct iwx_temp_report_ths_cmd - set temperature thresholds
+ * (IWX_TEMP_REPORTING_THRESHOLDS_CMD)
+ *
+ * @num_temps: number of temperature thresholds passed
+ * @thresholds: array with the thresholds to be configured
+ */
+struct iwx_temp_report_ths_cmd {
+ uint32_t num_temps;
+ uint16_t thresholds[IWX_MAX_DTS_TRIPS];
+} __packed; /* GRP_PHY_TEMP_REPORTING_THRESHOLDS_CMD */
+
+/*
+ * channel flags in NVM
+ * @IWX_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
+ * @IWX_NVM_CHANNEL_IBSS: usable as an IBSS channel
+ * @IWX_NVM_CHANNEL_ACTIVE: active scanning allowed
+ * @IWX_NVM_CHANNEL_RADAR: radar detection required
+ * @IWX_NVM_CHANNEL_INDOOR_ONLY: only indoor use is allowed
+ * @IWX_NVM_CHANNEL_GO_CONCURRENT: GO operation is allowed when connected to BSS
+ * on same channel on 2.4 or same UNII band on 5.2
+ * @IWX_NVM_CHANNEL_DFS: dynamic freq selection candidate
+ * @IWX_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
+ * @IWX_NVM_CHANNEL_DC_HIGH: DC HIGH required/allowed (?)
+ */
+#define IWX_NVM_CHANNEL_VALID (1 << 0)
+#define IWX_NVM_CHANNEL_IBSS (1 << 1)
+#define IWX_NVM_CHANNEL_ACTIVE (1 << 3)
+#define IWX_NVM_CHANNEL_RADAR (1 << 4)
+#define IWX_NVM_CHANNEL_INDOOR_ONLY (1 << 5)
+#define IWX_NVM_CHANNEL_GO_CONCURRENT (1 << 6)
+#define IWX_NVM_CHANNEL_DFS (1 << 7)
+#define IWX_NVM_CHANNEL_WIDE (1 << 8)
+#define IWX_NVM_CHANNEL_40MHZ (1 << 9)
+#define IWX_NVM_CHANNEL_80MHZ (1 << 10)
+#define IWX_NVM_CHANNEL_160MHZ (1 << 11)
+#define IWX_NVM_CHANNEL_DC_HIGH (1 << 12)
+
+/**
+ * struct iwx_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+ * @reserved: reserved
+ */
+struct iwx_nvm_access_complete_cmd {
+ uint32_t reserved;
+} __packed; /* NVM_ACCESS_COMPLETE_CMD_API_S_VER_1 */
+
+/*
+ * struct iwx_nvm_get_info - request to get NVM data
+ */
+struct iwx_nvm_get_info {
+ uint32_t reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
+
+/**
+ * enum iwx_nvm_info_general_flags - flags in NVM_GET_INFO resp
+ * @NVM_GENERAL_FLAGS_EMPTY_OTP: 1 if OTP is empty
+ */
+#define IWX_NVM_GENERAL_FLAGS_EMPTY_OTP (1 << 0)
+
+/**
+ * struct iwx_nvm_get_info_general - general NVM data
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+ * @n_hw_addrs: number of reserved MAC addresses
+ */
+struct iwx_nvm_get_info_general {
+ uint32_t flags;
+ uint16_t nvm_version;
+ uint8_t board_type;
+ uint8_t n_hw_addrs;
+} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
+
+/**
+ * iwx_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
+ * @NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED: true if 2.4 band enabled
+ * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
+ * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
+ * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
+ * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
+ * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
+ * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
+ * @NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED: true if API lock enabled
+ */
+#define IWX_NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED (1 << 0)
+#define IWX_NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED (1 << 1)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11N_ENABLED (1 << 2)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11AC_ENABLED (1 << 3)
+#define IWX_NVM_MAC_SKU_FLAGS_802_11AX_ENABLED (1 << 4)
+#define IWX_NVM_MAC_SKU_FLAGS_MIMO_DISABLED (1 << 5)
+#define IWX_NVM_MAC_SKU_FLAGS_WAPI_ENABLED (1 << 8)
+#define IWX_NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED (1 << 14)
+#define IWX_NVM_MAC_SKU_FLAGS_API_LOCK_ENABLED (1 << 15)
+
+/**
+ * struct iwx_nvm_get_info_sku - mac information
+ * @mac_sku_flags: flags for SKU, see &enum iwl_nvm_mac_sku_flags
+ */
+struct iwx_nvm_get_info_sku {
+ uint32_t mac_sku_flags;
+} __packed; /* REGULATORY_NVM_GET_INFO_MAC_SKU_SECTION_S_VER_2 */
+
+/**
+ * struct iwx_nvm_get_info_phy - phy information
+ * @tx_chains: BIT 0 chain A, BIT 1 chain B
+ * @rx_chains: BIT 0 chain A, BIT 1 chain B
+ */
+struct iwx_nvm_get_info_phy {
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+#define IWX_NUM_CHANNELS_V1 51
+#define IWX_NUM_CHANNELS 110
+
+/**
+ * struct iwx_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @channel_profile: regulatory data of this channel
+ * @reserved: reserved
+ */
+struct iwx_nvm_get_info_regulatory_v1 {
+ uint32_t lar_enabled;
+ uint16_t channel_profile[IWX_NUM_CHANNELS_V1];
+ uint16_t reserved;
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+/**
+ * struct iwx_nvm_get_info_regulatory - regulatory information
+ * @lar_enabled: is LAR enabled
+ * @n_channels: number of valid channels in the array
+ * @channel_profile: regulatory data of this channel
+ */
+struct iwx_nvm_get_info_regulatory {
+ uint32_t lar_enabled;
+ uint32_t n_channels;
+ uint32_t channel_profile[IWX_NUM_CHANNELS];
+} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_2 */
+
+/**
+ * struct iwx_nvm_get_info_rsp_v3 - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwx_nvm_get_info_rsp_v3 {
+ struct iwx_nvm_get_info_general general;
+ struct iwx_nvm_get_info_sku mac_sku;
+ struct iwx_nvm_get_info_phy phy_sku;
+ struct iwx_nvm_get_info_regulatory_v1 regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+/**
+ * struct iwx_nvm_get_info_rsp - response to get NVM data
+ * @general: general NVM data
+ * @mac_sku: data relating to MAC sku
+ * @phy_sku: data relating to PHY sku
+ * @regulatory: regulatory data
+ */
+struct iwx_nvm_get_info_rsp {
+ struct iwx_nvm_get_info_general general;
+ struct iwx_nvm_get_info_sku mac_sku;
+ struct iwx_nvm_get_info_phy phy_sku;
+ struct iwx_nvm_get_info_regulatory regulatory;
+} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_4 */
+
+
+#define IWX_ALIVE_STATUS_ERR 0xDEAD
+#define IWX_ALIVE_STATUS_OK 0xCAFE
+
+struct iwx_lmac_debug_addrs {
+ uint32_t error_event_table_ptr; /* SRAM address for error log */
+ uint32_t log_event_table_ptr; /* SRAM address for LMAC event log */
+ uint32_t cpu_register_ptr;
+ uint32_t dbgm_config_ptr;
+ uint32_t alive_counter_ptr;
+ uint32_t scd_base_ptr; /* SRAM address for SCD */
+ uint32_t st_fwrd_addr; /* pointer to Store and forward */
+ uint32_t st_fwrd_size;
+} __packed; /* UCODE_DEBUG_ADDRS_API_S_VER_2 */
+
+struct iwx_lmac_alive {
+ uint32_t ucode_major;
+ uint32_t ucode_minor;
+ uint8_t ver_subtype;
+ uint8_t ver_type;
+ uint8_t mac;
+ uint8_t opt;
+ uint32_t timestamp;
+ struct iwx_lmac_debug_addrs dbg_ptrs;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_3 */
+
+struct iwx_umac_debug_addrs {
+ uint32_t error_info_addr; /* SRAM address for UMAC error log */
+ uint32_t dbg_print_buff_addr;
+} __packed; /* UMAC_DEBUG_ADDRS_API_S_VER_1 */
+
+struct iwx_umac_alive {
+ uint32_t umac_major; /* UMAC version: major */
+ uint32_t umac_minor; /* UMAC version: minor */
+ struct iwx_umac_debug_addrs dbg_ptrs;
+} __packed; /* UMAC_ALIVE_DATA_API_S_VER_2 */
+
+struct iwx_alive_resp_v4 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+} __packed; /* ALIVE_RES_API_S_VER_4 */
+
+struct iwx_sku_id {
+ uint32_t data[3];
+} __packed; /* SKU_ID_API_S_VER_1 */
+
+struct iwx_alive_resp_v5 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+ struct iwx_sku_id sku_id;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_5 */
+
+struct iwx_imr_alive_info {
+ uint64_t base_addr;
+ uint32_t size;
+ uint32_t enabled;
+} __packed; /* IMR_ALIVE_INFO_API_S_VER_1 */
+
+struct iwx_alive_resp_v6 {
+ uint16_t status;
+ uint16_t flags;
+ struct iwx_lmac_alive lmac_data[2];
+ struct iwx_umac_alive umac_data;
+ struct iwx_sku_id sku_id;
+ struct iwx_imr_alive_info imr;
+} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
+
+
+#define IWX_SOC_CONFIG_CMD_FLAGS_DISCRETE (1 << 0)
+#define IWX_SOC_CONFIG_CMD_FLAGS_LOW_LATENCY (1 << 1)
+
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_MASK 0xc
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_NONE 0
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_200 1
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_2500 2
+#define IWX_SOC_FLAGS_LTR_APPLY_DELAY_1820 3
+
+/**
+ * struct iwx_soc_configuration_cmd - Set device stabilization latency
+ *
+ * @flags: soc settings flags. In VER_1, we can only set the DISCRETE
+ * flag, because the FW treats the whole value as an integer. In
+ * VER_2, we can set the bits independently.
+ * @latency: time for SOC to ensure stable power & XTAL
+ */
+struct iwx_soc_configuration_cmd {
+ uint32_t flags;
+ uint32_t latency;
+} __packed; /*
+ * SOC_CONFIGURATION_CMD_S_VER_1 (see description above)
+ * SOC_CONFIGURATION_CMD_S_VER_2
+ */
+
+/**
+ * commands driver may send before finishing init flow
+ * @IWX_INIT_DEBUG_CFG: driver is going to send debug config command
+ * @IWX_INIT_NVM: driver is going to send NVM_ACCESS commands
+ */
+#define IWX_INIT_DEBUG_CFG (1 << 0)
+#define IWX_INIT_NVM (1 << 1)
+
+/**
+ * struct iwx_init_extended_cfg_cmd - mark what commands ucode should wait for
+ * before finishing init flows
+ * @init_flags: IWX_INIT_* flag bits
+ */
+struct iwx_init_extended_cfg_cmd {
+ uint32_t init_flags;
+} __packed; /* INIT_EXTENDED_CFG_CMD_API_S_VER_1 */
+
+/* Error response/notification */
+#define IWX_FW_ERR_UNKNOWN_CMD 0x0
+#define IWX_FW_ERR_INVALID_CMD_PARAM 0x1
+#define IWX_FW_ERR_SERVICE 0x2
+#define IWX_FW_ERR_ARC_MEMORY 0x3
+#define IWX_FW_ERR_ARC_CODE 0x4
+#define IWX_FW_ERR_WATCH_DOG 0x5
+#define IWX_FW_ERR_WEP_GRP_KEY_INDX 0x10
+#define IWX_FW_ERR_WEP_KEY_SIZE 0x11
+#define IWX_FW_ERR_OBSOLETE_FUNC 0x12
+#define IWX_FW_ERR_UNEXPECTED 0xFE
+#define IWX_FW_ERR_FATAL 0xFF
+
+/**
+ * struct iwx_error_resp - FW error indication
+ * ( IWX_REPLY_ERROR = 0x2 )
+ * @error_type: one of IWX_FW_ERR_*
+ * @cmd_id: the command ID for which the error occurred
+ * @bad_cmd_seq_num: sequence number of the erroneous command
+ * @error_service: which service created the error, applicable only if
+ * error_type = 2, otherwise 0
+ * @timestamp: TSF in usecs.
+ */
+struct iwx_error_resp {
+ uint32_t error_type;
+ uint8_t cmd_id;
+ uint8_t reserved1;
+ uint16_t bad_cmd_seq_num;
+ uint32_t error_service;
+ uint64_t timestamp;
+} __packed;
+
+enum iwx_fw_dbg_reg_operator {
+ CSR_ASSIGN,
+ CSR_SETBIT,
+ CSR_CLEARBIT,
+
+ PRPH_ASSIGN,
+ PRPH_SETBIT,
+ PRPH_CLEARBIT,
+
+ INDIRECT_ASSIGN,
+ INDIRECT_SETBIT,
+ INDIRECT_CLEARBIT,
+
+ PRPH_BLOCKBIT,
+};
+
+/**
+ * struct iwx_fw_dbg_reg_op - an operation on a register
+ *
+ * @op: &enum iwx_fw_dbg_reg_operator
+ * @addr: offset of the register
+ * @val: value
+ */
+struct iwx_fw_dbg_reg_op {
+ uint8_t op;
+ uint8_t reserved[3];
+ uint32_t addr;
+ uint32_t val;
+} __packed;
+
+/**
+ * enum iwx_fw_dbg_monitor_mode - available monitor recording modes
+ *
+ * @SMEM_MODE: monitor stores the data in SMEM
+ * @EXTERNAL_MODE: monitor stores the data in allocated DRAM
+ * @MARBH_MODE: monitor stores the data in MARBH buffer
+ * @MIPI_MODE: monitor outputs the data through the MIPI interface
+ */
+enum iwx_fw_dbg_monitor_mode {
+ SMEM_MODE = 0,
+ EXTERNAL_MODE = 1,
+ MARBH_MODE = 2,
+ MIPI_MODE = 3,
+};
+
+/**
+ * struct iwx_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: the memory segment type to record
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWX_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwx_fw_dbg_mem_seg_tlv {
+ uint32_t data_type;
+ uint32_t ofs;
+ uint32_t len;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_dest_tlv_v1 - configures the destination of the debug data
+ *
+ * @version: version of the TLV - currently 0
+ * @monitor_mode: &enum iwx_fw_dbg_monitor_mode
+ * @size_power: buffer size will be 2^(size_power + 11)
+ * @base_reg: addr of the base addr register (PRPH)
+ * @end_reg: addr of the end addr register (PRPH)
+ * @write_ptr_reg: the addr of the reg of the write pointer
+ * @wrap_count: the addr of the reg of the wrap_count
+ * @base_shift: shift right of the base addr reg
+ * @end_shift: shift right of the end addr reg
+ * @reg_ops: array of register operations
+ *
+ * This parses IWX_UCODE_TLV_FW_DBG_DEST
+ */
+struct iwx_fw_dbg_dest_tlv_v1 {
+ uint8_t version;
+ uint8_t monitor_mode;
+ uint8_t size_power;
+ uint8_t reserved;
+ uint32_t base_reg;
+ uint32_t end_reg;
+ uint32_t write_ptr_reg;
+ uint32_t wrap_count;
+ uint8_t base_shift;
+ uint8_t end_shift;
+ struct iwx_fw_dbg_reg_op reg_ops[0];
+} __packed;
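+
+/*
+ * A minimal helper (illustrative sketch; the function name is hypothetical
+ * and not part of the firmware API) showing how the monitor buffer size is
+ * derived from @size_power as 2^(size_power + 11), i.e. a size_power of 0
+ * selects a 2 KB buffer:
+ */
+static inline uint32_t
+iwx_fw_dbg_buf_size(const struct iwx_fw_dbg_dest_tlv_v1 *dest)
+{
+	return (1U << (dest->size_power + 11));
+}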
+
+/* Mask of the register for defining the LDBG MAC2SMEM buffer SMEM size */
+#define IWX_LDBG_M2S_BUF_SIZE_MSK 0x0fff0000
+/* Mask of the register for defining the LDBG MAC2SMEM SMEM base address */
+#define IWX_LDBG_M2S_BUF_BA_MSK 0x00000fff
+/* The smem buffer chunks are in units of 256 bits */
+#define IWX_M2S_UNIT_SIZE 0x100
+
+struct iwx_fw_dbg_dest_tlv {
+ uint8_t version;
+ uint8_t monitor_mode;
+ uint8_t size_power;
+ uint8_t reserved;
+ uint32_t cfg_reg;
+ uint32_t write_ptr_reg;
+ uint32_t wrap_count;
+ uint8_t base_shift;
+ uint8_t size_shift;
+ struct iwx_fw_dbg_reg_op reg_ops[0];
+} __packed;
+
+struct iwx_fw_dbg_conf_hcmd {
+ uint8_t id;
+ uint8_t reserved;
+ uint16_t len;
+ uint8_t data[0];
+} __packed;
+
+/**
+ * enum iwx_fw_dbg_trigger_mode - triggers functionalities
+ *
+ * @IWX_FW_DBG_TRIGGER_START: when the trigger occurs, re-configure the dbg
+ * mechanism
+ * @IWX_FW_DBG_TRIGGER_STOP: when the trigger occurs, pull the dbg data
+ * @IWX_FW_DBG_TRIGGER_MONITOR_ONLY: when the trigger occurs, collect only
+ * monitor data
+ */
+enum iwx_fw_dbg_trigger_mode {
+ IWX_FW_DBG_TRIGGER_START = (1 << 0),
+ IWX_FW_DBG_TRIGGER_STOP = (1 << 1),
+ IWX_FW_DBG_TRIGGER_MONITOR_ONLY = (1 << 2),
+};
+
+/**
+ * enum iwx_fw_dbg_trigger_flags - the flags supported by wrt triggers
+ * @IWX_FW_DBG_FORCE_RESTART: force a firmware restart
+ */
+enum iwx_fw_dbg_trigger_flags {
+ IWX_FW_DBG_FORCE_RESTART = (1 << 0),
+};
+
+/**
+ * enum iwx_fw_dbg_trigger_vif_type - define the VIF type for a trigger
+ * @IWX_FW_DBG_CONF_VIF_ANY: any vif type
+ * @IWX_FW_DBG_CONF_VIF_IBSS: IBSS mode
+ * @IWX_FW_DBG_CONF_VIF_STATION: BSS mode
+ * @IWX_FW_DBG_CONF_VIF_AP: AP mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_CLIENT: P2P Client mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_GO: P2P GO mode
+ * @IWX_FW_DBG_CONF_VIF_P2P_DEVICE: P2P device
+ * @IWX_FW_DBG_CONF_VIF_NAN: NAN device
+ */
+enum iwx_fw_dbg_trigger_vif_type {
+ IWX_FW_DBG_CONF_VIF_ANY = 0,
+ IWX_FW_DBG_CONF_VIF_IBSS = 1,
+ IWX_FW_DBG_CONF_VIF_STATION = 2,
+ IWX_FW_DBG_CONF_VIF_AP = 3,
+ IWX_FW_DBG_CONF_VIF_P2P_CLIENT = 8,
+ IWX_FW_DBG_CONF_VIF_P2P_GO = 9,
+ IWX_FW_DBG_CONF_VIF_P2P_DEVICE = 10,
+ IWX_FW_DBG_CONF_VIF_NAN = 12,
+};
+
+/**
+ * enum iwx_fw_dbg_trigger - triggers available
+ *
+ * @IWX_FW_DBG_TRIGGER_USER: trigger log collection by user
+ * This should not be defined as a trigger to the driver, but a value the
+ * driver should set to indicate that the trigger was initiated by the
+ * user.
+ * @IWX_FW_DBG_TRIGGER_FW_ASSERT: trigger log collection when the firmware
+ * asserts.
+ * @IWX_FW_DBG_TRIGGER_MISSED_BEACONS: trigger log collection when beacons are
+ * missed.
+ * @IWX_FW_DBG_TRIGGER_CHANNEL_SWITCH: trigger log collection upon channel
+ * switch.
+ * @IWX_FW_DBG_TRIGGER_FW_NOTIF: trigger log collection when the firmware sends
+ * a command response or a notification.
+ * @IWX_FW_DBG_TRIGGER_MLME: trigger log collection upon MLME event.
+ * @IWX_FW_DBG_TRIGGER_STATS: trigger log collection upon statistics threshold.
+ * @IWX_FW_DBG_TRIGGER_RSSI: trigger log collection when the RSSI of the beacon
+ * goes below a threshold.
+ * @IWX_FW_DBG_TRIGGER_TXQ_TIMERS: configures the timers for the Tx queue hang
+ * detection.
+ * @IWX_FW_DBG_TRIGGER_TIME_EVENT: trigger log collection upon time-event
+ * related events.
+ * @IWX_FW_DBG_TRIGGER_BA: trigger log collection upon BlockAck related events.
+ * @IWX_FW_DBG_TRIGGER_TX_LATENCY: trigger log collection when the Tx latency
+ * goes above a threshold.
+ * @IWX_FW_DBG_TRIGGER_TDLS: trigger log collection upon TDLS related events.
+ * @IWX_FW_DBG_TRIGGER_TX_STATUS: trigger log collection upon Tx status when
+ * the firmware sends a Tx reply.
+ * @IWX_FW_DBG_TRIGGER_USER_EXTENDED: trigger log collection upon user space
+ * request.
+ * @IWX_FW_DBG_TRIGGER_ALIVE_TIMEOUT: trigger log collection if the alive flow
+ * times out.
+ * @IWX_FW_DBG_TRIGGER_DRIVER: trigger log collection upon a flow failure
+ * in the driver.
+ */
+enum iwx_fw_dbg_trigger {
+ IWX_FW_DBG_TRIGGER_INVALID = 0,
+ IWX_FW_DBG_TRIGGER_USER,
+ IWX_FW_DBG_TRIGGER_FW_ASSERT,
+ IWX_FW_DBG_TRIGGER_MISSED_BEACONS,
+ IWX_FW_DBG_TRIGGER_CHANNEL_SWITCH,
+ IWX_FW_DBG_TRIGGER_FW_NOTIF,
+ IWX_FW_DBG_TRIGGER_MLME,
+ IWX_FW_DBG_TRIGGER_STATS,
+ IWX_FW_DBG_TRIGGER_RSSI,
+ IWX_FW_DBG_TRIGGER_TXQ_TIMERS,
+ IWX_FW_DBG_TRIGGER_TIME_EVENT,
+ IWX_FW_DBG_TRIGGER_BA,
+ IWX_FW_DBG_TRIGGER_TX_LATENCY,
+ IWX_FW_DBG_TRIGGER_TDLS,
+ IWX_FW_DBG_TRIGGER_TX_STATUS,
+ IWX_FW_DBG_TRIGGER_USER_EXTENDED,
+ IWX_FW_DBG_TRIGGER_ALIVE_TIMEOUT,
+ IWX_FW_DBG_TRIGGER_DRIVER,
+
+ /* must be last */
+ IWX_FW_DBG_TRIGGER_MAX,
+};
+
+
+/**
+ * struct iwx_fw_dbg_trigger_tlv - a TLV that describes the trigger
+ * @id: &enum iwx_fw_dbg_trigger
+ * @vif_type: &enum iwx_fw_dbg_trigger_vif_type
+ * @stop_conf_ids: bitmap of configurations this trigger relates to.
+ * if the mode is %IWX_FW_DBG_TRIGGER_STOP, then if the bit corresponding
+ * to the currently running configuration is set, the data should be
+ * collected.
+ * @stop_delay: how many milliseconds to wait before collecting the data
+ * after the STOP trigger fires.
+ * @mode: &enum iwx_fw_dbg_trigger_mode - can be stop, start, or both
+ * @start_conf_id: if mode is %IWX_FW_DBG_TRIGGER_START, this defines what
+ * configuration should be applied when the trigger kicks in.
+ * @occurrences: number of occurrences. 0 means the trigger will never fire.
+ * @trig_dis_ms: the time, in milliseconds, after an occurrence of this
+ * trigger in which another occurrence should be ignored.
+ * @flags: &enum iwx_fw_dbg_trigger_flags
+ */
+struct iwx_fw_dbg_trigger_tlv {
+ uint32_t id;
+ uint32_t vif_type;
+ uint32_t stop_conf_ids;
+ uint32_t stop_delay;
+ uint8_t mode;
+ uint8_t start_conf_id;
+ uint16_t occurrences;
+ uint16_t trig_dis_ms;
+ uint8_t flags;
+ uint8_t reserved[5];
+
+ uint8_t data[0];
+} __packed;
+
+#define IWX_FW_DBG_START_FROM_ALIVE 0
+#define IWX_FW_DBG_CONF_MAX 32
+#define IWX_FW_DBG_INVALID 0xff
+
+/**
+ * struct iwx_fw_dbg_trigger_missed_bcon - configures trigger for missed beacons
+ * @stop_consec_missed_bcon: stop recording if threshold is crossed.
+ * @stop_consec_missed_bcon_since_rx: stop recording if threshold is crossed.
+ * @start_consec_missed_bcon: start recording if threshold is crossed.
+ * @start_consec_missed_bcon_since_rx: start recording if threshold is crossed.
+ * @reserved1: reserved
+ * @reserved2: reserved
+ */
+struct iwx_fw_dbg_trigger_missed_bcon {
+ uint32_t stop_consec_missed_bcon;
+ uint32_t stop_consec_missed_bcon_since_rx;
+ uint32_t reserved2[2];
+ uint32_t start_consec_missed_bcon;
+ uint32_t start_consec_missed_bcon_since_rx;
+ uint32_t reserved1[2];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_cmd - configures trigger for messages from FW.
+ * @cmds: the list of commands to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_cmd {
+ struct cmd {
+ uint8_t cmd_id;
+ uint8_t group_id;
+ } __packed cmds[16];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_stats - configures trigger for statistics
+ * @stop_offset: the offset of the value to be monitored
+ * @stop_threshold: the threshold above which to collect
+ * @start_offset: the offset of the value to be monitored
+ * @start_threshold: the threshold above which to start recording
+ */
+struct iwx_fw_dbg_trigger_stats {
+ uint32_t stop_offset;
+ uint32_t stop_threshold;
+ uint32_t start_offset;
+ uint32_t start_threshold;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_low_rssi - trigger for low beacon RSSI
+ * @rssi: RSSI value to trigger at
+ */
+struct iwx_fw_dbg_trigger_low_rssi {
+ uint32_t rssi;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_mlme - configures trigger for mlme events
+ * @stop_auth_denied: number of denied authentication to collect
+ * @stop_auth_timeout: number of authentication timeout to collect
+ * @stop_rx_deauth: number of Rx deauth before to collect
+ * @stop_tx_deauth: number of Tx deauth before to collect
+ * @stop_assoc_denied: number of denied association to collect
+ * @stop_assoc_timeout: number of association timeout to collect
+ * @stop_connection_loss: number of connection loss to collect
+ * @start_auth_denied: number of denied authentication to start recording
+ * @start_auth_timeout: number of authentication timeout to start recording
+ * @start_rx_deauth: number of Rx deauth to start recording
+ * @start_tx_deauth: number of Tx deauth to start recording
+ * @start_assoc_denied: number of denied association to start recording
+ * @start_assoc_timeout: number of association timeout to start recording
+ * @start_connection_loss: number of connection loss to start recording
+ */
+struct iwx_fw_dbg_trigger_mlme {
+ uint8_t stop_auth_denied;
+ uint8_t stop_auth_timeout;
+ uint8_t stop_rx_deauth;
+ uint8_t stop_tx_deauth;
+
+ uint8_t stop_assoc_denied;
+ uint8_t stop_assoc_timeout;
+ uint8_t stop_connection_loss;
+ uint8_t reserved;
+
+ uint8_t start_auth_denied;
+ uint8_t start_auth_timeout;
+ uint8_t start_rx_deauth;
+ uint8_t start_tx_deauth;
+
+ uint8_t start_assoc_denied;
+ uint8_t start_assoc_timeout;
+ uint8_t start_connection_loss;
+ uint8_t reserved2;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_txq_timer - configures the Tx queue's timer
+ * @command_queue: timeout for the command queue in ms
+ * @bss: timeout for the queues of a BSS (except for TDLS queues) in ms
+ * @softap: timeout for the queues of a softAP in ms
+ * @p2p_go: timeout for the queues of a P2P GO in ms
+ * @p2p_client: timeout for the queues of a P2P client in ms
+ * @p2p_device: timeout for the queues of a P2P device in ms
+ * @ibss: timeout for the queues of an IBSS in ms
+ * @tdls: timeout for the queues of a TDLS station in ms
+ */
+struct iwx_fw_dbg_trigger_txq_timer {
+ uint32_t command_queue;
+ uint32_t bss;
+ uint32_t softap;
+ uint32_t p2p_go;
+ uint32_t p2p_client;
+ uint32_t p2p_device;
+ uint32_t ibss;
+ uint32_t tdls;
+ uint32_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_time_event - configures a time event trigger
+ * @time_events: a list of tuples <id, action_bitmap>. The driver will issue a
+ * trigger each time a time event notification that relates to time event
+ * id with one of the actions in the bitmap is received and
+ * BIT(notif->status) is set in status_bitmap.
+ *
+ */
+struct iwx_fw_dbg_trigger_time_event {
+ struct {
+ uint32_t id;
+ uint32_t action_bitmap;
+ uint32_t status_bitmap;
+ } __packed time_events[16];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_ba - configures BlockAck related trigger
+ * @rx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is started.
+ * @rx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when an Rx BlockAck session is stopped.
+ * @tx_ba_start: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is started.
+ * @tx_ba_stop: tid bitmap to configure on what tid the trigger should occur
+ * when a Tx BlockAck session is stopped.
+ * @rx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is received (for a Tx BlockAck session).
+ * @tx_bar: tid bitmap to configure on what tid the trigger should occur
+ * when a BAR is sent (for an Rx BlockAck session).
+ * @frame_timeout: tid bitmap to configure on what tid the trigger should occur
+ * when a frame times out in the reordering buffer.
+ */
+struct iwx_fw_dbg_trigger_ba {
+ uint16_t rx_ba_start;
+ uint16_t rx_ba_stop;
+ uint16_t tx_ba_start;
+ uint16_t tx_ba_stop;
+ uint16_t rx_bar;
+ uint16_t tx_bar;
+ uint16_t frame_timeout;
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tx_latency - configures tx latency related trigger
+ * @thrshold: the wanted threshold.
+ * @tid_bitmap: the tid to apply the threshold on
+ * @mode: recording mode (internal buffer or continuous recording)
+ * @window: the size of the window before collecting.
+ * @reserved: reserved.
+ */
+struct iwx_fw_dbg_trigger_tx_latency {
+ uint32_t thrshold;
+ uint16_t tid_bitmap;
+ uint16_t mode;
+ uint32_t window;
+ uint32_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tdls - configures trigger for TDLS events.
+ * @action_bitmap: the TDLS action to trigger the collection upon
+ * @peer_mode: trigger on specific peer or all
+ * @peer: the TDLS peer to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_tdls {
+ uint8_t action_bitmap;
+ uint8_t peer_mode;
+ uint8_t peer[ETHER_ADDR_LEN];
+ uint8_t reserved[4];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_trigger_tx_status - configures trigger for tx response
+ * status.
+ * @statuses: the list of statuses to trigger the collection on
+ */
+struct iwx_fw_dbg_trigger_tx_status {
+ struct tx_status {
+ uint8_t status;
+ uint8_t reserved[3];
+ } __packed statuses[16];
+ uint32_t reserved[2];
+} __packed;
+
+/**
+ * struct iwx_fw_dbg_conf_tlv - a TLV that describes a debug configuration.
+ * @id: conf id
+ * @usniffer: should the uSniffer image be used
+ * @num_of_hcmds: the number of HCMDs present here to be sent
+ * @hcmd: a variable length host command to be sent to apply the configuration.
+ * If there is more than one HCMD to send, they will appear one after the
+ * other and be sent in the order that they appear in.
+ * This parses IWX_UCODE_TLV_FW_DBG_CONF. The user can add up to
+ * %IWX_FW_DBG_CONF_MAX configurations per run.
+ */
+struct iwx_fw_dbg_conf_tlv {
+ uint8_t id;
+ uint8_t usniffer;
+ uint8_t reserved;
+ uint8_t num_of_hcmds;
+ struct iwx_fw_dbg_conf_hcmd hcmd;
+} __packed;
+
+#define IWX_FW_CMD_VER_UNKNOWN 99
+
+/**
+ * struct iwx_fw_cmd_version - firmware command version entry
+ * @cmd: command ID
+ * @group: group ID
+ * @cmd_ver: command version
+ * @notif_ver: notification version
+ */
+struct iwx_fw_cmd_version {
+ uint8_t cmd;
+ uint8_t group;
+ uint8_t cmd_ver;
+ uint8_t notif_ver;
+} __packed;
+
+/* Common PHY, MAC and Bindings definitions */
+
+#define IWX_MAX_MACS_IN_BINDING (3)
+#define IWX_MAX_BINDINGS (4)
+#define IWX_AUX_BINDING_INDEX (3)
+#define IWX_MAX_PHYS (4)
+
+/* Used to extract ID and color from the context dword */
+#define IWX_FW_CTXT_ID_POS (0)
+#define IWX_FW_CTXT_ID_MSK (0xff << IWX_FW_CTXT_ID_POS)
+#define IWX_FW_CTXT_COLOR_POS (8)
+#define IWX_FW_CTXT_COLOR_MSK (0xff << IWX_FW_CTXT_COLOR_POS)
+#define IWX_FW_CTXT_INVALID (0xffffffff)
+
+#define IWX_FW_CMD_ID_AND_COLOR(_id, _color) ((_id << IWX_FW_CTXT_ID_POS) |\
+ (_color << IWX_FW_CTXT_COLOR_POS))
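+
+/*
+ * Illustrative helpers (hypothetical names, not part of the firmware API)
+ * showing how the ID and color are recovered from a context dword built
+ * with IWX_FW_CMD_ID_AND_COLOR():
+ */
+static inline uint32_t
+iwx_fw_ctxt_id(uint32_t id_and_color)
+{
+	return ((id_and_color & IWX_FW_CTXT_ID_MSK) >> IWX_FW_CTXT_ID_POS);
+}
+
+static inline uint32_t
+iwx_fw_ctxt_color(uint32_t id_and_color)
+{
+	return ((id_and_color & IWX_FW_CTXT_COLOR_MSK) >>
+	    IWX_FW_CTXT_COLOR_POS);
+}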
+
+/* Possible actions on PHYs, MACs and Bindings */
+#define IWX_FW_CTXT_ACTION_STUB 0
+#define IWX_FW_CTXT_ACTION_ADD 1
+#define IWX_FW_CTXT_ACTION_MODIFY 2
+#define IWX_FW_CTXT_ACTION_REMOVE 3
+#define IWX_FW_CTXT_ACTION_NUM 4
+/* COMMON_CONTEXT_ACTION_API_E_VER_1 */
+
+/* Time Events */
+
+/* Time Event types, according to MAC type */
+
+/* BSS Station Events */
+#define IWX_TE_BSS_STA_AGGRESSIVE_ASSOC 0
+#define IWX_TE_BSS_STA_ASSOC 1
+#define IWX_TE_BSS_EAP_DHCP_PROT 2
+#define IWX_TE_BSS_QUIET_PERIOD 3
+
+/* P2P Device Events */
+#define IWX_TE_P2P_DEVICE_DISCOVERABLE 4
+#define IWX_TE_P2P_DEVICE_LISTEN 5
+#define IWX_TE_P2P_DEVICE_ACTION_SCAN 6
+#define IWX_TE_P2P_DEVICE_FULL_SCAN 7
+
+/* P2P Client Events */
+#define IWX_TE_P2P_CLIENT_AGGRESSIVE_ASSOC 8
+#define IWX_TE_P2P_CLIENT_ASSOC 9
+#define IWX_TE_P2P_CLIENT_QUIET_PERIOD 10
+
+/* P2P GO Events */
+#define IWX_TE_P2P_GO_ASSOC_PROT 11
+#define IWX_TE_P2P_GO_REPETITIVE_NOA 12
+#define IWX_TE_P2P_GO_CT_WINDOW 13
+
+/* WiDi Sync Events */
+#define IWX_TE_WIDI_TX_SYNC 14
+
+/* Time event - defines for command API */
+
+/**
+ * DOC: Time Events - what is it?
+ *
+ * Time Events are a fw feature that allows the driver to control the presence
+ * of the device on the channel. Since the fw supports multiple channels
+ * concurrently, the fw may choose to jump to another channel at any time.
+ * In order to make sure that the fw is on a specific channel at a certain time
+ * and for a certain duration, the driver needs to issue a time event.
+ *
+ * The simplest example is for BSS association. The driver issues a time event,
+ * waits for it to start, and only then tells mac80211 that we can start the
+ * association. This way, we make sure that the association will be done
+ * smoothly and won't be interrupted by a channel switch decided within the fw.
+ */
+
+/**
+ * DOC: The flow against the fw
+ *
+ * When the driver needs to make sure we are in a certain channel, at a certain
+ * time and for a certain duration, it sends a Time Event. The flow against the
+ * fw goes like this:
+ * 1) Driver sends a TIME_EVENT_CMD to the fw
+ * 2) Driver gets the response for that command. This response contains the
+ * Unique ID (UID) of the event.
+ * 3) The fw sends notification when the event starts.
+ *
+ * Of course the API provides various options that allow covering the
+ * parameters of the flow:
+ * What is the duration of the event?
+ * What is the start time of the event?
+ * Is there an end-time for the event?
+ * How much can the event be delayed?
+ * Can the event be split?
+ * If so, what is the maximal number of chunks?
+ * etc...
+ */
+
+/*
+ * @IWX_TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @IWX_TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @IWX_TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @IWX_TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+#define IWX_TE_V2_FRAG_NONE 0
+#define IWX_TE_V2_FRAG_SINGLE 1
+#define IWX_TE_V2_FRAG_DUAL 2
+#define IWX_TE_V2_FRAG_MAX 0xfe
+#define IWX_TE_V2_FRAG_ENDLESS 0xff
+
+/* Repeat the time event endlessly (until removed) */
+#define IWX_TE_V2_REPEAT_ENDLESS 0xff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define IWX_TE_V2_REPEAT_MAX 0xfe
+
+#define IWX_TE_V2_PLACEMENT_POS 12
+#define IWX_TE_V2_ABSENCE_POS 15
+
+/* Time event policy values
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @IWX_TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @IWX_TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @IWX_TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @IWX_TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @IWX_TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @IWX_TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @IWX_TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @IWX_TE_V2_DEP_OTHER: depends on another time event
+ * @IWX_TE_V2_DEP_TSF: depends on a specific time
+ * @IWX_TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @IWX_TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+#define IWX_TE_V2_DEFAULT_POLICY 0x0
+
+/* notifications (event start/stop, fragment start/stop) */
+#define IWX_TE_V2_NOTIF_HOST_EVENT_START (1 << 0)
+#define IWX_TE_V2_NOTIF_HOST_EVENT_END (1 << 1)
+#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_START (1 << 2)
+#define IWX_TE_V2_NOTIF_INTERNAL_EVENT_END (1 << 3)
+
+#define IWX_TE_V2_NOTIF_HOST_FRAG_START (1 << 4)
+#define IWX_TE_V2_NOTIF_HOST_FRAG_END (1 << 5)
+#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_START (1 << 6)
+#define IWX_TE_V2_NOTIF_INTERNAL_FRAG_END (1 << 7)
+#define IWX_T2_V2_START_IMMEDIATELY (1 << 11)
+
+#define IWX_TE_V2_NOTIF_MSK 0xff
+
+/* placement characteristics */
+#define IWX_TE_V2_DEP_OTHER (1 << IWX_TE_V2_PLACEMENT_POS)
+#define IWX_TE_V2_DEP_TSF (1 << (IWX_TE_V2_PLACEMENT_POS + 1))
+#define IWX_TE_V2_EVENT_SOCIOPATHIC (1 << (IWX_TE_V2_PLACEMENT_POS + 2))
+
+/* are we present or absent during the Time Event. */
+#define IWX_TE_V2_ABSENCE (1 << IWX_TE_V2_ABSENCE_POS)
+
+/**
+ * struct iwx_time_event_cmd - configuring Time Events
+ * (uses IWX_MAC_TIME_EVENT_DATA_API_S_VER_2; version 1 is selected via
+ * IWX_UCODE_TLV_FLAGS)
+ * ( IWX_TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be IWX_TE_V2_REPEAT_ENDLESS
+ * @max_frags: maximal number of fragments the Time Event can be divided into
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ * on event and/or fragment start and/or end, using the
+ * IWX_TE_V2_NOTIF_* flags, and defines placement and absence via
+ * IWX_TE_V2_DEP_OTHER, IWX_TE_V2_DEP_TSF, IWX_TE_V2_EVENT_SOCIOPATHIC
+ * and IWX_TE_V2_ABSENCE
+ */
+struct iwx_time_event_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t id;
+ /* IWX_MAC_TIME_EVENT_DATA_API_S_VER_2 */
+ uint32_t apply_time;
+ uint32_t max_delay;
+ uint32_t depends_on;
+ uint32_t interval;
+ uint32_t duration;
+ uint8_t repeat;
+ uint8_t max_frags;
+ uint16_t policy;
+} __packed; /* IWX_MAC_TIME_EVENT_CMD_API_S_VER_2 */
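+
+/*
+ * Usage sketch (illustrative only; iwx_send_cmd_pdu() and the driver
+ * context variables are assumptions, not defined in this header). To
+ * protect a BSS association, the driver could add a time event:
+ *
+ *	struct iwx_time_event_cmd cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(mac_id, color));
+ *	cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
+ *	cmd.id = htole32(IWX_TE_BSS_STA_AGGRESSIVE_ASSOC);
+ *	cmd.duration = htole32(duration_tu);
+ *	cmd.repeat = 1;
+ *	cmd.max_frags = IWX_TE_V2_FRAG_NONE;
+ *	cmd.policy = htole16(IWX_TE_V2_NOTIF_HOST_EVENT_START |
+ *	    IWX_TE_V2_NOTIF_HOST_EVENT_END);
+ *	error = iwx_send_cmd_pdu(sc, IWX_TIME_EVENT_CMD, 0,
+ *	    sizeof(cmd), &cmd);
+ *
+ * The unique event ID is then returned in struct iwx_time_event_resp,
+ * and start/stop is reported via struct iwx_time_event_notif.
+ */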
+
+/**
+ * struct iwx_time_event_resp - response structure to iwx_time_event_cmd
+ * @status: bit 0 indicates success, all others specify errors
+ * @id: the Time Event type
+ * @unique_id: the unique ID assigned (in ADD) or given (others) to the TE
+ * @id_and_color: ID and color of the relevant MAC
+ */
+struct iwx_time_event_resp {
+ uint32_t status;
+ uint32_t id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+} __packed; /* IWX_MAC_TIME_EVENT_RSP_API_S_VER_1 */
+
+/**
+ * struct iwx_time_event_notif - notifications of time event start/stop
+ * ( IWX_TIME_EVENT_NOTIFICATION = 0x2a )
+ * @timestamp: action timestamp in GP2
+ * @session_id: session's unique id
+ * @unique_id: unique id of the Time Event itself
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: one of IWX_TE_NOTIF_START or IWX_TE_NOTIF_END
+ * @status: true if scheduled, false otherwise (not executed)
+ */
+struct iwx_time_event_notif {
+ uint32_t timestamp;
+ uint32_t session_id;
+ uint32_t unique_id;
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t status;
+} __packed; /* IWX_MAC_TIME_EVENT_NTFY_API_S_VER_1 */
+
+/**
+ * enum iwx_session_prot_conf_id - session protection's configurations
+ * @IWX_SESSION_PROTECT_CONF_ASSOC: Start a session protection for association.
+ * The firmware will allocate two events.
+ * Valid for BSS_STA and P2P_STA.
+ * * A rather short event that can't be fragmented and with a very
+ * high priority. If everything goes well (99% of the cases) the
+ * association should complete within this first event. During
+ * that event, no other activity will happen in the firmware,
+ * which is why it can't be too long.
+ * The length of this event is hard-coded in the firmware: 300TUs.
+ * * Another event which can be much longer (its duration is
+ * configurable by the driver) which has a slightly lower
+ * priority and that can be fragmented allowing other activities
+ * to run while this event is running.
+ * The firmware will automatically remove both events once the driver sets
+ * the BSS MAC as associated. Neither of the events will be removed
+ * for the P2P_STA MAC.
+ * Only the duration is configurable for this protection.
+ * @IWX_SESSION_PROTECT_CONF_GO_CLIENT_ASSOC: not used
+ * @IWX_SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV: Schedule the P2P Device to be in
+ * listen mode. Will be fragmented. Valid only on the P2P Device MAC.
+ * The firmware will take into account the duration, the interval and the
+ * repetition count.
+ * @IWX_SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION: Schedule the P2P Device to be
+ * able to run the GO Negotiation. Will not be fragmented and not
+ * repetitive. Valid only on the P2P Device MAC. Only the duration will
+ * be taken into account.
+ * @IWX_SESSION_PROTECT_CONF_MAX_ID: not used
+ */
+enum iwx_session_prot_conf_id {
+ IWX_SESSION_PROTECT_CONF_ASSOC,
+ IWX_SESSION_PROTECT_CONF_GO_CLIENT_ASSOC,
+ IWX_SESSION_PROTECT_CONF_P2P_DEVICE_DISCOV,
+ IWX_SESSION_PROTECT_CONF_P2P_GO_NEGOTIATION,
+ IWX_SESSION_PROTECT_CONF_MAX_ID,
+}; /* SESSION_PROTECTION_CONF_ID_E_VER_1 */
+
+/**
+ * struct iwx_session_prot_cmd - configure a session protection
+ * @id_and_color: the id and color of the mac for which this session protection
+ * is sent
+ * @action: can be either FW_CTXT_ACTION_ADD or FW_CTXT_ACTION_REMOVE
+ * @conf_id: see &enum iwx_session_prot_conf_id
+ * @duration_tu: the duration of the whole protection in TUs.
+ * @repetition_count: not used
+ * @interval: not used
+ *
+ * Note: the session protection will always be scheduled to start as
+ * early as possible, but the maximum delay is configuration dependent.
+ * The firmware supports only one concurrent session protection per vif.
+ * Adding a new session protection will remove any currently running session.
+ */
+struct iwx_session_prot_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 hdr */
+ uint32_t id_and_color;
+ uint32_t action;
+ uint32_t conf_id;
+ uint32_t duration_tu;
+ uint32_t repetition_count;
+ uint32_t interval;
+} __packed; /* SESSION_PROTECTION_CMD_API_S_VER_1 */
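+
+/*
+ * Usage sketch (illustrative only; iwx_send_cmd_pdu() and the driver
+ * context are assumptions): protecting an association could look like:
+ *
+ *	struct iwx_session_prot_cmd cmd;
+ *
+ *	memset(&cmd, 0, sizeof(cmd));
+ *	cmd.id_and_color = htole32(IWX_FW_CMD_ID_AND_COLOR(mac_id, color));
+ *	cmd.action = htole32(IWX_FW_CTXT_ACTION_ADD);
+ *	cmd.conf_id = htole32(IWX_SESSION_PROTECT_CONF_ASSOC);
+ *	cmd.duration_tu = htole32(duration_tu);
+ *
+ * Because the firmware supports only one session protection per vif,
+ * sending this while another protection is running replaces it.
+ */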
+
+/**
+ * struct iwx_session_prot_notif - session protection started / ended
+ * @mac_id: the mac id for which the session protection started / ended
+ * @status: 1 means success, 0 means failure
+ * @start: 1 means the session protection started, 0 means it ended
+ * @conf_id: see &enum iwx_session_prot_conf_id
+ *
+ * Note that any session protection will always get two notifications: start
+ * and end, even if the firmware could not schedule it.
+ */
+struct iwx_session_prot_notif {
+ uint32_t mac_id;
+ uint32_t status;
+ uint32_t start;
+ uint32_t conf_id;
+} __packed; /* SESSION_PROTECTION_NOTIFICATION_API_S_VER_2 */
+
+
+/* Bindings and Time Quota */
+
+/**
+ * struct iwx_binding_cmd - configuring bindings
+ * ( IWX_BINDING_CONTEXT_CMD = 0x2b )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @macs: array of MAC id and colors which belong to the binding
+ * @phy: PHY id and color which belongs to the binding
+ * @lmac_id: the lmac id the binding belongs to
+ */
+struct iwx_binding_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_BINDING_DATA_API_S_VER_1 */
+ uint32_t macs[IWX_MAX_MACS_IN_BINDING];
+ uint32_t phy;
+ uint32_t lmac_id;
+} __packed; /* IWX_BINDING_CMD_API_S_VER_2 */
+
+#define IWX_LMAC_24G_INDEX 0
+#define IWX_LMAC_5G_INDEX 1
+
+/* The maximal number of fragments in the FW's schedule session */
+#define IWX_MAX_QUOTA 128
+
+/**
+ * struct iwx_time_quota_data - configuration of time quota per binding
+ * @id_and_color: ID and color of the relevant Binding
+ * @quota: absolute time quota in TU. The scheduler will try to divide the
+ * remaining quota (after Time Events) according to this quota.
+ * @max_duration: max uninterrupted context duration in TU
+ */
+struct iwx_time_quota_data {
+ uint32_t id_and_color;
+ uint32_t quota;
+ uint32_t max_duration;
+} __packed; /* IWX_TIME_QUOTA_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_time_quota_cmd - configuration of time quota between bindings
+ * ( IWX_TIME_QUOTA_CMD = 0x2c )
+ * @quotas: allocations per binding
+ */
+struct iwx_time_quota_cmd {
+ struct iwx_time_quota_data quotas[IWX_MAX_BINDINGS];
+} __packed; /* IWX_TIME_QUOTA_ALLOCATION_CMD_API_S_VER_1 */
+
+
+/* PHY context */
+
+/* Supported bands */
+#define IWX_PHY_BAND_5 (0)
+#define IWX_PHY_BAND_24 (1)
+
+/* Supported channel width, vary if there is VHT support */
+#define IWX_PHY_VHT_CHANNEL_MODE20 (0x0)
+#define IWX_PHY_VHT_CHANNEL_MODE40 (0x1)
+#define IWX_PHY_VHT_CHANNEL_MODE80 (0x2)
+#define IWX_PHY_VHT_CHANNEL_MODE160 (0x3)
+
+/*
+ * Control channel position:
+ * For legacy, a set bit means upper channel, otherwise lower.
+ * For VHT - bit 2 marks if the control is lower/upper relative to center-freq,
+ * bits 1:0 mark the distance from the center freq. For 20 MHz, the offset is 0.
+ * center_freq
+ * |
+ * 40Mhz |_______|_______|
+ * 80Mhz |_______|_______|_______|_______|
+ * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______|
+ * code 011 010 001 000 | 100 101 110 111
+ */
+#define IWX_PHY_VHT_CTRL_POS_1_BELOW (0x0)
+#define IWX_PHY_VHT_CTRL_POS_2_BELOW (0x1)
+#define IWX_PHY_VHT_CTRL_POS_3_BELOW (0x2)
+#define IWX_PHY_VHT_CTRL_POS_4_BELOW (0x3)
+#define IWX_PHY_VHT_CTRL_POS_1_ABOVE (0x4)
+#define IWX_PHY_VHT_CTRL_POS_2_ABOVE (0x5)
+#define IWX_PHY_VHT_CTRL_POS_3_ABOVE (0x6)
+#define IWX_PHY_VHT_CTRL_POS_4_ABOVE (0x7)
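+
+/*
+ * Illustrative helper (hypothetical, not part of the firmware API):
+ * derive the VHT control channel position code from the offset, in MHz,
+ * of the primary 20 MHz channel's center relative to the wide channel's
+ * center frequency (+/-10 for 40 MHz, up to +/-70 for 160 MHz). The
+ * magnitude selects the 20 MHz slot and the sign selects bit 2:
+ */
+static inline uint8_t
+iwx_phy_vht_ctrl_pos(int offset_mhz)
+{
+	uint8_t slot;
+
+	/* Slot centers sit 10, 30, 50 or 70 MHz away from center_freq. */
+	slot = ((offset_mhz < 0 ? -offset_mhz : offset_mhz) - 10) / 20;
+	return (offset_mhz > 0 ? (IWX_PHY_VHT_CTRL_POS_1_ABOVE | slot) : slot);
+}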
+
+/*
+ * @band: IWX_PHY_BAND_*
+ * @channel: channel number
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ */
+struct iwx_fw_channel_info_v1 {
+ uint8_t band;
+ uint8_t channel;
+ uint8_t width;
+ uint8_t ctrl_pos;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_1 */
+
+/*
+ * struct iwx_fw_channel_info - channel information
+ *
+ * @channel: channel number
+ * @band: PHY_BAND_*
+ * @width: PHY_[VHT|LEGACY]_CHANNEL_*
+ * @ctrl_pos: PHY_[VHT|LEGACY]_CTRL_*
+ * @reserved: for future use and alignment
+ */
+struct iwx_fw_channel_info {
+ uint32_t channel;
+ uint8_t band;
+ uint8_t width;
+ uint8_t ctrl_pos;
+ uint8_t reserved;
+} __packed; /* CHANNEL_CONFIG_API_S_VER_2 */
+
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS (0)
+#define IWX_PHY_RX_CHAIN_DRIVER_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_DRIVER_FORCE_POS)
+#define IWX_PHY_RX_CHAIN_VALID_POS (1)
+#define IWX_PHY_RX_CHAIN_VALID_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_VALID_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_POS (4)
+#define IWX_PHY_RX_CHAIN_FORCE_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_SEL_POS)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7)
+#define IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \
+ (0x7 << IWX_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS)
+#define IWX_PHY_RX_CHAIN_CNT_POS (10)
+#define IWX_PHY_RX_CHAIN_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_POS (12)
+#define IWX_PHY_RX_CHAIN_MIMO_CNT_MSK \
+ (0x3 << IWX_PHY_RX_CHAIN_MIMO_CNT_POS)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_POS (14)
+#define IWX_PHY_RX_CHAIN_MIMO_FORCE_MSK \
+ (0x1 << IWX_PHY_RX_CHAIN_MIMO_FORCE_POS)
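+
+/*
+ * Illustrative helper (hypothetical): compose a @rxchain_info value for a
+ * PHY context command from a bitmap of valid RX antennas and the number of
+ * RX chains to use for non-MIMO and MIMO reception:
+ */
+static inline uint32_t
+iwx_phy_rx_chain_info(uint8_t valid_ant, uint8_t cnt, uint8_t mimo_cnt)
+{
+	return ((valid_ant << IWX_PHY_RX_CHAIN_VALID_POS) |
+	    (cnt << IWX_PHY_RX_CHAIN_CNT_POS) |
+	    (mimo_cnt << IWX_PHY_RX_CHAIN_MIMO_CNT_POS));
+}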
+
+/* TODO: fix the value, make it depend on firmware at runtime? */
+#define IWX_NUM_PHY_CTX 3
+
+/**
+ * struct iwx_phy_context_cmd - config of the PHY context
+ * ( IWX_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @lmac_id: the lmac id the phy context belongs to
+ * @ci: channel info
+ * @rxchain_info: ???
+ * @dsp_cfg_flags: set to 0
+ * @reserved: reserved to align to 64 bit
+ */
+struct iwx_phy_context_cmd_uhb {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3 */
+ struct iwx_fw_channel_info ci;
+ uint32_t lmac_id;
+ uint32_t rxchain_info;
+ uint32_t dsp_cfg_flags;
+ uint32_t reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3 */
+
+struct iwx_phy_context_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* PHY_CONTEXT_DATA_API_S_VER_3, PHY_CONTEXT_DATA_API_S_VER_4 */
+ struct iwx_fw_channel_info_v1 ci;
+ uint32_t lmac_id;
+ uint32_t rxchain_info; /* reserved in _VER_4 */
+ uint32_t dsp_cfg_flags;
+ uint32_t reserved;
+} __packed; /* PHY_CONTEXT_CMD_API_VER_3, PHY_CONTEXT_CMD_API_VER_4 */
+
+/* TODO: complete missing documentation */
+/**
+ * struct iwx_phy_context_cmd_v1 - config of the PHY context (v1 command API)
+ * ( IWX_PHY_CONTEXT_CMD = 0x8 )
+ * @id_and_color: ID and color of the relevant Binding
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @apply_time: 0 means immediate apply and context switch.
+ * other value means apply new params after X usecs
+ * @tx_param_color: ???
+ * @ci: channel info
+ * @txchain_info: ???
+ * @rxchain_info: ???
+ * @acquisition_data: ???
+ * @dsp_cfg_flags: set to 0
+ */
+/*
+ * XXX Intel forgot to bump the PHY_CONTEXT command API when they increased
+ * the size of fw_channel_info from v1 to v2.
+ * To keep things simple we define two versions of this struct, and both
+ * are labeled as CMD_API_VER_1. (The Linux iwlwifi driver performs dark
+ * magic with pointers to struct members instead.)
+ */
+/* This version must be used if IWX_UCODE_TLV_CAPA_ULTRA_HB_CHANNELS is set: */
+struct iwx_phy_context_cmd_uhb_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwx_fw_channel_info ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWX_PHY_CONTEXT_CMD_API_VER_1 */
+/* This version must be used otherwise: */
+struct iwx_phy_context_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_PHY_CONTEXT_DATA_API_S_VER_1 */
+ uint32_t apply_time;
+ uint32_t tx_param_color;
+ struct iwx_fw_channel_info_v1 ci;
+ uint32_t txchain_info;
+ uint32_t rxchain_info;
+ uint32_t acquisition_data;
+ uint32_t dsp_cfg_flags;
+} __packed; /* IWX_PHY_CONTEXT_CMD_API_VER_1 */
+
+
+#define IWX_RX_INFO_PHY_CNT 8
+#define IWX_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWX_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWX_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWX_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWX_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWX_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWX_RX_INFO_ENERGY_ANT_C_POS 16
+
+#define IWX_RX_INFO_AGC_IDX 1
+#define IWX_RX_INFO_RSSI_AB_IDX 2
+#define IWX_OFDM_AGC_A_MSK 0x0000007f
+#define IWX_OFDM_AGC_A_POS 0
+#define IWX_OFDM_AGC_B_MSK 0x00003f80
+#define IWX_OFDM_AGC_B_POS 7
+#define IWX_OFDM_AGC_CODE_MSK 0x3fe00000
+#define IWX_OFDM_AGC_CODE_POS 20
+#define IWX_OFDM_RSSI_INBAND_A_MSK 0x00ff
+#define IWX_OFDM_RSSI_A_POS 0
+#define IWX_OFDM_RSSI_ALLBAND_A_MSK 0xff00
+#define IWX_OFDM_RSSI_ALLBAND_A_POS 8
+#define IWX_OFDM_RSSI_INBAND_B_MSK 0xff0000
+#define IWX_OFDM_RSSI_B_POS 16
+#define IWX_OFDM_RSSI_ALLBAND_B_MSK 0xff000000
+#define IWX_OFDM_RSSI_ALLBAND_B_POS 24
+
+/**
+ * struct iwx_rx_phy_info - phy info
+ * (IWX_REPLY_RX_PHY_CMD = 0xc0)
+ * @non_cfg_phy_cnt: non configurable DSP phy data byte count
+ * @cfg_phy_cnt: configurable DSP phy data byte count
+ * @stat_id: configurable DSP phy data set ID
+ * @reserved1:
+ * @system_timestamp: GP2 at on air rise
+ * @timestamp: TSF at on air rise
+ * @beacon_time_stamp: beacon at on-air rise
+ * @phy_flags: general phy flags: band, modulation, ...
+ * @channel: channel number
+ * @non_cfg_phy: for various implementations of non_cfg_phy data
+ * @rate_n_flags: IWX_RATE_MCS_*
+ * @byte_count: frame's byte-count
+ * @frame_time: frame's time on the air, based on byte count and frame rate
+ * calculation
+ * @mac_active_msk: what MACs were active when the frame was received
+ *
+ * Before each Rx, the device sends this data. It contains PHY information
+ * about the reception of the packet.
+ */
+struct iwx_rx_phy_info {
+ uint8_t non_cfg_phy_cnt;
+ uint8_t cfg_phy_cnt;
+ uint8_t stat_id;
+ uint8_t reserved1;
+ uint32_t system_timestamp;
+ uint64_t timestamp;
+ uint32_t beacon_time_stamp;
+ uint16_t phy_flags;
+#define IWX_PHY_INFO_FLAG_SHPREAMBLE (1 << 2)
+ uint16_t channel;
+ uint32_t non_cfg_phy[IWX_RX_INFO_PHY_CNT];
+ uint32_t rate_n_flags;
+ uint32_t byte_count;
+ uint16_t mac_active_msk;
+ uint16_t frame_time;
+} __packed;
+
+struct iwx_rx_mpdu_res_start {
+ uint16_t byte_count;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * Values to parse %iwx_rx_phy_info phy_flags
+ * @IWX_RX_RES_PHY_FLAGS_BAND_24: true if the packet was received on 2.4 band
+ * @IWX_RX_RES_PHY_FLAGS_MOD_CCK:
+ * @IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE: true if packet's preamble was short
+ * @IWX_RX_RES_PHY_FLAGS_NARROW_BAND:
+ * @IWX_RX_RES_PHY_FLAGS_ANTENNA: antenna on which the packet was received
+ * @IWX_RX_RES_PHY_FLAGS_AGG: set if the packet was part of an A-MPDU
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_HT: The frame was an HT frame
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_GF: The frame used GF preamble
+ * @IWX_RX_RES_PHY_FLAGS_OFDM_VHT: The frame was a VHT frame
+ */
+#define IWX_RX_RES_PHY_FLAGS_BAND_24 (1 << 0)
+#define IWX_RX_RES_PHY_FLAGS_MOD_CCK (1 << 1)
+#define IWX_RX_RES_PHY_FLAGS_SHORT_PREAMBLE (1 << 2)
+#define IWX_RX_RES_PHY_FLAGS_NARROW_BAND (1 << 3)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA (0x7 << 4)
+#define IWX_RX_RES_PHY_FLAGS_ANTENNA_POS 4
+#define IWX_RX_RES_PHY_FLAGS_AGG (1 << 7)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_HT (1 << 8)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_GF (1 << 9)
+#define IWX_RX_RES_PHY_FLAGS_OFDM_VHT (1 << 10)
+
+/**
+ * Values written by fw for each Rx packet
+ * @IWX_RX_MPDU_RES_STATUS_CRC_OK: CRC is fine
+ * @IWX_RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
+ * @IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_VALID:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK:
+ * @IWX_RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
+ * @IWX_RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
+ * in the driver.
+ * @IWX_RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
+ * @IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
+ * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
+ * %IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * @IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
+ * @IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC: this frame is encrypted using TKIP
+ * @IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC: this frame is encrypted using CCM_CMAC
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
+ * @IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
+ * @IWX_RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
+ * @IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP:
+ * @IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT:
+ * @IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
+ * @IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_STA_ID_MSK:
+ * @IWX_RX_MPDU_RES_STATUS_RRF_KILL:
+ * @IWX_RX_MPDU_RES_STATUS_FILTERING_MSK:
+ * @IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK:
+ */
+#define IWX_RX_MPDU_RES_STATUS_CRC_OK (1 << 0)
+#define IWX_RX_MPDU_RES_STATUS_OVERRUN_OK (1 << 1)
+#define IWX_RX_MPDU_RES_STATUS_SRC_STA_FOUND (1 << 2)
+#define IWX_RX_MPDU_RES_STATUS_KEY_VALID (1 << 3)
+#define IWX_RX_MPDU_RES_STATUS_KEY_PARAM_OK (1 << 4)
+#define IWX_RX_MPDU_RES_STATUS_ICV_OK (1 << 5)
+#define IWX_RX_MPDU_RES_STATUS_MIC_OK (1 << 6)
+#define IWX_RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
+#define IWX_RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR (1 << 7)
+#define IWX_RX_MPDU_RES_STATUS_SEC_NO_ENC (0 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_WEP_ENC (1 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC (2 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_TKIP_ENC (3 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_EXT_ENC (4 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_CCM_CMAC_ENC (6 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_ERR (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK (7 << 8)
+#define IWX_RX_MPDU_RES_STATUS_DEC_DONE (1 << 11)
+#define IWX_RX_MPDU_RES_STATUS_PROTECT_FRAME_BIT_CMP (1 << 12)
+#define IWX_RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP (1 << 13)
+#define IWX_RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT (1 << 14)
+#define IWX_RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME (1 << 15)
+#define IWX_RX_MPDU_RES_STATUS_HASH_INDEX_MSK (0x3F0000)
+#define IWX_RX_MPDU_RES_STATUS_STA_ID_MSK (0x1f000000)
+#define IWX_RX_MPDU_RES_STATUS_RRF_KILL (1 << 29)
+#define IWX_RX_MPDU_RES_STATUS_FILTERING_MSK (0xc00000)
+#define IWX_RX_MPDU_RES_STATUS2_FILTERING_MSK (0xc0000000)
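+
+/*
+ * Illustrative helpers (hypothetical): a received MPDU is usually accepted
+ * only if both the CRC and overrun status bits are fine, and the cipher in
+ * use is identified by masking the encryption field:
+ */
+static inline int
+iwx_rx_mpdu_status_crc_ok(uint32_t status)
+{
+	uint32_t ok = IWX_RX_MPDU_RES_STATUS_CRC_OK |
+	    IWX_RX_MPDU_RES_STATUS_OVERRUN_OK;
+
+	return ((status & ok) == ok);
+}
+
+static inline int
+iwx_rx_mpdu_status_is_ccm(uint32_t status)
+{
+	return ((status & IWX_RX_MPDU_RES_STATUS_SEC_ENC_MSK) ==
+	    IWX_RX_MPDU_RES_STATUS_SEC_CCM_ENC);
+}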
+
+#define IWX_RX_MPDU_MFLG1_ADDRTYPE_MASK 0x03
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_MASK 0xf0
+#define IWX_RX_MPDU_MFLG1_MIC_CRC_LEN_SHIFT 3
+
+#define IWX_RX_MPDU_MFLG2_HDR_LEN_MASK 0x1f
+#define IWX_RX_MPDU_MFLG2_PAD 0x20
+#define IWX_RX_MPDU_MFLG2_AMSDU 0x40
+
+#define IWX_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK 0x7f
+#define IWX_RX_MPDU_AMSDU_LAST_SUBFRAME 0x80
+
+#define IWX_RX_MPDU_PHY_AMPDU (1 << 5)
+#define IWX_RX_MPDU_PHY_AMPDU_TOGGLE (1 << 6)
+#define IWX_RX_MPDU_PHY_SHORT_PREAMBLE (1 << 7)
+#define IWX_RX_MPDU_PHY_NCCK_ADDTL_NTFY (1 << 7)
+#define IWX_RX_MPDU_PHY_TSF_OVERLOAD (1 << 8)
+
+struct iwx_rx_mpdu_desc_v3 {
+ union {
+ uint32_t filter_match;
+ uint32_t phy_data3;
+ };
+ union {
+ uint32_t rss_hash;
+ uint32_t phy_data2;
+ };
+ uint32_t partial_hash; /* ip/tcp header hash w/o some fields */
+ uint16_t raw_xsum;
+ uint16_t reserved_xsum;
+ uint32_t rate_n_flags;
+ uint8_t energy_a;
+ uint8_t energy_b;
+ uint8_t channel;
+ uint8_t mac_context;
+ uint32_t gp2_on_air_rise;
+ union {
+ /*
+ * TSF value on air rise (INA), only valid if
+ * IWX_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+ */
+ struct {
+ uint32_t tsf_on_air_rise0;
+ uint32_t tsf_on_air_rise1;
+ };
+
+ struct {
+ uint32_t phy_data0;
+
+ /* Only valid if IWX_RX_MPDU_PHY_TSF_OVERLOAD is set. */
+ uint32_t phy_data1;
+ };
+ };
+ uint32_t reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_5 */
+
+struct iwx_rx_mpdu_desc_v1 {
+ union {
+ uint32_t rss_hash;
+ uint32_t phy_data2;
+ };
+ union {
+ uint32_t filter_match;
+ uint32_t phy_data3;
+ };
+ uint32_t rate_n_flags;
+ uint8_t energy_a;
+ uint8_t energy_b;
+ uint8_t channel;
+ uint8_t mac_context;
+ uint32_t gp2_on_air_rise;
+ union {
+ struct {
+ uint32_t tsf_on_air_rise0;
+ uint32_t tsf_on_air_rise1;
+ };
+ struct {
+ uint32_t phy_data0;
+ uint32_t phy_data1;
+ };
+ };
+} __packed;
+
+#define IWX_RX_REORDER_DATA_INVALID_BAID 0x7f
+
+#define IWX_RX_MPDU_REORDER_NSSN_MASK 0x00000fff
+#define IWX_RX_MPDU_REORDER_SN_MASK 0x00fff000
+#define IWX_RX_MPDU_REORDER_SN_SHIFT 12
+#define IWX_RX_MPDU_REORDER_BAID_MASK 0x7f000000
+#define IWX_RX_MPDU_REORDER_BAID_SHIFT 24
+#define IWX_RX_MPDU_REORDER_BA_OLD_SN 0x80000000
+
+struct iwx_rx_mpdu_desc {
+ uint16_t mpdu_len;
+ uint8_t mac_flags1;
+ uint8_t mac_flags2;
+ uint8_t amsdu_info;
+ uint16_t phy_info;
+ uint8_t mac_phy_idx;
+ uint16_t raw_csum;
+ union {
+ uint16_t l3l4_flags;
+ uint16_t phy_data4;
+ };
+ uint16_t status;
+ uint8_t hash_filter;
+ uint8_t sta_id_flags;
+ uint32_t reorder_data;
+ union {
+ struct iwx_rx_mpdu_desc_v1 v1;
+ struct iwx_rx_mpdu_desc_v3 v3;
+ };
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_4,
+ RX_MPDU_RES_START_API_S_VER_5 */
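+
+/*
+ * Illustrative helpers (hypothetical names): unpack @reorder_data from
+ * struct iwx_rx_mpdu_desc into its BAID, SN and NSSN fields:
+ */
+static inline uint8_t
+iwx_rx_reorder_baid(uint32_t reorder)
+{
+	return ((reorder & IWX_RX_MPDU_REORDER_BAID_MASK) >>
+	    IWX_RX_MPDU_REORDER_BAID_SHIFT);
+}
+
+static inline uint16_t
+iwx_rx_reorder_sn(uint32_t reorder)
+{
+	return ((reorder & IWX_RX_MPDU_REORDER_SN_MASK) >>
+	    IWX_RX_MPDU_REORDER_SN_SHIFT);
+}
+
+static inline uint16_t
+iwx_rx_reorder_nssn(uint32_t reorder)
+{
+	return (reorder & IWX_RX_MPDU_REORDER_NSSN_MASK);
+}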
+
+#define IWX_RX_DESC_SIZE_V1 ((sizeof(struct iwx_rx_mpdu_desc) - \
+ sizeof(struct iwx_rx_mpdu_desc_v3)) + sizeof(struct iwx_rx_mpdu_desc_v1))
+
+struct iwx_frame_release {
+ uint8_t baid;
+ uint8_t reserved;
+ uint16_t nssn;
+};
+
+/**
+ * enum iwx_bar_frame_release_sta_tid - STA/TID information for BAR release
+ * @IWX_BAR_FRAME_RELEASE_TID_MASK: TID mask
+ * @IWX_BAR_FRAME_RELEASE_STA_MASK: STA mask
+ */
+#define IWX_BAR_FRAME_RELEASE_TID_MASK 0x0000000f
+#define IWX_BAR_FRAME_RELEASE_STA_MASK 0x000001f0
+#define IWX_BAR_FRAME_RELEASE_STA_SHIFT 4
+
+/**
+ * enum iwx_bar_frame_release_ba_info - BA information for BAR release
+ * @IWX_BAR_FRAME_RELEASE_NSSN_MASK: NSSN mask
+ * @IWX_BAR_FRAME_RELEASE_SN_MASK: SN mask (ignored by driver)
+ * @IWX_BAR_FRAME_RELEASE_BAID_MASK: BAID mask
+ */
+#define IWX_BAR_FRAME_RELEASE_NSSN_MASK 0x00000fff
+#define IWX_BAR_FRAME_RELEASE_SN_MASK 0x00fff000
+#define IWX_BAR_FRAME_RELEASE_SN_SHIFT 12
+#define IWX_BAR_FRAME_RELEASE_BAID_MASK 0x3f000000
+#define IWX_BAR_FRAME_RELEASE_BAID_SHIFT 24
+
+/**
+ * struct iwx_bar_frame_release - frame release from BAR info
+ * @sta_tid: STA & TID information, see &enum iwx_bar_frame_release_sta_tid.
+ * @ba_info: BA information, see &enum iwx_bar_frame_release_ba_info.
+ */
+struct iwx_bar_frame_release {
+ uint32_t sta_tid;
+ uint32_t ba_info;
+} __packed; /* RX_BAR_TO_FRAME_RELEASE_API_S_VER_1 */
+
+/**
+ * struct iwx_radio_version_notif - information on the radio version
+ * ( IWX_RADIO_VERSION_NOTIFICATION = 0x68 )
+ * @radio_flavor:
+ * @radio_step:
+ * @radio_dash:
+ */
+struct iwx_radio_version_notif {
+ uint32_t radio_flavor;
+ uint32_t radio_step;
+ uint32_t radio_dash;
+} __packed; /* IWX_RADIO_VERSION_NOTOFICATION_S_VER_1 */
+
+#define IWX_CARD_ENABLED 0x00
+#define IWX_HW_CARD_DISABLED 0x01
+#define IWX_SW_CARD_DISABLED 0x02
+#define IWX_CT_KILL_CARD_DISABLED 0x04
+#define IWX_HALT_CARD_DISABLED 0x08
+#define IWX_CARD_DISABLED_MSK 0x0f
+#define IWX_CARD_IS_RX_ON 0x10
+
+/**
+ * struct iwx_card_state_notif - card state notification
+ * ( IWX_CARD_STATE_NOTIFICATION = 0xa1 )
+ * @flags: %iwx_card_state_flags
+ */
+struct iwx_card_state_notif {
+ uint32_t flags;
+} __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwx_missed_beacons_notif - information on missed beacons
+ * ( IWX_MISSED_BEACONS_NOTIFICATION = 0xa2 )
+ * @mac_id: interface ID
+ * @consec_missed_beacons_since_last_rx: number of consecutive missed
+ * beacons since last RX.
+ * @consec_missed_beacons: number of consecutive missed beacons
+ * @num_expected_beacons:
+ * @num_recvd_beacons:
+ */
+struct iwx_missed_beacons_notif {
+ uint32_t mac_id;
+ uint32_t consec_missed_beacons_since_last_rx;
+ uint32_t consec_missed_beacons;
+ uint32_t num_expected_beacons;
+ uint32_t num_recvd_beacons;
+} __packed; /* IWX_MISSED_BEACON_NTFY_API_S_VER_3 */
+
+/**
+ * struct iwx_mfuart_load_notif - mfuart image version & status
+ * ( IWX_MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwx_mfuart_load_notif {
+ uint32_t installed_ver;
+ uint32_t external_ver;
+ uint32_t status;
+ uint32_t duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
+/**
+ * struct iwx_set_calib_default_cmd - set default value for calibration.
+ * ( IWX_SET_CALIB_DEFAULT_CMD = 0x8e )
+ * @calib_index: the calibration to set value for
+ * @length: length of the data, in bytes
+ * @data: the value to set for the calibration result
+ */
+struct iwx_set_calib_default_cmd {
+ uint16_t calib_index;
+ uint16_t length;
+ uint8_t data[0];
+} __packed; /* IWX_PHY_CALIB_OVERRIDE_VALUES_S */
+
+#define IWX_MAX_PORT_ID_NUM 2
+#define IWX_MAX_MCAST_FILTERING_ADDRESSES 256
+
+/**
+ * struct iwx_mcast_filter_cmd - configure multicast filter.
+ * @filter_own: Set 1 to filter out multicast packets sent by station itself
+ * @port_id: Multicast MAC addresses array specifier. This is a strange way
+ * to identify a network interface, adopted in the host-device interface.
+ * It is used by the FW as an index into an array of addresses. This
+ * array has IWX_MAX_PORT_ID_NUM members.
+ * @count: Number of MAC addresses in the array
+ * @pass_all: Set 1 to pass all multicast packets.
+ * @bssid: current association BSSID.
+ * @addr_list: Placeholder for the array of MAC addresses.
+ * IMPORTANT: add padding if necessary to ensure DWORD alignment.
+ */
+struct iwx_mcast_filter_cmd {
+ uint8_t filter_own;
+ uint8_t port_id;
+ uint8_t count;
+ uint8_t pass_all;
+ uint8_t bssid[6];
+ uint8_t reserved[2];
+ uint8_t addr_list[0];
+} __packed; /* IWX_MCAST_FILTERING_CMD_API_S_VER_1 */
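+
+/*
+ * Usage sketch (illustrative only): the command is variable-length, so the
+ * allocation must cover @addr_list and keep the total size dword-aligned,
+ * e.g.:
+ *
+ *	size_t len = sizeof(struct iwx_mcast_filter_cmd) +
+ *	    roundup(n_addrs * ETHER_ADDR_LEN, 4);
+ *	struct iwx_mcast_filter_cmd *cmd = malloc(len, M_DEVBUF,
+ *	    M_NOWAIT | M_ZERO);
+ */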
+
+struct iwx_statistics_dbg {
+ uint32_t burst_check;
+ uint32_t burst_count;
+ uint32_t wait_for_silence_timeout_cnt;
+ uint32_t reserved[3];
+} __packed; /* IWX_STATISTICS_DEBUG_API_S_VER_2 */
+
+struct iwx_statistics_div {
+ uint32_t tx_on_a;
+ uint32_t tx_on_b;
+ uint32_t exec_time;
+ uint32_t probe_time;
+ uint32_t rssi_ant;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_SLOW_DIV_API_S_VER_2 */
+
+struct iwx_statistics_bt_activity {
+ uint32_t hi_priority_tx_req_cnt;
+ uint32_t hi_priority_tx_denied_cnt;
+ uint32_t lo_priority_tx_req_cnt;
+ uint32_t lo_priority_tx_denied_cnt;
+ uint32_t hi_priority_rx_req_cnt;
+ uint32_t hi_priority_rx_denied_cnt;
+ uint32_t lo_priority_rx_req_cnt;
+ uint32_t lo_priority_rx_denied_cnt;
+} __packed; /* IWX_STATISTICS_BT_ACTIVITY_API_S_VER_1 */
+
+struct iwx_statistics_general_common {
+ uint32_t radio_temperature;
+ struct iwx_statistics_dbg dbg;
+ uint32_t sleep_time;
+ uint32_t slots_out;
+ uint32_t slots_idle;
+ uint32_t ttl_timestamp;
+ struct iwx_statistics_div slow_div;
+ uint32_t rx_enable_counter;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ uint32_t num_of_sos_states;
+ uint32_t beacon_filtered;
+ uint32_t missed_beacons;
+ uint8_t beacon_filter_average_energy;
+ uint8_t beacon_filter_reason;
+ uint8_t beacon_filter_current_energy;
+ uint8_t beacon_filter_reserved;
+ uint32_t beacon_filter_delta_time;
+ struct iwx_statistics_bt_activity bt_activity;
+ uint64_t rx_time;
+ uint64_t on_time_rf;
+ uint64_t on_time_scan;
+ uint64_t tx_time;
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx_non_phy {
+ uint32_t bogus_cts; /* CTS received when not expecting CTS */
+ uint32_t bogus_ack; /* ACK received when not expecting ACK */
+ uint32_t non_bssid_frames; /* number of frames with BSSID that
+ * doesn't belong to the STA BSSID */
+ uint32_t filtered_frames; /* count frames that were dumped in the
+ * filtering process */
+ uint32_t non_channel_beacons; /* beacons with our bss id but not on
+ * our serving channel */
+ uint32_t channel_beacons; /* beacons with our bss id and in our
+ * serving channel */
+ uint32_t num_missed_bcon; /* number of missed beacons */
+ uint32_t adc_rx_saturation_time; /* count in 0.8us units the time the
+ * ADC was in saturation */
+ uint32_t ina_detection_search_time;/* total time (in 0.8us) searched
+ * for INA */
+ uint32_t beacon_silence_rssi[3];/* RSSI silence after beacon frame */
+ uint32_t interference_data_flag; /* flag for interference data
+ * availability. 1 when data is
+ * available. */
+ uint32_t channel_load; /* counts RX Enable time in uSec */
+ uint32_t dsp_false_alarms; /* DSP false alarm (both OFDM
+ * and CCK) counter */
+ uint32_t beacon_rssi_a;
+ uint32_t beacon_rssi_b;
+ uint32_t beacon_rssi_c;
+ uint32_t beacon_energy_a;
+ uint32_t beacon_energy_b;
+ uint32_t beacon_energy_c;
+ uint32_t num_bt_kills;
+ uint32_t mac_id;
+ uint32_t directed_data_mpdu;
+} __packed; /* IWX_STATISTICS_RX_NON_PHY_API_S_VER_3 */
+
+struct iwx_statistics_rx_phy {
+ uint32_t ina_cnt;
+ uint32_t fina_cnt;
+ uint32_t plcp_err;
+ uint32_t crc32_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t false_alarm_cnt;
+ uint32_t fina_sync_err_cnt;
+ uint32_t sfd_timeout;
+ uint32_t fina_timeout;
+ uint32_t unresponded_rts;
+ uint32_t rxe_frame_limit_overrun;
+ uint32_t sent_ack_cnt;
+ uint32_t sent_cts_cnt;
+ uint32_t sent_ba_rsp_cnt;
+ uint32_t dsp_self_kill;
+ uint32_t mh_format_err;
+ uint32_t re_acq_main_rssi_sum;
+ uint32_t reserved;
+} __packed; /* IWX_STATISTICS_RX_PHY_API_S_VER_2 */
+
+struct iwx_statistics_rx_ht_phy {
+ uint32_t plcp_err;
+ uint32_t overrun_err;
+ uint32_t early_overrun_err;
+ uint32_t crc32_good;
+ uint32_t crc32_err;
+ uint32_t mh_format_err;
+ uint32_t agg_crc32_good;
+ uint32_t agg_mpdu_cnt;
+ uint32_t agg_cnt;
+ uint32_t unsupport_mcs;
+} __packed; /* IWX_STATISTICS_HT_RX_PHY_API_S_VER_1 */
+
+/*
+ * The first MAC indices (starting from 0)
+ * are available to the driver, AUX follows
+ */
+#define IWX_MAC_INDEX_AUX 4
+#define IWX_MAC_INDEX_MIN_DRIVER 0
+#define IWX_NUM_MAC_INDEX_DRIVER IWX_MAC_INDEX_AUX
+
+#define IWX_STATION_COUNT 16
+
+#define IWX_MAX_CHAINS 3
+
+struct iwx_statistics_tx_non_phy_agg {
+ uint32_t ba_timeout;
+ uint32_t ba_reschedule_frames;
+ uint32_t scd_query_agg_frame_cnt;
+ uint32_t scd_query_no_agg;
+ uint32_t scd_query_agg;
+ uint32_t scd_query_mismatch;
+ uint32_t frame_not_ready;
+ uint32_t underrun;
+ uint32_t bt_prio_kill;
+ uint32_t rx_ba_rsp_cnt;
+ int8_t txpower[IWX_MAX_CHAINS];
+ int8_t reserved;
+ uint32_t reserved2;
+} __packed; /* IWX_STATISTICS_TX_NON_PHY_AGG_API_S_VER_1 */
+
+struct iwx_statistics_tx_channel_width {
+ uint32_t ext_cca_narrow_ch20[1];
+ uint32_t ext_cca_narrow_ch40[2];
+ uint32_t ext_cca_narrow_ch80[3];
+ uint32_t ext_cca_narrow_ch160[4];
+ uint32_t last_tx_ch_width_indx;
+ uint32_t rx_detected_per_ch_width[4];
+ uint32_t success_per_ch_width[4];
+ uint32_t fail_per_ch_width[4];
+}; /* IWX_STATISTICS_TX_CHANNEL_WIDTH_API_S_VER_1 */
+
+struct iwx_statistics_tx {
+ uint32_t preamble_cnt;
+ uint32_t rx_detected_cnt;
+ uint32_t bt_prio_defer_cnt;
+ uint32_t bt_prio_kill_cnt;
+ uint32_t few_bytes_cnt;
+ uint32_t cts_timeout;
+ uint32_t ack_timeout;
+ uint32_t expected_ack_cnt;
+ uint32_t actual_ack_cnt;
+ uint32_t dump_msdu_cnt;
+ uint32_t burst_abort_next_frame_mismatch_cnt;
+ uint32_t burst_abort_missing_next_frame_cnt;
+ uint32_t cts_timeout_collision;
+ uint32_t ack_or_ba_timeout_collision;
+ struct iwx_statistics_tx_non_phy_agg agg;
+ struct iwx_statistics_tx_channel_width channel_width;
+} __packed; /* IWX_STATISTICS_TX_API_S_VER_4 */
+
+struct iwx_statistics_general {
+ struct iwx_statistics_general_common common;
+ uint32_t beacon_counter[IWX_MAC_INDEX_AUX];
+ uint8_t beacon_average_energy[IWX_MAC_INDEX_AUX];
+ uint8_t reserved[8 - IWX_MAC_INDEX_AUX];
+} __packed; /* STATISTICS_GENERAL_API_S_VER_10 */
+
+struct iwx_statistics_rx {
+ struct iwx_statistics_rx_phy ofdm;
+ struct iwx_statistics_rx_phy cck;
+ struct iwx_statistics_rx_non_phy general;
+ struct iwx_statistics_rx_ht_phy ofdm_ht;
+} __packed; /* IWX_STATISTICS_RX_API_S_VER_3 */
+
+/*
+ * IWX_STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ *
+ * By default, uCode issues this notification after receiving a beacon
+ * while associated. To disable this behavior, set DISABLE_NOTIF flag in the
+ * IWX_REPLY_STATISTICS_CMD 0x9c, above.
+ *
+ * Statistics counters continue to increment beacon after beacon, but are
+ * cleared when changing channels or when driver issues IWX_REPLY_STATISTICS_CMD
+ * 0x9c with CLEAR_STATS bit set (see above).
+ *
+ * uCode also issues this notification during scans. uCode clears statistics
+ * appropriately so that each notification contains statistics for only the
+ * one channel that has just been scanned.
+ */
+
+/**
+ * struct iwx_statistics_load - RX statistics for multi-queue devices
+ * @air_time: accumulated air time, per mac
+ * @byte_count: accumulated byte count, per mac
+ * @pkt_count: accumulated packet count, per mac
+ * @avg_energy: average RSSI, per station
+ */
+struct iwx_statistics_load {
+ uint32_t air_time[IWX_MAC_INDEX_AUX];
+ uint32_t byte_count[IWX_MAC_INDEX_AUX];
+ uint32_t pkt_count[IWX_MAC_INDEX_AUX];
+ uint8_t avg_energy[IWX_STATION_COUNT];
+} __packed; /* STATISTICS_RX_MAC_STATION_S_VER_3 */
+
+struct iwx_notif_statistics {
+ uint32_t flag;
+ struct iwx_statistics_rx rx;
+ struct iwx_statistics_tx tx;
+ struct iwx_statistics_general general;
+ struct iwx_statistics_load load_stats;
+} __packed; /* STATISTICS_NTFY_API_S_VER_13 */
+
+
+/**
+ * flags used in statistics notification
+ * @IWX_STATISTICS_REPLY_FLG_CLEAR: statistics were cleared after this report
+ */
+#define IWX_STATISTICS_REPLY_FLG_CLEAR 0x01
+
+/**
+ * flags used in statistics command
+ * @IWX_STATISTICS_FLG_CLEAR: request to clear statistics after the report
+ * that's sent after this command
+ * @IWX_STATISTICS_FLG_DISABLE_NOTIF: disable unilateral statistics
+ * notifications
+ */
+#define IWX_STATISTICS_FLG_CLEAR 0x01
+#define IWX_STATISTICS_FLG_DISABLE_NOTIF 0x02
+
+/**
+ * struct iwx_statistics_cmd - statistics config command
+ * @flags: IWX_STATISTICS_* flags
+ */
+struct iwx_statistics_cmd {
+ uint32_t flags;
+} __packed; /* STATISTICS_CMD_API_S_VER_1 */
+
+
+/***********************************
+ * Smart Fifo API
+ ***********************************/
+/* Smart Fifo state */
+#define IWX_SF_LONG_DELAY_ON 0 /* should never be called by driver */
+#define IWX_SF_FULL_ON 1
+#define IWX_SF_UNINIT 2
+#define IWX_SF_INIT_OFF 3
+#define IWX_SF_HW_NUM_STATES 4
+
+/* Smart Fifo possible scenario */
+#define IWX_SF_SCENARIO_SINGLE_UNICAST 0
+#define IWX_SF_SCENARIO_AGG_UNICAST 1
+#define IWX_SF_SCENARIO_MULTICAST 2
+#define IWX_SF_SCENARIO_BA_RESP 3
+#define IWX_SF_SCENARIO_TX_RESP 4
+#define IWX_SF_NUM_SCENARIO 5
+
+#define IWX_SF_TRANSIENT_STATES_NUMBER 2 /* IWX_SF_LONG_DELAY_ON and IWX_SF_FULL_ON */
+#define IWX_SF_NUM_TIMEOUT_TYPES 2 /* Aging timer and Idle timer */
+
+/* smart FIFO default values */
+#define IWX_SF_W_MARK_SISO 4096
+#define IWX_SF_W_MARK_MIMO2 8192
+#define IWX_SF_W_MARK_MIMO3 6144
+#define IWX_SF_W_MARK_LEGACY 4096
+#define IWX_SF_W_MARK_SCAN 4096
+
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
+/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
+#define IWX_SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_AGG_UNICAST_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_AGG_UNICAST_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_IDLE_TIMER 2016 /* 2 mSec */
+#define IWX_SF_MCAST_AGING_TIMER 10016 /* 10 mSec */
+#define IWX_SF_BA_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_BA_AGING_TIMER 2016 /* 2 mSec */
+#define IWX_SF_TX_RE_IDLE_TIMER 320 /* 300 uSec */
+#define IWX_SF_TX_RE_AGING_TIMER 2016 /* 2 mSec */
+
+#define IWX_SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+
+#define IWX_SF_CFG_DUMMY_NOTIF_OFF (1 << 16)
+
+/**
+ * Smart Fifo configuration command.
+ * @state: smart fifo state, one of IWX_SF_* above.
+ * @watermark: Minimum allowed available free space in RXF for transient state.
+ * @long_delay_timeouts: aging and idle timer values for each scenario
+ * in long delay state.
+ * @full_on_timeouts: timer values for each scenario in full on state.
+ */
+struct iwx_sf_cfg_cmd {
+ uint32_t state;
+ uint32_t watermark[IWX_SF_TRANSIENT_STATES_NUMBER];
+ uint32_t long_delay_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+ uint32_t full_on_timeouts[IWX_SF_NUM_SCENARIO][IWX_SF_NUM_TIMEOUT_TYPES];
+} __packed; /* IWX_SF_CFG_API_S_VER_2 */
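+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * filling one row of the FULL_ON timeout table above from the FULL_ON
+ * timer defines. The index order within a row (aging timer first, then
+ * idle timer) is an assumption made for illustration only.
+ */
+static inline void
+iwx_sf_fill_single_unicast_example(struct iwx_sf_cfg_cmd *cmd)
+{
+	cmd->full_on_timeouts[IWX_SF_SCENARIO_SINGLE_UNICAST][0] =
+	    htole32(IWX_SF_SINGLE_UNICAST_AGING_TIMER);
+	cmd->full_on_timeouts[IWX_SF_SCENARIO_SINGLE_UNICAST][1] =
+	    htole32(IWX_SF_SINGLE_UNICAST_IDLE_TIMER);
+}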
+
+#define IWX_AC_BK 0
+#define IWX_AC_BE 1
+#define IWX_AC_VI 2
+#define IWX_AC_VO 3
+#define IWX_AC_NUM 4
+
+/**
+ * MAC context flags
+ * @IWX_MAC_PROT_FLG_TGG_PROTECT: 11g protection when transmitting OFDM frames;
+ * this will require CCK RTS/CTS2self.
+ * RTS/CTS will protect full burst time.
+ * @IWX_MAC_PROT_FLG_HT_PROT: enable HT protection
+ * @IWX_MAC_PROT_FLG_FAT_PROT: protect 40 MHz transmissions
+ * @IWX_MAC_PROT_FLG_SELF_CTS_EN: allow CTS2self
+ */
+#define IWX_MAC_PROT_FLG_TGG_PROTECT (1 << 3)
+#define IWX_MAC_PROT_FLG_HT_PROT (1 << 23)
+#define IWX_MAC_PROT_FLG_FAT_PROT (1 << 24)
+#define IWX_MAC_PROT_FLG_SELF_CTS_EN (1 << 30)
+
+#define IWX_MAC_FLG_SHORT_SLOT (1 << 4)
+#define IWX_MAC_FLG_SHORT_PREAMBLE (1 << 5)
+
+/**
+ * Supported MAC types
+ * @IWX_FW_MAC_TYPE_FIRST: lowest supported MAC type
+ * @IWX_FW_MAC_TYPE_AUX: Auxiliary MAC (internal)
+ * @IWX_FW_MAC_TYPE_LISTENER: monitor MAC type (?)
+ * @IWX_FW_MAC_TYPE_PIBSS: Pseudo-IBSS
+ * @IWX_FW_MAC_TYPE_IBSS: IBSS
+ * @IWX_FW_MAC_TYPE_BSS_STA: BSS (managed) station
+ * @IWX_FW_MAC_TYPE_P2P_DEVICE: P2P Device
+ * @IWX_FW_MAC_TYPE_P2P_STA: P2P client
+ * @IWX_FW_MAC_TYPE_GO: P2P GO
+ * @IWX_FW_MAC_TYPE_TEST: ?
+ * @IWX_FW_MAC_TYPE_MAX: highest supported MAC type
+ */
+#define IWX_FW_MAC_TYPE_FIRST 1
+#define IWX_FW_MAC_TYPE_AUX IWX_FW_MAC_TYPE_FIRST
+#define IWX_FW_MAC_TYPE_LISTENER 2
+#define IWX_FW_MAC_TYPE_PIBSS 3
+#define IWX_FW_MAC_TYPE_IBSS 4
+#define IWX_FW_MAC_TYPE_BSS_STA 5
+#define IWX_FW_MAC_TYPE_P2P_DEVICE 6
+#define IWX_FW_MAC_TYPE_P2P_STA 7
+#define IWX_FW_MAC_TYPE_GO 8
+#define IWX_FW_MAC_TYPE_TEST 9
+#define IWX_FW_MAC_TYPE_MAX IWX_FW_MAC_TYPE_TEST
+/* IWX_MAC_CONTEXT_TYPE_API_E_VER_1 */
+
+/**
+ * TSF hw timer ID
+ * @IWX_TSF_ID_A: use TSF A
+ * @IWX_TSF_ID_B: use TSF B
+ * @IWX_TSF_ID_C: use TSF C
+ * @IWX_TSF_ID_D: use TSF D
+ * @IWX_NUM_TSF_IDS: number of TSF timers available
+ */
+#define IWX_TSF_ID_A 0
+#define IWX_TSF_ID_B 1
+#define IWX_TSF_ID_C 2
+#define IWX_TSF_ID_D 3
+#define IWX_NUM_TSF_IDS 4
+/* IWX_TSF_ID_API_E_VER_1 */
+
+/**
+ * struct iwx_mac_data_ap - configuration data for AP MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @dtim_interval: dtim transmit time in TU
+ * @dtim_reciprocal: 2^32 / dtim_interval
+ * @mcast_qid: queue ID for multicast traffic
+ * NOTE: obsolete from VER2 and on
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ap {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t dtim_interval;
+ uint32_t dtim_reciprocal;
+ uint32_t mcast_qid;
+ uint32_t beacon_template;
+} __packed; /* AP_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_data_ibss - configuration data for IBSS MAC context
+ * @beacon_time: beacon transmit time in system time
+ * @beacon_tsf: beacon transmit time in TSF
+ * @bi: beacon interval in TU
+ * @bi_reciprocal: 2^32 / bi
+ * @beacon_template: beacon template ID
+ */
+struct iwx_mac_data_ibss {
+ uint32_t beacon_time;
+ uint64_t beacon_tsf;
+ uint32_t bi;
+ uint32_t bi_reciprocal;
+ uint32_t beacon_template;
+} __packed; /* IBSS_MAC_DATA_API_S_VER_1 */
+
+/**
+ * enum iwx_mac_data_policy - policy of the data path for this MAC
+ * @TWT_SUPPORTED: twt is supported
+ * @MORE_DATA_ACK_SUPPORTED: AP supports More Data Ack according to
+ * paragraph 9.4.1.17 in P802.11ax_D4 specification. Used for TWT
+ * early termination detection.
+ * @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
+ * @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
+ * @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT
+ * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used
+ * during 802.1X negotiation (and allowed during 4-way-HS)
+ */
+#define IWX_TWT_SUPPORTED (1 << 0)
+#define IWX_MORE_DATA_ACK_SUPPORTED (1 << 1)
+#define IWX_FLEXIBLE_TWT_SUPPORTED (1 << 2)
+#define IWX_PROTECTED_TWT_SUPPORTED (1 << 3)
+#define IWX_BROADCAST_TWT_SUPPORTED (1 << 4)
+#define IWX_COEX_HIGH_PRIORITY_ENABLE (1 << 5)
+
+/**
+ * struct iwx_mac_data_sta - configuration data for station MAC context
+ * @is_assoc: 1 for associated state, 0 otherwise
+ * @dtim_time: DTIM arrival time in system time
+ * @dtim_tsf: DTIM arrival time in TSF
+ * @bi: beacon interval in TU, applicable only when associated
+ * @data_policy: see &enum iwx_mac_data_policy
+ * @dtim_interval: DTIM interval in TU, applicable only when associated
+ * @listen_interval: in beacon intervals, applicable only when associated
+ * @assoc_id: unique ID assigned by the AP during association
+ */
+struct iwx_mac_data_sta {
+ uint32_t is_assoc;
+ uint32_t dtim_time;
+ uint64_t dtim_tsf;
+ uint32_t bi;
+ uint32_t reserved1;
+ uint32_t dtim_interval;
+ uint32_t data_policy;
+ uint32_t listen_interval;
+ uint32_t assoc_id;
+ uint32_t assoc_beacon_arrive_time;
+} __packed; /* IWX_STA_MAC_DATA_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_data_go - configuration data for P2P GO MAC context
+ * @ap: iwx_mac_data_ap struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ * @opp_ps_enabled: indicate that opportunistic PS allowed
+ */
+struct iwx_mac_data_go {
+ struct iwx_mac_data_ap ap;
+ uint32_t ctwin;
+ uint32_t opp_ps_enabled;
+} __packed; /* GO_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_p2p_sta - configuration data for P2P client MAC context
+ * @sta: iwx_mac_data_sta struct with most config data
+ * @ctwin: client traffic window in TU (period after TBTT when GO is present).
+ * 0 indicates that there is no CT window.
+ */
+struct iwx_mac_data_p2p_sta {
+ struct iwx_mac_data_sta sta;
+ uint32_t ctwin;
+} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */
+
+/**
+ * struct iwx_mac_data_pibss - Pseudo IBSS config data
+ * @stats_interval: interval in TU between statistics notifications to host.
+ */
+struct iwx_mac_data_pibss {
+ uint32_t stats_interval;
+} __packed; /* PIBSS_MAC_DATA_API_S_VER_1 */
+
+/*
+ * struct iwx_mac_data_p2p_dev - configuration data for the P2P Device MAC
+ * context.
+ * @is_disc_extended: if set to true, P2P Device discoverability is enabled on
+ * other channels as well. This should be set to true only when the device
+ * is discoverable and there is an active GO. Note that setting this field
+ * when not needed will increase the number of interrupts and affect
+ * platform power, as this setting opens the Rx filters on all MACs.
+ */
+struct iwx_mac_data_p2p_dev {
+ uint32_t is_disc_extended;
+} __packed; /* _P2P_DEV_MAC_DATA_API_S_VER_1 */
+
+/**
+ * MAC context filter flags
+ * @IWX_MAC_FILTER_IN_PROMISC: accept all data frames
+ * @IWX_MAC_FILTER_IN_CONTROL_AND_MGMT: pass all management and
+ * control frames to the host
+ * @IWX_MAC_FILTER_ACCEPT_GRP: accept multicast frames
+ * @IWX_MAC_FILTER_DIS_DECRYPT: don't decrypt unicast frames
+ * @IWX_MAC_FILTER_DIS_GRP_DECRYPT: don't decrypt multicast frames
+ * @IWX_MAC_FILTER_IN_BEACON: transfer foreign BSS's beacons to host
+ * (in station mode when associated)
+ * @IWX_MAC_FILTER_OUT_BCAST: filter out all broadcast frames
+ * @IWX_MAC_FILTER_IN_CRC32: extract FCS and append it to frames
+ * @IWX_MAC_FILTER_IN_PROBE_REQUEST: pass probe requests to host
+ */
+#define IWX_MAC_FILTER_IN_PROMISC (1 << 0)
+#define IWX_MAC_FILTER_IN_CONTROL_AND_MGMT (1 << 1)
+#define IWX_MAC_FILTER_ACCEPT_GRP (1 << 2)
+#define IWX_MAC_FILTER_DIS_DECRYPT (1 << 3)
+#define IWX_MAC_FILTER_DIS_GRP_DECRYPT (1 << 4)
+#define IWX_MAC_FILTER_IN_BEACON (1 << 6)
+#define IWX_MAC_FILTER_OUT_BCAST (1 << 8)
+#define IWX_MAC_FILTER_IN_CRC32 (1 << 11)
+#define IWX_MAC_FILTER_IN_PROBE_REQUEST (1 << 12)
+
+/**
+ * QoS flags
+ * @IWX_MAC_QOS_FLG_UPDATE_EDCA: ?
+ * @IWX_MAC_QOS_FLG_TGN: HT is enabled
+ * @IWX_MAC_QOS_FLG_TXOP_TYPE: ?
+ *
+ */
+#define IWX_MAC_QOS_FLG_UPDATE_EDCA (1 << 0)
+#define IWX_MAC_QOS_FLG_TGN (1 << 1)
+#define IWX_MAC_QOS_FLG_TXOP_TYPE (1 << 4)
+
+/**
+ * struct iwx_ac_qos - QOS timing params for IWX_MAC_CONTEXT_CMD
+ * @cw_min: Contention window, start value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x0f.
+ * @cw_max: Contention window, max value in numbers of slots.
+ * Should be a power-of-2, minus 1. Device's default is 0x3f.
+ * @aifsn: Number of slots in Arbitration Interframe Space (before
+ * performing random backoff timing prior to Tx). Device default 1.
+ * @fifos_mask: FIFOs used by this MAC for this AC
+ * @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
+ *
+ * One instance of this config struct for each of 4 EDCA access categories
+ * in struct iwx_qosparam_cmd.
+ *
+ * Device will automatically increase contention window by (2*CW) + 1 for each
+ * transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
+ * value, to cap the CW value.
+ */
+struct iwx_ac_qos {
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint8_t aifsn;
+ uint8_t fifos_mask;
+ uint16_t edca_txop;
+} __packed; /* IWX_AC_QOS_API_S_VER_2 */
+
+/**
+ * struct iwx_mac_ctx_cmd - command structure to configure MAC contexts
+ * ( IWX_MAC_CONTEXT_CMD = 0x28 )
+ * @id_and_color: ID and color of the MAC
+ * @action: action to perform, one of IWX_FW_CTXT_ACTION_*
+ * @mac_type: one of IWX_FW_MAC_TYPE_*
+ * @tsf_id: TSF HW timer, one of IWX_TSF_ID_*
+ * @node_addr: MAC address
+ * @bssid_addr: BSSID
+ * @cck_rates: basic rates available for CCK
+ * @ofdm_rates: basic rates available for OFDM
+ * @protection_flags: combination of IWX_MAC_PROT_FLG_FLAG_*
+ * @cck_short_preamble: 0x20 for enabling short preamble, 0 otherwise
+ * @short_slot: 0x10 for enabling short slots, 0 otherwise
+ * @filter_flags: combination of IWX_MAC_FILTER_*
+ * @qos_flags: from IWX_MAC_QOS_FLG_*
+ * @ac: one iwx_mac_qos configuration for each AC
+ * @mac_specific: one of struct iwx_mac_data_*, according to mac_type
+ */
+struct iwx_mac_ctx_cmd {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ uint32_t id_and_color;
+ uint32_t action;
+ /* IWX_MAC_CONTEXT_COMMON_DATA_API_S_VER_1 */
+ uint32_t mac_type;
+ uint32_t tsf_id;
+ uint8_t node_addr[6];
+ uint16_t reserved_for_node_addr;
+ uint8_t bssid_addr[6];
+ uint16_t reserved_for_bssid_addr;
+ uint32_t cck_rates;
+ uint32_t ofdm_rates;
+ uint32_t protection_flags;
+ uint32_t cck_short_preamble;
+ uint32_t short_slot;
+ uint32_t filter_flags;
+ /* IWX_MAC_QOS_PARAM_API_S_VER_1 */
+ uint32_t qos_flags;
+ struct iwx_ac_qos ac[IWX_AC_NUM+1];
+ /* IWX_MAC_CONTEXT_COMMON_DATA_API_S */
+ union {
+ struct iwx_mac_data_ap ap;
+ struct iwx_mac_data_go go;
+ struct iwx_mac_data_sta sta;
+ struct iwx_mac_data_p2p_sta p2p_sta;
+ struct iwx_mac_data_p2p_dev p2p_dev;
+ struct iwx_mac_data_pibss pibss;
+ struct iwx_mac_data_ibss ibss;
+ };
+} __packed; /* IWX_MAC_CONTEXT_CMD_API_S_VER_1 */
+
+static inline uint32_t iwx_reciprocal(uint32_t v)
+{
+ if (!v)
+ return 0;
+ return 0xFFFFFFFF / v;
+}
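+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver):
+ * iwx_reciprocal() above computes 0xFFFFFFFF / v as an approximation of
+ * the 2^32 / bi value the doc comments describe. The reciprocal fields
+ * of struct iwx_mac_data_ap could be derived like this, given beacon
+ * and DTIM intervals in TU.
+ */
+static inline void
+iwx_mac_fill_ap_intervals_example(struct iwx_mac_data_ap *ap,
+    uint32_t bi, uint32_t dtim_interval)
+{
+	ap->bi = htole32(bi);
+	ap->bi_reciprocal = htole32(iwx_reciprocal(bi));
+	ap->dtim_interval = htole32(dtim_interval);
+	ap->dtim_reciprocal = htole32(iwx_reciprocal(dtim_interval));
+}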
+
+/* Power Management Commands, Responses, Notifications */
+
+/**
+ * masks for LTR config command flags
+ * @IWX_LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ * memory access
+ * @IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ * reg change
+ * @IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ * D0 to D3
+ * @IWX_LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @IWX_LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+#define IWX_LTR_CFG_FLAG_FEATURE_ENABLE 0x00000001
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS 0x00000002
+#define IWX_LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH 0x00000004
+#define IWX_LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 0x00000008
+#define IWX_LTR_CFG_FLAG_SW_SET_SHORT 0x00000010
+#define IWX_LTR_CFG_FLAG_SW_SET_LONG 0x00000020
+#define IWX_LTR_CFG_FLAG_DENIE_C10_ON_PD 0x00000040
+
+#define IWX_LTR_VALID_STATES_NUM 4
+
+/**
+ * struct iwx_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwx_ltr_config_flags
+ * @static_long:
+ * @static_short:
+ * @ltr_cfg_values:
+ * @ltr_short_idle_timeout:
+ */
+struct iwx_ltr_config_cmd {
+ uint32_t flags;
+ uint32_t static_long;
+ uint32_t static_short;
+ uint32_t ltr_cfg_values[IWX_LTR_VALID_STATES_NUM];
+ uint32_t ltr_short_idle_timeout;
+} __packed; /* LTR_CAPABLE_API_S_VER_2 */
+
+/* Radio LP RX Energy Threshold measured in dBm */
+#define IWX_POWER_LPRX_RSSI_THRESHOLD 75
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MAX 94
+#define IWX_POWER_LPRX_RSSI_THRESHOLD_MIN 30
+
+/**
+ * Masks for iwx_mac_power_cmd command flags
+ * @IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow power saving by turning off
+ * the receiver and transmitter. '0' Do not allow.
+ * @IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
+ * '1' Driver enables PM (use rest of parameters)
+ * @IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM has to wake up every DTIM,
+ * '1' PM may sleep over DTIM until the listen interval.
+ * @IWX_POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @IWX_POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW Snoozing enabled
+ * @IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
+ * @IWX_POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
+ * @IWX_POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK: AP/GO's uAPSD misbehaving
+ * detection enablement
+*/
+#define IWX_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK (1 << 1)
+#define IWX_POWER_FLAGS_SKIP_OVER_DTIM_MSK (1 << 2)
+#define IWX_POWER_FLAGS_SNOOZE_ENA_MSK (1 << 5)
+#define IWX_POWER_FLAGS_BT_SCO_ENA (1 << 8)
+#define IWX_POWER_FLAGS_ADVANCE_PM_ENA_MSK (1 << 9)
+#define IWX_POWER_FLAGS_LPRX_ENA_MSK (1 << 11)
+#define IWX_POWER_FLAGS_UAPSD_MISBEHAVING_ENA_MSK (1 << 12)
+
+#define IWX_POWER_VEC_SIZE 5
+
+/**
+ * Masks for device power command flags
+ * @IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK:
+ * '1' Allow power saving by turning off the receiver and transmitter.
+ * '0' Do not allow. This flag should always be set to '1' unless
+ * one needs to disable actual power down for debug purposes.
+ * @IWX_DEVICE_POWER_FLAGS_CAM_MSK:
+ * '1' CAM (Continuous Active Mode) is set, power management is disabled.
+ * '0' Power management is enabled, one of the power schemes is applied.
+ */
+#define IWX_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK (1 << 0)
+#define IWX_DEVICE_POWER_FLAGS_CAM_MSK (1 << 13)
+
+/**
+ * struct iwx_device_power_cmd - device wide power command.
+ * IWX_POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ *
+ * @flags: Power table command flags from IWX_DEVICE_POWER_FLAGS_*
+ */
+struct iwx_device_power_cmd {
+ /* PM_POWER_TABLE_CMD_API_S_VER_6 */
+ uint16_t flags;
+ uint16_t reserved;
+} __packed;
+
+/**
+ * struct iwx_mac_power_cmd - New power command containing uAPSD support
+ * IWX_MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default: 25 sec.
+ * Minimum allowed: 3 * DTIM. The keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value even when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if the Skip over DTIM
+ * flag is set. For example, to skip over one DTIM, this value
+ * needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from the last
+ * received unicast RX packet before the client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold:
+*/
+struct iwx_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ uint32_t id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ uint16_t flags;
+ uint16_t keep_alive_seconds;
+ uint32_t rx_data_timeout;
+ uint32_t tx_data_timeout;
+ uint32_t rx_data_timeout_uapsd;
+ uint32_t tx_data_timeout_uapsd;
+ uint8_t lprx_rssi_threshold;
+ uint8_t skip_dtim_periods;
+ uint16_t snooze_interval;
+ uint16_t snooze_window;
+ uint8_t snooze_step;
+ uint8_t qndp_tid;
+ uint8_t uapsd_ac_flags;
+ uint8_t uapsd_max_sp;
+ uint8_t heavy_tx_thld_packets;
+ uint8_t heavy_rx_thld_packets;
+ uint8_t heavy_tx_thld_percentage;
+ uint8_t heavy_rx_thld_percentage;
+ uint8_t limited_ps_threshold;
+ uint8_t reserved;
+} __packed;
+
+#define IWX_DEFAULT_PS_TX_DATA_TIMEOUT (100 * 1000)
+#define IWX_DEFAULT_PS_RX_DATA_TIMEOUT (100 * 1000)
+
+/*
+ * struct iwx_uapsd_misbehaving_ap_notif - FW sends this notification when the
+ * associated AP is identified as improperly implementing the uAPSD protocol.
+ * IWX_PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78
+ * @sta_id: index of station in uCode's station table - associated AP ID in
+ * this context.
+ */
+struct iwx_uapsd_misbehaving_ap_notif {
+ uint32_t sta_id;
+ uint8_t mac_id;
+ uint8_t reserved[3];
+} __packed;
+
+/**
+ * struct iwx_beacon_filter_cmd
+ * IWX_REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
+ * @id_and_color: MAC context identifier
+ * @bf_energy_delta: Used for RSSI filtering, if in 'normal' state. Send beacon
+ * to driver if delta in Energy values calculated for this and last
+ * passed beacon is greater than this threshold. Zero value means that
+ * the Energy change is ignored for beacon filtering, and beacon will
+ * not be forced to be sent to driver regardless of this delta. Typical
+ * energy delta 5dB.
+ * @bf_roaming_energy_delta: Used for RSSI filtering, if in 'roaming' state.
+ * Send beacon to driver if delta in Energy values calculated for this
+ * and last passed beacon is greater than this threshold. Zero value
+ * means that the Energy change is ignored for beacon filtering while in
+ * Roaming state, typical energy delta 1dB.
+ * @bf_roaming_state: Used for RSSI filtering. If absolute Energy values
+ * calculated for current beacon is less than the threshold, use
+ * Roaming Energy Delta Threshold, otherwise use normal Energy Delta
+ * Threshold. Typical energy threshold is -72dBm.
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (units are in Celsius):
+ * if the current temperature is above this threshold, the Fast filter
+ * is used; if the current temperature is below this threshold, the
+ * Slow filter is used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
+ * @bf_escape_timer: Send beacons to driver if no beacons were passed
+ * for a specific period of time. Units: Beacons.
+ * @ba_escape_timer: Fully receive and parse beacon if no beacons were passed
+ * for a longer period of time than this escape-timeout. Units: Beacons.
+ * @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
+ * @bf_threshold_absolute_low: See below.
+ * @bf_threshold_absolute_high: Send Beacon to driver if Energy value calculated
+ * for this beacon crossed this absolute threshold. For the 'Increase'
+ * direction the bf_energy_absolute_low[i] is used. For the 'Decrease'
+ * direction the bf_energy_absolute_high[i] is used. Zero value means
+ * that this specific threshold is ignored for beacon filtering, and
+ * beacon will not be forced to be sent to driver due to this setting.
+ */
+struct iwx_beacon_filter_cmd {
+ uint32_t bf_energy_delta;
+ uint32_t bf_roaming_energy_delta;
+ uint32_t bf_roaming_state;
+ uint32_t bf_temp_threshold;
+ uint32_t bf_temp_fast_filter;
+ uint32_t bf_temp_slow_filter;
+ uint32_t bf_enable_beacon_filter;
+ uint32_t bf_debug_flag;
+ uint32_t bf_escape_timer;
+ uint32_t ba_escape_timer;
+ uint32_t ba_enable_beacon_abort;
+ uint32_t bf_threshold_absolute_low[2];
+ uint32_t bf_threshold_absolute_high[2];
+} __packed; /* BEACON_FILTER_CONFIG_API_S_VER_4 */
+
+/* Beacon filtering and beacon abort */
+#define IWX_BF_ENERGY_DELTA_DEFAULT 5
+#define IWX_BF_ENERGY_DELTA_MAX 255
+#define IWX_BF_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWX_BF_ROAMING_ENERGY_DELTA_MAX 255
+#define IWX_BF_ROAMING_ENERGY_DELTA_MIN 0
+
+#define IWX_BF_ROAMING_STATE_DEFAULT 72
+#define IWX_BF_ROAMING_STATE_MAX 255
+#define IWX_BF_ROAMING_STATE_MIN 0
+
+#define IWX_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWX_BF_TEMP_THRESHOLD_MAX 255
+#define IWX_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWX_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWX_BF_TEMP_FAST_FILTER_MAX 255
+#define IWX_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWX_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWX_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWX_BF_TEMP_SLOW_FILTER_MIN 0
+
+#define IWX_BF_ENABLE_BEACON_FILTER_DEFAULT 1
+
+#define IWX_BF_DEBUG_FLAG_DEFAULT 0
+
+#define IWX_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWX_BF_ESCAPE_TIMER_MAX 1024
+#define IWX_BF_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWX_BA_ESCAPE_TIMER_D3 9
+#define IWX_BA_ESCAPE_TIMER_MAX 1024
+#define IWX_BA_ESCAPE_TIMER_MIN 0
+
+#define IWX_BA_ENABLE_BEACON_ABORT_DEFAULT 1
+
+#define IWX_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = htole32(IWX_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ htole32(IWX_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = htole32(IWX_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = htole32(IWX_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = htole32(IWX_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = htole32(IWX_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = htole32(IWX_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = htole32(IWX_BF_ESCAPE_TIMER_DEFAULT), \
+ .ba_escape_timer = htole32(IWX_BA_ESCAPE_TIMER_DEFAULT)
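+
+/*
+ * Illustrative usage (editor's example): the defaults macro above is
+ * meant to be expanded inside a designated initializer, e.g.:
+ *
+ *	struct iwx_beacon_filter_cmd cmd = {
+ *		IWX_BF_CMD_CONFIG_DEFAULTS,
+ *		.bf_enable_beacon_filter = htole32(1),
+ *		.ba_enable_beacon_abort = htole32(1),
+ *	};
+ */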
+
+/* uCode API values for HT/VHT bit rates */
+#define IWX_RATE_HT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_HT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_HT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_HT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_HT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_HT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_HT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_HT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_HT_MIMO2_MCS_8_PLCP 0x8
+#define IWX_RATE_HT_MIMO2_MCS_9_PLCP 0x9
+#define IWX_RATE_HT_MIMO2_MCS_10_PLCP 0xA
+#define IWX_RATE_HT_MIMO2_MCS_11_PLCP 0xB
+#define IWX_RATE_HT_MIMO2_MCS_12_PLCP 0xC
+#define IWX_RATE_HT_MIMO2_MCS_13_PLCP 0xD
+#define IWX_RATE_HT_MIMO2_MCS_14_PLCP 0xE
+#define IWX_RATE_HT_MIMO2_MCS_15_PLCP 0xF
+#define IWX_RATE_VHT_SISO_MCS_0_PLCP 0
+#define IWX_RATE_VHT_SISO_MCS_1_PLCP 1
+#define IWX_RATE_VHT_SISO_MCS_2_PLCP 2
+#define IWX_RATE_VHT_SISO_MCS_3_PLCP 3
+#define IWX_RATE_VHT_SISO_MCS_4_PLCP 4
+#define IWX_RATE_VHT_SISO_MCS_5_PLCP 5
+#define IWX_RATE_VHT_SISO_MCS_6_PLCP 6
+#define IWX_RATE_VHT_SISO_MCS_7_PLCP 7
+#define IWX_RATE_VHT_SISO_MCS_8_PLCP 8
+#define IWX_RATE_VHT_SISO_MCS_9_PLCP 9
+#define IWX_RATE_VHT_MIMO2_MCS_0_PLCP 0x10
+#define IWX_RATE_VHT_MIMO2_MCS_1_PLCP 0x11
+#define IWX_RATE_VHT_MIMO2_MCS_2_PLCP 0x12
+#define IWX_RATE_VHT_MIMO2_MCS_3_PLCP 0x13
+#define IWX_RATE_VHT_MIMO2_MCS_4_PLCP 0x14
+#define IWX_RATE_VHT_MIMO2_MCS_5_PLCP 0x15
+#define IWX_RATE_VHT_MIMO2_MCS_6_PLCP 0x16
+#define IWX_RATE_VHT_MIMO2_MCS_7_PLCP 0x17
+#define IWX_RATE_VHT_MIMO2_MCS_8_PLCP 0x18
+#define IWX_RATE_VHT_MIMO2_MCS_9_PLCP 0x19
+#define IWX_RATE_HT_SISO_MCS_INV_PLCP 0x20
+#define IWX_RATE_HT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_SISO_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_VHT_MIMO2_MCS_INV_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_8_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+#define IWX_RATE_HT_SISO_MCS_9_PLCP IWX_RATE_HT_SISO_MCS_INV_PLCP
+
+/*
+ * These serve as indexes into struct iwx_rate iwx_rates[IWX_RIDX_MAX].
+ */
+enum {
+ IWX_RATE_1M_INDEX = 0,
+ IWX_FIRST_CCK_RATE = IWX_RATE_1M_INDEX,
+ IWX_RATE_2M_INDEX,
+ IWX_RATE_5M_INDEX,
+ IWX_RATE_11M_INDEX,
+ IWX_LAST_CCK_RATE = IWX_RATE_11M_INDEX,
+ IWX_RATE_6M_INDEX,
+ IWX_FIRST_OFDM_RATE = IWX_RATE_6M_INDEX,
+ IWX_RATE_MCS_0_INDEX = IWX_RATE_6M_INDEX,
+ IWX_FIRST_HT_RATE = IWX_RATE_MCS_0_INDEX,
+ IWX_FIRST_VHT_RATE = IWX_RATE_MCS_0_INDEX,
+ IWX_RATE_9M_INDEX,
+ IWX_RATE_12M_INDEX,
+ IWX_RATE_MCS_1_INDEX = IWX_RATE_12M_INDEX,
+ IWX_RATE_MCS_8_INDEX,
+ IWX_FIRST_HT_MIMO2_RATE = IWX_RATE_MCS_8_INDEX,
+ IWX_RATE_18M_INDEX,
+ IWX_RATE_MCS_2_INDEX = IWX_RATE_18M_INDEX,
+ IWX_RATE_24M_INDEX,
+ IWX_RATE_MCS_3_INDEX = IWX_RATE_24M_INDEX,
+ IWX_RATE_MCS_9_INDEX,
+ IWX_RATE_36M_INDEX,
+ IWX_RATE_MCS_4_INDEX = IWX_RATE_36M_INDEX,
+ IWX_RATE_MCS_10_INDEX,
+ IWX_RATE_48M_INDEX,
+ IWX_RATE_MCS_5_INDEX = IWX_RATE_48M_INDEX,
+ IWX_RATE_MCS_11_INDEX,
+ IWX_RATE_54M_INDEX,
+ IWX_RATE_MCS_6_INDEX = IWX_RATE_54M_INDEX,
+ IWX_LAST_NON_HT_RATE = IWX_RATE_54M_INDEX,
+ IWX_RATE_MCS_7_INDEX,
+ IWX_LAST_HT_SISO_RATE = IWX_RATE_MCS_7_INDEX,
+ IWX_RATE_MCS_12_INDEX,
+ IWX_RATE_MCS_13_INDEX,
+ IWX_RATE_MCS_14_INDEX,
+ IWX_RATE_MCS_15_INDEX,
+ IWX_LAST_HT_RATE = IWX_RATE_MCS_15_INDEX,
+ IWX_LAST_VHT_RATE = IWX_RATE_MCS_9_INDEX,
+ IWX_RATE_COUNT_LEGACY = IWX_LAST_NON_HT_RATE + 1,
+ IWX_RATE_COUNT = IWX_LAST_HT_RATE + 1,
+};
+
+#define IWX_RATE_BIT_MSK(r) (1 << (IWX_RATE_##r##M_INDEX))
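+/*
+ * For example, IWX_RATE_BIT_MSK(11) expands to (1 << IWX_RATE_11M_INDEX),
+ * i.e. the CCK 11 Mbps bit in a legacy rate bitmap.
+ */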
+
+/* fw API values for legacy bit rates, both OFDM and CCK */
+#define IWX_RATE_6M_PLCP 13
+#define IWX_RATE_9M_PLCP 15
+#define IWX_RATE_12M_PLCP 5
+#define IWX_RATE_18M_PLCP 7
+#define IWX_RATE_24M_PLCP 9
+#define IWX_RATE_36M_PLCP 11
+#define IWX_RATE_48M_PLCP 1
+#define IWX_RATE_54M_PLCP 3
+#define IWX_RATE_1M_PLCP 10
+#define IWX_RATE_2M_PLCP 20
+#define IWX_RATE_5M_PLCP 55
+#define IWX_RATE_11M_PLCP 110
+#define IWX_RATE_INVM_PLCP 0xff
+
+/*
+ * rate_n_flags bit fields version 1
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ * High-throughput (HT) rate format
+ * bit 8 is 1, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Very High-throughput (VHT) rate format
+ * bit 8 is 0, bit 26 is 1, bit 9 is 0 (OFDM)
+ * Legacy OFDM rate format for bits 7:0
+ * bit 8 is 0, bit 26 is 0, bit 9 is 0 (OFDM)
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK)
+ */
+
+/* Bit 8: (1) HT format, (0) legacy or VHT format */
+#define IWX_RATE_MCS_HT_POS 8
+#define IWX_RATE_MCS_HT_MSK_V1 (1 << IWX_RATE_MCS_HT_POS)
+
+/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
+#define IWX_RATE_MCS_CCK_POS_V1 9
+#define IWX_RATE_MCS_CCK_MSK_V1 (1 << IWX_RATE_MCS_CCK_POS_V1)
+
+/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
+#define IWX_RATE_MCS_VHT_POS_V1 26
+#define IWX_RATE_MCS_VHT_MSK_V1 (1 << IWX_RATE_MCS_VHT_POS_V1)
+
+
+/*
+ * High-throughput (HT) rate format for bits 7:0
+ *
+ * 2-0: MCS rate base
+ * 0) 6 Mbps
+ * 1) 12 Mbps
+ * 2) 18 Mbps
+ * 3) 24 Mbps
+ * 4) 36 Mbps
+ * 5) 48 Mbps
+ * 6) 54 Mbps
+ * 7) 60 Mbps
+ * 4-3: 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ * 5: Value of 0x20 in bits 7:0 indicates 6 Mbps HT40 duplicate data
+ * (bits 7-6 are zero)
+ *
+ * Together the low 5 bits work out to the MCS index because we don't
+ * support MCSes above 15/23, and 0-7 have one stream, 8-15 have two
+ * streams and 16-23 have three streams. We could also support MCS 32
+ * which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
+ */
+#define IWX_RATE_HT_MCS_RATE_CODE_MSK_V1 0x7
+#define IWX_RATE_HT_MCS_NSS_POS_V1 3
+#define IWX_RATE_HT_MCS_NSS_MSK_V1 (3 << IWX_RATE_HT_MCS_NSS_POS_V1)
+#define IWX_RATE_HT_MCS_MIMO2_MSK_V1 (1 << IWX_RATE_HT_MCS_NSS_POS_V1)
+
+/* Bit 10: (1) Use Green Field preamble */
+#define IWX_RATE_HT_MCS_GF_POS 10
+#define IWX_RATE_HT_MCS_GF_MSK (1 << IWX_RATE_HT_MCS_GF_POS)
+
+#define IWX_RATE_HT_MCS_INDEX_MSK_V1 0x3f
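+
+/*
+ * Illustrative sketch (editor's example, not part of the firmware API):
+ * extracting the HT MCS index from a version-1 rate_n_flags value with
+ * the masks above. Returns -1 for non-HT rates.
+ */
+static inline int
+iwx_rate_v1_ht_mcs_example(uint32_t rate_n_flags)
+{
+	if ((rate_n_flags & IWX_RATE_MCS_HT_MSK_V1) == 0)
+		return -1;
+	/* Bits 5:0 carry the MCS index (0-7 SISO, 8-15 MIMO2). */
+	return (rate_n_flags & IWX_RATE_HT_MCS_INDEX_MSK_V1);
+}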
+
+/*
+ * Very High-throughput (VHT) rate format for bits 7:0
+ *
+ * 3-0: VHT MCS (0-9)
+ * 5-4: number of streams - 1:
+ * 0) Single stream (SISO)
+ * 1) Dual stream (MIMO)
+ * 2) Triple stream (MIMO)
+ */
+
+/* Bit 4-5: (0) SISO, (1) MIMO2 (2) MIMO3 */
+#define IWX_RATE_VHT_MCS_RATE_CODE_MSK 0xf
+#define IWX_RATE_VHT_MCS_NSS_POS 4
+#define IWX_RATE_VHT_MCS_NSS_MSK (3 << IWX_RATE_VHT_MCS_NSS_POS)
+#define IWX_RATE_VHT_MCS_MIMO2_MSK (1 << IWX_RATE_VHT_MCS_NSS_POS)
+
+/*
+ * Legacy OFDM rate format for bits 7:0
+ *
+ * 3-0: 0xD) 6 Mbps
+ * 0xF) 9 Mbps
+ * 0x5) 12 Mbps
+ * 0x7) 18 Mbps
+ * 0x9) 24 Mbps
+ * 0xB) 36 Mbps
+ * 0x1) 48 Mbps
+ * 0x3) 54 Mbps
+ * (bits 7-4 are 0)
+ *
+ * Legacy CCK rate format for bits 7:0:
+ * bit 8 is 0, bit 26 is 0, bit 9 is 1 (CCK):
+ *
+ * 6-0: 10) 1 Mbps
+ * 20) 2 Mbps
+ * 55) 5.5 Mbps
+ * 110) 11 Mbps
+ * (bit 7 is 0)
+ */
+#define IWX_RATE_LEGACY_RATE_MSK_V1 0xff
+
+/* Bit 10 - OFDM HE */
+#define IWX_RATE_MCS_HE_POS_V1 10
+#define IWX_RATE_MCS_HE_MSK_V1 (1 << IWX_RATE_MCS_HE_POS_V1)
+
+/*
+ * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
+ * 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
+ */
+#define IWX_RATE_MCS_CHAN_WIDTH_POS 11
+#define IWX_RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_20_V1 (0 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_40_V1 (1 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_80_V1 (2 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_160_V1 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
+#define IWX_RATE_MCS_SGI_POS_V1 13
+#define IWX_RATE_MCS_SGI_MSK_V1 (1 << IWX_RATE_MCS_SGI_POS_V1)
+
+/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C (unused) */
+#define IWX_RATE_MCS_ANT_POS 14
+#define IWX_RATE_MCS_ANT_A_MSK (1 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_B_MSK (2 << IWX_RATE_MCS_ANT_POS)
+#define IWX_RATE_MCS_ANT_AB_MSK (IWX_RATE_MCS_ANT_A_MSK | \
+ IWX_RATE_MCS_ANT_B_MSK)
+#define IWX_RATE_MCS_ANT_MSK IWX_RATE_MCS_ANT_AB_MSK
+#define IWX_RATE_MCS_ANT_NUM 2
+
+/* Bit 17: (0) SS, (1) SS*2 */
+#define IWX_RATE_MCS_STBC_POS 17
+#define IWX_RATE_MCS_STBC_MSK (1 << IWX_RATE_MCS_STBC_POS)
+
+/* Bit 18: OFDM-HE dual carrier mode */
+#define IWX_RATE_HE_DUAL_CARRIER_MODE 18
+#define IWX_RATE_HE_DUAL_CARRIER_MODE_MSK (1 << IWX_RATE_HE_DUAL_CARRIER_MODE)
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on */
+#define IWX_RATE_MCS_BF_POS 19
+#define IWX_RATE_MCS_BF_MSK (1 << IWX_RATE_MCS_BF_POS)
+
+/*
+ * Bit 20-21: HE LTF type and guard interval
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 & SGI (bit 13) clear 4xLTF+3.2us
+ * 3 & SGI (bit 13) set 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ * 3 (does not occur)
+ */
+#define IWX_RATE_MCS_HE_GI_LTF_POS 20
+#define IWX_RATE_MCS_HE_GI_LTF_MSK_V1 (3 << IWX_RATE_MCS_HE_GI_LTF_POS)
+
+/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define IWX_RATE_MCS_HE_TYPE_POS_V1 22
+#define IWX_RATE_MCS_HE_TYPE_SU_V1 (0 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_EXT_SU_V1 (1 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_MU_V1 (2 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_TRIG_V1 (3 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+#define IWX_RATE_MCS_HE_TYPE_MSK_V1 (3 << IWX_RATE_MCS_HE_TYPE_POS_V1)
+
+/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, (3) 8x20MHz */
+#define IWX_RATE_MCS_DUP_POS_V1 24
+#define IWX_RATE_MCS_DUP_MSK_V1 (3 << IWX_RATE_MCS_DUP_POS_V1)
+
+/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
+#define IWX_RATE_MCS_LDPC_POS_V1 27
+#define IWX_RATE_MCS_LDPC_MSK_V1 (1 << IWX_RATE_MCS_LDPC_POS_V1)
+
+/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define IWX_RATE_MCS_HE_106T_POS_V1 28
+#define IWX_RATE_MCS_HE_106T_MSK_V1 (1 << IWX_RATE_MCS_HE_106T_POS_V1)
+
+/* Bit 30-31: (1) RTS, (2) CTS */
+#define IWX_RATE_MCS_RTS_REQUIRED_POS (30)
+#define IWX_RATE_MCS_RTS_REQUIRED_MSK (1 << IWX_RATE_MCS_RTS_REQUIRED_POS)
+#define IWX_RATE_MCS_CTS_REQUIRED_POS (31)
+#define IWX_RATE_MCS_CTS_REQUIRED_MSK (1 << IWX_RATE_MCS_CTS_REQUIRED_POS)
+
+
+/* rate_n_flags bit field version 2
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ */
+
+/* Bits 10-8: rate format
+ * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
+ * (3) Very High-throughput (VHT) (4) High-efficiency (HE)
+ * (5) Extremely High-throughput (EHT)
+ */
+#define IWX_RATE_MCS_MOD_TYPE_POS 8
+#define IWX_RATE_MCS_MOD_TYPE_MSK (0x7 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_CCK_MSK (0 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_LEGACY_OFDM_MSK (1 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_HT_MSK (2 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_VHT_MSK (3 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_HE_MSK (4 << IWX_RATE_MCS_MOD_TYPE_POS)
+#define IWX_RATE_MCS_EHT_MSK (5 << IWX_RATE_MCS_MOD_TYPE_POS)
+
+/*
+ * Legacy CCK rate format for bits 0:3:
+ *
+ * (0) 0xa - 1 Mbps
+ * (1) 0x14 - 2 Mbps
+ * (2) 0x37 - 5.5 Mbps
+ * (3) 0x6e - 11 Mbps
+ *
+ * Legacy OFDM rate format for bits 0:3:
+ *
+ * (0) 6 Mbps
+ * (1) 9 Mbps
+ * (2) 12 Mbps
+ * (3) 18 Mbps
+ * (4) 24 Mbps
+ * (5) 36 Mbps
+ * (6) 48 Mbps
+ * (7) 54 Mbps
+ *
+ */
+#define IWX_RATE_LEGACY_RATE_MSK 0x7
+
+/*
+ * HT, VHT, HE, EHT rate format for bits 3:0
+ * 3-0: MCS
+ *
+ */
+#define IWX_RATE_HT_MCS_CODE_MSK 0x7
+#define IWX_RATE_MCS_NSS_POS 4
+#define IWX_RATE_MCS_NSS_MSK (1 << IWX_RATE_MCS_NSS_POS)
+#define IWX_RATE_MCS_CODE_MSK 0xf
+#define IWX_RATE_HT_MCS_INDEX(r) ((((r) & IWX_RATE_MCS_NSS_MSK) >> 1) | \
+ ((r) & IWX_RATE_HT_MCS_CODE_MSK))
+
+#define IWX_RATE_VHT_MCS_CODE(r) ((r) & IWX_RATE_HT_MCS_CODE_MSK)
+#define IWX_RATE_VHT_MCS_NSS(r) \
+	(((r) & IWX_RATE_MCS_NSS_MSK) >> IWX_RATE_MCS_NSS_POS)
+
+/* Bits 7-5: reserved */
+
+/*
+ * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz
+ */
+#define IWX_RATE_MCS_CHAN_WIDTH_MSK (0x7 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_20 (0 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_40 (1 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_80 (2 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_160 (3 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+#define IWX_RATE_MCS_CHAN_WIDTH_320 (4 << IWX_RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 15-14: Antenna selection:
+ * Bit 14: Ant A active
+ * Bit 15: Ant B active
+ *
+ * All relevant definitions are same as in v1
+ */
+
+/* Bit 16: (1) LDPC enabled, (0) LDPC disabled */
+#define IWX_RATE_MCS_LDPC_POS 16
+#define IWX_RATE_MCS_LDPC_MSK (1 << IWX_RATE_MCS_LDPC_POS)
+
+/* Bit 17: (0) SS, (1) SS*2 (same as v1) */
+
+/* Bit 18: OFDM-HE dual carrier mode (same as v1) */
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */
+
+/*
+ * Bit 22-20: HE LTF type and guard interval
+ * CCK:
+ * 0 long preamble
+ * 1 short preamble
+ * HT/VHT:
+ * 0 0.8us
+ * 1 0.4us
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * 4 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ */
+#define IWX_RATE_MCS_HE_GI_LTF_MSK (0x7 << IWX_RATE_MCS_HE_GI_LTF_POS)
+#define IWX_RATE_MCS_SGI_POS IWX_RATE_MCS_HE_GI_LTF_POS
+#define IWX_RATE_MCS_SGI_MSK (1 << IWX_RATE_MCS_SGI_POS)
+#define IWX_RATE_MCS_HE_SU_4_LTF 3
+#define IWX_RATE_MCS_HE_SU_4_LTF_08_GI 4
+
+/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define IWX_RATE_MCS_HE_TYPE_POS 23
+#define IWX_RATE_MCS_HE_TYPE_SU (0 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_EXT_SU (1 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_MU (2 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_TRIG (3 << IWX_RATE_MCS_HE_TYPE_POS)
+#define IWX_RATE_MCS_HE_TYPE_MSK (3 << IWX_RATE_MCS_HE_TYPE_POS)
+
+/* Bit 25: duplicate channel enabled
+ *
+ * if this bit is set, duplicate is according to BW (bits 11-13):
+ *
+ * CCK: 2x 20MHz
+ * OFDM Legacy: N x 20MHz, (N = BW / 20MHz, either 2, 4, 8, 16)
+ * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160)
+ */
+#define IWX_RATE_MCS_DUP_POS 25
+#define IWX_RATE_MCS_DUP_MSK (1 << IWX_RATE_MCS_DUP_POS)
+
+/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define IWX_RATE_MCS_HE_106T_POS 26
+#define IWX_RATE_MCS_HE_106T_MSK (1 << IWX_RATE_MCS_HE_106T_POS)
+
+/* Bit 27: EHT extra LTF:
+ * instead of 1 LTF for SISO use 2 LTFs,
+ * instead of 2 LTFs for NSTS=2 use 4 LTFs */
+#define IWX_RATE_MCS_EHT_EXTRA_LTF_POS 27
+#define IWX_RATE_MCS_EHT_EXTRA_LTF_MSK (1 << IWX_RATE_MCS_EHT_EXTRA_LTF_POS)
+
+/* Bit 31-28: reserved */
+
+
+/* Link Quality definitions */
+
+/* # entries in rate scale table to support Tx retries */
+#define IWX_LQ_MAX_RETRY_NUM 16
+
+/* Link quality command flags bit fields */
+
+/* Bit 0: (0) Don't use RTS (1) Use RTS */
+#define IWX_LQ_FLAG_USE_RTS_POS 0
+#define IWX_LQ_FLAG_USE_RTS_MSK (1 << IWX_LQ_FLAG_USE_RTS_POS)
+
+/* Bit 1-3: LQ command color. Used to match responses to LQ commands */
+#define IWX_LQ_FLAG_COLOR_POS 1
+#define IWX_LQ_FLAG_COLOR_MSK (7 << IWX_LQ_FLAG_COLOR_POS)
+
+/* Bit 4-5: Tx RTS BW Signalling
+ * (0) No RTS BW signalling
+ * (1) Static BW signalling
+ * (2) Dynamic BW signalling
+ */
+#define IWX_LQ_FLAG_RTS_BW_SIG_POS 4
+#define IWX_LQ_FLAG_RTS_BW_SIG_NONE (0 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_STATIC (1 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+#define IWX_LQ_FLAG_RTS_BW_SIG_DYNAMIC (2 << IWX_LQ_FLAG_RTS_BW_SIG_POS)
+
+/* Bit 6: (0) No dynamic BW selection (1) Allow dynamic BW selection
+ * Dynamic BW selection allows Tx with narrower BW than requested in rates
+ */
+#define IWX_LQ_FLAG_DYNAMIC_BW_POS 6
+#define IWX_LQ_FLAG_DYNAMIC_BW_MSK (1 << IWX_LQ_FLAG_DYNAMIC_BW_POS)
+
+#define IWX_RLC_CHAIN_INFO_DRIVER_FORCE (1 << 0)
+#define IWX_RLC_CHAIN_INFO_VALID 0x000e
+#define IWX_RLC_CHAIN_INFO_FORCE 0x0070
+#define IWX_RLC_CHAIN_INFO_FORCE_MIMO 0x0380
+#define IWX_RLC_CHAIN_INFO_COUNT 0x0c00
+#define IWX_RLC_CHAIN_INFO_MIMO_COUNT 0x3000
+
+/**
+ * struct iwx_rlc_properties - RLC properties
+ * @rx_chain_info: RX chain info, IWX_RLC_CHAIN_INFO_*
+ * @reserved: reserved
+ */
+struct iwx_rlc_properties {
+ uint32_t rx_chain_info;
+ uint32_t reserved;
+} __packed; /* RLC_PROPERTIES_S_VER_1 */
+
+#define IWX_SAD_MODE_ENABLED (1 << 0)
+#define IWX_SAD_MODE_DEFAULT_ANT_MSK 0x6
+#define IWX_SAD_MODE_DEFAULT_ANT_FW 0x0
+#define IWX_SAD_MODE_DEFAULT_ANT_A 0x2
+#define IWX_SAD_MODE_DEFAULT_ANT_B 0x4
+
+/**
+ * struct iwx_sad_properties - SAD properties
+ * @chain_a_sad_mode: chain A SAD mode, IWX_SAD_MODE_*
+ * @chain_b_sad_mode: chain B SAD mode, IWX_SAD_MODE_*
+ * @mac_id: MAC index
+ * @reserved: reserved
+ */
+struct iwx_sad_properties {
+ uint32_t chain_a_sad_mode;
+ uint32_t chain_b_sad_mode;
+ uint32_t mac_id;
+ uint32_t reserved;
+} __packed;
+
+/**
+ * struct iwx_rlc_config_cmd - RLC configuration
+ * @phy_id: PHY index
+ * @rlc: RLC properties, &struct iwx_rlc_properties
+ * @sad: SAD (single antenna diversity) options, &struct iwx_sad_properties
+ * @flags: flags, IWX_RLC_FLAGS_*
+ * @reserved: reserved
+ */
+struct iwx_rlc_config_cmd {
+ uint32_t phy_id;
+ struct iwx_rlc_properties rlc;
+ struct iwx_sad_properties sad;
+ uint8_t flags;
+ uint8_t reserved[3];
+} __packed; /* RLC_CONFIG_CMD_API_S_VER_2 */
+
+#define IWX_MAX_BAID_OLD 16 /* MAX_IMMEDIATE_BA_API_D_VER_2 */
+#define IWX_MAX_BAID 32 /* MAX_IMMEDIATE_BA_API_D_VER_3 */
+
+/**
+ * BAID allocation/config action
+ * @IWX_RX_BAID_ACTION_ADD: add a new BAID session
+ * @IWX_RX_BAID_ACTION_MODIFY: modify the BAID session
+ * @IWX_RX_BAID_ACTION_REMOVE: remove the BAID session
+ */
+#define IWX_RX_BAID_ACTION_ADD 0
+#define IWX_RX_BAID_ACTION_MODIFY 1
+#define IWX_RX_BAID_ACTION_REMOVE 2
+/* RX_BAID_ALLOCATION_ACTION_E_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_alloc - BAID allocation data
+ * @sta_id_mask: station ID mask
+ * @tid: the TID for this session
+ * @reserved: reserved
+ * @ssn: the starting sequence number
+ * @win_size: RX BA session window size
+ */
+struct iwx_rx_baid_cfg_cmd_alloc {
+ uint32_t sta_id_mask;
+ uint8_t tid;
+ uint8_t reserved[3];
+ uint16_t ssn;
+ uint16_t win_size;
+} __packed; /* RX_BAID_ALLOCATION_ADD_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_modify - BAID modification data
+ * @old_sta_id_mask: old station ID mask
+ * @new_sta_id_mask: new station ID mask
+ * @tid: TID of the BAID
+ */
+struct iwx_rx_baid_cfg_cmd_modify {
+ uint32_t old_sta_id_mask;
+ uint32_t new_sta_id_mask;
+ uint32_t tid;
+} __packed; /* RX_BAID_ALLOCATION_MODIFY_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_remove_v1 - BAID removal data
+ * @baid: the BAID to remove
+ */
+struct iwx_rx_baid_cfg_cmd_remove_v1 {
+ uint32_t baid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd_remove - BAID removal data
+ * @sta_id_mask: the station mask of the BAID to remove
+ * @tid: the TID of the BAID to remove
+ */
+struct iwx_rx_baid_cfg_cmd_remove {
+ uint32_t sta_id_mask;
+ uint32_t tid;
+} __packed; /* RX_BAID_ALLOCATION_REMOVE_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_rx_baid_cfg_cmd - BAID allocation/config command
+ * @action: the action, one of IWX_RX_BAID_ACTION_*
+ */
+struct iwx_rx_baid_cfg_cmd {
+ uint32_t action;
+ union {
+ struct iwx_rx_baid_cfg_cmd_alloc alloc;
+ struct iwx_rx_baid_cfg_cmd_modify modify;
+ struct iwx_rx_baid_cfg_cmd_remove_v1 remove_v1;
+ struct iwx_rx_baid_cfg_cmd_remove remove;
+ }; /* RX_BAID_ALLOCATION_OPERATION_API_U_VER_2 */
+} __packed; /* RX_BAID_ALLOCATION_CONFIG_CMD_API_S_VER_2 */
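+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver):
+ * building a BAID-add command with the union above; multi-byte fields
+ * are converted to little-endian as the firmware expects.
+ */
+static inline void
+iwx_rx_baid_alloc_example(struct iwx_rx_baid_cfg_cmd *cmd,
+    uint32_t sta_id_mask, uint8_t tid, uint16_t ssn, uint16_t win_size)
+{
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->action = htole32(IWX_RX_BAID_ACTION_ADD);
+	cmd->alloc.sta_id_mask = htole32(sta_id_mask);
+	cmd->alloc.tid = tid;
+	cmd->alloc.ssn = htole16(ssn);
+	cmd->alloc.win_size = htole16(win_size);
+}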
+
+/**
+ * struct iwx_rx_baid_cfg_resp - BAID allocation response
+ * @baid: the allocated BAID
+ */
+struct iwx_rx_baid_cfg_resp {
+ uint32_t baid;
+} __packed; /* RX_BAID_ALLOCATION_RESPONSE_API_S_VER_1 */
+
+/**
+ * scheduler queue operation
+ * @IWX_SCD_QUEUE_ADD: allocate a new queue
+ * @IWX_SCD_QUEUE_REMOVE: remove a queue
+ * @IWX_SCD_QUEUE_MODIFY: modify a queue
+ */
+#define IWX_SCD_QUEUE_ADD 0
+#define IWX_SCD_QUEUE_REMOVE 1
+#define IWX_SCD_QUEUE_MODIFY 2
+
+/**
+ * struct iwx_scd_queue_cfg_cmd - scheduler queue allocation command
+ * @operation: the operation, one of IWX_SCD_QUEUE_*
+ * @u.add.sta_mask: station mask
+ * @u.add.tid: TID
+ * @u.add.reserved: reserved
+ * @u.add.flags: flags from IWX_TX_QUEUE_CFG_*, except
+ * %IWX_TX_QUEUE_CFG_ENABLE_QUEUE is not valid
+ * @u.add.cb_size: size code
+ * @u.add.bc_dram_addr: byte-count table IOVA
+ * @u.add.tfdq_dram_addr: TFD queue IOVA
+ * @u.remove.sta_mask: station mask of queue to remove
+ * @u.remove.tid: TID of queue to remove
+ * @u.modify.old_sta_mask: old station mask for modify
+ * @u.modify.tid: TID of queue to modify
+ * @u.modify.new_sta_mask: new station mask for modify
+ */
+struct iwx_scd_queue_cfg_cmd {
+ uint32_t operation;
+ union {
+ struct {
+ uint32_t sta_mask;
+ uint8_t tid;
+ uint8_t reserved[3];
+ uint32_t flags;
+ uint32_t cb_size;
+ uint64_t bc_dram_addr;
+ uint64_t tfdq_dram_addr;
+ } __packed add; /* TX_QUEUE_CFG_CMD_ADD_API_S_VER_1 */
+ struct {
+ uint32_t sta_mask;
+ uint32_t tid;
+ } __packed remove; /* TX_QUEUE_CFG_CMD_REMOVE_API_S_VER_1 */
+ struct {
+ uint32_t old_sta_mask;
+ uint32_t tid;
+ uint32_t new_sta_mask;
+ } __packed modify; /* TX_QUEUE_CFG_CMD_MODIFY_API_S_VER_1 */
+ } __packed u; /* TX_QUEUE_CFG_CMD_OPERATION_API_U_VER_1 */
+} __packed; /* TX_QUEUE_CFG_CMD_API_S_VER_3 */
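+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver):
+ * filling the remove branch of the operation union above.
+ */
+static inline void
+iwx_scd_queue_remove_example(struct iwx_scd_queue_cfg_cmd *cmd,
+    uint32_t sta_mask, uint8_t tid)
+{
+	cmd->operation = htole32(IWX_SCD_QUEUE_REMOVE);
+	cmd->u.remove.sta_mask = htole32(sta_mask);
+	cmd->u.remove.tid = htole32(tid);
+}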
+
+/**
+ * Options for TLC config flags
+ * @IWX_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
+ * bandwidths <= 80MHz
+ * @IWX_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK: enable STBC in HE at 160MHz
+ * bandwidth
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 1 spatial
+ * stream
+ * @IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK: enable HE Dual Carrier Modulation
+ * for BPSK (MCS 0) with 2 spatial
+ * streams
+ */
+#define IWX_TLC_MNG_CFG_FLAGS_STBC_MSK (1 << 0)
+#define IWX_TLC_MNG_CFG_FLAGS_LDPC_MSK (1 << 1)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_STBC_160MHZ_MSK (1 << 2)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_1_MSK (1 << 3)
+#define IWX_TLC_MNG_CFG_FLAGS_HE_DCM_NSS_2_MSK (1 << 4)
+
+/**
+ * enum iwx_tlc_mng_cfg_cw - channel width options
+ * @IWX_TLC_MNG_CH_WIDTH_20MHZ: 20MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_40MHZ: 40MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_80MHZ: 80MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_160MHZ: 160MHZ channel
+ * @IWX_TLC_MNG_CH_WIDTH_320MHZ: 320MHZ channel
+ */
+enum iwx_tlc_mng_cfg_cw {
+ IWX_TLC_MNG_CH_WIDTH_20MHZ,
+ IWX_TLC_MNG_CH_WIDTH_40MHZ,
+ IWX_TLC_MNG_CH_WIDTH_80MHZ,
+ IWX_TLC_MNG_CH_WIDTH_160MHZ,
+ IWX_TLC_MNG_CH_WIDTH_320MHZ,
+};
+
+/**
+ * @IWX_TLC_MNG_CHAIN_A_MSK: chain A
+ * @IWX_TLC_MNG_CHAIN_B_MSK: chain B
+ */
+#define IWX_TLC_MNG_CHAIN_A_MSK (1 << 0)
+#define IWX_TLC_MNG_CHAIN_B_MSK (1 << 1)
+
+/**
+ * enum iwx_tlc_mng_cfg_mode - supported modes
+ * @IWX_TLC_MNG_MODE_CCK: enable CCK
+ * @IWX_TLC_MNG_MODE_OFDM_NON_HT: enable OFDM (non HT)
+ * @IWX_TLC_MNG_MODE_NON_HT: enable non HT
+ * @IWX_TLC_MNG_MODE_HT: enable HT
+ * @IWX_TLC_MNG_MODE_VHT: enable VHT
+ * @IWX_TLC_MNG_MODE_HE: enable HE
+ * @IWX_TLC_MNG_MODE_EHT: enable EHT
+ */
+enum iwx_tlc_mng_cfg_mode {
+ IWX_TLC_MNG_MODE_CCK = 0,
+ IWX_TLC_MNG_MODE_OFDM_NON_HT = IWX_TLC_MNG_MODE_CCK,
+ IWX_TLC_MNG_MODE_NON_HT = IWX_TLC_MNG_MODE_CCK,
+ IWX_TLC_MNG_MODE_HT,
+ IWX_TLC_MNG_MODE_VHT,
+ IWX_TLC_MNG_MODE_HE,
+ IWX_TLC_MNG_MODE_EHT,
+};
+
+/**
+ * @IWX_TLC_MNG_HT_RATE_MCS0: index of MCS0
+ * @IWX_TLC_MNG_HT_RATE_MCS1: index of MCS1
+ * @IWX_TLC_MNG_HT_RATE_MCS2: index of MCS2
+ * @IWX_TLC_MNG_HT_RATE_MCS3: index of MCS3
+ * @IWX_TLC_MNG_HT_RATE_MCS4: index of MCS4
+ * @IWX_TLC_MNG_HT_RATE_MCS5: index of MCS5
+ * @IWX_TLC_MNG_HT_RATE_MCS6: index of MCS6
+ * @IWX_TLC_MNG_HT_RATE_MCS7: index of MCS7
+ * @IWX_TLC_MNG_HT_RATE_MCS8: index of MCS8
+ * @IWX_TLC_MNG_HT_RATE_MCS9: index of MCS9
+ * @IWX_TLC_MNG_HT_RATE_MCS10: index of MCS10
+ * @IWX_TLC_MNG_HT_RATE_MCS11: index of MCS11
+ * @IWX_TLC_MNG_HT_RATE_MAX: maximal rate for HT/VHT
+ */
+enum iwx_tlc_mng_ht_rates {
+ IWX_TLC_MNG_HT_RATE_MCS0 = 0,
+ IWX_TLC_MNG_HT_RATE_MCS1,
+ IWX_TLC_MNG_HT_RATE_MCS2,
+ IWX_TLC_MNG_HT_RATE_MCS3,
+ IWX_TLC_MNG_HT_RATE_MCS4,
+ IWX_TLC_MNG_HT_RATE_MCS5,
+ IWX_TLC_MNG_HT_RATE_MCS6,
+ IWX_TLC_MNG_HT_RATE_MCS7,
+ IWX_TLC_MNG_HT_RATE_MCS8,
+ IWX_TLC_MNG_HT_RATE_MCS9,
+ IWX_TLC_MNG_HT_RATE_MCS10,
+ IWX_TLC_MNG_HT_RATE_MCS11,
+ IWX_TLC_MNG_HT_RATE_MAX = IWX_TLC_MNG_HT_RATE_MCS11,
+};
+
+#define IWX_TLC_NSS_1 0
+#define IWX_TLC_NSS_2 1
+#define IWX_TLC_NSS_MAX 2
+
+
+/**
+ * IWX_TLC_MCS_PER_BW - mcs index per BW
+ * @IWX_TLC_MCS_PER_BW_80: mcs for bw - 20MHz, 40MHz, 80MHz
+ * @IWX_TLC_MCS_PER_BW_160: mcs for bw - 160MHz
+ * @IWX_TLC_MCS_PER_BW_320: mcs for bw - 320MHz
+ * @IWX_TLC_MCS_PER_BW_NUM_V3: number of entries up to version 3
+ * @IWX_TLC_MCS_PER_BW_NUM_V4: number of entries from version 4
+ */
+#define IWX_TLC_MCS_PER_BW_80 0
+#define IWX_TLC_MCS_PER_BW_160 1
+#define IWX_TLC_MCS_PER_BW_320 2
+#define IWX_TLC_MCS_PER_BW_NUM_V3 (IWX_TLC_MCS_PER_BW_160 + 1)
+#define IWX_TLC_MCS_PER_BW_NUM_V4 (IWX_TLC_MCS_PER_BW_320 + 1)
+
+/**
+ * struct iwx_tlc_config_cmd_v3 - TLC configuration version 3
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from @enum iwx_tlc_mng_cfg_cw
+ * @mode: &enum iwx_tlc_mng_cfg_mode
+ * @chains: bitmask of IWX_TLC_MNG_CHAIN_*_MSK
+ * @amsdu: 1 = TX amsdu is supported, 0 = not supported
+ * @flags: bitmask of IWX_TLC_MNG_CFG_*
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: MCS index 0 - 11, per <nss, channel-width>
+ * pair (0 - 80MHz width and below, 1 - 160MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use (1 << IWX_TLC_MNG_CFG_CW_*)
+ * @reserved2: reserved
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwx_tlc_config_cmd_v3 {
+ uint8_t sta_id;
+ uint8_t reserved1[3];
+ uint8_t max_ch_width;
+ uint8_t mode;
+ uint8_t chains;
+ uint8_t amsdu;
+ uint16_t flags;
+ uint16_t non_ht_rates;
+ uint16_t ht_rates[IWX_TLC_NSS_MAX][IWX_TLC_MCS_PER_BW_NUM_V3];
+ uint16_t max_mpdu_len;
+ uint8_t sgi_ch_width_supp;
+ uint8_t reserved2;
+ uint32_t max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_3 */
+
+/**
+ * struct iwx_tlc_config_cmd_v4 - TLC configuration
+ * @sta_id: station id
+ * @reserved1: reserved
+ * @max_ch_width: max supported channel width from @enum iwx_tlc_mng_cfg_cw
+ * @mode: &enum iwx_tlc_mng_cfg_mode
+ * @chains: bitmask of IWX_TLC_MNG_CHAIN_*_MSK
+ * @sgi_ch_width_supp: bitmap of SGI support per channel width
+ * use (1 << IWX_TLC_MNG_CFG_CW_*)
+ * @flags: bitmask of IWX_TLC_MNG_CFG_*
+ * @non_ht_rates: bitmap of supported legacy rates
+ * @ht_rates: MCS index 0 - 11, per <nss, channel-width>
+ * pair (0 - 80 MHz width and below, 1 - 160 MHz, 2 - 320 MHz).
+ * @max_mpdu_len: max MPDU length, in bytes
+ * @max_tx_op: max TXOP in uSecs for all AC (BK, BE, VO, VI),
+ * set zero for no limit.
+ */
+struct iwx_tlc_config_cmd_v4 {
+ uint8_t sta_id;
+ uint8_t reserved1[3];
+ uint8_t max_ch_width;
+ uint8_t mode;
+ uint8_t chains;
+ uint8_t sgi_ch_width_supp;
+ uint16_t flags;
+ uint16_t non_ht_rates;
+ uint16_t ht_rates[IWX_TLC_NSS_MAX][IWX_TLC_MCS_PER_BW_NUM_V4];
+ uint16_t max_mpdu_len;
+ uint16_t max_tx_op;
+} __packed; /* TLC_MNG_CONFIG_CMD_API_S_VER_4 */
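+
+/*
+ * Illustrative sketch (not part of the firmware API): how the per-<nss,
+ * bandwidth> HT rate table of a v4 TLC command could be populated for a
+ * hypothetical 2x2 station supporting MCS 0-7 per stream.  Each ht_rates
+ * entry is assumed to be a little-endian bitmap with bit n standing for
+ * IWX_TLC_MNG_HT_RATE_MCSn.
+ */
+#if 0	/* example only */
+static void
+example_fill_ht_rates(struct iwx_tlc_config_cmd_v4 *cmd)
+{
+	int nss, bw;
+
+	for (nss = 0; nss < IWX_TLC_NSS_MAX; nss++)
+		for (bw = 0; bw < IWX_TLC_MCS_PER_BW_NUM_V4; bw++)
+			cmd->ht_rates[nss][bw] = htole16(0x00ff);
+}
+#endif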
+
+/**
+ * TLC notification flags
+ * @IWX_TLC_NOTIF_FLAG_RATE: last initial rate update
+ * @IWX_TLC_NOTIF_FLAG_AMSDU: A-MSDU parameters update
+ */
+#define IWX_TLC_NOTIF_FLAG_RATE (1 << 0)
+#define IWX_TLC_NOTIF_FLAG_AMSDU (1 << 1)
+
+/**
+ * struct iwx_tlc_update_notif - TLC notification from FW
+ * @sta_id: station id
+ * @reserved: reserved
+ * @flags: bitmap of notifications reported
+ * @rate: current initial rate; using rate_n_flags version 1 if notification
+ * version is < 3 at run-time, else rate_n_flags version 2
+ * @amsdu_size: Max AMSDU size, in bytes
+ * @amsdu_enabled: bitmap for per-TID AMSDU enablement
+ */
+struct iwx_tlc_update_notif {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+ uint32_t flags;
+ uint32_t rate;
+ uint32_t amsdu_size;
+ uint32_t amsdu_enabled;
+} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+
+/* Antenna flags. */
+#define IWX_ANT_A (1 << 0)
+#define IWX_ANT_B (1 << 1)
+#define IWX_ANT_C (1 << 2)
+/* Shortcuts. */
+#define IWX_ANT_AB (IWX_ANT_A | IWX_ANT_B)
+#define IWX_ANT_BC (IWX_ANT_B | IWX_ANT_C)
+#define IWX_ANT_ABC (IWX_ANT_A | IWX_ANT_B | IWX_ANT_C)
+
+/*
+ * TX command security control
+ */
+#define IWX_TX_CMD_SEC_WEP 0x01
+#define IWX_TX_CMD_SEC_CCM 0x02
+#define IWX_TX_CMD_SEC_TKIP 0x03
+#define IWX_TX_CMD_SEC_EXT 0x04
+#define IWX_TX_CMD_SEC_MSK 0x07
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_POS 6
+#define IWX_TX_CMD_SEC_WEP_KEY_IDX_MSK 0xc0
+#define IWX_TX_CMD_SEC_KEY128 0x08
+
+/* TODO: how can these values be OK with only a 16-bit variable? */
+/*
+ * TX command next frame info
+ *
+ * bits 0:2 - security control (IWX_TX_CMD_SEC_*)
+ * bit 3 - immediate ACK required
+ * bit 4 - rate is taken from STA table
+ * bit 5 - frame belongs to BA stream
+ * bit 6 - immediate BA response expected
+ * bit 7 - unused
+ * bits 8:15 - Station ID
+ * bits 16:31 - rate
+ */
+#define IWX_TX_CMD_NEXT_FRAME_ACK_MSK (0x8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_RATE_MSK (0x10)
+#define IWX_TX_CMD_NEXT_FRAME_BA_MSK (0x20)
+#define IWX_TX_CMD_NEXT_FRAME_IMM_BA_RSP_MSK (0x40)
+#define IWX_TX_CMD_NEXT_FRAME_FLAGS_MSK (0xf8)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_MSK (0xff00)
+#define IWX_TX_CMD_NEXT_FRAME_STA_ID_POS (8)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_MSK (0xffff0000)
+#define IWX_TX_CMD_NEXT_FRAME_RATE_POS (16)
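+
+/*
+ * Illustrative sketch (hypothetical values): packing a next-frame word for
+ * station id 3, rate taken from the STA table, with an immediate ACK
+ * required.  Per the bit layout above, the station id lands in bits 8:15.
+ */
+#if 0	/* example only */
+uint32_t next_frame = (3 << IWX_TX_CMD_NEXT_FRAME_STA_ID_POS) |
+    IWX_TX_CMD_NEXT_FRAME_STA_RATE_MSK | IWX_TX_CMD_NEXT_FRAME_ACK_MSK;
+#endif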
+
+/*
+ * TX command Frame life time in us - to be written in pm_frame_timeout
+ */
+#define IWX_TX_CMD_LIFE_TIME_INFINITE 0xFFFFFFFF
+#define IWX_TX_CMD_LIFE_TIME_DEFAULT 2000000 /* 2000 ms */
+#define IWX_TX_CMD_LIFE_TIME_PROBE_RESP 40000 /* 40 ms */
+#define IWX_TX_CMD_LIFE_TIME_EXPIRED_FRAME 0
+
+/*
+ * TID for non QoS frames - to be written in tid_tspec
+ */
+#define IWX_TID_NON_QOS 0
+
+/*
+ * Limits on the retransmissions - to be written in {data,rts}_retry_limit
+ */
+#define IWX_DEFAULT_TX_RETRY 15
+#define IWX_MGMT_DFAULT_RETRY_LIMIT 3
+#define IWX_RTS_DFAULT_RETRY_LIMIT 3
+#define IWX_BAR_DFAULT_RETRY_LIMIT 60
+#define IWX_LOW_RETRY_LIMIT 7
+
+/*
+ * The FH will write back to the first TB only, so we need to copy some data
+ * into the buffer regardless of whether it should be mapped or not.
+ * This indicates how big the first TB must be to include the scratch buffer
+ * and the assigned PN.
+ * Since the PN is 8 bytes long and sits at offset 12, that size is 20.
+ * If we make it bigger then allocations will be bigger and copy slower, so
+ * that's probably not useful.
+ */
+#define IWX_FIRST_TB_SIZE 20
+#define IWX_FIRST_TB_SIZE_ALIGN ((IWX_FIRST_TB_SIZE + (64 - 1)) & ~(64 - 1))
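+
+/*
+ * Sanity sketch (illustration only): with IWX_FIRST_TB_SIZE == 20, the
+ * round-up above works out to (20 + 63) & ~63 == 64, i.e. one 64-byte unit.
+ */
+#if 0	/* example only */
+_Static_assert(IWX_FIRST_TB_SIZE_ALIGN == 64, "first TB rounds up to 64");
+#endif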
+
+/**
+ * %iwx_tx_cmd offload_assist values
+ * @IWX_TX_CMD_OFFLD_IP_HDR: offset to start of IP header (in words)
+ * from mac header end. In the normal case it is 4 words for SNAP.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload in case there is tunneling such as
+ * IPv6 in IPv4; in such a case the IP header offset should point to the
+ * inner IP header, and the IPv4 checksum of the outer header should be
+ * calculated by the driver.
+ * @IWX_TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @IWX_TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @IWX_TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @IWX_TX_CMD_OFFLD_PAD: mark that a 2-byte pad was inserted after the mac
+ * header for alignment
+ * @IWX_TX_CMD_OFFLD_AMSDU: mark that the TX command is an A-MSDU
+ */
+#define IWX_TX_CMD_OFFLD_IP_HDR(x) ((x) << 0)
+#define IWX_TX_CMD_OFFLD_L4_EN (1 << 6)
+#define IWX_TX_CMD_OFFLD_L3_EN (1 << 7)
+#define IWX_TX_CMD_OFFLD_MH_SIZE(x) ((x) << 8)
+#define IWX_TX_CMD_OFFLD_PAD (1 << 13)
+#define IWX_TX_CMD_OFFLD_AMSDU (1 << 14)
+#define IWX_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWX_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
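+
+/*
+ * Illustrative sketch (assumed header sizes, not taken from this file):
+ * composing offload_assist for a 26-byte QoS data MAC header (13 16-bit
+ * words) carried over SNAP, with both L3 and L4 checksum offload enabled.
+ */
+#if 0	/* example only */
+uint16_t offload_assist = IWX_TX_CMD_OFFLD_IP_HDR(4) |
+    IWX_TX_CMD_OFFLD_MH_SIZE(13) |
+    IWX_TX_CMD_OFFLD_L3_EN | IWX_TX_CMD_OFFLD_L4_EN;
+#endif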
+
+struct iwx_dram_sec_info {
+ uint32_t pn_low;
+ uint16_t pn_high;
+ uint16_t aux_info;
+} __packed; /* DRAM_SEC_INFO_API_S_VER_1 */
+
+/**
+ * bitmasks for tx_flags in TX command for 22000
+ * @IWX_TX_FLAGS_CMD_RATE: use rate from the TX command
+ * @IWX_TX_FLAGS_ENCRYPT_DIS: frame should not be encrypted, even if it belongs
+ * to a secured STA
+ * @IWX_TX_FLAGS_HIGH_PRI: high priority frame (like EAPOL) - can affect rate
+ * selection, retry limits and BT kill
+ */
+/* Valid for TX_FLAGS_BITS_API_S_VER_3: */
+#define IWX_TX_FLAGS_CMD_RATE (1 << 0)
+#define IWX_TX_FLAGS_ENCRYPT_DIS (1 << 1)
+#define IWX_TX_FLAGS_HIGH_PRI (1 << 2)
+/* Valid for TX_FLAGS_BITS_API_S_VER_4 and above: */
+#define IWX_TX_FLAGS_RTS (1 << 3)
+#define IWX_TX_FLAGS_CTS (1 << 4)
+/* TX_FLAGS_BITS_API_S_VER_4 */
+
+/**
+ * struct iwx_tx_cmd_gen2 - TX command struct to FW for 22000 devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
+ * @flags: combination of TX_FLAGS_*
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @hdr: 802.11 header
+ */
+struct iwx_tx_cmd_gen2 {
+ uint16_t len;
+ uint16_t offload_assist;
+ uint32_t flags;
+ struct iwx_dram_sec_info dram_info;
+ uint32_t rate_n_flags;
+ struct ieee80211_frame hdr[0];
+} __packed; /* TX_CMD_API_S_VER_7,
+ TX_CMD_API_S_VER_9 */
+
+/**
+ * struct iwx_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
+ * ( TX_CMD = 0x1c )
+ * @len: in bytes of the payload, see below for details
+ * @flags: combination of TX_FLAGS_*
+ * @offload_assist: TX offload configuration
+ * @dram_info: FW internal DRAM storage
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @reserved: reserved
+ * @hdr: 802.11 header
+ */
+struct iwx_tx_cmd_gen3 {
+ uint16_t len;
+ uint16_t flags;
+ uint32_t offload_assist;
+ struct iwx_dram_sec_info dram_info;
+ uint32_t rate_n_flags;
+ uint8_t reserved[8];
+ struct ieee80211_frame hdr[];
+} __packed; /* TX_CMD_API_S_VER_8,
+ TX_CMD_API_S_VER_10 */
+
+/*
+ * TX response related data
+ */
+
+/*
+ * Status that is returned by the fw after attempts to Tx.
+ * @IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH: mismatch between color of Tx cmd
+ * and STA table.
+ * Valid only if frame_count == 1.
+ */
+#define IWX_TX_STATUS_MSK 0x000000ff
+#define IWX_TX_STATUS_SUCCESS 0x01
+#define IWX_TX_STATUS_DIRECT_DONE 0x02
+/* postpone TX */
+#define IWX_TX_STATUS_POSTPONE_DELAY 0x40
+#define IWX_TX_STATUS_POSTPONE_FEW_BYTES 0x41
+#define IWX_TX_STATUS_POSTPONE_BT_PRIO 0x42
+#define IWX_TX_STATUS_POSTPONE_QUIET_PERIOD 0x43
+#define IWX_TX_STATUS_POSTPONE_CALC_TTAK 0x44
+/* abort TX */
+#define IWX_TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY 0x81
+#define IWX_TX_STATUS_FAIL_SHORT_LIMIT 0x82
+#define IWX_TX_STATUS_FAIL_LONG_LIMIT 0x83
+#define IWX_TX_STATUS_FAIL_UNDERRUN 0x84
+#define IWX_TX_STATUS_FAIL_DRAIN_FLOW 0x85
+#define IWX_TX_STATUS_FAIL_RFKILL_FLUSH 0x86
+#define IWX_TX_STATUS_FAIL_LIFE_EXPIRE 0x87
+#define IWX_TX_STATUS_FAIL_DEST_PS 0x88
+#define IWX_TX_STATUS_FAIL_HOST_ABORTED 0x89
+#define IWX_TX_STATUS_FAIL_BT_RETRY 0x8a
+#define IWX_TX_STATUS_FAIL_STA_INVALID 0x8b
+#define IWX_TX_STATUS_FAIL_FRAG_DROPPED 0x8c
+#define IWX_TX_STATUS_FAIL_TID_DISABLE 0x8d
+#define IWX_TX_STATUS_FAIL_FIFO_FLUSHED 0x8e
+#define IWX_TX_STATUS_FAIL_SMALL_CF_POLL 0x8f
+#define IWX_TX_STATUS_FAIL_FW_DROP 0x90
+#define IWX_TX_STATUS_FAIL_STA_COLOR_MISMATCH 0x91
+#define IWX_TX_STATUS_INTERNAL_ABORT 0x92
+#define IWX_TX_MODE_MSK 0x00000f00
+#define IWX_TX_MODE_NO_BURST 0x00000000
+#define IWX_TX_MODE_IN_BURST_SEQ 0x00000100
+#define IWX_TX_MODE_FIRST_IN_BURST 0x00000200
+#define IWX_TX_QUEUE_NUM_MSK 0x0001f000
+#define IWX_TX_NARROW_BW_MSK 0x00060000
+#define IWX_TX_NARROW_BW_1DIV2 0x00020000
+#define IWX_TX_NARROW_BW_1DIV4 0x00040000
+#define IWX_TX_NARROW_BW_1DIV8 0x00060000
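+
+/*
+ * Illustrative helper (a sketch, not a definitive API): a frame counts as
+ * delivered when the low status byte reports SUCCESS or DIRECT_DONE; the
+ * 0x4x/0x8x/0x9x codes above mean the TX was postponed or aborted.
+ */
+#if 0	/* example only */
+static inline int
+example_tx_status_ok(uint32_t status)
+{
+	status &= IWX_TX_STATUS_MSK;
+	return (status == IWX_TX_STATUS_SUCCESS ||
+	    status == IWX_TX_STATUS_DIRECT_DONE);
+}
+#endif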
+
+/*
+ * TX aggregation status
+ * @IWX_AGG_TX_STATE_TRY_CNT_MSK: Retry count for 1st frame in aggregation (retries
+ * occur if tx failed for this frame when it was a member of a previous
+ * aggregation block). If rate scaling is used, retry count indicates the
+ * rate table entry used for all frames in the new agg.
+ * @IWX_AGG_TX_STATE_SEQ_NUM_MSK: Command ID and sequence number of Tx command for
+ * this frame
+ */
+#define IWX_AGG_TX_STATE_STATUS_MSK 0x0fff
+#define IWX_AGG_TX_STATE_TRANSMITTED 0x0000
+#define IWX_AGG_TX_STATE_UNDERRUN 0x0001
+#define IWX_AGG_TX_STATE_BT_PRIO 0x0002
+#define IWX_AGG_TX_STATE_FEW_BYTES 0x0004
+#define IWX_AGG_TX_STATE_ABORT 0x0008
+#define IWX_AGG_TX_STATE_LAST_SENT_TTL 0x0010
+#define IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT 0x0020
+#define IWX_AGG_TX_STATE_LAST_SENT_BT_KILL 0x0040
+#define IWX_AGG_TX_STATE_SCD_QUERY 0x0080
+#define IWX_AGG_TX_STATE_TEST_BAD_CRC32 0x0100
+#define IWX_AGG_TX_STATE_RESPONSE 0x01ff
+#define IWX_AGG_TX_STATE_DUMP_TX 0x0200
+#define IWX_AGG_TX_STATE_DELAY_TX 0x0400
+#define IWX_AGG_TX_STATE_TRY_CNT_POS 12
+#define IWX_AGG_TX_STATE_TRY_CNT_MSK (0xf << IWX_AGG_TX_STATE_TRY_CNT_POS)
+
+#define IWX_AGG_TX_STATE_LAST_SENT_MSK (IWX_AGG_TX_STATE_LAST_SENT_TTL| \
+ IWX_AGG_TX_STATE_LAST_SENT_TRY_CNT| \
+ IWX_AGG_TX_STATE_LAST_SENT_BT_KILL)
+
+/*
+ * The mask below describes a status where we are absolutely sure that the MPDU
+ * wasn't sent. For BA/Underrun we cannot be that sure. All we know is that
+ * we've written the bytes to the TXE, but nothing about what the DSP did.
+ */
+#define IWX_AGG_TX_STAT_FRAME_NOT_SENT (IWX_AGG_TX_STATE_FEW_BYTES | \
+ IWX_AGG_TX_STATE_ABORT | \
+ IWX_AGG_TX_STATE_SCD_QUERY)
+
+/*
+ * IWX_REPLY_TX = 0x1c (response)
+ *
+ * This response may be in one of two slightly different formats, indicated
+ * by the frame_count field:
+ *
+ * 1) No aggregation (frame_count == 1). This reports Tx results for a single
+ * frame. Multiple attempts, at various bit rates, may have been made for
+ * this frame.
+ *
+ * 2) Aggregation (frame_count > 1). This reports Tx results for two or more
+ * frames that used block-acknowledge. All frames were transmitted at
+ * same rate. Rate scaling may have been used if first frame in this new
+ * agg block failed in previous agg block(s).
+ *
+ * Note that, for aggregation, ACK (block-ack) status is not delivered
+ * here; block-ack has not been received by the time the device records
+ * this status.
+ * This status relates to reasons the tx might have been blocked or aborted
+ * within the device, rather than whether it was received successfully by
+ * the destination station.
+ */
+
+/**
+ * struct iwx_agg_tx_status - per packet TX aggregation status
+ * @status: enum iwx_tx_agg_status
+ * @sequence: Sequence # for this frame's Tx cmd (not SSN!)
+ */
+struct iwx_agg_tx_status {
+ uint16_t status;
+ uint16_t sequence;
+} __packed;
+
+/*
+ * definitions for initial rate index field
+ * bits [3:0] initial rate index
+ * bits [6:4] rate table color, used for the initial rate
+ * bit-7 invalid rate indication
+ */
+#define IWX_TX_RES_INIT_RATE_INDEX_MSK 0x0f
+#define IWX_TX_RES_RATE_TABLE_COLOR_MSK 0x70
+#define IWX_TX_RES_INV_RATE_INDEX_MSK 0x80
+
+#define IWX_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f)
+#define IWX_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4)
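+/* For example, the accessors above decode ra_tid == 0x25 to RA 2, TID 5. */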
+
+/**
+ * struct iwx_tx_resp_v3 - notifies that fw is TXing a packet
+ * ( IWX_REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. IWX_RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @status: for non-agg: frame status IWX_TX_STATUS_*
+ * for agg: status of 1st frame, IWX_AGG_TX_STATE_*; other frame status fields
+ * follow this one, up to frame_count.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwx_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp_v3 {
+ uint8_t frame_count;
+ uint8_t bt_kill_count;
+ uint8_t failure_rts;
+ uint8_t failure_frame;
+ uint32_t initial_rate;
+ uint16_t wireless_media_time;
+
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint16_t reserved;
+
+ uint32_t tfd_info;
+ uint16_t seq_ctl;
+ uint16_t byte_cnt;
+ uint8_t tlc_info;
+ uint8_t ra_tid;
+ uint16_t frame_ctrl;
+
+ struct iwx_agg_tx_status status;
+} __packed; /* IWX_TX_RSP_API_S_VER_3 */
+
+/**
+ * struct iwx_tx_resp - notifies that fw is TXing a packet
+ * ( REPLY_TX = 0x1c )
+ * @frame_count: 1 no aggregation, >1 aggregation
+ * @bt_kill_count: num of times blocked by bluetooth (unused for agg)
+ * @failure_rts: num of failures due to unsuccessful RTS
+ * @failure_frame: num failures due to no ACK (unused for agg)
+ * @initial_rate: for non-agg: rate of the successful Tx. For agg: rate of the
+ * Tx of all the batch. RATE_MCS_*
+ * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK.
+ * for agg: RTS + CTS + aggregation tx time + block-ack time.
+ * in usec.
+ * @pa_status: tx power info
+ * @pa_integ_res_a: tx power info
+ * @pa_integ_res_b: tx power info
+ * @pa_integ_res_c: tx power info
+ * @measurement_req_id: tx power info
+ * @reduced_tpc: transmit power reduction used
+ * @reserved: reserved
+ * @tfd_info: TFD information set by the FH
+ * @seq_ctl: sequence control from the Tx cmd
+ * @byte_cnt: byte count from the Tx cmd
+ * @tlc_info: TLC rate info
+ * @ra_tid: bits [3:0] = ra, bits [7:4] = tid
+ * @frame_ctrl: frame control
+ * @tx_queue: TX queue for this response
+ * @reserved2: reserved for padding/alignment
+ * @status: for non-agg: frame status TX_STATUS_*
+ * For version 6, a TX response is not received for aggregation at all.
+ *
+ * After the array of statuses comes the SSN of the SCD. Look at
+ * %iwx_get_scd_ssn for more details.
+ */
+struct iwx_tx_resp {
+ uint8_t frame_count;
+ uint8_t bt_kill_count;
+ uint8_t failure_rts;
+ uint8_t failure_frame;
+ uint32_t initial_rate;
+ uint16_t wireless_media_time;
+
+ uint8_t pa_status;
+ uint8_t pa_integ_res_a[3];
+ uint8_t pa_integ_res_b[3];
+ uint8_t pa_integ_res_c[3];
+ uint16_t measurement_req_id;
+ uint8_t reduced_tpc;
+ uint8_t reserved;
+
+ uint32_t tfd_info;
+ uint16_t seq_ctl;
+ uint16_t byte_cnt;
+ uint8_t tlc_info;
+ uint8_t ra_tid;
+ uint16_t frame_ctrl;
+ uint16_t tx_queue;
+ uint16_t reserved2;
+ struct iwx_agg_tx_status status;
+} __packed; /* TX_RSP_API_S_VER_6 */
+
+/**
+ * struct iwx_compressed_ba_tfd - progress of a TFD queue
+ * @q_num: TFD queue number
+ * @tfd_index: Index of first un-acked frame in the TFD queue
+ * @scd_queue: For debug only - the physical queue the TFD queue is bound to
+ * @tid: TID of the queue (0-7)
+ * @reserved: reserved for alignment
+ */
+struct iwx_compressed_ba_tfd {
+ uint16_t q_num;
+ uint16_t tfd_index;
+ uint8_t scd_queue;
+ uint8_t tid;
+ uint8_t reserved[2];
+} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
+
+/**
+ * struct iwx_compressed_ba_ratid - progress of a RA TID queue
+ * @q_num: RA TID queue number
+ * @tid: TID of the queue
+ * @ssn: BA window current SSN
+ */
+struct iwx_compressed_ba_ratid {
+ uint8_t q_num;
+ uint8_t tid;
+ uint16_t ssn;
+} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
+
+/*
+ * enum iwx_ba_resp_flags - TX aggregation status
+ * @IWX_MVM_BA_RESP_TX_AGG: generated due to BA
+ * @IWX_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
+ * @IWX_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
+ * @IWX_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
+ * @IWX_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
+ * @IWX_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
+ * expected time
+ */
+enum iwx_ba_resp_flags {
+ IWX_MVM_BA_RESP_TX_AGG,
+ IWX_MVM_BA_RESP_TX_BAR,
+ IWX_MVM_BA_RESP_TX_AGG_FAIL,
+ IWX_MVM_BA_RESP_TX_UNDERRUN,
+ IWX_MVM_BA_RESP_TX_BT_KILL,
+ IWX_MVM_BA_RESP_TX_DSP_TIMEOUT
+};
+
+/**
+ * struct iwx_compressed_ba_notif - notifies about reception of BA
+ * ( BA_NOTIF = 0xc5 )
+ * @flags: status flag, see the &iwx_ba_resp_flags
+ * @sta_id: Index of recipient (BA-sending) station in fw's station table
+ * @reduced_txp: power reduced according to TPC. This is the actual value and
+ * not a copy from the LQ command. Thus, if a rate other than the first
+ * was used for Tx, this value will be set to 0 by the FW.
+ * @tlc_rate_info: TLC rate info, initial rate index, TLC table color
+ * @retry_cnt: retry count
+ * @query_byte_cnt: SCD query byte count
+ * @query_frame_cnt: SCD query frame count
+ * @txed: number of frames sent in the aggregation (all-TIDs)
+ * @done: number of frames that were Acked by the BA (all-TIDs)
+ * @reserved: reserved (for alignment)
+ * @wireless_time: Wireless-media time
+ * @tx_rate: the rate the aggregation was sent at
+ * @tfd_cnt: number of TFD-Q elements
+ * @ra_tid_cnt: number of RATID-Q elements
+ * @tfd: array of TFD queue status updates. See &iwx_compressed_ba_tfd
+ * for details. Length in @tfd_cnt.
+ * @ra_tid: array of RA-TID queue status updates. For debug purposes only. See
+ * &iwx_compressed_ba_ratid for more details. Length in @ra_tid_cnt.
+ */
+struct iwx_compressed_ba_notif {
+ uint32_t flags;
+ uint8_t sta_id;
+ uint8_t reduced_txp;
+ uint8_t tlc_rate_info;
+ uint8_t retry_cnt;
+ uint32_t query_byte_cnt;
+ uint16_t query_frame_cnt;
+ uint16_t txed;
+ uint16_t done;
+ uint16_t reserved;
+ uint32_t wireless_time;
+ uint32_t tx_rate;
+ uint16_t tfd_cnt;
+ uint16_t ra_tid_cnt;
+ struct iwx_compressed_ba_ratid ra_tid[0];
+ struct iwx_compressed_ba_tfd tfd[];
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
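+
+/*
+ * Illustrative sketch (based on how the counts above are laid out; not a
+ * definitive parser): walking the TFD queue updates in a BA notification.
+ */
+#if 0	/* example only */
+static void
+example_walk_ba_tfds(const struct iwx_compressed_ba_notif *notif)
+{
+	uint16_t i;
+
+	for (i = 0; i < le16toh(notif->tfd_cnt); i++) {
+		const struct iwx_compressed_ba_tfd *t = &notif->tfd[i];
+		/* le16toh(t->tfd_index) is the first un-acked TFD in
+		 * queue le16toh(t->q_num); the TID comes from t->tid. */
+		(void)t;
+	}
+}
+#endif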
+
+
+struct iwx_beacon_notif {
+ struct iwx_tx_resp_v3 beacon_notify_hdr;
+ uint64_t tsf;
+ uint32_t ibss_mgr_status;
+} __packed;
+
+/**
+ * dump (flush) control flags
+ * @IWX_DUMP_TX_FIFO_FLUSH: Dump MSDUs until the FIFO is empty
+ * and the TFD queues are empty.
+ */
+#define IWX_DUMP_TX_FIFO_FLUSH (1 << 1)
+
+/**
+ * struct iwx_tx_path_flush_cmd_v1 -- queue/FIFO flush command
+ * @queues_ctl: bitmap of queues to flush
+ * @flush_ctl: control flags
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd_v1 {
+ uint32_t queues_ctl;
+ uint16_t flush_ctl;
+ uint16_t reserved;
+} __packed; /* IWX_TX_PATH_FLUSH_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_tx_path_flush_cmd -- queue/FIFO flush command
+ * @sta_id: station ID to flush
+ * @tid_mask: TID mask to flush
+ * @reserved: reserved
+ */
+struct iwx_tx_path_flush_cmd {
+ uint32_t sta_id;
+ uint16_t tid_mask;
+ uint16_t reserved;
+} __packed; /* TX_PATH_FLUSH_CMD_API_S_VER_2 */
+
+#define IWX_TX_FLUSH_QUEUE_RSP 16
+
+/**
+ * struct iwx_flush_queue_info - virtual flush queue info
+ * @queue_num: virtual queue id
+ * @read_before_flush: read pointer before flush
+ * @read_after_flush: read pointer after flush
+ */
+struct iwx_flush_queue_info {
+ uint16_t tid;
+ uint16_t queue_num;
+ uint16_t read_before_flush;
+ uint16_t read_after_flush;
+} __packed; /* TFDQ_FLUSH_INFO_API_S_VER_1 */
+
+/**
+ * struct iwx_tx_path_flush_cmd_rsp -- queue/FIFO flush command response
+ * @sta_id: station id from the command
+ * @num_flushed_queues: number of queues in queues array
+ * @queues: all flushed queues
+ */
+struct iwx_tx_path_flush_cmd_rsp {
+ uint16_t sta_id;
+ uint16_t num_flushed_queues;
+ struct iwx_flush_queue_info queues[IWX_TX_FLUSH_QUEUE_RSP];
+} __packed; /* TX_PATH_FLUSH_CMD_RSP_API_S_VER_1 */
+
+
+/**
+ * iwx_get_scd_ssn - returns the SSN of the SCD
+ * @tx_resp: the Tx response from the fw (agg or non-agg)
+ *
+ * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
+ * it can't know that everything will go well until the end of the AMPDU, it
+ * can't know in advance the number of MPDUs that will be sent in the current
+ * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
+ * Hence, it can't know in advance what the SSN of the SCD will be at the end
+ * of the batch. This is why the SSN of the SCD is written at the end of the
+ * whole struct at a variable offset. This function knows how to cope with the
+ * variable offset and returns the SSN of the SCD.
+ */
+static inline uint32_t iwx_get_scd_ssn(struct iwx_tx_resp *tx_resp)
+{
+ return le32_to_cpup((uint32_t *)&tx_resp->status +
+ tx_resp->frame_count) & 0xfff;
+}
+
+/**
+ * struct iwx_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: opaque token, echoed back in the response
+ * @sta_id: station id
+ * @tid: traffic ID (TID) of the queue
+ * @scd_queue: scheduler queue to config
+ * @enable: 1 queue enable, 0 queue disable
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: %enum iwx_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ */
+struct iwx_scd_txq_cfg_cmd {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+ uint8_t enable;
+ uint8_t aggregate;
+ uint8_t tx_fifo;
+ uint8_t window;
+ uint16_t ssn;
+ uint16_t reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwx_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwx_scd_txq_cfg_rsp {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
+
+
+/* Scan Commands, Responses, Notifications */
+
+/* Max number of IEs for direct SSID scans in a command */
+#define IWX_PROBE_OPTION_MAX 20
+
+/**
+ * struct iwx_ssid_ie - directed scan network information element
+ *
+ * Up to 20 of these may appear in IWX_REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwx_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
+ * SSID IEs get transmitted in reverse order of entry.
+ */
+struct iwx_ssid_ie {
+ uint8_t id;
+ uint8_t len;
+ uint8_t ssid[IEEE80211_NWID_LEN];
+} __packed; /* IWX_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+
+/* scan offload */
+#define IWX_SCAN_MAX_BLACKLIST_LEN 64
+#define IWX_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWX_SCAN_MAX_PROFILES 11
+#define IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWX_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWX_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWX_CAN_ABORT_STATUS 1
+
+#define IWX_FULL_SCAN_MULTIPLIER 5
+#define IWX_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWX_MAX_SCHED_SCAN_PLANS 2
+
+/**
+ * iwx_scan_schedule_lmac - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwx_scan_schedule_lmac {
+ uint16_t delay;
+ uint8_t iterations;
+ uint8_t full_scan_mul;
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+/**
+ * iwx_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_req_tx_cmd {
+ uint32_t tx_flags;
+ uint32_t rate_n_flags;
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed;
+
+#define IWX_UNIFIED_SCAN_CHANNEL_FULL (1 << 27)
+#define IWX_UNIFIED_SCAN_CHANNEL_PARTIAL (1 << 28)
+
+/**
+ * iwx_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags: bits 1-20: directed scan to i'th ssid
+ * other bits &enum iwx_scan_channel_flags_lmac
+ * @channel_num: channel number 1-13 etc
+ * @iter_count: scan iteration on this channel
+ * @iter_interval: interval in seconds between iterations on one channel
+ */
+struct iwx_scan_channel_cfg_lmac {
+ uint32_t flags;
+ uint16_t channel_num;
+ uint16_t iter_count;
+ uint32_t iter_interval;
+} __packed;
+
+/*
+ * iwx_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwx_scan_probe_segment {
+ uint16_t offset;
+ uint16_t len;
+} __packed;
+
+/* iwx_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req_v1 {
+ struct iwx_scan_probe_segment mac_header;
+ struct iwx_scan_probe_segment band_data[2];
+ struct iwx_scan_probe_segment common_data;
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+/* iwx_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwx_scan_probe_req {
+ struct iwx_scan_probe_segment mac_header;
+ struct iwx_scan_probe_segment band_data[3];
+ struct iwx_scan_probe_segment common_data;
+ uint8_t buf[IWX_SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
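+/*
+ * Illustrative sketch (not a definitive accessor): each probe segment is an
+ * <offset, len> pair into @buf, so the probe-request bytes for one band can
+ * be located as below.
+ */
+#if 0	/* example only */
+const uint8_t *frm = preq->buf + le16toh(preq->band_data[0].offset);
+uint16_t frm_len = le16toh(preq->band_data[0].len);
+#endif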
+
+#define IWX_SCAN_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_SCAN_CHANNEL_FLAG_EBS_ACCURATE (1 << 1)
+#define IWX_SCAN_CHANNEL_FLAG_CACHE_ADD (1 << 2)
+#define IWX_SCAN_CHANNEL_FLAG_EBS_FRAG (1 << 3)
+#define IWX_SCAN_CHANNEL_FLAG_FORCE_EBS (1 << 4)
+#define IWX_SCAN_CHANNEL_FLAG_ENABLE_CHAN_ORDER (1 << 5)
+#define IWX_SCAN_CHANNEL_FLAG_6G_PSC_NO_FILTER (1 << 6)
+
+/* iwx_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwx_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be a full scan (and so on).
+ */
+struct iwx_scan_channel_opt {
+ uint16_t flags;
+ uint16_t non_ebs_ratio;
+} __packed;
+
+#define IWX_SCAN_PRIORITY_LOW 0
+#define IWX_SCAN_PRIORITY_MEDIUM 1
+#define IWX_SCAN_PRIORITY_HIGH 2
+
+enum iwx_scan_priority_ext {
+ IWX_SCAN_PRIORITY_EXT_0_LOWEST,
+ IWX_SCAN_PRIORITY_EXT_1,
+ IWX_SCAN_PRIORITY_EXT_2,
+ IWX_SCAN_PRIORITY_EXT_3,
+ IWX_SCAN_PRIORITY_EXT_4,
+ IWX_SCAN_PRIORITY_EXT_5,
+ IWX_SCAN_PRIORITY_EXT_6,
+ IWX_SCAN_PRIORITY_EXT_7_HIGHEST,
+};
+
+/**
+ * iwx_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwx_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwx_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ * @reserved: reserved
+ */
+struct iwx_periodic_scan_complete {
+ uint8_t last_schedule_line;
+ uint8_t last_schedule_iteration;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_after_last_iter;
+ uint32_t reserved;
+} __packed;
+
+/**
+ * struct iwx_scan_results_notif - scan results for one channel -
+ * SCAN_RESULT_NTF_API_S_VER_3
+ * @channel: which channel the results are from
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
+ * @probe_status: IWX_SCAN_PROBE_STATUS_*, indicates success of probe request
+ * @num_probe_not_sent: # of requests that weren't sent due to not enough time
+ * @duration: duration spent in channel, in usecs
+ */
+struct iwx_scan_results_notif {
+ uint8_t channel;
+ uint8_t band;
+ uint8_t probe_status;
+ uint8_t num_probe_not_sent;
+ uint32_t duration;
+} __packed;
+
+#define IWX_SCAN_CLIENT_SCHED_SCAN (1 << 0)
+#define IWX_SCAN_CLIENT_NETDETECT (1 << 1)
+#define IWX_SCAN_CLIENT_ASSET_TRACKING (1 << 2)
+
+/**
+ * iwx_scan_offload_blacklist - IWX_SCAN_OFFLOAD_BLACKLIST_S
+ * @ssid: MAC address (BSSID) to filter out
+ * @reported_rssi: AP rssi reported to the host
+ * @client_bitmap: clients ignore this entry - enum scan_framework_client
+ */
+struct iwx_scan_offload_blacklist {
+ uint8_t ssid[ETHER_ADDR_LEN];
+ uint8_t reported_rssi;
+ uint8_t client_bitmap;
+} __packed;
+
+#define IWX_NETWORK_TYPE_BSS 1
+#define IWX_NETWORK_TYPE_IBSS 2
+#define IWX_NETWORK_TYPE_ANY 3
+
+#define IWX_SCAN_OFFLOAD_SELECT_2_4 0x4
+#define IWX_SCAN_OFFLOAD_SELECT_5_2 0x8
+#define IWX_SCAN_OFFLOAD_SELECT_ANY 0xc
+
+/**
+ * iwx_scan_offload_profile - IWX_SCAN_OFFLOAD_PROFILE_S
+ * @ssid_index: index to ssid list in fixed part
+ * @unicast_cipher: encryption algorithm to match - bitmap
+ * @aut_alg: authentication algorithm to match - bitmap
+ * @network_type: enum iwx_scan_offload_network_type
+ * @band_selection: enum iwx_scan_offload_band_selection
+ * @client_bitmap: clients waiting for match - enum scan_framework_client
+ */
+struct iwx_scan_offload_profile {
+ uint8_t ssid_index;
+ uint8_t unicast_cipher;
+ uint8_t auth_alg;
+ uint8_t network_type;
+ uint8_t band_selection;
+ uint8_t client_bitmap;
+ uint8_t reserved[2];
+} __packed;
+
+/**
+ * iwx_scan_offload_profile_cfg - IWX_SCAN_OFFLOAD_PROFILES_CFG_API_S_VER_1
+ * @blacklist: AP list to filter off from scan results
+ * @profiles: profiles to search for match
+ * @blacklist_len: length of blacklist
+ * @num_profiles: num of profiles in the list
+ * @match_notify: clients waiting for match found notification
+ * @pass_match: clients waiting for the results
+ * @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
+ */
+struct iwx_scan_offload_profile_cfg {
+ struct iwx_scan_offload_profile profiles[IWX_SCAN_MAX_PROFILES];
+ uint8_t blacklist_len;
+ uint8_t num_profiles;
+ uint8_t match_notify;
+ uint8_t pass_match;
+ uint8_t active_clients;
+ uint8_t any_beacon_notify;
+ uint8_t reserved[2];
+} __packed;
+
+#define IWX_SCAN_OFFLOAD_COMPLETED 1
+#define IWX_SCAN_OFFLOAD_ABORTED 2
+
+/* UMAC Scan API */
+
+#define IWX_SCAN_CONFIG_FLAG_ACTIVATE (1 << 0)
+#define IWX_SCAN_CONFIG_FLAG_DEACTIVATE (1 << 1)
+#define IWX_SCAN_CONFIG_FLAG_FORBID_CHUB_REQS (1 << 2)
+#define IWX_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS (1 << 3)
+#define IWX_SCAN_CONFIG_FLAG_SET_TX_CHAINS (1 << 8)
+#define IWX_SCAN_CONFIG_FLAG_SET_RX_CHAINS (1 << 9)
+#define IWX_SCAN_CONFIG_FLAG_SET_AUX_STA_ID (1 << 10)
+#define IWX_SCAN_CONFIG_FLAG_SET_ALL_TIMES (1 << 11)
+#define IWX_SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES (1 << 12)
+#define IWX_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS (1 << 13)
+#define IWX_SCAN_CONFIG_FLAG_SET_LEGACY_RATES (1 << 14)
+#define IWX_SCAN_CONFIG_FLAG_SET_MAC_ADDR (1 << 15)
+#define IWX_SCAN_CONFIG_FLAG_SET_FRAGMENTED (1 << 16)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED (1 << 17)
+#define IWX_SCAN_CONFIG_FLAG_SET_CAM_MODE (1 << 18)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_CAM_MODE (1 << 19)
+#define IWX_SCAN_CONFIG_FLAG_SET_PROMISC_MODE (1 << 20)
+#define IWX_SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE (1 << 21)
+
+/* Bits 26-31 are for num of channels in channel_array */
+#define IWX_SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+
+/* OFDM basic rates */
+#define IWX_SCAN_CONFIG_RATE_6M (1 << 0)
+#define IWX_SCAN_CONFIG_RATE_9M (1 << 1)
+#define IWX_SCAN_CONFIG_RATE_12M (1 << 2)
+#define IWX_SCAN_CONFIG_RATE_18M (1 << 3)
+#define IWX_SCAN_CONFIG_RATE_24M (1 << 4)
+#define IWX_SCAN_CONFIG_RATE_36M (1 << 5)
+#define IWX_SCAN_CONFIG_RATE_48M (1 << 6)
+#define IWX_SCAN_CONFIG_RATE_54M (1 << 7)
+/* CCK basic rates */
+#define IWX_SCAN_CONFIG_RATE_1M (1 << 8)
+#define IWX_SCAN_CONFIG_RATE_2M (1 << 9)
+#define IWX_SCAN_CONFIG_RATE_5M (1 << 10)
+#define IWX_SCAN_CONFIG_RATE_11M (1 << 11)
+
+/* Bits 16-27 are for supported rates */
+#define IWX_SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
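+
+/*
+ * Illustrative sketch: a driver would typically advertise a set of basic
+ * rates in the low bits and mirror the same set into the supported-rate
+ * bits 16-27.  The OFDM choice below is a hypothetical example.
+ */
+#if 0	/* example only */
+uint32_t rates = IWX_SCAN_CONFIG_RATE_6M | IWX_SCAN_CONFIG_RATE_12M |
+    IWX_SCAN_CONFIG_RATE_24M;
+rates |= IWX_SCAN_CONFIG_SUPPORTED_RATE(rates);
+#endif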
+
+#define IWX_CHANNEL_FLAG_EBS (1 << 0)
+#define IWX_CHANNEL_FLAG_ACCURATE_EBS (1 << 1)
+#define IWX_CHANNEL_FLAG_EBS_ADD (1 << 2)
+#define IWX_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE (1 << 3)
+
+/**
+ * struct iwx_scan_dwell
+ * @active: default dwell time for active scan
+ * @passive: default dwell time for passive scan
+ * @fragmented: default dwell time for fragmented scan
+ * @extended: default dwell time for channels 1, 6 and 11
+ */
+struct iwx_scan_dwell {
+ uint8_t active;
+ uint8_t passive;
+ uint8_t fragmented;
+ uint8_t extended;
+} __packed;
+
+
+#define IWX_SCAN_TWO_LMACS 2
+#define IWX_SCAN_LB_LMAC_IDX 0 /* low-band */
+#define IWX_SCAN_HB_LMAC_IDX 1 /* high-band */
+
+/**
+ * struct iwx_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscuous_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw. Deprecated starting with
+ * API version 5.
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwx_scan_config {
+ uint8_t enable_cam_mode;
+ uint8_t enable_promiscuous_mode;
+ uint8_t bcast_sta_id;
+ uint8_t reserved;
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_5 */
+
+/**
+ * struct iwx_scan_config_v2
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell_active: default dwell time for active scan
+ * @dwell_passive: default dwell time for passive scan
+ * @dwell_fragmented: default dwell time for fragmented scan
+ * @dwell_extended: default dwell time for channels 1, 6 and 11
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwx_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwx_scan_config_v2 {
+ uint32_t flags;
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+ uint32_t legacy_rates;
+ uint32_t out_of_channel_time[IWX_SCAN_TWO_LMACS];
+ uint32_t suspend_time[IWX_SCAN_TWO_LMACS];
+ struct iwx_scan_dwell dwell;
+ uint8_t mac_addr[ETHER_ADDR_LEN];
+ uint8_t bcast_sta_id;
+ uint8_t channel_flags;
+ uint8_t channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * iwx_umac_scan_flags
+ * @IWX_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
+ * @IWX_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+#define IWX_UMAC_SCAN_FLAG_PREEMPTIVE (1 << 0)
+#define IWX_UMAC_SCAN_FLAG_START_NOTIF (1 << 1)
+
+#define IWX_UMAC_SCAN_UID_TYPE_OFFSET 0
+#define IWX_UMAC_SCAN_UID_SEQ_OFFSET 8
+
+#define IWX_UMAC_SCAN_GEN_FLAGS_PERIODIC (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS_OVER_BT (1 << 1)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASS_ALL (1 << 2)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PASSIVE (1 << 3)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT (1 << 4)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE (1 << 5)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID (1 << 6)
+#define IWX_UMAC_SCAN_GEN_FLAGS_FRAGMENTED (1 << 7)
+#define IWX_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED (1 << 8)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MATCH (1 << 9)
+#define IWX_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL (1 << 10)
+/* Extended dwell is obsolete when adaptive dwell is used, making this
+ * bit reusable. Hence, probe request defer is used only when adaptive
+ * dwell is supported. */
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_DEFER_SUPP (1 << 10)
+#define IWX_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED (1 << 11)
+#define IWX_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL (1 << 13)
+#define IWX_UMAC_SCAN_GEN_FLAGS_MAX_CHNL_TIME (1 << 14)
+#define IWX_UMAC_SCAN_GEN_FLAGS_PROB_REQ_HIGH_TX_RATE (1 << 15)
+
+/**
+ * UMAC scan general flags #2
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL: Whether to send a complete
+ * notification per channel or not.
+ * @IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
+ * reorder optimization or not.
+ */
+#define IWX_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER (1 << 1)
+
+/**
+ * UMAC scan general flags version 2
+ *
+ * The FW flags were reordered, hence the driver introduces version 2.
+ *
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC: periodic or scheduled
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL: pass all probe responses and beacons
+ * during scan iterations
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE: send complete notification
+ * on every iteration instead of only once after the last iteration
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1: fragmented scan LMAC1
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2: fragmented scan LMAC2
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_MATCH: does this scan check for profile matching
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS: use all valid chains for RX
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL: works with adaptive dwell
+ * for active channel
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE: can be preempted by other requests
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_NTF_START: send notification of scan start
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID: matching on multiple SSIDs
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE: all the channels scanned
+ * as passive
+ * @IWX_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN: at the end of the 2.4 GHz
+ * and 5.2 GHz band scans, trigger a scan on the 6 GHz band to discover
+ * the reported collocated APs
+ */
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PERIODIC (1 << 0)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PASS_ALL (1 << 1)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_NTFY_ITER_COMPLETE (1 << 2)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1 (1 << 3)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2 (1 << 4)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_MATCH (1 << 5)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS (1 << 6)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_ADAPTIVE_DWELL (1 << 7)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_PREEMPTIVE (1 << 8)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_NTF_START (1 << 9)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_MULTI_SSID (1 << 10)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE (1 << 11)
+#define IWX_UMAC_SCAN_GEN_FLAGS_V2_TRIGGER_UHB_SCAN (1 << 12)
+
+/**
+ * struct iwx_scan_channel_cfg_umac
+ * @flags: bitmap - 0-19: directed scan to i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwx_scan_channel_cfg_umac {
+ uint32_t flags;
+ union {
+ struct {
+ uint8_t channel_num;
+ uint8_t iter_count;
+ uint16_t iter_interval;
+ } v1; /* SCAN_CHANNEL_CFG_S_VER1 */
+ struct {
+ uint8_t channel_num;
+ uint8_t band;
+ uint8_t iter_count;
+ uint8_t iter_interval;
+ } v2; /* SCAN_CHANNEL_CFG_S_VER{2,3,4} */
+ };
+} __packed;
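+
+/*
+ * Illustrative sketch (hypothetical channel parameters): filling one v2
+ * channel entry for a single directed-scan SSID on channel 6.  The band
+ * constant is device/API specific and is left out here.
+ */
+#if 0	/* example only */
+static void
+example_fill_chan(struct iwx_scan_channel_cfg_umac *cfg)
+{
+	cfg->flags = htole32(1 << 0);	/* directed scan toward SSID 0 */
+	cfg->v2.channel_num = 6;
+	cfg->v2.iter_count = 1;
+	cfg->v2.iter_interval = 0;
+}
+#endif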
+
+/**
+ * struct iwx_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwx_scan_umac_schedule {
+ uint16_t interval;
+ uint8_t iter_count;
+ uint8_t reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwx_scan_req_umac_tail - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v1 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwx_scan_probe_req_v1 preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_req_umac_tail - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwx_scan_req_umac_tail_v2 {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_2 */
+ struct iwx_scan_probe_req preq;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwx_scan_umac_chan_param
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @reserved: for future use and alignment
+ */
+struct iwx_scan_umac_chan_param {
+ uint8_t flags;
+ uint8_t count;
+ uint16_t reserved;
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+
+#define IWX_SCAN_LB_LMAC_IDX 0
+#define IWX_SCAN_HB_LMAC_IDX 1
+
+/**
+ * struct iwx_scan_req_umac
+ * @flags: &enum iwl_umac_scan_flags
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwl_scan_priority
+ * @general_flags: &enum iwl_umac_scan_general_flags
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan per LMAC
+ * @passive_dwell: dwell time for passive scan per LMAC
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @general_flags2: &enum iwl_umac_scan_general_flags2
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
+ * @max_out_time: max out of serving channel time, per LMAC - for CDB there
+ * are 2 LMACs (high band and low band)
+ * @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
+ * @scan_priority: scan internal prioritization &enum iwl_scan_priority
+ * @num_of_fragments: Number of fragments needed for full coverage per band.
+ * Relevant only for fragmented scan.
+ * @channel: &struct iwx_scan_umac_chan_param
+ * @reserved: for future use and alignment
+ * @reserved3: for future use and alignment
+ * @data: &struct iwx_scan_channel_cfg_umac and
+ * &struct iwx_scan_req_umac_tail
+ */
+struct iwx_scan_req_umac {
+ uint32_t flags;
+ uint32_t uid;
+ uint32_t ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ uint16_t general_flags;
+ uint8_t reserved;
+ uint8_t scan_start_mac_id;
+ union {
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+ struct {
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t reserved3;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t reserved2;
+ uint8_t adwell_default_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v8; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_8 */
+ struct {
+ uint8_t active_dwell[2];
+ uint8_t adwell_default_hb_n_aps;
+ uint8_t adwell_default_lb_n_aps;
+ uint8_t adwell_default_n_aps_social;
+ uint8_t general_flags2;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_time[2];
+ uint32_t suspend_time[2];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[2];
+ uint8_t num_of_fragments[2];
+ struct iwx_scan_umac_chan_param channel;
+ uint8_t data[];
+ } v9; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_9 */
+ };
+} __packed;
+
+#define IWX_SCAN_REQ_UMAC_SIZE_V8 sizeof(struct iwx_scan_req_umac)
+#define IWX_SCAN_REQ_UMAC_SIZE_V7 48
+#define IWX_SCAN_REQ_UMAC_SIZE_V6 44
+#define IWX_SCAN_REQ_UMAC_SIZE_V1 36
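+
+/*
+ * Illustrative sketch (a judgment call, not a definitive rule): choosing the
+ * fixed part of the command for a negotiated UMAC scan API version, assuming
+ * a hypothetical 'ver' obtained from the firmware capabilities.
+ */
+#if 0	/* example only */
+size_t fixed_len;
+
+if (ver >= 8)
+	fixed_len = IWX_SCAN_REQ_UMAC_SIZE_V8;
+else if (ver == 7)
+	fixed_len = IWX_SCAN_REQ_UMAC_SIZE_V7;
+else if (ver == 6)
+	fixed_len = IWX_SCAN_REQ_UMAC_SIZE_V6;
+else
+	fixed_len = IWX_SCAN_REQ_UMAC_SIZE_V1;
+#endif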
+
+/**
+ * struct iwx_scan_general_params_v10
+ * @flags: &enum iwx_umac_scan_flags
+ * @reserved: reserved for future
+ * @scan_start_mac_id: report the scan start TSF time according to this mac TSF
+ * @active_dwell: dwell time for active scan per LMAC
+ * @adwell_default_2g: adaptive dwell default number of APs
+ * for 2.4GHz channel
+ * @adwell_default_5g: adaptive dwell default number of APs
+ * for 5GHz channels
+ * @adwell_default_social_chn: adaptive dwell default number of
+ * APs per social channel
+ * @reserved1: reserved for future
+ * @adwell_max_budget: the maximal number of TUs that adaptive dwell
+ * can add to the total scan time
+ * @max_out_of_time: max out of serving channel time, per LMAC
+ * @suspend_time: max suspend time, per LMAC
+ * @scan_priority: priority of the request
+ * @passive_dwell: continuous dwell time for passive channel
+ * (without adaptive dwell)
+ * @num_of_fragments: number of fragments needed for full fragmented
+ * scan coverage.
+ */
+struct iwx_scan_general_params_v10 {
+ uint16_t flags;
+ uint8_t reserved;
+ uint8_t scan_start_mac_id;
+ uint8_t active_dwell[IWX_SCAN_TWO_LMACS];
+ uint8_t adwell_default_2g;
+ uint8_t adwell_default_5g;
+ uint8_t adwell_default_social_chn;
+ uint8_t reserved1;
+ uint16_t adwell_max_budget;
+ uint32_t max_out_of_time[IWX_SCAN_TWO_LMACS];
+ uint32_t suspend_time[IWX_SCAN_TWO_LMACS];
+ uint32_t scan_priority;
+ uint8_t passive_dwell[IWX_SCAN_TWO_LMACS];
+ uint8_t num_of_fragments[IWX_SCAN_TWO_LMACS];
+} __packed; /* SCAN_GENERAL_PARAMS_API_S_VER_10 */
+
+/**
+ * struct iwx_scan_channel_params_v6
+ * @flags: channel flags &enum iwl_scan_channel_flags
+ * @count: num of channels in scan request
+ * @n_aps_override: override the number of APs the FW uses to calculate dwell
+ * time when adaptive dwell is used.
+ * Channel k will use n_aps_override[i] when BIT(20 + i) is set in
+ * channel_config[k].flags
+ * @channel_config: array of explicit channel configurations
+ * for the 2.4 GHz and 5.2 GHz bands
+ */
+struct iwx_scan_channel_params_v6 {
+ uint8_t flags;
+ uint8_t count;
+ uint8_t n_aps_override[2];
+ struct iwx_scan_channel_cfg_umac channel_config[67];
+} __packed; /* SCAN_CHANNEL_PARAMS_API_S_VER_6 */
+
+/**
+ * struct iwx_scan_periodic_parms_v1
+ * @schedule: scan scheduling parameters
+ * @delay: initial delay of the periodic scan in seconds
+ * @reserved: reserved for future
+ */
+struct iwx_scan_periodic_parms_v1 {
+ struct iwx_scan_umac_schedule schedule[IWX_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+} __packed; /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+
+#define IWX_SCAN_SHORT_SSID_MAX_SIZE 8
+#define IWX_SCAN_BSSID_MAX_SIZE 16
+
+/**
+ * struct iwx_scan_probe_params_v4
+ * @preq: scan probe request params
+ * @short_ssid_num: number of valid short SSIDs in short ssid array
+ * @bssid_num: number of valid bssid in bssids array
+ * @reserved: reserved
+ * @direct_scan: list of ssids
+ * @short_ssid: array of short ssids
+ * @bssid_array: array of bssids
+ */
+struct iwx_scan_probe_params_v4 {
+ struct iwx_scan_probe_req preq;
+ uint8_t short_ssid_num;
+ uint8_t bssid_num;
+ uint16_t reserved;
+ struct iwx_ssid_ie direct_scan[IWX_PROBE_OPTION_MAX];
+ uint32_t short_ssid[IWX_SCAN_SHORT_SSID_MAX_SIZE];
+ uint8_t bssid_array[IWX_SCAN_BSSID_MAX_SIZE][ETHER_ADDR_LEN];
+} __packed; /* SCAN_PROBE_PARAMS_API_S_VER_4 */
+
+/**
+ * struct iwx_scan_req_params_v14
+ * @general_params: &struct iwx_scan_general_params_v10
+ * @channel_params: &struct iwx_scan_channel_params_v6
+ * @periodic_params: &struct iwx_scan_periodic_parms_v1
+ * @probe_params: &struct iwx_scan_probe_params_v4
+ */
+struct iwx_scan_req_params_v14 {
+ struct iwx_scan_general_params_v10 general_params;
+ struct iwx_scan_channel_params_v6 channel_params;
+ struct iwx_scan_periodic_parms_v1 periodic_params;
+ struct iwx_scan_probe_params_v4 probe_params;
+} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_14 */
+
+/**
+ * struct iwx_scan_req_umac_v14
+ * @uid: scan id, &enum iwl_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwx_scan_priority
+ * @scan_params: scan parameters
+ */
+struct iwx_scan_req_umac_v14 {
+ uint32_t uid;
+ uint32_t ooc_priority;
+ struct iwx_scan_req_params_v14 scan_params;
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_14 */
+
+/**
+ * struct iwx_umac_scan_abort
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwx_umac_scan_abort {
+ uint32_t uid;
+ uint32_t flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwx_umac_scan_complete
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: &enum iwx_scan_offload_complete_status
+ * @ebs_status: &enum iwx_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwx_umac_scan_complete {
+ uint32_t uid;
+ uint8_t last_schedule;
+ uint8_t last_iter;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_from_last_iter;
+ uint32_t reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwx_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request
+ */
+struct iwx_scan_offload_profile_match {
+ uint8_t bssid[ETHER_ADDR_LEN];
+ uint16_t reserved;
+ uint8_t channel;
+ uint8_t energy;
+ uint8_t matching_feature;
+ uint8_t matching_channels[IWX_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwx_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwx_scan_offload_profiles_query {
+ uint32_t matched_profiles;
+ uint32_t last_scan_age;
+ uint32_t n_scans_done;
+ uint32_t gp2_d0u;
+ uint32_t gp2_invoked;
+ uint8_t resume_while_scanning;
+ uint8_t self_recovery;
+ uint16_t reserved;
+ struct iwx_scan_offload_profile_match matches[IWX_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
+/**
+ * struct iwx_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwx_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwx_umac_scan_iter_complete_notif {
+ uint32_t uid;
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwx_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWX_GSCAN_START_CMD 0x0
+#define IWX_GSCAN_STOP_CMD 0x1
+#define IWX_GSCAN_SET_HOTLIST_CMD 0x2
+#define IWX_GSCAN_RESET_HOTLIST_CMD 0x3
+#define IWX_GSCAN_SET_SIGNIFICANT_CHANGE_CMD 0x4
+#define IWX_GSCAN_RESET_SIGNIFICANT_CHANGE_CMD 0x5
+#define IWX_GSCAN_SIGNIFICANT_CHANGE_EVENT 0xFD
+#define IWX_GSCAN_HOTLIST_CHANGE_EVENT 0xFE
+#define IWX_GSCAN_RESULTS_AVAILABLE_EVENT 0xFF
+
+/* STA API */
+
+/**
+ * flags for the ADD_STA host command
+ * @IWX_STA_FLG_REDUCED_TX_PWR_CTRL:
+ * @IWX_STA_FLG_REDUCED_TX_PWR_DATA:
+ * @IWX_STA_FLG_DISABLE_TX: set if TX should be disabled
+ * @IWX_STA_FLG_PS: set if STA is in Power Save
+ * @IWX_STA_FLG_INVALID: set if STA is invalid
+ * @IWX_STA_FLG_DLP_EN: Direct Link Protocol is enabled
+ * @IWX_STA_FLG_SET_ALL_KEYS: the current key applies to all key IDs
+ * @IWX_STA_FLG_DRAIN_FLOW: drain flow
+ * @IWX_STA_FLG_PAN: STA is for PAN interface
+ * @IWX_STA_FLG_CLASS_AUTH:
+ * @IWX_STA_FLG_CLASS_ASSOC:
+ * @IWX_STA_FLG_CLASS_MIMO_PROT:
+ * @IWX_STA_FLG_MAX_AGG_SIZE_MSK: maximal size for A-MPDU
+ * @IWX_STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
+ * @IWX_STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
+ * initialised by driver and can be updated by fw upon reception of
+ * action frames that can change the channel width. When cleared the fw
+ * will send all the frames in 20MHz even when FAT channel is requested.
+ * @IWX_STA_FLG_MIMO_EN_MSK: support for MIMO. This flag is initialised by the
+ * driver and can be updated by fw upon reception of action frames.
+ * @IWX_STA_FLG_MFP_EN: Management Frame Protection
+ */
+#define IWX_STA_FLG_REDUCED_TX_PWR_CTRL (1 << 3)
+#define IWX_STA_FLG_REDUCED_TX_PWR_DATA (1 << 6)
+
+#define IWX_STA_FLG_DISABLE_TX (1 << 4)
+
+#define IWX_STA_FLG_PS (1 << 8)
+#define IWX_STA_FLG_DRAIN_FLOW (1 << 12)
+#define IWX_STA_FLG_PAN (1 << 13)
+#define IWX_STA_FLG_CLASS_AUTH (1 << 14)
+#define IWX_STA_FLG_CLASS_ASSOC (1 << 15)
+#define IWX_STA_FLG_RTS_MIMO_PROT (1 << 17)
+
+#define IWX_STA_FLG_MAX_AGG_SIZE_SHIFT 19
+#define IWX_STA_FLG_MAX_AGG_SIZE_8K (0 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_16K (1 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_32K (2 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_64K (3 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_128K (4 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_256K (5 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_512K (6 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_1024K (7 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_2M (8 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_4M (9 << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+#define IWX_STA_FLG_MAX_AGG_SIZE_MSK (0xf << IWX_STA_FLG_MAX_AGG_SIZE_SHIFT)
+
+#define IWX_STA_FLG_AGG_MPDU_DENS_SHIFT 23
+#define IWX_STA_FLG_AGG_MPDU_DENS_2US (4 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_4US (5 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_8US (6 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_16US (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+#define IWX_STA_FLG_AGG_MPDU_DENS_MSK (7 << IWX_STA_FLG_AGG_MPDU_DENS_SHIFT)
+
+#define IWX_STA_FLG_FAT_EN_20MHZ (0 << 26)
+#define IWX_STA_FLG_FAT_EN_40MHZ (1 << 26)
+#define IWX_STA_FLG_FAT_EN_80MHZ (2 << 26)
+#define IWX_STA_FLG_FAT_EN_160MHZ (3 << 26)
+#define IWX_STA_FLG_FAT_EN_MSK (3 << 26)
+
+#define IWX_STA_FLG_MIMO_EN_SISO (0 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO2 (1 << 28)
+#define IWX_STA_FLG_MIMO_EN_MIMO3 (2 << 28)
+#define IWX_STA_FLG_MIMO_EN_MSK (3 << 28)
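+
+/*
+ * Illustrative composition of station_flags (see struct iwx_add_sta_cmd
+ * below): a 40 MHz-capable MIMO2 station allowing 64K A-MPDUs with 4us
+ * minimum MPDU density could set
+ *
+ *	IWX_STA_FLG_FAT_EN_40MHZ | IWX_STA_FLG_MIMO_EN_MIMO2 |
+ *	IWX_STA_FLG_MAX_AGG_SIZE_64K | IWX_STA_FLG_AGG_MPDU_DENS_4US
+ *
+ * in station_flags, with the corresponding *_MSK values set in
+ * station_flags_msk.
+ */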
+
+/**
+ * key flags for the ADD_STA host command
+ * @IWX_STA_KEY_FLG_NO_ENC: no encryption
+ * @IWX_STA_KEY_FLG_WEP: WEP encryption algorithm
+ * @IWX_STA_KEY_FLG_CCM: CCMP encryption algorithm
+ * @IWX_STA_KEY_FLG_TKIP: TKIP encryption algorithm
+ * @IWX_STA_KEY_FLG_EXT: extended cipher algorithm (depends on the FW support)
+ * @IWX_STA_KEY_FLG_CMAC: CMAC encryption algorithm
+ * @IWX_STA_KEY_FLG_ENC_UNKNOWN: unknown encryption algorithm
+ * @IWX_STA_KEY_FLG_EN_MSK: mask for encryption algorithm value
+ * @IWX_STA_KEY_FLG_WEP_KEY_MAP: wep is either a group key (0 - legacy WEP) or from
+ * station info array (1 - n 1X mode)
+ * @IWX_STA_KEY_FLG_KEYID_MSK: the index of the key
+ * @IWX_STA_KEY_NOT_VALID: key is invalid
+ * @IWX_STA_KEY_FLG_WEP_13BYTES: set for 13 bytes WEP key
+ * @IWX_STA_KEY_MULTICAST: set for multicast key
+ * @IWX_STA_KEY_MFP: key is used for Management Frame Protection
+ */
+#define IWX_STA_KEY_FLG_NO_ENC (0 << 0)
+#define IWX_STA_KEY_FLG_WEP (1 << 0)
+#define IWX_STA_KEY_FLG_CCM (2 << 0)
+#define IWX_STA_KEY_FLG_TKIP (3 << 0)
+#define IWX_STA_KEY_FLG_EXT (4 << 0)
+#define IWX_STA_KEY_FLG_CMAC (6 << 0)
+#define IWX_STA_KEY_FLG_ENC_UNKNOWN (7 << 0)
+#define IWX_STA_KEY_FLG_EN_MSK (7 << 0)
+#define IWX_STA_KEY_FLG_WEP_KEY_MAP (1 << 3)
+#define IWX_STA_KEY_FLG_KEYID_POS 8
+#define IWX_STA_KEY_FLG_KEYID_MSK (3 << IWX_STA_KEY_FLG_KEYID_POS)
+#define IWX_STA_KEY_NOT_VALID (1 << 11)
+#define IWX_STA_KEY_FLG_WEP_13BYTES (1 << 12)
+#define IWX_STA_KEY_MULTICAST (1 << 14)
+#define IWX_STA_KEY_MFP (1 << 15)
+
+/**
+ * indicate to the fw which flags are being changed
+ * @IWX_STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
+ * @IWX_STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
+ * @IWX_STA_MODIFY_TX_RATE: unused
+ * @IWX_STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
+ * @IWX_STA_MODIFY_REMOVE_BA_TID: this command modifies %remove_immediate_ba_tid
+ * @IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT: this command modifies %sleep_tx_count
+ * @IWX_STA_MODIFY_PROT_TH:
+ * @IWX_STA_MODIFY_QUEUES: modify the queues used by this station
+ */
+#define IWX_STA_MODIFY_QUEUE_REMOVAL (1 << 0)
+#define IWX_STA_MODIFY_TID_DISABLE_TX (1 << 1)
+#define IWX_STA_MODIFY_TX_RATE (1 << 2)
+#define IWX_STA_MODIFY_ADD_BA_TID (1 << 3)
+#define IWX_STA_MODIFY_REMOVE_BA_TID (1 << 4)
+#define IWX_STA_MODIFY_SLEEPING_STA_TX_COUNT (1 << 5)
+#define IWX_STA_MODIFY_PROT_TH (1 << 6)
+#define IWX_STA_MODIFY_QUEUES (1 << 7)
+
+#define IWX_STA_MODE_MODIFY 1
+
+/**
+ * type of sleep of the station
+ * @IWX_STA_SLEEP_STATE_AWAKE:
+ * @IWX_STA_SLEEP_STATE_PS_POLL:
+ * @IWX_STA_SLEEP_STATE_UAPSD:
+ * @IWX_STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
+ */
+#define IWX_STA_SLEEP_STATE_AWAKE 0
+#define IWX_STA_SLEEP_STATE_PS_POLL (1 << 0)
+#define IWX_STA_SLEEP_STATE_UAPSD (1 << 1)
+#define IWX_STA_SLEEP_STATE_MOREDATA (1 << 2)
+
+/* STA ID and color bits definitions */
+#define IWX_STA_ID_SEED (0x0f)
+#define IWX_STA_ID_POS (0)
+#define IWX_STA_ID_MSK (IWX_STA_ID_SEED << IWX_STA_ID_POS)
+
+#define IWX_STA_COLOR_SEED (0x7)
+#define IWX_STA_COLOR_POS (4)
+#define IWX_STA_COLOR_MSK (IWX_STA_COLOR_SEED << IWX_STA_COLOR_POS)
+
+#define IWX_STA_ID_N_COLOR_GET_COLOR(id_n_color) \
+ (((id_n_color) & IWX_STA_COLOR_MSK) >> IWX_STA_COLOR_POS)
+#define IWX_STA_ID_N_COLOR_GET_ID(id_n_color) \
+ (((id_n_color) & IWX_STA_ID_MSK) >> IWX_STA_ID_POS)
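+
+/*
+ * Example: an id_n_color value of 0x25 decodes via the macros above to
+ * station id 5 (bits 3:0) and color 2 (bits 6:4).
+ */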
+
+#define IWX_STA_KEY_MAX_NUM (16)
+#define IWX_STA_KEY_IDX_INVALID (0xff)
+#define IWX_STA_KEY_MAX_DATA_KEY_NUM (4)
+#define IWX_MAX_GLOBAL_KEYS (4)
+#define IWX_STA_KEY_LEN_WEP40 (5)
+#define IWX_STA_KEY_LEN_WEP104 (13)
+
+/**
+ * struct iwx_keyinfo - key information
+ * @key_flags: type %iwx_sta_key_flag
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ * @key_offset: key offset in the fw's key table
+ * @key: 16-byte unicast decryption key
+ * @tx_secur_seq_cnt: initial RSC / PN needed for replay check
+ * @hw_tkip_mic_rx_key: byte: MIC Rx Key - used for TKIP only
+ * @hw_tkip_mic_tx_key: byte: MIC Tx Key - used for TKIP only
+ */
+struct iwx_keyinfo {
+ uint16_t key_flags;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved1;
+ uint16_t tkip_rx_ttak[5];
+ uint8_t key_offset;
+ uint8_t reserved2;
+ uint8_t key[16];
+ uint64_t tx_secur_seq_cnt;
+ uint64_t hw_tkip_mic_rx_key;
+ uint64_t hw_tkip_mic_tx_key;
+} __packed;
+
+#define IWX_ADD_STA_STATUS_MASK 0xFF
+#define IWX_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWX_ADD_STA_BAID_MASK 0x7F00
+#define IWX_ADD_STA_BAID_SHIFT 8
+
+/**
+ * struct iwx_add_sta_cmd - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
+ * @add_modify: see IWX_STA_MODE_*
+ * @awake_acs: ACs to transmit data on while station is sleeping (for U-APSD)
+ * @tid_disable_tx: bitmap of TIDs for which Tx is disabled. Clear BIT(x) to
+ *	enable AMPDU for tid x. Set %IWX_STA_MODIFY_TID_DISABLE_TX to change
+ *	this field.
+ * @mac_id_n_color: the MAC context this station belongs to
+ * @addr: station's MAC address
+ * @reserved2: reserved
+ * @sta_id: index of station in uCode's station table
+ * @modify_mask: IWX_STA_MODIFY_*, selects which parameters to modify vs.
+ *	leave alone. 1 - modify, 0 - don't change.
+ * @reserved3: reserved
+ * @station_flags: see IWX_STA_FLG_*
+ * @station_flags_msk: which of %station_flags have changed,
+ *	also IWX_STA_FLG_*
+ * @add_immediate_ba_tid: tid for which to add block-ack support (Rx).
+ *	Set %IWX_STA_MODIFY_ADD_BA_TID to use this field, and also set
+ *	add_immediate_ba_ssn.
+ * @remove_immediate_ba_tid: tid for which to remove block-ack support (Rx).
+ *	Set %IWX_STA_MODIFY_REMOVE_BA_TID to use this field.
+ * @add_immediate_ba_ssn: ssn for the Rx block-ack session. Used together with
+ *	add_immediate_ba_tid.
+ * @sleep_tx_count: number of packets to transmit to the station even though
+ *	it is asleep. Used to synchronise PS-poll and u-APSD responses while
+ *	ucode keeps track of STA sleep state.
+ * @station_type: type of this station, see the IWX_STA_* station types.
+ * @sleep_state_flags: see IWX_STA_SLEEP_STATE_*.
+ * @assoc_id: assoc_id to be sent in VHT PLCP (9-bit), for grp use 0, for AP
+ * mac-addr.
+ * @beamform_flags: beam forming controls
+ * @tfd_queue_msk: tfd queues used by this station.
+ * Obsolete for new TX API (9 and above).
+ * @rx_ba_window: aggregation window size
+ * @sp_length: the size of the SP in actual number of frames
+ * @uapsd_acs: 4 LS bits are trigger enabled ACs, 4 MS bits are the deliver
+ * enabled ACs.
+ *
+ * The device contains an internal table of per-station information, with info
+ * on security keys, aggregation parameters, and Tx rates for initial Tx
+ * attempt and any retries (set by REPLY_TX_LINK_QUALITY_CMD).
+ *
+ * ADD_STA sets up the table entry for one station, either creating a new
+ * entry, or modifying a pre-existing one.
+ */
+struct iwx_add_sta_cmd {
+ uint8_t add_modify;
+ uint8_t awake_acs;
+ uint16_t tid_disable_tx;
+ uint32_t mac_id_n_color;
+ uint8_t addr[ETHER_ADDR_LEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
+ uint16_t reserved2;
+ uint8_t sta_id;
+ uint8_t modify_mask;
+ uint16_t reserved3;
+ uint32_t station_flags;
+ uint32_t station_flags_msk;
+ uint8_t add_immediate_ba_tid;
+ uint8_t remove_immediate_ba_tid;
+ uint16_t add_immediate_ba_ssn;
+ uint16_t sleep_tx_count;
+ uint8_t sleep_state_flags;
+ uint8_t station_type;
+ uint16_t assoc_id;
+ uint16_t beamform_flags;
+ uint32_t tfd_queue_msk;
+ uint16_t rx_ba_window;
+ uint8_t sp_length;
+ uint8_t uapsd_acs;
+} __packed; /* ADD_STA_CMD_API_S_VER_10 */
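+
+/*
+ * Sketch of adding a new station entry (illustrative only; sta_id and
+ * mac_id_n_color are hypothetical locals, and queueing the command to
+ * the firmware is driver logic outside this header):
+ *
+ *	struct iwx_add_sta_cmd cmd = { 0 };
+ *	cmd.sta_id = sta_id;
+ *	cmd.mac_id_n_color = htole32(mac_id_n_color);
+ *	cmd.add_modify = 0;		(0 adds; IWX_STA_MODE_MODIFY updates)
+ *	cmd.station_type = IWX_STA_LINK;
+ *	cmd.tid_disable_tx = htole16(0xffff);	(AMPDU off on all TIDs)
+ */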
+
+/**
+ * FW station types
+ * ( REPLY_ADD_STA = 0x18 )
+ * @IWX_STA_LINK: Link station - normal RX and TX traffic.
+ * @IWX_STA_GENERAL_PURPOSE: General purpose. In AP mode used for beacons
+ * and probe responses.
+ * @IWX_STA_MULTICAST: multicast traffic.
+ * @IWX_STA_TDLS_LINK: TDLS link station
+ * @IWX_STA_AUX_ACTIVITY: auxiliary station (scan, ROC and so on).
+ */
+#define IWX_STA_LINK 0
+#define IWX_STA_GENERAL_PURPOSE 1
+#define IWX_STA_MULTICAST 2
+#define IWX_STA_TDLS_LINK 3
+#define IWX_STA_AUX_ACTIVITY 4
+
+/**
+ * struct iwx_add_sta_key_common - add/modify sta key common part
+ * ( REPLY_ADD_STA_KEY = 0x17 )
+ * @sta_id: index of station in uCode's station table
+ * @key_offset: key offset in key storage
+ * @key_flags: IWX_STA_KEY_FLG_*
+ * @key: key material data
+ * @rx_secur_seq_cnt: RX security sequence counter for the key
+ */
+struct iwx_add_sta_key_common {
+ uint8_t sta_id;
+ uint8_t key_offset;
+ uint16_t key_flags;
+ uint8_t key[32];
+ uint8_t rx_secur_seq_cnt[16];
+} __packed;
+
+/**
+ * struct iwx_add_sta_key_cmd_v1 - add/modify sta key
+ * @common: see &struct iwx_add_sta_key_common
+ * @tkip_rx_tsc_byte2: TSC[2] for key mix ph1 detection
+ * @reserved: reserved
+ * @tkip_rx_ttak: 10-byte unicast TKIP TTAK for Rx
+ */
+struct iwx_add_sta_key_cmd_v1 {
+ struct iwx_add_sta_key_common common;
+ uint8_t tkip_rx_tsc_byte2;
+ uint8_t reserved;
+ uint16_t tkip_rx_ttak[5];
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_1 */
+
+/**
+ * struct iwx_add_sta_key_cmd - add/modify sta key
+ * @common: see &struct iwx_add_sta_key_common
+ * @rx_mic_key: TKIP RX unicast or multicast key
+ * @tx_mic_key: TKIP TX key
+ * @transmit_seq_cnt: TSC, transmit packet number
+ */
+struct iwx_add_sta_key_cmd {
+ struct iwx_add_sta_key_common common;
+ uint64_t rx_mic_key;
+ uint64_t tx_mic_key;
+ uint64_t transmit_seq_cnt;
+} __packed; /* ADD_MODIFY_STA_KEY_API_S_VER_2 */
+
+/**
+ * status in the response to ADD_STA command
+ * @IWX_ADD_STA_SUCCESS: operation was executed successfully
+ * @IWX_ADD_STA_STATIONS_OVERLOAD: no room left in the fw's station table
+ * @IWX_ADD_STA_IMMEDIATE_BA_FAILURE: can't add Rx block ack session
+ * @IWX_ADD_STA_MODIFY_NON_EXISTING_STA: driver requested to modify a station
+ * that doesn't exist.
+ */
+#define IWX_ADD_STA_SUCCESS 0x1
+#define IWX_ADD_STA_STATIONS_OVERLOAD 0x2
+#define IWX_ADD_STA_IMMEDIATE_BA_FAILURE 0x4
+#define IWX_ADD_STA_MODIFY_NON_EXISTING_STA 0x8
+
+/**
+ * struct iwx_rm_sta_cmd - Remove a station from the fw's station table
+ * ( IWX_REMOVE_STA = 0x19 )
+ * @sta_id: the station id of the station to be removed
+ */
+struct iwx_rm_sta_cmd {
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed; /* IWX_REMOVE_STA_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_mgmt_mcast_key_cmd
+ * ( IWX_MGMT_MCAST_KEY = 0x1f )
+ * @ctrl_flags: %iwx_sta_key_flag
+ * @IGTK:
+ * @K1: IGTK master key
+ * @K2: IGTK sub key
+ * @sta_id: station ID that supports IGTK
+ * @key_id:
+ * @receive_seq_cnt: initial RSC/PN needed for replay check
+ */
+struct iwx_mgmt_mcast_key_cmd {
+ uint32_t ctrl_flags;
+ uint8_t IGTK[16];
+ uint8_t K1[16];
+ uint8_t K2[16];
+ uint32_t key_id;
+ uint32_t sta_id;
+ uint64_t receive_seq_cnt;
+} __packed; /* SEC_MGMT_MULTICAST_KEY_CMD_API_S_VER_1 */
+
+struct iwx_wep_key {
+ uint8_t key_index;
+ uint8_t key_offset;
+ uint16_t reserved1;
+ uint8_t key_size;
+ uint8_t reserved2[3];
+ uint8_t key[16];
+} __packed;
+
+struct iwx_wep_key_cmd {
+ uint32_t mac_id_n_color;
+ uint8_t num_keys;
+ uint8_t decryption_type;
+ uint8_t flags;
+ uint8_t reserved;
+ struct iwx_wep_key wep_key[0];
+} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+
+/*
+ * BT coex
+ */
+
+#define IWX_BT_COEX_DISABLE 0x0
+#define IWX_BT_COEX_NW 0x1
+#define IWX_BT_COEX_BT 0x2
+#define IWX_BT_COEX_WIFI 0x3
+/* BT_COEX_MODES_E */
+
+#define IWX_BT_COEX_MPLUT_ENABLED (1 << 0)
+#define IWX_BT_COEX_MPLUT_BOOST_ENABLED (1 << 1)
+#define IWX_BT_COEX_SYNC2SCO_ENABLED (1 << 2)
+#define IWX_BT_COEX_CORUN_ENABLED (1 << 3)
+#define IWX_BT_COEX_HIGH_BAND_RET (1 << 4)
+/* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+enum iwx_bt_coex_enabled_modules {
+ BT_COEX_DISABLE = 1 << 0,
+ BT_COEX_MPLUT_BOOST_ENABLED = 1 << 1,
+ BT_COEX_SYNC2SCO_ENABLED = 1 << 2,
+ BT_COEX_CORUN_ENABLED = 1 << 3,
+ BT_COEX_HIGH_BAND_RET = 1 << 4,
+};
+
+/**
+ * struct iwx_bt_coex_cmd - bt coex configuration command
+ * @mode: enum %iwx_bt_coex_mode
+ * @enabled_modules: enum %iwx_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwx_bt_coex_cmd {
+ uint32_t mode;
+ uint32_t enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+
+/*
+ * Location Aware Regulatory (LAR) API - MCC updates
+ */
+
+/**
+ * struct iwx_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for the
+ * world domain. A 'ZZ' MCC switches to the NVM default profile; in this
+ * case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see IWX_MCC_SOURCE_*
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwx_mcc_update_cmd {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved;
+ uint32_t key;
+ uint32_t reserved2[5];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
+
+/**
+ * struct iwx_mcc_update_resp_v3 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwx_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @source_id: the MCC source, see IWX_MCC_SOURCE_*
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information, see IWX_GEO_*
+ * @n_channels: number of channels in @channels (may be 14, 39, 50 or 51
+ *	channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the
+ *	first 16 bits are used.
+ */
+struct iwx_mcc_update_resp_v3 {
+ uint32_t status;
+ uint16_t mcc;
+ uint8_t cap;
+ uint8_t source_id;
+ uint16_t time;
+ uint16_t geo_info;
+ uint32_t n_channels;
+ uint32_t channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_3 */
+
+/**
+ * geographic information.
+ * @GEO_NO_INFO: no special info for this geo profile.
+ * @GEO_WMM_ETSI_5GHZ_INFO: this geo profile limits the WMM params
+ * for the 5 GHz band.
+ */
+#define IWX_GEO_NO_INFO 0
+#define IWX_GEO_WMM_ETSI_5GHZ_INFO (1 << 0)
+
+/**
+ * struct iwx_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwx_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @geo_info: geographic specific profile information,
+ *	see IWX_GEO_*
+ * @source_id: the MCC source, see IWX_MCC_SOURCE_*
+ * @reserved: for four-byte alignment.
+ * @n_channels: number of channels in @channels.
+ * @channels: channel control data map, DWORD for each channel. Only the
+ *	first 16 bits are used.
+ */
+struct iwx_mcc_update_resp {
+ uint32_t status;
+ uint16_t mcc;
+ uint16_t cap;
+ uint16_t time;
+ uint16_t geo_info;
+ uint8_t source_id;
+ uint8_t reserved[3];
+ uint32_t n_channels;
+ uint32_t channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_4 */
+
+/**
+ * struct iwx_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects
+ * to the cellular and connectivity cores, receives updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode then requests the driver to ask the device to update its
+ * geographic regulatory profile according to the given MCC (Mobile Country
+ * Code). The MCC is a two-letter code, ASCII upper case [A-Z], or '00' for
+ * the world domain. A 'ZZ' MCC switches to the NVM default profile; in
+ * this case, the MCC in the cmd response will be the relevant MCC in the
+ * NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see IWX_MCC_SOURCE_*
+ * @reserved1: reserved for alignment
+ */
+struct iwx_mcc_chub_notif {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwx_mcc_update_status {
+ IWX_MCC_RESP_NEW_CHAN_PROFILE,
+ IWX_MCC_RESP_SAME_CHAN_PROFILE,
+ IWX_MCC_RESP_INVALID,
+ IWX_MCC_RESP_NVM_DISABLED,
+ IWX_MCC_RESP_ILLEGAL,
+ IWX_MCC_RESP_LOW_PRIORITY,
+ IWX_MCC_RESP_TEST_MODE_ACTIVE,
+ IWX_MCC_RESP_TEST_MODE_NOT_ACTIVE,
+ IWX_MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+};
+
+#define IWX_MCC_SOURCE_OLD_FW 0
+#define IWX_MCC_SOURCE_ME 1
+#define IWX_MCC_SOURCE_BIOS 2
+#define IWX_MCC_SOURCE_3G_LTE_HOST 3
+#define IWX_MCC_SOURCE_3G_LTE_DEVICE 4
+#define IWX_MCC_SOURCE_WIFI 5
+#define IWX_MCC_SOURCE_RESERVED 6
+#define IWX_MCC_SOURCE_DEFAULT 7
+#define IWX_MCC_SOURCE_UNINITIALIZED 8
+#define IWX_MCC_SOURCE_MCC_API 9
+#define IWX_MCC_SOURCE_GET_CURRENT 0x10
+#define IWX_MCC_SOURCE_GETTING_MCC_TEST_MODE 0x11
+
+/*
+ * From Linux commit ab02165ccec4c78162501acedeef1a768acdb811:
+ * As the firmware is slowly running out of command IDs and grouping of
+ * commands is desirable anyway, the firmware is extending the command
+ * header from 4 bytes to 8 bytes to introduce a group (in place of the
+ * former flags field, since that's always 0 on commands and thus can
+ * be easily used to distinguish between the two).
+ *
+ * These functions retrieve specific information from the id field in
+ * the iwx_host_cmd struct which contains the command id, the group id,
+ * and the version of the command.
+ */
+static inline uint8_t
+iwx_cmd_opcode(uint32_t cmdid)
+{
+ return cmdid & 0xff;
+}
+
+static inline uint8_t
+iwx_cmd_groupid(uint32_t cmdid)
+{
+	return ((cmdid & 0xff00) >> 8);
+}
+
+static inline uint8_t
+iwx_cmd_version(uint32_t cmdid)
+{
+ return ((cmdid & 0xff0000) >> 16);
+}
+
+static inline uint32_t
+iwx_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
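+
+/*
+ * Round-trip sketch (the opcode/group/version values below are
+ * placeholders, not real command IDs):
+ *
+ *	uint32_t id = iwx_cmd_id(0x0c, 0x1, 0);	(id == 0x10c)
+ *	iwx_cmd_opcode(id);	(0x0c)
+ *	iwx_cmd_groupid(id);	(0x01)
+ *	iwx_cmd_version(id);	(0x00)
+ */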
+
+/* make uint16_t wide id out of uint8_t group and opcode */
+#define IWX_WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+struct iwx_cmd_header {
+ uint8_t code;
+ uint8_t flags;
+ uint8_t idx;
+ uint8_t qid;
+} __packed;
+
+struct iwx_cmd_header_wide {
+ uint8_t opcode;
+ uint8_t group_id;
+ uint8_t idx;
+ uint8_t qid;
+ uint16_t length;
+ uint8_t reserved;
+ uint8_t version;
+} __packed;
+
+#define IWX_POWER_SCHEME_CAM 1
+#define IWX_POWER_SCHEME_BPS 2
+#define IWX_POWER_SCHEME_LP 3
+
+#define IWX_DEF_CMD_PAYLOAD_SIZE 320
+#define IWX_MAX_CMD_PAYLOAD_SIZE (4096 - sizeof(struct iwx_cmd_header_wide))
+#define IWX_CMD_FAILED_MSK 0x40
+
+/**
+ * struct iwx_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
+struct iwx_device_cmd {
+ union {
+ struct {
+ struct iwx_cmd_header hdr;
+ uint8_t data[IWX_DEF_CMD_PAYLOAD_SIZE];
+ };
+ struct {
+ struct iwx_cmd_header_wide hdr_wide;
+ uint8_t data_wide[IWX_DEF_CMD_PAYLOAD_SIZE -
+ sizeof(struct iwx_cmd_header_wide) +
+ sizeof(struct iwx_cmd_header)];
+ };
+ };
+} __packed;
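+
+/*
+ * Both arms of the union are sized so that sizeof(struct iwx_device_cmd)
+ * is the same for legacy and wide headers; the wide arm gives up
+ * sizeof(struct iwx_cmd_header_wide) - sizeof(struct iwx_cmd_header)
+ * bytes of payload to the larger header.
+ */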
+
+struct iwx_rx_packet {
+ /*
+ * The first 4 bytes of the RX frame header contain both the RX frame
+ * size and some flags.
+ * Bit fields:
+ * 31: flag flush RB request
+ * 30: flag ignore TC (terminal counter) request
+ * 29: flag fast IRQ request
+ * 28-26: Reserved
+ * 25: Offload enabled
+ * 24: RPF enabled
+ * 23: RSS enabled
+ * 22: Checksum enabled
+ * 21-16: RX queue
+ * 15-14: Reserved
+ * 13-00: RX frame size
+ */
+ uint32_t len_n_flags;
+ struct iwx_cmd_header hdr;
+ uint8_t data[];
+} __packed;
+
+#define IWX_FH_RSCSR_FRAME_SIZE_MSK 0x00003fff
+#define IWX_FH_RSCSR_FRAME_INVALID 0x55550000
+#define IWX_FH_RSCSR_FRAME_ALIGN 0x40
+#define IWX_FH_RSCSR_RPA_EN (1 << 25)
+#define IWX_FH_RSCSR_RADA_EN (1 << 26)
+#define IWX_FH_RSCSR_RXQ_POS 16
+#define IWX_FH_RSCSR_RXQ_MASK 0x3F0000
+
+static uint32_t
+iwx_rx_packet_len(const struct iwx_rx_packet *pkt)
+{
+
+ return le32toh(pkt->len_n_flags) & IWX_FH_RSCSR_FRAME_SIZE_MSK;
+}
+
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunused-function"
+static uint32_t
+iwx_rx_packet_payload_len(const struct iwx_rx_packet *pkt)
+{
+
+ return iwx_rx_packet_len(pkt) - sizeof(pkt->hdr);
+}
+#pragma clang diagnostic pop
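+
+/*
+ * A queue-number accessor in the same style as the helpers above would
+ * look as follows (kept as a comment to avoid another unused-function
+ * warning; illustrative, not part of the firmware API):
+ *
+ *	static inline uint32_t
+ *	iwx_rx_packet_rxq(const struct iwx_rx_packet *pkt)
+ *	{
+ *		return (le32toh(pkt->len_n_flags) &
+ *		    IWX_FH_RSCSR_RXQ_MASK) >> IWX_FH_RSCSR_RXQ_POS;
+ *	}
+ */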
+
+#define IWX_MIN_DBM -100
+#define IWX_MAX_DBM -33 /* realistic guess */
+
+#define IWX_READ(sc, reg) \
+ bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
+
+#define IWX_WRITE(sc, reg, val) \
+ bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_WRITE_1(sc, reg, val) \
+ bus_space_write_1((sc)->sc_st, (sc)->sc_sh, (reg), (val))
+
+#define IWX_SETBITS(sc, reg, mask) \
+	IWX_WRITE(sc, reg, IWX_READ(sc, reg) | (mask))
+
+#define IWX_CLRBITS(sc, reg, mask) \
+ IWX_WRITE(sc, reg, IWX_READ(sc, reg) & ~(mask))
+
+#define IWX_BARRIER_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_WRITE)
+
+#define IWX_BARRIER_READ_WRITE(sc) \
+ bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
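+
+/*
+ * Usage sketch for the accessors above (the CSR register and bit names
+ * are assumed to be defined earlier in this header):
+ *
+ *	IWX_SETBITS(sc, IWX_CSR_GP_CNTRL,
+ *	    IWX_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ *	IWX_BARRIER_READ_WRITE(sc);
+ */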
diff --git a/sys/dev/iwx/if_iwxvar.h b/sys/dev/iwx/if_iwxvar.h
new file mode 100644
index 000000000000..1ac0bc24577c
--- /dev/null
+++ b/sys/dev/iwx/if_iwxvar.h
@@ -0,0 +1,924 @@
+/*-
+ * SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) AND ISC
+ */
+
+/* $OpenBSD: if_iwxvar.h,v 1.41 2023/03/06 11:53:24 stsp Exp $ */
+
+/*
+ * Copyright (c) 2014 genua mbh <info@genua.de>
+ * Copyright (c) 2014 Fixup Software Ltd.
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*-
+ * Based on BSD-licensed source modules in the Linux iwlwifi driver,
+ * which were used as the reference documentation for this implementation.
+ *
+ ******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 - 2019 Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************
+ */
+
+/*-
+ * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+struct iwx_rx_radiotap_header {
+ struct ieee80211_radiotap_header wr_ihdr;
+ uint64_t wr_tsft;
+ uint8_t wr_flags;
+ uint8_t wr_rate;
+ uint16_t wr_chan_freq;
+ uint16_t wr_chan_flags;
+ int8_t wr_dbm_antsignal;
+ int8_t wr_dbm_antnoise;
+} __packed;
+
+#define IWX_RX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_TSFT) | \
+ (1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | \
+ (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE))
+
+struct iwx_tx_radiotap_header {
+ struct ieee80211_radiotap_header wt_ihdr;
+ uint8_t wt_flags;
+ uint8_t wt_rate;
+ uint16_t wt_chan_freq;
+ uint16_t wt_chan_flags;
+} __packed;
+
+#define IWX_TX_RADIOTAP_PRESENT \
+ ((1 << IEEE80211_RADIOTAP_FLAGS) | \
+ (1 << IEEE80211_RADIOTAP_RATE) | \
+ (1 << IEEE80211_RADIOTAP_CHANNEL))
+
+#define IWX_UCODE_SECT_MAX 57
+
+/*
+ * fw_status is used to determine if we've already parsed the firmware file
+ *
+ * In addition to the following, status < 0 ==> -error
+ */
+#define IWX_FW_STATUS_NONE 0
+#define IWX_FW_STATUS_INPROGRESS 1
+#define IWX_FW_STATUS_DONE 2
+
+enum iwx_ucode_type {
+ IWX_UCODE_TYPE_REGULAR,
+ IWX_UCODE_TYPE_INIT,
+ IWX_UCODE_TYPE_WOW,
+ IWX_UCODE_TYPE_REGULAR_USNIFFER,
+ IWX_UCODE_TYPE_MAX
+};
+
+struct iwx_fw_info {
+ void *fw_rawdata;
+ size_t fw_rawsize;
+ int fw_status;
+
+ struct iwx_fw_sects {
+ struct iwx_fw_onesect {
+ const void *fws_data;
+ uint32_t fws_len;
+ uint32_t fws_devoff;
+ } fw_sect[IWX_UCODE_SECT_MAX];
+ size_t fw_totlen;
+ int fw_count;
+ } fw_sects[IWX_UCODE_TYPE_MAX];
+
+ /* FW debug data parsed for driver usage */
+ int dbg_dest_tlv_init;
+ const uint8_t *dbg_dest_ver;
+ uint8_t n_dest_reg;
+ const struct iwx_fw_dbg_dest_tlv_v1 *dbg_dest_tlv_v1;
+
+ const struct iwx_fw_dbg_conf_tlv *dbg_conf_tlv[IWX_FW_DBG_CONF_MAX];
+ size_t dbg_conf_tlv_len[IWX_FW_DBG_CONF_MAX];
+ struct iwx_fw_dbg_trigger_tlv *dbg_trigger_tlv[IWX_FW_DBG_TRIGGER_MAX];
+ size_t dbg_trigger_tlv_len[IWX_FW_DBG_TRIGGER_MAX];
+ struct iwx_fw_dbg_mem_seg_tlv *dbg_mem_tlv;
+ size_t n_mem_tlv;
+
+ /* Copy of firmware image loader found in file. */
+ uint8_t *iml;
+ size_t iml_len;
+};
+
+struct iwx_nvm_data {
+ int n_hw_addrs;
+ uint8_t hw_addr[ETHER_ADDR_LEN];
+
+ int sku_cap_band_24GHz_enable;
+ int sku_cap_band_52GHz_enable;
+ int sku_cap_11n_enable;
+ int sku_cap_11ac_enable;
+ int sku_cap_11ax_enable;
+ int sku_cap_amt_enable;
+ int sku_cap_ipan_enable;
+ int sku_cap_mimo_disable;
+ int lar_enabled;
+
+ uint8_t valid_tx_ant, valid_rx_ant;
+
+ uint16_t nvm_version;
+};
+
+/* max bufs per tfd the driver will use */
+#define IWX_MAX_CMD_TBS_PER_TFD 2
+
+struct iwx_host_cmd {
+ const void *data[IWX_MAX_CMD_TBS_PER_TFD];
+ struct iwx_rx_packet *resp_pkt;
+ size_t resp_pkt_len;
+ unsigned long _rx_page_addr;
+ uint32_t _rx_page_order;
+ int handler_status;
+
+ uint32_t flags;
+ uint16_t len[IWX_MAX_CMD_TBS_PER_TFD];
+ uint8_t dataflags[IWX_MAX_CMD_TBS_PER_TFD];
+ uint32_t id;
+};
+
+/*
+ * DMA glue is from iwn
+ */
+
+struct iwx_dma_info {
+ bus_dma_tag_t tag;
+ bus_dmamap_t map;
+ bus_dma_segment_t seg;
+ bus_addr_t paddr;
+ void *vaddr;
+ bus_size_t size;
+};
+
+#define IWX_TX_RING_COUNT IWX_DEFAULT_QUEUE_SIZE
+#define IWX_TX_RING_LOMARK 192
+#define IWX_TX_RING_HIMARK 224
+
+struct iwx_tx_data {
+ bus_dmamap_t map;
+ bus_addr_t cmd_paddr;
+ struct mbuf *m;
+ struct iwx_node *in;
+ int flags;
+#define IWX_TXDATA_FLAG_CMD_IS_NARROW 0x01
+};
+
+struct iwx_tx_ring {
+ struct iwx_dma_info desc_dma;
+ struct iwx_dma_info cmd_dma;
+ struct iwx_dma_info bc_tbl;
+ struct iwx_tfh_tfd *desc;
+ struct iwx_device_cmd *cmd;
+ struct iwx_tx_data data[IWX_TX_RING_COUNT];
+ int qid;
+ int queued;
+ int cur;
+ int cur_hw;
+ int tail;
+ int tail_hw;
+ int tid;
+ bus_dma_tag_t data_dmat;
+};
+
+#define IWX_RX_MQ_RING_COUNT 512
+/* Linux driver optionally uses 8k buffer */
+#define IWX_RBUF_SIZE 4096
+
+struct iwx_rx_data {
+ struct mbuf *m;
+ bus_dmamap_t map;
+};
+
+struct iwx_rx_ring {
+ struct iwx_dma_info free_desc_dma;
+ struct iwx_dma_info stat_dma;
+ struct iwx_dma_info used_desc_dma;
+ void *desc;
+ struct iwx_rb_status *stat;
+ struct iwx_rx_data data[IWX_RX_MQ_RING_COUNT];
+ int cur;
+ bus_dma_tag_t data_dmat;
+};
+
+#define IWX_FLAG_USE_ICT 0x01 /* using Interrupt Cause Table */
+#define IWX_FLAG_RFKILL 0x02 /* radio kill switch is set */
+#define IWX_FLAG_SCANNING 0x04 /* scan in progress */
+#define IWX_FLAG_MAC_ACTIVE 0x08 /* MAC context added to firmware */
+#define IWX_FLAG_BINDING_ACTIVE 0x10 /* MAC->PHY binding added to firmware */
+#define IWX_FLAG_STA_ACTIVE 0x20 /* AP added to firmware station table */
+#define IWX_FLAG_TE_ACTIVE 0x40 /* time event is scheduled */
+#define IWX_FLAG_HW_ERR 0x80 /* hardware error occurred */
+#define IWX_FLAG_SHUTDOWN 0x100 /* shutting down; new tasks forbidden */
+#define IWX_FLAG_BGSCAN 0x200 /* background scan in progress */
+#define IWX_FLAG_TXFLUSH 0x400 /* Tx queue flushing in progress */
+#define IWX_FLAG_HW_INITED 0x800 /* Hardware initialized */
+#define IWX_FLAG_AMPDUTX 0x1000
+
+struct iwx_ucode_status {
+ uint32_t uc_lmac_error_event_table[2];
+ uint32_t uc_umac_error_event_table;
+ uint32_t uc_log_event_table;
+ unsigned int error_event_table_tlv_status;
+
+ int uc_ok;
+ int uc_intr;
+};
+
+#define IWX_ERROR_EVENT_TABLE_LMAC1 (1 << 0)
+#define IWX_ERROR_EVENT_TABLE_LMAC2 (1 << 1)
+#define IWX_ERROR_EVENT_TABLE_UMAC (1 << 2)
+
+#define IWX_CMD_RESP_MAX PAGE_SIZE
+
+/* lower blocks contain EEPROM image and calibration data */
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_7000 16384
+#define IWX_OTP_LOW_IMAGE_SIZE_FAMILY_8000 32768
+
+#define IWX_TE_SESSION_PROTECTION_MAX_TIME_MS 1000
+#define IWX_TE_SESSION_PROTECTION_MIN_TIME_MS 400
+
+enum IWX_CMD_MODE {
+ IWX_CMD_ASYNC = (1 << 0),
+ IWX_CMD_WANT_RESP = (1 << 1),
+ IWX_CMD_SEND_IN_RFKILL = (1 << 2),
+};
+enum iwx_hcmd_dataflag {
+ IWX_HCMD_DFL_NOCOPY = (1 << 0),
+ IWX_HCMD_DFL_DUP = (1 << 1),
+};
+
+#define IWX_NUM_PAPD_CH_GROUPS 9
+#define IWX_NUM_TXP_CH_GROUPS 9
+
+struct iwx_phy_ctxt {
+ uint16_t id;
+ uint16_t color;
+ uint32_t ref;
+ struct ieee80211_channel *channel;
+ uint8_t sco; /* 40 MHz secondary channel offset */
+ uint8_t vht_chan_width;
+};
+
+struct iwx_bf_data {
+ int bf_enabled; /* filtering */
+ int ba_enabled; /* abort */
+ int ave_beacon_signal;
+ int last_cqm_event;
+};
+
+/**
+ * struct iwx_self_init_dram - dram data used by self init process
+ * @fw: lmac and umac dram data
+ * @lmac_cnt: number of lmac sections in fw image
+ * @umac_cnt: number of umac sections in fw image
+ * @paging: paging dram data
+ * @paging_cnt: number of paging sections needed by fw image
+ */
+struct iwx_self_init_dram {
+ struct iwx_dma_info *fw;
+ int lmac_cnt;
+ int umac_cnt;
+ struct iwx_dma_info *paging;
+ int paging_cnt;
+};
+
+/**
+ * struct iwx_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @last_amsdu: track last A-MSDU SN for duplication detection
+ * @last_sub_index: track A-MSDU sub-frame index for duplication detection
+ * @reorder_timer: timer for frames in the reorder buffer. For A-MSDU
+ *	it is the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @valid: reordering is valid for this queue
+ * @consec_oldsn_drops: consecutive drops due to old SN
+ * @consec_oldsn_ampdu_gp2: A-MPDU GP2 timestamp to track
+ * when to apply old SN consecutive drop workaround
+ * @consec_oldsn_prev_drop: track whether or not an MPDU
+ * that was single/part of the previous A-MPDU was
+ * dropped due to old SN
+ */
+struct iwx_reorder_buffer {
+ uint16_t head_sn;
+ uint16_t num_stored;
+ uint16_t buf_size;
+ uint16_t last_amsdu;
+ uint8_t last_sub_index;
+ struct callout reorder_timer;
+ int removed;
+ int valid;
+ unsigned int consec_oldsn_drops;
+ uint32_t consec_oldsn_ampdu_gp2;
+ unsigned int consec_oldsn_prev_drop;
+#define IWX_AMPDU_CONSEC_DROPS_DELBA 10
+};
+
+/**
+ * struct iwx_reorder_buf_entry - reorder buffer entry per frame sequence number
+ * @frames: list of mbufs stored (A-MSDU subframes share a sequence number)
+ * @reorder_time: time the packet was stored in the reorder buffer
+ */
+struct iwx_reorder_buf_entry {
+ struct mbufq frames;
+ struct timeval reorder_time;
+ uint32_t rx_pkt_status;
+ int chanidx;
+ int is_shortpre;
+ uint32_t rate_n_flags;
+ uint32_t device_timestamp;
+ struct ieee80211_rx_stats rxi;
+};
+
+/**
+ * struct iwx_rxba_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @entries_per_queue: # of buffers per queue
+ * @last_rx: last rx timestamp, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @sc: softc pointer, needed for timer context
+ * @reorder_buf: reorder buffer
+ * @reorder_buf_data: buffered frames, one entry per sequence number
+ */
+struct iwx_rxba_data {
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t baid;
+ uint16_t timeout;
+ uint16_t entries_per_queue;
+ struct timeval last_rx;
+ struct callout session_timer;
+ struct iwx_softc *sc;
+ struct iwx_reorder_buffer reorder_buf;
+ struct iwx_reorder_buf_entry entries[IEEE80211_AGGR_BAWMAX];
+};
+
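+/*
+ * Recover the enclosing struct iwx_rxba_data from a pointer to its
+ * embedded reorder buffer; the usual containerof() pattern written out
+ * by hand.
+ */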
+static inline struct iwx_rxba_data *
+iwx_rxba_data_from_reorder_buf(struct iwx_reorder_buffer *buf)
+{
+ return (void *)((uint8_t *)buf -
+ offsetof(struct iwx_rxba_data, reorder_buf));
+}
+
+/**
+ * struct iwx_rxq_dup_data - per station per rx queue data
+ * @last_seq: last sequence per tid for duplicate packet detection
+ * @last_sub_frame: last subframe packet
+ */
+struct iwx_rxq_dup_data {
+ uint16_t last_seq[IWX_MAX_TID_COUNT + 1];
+ uint8_t last_sub_frame[IWX_MAX_TID_COUNT + 1];
+};
+
+struct iwx_setkey_task_arg {
+ int sta_id;
+ struct ieee80211_node *ni;
+ struct ieee80211_key *k;
+};
+
+struct iwx_ba_task_data {
+ uint32_t start_tidmask;
+ uint32_t stop_tidmask;
+};
+
+
+/*
+ * Device configuration parameters which cannot be detected based on
+ * PCI vendor/product ID alone.
+ */
+struct iwx_device_cfg {
+ const char *fw_name;
+ const char *pnvm_name;
+ int tx_with_siso_diversity;
+ int uhb_supported;
+ int xtal_latency;
+ int low_latency_xtal;
+};
+
+/* Firmware listed here must be available in fw_update(8). */
+#define IWX_CC_A_FW "iwlwifi-cc-a0-77.ucode"
+#define IWX_TY_A_GF_A_FW "iwlwifi-ty-a0-gf-a0-77.ucode"
+#define IWX_TY_A_GF_A_PNVM "iwlwifi-ty-a0-gf-a0.pnvm"
+#define IWX_QU_B_HR_B_FW "iwlwifi-Qu-b0-hr-b0-77.ucode"
+#define IWX_QU_B_JF_B_FW "iwlwifi-Qu-b0-jf-b0-77.ucode"
+#define IWX_QU_C_HR_B_FW "iwlwifi-Qu-c0-hr-b0-77.ucode"
+#define IWX_QU_C_JF_B_FW "iwlwifi-Qu-c0-jf-b0-77.ucode"
+#define IWX_QUZ_A_HR_B_FW "iwlwifi-QuZ-a0-hr-b0-77.ucode"
+#define IWX_QUZ_A_JF_B_FW "iwlwifi-QuZ-a0-jf-b0-77.ucode"
+#define IWX_SO_A_GF_A_FW "iwlwifi-so-a0-gf-a0-77.ucode"
+#define IWX_SO_A_GF_A_PNVM "iwlwifi-so-a0-gf-a0.pnvm"
+#define IWX_SO_A_GF4_A_FW "iwlwifi-so-a0-gf4-a0-77.ucode"
+#define IWX_SO_A_GF4_A_PNVM "iwlwifi-so-a0-gf4-a0.pnvm"
+#define IWX_SO_A_HR_B_FW "iwlwifi-so-a0-hr-b0-77.ucode"
+#define IWX_SO_A_JF_B_FW "iwlwifi-so-a0-jf-b0-77.ucode"
+
+const struct iwx_device_cfg iwx_9560_quz_a0_jf_b0_cfg = {
+ .fw_name = IWX_QUZ_A_JF_B_FW,
+};
+
+const struct iwx_device_cfg iwx_9560_qu_c0_jf_b0_cfg = {
+ .fw_name = IWX_QU_C_JF_B_FW,
+};
+
+const struct iwx_device_cfg iwx_qu_b0_hr1_b0 = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwx_device_cfg iwx_qu_b0_hr_b0 = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_qu_hr = {
+ .fw_name = IWX_QU_B_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_qu_c0_hr1_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+ .tx_with_siso_diversity = true,
+};
+
+const struct iwx_device_cfg iwx_qu_c0_hr_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_qu_c0_hr_b0 = {
+ .fw_name = IWX_QU_C_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_quz_a0_hr1_b0 = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_ax201_cfg_quz_hr = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_cfg_so_a0_hr_b0 = {
+ .fw_name = IWX_SO_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_cfg_quz_a0_hr_b0 = {
+ .fw_name = IWX_QUZ_A_HR_B_FW,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf_a0 = {
+ .fw_name = IWX_SO_A_GF_A_FW,
+ .pnvm_name = IWX_SO_A_GF_A_PNVM,
+ .uhb_supported = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf_a0_long = {
+ .fw_name = IWX_SO_A_GF_A_FW,
+ .pnvm_name = IWX_SO_A_GF_A_PNVM,
+ .uhb_supported = 1,
+ .xtal_latency = 12000,
+ .low_latency_xtal = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf4_a0 = {
+ .fw_name = IWX_SO_A_GF4_A_FW,
+ .pnvm_name = IWX_SO_A_GF4_A_PNVM,
+ .uhb_supported = 1,
+ .xtal_latency = 12000,
+ .low_latency_xtal = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_gf4_a0_long = {
+ .fw_name = IWX_SO_A_GF4_A_FW,
+ .pnvm_name = IWX_SO_A_GF4_A_PNVM,
+ .uhb_supported = 1,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_ty_gf_a0 = {
+ .fw_name = IWX_TY_A_GF_A_FW,
+ .pnvm_name = IWX_TY_A_GF_A_PNVM,
+};
+
+const struct iwx_device_cfg iwx_2ax_cfg_so_jf_b0 = {
+ .fw_name = IWX_SO_A_JF_B_FW,
+};
+
+#define IWX_CFG_ANY (~0)
+
+#define IWX_CFG_MAC_TYPE_QU 0x33
+#define IWX_CFG_MAC_TYPE_QUZ 0x35
+#define IWX_CFG_MAC_TYPE_QNJ 0x36
+#define IWX_CFG_MAC_TYPE_SO 0x37
+#define IWX_CFG_MAC_TYPE_SNJ 0x42
+#define IWX_CFG_MAC_TYPE_SOF 0x43
+#define IWX_CFG_MAC_TYPE_MA 0x44
+#define IWX_CFG_MAC_TYPE_BZ 0x46
+#define IWX_CFG_MAC_TYPE_GL 0x47
+
+#define IWX_CFG_RF_TYPE_JF2 0x105
+#define IWX_CFG_RF_TYPE_JF1 0x108
+#define IWX_CFG_RF_TYPE_HR2 0x10a
+#define IWX_CFG_RF_TYPE_HR1 0x10c
+#define IWX_CFG_RF_TYPE_GF 0x10d
+#define IWX_CFG_RF_TYPE_MR 0x110
+#define IWX_CFG_RF_TYPE_MS 0x111
+#define IWX_CFG_RF_TYPE_FM 0x112
+
+#define IWX_CFG_RF_ID_JF 0x3
+#define IWX_CFG_RF_ID_JF1 0x6
+#define IWX_CFG_RF_ID_JF1_DIV 0xa
+
+#define IWX_CFG_NO_160 0x1
+#define IWX_CFG_160 0x0
+
+#define IWX_CFG_CORES_BT 0x0
+
+#define IWX_CFG_NO_CDB 0x0
+#define IWX_CFG_CDB 0x1
+
+#define IWX_SUBDEVICE_RF_ID(subdevice) ((uint16_t)((subdevice) & 0x00f0) >> 4)
+#define IWX_SUBDEVICE_NO_160(subdevice) ((uint16_t)((subdevice) & 0x0200) >> 9)
+#define IWX_SUBDEVICE_CORES(subdevice) ((uint16_t)((subdevice) & 0x1c00) >> 10)
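+
+/*
+ * Example: a (hypothetical) PCI subdevice ID of 0x0070 decodes as
+ * RF_ID 0x7 (bits 7:4), NO_160 0 (bit 9, i.e. 160 MHz capable), and
+ * CORES 0 (bits 12:10, IWX_CFG_CORES_BT).
+ */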
+
+struct iwx_rx_ba {
+ int ba_timeout_val;
+ u_int16_t ba_params;
+ u_int16_t ba_winstart;
+ u_int16_t ba_winend;
+ u_int16_t ba_winsize;
+#define IWX_BA_DONE 1
+ int ba_flags;
+};
+
+struct iwx_softc {
+ device_t sc_dev;
+ struct ieee80211com sc_ic;
+ int (*sc_newstate)(struct ieee80211com *, enum ieee80211_state, int);
+ int sc_newstate_pending;
+ int attached;
+
+ struct task init_task; /* NB: not reference-counted */
+ struct task newstate_task;
+ enum ieee80211_state ns_nstate;
+ int ns_arg;
+
+ /* Task for firmware BlockAck setup/teardown and its arguments. */
+ struct task ba_rx_task;
+ struct task ba_tx_task;
+ struct iwx_ba_task_data ba_rx;
+ struct iwx_ba_task_data ba_tx;
+
+ /* Task for setting encryption keys and its arguments. */
+ struct task setkey_task;
+ /*
+ * At present we need to process at most two keys at once:
+ * Our pairwise key and a group key.
+ * When hostap mode is implemented this array needs to grow or
+ * it might become a bottleneck for associations that occur at
+ * roughly the same time.
+ */
+ struct iwx_setkey_task_arg setkey_arg[2];
+ int setkey_cur;
+ int setkey_tail;
+ int setkey_nkeys;
+
+ /* Task for ERP/HT prot/slot-time/EDCA updates. */
+ struct task mac_ctxt_task;
+
+ /* Task for HT 20/40 MHz channel width updates. */
+ struct task phy_ctxt_task;
+
+ bus_space_tag_t sc_st;
+ bus_space_handle_t sc_sh;
+ bus_size_t sc_sz;
+ bus_dma_tag_t sc_dmat;
+
+ u_int16_t sc_pid;
+ void *sc_pct;
+ u_int32_t sc_pcitag;
+
+ void *sc_ih;
+ int sc_msix;
+
+ /* TX/RX rings. */
+ struct iwx_tx_ring txq[IWX_NUM_TX_QUEUES];
+ struct iwx_rx_ring rxq;
+ int qfullmsk;
+ int qenablemsk;
+ int first_data_qid;
+ int aggqid[WME_NUM_TID];
+ int max_tfd_queue_size;
+
+ int sc_sf_state;
+
+ /* ICT table. */
+ struct iwx_dma_info ict_dma;
+ int ict_cur;
+
+ int sc_hw_rev;
+#define IWX_SILICON_A_STEP 0
+#define IWX_SILICON_B_STEP 1
+#define IWX_SILICON_C_STEP 2
+#define IWX_SILICON_Z_STEP 0xf
+ int sc_hw_id;
+ int sc_hw_rf_id;
+ int sc_device_family;
+#define IWX_DEVICE_FAMILY_22000 1
+#define IWX_DEVICE_FAMILY_AX210 2
+ uint32_t sc_sku_id[3];
+ uint32_t mac_addr_from_csr;
+
+ struct iwx_dma_info ctxt_info_dma;
+ struct iwx_self_init_dram init_dram;
+ struct iwx_dma_info prph_scratch_dma;
+ struct iwx_dma_info prph_info_dma;
+ struct iwx_dma_info iml_dma;
+ struct iwx_dma_info pnvm_dma;
+ uint32_t sc_pnvm_ver;
+
+ int sc_fw_chunk_done;
+ int sc_init_complete;
+#define IWX_INIT_COMPLETE 0x01
+#define IWX_CALIB_COMPLETE 0x02
+#define IWX_PNVM_COMPLETE 0x04
+
+ struct iwx_ucode_status sc_uc;
+ char sc_fwver[32];
+
+ int sc_capaflags;
+ int sc_capa_max_probe_len;
+ int sc_capa_n_scan_channels;
+ uint8_t sc_ucode_api[howmany(IWX_NUM_UCODE_TLV_API, NBBY)];
+ uint8_t sc_enabled_capa[howmany(IWX_NUM_UCODE_TLV_CAPA, NBBY)];
+#define IWX_MAX_FW_CMD_VERSIONS 704
+ struct iwx_fw_cmd_version cmd_versions[IWX_MAX_FW_CMD_VERSIONS];
+ int n_cmd_versions;
+ int sc_rate_n_flags_version;
+
+ int sc_intmask;
+ int sc_flags;
+
+ uint32_t sc_fh_init_mask;
+ uint32_t sc_hw_init_mask;
+ uint32_t sc_fh_mask;
+ uint32_t sc_hw_mask;
+
+ int sc_generation;
+
+ struct rwlock ioctl_rwl;
+
+ int sc_cap_off; /* PCIe caps */
+
+ const char *sc_fwname;
+ struct iwx_fw_info sc_fw;
+ struct iwx_dma_info fw_mon;
+ int sc_fw_phy_config;
+ struct iwx_tlv_calib_ctrl sc_default_calib[IWX_UCODE_TYPE_MAX];
+
+ struct iwx_nvm_data sc_nvm;
+ struct iwx_bf_data sc_bf;
+ const char *sc_pnvm_name;
+
+ int sc_tx_timer[IWX_NUM_TX_QUEUES];
+ int sc_rx_ba_sessions;
+
+ struct task bgscan_done_task;
+ struct ieee80211_node_switch_bss_arg *bgscan_unref_arg;
+ size_t bgscan_unref_arg_size;
+
+ int sc_scan_last_antenna;
+
+ int sc_staid;
+ int sc_nodecolor;
+
+ uint8_t *sc_cmd_resp_pkt[IWX_TX_RING_COUNT];
+ size_t sc_cmd_resp_len[IWX_TX_RING_COUNT];
+ int sc_nic_locks;
+
+ struct taskq *sc_nswq;
+
+ struct iwx_rx_phy_info sc_last_phy_info;
+ int sc_ampdu_ref;
+ struct iwx_rxba_data sc_rxba_data[IWX_MAX_BAID];
+
+ uint32_t sc_time_event_uid;
+
+ /* phy contexts. we only use the first one */
+ struct iwx_phy_ctxt sc_phyctxt[IWX_NUM_PHY_CTX];
+
+ struct iwx_notif_statistics sc_stats;
+ int sc_noise;
+
+ int sc_pm_support;
+ int sc_ltr_enabled;
+
+ int sc_integrated;
+ int sc_tx_with_siso_diversity;
+ int sc_max_tfd_queue_size;
+ int sc_ltr_delay;
+ int sc_xtal_latency;
+ int sc_low_latency_xtal;
+ int sc_uhb_supported;
+ int sc_umac_prph_offset;
+ int sc_imr_enabled;
+
+ caddr_t sc_drvbpf;
+
+ union {
+ struct iwx_rx_radiotap_header th;
+ uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+ } sc_rxtapu;
+#define sc_rxtap sc_rxtapu.th
+ int sc_rxtap_len;
+
+ union {
+ struct iwx_tx_radiotap_header th;
+ uint8_t pad[IEEE80211_RADIOTAP_HDRLEN];
+ } sc_txtapu;
+#define sc_txtap sc_txtapu.th
+ int sc_txtap_len;
+
+ /* XXX: FreeBSD specific */
+ struct mtx sc_mtx;
+ struct resource *sc_mem;
+ struct resource *sc_irq;
+ struct intr_config_hook sc_preinit_hook;
+ struct task sc_es_task;
+ struct mbufq sc_snd;
+ struct iwx_rx_ba ni_rx_ba[WME_NUM_TID];
+ struct taskqueue *sc_tq;
+ int (*sc_ampdu_rx_start)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *, int, int, int);
+ void (*sc_ampdu_rx_stop)(struct ieee80211_node *,
+ struct ieee80211_rx_ampdu *);
+ int (*sc_addba_request)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ int (*sc_addba_response)(struct ieee80211_node *,
+ struct ieee80211_tx_ampdu *, int, int, int);
+ struct callout watchdog_to;
+ const struct firmware *sc_fwp;
+ const struct firmware *sc_pnvm;
+
+ struct iwx_scan_req_umac_v14 sc_umac_v14_cmd;
+
+ /* This is needed to support older firmware versions */
+ int sc_rsp_vers;
+ union {
+ struct iwx_nvm_get_info_rsp rsp_v4;
+ struct iwx_nvm_get_info_rsp_v3 rsp_v3;
+
+ } sc_rsp_info;
+ uint32_t sc_debug;
+
+ /* XXX-TODO addba_stop? */
+};
+
+#define IWX_LOCK_INIT(_sc) \
+ mtx_init(&(_sc)->sc_mtx, device_get_nameunit((_sc)->sc_dev), \
+ MTX_NETWORK_LOCK, MTX_DEF);
+#define IWX_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define IWX_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define IWX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->sc_mtx)
+#define IWX_ASSERT_LOCKED(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED);
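+
+/*
+ * Locking sketch (illustrative): driver state hanging off iwx_softc,
+ * such as the TX/RX rings, is protected by sc_mtx:
+ *
+ *	IWX_LOCK(sc);
+ *	... manipulate sc->txq / sc->rxq ...
+ *	IWX_UNLOCK(sc);
+ */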
+
+struct iwx_vap {
+ struct ieee80211vap iv_vap;
+ int is_uploaded;
+ int iv_auth;
+
+ int (*iv_newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+
+ struct iwx_phy_ctxt *phy_ctxt;
+
+ uint16_t id;
+ uint16_t color;
+
+ boolean_t have_wme;
+	/*
+	 * QoS data from net80211; stored here because net80211
+	 * delivers it via a separate callback while we need it
+	 * when programming the MAC context.
+	 */
+ struct {
+ uint16_t cw_min;
+ uint16_t cw_max;
+ uint16_t edca_txop;
+ uint8_t aifsn;
+ } queue_params[WME_NUM_AC];
+
+ /* indicates that this interface requires PS to be disabled */
+ boolean_t ps_disabled;
+};
+#define IWX_VAP(_vap) ((struct iwx_vap *)(_vap))
+
+struct iwx_node {
+ struct ieee80211_node in_ni;
+ struct iwx_phy_ctxt *in_phyctxt;
+ uint8_t in_macaddr[ETHER_ADDR_LEN];
+
+ uint16_t in_id;
+ uint16_t in_color;
+
+ struct iwx_rxq_dup_data dup_data;
+
+ int in_flags;
+#define IWX_NODE_FLAG_HAVE_PAIRWISE_KEY 0x01
+#define IWX_NODE_FLAG_HAVE_GROUP_KEY 0x02
+};
+
+#define IWX_NODE(_ni) ((struct iwx_node *)(_ni))
+
+#define IWX_DEFAULT_MACID 0
+#define IWX_DEFAULT_COLOR 0
+#define IWX_DEFAULT_TSFID 0
+
+#define IWX_STATION_ID 0
+#define IWX_AUX_STA_ID 1
+#define IWX_MONITOR_STA_ID 2
+
+#define IWX_ICT_SIZE 4096
+#define IWX_ICT_COUNT (IWX_ICT_SIZE / sizeof (uint32_t))
+#define IWX_ICT_PADDR_SHIFT 12
diff --git a/sys/dev/ixgbe/if_bypass.c b/sys/dev/ixgbe/if_bypass.c
index e9ea77dfb49c..138b4e17db0d 100644
--- a/sys/dev/ixgbe/if_bypass.c
+++ b/sys/dev/ixgbe/if_bypass.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "ixgbe.h"
@@ -114,11 +114,11 @@ ixgbe_get_bypass_time(u32 *year, u32 *sec)
static int
ixgbe_bp_version(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int version = 0;
- u32 cmd;
+ int error = 0;
+ static int version = 0;
+ u32 cmd;
ixgbe_bypass_mutex_enter(sc);
cmd = BYPASS_PAGE_CTL2 | BYPASS_WE;
@@ -154,15 +154,14 @@ err:
static int
ixgbe_bp_set_state(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int state = 0;
+ int error = 0;
+ static int state = 0;
/* Get the current state */
ixgbe_bypass_mutex_enter(sc);
- error = hw->mac.ops.bypass_rw(hw,
- BYPASS_PAGE_CTL0, &state);
+ error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &state);
ixgbe_bypass_mutex_clear(sc);
if (error != 0)
return (error);
@@ -216,10 +215,10 @@ out:
static int
ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int timeout = 0;
+ int error = 0;
+ static int timeout = 0;
/* Get the current value */
ixgbe_bypass_mutex_enter(sc);
@@ -259,10 +258,10 @@ ixgbe_bp_timeout(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int main_on = 0;
+ int error = 0;
+ static int main_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_on);
@@ -301,10 +300,10 @@ ixgbe_bp_main_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int main_off = 0;
+ int error = 0;
+ static int main_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &main_off);
@@ -343,10 +342,10 @@ ixgbe_bp_main_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int aux_on = 0;
+ int error = 0;
+ static int aux_on = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_on);
@@ -385,10 +384,10 @@ ixgbe_bp_aux_on(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
- static int aux_off = 0;
+ int error = 0;
+ static int aux_off = 0;
ixgbe_bypass_mutex_enter(sc);
error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL0, &aux_off);
@@ -432,11 +431,11 @@ ixgbe_bp_aux_off(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- int error, tmp;
- static int timeout = 0;
- u32 mask, arg;
+ int error, tmp;
+ static int timeout = 0;
+ u32 mask, arg;
/* Get the current hardware value */
ixgbe_bypass_mutex_enter(sc);
@@ -503,11 +502,11 @@ ixgbe_bp_wd_set(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
struct ixgbe_hw *hw = &sc->hw;
- u32 sec, year;
- int cmd, count = 0, error = 0;
- int reset_wd = 0;
+ u32 sec, year;
+ int cmd, count = 0, error = 0;
+ int reset_wd = 0;
error = sysctl_handle_int(oidp, &reset_wd, 0, req);
if ((error) || (req->newptr == NULL))
@@ -530,8 +529,7 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
error = IXGBE_BYPASS_FW_WRITE_FAILURE;
break;
}
- error = hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd);
- if (error != 0) {
+ if (hw->mac.ops.bypass_rw(hw, BYPASS_PAGE_CTL1, &reset_wd)) {
error = IXGBE_ERR_INVALID_ARGUMENT;
break;
}
@@ -550,14 +548,14 @@ ixgbe_bp_wd_reset(SYSCTL_HANDLER_ARGS)
static int
ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
- struct ixgbe_hw *hw = &sc->hw;
- u32 cmd, base, head;
- u32 log_off, count = 0;
- static int status = 0;
- u8 data;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *) arg1;
+ struct ixgbe_hw *hw = &sc->hw;
+ u32 cmd, base, head;
+ u32 log_off, count = 0;
+ static int status = 0;
+ u8 data;
struct ixgbe_bypass_eeprom eeprom[BYPASS_MAX_LOGS];
- int i, error = 0;
+ int i, error = 0;
error = sysctl_handle_int(oidp, &status, 0, req);
if ((error) || (req->newptr == NULL))
@@ -640,12 +638,15 @@ ixgbe_bp_log(SYSCTL_HANDLER_ARGS)
BYPASS_LOG_EVENT_SHIFT;
u8 action = eeprom[count].actions & BYPASS_LOG_ACTION_M;
u16 day_mon[2][13] = {
- {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365},
- {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366}
+ {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304,
+ 334, 365},
+ {0, 31, 59, 91, 121, 152, 182, 213, 244, 274, 305,
+ 335, 366}
};
char *event_str[] = {"unknown", "main on", "aux on",
"main off", "aux off", "WDT", "user" };
- char *action_str[] = {"ignore", "normal", "bypass", "isolate",};
+ char *action_str[] =
+ {"ignore", "normal", "bypass", "isolate",};
/* verify vaild data 1 - 6 */
if (event < BYPASS_EVENT_MAIN_ON || event > BYPASS_EVENT_USR)
@@ -712,11 +713,11 @@ unlock_err:
void
ixgbe_bypass_init(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- struct sysctl_oid *bp_node;
+ struct ixgbe_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+ struct sysctl_oid *bp_node;
struct sysctl_oid_list *bp_list;
- u32 mask, value, sec, year;
+ u32 mask, value, sec, year;
if (!(sc->feat_cap & IXGBE_FEATURE_BYPASS))
return;
@@ -724,13 +725,13 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
/* First set up time for the hardware */
ixgbe_get_bypass_time(&year, &sec);
- mask = BYPASS_CTL1_TIME_M
- | BYPASS_CTL1_VALID_M
- | BYPASS_CTL1_OFFTRST_M;
+ mask = BYPASS_CTL1_TIME_M |
+ BYPASS_CTL1_VALID_M |
+ BYPASS_CTL1_OFFTRST_M;
- value = (sec & BYPASS_CTL1_TIME_M)
- | BYPASS_CTL1_VALID
- | BYPASS_CTL1_OFFTRST;
+ value = (sec & BYPASS_CTL1_TIME_M) |
+ BYPASS_CTL1_VALID |
+ BYPASS_CTL1_OFFTRST;
ixgbe_bypass_mutex_enter(sc);
hw->mac.ops.bypass_set(hw, BYPASS_PAGE_CTL1, mask, value);
@@ -745,8 +746,7 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
*/
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "bypass_log",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "bypass_log", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
sc, 0, ixgbe_bp_log, "I", "Bypass Log");
/* All other setting are hung from the 'bypass' node */
@@ -757,39 +757,39 @@ ixgbe_bypass_init(struct ixgbe_softc *sc)
bp_list = SYSCTL_CHILDREN(bp_node);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "version", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "version", CTLTYPE_INT | CTLFLAG_RD,
sc, 0, ixgbe_bp_version, "I", "Bypass Version");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "state", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_set_state, "I", "Bypass State");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "timeout", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "timeout", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_timeout, "I", "Bypass Timeout");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "main_on", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "main_on", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_main_on, "I", "Bypass Main On");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "main_off", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "main_off", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_main_off, "I", "Bypass Main Off");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "aux_on", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "aux_on", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_aux_on, "I", "Bypass Aux On");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "aux_off", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "aux_off", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_aux_off, "I", "Bypass Aux Off");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "wd_set", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "wd_set", CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_bp_wd_set, "I", "Set BP Watchdog");
SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), bp_list,
- OID_AUTO, "wd_reset", CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
+ OID_AUTO, "wd_reset", CTLTYPE_INT | CTLFLAG_WR,
sc, 0, ixgbe_bp_wd_reset, "S", "Bypass WD Reset");
sc->feat_en |= IXGBE_FEATURE_BYPASS;
diff --git a/sys/dev/ixgbe/if_fdir.c b/sys/dev/ixgbe/if_fdir.c
index 6c52cc452987..37f45cb3808f 100644
--- a/sys/dev/ixgbe/if_fdir.c
+++ b/sys/dev/ixgbe/if_fdir.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "ixgbe.h"
@@ -51,9 +51,9 @@ ixgbe_init_fdir(struct ixgbe_softc *sc)
void
ixgbe_reinit_fdir(void *context)
{
- if_ctx_t ctx = context;
+ if_ctx_t ctx = context;
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
if (!(sc->feat_en & IXGBE_FEATURE_FDIR))
return;
@@ -79,16 +79,16 @@ ixgbe_reinit_fdir(void *context)
void
ixgbe_atr(struct tx_ring *txr, struct mbuf *mp)
{
- struct ixgbe_softc *sc = txr->sc;
- struct ix_queue *que;
- struct ip *ip;
- struct tcphdr *th;
- struct udphdr *uh;
- struct ether_vlan_header *eh;
+ struct ixgbe_softc *sc = txr->sc;
+ struct ix_queue *que;
+ struct ip *ip;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ struct ether_vlan_header *eh;
union ixgbe_atr_hash_dword input = {.dword = 0};
union ixgbe_atr_hash_dword common = {.dword = 0};
- int ehdrlen, ip_hlen;
- u16 etype;
+ int ehdrlen, ip_hlen;
+ u16 etype;
eh = mtod(mp, struct ether_vlan_header *);
if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 17f1f73a526e..1d36fd11f368 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "opt_inet.h"
#include "opt_inet6.h"
@@ -45,7 +45,7 @@
/************************************************************************
* Driver version
************************************************************************/
-static const char ixgbe_driver_version[] = "4.0.1-k";
+static const char ixgbe_driver_version[] = "5.0.1-k";
/************************************************************************
* PCI Device ID Table
@@ -58,53 +58,104 @@ static const char ixgbe_driver_version[] = "4.0.1-k";
************************************************************************/
static const pci_vendor_info_t ixgbe_vendor_info_array[] =
{
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT, "Intel(R) 82598EB AF (Dual Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT, "Intel(R) 82598EB AF (Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4, "Intel(R) 82598EB AT (CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT, "Intel(R) 82598EB AT"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2, "Intel(R) 82598EB AT2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT, "Intel(R) 82598EB AF DA (Dual Fiber)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT, "Intel(R) 82598EB AT (Dual CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR, "Intel(R) 82598EB AF (Dual Fiber LR)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM, "Intel(R) 82598EB AF (Dual Fiber SR)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM, "Intel(R) 82598EB LOM"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4, "Intel(R) X520 82599 (KX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ, "Intel(R) X520 82599 (KX4 Mezzanine)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP, "Intel(R) X520 82599ES (SFI/SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM, "Intel(R) X520 82599 (XAUI/BX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4, "Intel(R) X520 82599 (Dual CX4)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM, "Intel(R) X520-T 82599 LOM"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS, "Intel(R) X520 82599 LS"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE, "Intel(R) X520 82599 (Combined Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE, "Intel(R) X520 82599 (Backplane w/FCoE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2, "Intel(R) X520 82599 (Dual SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE, "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP, "Intel(R) X520-1 82599EN (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP, "Intel(R) X520-4 82599 (Quad SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP, "Intel(R) X520-Q1 82599 (QSFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T, "Intel(R) X540-AT2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR, "Intel(R) X552 (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4, "Intel(R) X552 (KX4 Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T, "Intel(R) X552/X557-AT (10GBASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T, "Intel(R) X552 (1000BASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP, "Intel(R) X552 (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR, "Intel(R) X553 (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L, "Intel(R) X553 L (KR Backplane)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP, "Intel(R) X553 (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N, "Intel(R) X553 N (SFP+)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII, "Intel(R) X553 (1GbE SGMII)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L, "Intel(R) X553 L (1GbE SGMII)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T, "Intel(R) X553/X557-AT (10GBASE-T)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T, "Intel(R) X553 (1GbE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L, "Intel(R) X553 L (1GbE)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS, "Intel(R) X540-T2 (Bypass)"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS, "Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT,
+ "Intel(R) 82598EB AF (Dual Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT,
+ "Intel(R) 82598EB AF (Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4,
+ "Intel(R) 82598EB AT (CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT,
+ "Intel(R) 82598EB AT"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2,
+ "Intel(R) 82598EB AT2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598, "Intel(R) 82598"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT,
+ "Intel(R) 82598EB AF DA (Dual Fiber)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT,
+ "Intel(R) 82598EB AT (Dual CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR,
+ "Intel(R) 82598EB AF (Dual Fiber LR)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM,
+ "Intel(R) 82598EB AF (Dual Fiber SR)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM,
+ "Intel(R) 82598EB LOM"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4,
+ "Intel(R) X520 82599 (KX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ,
+ "Intel(R) X520 82599 (KX4 Mezzanine)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP,
+ "Intel(R) X520 82599ES (SFI/SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM,
+ "Intel(R) X520 82599 (XAUI/BX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4,
+ "Intel(R) X520 82599 (Dual CX4)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM,
+ "Intel(R) X520-T 82599 LOM"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS,
+ "Intel(R) X520 82599 LS"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE,
+ "Intel(R) X520 82599 (Combined Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE,
+ "Intel(R) X520 82599 (Backplane w/FCoE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2,
+ "Intel(R) X520 82599 (Dual SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE,
+ "Intel(R) X520 82599 (Dual SFP+ w/FCoE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP,
+ "Intel(R) X520-1 82599EN (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP,
+ "Intel(R) X520-4 82599 (Quad SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP,
+ "Intel(R) X520-Q1 82599 (QSFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T,
+ "Intel(R) X540-AT2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1, "Intel(R) X540-T1"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T, "Intel(R) X550-T2"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1, "Intel(R) X550-T1"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR,
+ "Intel(R) X552 (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4,
+ "Intel(R) X552 (KX4 Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T,
+ "Intel(R) X552/X557-AT (10GBASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T,
+ "Intel(R) X552 (1000BASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP,
+ "Intel(R) X552 (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR,
+ "Intel(R) X553 (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L,
+ "Intel(R) X553 L (KR Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP,
+ "Intel(R) X553 (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N,
+ "Intel(R) X553 N (SFP+)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII,
+ "Intel(R) X553 (1GbE SGMII)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L,
+ "Intel(R) X553 L (1GbE SGMII)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T,
+ "Intel(R) X553/X557-AT (10GBASE-T)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T,
+ "Intel(R) X553 (1GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L,
+ "Intel(R) X553 L (1GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_BYPASS,
+ "Intel(R) X540-T2 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
+ "Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
+ "Intel(R) E610 (Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
+ "Intel(R) E610 (SFP)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
+ "Intel(R) E610 (2.5 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
+ "Intel(R) E610 (10 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
+ "Intel(R) E610 (SGMII)"),
/* required last entry */
- PVID_END
+ PVID_END
};
static void *ixgbe_register(device_t);
@@ -127,10 +178,13 @@ static int ixgbe_if_mtu_set(if_ctx_t, uint32_t);
static void ixgbe_if_crcstrip_set(if_ctx_t, int, int);
static void ixgbe_if_multi_set(if_ctx_t);
static int ixgbe_if_promisc_set(if_ctx_t, int);
-static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
-static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int ixgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int ixgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
static void ixgbe_if_queues_free(if_ctx_t);
static void ixgbe_if_timer(if_ctx_t, uint16_t);
+static const char *ixgbe_link_speed_to_str(u32 link_speed);
static void ixgbe_if_update_admin_status(if_ctx_t);
static void ixgbe_if_vlan_register(if_ctx_t, u16);
static void ixgbe_if_vlan_unregister(if_ctx_t, u16);
@@ -138,6 +192,8 @@ static int ixgbe_if_i2c_req(if_ctx_t, struct ifi2creq *);
static bool ixgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event);
int ixgbe_intr(void *);
+static int ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data);
+
/************************************************************************
* Function prototypes
************************************************************************/
@@ -172,6 +228,7 @@ static void ixgbe_add_media_types(if_ctx_t);
static void ixgbe_update_stats_counters(struct ixgbe_softc *);
static void ixgbe_config_link(if_ctx_t);
static void ixgbe_get_slot_info(struct ixgbe_softc *);
+static void ixgbe_fw_mode_timer(void *);
static void ixgbe_check_wol_support(struct ixgbe_softc *);
static void ixgbe_enable_rx_drop(struct ixgbe_softc *);
static void ixgbe_disable_rx_drop(struct ixgbe_softc *);
@@ -184,6 +241,13 @@ static void ixgbe_setup_vlan_hw_support(if_ctx_t);
static void ixgbe_config_gpie(struct ixgbe_softc *);
static void ixgbe_config_delay_values(struct ixgbe_softc *);
+static void ixgbe_add_debug_sysctls(struct ixgbe_softc *sc);
+static void ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc);
+static int ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
+static u8 ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc,
+ struct sbuf *sbuf, u8 cluster_id);
+static int ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd);
+
/* Sysctl handlers */
static int ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS);
@@ -203,11 +267,19 @@ static int ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS);
static int ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
+
+static int ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS);
+static int ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS);
/* Deferred interrupt tasklets */
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
+static void ixgbe_handle_fw_event(void *);
+
+static int ixgbe_enable_lse(struct ixgbe_softc *sc);
+static int ixgbe_disable_lse(struct ixgbe_softc *sc);
/************************************************************************
* FreeBSD Device Interface Entry Points
@@ -270,6 +342,7 @@ static device_method_t ixgbe_if_methods[] = {
DEVMETHOD(ifdi_get_counter, ixgbe_if_get_counter),
DEVMETHOD(ifdi_i2c_req, ixgbe_if_i2c_req),
DEVMETHOD(ifdi_needs_restart, ixgbe_if_needs_restart),
+ DEVMETHOD(ifdi_priv_ioctl, ixgbe_if_priv_ioctl),
#ifdef PCI_IOV
DEVMETHOD(ifdi_iov_init, ixgbe_if_iov_init),
DEVMETHOD(ifdi_iov_uninit, ixgbe_if_iov_uninit),
@@ -316,7 +389,8 @@ static int ixgbe_smart_speed = ixgbe_smart_speed_on;
* but this allows it to be forced off for testing.
*/
static int ixgbe_enable_msix = 1;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixgbe_enable_msix,
+ 0,
"Enable MSI-X interrupts");
/*
@@ -334,12 +408,14 @@ SYSCTL_INT(_hw_ix, OID_AUTO, unsupported_sfp, CTLFLAG_RDTUN,
* so we'll default to turning it off.
*/
static int ixgbe_enable_fdir = 0;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_fdir, CTLFLAG_RDTUN, &ixgbe_enable_fdir,
+ 0,
"Enable Flow Director");
/* Receive-Side Scaling */
static int ixgbe_enable_rss = 1;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss,
+ 0,
"Enable Receive-Side Scaling (RSS)");
/*
@@ -349,7 +425,8 @@ SYSCTL_INT(_hw_ix, OID_AUTO, enable_rss, CTLFLAG_RDTUN, &ixgbe_enable_rss, 0,
* traffic for that interrupt vector
*/
static int ixgbe_enable_aim = false;
-SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim, 0,
+SYSCTL_INT(_hw_ix, OID_AUTO, enable_aim, CTLFLAG_RWTUN, &ixgbe_enable_aim,
+ 0,
"Enable adaptive interrupt moderation");
#if 0
@@ -405,9 +482,9 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
- int i, j, error;
+ int i, j, error;
MPASS(sc->num_tx_queues > 0);
MPASS(sc->num_tx_queues == ntxqsets);
@@ -415,8 +492,8 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
sc->tx_queues =
- (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
- M_IXGBE, M_NOWAIT | M_ZERO);
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
+ ntxqsets, M_IXGBE, M_NOWAIT | M_ZERO);
if (!sc->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -427,20 +504,20 @@ ixgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
struct tx_ring *txr = &que->txr;
/* In case SR-IOV is enabled, align the index properly */
- txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
- i);
+ txr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
txr->sc = que->sc = sc;
/* Allocate report status array */
- txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
+ txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
+ scctx->isc_ntxd[0], M_IXGBE, M_NOWAIT | M_ZERO);
if (txr->tx_rsq == NULL) {
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
txr->tail = IXGBE_TDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i];
txr->tx_paddr = paddrs[i];
@@ -472,9 +549,9 @@ static int
ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int nrxqs, int nrxqsets)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que;
- int i;
+ int i;
MPASS(sc->num_rx_queues > 0);
MPASS(sc->num_rx_queues == nrxqsets);
@@ -483,7 +560,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
/* Allocate queue structure memory */
sc->rx_queues =
(struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue)*nrxqsets,
- M_IXGBE, M_NOWAIT | M_ZERO);
+ M_IXGBE, M_NOWAIT | M_ZERO);
if (!sc->rx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -494,8 +571,7 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
struct rx_ring *rxr = &que->rxr;
/* In case SR-IOV is enabled, align the index properly */
- rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool,
- i);
+ rxr->me = ixgbe_vf_que_index(sc->iov_mode, sc->pool, i);
rxr->sc = que->sc = sc;
@@ -519,10 +595,10 @@ ixgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
static void
ixgbe_if_queues_free(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_tx_queue *tx_que = sc->tx_queues;
struct ix_rx_queue *rx_que = sc->rx_queues;
- int i;
+ int i;
if (tx_que != NULL) {
for (i = 0; i < sc->num_tx_queues; i++, tx_que++) {
@@ -550,10 +626,10 @@ static void
ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 reta = 0, mrqc, rss_key[10];
- int queue_id, table_size, index_mult;
- int i, j;
- u32 rss_hash_config;
+ u32 reta = 0, mrqc, rss_key[10];
+ int queue_id, table_size, index_mult;
+ int i, j;
+ u32 rss_hash_config;
if (sc->feat_en & IXGBE_FEATURE_RSS) {
/* Fetch the configured RSS key */
@@ -573,6 +649,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
table_size = 512;
break;
default:
@@ -605,8 +682,8 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
if (i < 128)
IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
else
- IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32),
- reta);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_ERETA((i >> 2) - 32), reta);
reta = 0;
}
}
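
The write loop above packs four 8-bit queue indices into each 32-bit redirection-table register, spilling into ERETA once the first 128 RETA entries are exhausted on the 512-entry tables; a hedged sketch of just the packing arithmetic, with the RSS bucket lookup and index_mult details simplified to a round-robin:

	u32 reta = 0;
	int i, queue_id;

	for (i = 0; i < table_size; i++) {
		/* Simplified: the driver derives queue_id from the RSS
		 * bucket map and applies index_mult; round-robin here. */
		queue_id = i % sc->num_rx_queues;
		/* After four iterations the oldest entry sits in byte 0,
		 * so entry i ends up in byte (i & 3) of register (i >> 2). */
		reta = (reta >> 8) | (queue_id << 24);
		if ((i & 3) == 3) {
			if (i < 128)
				IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
			else
				IXGBE_WRITE_REG(hw,
				    IXGBE_ERETA((i >> 2) - 32), reta);
			reta = 0;
		}
	}
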
@@ -624,12 +701,12 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
* and so we end up with a mix of 2-tuple and 4-tuple
* traffic.
*/
- rss_hash_config = RSS_HASHTYPE_RSS_IPV4
- | RSS_HASHTYPE_RSS_TCP_IPV4
- | RSS_HASHTYPE_RSS_IPV6
- | RSS_HASHTYPE_RSS_TCP_IPV6
- | RSS_HASHTYPE_RSS_IPV6_EX
- | RSS_HASHTYPE_RSS_TCP_IPV6_EX;
+ rss_hash_config = RSS_HASHTYPE_RSS_IPV4 |
+ RSS_HASHTYPE_RSS_TCP_IPV4 |
+ RSS_HASHTYPE_RSS_IPV6 |
+ RSS_HASHTYPE_RSS_TCP_IPV6 |
+ RSS_HASHTYPE_RSS_IPV6_EX |
+ RSS_HASHTYPE_RSS_TCP_IPV6_EX;
}
mrqc = IXGBE_MRQC_RSSEN;
@@ -663,14 +740,14 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
static void
ixgbe_initialize_receive_units(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
- struct ixgbe_hw *hw = &sc->hw;
- if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_t ifp = iflib_get_ifp(ctx);
struct ix_rx_queue *que;
- int i, j;
- u32 bufsz, fctrl, srrctl, rxcsum;
- u32 hlreg;
+ int i, j;
+ u32 bufsz, fctrl, srrctl, rxcsum;
+ u32 hlreg;
/*
* Make sure receives are disabled while
@@ -701,7 +778,7 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
/* Setup the Base and Length of the Rx Descriptor Ring */
for (i = 0, que = sc->rx_queues; i < sc->num_rx_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
- u64 rdba = rxr->rx_paddr;
+ u64 rdba = rxr->rx_paddr;
j = rxr->me;
@@ -743,10 +820,10 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
}
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
- u32 psrtype = IXGBE_PSRTYPE_TCPHDR
- | IXGBE_PSRTYPE_UDPHDR
- | IXGBE_PSRTYPE_IPV4HDR
- | IXGBE_PSRTYPE_IPV6HDR;
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
}
@@ -776,9 +853,9 @@ ixgbe_initialize_receive_units(if_ctx_t ctx)
static void
ixgbe_initialize_transmit_units(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
int i;
@@ -819,7 +896,8 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(j));
break;
default:
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
+ txctrl =
+ IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(j));
break;
}
txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
@@ -828,7 +906,8 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(j), txctrl);
break;
default:
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j), txctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(j),
+ txctrl);
break;
}
@@ -852,6 +931,32 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
} /* ixgbe_initialize_transmit_units */
+static int
+ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ device_printf(sc->dev,
+ "The driver for the device stopped because the NVM "
+ "image is newer than expected. You must install the "
+ "most recent version of the network driver.\n");
+ return (EOPNOTSUPP);
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
+ device_printf(sc->dev,
+ "The driver for the device detected a newer version of "
+ "the NVM image than expected. Please install the most "
+ "recent version of the network driver.\n");
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
+ device_printf(sc->dev,
+ "The driver for the device detected an older version "
+ "of the NVM image than expected. "
+ "Please update the NVM image.\n");
+ }
+ return (0);
+}
+
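
The new check accepts a window of two minor versions on either side of what the driver was built against, warns outside that window, and refuses to load only when the NVM's major version is newer. For illustration, assuming the driver expects major 1, minor 7 (hypothetical values, not the real constants):

	/*
	 * Hypothetical expected FW API version: major 1, minor 7.
	 *
	 *   NVM reports 2.0      -> EOPNOTSUPP, attach fails (major too new)
	 *   NVM reports 1.10     -> loads, warns "newer ... than expected"
	 *   NVM reports 1.5..1.9 -> loads silently
	 *   NVM reports 1.4      -> loads, warns "older ... than expected"
	 *   NVM reports 0.x      -> loads, warns "older ..." (major too old)
	 */
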
/************************************************************************
* ixgbe_register
************************************************************************/
@@ -873,12 +978,13 @@ ixgbe_register(device_t dev)
static int
ixgbe_if_attach_pre(if_ctx_t ctx)
{
- struct ixgbe_softc *sc;
- device_t dev;
- if_softc_ctx_t scctx;
+ struct ixgbe_softc *sc;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
- int error = 0;
- u32 ctrl_ext;
+ int error = 0;
+ u32 ctrl_ext;
+ size_t i;
INIT_DEBUGOUT("ixgbe_attach: begin");
@@ -919,8 +1025,15 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
- if (hw->mac.ops.fw_recovery_mode && hw->mac.ops.fw_recovery_mode(hw)) {
- device_printf(dev, "Firmware recovery mode detected. Limiting "
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_init_aci(hw);
+
+ sc->do_debug_dump = false;
+
+ if (hw->mac.ops.fw_recovery_mode &&
+ hw->mac.ops.fw_recovery_mode(hw)) {
+ device_printf(dev,
+ "Firmware recovery mode detected. Limiting "
"functionality.\nRefer to the Intel(R) Ethernet Adapters "
"and Devices User Guide for details on firmware recovery "
"mode.");
@@ -928,8 +1041,11 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
- if (hw->mbx.ops.init_params)
- hw->mbx.ops.init_params(hw);
+ /* 82598 Does not support SR-IOV, initialize everything else */
+ if (hw->mac.type >= ixgbe_mac_82599_vf) {
+ for (i = 0; i < sc->num_vfs; i++)
+ hw->mbx.ops[i].init_params(hw);
+ }
hw->allow_unsupported_sfp = allow_unsupported_sfp;
@@ -984,7 +1100,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
error = ixgbe_start_hw(hw);
switch (error) {
case IXGBE_ERR_EEPROM_VERSION:
- device_printf(dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues associated with your hardware.\nIf you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+ device_printf(dev,
+ "This device is a pre-production adapter/LOM. Please be"
+ " aware there may be issues associated with your"
+ " hardware.\nIf you are experiencing problems please"
+ " contact your Intel or hardware representative who"
+ " provided you with this hardware.\n");
break;
case IXGBE_ERR_SFP_NOT_SUPPORTED:
device_printf(dev, "Unsupported SFP+ Module\n");
@@ -997,6 +1118,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
+ /* Check the FW API version */
+ if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
@@ -1050,6 +1177,9 @@ err_pci:
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
ixgbe_free_pci_resources(ctx);
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_shutdown_aci(hw);
+
return (error);
} /* ixgbe_if_attach_pre */
@@ -1066,15 +1196,14 @@ static int
ixgbe_if_attach_post(if_ctx_t ctx)
{
device_t dev;
- struct ixgbe_softc *sc;
+ struct ixgbe_softc *sc;
struct ixgbe_hw *hw;
- int error = 0;
+ int error = 0;
dev = iflib_get_dev(ctx);
sc = iflib_get_softc(ctx);
hw = &sc->hw;
-
if (sc->intr_type == IFLIB_INTR_LEGACY &&
(sc->feat_cap & IXGBE_FEATURE_LEGACY_IRQ) == 0) {
device_printf(dev, "Device does not support legacy interrupts");
@@ -1083,10 +1212,11 @@ ixgbe_if_attach_post(if_ctx_t ctx)
}
/* Allocate multicast array memory. */
- sc->mta = malloc(sizeof(*sc->mta) *
- MAX_NUM_MULTICAST_ADDRESSES, M_IXGBE, M_NOWAIT);
+ sc->mta = malloc(sizeof(*sc->mta) * MAX_NUM_MULTICAST_ADDRESSES,
+ M_IXGBE, M_NOWAIT);
if (sc->mta == NULL) {
- device_printf(dev, "Can not allocate multicast setup array\n");
+ device_printf(dev,
+ "Can not allocate multicast setup array\n");
error = ENOMEM;
goto err;
}
@@ -1137,6 +1267,17 @@ ixgbe_if_attach_post(if_ctx_t ctx)
/* Add sysctls */
ixgbe_add_device_sysctls(ctx);
+ /* Init recovery mode timer and state variable */
+ if (sc->feat_en & IXGBE_FEATURE_RECOVERY_MODE) {
+ sc->recovery_mode = 0;
+
+ /* Set up the timer callout */
+ callout_init(&sc->fw_mode_timer, true);
+
+ /* Start the task */
+ callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
+ }
+
return (0);
err:
return (error);
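
The callout added above polls the firmware recovery state once per second. The handler body is not part of this diff, but a sketch of the periodic-callout pattern it implies would be as follows; the function name is illustrative, and the exact handler logic is an assumption:

	static void
	example_fw_mode_timer(void *arg)
	{
		struct ixgbe_softc *sc = arg;

		/* Publish the current recovery state; readers such as
		 * the descriptor-head/tail sysctl handlers below use
		 * atomic_load_acq_int() on this field. */
		if (sc->hw.mac.ops.fw_recovery_mode != NULL &&
		    sc->hw.mac.ops.fw_recovery_mode(&sc->hw))
			atomic_store_rel_int(&sc->recovery_mode, 1);
		else
			atomic_store_rel_int(&sc->recovery_mode, 0);

		/* Re-arm to fire again in one second. */
		callout_reset(&sc->fw_mode_timer, hz,
		    example_fw_mode_timer, sc);
	}
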
@@ -1155,7 +1296,7 @@ static void
ixgbe_check_wol_support(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u16 dev_caps = 0;
+ u16 dev_caps = 0;
/* Find out WoL support for port */
sc->wol_support = hw->wol_enabled = 0;
@@ -1179,7 +1320,7 @@ ixgbe_check_wol_support(struct ixgbe_softc *sc)
static int
ixgbe_setup_interface(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_softc *sc = iflib_get_softc(ctx);
INIT_DEBUGOUT("ixgbe_setup_interface: begin");
@@ -1205,7 +1346,7 @@ static uint64_t
ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@@ -1224,8 +1365,6 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
return (0);
case IFCOUNTER_IQDROPS:
return (sc->iqdrops);
- case IFCOUNTER_OQDROPS:
- return (0);
case IFCOUNTER_IERRORS:
return (sc->ierrors);
default:
@@ -1239,10 +1378,9 @@ ixgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt)
static int
ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- int i;
-
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ int i;
if (hw->phy.ops.read_i2c_byte == NULL)
return (ENXIO);
@@ -1252,7 +1390,8 @@ ixgbe_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req)
return (0);
} /* ixgbe_if_i2c_req */
-/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be reinitialized
+/* ixgbe_if_needs_restart - Tell iflib when the driver needs to be
+ * reinitialized
* @ctx: iflib context
* @event: event code to check
*
@@ -1271,21 +1410,267 @@ ixgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event)
}
/************************************************************************
+ * ixgbe_if_priv_ioctl - Ioctl handler for driver
+ *
+ * Handler for custom driver specific ioctls
+ *
+ * return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixgbe_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data)
+{
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ifdrv *ifd;
+ device_t dev = sc->dev;
+
+ /* Make sure the command type is valid */
+ switch (command) {
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ /* Accepted commands */
+ break;
+ case SIOCGPRIVATE_0:
+ /*
+ * Although we do not support this ioctl command, it's expected
+ * that iflib will forward it to the IFDI_PRIV_IOCTL handler.
+ * Do not print a message in this case.
+ */
+ return (ENOTSUP);
+ default:
+ /*
+ * If we get a different command for this function, it's
+ * definitely unexpected, so log a message indicating what
+ * command we got for debugging purposes.
+ */
+ device_printf(dev,
+ "%s: unexpected ioctl command %08lx\n",
+ __func__, command);
+ return (EINVAL);
+ }
+
+ ifd = (struct ifdrv *)data;
+
+ switch (ifd->ifd_cmd) {
+ case IXGBE_NVM_ACCESS:
+ IOCTL_DEBUGOUT("ioctl: NVM ACCESS");
+ return (ixgbe_nvm_access_ioctl(sc, ifd));
+ case IXGBE_DEBUG_DUMP:
+ IOCTL_DEBUGOUT("ioctl: DEBUG DUMP");
+ return (ixgbe_debug_dump_ioctl(sc, ifd));
+ default:
+ IOCTL_DEBUGOUT1(
+ "ioctl: UNKNOWN SIOC(S|G)DRVSPEC (0x%X) command\n",
+ (int)ifd->ifd_cmd);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
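
For context, a hedged userland sketch of how a management tool would reach this handler through SIOCSDRVSPEC. struct ifdrv and the sockio constants are standard FreeBSD interfaces; the helper name and the minimal error handling are illustrative:

	#include <sys/ioctl.h>
	#include <sys/sockio.h>
	#include <net/if.h>
	#include <string.h>

	int
	send_drvspec(int sock, const char *ifname, unsigned long cmd,
	    void *buf, size_t len)
	{
		struct ifdrv ifd;

		memset(&ifd, 0, sizeof(ifd));
		strlcpy(ifd.ifd_name, ifname, sizeof(ifd.ifd_name));
		ifd.ifd_cmd = cmd;		/* e.g. IXGBE_NVM_ACCESS */
		ifd.ifd_len = len;
		ifd.ifd_data = buf;

		/* sock: any socket, e.g. socket(AF_INET, SOCK_DGRAM, 0). */
		return (ioctl(sock, SIOCSDRVSPEC, &ifd));
	}
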
+/************************************************************************
+ * ixgbe_nvm_access_ioctl
+ *
+ * Handles an NVM access ioctl request
+ ************************************************************************/
+static int
+ixgbe_nvm_access_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
+{
+ struct ixgbe_nvm_access_data *data;
+ struct ixgbe_nvm_access_cmd *cmd;
+ struct ixgbe_hw *hw = &sc->hw;
+ size_t ifd_len = ifd->ifd_len;
+ size_t malloc_len;
+ device_t dev = sc->dev;
+ u8 *nvm_buffer;
+ s32 error = 0;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
+ * a privilege check. Subsequently, iflib passes the ioctl to the driver
+ * without verifying privileges. To prevent non-privileged threads from
+ * accessing this interface, perform a privilege check at this point.
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ifd_len < sizeof(*cmd)) {
+ device_printf(dev,
+ "%s: ifdrv length is too small. Got %zu, "
+ "but expected %zu\n",
+ __func__, ifd_len, sizeof(*cmd));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: No ifd data buffer.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd));
+
+ nvm_buffer = (u8 *)malloc(malloc_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!nvm_buffer)
+ return (ENOMEM);
+
+ /* Copy the NVM access command and data in from user space */
+ error = copyin(ifd->ifd_data, nvm_buffer, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Failed to copy data in, error: %d\n",
+ __func__, error);
+ goto cleanup_free_nvm_buffer;
+ }
+
+ /*
+ * The NVM command structure is immediately followed by data which
+ * varies in size based on the command.
+ */
+ cmd = (struct ixgbe_nvm_access_cmd *)nvm_buffer;
+ data = (struct ixgbe_nvm_access_data *)
+ (nvm_buffer + sizeof(struct ixgbe_nvm_access_cmd));
+
+ /* Handle the NVM access request */
+ error = ixgbe_handle_nvm_access(hw, cmd, data);
+ if (error) {
+ device_printf(dev, "%s: NVM access request failed, error %d\n",
+ __func__, error);
+ }
+
+ /* Copy the possibly modified contents of the handled request out */
+ error = copyout(nvm_buffer, ifd->ifd_data, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Copying response back to "
+ "user space failed, error %d\n",
+ __func__, error);
+ goto cleanup_free_nvm_buffer;
+ }
+
+cleanup_free_nvm_buffer:
+ free(nvm_buffer, M_IXGBE);
+ return (error);
+}
+
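
The handler above relies on a single user buffer whose command header is immediately followed by command-dependent data, sketched here:

	/*
	 * ifd->ifd_data, ifd_len bytes total:
	 *
	 *   +-----------------------------+----------------------------+
	 *   | struct ixgbe_nvm_access_cmd | data[] (length depends on  |
	 *   | (command header)            |  the particular command)   |
	 *   +-----------------------------+----------------------------+
	 *
	 * malloc_len = max(ifd_len, sizeof(*data) + sizeof(*cmd)) keeps
	 * the computed data pointer inside the allocation even when the
	 * caller passes a header-only request.
	 */
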
+/************************************************************************
+ * ixgbe_debug_dump_ioctl
+ *
+ * Makes debug dump of internal FW/HW data.
+ ************************************************************************/
+static int
+ixgbe_debug_dump_ioctl(struct ixgbe_softc *sc, struct ifdrv *ifd)
+{
+ struct ixgbe_debug_dump_cmd *dd_cmd;
+ struct ixgbe_hw *hw = &sc->hw;
+ size_t ifd_len = ifd->ifd_len;
+ device_t dev = sc->dev;
+ s32 error = 0;
+
+ if (!(sc->feat_en & IXGBE_FEATURE_DBG_DUMP))
+ return (ENODEV);
+
+ /* Data returned from ACI command */
+ u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ /*
+ * ifioctl forwards SIOCxDRVSPEC to iflib without conducting
+ * a privilege check. Subsequently, iflib passes the ioctl to the driver
+ * without verifying privileges. To prevent non-privileged threads from
+ * accessing this interface, perform a privilege check at this point.
+ */
+ error = priv_check(curthread, PRIV_DRIVER);
+ if (error)
+ return (error);
+
+ if (ifd_len < sizeof(*dd_cmd)) {
+ device_printf(dev,
+ "%s: ifdrv length is too small. Got %zu, "
+ "but expected %zu\n",
+ __func__, ifd_len, sizeof(*dd_cmd));
+ return (EINVAL);
+ }
+
+ if (ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: No ifd data buffer.\n",
+ __func__);
+ return (EINVAL);
+ }
+
+ dd_cmd = (struct ixgbe_debug_dump_cmd *)malloc(ifd_len, M_IXGBE,
+ M_NOWAIT | M_ZERO);
+ if (!dd_cmd) {
+		error = ENOMEM;
+ goto out;
+ }
+ /* copy data from userspace */
+ error = copyin(ifd->ifd_data, dd_cmd, ifd_len);
+ if (error) {
+ device_printf(dev, "%s: Failed to copy data in, error: %d\n",
+ __func__, error);
+ goto out;
+ }
+
+	/* ACI command requires buf_size arg to be greater than 0 */
+ if (dd_cmd->data_size == 0) {
+ device_printf(dev, "%s: data_size must be greater than 0\n",
+ __func__);
+ error = EINVAL;
+ goto out;
+ }
+
+ /* Zero the data buffer memory space */
+ memset(dd_cmd->data, 0, ifd_len - sizeof(*dd_cmd));
+
+ error = ixgbe_aci_get_internal_data(hw, dd_cmd->cluster_id,
+ dd_cmd->table_id, dd_cmd->offset, dd_cmd->data, dd_cmd->data_size,
+ &ret_buf_size, &ret_next_cluster, &ret_next_table, &ret_next_index);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to get internal FW/HW data, error: %d\n",
+ __func__, error);
+ goto out;
+ }
+
+ dd_cmd->cluster_id = ret_next_cluster;
+ dd_cmd->table_id = ret_next_table;
+ dd_cmd->offset = ret_next_index;
+ dd_cmd->data_size = ret_buf_size;
+
+ error = copyout(dd_cmd, ifd->ifd_data, ifd->ifd_len);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to copy data out, error: %d\n",
+ __func__, error);
+ }
+
+out:
+ free(dd_cmd, M_IXGBE);
+
+ return (error);
+}
+
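
The ioctl above is cursor-driven: each call returns the next (cluster, table, offset) triple to request, so userland iterates until the dump is exhausted. A hedged sketch of that loop, reusing the send_drvspec() helper sketched earlier; consume() and the terminating condition on data_size are assumptions:

	struct ixgbe_debug_dump_cmd *dd;
	size_t len = sizeof(*dd) + 4096;	/* header + data area */

	dd = calloc(1, len);			/* zeroed: start of dump */
	dd->data_size = 4096;
	for (;;) {
		if (send_drvspec(sock, "ix0", IXGBE_DEBUG_DUMP, dd, len))
			break;			/* ioctl error */
		consume(dd->data, dd->data_size);  /* hypothetical sink */
		if (dd->data_size == 0)		/* assumed end marker */
			break;
		/* cluster_id/table_id/offset now hold the next cursor. */
	}
	free(dd);
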
+/************************************************************************
* ixgbe_add_media_types
************************************************************************/
static void
ixgbe_add_media_types(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- u64 layer;
+ device_t dev = iflib_get_dev(ctx);
+ u64 layer;
layer = sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
@@ -1299,9 +1684,11 @@ ixgbe_add_media_types(if_ctx_t ctx)
}
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
- layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
+ layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA) {
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_TWINAX, 0,
NULL);
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ }
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR) {
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
@@ -1350,8 +1737,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
ifmedia_add(sc->media, IFM_ETHER | IFM_2500_SX, 0, NULL);
}
#endif
- if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX)
+ if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_BX) {
device_printf(dev, "Media supported: 1000baseBX\n");
+ ifmedia_add(sc->media, IFM_ETHER | IFM_1000_BX, 0, NULL);
+ }
if (hw->device_id == IXGBE_DEV_ID_82598AT) {
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
@@ -1383,6 +1772,7 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
return (true);
return (false);
@@ -1397,10 +1787,10 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
static void
ixgbe_config_link(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 autoneg, err = 0;
- bool sfp, negotiate;
+ u32 autoneg, err = 0;
+ bool sfp, negotiate;
sfp = ixgbe_is_sfp(hw);
@@ -1449,6 +1839,15 @@ ixgbe_config_link(if_ctx_t ctx)
IXGBE_LINK_SPEED_5GB_FULL);
}
+ if (hw->mac.type == ixgbe_mac_E610) {
+ hw->phy.ops.init(hw);
+ err = ixgbe_enable_lse(sc);
+ if (err)
+ device_printf(sc->dev,
+ "Failed to enable Link Status Event, "
+ "error: %d", err);
+ }
+
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
sc->link_up);
@@ -1461,11 +1860,11 @@ ixgbe_config_link(if_ctx_t ctx)
static void
ixgbe_update_stats_counters(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_hw *hw = &sc->hw;
struct ixgbe_hw_stats *stats = &sc->stats.pf;
- u32 missed_rx = 0, bprc, lxon, lxoff, total;
- u32 lxoffrxc;
- u64 total_missed_rx = 0;
+ u32 missed_rx = 0, bprc, lxon, lxoff, total;
+ u32 lxoffrxc;
+ u64 total_missed_rx = 0;
stats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
stats->illerrc += IXGBE_READ_REG(hw, IXGBE_ILLERRC);
@@ -1593,8 +1992,8 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
* - jabber count.
*/
IXGBE_SET_IERRORS(sc, stats->crcerrs + stats->illerrc +
- stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc + stats->roc +
- stats->rjc);
+ stats->mpc[0] + stats->rlec + stats->ruc + stats->rfc +
+ stats->roc + stats->rjc);
} /* ixgbe_update_stats_counters */
/************************************************************************
@@ -1605,19 +2004,19 @@ ixgbe_update_stats_counters(struct ixgbe_softc *sc)
static void
ixgbe_add_hw_stats(struct ixgbe_softc *sc)
{
- device_t dev = iflib_get_dev(sc->ctx);
- struct ix_rx_queue *rx_que;
- struct ix_tx_queue *tx_que;
+ device_t dev = iflib_get_dev(sc->ctx);
+ struct ix_rx_queue *rx_que;
+ struct ix_tx_queue *tx_que;
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct ixgbe_hw_stats *stats = &sc->stats.pf;
- struct sysctl_oid *stat_node, *queue_node;
+ struct ixgbe_hw_stats *stats = &sc->stats.pf;
+ struct sysctl_oid *stat_node, *queue_node;
struct sysctl_oid_list *stat_list, *queue_list;
- int i;
+ int i;
#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
+ char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
@@ -1627,7 +2026,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "link_irq",
CTLFLAG_RD, &sc->link_irq, "Link MSI-X IRQ Handled");
- for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
@@ -1635,11 +2035,13 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
- ixgbe_sysctl_tdh_handler, "IU", "Transmit Descriptor Head");
+ CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
+ ixgbe_sysctl_tdh_handler, "IU",
+ "Transmit Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, txr, 0,
- ixgbe_sysctl_tdt_handler, "IU", "Transmit Descriptor Tail");
+ CTLTYPE_UINT | CTLFLAG_RD, txr, 0,
+ ixgbe_sysctl_tdt_handler, "IU",
+ "Transmit Descriptor Tail");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &txr->tso_tx, "TSO");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
@@ -1647,7 +2049,8 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
"Queue Packets Transmitted");
}
- for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
+ for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
+ i++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
@@ -1655,7 +2058,7 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
queue_list = SYSCTL_CHILDREN(queue_node);
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_UINT | CTLFLAG_RW,
&sc->rx_queues[i], 0,
ixgbe_sysctl_interrupt_rate_handler, "IU",
"Interrupt Rate");
@@ -1663,11 +2066,13 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
CTLFLAG_RD, &(sc->rx_queues[i].irqs),
"irqs on this queue");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
- ixgbe_sysctl_rdh_handler, "IU", "Receive Descriptor Head");
+ CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
+ ixgbe_sysctl_rdh_handler, "IU",
+ "Receive Descriptor Head");
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
- CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, rxr, 0,
- ixgbe_sysctl_rdt_handler, "IU", "Receive Descriptor Tail");
+ CTLTYPE_UINT | CTLFLAG_RD, rxr, 0,
+ ixgbe_sysctl_rdt_handler, "IU",
+ "Receive Descriptor Tail");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
CTLFLAG_RD, &rxr->rx_packets, "Queue Packets Received");
SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
@@ -1679,7 +2084,6 @@ ixgbe_add_hw_stats(struct ixgbe_softc *sc)
}
/* MAC stats get their own sub node */
-
stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "MAC Statistics");
stat_list = SYSCTL_CHILDREN(stat_node);
@@ -1789,12 +2193,16 @@ static int
ixgbe_sysctl_tdh_handler(SYSCTL_HANDLER_ARGS)
{
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!txr)
return (0);
+
+ if (atomic_load_acq_int(&txr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDH(txr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1812,12 +2220,15 @@ static int
ixgbe_sysctl_tdt_handler(SYSCTL_HANDLER_ARGS)
{
struct tx_ring *txr = ((struct tx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!txr)
return (0);
+ if (atomic_load_acq_int(&txr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&txr->sc->hw, IXGBE_TDT(txr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1835,12 +2246,15 @@ static int
ixgbe_sysctl_rdh_handler(SYSCTL_HANDLER_ARGS)
{
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!rxr)
return (0);
+ if (atomic_load_acq_int(&rxr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDH(rxr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1858,12 +2272,15 @@ static int
ixgbe_sysctl_rdt_handler(SYSCTL_HANDLER_ARGS)
{
struct rx_ring *rxr = ((struct rx_ring *)oidp->oid_arg1);
- int error;
- unsigned int val;
+ int error;
+ unsigned int val;
if (!rxr)
return (0);
+ if (atomic_load_acq_int(&rxr->sc->recovery_mode))
+ return (EPERM);
+
val = IXGBE_READ_REG(&rxr->sc->hw, IXGBE_RDT(rxr->me));
error = sysctl_handle_int(oidp, &val, 0, req);
if (error || !req->newptr)
@@ -1884,7 +2301,7 @@ static void
ixgbe_if_vlan_register(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
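
A worked example of the VFTA addressing above: the 4096 possible VLAN ids map onto 128 32-bit registers, so for vtag = 100:

	/*
	 *   index = (100 >> 5) & 0x7F = 3   (register VFTA[3])
	 *   bit   =  100 & 0x1F       = 4   (bit 4 of that register)
	 */
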
@@ -1902,7 +2319,7 @@ static void
ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1918,12 +2335,12 @@ ixgbe_if_vlan_unregister(if_ctx_t ctx, u16 vtag)
static void
ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- int i;
- u32 ctrl;
+ struct rx_ring *rxr;
+ int i;
+ u32 ctrl;
/*
@@ -1932,15 +2349,18 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
* the VFTA and other state, so if there
* have been no vlan's registered do nothing.
*/
- if (sc->num_vlans == 0 || (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
+ if (sc->num_vlans == 0 ||
+ (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) == 0) {
/* Clear the vlan hw flag */
for (i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
/* On 82599 the VLAN enable is per/queue in RXDCTL */
if (hw->mac.type != ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
+ ctrl = IXGBE_READ_REG(hw,
+ IXGBE_RXDCTL(rxr->me));
ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
+ ctrl);
}
rxr->vtag_strip = false;
}
@@ -1960,9 +2380,11 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
rxr = &sc->rx_queues[i].rxr;
/* On 82599 the VLAN enable is per/queue in RXDCTL */
if (hw->mac.type != ixgbe_mac_82598EB) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
+ ctrl = IXGBE_READ_REG(hw,
+ IXGBE_RXDCTL(rxr->me));
ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxr->me),
+ ctrl);
}
rxr->vtag_strip = true;
}
@@ -1999,11 +2421,11 @@ ixgbe_setup_vlan_hw_support(if_ctx_t ctx)
static void
ixgbe_get_slot_info(struct ixgbe_softc *sc)
{
- device_t dev = iflib_get_dev(sc->ctx);
+ device_t dev = iflib_get_dev(sc->ctx);
struct ixgbe_hw *hw = &sc->hw;
- int bus_info_valid = true;
- u32 offset;
- u16 link;
+ int bus_info_valid = true;
+ u32 offset;
+ u16 link;
/* Some devices are behind an internal bridge */
switch (hw->device_id) {
@@ -2059,31 +2481,42 @@ get_parent_info:
ixgbe_set_pci_config_data_generic(hw, link);
display:
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
"Unknown"));
if (bus_info_valid) {
if ((hw->device_id != IXGBE_DEV_ID_82599_SFP_SF_QP) &&
((hw->bus.width <= ixgbe_bus_width_pcie_x4) &&
(hw->bus.speed == ixgbe_bus_speed_2500))) {
- device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
- device_printf(dev, "For optimal performance a x8 PCIE, or x4 PCIE Gen2 slot is required.\n");
+ device_printf(dev,
+ "PCI-Express bandwidth available for this card"
+ " is not sufficient for optimal performance.\n");
+ device_printf(dev,
+ "For optimal performance a x8 PCIE, or x4 PCIE"
+ " Gen2 slot is required.\n");
}
if ((hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP) &&
((hw->bus.width <= ixgbe_bus_width_pcie_x8) &&
(hw->bus.speed < ixgbe_bus_speed_8000))) {
- device_printf(dev, "PCI-Express bandwidth available for this card\n is not sufficient for optimal performance.\n");
- device_printf(dev, "For optimal performance a x8 PCIE Gen3 slot is required.\n");
+ device_printf(dev,
+ "PCI-Express bandwidth available for this card"
+ " is not sufficient for optimal performance.\n");
+ device_printf(dev,
+ "For optimal performance a x8 PCIE Gen3 slot is"
+ " required.\n");
}
} else
- device_printf(dev, "Unable to determine slot speed/width. The speed/width reported are that of the internal switch.\n");
+ device_printf(dev,
+ "Unable to determine slot speed/width. The speed/width"
+ " reported are that of the internal switch.\n");
return;
} /* ixgbe_get_slot_info */
@@ -2096,11 +2529,11 @@ display:
static int
ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que;
- int error, rid, vector = 0;
- char buf[16];
+ int error, rid, vector = 0;
+ char buf[16];
/* Admin Que is vector 0*/
rid = vector + 1;
@@ -2109,11 +2542,13 @@ ixgbe_if_msix_intr_assign(if_ctx_t ctx, int msix)
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
- IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me, buf);
+ IFLIB_INTR_RXTX, ixgbe_msix_que, rx_que, rx_que->rxr.me,
+ buf);
if (error) {
device_printf(iflib_get_dev(ctx),
- "Failed to allocate que int %d err: %d", i, error);
+ "Failed to allocate que int %d err: %d",
+ i,error);
sc->num_rx_queues = i + 1;
goto fail;
}
@@ -2154,6 +2589,7 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
{
uint32_t newitr = 0;
struct rx_ring *rxr = &que->rxr;
+ /* FIXME struct tx_ring *txr = ... ->txr; */
/*
* Do Adaptive Interrupt Moderation:
@@ -2169,12 +2605,18 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
que->eitr_setting = 0;
/* Idle, do nothing */
if (rxr->bytes == 0) {
+ /* FIXME && txr->bytes == 0 */
return;
}
- if ((rxr->bytes) && (rxr->packets)) {
- newitr = (rxr->bytes / rxr->packets);
- }
+ if ((rxr->bytes) && (rxr->packets))
+ newitr = rxr->bytes / rxr->packets;
+ /* FIXME for transmit accounting
+ * if ((txr->bytes) && (txr->packets))
+ * newitr = txr->bytes/txr->packets;
+ * if ((rxr->bytes) && (rxr->packets))
+ * newitr = max(newitr, (rxr->bytes / rxr->packets));
+ */
newitr += 24; /* account for hardware frame, crc */
/* set an upper boundary */
@@ -2197,6 +2639,8 @@ ixgbe_perform_aim(struct ixgbe_softc *sc, struct ix_rx_queue *que)
que->eitr_setting = newitr;
/* Reset state */
+ /* FIXME txr->bytes = 0; */
+ /* FIXME txr->packets = 0; */
rxr->bytes = 0;
rxr->packets = 0;
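
A worked example of the moderation arithmetic above: with rxr->bytes = 300000 and rxr->packets = 200 over the last interval, the average frame is 1500 bytes, so

	/*
	 *   newitr = 300000 / 200 + 24 = 1524
	 *
	 * before the driver applies its upper clamp and programs the
	 * queue's EITR register from que->eitr_setting.
	 */
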
@@ -2210,8 +2654,8 @@ static int
ixgbe_msix_que(void *arg)
{
struct ix_rx_queue *que = arg;
- struct ixgbe_softc *sc = que->sc;
- if_t ifp = iflib_get_ifp(que->sc->ctx);
+ struct ixgbe_softc *sc = que->sc;
+ if_t ifp = iflib_get_ifp(que->sc->ctx);
/* Protect against spurious interrupts */
if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
@@ -2237,9 +2681,9 @@ ixgbe_msix_que(void *arg)
static void
ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int layer;
+ int layer;
INIT_DEBUGOUT("ixgbe_if_media_status: begin");
@@ -2252,14 +2696,17 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
layer = sc->phy_layer;
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
- layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
+ if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -2270,21 +2717,15 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
- if (hw->mac.type == ixgbe_mac_X550)
- switch (sc->link_speed) {
- case IXGBE_LINK_SPEED_5GB_FULL:
- ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_2_5GB_FULL:
- ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
- break;
- }
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_TWINAX | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ ifmr->ifm_active |= IFM_1000_KX | IFM_FDX;
+ break;
}
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_LR)
switch (sc->link_speed) {
@@ -2402,9 +2843,9 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
static int
ixgbe_if_media_change(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ifmedia *ifm = iflib_get_media(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ifmedia *ifm = iflib_get_media(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
ixgbe_link_speed speed = 0;
INIT_DEBUGOUT("ixgbe_if_media_change: begin");
@@ -2446,6 +2887,7 @@ ixgbe_if_media_change(if_ctx_t ctx)
#endif
case IFM_1000_LX:
case IFM_1000_SX:
+ case IFM_1000_BX:
speed |= IXGBE_LINK_SPEED_1GB_FULL;
break;
case IFM_1000_T:
@@ -2496,16 +2938,17 @@ static int
ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
- u32 rctl;
- int mcnt = 0;
+ if_t ifp = iflib_get_ifp(ctx);
+ u32 rctl;
+ int mcnt = 0;
rctl = IXGBE_READ_REG(&sc->hw, IXGBE_FCTRL);
rctl &= (~IXGBE_FCTRL_UPE);
if (if_getflags(ifp) & IFF_ALLMULTI)
mcnt = MAX_NUM_MULTICAST_ADDRESSES;
else {
- mcnt = min(if_llmaddr_count(ifp), MAX_NUM_MULTICAST_ADDRESSES);
+ mcnt = min(if_llmaddr_count(ifp),
+ MAX_NUM_MULTICAST_ADDRESSES);
}
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES)
rctl &= (~IXGBE_FCTRL_MPE);
@@ -2528,10 +2971,10 @@ ixgbe_if_promisc_set(if_ctx_t ctx, int flags)
static int
ixgbe_msix_link(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ixgbe_hw *hw = &sc->hw;
- u32 eicr, eicr_mask;
- s32 retval;
+ u32 eicr, eicr_mask;
+ s32 retval;
++sc->link_irq;
@@ -2551,6 +2994,11 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
}
+ if (eicr & IXGBE_EICR_FW_EVENT) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
+ sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
+ }
+
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
(eicr & IXGBE_EICR_FLOW_DIR)) {
@@ -2563,10 +3011,12 @@ ixgbe_msix_link(void *arg)
} else
if (eicr & IXGBE_EICR_ECC) {
device_printf(iflib_get_dev(sc->ctx),
- "Received ECC Err, initiating reset\n");
- hw->mac.flags |= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ "Received ECC Err, initiating reset\n");
+ hw->mac.flags |=
+ ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
ixgbe_reset_hw(hw);
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_ECC);
}
/* Check for over temp condition */
@@ -2583,7 +3033,8 @@ ixgbe_msix_link(void *arg)
if (retval != IXGBE_ERR_OVERTEMP)
break;
device_printf(iflib_get_dev(sc->ctx),
- "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ "\nCRITICAL: OVER TEMP!!"
+ " PHY IS SHUT DOWN!!\n");
device_printf(iflib_get_dev(sc->ctx),
"System shutdown required!\n");
break;
@@ -2594,21 +3045,28 @@ ixgbe_msix_link(void *arg)
if (retval != IXGBE_ERR_OVERTEMP)
break;
device_printf(iflib_get_dev(sc->ctx),
- "\nCRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ "\nCRITICAL: OVER TEMP!!"
+ " PHY IS SHUT DOWN!!\n");
device_printf(iflib_get_dev(sc->ctx),
"System shutdown required!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_TS);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_TS);
break;
}
}
/* Check for VF message */
if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
- (eicr & IXGBE_EICR_MAILBOX))
+ (eicr & IXGBE_EICR_MAILBOX)) {
sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
+ }
}
- if (ixgbe_is_sfp(hw)) {
+ /*
+ * On E610, the firmware handles PHY configuration, so
+ * there is no need to perform any SFP-specific tasks.
+ */
+ if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
@@ -2631,7 +3089,8 @@ ixgbe_msix_link(void *arg)
/* Check for fan failure */
if (sc->feat_en & IXGBE_FEATURE_FAN_FAIL) {
ixgbe_check_fan_failure(sc, eicr, true);
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EICR,
+ IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* External PHY interrupt */
@@ -2641,7 +3100,8 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
}
- return (sc->task_requests != 0) ? FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
+ return (sc->task_requests != 0) ?
+ FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
} /* ixgbe_msix_link */
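
The function above follows the iflib interrupt-filter contract: do only cheap register reads at interrupt level, latch pending work into sc->task_requests, and wake the admin task only when something was actually latched. A stripped-down sketch of that shape; the softc fields and cause bits here are placeholders, not the driver's real definitions:

/* Generic shape of an iflib "other causes" interrupt filter. */
struct example_softc {
	uint32_t task_requests;
};

static int
example_msix_other(void *arg)
{
	struct example_softc *sc = arg;
	uint32_t cause;

	cause = example_read_cause(sc);		/* hypothetical read */
	if (cause & EXAMPLE_CAUSE_LINK)		/* placeholder bit */
		sc->task_requests |= EXAMPLE_TASK_LSC;
	if (cause & EXAMPLE_CAUSE_FW)		/* placeholder bit */
		sc->task_requests |= EXAMPLE_TASK_FWEVENT;

	/* Defer the real work to the admin task only if needed. */
	return (sc->task_requests != 0) ?
	    FILTER_SCHEDULE_THREAD : FILTER_HANDLED;
}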
/************************************************************************
@@ -2651,8 +3111,11 @@ static int
ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
{
struct ix_rx_queue *que = ((struct ix_rx_queue *)oidp->oid_arg1);
- int error;
- unsigned int reg, usec, rate;
+ int error;
+ unsigned int reg, usec, rate;
+
+ if (atomic_load_acq_int(&que->sc->recovery_mode))
+ return (EPERM);
reg = IXGBE_READ_REG(&que->sc->hw, IXGBE_EITR(que->msix));
usec = ((reg & 0x0FF8) >> 3);
@@ -2677,14 +3140,272 @@ ixgbe_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS)
} /* ixgbe_sysctl_interrupt_rate_handler */
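
The rate/register conversion in this handler is easy to misread, so a worked example helps: the EITR interval sits in bits 3-11 (the 0x0FF8 mask), and the driver treats 4000000 as the product of interrupt rate and raw register value, as also seen in ixgbe_configure_ivars() below. A sketch assuming those constants carry over unchanged:

/* Round-trip between interrupts/second and the EITR interval field. */
static unsigned int
rate_to_eitr(unsigned int rate)
{
	if (rate == 0)
		return (0);
	/* rate = 8000 -> 4000000 / 8000 = 500 (0x1F4),
	 * masked to 0x1F0; interval field = 0x1F0 >> 3 = 62 */
	return ((4000000 / rate) & 0x0FF8);
}

static unsigned int
eitr_to_rate(unsigned int reg)
{
	unsigned int usec = (reg & 0x0FF8) >> 3;

	return (usec > 0 ? 4000000 / (usec << 3) : 0);
}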
/************************************************************************
+ * ixgbe_debug_dump_print_cluster
+ ************************************************************************/
+static u8
+ixgbe_debug_dump_print_cluster(struct ixgbe_softc *sc, struct sbuf *sbuf,
+ u8 cluster_id)
+{
+ u16 data_buf_size = IXGBE_ACI_MAX_BUFFER_SIZE;
+ device_t dev = sc->dev;
+ struct ixgbe_hw *hw = &sc->hw;
+ const u8 reserved_buf[8] = {};
+ int max_aci_calls = 1000;
+ int error, counter = 0;
+ u8 *data_buf;
+
+ /* Input parameters / loop variables */
+ u16 table_id = 0;
+ u32 offset = 0;
+
+ /* Data returned from ACI command */
+ u16 ret_buf_size = 0;
+ u16 ret_next_cluster = 0;
+ u16 ret_next_table = 0;
+ u32 ret_next_index = 0;
+
+ data_buf = (u8 *)malloc(data_buf_size, M_IXGBE, M_NOWAIT | M_ZERO);
+ if (!data_buf)
+ return (0);
+
+ DEBUGOUT2("%s: dumping cluster id (relative) %d\n",
+ __func__, cluster_id);
+
+ do {
+ DEBUGOUT3("table_id 0x%04x offset 0x%08x buf_size %d\n",
+ table_id, offset, data_buf_size);
+
+ error = ixgbe_aci_get_internal_data(hw, cluster_id, table_id,
+ offset, data_buf, data_buf_size, &ret_buf_size,
+ &ret_next_cluster, &ret_next_table, &ret_next_index);
+ if (error) {
+ device_printf(dev,
+ "%s: Failed to get internal FW/HW data, error: %d, "
+ "last aci status: %d\n",
+ __func__, error, hw->aci.last_status);
+ break;
+ }
+
+ DEBUGOUT3("ret_table_id 0x%04x ret_offset 0x%08x "
+ "ret_buf_size %d\n",
+ ret_next_table, ret_next_index, ret_buf_size);
+
+ /* Print cluster id */
+ u32 print_cluster_id = (u32)cluster_id;
+ sbuf_bcat(sbuf, &print_cluster_id, sizeof(print_cluster_id));
+ /* Print table id */
+ u32 print_table_id = (u32)table_id;
+ sbuf_bcat(sbuf, &print_table_id, sizeof(print_table_id));
+ /* Print table length */
+ u32 print_table_length = (u32)ret_buf_size;
+ sbuf_bcat(sbuf, &print_table_length,
+ sizeof(print_table_length));
+ /* Print current offset */
+ u32 print_curr_offset = offset;
+ sbuf_bcat(sbuf, &print_curr_offset, sizeof(print_curr_offset));
+ /* Print reserved bytes */
+ sbuf_bcat(sbuf, reserved_buf, sizeof(reserved_buf));
+ /* Print data */
+ sbuf_bcat(sbuf, data_buf, ret_buf_size);
+
+ /* Prepare for the next loop spin */
+ memset(data_buf, 0, data_buf_size);
+
+ bool last_index = (ret_next_index == 0xffffffff);
+ bool last_table = ((ret_next_table == 0xff ||
+ ret_next_table == 0xffff) &&
+ last_index);
+
+ if (last_table) {
+ /* End of the cluster */
+ DEBUGOUT1("End of the cluster ID %d\n", cluster_id);
+ break;
+ } else if (last_index) {
+ /* End of the table */
+ table_id = ret_next_table;
+ offset = 0;
+ } else {
+ /* More data left in the table */
+ offset = ret_next_index;
+ }
+ } while (++counter < max_aci_calls);
+
+ if (counter >= max_aci_calls)
+ device_printf(dev, "Exceeded nr of ACI calls for cluster %d\n",
+ cluster_id);
+
+ free(data_buf, M_IXGBE);
+
+ return (++cluster_id);
+} /* ixgbe_debug_dump_print_cluster */
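
Each loop iteration above emits a fixed 24-byte header ahead of the chunk payload. The layout is implicit in the sequence of sbuf_bcat() calls; a hypothetical struct (the name and __packed attribute are mine, not the driver's) makes it explicit:

/* On-wire layout of one debug-dump record, as emitted above. */
struct dbg_dump_record_hdr {
	uint32_t cluster_id;	/* cluster being dumped */
	uint32_t table_id;	/* table within the cluster */
	uint32_t table_length;	/* bytes of payload in this chunk */
	uint32_t offset;	/* chunk offset within the table */
	uint8_t  reserved[8];	/* zeroed padding */
} __packed;			/* followed by table_length data bytes */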
+
+/************************************************************************
+ * ixgbe_sysctl_debug_dump_set_clusters
+ *
+ * Sets the cluster to dump from FW when Debug Dump requested.
+ ************************************************************************/
+static int
+ixgbe_sysctl_debug_dump_set_clusters(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ u32 clusters = sc->debug_dump_cluster_mask;
+ device_t dev = sc->dev;
+ int error;
+
+ error = sysctl_handle_32(oidp, &clusters, 0, req);
+ if ((error) || !req->newptr)
+ return (error);
+
+ if (clusters & ~(IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK)) {
+ device_printf(dev,
+ "%s: Unrecognized parameter: %u\n",
+ __func__, clusters);
+ sc->debug_dump_cluster_mask =
+ IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID;
+ return (EINVAL);
+ }
+
+ sc->debug_dump_cluster_mask = clusters;
+
+ return (0);
+} /* ixgbe_sysctl_debug_dump_set_clusters */
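
The check above is the standard "no bits outside the valid set" idiom, with one twist: on a bad write the handler also poisons the stored mask, so a later dump request fails fast instead of running on a half-valid selection. A worked example of the idiom, with an invented valid mask:

/* Bitmask validation idiom; the mask value is illustrative only. */
#define EXAMPLE_VALID_MASK 0x0000001fU	/* clusters 0..4 (example) */

/* 0x23 & ~0x1f = 0x20 -> rejected; 0x15 & ~0x1f = 0x00 -> accepted */
static int
example_validate(uint32_t clusters)
{
	return ((clusters & ~EXAMPLE_VALID_MASK) ? EINVAL : 0);
}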
+
+/************************************************************************
+ * ixgbe_sysctl_dump_debug_dump
+ ************************************************************************/
+static int
+ixgbe_sysctl_dump_debug_dump(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ device_t dev = sc->dev;
+ struct sbuf *sbuf;
+ int error = 0;
+
+ UNREFERENCED_PARAMETER(arg2);
+
+ if (!sc->do_debug_dump) {
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ error = SYSCTL_OUT(req, 0, 0);
+ return (error);
+ }
+
+ char input_buf[2] = "";
+ error = sysctl_handle_string(oidp, input_buf,
+ sizeof(input_buf), req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+
+ if (input_buf[0] == '1') {
+ if (sc->debug_dump_cluster_mask ==
+ IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID) {
+ device_printf(dev,
+ "Debug Dump failed because an invalid "
+ "cluster was specified.\n");
+ return (EINVAL);
+ }
+
+ sc->do_debug_dump = true;
+ return (0);
+ }
+
+ return (EINVAL);
+ }
+
+ /* Caller just wants the upper bound for size */
+ if (req->oldptr == NULL && req->newptr == NULL) {
+ size_t est_output_len = IXGBE_DBG_DUMP_BASE_SIZE;
+ if (sc->debug_dump_cluster_mask & 0x2)
+ est_output_len += IXGBE_DBG_DUMP_BASE_SIZE;
+ error = SYSCTL_OUT(req, 0, est_output_len);
+ return (error);
+ }
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf_clear_flags(sbuf, SBUF_INCLUDENUL);
+
+ DEBUGOUT("FW Debug Dump running...\n");
+
+ if (sc->debug_dump_cluster_mask) {
+ for (u8 id = 0; id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX; id++) {
+ if (sc->debug_dump_cluster_mask & BIT(id)) {
+ DEBUGOUT1("Dumping cluster ID %u...\n", id);
+ ixgbe_debug_dump_print_cluster(sc, sbuf, id);
+ }
+ }
+ } else {
+ u8 next_cluster_id = 0;
+ do {
+ DEBUGOUT1("Dumping cluster ID %u...\n",
+ next_cluster_id);
+ next_cluster_id = ixgbe_debug_dump_print_cluster(sc,
+ sbuf, next_cluster_id);
+ } while (next_cluster_id != 0 &&
+ next_cluster_id <= IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX);
+ }
+
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ sc->do_debug_dump = false;
+
+ return (error);
+} /* ixgbe_sysctl_dump_debug_dump */
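
Because the handler answers a NULL-buffer probe with a size estimate, a userspace consumer can use the usual two-call sysctl pattern: arm the dump by writing "1", probe for an upper bound, then read the blob. A minimal sketch; the OID path dev.ix.0.debug.dump.dump is an assumption derived from the node names registered below, and the unit number is system-dependent:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdlib.h>

#define DUMP_OID "dev.ix.0.debug.dump.dump"	/* assumed path */

static void *
fetch_debug_dump(size_t *lenp)
{
	void *buf;

	/* Arm the dump; the handler expects the string "1". */
	if (sysctlbyname(DUMP_OID, NULL, NULL, "1", 2) != 0)
		return (NULL);
	/* First call: probe for an upper bound on the output size. */
	if (sysctlbyname(DUMP_OID, NULL, lenp, NULL, 0) != 0)
		return (NULL);
	if ((buf = malloc(*lenp)) == NULL)
		return (NULL);
	/* Second call: stream the dump itself. */
	if (sysctlbyname(DUMP_OID, buf, lenp, NULL, 0) != 0) {
		free(buf);
		return (NULL);
	}
	return (buf);
}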
+
+/************************************************************************
+ * ixgbe_add_debug_dump_sysctls
+ ************************************************************************/
+static void
+ixgbe_add_debug_dump_sysctls(struct ixgbe_softc *sc)
+{
+ struct sysctl_oid_list *debug_list, *dump_list;
+ struct sysctl_oid *dump_node;
+ struct sysctl_ctx_list *ctx;
+ device_t dev = sc->dev;
+
+ ctx = device_get_sysctl_ctx(dev);
+ debug_list = SYSCTL_CHILDREN(sc->debug_sysctls);
+
+ dump_node = SYSCTL_ADD_NODE(ctx, debug_list, OID_AUTO, "dump",
+ CTLFLAG_RD, NULL, "Internal FW/HW Dump");
+ dump_list = SYSCTL_CHILDREN(dump_node);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "clusters",
+ CTLTYPE_U32 | CTLFLAG_RW, sc, 0,
+ ixgbe_sysctl_debug_dump_set_clusters, "SU",
+ IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER);
+
+ SYSCTL_ADD_PROC(ctx, dump_list, OID_AUTO, "dump",
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
+ ixgbe_sysctl_dump_debug_dump, "",
+ IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP);
+} /* ixgbe_add_debug_dump_sysctls */
+
+static void
+ixgbe_add_debug_sysctls(struct ixgbe_softc *sc)
+{
+ struct sysctl_oid_list *ctx_list;
+ struct sysctl_ctx_list *ctx;
+ device_t dev = sc->dev;
+
+ ctx = device_get_sysctl_ctx(dev);
+ ctx_list = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ sc->debug_sysctls = SYSCTL_ADD_NODE(ctx, ctx_list, OID_AUTO, "debug",
+ CTLFLAG_RD, NULL, "Debug Sysctls");
+
+ if (sc->feat_en & IXGBE_FEATURE_DBG_DUMP)
+ ixgbe_add_debug_dump_sysctls(sc);
+} /* ixgbe_add_debug_sysctls */
+
+/************************************************************************
* ixgbe_add_device_sysctls
************************************************************************/
static void
ixgbe_add_device_sysctls(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
struct sysctl_oid_list *child;
struct sysctl_ctx_list *ctx_list;
@@ -2693,12 +3414,12 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
/* Sysctls for all devices */
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fc",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_flowcntl, "I",
IXGBE_SYSCTL_DESC_SET_FC);
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "advertise_speed",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_advertise, "I",
IXGBE_SYSCTL_DESC_ADV_SPEED);
@@ -2707,35 +3428,54 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
&sc->enable_aim, 0, "Interrupt Moderation");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "fw_version",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_print_fw_version, "A", "Prints FW/NVM Versions");
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_first_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 0, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for first segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_middle_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 1, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for middle segment");
+
+ SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO,
+ "tso_tcp_flags_mask_last_segment",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ sc, 2, ixgbe_sysctl_tso_tcp_flags_mask, "IU",
+ "TSO TCP flags mask for last segment");
+
#ifdef IXGBE_DEBUG
/* testing sysctls (for all devices) */
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "power_state",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_INT | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_power_state,
"I", "PCI Power State");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "print_rss_config",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_STRING | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_print_rss_config, "A", "Prints RSS Configuration");
#endif
/* for X550 series devices */
if (hw->mac.type >= ixgbe_mac_X550)
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "dmac",
- CTLTYPE_U16 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U16 | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_dmac,
"I", "DMA Coalesce");
/* for WoL-capable devices */
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wol_enable",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
- ixgbe_sysctl_wol_enable, "I", "Enable/Disable Wake on LAN");
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ ixgbe_sysctl_wol_enable, "I",
+ "Enable/Disable Wake on LAN");
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "wufc",
- CTLTYPE_U32 | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U32 | CTLFLAG_RW,
sc, 0, ixgbe_sysctl_wufc,
"I", "Enable/Disable Wake Up Filters");
}
@@ -2746,26 +3486,29 @@ ixgbe_add_device_sysctls(if_ctx_t ctx)
struct sysctl_oid_list *phy_list;
phy_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "phy",
- CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "External PHY sysctls");
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "External PHY sysctls");
phy_list = SYSCTL_CHILDREN(phy_node);
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO, "temp",
- CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ CTLTYPE_U16 | CTLFLAG_RD,
sc, 0, ixgbe_sysctl_phy_temp,
"I", "Current External PHY Temperature (Celsius)");
SYSCTL_ADD_PROC(ctx_list, phy_list, OID_AUTO,
"overtemp_occurred",
- CTLTYPE_U16 | CTLFLAG_RD | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_U16 | CTLFLAG_RD, sc, 0,
ixgbe_sysctl_phy_overtemp_occurred, "I",
"External PHY High Temperature Event Occurred");
}
if (sc->feat_cap & IXGBE_FEATURE_EEE) {
SYSCTL_ADD_PROC(ctx_list, child, OID_AUTO, "eee_state",
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc, 0,
+ CTLTYPE_INT | CTLFLAG_RW, sc, 0,
ixgbe_sysctl_eee_state, "I", "EEE Power Save State");
}
+
+ ixgbe_add_debug_sysctls(sc);
} /* ixgbe_add_device_sysctls */
/************************************************************************
@@ -2775,15 +3518,16 @@ static int
ixgbe_allocate_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int rid;
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@@ -2810,8 +3554,8 @@ static int
ixgbe_if_detach(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- u32 ctrl_ext;
+ device_t dev = iflib_get_dev(ctx);
+ u32 ctrl_ext;
INIT_DEBUGOUT("ixgbe_detach: begin");
@@ -2827,7 +3571,15 @@ ixgbe_if_detach(if_ctx_t ctx)
ctrl_ext &= ~IXGBE_CTRL_EXT_DRV_LOAD;
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
+ callout_drain(&sc->fw_mode_timer);
+
+ if (sc->hw.mac.type == ixgbe_mac_E610) {
+ ixgbe_disable_lse(sc);
+ ixgbe_shutdown_aci(&sc->hw);
+ }
+
ixgbe_free_pci_resources(ctx);
+
free(sc->mta, M_IXGBE);
return (0);
@@ -2841,10 +3593,10 @@ ixgbe_if_detach(if_ctx_t ctx)
static int
ixgbe_setup_low_power_mode(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- s32 error = 0;
+ device_t dev = iflib_get_dev(ctx);
+ s32 error = 0;
if (!hw->wol_enabled)
ixgbe_set_phy_power(hw, false);
@@ -2857,8 +3609,9 @@ ixgbe_setup_low_power_mode(if_ctx_t ctx)
IXGBE_READ_REG(hw, IXGBE_GRC_BY_MAC(hw)) & ~(u32)2);
/*
- * Clear Wake Up Status register to prevent any previous wakeup
- * events from waking us up immediately after we suspend.
+ * Clear Wake Up Status register to prevent any previous
+ * wakeup events from waking us up immediately after we
+ * suspend.
*/
IXGBE_WRITE_REG(hw, IXGBE_WUS, 0xffffffff);
@@ -2877,7 +3630,8 @@ ixgbe_setup_low_power_mode(if_ctx_t ctx)
ixgbe_if_stop(ctx);
error = hw->phy.ops.enter_lplu(hw);
if (error)
- device_printf(dev, "Error entering LPLU: %d\n", error);
+ device_printf(dev, "Error entering LPLU: %d\n",
+ error);
hw->phy.reset_disable = false;
} else {
/* Just stop for other adapters */
@@ -2927,11 +3681,11 @@ ixgbe_if_suspend(if_ctx_t ctx)
static int
ixgbe_if_resume(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 wus;
+ u32 wus;
INIT_DEBUGOUT("ixgbe_resume: begin");
@@ -3035,17 +3789,17 @@ ixgbe_if_crcstrip_set(if_ctx_t ctx, int onoff, int crcstrip)
void
ixgbe_if_init(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
- device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *rx_que;
struct ix_tx_queue *tx_que;
- u32 txdctl, mhadd;
- u32 rxdctl, rxctrl;
- u32 ctrl_ext;
+ u32 txdctl, mhadd;
+ u32 rxdctl, rxctrl;
+ u32 ctrl_ext;
- int i, j, err;
+ int i, j, err;
INIT_DEBUGOUT("ixgbe_if_init: begin");
@@ -3094,7 +3848,8 @@ ixgbe_if_init(if_ctx_t ctx)
}
/* Now enable all the queues */
- for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues; i++, tx_que++) {
+ for (i = 0, tx_que = sc->tx_queues; i < sc->num_tx_queues;
+ i++, tx_que++) {
struct tx_ring *txr = &tx_que->txr;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txr->me));
@@ -3112,7 +3867,8 @@ ixgbe_if_init(if_ctx_t ctx)
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txr->me), txdctl);
}
- for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues; i++, rx_que++) {
+ for (i = 0, rx_que = sc->rx_queues; i < sc->num_rx_queues;
+ i++, rx_que++) {
struct rx_ring *rxr = &rx_que->rxr;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxr->me));
@@ -3242,6 +3998,7 @@ ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3268,7 +4025,7 @@ ixgbe_configure_ivars(struct ixgbe_softc *sc)
{
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que = sc->tx_queues;
- u32 newitr;
+ u32 newitr;
if (ixgbe_max_interrupt_rate > 0)
newitr = (4000000 / ixgbe_max_interrupt_rate) & 0x0FF8;
@@ -3307,16 +4064,16 @@ static void
ixgbe_config_gpie(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 gpie;
+ u32 gpie;
gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
if (sc->intr_type == IFLIB_INTR_MSIX) {
/* Enable Enhanced MSI-X mode */
- gpie |= IXGBE_GPIE_MSIX_MODE
- | IXGBE_GPIE_EIAME
- | IXGBE_GPIE_PBA_SUPPORT
- | IXGBE_GPIE_OCD;
+ gpie |= IXGBE_GPIE_MSIX_MODE |
+ IXGBE_GPIE_EIAME |
+ IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD;
}
/* Fan Failure Interrupt */
@@ -3353,7 +4110,7 @@ static void
ixgbe_config_delay_values(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 rxpb, frame, size, tmp;
+ u32 rxpb, frame, size, tmp;
frame = sc->max_frame_size;
@@ -3413,19 +4170,20 @@ ixgbe_mc_filter_apply(void *arg, struct sockaddr_dl *sdl, u_int idx)
static void
ixgbe_if_multi_set(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_mc_addr *mta;
- if_t ifp = iflib_get_ifp(ctx);
- u8 *update_ptr;
- u32 fctrl;
- u_int mcnt;
+ if_t ifp = iflib_get_ifp(ctx);
+ u8 *update_ptr;
+ u32 fctrl;
+ u_int mcnt;
IOCTL_DEBUGOUT("ixgbe_if_multi_set: begin");
mta = sc->mta;
bzero(mta, sizeof(*mta) * MAX_NUM_MULTICAST_ADDRESSES);
- mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply, sc);
+ mcnt = if_foreach_llmaddr(iflib_get_ifp(ctx), ixgbe_mc_filter_apply,
+ sc);
if (mcnt < MAX_NUM_MULTICAST_ADDRESSES) {
update_ptr = (u8 *)mta;
@@ -3494,6 +4252,35 @@ ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
} /* ixgbe_if_timer */
/************************************************************************
+ * ixgbe_fw_mode_timer - FW mode timer routine
+ ************************************************************************/
+static void
+ixgbe_fw_mode_timer(void *arg)
+{
+ struct ixgbe_softc *sc = arg;
+ struct ixgbe_hw *hw = &sc->hw;
+
+ if (ixgbe_fw_recovery_mode(hw)) {
+ if (atomic_cmpset_acq_int(&sc->recovery_mode, 0, 1)) {
+ /* Firmware error detected, entering recovery mode */
+ device_printf(sc->dev,
+ "Firmware recovery mode detected. Limiting"
+ " functionality. Refer to the Intel(R) Ethernet"
+ " Adapters and Devices User Guide for details on"
+ " firmware recovery mode.\n");
+
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(sc->ctx);
+ }
+ } else
+ atomic_cmpset_acq_int(&sc->recovery_mode, 1, 0);
+
+ callout_reset(&sc->fw_mode_timer, hz,
+ ixgbe_fw_mode_timer, sc);
+} /* ixgbe_fw_mode_timer */
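
The timer re-arms itself once per second, and atomic_cmpset_acq_int() succeeds only on the 0 -> 1 edge, so the message and ixgbe_if_stop() run exactly once per entry into recovery mode even though the poll fires repeatedly; the 1 -> 0 cmpset likewise clears the flag once when firmware recovers. A sketch of the attach-side setup, which is an assumption here since only the detach-side callout_drain() appears in this diff:

/* Assumed attach-side initialization of the self-rearming FW poll. */
static void
example_start_fw_poll(struct ixgbe_softc *sc)
{
	callout_init(&sc->fw_mode_timer, 1);	/* MPSAFE callout */
	sc->recovery_mode = 0;
	callout_reset(&sc->fw_mode_timer, hz, ixgbe_fw_mode_timer, sc);
}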
+
+/************************************************************************
* ixgbe_sfp_probe
*
* Determine if a port had optics inserted.
@@ -3501,10 +4288,10 @@ ixgbe_if_timer(if_ctx_t ctx, uint16_t qid)
static bool
ixgbe_sfp_probe(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- bool result = false;
+ device_t dev = iflib_get_dev(ctx);
+ bool result = false;
if ((hw->phy.type == ixgbe_phy_nl) &&
(hw->phy.sfp_type == ixgbe_sfp_type_not_present)) {
@@ -3514,7 +4301,8 @@ ixgbe_sfp_probe(if_ctx_t ctx)
ret = hw->phy.ops.reset(hw);
sc->sfp_probe = false;
if (ret == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- device_printf(dev, "Unsupported SFP+ module detected!");
+ device_printf(dev,
+ "Unsupported SFP+ module detected!");
device_printf(dev,
"Reload driver with supported module.\n");
goto out;
@@ -3534,11 +4322,11 @@ out:
static void
ixgbe_handle_mod(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = iflib_get_dev(ctx);
- u32 err, cage_full = 0;
+ device_t dev = iflib_get_dev(ctx);
+ u32 err, cage_full = 0;
if (sc->hw.need_crosstalk_fix) {
switch (hw->mac.type) {
@@ -3590,11 +4378,11 @@ handle_mod_out:
static void
ixgbe_handle_msf(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 autoneg;
- bool negotiate;
+ u32 autoneg;
+ bool negotiate;
/* get_supported_phy_layer will call hw->phy.ops.identify_sfp() */
sc->phy_layer = ixgbe_get_supported_physical_layer(hw);
@@ -3617,20 +4405,112 @@ ixgbe_handle_msf(void *context)
static void
ixgbe_handle_phy(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int error;
+ int error;
error = hw->phy.ops.handle_lasi(hw);
if (error == IXGBE_ERR_OVERTEMP)
- device_printf(sc->dev, "CRITICAL: EXTERNAL PHY OVER TEMP!! PHY will downshift to lower power state!\n");
+ device_printf(sc->dev,
+ "CRITICAL: EXTERNAL PHY OVER TEMP!!"
+ " PHY will downshift to lower power state!\n");
else if (error)
device_printf(sc->dev,
"Error handling LASI interrupt: %d\n", error);
} /* ixgbe_handle_phy */
/************************************************************************
+ * ixgbe_enable_lse - enable link status events
+ *
+ * Sets mask and enables link status events
+ ************************************************************************/
+s32
+ixgbe_enable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
+
+ error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = mask;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_enable_lse */
+
+/************************************************************************
+ * ixgbe_disable_lse - disable link status events
+ ************************************************************************/
+s32
+ixgbe_disable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = 0;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_disable_lse */
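
The complement in ixgbe_enable_lse() is the interesting part: assuming the ACI convention that a set mask bit suppresses its event (the hunk itself does not state this), clearing exactly four bits subscribes to exactly those four notifications, and caching the result in sc->lse_mask lets the disable path undo precisely what was enabled. A sketch of the arithmetic under that assumption:

/* Assumed ACI semantics: mask bit set = event suppressed. */
u16 wanted = IXGBE_ACI_LINK_EVENT_UPDOWN |
    IXGBE_ACI_LINK_EVENT_MEDIA_NA |
    IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
    IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL;
u16 mask = (u16)~wanted;	/* 0 = deliver, 1 = suppress (assumed) */

ixgbe_configure_lse(&sc->hw, true, mask);	/* subscribe */
sc->lse_mask = mask;				/* remembered for disable */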
+
+/************************************************************************
+ * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
+ ************************************************************************/
+static void
+ixgbe_handle_fw_event(void *context)
+{
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_aci_event event;
+ bool pending = false;
+ s32 error;
+
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+ device_printf(sc->dev, "Can not allocate buffer for "
+ "event message\n");
+ return;
+ }
+
+ do {
+ error = ixgbe_aci_get_event(hw, &event, &pending);
+ if (error) {
+ device_printf(sc->dev, "Error getting event from "
+ "FW:%d\n", error);
+ break;
+ }
+
+ switch (le16toh(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
+ break;
+
+ case ixgbe_aci_opc_temp_tca_event:
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(ctx);
+ device_printf(sc->dev,
+ "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(sc->dev, "System shutdown required!\n");
+ break;
+
+ default:
+ device_printf(sc->dev,
+ "Unknown FW event captured, opcode=0x%04X\n",
+ le16toh(event.desc.opcode));
+ break;
+ }
+ } while (pending);
+
+ free(event.msg_buf, M_IXGBE);
+} /* ixgbe_handle_fw_event */
+
+/************************************************************************
* ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
@@ -3639,7 +4519,7 @@ ixgbe_handle_phy(void *context)
static void
ixgbe_if_stop(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
INIT_DEBUGOUT("ixgbe_if_stop: begin\n");
@@ -3663,6 +4543,33 @@ ixgbe_if_stop(if_ctx_t ctx)
} /* ixgbe_if_stop */
/************************************************************************
+ * ixgbe_link_speed_to_str - Convert link speed to string
+ *
+ * Helper function to convert link speed constants to human-readable
+ * string representations in conventional Gbps or Mbps.
+ ************************************************************************/
+static const char *
+ixgbe_link_speed_to_str(u32 link_speed)
+{
+ switch (link_speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ return "10 Gbps";
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ return "5 Gbps";
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ return "2.5 Gbps";
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ return "1 Gbps";
+ case IXGBE_LINK_SPEED_100_FULL:
+ return "100 Mbps";
+ case IXGBE_LINK_SPEED_10_FULL:
+ return "10 Mbps";
+ default:
+ return "Unknown";
+ }
+} /* ixgbe_link_speed_to_str */
+
+/************************************************************************
* ixgbe_update_link_status - Update OS on link state
*
* Note: Only updates the OS on the cached link state.
@@ -3673,14 +4580,14 @@ static void
ixgbe_if_update_admin_status(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
if (sc->link_up) {
if (sc->link_active == false) {
if (bootverbose)
- device_printf(dev, "Link is up %d Gbps %s \n",
- ((sc->link_speed == 128) ? 10 : 1),
- "Full Duplex");
+ device_printf(dev,
+ "Link is up %s Full Duplex\n",
+ ixgbe_link_speed_to_str(sc->link_speed));
sc->link_active = true;
/* Update any Flow Control changes */
ixgbe_fc_enable(&sc->hw);
@@ -3704,6 +4611,8 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
}
/* Handle task requests from msix_link() */
+ if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
+ ixgbe_handle_fw_event(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
ixgbe_handle_mod(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
@@ -3725,7 +4634,7 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
static void
ixgbe_config_dmac(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_hw *hw = &sc->hw;
struct ixgbe_dmac_config *dcfg = &hw->mac.dmac_config;
if (hw->mac.type < ixgbe_mac_X550 || !hw->mac.ops.dmac_config)
@@ -3751,10 +4660,10 @@ ixgbe_config_dmac(struct ixgbe_softc *sc)
void
ixgbe_if_enable_intr(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *que = sc->rx_queues;
- u32 mask, fwsm;
+ u32 mask, fwsm;
mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
@@ -3791,6 +4700,9 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
break;
+ case ixgbe_mac_E610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ break;
default:
break;
}
@@ -3813,6 +4725,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
+ mask &= ~IXGBE_EIMS_FW_EVENT;
if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
mask &= ~IXGBE_EIMS_MAILBOX;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
@@ -3831,7 +4744,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
@@ -3857,7 +4770,8 @@ ixgbe_if_disable_intr(if_ctx_t ctx)
static void
ixgbe_link_intr_enable(if_ctx_t ctx)
{
- struct ixgbe_hw *hw = &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
+ struct ixgbe_hw *hw =
+ &((struct ixgbe_softc *)iflib_get_softc(ctx))->hw;
/* Re-enable other interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
@@ -3869,7 +4783,7 @@ ixgbe_link_intr_enable(if_ctx_t ctx)
static int
ixgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = &sc->rx_queues[rxqid];
ixgbe_enable_queue(sc, que->msix);
@@ -3884,8 +4798,8 @@ static void
ixgbe_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = 1ULL << vector;
- u32 mask;
+ u64 queue = 1ULL << vector;
+ u32 mask;
if (hw->mac.type == ixgbe_mac_82598EB) {
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
@@ -3907,8 +4821,8 @@ static void
ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = 1ULL << vector;
- u32 mask;
+ u64 queue = 1ULL << vector;
+ u32 mask;
if (hw->mac.type == ixgbe_mac_82598EB) {
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
@@ -3929,11 +4843,11 @@ ixgbe_disable_queue(struct ixgbe_softc *sc, u32 vector)
int
ixgbe_intr(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = sc->rx_queues;
- struct ixgbe_hw *hw = &sc->hw;
- if_ctx_t ctx = sc->ctx;
- u32 eicr, eicr_mask;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_ctx_t ctx = sc->ctx;
+ u32 eicr, eicr_mask;
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
@@ -3944,11 +4858,12 @@ ixgbe_intr(void *arg)
}
/* Check for fan failure */
- if ((hw->device_id == IXGBE_DEV_ID_82598AT) &&
+ if ((sc->feat_en & IXGBE_FEATURE_FAN_FAIL) &&
(eicr & IXGBE_EICR_GPI_SDP1)) {
device_printf(sc->dev,
"\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
- IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS,
+ IXGBE_EICR_GPI_SDP1_BY_MAC(hw));
}
/* Link status change */
@@ -3979,8 +4894,9 @@ ixgbe_intr(void *arg)
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
- (eicr & IXGBE_EICR_GPI_SDP0_X540))
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
+ }
return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
@@ -3992,8 +4908,8 @@ static void
ixgbe_free_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ix_rx_queue *que = sc->rx_queues;
- device_t dev = iflib_get_dev(ctx);
+ struct ix_rx_queue *que = sc->rx_queues;
+ device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
if (sc->intr_type == IFLIB_INTR_MSIX)
@@ -4019,10 +4935,10 @@ static int
ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc;
- int error, fc;
+ int error, fc;
sc = (struct ixgbe_softc *)arg1;
- fc = sc->hw.fc.current_mode;
+ fc = sc->hw.fc.requested_mode;
error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
@@ -4051,12 +4967,10 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- sc->hw.fc.requested_mode = fc;
if (sc->num_rx_queues > 1)
ixgbe_disable_rx_drop(sc);
break;
case ixgbe_fc_none:
- sc->hw.fc.requested_mode = ixgbe_fc_none;
if (sc->num_rx_queues > 1)
ixgbe_enable_rx_drop(sc);
break;
@@ -4064,6 +4978,8 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
return (EINVAL);
}
+ sc->hw.fc.requested_mode = fc;
+
/* Don't autoneg if forcing a value */
sc->hw.fc.disable_fc_autoneg = true;
ixgbe_fc_enable(&sc->hw);
@@ -4084,8 +5000,8 @@ static void
ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- u32 srrctl;
+ struct rx_ring *rxr;
+ u32 srrctl;
for (int i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
@@ -4097,8 +5013,9 @@ ixgbe_enable_rx_drop(struct ixgbe_softc *sc)
/* enable drop for each vf */
for (int i = 0; i < sc->num_vfs; i++) {
IXGBE_WRITE_REG(hw, IXGBE_QDE,
- (IXGBE_QDE_WRITE | (i << IXGBE_QDE_IDX_SHIFT) |
- IXGBE_QDE_ENABLE));
+ (IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ IXGBE_QDE_ENABLE));
}
} /* ixgbe_enable_rx_drop */
@@ -4109,8 +5026,8 @@ static void
ixgbe_disable_rx_drop(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- struct rx_ring *rxr;
- u32 srrctl;
+ struct rx_ring *rxr;
+ u32 srrctl;
for (int i = 0; i < sc->num_rx_queues; i++) {
rxr = &sc->rx_queues[i].rxr;
@@ -4135,9 +5052,12 @@ static int
ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc;
- int error, advertise;
+ int error, advertise;
sc = (struct ixgbe_softc *)arg1;
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
+
advertise = sc->advertise;
error = sysctl_handle_int(oidp, &advertise, 0, req);
@@ -4162,12 +5082,12 @@ ixgbe_sysctl_advertise(SYSCTL_HANDLER_ARGS)
static int
ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
{
- device_t dev = iflib_get_dev(sc->ctx);
- struct ixgbe_hw *hw;
+ device_t dev = iflib_get_dev(sc->ctx);
+ struct ixgbe_hw *hw;
ixgbe_link_speed speed = 0;
ixgbe_link_speed link_caps = 0;
- s32 err = IXGBE_NOT_IMPLEMENTED;
- bool negotiate = false;
+ s32 err = IXGBE_NOT_IMPLEMENTED;
+ bool negotiate = false;
/* Checks to validate new value */
if (sc->advertise == advertise) /* no change */
@@ -4181,12 +5101,16 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
if (!((hw->phy.media_type == ixgbe_media_type_copper) ||
(hw->phy.multispeed_fiber))) {
- device_printf(dev, "Advertised speed can only be set on copper or multispeed fiber media types.\n");
+ device_printf(dev,
+ "Advertised speed can only be set on copper or multispeed"
+ " fiber media types.\n");
return (EINVAL);
}
if (advertise < 0x1 || advertise > 0x3F) {
- device_printf(dev, "Invalid advertised speed; valid modes are 0x1 through 0x3F\n");
+ device_printf(dev,
+ "Invalid advertised speed; valid modes are 0x1 through"
+ " 0x3F\n");
return (EINVAL);
}
@@ -4194,7 +5118,9 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
err = hw->mac.ops.get_link_capabilities(hw, &link_caps,
&negotiate);
if (err != IXGBE_SUCCESS) {
- device_printf(dev, "Unable to determine supported advertise speeds\n");
+ device_printf(dev,
+ "Unable to determine supported advertise speeds"
+ "\n");
return (ENODEV);
}
}
@@ -4202,42 +5128,54 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
/* Set new value and report new advertised mode */
if (advertise & 0x1) {
if (!(link_caps & IXGBE_LINK_SPEED_100_FULL)) {
- device_printf(dev, "Interface does not support 100Mb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 100Mb advertised"
+ " speed\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_100_FULL;
}
if (advertise & 0x2) {
if (!(link_caps & IXGBE_LINK_SPEED_1GB_FULL)) {
- device_printf(dev, "Interface does not support 1Gb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 1Gb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_1GB_FULL;
}
if (advertise & 0x4) {
if (!(link_caps & IXGBE_LINK_SPEED_10GB_FULL)) {
- device_printf(dev, "Interface does not support 10Gb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 10Gb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_10GB_FULL;
}
if (advertise & 0x8) {
if (!(link_caps & IXGBE_LINK_SPEED_10_FULL)) {
- device_printf(dev, "Interface does not support 10Mb advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 10Mb advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_10_FULL;
}
if (advertise & 0x10) {
if (!(link_caps & IXGBE_LINK_SPEED_2_5GB_FULL)) {
- device_printf(dev, "Interface does not support 2.5G advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 2.5G advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_2_5GB_FULL;
}
if (advertise & 0x20) {
if (!(link_caps & IXGBE_LINK_SPEED_5GB_FULL)) {
- device_printf(dev, "Interface does not support 5G advertised speed\n");
+ device_printf(dev,
+ "Interface does not support 5G advertised speed"
+ "\n");
return (EINVAL);
}
speed |= IXGBE_LINK_SPEED_5GB_FULL;
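
Taken together, the branches above define the sysctl's bit encoding, which is worth summarizing in one place (derived directly from the checks, nothing added):

/*
 * advertise_speed encoding (bitwise OR of):
 *   0x01 -> 100 Mb    0x02 -> 1 Gb    0x04 -> 10 Gb
 *   0x08 -> 10 Mb     0x10 -> 2.5 Gb  0x20 -> 5 Gb
 * e.g. 0x32 = 0x20 | 0x10 | 0x02 advertises 5 Gb + 2.5 Gb + 1 Gb.
 */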
@@ -4265,11 +5203,11 @@ ixgbe_set_advertise(struct ixgbe_softc *sc, int advertise)
static int
ixgbe_get_default_advertise(struct ixgbe_softc *sc)
{
- struct ixgbe_hw *hw = &sc->hw;
- int speed;
+ struct ixgbe_hw *hw = &sc->hw;
+ int speed;
ixgbe_link_speed link_caps = 0;
- s32 err;
- bool negotiate = false;
+ s32 err;
+ bool negotiate = false;
/*
* Advertised speed means nothing unless it's copper or
@@ -4319,9 +5257,9 @@ static int
ixgbe_sysctl_dmac(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- if_t ifp = iflib_get_ifp(sc->ctx);
- int error;
- u16 newval;
+ if_t ifp = iflib_get_ifp(sc->ctx);
+ int error;
+ u16 newval;
newval = sc->dmac;
error = sysctl_handle_16(oidp, &newval, 0, req);
@@ -4374,8 +5312,8 @@ static int
ixgbe_sysctl_power_state(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- device_t dev = sc->dev;
- int curr_ps, new_ps, error = 0;
+ device_t dev = sc->dev;
+ int curr_ps, new_ps, error = 0;
curr_ps = new_ps = pci_get_powerstate(dev);
@@ -4414,8 +5352,8 @@ ixgbe_sysctl_wol_enable(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- int new_wol_enabled;
- int error = 0;
+ int new_wol_enabled;
+ int error = 0;
new_wol_enabled = hw->wol_enabled;
error = sysctl_handle_int(oidp, &new_wol_enabled, 0, req);
@@ -4454,8 +5392,8 @@ static int
ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- int error = 0;
- u32 new_wufc;
+ int error = 0;
+ u32 new_wufc;
new_wufc = sc->wufc;
@@ -4482,12 +5420,15 @@ ixgbe_sysctl_wufc(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- device_t dev = sc->dev;
- struct sbuf *buf;
- int error = 0, reta_size;
- u32 reg;
+ device_t dev = sc->dev;
+ struct sbuf *buf;
+ int error = 0, reta_size;
+ u32 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
@@ -4540,9 +5481,12 @@ ixgbe_sysctl_print_rss_config(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- u16 reg;
+ u16 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
device_printf(iflib_get_dev(sc->ctx),
@@ -4553,7 +5497,8 @@ ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
if (hw->phy.ops.read_reg(hw, IXGBE_PHY_CURRENT_TEMP,
IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &reg)) {
device_printf(iflib_get_dev(sc->ctx),
- "Error reading from PHY's current temperature register\n");
+ "Error reading from PHY's current temperature register"
+ "\n");
return (EAGAIN);
}
@@ -4572,9 +5517,12 @@ ixgbe_sysctl_phy_temp(SYSCTL_HANDLER_ARGS)
static int
ixgbe_sysctl_phy_overtemp_occurred(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
- u16 reg;
+ u16 reg;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
if (hw->device_id != IXGBE_DEV_ID_X550EM_X_10G_T) {
device_printf(iflib_get_dev(sc->ctx),
@@ -4608,10 +5556,13 @@ static int
ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
{
struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
- device_t dev = sc->dev;
- if_t ifp = iflib_get_ifp(sc->ctx);
- int curr_eee, new_eee, error = 0;
- s32 retval;
+ device_t dev = sc->dev;
+ if_t ifp = iflib_get_ifp(sc->ctx);
+ int curr_eee, new_eee, error = 0;
+ s32 retval;
+
+ if (atomic_load_acq_int(&sc->recovery_mode))
+ return (EPERM);
curr_eee = new_eee = !!(sc->feat_en & IXGBE_FEATURE_EEE);
@@ -4651,17 +5602,54 @@ ixgbe_sysctl_eee_state(SYSCTL_HANDLER_ARGS)
return (error);
} /* ixgbe_sysctl_eee_state */
+static int
+ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS)
+{
+ struct ixgbe_softc *sc;
+ u32 reg, val, shift;
+ int error, mask;
+
+ sc = oidp->oid_arg1;
+ switch (oidp->oid_arg2) {
+ case 0:
+ reg = IXGBE_DTXTCPFLGL;
+ shift = 0;
+ break;
+ case 1:
+ reg = IXGBE_DTXTCPFLGL;
+ shift = 16;
+ break;
+ case 2:
+ reg = IXGBE_DTXTCPFLGH;
+ shift = 0;
+ break;
+ default:
+ return (EINVAL);
+ }
+ val = IXGBE_READ_REG(&sc->hw, reg);
+ mask = (val >> shift) & 0xfff;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+ if (mask < 0 || mask > 0xfff)
+ return (EINVAL);
+ val = (val & ~(0xfff << shift)) | (mask << shift);
+ IXGBE_WRITE_REG(&sc->hw, reg, val);
+ return (0);
+}
+
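
The handler above is a textbook read-modify-write of a 12-bit field at a variable bit offset. A worked example with concrete numbers, independent of the hardware registers:

/* RMW of a 12-bit field at bit offset `shift` in a 32-bit value. */
uint32_t val = 0xAAAA5555, shift = 16;
uint32_t field = (val >> shift) & 0xfff;	/* extract: 0xAAA */

/* Write a new field value, 0x123, without disturbing other bits. */
val = (val & ~(0xfff << shift)) | (0x123 << shift);
/* val is now 0xA1235555 */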
/************************************************************************
* ixgbe_init_device_features
************************************************************************/
static void
ixgbe_init_device_features(struct ixgbe_softc *sc)
{
- sc->feat_cap = IXGBE_FEATURE_NETMAP
- | IXGBE_FEATURE_RSS
- | IXGBE_FEATURE_MSI
- | IXGBE_FEATURE_MSIX
- | IXGBE_FEATURE_LEGACY_IRQ;
+ sc->feat_cap = IXGBE_FEATURE_NETMAP |
+ IXGBE_FEATURE_RSS |
+ IXGBE_FEATURE_MSI |
+ IXGBE_FEATURE_MSIX |
+ IXGBE_FEATURE_LEGACY_IRQ;
/* Set capabilities first... */
switch (sc->hw.mac.type) {
@@ -4677,15 +5665,20 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
sc->feat_cap |= IXGBE_FEATURE_BYPASS;
break;
case ixgbe_mac_X550:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_TEMP_SENSOR;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
break;
case ixgbe_mac_X550EM_x:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
+ if (sc->hw.device_id == IXGBE_DEV_ID_X550EM_X_KR)
+ sc->feat_cap |= IXGBE_FEATURE_EEE;
break;
case ixgbe_mac_X550EM_a:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
sc->feat_cap |= IXGBE_FEATURE_SRIOV;
sc->feat_cap |= IXGBE_FEATURE_FDIR;
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
@@ -4704,6 +5697,10 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
break;
+ case ixgbe_mac_E610:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
+ sc->feat_cap |= IXGBE_FEATURE_DBG_DUMP;
+ break;
default:
break;
}
@@ -4721,6 +5718,12 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
/* Thermal Sensor */
if (sc->feat_cap & IXGBE_FEATURE_TEMP_SENSOR)
sc->feat_en |= IXGBE_FEATURE_TEMP_SENSOR;
+ /* Recovery mode */
+ if (sc->feat_cap & IXGBE_FEATURE_RECOVERY_MODE)
+ sc->feat_en |= IXGBE_FEATURE_RECOVERY_MODE;
+ /* FW Debug Dump */
+ if (sc->feat_cap & IXGBE_FEATURE_DBG_DUMP)
+ sc->feat_en |= IXGBE_FEATURE_DBG_DUMP;
/* Enabled via global sysctl... */
/* Flow Director */
@@ -4728,7 +5731,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->feat_cap & IXGBE_FEATURE_FDIR)
sc->feat_en |= IXGBE_FEATURE_FDIR;
else
- device_printf(sc->dev, "Device does not support Flow Director. Leaving disabled.");
+ device_printf(sc->dev,
+ "Device does not support Flow Director."
+ " Leaving disabled.");
}
/*
* Message Signal Interrupts - Extended (MSI-X)
@@ -4762,7 +5767,8 @@ ixgbe_check_fan_failure(struct ixgbe_softc *sc, u32 reg, bool in_interrupt)
IXGBE_ESDP_SDP1;
if (reg & mask)
- device_printf(sc->dev, "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
+ device_printf(sc->dev,
+ "\nCRITICAL: FAN FAILURE!! REPLACE IMMEDIATELY!!\n");
} /* ixgbe_check_fan_failure */
/************************************************************************
@@ -4772,14 +5778,43 @@ static void
ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
{
struct ixgbe_nvm_version nvm_ver = {0};
- uint16_t phyfw = 0;
- int status;
const char *space = "";
+ ixgbe_get_nvm_version(hw, &nvm_ver); /* NVM version */
ixgbe_get_oem_prod_version(hw, &nvm_ver); /* OEM's NVM version */
+ ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack: a build ID in Intel's SCM */
ixgbe_get_orom_version(hw, &nvm_ver); /* Option ROM */
- ixgbe_get_etk_id(hw, &nvm_ver); /* eTrack identifies a build in Intel's SCM */
- status = ixgbe_get_phy_firmware_version(hw, &phyfw);
+
+ /* FW version */
+ if ((nvm_ver.phy_fw_maj == 0x0 &&
+ nvm_ver.phy_fw_min == 0x0 &&
+ nvm_ver.phy_fw_id == 0x0) ||
+ (nvm_ver.phy_fw_maj == 0xF &&
+ nvm_ver.phy_fw_min == 0xFF &&
+ nvm_ver.phy_fw_id == 0xF)) {
+ /* If major, minor and id numbers are set to 0,
+ * reading FW version is unsupported. If major number
+ * is set to 0xF, minor is set to 0xFF and id is set
+ * to 0xF, this means that number read is invalid. */
+ } else
+ sbuf_printf(buf, "fw %d.%d.%d ",
+ nvm_ver.phy_fw_maj, nvm_ver.phy_fw_min,
+ nvm_ver.phy_fw_id);
+
+ /* NVM version */
+ if ((nvm_ver.nvm_major == 0x0 &&
+ nvm_ver.nvm_minor == 0x0 &&
+ nvm_ver.nvm_id == 0x0) ||
+ (nvm_ver.nvm_major == 0xF &&
+ nvm_ver.nvm_minor == 0xFF &&
+ nvm_ver.nvm_id == 0xF)) {
+ /* If major, minor and id numbers are set to 0,
+ * reading NVM version is unsupported. If major number
+ * is set to 0xF, minor is set to 0xFF and id is set
+ * to 0xF, this means that number read is invalid. */
+ } else
+ sbuf_printf(buf, "nvm %x.%02x.%x ",
+ nvm_ver.nvm_major, nvm_ver.nvm_minor, nvm_ver.nvm_id);
if (nvm_ver.oem_valid) {
sbuf_printf(buf, "NVM OEM V%d.%d R%d", nvm_ver.oem_major,
@@ -4789,18 +5824,15 @@ ixgbe_sbuf_fw_version(struct ixgbe_hw *hw, struct sbuf *buf)
if (nvm_ver.or_valid) {
sbuf_printf(buf, "%sOption ROM V%d-b%d-p%d",
- space, nvm_ver.or_major, nvm_ver.or_build, nvm_ver.or_patch);
+ space, nvm_ver.or_major, nvm_ver.or_build,
+ nvm_ver.or_patch);
space = " ";
}
if (nvm_ver.etk_id != ((NVM_VER_INVALID << NVM_ETK_SHIFT) |
- NVM_VER_INVALID)) {
+ NVM_VER_INVALID | 0xFFFFFFFF)) {
sbuf_printf(buf, "%seTrack 0x%08x", space, nvm_ver.etk_id);
- space = " ";
}
-
- if (phyfw != 0 && status == IXGBE_SUCCESS)
- sbuf_printf(buf, "%sPHY FW V%d", space, phyfw);
} /* ixgbe_sbuf_fw_version */
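
Both validity checks above encode the same sentinel convention: all-zero fields mean version reporting is unsupported, and the all-ones pattern (major 0xF, minor 0xFF, id 0xF) means the read returned invalid data. A small predicate capturing that rule; the helper name is mine, not the driver's:

/* True when a major/minor/id triple holds a printable version. */
static bool
nvm_triple_valid(u8 maj, u8 min, u8 id)
{
	if (maj == 0x0 && min == 0x0 && id == 0x0)
		return (false);		/* reporting unsupported */
	if (maj == 0xF && min == 0xFF && id == 0xF)
		return (false);		/* read returned invalid data */
	return (true);
}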
/************************************************************************
@@ -4838,7 +5870,7 @@ ixgbe_print_fw_version(if_ctx_t ctx)
static int
ixgbe_sysctl_print_fw_version(SYSCTL_HANDLER_ARGS)
{
- struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
+ struct ixgbe_softc *sc = (struct ixgbe_softc *)arg1;
struct ixgbe_hw *hw = &sc->hw;
device_t dev = sc->dev;
struct sbuf *buf;
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 66a1e4fe2df1..8a1c1aae041d 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#include "opt_inet.h"
@@ -58,13 +58,20 @@ static const char ixv_driver_version[] = "2.0.1-k";
************************************************************************/
static const pci_vendor_info_t ixv_vendor_info_array[] =
{
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF, "Intel(R) X520 82599 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF, "Intel(R) X540 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF, "Intel(R) X550 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF, "Intel(R) X552 Virtual Function"),
- PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF, "Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF,
+ "Intel(R) X520 82599 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF,
+ "Intel(R) X540 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF,
+ "Intel(R) X550 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF,
+ "Intel(R) X552 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
+ "Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
+ "Intel(R) E610 Virtual Function"),
/* required last entry */
-PVID_END
+ PVID_END
};
/************************************************************************
@@ -76,8 +83,10 @@ static int ixv_if_attach_post(if_ctx_t);
static int ixv_if_detach(if_ctx_t);
static int ixv_if_rx_queue_intr_enable(if_ctx_t, uint16_t);
-static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
-static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int);
+static int ixv_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
+static int ixv_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int,
+ int);
static void ixv_if_queues_free(if_ctx_t);
static void ixv_identify_hardware(if_ctx_t);
static void ixv_init_device_features(struct ixgbe_softc *);
@@ -239,17 +248,17 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
int ntxqs, int ntxqsets)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que;
- int i, j, error;
+ int i, j, error;
MPASS(sc->num_tx_queues == ntxqsets);
MPASS(ntxqs == 1);
/* Allocate queue structure memory */
sc->tx_queues =
- (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) * ntxqsets,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ (struct ix_tx_queue *)malloc(sizeof(struct ix_tx_queue) *
+ ntxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->tx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -263,13 +272,14 @@ ixv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
txr->sc = que->sc = sc;
/* Allocate report status array */
- if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ if (!(txr->tx_rsq = (qidx_t *)malloc(sizeof(qidx_t) *
+ scctx->isc_ntxd[0], M_DEVBUF, M_NOWAIT | M_ZERO))) {
error = ENOMEM;
goto fail;
}
for (j = 0; j < scctx->isc_ntxd[0]; j++)
txr->tx_rsq[j] = QIDX_INVALID;
- /* get the virtual and physical address of the hardware queues */
+ /* get virtual and physical address of the hardware queues */
txr->tail = IXGBE_VFTDT(txr->me);
txr->tx_base = (union ixgbe_adv_tx_desc *)vaddrs[i*ntxqs];
txr->tx_paddr = paddrs[i*ntxqs];
@@ -299,15 +309,15 @@ ixv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs,
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que;
- int i, error;
+ int i, error;
MPASS(sc->num_rx_queues == nrxqsets);
MPASS(nrxqs == 1);
/* Allocate queue structure memory */
sc->rx_queues =
- (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) * nrxqsets,
- M_DEVBUF, M_NOWAIT | M_ZERO);
+ (struct ix_rx_queue *)malloc(sizeof(struct ix_rx_queue) *
+ nrxqsets, M_DEVBUF, M_NOWAIT | M_ZERO);
if (!sc->rx_queues) {
device_printf(iflib_get_dev(ctx),
"Unable to allocate TX ring memory\n");
@@ -348,7 +358,7 @@ ixv_if_queues_free(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_tx_queue *que = sc->tx_queues;
- int i;
+ int i;
if (que == NULL)
goto free;
@@ -382,11 +392,11 @@ free:
static int
ixv_if_attach_pre(if_ctx_t ctx)
{
- struct ixgbe_softc *sc;
- device_t dev;
- if_softc_ctx_t scctx;
+ struct ixgbe_softc *sc;
+ device_t dev;
+ if_softc_ctx_t scctx;
struct ixgbe_hw *hw;
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixv_attach: begin");
@@ -458,7 +468,7 @@ ixv_if_attach_pre(if_ctx_t ctx)
/* Check if VF was disabled by PF */
error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
if (error) {
- /* PF is not capable of controlling VF state. Enable the link. */
+ /* PF is not capable of controlling VF state. Enable link. */
sc->link_enabled = true;
}
@@ -522,8 +532,8 @@ static int
ixv_if_attach_post(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int error = 0;
+ device_t dev = iflib_get_dev(ctx);
+ int error = 0;
/* Setup OS specific network interface */
error = ixv_setup_interface(ctx);
@@ -568,7 +578,7 @@ ixv_if_mtu_set(if_ctx_t ctx, uint32_t mtu)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- int error = 0;
+ int error = 0;
IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
if (mtu > IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) {
@@ -596,9 +606,9 @@ ixv_if_init(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_t ifp = iflib_get_ifp(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
- int error = 0;
+ int error = 0;
INIT_DEBUGOUT("ixv_if_init: begin");
hw->adapter_stopped = false;
@@ -670,8 +680,8 @@ static inline void
ixv_enable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 queue = 1 << vector;
- u32 mask;
+ u32 queue = 1 << vector;
+ u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -684,8 +694,8 @@ static inline void
ixv_disable_queue(struct ixgbe_softc *sc, u32 vector)
{
struct ixgbe_hw *hw = &sc->hw;
- u64 queue = (u64)(1 << vector);
- u32 mask;
+ u64 queue = (u64)(1 << vector);
+ u32 mask;
mask = (IXGBE_EIMS_RTX_QUEUE & queue);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
@@ -699,7 +709,7 @@ static int
ixv_msix_que(void *arg)
{
struct ix_rx_queue *que = arg;
- struct ixgbe_softc *sc = que->sc;
+ struct ixgbe_softc *sc = que->sc;
ixv_disable_queue(sc, que->msix);
++que->irqs;
@@ -713,9 +723,9 @@ ixv_msix_que(void *arg)
static int
ixv_msix_mbx(void *arg)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ixgbe_hw *hw = &sc->hw;
- u32 reg;
+ u32 reg;
++sc->link_irq;
@@ -811,11 +821,13 @@ static int
ixv_negotiate_api(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- int mbx_api[] = { ixgbe_mbox_api_12,
- ixgbe_mbox_api_11,
- ixgbe_mbox_api_10,
- ixgbe_mbox_api_unknown };
- int i = 0;
+ int mbx_api[] = {
+ ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown
+ };
+ int i = 0;
while (mbx_api[i] != ixgbe_mbox_api_unknown) {
if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
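
(The restructured list above is walked newest-first: against a PF that
only speaks mailbox API 1.0, the 1.2 and 1.1 attempts fail before
ixgbe_mbox_api_10 is accepted, and the ixgbe_mbox_api_unknown sentinel
terminates the walk if nothing is accepted.)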
@@ -830,7 +842,8 @@ ixv_negotiate_api(struct ixgbe_softc *sc)
static u_int
ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
{
- bcopy(LLADDR(addr), &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+ bcopy(LLADDR(addr),
+ &((u8 *)cb_arg)[cnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
IXGBE_ETH_LENGTH_OF_ADDRESS);
return (++cnt);
@@ -844,11 +857,11 @@ ixv_if_multi_set_cb(void *cb_arg, struct sockaddr_dl *addr, u_int cnt)
static void
ixv_if_multi_set(if_ctx_t ctx)
{
- u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u8 *update_ptr;
- if_t ifp = iflib_get_ifp(ctx);
- int mcnt = 0;
+ u8 mta[MAX_NUM_MULTICAST_ADDRESSES * IXGBE_ETH_LENGTH_OF_ADDRESS];
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ u8 *update_ptr;
+ if_t ifp = iflib_get_ifp(ctx);
+ int mcnt = 0;
IOCTL_DEBUGOUT("ixv_if_multi_set: begin");
@@ -908,8 +921,8 @@ static void
ixv_if_update_admin_status(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- s32 status;
+ device_t dev = iflib_get_dev(ctx);
+ s32 status;
sc->hw.mac.get_link_status = true;
@@ -955,7 +968,7 @@ ixv_if_update_admin_status(if_ctx_t ctx)
static void
ixv_if_stop(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
INIT_DEBUGOUT("ixv_stop: begin\n");
@@ -981,8 +994,8 @@ ixv_if_stop(if_ctx_t ctx)
static void
ixv_identify_hardware(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ixgbe_hw *hw = &sc->hw;
/* Save off the information about this board */
@@ -1009,6 +1022,9 @@ ixv_identify_hardware(if_ctx_t ctx)
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ break;
default:
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
@@ -1023,22 +1039,24 @@ static int
ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
struct ix_rx_queue *rx_que = sc->rx_queues;
struct ix_tx_queue *tx_que;
- int error, rid, vector = 0;
- char buf[16];
+ int error, rid, vector = 0;
+ char buf[16];
for (int i = 0; i < sc->num_rx_queues; i++, vector++, rx_que++) {
rid = vector + 1;
snprintf(buf, sizeof(buf), "rxq%d", i);
error = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid,
- IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me, buf);
+ IFLIB_INTR_RXTX, ixv_msix_que, rx_que, rx_que->rxr.me,
+ buf);
if (error) {
device_printf(iflib_get_dev(ctx),
- "Failed to allocate que int %d err: %d", i, error);
+ "Failed to allocate que int %d err: %d",
+ i, error);
sc->num_rx_queues = i + 1;
goto fail;
}
@@ -1073,11 +1091,15 @@ ixv_if_msix_intr_assign(if_ctx_t ctx, int msix)
*/
if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
int msix_ctrl;
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
+ if (pci_find_cap(dev, PCIY_MSIX, &rid)) {
+ device_printf(dev,
+ "Finding MSIX capability failed\n");
+ } else {
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
}
return (0);
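
For context, the hardened workaround above now checks the return value
of pci_find_cap(9), which returns 0 on success and stores the
capability offset in its last argument. A minimal standalone sketch of
the same read-modify-write, hand-written here for illustration rather
than taken from the change:

	int off;
	uint16_t ctrl;

	/* Locate the MSI-X capability; skip the quirk if it is absent. */
	if (pci_find_cap(dev, PCIY_MSIX, &off) == 0) {
		ctrl = pci_read_config(dev, off + PCIR_MSIX_CTRL, 2);
		ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
		pci_write_config(dev, off + PCIR_MSIX_CTRL, ctrl, 2);
	}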
@@ -1098,21 +1120,21 @@ static int
ixv_allocate_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- device_t dev = iflib_get_dev(ctx);
- int rid;
+ device_t dev = iflib_get_dev(ctx);
+ int rid;
rid = PCIR_BAR(0);
sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: memory\n");
+ device_printf(dev,
+ "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem);
- sc->osdep.mem_bus_space_handle =
- rman_get_bushandle(sc->pci_mem);
+ sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem);
sc->hw.hw_addr = (u8 *)&sc->osdep.mem_bus_space_handle;
return (0);
@@ -1126,7 +1148,7 @@ ixv_free_pci_resources(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = sc->rx_queues;
- device_t dev = iflib_get_dev(ctx);
+ device_t dev = iflib_get_dev(ctx);
/* Release all MSI-X queue resources */
if (sc->intr_type == IFLIB_INTR_MSIX)
@@ -1153,7 +1175,7 @@ ixv_setup_interface(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
if_softc_ctx_t scctx = sc->shared;
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
INIT_DEBUGOUT("ixv_setup_interface: begin");
@@ -1175,7 +1197,7 @@ static uint64_t
ixv_if_get_counter(if_ctx_t ctx, ift_counter cnt)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
switch (cnt) {
case IFCOUNTER_IPACKETS:
@@ -1219,16 +1241,16 @@ static void
ixv_initialize_transmit_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- struct ixgbe_hw *hw = &sc->hw;
- if_softc_ctx_t scctx = sc->shared;
+ struct ixgbe_hw *hw = &sc->hw;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = sc->tx_queues;
- int i;
+ int i;
for (i = 0; i < sc->num_tx_queues; i++, que++) {
struct tx_ring *txr = &que->txr;
- u64 tdba = txr->tx_paddr;
- u32 txctrl, txdctl;
- int j = txr->me;
+ u64 tdba = txr->tx_paddr;
+ u32 txctrl, txdctl;
+ int j = txr->me;
/* Set WTHRESH to 8, burst writeback */
txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
@@ -1278,10 +1300,10 @@ static void
ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 reta = 0, mrqc, rss_key[10];
- int queue_id;
- int i, j;
- u32 rss_hash_config;
+ u32 reta = 0, mrqc, rss_key[10];
+ int queue_id;
+ int i, j;
+ u32 rss_hash_config;
if (sc->feat_en & IXGBE_FEATURE_RSS) {
/* Fetch the configured RSS key */
@@ -1348,22 +1370,25 @@ ixv_initialize_rss_mapping(struct ixgbe_softc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_IPV6_EX defined,"
+ " but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_TCP_IPV6_EX defined,"
+ " but not supported\n", __func__);
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6_EX)
- device_printf(sc->dev, "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined, but not supported\n",
- __func__);
+ device_printf(sc->dev,
+ "%s: RSS_HASHTYPE_RSS_UDP_IPV6_EX defined,"
+ " but not supported\n", __func__);
IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
} /* ixv_initialize_rss_mapping */
-
+#define BSIZEPKT_ROUNDUP ((1<<IXGBE_SRRCTL_BSIZEPKT_SHIFT)-1)
/************************************************************************
* ixv_initialize_receive_units - Setup receive registers and features.
************************************************************************/
@@ -1371,22 +1396,22 @@ static void
ixv_initialize_receive_units(if_ctx_t ctx)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- if_softc_ctx_t scctx;
- struct ixgbe_hw *hw = &sc->hw;
- if_t ifp = iflib_get_ifp(ctx);
+ if_softc_ctx_t scctx;
+ struct ixgbe_hw *hw = &sc->hw;
+#ifdef DEV_NETMAP
+ if_t ifp = iflib_get_ifp(ctx);
+#endif
struct ix_rx_queue *que = sc->rx_queues;
- u32 bufsz, psrtype;
+ u32 bufsz, psrtype;
- if (if_getmtu(ifp) > ETHERMTU)
- bufsz = 4096 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- else
- bufsz = 2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ bufsz = (sc->rx_mbuf_sz + BSIZEPKT_ROUNDUP) >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
- psrtype = IXGBE_PSRTYPE_TCPHDR
- | IXGBE_PSRTYPE_UDPHDR
- | IXGBE_PSRTYPE_IPV4HDR
- | IXGBE_PSRTYPE_IPV6HDR
- | IXGBE_PSRTYPE_L2HDR;
+ psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
if (sc->num_rx_queues > 1)
psrtype |= 1 << 29;
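
To make the new bufsz computation concrete: BSIZEPKT_ROUNDUP is
((1 << IXGBE_SRRCTL_BSIZEPKT_SHIFT) - 1), and assuming the shared-code
value IXGBE_SRRCTL_BSIZEPKT_SHIFT == 10 (1 KB granularity), an
rx_mbuf_sz of 2048 gives (2048 + 1023) >> 10 = 2, i.e. a 2 KB receive
buffer, while 4096 gives 4. The buffer size now tracks the cluster
size iflib actually selected instead of the old MTU-based 2 KB/4 KB
choice.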
@@ -1395,15 +1420,18 @@ ixv_initialize_receive_units(if_ctx_t ctx)
/* Tell PF our max_frame size */
if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
- device_printf(sc->dev, "There is a problem with the PF setup. It is likely the receive unit for this VF will not function correctly.\n");
+ device_printf(sc->dev,
+ "There is a problem with the PF setup. It is likely the"
+ " receive unit for this VF will not function correctly."
+ "\n");
}
scctx = sc->shared;
for (int i = 0; i < sc->num_rx_queues; i++, que++) {
struct rx_ring *rxr = &que->rxr;
- u64 rdba = rxr->rx_paddr;
- u32 reg, rxdctl;
- int j = rxr->me;
+ u64 rdba = rxr->rx_paddr;
+ u32 reg, rxdctl;
+ int j = rxr->me;
/* Disable the queue */
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
@@ -1494,10 +1522,10 @@ ixv_initialize_receive_units(if_ctx_t ctx)
static void
ixv_setup_vlan_support(if_ctx_t ctx)
{
- if_t ifp = iflib_get_ifp(ctx);
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
- u32 ctrl, vid, vfta, retry;
+ u32 ctrl, vid, vfta, retry;
/*
* We get here thru if_init, meaning
@@ -1568,7 +1596,7 @@ static void
ixv_if_register_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
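
(The VFTA is a 4096-bit table stored as 128 32-bit words, so the shift
and mask above select the word and the bit within it. Worked by hand:
vtag 291 (0x123) gives index = (291 >> 5) & 0x7F = 9 and
bit = vtag & 0x1F = 3, i.e. bit 3 of VFTA word 9.)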
@@ -1586,7 +1614,7 @@ static void
ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
{
struct ixgbe_softc *sc = iflib_get_softc(ctx);
- u16 index, bit;
+ u16 index, bit;
index = (vtag >> 5) & 0x7F;
bit = vtag & 0x1F;
@@ -1600,10 +1628,10 @@ ixv_if_unregister_vlan(if_ctx_t ctx, u16 vtag)
static void
ixv_if_enable_intr(if_ctx_t ctx)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw = &sc->hw;
struct ix_rx_queue *que = sc->rx_queues;
- u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
@@ -1635,7 +1663,7 @@ ixv_if_disable_intr(if_ctx_t ctx)
static int
ixv_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ix_rx_queue *que = &sc->rx_queues[rxqid];
ixv_enable_queue(sc, que->rxr.me);
@@ -1655,7 +1683,7 @@ static void
ixv_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
{
struct ixgbe_hw *hw = &sc->hw;
- u32 ivar, index;
+ u32 ivar, index;
vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -1805,18 +1833,18 @@ ixv_update_stats(struct ixgbe_softc *sc)
static void
ixv_add_stats_sysctls(struct ixgbe_softc *sc)
{
- device_t dev = sc->dev;
- struct ix_tx_queue *tx_que = sc->tx_queues;
- struct ix_rx_queue *rx_que = sc->rx_queues;
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ device_t dev = sc->dev;
+ struct ix_tx_queue *tx_que = sc->tx_queues;
+ struct ix_rx_queue *rx_que = sc->rx_queues;
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
struct ixgbevf_hw_stats *stats = &sc->stats.vf;
- struct sysctl_oid *stat_node, *queue_node;
- struct sysctl_oid_list *stat_list, *queue_list;
+ struct sysctl_oid *stat_node, *queue_node;
+ struct sysctl_oid_list *stat_list, *queue_list;
#define QUEUE_NAME_LEN 32
- char namebuf[QUEUE_NAME_LEN];
+ char namebuf[QUEUE_NAME_LEN];
/* Driver Statistics */
SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
@@ -1919,9 +1947,9 @@ ixv_sysctl_debug(SYSCTL_HANDLER_ARGS)
static void
ixv_init_device_features(struct ixgbe_softc *sc)
{
- sc->feat_cap = IXGBE_FEATURE_NETMAP
- | IXGBE_FEATURE_VF
- | IXGBE_FEATURE_LEGACY_TX;
+ sc->feat_cap = IXGBE_FEATURE_NETMAP |
+ IXGBE_FEATURE_VF |
+ IXGBE_FEATURE_LEGACY_TX;
/* A tad short on feature flags for VFs, atm. */
switch (sc->hw.mac.type) {
@@ -1932,6 +1960,7 @@ ixv_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
sc->feat_cap |= IXGBE_FEATURE_RSS;
break;
diff --git a/sys/dev/ixgbe/if_sriov.c b/sys/dev/ixgbe/if_sriov.c
index 7cdd287b85bf..1998cdb016f7 100644
--- a/sys/dev/ixgbe/if_sriov.c
+++ b/sys/dev/ixgbe/if_sriov.c
@@ -95,33 +95,33 @@ ixgbe_align_all_queue_indices(struct ixgbe_softc *sc)
/* Support functions for SR-IOV/VF management */
static inline void
-ixgbe_send_vf_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_msg(struct ixgbe_hw *hw, struct ixgbe_vf *vf, u32 msg)
{
if (vf->flags & IXGBE_VF_CTS)
msg |= IXGBE_VT_MSGTYPE_CTS;
- sc->hw.mbx.ops.write(&sc->hw, &msg, 1, vf->pool);
+ ixgbe_write_mbx(hw, &msg, 1, vf->pool);
}
static inline void
-ixgbe_send_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_success(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
- ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_ACK);
+ ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_SUCCESS);
}
static inline void
-ixgbe_send_vf_nack(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
+ixgbe_send_vf_failure(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 msg)
{
msg &= IXGBE_VT_MSG_MASK;
- ixgbe_send_vf_msg(sc, vf, msg | IXGBE_VT_MSGTYPE_NACK);
+ ixgbe_send_vf_msg(&sc->hw, vf, msg | IXGBE_VT_MSGTYPE_FAILURE);
}
static inline void
ixgbe_process_vf_ack(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
if (!(vf->flags & IXGBE_VF_CTS))
- ixgbe_send_vf_nack(sc, vf, 0);
+ ixgbe_send_vf_failure(sc, vf, 0);
}
static inline boolean_t
@@ -210,14 +210,14 @@ ixgbe_ping_all_vfs(struct ixgbe_softc *sc)
for (int i = 0; i < sc->num_vfs; i++) {
vf = &sc->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE)
- ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
+ ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
}
} /* ixgbe_ping_all_vfs */
static void
ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
- uint16_t tag)
+ uint16_t tag)
{
struct ixgbe_hw *hw;
uint32_t vmolr, vmvir;
@@ -254,11 +254,21 @@ ixgbe_vf_set_default_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf->pool), vmvir);
} /* ixgbe_vf_set_default_vlan */
+static void
+ixgbe_clear_vfmbmem(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t vf_index = IXGBE_VF_INDEX(vf->pool);
+ uint16_t mbx_size = hw->mbx.size;
+ uint16_t i;
+
+ for (i = 0; i < mbx_size; ++i)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_index), i, 0x0);
+} /* ixgbe_clear_vfmbmem */
static boolean_t
ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
{
-
/*
* Frame size compatibility between PF and VF is only a problem on
* 82599-based cards. X540 and later support any combination of jumbo
@@ -271,8 +281,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
/*
- * On legacy (1.0 and older) VF versions, we don't support jumbo
- * frames on either the PF or the VF.
+ * On legacy (1.0 and older) VF versions, we don't support
+ * jumbo frames on either the PF or the VF.
*/
if (sc->max_frame_size > ETHER_MAX_LEN ||
vf->maximum_frame_size > ETHER_MAX_LEN)
@@ -291,8 +301,8 @@ ixgbe_vf_frame_size_compatible(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
return (true);
/*
- * Jumbo frames only work with VFs if the PF is also using jumbo
- * frames.
+ * Jumbo frames only work with VFs if the PF is also using
+ * jumbo frames.
*/
if (sc->max_frame_size <= ETHER_MAX_LEN)
return (true);
@@ -310,6 +320,8 @@ ixgbe_process_vf_reset(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
// XXX clear multicast addresses
ixgbe_clear_rar(&sc->hw, vf->rar_index);
+ ixgbe_clear_vfmbmem(sc, vf);
+ ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool));
vf->api_ver = IXGBE_API_VER_UNKNOWN;
} /* ixgbe_process_vf_reset */
@@ -362,19 +374,19 @@ ixgbe_vf_reset_msg(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (ixgbe_validate_mac_addr(vf->ether_addr) == 0) {
ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr,
vf->pool, true);
- ack = IXGBE_VT_MSGTYPE_ACK;
+ ack = IXGBE_VT_MSGTYPE_SUCCESS;
} else
- ack = IXGBE_VT_MSGTYPE_NACK;
+ ack = IXGBE_VT_MSGTYPE_FAILURE;
ixgbe_vf_enable_transmit(sc, vf);
ixgbe_vf_enable_receive(sc, vf);
vf->flags |= IXGBE_VF_CTS;
- resp[0] = IXGBE_VF_RESET | ack | IXGBE_VT_MSGTYPE_CTS;
+ resp[0] = IXGBE_VF_RESET | ack;
bcopy(vf->ether_addr, &resp[1], ETHER_ADDR_LEN);
resp[3] = hw->mac.mc_filter_type;
- hw->mbx.ops.write(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
+ ixgbe_write_mbx(hw, resp, IXGBE_VF_PERMADDR_MSG_LEN, vf->pool);
} /* ixgbe_vf_reset_msg */
@@ -387,12 +399,12 @@ ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
/* Check that the VF has permission to change the MAC address. */
if (!(vf->flags & IXGBE_VF_CAP_MAC) && ixgbe_vf_mac_changed(vf, mac)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
if (ixgbe_validate_mac_addr(mac) != 0) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
@@ -401,7 +413,7 @@ ixgbe_vf_set_mac(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
ixgbe_set_rar(&sc->hw, vf->rar_index, vf->ether_addr, vf->pool,
true);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mac */
@@ -435,7 +447,7 @@ ixgbe_vf_set_mc_addr(struct ixgbe_softc *sc, struct ixgbe_vf *vf, u32 *msg)
vmolr |= IXGBE_VMOLR_ROMPE;
IXGBE_WRITE_REG(&sc->hw, IXGBE_VMOLR(vf->pool), vmolr);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_mc_addr */
@@ -451,18 +463,18 @@ ixgbe_vf_set_vlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
tag = msg[1] & IXGBE_VLVF_VLANID_MASK;
if (!(vf->flags & IXGBE_VF_CAP_VLAN)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
/* It is illegal to enable vlan tag 0. */
if (tag == 0 && enable != 0) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
ixgbe_set_vfta(hw, tag, vf->pool, enable, false);
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_vlan */
@@ -477,7 +489,7 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (vf_max_size < ETHER_CRC_LEN) {
/* We intentionally ACK invalid LPE requests. */
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -485,7 +497,7 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
if (vf_max_size > IXGBE_MAX_FRAME_SIZE) {
/* We intentionally ACK invalid LPE requests. */
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -507,16 +519,16 @@ ixgbe_vf_set_lpe(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd);
}
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
} /* ixgbe_vf_set_lpe */
static void
ixgbe_vf_set_macvlan(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
- uint32_t *msg)
+ uint32_t *msg)
{
//XXX implement this
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
} /* ixgbe_vf_set_macvlan */
@@ -524,23 +536,23 @@ static void
ixgbe_vf_api_negotiate(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
uint32_t *msg)
{
-
switch (msg[1]) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_1_1:
vf->api_ver = msg[1];
- ixgbe_send_vf_ack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
break;
default:
vf->api_ver = IXGBE_API_VER_UNKNOWN;
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
break;
}
} /* ixgbe_vf_api_negotiate */
static void
-ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
+ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf,
+ uint32_t *msg)
{
struct ixgbe_hw *hw;
uint32_t resp[IXGBE_VF_GET_QUEUES_RESP_LEN];
@@ -552,11 +564,11 @@ ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
switch (msg[0]) {
case IXGBE_API_VER_1_0:
case IXGBE_API_VER_UNKNOWN:
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
return;
}
- resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK |
+ resp[0] = IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS |
IXGBE_VT_MSGTYPE_CTS;
num_queues = ixgbe_vf_queues(sc->iov_mode);
@@ -565,16 +577,16 @@ ixgbe_vf_get_queues(struct ixgbe_softc *sc, struct ixgbe_vf *vf, uint32_t *msg)
resp[IXGBE_VF_TRANS_VLAN] = (vf->default_vlan != 0);
resp[IXGBE_VF_DEF_QUEUE] = 0;
- hw->mbx.ops.write(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
+ ixgbe_write_mbx(hw, resp, IXGBE_VF_GET_QUEUES_RESP_LEN, vf->pool);
} /* ixgbe_vf_get_queues */
static void
ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
{
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
#ifdef KTR
- if_t ifp = iflib_get_ifp(ctx);
+ if_t ifp = iflib_get_ifp(ctx);
#endif
struct ixgbe_hw *hw;
uint32_t msg[IXGBE_VFMAILBOX_SIZE];
@@ -582,7 +594,7 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
hw = &sc->hw;
- error = hw->mbx.ops.read(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
+ error = ixgbe_read_mbx(hw, msg, IXGBE_VFMAILBOX_SIZE, vf->pool);
if (error != 0)
return;
@@ -595,7 +607,7 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
}
if (!(vf->flags & IXGBE_VF_CTS)) {
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_success(sc, vf, msg[0]);
return;
}
@@ -622,17 +634,16 @@ ixgbe_process_vf_msg(if_ctx_t ctx, struct ixgbe_vf *vf)
ixgbe_vf_get_queues(sc, vf, msg);
break;
default:
- ixgbe_send_vf_nack(sc, vf, msg[0]);
+ ixgbe_send_vf_failure(sc, vf, msg[0]);
}
} /* ixgbe_process_vf_msg */
-
/* Tasklet for handling VF -> PF mailbox messages */
void
ixgbe_handle_mbx(void *context)
{
- if_ctx_t ctx = context;
- struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
struct ixgbe_hw *hw;
struct ixgbe_vf *vf;
int i;
@@ -643,13 +654,16 @@ ixgbe_handle_mbx(void *context)
vf = &sc->vfs[i];
if (vf->flags & IXGBE_VF_ACTIVE) {
- if (hw->mbx.ops.check_for_rst(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_rst(hw,
+ vf->pool) == 0)
ixgbe_process_vf_reset(sc, vf);
- if (hw->mbx.ops.check_for_msg(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_msg(hw,
+ vf->pool) == 0)
ixgbe_process_vf_msg(ctx, vf);
- if (hw->mbx.ops.check_for_ack(hw, vf->pool) == 0)
+ if (hw->mbx.ops[vf->pool].check_for_ack(hw,
+ vf->pool) == 0)
ixgbe_process_vf_ack(sc, vf);
}
}
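
(Note the mailbox ops table is now indexed per pool,
hw->mbx.ops[vf->pool]; this matches the 64-entry initialization loop
added to ixgbe_82599.c further down, which points every slot's
init_params at ixgbe_init_mbx_params_pf.)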
@@ -698,8 +712,10 @@ ixgbe_if_iov_init(if_ctx_t ctx, u16 num_vfs, const nvlist_t *config)
}
sc->num_vfs = num_vfs;
- ixgbe_if_init(sc->ctx);
+ ixgbe_init_mbx_params_pf(&sc->hw);
+
sc->feat_en |= IXGBE_FEATURE_SRIOV;
+ ixgbe_if_init(sc->ctx);
return (retval);
@@ -769,7 +785,7 @@ ixgbe_init_vf(struct ixgbe_softc *sc, struct ixgbe_vf *vf)
ixgbe_vf_enable_transmit(sc, vf);
ixgbe_vf_enable_receive(sc, vf);
- ixgbe_send_vf_msg(sc, vf, IXGBE_PF_CONTROL_MSG);
+ ixgbe_send_vf_msg(&sc->hw, vf, IXGBE_PF_CONTROL_MSG);
} /* ixgbe_init_vf */
void
@@ -784,27 +800,27 @@ ixgbe_initialize_iov(struct ixgbe_softc *sc)
/* RMW appropriate registers based on IOV mode */
/* Read... */
- mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
+ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
/* Modify... */
- mrqc &= ~IXGBE_MRQC_MRQE_MASK;
- mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
- gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
+ mrqc &= ~IXGBE_MRQC_MRQE_MASK;
+ mtqc = IXGBE_MTQC_VT_ENA; /* No initial MTQC read needed */
+ gcr_ext |= IXGBE_GCR_EXT_MSIX_EN;
gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
- gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
switch (sc->iov_mode) {
case IXGBE_64_VM:
- mrqc |= IXGBE_MRQC_VMDQRSS64EN;
- mtqc |= IXGBE_MTQC_64VF;
+ mrqc |= IXGBE_MRQC_VMDQRSS64EN;
+ mtqc |= IXGBE_MTQC_64VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
- gpie |= IXGBE_GPIE_VTMODE_64;
+ gpie |= IXGBE_GPIE_VTMODE_64;
break;
case IXGBE_32_VM:
- mrqc |= IXGBE_MRQC_VMDQRSS32EN;
- mtqc |= IXGBE_MTQC_32VF;
+ mrqc |= IXGBE_MRQC_VMDQRSS32EN;
+ mtqc |= IXGBE_MTQC_32VF;
gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
- gpie |= IXGBE_GPIE_VTMODE_32;
+ gpie |= IXGBE_GPIE_VTMODE_32;
break;
default:
panic("Unexpected SR-IOV mode %d", sc->iov_mode);
diff --git a/sys/dev/ixgbe/ix_txrx.c b/sys/dev/ixgbe/ix_txrx.c
index a593cb136760..76c718e2c252 100644
--- a/sys/dev/ixgbe/ix_txrx.c
+++ b/sys/dev/ixgbe/ix_txrx.c
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
Copyright (c) 2001-2017, Intel Corporation
All rights reserved.
@@ -29,7 +29,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#ifndef IXGBE_STANDALONE_BUILD
#include "opt_inet.h"
@@ -80,7 +80,7 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
{
uint32_t vlan_macip_lens, type_tucmd_mlhl;
uint32_t olinfo_status, mss_l4len_idx, pktlen, offload;
- u8 ehdrlen;
+ u8 ehdrlen;
offload = true;
olinfo_status = mss_l4len_idx = vlan_macip_lens = type_tucmd_mlhl = 0;
@@ -105,9 +105,12 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
/* First check if TSO is to be used */
if (pi->ipi_csum_flags & CSUM_TSO) {
/* This is used in the transmit desc in encap */
- pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen - pi->ipi_tcp_hlen;
- mss_l4len_idx |= (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
+ pktlen = pi->ipi_len - ehdrlen - pi->ipi_ip_hlen -
+ pi->ipi_tcp_hlen;
+ mss_l4len_idx |=
+ (pi->ipi_tso_segsz << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |=
+ (pi->ipi_tcp_hlen << IXGBE_ADVTXD_L4LEN_SHIFT);
}
olinfo_status |= pktlen << IXGBE_ADVTXD_PAYLEN_SHIFT;
@@ -126,7 +129,8 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
switch (pi->ipi_ipproto) {
case IPPROTO_TCP:
- if (pi->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
+ if (pi->ipi_csum_flags &
+ (CSUM_IP_TCP | CSUM_IP6_TCP | CSUM_TSO))
type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
else
offload = false;
@@ -168,17 +172,17 @@ ixgbe_tx_ctx_setup(struct ixgbe_adv_tx_context_desc *TXD, if_pkt_info_t pi)
static int
ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
{
- struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
- struct tx_ring *txr = &que->txr;
- int nsegs = pi->ipi_nsegs;
- bus_dma_segment_t *segs = pi->ipi_segs;
- union ixgbe_adv_tx_desc *txd = NULL;
+ struct ixgbe_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_tx_queue *que = &sc->tx_queues[pi->ipi_qsidx];
+ struct tx_ring *txr = &que->txr;
+ int nsegs = pi->ipi_nsegs;
+ bus_dma_segment_t *segs = pi->ipi_segs;
+ union ixgbe_adv_tx_desc *txd = NULL;
struct ixgbe_adv_tx_context_desc *TXD;
- int i, j, first, pidx_last;
- uint32_t olinfo_status, cmd, flags;
- qidx_t ntxd;
+ int i, j, first, pidx_last;
+ uint32_t olinfo_status, cmd, flags;
+ qidx_t ntxd;
cmd = (IXGBE_ADVTXD_DTYP_DATA |
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT);
@@ -249,9 +253,9 @@ ixgbe_isc_txd_encap(void *arg, if_pkt_info_t pi)
static void
ixgbe_isc_txd_flush(void *arg, uint16_t txqid, qidx_t pidx)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
+ struct tx_ring *txr = &que->txr;
IXGBE_WRITE_REG(&sc->hw, txr->tail, pidx);
} /* ixgbe_isc_txd_flush */
@@ -263,14 +267,14 @@ static int
ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
{
struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
+ if_softc_ctx_t scctx = sc->shared;
struct ix_tx_queue *que = &sc->tx_queues[txqid];
- struct tx_ring *txr = &que->txr;
- qidx_t processed = 0;
- int updated;
- qidx_t cur, prev, ntxd, rs_cidx;
- int32_t delta;
- uint8_t status;
+ struct tx_ring *txr = &que->txr;
+ qidx_t processed = 0;
+ int updated;
+ qidx_t cur, prev, ntxd, rs_cidx;
+ int32_t delta;
+ uint8_t status;
rs_cidx = txr->tx_rs_cidx;
if (rs_cidx == txr->tx_rs_pidx)
@@ -319,9 +323,9 @@ ixgbe_isc_txd_credits_update(void *arg, uint16_t txqid, bool clear)
static void
ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
{
- struct ixgbe_softc *sc = arg;
- struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct ixgbe_softc *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[iru->iru_qsidx];
+ struct rx_ring *rxr = &que->rxr;
uint64_t *paddrs;
int i;
uint32_t next_pidx, pidx;
@@ -342,11 +346,12 @@ ixgbe_isc_rxd_refill(void *arg, if_rxd_update_t iru)
* ixgbe_isc_rxd_flush
************************************************************************/
static void
-ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pidx)
+ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused,
+ qidx_t pidx)
{
- struct ixgbe_softc *sc = arg;
+ struct ixgbe_softc *sc = arg;
struct ix_rx_queue *que = &sc->rx_queues[qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct rx_ring *rxr = &que->rxr;
IXGBE_WRITE_REG(&sc->hw, rxr->tail, pidx);
} /* ixgbe_isc_rxd_flush */
@@ -357,12 +362,12 @@ ixgbe_isc_rxd_flush(void *arg, uint16_t qsidx, uint8_t flidx __unused, qidx_t pi
static int
ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
{
- struct ixgbe_softc *sc = arg;
- struct ix_rx_queue *que = &sc->rx_queues[qsidx];
- struct rx_ring *rxr = &que->rxr;
+ struct ixgbe_softc *sc = arg;
+ struct ix_rx_queue *que = &sc->rx_queues[qsidx];
+ struct rx_ring *rxr = &que->rxr;
union ixgbe_adv_rx_desc *rxd;
- uint32_t staterr;
- int cnt, i, nrxd;
+ uint32_t staterr;
+ int cnt, i, nrxd;
nrxd = sc->shared->isc_nrxd[0];
for (cnt = 0, i = pidx; cnt < nrxd && cnt <= budget;) {
@@ -391,16 +396,16 @@ ixgbe_isc_rxd_available(void *arg, uint16_t qsidx, qidx_t pidx, qidx_t budget)
static int
ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
{
- struct ixgbe_softc *sc = arg;
- if_softc_ctx_t scctx = sc->shared;
- struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
- struct rx_ring *rxr = &que->rxr;
- union ixgbe_adv_rx_desc *rxd;
+ struct ixgbe_softc *sc = arg;
+ if_softc_ctx_t scctx = sc->shared;
+ struct ix_rx_queue *que = &sc->rx_queues[ri->iri_qsidx];
+ struct rx_ring *rxr = &que->rxr;
+ union ixgbe_adv_rx_desc *rxd;
- uint16_t pkt_info, len, cidx, i;
- uint32_t ptype;
- uint32_t staterr = 0;
- bool eop;
+ uint16_t pkt_info, len, cidx, i;
+ uint32_t ptype;
+ uint32_t staterr = 0;
+ bool eop;
i = 0;
cidx = ri->iri_cidx;
@@ -425,7 +430,8 @@ ixgbe_isc_rxd_pkt_get(void *arg, if_rxd_info_t ri)
/* Make sure bad packets are discarded */
if (eop && (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) != 0) {
if (sc->feat_en & IXGBE_FEATURE_VF)
- if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS, 1);
+ if_inc_counter(ri->iri_ifp, IFCOUNTER_IERRORS,
+ 1);
rxr->rx_discarded++;
return (EBADMSG);
@@ -478,7 +484,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
uint8_t errors = (uint8_t)(staterr >> 24);
/* If there is a layer 3 or 4 error we are done */
- if (__predict_false(errors & (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
+ if (__predict_false(errors &
+ (IXGBE_RXD_ERR_IPE | IXGBE_RXD_ERR_TCPE)))
return;
/* IP Checksum Good */
@@ -492,7 +499,8 @@ ixgbe_rx_checksum(uint32_t staterr, if_rxd_info_t ri, uint32_t ptype)
(ptype & IXGBE_RXDADV_PKTTYPE_SCTP) != 0)) {
ri->iri_csum_flags |= CSUM_SCTP_VALID;
} else {
- ri->iri_csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ ri->iri_csum_flags |=
+ CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
ri->iri_csum_data = htons(0xffff);
}
}
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 3dae3aeebaa1..624b71acabea 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -1,4 +1,4 @@
-/******************************************************************************
+/*****************************************************************************
SPDX-License-Identifier: BSD-3-Clause
Copyright (c) 2001-2017, Intel Corporation
@@ -30,7 +30,7 @@
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
-******************************************************************************/
+*****************************************************************************/
#ifndef _IXGBE_H_
#define _IXGBE_H_
@@ -46,6 +46,7 @@
#include <sys/module.h>
#include <sys/sockio.h>
#include <sys/eventhandler.h>
+#include <sys/priv.h>
#include <net/if.h>
#include <net/if_var.h>
@@ -86,6 +87,7 @@
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
#include "ixgbe_features.h"
+#include "ixgbe_e610.h"
/* Tunables */
@@ -195,6 +197,15 @@
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+/* All BASE-T Physical layers */
+#define IXGBE_PHYSICAL_LAYERS_BASE_T_ALL \
+ (IXGBE_PHYSICAL_LAYER_10GBASE_T |\
+ IXGBE_PHYSICAL_LAYER_5000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_2500BASE_T |\
+ IXGBE_PHYSICAL_LAYER_1000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |\
+ IXGBE_PHYSICAL_LAYER_10BASE_T)
+
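
(The grouped mask above is intended for membership tests; a
hypothetical use, assuming the bits returned by
ixgbe_get_supported_physical_layer(), would be
"if (phy_layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)" to treat any
copper BASE-T medium uniformly.)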
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
@@ -434,6 +445,10 @@ struct ixgbe_softc {
/* Bypass */
struct ixgbe_bp_data bypass;
+ /* Firmware error check */
+ int recovery_mode;
+ struct callout fw_mode_timer;
+
/* Misc stats maintained by the driver */
unsigned long dropped_pkts;
unsigned long mbuf_header_failed;
@@ -460,6 +475,21 @@ struct ixgbe_softc {
/* Feature capable/enabled flags. See ixgbe_features.h */
u32 feat_cap;
u32 feat_en;
+ u16 lse_mask;
+
+ struct sysctl_oid *debug_sysctls;
+ u32 debug_dump_cluster_mask;
+ bool do_debug_dump;
+};
+
+struct ixgbe_debug_dump_cmd {
+ u32 offset; /* offset to read/write from table, in bytes */
+ u8 cluster_id; /* also used to get next cluster id */
+ u16 table_id;
+ u16 data_size; /* size of data field, in bytes */
+ u16 reserved1;
+ u32 reserved2;
+ u8 data[];
};
/* Precision Time Sync (IEEE 1588) defines */
@@ -484,6 +514,43 @@ struct ixgbe_softc {
#define IXGBE_PHY_CURRENT_TEMP 0xC820
#define IXGBE_PHY_OVERTEMP_STATUS 0xC830
+/**
+ * The ioctl command number used by the NVM update tool to issue NVM
+ * access commands to the driver.
+ */
+#define IXGBE_NVM_ACCESS \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5)
+
+/*
+ * The ioctl command number used by a userspace tool to access the driver
+ * and retrieve debug dump data from the firmware.
+ */
+#define IXGBE_DEBUG_DUMP \
+ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 6)
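
The nested shifts above pack the ASCII bytes 'E', '1', 'K', 'G' plus a
per-command sequence number into a single ioctl number. Evaluated by
hand: 'E' is 0x45, so ((0x45 << 4) + 0x31) = 0x481, then
((0x481 << 4) + 0x4B) = 0x485B, then ((0x485B << 4) + 0x47) = 0x485F7,
and finally (0x485F7 << 4) | 5 = 0x485F75 for IXGBE_NVM_ACCESS and
(0x485F7 << 4) | 6 = 0x485F76 for IXGBE_DEBUG_DUMP.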
+
+/* Debug Dump related definitions */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_INVALID 0xFFFFFF
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_BASE 50
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_MAX 1
+
+#define IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK 0x3
+#define IXGBE_DBG_DUMP_BASE_SIZE (2 * 1024 * 1024)
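
(IXGBE_DBG_DUMP_VALID_CLUSTERS_MASK of 0x3 is simply the OR of the two
cluster flags documented in the sysctl help text below: 0x1 for Link
and 0x2 for the full CSR space.)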
+
+#define IXGBE_SYSCTL_DESC_DEBUG_DUMP_SET_CLUSTER \
+"\nSelect clusters to dump with \"dump\" sysctl" \
+"\nFlags:" \
+"\n\t 0x1 - Link" \
+"\n\t 0x2 - Full CSR Space, excluding RCW registers" \
+"\n\t" \
+"\nUse \"sysctl -x\" to view flags properly."
+
+#define IXGBE_SYSCTL_DESC_DUMP_DEBUG_DUMP \
+"\nWrite 1 to output a FW debug dump containing the clusters " \
+"specified by the \"clusters\" sysctl" \
+"\nThe \"-b\" flag must be used in order to dump this data " \
+"as binary data because" \
+"\nthis data is opaque and not a string."
+
/* Sysctl help messages; displayed with sysctl -d */
#define IXGBE_SYSCTL_DESC_ADV_SPEED \
"\nControl advertised link speed using these flags:\n" \
diff --git a/sys/dev/ixgbe/ixgbe_82599.c b/sys/dev/ixgbe/ixgbe_82599.c
index 8c3df0fd4f59..50902c6c356d 100644
--- a/sys/dev/ixgbe/ixgbe_82599.c
+++ b/sys/dev/ixgbe/ixgbe_82599.c
@@ -324,6 +324,7 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
+ u16 i;
DEBUGFUNC("ixgbe_init_ops_82599");
@@ -385,7 +386,8 @@ s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
& IXGBE_FWSM_MODE_MASK);
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;
/* EEPROM */
eeprom->ops.read = ixgbe_read_eeprom_82599;
@@ -433,12 +435,25 @@ s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
*autoneg = true;
goto out;
}
+ if (hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_da_cu_core1) {
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ *autoneg = true;
+
+ if (hw->phy.multispeed_fiber)
+ *speed |= IXGBE_LINK_SPEED_1GB_FULL;
+
+ goto out;
+ }
+
/*
* Determine link capabilities based on the stored value of AUTOC,
* which represents EEPROM defaults. If AUTOC value has not
@@ -1535,7 +1550,7 @@ u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
}
/**
- * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
+ * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
* @hw: pointer to hardware structure
* @input: unique input dword
* @common: compressed common input dword
@@ -1757,7 +1772,9 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
case 0x0000:
/* mask VLAN ID */
fdirm |= IXGBE_FDIRM_VLANID;
- /* FALLTHROUGH */
+ /* mask VLAN priority */
+ fdirm |= IXGBE_FDIRM_VLANP;
+ break;
case 0x0FFF:
/* mask VLAN priority */
fdirm |= IXGBE_FDIRM_VLANP;
@@ -2047,7 +2064,9 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
DEBUGOUT(" Error on src/dst port\n");
return IXGBE_ERR_CONFIG;
}
- /* FALLTHROUGH */
+ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+ break;
case IXGBE_ATR_FLOW_TYPE_TCPV4:
case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4:
case IXGBE_ATR_FLOW_TYPE_UDPV4:
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index eded950e2881..f11f52a646e4 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -112,11 +112,15 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM_a(hw);
break;
+ case ixgbe_mac_E610:
+ status = ixgbe_init_ops_E610(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
@@ -240,6 +244,18 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X550EM_a_vf;
hw->mvals = ixgbe_mvals_X550EM_a;
break;
+ case IXGBE_DEV_ID_E610_BACKPLANE:
+ case IXGBE_DEV_ID_E610_SFP:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
+ case IXGBE_DEV_ID_E610_SGMII:
+ hw->mac.type = ixgbe_mac_E610;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
@@ -904,7 +920,7 @@ s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val)
}
/**
- * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum
+ * ixgbe_update_eeprom_checksum - Updates the EEPROM checksum
* @hw: pointer to hardware structure
**/
s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw)
@@ -1134,6 +1150,19 @@ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on,
}
/**
+ * ixgbe_toggle_txdctl - Toggle VF's queues
+ * @hw: pointer to hardware structure
+ * @vind: VMDq pool index
+ *
+ * Enable and then disable each of the VF's queues.
+ */
+s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind)
+{
+ return ixgbe_call_func(hw, hw->mac.ops.toggle_txdctl, (hw,
+ vind), IXGBE_NOT_IMPLEMENTED);
+}
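
ixgbe_call_func() is the shared code's null-safe dispatch wrapper: it
invokes the installed function pointer when one exists and otherwise
returns the supplied error code, roughly

	#define ixgbe_call_func(hw, func, params, error) \
	    ((func) != NULL ? func params : (error))

(macro body paraphrased from memory; the definition in ixgbe_api.h is
authoritative), so MAC families that never install
mac.ops.toggle_txdctl report IXGBE_NOT_IMPLEMENTED instead of
dereferencing a null pointer.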
+
+/**
* ixgbe_fc_enable - Enable flow control
* @hw: pointer to hardware structure
*
@@ -1417,15 +1446,15 @@ s32 ixgbe_bypass_rw(struct ixgbe_hw *hw, u32 cmd, u32 *status)
/**
* ixgbe_bypass_valid_rd - Verify valid return from bit-bang.
+ * @hw: pointer to hardware structure
+ * @in_reg: The register cmd for the bit-bang read.
+ * @out_reg: The register returned from a bit-bang read.
*
* If we send a write we can't be sure it took until we can read back
* that same register. It can be a problem, as some of the fields may,
* for valid reasons, change between the time we wrote the register and
* when we read it again to verify. So this function checks everything
* we can check and then assumes it worked.
- *
- * @u32 in_reg - The register cmd for the bit-bang read.
- * @u32 out_reg - The register returned from a bit-bang read.
**/
bool ixgbe_bypass_valid_rd(struct ixgbe_hw *hw, u32 in_reg, u32 out_reg)
{
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index 9134971d9c98..2b4cec8d110e 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -48,6 +48,7 @@ extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
@@ -131,6 +132,7 @@ s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan,
s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on, u32 *vfta_delta, u32 vfta,
bool vlvf_bypass);
+s32 ixgbe_toggle_txdctl(struct ixgbe_hw *hw, u32 vind);
s32 ixgbe_fc_enable(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc(struct ixgbe_hw *hw);
s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
@@ -148,7 +150,6 @@ u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw);
s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw);
-s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl,
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index 6c1396ad964f..bff022585a03 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -133,6 +133,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw)
mac->ops.init_uta_tables = NULL;
mac->ops.enable_rx = ixgbe_enable_rx_generic;
mac->ops.disable_rx = ixgbe_disable_rx_generic;
+ mac->ops.toggle_txdctl = ixgbe_toggle_txdctl_generic;
/* Flow Control */
mac->ops.fc_enable = ixgbe_fc_enable_generic;
@@ -171,12 +172,13 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
- /* flow control autoneg black list */
+ /* flow control autoneg block list */
switch (hw->device_id) {
case IXGBE_DEV_ID_X550EM_A_SFP:
case IXGBE_DEV_ID_X550EM_A_SFP_N:
case IXGBE_DEV_ID_X550EM_A_QSFP:
case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -209,6 +211,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -268,8 +272,8 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
if (ret_val != IXGBE_SUCCESS)
goto out;
- /* only backplane uses autoc */
- /* FALLTHROUGH */
+ reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
+ break;
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber_qsfp:
case ixgbe_media_type_fiber:
@@ -615,7 +619,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -713,7 +718,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
return ret_val;
}
- if (length == 0xFFFF || length == 0) {
+ if (length == 0xFFFF || length == 0 || length > hw->eeprom.word_size) {
DEBUGOUT("NVM PBA number section invalid length\n");
return IXGBE_ERR_PBA_SECTION;
}
@@ -1036,6 +1041,9 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
case IXGBE_PCI_LINK_SPEED_8000:
hw->bus.speed = ixgbe_bus_speed_8000;
break;
+ case IXGBE_PCI_LINK_SPEED_16000:
+ hw->bus.speed = ixgbe_bus_speed_16000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
@@ -1058,7 +1066,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_bus_info_generic");
/* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+ link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+ IXGBE_PCI_LINK_STATUS_E610 :
+ IXGBE_PCI_LINK_STATUS);
ixgbe_set_pci_config_data_generic(hw, link_status);
@@ -1146,10 +1156,10 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
msec_delay(2);
/*
- * Prevent the PCI-E bus from hanging by disabling PCI-E master
+ * Prevent the PCI-E bus from hanging by disabling PCI-E primary
* access and verify no pending requests
*/
- return ixgbe_disable_pcie_master(hw);
+ return ixgbe_disable_pcie_primary(hw);
}
/**
@@ -1877,7 +1887,6 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -3208,32 +3217,32 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
}
/**
- * ixgbe_disable_pcie_master - Disable PCI-express master access
+ * ixgbe_disable_pcie_primary - Disable PCI-express primary access
* @hw: pointer to hardware structure
*
- * Disables PCI-Express master access and verifies there are no pending
- * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable
- * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS
- * is returned signifying master requests disabled.
+ * Disables PCI-Express primary access and verifies there are no pending
+ * requests. IXGBE_ERR_PRIMARY_REQUESTS_PENDING is returned if primary disable
+ * bit hasn't caused the primary requests to be disabled, else IXGBE_SUCCESS
+ * is returned signifying primary requests disabled.
**/
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
+s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw)
{
s32 status = IXGBE_SUCCESS;
u32 i, poll;
u16 value;
- DEBUGFUNC("ixgbe_disable_pcie_master");
+ DEBUGFUNC("ixgbe_disable_pcie_primary");
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
- /* Exit if master requests are blocked */
+ /* Exit if primary requests are blocked */
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
IXGBE_REMOVED(hw->hw_addr))
goto out;
- /* Poll for master request bit to clear */
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ /* Poll for primary request bit to clear */
+ for (i = 0; i < IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT; i++) {
usec_delay(100);
if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
goto out;
@@ -3241,13 +3250,13 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
/*
* Two consecutive resets are required via CTRL.RST per datasheet
- * 5.2.5.3.2 Master Disable. We set a flag to inform the reset routine
- * of this need. The first reset prevents new master requests from
+ * 5.2.5.3.2 Primary Disable. We set a flag to inform the reset routine
+ * of this need. The first reset prevents new primary requests from
* being issued by our device. We then must wait 1usec or more for any
* remaining completions from the PCIe bus to trickle in, and then reset
* again to clear out any effects they may have had on our device.
*/
- DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n");
+ DEBUGOUT("GIO Primary Disable bit didn't clear - requesting resets\n");
hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
if (hw->mac.type >= ixgbe_mac_X550)
@@ -3269,7 +3278,7 @@ s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
ERROR_REPORT1(IXGBE_ERROR_POLLING,
"PCIe transaction pending bit also did not clear.\n");
- status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+ status = IXGBE_ERR_PRIMARY_REQUESTS_PENDING;
out:
return status;
@@ -3362,7 +3371,6 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
@@ -3691,6 +3699,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_E610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return msix_count;
}
@@ -3866,14 +3878,15 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/**
+ * ixgbe_set_vmdq_san_mac_generic - Associate default VMDq pool index with
+ * a rx address
+ * @hw: pointer to hardware struct
+ * @vmdq: VMDq pool index
+ *
* This function should only be invoked in IOV mode.
* In IOV mode, the default pool is the next pool after the number of
* VFs advertised, not pool 0.
* MPSAR table needs to be updated for SAN_MAC RAR [hw->mac.san_mac_rar_index]
- *
- * ixgbe_set_vmdq_san_mac - Associate default VMDq pool index with a rx address
- * @hw: pointer to hardware struct
- * @vmdq: VMDq pool index
**/
s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
{
@@ -4138,6 +4151,61 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_toggle_txdctl_generic - Toggle VF's queues
+ * @hw: pointer to hardware structure
+ * @vf_number: VF index
+ *
+ * Enable and then disable each of the VF's queues.
+ */
+s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vf_number)
+{
+ u8 queue_count, i;
+ u32 offset, reg;
+
+ if (vf_number > 63)
+ return IXGBE_ERR_PARAM;
+
+ /*
+	 * Determine the number of queues by checking the
+	 * number of virtual functions.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ switch (reg & IXGBE_GCR_EXT_VT_MODE_MASK) {
+ case IXGBE_GCR_EXT_VT_MODE_64:
+ queue_count = 2;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_32:
+ queue_count = 4;
+ break;
+ case IXGBE_GCR_EXT_VT_MODE_16:
+ queue_count = 8;
+ break;
+ default:
+ return IXGBE_ERR_CONFIG;
+ }
+
+ /* Toggle queues */
+ for (i = 0; i < queue_count; ++i) {
+ /* Calculate offset of current queue */
+ offset = queue_count * vf_number + i;
+
+ /* Enable queue */
+ reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
+ reg |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Disable queue */
+ reg = IXGBE_READ_REG(hw, IXGBE_PVFTXDCTL(offset));
+ reg &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_PVFTXDCTL(offset), reg);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
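
A hand-worked example of the offset math above, not taken from the
change itself: in 32-pool mode each VF owns four queues, so for
vf_number 5 the loop toggles IXGBE_PVFTXDCTL(20) through
IXGBE_PVFTXDCTL(23). The PF-side caller added in if_sriov.c earlier in
this change invokes it during VF reset as
ixgbe_toggle_txdctl(&sc->hw, IXGBE_VF_INDEX(vf->pool)).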
+/**
* ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix
* @hw: pointer to hardware structure
*
@@ -4265,7 +4333,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_E610) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -4778,8 +4847,10 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom,
rxpktsize <<= IXGBE_RXPBSIZE_SHIFT;
for (; i < (num_pb / 2); i++)
IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
- /* configure remaining packet buffers */
- /* FALLTHROUGH */
+ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
+ for (; i < num_pb; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize);
+ break;
case PBA_STRATEGY_EQUAL:
rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT;
for (; i < num_pb; i++)
@@ -4880,7 +4951,7 @@ static const u8 ixgbe_emc_therm_limit[4] = {
};
/**
- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data
+ * ixgbe_get_thermal_sensor_data_generic - Gathers thermal sensor data
* @hw: pointer to hardware structure
*
* Returns the thermal sensor data structure
@@ -5148,15 +5219,14 @@ s32 ixgbe_bypass_rw_generic(struct ixgbe_hw *hw, u32 cmd, u32 *status)
/**
* ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
+ * @in_reg: The register cmd for the bit-bang read.
+ * @out_reg: The register returned from a bit-bang read.
*
* If we send a write we can't be sure it took until we can read back
* that same register. It can be a problem as some of the fields may
* for valid reasons change in between the time we wrote the register and
* the time we read it again to verify. So this function checks everything
* it can check and then assumes it worked.
- *
- * @u32 in_reg - The register cmd for the bit-bang read.
- * @u32 out_reg - The register returned from a bit-bang read.
**/
bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
{
@@ -5207,7 +5277,7 @@ bool ixgbe_bypass_valid_rd_generic(u32 in_reg, u32 out_reg)
* ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
*
* @hw: pointer to hardware structure
- * @cmd: The control word we are setting.
+ * @ctrl: The control word we are setting.
* @event: The event we are setting in the FW. This also happens to
* be the mask for the event we are setting (handy)
* @action: The action we set the event to in the FW. This is in a
@@ -5392,6 +5462,105 @@ void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver)
}
}
+/**
+ * ixgbe_get_nvm_version - Return version of NVM and its components
+ *
+ * @hw: pointer to hardware structure
+ * @nvm_ver: pointer to output structure
+ *
+ * Irrelevant component fields will return 0; read errors will return 0xff.
+ **/
+void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver)
+{
+ u16 word, phy_ver;
+
+ DEBUGFUNC("ixgbe_get_nvm_version");
+
+ memset(nvm_ver, 0, sizeof(struct ixgbe_nvm_version));
+
+ /* eeprom version is mac-type specific */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_82598, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
+ >> NVM_EEP_MIN_SHIFT);
+ nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
+ break;
+ case ixgbe_mac_X540:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = ((word & NVM_EEP_MINOR_MASK)
+ >> NVM_EEP_MIN_SHIFT);
+ nvm_ver->nvm_id = (word & NVM_EEP_ID_MASK);
+ break;
+
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
+ /* version of eeprom section */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->nvm_major = ((word & NVM_EEP_MAJOR_MASK)
+ >> NVM_EEP_MAJ_SHIFT);
+ nvm_ver->nvm_minor = (word & NVM_EEP_X550_MINOR_MASK);
+
+ break;
+ default:
+ break;
+ }
+
+ /* phy version is mac-type specific */
+ switch (hw->mac.type) {
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
+ /* intel phy firmware version */
+ if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->phy_fw_maj = ((word & NVM_PHY_MAJOR_MASK)
+ >> NVM_PHY_MAJ_SHIFT);
+ nvm_ver->phy_fw_min = ((word & NVM_PHY_MINOR_MASK)
+ >> NVM_PHY_MIN_SHIFT);
+ nvm_ver->phy_fw_id = (word & NVM_PHY_ID_MASK);
+ break;
+ default:
+ break;
+ }
+
+ ixgbe_get_etk_id(hw, nvm_ver);
+
+ /* devstarter image */
+ if (ixgbe_read_eeprom(hw, NVM_DS_OFFSET, &word))
+ word = NVM_VER_INVALID;
+ nvm_ver->devstart_major = ((word & NVM_DS_MAJOR_MASK) >> NVM_DS_SHIFT);
+ nvm_ver->devstart_minor = (word & NVM_DS_MINOR_MASK);
+
+ /* OEM customization word */
+ if (ixgbe_read_eeprom(hw, NVM_OEM_OFFSET, &nvm_ver->oem_specific))
+ nvm_ver->oem_specific = NVM_VER_INVALID;
+
+ /* vendor (not intel) phy firmware version */
+ if (ixgbe_get_phy_firmware_version(hw, &phy_ver))
+ phy_ver = NVM_VER_INVALID;
+ nvm_ver->phy_vend_maj = ((phy_ver & NVM_PHYVEND_MAJOR_MASK)
+ >> NVM_PHYVEND_SHIFT);
+ nvm_ver->phy_vend_min = (phy_ver & NVM_PHYVEND_MINOR_MASK);
+
+ /* Option Rom may or may not be present. Start with pointer */
+ ixgbe_get_orom_version(hw, nvm_ver);
+}
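A minimal caller sketch (not part of the patch; the wrapper name and reporting format are illustrative) showing how the filled structure might be consumed:

	/* Sketch: report the versions gathered by ixgbe_get_nvm_version(). */
	static void
	ixgbe_print_nvm_version(struct ixgbe_hw *hw)
	{
		struct ixgbe_nvm_version nvm;

		ixgbe_get_nvm_version(hw, &nvm);	/* unread fields stay 0 */
		printf("NVM %u.%u id %u, PHY FW %u.%u\n",
		    nvm.nvm_major, nvm.nvm_minor, nvm.nvm_id,
		    nvm.phy_fw_maj, nvm.phy_fw_min);
	}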
/**
* ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg
diff --git a/sys/dev/ixgbe/ixgbe_common.h b/sys/dev/ixgbe/ixgbe_common.h
index a55003b4cfe4..a2da9c834f8f 100644
--- a/sys/dev/ixgbe/ixgbe_common.h
+++ b/sys/dev/ixgbe/ixgbe_common.h
@@ -118,7 +118,7 @@ s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask);
-s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
+s32 ixgbe_disable_pcie_primary(struct ixgbe_hw *hw);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,6 +141,7 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlvf_bypass);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass);
+s32 ixgbe_toggle_txdctl_generic(struct ixgbe_hw *hw, u32 vind);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
@@ -195,6 +196,8 @@ void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
void ixgbe_get_orom_version(struct ixgbe_hw *hw,
struct ixgbe_nvm_version *nvm_ver);
+void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_version *nvm_ver);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
diff --git a/sys/dev/ixgbe/ixgbe_dcb.c b/sys/dev/ixgbe/ixgbe_dcb.c
index 0ebc5456eda5..29ee3117edcb 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.c
+++ b/sys/dev/ixgbe/ixgbe_dcb.c
@@ -293,7 +293,7 @@ void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction,
}
/**
- * ixgbe_dcb_config - Struct containing DCB settings.
+ * ixgbe_dcb_check_config_cee - Struct containing DCB settings.
* @dcb_config: Pointer to DCB config structure
*
* This function checks DCB rules for DCB settings.
diff --git a/sys/dev/ixgbe/ixgbe_dcb.h b/sys/dev/ixgbe/ixgbe_dcb.h
index b31dfae0cdfa..54decd4d081d 100644
--- a/sys/dev/ixgbe/ixgbe_dcb.h
+++ b/sys/dev/ixgbe/ixgbe_dcb.h
@@ -40,9 +40,9 @@
/* DCB defines */
/* DCB credit calculation defines */
#define IXGBE_DCB_CREDIT_QUANTUM 64
-#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */
+#define IXGBE_DCB_MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */
#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/
-#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL)
+#define IXGBE_DCB_MAX_CREDIT 4095 /* 0xFFF: max credits of 64B each, ~256KB */
/* 513 for 32KB TSO packet */
#define IXGBE_DCB_MIN_TSO_CREDIT \
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..18c4612446e0
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -0,0 +1,5533 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size is too big or the buf and buf_size
+ *     arguments are inconsistent.
+ * * - IXGBE_ERR_ACI_TIMEOUT - the Admin Command timed out.
+ * * - IXGBE_ERR_ACI_ERROR - the HICR register is in an invalid state,
+ *     the Admin Command response carried an unexpected opcode, or the
+ *     Admin Command completed with an error return value.
+ */
+static s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u32 hicr = 0, tmp_buf_size = 0, i = 0;
+ u32 *raw_desc = (u32 *)desc;
+ s32 status = IXGBE_SUCCESS;
+ bool valid_buf = false;
+ u32 *tmp_buf = NULL;
+ u16 opcode = 0;
+
+ do {
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+		/* It's necessary to check if the mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if (!(hicr & PF_HICR_EN)) {
+ status = IXGBE_ERR_ACI_DISABLED;
+ break;
+ }
+ if (hicr & PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ status = IXGBE_ERR_ACI_BUSY;
+ break;
+ }
+ opcode = desc->opcode;
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+
+ if (buf)
+ desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+ /* Check if buf and buf_size are proper params */
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && buf_size == 0) ||
+ (buf == NULL && buf_size)) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+		if (valid_buf) {
+ if (buf_size % 4 == 0)
+ tmp_buf_size = buf_size;
+ else
+ tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+ tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
+ if (!tmp_buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+			/* tmp_buf is first filled with 0xFF and then the
+			 * content of buf is copied into it. This approach
+			 * lets us pass the exact buf_size and prevents us
+			 * from reading past the buf area when buf_size is
+			 * not a multiple of 4.
+ */
+ memset(tmp_buf, 0xFF, tmp_buf_size);
+ memcpy(tmp_buf, buf, buf_size);
+
+ if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+ desc->flags |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ IXGBE_WRITE_REG(hw, PF_HIBA(i),
+ IXGBE_LE32_TO_CPU(tmp_buf[i]));
+ }
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, PF_HIDA(i),
+ IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+		/* SW has to set the PF_HICR.C bit and clear the PF_HICR.SV
+		 * and PF_HICR.EV bits
+ */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+ /* Wait for sync Admin Command response */
+ for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+
+ /* Wait for async Admin Command response */
+ if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+ i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+ }
+
+ /* Read sync Admin Command response */
+ if ((hicr & PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & PF_HICR_C) {
+ status = IXGBE_ERR_ACI_TIMEOUT;
+ break;
+ } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* For every command other than 0x0014 treat opcode mismatch
+ * as an error. Response to 0x0014 command read from HIDA_2
+ * is a descriptor of an event which is expected to contain
+		 * a different opcode than the command.
+ */
+ if (desc->opcode != opcode &&
+ opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ if (desc->retval != IXGBE_ACI_RC_OK) {
+ hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+		/* Write the response values back to buf */
+ if (valid_buf && (desc->flags &
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+ tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+ }
+ memcpy(buf, tmp_buf, buf_size);
+ }
+ } while (0);
+
+ if (tmp_buf)
+ ixgbe_free(hw, tmp_buf);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ s32 status;
+ u16 opcode;
+ u8 idx = 0;
+
+ opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+ if (!buf_cpy)
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ ixgbe_acquire_lock(&hw->aci.lock);
+ status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ ixgbe_release_lock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+ (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+ if (buf_cpy)
+ ixgbe_free(hw, buf_cpy);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events,
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask;
+ u32 fwsts;
+
+ ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+ /* Check state of Event Pending (EP) bit */
+ fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_acquire_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (status)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending) {
+ *pending = ixgbe_aci_check_event_pending(hw);
+ }
+
+aci_get_event_exit:
+ ixgbe_release_lock(&hw->aci.lock);
+
+ return status;
+}
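A hedged sketch of how a caller might drain the event queue using the optional pending flag (the helper name and buffer handling are assumptions, not patch code):

	/* Sketch: fetch ACI events until none remain. */
	static void
	ixgbe_drain_aci_events(struct ixgbe_hw *hw, u8 *buf, u16 buf_len)
	{
		struct ixgbe_aci_event e;
		bool pending = true;

		while (pending) {
			memset(&e, 0, sizeof(e));
			e.msg_buf = buf;
			e.buf_len = buf_len;
			if (ixgbe_aci_get_event(hw, &e, &pending))
				break;	/* no events left, or a real error */
			/* ... dispatch on IXGBE_LE16_TO_CPU(e.desc.opcode) ... */
		}
	}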
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+ desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+ struct ixgbe_aci_cmd_driver_ver *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * the command completes with a busy status and the timeout field indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u8 sdp_number,
+ u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+ cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Make an attempt to acquire the ownership of a resource using
+ * the ixgbe_aci_req_res to utilize ACI.
+ * If some other driver has previously acquired the resource and
+ * performed any necessary updates, IXGBE_ERR_ACI_NO_WORK is returned;
+ * in that case the caller does not obtain the resource and has no
+ * further work to do. If needed, the function polls until the current
+ * lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout = 0;
+ s32 status;
+
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ goto ixgbe_acquire_res_exit;
+
+	/* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (status && retry_timeout && res_timeout) {
+ msec_delay(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+
+ixgbe_acquire_res_exit:
+ return status;
+}
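A sketch of the intended acquire/use/release pairing. The resource-ID and access-type names (IXGBE_NVM_RES_ID, IXGBE_RES_READ) and the timeout value are assumptions taken to be defined in the accompanying E610 headers:

	/* Sketch: hold a shared resource only for the duration of the access. */
	static s32
	ixgbe_example_read_under_res(struct ixgbe_hw *hw)
	{
		s32 status;

		status = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID,
		    IXGBE_RES_READ, 3000 /* ms, illustrative */);
		if (status)
			return status;	/* may be IXGBE_ERR_ACI_NO_WORK */

		/* ... access the protected resource here ... */

		ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
		return IXGBE_SUCCESS;
	}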
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ s32 status;
+
+ status = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+ (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+ msec_delay(1);
+ status = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+ u32 number = IXGBE_LE32_TO_CPU(elem->number);
+ u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->sec_rev_disabled =
+ (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+ true : false;
+ caps->update_disabled =
+ (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+ true : false;
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ caps->netlist_auth =
+ (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+ IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
+ caps->orom_recovery_update = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+ u8 hweight = 0, i;
+
+ for (i = 0; i < 8; i++)
+ if (w & (1 << i))
+ hweight++;
+
+ return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+	u8 hweight = 0;
+	u32 i;
+
+	for (i = 0; i < 32; i++)
+		if (w & ((u32)1 << i))
+			hweight++;
+
+	return hweight;
+}
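Both helpers compute a population count. As a cross-check only (not patch code), the classic Kernighan formulation clears one set bit per iteration and yields the same result:

	/* Sketch: equivalent popcount via Kernighan's bit trick. */
	static u8 ixgbe_popcount32_sketch(u32 w)
	{
		u8 n = 0;

		while (w) {
			w &= w - 1;	/* clear the lowest set bit */
			n++;
		}
		return n;
	}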
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_funcs = ixgbe_hweight32(number);
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vfs_exposed = number;
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF, or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
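A worked example with hypothetical numbers: a valid-function bitmap of 0x0F means four active PFs, so a device-wide maximum of 768 resources splits into 192 per PF.

	/* Standalone check of the even split (values are illustrative). */
	#include <assert.h>

	int main(void)
	{
		unsigned int funcs = 4;		/* popcount of bitmap 0x0F */
		unsigned int max_rsrc = 768;	/* assumed device-wide max */

		assert(max_rsrc / funcs == 192);	/* per-PF share */
		return 0;
	}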
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ ixgbe_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+	u32 cap_count = 0;
+	s32 status;
+ u8 *cbuf = NULL;
+
+ cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!status)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+	if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!status)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = (u8)hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+ status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+ if (status == IXGBE_SUCCESS &&
+ report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+ hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+ IXGBE_ACI_PHY_EN_MOD_QUAL);
+ cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+ ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+ }
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+ if (!status)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
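A hedged sketch tying the calls above together: read the media capabilities, convert them to a configuration, and apply it (the wrapper name is hypothetical; error handling is abbreviated, and the report mode matches the caching path in ixgbe_aci_get_phy_caps):

	/* Sketch: program the PHY from its reported media capabilities. */
	static s32
	ixgbe_example_apply_media_caps(struct ixgbe_hw *hw)
	{
		struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
		struct ixgbe_aci_cmd_set_phy_cfg_data cfg;
		s32 status;

		status = ixgbe_aci_get_phy_caps(hw, false,
		    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
		if (status)
			return status;

		ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
		return ixgbe_aci_set_phy_cfg(hw, &cfg);
	}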
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * If more than one media type matches, ixgbe_media_type_unknown is
+ * returned. phy_type_low is checked first, then phy_type_high. If
+ * neither identifies the media, ixgbe_media_type_unknown is returned.
+ *
+ * Return: the media type derived from the phy type, as an enum value.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li = &hw->link.link_info;
+
+ status = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (status == IXGBE_SUCCESS)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ ixgbe_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down;
+ * it is invalid if the returned status is non-zero. As a result of
+ * this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw || !link_up)
+ return IXGBE_ERR_PARAM;
+
+	if (hw->link.get_link_info) {
+		status = ixgbe_update_link_info(hw);
+		if (status)
+			return status;
+	}
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return status;
+}
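A hedged example of the expected call pattern (the wrapper name is hypothetical):

	/* Sketch: query link state and react to the result. */
	static void
	ixgbe_example_check_link(struct ixgbe_hw *hw)
	{
		bool link_up = false;

		if (ixgbe_get_link_status(hw, &link_up) != IXGBE_SUCCESS)
			return;		/* link_up is not valid on error */
		if (link_up) {
			/* speed is in hw->link.link_info.link_speed */
		}
	}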
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x0607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+	/* flag cleared so calling functions don't query the ACI again */
+ hw->link.get_link_info = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: true to enable link status events, false to disable them
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ s32 rc;
+
+ rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (rc) {
+ return rc;
+ }
+
+ /* Enabling link status events generation by fw */
+ rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+ if (rc) {
+ return rc;
+ }
+ return IXGBE_SUCCESS;
+}
+
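+/*
+ * Usage sketch (illustrative): since a set mask bit deactivates the
+ * corresponding event, a mask of 0 subscribes to every link status event:
+ *
+ *	err = ixgbe_configure_lse(hw, true, 0);
+ *
+ * Calling it again with activate == false disables reporting without
+ * changing the mask semantics.
+ */
+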
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. When found, IXGBE_SUCCESS is returned; otherwise
+ * IXGBE_ERR_NOT_SUPPORTED. If @node_handle is provided, it is set to the
+ * found node handle.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ s32 status;
+ u8 idx;
+
+ for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ixgbe_aci_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ return IXGBE_ERR_NOT_SUPPORTED;
+}
+
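+/*
+ * Usage sketch (illustrative; NODE_TYPE_GPIO_CTRL and GPIO_CTRL_PART_NUM
+ * are placeholder names, not constants defined by this change):
+ *
+ *	u16 handle;
+ *
+ *	if (ixgbe_find_netlist_node(hw, NODE_TYPE_GPIO_CTRL,
+ *	    GPIO_CTRL_PART_NUM, &handle) == IXGBE_SUCCESS)
+ *		(void)ixgbe_aci_set_gpio(hw, handle, 0, true);
+ *
+ * The scan is bounded by IXGBE_MAX_NETLIST_SIZE entries and aborts on the
+ * first ACI error.
+ */
+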
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type, bits [3:0] - data size
+ * to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 data_size;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return IXGBE_ERR_PARAM;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status) {
+ struct ixgbe_aci_cmd_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
+
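+/*
+ * Worked example of the @params encoding above (illustrative): to read
+ * 2 bytes using a repeated start and a one-byte data offset, set
+ * params = (1 << 7) | (1 << 5) | 2 = 0xA2. The loop above then copies
+ * data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ * IXGBE_ACI_I2C_DATA_SIZE_S = 2 response bytes into @data.
+ */
+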
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ * to write (0-4 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 i, data_size;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return IXGBE_ERR_PARAM;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ for (i = 0; i < data_size; i++) {
+ cmd->i2c_data[i] = *data;
+ data++;
+ }
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: IO value to set (in the LSB)
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO to be read
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ *value = !!cmd->gpio_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the EEPROM (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset; the lower 8 bits hold the address, the upper 8 bits
+ * are zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: data length in bytes; 1-16 for a read, 1 for a write.
+ * @write: false for a read, true for a write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write)
+{
+ struct ixgbe_aci_cmd_sff_eeprom *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || (mem_addr & 0xff00))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+ IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+ ((page_bank_ctrl <<
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+ cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+ cmd->module_page = page;
+ if (write)
+ cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ return status;
+}
+
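+/*
+ * Usage sketch (illustrative): reading the first 16 identification bytes
+ * of an SFP module EEPROM at the conventional 0xA0 address, lower page 0.
+ * Passing lport = 0 leaves the logical-port-valid bit (bit 8) clear, which,
+ * per the field description above, is assumed to target the caller's port:
+ *
+ *	u8 id[16];
+ *	s32 err;
+ *
+ *	err = ixgbe_aci_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id,
+ *	    sizeof(id), false);
+ */
+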
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.prog_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size)
+{
+ struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || data_size == 0 ||
+ data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+ return IXGBE_ERR_PARAM;
+
+ cmd = &desc.params.read_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+ desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+ cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ memcpy(data, cmd->data_read, data_size);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return IXGBE_SUCCESS;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
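+/*
+ * Note: a single 0x0701 command can transfer at most one ACI buffer (4KB).
+ * ixgbe_read_flat_nvm() later in this file is the wrapper intended for
+ * larger or sector-crossing reads; it invokes this function in a loop.
+ */
+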
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+ __le16 len;
+
+ /* Read the module's length word from the Shadow RAM (module_typeid
+ * argument 0): the size of a module is stored at word offset
+ * module_typeid + 1, i.e. byte offset 2 * module_typeid + 2, and is
+ * fetched with last_command and read_shadow_ram set to true.
+ */
+ status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ *
+ * Reads a single or multiple feature/field ID and data using ACI command
+ * (0x0704).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = IXGBE_CPU_TO_LE16(field_id);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+ if (!status && elem_count)
+ *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer with the data to be written
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ *
+ * Writes a single or multiple feature/field ID and data using ACI command
+ * (0x0705).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd->count = IXGBE_CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
+ IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid Shadow Ram checksum");
+ status = IXGBE_ERR_NVM_CHECKSUM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate
+ * one-byte flag fields in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = LO_BYTE(cmd_flags);
+ cmd->offset_high = HI_BYTE(cmd_flags);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, lookup the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
+
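+/*
+ * Worked example (illustrative): with banks->nvm_ptr = 0x10000,
+ * banks->nvm_size = 0x40000 and the second NVM bank active, the active
+ * bank starts at 0x10000 + 0x40000 = 0x50000 while the inactive bank
+ * starts at 0x10000, because the two banks are stored back to back.
+ */
+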
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the word read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether or not to read from the active bank or the inactive bank of that
+ * module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ s32 status;
+ u32 start;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header and add the
+ * Authentication header size, and then convert to words.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return IXGBE_SUCCESS;
+}
+
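+/*
+ * Worked example (illustrative): a CSS header length of 0x2A dwords
+ * yields 0x2A * 2 + IXGBE_NVM_AUTH_HEADER_LEN words; the Shadow RAM copy
+ * then starts at that offset, rounded up to a 32-word boundary by
+ * ixgbe_read_nvm_sr_copy() below.
+ */
+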
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ s32 status;
+
+ status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+ u16 valid;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+ 0, sizeof(data), &data,
+ true, false);
+
+ ixgbe_release_nvm(hw);
+
+ if (status)
+ return status;
+
+ valid = IXGBE_LE16_TO_CPU(data.validity);
+
+ /* Extract NVM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+ minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+ minsrevs->nvm_valid = true;
+ }
+
+ /* Extract the OROM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+ minsrevs->orom = minsrev_h << 16 | minsrev_l;
+ minsrevs->orom_valid = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: minimum security revision information
+ *
+ * Update the NVM or Option ROM minimum security revision fields in the PFA
+ * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
+ * fields to determine what update is being requested. If the valid bit is not
+ * set for that module, then the associated minsrev will be left as is.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+
+ if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ /* Get current data */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, true, false);
+ if (status)
+ goto exit_release_res;
+
+ if (minsrevs->nvm_valid) {
+ data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
+ data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
+ }
+
+ if (minsrevs->orom_valid) {
+ data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
+ data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
+ }
+
+ /* Update flash data */
+ status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, false,
+ IXGBE_ACI_NVM_SPECIAL_UPDATE);
+ if (status)
+ goto exit_release_res;
+
+ /* Dump the Shadow RAM to the flash */
+ status = ixgbe_nvm_write_activate(hw, 0, NULL);
+
+exit_release_res:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the active NVM module
+ * bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (status)
+ return status;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ s32 status;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank,
+ E610_SR_NVM_DEV_STARTER_VER, &ver);
+ if (status) {
+ return status;
+ }
+
+ nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+ if (status) {
+ return status;
+ }
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+ if (status) {
+ return status;
+ }
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
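+ /* A failure to read the security revision is not propagated; the
+ * version and EETRACK fields above remain valid.
+ */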
+ status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ s32 status;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (status)
+ return status;
+
+ if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
+ return IXGBE_ERR_NVM;
+ }
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the
+ * netlist ID block
+ */
+ if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
+ return IXGBE_ERR_NVM;
+ }
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (status)
+ return status;
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
+ sizeof(*id_blk));
+ if (!id_blk)
+ return IXGBE_ERR_NO_SPACE;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
+ goto exit_error;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+ /* Read the left most 4 bytes of SHA */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+exit_error:
+ ixgbe_free(hw, id_blk);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract version data of a pending flash update in order to display the
+ * version data.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word uses its highest bit to indicate whether its value is in
+ * word units or 4KB sector units. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return IXGBE_SUCCESS;
+}
+
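+/*
+ * Worked example (illustrative): a pointer word of 0x8002 has the 4KB
+ * flag set and resolves to 2 * 4096 = 8192 bytes, while 0x0002 is in
+ * word units and resolves to 2 * 2 = 4 bytes.
+ */
+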
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == IXGBE_ERR_ACI_ERROR &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ status = IXGBE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
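+/*
+ * The bisection above maintains the invariant that min_size is readable
+ * and max_size is not (firmware answers IXGBE_ACI_RC_EINVAL for offsets
+ * past the end of flash), so the loop converges on the first
+ * inaccessible offset, which equals the usable flash size.
+ */
+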
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+ if (status) {
+ return status;
+ }
+
+ /* Check that the control word indicates validity */
+ if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+ IXGBE_SR_CTRL_WORD_VALID) {
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+ &banks->nvm_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+ &banks->nvm_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+ &banks->orom_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+ &banks->orom_size);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+ &banks->netlist_ptr);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+ &banks->netlist_size);
+ if (status) {
+ return status;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_nvm - initialize NVM settings
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+ u32 fla, gens_stat;
+ s32 status;
+ u8 sr_size;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ flash->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ flash->blank_nvm_mode = true;
+ return IXGBE_ERR_NVM_BLANK_MODE;
+ }
+
+ status = ixgbe_discover_flash_size(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_determine_active_flash_banks(hw);
+ if (status) {
+ return status;
+ }
+
+ status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->nvm);
+ if (status) {
+ return status;
+ }
+
+ /* Read the netlist version information. A failure here is not
+ * treated as fatal for NVM initialization.
+ */
+ status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+ &flash->netlist);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u8 values;
+
+ u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+ IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+
+ status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+ if (status)
+ return status;
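+ /* Treat the sanitize as failed unless at least one agent (host or
+ * BMC) reports its clean as done, and every clean that is reported
+ * done also reports success.
+ */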
+ if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return IXGBE_ERR_ACI_ERROR;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm_sanitization;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = IXGBE_LE16_TO_CPU(data_local);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads 16 bit words (data buf) from the Shadow RAM. NVM ownership is
+ * expected to be taken by the caller before reading the buffer and to be
+ * released afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ *words = bytes / 2;
+
+ for (i = 0; i < *words; i++)
+ data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Note that the data buffer may be partially updated if some reads succeed
+ * before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ s32 status;
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+ if (read_shadow_ram && ((offset + inlen) >
+ (hw->eeprom.word_size * 2u))) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = MIN_T(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
+
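+/*
+ * Worked example (illustrative): a 6000-byte read starting at offset
+ * 0x0F00 is split at the 4KB boundary into a first command of
+ * 0x1000 - 0x0F00 = 0x100 bytes, a full 4KB command, and a final
+ * command for the remaining 1648 bytes, with last_cmd set only on the
+ * final piece.
+ */
+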
+/**
+ * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ *
+ * Check if all the parameters are valid
+ * before performing any Shadow RAM read/write operations.
+ *
+ * Return: the exit code of the operation.
+ * * IXGBE_SUCCESS - success.
+ * * IXGBE_ERR_PARAM - the offset is beyond the SR limit, the access
+ *   covers more words than the per-command limit, or it would spread
+ *   over two sectors.
+ */
+static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
+ u16 words)
+{
+ if ((offset + words) > hw->eeprom.word_size) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
+ /* We can access only up to 4KB (one sector),
+ * in one Admin Command write
+ */
+ return IXGBE_ERR_PARAM;
+ }
+
+ if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
+ (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
+ /* A single access cannot spread over two sectors */
+ return IXGBE_ERR_PARAM;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
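+/*
+ * Worked example (illustrative, assuming IXGBE_SR_SECTOR_SIZE_IN_WORDS
+ * is 2048, i.e. a 4KB sector): writing 16 words at word offset 2040
+ * would end at word 2055 in the next sector and is rejected, while the
+ * same write at offset 2032 ends at word 2047 and passes.
+ */
+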
+/**
+ * ixgbe_write_sr_word_aci - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function should be called afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = IXGBE_CPU_TO_LE16(*data);
+ s32 status;
+
+ status = ixgbe_check_sr_access_params(hw, offset, 1);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD, &data_local,
+ false, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function should be called afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
+ const u16 *data)
+{
+ __le16 *data_local;
+ s32 status;
+ void *vmem;
+ u32 i;
+
+ vmem = ixgbe_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data_local = (__le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ixgbe_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD * words,
+ data_local, false, 0);
+
+ ixgbe_free(hw, vmem);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (status == IXGBE_SUCCESS) {
+ *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed)
+{
+ struct ixgbe_aci_cmd_done_alt_write *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status)
+ *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+ IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_clear_port_alt_write);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next table to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index)
+{
+ struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.debug_dump;
+
+ if (buf_size == 0 || !buf)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_debug_dump_internals);
+
+ cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+ cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+ cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (!status) {
+ if (ret_buf_size)
+ *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+ if (ret_next_table)
+ *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+ if (ret_next_index)
+ *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+ }
+
+ return status;
+}
+
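+/*
+ * Usage sketch (illustrative only): dump one FW cluster by feeding the
+ * "next" cookies returned by each call into the following one. Here
+ * 'cluster' is a placeholder u16 cluster id, the buffer size is arbitrary,
+ * and the termination test is an assumption; the FW spec defines the exact
+ * sentinel values that mark the end of a cluster.
+ *
+ *	u16 tbl = 0, next_cl, next_tbl, len;
+ *	u32 idx = 0, next_idx;
+ *	u8 buf[512];
+ *
+ *	while (!ixgbe_aci_get_internal_data(hw, cluster, tbl, idx, buf,
+ *	    sizeof(buf), &len, &next_cl, &next_tbl, &next_idx) &&
+ *	    next_cl == cluster) {
+ *		(consume len bytes from buf)
+ *		tbl = next_tbl;
+ *		idx = next_idx;
+ *	}
+ */
+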
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure is a request to read or write a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+ u16 i;
+
+ switch (cmd->offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case GL_FWRESETCNT:
+ return 0;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIDA(i))
+ return 0;
+
+ for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIBA(i))
+ return 0;
+
+ /* All other register offsets are not valid */
+ return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Always initialize the output data, even on failure */
+ memset(&data->regval, 0, cmd->data_size);
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ return IXGBE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the NVM access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ switch (cmd->command) {
+ case IXGBE_NVM_CMD_READ:
+ return ixgbe_nvm_access_read(hw, cmd, data);
+ case IXGBE_NVM_CMD_WRITE:
+ return ixgbe_nvm_access_write(hw, cmd, data);
+ default:
+ return IXGBE_ERR_PARAM;
+ }
+}
+
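+/*
+ * Usage sketch (illustrative only): read GL_FWSTS through the NVM access
+ * path, given an initialized 'hw'. Only the command fields referenced
+ * above (command, offset, data_size) are assumed to exist in
+ * struct ixgbe_nvm_access_cmd.
+ *
+ *	struct ixgbe_nvm_access_cmd cmd = { 0 };
+ *	struct ixgbe_nvm_access_data data = { 0 };
+ *
+ *	cmd.command = IXGBE_NVM_CMD_READ;
+ *	cmd.offset = GL_FWSTS;
+ *	cmd.data_size = sizeof(data.regval);
+ *	if (!ixgbe_handle_nvm_access(hw, &cmd, &data))
+ *		(data.regval now holds the register contents)
+ */
+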
+/**
+ * ixgbe_aci_set_health_status_config - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF using ACI command (0xFF20). The supported event types are: PF-specific,
+ * all PFs, and global.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
+{
+ struct ixgbe_aci_cmd_set_health_status_config *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_health_status_config;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_set_health_status_config);
+
+ cmd->event_source = event_source;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_E610;
+ mac->ops.start_hw = ixgbe_start_hw_E610;
+ mac->ops.get_media_type = ixgbe_get_media_type_E610;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_E610;
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.setup_link = ixgbe_setup_link_E610;
+ mac->ops.check_link = ixgbe_check_link_E610;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+ mac->ops.setup_fc = ixgbe_setup_fc_E610;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+ mac->ops.disable_rx = ixgbe_disable_rx_E610;
+ mac->ops.setup_eee = ixgbe_setup_eee_E610;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+ mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
+ mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+ mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+ mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_E610;
+ phy->ops.identify = ixgbe_identify_phy_E610;
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+ /* Additional ops overrides for e610 to go here */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+ eeprom->ops.read = ixgbe_read_ee_aci_E610;
+ eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+ eeprom->ops.write = ixgbe_write_ee_aci_E610;
+ eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+ eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_reset_hw_E610");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ status = hw->phy.ops.init(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if the API version is compatible,
+ * starts the hardware using the generic start_hw function and the
+ * gen2 start_hw function, then performs revision-specific operations,
+ * if any.
+ **/
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ ret_val = hw->mac.ops.get_fw_version(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY
+ * capabilities and later uses them to identify the PHY type by
+ * checking phy_type_high and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_mask = 0;
+ s32 rc;
+ u8 i;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* If there is no link but PHY (dongle) is available SW should use
+ * Get PHY Caps admin command instead of Get Link Status, find most
+ * significant bit that is set in PHY types reported by the command
+ * and use it to discover media type.
+ */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc) {
+ return ixgbe_media_type_unknown;
+ }
+
+ /* Check if there is some bit set in phy_type_high */
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_high & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = phy_mask;
+ hw->link.link_info.phy_type_low = 0;
+ break;
+ }
+ phy_mask = 0;
+ }
+
+ /* If nothing found in phy_type_high search in phy_type_low */
+ if (phy_mask == 0) {
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_low & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = 0;
+ hw->link.link_info.phy_type_low = phy_mask;
+ break;
+ }
+ }
+ }
+
+ }
+
+ /* Based on link status or search above try to discover media type */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines physical layer capabilities of the current configuration.
+ *
+ * Return: the bitmask of supported physical layer types, or
+ * IXGBE_PHYSICAL_LAYER_UNKNOWN if they cannot be determined.
+ **/
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type;
+ s32 rc;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+ if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+ if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ s32 rc;
+ u32 i;
+
+ if (!speed || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command. */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && *link_up == false) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msec_delay(100);
+ hw->link.get_link_info = true;
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW. */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
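+/*
+ * Usage sketch (illustrative only): wait for link and report the
+ * negotiated speed, given an initialized 'hw'.
+ *
+ *	ixgbe_link_speed speed;
+ *	bool link_up;
+ *
+ *	if (!ixgbe_check_link_E610(hw, &speed, &link_up, true) && link_up)
+ *		(speed holds one of the IXGBE_LINK_SPEED_* values)
+ */
+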
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return IXGBE_ERR_PARAM;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+ s32 status = IXGBE_SUCCESS;
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ switch (req_mode) {
+ case ixgbe_fc_auto:
+ {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = IXGBE_ERR_OUT_OF_MEM;
+ goto out;
+ }
+
+ /* Query the value of FC that both the NIC and the attached
+ * media can do. */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+ if (status)
+ goto out;
+
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+ break;
+ }
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the old pause settings */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+out:
+ if (pcaps)
+ ixgbe_free(hw, pcaps);
+ return status;
+}
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ s32 status;
+
+ /* Get the current PHY config */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (status)
+ return status;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context. */
+ status = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (status) {
+ goto out;
+ }
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver)
+{
+ size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+ struct ixgbe_driver_ver dv;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+ if (!len || !driver_ver)
+ return IXGBE_ERR_PARAM;
+
+ dv.major_ver = maj;
+ dv.minor_ver = minor;
+ dv.build_ver = build;
+ dv.subbuild_ver = sub;
+
+ memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+ memcpy(dv.driver_string, driver_ver, limited_len);
+
+ return ixgbe_aci_send_driver_ver(hw, &dv);
+}
+
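+/*
+ * Usage sketch (illustrative only): report a driver version to the FW,
+ * given an initialized 'hw'; the version numbers and string below are
+ * placeholders.
+ *
+ *	const char ver[] = "4.0.0";
+ *
+ *	ixgbe_set_fw_drv_ver_E610(hw, 4, 0, 0, 0, sizeof(ver), ver);
+ */
+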
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C), falling
+ * back to a direct RXCTRL register write if the command fails.
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ DEBUGFUNC("ixgbe_disable_rx_E610");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ u32 pfdtxgswc;
+ s32 status;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ status = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ u16 eee_cap = 0;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (enable_eee) {
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+ if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+ }
+
+ /* Set EEE capability for particular PHY types */
+ phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
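+/*
+ * Usage note (illustrative only): disabling EEE simply leaves eee_cap at
+ * zero in the PHY config written above, so a caller toggles EEE with:
+ *
+ *	ixgbe_setup_eee_E610(hw, true);		(enable EEE-capable types)
+ *	ixgbe_setup_eee_E610(hw, false);	(disable EEE)
+ */
+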
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM Rollback mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in Rollback mode, otherwise false.
+ */
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+ return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->ops.identify_sfp = ixgbe_identify_module_E610;
+ phy->ops.read_reg = NULL; /* PHY reg access is not required */
+ phy->ops.write_reg = NULL;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+ phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+ phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+ phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+ else
+ phy->ops.set_phy_power = NULL;
+ phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+ phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+ phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+ /* TODO: Set functions pointers based on device ID */
+
+ /* Identify the PHY */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* TODO: Set functions pointers based on PHY type */
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 rc;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+ if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in the case of the E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ s32 rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ goto err;
+
+ media_available = !!(hw->link.link_info.link_info &
+ IXGBE_ACI_MEDIA_AVAILABLE);
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by ixgbe_update_link_info() */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ rc = IXGBE_SUCCESS;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ rc = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ s32 rc;
+
+ rc = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (rc) {
+ goto err;
+ }
+
+ /* If media is not available get default config */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (rc) {
+ goto err;
+ }
+
+ sup_phy_type_low = pcaps.phy_type_low;
+ sup_phy_type_high = pcaps.phy_type_high;
+
+ /* Get Active configuration to avoid unintended changes */
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (rc) {
+ goto err;
+ }
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ /* Set default PHY types for a given speed */
+ pcfg.phy_type_low = 0;
+ pcfg.phy_type_high = 0;
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types */
+ pcfg.phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_high &= sup_phy_type_high;
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ }
+
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 status;
+
+ if (!firmware_version)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (status)
+ return status;
+
+ /* TODO: determine which bytes of the 8-byte phy_fw_ver
+ * field should be written to the 2-byte firmware_version
+ * output argument. */
+ memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Get the link status and check if the PHY temperature alarm detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "PHY Temperature Alarm detected");
+ status = IXGBE_ERR_OVERTEMP;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on) {
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ } else {
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+ }
+
+ /* PHY is already in requested power mode */
+ if (phy_caps.caps == phy_cfg.caps)
+ return IXGBE_SUCCESS;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
+ * X557 PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+ GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
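+/*
+ * Sizing note: GLNVM_GENS.SR_SIZE encodes the shadow RAM size as a power
+ * of two in kB, so a raw value of 6 (an illustrative value) gives
+ * BIT(6) * IXGBE_SR_WORDS_IN_1KB = 64 * 512 = 32768 words, i.e. 64 kB.
+ */
+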
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16-bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the read.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610- Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16-bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the read.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16-bit word to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the write.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16-bit word(s) to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with the write.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculate SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips all maximum
+ * possible size of VPD (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ bool nvm_acquired = false;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 checksum = 0;
+ u16 vpd_module;
+ void *vmem;
+ s32 status;
+ u16 *data;
+ u16 i;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data = (u16 *)vmem;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+ nvm_acquired = true;
+
+ /* read pointer to VPD area */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->eeprom.word_size; i++) {
+ /* Read SR page */
+ if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+ if (status != IXGBE_SUCCESS)
+ goto ixgbe_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == E610_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+ continue;
+
+ checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+ if (nvm_acquired)
+ ixgbe_release_nvm(hw);
+ ixgbe_free(hw, vmem);
+
+ if (!status)
+ return (s32)checksum;
+ else
+ return status;
+}
+
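+/*
+ * Checksum note: the stored value is chosen so that the sum of all
+ * counted shadow RAM words plus the checksum word itself wraps (mod
+ * 2^16) to the fixed base:
+ *
+ *	checksum_local + checksum == IXGBE_SR_SW_CHECKSUM_BASE
+ *
+ * which is the identity the validation path checks against.
+ */
+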
+/**
+ * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to Shadow RAM, software sends the admin command
+ * to recalculate and update EEPROM checksum and instructs the hardware
+ * to update the flash.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_recalculate_checksum(hw);
+ if (status)
+ return status;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
+ NULL);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_validate_checksum(hw);
+
+ if (status)
+ return status;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ *checksum_val = tmp_checksum;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ /* Read TLV length */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return IXGBE_SUCCESS;
+ }
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return IXGBE_ERR_DOES_NOT_EXIST;
+}
+
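+/*
+ * PFA layout sketch (one word per cell), as walked above: the PFA
+ * pointer leads to a length word followed by packed TLVs, so the next
+ * TLV always starts at current + tlv_len + 2:
+ *
+ *	pfa_ptr:   [ pfa_len ]
+ *	           [ type    ][ length  ][ value ... ]
+ *	           [ type    ][ length  ][ value ... ]
+ *	           ...
+ */
+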
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ s32 status;
+ u16 i;
+
+ status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ E610_SR_PBA_BLOCK_PTR);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
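+
+/*
+ * Usage sketch (illustrative only): the output buffer must hold two
+ * characters per PBA word plus a NUL terminator, as checked above; the
+ * 32 bytes below are an assumed, comfortable upper bound.
+ *
+ *	u8 pba[32];
+ *
+ *	if (!ixgbe_read_pba_string_E610(hw, pba, sizeof(pba)))
+ *		(pba now holds the NUL-terminated part number)
+ */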
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..94e600139499
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw* hw,
+ struct ixgbe_hw_func_caps* func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count);
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, struct ixgbe_netlist_info *netlist);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data);
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words, const u16 *data);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data);
+
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+
+/* E610 operations */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build,
+ u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_features.h b/sys/dev/ixgbe/ixgbe_features.h
index ed35a6ed458c..bee9040319d8 100644
--- a/sys/dev/ixgbe/ixgbe_features.h
+++ b/sys/dev/ixgbe/ixgbe_features.h
@@ -56,6 +56,8 @@
#define IXGBE_FEATURE_EEE (u32)(1 << 11)
#define IXGBE_FEATURE_LEGACY_IRQ (u32)(1 << 12)
#define IXGBE_FEATURE_NEEDS_CTXD (u32)(1 << 13)
+#define IXGBE_FEATURE_RECOVERY_MODE (u32)(1 << 15)
+#define IXGBE_FEATURE_DBG_DUMP (u32)(1 << 16)
/* Check for OS support. Undefine features if not included in the OS */
#ifndef PCI_IOV
diff --git a/sys/dev/ixgbe/ixgbe_mbx.c b/sys/dev/ixgbe/ixgbe_mbx.c
index d12aadea7097..7f58a9202c9e 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.c
+++ b/sys/dev/ixgbe/ixgbe_mbx.c
@@ -35,6 +35,9 @@
#include "ixgbe_type.h"
#include "ixgbe_mbx.h"
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id);
+
/**
* ixgbe_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
@@ -47,42 +50,94 @@
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_read_mbx");
/* limit read to size of mailbox */
- if (size > mbx->size)
+ if (size > mbx->size) {
+ ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %u, changing to %u",
+ size, mbx->size);
size = mbx->size;
+ }
+
+ if (mbx->ops[mbx_id].read)
+ return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
+
+ return IXGBE_ERR_CONFIG;
+}
+
+/**
+ * ixgbe_poll_mbx - Wait for message and read it from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ DEBUGFUNC("ixgbe_poll_mbx");
- if (mbx->ops.read)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+ if (!mbx->ops[mbx_id].read || !mbx->ops[mbx_id].check_for_msg ||
+ !mbx->timeout)
+ return IXGBE_ERR_CONFIG;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size) {
+ ERROR_REPORT3(IXGBE_ERROR_ARGUMENT,
+ "Invalid mailbox message size %u, changing to %u",
+ size, mbx->size);
+ size = mbx->size;
+ }
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ /* if ack received read message, otherwise we timed out */
+ if (!ret_val)
+ return mbx->ops[mbx_id].read(hw, msg, size, mbx_id);
return ret_val;
}
/**
- * ixgbe_write_mbx - Write a message to the mailbox
+ * ixgbe_write_mbx - Write a message to the mailbox and wait for ACK
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
- * returns SUCCESS if it successfully copied message into the buffer
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ACK to that message within the specified period
+ *
+ * Note that the caller of this function must lock before calling, since
+ * multiple threads can destroy each other's messages.
**/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_SUCCESS;
+ s32 ret_val = IXGBE_ERR_MBX;
DEBUGFUNC("ixgbe_write_mbx");
+ /*
+ * exit if we can't write or release the mailbox,
+ * or there is no timeout defined
+ */
+ if (!mbx->ops[mbx_id].write || !mbx->ops[mbx_id].check_for_ack ||
+ !mbx->ops[mbx_id].release || !mbx->timeout)
+ return IXGBE_ERR_CONFIG;
+
if (size > mbx->size) {
- ret_val = IXGBE_ERR_MBX;
+ ret_val = IXGBE_ERR_PARAM;
ERROR_REPORT2(IXGBE_ERROR_ARGUMENT,
- "Invalid mailbox message size %d", size);
- } else if (mbx->ops.write)
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ "Invalid mailbox message size %u", size);
+ } else {
+ ret_val = mbx->ops[mbx_id].write(hw, msg, size, mbx_id);
+ }
return ret_val;
}
@@ -97,12 +152,12 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_msg");
- if (mbx->ops.check_for_msg)
- ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_msg)
+ ret_val = mbx->ops[mbx_id].check_for_msg(hw, mbx_id);
return ret_val;
}
@@ -117,12 +172,12 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_ack");
- if (mbx->ops.check_for_ack)
- ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_ack)
+ ret_val = mbx->ops[mbx_id].check_for_ack(hw, mbx_id);
return ret_val;
}
@@ -137,12 +192,32 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_CONFIG;
DEBUGFUNC("ixgbe_check_for_rst");
- if (mbx->ops.check_for_rst)
- ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+ if (mbx->ops[mbx_id].check_for_rst)
+ ret_val = mbx->ops[mbx_id].check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_clear_mbx - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * Set VFMBMEM of given VF to 0x0.
+ **/
+s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_CONFIG;
+
+ DEBUGFUNC("ixgbe_clear_mbx");
+
+ if (mbx->ops[mbx_id].clear)
+ ret_val = mbx->ops[mbx_id].clear(hw, mbx_id);
return ret_val;
}
@@ -161,22 +236,23 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
DEBUGFUNC("ixgbe_poll_for_msg");
- if (!countdown || !mbx->ops.check_for_msg)
- goto out;
+ if (!countdown || !mbx->ops[mbx_id].check_for_msg)
+ return IXGBE_ERR_CONFIG;
- while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (countdown && mbx->ops[mbx_id].check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
- if (countdown == 0)
+ if (countdown == 0) {
ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Polling for VF%d mailbox message timedout", mbx_id);
+ "Polling for VF%u mailbox message timedout", mbx_id);
+ return IXGBE_ERR_TIMEOUT;
+ }
-out:
- return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+ return IXGBE_SUCCESS;
}
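The reworked poll loop now distinguishes a configuration problem (no check_for_msg op or a zero countdown, IXGBE_ERR_CONFIG) from an actual expiry (IXGBE_ERR_TIMEOUT) instead of folding both into IXGBE_ERR_MBX. A minimal standalone sketch of that spin-check-delay pattern, assuming a simulated event source; none of these names are driver API:

#include <stdbool.h>
#include <stdio.h>

static int fires_after;                 /* simulated hardware event countdown */

static bool
msg_ready(void)
{
        return (fires_after-- <= 0);
}

/* 0 = got it, -1 = not configured, -2 = timed out (distinct codes) */
static int
poll_with_timeout(bool (*ready)(void), int countdown)
{
        if (countdown == 0 || ready == NULL)
                return (-1);            /* config error, like IXGBE_ERR_CONFIG */

        while (!ready()) {
                if (--countdown == 0)
                        return (-2);    /* like IXGBE_ERR_TIMEOUT */
                /* usec_delay(mbx->usec_delay) would sit here */
        }
        return (0);
}

int
main(void)
{
        fires_after = 3;
        printf("poll: %d\n", poll_with_timeout(msg_ready, 10)); /* 0 */
        fires_after = 99;
        printf("poll: %d\n", poll_with_timeout(msg_ready, 5));  /* -2 */
        return (0);
}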
/**
@@ -193,115 +269,71 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
DEBUGFUNC("ixgbe_poll_for_ack");
- if (!countdown || !mbx->ops.check_for_ack)
- goto out;
+ if (!countdown || !mbx->ops[mbx_id].check_for_ack)
+ return IXGBE_ERR_CONFIG;
- while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (countdown && mbx->ops[mbx_id].check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
break;
usec_delay(mbx->usec_delay);
}
- if (countdown == 0)
+ if (countdown == 0) {
ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Polling for VF%d mailbox ack timedout", mbx_id);
+ "Polling for VF%u mailbox ack timedout", mbx_id);
+ return IXGBE_ERR_TIMEOUT;
+ }
-out:
- return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX;
+ return IXGBE_SUCCESS;
}
/**
- * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * ixgbe_read_mailbox_vf - read VF's mailbox register
* @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
*
- * returns SUCCESS if it successfully received a message notification and
- * copied it into the receive buffer.
+ * This function is used to read the mailbox register dedicated to the VF
+ * without losing the read-to-clear status bits.
**/
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static u32 ixgbe_read_mailbox_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
-
- DEBUGFUNC("ixgbe_read_posted_mbx");
+ u32 vf_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
- if (!mbx->ops.read)
- goto out;
-
- ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+ vf_mailbox |= hw->mbx.vf_mailbox;
+ hw->mbx.vf_mailbox |= vf_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
- /* if ack received read message, otherwise we timed out */
- if (!ret_val)
- ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
- return ret_val;
+ return vf_mailbox;
}
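IXGBE_VFMAILBOX carries read-to-clear status bits, so a naive second read would miss anything the first read already cleared; the helper above folds every read into hw->mbx.vf_mailbox, and the clear_*_vf helpers drop a bit only once it has been consumed. A sketch of that caching idea against a fake register; reg_read_r2c() only simulates the hardware clear-on-read behavior:

#include <stdint.h>
#include <stdio.h>

#define R2C_BITS 0x0000000Fu            /* bits the hardware clears on read */

static uint32_t hw_reg;                 /* stand-in for IXGBE_VFMAILBOX */
static uint32_t cached;                 /* stand-in for hw->mbx.vf_mailbox */

static uint32_t
reg_read_r2c(void)
{
        uint32_t v = hw_reg;

        hw_reg &= ~R2C_BITS;            /* simulate clear-on-read */
        return (v);
}

/* Mirrors the idea above: never lose an already-latched status bit. */
static uint32_t
mailbox_read(void)
{
        uint32_t v = reg_read_r2c();

        v |= cached;                    /* merge bits remembered earlier */
        cached |= v & R2C_BITS;         /* keep them until explicitly consumed */
        return (v);
}

int
main(void)
{
        hw_reg = 0x5;                   /* two status bits pending */
        (void)mailbox_read();           /* first read wipes the hardware copy */
        printf("second read still sees 0x%x\n",
            (unsigned)(mailbox_read() & R2C_BITS));
        return (0);
}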
-/**
- * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
- * @hw: pointer to the HW structure
- * @msg: The message buffer
- * @size: Length of buffer
- * @mbx_id: id of mailbox to write
- *
- * returns SUCCESS if it successfully copied message into the buffer and
- * received an ack to that message within delay * timeout period
- **/
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static void ixgbe_clear_msg_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = IXGBE_ERR_MBX;
-
- DEBUGFUNC("ixgbe_write_posted_mbx");
-
- /* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
- goto out;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- /* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
-
- /* if msg sent wait until we receive an ack */
- if (!ret_val)
- ret_val = ixgbe_poll_for_ack(hw, mbx_id);
-out:
- return ret_val;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFSTS) {
+ hw->mbx.stats.reqs++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFSTS;
+ }
}
-/**
- * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
- * @hw: pointer to the HW structure
- *
- * Setups up the mailbox read and write message function pointers
- **/
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+static void ixgbe_clear_ack_vf(struct ixgbe_hw *hw)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
+ if (vf_mailbox & IXGBE_VFMAILBOX_PFACK) {
+ hw->mbx.stats.acks++;
+ hw->mbx.vf_mailbox &= ~IXGBE_VFMAILBOX_PFACK;
+ }
}
-/**
- * ixgbe_read_v2p_mailbox - read v2p mailbox
- * @hw: pointer to the HW structure
- *
- * This function is used to read the v2p mailbox without losing the read to
- * clear status bits.
- **/
-static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
+static void ixgbe_clear_rst_vf(struct ixgbe_hw *hw)
{
- u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- v2p_mailbox |= hw->mbx.v2p_mailbox;
- hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
-
- return v2p_mailbox;
+ if (vf_mailbox & (IXGBE_VFMAILBOX_RSTI | IXGBE_VFMAILBOX_RSTD)) {
+ hw->mbx.stats.rsts++;
+ hw->mbx.vf_mailbox &= ~(IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD);
+ }
}
/**
@@ -314,15 +346,12 @@ static u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw)
**/
static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
{
- u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw);
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox = ixgbe_read_mailbox_vf(hw);
- if (v2p_mailbox & mask)
- ret_val = IXGBE_SUCCESS;
-
- hw->mbx.v2p_mailbox &= ~mask;
+ if (vf_mailbox & mask)
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -334,17 +363,13 @@ static s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
**/
static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_msg_vf");
- if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.reqs++;
- }
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS))
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -356,17 +381,16 @@ static s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_ack_vf");
if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.acks++;
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_ack_vf(hw);
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -378,18 +402,17 @@ static s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
-
UNREFERENCED_1PARAMETER(mbx_id);
DEBUGFUNC("ixgbe_check_for_rst_vf");
- if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
- IXGBE_VFMAILBOX_RSTI))) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.rsts++;
+ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_RSTI |
+ IXGBE_VFMAILBOX_RSTD)) {
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_rst_vf(hw);
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
@@ -400,21 +423,115 @@ static s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id)
**/
static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
s32 ret_val = IXGBE_ERR_MBX;
+ u32 vf_mailbox;
DEBUGFUNC("ixgbe_obtain_mbx_lock_vf");
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+ if (!mbx->timeout)
+ return IXGBE_ERR_CONFIG;
- /* reserve mailbox for vf use */
- if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
- ret_val = IXGBE_SUCCESS;
+ while (countdown--) {
+ /* Reserve mailbox for VF use */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* Verify that VF is the owner of the lock */
+ if (ixgbe_read_mailbox_vf(hw) & IXGBE_VFMAILBOX_VFU) {
+ ret_val = IXGBE_SUCCESS;
+ break;
+ }
+
+ /* Wait a bit before trying again */
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (ret_val != IXGBE_SUCCESS) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Failed to obtain mailbox lock");
+ ret_val = IXGBE_ERR_TIMEOUT;
+ }
return ret_val;
}
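Lock acquisition is now write-then-verify with bounded retries: set VFU, read back to confirm ownership actually stuck, otherwise delay and try again until mbx->timeout attempts are spent. A condensed sketch of that handshake; pf_owns merely simulates contention, and nothing here is real register access:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VFU 0x1u                        /* "VF owns the buffer" bit */

static bool pf_owns;                    /* simulated contention from the PF */
static uint32_t mbx_reg;

static bool
vf_try_lock(void)
{
        if (pf_owns)                    /* hardware side refuses the grab */
                return (false);
        mbx_reg |= VFU;                 /* request ownership */
        return ((mbx_reg & VFU) != 0);  /* read back to verify it stuck */
}

static bool
obtain_mbx_lock(int countdown)
{
        while (countdown--) {
                if (vf_try_lock())
                        return (true);
                /* usec_delay(mbx->usec_delay) here in the real driver */
        }
        return (false);                 /* caller maps this to a timeout error */
}

int
main(void)
{
        pf_owns = true;
        printf("while PF holds it: %d\n", obtain_mbx_lock(3));  /* 0 */
        pf_owns = false;
        printf("after PF releases: %d\n", obtain_mbx_lock(3));  /* 1 */
        return (0);
}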
/**
+ * ixgbe_release_mbx_lock_dummy - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_dummy(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ UNREFERENCED_2PARAMETER(hw, mbx_id);
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_dummy");
+}
+
+/**
+ * ixgbe_release_mbx_lock_vf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to read
+ **/
+static void ixgbe_release_mbx_lock_vf(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ u32 vf_mailbox;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_vf");
+
+ /* Return ownership of the buffer */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox &= ~IXGBE_VFMAILBOX_VFU;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+}
+
+/**
+ * ixgbe_write_mbx_vf_legacy - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ UNREFERENCED_1PARAMETER(mbx_id);
+ DEBUGFUNC("ixgbe_write_mbx_vf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ return ret_val;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_vf(hw, 0);
+ ixgbe_clear_msg_vf(hw);
+ ixgbe_check_for_ack_vf(hw, 0);
+ ixgbe_clear_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+ return IXGBE_SUCCESS;
+}
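The legacy write path above is strictly ordered: take the lock, flush stale message/ack state, copy the payload into VFMBMEM, update stats, then raise REQ to interrupt the PF. A compressed rendering of that ordering over stub variables; the lock step is elided and all names are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define MBX_WORDS 16
static uint32_t vfmbmem[MBX_WORDS];     /* stand-in mailbox memory */
static uint32_t req_raised;

static void
flush_stale_state(void)
{
        /* clear_msg + clear_ack in the driver */
}

static int
mbx_write_legacy(const uint32_t *msg, unsigned size)
{
        unsigned i;

        if (size > MBX_WORDS)
                return (-1);
        /* 1. lock omitted; 2. flush before overwriting the shared buffer */
        flush_stale_state();
        /* 3. copy payload */
        for (i = 0; i < size; i++)
                vfmbmem[i] = msg[i];
        /* 4. stats++ in the driver; 5. interrupt the PF */
        req_raised = 1;
        return (0);
}

int
main(void)
{
        uint32_t msg[2] = { 0x01, 0xabcd };

        mbx_write_legacy(msg, 2);
        printf("REQ=%u word0=0x%x\n", req_raised, (unsigned)vfmbmem[0]);
        return (0);
}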
+
+/**
* ixgbe_write_mbx_vf - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
@@ -426,6 +543,7 @@ static s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
{
+ u32 vf_mailbox;
s32 ret_val;
u16 i;
@@ -436,11 +554,11 @@ static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
- goto out_no_write;
+ goto out;
/* flush msg and acks as we are overwriting the message buffer */
- ixgbe_check_for_msg_vf(hw, 0);
- ixgbe_check_for_ack_vf(hw, 0);
+ ixgbe_clear_msg_vf(hw);
+ ixgbe_clear_ack_vf(hw);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -449,15 +567,22 @@ static s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_tx++;
- /* Drop VFU and interrupt the PF to tell it a message has been sent */
- IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+ /* interrupt the PF to tell it a message has been sent */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_REQ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ ixgbe_poll_for_ack(hw, mbx_id);
+
+out:
+ hw->mbx.ops[mbx_id].release(hw, mbx_id);
-out_no_write:
return ret_val;
}
/**
- * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * ixgbe_read_mbx_vf_legacy - Reads a message from the inbox intended for vf
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
@@ -465,19 +590,19 @@ out_no_write:
*
* returns SUCCESS if it successfully read message from buffer
**/
-static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+static s32 ixgbe_read_mbx_vf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
{
- s32 ret_val = IXGBE_SUCCESS;
+ s32 ret_val;
u16 i;
- DEBUGFUNC("ixgbe_read_mbx_vf");
+ DEBUGFUNC("ixgbe_read_mbx_vf_legacy");
UNREFERENCED_1PARAMETER(mbx_id);
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbe_obtain_mbx_lock_vf(hw);
if (ret_val)
- goto out_no_read;
+ return ret_val;
/* copy the message from the mailbox memory buffer */
for (i = 0; i < size; i++)
@@ -489,34 +614,74 @@ static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
/* update stats */
hw->mbx.stats.msgs_rx++;
-out_no_read:
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+static s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ u32 vf_mailbox;
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_vf");
+ UNREFERENCED_1PARAMETER(mbx_id);
+
+ /* check if there is a message from PF */
+ ret_val = ixgbe_check_for_msg_vf(hw, 0);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_MBX_NOMSG;
+
+ ixgbe_clear_msg_vf(hw);
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt */
+ vf_mailbox = ixgbe_read_mailbox_vf(hw);
+ vf_mailbox |= IXGBE_VFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, vf_mailbox);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return IXGBE_SUCCESS;
}
/**
* ixgbe_init_mbx_params_vf - set initial values for vf mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for vf mailbox
+ * Initializes single set the hw->mbx struct to correct values for vf mailbox
+ * Set of legacy functions is being used here
*/
void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- /* start mailbox as timed out and let the reset_hw call set the timeout
- * value to begin communications */
- mbx->timeout = 0;
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
- mbx->ops.read = ixgbe_read_mbx_vf;
- mbx->ops.write = ixgbe_write_mbx_vf;
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
- mbx->ops.check_for_msg = ixgbe_check_for_msg_vf;
- mbx->ops.check_for_ack = ixgbe_check_for_ack_vf;
- mbx->ops.check_for_rst = ixgbe_check_for_rst_vf;
+ /* VF has only one mailbox connection, no need for more IDs */
+ mbx->ops[0].release = ixgbe_release_mbx_lock_dummy;
+ mbx->ops[0].read = ixgbe_read_mbx_vf_legacy;
+ mbx->ops[0].write = ixgbe_write_mbx_vf_legacy;
+ mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+ mbx->ops[0].clear = NULL;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
@@ -525,62 +690,119 @@ void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw)
mbx->stats.rsts = 0;
}
+/**
+ * ixgbe_upgrade_mbx_params_vf - set upgraded values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ /* VF has only one mailbox connection, no need for more IDs */
+ mbx->ops[0].release = ixgbe_release_mbx_lock_vf;
+ mbx->ops[0].read = ixgbe_read_mbx_vf;
+ mbx->ops[0].write = ixgbe_write_mbx_vf;
+ mbx->ops[0].check_for_msg = ixgbe_check_for_msg_vf;
+ mbx->ops[0].check_for_ack = ixgbe_check_for_ack_vf;
+ mbx->ops[0].check_for_rst = ixgbe_check_for_rst_vf;
+ mbx->ops[0].clear = NULL;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+static void ixgbe_clear_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+ u32 pfmbicr;
+
+ pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+ if (pfmbicr & (IXGBE_PFMBICR_VFREQ_VF1 << vf_shift))
+ hw->mbx.stats.reqs++;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+ IXGBE_PFMBICR_VFREQ_VF1 << vf_shift);
+}
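PFMBICR status for up to 64 VFs is spread across several 32-bit registers; judging by the VFREQ/VFACK masks defined in ixgbe_mbx.h below (low 16 bits for requests, high 16 bits for acks), IXGBE_PFMBICR_INDEX/IXGBE_PFMBICR_SHIFT presumably reduce a vf_id to a (register, bit) pair at 16 VFs per register, as this sketch assumes:

#include <stdio.h>

/*
 * Assumed split: 16 VFs per PFMBICR register, low half request bits,
 * high half ack bits (inferred from the VFREQ/VFACK masks).
 */
#define PFMBICR_INDEX(vf_id)    ((vf_id) / 16)
#define PFMBICR_SHIFT(vf_id)    ((vf_id) % 16)

int
main(void)
{
        unsigned vf;

        for (vf = 0; vf < 64; vf += 17)
                printf("vf %2u -> PFMBICR(%u), req bit %2u, ack bit %2u\n",
                    vf, PFMBICR_INDEX(vf), PFMBICR_SHIFT(vf),
                    16 + PFMBICR_SHIFT(vf));
        return (0);
}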
+
+static void ixgbe_clear_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
+ u32 pfmbicr;
+
+ pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
+
+ if (pfmbicr & (IXGBE_PFMBICR_VFACK_VF1 << vf_shift))
+ hw->mbx.stats.acks++;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFMBICR(index),
+ IXGBE_PFMBICR_VFACK_VF1 << vf_shift);
+}
+
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
- u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
- s32 ret_val = IXGBE_ERR_MBX;
+ u32 pfmbicr = IXGBE_READ_REG(hw, IXGBE_PFMBICR(index));
- if (mbvficr & mask) {
- ret_val = IXGBE_SUCCESS;
- IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ if (pfmbicr & mask) {
+ return IXGBE_SUCCESS;
}
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
* ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_id)
{
- s32 ret_val = IXGBE_ERR_MBX;
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
- u32 vf_bit = vf_number % 16;
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
DEBUGFUNC("ixgbe_check_for_msg_pf");
- if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
- ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.reqs++;
- }
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFREQ_VF1 << vf_shift,
+ index))
+ return IXGBE_SUCCESS;
- return ret_val;
+ return IXGBE_ERR_MBX;
}
/**
* ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_id)
{
+ u32 vf_shift = IXGBE_PFMBICR_SHIFT(vf_id);
+ s32 index = IXGBE_PFMBICR_INDEX(vf_id);
s32 ret_val = IXGBE_ERR_MBX;
- s32 index = IXGBE_MBVFICR_INDEX(vf_number);
- u32 vf_bit = vf_number % 16;
DEBUGFUNC("ixgbe_check_for_ack_pf");
- if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_PFMBICR_VFACK_VF1 << vf_shift,
index)) {
ret_val = IXGBE_SUCCESS;
- hw->mbx.stats.acks++;
+ /* TODO: should this be autocleared? */
+ ixgbe_clear_ack_pf(hw, vf_id);
}
return ret_val;
@@ -589,28 +811,28 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
/**
* ixgbe_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
-static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_id)
{
- u32 reg_offset = (vf_number < 32) ? 0 : 1;
- u32 vf_shift = vf_number % 32;
- u32 vflre = 0;
+ u32 vf_shift = IXGBE_PFVFLRE_SHIFT(vf_id);
+ u32 index = IXGBE_PFVFLRE_INDEX(vf_id);
s32 ret_val = IXGBE_ERR_MBX;
+ u32 vflre = 0;
DEBUGFUNC("ixgbe_check_for_rst_pf");
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
- vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+ vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLRE(index));
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
case ixgbe_mac_X540:
- vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
+ vflre = IXGBE_READ_REG(hw, IXGBE_PFVFLREC(index));
break;
default:
break;
@@ -618,7 +840,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
if (vflre & (1 << vf_shift)) {
ret_val = IXGBE_SUCCESS;
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFLREC(index), (1 << vf_shift));
hw->mbx.stats.rsts++;
}
@@ -628,121 +850,297 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
/**
* ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
s32 ret_val = IXGBE_ERR_MBX;
- u32 p2v_mailbox;
+ u32 pf_mailbox;
DEBUGFUNC("ixgbe_obtain_mbx_lock_pf");
- /* Take ownership of the buffer */
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+ if (!mbx->timeout)
+ return IXGBE_ERR_CONFIG;
- /* reserve mailbox for vf use */
- p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
- if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
- ret_val = IXGBE_SUCCESS;
- else
- ERROR_REPORT2(IXGBE_ERROR_POLLING,
- "Failed to obtain mailbox lock for VF%d", vf_number);
+ while (countdown--) {
+ /* Reserve mailbox for PF use */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ /* Check if other thread holds the PF lock already */
+ if (pf_mailbox & IXGBE_PFMAILBOX_PFU)
+ goto retry;
+
+ pf_mailbox |= IXGBE_PFMAILBOX_PFU;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+ /* Verify that PF is the owner of the lock */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ if (pf_mailbox & IXGBE_PFMAILBOX_PFU) {
+ ret_val = IXGBE_SUCCESS;
+ break;
+ }
+
+ retry:
+ /* Wait a bit before trying again */
+ usec_delay(mbx->usec_delay);
+ }
+
+ if (ret_val != IXGBE_SUCCESS) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Failed to obtain mailbox lock");
+ ret_val = IXGBE_ERR_TIMEOUT;
+ }
return ret_val;
}
/**
+ * ixgbe_release_mbx_lock_pf - release mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ **/
+static void ixgbe_release_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u32 pf_mailbox;
+
+ DEBUGFUNC("ixgbe_release_mbx_lock_pf");
+
+ /* Return ownership of the buffer */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox &= ~IXGBE_PFMAILBOX_PFU;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+}
+
+/**
+ * ixgbe_write_mbx_pf_legacy - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_write_mbx_pf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+ if (ret_val)
+ return ret_val;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_id);
+ ixgbe_clear_msg_pf(hw, vf_id);
+ ixgbe_check_for_ack_pf(hw, vf_id);
+ ixgbe_clear_ack_pf(hw, vf_id);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_id)
{
+ u32 pf_mailbox;
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_write_mbx_pf");
/* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
if (ret_val)
- goto out_no_write;
+ goto out;
/* flush msg and acks as we are overwriting the message buffer */
- ixgbe_check_for_msg_pf(hw, vf_number);
- ixgbe_check_for_ack_pf(hw, vf_number);
+ ixgbe_clear_msg_pf(hw, vf_id);
+ ixgbe_clear_ack_pf(hw, vf_id);
/* copy the caller specified message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, msg[i]);
- /* Interrupt VF to tell it a message has been sent and release buffer*/
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+ /* interrupt VF to tell it a message has been sent */
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox |= IXGBE_PFMAILBOX_STS;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
+
+ /* if msg sent wait until we receive an ack */
+ if (msg[0] & IXGBE_VT_MSGTYPE_CTS)
+ ixgbe_poll_for_ack(hw, vf_id);
/* update stats */
hw->mbx.stats.msgs_tx++;
-out_no_write:
+out:
+ hw->mbx.ops[vf_id].release(hw, vf_id);
+
return ret_val;
}
/**
+ * ixgbe_read_mbx_pf_legacy - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_id: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf_legacy(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_id)
+{
+ s32 ret_val;
+ u16 i;
+
+ DEBUGFUNC("ixgbe_read_mbx_pf_legacy");
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_id);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
* ixgbe_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
- * @vf_number: the VF index
+ * @vf_id: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_id)
{
+ u32 pf_mailbox;
s32 ret_val;
u16 i;
DEBUGFUNC("ixgbe_read_mbx_pf");
- /* lock the mailbox to prevent pf/vf race condition */
- ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
- if (ret_val)
- goto out_no_read;
+ /* check if there is a message from VF */
+ ret_val = ixgbe_check_for_msg_pf(hw, vf_id);
+ if (ret_val != IXGBE_SUCCESS)
+ return IXGBE_ERR_MBX_NOMSG;
+
+ ixgbe_clear_msg_pf(hw, vf_id);
/* copy the message to the mailbox memory buffer */
for (i = 0; i < size; i++)
- msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i);
/* Acknowledge the message and release buffer */
- IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+ pf_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_id));
+ pf_mailbox |= IXGBE_PFMAILBOX_ACK;
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_id), pf_mailbox);
/* update stats */
hw->mbx.stats.msgs_rx++;
-out_no_read:
- return ret_val;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_clear_mbx_pf - Clear Mailbox Memory
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Set VFMBMEM of given VF to 0x0.
+ **/
+static s32 ixgbe_clear_mbx_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ u16 mbx_size = hw->mbx.size;
+ u16 i;
+
+ if (vf_id > 63)
+ return IXGBE_ERR_PARAM;
+
+ for (i = 0; i < mbx_size; ++i)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_id), i, 0x0);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf_id - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes single set of the hw->mbx struct to correct values for pf mailbox
+ * Set of legacy functions is being used here
+ */
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops[vf_id].release = ixgbe_release_mbx_lock_dummy;
+ mbx->ops[vf_id].read = ixgbe_read_mbx_pf_legacy;
+ mbx->ops[vf_id].write = ixgbe_write_mbx_pf_legacy;
+ mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
}
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
- * Initializes the hw->mbx struct to correct values for pf mailbox
+ * Initializes all sets of the hw->mbx struct to correct values for pf
+ * mailbox. Each set corresponds to a single VF. It also initializes counters
+ * and general variables. A set of legacy functions is used by default.
*/
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
+ u16 i;
struct ixgbe_mbx_info *mbx = &hw->mbx;
+ /* Ensure we are not calling this function from VF */
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
@@ -750,18 +1148,59 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
hw->mac.type != ixgbe_mac_X540)
return;
- mbx->timeout = 0;
- mbx->usec_delay = 0;
+ /* Initialize common mailbox settings */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+ /* Initialize counters with zeroes */
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ /* Regardless of the VF count, we initialize params for all 64 VFs. */
+ /* TODO: 1. Add a define for the max VF count and refactor SHARED to get
+ * rid of the magic number (63 or 64 depending on the use case).
+ * 2. Rewrite the code to dynamically allocate mbx->ops[vf_id] for a
+ * certain number of VFs instead of the default maximum of 64 (0..63)
+ */
+ for (i = 0; i < 64; i++)
+ ixgbe_init_mbx_params_pf_id(hw, i);
+}
+
+/**
+ * ixgbe_upgrade_mbx_params_pf - Upgrade initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ * @vf_id: the VF index
+ *
+ * Initializes the hw->mbx struct to new function set for improved
+ * stability and handling of messages.
+ */
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* Ensure we are not calling this function from VF */
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a &&
+ hw->mac.type != ixgbe_mac_X540)
+ return;
+
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY;
mbx->size = IXGBE_VFMAILBOX_SIZE;
- mbx->ops.read = ixgbe_read_mbx_pf;
- mbx->ops.write = ixgbe_write_mbx_pf;
- mbx->ops.read_posted = ixgbe_read_posted_mbx;
- mbx->ops.write_posted = ixgbe_write_posted_mbx;
- mbx->ops.check_for_msg = ixgbe_check_for_msg_pf;
- mbx->ops.check_for_ack = ixgbe_check_for_ack_pf;
- mbx->ops.check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].release = ixgbe_release_mbx_lock_pf;
+ mbx->ops[vf_id].read = ixgbe_read_mbx_pf;
+ mbx->ops[vf_id].write = ixgbe_write_mbx_pf;
+ mbx->ops[vf_id].check_for_msg = ixgbe_check_for_msg_pf;
+ mbx->ops[vf_id].check_for_ack = ixgbe_check_for_ack_pf;
+ mbx->ops[vf_id].check_for_rst = ixgbe_check_for_rst_pf;
+ mbx->ops[vf_id].clear = ixgbe_clear_mbx_pf;
mbx->stats.msgs_tx = 0;
mbx->stats.msgs_rx = 0;
diff --git a/sys/dev/ixgbe/ixgbe_mbx.h b/sys/dev/ixgbe/ixgbe_mbx.h
index 47b9327bb896..e6519963242e 100644
--- a/sys/dev/ixgbe/ixgbe_mbx.h
+++ b/sys/dev/ixgbe/ixgbe_mbx.h
@@ -35,10 +35,43 @@
#ifndef _IXGBE_MBX_H_
#define _IXGBE_MBX_H_
-#include "ixgbe_type.h"
+struct ixgbe_hw;
+
+struct ixgbe_mbx_operations {
+ void (*init_params)(struct ixgbe_hw *hw);
+ void (*release)(struct ixgbe_hw *hw, u16 mbx_id);
+ s32 (*read)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ s32 (*write)(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+ s32 (*check_for_msg)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*check_for_ack)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*check_for_rst)(struct ixgbe_hw *hw, u16 vf_number);
+ s32 (*clear)(struct ixgbe_hw *hw, u16 vf_number);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ /*
+ * PF: One set of operations for each VF to handle various API versions
+ * at the same time
+ * VF: Only the very first (0) set should be used
+ */
+ struct ixgbe_mbx_operations ops[64];
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 vf_mailbox;
+ u16 size;
+};
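Turning the single ops struct into a 64-entry table lets the PF drive each VF connection with a different function set (legacy vs. upgraded) according to that VF's negotiated API version, while call sites simply index by mbx_id. A reduced illustration of the dispatch shape; read_legacy/read_upgraded are toy stand-ins:

#include <stdio.h>

struct mbx_ops {
        int (*read)(int vf_id);         /* one slot per mailbox operation */
};

static int
read_legacy(int vf)
{
        printf("vf %d: legacy read\n", vf);
        return (0);
}

static int
read_upgraded(int vf)
{
        printf("vf %d: upgraded read\n", vf);
        return (0);
}

int
main(void)
{
        struct mbx_ops ops[64];
        int vf;

        for (vf = 0; vf < 64; vf++)     /* legacy set everywhere by default */
                ops[vf].read = read_legacy;
        ops[7].read = read_upgraded;    /* VF 7 negotiated a newer API */

        ops[3].read(3);
        ops[7].read(7);
        return (0);
}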
#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
-#define IXGBE_ERR_MBX -100
#define IXGBE_VFMAILBOX 0x002FC
#define IXGBE_VFMBMEM 0x00200
@@ -60,22 +93,22 @@
#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+#define IXGBE_PFMBICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_PFMBICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_PFMBICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_PFMBICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*.
- * Message ACK's are the value or'd with 0xF0000000
+ * Message results are the value or'd with 0xF0000000
*/
-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- * clear to send requests */
+#define IXGBE_VT_MSGTYPE_SUCCESS 0x80000000 /* Messages or'd with this
+ * have succeeded */
+#define IXGBE_VT_MSGTYPE_FAILURE 0x40000000 /* Messages or'd with this
+ * have failed */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
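A mailbox word therefore packs the message ID in the low bits, optional extra info in bits 23:16, and the SUCCESS/FAILURE/CTS result flags in the top nibble. A quick decoding sketch under those definitions; the 0x01 message ID and 0x2A info byte are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define MSGTYPE_SUCCESS 0x80000000u
#define MSGINFO_SHIFT   16
#define MSGINFO_MASK    (0xFFu << MSGINFO_SHIFT)

int
main(void)
{
        /* PF answers message ID 0x01 with info byte 0x2A and the success flag. */
        uint32_t reply = 0x01u | (0x2Au << MSGINFO_SHIFT) | MSGTYPE_SUCCESS;

        printf("msg id  0x%02x\n", (unsigned)(reply & 0xFFu));
        printf("info    0x%02x\n", (unsigned)((reply & MSGINFO_MASK) >> MSGINFO_SHIFT));
        printf("success %s\n", (reply & MSGTYPE_SUCCESS) ? "yes" : "no");
        return (0);
}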
@@ -92,6 +125,9 @@ enum ixgbe_pfvf_api_rev {
ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */
ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */
ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */
+ /* API 1.4 is used upstream for IPsec */
+ ixgbe_mbox_api_14, /* API version 1.4, linux/freebsd VF driver */
+ ixgbe_mbox_api_15, /* API version 1.5, linux/freebsd VF driver */
/* This value should always be last */
ixgbe_mbox_api_unknown, /* indicates that API version is not known */
};
@@ -153,15 +189,17 @@ enum ixgbevf_xcast_modes {
#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
-s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
-s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
-s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
-void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
-void ixgbe_init_mbx_params_vf(struct ixgbe_hw *);
-void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_poll_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id);
+s32 ixgbe_clear_mbx(struct ixgbe_hw *hw, u16 vf_number);
+void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw);
+void ixgbe_upgrade_mbx_params_vf(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf_id(struct ixgbe_hw *hw, u16 vf_id);
+void ixgbe_upgrade_mbx_params_pf(struct ixgbe_hw *hw, u16 vf_id);
#endif /* _IXGBE_MBX_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 2fa651df8936..9bd9ce63b786 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -33,6 +33,12 @@
#include "ixgbe.h"
+inline device_t
+ixgbe_dev_from_hw(struct ixgbe_hw *hw)
+{
+ return ((struct ixgbe_softc *)hw->back)->dev;
+}
+
inline u16
ixgbe_read_pci_cfg(struct ixgbe_hw *hw, u32 reg)
{
@@ -108,3 +114,29 @@ ixgbe_link_speed_to_baudrate(ixgbe_link_speed speed)
return baudrate;
}
+
+void
+ixgbe_init_lock(struct ixgbe_lock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ "ixgbe ACI lock", MTX_DEF | MTX_DUPOK);
+}
+
+void
+ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
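These wrappers give the shared E610 code an OS-neutral lock to hold around ACI transactions, mapping directly onto a FreeBSD mtx here. A portable pthread analogue of the intended call pattern; struct ixgbe_lock_demo and the ACI critical section are hypothetical:

#include <pthread.h>

struct ixgbe_lock_demo {
        pthread_mutex_t mutex;
};

static void demo_init(struct ixgbe_lock_demo *l)    { pthread_mutex_init(&l->mutex, NULL); }
static void demo_acquire(struct ixgbe_lock_demo *l) { pthread_mutex_lock(&l->mutex); }
static void demo_release(struct ixgbe_lock_demo *l) { pthread_mutex_unlock(&l->mutex); }
static void demo_destroy(struct ixgbe_lock_demo *l) { pthread_mutex_destroy(&l->mutex); }

int
main(void)
{
        struct ixgbe_lock_demo aci;

        demo_init(&aci);
        demo_acquire(&aci);
        /* ... send one ACI command and read its response ... */
        demo_release(&aci);
        demo_destroy(&aci);
        return (0);
}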
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index eca15f0f3816..8cf1d13736ce 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -133,7 +133,9 @@ enum {
/* XXX these need to be revisited */
#define IXGBE_CPU_TO_LE16 htole16
#define IXGBE_CPU_TO_LE32 htole32
+#define IXGBE_LE16_TO_CPU le16toh
#define IXGBE_LE32_TO_CPU le32toh
+#define IXGBE_LE64_TO_CPU le64toh
#define IXGBE_LE32_TO_CPUS(x) *(x) = le32dec(x)
#define IXGBE_CPU_TO_BE16 htobe16
#define IXGBE_CPU_TO_BE32 htobe32
@@ -146,6 +148,7 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+typedef int64_t s64;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
@@ -195,8 +198,14 @@ struct ixgbe_osdep
bus_space_handle_t mem_bus_space_handle;
};
+struct ixgbe_lock
+{
+ struct mtx mutex;
+};
+
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
+device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
/* These routines are needed by the shared code */
extern u16 ixgbe_read_pci_cfg(struct ixgbe_hw *, u32);
@@ -221,4 +230,27 @@ extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
ixgbe_write_reg_array(a, reg, offset, val)
+void ixgbe_init_lock(struct ixgbe_lock *);
+void ixgbe_destroy_lock(struct ixgbe_lock *);
+void ixgbe_acquire_lock(struct ixgbe_lock *);
+void ixgbe_release_lock(struct ixgbe_lock *);
+
+static inline void *
+ixgbe_calloc(struct ixgbe_hw __unused *hw, size_t count, size_t size)
+{
+ return (malloc(count * size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void *
+ixgbe_malloc(struct ixgbe_hw __unused *hw, size_t size)
+{
+ return (malloc(size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void
+ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
+
#endif /* _IXGBE_OSDEP_H_ */
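ixgbe_calloc/ixgbe_malloc above zero the allocation and pass M_NOWAIT, so they may return NULL and every caller must check before use. A portable sketch of that contract using libc calloc; demo_calloc is a stand-in, not the driver helper:

#include <stdio.h>
#include <stdlib.h>

static void *
demo_calloc(size_t count, size_t size)
{
        return (calloc(count, size));   /* zeroed; may return NULL */
}

int
main(void)
{
        unsigned *caps = demo_calloc(8, sizeof(*caps));

        if (caps == NULL)               /* M_NOWAIT-style failure path */
                return (1);
        printf("caps[0] = %u (zeroed)\n", caps[0]);
        free(caps);
        return (0);
}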
diff --git a/sys/dev/ixgbe/ixgbe_phy.c b/sys/dev/ixgbe/ixgbe_phy.c
index f4bee6c34f53..2a735ead9a12 100644
--- a/sys/dev/ixgbe/ixgbe_phy.c
+++ b/sys/dev/ixgbe/ixgbe_phy.c
@@ -462,8 +462,7 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
case TN1010_PHY_ID:
phy_type = ixgbe_phy_tn;
break;
- case X550_PHY_ID2:
- case X550_PHY_ID3:
+ case X550_PHY_ID:
case X540_PHY_ID:
phy_type = ixgbe_phy_aq;
break;
@@ -588,7 +587,7 @@ void ixgbe_restart_auto_neg(struct ixgbe_hw *hw)
}
/**
- * ixgbe_read_phy_mdi - Reads a value from a specified PHY register without
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
* the SWFW lock
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
@@ -1424,6 +1423,13 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
else
hw->phy.sfp_type =
ixgbe_sfp_type_1g_lx_core1;
+ } else if (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_1g_bx_core1;
} else {
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
}
@@ -1437,7 +1443,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
(comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
- (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)) ||
+ (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) ||
+ (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE))
hw->phy.multispeed_fiber = true;
/* Determine PHY vendor */
@@ -1488,7 +1496,12 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.type = ixgbe_phy_sfp_intel;
break;
default:
- hw->phy.type = ixgbe_phy_sfp_unknown;
+ if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_passive_unknown;
+ else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+ hw->phy.type = ixgbe_phy_sfp_active_unknown;
+ else
+ hw->phy.type = ixgbe_phy_sfp_unknown;
break;
}
}
@@ -1496,10 +1509,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
/* Allow any DA cable vendor */
if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
IXGBE_SFF_DA_ACTIVE_CABLE)) {
- if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
- hw->phy.type = ixgbe_phy_sfp_passive_unknown;
- else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
- hw->phy.type = ixgbe_phy_sfp_active_unknown;
status = IXGBE_SUCCESS;
goto out;
}
@@ -1511,7 +1520,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
hw->phy.type = ixgbe_phy_sfp_unsupported;
status = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -1530,7 +1541,9 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) {
/* Make sure we're a supported PHY type */
if (hw->phy.type == ixgbe_phy_sfp_intel) {
status = IXGBE_SUCCESS;
@@ -1613,6 +1626,8 @@ u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+ else if (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
break;
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
@@ -1861,12 +1876,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 ||
sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core0)
+ sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core0)
sfp_type = ixgbe_sfp_type_srlr_core0;
else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 ||
sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
- sfp_type == ixgbe_sfp_type_1g_sx_core1)
+ sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ sfp_type == ixgbe_sfp_type_1g_bx_core1)
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
diff --git a/sys/dev/ixgbe/ixgbe_phy.h b/sys/dev/ixgbe/ixgbe_phy.h
index 1fa2acb77354..c1ba73851397 100644
--- a/sys/dev/ixgbe/ixgbe_phy.h
+++ b/sys/dev/ixgbe/ixgbe_phy.h
@@ -49,6 +49,7 @@
#define IXGBE_SFF_1GBE_COMP_CODES 0x6
#define IXGBE_SFF_10GBE_COMP_CODES 0x3
#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_BITRATE_NOMINAL 0xC
#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
#define IXGBE_SFF_SFF_8472_SWAP 0x5C
#define IXGBE_SFF_SFF_8472_COMP 0x5E
@@ -73,6 +74,7 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_BASEBX10_CAPABLE 0x40
#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
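
The new IXGBE_SFF_BASEBX10_CAPABLE bit lives in the same SFF-8472 1GbE compliance byte (IXGBE_SFF_1GBE_COMP_CODES, offset 6) as the SX/LX bits, so it is probed the same way ixgbe_identify_sfp_module_generic() does above; a sketch:

	u8 comp_codes_1g;

	if (hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_1GBE_COMP_CODES,
	    &comp_codes_1g) == IXGBE_SUCCESS &&
	    (comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE)) {
		/* 1000BASE-BX10 module; LAN 0 maps to _1g_bx_core0,
		 * LAN 1 to _1g_bx_core1 (see ixgbe_phy.c above). */
	}
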
diff --git a/sys/dev/ixgbe/ixgbe_rss.h b/sys/dev/ixgbe/ixgbe_rss.h
index c00273587aaa..84c802671195 100644
--- a/sys/dev/ixgbe/ixgbe_rss.h
+++ b/sys/dev/ixgbe/ixgbe_rss.h
@@ -48,6 +48,7 @@
#define RSS_HASHTYPE_RSS_IPV6_EX (1 << 5)
#define RSS_HASHTYPE_RSS_TCP_IPV6_EX (1 << 6)
#define RSS_HASHTYPE_RSS_UDP_IPV4 (1 << 7)
+#define RSS_HASHTYPE_RSS_UDP_IPV4_EX (1 << 8)
#define RSS_HASHTYPE_RSS_UDP_IPV6 (1 << 9)
#define RSS_HASHTYPE_RSS_UDP_IPV6_EX (1 << 10)
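
With bit 8 filled in, this fallback mask layout again matches the kernel's RSS_HASHTYPE_RSS_* definitions in <net/rss_config.h>. A consumer would test it the usual way (sketch, assuming a kernel built with "options RSS"):

	uint32_t hashconfig = rss_gethashconfig();

	if (hashconfig & RSS_HASHTYPE_RSS_UDP_IPV4_EX) {
		/* enable RSS field selection for UDP/IPv4 with extensions */
	}
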
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index b8aeaf51f86c..0bbe7806d41d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -74,6 +74,7 @@
*/
#include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
/* Override this by setting IOMEM in your ixgbe_osdep.h header */
#define IOMEM
@@ -150,12 +151,19 @@
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x0001
#define IXGBE_CAT(r, m) IXGBE_##r##m
@@ -202,6 +210,10 @@
#define IXGBE_FLA_X550EM_x IXGBE_FLA
#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA)
+#define IXGBE_FLA_FL_SIZE_SHIFT_X540 17
+#define IXGBE_FLA_FL_SIZE_SHIFT_X550 12
+#define IXGBE_FLA_FL_SIZE_MASK_X540 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X540)
+#define IXGBE_FLA_FL_SIZE_MASK_X550 (0x7 << IXGBE_FLA_FL_SIZE_SHIFT_X550)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -284,6 +296,41 @@
#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN)
#define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500
+/* NVM component version fields */
+#define NVM_VERSZ_LONG 64
+#define NVM_VERSZ_SHORT 32
+#define NVM_VER_LONG \
+ "DS_%x.%x NVM_%x.%02x.%x PHY_%x.%02x.%x OEM_%04x EtkId_%x OR_%x.%x.%x\n"
+#define NVM_VER_SHORT1 "%02x.%02x %x %x.%x.%x\n"
+#define NVM_VER_SHORT2 "%02x.%02x.%x %x.%02x.%x %x %x.%x.%x\n"
+
+#define NVM_EEP_MAJOR_MASK 0xF000
+#define NVM_EEP_MINOR_MASK 0xFF0
+#define NVM_EEP_ID_MASK 0xF
+#define NVM_EEP_MAJ_SHIFT 12
+#define NVM_EEP_MIN_SHIFT 4
+
+#define NVM_EEP_OFFSET_82598 0x2A
+#define NVM_EEP_OFFSET_X540 0x18
+#define NVM_EEP_X550_MINOR_MASK 0xFF
+#define NVM_EEP_PHY_OFF_X540 0x19
+#define NVM_PHY_MAJOR_MASK 0xF000
+#define NVM_PHY_MINOR_MASK 0xFF0
+#define NVM_PHY_ID_MASK 0xF
+#define NVM_PHY_MAJ_SHIFT 12
+#define NVM_PHY_MIN_SHIFT 4
+
+#define NVM_DS_OFFSET 0x29
+#define NVM_DS_MAJOR_MASK 0xF000
+#define NVM_DS_MINOR_MASK 0xF
+#define NVM_DS_SHIFT 12
+
+#define NVM_OEM_OFFSET 0x2A
+
+#define NVM_PHYVEND_MAJOR_MASK 0xFF00
+#define NVM_PHYVEND_MINOR_MASK 0xFF
+#define NVM_PHYVEND_SHIFT 8
+
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
#define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20
@@ -344,6 +391,16 @@ struct ixgbe_nvm_version {
u16 or_build;
u8 or_patch;
+ u8 phy_fw_maj;
+ u16 phy_fw_min;
+ u8 phy_fw_id;
+
+ u8 devstart_major;
+ u8 devstart_minor;
+ u16 oem_specific;
+
+ u8 phy_vend_maj;
+ u8 phy_vend_min;
};
/* Interrupt Registers */
@@ -483,8 +540,14 @@ struct ixgbe_nvm_version {
#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */
/* 64 Mailboxes, 16 DW each */
#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i)))
+#define IXGBE_PFMBICR_INDEX(_i) ((_i) >> 4)
+#define IXGBE_PFMBICR_SHIFT(_i) ((_i) % 16)
#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */
#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */
+#define IXGBE_PFVFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_PFVFLREC(_i) (0x00700 + ((_i) * 4))
+#define IXGBE_PFVFLRE_INDEX(_i) ((_i) >> 5)
+#define IXGBE_PFVFLRE_SHIFT(_i) ((_i) % 32)
#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
@@ -1437,6 +1500,7 @@ struct ixgbe_dmac_config {
#define IXGBE_BARCTRL_FLSIZE 0x0700
#define IXGBE_BARCTRL_FLSIZE_SHIFT 8
#define IXGBE_BARCTRL_CSRSIZE 0x2000
+#define IXGBE_BARCTRL_CSRSIZE_SHIFT 13
/* RSCCTL Bit Masks */
#define IXGBE_RSCCTL_RSCEN 0x01
@@ -1482,7 +1546,7 @@ struct ixgbe_dmac_config {
#define IXGBE_PSRTYPE_RQPL_SHIFT 29
/* CTRL Bit Masks */
-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */
+#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Primary Disable bit */
#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */
#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST)
@@ -1696,6 +1760,7 @@ struct ixgbe_dmac_config {
#define TN1010_PHY_ID 0x00A19410
#define TNX_FW_REV 0xB
#define X540_PHY_ID 0x01540200
+#define X550_PHY_ID 0x01540220
#define X550_PHY_ID2 0x01540223
#define X550_PHY_ID3 0x01540221
#define X557_PHY_ID 0x01540240
@@ -1832,7 +1897,7 @@ enum {
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+#define IXGBE_VF_INIT_TIMEOUT 10000 /* Number of retries to clear RSTI */
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
@@ -1912,6 +1977,7 @@ enum {
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -1947,6 +2013,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -1968,6 +2035,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -1990,6 +2058,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2129,7 +2198,7 @@ enum {
/* STATUS Bit Masks */
#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */
#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/
-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */
+#define IXGBE_STATUS_GIO 0x00080000 /* GIO Primary Ena Status */
#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */
#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */
@@ -2397,6 +2466,7 @@ enum {
#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2514,6 +2584,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2524,6 +2595,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_LINK_SPEED_16000 0x4
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -2539,8 +2611,8 @@ enum {
#define IXGBE_PCIDEVCTRL2_4_8s 0xd
#define IXGBE_PCIDEVCTRL2_17_34s 0xe
-/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+/* Number of 100 microseconds we wait for PCI Express primary disable */
+#define IXGBE_PCI_PRIMARY_DISABLE_TIMEOUT 800
/* Check whether address is multicast. This is little-endian specific check.*/
#define IXGBE_IS_MULTICAST(Address) \
@@ -2898,11 +2970,6 @@ enum {
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
-/* SR-IOV specific macros */
-#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
-#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
/* Translated register #defines */
#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P)))
#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P)))
@@ -3103,6 +3170,7 @@ enum ixgbe_fdir_pballoc_type {
#define FW_SHADOW_RAM_DUMP_LEN 0
#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */
#define FW_NVM_DATA_OFFSET 3
+#define FW_ANVM_DATA_OFFSET 3
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
@@ -3174,6 +3242,8 @@ enum ixgbe_fdir_pballoc_type {
#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u
#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu
+#define IXGBE_SR_IMMEDIATE_VALUES_PTR 0x4E
+
/* Host Interface Command Structures */
#pragma pack(push, 1)
@@ -3409,6 +3479,7 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */
#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_MACLEN_MASK (0x7F << IXGBE_ADVTXD_MACLEN_SHIFT) /* Adv ctxt desc mac len mask */
#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
@@ -3477,6 +3548,8 @@ typedef u64 ixgbe_physical_layer;
#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000
#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000
#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000
+#define IXGBE_PHYSICAL_LAYER_2500BASE_T 0x20000
+#define IXGBE_PHYSICAL_LAYER_5000BASE_T 0x40000
/* Flow Control Data Sheet defined values
* Calculation and defines taken from 802.1bb Annex O
@@ -3685,6 +3758,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_X550EM_a_vf,
+ ixgbe_mac_E610,
+ ixgbe_mac_E610_vf,
ixgbe_num_macs
};
@@ -3749,6 +3824,8 @@ enum ixgbe_sfp_type {
ixgbe_sfp_type_1g_sx_core1 = 12,
ixgbe_sfp_type_1g_lx_core0 = 13,
ixgbe_sfp_type_1g_lx_core1 = 14,
+ ixgbe_sfp_type_1g_bx_core0 = 15,
+ ixgbe_sfp_type_1g_bx_core1 = 16,
ixgbe_sfp_type_not_present = 0xFFFE,
ixgbe_sfp_type_unknown = 0xFFFF
};
@@ -3762,7 +3839,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui
};
/* Flow Control Settings */
@@ -3771,6 +3850,7 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
+ ixgbe_fc_auto,
ixgbe_fc_default
};
@@ -3803,6 +3883,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_16000 = 16000,
ixgbe_bus_speed_reserved
};
@@ -3947,6 +4028,7 @@ struct ixgbe_eeprom_operations {
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
};
struct ixgbe_mac_operations {
@@ -4023,6 +4105,7 @@ struct ixgbe_mac_operations {
s32 (*init_uta_tables)(struct ixgbe_hw *);
void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
+ s32 (*toggle_txdctl)(struct ixgbe_hw *hw, u32 vf_index);
s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*set_rlpml)(struct ixgbe_hw *, u16);
@@ -4057,6 +4140,10 @@ struct ixgbe_mac_operations {
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+ s32 (*get_fw_version)(struct ixgbe_hw *hw);
+ s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
};
struct ixgbe_phy_operations {
@@ -4101,6 +4188,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -4172,39 +4262,13 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
#include "ixgbe_mbx.h"
-struct ixgbe_mbx_operations {
- void (*init_params)(struct ixgbe_hw *hw);
- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
- s32 (*check_for_msg)(struct ixgbe_hw *, u16);
- s32 (*check_for_ack)(struct ixgbe_hw *, u16);
- s32 (*check_for_rst)(struct ixgbe_hw *, u16);
-};
-
-struct ixgbe_mbx_stats {
- u32 msgs_tx;
- u32 msgs_rx;
-
- u32 acks;
- u32 reqs;
- u32 rsts;
-};
-
-struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
- struct ixgbe_mbx_stats stats;
- u32 timeout;
- u32 usec_delay;
- u32 v2p_mailbox;
- u16 size;
-};
-
struct ixgbe_hw {
u8 IOMEM *hw_addr;
void *back;
@@ -4228,6 +4292,23 @@ struct ixgbe_hw {
bool allow_unsupported_sfp;
bool wol_enabled;
bool need_crosstalk_fix;
+ u32 fw_rst_cnt;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct ixgbe_fwlog_cfg fwlog_cfg;
+ bool fwlog_support_ena;
+ struct ixgbe_fwlog_ring fwlog_ring;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4247,7 +4328,7 @@ struct ixgbe_hw {
#define IXGBE_ERR_ADAPTER_STOPPED -9
#define IXGBE_ERR_INVALID_MAC_ADDR -10
#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11
-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12
+#define IXGBE_ERR_PRIMARY_REQUESTS_PENDING -12
#define IXGBE_ERR_INVALID_LINK_SETTINGS -13
#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14
#define IXGBE_ERR_RESET_FAILED -15
@@ -4275,6 +4356,27 @@ struct ixgbe_hw {
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
#define IXGBE_ERR_FW_RESP_INVALID -39
#define IXGBE_ERR_TOKEN_RETRY -40
+#define IXGBE_ERR_MBX -41
+#define IXGBE_ERR_MBX_NOMSG -42
+#define IXGBE_ERR_TIMEOUT -43
+
+#define IXGBE_ERR_NOT_SUPPORTED -45
+#define IXGBE_ERR_OUT_OF_RANGE -46
+
+#define IXGBE_ERR_NVM -50
+#define IXGBE_ERR_NVM_CHECKSUM -51
+#define IXGBE_ERR_BUF_TOO_SHORT -52
+#define IXGBE_ERR_NVM_BLANK_MODE -53
+#define IXGBE_ERR_INVAL_SIZE -54
+#define IXGBE_ERR_DOES_NOT_EXIST -55
+
+#define IXGBE_ERR_ACI_ERROR -100
+#define IXGBE_ERR_ACI_DISABLED -101
+#define IXGBE_ERR_ACI_TIMEOUT -102
+#define IXGBE_ERR_ACI_BUSY -103
+#define IXGBE_ERR_ACI_NO_WORK -104
+#define IXGBE_ERR_ACI_NO_EVENTS -105
+#define IXGBE_ERR_FW_API_VER -106
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
@@ -4504,5 +4606,6 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_REQUEST_TASK_FDIR 0x08
#define IXGBE_REQUEST_TASK_PHY 0x10
#define IXGBE_REQUEST_TASK_LSC 0x20
+#define IXGBE_REQUEST_TASK_FWEVENT 0x40
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..e300030c3ba4
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,2278 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary b.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
+
+#define BYTES_PER_WORD 2
+#define BYTES_PER_DWORD 4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG 64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif /* !BITS_PER_LONG_LONG */
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
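+/* For illustration: GENMASK(h, l) builds the mask of bits h..l inclusive,
+ * e.g. GENMASK(3, 0) == 0xF and GENMASK(7, 4) == 0xF0; MAKEMASK(0x7, 5)
+ * likewise yields 0xE0.
+ */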
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x) ((u8)((x) & 0xFF))
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
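+/* For illustration, given a hypothetical
+ *	struct foo { u32 hdr; u32 elem[]; };
+ * ixgbe_struct_size(p, elem, 4) evaluates to sizeof(struct foo) +
+ * 4 * sizeof(u32), i.e. the allocation size for four trailing elements.
+ */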
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS 512
+#define E610_SR_PCIE_ALT_SIZE_WORDS 512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER 0x18
+#define E610_NVM_VER_LO_SHIFT 0
+#define E610_NVM_VER_LO_MASK (0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT 12
+#define E610_NVM_VER_HI_MASK (0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER 0x29
+#define E610_SR_NVM_EETRACK_LO 0x2D
+#define E610_SR_NVM_EETRACK_HI 0x2E
+#define E610_SR_VPD_PTR 0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define E610_SR_PFA_PTR 0x40
+#define E610_SR_1ST_NVM_BANK_PTR 0x42
+#define E610_SR_NVM_BANK_SIZE 0x43
+#define E610_SR_1ST_OROM_BANK_PTR 0x44
+#define E610_SR_OROM_BANK_SIZE 0x45
+#define E610_SR_NETLIST_BANK_PTR 0x46
+#define E610_SR_NETLIST_BANK_SIZE 0x47
+#define E610_SR_POINTER_TYPE_BIT BIT(15)
+#define E610_SR_POINTER_MASK 0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS 2048
+#define E610_GET_PFA_POINTER_IN_WORDS(offset) \
+	((((offset) & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+	 (((offset) & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+	 ((offset) & E610_SR_POINTER_MASK))
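+/* For illustration: bit 15 selects the pointer's units. 0x0040 is already a
+ * word offset (64 words), while 0x8002 is in 4KB-sector units and expands to
+ * 2 * 2048 = 4096 words.
+ */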
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD 0x00
+#define E610_SR_PBA_BLOCK_PTR 0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT 0
+#define IXGBE_OROM_VER_PATCH_MASK (0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT 8
+#define IXGBE_OROM_VER_BUILD_MASK (0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT 24
+#define IXGBE_OROM_VER_MASK (0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S 0x06
+#define IXGBE_SR_CTRL_WORD_1_M (0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID 0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* These macros extract a particular part of the NVM version (major version,
+ * minor version or image ID) from the NVM Image Revision word.
+ */
+#define E610_NVM_MAJOR_VER(x) (((x) & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x) ((x) & 0x00FF)
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define IXGBE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE 0xBABA
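+/* For illustration: if every other Shadow RAM word sums to 0x1234 (mod
+ * 2^16), the stored checksum word must be 0xBABA - 0x1234 = 0xA886.
+ */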
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE 10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY 0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY 0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER 0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR 0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION 0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM 0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M BIT(2)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL 0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S 0
+#define RDASB_MSGCTL_EXP_RDW_S 8
+#define RDASB_MSGCTL_CMDV_M BIT(31)
+#define RDASB_RSPCTL 0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define RDASB_WHDR0 0x000B68F4
+#define RDASB_WHDR1 0x000B68F8
+#define RDASB_WHDR2 0x000B68FC
+#define RDASB_WHDR3 0x000B6900
+#define RDASB_WHDR4 0x000B6904
+#define RDASB_RHDR0 0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S 27
+#define RDASB_RHDR0_RESPONSE_M MAKEMASK(0x7, 27)
+#define RDASB_RDATA0 0x000B6B00
+#define RDASB_RDATA1 0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL 0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S 0
+#define SPISB_MSGCTL_EXP_RDW_S 8
+#define SPISB_MSGCTL_MSG_MODE_S 26
+#define SPISB_MSGCTL_TOKEN_MODE_S 28
+#define SPISB_MSGCTL_BARCLR_S 30
+#define SPISB_MSGCTL_CMDV_S 31
+#define SPISB_MSGCTL_CMDV_M BIT(31)
+#define SPISB_RSPCTL 0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define SPISB_WHDR0 0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S 12
+#define SPISB_WHDR0_OPCODE_SEL_S 16
+#define SPISB_WHDR0_TAG_S 24
+#define SPISB_WHDR1 0x000B70F8
+#define SPISB_WHDR2 0x000B70FC
+#define SPISB_RDATA 0x000B7300
+#define SPISB_WDATA 0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define PF_HICR 0x00082048
+
+#define PF_HIDA_MAX_INDEX 15
+#define PF_HIBA_MAX_INDEX 1023
+
+#define PF_HICR_EN BIT(0)
+#define PF_HICR_C BIT(1)
+#define PF_HICR_SV BIT(2)
+#define PF_HICR_EV BIT(3)
+
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i) (0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+
+#define GL_HIDA_MAX_INDEX 15
+#define GL_HIBA_MAX_INDEX 1023
+
+#define GL_HICR_C BIT(1)
+#define GL_HICR_SV BIT(2)
+#define GL_HICR_EV BIT(3)
+
+#define GL_HICR_EN 0x00082044
+
+#define GL_HICR_EN_CHECK BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks.
+ */
+#define IXGBE_FW_API_VER_BRANCH 0x00
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET 3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+/* Timeout [ms] when waiting for a synchronous response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* Timeout [ms] when waiting for an asynchronous response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* Timeout [ms] when waiting for a resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within
+ * the range [LO..HI]
+ */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO 0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI 200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside
+ * the range [LO..HI]
+ */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI 205
+
+/* FW-defined boundary for a large buffer: 4KB >= large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S 0
+#define IXGBE_ACI_FLAG_CMP_S 1
+#define IXGBE_ACI_FLAG_ERR_S 2
+#define IXGBE_ACI_FLAG_VFE_S 3
+#define IXGBE_ACI_FLAG_LB_S 9
+#define IXGBE_ACI_FLAG_RD_S 10
+#define IXGBE_ACI_FLAG_VFC_S 11
+#define IXGBE_ACI_FLAG_BUF_S 12
+#define IXGBE_ACI_FLAG_SI_S 13
+#define IXGBE_ACI_FLAG_EI_S 14
+#define IXGBE_ACI_FLAG_FE_S 15
+
+#define IXGBE_ACI_FLAG_DD BIT(IXGBE_ACI_FLAG_DD_S) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(IXGBE_ACI_FLAG_LB_S) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(IXGBE_ACI_FLAG_RD_S) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(IXGBE_ACI_FLAG_SI_S) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(IXGBE_ACI_FLAG_EI_S) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(IXGBE_ACI_FLAG_FE_S) /* 0x8000 */
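+/* For illustration, following the Intel admin-queue convention these flags
+ * mirror (carried over from the ice driver, so treat as an assumption): an
+ * indirect command sets IXGBE_ACI_FLAG_BUF, adds IXGBE_ACI_FLAG_RD when the
+ * buffer carries data to the FW rather than receiving it, and adds
+ * IXGBE_ACI_FLAG_LB when the buffer is larger than IXGBE_ACI_LG_BUF bytes.
+ */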
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or allocation failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+ ixgbe_aci_opc_nvm_sanitization = 0x070C,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+
+ /* FW Logging Commands */
+ ixgbe_aci_opc_fw_logs_config = 0xFF30,
+ ixgbe_aci_opc_fw_logs_register = 0xFF31,
+ ixgbe_aci_opc_fw_logs_query = 0xFF32,
+ ixgbe_aci_opc_fw_logs_event = 0xFF33,
+ ixgbe_aci_opc_fw_logs_get = 0xFF34,
+ ixgbe_aci_opc_fw_logs_clear = 0xFF35
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / \
+ (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X) IXGBE_CHECK_STRUCT_LEN(16, X)
+
+struct ixgbe_aci_cmd_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+struct ixgbe_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT 1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM 1
+#define IXGBE_ACI_RES_ID_SDP 2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK 3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ 1
+#define IXGBE_ACI_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
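+/* Usage sketch (descriptor plumbing is assumed, not defined here):
+ *
+ *	cmd->res_id = IXGBE_CPU_TO_LE16(IXGBE_ACI_RES_ID_NVM);
+ *	cmd->access_type = IXGBE_CPU_TO_LE16(IXGBE_ACI_RES_ACCESS_READ);
+ *
+ * On success the FW writes the hold time in milliseconds into cmd->timeout
+ * and the driver must release the resource (opcode 0x0009) before it
+ * expires.
+ */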
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+ __le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING BIT(1)
+ u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_S 1
+#define IXGBE_ACI_REPORT_MODE_M (7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
+
+/* PHY type defines (extended).
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 18
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
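+/* For illustration: a configuration advertising both 10GBASE-T and
+ * 1000BASE-T would set, in the set-PHY-config data below,
+ *
+ *	cfg->phy_type_low = htole64(IXGBE_PHY_TYPE_LOW_10GBASE_T |
+ *	    IXGBE_PHY_TYPE_LOW_1000BASE_T);
+ */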
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_LSE_M 0x3
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ u8 reserved2[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S 2
+#define IXGBE_ACI_LINK_TX_M (0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S 3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M (0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S 3
+#define IXGBE_ACI_CFG_PACING_M (0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M 0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M 0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
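
/*
 * Editorial sketch, not part of the patch: decoding a link status
 * response.  link_speed is a little-endian bitmap, so it is byte-swapped
 * before testing; le16toh() is the stock FreeBSD conversion and the
 * helper name is made up.
 */
static inline bool
ixgbe_sketch_link_up_at_10g(const struct ixgbe_aci_cmd_get_link_status_data *d)
{
	/* Bit 0 of link_info is the overall Link Status. */
	if ((d->link_info & IXGBE_ACI_LINK_UP) == 0)
		return (false);
	return ((le16toh(d->link_speed) & IXGBE_ACI_LINK_SPEED_10GB) != 0);
}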
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
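
/*
 * Editorial sketch, not part of the patch: each set bit arms the
 * corresponding Link Status Event source.  A driver that only cares
 * about up/down transitions and module qualification failures might
 * program the mask as below; the helper name is hypothetical.
 */
static inline void
ixgbe_sketch_arm_link_events(struct ixgbe_aci_cmd_set_event_mask *cmd)
{
	cmd->event_mask = htole16(IXGBE_ACI_LINK_EVENT_UPDOWN |
	    IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL);
}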
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M (0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M \
+ (0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M (0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S 6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M \
+ (0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S 0
+#define IXGBE_ACI_I2C_DATA_SIZE_M (0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT 0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S 5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M (0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK 0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
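
/*
 * Editorial sketch, not part of the patch: i2c_params packs the transfer
 * size into its low nibble, with the 7/10-bit address selector in bit 4
 * (the 7-bit encoding is 0).  A 7-bit transfer of len bytes (len <= 15)
 * could be set up as below; the helper itself is hypothetical.
 */
static inline void
ixgbe_sketch_i2c_setup(struct ixgbe_aci_cmd_i2c *cmd, u16 bus_addr, u8 len)
{
	cmd->i2c_params = (len << IXGBE_ACI_I2C_DATA_SIZE_S) &
	    IXGBE_ACI_I2C_DATA_SIZE_M;
	cmd->i2c_bus_addr = htole16(bus_addr & IXGBE_ACI_I2C_ADDR_7BIT_MASK);
}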
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S 0
+#define IXGBE_ACI_MDIO_DEV_M (0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22 BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45 BIT(6)
+ u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S 0
+#define IXGBE_ACI_GPIO_FUNC_M (0x1F << IXGBE_ACI_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON BIT(0)
+#define IXGBE_ACI_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_port_id_led);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+ __le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S 0
+#define IXGBE_ACI_GPIO_HANDLE_M (0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M 0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M 0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S 11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M (0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
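
/*
 * Editorial sketch, not part of the patch: composing i2c_bus_addr for a
 * 7-bit module address with a page update, where the write bit doubles
 * as the read/write selector.  Flag names come from the definitions
 * above; the helper is hypothetical.
 */
static inline void
ixgbe_sketch_sff_addr(struct ixgbe_aci_cmd_sff_eeprom *cmd, u16 addr,
    bool write)
{
	u16 v;

	v = (addr & IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
	    (IXGBE_ACI_SFF_UPDATE_PAGE << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S);
	if (write)
		v |= IXGBE_ACI_SFF_IS_WRITE;
	cmd->i2c_bus_addr = htole16(v);
}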
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+ u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION (0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT (2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* NVM module_typeid values, with the offsets and read lengths needed for struct ixgbe_aci_cmd_nvm. */
+#define IXGBE_ACI_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT 0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET 0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S 15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID 0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
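
/*
 * Editorial sketch, not part of the patch: NVM offsets are 24 bits wide
 * and split across offset_low/offset_high, so the caller has to break
 * the byte offset up itself.  Field and macro names come from the
 * structure above; the helper is hypothetical.
 */
static inline void
ixgbe_sketch_nvm_set_offset(struct ixgbe_aci_cmd_nvm *cmd, u32 offset)
{
	/* Clamp to the 24-bit range the command supports. */
	offset &= IXGBE_ACI_NVM_MAX_OFFSET;
	cmd->offset_low = htole16(offset & 0xFFFF);
	cmd->offset_high = (u8)(offset >> 16);
}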
+
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading from and writing to
+ * a module addressed via the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+ __le16 length;
+ __le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID BIT(1)
+ __le16 nvm_minsrev_l;
+ __le16 nvm_minsrev_h;
+ __le16 orom_minsrev_l;
+ __le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
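
/*
 * Editorial sketch, not part of the patch: the 32-bit minimum security
 * revisions travel as two little-endian 16-bit halves, so reading one
 * back looks like this (le16toh() is the stock FreeBSD conversion; a
 * real caller would first test IXGBE_ACI_NVM_MINSREV_NVM_VALID).
 */
static inline u32
ixgbe_sketch_nvm_minsrev(const struct ixgbe_aci_cmd_nvm_minsrev *m)
{
	return (((u32)le16toh(m->nvm_minsrev_h) << 16) |
	    le16toh(m->nvm_minsrev_l));
}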
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+ u8 cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
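
/*
 * Editorial sketch, not part of the patch: a verify request sets only
 * IXGBE_ACI_NVM_CHECKSUM_VERIFY in flags; on completion the response
 * carries 0xBABA in the checksum field when the NVM sums correctly.
 * The helper name is made up.
 */
static inline bool
ixgbe_sketch_nvm_checksum_ok(const struct ixgbe_aci_cmd_nvm_checksum *resp)
{
	return (le16toh(resp->checksum) == IXGBE_ACI_NVM_CHECKSUM_CORRECT);
}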
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+ u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ 0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+ u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 synce_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID BIT(10)
+#define IXGBE_ACI_NODE_HANDLE MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT 10
+#define IXGBE_ACI_DRIVING_CLK_NUM MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL MAKEMASK(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ __le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
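
/*
 * Editorial sketch, not part of the patch: the selected clock reference
 * occupies bits 12:8 of dpll_state, so extracting it is a byte swap
 * plus the usual mask-and-shift.  The helper name is hypothetical.
 */
static inline u8
ixgbe_sketch_dpll_clk_ref(const struct ixgbe_aci_cmd_get_cgu_dpll_status *s)
{
	u16 state = le16toh(s->dpll_state);

	return ((state & IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL) >>
	    IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT);
}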
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE MAKEMASK(0x7, 5)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
+struct ixgbe_aci_cmd_temp_tca_event {
+ u8 event_desc;
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_SHIFT 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_NVM 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_EVENT_STATE 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_ALL 2
+
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_SHIFT 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_CLEARED 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_CLEARED 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_RAISED 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_RAISED 3
+
+ u8 reserved;
+ __le16 temperature;
+ __le16 thermal_sensor_max_value;
+ __le16 thermal_sensor_min_value;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_temp_tca_event);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+ __le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK 0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 1
+ __le16 table_id; /* Used only for non-memory clusters */
+ __le32 idx; /* In table entries for tables, in bytes for memory */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+ u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE 0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL 0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM 0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST 0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT 0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY 0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH 0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH 0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH 0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry, (0xFF22)
+ * repeated per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF (0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT (0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+ __le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+enum ixgbe_aci_fw_logging_mod {
+ IXGBE_ACI_FW_LOG_ID_GENERAL = 0,
+ IXGBE_ACI_FW_LOG_ID_CTRL = 1,
+ IXGBE_ACI_FW_LOG_ID_LINK = 2,
+ IXGBE_ACI_FW_LOG_ID_LINK_TOPO = 3,
+ IXGBE_ACI_FW_LOG_ID_DNL = 4,
+ IXGBE_ACI_FW_LOG_ID_I2C = 5,
+ IXGBE_ACI_FW_LOG_ID_SDP = 6,
+ IXGBE_ACI_FW_LOG_ID_MDIO = 7,
+ IXGBE_ACI_FW_LOG_ID_ADMINQ = 8,
+ IXGBE_ACI_FW_LOG_ID_HDMA = 9,
+ IXGBE_ACI_FW_LOG_ID_LLDP = 10,
+ IXGBE_ACI_FW_LOG_ID_DCBX = 11,
+ IXGBE_ACI_FW_LOG_ID_DCB = 12,
+ IXGBE_ACI_FW_LOG_ID_XLR = 13,
+ IXGBE_ACI_FW_LOG_ID_NVM = 14,
+ IXGBE_ACI_FW_LOG_ID_AUTH = 15,
+ IXGBE_ACI_FW_LOG_ID_VPD = 16,
+ IXGBE_ACI_FW_LOG_ID_IOSF = 17,
+ IXGBE_ACI_FW_LOG_ID_PARSER = 18,
+ IXGBE_ACI_FW_LOG_ID_SW = 19,
+ IXGBE_ACI_FW_LOG_ID_SCHEDULER = 20,
+ IXGBE_ACI_FW_LOG_ID_TXQ = 21,
+ IXGBE_ACI_FW_LOG_ID_ACL = 22,
+ IXGBE_ACI_FW_LOG_ID_POST = 23,
+ IXGBE_ACI_FW_LOG_ID_WATCHDOG = 24,
+ IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH = 25,
+ IXGBE_ACI_FW_LOG_ID_MNG = 26,
+ IXGBE_ACI_FW_LOG_ID_SYNCE = 27,
+ IXGBE_ACI_FW_LOG_ID_HEALTH = 28,
+ IXGBE_ACI_FW_LOG_ID_TSDRV = 29,
+ IXGBE_ACI_FW_LOG_ID_PFREG = 30,
+ IXGBE_ACI_FW_LOG_ID_MDLVER = 31,
+ IXGBE_ACI_FW_LOG_ID_MAX = 32,
+};
+
+/* Only a single log level should be set and all log levels under the set value
+ * are enabled, e.g. if log level is set to IXGBE_FWLOG_LEVEL_VERBOSE, then all
+ * other log levels are included (except IXGBE_FWLOG_LEVEL_NONE)
+ */
+enum ixgbe_fwlog_level {
+ IXGBE_FWLOG_LEVEL_NONE = 0,
+ IXGBE_FWLOG_LEVEL_ERROR = 1,
+ IXGBE_FWLOG_LEVEL_WARNING = 2,
+ IXGBE_FWLOG_LEVEL_NORMAL = 3,
+ IXGBE_FWLOG_LEVEL_VERBOSE = 4,
+ IXGBE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
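
/*
 * Editorial sketch, not part of the patch: because the levels are
 * cumulative, "should this event be kept" reduces to a single
 * comparison against the configured level for the module.
 */
static inline bool
ixgbe_sketch_fwlog_wanted(enum ixgbe_fwlog_level cfg,
    enum ixgbe_fwlog_level event)
{
	return (event != IXGBE_FWLOG_LEVEL_NONE && event <= cfg);
}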
+
+struct ixgbe_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ixgbe_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ixgbe_fwlog_module_entry module_entries[IXGBE_ACI_FW_LOG_ID_MAX];
+#define IXGBE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define IXGBE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ixgbe_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define IXGBE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ixgbe_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define IXGBE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+ /* options used to configure firmware logging */
+ u16 options;
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u8 log_resolution;
+};
+
+struct ixgbe_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ixgbe_fwlog_ring {
+ struct ixgbe_fwlog_data *rings;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define IXGBE_FWLOG_RING_SIZE_DFLT 256
+#define IXGBE_FWLOG_RING_SIZE_MAX 512
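
/*
 * Editorial sketch, not part of the patch: head and tail index `rings`
 * as a circular buffer, so advancing either index wraps modulo `size`.
 * The helper is hypothetical.
 */
static inline u16
ixgbe_sketch_fwlog_ring_advance(const struct ixgbe_fwlog_ring *ring, u16 idx)
{
	return ((idx + 1) % ring->size);
}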
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ * Get FW Log (indirect 0xFF34)
+ * Clear FW Log (indirect 0xFF35)
+ */
+struct ixgbe_aci_cmd_fw_log {
+ u8 cmd_flags;
+#define IXGBE_ACI_FW_LOG_CONF_UART_EN BIT(0)
+#define IXGBE_ACI_FW_LOG_CONF_AQ_EN BIT(1)
+#define IXGBE_ACI_FW_LOG_QUERY_REGISTERED BIT(2)
+#define IXGBE_ACI_FW_LOG_CONF_SET_VALID BIT(3)
+#define IXGBE_ACI_FW_LOG_AQ_REGISTER BIT(0)
+#define IXGBE_ACI_FW_LOG_AQ_QUERY BIT(2)
+#define IXGBE_ACI_FW_LOG_PERSISTENT BIT(0)
+ u8 rsp_flag;
+#define IXGBE_ACI_FW_LOG_MORE_DATA BIT(1)
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define IXGBE_ACI_FW_LOG_MIN_RESOLUTION (1)
+#define IXGBE_ACI_FW_LOG_MAX_RESOLUTION (128)
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_fw_log);
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ixgbe_aci_cmd_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_fw_log_cfg_resp);
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command Interface
+ * (ACI). The firmware writes back onto the command descriptor and returns
+ * the result of the command. Asynchronous events that are not an immediate
+ * result of the command are written to the Admin Command Interface (ACI) using
+ * the same descriptor format. Descriptors are in little-endian notation with
+ * 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_generic generic;
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_i2c read_write_i2c;
+ struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+ struct ixgbe_aci_cmd_mdio read_write_mdio;
+ struct ixgbe_aci_cmd_mdio read_mdio;
+ struct ixgbe_aci_cmd_mdio write_mdio;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+ struct ixgbe_aci_cmd_gpio read_write_gpio;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+ struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+ struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+ struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+ struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+ struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+ struct ixgbe_aci_cmd_set_health_status_config
+ set_health_status_config;
+ struct ixgbe_aci_cmd_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ixgbe_aci_cmd_get_health_status get_health_status;
+ struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+ struct ixgbe_aci_cmd_fw_log fw_log;
+ struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+ } params;
+};
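
/*
 * Editorial sketch, not part of the patch: for a direct command all
 * arguments travel inside params, so descriptor preparation is just
 * zeroing the descriptor and setting the opcode.  IXGBE_ACI_FLAG_SI
 * (solicited interrupt) is assumed to be among the IXGBE_ACI_FLAG_*
 * values defined elsewhere in this header; the helper is hypothetical.
 */
static inline void
ixgbe_sketch_fill_direct_cmd(struct ixgbe_aci_desc *desc, u16 opcode)
{
	memset(desc, 0, sizeof(*desc));
	desc->opcode = htole16(opcode);
	desc->flags = htole16(IXGBE_ACI_FLAG_SI);
}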
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] #defines in
+ * the ixgbe_aci_cmd_get_phy_caps_data structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M \
+ MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ /* Support for OROM update in Recovery Mode. */
+ bool orom_recovery_update;
+ bool next_cluster_id_support;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+#pragma pack()
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ struct ixgbe_lock lock; /* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+ u32 nvm;
+ u32 orom;
+ u8 nvm_valid : 1;
+ u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank is desired to read from, either the active
+ * bank or the inactive bank. Used to abstract 1st and 2nd bank notion from
+ * code which just wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
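
/*
 * Editorial sketch, not part of the patch: mapping the active/inactive
 * abstraction of ixgbe_bank_select onto the NVM bank recorded in
 * ixgbe_bank_info.  If the active bank is the 1st, "inactive" resolves
 * to the 2nd, and vice versa; the helper name is made up.
 */
static inline enum ixgbe_flash_bank
ixgbe_sketch_resolve_nvm_bank(const struct ixgbe_bank_info *banks,
    enum ixgbe_bank_select which)
{
	if (which == IXGBE_ACTIVE_FLASH_BANK)
		return (banks->nvm_bank);
	return (banks->nvm_bank == IXGBE_1ST_FLASH_BANK ?
	    IXGBE_2ND_FLASH_BANK : IXGBE_1ST_FLASH_BANK);
}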
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ 0x0000000B
+#define IXGBE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 offset; /* Offset to read/write, in bytes */
+ u32 data_size; /* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+ u32 regval; /* Storage for register value */
+};
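
/*
 * Editorial sketch, not part of the patch: building a read request
 * through the NVM access interface.  These host-side structures carry
 * plain u32 fields, so no endianness conversion is needed; only the two
 * structures above and the command constants are taken from this header.
 */
static inline void
ixgbe_sketch_nvm_access_read(struct ixgbe_nvm_access_cmd *cmd, u32 offset)
{
	cmd->command = IXGBE_NVM_CMD_READ;
	cmd->offset = offset;
	cmd->data_size = sizeof(struct ixgbe_nvm_access_data);
}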
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index 91df9b7dd1c3..4e48f7f33c9d 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -49,6 +49,8 @@
**/
s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
{
+ u16 i;
+
/* MAC */
hw->mac.ops.init_hw = ixgbe_init_hw_vf;
hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
@@ -82,7 +84,8 @@ s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw)
hw->mac.max_tx_queues = 1;
hw->mac.max_rx_queues = 1;
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_vf;
return IXGBE_SUCCESS;
}
@@ -185,6 +188,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
/* reset the api version */
hw->api_version = ixgbe_mbox_api_10;
+ ixgbe_init_mbx_params_vf(hw);
DEBUGOUT("Issuing a function level reset to MAC\n");
@@ -194,7 +198,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
msec_delay(50);
/* we cannot reset while the RSTI / RSTD bits are asserted */
- while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ while (!mbx->ops[0].check_for_rst(hw, 0) && timeout) {
timeout--;
usec_delay(5);
}
@@ -209,7 +213,7 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
msgbuf[0] = IXGBE_VF_RESET;
- mbx->ops.write_posted(hw, msgbuf, 1, 0);
+ ixgbe_write_mbx(hw, msgbuf, 1, 0);
msec_delay(10);
@@ -218,16 +222,16 @@ s32 ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
* also set up the mc_filter_type which is piggy backed
* on the mac address in word 3
*/
- ret_val = mbx->ops.read_posted(hw, msgbuf,
- IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ ret_val = ixgbe_poll_mbx(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
if (ret_val)
return ret_val;
- if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK) &&
- msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_INVALID_MAC_ADDR;
- if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
@@ -324,13 +328,12 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
static s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
u32 *retmsg, u16 size)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 retval = mbx->ops.write_posted(hw, msg, size, 0);
+ s32 retval = ixgbe_write_mbx(hw, msg, size, 0);
if (retval)
return retval;
- return mbx->ops.read_posted(hw, retmsg, size, 0);
+ return ixgbe_poll_mbx(hw, retmsg, size, 0);
}
/**
@@ -356,9 +359,9 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- /* if nacked the address was rejected, use "perm_addr" */
+ /* if we had a failure, the address was rejected; use "perm_addr" */
if (!ret_val &&
- (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) {
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
return IXGBE_ERR_MBX;
}
@@ -380,7 +383,6 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count, ixgbe_mc_addr_itr next,
bool clear)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 vector;
@@ -412,7 +414,7 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
vector_list[i] = (u16)vector;
}
- return mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE, 0);
+ return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
}
/**
@@ -434,6 +436,7 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
/* Fall through */
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
break;
default:
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
@@ -447,7 +450,7 @@ s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
return err;
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
return IXGBE_SUCCESS;
}
@@ -470,7 +473,7 @@ s32 ixgbe_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
- if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK)) {
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
ret_val = IXGBE_ERR_MBX;
} else {
ret_val = IXGBE_SUCCESS;
@@ -503,10 +506,10 @@ s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
- if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_ACK))
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_SUCCESS;
- return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK);
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE);
}
/**
@@ -571,7 +574,7 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
if (!ret_val) {
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_OUT_OF_MEM;
}
@@ -608,12 +611,13 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
struct ixgbe_mbx_info *mbx = &hw->mbx;
struct ixgbe_mac_info *mac = &hw->mac;
s32 ret_val = IXGBE_SUCCESS;
- u32 links_reg;
u32 in_msg = 0;
+ u32 links_reg;
+
UNREFERENCED_1PARAMETER(autoneg_wait_to_complete);
/* If we were hit with a reset drop the link */
- if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ if (!mbx->ops[0].check_for_rst(hw, 0) || !mbx->timeout)
mac->get_link_status = true;
if (!mac->get_link_status)
@@ -642,7 +646,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
switch (links_reg & IXGBE_LINKS_SPEED_82599) {
case IXGBE_LINKS_SPEED_10G_82599:
*speed = IXGBE_LINK_SPEED_10GB_FULL;
- if (hw->mac.type >= ixgbe_mac_X550) {
+ if (hw->mac.type >= ixgbe_mac_X550_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
}
@@ -652,7 +656,8 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_E610_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -660,7 +665,7 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
case IXGBE_LINKS_SPEED_10_X550EM_A:
*speed = IXGBE_LINK_SPEED_UNKNOWN;
/* Since Reserved in older MAC's */
- if (hw->mac.type >= ixgbe_mac_X550)
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
*speed = IXGBE_LINK_SPEED_10_FULL;
break;
default:
@@ -670,19 +675,22 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
/* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error
*/
- if (mbx->ops.read(hw, &in_msg, 1, 0))
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = false;
goto out;
+ }
if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
- /* msg is not CTS and is NACK we must have lost CTS status */
- if (in_msg & IXGBE_VT_MSGTYPE_NACK)
- ret_val = -1;
+ /* msg is not CTS but is a FAILURE, we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
+ ret_val = IXGBE_ERR_MBX;
goto out;
}
/* the pf is talking, if we timed out in the past we reinit */
if (!mbx->timeout) {
- ret_val = -1;
+ ret_val = IXGBE_ERR_TIMEOUT;
goto out;
}
@@ -713,7 +721,7 @@ s32 ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
if (retval)
return retval;
if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
- (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
return IXGBE_ERR_MBX;
return 0;
@@ -739,7 +747,7 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* Store value and return 0 on success */
- if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_ACK)) {
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
hw->api_version = api;
return 0;
}
@@ -761,6 +769,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
case ixgbe_mbox_api_11:
case ixgbe_mbox_api_12:
case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
break;
default:
return 0;
@@ -775,11 +784,11 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/*
- * if we we didn't get an ACK there must have been
+ * if we didn't get a SUCCESS there must have been
* some sort of mailbox error so we should treat it
* as such
*/
- if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_ACK))
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS))
return IXGBE_ERR_MBX;
/* record and validate values from message */
diff --git a/sys/dev/ixgbe/ixgbe_x540.c b/sys/dev/ixgbe/ixgbe_x540.c
index c06a19555a8b..57cec5b52e18 100644
--- a/sys/dev/ixgbe/ixgbe_x540.c
+++ b/sys/dev/ixgbe/ixgbe_x540.c
@@ -62,6 +62,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
struct ixgbe_phy_info *phy = &hw->phy;
struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
s32 ret_val;
+ u16 i;
DEBUGFUNC("ixgbe_init_ops_X540");
@@ -145,7 +146,8 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw)
mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw))
& IXGBE_FWSM_MODE_MASK);
- hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf;
+ for (i = 0; i < 64; i++)
+ hw->mbx.ops[i].init_params = ixgbe_init_mbx_params_pf;
/* LEDs */
mac->ops.blink_led_start = ixgbe_blink_led_start_X540;
diff --git a/sys/dev/ixgbe/ixgbe_x550.c b/sys/dev/ixgbe/ixgbe_x550.c
index ad7e7abd7a12..7f07190f832c 100644
--- a/sys/dev/ixgbe/ixgbe_x550.c
+++ b/sys/dev/ixgbe/ixgbe_x550.c
@@ -355,8 +355,7 @@ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
/* set up for CS4227 usage */
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
- /* Fallthrough */
-
+ return ixgbe_identify_sfp_module_X550em(hw);
case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_sfp_module_X550em(hw);
break;
@@ -750,7 +749,7 @@ static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw)
}
/**
- * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs
+ * ixgbe_fc_autoneg_fw - Set up flow control for FW-controlled PHYs
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
@@ -799,14 +798,8 @@ s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw)
/* Start with generic X550EM init */
ret_val = ixgbe_init_ops_X550EM(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
- hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
- } else {
- mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a;
- mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a;
- }
+ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550;
+ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550;
mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a;
mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a;
@@ -1288,72 +1281,6 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
}
/**
- * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register
- * of the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Data to write to the register
- **/
-s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data)
-{
- struct ixgbe_hic_internal_phy_req write_cmd;
- s32 status;
- UNREFERENCED_1PARAMETER(device_type);
-
- memset(&write_cmd, 0, sizeof(write_cmd));
- write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- write_cmd.port_number = hw->bus.lan_id;
- write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
- write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
- write_cmd.write_data = IXGBE_CPU_TO_BE32(data);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd,
- sizeof(write_cmd),
- IXGBE_HI_COMMAND_TIMEOUT, false);
-
- return status;
-}
-
-/**
- * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device
- * @hw: pointer to hardware structure
- * @reg_addr: 32 bit PHY register to write
- * @device_type: 3 bit device type
- * @data: Pointer to read data from the register
- **/
-s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data)
-{
- union {
- struct ixgbe_hic_internal_phy_req cmd;
- struct ixgbe_hic_internal_phy_resp rsp;
- } hic;
- s32 status;
- UNREFERENCED_1PARAMETER(device_type);
-
- memset(&hic, 0, sizeof(hic));
- hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
- hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
- hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- hic.cmd.port_number = hw->bus.lan_id;
- hic.cmd.command_type = FW_INT_PHY_REQ_READ;
- hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr);
-
- status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd,
- sizeof(hic.cmd),
- IXGBE_HI_COMMAND_TIMEOUT, true);
-
- /* Extract the register value from the response. */
- *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data);
-
- return status;
-}
-
-/**
* ixgbe_disable_mdd_X550
* @hw: pointer to hardware structure
*
@@ -1569,6 +1496,8 @@ static s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear)
case ixgbe_sfp_type_1g_sx_core1:
case ixgbe_sfp_type_1g_lx_core0:
case ixgbe_sfp_type_1g_lx_core1:
+ case ixgbe_sfp_type_1g_bx_core0:
+ case ixgbe_sfp_type_1g_bx_core1:
*linear = false;
break;
case ixgbe_sfp_type_unknown:
@@ -1876,7 +1805,7 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_link_capabilities_x550em - Determines link capabilities
+ * ixgbe_get_link_capabilities_X550em - Determines link capabilities
* @hw: pointer to hardware structure
* @speed: pointer to link speed
* @autoneg: true when autoneg or autotry is enabled
@@ -1902,9 +1831,11 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
/* Check if 1G SFP module. */
if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1
- || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
- hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) {
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 ||
+ hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) {
*speed = IXGBE_LINK_SPEED_1GB_FULL;
return IXGBE_SUCCESS;
}
@@ -1941,7 +1872,9 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw,
break;
}
}
- /* fall through */
+ *speed = IXGBE_LINK_SPEED_10GB_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ break;
default:
*speed = IXGBE_LINK_SPEED_10GB_FULL |
IXGBE_LINK_SPEED_1GB_FULL;
@@ -3236,7 +3169,7 @@ out:
}
/**
- * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif
+ * ixgbe_write_ee_hostif_data_X550 - Write EEPROM word using hostif
* @hw: pointer to hardware structure
* @offset: offset of word in the EEPROM to write
* @data: word write to the EEPROM
@@ -3661,7 +3594,9 @@ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
break;
}
}
- /* fall through */
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
+ IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+ break;
case ixgbe_phy_x550em_xfi:
physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR |
IXGBE_PHYSICAL_LAYER_1000BASE_KX;
@@ -3704,7 +3639,7 @@ u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_bus_info_x550em - Set PCI bus info
+ * ixgbe_get_bus_info_X550em - Set PCI bus info
* @hw: pointer to hardware structure
*
* Sets bus link width and speed to unknown because X550em is
@@ -3769,7 +3704,7 @@ void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
}
/**
- * ixgbe_enter_lplu_x550em - Transition to low power states
+ * ixgbe_enter_lplu_t_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
* Configures Low Power Link Up on transition to low power states
@@ -3877,7 +3812,7 @@ s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
}
/**
- * ixgbe_get_lcd_x550em - Determine lowest common denominator
+ * ixgbe_get_lcd_t_x550em - Determine lowest common denominator
* @hw: pointer to hardware structure
* @lcd_speed: pointer to lowest common link speed
*
@@ -4311,36 +4246,39 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask)
DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a");
- while (--retries) {
- status = IXGBE_SUCCESS;
- if (hmask)
- status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
- if (status) {
- DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n",
- status);
- return status;
- }
- if (!(mask & IXGBE_GSSR_TOKEN_SM))
- return IXGBE_SUCCESS;
+ status = IXGBE_SUCCESS;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+
+ if (status) {
+ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", status);
+ return status;
+ }
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return IXGBE_SUCCESS;
+
+ while (--retries) {
status = ixgbe_get_phy_token(hw);
- if (status == IXGBE_ERR_TOKEN_RETRY)
- DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
- status);
if (status == IXGBE_SUCCESS)
return IXGBE_SUCCESS;
- if (hmask)
- ixgbe_release_swfw_sync_X540(hw, hmask);
-
if (status != IXGBE_ERR_TOKEN_RETRY) {
- DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n",
- status);
+ DEBUGOUT1("Retry acquiring the PHY token failed, Status = %d\n", status);
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
return status;
}
+
+ if (status == IXGBE_ERR_TOKEN_RETRY)
+ DEBUGOUT1("Could not acquire PHY token, Status = %d\n",
+ status);
}
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+
DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n",
hw->phy.id);
return status;
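The reworked ixgbe_acquire_swfw_sync_X550a() above changes the locking shape: the SWFW semaphore is now taken once up front, only the PHY token acquisition is retried, and the semaphore is released on a terminal error or when retries run out. A standalone skeleton of that pattern; all names and return codes here are made up:

#include <stdio.h>

#define SUCCESS   0
#define ERR_RETRY 1	/* stands in for IXGBE_ERR_TOKEN_RETRY */

static int attempts;

static int acquire_semaphore(void) { return (SUCCESS); }
static void release_semaphore(void) { printf("semaphore released\n"); }

/* Succeeds on the third try, as a stand-in for ixgbe_get_phy_token(). */
static int
try_get_token(void)
{
	return (++attempts < 3 ? ERR_RETRY : SUCCESS);
}

static int
acquire_with_retries(int retries)
{
	int status;

	/* Take the outer semaphore exactly once, not per retry. */
	if ((status = acquire_semaphore()) != SUCCESS)
		return (status);

	while (--retries) {
		status = try_get_token();
		if (status == SUCCESS)
			return (SUCCESS);	/* caller now holds both */
		if (status != ERR_RETRY) {
			release_semaphore();	/* terminal error: undo outer lock */
			return (status);
		}
	}
	release_semaphore();			/* retries exhausted */
	return (ERR_RETRY);
}

int
main(void)
{
	printf("status = %d\n", acquire_with_retries(5));
	return (0);
}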
diff --git a/sys/dev/ixgbe/ixgbe_x550.h b/sys/dev/ixgbe/ixgbe_x550.h
index 245e128266aa..8bff5e3d2bf6 100644
--- a/sys/dev/ixgbe/ixgbe_x550.h
+++ b/sys/dev/ixgbe/ixgbe_x550.h
@@ -69,10 +69,6 @@ s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver, u16 len, const char *str);
s32 ixgbe_get_phy_token(struct ixgbe_hw *);
s32 ixgbe_put_phy_token(struct ixgbe_hw *);
-s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 data);
-s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u32 *data);
void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw);
void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap);
@@ -107,7 +103,6 @@ s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw);
s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw);
-s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index 22a0f779db09..c267fef41af3 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -2431,10 +2431,14 @@
#define I40E_GL_FCOERPDC_MAX_INDEX 143
#define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
-#define I40E_GL_RXERR1_L_MAX_INDEX 143
-#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
-#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR1H(_i) (0x00318004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1H_MAX_INDEX 143
+#define I40E_GL_RXERR1H_RXERR1H_SHIFT 0
+#define I40E_GL_RXERR1H_RXERR1H_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1H_RXERR1H_SHIFT)
+#define I40E_GL_RXERR1L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1L_MAX_INDEX 143
+#define I40E_GL_RXERR1L_RXERR1L_SHIFT 0
+#define I40E_GL_RXERR1L_RXERR1L_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1L_RXERR1L_SHIFT)
#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
#define I40E_GL_RXERR2_L_MAX_INDEX 143
#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index 9c2809a3e41a..20e8f09b0813 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -1472,6 +1472,7 @@ struct i40e_hw_port_stats {
u64 rx_undersize; /* ruc */
u64 rx_fragments; /* rfc */
u64 rx_oversize; /* roc */
+ u64 rx_err1; /* rxerr1 */
u64 rx_jabber; /* rjc */
u64 tx_size_64; /* ptc64 */
u64 tx_size_127; /* ptc127 */
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 60e66aeaf579..bfaf6cd69e58 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1151,13 +1151,20 @@ ixl_if_enable_intr(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
ixl_enable_intr0(hw);
/* Enable queue interrupts */
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- /* TODO: Queue index parameter is probably wrong */
- ixl_enable_queue(hw, que->rxr.me);
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ ixl_enable_queue(hw, rx_que->rxr.me);
+ } else {
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 so that the
+ * queues can trigger interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x0);
+ }
}
/*
@@ -1175,11 +1182,13 @@ ixl_if_disable_intr(if_ctx_t ctx)
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- ixl_disable_queue(hw, rx_que->msix - 1);
+ ixl_disable_queue(hw, rx_que->rxr.me);
} else {
- // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
- // stops queues from triggering interrupts
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
+ * to stop queues from triggering interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
}
}
@@ -1471,17 +1480,33 @@ ixl_if_multi_set(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
+ enum i40e_status_code status;
int mcnt;
+ if_t ifp = iflib_get_ifp(ctx);
IOCTL_DEBUGOUT("ixl_if_multi_set: begin");
/* Delete filters for removed multicast addresses */
ixl_del_multi(vsi, false);
- mcnt = min(if_llmaddr_count(iflib_get_ifp(ctx)), MAX_MULTICAST_ADDR);
+ mcnt = min(if_llmaddr_count(ifp), MAX_MULTICAST_ADDR);
if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) {
- i40e_aq_set_vsi_multicast_promiscuous(hw,
+ /* Check if promisc mode is already enabled, if yes return */
+ if (vsi->flags & IXL_FLAGS_MC_PROMISC)
+ return;
+
+ status = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, TRUE, NULL);
+ if (status != I40E_SUCCESS) {
+ if_printf(ifp, "Failed to enable multicast promiscuous "
+ "mode, status: %s\n", i40e_stat_str(hw, status));
+ } else {
+ if_printf(ifp, "Enabled multicast promiscuous mode\n");
+
+ /* Set the flag to track promiscuous mode */
+ vsi->flags |= IXL_FLAGS_MC_PROMISC;
+ }
+ /* Delete all existing MC filters */
ixl_del_multi(vsi, true);
return;
}
@@ -1684,6 +1709,13 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags)
return (err);
err = i40e_aq_set_vsi_multicast_promiscuous(hw,
vsi->seid, multi, NULL);
+
+ /* Update the multicast promiscuous flag based on the new state */
+ if (multi)
+ vsi->flags |= IXL_FLAGS_MC_PROMISC;
+ else
+ vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
+
return (err);
}
@@ -1776,7 +1808,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_OPACKETS:
return (vsi->opackets);
case IFCOUNTER_OERRORS:
- return (vsi->oerrors);
+ return (if_get_counter_default(ifp, cnt) + vsi->oerrors);
case IFCOUNTER_COLLISIONS:
/* Collisions are by standard impossible in 40G/10G Ethernet */
return (0);
@@ -1791,7 +1823,7 @@ ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt)
case IFCOUNTER_IQDROPS:
return (vsi->iqdrops);
case IFCOUNTER_OQDROPS:
- return (vsi->oqdrops);
+ return (if_get_counter_default(ifp, cnt) + vsi->oqdrops);
case IFCOUNTER_NOPROTO:
return (vsi->noproto);
default:
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index f45354d29300..ab0f38307d90 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -89,6 +89,7 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <sys/proc.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
#include <sys/taskqueue.h>
@@ -96,7 +97,6 @@
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
-#include <machine/stdarg.h>
#ifdef RSS
#include <net/rss_config.h>
@@ -202,6 +202,7 @@
#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
#define IXL_FLAGS_USES_MSIX (1 << 2)
#define IXL_FLAGS_IS_VF (1 << 3)
+#define IXL_FLAGS_MC_PROMISC (1 << 4)
#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0)
#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0)
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index 96303c43d4e8..f0d42f18cef0 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -318,7 +318,9 @@ void ixl_disable_queue(struct i40e_hw *, int);
void ixl_enable_intr0(struct i40e_hw *);
void ixl_disable_intr0(struct i40e_hw *);
void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
-void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
+void ixl_stat_update64(struct i40e_hw *, u32, bool,
+ u64 *, u64 *);
+void ixl_stat_update48(struct i40e_hw *, u32, bool,
u64 *, u64 *);
void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index af253faeac7a..b62619ced5cb 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -91,6 +91,7 @@ static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -605,26 +606,16 @@ void
ixl_add_multi(struct ixl_vsi *vsi)
{
if_t ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
int mcnt = 0;
struct ixl_add_maddr_arg cb_arg;
- enum i40e_status_code status;
IOCTL_DEBUGOUT("ixl_add_multi: begin");
- mcnt = if_llmaddr_count(ifp);
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- status = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- TRUE, NULL);
- if (status != I40E_SUCCESS)
- if_printf(ifp, "Failed to enable multicast promiscuous "
- "mode, status: %s\n", i40e_stat_str(hw, status));
- else
- if_printf(ifp, "Enabled multicast promiscuous mode\n");
- /* Delete all existing MC filters */
- ixl_del_multi(vsi, true);
- return;
- }
+ /*
+ * There is no need to check if the number of multicast addresses
+ * exceeds the MAX_MULTICAST_ADDR threshold and set promiscuous mode
+ * here, as all callers already handle this case.
+ */
cb_arg.vsi = vsi;
LIST_INIT(&cb_arg.to_add);
@@ -664,6 +655,15 @@ ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
int ifp_mcnt = 0;
enum i40e_status_code status;
+ /*
+ * If multicast promiscuous mode was never enabled, there is
+ * nothing to disable. Also leave it enabled while IFF_PROMISC
+ * or IFF_ALLMULTI is set.
+ */
+ if (!(vsi->flags & IXL_FLAGS_MC_PROMISC) ||
+ (if_getflags(ifp) & (IFF_PROMISC | IFF_ALLMULTI)))
+ return;
+
ifp_mcnt = if_llmaddr_count(ifp);
/*
* Equal lists or empty ifp list mean the list has not been changed
@@ -684,6 +684,8 @@ ixl_dis_multi_promisc(struct ixl_vsi *vsi, int vsi_mcnt)
return;
}
+ /* Clear the flag since promiscuous mode is now disabled */
+ vsi->flags &= ~IXL_FLAGS_MC_PROMISC;
if_printf(ifp, "Disabled multicast promiscuous mode\n");
ixl_add_multi(vsi);
@@ -2109,45 +2111,37 @@ ixl_update_stats_counters(struct ixl_pf *pf)
ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
pf->stat_offsets_loaded,
&osd->illegal_bytes, &nsd->illegal_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_GORCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_GOTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_bytes, &nsd->eth.tx_bytes);
ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_discards,
&nsd->eth.rx_discards);
- ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
- I40E_GLPRT_UPRCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_UPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_unicast,
&nsd->eth.rx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
- I40E_GLPRT_UPTCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_UPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_unicast,
&nsd->eth.tx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_MPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_multicast,
&nsd->eth.rx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
- I40E_GLPRT_MPTCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_MPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_multicast,
&nsd->eth.tx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
- I40E_GLPRT_BPRCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_BPRCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.rx_broadcast,
&nsd->eth.rx_broadcast);
- ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
- I40E_GLPRT_BPTCL(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_BPTCL(hw->port),
pf->stat_offsets_loaded,
&osd->eth.tx_broadcast,
&nsd->eth.tx_broadcast);
@@ -2191,62 +2185,48 @@ ixl_update_stats_counters(struct ixl_pf *pf)
vsi->shared->isc_pause_frames = 1;
/* Packet size stats rx */
- ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC64L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_64, &nsd->rx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC127L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_127, &nsd->rx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC255L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_255, &nsd->rx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC511L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_511, &nsd->rx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1023L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_1023, &nsd->rx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1522L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_1522, &nsd->rx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PRC9522L(hw->port),
pf->stat_offsets_loaded,
&osd->rx_size_big, &nsd->rx_size_big);
/* Packet size stats tx */
- ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC64L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_64, &nsd->tx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC127L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_127, &nsd->tx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC255L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_255, &nsd->tx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC511L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_511, &nsd->tx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1023L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_1023, &nsd->tx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1522L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_1522, &nsd->tx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
+ ixl_stat_update48(hw, I40E_GLPRT_PTC9522L(hw->port),
pf->stat_offsets_loaded,
&osd->tx_size_big, &nsd->tx_size_big);
@@ -2256,9 +2236,29 @@ ixl_update_stats_counters(struct ixl_pf *pf)
ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_fragments, &nsd->rx_fragments);
+
+ u64 rx_roc;
ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
+ &osd->rx_oversize, &rx_roc);
+
+ /*
+ * Read the RXERR1 register to get the count of packets larger
+ * than the RX MAX frame size and include it in the total
+ * rx_oversize count.
+ *
+ * Also add BIT(7) to the hw->pf_id value when indexing the
+ * I40E_GL_RXERR1 register: indexes 0..127 are for VFs when
+ * SR-IOV is enabled, indexes 128..143 are for PFs.
+ */
+ u64 rx_err1;
+ ixl_stat_update64(hw,
+ I40E_GL_RXERR1L(hw->pf_id + BIT(7)),
+ pf->stat_offsets_loaded,
+ &osd->rx_err1,
+ &rx_err1);
+
+ nsd->rx_oversize = rx_roc + rx_err1;
+
ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
pf->stat_offsets_loaded,
&osd->rx_jabber, &nsd->rx_jabber);
@@ -2305,37 +2305,29 @@ ixl_update_eth_stats(struct ixl_vsi *vsi)
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
- ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
- I40E_GLV_GORCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_GORCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_bytes, &es->rx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
- I40E_GLV_UPRCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_UPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_unicast, &es->rx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
- I40E_GLV_MPRCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_MPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_multicast, &es->rx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
- I40E_GLV_BPRCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_BPRCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_broadcast, &es->rx_broadcast);
- ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
- I40E_GLV_GOTCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_GOTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_bytes, &es->tx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
- I40E_GLV_UPTCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_UPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_unicast, &es->tx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
- I40E_GLV_MPTCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_MPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_multicast, &es->tx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
- I40E_GLV_BPTCL(stat_idx),
+ ixl_stat_update48(hw, I40E_GLV_BPTCL(stat_idx),
vsi->stat_offsets_loaded,
&oes->tx_broadcast, &es->tx_broadcast);
vsi->stat_offsets_loaded = true;
@@ -2409,28 +2401,56 @@ ixl_vsi_reset_stats(struct ixl_vsi *vsi)
}
/**
- * Read and update a 48 bit stat from the hw
+ * Helper function for reading and updating 48/64 bit stats from the hw
*
* Since the device stats are not reset at PFReset, they likely will not
* be zeroed when the driver starts. We'll save the first values read
* and use them as offsets to be subtracted from the raw values in order
* to report stats that count from zero.
**/
-void
-ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
- bool offset_loaded, u64 *offset, u64 *stat)
+static void
+_ixl_stat_update_helper(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 mask, u64 *offset, u64 *stat)
{
- u64 new_data;
-
- new_data = rd64(hw, loreg);
+ u64 new_data = rd64(hw, reg);
if (!offset_loaded)
*offset = new_data;
if (new_data >= *offset)
*stat = new_data - *offset;
else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
- *stat &= 0xFFFFFFFFFFFFULL;
+ *stat = (new_data + mask) - *offset + 1;
+ *stat &= mask;
+}
+
+/**
+ * Read and update a 48 bit stat from the hw
+ **/
+void
+ixl_stat_update48(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ _ixl_stat_update_helper(hw,
+ reg,
+ offset_loaded,
+ 0xFFFFFFFFFFFFULL,
+ offset,
+ stat);
+}
+
+/**
+ * ixl_stat_update64 - read and update a 64 bit stat from the chip.
+ **/
+void
+ixl_stat_update64(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ _ixl_stat_update_helper(hw,
+ reg,
+ offset_loaded,
+ 0xFFFFFFFFFFFFFFFFULL,
+ offset,
+ stat);
}
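The wrap handling in _ixl_stat_update_helper() above is worth a worked example. For a 48-bit counter the expression (new_data + mask) - *offset + 1 equals (new_data + 2^48) - *offset, which is the true delta when the raw register wrapped past zero. A standalone model of just that arithmetic:

#include <stdio.h>
#include <stdint.h>

static uint64_t
stat_delta(uint64_t new_data, uint64_t offset, uint64_t mask)
{
	uint64_t stat;

	if (new_data >= offset)
		stat = new_data - offset;
	else
		stat = (new_data + mask) - offset + 1;	/* wrapped */
	return (stat & mask);
}

int
main(void)
{
	uint64_t mask48 = 0xFFFFFFFFFFFFULL;

	/*
	 * Counter started 10 ticks below the 48-bit ceiling, wrapped,
	 * and now reads 5: the true delta is 15.
	 */
	printf("%llu\n", (unsigned long long)
	    stat_delta(5, mask48 - 9, mask48));
	return (0);
}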
/**
@@ -2510,6 +2530,12 @@ ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
OID_AUTO, "queue_interrupt_table",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "queue_int_ctln",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_debug_queue_int_ctln, "A",
+ "View MSI-X control registers for RX queues");
}
void
@@ -4881,6 +4907,7 @@ ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = vsi->hw;
device_t dev = pf->dev;
struct sbuf *buf;
int error = 0;
@@ -4897,11 +4924,52 @@ ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
sbuf_cat(buf, "\n");
for (int i = 0; i < vsi->num_rx_queues; i++) {
rx_que = &vsi->rx_queues[i];
- sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
+ sbuf_printf(buf,
+ "(rxq %3d): %d LNKLSTN: %08x QINT_RQCTL: %08x\n",
+ i, rx_que->msix,
+ rd32(hw, I40E_PFINT_LNKLSTN(rx_que->msix - 1)),
+ rd32(hw, I40E_QINT_RQCTL(rx_que->msix - 1)));
}
for (int i = 0; i < vsi->num_tx_queues; i++) {
tx_que = &vsi->tx_queues[i];
- sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
+ sbuf_printf(buf, "(txq %3d): %d QINT_TQCTL: %08x\n",
+ i, tx_que->msix,
+ rd32(hw, I40E_QINT_TQCTL(tx_que->msix - 1)));
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+static int
+ixl_sysctl_debug_queue_int_ctln(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ for (int i = 0; i < vsi->num_rx_queues; i++) {
+ rx_que = &vsi->rx_queues[i];
+ sbuf_printf(buf,
+ "(rxq %3d): %d PFINT_DYN_CTLN: %08x\n",
+ i, rx_que->msix,
+ rd32(hw, I40E_PFINT_DYN_CTLN(rx_que->msix - 1)));
}
error = sbuf_finish(buf);
diff --git a/sys/dev/jedec_dimm/jedec_dimm.c b/sys/dev/jedec_dimm/jedec_dimm.c
index ddaa0e96856e..697e7695a009 100644
--- a/sys/dev/jedec_dimm/jedec_dimm.c
+++ b/sys/dev/jedec_dimm/jedec_dimm.c
@@ -265,7 +265,6 @@ jedec_dimm_attach(device_t dev)
uint16_t vendorid;
bool tsod_present;
int rc;
- int new_desc_len;
enum dram_type type;
struct jedec_dimm_softc *sc;
struct sysctl_ctx_list *ctx;
@@ -273,7 +272,6 @@ jedec_dimm_attach(device_t dev)
struct sysctl_oid_list *children;
const char *tsod_match;
const char *slotid_str;
- char *new_desc;
sc = device_get_softc(dev);
ctx = device_get_sysctl_ctx(dev);
@@ -447,26 +445,13 @@ no_tsod:
* device description.
*/
if ((tsod_match != NULL) || (sc->slotid_str != NULL)) {
- new_desc_len = strlen(device_get_desc(dev));
- if (tsod_match != NULL) {
- new_desc_len += strlen(tsod_match);
- new_desc_len += 4; /* " w/ " */
- }
- if (sc->slotid_str != NULL) {
- new_desc_len += strlen(sc->slotid_str);
- new_desc_len += 3; /* space + parens */
- }
- new_desc_len++; /* terminator */
- new_desc = malloc(new_desc_len, M_TEMP, (M_WAITOK | M_ZERO));
- (void) snprintf(new_desc, new_desc_len, "%s%s%s%s%s%s",
+ device_set_descf(dev, "%s%s%s%s%s%s",
device_get_desc(dev),
(tsod_match ? " w/ " : ""),
(tsod_match ? tsod_match : ""),
(sc->slotid_str ? " (" : ""),
(sc->slotid_str ? sc->slotid_str : ""),
(sc->slotid_str ? ")" : ""));
- device_set_desc_copy(dev, new_desc);
- free(new_desc, M_TEMP);
}
out:
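The jedec_dimm hunk above is a purely mechanical conversion to device_set_descf(9), which formats and stores the description in one call. A userspace analogue of the before/after shape, using asprintf() as the stand-in (the strings are made up):

#define _GNU_SOURCE	/* asprintf() on glibc; it is native on FreeBSD */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const char *base = "DDR4 DIMM", *extra = "w/ TSOD";
	char *desc;
	size_t len;

	/* Old shape: count bytes, allocate, format, use, free. */
	len = strlen(base) + strlen(extra) + 2;	/* space + NUL */
	if ((desc = malloc(len)) != NULL) {
		snprintf(desc, len, "%s %s", base, extra);
		printf("%s\n", desc);
		free(desc);
	}

	/* New shape: one formatted-allocation call does all of it. */
	if (asprintf(&desc, "%s %s", base, extra) != -1) {
		printf("%s\n", desc);
		free(desc);
	}
	return (0);
}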
diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c
index aebeb9c617fa..d9982a2f031c 100644
--- a/sys/dev/jme/if_jme.c
+++ b/sys/dev/jme/if_jme.c
@@ -625,7 +625,7 @@ jme_attach(device_t dev)
struct mii_data *mii;
uint32_t reg;
uint16_t burst;
- int error, i, mii_flags, msic, msixc, pmc;
+ int error, i, mii_flags, msic, msixc;
error = 0;
sc = device_get_softc(dev);
@@ -804,12 +804,6 @@ jme_attach(device_t dev)
goto fail;
ifp = sc->jme_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -821,8 +815,7 @@ jme_attach(device_t dev)
/* JMC250 supports Tx/Rx checksum offload as well as TSO. */
if_setcapabilities(ifp, IFCAP_HWCSUM | IFCAP_TSO4);
if_sethwassist(ifp, JME_CSUM_FEATURES | CSUM_TSO);
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
- sc->jme_flags |= JME_FLAG_PMCAP;
+ if (pci_has_pm(dev)) {
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
}
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -878,12 +871,6 @@ jme_attach(device_t dev)
/* Create local taskq. */
sc->jme_tq = taskqueue_create_fast("jme_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->jme_tq);
- if (sc->jme_tq == NULL) {
- device_printf(dev, "could not create taskqueue.\n");
- ether_ifdetach(ifp);
- error = ENXIO;
- goto fail;
- }
taskqueue_start_threads(&sc->jme_tq, 1, PI_NET, "%s taskq",
device_get_nameunit(sc->jme_dev));
@@ -940,10 +927,6 @@ jme_detach(device_t dev)
sc->jme_tq = NULL;
}
- if (sc->jme_miibus != NULL) {
- device_delete_child(dev, sc->jme_miibus);
- sc->jme_miibus = NULL;
- }
bus_generic_detach(dev);
jme_dma_free(sc);
@@ -1578,12 +1561,10 @@ jme_setwol(struct jme_softc *sc)
{
if_t ifp;
uint32_t gpr, pmcs;
- uint16_t pmstat;
- int pmc;
JME_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) != 0) {
+ if (!pci_has_pm(sc->jme_dev)) {
/* Remove Tx MAC/offload clock to save more power. */
if ((sc->jme_flags & JME_FLAG_TXCLK) != 0)
CSR_WRITE_4(sc, JME_GHC, CSR_READ_4(sc, JME_GHC) &
@@ -1618,11 +1599,8 @@ jme_setwol(struct jme_softc *sc)
~(GHC_TX_OFFLD_CLK_100 | GHC_TX_MAC_CLK_100 |
GHC_TX_OFFLD_CLK_1000 | GHC_TX_MAC_CLK_1000));
/* Request PME. */
- pmstat = pci_read_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->jme_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->jme_dev);
if ((if_getcapenable(ifp) & IFCAP_WOL) == 0) {
/* No WOL, PHY power down. */
jme_phy_down(sc);
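The jme hunks above swap hand-rolled PCIY_PMG capability walks and PCIR_POWER_STATUS read-modify-writes for the pci_has_pm(9)/pci_enable_pme(9) helpers. A kernel-side sketch of the resulting pattern; this illustrates the shape only and is not jme's exact code:

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/*
 * Sketch only: arm PME-based wake-on-LAN when the device has a PCI
 * power-management capability. pci_has_pm() replaces the explicit
 * pci_find_cap(dev, PCIY_PMG, &pmc) probe; pci_enable_pme() replaces
 * the manual power-status register read-modify-write.
 */
static void
example_setwol(device_t dev, bool wol_enabled)
{
	if (!pci_has_pm(dev))
		return;			/* no PM capability: nothing to arm */
	if (wol_enabled)
		pci_enable_pme(dev);	/* arm PME# generation in one call */
}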
@@ -1649,21 +1627,11 @@ jme_resume(device_t dev)
{
struct jme_softc *sc;
if_t ifp;
- uint16_t pmstat;
- int pmc;
sc = device_get_softc(dev);
- JME_LOCK(sc);
- if (pci_find_cap(sc->jme_dev, PCIY_PMG, &pmc) == 0) {
- pmstat = pci_read_config(sc->jme_dev,
- pmc + PCIR_POWER_STATUS, 2);
- /* Disable PME clear PME status. */
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->jme_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
/* Wakeup PHY. */
+ JME_LOCK(sc);
jme_phy_up(sc);
ifp = sc->jme_ifp;
if ((if_getflags(ifp) & IFF_UP) != 0) {
diff --git a/sys/dev/jme/if_jmevar.h b/sys/dev/jme/if_jmevar.h
index c22c0dee1077..5be250567f8c 100644
--- a/sys/dev/jme/if_jmevar.h
+++ b/sys/dev/jme/if_jmevar.h
@@ -190,7 +190,6 @@ struct jme_softc {
#define JME_FLAG_PCIX 0x00000004
#define JME_FLAG_MSI 0x00000008
#define JME_FLAG_MSIX 0x00000010
-#define JME_FLAG_PMCAP 0x00000020
#define JME_FLAG_FASTETH 0x00000040
#define JME_FLAG_NOJUMBO 0x00000080
#define JME_FLAG_RXCLK 0x00000100
diff --git a/sys/dev/kvm_clock/kvm_clock.c b/sys/dev/kvm_clock/kvm_clock.c
index dd756b99b5e0..43da9b69edc8 100644
--- a/sys/dev/kvm_clock/kvm_clock.c
+++ b/sys/dev/kvm_clock/kvm_clock.c
@@ -141,7 +141,7 @@ kvm_clock_identify(driver_t *driver, device_t parent)
if ((regs[0] &
(KVM_FEATURE_CLOCKSOURCE2 | KVM_FEATURE_CLOCKSOURCE)) == 0)
return;
- if (device_find_child(parent, KVM_CLOCK_DEVNAME, -1))
+ if (device_find_child(parent, KVM_CLOCK_DEVNAME, DEVICE_UNIT_ANY))
return;
BUS_ADD_CHILD(parent, 0, KVM_CLOCK_DEVNAME, 0);
}
diff --git a/sys/dev/le/lance.c b/sys/dev/le/lance.c
index 74ae09ee14c3..f5e41e82bb42 100644
--- a/sys/dev/le/lance.c
+++ b/sys/dev/le/lance.c
@@ -107,8 +107,6 @@ lance_config(struct lance_softc *sc, const char* name, int unit)
return (ENXIO);
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOSPC);
callout_init_mtx(&sc->sc_wdog_ch, &sc->sc_mtx, 0);
@@ -195,7 +193,8 @@ lance_attach(struct lance_softc *sc)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
if_setcapenablebit(ifp, IFCAP_VLAN_MTU, 0);
- gone_in(15, "le: 10/100 NIC no longer needed for Qemu/MIPS");
+ gone_in(15, "Warning! le(4) to be removed: no longer needed for "
+ "Qemu/MIPS\n");
}
void
diff --git a/sys/dev/lge/if_lge.c b/sys/dev/lge/if_lge.c
index d77866ee3cad..c5cfafc0bd22 100644
--- a/sys/dev/lge/if_lge.c
+++ b/sys/dev/lge/if_lge.c
@@ -512,11 +512,6 @@ lge_attach(device_t dev)
}
ifp = sc->lge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -560,8 +555,7 @@ lge_attach(device_t dev)
fail:
lge_free_jumbo_mem(sc);
if (sc->lge_ldata)
- contigfree(sc->lge_ldata,
- sizeof(struct lge_list_data), M_DEVBUF);
+ free(sc->lge_ldata, M_DEVBUF);
if (ifp)
if_free(ifp);
if (sc->lge_irq)
@@ -589,13 +583,12 @@ lge_detach(device_t dev)
ether_ifdetach(ifp);
bus_generic_detach(dev);
- device_delete_child(dev, sc->lge_miibus);
bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
- contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
+ free(sc->lge_ldata, M_DEVBUF);
if_free(ifp);
lge_free_jumbo_mem(sc);
mtx_destroy(&sc->lge_mtx);
@@ -789,7 +782,7 @@ lge_free_jumbo_mem(struct lge_softc *sc)
free(entry, M_DEVBUF);
}
- contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF);
+ free(sc->lge_cdata.lge_jumbo_buf, M_DEVBUF);
return;
}
diff --git a/sys/dev/liquidio/base/lio_request_manager.c b/sys/dev/liquidio/base/lio_request_manager.c
index f4eae0c8bf31..95eac12ecf3b 100644
--- a/sys/dev/liquidio/base/lio_request_manager.c
+++ b/sys/dev/liquidio/base/lio_request_manager.c
@@ -159,11 +159,6 @@ lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
db_tq = &oct->check_db_tq[iq_no];
db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK,
taskqueue_thread_enqueue, &db_tq->tq);
- if (db_tq->tq == NULL) {
- lio_dev_err(oct, "check db wq create failed for iq %d\n",
- iq_no);
- return (1);
- }
TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout,
(void *)db_tq);
@@ -179,10 +174,6 @@ lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq,
oct->instr_queue[iq_no]->br =
buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK,
&oct->instr_queue[iq_no]->enq_lock);
- if (oct->instr_queue[iq_no]->br == NULL) {
- lio_dev_err(oct, "Critical Failure setting up buf ring\n");
- return (1);
- }
return (0);
}
diff --git a/sys/dev/liquidio/base/lio_response_manager.c b/sys/dev/liquidio/base/lio_response_manager.c
index 12a3ad60521e..ac5fc6229885 100644
--- a/sys/dev/liquidio/base/lio_response_manager.c
+++ b/sys/dev/liquidio/base/lio_response_manager.c
@@ -59,10 +59,6 @@ lio_setup_response_list(struct octeon_device *oct)
ctq = &oct->dma_comp_tq;
ctq->tq = taskqueue_create("lio_dma_comp", M_WAITOK,
taskqueue_thread_enqueue, &ctq->tq);
- if (ctq->tq == NULL) {
- lio_dev_err(oct, "failed to create wq thread\n");
- return (-ENOMEM);
- }
TIMEOUT_TASK_INIT(ctq->tq, &ctq->work, 0, lio_poll_req_completion,
(void *)ctq);
diff --git a/sys/dev/liquidio/lio_ioctl.c b/sys/dev/liquidio/lio_ioctl.c
index 10c88b209051..b2fd54f59580 100644
--- a/sys/dev/liquidio/lio_ioctl.c
+++ b/sys/dev/liquidio/lio_ioctl.c
@@ -481,7 +481,7 @@ lio_get_new_flags(if_t ifp)
* Accept all multicast addresses if there are more than we
* can handle
*/
- if (if_getamcount(ifp) > LIO_MAX_MULTICAST_ADDR)
+ if (if_llmaddr_count(ifp) > LIO_MAX_MULTICAST_ADDR)
f |= LIO_IFFLAG_ALLMULTI;
}
if (if_getflags(ifp) & IFF_BROADCAST)
diff --git a/sys/dev/liquidio/lio_main.c b/sys/dev/liquidio/lio_main.c
index bec510af0c65..3c73a6b10eed 100644
--- a/sys/dev/liquidio/lio_main.c
+++ b/sys/dev/liquidio/lio_main.c
@@ -200,7 +200,6 @@ lio_probe(device_t dev)
uint16_t device_id;
uint16_t subdevice_id;
uint8_t revision_id;
- char device_ver[256];
vendor_id = pci_get_vendor(dev);
if (vendor_id != PCI_VENDOR_ID_CAVIUM)
@@ -216,9 +215,8 @@ lio_probe(device_t dev)
(device_id == tbl->device_id) &&
(subdevice_id == tbl->subdevice_id) &&
(revision_id == tbl->revision_id)) {
- sprintf(device_ver, "%s, Version - %s",
- lio_strings[tbl->index], LIO_VERSION);
- device_set_desc_copy(dev, device_ver);
+ device_set_descf(dev, "%s, Version - %s",
+ lio_strings[tbl->index], LIO_VERSION);
return (BUS_PROBE_DEFAULT);
}
@@ -1329,11 +1327,6 @@ lio_setup_nic_devices(struct octeon_device *octeon_dev)
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- lio_dev_err(octeon_dev, "Device allocation failed\n");
- goto setup_nic_dev_fail;
- }
-
lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO);
if (lio == NULL) {
@@ -1861,10 +1854,6 @@ lio_setup_rx_oom_poll_fn(if_t ifp)
rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK,
taskqueue_thread_enqueue,
&rx_status_tq->tq);
- if (rx_status_tq->tq == NULL) {
- lio_dev_err(oct, "unable to create lio rx oom status tq\n");
- return (-1);
- }
TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0,
lio_poll_check_rx_oom_status, (void *)rx_status_tq);
diff --git a/sys/dev/liquidio/lio_sysctl.c b/sys/dev/liquidio/lio_sysctl.c
index 729f4d432274..61a7e96098c8 100644
--- a/sys/dev/liquidio/lio_sysctl.c
+++ b/sys/dev/liquidio/lio_sysctl.c
@@ -744,9 +744,6 @@ lio_get_regs(SYSCTL_HANDLER_ARGS)
regbuf = malloc(sizeof(char) * LIO_REGDUMP_LEN_XXXX, M_DEVBUF,
M_WAITOK | M_ZERO);
- if (regbuf == NULL)
- return (error);
-
switch (oct->chip_id) {
case LIO_CN23XX_PF_VID:
len += lio_cn23xx_pf_read_csr_reg(regbuf, oct);
diff --git a/sys/dev/malo/if_malo.c b/sys/dev/malo/if_malo.c
index 56310085ef5f..2e4f3967ace4 100644
--- a/sys/dev/malo/if_malo.c
+++ b/sys/dev/malo/if_malo.c
@@ -94,13 +94,9 @@ enum {
MALO_DEBUG_FW = 0x00008000, /* firmware */
MALO_DEBUG_ANY = 0xffffffff
};
-#define IS_BEACON(wh) \
- ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | \
- IEEE80211_FC0_SUBTYPE_MASK)) == \
- (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define IFF_DUMPPKTS_RECV(sc, wh) \
(((sc->malo_debug & MALO_DEBUG_RECV) && \
- ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IS_BEACON(wh))))
+ ((sc->malo_debug & MALO_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh))))
#define IFF_DUMPPKTS_XMIT(sc) \
(sc->malo_debug & MALO_DEBUG_XMIT)
#define DPRINTF(sc, m, fmt, ...) do { \
@@ -267,6 +263,8 @@ malo_attach(uint16_t devid, struct malo_softc *sc)
;
IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->malo_hwspecs.macaddr);
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Transmit requires space in the packet for a special format transmit
* record and optional padding between this record and the payload.
@@ -902,7 +900,7 @@ malo_updatetxrate(struct ieee80211_node *ni, int rix)
static const int ieeerates[] =
{ 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 96, 108 };
if (rix < nitems(ieeerates))
- ni->ni_txrate = ieeerates[rix];
+ ieee80211_node_set_txrate_dot11rate(ni, ieeerates[rix]);
}
static int
@@ -1025,8 +1023,6 @@ static int
malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
struct malo_txbuf *bf, struct mbuf *m0)
{
-#define IS_DATA_FRAME(wh) \
- ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK)) == IEEE80211_FC0_TYPE_DATA)
int error, iswep;
int hdrlen, pktlen;
struct ieee80211_frame *wh;
@@ -1046,6 +1042,8 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
struct ieee80211_key *k;
@@ -1150,7 +1148,7 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
ds->pktptr = htole32(bf->bf_segs[0].ds_addr);
ds->pktlen = htole16(bf->bf_segs[0].ds_len);
/* NB: pPhysNext setup once, don't touch */
- ds->datarate = IS_DATA_FRAME(wh) ? 1 : 0;
+ ds->datarate = IEEE80211_IS_DATA(wh) ? 1 : 0;
ds->sap_pktinfo = 0;
ds->format = 0;
@@ -1183,7 +1181,7 @@ malo_tx_start(struct malo_softc *sc, struct ieee80211_node *ni,
#endif
MALO_TXQ_LOCK(txq);
- if (!IS_DATA_FRAME(wh))
+ if (!IEEE80211_IS_DATA(wh))
ds->status |= htole32(1);
ds->status |= htole32(MALO_TXD_STATUS_FW_OWNED);
STAILQ_INSERT_TAIL(&txq->active, bf, bf_list);
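The removed IS_BEACON() macro above spells out exactly what this conversion relies on: a management beacon is identified by masking both the type and subtype fields of the first frame-control octet. A standalone model using the standard 802.11 frame-control values (names shortened here; net80211's IEEE80211_IS_MGMT_BEACON() performs the same test):

#include <stdio.h>
#include <stdint.h>

#define FC0_TYPE_MASK      0x0c
#define FC0_TYPE_MGT       0x00
#define FC0_SUBTYPE_MASK   0xf0
#define FC0_SUBTYPE_BEACON 0x80

static int
is_beacon(uint8_t fc0)
{
	return ((fc0 & (FC0_TYPE_MASK | FC0_SUBTYPE_MASK)) ==
	    (FC0_TYPE_MGT | FC0_SUBTYPE_BEACON));
}

int
main(void)
{
	printf("0x80 beacon? %d\n", is_beacon(0x80));	/* yes */
	printf("0x40 beacon? %d\n", is_beacon(0x40));	/* no: probe req */
	return (0);
}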
diff --git a/sys/dev/mana/gdma_main.c b/sys/dev/mana/gdma_main.c
index 13f8a30762b1..b339badad925 100644
--- a/sys/dev/mana/gdma_main.c
+++ b/sys/dev/mana/gdma_main.c
@@ -221,7 +221,7 @@ mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
if (!gc || !gmi)
return EINVAL;
- if (length < PAGE_SIZE || (length != roundup_pow_of_two(length)))
+ if (length < PAGE_SIZE || !powerof2(length))
return EINVAL;
err = bus_dma_tag_create(bus_get_dma_tag(gc->dev), /* parent */
@@ -868,9 +868,6 @@ int mana_gd_create_hwc_queue(struct gdma_dev *gd,
int err;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -942,7 +939,7 @@ mana_gd_create_dma_region(struct gdma_dev *gd,
int err;
int i;
- if (length < PAGE_SIZE || !is_power_of_2(length)) {
+ if (length < PAGE_SIZE || !powerof2(length)) {
mana_err(NULL, "gmi size incorrect: %u\n", length);
return EINVAL;
}
@@ -962,9 +959,6 @@ mana_gd_create_dma_region(struct gdma_dev *gd,
}
req = malloc(req_msg_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!req)
- return ENOMEM;
-
mana_gd_init_req_hdr(&req->hdr, GDMA_CREATE_DMA_REGION,
req_msg_size, sizeof(resp));
req->length = length;
@@ -1008,9 +1002,6 @@ mana_gd_create_mana_eq(struct gdma_dev *gd,
return EINVAL;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -1056,9 +1047,6 @@ int mana_gd_create_mana_wq_cq(struct gdma_dev *gd,
return EINVAL;
queue = malloc(sizeof(*queue), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!queue)
- return ENOMEM;
-
gmi = &queue->mem_info;
err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);
if (err)
@@ -1480,9 +1468,6 @@ mana_gd_alloc_res_map(uint32_t res_avail,
r->map =
malloc(n * sizeof(unsigned long), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!r->map)
- return ENOMEM;
-
r->size = res_avail;
mtx_init(&r->lock_spin, lock_name, NULL, MTX_SPIN);
@@ -1616,10 +1601,6 @@ mana_gd_setup_irqs(device_t dev)
gc->irq_contexts = malloc(nvec * sizeof(struct gdma_irq_context),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!gc->irq_contexts) {
- rc = ENOMEM;
- goto err_setup_irq_release;
- }
for (i = 0; i < nvec; i++) {
gic = &gc->irq_contexts[i];
@@ -1750,7 +1731,6 @@ static int
mana_gd_probe(device_t dev)
{
mana_vendor_id_t *ent;
- char adapter_name[60];
uint16_t pci_vendor_id = 0;
uint16_t pci_device_id = 0;
@@ -1764,8 +1744,7 @@ mana_gd_probe(device_t dev)
mana_dbg(NULL, "vendor=%x device=%x\n",
pci_vendor_id, pci_device_id);
- sprintf(adapter_name, DEVICE_DESC);
- device_set_desc_copy(dev, adapter_name);
+ device_set_desc(dev, DEVICE_DESC);
return (BUS_PROBE_DEFAULT);
}
@@ -1900,6 +1879,11 @@ static int
mana_gd_detach(device_t dev)
{
struct gdma_context *gc = device_get_softc(dev);
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mana_remove(&gc->mana);
@@ -1911,7 +1895,7 @@ mana_gd_detach(device_t dev)
pci_disable_busmaster(dev);
- return (bus_generic_detach(dev));
+ return (0);
}
diff --git a/sys/dev/mana/gdma_util.h b/sys/dev/mana/gdma_util.h
index 822c831b9d70..1efa315bbcfe 100644
--- a/sys/dev/mana/gdma_util.h
+++ b/sys/dev/mana/gdma_util.h
@@ -170,27 +170,6 @@ find_first_zero_bit(const unsigned long *p, unsigned long max)
return (max);
}
-static inline unsigned long
-ilog2(unsigned long x)
-{
- unsigned long log = x;
- while (x >>= 1)
- log++;
- return (log);
-}
-
-static inline unsigned long
-roundup_pow_of_two(unsigned long x)
-{
- return (1UL << flsl(x - 1));
-}
-
-static inline int
-is_power_of_2(unsigned long n)
-{
- return (n == roundup_pow_of_two(n));
-}
-
struct completion {
unsigned int done;
struct mtx lock;
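The three helpers deleted above are all replaced by powerof2() from <sys/param.h>. Its definition is a one-line bit trick, with one edge worth remembering: powerof2(0) evaluates true, which is why the callers keep their separate length < PAGE_SIZE guard. A standalone check:

#include <stdio.h>

#define powerof2(x) ((((x)-1)&(x))==0)	/* as in <sys/param.h> */

int
main(void)
{
	unsigned v[] = { 0, 1, 2, 3, 4096, 4097 };

	for (int i = 0; i < 6; i++)
		printf("powerof2(%u) = %d\n", v[i], powerof2(v[i]));
	return (0);
}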
diff --git a/sys/dev/mana/hw_channel.c b/sys/dev/mana/hw_channel.c
index 7a40a28894fb..5904389596a3 100644
--- a/sys/dev/mana/hw_channel.c
+++ b/sys/dev/mana/hw_channel.c
@@ -416,8 +416,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
hwc_cq = malloc(sizeof(*hwc_cq), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc_cq)
- return ENOMEM;
err = mana_hwc_create_gdma_eq(hwc, eq_size, ctx, callback, &eq);
if (err) {
@@ -438,10 +436,6 @@ mana_hwc_create_cq(struct hw_channel_context *hwc,
comp_buf = mallocarray(q_depth, sizeof(struct gdma_comp),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!comp_buf) {
- err = ENOMEM;
- goto out;
- }
hwc_cq->hwc = hwc;
hwc_cq->comp_buf = comp_buf;
@@ -476,8 +470,6 @@ mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, uint16_t q_depth,
dma_buf = malloc(sizeof(*dma_buf) +
q_depth * sizeof(struct hwc_work_request),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!dma_buf)
- return ENOMEM;
dma_buf->num_reqs = q_depth;
@@ -560,8 +552,6 @@ mana_hwc_create_wq(struct hw_channel_context *hwc,
queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
hwc_wq = malloc(sizeof(*hwc_wq), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc_wq)
- return ENOMEM;
err = mana_hwc_create_gdma_wq(hwc, q_type, queue_size, &queue);
if (err)
@@ -669,8 +659,6 @@ mana_hwc_test_channel(struct hw_channel_context *hwc, uint16_t q_depth,
ctx = malloc(q_depth * sizeof(struct hwc_caller_ctx),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ctx)
- return ENOMEM;
for (i = 0; i < q_depth; ++i)
init_completion(&ctx[i].comp_event);
@@ -719,9 +707,6 @@ mana_hwc_establish_channel(struct gdma_context *gc, uint16_t *q_depth,
gc->cq_table = malloc(gc->max_num_cqs * sizeof(struct gdma_queue *),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!gc->cq_table)
- return ENOMEM;
-
gc->cq_table[cq->id] = cq;
return 0;
@@ -782,8 +767,6 @@ mana_hwc_create_channel(struct gdma_context *gc)
int err;
hwc = malloc(sizeof(*hwc), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!hwc)
- return ENOMEM;
gd->gdma_context = gc;
gd->driver_data = hwc;
diff --git a/sys/dev/mana/mana.h b/sys/dev/mana/mana.h
index 906b28eb56b6..a037eb3f05c7 100644
--- a/sys/dev/mana/mana.h
+++ b/sys/dev/mana/mana.h
@@ -106,9 +106,23 @@ enum TRI_STATE {
#define DEFAULT_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)
#define MAX_FRAME_SIZE 4096
-#define RX_BUFFERS_PER_QUEUE 512
-
-#define MAX_SEND_BUFFERS_PER_QUEUE 256
+/* Number of RX buffers. Must be a power of two.
+ * Higher values may fail at allocation.
+ */
+#define MAX_RX_BUFFERS_PER_QUEUE 8192
+#define DEF_RX_BUFFERS_PER_QUEUE 1024
+#define MIN_RX_BUFFERS_PER_QUEUE 128
+
+/* Number of TX buffers. Must be a power of two.
+ * Higher values may fail at allocation.
+ * The maximum was determined through testing as
+ * the most allocatable pages the host supports
+ * per guest; the hardware rejects TX buffer
+ * sizes beyond this value.
+ */
+#define MAX_SEND_BUFFERS_PER_QUEUE 16384
+#define DEF_SEND_BUFFERS_PER_QUEUE 1024
+#define MIN_SEND_BUFFERS_PER_QUEUE 128
#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3
@@ -135,6 +149,7 @@ struct mana_stats {
counter_u64_t collapse_err; /* tx */
counter_u64_t dma_mapping_err; /* rx, tx */
counter_u64_t mbuf_alloc_fail; /* rx */
+ counter_u64_t partial_refill; /* rx */
counter_u64_t alt_chg; /* tx */
counter_u64_t alt_reset; /* tx */
counter_u64_t cqe_err; /* tx */
@@ -427,6 +442,8 @@ struct mana_rxq {
uint32_t num_rx_buf;
uint32_t buf_index;
+ uint32_t next_to_refill;
+ uint32_t refill_thresh;
uint64_t lro_tried;
uint64_t lro_failed;
@@ -507,6 +524,9 @@ struct mana_port_context {
unsigned int max_queues;
unsigned int num_queues;
+ unsigned int tx_queue_size;
+ unsigned int rx_queue_size;
+
mana_handle_t port_handle;
int vport_use_count;
@@ -694,6 +714,13 @@ struct mana_cfg_rx_steer_resp {
#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
+#define MANA_IDX_NEXT(idx, size) (((idx) + 1) & ((size) - 1))
+#define MANA_GET_SPACE(start_idx, end_idx, size) \
+ (((end_idx) >= (start_idx)) ? \
+ ((end_idx) - (start_idx)) : ((size) - (start_idx) + (end_idx)))
+
+#define MANA_RX_REFILL_THRESH 256
+
struct mana_tx_package {
struct gdma_wqe_request wqe_req;
struct gdma_sge sgl_array[MAX_MBUF_FRAGS];
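The two ring macros added in the mana.h hunk above assume a power-of-two ring size, so advancing an index is a mask rather than a modulo, and the free-space computation wraps across the end of the ring. A standalone exercise of both:

#include <stdio.h>

#define MANA_IDX_NEXT(idx, size) (((idx) + 1) & ((size) - 1))
#define MANA_GET_SPACE(start_idx, end_idx, size)		\
	(((end_idx) >= (start_idx)) ?				\
	((end_idx) - (start_idx)) : ((size) - (start_idx) + (end_idx)))

int
main(void)
{
	unsigned idx = 6, size = 8;

	idx = MANA_IDX_NEXT(idx, size);	/* 7 */
	idx = MANA_IDX_NEXT(idx, size);	/* wraps to 0 */
	printf("idx = %u\n", idx);

	/* buf_index at 2, next_to_refill at 5: five slots need refilling. */
	printf("space = %u\n", MANA_GET_SPACE(5u, 2u, 8u));
	return (0);
}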
diff --git a/sys/dev/mana/mana_en.c b/sys/dev/mana/mana_en.c
index 0e5f86b5e105..949b498ceadc 100644
--- a/sys/dev/mana/mana_en.c
+++ b/sys/dev/mana/mana_en.c
@@ -67,6 +67,10 @@
static int mana_up(struct mana_port_context *apc);
static int mana_down(struct mana_port_context *apc);
+extern unsigned int mana_tx_req_size;
+extern unsigned int mana_rx_req_size;
+extern unsigned int mana_rx_refill_threshold;
+
static void
mana_rss_key_fill(void *k, size_t size)
{
@@ -492,6 +496,7 @@ mana_xmit(struct mana_txq *txq)
if_t ndev = txq->ndev;
struct mbuf *mbuf;
struct mana_port_context *apc = if_getsoftc(ndev);
+ unsigned int tx_queue_size = apc->tx_queue_size;
struct mana_port_stats *port_stats = &apc->port_stats;
struct gdma_dev *gd = apc->ac->gdma_dev;
uint64_t packets, bytes;
@@ -634,8 +639,7 @@ mana_xmit(struct mana_txq *txq)
continue;
}
- next_to_use =
- (next_to_use + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
+ next_to_use = MANA_IDX_NEXT(next_to_use, tx_queue_size);
(void)atomic_inc_return(&txq->pending_sends);
@@ -921,13 +925,6 @@ mana_init_port_context(struct mana_port_context *apc)
apc->rxqs = mallocarray(apc->num_queues, sizeof(struct mana_rxq *),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc->rxqs) {
- bus_dma_tag_destroy(apc->tx_buf_tag);
- bus_dma_tag_destroy(apc->rx_buf_tag);
- apc->rx_buf_tag = NULL;
- return ENOMEM;
- }
-
return 0;
}
@@ -1156,8 +1153,6 @@ mana_cfg_vport_steering(struct mana_port_context *apc,
req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
req = malloc(req_buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (!req)
- return ENOMEM;
mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
sizeof(resp));
@@ -1325,8 +1320,6 @@ mana_create_eq(struct mana_context *ac)
ac->eqs = mallocarray(gc->max_num_queues, sizeof(struct mana_eq),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ac->eqs)
- return ENOMEM;
spec.type = GDMA_EQ;
spec.monitor_avl_buf = false;
@@ -1434,10 +1427,11 @@ mana_poll_tx_cq(struct mana_cq *cq)
unsigned int wqe_unit_cnt = 0;
struct mana_txq *txq = cq->txq;
struct mana_port_context *apc;
+ unsigned int tx_queue_size;
uint16_t next_to_complete;
if_t ndev;
int comp_read;
- int txq_idx = txq->idx;;
+ int txq_idx = txq->idx;
int i;
int sa_drop = 0;
@@ -1447,6 +1441,7 @@ mana_poll_tx_cq(struct mana_cq *cq)
ndev = txq->ndev;
apc = if_getsoftc(ndev);
+ tx_queue_size = apc->tx_queue_size;
comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
CQE_POLLING_BUFFER);
@@ -1532,7 +1527,7 @@ mana_poll_tx_cq(struct mana_cq *cq)
mb();
next_to_complete =
- (next_to_complete + 1) % MAX_SEND_BUFFERS_PER_QUEUE;
+ MANA_IDX_NEXT(next_to_complete, tx_queue_size);
pkt_transmitted++;
}
@@ -1597,18 +1592,11 @@ mana_poll_tx_cq(struct mana_cq *cq)
}
static void
-mana_post_pkt_rxq(struct mana_rxq *rxq)
+mana_post_pkt_rxq(struct mana_rxq *rxq,
+ struct mana_recv_buf_oob *recv_buf_oob)
{
- struct mana_recv_buf_oob *recv_buf_oob;
- uint32_t curr_index;
int err;
- curr_index = rxq->buf_index++;
- if (rxq->buf_index == rxq->num_rx_buf)
- rxq->buf_index = 0;
-
- recv_buf_oob = &rxq->rx_oobs[curr_index];
-
err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
&recv_buf_oob->wqe_inf);
if (err) {
@@ -1727,6 +1715,68 @@ mana_rx_mbuf(struct mbuf *mbuf, struct mana_rxcomp_oob *cqe,
counter_exit();
}
+static int
+mana_refill_rx_mbufs(struct mana_port_context *apc,
+ struct mana_rxq *rxq, uint32_t num)
+{
+ struct mana_recv_buf_oob *rxbuf_oob;
+ uint32_t next_to_refill;
+ uint32_t i;
+ int err;
+
+ next_to_refill = rxq->next_to_refill;
+
+ for (i = 0; i < num; i++) {
+ if (next_to_refill == rxq->buf_index) {
+ mana_warn(NULL, "refilling index reached current, "
+ "aborted! rxq %u, oob idx %u\n",
+ rxq->rxq_idx, next_to_refill);
+ break;
+ }
+
+ rxbuf_oob = &rxq->rx_oobs[next_to_refill];
+
+ if (likely(rxbuf_oob->mbuf == NULL)) {
+ err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
+ } else {
+ mana_warn(NULL, "mbuf not null when refilling, "
+ "rxq %u, oob idx %u, reusing\n",
+ rxq->rxq_idx, next_to_refill);
+ err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
+ }
+
+ if (unlikely(err != 0)) {
+ mana_dbg(NULL,
+ "failed to load rx mbuf, err = %d, rxq = %u\n",
+ err, rxq->rxq_idx);
+ counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
+ break;
+ }
+
+ mana_post_pkt_rxq(rxq, rxbuf_oob);
+
+ next_to_refill = MANA_IDX_NEXT(next_to_refill,
+ rxq->num_rx_buf);
+ }
+
+ if (likely(i != 0)) {
+ struct gdma_context *gc =
+ rxq->gdma_rq->gdma_dev->gdma_context;
+
+ mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
+ }
+
+ if (unlikely(i < num)) {
+ counter_u64_add(rxq->stats.partial_refill, 1);
+ mana_dbg(NULL,
+ "refilled rxq %u with only %u mbufs (%u requested)\n",
+ rxq->rxq_idx, i, num);
+ }
+
+ rxq->next_to_refill = next_to_refill;
+ return (i);
+}
+
static void
mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
struct gdma_comp *cqe)
@@ -1736,8 +1786,8 @@ mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
if_t ndev = rxq->ndev;
struct mana_port_context *apc;
struct mbuf *old_mbuf;
+ uint32_t refill_required;
uint32_t curr, pktlen;
- int err;
switch (oob->cqe_hdr.cqe_type) {
case CQE_RX_OKAY:
@@ -1790,29 +1840,24 @@ mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
/* Unload DMA map for the old mbuf */
mana_unload_rx_mbuf(apc, rxq, rxbuf_oob, false);
-
- /* Load a new mbuf to replace the old one */
- err = mana_load_rx_mbuf(apc, rxq, rxbuf_oob, true);
- if (err) {
- mana_dbg(NULL,
- "failed to load rx mbuf, err = %d, packet dropped.\n",
- err);
- counter_u64_add(rxq->stats.mbuf_alloc_fail, 1);
- /*
- * Failed to load new mbuf, rxbuf_oob->mbuf is still
- * pointing to the old one. Drop the packet.
- */
- old_mbuf = NULL;
- /* Reload the existing mbuf */
- mana_load_rx_mbuf(apc, rxq, rxbuf_oob, false);
- }
+ /* Clear the mbuf pointer to avoid reuse */
+ rxbuf_oob->mbuf = NULL;
mana_rx_mbuf(old_mbuf, oob, rxq);
drop:
mana_move_wq_tail(rxq->gdma_rq, rxbuf_oob->wqe_inf.wqe_size_in_bu);
- mana_post_pkt_rxq(rxq);
+ rxq->buf_index = MANA_IDX_NEXT(rxq->buf_index, rxq->num_rx_buf);
+
+ /* Check if refill is needed */
+ refill_required = MANA_GET_SPACE(rxq->next_to_refill,
+ rxq->buf_index, rxq->num_rx_buf);
+
+ if (refill_required >= rxq->refill_thresh) {
+ /* Refill empty rx_oobs with new mbufs */
+ mana_refill_rx_mbufs(apc, rxq, refill_required);
+ }
}
static void
@@ -1845,13 +1890,6 @@ mana_poll_rx_cq(struct mana_cq *cq)
mana_process_rx_cqe(cq->rxq, cq, &comp[i]);
}
- if (comp_read > 0) {
- struct gdma_context *gc =
- cq->rxq->gdma_rq->gdma_dev->gdma_context;
-
- mana_gd_wq_ring_doorbell(gc, cq->rxq->gdma_rq);
- }
-
tcp_lro_flush_all(&cq->rxq->lro);
}
@@ -1878,9 +1916,9 @@ mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_gd_ring_cq(gdma_queue, arm_bit);
}
-#define MANA_POLL_BUDGET 8
-#define MANA_RX_BUDGET 256
-#define MANA_TX_BUDGET MAX_SEND_BUFFERS_PER_QUEUE
+#define MANA_POLL_BUDGET 256
+#define MANA_RX_BUDGET 8
+#define MANA_TX_BUDGET 8
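
The budget swap above shrinks each polling pass while raising the pass count. A hedged sketch of the loop shape this implies; mana_poll_one() is a hypothetical stand-in for the driver's per-pass completion processing, and the cq->type / MANA_CQ_TYPE_RX names mirror the Linux mana driver and are assumptions here:

	static void
	mana_poll_sketch(struct mana_cq *cq)
	{
		int i, budget, done;

		/* Per-pass budget is now small for both queue types. */
		budget = (cq->type == MANA_CQ_TYPE_RX) ?
		    MANA_RX_BUDGET : MANA_TX_BUDGET;
		for (i = 0; i < MANA_POLL_BUDGET; i++) {
			done = mana_poll_one(cq, budget);
			if (done < budget)
				break;	/* queue drained */
		}
	}

Draining at most 8 completions per pass lets the handler yield and re-arm far more often than the old 256-per-pass scheme.
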
static void
mana_poll(void *arg, int pending)
@@ -1987,7 +2025,7 @@ mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
if (txq->tx_buf_info) {
/* Free all mbufs which are still in-flight */
- for (i = 0; i < MAX_SEND_BUFFERS_PER_QUEUE; i++) {
+ for (i = 0; i < apc->tx_queue_size; i++) {
txbuf_info = &txq->tx_buf_info[i];
if (txbuf_info->mbuf) {
mana_tx_unmap_mbuf(apc, txbuf_info);
@@ -2043,19 +2081,21 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
apc->tx_qp = mallocarray(apc->num_queues, sizeof(struct mana_tx_qp),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc->tx_qp)
- return ENOMEM;
/* The minimum size of the WQE is 32 bytes, hence
- * MAX_SEND_BUFFERS_PER_QUEUE represents the maximum number of WQEs
+ * apc->tx_queue_size represents the maximum number of WQEs
* the SQ can store. This value is then used to size other queues
* to prevent overflow.
+ * Also note that txq_size is always page aligned: the minimum
+ * apc->tx_queue_size is 128, which makes txq_size 128 * 32 = 4096,
+ * and all larger values of apc->tx_queue_size are powers of two.
*/
- txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
+ txq_size = apc->tx_queue_size * 32;
KASSERT(IS_ALIGNED(txq_size, PAGE_SIZE),
("txq size not page aligned"));
- cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
+ cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
gc = gd->gdma_context;
@@ -2127,7 +2167,7 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
mana_dbg(NULL,
"txq %d, txq gdma id %d, txq cq gdma id %d\n",
- i, txq->gdma_txq_id, cq->gdma_id);;
+ i, txq->gdma_txq_id, cq->gdma_id);
if (cq->gdma_id >= gc->max_num_cqs) {
if_printf(net, "CQ id %u too large.\n", cq->gdma_id);
@@ -2138,31 +2178,16 @@ mana_create_txq(struct mana_port_context *apc, if_t net)
gc->cq_table[cq->gdma_id] = cq->gdma_cq;
/* Initialize tx specific data */
- txq->tx_buf_info = malloc(MAX_SEND_BUFFERS_PER_QUEUE *
+ txq->tx_buf_info = malloc(apc->tx_queue_size *
sizeof(struct mana_send_buf_info),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (unlikely(txq->tx_buf_info == NULL)) {
- if_printf(net,
- "Failed to allocate tx buf info for SQ %u\n",
- txq->gdma_sq->id);
- err = ENOMEM;
- goto out;
- }
-
snprintf(txq->txq_mtx_name, nitems(txq->txq_mtx_name),
"mana:tx(%d)", i);
mtx_init(&txq->txq_mtx, txq->txq_mtx_name, NULL, MTX_DEF);
- txq->txq_br = buf_ring_alloc(4 * MAX_SEND_BUFFERS_PER_QUEUE,
+ txq->txq_br = buf_ring_alloc(4 * apc->tx_queue_size,
M_DEVBUF, M_WAITOK, &txq->txq_mtx);
- if (unlikely(txq->txq_br == NULL)) {
- if_printf(net,
- "Failed to allocate buf ring for SQ %u\n",
- txq->gdma_sq->id);
- err = ENOMEM;
- goto out;
- }
/* Allocate taskqueue for deferred send */
TASK_INIT(&txq->enqueue_task, 0, mana_xmit_taskfunc, txq);
@@ -2351,13 +2376,10 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
gc = gd->gdma_context;
rxq = malloc(sizeof(*rxq) +
- RX_BUFFERS_PER_QUEUE * sizeof(struct mana_recv_buf_oob),
+ apc->rx_queue_size * sizeof(struct mana_recv_buf_oob),
M_DEVBUF, M_WAITOK | M_ZERO);
- if (!rxq)
- return NULL;
-
rxq->ndev = ndev;
- rxq->num_rx_buf = RX_BUFFERS_PER_QUEUE;
+ rxq->num_rx_buf = apc->rx_queue_size;
rxq->rxq_idx = rxq_idx;
/*
* Minimum size is MCLBYTES(2048) bytes for a mbuf cluster.
@@ -2370,6 +2392,23 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
mana_dbg(NULL, "Setting rxq %d datasize %d\n",
rxq_idx, rxq->datasize);
+ /*
+ * Set the mbuf refill_thresh in two steps:
+ * 1) Honor mana_rx_refill_threshold if it is set;
+ * fall back to the default otherwise.
+ * 2) Clamp the result to at most 1/4 of the
+ * rx buffer count.
+ */
+ if (mana_rx_refill_threshold != 0)
+ rxq->refill_thresh = mana_rx_refill_threshold;
+ else
+ rxq->refill_thresh = MANA_RX_REFILL_THRESH;
+ rxq->refill_thresh = min_t(uint32_t,
+ rxq->num_rx_buf / 4, rxq->refill_thresh);
+
+ mana_dbg(NULL, "Setting rxq %d refill thresh %u\n",
+ rxq_idx, rxq->refill_thresh);
+
rxq->rxobj = INVALID_MANA_HANDLE;
err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
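
A worked pass through the threshold selection above, assuming the tunable is unset and, illustratively, a default MANA_RX_REFILL_THRESH of 256: with an rx_queue_size of 512, step 1 picks 256 and step 2 clamps to min(512 / 4, 256) = 128; with an rx_queue_size of 2048 the clamp leaves the default in place. The actual default lives in the MANA headers and is an assumption here.
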
@@ -2794,6 +2833,62 @@ mana_detach(if_t ndev)
return err;
}
+static unsigned int
+mana_get_tx_queue_size(int port_idx, unsigned int request_size)
+{
+ unsigned int new_size;
+
+ if (request_size == 0)
+ /* Uninitialized */
+ new_size = DEF_SEND_BUFFERS_PER_QUEUE;
+ else
+ new_size = roundup_pow_of_two(request_size);
+
+ if (new_size < MIN_SEND_BUFFERS_PER_QUEUE ||
+ new_size > MAX_SEND_BUFFERS_PER_QUEUE) {
+ mana_info(NULL, "mana port %d: requested tx buffer "
+ "size %u out of allowable range (%u - %u), "
+ "setting to default\n",
+ port_idx, request_size,
+ MIN_SEND_BUFFERS_PER_QUEUE,
+ MAX_SEND_BUFFERS_PER_QUEUE);
+ new_size = DEF_SEND_BUFFERS_PER_QUEUE;
+ }
+ mana_info(NULL, "mana port %d: tx buffer size %u "
+ "(%u requested)\n",
+ port_idx, new_size, request_size);
+
+ return (new_size);
+}
+
+static unsigned int
+mana_get_rx_queue_size(int port_idx, unsigned int request_size)
+{
+ unsigned int new_size;
+
+ if (request_size == 0)
+ /* Uninitialized */
+ new_size = DEF_RX_BUFFERS_PER_QUEUE;
+ else
+ new_size = roundup_pow_of_two(request_size);
+
+ if (new_size < MIN_RX_BUFFERS_PER_QUEUE ||
+ new_size > MAX_RX_BUFFERS_PER_QUEUE) {
+ mana_info(NULL, "mana port %d: requested rx buffer "
+ "size %u out of allowable range (%u - %u), "
+ "setting to default\n",
+ port_idx, request_size,
+ MIN_RX_BUFFERS_PER_QUEUE,
+ MAX_RX_BUFFERS_PER_QUEUE);
+ new_size = DEF_RX_BUFFERS_PER_QUEUE;
+ }
+ mana_info(NULL, "mana port %d: rx buffer size %u "
+ "(%u requested)\n",
+ port_idx, new_size, request_size);
+
+ return (new_size);
+}
+
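
Both helpers lean on roundup_pow_of_two(). A self-contained sketch of that rounding step for 32-bit sizes, using the usual bit-smearing trick; the in-tree definition is the authoritative one, and the callers here already guard the request_size == 0 case:

	static inline unsigned int
	roundup_pow_of_two_sketch(unsigned int n)
	{
		/* Smear the top set bit downward, then add one. */
		n--;
		n |= n >> 1;
		n |= n >> 2;
		n |= n >> 4;
		n |= n >> 8;
		n |= n >> 16;
		return (n + 1);
	}

For example, a requested size of 1000 rounds up to 1024 before the range check, and an out-of-range result falls back to the default.
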
static int
mana_probe_port(struct mana_context *ac, int port_idx,
if_t *ndev_storage)
@@ -2805,25 +2900,18 @@ mana_probe_port(struct mana_context *ac, int port_idx,
int err;
ndev = if_alloc_dev(IFT_ETHER, gc->dev);
- if (!ndev) {
- mana_err(NULL, "Failed to allocate ifnet struct\n");
- return ENOMEM;
- }
-
*ndev_storage = ndev;
apc = malloc(sizeof(*apc), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!apc) {
- mana_err(NULL, "Failed to allocate port context\n");
- err = ENOMEM;
- goto free_net;
- }
-
apc->ac = ac;
apc->ndev = ndev;
apc->max_queues = gc->max_num_queues;
apc->num_queues = min_t(unsigned int,
gc->max_num_queues, MANA_MAX_NUM_QUEUES);
+ apc->tx_queue_size = mana_get_tx_queue_size(port_idx,
+ mana_tx_req_size);
+ apc->rx_queue_size = mana_get_rx_queue_size(port_idx,
+ mana_rx_req_size);
apc->port_handle = INVALID_MANA_HANDLE;
apc->port_idx = port_idx;
apc->frame_size = DEFAULT_FRAME_SIZE;
@@ -2897,7 +2985,6 @@ mana_probe_port(struct mana_context *ac, int port_idx,
reset_apc:
free(apc, M_DEVBUF);
-free_net:
*ndev_storage = NULL;
if_printf(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
if_free(ndev);
@@ -2920,9 +3007,6 @@ int mana_probe(struct gdma_dev *gd)
return err;
ac = malloc(sizeof(*ac), M_DEVBUF, M_WAITOK | M_ZERO);
- if (!ac)
- return ENOMEM;
-
ac->gdma_dev = gd;
ac->num_ports = 1;
gd->driver_data = ac;
diff --git a/sys/dev/mana/mana_sysctl.c b/sys/dev/mana/mana_sysctl.c
index 844a05040595..c2916f9004cd 100644
--- a/sys/dev/mana/mana_sysctl.c
+++ b/sys/dev/mana/mana_sysctl.c
@@ -34,9 +34,21 @@ static int mana_sysctl_cleanup_thread_cpu(SYSCTL_HANDLER_ARGS);
int mana_log_level = MANA_ALERT | MANA_WARNING | MANA_INFO;
+unsigned int mana_tx_req_size;
+unsigned int mana_rx_req_size;
+unsigned int mana_rx_refill_threshold;
+
SYSCTL_NODE(_hw, OID_AUTO, mana, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"MANA driver parameters");
+SYSCTL_UINT(_hw_mana, OID_AUTO, tx_req_size, CTLFLAG_RWTUN,
+ &mana_tx_req_size, 0, "requested number of tx queue entries");
+SYSCTL_UINT(_hw_mana, OID_AUTO, rx_req_size, CTLFLAG_RWTUN,
+ &mana_rx_req_size, 0, "requested number of rx queue entries");
+SYSCTL_UINT(_hw_mana, OID_AUTO, rx_refill_thresh, CTLFLAG_RWTUN,
+ &mana_rx_refill_threshold, 0,
+ "number of empty rx slots required before posting a refill");
+
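
Since all three knobs are CTLFLAG_RWTUN, they can be seeded from loader.conf or changed with sysctl(8); the queue sizes are read at port probe, so runtime changes presumably apply only to ports attached afterwards. Illustrative values only:

	hw.mana.tx_req_size="512"
	hw.mana.rx_req_size="1024"
	hw.mana.rx_refill_thresh="64"
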
/*
* Logging level for changing verbosity of the output
*/
@@ -166,6 +178,14 @@ mana_sysctl_add_port(struct mana_port_context *apc)
"enable_altq", CTLFLAG_RW, &apc->enable_tx_altq, 0,
"Choose alternative txq under heavy load");
+ SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO,
+ "tx_queue_size", CTLFLAG_RD, &apc->tx_queue_size, 0,
+ "number of unit of tx queue");
+
+ SYSCTL_ADD_UINT(ctx, apc->port_list, OID_AUTO,
+ "rx_queue_size", CTLFLAG_RD, &apc->rx_queue_size, 0,
+ "number of unit of rx queue");
+
SYSCTL_ADD_PROC(ctx, apc->port_list, OID_AUTO,
"bind_cleanup_thread_cpu",
CTLTYPE_U8 | CTLFLAG_RW | CTLFLAG_MPSAFE,
@@ -314,6 +334,9 @@ mana_sysctl_add_queues(struct mana_port_context *apc)
"mbuf_alloc_fail", CTLFLAG_RD,
&rx_stats->mbuf_alloc_fail, "Failed mbuf allocs");
SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
+ "partial_refill", CTLFLAG_RD,
+ &rx_stats->partial_refill, "Partial rx mbuf refill events");
+ SYSCTL_ADD_COUNTER_U64(ctx, rx_list, OID_AUTO,
"dma_mapping_err", CTLFLAG_RD,
&rx_stats->dma_mapping_err, "DMA mapping errors");
}
diff --git a/sys/dev/md/embedfs.S b/sys/dev/md/embedfs.S
index af4f9c08ed3a..62bd118647c0 100644
--- a/sys/dev/md/embedfs.S
+++ b/sys/dev/md/embedfs.S
@@ -42,3 +42,9 @@ mfs_root:
.type mfs_root_end, %object
mfs_root_end:
.size mfs_root_end, . - mfs_root_end
+
+#if defined(__aarch64__)
+#include <machine/asm.h>
+#include <sys/elf_common.h>
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
+#endif
diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 241517898ad4..ec1664fac701 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -11,9 +11,9 @@
*/
/*-
- * The following functions are based on the vn(4) driver: mdstart_swap(),
- * mdstart_vnode(), mdcreate_swap(), mdcreate_vnode() and mddestroy(),
- * and as such under the following copyright:
+ * The following functions are based on the historical vn(4) driver:
+ * mdstart_swap(), mdstart_vnode(), mdcreate_swap(), mdcreate_vnode()
+ * and mddestroy(), and as such under the following copyright:
*
* Copyright (c) 1988 University of Utah.
* Copyright (c) 1990, 1993
@@ -89,6 +89,8 @@
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/disk.h>
+#include <sys/param.h>
+#include <sys/bus.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
@@ -256,7 +258,7 @@ struct md_s {
unsigned opencount;
unsigned fwheads;
unsigned fwsectors;
- char ident[32];
+ char ident[DISK_IDENT_SIZE];
unsigned flags;
char name[20];
struct proc *procp;
@@ -264,25 +266,40 @@ struct md_s {
struct g_provider *pp;
int (*start)(struct md_s *sc, struct bio *bp);
struct devstat *devstat;
- bool candelete;
-
- /* MD_MALLOC related fields */
- struct indir *indir;
- uma_zone_t uma;
-
- /* MD_PRELOAD related fields */
- u_char *pl_ptr;
- size_t pl_len;
-
- /* MD_VNODE related fields */
- struct vnode *vnode;
- char file[PATH_MAX];
- char label[PATH_MAX];
struct ucred *cred;
- vm_offset_t kva;
+ char label[PATH_MAX];
+ bool candelete;
- /* MD_SWAP related fields */
- vm_object_t object;
+ union {
+ /* MD_MALLOC related fields */
+ struct {
+ struct indir *indir;
+ uma_zone_t uma;
+ } s_malloc;
+
+ /* MD_PRELOAD related fields */
+ struct {
+ u_char *pl_ptr;
+ size_t pl_len;
+ char name[PATH_MAX];
+ } s_preload;
+
+ /* MD_VNODE related fields */
+ struct {
+ struct vnode *vnode;
+ char file[PATH_MAX];
+ vm_offset_t kva;
+ } s_vnode;
+
+ /* MD_SWAP related fields */
+ struct {
+ vm_object_t object;
+ } s_swap;
+
+ /* MD_NULL */
+ struct {
+ } s_null;
+ };
};
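
With the per-type fields collapsed into a union, a member such as s_vnode.vnode is only meaningful while sc->type matches; the rest of the file now switches on sc->type before touching union members. A minimal sketch of the pattern, where md_vnode_of() is illustrative and not part of the change:

	static struct vnode *
	md_vnode_of(struct md_s *sc)
	{
		/* Union members are valid only for the matching type. */
		KASSERT(sc->type == MD_VNODE,
		    ("md%d: not vnode-backed", sc->unit));
		return (sc->s_vnode.vnode);
	}
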
static struct indir *
@@ -324,7 +341,7 @@ destroy_indir(struct md_s *sc, struct indir *ip)
if (ip->shift)
destroy_indir(sc, (struct indir*)(ip->array[i]));
else if (ip->array[i] > 255)
- uma_zfree(sc->uma, (void *)(ip->array[i]));
+ uma_zfree(sc->s_malloc.uma, (void *)(ip->array[i]));
}
del_indir(ip);
}
@@ -675,10 +692,10 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
secno = bp->bio_offset / sc->sectorsize;
error = 0;
while (nsec--) {
- osp = s_read(sc->indir, secno);
+ osp = s_read(sc->s_malloc.indir, secno);
if (bp->bio_cmd == BIO_DELETE) {
if (osp != 0)
- error = s_write(sc->indir, secno, 0);
+ error = s_write(sc->s_malloc.indir, secno, 0);
} else if (bp->bio_cmd == BIO_READ) {
if (osp == 0) {
if (notmapped) {
@@ -743,10 +760,12 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
}
if (i == sc->sectorsize) {
if (osp != uc)
- error = s_write(sc->indir, secno, uc);
+ error = s_write(sc->s_malloc.indir,
+ secno, uc);
} else {
if (osp <= 255) {
- sp = (uintptr_t)uma_zalloc(sc->uma,
+ sp = (uintptr_t)uma_zalloc(
+ sc->s_malloc.uma,
md_malloc_wait ? M_WAITOK :
M_NOWAIT);
if (sp == 0) {
@@ -767,7 +786,8 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
bcopy(dst, (void *)sp,
sc->sectorsize);
}
- error = s_write(sc->indir, secno, sp);
+ error = s_write(sc->s_malloc.indir,
+ secno, sp);
} else {
if (notmapped) {
error = md_malloc_move_ma(&m,
@@ -790,7 +810,7 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
error = EOPNOTSUPP;
}
if (osp > 255)
- uma_zfree(sc->uma, (void*)osp);
+ uma_zfree(sc->s_malloc.uma, (void*)osp);
if (error != 0)
break;
secno++;
@@ -848,7 +868,7 @@ mdstart_preload(struct md_s *sc, struct bio *bp)
{
uint8_t *p;
- p = sc->pl_ptr + bp->bio_offset;
+ p = sc->s_preload.pl_ptr + bp->bio_offset;
switch (bp->bio_cmd) {
case BIO_READ:
if ((bp->bio_flags & BIO_VLIST) != 0) {
@@ -887,25 +907,8 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
int ma_offs, npages;
bool mapped;
- switch (bp->bio_cmd) {
- case BIO_READ:
- auio.uio_rw = UIO_READ;
- break;
- case BIO_WRITE:
- auio.uio_rw = UIO_WRITE;
- break;
- case BIO_FLUSH:
- break;
- case BIO_DELETE:
- if (sc->candelete)
- break;
- /* FALLTHROUGH */
- default:
- return (EOPNOTSUPP);
- }
-
td = curthread;
- vp = sc->vnode;
+ vp = sc->s_vnode.vnode;
piov = NULL;
ma_offs = bp->bio_ma_offset;
off = bp->bio_offset;
@@ -920,7 +923,14 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
* still valid.
*/
- if (bp->bio_cmd == BIO_FLUSH) {
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ auio.uio_rw = UIO_READ;
+ break;
+ case BIO_WRITE:
+ auio.uio_rw = UIO_WRITE;
+ break;
+ case BIO_FLUSH:
do {
(void)vn_start_write(vp, &mp, V_WAIT);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
@@ -929,11 +939,17 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
vn_finished_write(mp);
} while (error == ERELOOKUP);
return (error);
- } else if (bp->bio_cmd == BIO_DELETE) {
- error = vn_deallocate(vp, &off, &len, 0,
- sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred, NOCRED);
- bp->bio_resid = len;
- return (error);
+ case BIO_DELETE:
+ if (sc->candelete) {
+ error = vn_deallocate(vp, &off, &len, 0,
+ sc->flags & MD_ASYNC ? 0 : IO_SYNC,
+ sc->cred, NOCRED);
+ bp->bio_resid = len;
+ return (error);
+ }
+ /* FALLTHROUGH */
+ default:
+ return (EOPNOTSUPP);
}
auio.uio_offset = (vm_ooffset_t)bp->bio_offset;
@@ -967,8 +983,10 @@ unmapped_step:
KASSERT(iolen > 0, ("zero iolen"));
KASSERT(npages <= atop(maxphys + PAGE_SIZE),
("npages %d too large", npages));
- pmap_qenter(sc->kva, &bp->bio_ma[atop(ma_offs)], npages);
- aiov.iov_base = (void *)(sc->kva + (ma_offs & PAGE_MASK));
+ pmap_qenter(sc->s_vnode.kva, &bp->bio_ma[atop(ma_offs)],
+ npages);
+ aiov.iov_base = (void *)(sc->s_vnode.kva + (ma_offs &
+ PAGE_MASK));
aiov.iov_len = iolen;
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
@@ -981,7 +999,7 @@ unmapped_step:
auio.uio_iovcnt = 1;
}
iostart = auio.uio_offset;
- if (auio.uio_rw == UIO_READ) {
+ if (bp->bio_cmd == BIO_READ) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
error = VOP_READ(vp, &auio, 0, sc->cred);
VOP_UNLOCK(vp);
@@ -1002,7 +1020,7 @@ unmapped_step:
POSIX_FADV_DONTNEED);
if (mapped) {
- pmap_qremove(sc->kva, npages);
+ pmap_qremove(sc->s_vnode.kva, npages);
if (error == 0) {
len -= iolen;
bp->bio_resid -= iolen;
@@ -1056,20 +1074,21 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
rv = VM_PAGER_OK;
- vm_object_pip_add(sc->object, 1);
+ vm_object_pip_add(sc->s_swap.object, 1);
for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
- m = vm_page_grab_unlocked(sc->object, i, VM_ALLOC_SYSTEM);
+ m = vm_page_grab_unlocked(sc->s_swap.object, i,
+ VM_ALLOC_SYSTEM);
if (bp->bio_cmd == BIO_READ) {
if (vm_page_all_valid(m))
rv = VM_PAGER_OK;
else
- rv = vm_pager_get_pages(sc->object, &m, 1,
- NULL, NULL);
+ rv = vm_pager_get_pages(sc->s_swap.object,
+ &m, 1, NULL, NULL);
if (rv == VM_PAGER_ERROR) {
- VM_OBJECT_WLOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->s_swap.object);
vm_page_free(m);
- VM_OBJECT_WUNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
break;
} else if (rv == VM_PAGER_FAIL) {
/*
@@ -1096,12 +1115,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
if (len == PAGE_SIZE || vm_page_all_valid(m))
rv = VM_PAGER_OK;
else
- rv = vm_pager_get_pages(sc->object, &m, 1,
- NULL, NULL);
+ rv = vm_pager_get_pages(sc->s_swap.object,
+ &m, 1, NULL, NULL);
if (rv == VM_PAGER_ERROR) {
- VM_OBJECT_WLOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->s_swap.object);
vm_page_free(m);
- VM_OBJECT_WUNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
break;
} else if (rv == VM_PAGER_FAIL)
pmap_zero_page(m);
@@ -1122,12 +1141,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
if (len == PAGE_SIZE || vm_page_all_valid(m))
rv = VM_PAGER_OK;
else
- rv = vm_pager_get_pages(sc->object, &m, 1,
- NULL, NULL);
- VM_OBJECT_WLOCK(sc->object);
+ rv = vm_pager_get_pages(sc->s_swap.object,
+ &m, 1, NULL, NULL);
+ VM_OBJECT_WLOCK(sc->s_swap.object);
if (rv == VM_PAGER_ERROR) {
vm_page_free(m);
- VM_OBJECT_WUNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
break;
} else if (rv == VM_PAGER_FAIL) {
vm_page_free(m);
@@ -1143,7 +1162,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
m = NULL;
}
}
- VM_OBJECT_WUNLOCK(sc->object);
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
}
if (m != NULL) {
/*
@@ -1163,7 +1182,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
offs = 0;
ma_offs += len;
}
- vm_object_pip_wakeup(sc->object);
+ vm_object_pip_wakeup(sc->s_swap.object);
return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
}
@@ -1301,6 +1320,7 @@ mdinit(struct md_s *sc)
{
struct g_geom *gp;
struct g_provider *pp;
+ unsigned remn;
g_topology_lock();
gp = g_new_geomf(&g_md_class, "md%d", sc->unit);
@@ -1309,6 +1329,13 @@ mdinit(struct md_s *sc)
devstat_remove_entry(pp->stat);
pp->stat = NULL;
pp->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE;
+ /* Prune off any residual fractional sector. */
+ remn = sc->mediasize % sc->sectorsize;
+ if (remn != 0) {
+ printf("md%d: truncating fractional last sector by %u bytes\n",
+ sc->unit, remn);
+ sc->mediasize -= remn;
+ }
pp->mediasize = sc->mediasize;
pp->sectorsize = sc->sectorsize;
switch (sc->type) {
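
Worked example for the pruning above: a 1000000-byte backing store with 512-byte sectors gives remn = 1000000 % 512 = 64, so mediasize is trimmed to 999936 bytes, exactly 1953 whole sectors. Moving the check here, from kern_mdattach_locked() below, means every path through mdinit() gets it.
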
@@ -1349,19 +1376,19 @@ mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
sc->fwsectors = mdr->md_fwsectors;
if (mdr->md_fwheads != 0)
sc->fwheads = mdr->md_fwheads;
- sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE);
- sc->indir = dimension(sc->mediasize / sc->sectorsize);
- sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
- 0x1ff, 0);
+ sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE | MD_RESERVE);
+ sc->s_malloc.indir = dimension(sc->mediasize / sc->sectorsize);
+ sc->s_malloc.uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL,
+ NULL, NULL, 0x1ff, 0);
if (mdr->md_options & MD_RESERVE) {
off_t nsectors;
nsectors = sc->mediasize / sc->sectorsize;
for (u = 0; u < nsectors; u++) {
- sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
- M_WAITOK : M_NOWAIT) | M_ZERO);
+ sp = (uintptr_t)uma_zalloc(sc->s_malloc.uma,
+ (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
if (sp != 0)
- error = s_write(sc->indir, u, sp);
+ error = s_write(sc->s_malloc.indir, u, sp);
else
error = ENOMEM;
if (error != 0)
@@ -1389,7 +1416,7 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
* Horrible kludge to establish credentials for NFS XXX.
*/
- if (sc->vnode) {
+ if (sc->type == MD_VNODE && sc->s_vnode.vnode != NULL) {
struct uio auio;
struct iovec aiov;
@@ -1404,9 +1431,9 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
auio.uio_rw = UIO_READ;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_resid = aiov.iov_len;
- vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
- error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
- VOP_UNLOCK(sc->vnode);
+ vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+ error = VOP_READ(sc->s_vnode.vnode, &auio, 0, sc->cred);
+ VOP_UNLOCK(sc->s_vnode.vnode);
free(tmpbuf, M_TEMP);
}
return (error);
@@ -1423,11 +1450,12 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
fname = mdr->md_file;
if (mdr->md_file_seg == UIO_USERSPACE) {
- error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
+ error = copyinstr(fname, sc->s_vnode.file,
+ sizeof(sc->s_vnode.file), NULL);
if (error != 0)
return (error);
} else if (mdr->md_file_seg == UIO_SYSSPACE)
- strlcpy(sc->file, fname, sizeof(sc->file));
+ strlcpy(sc->s_vnode.file, fname, sizeof(sc->s_vnode.file));
else
return (EDOOFUS);
@@ -1437,7 +1465,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
*/
flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \
| ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file);
+ NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->s_vnode.file);
error = vn_open(&nd, &flags, 0, NULL);
if (error != 0)
return (error);
@@ -1474,20 +1502,20 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
snprintf(sc->ident, sizeof(sc->ident), "MD-DEV%ju-INO%ju",
(uintmax_t)vattr.va_fsid, (uintmax_t)vattr.va_fileid);
sc->flags = mdr->md_options & (MD_ASYNC | MD_CACHE | MD_FORCE |
- MD_VERIFY);
+ MD_VERIFY | MD_MUSTDEALLOC);
if (!(flags & FWRITE))
sc->flags |= MD_READONLY;
- sc->vnode = nd.ni_vp;
+ sc->s_vnode.vnode = nd.ni_vp;
error = mdsetcred(sc, td->td_ucred);
if (error != 0) {
- sc->vnode = NULL;
+ sc->s_vnode.vnode = NULL;
vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
nd.ni_vp->v_vflag &= ~VV_MD;
goto bad;
}
- sc->kva = kva_alloc(maxphys + PAGE_SIZE);
+ sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE);
return (0);
bad:
VOP_UNLOCK(nd.ni_vp);
@@ -1531,23 +1559,37 @@ mddestroy(struct md_s *sc, struct thread *td)
msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
mtx_unlock(&sc->queue_mtx);
mtx_destroy(&sc->queue_mtx);
- if (sc->vnode != NULL) {
- vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
- sc->vnode->v_vflag &= ~VV_MD;
- VOP_UNLOCK(sc->vnode);
- (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
- FREAD : (FREAD|FWRITE), sc->cred, td);
+ switch (sc->type) {
+ case MD_VNODE:
+ if (sc->s_vnode.vnode != NULL) {
+ vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+ sc->s_vnode.vnode->v_vflag &= ~VV_MD;
+ VOP_UNLOCK(sc->s_vnode.vnode);
+ (void)vn_close(sc->s_vnode.vnode,
+ sc->flags & MD_READONLY ? FREAD : (FREAD|FWRITE),
+ sc->cred, td);
+ }
+ if (sc->s_vnode.kva != 0)
+ kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE);
+ break;
+ case MD_SWAP:
+ if (sc->s_swap.object != NULL)
+ vm_object_deallocate(sc->s_swap.object);
+ break;
+ case MD_MALLOC:
+ if (sc->s_malloc.indir != NULL)
+ destroy_indir(sc, sc->s_malloc.indir);
+ if (sc->s_malloc.uma != NULL)
+ uma_zdestroy(sc->s_malloc.uma);
+ break;
+ case MD_PRELOAD:
+ case MD_NULL:
+ break;
+ default:
+ __assert_unreachable();
}
if (sc->cred != NULL)
crfree(sc->cred);
- if (sc->object != NULL)
- vm_object_deallocate(sc->object);
- if (sc->indir)
- destroy_indir(sc, sc->indir);
- if (sc->uma)
- uma_zdestroy(sc->uma);
- if (sc->kva)
- kva_free(sc->kva, maxphys + PAGE_SIZE);
LIST_REMOVE(sc, list);
free_unr(md_uh, sc->unit);
@@ -1572,13 +1614,14 @@ mdresize(struct md_s *sc, struct md_req *mdr)
oldpages = OFF_TO_IDX(sc->mediasize);
newpages = OFF_TO_IDX(mdr->md_mediasize);
if (newpages < oldpages) {
- VM_OBJECT_WLOCK(sc->object);
- vm_object_page_remove(sc->object, newpages, 0, 0);
+ VM_OBJECT_WLOCK(sc->s_swap.object);
+ vm_object_page_remove(sc->s_swap.object, newpages,
+ 0, 0);
swap_release_by_cred(IDX_TO_OFF(oldpages -
newpages), sc->cred);
- sc->object->charge = IDX_TO_OFF(newpages);
- sc->object->size = newpages;
- VM_OBJECT_WUNLOCK(sc->object);
+ sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+ sc->s_swap.object->size = newpages;
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
} else if (newpages > oldpages) {
res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
oldpages), sc->cred);
@@ -1586,7 +1629,7 @@ mdresize(struct md_s *sc, struct md_req *mdr)
return (ENOMEM);
if ((mdr->md_options & MD_RESERVE) ||
(sc->flags & MD_RESERVE)) {
- error = swap_pager_reserve(sc->object,
+ error = swap_pager_reserve(sc->s_swap.object,
oldpages, newpages - oldpages);
if (error < 0) {
swap_release_by_cred(
@@ -1595,10 +1638,10 @@ mdresize(struct md_s *sc, struct md_req *mdr)
return (EDOM);
}
}
- VM_OBJECT_WLOCK(sc->object);
- sc->object->charge = IDX_TO_OFF(newpages);
- sc->object->size = newpages;
- VM_OBJECT_WUNLOCK(sc->object);
+ VM_OBJECT_WLOCK(sc->s_swap.object);
+ sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+ sc->s_swap.object->size = newpages;
+ VM_OBJECT_WUNLOCK(sc->s_swap.object);
}
break;
default:
@@ -1639,13 +1682,13 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
sc->fwsectors = mdr->md_fwsectors;
if (mdr->md_fwheads != 0)
sc->fwheads = mdr->md_fwheads;
- sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
- VM_PROT_DEFAULT, 0, td->td_ucred);
- if (sc->object == NULL)
+ sc->s_swap.object = vm_pager_allocate(OBJT_SWAP, NULL,
+ PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred);
+ if (sc->s_swap.object == NULL)
return (ENOMEM);
sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
if (mdr->md_options & MD_RESERVE) {
- if (swap_pager_reserve(sc->object, 0, npage) < 0) {
+ if (swap_pager_reserve(sc->s_swap.object, 0, npage) < 0) {
error = EDOM;
goto finish;
}
@@ -1653,8 +1696,8 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
error = mdsetcred(sc, td->td_ucred);
finish:
if (error != 0) {
- vm_object_deallocate(sc->object);
- sc->object = NULL;
+ vm_object_deallocate(sc->s_swap.object);
+ sc->s_swap.object = NULL;
}
return (error);
}
@@ -1678,7 +1721,7 @@ kern_mdattach_locked(struct thread *td, struct md_req *mdr)
{
struct md_s *sc;
unsigned sectsize;
- int error, i;
+ int error;
sx_assert(&md_sx, SA_XLOCKED);
@@ -1750,10 +1793,6 @@ err_after_new:
return (error);
}
- /* Prune off any residual fractional sector */
- i = sc->mediasize % sc->sectorsize;
- sc->mediasize -= i;
-
mdinit(sc);
return (0);
}
@@ -1856,10 +1895,13 @@ kern_mdquery_locked(struct md_req *mdr)
if (error != 0)
return (error);
}
- if (sc->type == MD_VNODE ||
- (sc->type == MD_PRELOAD && mdr->md_file != NULL))
- error = copyout(sc->file, mdr->md_file,
- strlen(sc->file) + 1);
+ if (sc->type == MD_VNODE) {
+ error = copyout(sc->s_vnode.file, mdr->md_file,
+ strlen(sc->s_vnode.file) + 1);
+ } else if (sc->type == MD_PRELOAD && mdr->md_file != NULL) {
+ error = copyout(sc->s_preload.name, mdr->md_file,
+ strlen(sc->s_preload.name) + 1);
+ }
return (error);
}
@@ -2011,11 +2053,12 @@ md_preloaded(u_char *image, size_t length, const char *name)
return;
sc->mediasize = length;
sc->sectorsize = DEV_BSIZE;
- sc->pl_ptr = image;
- sc->pl_len = length;
+ sc->s_preload.pl_ptr = image;
+ sc->s_preload.pl_len = length;
sc->start = mdstart_preload;
if (name != NULL)
- strlcpy(sc->file, name, sizeof(sc->file));
+ strlcpy(sc->s_preload.name, name,
+ sizeof(sc->s_preload.name));
#ifdef MD_ROOT
if (sc->unit == 0) {
#ifndef ROOTDEVNAME
@@ -2041,8 +2084,10 @@ g_md_init(struct g_class *mp __unused)
{
caddr_t mod;
u_char *ptr, *name, *type;
+ u_char scratch[40];
unsigned len;
int i;
+ vm_offset_t paddr;
/* figure out log2(NINDIR) */
for (i = NINDIR, nshift = -1; i; nshift++)
@@ -2082,6 +2127,25 @@ g_md_init(struct g_class *mp __unused)
sx_xunlock(&md_sx);
}
}
+
+ /*
+ * Load up to 32 pre-loaded disks
+ */
+ for (int i = 0; i < 32; i++) {
+ if (resource_long_value("md", i, "physaddr",
+ (long *) &paddr) != 0 ||
+ resource_int_value("md", i, "len", &len) != 0)
+ break;
+ ptr = (char *)pmap_map(NULL, paddr, paddr + len, VM_PROT_READ);
+ if (ptr != NULL && len != 0) {
+ sprintf(scratch, "preload%d 0x%016jx", i,
+ (uintmax_t)paddr);
+ sx_xlock(&md_sx);
+ md_preloaded(ptr, len, scratch);
+ sx_xunlock(&md_sx);
+ }
+ }
+
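
The new loop consumes standard device hints: resource_long_value("md", i, "physaddr") and friends map to hint.md.<unit>.* entries in the kernel environment, e.g. in loader.conf, with illustrative values:

	hint.md.0.physaddr="0x88000000"
	hint.md.0.len="8388608"
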
status_dev = make_dev(&mdctl_cdevsw, INT_MAX, UID_ROOT, GID_WHEEL,
0600, MDCTL_NAME);
g_topology_lock();
@@ -2127,9 +2191,14 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
sbuf_printf(sb, " t %s", type);
- if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
- (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
- sbuf_printf(sb, " file %s", mp->file);
+ if (mp->type == MD_VNODE &&
+ mp->s_vnode.vnode != NULL)
+ sbuf_printf(sb, " file %s", mp->s_vnode.file);
+ if (mp->type == MD_PRELOAD &&
+ mp->s_preload.name[0] != '\0') {
+ sbuf_printf(sb, " file %s",
+ mp->s_preload.name);
+ }
sbuf_printf(sb, " label %s", mp->label);
} else {
sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
@@ -2154,15 +2223,23 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
"read-only");
sbuf_printf(sb, "%s<type>%s</type>\n", indent,
type);
- if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
- (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
+ if (mp->type == MD_VNODE) {
+ if (mp->s_vnode.vnode != NULL) {
+ sbuf_printf(sb, "%s<file>", indent);
+ g_conf_printf_escaped(sb, "%s",
+ mp->s_vnode.file);
+ sbuf_printf(sb, "</file>\n");
+ }
+ sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
+ (mp->flags & MD_CACHE) == 0 ? "off": "on");
+ }
+ if (mp->type == MD_PRELOAD &&
+ mp->s_preload.name[0] != '\0') {
sbuf_printf(sb, "%s<file>", indent);
- g_conf_printf_escaped(sb, "%s", mp->file);
+ g_conf_printf_escaped(sb, "%s",
+ mp->s_preload.name);
sbuf_printf(sb, "</file>\n");
}
- if (mp->type == MD_VNODE)
- sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
- (mp->flags & MD_CACHE) == 0 ? "off": "on");
sbuf_printf(sb, "%s<label>", indent);
g_conf_printf_escaped(sb, "%s", mp->label);
sbuf_printf(sb, "</label>\n");
diff --git a/sys/dev/mdio/mdio.c b/sys/dev/mdio/mdio.c
index 55122edd16cc..0ef7e7453799 100644
--- a/sys/dev/mdio/mdio.c
+++ b/sys/dev/mdio/mdio.c
@@ -37,8 +37,8 @@ static void
mdio_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, mdio_driver.name, -1) == NULL)
- BUS_ADD_CHILD(parent, 0, mdio_driver.name, -1);
+ if (device_find_child(parent, mdio_driver.name, DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, mdio_driver.name, DEVICE_UNIT_ANY);
}
static int
@@ -54,16 +54,9 @@ static int
mdio_attach(device_t dev)
{
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- return (bus_generic_attach(dev));
-}
-
-static int
-mdio_detach(device_t dev)
-{
-
- bus_generic_detach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -108,7 +101,7 @@ static device_method_t mdio_methods[] = {
DEVMETHOD(device_identify, mdio_identify),
DEVMETHOD(device_probe, mdio_probe),
DEVMETHOD(device_attach, mdio_attach),
- DEVMETHOD(device_detach, mdio_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
/* bus interface */
diff --git a/sys/dev/mem/memutil.c b/sys/dev/mem/memutil.c
index cf9714d6ec8f..20ce337df0ab 100644
--- a/sys/dev/mem/memutil.c
+++ b/sys/dev/mem/memutil.c
@@ -26,15 +26,14 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
-#include <sys/rwlock.h>
-#include <sys/systm.h>
+#include <sys/sx.h>
-static struct rwlock mr_lock;
+static struct sx mr_lock;
/*
* Implementation-neutral, kernel-callable functions for manipulating
@@ -46,7 +45,7 @@ mem_range_init(void)
if (mem_range_softc.mr_op == NULL)
return;
- rw_init(&mr_lock, "memrange");
+ sx_init(&mr_lock, "memrange");
mem_range_softc.mr_op->init(&mem_range_softc);
}
@@ -56,7 +55,7 @@ mem_range_destroy(void)
if (mem_range_softc.mr_op == NULL)
return;
- rw_destroy(&mr_lock);
+ sx_destroy(&mr_lock);
}
int
@@ -67,12 +66,12 @@ mem_range_attr_get(struct mem_range_desc *mrd, int *arg)
if (mem_range_softc.mr_op == NULL)
return (EOPNOTSUPP);
nd = *arg;
- rw_rlock(&mr_lock);
+ sx_slock(&mr_lock);
if (nd == 0)
*arg = mem_range_softc.mr_ndesc;
else
bcopy(mem_range_softc.mr_desc, mrd, nd * sizeof(*mrd));
- rw_runlock(&mr_lock);
+ sx_sunlock(&mr_lock);
return (0);
}
@@ -83,8 +82,8 @@ mem_range_attr_set(struct mem_range_desc *mrd, int *arg)
if (mem_range_softc.mr_op == NULL)
return (EOPNOTSUPP);
- rw_wlock(&mr_lock);
+ sx_xlock(&mr_lock);
ret = mem_range_softc.mr_op->set(&mem_range_softc, mrd, arg);
- rw_wunlock(&mr_lock);
+ sx_xunlock(&mr_lock);
return (ret);
}
diff --git a/sys/dev/mfi/mfi.c b/sys/dev/mfi/mfi.c
index 3a61559d4605..13e5dfc84fd1 100644
--- a/sys/dev/mfi/mfi.c
+++ b/sys/dev/mfi/mfi.c
@@ -775,8 +775,8 @@ mfi_attach(struct mfi_softc *sc)
&sc->mfi_keep_deleted_volumes, 0,
"Don't detach the mfid device for a busy volume that is deleted");
- device_add_child(sc->mfi_dev, "mfip", -1);
- bus_generic_attach(sc->mfi_dev);
+ device_add_child(sc->mfi_dev, "mfip", DEVICE_UNIT_ANY);
+ bus_attach_children(sc->mfi_dev);
/* Start the timeout watchdog */
callout_init(&sc->mfi_watchdog_callout, 1);
@@ -1922,7 +1922,8 @@ mfi_add_ld_complete(struct mfi_command *cm)
mtx_unlock(&sc->mfi_io_lock);
bus_topo_lock();
- if ((child = device_add_child(sc->mfi_dev, "mfid", -1)) == NULL) {
+ if ((child = device_add_child(sc->mfi_dev, "mfid",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(sc->mfi_dev, "Failed to add logical disk\n");
free(ld_info, M_MFIBUF);
bus_topo_unlock();
@@ -1932,7 +1933,7 @@ mfi_add_ld_complete(struct mfi_command *cm)
device_set_ivars(child, ld_info);
device_set_desc(child, "MFI Logical Disk");
- bus_generic_attach(sc->mfi_dev);
+ bus_attach_children(sc->mfi_dev);
bus_topo_unlock();
mtx_lock(&sc->mfi_io_lock);
}
@@ -2010,7 +2011,8 @@ mfi_add_sys_pd_complete(struct mfi_command *cm)
mtx_unlock(&sc->mfi_io_lock);
bus_topo_lock();
- if ((child = device_add_child(sc->mfi_dev, "mfisyspd", -1)) == NULL) {
+ if ((child = device_add_child(sc->mfi_dev, "mfisyspd",
+ DEVICE_UNIT_ANY)) == NULL) {
device_printf(sc->mfi_dev, "Failed to add system pd\n");
free(pd_info, M_MFIBUF);
bus_topo_unlock();
@@ -2020,7 +2022,7 @@ mfi_add_sys_pd_complete(struct mfi_command *cm)
device_set_ivars(child, pd_info);
device_set_desc(child, "MFI System PD");
- bus_generic_attach(sc->mfi_dev);
+ bus_attach_children(sc->mfi_dev);
bus_topo_unlock();
mtx_lock(&sc->mfi_io_lock);
}
@@ -3633,11 +3635,8 @@ out:
mfi_aen_entry = malloc(sizeof(struct mfi_aen), M_MFIBUF,
M_WAITOK);
mtx_lock(&sc->mfi_io_lock);
- if (mfi_aen_entry != NULL) {
- mfi_aen_entry->p = curproc;
- TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry,
- aen_link);
- }
+ mfi_aen_entry->p = curproc;
+ TAILQ_INSERT_TAIL(&sc->mfi_aen_pids, mfi_aen_entry, aen_link);
error = mfi_aen_register(sc, l_aen.laen_seq_num,
l_aen.laen_class_locale);
diff --git a/sys/dev/mfi/mfi_cam.c b/sys/dev/mfi/mfi_cam.c
index cce303c123e5..af95ff957d8c 100644
--- a/sys/dev/mfi/mfi_cam.c
+++ b/sys/dev/mfi/mfi_cam.c
@@ -298,7 +298,7 @@ mfip_cam_rescan(struct mfi_softc *sc, uint32_t tid)
device_t mfip_dev;
bus_topo_lock();
- mfip_dev = device_find_child(sc->mfi_dev, "mfip", -1);
+ mfip_dev = device_find_child(sc->mfi_dev, "mfip", DEVICE_UNIT_ANY);
bus_topo_unlock();
if (mfip_dev == NULL) {
device_printf(sc->mfi_dev, "Couldn't find mfip child device!\n");
diff --git a/sys/dev/mfi/mfi_pci.c b/sys/dev/mfi/mfi_pci.c
index de7d88556c8c..65e8e30bf994 100644
--- a/sys/dev/mfi/mfi_pci.c
+++ b/sys/dev/mfi/mfi_pci.c
@@ -112,7 +112,7 @@ SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0,
static int mfi_mrsas_enable;
SYSCTL_INT(_hw_mfi, OID_AUTO, mrsas_enable, CTLFLAG_RDTUN, &mfi_mrsas_enable,
- 0, "Allow mrasas to take newer cards");
+ 0, "Allow mrsas to take newer cards");
struct mfi_ident {
uint16_t vendor;
@@ -279,8 +279,7 @@ static int
mfi_pci_detach(device_t dev)
{
struct mfi_softc *sc;
- int error, devcount, i;
- device_t *devlist;
+ int error;
sc = device_get_softc(dev);
@@ -294,13 +293,11 @@ mfi_pci_detach(device_t dev)
sc->mfi_detaching = 1;
mtx_unlock(&sc->mfi_io_lock);
- if ((error = device_get_children(sc->mfi_dev, &devlist, &devcount)) != 0) {
+ error = bus_generic_detach(sc->mfi_dev);
+ if (error != 0) {
sx_xunlock(&sc->mfi_config_lock);
return error;
}
- for (i = 0; i < devcount; i++)
- device_delete_child(sc->mfi_dev, devlist[i]);
- free(devlist, M_TEMP);
sx_xunlock(&sc->mfi_config_lock);
EVENTHANDLER_DEREGISTER(shutdown_final, sc->mfi_eh);
diff --git a/sys/dev/mfi/mfireg.h b/sys/dev/mfi/mfireg.h
index f7e3ebc98cd4..fe5e1be29d38 100644
--- a/sys/dev/mfi/mfireg.h
+++ b/sys/dev/mfi/mfireg.h
@@ -1028,7 +1028,7 @@ struct mfi_evt_detail {
} pd_prog;
struct {
- struct mfi_evt_pd ld;
+ struct mfi_evt_pd pd;
uint32_t prev_state;
uint32_t new_state;
} pd_state;
diff --git a/sys/dev/mgb/if_mgb.c b/sys/dev/mgb/if_mgb.c
index 6fafb303143c..409f34167df0 100644
--- a/sys/dev/mgb/if_mgb.c
+++ b/sys/dev/mgb/if_mgb.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 The FreeBSD Foundation, Inc.
+ * Copyright (c) 2019 The FreeBSD Foundation
*
* This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
* under sponsorship from the FreeBSD Foundation.
@@ -482,8 +482,7 @@ mgb_detach(if_ctx_t ctx)
iflib_irq_free(ctx, &sc->rx_irq);
iflib_irq_free(ctx, &sc->admin_irq);
- if (sc->miibus != NULL)
- device_delete_child(sc->dev, sc->miibus);
+ bus_generic_detach(sc->dev);
if (sc->pba != NULL)
error = bus_release_resource(sc->dev, SYS_RES_MEMORY,
@@ -1436,7 +1435,7 @@ mgb_hw_teardown(struct mgb_softc *sc)
/* Stop MAC */
CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL);
- CSR_WRITE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
+ CSR_CLEAR_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL);
if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0)))
return (err);
if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0)))
diff --git a/sys/dev/mgb/if_mgb.h b/sys/dev/mgb/if_mgb.h
index fa49805d73b7..19f6d4a20cc7 100644
--- a/sys/dev/mgb/if_mgb.h
+++ b/sys/dev/mgb/if_mgb.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 The FreeBSD Foundation, Inc.
+ * Copyright (c) 2019 The FreeBSD Foundation
*
* This driver was written by Gerald ND Aryeetey <gndaryee@uwaterloo.ca>
* under sponsorship from the FreeBSD Foundation.
diff --git a/sys/dev/mge/if_mge.c b/sys/dev/mge/if_mge.c
index 2331cb9f8f35..25d213ff16ee 100644
--- a/sys/dev/mge/if_mge.c
+++ b/sys/dev/mge/if_mge.c
@@ -398,33 +398,16 @@ mge_ver_params(struct mge_softc *sc)
uint32_t d, r;
soc_id(&d, &r);
- if (d == MV_DEV_88F6281 || d == MV_DEV_88F6781 ||
- d == MV_DEV_88F6282 ||
- d == MV_DEV_MV78100 ||
- d == MV_DEV_MV78100_Z0 ||
- (d & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY) {
- sc->mge_ver = 2;
- sc->mge_mtu = 0x4e8;
- sc->mge_tfut_ipg_max = 0xFFFF;
- sc->mge_rx_ipg_max = 0xFFFF;
- sc->mge_tx_arb_cfg = 0xFC0000FF;
- sc->mge_tx_tok_cfg = 0xFFFF7FFF;
- sc->mge_tx_tok_cnt = 0x3FFFFFFF;
- } else {
- sc->mge_ver = 1;
- sc->mge_mtu = 0x458;
- sc->mge_tfut_ipg_max = 0x3FFF;
- sc->mge_rx_ipg_max = 0x3FFF;
- sc->mge_tx_arb_cfg = 0x000000FF;
- sc->mge_tx_tok_cfg = 0x3FFFFFFF;
- sc->mge_tx_tok_cnt = 0x3FFFFFFF;
- }
- if (d == MV_DEV_88RC8180)
- sc->mge_intr_cnt = 1;
- else
- sc->mge_intr_cnt = 2;
-
- if (d == MV_DEV_MV78160 || d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
+ sc->mge_ver = 1;
+ sc->mge_mtu = 0x458;
+ sc->mge_tfut_ipg_max = 0x3FFF;
+ sc->mge_rx_ipg_max = 0x3FFF;
+ sc->mge_tx_arb_cfg = 0x000000FF;
+ sc->mge_tx_tok_cfg = 0x3FFFFFFF;
+ sc->mge_tx_tok_cnt = 0x3FFFFFFF;
+ sc->mge_intr_cnt = 2;
+
+ if (d == MV_DEV_MV78260 || d == MV_DEV_MV78460)
sc->mge_hw_csum = 0;
else
sc->mge_hw_csum = 1;
@@ -847,12 +830,6 @@ mge_attach(device_t dev)
/* Allocate network interface */
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "if_alloc() failed\n");
- mge_detach(dev);
- return (ENOMEM);
- }
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setsoftc(ifp, sc);
if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
@@ -924,8 +901,8 @@ mge_attach(device_t dev)
if (sc->switch_attached) {
MGE_WRITE(sc, MGE_REG_PHYDEV, MGE_SWITCH_PHYDEV);
- device_add_child(dev, "mdio", -1);
- bus_generic_attach(dev);
+ device_add_child(dev, "mdio", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
}
return (0);
diff --git a/sys/dev/mii/e1000phy.c b/sys/dev/mii/e1000phy.c
index e978c043d72a..050e9db71577 100644
--- a/sys/dev/mii/e1000phy.c
+++ b/sys/dev/mii/e1000phy.c
@@ -185,6 +185,11 @@ e1000phy_reset(struct mii_softc *sc)
{
uint16_t reg, page;
+ /* Undo power-down / isolate */
+ reg = PHY_READ(sc, E1000_CR);
+ reg &= ~(E1000_CR_ISOLATE | E1000_CR_POWER_DOWN);
+ PHY_WRITE(sc, E1000_CR, reg);
+
reg = PHY_READ(sc, E1000_SCR);
if ((sc->mii_flags & MIIF_HAVEFIBER) != 0) {
reg &= ~E1000_SCR_AUTO_X_MODE;
@@ -353,6 +358,8 @@ e1000phy_service(struct mii_softc *sc, struct mii_data *mii, int cmd)
reg = PHY_READ(sc, E1000_CR);
reg &= ~E1000_CR_AUTO_NEG_ENABLE;
+ /* Undo power-down / isolate */
+ reg &= ~(E1000_CR_ISOLATE | E1000_CR_POWER_DOWN);
PHY_WRITE(sc, E1000_CR, reg | E1000_CR_RESET);
if (IFM_SUBTYPE(ife->ifm_media) == IFM_1000_T) {
diff --git a/sys/dev/mii/mcommphy.c b/sys/dev/mii/mcommphy.c
index a8a16c00bade..407b29fd9938 100644
--- a/sys/dev/mii/mcommphy.c
+++ b/sys/dev/mii/mcommphy.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2022 Jared McNeill <jmcneill@invisible.ca>
* Copyright (c) 2022 Soren Schmidt <sos@deepcore.dk>
+ * Copyright (c) 2024 Jari Sihvola <jsihv@gmx.com>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -26,9 +27,12 @@
*/
/*
- * Motorcomm YT8511C / YT8511H Integrated 10/100/1000 Gigabit Ethernet phy
+ * Motorcomm YT8511C/YT8511H/YT8531
+ * Integrated 10/100/1000 Ethernet PHY
*/
+#include "opt_platform.h"
+
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -42,12 +46,18 @@
#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
+#ifdef FDT
+#include <dev/mii/mii_fdt.h>
+#endif
+#include "miidevs.h"
#include "miibus_if.h"
-#define MCOMMPHY_OUI 0x000000
-#define MCOMMPHY_MODEL 0x10
-#define MCOMMPHY_REV 0x0a
+#define MCOMMPHY_YT8511_OUI 0x000000
+#define MCOMMPHY_YT8511_MODEL 0x10
+#define MCOMMPHY_YT8511_REV 0x0a
+
+#define MCOMMPHY_YT8531_MODEL 0x11
#define EXT_REG_ADDR 0x1e
#define EXT_REG_DATA 0x1f
@@ -61,9 +71,51 @@
#define PHY_SLEEP_CONTROL1_REG 0x27
#define PLLON_IN_SLP 0x4000
+/* Registers and values for YT8531 */
+#define YT8531_CHIP_CONFIG 0xa001
+#define RXC_DLY_EN (1 << 8)
+
+#define YT8531_PAD_DRSTR_CFG 0xa010
+#define PAD_RXC_MASK 0x7
+#define PAD_RXC_SHIFT 13
+#define JH7110_RGMII_RXC_STRENGTH 6
+
+#define YT8531_RGMII_CONFIG1 0xa003
+#define RX_DELAY_SEL_SHIFT 10
+#define RX_DELAY_SEL_MASK 0xf
+#define RXC_DLY_THRESH 2250
+#define RXC_DLY_ADDON 1900
+#define TX_DELAY_SEL_FE_MASK 0xf
+#define TX_DELAY_SEL_FE_SHIFT 4
+#define TX_DELAY_SEL_MASK 0xf
+#define TX_DELAY_SEL_SHIFT 0
+#define TX_CLK_SEL (1 << 14)
+#define INTERNAL_DLY_DIV 150
+
+#define YT8531_SYNCE_CFG 0xa012
+#define EN_SYNC_E (1 << 6)
+
#define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask))
#define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask))
+static const struct mii_phydesc mcommphys[] = {
+ MII_PHY_DESC(MOTORCOMM, YT8511),
+ MII_PHY_DESC(MOTORCOMM2, YT8531),
+ MII_PHY_END
+};
+
+struct mcommphy_softc {
+ mii_softc_t mii_sc;
+ device_t dev;
+ u_int rx_delay_ps;
+ u_int tx_delay_ps;
+ bool tx_10_inv;
+ bool tx_100_inv;
+ bool tx_1000_inv;
+};
+
+static void mcommphy_yt8531_speed_adjustment(struct mii_softc *sc);
+
static int
mcommphy_service(struct mii_softc *sc, struct mii_data *mii, int cmd)
{
@@ -84,6 +136,16 @@ mcommphy_service(struct mii_softc *sc, struct mii_data *mii, int cmd)
/* Update the media status. */
PHY_STATUS(sc);
+ /*
+ * Needed for the JH7110, whose two Ethernet devices require
+ * different TX clock inversion settings depending on the
+ * link speed in use.
+ */
+ if (sc->mii_mpd_model == MCOMMPHY_YT8531_MODEL &&
+ (sc->mii_media_active != mii->mii_media_active ||
+ sc->mii_media_status != mii->mii_media_status)) {
+ mcommphy_yt8531_speed_adjustment(sc);
+ }
+
/* Callback if something changed. */
mii_phy_update(sc, cmd);
@@ -105,26 +167,22 @@ mcommphy_probe(device_t dev)
* The YT8511C reports an OUI of 0. Best we can do here is to match
* exactly the contents of the PHY identification registers.
*/
- if (MII_OUI(ma->mii_id1, ma->mii_id2) == MCOMMPHY_OUI &&
- MII_MODEL(ma->mii_id2) == MCOMMPHY_MODEL &&
- MII_REV(ma->mii_id2) == MCOMMPHY_REV) {
+ if (MII_OUI(ma->mii_id1, ma->mii_id2) == MCOMMPHY_YT8511_OUI &&
+ MII_MODEL(ma->mii_id2) == MCOMMPHY_YT8511_MODEL &&
+ MII_REV(ma->mii_id2) == MCOMMPHY_YT8511_REV) {
device_set_desc(dev, "Motorcomm YT8511 media interface");
- return BUS_PROBE_DEFAULT;
+ return (BUS_PROBE_DEFAULT);
}
- return (ENXIO);
+
+ /* The YT8531 reports a real OUI, so probe it the conventional way */
+ return (mii_phy_dev_probe(dev, mcommphys, BUS_PROBE_DEFAULT));
}
-static int
-mcommphy_attach(device_t dev)
+static void
+mcommphy_yt8511_setup(struct mii_softc *sc)
{
- struct mii_softc *sc = device_get_softc(dev);
uint16_t oldaddr, data;
- mii_phy_dev_attach(dev, MIIF_NOMANPAUSE, &mcommphy_funcs, 0);
-
- PHY_RESET(sc);
-
- /* begin chip stuff */
oldaddr = PHY_READ(sc, EXT_REG_ADDR);
PHY_WRITE(sc, EXT_REG_ADDR, PHY_CLOCK_GATING_REG);
@@ -150,21 +208,189 @@ mcommphy_attach(device_t dev)
PHY_WRITE(sc, EXT_REG_DATA, data);
PHY_WRITE(sc, EXT_REG_ADDR, oldaddr);
- /* end chip stuff */
+}
+
+static void
+mcommphy_yt8531_speed_adjustment(struct mii_softc *sc)
+{
+ struct mcommphy_softc *mcomm_sc = (struct mcommphy_softc *)sc;
+ struct mii_data *mii = sc->mii_pdata;
+ bool tx_clk_inv = false;
+ uint16_t reg, oldaddr;
+
+ switch (IFM_SUBTYPE(mii->mii_media_active)) {
+ case IFM_1000_T:
+ tx_clk_inv = mcomm_sc->tx_1000_inv;
+ break;
+ case IFM_100_TX:
+ tx_clk_inv = mcomm_sc->tx_100_inv;
+ break;
+ case IFM_10_T:
+ tx_clk_inv = mcomm_sc->tx_10_inv;
+ break;
+ }
+
+ oldaddr = PHY_READ(sc, EXT_REG_ADDR);
+
+ PHY_WRITE(sc, EXT_REG_ADDR, YT8531_RGMII_CONFIG1);
+ reg = PHY_READ(sc, EXT_REG_DATA);
+ if (tx_clk_inv)
+ reg |= TX_CLK_SEL;
+ else
+ reg &= ~TX_CLK_SEL;
+ PHY_WRITE(sc, EXT_REG_DATA, reg);
+
+ PHY_WRITE(sc, EXT_REG_ADDR, oldaddr);
+}
+
+#ifdef FDT
+static int
+mcommphy_yt8531_setup_delay(struct mii_softc *sc)
+{
+ struct mcommphy_softc *mcomm_sc = (struct mcommphy_softc *)sc;
+ uint16_t reg, oldaddr;
+ int rx_delay = 0, tx_delay = 0;
+ bool rxc_dly_en_off = false;
+
+ if (mcomm_sc->rx_delay_ps > RXC_DLY_THRESH) {
+ rx_delay = (mcomm_sc->rx_delay_ps - RXC_DLY_ADDON) /
+ INTERNAL_DLY_DIV;
+ } else if (mcomm_sc->rx_delay_ps > 0) {
+ rx_delay = mcomm_sc->rx_delay_ps / INTERNAL_DLY_DIV;
+ rxc_dly_en_off = true;
+ }
+
+ if (mcomm_sc->tx_delay_ps > 0) {
+ tx_delay = mcomm_sc->tx_delay_ps / INTERNAL_DLY_DIV;
+ }
+
+ oldaddr = PHY_READ(sc, EXT_REG_ADDR);
- sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & sc->mii_capmask;
- if (sc->mii_capabilities & BMSR_EXTSTAT)
- sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR);
+ /* Modifying Chip Config register */
+ PHY_WRITE(sc, EXT_REG_ADDR, YT8531_CHIP_CONFIG);
+ reg = PHY_READ(sc, EXT_REG_DATA);
+ if (rxc_dly_en_off)
+ reg &= ~(RXC_DLY_EN);
+ PHY_WRITE(sc, EXT_REG_DATA, reg);
+
+ /* Modifying RGMII Config1 register */
+ PHY_WRITE(sc, EXT_REG_ADDR, YT8531_RGMII_CONFIG1);
+ reg = PHY_READ(sc, EXT_REG_DATA);
+ reg &= ~(RX_DELAY_SEL_MASK << RX_DELAY_SEL_SHIFT);
+ reg |= rx_delay << RX_DELAY_SEL_SHIFT;
+ reg &= ~(TX_DELAY_SEL_MASK << TX_DELAY_SEL_SHIFT);
+ reg |= tx_delay << TX_DELAY_SEL_SHIFT;
+ PHY_WRITE(sc, EXT_REG_DATA, reg);
+
+ PHY_WRITE(sc, EXT_REG_ADDR, oldaddr);
+
+ return (0);
+}
+#endif
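
A worked pass through the delay math above: an rx-internal-delay-ps of 1950 is at or below RXC_DLY_THRESH (2250), so RXC_DLY_EN gets cleared and rx_delay = 1950 / 150 = 13; a value of 2400 exceeds the threshold, keeps RXC_DLY_EN set, and yields rx_delay = (2400 - 1900) / 150 = 3 in integer division. tx-internal-delay-ps divides directly, e.g. 450 / 150 = 3.
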
+
+static int
+mcommphy_yt8531_setup(struct mii_softc *sc)
+{
+ uint16_t reg, oldaddr;
+
+ oldaddr = PHY_READ(sc, EXT_REG_ADDR);
+
+ /* Modifying Pad Drive Strength register */
+ PHY_WRITE(sc, EXT_REG_ADDR, YT8531_PAD_DRSTR_CFG);
+ reg = PHY_READ(sc, EXT_REG_DATA);
+ reg &= ~(PAD_RXC_MASK << PAD_RXC_SHIFT);
+ reg |= (JH7110_RGMII_RXC_STRENGTH << PAD_RXC_SHIFT);
+ PHY_WRITE(sc, EXT_REG_DATA, reg);
+
+ /* Modifying SyncE Config register */
+ PHY_WRITE(sc, EXT_REG_ADDR, YT8531_SYNCE_CFG);
+ reg = PHY_READ(sc, EXT_REG_DATA);
+ reg &= ~(EN_SYNC_E);
+ PHY_WRITE(sc, EXT_REG_DATA, reg);
+
+ PHY_WRITE(sc, EXT_REG_ADDR, oldaddr);
+
+#ifdef FDT
+ if (mcommphy_yt8531_setup_delay(sc) != 0)
+ return (ENXIO);
+#endif
+
+ return (0);
+}
+
+#ifdef FDT
+static void
+mcommphy_fdt_get_config(struct mcommphy_softc *sc)
+{
+ mii_fdt_phy_config_t *cfg;
+ pcell_t val;
+
+ cfg = mii_fdt_get_config(sc->dev);
+
+ if (OF_hasprop(cfg->phynode, "motorcomm,tx-clk-10-inverted"))
+ sc->tx_10_inv = true;
+ if (OF_hasprop(cfg->phynode, "motorcomm,tx-clk-100-inverted"))
+ sc->tx_100_inv = true;
+ if (OF_hasprop(cfg->phynode, "motorcomm,tx-clk-1000-inverted"))
+ sc->tx_1000_inv = true;
+
+ /* Grab raw delay values (picoseconds); adjusted later. */
+ if (OF_getencprop(cfg->phynode, "rx-internal-delay-ps", &val,
+ sizeof(val)) > 0) {
+ sc->rx_delay_ps = val;
+ }
+ if (OF_getencprop(cfg->phynode, "tx-internal-delay-ps", &val,
+ sizeof(val)) > 0) {
+ sc->tx_delay_ps = val;
+ }
+
+ mii_fdt_free_config(cfg);
+}
+#endif
+
+static int
+mcommphy_attach(device_t dev)
+{
+ struct mcommphy_softc *mcomm_sc = device_get_softc(dev);
+ mii_softc_t *mii_sc = &mcomm_sc->mii_sc;
+ int ret = 0;
+
+ mcomm_sc->dev = dev;
+
+#ifdef FDT
+ mcommphy_fdt_get_config(mcomm_sc);
+#endif
+
+ mii_phy_dev_attach(dev, MIIF_NOMANPAUSE, &mcommphy_funcs, 0);
+
+ PHY_RESET(mii_sc);
+
+ if (mii_sc->mii_mpd_model == MCOMMPHY_YT8511_MODEL)
+ mcommphy_yt8511_setup(mii_sc);
+ else if (mii_sc->mii_mpd_model == MCOMMPHY_YT8531_MODEL)
+ ret = mcommphy_yt8531_setup(mii_sc);
+ else {
+ device_printf(dev, "no PHY model detected\n");
+ return (ENXIO);
+ }
+ if (ret) {
+ device_printf(dev, "PHY setup failed, error: %d\n", ret);
+ return (ret);
+ }
+
+ mii_sc->mii_capabilities = PHY_READ(mii_sc, MII_BMSR) &
+ mii_sc->mii_capmask;
+ if (mii_sc->mii_capabilities & BMSR_EXTSTAT)
+ mii_sc->mii_extcapabilities = PHY_READ(mii_sc, MII_EXTSR);
device_printf(dev, " ");
- mii_phy_add_media(sc);
+ mii_phy_add_media(mii_sc);
printf("\n");
- MIIBUS_MEDIAINIT(sc->mii_dev);
+ MIIBUS_MEDIAINIT(mii_sc->mii_dev);
return (0);
}
-
static device_method_t mcommphy_methods[] = {
/* device interface */
DEVMETHOD(device_probe, mcommphy_probe),
@@ -177,7 +403,7 @@ static device_method_t mcommphy_methods[] = {
static driver_t mcommphy_driver = {
"mcommphy",
mcommphy_methods,
- sizeof(struct mii_softc)
+ sizeof(struct mcommphy_softc)
};
DRIVER_MODULE(mcommphy, miibus, mcommphy_driver, 0, 0);
diff --git a/sys/dev/mii/mii.c b/sys/dev/mii/mii.c
index 83c8aafdf9fb..b63bfb6285bb 100644
--- a/sys/dev/mii/mii.c
+++ b/sys/dev/mii/mii.c
@@ -58,7 +58,7 @@ MODULE_VERSION(miibus, 1);
#include "miibus_if.h"
-static bus_child_detached_t miibus_child_detached;
+static bus_child_deleted_t miibus_child_deleted;
static bus_child_location_t miibus_child_location;
static bus_child_pnpinfo_t miibus_child_pnpinfo;
static device_detach_t miibus_detach;
@@ -84,7 +84,7 @@ static device_method_t miibus_methods[] = {
/* bus interface */
DEVMETHOD(bus_print_child, miibus_print_child),
DEVMETHOD(bus_read_ivar, miibus_read_ivar),
- DEVMETHOD(bus_child_detached, miibus_child_detached),
+ DEVMETHOD(bus_child_deleted, miibus_child_deleted),
DEVMETHOD(bus_child_pnpinfo, miibus_child_pnpinfo),
DEVMETHOD(bus_child_location, miibus_child_location),
DEVMETHOD(bus_hinted_child, miibus_hinted_child),
@@ -147,7 +147,8 @@ miibus_attach(device_t dev)
if_setcapenablebit(mii->mii_ifp, IFCAP_LINKSTATE, 0);
LIST_INIT(&mii->mii_phys);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -167,7 +168,7 @@ miibus_detach(device_t dev)
}
static void
-miibus_child_detached(device_t dev, device_t child)
+miibus_child_deleted(device_t dev, device_t child)
{
struct mii_attach_args *args;
@@ -407,7 +408,7 @@ mii_attach(device_t dev, device_t *miibus, if_t ifp,
ivars->ifmedia_upd = ifmedia_upd;
ivars->ifmedia_sts = ifmedia_sts;
ivars->mii_flags = flags;
- *miibus = device_add_child(dev, "miibus", -1);
+ *miibus = device_add_child(dev, "miibus", DEVICE_UNIT_ANY);
if (*miibus == NULL) {
rv = ENXIO;
goto fail;
@@ -496,7 +497,7 @@ mii_attach(device_t dev, device_t *miibus, if_t ifp,
if (args == NULL)
goto skip;
bcopy((char *)&ma, (char *)args, sizeof(ma));
- phy = device_add_child(*miibus, NULL, -1);
+ phy = device_add_child(*miibus, NULL, DEVICE_UNIT_ANY);
if (phy == NULL) {
free(args, M_DEVBUF);
goto skip;
@@ -520,16 +521,12 @@ mii_attach(device_t dev, device_t *miibus, if_t ifp,
rv = ENXIO;
goto fail;
}
- rv = bus_generic_attach(dev);
- if (rv != 0)
- goto fail;
+ bus_attach_children(dev);
/* Attaching of the PHY drivers is done in miibus_attach(). */
return (0);
}
- rv = bus_generic_attach(*miibus);
- if (rv != 0)
- goto fail;
+ bus_attach_children(*miibus);
return (0);
diff --git a/sys/dev/mii/mii_fdt.c b/sys/dev/mii/mii_fdt.c
index 240312114cf2..2254af162a34 100644
--- a/sys/dev/mii/mii_fdt.c
+++ b/sys/dev/mii/mii_fdt.c
@@ -116,6 +116,8 @@ mii_fdt_lookup_phy(phandle_t node, int addr)
*/
ports = ofw_bus_find_child(node, "ports");
if (ports <= 0)
+ ports = ofw_bus_find_child(node, "ethernet-ports");
+ if (ports <= 0)
return (-1);
for (child = OF_child(ports); child != 0; child = OF_peer(child)) {
diff --git a/sys/dev/mii/miidevs b/sys/dev/mii/miidevs
index 78d1dddba59f..5c10fd0f0224 100644
--- a/sys/dev/mii/miidevs
+++ b/sys/dev/mii/miidevs
@@ -63,6 +63,7 @@ oui LEVEL1 0x00207b Level 1
oui MARVELL 0x005043 Marvell Semiconductor
oui MICREL 0x0010a1 Micrel
oui MOTORCOMM 0x000000 Motorcomm
+oui MOTORCOMM2 0xc82b5e Motorcomm
oui MYSON 0x00c0b4 Myson Technology
oui NATSEMI 0x080017 National Semiconductor
oui PMCSIERRA 0x00e004 PMC-Sierra
@@ -295,6 +296,7 @@ model MICREL KSZ9031 0x0022 Micrel KSZ9031 10/100/1000 PHY
/* Motorcomm PHYs */
model MOTORCOMM YT8511 0x010a Motorcomm YT8511 10/100/1000 PHY
+model MOTORCOMM2 YT8531 0x0011 Motorcomm YT8531 10/100/1000 PHY
/* Myson Technology PHYs */
model xxMYSON MTD972 0x0000 MTD972 10/100 media interface
@@ -330,6 +332,7 @@ model REALTEK RTL8305SC 0x0005 RTL8305SC 10/100 802.1q switch
model REALTEK RTL8201E 0x0008 RTL8201E 10/100 media interface
model REALTEK RTL8251 0x0000 RTL8251/8153 1000BASE-T media interface
model REALTEK RTL8169S 0x0011 RTL8169S/8110S/8211 1000BASE-T media interface
+model REALTEK RTL8211FVD 0x0007 RTL8211F-VD 1000BASE-T media interface
/* Seeq Seeq PHYs */
model SEEQ 80220 0x0003 Seeq 80220 10/100 media interface
diff --git a/sys/dev/mii/mv88e151x.c b/sys/dev/mii/mv88e151x.c
index 618ad81471c9..fb03b2a7a917 100644
--- a/sys/dev/mii/mv88e151x.c
+++ b/sys/dev/mii/mv88e151x.c
@@ -97,7 +97,7 @@ mv88e151x_attach(device_t dev)
{
const struct mii_attach_args *ma;
struct mii_softc *sc;
- uint32_t cop_cap, cop_extcap;
+ uint32_t cop_cap = 0, cop_extcap = 0;
sc = device_get_softc(dev);
ma = device_get_ivars(dev);
@@ -224,10 +224,12 @@ mv88e151x_fiber_status(struct mii_softc *phy)
else if (reg & MV88E151X_STATUS_LINK &&
reg & MV88E151X_STATUS_SYNC &&
(reg & MV88E151X_STATUS_ENERGY) == 0) {
- if ((reg & MV88E151X_STATUS_SPEED_MASK) ==
+ if (((reg & MV88E151X_STATUS_SPEED_MASK) >>
+ MV88E151X_STATUS_SPEED_SHIFT) ==
MV88E151X_STATUS_SPEED_1000)
mii->mii_media_active |= IFM_1000_SX;
- else if ((reg & MV88E151X_STATUS_SPEED_MASK) ==
+ else if (((reg & MV88E151X_STATUS_SPEED_MASK) >>
+ MV88E151X_STATUS_SPEED_SHIFT) ==
MV88E151X_STATUS_SPEED_100)
mii->mii_media_active |= IFM_100_FX;
else
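
The fix above matters because the speed constants are plain small values while the status field sits shifted up in the register: comparing the masked, still-shifted field against them could never match a nonzero speed, so shifting down by MV88E151X_STATUS_SPEED_SHIFT first is what makes the 1000/100 cases reachable. In sketch form, assuming the usual layout of these macros:

	/* before: (reg & MASK) == SPEED_1000, shifted vs. unshifted */
	/* after:  ((reg & MASK) >> SHIFT) == SPEED_1000 */
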
diff --git a/sys/dev/mii/rgephy.c b/sys/dev/mii/rgephy.c
index 63b3406bb831..4ff86c8c8ccb 100644
--- a/sys/dev/mii/rgephy.c
+++ b/sys/dev/mii/rgephy.c
@@ -93,6 +93,7 @@ static void rgephy_disable_eee(struct mii_softc *);
static const struct mii_phydesc rgephys[] = {
MII_PHY_DESC(REALTEK, RTL8169S),
MII_PHY_DESC(REALTEK, RTL8251),
+ MII_PHY_DESC(REALTEK, RTL8211FVD),
MII_PHY_END
};
@@ -283,7 +284,7 @@ rgephy_linkup(struct mii_softc *sc)
linkup = 0;
if ((sc->mii_flags & MIIF_PHYPRIV0) == 0 &&
sc->mii_mpd_rev >= RGEPHY_8211B) {
- if (sc->mii_mpd_rev == RGEPHY_8211F) {
+ if (sc->mii_mpd_rev >= RGEPHY_8211F) {
reg = PHY_READ(sc, RGEPHY_F_MII_SSR);
if (reg & RGEPHY_F_SSR_LINK)
linkup++;
@@ -338,7 +339,7 @@ rgephy_status(struct mii_softc *sc)
if ((sc->mii_flags & MIIF_PHYPRIV0) == 0 &&
sc->mii_mpd_rev >= RGEPHY_8211B) {
- if (sc->mii_mpd_rev == RGEPHY_8211F) {
+ if (sc->mii_mpd_rev >= RGEPHY_8211F) {
ssr = PHY_READ(sc, RGEPHY_F_MII_SSR);
switch (ssr & RGEPHY_F_SSR_SPD_MASK) {
case RGEPHY_F_SSR_S1000:
@@ -523,6 +524,7 @@ rgephy_reset(struct mii_softc *sc)
switch (sc->mii_mpd_rev) {
case RGEPHY_8211F:
+ case RGEPHY_8211FVD:
pcr = PHY_READ(sc, RGEPHY_F_MII_PCR1);
pcr &= ~(RGEPHY_F_PCR1_MDI_MM | RGEPHY_F_PCR1_ALDPS_EN);
PHY_WRITE(sc, RGEPHY_F_MII_PCR1, pcr);
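RGEPHY_8211F is PHY revision 6 and the newly added RGEPHY_8211FVD (see rgephyreg.h below) is 8, so the exact-match tests on sc->mii_mpd_rev would have sent the F-VD down the pre-8211F register path. Relaxing the test keeps any F-family revision on the SSR-based code:

    /* Revision gate: 8211F (6), 8211F-VD (8) and any later F-family
     * part all expose link state via RGEPHY_F_MII_SSR. */
    if (sc->mii_mpd_rev >= RGEPHY_8211F) {
            reg = PHY_READ(sc, RGEPHY_F_MII_SSR);
            if (reg & RGEPHY_F_SSR_LINK)
                    linkup++;
    }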
diff --git a/sys/dev/mii/rgephyreg.h b/sys/dev/mii/rgephyreg.h
index 205f1a836020..fef985ebe30e 100644
--- a/sys/dev/mii/rgephyreg.h
+++ b/sys/dev/mii/rgephyreg.h
@@ -38,6 +38,7 @@
#define RGEPHY_8211B 2
#define RGEPHY_8211C 3
#define RGEPHY_8211F 6
+#define RGEPHY_8211FVD 8
/*
* RealTek 8169S/8110S gigE PHY registers
diff --git a/sys/dev/mlx/mlx.c b/sys/dev/mlx/mlx.c
index bed697b0423c..f0c7591803e0 100644
--- a/sys/dev/mlx/mlx.c
+++ b/sys/dev/mlx/mlx.c
@@ -522,7 +522,7 @@ mlx_startup(struct mlx_softc *sc)
{
struct mlx_enq_sys_drive *mes;
struct mlx_sysdrive *dr;
- int i, error;
+ int i;
debug_called(1);
@@ -560,15 +560,14 @@ mlx_startup(struct mlx_softc *sc)
dr->ms_sectors = 63;
dr->ms_cylinders = dr->ms_size / (255 * 63);
}
- dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, -1);
+ dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, DEVICE_UNIT_ANY);
if (dr->ms_disk == 0)
device_printf(sc->mlx_dev, "device_add_child failed\n");
device_set_ivars(dr->ms_disk, dr);
}
}
free(mes, M_DEVBUF);
- if ((error = bus_generic_attach(sc->mlx_dev)) != 0)
- device_printf(sc->mlx_dev, "bus_generic_attach returned %d", error);
+ bus_attach_children(sc->mlx_dev);
/* mark controller back up */
MLX_IO_LOCK(sc);
@@ -643,7 +642,7 @@ mlx_shutdown(device_t dev)
static int
mlx_shutdown_locked(struct mlx_softc *sc)
{
- int i, error;
+ int error;
debug_called(1);
@@ -661,17 +660,11 @@ mlx_shutdown_locked(struct mlx_softc *sc)
printf("done\n");
}
MLX_IO_UNLOCK(sc);
-
+
/* delete all our child devices */
- for (i = 0; i < MLX_MAXDRIVES; i++) {
- if (sc->mlx_sysdrive[i].ms_disk != 0) {
- if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0)
- return (error);
- sc->mlx_sysdrive[i].ms_disk = 0;
- }
- }
+ error = bus_generic_detach(sc->mlx_dev);
- return (0);
+ return (error);
}
/********************************************************************************
@@ -2075,8 +2068,8 @@ mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu)
goto out;
}
MLX_IO_UNLOCK(sc);
- if (((kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) ||
- (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) {
+ kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK);
+ if ((error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) {
MLX_IO_LOCK(sc);
goto out;
}
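Three modernizations in mlx.c: device_add_child() takes the symbolic DEVICE_UNIT_ANY instead of -1, attach and detach of the mlxd children go through bus_attach_children()/bus_generic_detach() rather than open-coded loops, and the malloc() NULL check is dropped because an M_WAITOK allocation sleeps until it succeeds (malloc(9)), so only copyin() can fail:

    /* M_WAITOK never returns NULL, so the error path shrinks to copyin(). */
    kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK);
    error = copyin(mu->mu_buf, kbuf, mu->mu_datasize);
    if (error != 0)
            goto out;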
diff --git a/sys/dev/mlx4/mlx4_core/mlx4_alloc.c b/sys/dev/mlx4/mlx4_core/mlx4_alloc.c
index dcf6204dfc1a..fa68625f5ab9 100644
--- a/sys/dev/mlx4/mlx4_core/mlx4_alloc.c
+++ b/sys/dev/mlx4/mlx4_core/mlx4_alloc.c
@@ -173,7 +173,7 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
u32 reserved_bot, u32 reserved_top)
{
/* num must be a power of 2 */
- if (num != roundup_pow_of_two(num))
+ if (!is_power_of_2(num))
return -EINVAL;
bitmap->last = 0;
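is_power_of_2() states the intent of the sanity check directly instead of round-tripping through roundup_pow_of_two(); both forms accept the same nonzero inputs. The direct test is the classic single-bit check:

    /* Minimal sketch of the test: a power of two has exactly one bit
     * set, so clearing the lowest set bit must leave zero. */
    static inline bool
    is_power_of_2_sketch(unsigned long n)
    {
            return (n != 0 && (n & (n - 1)) == 0);
    }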
diff --git a/sys/dev/mlx4/mlx4_core/mlx4_cmd.c b/sys/dev/mlx4/mlx4_core/mlx4_cmd.c
index 45d06104448d..54256ab1e124 100644
--- a/sys/dev/mlx4/mlx4_core/mlx4_cmd.c
+++ b/sys/dev/mlx4/mlx4_core/mlx4_cmd.c
@@ -1960,7 +1960,7 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
if (err) {
- mlx4_info(dev, "Failed query availible VPPs\n");
+ mlx4_info(dev, "Failed query available VPPs\n");
return;
}
@@ -1982,12 +1982,12 @@ static void mlx4_allocate_port_vpps(struct mlx4_dev *dev, int port)
/* Query actual allocated VPP, just to make sure */
err = mlx4_ALLOCATE_VPP_get(dev, port, &availible_vpp, vpp_param);
if (err) {
- mlx4_info(dev, "Failed query availible VPPs\n");
+ mlx4_info(dev, "Failed query available VPPs\n");
return;
}
port_qos->num_of_qos_vfs = num_vfs;
- mlx4_dbg(dev, "Port %d Availible VPPs %d\n", port, availible_vpp);
+ mlx4_dbg(dev, "Port %d Available VPPs %d\n", port, availible_vpp);
for (i = 0; i < MLX4_NUM_UP; i++)
mlx4_dbg(dev, "Port %d UP %d Allocated %d VPPs\n", port, i,
@@ -2884,7 +2884,7 @@ static int mlx4_set_vport_qos(struct mlx4_priv *priv, int slave, int port,
memset(vpp_qos, 0, sizeof(struct mlx4_vport_qos_param) * MLX4_NUM_UP);
if (slave > port_qos->num_of_qos_vfs) {
- mlx4_info(dev, "No availible VPP resources for this VF\n");
+ mlx4_info(dev, "No available VPP resources for this VF\n");
return -EINVAL;
}
diff --git a/sys/dev/mlx4/mlx4_en/en.h b/sys/dev/mlx4/mlx4_en/en.h
index a29b40ddaf10..838894f7b02f 100644
--- a/sys/dev/mlx4/mlx4_en/en.h
+++ b/sys/dev/mlx4/mlx4_en/en.h
@@ -232,7 +232,7 @@ enum cq_type {
/*
* Useful macros
*/
-#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
+#define ROUNDUP_LOG2(x) order_base_2(x)
#define XNOR(x, y) (!(x) == !(y))
#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
index 583de1816d1b..ac4bfd1b1a14 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_netdev.c
@@ -2143,11 +2143,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
dev = priv->dev = if_alloc(IFT_ETHER);
- if (dev == NULL) {
- en_err(priv, "Net device allocation failed\n");
- kfree(priv);
- return -ENOMEM;
- }
if_setsoftc(dev, priv);
if_initname(dev, "mlxen", (device_get_unit(
mdev->pdev->dev.bsddev) * MLX4_MAX_PORTS) + port - 1);
diff --git a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
index ee8ed0da240d..d45ccacd7499 100644
--- a/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
+++ b/sys/dev/mlx4/mlx4_en/mlx4_en_tx.c
@@ -91,8 +91,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size_mask = size - 1;
ring->stride = stride;
ring->inline_thold = MAX(MIN_PKT_LEN, MIN(priv->prof->inline_thold, MAX_INLINE));
- mtx_init(&ring->tx_lock.m, "mlx4 tx", NULL, MTX_DEF);
- mtx_init(&ring->comp_lock.m, "mlx4 comp", NULL, MTX_DEF);
+ mtx_init(&ring->tx_lock, "mlx4 tx", NULL, MTX_DEF);
+ mtx_init(&ring->comp_lock, "mlx4 comp", NULL, MTX_DEF);
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = kzalloc_node(tmp, GFP_KERNEL, node);
@@ -205,8 +205,8 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
for (x = 0; x != ring->size; x++)
bus_dmamap_destroy(ring->dma_tag, ring->tx_info[x].dma_map);
vfree(ring->tx_info);
- mtx_destroy(&ring->tx_lock.m);
- mtx_destroy(&ring->comp_lock.m);
+ mtx_destroy(&ring->tx_lock);
+ mtx_destroy(&ring->comp_lock);
bus_dma_tag_destroy(ring->dma_tag);
kfree(ring);
*pring = NULL;
@@ -688,7 +688,7 @@ int mlx4_en_xmit(struct mlx4_en_priv *priv, int tx_ind, struct mbuf **mbp)
dseg = &tx_desc->data;
/* send a copy of the frame to the BPF listener, if any */
- if (ifp != NULL && if_getbpf(ifp) != NULL)
+ if (ifp != NULL)
ETHER_BPF_MTAP(ifp, mb);
/* get default flags */
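Besides the lock fields apparently becoming plain struct mtx (so mtx_init()/mtx_destroy() no longer reach through a wrapper's .m member), the if_getbpf() test before ETHER_BPF_MTAP() is dropped: the macro already checks for attached BPF peers before copying the frame, so the caller-side guard was redundant. Roughly, per my reading of net/ethernet.h (illustrative, not a quote):

    /* What ETHER_BPF_MTAP(ifp, mb) already does internally, sketched: */
    if (bpf_peers_present(if_getbpf(ifp))) {
            /* account for a pending VLAN tag, then tap the mbuf */
            bpf_mtap(if_getbpf(ifp), mb);
    }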
diff --git a/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c b/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
index 3cda28752592..b391946440b9 100644
--- a/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
+++ b/sys/dev/mlx4/mlx4_ib/mlx4_ib_qp.c
@@ -488,7 +488,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER)))
qp->sq.wqe_shift = ilog2(64);
else
- qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+ qp->sq.wqe_shift = order_base_2(s);
for (;;) {
qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
@@ -544,7 +544,7 @@ static int set_user_sq_size(struct mlx4_ib_dev *dev,
/* Sanity check SQ size before proceeding */
if ((1 << ucmd->log_sq_bb_count) > dev->dev->caps.max_wqes ||
ucmd->log_sq_stride >
- ilog2(roundup_pow_of_two(dev->dev->caps.max_sq_desc_sz)) ||
+ order_base_2(dev->dev->caps.max_sq_desc_sz) ||
ucmd->log_sq_stride < MLX4_IB_MIN_SQ_STRIDE)
return -EINVAL;
diff --git a/sys/dev/mlx5/cq.h b/sys/dev/mlx5/cq.h
index 02de7f66e260..cc551e65dead 100644
--- a/sys/dev/mlx5/cq.h
+++ b/sys/dev/mlx5/cq.h
@@ -111,6 +111,12 @@ struct mlx5_cq_modify_params {
} params;
};
+enum {
+ CQE_STRIDE_64 = 0,
+ CQE_STRIDE_128 = 1,
+ CQE_STRIDE_128_PAD = 2,
+};
+
static inline int cqe_sz_to_mlx_sz(u8 size)
{
return size == 64 ? CQE_SIZE_64 : CQE_SIZE_128;
diff --git a/sys/dev/mlx5/crypto.h b/sys/dev/mlx5/crypto.h
new file mode 100644
index 000000000000..3b2c4c218ef2
--- /dev/null
+++ b/sys/dev/mlx5/crypto.h
@@ -0,0 +1,36 @@
+/*-
+ * Copyright (c) 2023, NVIDIA Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MLX5_CRYPTO_H__
+#define __MLX5_CRYPTO_H__
+
+struct mlx5_core_dev;
+
+int mlx5_encryption_key_create(struct mlx5_core_dev *, u32 pdn, u32 key_type,
+ const void *p_key, u32 key_len, u32 *p_obj_id);
+int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid);
+
+#endif /* __MLX5_CRYPTO_H__ */
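The new header deliberately exposes only a create/destroy pair for encryption-key objects and forward-declares mlx5_core_dev, so it can be included without pulling in driver internals. A hypothetical caller might look like this (pdn, key_type, key, and key_len are illustrative placeholders, not values defined by this header):

    u32 obj_id;
    int err;

    err = mlx5_encryption_key_create(mdev, pdn, key_type, key, key_len,
        &obj_id);
    if (err != 0)
            return (err);
    /* ... reference obj_id from crypto-capable steering rules or WQEs ... */
    err = mlx5_encryption_key_destroy(mdev, obj_id);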
diff --git a/sys/dev/mlx5/device.h b/sys/dev/mlx5/device.h
index 4b7cf8686794..3e2c4f15a5cc 100644
--- a/sys/dev/mlx5/device.h
+++ b/sys/dev/mlx5/device.h
@@ -32,8 +32,8 @@
#define FW_INIT_TIMEOUT_MILI 2000
#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 120000
-#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
+#define FW_PRE_INIT_TIMEOUT_MILI 5000
+#define FW_INIT_WARN_MESSAGE_INTERVAL 2000
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
@@ -148,12 +148,14 @@ __mlx5_mask16(typ, fld))
tmp; \
})
-#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8
-#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8
+#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
+#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
+#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\
- MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
- MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
+ MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\
+ MLX5_BY_PASS_NUM_MULTICAST_PRIOS)
/* insert a value to a struct */
#define MLX5_VSC_SET(typ, p, fld, v) do { \
@@ -391,6 +393,8 @@ enum {
MLX5_OPCODE_UMR = 0x25,
MLX5_OPCODE_QOS_REMAP = 0x2a,
+ MLX5_OPCODE_ACCESS_ASO = 0x2d,
+
MLX5_OPCODE_SIGNATURE_CANCELED = (1 << 15),
};
@@ -567,6 +571,11 @@ struct mlx5_eqe_vport_change {
__be32 rsvd1[6];
};
+struct mlx5_eqe_obj_change {
+ u8 rsvd0[2];
+ __be16 obj_type;
+ __be32 obj_id;
+};
#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
@@ -638,6 +647,7 @@ union ev_data {
struct mlx5_eqe_dct dct;
struct mlx5_eqe_temp_warning temp_warning;
struct mlx5_eqe_xrq_err xrq_err;
+ struct mlx5_eqe_obj_change obj_change;
} __packed;
struct mlx5_eqe {
@@ -703,7 +713,12 @@ struct mlx5_cqe64 {
u8 l4_hdr_type_etc;
__be16 vlan_info;
__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
- __be32 imm_inval_pkey;
+ union {
+ __be32 immediate;
+ __be32 inval_rkey;
+ __be32 pkey;
+ __be32 ft_metadata;
+ };
u8 rsvd40[4];
__be32 byte_cnt;
__be64 timestamp;
@@ -712,6 +727,7 @@ struct mlx5_cqe64 {
u8 signature;
u8 op_own;
};
+_Static_assert(sizeof(struct mlx5_cqe64) == 0x40, "CQE layout broken");
#define MLX5_CQE_TSTMP_PTP (1ULL << 63)
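Turning imm_inval_pkey into an anonymous union is safe only if it cannot change the CQE size, and struct mlx5_cqe64 is a hardware-defined 64-byte layout; the added _Static_assert makes any accidental padding a compile-time error. The same pattern pins any DMA-visible structure:

    /* Compile-time layout pin for a wire/DMA structure: the build
     * fails immediately if a field edit changes the size. */
    struct example_hw_desc {
            uint64_t addr;
            uint32_t len;
            uint32_t flags;
    };
    _Static_assert(sizeof(struct example_hw_desc) == 16,
        "descriptor layout broken");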
@@ -919,6 +935,7 @@ enum {
MLX5_MATCH_OUTER_HEADERS = 1 << 0,
MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
MLX5_MATCH_INNER_HEADERS = 1 << 2,
+ MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
};
@@ -988,12 +1005,19 @@ enum mlx5_cap_type {
MLX5_CAP_VECTOR_CALC,
MLX5_CAP_QOS,
MLX5_CAP_DEBUG,
- MLX5_CAP_NVME,
- MLX5_CAP_DMC,
- MLX5_CAP_DEC,
+ MLX5_CAP_RESERVED_14,
+ MLX5_CAP_DEV_MEM,
+ MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
+ MLX5_CAP_IPSEC,
+ MLX5_CAP_CRYPTO = 0x1a,
+ MLX5_CAP_DEV_SHAMPO = 0x1d,
+ MLX5_CAP_MACSEC = 0x1f,
MLX5_CAP_GENERAL_2 = 0x20,
+ MLX5_CAP_PORT_SELECTION = 0x25,
+ MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
/* NUM OF CAP Types */
MLX5_CAP_NUM
};
@@ -1058,6 +1082,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_nic_cap, (mdev)->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
@@ -1067,6 +1094,36 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)
+#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)
+
+#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
+#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
+ MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)
+
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
@@ -1093,14 +1150,46 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)
+
+#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
+ MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)
+
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
+ MLX5_GET64(flow_table_eswitch_cap, \
+ (mdev)->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_PORT_SELECTION], cap)
+
+#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_GET(port_selection_cap, \
+ mdev->hca_caps_max[MLX5_CAP_PORT_SELECTION], cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->hca_caps_cur[MLX5_CAP_ADV_VIRTUALIZATION], cap)
+
+#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
+ MLX5_GET(adv_virtualization_cap, \
+ mdev->hca_caps_max[MLX5_CAP_ADV_VIRTUALIZATION], cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+
+#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
+ MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)
+
#define MLX5_CAP_ODP(mdev, cap)\
MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
@@ -1169,6 +1258,9 @@ enum mlx5_mcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->hca_caps_cur[MLX5_CAP_DEV_EVENT], cap)
+#define MLX5_CAP_IPSEC(mdev, cap) \
+ MLX5_GET(ipsec_cap, (mdev)->hca_caps_cur[MLX5_CAP_IPSEC], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/sys/dev/mlx5/doorbell.h b/sys/dev/mlx5/doorbell.h
index 11ce9ae6649f..4a32e74623a0 100644
--- a/sys/dev/mlx5/doorbell.h
+++ b/sys/dev/mlx5/doorbell.h
@@ -61,10 +61,12 @@ static inline void mlx5_write64(__be32 val[2], void __iomem *dest,
{
unsigned long flags;
- spin_lock_irqsave(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_lock_irqsave(doorbell_lock, flags);
__raw_writel((__force u32) val[0], dest);
__raw_writel((__force u32) val[1], dest + 4);
- spin_unlock_irqrestore(doorbell_lock, flags);
+ if (doorbell_lock)
+ spin_unlock_irqrestore(doorbell_lock, flags);
}
#endif
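mlx5_write64() always performs two 32-bit writes, so the spinlock only matters where those stores must appear atomic to the device, for example a doorbell page shared between contexts; callers that own their doorbell exclusively may now pass a NULL doorbell_lock and skip the lock/unlock round trip. A hedged usage sketch (the uar field names are illustrative):

    /* Exclusive UAR/doorbell: no lock needed for the paired stores. */
    mlx5_write64(ctrl, uar->map + doorbell_offset, NULL);

    /* Shared doorbell page: keep the two stores atomic w.r.t. peers. */
    mlx5_write64(ctrl, uar->map + doorbell_offset, &uar->lock32);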
diff --git a/sys/dev/mlx5/driver.h b/sys/dev/mlx5/driver.h
index 6ebe4171a503..cdefe7e013f6 100644
--- a/sys/dev/mlx5/driver.h
+++ b/sys/dev/mlx5/driver.h
@@ -634,11 +634,13 @@ struct mlx5_priv {
#endif
struct mlx5_pme_stats pme_stats;
+ struct mlx5_flow_steering *steering;
struct mlx5_eswitch *eswitch;
struct mlx5_bfreg_data bfregs;
struct mlx5_uars_page *uar;
struct mlx5_fc_stats fc_stats;
+ struct mlx5_ft_pool *ft_pool;
};
enum mlx5_device_state {
@@ -721,13 +723,18 @@ struct mlx5_core_dev {
u32 vsc_addr;
u32 issi;
struct mlx5_special_contexts special_contexts;
- unsigned int module_status[MLX5_MAX_PORTS];
+ unsigned int module_status;
+ unsigned int module_num;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
struct mlx5_flow_root_namespace *esw_ingress_root_ns;
struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
+ struct mlx5_flow_root_namespace *nic_tx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_tx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_rx_root_ns;
+
u32 num_q_counter_allocated[MLX5_INTERFACE_NUMBER];
struct mlx5_crspace_regmap *dump_rege;
uint32_t *dump_data;
@@ -756,6 +763,7 @@ struct mlx5_core_dev {
#ifdef CONFIG_MLX5_FPGA
struct mlx5_fpga_device *fpga;
#endif
+ struct xarray ipsec_sadb;
};
enum {
diff --git a/sys/dev/mlx5/fs.h b/sys/dev/mlx5/fs.h
index 8107726bc76a..6bb05e004479 100644
--- a/sys/dev/mlx5/fs.h
+++ b/sys/dev/mlx5/fs.h
@@ -33,8 +33,33 @@
#include <dev/mlx5/device.h>
#include <dev/mlx5/driver.h>
+enum mlx5_flow_destination_type {
+ MLX5_FLOW_DESTINATION_TYPE_NONE,
+ MLX5_FLOW_DESTINATION_TYPE_VPORT,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ MLX5_FLOW_DESTINATION_TYPE_TIR,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER,
+ MLX5_FLOW_DESTINATION_TYPE_UPLINK,
+ MLX5_FLOW_DESTINATION_TYPE_PORT,
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
+ MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE,
+};
+
enum {
- MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16,
+ MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17,
+ MLX5_FLOW_CONTEXT_ACTION_DECRYPT = 1 << 18,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS = 1 << 19,
+};
+
+enum {
+ MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
+ MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
+ MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
};
/*Flow tag*/
@@ -44,12 +69,6 @@ enum {
MLX5_FS_SNIFFER_FLOW_TAG = 0xFFFFFD,
};
-enum mlx5_rule_fwd_action {
- MLX5_FLOW_RULE_FWD_ACTION_ALLOW = 0x1,
- MLX5_FLOW_RULE_FWD_ACTION_DROP = 0x2,
- MLX5_FLOW_RULE_FWD_ACTION_DEST = 0x4,
-};
-
enum {
MLX5_FS_FLOW_TAG_MASK = 0xFFFFFF,
};
@@ -61,54 +80,149 @@ enum {
enum mlx5_flow_namespace_type {
MLX5_FLOW_NAMESPACE_BYPASS,
+ MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC,
+ MLX5_FLOW_NAMESPACE_LAG,
MLX5_FLOW_NAMESPACE_OFFLOADS,
+ MLX5_FLOW_NAMESPACE_ETHTOOL,
MLX5_FLOW_NAMESPACE_KERNEL,
MLX5_FLOW_NAMESPACE_LEFTOVERS,
- MLX5_FLOW_NAMESPACE_SNIFFER_RX,
- MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_ANCHOR,
+ MLX5_FLOW_NAMESPACE_FDB_BYPASS,
MLX5_FLOW_NAMESPACE_FDB,
MLX5_FLOW_NAMESPACE_ESW_EGRESS,
MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+ MLX5_FLOW_NAMESPACE_SNIFFER_RX,
+ MLX5_FLOW_NAMESPACE_SNIFFER_TX,
+ MLX5_FLOW_NAMESPACE_EGRESS,
+ MLX5_FLOW_NAMESPACE_EGRESS_IPSEC,
+ MLX5_FLOW_NAMESPACE_EGRESS_MACSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
+ MLX5_FLOW_NAMESPACE_PORT_SEL,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC,
+ MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC,
+};
+
+enum {
+ FDB_BYPASS_PATH,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
+ FDB_TC_MISS,
+ FDB_BR_OFFLOAD,
+ FDB_SLOW_PATH,
+ FDB_PER_VPORT,
};
struct mlx5_flow_table;
struct mlx5_flow_group;
struct mlx5_flow_rule;
struct mlx5_flow_namespace;
+struct mlx5_flow_handle;
+
+enum {
+ FLOW_CONTEXT_HAS_TAG = BIT(0),
+};
+
+struct mlx5_flow_context {
+ u32 flags;
+ u32 flow_tag;
+ u32 flow_source;
+};
struct mlx5_flow_spec {
u8 match_criteria_enable;
u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
+ struct mlx5_flow_context flow_context;
+};
+
+enum {
+ MLX5_FLOW_DEST_VPORT_VHCA_ID = BIT(0),
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
+};
+
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
};
struct mlx5_flow_destination {
- u32 type;
+ enum mlx5_flow_destination_type type;
union {
- u32 tir_num;
- struct mlx5_flow_table *ft;
- u32 vport_num;
+ u32 tir_num;
+ u32 ft_num;
+ struct mlx5_flow_table *ft;
+ u32 counter_id;
+ struct {
+ u16 num;
+ u16 vhca_id;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ u8 flags;
+ } vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
+ u32 sampler_id;
};
};
-enum mlx5_flow_act_actions {
- MLX5_FLOW_ACT_ACTIONS_FLOW_TAG = 1 << 0,
- MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR = 1 << 1,
- MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT = 1 << 2,
- MLX5_FLOW_ACT_ACTIONS_COUNT = 1 << 3,
+struct mlx5_exe_aso {
+ u32 object_id;
+ u8 type;
+ u8 return_reg_id;
+ union {
+ u32 ctrl_data;
+ struct {
+ u8 meter_idx;
+ u8 init_color;
+ } flow_meter;
+ };
+};
+
+enum {
+ FLOW_ACT_NO_APPEND = BIT(0),
+ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
+};
+
+struct mlx5_fs_vlan {
+ u16 ethtype;
+ u16 vid;
+ u8 prio;
+};
+
+#define MLX5_FS_VLAN_DEPTH 2
+
+enum mlx5_flow_act_crypto_type {
+ MLX5_FLOW_ACT_CRYPTO_TYPE_IPSEC,
+};
+
+enum mlx5_flow_act_crypto_op {
+ MLX5_FLOW_ACT_CRYPTO_OP_ENCRYPT,
+ MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT,
};
-enum MLX5_FLOW_ACT_FLAGS {
- MLX5_FLOW_ACT_NO_APPEND = 1 << 0,
+struct mlx5_flow_act_crypto_params {
+ u32 obj_id;
+ u8 type; /* see enum mlx5_flow_act_crypto_type */
+ u8 op; /* see enum mlx5_flow_act_crypto_op */
};
struct mlx5_flow_act {
- u32 actions; /* See enum mlx5_flow_act_actions */
- u32 flags;
- u32 flow_tag;
- struct mlx5_modify_hdr *modify_hdr;
+ u32 action;
+ struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat;
- struct mlx5_fc *counter;
+ struct mlx5_flow_act_crypto_params crypto;
+ u32 flags;
+ struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
+ struct ib_counters *counters;
+ struct mlx5_flow_group *fg;
+ struct mlx5_exe_aso exe_aso;
};
#define FT_NAME_STR_SZ 20
@@ -136,6 +250,28 @@ static inline bool outer_header_zero(u32 *match_criteria)
}
struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport);
+
+struct mlx5_flow_table_attr {
+ int prio;
+ int max_fte;
+ u32 level;
+ u32 flags;
+ u16 uid;
+ struct mlx5_flow_table *next_ft;
+
+ struct {
+ int max_num_groups;
+ int num_reserved_entries;
+ } autogroup;
+};
+
+struct mlx5_flow_namespace *
+mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, int n);
+
+struct mlx5_flow_namespace *
mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type);
@@ -145,24 +281,19 @@ mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
*/
struct mlx5_flow_table *
mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- const char *name,
- int num_flow_table_entries,
- int max_num_groups,
- int num_reserved_entries);
+ struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- u16 vport,
- int prio,
- const char *name,
- int num_flow_table_entries);
+ struct mlx5_flow_table_attr *ft_attr, u16 vport);
+
+struct mlx5_flow_table *mlx5_create_lag_demux_flow_table(
+ struct mlx5_flow_namespace *ns,
+ int prio, u32 level);
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- const char *name,
- int num_flow_table_entries);
+ struct mlx5_flow_table_attr *ft_attr);
int mlx5_destroy_flow_table(struct mlx5_flow_table *ft);
/* inbox should be set with the following values:
@@ -175,18 +306,17 @@ struct mlx5_flow_group *
mlx5_create_flow_group(struct mlx5_flow_table *ft, u32 *in);
void mlx5_destroy_flow_group(struct mlx5_flow_group *fg);
-/* Single destination per rule.
- * Group ID is implied by the match criteria.
- */
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 sw_action,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest);
-void mlx5_del_flow_rule(struct mlx5_flow_rule **);
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest);
+void mlx5_del_flow_rules(struct mlx5_flow_handle **pp);
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+ struct mlx5_flow_destination *new_dest,
+ struct mlx5_flow_destination *old_dest);
/*The following API is for sniffer*/
typedef int (*rule_event_fn)(struct mlx5_flow_rule *rule,
@@ -292,4 +422,8 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
u64 *packets, u64 *bytes);
u32 mlx5_fc_id(struct mlx5_fc *counter);
/******* End of Flow counters API ******/
+
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft);
+int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
#endif
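The fs.h rework is the largest API change here: rule creation now returns an opaque struct mlx5_flow_handle, supports multiple destinations per rule, and table creation takes a struct mlx5_flow_table_attr instead of a long positional argument list. A minimal caller sketch against the new prototypes (error handling elided; MLX5_FLOW_CONTEXT_ACTION_FWD_DEST and the zeroed catch-all spec are assumptions carried over from the Linux-derived API, not shown in this hunk):

    struct mlx5_flow_table_attr ft_attr = {
            .prio = 0,
            .max_fte = 64,
            .autogroup = { .max_num_groups = 2 },
    };
    struct mlx5_flow_act flow_act = {
            .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
    };
    struct mlx5_flow_destination dest = {
            .type = MLX5_FLOW_DESTINATION_TYPE_TIR,
            .tir_num = tirn,
    };
    struct mlx5_flow_spec spec = {};    /* empty match: catch-all in group */
    struct mlx5_flow_table *ft;
    struct mlx5_flow_handle *rule;

    ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
    rule = mlx5_add_flow_rules(ft, &spec, &flow_act, &dest, /*num_dest*/1);
    /* ... */
    mlx5_del_flow_rules(&rule);
    mlx5_destroy_flow_table(ft);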
diff --git a/sys/dev/mlx5/mlx5_accel/ipsec.h b/sys/dev/mlx5/mlx5_accel/ipsec.h
index c020d41cd875..c3f3a2372482 100644
--- a/sys/dev/mlx5/mlx5_accel/ipsec.h
+++ b/sys/dev/mlx5/mlx5_accel/ipsec.h
@@ -1,137 +1,280 @@
/*-
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2023 NVIDIA corporation & affiliates.
*
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
*
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
*
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
*/
#ifndef __MLX5_ACCEL_IPSEC_H__
#define __MLX5_ACCEL_IPSEC_H__
-#ifdef CONFIG_MLX5_ACCEL
-
+#include <sys/mbuf.h>
#include <dev/mlx5/driver.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/mlx5_core/mlx5_core.h>
+#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/mlx5_lib/aso.h>
+
+#define MLX5E_IPSEC_SADB_RX_BITS 10
+#define MLX5_IPSEC_METADATA_MARKER(ipsec_metadata) ((ipsec_metadata >> 31) & 0x1)
+
+#define VLAN_NONE 0xfff
+
+struct mlx5e_priv;
+struct mlx5e_tx_wqe;
+struct mlx5e_ipsec_tx;
+struct mlx5e_ipsec_rx;
+struct mlx5e_ipsec_rx_ip_type;
+
+struct aes_gcm_keymat {
+ u64 seq_iv;
-enum {
- MLX5_ACCEL_IPSEC_DEVICE = BIT(1),
- MLX5_ACCEL_IPSEC_IPV6 = BIT(2),
- MLX5_ACCEL_IPSEC_ESP = BIT(3),
- MLX5_ACCEL_IPSEC_LSO = BIT(4),
+ u32 salt;
+ u32 icv_len;
+
+ u32 key_len;
+ u32 aes_key[256 / 32];
+};
+
+struct mlx5e_ipsec_priv_bothdir {
+ struct mlx5e_ipsec_sa_entry *priv_in;
+ struct mlx5e_ipsec_sa_entry *priv_out;
};
-#define MLX5_IPSEC_SADB_IP_AH BIT(7)
-#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
-#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
-#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
-#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
-#define MLX5_IPSEC_SADB_IPV6 BIT(2)
+struct mlx5e_ipsec_work {
+ struct work_struct work;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ void *data;
+};
+
+struct mlx5e_ipsec_dwork {
+ struct delayed_work dwork;
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5e_ipsec_priv_bothdir *pb;
+};
-enum {
- MLX5_IPSEC_CMD_ADD_SA = 0,
- MLX5_IPSEC_CMD_DEL_SA = 1,
+struct mlx5e_ipsec_aso {
+ u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
+ dma_addr_t dma_addr;
+ struct mlx5_aso *aso;
+ /* Protect ASO WQ access, as it is global to whole IPsec */
+ spinlock_t lock;
};
-enum mlx5_accel_ipsec_enc_mode {
- MLX5_IPSEC_SADB_MODE_NONE = 0,
- MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
- MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
+struct mlx5_replay_esn {
+ u32 replay_window;
+ u32 esn;
+ u32 esn_msb;
+ u8 overlap : 1;
+ u8 trigger : 1;
};
-#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
- MLX5_ACCEL_IPSEC_DEVICE)
+struct mlx5_accel_esp_xfrm_attrs {
+ u32 spi;
+ struct aes_gcm_keymat aes_gcm;
-struct mlx5_accel_ipsec_sa {
- __be32 cmd;
- u8 key_enc[32];
- u8 key_auth[32];
- __be32 sip[4];
- __be32 dip[4];
union {
- struct {
- __be32 reserved;
- u8 salt_iv[8];
- __be32 salt;
- } __packed gcm;
- struct {
- u8 salt[16];
- } __packed cbc;
- };
- __be32 spi;
- __be32 sw_sa_handle;
- __be16 tfclen;
- u8 enc_mode;
- u8 sip_masklen;
- u8 dip_masklen;
- u8 flags;
- u8 reserved[2];
-} __packed;
-
-/**
- * mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
- * @mdev: mlx5 device
- * @cmd: command to execute
- * May be called from atomic context. Returns context pointer, or error
- * Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
- * context, to cleanup the context pointer
- */
-void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
- struct mlx5_accel_ipsec_sa *cmd);
-
-/**
- * mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
- * @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
- * Sleeps (killable) until command execution is complete.
- * Returns the command result, or -EINTR if killed
- */
-int mlx5_accel_ipsec_sa_cmd_wait(void *context);
+ __be32 a4;
+ __be32 a6[4];
+ } saddr;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } daddr;
+
+ u8 dir : 2;
+ u8 encap : 1;
+ u8 drop : 1;
+ u8 family;
+ struct mlx5_replay_esn replay_esn;
+ u32 authsize;
+ u32 reqid;
+ u16 sport;
+ u16 dport;
+};
-u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev);
+enum mlx5_ipsec_cap {
+ MLX5_IPSEC_CAP_CRYPTO = 1 << 0,
+ MLX5_IPSEC_CAP_ESN = 1 << 1,
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
+ MLX5_IPSEC_CAP_ROCE = 1 << 3,
+ MLX5_IPSEC_CAP_PRIO = 1 << 4,
+ MLX5_IPSEC_CAP_TUNNEL = 1 << 5,
+ MLX5_IPSEC_CAP_ESPINUDP = 1 << 6,
+};
+
+struct mlx5e_ipsec {
+ struct mlx5_core_dev *mdev;
+ struct workqueue_struct *wq;
+ struct mlx5e_ipsec_tx *tx;
+ struct mlx5e_ipsec_rx *rx_ipv4;
+ struct mlx5e_ipsec_rx *rx_ipv6;
+ struct mlx5e_ipsec_rx_ip_type *rx_ip_type;
+ struct mlx5e_ipsec_aso *aso;
+ u32 pdn;
+ u32 mkey;
+};
-unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
-int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
- unsigned int count);
+struct mlx5e_ipsec_rule {
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_handle *kspi_rule;
+ struct mlx5_flow_handle *reqid_rule;
+ struct mlx5_flow_handle *vid_zero_rule;
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_fc *fc;
+};
-int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
-void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
+struct mlx5e_ipsec_esn_state {
+ u32 esn;
+ u32 esn_msb;
+ u8 overlap: 1;
+};
-#else
+struct mlx5e_ipsec_sa_entry {
+ struct secasvar *savp;
+ if_t ifp;
+ if_t ifpo;
+ struct mlx5e_ipsec *ipsec;
+ struct mlx5_accel_esp_xfrm_attrs attrs;
+ struct mlx5e_ipsec_rule ipsec_rule;
+ struct mlx5e_ipsec_dwork *dwork;
+ struct mlx5e_ipsec_work *work;
+ u32 ipsec_obj_id;
+ u32 enc_key_id;
+ u16 kspi; /* Stack allocated unique SA identifier */
+ struct mlx5e_ipsec_esn_state esn_state;
+ u16 vid;
+};
-#define MLX5_IPSEC_DEV(mdev) false
+struct upspec {
+ u16 dport;
+ u16 sport;
+ u8 proto;
+};
-static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
+struct mlx5_accel_pol_xfrm_attrs {
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } saddr;
+
+ union {
+ __be32 a4;
+ __be32 a6[4];
+ } daddr;
+
+ struct upspec upspec;
+
+ u8 family;
+ u8 action;
+ u8 dir : 2;
+ u32 reqid;
+ u32 prio;
+ u16 vid;
+};
+
+struct mlx5e_ipsec_pol_entry {
+ struct secpolicy *sp;
+ struct mlx5e_ipsec *ipsec;
+ struct mlx5e_ipsec_rule ipsec_rule;
+ struct mlx5_accel_pol_xfrm_attrs attrs;
+};
+
+/* This function doesn't really belong here, but let's put it here for now */
+void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
+
+int mlx5e_ipsec_init(struct mlx5e_priv *priv);
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv);
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry);
+
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev);
+
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_sa2dev(struct mlx5e_ipsec_sa_entry *sa_entry)
{
- return 0;
+ return sa_entry->ipsec->mdev;
}
-static inline void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
+static inline struct mlx5_core_dev *
+mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{
+ return pol_entry->ipsec->mdev;
}
-#endif
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u8 dir);
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec);
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec);
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry);
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry);
+struct ipsec_accel_out_tag;
+void mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
+ struct ipsec_accel_out_tag *tag);
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry);
+static inline int mlx5e_accel_ipsec_get_metadata(unsigned int id)
+{
+ return MLX5_ETH_WQE_FT_META_IPSEC << 23 | id;
+}
+static inline void
+mlx5e_accel_ipsec_handle_tx(struct mbuf *mb, struct mlx5e_tx_wqe *wqe)
+{
+ struct ipsec_accel_out_tag *tag;
+
+ tag = (struct ipsec_accel_out_tag *)m_tag_find(mb,
+ PACKET_TAG_IPSEC_ACCEL_OUT, NULL);
+ if (tag != NULL)
+ mlx5e_accel_ipsec_handle_tx_wqe(mb, wqe, tag);
+}
+void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv);
+int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv);
+void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv);
+int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv);
+int mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr);
+void mlx5e_accel_ipsec_handle_rx_cqe(if_t ifp, struct mbuf *mb,
+ struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr);
+
+static inline int mlx5e_accel_ipsec_flow(struct mlx5_cqe64 *cqe)
+{
+ return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata));
+}
+
+static inline void
+mlx5e_accel_ipsec_handle_rx(if_t ifp, struct mbuf *mb, struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq_mbuf *mr)
+{
+ u32 ipsec_meta_data = be32_to_cpu(cqe->ft_metadata);
+
+ if (MLX5_IPSEC_METADATA_MARKER(ipsec_meta_data))
+ mlx5e_accel_ipsec_handle_rx_cqe(ifp, mb, cqe, mr);
+}
#endif /* __MLX5_ACCEL_IPSEC_H__ */
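On RX the device marks packets it decrypted by setting bit 31 of the CQE's ft_metadata word, which is exactly what MLX5_IPSEC_METADATA_MARKER() extracts and what the inline mlx5e_accel_ipsec_handle_rx() above keys on. A worked example of the arithmetic:

    u32 md;

    md = be32_to_cpu(cqe->ft_metadata);
    /* md = 0x80000123 -> (md >> 31) & 1 == 1: IPsec-offloaded packet  */
    /* md = 0x00000123 -> (md >> 31) & 1 == 0: take the normal RX path */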
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec.c
new file mode 100644
index 000000000000..3f3c575c9dad
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec.c
@@ -0,0 +1,821 @@
+/*-
+ * Copyright (c) 2023 NVIDIA corporation & affiliates.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_ipsec.h"
+
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/pfkeyv2.h>
+#include <netipsec/key.h>
+#include <netipsec/key_var.h>
+#include <netipsec/keydb.h>
+#include <netipsec/ipsec.h>
+#include <netipsec/xform.h>
+#include <netipsec/ipsec_offload.h>
+#include <dev/mlx5/fs.h>
+#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
+
+#define MLX5_IPSEC_RESCHED msecs_to_jiffies(1000)
+
+static void mlx5e_if_sa_deinstall_onekey(struct ifnet *ifp, u_int dev_spi,
+ void *priv);
+static int mlx5e_if_sa_deinstall(struct ifnet *ifp, u_int dev_spi, void *priv);
+
+static struct mlx5e_ipsec_sa_entry *to_ipsec_sa_entry(void *x)
+{
+ return (struct mlx5e_ipsec_sa_entry *)x;
+}
+
+static struct mlx5e_ipsec_pol_entry *to_ipsec_pol_entry(void *x)
+{
+ return (struct mlx5e_ipsec_pol_entry *)x;
+}
+
+static void
+mlx5e_ipsec_handle_counters_onedir(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u64 *packets, u64 *bytes)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
+}
+
+static struct mlx5e_ipsec_sa_entry *
+mlx5e_ipsec_other_sa_entry(struct mlx5e_ipsec_priv_bothdir *pb,
+ struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ return (pb->priv_in == sa_entry ? pb->priv_out : pb->priv_in);
+}
+
+static void
+mlx5e_ipsec_handle_counters(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_dwork *dwork =
+ container_of(_work, struct mlx5e_ipsec_dwork, dwork.work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = dwork->sa_entry;
+ struct mlx5e_ipsec_sa_entry *other_sa_entry;
+ u64 bytes, bytes1, packets1, packets;
+
+ if (sa_entry->attrs.drop)
+ return;
+ other_sa_entry = mlx5e_ipsec_other_sa_entry(dwork->pb, sa_entry);
+ if (other_sa_entry == NULL || other_sa_entry->attrs.drop)
+ return;
+
+ mlx5e_ipsec_handle_counters_onedir(sa_entry, &packets, &bytes);
+ mlx5e_ipsec_handle_counters_onedir(other_sa_entry, &packets1, &bytes1);
+ packets += packets1;
+ bytes += bytes1;
+
+#ifdef IPSEC_OFFLOAD
+ ipsec_accel_drv_sa_lifetime_update(
+ sa_entry->savp, sa_entry->ifpo, sa_entry->kspi, bytes, packets);
+#endif
+
+ queue_delayed_work(sa_entry->ipsec->wq, &dwork->dwork,
+ MLX5_IPSEC_RESCHED);
+}
+
+static int
+mlx5e_ipsec_create_dwork(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_priv_bothdir *pb)
+{
+ struct mlx5e_ipsec_dwork *dwork;
+
+ dwork = kzalloc(sizeof(*dwork), GFP_KERNEL);
+ if (!dwork)
+ return (ENOMEM);
+
+ dwork->sa_entry = sa_entry;
+ dwork->pb = pb;
+ INIT_DELAYED_WORK(&dwork->dwork, mlx5e_ipsec_handle_counters);
+ sa_entry->dwork = dwork;
+ return 0;
+}
+
+static int mlx5_xform_ah_authsize(const struct auth_hash *esph)
+{
+ int alen;
+
+ if (esph == NULL)
+ return 0;
+
+ switch (esph->type) {
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ alen = esph->hashsize / 2; /* RFC4868 2.3 */
+ break;
+
+ case CRYPTO_POLY1305:
+ case CRYPTO_AES_NIST_GMAC:
+ alen = esph->hashsize;
+ break;
+
+ default:
+ alen = AH_HMAC_HASHLEN;
+ break;
+ }
+
+ return alen;
+}
+
+void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ u8 dir)
+{
+ struct secasvar *savp = sa_entry->savp;
+ const struct auth_hash *esph = savp->tdb_authalgxform;
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
+ struct secasindex *saidx = &savp->sah->saidx;
+ struct seckey *key_encap = savp->key_enc;
+ int key_len;
+
+ memset(attrs, 0, sizeof(*attrs));
+
+ /* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */
+ key_len = _KEYLEN(key_encap) - SAV_ISCTRORGCM(savp) * 4 - SAV_ISCHACHA(savp) * 4;
+
+ memcpy(aes_gcm->aes_key, key_encap->key_data, key_len);
+ aes_gcm->key_len = key_len;
+
+ /* salt and seq_iv */
+ aes_gcm->seq_iv = 0;
+ memcpy(&aes_gcm->salt, key_encap->key_data + key_len,
+ sizeof(aes_gcm->salt));
+
+ switch (savp->alg_enc) {
+ case SADB_X_EALG_AESGCM8:
+ attrs->authsize = 8 / 4; /* in dwords */
+ break;
+ case SADB_X_EALG_AESGCM12:
+ attrs->authsize = 12 / 4; /* in dwords */
+ break;
+ case SADB_X_EALG_AESGCM16:
+ attrs->authsize = 16 / 4; /* in dwords */
+ break;
+ default: break;
+ }
+
+ /* icv len */
+ aes_gcm->icv_len = mlx5_xform_ah_authsize(esph); //TBD: check if value makes sense
+
+ attrs->dir = dir;
+ /* spi - host order */
+ attrs->spi = ntohl(savp->spi);
+ attrs->family = saidx->dst.sa.sa_family;
+ attrs->reqid = saidx->reqid;
+
+ if (saidx->src.sa.sa_family == AF_INET) {
+ attrs->saddr.a4 = saidx->src.sin.sin_addr.s_addr;
+ attrs->daddr.a4 = saidx->dst.sin.sin_addr.s_addr;
+ } else {
+ memcpy(&attrs->saddr.a6, &saidx->src.sin6.sin6_addr, 16);
+ memcpy(&attrs->daddr.a6, &saidx->dst.sin6.sin6_addr, 16);
+ }
+
+ if (savp->natt) {
+ attrs->encap = true;
+ attrs->sport = savp->natt->sport;
+ attrs->dport = savp->natt->dport;
+ }
+
+ if (savp->flags & SADB_X_SAFLAGS_ESN) {
+ /* We support replay window with ESN only */
+ attrs->replay_esn.trigger = true;
+ if (sa_entry->esn_state.esn_msb)
+ attrs->replay_esn.esn = sa_entry->esn_state.esn;
+ else
+ /* According to RFC4303, section "3.3.3. Sequence Number Generation",
+ * the first packet sent using a given SA will contain a sequence
+ * number of 1.
+ */
+ attrs->replay_esn.esn = max_t(u32, sa_entry->esn_state.esn, 1);
+ attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
+ attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+
+ if (savp->replay) {
+ switch (savp->replay->wsize) {
+ case 4:
+ attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_32BIT;
+ break;
+ case 8:
+ attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_64BIT;
+ break;
+ case 16:
+ attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_128BIT;
+ break;
+ case 32:
+ attrs->replay_esn.replay_window = MLX5_IPSEC_ASO_REPLAY_WIN_256BIT;
+ break;
+ default:
+ /* Do nothing */
+ break;
+ }
+ }
+ }
+}
+
+static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
+ struct secasvar *savp)
+{
+ struct secasindex *saidx = &savp->sah->saidx;
+ struct seckey *key_encp = savp->key_enc;
+ int keylen;
+
+ if (!(mlx5_ipsec_device_caps(mdev) &
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ mlx5_core_err(mdev, "FULL offload is not supported\n");
+ return (EOPNOTSUPP);
+ }
+ if (savp->state == SADB_SASTATE_DEAD)
+ return (EOPNOTSUPP);
+ if (savp->alg_enc == SADB_EALG_NONE) {
+ mlx5_core_err(mdev, "Cannot offload authenticated xfrm states\n");
+ return (EOPNOTSUPP);
+ }
+ if (savp->alg_enc != SADB_X_EALG_AESGCM16) {
+ mlx5_core_err(mdev, "Only IPSec aes-gcm-16 encryption protocol may be offloaded\n");
+ return (EOPNOTSUPP);
+ }
+ if (savp->tdb_compalgxform) {
+ mlx5_core_err(mdev, "Cannot offload compressed xfrm states\n");
+ return (EOPNOTSUPP);
+ }
+ if (savp->alg_auth != SADB_X_AALG_AES128GMAC && savp->alg_auth != SADB_X_AALG_AES256GMAC) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bits\n");
+ return (EOPNOTSUPP);
+ }
+ if ((saidx->dst.sa.sa_family != AF_INET && saidx->dst.sa.sa_family != AF_INET6) ||
+ (saidx->src.sa.sa_family != AF_INET && saidx->src.sa.sa_family != AF_INET6)) {
+ mlx5_core_err(mdev, "Only IPv4/6 xfrm states may be offloaded\n");
+ return (EOPNOTSUPP);
+ }
+ if (saidx->proto != IPPROTO_ESP) {
+ mlx5_core_err(mdev, "Only ESP xfrm state may be offloaded\n");
+ return (EOPNOTSUPP);
+ }
+ /* subtract off the salt, RFC4106, 8.1 and RFC3686, 5.1 */
+ keylen = _KEYLEN(key_encp) - SAV_ISCTRORGCM(savp) * 4 - SAV_ISCHACHA(savp) * 4;
+ if (keylen != 128/8 && keylen != 256 / 8) {
+ mlx5_core_err(mdev, "Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
+ return (EOPNOTSUPP);
+ }
+
+ if (saidx->mode != IPSEC_MODE_TRANSPORT) {
+ mlx5_core_err(mdev, "Only transport xfrm states may be offloaded in full offload mode\n");
+ return (EOPNOTSUPP);
+ }
+
+ if (savp->natt) {
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESPINUDP)) {
+ mlx5_core_err(mdev, "Encapsulation is not supported\n");
+ return (EOPNOTSUPP);
+ }
+ }
+
+ if (savp->replay && savp->replay->wsize != 0 && savp->replay->wsize != 4 &&
+ savp->replay->wsize != 8 && savp->replay->wsize != 16 && savp->replay->wsize != 32) {
+ mlx5_core_err(mdev, "Unsupported replay window size %d\n", savp->replay->wsize);
+ return (EOPNOTSUPP);
+ }
+
+ if ((savp->flags & SADB_X_SAFLAGS_ESN) != 0) {
+ if ((mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_ESN) == 0) {
+ mlx5_core_err(mdev, "ESN is not supported\n");
+ return (EOPNOTSUPP);
+ }
+ } else if (savp->replay != NULL && savp->replay->wsize != 0) {
+ mlx5_core_warn(mdev,
+ "non-ESN but replay-protect SA offload is not supported\n");
+ return (EOPNOTSUPP);
+ }
+ return 0;
+}
+
+static int
+mlx5e_if_sa_newkey_onedir(struct ifnet *ifp, void *sav, int dir, u_int drv_spi,
+ struct mlx5e_ipsec_sa_entry **privp, struct mlx5e_ipsec_priv_bothdir *pb,
+ struct ifnet *ifpo)
+{
+#ifdef IPSEC_OFFLOAD
+ struct rm_priotracker tracker;
+#endif
+ struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
+ struct mlx5e_priv *priv = if_getsoftc(ifp);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ u16 vid = VLAN_NONE;
+ int err;
+
+ if (priv->gone != 0 || ipsec == NULL)
+ return (EOPNOTSUPP);
+
+ if (if_gettype(ifpo) == IFT_L2VLAN)
+ VLAN_TAG(ifpo, &vid);
+
+#ifdef IPSEC_OFFLOAD
+ ipsec_sahtree_rlock(&tracker);
+#endif
+ err = mlx5e_xfrm_validate_state(mdev, sav);
+#ifdef IPSEC_OFFLOAD
+ ipsec_sahtree_runlock(&tracker);
+#endif
+ if (err)
+ return err;
+
+ sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
+ if (sa_entry == NULL)
+ return (ENOMEM);
+
+ sa_entry->kspi = drv_spi;
+ sa_entry->savp = sav;
+ sa_entry->ifp = ifp;
+ sa_entry->ifpo = ifpo;
+ sa_entry->ipsec = ipsec;
+ sa_entry->vid = vid;
+
+#ifdef IPSEC_OFFLOAD
+ ipsec_sahtree_rlock(&tracker);
+#endif
+ err = mlx5e_xfrm_validate_state(mdev, sav);
+ if (err != 0) {
+#ifdef IPSEC_OFFLOAD
+ ipsec_sahtree_runlock(&tracker);
+#endif
+ goto err_xfrm;
+ }
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &sa_entry->attrs, dir);
+#ifdef IPSEC_OFFLOAD
+ ipsec_sahtree_runlock(&tracker);
+#endif
+
+ err = mlx5e_ipsec_create_dwork(sa_entry, pb);
+ if (err)
+ goto err_xfrm;
+
+ /* create hw context */
+ err = mlx5_ipsec_create_sa_ctx(sa_entry);
+ if (err)
+ goto err_sa_ctx;
+
+ err = mlx5e_accel_ipsec_fs_add_rule(sa_entry);
+ if (err)
+ goto err_fs;
+
+ *privp = sa_entry;
+ if (sa_entry->dwork)
+ queue_delayed_work(ipsec->wq, &sa_entry->dwork->dwork, MLX5_IPSEC_RESCHED);
+
+ err = xa_insert(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id, sa_entry, GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ return 0;
+
+err_xa:
+ if (sa_entry->dwork)
+ cancel_delayed_work_sync(&sa_entry->dwork->dwork);
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+err_fs:
+ mlx5_ipsec_free_sa_ctx(sa_entry);
+err_sa_ctx:
+ kfree(sa_entry->dwork);
+ sa_entry->dwork = NULL;
+err_xfrm:
+ kfree(sa_entry);
+ mlx5_en_err(ifp, "Device failed to offload this state");
+ return err;
+}
+
+#define GET_TRUNK_IF(vifp, ifp, ept) \
+ if (if_gettype(vifp) == IFT_L2VLAN) { \
+ NET_EPOCH_ENTER(ept); \
+ ifp = VLAN_TRUNKDEV(vifp); \
+ NET_EPOCH_EXIT(ept); \
+ } else { \
+ ifp = vifp; \
+ }
+
+static int
+mlx5e_if_sa_newkey(struct ifnet *ifpo, void *sav, u_int dev_spi, void **privp)
+{
+ struct mlx5e_ipsec_priv_bothdir *pb;
+ struct epoch_tracker et;
+ struct ifnet *ifp;
+ int error;
+
+ GET_TRUNK_IF(ifpo, ifp, et);
+
+ pb = malloc(sizeof(struct mlx5e_ipsec_priv_bothdir), M_DEVBUF,
+ M_WAITOK | M_ZERO);
+ error = mlx5e_if_sa_newkey_onedir(
+ ifp, sav, IPSEC_DIR_INBOUND, dev_spi, &pb->priv_in, pb, ifpo);
+ if (error != 0) {
+ free(pb, M_DEVBUF);
+ return (error);
+ }
+ error = mlx5e_if_sa_newkey_onedir(
+ ifp, sav, IPSEC_DIR_OUTBOUND, dev_spi, &pb->priv_out, pb, ifpo);
+ if (error == 0) {
+ *privp = pb;
+ } else {
+ if (pb->priv_in->dwork != NULL)
+ cancel_delayed_work_sync(&pb->priv_in->dwork->dwork);
+ mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb->priv_in);
+ free(pb, M_DEVBUF);
+ }
+ return (error);
+}
+
+static void
+mlx5e_if_sa_deinstall_onekey(struct ifnet *ifp, u_int dev_spi, void *priv)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec_sa_entry *old;
+
+ old = xa_erase(&mdev->ipsec_sadb, sa_entry->ipsec_obj_id);
+ WARN_ON(old != sa_entry);
+
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+ mlx5_ipsec_free_sa_ctx(sa_entry);
+ kfree(sa_entry->dwork);
+ kfree(sa_entry);
+}
+
+static int
+mlx5e_if_sa_deinstall(struct ifnet *ifpo, u_int dev_spi, void *priv)
+{
+ struct mlx5e_ipsec_priv_bothdir pb, *pbp;
+ struct epoch_tracker et;
+ struct ifnet *ifp;
+
+ GET_TRUNK_IF(ifpo, ifp, et);
+
+ pbp = priv;
+ pb = *(struct mlx5e_ipsec_priv_bothdir *)priv;
+ pbp->priv_in = pbp->priv_out = NULL;
+
+ if (pb.priv_in->dwork != NULL)
+ cancel_delayed_work_sync(&pb.priv_in->dwork->dwork);
+ if (pb.priv_out->dwork != NULL)
+ cancel_delayed_work_sync(&pb.priv_out->dwork->dwork);
+
+ mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_in);
+ mlx5e_if_sa_deinstall_onekey(ifp, dev_spi, pb.priv_out);
+ free(pbp, M_DEVBUF);
+ return (0);
+}
+
+static void
+mlx5e_if_sa_cnt_one(struct ifnet *ifp, void *sa, uint32_t drv_spi,
+ void *priv, u64 *bytes, u64 *packets)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(priv);
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_fc_query(mdev, ipsec_rule->fc, packets, bytes);
+}
+
+static int
+mlx5e_if_sa_cnt(struct ifnet *ifpo, void *sa, uint32_t drv_spi, void *priv,
+ struct seclifetime *lt)
+{
+ struct mlx5e_ipsec_priv_bothdir *pb;
+ u64 packets_in, packets_out;
+ u64 bytes_in, bytes_out;
+ struct epoch_tracker et;
+ struct ifnet *ifp;
+
+ GET_TRUNK_IF(ifpo, ifp, et);
+
+ pb = priv;
+ mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_in,
+ &bytes_in, &packets_in);
+ mlx5e_if_sa_cnt_one(ifp, sa, drv_spi, pb->priv_out,
+ &bytes_out, &packets_out);
+ /* TODO: remove this casting once Kostia changes allocation type to be u64 */
+ lt->bytes = bytes_in + bytes_out;
+ lt->allocations = (uint32_t)(packets_in + packets_out);
+ return (0);
+}
+
+static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
+ struct secpolicy *sp, struct inpcb *inp)
+{
+ struct secpolicyindex *spidx = &sp->spidx;
+
+ if (!(mlx5_ipsec_device_caps(mdev) &
+ MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ mlx5_core_err(mdev, "FULL offload is not supported\n");
+ return (EINVAL);
+ }
+
+ if (sp->tcount > 1) {
+ mlx5_core_err(mdev, "Can offload exactly one template, "
+ "not %d\n", sp->tcount);
+ return (EINVAL);
+ }
+
+ if (sp->policy == IPSEC_POLICY_BYPASS &&
+ !(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
+ mlx5_core_err(mdev, "Device does not support policy priority\n");
+ return (EINVAL);
+ }
+
+ if (sp->tcount > 0 && inp != NULL) {
+ mlx5_core_err(mdev, "Not valid input data\n");
+ return (EINVAL);
+ }
+
+ if (spidx->dir != IPSEC_DIR_INBOUND && spidx->dir != IPSEC_DIR_OUTBOUND) {
+ mlx5_core_err(mdev, "Wrong policy direction\n");
+ return (EINVAL);
+ }
+
+ if (sp->tcount > 0 && sp->req[0]->saidx.mode != IPSEC_MODE_TRANSPORT) {
+ mlx5_core_err(mdev, "Device supports transport mode only");
+ return (EINVAL);
+ }
+
+ if (sp->policy != IPSEC_POLICY_DISCARD &&
+ sp->policy != IPSEC_POLICY_IPSEC && sp->policy != IPSEC_POLICY_BYPASS) {
+ mlx5_core_err(mdev, "Offloaded policy must be specific on its action\n");
+ return (EINVAL);
+ }
+
+ if (sp->policy == IPSEC_POLICY_BYPASS && !inp) {
+ mlx5_core_err(mdev, "Missing port information for IKE bypass\n");
+ return (EINVAL);
+ }
+
+ if (inp != NULL) {
+ INP_RLOCK(inp);
+ if (inp->inp_socket == NULL || inp->inp_socket->so_proto->
+ pr_protocol != IPPROTO_UDP) {
+ mlx5_core_err(mdev, "Unsupported IKE bypass protocol %d\n",
+ inp->inp_socket == NULL ? -1 :
+ inp->inp_socket->so_proto->pr_protocol);
+ INP_RUNLOCK(inp);
+ return (EINVAL);
+ }
+ INP_RUNLOCK(inp);
+ }
+
+ /* TODO fill relevant bits */
+ return 0;
+}
+
+static void
+mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
+ struct mlx5_accel_pol_xfrm_attrs *attrs, struct inpcb *inp, u16 vid)
+{
+ struct secpolicy *sp = pol_entry->sp;
+ struct secpolicyindex *spidx = &sp->spidx;
+
+ memset(attrs, 0, sizeof(*attrs));
+
+ if (!inp) {
+ if (spidx->src.sa.sa_family == AF_INET) {
+ attrs->saddr.a4 = spidx->src.sin.sin_addr.s_addr;
+ attrs->daddr.a4 = spidx->dst.sin.sin_addr.s_addr;
+ } else if (spidx->src.sa.sa_family == AF_INET6) {
+ memcpy(&attrs->saddr.a6, &spidx->src.sin6.sin6_addr, 16);
+ memcpy(&attrs->daddr.a6, &spidx->dst.sin6.sin6_addr, 16);
+ } else {
+ KASSERT(0, ("unsupported family %d", spidx->src.sa.sa_family));
+ }
+ attrs->family = spidx->src.sa.sa_family;
+ attrs->prio = 0;
+ attrs->action = sp->policy;
+ attrs->reqid = sp->req[0]->saidx.reqid;
+ } else {
+ INP_RLOCK(inp);
+ if ((inp->inp_vflag & INP_IPV4) != 0) {
+ attrs->saddr.a4 = inp->inp_laddr.s_addr;
+ attrs->daddr.a4 = inp->inp_faddr.s_addr;
+ attrs->family = AF_INET;
+ } else if ((inp->inp_vflag & INP_IPV6) != 0) {
+ memcpy(&attrs->saddr.a6, &inp->in6p_laddr, 16);
+ memcpy(&attrs->daddr.a6, &inp->in6p_faddr, 16);
+ attrs->family = AF_INET6;
+ } else {
+ KASSERT(0, ("unsupported family %d", inp->inp_vflag));
+ }
+ attrs->upspec.dport = inp->inp_fport;
+ attrs->upspec.sport = inp->inp_lport;
+ attrs->upspec.proto = inp->inp_ip_p;
+ INP_RUNLOCK(inp);
+
+ /* Give highest priority for PCB policies */
+ attrs->prio = 1;
+ attrs->action = IPSEC_POLICY_IPSEC;
+ }
+ attrs->dir = spidx->dir;
+ attrs->vid = vid;
+}
+
+static int
+mlx5e_if_spd_install(struct ifnet *ifpo, void *sp, void *inp1, void **ifdatap)
+{
+ struct mlx5e_ipsec_pol_entry *pol_entry;
+ struct mlx5e_priv *priv;
+ struct epoch_tracker et;
+ u16 vid = VLAN_NONE;
+ struct ifnet *ifp;
+ int err;
+
+ GET_TRUNK_IF(ifpo, ifp, et);
+ if (if_gettype(ifpo) == IFT_L2VLAN)
+ VLAN_TAG(ifpo, &vid);
+ priv = if_getsoftc(ifp);
+ if (priv->gone || !priv->ipsec)
+ return (EOPNOTSUPP);
+
+ err = mlx5e_xfrm_validate_policy(priv->mdev, sp, inp1);
+ if (err)
+ return err;
+
+ pol_entry = kzalloc(sizeof(*pol_entry), GFP_KERNEL);
+ if (!pol_entry)
+ return (ENOMEM);
+
+ pol_entry->sp = sp;
+ pol_entry->ipsec = priv->ipsec;
+
+ mlx5e_ipsec_build_accel_pol_attrs(pol_entry, &pol_entry->attrs,
+ inp1, vid);
+ err = mlx5e_accel_ipsec_fs_add_pol(pol_entry);
+ if (err)
+ goto err_pol;
+ *ifdatap = pol_entry;
+
+ return 0;
+
+err_pol:
+ kfree(pol_entry);
+ mlx5_en_err(ifp, "Device failed to offload this policy");
+ return err;
+}
+
+static int
+mlx5e_if_spd_deinstall(struct ifnet *ifpo, void *sp, void *ifdata)
+{
+ struct mlx5e_ipsec_pol_entry *pol_entry;
+
+ pol_entry = to_ipsec_pol_entry(ifdata);
+ mlx5e_accel_ipsec_fs_del_pol(pol_entry);
+ kfree(pol_entry);
+ return 0;
+}
+
+void mlx5e_ipsec_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *pipsec = priv->ipsec;
+ if (!pipsec)
+ return;
+
+ mlx5e_accel_ipsec_fs_cleanup(pipsec);
+ destroy_workqueue(pipsec->wq);
+ mlx5e_ipsec_aso_cleanup(pipsec);
+ kfree(pipsec);
+ priv->ipsec = NULL;
+}
+
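+/*
+ * Report which checksum/TSO offloads remain usable together with the
+ * IPsec offload; for a VLAN the capabilities of the trunk device apply.
+ */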
+static int
+mlx5e_if_ipsec_hwassist(if_t ifneto, void *sav __unused,
+ uint32_t drv_spi __unused, void *priv __unused)
+{
+ if_t ifnet;
+
+ if (if_gettype(ifneto) == IFT_L2VLAN) {
+ ifnet = VLAN_TRUNKDEV(ifneto);
+ } else {
+ ifnet = ifneto;
+ }
+
+ return (if_gethwassist(ifnet) & (CSUM_TSO | CSUM_TCP | CSUM_UDP |
+ CSUM_IP | CSUM_IP6_TSO | CSUM_IP6_TCP | CSUM_IP6_UDP));
+}
+
+static const struct if_ipsec_accel_methods mlx5e_ipsec_funcs = {
+ .if_sa_newkey = mlx5e_if_sa_newkey,
+ .if_sa_deinstall = mlx5e_if_sa_deinstall,
+ .if_spdadd = mlx5e_if_spd_install,
+ .if_spddel = mlx5e_if_spd_deinstall,
+ .if_sa_cnt = mlx5e_if_sa_cnt,
+ .if_hwassist = mlx5e_if_ipsec_hwassist,
+};
+
+int mlx5e_ipsec_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ipsec *pipsec;
+ if_t ifp = priv->ifp;
+ int ret;
+
+ mlx5_core_info(mdev, "ipsec "
+ "offload %d log_max_dek %d gen_obj_types %d "
+ "ipsec_encrypt %d ipsec_decrypt %d "
+ "esp_aes_gcm_128_encrypt %d esp_aes_gcm_128_decrypt %d "
+ "ipsec_full_offload %d "
+ "reformat_add_esp_trasport %d reformat_del_esp_trasport %d "
+ "decap %d "
+ "ignore_flow_level_tx %d ignore_flow_level_rx %d "
+ "reformat_natt_tx %d reformat_natt_rx %d "
+ "ipsec_esn %d\n",
+ MLX5_CAP_GEN(mdev, ipsec_offload) != 0,
+ MLX5_CAP_GEN(mdev, log_max_dek) != 0,
+ (MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt) != 0,
+ MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) != 0,
+ MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt) != 0,
+ MLX5_CAP_IPSEC(mdev, ipsec_full_offload) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_transport_over_udp) != 0,
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_del_esp_transport_over_udp) != 0,
+ MLX5_CAP_IPSEC(mdev, ipsec_esn) != 0);
+
+ if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)) {
+ mlx5_core_dbg(mdev, "Not an IPSec offload device\n");
+ return 0;
+ }
+
+ xa_init_flags(&mdev->ipsec_sadb, XA_FLAGS_ALLOC);
+
+ pipsec = kzalloc(sizeof(*pipsec), GFP_KERNEL);
+ if (pipsec == NULL)
+ return (ENOMEM);
+
+ pipsec->mdev = mdev;
+ pipsec->pdn = priv->pdn;
+ pipsec->mkey = priv->mr.key;
+
+ ret = mlx5e_ipsec_aso_init(pipsec);
+ if (ret)
+ goto err_ipsec_aso;
+
+ pipsec->wq = alloc_workqueue("mlx5e_ipsec", WQ_UNBOUND, 0);
+ if (pipsec->wq == NULL) {
+ ret = ENOMEM;
+ goto err_ipsec_wq;
+ }
+
+ ret = mlx5e_accel_ipsec_fs_init(pipsec);
+ if (ret)
+ goto err_ipsec_alloc;
+
+ if_setipsec_accel_methods(ifp, &mlx5e_ipsec_funcs);
+ priv->ipsec = pipsec;
+ mlx5_core_dbg(mdev, "IPSec attached to netdevice\n");
+ return 0;
+
+err_ipsec_alloc:
+ destroy_workqueue(pipsec->wq);
+err_ipsec_wq:
+ mlx5e_ipsec_aso_cleanup(pipsec);
+err_ipsec_aso:
+ kfree(pipsec);
+ mlx5_core_err(priv->mdev, "IPSec initialization failed, %d\n", ret);
+ return ret;
+}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
new file mode 100644
index 000000000000..d1f454a5ec41
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_fs.c
@@ -0,0 +1,2289 @@
+/*-
+ * Copyright (c) 2023 NVIDIA corporation & affiliates.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_ipsec.h"
+
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <net/pfkeyv2.h>
+#include <netipsec/key_var.h>
+#include <netipsec/keydb.h>
+#include <netipsec/ipsec.h>
+#include <netipsec/xform.h>
+#include <netipsec/ipsec_offload.h>
+#include <dev/mlx5/fs.h>
+#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
+#include <dev/mlx5/mlx5_core/fs_core.h>
+#include <dev/mlx5/mlx5_core/fs_chains.h>
+
+/*
+ * TX tables are organized differently for Ethernet and for RoCE:
+ *
+ * +=========+
+ * Ethernet Tx | SA KSPI | match
+ * --------------------->|Flowtable|----->+ +
+ * | |\ | / \
+ * +=========+ | | / \ +=========+ +=========+
+ * miss | | / \ | Status | | |
+ * DROP<--------+ |---->|Encrypt|------>|Flowtable|---->| TX NS |
+ * | \ / | | | |
+ * | \ / +=========+ +=========+
+ * +=========+ +=========+ | \ / |
+ * RoCE | Policy | match|SA ReqId |match| + |
+ * Tx |Flowtable|----->|Flowtable|---->+ |
+ * ---->|IP header| |ReqId+IP | |
+ * | | | header |--------------------------------+
+ * +=========+ +=========+ miss |
+ * | |
+ * | miss |
+ * +-------------------------------------------------------
+ *
+ * +=========+
+ * | RDMA |
+ * |Flowtable|
+ * | |
+ * Rx Tables and rules: +=========+
+ * + /
+ * +=========+ +=========+ / \ +=========+ +=========+ /match
+ * | Policy | | SA | / \ | Status | | RoCE |/
+ * ---->|Flowtable| match|Flowtable| match / \ |Flowtable|----->|Flowtable|
+ * |IP header|----->|IP header|----->|Decrypt|----->| | | Roce V2 |
+ * | | |+ESP+SPI | \ / | | | UDP port|\
+ * +=========+ +=========+ \ / +=========+ +=========+ \miss
+ * | | \ / \
+ * | | + +=========+
+ * | miss | miss | Ethernet|
+ * +--------------->---------------------------------------------------->| RX NS |
+ * | |
+ * +=========+
+ *
+ */
+
+#define NUM_IPSEC_FTE BIT(15)
+#define IPSEC_TUNNEL_DEFAULT_TTL 0x40
+
+struct mlx5e_ipsec_fc {
+ struct mlx5_fc *cnt;
+ struct mlx5_fc *drop;
+};
+
+struct mlx5e_ipsec_ft {
+ struct mutex mutex; /* Protect changes to this struct */
+ struct mlx5_flow_table *pol;
+ struct mlx5_flow_table *sa_kspi;
+ struct mlx5_flow_table *sa;
+ struct mlx5_flow_table *status;
+ u32 refcnt;
+};
+
+struct mlx5e_ipsec_tx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_namespace *ns;
+};
+
+struct mlx5e_ipsec_miss {
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+};
+
+struct mlx5e_ipsec_tx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss kspi_miss;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5e_ipsec_rule kspi_bypass_rule; /* rule for IPsec bypass */
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_ipsec_tx_roce roce;
+};
+
+struct mlx5e_ipsec_rx_roce {
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_handle *rule;
+ struct mlx5e_ipsec_miss roce_miss;
+
+ struct mlx5_flow_table *ft_rdma;
+ struct mlx5_flow_namespace *ns_rdma;
+};
+
+struct mlx5e_ipsec_rx_ip_type {
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_handle *ipv4_rule;
+ struct mlx5_flow_handle *ipv6_rule;
+ struct mlx5e_ipsec_miss miss;
+};
+
+struct mlx5e_ipsec_rx {
+ struct mlx5e_ipsec_ft ft;
+ struct mlx5e_ipsec_miss pol;
+ struct mlx5e_ipsec_miss sa;
+ struct mlx5e_ipsec_rule status;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fs_chains *chains;
+ struct mlx5e_ipsec_rx_roce roce;
+};
+
+static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
+ u16 kspi);
+static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec);
+
+static void setup_fte_no_frags(struct mlx5_flow_spec *spec)
+{
+ /* Non fragmented */
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.frag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.frag, 0);
+}
+
+static void setup_fte_esp(struct mlx5_flow_spec *spec)
+{
+ /* ESP header */
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_ESP);
+}
+
+static void setup_fte_spi(struct mlx5_flow_spec *spec, u32 spi, bool encap)
+{
+ /* SPI number */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
+ if (encap) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.inner_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.inner_esp_spi, spi);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, misc_parameters.outer_esp_spi);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.outer_esp_spi, spi);
+ }
+}
+
+static void
+setup_fte_vid(struct mlx5_flow_spec *spec, u16 vid)
+{
+ /* virtual lan tag */
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.cvlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
+}
+
+static void
+clear_fte_vid(struct mlx5_flow_spec *spec)
+{
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.cvlan_tag, 0);
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid, 0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.first_vid, 0);
+}
+
+static void
+setup_fte_no_vid(struct mlx5_flow_spec *spec)
+{
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value,
+ outer_headers.cvlan_tag, 0);
+}
+
+static struct mlx5_fs_chains *
+ipsec_chains_create(struct mlx5_core_dev *mdev, struct mlx5_flow_table *miss_ft,
+ enum mlx5_flow_namespace_type ns, int base_prio,
+ int base_level, struct mlx5_flow_table **root_ft)
+{
+ struct mlx5_chains_attr attr = {};
+ struct mlx5_fs_chains *chains;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+ attr.max_grp_num = 2;
+ attr.default_ft = miss_ft;
+ attr.ns = ns;
+ attr.fs_base_prio = base_prio;
+ attr.fs_base_level = base_level;
+ chains = mlx5_chains_create(mdev, &attr);
+ if (IS_ERR(chains))
+ return chains;
+
+ /* Create chain 0, prio 1, level 0 to connect chains to prev in fs_core */
+ ft = mlx5_chains_get_table(chains, 0, 1, 0);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_chains_get;
+ }
+
+ *root_ft = ft;
+ return chains;
+
+err_chains_get:
+ mlx5_chains_destroy(chains);
+ return ERR_PTR(err);
+}
+
+static void ipsec_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_put_table(chains, 0, 1, 0);
+ mlx5_chains_destroy(chains);
+}
+
+static struct mlx5_flow_table *
+ipsec_chains_get_table(struct mlx5_fs_chains *chains, u32 prio)
+{
+ return mlx5_chains_get_table(chains, 0, prio + 1, 0);
+}
+
+static void ipsec_chains_put_table(struct mlx5_fs_chains *chains, u32 prio)
+{
+ mlx5_chains_put_table(chains, 0, prio + 1, 0);
+}
+
+static struct mlx5_flow_table *ipsec_rx_ft_create(struct mlx5_flow_namespace *ns,
+ int level, int prio,
+ int max_num_groups)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ ft_attr.autogroup.max_num_groups = max_num_groups;
+ ft_attr.autogroup.num_reserved_entries = 1;
+
+ return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
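+/*
+ * Reserve the last flow entry of the table for a catch-all group and
+ * install a miss rule there: forward to the given destination, or drop
+ * when no destination is supplied.
+ */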
+static int ipsec_miss_create(struct mlx5_core_dev *mdev,
+ struct mlx5_flow_table *ft,
+ struct mlx5e_ipsec_miss *miss,
+ struct mlx5_flow_destination *dest)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_spec *spec;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* Create miss_group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ miss->group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss->group)) {
+ err = PTR_ERR(miss->group);
+ mlx5_core_err(mdev, "fail to create IPsec miss_group err=%d\n",
+ err);
+ goto out;
+ }
+
+ if (dest)
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ else
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ /* Create miss rule */
+ miss->rule = mlx5_add_flow_rules(ft, NULL, &flow_act, dest, 1);
+ if (IS_ERR(miss->rule)) {
+ mlx5_destroy_flow_group(miss->group);
+ err = PTR_ERR(miss->rule);
+ mlx5_core_err(mdev, "fail to create IPsec miss_rule err=%d\n",
+ err);
+ goto out;
+ }
+out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
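+/*
+ * Tag matching packets with "val" in a metadata register: REG_B for
+ * inbound traffic and REG_C_0 for outbound, so later lookups can match
+ * on the SA that processed the packet.
+ */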
+static int setup_modify_header(struct mlx5_core_dev *mdev, u32 val, u8 dir,
+ struct mlx5_flow_act *flow_act)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ enum mlx5_flow_namespace_type ns_type;
+ struct mlx5_modify_hdr *modify_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ switch (dir) {
+ case IPSEC_DIR_INBOUND:
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+ break;
+ case IPSEC_DIR_OUTBOUND:
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_0);
+ ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(set_action_in, action, data, val);
+ MLX5_SET(set_action_in, action, offset, 0);
+ MLX5_SET(set_action_in, action, length, 32);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, ns_type, 1, action);
+ if (IS_ERR(modify_hdr)) {
+ mlx5_core_err(mdev, "Failed to allocate modify_header %ld\n",
+ PTR_ERR(modify_hdr));
+ return PTR_ERR(modify_hdr);
+ }
+
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ return 0;
+}
+
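+/*
+ * Build the reformat data for transport-mode ESP encapsulation: an
+ * optional UDP header (NAT-T) followed by the 4-byte SPI in network
+ * byte order; the remainder of the zeroed buffer is left as-is, and
+ * param_0 carries the authentication (ICV) size.
+ */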
+static int
+setup_pkt_transport_reformat(struct mlx5_accel_esp_xfrm_attrs *attrs,
+ struct mlx5_pkt_reformat_params *reformat_params)
+{
+ struct udphdr *udphdr;
+ size_t bfflen = 16;
+ char *reformatbf;
+ __be32 spi;
+ void *hdr;
+
+ if (attrs->family == AF_INET) {
+ if (attrs->encap)
+ reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4;
+ else
+ reformat_params->type = MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4;
+ } else {
+ if (attrs->encap)
+ reformat_params->type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6;
+ else
+ reformat_params->type =
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6;
+ }
+
+ if (attrs->encap)
+ bfflen += sizeof(*udphdr);
+ reformatbf = kzalloc(bfflen, GFP_KERNEL);
+ if (!reformatbf)
+ return -ENOMEM;
+
+ hdr = reformatbf;
+ if (attrs->encap) {
+ udphdr = (struct udphdr *)reformatbf;
+ udphdr->uh_sport = attrs->sport;
+ udphdr->uh_dport = attrs->dport;
+ hdr += sizeof(*udphdr);
+ }
+
+ /* convert to network format */
+ spi = htonl(attrs->spi);
+ memcpy(hdr, &spi, 4);
+
+ reformat_params->param_0 = attrs->authsize;
+ reformat_params->size = bfflen;
+ reformat_params->data = reformatbf;
+
+ return 0;
+}
+
+static int setup_pkt_reformat(struct mlx5_core_dev *mdev,
+ struct mlx5_accel_esp_xfrm_attrs *attrs,
+ struct mlx5_flow_act *flow_act)
+{
+ enum mlx5_flow_namespace_type ns_type = MLX5_FLOW_NAMESPACE_EGRESS;
+ struct mlx5_pkt_reformat_params reformat_params = {};
+ struct mlx5_pkt_reformat *pkt_reformat;
+ int ret;
+
+ if (attrs->dir == IPSEC_DIR_INBOUND) {
+ if (attrs->encap)
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP;
+ else
+ reformat_params.type = MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT;
+ ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
+ goto cmd;
+ }
+
+ ret = setup_pkt_transport_reformat(attrs, &reformat_params);
+ if (ret)
+ return ret;
+cmd:
+ pkt_reformat =
+ mlx5_packet_reformat_alloc(mdev, &reformat_params, ns_type);
+ if (reformat_params.data)
+ kfree(reformat_params.data);
+ if (IS_ERR(pkt_reformat))
+ return PTR_ERR(pkt_reformat);
+
+ flow_act->pkt_reformat = pkt_reformat;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ return 0;
+}
+
+static void setup_fte_addr4(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 4);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4), saddr, 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4), daddr, 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+}
+
+static void setup_fte_addr6(struct mlx5_flow_spec *spec, __be32 *saddr,
+ __be32 *daddr)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, 6);
+
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), saddr, 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), daddr, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6), 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6), 0xff, 16);
+}
+
+static void
+setup_fte_ip_version(struct mlx5_flow_spec *spec, u8 family)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ family == AF_INET ? 4 : 6);
+}
+
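+/*
+ * Install the RX SA rule: match the SA addresses, the SPI and (unless
+ * encapsulated) the ESP protocol, decrypt and count the packet, then
+ * forward hits to the status table.  Non-drop SAs also get a modify
+ * header that records kspi | BIT(31) in metadata REG_B.
+ */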
+static int rx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5e_ipsec_rx *rx;
+ struct mlx5_fc *counter;
+ int err;
+
+ rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ if (!attrs->drop) {
+ err = setup_modify_header(mdev, sa_entry->kspi | BIT(31), IPSEC_DIR_INBOUND,
+ &flow_act);
+ if (err)
+ goto err_mod_header;
+ }
+
+ err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ if (err)
+ goto err_pkt_reformat;
+
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_add_cnt;
+ }
+
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.op = MLX5_FLOW_ACT_CRYPTO_OP_DECRYPT;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[0].ft = rx->ft.status;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(counter);
+
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ if (!attrs->encap)
+ setup_fte_esp(spec);
+
+ setup_fte_spi(spec, attrs->spi, attrs->encap);
+ setup_fte_no_frags(spec);
+
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
+ rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add RX ipsec rule err=%d\n", err);
+ goto err_add_flow;
+ }
+ ipsec_rule->rule = rule;
+
+ /* Add another rule for zero vid */
+ if (sa_entry->vid == VLAN_NONE) {
+ clear_fte_vid(spec);
+ setup_fte_vid(spec, 0);
+ rule = mlx5_add_flow_rules(rx->ft.sa, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "fail to add RX ipsec zero vid rule err=%d\n",
+ err);
+ goto err_add_flow;
+ }
+ ipsec_rule->vid_zero_rule = rule;
+ }
+
+ kvfree(spec);
+ ipsec_rule->fc = counter;
+ ipsec_rule->modify_hdr = flow_act.modify_hdr;
+ ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
+ return 0;
+
+err_add_flow:
+ mlx5_fc_destroy(mdev, counter);
+ if (ipsec_rule->rule != NULL)
+ mlx5_del_flow_rules(&ipsec_rule->rule);
+err_add_cnt:
+ mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
+ if (flow_act.modify_hdr != NULL)
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
+ kvfree(spec);
+
+ return err;
+}
+
+static struct mlx5_flow_table *ipsec_tx_ft_create(struct mlx5_flow_namespace *ns,
+ int level, int prio,
+ int max_num_groups)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.autogroup.num_reserved_entries = 1;
+ ft_attr.autogroup.max_num_groups = max_num_groups;
+ ft_attr.max_fte = NUM_IPSEC_FTE;
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+
+ return mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+}
+
+static int ipsec_counter_rule_tx(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *fte;
+ int err;
+
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_COUNT |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(tx->fc->cnt);
+ fte = mlx5_add_flow_rules(tx->ft.status, NULL, &flow_act, &dest, 1);
+ if (IS_ERR_OR_NULL(fte)) {
+ err = PTR_ERR(fte);
+ mlx5_core_err(mdev, "Fail to add ipsec tx counter rule err=%d\n", err);
+ goto err_rule;
+ }
+
+ tx->status.rule = fte;
+ return 0;
+
+err_rule:
+ return err;
+}
+
+static void tx_destroy_roce(struct mlx5e_ipsec_tx *tx)
+{
+ if (!tx->roce.ft)
+ return;
+
+ mlx5_del_flow_rules(&tx->roce.rule);
+ mlx5_destroy_flow_group(tx->roce.g);
+ mlx5_destroy_flow_table(tx->roce.ft);
+ tx->roce.ft = NULL;
+}
+
+/* IPsec TX flow steering */
+static void tx_destroy(struct mlx5e_ipsec_tx *tx)
+{
+ tx_destroy_roce(tx);
+ if (tx->chains) {
+ ipsec_chains_destroy(tx->chains);
+ } else {
+ mlx5_del_flow_rules(&tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ mlx5_destroy_flow_table(tx->ft.pol);
+ }
+ mlx5_destroy_flow_table(tx->ft.sa);
+ mlx5_del_flow_rules(&tx->kspi_miss.rule);
+ mlx5_destroy_flow_group(tx->kspi_miss.group);
+ mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
+ mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
+ mlx5_destroy_flow_table(tx->ft.sa_kspi);
+ mlx5_del_flow_rules(&tx->status.rule);
+ mlx5_destroy_flow_table(tx->ft.status);
+}
+
+static int ipsec_tx_roce_rule_setup(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_destination dst = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ int err = 0;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = tx->ft.pol;
+ rule = mlx5_add_flow_rules(tx->roce.ft, NULL, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add TX roce ipsec rule err=%d\n",
+ err);
+ goto out;
+ }
+ tx->roce.rule = rule;
+
+out:
+ return err;
+}
+
+static int ipsec_tx_create_roce(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *g;
+ int ix = 0;
+ int err;
+ u32 *in;
+
+ if (!tx->roce.ns)
+ return -EOPNOTSUPP;
+
+ in = kvzalloc(MLX5_ST_SZ_BYTES(create_flow_group_in), GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ ft_attr.max_fte = 1;
+ ft = mlx5_create_flow_table(tx->roce.ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ mlx5_core_err(mdev, "Fail to create ipsec tx roce ft err=%d\n",
+ err);
+ goto fail_table;
+ }
+ tx->roce.ft = ft;
+
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += 1;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create ipsec tx roce group err=%d\n",
+ err);
+ goto fail_group;
+ }
+ tx->roce.g = g;
+
+ err = ipsec_tx_roce_rule_setup(mdev, tx);
+ if (err) {
+ mlx5_core_err(mdev, "Fail to create RoCE IPsec tx rules err=%d\n", err);
+ goto fail_rule;
+ }
+
+ kvfree(in);
+ return 0;
+
+fail_rule:
+ mlx5_destroy_flow_group(tx->roce.g);
+fail_group:
+ mlx5_destroy_flow_table(tx->roce.ft);
+ tx->roce.ft = NULL;
+fail_table:
+ kvfree(in);
+ return err;
+}
+
+/*
+ * Set rules in the KSPI table for traffic that should bypass IPsec.
+ *
+ * mdev - mlx5 core device
+ * tx - IPsec TX state
+ *
+ * Returns 0 on success, an errno on failure.
+ */
+static int tx_create_kspi_bypass_rules(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_act flow_act_kspi = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ dest.ft = tx->ft.status;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ flow_act_kspi.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ setup_fte_reg_a_with_tag(spec, IPSEC_ACCEL_DRV_SPI_BYPASS);
+ rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act_kspi,
+ &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n",
+ err);
+ goto err_add_kspi_rule;
+ }
+ tx->kspi_bypass_rule.kspi_rule = rule;
+
+ /* Set the rule for packets without an IPsec tag. */
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ memset(spec, 0, sizeof(*spec));
+ setup_fte_reg_a_no_tag(spec);
+ rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add ipsec kspi bypass rule err=%d\n", err);
+ goto err_add_rule;
+ }
+ tx->kspi_bypass_rule.rule = rule;
+
+ kvfree(spec);
+ return 0;
+err_add_rule:
+ mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
+err_add_kspi_rule:
+ kvfree(spec);
+ return err;
+}
+
+
+static int tx_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_tx *tx)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_table *ft;
+ int err;
+
+ /*
+ * The TX flow differs between Ethernet traffic and RoCE packets.
+ * Ethernet packets start in the SA KSPI table, which matches the KSPI
+ * of the SA rule against the KSPI in the packet metadata.
+ * RoCE traffic starts in the policy table and then moves to the SA
+ * table, which matches either the reqid of the SA rule against the
+ * reqid reported by the policy table, or the IP header fields of the
+ * SA against the packet IP header fields.
+ * Tables are ordered by their level, so the KSPI table gets level 0
+ * to make it the first table for Ethernet traffic.  For RoCE, the
+ * RoCE TX table directs packets to the policy table explicitly.
+ */
+ ft = ipsec_tx_ft_create(tx->ns, 0, 0, 4);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+ tx->ft.sa_kspi = ft;
+
+ ft = ipsec_tx_ft_create(tx->ns, 2, 0, 4);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_reqid_ft;
+ }
+ tx->ft.sa = ft;
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ tx->chains = ipsec_chains_create(
+ mdev, tx->ft.sa, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC, 0, 1,
+ &tx->ft.pol);
+ if (IS_ERR(tx->chains)) {
+ err = PTR_ERR(tx->chains);
+ goto err_pol_ft;
+ }
+ } else {
+ ft = ipsec_tx_ft_create(tx->ns, 1, 0, 2);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_pol_ft;
+ }
+ tx->ft.pol = ft;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = tx->ft.sa;
+ err = ipsec_miss_create(mdev, tx->ft.pol, &tx->pol, &dest);
+ if (err)
+ goto err_pol_miss;
+ }
+
+ ft = ipsec_tx_ft_create(tx->ns, 2, 0, 1);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_status_ft;
+ }
+ tx->ft.status = ft;
+
+ /* Set the miss rule for the KSPI table with a drop action. */
+ err = ipsec_miss_create(mdev, tx->ft.sa_kspi, &tx->kspi_miss, NULL);
+ if (err)
+ goto err_kspi_miss;
+
+ err = tx_create_kspi_bypass_rules(mdev, tx);
+ if (err)
+ goto err_kspi_rule;
+
+ err = ipsec_counter_rule_tx(mdev, tx);
+ if (err)
+ goto err_status_rule;
+
+ err = ipsec_tx_create_roce(mdev, tx);
+ if (err)
+ goto err_counter_rule;
+
+ return 0;
+
+err_counter_rule:
+ mlx5_del_flow_rules(&tx->status.rule);
+err_status_rule:
+ mlx5_del_flow_rules(&tx->kspi_bypass_rule.rule);
+ mlx5_del_flow_rules(&tx->kspi_bypass_rule.kspi_rule);
+err_kspi_rule:
+ mlx5_destroy_flow_table(tx->ft.status);
+err_status_ft:
+ if (tx->chains) {
+ ipsec_chains_destroy(tx->chains);
+ } else {
+ mlx5_del_flow_rules(&tx->pol.rule);
+ mlx5_destroy_flow_group(tx->pol.group);
+ }
+err_pol_miss:
+ if (!tx->chains)
+ mlx5_destroy_flow_table(tx->ft.pol);
+err_pol_ft:
+ mlx5_del_flow_rules(&tx->kspi_miss.rule);
+ mlx5_destroy_flow_group(tx->kspi_miss.group);
+err_kspi_miss:
+ mlx5_destroy_flow_table(tx->ft.sa);
+err_reqid_ft:
+ mlx5_destroy_flow_table(tx->ft.sa_kspi);
+ return err;
+}
+
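+/*
+ * The TX tables are created on first use and torn down when the last
+ * reference goes away; ft.refcnt is protected by ft.mutex, taken by
+ * the tx_ft_get()/tx_ft_put() wrappers below.
+ */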
+static int tx_get(struct mlx5_core_dev *mdev, struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx *tx)
+{
+ int err;
+
+ if (tx->ft.refcnt)
+ goto skip;
+
+ err = tx_create(mdev, tx);
+ if (err)
+ return err;
+
+skip:
+ tx->ft.refcnt++;
+ return 0;
+}
+
+static void tx_put(struct mlx5e_ipsec *ipsec, struct mlx5e_ipsec_tx *tx)
+{
+ if (--tx->ft.refcnt)
+ return;
+
+ tx_destroy(tx);
+}
+
+static struct mlx5e_ipsec_tx *tx_ft_get(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ int err;
+
+ mutex_lock(&tx->ft.mutex);
+ err = tx_get(mdev, ipsec, tx);
+ mutex_unlock(&tx->ft.mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ return tx;
+}
+
+static struct mlx5_flow_table *tx_ft_get_policy(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec *ipsec,
+ u32 prio)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ mutex_lock(&tx->ft.mutex);
+ err = tx_get(mdev, ipsec, tx);
+ if (err)
+ goto err_get;
+
+ ft = tx->chains ? ipsec_chains_get_table(tx->chains, prio) : tx->ft.pol;
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_get_ft;
+ }
+
+ mutex_unlock(&tx->ft.mutex);
+ return ft;
+
+err_get_ft:
+ tx_put(ipsec, tx);
+err_get:
+ mutex_unlock(&tx->ft.mutex);
+ return ERR_PTR(err);
+}
+
+static void tx_ft_put_policy(struct mlx5e_ipsec *ipsec, u32 prio)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+ mutex_lock(&tx->ft.mutex);
+ if (tx->chains)
+ ipsec_chains_put_table(tx->chains, prio);
+
+ tx_put(ipsec, tx);
+ mutex_unlock(&tx->ft.mutex);
+}
+
+static void tx_ft_put(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+ mutex_lock(&tx->ft.mutex);
+ tx_put(ipsec, tx);
+ mutex_unlock(&tx->ft.mutex);
+}
+
+static void setup_fte_reg_a_with_tag(struct mlx5_flow_spec *spec,
+ u16 kspi)
+{
+ /* Add IPsec indicator in metadata_reg_a. */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_a);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC << 23 | kspi);
+}
+
+static void setup_fte_reg_a_no_tag(struct mlx5_flow_spec *spec)
+{
+ /* Add IPsec indicator in metadata_reg_a. */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_a,
+ MLX5_ETH_WQE_FT_META_IPSEC << 23);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_a,
+ 0);
+}
+
+static void setup_fte_reg_c0(struct mlx5_flow_spec *spec, u32 reqid)
+{
+ /* Pass policy check before choosing this SA */
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_0);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.metadata_reg_c_0, reqid);
+}
+
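+/*
+ * Match the policy's upper-layer selector: the L4 ports, when present,
+ * plus the IP protocol.  This is what makes e.g. the IKE/NAT-T UDP
+ * bypass policies port-specific.
+ */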
+static void setup_fte_upper_proto_match(struct mlx5_flow_spec *spec, struct upspec *upspec)
+{
+ switch (upspec->proto) {
+ case IPPROTO_UDP:
+ if (upspec->dport) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
+ spec->match_criteria, udp_dport);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ udp_dport, upspec->dport);
+ }
+
+ if (upspec->sport) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
+ spec->match_criteria, udp_sport);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ udp_sport, upspec->sport);
+ }
+ break;
+ case IPPROTO_TCP:
+ if (upspec->dport) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
+ spec->match_criteria, tcp_dport);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ tcp_dport, upspec->dport);
+ }
+
+ if (upspec->sport) {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4,
+ spec->match_criteria, tcp_sport);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value,
+ tcp_sport, upspec->sport);
+ }
+ break;
+ default:
+ return;
+ }
+
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, spec->match_criteria, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, spec->match_value, ip_protocol, upspec->proto);
+}
+
+static int tx_add_kspi_rule(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_tx *tx,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ setup_fte_no_frags(spec);
+ setup_fte_reg_a_with_tag(spec, sa_entry->kspi);
+
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
+ rule = mlx5_add_flow_rules(tx->ft.sa_kspi, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add TX ipsec kspi rule err=%d\n", err);
+ goto err_add_kspi_flow;
+ }
+ ipsec_rule->kspi_rule = rule;
+ kvfree(spec);
+ return 0;
+
+err_add_kspi_flow:
+ kvfree(spec);
+ return err;
+}
+
+static int tx_add_reqid_ip_rules(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5e_ipsec_tx *tx,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ if (attrs->reqid) {
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+ setup_fte_no_frags(spec);
+ setup_fte_reg_c0(spec, attrs->reqid);
+ rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add TX ipsec reqid rule err=%d\n", err);
+ goto err_add_reqid_rule;
+ }
+ ipsec_rule->reqid_rule = rule;
+ memset(spec, 0, sizeof(*spec));
+ }
+
+ if (sa_entry->vid != VLAN_NONE)
+ setup_fte_vid(spec, sa_entry->vid);
+ else
+ setup_fte_no_vid(spec);
+
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+ setup_fte_no_frags(spec);
+
+ rule = mlx5_add_flow_rules(tx->ft.sa, spec, flow_act, dest, num_dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add TX ipsec ip rule err=%d\n", err);
+ goto err_add_ip_rule;
+ }
+ ipsec_rule->rule = rule;
+ kvfree(spec);
+ return 0;
+
+err_add_ip_rule:
+ mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
+err_add_reqid_rule:
+ kvfree(spec);
+ return err;
+}
+
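+/*
+ * Install the TX SA rules: a KSPI rule for Ethernet traffic, plus the
+ * reqid and IP-header rules used by RoCE traffic arriving through the
+ * policy table (see the table layout comment in tx_create()).
+ */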
+static int tx_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5e_ipsec_tx *tx;
+ struct mlx5_fc *counter;
+ int err;
+
+ tx = tx_ft_get(mdev, ipsec);
+ if (IS_ERR(tx))
+ return PTR_ERR(tx);
+
+ err = setup_pkt_reformat(mdev, attrs, &flow_act);
+ if (err)
+ goto err_pkt_reformat;
+
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_add_cnt;
+ }
+
+ flow_act.crypto.type = MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC;
+ flow_act.crypto.obj_id = sa_entry->ipsec_obj_id;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ if (attrs->drop)
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ else
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ dest[0].ft = tx->ft.status;
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(counter);
+
+ err = tx_add_kspi_rule(sa_entry, tx, &flow_act, dest, 2);
+ if (err) {
+ goto err_add_kspi_rule;
+ }
+
+ err = tx_add_reqid_ip_rules(sa_entry, tx, &flow_act, dest, 2);
+ if (err) {
+ goto err_add_reqid_ip_rule;
+ }
+
+ ipsec_rule->fc = counter;
+ ipsec_rule->pkt_reformat = flow_act.pkt_reformat;
+ return 0;
+
+err_add_reqid_ip_rule:
+ mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
+err_add_kspi_rule:
+ mlx5_fc_destroy(mdev, counter);
+err_add_cnt:
+ if (flow_act.pkt_reformat)
+ mlx5_packet_reformat_dealloc(mdev, flow_act.pkt_reformat);
+err_pkt_reformat:
+ tx_ft_put(ipsec);
+ return err;
+}
+
+static int tx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5e_ipsec_tx *tx = pol_entry->ipsec->tx;
+ struct mlx5_flow_destination dest[2] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ int err, dstn = 0;
+
+ ft = tx_ft_get_policy(mdev, pol_entry->ipsec, attrs->prio);
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
+
+ switch (attrs->action) {
+ case IPSEC_POLICY_IPSEC:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ err = setup_modify_header(mdev, attrs->reqid,
+ IPSEC_DIR_OUTBOUND, &flow_act);
+ if (err)
+ goto err_mod_header;
+ break;
+ case IPSEC_POLICY_DISCARD:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dstn].counter_id = mlx5_fc_id(tx->fc->drop);
+ dstn++;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_mod_header;
+ }
+
+ if (attrs->vid != VLAN_NONE)
+ setup_fte_vid(spec, attrs->vid);
+ else
+ setup_fte_no_vid(spec);
+
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ dest[dstn].ft = tx->ft.sa;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dstn++;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add TX ipsec rule err=%d\n", err);
+ goto err_action;
+ }
+
+ kvfree(spec);
+ pol_entry->ipsec_rule.rule = rule;
+ pol_entry->ipsec_rule.modify_hdr = flow_act.modify_hdr;
+ return 0;
+
+err_action:
+ if (flow_act.modify_hdr)
+ mlx5_modify_header_dealloc(mdev, flow_act.modify_hdr);
+err_mod_header:
+ kvfree(spec);
+err_alloc:
+ tx_ft_put_policy(pol_entry->ipsec, attrs->prio);
+ return err;
+}
+
+static int rx_add_policy(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ struct mlx5_accel_pol_xfrm_attrs *attrs = &pol_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+ struct mlx5e_ipsec *ipsec = pol_entry->ipsec;
+ struct mlx5_flow_destination dest[2];
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ struct mlx5e_ipsec_rx *rx;
+ int err, dstn = 0;
+
+ rx = (attrs->family == AF_INET) ? ipsec->rx_ipv4 : ipsec->rx_ipv6;
+ ft = rx->chains ? ipsec_chains_get_table(rx->chains, attrs->prio) : rx->ft.pol;
+ if (IS_ERR(ft))
+ return PTR_ERR(ft);
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ switch (attrs->action) {
+ case IPSEC_POLICY_IPSEC:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ break;
+ case IPSEC_POLICY_DISCARD:
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[dstn].counter_id = mlx5_fc_id(rx->fc->drop);
+ dstn++;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_action;
+ }
+
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ dest[dstn].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest[dstn].ft = rx->ft.sa;
+ dstn++;
+
+ if (attrs->family == AF_INET)
+ setup_fte_addr4(spec, &attrs->saddr.a4, &attrs->daddr.a4);
+ else
+ setup_fte_addr6(spec, attrs->saddr.a6, attrs->daddr.a6);
+
+ setup_fte_no_frags(spec);
+ setup_fte_upper_proto_match(spec, &attrs->upspec);
+ if (attrs->vid != VLAN_NONE)
+ setup_fte_vid(spec, attrs->vid);
+ else
+ setup_fte_no_vid(spec);
+
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add RX IPsec policy rule err=%d\n", err);
+ goto err_action;
+ }
+ pol_entry->ipsec_rule.rule = rule;
+
+ /* Add also rule for zero vid */
+ if (attrs->vid == VLAN_NONE) {
+ clear_fte_vid(spec);
+ setup_fte_vid(spec, 0);
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, dstn);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add RX IPsec policy rule err=%d\n",
+ err);
+ goto err_action;
+ }
+ pol_entry->ipsec_rule.vid_zero_rule = rule;
+ }
+
+ kvfree(spec);
+ return 0;
+
+err_action:
+ if (pol_entry->ipsec_rule.rule != NULL)
+ mlx5_del_flow_rules(&pol_entry->ipsec_rule.rule);
+ kvfree(spec);
+err_alloc:
+ if (rx->chains != NULL)
+ ipsec_chains_put_table(rx->chains, attrs->prio);
+ return err;
+}
+
+static void ipsec_fs_destroy_counters(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->drop);
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+ kfree(rx_ipv4->fc);
+ mlx5_fc_destroy(mdev, tx->fc->drop);
+ mlx5_fc_destroy(mdev, tx->fc->cnt);
+ kfree(tx->fc);
+}
+
+static int ipsec_fs_init_counters(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5e_ipsec_rx *rx_ipv4 = ipsec->rx_ipv4;
+ struct mlx5e_ipsec_rx *rx_ipv6 = ipsec->rx_ipv6;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_tx *tx = ipsec->tx;
+ struct mlx5e_ipsec_fc *fc;
+ struct mlx5_fc *counter;
+ int err;
+
+ fc = kzalloc(sizeof(*tx->fc), GFP_KERNEL);
+ if (!fc)
+ return -ENOMEM;
+
+ tx->fc = fc;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_tx_fc_alloc;
+ }
+
+ fc->cnt = counter;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_tx_fc_cnt;
+ }
+
+ fc->drop = counter;
+
+ fc = kzalloc(sizeof(*fc), GFP_KERNEL);
+ if (!fc) {
+ err = -ENOMEM;
+ goto err_tx_fc_drop;
+ }
+
+ /* Both IPv4 and IPv6 point to same flow counters struct. */
+ rx_ipv4->fc = fc;
+ rx_ipv6->fc = fc;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_rx_fc_alloc;
+ }
+
+ fc->cnt = counter;
+ counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(counter)) {
+ err = PTR_ERR(counter);
+ goto err_rx_fc_cnt;
+ }
+
+ fc->drop = counter;
+ return 0;
+
+err_rx_fc_cnt:
+ mlx5_fc_destroy(mdev, rx_ipv4->fc->cnt);
+err_rx_fc_alloc:
+ kfree(rx_ipv4->fc);
+err_tx_fc_drop:
+ mlx5_fc_destroy(mdev, tx->fc->drop);
+err_tx_fc_cnt:
+ mlx5_fc_destroy(mdev, tx->fc->cnt);
+err_tx_fc_alloc:
+ kfree(tx->fc);
+ return err;
+}
+
+static int ipsec_status_rule(struct mlx5_core_dev *mdev,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_modify_hdr *modify_hdr;
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ /* Action to copy the 7-bit ipsec_syndrome to regB[24:30]. */
+ MLX5_SET(copy_action_in, action, action_type, MLX5_ACTION_TYPE_COPY);
+ MLX5_SET(copy_action_in, action, src_field, MLX5_ACTION_IN_FIELD_IPSEC_SYNDROME);
+ MLX5_SET(copy_action_in, action, src_offset, 0);
+ MLX5_SET(copy_action_in, action, length, 7);
+ MLX5_SET(copy_action_in, action, dst_field, MLX5_ACTION_IN_FIELD_METADATA_REG_B);
+ MLX5_SET(copy_action_in, action, dst_offset, 24);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_KERNEL,
+ 1, action);
+
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ mlx5_core_err(mdev,
+ "fail to alloc ipsec copy modify_header_id err=%d\n", err);
+ goto out_spec;
+ }
+
+ /* create fte */
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ flow_act.modify_hdr = modify_hdr;
+
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "fail to add ipsec rx err copy rule err=%d\n", err);
+ goto out;
+ }
+
+ kvfree(spec);
+ rx->status.rule = rule;
+ rx->status.modify_hdr = modify_hdr;
+ return 0;
+
+out:
+ mlx5_modify_header_dealloc(mdev, modify_hdr);
+out_spec:
+ kvfree(spec);
+ return err;
+}
+
+static void ipsec_fs_rx_roce_rules_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
+{
+ if (!rx_roce->ns_rdma)
+ return;
+
+ mlx5_del_flow_rules(&rx_roce->roce_miss.rule);
+ mlx5_del_flow_rules(&rx_roce->rule);
+ mlx5_destroy_flow_group(rx_roce->roce_miss.group);
+ mlx5_destroy_flow_group(rx_roce->g);
+}
+
+static void ipsec_fs_rx_catchall_rules_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
+{
+ mutex_lock(&rx->ft.mutex);
+ mlx5_del_flow_rules(&rx->sa.rule);
+ mlx5_destroy_flow_group(rx->sa.group);
+ if (rx->chains == NULL) {
+ mlx5_del_flow_rules(&rx->pol.rule);
+ mlx5_destroy_flow_group(rx->pol.group);
+ }
+ mlx5_del_flow_rules(&rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ ipsec_fs_rx_roce_rules_destroy(&rx->roce);
+ mutex_unlock(&rx->ft.mutex);
+}
+
+static void ipsec_fs_rx_roce_table_destroy(struct mlx5e_ipsec_rx_roce *rx_roce)
+{
+ if (!rx_roce->ns_rdma)
+ return;
+
+ mlx5_destroy_flow_table(rx_roce->ft_rdma);
+ mlx5_destroy_flow_table(rx_roce->ft);
+}
+
+static void
+ipsec_fs_rx_ip_type_catchall_rule_destroy(struct mlx5e_ipsec_rx_ip_type *rx_ip_type)
+{
+ mlx5_del_flow_rules(&rx_ip_type->ipv4_rule);
+ mlx5_del_flow_rules(&rx_ip_type->ipv6_rule);
+ mlx5_del_flow_rules(&rx_ip_type->miss.rule);
+ mlx5_destroy_flow_group(rx_ip_type->miss.group);
+ rx_ip_type->miss.group = NULL;
+}
+
+static void ipsec_fs_rx_table_destroy(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx)
+{
+ if (rx->chains) {
+ ipsec_chains_destroy(rx->chains);
+ } else {
+ mlx5_del_flow_rules(&rx->pol.rule);
+ mlx5_destroy_flow_table(rx->ft.pol);
+ }
+ mlx5_destroy_flow_table(rx->ft.sa);
+ mlx5_destroy_flow_table(rx->ft.status);
+ ipsec_fs_rx_roce_table_destroy(&rx->roce);
+}
+
+static void ipsec_roce_setup_udp_dport(struct mlx5_flow_spec *spec, u16 dport)
+{
+ spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, IPPROTO_UDP);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.udp_dport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport, dport);
+}
+
+static int ipsec_roce_rx_rule_setup(struct mlx5_flow_destination *default_dst,
+ struct mlx5e_ipsec_rx_roce *roce, struct mlx5_core_dev *mdev)
+{
+ struct mlx5_flow_destination dst = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ ipsec_roce_setup_udp_dport(spec, ROCE_V2_UDP_DPORT);
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ dst.ft = roce->ft_rdma;
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ rule = mlx5_add_flow_rules(roce->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX roce ipsec rule err=%d\n",
+ err);
+ goto fail_add_rule;
+ }
+
+ roce->rule = rule;
+
+ rule = mlx5_add_flow_rules(roce->ft, NULL, &flow_act, default_dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Fail to add RX roce ipsec miss rule err=%d\n",
+ err);
+ goto fail_add_default_rule;
+ }
+
+ roce->roce_miss.rule = rule;
+
+ kvfree(spec);
+ return 0;
+
+fail_add_default_rule:
+ mlx5_del_flow_rules(&roce->rule);
+fail_add_rule:
+ kvfree(spec);
+ return err;
+}
+
+static int ipsec_roce_rx_rules(struct mlx5e_ipsec_rx *rx, struct mlx5_flow_destination *defdst,
+ struct mlx5_core_dev *mdev)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ void *outer_headers_c;
+ u32 *in;
+ int err = 0;
+ int ix = 0;
+ u8 *mc;
+
+ if (!rx->roce.ns_rdma)
+ return 0;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc, outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += 1;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(rx->roce.ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create ipsec rx roce group at nic err=%d\n", err);
+ goto fail_group;
+ }
+ rx->roce.g = g;
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(create_flow_group_in));
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += 1;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ g = mlx5_create_flow_group(rx->roce.ft, in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev, "Fail to create ipsec rx roce miss group at nic err=%d\n",
+ err);
+ goto fail_mgroup;
+ }
+ rx->roce.roce_miss.group = g;
+
+ err = ipsec_roce_rx_rule_setup(defdst, &rx->roce, mdev);
+ if (err)
+ goto fail_setup_rule;
+
+ kvfree(in);
+ return 0;
+
+fail_setup_rule:
+ mlx5_destroy_flow_group(rx->roce.roce_miss.group);
+fail_mgroup:
+ mlx5_destroy_flow_group(rx->roce.g);
+fail_group:
+ kvfree(in);
+ return err;
+}
+
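+/*
+ * Install the per-family RX catch-all rules: the RoCE dispatch rules,
+ * the status-table rule that counts and forwards decrypted packets,
+ * and the default miss rules of the policy and SA tables.
+ */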
+static int ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *defdst)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_flow_destination dest[2] = {};
+ int err = 0;
+
+ mutex_lock(&rx->ft.mutex);
+ /* IPsec RoCE RX rules */
+ err = ipsec_roce_rx_rules(rx, defdst, mdev);
+ if (err)
+ goto out;
+
+ /* IPsec Rx IP Status table rule */
+ dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ if (rx->roce.ft)
+ dest[0].ft = rx->roce.ft;
+ else
+ dest[0].ft = priv->fts.vlan.t;
+
+ dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest[1].counter_id = mlx5_fc_id(rx->fc->cnt);
+ err = ipsec_status_rule(mdev, rx, dest);
+ if (err)
+ goto err_roce_rules_destroy;
+
+ if (!rx->chains) {
+ /* IPsec Rx IP policy default miss rule */
+ err = ipsec_miss_create(mdev, rx->ft.pol, &rx->pol, defdst);
+ if (err)
+ goto err_status_rule_destroy;
+ }
+
+ /* FIXME: This is a workaround for the current design, which
+ * installs the SA on the first packet, so we need to forward
+ * that packet to the stack. It does not work with RoCE and
+ * eswitch traffic.
+ */
+ err = ipsec_miss_create(mdev, rx->ft.sa, &rx->sa, defdst);
+ if (err)
+ goto err_status_sa_rule_destroy;
+
+ mutex_unlock(&rx->ft.mutex);
+ return 0;
+
+err_status_sa_rule_destroy:
+ if (!rx->chains) {
+ mlx5_del_flow_rules(&rx->pol.rule);
+ mlx5_destroy_flow_group(rx->pol.group);
+ }
+err_status_rule_destroy:
+ mlx5_del_flow_rules(&rx->status.rule);
+ mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+err_roce_rules_destroy:
+ ipsec_fs_rx_roce_rules_destroy(&rx->roce);
+out:
+ mutex_unlock(&rx->ft.mutex);
+ return err;
+}
+
+static int ipsec_fs_rx_roce_tables_create(struct mlx5e_ipsec_rx *rx,
+ int rx_init_level, int rdma_init_level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ if (!rx->roce.ns_rdma)
+ return 0;
+
+ ft_attr.max_fte = 2;
+ ft_attr.level = rx_init_level;
+ ft = mlx5_create_flow_table(rx->ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ return err;
+ }
+ rx->roce.ft = ft;
+
+ ft_attr.max_fte = 0;
+ ft_attr.level = rdma_init_level;
+ ft = mlx5_create_flow_table(rx->roce.ns_rdma, &ft_attr);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto out;
+ }
+ rx->roce.ft_rdma = ft;
+
+ return 0;
+out:
+ mlx5_destroy_flow_table(rx->roce.ft);
+ rx->roce.ft = NULL;
+ return err;
+}
+
+static int
+ipsec_fs_rx_ip_type_catchall_rules_create(struct mlx5e_priv *priv,
+ struct mlx5_flow_destination *defdst)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_destination dst = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+
+ /* Set rule for ipv4 packets */
+ dst.ft = ipsec->rx_ipv4->ft.pol;
+ setup_fte_ip_version(spec, AF_INET);
+ rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add ipv4 rule to ip_type table err=%d\n",
+ err);
+ goto out;
+ }
+ ipsec->rx_ip_type->ipv4_rule = rule;
+
+ /* Set rule for ipv6 packets */
+ dst.ft = ipsec->rx_ipv6->ft.pol;
+ setup_fte_ip_version(spec, AF_INET6);
+ rule = mlx5_add_flow_rules(ipsec->rx_ip_type->ft, spec, &flow_act, &dst, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev, "Failed to add ipv6 rule to ip_type table err=%d\n",
+ err);
+ goto fail_add_ipv6_rule;
+ }
+ ipsec->rx_ip_type->ipv6_rule = rule;
+
+ /* set miss rule */
+ err = ipsec_miss_create(mdev, ipsec->rx_ip_type->ft, &ipsec->rx_ip_type->miss, defdst);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to add miss rule to ip_type table err=%d\n",
+ err);
+ goto fail_miss_rule;
+ }
+
+ goto out;
+
+fail_miss_rule:
+ mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv6_rule);
+fail_add_ipv6_rule:
+ mlx5_del_flow_rules(&ipsec->rx_ip_type->ipv4_rule);
+out:
+ kvfree(spec);
+ return err;
+}
+
+static int
+ipsec_fs_rx_ip_type_table_create(struct mlx5e_priv *priv,
+ int level)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ /* Create rx ip type table */
+ ft = ipsec_rx_ft_create(ipsec->rx_ip_type->ns, level, 0, 1);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto out;
+ }
+ ipsec->rx_ip_type->ft = ft;
+
+ priv->fts.ipsec_ft = priv->ipsec->rx_ip_type->ft;
+
+out:
+ return err;
+}
+
+static int ipsec_fs_rx_table_create(struct mlx5_core_dev *mdev, struct mlx5e_ipsec_rx *rx,
+ int rx_init_level, int rdma_init_level)
+{
+ struct mlx5_flow_namespace *ns = rx->ns;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
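+ /*
+ * Table level layout, summarizing the calls below: the policy
+ * table (or the chains object) sits at rx_init_level, the SA
+ * table at +1, the status table at +2 and the RoCE table at +3.
+ */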
+ mutex_lock(&rx->ft.mutex);
+
+ /* IPsec Rx IP SA table create */
+ ft = ipsec_rx_ft_create(ns, rx_init_level + 1, 0, 1);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto out;
+ }
+ rx->ft.sa = ft;
+
+ /* IPsec Rx IP Status table create */
+ ft = ipsec_rx_ft_create(ns, rx_init_level + 2, 0, 1);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_sa_table_destroy;
+ }
+ rx->ft.status = ft;
+
+ if (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO) {
+ rx->chains = ipsec_chains_create(mdev, rx->ft.sa,
+ MLX5_FLOW_NAMESPACE_KERNEL, 0,
+ rx_init_level, &rx->ft.pol);
+ if (IS_ERR(rx->chains)) {
+ err = PTR_ERR(rx->chains);
+ goto err_status_table_destroy;
+ }
+ } else {
+ ft = ipsec_rx_ft_create(ns, rx_init_level, 0, 1);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_status_table_destroy;
+ }
+ rx->ft.pol = ft;
+ }
+
+ /* IPsec RoCE RX tables create */
+ err = ipsec_fs_rx_roce_tables_create(rx, rx_init_level + 3,
+ rdma_init_level);
+ if (err)
+ goto err_pol_table_destroy;
+
+ goto out;
+
+err_pol_table_destroy:
+ mlx5_destroy_flow_table(rx->ft.pol);
+err_status_table_destroy:
+ mlx5_destroy_flow_table(rx->ft.status);
+err_sa_table_destroy:
+ mlx5_destroy_flow_table(rx->ft.sa);
+out:
+ mutex_unlock(&rx->ft.mutex);
+ return err;
+}
+
+#define NIC_RDMA_BOTH_DIRS_CAPS (MLX5_FT_NIC_RX_2_NIC_RX_RDMA | MLX5_FT_NIC_TX_RDMA_2_NIC_TX)
+
+static void mlx5e_accel_ipsec_fs_init_roce(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_namespace *ns;
+
+ if ((MLX5_CAP_GEN_2(ipsec->mdev, flow_table_type_2_type) &
+ NIC_RDMA_BOTH_DIRS_CAPS) != NIC_RDMA_BOTH_DIRS_CAPS) {
+ mlx5_core_dbg(mdev, "Failed to init roce ns, capabilities not supported\n");
+ return;
+ }
+
+ ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC);
+ if (!ns) {
+ mlx5_core_err(mdev, "Failed to init roce rx ns\n");
+ return;
+ }
+
+ ipsec->rx_ipv4->roce.ns_rdma = ns;
+ ipsec->rx_ipv6->roce.ns_rdma = ns;
+
+ ns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC);
+ if (!ns) {
+ ipsec->rx_ipv4->roce.ns_rdma = NULL;
+ ipsec->rx_ipv6->roce.ns_rdma = NULL;
+ mlx5_core_err(mdev, "Failed to init roce tx ns\n");
+ return;
+ }
+
+ ipsec->tx->roce.ns = ns;
+}
+
+int mlx5e_accel_ipsec_fs_add_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
+ return tx_add_rule(sa_entry);
+
+ return rx_add_rule(sa_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_rule(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_del_flow_rules(&ipsec_rule->rule);
+ mlx5_del_flow_rules(&ipsec_rule->kspi_rule);
+ if (ipsec_rule->vid_zero_rule != NULL)
+ mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
+ if (ipsec_rule->reqid_rule != NULL)
+ mlx5_del_flow_rules(&ipsec_rule->reqid_rule);
+ mlx5_fc_destroy(mdev, ipsec_rule->fc);
+ mlx5_packet_reformat_dealloc(mdev, ipsec_rule->pkt_reformat);
+ if (sa_entry->attrs.dir == IPSEC_DIR_OUTBOUND) {
+ tx_ft_put(sa_entry->ipsec);
+ return;
+ }
+
+ if (ipsec_rule->modify_hdr != NULL)
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+}
+
+int mlx5e_accel_ipsec_fs_add_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ if (pol_entry->attrs.dir == IPSEC_DIR_OUTBOUND)
+ return tx_add_policy(pol_entry);
+
+ return rx_add_policy(pol_entry);
+}
+
+void mlx5e_accel_ipsec_fs_del_pol(struct mlx5e_ipsec_pol_entry *pol_entry)
+{
+ struct mlx5e_ipsec_rule *ipsec_rule = &pol_entry->ipsec_rule;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_pol2dev(pol_entry);
+
+ mlx5_del_flow_rules(&ipsec_rule->rule);
+ if (ipsec_rule->vid_zero_rule != NULL)
+ mlx5_del_flow_rules(&ipsec_rule->vid_zero_rule);
+
+ if (pol_entry->attrs.dir == IPSEC_DIR_INBOUND) {
+ struct mlx5e_ipsec_rx *rx;
+
+ rx = (pol_entry->attrs.family == AF_INET)
+ ? pol_entry->ipsec->rx_ipv4
+ : pol_entry->ipsec->rx_ipv6;
+ if (rx->chains)
+ ipsec_chains_put_table(rx->chains,
+ pol_entry->attrs.prio);
+ return;
+ }
+
+ if (ipsec_rule->modify_hdr)
+ mlx5_modify_header_dealloc(mdev, ipsec_rule->modify_hdr);
+
+ tx_ft_put_policy(pol_entry->ipsec, pol_entry->attrs.prio);
+}
+
+void mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(struct mlx5e_priv *priv)
+{
+ /* Check if IPsec supported */
+ if (!priv->ipsec)
+ return;
+
+ ipsec_fs_rx_ip_type_catchall_rule_destroy(priv->ipsec->rx_ip_type);
+ ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
+ ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
+}
+
+int mlx5e_accel_ipsec_fs_rx_catchall_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ struct mlx5_flow_destination dest = {};
+ int err = 0;
+
+ /* Check if IPsec supported */
+ if (!ipsec)
+ return 0;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fts.vlan.t;
+ err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv6, &dest);
+ if (err)
+ goto out;
+
+ err = ipsec_fs_rx_catchall_rules(priv, ipsec->rx_ipv4, &dest);
+ if (err) {
+ ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
+ goto out;
+ }
+
+ err = ipsec_fs_rx_ip_type_catchall_rules_create(priv, &dest);
+ if (err) {
+ ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv6);
+ ipsec_fs_rx_catchall_rules_destroy(priv->mdev, priv->ipsec->rx_ipv4);
+ }
+
+out:
+ return err;
+}
+
+void mlx5e_accel_ipsec_fs_rx_tables_destroy(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+
+ /* Check if IPsec supported */
+ if (!ipsec)
+ return;
+
+ mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
+ ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv6);
+ ipsec_fs_rx_table_destroy(mdev, ipsec->rx_ipv4);
+}
+
+int mlx5e_accel_ipsec_fs_rx_tables_create(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ int err = 0;
+
+ /* Check if IPsec supported */
+ if (!ipsec)
+ return 0;
+
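+ /*
+ * Level layout, summarizing the calls below: the ip_type
+ * dispatch table sits at level 0, the IPv4 table block starts
+ * at level 1 and the IPv6 block at level 5; their RDMA tables
+ * use levels 0 and 1 respectively.
+ */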
+ err = ipsec_fs_rx_ip_type_table_create(priv, 0);
+ if (err)
+ return err;
+
+ err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv4, 1, 0);
+ if (err)
+ goto err_ipv4_table;
+
+ err = ipsec_fs_rx_table_create(ipsec->mdev, ipsec->rx_ipv6, 5, 1);
+ if (err)
+ goto err_ipv6_table;
+
+ return 0;
+
+err_ipv6_table:
+ ipsec_fs_rx_table_destroy(priv->mdev, ipsec->rx_ipv4);
+err_ipv4_table:
+ mlx5_destroy_flow_table(ipsec->rx_ip_type->ft);
+ return err;
+}
+
+void mlx5e_accel_ipsec_fs_cleanup(struct mlx5e_ipsec *ipsec)
+{
+ WARN_ON(ipsec->tx->ft.refcnt);
+ mutex_destroy(&ipsec->rx_ipv6->ft.mutex);
+ mutex_destroy(&ipsec->rx_ipv4->ft.mutex);
+ mutex_destroy(&ipsec->tx->ft.mutex);
+ ipsec_fs_destroy_counters(ipsec);
+ kfree(ipsec->rx_ip_type);
+ kfree(ipsec->rx_ipv6);
+ kfree(ipsec->rx_ipv4);
+ kfree(ipsec->tx);
+}
+
+int mlx5e_accel_ipsec_fs_init(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_flow_namespace *tns, *rns;
+ int err = -ENOMEM;
+
+ tns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_EGRESS_IPSEC);
+ if (!tns)
+ return -EOPNOTSUPP;
+
+ rns = mlx5_get_flow_namespace(ipsec->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+ if (!rns)
+ return -EOPNOTSUPP;
+
+ ipsec->tx = kzalloc(sizeof(*ipsec->tx), GFP_KERNEL);
+ if (!ipsec->tx)
+ return -ENOMEM;
+
+ ipsec->rx_ip_type = kzalloc(sizeof(*ipsec->rx_ip_type), GFP_KERNEL);
+ if (!ipsec->rx_ip_type)
+ goto err_tx;
+
+ ipsec->rx_ipv4 = kzalloc(sizeof(*ipsec->rx_ipv4), GFP_KERNEL);
+ if (!ipsec->rx_ipv4)
+ goto err_ip_type;
+
+ ipsec->rx_ipv6 = kzalloc(sizeof(*ipsec->rx_ipv6), GFP_KERNEL);
+ if (!ipsec->rx_ipv6)
+ goto err_rx_ipv4;
+
+ err = ipsec_fs_init_counters(ipsec);
+ if (err)
+ goto err_rx_ipv6;
+
+ ipsec->tx->ns = tns;
+ mutex_init(&ipsec->tx->ft.mutex);
+ ipsec->rx_ip_type->ns = rns;
+ ipsec->rx_ipv4->ns = rns;
+ ipsec->rx_ipv6->ns = rns;
+ mutex_init(&ipsec->rx_ipv4->ft.mutex);
+ mutex_init(&ipsec->rx_ipv6->ft.mutex);
+
+ mlx5e_accel_ipsec_fs_init_roce(ipsec);
+
+ return 0;
+
+err_rx_ipv6:
+ kfree(ipsec->rx_ipv6);
+err_rx_ipv4:
+ kfree(ipsec->rx_ipv4);
+err_ip_type:
+ kfree(ipsec->rx_ip_type);
+err_tx:
+ kfree(ipsec->tx);
+ return err;
+}
+
+void mlx5e_accel_ipsec_fs_modify(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec_sa_entry sa_entry_shadow = {};
+ int err;
+
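+ /*
+ * Make-before-break: install rules for the updated SA first and
+ * only then remove the old ones, so traffic keeps flowing while
+ * the entry is modified.
+ */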
+ memcpy(&sa_entry_shadow, sa_entry, sizeof(*sa_entry));
+ memset(&sa_entry_shadow.ipsec_rule, 0x00, sizeof(sa_entry->ipsec_rule));
+
+ err = mlx5e_accel_ipsec_fs_add_rule(&sa_entry_shadow);
+ if (err)
+ return;
+ mlx5e_accel_ipsec_fs_del_rule(sa_entry);
+ memcpy(sa_entry, &sa_entry_shadow, sizeof(*sa_entry));
+}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
new file mode 100644
index 000000000000..cc0bc1f3fcd2
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
@@ -0,0 +1,486 @@
+/*-
+ * Copyright (c) 2023 NVIDIA corporation & affiliates.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/types.h>
+#include <netinet/in.h>
+#include <sys/socket.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <net/pfkeyv2.h>
+#include <netipsec/ipsec.h>
+#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/crypto.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
+
+u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
+{
+ u32 caps = 0;
+
+ if (!MLX5_CAP_GEN(mdev, ipsec_offload))
+ return 0;
+
+ if (!MLX5_CAP_GEN(mdev, log_max_dek))
+ return 0;
+
+ if (!(MLX5_CAP_GEN_64(mdev, general_obj_types) &
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return 0;
+
+ if (!MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ipsec_encrypt) ||
+ !MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ipsec_decrypt))
+ return 0;
+
+ if (!MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_encrypt) ||
+ !MLX5_CAP_IPSEC(mdev, ipsec_crypto_esp_aes_gcm_128_decrypt))
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
+ reformat_add_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ reformat_del_esp_trasport) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
+ caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
+ caps |= MLX5_IPSEC_CAP_PRIO;
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_transport_over_udp) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_transport_over_udp))
+ caps |= MLX5_IPSEC_CAP_ESPINUDP;
+ }
+
+ if (!caps)
+ return 0;
+
+ if (MLX5_CAP_IPSEC(mdev, ipsec_esn))
+ caps |= MLX5_IPSEC_CAP_ESN;
+
+ return caps;
+}
+EXPORT_SYMBOL_GPL(mlx5_ipsec_device_caps);
+
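+/*
+ * Illustrative sketch (not part of this commit): a consumer would
+ * typically gate its setup on the returned capability bits, e.g.:
+ *
+ *   caps = mlx5_ipsec_device_caps(mdev);
+ *   if ((caps & MLX5_IPSEC_CAP_PACKET_OFFLOAD) == 0)
+ *           return (EOPNOTSUPP);
+ *   if (caps & MLX5_IPSEC_CAP_ESN)
+ *           arm_esn_events();   (hypothetical helper)
+ */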
+static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
+ struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ void *aso_ctx;
+
+ aso_ctx = MLX5_ADDR_OF(ipsec_obj, obj, ipsec_aso);
+ /* ASO context */
+ MLX5_SET(ipsec_obj, obj, ipsec_aso_access_pd, pdn);
+ MLX5_SET(ipsec_obj, obj, full_offload, 1);
+ MLX5_SET(ipsec_aso, aso_ctx, valid, 1);
+ /* MLX5_IPSEC_ASO_REG_C_4_5 is a type C register that flow
+ * steering matches against. Note that this register was chosen
+ * arbitrarily and cannot be used for anything else while IPsec
+ * packet offload is active.
+ */
+ MLX5_SET(ipsec_obj, obj, aso_return_reg, MLX5_IPSEC_ASO_REG_C_4_5);
+ if (attrs->replay_esn.trigger) {
+ MLX5_SET(ipsec_aso, aso_ctx, esn_event_arm, 1);
+
+ if (attrs->dir == IPSEC_DIR_INBOUND) {
+ MLX5_SET(ipsec_aso, aso_ctx, window_sz,
+ attrs->replay_esn.replay_window);
+ if (attrs->replay_esn.replay_window != 0)
+ MLX5_SET(ipsec_aso, aso_ctx, mode,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION);
+ else
+ MLX5_SET(ipsec_aso, aso_ctx, mode,
+ MLX5_IPSEC_ASO_MODE);
+ }
+ MLX5_SET(ipsec_aso, aso_ctx, mode_parameter,
+ attrs->replay_esn.esn);
+ }
+
+ switch (attrs->dir) {
+ case IPSEC_DIR_OUTBOUND:
+ if (attrs->replay_esn.trigger)
+ MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
+ else
+ MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlx5_create_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_accel_esp_xfrm_attrs *attrs = &sa_entry->attrs;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ struct aes_gcm_keymat *aes_gcm = &attrs->aes_gcm;
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+ u32 in[MLX5_ST_SZ_DW(create_ipsec_obj_in)] = {};
+ void *obj, *salt_p, *salt_iv_p;
+ int err;
+
+ obj = MLX5_ADDR_OF(create_ipsec_obj_in, in, ipsec_object);
+
+ /* salt and seq_iv */
+ salt_p = MLX5_ADDR_OF(ipsec_obj, obj, salt);
+ memcpy(salt_p, &aes_gcm->salt, sizeof(aes_gcm->salt));
+
+ MLX5_SET(ipsec_obj, obj, icv_length, MLX5_IPSEC_OBJECT_ICV_LEN_16B);
+ salt_iv_p = MLX5_ADDR_OF(ipsec_obj, obj, implicit_iv);
+ memcpy(salt_iv_p, &aes_gcm->seq_iv, sizeof(aes_gcm->seq_iv));
+
+ /* esn */
+ if (attrs->replay_esn.trigger) {
+ MLX5_SET(ipsec_obj, obj, esn_en, 1);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
+ }
+
+ /* enc./dec. key */
+ MLX5_SET(ipsec_obj, obj, dekn, sa_entry->enc_key_id);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJ);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+
+ mlx5e_ipsec_packet_setup(obj, sa_entry->ipsec->pdn, attrs);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ sa_entry->ipsec_obj_id =
+ MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+ return err;
+}
+
+static void mlx5_destroy_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_ipsec_create_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct aes_gcm_keymat *aes_gcm = &sa_entry->attrs.aes_gcm;
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ int err;
+
+ /* key */
+ err = mlx5_encryption_key_create(mdev, sa_entry->ipsec->pdn,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC,
+ aes_gcm->aes_key,
+ aes_gcm->key_len,
+ &sa_entry->enc_key_id);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create encryption key (err = %d)\n", err);
+ return err;
+ }
+
+ err = mlx5_create_ipsec_obj(sa_entry);
+ if (err) {
+ mlx5_core_dbg(mdev, "Failed to create IPsec object (err = %d)\n", err);
+ goto err_enc_key;
+ }
+
+ return 0;
+
+err_enc_key:
+ mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
+ return err;
+}
+
+void mlx5_ipsec_free_sa_ctx(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+
+ mlx5_destroy_ipsec_obj(sa_entry);
+ mlx5_encryption_key_destroy(mdev, sa_entry->enc_key_id);
+}
+
+static void mlx5e_ipsec_aso_copy(struct mlx5_wqe_aso_ctrl_seg *ctrl,
+ struct mlx5_wqe_aso_ctrl_seg *data)
+{
+ if (!data)
+ return;
+
+ ctrl->data_mask_mode = data->data_mask_mode;
+ ctrl->condition_1_0_operand = data->condition_1_0_operand;
+ ctrl->condition_1_0_offset = data->condition_1_0_offset;
+ ctrl->data_offset_condition_operand = data->data_offset_condition_operand;
+ ctrl->condition_0_data = data->condition_0_data;
+ ctrl->condition_0_mask = data->condition_0_mask;
+ ctrl->condition_1_data = data->condition_1_data;
+ ctrl->condition_1_mask = data->condition_1_mask;
+ ctrl->bitwise_data = data->bitwise_data;
+ ctrl->data_mask = data->data_mask;
+}
+
+static int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_wqe_aso_ctrl_seg *data)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5e_ipsec_aso *aso = ipsec->aso;
+ struct mlx5_wqe_aso_ctrl_seg *ctrl;
+ struct mlx5_aso_wqe *wqe;
+ unsigned long expires;
+ u8 ds_cnt;
+ int ret;
+
+ spin_lock_bh(&aso->lock);
+ memset(aso->ctx, 0, sizeof(aso->ctx));
+ wqe = mlx5_aso_get_wqe(aso->aso);
+ ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ mlx5_aso_build_wqe(aso->aso, ds_cnt, wqe, sa_entry->ipsec_obj_id,
+ MLX5_ACCESS_ASO_OPC_MOD_IPSEC);
+
+ ctrl = &wqe->aso_ctrl;
+ ctrl->va_l = cpu_to_be32(lower_32_bits(aso->dma_addr) | ASO_CTRL_READ_EN);
+ ctrl->va_h = cpu_to_be32(upper_32_bits(aso->dma_addr));
+ ctrl->l_key = cpu_to_be32(ipsec->mkey);
+ mlx5e_ipsec_aso_copy(ctrl, data);
+
+ mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
+ expires = jiffies + msecs_to_jiffies(10);
+ do {
+ ret = mlx5_aso_poll_cq(aso->aso, false);
+ if (ret)
+ /* We are in atomic context */
+ udelay(10);
+ } while (ret && time_is_after_jiffies(expires));
+ spin_unlock_bh(&aso->lock);
+
+ return ret;
+}
+
+#define MLX5E_IPSEC_ESN_SCOPE_MID 0x80000000L
+
+static int mlx5_modify_ipsec_obj(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ struct mlx5_core_dev *mdev = mlx5e_ipsec_sa2dev(sa_entry);
+ u32 in[MLX5_ST_SZ_DW(modify_ipsec_obj_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(query_ipsec_obj_out)];
+ u64 modify_field_select = 0;
+ u64 general_obj_types;
+ void *obj;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC))
+ return -EINVAL;
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJ);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_IPSEC);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sa_entry->ipsec_obj_id);
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err) {
+ mlx5_core_err(mdev, "Query IPsec object failed (Object id %d), err = %d\n",
+ sa_entry->ipsec_obj_id, err);
+ return err;
+ }
+
+ obj = MLX5_ADDR_OF(query_ipsec_obj_out, out, ipsec_object);
+ modify_field_select = MLX5_GET64(ipsec_obj, obj, modify_field_select);
+
+ /* esn */
+ if (!(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP) ||
+ !(modify_field_select & MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB))
+ return -EOPNOTSUPP;
+
+ obj = MLX5_ADDR_OF(modify_ipsec_obj_in, in, ipsec_object);
+ MLX5_SET64(ipsec_obj, obj, modify_field_select,
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP |
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB);
+ MLX5_SET(ipsec_obj, obj, esn_msb, attrs->replay_esn.esn_msb);
+ MLX5_SET(ipsec_obj, obj, esn_overlap, attrs->replay_esn.overlap);
+
+ /* general object fields set */
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJ);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_accel_esp_modify_xfrm(struct mlx5e_ipsec_sa_entry *sa_entry,
+ const struct mlx5_accel_esp_xfrm_attrs *attrs)
+{
+ int err;
+
+ err = mlx5_modify_ipsec_obj(sa_entry, attrs);
+ if (err)
+ return;
+
+ memcpy(&sa_entry->attrs, attrs, sizeof(sa_entry->attrs));
+}
+
+static void mlx5e_ipsec_aso_update(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_wqe_aso_ctrl_seg *data)
+{
+ data->data_mask_mode = MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT << 6;
+ data->condition_1_0_operand = MLX5_ASO_ALWAYS_TRUE | MLX5_ASO_ALWAYS_TRUE << 4;
+
+ mlx5e_ipsec_aso_query(sa_entry, data);
+}
+
+#define MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET 0
+
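+/*
+ * Worked example (illustrative): MLX5E_IPSEC_ESN_SCOPE_MID is 2^31, so a
+ * hardware-reported mode_parameter of 0x00000010 means the 32-bit
+ * sequence number wrapped into the lower half of its range: esn_msb is
+ * incremented and the overlap bit cleared. A value such as 0xf0000000 is
+ * still in the upper half, so only the overlap bit is set.
+ */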
+static void mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry,
+ u32 mode_param)
+{
+ struct mlx5_accel_esp_xfrm_attrs attrs = {};
+ struct mlx5_wqe_aso_ctrl_seg data = {};
+
+ if (mode_param < MLX5E_IPSEC_ESN_SCOPE_MID) {
+ sa_entry->esn_state.esn_msb++;
+ sa_entry->esn_state.overlap = 0;
+ } else {
+ sa_entry->esn_state.overlap = 1;
+ }
+
+ mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs, sa_entry->attrs.dir);
+
+ mlx5_accel_esp_modify_xfrm(sa_entry, &attrs);
+
+ data.data_offset_condition_operand = MLX5_IPSEC_ASO_REMOVE_FLOW_PKT_CNT_OFFSET;
+ data.bitwise_data = cpu_to_be64(BIT_ULL(54));
+ data.data_mask = data.bitwise_data;
+
+ mlx5e_ipsec_aso_update(sa_entry, &data);
+}
+
+static void mlx5e_ipsec_handle_event(struct work_struct *_work)
+{
+ struct mlx5e_ipsec_work *work =
+ container_of(_work, struct mlx5e_ipsec_work, work);
+ struct mlx5e_ipsec_sa_entry *sa_entry = work->data;
+ struct mlx5_accel_esp_xfrm_attrs *attrs;
+ struct mlx5e_ipsec_aso *aso;
+ int ret;
+
+ aso = sa_entry->ipsec->aso;
+ attrs = &sa_entry->attrs;
+
+ /* TODO: Kostia, this event should be locked/protected
+ * from concurrent SA delete.
+ */
+ ret = mlx5e_ipsec_aso_query(sa_entry, NULL);
+ if (ret)
+ goto out;
+
+ if (attrs->replay_esn.trigger &&
+ !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
+ u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
+
+ mlx5e_ipsec_update_esn_state(sa_entry, mode_param);
+ }
+
+out:
+ kfree(work);
+}
+
+void mlx5_object_change_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
+{
+ struct mlx5e_ipsec_sa_entry *sa_entry;
+ struct mlx5_eqe_obj_change *object;
+ struct mlx5e_ipsec_work *work;
+ u16 type;
+
+ object = &eqe->data.obj_change;
+ type = be16_to_cpu(object->obj_type);
+
+ if (type != MLX5_GENERAL_OBJECT_TYPES_IPSEC)
+ return;
+
+ sa_entry = xa_load(&dev->ipsec_sadb, be32_to_cpu(object->obj_id));
+ if (!sa_entry)
+ return;
+
+ work = kmalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, mlx5e_ipsec_handle_event);
+ work->data = sa_entry;
+
+ queue_work(sa_entry->ipsec->wq, &work->work);
+}
+
+int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct device *pdev;
+ int err;
+
+ aso = kzalloc(sizeof(*ipsec->aso), GFP_KERNEL);
+ if (!aso)
+ return -ENOMEM;
+
+ pdev = &mdev->pdev->dev;
+ aso->dma_addr = dma_map_single(pdev, aso->ctx, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
+ err = dma_mapping_error(pdev, aso->dma_addr);
+ if (err)
+ goto err_dma;
+
+ aso->aso = mlx5_aso_create(mdev, ipsec->pdn);
+ if (IS_ERR(aso->aso)) {
+ err = PTR_ERR(aso->aso);
+ goto err_aso_create;
+ }
+
+ spin_lock_init(&aso->lock);
+ ipsec->aso = aso;
+ return 0;
+
+err_aso_create:
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
+err_dma:
+ kfree(aso);
+ return err;
+}
+
+void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec)
+{
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5e_ipsec_aso *aso;
+ struct device *pdev;
+
+ aso = ipsec->aso;
+ pdev = &mdev->pdev->dev;
+
+ mlx5_aso_destroy(aso->aso);
+ dma_unmap_single(pdev, aso->dma_addr, sizeof(aso->ctx), DMA_BIDIRECTIONAL);
+ kfree(aso);
+}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c
new file mode 100644
index 000000000000..5dccb8bc2b87
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_rxtx.c
@@ -0,0 +1,87 @@
+/*-
+ * Copyright (c) 2023 NVIDIA corporation & affiliates.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_ipsec.h"
+
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netipsec/keydb.h>
+#include <netipsec/ipsec_offload.h>
+#include <netipsec/xform.h>
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
+
+#define MLX5_IPSEC_METADATA_HANDLE(ipsec_metadata) (ipsec_metadata & 0xFFFFFF)
+
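+/*
+ * Illustrative: a CQE ft_metadata value of 0x01234567 yields the 24-bit
+ * driver SPI handle 0x234567; anything above the low 24 bits is masked
+ * off by the macro above.
+ */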
+int
+mlx5_accel_ipsec_rx_tag_add(if_t ifp, struct mlx5e_rq_mbuf *mr)
+{
+ struct mlx5e_priv *priv;
+ struct ipsec_accel_in_tag *mtag;
+
+ priv = if_getsoftc(ifp);
+ if (priv->ipsec == NULL)
+ return (0);
+ if (mr->ipsec_mtag != NULL)
+ return (0);
+
+ mtag = (struct ipsec_accel_in_tag *)m_tag_get(
+ PACKET_TAG_IPSEC_ACCEL_IN, sizeof(struct ipsec_accel_in_tag) -
+ __offsetof(struct ipsec_accel_in_tag, xh), M_NOWAIT);
+ if (mtag == NULL)
+ return (-ENOMEM);
+ mr->ipsec_mtag = mtag;
+ return (0);
+}
+
+void
+mlx5e_accel_ipsec_handle_rx_cqe(if_t ifp, struct mbuf *mb,
+ struct mlx5_cqe64 *cqe, struct mlx5e_rq_mbuf *mr)
+{
+ struct ipsec_accel_in_tag *mtag;
+ u32 drv_spi;
+
+ drv_spi = MLX5_IPSEC_METADATA_HANDLE(be32_to_cpu(cqe->ft_metadata));
+ mtag = mr->ipsec_mtag;
+ WARN_ON(mtag == NULL);
+ if (mtag != NULL) {
+ mtag->drv_spi = drv_spi;
+ if (ipsec_accel_fill_xh(ifp, drv_spi, &mtag->xh)) {
+ m_tag_prepend(mb, &mtag->tag);
+ mr->ipsec_mtag = NULL;
+ }
+ }
+}
+
+void
+mlx5e_accel_ipsec_handle_tx_wqe(struct mbuf *mb, struct mlx5e_tx_wqe *wqe,
+ struct ipsec_accel_out_tag *tag)
+{
+ wqe->eth.flow_table_metadata = cpu_to_be32(
+ mlx5e_accel_ipsec_get_metadata(tag->drv_spi));
+}
diff --git a/sys/dev/mlx5/mlx5_core/eswitch.h b/sys/dev/mlx5/mlx5_core/eswitch.h
index ca03da287543..50d06951bf07 100644
--- a/sys/dev/mlx5/mlx5_core/eswitch.h
+++ b/sys/dev/mlx5/mlx5_core/eswitch.h
@@ -29,6 +29,8 @@
#include <linux/if_ether.h>
#include <dev/mlx5/device.h>
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_flow_table)
+
#define MLX5_MAX_UC_PER_VPORT(dev) \
(1 << MLX5_CAP_GEN(dev, log_max_current_uc_list))
@@ -83,15 +85,15 @@ struct l2addr_node {
struct vport_ingress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *drop_grp;
- struct mlx5_flow_rule *drop_rule;
+ struct mlx5_flow_handle *drop_rule;
};
struct vport_egress {
struct mlx5_flow_table *acl;
struct mlx5_flow_group *allowed_vlans_grp;
struct mlx5_flow_group *drop_grp;
- struct mlx5_flow_rule *allowed_vlan;
- struct mlx5_flow_rule *drop_rule;
+ struct mlx5_flow_handle *allowed_vlan;
+ struct mlx5_flow_handle *drop_rule;
};
struct mlx5_vport {
diff --git a/sys/dev/mlx5/mlx5_core/fs_chains.h b/sys/dev/mlx5/mlx5_core/fs_chains.h
new file mode 100644
index 000000000000..e703a98981b6
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/fs_chains.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 Mellanox Technologies. */
+
+#ifndef __ML5_ESW_CHAINS_H__
+#define __ML5_ESW_CHAINS_H__
+
+#include <dev/mlx5/fs.h>
+
+struct mlx5_fs_chains;
+
+enum mlx5_chains_flags {
+ MLX5_CHAINS_AND_PRIOS_SUPPORTED = BIT(0),
+ MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED = BIT(1),
+ MLX5_CHAINS_FT_TUNNEL_SUPPORTED = BIT(2),
+};
+
+struct mlx5_chains_attr {
+ enum mlx5_flow_namespace_type ns;
+ int fs_base_prio;
+ int fs_base_level;
+ u32 flags;
+ u32 max_grp_num;
+ struct mlx5_flow_table *default_ft;
+};
+
+bool
+mlx5_chains_prios_supported(struct mlx5_fs_chains *chains);
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains);
+bool
+mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains);
+u32
+mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level);
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains);
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains);
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+
+int
+mlx5_chains_get_chain_mapping(struct mlx5_fs_chains *chains, u32 chain,
+ u32 *chain_mapping);
+int
+mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains,
+ u32 chain_mapping);
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr);
+void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft);
+void
+mlx5_chains_print_info(struct mlx5_fs_chains *chains);
+
+#endif /* __ML5_ESW_CHAINS_H__ */
diff --git a/sys/dev/mlx5/mlx5_core/fs_cmd.h b/sys/dev/mlx5/mlx5_core/fs_cmd.h
new file mode 100644
index 000000000000..a2b2d537ac45
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/fs_cmd.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _MLX5_FS_CMD_
+#define _MLX5_FS_CMD_
+
+#include "fs_core.h"
+
+struct mlx5_flow_cmds {
+ int (*create_flow_table)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft);
+ int (*destroy_flow_table)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft);
+
+ int (*modify_flow_table)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft);
+
+ int (*create_flow_group)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ struct mlx5_flow_group *fg);
+
+ int (*destroy_flow_group)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg);
+
+ int (*create_fte)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ struct fs_fte *fte);
+
+ int (*update_fte)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ int modify_mask,
+ struct fs_fte *fte);
+
+ int (*delete_fte)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte);
+
+ int (*update_root_ft)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect);
+
+ int (*packet_reformat_alloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ void (*packet_reformat_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
+ int (*modify_header_alloc)(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ void (*modify_header_dealloc)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr);
+
+ int (*set_peer)(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
+
+ int (*create_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
+
+ u32 (*get_capabilities)(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type);
+};
+
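+/*
+ * Illustrative sketch (hypothetical backend): each steering back end
+ * provides one instance of these ops, e.g.
+ *
+ *   static const struct mlx5_flow_cmds fw_cmds = {
+ *           .create_flow_table  = fw_create_flow_table,
+ *           .destroy_flow_table = fw_destroy_flow_table,
+ *           ...
+ *   };
+ *
+ * and mlx5_fs_cmd_get_default() below returns the instance that matches
+ * the flow table type.
+ */
+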
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
+ u64 *packets, u64 *bytes);
+
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
+
+#endif
diff --git a/sys/dev/mlx5/mlx5_core/fs_core.h b/sys/dev/mlx5/mlx5_core/fs_core.h
index 05757f493469..1d7339e6b7d5 100644
--- a/sys/dev/mlx5/mlx5_core/fs_core.h
+++ b/sys/dev/mlx5/mlx5_core/fs_core.h
@@ -1,14 +1,11 @@
-/*-
- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
@@ -26,306 +23,327 @@
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
-#include <asm/atomic.h>
-#include <linux/completion.h>
-#include <linux/mutex.h>
#include <dev/mlx5/fs.h>
-enum fs_type {
+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
+struct mlx5_flow_definer {
+ enum mlx5_flow_namespace_type ns_type;
+ u32 id;
+};
+
+struct mlx5_modify_hdr {
+ enum mlx5_flow_namespace_type ns_type;
+ union {
+ u32 id;
+ };
+};
+
+struct mlx5_pkt_reformat {
+ enum mlx5_flow_namespace_type ns_type;
+ int reformat_type; /* from mlx5_ifc */
+ union {
+ u32 id;
+ };
+};
+
+/* FS_TYPE_PRIO_CHAINS is a PRIO that holds namespaces only, and those
+ * namespaces are parallel to one another when walking them to connect a
+ * new flow table: the last flow table in a TYPE_PRIO prio of one parallel
+ * namespace does not automatically connect to the first flow table found
+ * in any prio of the next namespace, but instead skips the entire
+ * containing TYPE_PRIO_CHAINS prio.
+ *
+ * This is used to implement tc chains; each chain of prios is a separate
+ * namespace inside the containing TYPE_PRIO_CHAINS prio.
+ */
+
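+/*
+ * Example (illustrative): with chains 0 and 1 implemented as parallel
+ * namespaces NS0 and NS1 under one TYPE_PRIO_CHAINS prio, the last flow
+ * table of NS0 connects past the whole chains prio rather than into the
+ * first flow table of NS1.
+ */
+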
+enum fs_node_type {
FS_TYPE_NAMESPACE,
FS_TYPE_PRIO,
+ FS_TYPE_PRIO_CHAINS,
FS_TYPE_FLOW_TABLE,
FS_TYPE_FLOW_GROUP,
FS_TYPE_FLOW_ENTRY,
FS_TYPE_FLOW_DEST
};
-enum fs_ft_type {
+/* Compatibility alias for the old enum type name. */
+#define fs_ft_type fs_flow_table_type
+
+enum fs_flow_table_type {
FS_FT_NIC_RX = 0x0,
+ FS_FT_NIC_TX = 0x1,
FS_FT_ESW_EGRESS_ACL = 0x2,
FS_FT_ESW_INGRESS_ACL = 0x3,
FS_FT_FDB = 0X4,
- FS_FT_SNIFFER_RX = 0x5,
- FS_FT_SNIFFER_TX = 0x6
+ FS_FT_SNIFFER_RX = 0X5,
+ FS_FT_SNIFFER_TX = 0X6,
+ FS_FT_RDMA_RX = 0X7,
+ FS_FT_RDMA_TX = 0X8,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
+};
+
+enum fs_flow_table_op_mod {
+ FS_FT_OP_MOD_NORMAL,
+ FS_FT_OP_MOD_LAG_DEMUX,
};
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
-/* Should always be the first variable in the struct */
-struct fs_base {
- struct list_head list;
- struct fs_base *parent;
- enum fs_type type;
- struct kref refcount;
+enum mlx5_flow_steering_mode {
+ MLX5_FLOW_STEERING_MODE_DMFS,
+ MLX5_FLOW_STEERING_MODE_SMFS
+};
+
+enum mlx5_flow_steering_capabilty {
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX = 1UL << 0,
+ MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX = 1UL << 1,
+ MLX5_FLOW_STEERING_CAP_MATCH_RANGES = 1UL << 2,
+};
+
+struct mlx5_flow_steering {
+ struct mlx5_core_dev *dev;
+ enum mlx5_flow_steering_mode mode;
+ struct kmem_cache *fgs_cache;
+ struct kmem_cache *ftes_cache;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_namespace **fdb_sub_ns;
+ struct mlx5_flow_root_namespace **esw_egress_root_ns;
+ struct mlx5_flow_root_namespace **esw_ingress_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_tx_root_ns;
+ struct mlx5_flow_root_namespace *sniffer_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_rx_root_ns;
+ struct mlx5_flow_root_namespace *rdma_tx_root_ns;
+ struct mlx5_flow_root_namespace *egress_root_ns;
+ struct mlx5_flow_root_namespace *port_sel_root_ns;
+ int esw_egress_acl_vports;
+ int esw_ingress_acl_vports;
+};
+
+struct fs_node {
+ struct list_head list;
+ struct list_head children;
+ enum fs_node_type type;
+ struct fs_node *parent;
+ struct fs_node *root;
/* lock the node for writing and traversing */
- struct mutex lock;
- struct completion complete;
- atomic_t users_refcount;
- const char *name;
+ struct rw_semaphore lock;
+ refcount_t refcount;
+ bool active;
+ void (*del_hw_func)(struct fs_node *);
+ void (*del_sw_func)(struct fs_node *);
+ atomic_t version;
};
struct mlx5_flow_rule {
- struct fs_base base;
+ struct fs_node node;
+ struct mlx5_flow_table *ft;
struct mlx5_flow_destination dest_attr;
- struct list_head clients_data;
- /*protect clients lits*/
- struct mutex clients_lock;
-};
-
-struct fs_fte {
- struct fs_base base;
- u32 val[MLX5_ST_SZ_DW(fte_match_param)];
- uint32_t dests_size;
- struct list_head dests;
- uint32_t index; /* index in ft */
- struct mlx5_flow_act flow_act;
- u32 sw_action; /* enum mlx5_rule_fwd_action */
- enum fs_fte_status status;
+ /* next_ft should be accessed under chain_lock and only if the
+ * destination type is FWD_NEXT_FT.
+ */
+ struct list_head next_ft;
+ u32 sw_action;
};
-struct fs_star_rule {
- struct mlx5_flow_group *fg;
- struct fs_fte *fte;
+struct mlx5_flow_handle {
+ int num_rules;
+ struct mlx5_flow_rule *rule[];
};
+/* Type of children is mlx5_flow_group */
struct mlx5_flow_table {
- struct fs_base base;
- /* sorted list by start_index */
- struct list_head fgs;
+ struct fs_node node;
+ u32 id;
+ u16 vport;
+ unsigned int max_fte;
+ unsigned int level;
+ enum fs_flow_table_type type;
+ enum fs_flow_table_op_mod op_mod;
struct {
bool active;
- unsigned int max_types;
+ unsigned int required_groups;
unsigned int group_size;
- unsigned int num_types;
+ unsigned int num_groups;
unsigned int max_fte;
} autogroup;
- unsigned int max_fte;
- unsigned int level;
- uint32_t id;
- u16 vport;
- enum fs_ft_type type;
- struct fs_star_rule star_rule;
- unsigned int shared_refcount;
+ /* Protect fwd_rules */
+ struct mutex lock;
+ /* FWD rules that point on this flow table */
+ struct list_head fwd_rules;
+ u32 flags;
+ struct xarray fgs_xa;
+ enum mlx5_flow_table_miss_action def_miss_action;
+ struct mlx5_flow_namespace *ns;
};
-enum fs_prio_flags {
- MLX5_CORE_FS_PRIO_SHARED = 1
+struct mlx5_ft_underlay_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
+#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_e00
+/* Calculate the fte_match_param length without the reserved length.
+ * Make sure the reserved field is the last.
+ */
+#define MLX5_ST_SZ_DW_MATCH_PARAM \
+ ((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) / sizeof(u32)) + \
+ BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) != \
+ MLX5_FLD_SZ_BYTES(fte_match_param, \
+ MLX5_FTE_MATCH_PARAM_RESERVED) +\
+ MLX5_BYTE_OFF(fte_match_param, \
+ MLX5_FTE_MATCH_PARAM_RESERVED)))
+
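+/*
+ * Worked example (illustrative): the reserved tail starts at bit 0xe00,
+ * i.e. byte 0x1c0 = 448, so MLX5_ST_SZ_DW_MATCH_PARAM evaluates to
+ * 448 / sizeof(u32) == 112 dwords of usable match space; the
+ * BUILD_BUG_ON_ZERO() term breaks the build if the reserved field ever
+ * stops being the last member of fte_match_param.
+ */
+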
+/* Type of children is mlx5_flow_rule */
+struct fs_fte {
+ struct fs_node node;
+ u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
+ u32 dests_size;
+ u32 fwd_dests;
+ u32 index;
+ struct mlx5_flow_context flow_context;
+ struct mlx5_flow_act action;
+ enum fs_fte_status status;
+ struct mlx5_fc *counter;
+ int modify_mask;
};
+/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
- struct fs_base base;
- struct list_head objs; /* each object is a namespace or ft */
- unsigned int max_ft;
- unsigned int num_ft;
- unsigned int max_ns;
+ struct fs_node node;
+ unsigned int num_levels;
+ unsigned int start_level;
unsigned int prio;
- /*When create shared flow table, this lock should be taken*/
- struct mutex shared_lock;
- u8 flags;
+ unsigned int num_ft;
};
+/* Type of children is fs_prio */
struct mlx5_flow_namespace {
/* parent == NULL => root ns */
- struct fs_base base;
- /* sorted by priority number */
- struct list_head prios; /* list of fs_prios */
- struct list_head list_notifiers;
- struct rw_semaphore notifiers_rw_sem;
- struct rw_semaphore dests_rw_sem;
+ struct fs_node node;
+ enum mlx5_flow_table_miss_action def_miss_action;
+};
+
+struct mlx5_flow_group_mask {
+ u8 match_criteria_enable;
+ u32 match_criteria[MLX5_ST_SZ_DW_MATCH_PARAM];
+};
+
+/* Type of children is fs_fte */
+struct mlx5_flow_group {
+ struct fs_node node;
+ struct mlx5_flow_group_mask mask;
+ u32 start_index;
+ u32 max_ftes;
+ struct ida fte_allocator;
+ u32 id;
+ struct xarray ftes_xa;
};
struct mlx5_flow_root_namespace {
struct mlx5_flow_namespace ns;
- struct mlx5_flow_table *ft_level_0;
- enum fs_ft_type table_type;
+ enum mlx5_flow_steering_mode mode;
+ enum fs_flow_table_type table_type;
struct mlx5_core_dev *dev;
struct mlx5_flow_table *root_ft;
- /* When chaining flow-tables, this lock should be taken */
- struct mutex fs_chain_lock;
+ /* Should be held when chaining flow tables */
+ struct mutex chain_lock;
+ struct list_head underlay_qpns;
+ const struct mlx5_flow_cmds *cmds;
};
-struct mlx5_flow_group {
- struct fs_base base;
- struct list_head ftes;
- struct mlx5_core_fs_mask mask;
- uint32_t start_index;
- uint32_t max_ftes;
- uint32_t num_ftes;
- uint32_t id;
-};
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
+ struct delayed_work *dwork,
+ unsigned long delay);
+void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
+ unsigned long interval);
-struct mlx5_flow_handler {
- struct list_head list;
- rule_event_fn add_dst_cb;
- rule_event_fn del_dst_cb;
- void *client_context;
- struct mlx5_flow_namespace *ns;
-};
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
-struct fs_client_priv_data {
- struct mlx5_flow_handler *fs_handler;
- struct list_head list;
- void *client_dst_data;
-};
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns);
-struct mlx5_modify_hdr {
- enum mlx5_flow_namespace_type ns_type;
- u32 id;
-};
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode);
-struct mlx5_pkt_reformat {
- enum mlx5_flow_namespace_type ns_type;
- int reformat_type; /* from mlx5_ifc */
- u32 id;
-};
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev);
+void mlx5_fs_core_free(struct mlx5_core_dev *dev);
+int mlx5_fs_core_init(struct mlx5_core_dev *dev);
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev);
-void _fs_remove_node(struct kref *kref);
-#define fs_get_obj(v, _base) {v = container_of((_base), typeof(*v), base); }
-#define fs_get_parent(v, child) {v = (child)->base.parent ? \
- container_of((child)->base.parent, \
- typeof(*v), base) : NULL; }
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev);
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports);
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev);
-#define fs_list_for_each_entry(pos, cond, root) \
- list_for_each_entry(pos, root, base.list) \
- if (!(cond)) {} else
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type);
-#define fs_list_for_each_entry_continue(pos, cond, root) \
- list_for_each_entry_continue(pos, root, base.list) \
- if (!(cond)) {} else
+struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
-#define fs_list_for_each_entry_reverse(pos, cond, root) \
- list_for_each_entry_reverse(pos, root, base.list) \
- if (!(cond)) {} else
+#define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); }
-#define fs_list_for_each_entry_continue_reverse(pos, cond, root) \
- list_for_each_entry_continue_reverse(pos, root, base.list) \
- if (!(cond)) {} else
+#define fs_list_for_each_entry(pos, root) \
+ list_for_each_entry(pos, root, node.list)
-#define fs_for_each_ft(pos, prio) \
- fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_TABLE, \
- &(prio)->objs)
+#define fs_list_for_each_entry_safe(pos, tmp, root) \
+ list_for_each_entry_safe(pos, tmp, root, node.list)
-#define fs_for_each_ft_reverse(pos, prio) \
- fs_list_for_each_entry_reverse(pos, \
- (pos)->base.type == FS_TYPE_FLOW_TABLE, \
- &(prio)->objs)
+#define fs_for_each_ns_or_ft_reverse(pos, prio) \
+ list_for_each_entry_reverse(pos, &(prio)->node.children, list)
-#define fs_for_each_ns(pos, prio) \
- fs_list_for_each_entry(pos, \
- (pos)->base.type == FS_TYPE_NAMESPACE, \
- &(prio)->objs)
-
-#define fs_for_each_ns_or_ft_reverse(pos, prio) \
- list_for_each_entry_reverse(pos, &(prio)->objs, list) \
- if (!((pos)->type == FS_TYPE_NAMESPACE || \
- (pos)->type == FS_TYPE_FLOW_TABLE)) {} else
-
-#define fs_for_each_ns_or_ft(pos, prio) \
- list_for_each_entry(pos, &(prio)->objs, list) \
- if (!((pos)->type == FS_TYPE_NAMESPACE || \
- (pos)->type == FS_TYPE_FLOW_TABLE)) {} else
-
-#define fs_for_each_ns_or_ft_continue_reverse(pos, prio) \
- list_for_each_entry_continue_reverse(pos, &(prio)->objs, list) \
- if (!((pos)->type == FS_TYPE_NAMESPACE || \
- (pos)->type == FS_TYPE_FLOW_TABLE)) {} else
-
-#define fs_for_each_ns_or_ft_continue(pos, prio) \
- list_for_each_entry_continue(pos, &(prio)->objs, list) \
- if (!((pos)->type == FS_TYPE_NAMESPACE || \
- (pos)->type == FS_TYPE_FLOW_TABLE)) {} else
+#define fs_for_each_ns_or_ft(pos, prio) \
+ list_for_each_entry(pos, (&(prio)->node.children), list)
#define fs_for_each_prio(pos, ns) \
- fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_PRIO, \
- &(ns)->prios)
+ fs_list_for_each_entry(pos, &(ns)->node.children)
-#define fs_for_each_prio_reverse(pos, ns) \
- fs_list_for_each_entry_reverse(pos, (pos)->base.type == FS_TYPE_PRIO, \
- &(ns)->prios)
+#define fs_for_each_ns(pos, prio) \
+ fs_list_for_each_entry(pos, &(prio)->node.children)
-#define fs_for_each_prio_continue(pos, ns) \
- fs_list_for_each_entry_continue(pos, (pos)->base.type == FS_TYPE_PRIO, \
- &(ns)->prios)
+#define fs_for_each_ft(pos, prio) \
+ fs_list_for_each_entry(pos, &(prio)->node.children)
-#define fs_for_each_prio_continue_reverse(pos, ns) \
- fs_list_for_each_entry_continue_reverse(pos, \
- (pos)->base.type == FS_TYPE_PRIO, \
- &(ns)->prios)
+#define fs_for_each_ft_safe(pos, tmp, prio) \
+ fs_list_for_each_entry_safe(pos, tmp, &(prio)->node.children)
#define fs_for_each_fg(pos, ft) \
- fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_GROUP, \
- &(ft)->fgs)
+ fs_list_for_each_entry(pos, &(ft)->node.children)
#define fs_for_each_fte(pos, fg) \
- fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_ENTRY, \
- &(fg)->ftes)
+ fs_list_for_each_entry(pos, &(fg)->node.children)
+
#define fs_for_each_dst(pos, fte) \
- fs_list_for_each_entry(pos, (pos)->base.type == FS_TYPE_FLOW_DEST, \
- &(fte)->dests)
-
-int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
- u16 vport, enum fs_ft_type type, unsigned int level,
- unsigned int log_size, const char *name, unsigned int *table_id);
-
-int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id);
-
-int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
- u32 *in,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int *group_id);
-
-int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int group_id);
-
-
-int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_fte_status *fte_status,
- u32 *match_val,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int index, unsigned int group_id,
- struct mlx5_flow_act *flow_act,
- u32 sw_action, int dest_size,
- struct list_head *dests); /* mlx5_flow_desination */
-
-int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_fte_status *fte_status,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int index);
-
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- enum fs_ft_type type,
- unsigned int id);
-
-int mlx5_init_fs(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
-void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
- unsigned long interval);
+ fs_list_for_each_entry(pos, &(fte)->node.children)
+
+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \
+ (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \
+ (type == FS_FT_NIC_TX) ? MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) : \
+ (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \
+ (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \
+ (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \
+ (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
+ (type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
+ (type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
+ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
+ )
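+
+/*
+ * Illustrative: MLX5_CAP_FLOWTABLE_TYPE(mdev, max_ft_level, FS_FT_NIC_RX)
+ * expands to MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level); the final
+ * BUILD_BUG_ON_ZERO() arm breaks the build if a new table type is added
+ * without extending this dispatch.
+ */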
-int mlx5_cmd_modify_header_alloc(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type namespace,
- u8 num_actions,
- void *modify_actions,
- struct mlx5_modify_hdr *modify_hdr);
-void mlx5_cmd_modify_header_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_modify_hdr *modify_hdr);
-int mlx5_cmd_packet_reformat_alloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat_params *params,
- enum mlx5_flow_namespace_type namespace,
- struct mlx5_pkt_reformat *pkt_reformat);
-void mlx5_cmd_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat *pkt_reformat);
-int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
-void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
-void mlx5_fc_queue_stats_work(struct mlx5_core_dev *dev,
- struct delayed_work *dwork,
- unsigned long delay);
#endif
diff --git a/sys/dev/mlx5/mlx5_core/fs_ft_pool.h b/sys/dev/mlx5/mlx5_core/fs_ft_pool.h
new file mode 100644
index 000000000000..a5e4df624e27
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/fs_ft_pool.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#ifndef __MLX5_FS_FT_POOL_H__
+#define __MLX5_FS_FT_POOL_H__
+
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_core/fs_core.h>
+#include <linux/compiler.h>
+
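+/*
+ * Callers pass POOL_NEXT_SIZE as desired_size to request the largest
+ * table size the pool can still provide (assumed semantics, matching
+ * its use in mlx5_fs_chains.c).
+ */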
+#define POOL_NEXT_SIZE BIT(30)
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev);
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev);
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size);
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz);
+
+#endif /* __MLX5_FS_FT_POOL_H__ */
diff --git a/sys/dev/mlx5/mlx5_core/fs_tcp.h b/sys/dev/mlx5/mlx5_core/fs_tcp.h
index fa11ad9c4cb5..e2433af53a42 100644
--- a/sys/dev/mlx5/mlx5_core/fs_tcp.h
+++ b/sys/dev/mlx5/mlx5_core/fs_tcp.h
@@ -27,15 +27,15 @@
#define __MLX5E_ACCEL_FS_TCP_H__
struct inpcb;
-struct mlx5_flow_rule;
+struct mlx5_flow_handle;
struct mlx5e_priv;
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *);
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *,
struct inpcb *, uint32_t tirn, uint32_t flow_tag, uint16_t vlan_id);
#define MLX5E_ACCEL_FS_ADD_INPCB_NO_VLAN 0xFFFF
-void mlx5e_accel_fs_del_inpcb(struct mlx5_flow_rule *);
+void mlx5e_accel_fs_del_inpcb(struct mlx5_flow_handle *);
#endif /* __MLX5E_ACCEL_FS_TCP_H__ */
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
index d46feb4b9e5b..86c721a83cb7 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_cmd.c
@@ -247,7 +247,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
struct mlx5_core_dev *dev = container_of(ent->cmd,
struct mlx5_core_dev, cmd);
- int poll_end = jiffies +
+ long poll_end = jiffies +
msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
u8 own;
@@ -417,6 +417,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
case MLX5_CMD_OP_ALLOC_Q_COUNTER:
case MLX5_CMD_OP_QUERY_Q_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
case MLX5_CMD_OP_ALLOC_PD:
case MLX5_CMD_OP_ALLOC_UAR:
case MLX5_CMD_OP_CONFIG_INT_MODERATION:
@@ -614,6 +615,9 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJ);
MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJ);
MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJ);
+ MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
default: return "unknown command opcode";
}
}
@@ -947,7 +951,7 @@ static const char *deliv_status_to_str(u8 status)
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
- int timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
int err;
if (ent->polling) {
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_core.h b/sys/dev/mlx5/mlx5_core/mlx5_core.h
index f0b1dde60323..f63bb2070bcf 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_core.h
+++ b/sys/dev/mlx5/mlx5_core/mlx5_core.h
@@ -163,4 +163,14 @@ enum {
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev);
void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state);
+/* Temporary no-op stubs for the flow steering init/cleanup entry points. */
+static inline int mlx5_init_fs(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
+
+static inline int mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+{
+ return 0;
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_crypto.c b/sys/dev/mlx5/mlx5_core/mlx5_crypto.c
new file mode 100644
index 000000000000..03804219e0b3
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/mlx5_crypto.c
@@ -0,0 +1,94 @@
+/*-
+ * Copyright (c) 2019-2021, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_rss.h"
+#include "opt_ratelimit.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/crypto.h>
+
+int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn, u32 key_type,
+ const void *p_key, u32 key_len, u32 *p_obj_id)
+{
+ u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(create_encryption_key_out)] = {};
+ u64 general_obj_types;
+ int err;
+
+ general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
+ if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJ_TYPES_ENCRYPTION_KEY))
+ return -EINVAL;
+
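+ /*
+ * The encryption key field is 256 bits wide: a 128-bit key is
+ * written into its second half (dword offset 4), while a 256-bit
+ * key fills the whole field.
+ */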
+ switch (key_len) {
+ case 128 / 8:
+ memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
+ encryption_key_object.key[4]), p_key, 128 / 8);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
+ key_type);
+ break;
+ case 256 / 8:
+ memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
+ encryption_key_object.key[0]), p_key, 256 / 8);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256);
+ MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
+ key_type);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ MLX5_SET(create_encryption_key_in, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJ);
+ MLX5_SET(create_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+
+ err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+ if (err == 0)
+ *p_obj_id = MLX5_GET(create_encryption_key_out, out, obj_id);
+
+ /* avoid leaking key on the stack */
+ explicit_bzero(in, sizeof(in));
+
+ return err;
+}
+
+int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_encryption_key_in)] = {};
+ u32 out[MLX5_ST_SZ_DW(destroy_encryption_key_out)] = {};
+
+ MLX5_SET(destroy_encryption_key_in, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
+ MLX5_SET(destroy_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
+ MLX5_SET(destroy_encryption_key_in, in, obj_id, oid);
+
+ return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c b/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
index 182be547272a..9730ab3c56c1 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_diagnostics.c
@@ -26,6 +26,8 @@
#include "opt_rss.h"
#include "opt_ratelimit.h"
+#define _WANT_SFF_8024_ID
+
#include <dev/mlx5/driver.h>
#include <dev/mlx5/port.h>
#include <dev/mlx5/diagnostics.h>
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eq.c b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
index 2d5b53b6482a..1090f8638171 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_eq.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eq.c
@@ -33,6 +33,7 @@
#include <dev/mlx5/mlx5_fpga/core.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
#ifdef RSS
#include <net/rss_config.h>
@@ -165,6 +166,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_CODING_DCBX_CHANGE_EVENT";
case MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT:
return "MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT";
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ return "MLX5_EVENT_TYPE_OBJECT_CHANGE";
default:
return "Unrecognized event";
}
@@ -370,6 +373,10 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_temp_warning_event(dev, eqe);
break;
+ case MLX5_EVENT_TYPE_OBJECT_CHANGE:
+ mlx5_object_change_event(dev, eqe);
+ break;
+
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -571,6 +578,10 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
MLX5_EVENT_TYPE_CODING_GENERAL_NOTIFICATION_EVENT);
}
+ if (mlx5_ipsec_device_caps(dev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD)
+ async_event_mask |=
+ (1ull << MLX5_EVENT_TYPE_OBJECT_CHANGE);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD);
if (err) {
@@ -679,9 +690,9 @@ static const char *mlx5_port_module_event_error_type_to_string(u8 error_type)
unsigned int mlx5_query_module_status(struct mlx5_core_dev *dev, int module_num)
{
- if (module_num < 0 || module_num >= MLX5_MAX_PORTS)
- return 0; /* undefined */
- return dev->module_status[module_num];
+ if (module_num != dev->module_num)
+ return 0; /* module number does not match what FW reported */
+ return dev->module_status;
}
static void mlx5_port_module_event(struct mlx5_core_dev *dev,
@@ -729,8 +740,8 @@ static void mlx5_port_module_event(struct mlx5_core_dev *dev,
"Module %u, unknown status %d\n", module_num, module_status);
}
/* store module status */
- if (module_num < MLX5_MAX_PORTS)
- dev->module_status[module_num] = module_status;
+ dev->module_status = module_status;
+ dev->module_num = module_num;
}
static void mlx5_port_general_notification_event(struct mlx5_core_dev *dev,
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_eswitch.c b/sys/dev/mlx5/mlx5_core/mlx5_eswitch.c
index 15f5f0ff0336..30f04144502b 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_eswitch.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_eswitch.c
@@ -64,7 +64,7 @@ struct esw_uc_addr {
/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
struct l2addr_node node;
- struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
+ struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
u32 refcnt;
};
@@ -73,7 +73,7 @@ struct vport_addr {
struct l2addr_node node;
u8 action;
u32 vport;
- struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ struct mlx5_flow_handle *flow_rule; /* SRIOV only */
};
enum {
@@ -215,59 +215,54 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
}
/* E-Switch FDB */
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
- int match_header = MLX5_MATCH_OUTER_HEADERS;
- struct mlx5_flow_destination dest;
- struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {};
- u32 *match_v;
- u32 *match_c;
+ struct mlx5_flow_spec *spec;
u8 *dmac_v;
u8 *dmac_c;
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
- printf("mlx5_core: WARN: ""FDB: Failed to alloc match parameters\n");
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ printf("mlx5_core: WARN: ""FDB: Failed to alloc flow spec\n");
goto out;
}
- dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
- dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
ether_addr_copy(dmac_v, mac);
/* Match criteria mask */
memset(dmac_c, 0xff, 6);
- dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
- dest.vport_num = vport;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport.num = vport;
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule =
- mlx5_add_flow_rule(esw->fdb_table.fdb,
- match_header,
- match_c,
- match_v,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+ &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(flow_rule)) {
printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
out:
- kfree(match_v);
- kfree(match_c);
+ kfree(spec);
return flow_rule;
}
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb;
@@ -295,7 +290,9 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw)
 /* Size the table two entries short of the maximum (-2). */
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;
- fdb = mlx5_create_flow_table(root_ns, 0, "FDB", table_size);
+ ft_attr.prio = FDB_SLOW_PATH;
+ ft_attr.max_fte = table_size;
+ fdb = mlx5_create_flow_table(root_ns, &ft_attr);
if (IS_ERR_OR_NULL(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -397,7 +394,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);
- mlx5_del_flow_rule(&vaddr->flow_rule);
+ mlx5_del_flow_rules(&vaddr->flow_rule);
l2addr_hash_del(esw_uc);
return 0;
@@ -456,12 +453,12 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
vport, mac, vaddr->flow_rule, esw_mc->refcnt,
esw_mc->uplink_rule);
- mlx5_del_flow_rule(&vaddr->flow_rule);
+ mlx5_del_flow_rules(&vaddr->flow_rule);
if (--esw_mc->refcnt)
return 0;
- mlx5_del_flow_rule(&esw_mc->uplink_rule);
+ mlx5_del_flow_rules(&esw_mc->uplink_rule);
l2addr_hash_del(esw_mc);
return 0;
@@ -602,13 +599,13 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_group *vlan_grp = NULL;
struct mlx5_flow_group *drop_grp = NULL;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
void *match_criteria;
- char table_name[32];
u32 *flow_group_in;
int table_size = 2;
int err = 0;
@@ -619,7 +616,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS, vport->vport);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
return;
@@ -629,8 +626,10 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
if (!flow_group_in)
return;
- snprintf(table_name, 32, "egress_%d", vport->vport);
- acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
+ ft_attr.max_fte = table_size;
+ if (vport->vport)
+ ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+ acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
@@ -678,8 +677,8 @@ out:
static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- mlx5_del_flow_rule(&vport->egress.allowed_vlan);
- mlx5_del_flow_rule(&vport->egress.drop_rule);
+ mlx5_del_flow_rules(&vport->egress.allowed_vlan);
+ mlx5_del_flow_rules(&vport->egress.drop_rule);
}
static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -703,12 +702,12 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
void *match_criteria;
- char table_name[32];
u32 *flow_group_in;
int table_size = 1;
int err = 0;
@@ -719,7 +718,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
- root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, vport->vport);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
return;
@@ -729,8 +728,10 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
if (!flow_group_in)
return;
- snprintf(table_name, 32, "ingress_%d", vport->vport);
- acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
+ ft_attr.max_fte = table_size;
+ if (vport->vport)
+ ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+ acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport->vport);
if (IS_ERR_OR_NULL(acl)) {
err = PTR_ERR(acl);
esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
@@ -763,7 +764,7 @@ out:
static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
- mlx5_del_flow_rule(&vport->ingress.drop_rule);
+ mlx5_del_flow_rules(&vport->ingress.drop_rule);
}
static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
@@ -785,9 +786,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_destination dest;
- u32 *match_v;
- u32 *match_c;
+ struct mlx5_flow_spec *spec;
int err = 0;
if (IS_ERR_OR_NULL(vport->ingress.acl)) {
@@ -806,35 +805,28 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
"vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
vport->vport, err);
goto out;
}
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
-
- dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
- dest.vport_num = vport->vport;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
vport->ingress.drop_rule =
- mlx5_add_flow_rule(vport->ingress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
- MLX5_FLOW_RULE_FWD_ACTION_DROP,
- &flow_act, &dest);
+ mlx5_add_flow_rules(vport->ingress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
err = PTR_ERR(vport->ingress.drop_rule);
printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
vport->ingress.drop_rule = NULL;
}
out:
- kfree(match_v);
- kfree(match_c);
+ kfree(spec);
return err;
}
@@ -842,9 +834,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
struct mlx5_flow_act flow_act = {};
- struct mlx5_flow_destination dest;
- u32 *match_v;
- u32 *match_c;
+ struct mlx5_flow_spec *spec;
int err = 0;
if (IS_ERR_OR_NULL(vport->egress.acl)) {
@@ -862,9 +852,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->vlan, vport->qos);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
err = -ENOMEM;
esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
vport->vport, err);
@@ -872,21 +861,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
- MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
- dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
- dest.vport_num = vport->vport;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->egress.allowed_vlan =
- mlx5_add_flow_rule(vport->egress.acl,
- MLX5_MATCH_OUTER_HEADERS,
- match_c,
- match_v,
- MLX5_FLOW_RULE_FWD_ACTION_ALLOW,
- &flow_act, &dest);
+ mlx5_add_flow_rules(vport->egress.acl, spec,
+ &flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
err = PTR_ERR(vport->egress.allowed_vlan);
printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err);
@@ -894,24 +879,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
goto out;
}
- /* Drop others rule (star rule) */
- memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
- memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
vport->egress.drop_rule =
- mlx5_add_flow_rule(vport->egress.acl,
- 0,
- match_c,
- match_v,
- MLX5_FLOW_RULE_FWD_ACTION_DROP,
- &flow_act, &dest);
+ mlx5_add_flow_rules(vport->egress.acl, NULL,
+ &flow_act, NULL, 0);
if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
err = PTR_ERR(vport->egress.drop_rule);
printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err);
vport->egress.drop_rule = NULL;
}
out:
- kfree(match_v);
- kfree(match_c);
+ kfree(spec);
return err;
}
@@ -1030,7 +1008,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
- esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+ esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c
deleted file mode 100644
index f3410249e67f..000000000000
--- a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.c
+++ /dev/null
@@ -1,102 +0,0 @@
-/*-
- * Copyright (c) 2022 NVIDIA corporation & affiliates.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <dev/mlx5/driver.h>
-#include <dev/mlx5/device.h>
-#include <dev/mlx5/mlx5_ifc.h>
-#include <dev/mlx5/mlx5_core/mlx5_fc_cmd.h>
-#include <dev/mlx5/mlx5_core/mlx5_core.h>
-
-int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
- enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
- u32 *id)
-{
- u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
- u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
- int err;
-
- MLX5_SET(alloc_flow_counter_in, in, opcode,
- MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
- MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
-
- err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
- if (!err)
- *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
- return err;
-}
-
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
-{
- return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
-}
-
-int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
-{
- u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
-
- MLX5_SET(dealloc_flow_counter_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
- MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
- return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
-}
-
-int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
- u64 *packets, u64 *bytes)
-{
- u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter)] = {};
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
- void *stats;
- int err = 0;
-
- MLX5_SET(query_flow_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, op_mod, 0);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (err)
- return err;
-
- stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
- *packets = MLX5_GET64(traffic_counter, stats, packets);
- *bytes = MLX5_GET64(traffic_counter, stats, octets);
- return 0;
-}
-
-int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
- u32 *out)
-{
- int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
- u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
-
- MLX5_SET(query_flow_counter_in, in, opcode,
- MLX5_CMD_OP_QUERY_FLOW_COUNTER);
- MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
- MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
-}
-
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h b/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h
deleted file mode 100644
index 3adebb3ca94c..000000000000
--- a/sys/dev/mlx5/mlx5_core/mlx5_fc_cmd.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2023, NVIDIA Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MLX5_FC_CMD_
-#define _MLX5_FC_CMD_
-
-#include "fs_core.h"
-
-int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
-int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
- enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
- u32 *id);
-int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
-int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
- u64 *packets, u64 *bytes);
-
-int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
- u32 *out);
-static inline int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
-{
- return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
- MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
-}
-
-#endif
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_chains.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_chains.c
new file mode 100644
index 000000000000..21c1914fd864
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_chains.c
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2020 Mellanox Technologies.
+
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_ifc.h>
+#include <dev/mlx5/fs.h>
+
+#include "mlx5_core.h"
+#include "fs_chains.h"
+#include "fs_ft_pool.h"
+#include "fs_core.h"
+
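+/* Shorthand accessors for struct mlx5_fs_chains fields. */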
+#define chains_lock(chains) ((chains)->lock)
+#define chains_xa(chains) ((chains)->chains_xa)
+#define prios_xa(chains) ((chains)->prios_xa)
+#define chains_default_ft(chains) ((chains)->chains_default_ft)
+#define chains_end_ft(chains) ((chains)->chains_end_ft)
+#define FT_TBL_SZ (64 * 1024)
+
+struct mlx5_fs_chains {
+ struct mlx5_core_dev *dev;
+
+ struct xarray chains_xa;
+ struct xarray prios_xa;
+ /* Protects chains_xa and prios_xa above */
+ struct mutex lock;
+
+ struct mlx5_flow_table *chains_default_ft;
+ struct mlx5_flow_table *chains_end_ft;
+
+ enum mlx5_flow_namespace_type ns;
+ u32 group_num;
+ u32 flags;
+ int fs_base_prio;
+ int fs_base_level;
+};
+
+struct fs_chain {
+ u32 chain;
+
+ int ref;
+ int id;
+ uint32_t xa_idx;
+
+ struct mlx5_fs_chains *chains;
+ struct list_head prios_list;
+ struct mlx5_flow_handle *restore_rule;
+ struct mlx5_modify_hdr *miss_modify_hdr;
+};
+
+struct prio_key {
+ u32 chain;
+ u32 prio;
+ u32 level;
+};
+
+struct prio {
+ struct list_head list;
+
+ struct prio_key key;
+ uint32_t xa_idx;
+
+ int ref;
+
+ struct fs_chain *chain;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_handle *miss_rule;
+};
+
+/*
+ * The rhashtable parameters from the Linux original were dropped in this
+ * port; chains and prios are tracked in the chains_xa/prios_xa xarrays
+ * instead.
+ */
+
+bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED;
+}
+
+bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains)
+{
+ return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
+}
+
+bool mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_prios_supported(chains) &&
+ mlx5_chains_ignore_flow_level_supported(chains);
+}
+
+u32 mlx5_chains_get_chain_range(struct mlx5_fs_chains *chains)
+{
+ if (!mlx5_chains_prios_supported(chains))
+ return 1;
+
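+ /* Leave the top value free: mlx5_chains_get_nf_ft_chain() hands out
+ * range + 1 as a dedicated chain.
+ */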
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX - 1;
+
+ /* We should get here only for eswitch case */
+ return FDB_TC_MAX_CHAIN;
+}
+
+u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
+{
+ return mlx5_chains_get_chain_range(chains) + 1;
+}
+
+u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
+{
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ if (!chains->dev->priv.eswitch)
+ return 1;
+
+ /* We should get here only for eswitch case */
+ return FDB_TC_MAX_PRIO;
+}
+
+static unsigned int mlx5_chains_get_level_range(struct mlx5_fs_chains *chains)
+{
+ if (mlx5_chains_ignore_flow_level_supported(chains))
+ return UINT_MAX;
+
+ /* Same value for FDB and NIC RX tables */
+ return FDB_TC_LEVELS_PER_PRIO;
+}
+
+void
+mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ chains_end_ft(chains) = ft;
+}
+
+static struct mlx5_flow_table *
+mlx5_chains_create_table(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int sz;
+
+ if (chains->flags & MLX5_CHAINS_FT_TUNNEL_SUPPORTED)
+ ft_attr.flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+ sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
+ ft_attr.max_fte = sz;
+
+ /* We use chains_default_ft(chains) as the table's next_ft till
+ * ignore_flow_level is allowed on FT creation and not just for FTEs.
+ * Instead caller should add an explicit miss rule if needed.
+ */
+ ft_attr.next_ft = chains_default_ft(chains);
+
+ /* The root table(chain 0, prio 1, level 0) is required to be
+ * connected to the previous fs_core managed prio.
+ * We always create it, as a managed table, in order to align with
+ * fs_core logic.
+ */
+ if (!mlx5_chains_ignore_flow_level_supported(chains) ||
+ (chain == 0 && prio == 1 && level == 0)) {
+ ft_attr.level = chains->fs_base_level;
+ ft_attr.prio = chains->fs_base_prio;
+ ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
+ mlx5_get_fdb_sub_ns(chains->dev, chain) :
+ mlx5_get_flow_namespace(chains->dev, chains->ns);
+ } else {
+ ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
+ ft_attr.prio = chains->fs_base_prio;
+ /* Firmware doesn't allow us to create another level 0 table,
+ * so we create all unmanaged tables as level 1 (base + 1).
+ *
+ * To connect them, we use explicit miss rules with
+ * ignore_flow_level. Caller is responsible to create
+ * these rules (if needed).
+ */
+ ft_attr.level = chains->fs_base_level + 1;
+ ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
+ }
+
+ ft_attr.autogroup.num_reserved_entries = 2;
+ ft_attr.autogroup.max_num_groups = chains->group_num;
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_warn(chains->dev, "Failed to create chains table err %d (chain: %d, prio: %d, level: %d, size: %d)\n",
+ (int)PTR_ERR(ft), chain, prio, level, sz);
+ return ft;
+ }
+
+ return ft;
+}
+
+static struct fs_chain *
+mlx5_chains_create_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s = NULL;
+ int err;
+
+ chain_s = kvzalloc(sizeof(*chain_s), GFP_KERNEL);
+ if (!chain_s)
+ return ERR_PTR(-ENOMEM);
+
+ chain_s->chains = chains;
+ chain_s->chain = chain;
+ INIT_LIST_HEAD(&chain_s->prios_list);
+
+ err = xa_alloc(&chains_xa(chains), &chain_s->xa_idx, chain_s,
+ xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_insert;
+
+ return chain_s;
+
+err_insert:
+ kvfree(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_chain(struct fs_chain *chain)
+{
+ struct mlx5_fs_chains *chains = chain->chains;
+
+ xa_erase(&chains_xa(chains), chain->xa_idx);
+ kvfree(chain);
+}
+
+static struct fs_chain *
+mlx5_chains_get_chain(struct mlx5_fs_chains *chains, u32 chain)
+{
+ struct fs_chain *chain_s = NULL;
+ unsigned long idx;
+
+ xa_for_each(&chains_xa(chains), idx, chain_s) {
+ if (chain_s->chain == chain)
+ break;
+ }
+
+ if (!chain_s) {
+ chain_s = mlx5_chains_create_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return chain_s;
+ }
+
+ chain_s->ref++;
+
+ return chain_s;
+}
+
+static struct mlx5_flow_handle *
+mlx5_chains_add_miss_rule(struct fs_chain *chain,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_NO_APPEND;
+ if (mlx5_chains_ignore_flow_level_supported(chain->chains))
+ act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
+
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
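+ /* A NULL spec matches all traffic, so this acts as the table-wide miss rule. */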
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+mlx5_chains_update_prio_prevs(struct prio *prio,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_handle *miss_rules[FDB_TC_LEVELS_PER_PRIO + 1] = {};
+ struct fs_chain *chain = prio->chain;
+ struct prio *pos;
+ int n = 0, err;
+
+ if (prio->key.level)
+ return 0;
+
+ /* Iterate in reverse order until reaching the level 0 rule of
+ * the previous priority, adding all the miss rules first, so we can
+ * revert them if any of them fails.
+ */
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ miss_rules[n] = mlx5_chains_add_miss_rule(chain,
+ pos->ft,
+ next_ft);
+ if (IS_ERR(miss_rules[n])) {
+ err = PTR_ERR(miss_rules[n]);
+ goto err_prev_rule;
+ }
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ /* Success, delete old miss rules, and update the pointers. */
+ n = 0;
+ pos = prio;
+ list_for_each_entry_continue_reverse(pos,
+ &chain->prios_list,
+ list) {
+ mlx5_del_flow_rules(&pos->miss_rule);
+
+ pos->miss_rule = miss_rules[n];
+ pos->next_ft = next_ft;
+
+ n++;
+ if (!pos->key.level)
+ break;
+ }
+
+ return 0;
+
+err_prev_rule:
+ while (--n >= 0)
+ mlx5_del_flow_rules(&miss_rules[n]);
+
+ return err;
+}
+
+static void
+mlx5_chains_put_chain(struct fs_chain *chain)
+{
+ if (--chain->ref == 0)
+ mlx5_chains_destroy_chain(chain);
+}
+
+static struct prio *
+mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
+ u32 chain, u32 prio, u32 level)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_group *miss_group;
+ struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *ft;
+ struct fs_chain *chain_s;
+ struct list_head *pos;
+ struct prio *prio_s;
+ u32 *flow_group_in;
+ int err;
+
+ chain_s = mlx5_chains_get_chain(chains, chain);
+ if (IS_ERR(chain_s))
+ return ERR_CAST(chain_s);
+
+ prio_s = kvzalloc(sizeof(*prio_s), GFP_KERNEL);
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!prio_s || !flow_group_in) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Chain's prio list is sorted by prio and level.
+ * And all levels of some prio point to the next prio's level 0.
+ * Example list (prio, level):
+ * (3,0)->(3,1)->(5,0)->(5,1)->(6,1)->(7,0)
+ * In hardware, we will have the following pointers:
+ * (3,0) -> (5,0) -> (7,0) -> Slow path
+ * (3,1) -> (5,0)
+ * (5,1) -> (7,0)
+ * (6,1) -> (7,0)
+ */
+
+ /* Default miss for each chain: */
+ next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
+ chains_default_ft(chains) :
+ chains_end_ft(chains);
+ list_for_each(pos, &chain_s->prios_list) {
+ struct prio *p = list_entry(pos, struct prio, list);
+
+ /* exit on first pos that is larger */
+ if (prio < p->key.prio || (prio == p->key.prio &&
+ level < p->key.level)) {
+ /* Get next level 0 table */
+ next_ft = p->key.level == 0 ? p->ft : p->next_ft;
+ break;
+ }
+ }
+
+ ft = mlx5_chains_create_table(chains, chain, prio, level);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto err_create;
+ }
+
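+ /* The miss group occupies the two entries reserved at the end of the
+ * table via autogroup.num_reserved_entries in mlx5_chains_create_table().
+ */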
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ ft->max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ ft->max_fte - 1);
+ miss_group = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(miss_group)) {
+ err = PTR_ERR(miss_group);
+ goto err_group;
+ }
+
+ /* Add miss rule to next_ft */
+ miss_rule = mlx5_chains_add_miss_rule(chain_s, ft, next_ft);
+ if (IS_ERR(miss_rule)) {
+ err = PTR_ERR(miss_rule);
+ goto err_miss_rule;
+ }
+
+ prio_s->miss_group = miss_group;
+ prio_s->miss_rule = miss_rule;
+ prio_s->next_ft = next_ft;
+ prio_s->chain = chain_s;
+ prio_s->key.chain = chain;
+ prio_s->key.prio = prio;
+ prio_s->key.level = level;
+ prio_s->ft = ft;
+
+ err = xa_alloc(&prios_xa(chains), &prio_s->xa_idx, prio_s,
+ xa_limit_32b, GFP_KERNEL);
+ if (err)
+ goto err_insert;
+
+ list_add(&prio_s->list, pos->prev);
+
+ /* Table is ready, connect it */
+ err = mlx5_chains_update_prio_prevs(prio_s, ft);
+ if (err)
+ goto err_update;
+
+ kvfree(flow_group_in);
+ return prio_s;
+
+err_update:
+ list_del(&prio_s->list);
+ xa_erase(&prios_xa(chains), prio_s->xa_idx);
+err_insert:
+ mlx5_del_flow_rules(&miss_rule);
+err_miss_rule:
+ mlx5_destroy_flow_group(miss_group);
+err_group:
+ mlx5_destroy_flow_table(ft);
+err_create:
+err_alloc:
+ kvfree(prio_s);
+ kvfree(flow_group_in);
+ mlx5_chains_put_chain(chain_s);
+ return ERR_PTR(err);
+}
+
+static void
+mlx5_chains_destroy_prio(struct mlx5_fs_chains *chains,
+ struct prio *prio)
+{
+ struct fs_chain *chain = prio->chain;
+
+ WARN_ON(mlx5_chains_update_prio_prevs(prio,
+ prio->next_ft));
+
+ list_del(&prio->list);
+ xa_erase(&prios_xa(chains), prio->xa_idx);
+ mlx5_del_flow_rules(&prio->miss_rule);
+ mlx5_destroy_flow_group(prio->miss_group);
+ mlx5_destroy_flow_table(prio->ft);
+ mlx5_chains_put_chain(chain);
+ kvfree(prio);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct mlx5_flow_table *prev_fts;
+ struct prio *prio_s;
+ unsigned long idx;
+ int l = 0;
+
+ if ((chain > mlx5_chains_get_chain_range(chains) &&
+ chain != mlx5_chains_get_nf_ft_chain(chains)) ||
+ prio > mlx5_chains_get_prio_range(chains) ||
+ level > mlx5_chains_get_level_range(chains))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ /* create earlier levels for correct fs_core lookup when
+ * connecting tables.
+ */
+ for (l = 0; l < level; l++) {
+ prev_fts = mlx5_chains_get_table(chains, chain, prio, l);
+ if (IS_ERR(prev_fts)) {
+ prio_s = ERR_CAST(prev_fts);
+ goto err_get_prevs;
+ }
+ }
+
+ mutex_lock(&chains_lock(chains));
+ xa_for_each(&prios_xa(chains), idx, prio_s) {
+ if (chain == prio_s->key.chain &&
+ prio == prio_s->key.prio &&
+ level == prio_s->key.level)
+ break;
+ }
+ if (!prio_s) {
+ prio_s = mlx5_chains_create_prio(chains, chain,
+ prio, level);
+ if (IS_ERR(prio_s))
+ goto err_create_prio;
+ }
+
+ ++prio_s->ref;
+ mutex_unlock(&chains_lock(chains));
+
+ return prio_s->ft;
+
+err_create_prio:
+ mutex_unlock(&chains_lock(chains));
+err_get_prevs:
+ while (--l >= 0)
+ mlx5_chains_put_table(chains, chain, prio, l);
+ return ERR_CAST(prio_s);
+}
+
+void
+mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
+ u32 level)
+{
+ struct prio *prio_s;
+ unsigned long idx;
+
+ mutex_lock(&chains_lock(chains));
+
+ xa_for_each(&prios_xa(chains), idx, prio_s) {
+ if (chain == prio_s->key.chain &&
+ prio == prio_s->key.prio &&
+ level == prio_s->key.level)
+ break;
+ }
+ if (!prio_s)
+ goto err_get_prio;
+
+ if (--prio_s->ref == 0)
+ mlx5_chains_destroy_prio(chains, prio_s);
+ mutex_unlock(&chains_lock(chains));
+
+ while (level-- > 0)
+ mlx5_chains_put_table(chains, chain, prio, level);
+
+ return;
+
+err_get_prio:
+ mutex_unlock(&chains_lock(chains));
+ WARN_ONCE(1,
+ "Couldn't find table: (chain: %d prio: %d level: %d)",
+ chain, prio, level);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
+{
+ return chains_end_ft(chains);
+}
+
+struct mlx5_flow_table *
+mlx5_chains_create_global_table(struct mlx5_fs_chains *chains)
+{
+ u32 chain, prio, level;
+ int err;
+
+ if (!mlx5_chains_ignore_flow_level_supported(chains)) {
+ err = -EOPNOTSUPP;
+
+ mlx5_core_warn(chains->dev,
+ "Couldn't create global flow table, ignore_flow_level not supported.");
+ goto err_ignore;
+ }
+
+ chain = mlx5_chains_get_chain_range(chains);
+ prio = mlx5_chains_get_prio_range(chains);
+ level = mlx5_chains_get_level_range(chains);
+
+ return mlx5_chains_create_table(chains, chain, prio, level);
+
+err_ignore:
+ return ERR_PTR(err);
+}
+
+void
+mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
+ struct mlx5_flow_table *ft)
+{
+ mlx5_destroy_flow_table(ft);
+}
+
+static struct mlx5_fs_chains *
+mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains;
+
+ chains = kzalloc(sizeof(*chains), GFP_KERNEL);
+ if (!chains)
+ return ERR_PTR(-ENOMEM);
+
+ chains->dev = dev;
+ chains->flags = attr->flags;
+ chains->ns = attr->ns;
+ chains->group_num = attr->max_grp_num;
+ chains->fs_base_prio = attr->fs_base_prio;
+ chains->fs_base_level = attr->fs_base_level;
+ chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
+
+ xa_init(&chains_xa(chains));
+ xa_init(&prios_xa(chains));
+
+ mutex_init(&chains_lock(chains));
+
+ return chains;
+}
+
+static void
+mlx5_chains_cleanup(struct mlx5_fs_chains *chains)
+{
+ mutex_destroy(&chains_lock(chains));
+ xa_destroy(&prios_xa(chains));
+ xa_destroy(&chains_xa(chains));
+
+ kfree(chains);
+}
+
+struct mlx5_fs_chains *
+mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
+{
+ struct mlx5_fs_chains *chains;
+
+ chains = mlx5_chains_init(dev, attr);
+
+ return chains;
+}
+
+void
+mlx5_chains_destroy(struct mlx5_fs_chains *chains)
+{
+ mlx5_chains_cleanup(chains);
+}
+
+void
+mlx5_chains_print_info(struct mlx5_fs_chains *chains)
+{
+ mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
+}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_cmd.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_cmd.c
index 0f827f0e69d3..b3c118fedc9b 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fs_cmd.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_cmd.c
@@ -1,468 +1,993 @@
-/*-
- * Copyright (c) 2013-2017, Mellanox Technologies, Ltd. All rights reserved.
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
*
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
-#include "opt_rss.h"
-#include "opt_ratelimit.h"
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <dev/mlx5/mlx5_ifc.h>
+#include <dev/mlx5/driver.h>
#include <dev/mlx5/device.h>
-#include <dev/mlx5/fs.h>
+#include <dev/mlx5/mlx5_ifc.h>
+
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "fs_ft_pool.h"
+#include "mlx5_core.h"
+#include "eswitch.h"
-#include <dev/mlx5/mlx5_core/fs_core.h>
-#include <dev/mlx5/mlx5_core/mlx5_core.h>
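+/*
+ * No-op command backend. These stubs appear to serve namespaces whose
+ * tables have no firmware object behind them, so each command succeeds
+ * without touching the device.
+ */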
+static int mlx5_cmd_stub_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 underlay_qpn,
+ bool disconnect)
+{
+ return 0;
+}
-int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- enum fs_ft_type type,
- unsigned int id)
+static int mlx5_cmd_stub_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
+ int max_fte = ft_attr->max_fte;
- if (!dev)
- return -EINVAL;
+ ft->max_fte = max_fte ? roundup_pow_of_two(max_fte) : 1;
+
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ struct mlx5_flow_group *fg)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_stub_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+}
+
+static int mlx5_cmd_stub_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ return 0;
+}
+
+static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+}
+
+static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
+static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+ return 0;
+}
+
+static u32 mlx5_cmd_stub_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
+{
+ return 0;
+}
+
+static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
+{
+ u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+
+ if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ underlay_qpn == 0)
+ return 0;
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
- MLX5_SET(set_flow_table_root_in, in, table_type, type);
- MLX5_SET(set_flow_table_root_in, in, table_id, id);
+ MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
+
+ if (disconnect)
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ else
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+
+ return mlx5_cmd_exec_in(dev, set_flow_table_root, in);
}
-int mlx5_cmd_fs_create_ft(struct mlx5_core_dev *dev,
- u16 vport, enum fs_ft_type type, unsigned int level,
- unsigned int log_size, const char *name, unsigned int *table_id)
+static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table_attr *ft_attr,
+ struct mlx5_flow_table *next_ft)
{
- u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {0};
+ int en_encap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+ int en_decap = !!(ft->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+ int term = !!(ft->flags & MLX5_FLOW_TABLE_TERMINATION);
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ unsigned int size;
int err;
- if (!dev)
- return -EINVAL;
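+ /* Ask the FT pool for a size; zero means nothing is available. */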
+ size = mlx5_ft_pool_get_avail_sz(dev, ft->type, ft_attr->max_fte);
+ if (!size)
+ return -ENOSPC;
MLX5_SET(create_flow_table_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_TABLE);
- MLX5_SET(create_flow_table_in, in, table_type, type);
- MLX5_SET(create_flow_table_in, in, flow_table_context.level, level);
- MLX5_SET(create_flow_table_in, in, flow_table_context.log_size,
- log_size);
- if (strstr(name, FS_REFORMAT_KEYWORD) != NULL)
- MLX5_SET(create_flow_table_in, in,
- flow_table_context.reformat_en, 1);
- if (vport) {
- MLX5_SET(create_flow_table_in, in, vport_number, vport);
- MLX5_SET(create_flow_table_in, in, other_vport, 1);
+ MLX5_SET(create_flow_table_in, in, uid, ft_attr->uid);
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.level, ft->level);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.log_size, size ? ilog2(size) : 0);
+ MLX5_SET(create_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+
+ MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+ en_decap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+ en_encap);
+ MLX5_SET(create_flow_table_in, in, flow_table_context.termination_table,
+ term);
+
+ switch (ft->op_mod) {
+ case FS_FT_OP_MOD_NORMAL:
+ if (next_ft) {
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_id, next_ft->id);
+ } else {
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ ft->def_miss_action);
+ }
+ break;
+
+ case FS_FT_OP_MOD_LAG_DEMUX:
+ MLX5_SET(create_flow_table_in, in, op_mod, 0x1);
+ if (next_ft)
+ MLX5_SET(create_flow_table_in, in,
+ flow_table_context.lag_master_next_table_id,
+ next_ft->id);
+ break;
}
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+ err = mlx5_cmd_exec_inout(dev, create_flow_table, in, out);
+ if (!err) {
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+ ft->max_fte = size;
+ } else {
+ mlx5_ft_pool_put_sz(ns->dev, size);
+ }
return err;
}
-int mlx5_cmd_fs_destroy_ft(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id)
+static int mlx5_cmd_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)] = {0};
-
- if (!dev)
- return -EINVAL;
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ int err;
MLX5_SET(destroy_flow_table_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
- MLX5_SET(destroy_flow_table_in, in, table_type, type);
- MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
- if (vport) {
- MLX5_SET(destroy_flow_table_in, in, vport_number, vport);
- MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+ MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+
+ err = mlx5_cmd_exec_in(dev, destroy_flow_table, in);
+ if (!err)
+ mlx5_ft_pool_put_sz(ns->dev, ft->max_fte);
+
+ return err;
+}
+
+static int mlx5_cmd_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_flow_table_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+
+ MLX5_SET(modify_flow_table_in, in, opcode,
+ MLX5_CMD_OP_MODIFY_FLOW_TABLE);
+ MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+
+ if (ft->op_mod == FS_FT_OP_MOD_LAG_DEMUX) {
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.lag_master_next_table_id, next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.lag_master_next_table_id, 0);
+ }
+ } else {
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ MLX5_SET(modify_flow_table_in, in, modify_field_select,
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
+ if (next_ft) {
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD);
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_id,
+ next_ft->id);
+ } else {
+ MLX5_SET(modify_flow_table_in, in,
+ flow_table_context.table_miss_action,
+ ft->def_miss_action);
+ }
}
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ return mlx5_cmd_exec_in(dev, modify_flow_table, in);
}
-int mlx5_cmd_fs_create_fg(struct mlx5_core_dev *dev,
- u32 *in,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int *group_id)
+static int mlx5_cmd_create_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ u32 *in,
+ struct mlx5_flow_group *fg)
{
- u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {0};
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
int err;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- if (!dev)
- return -EINVAL;
MLX5_SET(create_flow_group_in, in, opcode,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
- MLX5_SET(create_flow_group_in, in, table_type, type);
- MLX5_SET(create_flow_group_in, in, table_id, table_id);
- if (vport) {
- MLX5_SET(create_flow_group_in, in, vport_number, vport);
- MLX5_SET(create_flow_group_in, in, other_vport, 1);
- }
-
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ MLX5_SET(create_flow_group_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ err = mlx5_cmd_exec_inout(dev, create_flow_group, in, out);
if (!err)
- *group_id = MLX5_GET(create_flow_group_out, out, group_id);
-
+ fg->id = MLX5_GET(create_flow_group_out, out, group_id);
return err;
}
-int mlx5_cmd_fs_destroy_fg(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int group_id)
+static int mlx5_cmd_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
{
- u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)] = {0};
-
- if (!dev)
- return -EINVAL;
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
MLX5_SET(destroy_flow_group_in, in, opcode,
MLX5_CMD_OP_DESTROY_FLOW_GROUP);
- MLX5_SET(destroy_flow_group_in, in, table_type, type);
- MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
- MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
- if (vport) {
- MLX5_SET(destroy_flow_group_in, in, vport_number, vport);
- MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
- }
-
- return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
+ MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
+ MLX5_SET(destroy_flow_group_in, in, group_id, fg->id);
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+ return mlx5_cmd_exec_in(dev, destroy_flow_group, in);
}
-int mlx5_cmd_fs_set_fte(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_fte_status *fte_status,
- u32 *match_val,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int index, unsigned int group_id,
- struct mlx5_flow_act *flow_act,
- u32 sw_action, int dest_size,
- struct list_head *dests) /* mlx5_flow_desination */
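+/*
+ * A destination list is "extended" when a forwarding FTE has more than
+ * one destination and at least one of them requests a per-destination
+ * packet reformat; extended entries use the larger extended_dest_format.
+ */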
+static int mlx5_set_extended_dest(struct mlx5_core_dev *dev,
+ struct fs_fte *fte, bool *extended_dest)
{
- u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0};
- u32 *in;
- unsigned int inlen;
+ int fw_log_max_fdb_encap_uplink =
+ MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+ int num_fwd_destinations = 0;
struct mlx5_flow_rule *dst;
- void *in_flow_context;
- void *in_match_value;
- void *in_dests;
- int err;
- int opmod = 0;
- int modify_mask = 0;
- int atomic_mod_cap;
- u32 prm_action = 0;
- int count_list = 0;
+ int num_encap = 0;
- if (sw_action != MLX5_FLOW_RULE_FWD_ACTION_DEST)
- dest_size = 0;
+ *extended_dest = false;
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return 0;
- if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_ALLOW)
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_NONE)
+ continue;
+ if ((dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
+ dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+ num_encap++;
+ num_fwd_destinations++;
+ }
+ if (num_fwd_destinations > 1 && num_encap > 0)
+ *extended_dest = true;
- if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_DROP)
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+ mlx5_core_warn(dev, "FW does not support extended destination");
+ return -EOPNOTSUPP;
+ }
+ if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+ mlx5_core_warn(dev, "FW does not support more than %d encaps",
+ 1 << fw_log_max_fdb_encap_uplink);
+ return -EOPNOTSUPP;
+ }
- if (sw_action & MLX5_FLOW_RULE_FWD_ACTION_DEST)
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ return 0;
+}
- if (flow_act->actions & MLX5_FLOW_ACT_ACTIONS_COUNT) {
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
- count_list = 1;
- }
+static void
+mlx5_cmd_set_fte_flow_meter(struct fs_fte *fte, void *in_flow_context)
+{
+ void *exe_aso_ctrl;
+ void *execute_aso;
+
+ execute_aso = MLX5_ADDR_OF(flow_context, in_flow_context,
+ execute_aso[0]);
+ MLX5_SET(execute_aso, execute_aso, valid, 1);
+ MLX5_SET(execute_aso, execute_aso, aso_object_id,
+ fte->action.exe_aso.object_id);
+
+ exe_aso_ctrl = MLX5_ADDR_OF(execute_aso, execute_aso, exe_aso_ctrl);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, return_reg_id,
+ fte->action.exe_aso.return_reg_id);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, aso_type,
+ fte->action.exe_aso.type);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, init_color,
+ fte->action.exe_aso.flow_meter.init_color);
+ MLX5_SET(exe_aso_ctrl_flow_meter, exe_aso_ctrl, meter_id,
+ fte->action.exe_aso.flow_meter.meter_idx);
+}
- inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
- (dest_size + count_list) * MLX5_ST_SZ_BYTES(dest_format_struct);
+static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
+ int opmod, int modify_mask,
+ struct mlx5_flow_table *ft,
+ unsigned int group_id,
+ struct fs_fte *fte)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+ bool extended_dest = false;
+ struct mlx5_flow_rule *dst;
+ void *in_flow_context, *vlan;
+ void *in_match_value;
+ unsigned int inlen;
+ int dst_cnt_size;
+ void *in_dests;
+ u32 *in;
+ int err;
- if (!dev)
- return -EINVAL;
+ if (mlx5_set_extended_dest(dev, fte, &extended_dest))
+ return -EOPNOTSUPP;
- if (*fte_status & FS_FTE_STATUS_EXISTING) {
- atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.
- flow_modify_en);
- if (!atomic_mod_cap)
- return -ENOTSUPP;
- opmod = 1;
- modify_mask = 1 <<
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
- }
+ if (!extended_dest)
+ dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+ else
+ dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
+ inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
return -ENOMEM;
- }
MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
MLX5_SET(set_fte_in, in, op_mod, opmod);
MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
- MLX5_SET(set_fte_in, in, table_type, type);
- MLX5_SET(set_fte_in, in, table_id, table_id);
- MLX5_SET(set_fte_in, in, flow_index, index);
- if (vport) {
- MLX5_SET(set_fte_in, in, vport_number, vport);
- MLX5_SET(set_fte_in, in, other_vport, 1);
- }
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ MLX5_SET(set_fte_in, in, ignore_flow_level,
+ !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL));
+
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
- if (flow_act->actions & MLX5_FLOW_ACT_ACTIONS_FLOW_TAG)
- MLX5_SET(flow_context, in_flow_context, flow_tag, flow_act->flow_tag);
- if (flow_act->actions & MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR) {
- MLX5_SET(flow_context, in_flow_context, modify_header_id,
- flow_act->modify_hdr->id);
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- }
- if (flow_act->actions & MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT) {
- MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
- flow_act->pkt_reformat->id);
- prm_action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+
+ MLX5_SET(flow_context, in_flow_context, flow_tag,
+ fte->flow_context.flow_tag);
+ MLX5_SET(flow_context, in_flow_context, flow_source,
+ fte->flow_context.flow_source);
+
+ MLX5_SET(flow_context, in_flow_context, extended_destination,
+ extended_dest);
+ if (extended_dest) {
+ u32 action;
+
+ action = fte->action.action &
+ ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ MLX5_SET(flow_context, in_flow_context, action, action);
+ } else {
+ MLX5_SET(flow_context, in_flow_context, action,
+ fte->action.action);
+ if (fte->action.pkt_reformat)
+ MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+ fte->action.pkt_reformat->id);
}
- MLX5_SET(flow_context, in_flow_context, destination_list_size,
- dest_size);
+ if (fte->action.modify_hdr)
+ MLX5_SET(flow_context, in_flow_context, modify_header_id,
+ fte->action.modify_hdr->id);
+
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_type,
+ fte->action.crypto.type);
+ MLX5_SET(flow_context, in_flow_context, encrypt_decrypt_obj_id,
+ fte->action.crypto.obj_id);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+ vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+ MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+ MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+ MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
- memcpy(in_match_value, match_val, MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
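+ /*
+ * Forward destinations and flow counters share this destination
+ * array: the first pass below skips counters, which are appended
+ * by the counter loop that follows.
+ */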
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ int list_size = 0;
- if (dest_size) {
- list_for_each_entry(dst, dests, base.list) {
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ enum mlx5_flow_destination_type type = dst->dest_attr.type;
+ enum mlx5_ifc_flow_destination_type ifc_type;
unsigned int id;
- MLX5_SET(dest_format_struct, in_dests, destination_type,
- dst->dest_attr.type);
- if (dst->dest_attr.type ==
- MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE)
+ if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ switch (type) {
+ case MLX5_FLOW_DESTINATION_TYPE_NONE:
+ continue;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+ id = dst->dest_attr.ft_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
- else
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id_valid,
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_VHCA_ID));
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_eswitch_owner_vhca_id,
+ dst->dest_attr.vport.vhca_id);
+ if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
+ /* destination_id is reserved */
+ id = 0;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+ break;
+ }
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+ id = dst->dest_attr.vport.num;
+ if (extended_dest &&
+ dst->dest_attr.vport.pkt_reformat) {
+ MLX5_SET(dest_format_struct, in_dests,
+ packet_reformat,
+ !!(dst->dest_attr.vport.flags &
+ MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+ MLX5_SET(extended_dest_format, in_dests,
+ packet_reformat_id,
+ dst->dest_attr.vport.pkt_reformat->id);
+ }
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+ id = dst->dest_attr.sampler_id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+ break;
+ case MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE:
+ MLX5_SET(dest_format_struct, in_dests,
+ destination_table_type, dst->dest_attr.ft->type);
+ id = dst->dest_attr.ft->id;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+ break;
+ default:
id = dst->dest_attr.tir_num;
+ ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+ }
+
+ MLX5_SET(dest_format_struct, in_dests, destination_type,
+ ifc_type);
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ in_dests += dst_cnt_size;
+ list_size++;
}
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
}
- if (flow_act->actions & MLX5_FLOW_ACT_ACTIONS_COUNT) {
- MLX5_SET(dest_format_struct, in_dests, destination_id,
- mlx5_fc_id(flow_act->counter));
- in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
- MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, 1);
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+ log_max_flow_counter,
+ ft->type));
+ int list_size = 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ dst->dest_attr.counter_id);
+ in_dests += dst_cnt_size;
+ list_size++;
+ }
+ if (list_size > max_list_size) {
+ err = -EINVAL;
+ goto err_out;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
}
- MLX5_SET(flow_context, in_flow_context, action, prm_action);
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
- if (!err)
- *fte_status |= FS_FTE_STATUS_EXISTING;
+ if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+ if (fte->action.exe_aso.type == MLX5_EXE_ASO_FLOW_METER) {
+ mlx5_cmd_set_fte_flow_meter(fte, in_flow_context);
+ } else {
+ err = -EOPNOTSUPP;
+ goto err_out;
+ }
+ }
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
kvfree(in);
+ return err;
+}
+
+static int mlx5_cmd_create_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *group,
+ struct fs_fte *fte)
+{
+ struct mlx5_core_dev *dev = ns->dev;
+ unsigned int group_id = group->id;
+
+ return mlx5_cmd_set_fte(dev, 0, 0, ft, group_id, fte);
+}
+
+static int mlx5_cmd_update_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg,
+ int modify_mask,
+ struct fs_fte *fte)
+{
+ int opmod;
+ struct mlx5_core_dev *dev = ns->dev;
+ int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.
+ flow_modify_en);
+
+ if (!atomic_mod_cap)
+ return -EOPNOTSUPP;
+ opmod = 1;
+
+ return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, fg->id, fte);
+}
+
+static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_table *ft,
+ struct fs_fte *fte)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+
+ MLX5_SET(delete_fte_in, in, opcode,
+ MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+ MLX5_SET(delete_fte_in, in, table_type, ft->type);
+ MLX5_SET(delete_fte_in, in, table_id, ft->id);
+ MLX5_SET(delete_fte_in, in, flow_index, fte->index);
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport,
+ !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT));
+
+ return mlx5_cmd_exec_in(dev, delete_fte, in);
+}
+
+int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
+ u32 *id)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+ MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
+
+ err = mlx5_cmd_exec_inout(dev, alloc_flow_counter, in, out);
+ if (!err)
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
return err;
}
-int mlx5_cmd_fs_delete_fte(struct mlx5_core_dev *dev,
- u16 vport,
- enum fs_fte_status *fte_status,
- enum fs_ft_type type, unsigned int table_id,
- unsigned int index)
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
+{
+ return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {};
+
+ MLX5_SET(dealloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+ return mlx5_cmd_exec_in(dev, dealloc_flow_counter, in);
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
+ u64 *packets, u64 *bytes)
+{
+ u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
+ void *stats;
+ int err = 0;
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+ err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+ return 0;
+}
+
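+/*
+ * A bulk query output is one query_flow_counter_out header followed by
+ * bulk_len traffic_counter entries.
+ */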
+int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
- u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {0};
- u32 out[MLX5_ST_SZ_DW(delete_fte_out)] = {0};
+ return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
+}
+
+int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
+ u32 *out)
+{
+ int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {};
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
+ return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+}
+
+static int mlx5_cmd_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type namespace,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ void *packet_reformat_context_in;
+ int max_encap_size;
+ void *reformat;
+ int inlen;
int err;
+ u32 *in;
- if (!(*fte_status & FS_FTE_STATUS_EXISTING))
- return 0;
+ if (namespace == MLX5_FLOW_NAMESPACE_FDB ||
+ namespace == MLX5_FLOW_NAMESPACE_FDB_BYPASS)
+ max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
+ else
+ max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
- if (!dev)
+ if (params->size > max_encap_size) {
+ mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
+ params->size, max_encap_size);
return -EINVAL;
+ }
- MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
- MLX5_SET(delete_fte_in, in, table_type, type);
- MLX5_SET(delete_fte_in, in, table_id, table_id);
- MLX5_SET(delete_fte_in, in, flow_index, index);
- if (vport) {
- MLX5_SET(delete_fte_in, in, vport_number, vport);
- MLX5_SET(delete_fte_in, in, other_vport, 1);
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
+ params->size, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
+ in, packet_reformat_context);
+ reformat = MLX5_ADDR_OF(packet_reformat_context_in,
+ packet_reformat_context_in,
+ reformat_data);
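+ /* inlen covers the command layout up to reformat_data plus the payload. */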
+ inlen = reformat - (void *)in + params->size;
+
+ MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_data_size, params->size);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_type, params->type);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_param_0, params->param_0);
+ MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
+ reformat_param_1, params->param_1);
+ if (params->data && params->size)
+ memcpy(reformat, params->data, params->size);
+
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
+ out, packet_reformat_id);
+ kfree(in);
+ return err;
+}
+
+static void mlx5_cmd_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+
+ MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+ MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+ pkt_reformat->id);
+
+ mlx5_cmd_exec_in(dev, dealloc_packet_reformat_context, in);
+}
+
+static int mlx5_cmd_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+ u8 namespace, u8 num_actions,
+ void *modify_actions,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
+ int max_actions, actions_size, inlen, err;
+ struct mlx5_core_dev *dev = ns->dev;
+ void *actions_in;
+ u8 table_type;
+ u32 *in;
+
+ switch (namespace) {
+ case MLX5_FLOW_NAMESPACE_FDB:
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
+ table_type = FS_FT_FDB;
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_RX;
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
+ max_actions = MLX5_CAP_FLOWTABLE_NIC_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_NIC_TX;
+ break;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
+ table_type = FS_FT_ESW_INGRESS_ACL;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ max_actions = MLX5_CAP_FLOWTABLE_RDMA_TX(dev, max_modify_header_actions);
+ table_type = FS_FT_RDMA_TX;
+ break;
+ default:
+ return -EOPNOTSUPP;
}
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
- if (!err)
- *fte_status = 0;
+ if (num_actions > max_actions) {
+ mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
+ num_actions, max_actions);
+ return -EOPNOTSUPP;
+ }
+
+ actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
+ inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
+
+ in = kzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(alloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+ MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
+
+ actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+ memcpy(actions_in, modify_actions, actions_size);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+
+ modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
+ kfree(in);
return err;
}
-int mlx5_cmd_modify_header_alloc(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type namespace,
- u8 num_actions,
- void *modify_actions,
- struct mlx5_modify_hdr *modify_hdr)
-{
- u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
- int max_actions, actions_size, inlen, err;
- void *actions_in;
- u8 table_type;
- u32 *in;
-
- switch (namespace) {
- case MLX5_FLOW_NAMESPACE_FDB:
- max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, max_modify_header_actions);
- table_type = FS_FT_FDB;
- break;
- case MLX5_FLOW_NAMESPACE_KERNEL:
- case MLX5_FLOW_NAMESPACE_BYPASS:
- max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(dev, max_modify_header_actions);
- table_type = FS_FT_NIC_RX;
- break;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- max_actions = MLX5_CAP_ESW_INGRESS_ACL(dev, max_modify_header_actions);
- table_type = FS_FT_ESW_INGRESS_ACL;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- if (num_actions > max_actions) {
- mlx5_core_warn(dev, "too many modify header actions %d, max supported %d\n",
- num_actions, max_actions);
- return -EOPNOTSUPP;
- }
-
- actions_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) * num_actions;
- inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) + actions_size;
-
- in = kzalloc(inlen, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- MLX5_SET(alloc_modify_header_context_in, in, opcode,
- MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
- MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
- MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_actions);
-
- actions_in = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
- memcpy(actions_in, modify_actions, actions_size);
-
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-
- modify_hdr->id = MLX5_GET(alloc_modify_header_context_out, out, modify_header_id);
- kfree(in);
-
- return err;
-}
-
-void mlx5_cmd_modify_header_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_modify_hdr *modify_hdr)
-{
- u32 out[MLX5_ST_SZ_DW(dealloc_modify_header_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
-
- MLX5_SET(dealloc_modify_header_context_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
- MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
- modify_hdr->id);
-
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5_cmd_packet_reformat_alloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat_params *params,
- enum mlx5_flow_namespace_type namespace,
- struct mlx5_pkt_reformat *pkt_reformat)
-{
- u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
- void *packet_reformat_context_in;
- int max_encap_size;
- void *reformat;
- int inlen;
- int err;
- u32 *in;
-
- if (namespace == MLX5_FLOW_NAMESPACE_FDB)
- max_encap_size = MLX5_CAP_ESW(dev, max_encap_header_size);
- else
- max_encap_size = MLX5_CAP_FLOWTABLE(dev, max_encap_header_size);
-
- if (params->size > max_encap_size) {
- mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
- params->size, max_encap_size);
- return -EINVAL;
- }
-
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in) +
- params->size, GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- packet_reformat_context_in = MLX5_ADDR_OF(alloc_packet_reformat_context_in,
- in, packet_reformat_context);
- reformat = MLX5_ADDR_OF(packet_reformat_context_in,
- packet_reformat_context_in,
- reformat_data);
- inlen = reformat - (void *)in + params->size;
-
- MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
- MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_data_size, params->size);
- MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_type, params->type);
- MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_param_0, params->param_0);
- MLX5_SET(packet_reformat_context_in, packet_reformat_context_in,
- reformat_param_1, params->param_1);
- if (params->data && params->size)
- memcpy(reformat, params->data, params->size);
-
- err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-
- pkt_reformat->id = MLX5_GET(alloc_packet_reformat_context_out,
- out, packet_reformat_id);
- kfree(in);
+static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+
+ MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+ MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+ modify_hdr->id);
- return err;
+ mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
-void mlx5_cmd_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat *pkt_reformat)
+static u32 mlx5_cmd_get_capabilities(struct mlx5_flow_root_namespace *ns,
+ enum fs_flow_table_type ft_type)
{
- u32 out[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_out)] = {};
- u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+ return 0;
+}
- MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
- MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
- MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
- pkt_reformat->id);
+static const struct mlx5_flow_cmds mlx5_flow_cmds = {
+ .create_flow_table = mlx5_cmd_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_modify_flow_table,
+ .create_flow_group = mlx5_cmd_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_destroy_flow_group,
+ .create_fte = mlx5_cmd_create_fte,
+ .update_fte = mlx5_cmd_update_fte,
+ .delete_fte = mlx5_cmd_delete_fte,
+ .update_root_ft = mlx5_cmd_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_get_capabilities,
+};
+
+static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
+ .create_flow_table = mlx5_cmd_stub_create_flow_table,
+ .destroy_flow_table = mlx5_cmd_stub_destroy_flow_table,
+ .modify_flow_table = mlx5_cmd_stub_modify_flow_table,
+ .create_flow_group = mlx5_cmd_stub_create_flow_group,
+ .destroy_flow_group = mlx5_cmd_stub_destroy_flow_group,
+ .create_fte = mlx5_cmd_stub_create_fte,
+ .update_fte = mlx5_cmd_stub_update_fte,
+ .delete_fte = mlx5_cmd_stub_delete_fte,
+ .update_root_ft = mlx5_cmd_stub_update_root_ft,
+ .packet_reformat_alloc = mlx5_cmd_stub_packet_reformat_alloc,
+ .packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
+ .modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
+ .modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .set_peer = mlx5_cmd_stub_set_peer,
+ .create_ns = mlx5_cmd_stub_create_ns,
+ .destroy_ns = mlx5_cmd_stub_destroy_ns,
+ .get_capabilities = mlx5_cmd_stub_get_capabilities,
+};
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void)
+{
+ return &mlx5_flow_cmds;
+}
+
+static const struct mlx5_flow_cmds *mlx5_fs_cmd_get_stub_cmds(void)
+{
+ return &mlx5_flow_cmd_stubs;
+}
- mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type)
+{
+ switch (type) {
+ case FS_FT_NIC_RX:
+ case FS_FT_ESW_EGRESS_ACL:
+ case FS_FT_ESW_INGRESS_ACL:
+ case FS_FT_FDB:
+ case FS_FT_SNIFFER_RX:
+ case FS_FT_SNIFFER_TX:
+ case FS_FT_NIC_TX:
+ case FS_FT_RDMA_RX:
+ case FS_FT_RDMA_TX:
+ case FS_FT_PORT_SEL:
+ return mlx5_fs_cmd_get_fw_cmds();
+ default:
+ return mlx5_fs_cmd_get_stub_cmds();
+ }
}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_core.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_core.c
new file mode 100644
index 000000000000..8d93d4740462
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_core.c
@@ -0,0 +1,3522 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_core/mlx5_core.h>
+#include <dev/mlx5/mlx5_core/fs_core.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+
+#include "eswitch.h"
+#include "fs_ft_pool.h"
+#include "fs_cmd.h"
+
+#define down_write_nested(a, b) down_write(a)
+
+#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
+ sizeof(struct init_tree_node))
+
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
+ ...) {.type = FS_TYPE_PRIO,\
+ .min_ft_level = min_level_val,\
+ .num_levels = num_levels_val,\
+ .num_leaf_prios = num_prios_val,\
+ .caps = caps_val,\
+ .children = (struct init_tree_node[]) {__VA_ARGS__},\
+ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
+ __VA_ARGS__)\
+
+#define ADD_NS(def_miss_act, ...) {.type = FS_TYPE_NAMESPACE, \
+ .def_miss_action = def_miss_act,\
+ .children = (struct init_tree_node[]) {__VA_ARGS__},\
+ .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
+}
+
+#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
+ sizeof(long))
+
+#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
+
+#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
+ .caps = (long[]) {__VA_ARGS__} }
+
+#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_receive.modify_root), \
+ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+
+#define FS_CHAINING_CAPS_EGRESS \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit.flow_table_modify))
+
+#define FS_CHAINING_CAPS_RDMA_TX \
+ FS_REQUIRED_CAPS( \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma.modify_root), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_transmit_rdma \
+ .flow_table_modify))
+
+#define LEFTOVERS_NUM_LEVELS 1
+#define LEFTOVERS_NUM_PRIOS 1
+
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
+#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
+
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+ LEFTOVERS_NUM_PRIOS)
+
+#define KERNEL_RX_MACSEC_NUM_PRIOS 1
+#define KERNEL_RX_MACSEC_NUM_LEVELS 2
+#define KERNEL_RX_MACSEC_MIN_LEVEL (BY_PASS_MIN_LEVEL + KERNEL_RX_MACSEC_NUM_PRIOS)
+
+#define ETHTOOL_PRIO_NUM_LEVELS 1
+#define ETHTOOL_NUM_PRIOS 11
+#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
+/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+ * IPsec RoCE policy
+ */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 9
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define KERNEL_NIC_TC_NUM_PRIOS 1
+#define KERNEL_NIC_TC_NUM_LEVELS 13
+
+#define ANCHOR_NUM_LEVELS 1
+#define ANCHOR_NUM_PRIOS 1
+#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
+
+#define OFFLOADS_MAX_FT 2
+#define OFFLOADS_NUM_PRIOS 1
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + OFFLOADS_NUM_PRIOS)
+
+#define LAG_PRIO_NUM_LEVELS 1
+#define LAG_NUM_PRIOS 1
+#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
+
+#define KERNEL_TX_IPSEC_NUM_PRIOS 1
+#define KERNEL_TX_IPSEC_NUM_LEVELS 3
+#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
+
+#define KERNEL_TX_MACSEC_NUM_PRIOS 1
+#define KERNEL_TX_MACSEC_NUM_LEVELS 2
+#define KERNEL_TX_MACSEC_MIN_LEVEL (KERNEL_TX_IPSEC_MIN_LEVEL + KERNEL_TX_MACSEC_NUM_PRIOS)
+
+#define MAX_VPORTS 128
+
+struct node_caps {
+ size_t arr_sz;
+ long *caps;
+};
+
+static struct init_tree_node {
+ enum fs_node_type type;
+ struct init_tree_node *children;
+ int ar_size;
+ struct node_caps caps;
+ int min_ft_level;
+ int num_leaf_prios;
+ int prio;
+ int num_levels;
+ enum mlx5_flow_table_miss_action def_miss_action;
+} root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 8,
+ .children = (struct init_tree_node[]){
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_RX_MACSEC_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_RX_MACSEC_NUM_PRIOS,
+ KERNEL_RX_MACSEC_NUM_LEVELS))),
+ ADD_PRIO(0, LAG_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LAG_NUM_PRIOS,
+ LAG_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS,
+ OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS,
+ KERNEL_NIC_TC_NUM_LEVELS),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS,
+ LEFTOVERS_NUM_LEVELS))),
+ ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS,
+ ANCHOR_NUM_LEVELS))),
+ }
+};
+
+static struct init_tree_node egress_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 3,
+ .children = (struct init_tree_node[]) {
+ ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_TX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_IPSEC_NUM_PRIOS,
+ KERNEL_TX_IPSEC_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_TX_MACSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_EGRESS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(KERNEL_TX_MACSEC_NUM_PRIOS,
+ KERNEL_TX_MACSEC_NUM_LEVELS))),
+ }
+};
+
+enum {
+ RDMA_RX_IPSEC_PRIO,
+ RDMA_RX_COUNTERS_PRIO,
+ RDMA_RX_BYPASS_PRIO,
+ RDMA_RX_KERNEL_PRIO,
+};
+
+#define RDMA_RX_IPSEC_NUM_PRIOS 1
+#define RDMA_RX_IPSEC_NUM_LEVELS 2
+#define RDMA_RX_IPSEC_MIN_LEVEL (RDMA_RX_IPSEC_NUM_LEVELS)
+
+#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
+#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+
+static struct init_tree_node rdma_rx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 4,
+ .children = (struct init_tree_node[]) {
+ [RDMA_RX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_RX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_RX_IPSEC_NUM_PRIOS,
+ RDMA_RX_IPSEC_NUM_LEVELS))),
+ [RDMA_RX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
+ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_KERNEL_PRIO] =
+ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+ ADD_MULTIPLE_PRIO(1, 1))),
+ }
+};
+
+enum {
+ RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_IPSEC_PRIO,
+ RDMA_TX_BYPASS_PRIO,
+};
+
+#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
+#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+
+#define RDMA_TX_IPSEC_NUM_PRIOS 1
+#define RDMA_TX_IPSEC_PRIO_NUM_LEVELS 1
+#define RDMA_TX_IPSEC_MIN_LEVEL (RDMA_TX_COUNTERS_MIN_LEVEL + RDMA_TX_IPSEC_NUM_PRIOS)
+
+static struct init_tree_node rdma_tx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+ .ar_size = 3,
+ .children = (struct init_tree_node[]) {
+ [RDMA_TX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
+ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_IPSEC_PRIO] =
+ ADD_PRIO(0, RDMA_TX_IPSEC_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_IPSEC_NUM_PRIOS,
+ RDMA_TX_IPSEC_PRIO_NUM_LEVELS))),
+
+ [RDMA_TX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_RDMA_TX,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+};
+
+enum fs_i_lock_class {
+ FS_LOCK_GRANDPARENT,
+ FS_LOCK_PARENT,
+ FS_LOCK_CHILD
+};
+
+static void del_hw_flow_table(struct fs_node *node);
+static void del_hw_flow_group(struct fs_node *node);
+static void del_hw_fte(struct fs_node *node);
+static void del_sw_flow_table(struct fs_node *node);
+static void del_sw_flow_group(struct fs_node *node);
+static void del_sw_fte(struct fs_node *node);
+static void del_sw_prio(struct fs_node *node);
+static void del_sw_ns(struct fs_node *node);
+/* Deleting a rule (a destination) is a special case that requires
+ * locking the FTE for the entire deletion process.
+ */
+static void del_sw_hw_rule(struct fs_node *node);
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
+static struct mlx5_flow_rule *
+find_flow_rule(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest);
+
+static void tree_init_node(struct fs_node *node,
+ void (*del_hw_func)(struct fs_node *),
+ void (*del_sw_func)(struct fs_node *))
+{
+ refcount_set(&node->refcount, 1);
+ INIT_LIST_HEAD(&node->list);
+ INIT_LIST_HEAD(&node->children);
+ init_rwsem(&node->lock);
+ node->del_hw_func = del_hw_func;
+ node->del_sw_func = del_sw_func;
+ node->active = false;
+}
+
+static void tree_add_node(struct fs_node *node, struct fs_node *parent)
+{
+ if (parent)
+ refcount_inc(&parent->refcount);
+ node->parent = parent;
+
+ /* A node with no parent is its own root. */
+ if (!parent)
+ node->root = node;
+ else
+ node->root = parent->root;
+}
+
+static int tree_get_node(struct fs_node *node)
+{
+ return refcount_inc_not_zero(&node->refcount);
+}
+
+static void nested_down_read_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
+{
+ if (node) {
+ down_read_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
+ }
+}
+
+static void nested_down_write_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
+{
+ if (node) {
+ down_write_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
+ }
+}
+
+static void down_write_ref_node(struct fs_node *node, bool locked)
+{
+ if (node) {
+ if (!locked)
+ down_write(&node->lock);
+ refcount_inc(&node->refcount);
+ }
+}
+
+static void up_read_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_read(&node->lock);
+}
+
+static void up_write_ref_node(struct fs_node *node, bool locked)
+{
+ refcount_dec(&node->refcount);
+ if (!locked)
+ up_write(&node->lock);
+}
+
+static void tree_put_node(struct fs_node *node, bool locked)
+{
+ struct fs_node *parent_node = node->parent;
+
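+ /*
+ * On the final reference drop, delete the node from HW first, then
+ * unlink it from its parent and free the SW state; the reference
+ * held on the parent is dropped afterwards by the tail recursion.
+ */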
+ if (refcount_dec_and_test(&node->refcount)) {
+ if (node->del_hw_func)
+ node->del_hw_func(node);
+ if (parent_node) {
+ down_write_ref_node(parent_node, locked);
+ list_del_init(&node->list);
+ }
+ node->del_sw_func(node);
+ if (parent_node)
+ up_write_ref_node(parent_node, locked);
+ node = NULL;
+ }
+ if (!node && parent_node)
+ tree_put_node(parent_node, locked);
+}
+
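+/*
+ * Tear the node down only when this caller holds the last reference;
+ * otherwise drop the reference and report -EEXIST.
+ */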
+static int tree_remove_node(struct fs_node *node, bool locked)
+{
+ if (refcount_read(&node->refcount) > 1) {
+ refcount_dec(&node->refcount);
+ return -EEXIST;
+ }
+ tree_put_node(node, locked);
+ return 0;
+}
+
+static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio)
+{
+ struct fs_prio *iter_prio;
+
+ fs_for_each_prio(iter_prio, ns) {
+ if (iter_prio->prio == prio)
+ return iter_prio;
+ }
+
+ return NULL;
+}
+
+static bool is_fwd_next_action(u32 action)
+{
+ return action & (MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+}
+
+static bool is_fwd_dest_type(enum mlx5_flow_destination_type type)
+{
+ return type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_UPLINK ||
+ type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TIR ||
+ type == MLX5_FLOW_DESTINATION_TYPE_RANGE ||
+ type == MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE;
+}
+
+static bool check_valid_spec(const struct mlx5_flow_spec *spec)
+{
+ int i;
+
+ for (i = 0; i < MLX5_ST_SZ_DW_MATCH_PARAM; i++)
+ if (spec->match_value[i] & ~spec->match_criteria[i]) {
+ pr_warn("mlx5_core: match_value differs from match_criteria\n");
+ return false;
+ }
+
+ return true;
+}
+
+struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
+{
+ struct fs_node *root;
+ struct mlx5_flow_namespace *ns;
+
+ root = node->root;
+
+ if (WARN_ON(root->type != FS_TYPE_NAMESPACE)) {
+ pr_warn("mlx5: flow steering node is not in tree or garbaged\n");
+ return NULL;
+ }
+
+ ns = container_of(root, struct mlx5_flow_namespace, node);
+ return container_of(ns, struct mlx5_flow_root_namespace, ns);
+}
+
+static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev->priv.steering;
+ return NULL;
+}
+
+static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev;
+ return NULL;
+}
+
+static void del_sw_ns(struct fs_node *node)
+{
+ kfree(node);
+}
+
+static void del_sw_prio(struct fs_node *node)
+{
+ kfree(node);
+}
+
+static void del_hw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_table *ft;
+ struct mlx5_core_dev *dev;
+ int err;
+
+ fs_get_obj(ft, node);
+ dev = get_dev(&ft->node);
+ root = find_root(&ft->node);
+
+ if (node->active) {
+ err = root->cmds->destroy_flow_table(root, ft);
+ if (err)
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+ }
+}
+
+static void del_sw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct fs_prio *prio;
+
+ fs_get_obj(ft, node);
+
+ xa_destroy(&ft->fgs_xa);
+ if (ft->node.parent) {
+ fs_get_obj(prio, ft->node.parent);
+ prio->num_ft--;
+ }
+ kfree(ft);
+}
+
+static void modify_fte(struct fs_fte *fte)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ int err;
+
+ fs_get_obj(fg, fte->node.parent);
+ fs_get_obj(ft, fg->node.parent);
+ dev = get_dev(&fte->node);
+
+ root = find_root(&ft->node);
+ err = root->cmds->update_fte(root, ft, fg, fte->modify_mask, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "%s can't del rule fg id=%d fte_index=%d\n",
+ __func__, fg->id, fte->index);
+ fte->modify_mask = 0;
+}
+
+static void del_sw_hw_rule(struct fs_node *node)
+{
+ struct mlx5_flow_rule *rule;
+ struct fs_fte *fte;
+
+ fs_get_obj(rule, node);
+ fs_get_obj(fte, rule->node.parent);
+ if (is_fwd_next_action(rule->sw_action)) {
+ mutex_lock(&rule->dest_attr.ft->lock);
+ list_del(&rule->next_ft);
+ mutex_unlock(&rule->dest_attr.ft->lock);
+ }
+
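+ /*
+ * Each destination type undoes a different piece of the FTE: counter
+ * and allow destinations clear their action bits, while forward
+ * destinations refresh the destination list and drop FWD_DEST once
+ * the last one is removed.
+ */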
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) {
+ --fte->dests_size;
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION) |
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ goto out;
+ }
+
+ if (rule->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_PORT) {
+ --fte->dests_size;
+ fte->modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+ fte->action.action &= ~MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+ goto out;
+ }
+
+ if (is_fwd_dest_type(rule->dest_attr.type)) {
+ --fte->dests_size;
+ --fte->fwd_dests;
+
+ if (!fte->fwd_dests)
+ fte->action.action &=
+ ~MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ fte->modify_mask |=
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ goto out;
+ }
+out:
+ kfree(rule);
+}
+
+static void del_hw_fte(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
+
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
+ fs_get_obj(ft, fg->node.parent);
+
+ WARN_ON(fte->dests_size);
+ dev = get_dev(&ft->node);
+ root = find_root(&ft->node);
+ if (node->active) {
+ err = root->cmds->delete_fte(root, ft, fte);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ node->active = false;
+ }
+}
+
+static void del_sw_fte(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *fg;
+ struct fs_fte *deleted_fte;
+ struct fs_fte *fte;
+
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
+
+ deleted_fte = xa_erase(&fg->ftes_xa, fte->index);
+ WARN_ON(deleted_fte != fte);
+ ida_free(&fg->fte_allocator, fte->index - fg->start_index);
+ kmem_cache_free(steering->ftes_cache, fte);
+}
+
+static void del_hw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ struct mlx5_core_dev *dev;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
+ dev = get_dev(&ft->node);
+
+ root = find_root(&ft->node);
+ if (fg->node.active && root->cmds->destroy_flow_group(root, ft, fg))
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static void del_sw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *deleted_fg;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
+
+ xa_destroy(&fg->ftes_xa);
+ ida_destroy(&fg->fte_allocator);
+ if (ft->autogroup.active &&
+ fg->max_ftes == ft->autogroup.group_size &&
+ fg->start_index < ft->autogroup.max_fte)
+ ft->autogroup.num_groups--;
+ deleted_fg = xa_erase(&ft->fgs_xa, fg->start_index);
+ WARN_ON(deleted_fg != fg);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
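+/*
+ * Allocate a free index within the group from the IDA and publish the
+ * FTE in the group's xarray under its table-wide index.
+ */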
+static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
+{
+ int index;
+ int ret;
+
+ index = ida_alloc_max(&fg->fte_allocator, fg->max_ftes, GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ fte->index = index + fg->start_index;
+ ret = xa_insert(&fg->ftes_xa, fte->index, fte, GFP_KERNEL);
+ if (ret)
+ goto err_ida_remove;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add_tail(&fte->node.list, &fg->node.children);
+ return 0;
+
+err_ida_remove:
+ ida_free(&fg->fte_allocator, index);
+ return ret;
+}
+
+static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct fs_fte *fte;
+
+ fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
+ if (!fte)
+ return ERR_PTR(-ENOMEM);
+
+ memcpy(fte->val, &spec->match_value, sizeof(fte->val));
+ fte->node.type = FS_TYPE_FLOW_ENTRY;
+ fte->action = *flow_act;
+ fte->flow_context = spec->flow_context;
+
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+
+ return fte;
+}
+
+static void dealloc_flow_group(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_group *fg)
+{
+ xa_destroy(&fg->ftes_xa);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
+ u8 match_criteria_enable,
+ const void *match_criteria,
+ int start_index,
+ int end_index)
+{
+ struct mlx5_flow_group *fg;
+
+ fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
+ if (!fg)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init(&fg->ftes_xa);
+
+ ida_init(&fg->fte_allocator);
+ fg->mask.match_criteria_enable = match_criteria_enable;
+ memcpy(&fg->mask.match_criteria, match_criteria,
+ sizeof(fg->mask.match_criteria));
+ fg->node.type = FS_TYPE_FLOW_GROUP;
+ fg->start_index = start_index;
+ fg->max_ftes = end_index - start_index + 1;
+
+ return fg;
+}
+
+static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ const void *match_criteria,
+ int start_index,
+ int end_index,
+ struct list_head *prev)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *fg;
+ int ret;
+
+ fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
+ start_index, end_index);
+ if (IS_ERR(fg))
+ return fg;
+
+ /* initialize refcnt, add to parent list */
+ ret = xa_insert(&ft->fgs_xa, fg->start_index, fg, GFP_KERNEL);
+ if (ret) {
+ dealloc_flow_group(steering, fg);
+ return ERR_PTR(ret);
+ }
+
+ tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, prev);
+ atomic_inc(&ft->node.version);
+
+ return fg;
+}
+
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport,
+ enum fs_flow_table_type table_type,
+ enum fs_flow_table_op_mod op_mod,
+ u32 flags)
+{
+ struct mlx5_flow_table *ft;
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init(&ft->fgs_xa);
+
+ ft->level = level;
+ ft->node.type = FS_TYPE_FLOW_TABLE;
+ ft->op_mod = op_mod;
+ ft->type = table_type;
+ ft->vport = vport;
+ ft->flags = flags;
+ INIT_LIST_HEAD(&ft->fwd_rules);
+ mutex_init(&ft->lock);
+
+ return ft;
+}
+
+/* If reverse is false, we search for the first flow table in the root
+ * sub-tree starting from start (closest from the right); otherwise we
+ * search for the last flow table in the root sub-tree up to start
+ * (closest from the left).
+ */
+static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
+ struct list_head *start,
+ bool reverse)
+{
+#define list_advance_entry(pos, reverse) \
+ ((reverse) ? list_prev_entry(pos, list) : list_next_entry(pos, list))
+
+#define list_for_each_advance_continue(pos, head, reverse) \
+ for (pos = list_advance_entry(pos, reverse); \
+ &pos->list != (head); \
+ pos = list_advance_entry(pos, reverse))
+
+ struct fs_node *iter = list_entry(start, struct fs_node, list);
+ struct mlx5_flow_table *ft = NULL;
+
+ if (!root || root->type == FS_TYPE_PRIO_CHAINS)
+ return NULL;
+
+ list_for_each_advance_continue(iter, &root->children, reverse) {
+ if (iter->type == FS_TYPE_FLOW_TABLE) {
+ fs_get_obj(ft, iter);
+ return ft;
+ }
+ ft = find_closest_ft_recursive(iter, &iter->children, reverse);
+ if (ft)
+ return ft;
+ }
+
+ return ft;
+}
+
+/* If reverse is false then return the first flow table in next priority of
+ * prio in the tree, else return the last flow table in the previous priority
+ * of prio in the tree.
+ */
+static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse)
+{
+ struct mlx5_flow_table *ft = NULL;
+ struct fs_node *curr_node;
+ struct fs_node *parent;
+
+ parent = prio->node.parent;
+ curr_node = &prio->node;
+ while (!ft && parent) {
+ ft = find_closest_ft_recursive(parent, &curr_node->list, reverse);
+ curr_node = parent;
+ parent = curr_node->parent;
+ }
+ return ft;
+}
+
+/* Assuming all the tree is locked by mutex chain lock */
+static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio)
+{
+ return find_closest_ft(prio, false);
+}
+
+/* Assuming all the tree is locked by mutex chain lock */
+static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
+{
+ return find_closest_ft(prio, true);
+}
+
+static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
+ struct mlx5_flow_act *flow_act)
+{
+ struct fs_prio *prio;
+ bool next_ns;
+
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
+
+ return find_next_chained_ft(prio);
+}
+
+static int connect_fts_in_prio(struct mlx5_core_dev *dev,
+ struct fs_prio *prio,
+ struct mlx5_flow_table *ft)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_flow_table *iter;
+ int err;
+
+ fs_for_each_ft(iter, prio) {
+ err = root->cmds->modify_flow_table(root, iter, ft);
+ if (err) {
+ mlx5_core_err(dev,
+ "Failed to modify flow table id %d, type %d, err %d\n",
+ iter->id, iter->type, err);
+ /* The driver is out of sync with the FW */
+ return err;
+ }
+ }
+ return 0;
+}
+
+/* Connect flow tables from previous priority of prio to ft */
+static int connect_prev_fts(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct mlx5_flow_table *prev_ft;
+
+ prev_ft = find_prev_chained_ft(prio);
+ if (prev_ft) {
+ struct fs_prio *prev_prio;
+
+ fs_get_obj(prev_prio, prev_ft->node.parent);
+ return connect_fts_in_prio(dev, prev_prio, ft);
+ }
+ return 0;
+}
+
+static int update_root_ft_create(struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_ft_underlay_qp *uqp;
+ int min_level = INT_MAX;
+ int err = 0;
+ u32 qpn;
+
+ if (root->root_ft)
+ min_level = root->root_ft->level;
+
+ if (ft->level >= min_level)
+ return 0;
+
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = root->cmds->update_root_ft(root, ft, qpn, false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = root->cmds->update_root_ft(root, ft,
+ qpn, false);
+ if (err)
+ break;
+ }
+ }
+
+ if (err)
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
+ else
+ root->root_ft = ft;
+
+ return err;
+}
+
+static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct fs_fte *fte;
+ int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ int err = 0;
+
+ fs_get_obj(fte, rule->node.parent);
+ if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return -EINVAL;
+ down_write_ref_node(&fte->node, false);
+ fs_get_obj(fg, fte->node.parent);
+ fs_get_obj(ft, fg->node.parent);
+
+ memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ root = find_root(&ft->node);
+ err = root->cmds->update_fte(root, ft, fg,
+ modify_mask, fte);
+ up_write_ref_node(&fte->node, false);
+
+ return err;
+}
+
+int mlx5_modify_rule_destination(struct mlx5_flow_handle *handle,
+ struct mlx5_flow_destination *new_dest,
+ struct mlx5_flow_destination *old_dest)
+{
+ int i;
+
+ if (!old_dest) {
+ if (handle->num_rules != 1)
+ return -EINVAL;
+ return _mlx5_modify_rule_destination(handle->rule[0],
+ new_dest);
+ }
+
+ for (i = 0; i < handle->num_rules; i++) {
+ /* Find the rule whose current destination is old_dest. */
+ if (mlx5_flow_dests_cmp(old_dest, &handle->rule[i]->dest_attr))
+ return _mlx5_modify_rule_destination(handle->rule[i],
+ new_dest);
+ }
+
+ return -EINVAL;
+}
+
+/* Modify/set FWD rules that point on old_next_ft to point on new_next_ft */
+static int connect_fwd_rules(struct mlx5_core_dev *dev,
+ struct mlx5_flow_table *new_next_ft,
+ struct mlx5_flow_table *old_next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_rule *iter;
+ int err = 0;
+
+ /* new_next_ft and old_next_ft could be NULL only
+ * when we create/destroy the anchor flow table.
+ */
+ if (!new_next_ft || !old_next_ft)
+ return 0;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = new_next_ft;
+
+ mutex_lock(&old_next_ft->lock);
+ list_splice_init(&old_next_ft->fwd_rules, &new_next_ft->fwd_rules);
+ mutex_unlock(&old_next_ft->lock);
+ list_for_each_entry(iter, &new_next_ft->fwd_rules, next_ft) {
+ if ((iter->sw_action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS) &&
+ iter->ft->ns == new_next_ft->ns)
+ continue;
+
+ err = _mlx5_modify_rule_destination(iter, &dest);
+ if (err)
+ pr_err("mlx5_core: failed to modify rule to point on flow table %d\n",
+ new_next_ft->id);
+ }
+ return 0;
+}
+
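+/* Wire a newly created table into its priority: if it becomes the
+ * first table of the prio, repoint the previous prio's tables and the
+ * relevant forward rules at it, and make it the root table if it now
+ * has the lowest level.
+ */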
+static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct mlx5_flow_table *next_ft, *first_ft;
+ int err = 0;
+
+ /* Connect_prev_fts and update_root_ft_create are mutually exclusive */
+
+ first_ft = list_first_entry_or_null(&prio->node.children,
+ struct mlx5_flow_table, node.list);
+ if (!first_ft || first_ft->level > ft->level) {
+ err = connect_prev_fts(dev, ft, prio);
+ if (err)
+ return err;
+
+ next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
+ err = connect_fwd_rules(dev, ft, next_ft);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev,
+ flow_table_properties_nic_receive.modify_root))
+ err = update_root_ft_create(ft, prio);
+ return err;
+}
+
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct list_head *prev = &prio->node.children;
+ struct mlx5_flow_table *iter;
+
+ fs_for_each_ft(iter, prio) {
+ if (iter->level > ft->level)
+ break;
+ prev = &iter->node.list;
+ }
+ list_add(&ft->node.list, prev);
+}
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr,
+ enum fs_flow_table_op_mod op_mod,
+ u16 vport)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ns->node);
+ bool unmanaged = ft_attr->flags & MLX5_FLOW_TABLE_UNMANAGED;
+ struct mlx5_flow_table *next_ft;
+ struct fs_prio *fs_prio = NULL;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ if (!root) {
+ pr_err("mlx5: flow steering failed to find root of namespace\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ mutex_lock(&root->chain_lock);
+ fs_prio = find_prio(ns, ft_attr->prio);
+ if (!fs_prio) {
+ err = -EINVAL;
+ goto unlock_root;
+ }
+ if (!unmanaged) {
+ /* The level is related to the
+ * priority level range.
+ */
+ if (ft_attr->level >= fs_prio->num_levels) {
+ err = -ENOSPC;
+ goto unlock_root;
+ }
+
+ ft_attr->level += fs_prio->start_level;
+ }
+
+ ft = alloc_flow_table(ft_attr->level,
+ vport,
+ root->table_type,
+ op_mod, ft_attr->flags);
+ if (IS_ERR(ft)) {
+ err = PTR_ERR(ft);
+ goto unlock_root;
+ }
+
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
+ next_ft = unmanaged ? ft_attr->next_ft :
+ find_next_chained_ft(fs_prio);
+ ft->def_miss_action = ns->def_miss_action;
+ ft->ns = ns;
+ err = root->cmds->create_flow_table(root, ft, ft_attr, next_ft);
+ if (err)
+ goto free_ft;
+
+ if (!unmanaged) {
+ err = connect_flow_table(root->dev, ft, fs_prio);
+ if (err)
+ goto destroy_ft;
+ }
+
+ ft->node.active = true;
+ down_write_ref_node(&fs_prio->node, false);
+ if (!unmanaged) {
+ tree_add_node(&ft->node, &fs_prio->node);
+ list_add_flow_table(ft, fs_prio);
+ } else {
+ ft->node.root = fs_prio->node.root;
+ }
+ fs_prio->num_ft++;
+ up_write_ref_node(&fs_prio->node, false);
+ mutex_unlock(&root->chain_lock);
+ return ft;
+destroy_ft:
+ root->cmds->destroy_flow_table(root, ft);
+free_ft:
+ xa_destroy(&ft->fgs_xa);
+ kfree(ft);
+unlock_root:
+ mutex_unlock(&root->chain_lock);
+ return ERR_PTR(err);
+}
+
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr)
+{
+ return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, 0);
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+u32 mlx5_flow_table_id(struct mlx5_flow_table *ft)
+{
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_flow_table_id);
+
+struct mlx5_flow_table *
+mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr, u16 vport)
+{
+ return __mlx5_create_flow_table(ns, ft_attr, FS_FT_OP_MOD_NORMAL, vport);
+}
+
+struct mlx5_flow_table*
+mlx5_create_lag_demux_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, u32 level)
+{
+ struct mlx5_flow_table_attr ft_attr = {};
+
+ ft_attr.level = level;
+ ft_attr.prio = prio;
+ ft_attr.max_fte = 1;
+
+ return __mlx5_create_flow_table(ns, &ft_attr, FS_FT_OP_MOD_LAG_DEMUX, 0);
+}
+EXPORT_SYMBOL(mlx5_create_lag_demux_flow_table);
+
+#define MAX_FLOW_GROUP_SIZE BIT(24)
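+/* Create a flow table whose flow groups are managed automatically:
+ * groups are carved out on demand in alloc_auto_flow_group(), while
+ * the last num_reserved_entries of the table are left for flow groups
+ * created explicitly through mlx5_create_flow_group().
+ */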
+struct mlx5_flow_table*
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr)
+{
+ int num_reserved_entries = ft_attr->autogroup.num_reserved_entries;
+ int max_num_groups = ft_attr->autogroup.max_num_groups;
+ struct mlx5_flow_table *ft;
+ int autogroups_max_fte;
+
+ ft = mlx5_create_flow_table(ns, ft_attr);
+ if (IS_ERR(ft))
+ return ft;
+
+ autogroups_max_fte = ft->max_fte - num_reserved_entries;
+ if (max_num_groups > autogroups_max_fte)
+ goto err_validate;
+ if (num_reserved_entries > ft->max_fte)
+ goto err_validate;
+
+ /* Align the number of groups according to the largest group size */
+ if (autogroups_max_fte / (max_num_groups + 1) > MAX_FLOW_GROUP_SIZE)
+ max_num_groups = (autogroups_max_fte / MAX_FLOW_GROUP_SIZE) - 1;
+
+ ft->autogroup.active = true;
+ ft->autogroup.required_groups = max_num_groups;
+ ft->autogroup.max_fte = autogroups_max_fte;
+ /* We save place for flow groups in addition to max types */
+ ft->autogroup.group_size = autogroups_max_fte / (max_num_groups + 1);
+
+ return ft;
+
+err_validate:
+ mlx5_destroy_flow_table(ft);
+ return ERR_PTR(-ENOSPC);
+}
+EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
+
+struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
+ u32 *fg_in)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
+ fg_in, match_criteria);
+ u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+ fg_in,
+ match_criteria_enable);
+ int start_index = MLX5_GET(create_flow_group_in, fg_in,
+ start_flow_index);
+ int end_index = MLX5_GET(create_flow_group_in, fg_in,
+ end_flow_index);
+ struct mlx5_flow_group *fg;
+ int err;
+
+ if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
+ return ERR_PTR(-EPERM);
+
+ down_write_ref_node(&ft->node, false);
+ fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
+ start_index, end_index,
+ ft->node.children.prev);
+ up_write_ref_node(&ft->node, false);
+ if (IS_ERR(fg))
+ return fg;
+
+ err = root->cmds->create_flow_group(root, ft, fg_in, fg);
+ if (err) {
+ tree_put_node(&fg->node, false);
+ return ERR_PTR(err);
+ }
+ fg->node.active = true;
+
+ return fg;
+}
+EXPORT_SYMBOL(mlx5_create_flow_group);
+
+static struct mlx5_flow_rule *alloc_rule(struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_rule *rule;
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return NULL;
+
+ INIT_LIST_HEAD(&rule->next_ft);
+ rule->node.type = FS_TYPE_FLOW_DEST;
+ if (dest)
+ memcpy(&rule->dest_attr, dest, sizeof(*dest));
+ else
+ rule->dest_attr.type = MLX5_FLOW_DESTINATION_TYPE_NONE;
+
+ return rule;
+}
+
+static struct mlx5_flow_handle *alloc_handle(int num_rules)
+{
+ struct mlx5_flow_handle *handle;
+
+ handle = kzalloc(struct_size(handle, rule, num_rules), GFP_KERNEL);
+ if (!handle)
+ return NULL;
+
+ handle->num_rules = num_rules;
+
+ return handle;
+}
+
+static void destroy_flow_handle(struct fs_fte *fte,
+ struct mlx5_flow_handle *handle,
+ struct mlx5_flow_destination *dest,
+ int i)
+{
+ while (--i >= 0) {
+ if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
+ fte->dests_size--;
+ list_del(&handle->rule[i]->node.list);
+ kfree(handle->rule[i]);
+ }
+ }
+ kfree(handle);
+}
+
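+/* Build a handle holding one rule per destination. A rule with an
+ * identical destination is reused by taking a reference on it; for
+ * each newly allocated rule the matching modify-mask bit (destination
+ * list or flow counters) is accumulated for the later FW update.
+ */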
+static struct mlx5_flow_handle *
+create_flow_handle(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int *modify_mask,
+ bool *new_rule)
+{
+ struct mlx5_flow_handle *handle;
+ struct mlx5_flow_rule *rule = NULL;
+ static int count = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS);
+ static int dst = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ int type;
+ int i = 0;
+
+ handle = alloc_handle((dest_num) ? dest_num : 1);
+ if (!handle)
+ return ERR_PTR(-ENOMEM);
+
+ do {
+ if (dest) {
+ rule = find_flow_rule(fte, dest + i);
+ if (rule) {
+ refcount_inc(&rule->node.refcount);
+ goto rule_found;
+ }
+ }
+
+ *new_rule = true;
+ rule = alloc_rule(dest + i);
+ if (!rule)
+ goto free_rules;
+
+ /* Add dest to the dests list - flow table destinations must sit
+ * at the end of the list, for the sake of forward-to-next-prio
+ * rules.
+ */
+ tree_init_node(&rule->node, NULL, del_sw_hw_rule);
+ if (dest &&
+ dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
+ list_add(&rule->node.list, &fte->node.children);
+ else
+ list_add_tail(&rule->node.list, &fte->node.children);
+ if (dest) {
+ fte->dests_size++;
+
+ if (is_fwd_dest_type(dest[i].type))
+ fte->fwd_dests++;
+
+ type = dest[i].type ==
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ *modify_mask |= type ? count : dst;
+ }
+rule_found:
+ handle->rule[i] = rule;
+ } while (++i < dest_num);
+
+ return handle;
+
+free_rules:
+ destroy_flow_handle(fte, handle, dest, i);
+ return ERR_PTR(-ENOMEM);
+}
+
+/* fte should not be deleted while calling this function */
+static struct mlx5_flow_handle *
+add_rule_fte(struct fs_fte *fte,
+ struct mlx5_flow_group *fg,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ bool update_action)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_handle *handle;
+ struct mlx5_flow_table *ft;
+ int modify_mask = 0;
+ int err;
+ bool new_rule = false;
+
+ handle = create_flow_handle(fte, dest, dest_num, &modify_mask,
+ &new_rule);
+ if (IS_ERR(handle) || !new_rule)
+ goto out;
+
+ if (update_action)
+ modify_mask |= BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_ACTION);
+
+ fs_get_obj(ft, fg->node.parent);
+ root = find_root(&fg->node);
+ if (!(fte->status & FS_FTE_STATUS_EXISTING))
+ err = root->cmds->create_fte(root, ft, fg, fte);
+ else
+ err = root->cmds->update_fte(root, ft, fg, modify_mask, fte);
+ if (err)
+ goto free_handle;
+
+ fte->node.active = true;
+ fte->status |= FS_FTE_STATUS_EXISTING;
+ atomic_inc(&fg->node.version);
+
+out:
+ return handle;
+
+free_handle:
+ destroy_flow_handle(fte, handle, dest, handle->num_rules);
+ return ERR_PTR(err);
+}
+
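+/* Carve a new flow group out of the first gap of free FTE indices that
+ * is large enough to hold group_size entries. The group list is kept
+ * sorted by start_index, so the scan below finds the lowest such gap.
+ */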
+static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec)
+{
+ struct list_head *prev = &ft->node.children;
+ u32 max_fte = ft->autogroup.max_fte;
+ unsigned int candidate_index = 0;
+ unsigned int group_size = 0;
+ struct mlx5_flow_group *fg;
+
+ if (!ft->autogroup.active)
+ return ERR_PTR(-ENOENT);
+
+ if (ft->autogroup.num_groups < ft->autogroup.required_groups)
+ group_size = ft->autogroup.group_size;
+
+ /* All required groups already exist (or the computed group size
+ * is zero); fall back to single-FTE groups.
+ */
+ if (group_size == 0)
+ group_size = 1;
+
+ /* sorted by start_index */
+ fs_for_each_fg(fg, ft) {
+ if (candidate_index + group_size > fg->start_index)
+ candidate_index = fg->start_index + fg->max_ftes;
+ else
+ break;
+ prev = &fg->node.list;
+ }
+
+ if (candidate_index + group_size > max_fte)
+ return ERR_PTR(-ENOSPC);
+
+ fg = alloc_insert_flow_group(ft,
+ spec->match_criteria_enable,
+ spec->match_criteria,
+ candidate_index,
+ candidate_index + group_size - 1,
+ prev);
+ if (IS_ERR(fg))
+ goto out;
+
+ if (group_size == ft->autogroup.group_size)
+ ft->autogroup.num_groups++;
+
+out:
+ return fg;
+}
+
+static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *match_criteria_addr;
+ u8 src_esw_owner_mask_on;
+ void *misc;
+ int err;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable,
+ fg->mask.match_criteria_enable);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
+ fg->max_ftes - 1);
+
+ misc = MLX5_ADDR_OF(fte_match_param, fg->mask.match_criteria,
+ misc_parameters);
+ src_esw_owner_mask_on = !!MLX5_GET(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+ MLX5_SET(create_flow_group_in, in,
+ source_eswitch_owner_vhca_id_valid, src_esw_owner_mask_on);
+
+ match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
+ in, match_criteria);
+ memcpy(match_criteria_addr, fg->mask.match_criteria,
+ sizeof(fg->mask.match_criteria));
+
+ err = root->cmds->create_flow_group(root, ft, in, fg);
+ if (!err)
+ fg->node.active = true;
+
+ kvfree(in);
+ return err;
+}
+
+static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
+ struct mlx5_flow_destination *d2)
+{
+ if (d1->type == d2->type) {
+ if (((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+ d1->type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
+ d1->vport.num == d2->vport.num &&
+ d1->vport.flags == d2->vport.flags &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ?
+ (d1->vport.vhca_id == d2->vport.vhca_id) : true) &&
+ ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ?
+ (d1->vport.pkt_reformat->id ==
+ d2->vport.pkt_reformat->id) : true)) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ d1->ft == d2->ft) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR &&
+ d1->tir_num == d2->tir_num) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM &&
+ d1->ft_num == d2->ft_num) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER &&
+ d1->sampler_id == d2->sampler_id) ||
+ (d1->type == MLX5_FLOW_DESTINATION_TYPE_RANGE &&
+ d1->range.field == d2->range.field &&
+ d1->range.hit_ft == d2->range.hit_ft &&
+ d1->range.miss_ft == d2->range.miss_ft &&
+ d1->range.min == d2->range.min &&
+ d1->range.max == d2->range.max))
+ return true;
+ }
+
+ return false;
+}
+
+static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_rule *rule;
+
+ list_for_each_entry(rule, &fte->node.children, node.list) {
+ if (mlx5_flow_dests_cmp(&rule->dest_attr, dest))
+ return rule;
+ }
+ return NULL;
+}
+
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+ const struct mlx5_fs_vlan *vlan1)
+{
+ return vlan0->ethtype != vlan1->ethtype ||
+ vlan0->vid != vlan1->vid ||
+ vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+ const struct mlx5_flow_act *act2)
+{
+ u32 action1 = act1->action;
+ u32 action2 = act2->action;
+ u32 xored_actions;
+
+ xored_actions = action1 ^ action2;
+
+ /* if one rule only wants to count, it's ok */
+ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
+ action2 == MLX5_FLOW_CONTEXT_ACTION_COUNT)
+ return false;
+
+ if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_DECAP |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 |
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+ act1->pkt_reformat != act2->pkt_reformat)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ act1->modify_hdr != act2->modify_hdr)
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+ check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+ return true;
+
+ if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+ check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+ return true;
+
+ return false;
+}
+
+static int check_conflicting_ftes(struct fs_fte *fte,
+ const struct mlx5_flow_context *flow_context,
+ const struct mlx5_flow_act *flow_act)
+{
+ if (check_conflicting_actions(flow_act, &fte->action)) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "Found two FTEs with conflicting actions\n");
+ return -EEXIST;
+ }
+
+ if ((flow_context->flags & FLOW_CONTEXT_HAS_TAG) &&
+ fte->flow_context.flow_tag != flow_context->flow_tag) {
+ mlx5_core_warn(get_dev(&fte->node),
+ "FTE flow tag %u already exists with different flow tag %u\n",
+ fte->flow_context.flow_tag,
+ flow_context->flow_tag);
+ return -EEXIST;
+ }
+
+ return 0;
+}
+
+static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ struct fs_fte *fte)
+{
+ struct mlx5_flow_handle *handle;
+ int old_action;
+ int i;
+ int ret;
+
+ ret = check_conflicting_ftes(fte, &spec->flow_context, flow_act);
+ if (ret)
+ return ERR_PTR(ret);
+
+ old_action = fte->action.action;
+ fte->action.action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
+ if (IS_ERR(handle)) {
+ fte->action.action = old_action;
+ return handle;
+ }
+
+ for (i = 0; i < handle->num_rules; i++) {
+ if (refcount_read(&handle->rule[i]->node.refcount) == 1)
+ tree_add_node(&handle->rule[i]->node, &fte->node);
+ }
+ return handle;
+}
+
+static bool counter_is_valid(u32 action)
+{
+ return (action & (MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST));
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_table *ft)
+{
+ bool ignore_level = flow_act->flags & FLOW_ACT_IGNORE_FLOW_LEVEL;
+ u32 action = flow_act->action;
+
+ if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+ return counter_is_valid(action);
+
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return true;
+
+ if (ignore_level) {
+ if (ft->type != FS_FT_FDB &&
+ ft->type != FS_FT_NIC_RX &&
+ ft->type != FS_FT_NIC_TX)
+ return false;
+
+ if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+ ft->type != dest->ft->type)
+ return false;
+ }
+
+ if (!dest || ((dest->type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+ (dest->ft->level <= ft->level && !ignore_level)))
+ return false;
+ return true;
+}
+
+struct match_list {
+ struct list_head list;
+ struct mlx5_flow_group *g;
+};
+
+static void free_match_list(struct match_list *head, bool ft_locked)
+{
+ struct match_list *iter, *match_tmp;
+
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node, ft_locked);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+}
+
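+/* Walk all entries of an xarray via xa_next(). Callers either hold the
+ * RCU read lock (build_match_list()) or serialize through the owning
+ * node's lock (lookup_fte_locked()).
+ */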
+#define xa_for_each_rcu(xa, index, entry) \
+ for ((entry) = NULL, (index) = 0; \
+ ((entry) = xa_next(xa, &index, (entry) != NULL)) != NULL; )
+
+static int build_match_list(struct match_list *match_head,
+ struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_group *fg,
+ bool ft_locked)
+{
+ struct mlx5_flow_group *g;
+ unsigned long id;
+
+ rcu_read_lock();
+ INIT_LIST_HEAD(&match_head->list);
+ xa_for_each_rcu(&ft->fgs_xa, id, g) {
+ struct match_list *curr_match;
+
+ if (memcmp(&g->mask, spec, sizeof_field(struct mlx5_flow_group,
+ mask)))
+ continue;
+
+ if (fg && fg != g)
+ continue;
+
+ if (unlikely(!tree_get_node(&g->node)))
+ continue;
+
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ if (!curr_match) {
+ rcu_read_unlock();
+ free_match_list(match_head, ft_locked);
+ return -ENOMEM;
+ }
+ curr_match->g = g;
+ list_add_tail(&curr_match->list, &match_head->list);
+ }
+ rcu_read_unlock();
+ return 0;
+}
+
+static u64 matched_fgs_get_version(struct list_head *match_head)
+{
+ struct match_list *iter;
+ u64 version = 0;
+
+ list_for_each_entry(iter, match_head, list)
+ version += (u64)atomic_read(&iter->g->node.version);
+ return version;
+}
+
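+/* Look up an FTE by match value under the group's lock (read or write,
+ * depending on take_write). On success the FTE is returned referenced
+ * and write-locked; the caller must unlock it and drop the reference.
+ */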
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+ const u32 *match_value,
+ bool take_write)
+{
+ struct fs_fte *fte_tmp;
+ unsigned long index;
+
+ if (take_write)
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ else
+ nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+ xa_for_each_rcu(&g->ftes_xa, index, fte_tmp) {
+ if (!memcmp(match_value, fte_tmp->val, sizeof_field(struct fs_fte, val)))
+ break;
+ }
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+ fte_tmp = NULL;
+ goto out;
+ }
+ if (!fte_tmp->node.active) {
+ tree_put_node(&fte_tmp->node, false);
+ fte_tmp = NULL;
+ goto out;
+ }
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+ if (take_write)
+ up_write_ref_node(&g->node, false);
+ else
+ up_read_ref_node(&g->node);
+ return fte_tmp;
+}
+
+static struct mlx5_flow_handle *
+try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ struct list_head *match_head,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int ft_version)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+ struct match_list *iter;
+ bool take_write = false;
+ struct fs_fte *fte;
+ u64 version = 0;
+ int err;
+
+ fte = alloc_fte(ft, spec, flow_act);
+ if (IS_ERR(fte))
+ return ERR_PTR(-ENOMEM);
+
+search_again_locked:
+ if (flow_act->flags & FLOW_ACT_NO_APPEND)
+ goto skip_search;
+ version = matched_fgs_get_version(match_head);
+ /* Try to find an FTE with an identical match value and attempt to
+ * update its action.
+ */
+ list_for_each_entry(iter, match_head, list) {
+ struct fs_fte *fte_tmp;
+
+ g = iter->g;
+ fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+ if (!fte_tmp)
+ continue;
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+ /* No error check needed here, because insert_fte() is not called */
+ up_write_ref_node(&fte_tmp->node, false);
+ tree_put_node(&fte_tmp->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
+ }
+
+skip_search:
+ /* No group with matching fte found, or we skipped the search.
+ * Try to add a new fte to any matching fg.
+ */
+
+ /* Check the ft version, for case that new flow group
+ * was added while the fgs weren't locked
+ */
+ if (atomic_read(&ft->node.version) != ft_version) {
+ rule = ERR_PTR(-EAGAIN);
+ goto out;
+ }
+
+ /* Check the fgs version. If version have changed it could be that an
+ * FTE with the same match value was added while the fgs weren't
+ * locked.
+ */
+ if (!(flow_act->flags & FLOW_ACT_NO_APPEND) &&
+ version != matched_fgs_get_version(match_head)) {
+ take_write = true;
+ goto search_again_locked;
+ }
+
+ list_for_each_entry(iter, match_head, list) {
+ g = iter->g;
+
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
+ if (!g->node.active) {
+ up_write_ref_node(&g->node, false);
+ continue;
+ }
+
+ err = insert_fte(g, fte);
+ if (err) {
+ up_write_ref_node(&g->node, false);
+ if (err == -ENOSPC)
+ continue;
+ kmem_cache_free(steering->ftes_cache, fte);
+ return ERR_PTR(err);
+ }
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+out:
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
+}
+
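+/* Core rule-insertion path. First try to attach to an existing flow
+ * group/FTE under the read lock, retrying whenever the table or group
+ * versions indicate a concurrent change; only take the write lock when
+ * a new autogrouped flow group (and FTE) has to be created.
+ */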
+static struct mlx5_flow_handle *
+_mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num)
+
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_handle *rule;
+ struct match_list match_head;
+ struct mlx5_flow_group *g;
+ bool take_write = false;
+ struct fs_fte *fte;
+ int version;
+ int err;
+ int i;
+
+ if (!check_valid_spec(spec))
+ return ERR_PTR(-EINVAL);
+
+ if (flow_act->fg && ft->autogroup.active)
+ return ERR_PTR(-EINVAL);
+
+ if (dest && dest_num <= 0)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < dest_num; i++) {
+ if (!dest_is_valid(&dest[i], flow_act, ft))
+ return ERR_PTR(-EINVAL);
+ }
+ nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+search_again_locked:
+ version = atomic_read(&ft->node.version);
+
+ /* Collect all fgs which has a matching match_criteria */
+ err = build_match_list(&match_head, ft, spec, flow_act->fg, take_write);
+ if (err) {
+ if (take_write)
+ up_write_ref_node(&ft->node, false);
+ else
+ up_read_ref_node(&ft->node);
+ return ERR_PTR(err);
+ }
+
+ if (!take_write)
+ up_read_ref_node(&ft->node);
+
+ rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
+ dest_num, version);
+ free_match_list(&match_head, take_write);
+ if (!IS_ERR(rule) ||
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+ if (take_write)
+ up_write_ref_node(&ft->node, false);
+ return rule;
+ }
+
+ if (!take_write) {
+ nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+ take_write = true;
+ }
+
+ if (PTR_ERR(rule) == -EAGAIN ||
+ version != atomic_read(&ft->node.version))
+ goto search_again_locked;
+
+ g = alloc_auto_flow_group(ft, spec);
+ if (IS_ERR(g)) {
+ rule = ERR_CAST(g);
+ up_write_ref_node(&ft->node, false);
+ return rule;
+ }
+
+ fte = alloc_fte(ft, spec, flow_act);
+ if (IS_ERR(fte)) {
+ up_write_ref_node(&ft->node, false);
+ err = PTR_ERR(fte);
+ goto err_alloc_fte;
+ }
+
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ up_write_ref_node(&ft->node, false);
+
+ err = create_auto_flow_group(ft, g);
+ if (err)
+ goto err_release_fg;
+
+ err = insert_fte(g, fte);
+ if (err)
+ goto err_release_fg;
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node, false);
+ rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node, false);
+ if (IS_ERR(rule))
+ tree_put_node(&fte->node, false);
+ tree_put_node(&g->node, false);
+ return rule;
+
+err_release_fg:
+ up_write_ref_node(&g->node, false);
+ kmem_cache_free(steering->ftes_cache, fte);
+err_alloc_fte:
+ tree_put_node(&g->node, false);
+ return ERR_PTR(err);
+}
+
+static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
+{
+ return ((ft->type == FS_FT_NIC_RX) &&
+ (MLX5_CAP_FLOWTABLE(get_dev(&ft->node), nic_rx_multi_path_tirs)));
+}
+
+struct mlx5_flow_handle *
+mlx5_add_flow_rules(struct mlx5_flow_table *ft,
+ const struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int num_dest)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ static const struct mlx5_flow_spec zero_spec = {};
+ struct mlx5_flow_destination *gen_dest = NULL;
+ struct mlx5_flow_table *next_ft = NULL;
+ struct mlx5_flow_handle *handle = NULL;
+ u32 sw_action = flow_act->action;
+ int i;
+
+ if (!spec)
+ spec = &zero_spec;
+
+ if (!is_fwd_next_action(sw_action))
+ return _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+
+ if (!fwd_next_prio_supported(ft))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&root->chain_lock);
+ next_ft = find_next_fwd_ft(ft, flow_act);
+ if (!next_ft) {
+ handle = ERR_PTR(-EOPNOTSUPP);
+ goto unlock;
+ }
+
+ gen_dest = kcalloc(num_dest + 1, sizeof(*dest),
+ GFP_KERNEL);
+ if (!gen_dest) {
+ handle = ERR_PTR(-ENOMEM);
+ goto unlock;
+ }
+ for (i = 0; i < num_dest; i++)
+ gen_dest[i] = dest[i];
+ gen_dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ gen_dest[i].ft = next_ft;
+ dest = gen_dest;
+ num_dest++;
+ flow_act->action &= ~(MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO |
+ MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS);
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
+ if (IS_ERR(handle))
+ goto unlock;
+
+ if (list_empty(&handle->rule[num_dest - 1]->next_ft)) {
+ mutex_lock(&next_ft->lock);
+ list_add(&handle->rule[num_dest - 1]->next_ft,
+ &next_ft->fwd_rules);
+ mutex_unlock(&next_ft->lock);
+ handle->rule[num_dest - 1]->sw_action = sw_action;
+ handle->rule[num_dest - 1]->ft = ft;
+ }
+unlock:
+ mutex_unlock(&root->chain_lock);
+ kfree(gen_dest);
+ return handle;
+}
+EXPORT_SYMBOL(mlx5_add_flow_rules);
+
+void mlx5_del_flow_rules(struct mlx5_flow_handle **pp)
+{
+ struct mlx5_flow_handle *handle;
+ struct fs_fte *fte;
+ int i;
+
+ handle = *pp;
+ *pp = NULL;
+ if (IS_ERR_OR_NULL(handle))
+ return;
+
+ /* To consolidate the HW changes, lock the FTE against other
+ * changes and increase its refcount so that the FTE's "del"
+ * functions are not invoked; they are handled here instead.
+ * The rules are removed under the locked FTE.
+ * After removing all of the handle's rules: if rules remain, we
+ * only need to modify the FTE in FW and unlock/decrease the
+ * refcount taken above. Otherwise the FTE should be deleted:
+ * first delete the FTE in FW, then unlock the FTE and proceed
+ * with tree_put_node of the FTE, which performs the last refcount
+ * decrease as well as the required handling of its parent.
+ */
+ fs_get_obj(fte, handle->rule[0]->node.parent);
+ down_write_ref_node(&fte->node, false);
+ for (i = handle->num_rules - 1; i >= 0; i--)
+ tree_remove_node(&handle->rule[i]->node, true);
+ if (list_empty(&fte->node.children)) {
+ fte->node.del_hw_func(&fte->node);
+ /* Avoid double call to del_hw_fte */
+ fte->node.del_hw_func = NULL;
+ up_write_ref_node(&fte->node, false);
+ tree_put_node(&fte->node, false);
+ } else if (fte->dests_size) {
+ if (fte->modify_mask)
+ modify_fte(fte);
+ up_write_ref_node(&fte->node, false);
+ } else {
+ up_write_ref_node(&fte->node, false);
+ }
+ kfree(handle);
+}
+EXPORT_SYMBOL(mlx5_del_flow_rules);
+
+/* Assuming prio->node.children(flow tables) is sorted by level */
+static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
+{
+ struct fs_prio *prio;
+
+ fs_get_obj(prio, ft->node.parent);
+
+ if (!list_is_last(&ft->node.list, &prio->node.children))
+ return list_next_entry(ft, node.list);
+ return find_next_chained_ft(prio);
+}
+
+static int update_root_ft_destroy(struct mlx5_flow_table *ft)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct mlx5_ft_underlay_qp *uqp;
+ struct mlx5_flow_table *new_root_ft = NULL;
+ int err = 0;
+ u32 qpn;
+
+ if (root->root_ft != ft)
+ return 0;
+
+ new_root_ft = find_next_ft(ft);
+ if (!new_root_ft) {
+ root->root_ft = NULL;
+ return 0;
+ }
+
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = root->cmds->update_root_ft(root, new_root_ft,
+ qpn, false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = root->cmds->update_root_ft(root,
+ new_root_ft, qpn,
+ false);
+ if (err)
+ break;
+ }
+ }
+
+ if (err)
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
+ else
+ root->root_ft = new_root_ft;
+
+ return 0;
+}
+
+/* Connect flow table from previous priority to
+ * the next flow table.
+ */
+static int disconnect_flow_table(struct mlx5_flow_table *ft)
+{
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ struct mlx5_flow_table *next_ft;
+ struct fs_prio *prio;
+ int err = 0;
+
+ err = update_root_ft_destroy(ft);
+ if (err)
+ return err;
+
+ fs_get_obj(prio, ft->node.parent);
+ if (list_first_entry(&prio->node.children,
+ struct mlx5_flow_table,
+ node.list) != ft)
+ return 0;
+
+ next_ft = find_next_ft(ft);
+ err = connect_fwd_rules(dev, next_ft, ft);
+ if (err)
+ return err;
+
+ err = connect_prev_fts(dev, next_ft, prio);
+ if (err)
+ mlx5_core_warn(dev, "Failed to disconnect flow table %d\n",
+ ft->id);
+ return err;
+}
+
+int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
+{
+ struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ int err = 0;
+
+ mutex_lock(&root->chain_lock);
+ if (!(ft->flags & MLX5_FLOW_TABLE_UNMANAGED))
+ err = disconnect_flow_table(ft);
+ if (err) {
+ mutex_unlock(&root->chain_lock);
+ return err;
+ }
+ if (tree_remove_node(&ft->node, false))
+ mlx5_core_warn(get_dev(&ft->node), "Flow table %d wasn't destroyed, refcount > 1\n",
+ ft->id);
+ mutex_unlock(&root->chain_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
+{
+ if (tree_remove_node(&fg->node, false))
+ mlx5_core_warn(get_dev(&fg->node), "Flow group %d wasn't destroyed, refcount > 1\n",
+ fg->id);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_group);
+
+struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
+ int n)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering || !steering->fdb_sub_ns)
+ return NULL;
+
+ return steering->fdb_sub_ns[n];
+}
+EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+
+static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
+{
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_KERNEL_RX_MACSEC:
+ case MLX5_FLOW_NAMESPACE_LAG:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ case MLX5_FLOW_NAMESPACE_LEFTOVERS:
+ case MLX5_FLOW_NAMESPACE_ANCHOR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_flow_root_namespace *root_ns;
+ int prio = 0;
+ struct fs_prio *fs_prio;
+ struct mlx5_flow_namespace *ns;
+
+ if (!steering)
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_FDB:
+ if (steering->fdb_root_ns)
+ return &steering->fdb_root_ns->ns;
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
+ if (steering->sniffer_rx_root_ns)
+ return &steering->sniffer_rx_root_ns->ns;
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
+ if (steering->sniffer_tx_root_ns)
+ return &steering->sniffer_tx_root_ns->ns;
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_FDB_BYPASS:
+ root_ns = steering->fdb_root_ns;
+ prio = FDB_BYPASS_PATH;
+ break;
+ case MLX5_FLOW_NAMESPACE_EGRESS:
+ case MLX5_FLOW_NAMESPACE_EGRESS_IPSEC:
+ case MLX5_FLOW_NAMESPACE_EGRESS_MACSEC:
+ root_ns = steering->egress_root_ns;
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_BYPASS_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_KERNEL_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ root_ns = steering->rdma_tx_root_ns;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_IPSEC:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_IPSEC_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX_IPSEC:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_IPSEC_PRIO;
+ break;
+ default: /* Must be NIC RX */
+ WARN_ON(!is_nic_rx_ns(type));
+ root_ns = steering->root_ns;
+ prio = type;
+ break;
+ }
+
+ if (!root_ns)
+ return NULL;
+
+ fs_prio = find_prio(&root_ns->ns, prio);
+ if (!fs_prio)
+ return NULL;
+
+ ns = list_first_entry(&fs_prio->node.children,
+ typeof(*ns),
+ node.list);
+
+ return ns;
+}
+EXPORT_SYMBOL(mlx5_get_flow_namespace);
+
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type,
+ int vport)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ if (!steering)
+ return NULL;
+
+ switch (type) {
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (vport >= steering->esw_egress_acl_vports)
+ return NULL;
+ if (steering->esw_egress_root_ns &&
+ steering->esw_egress_root_ns[vport])
+ return &steering->esw_egress_root_ns[vport]->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (vport >= steering->esw_ingress_acl_vports)
+ return NULL;
+ if (steering->esw_ingress_root_ns &&
+ steering->esw_ingress_root_ns[vport])
+ return &steering->esw_ingress_root_ns[vport]->ns;
+ else
+ return NULL;
+ default:
+ return NULL;
+ }
+}
+
+static struct fs_prio *_fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels,
+ enum fs_node_type type)
+{
+ struct fs_prio *fs_prio;
+
+ fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
+ if (!fs_prio)
+ return ERR_PTR(-ENOMEM);
+
+ fs_prio->node.type = type;
+ tree_init_node(&fs_prio->node, NULL, del_sw_prio);
+ tree_add_node(&fs_prio->node, &ns->node);
+ fs_prio->num_levels = num_levels;
+ fs_prio->prio = prio;
+ list_add_tail(&fs_prio->node.list, &ns->node.children);
+
+ return fs_prio;
+}
+
+static struct fs_prio *fs_create_prio_chained(struct mlx5_flow_namespace *ns,
+ unsigned int prio,
+ int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO_CHAINS);
+}
+
+static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
+ unsigned int prio, int num_levels)
+{
+ return _fs_create_prio(ns, prio, num_levels, FS_TYPE_PRIO);
+}
+
+static struct mlx5_flow_namespace *
+fs_init_namespace(struct mlx5_flow_namespace *ns)
+{
+ ns->node.type = FS_TYPE_NAMESPACE;
+
+ return ns;
+}
+
+static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
+ int def_miss_act)
+{
+ struct mlx5_flow_namespace *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return ERR_PTR(-ENOMEM);
+
+ fs_init_namespace(ns);
+ ns->def_miss_action = def_miss_act;
+ tree_init_node(&ns->node, NULL, del_sw_ns);
+ tree_add_node(&ns->node, &prio->node);
+ list_add_tail(&ns->node.list, &prio->node.children);
+
+ return ns;
+}
+
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+ struct init_tree_node *prio_metadata)
+{
+ struct fs_prio *fs_prio;
+ int i;
+
+ for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
+ fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
+ if (IS_ERR(fs_prio))
+ return PTR_ERR(fs_prio);
+ }
+ return 0;
+}
+
+#define FLOW_TABLE_BIT_SZ 1
+#define GET_FLOW_TABLE_CAP(dev, offset) \
+ ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
+ offset / 32)) >> \
+ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
+static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
+{
+ int i;
+
+ for (i = 0; i < caps->arr_sz; i++) {
+ if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
+ return false;
+ }
+ return true;
+}
+
+static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
+ struct init_tree_node *init_node,
+ struct fs_node *fs_parent_node,
+ struct init_tree_node *init_parent_node,
+ int prio)
+{
+ int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
+ flow_table_properties_nic_receive.
+ max_ft_level);
+ struct mlx5_flow_namespace *fs_ns;
+ struct fs_prio *fs_prio;
+ struct fs_node *base;
+ int i;
+ int err;
+
+ if (init_node->type == FS_TYPE_PRIO) {
+ if ((init_node->min_ft_level > max_ft_level) ||
+ !has_required_caps(steering->dev, &init_node->caps))
+ return 0;
+
+ fs_get_obj(fs_ns, fs_parent_node);
+ if (init_node->num_leaf_prios)
+ return create_leaf_prios(fs_ns, prio, init_node);
+ fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
+ if (IS_ERR(fs_prio))
+ return PTR_ERR(fs_prio);
+ base = &fs_prio->node;
+ } else if (init_node->type == FS_TYPE_NAMESPACE) {
+ fs_get_obj(fs_prio, fs_parent_node);
+ fs_ns = fs_create_namespace(fs_prio, init_node->def_miss_action);
+ if (IS_ERR(fs_ns))
+ return PTR_ERR(fs_ns);
+ base = &fs_ns->node;
+ } else {
+ return -EINVAL;
+ }
+ prio = 0;
+ for (i = 0; i < init_node->ar_size; i++) {
+ err = init_root_tree_recursive(steering, &init_node->children[i],
+ base, init_node, prio);
+ if (err)
+ return err;
+ if (init_node->children[i].type == FS_TYPE_PRIO &&
+ init_node->children[i].num_leaf_prios) {
+ prio += init_node->children[i].num_leaf_prios;
+ }
+ }
+
+ return 0;
+}
+
+static int init_root_tree(struct mlx5_flow_steering *steering,
+ struct init_tree_node *init_node,
+ struct fs_node *fs_parent_node)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < init_node->ar_size; i++) {
+ err = init_root_tree_recursive(steering, &init_node->children[i],
+ fs_parent_node,
+ init_node, i);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void del_sw_root_ns(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_namespace *ns;
+
+ fs_get_obj(ns, node);
+ root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+ mutex_destroy(&root_ns->chain_lock);
+ kfree(node);
+}
+
+static struct mlx5_flow_root_namespace *
+create_root_ns(struct mlx5_flow_steering *steering,
+ enum fs_flow_table_type table_type)
+{
+ const struct mlx5_flow_cmds *cmds = mlx5_fs_cmd_get_default(table_type);
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_namespace *ns;
+
+ /* Create the root namespace */
+ root_ns = kzalloc(sizeof(*root_ns), GFP_KERNEL);
+ if (!root_ns)
+ return NULL;
+
+ root_ns->dev = steering->dev;
+ root_ns->table_type = table_type;
+ root_ns->cmds = cmds;
+
+ INIT_LIST_HEAD(&root_ns->underlay_qpns);
+
+ ns = &root_ns->ns;
+ fs_init_namespace(ns);
+ mutex_init(&root_ns->chain_lock);
+ tree_init_node(&ns->node, NULL, del_sw_root_ns);
+ tree_add_node(&ns->node, NULL);
+
+ return root_ns;
+}
+
+static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level);
+
+static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
+{
+ struct fs_prio *prio;
+
+ fs_for_each_prio(prio, ns) {
+ /* This updates prio start_level and num_levels */
+ set_prio_attrs_in_prio(prio, acc_level);
+ acc_level += prio->num_levels;
+ }
+ return acc_level;
+}
+
+static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
+{
+ struct mlx5_flow_namespace *ns;
+ int acc_level_ns = acc_level;
+
+ prio->start_level = acc_level;
+ fs_for_each_ns(ns, prio) {
+ /* This updates start_level and num_levels of ns's priority descendants */
+ acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
+
+ /* If this is a prio with chains, we can jump from one chain
+ * (namespace) to another, so we accumulate the levels.
+ */
+ if (prio->node.type == FS_TYPE_PRIO_CHAINS)
+ acc_level = acc_level_ns;
+ }
+
+ if (!prio->num_levels)
+ prio->num_levels = acc_level_ns - prio->start_level;
+ WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
+}
+
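+/* Assign an absolute start level to every priority of the root
+ * namespace by accumulating num_levels across the whole tree.
+ */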
+static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
+{
+ struct mlx5_flow_namespace *ns = &root_ns->ns;
+ struct fs_prio *prio;
+ int start_level = 0;
+
+ fs_for_each_prio(prio, ns) {
+ set_prio_attrs_in_prio(prio, start_level);
+ start_level += prio->num_levels;
+ }
+}
+
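+/* The anchor is a minimal, single-FTE table created in the ANCHOR
+ * namespace, the last priority of NIC RX, so that a terminating table
+ * always exists at the end of the namespace for chained tables to
+ * point at.
+ */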
+#define ANCHOR_PRIO 0
+#define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
+static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_flow_namespace *ns = NULL;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_table *ft;
+
+ ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+ if (WARN_ON(!ns))
+ return -EINVAL;
+
+ ft_attr.max_fte = ANCHOR_SIZE;
+ ft_attr.level = ANCHOR_LEVEL;
+ ft_attr.prio = ANCHOR_PRIO;
+
+ ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(ft)) {
+ mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
+ return PTR_ERR(ft);
+ }
+ return 0;
+}
+
+static int init_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ if (!steering->root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+ if (err)
+ goto out_err;
+
+ set_prio_attrs(steering->root_ns);
+ err = create_anchor_flow_table(steering);
+ if (err)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->root_ns);
+ steering->root_ns = NULL;
+ return err;
+}
+
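+/* Recursively release a sub-tree: hold a temporary reference so the
+ * node survives while its children are cleaned up, then drop it and
+ * force removal of the node itself.
+ */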
+static void clean_tree(struct fs_node *node)
+{
+ if (node) {
+ struct fs_node *iter;
+ struct fs_node *temp;
+
+ tree_get_node(node);
+ list_for_each_entry_safe(iter, temp, &node->children, list)
+ clean_tree(iter);
+ tree_put_node(node, false);
+ tree_remove_node(node, false);
+ }
+}
+
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
+{
+ if (!root_ns)
+ return;
+
+ clean_tree(&root_ns->ns.node);
+}
+
+static int init_sniffer_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_tx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_TX);
+ if (!steering->sniffer_tx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_tx_root_ns->ns, 0, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->sniffer_rx_root_ns = create_root_ns(steering, FS_FT_SNIFFER_RX);
+ if (!steering->sniffer_rx_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->sniffer_rx_root_ns->ns, 0, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+#define PORT_SEL_NUM_LEVELS 3
+static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->rdma_rx_root_ns = create_root_ns(steering, FS_FT_RDMA_RX);
+ if (!steering->rdma_rx_root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &rdma_rx_root_fs,
+ &steering->rdma_rx_root_ns->ns.node);
+ if (err)
+ goto out_err;
+
+ set_prio_attrs(steering->rdma_rx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ steering->rdma_rx_root_ns = NULL;
+ return err;
+}
+
+static int init_rdma_tx_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->rdma_tx_root_ns = create_root_ns(steering, FS_FT_RDMA_TX);
+ if (!steering->rdma_tx_root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &rdma_tx_root_fs,
+ &steering->rdma_tx_root_ns->ns.node);
+ if (err)
+ goto out_err;
+
+ set_prio_attrs(steering->rdma_tx_root_ns);
+
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ steering->rdma_tx_root_ns = NULL;
+ return err;
+}
+
+/* FT and tc chains are stored in the same array so we can re-use the
+ * mlx5_get_fdb_sub_ns() and tc api for FT chains.
+ * When creating a new ns for each chain store it in the first available slot.
+ * Assume tc chains are created and stored first and only then the FT chain.
+ */
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_namespace *ns)
+{
+ int chain = 0;
+
+ while (steering->fdb_sub_ns[chain])
+ ++chain;
+
+ steering->fdb_sub_ns[chain] = ns;
+}
+
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+ struct fs_prio *maj_prio)
+{
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *min_prio;
+ int prio;
+
+ ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
+ min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
+ if (IS_ERR(min_prio))
+ return PTR_ERR(min_prio);
+ }
+
+ store_fdb_sub_ns_prio_chain(steering, ns);
+
+ return 0;
+}
+
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
+ int fs_prio,
+ int chains)
+{
+ struct fs_prio *maj_prio;
+ int levels;
+ int chain;
+ int err;
+
+ levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
+ maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+ fs_prio,
+ levels);
+ if (IS_ERR(maj_prio))
+ return PTR_ERR(maj_prio);
+
+ for (chain = 0; chain < chains; chain++) {
+ err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
+ sizeof(*steering->fdb_sub_ns),
+ GFP_KERNEL);
+ if (!steering->fdb_sub_ns)
+ return -ENOMEM;
+
+ err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
+ if (err)
+ return err;
+
+ err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int create_fdb_bypass(struct mlx5_flow_steering *steering)
+{
+ struct mlx5_flow_namespace *ns;
+ struct fs_prio *prio;
+ int i;
+
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH, 0);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+
+ ns = fs_create_namespace(prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+ if (IS_ERR(ns))
+ return PTR_ERR(ns);
+
+ for (i = 0; i < MLX5_BY_PASS_NUM_REGULAR_PRIOS; i++) {
+ prio = fs_create_prio(ns, i, 1);
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ }
+ return 0;
+}
+
+static void cleanup_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ kfree(steering->fdb_sub_ns);
+ steering->fdb_sub_ns = NULL;
+}
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *maj_prio;
+ int err;
+
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
+ return -ENOMEM;
+
+ err = create_fdb_bypass(steering);
+ if (err)
+ goto out_err;
+
+ err = create_fdb_fast_path(steering);
+ if (err)
+ goto out_err;
+
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_TC_MISS, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BR_OFFLOAD, 4);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ /* We put this priority last, knowing that nothing will get here
+ * unless explicitly forwarded to. This is possible because the
+ * slow path tables have catch-all rules and nothing gets past
+ * those tables.
+ */
+ maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_PER_VPORT, 1);
+ if (IS_ERR(maj_prio)) {
+ err = PTR_ERR(maj_prio);
+ goto out_err;
+ }
+
+ set_prio_attrs(steering->fdb_root_ns);
+ return 0;
+
+out_err:
+ cleanup_fdb_root_ns(steering);
+ return err;
+}
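+
+/*
+ * Summary added for illustration: the FDB priorities created above, in
+ * order, are FDB_BYPASS_PATH, the FDB_TC_OFFLOAD chains, the
+ * FDB_FT_OFFLOAD chain, FDB_TC_MISS, FDB_BR_OFFLOAD, FDB_SLOW_PATH and,
+ * last, FDB_PER_VPORT.
+ */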
+
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+{
+ struct fs_prio *prio;
+
+ steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns[vport])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
+{
+ struct fs_prio *prio;
+
+ steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns[vport])
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
+int mlx5_fs_egress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_egress_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->esw_egress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_egress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_egress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->esw_egress_acl_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+ kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
+ return err;
+}
+
+void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_egress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_egress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+ kfree(steering->esw_egress_root_ns);
+ steering->esw_egress_root_ns = NULL;
+}
+
+int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+ int i;
+
+ steering->esw_ingress_root_ns =
+ kcalloc(total_vports,
+ sizeof(*steering->esw_ingress_root_ns),
+ GFP_KERNEL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ for (i = 0; i < total_vports; i++) {
+ err = init_ingress_acl_root_ns(steering, i);
+ if (err)
+ goto cleanup_root_ns;
+ }
+ steering->esw_ingress_acl_vports = total_vports;
+ return 0;
+
+cleanup_root_ns:
+ for (i--; i >= 0; i--)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+ kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
+ return err;
+}
+
+void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int i;
+
+ if (!steering->esw_ingress_root_ns)
+ return;
+
+ for (i = 0; i < steering->esw_ingress_acl_vports; i++)
+ cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+ kfree(steering->esw_ingress_root_ns);
+ steering->esw_ingress_root_ns = NULL;
+}
+
+u32 mlx5_fs_get_capabilities(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type type)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_namespace *ns;
+
+ ns = mlx5_get_flow_namespace(dev, type);
+ if (!ns)
+ return 0;
+
+ root = find_root(&ns->node);
+ if (!root)
+ return 0;
+
+ return root->cmds->get_capabilities(root, root->table_type);
+}
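+
+/*
+ * Usage sketch, illustrative only: both the capability bit and the
+ * helper below are assumptions (the bit name is borrowed from the Linux
+ * driver and may not be defined in this port).
+ */
+#if 0
+ if (mlx5_fs_get_capabilities(dev, MLX5_FLOW_NAMESPACE_FDB) &
+ MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX)
+ enable_vlan_push_on_rx(); /* hypothetical caller helper */
+#endif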
+
+static int init_egress_root_ns(struct mlx5_flow_steering *steering)
+{
+ int err;
+
+ steering->egress_root_ns = create_root_ns(steering,
+ FS_FT_NIC_TX);
+ if (!steering->egress_root_ns)
+ return -ENOMEM;
+
+ err = init_root_tree(steering, &egress_root_fs,
+ &steering->egress_root_ns->ns.node);
+ if (err)
+ goto cleanup;
+ set_prio_attrs(steering->egress_root_ns);
+ return 0;
+cleanup:
+ cleanup_root_ns(steering->egress_root_ns);
+ steering->egress_root_ns = NULL;
+ return err;
+}
+
+void mlx5_fs_core_cleanup(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ cleanup_root_ns(steering->root_ns);
+ cleanup_fdb_root_ns(steering);
+ cleanup_root_ns(steering->port_sel_root_ns);
+ cleanup_root_ns(steering->sniffer_rx_root_ns);
+ cleanup_root_ns(steering->sniffer_tx_root_ns);
+ cleanup_root_ns(steering->rdma_rx_root_ns);
+ cleanup_root_ns(steering->rdma_tx_root_ns);
+ cleanup_root_ns(steering->egress_root_ns);
+}
+
+int mlx5_fs_core_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ int err;
+
+ if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
+ (MLX5_CAP_GEN(dev, nic_flow_table))) ||
+ ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads))) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
+ err = init_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_ESWITCH_MANAGER(dev)) {
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(steering);
+ if (err)
+ goto err;
+ }
+ err = mlx5_fs_egress_acls_init(dev, MAX_VPORTS);
+ if (err)
+ goto err;
+ err = mlx5_fs_ingress_acls_init(dev, MAX_VPORTS);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_RX(dev, ft_support)) {
+ err = init_sniffer_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_SNIFFER_TX(dev, ft_support)) {
+ err = init_sniffer_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
+ MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
+ err = init_rdma_rx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_RDMA_TX(dev, ft_support)) {
+ err = init_rdma_tx_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ if (MLX5_CAP_FLOWTABLE_NIC_TX(dev, ft_support)) {
+ err = init_egress_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ mlx5_fs_core_cleanup(dev);
+ return err;
+}
+
+void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
+ kfree(steering);
+ mlx5_ft_pool_destroy(dev);
+ mlx5_cleanup_fc_stats(dev);
+}
+
+int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+{
+ struct mlx5_flow_steering *steering;
+ int err = 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ err = mlx5_ft_pool_init(dev);
+ if (err)
+ goto err;
+
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ mlx5_fs_core_free(dev);
+ return err;
+}
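+
+/*
+ * Lifecycle sketch, illustrative: the intended pairing, mirroring the
+ * Linux driver, is mlx5_fs_core_alloc() then mlx5_fs_core_init() at
+ * load, and mlx5_fs_core_cleanup() then mlx5_fs_core_free() at unload;
+ * both error paths above already unwind through the matching free.
+ */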
+
+int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
+{
+ struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *new_uqp;
+ int err = 0;
+
+ new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
+ if (!new_uqp)
+ return -ENOMEM;
+
+ mutex_lock(&root->chain_lock);
+
+ if (!root->root_ft) {
+ err = -EINVAL;
+ goto update_ft_fail;
+ }
+
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
+ false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
+ underlay_qpn, err);
+ goto update_ft_fail;
+ }
+
+ new_uqp->qpn = underlay_qpn;
+ list_add_tail(&new_uqp->list, &root->underlay_qpns);
+
+ mutex_unlock(&root->chain_lock);
+
+ return 0;
+
+update_ft_fail:
+ mutex_unlock(&root->chain_lock);
+ kfree(new_uqp);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
+
+int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
+{
+ struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *uqp;
+ bool found = false;
+ int err = 0;
+
+ mutex_lock(&root->chain_lock);
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ if (uqp->qpn == underlay_qpn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
+ underlay_qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = root->cmds->update_root_ft(root, root->root_ft, underlay_qpn,
+ true);
+ if (err)
+ mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
+ underlay_qpn, err);
+
+ list_del(&uqp->list);
+ mutex_unlock(&root->chain_lock);
+ kfree(uqp);
+
+ return 0;
+
+out:
+ mutex_unlock(&root->chain_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
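+
+/*
+ * Usage sketch, illustrative only: an IPoIB-style consumer brackets the
+ * lifetime of its underlay QP with the two calls; "qp" is a
+ * hypothetical caller-owned object.
+ */
+#if 0
+ err = mlx5_fs_add_rx_underlay_qpn(dev, qp->qpn);
+ if (err)
+ return (err);
+ /* ... traffic flows via the underlay QP ... */
+ mlx5_fs_remove_rx_underlay_qpn(dev, qp->qpn);
+#endif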
+
+static struct mlx5_flow_root_namespace
+*get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_flow_namespace *ns;
+
+ if (ns_type == MLX5_FLOW_NAMESPACE_ESW_EGRESS ||
+ ns_type == MLX5_FLOW_NAMESPACE_ESW_INGRESS)
+ ns = mlx5_get_flow_vport_acl_namespace(dev, ns_type, 0);
+ else
+ ns = mlx5_get_flow_namespace(dev, ns_type);
+ if (!ns)
+ return NULL;
+
+ return find_root(&ns->node);
+}
+
+struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type,
+ u8 num_actions,
+ void *modify_actions)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_modify_hdr *modify_hdr;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
+ if (!modify_hdr)
+ return ERR_PTR(-ENOMEM);
+
+ modify_hdr->ns_type = ns_type;
+ err = root->cmds->modify_header_alloc(root, ns_type, num_actions,
+ modify_actions, modify_hdr);
+ if (err) {
+ kfree(modify_hdr);
+ return ERR_PTR(err);
+ }
+
+ return modify_hdr;
+}
+EXPORT_SYMBOL(mlx5_modify_header_alloc);
+
+void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_modify_hdr *modify_hdr)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, modify_hdr->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->modify_header_dealloc(root, modify_hdr);
+ kfree(modify_hdr);
+}
+EXPORT_SYMBOL(mlx5_modify_header_dealloc);
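+
+/*
+ * Usage sketch, illustrative only: builds a single set-action header
+ * modification. The set_action_in layout and MLX5_UN_SZ_BYTES() macro
+ * are assumed from the standard mlx5 ifc definitions.
+ */
+#if 0
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_modify_hdr *mh;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ mh = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL, 1, action);
+ if (!IS_ERR(mh))
+ mlx5_modify_header_dealloc(dev, mh);
+#endif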
+
+struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat_params *params,
+ enum mlx5_flow_namespace_type ns_type)
+{
+ struct mlx5_pkt_reformat *pkt_reformat;
+ struct mlx5_flow_root_namespace *root;
+ int err;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
+ if (!pkt_reformat)
+ return ERR_PTR(-ENOMEM);
+
+ pkt_reformat->ns_type = ns_type;
+ pkt_reformat->reformat_type = params->type;
+ err = root->cmds->packet_reformat_alloc(root, params, ns_type,
+ pkt_reformat);
+ if (err) {
+ kfree(pkt_reformat);
+ return ERR_PTR(err);
+ }
+
+ return pkt_reformat;
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
+
+void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, pkt_reformat->ns_type);
+ if (WARN_ON(!root))
+ return;
+ root->cmds->packet_reformat_dealloc(root, pkt_reformat);
+ kfree(pkt_reformat);
+}
+EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
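+
+/*
+ * Usage sketch, illustrative only: the reformat type is assumed from
+ * the standard mlx5 definitions; encap_data/encap_size are hypothetical
+ * caller-provided values.
+ */
+#if 0
+ struct mlx5_pkt_reformat_params params = {
+ .type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN,
+ .size = encap_size,
+ .data = encap_data,
+ };
+ struct mlx5_pkt_reformat *pr;
+
+ pr = mlx5_packet_reformat_alloc(dev, &params, MLX5_FLOW_NAMESPACE_FDB);
+ if (!IS_ERR(pr))
+ mlx5_packet_reformat_dealloc(dev, pr);
+#endif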
+
+int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
+ struct mlx5_flow_root_namespace *peer_ns)
+{
+ if (peer_ns && ns->mode != peer_ns->mode) {
+ mlx5_core_err(ns->dev,
+ "Can't peer namespace of different steering mode\n");
+ return -EINVAL;
+ }
+
+ return ns->cmds->set_peer(ns, peer_ns);
+}
+
+/* This function should be called only at init stage of the namespace.
+ * It is not safe to call this function while steering operations
+ * are executed in the namespace.
+ */
+int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
+ enum mlx5_flow_steering_mode mode)
+{
+ struct mlx5_flow_root_namespace *root;
+ const struct mlx5_flow_cmds *cmds;
+ int err;
+
+ root = find_root(&ns->node);
+ if (&root->ns != ns)
+ /* Can't set cmds on a non-root namespace */
+ return -EINVAL;
+
+ if (root->table_type != FS_FT_FDB)
+ return -EOPNOTSUPP;
+
+ if (root->mode == mode)
+ return 0;
+
+ cmds = mlx5_fs_cmd_get_fw_cmds();
+ if (!cmds)
+ return -EOPNOTSUPP;
+
+ err = cmds->create_ns(root);
+ if (err) {
+ mlx5_core_err(root->dev, "Failed to create flow namespace (%d)\n",
+ err);
+ return err;
+ }
+
+ root->cmds->destroy_ns(root);
+ root->cmds = cmds;
+ root->mode = mode;
+
+ return 0;
+}
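+
+/*
+ * Usage sketch, illustrative only: switching the FDB root namespace
+ * mode at init time. MLX5_FLOW_STEERING_MODE_SMFS is assumed from the
+ * Linux enum; in this port mlx5_fs_cmd_get_fw_cmds() backs the result
+ * either way.
+ */
+#if 0
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (ns)
+ err = mlx5_flow_namespace_set_mode(ns, MLX5_FLOW_STEERING_MODE_SMFS);
+#endif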
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
index 7214c5256388..f8c7b3adc2c0 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_counters.c
@@ -31,7 +31,7 @@
#include <linux/rbtree.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/fs_core.h>
-#include <dev/mlx5/mlx5_core/mlx5_fc_cmd.h>
+#include <dev/mlx5/mlx5_core/fs_cmd.h>
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_ft_pool.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_ft_pool.c
new file mode 100644
index 000000000000..70d9d235b629
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_ft_pool.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021 Mellanox Technologies. */
+
+#include "fs_ft_pool.h"
+
+/* Firmware currently supports a fixed set of flow table pool sizes
+ * (FT_POOLS), each backed by a virtual memory region of 16M
+ * (MLX5_FT_SIZE); this region is duplicated for each flow table pool.
+ * We can allocate up to 16M of each pool, and we keep track of how much
+ * we used via mlx5_ft_pool_get_avail_sz.
+ * Firmware doesn't report any of this for now.
+ * FT_POOLS is expected to be sorted from large to small and to match
+ * the firmware pools.
+ */
+#define FT_SIZE (16 * 1024 * 1024)
+static const unsigned int FT_POOLS[] = { 4 * 1024 * 1024,
+ 1 * 1024 * 1024,
+ 64 * 1024,
+ 128,
+ 1 /* size for termination tables */ };
+struct mlx5_ft_pool {
+ int ft_left[ARRAY_SIZE(FT_POOLS)];
+};
+
+int mlx5_ft_pool_init(struct mlx5_core_dev *dev)
+{
+ struct mlx5_ft_pool *ft_pool;
+ int i;
+
+ ft_pool = kzalloc(sizeof(*ft_pool), GFP_KERNEL);
+ if (!ft_pool)
+ return -ENOMEM;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--)
+ ft_pool->ft_left[i] = FT_SIZE / FT_POOLS[i];
+
+ dev->priv.ft_pool = ft_pool;
+ return 0;
+}
+
+void mlx5_ft_pool_destroy(struct mlx5_core_dev *dev)
+{
+ kfree(dev->priv.ft_pool);
+}
+
+int
+mlx5_ft_pool_get_avail_sz(struct mlx5_core_dev *dev, enum fs_flow_table_type table_type,
+ int desired_size)
+{
+ u32 max_ft_size = 1 << MLX5_CAP_FLOWTABLE_TYPE(dev, log_max_ft_size, table_type);
+ int i, found_i = -1;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (dev->priv.ft_pool->ft_left[i] &&
+ (FT_POOLS[i] >= desired_size || desired_size == POOL_NEXT_SIZE) &&
+ FT_POOLS[i] <= max_ft_size) {
+ found_i = i;
+ if (desired_size != POOL_NEXT_SIZE)
+ break;
+ }
+ }
+
+ if (found_i != -1) {
+ --dev->priv.ft_pool->ft_left[found_i];
+ return FT_POOLS[found_i];
+ }
+
+ return 0;
+}
+
+void
+mlx5_ft_pool_put_sz(struct mlx5_core_dev *dev, int sz)
+{
+ int i;
+
+ if (!sz)
+ return;
+
+ for (i = ARRAY_SIZE(FT_POOLS) - 1; i >= 0; i--) {
+ if (sz == FT_POOLS[i]) {
+ ++dev->priv.ft_pool->ft_left[i];
+ return;
+ }
+ }
+
+ WARN_ONCE(1, "Couldn't find size %d in flow table size pool", sz);
+}
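+
+/*
+ * Usage sketch, illustrative: callers reserve a size with
+ * mlx5_ft_pool_get_avail_sz() (POOL_NEXT_SIZE selects the largest
+ * bucket still available) and must return exactly that size via
+ * mlx5_ft_pool_put_sz() when the table goes away.
+ */
+#if 0
+ int sz;
+
+ sz = mlx5_ft_pool_get_avail_sz(dev, FS_FT_FDB, POOL_NEXT_SIZE);
+ if (!sz)
+ return (-ENOSPC);
+ /* ... create the flow table with max_fte = sz ... */
+ mlx5_ft_pool_put_sz(dev, sz);
+#endif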
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_tcp.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_tcp.c
index d7d63d7932a1..f69c36aa72de 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fs_tcp.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fs_tcp.c
@@ -81,12 +81,12 @@ accel_fs_tcp_set_ipv6_flow(struct mlx5_flow_spec *spec, struct inpcb *inp)
#endif
void
-mlx5e_accel_fs_del_inpcb(struct mlx5_flow_rule *rule)
+mlx5e_accel_fs_del_inpcb(struct mlx5_flow_handle *rule)
{
- mlx5_del_flow_rule(&rule);
+ mlx5_del_flow_rules(&rule);
}
-struct mlx5_flow_rule *
+struct mlx5_flow_handle *
mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
struct inpcb *inp, uint32_t tirn, uint32_t flow_tag,
uint16_t vlan_id)
@@ -96,18 +96,17 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
#if defined(INET) || defined(INET6)
struct mlx5e_accel_fs_tcp *fs_tcp = &priv->fts.accel_tcp;
#endif
- struct mlx5_flow_rule *flow;
+ struct mlx5_flow_handle *flow;
struct mlx5_flow_spec *spec;
- struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = flow_tag,
- };
+ struct mlx5_flow_act flow_act = {};
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return (ERR_PTR(-ENOMEM));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = flow_tag;
INP_RLOCK(inp);
/* Set VLAN ID to match, if any. */
@@ -160,13 +159,9 @@ mlx5e_accel_fs_add_inpcb(struct mlx5e_priv *priv,
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- flow = mlx5_add_flow_rule(ft->t, spec->match_criteria_enable,
- spec->match_criteria,
- spec->match_value,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act,
- &dest);
+ flow = mlx5_add_flow_rules(ft->t, spec, &flow_act, &dest, 1);
out:
kvfree(spec);
return (flow);
@@ -175,18 +170,18 @@ out:
static int
accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, int type)
{
- static u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
- static u32 match_value[MLX5_ST_SZ_DW(fte_match_param)];
+ static struct mlx5_flow_spec spec = {};
struct mlx5_flow_destination dest = {};
struct mlx5e_accel_fs_tcp *fs_tcp;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
fs_tcp = &priv->fts.accel_tcp;
+ spec.flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+ spec.flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
/*
@@ -197,10 +192,11 @@ accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, int type)
* of flow tables.
*/
dest.ft = (type == MLX5E_ACCEL_FS_TCP_NUM_TYPES - 1) ?
- priv->fts.vlan.t : fs_tcp->tables[type + 1].t;
+ ((priv->fts.ipsec_ft) ? priv->fts.ipsec_ft : priv->fts.vlan.t) :
+ fs_tcp->tables[type + 1].t;
- rule = mlx5_add_flow_rule(fs_tcp->tables[type].t, 0, match_criteria, match_value,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ rule = mlx5_add_flow_rules(fs_tcp->tables[type].t, &spec, &flow_act,
+ &dest, 1);
if (IS_ERR(rule))
return (PTR_ERR(rule));
@@ -317,11 +313,13 @@ static int
accel_fs_tcp_create_table(struct mlx5e_priv *priv, int type)
{
struct mlx5e_flow_table *ft = &priv->fts.accel_tcp.tables[type];
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.accel_tcp.ns, 0, "tcp",
- MLX5E_ACCEL_FS_TCP_TABLE_SIZE);
+ ft_attr.max_fte = MLX5E_ACCEL_FS_TCP_TABLE_SIZE;
+ ft_attr.level = type;
+ ft->t = mlx5_create_flow_table(priv->fts.accel_tcp.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
@@ -365,7 +363,7 @@ mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
return;
for (i = 0; i < MLX5E_ACCEL_FS_TCP_NUM_TYPES; i++) {
- mlx5_del_flow_rule(&priv->fts.accel_tcp.default_rules[i]);
+ mlx5_del_flow_rules(&priv->fts.accel_tcp.default_rules[i]);
accel_fs_tcp_destroy_table(priv, i);
}
}
@@ -402,7 +400,7 @@ mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
err_destroy_rules:
while (i--)
- mlx5_del_flow_rule(&priv->fts.accel_tcp.default_rules[i]);
+ mlx5_del_flow_rules(&priv->fts.accel_tcp.default_rules[i]);
i = MLX5E_ACCEL_FS_TCP_NUM_TYPES;
err_destroy_tables:
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c b/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c
deleted file mode 100644
index b76ea7b60582..000000000000
--- a/sys/dev/mlx5/mlx5_core/mlx5_fs_tree.c
+++ /dev/null
@@ -1,2874 +0,0 @@
-/*-
- * Copyright (c) 2013-2021, Mellanox Technologies, Ltd. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include "opt_rss.h"
-#include "opt_ratelimit.h"
-
-#include <linux/module.h>
-#include <dev/mlx5/driver.h>
-#include <dev/mlx5/mlx5_core/mlx5_core.h>
-#include <dev/mlx5/mlx5_core/fs_core.h>
-#include <linux/string.h>
-#include <linux/compiler.h>
-
-#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
- sizeof(struct init_tree_node))
-
-#define ADD_PRIO(name_val, flags_val, min_level_val, max_ft_val, caps_val, \
- ...) {.type = FS_TYPE_PRIO,\
- .name = name_val,\
- .min_ft_level = min_level_val,\
- .flags = flags_val,\
- .max_ft = max_ft_val,\
- .caps = caps_val,\
- .children = (struct init_tree_node[]) {__VA_ARGS__},\
- .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
-}
-
-#define ADD_FT_PRIO(name_val, flags_val, max_ft_val, ...)\
- ADD_PRIO(name_val, flags_val, 0, max_ft_val, {},\
- __VA_ARGS__)\
-
-#define ADD_NS(name_val, ...) {.type = FS_TYPE_NAMESPACE,\
- .name = name_val,\
- .children = (struct init_tree_node[]) {__VA_ARGS__},\
- .ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
-}
-
-#define INIT_CAPS_ARRAY_SIZE(...) (sizeof((long[]){__VA_ARGS__}) /\
- sizeof(long))
-
-#define FS_CAP(cap) (__mlx5_bit_off(flow_table_nic_cap, cap))
-
-#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
- .caps = (long[]) {__VA_ARGS__}}
-
-/* Flowtable sizes: */
-#define BYPASS_MAX_FT 5
-#define BYPASS_PRIO_MAX_FT 1
-#define OFFLOADS_MAX_FT 2
-#define KERNEL_MAX_FT 5
-#define LEFTOVER_MAX_FT 1
-
-/* Flowtable levels: */
-#define OFFLOADS_MIN_LEVEL 3
-#define KERNEL_MIN_LEVEL (OFFLOADS_MIN_LEVEL + 1)
-#define LEFTOVER_MIN_LEVEL (KERNEL_MIN_LEVEL + 1)
-#define BYPASS_MIN_LEVEL (MLX5_NUM_BYPASS_FTS + LEFTOVER_MIN_LEVEL)
-
-struct node_caps {
- size_t arr_sz;
- long *caps;
-};
-
-struct init_tree_node {
- enum fs_type type;
- const char *name;
- struct init_tree_node *children;
- int ar_size;
- struct node_caps caps;
- u8 flags;
- int min_ft_level;
- int prio;
- int max_ft;
-} root_fs = {
- .type = FS_TYPE_NAMESPACE,
- .name = "root",
- .ar_size = 4,
- .children = (struct init_tree_node[]) {
- ADD_PRIO("by_pass_prio", 0, BYPASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root)),
- ADD_NS("by_pass_ns",
- ADD_FT_PRIO("prio0", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio1", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio2", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio3", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio4", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio5", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio6", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio7", 0,
- BYPASS_PRIO_MAX_FT),
- ADD_FT_PRIO("prio-mcast", 0,
- BYPASS_PRIO_MAX_FT))),
- ADD_PRIO("offloads_prio", 0, OFFLOADS_MIN_LEVEL, 0, {},
- ADD_NS("offloads_ns",
- ADD_FT_PRIO("prio_offloads-0", 0,
- OFFLOADS_MAX_FT))),
- ADD_PRIO("kernel_prio", 0, KERNEL_MIN_LEVEL, 0, {},
- ADD_NS("kernel_ns",
- ADD_FT_PRIO("prio_kernel-0", 0,
- KERNEL_MAX_FT))),
- ADD_PRIO("leftovers_prio", MLX5_CORE_FS_PRIO_SHARED,
- LEFTOVER_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root)),
- ADD_NS("leftover_ns",
- ADD_FT_PRIO("leftovers_prio-0",
- MLX5_CORE_FS_PRIO_SHARED,
- LEFTOVER_MAX_FT)))
- }
-};
-
-/* Tree creation functions */
-
-static struct mlx5_flow_root_namespace *find_root(struct fs_base *node)
-{
- struct fs_base *parent;
-
- /* Make sure we only read it once while we go up the tree */
- while ((parent = node->parent))
- node = parent;
-
- if (node->type != FS_TYPE_NAMESPACE) {
- return NULL;
- }
-
- return container_of(container_of(node,
- struct mlx5_flow_namespace,
- base),
- struct mlx5_flow_root_namespace,
- ns);
-}
-
-static inline struct mlx5_core_dev *fs_get_dev(struct fs_base *node)
-{
- struct mlx5_flow_root_namespace *root = find_root(node);
-
- if (root)
- return root->dev;
- return NULL;
-}
-
-static void fs_init_node(struct fs_base *node,
- unsigned int refcount)
-{
- kref_init(&node->refcount);
- atomic_set(&node->users_refcount, refcount);
- init_completion(&node->complete);
- INIT_LIST_HEAD(&node->list);
- mutex_init(&node->lock);
-}
-
-static void _fs_add_node(struct fs_base *node,
- const char *name,
- struct fs_base *parent)
-{
- if (parent)
- atomic_inc(&parent->users_refcount);
- node->name = kstrdup_const(name, GFP_KERNEL);
- node->parent = parent;
-}
-
-static void fs_add_node(struct fs_base *node,
- struct fs_base *parent, const char *name,
- unsigned int refcount)
-{
- fs_init_node(node, refcount);
- _fs_add_node(node, name, parent);
-}
-
-static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
- bool parent_locked);
-
-static void fs_del_dst(struct mlx5_flow_rule *dst);
-static void _fs_del_ft(struct mlx5_flow_table *ft);
-static void fs_del_fg(struct mlx5_flow_group *fg);
-static void fs_del_fte(struct fs_fte *fte);
-
-static void cmd_remove_node(struct fs_base *base)
-{
- switch (base->type) {
- case FS_TYPE_FLOW_DEST:
- fs_del_dst(container_of(base, struct mlx5_flow_rule, base));
- break;
- case FS_TYPE_FLOW_TABLE:
- _fs_del_ft(container_of(base, struct mlx5_flow_table, base));
- break;
- case FS_TYPE_FLOW_GROUP:
- fs_del_fg(container_of(base, struct mlx5_flow_group, base));
- break;
- case FS_TYPE_FLOW_ENTRY:
- fs_del_fte(container_of(base, struct fs_fte, base));
- break;
- default:
- break;
- }
-}
-
-static void __fs_remove_node(struct kref *kref)
-{
- struct fs_base *node = container_of(kref, struct fs_base, refcount);
-
- if (node->parent) {
- if (node->type == FS_TYPE_FLOW_DEST)
- mutex_lock(&node->parent->parent->lock);
- mutex_lock(&node->parent->lock);
- }
- mutex_lock(&node->lock);
- cmd_remove_node(node);
- mutex_unlock(&node->lock);
- complete(&node->complete);
- if (node->parent) {
- mutex_unlock(&node->parent->lock);
- if (node->type == FS_TYPE_FLOW_DEST)
- mutex_unlock(&node->parent->parent->lock);
- _fs_put(node->parent, _fs_remove_node, false);
- }
-}
-
-void _fs_remove_node(struct kref *kref)
-{
- struct fs_base *node = container_of(kref, struct fs_base, refcount);
-
- __fs_remove_node(kref);
- kfree_const(node->name);
- kfree(node);
-}
-
-static void fs_get(struct fs_base *node)
-{
- atomic_inc(&node->users_refcount);
-}
-
-static void _fs_put(struct fs_base *node, void (*kref_cb)(struct kref *kref),
- bool parent_locked)
-{
- struct fs_base *parent_node = node->parent;
-
- if (parent_node && !parent_locked)
- mutex_lock(&parent_node->lock);
- if (atomic_dec_and_test(&node->users_refcount)) {
- if (parent_node) {
- /*remove from parent's list*/
- list_del_init(&node->list);
- mutex_unlock(&parent_node->lock);
- }
- kref_put(&node->refcount, kref_cb);
- if (parent_node && parent_locked)
- mutex_lock(&parent_node->lock);
- } else if (parent_node && !parent_locked) {
- mutex_unlock(&parent_node->lock);
- }
-}
-
-static void fs_put(struct fs_base *node)
-{
- _fs_put(node, __fs_remove_node, false);
-}
-
-static void fs_put_parent_locked(struct fs_base *node)
-{
- _fs_put(node, __fs_remove_node, true);
-}
-
-static void fs_remove_node(struct fs_base *node)
-{
- fs_put(node);
- wait_for_completion(&node->complete);
- kfree_const(node->name);
- kfree(node);
-}
-
-static void fs_remove_node_parent_locked(struct fs_base *node)
-{
- fs_put_parent_locked(node);
- wait_for_completion(&node->complete);
- kfree_const(node->name);
- kfree(node);
-}
-
-static struct fs_fte *fs_alloc_fte(u32 sw_action,
- struct mlx5_flow_act *flow_act,
- u32 *match_value,
- unsigned int index)
-{
- struct fs_fte *fte;
-
-
- fte = kzalloc(sizeof(*fte), GFP_KERNEL);
- if (!fte)
- return ERR_PTR(-ENOMEM);
-
- memcpy(fte->val, match_value, sizeof(fte->val));
- fte->base.type = FS_TYPE_FLOW_ENTRY;
- fte->dests_size = 0;
- fte->index = index;
- INIT_LIST_HEAD(&fte->dests);
- fte->flow_act = *flow_act;
- fte->sw_action = sw_action;
-
- return fte;
-}
-
-static struct fs_fte *alloc_star_ft_entry(struct mlx5_flow_table *ft,
- struct mlx5_flow_group *fg,
- u32 *match_value,
- unsigned int index)
-{
- int err;
- struct fs_fte *fte;
- struct mlx5_flow_rule *dst;
- struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- };
-
- if (fg->num_ftes == fg->max_ftes)
- return ERR_PTR(-ENOSPC);
-
- fte = fs_alloc_fte(MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, match_value, index);
- if (IS_ERR(fte))
- return fte;
-
- /*create dst*/
- dst = kzalloc(sizeof(*dst), GFP_KERNEL);
- if (!dst) {
- err = -ENOMEM;
- goto free_fte;
- }
-
- fte->base.parent = &fg->base;
- fte->dests_size = 1;
- dst->dest_attr.type = MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE;
- dst->base.parent = &fte->base;
- list_add(&dst->base.list, &fte->dests);
- /* assumed that the callee creates the star rules sorted by index */
- list_add_tail(&fte->base.list, &fg->ftes);
- fg->num_ftes++;
-
- return fte;
-
-free_fte:
- kfree(fte);
- return ERR_PTR(err);
-}
-
-/* assume that fte can't be changed */
-static void free_star_fte_entry(struct fs_fte *fte)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_flow_rule *dst, *temp;
-
- fs_get_parent(fg, fte);
-
- list_for_each_entry_safe(dst, temp, &fte->dests, base.list) {
- fte->dests_size--;
- list_del(&dst->base.list);
- kfree(dst);
- }
-
- list_del(&fte->base.list);
- fg->num_ftes--;
- kfree(fte);
-}
-
-static struct mlx5_flow_group *fs_alloc_fg(u32 *create_fg_in)
-{
- struct mlx5_flow_group *fg;
- void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- create_fg_in, match_criteria);
- u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
- create_fg_in,
- match_criteria_enable);
- fg = kzalloc(sizeof(*fg), GFP_KERNEL);
- if (!fg)
- return ERR_PTR(-ENOMEM);
-
- INIT_LIST_HEAD(&fg->ftes);
- fg->mask.match_criteria_enable = match_criteria_enable;
- memcpy(&fg->mask.match_criteria, match_criteria,
- sizeof(fg->mask.match_criteria));
- fg->base.type = FS_TYPE_FLOW_GROUP;
- fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
- start_flow_index);
- fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
- end_flow_index) - fg->start_index + 1;
- return fg;
-}
-
-static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio);
-static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
- struct fs_prio *prio);
-
-/* assumed src_ft and dst_ft can't be freed */
-static int fs_set_star_rule(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *src_ft,
- struct mlx5_flow_table *dst_ft)
-{
- struct mlx5_flow_rule *src_dst;
- struct fs_fte *src_fte;
- int err = 0;
- u32 *match_value;
- int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
-
- src_dst = list_first_entry(&src_ft->star_rule.fte->dests,
- struct mlx5_flow_rule, base.list);
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
- return -ENOMEM;
- }
- /*Create match context*/
-
- fs_get_parent(src_fte, src_dst);
-
- src_dst->dest_attr.ft = dst_ft;
- if (dst_ft) {
- err = mlx5_cmd_fs_set_fte(dev,
- src_ft->vport,
- &src_fte->status,
- match_value, src_ft->type,
- src_ft->id, src_fte->index,
- src_ft->star_rule.fg->id,
- &src_fte->flow_act,
- src_fte->sw_action,
- src_fte->dests_size,
- &src_fte->dests);
- if (err)
- goto free;
-
- fs_get(&dst_ft->base);
- } else {
- mlx5_cmd_fs_delete_fte(dev,
- src_ft->vport,
- &src_fte->status,
- src_ft->type, src_ft->id,
- src_fte->index);
- }
-
-free:
- kvfree(match_value);
- return err;
-}
-
-static int connect_prev_fts(struct fs_prio *locked_prio,
- struct fs_prio *prev_prio,
- struct mlx5_flow_table *next_ft)
-{
- struct mlx5_flow_table *iter;
- int err = 0;
- struct mlx5_core_dev *dev = fs_get_dev(&prev_prio->base);
-
- if (!dev)
- return -ENODEV;
-
- mutex_lock(&prev_prio->base.lock);
- fs_for_each_ft(iter, prev_prio) {
- struct mlx5_flow_rule *src_dst =
- list_first_entry(&iter->star_rule.fte->dests,
- struct mlx5_flow_rule, base.list);
- struct mlx5_flow_table *prev_ft = src_dst->dest_attr.ft;
-
- if (prev_ft == next_ft)
- continue;
-
- err = fs_set_star_rule(dev, iter, next_ft);
- if (err) {
- mlx5_core_warn(dev,
- "mlx5: flow steering can't connect prev and next\n");
- goto unlock;
- } else {
- /* Assume ft's prio is locked */
- if (prev_ft) {
- struct fs_prio *prio;
-
- fs_get_parent(prio, prev_ft);
- if (prio == locked_prio)
- fs_put_parent_locked(&prev_ft->base);
- else
- fs_put(&prev_ft->base);
- }
- }
- }
-
-unlock:
- mutex_unlock(&prev_prio->base.lock);
- return 0;
-}
-
-static int create_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
-{
- struct mlx5_flow_group *fg;
- int err;
- u32 *fg_in;
- u32 *match_value;
- struct mlx5_flow_table *next_ft;
- struct mlx5_flow_table *prev_ft;
- struct mlx5_flow_root_namespace *root = find_root(&prio->base);
- int fg_inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
-
- fg_in = mlx5_vzalloc(fg_inlen);
- if (!fg_in) {
- mlx5_core_warn(root->dev, "failed to allocate inbox\n");
- return -ENOMEM;
- }
-
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(root->dev, "failed to allocate inbox\n");
- kvfree(fg_in);
- return -ENOMEM;
- }
-
- MLX5_SET(create_flow_group_in, fg_in, start_flow_index, ft->max_fte);
- MLX5_SET(create_flow_group_in, fg_in, end_flow_index, ft->max_fte);
- fg = fs_alloc_fg(fg_in);
- if (IS_ERR(fg)) {
- err = PTR_ERR(fg);
- goto out;
- }
- ft->star_rule.fg = fg;
- err = mlx5_cmd_fs_create_fg(fs_get_dev(&prio->base),
- fg_in, ft->vport, ft->type,
- ft->id,
- &fg->id);
- if (err)
- goto free_fg;
-
- ft->star_rule.fte = alloc_star_ft_entry(ft, fg,
- match_value,
- ft->max_fte);
- if (IS_ERR(ft->star_rule.fte))
- goto free_star_rule;
-
- mutex_lock(&root->fs_chain_lock);
- next_ft = find_next_ft(prio);
- err = fs_set_star_rule(root->dev, ft, next_ft);
- if (err) {
- mutex_unlock(&root->fs_chain_lock);
- goto free_star_rule;
- }
- if (next_ft) {
- struct fs_prio *parent;
-
- fs_get_parent(parent, next_ft);
- fs_put(&next_ft->base);
- }
- prev_ft = find_prev_ft(ft, prio);
- if (prev_ft) {
- struct fs_prio *prev_parent;
-
- fs_get_parent(prev_parent, prev_ft);
-
- err = connect_prev_fts(NULL, prev_parent, ft);
- if (err) {
- mutex_unlock(&root->fs_chain_lock);
- goto destroy_chained_star_rule;
- }
- fs_put(&prev_ft->base);
- }
- mutex_unlock(&root->fs_chain_lock);
- kvfree(fg_in);
- kvfree(match_value);
-
- return 0;
-
-destroy_chained_star_rule:
- fs_set_star_rule(fs_get_dev(&prio->base), ft, NULL);
- if (next_ft)
- fs_put(&next_ft->base);
-free_star_rule:
- free_star_fte_entry(ft->star_rule.fte);
- mlx5_cmd_fs_destroy_fg(fs_get_dev(&ft->base), ft->vport,
- ft->type, ft->id,
- fg->id);
-free_fg:
- kfree(fg);
-out:
- kvfree(fg_in);
- kvfree(match_value);
- return err;
-}
-
-static void destroy_star_rule(struct mlx5_flow_table *ft, struct fs_prio *prio)
-{
- int err;
- struct mlx5_flow_root_namespace *root;
- struct mlx5_core_dev *dev = fs_get_dev(&prio->base);
- struct mlx5_flow_table *prev_ft, *next_ft;
- struct fs_prio *prev_prio;
-
- WARN_ON(!dev);
-
- root = find_root(&prio->base);
- if (!root)
- mlx5_core_err(dev,
- "flow steering failed to find root of priority %s",
- prio->base.name);
-
- /* In order to ensure atomic deletion, first update
- * prev ft to point to the next ft.
- */
- mutex_lock(&root->fs_chain_lock);
- prev_ft = find_prev_ft(ft, prio);
- next_ft = find_next_ft(prio);
- if (prev_ft) {
- fs_get_parent(prev_prio, prev_ft);
- /*Prev is connected to ft, only if ft is the first(last) in the prio*/
- err = connect_prev_fts(prio, prev_prio, next_ft);
- if (err)
- mlx5_core_warn(root->dev,
- "flow steering can't connect prev and next of flow table\n");
- fs_put(&prev_ft->base);
- }
-
- err = fs_set_star_rule(root->dev, ft, NULL);
- /*One put is for fs_get in find next ft*/
- if (next_ft) {
- fs_put(&next_ft->base);
- if (!err)
- fs_put(&next_ft->base);
- }
-
- mutex_unlock(&root->fs_chain_lock);
- err = mlx5_cmd_fs_destroy_fg(dev, ft->vport, ft->type, ft->id,
- ft->star_rule.fg->id);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't destroy star entry group(index:%d) of ft:%s\n", ft->star_rule.fg->start_index,
- ft->base.name);
- free_star_fte_entry(ft->star_rule.fte);
-
- kfree(ft->star_rule.fg);
- ft->star_rule.fg = NULL;
-}
-
-static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
- unsigned int prio)
-{
- struct fs_prio *iter_prio;
-
- fs_for_each_prio(iter_prio, ns) {
- if (iter_prio->prio == prio)
- return iter_prio;
- }
-
- return NULL;
-}
-
-static unsigned int _alloc_new_level(struct fs_prio *prio,
- struct mlx5_flow_namespace *match);
-
-static unsigned int __alloc_new_level(struct mlx5_flow_namespace *ns,
- struct fs_prio *prio)
-{
- unsigned int level = 0;
- struct fs_prio *p;
-
- if (!ns)
- return 0;
-
- mutex_lock(&ns->base.lock);
- fs_for_each_prio(p, ns) {
- if (p != prio)
- level += p->max_ft;
- else
- break;
- }
- mutex_unlock(&ns->base.lock);
-
- fs_get_parent(prio, ns);
- if (prio)
- WARN_ON(prio->base.type != FS_TYPE_PRIO);
-
- return level + _alloc_new_level(prio, ns);
-}
-
-/* Called under lock of priority, hence locking all upper objects */
-static unsigned int _alloc_new_level(struct fs_prio *prio,
- struct mlx5_flow_namespace *match)
-{
- struct mlx5_flow_namespace *ns;
- struct fs_base *it;
- unsigned int level = 0;
-
- if (!prio)
- return 0;
-
- mutex_lock(&prio->base.lock);
- fs_for_each_ns_or_ft_reverse(it, prio) {
- if (it->type == FS_TYPE_NAMESPACE) {
- struct fs_prio *p;
-
- fs_get_obj(ns, it);
-
- if (match != ns) {
- mutex_lock(&ns->base.lock);
- fs_for_each_prio(p, ns)
- level += p->max_ft;
- mutex_unlock(&ns->base.lock);
- } else {
- break;
- }
- } else {
- struct mlx5_flow_table *ft;
-
- fs_get_obj(ft, it);
- mutex_unlock(&prio->base.lock);
- return level + ft->level + 1;
- }
- }
-
- fs_get_parent(ns, prio);
- mutex_unlock(&prio->base.lock);
- return __alloc_new_level(ns, prio) + level;
-}
-
-static unsigned int alloc_new_level(struct fs_prio *prio)
-{
- return _alloc_new_level(prio, NULL);
-}
-
-static int update_root_ft_create(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_table *ft)
-{
- int err = 0;
- int min_level = INT_MAX;
-
- if (root->root_ft)
- min_level = root->root_ft->level;
-
- if (ft->level < min_level)
- err = mlx5_cmd_update_root_ft(root->dev, ft->type,
- ft->id);
- else
- return err;
-
- if (err)
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
- else
- root->root_ft = ft;
-
- return err;
-}
-
-static struct mlx5_flow_table *_create_ft_common(struct mlx5_flow_namespace *ns,
- u16 vport,
- struct fs_prio *fs_prio,
- int max_fte,
- const char *name)
-{
- struct mlx5_flow_table *ft;
- int err;
- int log_table_sz;
- int ft_size;
- char gen_name[20];
- struct mlx5_flow_root_namespace *root = find_root(&ns->base);
- struct mlx5_core_dev *dev = fs_get_dev(&ns->base);
-
- if (!root) {
- mlx5_core_err(dev,
- "flow steering failed to find root of namespace %s",
- ns->base.name);
- return ERR_PTR(-ENODEV);
- }
-
- if (fs_prio->num_ft == fs_prio->max_ft)
- return ERR_PTR(-ENOSPC);
-
- ft = kzalloc(sizeof(*ft), GFP_KERNEL);
- if (!ft)
- return ERR_PTR(-ENOMEM);
-
- fs_init_node(&ft->base, 1);
- INIT_LIST_HEAD(&ft->fgs);
-
- /* Temporarily WA until we expose the level set in the API */
- if (root->table_type == FS_FT_ESW_EGRESS_ACL ||
- root->table_type == FS_FT_ESW_INGRESS_ACL)
- ft->level = 0;
- else
- ft->level = alloc_new_level(fs_prio);
-
- ft->base.type = FS_TYPE_FLOW_TABLE;
- ft->vport = vport;
- ft->type = root->table_type;
- /*Two entries are reserved for star rules*/
- ft_size = roundup_pow_of_two(max_fte + 2);
- /*User isn't aware of those rules*/
- ft->max_fte = ft_size - 2;
- log_table_sz = ilog2(ft_size);
-
- if (name == NULL || name[0] == '\0') {
- snprintf(gen_name, sizeof(gen_name), "flow_table_%u", ft->id);
- name = gen_name;
- }
-
- err = mlx5_cmd_fs_create_ft(root->dev, ft->vport, ft->type,
- ft->level, log_table_sz, name, &ft->id);
- if (err)
- goto free_ft;
-
- err = create_star_rule(ft, fs_prio);
- if (err)
- goto del_ft;
-
- if ((root->table_type == FS_FT_NIC_RX) && MLX5_CAP_FLOWTABLE(root->dev,
- flow_table_properties_nic_receive.modify_root)) {
- err = update_root_ft_create(root, ft);
- if (err)
- goto destroy_star_rule;
- }
-
- _fs_add_node(&ft->base, name, &fs_prio->base);
-
- list_add_tail(&ft->base.list, &fs_prio->objs);
- fs_prio->num_ft++;
-
- return ft;
-
-destroy_star_rule:
- destroy_star_rule(ft, fs_prio);
-del_ft:
- mlx5_cmd_fs_destroy_ft(root->dev, ft->vport, ft->type, ft->id);
-free_ft:
- kfree(ft);
- return ERR_PTR(err);
-}
-
-static struct mlx5_flow_table *create_ft_common(struct mlx5_flow_namespace *ns,
- u16 vport,
- unsigned int prio,
- int max_fte,
- const char *name)
-{
- struct fs_prio *fs_prio = NULL;
- fs_prio = find_prio(ns, prio);
- if (!fs_prio)
- return ERR_PTR(-EINVAL);
-
- return _create_ft_common(ns, vport, fs_prio, max_fte, name);
-}
-
-
-static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
- struct list_head *start);
-
-static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
- struct list_head *start);
-
-static struct mlx5_flow_table *mlx5_create_autogrouped_shared_flow_table(struct fs_prio *fs_prio)
-{
- struct mlx5_flow_table *ft;
-
- ft = find_first_ft_in_prio(fs_prio, &fs_prio->objs);
- if (ft) {
- ft->shared_refcount++;
- return ft;
- }
-
- return NULL;
-}
-
-struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- const char *name,
- int num_flow_table_entries,
- int max_num_groups,
- int num_reserved_entries)
-{
- struct mlx5_flow_table *ft = NULL;
- struct fs_prio *fs_prio;
- bool is_shared_prio;
-
- if (max_num_groups > (num_flow_table_entries - num_reserved_entries))
- return ERR_PTR(-EINVAL);
- if (num_reserved_entries > num_flow_table_entries)
- return ERR_PTR(-EINVAL);
-
- fs_prio = find_prio(ns, prio);
- if (!fs_prio)
- return ERR_PTR(-EINVAL);
-
- is_shared_prio = fs_prio->flags & MLX5_CORE_FS_PRIO_SHARED;
- if (is_shared_prio) {
- mutex_lock(&fs_prio->shared_lock);
- ft = mlx5_create_autogrouped_shared_flow_table(fs_prio);
- }
-
- if (ft)
- goto return_ft;
-
- ft = create_ft_common(ns, 0, prio, num_flow_table_entries,
- name);
- if (IS_ERR(ft))
- goto return_ft;
-
- ft->autogroup.active = true;
- ft->autogroup.max_types = max_num_groups;
- ft->autogroup.max_fte = num_flow_table_entries - num_reserved_entries;
- /* We save place for flow groups in addition to max types */
- ft->autogroup.group_size = ft->autogroup.max_fte / (max_num_groups + 1);
-
- if (is_shared_prio)
- ft->shared_refcount = 1;
-
-return_ft:
- if (is_shared_prio)
- mutex_unlock(&fs_prio->shared_lock);
- return ft;
-}
-EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-
-struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
- u16 vport,
- int prio,
- const char *name,
- int num_flow_table_entries)
-{
- return create_ft_common(ns, vport, prio, num_flow_table_entries, name);
-}
-EXPORT_SYMBOL(mlx5_create_vport_flow_table);
-
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- const char *name,
- int num_flow_table_entries)
-{
- return create_ft_common(ns, 0, prio, num_flow_table_entries, name);
-}
-EXPORT_SYMBOL(mlx5_create_flow_table);
-
-static void _fs_del_ft(struct mlx5_flow_table *ft)
-{
- int err;
- struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
- struct fs_prio *prio;
-
- err = mlx5_cmd_fs_destroy_ft(dev, ft->vport, ft->type, ft->id);
- if (err)
- mlx5_core_warn(dev, "flow steering can't destroy ft %s\n",
- ft->base.name);
-
- fs_get_parent(prio, ft);
- prio->num_ft--;
-}
-
-static int update_root_ft_destroy(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_table *ft)
-{
- int err = 0;
- struct fs_prio *prio;
- struct mlx5_flow_table *next_ft = NULL;
- struct mlx5_flow_table *put_ft = NULL;
-
- if (root->root_ft != ft)
- return 0;
-
- fs_get_parent(prio, ft);
- /*Assuming objs contains only flow tables and
- * flow tables are sorted by level.
- */
- if (!list_is_last(&ft->base.list, &prio->objs)) {
- next_ft = list_next_entry(ft, base.list);
- } else {
- next_ft = find_next_ft(prio);
- put_ft = next_ft;
- }
-
- if (next_ft) {
- err = mlx5_cmd_update_root_ft(root->dev, next_ft->type,
- next_ft->id);
- if (err)
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
- }
- if (!err)
- root->root_ft = next_ft;
-
- if (put_ft)
- fs_put(&put_ft->base);
-
- return err;
-}
-
-/*Objects in the same prio are destroyed in the reverse order they were created*/
-int mlx5_destroy_flow_table(struct mlx5_flow_table *ft)
-{
- int err = 0;
- struct fs_prio *prio;
- struct mlx5_flow_root_namespace *root;
- bool is_shared_prio;
- struct mlx5_core_dev *dev;
-
- fs_get_parent(prio, ft);
- root = find_root(&prio->base);
- dev = fs_get_dev(&prio->base);
-
- if (!root) {
- mlx5_core_err(dev,
- "flow steering failed to find root of priority %s",
- prio->base.name);
- return -ENODEV;
- }
-
- is_shared_prio = prio->flags & MLX5_CORE_FS_PRIO_SHARED;
- if (is_shared_prio) {
- mutex_lock(&prio->shared_lock);
- if (ft->shared_refcount > 1) {
- --ft->shared_refcount;
- fs_put(&ft->base);
- mutex_unlock(&prio->shared_lock);
- return 0;
- }
- }
-
- mutex_lock(&prio->base.lock);
- mutex_lock(&ft->base.lock);
-
- err = update_root_ft_destroy(root, ft);
- if (err)
- goto unlock_ft;
-
- /* delete two last entries */
- destroy_star_rule(ft, prio);
-
- mutex_unlock(&ft->base.lock);
- fs_remove_node_parent_locked(&ft->base);
- mutex_unlock(&prio->base.lock);
- if (is_shared_prio)
- mutex_unlock(&prio->shared_lock);
-
- return err;
-
-unlock_ft:
- mutex_unlock(&ft->base.lock);
- mutex_unlock(&prio->base.lock);
- if (is_shared_prio)
- mutex_unlock(&prio->shared_lock);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_table);
-
-static struct mlx5_flow_group *fs_create_fg(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- struct list_head *prev,
- u32 *fg_in,
- int refcount)
-{
- struct mlx5_flow_group *fg;
- unsigned int group_size;
- int err;
- char name[20];
-
- fg = fs_alloc_fg(fg_in);
- if (IS_ERR(fg))
- return fg;
-
- group_size = MLX5_GET(create_flow_group_in, fg_in, end_flow_index) -
- MLX5_GET(create_flow_group_in, fg_in, start_flow_index) + 1;
- err = mlx5_cmd_fs_create_fg(dev, fg_in,
- ft->vport, ft->type, ft->id,
- &fg->id);
- if (err)
- goto free_fg;
-
- mutex_lock(&ft->base.lock);
-
- if (ft->autogroup.active && group_size == ft->autogroup.group_size)
- ft->autogroup.num_types++;
-
- snprintf(name, sizeof(name), "group_%u", fg->id);
- /*Add node to tree*/
- fs_add_node(&fg->base, &ft->base, name, refcount);
- /*Add node to group list*/
- list_add(&fg->base.list, prev);
- mutex_unlock(&ft->base.lock);
-
- return fg;
-
-free_fg:
- kfree(fg);
- return ERR_PTR(err);
-}
-
-struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
- u32 *in)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev = fs_get_dev(&ft->base);
- unsigned int start_index;
-
- start_index = MLX5_GET(create_flow_group_in, in, start_flow_index);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- if (ft->autogroup.active && start_index < ft->autogroup.max_fte)
- return ERR_PTR(-EPERM);
-
- fg = fs_create_fg(dev, ft, ft->fgs.prev, in, 1);
-
- return fg;
-}
-EXPORT_SYMBOL(mlx5_create_flow_group);
-
-/*Group is destroyed when all the rules in the group have been removed*/
-static void fs_del_fg(struct mlx5_flow_group *fg)
-{
- struct mlx5_flow_table *parent_ft;
- struct mlx5_core_dev *dev;
-
- fs_get_parent(parent_ft, fg);
- dev = fs_get_dev(&parent_ft->base);
- WARN_ON(!dev);
-
- if (parent_ft->autogroup.active &&
- fg->max_ftes == parent_ft->autogroup.group_size &&
- fg->start_index < parent_ft->autogroup.max_fte)
- parent_ft->autogroup.num_types--;
-
- if (mlx5_cmd_fs_destroy_fg(dev, parent_ft->vport,
- parent_ft->type,
- parent_ft->id, fg->id))
- mlx5_core_warn(dev, "flow steering can't destroy fg\n");
-}
-
-void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
-{
- fs_remove_node(&fg->base);
-}
-EXPORT_SYMBOL(mlx5_destroy_flow_group);
-
-static bool _fs_match_exact_val(void *mask, void *val1, void *val2, size_t size)
-{
- unsigned int i;
-
- /* TODO: optimize by comparing 64bits when possible */
- for (i = 0; i < size; i++, mask++, val1++, val2++)
- if ((*((u8 *)val1) & (*(u8 *)mask)) !=
- ((*(u8 *)val2) & (*(u8 *)mask)))
- return false;
-
- return true;
-}
-
-bool fs_match_exact_val(struct mlx5_core_fs_mask *mask,
- void *val1, void *val2)
-{
- if (mask->match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
- void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
- val1, outer_headers);
- void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
- val2, outer_headers);
- void *fte_mask = MLX5_ADDR_OF(fte_match_param,
- mask->match_criteria, outer_headers);
-
- if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
- MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
- return false;
- }
-
- if (mask->match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
- void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
- val1, misc_parameters);
- void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
- val2, misc_parameters);
- void *fte_mask = MLX5_ADDR_OF(fte_match_param,
- mask->match_criteria, misc_parameters);
-
- if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
- MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- return false;
- }
- if (mask->match_criteria_enable &
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
- void *fte_match1 = MLX5_ADDR_OF(fte_match_param,
- val1, inner_headers);
- void *fte_match2 = MLX5_ADDR_OF(fte_match_param,
- val2, inner_headers);
- void *fte_mask = MLX5_ADDR_OF(fte_match_param,
- mask->match_criteria, inner_headers);
-
- if (!_fs_match_exact_val(fte_mask, fte_match1, fte_match2,
- MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)))
- return false;
- }
- return true;
-}
-
-bool fs_match_exact_mask(u8 match_criteria_enable1,
- u8 match_criteria_enable2,
- void *mask1, void *mask2)
-{
- return match_criteria_enable1 == match_criteria_enable2 &&
- !memcmp(mask1, mask2, MLX5_ST_SZ_BYTES(fte_match_param));
-}
-
-static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
- struct list_head *start);
-
-static struct mlx5_flow_table *_find_first_ft_in_prio_reverse(struct fs_prio *prio,
- struct list_head *start)
-{
- struct fs_base *it = container_of(start, struct fs_base, list);
-
- if (!prio)
- return NULL;
-
- fs_for_each_ns_or_ft_continue_reverse(it, prio) {
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft;
-
- if (it->type == FS_TYPE_FLOW_TABLE) {
- fs_get_obj(ft, it);
- fs_get(&ft->base);
- return ft;
- }
-
- fs_get_obj(ns, it);
- WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
-
- ft = find_first_ft_in_ns_reverse(ns, &ns->prios);
- if (ft)
- return ft;
- }
-
- return NULL;
-}
-
-static struct mlx5_flow_table *find_first_ft_in_prio_reverse(struct fs_prio *prio,
- struct list_head *start)
-{
- struct mlx5_flow_table *ft;
-
- if (!prio)
- return NULL;
-
- mutex_lock(&prio->base.lock);
- ft = _find_first_ft_in_prio_reverse(prio, start);
- mutex_unlock(&prio->base.lock);
-
- return ft;
-}
-
-static struct mlx5_flow_table *find_first_ft_in_ns_reverse(struct mlx5_flow_namespace *ns,
- struct list_head *start)
-{
- struct fs_prio *prio;
-
- if (!ns)
- return NULL;
-
- fs_get_obj(prio, container_of(start, struct fs_base, list));
- mutex_lock(&ns->base.lock);
- fs_for_each_prio_continue_reverse(prio, ns) {
- struct mlx5_flow_table *ft;
-
- ft = find_first_ft_in_prio_reverse(prio, &prio->objs);
- if (ft) {
- mutex_unlock(&ns->base.lock);
- return ft;
- }
- }
- mutex_unlock(&ns->base.lock);
-
- return NULL;
-}
-
-/* Returns a held ft; assumes curr is protected and curr's parent is
- * locked.
- */
-static struct mlx5_flow_table *find_prev_ft(struct mlx5_flow_table *curr,
- struct fs_prio *prio)
-{
- struct mlx5_flow_table *ft = NULL;
- struct fs_base *curr_base;
-
- if (!curr)
- return NULL;
-
- /* prio has either namespace or flow-tables, but not both */
- if (!list_empty(&prio->objs) &&
- list_first_entry(&prio->objs, struct mlx5_flow_table, base.list) !=
- curr)
- return NULL;
-
- while (!ft && prio) {
- struct mlx5_flow_namespace *ns;
-
- fs_get_parent(ns, prio);
- ft = find_first_ft_in_ns_reverse(ns, &prio->base.list);
- curr_base = &ns->base;
- fs_get_parent(prio, ns);
-
- if (prio && !ft)
- ft = find_first_ft_in_prio_reverse(prio,
- &curr_base->list);
- }
- return ft;
-}
-
-static struct mlx5_flow_table *_find_first_ft_in_prio(struct fs_prio *prio,
- struct list_head *start)
-{
- struct fs_base *it = container_of(start, struct fs_base, list);
-
- if (!prio)
- return NULL;
-
- fs_for_each_ns_or_ft_continue(it, prio) {
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_table *ft;
-
- if (it->type == FS_TYPE_FLOW_TABLE) {
- fs_get_obj(ft, it);
- fs_get(&ft->base);
- return ft;
- }
-
- fs_get_obj(ns, it);
- WARN_ON(ns->base.type != FS_TYPE_NAMESPACE);
-
- ft = find_first_ft_in_ns(ns, &ns->prios);
- if (ft)
- return ft;
- }
-
- return NULL;
-}
-
-static struct mlx5_flow_table *find_first_ft_in_prio(struct fs_prio *prio,
- struct list_head *start)
-{
- struct mlx5_flow_table *ft;
-
- if (!prio)
- return NULL;
-
- mutex_lock(&prio->base.lock);
- ft = _find_first_ft_in_prio(prio, start);
- mutex_unlock(&prio->base.lock);
-
- return ft;
-}
-
-static struct mlx5_flow_table *find_first_ft_in_ns(struct mlx5_flow_namespace *ns,
- struct list_head *start)
-{
- struct fs_prio *prio;
-
- if (!ns)
- return NULL;
-
- fs_get_obj(prio, container_of(start, struct fs_base, list));
- mutex_lock(&ns->base.lock);
- fs_for_each_prio_continue(prio, ns) {
- struct mlx5_flow_table *ft;
-
- ft = find_first_ft_in_prio(prio, &prio->objs);
- if (ft) {
- mutex_unlock(&ns->base.lock);
- return ft;
- }
- }
- mutex_unlock(&ns->base.lock);
-
- return NULL;
-}
-
-/* Returns a held ft. Assumes curr is protected and curr's parent is
- * locked.
- */
-static struct mlx5_flow_table *find_next_ft(struct fs_prio *prio)
-{
- struct mlx5_flow_table *ft = NULL;
- struct fs_base *curr_base;
-
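-	/* Forward counterpart of find_prev_ft(): walk up the tree until a
-	 * table is found.
-	 */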
- while (!ft && prio) {
- struct mlx5_flow_namespace *ns;
-
- fs_get_parent(ns, prio);
- ft = find_first_ft_in_ns(ns, &prio->base.list);
- curr_base = &ns->base;
- fs_get_parent(prio, ns);
-
- if (!ft && prio)
- ft = _find_first_ft_in_prio(prio, &curr_base->list);
- }
- return ft;
-}
-
-/* called under ft mutex lock */
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria)
-{
- unsigned int group_size;
- unsigned int candidate_index = 0;
- struct mlx5_flow_group *g;
- struct mlx5_flow_group *ret;
- struct list_head *prev = &ft->fgs;
- struct mlx5_core_dev *dev;
- u32 *in;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- void *match_criteria_addr;
- u32 max_fte = ft->autogroup.max_fte;
-
- if (!ft->autogroup.active)
- return ERR_PTR(-ENOENT);
-
- dev = fs_get_dev(&ft->base);
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- in = mlx5_vzalloc(inlen);
- if (!in) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
- return ERR_PTR(-ENOMEM);
- }
-
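-	/* While unused group types remain, carve groups of the configured
-	 * size; once the type budget is exhausted, fall back to a single
-	 * FTE per group.
-	 */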
- if (ft->autogroup.num_types < ft->autogroup.max_types)
- group_size = ft->autogroup.group_size;
- else
- group_size = 1;
-
- if (group_size == 0) {
- mlx5_core_warn(dev,
- "flow steering can't create group size of 0\n");
- ret = ERR_PTR(-EINVAL);
- goto out;
- }
-
-	/* groups are kept sorted by start_index; find the first gap that fits */
- fs_for_each_fg(g, ft) {
- if (candidate_index + group_size > g->start_index)
- candidate_index = g->start_index + g->max_ftes;
- else
- break;
- prev = &g->base.list;
- }
-
- if (candidate_index + group_size > max_fte) {
- ret = ERR_PTR(-ENOSPC);
- goto out;
- }
-
- MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable);
- MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
- MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
- group_size - 1);
- match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
- in, match_criteria);
- memcpy(match_criteria_addr, match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
-
- ret = fs_create_fg(dev, ft, prev, in, 0);
-out:
- kvfree(in);
- return ret;
-}
-
-static struct mlx5_flow_namespace *get_ns_with_notifiers(struct fs_base *node)
-{
- struct mlx5_flow_namespace *ns = NULL;
-
- while (node && (node->type != FS_TYPE_NAMESPACE ||
- list_empty(&container_of(node, struct
- mlx5_flow_namespace,
- base)->list_notifiers)))
- node = node->parent;
-
- if (node)
- fs_get_obj(ns, node);
-
- return ns;
-}
-
-
-/* Assumption: fte is locked */
-static void call_to_add_rule_notifiers(struct mlx5_flow_rule *dst,
- struct fs_fte *fte)
-{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handler *iter_handler;
- struct fs_client_priv_data *iter_client;
- void *data;
- bool is_new_rule = list_first_entry(&fte->dests,
- struct mlx5_flow_rule,
- base.list) == dst;
- int err;
-
- ns = get_ns_with_notifiers(&fte->base);
- if (!ns)
- return;
-
- down_read(&ns->notifiers_rw_sem);
- list_for_each_entry(iter_handler, &ns->list_notifiers,
- list) {
- if (iter_handler->add_dst_cb) {
- data = NULL;
- mutex_lock(&dst->clients_lock);
- list_for_each_entry(
- iter_client, &dst->clients_data, list) {
- if (iter_client->fs_handler == iter_handler) {
- data = iter_client->client_dst_data;
- break;
- }
- }
- mutex_unlock(&dst->clients_lock);
- err = iter_handler->add_dst_cb(dst,
- is_new_rule,
- data,
- iter_handler->client_context);
- if (err)
- break;
- }
- }
- up_read(&ns->notifiers_rw_sem);
-}
-
-static void call_to_del_rule_notifiers(struct mlx5_flow_rule *dst,
- struct fs_fte *fte)
-{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handler *iter_handler;
- struct fs_client_priv_data *iter_client;
- void *data;
- bool ctx_changed = (fte->dests_size == 0);
-
- ns = get_ns_with_notifiers(&fte->base);
- if (!ns)
- return;
- down_read(&ns->notifiers_rw_sem);
- list_for_each_entry(iter_handler, &ns->list_notifiers,
- list) {
- data = NULL;
- mutex_lock(&dst->clients_lock);
- list_for_each_entry(iter_client, &dst->clients_data, list) {
- if (iter_client->fs_handler == iter_handler) {
- data = iter_client->client_dst_data;
- break;
- }
- }
- mutex_unlock(&dst->clients_lock);
- if (iter_handler->del_dst_cb) {
- iter_handler->del_dst_cb(dst, ctx_changed, data,
- iter_handler->client_context);
- }
- }
- up_read(&ns->notifiers_rw_sem);
-}
-
-/* fte should not be deleted while calling this function */
-static struct mlx5_flow_rule *_fs_add_dst_fte(struct fs_fte *fte,
- struct mlx5_flow_group *fg,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_table *ft;
- struct mlx5_flow_rule *dst;
- int err;
-
- dst = kzalloc(sizeof(*dst), GFP_KERNEL);
- if (!dst)
- return ERR_PTR(-ENOMEM);
-
- memcpy(&dst->dest_attr, dest, sizeof(*dest));
- dst->base.type = FS_TYPE_FLOW_DEST;
- INIT_LIST_HEAD(&dst->clients_data);
- mutex_init(&dst->clients_lock);
- fs_get_parent(ft, fg);
-	/* Append dest to the tail of the fte's dests list */
- list_add_tail(&dst->base.list, &fte->dests);
- fte->dests_size++;
- err = mlx5_cmd_fs_set_fte(fs_get_dev(&ft->base),
- ft->vport,
- &fte->status,
- fte->val, ft->type,
- ft->id, fte->index, fg->id, &fte->flow_act,
- fte->sw_action, fte->dests_size, &fte->dests);
- if (err)
- goto free_dst;
-
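-	/* Unlink for now; the caller re-links the rule onto fte->dests once
-	 * the tree node is set up (see add_rule_to_tree()).
-	 */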
- list_del(&dst->base.list);
-
- return dst;
-
-free_dst:
- list_del(&dst->base.list);
- kfree(dst);
- fte->dests_size--;
- return ERR_PTR(err);
-}
-
-static char *get_dest_name(struct mlx5_flow_destination *dest)
-{
-	char *name = kzalloc(20, GFP_KERNEL);
-
-	if (!name)
-		return NULL;
-
- switch (dest->type) {
- case MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE:
- snprintf(name, 20, "dest_%s_%u", "flow_table",
- dest->ft->id);
- return name;
- case MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT:
- snprintf(name, 20, "dest_%s_%u", "vport",
- dest->vport_num);
- return name;
- case MLX5_FLOW_CONTEXT_DEST_TYPE_TIR:
- snprintf(name, 20, "dest_%s_%u", "tir", dest->tir_num);
- return name;
- default:
- kfree(name);
- return NULL;
- }
-}
-
-/* assumes fg is locked */
-static unsigned int fs_get_free_fg_index(struct mlx5_flow_group *fg,
- struct list_head **prev)
-{
- struct fs_fte *fte;
- unsigned int start = fg->start_index;
-
- if (prev)
- *prev = &fg->ftes;
-
- /* assumed list is sorted by index */
- fs_for_each_fte(fte, fg) {
- if (fte->index != start)
- return start;
- start++;
- if (prev)
- *prev = &fte->base.list;
- }
-
- return start;
-}
-
-
-static struct fs_fte *fs_create_fte(struct mlx5_flow_group *fg,
- u32 *match_value,
- u32 sw_action,
- struct mlx5_flow_act *flow_act,
- struct list_head **prev)
-{
- struct fs_fte *fte;
- int index = 0;
-
- index = fs_get_free_fg_index(fg, prev);
- fte = fs_alloc_fte(sw_action, flow_act, match_value, index);
- if (IS_ERR(fte))
- return fte;
-
- return fte;
-}
-
-static void add_rule_to_tree(struct mlx5_flow_rule *rule,
- struct fs_fte *fte)
-{
- char *dest_name;
-
- dest_name = get_dest_name(&rule->dest_attr);
- fs_add_node(&rule->base, &fte->base, dest_name, 1);
-	/* re-add to the list, since fs_add_node() resets our list entry */
- list_add_tail(&rule->base.list, &fte->dests);
- kfree(dest_name);
- call_to_add_rule_notifiers(rule, fte);
-}
-
-static void fs_del_dst(struct mlx5_flow_rule *dst)
-{
- struct mlx5_flow_table *ft;
- struct mlx5_flow_group *fg;
- struct fs_fte *fte;
- u32 *match_value;
- struct mlx5_core_dev *dev = fs_get_dev(&dst->base);
- int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
- int err;
-
- WARN_ON(!dev);
-
- match_value = mlx5_vzalloc(match_len);
- if (!match_value) {
- mlx5_core_warn(dev, "failed to allocate inbox\n");
- return;
- }
-
- fs_get_parent(fte, dst);
- fs_get_parent(fg, fte);
- sx_assert(&fg->base.lock.sx, SX_XLOCKED);
- memcpy(match_value, fte->val, sizeof(fte->val));
- /* ft can't be changed as fg is locked */
- fs_get_parent(ft, fg);
- list_del(&dst->base.list);
- fte->dests_size--;
- if (fte->dests_size) {
- err = mlx5_cmd_fs_set_fte(dev, ft->vport,
- &fte->status, match_value, ft->type,
- ft->id, fte->index, fg->id,
- &fte->flow_act, fte->sw_action,
- fte->dests_size, &fte->dests);
- if (err) {
- mlx5_core_warn(dev, "%s can't delete dst %s\n",
- __func__, dst->base.name);
-			goto out;
- }
- }
- call_to_del_rule_notifiers(dst, fte);
-out:
- kvfree(match_value);
-}
-
-static void fs_del_fte(struct fs_fte *fte)
-{
- struct mlx5_flow_table *ft;
- struct mlx5_flow_group *fg;
- int err;
- struct mlx5_core_dev *dev;
-
- fs_get_parent(fg, fte);
- fs_get_parent(ft, fg);
-
- dev = fs_get_dev(&ft->base);
- WARN_ON(!dev);
-
- err = mlx5_cmd_fs_delete_fte(dev, ft->vport, &fte->status,
- ft->type, ft->id, fte->index);
- if (err)
- mlx5_core_warn(dev, "flow steering can't delete fte %s\n",
- fte->base.name);
-
- fg->num_ftes--;
-}
-
-static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
- const struct mlx5_flow_act *act2)
-{
- u32 action1 = act1->actions;
- u32 action2 = act2->actions;
- u32 xored_actions;
-
- xored_actions = action1 ^ action2;
-
- if (xored_actions & (MLX5_FLOW_ACT_ACTIONS_FLOW_TAG))
- return true;
-
- if (action1 & MLX5_FLOW_ACT_ACTIONS_FLOW_TAG &&
- act1->flow_tag != act2->flow_tag)
- return true;
-
-	/* Rules carrying complex actions can never be merged */
- if (action1 & MLX5_FLOW_ACT_ACTIONS_MODIFY_HDR)
- return true;
-
- if (action1 & MLX5_FLOW_ACT_ACTIONS_PACKET_REFORMAT)
- return true;
-
- if (action1 & MLX5_FLOW_ACT_ACTIONS_COUNT)
- return true;
-
- return false;
-}
-
-/* Add-dst algorithm; assumes the parent fg is locked. */
-static struct mlx5_flow_rule *fs_add_dst_fg(struct mlx5_flow_group *fg,
- u32 *match_value,
- u32 sw_action,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest)
-{
- struct fs_fte *fte;
- struct mlx5_flow_rule *dst;
- struct mlx5_flow_table *ft;
- struct list_head *prev;
- char fte_name[20];
-
- mutex_lock(&fg->base.lock);
- if (flow_act->flags & MLX5_FLOW_ACT_NO_APPEND)
- goto insert_fte;
-
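-	/* Try to merge into an existing FTE with the same match value and
-	 * non-conflicting actions before creating a new one.
-	 */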
- fs_for_each_fte(fte, fg) {
-		/* TODO: check the size against the PRM maximum */
- mutex_lock(&fte->base.lock);
- if (fs_match_exact_val(&fg->mask, match_value, &fte->val) &&
- sw_action == fte->sw_action &&
- !check_conflicting_actions(flow_act, &fte->flow_act)) {
- dst = _fs_add_dst_fte(fte, fg, dest);
- mutex_unlock(&fte->base.lock);
- if (IS_ERR(dst))
- goto unlock_fg;
- goto add_rule;
- }
- mutex_unlock(&fte->base.lock);
- }
-
-insert_fte:
- fs_get_parent(ft, fg);
- if (fg->num_ftes == fg->max_ftes) {
- dst = ERR_PTR(-ENOSPC);
- goto unlock_fg;
- }
-
- fte = fs_create_fte(fg, match_value, sw_action, flow_act, &prev);
- if (IS_ERR(fte)) {
- dst = (void *)fte;
- goto unlock_fg;
- }
- dst = _fs_add_dst_fte(fte, fg, dest);
- if (IS_ERR(dst)) {
- kfree(fte);
- goto unlock_fg;
- }
-
- fg->num_ftes++;
-
- snprintf(fte_name, sizeof(fte_name), "fte%u", fte->index);
- /* Add node to tree */
- fs_add_node(&fte->base, &fg->base, fte_name, 0);
- list_add(&fte->base.list, prev);
-add_rule:
- add_rule_to_tree(dst, fte);
-unlock_fg:
- mutex_unlock(&fg->base.lock);
- return dst;
-}
-
-static struct mlx5_flow_rule *fs_add_dst_ft(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 sw_action,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_group *g;
- struct mlx5_flow_rule *dst;
-
- fs_get(&ft->base);
- mutex_lock(&ft->base.lock);
- fs_for_each_fg(g, ft)
- if (fs_match_exact_mask(g->mask.match_criteria_enable,
- match_criteria_enable,
- g->mask.match_criteria,
- match_criteria)) {
- mutex_unlock(&ft->base.lock);
-
- dst = fs_add_dst_fg(g, match_value, sw_action, flow_act, dest);
- if (PTR_ERR(dst) && PTR_ERR(dst) != -ENOSPC)
- goto unlock;
- }
- mutex_unlock(&ft->base.lock);
-
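-	/* No group matched, or the matching groups were full; autogroup a
-	 * new one.
-	 */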
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
- if (IS_ERR(g)) {
- dst = (void *)g;
- goto unlock;
- }
-
- dst = fs_add_dst_fg(g, match_value,
- sw_action, flow_act, dest);
- if (IS_ERR(dst)) {
-		/* fs_remove_node() expects a refcount > 0, while autogroups
-		 * are created with a refcount of 0, so take a reference first.
-		 */
- fs_get(&g->base);
- fs_remove_node(&g->base);
- goto unlock;
- }
-
-unlock:
- fs_put(&ft->base);
- return dst;
-}
-
-struct mlx5_flow_rule *
-mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u32 sw_action,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_rule *dst;
- struct mlx5_flow_namespace *ns;
-
- ns = get_ns_with_notifiers(&ft->base);
- if (ns)
- down_read(&ns->dests_rw_sem);
- dst = fs_add_dst_ft(ft, match_criteria_enable, match_criteria,
- match_value, sw_action, flow_act, dest);
- if (ns)
- up_read(&ns->dests_rw_sem);
-
- return dst;
-}
-EXPORT_SYMBOL(mlx5_add_flow_rule);
-
-void mlx5_del_flow_rule(struct mlx5_flow_rule **pp)
-{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_rule *dst;
-
- dst = *pp;
- *pp = NULL;
-
- if (IS_ERR_OR_NULL(dst))
- return;
- ns = get_ns_with_notifiers(&dst->base);
- if (ns)
- down_read(&ns->dests_rw_sem);
- fs_remove_node(&dst->base);
- if (ns)
- up_read(&ns->dests_rw_sem);
-}
-EXPORT_SYMBOL(mlx5_del_flow_rule);
-
-#define MLX5_CORE_FS_ROOT_NS_NAME "root"
-#define MLX5_CORE_FS_ESW_EGRESS_ACL "esw_egress_root"
-#define MLX5_CORE_FS_ESW_INGRESS_ACL "esw_ingress_root"
-#define MLX5_CORE_FS_FDB_ROOT_NS_NAME "fdb_root"
-#define MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME "sniffer_rx_root"
-#define MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME "sniffer_tx_root"
-#define MLX5_CORE_FS_PRIO_MAX_FT 4
-#define MLX5_CORE_FS_PRIO_MAX_NS 1
-
-static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned prio, int max_ft,
- const char *name, u8 flags)
-{
- struct fs_prio *fs_prio;
-
- fs_prio = kzalloc(sizeof(*fs_prio), GFP_KERNEL);
- if (!fs_prio)
- return ERR_PTR(-ENOMEM);
-
- fs_prio->base.type = FS_TYPE_PRIO;
- fs_add_node(&fs_prio->base, &ns->base, name, 1);
- fs_prio->max_ft = max_ft;
- fs_prio->max_ns = MLX5_CORE_FS_PRIO_MAX_NS;
- fs_prio->prio = prio;
- fs_prio->flags = flags;
- list_add_tail(&fs_prio->base.list, &ns->prios);
- INIT_LIST_HEAD(&fs_prio->objs);
- mutex_init(&fs_prio->shared_lock);
-
- return fs_prio;
-}
-
-static void cleanup_root_ns(struct mlx5_core_dev *dev)
-{
- struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
- struct fs_prio *iter_prio;
-
- if (!root_ns)
- return;
-
-	/* stage 1: remove the sub-prios under every second-level namespace */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- struct mlx5_flow_namespace *iter_ns;
-
- fs_for_each_ns(iter_ns, iter_prio) {
- while (!list_empty(&iter_ns->prios)) {
- struct fs_base *iter_prio2 =
- list_first_entry(&iter_ns->prios,
- struct fs_base,
- list);
-
- fs_remove_node(iter_prio2);
- }
- }
- }
-
-	/* stage 2: remove the namespaces under every top-level prio */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- while (!list_empty(&iter_prio->objs)) {
- struct fs_base *iter_ns =
- list_first_entry(&iter_prio->objs,
- struct fs_base,
- list);
-
- fs_remove_node(iter_ns);
- }
- }
-	/* stage 3: remove the top-level prios of the root namespace */
- while (!list_empty(&root_ns->ns.prios)) {
- struct fs_base *iter_prio =
- list_first_entry(&root_ns->ns.prios,
- struct fs_base,
- list);
-
- fs_remove_node(iter_prio);
- }
-
- fs_remove_node(&root_ns->ns.base);
- dev->root_ns = NULL;
-}
-
-static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
- struct mlx5_flow_root_namespace *root_ns)
-{
- struct fs_base *prio;
-
- if (!root_ns)
- return;
-
- if (!list_empty(&root_ns->ns.prios)) {
- prio = list_first_entry(&root_ns->ns.prios,
- struct fs_base,
- list);
- fs_remove_node(prio);
- }
- fs_remove_node(&root_ns->ns.base);
-}
-
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
-{
- mlx5_cleanup_fc_stats(dev);
- cleanup_root_ns(dev);
- cleanup_single_prio_root_ns(dev, dev->sniffer_rx_root_ns);
- cleanup_single_prio_root_ns(dev, dev->sniffer_tx_root_ns);
- cleanup_single_prio_root_ns(dev, dev->fdb_root_ns);
- cleanup_single_prio_root_ns(dev, dev->esw_egress_root_ns);
- cleanup_single_prio_root_ns(dev, dev->esw_ingress_root_ns);
-}
-
-static struct mlx5_flow_namespace *fs_init_namespace(struct mlx5_flow_namespace
- *ns)
-{
- ns->base.type = FS_TYPE_NAMESPACE;
- init_rwsem(&ns->dests_rw_sem);
- init_rwsem(&ns->notifiers_rw_sem);
- INIT_LIST_HEAD(&ns->prios);
- INIT_LIST_HEAD(&ns->list_notifiers);
-
- return ns;
-}
-
-static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
- enum fs_ft_type
- table_type,
- char *name)
-{
- struct mlx5_flow_root_namespace *root_ns;
- struct mlx5_flow_namespace *ns;
-
- /* create the root namespace */
- root_ns = mlx5_vzalloc(sizeof(*root_ns));
- if (!root_ns)
- goto err;
-
- root_ns->dev = dev;
- root_ns->table_type = table_type;
- mutex_init(&root_ns->fs_chain_lock);
-
- ns = &root_ns->ns;
- fs_init_namespace(ns);
- fs_add_node(&ns->base, NULL, name, 1);
-
- return root_ns;
-err:
- return NULL;
-}
-
-static int init_fdb_root_ns(struct mlx5_core_dev *dev)
-{
- struct fs_prio *prio;
-
- dev->fdb_root_ns = create_root_ns(dev, FS_FT_FDB,
- MLX5_CORE_FS_FDB_ROOT_NS_NAME);
- if (!dev->fdb_root_ns)
- return -ENOMEM;
-
-	/* create a single prio */
- prio = fs_create_prio(&dev->fdb_root_ns->ns, 0, 1, "fdb_prio", 0);
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
-}
-
-#define MAX_VPORTS 128
-
-static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
-{
- struct fs_prio *prio;
-
- dev->esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL,
- MLX5_CORE_FS_ESW_EGRESS_ACL);
- if (!dev->esw_egress_root_ns)
- return -ENOMEM;
-
-	/* create a single prio */
- prio = fs_create_prio(&dev->esw_egress_root_ns->ns, 0, MAX_VPORTS,
- "esw_egress_prio", 0);
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
-}
-
-static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
-{
- struct fs_prio *prio;
-
- dev->esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL,
- MLX5_CORE_FS_ESW_INGRESS_ACL);
- if (!dev->esw_ingress_root_ns)
- return -ENOMEM;
-
-	/* create a single prio */
- prio = fs_create_prio(&dev->esw_ingress_root_ns->ns, 0, MAX_VPORTS,
- "esw_ingress_prio", 0);
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
-}
-
-static int init_sniffer_rx_root_ns(struct mlx5_core_dev *dev)
-{
- struct fs_prio *prio;
-
- dev->sniffer_rx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_RX,
- MLX5_CORE_FS_SNIFFER_RX_ROOT_NS_NAME);
- if (!dev->sniffer_rx_root_ns)
- return -ENOMEM;
-
-	/* create a single prio */
- prio = fs_create_prio(&dev->sniffer_rx_root_ns->ns, 0, 1,
- "sniffer_prio", 0);
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
-}
-
-static int init_sniffer_tx_root_ns(struct mlx5_core_dev *dev)
-{
- struct fs_prio *prio;
-
- dev->sniffer_tx_root_ns = create_root_ns(dev, FS_FT_SNIFFER_TX,
- MLX5_CORE_FS_SNIFFER_TX_ROOT_NS_NAME);
- if (!dev->sniffer_tx_root_ns)
- return -ENOMEM;
-
-	/* create a single prio */
- prio = fs_create_prio(&dev->sniffer_tx_root_ns->ns, 0, 1,
- "sniffer_prio", 0);
- if (IS_ERR(prio))
- return PTR_ERR(prio);
- else
- return 0;
-}
-
-static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio,
- const char *name)
-{
- struct mlx5_flow_namespace *ns;
-
- ns = kzalloc(sizeof(*ns), GFP_KERNEL);
- if (!ns)
- return ERR_PTR(-ENOMEM);
-
- fs_init_namespace(ns);
- fs_add_node(&ns->base, &prio->base, name, 1);
- list_add_tail(&ns->base.list, &prio->objs);
-
- return ns;
-}
-
-#define FLOW_TABLE_BIT_SZ 1
-#define GET_FLOW_TABLE_CAP(dev, offset) \
- ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
- offset / 32)) >> \
- (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
-
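-/* GET_FLOW_TABLE_CAP extracts a single capability bit from the big-endian
- * flow-table capability array at the given bit offset.
- */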
-static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
-{
- int i;
-
- for (i = 0; i < caps->arr_sz; i++) {
- if (!GET_FLOW_TABLE_CAP(dev, caps->caps[i]))
- return false;
- }
- return true;
-}
-
-static int _init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
- struct init_tree_node *node, struct fs_base *base_parent,
- struct init_tree_node *tree_parent)
-{
- struct mlx5_flow_namespace *fs_ns;
- struct fs_prio *fs_prio;
- int priority;
- struct fs_base *base;
- int i;
- int err = 0;
-
- if (node->type == FS_TYPE_PRIO) {
- if ((node->min_ft_level > max_ft_level) ||
- !has_required_caps(dev, &node->caps))
- goto out;
-
- fs_get_obj(fs_ns, base_parent);
- priority = node - tree_parent->children;
- fs_prio = fs_create_prio(fs_ns, priority,
- node->max_ft,
- node->name, node->flags);
- if (IS_ERR(fs_prio)) {
- err = PTR_ERR(fs_prio);
- goto out;
- }
- base = &fs_prio->base;
- } else if (node->type == FS_TYPE_NAMESPACE) {
- fs_get_obj(fs_prio, base_parent);
- fs_ns = fs_create_namespace(fs_prio, node->name);
- if (IS_ERR(fs_ns)) {
- err = PTR_ERR(fs_ns);
- goto out;
- }
- base = &fs_ns->base;
- } else {
- return -EINVAL;
- }
- for (i = 0; i < node->ar_size; i++) {
- err = _init_root_tree(dev, max_ft_level, &node->children[i], base,
- node);
- if (err)
- break;
- }
-out:
- return err;
-}
-
-static int init_root_tree(struct mlx5_core_dev *dev, int max_ft_level,
- struct init_tree_node *node, struct fs_base *parent)
-{
- int i;
- struct mlx5_flow_namespace *fs_ns;
- int err = 0;
-
- fs_get_obj(fs_ns, parent);
- for (i = 0; i < node->ar_size; i++) {
- err = _init_root_tree(dev, max_ft_level,
- &node->children[i], &fs_ns->base, node);
- if (err)
- break;
- }
- return err;
-}
-
-static int sum_max_ft_in_prio(struct fs_prio *prio);
-static int sum_max_ft_in_ns(struct mlx5_flow_namespace *ns)
-{
- struct fs_prio *prio;
- int sum = 0;
-
- fs_for_each_prio(prio, ns) {
- sum += sum_max_ft_in_prio(prio);
- }
- return sum;
-}
-
-static int sum_max_ft_in_prio(struct fs_prio *prio)
-{
- int sum = 0;
- struct fs_base *it;
- struct mlx5_flow_namespace *ns;
-
- if (prio->max_ft)
- return prio->max_ft;
-
- fs_for_each_ns_or_ft(it, prio) {
- if (it->type == FS_TYPE_FLOW_TABLE)
- continue;
-
- fs_get_obj(ns, it);
- sum += sum_max_ft_in_ns(ns);
- }
- prio->max_ft = sum;
- return sum;
-}
-
-static void set_max_ft(struct mlx5_flow_namespace *ns)
-{
- struct fs_prio *prio;
-
- if (!ns)
- return;
-
- fs_for_each_prio(prio, ns)
- sum_max_ft_in_prio(prio);
-}
-
-static int init_root_ns(struct mlx5_core_dev *dev)
-{
- int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
- flow_table_properties_nic_receive.
- max_ft_level);
-
- dev->root_ns = create_root_ns(dev, FS_FT_NIC_RX,
- MLX5_CORE_FS_ROOT_NS_NAME);
- if (IS_ERR_OR_NULL(dev->root_ns))
- goto err;
-
- if (init_root_tree(dev, max_ft_level, &root_fs, &dev->root_ns->ns.base))
- goto err;
-
- set_max_ft(&dev->root_ns->ns);
-
- return 0;
-err:
- return -ENOMEM;
-}
-
-u8 mlx5_get_match_criteria_enable(struct mlx5_flow_rule *rule)
-{
- struct fs_base *pbase;
- struct mlx5_flow_group *fg;
-
- pbase = rule->base.parent;
- WARN_ON(!pbase);
- pbase = pbase->parent;
- WARN_ON(!pbase);
-
- fs_get_obj(fg, pbase);
- return fg->mask.match_criteria_enable;
-}
-
-void mlx5_get_match_value(u32 *match_value,
- struct mlx5_flow_rule *rule)
-{
- struct fs_base *pbase;
- struct fs_fte *fte;
-
- pbase = rule->base.parent;
- WARN_ON(!pbase);
- fs_get_obj(fte, pbase);
-
- memcpy(match_value, fte->val, sizeof(fte->val));
-}
-
-void mlx5_get_match_criteria(u32 *match_criteria,
- struct mlx5_flow_rule *rule)
-{
- struct fs_base *pbase;
- struct mlx5_flow_group *fg;
-
- pbase = rule->base.parent;
- WARN_ON(!pbase);
- pbase = pbase->parent;
- WARN_ON(!pbase);
-
- fs_get_obj(fg, pbase);
- memcpy(match_criteria, &fg->mask.match_criteria,
- sizeof(fg->mask.match_criteria));
-}
-
-int mlx5_init_fs(struct mlx5_core_dev *dev)
-{
- int err;
-
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
- err = init_root_ns(dev);
- if (err)
- goto err;
- }
-
- err = init_fdb_root_ns(dev);
- if (err)
- goto err;
-
- err = init_egress_acl_root_ns(dev);
- if (err)
- goto err;
-
- err = init_ingress_acl_root_ns(dev);
- if (err)
- goto err;
-
- err = init_sniffer_tx_root_ns(dev);
- if (err)
- goto err;
-
- err = init_sniffer_rx_root_ns(dev);
- if (err)
- goto err;
-
- err = mlx5_init_fc_stats(dev);
- if (err)
- goto err;
-
- return 0;
-err:
- mlx5_cleanup_fs(dev);
- return err;
-}
-
-struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type type)
-{
- struct mlx5_flow_root_namespace *root_ns = dev->root_ns;
- int prio;
-	struct fs_prio *fs_prio;
- struct mlx5_flow_namespace *ns;
-
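-	/* Bypass/offloads/kernel/leftovers map to fixed prios under the
-	 * NIC RX root namespace.
-	 */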
- switch (type) {
- case MLX5_FLOW_NAMESPACE_BYPASS:
- prio = 0;
- break;
- case MLX5_FLOW_NAMESPACE_OFFLOADS:
- prio = 1;
- break;
- case MLX5_FLOW_NAMESPACE_KERNEL:
- prio = 2;
- break;
- case MLX5_FLOW_NAMESPACE_LEFTOVERS:
- prio = 3;
- break;
- case MLX5_FLOW_NAMESPACE_FDB:
- if (dev->fdb_root_ns)
- return &dev->fdb_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
- if (dev->esw_egress_root_ns)
- return &dev->esw_egress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
- if (dev->esw_ingress_root_ns)
- return &dev->esw_ingress_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
- if (dev->sniffer_rx_root_ns)
- return &dev->sniffer_rx_root_ns->ns;
- else
- return NULL;
- case MLX5_FLOW_NAMESPACE_SNIFFER_TX:
- if (dev->sniffer_tx_root_ns)
- return &dev->sniffer_tx_root_ns->ns;
- else
- return NULL;
- default:
- return NULL;
- }
-
- if (!root_ns)
- return NULL;
-
- fs_prio = find_prio(&root_ns->ns, prio);
- if (!fs_prio)
- return NULL;
-
- ns = list_first_entry(&fs_prio->objs,
- typeof(*ns),
- base.list);
-
- return ns;
-}
-EXPORT_SYMBOL(mlx5_get_flow_namespace);
-
-int mlx5_set_rule_private_data(struct mlx5_flow_rule *rule,
- struct mlx5_flow_handler *fs_handler,
- void *client_data)
-{
- struct fs_client_priv_data *priv_data;
-
- mutex_lock(&rule->clients_lock);
-	/* Check whether the handler is already in the list */
- list_for_each_entry(priv_data, &rule->clients_data, list) {
- if (priv_data->fs_handler == fs_handler) {
- priv_data->client_dst_data = client_data;
- goto unlock;
- }
- }
- priv_data = kzalloc(sizeof(*priv_data), GFP_KERNEL);
- if (!priv_data) {
- mutex_unlock(&rule->clients_lock);
- return -ENOMEM;
- }
-
- priv_data->client_dst_data = client_data;
- priv_data->fs_handler = fs_handler;
- list_add(&priv_data->list, &rule->clients_data);
-
-unlock:
- mutex_unlock(&rule->clients_lock);
-
- return 0;
-}
-
-static int remove_from_clients(struct mlx5_flow_rule *rule,
- bool ctx_changed,
- void *client_data,
- void *context)
-{
- struct fs_client_priv_data *iter_client;
- struct fs_client_priv_data *temp_client;
-	struct mlx5_flow_handler *handler = context;
-
- mutex_lock(&rule->clients_lock);
- list_for_each_entry_safe(iter_client, temp_client,
- &rule->clients_data, list) {
- if (iter_client->fs_handler == handler) {
- list_del(&iter_client->list);
- kfree(iter_client);
- break;
- }
- }
- mutex_unlock(&rule->clients_lock);
-
- return 0;
-}
-
-struct mlx5_flow_handler *mlx5_register_rule_notifier(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type ns_type,
- rule_event_fn add_cb,
- rule_event_fn del_cb,
- void *context)
-{
- struct mlx5_flow_namespace *ns;
- struct mlx5_flow_handler *handler;
-
- ns = mlx5_get_flow_namespace(dev, ns_type);
- if (!ns)
- return ERR_PTR(-EINVAL);
-
- handler = kzalloc(sizeof(*handler), GFP_KERNEL);
- if (!handler)
- return ERR_PTR(-ENOMEM);
-
- handler->add_dst_cb = add_cb;
- handler->del_dst_cb = del_cb;
- handler->client_context = context;
- handler->ns = ns;
- down_write(&ns->notifiers_rw_sem);
- list_add_tail(&handler->list, &ns->list_notifiers);
- up_write(&ns->notifiers_rw_sem);
-
- return handler;
-}
-
-static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
- rule_event_fn add_rule_cb,
- void *context);
-
-void mlx5_unregister_rule_notifier(struct mlx5_flow_handler *handler)
-{
- struct mlx5_flow_namespace *ns = handler->ns;
-
-	/* Remove this handler from every rule's client-data list */
- down_write(&ns->dests_rw_sem);
- down_write(&ns->notifiers_rw_sem);
- iterate_rules_in_ns(ns, remove_from_clients, handler);
- list_del(&handler->list);
- up_write(&ns->notifiers_rw_sem);
- up_write(&ns->dests_rw_sem);
- kfree(handler);
-}
-
-static void iterate_rules_in_ft(struct mlx5_flow_table *ft,
- rule_event_fn add_rule_cb,
- void *context)
-{
- struct mlx5_flow_group *iter_fg;
- struct fs_fte *iter_fte;
- struct mlx5_flow_rule *iter_rule;
- int err = 0;
- bool is_new_rule;
-
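-	/* Locks are taken top-down: table, then group, then FTE. */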
- mutex_lock(&ft->base.lock);
- fs_for_each_fg(iter_fg, ft) {
- mutex_lock(&iter_fg->base.lock);
- fs_for_each_fte(iter_fte, iter_fg) {
- mutex_lock(&iter_fte->base.lock);
- is_new_rule = true;
- fs_for_each_dst(iter_rule, iter_fte) {
- fs_get(&iter_rule->base);
- err = add_rule_cb(iter_rule,
- is_new_rule,
- NULL,
- context);
- fs_put_parent_locked(&iter_rule->base);
- if (err)
- break;
- is_new_rule = false;
- }
- mutex_unlock(&iter_fte->base.lock);
- if (err)
- break;
- }
- mutex_unlock(&iter_fg->base.lock);
- if (err)
- break;
- }
- mutex_unlock(&ft->base.lock);
-}
-
-static void iterate_rules_in_prio(struct fs_prio *prio,
- rule_event_fn add_rule_cb,
- void *context)
-{
- struct fs_base *it;
-
- mutex_lock(&prio->base.lock);
- fs_for_each_ns_or_ft(it, prio) {
- if (it->type == FS_TYPE_FLOW_TABLE) {
- struct mlx5_flow_table *ft;
-
- fs_get_obj(ft, it);
- iterate_rules_in_ft(ft, add_rule_cb, context);
- } else {
- struct mlx5_flow_namespace *ns;
-
- fs_get_obj(ns, it);
- iterate_rules_in_ns(ns, add_rule_cb, context);
- }
- }
- mutex_unlock(&prio->base.lock);
-}
-
-static void iterate_rules_in_ns(struct mlx5_flow_namespace *ns,
- rule_event_fn add_rule_cb,
- void *context)
-{
- struct fs_prio *iter_prio;
-
- mutex_lock(&ns->base.lock);
- fs_for_each_prio(iter_prio, ns) {
- iterate_rules_in_prio(iter_prio, add_rule_cb, context);
- }
- mutex_unlock(&ns->base.lock);
-}
-
-void mlx5_flow_iterate_existing_rules(struct mlx5_flow_namespace *ns,
- rule_event_fn add_rule_cb,
- void *context)
-{
- down_write(&ns->dests_rw_sem);
- down_read(&ns->notifiers_rw_sem);
- iterate_rules_in_ns(ns, add_rule_cb, context);
- up_read(&ns->notifiers_rw_sem);
- up_write(&ns->dests_rw_sem);
-}
-
-void mlx5_del_flow_rules_list(struct mlx5_flow_rules_list *rules_list)
-{
- struct mlx5_flow_rule_node *iter_node;
- struct mlx5_flow_rule_node *temp_node;
-
- list_for_each_entry_safe(iter_node, temp_node, &rules_list->head, list) {
- list_del(&iter_node->list);
- kfree(iter_node);
- }
-
- kfree(rules_list);
-}
-
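-/* RoCEv1 frames are identified by their dedicated ethertype. */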
-#define ROCEV1_ETHERTYPE 0x8915
-static int set_rocev1_rules(struct list_head *rules_list)
-{
- struct mlx5_flow_rule_node *rocev1_rule;
-
- rocev1_rule = kzalloc(sizeof(*rocev1_rule), GFP_KERNEL);
- if (!rocev1_rule)
- return -ENOMEM;
-
- rocev1_rule->match_criteria_enable =
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
- MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_criteria, ethertype,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, rocev1_rule->match_value, ethertype,
- ROCEV1_ETHERTYPE);
-
- list_add_tail(&rocev1_rule->list, rules_list);
-
- return 0;
-}
-
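-/* RoCEv2 is UDP-encapsulated; match IPv4 and IPv6 UDP flows to the
- * RoCE destination port.
- */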
-#define ROCEV2_UDP_PORT 4791
-static int set_rocev2_rules(struct list_head *rules_list)
-{
- struct mlx5_flow_rule_node *ipv4_rule;
- struct mlx5_flow_rule_node *ipv6_rule;
-
- ipv4_rule = kzalloc(sizeof(*ipv4_rule), GFP_KERNEL);
- if (!ipv4_rule)
- return -ENOMEM;
-
- ipv6_rule = kzalloc(sizeof(*ipv6_rule), GFP_KERNEL);
- if (!ipv6_rule) {
- kfree(ipv4_rule);
- return -ENOMEM;
- }
-
- ipv4_rule->match_criteria_enable =
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ethertype,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ethertype,
- 0x0800);
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, ip_protocol,
- 0xff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, ip_protocol,
- IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_criteria, udp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv4_rule->match_value, udp_dport,
- ROCEV2_UDP_PORT);
-
- ipv6_rule->match_criteria_enable =
- 1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS;
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ethertype,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ethertype,
- 0x86dd);
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, ip_protocol,
- 0xff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, ip_protocol,
- IPPROTO_UDP);
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_criteria, udp_dport,
- 0xffff);
- MLX5_SET(fte_match_set_lyr_2_4, ipv6_rule->match_value, udp_dport,
- ROCEV2_UDP_PORT);
-
- list_add_tail(&ipv4_rule->list, rules_list);
- list_add_tail(&ipv6_rule->list, rules_list);
-
- return 0;
-}
-
-struct mlx5_flow_rules_list *get_roce_flow_rules(u8 roce_mode)
-{
- int err = 0;
- struct mlx5_flow_rules_list *rules_list =
- kzalloc(sizeof(*rules_list), GFP_KERNEL);
-
- if (!rules_list)
- return NULL;
-
- INIT_LIST_HEAD(&rules_list->head);
-
- if (roce_mode & MLX5_ROCE_VERSION_1_CAP) {
- err = set_rocev1_rules(&rules_list->head);
- if (err)
- goto free_list;
- }
- if (roce_mode & MLX5_ROCE_VERSION_2_CAP)
- err = set_rocev2_rules(&rules_list->head);
- if (err)
- goto free_list;
-
- return rules_list;
-
-free_list:
- mlx5_del_flow_rules_list(rules_list);
- return NULL;
-}
-
-struct mlx5_modify_hdr *mlx5_modify_header_alloc(struct mlx5_core_dev *dev,
- enum mlx5_flow_namespace_type ns_type,
- u8 num_actions,
- void *modify_actions)
-{
- struct mlx5_modify_hdr *modify_hdr;
- int err;
-
- modify_hdr = kzalloc(sizeof(*modify_hdr), GFP_KERNEL);
- if (!modify_hdr)
- return ERR_PTR(-ENOMEM);
-
- modify_hdr->ns_type = ns_type;
- err = mlx5_cmd_modify_header_alloc(dev, ns_type, num_actions,
- modify_actions, modify_hdr);
- if (err) {
- kfree(modify_hdr);
- return ERR_PTR(err);
- }
-
- return modify_hdr;
-}
-EXPORT_SYMBOL(mlx5_modify_header_alloc);
-
-void mlx5_modify_header_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_modify_hdr *modify_hdr)
-{
- mlx5_cmd_modify_header_dealloc(dev, modify_hdr);
- kfree(modify_hdr);
-}
-EXPORT_SYMBOL(mlx5_modify_header_dealloc);
-
-struct mlx5_pkt_reformat *mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat_params *params,
- enum mlx5_flow_namespace_type ns_type)
-{
- struct mlx5_pkt_reformat *pkt_reformat;
- int err;
-
- pkt_reformat = kzalloc(sizeof(*pkt_reformat), GFP_KERNEL);
- if (!pkt_reformat)
- return ERR_PTR(-ENOMEM);
-
- pkt_reformat->ns_type = ns_type;
- pkt_reformat->reformat_type = params->type;
- err = mlx5_cmd_packet_reformat_alloc(dev, params, ns_type,
- pkt_reformat);
- if (err) {
- kfree(pkt_reformat);
- return ERR_PTR(err);
- }
-
- return pkt_reformat;
-}
-EXPORT_SYMBOL(mlx5_packet_reformat_alloc);
-
-void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
- struct mlx5_pkt_reformat *pkt_reformat)
-{
- mlx5_cmd_packet_reformat_dealloc(dev, pkt_reformat);
- kfree(pkt_reformat);
-}
-EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
-
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_fw.c b/sys/dev/mlx5/mlx5_core/mlx5_fw.c
index 1a4956b09d32..233bd4a38c91 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_fw.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_fw.c
@@ -240,6 +240,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, ipsec_offload)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_IPSEC);
+ if (err)
+ return err;
+ }
+
err = mlx5_core_query_special_contexts(dev);
if (err)
return err;
@@ -300,7 +306,7 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
- int end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
@@ -337,7 +343,7 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
} while (!time_after(jiffies, end));
if (mlx5_get_nic_state(dev) != MLX5_NIC_IFC_DISABLED) {
- mlx5_core_err(dev, "NIC IFC still %d after %ums.\n",
+ mlx5_core_err(dev, "NIC IFC still %d after %lums.\n",
mlx5_get_nic_state(dev), delay_ms);
return -EIO;
}
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_health.c b/sys/dev/mlx5/mlx5_core/mlx5_health.c
index f4049d23d75d..bedd51eb02e4 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_health.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_health.c
@@ -265,7 +265,8 @@ mlx5_health_allow_reset(struct mlx5_core_dev *dev)
#define MLX5_NIC_STATE_POLL_MS 5
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force)
{
- int end, delay_ms = MLX5_CRDUMP_WAIT_MS;
+ unsigned long end;
+ int delay_ms = MLX5_CRDUMP_WAIT_MS;
u32 fatal_error;
int lock = -EBUSY;
@@ -445,7 +446,7 @@ static void health_care(struct work_struct *work)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
-static int get_next_poll_jiffies(void)
+static unsigned long get_next_poll_jiffies(void)
{
unsigned long next;
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_main.c b/sys/dev/mlx5/mlx5_core/mlx5_main.c
index 6b9b63a24714..221781327b51 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_main.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_main.c
@@ -26,6 +26,7 @@
#include "opt_rss.h"
#include "opt_ratelimit.h"
+#include "opt_ipsec.h"
#include <linux/kmod.h>
#include <linux/module.h>
@@ -52,8 +53,12 @@
#include <dev/mlx5/mlx5_core/diag_cnt.h>
#ifdef PCI_IOV
#include <sys/nv.h>
+#include <sys/socket.h>
#include <dev/pci/pci_iov.h>
#include <sys/iov_schema.h>
+#include <sys/iov.h>
+#include <net/if.h>
+#include <net/if_vlan_var.h>
#endif
static const char mlx5_version[] = "Mellanox Core driver "
@@ -63,6 +68,9 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEPEND(mlx5, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5, mlxfw, 1, 1, 1);
MODULE_DEPEND(mlx5, firmware, 1, 1, 1);
+#ifdef IPSEC_OFFLOAD
+MODULE_DEPEND(mlx5, ipsec, 1, 1, 1);
+#endif
MODULE_VERSION(mlx5, 1);
SYSCTL_NODE(_hw, OID_AUTO, mlx5, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
@@ -225,6 +233,7 @@ static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
#ifdef PCI_IOV
static const char iov_mac_addr_name[] = "mac-addr";
+static const char iov_vlan_name[] = "vlan";
static const char iov_node_guid_name[] = "node-guid";
static const char iov_port_guid_name[] = "port-guid";
#endif
@@ -752,8 +761,8 @@ static inline int fw_initializing(struct mlx5_core_dev *dev)
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
u32 warn_time_mili)
{
- int warn = jiffies + msecs_to_jiffies(warn_time_mili);
- int end = jiffies + msecs_to_jiffies(max_wait_mili);
+ unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
+ unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
MPASS(max_wait_mili > warn_time_mili);
@@ -765,8 +774,8 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
}
if (warn_time_mili && time_after(jiffies, warn)) {
mlx5_core_warn(dev,
- "Waiting for FW initialization, timeout abort in %u s\n",
- (unsigned)(jiffies_to_msecs(end - warn) / 1000));
+ "Waiting for FW initialization, timeout abort in %lu s\n",
+ (unsigned long)(jiffies_to_msecs(end - warn) / 1000));
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
msleep(FW_INIT_WAIT_MS);
@@ -1204,7 +1213,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
goto err_stop_eqs;
}
- err = mlx5_init_fs(dev);
+ err = mlx5_fs_core_init(dev);
if (err) {
mlx5_core_err(dev, "flow steering init %d\n", err);
goto err_free_comp_eqs;
@@ -1322,7 +1331,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_diag_cnt_cleanup(dev);
mlx5_fpga_device_stop(dev);
mlx5_mpfs_destroy(dev);
- mlx5_cleanup_fs(dev);
+ mlx5_fs_core_cleanup(dev);
mlx5_wait_for_reclaim_vfs_pages(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
@@ -1689,10 +1698,16 @@ static int init_one(struct pci_dev *pdev,
mlx5_pagealloc_init(dev);
+ err = mlx5_fs_core_alloc(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed to alloc flow steering\n");
+ goto clean_health;
+ }
+
err = mlx5_load_one(dev, priv, true);
if (err) {
mlx5_core_err(dev, "mlx5_load_one failed %d\n", err);
- goto clean_health;
+ goto clean_fs;
}
mlx5_fwdump_prep(dev);
@@ -1714,6 +1729,8 @@ static int init_one(struct pci_dev *pdev,
vf_schema = pci_iov_schema_alloc_node();
pci_iov_schema_add_unicast_mac(vf_schema,
iov_mac_addr_name, 0, NULL);
+ pci_iov_schema_add_vlan(vf_schema,
+ iov_vlan_name, 0, 0);
pci_iov_schema_add_uint64(vf_schema, iov_node_guid_name,
0, 0);
pci_iov_schema_add_uint64(vf_schema, iov_port_guid_name,
@@ -1736,6 +1753,8 @@ static int init_one(struct pci_dev *pdev,
pci_save_state(pdev);
return 0;
+clean_fs:
+ mlx5_fs_core_free(dev);
clean_health:
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
@@ -1767,6 +1786,7 @@ static void remove_one(struct pci_dev *pdev)
(long long)(dev->priv.fw_pages * MLX5_ADAPTER_PAGE_SIZE));
}
+ mlx5_fs_core_free(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_health_cleanup(dev);
mlx5_fwdump_clean(dev);
@@ -1950,6 +1970,25 @@ mlx5_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
}
}
+ if (nvlist_exists_number(vf_config, iov_vlan_name)) {
+ uint16_t vlan = nvlist_get_number(vf_config, iov_vlan_name);
+
+ if (vlan == DOT1Q_VID_NULL)
+ error = ENOTSUP;
+ else {
+ if (vlan == VF_VLAN_TRUNK)
+ vlan = DOT1Q_VID_NULL;
+
+ error = -mlx5_eswitch_set_vport_vlan(priv->eswitch,
+ vfnum + 1, vlan, 0);
+ }
+ if (error != 0) {
+ mlx5_core_err(core_dev,
+ "setting VLAN for VF %d failed, error %d\n",
+ vfnum + 1, error);
+ }
+ }
+
if (nvlist_exists_number(vf_config, iov_node_guid_name)) {
node_guid = nvlist_get_number(vf_config, iov_node_guid_name);
error = -mlx5_modify_nic_vport_node_guid(core_dev, vfnum + 1,
@@ -2103,7 +2142,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0xa2d3) }, /* BlueField integrated ConnectX-5 network controller VF */
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
{ PCI_VDEVICE(MELLANOX, 0xa2dc) }, /* BlueField-3 integrated ConnectX-7 network controller */
- { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 integrated ConnectX-8 network controller */
+ { PCI_VDEVICE(MELLANOX, 0xa2df) }, /* BlueField-4 Family integrated network controller */
{ }
};
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c b/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
index 6207442e756f..b1798f909ee5 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_pagealloc.c
@@ -519,7 +519,7 @@ enum {
s64 mlx5_wait_for_reclaim_vfs_pages(struct mlx5_core_dev *dev)
{
- int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
s64 prevpages = 0;
s64 npages = 0;
@@ -557,7 +557,7 @@ static int optimal_reclaimed_pages(void)
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
- int end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
struct mlx5_fw_page *fwp;
struct rb_node *p;
int nclaimed = 0;
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_tls.c b/sys/dev/mlx5/mlx5_core/mlx5_tls.c
index b3a49c603fed..3ed209e2028d 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_tls.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_tls.c
@@ -33,66 +33,6 @@
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/transobj.h>
-int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn,
- const void *p_key, u32 key_len, u32 *p_obj_id)
-{
- u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
- u32 out[MLX5_ST_SZ_DW(create_encryption_key_out)] = {};
- u64 general_obj_types;
- int err;
-
- general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
- if (!(general_obj_types & MLX5_HCA_CAP_GENERAL_OBJ_TYPES_ENCRYPTION_KEY))
- return -EINVAL;
-
- switch (key_len) {
- case 128 / 8:
- memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
- encryption_key_object.key[4]), p_key, 128 / 8);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
- break;
- case 256 / 8:
- memcpy(MLX5_ADDR_OF(create_encryption_key_in, in,
- encryption_key_object.key[0]), p_key, 256 / 8);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.pd, pdn);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_size,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256);
- MLX5_SET(create_encryption_key_in, in, encryption_key_object.key_type,
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK);
- break;
- default:
- return -EINVAL;
- }
-
- MLX5_SET(create_encryption_key_in, in, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJ);
- MLX5_SET(create_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
-
- err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
- if (err == 0)
- *p_obj_id = MLX5_GET(create_encryption_key_out, out, obj_id);
-
- /* avoid leaking key on the stack */
- memset(in, 0, sizeof(in));
-
- return err;
-}
-
-int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid)
-{
- u32 in[MLX5_ST_SZ_DW(destroy_encryption_key_in)] = {};
- u32 out[MLX5_ST_SZ_DW(destroy_encryption_key_out)] = {};
-
- MLX5_SET(destroy_encryption_key_in, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJ);
- MLX5_SET(destroy_encryption_key_in, in, obj_type, MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
- MLX5_SET(destroy_encryption_key_in, in, obj_id, oid);
-
- return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
int mlx5_tls_open_tis(struct mlx5_core_dev *mdev, int tc, int tdn, int pdn, u32 *p_tisn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
diff --git a/sys/dev/mlx5/mlx5_core/mlx5_transobj.c b/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
index 6d375d371597..c62969d8d172 100644
--- a/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
+++ b/sys/dev/mlx5/mlx5_core/mlx5_transobj.c
@@ -166,6 +166,18 @@ int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+
+int
+mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {0};
+ int err;
+
+ MLX5_SET(modify_tir_in, in, opcode, MLX5_CMD_OP_MODIFY_TIR);
+ err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+ return (err);
+}
+
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u32 uid)
{
u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {0};
diff --git a/sys/dev/mlx5/mlx5_core/transobj.h b/sys/dev/mlx5/mlx5_core/transobj.h
index 1cc40ca8b1b7..6a21d7db90c8 100644
--- a/sys/dev/mlx5/mlx5_core/transobj.h
+++ b/sys/dev/mlx5/mlx5_core/transobj.h
@@ -40,6 +40,7 @@ void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 *in, int inlen);
void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn, u32 uid);
int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
u32 *tisn);
diff --git a/sys/dev/mlx5/mlx5_core/wq.h b/sys/dev/mlx5/mlx5_core/wq.h
index c996eca41114..2e1d6a6fcff0 100644
--- a/sys/dev/mlx5/mlx5_core/wq.h
+++ b/sys/dev/mlx5/mlx5_core/wq.h
@@ -27,6 +27,7 @@
#define __MLX5_WQ_H__
#include <dev/mlx5/mlx5_ifc.h>
+#include <dev/mlx5/cq.h>
struct mlx5_wq_param {
int linear;
@@ -136,6 +137,22 @@ static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
*wq->db = cpu_to_be32(wq->cc & 0xffffff);
}
+static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq)
+{
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ u8 cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
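+	/* The hardware flips the CQE ownership bit on each pass around the
+	 * ring; a mismatch with the software wrap-count parity means this
+	 * entry has not been written yet.
+	 */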
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ /* ensure cqe content is read after cqe ownership bit */
+ atomic_thread_fence_acq();
+
+ return cqe;
+}
+
static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
{
return wq->cur_sz == wq->sz_m1;
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index 502c1c19af6f..f59902be226a 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -89,14 +89,13 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xe
-#define MLX5E_MAX_BUSDMA_RX_SEGS 15
+#define MLX5E_MAX_BUSDMA_RX_SEGS 31
#ifndef MLX5E_MAX_RX_BYTES
#define MLX5E_MAX_RX_BYTES MCLBYTES
#endif
-#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ \
- MIN(65535, 7 * MLX5E_MAX_RX_BYTES)
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ 65535
#define MLX5E_DIM_DEFAULT_PROFILE 3
#define MLX5E_DIM_MAX_RX_CQ_MODERATION_PKTS_WITH_LRO 16
@@ -747,10 +746,13 @@ struct mlx5e_cq {
struct mlx5_wq_ctrl wq_ctrl;
} __aligned(MLX5E_CACHELINE_SIZE);
+struct ipsec_accel_in_tag;
+
struct mlx5e_rq_mbuf {
bus_dmamap_t dma_map;
caddr_t data;
struct mbuf *mbuf;
+ struct ipsec_accel_in_tag *ipsec_mtag;
};
struct mlx5e_rq {
@@ -770,6 +772,7 @@ struct mlx5e_rq {
struct mlx5e_cq cq;
struct lro_ctrl lro;
volatile int enabled;
+ int processing;
int ix;
/* Dynamic Interrupt Moderation */
@@ -956,7 +959,7 @@ struct mlx5_flow_rule;
struct mlx5e_eth_addr_info {
u8 addr [ETH_ALEN + 2];
/* flow table rule per traffic type */
- struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *ft_rule[MLX5E_NUM_TT];
};
#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
@@ -992,10 +995,10 @@ enum {
struct mlx5e_vlan_db {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mlx5_flow_rule *active_vlans_ft_rule[VLAN_N_VID];
- struct mlx5_flow_rule *untagged_ft_rule;
- struct mlx5_flow_rule *any_cvlan_ft_rule;
- struct mlx5_flow_rule *any_svlan_ft_rule;
+ struct mlx5_flow_handle *active_vlans_ft_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *untagged_ft_rule;
+ struct mlx5_flow_handle *any_cvlan_ft_rule;
+ struct mlx5_flow_handle *any_svlan_ft_rule;
bool filter_disabled;
};
@@ -1004,7 +1007,7 @@ struct mlx5e_vxlan_db_el {
u_int proto;
u_int port;
bool installed;
- struct mlx5_flow_rule *vxlan_ft_rule;
+ struct mlx5_flow_handle *vxlan_ft_rule;
TAILQ_ENTRY(mlx5e_vxlan_db_el) link;
};
@@ -1027,19 +1030,20 @@ enum accel_fs_tcp_type {
struct mlx5e_accel_fs_tcp {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table tables[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
- struct mlx5_flow_rule *default_rules[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
+ struct mlx5_flow_handle *default_rules[MLX5E_ACCEL_FS_TCP_NUM_TYPES];
};
struct mlx5e_flow_tables {
struct mlx5_flow_namespace *ns;
struct mlx5e_flow_table vlan;
struct mlx5e_flow_table vxlan;
- struct mlx5_flow_rule *vxlan_catchall_ft_rule;
+ struct mlx5_flow_handle *vxlan_catchall_ft_rule;
struct mlx5e_flow_table main;
struct mlx5e_flow_table main_vxlan;
- struct mlx5_flow_rule *main_vxlan_rule[MLX5E_NUM_TT];
+ struct mlx5_flow_handle *main_vxlan_rule[MLX5E_NUM_TT];
struct mlx5e_flow_table inner_rss;
struct mlx5e_accel_fs_tcp accel_tcp;
+ struct mlx5_flow_table *ipsec_ft;
};
struct mlx5e_xmit_args {
@@ -1067,6 +1071,7 @@ struct mlx5e_dcbx {
u32 xoff;
};
+struct mlx5e_ipsec;
struct mlx5e_priv {
struct mlx5_core_dev *mdev; /* must be first */
@@ -1145,6 +1150,7 @@ struct mlx5e_priv {
bool sw_is_port_buf_owner;
struct pfil_head *pfil;
+ struct mlx5e_ipsec *ipsec;
struct mlx5e_channel channel[];
};
@@ -1295,6 +1301,7 @@ void mlx5e_refresh_sq_inline(struct mlx5e_priv *priv);
int mlx5e_update_buf_lossy(struct mlx5e_priv *priv);
int mlx5e_fec_update(struct mlx5e_priv *priv);
int mlx5e_hw_temperature_update(struct mlx5e_priv *priv);
+int mlx5e_hw_lro_update_tirs(struct mlx5e_priv *priv);
/* Internal Queue, IQ, API functions */
void mlx5e_iq_send_nop(struct mlx5e_iq *, u32);
diff --git a/sys/dev/mlx5/mlx5_en/en_hw_tls.h b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
index 2018198e5e52..d637314e040e 100644
--- a/sys/dev/mlx5/mlx5_en/en_hw_tls.h
+++ b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
@@ -84,7 +84,7 @@ struct mlx5e_tls {
struct workqueue_struct *wq;
uma_zone_t zone;
uint32_t max_resources; /* max number of resources */
- volatile uint32_t num_resources; /* current number of resources */
+ int zone_max;
int init; /* set when ready */
char zname[32];
};
diff --git a/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h b/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
index b824ca686e2c..e185aceb4b26 100644
--- a/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
+++ b/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
@@ -61,7 +61,7 @@ struct mlx5e_tls_rx_tag {
uint32_t tirn; /* HW TIR context number */
uint32_t dek_index; /* HW TLS context number */
struct mlx5e_tls_rx *tls_rx; /* parent pointer */
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
struct mtx mtx;
struct completion progress_complete;
uint32_t state; /* see MLX5E_TLS_RX_ST_XXX */
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
index 0dcca1077a60..28401048a427 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_ethtool.c
@@ -1124,16 +1124,8 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
/* import HW LRO mode */
if (priv->params_ethtool.hw_lro != 0 &&
MLX5_CAP_ETH(priv->mdev, lro_cap)) {
+ priv->params.hw_lro_en = true;
priv->params_ethtool.hw_lro = 1;
- /* check if feature should actually be enabled */
- if (if_getcapenable(priv->ifp) & IFCAP_LRO) {
- priv->params.hw_lro_en = true;
- } else {
- priv->params.hw_lro_en = false;
-
- mlx5_en_warn(priv->ifp, "To enable HW LRO "
- "please also enable LRO via ifconfig(8).\n");
- }
} else {
/* return an error if HW does not support this feature */
if (priv->params_ethtool.hw_lro != 0)
@@ -1141,7 +1133,13 @@ mlx5e_ethtool_handler(SYSCTL_HANDLER_ARGS)
priv->params.hw_lro_en = false;
priv->params_ethtool.hw_lro = 0;
}
- /* restart network interface, if any */
+
+ error = mlx5e_hw_lro_update_tirs(priv);
+
+ /*
+ * Restart network interface, if any. This
+ * re-populates rx wqes with proper segment sizes.
+ */
if (was_opened)
mlx5e_open_locked(priv->ifp);
break;
@@ -1479,12 +1477,6 @@ mlx5e_create_ethtool(struct mlx5e_priv *priv)
mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RD |
CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
mlx5e_params_desc[2 * x + 1]);
- } else if (strcmp(mlx5e_params_desc[2 * x], "hw_lro") == 0) {
- /* read-only, but tunable parameters */
- SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO,
- mlx5e_params_desc[2 * x], CTLTYPE_U64 | CTLFLAG_RDTUN |
- CTLFLAG_MPSAFE, priv, x, &mlx5e_ethtool_handler, "QU",
- mlx5e_params_desc[2 * x + 1]);
} else {
/*
* NOTE: In FreeBSD-11 and newer the
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
index 4939deb4c122..c45f02cdaf42 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_flow_table.c
@@ -27,6 +27,7 @@
#include "opt_ratelimit.h"
#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
#include <linux/list.h>
#include <dev/mlx5/fs.h>
@@ -143,17 +144,17 @@ static void
mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai)
{
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV6]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_IPV4]);
- mlx5_del_flow_rule(&ai->ft_rule[MLX5E_TT_ANY]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_TCP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_TCP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6_UDP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4_UDP]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV6]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_IPV4]);
+ mlx5_del_flow_rules(&ai->ft_rule[MLX5E_TT_ANY]);
}
static int
@@ -248,24 +249,30 @@ mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
static int
mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type,
- u32 *mc, u32 *mv)
+ struct mlx5_flow_spec *spec)
{
struct mlx5_flow_destination dest = {};
u8 mc_enable = 0;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
struct mlx5_flow_table *ft = priv->fts.main.t;
- u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+ u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
- u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
+ u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
u32 *tirn = priv->tirn;
u32 tt_vec;
int err = 0;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_ETH_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
+ u8 *mc;
+ u8 *mv;
+ mv = (u8 *)spec->match_value;
+ mc = (u8 *)spec->match_criteria;
+
+ spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
switch (type) {
@@ -289,12 +296,11 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
tt_vec = mlx5e_get_tt_vec(ai, type);
+ spec->match_criteria_enable = mc_enable;
if (tt_vec & BIT(MLX5E_TT_ANY)) {
rule_p = &ai->ft_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -302,14 +308,13 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
mc_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ spec->match_criteria_enable = mc_enable;
if (tt_vec & BIT(MLX5E_TT_IPV4)) {
rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -319,9 +324,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -334,9 +337,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -346,9 +347,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -360,9 +359,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -372,9 +369,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -386,9 +381,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -398,9 +391,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -412,9 +403,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -424,9 +413,7 @@ mlx5e_add_eth_addr_rule_sub(struct mlx5e_priv *priv,
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
}
@@ -445,23 +432,19 @@ static int
mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
struct mlx5e_eth_addr_info *ai, int type)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_eth_addr_rule_out;
}
- err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, match_criteria,
- match_value);
+ err = mlx5e_add_eth_addr_rule_sub(priv, ai, type, spec);
add_eth_addr_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return (err);
}
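
The match_criteria/match_value consolidation repeated throughout this file follows one pattern; here is a runnable sketch with a simplified stand-in for struct mlx5_flow_spec (the real layout lives in the mlx5 fs headers):

#include <stdlib.h>

#define FTE_MATCH_PARAM_SZ 512	/* placeholder for MLX5_ST_SZ_BYTES(fte_match_param) */

struct fake_flow_spec {
	unsigned char	match_criteria_enable;
	unsigned char	match_criteria[FTE_MATCH_PARAM_SZ];
	unsigned char	match_value[FTE_MATCH_PARAM_SZ];
};

static int
add_rule_pattern(void)
{
	/* One zeroed allocation replaces the two mlx5_vzalloc() calls. */
	struct fake_flow_spec *spec = calloc(1, sizeof(*spec));

	if (spec == NULL)
		return (-12);		/* -ENOMEM, as in the driver */
	/* ... fill criteria/value in place, call mlx5_add_flow_rules() ... */
	free(spec);			/* single kvfree(spec) */
	return (0);
}
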
@@ -469,51 +452,56 @@ add_eth_addr_rule_out:
static void
mlx5e_del_main_vxlan_rules(struct mlx5e_priv *priv)
{
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
- mlx5_del_flow_rule(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV6]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_IPV4]);
+ mlx5_del_flow_rules(&priv->fts.main_vxlan_rule[MLX5E_TT_ANY]);
}
static int
-mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
+mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec)
{
struct mlx5_flow_destination dest = {};
- u8 mc_enable = 0;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
struct mlx5_flow_table *ft = priv->fts.main_vxlan.t;
u32 *tirn = priv->tirn_inner_vxlan;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_ETH_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
int err = 0;
+ u8 *mc;
+ u8 *mv;
+
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
+
+ mc = (u8 *)spec->match_criteria;
+ mv = (u8 *)spec->match_value;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
- mc_enable = MLX5_MATCH_INNER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ethertype);
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4];
dest.tir_num = tirn[MLX5E_TT_IPV4];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6];
dest.tir_num = tirn[MLX5E_TT_IPV6];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -523,16 +511,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_UDP];
dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -541,16 +527,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_TCP];
dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -559,16 +543,14 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV6_IPSEC_AH];
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -577,8 +559,7 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_IPV4_IPSEC_ESP];
dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype, ETHERTYPE_IP);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -586,18 +567,16 @@ mlx5e_add_main_vxlan_rules_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
MLX5_SET(fte_match_param, mv, inner_headers.ethertype,
ETHERTYPE_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
- mc_enable = 0;
+ spec->match_criteria_enable = 0;
memset(mv, 0, MLX5_ST_SZ_BYTES(fte_match_param));
memset(mc, 0, MLX5_ST_SZ_BYTES(fte_match_param));
rule_p = &priv->fts.main_vxlan_rule[MLX5E_TT_ANY];
dest.tir_num = tirn[MLX5E_TT_ANY];
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, NULL, &flow_act, &dest, 1);
if (IS_ERR_OR_NULL(*rule_p))
goto err_del_ai;
@@ -614,22 +593,19 @@ err_del_ai:
static int
mlx5e_add_main_vxlan_rules(struct mlx5e_priv *priv)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (match_value == NULL || match_criteria == NULL) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_main_vxlan_rules_out;
}
- err = mlx5e_add_main_vxlan_rules_sub(priv, match_criteria, match_value);
+ err = mlx5e_add_main_vxlan_rules_sub(priv, spec);
add_main_vxlan_rules_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return (err);
}
@@ -687,22 +663,27 @@ enum mlx5e_vlan_rule_type {
static int
mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid,
- u32 *mc, u32 *mv)
+ struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fts.vlan.t;
struct mlx5_flow_destination dest = {};
- u8 mc_enable = 0;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_ETH_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
+ u8 *mv;
+ u8 *mc;
+ mv = (u8 *)spec->match_value;
+ mc = (u8 *)spec->match_criteria;
+
+ spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.vxlan.t;
- mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
@@ -729,11 +710,7 @@ mlx5e_add_vlan_rule_sub(struct mlx5e_priv *priv,
break;
}
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST,
- &flow_act,
- &dest);
-
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
@@ -747,24 +724,20 @@ static int
mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vlan_rule_out;
}
- err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, match_criteria,
- match_value);
+ err = mlx5e_add_vlan_rule_sub(priv, rule_type, vid, spec);
add_vlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return (err);
}
@@ -775,16 +748,16 @@ mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- mlx5_del_flow_rule(&priv->vlan.untagged_ft_rule);
+ mlx5_del_flow_rules(&priv->vlan.untagged_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
- mlx5_del_flow_rule(&priv->vlan.any_cvlan_ft_rule);
+ mlx5_del_flow_rules(&priv->vlan.any_cvlan_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
- mlx5_del_flow_rule(&priv->vlan.any_svlan_ft_rule);
+ mlx5_del_flow_rules(&priv->vlan.any_svlan_ft_rule);
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
- mlx5_del_flow_rule(&priv->vlan.active_vlans_ft_rule[vid]);
+ mlx5_del_flow_rules(&priv->vlan.active_vlans_ft_rule[vid]);
mlx5e_vport_context_update_vlans(priv);
break;
default:
@@ -1518,11 +1491,16 @@ mlx5e_create_main_flow_table(struct mlx5e_priv *priv, bool inner_vxlan)
{
struct mlx5e_flow_table *ft = inner_vxlan ? &priv->fts.main_vxlan :
&priv->fts.main;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0,
- inner_vxlan ? "vxlan_main" : "main", MLX5E_MAIN_TABLE_SIZE);
+ ft_attr.max_fte = MLX5E_MAIN_TABLE_SIZE;
+ if (priv->ipsec)
+ ft_attr.level = inner_vxlan ? 10 : 12;
+ else
+ ft_attr.level = inner_vxlan ? 2 : 4;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
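
The explicit levels passed via mlx5_flow_table_attr here and in the hunks below encode the RX steering order; collecting the values from this diff, IPsec RX tables, when present, occupy the lower levels and push the Ethernet tables up:

#include <stdbool.h>

/*
 * Levels assigned in this diff:
 *
 *	table		no IPsec	with IPsec
 *	vlan		0		9
 *	vxlan		1		10
 *	main_vxlan	2		10
 *	inner_rss	3		11
 *	main		4		12
 */
static int
table_level(int level_plain, int level_ipsec, bool have_ipsec)
{
	return (have_ipsec ? level_ipsec : level_plain);
}
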
@@ -1643,11 +1621,13 @@ static int
mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.vlan;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vlan",
- MLX5E_VLAN_TABLE_SIZE);
+ ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE;
+ ft_attr.level = (priv->ipsec) ? 9 : 0;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1683,23 +1663,29 @@ mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
}
static int
-mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
+mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct mlx5e_vxlan_db_el *el)
{
struct mlx5_flow_table *ft = priv->fts.vxlan.t;
struct mlx5_flow_destination dest = {};
- u8 mc_enable;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_ETH_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
+ u8 *mc;
+ u8 *mv;
+
+ mv = (u8 *)spec->match_value;
+ mc = (u8 *)spec->match_criteria;
+
+ spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.main_vxlan.t;
- mc_enable = MLX5_MATCH_OUTER_HEADERS;
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
rule_p = &el->vxlan_ft_rule;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET(fte_match_param, mv, outer_headers.ethertype, el->proto);
@@ -1708,8 +1694,7 @@ mlx5e_add_vxlan_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv,
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.udp_dport);
MLX5_SET(fte_match_param, mv, outer_headers.udp_dport, el->port);
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@@ -1764,23 +1749,20 @@ static int
mlx5e_add_vxlan_rule_from_db(struct mlx5e_priv *priv,
struct mlx5e_vxlan_db_el *el)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (match_value == NULL || match_criteria == NULL) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vxlan_rule_out;
}
- err = mlx5e_add_vxlan_rule_sub(priv, match_criteria, match_value, el);
+ err = mlx5e_add_vxlan_rule_sub(priv, spec, el);
add_vxlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return (err);
}
@@ -1801,8 +1783,8 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
el->refcount++;
if (el->installed)
return (0);
- }
- el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
+ } else
+ el = mlx5e_vxlan_alloc_db_el(priv, proto, port);
if ((if_getcapenable(priv->ifp) & IFCAP_VXLAN_HWCSUM) != 0) {
err = mlx5e_add_vxlan_rule_from_db(priv, el);
@@ -1818,24 +1800,25 @@ mlx5e_add_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
}
static int
-mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
+mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fts.vxlan.t;
struct mlx5_flow_destination dest = {};
- u8 mc_enable = 0;
- struct mlx5_flow_rule **rule_p;
+ struct mlx5_flow_handle **rule_p;
int err = 0;
struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_ETH_FLOW_TAG,
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
};
+ spec->flow_context.flow_tag = MLX5_FS_ETH_FLOW_TAG;
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fts.main.t;
rule_p = &priv->fts.vxlan_catchall_ft_rule;
- *rule_p = mlx5_add_flow_rule(ft, mc_enable, mc, mv,
- MLX5_FLOW_RULE_FWD_ACTION_DEST, &flow_act, &dest);
+ *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
@@ -1850,24 +1833,20 @@ mlx5e_add_vxlan_catchall_rule_sub(struct mlx5e_priv *priv, u32 *mc, u32 *mv)
static int
mlx5e_add_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (match_value == NULL || match_criteria == NULL) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
mlx5_en_err(priv->ifp, "alloc failed\n");
err = -ENOMEM;
goto add_vxlan_rule_out;
}
- err = mlx5e_add_vxlan_catchall_rule_sub(priv, match_criteria,
- match_value);
+ err = mlx5e_add_vxlan_catchall_rule_sub(priv, spec);
add_vxlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return (err);
}
@@ -1911,7 +1890,7 @@ mlx5e_del_vxlan_rule(struct mlx5e_priv *priv, sa_family_t family, u_int port)
}
if (el->installed)
- mlx5_del_flow_rule(&el->vxlan_ft_rule);
+ mlx5_del_flow_rules(&el->vxlan_ft_rule);
TAILQ_REMOVE(&priv->vxlan.head, el, link);
kvfree(el);
return (0);
@@ -1925,7 +1904,7 @@ mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
TAILQ_FOREACH(el, &priv->vxlan.head, link) {
if (!el->installed)
continue;
- mlx5_del_flow_rule(&el->vxlan_ft_rule);
+ mlx5_del_flow_rules(&el->vxlan_ft_rule);
el->installed = false;
}
}
@@ -1933,7 +1912,7 @@ mlx5e_del_all_vxlan_rules(struct mlx5e_priv *priv)
static void
mlx5e_del_vxlan_catchall_rule(struct mlx5e_priv *priv)
{
- mlx5_del_flow_rule(&priv->fts.vxlan_catchall_ft_rule);
+ mlx5_del_flow_rules(&priv->fts.vxlan_catchall_ft_rule);
}
void
@@ -2030,11 +2009,13 @@ static int
mlx5e_create_vxlan_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.vxlan;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "vxlan",
- MLX5E_VXLAN_TABLE_SIZE);
+ ft_attr.max_fte = MLX5E_VXLAN_TABLE_SIZE;
+ ft_attr.level = (priv->ipsec) ? 10 : 1;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -2144,11 +2125,13 @@ static int
mlx5e_create_inner_rss_flow_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fts.inner_rss;
+ struct mlx5_flow_table_attr ft_attr = {};
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 0, "inner_rss",
- MLX5E_INNER_RSS_TABLE_SIZE);
+ ft_attr.max_fte = MLX5E_INNER_RSS_TABLE_SIZE;
+ ft_attr.level = (priv->ipsec) ? 11 : 3;
+ ft->t = mlx5_create_flow_table(priv->fts.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -2198,9 +2181,13 @@ mlx5e_open_flow_tables(struct mlx5e_priv *priv)
priv->fts.ns = mlx5_get_flow_namespace(
priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);
+ err = mlx5e_accel_ipsec_fs_rx_tables_create(priv);
+ if (err)
+ return err;
+
err = mlx5e_create_vlan_flow_table(priv);
if (err)
- return (err);
+ goto err_destroy_ipsec_flow_table;
err = mlx5e_create_vxlan_flow_table(priv);
if (err)
@@ -2222,13 +2209,19 @@ mlx5e_open_flow_tables(struct mlx5e_priv *priv)
if (err)
goto err_destroy_main_flow_table_false;
+ err = mlx5e_accel_ipsec_fs_rx_catchall_rules(priv);
+ if (err)
+ goto err_destroy_vxlan_catchall_rule;
+
err = mlx5e_accel_fs_tcp_create(priv);
if (err)
- goto err_del_vxlan_catchall_rule;
+ goto err_destroy_ipsec_catchall_rules;
return (0);
-err_del_vxlan_catchall_rule:
+err_destroy_ipsec_catchall_rules:
+ mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
+err_destroy_vxlan_catchall_rule:
mlx5e_del_vxlan_catchall_rule(priv);
err_destroy_main_flow_table_false:
mlx5e_destroy_main_flow_table(priv);
@@ -2240,6 +2233,8 @@ err_destroy_vxlan_flow_table:
mlx5e_destroy_vxlan_flow_table(priv);
err_destroy_vlan_flow_table:
mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_ipsec_flow_table:
+ mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
return (err);
}
@@ -2248,12 +2243,14 @@ void
mlx5e_close_flow_tables(struct mlx5e_priv *priv)
{
mlx5e_accel_fs_tcp_destroy(priv);
+ mlx5e_accel_ipsec_fs_rx_catchall_rules_destroy(priv);
mlx5e_del_vxlan_catchall_rule(priv);
mlx5e_destroy_main_flow_table(priv);
mlx5e_destroy_inner_rss_flow_table(priv);
mlx5e_destroy_main_vxlan_flow_table(priv);
mlx5e_destroy_vxlan_flow_table(priv);
mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_accel_ipsec_fs_rx_tables_destroy(priv);
}
int
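
The open/close ordering above is symmetric: IPsec RX tables are created first and torn down last, and every error label unwinds exactly what succeeded. A compressed sketch of that shape, with hypothetical stub helpers:

struct eth_priv { int dummy; };

static int  ipsec_rx_tables_create(struct eth_priv *p) { (void)p; return (0); }
static void ipsec_rx_tables_destroy(struct eth_priv *p) { (void)p; }
static int  create_vlan_table(struct eth_priv *p) { (void)p; return (0); }

static int
open_tables(struct eth_priv *p)
{
	int err;

	if ((err = ipsec_rx_tables_create(p)) != 0)
		return (err);			/* nothing to unwind yet */
	if ((err = create_vlan_table(p)) != 0)
		goto err_ipsec;
	/* ... vxlan, main_vxlan, inner_rss, main, catchall rules ... */
	return (0);

err_ipsec:
	ipsec_rx_tables_destroy(p);		/* mirror of the close path */
	return (err);
}
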
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
index f2ed45826843..6c83de5f3580 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
@@ -31,6 +31,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <dev/mlx5/tls.h>
+#include <dev/mlx5/crypto.h>
#include <linux/delay.h>
#include <sys/ktls.h>
@@ -80,17 +81,57 @@ static const char *mlx5e_tls_stats_desc[] = {
static void mlx5e_tls_work(struct work_struct *);
+/*
+ * Expand the TLS tag UMA zone in a sleepable context.
+ */
+
+static void
+mlx5e_prealloc_tags(struct mlx5e_priv *priv, int nitems)
+{
+ struct mlx5e_tls_tag **tags;
+ int i;
+
+ tags = malloc(sizeof(tags[0]) * nitems,
+ M_MLX5E_TLS, M_WAITOK);
+ for (i = 0; i < nitems; i++)
+ tags[i] = uma_zalloc(priv->tls.zone, M_WAITOK);
+ __compiler_membar();
+ for (i = 0; i < nitems; i++)
+ uma_zfree(priv->tls.zone, tags[i]);
+ free(tags, M_MLX5E_TLS);
+}
+
static int
mlx5e_tls_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
struct mlx5e_tls_tag *ptag;
- int i;
+ struct mlx5e_priv *priv = arg;
+ int err, i;
+
+ /*
+ * mlx5_tls_open_tis() sleeps on a firmware command, so
+ * zone allocations must be done from a sleepable context.
+ * Note that the uma_zalloc() in mlx5e_tls_snd_tag_alloc()
+ * is done with M_NOWAIT so that hitting the zone limit does
+ * not cause the allocation to pause forever.
+ */
for (i = 0; i != cnt; i++) {
ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS,
- mlx5_dev_domainset(arg), flags | M_ZERO);
+ mlx5_dev_domainset(priv->mdev), flags | M_ZERO);
+ if (ptag == NULL)
+ return (i);
+ ptag->tls = &priv->tls;
mtx_init(&ptag->mtx, "mlx5-tls-tag-mtx", NULL, MTX_DEF);
INIT_WORK(&ptag->work, mlx5e_tls_work);
+ err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
+ priv->pdn, &ptag->tisn);
+ if (err) {
+ MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
+ free(ptag, M_MLX5E_TLS);
+ return (i);
+ }
+
store[i] = ptag;
}
return (i);
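
A UMA zcache import callback is expected to return how many of the cnt requested items it actually produced; the hunk above relies on that to report partial progress when mlx5_tls_open_tis() fails. A stripped-down sketch of the contract (helpers hypothetical):

#include <stdlib.h>

struct tls_tag { int tisn; };

static struct tls_tag *
alloc_tag(void *arg, int flags)
{
	(void)arg; (void)flags;
	return (calloc(1, sizeof(struct tls_tag)));
}

/* Stands in for mlx5_tls_open_tis(), which may sleep on firmware. */
static int
open_tis(struct tls_tag *t) { t->tisn = 1; return (0); }

static void
free_tag(struct tls_tag *t) { free(t); }

static int
tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	int i;

	(void)domain;
	for (i = 0; i != cnt; i++) {
		struct tls_tag *t = alloc_tag(arg, flags);

		if (t == NULL)
			break;			/* report i items produced */
		if (open_tis(t) != 0) {
			free_tag(t);
			break;
		}
		store[i] = t;
	}
	return (i);
}
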
@@ -113,7 +154,6 @@ mlx5e_tls_tag_release(void *arg, void **store, int cnt)
if (ptag->tisn != 0) {
mlx5_tls_close_tis(priv->mdev, ptag->tisn);
- atomic_add_32(&ptls->num_resources, -1U);
}
mtx_destroy(&ptag->mtx);
@@ -135,20 +175,38 @@ mlx5e_tls_tag_zfree(struct mlx5e_tls_tag *ptag)
/* avoid leaking keys */
memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));
- /* update number of TIS contexts */
- if (ptag->tisn == 0)
- atomic_add_32(&ptag->tls->num_resources, -1U);
-
/* return tag to UMA */
uma_zfree(ptag->tls->zone, ptag);
}
+static int
+mlx5e_max_tag_proc(SYSCTL_HANDLER_ARGS)
+{
+ struct mlx5e_priv *priv = (struct mlx5e_priv *)arg1;
+ struct mlx5e_tls *ptls = &priv->tls;
+ int err;
+ unsigned int max_tags;
+
+ max_tags = ptls->zone_max;
+ err = sysctl_handle_int(oidp, &max_tags, arg2, req);
+ if (err != 0 || req->newptr == NULL )
+ return err;
+ if (max_tags == ptls->zone_max)
+ return 0;
+ if (max_tags > priv->tls.max_resources || max_tags == 0)
+ return (EINVAL);
+ ptls->zone_max = max_tags;
+ uma_zone_set_max(ptls->zone, ptls->zone_max);
+ return 0;
+}
+
int
mlx5e_tls_init(struct mlx5e_priv *priv)
{
struct mlx5e_tls *ptls = &priv->tls;
struct sysctl_oid *node;
- uint32_t x;
+ uint32_t max_dek, max_tis, x;
+ int zone_max = 0, prealloc_tags = 0;
if (MLX5_CAP_GEN(priv->mdev, tls_tx) == 0 ||
MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0)
@@ -163,13 +221,31 @@ mlx5e_tls_init(struct mlx5e_priv *priv)
snprintf(ptls->zname, sizeof(ptls->zname),
"mlx5_%u_tls", device_get_unit(priv->mdev->pdev->dev.bsddev));
+
+ TUNABLE_INT_FETCH("hw.mlx5.tls_max_tags", &zone_max);
+ TUNABLE_INT_FETCH("hw.mlx5.tls_prealloc_tags", &prealloc_tags);
+
ptls->zone = uma_zcache_create(ptls->zname,
sizeof(struct mlx5e_tls_tag), NULL, NULL, NULL, NULL,
- mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv->mdev,
- UMA_ZONE_UNMANAGED);
+ mlx5e_tls_tag_import, mlx5e_tls_tag_release, priv,
+ UMA_ZONE_UNMANAGED | (prealloc_tags ? UMA_ZONE_NOFREE : 0));
/* shared between RX and TX TLS */
- ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);
+ max_dek = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);
+ max_tis = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_tis) - 1);
+ ptls->max_resources = MIN(max_dek, max_tis);
+
+ if (zone_max != 0) {
+ ptls->zone_max = zone_max;
+ if (ptls->zone_max > priv->tls.max_resources)
+ ptls->zone_max = priv->tls.max_resources;
+ } else {
+ ptls->zone_max = priv->tls.max_resources;
+ }
+
+ uma_zone_set_max(ptls->zone, ptls->zone_max);
+ if (prealloc_tags != 0)
+ mlx5e_prealloc_tags(priv, ptls->zone_max);
for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);
@@ -182,6 +258,10 @@ mlx5e_tls_init(struct mlx5e_priv *priv)
if (node == NULL)
return (0);
+ SYSCTL_ADD_PROC(&priv->sysctl_ctx, SYSCTL_CHILDREN(node), OID_AUTO, "tls_max_tag",
+ CTLFLAG_RW | CTLTYPE_UINT | CTLFLAG_MPSAFE, priv, 0, mlx5e_max_tag_proc,
+ "IU", "Max number of TLS offload session tags");
+
mlx5e_create_counter_stats(&ptls->ctx,
SYSCTL_CHILDREN(node), "stats",
mlx5e_tls_stats_desc, MLX5E_TLS_STATS_NUM,
@@ -205,60 +285,67 @@ mlx5e_tls_cleanup(struct mlx5e_priv *priv)
uma_zdestroy(ptls->zone);
destroy_workqueue(ptls->wq);
- /* check if all resources are freed */
- MPASS(priv->tls.num_resources == 0);
-
for (x = 0; x != MLX5E_TLS_STATS_NUM; x++)
counter_u64_free(ptls->stats.arg[x]);
}
+
+static int
+mlx5e_tls_st_init(struct mlx5e_priv *priv, struct mlx5e_tls_tag *ptag)
+{
+ int err;
+
+ /* try to open TIS, if not present */
+ if (ptag->tisn == 0) {
+ err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
+ priv->pdn, &ptag->tisn);
+ if (err) {
+ MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
+ return (-err);
+ }
+ }
+ MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);
+
+ /* try to allocate a DEK context ID */
+ err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
+ MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
+ MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
+ &ptag->dek_index);
+ if (err) {
+ MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
+ return (-err);
+ }
+
+ MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);
+
+ ptag->dek_index_ok = 1;
+
+ MLX5E_TLS_TAG_LOCK(ptag);
+ if (ptag->state == MLX5E_TLS_ST_INIT)
+ ptag->state = MLX5E_TLS_ST_SETUP;
+ MLX5E_TLS_TAG_UNLOCK(ptag);
+ return (0);
+}
+
static void
mlx5e_tls_work(struct work_struct *work)
{
struct mlx5e_tls_tag *ptag;
struct mlx5e_priv *priv;
- int err;
ptag = container_of(work, struct mlx5e_tls_tag, work);
priv = container_of(ptag->tls, struct mlx5e_priv, tls);
switch (ptag->state) {
case MLX5E_TLS_ST_INIT:
- /* try to open TIS, if not present */
- if (ptag->tisn == 0) {
- err = mlx5_tls_open_tis(priv->mdev, 0, priv->tdn,
- priv->pdn, &ptag->tisn);
- if (err) {
- MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
- break;
- }
- }
- MLX5_SET(sw_tls_cntx, ptag->crypto_params, progress.pd, ptag->tisn);
-
- /* try to allocate a DEK context ID */
- err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
- MLX5_ADDR_OF(sw_tls_cntx, ptag->crypto_params, key.key_data),
- MLX5_GET(sw_tls_cntx, ptag->crypto_params, key.key_len),
- &ptag->dek_index);
- if (err) {
- MLX5E_TLS_STAT_INC(ptag, tx_error, 1);
- break;
- }
-
- MLX5_SET(sw_tls_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);
-
- ptag->dek_index_ok = 1;
-
- MLX5E_TLS_TAG_LOCK(ptag);
- if (ptag->state == MLX5E_TLS_ST_INIT)
- ptag->state = MLX5E_TLS_ST_SETUP;
- MLX5E_TLS_TAG_UNLOCK(ptag);
+ (void)mlx5e_tls_st_init(priv, ptag);
break;
case MLX5E_TLS_ST_RELEASE:
/* try to destroy DEK context by ID */
if (ptag->dek_index_ok)
- err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);
+ (void)mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);
/* free tag */
mlx5e_tls_tag_zfree(ptag);
@@ -307,8 +394,7 @@ mlx5e_tls_set_params(void *ctx, const struct tls_session_params *en)
CTASSERT(MLX5E_TLS_ST_INIT == 0);
int
-mlx5e_tls_snd_tag_alloc(if_t ifp,
- union if_snd_tag_alloc_params *params,
+mlx5e_tls_snd_tag_alloc(if_t ifp, union if_snd_tag_alloc_params *params,
struct m_snd_tag **ppmt)
{
union if_snd_tag_alloc_params rl_params;
@@ -323,30 +409,16 @@ mlx5e_tls_snd_tag_alloc(if_t ifp,
if (priv->gone != 0 || priv->tls.init == 0)
return (EOPNOTSUPP);
- /* allocate new tag from zone, if any */
ptag = uma_zalloc(priv->tls.zone, M_NOWAIT);
- if (ptag == NULL)
- return (ENOMEM);
+ if (ptag == NULL)
+ return (ENOMEM);
/* sanity check default values */
MPASS(ptag->dek_index == 0);
MPASS(ptag->dek_index_ok == 0);
- /* setup TLS tag */
- ptag->tls = &priv->tls;
-
/* check if there is no TIS context */
- if (ptag->tisn == 0) {
- uint32_t value;
-
- value = atomic_fetchadd_32(&priv->tls.num_resources, 1U);
-
- /* check resource limits */
- if (value >= priv->tls.max_resources) {
- error = ENOMEM;
- goto failure;
- }
- }
+ KASSERT(ptag->tisn != 0, ("ptag %p w/0 tisn", ptag));
en = &params->tls.tls->params;
@@ -439,8 +511,9 @@ mlx5e_tls_snd_tag_alloc(if_t ifp,
/* reset state */
ptag->state = MLX5E_TLS_ST_INIT;
- queue_work(priv->tls.wq, &ptag->work);
- flush_work(&ptag->work);
+ error = mlx5e_tls_st_init(priv, ptag);
+ if (error != 0)
+ goto failure;
return (0);
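
Net effect of the last hunk: tag setup moved from an asynchronous work item, whose result the allocator never saw, to a direct call whose error unwinds the allocation. Schematically, with hypothetical stubs:

struct fake_tls_tag { int tisn; };

/* Stands in for mlx5e_tls_st_init(): opens TIS, creates DEK; may sleep. */
static int
st_init(struct fake_tls_tag *t) { t->tisn = 1; return (0); }

static int
tag_alloc_tail(struct fake_tls_tag *t)
{
	int error;

	/* was: queue_work(wq, &t->work); flush_work(&t->work); */
	error = st_init(t);
	if (error != 0)
		return (error);		/* caller frees the tag */
	return (0);
}
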
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
index 19c6adf18809..89d2010656c5 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
@@ -29,6 +29,7 @@
#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/crypto.h>
#include <dev/mlx5/tls.h>
#include <dev/mlx5/fs.h>
@@ -41,13 +42,30 @@
static if_snd_tag_free_t mlx5e_tls_rx_snd_tag_free;
static if_snd_tag_modify_t mlx5e_tls_rx_snd_tag_modify;
+static if_snd_tag_status_str_t mlx5e_tls_rx_snd_tag_status_str;
static const struct if_snd_tag_sw mlx5e_tls_rx_snd_tag_sw = {
.snd_tag_modify = mlx5e_tls_rx_snd_tag_modify,
.snd_tag_free = mlx5e_tls_rx_snd_tag_free,
+ .snd_tag_status_str = mlx5e_tls_rx_snd_tag_status_str,
.type = IF_SND_TAG_TYPE_TLS_RX
};
+static const char *mlx5e_tls_rx_progress_params_auth_state_str[] = {
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD] = "no_offload",
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_OFFLOAD] = "offload",
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION] =
+ "authentication",
+};
+
+static const char *mlx5e_tls_rx_progress_params_record_tracker_state_str[] = {
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START] = "start",
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING] =
+ "tracking",
+ [MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING] =
+ "searching",
+};
+
MALLOC_DEFINE(M_MLX5E_TLS_RX, "MLX5E_TLS_RX", "MLX5 ethernet HW TLS RX");
/* software TLS RX context */
@@ -249,7 +267,8 @@ mlx5e_tls_rx_send_progress_parameters_sync(struct mlx5e_iq *iq,
mtx_unlock(&iq->lock);
while (1) {
- if (wait_for_completion_timeout(&ptag->progress_complete, hz) != 0)
+ if (wait_for_completion_timeout(&ptag->progress_complete,
+ msecs_to_jiffies(1000)) != 0)
break;
priv = container_of(iq, struct mlx5e_channel, iq)->priv;
if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
@@ -330,7 +349,8 @@ done:
* Zero is returned upon success, else some error happened.
*/
static int
-mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag *ptag)
+mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq,
+ struct mlx5e_tls_rx_tag *ptag, mlx5e_iq_callback_t *cb)
{
struct mlx5e_get_tls_progress_params_wqe *wqe;
const u32 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -366,7 +386,7 @@ mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_r
memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));
iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
- iq->data[pi].callback = &mlx5e_tls_rx_receive_progress_parameters_cb;
+ iq->data[pi].callback = cb;
iq->data[pi].arg = ptag;
m_snd_tag_ref(&ptag->tag);
@@ -387,11 +407,12 @@ static int
mlx5e_tls_rx_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
struct mlx5e_tls_rx_tag *ptag;
+ struct mlx5_core_dev *mdev = arg;
int i;
for (i = 0; i != cnt; i++) {
ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS_RX,
- mlx5_dev_domainset(arg), flags | M_ZERO);
+ mlx5_dev_domainset(mdev), flags | M_ZERO);
mtx_init(&ptag->mtx, "mlx5-tls-rx-tag-mtx", NULL, MTX_DEF);
INIT_WORK(&ptag->work, mlx5e_tls_rx_work);
store[i] = ptag;
@@ -551,6 +572,7 @@ mlx5e_tls_rx_work(struct work_struct *work)
/* try to allocate a DEK context ID */
err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS,
MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, key.key_data),
MLX5_GET(sw_tls_rx_cntx, ptag->crypto_params, key.key_len),
&ptag->dek_index);
@@ -637,7 +659,8 @@ mlx5e_tls_rx_set_params(void *ctx, struct inpcb *inp, const struct tls_session_p
return (EINVAL);
MLX5_SET64(sw_tls_rx_cntx, ctx, param.initial_record_number, tls_sn_he);
- MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, tcp_sn_he);
+ MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, 0);
+ MLX5_SET(sw_tls_rx_cntx, ctx, progress.next_record_tcp_sn, tcp_sn_he);
return (0);
}
@@ -659,7 +682,7 @@ mlx5e_tls_rx_snd_tag_alloc(if_t ifp,
struct mlx5e_iq *iq;
struct mlx5e_priv *priv;
struct mlx5e_tls_rx_tag *ptag;
- struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_handle *flow_rule;
const struct tls_session_params *en;
uint32_t value;
int error;
@@ -816,6 +839,7 @@ mlx5e_tls_rx_snd_tag_alloc(if_t ifp,
}
ptag->flow_rule = flow_rule;
+ init_completion(&ptag->progress_complete);
return (0);
@@ -965,7 +989,8 @@ mlx5e_tls_rx_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_param
params->tls_rx.tls_rec_length,
params->tls_rx.tls_seq_number) &&
ptag->tcp_resync_pending == 0) {
- err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag);
+ err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag,
+ &mlx5e_tls_rx_receive_progress_parameters_cb);
if (err != 0) {
MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_err, 1);
} else {
@@ -998,6 +1023,74 @@ mlx5e_tls_rx_snd_tag_free(struct m_snd_tag *pmt)
queue_work(priv->tls_rx.wq, &ptag->work);
}
+static void
+mlx5e_tls_rx_str_status_cb(void *arg)
+{
+ struct mlx5e_tls_rx_tag *ptag;
+
+ ptag = (struct mlx5e_tls_rx_tag *)arg;
+ complete_all(&ptag->progress_complete);
+ m_snd_tag_rele(&ptag->tag);
+}
+
+static int
+mlx5e_tls_rx_snd_tag_status_str(struct m_snd_tag *pmt, char *buf, size_t *sz)
+{
+ int err, out_size;
+ struct mlx5e_iq *iq;
+ void *buffer;
+ uint32_t tracker_state_val;
+ uint32_t auth_state_val;
+ struct mlx5e_priv *priv;
+ struct mlx5e_tls_rx_tag *ptag =
+ container_of(pmt, struct mlx5e_tls_rx_tag, tag);
+
+ if (buf == NULL)
+ return (0);
+
+ MLX5E_TLS_RX_TAG_LOCK(ptag);
+ priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);
+ iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);
+ reinit_completion(&ptag->progress_complete);
+ err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag,
+ &mlx5e_tls_rx_str_status_cb);
+ MLX5E_TLS_RX_TAG_UNLOCK(ptag);
+ if (err != 0)
+ return (err);
+
+ for (;;) {
+ if (wait_for_completion_timeout(&ptag->progress_complete,
+ msecs_to_jiffies(1000)) != 0)
+ break;
+ if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+ pci_channel_offline(priv->mdev->pdev) != 0)
+ return (ENXIO);
+ }
+ buffer = mlx5e_tls_rx_get_progress_buffer(ptag);
+ tracker_state_val = MLX5_GET(tls_progress_params, buffer,
+ record_tracker_state);
+ auth_state_val = MLX5_GET(tls_progress_params, buffer, auth_state);
+
+ /* Validate tracker state value is in range */
+ if (tracker_state_val >
+ MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_SEARCHING)
+ return (EINVAL);
+
+ /* Validate auth state value is in range */
+ if (auth_state_val >
+ MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_AUTHENTICATION)
+ return (EINVAL);
+
+ out_size = snprintf(buf, *sz, "tracker_state: %s, auth_state: %s",
+ mlx5e_tls_rx_progress_params_record_tracker_state_str[
+ tracker_state_val],
+ mlx5e_tls_rx_progress_params_auth_state_str[auth_state_val]);
+
+ if (out_size <= *sz)
+ *sz = out_size;
+ return (0);
+}
+
#else
int
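
The status-string handler above indexes two string tables with values reported by firmware, so both are range-checked first. The same guard, reduced to a runnable form:

#include <stdio.h>

static const char *tracker_str[] = {
	[0] = "start", [1] = "tracking", [2] = "searching",
};

static const char *
tracker_name(unsigned int v)
{
	/* Reject out-of-range values instead of reading past the table. */
	if (v >= sizeof(tracker_str) / sizeof(tracker_str[0]))
		return (NULL);		/* driver returns EINVAL here */
	return (tracker_str[v]);
}

int
main(void)
{
	printf("%s\n", tracker_name(1));	/* prints "tracking" */
	return (0);
}
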
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index e1dbd02fcf3a..f83506bda1aa 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -24,17 +24,22 @@
* SUCH DAMAGE.
*/
+#include "opt_ipsec.h"
#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"
#include <dev/mlx5/mlx5_en/en.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <machine/atomic.h>
#include <net/debugnet.h>
+#include <netinet/tcp_ratelimit.h>
+#include <netipsec/keydb.h>
+#include <netipsec/ipsec_offload.h>
static int mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs);
static if_snd_tag_query_t mlx5e_ul_snd_tag_query;
@@ -164,186 +169,186 @@ static const struct media mlx5e_mode_table[MLX5E_LINK_SPEEDS_NUMBER] =
},
};
-static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_CABLE_TYPE_NUMBER] =
+static const struct media mlx5e_ext_mode_table[MLX5E_EXT_LINK_SPEEDS_NUMBER][MLX5E_CONNECTOR_TYPE_NUMBER] =
{
/**/
- [MLX5E_SGMII_100M][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_SGMII_100M][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_100_SGMII,
.baudrate = IF_Mbps(100),
},
/**/
- [MLX5E_1000BASE_X_SGMII][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_1000BASE_X_SGMII][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_1000_CX,
.baudrate = IF_Mbps(1000),
},
- [MLX5E_1000BASE_X_SGMII][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_1000BASE_X_SGMII][MLX5E_PORT_FIBRE] = {
.subtype = IFM_1000_SX,
.baudrate = IF_Mbps(1000),
},
/**/
- [MLX5E_5GBASE_R][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_5GBASE_R][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_5000_KR,
.baudrate = IF_Mbps(5000),
},
- [MLX5E_5GBASE_R][MLX5E_CABLE_TYPE_TWISTED_PAIR] = {
+ [MLX5E_5GBASE_R][MLX5E_PORT_TP] = {
.subtype = IFM_5000_T,
.baudrate = IF_Mbps(5000),
},
/**/
- [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_10G_KR,
.baudrate = IF_Gbps(10ULL),
},
- [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_PORT_DA] = {
.subtype = IFM_10G_CR1,
.baudrate = IF_Gbps(10ULL),
},
- [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_10GBASE_XFI_XAUI_1][MLX5E_PORT_FIBRE] = {
.subtype = IFM_10G_SR,
.baudrate = IF_Gbps(10ULL),
},
/**/
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_40G_KR4,
.baudrate = IF_Gbps(40ULL),
},
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_PORT_DA] = {
.subtype = IFM_40G_CR4,
.baudrate = IF_Gbps(40ULL),
},
- [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_40GBASE_XLAUI_4_XLPPI_4][MLX5E_PORT_FIBRE] = {
.subtype = IFM_40G_SR4,
.baudrate = IF_Gbps(40ULL),
},
/**/
- [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_25G_KR,
.baudrate = IF_Gbps(25ULL),
},
- [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_PORT_DA] = {
.subtype = IFM_25G_CR,
.baudrate = IF_Gbps(25ULL),
},
- [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_PORT_FIBRE] = {
.subtype = IFM_25G_SR,
.baudrate = IF_Gbps(25ULL),
},
- [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_CABLE_TYPE_TWISTED_PAIR] = {
+ [MLX5E_25GAUI_1_25GBASE_CR_KR][MLX5E_PORT_TP] = {
.subtype = IFM_25G_T,
.baudrate = IF_Gbps(25ULL),
},
/**/
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_50G_KR2,
.baudrate = IF_Gbps(50ULL),
},
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_PORT_DA] = {
.subtype = IFM_50G_CR2,
.baudrate = IF_Gbps(50ULL),
},
- [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2][MLX5E_PORT_FIBRE] = {
.subtype = IFM_50G_SR2,
.baudrate = IF_Gbps(50ULL),
},
/**/
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_50G_KR_PAM4,
.baudrate = IF_Gbps(50ULL),
},
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_PORT_DA] = {
.subtype = IFM_50G_CP,
.baudrate = IF_Gbps(50ULL),
},
- [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR][MLX5E_PORT_FIBRE] = {
.subtype = IFM_50G_SR,
.baudrate = IF_Gbps(50ULL),
},
/**/
- [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_100G_KR4,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_PORT_DA] = {
.subtype = IFM_100G_CR4,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_CAUI_4_100GBASE_CR4_KR4][MLX5E_PORT_FIBRE] = {
.subtype = IFM_100G_SR4,
.baudrate = IF_Gbps(100ULL),
},
/**/
- [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_100G_KR_PAM4,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_PORT_DA] = {
.subtype = IFM_100G_CR_PAM4,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_100GAUI_1_100GBASE_CR_KR][MLX5E_PORT_FIBRE] = {
.subtype = IFM_100G_SR2, /* XXX */
.baudrate = IF_Gbps(100ULL),
},
/**/
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_100G_KR4,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_PORT_DA] = {
.subtype = IFM_100G_CP2,
.baudrate = IF_Gbps(100ULL),
},
- [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_100GAUI_2_100GBASE_CR2_KR2][MLX5E_PORT_FIBRE] = {
.subtype = IFM_100G_SR2,
.baudrate = IF_Gbps(100ULL),
},
/**/
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_200G_KR4_PAM4, /* XXX */
.baudrate = IF_Gbps(200ULL),
},
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_PORT_DA] = {
.subtype = IFM_200G_CR4_PAM4, /* XXX */
.baudrate = IF_Gbps(200ULL),
},
- [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_200GAUI_2_200GBASE_CR2_KR2][MLX5E_PORT_FIBRE] = {
.subtype = IFM_200G_SR4, /* XXX */
.baudrate = IF_Gbps(200ULL),
},
/**/
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_200G_KR4_PAM4,
.baudrate = IF_Gbps(200ULL),
},
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_PASSIVE_COPPER] = {
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_PORT_DA] = {
.subtype = IFM_200G_CR4_PAM4,
.baudrate = IF_Gbps(200ULL),
},
- [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_CABLE_TYPE_OPTICAL_MODULE] = {
+ [MLX5E_200GAUI_4_200GBASE_CR4_KR4][MLX5E_PORT_FIBRE] = {
.subtype = IFM_200G_SR4,
.baudrate = IF_Gbps(200ULL),
},
/**/
- [MLX5E_400GAUI_8][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_400GAUI_8][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_400G_LR8, /* XXX */
.baudrate = IF_Gbps(400ULL),
},
/**/
- [MLX5E_400GAUI_4_400GBASE_CR4_KR4][MLX5E_CABLE_TYPE_UNKNOWN] = {
+ [MLX5E_400GAUI_4_400GBASE_CR4_KR4][MLX5E_PORT_UNKNOWN] = {
.subtype = IFM_400G_LR8, /* XXX */
.baudrate = IF_Gbps(400ULL),
},
@@ -367,7 +372,7 @@ mlx5e_update_carrier(struct mlx5e_priv *priv)
u32 eth_proto_oper;
int error;
u8 i;
- u8 cable_type;
+ u8 connector_type;
u8 port_state;
u8 is_er_type;
bool ext;
@@ -398,24 +403,14 @@ mlx5e_update_carrier(struct mlx5e_priv *priv)
ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
-
+ connector_type = MLX5_GET(ptys_reg, out, connector_type);
i = ilog2(eth_proto_oper);
if (ext) {
- error = mlx5_query_pddr_cable_type(mdev, 1, &cable_type);
- if (error != 0) {
- /* use fallback entry */
- media_entry = mlx5e_ext_mode_table[i][MLX5E_CABLE_TYPE_UNKNOWN];
-
- mlx5_en_err(priv->ifp,
- "query port pddr failed: %d\n", error);
- } else {
- media_entry = mlx5e_ext_mode_table[i][cable_type];
-
+ media_entry = mlx5e_ext_mode_table[i][connector_type];
/* check if we should use fallback entry */
- if (media_entry.subtype == 0)
- media_entry = mlx5e_ext_mode_table[i][MLX5E_CABLE_TYPE_UNKNOWN];
- }
+ if (media_entry.subtype == 0)
+ media_entry = mlx5e_ext_mode_table[i][MLX5E_PORT_UNKNOWN];
} else {
media_entry = mlx5e_mode_table[i];
}
@@ -1221,9 +1216,9 @@ mlx5e_create_rq(struct mlx5e_channel *c,
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
- nsegs * MLX5E_MAX_RX_BYTES, /* maxsize */
+ nsegs * wqe_sz, /* maxsize */
nsegs, /* nsegments */
- nsegs * MLX5E_MAX_RX_BYTES, /* maxsegsize */
+ nsegs * wqe_sz, /* maxsegsize */
0, /* flags */
NULL, NULL, /* lockfunc, lockfuncarg */
&rq->dma_tag)))
@@ -1320,6 +1315,8 @@ mlx5e_destroy_rq(struct mlx5e_rq *rq)
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
for (i = 0; i != wq_sz; i++) {
if (rq->mbuf[i].mbuf != NULL) {
+ if (rq->mbuf[i].ipsec_mtag != NULL)
+ m_tag_free(&rq->mbuf[i].ipsec_mtag->tag);
bus_dmamap_unload(rq->dma_tag, rq->mbuf[i].dma_map);
m_freem(rq->mbuf[i].mbuf);
}
@@ -1463,6 +1460,17 @@ static void
mlx5e_close_rq_wait(struct mlx5e_rq *rq)
{
+ mtx_lock(&rq->mtx);
+ MPASS(rq->enabled == 0);
+ while (rq->processing > 0) {
+ /*
+ * No wakeup, relying on timeout.
+ * Use msleep_sbt() since msleep() conflicts with linuxkpi.
+ */
+ msleep_sbt(&rq->processing, &rq->mtx, 0, "mlx5ecrq",
+ tick_sbt * hz, 0, C_HARDCLOCK);
+ }
+ mtx_unlock(&rq->mtx);
mlx5e_disable_rq(rq);
mlx5e_close_cq(&rq->cq);
cancel_work_sync(&rq->dim.work);
@@ -2310,30 +2318,18 @@ mlx5e_close_channel_wait(struct mlx5e_channel *c)
static int
mlx5e_get_wqe_sz(struct mlx5e_priv *priv, u32 *wqe_sz, u32 *nsegs)
{
- u32 r, n;
+ u32 r, n, maxs;
- r = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
+ maxs = priv->params.hw_lro_en ? priv->params.lro_wqe_sz :
MLX5E_SW2MB_MTU(if_getmtu(priv->ifp));
- if (r > MJUM16BYTES)
- return (-ENOMEM);
-
- if (r > MJUM9BYTES)
- r = MJUM16BYTES;
- else if (r > MJUMPAGESIZE)
- r = MJUM9BYTES;
- else if (r > MCLBYTES)
- r = MJUMPAGESIZE;
- else
- r = MCLBYTES;
+ r = maxs > MCLBYTES ? MJUMPAGESIZE : MCLBYTES;
/*
* n + 1 must be a power of two, because stride size must be.
* Stride size is 16 * (n + 1), as the first segment is
* control.
*/
- for (n = howmany(r, MLX5E_MAX_RX_BYTES); !powerof2(n + 1); n++)
- ;
-
+ n = roundup_pow_of_two(1 + howmany(maxs, r)) - 1;
if (n > MLX5E_MAX_BUSDMA_RX_SEGS)
return (-ENOMEM);
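
The old loop searched for the smallest n such that n + 1 is a power of two; the replacement computes it in closed form. A worked standalone example, with local re-implementations of howmany() and roundup_pow_of_two() and MJUMPAGESIZE assumed to be 4096:

#include <stdio.h>

static unsigned howmany_(unsigned x, unsigned y) { return ((x + y - 1) / y); }

static unsigned
roundup_pow_of_two_(unsigned x)
{
	unsigned r = 1;

	while (r < x)
		r <<= 1;
	return (r);
}

int
main(void)
{
	unsigned maxs = 65535;	/* e.g. lro_wqe_sz with HW LRO enabled */
	unsigned r = 4096;	/* MJUMPAGESIZE on common platforms */
	unsigned n = roundup_pow_of_two_(1 + howmany_(maxs, r)) - 1;

	/* n + 1 must be a power of two: stride is 16 * (n + 1) bytes. */
	printf("segments n=%u, stride=%u bytes\n", n, 16 * (n + 1));
	return (0);	/* prints: segments n=31, stride=512 bytes */
}
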
@@ -2940,6 +2936,71 @@ mlx5e_get_rss_key(void *key_ptr)
}
static void
+mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz(struct mlx5e_priv *priv, u32 *tirc)
+{
+ MLX5_SET(tirc, tirc, lro_max_msg_sz, (priv->params.lro_wqe_sz >> 8) -
+ (MLX5_CAP_ETH(priv->mdev, lro_max_msg_sz_mode) == 0 ? 1 : 0));
+}
+
+static void
+mlx5e_hw_lro_set_tir_ctx(struct mlx5e_priv *priv, u32 *tirc)
+{
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ /* TODO: add the option to choose timer value dynamically */
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev, lro_timer_supported_periods[2]));
+ mlx5e_hw_lro_set_tir_ctx_lro_max_msg_sz(priv, tirc);
+}
+
+static int
+mlx5e_hw_lro_update_tir(struct mlx5e_priv *priv, int tt, bool inner_vxlan)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (in == NULL)
+ return (-ENOMEM);
+ tirc = MLX5_ADDR_OF(modify_tir_in, in, tir_context);
+
+ /* fill the command part */
+ MLX5_SET(modify_tir_in, in, tirn, inner_vxlan ?
+ priv->tirn_inner_vxlan[tt] : priv->tirn[tt]);
+ MLX5_SET64(modify_tir_in, in, modify_bitmask,
+ (1 << MLX5_MODIFY_TIR_BITMASK_LRO));
+
+ /* fill the context */
+ if (priv->params.hw_lro_en)
+ mlx5e_hw_lro_set_tir_ctx(priv, tirc);
+
+ err = mlx5_core_modify_tir(mdev, in, inlen);
+
+ kvfree(in);
+ return (err);
+}
+
+int
+mlx5e_hw_lro_update_tirs(struct mlx5e_priv *priv)
+{
+ int err, err1, i;
+
+ err = 0;
+ for (i = 0; i != 2 * MLX5E_NUM_TT; i++) {
+ err1 = mlx5e_hw_lro_update_tir(priv, i / 2, (i % 2) ? true :
+ false);
+ if (err1 != 0 && err == 0)
+ err = err1;
+ }
+ return (-err);
+}
+
+static void
mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt, bool inner_vxlan)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
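
mlx5e_hw_lro_update_tirs() above walks every (traffic type, inner-vxlan) pair and keeps the first error while still visiting the rest. The index decoding, runnable in isolation:

#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	const int num_tt = 3;	/* stand-in for MLX5E_NUM_TT */

	/* i / 2 selects the traffic type, i % 2 selects inner-vxlan. */
	for (int i = 0; i != 2 * num_tt; i++) {
		int tt = i / 2;
		bool inner_vxlan = (i % 2) != 0;

		printf("tt=%d inner=%d\n", tt, inner_vxlan);
	}
	return (0);
}
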
@@ -2949,8 +3010,6 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt, bool inner_vxla
MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
-#define ROUGH_MAX_L2_L3_HDR_SZ 256
-
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -2963,18 +3022,8 @@ mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 * tirc, int tt, bool inner_vxla
MLX5_HASH_FIELD_SEL_DST_IP |\
MLX5_HASH_FIELD_SEL_IPSEC_SPI)
- if (priv->params.hw_lro_en) {
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_msg_sz,
- (priv->params.lro_wqe_sz -
- ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
- /* TODO: add the option to choose timer value dynamically */
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
- MLX5_CAP_ETH(priv->mdev,
- lro_timer_supported_periods[2]));
- }
+ if (priv->params.hw_lro_en)
+ mlx5e_hw_lro_set_tir_ctx(priv, tirc);
if (inner_vxlan)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
@@ -3399,6 +3448,51 @@ mlx5e_set_rx_mode(if_t ifp)
queue_work(priv->wq, &priv->set_rx_mode_work);
}
+static bool
+mlx5e_is_ipsec_capable(struct mlx5_core_dev *mdev)
+{
+#ifdef IPSEC_OFFLOAD
+ if ((mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PACKET_OFFLOAD) != 0)
+ return (true);
+#endif
+ return (false);
+}
+
+static bool
+mlx5e_is_ratelimit_capable(struct mlx5_core_dev *mdev)
+{
+#ifdef RATELIMIT
+ if (MLX5_CAP_GEN(mdev, qos) &&
+ MLX5_CAP_QOS(mdev, packet_pacing))
+ return (true);
+#endif
+ return (false);
+}
+
+static bool
+mlx5e_is_tlstx_capable(struct mlx5_core_dev *mdev)
+{
+#ifdef KERN_TLS
+ if (MLX5_CAP_GEN(mdev, tls_tx) != 0 &&
+ MLX5_CAP_GEN(mdev, log_max_dek) != 0)
+ return (true);
+#endif
+ return (false);
+}
+
+static bool
+mlx5e_is_tlsrx_capable(struct mlx5_core_dev *mdev)
+{
+#ifdef KERN_TLS
+ if (MLX5_CAP_GEN(mdev, tls_rx) != 0 &&
+ MLX5_CAP_GEN(mdev, log_max_dek) != 0 &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
+ ft_field_support.outer_ip_version) != 0)
+ return (true);
+#endif
+ return (false);
+}
+
static int
mlx5e_ioctl(if_t ifp, u_long command, caddr_t data)
{
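The mlx5e_is_*_capable() helpers above all follow one pattern: when the relevant kernel option is compiled out, the probe reports not-capable, so a single gating path works on any kernel configuration. A minimal sketch of the pattern, where EXAMPLE_OPTION and example_cap are placeholder names, not real option or capability identifiers:

	static bool
	mlx5e_is_example_capable(struct mlx5_core_dev *mdev)
	{
	#ifdef EXAMPLE_OPTION	/* placeholder kernel option */
		/* example_cap is a placeholder capability field */
		if (MLX5_CAP_GEN(mdev, example_cap) != 0)
			return (true);
	#endif
		return (false);
	}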
@@ -3502,6 +3596,24 @@ mlx5e_ioctl(if_t ifp, u_long command, caddr_t data)
drv_ioctl_data = (struct siocsifcapnv_driver_data *)data;
PRIV_LOCK(priv);
siocsifcap_driver:
+ if (!mlx5e_is_tlstx_capable(priv->mdev)) {
+ drv_ioctl_data->reqcap &= ~(IFCAP_TXTLS4 |
+ IFCAP_TXTLS6);
+ }
+ if (!mlx5e_is_tlsrx_capable(priv->mdev)) {
+ drv_ioctl_data->reqcap2 &= ~(
+ IFCAP2_BIT(IFCAP2_RXTLS4) |
+ IFCAP2_BIT(IFCAP2_RXTLS6));
+ }
+ if (!mlx5e_is_ipsec_capable(priv->mdev)) {
+ drv_ioctl_data->reqcap2 &=
+ ~IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD);
+ }
+ if (!mlx5e_is_ratelimit_capable(priv->mdev)) {
+ drv_ioctl_data->reqcap &= ~(IFCAP_TXTLS_RTLMT |
+ IFCAP_TXRTLMT);
+ }
+
mask = drv_ioctl_data->reqcap ^ if_getcapenable(ifp);
if (mask & IFCAP_TXCSUM) {
@@ -3602,31 +3714,11 @@ siocsifcap_driver:
}
VLAN_CAPABILITIES(ifp);
- /* turn off LRO means also turn of HW LRO - if it's on */
- if (mask & IFCAP_LRO) {
- int was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- bool need_restart = false;
+ /* hw_lro and IFCAP_LRO are divorced; only toggle SW LRO. */
+ if (mask & IFCAP_LRO)
if_togglecapenable(ifp, IFCAP_LRO);
- /* figure out if updating HW LRO is needed */
- if (!(if_getcapenable(ifp) & IFCAP_LRO)) {
- if (priv->params.hw_lro_en) {
- priv->params.hw_lro_en = false;
- need_restart = true;
- }
- } else {
- if (priv->params.hw_lro_en == false &&
- priv->params_ethtool.hw_lro != 0) {
- priv->params.hw_lro_en = true;
- need_restart = true;
- }
- }
- if (was_opened && need_restart) {
- mlx5e_close_locked(ifp);
- mlx5e_open_locked(ifp);
- }
- }
if (mask & IFCAP_HWRXTSTMP) {
if_togglecapenable(ifp, IFCAP_HWRXTSTMP);
if (if_getcapenable(ifp) & IFCAP_HWRXTSTMP) {
@@ -3642,6 +3734,18 @@ siocsifcap_driver:
if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS4));
if ((mask & IFCAP2_BIT(IFCAP2_RXTLS6)) != 0)
if_togglecapenable2(ifp, IFCAP2_BIT(IFCAP2_RXTLS6));
+#ifdef IPSEC_OFFLOAD
+ if ((mask & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0) {
+ bool was_enabled = (if_getcapenable2(ifp) &
+ IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0;
+ mlx5e_close_locked(ifp);
+ if (was_enabled)
+ ipsec_accel_on_ifdown(priv->ifp);
+ if_togglecapenable2(ifp,
+ IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD));
+ mlx5e_open_locked(ifp);
+ }
+#endif
out:
PRIV_UNLOCK(priv);
break;
@@ -3674,6 +3778,11 @@ out:
/* Check if module is present before doing an access */
module_status = mlx5_query_module_status(priv->mdev, module_num);
if (module_status != MLX5_MODULE_STATUS_PLUGGED_ENABLED) {
+ if (bootverbose)
+ mlx5_en_err(ifp,
+ "Query module %d status: not plugged (%d), "
+ "eeprom reading is not supported\n",
+ module_num, module_status);
error = EINVAL;
goto err_i2c;
}
@@ -4485,10 +4594,6 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
M_MLX5EN, mlx5_dev_domainset(mdev), M_WAITOK | M_ZERO);
ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev);
- if (ifp == NULL) {
- mlx5_core_err(mdev, "if_alloc() failed\n");
- goto err_free_priv;
- }
/* setup all static fields */
if (mlx5e_priv_static_init(priv, mdev, mdev->priv.eq_table.num_comp_vectors)) {
mlx5_core_err(mdev, "mlx5e_priv_static_init() failed\n");
@@ -4517,13 +4622,21 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
if_setcapabilitiesbit(ifp, IFCAP_TSO | IFCAP_VLAN_HWTSO, 0);
if_setcapabilitiesbit(ifp, IFCAP_HWSTATS | IFCAP_HWRXTSTMP, 0);
if_setcapabilitiesbit(ifp, IFCAP_MEXTPG, 0);
- if_setcapabilitiesbit(ifp, IFCAP_TXTLS4 | IFCAP_TXTLS6, 0);
-#ifdef RATELIMIT
- if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT | IFCAP_TXTLS_RTLMT, 0);
-#endif
+ if (mlx5e_is_tlstx_capable(mdev))
+ if_setcapabilitiesbit(ifp, IFCAP_TXTLS4 | IFCAP_TXTLS6, 0);
+ if (mlx5e_is_tlsrx_capable(mdev))
+ if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_RXTLS4) |
+ IFCAP2_BIT(IFCAP2_RXTLS6), 0);
+ if (mlx5e_is_ratelimit_capable(mdev)) {
+ if_setcapabilitiesbit(ifp, IFCAP_TXRTLMT, 0);
+ if (mlx5e_is_tlstx_capable(mdev))
+ if_setcapabilitiesbit(ifp, IFCAP_TXTLS_RTLMT, 0);
+ }
if_setcapabilitiesbit(ifp, IFCAP_VXLAN_HWCSUM | IFCAP_VXLAN_HWTSO, 0);
- if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_RXTLS4) |
- IFCAP2_BIT(IFCAP2_RXTLS6), 0);
+ if (mlx5e_is_ipsec_capable(mdev))
+ if_setcapabilities2bit(ifp, IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD),
+ 0);
+
if_setsndtagallocfn(ifp, mlx5e_snd_tag_alloc);
#ifdef RATELIMIT
if_setratelimitqueryfn(ifp, mlx5e_ratelimit_query);
@@ -4623,10 +4736,18 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
goto err_rl_init;
}
+ if ((if_getcapenable2(ifp) & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) != 0) {
+ err = mlx5e_ipsec_init(priv);
+ if (err) {
+ if_printf(ifp, "%s: mlx5e_ipsec_init failed\n", __func__);
+ goto err_tls_init;
+ }
+ }
+
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
if_printf(ifp, "%s: mlx5e_open_drop_rq failed (%d)\n", __func__, err);
- goto err_tls_init;
+ goto err_ipsec_init;
}
err = mlx5e_open_rqts(priv);
@@ -4802,6 +4923,9 @@ err_open_rqts:
err_open_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
+err_ipsec_init:
+ mlx5e_ipsec_cleanup(priv);
+
err_tls_init:
mlx5e_tls_cleanup(priv);
@@ -4828,8 +4952,6 @@ err_free_sysctl:
err_free_ifp:
if_free(ifp);
-
-err_free_priv:
free(priv, M_MLX5EN);
return (NULL);
}
@@ -4848,7 +4970,12 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
#ifdef RATELIMIT
/*
- * The kernel can have reference(s) via the m_snd_tag's into
+ * Tell the TCP ratelimit code to release the rate-sets attached
+ * to our ifnet.
+ */
+ tcp_rl_release_ifnet(ifp);
+ /*
+ * The kernel can still have reference(s) via the m_snd_tag's into
* the ratelimit channels, and these must go away before
* detaching:
*/
@@ -4910,10 +5037,14 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
ether_ifdetach(ifp);
mlx5e_tls_rx_cleanup(priv);
+#ifdef IPSEC_OFFLOAD
+ ipsec_accel_on_ifdown(priv->ifp);
+#endif
mlx5e_close_flow_tables(priv);
mlx5e_close_tirs(priv);
mlx5e_close_rqts(priv);
mlx5e_close_drop_rq(&priv->drop_rq);
+ mlx5e_ipsec_cleanup(priv);
mlx5e_tls_cleanup(priv);
mlx5e_rl_cleanup(priv);
@@ -5028,6 +5159,7 @@ mlx5e_cleanup(void)
module_init_order(mlx5e_init, SI_ORDER_SIXTH);
module_exit_order(mlx5e_cleanup, SI_ORDER_SIXTH);
+MODULE_DEPEND(mlx5en, ipsec, 1, 1, 1);
MODULE_DEPEND(mlx5en, linuxkpi, 1, 1, 1);
MODULE_DEPEND(mlx5en, mlx5, 1, 1, 1);
MODULE_VERSION(mlx5en, 1);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
index b52dee102a3d..eb569488631a 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
@@ -27,7 +27,9 @@
#include "opt_ratelimit.h"
#include <dev/mlx5/mlx5_en/en.h>
+#include <netinet/ip_var.h>
#include <machine/in_cksum.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
static inline int
mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
@@ -43,25 +45,21 @@ mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
if (rq->mbuf[ix].mbuf != NULL)
return (0);
- mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
- MLX5E_MAX_RX_BYTES);
+ mb_head = mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rq->wqe_sz);
if (unlikely(mb == NULL))
return (-ENOMEM);
- mb->m_len = MLX5E_MAX_RX_BYTES;
- mb->m_pkthdr.len = MLX5E_MAX_RX_BYTES;
+ mb->m_len = rq->wqe_sz;
+ mb->m_pkthdr.len = rq->wqe_sz;
for (i = 1; i < rq->nsegs; i++) {
- if (mb_head->m_pkthdr.len >= rq->wqe_sz)
- break;
- mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0,
- MLX5E_MAX_RX_BYTES);
+ mb = mb->m_next = m_getjcl(M_NOWAIT, MT_DATA, 0, rq->wqe_sz);
if (unlikely(mb == NULL)) {
m_freem(mb_head);
return (-ENOMEM);
}
- mb->m_len = MLX5E_MAX_RX_BYTES;
- mb_head->m_pkthdr.len += MLX5E_MAX_RX_BYTES;
+ mb->m_len = rq->wqe_sz;
+ mb_head->m_pkthdr.len += rq->wqe_sz;
}
/* rewind to first mbuf in chain */
mb = mb_head;
@@ -69,6 +67,9 @@ mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
/* get IP header aligned */
m_adj(mb, MLX5E_NET_IP_ALIGN);
+ err = mlx5_accel_ipsec_rx_tag_add(rq->ifp, &rq->mbuf[ix]);
+ if (err)
+ goto err_free_mbuf;
err = -bus_dmamap_load_mbuf_sg(rq->dma_tag, rq->mbuf[ix].dma_map,
mb, segs, &nsegs, BUS_DMA_NOWAIT);
if (err != 0)
@@ -124,6 +125,27 @@ mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
mlx5_wq_ll_update_db_record(&rq->wq);
}
+static uint32_t
+csum_reduce(uint32_t val)
+{
+ while (val > 0xffff)
+ val = (val >> 16) + (val & 0xffff);
+ return (val);
+}
+
+static u_short
+csum_buf(uint32_t val, void *buf, int len)
+{
+ u_short x;
+
+ MPASS(len % 2 == 0);
+ for (int i = 0; i < len; i += 2) {
+ bcopy((char *)buf + i, &x, 2);
+ val = csum_reduce(val + x);
+ }
+ return (val);
+}
+
static void
mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
{
@@ -135,6 +157,7 @@ mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
struct ip *ip4 = NULL;
struct tcphdr *th;
uint32_t *ts_ptr;
+ uint32_t tcp_csum;
uint8_t l4_hdr_type;
int tcp_ack;
@@ -164,10 +187,10 @@ mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
ts_ptr = (uint32_t *)(th + 1);
if (get_cqe_lro_tcppsh(cqe))
- th->th_flags |= TH_PUSH;
+ tcp_set_flags(th, tcp_get_flags(th) | TH_PUSH);
if (tcp_ack) {
- th->th_flags |= TH_ACK;
+ tcp_set_flags(th, tcp_get_flags(th) | TH_ACK);
th->th_ack = cqe->lro_ack_seq_num;
th->th_win = cqe->lro_tcp_win;
@@ -183,29 +206,55 @@ mlx5e_lro_update_hdr(struct mbuf *mb, struct mlx5_cqe64 *cqe)
* +--------+--------+--------+--------+
*/
if (get_cqe_lro_timestamp_valid(cqe) &&
- (__predict_true(*ts_ptr) == ntohl(TCPOPT_NOP << 24 |
+ (__predict_true(*ts_ptr == ntohl(TCPOPT_NOP << 24 |
TCPOPT_NOP << 16 | TCPOPT_TIMESTAMP << 8 |
- TCPOLEN_TIMESTAMP))) {
+ TCPOLEN_TIMESTAMP)))) {
/*
* cqe->timestamp is 64bit long.
* [0-31] - timestamp.
* [32-64] - timestamp echo replay.
*/
- ts_ptr[1] = *(uint32_t *)&cqe->timestamp;
ts_ptr[2] = *((uint32_t *)&cqe->timestamp + 1);
}
}
if (ip4) {
+ struct ipovly io;
+
ip4->ip_ttl = cqe->lro_min_ttl;
ip4->ip_len = cpu_to_be16(tot_len);
ip4->ip_sum = 0;
- ip4->ip_sum = in_cksum(mb, ip4->ip_hl << 2);
+ ip4->ip_sum = in_cksum_skip(mb, (ip4->ip_hl << 2) +
+ ETHER_HDR_LEN, ETHER_HDR_LEN);
+
+ /* TCP checksum: data */
+ tcp_csum = cqe->check_sum;
+
+ /* TCP checksum: IP pseudoheader */
+ bzero(io.ih_x1, sizeof(io.ih_x1));
+ io.ih_pr = IPPROTO_TCP;
+ io.ih_len = htons(ntohs(ip4->ip_len) - sizeof(*ip4));
+ io.ih_src = ip4->ip_src;
+ io.ih_dst = ip4->ip_dst;
+ tcp_csum = csum_buf(tcp_csum, &io, sizeof(io));
+
+ /* TCP checksum: TCP header */
+ th->th_sum = 0;
+ tcp_csum = csum_buf(tcp_csum, th, th->th_off * 4);
+ th->th_sum = ~tcp_csum & 0xffff;
} else {
ip6->ip6_hlim = cqe->lro_min_ttl;
ip6->ip6_plen = cpu_to_be16(tot_len -
sizeof(struct ip6_hdr));
+
+ /* TCP checksum */
+ th->th_sum = 0;
+ tcp_csum = ~in6_cksum_partial_l2(mb, IPPROTO_TCP,
+ sizeof(struct ether_header),
+ sizeof(struct ether_header) + sizeof(struct ip6_hdr),
+ tot_len - sizeof(struct ip6_hdr), th->th_off * 4) & 0xffff;
+ tcp_csum = csum_reduce(tcp_csum + cqe->check_sum);
+ th->th_sum = ~tcp_csum & 0xffff;
}
- /* TODO: handle tcp checksum */
}
static uint64_t
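The checksum rebuild above works in ones'-complement arithmetic: cqe->check_sum supplies the hardware checksum of the data, csum_buf() adds the IPv4 pseudoheader and TCP header 16 bits at a time, and csum_reduce() folds carries back into the low 16 bits before the final inversion. A small worked example of the folding, with an illustrative partial sum:

	uint32_t sum = 0x1ffff;		/* illustrative partial sum */
	while (sum > 0xffff)
		sum = (sum >> 16) + (sum & 0xffff);
	/* 0x1ffff -> 0x10000 -> 0x0001; then th_sum = ~sum & 0xffff */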
@@ -273,9 +322,8 @@ mlx5e_mbuf_tstmp(struct mlx5e_priv *priv, uint64_t hw_tstmp)
}
static inline void
-mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
- struct mlx5e_rq *rq, struct mbuf *mb,
- u32 cqe_bcnt)
+mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct mbuf *mb, struct mlx5e_rq_mbuf *mr, u32 cqe_bcnt)
{
if_t ifp = rq->ifp;
struct mlx5e_channel *c;
@@ -418,6 +466,8 @@ mlx5e_build_rx_mbuf(struct mlx5_cqe64 *cqe,
default:
break;
}
+
+ mlx5e_accel_ipsec_handle_rx(ifp, mb, cqe, mr);
}
static inline void
@@ -563,7 +613,9 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
("Filter returned %d!\n", rv));
}
}
- if ((MHLEN - MLX5E_NET_IP_ALIGN) >= byte_cnt &&
+ if (!mlx5e_accel_ipsec_flow(cqe) /* tag is already assigned
+ to rq->mbuf */ &&
+ MHLEN - MLX5E_NET_IP_ALIGN >= byte_cnt &&
(mb = m_gethdr(M_NOWAIT, MT_DATA)) != NULL) {
/* set maximum mbuf length */
mb->m_len = MHLEN - MLX5E_NET_IP_ALIGN;
@@ -580,7 +632,8 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
rq->mbuf[wqe_counter].dma_map);
}
rx_common:
- mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
+ mlx5e_build_rx_mbuf(cqe, rq, mb, &rq->mbuf[wqe_counter],
+ byte_cnt);
rq->stats.bytes += byte_cnt;
rq->stats.packets++;
#ifdef NUMA
@@ -644,6 +697,9 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
mtx_unlock(&c->iq.lock);
mtx_lock(&rq->mtx);
+ if (rq->enabled == 0)
+ goto out;
+ rq->processing++;
/*
* Polling the entire CQ without posting new WQEs results in
@@ -664,6 +720,8 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe __unused)
net_dim(&rq->dim, rq->stats.packets, rq->stats.bytes);
mlx5e_cq_arm(&rq->cq, MLX5_GET_DOORBELL_LOCK(&rq->channel->priv->doorbell_lock));
tcp_lro_flush_all(&rq->lro);
+ rq->processing--;
+out:
mtx_unlock(&rq->mtx);
for (int j = 0; j != MLX5E_MAX_TX_NUM_TC; j++) {
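The enabled/processing pair added above lets a teardown path quiesce the RX completion handler: clear rq->enabled under the mutex, then wait for processing to drain. A hedged sketch of such a wait loop; the driver's actual teardown sequence is not part of this diff, and the sleep interval is arbitrary:

	mtx_lock(&rq->mtx);
	rq->enabled = 0;
	while (rq->processing != 0) {
		/* illustrative timed poll; no wakeup channel needed */
		msleep(&rq->mtx, &rq->mtx, 0, "mlx5rq", hz / 100);
	}
	mtx_unlock(&rq->mtx);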
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index 1f2820abc30e..14f797dc1b1f 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -30,6 +30,7 @@
#include <dev/mlx5/mlx5_en/en.h>
#include <machine/atomic.h>
+#include <dev/mlx5/mlx5_accel/ipsec.h>
static inline bool
mlx5e_do_send_cqe_inline(struct mlx5e_sq *sq)
@@ -744,8 +745,10 @@ top:
/* get pointer to mbuf */
mb = *mbp;
+ mlx5e_accel_ipsec_handle_tx(mb, wqe);
+
/* Send a copy of the frame to the BPF listener, if any */
- if (ifp != NULL && if_getbpf(ifp) != NULL)
+ if (ifp != NULL)
ETHER_BPF_MTAP(ifp, mb);
if (mb->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) {
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib.h b/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
index 5dbde72d0c5b..c2c4bc5d2791 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib.h
@@ -170,7 +170,7 @@ struct mlx5_ib_flow_handler {
struct list_head list;
struct ib_flow ibflow;
struct mlx5_ib_flow_prio *prio;
- struct mlx5_flow_rule *rule;
+ struct mlx5_flow_handle *rule;
};
struct mlx5_ib_flow_db {
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
index 13908f75669a..81beadd263f7 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_cq.c
@@ -201,7 +201,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_WR_IMM:
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND:
wc->opcode = IB_WC_RECV;
@@ -213,12 +213,12 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
case MLX5_CQE_RESP_SEND_IMM:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cqe->imm_inval_pkey;
+ wc->ex.imm_data = cqe->immediate;
break;
case MLX5_CQE_RESP_SEND_INV:
wc->opcode = IB_WC_RECV;
wc->wc_flags = IB_WC_WITH_INVALIDATE;
- wc->ex.invalidate_rkey = be32_to_cpu(cqe->imm_inval_pkey);
+ wc->ex.invalidate_rkey = be32_to_cpu(cqe->inval_rkey);
break;
}
wc->src_qp = be32_to_cpu(cqe->flags_rqpn) & 0xffffff;
@@ -226,7 +226,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
wc->wc_flags |= g ? IB_WC_GRH : 0;
if (unlikely(is_qp1(qp->ibqp.qp_type))) {
- u16 pkey = be32_to_cpu(cqe->imm_inval_pkey) & 0xffff;
+ u16 pkey = be32_to_cpu(cqe->pkey) & 0xffff;
ib_find_cached_pkey(&dev->ib_dev, qp->port, pkey,
&wc->pkey_index);
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
index 027f8ded3faf..5825cee87d9b 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_main.c
@@ -2072,13 +2072,13 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mutex_lock(&dev->flow_db.lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
- mlx5_del_flow_rule(&iter->rule);
+ mlx5_del_flow_rules(&iter->rule);
put_flow_table(dev, iter->prio, true);
list_del(&iter->list);
kfree(iter);
}
- mlx5_del_flow_rule(&handler->rule);
+ mlx5_del_flow_rules(&handler->rule);
put_flow_table(dev, handler->prio, true);
mutex_unlock(&dev->flow_db.lock);
@@ -2107,6 +2107,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
enum flow_table_type ft_type)
{
bool dont_trap = flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP;
+ struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_ib_flow_prio *prio;
struct mlx5_flow_table *ft;
@@ -2155,10 +2156,11 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
ft = prio->flow_table;
if (!ft) {
- ft = mlx5_create_auto_grouped_flow_table(ns, priority, "bypass",
- num_entries,
- num_groups,
- 0);
+ ft_attr.prio = priority;
+ ft_attr.max_fte = num_entries;
+ ft_attr.autogroup.max_num_groups = num_groups;
+
+ ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
if (!IS_ERR(ft)) {
prio->refcount = 0;
@@ -2181,10 +2183,8 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
struct mlx5_flow_spec *spec;
const void *ib_flow = (const void *)flow_attr + sizeof(*flow_attr);
unsigned int spec_index;
- struct mlx5_flow_act flow_act = {
- .actions = MLX5_FLOW_ACT_ACTIONS_FLOW_TAG,
- .flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
- };
+ struct mlx5_flow_act flow_act = {};
+
u32 action;
int err = 0;
@@ -2198,6 +2198,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
goto free;
}
+ spec->flow_context.flags = FLOW_CONTEXT_HAS_TAG;
+ spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
+
INIT_LIST_HEAD(&handler->list);
for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
@@ -2210,13 +2213,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
}
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
- action = dst ? MLX5_FLOW_RULE_FWD_ACTION_DEST : 0;
- handler->rule = mlx5_add_flow_rule(ft, spec->match_criteria_enable,
- spec->match_criteria,
- spec->match_value,
- action,
- &flow_act,
- dst);
+ action = dst ? MLX5_FLOW_CONTEXT_ACTION_FWD_DEST : 0;
+ flow_act.action = action;
+ handler->rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
if (IS_ERR(handler->rule)) {
err = PTR_ERR(handler->rule);
@@ -2247,7 +2246,7 @@ static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *de
handler_dst = create_flow_rule(dev, ft_prio,
flow_attr, dst);
if (IS_ERR(handler_dst)) {
- mlx5_del_flow_rule(&handler->rule);
+ mlx5_del_flow_rules(&handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_dst;
@@ -2310,7 +2309,7 @@ static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *de
&leftovers_specs[LEFTOVERS_UC].flow_attr,
dst);
if (IS_ERR(handler_ucast)) {
- mlx5_del_flow_rule(&handler->rule);
+ mlx5_del_flow_rules(&handler->rule);
ft_prio->refcount--;
kfree(handler);
handler = handler_ucast;
@@ -2353,7 +2352,7 @@ static struct mlx5_ib_flow_handler *create_sniffer_rule(struct mlx5_ib_dev *dev,
return handler_rx;
err_tx:
- mlx5_del_flow_rule(&handler_rx->rule);
+ mlx5_del_flow_rules(&handler_rx->rule);
ft_rx->refcount--;
kfree(handler_rx);
err:
diff --git a/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c b/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
index bd06e531531b..9428e6ece3a4 100644
--- a/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
+++ b/sys/dev/mlx5/mlx5_ib/mlx5_ib_mem.c
@@ -78,10 +78,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
}
if (i) {
- m = min_t(unsigned long, ilog2(roundup_pow_of_two(i)), m);
+ m = min_t(unsigned long, order_base_2(i), m);
if (order)
- *order = ilog2(roundup_pow_of_two(i) >> m);
+ *order = order_base_2(i) - m;
*ncont = DIV_ROUND_UP(i, (1 << m));
} else {
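The replacement above is behavior-preserving: order_base_2(i) computes ceil(log2(i)), which equals ilog2(roundup_pow_of_two(i)). For example, with i = 10:

	unsigned long i = 10;
	/* roundup_pow_of_two(10) = 16 and ilog2(16) = 4 */
	MPASS(ilog2(roundup_pow_of_two(i)) == 4);
	/* order_base_2(10) = 4, so the two forms agree */
	MPASS(order_base_2(i) == 4);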
diff --git a/sys/dev/mlx5/mlx5_ifc.h b/sys/dev/mlx5/mlx5_ifc.h
index a10cb60dbfdd..3f75acd250da 100644
--- a/sys/dev/mlx5/mlx5_ifc.h
+++ b/sys/dev/mlx5/mlx5_ifc.h
@@ -63,7 +63,7 @@ enum {
MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
- MLX5_EVENT_TYPE_CODING_GENERAL_OBJ_EVENT = 0x27,
+ MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,
};
enum {
@@ -323,7 +323,12 @@ enum {
};
enum {
+ MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_IPSEC = 1ULL << 0x13,
+};
+
+enum {
MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY = 0xc,
+ MLX5_GENERAL_OBJECT_TYPES_IPSEC = 0x13,
};
enum {
@@ -336,7 +341,8 @@ enum {
};
enum {
- MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_DEK = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_TYPE_IPSEC = 0x2,
};
struct mlx5_ifc_flow_table_fields_supported_bits {
@@ -463,39 +469,70 @@ struct mlx5_ifc_eth_discard_cntrs_grp_bits {
u8 reserved_at_340[0x440];
};
+
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 flow_tag[0x1];
+ u8 reserved_at_1[0x1];
u8 flow_counter[0x1];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
- u8 identified_miss_table[0x1];
+ u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 encap[0x1];
+ u8 reformat[0x1];
u8 decap[0x1];
- u8 reset_root_to_default[0x1];
- u8 reserved_at_a[0x16];
-
- u8 reserved_at_20[0x2];
+ u8 reserved_at_9[0x1];
+ u8 pop_vlan[0x1];
+ u8 push_vlan[0x1];
+ u8 reserved_at_c[0x1];
+ u8 pop_vlan_2[0x1];
+ u8 push_vlan_2[0x1];
+ u8 reformat_and_vlan_action[0x1];
+ u8 reserved_at_10[0x1];
+ u8 sw_owner[0x1];
+ u8 reformat_l3_tunnel_to_l2[0x1];
+ u8 reformat_l2_to_l3_tunnel[0x1];
+ u8 reformat_and_modify_action[0x1];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_16[0x1];
+ u8 table_miss_action_domain[0x1];
+ u8 termination_table[0x1];
+ u8 reformat_and_fwd_to_table[0x1];
+ u8 reserved_at_1a[0x2];
+ u8 ipsec_encrypt[0x1];
+ u8 ipsec_decrypt[0x1];
+ u8 sw_owner_v2[0x1];
+ u8 reserved_at_1f[0x1];
+ u8 termination_table_raw_traffic[0x1];
+ u8 reserved_at_21[0x1];
u8 log_max_ft_size[0x6];
u8 log_max_modify_header_context[0x8];
u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
- u8 reserved_at_40[0x20];
-
- u8 reserved_at_60[0x18];
+ u8 reformat_add_esp_trasport[0x1];
+ u8 reformat_l2_to_l3_esp_tunnel[0x1];
+ u8 reformat_add_esp_transport_over_udp[0x1];
+ u8 reformat_del_esp_trasport[0x1];
+ u8 reformat_l3_esp_tunnel_to_l2[0x1];
+ u8 reformat_del_esp_transport_over_udp[0x1];
+ u8 execute_aso[0x1];
+ u8 reserved_at_47[0x19];
+ u8 reserved_at_60[0x2];
+ u8 reformat_insert[0x1];
+ u8 reformat_remove[0x1];
+ u8 macsec_encrypt[0x1];
+ u8 macsec_decrypt[0x1];
+ u8 reserved_at_66[0x2];
+ u8 reformat_add_macsec[0x1];
+ u8 reformat_remove_macsec[0x1];
+ u8 reserved_at_6a[0xe];
u8 log_max_ft_num[0x8];
-
u8 reserved_at_80[0x10];
u8 log_max_flow_counter[0x8];
u8 log_max_destination[0x8];
-
u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
-
u8 reserved_at_c0[0x40];
-
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_bitmask_support;
@@ -518,20 +555,15 @@ struct mlx5_ifc_flow_counter_list_bits {
u8 reserved_1[0x20];
};
-enum {
- MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0x0,
- MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 0x2,
- MLX5_FLOW_CONTEXT_DEST_TYPE_QP = 0x3,
-};
-
struct mlx5_ifc_dest_format_struct_bits {
- u8 destination_type[0x8];
- u8 destination_id[0x18];
+ u8 destination_type[0x8];
+ u8 destination_id[0x18];
- u8 reserved_0[0x8];
- u8 destination_table_type[0x8];
- u8 reserved_at_1[0x10];
+ u8 destination_eswitch_owner_vhca_id_valid[0x1];
+ u8 packet_reformat[0x1];
+ u8 reserved_at_22[0x6];
+ u8 destination_table_type[0x8];
+ u8 destination_eswitch_owner_vhca_id[0x10];
};
struct mlx5_ifc_ipv4_layout_bits {
@@ -585,11 +617,25 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
};
+struct mlx5_ifc_nvgre_key_bits {
+ u8 hi[0x18];
+ u8 lo[0x8];
+};
+
+union mlx5_ifc_gre_key_bits {
+ struct mlx5_ifc_nvgre_key_bits nvgre;
+ u8 key[0x20];
+};
+
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_0[0x8];
+ u8 gre_c_present[0x1];
+ u8 reserved_at_1[0x1];
+ u8 gre_k_present[0x1];
+ u8 gre_s_present[0x1];
+ u8 source_vhca_port[0x4];
u8 source_sqn[0x18];
- u8 reserved_1[0x10];
+ u8 source_eswitch_owner_vhca_id[0x10];
u8 source_port[0x10];
u8 outer_second_prio[0x3];
@@ -599,35 +645,163 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
- u8 outer_second_vlan_tag[0x1];
- u8 inner_second_vlan_tag[0x1];
- u8 reserved_2[0xe];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
- u8 gre_key_h[0x18];
- u8 gre_key_l[0x8];
+ union mlx5_ifc_gre_key_bits gre_key;
u8 vxlan_vni[0x18];
- u8 reserved_3[0x8];
+ u8 bth_opcode[0x8];
u8 geneve_vni[0x18];
- u8 reserved4[0x7];
+ u8 reserved_at_d8[0x6];
+ u8 geneve_tlv_option_0_exist[0x1];
u8 geneve_oam[0x1];
- u8 reserved_5[0xc];
+ u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
- u8 reserved_6[0xc];
+ u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_7[0xa];
+ u8 reserved_at_120[0xa];
u8 geneve_opt_len[0x6];
u8 geneve_protocol_type[0x10];
- u8 reserved_8[0x8];
+ u8 reserved_at_140[0x8];
u8 bth_dst_qp[0x18];
+ u8 inner_esp_spi[0x20];
+ u8 outer_esp_spi[0x20];
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_fte_match_mpls_bits {
+ u8 mpls_label[0x14];
+ u8 mpls_exp[0x3];
+ u8 mpls_s_bos[0x1];
+ u8 mpls_ttl[0x8];
+};
+
+struct mlx5_ifc_fte_match_set_misc2_bits {
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits inner_first_mpls;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_gre;
+
+ struct mlx5_ifc_fte_match_mpls_bits outer_first_mpls_over_udp;
- u8 reserved_9[0xa0];
+ u8 metadata_reg_c_7[0x20];
+
+ u8 metadata_reg_c_6[0x20];
+
+ u8 metadata_reg_c_5[0x20];
+
+ u8 metadata_reg_c_4[0x20];
+
+ u8 metadata_reg_c_3[0x20];
+
+ u8 metadata_reg_c_2[0x20];
+
+ u8 metadata_reg_c_1[0x20];
+
+ u8 metadata_reg_c_0[0x20];
+
+ u8 metadata_reg_a[0x20];
+
+ u8 reserved_at_1a0[0x8];
+
+ u8 macsec_syndrome[0x8];
+ u8 ipsec_syndrome[0x8];
+ u8 reserved_at_1b8[0x8];
+
+ u8 reserved_at_1c0[0x40];
+};
+
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+
+ u8 outer_tcp_seq_num[0x20];
+
+ u8 inner_tcp_ack_num[0x20];
+
+ u8 outer_tcp_ack_num[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_b0[0x10];
+
+ u8 icmp_header_data[0x20];
+
+ u8 icmpv6_header_data[0x20];
+
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+
+ u8 geneve_tlv_option_0_data[0x20];
+
+ u8 gtpu_teid[0x20];
+
+ u8 gtpu_msg_type[0x8];
+ u8 gtpu_msg_flags[0x8];
+ u8 reserved_at_170[0x10];
+
+ u8 gtpu_dw_2[0x20];
+
+ u8 gtpu_first_ext_dw_0[0x20];
+
+ u8 gtpu_dw_0[0x20];
+
+ u8 reserved_at_1e0[0x20];
+};
+
+struct mlx5_ifc_fte_match_set_misc4_bits {
+ u8 prog_sample_field_value_0[0x20];
+
+ u8 prog_sample_field_id_0[0x20];
+
+ u8 prog_sample_field_value_1[0x20];
+
+ u8 prog_sample_field_id_1[0x20];
+
+ u8 prog_sample_field_value_2[0x20];
+
+ u8 prog_sample_field_id_2[0x20];
+
+ u8 prog_sample_field_value_3[0x20];
+
+ u8 prog_sample_field_id_3[0x20];
+
+ u8 reserved_at_100[0x100];
+};
+
+struct mlx5_ifc_fte_match_set_misc5_bits {
+ u8 macsec_tag_0[0x20];
+
+ u8 macsec_tag_1[0x20];
+
+ u8 macsec_tag_2[0x20];
+
+ u8 macsec_tag_3[0x20];
+
+ u8 tunnel_header_0[0x20];
+
+ u8 tunnel_header_1[0x20];
+
+ u8 tunnel_header_2[0x20];
+
+ u8 tunnel_header_3[0x20];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_cmd_pas_bits {
@@ -863,6 +1037,20 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 reserved_1[0x7200];
};
+struct mlx5_ifc_port_selection_cap_bits {
+ u8 reserved_at_0[0x10];
+ u8 port_select_flow_table[0x1];
+ u8 reserved_at_11[0x1];
+ u8 port_select_flow_table_bypass[0x1];
+ u8 reserved_at_13[0xd];
+
+ u8 reserved_at_20[0x1e0];
+
+ struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_port_selection;
+
+ u8 reserved_at_400[0x7c00];
+};
+
struct mlx5_ifc_pddr_module_info_bits {
u8 cable_technology[0x8];
u8 cable_breakout[0x8];
@@ -1154,7 +1342,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_0[0x20];
u8 hca_cap_2[0x1];
- u8 reserved_at_21[0x1f];
+ u8 create_lag_when_not_master_up[0x1];
+ u8 dtor[0x1];
+ u8 event_on_vhca_state_teardown_request[0x1];
+ u8 event_on_vhca_state_in_use[0x1];
+ u8 event_on_vhca_state_active[0x1];
+ u8 event_on_vhca_state_allocated[0x1];
+ u8 event_on_vhca_state_invalid[0x1];
+ u8 reserved_at_28[0x8];
+ u8 vhca_id[0x10];
u8 reserved_at_40[0x40];
@@ -1404,7 +1600,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_460[0x3];
u8 log_max_uctx[0x5];
- u8 reserved_at_468[0x3];
+ u8 reserved_at_468[0x2];
+ u8 ipsec_offload[0x1];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
@@ -1488,11 +1685,27 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_260[0x5a0];
};
-enum mlx5_flow_destination_type {
- MLX5_FLOW_DESTINATION_TYPE_VPORT = 0x0,
- MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
- MLX5_FLOW_DESTINATION_TYPE_TIR = 0x2,
- MLX5_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
+enum mlx5_ifc_flow_destination_type {
+ MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT = 0x0,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE = 0x1,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TIR = 0x2,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER = 0x6,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK = 0x8,
+ MLX5_IFC_FLOW_DESTINATION_TYPE_TABLE_TYPE = 0xA,
+};
+
+enum mlx5_flow_table_miss_action {
+ MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ MLX5_FLOW_TABLE_MISS_ACTION_FWD,
+ MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+};
+
+struct mlx5_ifc_extended_dest_format_bits {
+ struct mlx5_ifc_dest_format_struct_bits destination_entry;
+
+ u8 packet_reformat_id[0x20];
+
+ u8 reserved_at_60[0x20];
};
union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
@@ -1502,13 +1715,21 @@ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
};
struct mlx5_ifc_fte_match_param_bits {
- struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
+
+ struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
- struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
+ struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
+ struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
- u8 reserved_0[0xa00];
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
+
+ struct mlx5_ifc_fte_match_set_misc4_bits misc_parameters_4;
+
+ struct mlx5_ifc_fte_match_set_misc5_bits misc_parameters_5;
+
+ u8 reserved_at_e00[0x200];
};
enum {
@@ -2310,43 +2531,85 @@ struct mlx5_ifc_rdbc_bits {
u8 atomic_resp[32][0x8];
};
+struct mlx5_ifc_vlan_bits {
+ u8 ethtype[0x10];
+ u8 prio[0x3];
+ u8 cfi[0x1];
+ u8 vid[0xc];
+};
+
+enum {
+ MLX5_FLOW_METER_COLOR_RED = 0x0,
+ MLX5_FLOW_METER_COLOR_YELLOW = 0x1,
+ MLX5_FLOW_METER_COLOR_GREEN = 0x2,
+ MLX5_FLOW_METER_COLOR_UNDEFINED = 0x3,
+};
+
enum {
- MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
- MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
- MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
- MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
- MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_EXE_ASO_FLOW_METER = 0x2,
+};
+
+struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits {
+ u8 return_reg_id[0x4];
+ u8 aso_type[0x4];
+ u8 reserved_at_8[0x14];
+ u8 action[0x1];
+ u8 init_color[0x2];
+ u8 meter_id[0x1];
+};
+
+union mlx5_ifc_exe_aso_ctrl {
+ struct mlx5_ifc_exe_aso_ctrl_flow_meter_bits exe_aso_ctrl_flow_meter;
+};
+
+struct mlx5_ifc_execute_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_1[0x7];
+ u8 aso_object_id[0x18];
+
+ union mlx5_ifc_exe_aso_ctrl exe_aso_ctrl;
+};
+
+enum {
+ MLX5_FLOW_CONTEXT_ENCRYPT_DECRYPT_TYPE_IPSEC = 0x0,
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_0[0x20];
+ struct mlx5_ifc_vlan_bits push_vlan;
u8 group_id[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 flow_tag[0x18];
- u8 reserved_2[0x10];
+ u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_3[0x8];
+ u8 extended_destination[0x1];
+ u8 reserved_at_81[0x1];
+ u8 flow_source[0x2];
+ u8 encrypt_decrypt_type[0x4];
u8 destination_list_size[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 flow_counter_list_size[0x18];
u8 packet_reformat_id[0x20];
- u8 modify_header_id[0x20];
+ u8 modify_header_id[0x20];
- u8 reserved_6[0x100];
+ struct mlx5_ifc_vlan_bits push_vlan_2;
+
+ u8 encrypt_decrypt_obj_id[0x20];
+ u8 reserved_at_140[0xc0];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_7[0x600];
+ struct mlx5_ifc_execute_aso_bits execute_aso[4];
+
+ u8 reserved_at_1300[0x500];
- union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[0];
+ union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[];
};
enum {
@@ -3008,21 +3271,27 @@ enum {
};
struct mlx5_ifc_flow_table_context_bits {
- u8 reformat_en[0x1];
- u8 decap_en[0x1];
- u8 reserved_at_2[0x2];
- u8 table_miss_action[0x4];
- u8 level[0x8];
- u8 reserved_at_10[0x8];
- u8 log_size[0x8];
+ u8 reformat_en[0x1];
+ u8 decap_en[0x1];
+ u8 sw_owner[0x1];
+ u8 termination_table[0x1];
+ u8 table_miss_action[0x4];
+ u8 level[0x8];
+ u8 reserved_at_10[0x8];
+ u8 log_size[0x8];
- u8 reserved_at_20[0x8];
- u8 table_miss_id[0x18];
+ u8 reserved_at_20[0x8];
+ u8 table_miss_id[0x18];
- u8 reserved_at_40[0x8];
- u8 lag_master_next_table_id[0x18];
+ u8 reserved_at_40[0x8];
+ u8 lag_master_next_table_id[0x18];
+
+ u8 reserved_at_60[0x60];
+
+ u8 sw_owner_icm_root_1[0x40];
+
+ u8 sw_owner_icm_root_0[0x40];
- u8 reserved_at_60[0xe0];
};
struct mlx5_ifc_esw_vport_context_bits {
@@ -3980,28 +4249,32 @@ struct mlx5_ifc_set_flow_table_root_out_bits {
};
struct mlx5_ifc_set_flow_table_root_in_bits {
- u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
- u8 op_mod[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
- u8 other_vport[0x1];
- u8 reserved_2[0xf];
- u8 vport_number[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
- u8 table_type[0x8];
- u8 reserved_4[0x18];
+ u8 table_type[0x8];
+ u8 reserved_at_88[0x7];
+ u8 table_of_other_vport[0x1];
+ u8 table_vport_number[0x10];
- u8 reserved_5[0x8];
- u8 table_id[0x18];
+ u8 reserved_at_a0[0x8];
+ u8 table_id[0x18];
- u8 reserved_6[0x8];
- u8 underlay_qpn[0x18];
-
- u8 reserved_7[0x120];
+ u8 reserved_at_c0[0x8];
+ u8 underlay_qpn[0x18];
+ u8 table_eswitch_owner_vhca_id_valid[0x1];
+ u8 reserved_at_e1[0xf];
+ u8 table_eswitch_owner_vhca_id[0x10];
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_set_fte_out_bits {
@@ -4014,34 +4287,35 @@ struct mlx5_ifc_set_fte_out_bits {
};
struct mlx5_ifc_set_fte_in_bits {
- u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
- u8 op_mod[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
- u8 other_vport[0x1];
- u8 reserved_2[0xf];
- u8 vport_number[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
- u8 table_type[0x8];
- u8 reserved_4[0x18];
+ u8 table_type[0x8];
+ u8 reserved_at_88[0x18];
- u8 reserved_5[0x8];
- u8 table_id[0x18];
+ u8 reserved_at_a0[0x8];
+ u8 table_id[0x18];
- u8 reserved_6[0x18];
- u8 modify_enable_mask[0x8];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_c1[0x17];
+ u8 modify_enable_mask[0x8];
- u8 reserved_7[0x20];
+ u8 reserved_at_e0[0x20];
- u8 flow_index[0x20];
+ u8 flow_index[0x20];
- u8 reserved_8[0xe0];
+ u8 reserved_at_120[0xe0];
- struct mlx5_ifc_flow_context_bits flow_context;
+ struct mlx5_ifc_flow_context_bits flow_context;
};
struct mlx5_ifc_set_driver_version_out_bits {
@@ -4322,6 +4596,23 @@ enum {
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_UPLINK = 0x2,
};
+enum {
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW = 0x1,
+ MLX5_FLOW_CONTEXT_ACTION_DROP = 0x2,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST = 0x4,
+ MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT = 0x10,
+ MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_DECRYPT = 0x1000,
+ MLX5_FLOW_CONTEXT_ACTION_CRYPTO_ENCRYPT = 0x2000,
+ MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO = 0x4000,
+};
+
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -5542,6 +5833,12 @@ enum mlx5_reformat_ctx_type {
MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL = 0x2,
MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2 = 0x3,
MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL = 0x4,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV4 = 0x5,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV4 = 0x7,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT = 0x8,
+ MLX5_REFORMAT_TYPE_DEL_ESP_TRANSPORT_OVER_UDP = 0xa,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_IPV6 = 0xb,
+ MLX5_REFORMAT_TYPE_ADD_ESP_TRANSPORT_OVER_UDPV6 = 0xc,
};
struct mlx5_ifc_alloc_packet_reformat_context_in_bits {
@@ -6252,6 +6549,11 @@ struct mlx5_ifc_modify_hca_vport_context_in_bits {
struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};
+enum {
+ MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID = (1UL << 0),
+ MLX5_MODIFY_FLOW_TABLE_LAG_NEXT_TABLE_ID = (1UL << 15),
+};
+
struct mlx5_ifc_modify_flow_table_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -7895,24 +8197,24 @@ struct mlx5_ifc_create_flow_table_out_bits {
};
struct mlx5_ifc_create_flow_table_in_bits {
- u8 opcode[0x10];
- u8 reserved_at_10[0x10];
+ u8 opcode[0x10];
+ u8 uid[0x10];
- u8 reserved_at_20[0x10];
- u8 op_mod[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
- u8 other_vport[0x1];
- u8 reserved_at_41[0xf];
- u8 vport_number[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
- u8 reserved_at_60[0x20];
+ u8 reserved_at_60[0x20];
- u8 table_type[0x8];
- u8 reserved_at_88[0x18];
+ u8 table_type[0x8];
+ u8 reserved_at_88[0x18];
- u8 reserved_at_a0[0x20];
+ u8 reserved_at_a0[0x20];
- struct mlx5_ifc_flow_table_context_bits flow_table_context;
+ struct mlx5_ifc_flow_table_context_bits flow_table_context;
};
struct mlx5_ifc_create_flow_group_out_bits {
@@ -7928,46 +8230,54 @@ struct mlx5_ifc_create_flow_group_out_bits {
};
enum {
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
- MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS = 0x2,
+ MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
};
struct mlx5_ifc_create_flow_group_in_bits {
- u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
- u8 op_mod[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
- u8 other_vport[0x1];
- u8 reserved_2[0xf];
- u8 vport_number[0x10];
+ u8 other_vport[0x1];
+ u8 reserved_at_41[0xf];
+ u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
- u8 table_type[0x8];
- u8 reserved_4[0x18];
+ u8 table_type[0x8];
+ u8 reserved_at_88[0x4];
+ u8 group_type[0x4];
+ u8 reserved_at_90[0x10];
- u8 reserved_5[0x8];
- u8 table_id[0x18];
+ u8 reserved_at_a0[0x8];
+ u8 table_id[0x18];
- u8 reserved_6[0x20];
+ u8 source_eswitch_owner_vhca_id_valid[0x1];
- u8 start_flow_index[0x20];
+ u8 reserved_at_c1[0x1f];
- u8 reserved_7[0x20];
+ u8 start_flow_index[0x20];
- u8 end_flow_index[0x20];
+ u8 reserved_at_100[0x20];
- u8 reserved_8[0xa0];
+ u8 end_flow_index[0x20];
- u8 reserved_9[0x18];
- u8 match_criteria_enable[0x8];
+ u8 reserved_at_140[0x10];
+ u8 match_definer_id[0x10];
- struct mlx5_ifc_fte_match_param_bits match_criteria;
+ u8 reserved_at_160[0x80];
+
+ u8 reserved_at_1e0[0x18];
+ u8 match_criteria_enable[0x8];
+
+ struct mlx5_ifc_fte_match_param_bits match_criteria;
- u8 reserved_10[0xe00];
+ u8 reserved_at_1200[0xe00];
};
struct mlx5_ifc_create_encryption_key_out_bits {
@@ -11618,5 +11928,120 @@ enum mlx5_fc_bulk_alloc_bitmask {
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
+struct mlx5_ifc_ipsec_cap_bits {
+ u8 ipsec_full_offload[0x1];
+ u8 ipsec_crypto_offload[0x1];
+ u8 ipsec_esn[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_encrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_256_decrypt[0x1];
+ u8 ipsec_crypto_esp_aes_gcm_128_decrypt[0x1];
+ u8 reserved_at_7[0x4];
+ u8 log_max_ipsec_offload[0x5];
+ u8 reserved_at_10[0x10];
+
+ u8 min_log_ipsec_full_replay_window[0x8];
+ u8 max_log_ipsec_full_replay_window[0x8];
+ u8 reserved_at_30[0x7d0];
+};
+
+enum {
+ MLX5_IPSEC_OBJECT_ICV_LEN_16B,
+};
+
+enum {
+ MLX5_IPSEC_ASO_REG_C_0_1 = 0x0,
+ MLX5_IPSEC_ASO_REG_C_2_3 = 0x1,
+ MLX5_IPSEC_ASO_REG_C_4_5 = 0x2,
+ MLX5_IPSEC_ASO_REG_C_6_7 = 0x3,
+};
+
+enum {
+ MLX5_IPSEC_ASO_MODE = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_PROTECTION = 0x1,
+ MLX5_IPSEC_ASO_INC_SN = 0x2,
+};
+enum {
+ MLX5_IPSEC_ASO_REPLAY_WIN_32BIT = 0x0,
+ MLX5_IPSEC_ASO_REPLAY_WIN_64BIT = 0x1,
+ MLX5_IPSEC_ASO_REPLAY_WIN_128BIT = 0x2,
+ MLX5_IPSEC_ASO_REPLAY_WIN_256BIT = 0x3,
+};
+
+struct mlx5_ifc_ipsec_aso_bits {
+ u8 valid[0x1];
+ u8 reserved_at_201[0x1];
+ u8 mode[0x2];
+ u8 window_sz[0x2];
+ u8 soft_lft_arm[0x1];
+ u8 hard_lft_arm[0x1];
+ u8 remove_flow_enable[0x1];
+ u8 esn_event_arm[0x1];
+ u8 reserved_at_20a[0x16];
+
+ u8 remove_flow_pkt_cnt[0x20];
+
+ u8 remove_flow_soft_lft[0x20];
+
+ u8 reserved_at_260[0x80];
+
+ u8 mode_parameter[0x20];
+
+ u8 replay_protection_window[0x100];
+};
+
+struct mlx5_ifc_ipsec_obj_bits {
+ u8 modify_field_select[0x40];
+ u8 full_offload[0x1];
+ u8 reserved_at_41[0x1];
+ u8 esn_en[0x1];
+ u8 esn_overlap[0x1];
+ u8 reserved_at_44[0x2];
+ u8 icv_length[0x2];
+ u8 reserved_at_48[0x4];
+ u8 aso_return_reg[0x4];
+ u8 reserved_at_50[0x10];
+
+ u8 esn_msb[0x20];
+
+ u8 reserved_at_80[0x8];
+ u8 dekn[0x18];
+
+ u8 salt[0x20];
+
+ u8 implicit_iv[0x40];
+
+ u8 reserved_at_100[0x8];
+ u8 ipsec_aso_access_pd[0x18];
+ u8 reserved_at_120[0xe0];
+
+ struct mlx5_ifc_ipsec_aso_bits ipsec_aso;
+};
+
+struct mlx5_ifc_create_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_OVERLAP = 1 << 0,
+ MLX5_MODIFY_IPSEC_BITMASK_ESN_MSB = 1 << 1,
+};
+
+struct mlx5_ifc_query_ipsec_obj_out_bits {
+ struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+struct mlx5_ifc_modify_ipsec_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr;
+ struct mlx5_ifc_ipsec_obj_bits ipsec_object;
+};
+
+enum {
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_TLS = 0x1,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_IPSEC = 0x2,
+ MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_PURPOSE_MACSEC = 0x4,
+};
#endif /* MLX5_IFC_H */
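The *_bits structures in this header are bit-accurate layouts of firmware command and object formats: each u8 name[0xN] member describes an N-bit field, and the MLX5_SET()/MLX5_GET() accessors derive shifts and masks from these layouts. A minimal sketch of filling one of the commands defined above; the field names are real, but the table_id value is illustrative:

	u32 in[MLX5_ST_SZ_DW(set_fte_in)] = {};

	MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
	MLX5_SET(set_fte_in, in, table_id, 7);		/* 24-bit field */
	MLX5_SET(set_fte_in, in, ignore_flow_level, 1);	/* 1-bit field */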
diff --git a/sys/dev/mlx5/mlx5_lib/aso.h b/sys/dev/mlx5/mlx5_lib/aso.h
new file mode 100644
index 000000000000..e880a5e51db1
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_lib/aso.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_LIB_ASO_H__
+#define __MLX5_LIB_ASO_H__
+
+#include <dev/mlx5/qp.h>
+#include <dev/mlx5/mlx5_core/mlx5_core.h>
+
+#define MLX5_ASO_WQEBBS \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_BB))
+#define MLX5_ASO_WQEBBS_DATA \
+ (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe_data), MLX5_SEND_WQE_BB))
+#define ASO_CTRL_READ_EN BIT(0)
+#define MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT 24
+#define MLX5_MACSEC_ASO_DS_CNT (DIV_ROUND_UP(sizeof(struct mlx5_aso_wqe), MLX5_SEND_WQE_DS))
+
+struct mlx5_wqe_aso_ctrl_seg {
+ __be32 va_h;
+ __be32 va_l; /* include read_enable */
+ __be32 l_key;
+ u8 data_mask_mode;
+ u8 condition_1_0_operand;
+ u8 condition_1_0_offset;
+ u8 data_offset_condition_operand;
+ __be32 condition_0_data;
+ __be32 condition_0_mask;
+ __be32 condition_1_data;
+ __be32 condition_1_mask;
+ __be64 bitwise_data;
+ __be64 data_mask;
+};
+
+struct mlx5_wqe_aso_data_seg {
+ __be32 bytewise_data[16];
+};
+
+struct mlx5_aso_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+};
+
+struct mlx5_aso_wqe_data {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_aso_ctrl_seg aso_ctrl;
+ struct mlx5_wqe_aso_data_seg aso_data;
+};
+
+enum {
+ MLX5_ASO_LOGICAL_AND,
+ MLX5_ASO_LOGICAL_OR,
+};
+
+enum {
+ MLX5_ASO_ALWAYS_FALSE,
+ MLX5_ASO_ALWAYS_TRUE,
+ MLX5_ASO_EQUAL,
+ MLX5_ASO_NOT_EQUAL,
+ MLX5_ASO_GREATER_OR_EQUAL,
+ MLX5_ASO_LESSER_OR_EQUAL,
+ MLX5_ASO_LESSER,
+ MLX5_ASO_GREATER,
+ MLX5_ASO_CYCLIC_GREATER,
+ MLX5_ASO_CYCLIC_LESSER,
+};
+
+enum {
+ MLX5_ASO_DATA_MASK_MODE_BITWISE_64BIT,
+ MLX5_ASO_DATA_MASK_MODE_BYTEWISE_64BYTE,
+ MLX5_ASO_DATA_MASK_MODE_CALCULATED_64BYTE,
+};
+
+enum {
+ MLX5_ACCESS_ASO_OPC_MOD_IPSEC = 0x0,
+ MLX5_ACCESS_ASO_OPC_MOD_FLOW_METER = 0x2,
+ MLX5_ACCESS_ASO_OPC_MOD_MACSEC = 0x5,
+};
+
+struct mlx5_aso;
+
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso);
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode);
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg);
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data);
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn);
+void mlx5_aso_destroy(struct mlx5_aso *aso);
+#endif /* __MLX5_LIB_ASO_H__ */
diff --git a/sys/dev/mlx5/mlx5_lib/mlx5_aso.c b/sys/dev/mlx5/mlx5_lib/mlx5_aso.c
new file mode 100644
index 000000000000..7040c9e148c8
--- /dev/null
+++ b/sys/dev/mlx5/mlx5_lib/mlx5_aso.c
@@ -0,0 +1,428 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/printk.h>
+#include <dev/mlx5/driver.h>
+#include <dev/mlx5/mlx5_core/transobj.h>
+#include "aso.h"
+#include <dev/mlx5/mlx5_core/wq.h>
+#include <dev/mlx5/cq.h>
+
+struct mlx5_aso_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5_aso {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg;
+ struct mlx5_aso_cq cq;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ void __iomem *uar_map;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+} ____cacheline_aligned_in_smp;
+
+static void mlx5_aso_free_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_alloc_cq(struct mlx5_core_dev *mdev, int numa_node,
+ void *cqc_data, struct mlx5_aso_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param;
+ int err;
+ u32 i;
+
+ param.linear = 1;
+ err = mlx5_cqwq_create(mdev, &param, cqc_data, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int inlen, eqn, irqn_not_used;
+ void *in, *cqc;
+ int err;
+
+ err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn_not_used);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, 0);
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_aso_destroy_cq(struct mlx5_aso_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5_aso_create_cq(struct mlx5_core_dev *mdev, int numa_node,
+ struct mlx5_aso_cq *cq)
+{
+ void *cqc_data;
+ int err;
+
+ cqc_data = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc_data)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc_data, log_cq_size, 1);
+ MLX5_SET(cqc, cqc_data, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc_data, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_aso_alloc_cq(mdev, numa_node, cqc_data, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq cq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = create_aso_cq(cq, cqc_data);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create aso wq cq, err=%d\n", err);
+ goto err_free_cq;
+ }
+
+ kvfree(cqc_data);
+ return 0;
+
+err_free_cq:
+ mlx5_aso_free_cq(cq);
+err_out:
+ kvfree(cqc_data);
+ return err;
+}
+
+static int mlx5_aso_alloc_sq(struct mlx5_core_dev *mdev, int numa_node,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5_wq_param param;
+ int err;
+
+ sq->uar_map = mdev->priv.uar->map;
+
+ param.linear = 1;
+ err = mlx5_wq_cyc_create(mdev, &param, sqc_wq, wq, &sq->wq_ctrl);
+ if (err)
+ return err;
+
+ wq->db = &wq->db[MLX5_SND_DBR];
+
+ return 0;
+}
+
+static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_get_sq_default_ts(mdev);
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, mdev->priv.uar->index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_set_sq_rdy(struct mlx5_core_dev *mdev, u32 sqn)
+{
+ void *in, *sqc;
+ int inlen, err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_aso_create_sq_rdy(struct mlx5_core_dev *mdev, u32 pdn,
+ void *sqc_data, struct mlx5_aso *sq)
+{
+ int err;
+
+ err = create_aso_sq(mdev, pdn, sqc_data, sq);
+ if (err)
+ return err;
+
+ err = mlx5_aso_set_sq_rdy(mdev, sq->sqn);
+ if (err)
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+
+ return err;
+}
+
+static void mlx5_aso_free_sq(struct mlx5_aso *sq)
+{
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_aso_destroy_sq(struct mlx5_aso *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_aso_free_sq(sq);
+}
+
+static int mlx5_aso_create_sq(struct mlx5_core_dev *mdev, int numa_node,
+ u32 pdn, struct mlx5_aso *sq)
+{
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, pdn);
+ MLX5_SET(wq, wq, log_wq_sz, 1);
+
+ err = mlx5_aso_alloc_sq(mdev, numa_node, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc aso wq sq, err=%d\n", err);
+ goto err_out;
+ }
+
+ err = mlx5_aso_create_sq_rdy(mdev, pdn, sqc_data, sq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to open aso wq sq, err=%d\n", err);
+ goto err_free_asosq;
+ }
+
+ mlx5_core_dbg(mdev, "aso sq->sqn = 0x%x\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_free_asosq:
+ mlx5_aso_free_sq(sq);
+err_out:
+ kvfree(sqc_data);
+ return err;
+}
+
+struct mlx5_aso *mlx5_aso_create(struct mlx5_core_dev *mdev, u32 pdn)
+{
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ struct mlx5_aso *aso;
+ int err;
+
+ aso = kzalloc(sizeof(*aso), GFP_KERNEL);
+ if (!aso)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_aso_create_cq(mdev, numa_node, &aso->cq);
+ if (err)
+ goto err_cq;
+
+ err = mlx5_aso_create_sq(mdev, numa_node, pdn, aso);
+ if (err)
+ goto err_sq;
+
+ return aso;
+
+err_sq:
+ mlx5_aso_destroy_cq(&aso->cq);
+err_cq:
+ kfree(aso);
+ return ERR_PTR(err);
+}
+
+void mlx5_aso_destroy(struct mlx5_aso *aso)
+{
+ mlx5_aso_destroy_sq(aso);
+ mlx5_aso_destroy_cq(&aso->cq);
+ kfree(aso);
+}
+
+void mlx5_aso_build_wqe(struct mlx5_aso *aso, u8 ds_cnt,
+ struct mlx5_aso_wqe *aso_wqe,
+ u32 obj_id, u32 opc_mode)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &aso_wqe->ctrl;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((opc_mode << MLX5_WQE_CTRL_WQE_OPC_MOD_SHIFT) |
+ (aso->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_ACCESS_ASO);
+ cseg->qpn_ds = cpu_to_be32((aso->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->general_id = cpu_to_be32(obj_id);
+}
+
+struct mlx5_aso_wqe *mlx5_aso_get_wqe(struct mlx5_aso *aso)
+{
+ struct mlx5_aso_wqe *wqe;
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&aso->wq, aso->pc);
+ wqe = mlx5_wq_cyc_get_wqe(&aso->wq, pi);
+ memset(wqe, 0, sizeof(*wqe));
+ return wqe;
+}
+
+void mlx5_aso_post_wqe(struct mlx5_aso *aso, bool with_data,
+ struct mlx5_wqe_ctrl_seg *doorbell_cseg)
+{
+ doorbell_cseg->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+ /* ensure wqe is visible to device before updating doorbell record */
+ wmb();
+
+ if (with_data)
+ aso->pc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->pc += MLX5_ASO_WQEBBS;
+ *aso->wq.db = cpu_to_be32(aso->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)doorbell_cseg, aso->uar_map + MLX5_BF_OFFSET, NULL);
+
+ /* Ensure doorbell is written on uar_page before poll_cq */
+ WRITE_ONCE(doorbell_cseg, NULL);
+}
+
+int mlx5_aso_poll_cq(struct mlx5_aso *aso, bool with_data)
+{
+ struct mlx5_aso_cq *cq = &aso->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+ struct mlx5_err_cqe *err_cqe;
+
+ mlx5_core_err(cq->mdev, "Bad OP in ASOSQ CQE: 0x%x\n",
+ get_cqe_opcode(cqe));
+
+ err_cqe = (struct mlx5_err_cqe *)cqe;
+ mlx5_core_err(cq->mdev, "vendor_err_synd=%x\n",
+ err_cqe->vendor_err_synd);
+ mlx5_core_err(cq->mdev, "syndrome=%x\n",
+ err_cqe->syndrome);
+ print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
+ 16, 1, err_cqe,
+ sizeof(*err_cqe), false);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (with_data)
+ aso->cc += MLX5_ASO_WQEBBS_DATA;
+ else
+ aso->cc += MLX5_ASO_WQEBBS;
+
+ return 0;
+}
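+
+/*
+ * Illustrative sketch, not driver code: how the ASO helpers above
+ * compose. Assumes a caller that already owns a protection domain
+ * (pdn) and an ASO object (obj_id); the opcode-modifier value below
+ * is a placeholder.
+ */
+static int
+mlx5_aso_example(struct mlx5_core_dev *mdev, u32 pdn, u32 obj_id)
+{
+	const u32 opc_mode = 0x2;	/* placeholder opcode modifier */
+	struct mlx5_aso_wqe *wqe;
+	struct mlx5_aso *aso;
+	int err;
+
+	aso = mlx5_aso_create(mdev, pdn);
+	if (IS_ERR(aso))
+		return PTR_ERR(aso);
+
+	wqe = mlx5_aso_get_wqe(aso);	/* zeroed WQE slot at aso->pc */
+	mlx5_aso_build_wqe(aso, DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS),
+	    wqe, obj_id, opc_mode);
+
+	/* ring the doorbell using the control segment just built */
+	mlx5_aso_post_wqe(aso, false, &wqe->ctrl);
+
+	/* busy-poll the completion; a production caller would bound this */
+	do {
+		err = mlx5_aso_poll_cq(aso, false);
+	} while (err == -ETIMEDOUT);
+
+	mlx5_aso_destroy(aso);
+	return err;
+}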
diff --git a/sys/dev/mlx5/port.h b/sys/dev/mlx5/port.h
index bfbc721139d3..a35265852ae4 100644
--- a/sys/dev/mlx5/port.h
+++ b/sys/dev/mlx5/port.h
@@ -128,6 +128,19 @@ enum mlx5e_ext_link_speed {
MLX5E_EXT_LINK_SPEEDS_NUMBER = 32,
};
+enum mlx5e_connector_type {
+ MLX5E_PORT_UNKNOWN = 0,
+ MLX5E_PORT_NONE = 1,
+ MLX5E_PORT_TP = 2,
+ MLX5E_PORT_AUI = 3,
+ MLX5E_PORT_BNC = 4,
+ MLX5E_PORT_MII = 5,
+ MLX5E_PORT_FIBRE = 6,
+ MLX5E_PORT_DA = 7,
+ MLX5E_PORT_OTHER = 8,
+ MLX5E_CONNECTOR_TYPE_NUMBER = 9,
+};
+
enum mlx5e_cable_type {
MLX5E_CABLE_TYPE_UNKNOWN = 0,
MLX5E_CABLE_TYPE_ACTIVE_CABLE = 1,
diff --git a/sys/dev/mlx5/qp.h b/sys/dev/mlx5/qp.h
index 50e964c8f912..11acb94e7dd6 100644
--- a/sys/dev/mlx5/qp.h
+++ b/sys/dev/mlx5/qp.h
@@ -144,6 +144,8 @@ enum {
#define MLX5_SEND_WQE_DS 16
#define MLX5_SEND_WQE_BB 64
#define MLX5_SEND_WQEBB_NUM_DS (MLX5_SEND_WQE_BB / MLX5_SEND_WQE_DS)
+#define MLX5_WQE_CTRL_QPN_SHIFT 8
+#define MLX5_WQE_CTRL_WQE_INDEX_SHIFT 8
enum {
MLX5_SEND_WQE_MAX_WQEBBS = 16,
@@ -192,7 +194,10 @@ struct mlx5_wqe_ctrl_seg {
u8 signature;
u8 rsvd[2];
u8 fm_ce_se;
- __be32 imm;
+ union {
+ __be32 imm;
+ __be32 general_id;
+ };
};
#define MLX5_WQE_CTRL_DS_MASK 0x3f
@@ -226,6 +231,10 @@ enum {
MLX5_ETH_WQE_SWP_OUTER_L4_TYPE = 1 << 5,
};
+enum {
+ MLX5_ETH_WQE_FT_META_IPSEC = BIT(0),
+};
+
struct mlx5_wqe_eth_seg {
u8 swp_outer_l4_offset;
u8 swp_outer_l3_offset;
@@ -234,7 +243,7 @@ struct mlx5_wqe_eth_seg {
u8 cs_flags;
u8 swp_flags;
__be16 mss;
- __be32 rsvd2;
+ __be32 flow_table_metadata;
union {
struct {
__be16 inline_hdr_sz;
diff --git a/sys/dev/mlx5/tls.h b/sys/dev/mlx5/tls.h
index 67bd7ef686c5..93682a90861c 100644
--- a/sys/dev/mlx5/tls.h
+++ b/sys/dev/mlx5/tls.h
@@ -28,9 +28,6 @@
struct mlx5_core_dev;
-int mlx5_encryption_key_create(struct mlx5_core_dev *mdev, u32 pdn,
- const void *p_key, u32 key_len, u32 * p_obj_id);
-int mlx5_encryption_key_destroy(struct mlx5_core_dev *mdev, u32 oid);
int mlx5_tls_open_tis(struct mlx5_core_dev *mdev, int tc, int tdn, int pdn, u32 *p_tisn);
void mlx5_tls_close_tis(struct mlx5_core_dev *mdev, u32 tisn);
int mlx5_tls_open_tir(struct mlx5_core_dev *mdev, int tdn, int rqtn, u32 *p_tirn);
diff --git a/sys/dev/mmc/bridge.h b/sys/dev/mmc/bridge.h
index 4ec082a61b55..bd08b695c84a 100644
--- a/sys/dev/mmc/bridge.h
+++ b/sys/dev/mmc/bridge.h
@@ -103,6 +103,10 @@ enum mmc_chip_select {
cs_dontcare = 0, cs_high, cs_low
};
+enum mmc_bus_type {
+ bus_type_default = 0, bus_type_spi
+};
+
enum mmc_bus_width {
bus_width_1 = 0, bus_width_4 = 2, bus_width_8 = 3
};
@@ -123,6 +127,7 @@ struct mmc_ios {
uint32_t clock; /* Speed of the clock in Hz to move data */
enum mmc_vdd vdd; /* Voltage to apply to the power pins */
enum mmc_vccq vccq; /* Voltage to use for signaling */
+ enum mmc_bus_type bus_type;
enum mmc_bus_mode bus_mode;
enum mmc_chip_select chip_select;
enum mmc_bus_width bus_width;
diff --git a/sys/dev/mmc/host/dwmmc.c b/sys/dev/mmc/host/dwmmc.c
index 8d67608c3977..a422d86d6034 100644
--- a/sys/dev/mmc/host/dwmmc.c
+++ b/sys/dev/mmc/host/dwmmc.c
@@ -315,20 +315,11 @@ static void
dwmmc_cmd_done(struct dwmmc_softc *sc)
{
struct mmc_command *cmd;
-#ifdef MMCCAM
- union ccb *ccb;
-#endif
-#ifdef MMCCAM
- ccb = sc->ccb;
- if (ccb == NULL)
- return;
- cmd = &ccb->mmcio.cmd;
-#else
+ DWMMC_ASSERT_LOCKED(sc);
+
cmd = sc->curcmd;
-#endif
- if (cmd == NULL)
- return;
+ KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
if (cmd->flags & MMC_RSP_PRESENT) {
if (cmd->flags & MMC_RSP_136) {
@@ -350,15 +341,17 @@ dwmmc_tasklet(struct dwmmc_softc *sc)
{
struct mmc_command *cmd;
+ DWMMC_ASSERT_LOCKED(sc);
+
cmd = sc->curcmd;
- if (cmd == NULL)
- return;
+ KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
if (!sc->cmd_done)
return;
if (cmd->error != MMC_ERR_NONE || !cmd->data) {
dwmmc_next_operation(sc);
+
} else if (cmd->data && sc->dto_rcvd) {
if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
@@ -383,6 +376,7 @@ dwmmc_intr(void *arg)
DWMMC_LOCK(sc);
cmd = sc->curcmd;
+ KASSERT(cmd != NULL, ("%s: sc %p curcmd %p == NULL", __func__, sc, cmd));
/* First handle SDMMC controller interrupts */
reg = READ4(sc, SDMMC_MINTSTS);
@@ -462,10 +456,10 @@ dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
was_present = sc->child != NULL;
if (!was_present && is_present) {
- taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&sc->card_delayed_task, -(hz / 2));
} else if (was_present && !is_present) {
- taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
+ taskqueue_enqueue(taskqueue_bus, &sc->card_task);
}
}
@@ -477,34 +471,30 @@ dwmmc_card_task(void *arg, int pending __unused)
#ifdef MMCCAM
mmc_cam_sim_discover(&sc->mmc_sim);
#else
- DWMMC_LOCK(sc);
-
+ bus_topo_lock();
if (READ4(sc, SDMMC_CDETECT) == 0 ||
(sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
if (sc->child == NULL) {
if (bootverbose)
device_printf(sc->dev, "Card inserted\n");
- sc->child = device_add_child(sc->dev, "mmc", -1);
- DWMMC_UNLOCK(sc);
+ sc->child = device_add_child(sc->dev, "mmc", DEVICE_UNIT_ANY);
if (sc->child) {
device_set_ivars(sc->child, sc);
(void)device_probe_and_attach(sc->child);
}
- } else
- DWMMC_UNLOCK(sc);
+ }
} else {
/* Card isn't present, detach if necessary */
if (sc->child != NULL) {
if (bootverbose)
device_printf(sc->dev, "Card removed\n");
- DWMMC_UNLOCK(sc);
device_delete_child(sc->dev, sc->child);
sc->child = NULL;
- } else
- DWMMC_UNLOCK(sc);
+ }
}
+ bus_topo_unlock();
#endif /* MMCCAM */
}
@@ -751,7 +741,7 @@ dwmmc_attach(device_t dev)
WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
- TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &sc->card_delayed_task, 0,
dwmmc_card_task, sc);
#ifdef MMCCAM
@@ -778,12 +768,12 @@ dwmmc_detach(device_t dev)
sc = device_get_softc(dev);
- ret = device_delete_children(dev);
+ ret = bus_generic_detach(dev);
if (ret != 0)
return (ret);
- taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
- taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
+ taskqueue_drain(taskqueue_bus, &sc->card_task);
+ taskqueue_drain_timeout(taskqueue_bus, &sc->card_delayed_task);
if (sc->intr_cookie != NULL) {
ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
@@ -1097,6 +1087,9 @@ dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
uint32_t cmdr;
dprintf("%s\n", __func__);
+
+ DWMMC_ASSERT_LOCKED(sc);
+
sc->curcmd = cmd;
data = cmd->data;
@@ -1181,18 +1174,22 @@ dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
static void
dwmmc_next_operation(struct dwmmc_softc *sc)
{
- struct mmc_command *cmd;
- dprintf("%s\n", __func__);
#ifdef MMCCAM
union ccb *ccb;
+#else
+ struct mmc_request *req;
+#endif
+ struct mmc_command *cmd;
+ dprintf("%s\n", __func__);
+ DWMMC_ASSERT_LOCKED(sc);
+
+#ifdef MMCCAM
ccb = sc->ccb;
if (ccb == NULL)
return;
cmd = &ccb->mmcio.cmd;
#else
- struct mmc_request *req;
-
req = sc->req;
if (req == NULL)
return;
@@ -1209,7 +1206,7 @@ dwmmc_next_operation(struct dwmmc_softc *sc)
* mostly caused by multi-block write command
* followed by single-read.
*/
- while(READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
+ while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
continue;
if (sc->flags & PENDING_CMD) {
@@ -1223,50 +1220,44 @@ dwmmc_next_operation(struct dwmmc_softc *sc)
return;
}
-#ifdef MMCCAM
- sc->ccb = NULL;
sc->curcmd = NULL;
+#ifdef MMCCAM
ccb->ccb_h.status =
(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
xpt_done(ccb);
+ sc->ccb = NULL;
#else
- sc->req = NULL;
- sc->curcmd = NULL;
req->done(req);
+ sc->req = NULL;
#endif
}
+#ifndef MMCCAM
static int
dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
{
struct dwmmc_softc *sc;
- sc = device_get_softc(brdev);
-
dprintf("%s\n", __func__);
- DWMMC_LOCK(sc);
+ sc = device_get_softc(brdev);
-#ifdef MMCCAM
- sc->flags |= PENDING_CMD;
-#else
+ DWMMC_LOCK(sc);
if (sc->req != NULL) {
DWMMC_UNLOCK(sc);
return (EBUSY);
}
-
sc->req = req;
sc->flags |= PENDING_CMD;
if (sc->req->stop)
sc->flags |= PENDING_STOP;
-#endif
- dwmmc_next_operation(sc);
+ dwmmc_next_operation(sc);
DWMMC_UNLOCK(sc);
+
return (0);
}
-#ifndef MMCCAM
static int
dwmmc_get_ro(device_t brdev, device_t reqdev)
{
@@ -1509,10 +1500,15 @@ dwmmc_cam_request(device_t dev, union ccb *ccb)
struct ccb_mmcio *mmcio;
sc = device_get_softc(dev);
- mmcio = &ccb->mmcio;
-
DWMMC_LOCK(sc);
+ KASSERT(ccb->ccb_h.pinfo.index == CAM_ACTIVE_INDEX,
+ ("%s: ccb %p index %d != CAM_ACTIVE_INDEX: func=%#x %s status %#x\n",
+ __func__, ccb, ccb->ccb_h.pinfo.index, ccb->ccb_h.func_code,
+ xpt_action_name(ccb->ccb_h.func_code), ccb->ccb_h.status));
+
+ mmcio = &ccb->mmcio;
+
#ifdef DEBUG
if (__predict_false(bootverbose)) {
device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
@@ -1523,16 +1519,21 @@ dwmmc_cam_request(device_t dev, union ccb *ccb)
#endif
if (mmcio->cmd.data != NULL) {
if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
- panic("data->len = %d, data->flags = %d -- something is b0rked",
- (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
+ panic("%s: data %p data->len = %d, data->flags = %d -- something is b0rked",
+ __func__, mmcio->cmd.data, (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
}
+
if (sc->ccb != NULL) {
- device_printf(sc->dev, "Controller still has an active command\n");
+ device_printf(sc->dev, "%s: Controller still has an active command: "
+ "sc->ccb %p new ccb %p\n", __func__, sc->ccb, ccb);
+ DWMMC_UNLOCK(sc);
return (EBUSY);
}
sc->ccb = ccb;
+ sc->flags |= PENDING_CMD;
+
+ dwmmc_next_operation(sc);
DWMMC_UNLOCK(sc);
- dwmmc_request(sc->dev, NULL, NULL);
return (0);
}
diff --git a/sys/dev/mmc/host/dwmmc_rockchip.c b/sys/dev/mmc/host/dwmmc_rockchip.c
index c4b07ac3290e..656e9a9cf09d 100644
--- a/sys/dev/mmc/host/dwmmc_rockchip.c
+++ b/sys/dev/mmc/host/dwmmc_rockchip.c
@@ -78,6 +78,7 @@ rockchip_dwmmc_attach(device_t dev)
{
struct dwmmc_softc *sc;
int type;
+ int rc;
sc = device_get_softc(dev);
sc->hwtype = HWTYPE_ROCKCHIP;
@@ -90,8 +91,17 @@ rockchip_dwmmc_attach(device_t dev)
}
sc->update_ios = &dwmmc_rockchip_update_ios;
+ rc = dwmmc_attach(dev);
- return (dwmmc_attach(dev));
+ /*
+ * The controller supports HS200, but FreeBSD lacks the tuning
+ * support that HS200/HS400 require, so disable those modes here.
+ */
+ device_printf(dev, "Disabling HS200+ (tuning code not written)\n");
+ sc->host.caps &= ~(MMC_CAP_MMC_HS200 | MMC_CAP_MMC_HS400);
+
+ return (rc);
}
static int
diff --git a/sys/dev/mmc/host/dwmmc_starfive.c b/sys/dev/mmc/host/dwmmc_starfive.c
new file mode 100644
index 000000000000..ab90da3b2b57
--- /dev/null
+++ b/sys/dev/mmc/host/dwmmc_starfive.c
@@ -0,0 +1,114 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright 2017 Emmanuel Vadot <manu@freebsd.org>
+ * Copyright (c) 2024 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Mitchell Horne
+ * <mhorne@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/module.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include <dev/mmc/bridge.h>
+#include <dev/mmc/mmc_fdt_helpers.h>
+
+#include <dev/mmc/host/dwmmc_var.h>
+
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "opt_mmccam.h"
+
+enum dwmmc_type {
+ DWMMC_GENERIC = 1,
+ DWMMC_JH7110
+};
+
+static struct ofw_compat_data compat_data[] = {
+ {"snps,dw-mshc", DWMMC_GENERIC},
+ {"starfive,jh7110-mmc", DWMMC_JH7110},
+ {NULL, 0}
+};
+
+static int
+dwmmc_starfive_update_ios(struct dwmmc_softc *sc, struct mmc_ios *ios)
+{
+ int err;
+
+ if (ios->clock != 0 && ios->clock != sc->bus_hz) {
+ err = clk_set_freq(sc->ciu, ios->clock, CLK_SET_ROUND_DOWN);
+ if (err != 0) {
+ printf("%s, Failed to set freq for ciu clock\n",
+ __func__);
+ return (err);
+ }
+ sc->bus_hz = ios->clock;
+ }
+
+ return (0);
+}
+
+static int
+starfive_dwmmc_probe(device_t dev)
+{
+ phandle_t node;
+ int type;
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ if (type == 0)
+ return (ENXIO);
+
+ /*
+ * If we matched the generic compat string, check the top-level board
+ * compatible, to ensure we should actually use the starfive driver.
+ */
+ if (type == DWMMC_GENERIC) {
+ node = OF_finddevice("/");
+ if (!ofw_bus_node_is_compatible(node, "starfive,jh7110"))
+ return (ENXIO);
+ }
+
+ device_set_desc(dev, "Synopsys DesignWare Mobile Storage "
+ "Host Controller (StarFive)");
+
+ return (BUS_PROBE_VENDOR);
+}
+
+static int
+starfive_dwmmc_attach(device_t dev)
+{
+ struct dwmmc_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->update_ios = &dwmmc_starfive_update_ios;
+
+ return (dwmmc_attach(dev));
+}
+
+static device_method_t starfive_dwmmc_methods[] = {
+ /* bus interface */
+ DEVMETHOD(device_probe, starfive_dwmmc_probe),
+ DEVMETHOD(device_attach, starfive_dwmmc_attach),
+ DEVMETHOD(device_detach, dwmmc_detach),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(starfive_dwmmc, starfive_dwmmc_driver, starfive_dwmmc_methods,
+ sizeof(struct dwmmc_softc), dwmmc_driver);
+
+DRIVER_MODULE(starfive_dwmmc, simplebus, starfive_dwmmc_driver, 0, 0);
+
+#ifndef MMCCAM
+MMC_DECLARE_BRIDGE(starfive_dwmmc);
+#endif
diff --git a/sys/dev/mmc/mmc.c b/sys/dev/mmc/mmc.c
index d59396208ccc..f4ea8faca35c 100644
--- a/sys/dev/mmc/mmc.c
+++ b/sys/dev/mmc/mmc.c
@@ -701,6 +701,10 @@ mmc_select_card(struct mmc_softc *sc, uint16_t rca)
{
int err, flags;
+ /* No card selection in SPI mode. */
+ if (mmcbr_get_bus_type(sc->dev) == bus_type_spi)
+ return (MMC_ERR_NONE);
+
flags = (rca ? MMC_RSP_R1B : MMC_RSP_NONE) | MMC_CMD_AC;
sc->retune_paused++;
err = mmc_wait_for_command(sc, MMC_SELECT_CARD, (uint32_t)rca << 16,
@@ -900,6 +904,10 @@ mmc_set_timing(struct mmc_softc *sc, struct mmc_ivars *ivar,
uint8_t value;
int err;
+ /* No timings in SPI mode. */
+ if (mmcbr_get_bus_type(sc->dev) == bus_type_spi)
+ return (MMC_ERR_NONE);
+
if (mmcbr_get_mode(sc->dev) == mode_sd) {
switch (timing) {
case bus_timing_normal:
@@ -1915,7 +1923,7 @@ child_common:
mmc_log_card(sc->dev, ivar, newcard);
if (newcard) {
/* Add device. */
- child = device_add_child(sc->dev, NULL, -1);
+ child = device_add_child(sc->dev, NULL, DEVICE_UNIT_ANY);
if (child != NULL) {
device_set_ivars(child, ivar);
sc->child_list = realloc(sc->child_list,
@@ -2457,7 +2465,7 @@ mmc_scan(struct mmc_softc *sc)
device_printf(dev, "Failed to release bus after scanning\n");
return;
}
- (void)bus_generic_attach(dev);
+ bus_attach_children(dev);
}
static int
diff --git a/sys/dev/mmc/mmc_fdt_helpers.c b/sys/dev/mmc/mmc_fdt_helpers.c
index 752e5d14bcb0..aed85dab55f4 100644
--- a/sys/dev/mmc/mmc_fdt_helpers.c
+++ b/sys/dev/mmc/mmc_fdt_helpers.c
@@ -111,7 +111,7 @@ cd_intr(void *arg)
{
struct mmc_helper *helper = arg;
- taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&helper->cd_delayed_task, -(hz / 2));
}
@@ -129,7 +129,7 @@ cd_card_task(void *arg, int pending __unused)
/* If we're polling re-schedule the task */
if (helper->cd_ihandler == NULL)
- taskqueue_enqueue_timeout_sbt(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout_sbt(taskqueue_bus,
&helper->cd_delayed_task, mstosbt(500), 0, C_PREL(2));
}
@@ -145,7 +145,7 @@ cd_setup(struct mmc_helper *helper, phandle_t node)
dev = helper->dev;
- TIMEOUT_TASK_INIT(taskqueue_swi_giant, &helper->cd_delayed_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &helper->cd_delayed_task, 0,
cd_card_task, helper);
/*
@@ -280,7 +280,7 @@ mmc_fdt_gpio_setup(device_t dev, phandle_t node, struct mmc_helper *helper,
/*
* Schedule a card detection
*/
- taskqueue_enqueue_timeout_sbt(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout_sbt(taskqueue_bus,
&helper->cd_delayed_task, mstosbt(500), 0, C_PREL(2));
return (0);
}
@@ -301,7 +301,7 @@ mmc_fdt_gpio_teardown(struct mmc_helper *helper)
if (helper->cd_ires != NULL)
bus_release_resource(helper->dev, SYS_RES_IRQ, 0, helper->cd_ires);
- taskqueue_drain_timeout(taskqueue_swi_giant, &helper->cd_delayed_task);
+ taskqueue_drain_timeout(taskqueue_bus, &helper->cd_delayed_task);
}
bool
diff --git a/sys/dev/mmc/mmc_subr.c b/sys/dev/mmc/mmc_subr.c
index 0a555cd74c97..fba99e791dff 100644
--- a/sys/dev/mmc/mmc_subr.c
+++ b/sys/dev/mmc/mmc_subr.c
@@ -112,6 +112,8 @@ mmc_wait_for_app_cmd(device_t busdev, device_t dev, uint16_t rca,
sc = device_get_softc(busdev);
+ cmd->flags |= MMC_CMD_IS_APP;
+
/* Squelch error reporting at lower levels, we report below. */
sc->squelched++;
do {
diff --git a/sys/dev/mmc/mmcbrvar.h b/sys/dev/mmc/mmcbrvar.h
index 8faef227324b..c47966793098 100644
--- a/sys/dev/mmc/mmcbrvar.h
+++ b/sys/dev/mmc/mmcbrvar.h
@@ -60,6 +60,7 @@
#include "mmcbr_if.h"
enum mmcbr_device_ivars {
+ MMCBR_IVAR_BUS_TYPE,
MMCBR_IVAR_BUS_MODE,
MMCBR_IVAR_BUS_WIDTH,
MMCBR_IVAR_CHIP_SELECT,
@@ -113,6 +114,17 @@ mmcbr_get_retune_req(device_t dev)
return ((int)v);
}
+static int __inline
+mmcbr_get_bus_type(device_t dev)
+{
+ uintptr_t v;
+
+ if (__predict_false(BUS_READ_IVAR(device_get_parent(dev), dev,
+ MMCBR_IVAR_BUS_TYPE, &v) != 0))
+ return (bus_type_default);
+ return ((int)v);
+}
+
/*
* Convenience wrappers for the mmcbr interface
*/
diff --git a/sys/dev/mmc/mmcreg.h b/sys/dev/mmc/mmcreg.h
index b544e3dd41e5..e5783249c67b 100644
--- a/sys/dev/mmc/mmcreg.h
+++ b/sys/dev/mmc/mmcreg.h
@@ -80,6 +80,7 @@ struct mmc_command {
#define MMC_CMD_BC (2ul << 5) /* Broadcast command, no response */
#define MMC_CMD_BCR (3ul << 5) /* Broadcast command with response */
#define MMC_CMD_MASK (3ul << 5)
+#define MMC_CMD_IS_APP (1ul << 7) /* Next cmd after MMC_APP_CMD */
/* Possible response types defined in the standard: */
#define MMC_RSP_NONE (0)
diff --git a/sys/dev/mmc/mmcsd.c b/sys/dev/mmc/mmcsd.c
index cd973e53802c..5b9cb93c7b31 100644
--- a/sys/dev/mmc/mmcsd.c
+++ b/sys/dev/mmc/mmcsd.c
@@ -1563,5 +1563,5 @@ mmcsd_handler(module_t mod __unused, int what, void *arg __unused)
}
DRIVER_MODULE(mmcsd, mmc, mmcsd_driver, mmcsd_handler, NULL);
-MODULE_DEPEND(mmcsd, g_flashmap, 0, 0, 0);
+MODULE_DEPEND(mmcsd, geom_flashmap, 0, 0, 0);
MMC_DEPEND(mmcsd);
diff --git a/sys/dev/mmc/mmcspi.c b/sys/dev/mmc/mmcspi.c
new file mode 100644
index 000000000000..d18f7aeb8cc0
--- /dev/null
+++ b/sys/dev/mmc/mmcspi.c
@@ -0,0 +1,2378 @@
+/*-
+ * Copyright (c) 2012-2025 Patrick Kelsey. All rights reserved.
+ * Copyright (c) 2025 Ruslan Bukin <br@bsdpad.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Portions of this software may have been developed with reference to
+ * the SD Simplified Specification. The following disclaimer may apply:
+ *
+ * The following conditions apply to the release of the simplified
+ * specification ("Simplified Specification") by the SD Card Association and
+ * the SD Group. The Simplified Specification is a subset of the complete SD
+ * Specification which is owned by the SD Card Association and the SD
+ * Group. This Simplified Specification is provided on a non-confidential
+ * basis subject to the disclaimers below. Any implementation of the
+ * Simplified Specification may require a license from the SD Card
+ * Association, SD Group, SD-3C LLC or other third parties.
+ *
+ * Disclaimers:
+ *
+ * The information contained in the Simplified Specification is presented only
+ * as a standard specification for SD Cards and SD Host/Ancillary products and
+ * is provided "AS-IS" without any representations or warranties of any
+ * kind. No responsibility is assumed by the SD Group, SD-3C LLC or the SD
+ * Card Association for any damages, any infringements of patents or other
+ * right of the SD Group, SD-3C LLC, the SD Card Association or any third
+ * parties, which may result from its use. No license is granted by
+ * implication, estoppel or otherwise under any patent or other rights of the
+ * SD Group, SD-3C LLC, the SD Card Association or any third party. Nothing
+ * herein shall be construed as an obligation by the SD Group, the SD-3C LLC
+ * or the SD Card Association to disclose or distribute any technical
+ * information, know-how or other confidential information to any third party.
+ *
+ *
+ * CRC routines adapted from public domain code written by Lammert Bies.
+ *
+ *
+ * This is an implementation of mmcbr that communicates with SD/MMC cards in
+ * SPI mode via spibus_if. To minimize changes to the existing MMC/SD
+ * stack (and to maximize reuse of it), the behavior of the SD-bus
+ * command set is emulated as much as possible, where required.
+ *
+ * The SPI bus ownership behavior is to acquire the SPI bus for the entire
+ * duration that the MMC host is acquired.
+ *
+ * CRC checking is enabled by default, but can be disabled at runtime
+ * per-card via sysctl (e.g. sysctl dev.mmcspi.0.use_crc=0).
+ *
+ * Considered, but not implemented:
+ * - Card presence detection
+ * - Card power control
+ * - Detection of lock switch state on cards that have them
+ * - Yielding the CPU during long card busy cycles
+ *
+ * Originally developed and tested using a MikroTik RouterBOARD RB450G and
+ * 31 microSD cards available circa 2012.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+
+#include <dev/mmc/bridge.h>
+#include <dev/mmc/mmcreg.h>
+#include <dev/mmc/mmcbrvar.h>
+#include <dev/spibus/spi.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "mmcbr_if.h"
+#include "spibus_if.h"
+
+#define MMCSPI_RETRIES 3
+#define MMCSPI_TIMEOUT_SEC 3
+
+#define MMCSPI_MAX_RSP_LEN 5 /* max length of an Rn response */
+#define MMCSPI_OCR_LEN 4
+
+#define MMCSPI_DATA_BLOCK_LEN 512
+#define MMCSPI_DATA_CRC_LEN 2
+
+#define MMCSPI_POLL_LEN 8 /* amount to read when searching */
+
+#define MMCSPI_R1_MASK 0x80 /* mask used to search for R1 tokens */
+#define MMCSPI_R1_VALUE 0x00 /* value used to search for R1 tokens */
+#define MMCSPI_DR_MASK 0x11 /* mask used to search for data resp tokens */
+#define MMCSPI_DR_VALUE 0x01 /* value used to search for data resp tokens */
+
+#define MMCSPI_DR_ERR_MASK 0x0e
+#define MMCSPI_DR_ERR_NONE 0x04
+#define MMCSPI_DR_ERR_CRC 0x0a
+#define MMCSPI_DR_ERR_WRITE 0x0c
+
+#define MMCSPI_TOKEN_SB 0xfe /* start block token for read single,
+ read multi, and write single */
+#define MMCSPI_TOKEN_SB_WM 0xfc /* start block token for write multi */
+#define MMCSPI_TOKEN_ST 0xfd /* stop transmission token */
+#define MMCSPI_IS_DE_TOKEN(x) (0 == ((x) & 0xf0)) /* detector for data
+ error token */
+
+#define MMCSPI_R1_IDLE 0x01
+#define MMCSPI_R1_ERASE_RST 0x02
+#define MMCSPI_R1_ILL_CMD 0x04
+#define MMCSPI_R1_CRC_ERR 0x08
+#define MMCSPI_R1_ERASE_ERR 0x10
+#define MMCSPI_R1_ADDR_ERR 0x20
+#define MMCSPI_R1_PARAM_ERR 0x40
+
+#define MMCSPI_R1_ERR_MASK (MMCSPI_R1_PARAM_ERR | MMCSPI_R1_ADDR_ERR | \
+ MMCSPI_R1_ERASE_ERR | MMCSPI_R1_CRC_ERR | \
+ MMCSPI_R1_ILL_CMD)
+
+#define MMCSPI_R2_LOCKED 0x01
+#define MMCSPI_R2_WP_ER_LCK 0x02
+#define MMCSPI_R2_ERR 0x04
+#define MMCSPI_R2_CC_ERR 0x08
+#define MMCSPI_R2_ECC_FAIL 0x10
+#define MMCSPI_R2_WP_VIOLATE 0x20
+#define MMCSPI_R2_ERASE_PARAM 0x40
+#define MMCSPI_R2_OOR_CSD_OW 0x80
+
+/* commands that only apply to the SPI interface */
+#define MMCSPI_READ_OCR 58
+#define MMCSPI_CRC_ON_OFF 59
+
+static struct ofw_compat_data compat_data[] = {
+ { "mmc-spi-slot", 1 },
+ { NULL, 0 }
+};
+
+struct mmcspi_command {
+ struct mmc_command *mmc_cmd; /* command passed from mmc layer */
+ uint32_t opcode; /* possibly translated opcode */
+ uint32_t arg; /* possibly translated arg */
+ uint32_t flags; /* possibly translated flags */
+ uint32_t retries; /* possibly translated retry count */
+ struct mmc_data *data; /* possibly redirected data segment */
+ unsigned int error_mask; /* R1 errors check mask */
+ unsigned char use_crc; /* do crc checking for this command */
+ unsigned char rsp_type; /* SPI response type of this command */
+#define MMCSPI_RSP_R1 0
+#define MMCSPI_RSP_R1B 1
+#define MMCSPI_RSP_R2 2
+#define MMCSPI_RSP_R3 3
+#define MMCSPI_RSP_R7 4
+ unsigned char rsp_len; /* response len of this command */
+ unsigned char mmc_rsp_type; /* MMC response type to translate to */
+#define MMCSPI_TO_MMC_RSP_NONE 0
+#define MMCSPI_TO_MMC_RSP_R1 1
+#define MMCSPI_TO_MMC_RSP_R1B 2
+#define MMCSPI_TO_MMC_RSP_R2 3
+#define MMCSPI_TO_MMC_RSP_R3 4
+#define MMCSPI_TO_MMC_RSP_R6 5
+#define MMCSPI_TO_MMC_RSP_R7 6
+ struct mmc_data ldata; /* local read data */
+};
+
+struct mmcspi_slot {
+ struct mmcspi_softc *sc; /* back pointer to parent bridge */
+ device_t dev; /* mmc device for slot */
+ boolean_t bus_busy; /* host has been acquired */
+ struct mmc_host host; /* host parameters */
+ struct mtx mtx; /* slot mutex */
+ uint8_t last_ocr[MMCSPI_OCR_LEN]; /* ocr retrieved after CMD8 */
+ uint32_t last_opcode; /* last opcode requested by mmc layer */
+ uint32_t last_flags; /* last flags requested by mmc layer */
+ unsigned int crc_enabled; /* crc checking is enabled */
+ unsigned int crc_init_done; /* whether the initial crc setting has
+ been sent to the card */
+#define MMCSPI_MAX_LDATA_LEN 16
+ uint8_t ldata_buf[MMCSPI_MAX_LDATA_LEN];
+};
+
+struct mmcspi_softc {
+ device_t dev; /* this mmc bridge device */
+ device_t busdev;
+ struct mmcspi_slot slot;
+ unsigned int use_crc; /* command CRC checking */
+};
+
+#if defined(MMCSPI_ENABLE_DEBUG_FUNCS)
+static void mmcspi_dump_data(device_t dev, const char *label, uint8_t *data,
+ unsigned int len);
+static void mmcspi_dump_spi_bus(device_t dev, unsigned int len);
+#endif
+
+#define MMCSPI_LOCK_SLOT(_slot) mtx_lock(&(_slot)->mtx)
+#define MMCSPI_UNLOCK_SLOT(_slot) mtx_unlock(&(_slot)->mtx)
+#define MMCSPI_SLOT_LOCK_INIT(_slot) mtx_init(&(_slot)->mtx, \
+ "SD slot mtx", "mmcspi", MTX_DEF)
+#define MMCSPI_SLOT_LOCK_DESTROY(_slot) mtx_destroy(&(_slot)->mtx);
+#define MMCSPI_ASSERT_SLOT_LOCKED(_slot) mtx_assert(&(_slot)->mtx, \
+ MA_OWNED);
+#define MMCSPI_ASSERT_SLOT_UNLOCKED(_slot) mtx_assert(&(_slot)->mtx, \
+ MA_NOTOWNED);
+
+#define TRACE_ZONE_ENABLED(zone) (trace_zone_mask & TRACE_ZONE_##zone)
+
+#define TRACE_ENTER(dev) \
+ if (TRACE_ZONE_ENABLED(ENTER)) { \
+ device_printf(dev, "%s: enter\n", __func__); \
+ }
+
+#define TRACE_EXIT(dev) \
+ if (TRACE_ZONE_ENABLED(EXIT)) { \
+ device_printf(dev, "%s: exit\n", __func__); \
+ }
+
+#define TRACE(dev, zone, ...) \
+ if (TRACE_ZONE_ENABLED(zone)) { \
+ device_printf(dev, __VA_ARGS__); \
+ }
+
+#define TRACE_ZONE_ENTER (1ul << 0) /* function entrance */
+#define TRACE_ZONE_EXIT (1ul << 1) /* function exit */
+#define TRACE_ZONE_ACTION (1ul << 2) /* for narrating major actions taken */
+#define TRACE_ZONE_RESULT (1ul << 3) /* for narrating results of actions */
+#define TRACE_ZONE_ERROR (1ul << 4) /* for reporting errors */
+#define TRACE_ZONE_DATA (1ul << 5) /* for dumping bus data */
+#define TRACE_ZONE_DETAILS (1ul << 6) /* for narrating minor actions/results */
+
+#define TRACE_ZONE_NONE 0
+#define TRACE_ZONE_ALL 0xffffffff
+
+#define CRC7_INITIAL 0x00
+#define CRC16_INITIAL 0x0000
+
+SYSCTL_NODE(_hw, OID_AUTO, mmcspi, CTLFLAG_RD, 0, "mmcspi driver");
+
+static unsigned int trace_zone_mask = TRACE_ZONE_ERROR;
+
+static uint8_t crc7tab[256];
+static uint16_t crc16tab[256];
+static uint8_t onesbuf[MMCSPI_DATA_BLOCK_LEN]; /* for driving the tx line
+ when receiving */
+static uint8_t junkbuf[MMCSPI_DATA_BLOCK_LEN]; /* for receiving data when
+ transmitting */
+
+static uint8_t
+update_crc7(uint8_t crc, uint8_t *buf, unsigned int len)
+{
+ uint8_t tmp;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ tmp = (crc << 1) ^ buf[i];
+ crc = crc7tab[tmp];
+ }
+
+ return (crc);
+}
+
+static uint16_t
+update_crc16(uint16_t crc, uint8_t *buf, unsigned int len)
+{
+ uint16_t tmp, c16;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ c16 = 0x00ff & (uint16_t)buf[i];
+
+ tmp = (crc >> 8) ^ c16;
+ crc = (crc << 8) ^ crc16tab[tmp];
+ }
+
+ return (crc);
+}
+
+static void
+init_crc7tab(void)
+{
+#define P_CRC7 0x89
+
+ int i, j;
+ uint8_t crc, c;
+
+ for (i = 0; i < 256; i++) {
+
+ c = (uint8_t)i;
+ crc = (c & 0x80) ? c ^ P_CRC7 : c;
+
+ for (j=1; j<8; j++) {
+ crc = crc << 1;
+
+ if (crc & 0x80)
+ crc = crc ^ P_CRC7;
+ }
+
+ crc7tab[i] = crc;
+ }
+}
+
+static void
+init_crc16tab(void)
+{
+#define P_CCITT 0x1021
+
+ int i, j;
+ uint16_t crc, c;
+
+ for (i = 0; i < 256; i++) {
+
+ crc = 0;
+ c = ((uint16_t) i) << 8;
+
+ for (j=0; j<8; j++) {
+
+ if ((crc ^ c) & 0x8000) crc = ( crc << 1 ) ^ P_CCITT;
+ else crc = crc << 1;
+
+ c = c << 1;
+ }
+
+ crc16tab[i] = crc;
+ }
+}
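+
+/*
+ * Illustrative self-check, not driver code: two well-known values the
+ * tables above must reproduce. CMD0's complete command byte is the SD
+ * spec's fixed 0x95 (the crc7 of the frame is 0x4a), and this crc-16
+ * variant (CRC-16/XMODEM) has check value 0x31c3 over the ASCII
+ * digits "123456789".
+ */
+static void
+mmcspi_crc_selftest(void)
+{
+	uint8_t cmd0[5] = { 0x40, 0x00, 0x00, 0x00, 0x00 };
+	uint8_t digits[9] = "123456789";
+
+	KASSERT(((update_crc7(CRC7_INITIAL, cmd0, 5) << 1) | 0x01) == 0x95,
+	    ("bad crc7 table"));
+	KASSERT(update_crc16(CRC16_INITIAL, digits, 9) == 0x31c3,
+	    ("bad crc16 table"));
+}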
+
+static void
+mmcspi_slot_init(device_t brdev, struct mmcspi_slot *slot)
+{
+ struct mmcspi_softc *sc;
+
+ TRACE_ENTER(brdev);
+
+ sc = device_get_softc(brdev);
+
+ slot->sc = sc;
+ slot->dev = NULL; /* will get real value when card is added */
+ slot->bus_busy = false;
+ slot->host.f_min = 100000; /* this should be as low as we need to go
+ for any card */
+ slot->host.caps = 0;
+ /* SPI mode requires 3.3V operation */
+ slot->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
+
+ MMCSPI_SLOT_LOCK_INIT(slot);
+
+ TRACE_EXIT(brdev);
+}
+
+static void
+mmcspi_slot_fini(device_t brdev, struct mmcspi_slot *slot)
+{
+ TRACE_ENTER(brdev);
+
+ MMCSPI_SLOT_LOCK_DESTROY(slot);
+
+ TRACE_EXIT(brdev);
+}
+
+static void
+mmcspi_card_add(struct mmcspi_slot *slot)
+{
+ device_t brdev;
+ device_t child;
+
+ brdev = slot->sc->dev;
+
+ TRACE_ENTER(brdev);
+
+ child = device_add_child(brdev, "mmc", DEVICE_UNIT_ANY);
+
+ MMCSPI_LOCK_SLOT(slot);
+ slot->dev = child;
+ device_set_ivars(slot->dev, slot);
+ MMCSPI_UNLOCK_SLOT(slot);
+
+ device_probe_and_attach(slot->dev);
+
+ TRACE_EXIT(brdev);
+}
+
+static void
+mmcspi_card_delete(struct mmcspi_slot *slot)
+{
+ device_t brdev;
+ device_t dev;
+
+ brdev = slot->sc->dev;
+
+ TRACE_ENTER(brdev);
+
+ MMCSPI_LOCK_SLOT(slot);
+ dev = slot->dev;
+ slot->dev = NULL;
+ MMCSPI_UNLOCK_SLOT(slot);
+ device_delete_child(brdev, dev);
+
+ TRACE_EXIT(brdev);
+}
+
+static int
+mmcspi_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "MMC SPI mode controller");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+mmcspi_attach(device_t dev)
+{
+ struct mmcspi_softc *sc;
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+
+ TRACE_ENTER(dev);
+
+ sc = device_get_softc(dev);
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ child = SYSCTL_CHILDREN(tree);
+
+ sc->dev = dev;
+ sc->busdev = device_get_parent(dev);
+ sc->use_crc = 1;
+
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "use_crc", CTLFLAG_RW,
+ &sc->use_crc, sizeof(sc->use_crc), "Enable/disable crc checking");
+
+ SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "trace_mask", CTLFLAG_RW,
+ &trace_zone_mask, sizeof(trace_zone_mask), "Bitmask for adjusting "
+ "trace messages");
+
+ mmcspi_slot_init(dev, &sc->slot);
+
+ /* XXX trigger this from card insert detection */
+ mmcspi_card_add(&sc->slot);
+
+ TRACE_EXIT(dev);
+
+ return (0);
+}
+
+static int
+mmcspi_detach(device_t dev)
+{
+ struct mmcspi_softc *sc;
+
+ TRACE_ENTER(dev);
+
+ sc = device_get_softc(dev);
+
+ /* XXX trigger this from card removal detection */
+ mmcspi_card_delete(&sc->slot);
+
+ mmcspi_slot_fini(dev, &sc->slot);
+
+ TRACE_EXIT(dev);
+
+ return (0);
+}
+
+static int
+mmcspi_suspend(device_t dev)
+{
+ int err;
+
+ TRACE_ENTER(dev);
+ err = bus_generic_suspend(dev);
+ if (err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ TRACE_EXIT(dev);
+
+ return (0);
+}
+
+static int
+mmcspi_resume(device_t dev)
+{
+ int err;
+
+ TRACE_ENTER(dev);
+ err = bus_generic_resume(dev);
+ if (err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ TRACE_EXIT(dev);
+
+ return (0);
+}
+
+static int
+mmcspi_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
+{
+ struct mmcspi_slot *slot;
+
+ TRACE_ENTER(bus);
+
+ slot = device_get_ivars(child);
+
+ switch (which) {
+ case MMCBR_IVAR_BUS_TYPE:
+ *result = bus_type_spi;
+ break;
+ case MMCBR_IVAR_BUS_MODE:
+ *result = slot->host.ios.bus_mode;
+ break;
+ case MMCBR_IVAR_BUS_WIDTH:
+ *result = slot->host.ios.bus_width;
+ break;
+ case MMCBR_IVAR_CHIP_SELECT:
+ *result = slot->host.ios.chip_select;
+ break;
+ case MMCBR_IVAR_CLOCK:
+ *result = slot->host.ios.clock;
+ break;
+ case MMCBR_IVAR_F_MIN:
+ *result = slot->host.f_min;
+ break;
+ case MMCBR_IVAR_F_MAX:
+ *result = slot->host.f_max;
+ break;
+ case MMCBR_IVAR_HOST_OCR:
+ *result = slot->host.host_ocr;
+ break;
+ case MMCBR_IVAR_MODE:
+ *result = slot->host.mode;
+ break;
+ case MMCBR_IVAR_OCR:
+ *result = slot->host.ocr;
+ break;
+ case MMCBR_IVAR_POWER_MODE:
+ *result = slot->host.ios.power_mode;
+ break;
+ case MMCBR_IVAR_VDD:
+ *result = slot->host.ios.vdd;
+ break;
+ case MMCBR_IVAR_VCCQ:
+ *result = slot->host.ios.vccq;
+ break;
+ case MMCBR_IVAR_CAPS:
+ *result = slot->host.caps;
+ break;
+ case MMCBR_IVAR_TIMING:
+ *result = slot->host.ios.timing;
+ break;
+ case MMCBR_IVAR_MAX_DATA:
+ /* seems reasonable, not dictated by anything */
+ *result = 64 * 1024;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ TRACE_EXIT(bus);
+
+ return (0);
+}
+
+static int
+mmcspi_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
+{
+ struct mmcspi_slot *slot;
+
+ TRACE_ENTER(bus);
+
+ slot = device_get_ivars(child);
+
+ switch (which) {
+ default:
+ return (EINVAL);
+ case MMCBR_IVAR_BUS_MODE:
+ slot->host.ios.bus_mode = value;
+ break;
+ case MMCBR_IVAR_BUS_WIDTH:
+ slot->host.ios.bus_width = value;
+ break;
+ case MMCBR_IVAR_CLOCK:
+ slot->host.ios.clock = value;
+ break;
+ case MMCBR_IVAR_CHIP_SELECT:
+ slot->host.ios.chip_select = value;
+ break;
+ case MMCBR_IVAR_MODE:
+ slot->host.mode = value;
+ break;
+ case MMCBR_IVAR_OCR:
+ slot->host.ocr = value;
+ break;
+ case MMCBR_IVAR_POWER_MODE:
+ slot->host.ios.power_mode = value;
+ break;
+ case MMCBR_IVAR_VDD:
+ slot->host.ios.vdd = value;
+ break;
+ case MMCBR_IVAR_VCCQ:
+ slot->host.ios.vccq = value;
+ break;
+ case MMCBR_IVAR_TIMING:
+ slot->host.ios.timing = value;
+ break;
+ case MMCBR_IVAR_BUS_TYPE:
+ case MMCBR_IVAR_CAPS:
+ case MMCBR_IVAR_HOST_OCR:
+ case MMCBR_IVAR_F_MIN:
+ case MMCBR_IVAR_F_MAX:
+ case MMCBR_IVAR_MAX_DATA:
+ return (EINVAL);
+ }
+ TRACE_EXIT(bus);
+
+ return (0);
+}
+
+static unsigned int
+mmcspi_do_spi_read(device_t dev, uint8_t *data, unsigned int len)
+{
+ struct spi_command spi_cmd;
+ struct mmcspi_softc *sc;
+ int err;
+
+ TRACE_ENTER(dev);
+
+ sc = device_get_softc(dev);
+
+ spi_cmd.tx_cmd = onesbuf;
+ spi_cmd.rx_cmd = data;
+ spi_cmd.tx_cmd_sz = len;
+ spi_cmd.rx_cmd_sz = len;
+ spi_cmd.tx_data = NULL;
+ spi_cmd.rx_data = NULL;
+ spi_cmd.tx_data_sz = 0;
+ spi_cmd.rx_data_sz = 0;
+
+ err = SPIBUS_TRANSFER(sc->busdev, sc->dev, &spi_cmd);
+
+#ifdef DEBUG_RX
+ int i;
+ if (err == 0) {
+ printf("rx val: ");
+ for (i = 0; i < len; i++)
+ printf("%x ", data[i]);
+ printf("\n");
+ }
+#endif
+
+ TRACE_EXIT(dev);
+
+ return (err ? MMC_ERR_FAILED : MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_do_spi_write(device_t dev, uint8_t *cmd, unsigned int cmdlen,
+ uint8_t *data, unsigned int datalen)
+{
+ struct mmcspi_softc *sc;
+ struct spi_command spi_cmd;
+ int err;
+
+ TRACE_ENTER(dev);
+
+ sc = device_get_softc(dev);
+
+ spi_cmd.tx_cmd = cmd;
+ spi_cmd.rx_cmd = junkbuf;
+ spi_cmd.tx_cmd_sz = cmdlen;
+ spi_cmd.rx_cmd_sz = cmdlen;
+ spi_cmd.tx_data = data;
+ spi_cmd.rx_data = junkbuf;
+ spi_cmd.tx_data_sz = datalen;
+ spi_cmd.rx_data_sz = datalen;
+
+ err = SPIBUS_TRANSFER(sc->busdev, sc->dev, &spi_cmd);
+
+ TRACE_EXIT(dev);
+
+ return (err ? MMC_ERR_FAILED : MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_wait_for_not_busy(device_t dev)
+{
+ unsigned int busy_length;
+ uint8_t pollbuf[MMCSPI_POLL_LEN];
+ struct bintime start, elapsed;
+ unsigned int err;
+ int i;
+
+ busy_length = 0;
+
+ TRACE_ENTER(dev);
+ TRACE(dev, ACTION, "waiting for not busy\n");
+
+ getbintime(&start);
+ do {
+ TRACE(dev, DETAILS, "looking for end of busy\n");
+ err = mmcspi_do_spi_read(dev, pollbuf, MMCSPI_POLL_LEN);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "spi read failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ for (i = 0; i < MMCSPI_POLL_LEN; i++) {
+ if (pollbuf[i] != 0x00) {
+ TRACE(dev, DETAILS,
+ "end of busy found at %d\n", i);
+ break;
+ }
+ busy_length++;
+ }
+
+ getbintime(&elapsed);
+ bintime_sub(&elapsed, &start);
+
+ if (elapsed.sec > MMCSPI_TIMEOUT_SEC) {
+ TRACE(dev, ERROR, "card busy timeout\n");
+ return (MMC_ERR_TIMEOUT);
+ }
+ } while (MMCSPI_POLL_LEN == i);
+
+ TRACE(dev, RESULT, "busy for %u byte slots\n", busy_length);
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static int
+mmcspi_update_ios(device_t brdev, device_t reqdev)
+{
+ struct mmcspi_softc *sc;
+ struct mmcspi_slot *slot;
+ struct spi_command spi_cmd;
+
+ TRACE_ENTER(brdev);
+
+ sc = device_get_softc(brdev);
+ slot = device_get_ivars(reqdev);
+
+ if (power_up == slot->host.ios.power_mode) {
+ /*
+ * This sequence provides the initialization steps required
+ * by the spec after card power is applied, but before any
+ * commands are issued. These operations are harmless if
+ * applied at any other time (after a warm reset, for
+ * example).
+ */
+
+ /*
+ * XXX Power-on portion of implementation of card power
+ * control should go here. Should probably include a power
+ * off first to ensure card is fully reset from any previous
+ * state.
+ */
+
+ /*
+ * Make sure power to card has ramped up. The spec requires
+ * power to ramp up in 35ms or less.
+ */
+ DELAY(35000);
+
+ /*
+ * Provide at least 74 clocks with CS and MOSI high that the
+ * spec requires after card power stabilizes.
+ */
+
+ spi_cmd.tx_cmd = onesbuf;
+ spi_cmd.tx_cmd_sz = 10;
+ spi_cmd.rx_cmd = junkbuf;
+ spi_cmd.rx_cmd_sz = 10;
+ spi_cmd.tx_data = NULL;
+ spi_cmd.rx_data = NULL;
+ spi_cmd.tx_data_sz = 0;
+ spi_cmd.rx_data_sz = 0;
+
+ SPIBUS_TRANSFER(sc->busdev, sc->dev, &spi_cmd);
+
+ /*
+ * Perhaps this was a warm reset and the card is in the
+ * middle of a long operation.
+ */
+ mmcspi_wait_for_not_busy(brdev);
+
+ slot->last_opcode = 0xffffffff;
+ slot->last_flags = 0;
+ memset(slot->last_ocr, 0, MMCSPI_OCR_LEN);
+ slot->crc_enabled = 0;
+ slot->crc_init_done = 0;
+ }
+
+ if (power_off == slot->host.ios.power_mode) {
+ /*
+ * XXX Power-off portion of implementation of card power
+ * control should go here.
+ */
+ }
+
+ TRACE_EXIT(brdev);
+
+ return (0);
+}
+
+static unsigned int
+mmcspi_shift_copy(uint8_t *dest, uint8_t *src, unsigned int dest_len,
+ unsigned int shift)
+{
+ unsigned int i;
+
+ if (0 == shift)
+ memcpy(dest, src, dest_len);
+ else {
+ for (i = 0; i < dest_len; i++) {
+ dest[i] =
+ (src[i] << shift) |
+ (src[i + 1] >> (8 - shift));
+ }
+ }
+
+ return (dest_len);
+}
+
+static unsigned int
+mmcspi_get_response_token(device_t dev, uint8_t mask, uint8_t value,
+ unsigned int len, unsigned int has_busy, uint8_t *rspbuf)
+{
+ uint8_t pollbuf[2 * MMCSPI_MAX_RSP_LEN];
+ struct bintime start, elapsed;
+ boolean_t found;
+ unsigned int err;
+ unsigned int offset;
+ unsigned int shift = 0;
+ unsigned int remaining;
+ uint16_t search_space;
+ uint16_t search_mask;
+ uint16_t search_value;
+ int i;
+
+ TRACE_ENTER(dev);
+
+ /*
+ * This loop searches data clocked out of the card for a response
+ * token matching the given mask and value. It will locate tokens
+ * that are not byte-aligned, as some cards send non-byte-aligned
+ * response tokens in some situations. For example, the following
+ * card consistently sends an unaligned response token to the stop
+ * command used to terminate multi-block reads:
+ *
+ * Transcend 2GB SDSC card, cid:
+ * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=00.2000
+ */
+
+ offset = 0;
+ found = false;
+ getbintime(&start);
+ do {
+ TRACE(dev, DETAILS, "looking for response token with "
+ "mask 0x%02x, value 0x%02x\n", mask, value);
+ err = mmcspi_do_spi_read(dev, &pollbuf[offset], len);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "spi read of resp token failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ for (i = 0; i < len + offset; i++) {
+ if ((pollbuf[i] & mask) == value) {
+ TRACE(dev, DETAILS, "response token found at "
+ "%d (0x%02x)\n", i, pollbuf[i]);
+ shift = 0;
+ found = true;
+ break;
+ } else if (i < len + offset - 1) {
+ /*
+ * Not the last byte in the buffer, so check
+ * for a non-aligned response.
+ */
+ search_space = ((uint16_t)pollbuf[i] << 8) |
+ pollbuf[i + 1];
+ search_mask = (uint16_t)mask << 8;
+ search_value = (uint16_t)value << 8;
+
+ TRACE(dev, DETAILS, "search: space=0x%04x "
+ " mask=0x%04x val=0x%04x\n", search_space,
+ search_mask, search_value);
+
+ for (shift = 1; shift < 8; shift++) {
+ search_space <<= 1;
+ if ((search_space & search_mask) ==
+ search_value) {
+ found = true;
+ TRACE(dev, DETAILS, "Found mat"
+ "ch at shift %u\n", shift);
+ break;
+ }
+ }
+
+ if (shift < 8)
+ break;
+ } else {
+ /*
+ * Move the last byte to the first position
+ * and go 'round again.
+ */
+ pollbuf[0] = pollbuf[i];
+ }
+ }
+
+ if (!found) {
+ offset = 1;
+
+ getbintime(&elapsed);
+ bintime_sub(&elapsed, &start);
+
+ if (elapsed.sec > MMCSPI_TIMEOUT_SEC) {
+ TRACE(dev, ERROR, "timeout while looking for "
+ "response token\n");
+ return (MMC_ERR_TIMEOUT);
+ }
+ }
+ } while (!found);
+
+ /*
+ * Note that if i == 0 and offset == 1, shift is always greater than
+ * zero.
+ */
+ remaining = i - offset + (shift ? 1 : 0);
+
+ TRACE(dev, DETAILS, "len=%u i=%u rem=%u shift=%u\n",
+ len, i, remaining, shift);
+
+ if (remaining) {
+ err = mmcspi_do_spi_read(dev, &pollbuf[len + offset],
+ remaining);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "spi read of remainder of response "
+ "token failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ mmcspi_shift_copy(rspbuf, &pollbuf[i], len, shift);
+
+ if (TRACE_ZONE_ENABLED(RESULT)) {
+ TRACE(dev, RESULT, "response =");
+ for (i = 0; i < len; i++)
+ printf(" 0x%02x", rspbuf[i]);
+ printf("\n");
+ }
+
+ if (has_busy) {
+ err = mmcspi_wait_for_not_busy(dev);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
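+
+/*
+ * Illustrative fragment, not driver code: an R1 token of 0x05 arriving
+ * three bits late straddles two bytes as 0xe0 0xbf; the search above
+ * finds it at shift 3, and mmcspi_shift_copy() realigns it.
+ */
+static void
+mmcspi_shift_demo(void)
+{
+	uint8_t raw[2] = { 0xe0, 0xbf };	/* 111'00000 101'11111 */
+	uint16_t space = ((uint16_t)raw[0] << 8) | raw[1];
+	unsigned int shift;
+	uint8_t token;
+
+	for (shift = 1; shift < 8; shift++) {
+		space <<= 1;
+		if ((space & ((uint16_t)MMCSPI_R1_MASK << 8)) ==
+		    ((uint16_t)MMCSPI_R1_VALUE << 8))
+			break;			/* matches at shift == 3 */
+	}
+
+	mmcspi_shift_copy(&token, raw, 1, shift);
+	KASSERT(token == 0x05, ("idle | illegal-command expected"));
+}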
+
+static unsigned int
+mmcspi_set_up_command(device_t dev, struct mmcspi_command *mmcspi_cmd,
+ struct mmc_command *mmc_cmd)
+{
+ struct mmcspi_softc *sc;
+ struct mmcspi_slot *slot;
+ uint32_t opcode;
+ uint32_t arg;
+ uint32_t flags;
+ uint32_t retries;
+ unsigned char rsp_type;
+ unsigned char rsp_len;
+ unsigned char mmc_rsp_type;
+ unsigned int ldata_len = 0;
+ unsigned int use_crc;
+
+ sc = device_get_softc(dev);
+ slot = &sc->slot;
+ use_crc = slot->crc_enabled;
+
+ opcode = mmc_cmd->opcode;
+ arg = mmc_cmd->arg;
+ flags = mmc_cmd->flags;
+ retries = mmc_cmd->retries;
+
+ if (flags & MMC_CMD_IS_APP) {
+ switch (opcode) {
+ case ACMD_SD_STATUS:
+ rsp_type = MMCSPI_RSP_R2;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1;
+ break;
+ case ACMD_SEND_NUM_WR_BLOCKS:
+ case ACMD_SET_WR_BLK_ERASE_COUNT:
+ case ACMD_SET_CLR_CARD_DETECT:
+ case ACMD_SEND_SCR:
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1;
+ break;
+ case ACMD_SD_SEND_OP_COND:
+ /* only HCS bit is valid in spi mode */
+ arg &= 0x40000000;
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R3;
+ break;
+ default:
+ TRACE(dev, ERROR, "Invalid app command opcode %u\n",
+ opcode);
+ return (MMC_ERR_INVALID);
+ }
+ } else {
+ switch (opcode) {
+ case MMC_GO_IDLE_STATE:
+ use_crc = 1;
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_NONE;
+ break;
+
+ case MMC_SEND_OP_COND:
+ case MMC_SWITCH_FUNC: /* also SD_SWITCH_FUNC */
+ case MMC_SET_BLOCKLEN:
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ case MMC_PROGRAM_CSD:
+ case MMC_SEND_WRITE_PROT:
+ case SD_ERASE_WR_BLK_START:
+ case SD_ERASE_WR_BLK_END:
+ case MMC_LOCK_UNLOCK:
+ case MMC_GEN_CMD:
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1;
+ break;
+ case MMCSPI_CRC_ON_OFF:
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_NONE;
+ break;
+
+ case MMC_SEND_CSD:
+ case MMC_SEND_CID:
+ arg = 0; /* no rca in spi mode */
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R2;
+ ldata_len = 16;
+ break;
+
+ case MMC_APP_CMD:
+ arg = 0; /* no rca in spi mode */
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1;
+ break;
+
+ case MMC_STOP_TRANSMISSION:
+ case MMC_SET_WRITE_PROT:
+ case MMC_CLR_WRITE_PROT:
+ case MMC_ERASE:
+ rsp_type = MMCSPI_RSP_R1B;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1B;
+ break;
+
+ case MMC_ALL_SEND_CID:
+ /* handle MMC_ALL_SEND_CID as MMC_SEND_CID */
+ opcode = MMC_SEND_CID;
+ rsp_type = MMCSPI_RSP_R1;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R2;
+ ldata_len = 16;
+ break;
+
+ case MMC_SEND_STATUS:
+ arg = 0; /* no rca in spi mode */
+ rsp_type = MMCSPI_RSP_R2;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R1;
+ break;
+
+ case MMCSPI_READ_OCR:
+ rsp_type = MMCSPI_RSP_R3;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_NONE;
+ break;
+
+ case SD_SEND_RELATIVE_ADDR:
+ /*
+ * Handle SD_SEND_RELATIVE_ADDR as MMC_SEND_STATUS -
+ * the rca returned to the caller will always be 0.
+ */
+ opcode = MMC_SEND_STATUS;
+ rsp_type = MMCSPI_RSP_R2;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R6;
+ break;
+
+ case SD_SEND_IF_COND:
+ use_crc = 1;
+ rsp_type = MMCSPI_RSP_R7;
+ mmc_rsp_type = MMCSPI_TO_MMC_RSP_R7;
+ break;
+
+ default:
+ TRACE(dev, ERROR, "Invalid cmd opcode %u\n", opcode);
+ return (MMC_ERR_INVALID);
+ }
+ }
+
+ switch (rsp_type) {
+ case MMCSPI_RSP_R1:
+ case MMCSPI_RSP_R1B:
+ rsp_len = 1;
+ break;
+ case MMCSPI_RSP_R2:
+ rsp_len = 2;
+ break;
+ case MMCSPI_RSP_R3:
+ case MMCSPI_RSP_R7:
+ rsp_len = 5;
+ break;
+ default:
+ TRACE(dev, ERROR, "Unknown response type %u\n", rsp_type);
+ return (MMC_ERR_INVALID);
+ }
+
+ mmcspi_cmd->mmc_cmd = mmc_cmd;
+ mmcspi_cmd->opcode = opcode;
+ mmcspi_cmd->arg = arg;
+ mmcspi_cmd->flags = flags;
+ mmcspi_cmd->retries = retries;
+ mmcspi_cmd->use_crc = use_crc;
+ mmcspi_cmd->error_mask = MMCSPI_R1_ERR_MASK;
+ if (!mmcspi_cmd->use_crc)
+ mmcspi_cmd->error_mask &= ~MMCSPI_R1_CRC_ERR;
+ mmcspi_cmd->rsp_type = rsp_type;
+ mmcspi_cmd->rsp_len = rsp_len;
+ mmcspi_cmd->mmc_rsp_type = mmc_rsp_type;
+
+ memset(&mmcspi_cmd->ldata, 0, sizeof(struct mmc_data));
+ mmcspi_cmd->ldata.len = ldata_len;
+ if (ldata_len) {
+ mmcspi_cmd->ldata.data = sc->slot.ldata_buf;
+ mmcspi_cmd->ldata.flags = MMC_DATA_READ;
+
+ mmcspi_cmd->data = &mmcspi_cmd->ldata;
+ } else
+ mmcspi_cmd->data = mmc_cmd->data;
+
+ return (MMC_ERR_NONE);
+}
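+
+/*
+ * Illustrative fragment, not driver code: with mmc_wait_for_app_cmd()
+ * now tagging commands with MMC_CMD_IS_APP, an ACMD41 is translated as
+ * below (the voltage-window bits in arg are hypothetical).
+ */
+static void
+mmcspi_acmd41_example(device_t dev)
+{
+	struct mmcspi_command cmd;
+	struct mmc_command mmc_cmd = {
+		.opcode = ACMD_SD_SEND_OP_COND,
+		.arg = 0x40300000,	/* HCS | hypothetical volt window */
+		.flags = MMC_RSP_R3 | MMC_CMD_BCR | MMC_CMD_IS_APP,
+		.retries = MMCSPI_RETRIES,
+	};
+
+	(void)mmcspi_set_up_command(dev, &cmd, &mmc_cmd);
+	/*
+	 * cmd.arg is now 0x40000000 (only HCS is meaningful in SPI
+	 * mode), cmd.rsp_type is MMCSPI_RSP_R1, and the single status
+	 * byte will be translated back into an MMC R3 response for the
+	 * core stack.
+	 */
+}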
+
+static unsigned int
+mmcspi_send_cmd(device_t dev, struct mmcspi_command *cmd, uint8_t *rspbuf)
+{
+ unsigned int err;
+ uint32_t opcode;
+ uint32_t arg;
+ uint8_t txbuf[8];
+ uint8_t crc;
+
+ TRACE_ENTER(dev);
+
+ opcode = cmd->opcode;
+ arg = cmd->arg;
+
+ TRACE(dev, ACTION, "sending %sMD%u(0x%08x)\n",
+ cmd->flags & MMC_CMD_IS_APP ? "AC": "C", opcode, arg);
+
+ /*
+ * Sending this byte ahead of each command prevents some cards from
+ * responding with unaligned data, and doesn't bother the others.
+ * Examples:
+ *
+ * Sandisk 32GB SDHC card, cid:
+ * mid=0x03 oid=0x5344 pnm="SU32G" prv=8.0 mdt=00.2000
+ */
+ txbuf[0] = 0xff;
+
+ txbuf[1] = 0x40 | (opcode & 0x3f);
+ txbuf[2] = arg >> 24;
+ txbuf[3] = (arg >> 16) & 0xff;
+ txbuf[4] = (arg >> 8) & 0xff;
+ txbuf[5] = arg & 0xff;
+
+ if (cmd->use_crc)
+ crc = update_crc7(CRC7_INITIAL, &txbuf[1], 5);
+ else
+ crc = 0;
+
+ txbuf[6] = (crc << 1) | 0x01;
+
+ /*
+ * Some cards have garbage on the bus in the first byte slot after
+ * the last command byte. This seems to be common with the stop
+ * command. Clocking out an extra byte with the command will
+ * result in that data not being searched for the response token,
+ * which is ok, because no cards respond that fast.
+ */
+ txbuf[7] = 0xff;
+
+ err = mmcspi_do_spi_write(dev, txbuf, sizeof(txbuf), NULL, 0);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "spi write of command failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ TRACE(dev, DETAILS,
+ "rx cmd bytes 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ junkbuf[0], junkbuf[1], junkbuf[2], junkbuf[3], junkbuf[4],
+ junkbuf[5]);
+ TRACE(dev, DETAILS, "skipped response byte is 0x%02x\n", junkbuf[6]);
+
+ err = mmcspi_get_response_token(dev, MMCSPI_R1_MASK, MMCSPI_R1_VALUE,
+ cmd->rsp_len, MMCSPI_RSP_R1B == cmd->rsp_type, rspbuf);
+
+ if (MMC_ERR_NONE == err) {
+ if (rspbuf[0] & cmd->error_mask & MMCSPI_R1_CRC_ERR)
+ err = MMC_ERR_BADCRC;
+ else if (rspbuf[0] & cmd->error_mask)
+ err = MMC_ERR_INVALID;
+ }
+
+ TRACE_EXIT(dev);
+
+ return (err);
+}
+
+static unsigned int
+mmcspi_read_block(device_t dev, uint8_t *data, unsigned int len,
+ unsigned int check_crc16, unsigned int check_crc7)
+{
+ struct bintime start;
+ struct bintime elapsed;
+ unsigned int non_token_bytes;
+ unsigned int data_captured;
+ unsigned int crc_captured;
+ unsigned int pollbufpos;
+ unsigned int crc16_mismatch;
+ unsigned int err;
+ uint16_t crc16, computed_crc16;
+ uint8_t crc7, computed_crc7;
+ uint8_t pollbuf[MMCSPI_POLL_LEN];
+ uint8_t crcbuf[MMCSPI_DATA_CRC_LEN];
+ int i;
+
+ crc16_mismatch = 0;
+
+ TRACE_ENTER(dev);
+ TRACE(dev, ACTION, "read block(%u)\n", len);
+
+ /*
+ * With this approach, we could pointlessly read up to
+ * (MMCSPI_POLL_LEN - 3 - len) bytes from the spi bus, but only in
+ * the odd situation where MMCSPI_POLL_LEN is greater than len + 3.
+ */
+ getbintime(&start);
+ do {
+ TRACE(dev, DETAILS, "looking for read token\n");
+ err = mmcspi_do_spi_read(dev, pollbuf, MMCSPI_POLL_LEN);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "token read on spi failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ for (i = 0; i < MMCSPI_POLL_LEN; i++) {
+ if (MMCSPI_TOKEN_SB == pollbuf[i]) {
+ TRACE(dev, RESULT,
+ "found start block token at %d\n", i);
+ break;
+ } else if (MMCSPI_IS_DE_TOKEN(pollbuf[i])) {
+ TRACE(dev, ERROR,
+ "found data error token at %d\n", i);
+ TRACE_EXIT(dev);
+ return (MMC_ERR_FAILED);
+ }
+ }
+
+ getbintime(&elapsed);
+ bintime_sub(&elapsed, &start);
+
+ if (elapsed.sec > MMCSPI_TIMEOUT_SEC) {
+ TRACE(dev, ERROR, "timeout while looking for read "
+ "token\n");
+ return (MMC_ERR_TIMEOUT);
+ }
+ } while (MMCSPI_POLL_LEN == i);
+
+ /* copy any data captured in tail of poll buf to data buf */
+ non_token_bytes = MMCSPI_POLL_LEN - i - 1;
+ data_captured = min(non_token_bytes, len);
+ crc_captured = non_token_bytes - data_captured;
+ pollbufpos = i + 1;
+
+ TRACE(dev, DETAILS, "data bytes captured in pollbuf = %u\n",
+ data_captured);
+
+ memcpy(data, &pollbuf[pollbufpos], data_captured);
+ pollbufpos += data_captured;
+
+ TRACE(dev, DETAILS, "data bytes to read = %u, crc_captured = %u\n",
+ len - data_captured, crc_captured);
+
+ /* get any remaining data from the spi bus */
+ if (data_captured < len) {
+ err = mmcspi_do_spi_read(dev, &data[data_captured],
+ len - data_captured);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR,
+ "spi read of remainder of block failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ /* copy any crc captured in the poll buf to the crc buf */
+ memcpy(crcbuf, &pollbuf[pollbufpos], crc_captured);
+
+ /* get any remaining crc */
+ if (crc_captured < MMCSPI_DATA_CRC_LEN) {
+ TRACE(dev, DETAILS, "crc bytes to read = %u\n",
+ MMCSPI_DATA_CRC_LEN - crc_captured);
+
+ err = mmcspi_do_spi_read(dev, &crcbuf[crc_captured],
+ MMCSPI_DATA_CRC_LEN - crc_captured);
+ if (MMC_ERR_NONE != err) {
+ TRACE(dev, ERROR, "spi read of crc failed\n");
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ /*
+ * The following crc checking code is deliberately structured to
+ * allow a passing crc-7 check to override a failing crc-16 check
+ * when both are enabled.
+ */
+ if (check_crc16) {
+ crc16 = ((uint16_t)crcbuf[0] << 8) | crcbuf[1];
+ computed_crc16 = update_crc16(CRC16_INITIAL, data, len);
+ TRACE(dev, RESULT, "sent_crc16=0x%04x computed_crc16=0x%04x\n",
+ crc16, computed_crc16);
+
+ if (computed_crc16 != crc16) {
+ crc16_mismatch = 1;
+
+ TRACE(dev, ERROR, "crc16 mismatch, should be 0x%04x, "
+ " is 0x%04x\n", crc16, computed_crc16);
+
+ if (!check_crc7) {
+ TRACE_EXIT(dev);
+ return (MMC_ERR_BADCRC);
+ }
+ }
+ }
+
+ if (check_crc7) {
+ if (crc16_mismatch) {
+ /*
+ * Let the user know something else is being checked
+ * after announcing an error above.
+ */
+ TRACE(dev, ERROR, "checking crc7\n");
+ }
+
+ crc7 = data[len - 1] >> 1;
+ computed_crc7 = update_crc7(CRC7_INITIAL, data, len - 1);
+ TRACE(dev, RESULT, "sent_crc7=0x%02x computed_crc7=0x%02x\n",
+ crc7, computed_crc7);
+
+ if (computed_crc7 != crc7) {
+ TRACE(dev, ERROR,
+ "crc7 mismatch, should be 0x%02x, is 0x%02x\n",
+ crc7, computed_crc7);
+
+ TRACE_EXIT(dev);
+ return (MMC_ERR_BADCRC);
+ }
+ }
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_send_stop(device_t dev, unsigned int retries)
+{
+ struct mmcspi_command stop;
+ struct mmc_command mmc_stop;
+ uint8_t stop_response;
+ unsigned int err;
+ int i;
+
+ TRACE_ENTER(dev);
+
+ memset(&mmc_stop, 0, sizeof(mmc_stop));
+ mmc_stop.opcode = MMC_STOP_TRANSMISSION;
+ mmc_stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ err = mmcspi_set_up_command(dev, &stop, &mmc_stop);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ /*
+ * Retry stop commands that fail due to bad crc here because having
+ * the caller retry the entire read/write command due to such a
+ * failure is pointlessly expensive.
+ */
+ for (i = 0; i <= retries; i++) {
+ TRACE(dev, ACTION, "sending stop message\n");
+
+ err = mmcspi_send_cmd(dev, &stop, &stop_response);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ TRACE(dev, RESULT, "stop response=0x%02x\n", stop_response);
+
+ /* retry on crc error */
+ if (stop_response & stop.error_mask & MMCSPI_R1_CRC_ERR)
+ continue;
+
+ /* success, or an error the caller must see: stop retrying */
+ break;
+ }
+
+ if (stop_response & stop.error_mask) {
+ TRACE_EXIT(dev);
+
+ /*
+ * Don't return MMC_ERR_BADCRC here, even if
+ * MMCSPI_R1_CRC_ERR is set, because that would trigger the
+ * caller's retry-on-crc-error mechanism, effectively
+ * squaring the maximum number of retries of the stop
+ * command.
+ */
+ return (MMC_ERR_FAILED);
+ }
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_read_phase(device_t dev, struct mmcspi_command *cmd)
+{
+ struct mmc_data *data;
+ unsigned int data_offset;
+ unsigned int num_blocks;
+ unsigned int len;
+ unsigned int err;
+ uint8_t *data8;
+ int i;
+
+ TRACE_ENTER(dev);
+
+ data = cmd->data;
+ data8 = (uint8_t *)data->data;
+ data_offset = 0;
+
+ if (data->len < MMCSPI_DATA_BLOCK_LEN) {
+ num_blocks = 1;
+ len = data->len;
+ } else {
+ num_blocks = data->len / MMCSPI_DATA_BLOCK_LEN;
+ len = MMCSPI_DATA_BLOCK_LEN;
+ }
+
+ for (i = 0; i < num_blocks; i++) {
+ /*
+ * The CID and CSD data blocks contain both a trailing crc-7
+ * inside the data block and the standard crc-16 following
+ * the data block, so both are checked when use_crc is true.
+ *
+ * When crc checking has been enabled via CMD59, some cards
+ * send CID and CSD data blocks with correct crc-7 values
+ * but incorrect crc-16 values. read_block will accept
+ * those responses as valid as long as the crc-7 is correct.
+ *
+ * Examples:
+ *
+ * Super Talent 1GB SDSC card, cid:
+ * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=02.2010
+ */
+ err = mmcspi_read_block(dev, &data8[data_offset], len,
+ cmd->use_crc, cmd->use_crc && ((MMC_SEND_CID == cmd->opcode)
+ || (MMC_SEND_CSD == cmd->opcode)));
+
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ data_offset += MMCSPI_DATA_BLOCK_LEN;
+ }
+
+ /* multi-block read commands require a stop */
+ if (num_blocks > 1) {
+ err = mmcspi_send_stop(dev, cmd->retries);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_write_block(device_t dev, uint8_t *data, unsigned int is_multi,
+ unsigned char use_crc, uint8_t *status)
+{
+ uint8_t txbuf[max(MMCSPI_POLL_LEN, 2)];
+ uint8_t response_token;
+ unsigned int err;
+ uint16_t crc;
+
+ TRACE_ENTER(dev);
+
+ if (use_crc)
+ crc = update_crc16(CRC16_INITIAL, data, MMCSPI_DATA_BLOCK_LEN);
+ else
+ crc = 0;
+
+ TRACE(dev, ACTION, "write block(512) crc=0x%04x\n", crc);
+
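+ /*
+ * Multi-block writes use the distinct "write multiple" start
+ * token so the card can tell data blocks apart from the
+ * stop-transmission token that ends the transfer.
+ */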
+ txbuf[0] = is_multi ? MMCSPI_TOKEN_SB_WM : MMCSPI_TOKEN_SB;
+ err = mmcspi_do_spi_write(dev, txbuf, 1, data, MMCSPI_DATA_BLOCK_LEN);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ txbuf[0] = crc >> 8;
+ txbuf[1] = crc & 0xff;
+ err = mmcspi_do_spi_write(dev, txbuf, 2, NULL, 0);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ err = mmcspi_get_response_token(dev, MMCSPI_DR_MASK, MMCSPI_DR_VALUE,
+ 1, 1, &response_token);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ *status = response_token & MMCSPI_DR_ERR_MASK;
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_write_phase(device_t dev, struct mmcspi_command *cmd)
+{
+ struct mmc_data *data;
+ unsigned int data_offset;
+ unsigned int num_blocks;
+ unsigned int err;
+ uint8_t *data8;
+ uint8_t token[2];
+ uint8_t status;
+ int i;
+
+ TRACE_ENTER(dev);
+
+ data = cmd->data;
+
+ data8 = (uint8_t *)data->data;
+ data_offset = 0;
+ num_blocks = data->len / MMCSPI_DATA_BLOCK_LEN;
+ for (i = 0; i < num_blocks; i++) {
+ err = mmcspi_write_block(dev, &data8[data_offset],
+ num_blocks > 1, cmd->use_crc, &status);
+
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ if (MMCSPI_DR_ERR_NONE != status) {
+ if (num_blocks > 1) {
+ /*
+ * Ignore any failure reported for the stop
+ * command, as the return status for the
+ * write phase will be whatever error was
+ * indicated in the data response token.
+ */
+ mmcspi_send_stop(dev, cmd->retries);
+ }
+
+ /*
+ * A CRC error can't be ignored here, even if crc
+ * use is disabled, as there is no way to simply
+ * carry on when a data error token has been sent.
+ */
+ if (MMCSPI_DR_ERR_CRC == status) {
+ TRACE_EXIT(dev);
+ return (MMC_ERR_BADCRC);
+ } else {
+ TRACE_EXIT(dev);
+ return (MMC_ERR_FAILED);
+ }
+ }
+
+ data_offset += MMCSPI_DATA_BLOCK_LEN;
+ }
+
+ /* successful multi-block write commands require a stop token */
+ if (num_blocks > 1) {
+ TRACE(dev, ACTION, "Sending stop token\n");
+
+ /*
+ * Most/all cards are a bit sluggish in asserting busy
+ * after receipt of the STOP_TRAN token. Clocking out an
+ * extra byte here provides a byte of dead time before
+ * looking for not busy, avoiding a premature not-busy
+ * determination with such cards.
+ */
+ token[0] = MMCSPI_TOKEN_ST;
+ token[1] = 0xff;
+
+ err = mmcspi_do_spi_write(dev, token, sizeof(token), NULL, 0);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ err = mmcspi_wait_for_not_busy(dev);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+ }
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_translate_response(device_t dev, struct mmcspi_command *cmd,
+ uint8_t *rspbuf)
+{
+ struct mmc_command *mmc_cmd;
+ uint32_t mmc_rsp_type;
+ uint8_t *ldata;
+
+ mmc_cmd = cmd->mmc_cmd;
+ mmc_rsp_type = cmd->mmc_rsp_type;
+ ldata = cmd->ldata.data;
+
+ TRACE_ENTER(dev);
+
+ TRACE(dev, ACTION, "translating SPI rsp %u to SD rsp %u\n",
+ cmd->rsp_type, mmc_rsp_type);
+
+ if ((MMCSPI_TO_MMC_RSP_R1 == mmc_rsp_type) ||
+ (MMCSPI_TO_MMC_RSP_R1B == mmc_rsp_type)) {
+
+ TRACE(dev, ACTION, "translating SPI-R1/2 to SD-R1\n");
+
+ if ((MMCSPI_RSP_R1 == cmd->rsp_type) ||
+ (MMCSPI_RSP_R1B == cmd->rsp_type) ||
+ (MMCSPI_RSP_R2 == cmd->rsp_type)) {
+ mmc_cmd->resp[0] = 0;
+
+ if (rspbuf[0] & MMCSPI_R1_PARAM_ERR)
+ mmc_cmd->resp[0] |= R1_OUT_OF_RANGE;
+
+ if (rspbuf[0] & MMCSPI_R1_ADDR_ERR)
+ mmc_cmd->resp[0] |= R1_ADDRESS_ERROR;
+
+ if (rspbuf[0] & MMCSPI_R1_ERASE_ERR)
+ mmc_cmd->resp[0] |= R1_ERASE_SEQ_ERROR;
+
+ if (rspbuf[0] & MMCSPI_R1_CRC_ERR)
+ mmc_cmd->resp[0] |= R1_COM_CRC_ERROR;
+
+ if (rspbuf[0] & MMCSPI_R1_ILL_CMD)
+ mmc_cmd->resp[0] |= R1_ILLEGAL_COMMAND;
+
+ if (rspbuf[0] & MMCSPI_R1_ERASE_RST)
+ mmc_cmd->resp[0] |= R1_ERASE_RESET;
+
+ if (rspbuf[0] & MMCSPI_R1_IDLE)
+ mmc_cmd->resp[0] |=
+ (uint32_t)R1_STATE_IDLE << 9;
+ else
+ mmc_cmd->resp[0] |=
+ (uint32_t)R1_STATE_READY << 9;
+
+ /*
+ * Emulate the R1_APP_CMD SD-bus status bit when a bare CMD55
+ * (MMC_APP_CMD) has been sent.
+ */
+ if (!(cmd->flags & MMC_CMD_IS_APP) &&
+ (MMC_APP_CMD == cmd->opcode))
+ mmc_cmd->resp[0] |= R1_APP_CMD;
+
+ if (MMCSPI_RSP_R2 == cmd->rsp_type) {
+ if (rspbuf[1] & MMCSPI_R2_OOR_CSD_OW)
+ mmc_cmd->resp[0] |=
+ R1_OUT_OF_RANGE |
+ R1_CSD_OVERWRITE;
+
+ if (rspbuf[1] & MMCSPI_R2_ERASE_PARAM)
+ mmc_cmd->resp[0] |= R1_ERASE_PARAM;
+
+ if (rspbuf[1] & MMCSPI_R2_WP_VIOLATE)
+ mmc_cmd->resp[0] |= R1_WP_VIOLATION;
+
+ if (rspbuf[1] & MMCSPI_R2_ECC_FAIL)
+ mmc_cmd->resp[0] |= R1_CARD_ECC_FAILED;
+
+ if (rspbuf[1] & MMCSPI_R2_CC_ERR)
+ mmc_cmd->resp[0] |= R1_CC_ERROR;
+
+ if (rspbuf[1] & MMCSPI_R2_ERR)
+ mmc_cmd->resp[0] |= R1_ERROR;
+
+ if (rspbuf[1] & MMCSPI_R2_WP_ER_LCK)
+ mmc_cmd->resp[0] |=
+ R1_LOCK_UNLOCK_FAILED |
+ R1_WP_ERASE_SKIP;
+
+ if (rspbuf[1] & MMCSPI_R2_LOCKED)
+ mmc_cmd->resp[0] |= R1_CARD_IS_LOCKED;
+
+ }
+ } else
+ return (MMC_ERR_INVALID);
+
+ } else if (MMCSPI_TO_MMC_RSP_R2 == mmc_rsp_type) {
+
+ if (16 == cmd->ldata.len) {
+
+ TRACE(dev, ACTION, "translating SPI-R1/ldata(16) "
+ "to SD-R2\n");
+
+ /* ldata contains bits 127:0 of the spi response */
+
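+ /*
+ * The final byte of the register (its internal crc-7 and end
+ * bit) is not part of the response value, so only ldata[0..14]
+ * are packed into resp[0..3].
+ */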
+ mmc_cmd->resp[0] =
+ (uint32_t)ldata[0] << 24 |
+ (uint32_t)ldata[1] << 16 |
+ (uint32_t)ldata[2] << 8 |
+ (uint32_t)ldata[3];
+
+ mmc_cmd->resp[1] =
+ (uint32_t)ldata[4] << 24 |
+ (uint32_t)ldata[5] << 16 |
+ (uint32_t)ldata[6] << 8 |
+ (uint32_t)ldata[7];
+
+ mmc_cmd->resp[2] =
+ (uint32_t)ldata[8] << 24 |
+ (uint32_t)ldata[9] << 16 |
+ (uint32_t)ldata[10] << 8 |
+ (uint32_t)ldata[11];
+
+ mmc_cmd->resp[3] =
+ (uint32_t)ldata[12] << 24 |
+ (uint32_t)ldata[13] << 16 |
+ (uint32_t)ldata[14] << 8;
+
+ } else
+ return (MMC_ERR_INVALID);
+
+ } else if (MMCSPI_TO_MMC_RSP_R3 == mmc_rsp_type) {
+
+ if (MMCSPI_RSP_R3 == cmd->rsp_type) {
+
+ TRACE(dev, ACTION, "translating SPI-R3 to SD-R3\n");
+
+ /*
+ * rspbuf contains a 40-bit SPI R3 from the MMCSPI_READ_OCR
+ * response, of which bits 31:0 are the OCR value.
+ */
+
+ /* SPI response bits 31:0 map to SD host register bits 31:0. */
+ mmc_cmd->resp[0] =
+ (uint32_t)rspbuf[1] << 24 |
+ (uint32_t)rspbuf[2] << 16 |
+ (uint32_t)rspbuf[3] << 8 |
+ (uint32_t)rspbuf[4];
+
+ /*
+ * The OCR power-up status bit reads as zero while the card is
+ * busy initializing, so clear it while the SPI-R1 idle bit is
+ * set.
+ */
+ if (rspbuf[0] & MMCSPI_R1_IDLE) {
+ mmc_cmd->resp[0] &= ~MMC_OCR_CARD_BUSY;
+ } else {
+ mmc_cmd->resp[0] |= MMC_OCR_CARD_BUSY;
+ }
+
+ TRACE(dev, DETAILS, "ocr=0x%08x\n", mmc_cmd->resp[0]);
+ } else
+ return (MMC_ERR_INVALID);
+
+ } else if (MMCSPI_TO_MMC_RSP_R6 == mmc_rsp_type) {
+ if (MMCSPI_RSP_R2 == cmd->rsp_type) {
+
+ TRACE(dev, ACTION, "translating SPI-R2 to SD-R6\n");
+
+ /* rca returned will always be zero */
+ mmc_cmd->resp[0] = 0;
+
+ if (rspbuf[0] & MMCSPI_R1_CRC_ERR)
+ mmc_cmd->resp[0] |= 0x8000;
+
+ if (rspbuf[0] & MMCSPI_R1_ILL_CMD)
+ mmc_cmd->resp[0] |= 0x4000;
+
+ if (rspbuf[1] & MMCSPI_R2_ERR)
+ mmc_cmd->resp[0] |= 0x2000;
+
+ if (rspbuf[0] & MMCSPI_R1_IDLE)
+ mmc_cmd->resp[0] |=
+ (uint32_t)R1_STATE_IDLE << 9;
+ else
+ mmc_cmd->resp[0] |=
+ (uint32_t)R1_STATE_READY << 9;
+ } else
+ return (MMC_ERR_INVALID);
+
+ } else if (MMCSPI_TO_MMC_RSP_R7 == mmc_rsp_type) {
+ if (MMCSPI_RSP_R7 == cmd->rsp_type) {
+
+ TRACE(dev, ACTION, "translating SPI-R7 to SD-R7\n");
+
+ /*
+ * rspbuf contains a 40-bit SPI R7, of which bits 11:0 need to
+ * be transferred.
+ */
+
+ /* SPI response bits 11:0 map to SD host register bits 11:0. */
+ mmc_cmd->resp[0] =
+ (uint32_t)(rspbuf[3] & 0xf) << 8 |
+ (uint32_t)rspbuf[4];
+ } else
+ return (MMC_ERR_INVALID);
+
+ } else if (MMCSPI_TO_MMC_RSP_NONE != mmc_rsp_type)
+ return (MMC_ERR_INVALID);
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_get_ocr(device_t dev, uint8_t *ocrbuf)
+{
+ struct mmc_command mmc_cmd;
+ struct mmcspi_command cmd;
+ unsigned int err;
+ uint8_t r1_status;
+ uint8_t rspbuf[MMCSPI_MAX_RSP_LEN];
+
+ TRACE_ENTER(dev);
+
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ mmc_cmd.opcode = MMCSPI_READ_OCR;
+ mmc_cmd.flags = MMC_RSP_R3 | MMC_CMD_AC;
+
+ err = mmcspi_set_up_command(dev, &cmd, &mmc_cmd);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ err = mmcspi_send_cmd(dev, &cmd, rspbuf);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ r1_status = rspbuf[0] & cmd.error_mask;
+ if (r1_status) {
+ if (r1_status & MMCSPI_R1_CRC_ERR)
+ err = MMC_ERR_BADCRC;
+ else
+ err = MMC_ERR_INVALID;
+
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ memcpy(ocrbuf, &rspbuf[1], MMCSPI_OCR_LEN);
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_set_crc_on_off(device_t dev, unsigned int crc_on)
+{
+ struct mmc_command mmc_cmd;
+ struct mmcspi_command cmd;
+ unsigned int err;
+ uint8_t r1_status;
+ uint8_t rspbuf[MMCSPI_MAX_RSP_LEN];
+
+ TRACE_ENTER(dev);
+
+ memset(&mmc_cmd, 0, sizeof(struct mmc_command));
+ mmc_cmd.opcode = MMCSPI_CRC_ON_OFF;
+ mmc_cmd.arg = crc_on ? 1 : 0;
+ mmc_cmd.flags = MMC_RSP_NONE | MMC_CMD_AC;
+
+ err = mmcspi_set_up_command(dev, &cmd, &mmc_cmd);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ err = mmcspi_send_cmd(dev, &cmd, rspbuf);
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ r1_status = rspbuf[0] & cmd.error_mask;
+ if (r1_status) {
+ if (r1_status & MMCSPI_R1_CRC_ERR)
+ err = MMC_ERR_BADCRC;
+ else
+ err = MMC_ERR_INVALID;
+
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ TRACE_EXIT(dev);
+ return (MMC_ERR_NONE);
+}
+
+static unsigned int
+mmcspi_update_crc_setting(device_t dev, unsigned int crc_on)
+{
+ struct mmcspi_softc *sc;
+ struct mmcspi_slot *slot;
+ unsigned int err;
+ int i;
+
+ TRACE_ENTER(dev);
+
+ sc = device_get_softc(dev);
+ slot = &sc->slot;
+
+ for (i = 0; i <= MMCSPI_RETRIES; i++) {
+ err = mmcspi_set_crc_on_off(dev, crc_on);
+ if (MMC_ERR_BADCRC != err)
+ break;
+ }
+
+ if (MMC_ERR_NONE != err) {
+ TRACE_EXIT(dev);
+ return (err);
+ }
+
+ if (crc_on)
+ slot->crc_enabled = 1;
+ else
+ slot->crc_enabled = 0;
+
+ TRACE_EXIT(dev);
+
+ return (MMC_ERR_NONE);
+}
+
+static int
+mmcspi_request(device_t brdev, device_t reqdev, struct mmc_request *req)
+{
+ struct mmcspi_softc *sc = device_get_softc(brdev);
+ struct mmcspi_slot *slot = &sc->slot;
+ struct mmcspi_command cmd;
+ struct mmc_command *mmc_cmd = req->cmd;
+ struct mmc_data *data;
+ unsigned int err;
+ unsigned int use_crc_sample;
+ int i, j;
+ uint32_t opcode;
+ uint32_t flags;
+ uint32_t last_opcode;
+ uint32_t last_flags;
+ uint8_t rspbuf[MMCSPI_MAX_RSP_LEN];
+
+ TRACE_ENTER(brdev);
+
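+ /* Distinguish native commands from application (CMD55-prefixed) commands. */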
+#define IS_CMD(code, cmd, flags) \
+ (!((flags) & MMC_CMD_IS_APP) && ((code) == (cmd)))
+#define IS_ACMD(code, cmd, flags) \
+ (((flags) & MMC_CMD_IS_APP) && ((code) == (cmd)))
+
+ if (power_on != slot->host.ios.power_mode)
+ return (MMC_ERR_INVALID);
+
+ /*
+ * Sample use_crc sysctl and adjust card setting if required and
+ * appropriate.
+ */
+ use_crc_sample = sc->use_crc;
+ if (slot->crc_init_done &&
+ (use_crc_sample != slot->crc_enabled)) {
+ err = mmcspi_update_crc_setting(brdev, use_crc_sample);
+ if (MMC_ERR_NONE != err)
+ goto out;
+ slot->crc_init_done = 1;
+ }
+
+ err = mmcspi_set_up_command(brdev, &cmd, mmc_cmd);
+ if (MMC_ERR_NONE != err)
+ goto out;
+
+ opcode = cmd.opcode;
+ flags = cmd.flags;
+ data = cmd.data;
+
+ last_opcode = slot->last_opcode;
+ last_flags = slot->last_flags;
+
+ /* enforce restrictions on request parameters */
+ if (data) {
+ /*
+ * All writes must be a multiple of the block length. All
+ * reads greater than the block length must be a multiple of
+ * the block length.
+ */
+ if ((data->len % MMCSPI_DATA_BLOCK_LEN) &&
+ !((data->flags & MMC_DATA_READ) &&
+ (data->len < MMCSPI_DATA_BLOCK_LEN))) {
+ TRACE(brdev, ERROR,
+ "requested data phase not a multiple of %u\n",
+ MMCSPI_DATA_BLOCK_LEN);
+ err = MMC_ERR_INVALID;
+ goto out;
+ }
+
+ if (((data->flags & MMC_DATA_READ) &&
+ (data->flags & MMC_DATA_WRITE)) ||
+ (data->flags & MMC_DATA_STREAM)) {
+ TRACE(brdev, ERROR, "illegal data phase flags 0x%02x\n",
+ data->flags);
+ err = MMC_ERR_INVALID;
+ goto out;
+ }
+ }
+
+ for (i = 0; i <= cmd.retries; i++) {
+ /*
+ * On the next command following a CMD8, collect the OCR and
+ * save it off for use in the next ACMD41.
+ */
+ if (IS_CMD(SD_SEND_IF_COND, last_opcode, last_flags)) {
+ err = mmcspi_get_ocr(brdev, slot->last_ocr);
+ if (MMC_ERR_NONE != err) {
+ if (MMC_ERR_BADCRC == err)
+ continue;
+ goto out;
+ }
+ }
+
+ err = mmcspi_send_cmd(brdev, &cmd, rspbuf);
+ if (MMC_ERR_NONE != err) {
+ if (MMC_ERR_BADCRC == err)
+ continue;
+ goto out;
+ }
+
+ if (data) {
+ if (data->flags & MMC_DATA_READ)
+ err = mmcspi_read_phase(brdev, &cmd);
+ else /* MMC_DATA_WRITE */
+ err = mmcspi_write_phase(brdev, &cmd);
+ if (MMC_ERR_NONE != err) {
+ if (MMC_ERR_BADCRC == err)
+ continue;
+ goto out;
+ }
+ }
+ break;
+ }
+
+ if (MMC_ERR_NONE != err)
+ goto out;
+
+ /*
+ * If this was an ACMD_SD_SEND_OP_COND or MMC_SEND_OP_COND, we need
+ * to return an OCR value in the result. If the response from the
+ * card indicates it is still in the IDLE state, supply the OCR
+ * value obtained after the last CMD8, otherwise issue an
+ * MMCSPI_READ_OCR to get the current value, which will have a valid
+ * CCS bit.
+ *
+ * This dance is required under this emulation approach because the
+ * spec stipulates that no other commands should be sent while
+ * ACMD_SD_SEND_OP_COND is being used to poll for the end of the
+ * IDLE state, and some cards do enforce that requirement.
+ */
+ if (IS_ACMD(ACMD_SD_SEND_OP_COND, opcode, flags) ||
+ IS_CMD(MMC_SEND_OP_COND, opcode, flags)) {
+
+ if (rspbuf[0] & MMCSPI_R1_IDLE)
+ memcpy(&rspbuf[1], slot->last_ocr, MMCSPI_OCR_LEN);
+ else {
+
+ /*
+ * Some cards won't accept the MMCSPI_CRC_ON_OFF
+ * command until initialization is complete.
+ *
+ * Examples:
+ *
+ * Super Talent 1GB SDSC card, cid:
+ * mid=0x1b oid=0x534d pnm="00000" prv=1.0 mdt=02.2010
+ */
+ if (!slot->crc_init_done) {
+ err = mmcspi_update_crc_setting(brdev,
+ sc->use_crc);
+ if (MMC_ERR_NONE != err)
+ goto out;
+ slot->crc_init_done = 1;
+ }
+
+ for (j = 0; j <= cmd.retries; j++) {
+ /*
+ * Note that in this case, we pass on the R1
+ * from READ_OCR.
+ */
+ err = mmcspi_get_ocr(brdev, rspbuf);
+ if (MMC_ERR_NONE != err) {
+ if (MMC_ERR_BADCRC == err)
+ continue;
+
+ goto out;
+ }
+
+ break;
+ }
+
+ if (MMC_ERR_NONE != err)
+ goto out;
+
+ }
+
+ /* adjust the SPI response type to include the OCR */
+ cmd.rsp_type = MMCSPI_RSP_R3;
+ }
+
+ err = mmcspi_translate_response(brdev, &cmd, rspbuf);
+ if (MMC_ERR_NONE != err)
+ goto out;
+
+ out:
+ slot->last_opcode = mmc_cmd->opcode;
+ slot->last_flags = mmc_cmd->flags;
+
+ mmc_cmd->error = err;
+
+ if (req->done)
+ req->done(req);
+
+ TRACE_EXIT(brdev);
+
+ return (err);
+}
+
+static int
+mmcspi_get_ro(device_t brdev, device_t reqdev)
+{
+
+ TRACE_ENTER(brdev);
+ TRACE_EXIT(brdev);
+
+ /* XXX no support for this currently */
+ return (0);
+}
+
+static int
+mmcspi_acquire_host(device_t brdev, device_t reqdev)
+{
+ struct mmcspi_slot *slot;
+ int err;
+
+ TRACE_ENTER(brdev);
+ err = 0;
+
+ slot = device_get_ivars(reqdev);
+
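+ /* Sleep until any in-flight request releases the host. */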
+ MMCSPI_LOCK_SLOT(slot);
+ while (slot->bus_busy)
+ mtx_sleep(slot, &slot->mtx, 0, "mmcspiah", 0);
+ slot->bus_busy++;
+ MMCSPI_UNLOCK_SLOT(slot);
+
+ TRACE_EXIT(brdev);
+
+ return (err);
+}
+
+static int
+mmcspi_release_host(device_t brdev, device_t reqdev)
+{
+ struct mmcspi_slot *slot;
+
+ TRACE_ENTER(brdev);
+
+ slot = device_get_ivars(reqdev);
+
+ MMCSPI_LOCK_SLOT(slot);
+ slot->bus_busy--;
+ MMCSPI_UNLOCK_SLOT(slot);
+
+ wakeup(slot);
+
+ TRACE_EXIT(brdev);
+
+ return (0);
+}
+
+static int
+mmcspi_modevent_handler(module_t mod, int what, void *arg)
+{
+
+ switch (what) {
+ case MOD_LOAD:
+ init_crc7tab();
+ init_crc16tab();
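+ /*
+ * MOSI is expected to idle high while card responses are
+ * clocked in, so keep a buffer of 0xff bytes ready to transmit.
+ */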
+ memset(onesbuf, 0xff, sizeof(onesbuf));
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+
+ return (0);
+}
+
+static int
+mmcspi_switch_vccq(device_t bus, device_t child)
+{
+
+ return (0);
+}
+
+#if defined(MMCSPI_ENABLE_DEBUG_FUNCS)
+static void
+mmcspi_dump_data(device_t dev, const char *label, uint8_t *data,
+ unsigned int len)
+{
+ unsigned int i, j;
+ unsigned int num_lines;
+ unsigned int residual;
+
+ TRACE_ENTER(dev);
+
+ num_lines = len / 16;
+ residual = len - 16 * num_lines;
+
+ for(i = 0; i < num_lines; i++) {
+ device_printf(dev, "%s:", label);
+ for(j = 0; j < 16; j++)
+ printf(" %02x", data[i * 16 + j]);
+ printf("\n");
+ }
+
+ if (residual) {
+ device_printf(dev, "%s:", label);
+ for(j = 0; j < residual; j++)
+ printf(" %02x", data[num_lines * 16 + j]);
+ printf("\n");
+ }
+
+ TRACE_EXIT(dev);
+}
+
+static void
+mmcspi_dump_spi_bus(device_t dev, unsigned int len)
+{
+ unsigned int num_blocks;
+ unsigned int residual;
+ unsigned int i;
+
+ TRACE_ENTER(dev);
+
+ num_blocks = len / MMCSPI_DATA_BLOCK_LEN;
+ residual = len - num_blocks * MMCSPI_DATA_BLOCK_LEN;
+
+ for (i = 0; i < num_blocks; i++) {
+ if (MMC_ERR_NONE != mmcspi_do_spi_read(dev, junkbuf,
+ MMCSPI_DATA_BLOCK_LEN)) {
+ device_printf(dev, "spi read failed\n");
+ return;
+ }
+
+ mmcspi_dump_data(dev, "bus_data", junkbuf,
+ MMCSPI_DATA_BLOCK_LEN);
+ }
+
+ if (residual) {
+ if (MMC_ERR_NONE != mmcspi_do_spi_read(dev, junkbuf,
+ residual)) {
+ device_printf(dev, "spi read failed\n");
+ return;
+ }
+
+ mmcspi_dump_data(dev, "bus_data", junkbuf, residual);
+ }
+
+ TRACE_EXIT(dev);
+}
+#endif
+
+static device_method_t mmcspi_methods[] = {
+ /* device_if */
+ DEVMETHOD(device_probe, mmcspi_probe),
+ DEVMETHOD(device_attach, mmcspi_attach),
+ DEVMETHOD(device_detach, mmcspi_detach),
+ DEVMETHOD(device_suspend, mmcspi_suspend),
+ DEVMETHOD(device_resume, mmcspi_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, mmcspi_read_ivar),
+ DEVMETHOD(bus_write_ivar, mmcspi_write_ivar),
+
+ /* mmcbr_if */
+ DEVMETHOD(mmcbr_update_ios, mmcspi_update_ios),
+ DEVMETHOD(mmcbr_request, mmcspi_request),
+ DEVMETHOD(mmcbr_get_ro, mmcspi_get_ro),
+ DEVMETHOD(mmcbr_acquire_host, mmcspi_acquire_host),
+ DEVMETHOD(mmcbr_release_host, mmcspi_release_host),
+ DEVMETHOD(mmcbr_switch_vccq, mmcspi_switch_vccq),
+
+ {0, 0},
+};
+
+static driver_t mmcspi_driver = {
+ "mmcspi",
+ mmcspi_methods,
+ sizeof(struct mmcspi_softc),
+};
+
+DRIVER_MODULE(mmcspi, spibus, mmcspi_driver, mmcspi_modevent_handler, NULL);
+MODULE_DEPEND(mmcspi, spibus, 1, 1, 1);
+MMC_DECLARE_BRIDGE(mmcspi);
+#ifdef FDT
+SPIBUS_FDT_PNP_INFO(compat_data);
+#endif
diff --git a/sys/dev/mmcnull/mmcnull.c b/sys/dev/mmcnull/mmcnull.c
index 028d3aabd7f1..ec4bc1339778 100644
--- a/sys/dev/mmcnull/mmcnull.c
+++ b/sys/dev/mmcnull/mmcnull.c
@@ -77,7 +77,7 @@ mmcnull_identify(driver_t *driver, device_t parent)
return;
/* Avoid duplicates. */
- if (device_find_child(parent, "mmcnull", -1))
+ if (device_find_child(parent, "mmcnull", DEVICE_UNIT_ANY))
return;
child = BUS_ADD_CHILD(parent, 20, "mmcnull", 0);
diff --git a/sys/dev/mpi3mr/mpi/mpi30_api.h b/sys/dev/mpi3mr/mpi/mpi30_api.h
index aa7b54ec470e..8b05deb7717c 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_api.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_api.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/mpi3mr/mpi/mpi30_cnfg.h b/sys/dev/mpi3mr/mpi/mpi30_cnfg.h
index d1ae2ebfa372..d4cec3330a56 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_cnfg.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_cnfg.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -64,6 +64,7 @@
* Configuration Page Attributes *
****************************************************************************/
#define MPI3_CONFIG_PAGEATTR_MASK (0xF0)
+#define MPI3_CONFIG_PAGEATTR_SHIFT (4)
#define MPI3_CONFIG_PAGEATTR_READ_ONLY (0x00)
#define MPI3_CONFIG_PAGEATTR_CHANGEABLE (0x10)
#define MPI3_CONFIG_PAGEATTR_PERSISTENT (0x20)
@@ -84,58 +85,79 @@
/**** Device PageAddress Format ****/
#define MPI3_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_DEVICE_PGAD_FORM_SHIFT (28)
#define MPI3_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_DEVICE_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI3_DEVICE_PGAD_HANDLE_SHIFT (0)
/**** SAS Expander PageAddress Format ****/
#define MPI3_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_SAS_EXPAND_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM (0x10000000)
#define MPI3_SAS_EXPAND_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
#define MPI3_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
#define MPI3_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI3_SAS_EXPAND_PGAD_HANDLE_SHIFT (0)
/**** SAS Phy PageAddress Format ****/
#define MPI3_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_SAS_PHY_PGAD_FORM_SHIFT (28)
#define MPI3_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI3_SAS_PHY_PGAD_PHY_NUMBER_SHIFT (0)
/**** SAS Port PageAddress Format ****/
#define MPI3_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_SASPORT_PGAD_FORM_SHIFT (28)
#define MPI3_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
#define MPI3_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
#define MPI3_SASPORT_PGAD_PORT_NUMBER_MASK (0x000000FF)
+#define MPI3_SASPORT_PGAD_PORT_NUMBER_SHIFT (0)
/**** Enclosure PageAddress Format ****/
#define MPI3_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_ENCLOS_PGAD_FORM_SHIFT (28)
#define MPI3_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
#define MPI3_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI3_ENCLOS_PGAD_HANDLE_SHIFT (0)
/**** PCIe Switch PageAddress Format ****/
#define MPI3_PCIE_SWITCH_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_PCIE_SWITCH_PGAD_FORM_SHIFT (28)
#define MPI3_PCIE_SWITCH_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE_PORT_NUM (0x10000000)
#define MPI3_PCIE_SWITCH_PGAD_FORM_HANDLE (0x20000000)
#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_MASK (0x00FF0000)
#define MPI3_PCIE_SWITCH_PGAD_PORTNUM_SHIFT (16)
#define MPI3_PCIE_SWITCH_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI3_PCIE_SWITCH_PGAD_HANDLE_SHIFT (0)
/**** PCIe Link PageAddress Format ****/
#define MPI3_PCIE_LINK_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_PCIE_LINK_PGAD_FORM_SHIFT (28)
#define MPI3_PCIE_LINK_PGAD_FORM_GET_NEXT_LINK (0x00000000)
#define MPI3_PCIE_LINK_PGAD_FORM_LINK_NUM (0x10000000)
#define MPI3_PCIE_LINK_PGAD_LINKNUM_MASK (0x000000FF)
+#define MPI3_PCIE_LINK_PGAD_LINKNUM_SHIFT (0)
/**** Security PageAddress Format ****/
#define MPI3_SECURITY_PGAD_FORM_MASK (0xF0000000)
+#define MPI3_SECURITY_PGAD_FORM_SHIFT (28)
#define MPI3_SECURITY_PGAD_FORM_GET_NEXT_SLOT (0x00000000)
#define MPI3_SECURITY_PGAD_FORM_SLOT_NUM (0x10000000)
#define MPI3_SECURITY_PGAD_SLOT_GROUP_MASK (0x0000FF00)
#define MPI3_SECURITY_PGAD_SLOT_GROUP_SHIFT (8)
#define MPI3_SECURITY_PGAD_SLOT_MASK (0x000000FF)
+#define MPI3_SECURITY_PGAD_SLOT_SHIFT (0)
+
+/**** Instance PageAddress Format ****/
+#define MPI3_INSTANCE_PGAD_INSTANCE_MASK (0x0000FFFF)
+#define MPI3_INSTANCE_PGAD_INSTANCE_SHIFT (0)
+
/*****************************************************************************
* Configuration Request Message *
@@ -149,7 +171,8 @@ typedef struct _MPI3_CONFIG_REQUEST
U8 IOCUseOnly06; /* 0x06 */
U8 MsgFlags; /* 0x07 */
U16 ChangeCount; /* 0x08 */
- U16 Reserved0A; /* 0x0A */
+ U8 ProxyIOCNumber; /* 0x0A */
+ U8 Reserved0B; /* 0x0B */
U8 PageVersion; /* 0x0C */
U8 PageNumber; /* 0x0D */
U8 PageType; /* 0x0E */
@@ -185,7 +208,7 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
* Common definitions used by Configuration Pages *
****************************************************************************/
-/**** Defines for Negotiated Link Rates ****/
+/**** Defines for NegotiatedLinkRates ****/
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK (0xF0)
#define MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT (4)
#define MPI3_SAS_NEG_LINK_RATE_PHYSICAL_MASK (0x0F)
@@ -212,6 +235,7 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
#define MPI3_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI3_SAS_APHYINFO_REASON_SHIFT (0)
#define MPI3_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
#define MPI3_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
#define MPI3_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
@@ -231,6 +255,7 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_SAS_PHYINFO_STATUS_VACANT (0x80000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SHIFT (27)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_ACTIVE (0x00000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_PARTIAL (0x08000000)
#define MPI3_SAS_PHYINFO_PHY_POWER_CONDITION_SLUMBER (0x10000000)
@@ -247,6 +272,7 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
#define MPI3_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI3_SAS_PHYINFO_REASON_SHIFT (16)
#define MPI3_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
#define MPI3_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
#define MPI3_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
@@ -266,12 +292,14 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_SAS_PHYINFO_PARTIAL_PATHWAY_TIME_SHIFT (8)
#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_MASK (0x000000F0)
+#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SHIFT (4)
#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_DIRECT (0x00000000)
#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_SUBTRACTIVE (0x00000010)
#define MPI3_SAS_PHYINFO_ROUTING_ATTRIBUTE_TABLE (0x00000020)
/**** Defines for the ProgrammedLinkRate field ****/
#define MPI3_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI3_SAS_PRATE_MAX_RATE_SHIFT (4)
#define MPI3_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI3_SAS_PRATE_MAX_RATE_1_5 (0x80)
#define MPI3_SAS_PRATE_MAX_RATE_3_0 (0x90)
@@ -279,6 +307,7 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_SAS_PRATE_MAX_RATE_12_0 (0xB0)
#define MPI3_SAS_PRATE_MAX_RATE_22_5 (0xC0)
#define MPI3_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI3_SAS_PRATE_MIN_RATE_SHIFT (0)
#define MPI3_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
#define MPI3_SAS_PRATE_MIN_RATE_1_5 (0x08)
#define MPI3_SAS_PRATE_MIN_RATE_3_0 (0x09)
@@ -288,12 +317,14 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
/**** Defines for the HwLinkRate field ****/
#define MPI3_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI3_SAS_HWRATE_MAX_RATE_SHIFT (4)
#define MPI3_SAS_HWRATE_MAX_RATE_1_5 (0x80)
#define MPI3_SAS_HWRATE_MAX_RATE_3_0 (0x90)
#define MPI3_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
#define MPI3_SAS_HWRATE_MAX_RATE_12_0 (0xB0)
#define MPI3_SAS_HWRATE_MAX_RATE_22_5 (0xC0)
#define MPI3_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI3_SAS_HWRATE_MIN_RATE_SHIFT (0)
#define MPI3_SAS_HWRATE_MIN_RATE_1_5 (0x08)
#define MPI3_SAS_HWRATE_MIN_RATE_3_0 (0x09)
#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
@@ -331,6 +362,9 @@ typedef struct _MPI3_CONFIG_PAGE_HEADER
#define MPI3_MFGPAGE_DEVID_SAS5116_MPI_NS (0x00B5)
#define MPI3_MFGPAGE_DEVID_SAS5116_NVME_NS (0x00B6)
#define MPI3_MFGPAGE_DEVID_SAS5116_PCIE_SWITCH (0x00B8)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI (0x00F0)
+#define MPI3_MFGPAGE_DEVID_SAS5248_MPI_NS (0x00F1)
+#define MPI3_MFGPAGE_DEVID_SAS5248_PCIE_SWITCH (0x00F2)
/*****************************************************************************
* Manufacturing Page 0 *
@@ -478,19 +512,28 @@ typedef struct _MPI3_MAN6_GPIO_ENTRY
/**** Defines for FunctionFlags when FunctionCode is ISTWI_RESET ****/
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_SHIFT (0)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
/**** Defines for Param1 (Flags) when FunctionCode is EXT_INTERRUPT ****/
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xF0)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_SHIFT (4)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_ACTIVE_CABLE_OVERCURRENT (0x20)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_ACK_REQUIRED (0x02)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
+#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_SHIFT (0)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
+/**** Defines for Param1 (LEVEL) when FunctionCode is OVER_TEMPERATURE ****/
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_WARNING (0x00)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_CRITICAL (0x01)
+#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_FATAL (0x02)
+
/**** Defines for Param1 (PHY STATE) when FunctionCode is PORT_STATUS_GREEN ****/
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
@@ -506,9 +549,11 @@ typedef struct _MPI3_MAN6_GPIO_ENTRY
/**** Defines for the Flags field ****/
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
+#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SHIFT (8)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_SLOW_EDGE (0x0000)
#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_MASK (0x00C0)
+#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_SHIFT (6)
#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_100OHM (0x0000)
#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_66OHM (0x0040)
#define MPI3_MAN6_GPIO_FLAGS_DRIVE_STRENGTH_50OHM (0x0080)
@@ -518,6 +563,7 @@ typedef struct _MPI3_MAN6_GPIO_ENTRY
#define MPI3_MAN6_GPIO_FLAGS_ACTIVE_HIGH (0x0008)
#define MPI3_MAN6_GPIO_FLAGS_BI_DIR_ENABLED (0x0004)
#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_MASK (0x0003)
+#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_SHIFT (0)
#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_INPUT (0x0000)
#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_DRAIN_OUTPUT (0x0001)
#define MPI3_MAN6_GPIO_FLAGS_DIRECTION_OPEN_SOURCE_OUTPUT (0x0002)
@@ -570,9 +616,11 @@ typedef struct _MPI3_MAN7_RECEPTACLE_INFO
/**** Defines for PEDClk field ****/
#define MPI3_MAN7_PEDCLK_ROUTING_MASK (0x10)
+#define MPI3_MAN7_PEDCLK_ROUTING_SHIFT (4)
#define MPI3_MAN7_PEDCLK_ROUTING_DIRECT (0x00)
#define MPI3_MAN7_PEDCLK_ROUTING_CLOCK_BUFFER (0x10)
#define MPI3_MAN7_PEDCLK_ID_MASK (0x0F)
+#define MPI3_MAN7_PEDCLK_ID_SHIFT (0)
#ifndef MPI3_MAN7_RECEPTACLE_INFO_MAX
#define MPI3_MAN7_RECEPTACLE_INFO_MAX (1)
@@ -594,6 +642,7 @@ typedef struct _MPI3_MAN_PAGE7
/**** Defines for Flags field ****/
#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_MASK (0x01)
+#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_SHIFT (0)
#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_0 (0x00)
#define MPI3_MAN7_FLAGS_BASE_ENCLOSURE_LEVEL_1 (0x01)
@@ -965,6 +1014,7 @@ typedef struct _MPI3_MAN11_BOARD_FAN_DEVICE_FORMAT
/**** Defines for the Flags field ****/
#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_MASK (0x07)
+#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_SHIFT (0)
#define MPI3_MAN11_BOARD_FAN_FLAGS_FAN_CTRLR_TYPE_AMC6821 (0x00)
typedef union _MPI3_MAN11_DEVICE_SPECIFIC_FORMAT
@@ -1067,13 +1117,15 @@ typedef struct _MPI3_MAN_PAGE12
#define MPI3_MAN12_FLAGS_GROUP_ID_DISABLED (0x0100)
#define MPI3_MAN12_FLAGS_SIO_CLK_FILTER_ENABLED (0x0004)
#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_MASK (0x0002)
+#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_SHIFT (1)
#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_PUSH_PULL (0x0000)
#define MPI3_MAN12_FLAGS_SCLOCK_SLOAD_TYPE_OPEN_DRAIN (0x0002)
#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_MASK (0x0001)
+#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_SHIFT (0)
#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_PUSH_PULL (0x0000)
#define MPI3_MAN12_FLAGS_SDATAOUT_TYPE_OPEN_DRAIN (0x0001)
-/**** Defines for the SioClkFreq field ****/
+/**** Defines for the SClockFreq field ****/
#define MPI3_MAN12_SIO_CLK_FREQ_MIN (32) /* 32 Hz min SIO Clk Freq */
#define MPI3_MAN12_SIO_CLK_FREQ_MAX (100000) /* 100 KHz max SIO Clk Freq */
@@ -1089,6 +1141,7 @@ typedef struct _MPI3_MAN_PAGE12
/*** Defines for the Pattern field ****/
#define MPI3_MAN12_PATTERN_RATE_MASK (0xE0000000)
+#define MPI3_MAN12_PATTERN_RATE_SHIFT (29)
#define MPI3_MAN12_PATTERN_RATE_2_HZ (0x00000000)
#define MPI3_MAN12_PATTERN_RATE_4_HZ (0x20000000)
#define MPI3_MAN12_PATTERN_RATE_8_HZ (0x40000000)
@@ -1300,14 +1353,17 @@ typedef struct _MPI3_MAN_PAGE20
/**** Defines for the AllowedPersonalities field ****/
#define MPI3_MAN20_ALLOWEDPERSON_RAID_MASK (0x02)
+#define MPI3_MAN20_ALLOWEDPERSON_RAID_SHIFT (1)
#define MPI3_MAN20_ALLOWEDPERSON_RAID_ALLOWED (0x02)
#define MPI3_MAN20_ALLOWEDPERSON_RAID_NOT_ALLOWED (0x00)
#define MPI3_MAN20_ALLOWEDPERSON_EHBA_MASK (0x01)
+#define MPI3_MAN20_ALLOWEDPERSON_EHBA_SHIFT (0)
#define MPI3_MAN20_ALLOWEDPERSON_EHBA_ALLOWED (0x01)
#define MPI3_MAN20_ALLOWEDPERSON_EHBA_NOT_ALLOWED (0x00)
-/**** Defines for the NonpremuimFeatures field ****/
+/**** Defines for the NonpremiumFeatures field ****/
#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_MASK (0x01)
+#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_SHIFT (0)
#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_ENABLED (0x00)
#define MPI3_MAN20_NONPREMUIM_DISABLE_PD_DEGRADED_DISABLED (0x01)
@@ -1328,17 +1384,37 @@ typedef struct _MPI3_MAN_PAGE21
/**** Defines for the Flags field ****/
#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_MASK (0x00000060)
+#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_SHIFT (5)
#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_BLOCK (0x00000000)
#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_ALLOW (0x00000020)
#define MPI3_MAN21_FLAGS_UNCERTIFIED_DRIVES_WARN (0x00000040)
#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_MASK (0x00000008)
+#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_SHIFT (3)
#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_ALLOW (0x00000000)
#define MPI3_MAN21_FLAGS_BLOCK_SSD_WR_CACHE_CHANGE_PREVENT (0x00000008)
#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_MASK (0x00000001)
+#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_SHIFT (0)
#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_DEFAULT (0x00000000)
#define MPI3_MAN21_FLAGS_SES_VPD_ASSOC_OEM_SPECIFIC (0x00000001)
/*****************************************************************************
+ * Manufacturing Page 22 *
+ ****************************************************************************/
+
+typedef struct _MPI3_MAN_PAGE22
+{
+ MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U32 Reserved08; /* 0x08 */
+ U16 NumEUI64; /* 0x0C */
+ U16 Reserved0E; /* 0x0E */
+ U64 BaseEUI64; /* 0x10 */
+} MPI3_MAN_PAGE22, MPI3_POINTER PTR_MPI3_MAN_PAGE22,
+ Mpi3ManPage22_t, MPI3_POINTER pMpi3ManPage22_t;
+
+/**** Defines for the PageVersion field ****/
+#define MPI3_MAN22_PAGEVERSION (0x00)
+
+/*****************************************************************************
* Manufacturing Pages 32-63 (ProductSpecific) *
****************************************************************************/
#ifndef MPI3_MAN_PROD_SPECIFIC_MAX
@@ -1390,18 +1466,21 @@ typedef struct _MPI3_IO_UNIT_PAGE1
/**** Defines for the Flags field ****/
#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_MASK (0x00000030)
+#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_SHIFT (4)
#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_ENABLE (0x00000000)
#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_DISABLE (0x00000010)
#define MPI3_IOUNIT1_FLAGS_NVME_WRITE_CACHE_NO_MODIFY (0x00000020)
#define MPI3_IOUNIT1_FLAGS_ATA_SECURITY_FREEZE_LOCK (0x00000008)
#define MPI3_IOUNIT1_FLAGS_WRITE_SAME_BUFFER (0x00000004)
#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_MASK (0x00000003)
+#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_SHIFT (0)
#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_ENABLE (0x00000000)
#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_DISABLE (0x00000001)
#define MPI3_IOUNIT1_FLAGS_SATA_WRITE_CACHE_UNCHANGED (0x00000002)
/**** Defines for the DMDReport PCIe/SATA/SAS fields ****/
#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_MASK (0x7F)
+#define MPI3_IOUNIT1_DMD_REPORT_DELAY_TIME_SHIFT (0)
#define MPI3_IOUNIT1_DMD_REPORT_UNIT_16_SEC (0x80)
/*****************************************************************************
@@ -1427,6 +1506,7 @@ typedef struct _MPI3_IO_UNIT_PAGE2
#define MPI3_IOUNIT2_GPIO_FUNCTION_MASK (0xFFFC)
#define MPI3_IOUNIT2_GPIO_FUNCTION_SHIFT (2)
#define MPI3_IOUNIT2_GPIO_SETTING_MASK (0x0001)
+#define MPI3_IOUNIT2_GPIO_SETTING_SHIFT (0)
#define MPI3_IOUNIT2_GPIO_SETTING_OFF (0x0000)
#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
@@ -1434,13 +1514,21 @@ typedef struct _MPI3_IO_UNIT_PAGE2
* IO Unit Page 3 *
****************************************************************************/
+typedef enum _MPI3_IOUNIT3_THRESHOLD
+{
+ MPI3_IOUNIT3_THRESHOLD_WARNING = 0,
+ MPI3_IOUNIT3_THRESHOLD_CRITICAL = 1,
+ MPI3_IOUNIT3_THRESHOLD_FATAL = 2,
+ MPI3_IOUNIT3_THRESHOLD_LOW = 3,
+ MPI3_IOUNIT3_NUM_THRESHOLDS
+} MPI3_IOUNIT3_THRESHOLD;
+
typedef struct _MPI3_IO_UNIT3_SENSOR
{
U16 Flags; /* 0x00 */
U8 ThresholdMargin; /* 0x02 */
U8 Reserved03; /* 0x03 */
- U16 Threshold[3]; /* 0x04 */
- U16 Reserved0A; /* 0x0A */
+ U16 Threshold[MPI3_IOUNIT3_NUM_THRESHOLDS]; /* 0x04 */
U32 Reserved0C; /* 0x0C */
U32 Reserved10; /* 0x10 */
U32 Reserved14; /* 0x14 */
@@ -1448,6 +1536,7 @@ typedef struct _MPI3_IO_UNIT3_SENSOR
Mpi3IOUnit3Sensor_t, MPI3_POINTER pMpi3IOUnit3Sensor_t;
/**** Defines for the Flags field ****/
+#define MPI3_IOUNIT3_SENSOR_FLAGS_LOW_THRESHOLD_VALID (0x0020)
#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
@@ -1591,8 +1680,9 @@ typedef struct _MPI3_IO_UNIT_PAGE5
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
-/**** Defines for the PHY field ****/
+/**** Defines for the Phy field ****/
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
+#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_SHIFT (0)
/*****************************************************************************
* IO Unit Page 6 *
@@ -1621,11 +1711,33 @@ typedef struct _MPI3_IO_UNIT_PAGE6
#define MPI3_IOUNIT8_DIGEST_MAX (1)
#endif /* MPI3_IOUNIT8_DIGEST_MAX */
-typedef union _MPI3_IOUNIT8_DIGEST
+typedef union _MPI3_IOUNIT8_RAW_DIGEST
{
U32 Dword[16];
U16 Word[32];
U8 Byte[64];
+} MPI3_IOUNIT8_RAW_DIGEST, MPI3_POINTER PTR_MPI3_IOUNIT8_RAW_DIGEST,
+ Mpi3IOUnit8RawDigest_t, MPI3_POINTER pMpi3IOUnit8RawDigest_t;
+
+typedef struct _MPI3_IOUNIT8_METADATA_DIGEST
+{
+ U8 SlotStatus; /* 0x00 */
+ U8 Reserved01[3]; /* 0x01 */
+ U32 Reserved04[3]; /* 0x04 */
+ MPI3_IOUNIT8_RAW_DIGEST DigestData; /* 0x10 */
+} MPI3_IOUNIT8_METADATA_DIGEST, MPI3_POINTER PTR_MPI3_IOUNIT8_METADATA_DIGEST,
+ Mpi3IOUnit8MetadataDigest_t, MPI3_POINTER pMpi3IOUnit8MetadataDigest_t;
+
+/**** Defines for the SlotStatus field ****/
+#define MPI3_IOUNIT8_METADATA_DIGEST_SLOTSTATUS_UNUSED (0x00)
+#define MPI3_IOUNIT8_METADATA_DIGEST_SLOTSTATUS_UPDATE_PENDING (0x01)
+#define MPI3_IOUNIT8_METADATA_DIGEST_SLOTSTATUS_VALID (0x03)
+#define MPI3_IOUNIT8_METADATA_DIGEST_SLOTSTATUS_INVALID (0x07)
+
+typedef union _MPI3_IOUNIT8_DIGEST
+{
+ MPI3_IOUNIT8_RAW_DIGEST RawDigest[MPI3_IOUNIT8_DIGEST_MAX];
+ MPI3_IOUNIT8_METADATA_DIGEST MetadataDigest[MPI3_IOUNIT8_DIGEST_MAX];
} MPI3_IOUNIT8_DIGEST, MPI3_POINTER PTR_MPI3_IOUNIT8_DIGEST,
Mpi3IOUnit8Digest_t, MPI3_POINTER pMpi3IOUnit8Digest_t;
@@ -1633,8 +1745,9 @@ typedef struct _MPI3_IO_UNIT_PAGE8
{
MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
U8 SBMode; /* 0x08 */
- U8 SbState; /* 0x09 */
- U16 Reserved0A; /* 0x0A */
+ U8 SBState; /* 0x09 */
+ U8 Flags; /* 0x0A */
+ U8 Reserved0A; /* 0x0B */
U8 NumSlots; /* 0x0C */
U8 SlotsAvailable; /* 0x0D */
U8 CurrentKeyEncryptionAlgo; /* 0x0E */
@@ -1642,22 +1755,33 @@ typedef struct _MPI3_IO_UNIT_PAGE8
MPI3_VERSION_UNION CurrentSvn; /* 0x10 */
U32 Reserved14; /* 0x14 */
U32 CurrentKey[128]; /* 0x18 */
- MPI3_IOUNIT8_DIGEST Digest[MPI3_IOUNIT8_DIGEST_MAX]; /* 0x218 */ /* variable length */
+ MPI3_IOUNIT8_DIGEST Digest; /* 0x218 */ /* variable length */
} MPI3_IO_UNIT_PAGE8, MPI3_POINTER PTR_MPI3_IO_UNIT_PAGE8,
Mpi3IOUnitPage8_t, MPI3_POINTER pMpi3IOUnitPage8_t;
/**** Defines for the PageVersion field ****/
-#define MPI3_IOUNIT8_PAGEVERSION (0x00)
+#define MPI3_IOUNIT8_PAGEVERSION (0x00)
/**** Defines for the SBMode field ****/
-#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
-#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
-#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE_RECERTIFIED (0x08)
+#define MPI3_IOUNIT8_SBMODE_SECURE_DEBUG (0x04)
+#define MPI3_IOUNIT8_SBMODE_HARD_SECURE (0x02)
+#define MPI3_IOUNIT8_SBMODE_CONFIG_SECURE (0x01)
/**** Defines for the SBState field ****/
-#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
-#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
-#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+#define MPI3_IOUNIT8_SBSTATE_SVN_UPDATE_PENDING (0x04)
+#define MPI3_IOUNIT8_SBSTATE_KEY_UPDATE_PENDING (0x02)
+#define MPI3_IOUNIT8_SBSTATE_SECURE_BOOT_ENABLED (0x01)
+
+/**** Defines for the Flags field ****/
+#define MPI3_IOUNIT8_FLAGS_CURRENT_KEY_IOUNIT17 (0x08)
+#define MPI3_IOUNIT8_FLAGS_DIGESTFORM_MASK (0x07)
+#define MPI3_IOUNIT8_FLAGS_DIGESTFORM_SHIFT (0)
+#define MPI3_IOUNIT8_FLAGS_DIGESTFORM_RAW (0x00)
+#define MPI3_IOUNIT8_FLAGS_DIGESTFORM_DIGEST_WITH_METADATA (0x01)
+
+/**** Use MPI3_ENCRYPTION_ALGORITHM_ defines (see mpi30_image.h) for the CurrentKeyEncryptionAlgo field ****/
+/**** Use MPI3_HASH_ALGORITHM defines (see mpi30_image.h) for the KeyDigestHashAlgo field ****/
/*****************************************************************************
* IO Unit Page 9 *
@@ -1685,6 +1809,7 @@ typedef struct _MPI3_IO_UNIT_PAGE9
/**** Defines for the FirstDevice field ****/
#define MPI3_IOUNIT9_FIRSTDEVICE_UNKNOWN (0xFFFF)
+#define MPI3_IOUNIT9_FIRSTDEVICE_IN_DRIVER_PAGE_0 (0xFFFE)
/*****************************************************************************
* IO Unit Page 10 *
@@ -1710,6 +1835,7 @@ typedef struct _MPI3_IO_UNIT_PAGE10
/**** Defines for the Flags field ****/
#define MPI3_IOUNIT10_FLAGS_VALID (0x01)
#define MPI3_IOUNIT10_FLAGS_ACTIVEID_MASK (0x02)
+#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SHIFT (1)
#define MPI3_IOUNIT10_FLAGS_ACTIVEID_FIRST_REGION (0x00)
#define MPI3_IOUNIT10_FLAGS_ACTIVEID_SECOND_REGION (0x02)
#define MPI3_IOUNIT10_FLAGS_PBLP_EXPECTED (0x80)
@@ -1791,6 +1917,7 @@ typedef struct _MPI3_IO_UNIT_PAGE12
#define MPI3_IOUNIT12_FLAGS_NUMPASSES_32 (0x00000200)
#define MPI3_IOUNIT12_FLAGS_NUMPASSES_64 (0x00000300)
#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_MASK (0x00000003)
+#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_SHIFT (0)
#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_DISABLED (0x00000000)
#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_500US (0x00000001)
#define MPI3_IOUNIT12_FLAGS_PASSPERIOD_1MS (0x00000002)
@@ -1901,6 +2028,7 @@ typedef struct _MPI3_IO_UNIT_PAGE15
/**** Defines for the Flags field ****/
#define MPI3_IOUNIT15_FLAGS_EPRINIT_INITREQUIRED (0x04)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_MASK (0x03)
+#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_SHIFT (0)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_NOT_SUPPORTED (0x00)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITHOUT_POWER_BRAKE_GPIO (0x01)
#define MPI3_IOUNIT15_FLAGS_EPRSUPPORT_WITH_POWER_BRAKE_GPIO (0x02)
@@ -1909,6 +2037,122 @@ typedef struct _MPI3_IO_UNIT_PAGE15
#define MPI3_IOUNIT15_NUMPOWERBUDGETDATA_POWER_BUDGETING_DISABLED (0x00)
/*****************************************************************************
+ * IO Unit Page 16 *
+ ****************************************************************************/
+
+#ifndef MPI3_IOUNIT16_ERROR_MAX
+#define MPI3_IOUNIT16_ERROR_MAX (1)
+#endif /* MPI3_IOUNIT16_ERROR_MAX */
+
+typedef struct _MPI3_IOUNIT16_ERROR
+{
+ U32 Offset; /* 0x00 */
+ U32 Reserved04; /* 0x04 */
+ U64 Count; /* 0x08 */
+ U64 Timestamp; /* 0x10 */
+} MPI3_IOUNIT16_ERROR, MPI3_POINTER PTR_MPI3_IOUNIT16_ERROR,
+ Mpi3IOUnit16Error_t, MPI3_POINTER pMpi3IOUnit16Error_t;
+
+typedef struct _MPI3_IO_UNIT_PAGE16
+{
+ MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U64 TotalErrorCount; /* 0x08 */
+ U32 Reserved10[3]; /* 0x10 */
+ U8 NumErrors; /* 0x1C */
+ U8 MaxErrorsTracked; /* 0x1D */
+ U16 Reserved1E; /* 0x1E */
+ MPI3_IOUNIT16_ERROR Error[MPI3_IOUNIT16_ERROR_MAX]; /* 0x20 */ /* variable length */
+} MPI3_IO_UNIT_PAGE16, MPI3_POINTER PTR_MPI3_IO_UNIT_PAGE16,
+ Mpi3IOUnitPage16_t, MPI3_POINTER pMpi3IOUnitPage16_t;
+
+/**** Defines for the PageVersion field ****/
+#define MPI3_IOUNIT16_PAGEVERSION (0x00)
+
+/*****************************************************************************
+ * IO Unit Page 17 *
+ ****************************************************************************/
+
+#ifndef MPI3_IOUNIT17_CURRENTKEY_MAX
+#define MPI3_IOUNIT17_CURRENTKEY_MAX (1)
+#endif /* MPI3_IOUNIT17_CURRENTKEY_MAX */
+
+typedef struct _MPI3_IO_UNIT_PAGE17
+{
+ MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 NumInstances; /* 0x08 */
+ U8 Instance; /* 0x09 */
+ U16 Reserved0A; /* 0x0A */
+ U32 Reserved0C[4]; /* 0x0C */
+ U16 KeyLength; /* 0x1C */
+ U8 EncryptionAlgorithm; /* 0x1E */
+ U8 Reserved1F; /* 0x1F */
+ U32 CurrentKey[MPI3_IOUNIT17_CURRENTKEY_MAX]; /* 0x20 */ /* variable length */
+} MPI3_IO_UNIT_PAGE17, MPI3_POINTER PTR_MPI3_IO_UNIT_PAGE17,
+ Mpi3IOUnitPage17_t, MPI3_POINTER pMpi3IOUnitPage17_t;
+
+/**** Defines for the PageVersion field ****/
+#define MPI3_IOUNIT17_PAGEVERSION (0x00)
+
+/**** Use MPI3_ENCRYPTION_ALGORITHM_ defines (see mpi30_image.h) for the EncryptionAlgorithm field ****/
+
+/*****************************************************************************
+ * IO Unit Page 18 *
+ ****************************************************************************/
+
+typedef struct _MPI3_IO_UNIT_PAGE18
+{
+ MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U8 Flags; /* 0x08 */
+ U8 PollInterval; /* 0x09 */
+ U16 Reserved0A; /* 0x0A */
+ U32 Reserved0C; /* 0x0C */
+} MPI3_IO_UNIT_PAGE18, MPI3_POINTER PTR_MPI3_IO_UNIT_PAGE18,
+ Mpi3IOUnitPage18_t, MPI3_POINTER pMpi3IOUnitPage18_t;
+
+/**** Defines for the PageVersion field ****/
+#define MPI3_IOUNIT18_PAGEVERSION (0x00)
+
+/**** Defines for the Flags field ****/
+#define MPI3_IOUNIT18_FLAGS_DIRECTATTACHED_ENABLE (0x01)
+
+/**** Defines for the PollInterval field ****/
+#define MPI3_IOUNIT18_POLLINTERVAL_DISABLE (0x00)
+
+/*****************************************************************************
+ * IO Unit Page 19 *
+ ****************************************************************************/
+
+#ifndef MPI3_IOUNIT19_DEVICE_MAX
+#define MPI3_IOUNIT19_DEVICE_MAX (1)
+#endif /* MPI3_IOUNIT19_DEVICE_MAX */
+
+typedef struct _MPI3_IOUNIT19_DEVICE_
+{
+ U16 Temperature; /* 0x00 */
+ U16 DevHandle; /* 0x02 */
+ U16 PersistentID; /* 0x04 */
+ U16 Reserved06; /* 0x06 */
+} MPI3_IOUNIT19_DEVICE, MPI3_POINTER PTR_MPI3_IOUNIT19_DEVICE,
+ Mpi3IOUnit19Device_t, MPI3_POINTER pMpi3IOUnit19Device_t;
+
+/**** Defines for the Temperature field ****/
+#define MPI3_IOUNIT19_DEVICE_TEMPERATURE_UNAVAILABLE (0x8000)
+
+typedef struct _MPI3_IO_UNIT_PAGE19
+{
+ MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
+ U16 NumDevices; /* 0x08 */
+ U16 Reserved0A; /* 0x0A */
+ U32 Reserved0C; /* 0x0C */
+ MPI3_IOUNIT19_DEVICE Device[MPI3_IOUNIT19_DEVICE_MAX]; /* 0x10 */
+} MPI3_IO_UNIT_PAGE19, MPI3_POINTER PTR_MPI3_IO_UNIT_PAGE19,
+ Mpi3IOUnitPage19_t, MPI3_POINTER pMpi3IOUnitPage19_t;
+
+/**** Defines for the PageVersion field ****/
+#define MPI3_IOUNIT19_PAGEVERSION (0x00)
+
+
+/*****************************************************************************
* IOC Configuration Pages *
****************************************************************************/
@@ -1973,7 +2217,8 @@ typedef struct _MPI3_IOC_PAGE2
* Driver Configuration Pages *
****************************************************************************/
-/**** Defines for the Flags field ****/
+/**** Defines for the Flags field in Driver Pages 10, 20, and 30 ****/
+/**** NOT used in Driver Page 1 Flags field ****/
#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
@@ -2006,11 +2251,13 @@ typedef struct _MPI3_ALLOWED_CMD_NVME
} MPI3_ALLOWED_CMD_NVME, MPI3_POINTER PTR_MPI3_ALLOWED_CMD_NVME,
Mpi3AllowedCmdNvme_t, MPI3_POINTER pMpi3AllowedCmdNvme_t;
-/**** Defines for the CommandFlags field ****/
+/**** Defines for the NVMeCmdFlags field ****/
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_SHIFT (7)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3F)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_SHIFT (0)
#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
typedef union _MPI3_ALLOWED_CMD
@@ -2047,7 +2294,7 @@ typedef struct _MPI3_DRIVER_PAGE0
U8 TURInterval; /* 0x0F */
U8 Reserved10; /* 0x10 */
U8 SecurityKeyTimeout; /* 0x11 */
- U16 Reserved12; /* 0x12 */
+ U16 FirstDevice; /* 0x12 */
U32 Reserved14; /* 0x14 */
U32 Reserved18; /* 0x18 */
} MPI3_DRIVER_PAGE0, MPI3_POINTER PTR_MPI3_DRIVER_PAGE0,
@@ -2057,13 +2304,20 @@ typedef struct _MPI3_DRIVER_PAGE0
#define MPI3_DRIVER0_PAGEVERSION (0x00)
/**** Defines for the BSDOptions field ****/
+#define MPI3_DRIVER0_BSDOPTS_DEVICEEXPOSURE_DISABLE (0x00000020)
+#define MPI3_DRIVER0_BSDOPTS_WRITECACHE_DISABLE (0x00000010)
#define MPI3_DRIVER0_BSDOPTS_HEADLESS_MODE_ENABLE (0x00000008)
#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_SHIFT (0)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_INTERNAL_DEVS (0x00000002)
+/**** Defines for the FirstDevice field ****/
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE1 (0x0000)
+#define MPI3_DRIVER0_FIRSTDEVICE_IGNORE2 (0xFFFF)
+
/*****************************************************************************
* Driver Page 1 *
****************************************************************************/
@@ -2071,7 +2325,8 @@ typedef struct _MPI3_DRIVER_PAGE1
{
MPI3_CONFIG_PAGE_HEADER Header; /* 0x00 */
U32 Flags; /* 0x08 */
- U32 Reserved0C; /* 0x0C */
+ U8 TimeStampUpdate; /* 0x0C */
+ U8 Reserved0D[3]; /* 0x0D */
U16 HostDiagTraceMaxSize; /* 0x10 */
U16 HostDiagTraceMinSize; /* 0x12 */
U16 HostDiagTraceDecrementSize; /* 0x14 */
@@ -2263,14 +2518,6 @@ typedef union _MPI3_SECURITY_NONCE
} MPI3_SECURITY_NONCE, MPI3_POINTER PTR_MPI3_SECURITY_NONCE,
Mpi3SecurityNonce_t, MPI3_POINTER pMpi3SecurityNonce_t;
-typedef union _MPI3_SECURITY_ROOT_DIGEST
-{
- U32 Dword[16];
- U16 Word[32];
- U8 Byte[64];
-} MPI3_SECURITY_ROOT_DIGEST, MPI3_POINTER PTR_MPI3_SECURITY_ROOT_DIGEST,
- Mpi3SecurityRootDigest_t, MPI3_POINTER pMpi3SecurityRootDigest_t;
-
/*****************************************************************************
* Security Page 0 *
****************************************************************************/
@@ -2305,6 +2552,7 @@ typedef struct _MPI3_SECURITY_PAGE0
/**** Defines for the CertChainFlags field ****/
#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_MASK (0x0E)
+#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SHIFT (1)
#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_UNUSED (0x00)
#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_CERBERUS (0x02)
#define MPI3_SECURITY0_CERTCHAIN_FLAGS_AUTH_API_SPDM (0x04)
@@ -2343,6 +2591,7 @@ typedef struct _MPI3_SECURITY1_KEY_RECORD
/**** Defines for the Flags field ****/
#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_MASK (0x1F)
+#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_SHIFT (0)
#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_NOT_VALID (0x00)
#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_HMAC (0x01)
#define MPI3_SECURITY1_KEY_RECORD_FLAGS_TYPE_AES (0x02)
@@ -2382,17 +2631,25 @@ typedef struct _MPI3_SECURITY_PAGE1
#define MPI3_SECURITY2_TRUSTED_ROOT_MAX 1
#endif /* MPI3_SECURITY2_TRUSTED_ROOT_MAX */
+#ifndef MPI3_SECURITY2_ROOT_LEN
+#define MPI3_SECURITY2_ROOT_LEN 4
+#endif /* MPI3_SECURITY2_ROOT_LEN */
+
typedef struct _MPI3_SECURITY2_TRUSTED_ROOT
{
U8 Level; /* 0x00 */
U8 HashAlgorithm; /* 0x01 */
U16 TrustedRootFlags; /* 0x02 */
U32 Reserved04[3]; /* 0x04 */
- MPI3_SECURITY_ROOT_DIGEST RootDigest; /* 0x10 */
+ U8 Root[MPI3_SECURITY2_ROOT_LEN]; /* 0x10 */ /* variable length */
} MPI3_SECURITY2_TRUSTED_ROOT, MPI3_POINTER PTR_MPI3_SECURITY2_TRUSTED_ROOT,
Mpi3Security2TrustedRoot_t, MPI3_POINTER pMpi3Security2TrustedRoot_t;
/**** Defines for the TrustedRootFlags field ****/
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_ROOTFORM_MASK (0xF000)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_ROOTFORM_SHIFT (12)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_ROOTFORM_DIGEST (0x0000)
+#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_ROOTFORM_DERCERT (0x1000)
#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_MASK (0x0006)
#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_SHIFT (1)
#define MPI3_SECURITY2_TRUSTEDROOT_TRUSTEDROOTFLAGS_HASHALGOSOURCE_HA_FIELD (0x0000)
@@ -2407,7 +2664,8 @@ typedef struct _MPI3_SECURITY_PAGE2
MPI3_SECURITY_NONCE Nonce; /* 0x50 */
U32 Reserved90[3]; /* 0x90 */
U8 NumRoots; /* 0x9C */
- U8 Reserved9D[3]; /* 0x9D */
+ U8 Reserved9D; /* 0x9D */
+ U16 RootElementSize; /* 0x9E */
MPI3_SECURITY2_TRUSTED_ROOT TrustedRoot[MPI3_SECURITY2_TRUSTED_ROOT_MAX]; /* 0xA0 */ /* variable length */
} MPI3_SECURITY_PAGE2, MPI3_POINTER PTR_MPI3_SECURITY_PAGE2,
Mpi3SecurityPage2_t, MPI3_POINTER pMpi3SecurityPage2_t;
@@ -2469,6 +2727,7 @@ typedef struct _MPI3_SAS_IO_UNIT_PAGE0
/**** Defines for the PortFlags field ****/
#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_SHIFT (0)
#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
@@ -2533,6 +2792,7 @@ typedef struct _MPI3_SAS_IO_UNIT_PAGE1
#define MPI3_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
#define MPI3_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_MASK (0x0001)
+#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SHIFT (0)
#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_DEVICE_NAME (0x0000)
#define MPI3_SASIOUNIT1_CONTROL_HARD_RESET_SAS_ADDRESS (0x0001)
@@ -2562,6 +2822,7 @@ typedef struct _MPI3_SAS_IO_UNIT_PAGE1
#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_12_0 (0xB0)
#define MPI3_SASIOUNIT1_MMLR_MAX_RATE_22_5 (0xC0)
#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_MASK (0x0F)
+#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_SHIFT (0)
#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_6_0 (0x0A)
#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_12_0 (0x0B)
#define MPI3_SASIOUNIT1_MMLR_MIN_RATE_22_5 (0x0C)
@@ -3052,10 +3313,11 @@ typedef struct _MPI3_SAS_PHY_PAGE4
* Common definitions used by PCIe Configuration Pages *
****************************************************************************/
-/**** Defines for Negotiated Link Rates ****/
+/**** Defines for NegotiatedLinkRates ****/
#define MPI3_PCIE_LINK_RETIMERS_MASK (0x30)
#define MPI3_PCIE_LINK_RETIMERS_SHIFT (4)
#define MPI3_PCIE_NEG_LINK_RATE_MASK (0x0F)
+#define MPI3_PCIE_NEG_LINK_RATE_SHIFT (0)
#define MPI3_PCIE_NEG_LINK_RATE_UNKNOWN (0x00)
#define MPI3_PCIE_NEG_LINK_RATE_PHY_DISABLED (0x01)
#define MPI3_PCIE_NEG_LINK_RATE_2_5 (0x02)
@@ -3099,6 +3361,7 @@ typedef struct _MPI3_PCIE_IO_UNIT0_PHY_DATA
/**** Defines for the LinkFlags field ****/
#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_MASK (0x10)
+#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_SHIFT (4)
#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_IOUNIT1 (0x00)
#define MPI3_PCIEIOUNIT0_LINKFLAGS_CONFIG_SOURCE_BKPLANE (0x10)
#define MPI3_PCIEIOUNIT0_LINKFLAGS_ENUM_IN_PROGRESS (0x08)
@@ -3175,6 +3438,7 @@ typedef struct _MPI3_PCIE_IO_UNIT1_PHY_DATA
/**** Defines for the LinkFlags field ****/
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_SHIFT (0)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
@@ -3213,14 +3477,16 @@ typedef struct _MPI3_PCIE_IO_UNIT_PAGE1
/**** Defines for the ControlFlags field ****/
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_MASK (0xE0000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_SHIFT (29)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_NONE (0x00000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_DEASSERT (0x20000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_ASSERT (0x40000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PERST_OVERRIDE_BACKPLANE_ERROR (0x60000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_MASK (0x1C000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_SHIFT (26)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_NONE (0x00000000)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DEASSERT (0x04000000)
-#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ASSERT (0x08000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_ENABLE (0x04000000)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_DISABLE (0x08000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_REFCLK_OVERRIDE_BACKPLANE_ERROR (0x0C000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_PARTIAL_CAPACITY_ENABLE (0x00000100)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x00000080)
@@ -3231,6 +3497,7 @@ typedef struct _MPI3_PCIE_IO_UNIT_PAGE1
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x00000010)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x00000020)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0000000F)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_SHIFT (0)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_USE_BACKPLANE (0x00000000)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x00000002)
#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x00000003)
@@ -3362,12 +3629,14 @@ typedef struct _MPI3_PCIE_SWITCH_PAGE1
/**** Defines for the PageVersion field ****/
#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
-/**** Defines for the FLAGS field ****/
+/**** Defines for the Flags field ****/
#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0C)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+
/*** use MPI3_PCIE_ASPM_ENABLE_ defines for ASPMState field values ***/
#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+
/*** use MPI3_PCIE_ASPM_SUPPORT_ defines for ASPMSupport field values ***/
/**** Defines for the NegotiatedLinkRate field - use MPI3_PCIE_NEG_LINK_RATE_ defines ****/
@@ -3454,6 +3723,8 @@ typedef struct _MPI3_ENCLOSURE_PAGE0
U16 SEPDevHandle; /* 0x1A */
U8 ChassisSlot; /* 0x1C */
U8 Reserved1D[3]; /* 0x1D */
+ U32 ReceptacleIDs; /* 0x20 */
+ U32 Reserved24; /* 0x24 */
} MPI3_ENCLOSURE_PAGE0, MPI3_POINTER PTR_MPI3_ENCLOSURE_PAGE0,
Mpi3EnclosurePage0_t, MPI3_POINTER pMpi3EnclosurePage0_t;
@@ -3462,19 +3733,23 @@ typedef struct _MPI3_ENCLOSURE_PAGE0
/**** Defines for the Flags field ****/
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_MASK (0xC000)
+#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SHIFT (14)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
+#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_SHIFT (4)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
#define MPI3_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI3_ENCLS0_FLAGS_MNG_SHIFT (0)
#define MPI3_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
#define MPI3_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
#define MPI3_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0002)
-/**** Defines for the PhysicalPort field - use MPI3_DEVICE0_PHYPORT_ defines ****/
+/**** Defines for the ReceptacleIDs field ****/
+#define MPI3_ENCLS0_RECEPTACLEIDS_NOT_REPORTED (0x00000000)
/*****************************************************************************
* Device Configuration Pages *
@@ -3550,6 +3825,7 @@ typedef struct _MPI3_DEVICE0_PCIE_FORMAT
/**** Defines for DeviceInfo bitfield ****/
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SHIFT (0)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
@@ -3577,11 +3853,13 @@ typedef struct _MPI3_DEVICE0_PCIE_FORMAT
#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
/*** use MPI3_PCIE_ASPM_SUPPORT_ defines for ASPM field values ***/
-/**** Defines for the RecoverMethod field ****/
+/**** Defines for the RecoveryInfo field ****/
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xE0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_SHIFT (5)
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1F)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_SHIFT (0)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
@@ -3628,6 +3906,11 @@ typedef struct _MPI3_DEVICE0_VD_FORMAT
/**** Defines for the Flags field ****/
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_MASK (0xF000)
#define MPI3_DEVICE0_VD_FLAGS_IO_THROTTLE_GROUP_QD_SHIFT (12)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK (0x0003)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SHIFT (0)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_HDD (0x0000)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD (0x0001)
+#define MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_NO_GUIDANCE (0x0002)
typedef union _MPI3_DEVICE0_DEV_SPEC_FORMAT
{
@@ -3705,6 +3988,7 @@ typedef struct _MPI3_DEVICE_PAGE0
#define MPI3_DEVICE0_ASTATUS_SIF_UDMA_SN (0x27)
#define MPI3_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x28)
#define MPI3_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x29)
+#define MPI3_DEVICE0_ASTATUS_SIF_DEVICE_FAULT (0x2A)
#define MPI3_DEVICE0_ASTATUS_SIF_MAX (0x2F)
/* PCIe Access Status Codes */
#define MPI3_DEVICE0_ASTATUS_PCIE_UNKNOWN (0x30)
@@ -3740,6 +4024,7 @@ typedef struct _MPI3_DEVICE_PAGE0
/**** Defines for the Flags field ****/
#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK (0xE000)
+#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_SHIFT (13)
#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT (0x0000)
#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB (0x2000)
#define MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB (0x4000)
@@ -3773,7 +4058,8 @@ typedef struct _MPI3_DEVICE1_PCIE_FORMAT
U16 DeviceID; /* 0x02 */
U16 SubsystemVendorID; /* 0x04 */
U16 SubsystemID; /* 0x06 */
- U32 Reserved08; /* 0x08 */
+ U16 ReadyTimeout; /* 0x08 */
+ U16 Reserved0A; /* 0x0A */
U8 RevisionID; /* 0x0C */
U8 Reserved0D; /* 0x0D */
U16 PCIParameters; /* 0x0E */
diff --git a/sys/dev/mpi3mr/mpi/mpi30_image.h b/sys/dev/mpi3mr/mpi/mpi30_image.h
index 7b953cb3b1a6..73451d80fe58 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_image.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_image.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -119,13 +119,23 @@ typedef struct _MPI3_COMPONENT_IMAGE_HEADER
#define MPI3_IMAGE_HEADER_SIGNATURE1_RMC (0x20434D52) /* string "RMC " */
#define MPI3_IMAGE_HEADER_SIGNATURE1_SMM (0x204D4D53) /* string "SMM " */
#define MPI3_IMAGE_HEADER_SIGNATURE1_PSW (0x20575350) /* string "PSW " */
-
+#define MPI3_IMAGE_HEADER_SIGNATURE1_CSW (0x20575343) /* string "CSW " */
/**** Definitions for Signature2 field ****/
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
/**** Definitions for Flags field ****/
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MASK (0x00000300)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_SHIFT (8)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_UNSPECIFIED (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_NOT_SIGNED (0x00000100)
+#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_UEFI_MICROSOFT_SIGNED (0x00000200)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_MASK (0x000000C0)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_SHIFT (6)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_DEVICE_CERT (0x00000000)
+#define MPI3_IMAGE_HEADER_FLAGS_CERT_CHAIN_FORMAT_ALIAS_CERT (0x00000040)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
+#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_SHIFT (4)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_DI (0x00000010)
#define MPI3_IMAGE_HEADER_FLAGS_SIGNED_NVDATA (0x00000008)
@@ -216,12 +226,14 @@ typedef struct _MPI3_CI_MANIFEST_MPI
/* defines for the ReleaseLevel field */
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PRE_PRODUCTION (0x08)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DIAG (0xF0)
/* defines for the Flags field */
#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
@@ -314,9 +326,9 @@ typedef struct _MPI3_SUPPORTED_DEVICES_DATA
} MPI3_SUPPORTED_DEVICES_DATA, MPI3_POINTER PTR_MPI3_SUPPORTED_DEVICES_DATA,
Mpi3SupportedDevicesData_t, MPI3_POINTER pMpi3SupportedDevicesData_t;
-#ifndef MPI3_ENCRYPTED_HASH_MAX
-#define MPI3_ENCRYPTED_HASH_MAX (1)
-#endif /* MPI3_ENCRYPTED_HASH_MAX */
+#ifndef MPI3_PUBLIC_KEY_MAX
+#define MPI3_PUBLIC_KEY_MAX (1)
+#endif /* MPI3_PUBLIC_KEY_MAX */
/* Encrypted Hash Entry Format */
typedef struct _MPI3_ENCRYPTED_HASH_ENTRY
@@ -324,24 +336,30 @@ typedef struct _MPI3_ENCRYPTED_HASH_ENTRY
U8 HashImageType; /* 0x00 */
U8 HashAlgorithm; /* 0x01 */
U8 EncryptionAlgorithm; /* 0x02 */
- U8 Reserved03; /* 0x03 */
- U32 Reserved04; /* 0x04 */
- U32 EncryptedHash[MPI3_ENCRYPTED_HASH_MAX]; /* 0x08 */ /* variable length */
+ U8 Flags; /* 0x03 */
+ U16 PublicKeySize; /* 0x04 */
+ U16 SignatureSize; /* 0x06 */
+ U32 PublicKey[MPI3_PUBLIC_KEY_MAX]; /* 0x08 */ /* variable length */
+ /* Signature - offset of this field must be calculated */ /* variable length */
} MPI3_ENCRYPTED_HASH_ENTRY, MPI3_POINTER PTR_MPI3_ENCRYPTED_HASH_ENTRY,
Mpi3EncryptedHashEntry_t, MPI3_POINTER pMpi3EncryptedHashEntry_t;
/* defines for the HashImageType field */
-#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_SIGNATURE (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH (0x03)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_1_OF_2 (0x04)
+#define MPI3_HASH_IMAGE_TYPE_KEY_WITH_HASH_2_OF_2 (0x05)
/* defines for the HashAlgorithm field */
#define MPI3_HASH_ALGORITHM_VERSION_MASK (0xE0)
+#define MPI3_HASH_ALGORITHM_VERSION_SHIFT (5)
#define MPI3_HASH_ALGORITHM_VERSION_NONE (0x00)
#define MPI3_HASH_ALGORITHM_VERSION_SHA1 (0x20) /* Obsolete */
#define MPI3_HASH_ALGORITHM_VERSION_SHA2 (0x40)
#define MPI3_HASH_ALGORITHM_VERSION_SHA3 (0x60)
#define MPI3_HASH_ALGORITHM_SIZE_MASK (0x1F)
+#define MPI3_HASH_ALGORITHM_SIZE_SHIFT (0)
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
@@ -358,24 +376,15 @@ typedef struct _MPI3_ENCRYPTED_HASH_ENTRY
#define MPI3_ENCRYPTION_ALGORITHM_ECDSA_P256 (0x07) /* NIST secp256r1 curve */
#define MPI3_ENCRYPTION_ALGORITHM_ECDSA_P384 (0x08) /* NIST secp384r1 curve */
#define MPI3_ENCRYPTION_ALGORITHM_ECDSA_P521 (0x09) /* NIST secp521r1 curve */
+#define MPI3_ENCRYPTION_ALGORITHM_LMS_HSS (0x0A) /* Leighton-Micali Signature (LMS) */
+ /* Hierarchical Signature System (HSS) */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_87 (0x0B) /* Module-Lattice-Based Sig Algo - Category 5 */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_65 (0x0C) /* Module-Lattice-Based Sig Algo - Category 3 */
+#define MPI3_ENCRYPTION_ALGORITHM_ML_DSA_44 (0x0D) /* Module-Lattice-Based Sig Algo - Category 2 */
-
-#ifndef MPI3_PUBLIC_KEY_MAX
-#define MPI3_PUBLIC_KEY_MAX (1)
-#endif /* MPI3_PUBLIC_KEY_MAX */
-
-/* Encrypted Key with Hash Entry Format */
-typedef struct _MPI3_ENCRYPTED_KEY_WITH_HASH_ENTRY
-{
- U8 HashImageType; /* 0x00 */
- U8 HashAlgorithm; /* 0x01 */
- U8 EncryptionAlgorithm; /* 0x02 */
- U8 Reserved03; /* 0x03 */
- U32 Reserved04; /* 0x04 */
- U32 PublicKey[MPI3_PUBLIC_KEY_MAX]; /* 0x08 */ /* variable length */
- /* EncryptedHash - offset of this field must be calculated */ /* variable length */
-} MPI3_ENCRYPTED_KEY_WITH_HASH_ENTRY, MPI3_POINTER PTR_MPI3_ENCRYPTED_KEY_WITH_HASH_ENTRY,
- Mpi3EncryptedKeyWithHashEntry_t, MPI3_POINTER pMpi3EncryptedKeyWithHashEntry_t;
+/* defines for the Flags field */
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_MASK (0x0F)
+#define MPI3_ENCRYPTED_HASH_ENTRY_FLAGS_PAIRED_KEY_SHIFT (0)
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
#define MPI3_ENCRYPTED_HASH_ENTRY_MAX (1)
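
With the reworked entry, the Signature trails the variable-length PublicKey and has no struct member of its own, hence the "offset of this field must be calculated" comment. A sketch of that calculation, assuming PublicKeySize is a byte count (an assumption; check the MPI 3.0 spec):

static inline U8 *
encrypted_hash_signature(MPI3_ENCRYPTED_HASH_ENTRY *entry)
{
    /* Signature starts right after PublicKeySize bytes of key data. */
    return ((U8 *)entry->PublicKey + entry->PublicKeySize);
}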
diff --git a/sys/dev/mpi3mr/mpi/mpi30_init.h b/sys/dev/mpi3mr/mpi/mpi30_init.h
index c0ba14909ac1..c24725972162 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_init.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_init.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -78,7 +78,7 @@ typedef struct _MPI3_SCSI_IO_REQUEST
U16 ChangeCount; /* 0x08 */
U16 DevHandle; /* 0x0A */
U32 Flags; /* 0x0C */
- U32 SkipCount; /* 0x10 */
+ U32 IOCUseOnly10; /* 0x10 */
U32 DataLength; /* 0x14 */
U8 LUN[8]; /* 0x18 */
MPI3_SCSI_IO_CDB_UNION CDB; /* 0x20 */
@@ -91,11 +91,16 @@ typedef struct _MPI3_SCSI_IO_REQUEST
#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
/**** Defines for the Flags field ****/
-#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_MASK (0x60000000)
+#define MPI3_SCSIIO_FLAGS_LARGE_CDB_SHIFT (29)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
#define MPI3_SCSIIO_FLAGS_CDB_IN_SEPARATE_BUFFER (0x40000000)
+#define MPI3_SCSIIO_FLAGS_CDB_PRODUCT_SPECIFIC (0x60000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_MASK (0x18000000)
+#define MPI3_SCSIIO_FLAGS_IOC_USE_ONLY_27_SHIFT (27)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_MASK (0x07000000)
+#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SHIFT (24)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ (0x00000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_HEADOFQ (0x01000000)
#define MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_ORDEREDQ (0x02000000)
@@ -103,12 +108,15 @@ typedef struct _MPI3_SCSI_IO_REQUEST
#define MPI3_SCSIIO_FLAGS_CMDPRI_MASK (0x00F00000)
#define MPI3_SCSIIO_FLAGS_CMDPRI_SHIFT (20)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK (0x000C0000)
+#define MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_NO_DATA_TRANSFER (0x00000000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_SCSIIO_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_SCSIIO_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_SCSIIO_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_MASK (0x000000F0)
+#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_SHIFT (4)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_IO_THROTTLING (0x00000010)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE (0x00000020)
#define MPI3_SCSIIO_FLAGS_DIVERT_REASON_PROD_SPECIFIC (0x00000080)
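
The renamed LARGE_CDB mask and the new shifts make Flags handling symmetric: the same defines serve both writers and readers of the Flags word. An illustrative fragment, not part of this change:

/* Build: simple-queue write. Decode: data direction back out. */
U32 flags = MPI3_SCSIIO_FLAGS_TASKATTRIBUTE_SIMPLEQ |
    MPI3_SCSIIO_FLAGS_DATADIRECTION_WRITE;
U32 dir = (flags & MPI3_SCSIIO_FLAGS_DATADIRECTION_MASK) >>
    MPI3_SCSIIO_FLAGS_DATADIRECTION_SHIFT;    /* yields 1 for a write */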
@@ -166,6 +174,7 @@ typedef struct _MPI3_SCSI_IO_REPLY
/**** Defines for the SCSIState field ****/
#define MPI3_SCSI_STATE_SENSE_MASK (0x03)
+#define MPI3_SCSI_STATE_SENSE_SHIFT (0)
#define MPI3_SCSI_STATE_SENSE_VALID (0x00)
#define MPI3_SCSI_STATE_SENSE_FAILED (0x01)
#define MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY (0x02)
diff --git a/sys/dev/mpi3mr/mpi/mpi30_ioc.h b/sys/dev/mpi3mr/mpi/mpi30_ioc.h
index 77f6be5b2694..dc7b478536c3 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_ioc.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_ioc.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -76,17 +76,20 @@ typedef struct _MPI3_IOC_INIT_REQUEST
Mpi3IOCInitRequest_t, MPI3_POINTER pMpi3IOCInitRequest_t;
/**** Defines for the MsgFlags field ****/
-#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
-#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
-#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
-#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
-#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED (0x08)
+#define MPI3_IOCINIT_MSGFLAGS_SCSIIOSTATUSREPLY_SUPPORTED (0x04)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SHIFT (0)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
/**** Defines for the WhoInit field ****/
-#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
-#define MPI3_WHOINIT_ROM_BIOS (0x02)
-#define MPI3_WHOINIT_HOST_DRIVER (0x03)
-#define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
/**** Defines for the DriverInformationAddress field ****/
typedef struct _MPI3_DRIVER_INFO_LAYOUT
@@ -102,6 +105,13 @@ typedef struct _MPI3_DRIVER_INFO_LAYOUT
} MPI3_DRIVER_INFO_LAYOUT, MPI3_POINTER PTR_MPI3_DRIVER_INFO_LAYOUT,
Mpi3DriverInfoLayout_t, MPI3_POINTER pMpi3DriverInfoLayout_t;
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_MASK (0x00000003)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_SHIFT (0)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_GUIDANCE (0x00000000)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL (0x00000001)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_HDD (0x00000002)
+#define MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_REPORT_AS_SSD (0x00000003)
+
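
These DRIVERCAP_OSEXPOSURE values use the same HDD/SSD/no-guidance vocabulary as the MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_ encodings added earlier in this change, suggesting the driver advertises a preference at IOCInit time and reads the per-virtual-disk outcome from Device Page 0. A hedged decode of the per-VD side (helper name is illustrative):

static inline bool
vd_reported_as_ssd(U16 vd_flags)
{
    return ((vd_flags & MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_MASK) ==
        MPI3_DEVICE0_VD_FLAGS_OSEXPOSURE_SSD);
}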
/*****************************************************************************
* IOCFacts Request Message *
****************************************************************************/
@@ -173,23 +183,27 @@ typedef struct _MPI3_IOC_FACTS_DATA
U16 MaxIOThrottleGroup; /* 0x62 */
U16 IOThrottleLow; /* 0x64 */
U16 IOThrottleHigh; /* 0x66 */
+ U32 DiagFdlSize; /* 0x68 */
+ U32 DiagTtySize; /* 0x6C */
} MPI3_IOC_FACTS_DATA, MPI3_POINTER PTR_MPI3_IOC_FACTS_DATA,
Mpi3IOCFactsData_t, MPI3_POINTER pMpi3IOCFactsData_t;
/**** Defines for the IOCCapabilities field ****/
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_SHIFT (31)
#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x80000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_MASK (0x00000600)
+#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_SHIFT (9)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_FIXED_THRESHOLD (0x00000000)
#define MPI3_IOCFACTS_CAPABILITY_INT_COALESCE_OUTSTANDING_IO (0x00000200)
-#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
-#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
-#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_SUPPORTED (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_SUPPORTED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_SUPPORTED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_SUPPORTED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_SUPPORTED (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED (0x00000008)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_SUPPORTED (0x00000002)
#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
/**** WhoInit values are defined under IOCInit Request Message definition ****/
@@ -207,6 +221,7 @@ typedef struct _MPI3_IOC_FACTS_DATA
#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_SHIFT (8)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
@@ -219,7 +234,10 @@ typedef struct _MPI3_IOC_FACTS_DATA
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
#define MPI3_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0010)
#define MPI3_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0008)
+#define MPI3_IOCFACTS_EXCEPT_BLOCKING_BOOT_EVENT (0x0004)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_SELFTEST_FAILURE (0x0002)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x0001)
+#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SHIFT (0)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_PRIMARY (0x0000)
#define MPI3_IOCFACTS_EXCEPT_BOOTSTAT_SECONDARY (0x0001)
@@ -234,22 +252,31 @@ typedef struct _MPI3_IOC_FACTS_DATA
#define MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED (0x0000)
/**** Defines for the Flags field ****/
-#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
-#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000FF00)
-#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
-#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
-#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
-#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
-#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
-#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000F)
-#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
-#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
+#define MPI3_IOCFACTS_FLAGS_SIGNED_NVDATA_REQUIRED (0x00010000)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_MASK (0x0000FF00)
+#define MPI3_IOCFACTS_FLAGS_DMA_ADDRESS_WIDTH_SHIFT (8)
+#define MPI3_IOCFACTS_FLAGS_MAX_REQ_PER_REPLY_QUEUE_LIMIT (0x00000040)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK (0x00000030)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_SHIFT (4)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_NOT_STARTED (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_IN_PROGRESS (0x00000010)
+#define MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_COMPLETE (0x00000020)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_MASK (0x0000000F)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_SHIFT (0)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_EHBA (0x00000000)
+#define MPI3_IOCFACTS_FLAGS_PERSONALITY_RAID_DDR (0x00000002)
/**** Defines for the IOThrottleDataLength field ****/
-#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
+#define MPI3_IOCFACTS_IO_THROTTLE_DATA_LENGTH_NOT_REQUIRED (0x0000)
-/**** Defines for the IOThrottleDataLength field ****/
-#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
+/**** Defines for the MaxIOThrottleGroup field ****/
+#define MPI3_IOCFACTS_MAX_IO_THROTTLE_GROUP_NOT_REQUIRED (0x0000)
+
+/**** Defines for the DiagFdlSize field ****/
+#define MPI3_IOCFACTS_DIAGFDLSIZE_NOT_SUPPORTED (0x00000000)
+
+/**** Defines for the DiagTtySize field ****/
+#define MPI3_IOCFACTS_DIAGTTYSIZE_NOT_SUPPORTED (0x00000000)
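
The capability renames (_ENABLED/_CAPABLE to _SUPPORTED) keep the bit values, so existing tests only need the new spelling; the zero sentinels for the new diag sizes follow the usual not-supported convention. A minimal sketch, illustrative helper names only:

static inline bool
ioc_supports_raid(MPI3_IOC_FACTS_DATA *facts_data)
{
    return ((facts_data->IOCCapabilities &
        MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED) != 0);
}

static inline bool
ioc_supports_fdl_diag(MPI3_IOC_FACTS_DATA *facts_data)
{
    /* Zero means the FDL diag buffer is not supported. */
    return (facts_data->DiagFdlSize !=
        MPI3_IOCFACTS_DIAGFDLSIZE_NOT_SUPPORTED);
}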
/*****************************************************************************
* Management Passthrough Request Message *
@@ -295,6 +322,7 @@ typedef struct _MPI3_CREATE_REQUEST_QUEUE_REQUEST
/**** Defines for the Flags field ****/
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
@@ -343,10 +371,12 @@ typedef struct _MPI3_CREATE_REPLY_QUEUE_REQUEST
/**** Defines for the Flags field ****/
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SHIFT (7)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_COALESCE_DISABLE (0x02)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_SHIFT (0)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
@@ -440,9 +470,9 @@ typedef struct _MPI3_EVENT_NOTIFICATION_REQUEST
} MPI3_EVENT_NOTIFICATION_REQUEST, MPI3_POINTER PTR_MPI3_EVENT_NOTIFICATION_REQUEST,
Mpi3EventNotificationRequest_t, MPI3_POINTER pMpi3EventNotificationRequest_t;
-/**** Defines for the SASBroadcastPrimitiveMasks field - use MPI3_EVENT_PRIMITIVE_ values ****/
+/**** Defines for the SASBroadcastPrimitiveMasks field - use MPI3_EVENT_BROADCAST_PRIMITIVE_ values ****/
-/**** Defines for the SASNotifyPrimitiveMasks field - use MPI3_EVENT_NOTIFY_ values ****/
+/**** Defines for the SASNotifyPrimitiveMasks field - use MPI3_EVENT_NOTIFY_PRIMITIVE_ values ****/
/**** Defines for the EventMasks field - use MPI3_EVENT_ values ****/
@@ -470,9 +500,11 @@ typedef struct _MPI3_EVENT_NOTIFICATION_REPLY
/**** Defines for the MsgFlags field ****/
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK (0x01)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_SHIFT (0)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED (0x01)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_NOT_REQUIRED (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_MASK (0x02)
+#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_SHIFT (1)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_ORIGINAL (0x00)
#define MPI3_EVENT_NOTIFY_MSGFLAGS_EVENT_ORIGINALITY_REPLAY (0x02)
@@ -716,7 +748,7 @@ typedef struct _MPI3_EVENT_SAS_TOPO_PHY_ENTRY
{
U16 AttachedDevHandle; /* 0x00 */
U8 LinkRate; /* 0x02 */
- U8 Status; /* 0x03 */
+ U8 PhyStatus; /* 0x03 */
} MPI3_EVENT_SAS_TOPO_PHY_ENTRY, MPI3_POINTER PTR_MPI3_EVENT_SAS_TOPO_PHY_ENTRY,
Mpi3EventSasTopoPhyEntry_t, MPI3_POINTER pMpi3EventSasTopoPhyEntry_t;
@@ -743,6 +775,7 @@ typedef struct _MPI3_EVENT_SAS_TOPO_PHY_ENTRY
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_NO_EXIST (0x40)
#define MPI3_EVENT_SAS_TOPO_PHY_STATUS_VACANT (0x80)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_MASK (0x0F)
+#define MPI3_EVENT_SAS_TOPO_PHY_RC_SHIFT (0)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING (0x02)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_PHY_CHANGED (0x03)
#define MPI3_EVENT_SAS_TOPO_PHY_RC_NO_CHANGE (0x04)
@@ -873,6 +906,7 @@ typedef struct _MPI3_EVENT_PCIE_TOPO_PORT_ENTRY
/**** Defines for the CurrentPortInfo and PreviousPortInfo field ****/
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_MASK (0xF0)
+#define MPI3_EVENT_PCIE_TOPO_PI_LANES_SHIFT (4)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_1 (0x10)
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_2 (0x20)
@@ -881,6 +915,7 @@ typedef struct _MPI3_EVENT_PCIE_TOPO_PORT_ENTRY
#define MPI3_EVENT_PCIE_TOPO_PI_LANES_16 (0x50)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_MASK (0x0F)
+#define MPI3_EVENT_PCIE_TOPO_PI_RATE_SHIFT (0)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_UNKNOWN (0x00)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_DISABLED (0x01)
#define MPI3_EVENT_PCIE_TOPO_PI_RATE_2_5 (0x02)
@@ -1369,6 +1404,7 @@ typedef struct _MPI3_PEL_REQ_ACTION_ACKNOWLEDGE
/**** Definitions for the MsgFlags field ****/
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_SHIFT (0)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
@@ -1425,6 +1461,7 @@ typedef struct _MPI3_CI_DOWNLOAD_REQUEST
#define MPI3_CI_DOWNLOAD_MSGFLAGS_FORCE_FMC_ENABLE (0x40)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_SIGNED_NVDATA (0x20)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MASK (0x03)
+#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SHIFT (0)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_FAST (0x00)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_MEDIUM (0x01)
#define MPI3_CI_DOWNLOAD_MSGFLAGS_WRITE_CACHE_FLUSH_SLOW (0x02)
@@ -1460,6 +1497,7 @@ typedef struct _MPI3_CI_DOWNLOAD_REPLY
#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0E)
+#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_SHIFT (1)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_AWAITING (0x02)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_ONLINE_PENDING (0x04)
@@ -1490,9 +1528,11 @@ typedef struct _MPI3_CI_UPLOAD_REQUEST
/**** Defines for the MsgFlags field ****/
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_MASK (0x01)
+#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SHIFT (0)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_SECONDARY (0x01)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_MASK (0x02)
+#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_SHIFT (1)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_FLASH (0x00)
#define MPI3_CI_UPLOAD_MSGFLAGS_FORMAT_EXECUTABLE (0x02)
diff --git a/sys/dev/mpi3mr/mpi/mpi30_pci.h b/sys/dev/mpi3mr/mpi/mpi30_pci.h
index f15dab2a5a9c..12d7000882cb 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_pci.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_pci.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -69,9 +69,11 @@ typedef struct _MPI3_NVME_ENCAPSULATED_REQUEST
/**** Defines for the Flags field ****/
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_SHIFT (1)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_SHIFT (0)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
diff --git a/sys/dev/mpi3mr/mpi/mpi30_raid.h b/sys/dev/mpi3mr/mpi/mpi30_raid.h
index fe2c4baffd3c..6fe557f843f3 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_raid.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_raid.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/mpi3mr/mpi/mpi30_sas.h b/sys/dev/mpi3mr/mpi/mpi30_sas.h
index c28de07c9fdd..e50bcde0ade4 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_sas.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_sas.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -52,6 +52,7 @@
#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
+#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_SHIFT (0)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)
diff --git a/sys/dev/mpi3mr/mpi/mpi30_targ.h b/sys/dev/mpi3mr/mpi/mpi30_targ.h
index d9aee48a6437..8ae654410165 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_targ.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_targ.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,7 @@ typedef struct _MPI3_TARGET_CMD_BUF_POST_BASE_REQUEST
/**** Defines for the BufferPostFlags field ****/
#define MPI3_CMD_BUF_POST_BASE_FLAGS_DLAS_MASK (0x0C)
+#define MPI3_CMD_BUF_POST_BASE_FLAGS_DLAS_SHIFT (2)
#define MPI3_CMD_BUF_POST_BASE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_CMD_BUF_POST_BASE_FLAGS_DLAS_IOCUDP (0x04)
#define MPI3_CMD_BUF_POST_BASE_FLAGS_DLAS_IOCCTL (0x08)
@@ -191,7 +192,7 @@ typedef struct _MPI3_TARGET_ASSIST_REQUEST
U16 QueueTag; /* 0x12 */
U16 IoIndex; /* 0x14 */
U16 InitiatorConnectionTag; /* 0x16 */
- U32 SkipCount; /* 0x18 */
+ U32 IOCUseOnly18; /* 0x18 */
U32 DataLength; /* 0x1C */
U32 PortTransferLength; /* 0x20 */
U32 PrimaryReferenceTag; /* 0x24 */
@@ -206,12 +207,18 @@ typedef struct _MPI3_TARGET_ASSIST_REQUEST
#define MPI3_TARGET_ASSIST_MSGFLAGS_METASGL_VALID (0x80)
/**** Defines for the Flags field ****/
+#define MPI3_TARGET_ASSIST_FLAGS_IOC_USE_ONLY_23_MASK (0x00800000)
+#define MPI3_TARGET_ASSIST_FLAGS_IOC_USE_ONLY_23_SHIFT (23)
+#define MPI3_TARGET_ASSIST_FLAGS_IOC_USE_ONLY_22_MASK (0x00400000)
+#define MPI3_TARGET_ASSIST_FLAGS_IOC_USE_ONLY_22_SHIFT (22)
#define MPI3_TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER (0x00200000)
#define MPI3_TARGET_ASSIST_FLAGS_AUTO_STATUS (0x00100000)
#define MPI3_TARGET_ASSIST_FLAGS_DATADIRECTION_MASK (0x000C0000)
+#define MPI3_TARGET_ASSIST_FLAGS_DATADIRECTION_SHIFT (18)
#define MPI3_TARGET_ASSIST_FLAGS_DATADIRECTION_WRITE (0x00040000)
#define MPI3_TARGET_ASSIST_FLAGS_DATADIRECTION_READ (0x00080000)
#define MPI3_TARGET_ASSIST_FLAGS_DMAOPERATION_MASK (0x00030000)
+#define MPI3_TARGET_ASSIST_FLAGS_DMAOPERATION_SHIFT (16)
#define MPI3_TARGET_ASSIST_FLAGS_DMAOPERATION_HOST_PI (0x00010000)
/**** Defines for the SGL field ****/
@@ -243,6 +250,8 @@ typedef struct _MPI3_TARGET_STATUS_SEND_REQUEST
Mpi3TargetStatusSendRequest_t, MPI3_POINTER pMpi3TargetStatusSendRequest_t;
/**** Defines for the Flags field ****/
+#define MPI3_TSS_FLAGS_IOC_USE_ONLY_6_MASK (0x0040)
+#define MPI3_TSS_FLAGS_IOC_USE_ONLY_6_SHIFT (6)
#define MPI3_TSS_FLAGS_REPOST_CMD_BUFFER (0x0020)
#define MPI3_TSS_FLAGS_AUTO_SEND_GOOD_STATUS (0x0010)
@@ -292,7 +301,7 @@ typedef struct _MPI3_TARGET_MODE_ABORT_REQUEST
#define MPI3_TARGET_MODE_ABORT_ALL_CMD_BUFFERS (0x00)
#define MPI3_TARGET_MODE_ABORT_EXACT_IO_REQUEST (0x01)
#define MPI3_TARGET_MODE_ABORT_ALL_COMMANDS (0x02)
-
+#define MPI3_TARGET_MODE_ABORT_ALL_COMMANDS_DEVHANDLE (0x03)
/*****************************************************************************
* Target Mode Abort Reply Message *
diff --git a/sys/dev/mpi3mr/mpi/mpi30_tool.h b/sys/dev/mpi3mr/mpi/mpi30_tool.h
index 55fb53601863..7f43d5d45465 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_tool.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_tool.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -106,9 +106,11 @@ typedef struct _MPI3_TOOL_ISTWI_READ_WRITE_REQUEST
/**** Bitfield definitions for Flags field ****/
#define MPI3_TOOLBOX_ISTWI_FLAGS_AUTO_RESERVE_RELEASE (0x80)
#define MPI3_TOOLBOX_ISTWI_FLAGS_ADDRESS_MODE_MASK (0x04)
+#define MPI3_TOOLBOX_ISTWI_FLAGS_ADDRESS_MODE_SHIFT (2)
#define MPI3_TOOLBOX_ISTWI_FLAGS_ADDRESS_MODE_DEVINDEX (0x00)
#define MPI3_TOOLBOX_ISTWI_FLAGS_ADDRESS_MODE_DEVICE_FIELD (0x04)
#define MPI3_TOOLBOX_ISTWI_FLAGS_PAGE_ADDRESS_MASK (0x03)
+#define MPI3_TOOLBOX_ISTWI_FLAGS_PAGE_ADDRESS_SHIFT (0)
/**** Definitions for the Action field ****/
#define MPI3_TOOLBOX_ISTWI_ACTION_RESERVE_BUS (0x00)
@@ -366,6 +368,7 @@ typedef struct _MPI3_DIAG_BUFFER_POST_REQUEST
#define MPI3_DIAG_BUFFER_TYPE_FW (0x02)
#define MPI3_DIAG_BUFFER_TYPE_DRIVER (0x10)
#define MPI3_DIAG_BUFFER_TYPE_FDL (0x20)
+#define MPI3_DIAG_BUFFER_TYPE_TTY (0x30)
#define MPI3_DIAG_BUFFER_MIN_PRODUCT_SPECIFIC (0xF0)
#define MPI3_DIAG_BUFFER_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -388,11 +391,12 @@ typedef struct _MPI3_DRIVER_BUFFER_HEADER
} MPI3_DRIVER_BUFFER_HEADER, MPI3_POINTER PTR_MPI3_DRIVER_BUFFER_HEADER,
Mpi3DriverBufferHeader_t, MPI3_POINTER pMpi3DriverBufferHeader_t;
-/**** Defines for the Type field ****/
+/**** Defines for the Signature field ****/
#define MPI3_DRIVER_DIAG_BUFFER_HEADER_SIGNATURE_CIRCULAR (0x43495243)
/**** Defines for the Flags field ****/
#define MPI3_DRIVER_DIAG_BUFFER_HEADER_FLAGS_CIRCULAR_BUF_FORMAT_MASK (0x00000003)
+#define MPI3_DRIVER_DIAG_BUFFER_HEADER_FLAGS_CIRCULAR_BUF_FORMAT_SHIFT (0)
#define MPI3_DRIVER_DIAG_BUFFER_HEADER_FLAGS_CIRCULAR_BUF_FORMAT_ASCII (0x00000000)
#define MPI3_DRIVER_DIAG_BUFFER_HEADER_FLAGS_CIRCULAR_BUF_FORMAT_RTTRACE (0x00000001)
@@ -449,6 +453,7 @@ typedef struct _MPI3_DIAG_BUFFER_UPLOAD_REQUEST
/**** Defines for the Flags field ****/
#define MPI3_DIAG_BUFFER_UPLOAD_FLAGS_FORMAT_MASK (0x01)
+#define MPI3_DIAG_BUFFER_UPLOAD_FLAGS_FORMAT_SHIFT (0)
#define MPI3_DIAG_BUFFER_UPLOAD_FLAGS_FORMAT_DECODED (0x00)
#define MPI3_DIAG_BUFFER_UPLOAD_FLAGS_FORMAT_ENCODED (0x01)
diff --git a/sys/dev/mpi3mr/mpi/mpi30_transport.h b/sys/dev/mpi3mr/mpi/mpi30_transport.h
index 436496411309..d9ebbfa0b5d8 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_transport.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_transport.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -37,10 +37,9 @@
*
* Broadcom Inc. (Broadcom) MPI3MR Adapter FreeBSD
*
- */
-
-/*
- * Version History
+ *
+ *
+ * Version History
* ---------------
*
* Date Version Description
@@ -72,8 +71,20 @@
* 09-02-22 03.00.27.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 27.
* 10-20-22 03.00.27.01 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 27 - Interim Release 1.
* 12-02-22 03.00.28.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 28.
- * 02-24-22 03.00.29.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 29.
+ * 02-24-23 03.00.29.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 29.
+ * 05-19-23 03.00.30.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 30.
+ * 08-18-23 03.00.30.01 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 30 - Interim Release 1.
+ * 11-17-23 03.00.31.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 31.
+ * 02-16-24 03.00.32.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 32.
+ * 02-23-24 03.00.32.01 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 32 - Interim Release 1.
+ * 04-19-24 03.00.32.02 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 32 - Interim Release 2.
+ * 05-10-24 03.00.33.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 33.
+ * 06-14-24 03.00.33.01 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 33 - Interim Release 1.
+ * 07-26-24 03.00.34.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 34.
+ * 11-08-24 03.00.35.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 35.
+ * 02-14-25 03.00.36.00 Corresponds to Fusion-MPT MPI 3.0 Specification Rev 36.
*/
+
#ifndef MPI30_TRANSPORT_H
#define MPI30_TRANSPORT_H 1
@@ -101,7 +112,7 @@ typedef union _MPI3_VERSION_UNION
/****** Version constants for this revision ****/
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (29)
+#define MPI3_VERSION_UNIT (36)
#define MPI3_VERSION_DEV (0)
/****** DevHandle definitions *****/
@@ -176,6 +187,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ (0x000F0000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_REQ_ENT_SZ_SHIFT (16)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000C000)
+#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_SHIFT (14)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
@@ -196,6 +208,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for the AdminQueueNumEntries register ****/
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_OFFSET (0x00000024)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_MASK (0x0FFF)
+#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REQ_SHIFT (0)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_OFFSET (0x00000026)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_MASK (0x0FFF0000)
#define MPI3_SYSIF_ADMIN_Q_NUM_ENTRIES_REPLY_SHIFT (16)
@@ -211,6 +224,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for the CoalesceControl register ****/
#define MPI3_SYSIF_COALESCE_CONTROL_OFFSET (0x00000040)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_MASK (0xC0000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_SHIFT (30)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xC0000000)
@@ -239,6 +253,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for the WriteSequence register *****/
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001C04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000F)
+#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_SHIFT (0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_1ST (0xF)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND (0x4)
@@ -250,6 +265,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for the HostDiagnostic register *****/
#define MPI3_SYSIF_HOST_DIAG_OFFSET (0x00001C08)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SHIFT (8)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
@@ -267,6 +283,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for the Fault register ****/
#define MPI3_SYSIF_FAULT_OFFSET (0x00001C10)
#define MPI3_SYSIF_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI3_SYSIF_FAULT_CODE_SHIFT (0)
#define MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET (0x0000F000)
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000F001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000F002)
@@ -274,6 +291,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000F004)
#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000F005)
#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000F006)
+#define MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER (0x0000F007)
/**** Defines for FaultCodeAdditionalInfo registers ****/
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001C14)
@@ -307,12 +325,14 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for DiagRWControl register ****/
#define MPI3_SYSIF_DIAG_RW_CONTROL_OFFSET (0x00001C60)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_MASK (0x00000030)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_SHIFT (4)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_1BYTE (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_2BYTES (0x00000010)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_4BYTES (0x00000020)
#define MPI3_SYSIF_DIAG_RW_CONTROL_LEN_8BYTES (0x00000030)
#define MPI3_SYSIF_DIAG_RW_CONTROL_RESET (0x00000004)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_MASK (0x00000002)
+#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_READ (0x00000000)
#define MPI3_SYSIF_DIAG_RW_CONTROL_DIR_WRITE (0x00000002)
#define MPI3_SYSIF_DIAG_RW_CONTROL_START (0x00000001)
@@ -320,6 +340,7 @@ typedef volatile struct _MPI3_SYSIF_REGISTERS
/**** Defines for DiagRWStatus register ****/
#define MPI3_SYSIF_DIAG_RW_STATUS_OFFSET (0x00001C62)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_MASK (0x0000000E)
+#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SHIFT (1)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_SUCCESS (0x00000000)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_INV_ADDR (0x00000002)
#define MPI3_SYSIF_DIAG_RW_STATUS_STATUS_ACC_ERR (0x00000004)
@@ -357,7 +378,9 @@ typedef struct _MPI3_DEFAULT_REPLY_DESCRIPTOR
/**** Defines for the ReplyFlags field ****/
#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK (0x0001)
+#define MPI3_REPLY_DESCRIPT_FLAGS_PHASE_SHIFT (0)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_MASK (0xF000)
+#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SHIFT (12)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY (0x0000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_SUCCESS (0x1000)
#define MPI3_REPLY_DESCRIPT_FLAGS_TYPE_TARGET_COMMAND_BUFFER (0x2000)
@@ -425,15 +448,9 @@ typedef struct _MPI3_STATUS_REPLY_DESCRIPTOR
} MPI3_STATUS_REPLY_DESCRIPTOR, MPI3_POINTER PTR_MPI3_STATUS_REPLY_DESCRIPTOR,
Mpi3StatusReplyDescriptor_t, MPI3_POINTER pMpi3StatusReplyDescriptor_t;
-/**** Defines for the IOCStatus field ****/
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL (0x8000)
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK (0x7FFF)
+/**** Use MPI3_IOCSTATUS_ defines for the IOCStatus field ****/
-/**** Defines for the IOCLogInfo field ****/
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_MASK (0xF0000000)
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_NO_INFO (0x00000000)
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_TYPE_SAS (0x30000000)
-#define MPI3_REPLY_DESCRIPT_STATUS_IOCLOGINFO_DATA_MASK (0x0FFFFFFF)
+/**** Use MPI3_IOCLOGINFO_ defines for the IOCLogInfo field ****/
/*****************************************************************************
* Union of Reply Descriptors *
@@ -516,6 +533,7 @@ typedef union _MPI3_SGE_UNION
/**** Definitions for the Flags field ****/
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_MASK (0xF0)
+#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SHIFT (4)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE (0x00)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_BIT_BUCKET (0x10)
#define MPI3_SGE_FLAGS_ELEMENT_TYPE_CHAIN (0x20)
@@ -524,6 +542,7 @@ typedef union _MPI3_SGE_UNION
#define MPI3_SGE_FLAGS_END_OF_LIST (0x08)
#define MPI3_SGE_FLAGS_END_OF_BUFFER (0x04)
#define MPI3_SGE_FLAGS_DLAS_MASK (0x03)
+#define MPI3_SGE_FLAGS_DLAS_SHIFT (0)
#define MPI3_SGE_FLAGS_DLAS_SYSTEM (0x00)
#define MPI3_SGE_FLAGS_DLAS_IOC_UDP (0x01)
#define MPI3_SGE_FLAGS_DLAS_IOC_CTL (0x02)
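
The SGE flags byte packs the element type, the list/buffer terminators, and the data location address space (DLAS) into one U8; the new shifts make that packing explicit. Illustrative composition of a final simple element in host memory (fragment only, not part of this change):

U8 sge_flags = MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE |
    MPI3_SGE_FLAGS_END_OF_LIST | MPI3_SGE_FLAGS_END_OF_BUFFER |
    MPI3_SGE_FLAGS_DLAS_SYSTEM;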
@@ -532,30 +551,33 @@ typedef union _MPI3_SGE_UNION
#define MPI3_SGE_EXT_OPER_EEDP (0x00)
/**** Definitions for the EEDPFlags field of Extended EEDP element ****/
-#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
-#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
-#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
-#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
-#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
-#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
-#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
-#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
-#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00C0)
-#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
-#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
+#define MPI3_EEDPFLAGS_INCR_PRI_REF_TAG (0x8000)
+#define MPI3_EEDPFLAGS_INCR_SEC_REF_TAG (0x4000)
+#define MPI3_EEDPFLAGS_INCR_PRI_APP_TAG (0x2000)
+#define MPI3_EEDPFLAGS_INCR_SEC_APP_TAG (0x1000)
+#define MPI3_EEDPFLAGS_ESC_PASSTHROUGH (0x0800)
+#define MPI3_EEDPFLAGS_CHK_REF_TAG (0x0400)
+#define MPI3_EEDPFLAGS_CHK_APP_TAG (0x0200)
+#define MPI3_EEDPFLAGS_CHK_GUARD (0x0100)
+#define MPI3_EEDPFLAGS_ESC_MODE_MASK (0x00C0)
+#define MPI3_EEDPFLAGS_ESC_MODE_SHIFT (6)
+#define MPI3_EEDPFLAGS_ESC_MODE_DO_NOT_DISABLE (0x0040)
+#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_DISABLE (0x0080)
#define MPI3_EEDPFLAGS_ESC_MODE_APPTAG_REFTAG_DISABLE (0x00C0)
-#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
-#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
-#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
-#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
-#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
-#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
-#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
-#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
-#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
-#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
-#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
-#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
+#define MPI3_EEDPFLAGS_HOST_GUARD_MASK (0x0030)
+#define MPI3_EEDPFLAGS_HOST_GUARD_SHIFT (4)
+#define MPI3_EEDPFLAGS_HOST_GUARD_T10_CRC (0x0000)
+#define MPI3_EEDPFLAGS_HOST_GUARD_IP_CHKSUM (0x0010)
+#define MPI3_EEDPFLAGS_HOST_GUARD_OEM_SPECIFIC (0x0020)
+#define MPI3_EEDPFLAGS_PT_REF_TAG (0x0008)
+#define MPI3_EEDPFLAGS_EEDP_OP_MASK (0x0007)
+#define MPI3_EEDPFLAGS_EEDP_OP_SHIFT (0)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK (0x0001)
+#define MPI3_EEDPFLAGS_EEDP_OP_STRIP (0x0002)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REMOVE (0x0003)
+#define MPI3_EEDPFLAGS_EEDP_OP_INSERT (0x0004)
+#define MPI3_EEDPFLAGS_EEDP_OP_REPLACE (0x0006)
+#define MPI3_EEDPFLAGS_EEDP_OP_CHECK_REGEN (0x0007)
/**** Definitions for the UserDataSize field of Extended EEDP element ****/
#define MPI3_EEDP_UDS_512 (0x01)
@@ -652,9 +674,9 @@ typedef struct _MPI3_DEFAULT_REPLY
#define MPI3_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) /* End of the product-specific range of function codes */
/**** Defines for IOCStatus ****/
-#define MPI3_IOCSTATUS_LOG_INFO_AVAIL_MASK (0x8000)
#define MPI3_IOCSTATUS_LOG_INFO_AVAILABLE (0x8000)
#define MPI3_IOCSTATUS_STATUS_MASK (0x7FFF)
+#define MPI3_IOCSTATUS_STATUS_SHIFT (0)
/* Common IOCStatus values for all replies */
#define MPI3_IOCSTATUS_SUCCESS (0x0000)
@@ -665,6 +687,7 @@ typedef struct _MPI3_DEFAULT_REPLY
#define MPI3_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
#define MPI3_IOCSTATUS_INVALID_FIELD (0x0007)
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI3_IOCSTATUS_SHUTDOWN_ACTIVE (0x0009)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000A)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000B)
#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000C)
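
Since the reply-descriptor private copies of these masks were retired above in favor of the MPI3_IOCSTATUS_ set, one decoder now serves both paths. A sketch with illustrative helper names:

static inline bool
ioc_status_has_log_info(U16 ioc_status)
{
    return ((ioc_status & MPI3_IOCSTATUS_LOG_INFO_AVAILABLE) != 0);
}

static inline U16
ioc_status_code(U16 ioc_status)
{
    return (ioc_status & MPI3_IOCSTATUS_STATUS_MASK);
}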
@@ -742,6 +765,7 @@ typedef struct _MPI3_DEFAULT_REPLY
#define MPI3_IOCLOGINFO_TYPE_NONE (0x0)
#define MPI3_IOCLOGINFO_TYPE_SAS (0x3)
#define MPI3_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+#define MPI3_IOCLOGINFO_LOG_DATA_SHIFT (0)
#endif /* MPI30_TRANSPORT_H */
diff --git a/sys/dev/mpi3mr/mpi/mpi30_type.h b/sys/dev/mpi3mr/mpi/mpi30_type.h
index 267ede701762..a6ec8c395c35 100644
--- a/sys/dev/mpi3mr/mpi/mpi30_type.h
+++ b/sys/dev/mpi3mr/mpi/mpi30_type.h
@@ -1,7 +1,7 @@
/*
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Redistribution and use in source and binary forms, with or without
diff --git a/sys/dev/mpi3mr/mpi3mr.c b/sys/dev/mpi3mr/mpi3mr.c
index 932d174a6b50..bcf8f46ddf5d 100644
--- a/sys/dev/mpi3mr/mpi3mr.c
+++ b/sys/dev/mpi3mr/mpi3mr.c
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2016-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2016-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -83,7 +83,7 @@ static void mpi3mr_port_enable_complete(struct mpi3mr_softc *sc,
struct mpi3mr_drvr_cmd *drvrcmd);
static void mpi3mr_flush_io(struct mpi3mr_softc *sc);
static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
- U32 reset_reason);
+ U16 reset_reason);
static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
struct mpi3mr_drvr_cmd *cmdparam, U8 iou_rc);
static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
@@ -186,7 +186,7 @@ poll_for_command_completion(struct mpi3mr_softc *sc,
* Return: None.
*/
static void
-mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
+mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U16 reason_code)
{
U32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
@@ -221,7 +221,7 @@ mpi3mr_trigger_snapdump(struct mpi3mr_softc *sc, U32 reason_code)
*
* Return: None.
*/
-static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U32 reason_code)
+static void mpi3mr_check_rh_fault_ioc(struct mpi3mr_softc *sc, U16 reason_code)
{
U32 ioc_status;
@@ -1147,7 +1147,7 @@ enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc)
return MRIOC_STATE_RESET_REQUESTED;
}
-static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
+static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
{
U32 ioc_status;
@@ -1167,9 +1167,9 @@ static inline void mpi3mr_clear_resethistory(struct mpi3mr_softc *sc)
*
* Return: 0 on success, -1 on failure.
*/
-static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
+static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U16 reset_reason)
{
- U32 ioc_config, timeout, ioc_status;
+ U32 ioc_config, timeout, ioc_status, scratch_pad0;
int retval = -1;
mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Message Unit Reset(MUR)\n");
@@ -1177,8 +1177,13 @@ static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC is unrecoverable MUR not issued\n");
return retval;
}
- mpi3mr_clear_resethistory(sc);
- mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
+ mpi3mr_clear_reset_history(sc);
+
+ scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
+ MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
+ (sc->facts.ioc_num <<
+ MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
+ mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
@@ -1187,7 +1192,7 @@ static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
do {
ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
- mpi3mr_clear_resethistory(sc);
+ mpi3mr_clear_reset_history(sc);
ioc_config =
mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
@@ -1217,24 +1222,44 @@ static int mpi3mr_mur_ioc(struct mpi3mr_softc *sc, U32 reset_reason)
*
* Return: 0 on success, appropriate error on failure.
*/
-static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc)
+static int mpi3mr_bring_ioc_ready(struct mpi3mr_softc *sc,
+ U64 *start_time)
{
- U32 ioc_config, timeout;
- enum mpi3mr_iocstate current_state;
+ enum mpi3mr_iocstate current_state;
+ U32 ioc_status;
+ int retval;
- ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
- ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
+ U32 ioc_config = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET, ioc_config);
- timeout = sc->ready_timeout * 10;
- do {
- current_state = mpi3mr_get_iocstate(sc);
- if (current_state == MRIOC_STATE_READY)
- return 0;
- DELAY(100 * 1000);
- } while (--timeout);
+ if (*start_time == 0)
+ *start_time = ticks;
- return -1;
+ do {
+ ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
+ if (ioc_status & (MPI3_SYSIF_IOC_STATUS_FAULT | MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(sc);
+ retval = mpi3mr_issue_reset(sc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET, MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to soft reset the IOC, error 0x%d\n", __func__, retval);
+ return -1;
+ }
+ }
+ mpi3mr_clear_reset_history(sc);
+ return EAGAIN;
+ }
+
+ current_state = mpi3mr_get_iocstate(sc);
+ if (current_state == MRIOC_STATE_READY)
+ return 0;
+
+ DELAY(100 * 1000);
+
+ } while (((ticks - *start_time) / hz) < sc->ready_timeout);
+
+ return -1;
}
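The rewritten mpi3mr_bring_ioc_ready() measures elapsed time against a caller-owned start stamp instead of counting a fixed number of delays, so the ready_timeout budget is shared across retries, and EAGAIN tells the caller a fault was cleared and bring-up should be retried. A condensed sketch of the loop; controller_ready() is a hypothetical stand-in for the IOC state check, and the code assumes the kernel's ticks counter and hz tick rate:

    /*
     * Shared-budget poll: *start is stamped only on the first call, so
     * a retry after a soft reset resumes the same timeout window rather
     * than restarting it.
     */
    static int
    wait_shared_budget(struct mpi3mr_softc *sc, uint64_t *start)
    {
            if (*start == 0)
                    *start = ticks;
            do {
                    if (controller_ready(sc))       /* hypothetical check */
                            return (0);
                    DELAY(100 * 1000);              /* poll every 100 ms */
            } while (((ticks - *start) / hz) < sc->ready_timeout);
            return (-1);
    }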
static const struct {
@@ -1313,6 +1338,7 @@ static const struct {
"diagnostic buffer post timeout"
},
{ MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
+ { MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT, "configuration request timeout" },
{ MPI3MR_RESET_REASON_COUNT, "Reset reason count" },
};
@@ -1403,14 +1429,10 @@ mpi3mr_soft_reset_success(U32 ioc_status, U32 ioc_config)
static inline bool mpi3mr_diagfault_success(struct mpi3mr_softc *sc,
U32 ioc_status)
{
- U32 fault;
-
if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
return false;
- fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) & MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
- return true;
- return false;
+ mpi3mr_print_fault_info(sc);
+ return true;
}
/**
@@ -1616,6 +1638,7 @@ static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
(facts_data->MaxPCIeSwitches);
sc->facts.max_sasexpanders =
(facts_data->MaxSASExpanders);
+ sc->facts.max_data_length = facts_data->MaxDataLength;
sc->facts.max_sasinitiators =
(facts_data->MaxSASInitiators);
sc->facts.max_enclosures = (facts_data->MaxEnclosures);
@@ -1650,6 +1673,10 @@ static int mpi3mr_process_factsdata(struct mpi3mr_softc *sc,
sc->facts.io_throttle_low = facts_data->IOThrottleLow;
sc->facts.io_throttle_high = facts_data->IOThrottleHigh;
+ if (sc->facts.max_data_length == MPI3_IOCFACTS_MAX_DATA_LENGTH_NOT_REPORTED)
+ sc->facts.max_data_length = MPI3MR_DEFAULT_MAX_IO_SIZE;
+ else
+ sc->facts.max_data_length *= MPI3MR_PAGE_SIZE_4K;
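IOC Facts now reports MaxDataLength in units of 4 KB pages, with a sentinel meaning the firmware did not report a limit, in which case the driver falls back to a 1 MiB default. A sketch of the conversion; the NOT_REPORTED sentinel value used here is an assumption, the real constant lives in the MPI 3.0 headers:

    #include <stdint.h>

    #define MPI3MR_PAGE_SIZE_4K        4096
    #define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
    #define MAX_DATA_LEN_NOT_REPORTED  0            /* assumed sentinel */

    static uint32_t
    max_data_length_bytes(uint16_t reported_4k_units)
    {
            if (reported_4k_units == MAX_DATA_LEN_NOT_REPORTED)
                    return (MPI3MR_DEFAULT_MAX_IO_SIZE);    /* 1 MiB */
            return ((uint32_t)reported_4k_units * MPI3MR_PAGE_SIZE_4K);
    }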
/*Store in 512b block count*/
if (sc->facts.io_throttle_data_length)
sc->io_throttle_data_length =
@@ -1889,6 +1916,15 @@ static int mpi3mr_reply_alloc(struct mpi3mr_softc *sc)
goto out_failed;
}
+ sc->cfg_cmds.reply = malloc(sc->reply_sz,
+ M_MPI3MR, M_NOWAIT | M_ZERO);
+
+ if (!sc->cfg_cmds.reply) {
+ printf(IOCNAME "Cannot allocate memory for cfg_cmds.reply\n",
+ sc->name);
+ goto out_failed;
+ }
+
sc->ioctl_cmds.reply = malloc(sc->reply_sz, M_MPI3MR, M_NOWAIT | M_ZERO);
if (!sc->ioctl_cmds.reply) {
printf(IOCNAME "Cannot allocate memory for ioctl_cmds.reply\n",
@@ -2139,7 +2175,7 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
strcpy(drvr_info->DriverName, MPI3MR_DRIVER_NAME);
strcpy(drvr_info->DriverVersion, MPI3MR_DRIVER_VERSION);
strcpy(drvr_info->DriverReleaseDate, MPI3MR_DRIVER_RELDATE);
- drvr_info->DriverCapabilities = 0;
+ drvr_info->DriverCapabilities = MPI3_IOCINIT_DRIVERCAP_OSEXPOSURE_NO_SPECIAL;
memcpy((U8 *)&sc->driver_info, (U8 *)drvr_info, sizeof(sc->driver_info));
memset(&iocinit_req, 0, sizeof(iocinit_req));
@@ -2175,6 +2211,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_softc *sc)
time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
iocinit_req.TimeStamp = htole64(time_in_msec);
+ iocinit_req.MsgFlags |= MPI3_IOCINIT_MSGFLAGS_WRITESAMEDIVERT_SUPPORTED;
+
init_completion(&sc->init_cmds.completion);
retval = mpi3mr_submit_admin_cmd(sc, &iocinit_req,
sizeof(iocinit_req));
@@ -2267,7 +2305,7 @@ mpi3mr_display_ioc_info(struct mpi3mr_softc *sc)
printf("Capabilities=(");
if (sc->facts.ioc_capabilities &
- MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE) {
+ MPI3_IOCFACTS_CAPABILITY_RAID_SUPPORTED) {
printf("RAID");
i++;
}
@@ -2508,7 +2546,9 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_softc *sc)
goto out_failed;
}
- sz = MPI3MR_CHAINSGE_SIZE;
+ if (sc->max_sgl_entries > sc->facts.max_data_length / PAGE_SIZE)
+ sc->max_sgl_entries = sc->facts.max_data_length / PAGE_SIZE;
+ sz = sc->max_sgl_entries * sizeof(Mpi3SGESimple_t);
if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
4096, 0, /* algnmnt, boundary */
@@ -2707,14 +2747,16 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
{
int retval = 0;
enum mpi3mr_iocstate ioc_state;
- U64 ioc_info;
+ U64 ioc_info, start_ticks = 0;
U32 ioc_status, ioc_control, i, timeout;
Mpi3IOCFactsData_t facts_data;
char str[32];
U32 size;
+ U8 retry = 0;
sc->cpu_count = mp_ncpus;
+retry_init:
ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
ioc_control = mpi3mr_regread(sc, MPI3_SYSIF_IOC_CONFIG_OFFSET);
ioc_info = mpi3mr_regread64(sc, MPI3_SYSIF_IOC_INFO_LOW_OFFSET);
@@ -2722,28 +2764,25 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
mpi3mr_dprint(sc, MPI3MR_INFO, "SOD ioc_status: 0x%x ioc_control: 0x%x "
"ioc_info: 0x%lx\n", ioc_status, ioc_control, ioc_info);
- /*The timeout value is in 2sec unit, changing it to seconds*/
+ /* The timeout value is in units of 2 seconds; convert it to seconds. */
sc->ready_timeout =
((ioc_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
ioc_state = mpi3mr_get_iocstate(sc);
-
mpi3mr_dprint(sc, MPI3MR_INFO, "IOC state: %s IOC ready timeout: %d\n",
mpi3mr_iocstate_name(ioc_state), sc->ready_timeout);
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = sc->ready_timeout * 10;
- do {
- DELAY(1000 * 100);
- } while (--timeout);
-
+ timeout = sc->ready_timeout * 10;
+ do {
ioc_state = mpi3mr_get_iocstate(sc);
- mpi3mr_dprint(sc, MPI3MR_INFO,
- "IOC in %s state after waiting for reset time\n",
- mpi3mr_iocstate_name(ioc_state));
- }
+
+ if (ioc_state != MRIOC_STATE_BECOMING_READY &&
+ ioc_state != MRIOC_STATE_RESET_REQUESTED)
+ break;
+
+ DELAY(1000 * 100);
+ } while (--timeout);
if (ioc_state == MRIOC_STATE_READY) {
retval = mpi3mr_mur_ioc(sc, MPI3MR_RESET_FROM_BRINGUP);
@@ -2755,53 +2794,76 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
}
if (ioc_state != MRIOC_STATE_RESET) {
- mpi3mr_print_fault_info(sc);
- mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
- retval = mpi3mr_issue_reset(sc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR,
- "%s :Failed to soft reset IOC, error 0x%d\n",
- __func__, retval);
- goto out_failed;
- }
- }
-
+ if (ioc_state == MRIOC_STATE_FAULT) {
+ mpi3mr_print_fault_info(sc);
+
+ U32 fault = mpi3mr_regread(sc, MPI3_SYSIF_FAULT_OFFSET) &
+ MPI3_SYSIF_FAULT_CODE_MASK;
+ if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
+ mpi3mr_dprint(sc, MPI3MR_INFO,
+ "controller faulted due to insufficient power, try by connecting it in a different slot\n");
+ goto err;
+ }
+
+ U32 host_diagnostic;
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = mpi3mr_regread(sc, MPI3_SYSIF_HOST_DIAG_OFFSET);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ DELAY(100 * 1000);
+ } while (--timeout);
+ }
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(sc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR,
+ "%s :Failed to soft reset IOC, error 0x%d\n",
+ __func__, retval);
+ goto err_retry;
+ }
+ }
+
ioc_state = mpi3mr_get_iocstate(sc);
if (ioc_state != MRIOC_STATE_RESET) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "Cannot bring IOC to reset state\n");
- goto out_failed;
+ goto err_retry;
}
retval = mpi3mr_setup_admin_qpair(sc);
if (retval) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup Admin queues, error 0x%x\n",
retval);
- goto out_failed;
+ if (retval == ENOMEM)
+ goto err;
+ goto err_retry;
}
-
- retval = mpi3mr_bring_ioc_ready(sc);
+
+ retval = mpi3mr_bring_ioc_ready(sc, &start_ticks);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n",
- retval);
- goto out_failed;
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to bring IOC ready, error 0x%x\n", retval);
+ if (retval == EAGAIN)
+ goto err_retry;
+ goto err;
}
+
if (init_type == MPI3MR_INIT_TYPE_INIT) {
retval = mpi3mr_alloc_interrupts(sc, 1);
if (retval) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error 0x%x\n",
retval);
- goto out_failed;
+ goto err;
}
-
+
retval = mpi3mr_setup_irqs(sc);
if (retval) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error 0x%x\n",
retval);
- goto out_failed;
+ goto err;
}
}
@@ -2825,6 +2887,12 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
sc->init_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
sc->init_cmds.host_tag = MPI3MR_HOSTTAG_INITCMDS;
+ mtx_init(&sc->cfg_cmds.completion.lock, "CFG commands lock", NULL, MTX_DEF);
+ sc->cfg_cmds.reply = NULL;
+ sc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ sc->cfg_cmds.dev_handle = MPI3MR_INVALID_DEV_HANDLE;
+ sc->cfg_cmds.host_tag = MPI3MR_HOSTTAG_CFGCMDS;
+
mtx_init(&sc->ioctl_cmds.completion.lock, "IOCTL commands lock", NULL, MTX_DEF);
sc->ioctl_cmds.reply = NULL;
sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
@@ -2861,25 +2929,30 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
retval = mpi3mr_issue_iocfacts(sc, &facts_data);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Facts, error: 0x%x\n",
retval);
- goto out_failed;
+ if (retval == ENOMEM)
+ goto err;
+ goto err_retry;
}
retval = mpi3mr_process_factsdata(sc, &facts_data);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failedi, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "IOC Facts data processing failed, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err_retry;
}
sc->num_io_throttle_group = sc->facts.max_io_throttle_group;
mpi3mr_atomic_set(&sc->pend_large_data_sz, 0);
-
+
if (init_type == MPI3MR_INIT_TYPE_RESET) {
retval = mpi3mr_validate_fw_update(sc);
- if (retval)
- goto out_failed;
+ if (retval) {
+ if (retval == ENOMEM)
+ goto err;
+ goto err_retry;
+ }
} else {
sc->reply_sz = sc->facts.reply_sz;
}
@@ -2888,25 +2961,27 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
retval = mpi3mr_reply_alloc(sc);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated reply and sense buffers, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err;
}
-
+
if (init_type == MPI3MR_INIT_TYPE_INIT) {
retval = mpi3mr_alloc_chain_bufs(sc);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, retval: 0x%x\n",
- retval);
- goto out_failed;
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocated chain buffers, error: 0x%x\n",
+ retval);
+ goto err;
}
}
-
+
retval = mpi3mr_issue_iocinit(sc);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, retval: 0x%x\n",
- retval);
- goto out_failed;
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to Issue IOC Init, error: 0x%x\n",
+ retval);
+ if (retval == ENOMEM)
+ goto err;
+ goto err_retry;
}
mpi3mr_print_fw_pkg_ver(sc);
@@ -2914,77 +2989,87 @@ int mpi3mr_initialize_ioc(struct mpi3mr_softc *sc, U8 init_type)
sc->reply_free_q_host_index = sc->num_reply_bufs;
mpi3mr_regwrite(sc, MPI3_SYSIF_REPLY_FREE_HOST_INDEX_OFFSET,
sc->reply_free_q_host_index);
-
+
sc->sense_buf_q_host_index = sc->num_sense_bufs;
-
+
mpi3mr_regwrite(sc, MPI3_SYSIF_SENSE_BUF_FREE_HOST_INDEX_OFFSET,
sc->sense_buf_q_host_index);
if (init_type == MPI3MR_INIT_TYPE_INIT) {
retval = mpi3mr_alloc_interrupts(sc, 0);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate interrupts, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err;
}
retval = mpi3mr_setup_irqs(sc);
if (retval) {
- printf(IOCNAME "Failed to setup ISR, error: 0x%x\n",
- sc->name, retval);
- goto out_failed;
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to setup ISR, error: 0x%x\n", retval);
+ goto err;
}
mpi3mr_enable_interrupts(sc);
} else
mpi3mr_enable_interrupts(sc);
-
- retval = mpi3mr_create_op_queues(sc);
+ retval = mpi3mr_create_op_queues(sc);
if (retval) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to create operational queues, error: %d\n",
retval);
- goto out_failed;
+ if (retval == ENOMEM)
+ goto err;
+ goto err_retry;
}
if (!sc->throttle_groups && sc->num_io_throttle_group) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "allocating memory for throttle groups\n");
size = sizeof(struct mpi3mr_throttle_group_info);
sc->throttle_groups = (struct mpi3mr_throttle_group_info *)
malloc(sc->num_io_throttle_group *
size, M_MPI3MR, M_NOWAIT | M_ZERO);
- if (!sc->throttle_groups)
- goto out_failed;
+ if (!sc->throttle_groups) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "throttle groups memory allocation failed\n");
+ goto err;
+ }
}
if (init_type == MPI3MR_INIT_TYPE_RESET) {
- mpi3mr_dprint(sc, MPI3MR_INFO, "Re-register events\n");
+ mpi3mr_dprint(sc, MPI3MR_XINFO, "Re-register events\n");
retval = mpi3mr_register_events(sc);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to re-register events, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err_retry;
}
mpi3mr_dprint(sc, MPI3MR_INFO, "Issuing Port Enable\n");
retval = mpi3mr_issue_port_enable(sc, 0);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_INFO, "Failed to issue port enable, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err_retry;
}
}
retval = mpi3mr_pel_alloc(sc);
if (retval) {
- mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, retval: 0x%x\n",
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to allocate memory for PEL, error: 0x%x\n",
retval);
- goto out_failed;
+ goto err;
}
-
+
+ if (mpi3mr_cfg_get_driver_pg1(sc) != 0)
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Failed to get the cfg driver page1\n");
+
return retval;
-out_failed:
+err_retry:
+ if ((retry++ < 2) && (((ticks - start_ticks) / hz) < (sc->ready_timeout - 60))) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Retrying controller initialization,"
+ "retry_count: %d\n", retry);
+ goto retry_init;
+ }
+err:
retval = -1;
return retval;
}
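Initialization failures are now split into hard errors (err) and transient ones (err_retry): a transient failure restarts the whole sequence from retry_init at most twice, and only while a comfortable margin of the ready-timeout budget remains. The gate, condensed; this compiles in-kernel, where ticks and hz are globals:

    /* Retry at most twice, and only while 60 s of budget remain. */
    static int
    should_retry_init(uint8_t *retry, uint64_t start_ticks,
        uint32_t ready_timeout)
    {
            uint64_t elapsed_s = (ticks - start_ticks) / hz;

            return ((*retry)++ < 2 && elapsed_s < ready_timeout - 60);
    }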
@@ -3053,6 +3138,115 @@ out:
return retval;
}
+static int mpi3mr_timestamp_sync(struct mpi3mr_softc *sc)
+{
+ int retval = 0;
+ struct timeval current_time;
+ int64_t time_in_msec;
+ Mpi3IoUnitControlRequest_t iou_ctrl = {0};
+
+ mtx_lock(&sc->init_cmds.completion.lock);
+ if (sc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: command is in use\n");
+ mtx_unlock(&sc->init_cmds.completion.lock);
+ return -1;
+ }
+
+ sc->init_cmds.state = MPI3MR_CMD_PENDING;
+ sc->init_cmds.is_waiting = 1;
+ sc->init_cmds.callback = NULL;
+ iou_ctrl.HostTag = htole16(MPI3MR_HOSTTAG_INITCMDS);
+ iou_ctrl.Function = MPI3_FUNCTION_IO_UNIT_CONTROL;
+ iou_ctrl.Operation = MPI3_CTRL_OP_UPDATE_TIMESTAMP;
+ getmicrotime(&current_time);
+ time_in_msec = (int64_t)current_time.tv_sec * 1000 + current_time.tv_usec/1000;
+ iou_ctrl.Param64[0] = htole64(time_in_msec);
+
+ init_completion(&sc->init_cmds.completion);
+
+ retval = mpi3mr_submit_admin_cmd(sc, &iou_ctrl, sizeof(iou_ctrl));
+ if (retval) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "timestamp sync: Admin Post failed\n");
+ goto out_unlock;
+ }
+
+ wait_for_completion_timeout(&sc->init_cmds.completion,
+ (MPI3MR_INTADMCMD_TIMEOUT));
+
+ if (!(sc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: command timed out\n");
+ sc->init_cmds.is_waiting = 0;
+
+ if (!(sc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_TSU_TIMEOUT);
+
+ retval = -1;
+ goto out_unlock;
+ }
+
+ if (((sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) != MPI3_IOCSTATUS_SUCCESS) &&
+ (sc->init_cmds.ioc_status != MPI3_IOCSTATUS_SUPERVISOR_ONLY)) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue timestamp sync: Failed IOCStatus(0x%04x) Loginfo(0x%08x)\n",
+ (sc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK), sc->init_cmds.ioc_loginfo);
+ retval = -1;
+ }
+
+out_unlock:
+ sc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mtx_unlock(&sc->init_cmds.completion.lock);
+
+ return retval;
+}
+
+void
+mpi3mr_timestamp_thread(void *arg)
+{
+ struct mpi3mr_softc *sc = (struct mpi3mr_softc *)arg;
+ U64 elapsed_time = 0;
+
+ sc->timestamp_thread_active = 1;
+ mtx_lock(&sc->reset_mutex);
+ while (1) {
+
+ if (sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ||
+ (sc->unrecoverable == 1)) {
+ mpi3mr_dprint(sc, MPI3MR_INFO,
+ "Exit due to %s from %s\n",
+ sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN ? "Shutdown" :
+ "Hardware critical error", __func__);
+ break;
+ }
+ mtx_unlock(&sc->reset_mutex);
+
+ while (sc->reset_in_progress) {
+ if (elapsed_time)
+ elapsed_time = 0;
+ if (sc->unrecoverable)
+ break;
+ pause("mpi3mr_timestamp_thread", hz / 5);
+ }
+
+ if (elapsed_time++ >= sc->ts_update_interval * 60) {
+ mpi3mr_timestamp_sync(sc);
+ elapsed_time = 0;
+ }
+
+ /*
+ * Sleep for 1 second if we're not exiting, then loop to top
+ * to poll exit status and hardware health.
+ */
+ mtx_lock(&sc->reset_mutex);
+ if (((sc->mpi3mr_flags & MPI3MR_FLAGS_SHUTDOWN) == 0) &&
+ (!sc->unrecoverable) && (!sc->reset_in_progress)) {
+ msleep(&sc->timestamp_chan, &sc->reset_mutex, PRIBIO,
+ "mpi3mr_timestamp", 1 * hz);
+ }
+ }
+ mtx_unlock(&sc->reset_mutex);
+ sc->timestamp_thread_active = 0;
+ kproc_exit(0);
+}
+
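The payload sent with MPI3_CTRL_OP_UPDATE_TIMESTAMP is milliseconds since the Unix epoch, the same value the thread above recomputes from getmicrotime() on every interval. A user-space analogue of the computation:

    #include <stdint.h>
    #include <sys/time.h>

    static uint64_t
    timestamp_msec(void)
    {
            struct timeval tv;

            gettimeofday(&tv, NULL);    /* the driver uses getmicrotime() */
            return ((uint64_t)tv.tv_sec * 1000 + tv.tv_usec / 1000);
    }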
void
mpi3mr_watchdog_thread(void *arg)
{
@@ -3118,6 +3312,14 @@ mpi3mr_watchdog_thread(void *arg)
sc->unrecoverable = 1;
break;
}
+
+ if (fault == MPI3_SYSIF_FAULT_CODE_INSUFFICIENT_PCI_SLOT_POWER) {
+ mpi3mr_dprint(sc, MPI3MR_INFO,
+ "controller faulted due to insufficient power, marking controller as unrecoverable\n");
+ sc->unrecoverable = 1;
+ break;
+ }
+
if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
|| (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS)
|| (sc->reset_in_progress))
@@ -3338,6 +3540,19 @@ void mpi3mr_update_device(struct mpi3mr_softc *sc,
break;
}
+ switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
+ tgtdev->ws_len = 256;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_2048_LB:
+ tgtdev->ws_len = 2048;
+ break;
+ case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_NO_LIMIT:
+ default:
+ tgtdev->ws_len = 0;
+ break;
+ }
+
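Device Page 0 flags now advertise the largest WRITE SAME the firmware will service per device: 256 or 2048 logical blocks, with "no limit" stored as a ws_len of 0 so nothing is diverted. A sketch of the mapping; the mask and field values below are placeholders mirroring the MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_* encoding, not the real header constants:

    #include <stdint.h>

    /* Placeholder encodings; the real values are in the MPI 3.0 headers. */
    #define WS_MASK    0x3
    #define WS_256_LB  0x1
    #define WS_2048_LB 0x2

    static uint16_t
    ws_len_from_flags(uint16_t flags)
    {
            switch (flags & WS_MASK) {
            case WS_256_LB:
                    return (256);
            case WS_2048_LB:
                    return (2048);
            default:
                    return (0);     /* no limit: never diverted */
            }
    }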
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_SAS_SATA:
{
@@ -3477,6 +3692,7 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
{
U16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_DEVRMCMD_MIN;
struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
+ struct mpi3mr_target *tgtdev = NULL;
mpi3mr_dprint(sc, MPI3MR_EVENT,
"%s :dev_rmhs_iouctrl_complete:handle(0x%04x), ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -3497,6 +3713,13 @@ static void mpi3mr_dev_rmhs_complete_iou(struct mpi3mr_softc *sc,
"%s :dev removal handshake failed after all retries: handle(0x%04x)\n",
__func__, drv_cmd->dev_handle);
} else {
+ mtx_lock_spin(&sc->target_lock);
+ TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
+ if (tgtdev->dev_handle == drv_cmd->dev_handle)
+ tgtdev->state = MPI3MR_DEV_REMOVE_HS_COMPLETED;
+ }
+ mtx_unlock_spin(&sc->target_lock);
+
mpi3mr_dprint(sc, MPI3MR_INFO,
"%s :dev removal handshake completed successfully: handle(0x%04x)\n",
__func__, drv_cmd->dev_handle);
@@ -3604,18 +3827,7 @@ static void mpi3mr_dev_rmhs_send_tm(struct mpi3mr_softc *sc, U16 handle,
U8 retrycount = 5;
struct mpi3mr_drvr_cmd *drv_cmd = cmdparam;
struct delayed_dev_rmhs_node *delayed_dev_rmhs = NULL;
- struct mpi3mr_target *tgtdev = NULL;
- mtx_lock_spin(&sc->target_lock);
- TAILQ_FOREACH(tgtdev, &sc->cam_sc->tgt_list, tgt_next) {
- if ((tgtdev->dev_handle == handle) &&
- (iou_rc == MPI3_CTRL_OP_REMOVE_DEVICE)) {
- tgtdev->state = MPI3MR_DEV_REMOVE_HS_STARTED;
- break;
- }
- }
- mtx_unlock_spin(&sc->target_lock);
-
if (drv_cmd)
goto issue_cmd;
do {
@@ -3890,7 +4102,7 @@ static void mpi3mr_sastopochg_evt_th(struct mpi3mr_softc *sc,
handle = le16toh(topo_evt->PhyEntry[i].AttachedDevHandle);
if (!handle)
continue;
- reason_code = topo_evt->PhyEntry[i].Status &
+ reason_code = topo_evt->PhyEntry[i].PhyStatus &
MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, handle);
switch (reason_code) {
@@ -4170,11 +4382,16 @@ static void mpi3mr_process_events(struct mpi3mr_softc *sc,
break;
}
case MPI3_EVENT_DEVICE_INFO_CHANGED:
- case MPI3_EVENT_LOG_DATA:
{
process_evt_bh = 1;
break;
}
+ case MPI3_EVENT_LOG_DATA:
+ {
+ mpi3mr_app_save_logdata(sc, (char*)event_reply->EventData,
+ le16toh(event_reply->EventDataLength) * 4);
+ break;
+ }
case MPI3_EVENT_ENERGY_PACK_CHANGE:
{
mpi3mr_energypackchg_evt_th(sc, event_reply);
@@ -4270,7 +4487,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
Mpi3SuccessReplyDescriptor_t *success_desc;
Mpi3DefaultReply_t *def_reply = NULL;
struct mpi3mr_drvr_cmd *cmdptr = NULL;
- Mpi3SCSIIOReply_t *scsi_reply;
+ Mpi3SCSIIOReply_t *scsi_reply = NULL;
U8 *sense_buf = NULL;
*reply_dma = 0;
@@ -4281,10 +4498,9 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
host_tag = status_desc->HostTag;
ioc_status = status_desc->IOCStatus;
- if (ioc_status &
- MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
ioc_loginfo = status_desc->IOCLogInfo;
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
@@ -4294,10 +4510,9 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
goto out;
host_tag = def_reply->HostTag;
ioc_status = def_reply->IOCStatus;
- if (ioc_status &
- MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
ioc_loginfo = def_reply->IOCLogInfo;
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
if (def_reply->Function == MPI3_FUNCTION_SCSI_IO) {
scsi_reply = (Mpi3SCSIIOReply_t *)def_reply;
sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
@@ -4315,6 +4530,9 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
case MPI3MR_HOSTTAG_INITCMDS:
cmdptr = &sc->init_cmds;
break;
+ case MPI3MR_HOSTTAG_CFGCMDS:
+ cmdptr = &sc->cfg_cmds;
+ break;
case MPI3MR_HOSTTAG_IOCTLCMDS:
cmdptr = &sc->ioctl_cmds;
break;
@@ -4372,7 +4590,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_softc *sc,
}
}
out:
- if (sense_buf != NULL)
+ if (scsi_reply != NULL && sense_buf != NULL)
mpi3mr_repost_sense_buf(sc,
scsi_reply->SenseDataBufferAddress);
return;
@@ -4391,6 +4609,7 @@ static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
U32 num_adm_reply = 0;
U64 reply_dma = 0;
Mpi3DefaultReplyDescriptor_t *reply_desc;
+ U16 threshold_comps = 0;
mtx_lock_spin(&sc->admin_reply_lock);
if (sc->admin_in_use == false) {
@@ -4428,6 +4647,11 @@ static int mpi3mr_complete_admin_cmd(struct mpi3mr_softc *sc)
if ((reply_desc->ReplyFlags &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+
+ if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
+ threshold_comps = 0;
+ }
} while (1);
mpi3mr_regwrite(sc, MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET, adm_reply_ci);
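Both the admin and operational reply loops now post the consumer index back to the controller every MPI3MR_THRESHOLD_REPLY_COUNT (100) completions instead of only once at the end of a drain, so the firmware can recycle reply descriptors during long bursts. The shape of the pattern; next_reply(), handle(), and write_ci() are hypothetical stand-ins for the descriptor walk and register write:

    #define THRESHOLD_REPLY_COUNT 100

    static void
    drain_reply_queue(void)
    {
            unsigned int ci = 0, comps = 0;

            while (next_reply(&ci)) {               /* hypothetical walk */
                    handle(ci);                     /* hypothetical */
                    if (++comps == THRESHOLD_REPLY_COUNT) {
                            write_ci(ci);           /* interim CI update */
                            comps = 0;
                    }
            }
            write_ci(ci);                           /* final CI update */
    }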
@@ -4492,10 +4716,9 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
status_desc = (Mpi3StatusReplyDescriptor_t *)reply_desc;
host_tag = status_desc->HostTag;
ioc_status = status_desc->IOCStatus;
- if (ioc_status &
- MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
ioc_loginfo = status_desc->IOCLogInfo;
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
break;
case MPI3_REPLY_DESCRIPT_FLAGS_TYPE_ADDRESS_REPLY:
addr_desc = (Mpi3AddressReplyDescriptor_t *)reply_desc;
@@ -4519,10 +4742,9 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
resp_data = scsi_reply->ResponseData;
sense_buf = mpi3mr_get_sensebuf_virt_addr(sc,
scsi_reply->SenseDataBufferAddress);
- if (ioc_status &
- MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_LOGINFOAVAIL)
+ if (ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
ioc_loginfo = scsi_reply->IOCLogInfo;
- ioc_status &= MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK;
+ ioc_status &= MPI3_IOCSTATUS_STATUS_MASK;
if (sense_state == MPI3_SCSI_STATE_SENSE_BUFF_Q_EMPTY)
mpi3mr_dprint(sc, MPI3MR_ERROR, "Ran out of sense buffers\n");
@@ -4724,7 +4946,7 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_softc *sc,
csio->resid = cm->length - le32toh(xfer_count);
case MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR:
case MPI3_IOCSTATUS_SUCCESS:
- if ((scsi_reply->IOCStatus & MPI3_REPLY_DESCRIPT_STATUS_IOCSTATUS_STATUS_MASK) ==
+ if ((scsi_reply->IOCStatus & MPI3_IOCSTATUS_STATUS_MASK) ==
MPI3_IOCSTATUS_SCSI_RECOVERED_ERROR)
mpi3mr_dprint(sc, MPI3MR_XINFO, "func: %s line: %d recovered error\n", __func__, __LINE__);
@@ -4840,7 +5062,7 @@ int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
U32 num_op_replies = 0;
U64 reply_dma = 0;
Mpi3DefaultReplyDescriptor_t *reply_desc;
- U16 req_qid = 0;
+ U16 req_qid = 0, threshold_comps = 0;
mtx_lock_spin(&op_reply_q->q_lock);
if (op_reply_q->in_use == false) {
@@ -4885,6 +5107,12 @@ int mpi3mr_complete_io_cmd(struct mpi3mr_softc *sc,
if ((reply_desc->ReplyFlags &
MPI3_REPLY_DESCRIPT_FLAGS_PHASE_MASK) != exp_phase)
break;
+
+ if (++threshold_comps == MPI3MR_THRESHOLD_REPLY_COUNT) {
+ mpi3mr_regwrite(sc, MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(op_reply_q->qid), reply_ci);
+ threshold_comps = 0;
+ }
+
} while (1);
@@ -4940,7 +5168,7 @@ mpi3mr_alloc_requests(struct mpi3mr_softc *sc)
struct mpi3mr_cmd *cmd;
int i, j, nsegs, ret;
- nsegs = MPI3MR_SG_DEPTH;
+ nsegs = sc->max_sgl_entries;
ret = bus_dma_tag_create( sc->mpi3mr_parent_dmat, /* parent */
1, 0, /* algnmnt, boundary */
sc->dma_loaddr, /* lowaddr */
@@ -5210,6 +5438,184 @@ out_failed:
mpi3mr_free_ioctl_dma_memory(sc);
}
+static inline void
+mpi3mr_free_dma_mem(struct mpi3mr_softc *sc,
+ struct dma_memory_desc *mem_desc)
+{
+ if (mem_desc->dma_addr)
+ bus_dmamap_unload(mem_desc->tag, mem_desc->dmamap);
+
+ if (mem_desc->addr != NULL) {
+ bus_dmamem_free(mem_desc->tag, mem_desc->addr, mem_desc->dmamap);
+ mem_desc->addr = NULL;
+ }
+
+ if (mem_desc->tag != NULL)
+ bus_dma_tag_destroy(mem_desc->tag);
+}
+
+static int
+mpi3mr_alloc_dma_mem(struct mpi3mr_softc *sc,
+ struct dma_memory_desc *mem_desc)
+{
+ int retval;
+
+ if (bus_dma_tag_create(sc->mpi3mr_parent_dmat, /* parent */
+ 4, 0, /* algnmnt, boundary */
+ sc->dma_loaddr, /* lowaddr */
+ sc->dma_hiaddr, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ mem_desc->size, /* maxsize */
+ 1, /* nsegments */
+ mem_desc->size, /* maxsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &mem_desc->tag)) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate DMA tag\n", __func__);
+ return ENOMEM;
+ }
+
+ if (bus_dmamem_alloc(mem_desc->tag, (void **)&mem_desc->addr,
+ BUS_DMA_NOWAIT, &mem_desc->dmamap)) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot allocate DMA memory\n", __func__);
+ retval = ENOMEM;
+ goto out;
+ }
+
+ bzero(mem_desc->addr, mem_desc->size);
+
+ bus_dmamap_load(mem_desc->tag, mem_desc->dmamap, mem_desc->addr, mem_desc->size,
+ mpi3mr_memaddr_cb, &mem_desc->dma_addr, BUS_DMA_NOWAIT);
+
+ if (!mem_desc->dma_addr) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Cannot load DMA map\n", __func__);
+ retval = ENOMEM;
+ goto out;
+ }
+ return 0;
+out:
+ mpi3mr_free_dma_mem(sc, mem_desc);
+ return retval;
+}
+
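The new helper pair wraps the usual busdma sequence: bus_dma_tag_create(), bus_dmamem_alloc(), then bus_dmamap_load() with a callback that captures the bus address into the descriptor. Typical use within this file, error handling trimmed; struct dma_memory_desc is declared in mpi3mr.h:

    struct dma_memory_desc mem = { .size = 4096 };

    if (mpi3mr_alloc_dma_mem(sc, &mem) == 0) {
            /* mem.addr is the KVA; mem.dma_addr goes into an SGE. */
            mpi3mr_free_dma_mem(sc, &mem);
    }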
+static int
+mpi3mr_post_cfg_req(struct mpi3mr_softc *sc, Mpi3ConfigRequest_t *cfg_req)
+{
+ int retval;
+
+ mtx_lock(&sc->cfg_cmds.completion.lock);
+ if (sc->cfg_cmds.state & MPI3MR_CMD_PENDING) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue cfg request: cfg command is in use\n");
+ mtx_unlock(&sc->cfg_cmds.completion.lock);
+ return -1;
+ }
+
+ sc->cfg_cmds.state = MPI3MR_CMD_PENDING;
+ sc->cfg_cmds.is_waiting = 1;
+ sc->cfg_cmds.callback = NULL;
+ sc->cfg_cmds.ioc_status = 0;
+ sc->cfg_cmds.ioc_loginfo = 0;
+
+ cfg_req->HostTag = htole16(MPI3MR_HOSTTAG_CFGCMDS);
+ cfg_req->Function = MPI3_FUNCTION_CONFIG;
+ cfg_req->PageType = MPI3_CONFIG_PAGETYPE_DRIVER;
+ cfg_req->PageNumber = 1;
+ cfg_req->PageAddress = 0;
+
+ init_completion(&sc->cfg_cmds.completion);
+
+ retval = mpi3mr_submit_admin_cmd(sc, cfg_req, sizeof(*cfg_req));
+ if (retval) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "Issue cfg request: Admin Post failed\n");
+ goto out;
+ }
+
+ wait_for_completion_timeout(&sc->cfg_cmds.completion,
+ (MPI3MR_INTADMCMD_TIMEOUT));
+
+ if (!(sc->cfg_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ if (!(sc->cfg_cmds.state & MPI3MR_CMD_RESET)) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "config request command timed out\n");
+ mpi3mr_check_rh_fault_ioc(sc, MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT);
+ }
+ retval = -1;
+ sc->cfg_cmds.is_waiting = 0;
+ goto out;
+ }
+
+ if ((sc->cfg_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK) !=
+ MPI3_IOCSTATUS_SUCCESS) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "config request failed, IOCStatus(0x%04x) "
+ "Loginfo(0x%08x)\n", (sc->cfg_cmds.ioc_status &
+ MPI3_IOCSTATUS_STATUS_MASK), sc->cfg_cmds.ioc_loginfo);
+ retval = -1;
+ }
+
+out:
+ sc->cfg_cmds.state = MPI3MR_CMD_NOTUSED;
+ mtx_unlock(&sc->cfg_cmds.completion.lock);
+ return retval;
+}
+
+static int mpi3mr_process_cfg_req(struct mpi3mr_softc *sc,
+ Mpi3ConfigRequest_t *cfg_req,
+ Mpi3ConfigPageHeader_t *cfg_hdr,
+ void *cfg_buf, U32 cfg_buf_sz)
+{
+ int retval;
+ struct dma_memory_desc mem_desc = {0};
+
+ if (cfg_req->Action == MPI3_CONFIG_ACTION_PAGE_HEADER)
+ mem_desc.size = sizeof(Mpi3ConfigPageHeader_t);
+ else {
+ mem_desc.size = le16toh(cfg_hdr->PageLength) * 4;
+ cfg_req->PageLength = cfg_hdr->PageLength;
+ cfg_req->PageVersion = cfg_hdr->PageVersion;
+ }
+
+ retval = mpi3mr_alloc_dma_mem(sc, &mem_desc);
+ if (retval) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to allocate DMA memory\n", __func__);
+ return retval;
+ }
+
+ mpi3mr_add_sg_single(&cfg_req->SGL, MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST,
+ mem_desc.size, mem_desc.dma_addr);
+
+ retval = mpi3mr_post_cfg_req(sc, cfg_req);
+ if (retval)
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "%s: Failed to post config request\n", __func__);
+ else
+ memcpy(cfg_buf, mem_desc.addr, min(mem_desc.size, cfg_buf_sz));
+
+ mpi3mr_free_dma_mem(sc, &mem_desc);
+ return retval;
+}
+
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_softc *sc)
+{
+ int retval;
+ Mpi3DriverPage1_t driver_pg1 = {0};
+ Mpi3ConfigPageHeader_t cfg_hdr = {0};
+ Mpi3ConfigRequest_t cfg_req = {0};
+
+ cfg_req.Action = MPI3_CONFIG_ACTION_PAGE_HEADER;
+ retval = mpi3mr_process_cfg_req(sc, &cfg_req, NULL, &cfg_hdr, sizeof(cfg_hdr));
+ if (retval)
+ goto error;
+
+ cfg_req.Action = MPI3_CONFIG_ACTION_READ_CURRENT;
+ retval = mpi3mr_process_cfg_req(sc, &cfg_req, &cfg_hdr, &driver_pg1, sizeof(driver_pg1));
+
+error:
+ if (!retval && driver_pg1.TimeStampUpdate)
+ sc->ts_update_interval = driver_pg1.TimeStampUpdate;
+ else
+ sc->ts_update_interval = MPI3MR_TSUPDATE_INTERVAL;
+
+ return retval;
+}
+
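Config pages are fetched in two actions: PAGE_HEADER first, to learn PageLength and PageVersion, then READ_CURRENT with a buffer sized from the header. Condensed from mpi3mr_cfg_get_driver_pg1() above, with hdr and pg1 standing in for the local cfg_hdr and driver_pg1:

    cfg_req.Action = MPI3_CONFIG_ACTION_PAGE_HEADER;
    if (mpi3mr_process_cfg_req(sc, &cfg_req, NULL, &hdr, sizeof(hdr)) == 0) {
            cfg_req.Action = MPI3_CONFIG_ACTION_READ_CURRENT;
            mpi3mr_process_cfg_req(sc, &cfg_req, &hdr, &pg1, sizeof(pg1));
    }

Whether or not the read succeeds, ts_update_interval ends up set: either to the firmware-supplied TimeStampUpdate or to the MPI3MR_TSUPDATE_INTERVAL (900) default.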
void
mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
{
@@ -5241,6 +5647,9 @@ mpi3mr_destory_mtx(struct mpi3mr_softc *sc)
if (mtx_initialized(&sc->init_cmds.completion.lock))
mtx_destroy(&sc->init_cmds.completion.lock);
+ if (mtx_initialized(&sc->cfg_cmds.completion.lock))
+ mtx_destroy(&sc->cfg_cmds.completion.lock);
+
if (mtx_initialized(&sc->ioctl_cmds.completion.lock))
mtx_destroy(&sc->ioctl_cmds.completion.lock);
@@ -5419,6 +5828,11 @@ mpi3mr_free_mem(struct mpi3mr_softc *sc)
sc->init_cmds.reply = NULL;
}
+ if (sc->cfg_cmds.reply) {
+ free(sc->cfg_cmds.reply, M_MPI3MR);
+ sc->cfg_cmds.reply = NULL;
+ }
+
if (sc->ioctl_cmds.reply) {
free(sc->ioctl_cmds.reply, M_MPI3MR);
sc->ioctl_cmds.reply = NULL;
@@ -5536,6 +5950,9 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_softc *sc)
cmdptr = &sc->init_cmds;
mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
+ cmdptr = &sc->cfg_cmds;
+ mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
+
cmdptr = &sc->ioctl_cmds;
mpi3mr_drv_cmd_comp_reset(sc, cmdptr);
@@ -5579,6 +5996,7 @@ static void mpi3mr_memset_buffers(struct mpi3mr_softc *sc)
memset(sc->admin_reply, 0, sc->admin_reply_q_sz);
memset(sc->init_cmds.reply, 0, sc->reply_sz);
+ memset(sc->cfg_cmds.reply, 0, sc->reply_sz);
memset(sc->ioctl_cmds.reply, 0, sc->reply_sz);
memset(sc->host_tm_cmds.reply, 0, sc->reply_sz);
memset(sc->pel_cmds.reply, 0, sc->reply_sz);
@@ -5642,6 +6060,7 @@ static void mpi3mr_invalidate_devhandles(struct mpi3mr_softc *sc)
target->io_throttle_enabled = 0;
target->io_divert = 0;
target->throttle_group = NULL;
+ target->ws_len = 0;
}
}
mtx_unlock_spin(&sc->target_lock);
@@ -5668,6 +6087,8 @@ static void mpi3mr_rfresh_tgtdevs(struct mpi3mr_softc *sc)
if (target->exposed_to_os)
mpi3mr_remove_device_from_os(sc, target->dev_handle);
mpi3mr_remove_device_from_list(sc, target, true);
+ } else if (target->is_hidden && target->exposed_to_os) {
+ mpi3mr_remove_device_from_os(sc, target->dev_handle);
}
}
@@ -5693,6 +6114,8 @@ static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
if (cmd->callout_owner) {
ccb = (union ccb *)(cmd->ccb);
ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
+ mpi3mr_atomic_dec(&sc->fw_outstanding);
+ mpi3mr_atomic_dec(&cmd->targ->outstanding);
mpi3mr_cmd_done(sc, cmd);
} else {
cmd->ccb = NULL;
@@ -5701,23 +6124,6 @@ static void mpi3mr_flush_io(struct mpi3mr_softc *sc)
}
}
}
-/**
- * mpi3mr_clear_reset_history - Clear reset history
- * @sc: Adapter instance reference
- *
- * Write the reset history bit in IOC Status to clear the bit,
- * if it is already set.
- *
- * Return: Nothing.
- */
-static inline void mpi3mr_clear_reset_history(struct mpi3mr_softc *sc)
-{
- U32 ioc_status;
-
- ioc_status = mpi3mr_regread(sc, MPI3_SYSIF_IOC_STATUS_OFFSET);
- if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)
- mpi3mr_regwrite(sc, MPI3_SYSIF_IOC_STATUS_OFFSET, ioc_status);
-}
/**
* mpi3mr_set_diagsave - Set diag save bit for snapdump
@@ -5752,11 +6158,11 @@ static inline void mpi3mr_set_diagsave(struct mpi3mr_softc *sc)
* Return: 0 on success, non-zero on failure.
*/
static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
- U32 reset_reason)
+ U16 reset_reason)
{
int retval = -1;
U8 unlock_retry_count = 0;
- U32 host_diagnostic, ioc_status, ioc_config;
+ U32 host_diagnostic = 0, ioc_status, ioc_config, scratch_pad0;
U32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
@@ -5810,7 +6216,14 @@ static int mpi3mr_issue_reset(struct mpi3mr_softc *sc, U16 reset_type,
unlock_retry_count, host_diagnostic);
} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
- mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, reset_reason);
+ if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT)
+ mpi3mr_set_diagsave(sc);
+
+ scratch_pad0 = ((MPI3MR_RESET_REASON_OSTYPE_FREEBSD <<
+ MPI3MR_RESET_REASON_OSTYPE_SHIFT) |
+ (sc->facts.ioc_num <<
+ MPI3MR_RESET_REASON_IOCNUM_SHIFT) | reset_reason);
+ mpi3mr_regwrite(sc, MPI3_SYSIF_SCRATCHPAD0_OFFSET, scratch_pad0);
mpi3mr_regwrite(sc, MPI3_SYSIF_HOST_DIAG_OFFSET, host_diagnostic | reset_type);
if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
@@ -5889,7 +6302,7 @@ inline void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc)
* Return: 0 on success, non-zero on failure.
*/
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
- U32 reset_reason, bool snapdump)
+ U16 reset_reason, bool snapdump)
{
int retval = 0, i = 0;
enum mpi3mr_iocstate ioc_state;
@@ -5929,6 +6342,9 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
sc->reset_in_progress = 1;
sc->block_ioctls = 1;
+ if (sc->timestamp_thread_active)
+ wakeup(&sc->timestamp_chan);
+
while (mpi3mr_atomic_read(&sc->pend_ioctls) && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
ioc_state = mpi3mr_get_iocstate(sc);
if (ioc_state == MRIOC_STATE_FAULT)
@@ -5996,10 +6412,14 @@ out:
mpi3mr_app_send_aen(sc);
}
} else {
- mpi3mr_issue_reset(sc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+ ioc_state = mpi3mr_get_iocstate(sc);
+ if (ioc_state != MRIOC_STATE_FAULT)
+ mpi3mr_issue_reset(sc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
+
sc->unrecoverable = 1;
sc->reset_in_progress = 0;
+ sc->block_ioctls = 0;
}
mpi3mr_dprint(sc, MPI3MR_INFO, "Soft Reset: %s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
diff --git a/sys/dev/mpi3mr/mpi3mr.h b/sys/dev/mpi3mr/mpi3mr.h
index d93c53b286cb..e2f2bfc47fbf 100644
--- a/sys/dev/mpi3mr/mpi3mr.h
+++ b/sys/dev/mpi3mr/mpi3mr.h
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -87,18 +87,19 @@
#include <sys/kthread.h>
#include "mpi/mpi30_api.h"
-#define MPI3MR_DRIVER_VERSION "8.6.0.2.0"
-#define MPI3MR_DRIVER_RELDATE "17th May 2023"
+#define MPI3MR_DRIVER_VERSION "8.14.0.2.0"
+#define MPI3MR_DRIVER_RELDATE "9th Apr 2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_NAME_LENGTH 32
#define IOCNAME "%s: "
+#define MPI3MR_DEFAULT_MAX_IO_SIZE (1 * 1024 * 1024)
+
#define SAS4116_CHIP_REV_A0 0
#define SAS4116_CHIP_REV_B0 1
-#define MPI3MR_SG_DEPTH (MPI3MR_4K_PGSZ/sizeof(Mpi3SGESimple_t))
#define MPI3MR_MAX_SECTORS 2048
#define MPI3MR_MAX_CMDS_LUN 7
#define MPI3MR_MAX_CDB_LENGTH 16
@@ -109,7 +110,12 @@
#define MPI3MR_RAID_QDEPTH 128
#define MPI3MR_NVME_QDEPTH 128
+/* Definitions for internal SGL and Chain SGL buffers */
#define MPI3MR_4K_PGSZ 4096
+#define MPI3MR_PAGE_SIZE_4K 4096
+#define MPI3MR_DEFAULT_SGL_ENTRIES 256
+#define MPI3MR_MAX_SGL_ENTRIES 2048
+
#define MPI3MR_AREQQ_SIZE (2 * MPI3MR_4K_PGSZ)
#define MPI3MR_AREPQ_SIZE (4 * MPI3MR_4K_PGSZ)
#define MPI3MR_AREQ_FRAME_SZ 128
@@ -123,7 +129,7 @@
#define MPI3MR_OP_REP_Q_QD 1024
#define MPI3MR_OP_REP_Q_QD_A0 4096
-#define MPI3MR_CHAINSGE_SIZE MPI3MR_4K_PGSZ
+#define MPI3MR_THRESHOLD_REPLY_COUNT 100
#define MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST \
(MPI3_SGE_FLAGS_ELEMENT_TYPE_SIMPLE | MPI3_SGE_FLAGS_DLAS_SYSTEM | \
@@ -135,10 +141,14 @@
#define MPI3MR_HOSTTAG_PELABORT 3
#define MPI3MR_HOSTTAG_PELWAIT 4
#define MPI3MR_HOSTTAG_TMS 5
+#define MPI3MR_HOSTTAG_CFGCMDS 6
#define MAX_MGMT_ADAPTERS 8
#define MPI3MR_WAIT_BEFORE_CTRL_RESET 5
+#define MPI3MR_RESET_REASON_OSTYPE_FREEBSD 0x4
+#define MPI3MR_RESET_REASON_OSTYPE_SHIFT 28
+#define MPI3MR_RESET_REASON_IOCNUM_SHIFT 20
struct mpi3mr_mgmt_info {
uint16_t count;
@@ -154,7 +164,7 @@ extern char fmt_os_ver[16];
raw_os_ver[3], raw_os_ver[4], raw_os_ver[5],\
raw_os_ver[6]);
#define MPI3MR_NUM_DEVRMCMD 1
-#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_TMS + 1)
+#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_CFGCMDS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
#define MPI3MR_INTERNALCMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
@@ -226,6 +236,10 @@ extern char fmt_os_ver[16];
#define MPI3MR_PERIODIC_DELAY 1 /* 1 second heartbeat/watchdog check */
+#define WRITE_SAME_32 0x0d
+
+#define MPI3MR_TSUPDATE_INTERVAL 900
+
struct completion {
unsigned int done;
struct mtx lock;
@@ -302,6 +316,7 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_SCSIIO_TIMEOUT = 26,
MPI3MR_RESET_FROM_FIRMWARE = 27,
MPI3MR_DEFAULT_RESET_REASON = 28,
+ MPI3MR_RESET_FROM_CFG_REQ_TIMEOUT = 29,
MPI3MR_RESET_REASON_COUNT,
};
@@ -328,6 +343,7 @@ struct mpi3mr_ioc_facts
U16 max_perids;
U16 max_pds;
U16 max_sasexpanders;
+ U32 max_data_length;
U16 max_sasinitiators;
U16 max_enclosures;
U16 max_pcieswitches;
@@ -446,8 +462,7 @@ enum mpi3mr_cmd_state {
enum mpi3mr_target_state {
MPI3MR_DEV_CREATED = 1,
- MPI3MR_DEV_REMOVE_HS_STARTED = 2,
- MPI3MR_DEV_DELETED = 3,
+ MPI3MR_DEV_REMOVE_HS_COMPLETED = 2,
};
struct mpi3mr_cmd {
@@ -544,6 +559,7 @@ struct mpi3mr_softc {
char driver_name[MPI3MR_NAME_LENGTH];
int bars;
bus_addr_t dma_loaddr;
+ bus_addr_t dma_hiaddr;
u_int mpi3mr_debug;
struct mpi3mr_reset reset;
int max_msix_vectors;
@@ -665,6 +681,7 @@ struct mpi3mr_softc {
struct mtx target_lock;
U16 max_host_ios;
+ U32 max_sgl_entries;
bus_dma_tag_t chain_sgl_list_tag;
struct mpi3mr_chain *chain_sgl_list;
U16 chain_bitmap_sz;
@@ -676,6 +693,7 @@ struct mpi3mr_softc {
struct mpi3mr_drvr_cmd host_tm_cmds;
struct mpi3mr_drvr_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
struct mpi3mr_drvr_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
+ struct mpi3mr_drvr_cmd cfg_cmds;
U16 devrem_bitmap_sz;
void *devrem_bitmap;
@@ -753,6 +771,10 @@ struct mpi3mr_softc {
struct dma_memory_desc ioctl_chain_sge;
struct dma_memory_desc ioctl_resp_sge;
bool ioctl_sges_allocated;
+ struct proc *timestamp_thread_proc;
+ void *timestamp_chan;
+ u_int8_t timestamp_thread_active;
+ U32 ts_update_interval;
};
static __inline uint64_t
@@ -960,11 +982,12 @@ void mpi3mr_cleanup_event_taskq(struct mpi3mr_softc *sc);
void
mpi3mr_hexdump(void *buf, int sz, int format);
int mpi3mr_soft_reset_handler(struct mpi3mr_softc *sc,
- U32 reset_reason, bool snapdump);
+ U16 reset_reason, bool snapdump);
void
mpi3mrsas_release_simq_reinit(struct mpi3mr_cam_softc *cam_sc);
void
mpi3mr_watchdog_thread(void *arg);
+void mpi3mr_timestamp_thread(void *arg);
void mpi3mr_add_device(struct mpi3mr_softc *sc, U16 per_id);
int mpi3mr_remove_device(struct mpi3mr_softc *sc, U16 handle);
int
@@ -982,6 +1005,7 @@ void mpi3mr_set_io_divert_for_all_vd_in_tg(struct mpi3mr_softc *sc,
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_softc *sc);
void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc);
void int_to_lun(unsigned int lun, U8 *req_lun);
-void trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason);
+void trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U16 reset_reason);
void mpi3mr_alloc_ioctl_dma_memory(struct mpi3mr_softc *sc);
+int mpi3mr_cfg_get_driver_pg1(struct mpi3mr_softc *sc);
#endif /*MPI3MR_H_INCLUDED*/
diff --git a/sys/dev/mpi3mr/mpi3mr_app.c b/sys/dev/mpi3mr/mpi3mr_app.c
index 7bd926269018..7e439bf7ed72 100644
--- a/sys/dev/mpi3mr/mpi3mr_app.c
+++ b/sys/dev/mpi3mr/mpi3mr_app.c
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -797,6 +797,8 @@ mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg,
struct mpi3mr_ioctl_mpt_dma_buffer *dma_buffers = NULL, *dma_buff = NULL;
struct mpi3mr_ioctl_mpirepbuf *mpirepbuf = NULL;
struct mpi3mr_ioctl_mptcmd *karg = (struct mpi3mr_ioctl_mptcmd *)uarg;
+ struct mpi3mr_target *tgtdev = NULL;
+ Mpi3SCSITaskMgmtRequest_t *tm_req = NULL;
sc = mpi3mr_app_get_adp_instance(karg->mrioc_id);
@@ -1060,6 +1062,18 @@ mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg,
}
}
+ if (mpi_header->Function == MPI3_FUNCTION_SCSI_TASK_MGMT) {
+ tm_req = (Mpi3SCSITaskMgmtRequest_t *)mpi_request;
+ if (tm_req->TaskType != MPI3_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+ tgtdev = mpi3mr_find_target_by_dev_handle(sc->cam_sc, tm_req->DevHandle);
+ if (!tgtdev) {
+ rval = ENODEV;
+ goto out;
+ }
+ mpi3mr_atomic_inc(&tgtdev->block_io);
+ }
+ }
+
sc->ioctl_cmds.state = MPI3MR_CMD_PENDING;
sc->ioctl_cmds.is_waiting = 1;
sc->ioctl_cmds.callback = NULL;
@@ -1178,6 +1192,9 @@ mpi3mr_app_mptcmds(struct cdev *dev, u_long cmd, void *uarg,
sc->mpi3mr_aen_triggered = 0;
out_failed:
+ if (tgtdev)
+ mpi3mr_atomic_dec(&tgtdev->block_io);
+
sc->ioctl_cmds.is_senseprst = 0;
sc->ioctl_cmds.sensebuf = NULL;
sc->ioctl_cmds.state = MPI3MR_CMD_NOTUSED;
@@ -1641,6 +1658,18 @@ mpi3mr_pel_enable(struct mpi3mr_softc *sc,
struct mpi3mr_ioctl_pel_enable pel_enable;
mpi3mr_dprint(sc, MPI3MR_TRACE, "%s() line: %d\n", __func__, __LINE__);
+ if (sc->unrecoverable) {
+ device_printf(sc->mpi3mr_dev, "Issue IOCTL: controller is in unrecoverable state\n");
+ return EFAULT;
+ }
+ if (sc->reset_in_progress) {
+ device_printf(sc->mpi3mr_dev, "Issue IOCTL: reset in progress\n");
+ return EAGAIN;
+ }
+ if (sc->block_ioctls) {
+ device_printf(sc->mpi3mr_dev, "Issue IOCTL: IOCTLs are blocked\n");
+ return EAGAIN;
+ }
if ((data_out_sz != sizeof(pel_enable) ||
(pel_enable.pel_class > MPI3_PEL_CLASS_FAULT))) {
@@ -2067,7 +2096,7 @@ mpi3mr_get_adpinfo(struct mpi3mr_softc *sc,
adpinfo.pci_dev_hw_rev = pci_read_config(sc->mpi3mr_dev, PCIR_REVID, 1);
adpinfo.pci_subsys_dev_id = pci_get_subdevice(sc->mpi3mr_dev);
adpinfo.pci_subsys_ven_id = pci_get_subvendor(sc->mpi3mr_dev);
- adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);;
+ adpinfo.pci_bus = pci_get_bus(sc->mpi3mr_dev);
adpinfo.pci_dev = pci_get_slot(sc->mpi3mr_dev);
adpinfo.pci_func = pci_get_function(sc->mpi3mr_dev);
adpinfo.pci_seg_id = pci_get_domain(sc->mpi3mr_dev);
diff --git a/sys/dev/mpi3mr/mpi3mr_app.h b/sys/dev/mpi3mr/mpi3mr_app.h
index 733aeb0ae53d..a02c83ad32fe 100644
--- a/sys/dev/mpi3mr/mpi3mr_app.h
+++ b/sys/dev/mpi3mr/mpi3mr_app.h
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
diff --git a/sys/dev/mpi3mr/mpi3mr_cam.c b/sys/dev/mpi3mr/mpi3mr_cam.c
index b842e2a05bda..a5120e2788db 100644
--- a/sys/dev/mpi3mr/mpi3mr_cam.c
+++ b/sys/dev/mpi3mr/mpi3mr_cam.c
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -58,13 +58,12 @@
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
@@ -82,6 +81,7 @@
#include "mpi3mr.h"
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
+#include <asm/unaligned.h>
#define smp_processor_id() PCPU_GET(cpuid)
@@ -101,6 +101,37 @@ extern void mpi3mr_add_sg_single(void *paddr, U8 flags, U32 length,
static U32 event_count;
+static inline void
+mpi3mr_divert_ws(Mpi3SCSIIORequest_t *req,
+ struct ccb_scsiio *csio,
+ U16 ws_len)
+{
+ U8 unmap = 0, ndob = 0;
+ U32 num_blocks = 0;
+ U8 opcode = scsiio_cdb_ptr(csio)[0];
+ U16 service_action = ((scsiio_cdb_ptr(csio)[8] << 8) | scsiio_cdb_ptr(csio)[9]);
+
+ if (opcode == WRITE_SAME_16 ||
+ (opcode == VARIABLE_LEN_CDB &&
+ service_action == WRITE_SAME_32)) {
+
+ int unmap_ndob_index = (opcode == WRITE_SAME_16) ? 1 : 10;
+
+ unmap = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x08;
+ ndob = scsiio_cdb_ptr(csio)[unmap_ndob_index] & 0x01;
+ num_blocks = get_unaligned_be32(scsiio_cdb_ptr(csio) +
+ ((opcode == WRITE_SAME_16) ? 10 : 28));
+
+ /* Check conditions for diversion to firmware */
+ if (unmap && ndob && num_blocks > ws_len) {
+ req->MsgFlags |= MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE;
+ req->Flags = htole32(le32toh(req->Flags) |
+ MPI3_SCSIIO_FLAGS_DIVERT_REASON_WRITE_SAME_TOO_LARGE);
+ }
+ }
+}
+
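Diversion applies only to WRITE SAME(16) and WRITE SAME(32) commands that have both UNMAP and NDOB set and whose block count exceeds the device's cap: the flag byte is CDB byte 1 for WS16 and byte 10 for WS32, and the block count is a big-endian 32-bit field at offset 10 or 28 respectively. A standalone sketch of the WS16 case:

    #include <stdint.h>

    /* Big-endian 32-bit load, what get_unaligned_be32() does above. */
    static uint32_t
    be32_load(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
                (uint32_t)p[2] << 8 | p[3]);
    }

    /* WRITE SAME(16): UNMAP/NDOB in byte 1, block count in bytes 10..13. */
    static int
    ws16_should_divert(const uint8_t cdb[16], uint16_t ws_len)
    {
            int unmap = cdb[1] & 0x08;
            int ndob = cdb[1] & 0x01;

            return (unmap && ndob && be32_load(cdb + 10) > ws_len);
    }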
static void mpi3mr_prepare_sgls(void *arg,
bus_dma_segment_t *segs, int nsegs, int error)
{
@@ -144,7 +175,7 @@ static void mpi3mr_prepare_sgls(void *arg,
bus_dmamap_sync(sc->buffer_dmat, cm->dmamap,
BUS_DMASYNC_PREWRITE);
- KASSERT(nsegs <= MPI3MR_SG_DEPTH && nsegs > 0,
+ KASSERT(nsegs <= sc->max_sgl_entries && nsegs > 0,
("%s: bad SGE count: %d\n", device_get_nameunit(sc->mpi3mr_dev), nsegs));
KASSERT(scsiio_req->DataLength != 0,
("%s: Data segments (%d), but DataLength == 0\n",
@@ -186,7 +217,7 @@ static void mpi3mr_prepare_sgls(void *arg,
chain = chain_req->buf;
chain_dma = chain_req->buf_phys;
- memset(chain_req->buf, 0, PAGE_SIZE);
+ memset(chain_req->buf, 0, sc->max_sgl_entries * sizeof(Mpi3SGESimple_t));
sges_in_segment = sges_left;
chain_length = sges_in_segment * sizeof(Mpi3SGESimple_t);
@@ -454,7 +485,7 @@ void mpi3mr_poll_pend_io_completions(struct mpi3mr_softc *sc)
}
void
-trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U32 reset_reason)
+trigger_reset_from_watchdog(struct mpi3mr_softc *sc, U8 reset_type, U16 reset_reason)
{
if (sc->reset_in_progress) {
mpi3mr_dprint(sc, MPI3MR_INFO, "Another reset is in progress, no need to trigger the reset\n");
@@ -1079,6 +1110,9 @@ mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
break;
}
+ if (targ->ws_len)
+ mpi3mr_divert_ws(req, csio, targ->ws_len);
+
req->Flags = htole32(mpi_control);
if (csio->ccb_h.flags & CAM_CDB_POINTER)
@@ -1119,7 +1153,7 @@ mpi3mr_action_scsiio(struct mpi3mr_cam_softc *cam_sc, union ccb *ccb)
return;
case CAM_DATA_VADDR:
case CAM_DATA_BIO:
- if (csio->dxfer_len > (MPI3MR_SG_DEPTH * MPI3MR_4K_PGSZ)) {
+ if (csio->dxfer_len > (sc->max_sgl_entries * PAGE_SIZE)) {
mpi3mr_set_ccbstatus(ccb, CAM_REQ_TOO_BIG);
mpi3mr_release_command(cm);
xpt_done(ccb);
@@ -1270,8 +1304,10 @@ mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
{
struct mpi3mr_cam_softc *cam_sc;
struct mpi3mr_target *targ;
+ struct mpi3mr_softc *sc;
cam_sc = cam_sim_softc(sim);
+ sc = cam_sc->sc;
mpi3mr_dprint(cam_sc->sc, MPI3MR_TRACE, "ccb func_code 0x%x target id: 0x%x\n",
ccb->ccb_h.func_code, ccb->ccb_h.target_id);
@@ -1322,7 +1358,7 @@ mpi3mr_cam_action(struct cam_sim *sim, union ccb *ccb)
"PCI device target_id: %u max io size: %u\n",
ccb->ccb_h.target_id, cpi->maxio);
} else {
- cpi->maxio = PAGE_SIZE * (MPI3MR_SG_DEPTH - 1);
+ cpi->maxio = PAGE_SIZE * (sc->max_sgl_entries - 1);
}
mpi3mr_set_ccbstatus(ccb, CAM_REQ_CMP);
break;
@@ -1560,7 +1596,7 @@ mpi3mr_sastopochg_evt_debug(struct mpi3mr_softc *sc,
if (!handle)
continue;
phy_number = event_data->StartPhyNum + i;
- reason_code = event_data->PhyEntry[i].Status &
+ reason_code = event_data->PhyEntry[i].PhyStatus &
MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
switch (reason_code) {
case MPI3_EVENT_SAS_TOPO_PHY_RC_TARG_NOT_RESPONDING:
@@ -1613,7 +1649,7 @@ mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_wo
continue;
target->link_rate = link_rate;
- reason_code = event_data->PhyEntry[i].Status &
+ reason_code = event_data->PhyEntry[i].PhyStatus &
MPI3_EVENT_SAS_TOPO_PHY_RC_MASK;
switch (reason_code) {
@@ -1638,14 +1674,6 @@ mpi3mr_process_sastopochg_evt(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_wo
return;
}
-static inline void
-mpi3mr_logdata_evt_bh(struct mpi3mr_softc *sc,
- struct mpi3mr_fw_event_work *fwevt)
-{
- mpi3mr_app_save_logdata(sc, fwevt->event_data,
- fwevt->event_data_size);
-}
-
static void
mpi3mr_pcietopochg_evt_debug(struct mpi3mr_softc *sc,
Mpi3EventDataPcieTopologyChangeList_t *event_data)
@@ -1802,9 +1830,9 @@ out:
int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
{
- U32 i = 0;
int retval = 0;
struct mpi3mr_target *target;
+ unsigned int target_outstanding;
mpi3mr_dprint(sc, MPI3MR_EVENT,
"Removing Device (dev_handle: %d)\n", handle);
@@ -1822,17 +1850,19 @@ int mpi3mr_remove_device_from_os(struct mpi3mr_softc *sc, U16 handle)
target->flags |= MPI3MRSAS_TARGET_INREMOVAL;
- while (mpi3mr_atomic_read(&target->outstanding) && (i < 30)) {
- i++;
- if (!(i % 2)) {
- mpi3mr_dprint(sc, MPI3MR_INFO,
- "[%2d]waiting for "
- "waiting for outstanding commands to complete on target: %d\n",
- i, target->per_id);
+ target_outstanding = mpi3mr_atomic_read(&target->outstanding);
+ if (target_outstanding) {
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "there are [%2d] outstanding IOs on target: %d "
+ "Poll reply queue once\n", target_outstanding, target->per_id);
+ mpi3mr_poll_pend_io_completions(sc);
+ target_outstanding = mpi3mr_atomic_read(&target->outstanding);
+ if (target_outstanding) {
+ target_outstanding = mpi3mr_atomic_read(&target->outstanding);
+ mpi3mr_dprint(sc, MPI3MR_ERROR, "[%2d] outstanding IOs present on target: %d "
+ "despite poll\n", target_outstanding, target->per_id);
}
- DELAY(1000 * 1000);
- }
-
+ }
+
if (target->exposed_to_os && !sc->reset_in_progress) {
mpi3mr_rescan_target(sc, target);
mpi3mr_dprint(sc, MPI3MR_INFO,
@@ -1848,18 +1878,16 @@ out:
void mpi3mr_remove_device_from_list(struct mpi3mr_softc *sc,
struct mpi3mr_target *target, bool must_delete)
{
+ if ((must_delete == false) &&
+ (target->state != MPI3MR_DEV_REMOVE_HS_COMPLETED))
+ return;
+
mtx_lock_spin(&sc->target_lock);
- if ((target->state == MPI3MR_DEV_REMOVE_HS_STARTED) ||
- (must_delete == true)) {
- TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
- target->state = MPI3MR_DEV_DELETED;
- }
+ TAILQ_REMOVE(&sc->cam_sc->tgt_list, target, tgt_next);
mtx_unlock_spin(&sc->target_lock);
- if (target->state == MPI3MR_DEV_DELETED) {
- free(target, M_MPI3MR);
- target = NULL;
- }
+ free(target, M_MPI3MR);
+ target = NULL;
return;
}
@@ -1999,11 +2027,6 @@ mpi3mr_fw_work(struct mpi3mr_softc *sc, struct mpi3mr_fw_event_work *fw_event)
mpi3mr_process_pcietopochg_evt(sc, fw_event);
break;
}
- case MPI3_EVENT_LOG_DATA:
- {
- mpi3mr_logdata_evt_bh(sc, fw_event);
- break;
- }
default:
mpi3mr_dprint(sc, MPI3MR_TRACE,"Unhandled event 0x%0X\n",
fw_event->event);
@@ -2064,12 +2087,6 @@ mpi3mr_cam_attach(struct mpi3mr_softc *sc)
mpi3mr_dprint(sc, MPI3MR_XINFO, "Starting CAM Attach\n");
cam_sc = malloc(sizeof(struct mpi3mr_cam_softc), M_MPI3MR, M_WAITOK|M_ZERO);
- if (!cam_sc) {
- mpi3mr_dprint(sc, MPI3MR_ERROR,
- "Failed to allocate memory for controller CAM instance\n");
- return (ENOMEM);
- }
-
cam_sc->maxtargets = sc->facts.max_perids + 1;
TAILQ_INIT(&cam_sc->tgt_list);
diff --git a/sys/dev/mpi3mr/mpi3mr_cam.h b/sys/dev/mpi3mr/mpi3mr_cam.h
index 4f3ce47751e9..a6c41226b2e5 100644
--- a/sys/dev/mpi3mr/mpi3mr_cam.h
+++ b/sys/dev/mpi3mr/mpi3mr_cam.h
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -121,6 +121,7 @@ struct mpi3mr_target {
struct mpi3mr_throttle_group_info *throttle_group;
uint64_t q_depth;
enum mpi3mr_target_state state;
+ uint16_t ws_len;
};
struct mpi3mr_cam_softc {
diff --git a/sys/dev/mpi3mr/mpi3mr_pci.c b/sys/dev/mpi3mr/mpi3mr_pci.c
index 1548d577a726..b436541b26c0 100644
--- a/sys/dev/mpi3mr/mpi3mr_pci.c
+++ b/sys/dev/mpi3mr/mpi3mr_pci.c
@@ -1,7 +1,7 @@
/*
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2020-2023, Broadcom Inc. All rights reserved.
+ * Copyright (c) 2020-2025, Broadcom Inc. All rights reserved.
* Support: <fbsd-storage-driver.pdl@broadcom.com>
*
* Authors: Sumit Saxena <sumit.saxena@broadcom.com>
@@ -178,12 +178,15 @@ mpi3mr_get_tunables(struct mpi3mr_softc *sc)
sc->reset_in_progress = 0;
sc->reset.type = 0;
sc->iot_enable = 1;
+ sc->max_sgl_entries = maxphys / PAGE_SIZE;
+
/*
* Grab the global variables.
*/
TUNABLE_INT_FETCH("hw.mpi3mr.debug_level", &sc->mpi3mr_debug);
TUNABLE_INT_FETCH("hw.mpi3mr.ctrl_reset", &sc->reset.type);
TUNABLE_INT_FETCH("hw.mpi3mr.iot_enable", &sc->iot_enable);
+ TUNABLE_INT_FETCH("hw.mpi3mr.max_sgl_entries", &sc->max_sgl_entries);
/* Grab the unit-instance variables */
snprintf(tmpstr, sizeof(tmpstr), "dev.mpi3mr.%d.debug_level",
@@ -197,6 +200,10 @@ mpi3mr_get_tunables(struct mpi3mr_softc *sc)
snprintf(tmpstr, sizeof(tmpstr), "dev.mpi3mr.%d.iot_enable",
device_get_unit(sc->mpi3mr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->iot_enable);
+
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mpi3mr.%d.max_sgl_entries",
+ device_get_unit(sc->mpi3mr_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->max_sgl_entries);
}
static struct mpi3mr_ident *
@@ -325,6 +332,13 @@ mpi3mr_ich_startup(void *arg)
mtx_unlock(&sc->mpi3mr_mtx);
+ error = mpi3mr_kproc_create(mpi3mr_timestamp_thread, sc,
+ &sc->timestamp_thread_proc, 0, 0,
+ "mpi3mr_timestamp_thread%d",
+ device_get_unit(sc->mpi3mr_dev));
+ if (error)
+ device_printf(sc->mpi3mr_dev, "Error %d starting timestamp thread\n", error);
+
error = mpi3mr_kproc_create(mpi3mr_watchdog_thread, sc,
&sc->watchdog_thread, 0, 0, "mpi3mr_watchdog%d",
device_get_unit(sc->mpi3mr_dev));
@@ -443,7 +457,16 @@ mpi3mr_pci_attach(device_t dev)
sc->mpi3mr_dev = dev;
mpi3mr_get_tunables(sc);
-
+
+ if (sc->max_sgl_entries > MPI3MR_MAX_SGL_ENTRIES)
+ sc->max_sgl_entries = MPI3MR_MAX_SGL_ENTRIES;
+ else if (sc->max_sgl_entries < MPI3MR_DEFAULT_SGL_ENTRIES)
+ sc->max_sgl_entries = MPI3MR_DEFAULT_SGL_ENTRIES;
+ else {
+ sc->max_sgl_entries /= MPI3MR_DEFAULT_SGL_ENTRIES;
+ sc->max_sgl_entries *= MPI3MR_DEFAULT_SGL_ENTRIES;
+ }
+
if ((error = mpi3mr_initialize_ioc(sc, MPI3MR_INIT_TYPE_INIT)) != 0) {
mpi3mr_dprint(sc, MPI3MR_ERROR, "FW initialization failed\n");
goto load_failed;
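
The clamp above bounds the tunable at both ends and rounds in-range values down to a multiple of the default. A standalone sketch with placeholder constants (the real MPI3MR_* values may differ):

#include <stdio.h>

#define DEFAULT_SGL_ENTRIES     256     /* illustrative */
#define MAX_SGL_ENTRIES         2048    /* illustrative */

static int
sanitize_sgl_entries(int req)
{
        if (req > MAX_SGL_ENTRIES)
                return (MAX_SGL_ENTRIES);
        if (req < DEFAULT_SGL_ENTRIES)
                return (DEFAULT_SGL_ENTRIES);
        /* Round down to a multiple of the default. */
        return (req / DEFAULT_SGL_ENTRIES * DEFAULT_SGL_ENTRIES);
}

int
main(void)
{
        /* Prints "256 512 2048". */
        printf("%d %d %d\n", sanitize_sgl_entries(100),
            sanitize_sgl_entries(700), sanitize_sgl_entries(9000));
        return (0);
}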
@@ -458,7 +481,7 @@ mpi3mr_pci_attach(device_t dev)
mpi3mr_dprint(sc, MPI3MR_ERROR, "CAM attach failed\n");
goto load_failed;
}
-
+
sc->mpi3mr_ich.ich_func = mpi3mr_ich_startup;
sc->mpi3mr_ich.ich_arg = sc;
if (config_intrhook_establish(&sc->mpi3mr_ich) != 0) {
@@ -648,10 +671,26 @@ mpi3mr_pci_detach(device_t dev)
mtx_lock(&sc->reset_mutex);
sc->mpi3mr_flags |= MPI3MR_FLAGS_SHUTDOWN;
+ if (sc->timestamp_thread_active)
+ wakeup(&sc->timestamp_chan);
+
if (sc->watchdog_thread_active)
wakeup(&sc->watchdog_chan);
mtx_unlock(&sc->reset_mutex);
+ i = 0;
+ while (sc->timestamp_thread_active && (i < 180)) {
+ i++;
+ if (!(i % 5)) {
+ mpi3mr_dprint(sc, MPI3MR_INFO,
+ "[%2d]waiting for "
+ "timestamp thread to quit reset %d\n", i,
+ sc->timestamp_thread_active);
+ }
+ pause("mpi3mr_shutdown", hz);
+ }
+
+ i = 0;
while (sc->reset_in_progress && (i < PEND_IOCTLS_COMP_WAIT_TIME)) {
i++;
if (!(i % 5)) {
diff --git a/sys/dev/mpr/mpr.c b/sys/dev/mpr/mpr.c
index d5c02f9608ca..262d6b58b705 100644
--- a/sys/dev/mpr/mpr.c
+++ b/sys/dev/mpr/mpr.c
@@ -1195,7 +1195,7 @@ mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
MPI2_DEFAULT_REPLY *reply;
MPI2_IOC_FACTS_REQUEST request;
- int error, req_sz, reply_sz;
+ int error, req_sz, reply_sz, retry = 0;
MPR_FUNCTRACE(sc);
mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);
@@ -1204,13 +1204,26 @@ mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
reply = (MPI2_DEFAULT_REPLY *)facts;
+ /*
+ * Retry sending the initialization sequence. Sometimes, especially with
+ * older firmware, the initialization process fails. Retrying allows the
+ * error to clear in the firmware.
+ */
bzero(&request, req_sz);
request.Function = MPI2_FUNCTION_IOC_FACTS;
- error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
-
- adjust_iocfacts_endianness(facts);
- mpr_dprint(sc, MPR_TRACE, "facts->IOCCapabilities 0x%x\n", facts->IOCCapabilities);
+ while (retry < 5) {
+ error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
+ if (error == 0)
+ break;
+ mpr_dprint(sc, MPR_FAULT, "%s failed retry %d\n", __func__, retry);
+ DELAY(1000);
+ retry++;
+ }
+ if (error == 0) {
+ adjust_iocfacts_endianness(facts);
+ mpr_dprint(sc, MPR_TRACE, "facts->IOCCapabilities 0x%x\n", facts->IOCCapabilities);
+ }
mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error);
return (error);
}
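
The loop above is a plain bounded retry with a 1 ms pause between attempts; error carries the last attempt's result out of the loop either way. The shape, as a userspace sketch with a hypothetical op() callback:

#include <unistd.h>

static int
retry_sync_request(int (*op)(void *), void *arg)
{
        int error = 0;

        for (int retry = 0; retry < 5; retry++) {
                error = op(arg);
                if (error == 0)
                        break;
                usleep(1000);           /* DELAY(1000) is 1 ms */
        }
        return (error);
}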
@@ -1716,6 +1729,7 @@ mpr_get_tunables(struct mpr_softc *sc)
sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
sc->use_phynum = 1;
+ sc->encl_min_slots = 0;
sc->max_reqframes = MPR_REQ_FRAMES;
sc->max_prireqframes = MPR_PRI_REQ_FRAMES;
sc->max_replyframes = MPR_REPLY_FRAMES;
@@ -1735,6 +1749,7 @@ mpr_get_tunables(struct mpr_softc *sc)
TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);
+ TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots);
TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes);
TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes);
TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes);
@@ -1784,6 +1799,10 @@ mpr_get_tunables(struct mpr_softc *sc)
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.encl_min_slots",
+ device_get_unit(sc->mpr_dev));
+ TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots);
+
snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes",
device_get_unit(sc->mpr_dev));
TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
@@ -1938,6 +1957,10 @@ mpr_setup_sysctl(struct mpr_softc *sc)
SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
&sc->prp_page_alloc_fail, "PRP page allocation failures");
+
+ SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+ OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0,
+ "force enclosure minimum slots");
}
static struct mpr_debug_string {
diff --git a/sys/dev/mpr/mpr_mapping.c b/sys/dev/mpr/mpr_mapping.c
index f9a9ac1c53d0..38aa4dfc7ef2 100644
--- a/sys/dev/mpr/mpr_mapping.c
+++ b/sys/dev/mpr/mpr_mapping.c
@@ -2785,6 +2785,8 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
* DPM, if it's being used.
*/
if (enc_idx != MPR_ENCTABLE_BAD_IDX) {
+ u16 new_num_slots;
+
et_entry = &sc->enclosure_table[enc_idx];
if (et_entry->init_complete &&
!et_entry->missing_count) {
@@ -2796,6 +2798,17 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->enc_handle = le16toh(event_data->
EnclosureHandle);
et_entry->start_slot = le16toh(event_data->StartSlot);
+ new_num_slots = le16toh(event_data->NumSlots);
+ if (new_num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d num_slots %d, overriding with %d.\n",
+ __func__, enc_idx, new_num_slots, sc->encl_min_slots);
+ new_num_slots = sc->encl_min_slots;
+ }
+ if (et_entry->num_slots != new_num_slots) {
+ mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d old num_slots %d, new %d.\n",
+ __func__, enc_idx, et_entry->num_slots, new_num_slots);
+ et_entry->num_slots = new_num_slots;
+ }
saved_phy_bits = et_entry->phy_bits;
et_entry->phy_bits |= le32toh(event_data->PhyBits);
if (saved_phy_bits != et_entry->phy_bits)
@@ -2858,6 +2871,11 @@ mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc,
et_entry->start_index = MPR_MAPTABLE_BAD_IDX;
et_entry->dpm_entry_num = MPR_DPM_BAD_IDX;
et_entry->num_slots = le16toh(event_data->NumSlots);
+ if (et_entry->num_slots < sc->encl_min_slots) {
+ mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure %d num_slots is %d, overriding with %d.\n",
+ __func__, enc_idx, et_entry->num_slots, sc->encl_min_slots);
+ et_entry->num_slots = sc->encl_min_slots;
+ }
et_entry->start_slot = le16toh(event_data->StartSlot);
et_entry->phy_bits = le32toh(event_data->PhyBits);
}
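
Both hunks in this file apply the same floor: the operator-supplied minimum (the new encl_min_slots tunable) wins over the slot count the enclosure reports, to cope with firmware that under-reports. As a pure function (names illustrative):

#include <stdint.h>

static uint16_t
effective_num_slots(uint16_t reported, uint16_t min_slots)
{
        /* A configured floor overrides firmware under-reporting. */
        return (reported < min_slots ? min_slots : reported);
}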
diff --git a/sys/dev/mpr/mpr_sas.c b/sys/dev/mpr/mpr_sas.c
index 048b3cb7a53d..5f3a27a468b0 100644
--- a/sys/dev/mpr/mpr_sas.c
+++ b/sys/dev/mpr/mpr_sas.c
@@ -51,13 +51,12 @@
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
diff --git a/sys/dev/mpr/mpr_sas_lsi.c b/sys/dev/mpr/mpr_sas_lsi.c
index 766473370dca..d9744b1054c2 100644
--- a/sys/dev/mpr/mpr_sas_lsi.c
+++ b/sys/dev/mpr/mpr_sas_lsi.c
@@ -51,13 +51,12 @@
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
#include <sys/reboot.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
diff --git a/sys/dev/mpr/mprvar.h b/sys/dev/mpr/mprvar.h
index 0f1743f4266e..93f3fbffe079 100644
--- a/sys/dev/mpr/mprvar.h
+++ b/sys/dev/mpr/mprvar.h
@@ -366,6 +366,7 @@ struct mpr_softc {
int spinup_wait_time;
int use_phynum;
int dump_reqs_alltypes;
+ int encl_min_slots;
uint64_t chain_alloc_fail;
uint64_t prp_page_alloc_fail;
struct sysctl_ctx_list sysctl_ctx;
diff --git a/sys/dev/mps/mps.c b/sys/dev/mps/mps.c
index adad2450a3d4..9dfa0471ac0a 100644
--- a/sys/dev/mps/mps.c
+++ b/sys/dev/mps/mps.c
@@ -1127,7 +1127,7 @@ mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
MPI2_DEFAULT_REPLY *reply;
MPI2_IOC_FACTS_REQUEST request;
- int error, req_sz, reply_sz;
+ int error, req_sz, reply_sz, retry = 0;
MPS_FUNCTRACE(sc);
mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
@@ -1136,10 +1136,21 @@ mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
reply = (MPI2_DEFAULT_REPLY *)facts;
+ /*
+ * Retry sending the initialization sequence. Sometimes, especially with
+ * older firmware, the initialization process fails. Retrying allows the
+ * error to clear in the firmware.
+ */
bzero(&request, req_sz);
request.Function = MPI2_FUNCTION_IOC_FACTS;
- error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
- mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
+ while (retry < 5) {
+ error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
+ if (error == 0)
+ break;
+ mps_dprint(sc, MPS_FAULT, "%s failed retry %d\n", __func__, retry);
+ DELAY(1000);
+ retry++;
+ }
return (error);
}
@@ -1149,7 +1160,7 @@ mps_send_iocinit(struct mps_softc *sc)
{
MPI2_IOC_INIT_REQUEST init;
MPI2_DEFAULT_REPLY reply;
- int req_sz, reply_sz, error;
+ int req_sz, reply_sz, error, retry = 0;
struct timeval now;
uint64_t time_in_msec;
@@ -1193,10 +1204,21 @@ mps_send_iocinit(struct mps_softc *sc)
time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
-
- error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
- if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
- error = ENXIO;
+ /*
+ * Retry sending the initialization sequence. Sometimes, especially with
+ * older firmware, the initialization process fails. Retrying allows the
+ * error to clear in the firmware.
+ */
+ while (retry < 5) {
+ error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
+ if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
+ error = ENXIO;
+ if (error == 0)
+ break;
+ mps_dprint(sc, MPS_FAULT, "%s failed retry %d\n", __func__, retry);
+ DELAY(1000);
+ retry++;
+ }
mps_dprint(sc, MPS_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
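
Unlike the IOC_FACTS retry, this loop has a two-level success test: the transport call can return 0 while the reply carries a failing IOCStatus, and that case is folded into error so it is retried as well. A sketch with stub types and placeholder status constants:

#include <errno.h>
#include <stdint.h>

struct reply { uint16_t iocstatus; };

#define STATUS_MASK             0x7fff  /* illustrative */
#define STATUS_SUCCESS          0x0000

static int
send_init(int (*xmit)(struct reply *))
{
        struct reply r = { 0 };
        int error = 0;

        for (int retry = 0; retry < 5; retry++) {
                error = xmit(&r);
                if ((r.iocstatus & STATUS_MASK) != STATUS_SUCCESS)
                        error = ENXIO;  /* bad status counts as failure */
                if (error == 0)
                        break;
        }
        return (error);
}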
diff --git a/sys/dev/mps/mps_pci.c b/sys/dev/mps/mps_pci.c
index ece49435b39e..7a0c577eb72a 100644
--- a/sys/dev/mps/mps_pci.c
+++ b/sys/dev/mps/mps_pci.c
@@ -122,8 +122,8 @@ struct mps_ident {
0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2308" },
- // Add Customer specific vender/subdevice id before generic
- // (0xffff) vender/subdevice id.
+ // Add Customer specific vendor/subdevice id before generic
+ // (0xffff) vendor/subdevice id.
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
0x8086, 0x3516, 0, "Intel(R) Integrated RAID Module RMS25JB080" },
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
diff --git a/sys/dev/mps/mps_sas.c b/sys/dev/mps/mps_sas.c
index 3cf29aa4e893..fa0f817ed67b 100644
--- a/sys/dev/mps/mps_sas.c
+++ b/sys/dev/mps/mps_sas.c
@@ -51,13 +51,12 @@
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
@@ -859,7 +858,7 @@ mps_detach_sas(struct mps_softc *sc)
if (sassc->devq != NULL)
cam_simq_free(sassc->devq);
- for(i=0; i< sassc->maxtargets ;i++) {
+ for (i = 0; i < sassc->maxtargets; i++) {
targ = &sassc->targets[i];
SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
free(lun, M_MPT2);
@@ -3397,7 +3396,7 @@ mpssas_realloc_targets(struct mps_softc *sc, int maxtargets)
* the allocated LUNs for each target and then the target buffer
* itself.
*/
- for (i=0; i< maxtargets; i++) {
+ for (i = 0; i < maxtargets; i++) {
targ = &sassc->targets[i];
SLIST_FOREACH_SAFE(lun, &targ->luns, lun_link, lun_tmp) {
free(lun, M_MPT2);
diff --git a/sys/dev/mps/mps_sas_lsi.c b/sys/dev/mps/mps_sas_lsi.c
index aeec50433e04..e2d1ef2b013a 100644
--- a/sys/dev/mps/mps_sas_lsi.c
+++ b/sys/dev/mps/mps_sas_lsi.c
@@ -52,13 +52,12 @@
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
#include <sys/reboot.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_debug.h>
diff --git a/sys/dev/mpt/mpt.c b/sys/dev/mpt/mpt.c
index 80b26a0d6280..63ddb13a9f03 100644
--- a/sys/dev/mpt/mpt.c
+++ b/sys/dev/mpt/mpt.c
@@ -486,8 +486,8 @@ mpt_default_reply_handler(struct mpt_softc *mpt, request_t *req,
{
mpt_prt(mpt,
- "Default Handler Called: req=%p:%u reply_descriptor=%x frame=%p\n",
- req, req->serno, reply_desc, reply_frame);
+ "Default Handler Called: req=%u:%u reply_descriptor=%x frame=%p\n",
+ req->index, req->serno, reply_desc, reply_frame);
if (reply_frame != NULL)
mpt_dump_reply_frame(mpt, reply_frame);
diff --git a/sys/dev/mpt/mpt_cam.c b/sys/dev/mpt/mpt_cam.c
index 490add7bc599..35efdd1474d6 100644
--- a/sys/dev/mpt/mpt_cam.c
+++ b/sys/dev/mpt/mpt_cam.c
@@ -1246,7 +1246,8 @@ mpt_timeout(void *arg)
MPT_LOCK_ASSERT(mpt);
req = ccb->ccb_h.ccb_req_ptr;
- mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
req->serno, ccb, req->ccb);
/* XXX: WHAT ARE WE TRYING TO DO HERE? */
if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
@@ -2571,7 +2572,8 @@ mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
ccb = req->ccb;
if (ccb == NULL) {
- mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
req, req->serno);
return (TRUE);
}
@@ -2628,7 +2630,8 @@ mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
TAILQ_REMOVE(&mpt->request_pending_list, req, links);
} else {
- mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "completing timedout/aborted req %p:%u\n",
req, req->serno);
TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
}
@@ -3988,7 +3991,8 @@ mpt_recover_commands(struct mpt_softc *mpt)
uint8_t response;
MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
- mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "attempting to abort req %p:%u function %x\n",
req, req->serno, hdrp->Function);
ccb = req->ccb;
if (ccb == NULL) {
@@ -4063,7 +4067,8 @@ mpt_recover_commands(struct mpt_softc *mpt)
mpt_reset(mpt, TRUE);
continue;
}
- mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
+ mpt_lprt(mpt, MPT_PRT_DEBUG,
+ "abort of req %p:%u completed\n", req, req->serno);
}
}
diff --git a/sys/dev/mpt/mpt_debug.c b/sys/dev/mpt/mpt_debug.c
index 8e8cb0cf413a..f2441c78c660 100644
--- a/sys/dev/mpt/mpt_debug.c
+++ b/sys/dev/mpt/mpt_debug.c
@@ -75,7 +75,7 @@
#include <cam/scsi/scsi_all.h>
-#include <machine/stdarg.h> /* for use by mpt_prt below */
+#include <sys/stdarg.h> /* for use by mpt_prt below */
struct Error_Map {
int Error_Code;
diff --git a/sys/dev/mpt/mpt_raid.c b/sys/dev/mpt/mpt_raid.c
index d4761c0fef67..2b868f6ef070 100644
--- a/sys/dev/mpt/mpt_raid.c
+++ b/sys/dev/mpt/mpt_raid.c
@@ -57,10 +57,9 @@
#include <sys/callout.h>
#include <sys/kthread.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
-#include <machine/stdarg.h>
-
struct mpt_raid_action_result
{
union {
@@ -831,7 +830,7 @@ mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
}
ioc_vol = mpt->ioc_page2->RaidVolume;
ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
- for (;ioc_vol != ioc_last_vol; ioc_vol++) {
+ for (; ioc_vol != ioc_last_vol; ioc_vol++) {
if (ioc_vol->VolumeID == tgt) {
return (1);
}
@@ -1407,7 +1406,7 @@ mpt_refresh_raid_data(struct mpt_softc *mpt)
ioc_vol = mpt->ioc_page2->RaidVolume;
ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
- for (;ioc_vol != ioc_last_vol; ioc_vol++) {
+ for (; ioc_vol != ioc_last_vol; ioc_vol++) {
struct mpt_raid_volume *mpt_vol;
mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber;
diff --git a/sys/dev/mrsas/mrsas_ioctl.c b/sys/dev/mrsas/mrsas_ioctl.c
index 74eacfbeb9fa..8a85544604a4 100644
--- a/sys/dev/mrsas/mrsas_ioctl.c
+++ b/sys/dev/mrsas/mrsas_ioctl.c
@@ -462,13 +462,6 @@ mrsas_user_command(struct mrsas_softc *sc, struct mfi_ioc_passthru *ioc)
kern_sge[0].length = 0;
} else {
ioctl_temp_data_mem = malloc(ioc->buf_size, M_MRSAS, M_WAITOK);
- if (ioctl_temp_data_mem == NULL) {
- device_printf(sc->mrsas_dev, "Could not allocate "
- "%d memory for temporary passthrough ioctl\n",
- ioc->buf_size);
- ret = ENOMEM;
- goto out;
- }
/* Copy in data from user space */
ret = copyin(ioc->buf, ioctl_temp_data_mem, ioc->buf_size);
@@ -483,12 +476,6 @@ mrsas_user_command(struct mrsas_softc *sc, struct mfi_ioc_passthru *ioc)
*/
passcmd = malloc(sizeof(struct mrsas_passthru_cmd), M_MRSAS,
M_WAITOK);
- if (passcmd == NULL) {
- device_printf(sc->mrsas_dev, "Could not allocate "
- "memory for temporary passthrough cb struct\n");
- ret = ENOMEM;
- goto out;
- }
passcmd->complete = 0;
passcmd->sc = sc;
passcmd->cmd = cmd;
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index bb00ffc7ae34..295eb2584b3e 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -252,6 +252,7 @@ static const char *model_name[] = {
static int mskc_probe(device_t);
static int mskc_attach(device_t);
+static void mskc_child_deleted(device_t, device_t);
static int mskc_detach(device_t);
static int mskc_shutdown(device_t);
static int mskc_setup_rambuffer(struct msk_softc *);
@@ -335,6 +336,7 @@ static device_method_t mskc_methods[] = {
DEVMETHOD(device_resume, mskc_resume),
DEVMETHOD(device_shutdown, mskc_shutdown),
+ DEVMETHOD(bus_child_deleted, mskc_child_deleted),
DEVMETHOD(bus_get_dma_tag, mskc_get_dma_tag),
DEVMETHOD_END
@@ -1564,7 +1566,6 @@ static int
msk_probe(device_t dev)
{
struct msk_softc *sc;
- char desc[100];
sc = device_get_softc(device_get_parent(dev));
/*
@@ -1573,11 +1574,10 @@ msk_probe(device_t dev)
* mskc_attach() will create a second device instance
* for us.
*/
- snprintf(desc, sizeof(desc),
+ device_set_descf(dev,
"Marvell Technology Group Ltd. %s Id 0x%02x Rev 0x%02x",
model_name[sc->msk_hw_id - CHIP_ID_YUKON_XL], sc->msk_hw_id,
sc->msk_hw_rev);
- device_set_desc_copy(dev, desc);
return (BUS_PROBE_DEFAULT);
}
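
device_set_descf() takes a format string directly, replacing the local snprintf() buffer plus device_set_desc_copy() pair. A minimal probe sketch (kernel context assumed; the strings are made up):

static int
foo_probe(device_t dev)
{
        /* One call formats and stores the description. */
        device_set_descf(dev, "Acme %s rev 0x%02x", "Frobnicator", 0x1a);
        return (BUS_PROBE_DEFAULT);
}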
@@ -1625,11 +1625,6 @@ msk_attach(device_t dev)
msk_rx_dma_jalloc(sc_if);
ifp = sc_if->msk_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc_if->msk_if_dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc_if);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1943,7 +1938,7 @@ mskc_attach(device_t dev)
if ((error = mskc_setup_rambuffer(sc)) != 0)
goto fail;
- sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", -1);
+ sc->msk_devs[MSK_PORT_A] = device_add_child(dev, "msk", DEVICE_UNIT_ANY);
if (sc->msk_devs[MSK_PORT_A] == NULL) {
device_printf(dev, "failed to add child for PORT_A\n");
error = ENXIO;
@@ -1960,7 +1955,7 @@ mskc_attach(device_t dev)
device_set_ivars(sc->msk_devs[MSK_PORT_A], mmd);
if (sc->msk_num_port > 1) {
- sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", -1);
+ sc->msk_devs[MSK_PORT_B] = device_add_child(dev, "msk", DEVICE_UNIT_ANY);
if (sc->msk_devs[MSK_PORT_B] == NULL) {
device_printf(dev, "failed to add child for PORT_B\n");
error = ENXIO;
@@ -1977,11 +1972,7 @@ mskc_attach(device_t dev)
device_set_ivars(sc->msk_devs[MSK_PORT_B], mmd);
}
- error = bus_generic_attach(dev);
- if (error) {
- device_printf(dev, "failed to attach port(s)\n");
- goto fail;
- }
+ bus_attach_children(dev);
/* Hook interrupt last to avoid having to lock softc. */
error = bus_setup_intr(dev, sc->msk_irq[0], INTR_TYPE_NET |
@@ -2029,17 +2020,6 @@ msk_detach(device_t dev)
MSK_IF_LOCK(sc_if);
}
- /*
- * We're generally called from mskc_detach() which is using
- * device_delete_child() to get to here. It's already trashed
- * miibus for us, so don't do it here or we'll panic.
- *
- * if (sc_if->msk_miibus != NULL) {
- * device_delete_child(dev, sc_if->msk_miibus);
- * sc_if->msk_miibus = NULL;
- * }
- */
-
msk_rx_dma_jfree(sc_if);
msk_txrx_dma_free(sc_if);
bus_generic_detach(dev);
@@ -2053,6 +2033,12 @@ msk_detach(device_t dev)
return (0);
}
+static void
+mskc_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
static int
mskc_detach(device_t dev)
{
@@ -2061,19 +2047,7 @@ mskc_detach(device_t dev)
sc = device_get_softc(dev);
KASSERT(mtx_initialized(&sc->msk_mtx), ("msk mutex not initialized"));
- if (device_is_alive(dev)) {
- if (sc->msk_devs[MSK_PORT_A] != NULL) {
- free(device_get_ivars(sc->msk_devs[MSK_PORT_A]),
- M_DEVBUF);
- device_delete_child(dev, sc->msk_devs[MSK_PORT_A]);
- }
- if (sc->msk_devs[MSK_PORT_B] != NULL) {
- free(device_get_ivars(sc->msk_devs[MSK_PORT_B]),
- M_DEVBUF);
- device_delete_child(dev, sc->msk_devs[MSK_PORT_B]);
- }
- bus_generic_detach(dev);
- }
+ bus_generic_detach(dev);
/* Disable all interrupts. */
CSR_WRITE_4(sc, B0_IMSK, 0);
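
The per-child ivars are now freed from a bus_child_deleted method, so mskc_detach() no longer open-codes freeing ivars and deleting each port: bus_generic_detach() detaches and deletes the children, and the hook runs on every deletion path. A sketch of the pairing (kernel context assumed):

static void
mybus_child_deleted(device_t dev, device_t child)
{
        /* Runs whenever a child is deleted, from any code path. */
        free(device_get_ivars(child), M_DEVBUF);
}

static int
mybus_detach(device_t dev)
{
        /* Detaches and deletes children; the hook above cleans up. */
        return (bus_generic_detach(dev));
}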
diff --git a/sys/dev/mvs/mvs.c b/sys/dev/mvs/mvs.c
index a98a59259581..4132af157a62 100644
--- a/sys/dev/mvs/mvs.c
+++ b/sys/dev/mvs/mvs.c
@@ -37,8 +37,8 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/stdarg.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -105,7 +105,7 @@ static int
mvs_ch_probe(device_t dev)
{
- device_set_desc_copy(dev, "Marvell SATA channel");
+ device_set_desc(dev, "Marvell SATA channel");
return (BUS_PROBE_DEFAULT);
}
@@ -1798,7 +1798,7 @@ completeall:
}
xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path,
ch->hold[i]->ccb_h.pinfo.priority);
- if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+ if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) {
/* READ LOG */
ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
ccb->ccb_h.func_code = XPT_ATA_IO;
diff --git a/sys/dev/mvs/mvs_pci.c b/sys/dev/mvs/mvs_pci.c
index be9351403a0d..f0df709db732 100644
--- a/sys/dev/mvs/mvs_pci.c
+++ b/sys/dev/mvs/mvs_pci.c
@@ -36,8 +36,8 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -77,7 +77,6 @@ static struct {
static int
mvs_probe(device_t dev)
{
- char buf[64];
int i;
uint32_t devid = pci_get_devid(dev);
uint8_t revid = pci_get_revid(dev);
@@ -85,9 +84,8 @@ mvs_probe(device_t dev)
for (i = 0; mvs_ids[i].id != 0; i++) {
if (mvs_ids[i].id == devid &&
mvs_ids[i].rev <= revid) {
- snprintf(buf, sizeof(buf), "%s SATA controller",
+ device_set_descf(dev, "%s SATA controller",
mvs_ids[i].name);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
}
@@ -164,13 +162,13 @@ mvs_attach(device_t dev)
}
/* Attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
- child = device_add_child(dev, "mvsch", -1);
+ child = device_add_child(dev, "mvsch", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(dev, "failed to add channel device\n");
else
device_set_ivars(child, (void *)(intptr_t)unit);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return 0;
}
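
Two newbus KPI replacements recur throughout this commit: DEVICE_UNIT_ANY instead of the magic -1 unit for device_add_child(), and bus_attach_children() (which returns void) instead of bus_generic_attach(). An attach sketch (kernel context assumed; the child name is made up):

static int
mybus_attach(device_t dev)
{
        device_t child;

        child = device_add_child(dev, "mychild", DEVICE_UNIT_ANY);
        if (child == NULL)
                device_printf(dev, "failed to add child\n");
        bus_attach_children(dev);       /* attaches all added children */
        return (0);
}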
@@ -178,9 +176,12 @@ static int
mvs_detach(device_t dev)
{
struct mvs_controller *ctlr = device_get_softc(dev);
+ int error;
/* Detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
/* Free interrupt. */
if (ctlr->irq.r_irq) {
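
mvs_detach() now propagates a bus_generic_detach() failure instead of unconditionally deleting children with device_delete_children(), so the controller's interrupt and register resources are torn down only once every child has actually detached. The shape (kernel context assumed):

static int
mydev_detach(device_t dev)
{
        int error;

        error = bus_generic_detach(dev);
        if (error != 0)
                return (error);         /* a child refused to detach */
        /* ... free the interrupt and BARs only past this point ... */
        return (0);
}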
diff --git a/sys/dev/mvs/mvs_soc.c b/sys/dev/mvs/mvs_soc.c
index 696b65d54359..5bafc07847b4 100644
--- a/sys/dev/mvs/mvs_soc.c
+++ b/sys/dev/mvs/mvs_soc.c
@@ -35,8 +35,8 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/stdarg.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -61,11 +61,6 @@ static struct {
int ports;
int quirks;
} mvs_ids[] = {
- {MV_DEV_88F5182, 0x00, "Marvell 88F5182", 2, MVS_Q_GENIIE|MVS_Q_SOC},
- {MV_DEV_88F6281, 0x00, "Marvell 88F6281", 2, MVS_Q_GENIIE|MVS_Q_SOC},
- {MV_DEV_88F6282, 0x00, "Marvell 88F6282", 2, MVS_Q_GENIIE|MVS_Q_SOC},
- {MV_DEV_MV78100, 0x00, "Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC},
- {MV_DEV_MV78100_Z0, 0x00,"Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC},
{MV_DEV_MV78260, 0x00, "Marvell MV78260", 2, MVS_Q_GENIIE|MVS_Q_SOC},
{MV_DEV_MV78460, 0x00, "Marvell MV78460", 2, MVS_Q_GENIIE|MVS_Q_SOC},
{0, 0x00, NULL, 0, 0}
@@ -74,7 +69,6 @@ static struct {
static int
mvs_probe(device_t dev)
{
- char buf[64];
int i;
uint32_t devid, revid;
@@ -88,9 +82,8 @@ mvs_probe(device_t dev)
for (i = 0; mvs_ids[i].id != 0; i++) {
if (mvs_ids[i].id == devid &&
mvs_ids[i].rev <= revid) {
- snprintf(buf, sizeof(buf), "%s SATA controller",
+ device_set_descf(dev, "%s SATA controller",
mvs_ids[i].name);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
}
@@ -168,13 +161,13 @@ mvs_attach(device_t dev)
}
/* Attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
- child = device_add_child(dev, "mvsch", -1);
+ child = device_add_child(dev, "mvsch", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(dev, "failed to add channel device\n");
else
device_set_ivars(child, (void *)(intptr_t)unit);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return 0;
}
@@ -182,9 +175,12 @@ static int
mvs_detach(device_t dev)
{
struct mvs_controller *ctlr = device_get_softc(dev);
+ int error;
/* Detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
/* Free interrupt. */
if (ctlr->irq.r_irq) {
@@ -374,7 +370,7 @@ mvs_release_resource(device_t dev, device_t child, struct resource *r)
rman_release_resource(r);
return (0);
case SYS_RES_IRQ:
- if (rid != ATA_IRQ_RID)
+ if (rman_get_rid(r) != ATA_IRQ_RID)
return ENOENT;
return (0);
}
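
bus_release_resource methods lost their separate type/rid arguments in this KPI revision, so the IRQ check now derives the rid from the resource itself with rman_get_rid(). A sketch of the updated method shape (rid values illustrative):

static int
mybus_release_resource(device_t dev, device_t child, struct resource *r)
{
        switch (rman_get_type(r)) {
        case SYS_RES_IRQ:
                if (rman_get_rid(r) != 0)
                        return (ENOENT);
                return (0);
        default:
                return (ENOENT);
        }
}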
diff --git a/sys/dev/mwl/if_mwl.c b/sys/dev/mwl/if_mwl.c
index 479f3144dce3..9f3d34f4f50d 100644
--- a/sys/dev/mwl/if_mwl.c
+++ b/sys/dev/mwl/if_mwl.c
@@ -226,12 +226,9 @@ enum {
MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */
MWL_DEBUG_ANY = 0xffffffff
};
-#define IS_BEACON(wh) \
- ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \
- (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON))
#define IFF_DUMPPKTS_RECV(sc, wh) \
((sc->sc_debug & MWL_DEBUG_RECV) && \
- ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh)))
+ ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IEEE80211_IS_MGMT_BEACON(wh)))
#define IFF_DUMPPKTS_XMIT(sc) \
(sc->sc_debug & MWL_DEBUG_XMIT)
@@ -436,6 +433,8 @@ mwl_attach(uint16_t devid, struct mwl_softc *sc)
| IEEE80211_HTC_SMPS /* SMPS available */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
/*
* Mark h/w crypto support.
* XXX no way to query h/w support.
@@ -1519,8 +1518,7 @@ mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
if (k->wk_keyix != IEEE80211_KEYIX_NONE ||
(k->wk_flags & IEEE80211_KEY_GROUP)) {
- if (!(&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ if (!ieee80211_is_key_global(vap, k)) {
/* should not happen */
DPRINTF(sc, MWL_DEBUG_KEYCACHE,
"%s: bogus group key\n", __func__);
@@ -1801,7 +1799,7 @@ mwl_updateslot(struct ieee80211com *ic)
return;
/*
- * Calculate the ERP flags. The firwmare will use
+ * Calculate the ERP flags. The firmware will use
* this to carry out the appropriate measures.
*/
prot = 0;
@@ -2554,7 +2552,7 @@ mwl_anyhdrsize(const void *data)
{
const struct ieee80211_frame *wh = data;
- if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) {
+ if (IEEE80211_IS_CTL(wh)) {
switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {
case IEEE80211_FC0_SUBTYPE_CTS:
case IEEE80211_FC0_SUBTYPE_ACK:
@@ -3091,6 +3089,8 @@ mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *
} else
qos = 0;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (iswep) {
const struct ieee80211_cipher *cip;
struct ieee80211_key *k;
@@ -3348,6 +3348,7 @@ mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
ni = bf->bf_node;
if (ni != NULL) {
status = le32toh(ds->Status);
+ int rate;
if (status & EAGLE_TXD_STATUS_OK) {
uint16_t Format = le16toh(ds->Format);
uint8_t txant = _IEEE80211_MASKSHIFT(Format,
@@ -3360,14 +3361,14 @@ mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq)
sc->sc_stats.mst_tx_mretries++;
if (txq->qnum >= MWL_WME_AC_VO)
ic->ic_wme.wme_hipri_traffic++;
- ni->ni_txrate = _IEEE80211_MASKSHIFT(Format,
+ rate = _IEEE80211_MASKSHIFT(Format,
EAGLE_TXD_RATE);
if ((Format & EAGLE_TXD_FORMAT_HT) == 0) {
- ni->ni_txrate = mwl_cvtlegacyrix(
- ni->ni_txrate);
+ rate = mwl_cvtlegacyrix(rate);
} else
- ni->ni_txrate |= IEEE80211_RATE_MCS;
- sc->sc_stats.mst_tx_rate = ni->ni_txrate;
+ rate |= IEEE80211_RATE_MCS;
+ sc->sc_stats.mst_tx_rate = rate;
+ ieee80211_node_set_txrate_dot11rate(ni, rate);
} else {
if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR)
sc->sc_stats.mst_tx_linkerror++;
@@ -4020,7 +4021,7 @@ mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40;
if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20;
- if (ni->ni_chw != 40)
+ if (ni->ni_chw != NET80211_STA_RX_BW_40)
pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40;
}
return pi;
diff --git a/sys/dev/mxge/if_mxge.c b/sys/dev/mxge/if_mxge.c
index 4036a46645db..f36f41d53b40 100644
--- a/sys/dev/mxge/if_mxge.c
+++ b/sys/dev/mxge/if_mxge.c
@@ -4615,10 +4615,6 @@ mxge_attach(device_t dev)
TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc);
sc->tq = taskqueue_create("mxge_taskq", M_WAITOK,
taskqueue_thread_enqueue, &sc->tq);
- if (sc->tq == NULL) {
- err = ENOMEM;
- goto abort_with_nothing;
- }
err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
1, /* alignment */
@@ -4640,11 +4636,6 @@ mxge_attach(device_t dev)
}
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- err = ENOSPC;
- goto abort_with_parent_dmat;
- }
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd",
@@ -4813,7 +4804,6 @@ abort_with_lock:
mtx_destroy(&sc->cmd_mtx);
mtx_destroy(&sc->driver_mtx);
if_free(ifp);
-abort_with_parent_dmat:
bus_dma_tag_destroy(sc->parent_dmat);
abort_with_tq:
if (sc->tq != NULL) {
@@ -4821,7 +4811,6 @@ abort_with_tq:
taskqueue_free(sc->tq);
sc->tq = NULL;
}
-abort_with_nothing:
return err;
}
diff --git a/sys/dev/my/if_my.c b/sys/dev/my/if_my.c
index 4b7e5e711707..f6d407fedade 100644
--- a/sys/dev/my/if_my.c
+++ b/sys/dev/my/if_my.c
@@ -873,11 +873,6 @@ my_attach(device_t dev)
bzero(sc->my_ldata, sizeof(struct my_list_data));
ifp = sc->my_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto free_ldata;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -961,7 +956,6 @@ detach_if:
ether_ifdetach(ifp);
free_if:
if_free(ifp);
-free_ldata:
free(sc->my_ldata_ptr, M_DEVBUF);
release_irq:
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq);
diff --git a/sys/dev/nctgpio/nctgpio.c b/sys/dev/nctgpio/nctgpio.c
index 75ea1fbdba17..ddc2ceef7dfb 100644
--- a/sys/dev/nctgpio/nctgpio.c
+++ b/sys/dev/nctgpio/nctgpio.c
@@ -1258,13 +1258,14 @@ nct_attach(device_t dev)
GPIO_UNLOCK(sc);
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "failed to attach to gpiobus\n");
GPIO_LOCK_DESTROY(sc);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/neta/if_mvneta.c b/sys/dev/neta/if_mvneta.c
index 8a2c2ec8512c..1c6247adb56b 100644
--- a/sys/dev/neta/if_mvneta.c
+++ b/sys/dev/neta/if_mvneta.c
@@ -612,11 +612,6 @@ mvneta_attach(device_t self)
/* Allocate network interface */
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(self, "if_alloc() failed\n");
- mvneta_detach(self);
- return (ENOMEM);
- }
if_initname(ifp, device_get_name(self), device_get_unit(self));
/*
@@ -806,14 +801,14 @@ mvneta_attach(device_t self)
if (mvneta_has_switch(self)) {
if (bootverbose)
device_printf(self, "This device is attached to a switch\n");
- child = device_add_child(sc->dev, "mdio", -1);
+ child = device_add_child(sc->dev, "mdio", DEVICE_UNIT_ANY);
if (child == NULL) {
ether_ifdetach(sc->ifp);
mvneta_detach(self);
return (ENXIO);
}
- bus_generic_attach(sc->dev);
- bus_generic_attach(child);
+ bus_attach_children(sc->dev);
+ bus_attach_children(child);
}
/* Configure MAC media */
@@ -848,7 +843,7 @@ mvneta_detach(device_t dev)
for (q = 0; q < MVNETA_TX_QNUM_MAX; q++)
mvneta_ring_dealloc_tx_queue(sc, q);
- device_delete_children(dev);
+ bus_generic_detach(dev);
if (sc->ih_cookie[0] != NULL)
bus_teardown_intr(dev, sc->res[1], sc->ih_cookie[0]);
@@ -3005,8 +3000,6 @@ mvneta_rx_queue(struct mvneta_softc *sc, int q, int npkt)
struct mvneta_rx_desc *r;
struct mvneta_buf *rxbuf;
struct mbuf *m;
- struct lro_ctrl *lro;
- struct lro_entry *queued;
void *pktbuf;
int i, pktlen, processed, ndma;
@@ -3120,11 +3113,7 @@ rx_lro:
/*
* Flush any outstanding LRO work
*/
- lro = &rx->lro;
- while (__predict_false((queued = LIST_FIRST(&lro->lro_active)) != NULL)) {
- LIST_REMOVE(LIST_FIRST((&lro->lro_active)), next);
- tcp_lro_flush(lro, queued);
- }
+ tcp_lro_flush_all(&rx->lro);
}
STATIC void
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index 56d853eb7392..9c06f7fec530 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -27,8 +27,9 @@
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
+#include "opt_inet.h"
+#include "opt_inet6.h"
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -75,9 +76,6 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
@@ -399,12 +397,6 @@ ptnet_attach(device_t dev)
/* Setup Ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Failed to allocate ifnet\n");
- err = ENOMEM;
- goto err_path;
- }
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setbaudrate(ifp, IF_Gbps(10));
if_setsoftc(ifp, sc);
@@ -546,7 +538,7 @@ ptnet_detach(device_t dev)
ptnet_irqs_fini(sc);
if (sc->csb_gh) {
- contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);
+ free(sc->csb_gh, M_DEVBUF);
sc->csb_gh = NULL;
sc->csb_hg = NULL;
}
diff --git a/sys/dev/netmap/netmap.c b/sys/dev/netmap/netmap.c
index 832d0ecc0c6e..f531151fb656 100644
--- a/sys/dev/netmap/netmap.c
+++ b/sys/dev/netmap/netmap.c
@@ -4010,8 +4010,8 @@ netmap_attach_common(struct netmap_adapter *na)
na->active_fds = 0;
if (na->nm_mem == NULL) {
- /* use iommu or global allocator */
- na->nm_mem = netmap_mem_get_iommu(na);
+ /* select an allocator based on IOMMU and NUMA affinity */
+ na->nm_mem = netmap_mem_get_allocator(na);
}
if (na->nm_bdg_attach == NULL)
/* no special nm_bdg_attach callback. On VALE
diff --git a/sys/dev/netmap/netmap_freebsd.c b/sys/dev/netmap/netmap_freebsd.c
index a4a0124471c0..8cc543d54c2e 100644
--- a/sys/dev/netmap/netmap_freebsd.c
+++ b/sys/dev/netmap/netmap_freebsd.c
@@ -612,10 +612,6 @@ nm_os_vi_persist(const char *name, if_t *ret)
eaddr[5] = (uint8_t)unit;
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- nm_prerr("if_alloc failed");
- return ENOMEM;
- }
if_initname(ifp, name, IF_DUNIT_NONE);
if_setflags(ifp, IFF_UP | IFF_SIMPLEX | IFF_MULTICAST);
if_setinitfn(ifp, (void *)nm_vi_dummy);
@@ -864,16 +860,12 @@ nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
static int
ptn_memdev_probe(device_t dev)
{
- char desc[256];
-
if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID)
return (ENXIO);
if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID)
return (ENXIO);
- snprintf(desc, sizeof(desc), "%s PCI adapter",
- PTNETMAP_MEMDEV_NAME);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "%s PCI adapter", PTNETMAP_MEMDEV_NAME);
return (BUS_PROBE_DEFAULT);
}
@@ -1033,11 +1025,20 @@ netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
return (VM_PAGER_OK);
}
+static void
+netmap_dev_pager_path(void *handle, char *path, size_t len)
+{
+ struct netmap_vm_handle_t *vmh = handle;
+ struct cdev *dev = vmh->dev;
+
+ dev_copyname(dev, path, len);
+}
static struct cdev_pager_ops netmap_cdev_pager_ops = {
.cdev_pg_ctor = netmap_dev_pager_ctor,
.cdev_pg_dtor = netmap_dev_pager_dtor,
.cdev_pg_fault = netmap_dev_pager_fault,
+ .cdev_pg_path = netmap_dev_pager_path,
};
@@ -1405,13 +1406,13 @@ netmap_knwrite(struct knote *kn, long hint)
return netmap_knrw(kn, hint, POLLOUT);
}
-static struct filterops netmap_rfiltops = {
+static const struct filterops netmap_rfiltops = {
.f_isfd = 1,
.f_detach = netmap_knrdetach,
.f_event = netmap_knread,
};
-static struct filterops netmap_wfiltops = {
+static const struct filterops netmap_wfiltops = {
.f_isfd = 1,
.f_detach = netmap_knwdetach,
.f_event = netmap_knwrite,
diff --git a/sys/dev/netmap/netmap_kern.h b/sys/dev/netmap/netmap_kern.h
index dd736b46ae70..931bf7cd332b 100644
--- a/sys/dev/netmap/netmap_kern.h
+++ b/sys/dev/netmap/netmap_kern.h
@@ -81,6 +81,7 @@
#if defined(__FreeBSD__)
#include <sys/selinfo.h>
+#include <vm/vm.h>
#define likely(x) __builtin_expect((long)!!(x), 1L)
#define unlikely(x) __builtin_expect((long)!!(x), 0L)
@@ -1727,10 +1728,30 @@ extern int netmap_generic_txqdisc;
#define NM_IS_NATIVE(ifp) (NM_NA_VALID(ifp) && NA(ifp)->nm_dtor == netmap_hw_dtor)
#if defined(__FreeBSD__)
+extern int netmap_port_numa_affinity;
-/* Assigns the device IOMMU domain to an allocator.
- * Returns -ENOMEM in case the domain is different */
-#define nm_iommu_group_id(dev) (-1)
+static inline int
+nm_iommu_group_id(struct netmap_adapter *na)
+{
+ return (-1);
+}
+
+static inline int
+nm_numa_domain(struct netmap_adapter *na)
+{
+ int domain;
+
+ /*
+ * If the system has only one NUMA domain, don't bother distinguishing
+ * between IF_NODOM and domain 0.
+ */
+ if (vm_ndomains == 1 || netmap_port_numa_affinity == 0)
+ return (-1);
+ domain = if_getnumadomain(na->ifp);
+ if (domain == IF_NODOM)
+ domain = -1;
+ return (domain);
+}
/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
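
nm_numa_domain() collapses every "no affinity" case (single-domain system, tunable off, IF_NODOM) onto -1, so they all select the same global allocator alongside the IOMMU group key. The normalization as a pure function (names illustrative):

#include <stdbool.h>

static int
normalize_domain(int ndomains, bool affinity_on, int ifp_domain)
{
        /* -1 means "no preference": one domain, feature disabled,
         * or an interface without a domain (IF_NODOM). */
        if (ndomains == 1 || !affinity_on || ifp_domain < 0)
                return (-1);
        return (ifp_domain);
}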
diff --git a/sys/dev/netmap/netmap_mem2.c b/sys/dev/netmap/netmap_mem2.c
index 23954b377f9b..d69e9305f6f0 100644
--- a/sys/dev/netmap/netmap_mem2.c
+++ b/sys/dev/netmap/netmap_mem2.c
@@ -37,8 +37,8 @@
#endif /* __APPLE__ */
#ifdef __FreeBSD__
-#include <sys/cdefs.h> /* prerequisite */
#include <sys/types.h>
+#include <sys/domainset.h>
#include <sys/malloc.h>
#include <sys/kernel.h> /* MALLOC_DEFINE */
#include <sys/proc.h>
@@ -174,12 +174,13 @@ struct netmap_mem_d {
struct netmap_obj_pool pools[NETMAP_POOLS_NR];
nm_memid_t nm_id; /* allocator identifier */
- int nm_grp; /* iommu group id */
+ int nm_grp; /* iommu group id */
+ int nm_numa_domain; /* local NUMA domain */
/* list of all existing allocators, sorted by nm_id */
struct netmap_mem_d *prev, *next;
- struct netmap_mem_ops *ops;
+ const struct netmap_mem_ops *ops;
struct netmap_obj_params params[NETMAP_POOLS_NR];
@@ -310,7 +311,7 @@ netmap_mem_rings_delete(struct netmap_adapter *na)
static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
-static int nm_mem_check_group(struct netmap_mem_d *, bus_dma_tag_t);
+static int nm_mem_check_group(struct netmap_mem_d *, void *);
static void nm_mem_release_id(struct netmap_mem_d *);
nm_memid_t
@@ -533,7 +534,7 @@ static struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = {
* running in netmap mode.
* Virtual (VALE) ports will have each its own allocator.
*/
-extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */
+extern const struct netmap_mem_ops netmap_mem_global_ops; /* forward */
struct netmap_mem_d nm_mem = { /* Our memory allocator. */
.pools = {
[NETMAP_IF_POOL] = {
@@ -576,6 +577,7 @@ struct netmap_mem_d nm_mem = { /* Our memory allocator. */
.nm_id = 1,
.nm_grp = -1,
+ .nm_numa_domain = -1,
.prev = &nm_mem,
.next = &nm_mem,
@@ -615,6 +617,7 @@ static const struct netmap_mem_d nm_blueprint = {
},
.nm_grp = -1,
+ .nm_numa_domain = -1,
.flags = NETMAP_MEM_PRIVATE,
@@ -625,7 +628,6 @@ static const struct netmap_mem_d nm_blueprint = {
#define STRINGIFY(x) #x
-
#define DECLARE_SYSCTLS(id, name) \
SYSBEGIN(mem2_ ## name); \
SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \
@@ -649,9 +651,14 @@ DECLARE_SYSCTLS(NETMAP_IF_POOL, if);
DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf);
+int netmap_port_numa_affinity = 0;
+SYSCTL_INT(_dev_netmap, OID_AUTO, port_numa_affinity,
+ CTLFLAG_RDTUN, &netmap_port_numa_affinity, 0,
+ "Use NUMA-local memory for memory pools when possible");
+
/* call with nm_mem_list_lock held */
static int
-nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id)
+nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id, int domain)
{
nm_memid_t id;
struct netmap_mem_d *scan = netmap_last_mem_d;
@@ -666,6 +673,7 @@ nm_mem_assign_id_locked(struct netmap_mem_d *nmd, int grp_id)
if (id != scan->nm_id) {
nmd->nm_id = id;
nmd->nm_grp = grp_id;
+ nmd->nm_numa_domain = domain;
nmd->prev = scan->prev;
nmd->next = scan;
scan->prev->next = nmd;
@@ -688,7 +696,7 @@ nm_mem_assign_id(struct netmap_mem_d *nmd, int grp_id)
int ret;
NM_MTX_LOCK(nm_mem_list_lock);
- ret = nm_mem_assign_id_locked(nmd, grp_id);
+ ret = nm_mem_assign_id_locked(nmd, grp_id, -1);
NM_MTX_UNLOCK(nm_mem_list_lock);
return ret;
@@ -728,7 +736,7 @@ netmap_mem_find(nm_memid_t id)
}
static int
-nm_mem_check_group(struct netmap_mem_d *nmd, bus_dma_tag_t dev)
+nm_mem_check_group(struct netmap_mem_d *nmd, void *dev)
{
int err = 0, id;
@@ -1284,7 +1292,7 @@ netmap_reset_obj_allocator(struct netmap_obj_pool *p)
* in the lut.
*/
for (i = 0; i < p->objtotal; i += p->_clustentries) {
- contigfree(p->lut[i].vaddr, p->_clustsize, M_NETMAP);
+ free(p->lut[i].vaddr, M_NETMAP);
}
nm_free_lut(p->lut, p->objtotal);
}
@@ -1399,10 +1407,9 @@ netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int obj
/* call with NMA_LOCK held */
static int
-netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
+netmap_finalize_obj_allocator(struct netmap_mem_d *nmd, struct netmap_obj_pool *p)
{
int i; /* must be signed */
- size_t n;
if (p->lut) {
/* if the lut is already there we assume that also all the
@@ -1430,7 +1437,6 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
* Allocate clusters, init pointers
*/
- n = p->_clustsize;
for (i = 0; i < (int)p->objtotal;) {
int lim = i + p->_clustentries;
char *clust;
@@ -1442,8 +1448,16 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
* can live with standard malloc, because the hardware will not
* access the pages directly.
*/
- clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO,
- (size_t)0, -1UL, PAGE_SIZE, 0);
+ if (nmd->nm_numa_domain == -1) {
+ clust = contigmalloc(p->_clustsize, M_NETMAP,
+ M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
+ } else {
+ struct domainset *ds;
+
+ ds = DOMAINSET_PREF(nmd->nm_numa_domain);
+ clust = contigmalloc_domainset(p->_clustsize, M_NETMAP,
+ ds, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0);
+ }
if (clust == NULL) {
/*
* If we get here, there is a severe memory shortage,
@@ -1456,8 +1470,7 @@ netmap_finalize_obj_allocator(struct netmap_obj_pool *p)
lim = i / 2;
for (i--; i >= lim; i--) {
if (i % p->_clustentries == 0 && p->lut[i].vaddr)
- contigfree(p->lut[i].vaddr,
- n, M_NETMAP);
+ free(p->lut[i].vaddr, M_NETMAP);
p->lut[i].vaddr = NULL;
}
out:
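
When the allocator carries a NUMA domain, cluster memory is requested under a DOMAINSET_PREF() policy, which prefers that domain but may fall back to another rather than fail outright. The call shape in isolation (kernel context assumed; domain and size stand in for the surrounding locals):

        void *clust;

        if (domain == -1)
                clust = contigmalloc(size, M_NETMAP, M_NOWAIT | M_ZERO,
                    0, ~0UL, PAGE_SIZE, 0);
        else
                clust = contigmalloc_domainset(size, M_NETMAP,
                    DOMAINSET_PREF(domain), M_NOWAIT | M_ZERO,
                    0, ~0UL, PAGE_SIZE, 0);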
@@ -1637,7 +1650,7 @@ netmap_mem_finalize_all(struct netmap_mem_d *nmd)
nmd->lasterr = 0;
nmd->nm_totalsize = 0;
for (i = 0; i < NETMAP_POOLS_NR; i++) {
- nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]);
+ nmd->lasterr = netmap_finalize_obj_allocator(nmd, &nmd->pools[i]);
if (nmd->lasterr)
goto error;
nmd->nm_totalsize += nmd->pools[i].memtotal;
@@ -1670,7 +1683,7 @@ error:
*/
static void *
_netmap_mem_private_new(size_t size, struct netmap_obj_params *p, int grp_id,
- struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
+ const struct netmap_mem_ops *ops, uint64_t memtotal, int *perr)
{
struct netmap_mem_d *d = NULL;
int i, err = 0;
@@ -1805,24 +1818,26 @@ netmap_mem_private_new(u_int txr, u_int txd, u_int rxr, u_int rxd,
return d;
}
-/* Reference iommu allocator - find existing or create new,
- * for not hw addapeters fallback to global allocator.
+/* Reference IOMMU and NUMA local allocator - find existing or create new,
+ * for non-hw adapters, fall back to global allocator.
*/
struct netmap_mem_d *
-netmap_mem_get_iommu(struct netmap_adapter *na)
+netmap_mem_get_allocator(struct netmap_adapter *na)
{
- int i, err, grp_id;
+ int i, domain, err, grp_id;
struct netmap_mem_d *nmd;
if (na == NULL || na->pdev == NULL)
return netmap_mem_get(&nm_mem);
+ domain = nm_numa_domain(na->pdev);
grp_id = nm_iommu_group_id(na->pdev);
NM_MTX_LOCK(nm_mem_list_lock);
nmd = netmap_last_mem_d;
do {
- if (!(nmd->flags & NETMAP_MEM_HIDDEN) && nmd->nm_grp == grp_id) {
+ if (!(nmd->flags & NETMAP_MEM_HIDDEN) &&
+ nmd->nm_grp == grp_id && nmd->nm_numa_domain == domain) {
nmd->refcount++;
NM_DBG_REFC(nmd, __FUNCTION__, __LINE__);
NM_MTX_UNLOCK(nm_mem_list_lock);
@@ -1837,7 +1852,7 @@ netmap_mem_get_iommu(struct netmap_adapter *na)
*nmd = nm_mem_blueprint;
- err = nm_mem_assign_id_locked(nmd, grp_id);
+ err = nm_mem_assign_id_locked(nmd, grp_id, domain);
if (err)
goto error_free;
@@ -2177,7 +2192,7 @@ netmap_mem2_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na)
}
-struct netmap_mem_ops netmap_mem_global_ops = {
+const struct netmap_mem_ops netmap_mem_global_ops = {
.nmd_get_lut = netmap_mem2_get_lut,
.nmd_get_info = netmap_mem2_get_info,
.nmd_ofstophys = netmap_mem2_ofstophys,
@@ -2881,7 +2896,7 @@ netmap_mem_pt_guest_create(nm_memid_t mem_id)
ptnmd->pt_ifs = NULL;
/* Assign new id in the guest (We have the lock) */
- err = nm_mem_assign_id_locked(&ptnmd->up, -1);
+ err = nm_mem_assign_id_locked(&ptnmd->up, -1, -1);
if (err)
goto error;
diff --git a/sys/dev/netmap/netmap_mem2.h b/sys/dev/netmap/netmap_mem2.h
index 1681d5c7721f..0123b010e944 100644
--- a/sys/dev/netmap/netmap_mem2.h
+++ b/sys/dev/netmap/netmap_mem2.h
@@ -146,7 +146,7 @@ struct netmap_mem_d* netmap_mem_private_new( u_int txr, u_int txd, u_int rxr, u_
#define netmap_mem_get(d) __netmap_mem_get(d, __FUNCTION__, __LINE__)
#define netmap_mem_put(d) __netmap_mem_put(d, __FUNCTION__, __LINE__)
struct netmap_mem_d* __netmap_mem_get(struct netmap_mem_d *, const char *, int);
-struct netmap_mem_d* netmap_mem_get_iommu(struct netmap_adapter *);
+struct netmap_mem_d* netmap_mem_get_allocator(struct netmap_adapter *);
void __netmap_mem_put(struct netmap_mem_d *, const char *, int);
struct netmap_mem_d* netmap_mem_find(nm_memid_t);
unsigned netmap_mem_bufsize(struct netmap_mem_d *nmd);
diff --git a/sys/dev/nfe/if_nfe.c b/sys/dev/nfe/if_nfe.c
index 901bdd5e1e6e..265181ef7ad0 100644
--- a/sys/dev/nfe/if_nfe.c
+++ b/sys/dev/nfe/if_nfe.c
@@ -567,11 +567,6 @@ nfe_attach(device_t dev)
goto fail;
ifp = sc->nfe_ifp = if_gethandle(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_gethandle()\n");
- error = ENOSPC;
- goto fail;
- }
/*
* Allocate Tx and Rx rings.
@@ -613,7 +608,7 @@ nfe_attach(device_t dev)
(IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO), 0);
}
- if (pci_find_cap(dev, PCIY_PMG, &reg) == 0)
+ if (pci_has_pm(dev))
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -716,8 +711,6 @@ nfe_detach(device_t dev)
nfe_set_macaddr(sc, eaddr);
if_free(ifp);
}
- if (sc->nfe_miibus)
- device_delete_child(dev, sc->nfe_miibus);
bus_generic_detach(dev);
if (sc->nfe_tq != NULL) {
taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
@@ -2085,7 +2078,7 @@ nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map,
BUS_DMASYNC_POSTREAD);
- for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
+ for (prog = 0; ; NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) {
if (count <= 0)
break;
count--;
@@ -2199,7 +2192,7 @@ nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp)
bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map,
BUS_DMASYNC_POSTREAD);
- for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
+ for (prog = 0; ; NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT),
vtag = 0) {
if (count <= 0)
break;
@@ -3316,12 +3309,10 @@ nfe_set_wol(struct nfe_softc *sc)
{
if_t ifp;
uint32_t wolctl;
- int pmc;
- uint16_t pmstat;
NFE_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->nfe_dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(sc->nfe_dev))
return;
ifp = sc->nfe_ifp;
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
@@ -3341,9 +3332,6 @@ nfe_set_wol(struct nfe_softc *sc)
NFE_RX_START);
}
/* Request PME if WOL is requested. */
- pmstat = pci_read_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->nfe_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->nfe_dev);
}
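
The WOL paths in nfe(4) and nge(4) drop the open-coded PCIR_POWER_STATUS read-modify-write in favor of the pci_has_pm()/pci_enable_pme() helpers used in the hunks above, letting the PCI bus code handle PME bookkeeping (including clearing PME on resume, as the nge_resume hunk shows). Sketch of the resulting shape (kernel context assumed):

static void
arm_wol(device_t dev, bool want_pme)
{
        if (!pci_has_pm(dev))           /* no PM capability at all */
                return;
        /* ... program device-specific wakeup filters here ... */
        if (want_pme)
                pci_enable_pme(dev);    /* set PME + PME-enable in PMCSR */
}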
diff --git a/sys/dev/nfsmb/nfsmb.c b/sys/dev/nfsmb/nfsmb.c
index 5594e5b152e6..b88b2ca0001f 100644
--- a/sys/dev/nfsmb/nfsmb.c
+++ b/sys/dev/nfsmb/nfsmb.c
@@ -196,13 +196,13 @@ nfsmbsub_attach(device_t dev)
mtx_init(&nfsmbsub_sc->lock, device_get_nameunit(dev), "nfsmb",
MTX_DEF);
- nfsmbsub_sc->smbus = device_add_child(dev, "smbus", -1);
+ nfsmbsub_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (nfsmbsub_sc->smbus == NULL) {
nfsmbsub_detach(dev);
return (EINVAL);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -232,7 +232,7 @@ nfsmb_attach(device_t dev)
mtx_init(&nfsmb_sc->lock, device_get_nameunit(dev), "nfsmb", MTX_DEF);
/* Allocate a new smbus device */
- nfsmb_sc->smbus = device_add_child(dev, "smbus", -1);
+ nfsmb_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
if (!nfsmb_sc->smbus) {
nfsmb_detach(dev);
return (EINVAL);
@@ -255,7 +255,7 @@ nfsmb_attach(device_t dev)
case NFSMB_DEVICEID_NF4_78S_SMB:
case NFSMB_DEVICEID_NF4_79_SMB:
/* Trying to add secondary device as slave */
- nfsmb_sc->subdev = device_add_child(dev, "nfsmb", -1);
+ nfsmb_sc->subdev = device_add_child(dev, "nfsmb", DEVICE_UNIT_ANY);
if (!nfsmb_sc->subdev) {
nfsmb_detach(dev);
return (EINVAL);
@@ -265,7 +265,7 @@ nfsmb_attach(device_t dev)
break;
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -275,13 +275,13 @@ nfsmbsub_detach(device_t dev)
{
device_t parent;
struct nfsmb_softc *nfsmbsub_sc = device_get_softc(dev);
+ int error;
parent = device_get_parent(dev);
- if (nfsmbsub_sc->smbus) {
- device_delete_child(dev, nfsmbsub_sc->smbus);
- nfsmbsub_sc->smbus = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mtx_destroy(&nfsmbsub_sc->lock);
if (nfsmbsub_sc->res) {
bus_release_resource(parent, SYS_RES_IOPORT, nfsmbsub_sc->rid,
@@ -295,16 +295,11 @@ static int
nfsmb_detach(device_t dev)
{
struct nfsmb_softc *nfsmb_sc = device_get_softc(dev);
+ int error;
- if (nfsmb_sc->subdev) {
- device_delete_child(dev, nfsmb_sc->subdev);
- nfsmb_sc->subdev = NULL;
- }
-
- if (nfsmb_sc->smbus) {
- device_delete_child(dev, nfsmb_sc->smbus);
- nfsmb_sc->smbus = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
mtx_destroy(&nfsmb_sc->lock);
if (nfsmb_sc->res) {
diff --git a/sys/dev/nge/if_nge.c b/sys/dev/nge/if_nge.c
index dc3be913f89b..b9cf77cc0428 100644
--- a/sys/dev/nge/if_nge.c
+++ b/sys/dev/nge/if_nge.c
@@ -900,11 +900,6 @@ nge_attach(device_t dev)
nge_sysctl_node(sc);
ifp = sc->nge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -920,7 +915,7 @@ nge_attach(device_t dev)
* supply(3VAUX) to drive PME such that checking PCI power
* management capability is necessary.
*/
- if (pci_find_cap(sc->nge_dev, PCIY_PMG, &i) == 0)
+ if (pci_has_pm(sc->nge_dev))
if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
@@ -1004,10 +999,6 @@ nge_detach(device_t dev)
ether_ifdetach(ifp);
}
- if (sc->nge_miibus != NULL) {
- device_delete_child(dev, sc->nge_miibus);
- sc->nge_miibus = NULL;
- }
bus_generic_detach(dev);
if (sc->nge_intrhand != NULL)
bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand);
@@ -2519,12 +2510,10 @@ nge_wol(struct nge_softc *sc)
{
if_t ifp;
uint32_t reg;
- uint16_t pmstat;
- int pmc;
NGE_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(sc->nge_dev))
return;
ifp = sc->nge_ifp;
@@ -2565,11 +2554,8 @@ nge_wol(struct nge_softc *sc)
}
/* Request PME. */
- pmstat = pci_read_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->nge_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->nge_dev);
}
/*
@@ -2604,23 +2590,11 @@ nge_resume(device_t dev)
{
struct nge_softc *sc;
if_t ifp;
- uint16_t pmstat;
- int pmc;
sc = device_get_softc(dev);
NGE_LOCK(sc);
ifp = sc->nge_ifp;
- if (pci_find_cap(sc->nge_dev, PCIY_PMG, &pmc) == 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->nge_dev,
- pmc + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->nge_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
- }
if (if_getflags(ifp) & IFF_UP) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
nge_init_locked(sc);
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index 5a132baa5f1c..2334265cefa7 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -136,10 +136,6 @@ ntb_net_attach(device_t dev)
int i;
ifp = sc->ifp = if_gethandle(IFT_ETHER);
- if (ifp == NULL) {
- printf("ntb: Cannot allocate ifnet structure\n");
- return (ENOMEM);
- }
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setdev(ifp, dev);
diff --git a/sys/dev/ntb/ntb.c b/sys/dev/ntb/ntb.c
index 38def5be0bb1..53d9b47a9114 100644
--- a/sys/dev/ntb/ntb.c
+++ b/sys/dev/ntb/ntb.c
@@ -107,7 +107,7 @@ ntb_register_device(device_t dev)
nc->dbcnt = db;
nc->dbmask = (db == 0) ? 0 : (0xffffffffffffffff >> (64 - db));
rm_init(&nc->ctx_lock, "ntb ctx");
- nc->dev = device_add_child(dev, name, -1);
+ nc->dev = device_add_child(dev, name, DEVICE_UNIT_ANY);
if (nc->dev == NULL) {
ntb_unregister_device(dev);
return (ENOMEM);
@@ -142,7 +142,7 @@ ntb_register_device(device_t dev)
i++;
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/ntb/ntb_transport.c b/sys/dev/ntb/ntb_transport.c
index 6e2eefae88ec..6e085d8d276f 100644
--- a/sys/dev/ntb/ntb_transport.c
+++ b/sys/dev/ntb/ntb_transport.c
@@ -462,7 +462,7 @@ ntb_transport_attach(device_t dev)
nc->consumer = i;
nc->qpoff = qpu;
nc->qpcnt = qp;
- nc->dev = device_add_child(dev, name, -1);
+ nc->dev = device_add_child(dev, name, DEVICE_UNIT_ANY);
if (nc->dev == NULL) {
device_printf(dev, "Can not add child.\n");
break;
@@ -511,7 +511,7 @@ ntb_transport_attach(device_t dev)
if (enable_xeon_watchdog != 0)
callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
err:
diff --git a/sys/dev/null/null.c b/sys/dev/null/null.c
index 7ffc618e63ee..8525eb9543c3 100644
--- a/sys/dev/null/null.c
+++ b/sys/dev/null/null.c
@@ -4,6 +4,7 @@
* Copyright (c) 2000 Mark R. V. Murray & Jeroen C. van Gelderen
* Copyright (c) 2001-2004 Mark R. V. Murray
* Copyright (c) 2014 Eitan Adler
+ * Copyright (c) 2025 Pietro Cerutti
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -39,6 +40,7 @@
#include <sys/disk.h>
#include <sys/bus.h>
#include <sys/filio.h>
+#include <sys/event.h>
#include <machine/bus.h>
#include <machine/vmparam.h>
@@ -53,12 +55,26 @@ static d_write_t null_write;
static d_ioctl_t null_ioctl;
static d_ioctl_t zero_ioctl;
static d_read_t zero_read;
+static d_kqfilter_t kqfilter;
+static int one_ev(struct knote *kn, long hint);
+static int zero_ev(struct knote *kn, long hint);
+
+static const struct filterops one_fop = {
+ .f_isfd = 1,
+ .f_event = one_ev
+};
+
+static const struct filterops zero_fop = {
+ .f_isfd = 1,
+ .f_event = zero_ev
+};
static struct cdevsw full_cdevsw = {
.d_version = D_VERSION,
.d_read = zero_read,
.d_write = full_write,
.d_ioctl = zero_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "full",
};
@@ -67,6 +83,7 @@ static struct cdevsw null_cdevsw = {
.d_read = (d_read_t *)nullop,
.d_write = null_write,
.d_ioctl = null_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "null",
};
@@ -75,6 +92,7 @@ static struct cdevsw zero_cdevsw = {
.d_read = zero_read,
.d_write = null_write,
.d_ioctl = zero_ioctl,
+ .d_kqfilter = kqfilter,
.d_name = "zero",
.d_flags = D_MMAP_ANON,
};
@@ -197,5 +215,35 @@ null_modevent(module_t mod __unused, int type, void *data __unused)
return (0);
}
+static int
+one_ev(struct knote *kn, long hint)
+{
+
+ return (1);
+}
+
+static int
+zero_ev(struct knote *kn, long hint)
+{
+
+ return (0);
+}
+
+static int
+kqfilter(struct cdev *dev, struct knote *kn)
+{
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ kn->kn_fop = dev->si_devsw == &null_cdevsw ? &zero_fop : &one_fop;
+ return (0);
+ case EVFILT_WRITE:
+ kn->kn_fop = dev->si_devsw == &full_cdevsw ? &zero_fop : &one_fop;
+ return (0);
+ default:
+ return (EOPNOTSUPP);
+ }
+}
+
DEV_MODULE(null, null_modevent, NULL);
MODULE_VERSION(null, 1);
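With d_kqfilter wired into all three cdevsw entries, null(4) becomes pollable: reads on /dev/null and writes on /dev/full get zero_fop (never ready), everything else gets one_fop (always ready). A userland sketch, illustrative rather than from the commit, confirming that /dev/zero reports readable immediately:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev, ev;
	struct timespec ts = { 0, 0 };		/* poll; don't block */
	int fd, kq, n;

	fd = open("/dev/zero", O_RDONLY);
	kq = kqueue();
	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);

	/* one_ev() always returns 1, so this reports one ready event. */
	n = kevent(kq, &kev, 1, &ev, 1, &ts);
	printf("ready events: %d\n", n);	/* expect 1 */

	close(kq);
	close(fd);
	return (0);
}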
diff --git a/sys/dev/nvdimm/nvdimm_acpi.c b/sys/dev/nvdimm/nvdimm_acpi.c
index 995e60e8b4ee..39697e52eebb 100644
--- a/sys/dev/nvdimm/nvdimm_acpi.c
+++ b/sys/dev/nvdimm/nvdimm_acpi.c
@@ -109,7 +109,7 @@ nvdimm_root_create_devs(device_t dev, ACPI_TABLE_NFIT *nfitbl)
if (dimm_handle == NULL)
continue;
- child = BUS_ADD_CHILD(dev, 100, "nvdimm", -1);
+ child = BUS_ADD_CHILD(dev, 100, "nvdimm", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "failed to create nvdimm\n");
return (ENXIO);
@@ -188,22 +188,24 @@ nvdimm_root_attach(device_t dev)
error = nvdimm_root_create_devs(dev, nfitbl);
if (error != 0)
return (error);
- error = bus_generic_attach(dev);
- if (error != 0)
- return (error);
+ bus_attach_children(dev);
root = device_get_softc(dev);
error = nvdimm_root_create_spas(root, nfitbl);
AcpiPutTable(&nfitbl->Header);
return (error);
}
+static void
+nvdimm_root_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_NVDIMM_ACPI);
+}
+
static int
nvdimm_root_detach(device_t dev)
{
struct nvdimm_root_dev *root;
struct SPA_mapping *spa, *next;
- device_t *children;
- int i, error, num_children;
root = device_get_softc(dev);
SLIST_FOREACH_SAFE(spa, &root->spas, link, next) {
@@ -212,17 +214,7 @@ nvdimm_root_detach(device_t dev)
SLIST_REMOVE_HEAD(&root->spas, link);
free(spa, M_NVDIMM_ACPI);
}
- error = bus_generic_detach(dev);
- if (error != 0)
- return (error);
- error = device_get_children(dev, &children, &num_children);
- if (error != 0)
- return (error);
- for (i = 0; i < num_children; i++)
- free(device_get_ivars(children[i]), M_NVDIMM_ACPI);
- free(children, M_TEMP);
- error = device_delete_children(dev);
- return (error);
+ return (bus_generic_detach(dev));
}
static int
@@ -264,6 +256,7 @@ static device_method_t nvdimm_acpi_methods[] = {
DEVMETHOD(device_attach, nvdimm_root_attach),
DEVMETHOD(device_detach, nvdimm_root_detach),
DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_child_deleted, nvdimm_root_child_deleted),
DEVMETHOD(bus_read_ivar, nvdimm_root_read_ivar),
DEVMETHOD(bus_write_ivar, nvdimm_root_write_ivar),
DEVMETHOD(bus_child_location, nvdimm_root_child_location),
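The detach simplification above generalizes: per-child state that used to be freed by walking device_get_children() at detach time is now released from a bus_child_deleted method, which newbus invokes as each child goes away, so detach itself shrinks to bus_generic_detach(). A sketch of the pattern for a hypothetical bus (M_MYBUS and the method-table entry are illustrative):

static void
mybus_child_deleted(device_t dev, device_t child)
{
	/* Called once per child as newbus deletes it during detach. */
	free(device_get_ivars(child), M_MYBUS);
}

/* In the driver's method table: */
/* DEVMETHOD(bus_child_deleted, mybus_child_deleted), */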
diff --git a/sys/dev/nvdimm/nvdimm_e820.c b/sys/dev/nvdimm/nvdimm_e820.c
index 3fbe2df31209..f916801750b6 100644
--- a/sys/dev/nvdimm/nvdimm_e820.c
+++ b/sys/dev/nvdimm/nvdimm_e820.c
@@ -257,25 +257,21 @@ static void
nvdimm_e820_identify(driver_t *driver, device_t parent)
{
device_t child;
- caddr_t kmdp;
if (resource_disabled(driver->name, 0))
return;
/* Just create a single instance of the fake bus. */
- if (device_find_child(parent, driver->name, -1) != NULL)
+ if (device_find_child(parent, driver->name, DEVICE_UNIT_ANY) != NULL)
return;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- smapbase = (const void *)preload_search_info(kmdp,
+ smapbase = (const void *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_SMAP);
/* Only supports BIOS SMAP for now. */
if (smapbase == NULL)
return;
- child = BUS_ADD_CHILD(parent, 0, driver->name, -1);
+ child = BUS_ADD_CHILD(parent, 0, driver->name, DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(parent, "add %s child failed\n", driver->name);
}
diff --git a/sys/dev/nvme/nvme.c b/sys/dev/nvme/nvme.c
index 84f365024f13..d119f9877aaa 100644
--- a/sys/dev/nvme/nvme.c
+++ b/sys/dev/nvme/nvme.c
@@ -51,7 +51,7 @@ int32_t nvme_retry_count;
MALLOC_DEFINE(M_NVME, "nvme", "nvme(4) memory allocations");
static void
-nvme_init(void)
+nvme_init(void *dummy __unused)
{
uint32_t i;
@@ -62,7 +62,7 @@ nvme_init(void)
SYSINIT(nvme_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, nvme_init, NULL);
static void
-nvme_uninit(void)
+nvme_uninit(void *dummy __unused)
{
}
@@ -295,7 +295,6 @@ nvme_register_consumer(nvme_cons_ns_fn_t ns_fn, nvme_cons_ctrlr_fn_t ctrlr_fn,
void
nvme_unregister_consumer(struct nvme_consumer *consumer)
{
-
consumer->id = INVALID_CONSUMER_ID;
}
diff --git a/sys/dev/nvme/nvme.h b/sys/dev/nvme/nvme.h
index a4baade7df5d..f4ea08f129c0 100644
--- a/sys/dev/nvme/nvme.h
+++ b/sys/dev/nvme/nvme.h
@@ -35,11 +35,17 @@
#include <sys/param.h>
#include <sys/endian.h>
+#ifndef _KERNEL
+#include <stdbool.h>
+#endif
+
+struct sbuf;
#define NVME_PASSTHROUGH_CMD _IOWR('n', 0, struct nvme_pt_command)
#define NVME_RESET_CONTROLLER _IO('n', 1)
#define NVME_GET_NSID _IOR('n', 2, struct nvme_get_nsid)
#define NVME_GET_MAX_XFER_SIZE _IOR('n', 3, uint64_t)
+#define NVME_GET_CONTROLLER_DATA _IOR('n', 4, struct nvme_controller_data)
#define NVME_IO_TEST _IOWR('n', 100, struct nvme_io_test)
#define NVME_BIO_TEST _IOWR('n', 101, struct nvme_io_test)
@@ -648,8 +654,16 @@ enum nvme_critical_warning_state {
NVME_CRIT_WARN_ST_PERSISTENT_MEMORY_REGION = 0x20,
};
#define NVME_CRIT_WARN_ST_RESERVED_MASK (0xC0)
-#define NVME_ASYNC_EVENT_NS_ATTRIBUTE (0x100)
-#define NVME_ASYNC_EVENT_FW_ACTIVATE (0x200)
+#define NVME_ASYNC_EVENT_NS_ATTRIBUTE (1U << 8)
+#define NVME_ASYNC_EVENT_FW_ACTIVATE (1U << 9)
+#define NVME_ASYNC_EVENT_TELEMETRY_LOG (1U << 10)
+#define NVME_ASYNC_EVENT_ASYM_NS_ACC (1U << 11)
+#define NVME_ASYNC_EVENT_PRED_LAT_DELTA (1U << 12)
+#define NVME_ASYNC_EVENT_LBA_STATUS (1U << 13)
+#define NVME_ASYNC_EVENT_ENDURANCE_DELTA (1U << 14)
+#define NVME_ASYNC_EVENT_NVM_SHUTDOWN (1U << 15)
+#define NVME_ASYNC_EVENT_ZONE_DELTA (1U << 27)
+#define NVME_ASYNC_EVENT_DISCOVERY_DELTA (1U << 31)
/* slot for current FW */
#define NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT (0)
@@ -832,7 +846,7 @@ struct nvme_command {
uint32_t cdw13; /* command-specific */
uint32_t cdw14; /* command-specific */
uint32_t cdw15; /* command-specific */
-};
+} __aligned(8);
_Static_assert(sizeof(struct nvme_command) == 16 * 4, "bad size for nvme_command");
@@ -1493,9 +1507,7 @@ struct nvme_namespace_data {
uint8_t eui64[8];
/** lba format support */
- uint32_t lbaf[16];
-
- uint8_t reserved7[192];
+ uint32_t lbaf[64];
uint8_t vendor_specific[3712];
} __packed __aligned(4);
@@ -1601,7 +1613,7 @@ struct nvme_health_information_page {
uint32_t ttftmt2;
uint8_t reserved2[280];
-} __packed __aligned(4);
+} __packed __aligned(8);
_Static_assert(sizeof(struct nvme_health_information_page) == 512, "bad size for nvme_health_information_page");
@@ -1652,6 +1664,30 @@ struct nvme_device_self_test_page {
_Static_assert(sizeof(struct nvme_device_self_test_page) == 564,
"bad size for nvme_device_self_test_page");
+/*
+ * Header structure for both host initiated telemetry (page 7) and controller
+ * initiated telemetry (page 8).
+ */
+struct nvme_telemetry_log_page {
+ uint8_t identifier;
+ uint8_t rsvd[4];
+ uint8_t oui[3];
+ uint16_t da1_last;
+ uint16_t da2_last;
+ uint16_t da3_last;
+ uint8_t rsvd2[2];
+ uint32_t da4_last;
+ uint8_t rsvd3[361];
+ uint8_t hi_gen;
+ uint8_t ci_avail;
+ uint8_t ci_gen;
+ uint8_t reason[128];
+ /* Blocks of telemetry data follow */
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct nvme_telemetry_log_page) == 512,
+ "bad size for nvme_telemetry_log");
+
struct nvme_discovery_log_entry {
uint8_t trtype;
uint8_t adrfam;
@@ -1868,6 +1904,9 @@ struct nvme_hmb_desc {
#define nvme_completion_is_error(cpl) \
(NVME_STATUS_GET_SC((cpl)->status) != 0 || NVME_STATUS_GET_SCT((cpl)->status) != 0)
+void nvme_cpl_sbuf(const struct nvme_completion *cpl, struct sbuf *sbuf);
+void nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb);
+void nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sbuf);
void nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen);
#ifdef _KERNEL
@@ -1878,6 +1917,7 @@ struct thread;
struct nvme_namespace;
struct nvme_controller;
struct nvme_consumer;
+struct nvme_passthru_cmd;
typedef void (*nvme_cb_fn_t)(void *, const struct nvme_completion *);
@@ -1897,6 +1937,11 @@ int nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
uint32_t nsid, int is_user_buffer,
int is_admin_cmd);
+int nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
+ struct nvme_passthru_cmd *npc,
+ uint32_t nsid, bool is_user,
+ bool is_admin);
+
/* Admin functions */
void nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr,
uint8_t feature, uint32_t cdw11,
@@ -2108,8 +2153,6 @@ static inline
void nvme_namespace_data_swapbytes(struct nvme_namespace_data *s __unused)
{
#if _BYTE_ORDER != _LITTLE_ENDIAN
- int i;
-
s->nsze = le64toh(s->nsze);
s->ncap = le64toh(s->ncap);
s->nuse = le64toh(s->nuse);
@@ -2128,7 +2171,7 @@ void nvme_namespace_data_swapbytes(struct nvme_namespace_data *s __unused)
s->anagrpid = le32toh(s->anagrpid);
s->nvmsetid = le16toh(s->nvmsetid);
s->endgid = le16toh(s->endgid);
- for (i = 0; i < 16; i++)
+ for (unsigned i = 0; i < nitems(s->lbaf); i++)
s->lbaf[i] = le32toh(s->lbaf[i]);
#endif
}
diff --git a/sys/dev/nvme/nvme_ahci.c b/sys/dev/nvme/nvme_ahci.c
index 888207a454f7..b06661226d34 100644
--- a/sys/dev/nvme/nvme_ahci.c
+++ b/sys/dev/nvme/nvme_ahci.c
@@ -124,6 +124,5 @@ bad:
static int
nvme_ahci_detach(device_t dev)
{
-
return (nvme_detach(dev));
}
diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c
index b7b03082c54e..3a1894bf754d 100644
--- a/sys/dev/nvme/nvme_ctrlr.c
+++ b/sys/dev/nvme/nvme_ctrlr.c
@@ -39,15 +39,19 @@
#include <sys/uio.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
#include "nvme_private.h"
+#include "nvme_linux.h"
#define B4_CHK_RDY_DELAY_MS 2300 /* work around controller bug */
static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
- struct nvme_async_event_request *aer);
+ struct nvme_async_event_request *aer);
static void
nvme_ctrlr_barrier(struct nvme_controller *ctrlr, int flags)
@@ -231,7 +235,7 @@ nvme_ctrlr_construct_io_qpairs(struct nvme_controller *ctrlr)
}
static void
-nvme_ctrlr_fail(struct nvme_controller *ctrlr)
+nvme_ctrlr_fail(struct nvme_controller *ctrlr, bool admin_also)
{
int i;
@@ -241,7 +245,10 @@ nvme_ctrlr_fail(struct nvme_controller *ctrlr)
* a different error, though when we fail, that hardly matters).
*/
ctrlr->is_failed = true;
- nvme_qpair_fail(&ctrlr->adminq);
+ if (admin_also) {
+ ctrlr->is_failed_admin = true;
+ nvme_qpair_fail(&ctrlr->adminq);
+ }
if (ctrlr->ioq != NULL) {
for (i = 0; i < ctrlr->num_io_queues; i++) {
nvme_qpair_fail(&ctrlr->ioq[i]);
@@ -414,6 +421,7 @@ nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
TSENTER();
+ ctrlr->is_failed_admin = true;
nvme_ctrlr_disable_qpairs(ctrlr);
err = nvme_ctrlr_disable(ctrlr);
@@ -422,6 +430,8 @@ nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
err = nvme_ctrlr_enable(ctrlr);
out:
+ if (err == 0)
+ ctrlr->is_failed_admin = false;
TSEXIT();
return (err);
@@ -434,11 +444,10 @@ nvme_ctrlr_reset(struct nvme_controller *ctrlr)
cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
- if (cmpset == 0 || ctrlr->is_failed)
+ if (cmpset == 0)
/*
- * Controller is already resetting or has failed. Return
- * immediately since there is no need to kick off another
- * reset in these cases.
+ * Controller is already resetting. Return immediately since
+ * there is no need to kick off another reset.
*/
return;
@@ -591,7 +600,6 @@ nvme_ctrlr_construct_namespaces(struct nvme_controller *ctrlr)
static bool
is_log_page_id_valid(uint8_t page_id)
{
-
switch (page_id) {
case NVME_LOG_ERROR:
case NVME_LOG_HEALTH_INFORMATION:
@@ -647,7 +655,6 @@ static void
nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
uint8_t state)
{
-
if (state & NVME_CRIT_WARN_ST_AVAILABLE_SPARE)
nvme_printf(ctrlr, "SMART WARNING: available spare space below threshold\n");
@@ -674,96 +681,6 @@ nvme_ctrlr_log_critical_warnings(struct nvme_controller *ctrlr,
}
static void
-nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
-{
- struct nvme_async_event_request *aer = arg;
- struct nvme_health_information_page *health_info;
- struct nvme_ns_list *nsl;
- struct nvme_error_information_entry *err;
- int i;
-
- /*
- * If the log page fetch for some reason completed with an error,
- * don't pass log page data to the consumers. In practice, this case
- * should never happen.
- */
- if (nvme_completion_is_error(cpl))
- nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
- aer->log_page_id, NULL, 0);
- else {
- /* Convert data to host endian */
- switch (aer->log_page_id) {
- case NVME_LOG_ERROR:
- err = (struct nvme_error_information_entry *)aer->log_page_buffer;
- for (i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
- nvme_error_information_entry_swapbytes(err++);
- break;
- case NVME_LOG_HEALTH_INFORMATION:
- nvme_health_information_page_swapbytes(
- (struct nvme_health_information_page *)aer->log_page_buffer);
- break;
- case NVME_LOG_CHANGED_NAMESPACE:
- nvme_ns_list_swapbytes(
- (struct nvme_ns_list *)aer->log_page_buffer);
- break;
- case NVME_LOG_COMMAND_EFFECT:
- nvme_command_effects_page_swapbytes(
- (struct nvme_command_effects_page *)aer->log_page_buffer);
- break;
- case NVME_LOG_RES_NOTIFICATION:
- nvme_res_notification_page_swapbytes(
- (struct nvme_res_notification_page *)aer->log_page_buffer);
- break;
- case NVME_LOG_SANITIZE_STATUS:
- nvme_sanitize_status_page_swapbytes(
- (struct nvme_sanitize_status_page *)aer->log_page_buffer);
- break;
- default:
- break;
- }
-
- if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
- health_info = (struct nvme_health_information_page *)
- aer->log_page_buffer;
- nvme_ctrlr_log_critical_warnings(aer->ctrlr,
- health_info->critical_warning);
- /*
- * Critical warnings reported through the
- * SMART/health log page are persistent, so
- * clear the associated bits in the async event
- * config so that we do not receive repeated
- * notifications for the same event.
- */
- aer->ctrlr->async_event_config &=
- ~health_info->critical_warning;
- nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
- aer->ctrlr->async_event_config, NULL, NULL);
- } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE &&
- !nvme_use_nvd) {
- nsl = (struct nvme_ns_list *)aer->log_page_buffer;
- for (i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
- if (nsl->ns[i] > NVME_MAX_NAMESPACES)
- break;
- nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
- }
- }
-
- /*
- * Pass the cpl data from the original async event completion,
- * not the log page fetch.
- */
- nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
- aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
- }
-
- /*
- * Repost another asynchronous event request to replace the one
- * that just completed.
- */
- nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
-}
-
-static void
nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
{
struct nvme_async_event_request *aer = arg;
@@ -778,33 +695,18 @@ nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
return;
}
- /* Associated log page is in bits 23:16 of completion entry dw0. */
+ /*
+ * Save the completion status; the associated log page is in bits
+ * 23:16 of completion entry dw0. Print a message and queue the event
+ * for further processing.
+ */
+ memcpy(&aer->cpl, cpl, sizeof(*cpl));
aer->log_page_id = NVMEV(NVME_ASYNC_EVENT_LOG_PAGE_ID, cpl->cdw0);
-
nvme_printf(aer->ctrlr, "async event occurred (type 0x%x, info 0x%02x,"
" page 0x%02x)\n", NVMEV(NVME_ASYNC_EVENT_TYPE, cpl->cdw0),
NVMEV(NVME_ASYNC_EVENT_INFO, cpl->cdw0),
aer->log_page_id);
-
- if (is_log_page_id_valid(aer->log_page_id)) {
- aer->log_page_size = nvme_ctrlr_get_log_page_size(aer->ctrlr,
- aer->log_page_id);
- memcpy(&aer->cpl, cpl, sizeof(*cpl));
- nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
- NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer,
- aer->log_page_size, nvme_ctrlr_async_event_log_page_cb,
- aer);
- /* Wait to notify consumers until after log page is fetched. */
- } else {
- nvme_notify_async_consumers(aer->ctrlr, cpl, aer->log_page_id,
- NULL, 0);
-
- /*
- * Repost another asynchronous event request to replace the one
- * that just completed.
- */
- nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
- }
+ taskqueue_enqueue(aer->ctrlr->taskqueue, &aer->task);
}
static void
@@ -813,9 +715,21 @@ nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
{
struct nvme_request *req;
+ /*
+ * We're racing the reset thread, so let that process submit this again.
+ * XXX does this really solve that race? And is that race even possible
+ * since we only reset when we've not heard from the card in a long
+ * time? Why would we get an AER in the middle of that just before we
+ * kick off the reset?
+ */
+ if (ctrlr->is_resetting)
+ return;
+
aer->ctrlr = ctrlr;
- req = nvme_allocate_request_null(nvme_ctrlr_async_event_cb, aer);
+ req = nvme_allocate_request_null(M_WAITOK, nvme_ctrlr_async_event_cb,
+ aer);
aer->req = req;
+ aer->log_page_id = 0; /* Not a valid page */
/*
* Disable timeout here, since asynchronous event requests should by
@@ -868,7 +782,6 @@ nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
static void
nvme_ctrlr_configure_int_coalescing(struct nvme_controller *ctrlr)
{
-
ctrlr->int_coal_time = 0;
TUNABLE_INT_FETCH("hw.nvme.int_coal_time",
&ctrlr->int_coal_time);
@@ -1089,7 +1002,7 @@ nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
return;
if (resetting && nvme_ctrlr_identify(ctrlr) != 0) {
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, false);
return;
}
@@ -1104,7 +1017,7 @@ nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
if (resetting) {
old_num_io_queues = ctrlr->num_io_queues;
if (nvme_ctrlr_set_num_qpairs(ctrlr) != 0) {
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, false);
return;
}
@@ -1122,12 +1035,12 @@ nvme_ctrlr_start(void *ctrlr_arg, bool resetting)
nvme_ctrlr_hmb_enable(ctrlr, true, true);
if (nvme_ctrlr_create_qpairs(ctrlr) != 0) {
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, false);
return;
}
if (nvme_ctrlr_construct_namespaces(ctrlr) != 0) {
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, false);
return;
}
@@ -1146,9 +1059,8 @@ nvme_ctrlr_start_config_hook(void *arg)
TSENTER();
- if (nvme_ctrlr_hw_reset(ctrlr) != 0) {
-fail:
- nvme_ctrlr_fail(ctrlr);
+ if (nvme_ctrlr_hw_reset(ctrlr) != 0 || ctrlr->fail_on_reset != 0) {
+ nvme_ctrlr_fail(ctrlr, true);
config_intrhook_disestablish(&ctrlr->config_hook);
return;
}
@@ -1161,13 +1073,15 @@ fail:
nvme_ctrlr_construct_io_qpairs(ctrlr) == 0)
nvme_ctrlr_start(ctrlr, false);
else
- goto fail;
+ nvme_ctrlr_fail(ctrlr, false);
nvme_sysctl_initialize_ctrlr(ctrlr);
config_intrhook_disestablish(&ctrlr->config_hook);
- ctrlr->is_initialized = 1;
- nvme_notify_new_controller(ctrlr);
+ if (!ctrlr->is_failed) {
+ ctrlr->is_initialized = true;
+ nvme_notify_new_controller(ctrlr);
+ }
TSEXIT();
}
@@ -1184,12 +1098,146 @@ nvme_ctrlr_reset_task(void *arg, int pending)
nvme_ctrlr_start(ctrlr, true);
} else {
nvme_ctrlr_devctl_log(ctrlr, "RESET", "event=\"timed_out\"");
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, true);
}
atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
}
+static void
+nvme_ctrlr_aer_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_async_event_request *aer = arg;
+
+ mtx_lock(&aer->mtx);
+ if (nvme_completion_is_error(cpl))
+ aer->log_page_size = (uint32_t)-1;
+ else
+ aer->log_page_size = nvme_ctrlr_get_log_page_size(
+ aer->ctrlr, aer->log_page_id);
+ wakeup(aer);
+ mtx_unlock(&aer->mtx);
+}
+
+static void
+nvme_ctrlr_aer_task(void *arg, int pending)
+{
+ struct nvme_async_event_request *aer = arg;
+ struct nvme_controller *ctrlr = aer->ctrlr;
+ uint32_t len;
+
+ /*
+ * We're resetting, so just punt.
+ */
+ if (ctrlr->is_resetting)
+ return;
+
+ if (!is_log_page_id_valid(aer->log_page_id)) {
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+ nvme_notify_async_consumers(ctrlr, &aer->cpl, aer->log_page_id,
+ NULL, 0);
+ goto out;
+ }
+
+ aer->log_page_size = 0;
+ len = nvme_ctrlr_get_log_page_size(aer->ctrlr, aer->log_page_id);
+ nvme_ctrlr_cmd_get_log_page(aer->ctrlr, aer->log_page_id,
+ NVME_GLOBAL_NAMESPACE_TAG, aer->log_page_buffer, len,
+ nvme_ctrlr_aer_done, aer);
+ mtx_lock(&aer->mtx);
+ while (aer->log_page_size == 0)
+ mtx_sleep(aer, &aer->mtx, PRIBIO, "nvme_pt", 0);
+ mtx_unlock(&aer->mtx);
+
+ if (aer->log_page_size == (uint32_t)-1) {
+ /*
+ * If the log page fetch for some reason completed with an
+ * error, don't pass log page data to the consumers. In
+ * practice, this case should never happen.
+ */
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, NULL, 0);
+ goto out;
+ }
+
+ /* Convert data to host endian */
+ switch (aer->log_page_id) {
+ case NVME_LOG_ERROR: {
+ struct nvme_error_information_entry *err =
+ (struct nvme_error_information_entry *)aer->log_page_buffer;
+ for (int i = 0; i < (aer->ctrlr->cdata.elpe + 1); i++)
+ nvme_error_information_entry_swapbytes(err++);
+ break;
+ }
+ case NVME_LOG_HEALTH_INFORMATION:
+ nvme_health_information_page_swapbytes(
+ (struct nvme_health_information_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_CHANGED_NAMESPACE:
+ nvme_ns_list_swapbytes(
+ (struct nvme_ns_list *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_COMMAND_EFFECT:
+ nvme_command_effects_page_swapbytes(
+ (struct nvme_command_effects_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_RES_NOTIFICATION:
+ nvme_res_notification_page_swapbytes(
+ (struct nvme_res_notification_page *)aer->log_page_buffer);
+ break;
+ case NVME_LOG_SANITIZE_STATUS:
+ nvme_sanitize_status_page_swapbytes(
+ (struct nvme_sanitize_status_page *)aer->log_page_buffer);
+ break;
+ default:
+ break;
+ }
+
+ if (aer->log_page_id == NVME_LOG_HEALTH_INFORMATION) {
+ struct nvme_health_information_page *health_info =
+ (struct nvme_health_information_page *)aer->log_page_buffer;
+
+ /*
+ * Critical warnings reported through the SMART/health log page
+ * are persistent, so clear the associated bits in the async
+ * event config so that we do not receive repeated notifications
+ * for the same event.
+ */
+ nvme_ctrlr_log_critical_warnings(aer->ctrlr,
+ health_info->critical_warning);
+ aer->ctrlr->async_event_config &=
+ ~health_info->critical_warning;
+ nvme_ctrlr_cmd_set_async_event_config(aer->ctrlr,
+ aer->ctrlr->async_event_config, NULL, NULL);
+ } else if (aer->log_page_id == NVME_LOG_CHANGED_NAMESPACE) {
+ struct nvme_ns_list *nsl =
+ (struct nvme_ns_list *)aer->log_page_buffer;
+ for (int i = 0; i < nitems(nsl->ns) && nsl->ns[i] != 0; i++) {
+ if (nsl->ns[i] > NVME_MAX_NAMESPACES)
+ break;
+ nvme_notify_ns(aer->ctrlr, nsl->ns[i]);
+ }
+ }
+
+ /*
+ * Pass the cpl data from the original async event completion, not the
+ * log page fetch.
+ */
+ nvme_notify_async_consumers(aer->ctrlr, &aer->cpl,
+ aer->log_page_id, aer->log_page_buffer, aer->log_page_size);
+
+ /*
+ * Repost another asynchronous event request to replace the one
+ * that just completed.
+ */
+out:
+ nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
+}
+
/*
* Poll all the queues enabled on the device for completion.
*/
@@ -1220,6 +1268,34 @@ nvme_ctrlr_shared_handler(void *arg)
nvme_mmio_write_4(ctrlr, intmc, 1);
}
+#define NVME_MAX_PAGES (int)(1024 / sizeof(vm_page_t))
+
+static int
+nvme_user_ioctl_req(vm_offset_t addr, size_t len, bool is_read,
+ vm_page_t *upages, int max_pages, int *npagesp, struct nvme_request **req,
+ nvme_cb_fn_t cb_fn, void *cb_arg)
+{
+ vm_prot_t prot = VM_PROT_READ;
+ int err;
+
+ if (is_read)
+ prot |= VM_PROT_WRITE; /* Device will write to host memory */
+ err = vm_fault_hold_pages(&curproc->p_vmspace->vm_map,
+ addr, len, prot, upages, max_pages, npagesp);
+ if (err != 0)
+ return (err);
+ *req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
+ (*req)->payload = memdesc_vmpages(upages, len, addr & PAGE_MASK);
+ (*req)->payload_valid = true;
+ return (0);
+}
+
+static void
+nvme_user_ioctl_free(vm_page_t *pages, int npage)
+{
+ vm_page_unhold_pages(pages, npage);
+}
+
static void
nvme_pt_done(void *arg, const struct nvme_completion *cpl)
{
@@ -1242,40 +1318,33 @@ nvme_pt_done(void *arg, const struct nvme_completion *cpl)
int
nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
- struct nvme_pt_command *pt, uint32_t nsid, int is_user_buffer,
+ struct nvme_pt_command *pt, uint32_t nsid, int is_user,
int is_admin_cmd)
{
- struct nvme_request *req;
- struct mtx *mtx;
- struct buf *buf = NULL;
- int ret = 0;
+ struct nvme_request *req;
+ struct mtx *mtx;
+ int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
if (pt->len > 0) {
if (pt->len > ctrlr->max_xfer_size) {
- nvme_printf(ctrlr, "pt->len (%d) "
- "exceeds max_xfer_size (%d)\n", pt->len,
- ctrlr->max_xfer_size);
- return EIO;
+ nvme_printf(ctrlr,
+ "len (%d) exceeds max_xfer_size (%d)\n",
+ pt->len, ctrlr->max_xfer_size);
+ return (EIO);
}
- if (is_user_buffer) {
- /*
- * Ensure the user buffer is wired for the duration of
- * this pass-through command.
- */
- PHOLD(curproc);
- buf = uma_zalloc(pbuf_zone, M_WAITOK);
- buf->b_iocmd = pt->is_read ? BIO_READ : BIO_WRITE;
- if (vmapbuf(buf, pt->buf, pt->len, 1) < 0) {
- ret = EFAULT;
- goto err;
- }
- req = nvme_allocate_request_vaddr(buf->b_data, pt->len,
+ if (is_user) {
+ ret = nvme_user_ioctl_req((vm_offset_t)pt->buf, pt->len,
+ pt->is_read, upages, nitems(upages), &npages, &req,
nvme_pt_done, pt);
+ if (ret != 0)
+ return (ret);
} else
req = nvme_allocate_request_vaddr(pt->buf, pt->len,
- nvme_pt_done, pt);
+ M_WAITOK, nvme_pt_done, pt);
} else
- req = nvme_allocate_request_null(nvme_pt_done, pt);
+ req = nvme_allocate_request_null(M_WAITOK, nvme_pt_done, pt);
/* Assume user space already converted to little-endian */
req->cmd.opc = pt->cmd.opc;
@@ -1304,12 +1373,92 @@ nvme_ctrlr_passthrough_cmd(struct nvme_controller *ctrlr,
mtx_sleep(pt, mtx, PRIBIO, "nvme_pt", 0);
mtx_unlock(mtx);
- if (buf != NULL) {
- vunmapbuf(buf);
-err:
- uma_zfree(pbuf_zone, buf);
- PRELE(curproc);
- }
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
+
+ return (ret);
+}
+
+static void
+nvme_npc_done(void *arg, const struct nvme_completion *cpl)
+{
+ struct nvme_passthru_cmd *npc = arg;
+ struct mtx *mtx = (void *)(uintptr_t)npc->metadata;
+
+ npc->result = cpl->cdw0; /* cpl in host order by now */
+ mtx_lock(mtx);
+ npc->metadata = 0;
+ wakeup(npc);
+ mtx_unlock(mtx);
+}
+
+/* XXX refactor? */
+
+int
+nvme_ctrlr_linux_passthru_cmd(struct nvme_controller *ctrlr,
+ struct nvme_passthru_cmd *npc, uint32_t nsid, bool is_user, bool is_admin)
+{
+ struct nvme_request *req;
+ struct mtx *mtx;
+ int ret = 0;
+ int npages = 0;
+ vm_page_t upages[NVME_MAX_PAGES];
+
+ /*
+ * We don't support metadata.
+ */
+ if (npc->metadata != 0 || npc->metadata_len != 0)
+ return (EIO);
+
+ if (npc->data_len > 0 && npc->addr != 0) {
+ if (npc->data_len > ctrlr->max_xfer_size) {
+ nvme_printf(ctrlr,
+ "data_len (%d) exceeds max_xfer_size (%d)\n",
+ npc->data_len, ctrlr->max_xfer_size);
+ return (EIO);
+ }
+ if (is_user) {
+ ret = nvme_user_ioctl_req(npc->addr, npc->data_len,
+ npc->opcode & 0x1, upages, nitems(upages), &npages,
+ &req, nvme_npc_done, npc);
+ if (ret != 0)
+ return (ret);
+ } else
+ req = nvme_allocate_request_vaddr(
+ (void *)(uintptr_t)npc->addr, npc->data_len,
+ M_WAITOK, nvme_npc_done, npc);
+ } else
+ req = nvme_allocate_request_null(M_WAITOK, nvme_npc_done, npc);
+
+ req->cmd.opc = npc->opcode;
+ req->cmd.fuse = npc->flags;
+ req->cmd.rsvd2 = htole32(npc->cdw2);
+ req->cmd.rsvd3 = htole32(npc->cdw3);
+ req->cmd.cdw10 = htole32(npc->cdw10);
+ req->cmd.cdw11 = htole32(npc->cdw11);
+ req->cmd.cdw12 = htole32(npc->cdw12);
+ req->cmd.cdw13 = htole32(npc->cdw13);
+ req->cmd.cdw14 = htole32(npc->cdw14);
+ req->cmd.cdw15 = htole32(npc->cdw15);
+
+ req->cmd.nsid = htole32(nsid);
+
+ mtx = mtx_pool_find(mtxpool_sleep, npc);
+ npc->metadata = (uintptr_t) mtx;
+
+ /* XXX no timeout passed down */
+ if (is_admin)
+ nvme_ctrlr_submit_admin_request(ctrlr, req);
+ else
+ nvme_ctrlr_submit_io_request(ctrlr, req);
+
+ mtx_lock(mtx);
+ while (npc->metadata != 0)
+ mtx_sleep(npc, mtx, PRIBIO, "nvme_npc", 0);
+ mtx_unlock(mtx);
+
+ if (npages > 0)
+ nvme_user_ioctl_free(upages, npages);
return (ret);
}
@@ -1324,6 +1473,7 @@ nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
ctrlr = cdev->si_drv1;
switch (cmd) {
+ case NVME_IOCTL_RESET: /* Linux compat */
case NVME_RESET_CONTROLLER:
nvme_ctrlr_reset(ctrlr);
break;
@@ -1334,15 +1484,30 @@ nvme_ctrlr_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
case NVME_GET_NSID:
{
struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
- strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
+ strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
sizeof(gnsid->cdev));
- gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
gnsid->nsid = 0;
break;
}
case NVME_GET_MAX_XFER_SIZE:
*(uint64_t *)arg = ctrlr->max_xfer_size;
break;
+ case NVME_GET_CONTROLLER_DATA:
+ memcpy(arg, &ctrlr->cdata, sizeof(ctrlr->cdata));
+ break;
+ /* Linux Compatible (see nvme_linux.h) */
+ case NVME_IOCTL_ID:
+ td->td_retval[0] = 0xfffffffful;
+ return (0);
+
+ case NVME_IOCTL_ADMIN_CMD:
+ case NVME_IOCTL_IO_CMD: {
+ struct nvme_passthru_cmd *npc = (struct nvme_passthru_cmd *)arg;
+
+ return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, npc->nsid, true,
+ cmd == NVME_IOCTL_ADMIN_CMD));
+ }
+
default:
return (ENOTTY);
}
@@ -1443,6 +1608,8 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
ctrlr->enable_aborts = 0;
TUNABLE_INT_FETCH("hw.nvme.enable_aborts", &ctrlr->enable_aborts);
+ ctrlr->alignment_splits = counter_u64_alloc(M_WAITOK);
+
/* Cap transfers by the maximum addressable by page-sized PRP (4KB pages -> 2MB). */
ctrlr->max_xfer_size = MIN(maxphys, (ctrlr->page_size / 8 * ctrlr->page_size));
if (nvme_ctrlr_construct_admin_qpair(ctrlr) != 0)
@@ -1451,23 +1618,23 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
/*
* Create 2 threads for the taskqueue. The reset thread will block when
* it detects that the controller has failed until all I/O has been
- * failed up the stack. The fail_req task needs to be able to run in
- * this case to finish the request failure for some cases.
- *
- * We could partially solve this race by draining the failed requeust
- * queue before proceding to free the sim, though nothing would stop
- * new I/O from coming in after we do that drain, but before we reach
- * cam_sim_free, so this big hammer is used instead.
+ * failed up the stack. The second thread is used for AER events, which
+ * can block, but only briefly for memory and log page fetching.
*/
ctrlr->taskqueue = taskqueue_create("nvme_taskq", M_WAITOK,
taskqueue_thread_enqueue, &ctrlr->taskqueue);
taskqueue_start_threads(&ctrlr->taskqueue, 2, PI_DISK, "nvme taskq");
ctrlr->is_resetting = 0;
- ctrlr->is_initialized = 0;
+ ctrlr->is_initialized = false;
ctrlr->notification_sent = 0;
TASK_INIT(&ctrlr->reset_task, 0, nvme_ctrlr_reset_task, ctrlr);
- STAILQ_INIT(&ctrlr->fail_req);
+ for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
+ struct nvme_async_event_request *aer = &ctrlr->aer[i];
+
+ TASK_INIT(&aer->task, 0, nvme_ctrlr_aer_task, aer);
+ mtx_init(&aer->mtx, "AER mutex", NULL, MTX_DEF);
+ }
ctrlr->is_failed = false;
make_dev_args_init(&md_args);
@@ -1477,18 +1644,25 @@ nvme_ctrlr_construct(struct nvme_controller *ctrlr, device_t dev)
md_args.mda_mode = 0600;
md_args.mda_unit = device_get_unit(dev);
md_args.mda_si_drv1 = (void *)ctrlr;
- status = make_dev_s(&md_args, &ctrlr->cdev, "nvme%d",
- device_get_unit(dev));
+ status = make_dev_s(&md_args, &ctrlr->cdev, "%s",
+ device_get_nameunit(dev));
if (status != 0)
return (ENXIO);
return (0);
}
+/*
+ * Called on detach, or on error on attach. The nvme_controller won't be used
+ * again once we return, so we have to tear everything down (so nothing
+ * references this, no callbacks, etc), but don't need to reset all the state
+ * since nvme_controller will be freed soon.
+ */
void
nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
{
- int gone, i;
+ int i;
+ bool gone;
ctrlr->is_dying = true;
@@ -1498,12 +1672,18 @@ nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
goto noadminq;
/*
- * Check whether it is a hot unplug or a clean driver detach.
- * If device is not there any more, skip any shutdown commands.
+ * Check whether it is a hot unplug or a clean driver detach. If device
+ * is not there any more, skip any shutdown commands. Some hotplug
+ * bridges will return zeros instead of ff's when the device is
+ * departing, so ask the bridge if the device is gone. Some systems can
+ * remove the drive w/o the bridge knowing it's gone (they don't really
+ * do hotplug), so failsafe with detecting all ff's (impossible with
+ * this hardware) as the device being gone.
*/
- gone = (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
+ gone = bus_child_present(dev) == 0 ||
+ (nvme_mmio_read_4(ctrlr, csts) == NVME_GONE);
if (gone)
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, true);
else
nvme_notify_fail_consumers(ctrlr);
@@ -1529,21 +1709,27 @@ nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
nvme_admin_qpair_destroy(&ctrlr->adminq);
/*
- * Notify the controller of a shutdown, even though this is due to
- * a driver unload, not a system shutdown (this path is not invoked
- * during shutdown). This ensures the controller receives a
- * shutdown notification in case the system is shutdown before
- * reloading the driver.
+ * Notify the controller of a shutdown, even though this is due to a
+ * driver unload, not a system shutdown (this path is not invoked during
+ * shutdown). This ensures the controller receives a shutdown
+ * notification in case the system is shutdown before reloading the
+ * driver. Some NVMe drives need this to flush their cache to stable
+ * media and consider it a safe shutdown in SMART stats.
*/
- if (!gone)
+ if (!gone) {
nvme_ctrlr_shutdown(ctrlr);
-
- if (!gone)
nvme_ctrlr_disable(ctrlr);
+ }
noadminq:
- if (ctrlr->taskqueue)
+ if (ctrlr->taskqueue) {
taskqueue_free(ctrlr->taskqueue);
+ for (int i = 0; i < NVME_MAX_ASYNC_EVENTS; i++) {
+ struct nvme_async_event_request *aer = &ctrlr->aer[i];
+
+ mtx_destroy(&aer->mtx);
+ }
+ }
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -1561,6 +1747,9 @@ noadminq:
ctrlr->resource_id, ctrlr->resource);
nores:
+ if (ctrlr->alignment_splits)
+ counter_u64_free(ctrlr->alignment_splits);
+
mtx_destroy(&ctrlr->lock);
}
@@ -1596,7 +1785,6 @@ void
nvme_ctrlr_submit_admin_request(struct nvme_controller *ctrlr,
struct nvme_request *req)
{
-
nvme_qpair_submit_request(&ctrlr->adminq, req);
}
@@ -1613,14 +1801,12 @@ nvme_ctrlr_submit_io_request(struct nvme_controller *ctrlr,
device_t
nvme_ctrlr_get_device(struct nvme_controller *ctrlr)
{
-
return (ctrlr->dev);
}
const struct nvme_controller_data *
nvme_ctrlr_get_data(struct nvme_controller *ctrlr)
{
-
return (&ctrlr->cdata);
}
@@ -1630,7 +1816,9 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
int to = hz;
/*
- * Can't touch failed controllers, so it's already suspended.
+ * Can't touch failed controllers, so it's already suspended. User will
+ * need to do an explicit reset to bring it back, if that's even
+ * possible.
*/
if (ctrlr->is_failed)
return (0);
@@ -1671,7 +1859,6 @@ nvme_ctrlr_suspend(struct nvme_controller *ctrlr)
int
nvme_ctrlr_resume(struct nvme_controller *ctrlr)
{
-
/*
* Can't touch failed controllers, so nothing to do to resume.
*/
@@ -1684,7 +1871,8 @@ nvme_ctrlr_resume(struct nvme_controller *ctrlr)
/*
* Now that we've reset the hardware, we can restart the controller. Any
* I/O that was pending is requeued. Any admin commands are aborted with
- * an error. Once we've restarted, take the controller out of reset.
+ * an error. Once we've restarted, stop flagging the controller as being
+ * in the reset phase.
*/
nvme_ctrlr_start(ctrlr, true);
(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
@@ -1697,7 +1885,7 @@ fail:
* itself, due to questionable APIs.
*/
nvme_printf(ctrlr, "Failed to reset on resume, failing.\n");
- nvme_ctrlr_fail(ctrlr);
+ nvme_ctrlr_fail(ctrlr, true);
(void)atomic_cmpset_32(&ctrlr->is_resetting, 1, 0);
return (0);
}
diff --git a/sys/dev/nvme/nvme_ctrlr_cmd.c b/sys/dev/nvme/nvme_ctrlr_cmd.c
index 68934b9b3947..5a44ed425acb 100644
--- a/sys/dev/nvme/nvme_ctrlr_cmd.c
+++ b/sys/dev/nvme/nvme_ctrlr_cmd.c
@@ -37,7 +37,7 @@ nvme_ctrlr_cmd_identify_controller(struct nvme_controller *ctrlr, void *payload,
struct nvme_command *cmd;
req = nvme_allocate_request_vaddr(payload,
- sizeof(struct nvme_controller_data), cb_fn, cb_arg);
+ sizeof(struct nvme_controller_data), M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_IDENTIFY;
@@ -59,7 +59,7 @@ nvme_ctrlr_cmd_identify_namespace(struct nvme_controller *ctrlr, uint32_t nsid,
struct nvme_command *cmd;
req = nvme_allocate_request_vaddr(payload,
- sizeof(struct nvme_namespace_data), cb_fn, cb_arg);
+ sizeof(struct nvme_namespace_data), M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_IDENTIFY;
@@ -79,7 +79,7 @@ nvme_ctrlr_cmd_create_io_cq(struct nvme_controller *ctrlr,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_CREATE_IO_CQ;
@@ -103,7 +103,7 @@ nvme_ctrlr_cmd_create_io_sq(struct nvme_controller *ctrlr,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_CREATE_IO_SQ;
@@ -127,7 +127,7 @@ nvme_ctrlr_cmd_delete_io_cq(struct nvme_controller *ctrlr,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_DELETE_IO_CQ;
@@ -148,7 +148,7 @@ nvme_ctrlr_cmd_delete_io_sq(struct nvme_controller *ctrlr,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_DELETE_IO_SQ;
@@ -171,7 +171,7 @@ nvme_ctrlr_cmd_set_feature(struct nvme_controller *ctrlr, uint8_t feature,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_SET_FEATURES;
@@ -193,7 +193,7 @@ nvme_ctrlr_cmd_get_feature(struct nvme_controller *ctrlr, uint8_t feature,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ req = nvme_allocate_request_null(M_WAITOK, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_GET_FEATURES;
@@ -259,7 +259,12 @@ nvme_ctrlr_cmd_get_log_page(struct nvme_controller *ctrlr, uint8_t log_page,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_vaddr(payload, payload_size, cb_fn, cb_arg);
+ /*
+ * XXX-MJ this should be M_WAITOK but we might be called from AER
+ * completion processing, which is a non-sleepable context.
+ */
+ req = nvme_allocate_request_vaddr(payload, payload_size,
+ M_NOWAIT, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_GET_LOG_PAGE;
@@ -276,7 +281,6 @@ nvme_ctrlr_cmd_get_error_page(struct nvme_controller *ctrlr,
struct nvme_error_information_entry *payload, uint32_t num_entries,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
KASSERT(num_entries > 0, ("%s called with num_entries==0\n", __func__));
/* Controller's error log page entries is 0-based. */
@@ -297,7 +301,6 @@ nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
uint32_t nsid, struct nvme_health_information_page *payload,
nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_HEALTH_INFORMATION,
nsid, payload, sizeof(*payload), cb_fn, cb_arg);
}
@@ -306,7 +309,6 @@ void
nvme_ctrlr_cmd_get_firmware_page(struct nvme_controller *ctrlr,
struct nvme_firmware_page *payload, nvme_cb_fn_t cb_fn, void *cb_arg)
{
-
nvme_ctrlr_cmd_get_log_page(ctrlr, NVME_LOG_FIRMWARE_SLOT,
NVME_GLOBAL_NAMESPACE_TAG, payload, sizeof(*payload), cb_fn,
cb_arg);
@@ -319,7 +321,11 @@ nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
struct nvme_request *req;
struct nvme_command *cmd;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
+ /*
+ * XXX-MJ this should be M_WAITOK, but we do reset from non-sleepable
+ * context and abort commands as part of that.
+ */
+ req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
cmd = &req->cmd;
cmd->opc = NVME_OPC_ABORT;
diff --git a/sys/dev/nvme/nvme_linux.h b/sys/dev/nvme/nvme_linux.h
new file mode 100644
index 000000000000..aaa68e1d34f8
--- /dev/null
+++ b/sys/dev/nvme/nvme_linux.h
@@ -0,0 +1,58 @@
+/*-
+ * Copyright (c) 2024, Netflix Inc.
+ * Written by Warner Losh
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * Linux compatible NVME ioctls. So far we just support ID, ADMIN_CMD and
+ * IO_CMD. The rest are not supported.
+ */
+
+
+#include <sys/ioccom.h>
+#include <sys/_types.h>
+
+struct nvme_passthru_cmd {
+ __uint8_t opcode;
+ __uint8_t flags;
+ __uint16_t rsvd1;
+ __uint32_t nsid;
+ __uint32_t cdw2;
+ __uint32_t cdw3;
+ __uint64_t metadata;
+ __uint64_t addr;
+ __uint32_t metadata_len;
+ __uint32_t data_len;
+ __uint32_t cdw10;
+ __uint32_t cdw11;
+ __uint32_t cdw12;
+ __uint32_t cdw13;
+ __uint32_t cdw14;
+ __uint32_t cdw15;
+ __uint32_t timeout_ms;
+ __uint32_t result;
+};
+
+#define nvme_admin_cmd nvme_passthru_cmd
+
+/*
+ * Linux nvme ioctls, commented out ones are not supported
+ */
+#define NVME_IOCTL_ID _IO('N', 0x40)
+#define NVME_IOCTL_ADMIN_CMD _IOWR('N', 0x41, struct nvme_admin_cmd)
+/* #define NVME_IOCTL_SUBMIT_IO _IOW('N', 0x42, struct nvme_user_io) */
+#define NVME_IOCTL_IO_CMD _IOWR('N', 0x43, struct nvme_passthru_cmd)
+#define NVME_IOCTL_RESET _IO('N', 0x44)
+/* #define NVME_IOCTL_SUBSYS_RESET _IO('N', 0x45) */
+/* #define NVME_IOCTL_RESCAN _IO('N', 0x46) */
+/* #define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64) */
+/* #define NVME_IOCTL_IO64_CMD _IOWR('N', 0x48, struct nvme_passthru_cmd64) */
+/* #define NVME_IOCTL_IO64_CMD_VEC _IOWR('N', 0x49, struct nvme_passthru_cmd64) */
+
+/* io_uring async commands: */
+/* #define NVME_URING_CMD_IO _IOWR('N', 0x80, struct nvme_uring_cmd) */
+/* #define NVME_URING_CMD_IO_VEC _IOWR('N', 0x81, struct nvme_uring_cmd) */
+/* #define NVME_URING_CMD_ADMIN _IOWR('N', 0x82, struct nvme_uring_cmd) */
+/* #define NVME_URING_CMD_ADMIN_VEC _IOWR('N', 0x83, struct nvme_uring_cmd) */
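As a usage sketch (not part of the commit), userland can now drive these ioctls the way Linux's nvme-cli does. Here an Identify Controller admin command is issued through NVME_IOCTL_ADMIN_CMD; opcode 0x06 and CNS=1 in cdw10 with a 4 KB payload follow the NVMe spec, and the header is assumed to be reachable as <dev/nvme/nvme_linux.h>:

#include <sys/ioctl.h>
#include <dev/nvme/nvme_linux.h>	/* struct nvme_passthru_cmd, NVME_IOCTL_* */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct nvme_passthru_cmd npc;
	static uint8_t idbuf[4096];	/* Identify data structure is 4 KB */
	int fd;

	fd = open("/dev/nvme0", O_RDWR);
	if (fd < 0)
		return (1);

	memset(&npc, 0, sizeof(npc));
	npc.opcode = 0x06;			/* Identify */
	npc.addr = (uint64_t)(uintptr_t)idbuf;
	npc.data_len = sizeof(idbuf);
	npc.cdw10 = 1;				/* CNS=1: controller */

	if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &npc) == -1)
		perror("NVME_IOCTL_ADMIN_CMD");
	else
		printf("result: 0x%x\n", npc.result);
	close(fd);
	return (0);
}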
diff --git a/sys/dev/nvme/nvme_ns.c b/sys/dev/nvme/nvme_ns.c
index 360b9f982c20..e84d2066930e 100644
--- a/sys/dev/nvme/nvme_ns.c
+++ b/sys/dev/nvme/nvme_ns.c
@@ -43,6 +43,7 @@
#include <geom/geom.h>
#include "nvme_private.h"
+#include "nvme_linux.h"
static void nvme_bio_child_inbed(struct bio *parent, int bio_error);
static void nvme_bio_child_done(void *arg,
@@ -82,9 +83,8 @@ nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
case NVME_GET_NSID:
{
struct nvme_get_nsid *gnsid = (struct nvme_get_nsid *)arg;
- strncpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
+ strlcpy(gnsid->cdev, device_get_nameunit(ctrlr->dev),
sizeof(gnsid->cdev));
- gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
gnsid->nsid = ns->id;
break;
}
@@ -94,6 +94,18 @@ nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
case DIOCGSECTORSIZE:
*(u_int *)arg = nvme_ns_get_sector_size(ns);
break;
+ /* Linux Compatible (see nvme_linux.h) */
+ case NVME_IOCTL_ID:
+ td->td_retval[0] = ns->id;
+ return (0);
+
+ case NVME_IOCTL_ADMIN_CMD:
+ case NVME_IOCTL_IO_CMD: {
+ struct nvme_passthru_cmd *npc = (struct nvme_passthru_cmd *)arg;
+
+ return (nvme_ctrlr_linux_passthru_cmd(ctrlr, npc, ns->id, true,
+ cmd == NVME_IOCTL_ADMIN_CMD));
+ }
default:
return (ENOTTY);
}
@@ -117,7 +129,6 @@ static int
nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused,
struct thread *td)
{
-
return (0);
}
@@ -219,7 +230,6 @@ nvme_ns_get_model_number(struct nvme_namespace *ns)
const struct nvme_namespace_data *
nvme_ns_get_data(struct nvme_namespace *ns)
{
-
return (&ns->data);
}
@@ -429,6 +439,7 @@ nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp,
if (child_bios == NULL)
return (ENOMEM);
+ counter_u64_add(ns->ctrlr->alignment_splits, 1);
for (i = 0; i < num_bios; i++) {
child = child_bios[i];
err = nvme_ns_bio_process(ns, child, nvme_bio_child_done);
@@ -604,11 +615,12 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
md_args.mda_unit = unit;
md_args.mda_mode = 0600;
md_args.mda_si_drv1 = ns;
- res = make_dev_s(&md_args, &ns->cdev, "nvme%dns%d",
- device_get_unit(ctrlr->dev), ns->id);
+ res = make_dev_s(&md_args, &ns->cdev, "%sn%d",
+ device_get_nameunit(ctrlr->dev), ns->id);
if (res != 0)
return (ENXIO);
-
+ ns->cdev->si_drv2 = make_dev_alias(ns->cdev, "%sns%d",
+ device_get_nameunit(ctrlr->dev), ns->id);
ns->cdev->si_flags |= SI_UNMAPPED;
return (0);
@@ -617,7 +629,9 @@ nvme_ns_construct(struct nvme_namespace *ns, uint32_t id,
void
nvme_ns_destruct(struct nvme_namespace *ns)
{
-
- if (ns->cdev != NULL)
+ if (ns->cdev != NULL) {
+ if (ns->cdev->si_drv2 != NULL)
+ destroy_dev(ns->cdev->si_drv2);
destroy_dev(ns->cdev);
+ }
}
diff --git a/sys/dev/nvme/nvme_ns_cmd.c b/sys/dev/nvme/nvme_ns_cmd.c
index 8cbeac025307..1bad9929cb09 100644
--- a/sys/dev/nvme/nvme_ns_cmd.c
+++ b/sys/dev/nvme/nvme_ns_cmd.c
@@ -36,8 +36,7 @@ nvme_ns_cmd_read(struct nvme_namespace *ns, void *payload, uint64_t lba,
struct nvme_request *req;
req = nvme_allocate_request_vaddr(payload,
- lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
-
+ lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
if (req == NULL)
return (ENOMEM);
@@ -56,11 +55,9 @@ nvme_ns_cmd_read_bio(struct nvme_namespace *ns, struct bio *bp,
uint64_t lba;
uint64_t lba_count;
- req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
-
+ req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
if (req == NULL)
return (ENOMEM);
-
lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
lba_count = bp->bio_bcount / nvme_ns_get_sector_size(ns);
nvme_ns_read_cmd(&req->cmd, ns->id, lba, lba_count);
@@ -77,8 +74,7 @@ nvme_ns_cmd_write(struct nvme_namespace *ns, void *payload, uint64_t lba,
struct nvme_request *req;
req = nvme_allocate_request_vaddr(payload,
- lba_count*nvme_ns_get_sector_size(ns), cb_fn, cb_arg);
-
+ lba_count * nvme_ns_get_sector_size(ns), M_NOWAIT, cb_fn, cb_arg);
if (req == NULL)
return (ENOMEM);
@@ -97,8 +93,7 @@ nvme_ns_cmd_write_bio(struct nvme_namespace *ns, struct bio *bp,
uint64_t lba;
uint64_t lba_count;
- req = nvme_allocate_request_bio(bp, cb_fn, cb_arg);
-
+ req = nvme_allocate_request_bio(bp, M_NOWAIT, cb_fn, cb_arg);
if (req == NULL)
return (ENOMEM);
lba = bp->bio_offset / nvme_ns_get_sector_size(ns);
@@ -118,8 +113,8 @@ nvme_ns_cmd_deallocate(struct nvme_namespace *ns, void *payload,
struct nvme_command *cmd;
req = nvme_allocate_request_vaddr(payload,
- num_ranges * sizeof(struct nvme_dsm_range), cb_fn, cb_arg);
-
+ num_ranges * sizeof(struct nvme_dsm_range), M_NOWAIT, cb_fn,
+ cb_arg);
if (req == NULL)
return (ENOMEM);
@@ -141,8 +136,7 @@ nvme_ns_cmd_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
struct nvme_request *req;
- req = nvme_allocate_request_null(cb_fn, cb_arg);
-
+ req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
if (req == NULL)
return (ENOMEM);
@@ -165,8 +159,8 @@ nvme_ns_dump(struct nvme_namespace *ns, void *virt, off_t offset, size_t len)
int i;
status.done = FALSE;
- req = nvme_allocate_request_vaddr(virt, len, nvme_completion_poll_cb,
- &status);
+ req = nvme_allocate_request_vaddr(virt, len, M_NOWAIT,
+ nvme_completion_poll_cb, &status);
if (req == NULL)
return (ENOMEM);
diff --git a/sys/dev/nvme/nvme_pci.c b/sys/dev/nvme/nvme_pci.c
index 29b49b7df403..c07a68d2f0dc 100644
--- a/sys/dev/nvme/nvme_pci.c
+++ b/sys/dev/nvme/nvme_pci.c
@@ -151,7 +151,6 @@ nvme_pci_probe (device_t device)
static int
nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
{
-
ctrlr->resource_id = PCIR_BAR(0);
ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
diff --git a/sys/dev/nvme/nvme_private.h b/sys/dev/nvme/nvme_private.h
index 69141add4e48..52e9fcbbebcd 100644
--- a/sys/dev/nvme/nvme_private.h
+++ b/sys/dev/nvme/nvme_private.h
@@ -32,6 +32,7 @@
#include <sys/param.h>
#include <sys/bio.h>
#include <sys/bus.h>
+#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
@@ -122,6 +123,8 @@ struct nvme_request {
struct nvme_async_event_request {
struct nvme_controller *ctrlr;
struct nvme_request *req;
+ struct task task;
+ struct mtx mtx;
struct nvme_completion cpl;
uint32_t log_page_id;
uint32_t log_page_size;
@@ -297,12 +300,14 @@ struct nvme_controller {
void *cons_cookie[NVME_MAX_CONSUMERS];
uint32_t is_resetting;
- uint32_t is_initialized;
uint32_t notification_sent;
+ u_int fail_on_reset;
bool is_failed;
+ bool is_failed_admin;
bool is_dying;
- STAILQ_HEAD(, nvme_request) fail_req;
+ bool isr_warned;
+ bool is_initialized;
/* Host Memory Buffer */
int hmb_nchunks;
@@ -317,6 +322,9 @@ struct nvme_controller {
bus_dmamap_t hmb_desc_map;
struct nvme_hmb_desc *hmb_desc_vaddr;
uint64_t hmb_desc_paddr;
+
+ /* Statistics */
+ counter_u64_t alignment_splits;
};
#define nvme_mmio_offsetof(reg) \
@@ -413,9 +421,6 @@ void nvme_qpair_submit_request(struct nvme_qpair *qpair,
struct nvme_request *req);
void nvme_qpair_reset(struct nvme_qpair *qpair);
void nvme_qpair_fail(struct nvme_qpair *qpair);
-void nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
- struct nvme_request *req,
- uint32_t sct, uint32_t sc);
void nvme_admin_qpair_enable(struct nvme_qpair *qpair);
void nvme_admin_qpair_disable(struct nvme_qpair *qpair);
@@ -454,18 +459,17 @@ int nvme_detach(device_t dev);
* vast majority of these without waiting for a tick plus scheduling delays. Since
* these are on startup, this drastically reduces startup time.
*/
-static __inline
-void
+static __inline void
nvme_completion_poll(struct nvme_completion_poll_status *status)
{
int timeout = ticks + 10 * hz;
- sbintime_t delta_t = SBT_1US;
+ sbintime_t delta = SBT_1US;
while (!atomic_load_acq_int(&status->done)) {
if (timeout - ticks < 0)
panic("NVME polled command failed to complete within 10s.");
- pause_sbt("nvme", delta_t, 0, C_PREL(1));
- delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ pause_sbt("nvme", delta, 0, C_PREL(1));
+ delta = min(SBT_1MS, delta + delta / 2);
}
}
@@ -481,11 +485,14 @@ nvme_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
}
static __inline struct nvme_request *
-_nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
+_nvme_allocate_request(const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
struct nvme_request *req;
- req = malloc(sizeof(*req), M_NVME, M_NOWAIT | M_ZERO);
+ KASSERT(how == M_WAITOK || how == M_NOWAIT,
+ ("nvme_allocate_request: invalid how %d", how));
+
+ req = malloc(sizeof(*req), M_NVME, how | M_ZERO);
if (req != NULL) {
req->cb_fn = cb_fn;
req->cb_arg = cb_arg;
@@ -496,11 +503,11 @@ _nvme_allocate_request(nvme_cb_fn_t cb_fn, void *cb_arg)
static __inline struct nvme_request *
nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
- nvme_cb_fn_t cb_fn, void *cb_arg)
+ const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
struct nvme_request *req;
- req = _nvme_allocate_request(cb_fn, cb_arg);
+ req = _nvme_allocate_request(how, cb_fn, cb_arg);
if (req != NULL) {
req->payload = memdesc_vaddr(payload, payload_size);
req->payload_valid = true;
@@ -509,20 +516,21 @@ nvme_allocate_request_vaddr(void *payload, uint32_t payload_size,
}
static __inline struct nvme_request *
-nvme_allocate_request_null(nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request_null(const int how, nvme_cb_fn_t cb_fn, void *cb_arg)
{
struct nvme_request *req;
- req = _nvme_allocate_request(cb_fn, cb_arg);
+ req = _nvme_allocate_request(how, cb_fn, cb_arg);
return (req);
}
static __inline struct nvme_request *
-nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request_bio(struct bio *bio, const int how, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
{
struct nvme_request *req;
- req = _nvme_allocate_request(cb_fn, cb_arg);
+ req = _nvme_allocate_request(how, cb_fn, cb_arg);
if (req != NULL) {
req->payload = memdesc_bio(bio);
req->payload_valid = true;
@@ -531,16 +539,16 @@ nvme_allocate_request_bio(struct bio *bio, nvme_cb_fn_t cb_fn, void *cb_arg)
}
static __inline struct nvme_request *
-nvme_allocate_request_ccb(union ccb *ccb, nvme_cb_fn_t cb_fn, void *cb_arg)
+nvme_allocate_request_ccb(union ccb *ccb, const int how, nvme_cb_fn_t cb_fn,
+ void *cb_arg)
{
struct nvme_request *req;
- req = _nvme_allocate_request(cb_fn, cb_arg);
+ req = _nvme_allocate_request(how, cb_fn, cb_arg);
if (req != NULL) {
req->payload = memdesc_ccb(ccb);
req->payload_valid = true;
}
-
return (req);
}
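The new how argument follows malloc(9): M_WAITOK allocations cannot fail but may sleep, so they are reserved for sleepable contexts, while M_NOWAIT callers (the I/O path, completion and reset contexts) must handle NULL. A minimal caller sketch under those assumptions, modeled on the flush path:

static int
my_ns_flush(struct nvme_namespace *ns, nvme_cb_fn_t cb_fn, void *cb_arg)
{
	struct nvme_request *req;

	/* The I/O path may hold non-sleepable locks: no M_WAITOK here. */
	req = nvme_allocate_request_null(M_NOWAIT, cb_fn, cb_arg);
	if (req == NULL)
		return (ENOMEM);
	nvme_ns_flush_cmd(&req->cmd, ns->id);
	nvme_ctrlr_submit_io_request(ns->ctrlr, req);
	return (0);
}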
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index 62d27e439180..4f2c44da3b4f 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -31,6 +31,7 @@
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/proc.h>
+#include <sys/sbuf.h>
#include <dev/pci/pcivar.h>
@@ -43,96 +44,36 @@ static void _nvme_qpair_submit_request(struct nvme_qpair *qpair,
struct nvme_request *req);
static void nvme_qpair_destroy(struct nvme_qpair *qpair);
-#define DEFAULT_INDEX 256
-#define DEFAULT_ENTRY(x) [DEFAULT_INDEX] = x
-#define OPC_ENTRY(x) [NVME_OPC_ ## x] = #x
-
-static const char *admin_opcode[DEFAULT_INDEX + 1] = {
- OPC_ENTRY(DELETE_IO_SQ),
- OPC_ENTRY(CREATE_IO_SQ),
- OPC_ENTRY(GET_LOG_PAGE),
- OPC_ENTRY(DELETE_IO_CQ),
- OPC_ENTRY(CREATE_IO_CQ),
- OPC_ENTRY(IDENTIFY),
- OPC_ENTRY(ABORT),
- OPC_ENTRY(SET_FEATURES),
- OPC_ENTRY(GET_FEATURES),
- OPC_ENTRY(ASYNC_EVENT_REQUEST),
- OPC_ENTRY(NAMESPACE_MANAGEMENT),
- OPC_ENTRY(FIRMWARE_ACTIVATE),
- OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD),
- OPC_ENTRY(DEVICE_SELF_TEST),
- OPC_ENTRY(NAMESPACE_ATTACHMENT),
- OPC_ENTRY(KEEP_ALIVE),
- OPC_ENTRY(DIRECTIVE_SEND),
- OPC_ENTRY(DIRECTIVE_RECEIVE),
- OPC_ENTRY(VIRTUALIZATION_MANAGEMENT),
- OPC_ENTRY(NVME_MI_SEND),
- OPC_ENTRY(NVME_MI_RECEIVE),
- OPC_ENTRY(CAPACITY_MANAGEMENT),
- OPC_ENTRY(LOCKDOWN),
- OPC_ENTRY(DOORBELL_BUFFER_CONFIG),
- OPC_ENTRY(FABRICS_COMMANDS),
- OPC_ENTRY(FORMAT_NVM),
- OPC_ENTRY(SECURITY_SEND),
- OPC_ENTRY(SECURITY_RECEIVE),
- OPC_ENTRY(SANITIZE),
- OPC_ENTRY(GET_LBA_STATUS),
- DEFAULT_ENTRY("ADMIN COMMAND"),
-};
-
-static const char *io_opcode[DEFAULT_INDEX + 1] = {
- OPC_ENTRY(FLUSH),
- OPC_ENTRY(WRITE),
- OPC_ENTRY(READ),
- OPC_ENTRY(WRITE_UNCORRECTABLE),
- OPC_ENTRY(COMPARE),
- OPC_ENTRY(WRITE_ZEROES),
- OPC_ENTRY(DATASET_MANAGEMENT),
- OPC_ENTRY(VERIFY),
- OPC_ENTRY(RESERVATION_REGISTER),
- OPC_ENTRY(RESERVATION_REPORT),
- OPC_ENTRY(RESERVATION_ACQUIRE),
- OPC_ENTRY(RESERVATION_RELEASE),
- OPC_ENTRY(COPY),
- DEFAULT_ENTRY("IO COMMAND"),
-};
-
-static const char *
-get_opcode_string(const char *op[DEFAULT_INDEX + 1], uint16_t opc)
-{
- const char *nm = opc < DEFAULT_INDEX ? op[opc] : op[DEFAULT_INDEX];
-
- return (nm != NULL ? nm : op[DEFAULT_INDEX]);
-}
-
static const char *
-get_admin_opcode_string(uint16_t opc)
+get_opcode_string(bool admin, uint8_t opc, char *buf, size_t len)
{
- return (get_opcode_string(admin_opcode, opc));
-}
+ struct sbuf sb;
-static const char *
-get_io_opcode_string(uint16_t opc)
-{
- return (get_opcode_string(io_opcode, opc));
+ sbuf_new(&sb, buf, len, SBUF_FIXEDLEN);
+ nvme_opcode_sbuf(admin, opc, &sb);
+ if (sbuf_finish(&sb) != 0)
+ return ("");
+ return (buf);
}
static void
nvme_admin_qpair_print_command(struct nvme_qpair *qpair,
struct nvme_command *cmd)
{
+ char buf[64];
- nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%x "
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%x "
"cdw10:%08x cdw11:%08x\n",
- get_admin_opcode_string(cmd->opc), cmd->opc, qpair->id, cmd->cid,
- le32toh(cmd->nsid), le32toh(cmd->cdw10), le32toh(cmd->cdw11));
+ get_opcode_string(true, cmd->opc, buf, sizeof(buf)), qpair->id,
+ cmd->cid, le32toh(cmd->nsid), le32toh(cmd->cdw10),
+ le32toh(cmd->cdw11));
}
static void
nvme_io_qpair_print_command(struct nvme_qpair *qpair,
struct nvme_command *cmd)
{
+ char buf[64];
switch (cmd->opc) {
case NVME_OPC_WRITE:
@@ -143,23 +84,15 @@ nvme_io_qpair_print_command(struct nvme_qpair *qpair,
case NVME_OPC_VERIFY:
nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d "
"lba:%llu len:%d\n",
- get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid),
+ get_opcode_string(false, cmd->opc, buf, sizeof(buf)),
+ qpair->id, cmd->cid, le32toh(cmd->nsid),
((unsigned long long)le32toh(cmd->cdw11) << 32) + le32toh(cmd->cdw10),
(le32toh(cmd->cdw12) & 0xFFFF) + 1);
break;
- case NVME_OPC_FLUSH:
- case NVME_OPC_DATASET_MANAGEMENT:
- case NVME_OPC_RESERVATION_REGISTER:
- case NVME_OPC_RESERVATION_REPORT:
- case NVME_OPC_RESERVATION_ACQUIRE:
- case NVME_OPC_RESERVATION_RELEASE:
- nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
- get_io_opcode_string(cmd->opc), qpair->id, cmd->cid, le32toh(cmd->nsid));
- break;
default:
- nvme_printf(qpair->ctrlr, "%s (%02x) sqid:%d cid:%d nsid:%d\n",
- get_io_opcode_string(cmd->opc), cmd->opc, qpair->id,
- cmd->cid, le32toh(cmd->nsid));
+ nvme_printf(qpair->ctrlr, "%s sqid:%d cid:%d nsid:%d\n",
+ get_opcode_string(false, cmd->opc, buf, sizeof(buf)),
+ qpair->id, cmd->cid, le32toh(cmd->nsid));
break;
}
}
@@ -183,170 +116,33 @@ nvme_qpair_print_command(struct nvme_qpair *qpair, struct nvme_command *cmd)
}
}
-struct nvme_status_string {
- uint16_t sc;
- const char * str;
-};
-
-static struct nvme_status_string generic_status[] = {
- { NVME_SC_SUCCESS, "SUCCESS" },
- { NVME_SC_INVALID_OPCODE, "INVALID OPCODE" },
- { NVME_SC_INVALID_FIELD, "INVALID_FIELD" },
- { NVME_SC_COMMAND_ID_CONFLICT, "COMMAND ID CONFLICT" },
- { NVME_SC_DATA_TRANSFER_ERROR, "DATA TRANSFER ERROR" },
- { NVME_SC_ABORTED_POWER_LOSS, "ABORTED - POWER LOSS" },
- { NVME_SC_INTERNAL_DEVICE_ERROR, "INTERNAL DEVICE ERROR" },
- { NVME_SC_ABORTED_BY_REQUEST, "ABORTED - BY REQUEST" },
- { NVME_SC_ABORTED_SQ_DELETION, "ABORTED - SQ DELETION" },
- { NVME_SC_ABORTED_FAILED_FUSED, "ABORTED - FAILED FUSED" },
- { NVME_SC_ABORTED_MISSING_FUSED, "ABORTED - MISSING FUSED" },
- { NVME_SC_INVALID_NAMESPACE_OR_FORMAT, "INVALID NAMESPACE OR FORMAT" },
- { NVME_SC_COMMAND_SEQUENCE_ERROR, "COMMAND SEQUENCE ERROR" },
- { NVME_SC_INVALID_SGL_SEGMENT_DESCR, "INVALID SGL SEGMENT DESCRIPTOR" },
- { NVME_SC_INVALID_NUMBER_OF_SGL_DESCR, "INVALID NUMBER OF SGL DESCRIPTORS" },
- { NVME_SC_DATA_SGL_LENGTH_INVALID, "DATA SGL LENGTH INVALID" },
- { NVME_SC_METADATA_SGL_LENGTH_INVALID, "METADATA SGL LENGTH INVALID" },
- { NVME_SC_SGL_DESCRIPTOR_TYPE_INVALID, "SGL DESCRIPTOR TYPE INVALID" },
- { NVME_SC_INVALID_USE_OF_CMB, "INVALID USE OF CONTROLLER MEMORY BUFFER" },
- { NVME_SC_PRP_OFFET_INVALID, "PRP OFFET INVALID" },
- { NVME_SC_ATOMIC_WRITE_UNIT_EXCEEDED, "ATOMIC WRITE UNIT EXCEEDED" },
- { NVME_SC_OPERATION_DENIED, "OPERATION DENIED" },
- { NVME_SC_SGL_OFFSET_INVALID, "SGL OFFSET INVALID" },
- { NVME_SC_HOST_ID_INCONSISTENT_FORMAT, "HOST IDENTIFIER INCONSISTENT FORMAT" },
- { NVME_SC_KEEP_ALIVE_TIMEOUT_EXPIRED, "KEEP ALIVE TIMEOUT EXPIRED" },
- { NVME_SC_KEEP_ALIVE_TIMEOUT_INVALID, "KEEP ALIVE TIMEOUT INVALID" },
- { NVME_SC_ABORTED_DUE_TO_PREEMPT, "COMMAND ABORTED DUE TO PREEMPT AND ABORT" },
- { NVME_SC_SANITIZE_FAILED, "SANITIZE FAILED" },
- { NVME_SC_SANITIZE_IN_PROGRESS, "SANITIZE IN PROGRESS" },
- { NVME_SC_SGL_DATA_BLOCK_GRAN_INVALID, "SGL_DATA_BLOCK_GRANULARITY_INVALID" },
- { NVME_SC_NOT_SUPPORTED_IN_CMB, "COMMAND NOT SUPPORTED FOR QUEUE IN CMB" },
- { NVME_SC_NAMESPACE_IS_WRITE_PROTECTED, "NAMESPACE IS WRITE PROTECTED" },
- { NVME_SC_COMMAND_INTERRUPTED, "COMMAND INTERRUPTED" },
- { NVME_SC_TRANSIENT_TRANSPORT_ERROR, "TRANSIENT TRANSPORT ERROR" },
-
- { NVME_SC_LBA_OUT_OF_RANGE, "LBA OUT OF RANGE" },
- { NVME_SC_CAPACITY_EXCEEDED, "CAPACITY EXCEEDED" },
- { NVME_SC_NAMESPACE_NOT_READY, "NAMESPACE NOT READY" },
- { NVME_SC_RESERVATION_CONFLICT, "RESERVATION CONFLICT" },
- { NVME_SC_FORMAT_IN_PROGRESS, "FORMAT IN PROGRESS" },
- { 0xFFFF, "GENERIC" }
-};
-
-static struct nvme_status_string command_specific_status[] = {
- { NVME_SC_COMPLETION_QUEUE_INVALID, "INVALID COMPLETION QUEUE" },
- { NVME_SC_INVALID_QUEUE_IDENTIFIER, "INVALID QUEUE IDENTIFIER" },
- { NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED, "MAX QUEUE SIZE EXCEEDED" },
- { NVME_SC_ABORT_COMMAND_LIMIT_EXCEEDED, "ABORT CMD LIMIT EXCEEDED" },
- { NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED, "ASYNC LIMIT EXCEEDED" },
- { NVME_SC_INVALID_FIRMWARE_SLOT, "INVALID FIRMWARE SLOT" },
- { NVME_SC_INVALID_FIRMWARE_IMAGE, "INVALID FIRMWARE IMAGE" },
- { NVME_SC_INVALID_INTERRUPT_VECTOR, "INVALID INTERRUPT VECTOR" },
- { NVME_SC_INVALID_LOG_PAGE, "INVALID LOG PAGE" },
- { NVME_SC_INVALID_FORMAT, "INVALID FORMAT" },
- { NVME_SC_FIRMWARE_REQUIRES_RESET, "FIRMWARE REQUIRES RESET" },
- { NVME_SC_INVALID_QUEUE_DELETION, "INVALID QUEUE DELETION" },
- { NVME_SC_FEATURE_NOT_SAVEABLE, "FEATURE IDENTIFIER NOT SAVEABLE" },
- { NVME_SC_FEATURE_NOT_CHANGEABLE, "FEATURE NOT CHANGEABLE" },
- { NVME_SC_FEATURE_NOT_NS_SPECIFIC, "FEATURE NOT NAMESPACE SPECIFIC" },
- { NVME_SC_FW_ACT_REQUIRES_NVMS_RESET, "FIRMWARE ACTIVATION REQUIRES NVM SUBSYSTEM RESET" },
- { NVME_SC_FW_ACT_REQUIRES_RESET, "FIRMWARE ACTIVATION REQUIRES RESET" },
- { NVME_SC_FW_ACT_REQUIRES_TIME, "FIRMWARE ACTIVATION REQUIRES MAXIMUM TIME VIOLATION" },
- { NVME_SC_FW_ACT_PROHIBITED, "FIRMWARE ACTIVATION PROHIBITED" },
- { NVME_SC_OVERLAPPING_RANGE, "OVERLAPPING RANGE" },
- { NVME_SC_NS_INSUFFICIENT_CAPACITY, "NAMESPACE INSUFFICIENT CAPACITY" },
- { NVME_SC_NS_ID_UNAVAILABLE, "NAMESPACE IDENTIFIER UNAVAILABLE" },
- { NVME_SC_NS_ALREADY_ATTACHED, "NAMESPACE ALREADY ATTACHED" },
- { NVME_SC_NS_IS_PRIVATE, "NAMESPACE IS PRIVATE" },
- { NVME_SC_NS_NOT_ATTACHED, "NS NOT ATTACHED" },
- { NVME_SC_THIN_PROV_NOT_SUPPORTED, "THIN PROVISIONING NOT SUPPORTED" },
- { NVME_SC_CTRLR_LIST_INVALID, "CONTROLLER LIST INVALID" },
- { NVME_SC_SELF_TEST_IN_PROGRESS, "DEVICE SELF-TEST IN PROGRESS" },
- { NVME_SC_BOOT_PART_WRITE_PROHIB, "BOOT PARTITION WRITE PROHIBITED" },
- { NVME_SC_INVALID_CTRLR_ID, "INVALID CONTROLLER IDENTIFIER" },
- { NVME_SC_INVALID_SEC_CTRLR_STATE, "INVALID SECONDARY CONTROLLER STATE" },
- { NVME_SC_INVALID_NUM_OF_CTRLR_RESRC, "INVALID NUMBER OF CONTROLLER RESOURCES" },
- { NVME_SC_INVALID_RESOURCE_ID, "INVALID RESOURCE IDENTIFIER" },
- { NVME_SC_SANITIZE_PROHIBITED_WPMRE, "SANITIZE PROHIBITED WRITE PERSISTENT MEMORY REGION ENABLED" },
- { NVME_SC_ANA_GROUP_ID_INVALID, "ANA GROUP IDENTIFIED INVALID" },
- { NVME_SC_ANA_ATTACH_FAILED, "ANA ATTACH FAILED" },
-
- { NVME_SC_CONFLICTING_ATTRIBUTES, "CONFLICTING ATTRIBUTES" },
- { NVME_SC_INVALID_PROTECTION_INFO, "INVALID PROTECTION INFO" },
- { NVME_SC_ATTEMPTED_WRITE_TO_RO_PAGE, "WRITE TO RO PAGE" },
- { 0xFFFF, "COMMAND SPECIFIC" }
-};
-
-static struct nvme_status_string media_error_status[] = {
- { NVME_SC_WRITE_FAULTS, "WRITE FAULTS" },
- { NVME_SC_UNRECOVERED_READ_ERROR, "UNRECOVERED READ ERROR" },
- { NVME_SC_GUARD_CHECK_ERROR, "GUARD CHECK ERROR" },
- { NVME_SC_APPLICATION_TAG_CHECK_ERROR, "APPLICATION TAG CHECK ERROR" },
- { NVME_SC_REFERENCE_TAG_CHECK_ERROR, "REFERENCE TAG CHECK ERROR" },
- { NVME_SC_COMPARE_FAILURE, "COMPARE FAILURE" },
- { NVME_SC_ACCESS_DENIED, "ACCESS DENIED" },
- { NVME_SC_DEALLOCATED_OR_UNWRITTEN, "DEALLOCATED OR UNWRITTEN LOGICAL BLOCK" },
- { 0xFFFF, "MEDIA ERROR" }
-};
-
-static struct nvme_status_string path_related_status[] = {
- { NVME_SC_INTERNAL_PATH_ERROR, "INTERNAL PATH ERROR" },
- { NVME_SC_ASYMMETRIC_ACCESS_PERSISTENT_LOSS, "ASYMMETRIC ACCESS PERSISTENT LOSS" },
- { NVME_SC_ASYMMETRIC_ACCESS_INACCESSIBLE, "ASYMMETRIC ACCESS INACCESSIBLE" },
- { NVME_SC_ASYMMETRIC_ACCESS_TRANSITION, "ASYMMETRIC ACCESS TRANSITION" },
- { NVME_SC_CONTROLLER_PATHING_ERROR, "CONTROLLER PATHING ERROR" },
- { NVME_SC_HOST_PATHING_ERROR, "HOST PATHING ERROR" },
- { NVME_SC_COMMAND_ABORTED_BY_HOST, "COMMAND ABORTED BY HOST" },
- { 0xFFFF, "PATH RELATED" },
-};
-
static const char *
-get_status_string(uint16_t sct, uint16_t sc)
+get_status_string(const struct nvme_completion *cpl, char *buf, size_t len)
{
- struct nvme_status_string *entry;
-
- switch (sct) {
- case NVME_SCT_GENERIC:
- entry = generic_status;
- break;
- case NVME_SCT_COMMAND_SPECIFIC:
- entry = command_specific_status;
- break;
- case NVME_SCT_MEDIA_ERROR:
- entry = media_error_status;
- break;
- case NVME_SCT_PATH_RELATED:
- entry = path_related_status;
- break;
- case NVME_SCT_VENDOR_SPECIFIC:
- return ("VENDOR SPECIFIC");
- default:
- return ("RESERVED");
- }
+ struct sbuf sb;
- while (entry->sc != 0xFFFF) {
- if (entry->sc == sc)
- return (entry->str);
- entry++;
- }
- return (entry->str);
+ sbuf_new(&sb, buf, len, SBUF_FIXEDLEN);
+ nvme_sc_sbuf(cpl, &sb);
+ if (sbuf_finish(&sb) != 0)
+ return ("");
+ return (buf);
}
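
These wrappers rely on SBUF_FIXEDLEN semantics: the sbuf borrows the caller's stack buffer, and if the formatted text would overflow it, sbuf_finish() fails and the wrapper returns an empty string rather than a silently truncated one. A stand-alone sketch of the pattern (buffer size and message are illustrative):

    char buf[32];
    struct sbuf sb;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    sbuf_printf(&sb, "%s (%02x)", "IDENTIFY", 0x06);
    if (sbuf_finish(&sb) != 0)
    	buf[0] = '\0';		/* overflow: report nothing */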
void
nvme_qpair_print_completion(struct nvme_qpair *qpair,
struct nvme_completion *cpl)
{
- uint8_t sct, sc, crd, m, dnr, p;
+ char buf[64];
+ uint8_t crd, m, dnr, p;
- sct = NVME_STATUS_GET_SCT(cpl->status);
- sc = NVME_STATUS_GET_SC(cpl->status);
crd = NVME_STATUS_GET_CRD(cpl->status);
m = NVME_STATUS_GET_M(cpl->status);
dnr = NVME_STATUS_GET_DNR(cpl->status);
p = NVME_STATUS_GET_P(cpl->status);
- nvme_printf(qpair->ctrlr, "%s (%02x/%02x) crd:%x m:%x dnr:%x p:%d "
+ nvme_printf(qpair->ctrlr, "%s crd:%x m:%x dnr:%x p:%d "
"sqid:%d cid:%d cdw0:%x\n",
- get_status_string(sct, sc), sct, sc, crd, m, dnr, p,
+ get_status_string(cpl, buf, sizeof(buf)), crd, m, dnr, p,
cpl->sqid, cpl->cid, cpl->cdw0);
}
@@ -414,10 +210,12 @@ static void
nvme_qpair_complete_tracker(struct nvme_tracker *tr,
struct nvme_completion *cpl, error_print_t print_on_error)
{
- struct nvme_qpair * qpair = tr->qpair;
+ struct nvme_qpair *qpair = tr->qpair;
struct nvme_request *req;
bool retry, error, retriable;
+ mtx_assert(&qpair->lock, MA_NOTOWNED);
+
req = tr->req;
error = nvme_completion_is_error(cpl);
retriable = nvme_completion_is_retry(cpl);
@@ -480,43 +278,52 @@ nvme_qpair_complete_tracker(struct nvme_tracker *tr,
mtx_unlock(&qpair->lock);
}
+static uint32_t
+nvme_qpair_make_status(uint32_t sct, uint32_t sc, uint32_t dnr)
+{
+ uint32_t status = 0;
+
+ status |= NVMEF(NVME_STATUS_SCT, sct);
+ status |= NVMEF(NVME_STATUS_SC, sc);
+ status |= NVMEF(NVME_STATUS_DNR, dnr);
+ /* M=0 : this is artificial so no data in error log page */
+ /* CRD=0 : this is artificial and no delayed retry support anyway */
+ /* P=0 : phase not checked */
+ return (status);
+}
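
nvme_qpair_make_status() packs the fields with NVMEF(), and the NVME_STATUS_GET_* accessors used elsewhere in this file recover them. A round-trip sanity sketch (the KASSERT messages are illustrative):

    uint32_t status;

    status = nvme_qpair_make_status(NVME_SCT_GENERIC,
        NVME_SC_ABORTED_BY_REQUEST, 1);
    KASSERT(NVME_STATUS_GET_SCT(status) == NVME_SCT_GENERIC, ("bad sct"));
    KASSERT(NVME_STATUS_GET_SC(status) == NVME_SC_ABORTED_BY_REQUEST,
        ("bad sc"));
    KASSERT(NVME_STATUS_GET_DNR(status) != 0, ("bad dnr"));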
+
static void
nvme_qpair_manual_complete_tracker(
struct nvme_tracker *tr, uint32_t sct, uint32_t sc, uint32_t dnr,
error_print_t print_on_error)
{
struct nvme_completion cpl;
+ struct nvme_qpair * qpair = tr->qpair;
- memset(&cpl, 0, sizeof(cpl));
+ mtx_assert(&qpair->lock, MA_NOTOWNED);
- struct nvme_qpair * qpair = tr->qpair;
+ memset(&cpl, 0, sizeof(cpl));
cpl.sqid = qpair->id;
cpl.cid = tr->cid;
- cpl.status |= NVMEF(NVME_STATUS_SCT, sct);
- cpl.status |= NVMEF(NVME_STATUS_SC, sc);
- cpl.status |= NVMEF(NVME_STATUS_DNR, dnr);
- /* M=0 : this is artificial so no data in error log page */
- /* CRD=0 : this is artificial and no delayed retry support anyway */
- /* P=0 : phase not checked */
+ cpl.status = nvme_qpair_make_status(sct, sc, dnr);
nvme_qpair_complete_tracker(tr, &cpl, print_on_error);
}
-void
+static void
nvme_qpair_manual_complete_request(struct nvme_qpair *qpair,
- struct nvme_request *req, uint32_t sct, uint32_t sc)
+ struct nvme_request *req, uint32_t sct, uint32_t sc, uint32_t dnr,
+ error_print_t print_on_error)
{
struct nvme_completion cpl;
bool error;
memset(&cpl, 0, sizeof(cpl));
cpl.sqid = qpair->id;
- cpl.status |= NVMEF(NVME_STATUS_SCT, sct);
- cpl.status |= NVMEF(NVME_STATUS_SC, sc);
-
+ cpl.status = nvme_qpair_make_status(sct, sc, dnr);
error = nvme_completion_is_error(&cpl);
- if (error) {
+ if (error && print_on_error == ERROR_PRINT_ALL) {
nvme_qpair_print_command(qpair, &req->cmd);
nvme_qpair_print_completion(qpair, &cpl);
}
@@ -679,7 +486,7 @@ _nvme_qpair_process_completions(struct nvme_qpair *qpair)
bool
nvme_qpair_process_completions(struct nvme_qpair *qpair)
{
- bool done;
+ bool done = false;
/*
	 * Interlock with reset / recovery code. This is a usually uncontended
@@ -687,12 +494,12 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
* and to prevent races with the recovery process called from a timeout
* context.
*/
- if (!mtx_trylock(&qpair->recovery)) {
- qpair->num_recovery_nolock++;
- return (false);
- }
+ mtx_lock(&qpair->recovery);
- done = _nvme_qpair_process_completions(qpair);
+ if (__predict_true(qpair->recovery_state == RECOVERY_NONE))
+ done = _nvme_qpair_process_completions(qpair);
+ else
+		qpair->num_recovery_nolock++;	/* XXX likely need to rename */
mtx_unlock(&qpair->recovery);
@@ -950,27 +757,26 @@ nvme_admin_qpair_abort_aers(struct nvme_qpair *qpair)
/*
* nvme_complete_tracker must be called without the qpair lock held. It
* takes the lock to adjust outstanding_tr list, so make sure we don't
- * have it yet (since this is a general purpose routine). We take the
- * lock to make the list traverse safe, but have to drop the lock to
- * complete any AER. We restart the list scan when we do this to make
- * this safe. There's interlock with the ISR so we know this tracker
- * won't be completed twice.
+ * have it yet. We need the lock to make the list traverse safe, but
+ * have to drop the lock to complete any AER. We restart the list scan
+ * when we do this to make this safe. There's interlock with the ISR so
+ * we know this tracker won't be completed twice.
*/
mtx_assert(&qpair->lock, MA_NOTOWNED);
mtx_lock(&qpair->lock);
tr = TAILQ_FIRST(&qpair->outstanding_tr);
while (tr != NULL) {
- if (tr->req->cmd.opc == NVME_OPC_ASYNC_EVENT_REQUEST) {
- mtx_unlock(&qpair->lock);
- nvme_qpair_manual_complete_tracker(tr,
- NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
- ERROR_PRINT_NONE);
- mtx_lock(&qpair->lock);
- tr = TAILQ_FIRST(&qpair->outstanding_tr);
- } else {
+ if (tr->req->cmd.opc != NVME_OPC_ASYNC_EVENT_REQUEST) {
tr = TAILQ_NEXT(tr, tailq);
+ continue;
}
+ mtx_unlock(&qpair->lock);
+ nvme_qpair_manual_complete_tracker(tr,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_SQ_DELETION, 0,
+ ERROR_PRINT_NONE);
+ mtx_lock(&qpair->lock);
+ tr = TAILQ_FIRST(&qpair->outstanding_tr);
}
mtx_unlock(&qpair->lock);
}
@@ -987,7 +793,6 @@ nvme_admin_qpair_destroy(struct nvme_qpair *qpair)
void
nvme_io_qpair_destroy(struct nvme_qpair *qpair)
{
-
nvme_qpair_destroy(qpair);
}
@@ -997,22 +802,35 @@ nvme_abort_complete(void *arg, const struct nvme_completion *status)
struct nvme_tracker *tr = arg;
/*
- * If cdw0 == 1, the controller was not able to abort the command
- * we requested. We still need to check the active tracker array,
- * to cover race where I/O timed out at same time controller was
- * completing the I/O.
+ * If cdw0 bit 0 == 1, the controller was not able to abort the command
+ * we requested. We still need to check the active tracker array, to
+	 * cover the race where the I/O timed out at the same time the
+	 * controller was completing the I/O. An abort command is always on
+	 * the admin queue, but it affects either an admin or an I/O queue,
+	 * so take the appropriate qpair lock
+ * for the original command's queue, since we'll need it to avoid races
+ * with the completion code and to complete the command manually.
*/
- if (status->cdw0 == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
+ mtx_lock(&tr->qpair->lock);
+ if ((status->cdw0 & 1) == 1 && tr->qpair->act_tr[tr->cid] != NULL) {
/*
- * An I/O has timed out, and the controller was unable to
- * abort it for some reason. Construct a fake completion
- * status, and then complete the I/O's tracker manually.
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
*/
nvme_printf(tr->qpair->ctrlr,
"abort command failed, aborting command manually\n");
nvme_qpair_manual_complete_tracker(tr,
NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 0, ERROR_PRINT_ALL);
}
+ /*
+ * XXX We don't check status for the possible 'Could not abort because
+ * excess aborts were submitted to the controller'. We don't prevent
+	 * that, either. Documenting it here for the future, since the standard
+	 * is squishy and only says 'may generate', implying anything is
+	 * possible, including hangs, if you exceed the ACL.
+ */
+ mtx_unlock(&tr->qpair->lock);
}
static void
@@ -1022,8 +840,9 @@ nvme_qpair_timeout(void *arg)
struct nvme_controller *ctrlr = qpair->ctrlr;
struct nvme_tracker *tr;
sbintime_t now;
- bool idle = false;
- bool needs_reset;
+ bool idle = true;
+ bool is_admin = qpair == &ctrlr->adminq;
+ bool fast;
uint32_t csts;
uint8_t cfs;
@@ -1034,9 +853,10 @@ nvme_qpair_timeout(void *arg)
* failure processing that races with the qpair timeout will fail
* safely.
*/
- if (qpair->ctrlr->is_failed) {
+ if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) {
nvme_printf(qpair->ctrlr,
- "Failed controller, stopping watchdog timeout.\n");
+ "%sFailed controller, stopping watchdog timeout.\n",
+ is_admin ? "Complete " : "");
qpair->timer_armed = false;
return;
}
@@ -1069,23 +889,35 @@ nvme_qpair_timeout(void *arg)
*/
csts = nvme_mmio_read_4(ctrlr, csts);
cfs = NVMEV(NVME_CSTS_REG_CFS, csts);
- if (csts == NVME_GONE || cfs == 1)
- goto do_reset;
+ if (csts == NVME_GONE || cfs == 1) {
+ /*
+ * We've had a command timeout that we weren't able to
+ * abort or we have aborts disabled and any command
+ * timed out.
+ *
+ * If we get here due to a possible surprise hot-unplug
+ * event, then we let nvme_ctrlr_reset confirm and fail
+ * the controller.
+ */
+do_reset:
+ nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
+ (csts == 0xffffffff) ? " and possible hot unplug" :
+ (cfs ? " and fatal error status" : ""));
+ qpair->recovery_state = RECOVERY_WAITING;
+ nvme_ctrlr_reset(ctrlr);
+ idle = false;
+ break;
+ }
- /*
- * Process completions. We already have the recovery lock, so
- * call the locked version.
- */
- _nvme_qpair_process_completions(qpair);
/*
- * Check to see if we need to timeout any commands. If we do, then
- * we also enter a recovery phase.
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
*/
- now = getsbinuptime();
- needs_reset = false;
- idle = true;
+ fast = false;
mtx_lock(&qpair->lock);
+ now = getsbinuptime();
TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
/*
* Skip async commands, they are posted to the card for
@@ -1093,48 +925,83 @@ nvme_qpair_timeout(void *arg)
*/
if (tr->deadline == SBT_MAX)
continue;
- if (now > tr->deadline) {
- if (tr->req->cb_fn != nvme_abort_complete &&
- ctrlr->enable_aborts) {
- /*
- * This isn't an abort command, ask
- * for a hardware abort.
- */
- nvme_ctrlr_cmd_abort(ctrlr, tr->cid,
- qpair->id, nvme_abort_complete, tr);
- } else {
- /*
- * Otherwise we have a live command in
- * the card (either one we couldn't
- * abort, or aborts weren't enabled).
- * The only safe way to proceed is to do
- * a reset.
- */
- needs_reset = true;
- }
- } else {
- idle = false;
- }
+
+ /*
+ * If the first real transaction is not in timeout, then
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
}
mtx_unlock(&qpair->lock);
- if (!needs_reset)
+ if (idle || fast)
break;
/*
- * We've had a command timeout that we weren't able to abort
- *
- * If we get here due to a possible surprise hot-unplug event,
- * then we let nvme_ctrlr_reset confirm and fail the
- * controller.
+ * There's a stale transaction at the start of the queue whose
+	 * deadline has passed. Poll the completions as a last-ditch
+	 * effort in case an interrupt has been missed. If transactions
+	 * were found, warn the user of possible interrupt issues, but
+	 * only once per controller.
+ */
+ if (_nvme_qpair_process_completions(qpair) && !ctrlr->isr_warned) {
+ nvme_printf(ctrlr, "System interrupt issues?\n");
+ ctrlr->isr_warned = true;
+ }
+
+ /*
+	 * Now that we've run the ISR, re-check to see if there are any
+	 * timed-out commands, and abort them or reset the card if so.
*/
- do_reset:
- nvme_printf(ctrlr, "Resetting controller due to a timeout%s.\n",
- (csts == 0xffffffff) ? " and possible hot unplug" :
- (cfs ? " and fatal error status" : ""));
- qpair->recovery_state = RECOVERY_WAITING;
- nvme_ctrlr_reset(ctrlr);
- idle = false; /* We want to keep polling */
+ mtx_lock(&qpair->lock);
+ idle = true;
+ TAILQ_FOREACH(tr, &qpair->outstanding_tr, tailq) {
+ /*
+ * Skip async commands, they are posted to the card for
+ * an indefinite amount of time and have no deadline.
+ */
+ if (tr->deadline == SBT_MAX)
+ continue;
+
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, but we eventually timeout then).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout expired, abort it or reset controller.
+ */
+ if (ctrlr->enable_aborts &&
+ tr->req->cb_fn != nvme_abort_complete) {
+ /*
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the admin
+ * queue which will reset the card if it
+ * times out.
+ */
+ nvme_ctrlr_cmd_abort(ctrlr, tr->cid, qpair->id,
+ nvme_abort_complete, tr);
+ } else {
+ /*
+ * We have a live command in the card (either
+ * one we couldn't abort, or aborts weren't
+ * enabled). We can only reset.
+ */
+ mtx_unlock(&qpair->lock);
+ goto do_reset;
+ }
+ }
+ mtx_unlock(&qpair->lock);
break;
+
case RECOVERY_WAITING:
/*
* These messages aren't interesting while we're suspended. We
@@ -1201,7 +1068,7 @@ nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
- bus_space_write_4(qpair->ctrlr->bus_tag, qpair->ctrlr->bus_handle,
+ bus_space_write_4(ctrlr->bus_tag, ctrlr->bus_handle,
qpair->sq_tdbl_off, qpair->sq_tail);
qpair->num_cmds++;
}
@@ -1259,47 +1126,41 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
struct nvme_tracker *tr;
int err = 0;
+ bool is_admin = qpair == &qpair->ctrlr->adminq;
mtx_assert(&qpair->lock, MA_OWNED);
tr = TAILQ_FIRST(&qpair->free_tr);
req->qpair = qpair;
- if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
- /*
- * No tracker is available, or the qpair is disabled due to an
- * in-progress controller-level reset. If we lose the race with
- * recovery_state, then we may add an extra request to the queue
- * which will be resubmitted later. We only set recovery_state
- * to NONE with qpair->lock also held, so if we observe that the
- * state is not NONE, we know it can't transition to NONE below
- * when we've submitted the request to hardware.
- *
- * Also, as part of the failure process, we set recovery_state
- * to RECOVERY_WAITING, so we check here to see if we've failed
- * the controller. We set it before we call the qpair_fail
- * functions, which take out the lock lock before messing with
- * queued_req. Since we hold that lock, we know it's safe to
- * either fail directly, or queue the failure should is_failed
- * be stale. If we lose the race reading is_failed, then
- * nvme_qpair_fail will fail the queued request.
- */
+ /*
+	 * The controller has failed, so fail the request. Note that this races
+ * the recovery / timeout code. Since we hold the qpair lock, we know
+ * it's safe to fail directly. is_failed is set when we fail the
+ * controller. It is only ever reset in the ioctl reset controller
+ * path, which is safe to race (for failed controllers, we make no
+ * guarantees about bringing it out of failed state relative to other
+ * commands). We try hard to allow admin commands when the entire
+ * controller hasn't failed, only something related to I/O queues.
+ */
+ if (is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed) {
+ nvme_qpair_manual_complete_request(qpair, req,
+ NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, 1,
+ ERROR_PRINT_NONE);
+ return;
+ }
- if (qpair->ctrlr->is_failed) {
- /*
- * The controller has failed, so fail the request.
- */
- nvme_qpair_manual_complete_request(qpair, req,
- NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST);
- } else {
- /*
- * Put the request on the qpair's request queue to be
- * processed when a tracker frees up via a command
- * completion or when the controller reset is
- * completed.
- */
- STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
- }
+ /*
+ * No tracker is available, or the qpair is disabled due to an
+ * in-progress controller-level reset. If we lose the race with
+ * recovery_state, then we may add an extra request to the queue which
+ * will be resubmitted later. We only set recovery_state to NONE with
+ * qpair->lock also held, so if we observe that the state is not NONE,
+	 * we know it won't transition back to NONE without retrying the queued
+	 * requests.
+ */
+ if (tr == NULL || qpair->recovery_state != RECOVERY_NONE) {
+ STAILQ_INSERT_TAIL(&qpair->queued_req, req, stailq);
return;
}
@@ -1313,6 +1174,11 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
return;
}
+ /*
+	 * tr->deadline is updated when nvme_payload_map calls
+	 * nvme_qpair_submit_tracker (we call it directly above
+ * when there's no map to load).
+ */
err = bus_dmamap_load_mem(tr->qpair->dma_tag_payload,
tr->payload_dma_map, &req->payload, nvme_payload_map, tr, 0);
if (err != 0) {
@@ -1335,7 +1201,6 @@ _nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
void
nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
{
-
mtx_lock(&qpair->lock);
_nvme_qpair_submit_request(qpair, req);
mtx_unlock(&qpair->lock);
@@ -1344,11 +1209,13 @@ nvme_qpair_submit_request(struct nvme_qpair *qpair, struct nvme_request *req)
static void
nvme_qpair_enable(struct nvme_qpair *qpair)
{
+ bool is_admin __unused = qpair == &qpair->ctrlr->adminq;
+
if (mtx_initialized(&qpair->recovery))
mtx_assert(&qpair->recovery, MA_OWNED);
if (mtx_initialized(&qpair->lock))
mtx_assert(&qpair->lock, MA_OWNED);
- KASSERT(!qpair->ctrlr->is_failed,
+ KASSERT(!(is_admin ? qpair->ctrlr->is_failed_admin : qpair->ctrlr->is_failed),
("Enabling a failed qpair\n"));
qpair->recovery_state = RECOVERY_NONE;
@@ -1357,7 +1224,6 @@ nvme_qpair_enable(struct nvme_qpair *qpair)
void
nvme_qpair_reset(struct nvme_qpair *qpair)
{
-
qpair->sq_head = qpair->sq_tail = qpair->cq_head = 0;
/*
@@ -1515,7 +1381,7 @@ nvme_qpair_fail(struct nvme_qpair *qpair)
STAILQ_REMOVE_HEAD(&qpair->queued_req, stailq);
mtx_unlock(&qpair->lock);
nvme_qpair_manual_complete_request(qpair, req, NVME_SCT_GENERIC,
- NVME_SC_ABORTED_BY_REQUEST);
+ NVME_SC_ABORTED_BY_REQUEST, 1, ERROR_PRINT_ALL);
mtx_lock(&qpair->lock);
}
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index f561756f99b7..7693aa6d54d3 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -96,15 +96,16 @@ nvme_sim_nvmeio(struct cam_sim *sim, union ccb *ccb)
/* SG LIST ??? */
if ((nvmeio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
req = nvme_allocate_request_bio((struct bio *)payload,
- nvme_sim_nvmeio_done, ccb);
+ M_NOWAIT, nvme_sim_nvmeio_done, ccb);
else if ((nvmeio->ccb_h.flags & CAM_DATA_SG) == CAM_DATA_SG)
- req = nvme_allocate_request_ccb(ccb, nvme_sim_nvmeio_done, ccb);
+ req = nvme_allocate_request_ccb(ccb, M_NOWAIT,
+ nvme_sim_nvmeio_done, ccb);
else if (payload == NULL)
- req = nvme_allocate_request_null(nvme_sim_nvmeio_done, ccb);
+ req = nvme_allocate_request_null(M_NOWAIT, nvme_sim_nvmeio_done,
+ ccb);
else
- req = nvme_allocate_request_vaddr(payload, size,
+ req = nvme_allocate_request_vaddr(payload, size, M_NOWAIT,
nvme_sim_nvmeio_done, ccb);
-
if (req == NULL) {
nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
xpt_done(ccb);
@@ -203,7 +204,7 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
cpi->xport_specific.nvme.slot = pci_get_slot(dev);
cpi->xport_specific.nvme.function = pci_get_function(dev);
cpi->xport_specific.nvme.extra = 0;
- strncpy(cpi->xport_specific.nvme.dev_name, device_get_nameunit(dev),
+ strlcpy(cpi->xport_specific.nvme.dev_name, device_get_nameunit(dev),
sizeof(cpi->xport_specific.nvme.dev_name));
cpi->hba_vendor = pci_get_vendor(dev);
cpi->hba_device = pci_get_device(dev);
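
The strncpy-to-strlcpy change matters when device_get_nameunit() is at least as long as dev_name: strncpy() leaves the destination unterminated in that case, while strlcpy() truncates and always NUL-terminates. An illustrative fragment (buffer and string are made up):

    char dst[8];

    strncpy(dst, "nvme0nvme1", sizeof(dst));	/* dst not NUL-terminated */
    strlcpy(dst, "nvme0nvme1", sizeof(dst));	/* dst == "nvme0nv", terminated */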
@@ -268,7 +269,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
ccb->ccb_h.status = CAM_REQ_CMP;
break;
case XPT_NVME_IO: /* Execute the requested I/O operation */
- case XPT_NVME_ADMIN: /* or Admin operation */
if (ctrlr->is_failed) {
/*
* I/O came in while we were failing the drive, so drop
@@ -279,6 +279,18 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
}
nvme_sim_nvmeio(sim, ccb);
return; /* no done */
+ case XPT_NVME_ADMIN: /* or Admin operation */
+ if (ctrlr->is_failed_admin) {
+ /*
+ * Admin request came in when we can't send admin
+			 * commands, so drop it. Once failure is complete, we'll
+ * be destroyed.
+ */
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ break;
+ }
+ nvme_sim_nvmeio(sim, ccb);
+ return; /* no done */
default:
ccb->ccb_h.status = CAM_REQ_INVALID;
break;
@@ -289,7 +301,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
static void
nvme_sim_poll(struct cam_sim *sim)
{
-
nvme_ctrlr_poll(sim2ctrlr(sim));
}
@@ -380,7 +391,7 @@ nvme_sim_controller_fail(void *ctrlr_arg)
struct nvme_consumer *consumer_cookie;
static void
-nvme_sim_init(void)
+nvme_sim_init(void *dummy __unused)
{
if (nvme_use_nvd)
return;
@@ -393,7 +404,7 @@ SYSINIT(nvme_sim_register, SI_SUB_DRIVERS, SI_ORDER_ANY,
nvme_sim_init, NULL);
static void
-nvme_sim_uninit(void)
+nvme_sim_uninit(void *dummy __unused)
{
if (nvme_use_nvd)
return;
diff --git a/sys/dev/nvme/nvme_sysctl.c b/sys/dev/nvme/nvme_sysctl.c
index d6452a2e5492..50d19e730a16 100644
--- a/sys/dev/nvme/nvme_sysctl.c
+++ b/sys/dev/nvme/nvme_sysctl.c
@@ -30,6 +30,7 @@
#include "opt_nvme.h"
#include <sys/param.h>
+#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/sysctl.h>
@@ -152,7 +153,6 @@ nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
static void
nvme_qpair_reset_stats(struct nvme_qpair *qpair)
{
-
/*
* Reset the values. Due to sanity checks in
* nvme_qpair_process_completions, we reset the number of interrupt
@@ -175,8 +175,10 @@ nvme_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
num_cmds = ctrlr->adminq.num_cmds;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num_cmds += ctrlr->ioq[i].num_cmds;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_cmds += ctrlr->ioq[i].num_cmds;
+ }
return (sysctl_handle_64(oidp, &num_cmds, 0, req));
}
@@ -190,8 +192,10 @@ nvme_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
num_intr_handler_calls = ctrlr->adminq.num_intr_handler_calls;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_intr_handler_calls += ctrlr->ioq[i].num_intr_handler_calls;
+ }
return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
}
@@ -205,8 +209,10 @@ nvme_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
num_retries = ctrlr->adminq.num_retries;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num_retries += ctrlr->ioq[i].num_retries;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_retries += ctrlr->ioq[i].num_retries;
+ }
return (sysctl_handle_64(oidp, &num_retries, 0, req));
}
@@ -220,8 +226,10 @@ nvme_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
num_failures = ctrlr->adminq.num_failures;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num_failures += ctrlr->ioq[i].num_failures;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_failures += ctrlr->ioq[i].num_failures;
+ }
return (sysctl_handle_64(oidp, &num_failures, 0, req));
}
@@ -235,8 +243,10 @@ nvme_sysctl_num_ignored(SYSCTL_HANDLER_ARGS)
num_ignored = ctrlr->adminq.num_ignored;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num_ignored += ctrlr->ioq[i].num_ignored;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_ignored += ctrlr->ioq[i].num_ignored;
+ }
return (sysctl_handle_64(oidp, &num_ignored, 0, req));
}
@@ -250,8 +260,10 @@ nvme_sysctl_num_recovery_nolock(SYSCTL_HANDLER_ARGS)
num = ctrlr->adminq.num_recovery_nolock;
- for (i = 0; i < ctrlr->num_io_queues; i++)
- num += ctrlr->ioq[i].num_recovery_nolock;
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num += ctrlr->ioq[i].num_recovery_nolock;
+ }
return (sysctl_handle_64(oidp, &num, 0, req));
}
@@ -270,8 +282,10 @@ nvme_sysctl_reset_stats(SYSCTL_HANDLER_ARGS)
if (val != 0) {
nvme_qpair_reset_stats(&ctrlr->adminq);
- for (i = 0; i < ctrlr->num_io_queues; i++)
- nvme_qpair_reset_stats(&ctrlr->ioq[i]);
+ if (ctrlr->ioq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ nvme_qpair_reset_stats(&ctrlr->ioq[i]);
+ }
}
return (0);
@@ -318,6 +332,10 @@ nvme_sysctl_initialize_queue(struct nvme_qpair *qpair,
CTLFLAG_RD, &qpair->num_recovery_nolock,
"Number of times that we failed to lock recovery in the ISR");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "recovery",
+ CTLFLAG_RW, &qpair->recovery_state, 0,
+ "Current recovery state of the queue");
+
SYSCTL_ADD_PROC(ctrlr_ctx, que_list, OID_AUTO,
"dump_debug", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
qpair, 0, nvme_sysctl_dump_debug, "IU", "Dump debug data");
@@ -327,8 +345,8 @@ void
nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
{
struct sysctl_ctx_list *ctrlr_ctx;
- struct sysctl_oid *ctrlr_tree, *que_tree;
- struct sysctl_oid_list *ctrlr_list;
+ struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
+ struct sysctl_oid_list *ctrlr_list, *ioq_list;
#define QUEUE_NAME_LENGTH 16
char queue_name[QUEUE_NAME_LENGTH];
int i;
@@ -407,16 +425,35 @@ nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr)
CTLFLAG_RD, &ctrlr->cap_hi, 0,
"Hi 32-bits of capacities for the drive");
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "fail_on_reset",
+ CTLFLAG_RD, &ctrlr->fail_on_reset, 0,
+ "Pretend the next reset fails and fail the controller");
+
que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "adminq",
CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");
nvme_sysctl_initialize_queue(&ctrlr->adminq, ctrlr_ctx, que_tree);
- for (i = 0; i < ctrlr->num_io_queues; i++) {
- snprintf(queue_name, QUEUE_NAME_LENGTH, "ioq%d", i);
- que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
- queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue");
- nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
- que_tree);
+ /*
+ * Make sure that we've constructed the I/O queues before setting up the
+	 * sysctls. Failed controllers won't allocate them, but we still want
+	 * the rest of the sysctls for diagnosis.
+ */
+ if (ctrlr->ioq != NULL) {
+ ioq_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "ioq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "I/O Queues");
+ ioq_list = SYSCTL_CHILDREN(ioq_tree);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ snprintf(queue_name, QUEUE_NAME_LENGTH, "%d", i);
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ioq_list, OID_AUTO,
+ queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "IO Queue");
+ nvme_sysctl_initialize_queue(&ctrlr->ioq[i], ctrlr_ctx,
+ que_tree);
+ }
}
+
+ SYSCTL_ADD_COUNTER_U64(ctrlr_ctx, ctrlr_list, OID_AUTO, "alignment_splits",
+ CTLFLAG_RD, &ctrlr->alignment_splits,
+ "Number of times we split the I/O alignment for drives with preferred alignment");
}
diff --git a/sys/dev/nvme/nvme_util.c b/sys/dev/nvme/nvme_util.c
index 47d84e5b6957..cb0ba729ac96 100644
--- a/sys/dev/nvme/nvme_util.c
+++ b/sys/dev/nvme/nvme_util.c
@@ -5,6 +5,8 @@
* Copyright (C) 1997 Justin T. Gibbs
* All rights reserved.
*
+ * Copyright (c) 2023-2025 Chelsio Communications, Inc.
+ *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -28,8 +30,244 @@
*/
#include <sys/param.h>
+#include <sys/sbuf.h>
#include <dev/nvme/nvme.h>
+#define OPC_ENTRY(x) [NVME_OPC_ ## x] = #x
+
+static const char *admin_opcode[256] = {
+ OPC_ENTRY(DELETE_IO_SQ),
+ OPC_ENTRY(CREATE_IO_SQ),
+ OPC_ENTRY(GET_LOG_PAGE),
+ OPC_ENTRY(DELETE_IO_CQ),
+ OPC_ENTRY(CREATE_IO_CQ),
+ OPC_ENTRY(IDENTIFY),
+ OPC_ENTRY(ABORT),
+ OPC_ENTRY(SET_FEATURES),
+ OPC_ENTRY(GET_FEATURES),
+ OPC_ENTRY(ASYNC_EVENT_REQUEST),
+ OPC_ENTRY(NAMESPACE_MANAGEMENT),
+ OPC_ENTRY(FIRMWARE_ACTIVATE),
+ OPC_ENTRY(FIRMWARE_IMAGE_DOWNLOAD),
+ OPC_ENTRY(DEVICE_SELF_TEST),
+ OPC_ENTRY(NAMESPACE_ATTACHMENT),
+ OPC_ENTRY(KEEP_ALIVE),
+ OPC_ENTRY(DIRECTIVE_SEND),
+ OPC_ENTRY(DIRECTIVE_RECEIVE),
+ OPC_ENTRY(VIRTUALIZATION_MANAGEMENT),
+ OPC_ENTRY(NVME_MI_SEND),
+ OPC_ENTRY(NVME_MI_RECEIVE),
+ OPC_ENTRY(CAPACITY_MANAGEMENT),
+ OPC_ENTRY(LOCKDOWN),
+ OPC_ENTRY(DOORBELL_BUFFER_CONFIG),
+ OPC_ENTRY(FABRICS_COMMANDS),
+ OPC_ENTRY(FORMAT_NVM),
+ OPC_ENTRY(SECURITY_SEND),
+ OPC_ENTRY(SECURITY_RECEIVE),
+ OPC_ENTRY(SANITIZE),
+ OPC_ENTRY(GET_LBA_STATUS),
+};
+
+static const char *nvm_opcode[256] = {
+ OPC_ENTRY(FLUSH),
+ OPC_ENTRY(WRITE),
+ OPC_ENTRY(READ),
+ OPC_ENTRY(WRITE_UNCORRECTABLE),
+ OPC_ENTRY(COMPARE),
+ OPC_ENTRY(WRITE_ZEROES),
+ OPC_ENTRY(DATASET_MANAGEMENT),
+ OPC_ENTRY(VERIFY),
+ OPC_ENTRY(RESERVATION_REGISTER),
+ OPC_ENTRY(RESERVATION_REPORT),
+ OPC_ENTRY(RESERVATION_ACQUIRE),
+ OPC_ENTRY(RESERVATION_RELEASE),
+ OPC_ENTRY(COPY),
+};
+
+#define SC_ENTRY(x) [NVME_SC_ ## x] = #x
+
+static const char *generic_status[256] = {
+ SC_ENTRY(SUCCESS),
+ SC_ENTRY(INVALID_OPCODE),
+ SC_ENTRY(INVALID_FIELD),
+ SC_ENTRY(COMMAND_ID_CONFLICT),
+ SC_ENTRY(DATA_TRANSFER_ERROR),
+ SC_ENTRY(ABORTED_POWER_LOSS),
+ SC_ENTRY(INTERNAL_DEVICE_ERROR),
+ SC_ENTRY(ABORTED_BY_REQUEST),
+ SC_ENTRY(ABORTED_SQ_DELETION),
+ SC_ENTRY(ABORTED_FAILED_FUSED),
+ SC_ENTRY(ABORTED_MISSING_FUSED),
+ SC_ENTRY(INVALID_NAMESPACE_OR_FORMAT),
+ SC_ENTRY(COMMAND_SEQUENCE_ERROR),
+ SC_ENTRY(INVALID_SGL_SEGMENT_DESCR),
+ SC_ENTRY(INVALID_NUMBER_OF_SGL_DESCR),
+ SC_ENTRY(DATA_SGL_LENGTH_INVALID),
+ SC_ENTRY(METADATA_SGL_LENGTH_INVALID),
+ SC_ENTRY(SGL_DESCRIPTOR_TYPE_INVALID),
+ SC_ENTRY(INVALID_USE_OF_CMB),
+ SC_ENTRY(PRP_OFFET_INVALID),
+ SC_ENTRY(ATOMIC_WRITE_UNIT_EXCEEDED),
+ SC_ENTRY(OPERATION_DENIED),
+ SC_ENTRY(SGL_OFFSET_INVALID),
+ SC_ENTRY(HOST_ID_INCONSISTENT_FORMAT),
+ SC_ENTRY(KEEP_ALIVE_TIMEOUT_EXPIRED),
+ SC_ENTRY(KEEP_ALIVE_TIMEOUT_INVALID),
+ SC_ENTRY(ABORTED_DUE_TO_PREEMPT),
+ SC_ENTRY(SANITIZE_FAILED),
+ SC_ENTRY(SANITIZE_IN_PROGRESS),
+ SC_ENTRY(SGL_DATA_BLOCK_GRAN_INVALID),
+ SC_ENTRY(NOT_SUPPORTED_IN_CMB),
+ SC_ENTRY(NAMESPACE_IS_WRITE_PROTECTED),
+ SC_ENTRY(COMMAND_INTERRUPTED),
+ SC_ENTRY(TRANSIENT_TRANSPORT_ERROR),
+
+ SC_ENTRY(LBA_OUT_OF_RANGE),
+ SC_ENTRY(CAPACITY_EXCEEDED),
+ SC_ENTRY(NAMESPACE_NOT_READY),
+ SC_ENTRY(RESERVATION_CONFLICT),
+ SC_ENTRY(FORMAT_IN_PROGRESS),
+};
+
+static const char *command_specific_status[256] = {
+ SC_ENTRY(COMPLETION_QUEUE_INVALID),
+ SC_ENTRY(INVALID_QUEUE_IDENTIFIER),
+ SC_ENTRY(MAXIMUM_QUEUE_SIZE_EXCEEDED),
+ SC_ENTRY(ABORT_COMMAND_LIMIT_EXCEEDED),
+ SC_ENTRY(ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED),
+ SC_ENTRY(INVALID_FIRMWARE_SLOT),
+ SC_ENTRY(INVALID_FIRMWARE_IMAGE),
+ SC_ENTRY(INVALID_INTERRUPT_VECTOR),
+ SC_ENTRY(INVALID_LOG_PAGE),
+ SC_ENTRY(INVALID_FORMAT),
+ SC_ENTRY(FIRMWARE_REQUIRES_RESET),
+ SC_ENTRY(INVALID_QUEUE_DELETION),
+ SC_ENTRY(FEATURE_NOT_SAVEABLE),
+ SC_ENTRY(FEATURE_NOT_CHANGEABLE),
+ SC_ENTRY(FEATURE_NOT_NS_SPECIFIC),
+ SC_ENTRY(FW_ACT_REQUIRES_NVMS_RESET),
+ SC_ENTRY(FW_ACT_REQUIRES_RESET),
+ SC_ENTRY(FW_ACT_REQUIRES_TIME),
+ SC_ENTRY(FW_ACT_PROHIBITED),
+ SC_ENTRY(OVERLAPPING_RANGE),
+ SC_ENTRY(NS_INSUFFICIENT_CAPACITY),
+ SC_ENTRY(NS_ID_UNAVAILABLE),
+ SC_ENTRY(NS_ALREADY_ATTACHED),
+ SC_ENTRY(NS_IS_PRIVATE),
+ SC_ENTRY(NS_NOT_ATTACHED),
+ SC_ENTRY(THIN_PROV_NOT_SUPPORTED),
+ SC_ENTRY(CTRLR_LIST_INVALID),
+ SC_ENTRY(SELF_TEST_IN_PROGRESS),
+ SC_ENTRY(BOOT_PART_WRITE_PROHIB),
+ SC_ENTRY(INVALID_CTRLR_ID),
+ SC_ENTRY(INVALID_SEC_CTRLR_STATE),
+ SC_ENTRY(INVALID_NUM_OF_CTRLR_RESRC),
+ SC_ENTRY(INVALID_RESOURCE_ID),
+ SC_ENTRY(SANITIZE_PROHIBITED_WPMRE),
+ SC_ENTRY(ANA_GROUP_ID_INVALID),
+ SC_ENTRY(ANA_ATTACH_FAILED),
+
+ SC_ENTRY(CONFLICTING_ATTRIBUTES),
+ SC_ENTRY(INVALID_PROTECTION_INFO),
+ SC_ENTRY(ATTEMPTED_WRITE_TO_RO_PAGE),
+};
+
+static const char *media_error_status[256] = {
+ SC_ENTRY(WRITE_FAULTS),
+ SC_ENTRY(UNRECOVERED_READ_ERROR),
+ SC_ENTRY(GUARD_CHECK_ERROR),
+ SC_ENTRY(APPLICATION_TAG_CHECK_ERROR),
+ SC_ENTRY(REFERENCE_TAG_CHECK_ERROR),
+ SC_ENTRY(COMPARE_FAILURE),
+ SC_ENTRY(ACCESS_DENIED),
+ SC_ENTRY(DEALLOCATED_OR_UNWRITTEN),
+};
+
+static const char *path_related_status[256] = {
+ SC_ENTRY(INTERNAL_PATH_ERROR),
+ SC_ENTRY(ASYMMETRIC_ACCESS_PERSISTENT_LOSS),
+ SC_ENTRY(ASYMMETRIC_ACCESS_INACCESSIBLE),
+ SC_ENTRY(ASYMMETRIC_ACCESS_TRANSITION),
+ SC_ENTRY(CONTROLLER_PATHING_ERROR),
+ SC_ENTRY(HOST_PATHING_ERROR),
+ SC_ENTRY(COMMAND_ABORTED_BY_HOST),
+};
+
+void
+nvme_opcode_sbuf(bool admin, uint8_t opc, struct sbuf *sb)
+{
+ const char *s, *type;
+
+ if (admin) {
+ s = admin_opcode[opc];
+ type = "ADMIN";
+ } else {
+ s = nvm_opcode[opc];
+ type = "NVM";
+ }
+ if (s == NULL)
+ sbuf_printf(sb, "%s (%02x)", type, opc);
+ else
+ sbuf_printf(sb, "%s (%02x)", s, opc);
+}
+
+void
+nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
+{
+ const char *s, *type;
+ uint16_t status, sc, sct;
+
+ status = le16toh(cpl->status);
+ sc = NVME_STATUS_GET_SC(status);
+ sct = NVME_STATUS_GET_SCT(status);
+ switch (sct) {
+ case NVME_SCT_GENERIC:
+ s = generic_status[sc];
+ type = "GENERIC";
+ break;
+ case NVME_SCT_COMMAND_SPECIFIC:
+ s = command_specific_status[sc];
+ type = "COMMAND SPECIFIC";
+ break;
+ case NVME_SCT_MEDIA_ERROR:
+ s = media_error_status[sc];
+ type = "MEDIA ERROR";
+ break;
+ case NVME_SCT_PATH_RELATED:
+ s = path_related_status[sc];
+ type = "PATH RELATED";
+ break;
+ case NVME_SCT_VENDOR_SPECIFIC:
+ s = NULL;
+ type = "VENDOR SPECIFIC";
+ break;
+ default:
+ s = NULL;
+ type = NULL;
+ break;
+ }
+
+ if (type == NULL)
+ sbuf_printf(sb, "RESERVED (%02x/%02x)", sct, sc);
+ else if (s == NULL)
+ sbuf_printf(sb, "%s (%02x/%02x)", type, sct, sc);
+ else
+ sbuf_printf(sb, "%s (%02x/%02x)", s, sct, sc);
+}
+
+void
+nvme_cpl_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)
+{
+ uint16_t status;
+
+ status = le16toh(cpl->status);
+ nvme_sc_sbuf(cpl, sb);
+ if (NVME_STATUS_GET_M(status) != 0)
+ sbuf_printf(sb, " M");
+ if (NVME_STATUS_GET_DNR(status) != 0)
+ sbuf_printf(sb, " DNR");
+}
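
A usage sketch composing the new helpers into one line of output, given a completed struct nvme_completion *cpl (the buffer handling mirrors the fixed-length sbuf wrappers added in nvme_qpair.c; the printf target is illustrative):

    char buf[64];
    struct sbuf sb;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    nvme_opcode_sbuf(false, NVME_OPC_READ, &sb);	/* "READ (02)" */
    sbuf_cat(&sb, ": ");
    nvme_cpl_sbuf(cpl, &sb);	/* status string plus M/DNR flags */
    if (sbuf_finish(&sb) == 0)
    	printf("%s\n", buf);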
+
void
nvme_strvis(uint8_t *dst, const uint8_t *src, int dstlen, int srclen)
{
diff --git a/sys/dev/nvmf/controller/ctl_frontend_nvmf.c b/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
index a203bb1c90a6..658b47699c1d 100644
--- a/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
+++ b/sys/dev/nvmf/controller/ctl_frontend_nvmf.c
@@ -19,7 +19,9 @@
#include <sys/queue.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
+#include <sys/smp.h>
#include <sys/sx.h>
+#include <sys/taskqueue.h>
#include <machine/bus.h>
#include <machine/bus_dma.h>
@@ -31,8 +33,10 @@
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_error.h>
+#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl_frontend.h>
+#include <cam/ctl/ctl_private.h>
/*
* Store pointers to the capsule and qpair in the two pointer members
@@ -47,6 +51,7 @@ static int nvmft_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
int flag, struct thread *td);
static int nvmft_shutdown(void);
+static struct taskqueue *nvmft_taskq;
static TAILQ_HEAD(, nvmft_port) nvmft_ports;
static struct sx nvmft_ports_lock;
@@ -65,9 +70,9 @@ nvmft_online(void *arg)
{
struct nvmft_port *np = arg;
- sx_xlock(&np->lock);
+ mtx_lock(&np->lock);
np->online = true;
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
}
static void
@@ -76,7 +81,7 @@ nvmft_offline(void *arg)
struct nvmft_port *np = arg;
struct nvmft_controller *ctrlr;
- sx_xlock(&np->lock);
+ mtx_lock(&np->lock);
np->online = false;
TAILQ_FOREACH(ctrlr, &np->controllers, link) {
@@ -86,8 +91,32 @@ nvmft_offline(void *arg)
}
while (!TAILQ_EMPTY(&np->controllers))
- sx_sleep(np, &np->lock, 0, "nvmfoff", 0);
- sx_xunlock(&np->lock);
+ mtx_sleep(np, &np->lock, 0, "nvmfoff", 0);
+ mtx_unlock(&np->lock);
+}
+
+static int
+nvmft_info(void *arg, struct sbuf *sb)
+{
+ struct nvmft_port *np = arg;
+ struct nvmft_controller *ctrlr;
+ int retval;
+
+ mtx_lock(&np->lock);
+ retval = sbuf_printf(sb, "\t<port>%s,p,%u</port>\n", np->cdata.subnqn,
+ np->portid);
+ if (retval != 0)
+ goto out;
+
+ TAILQ_FOREACH(ctrlr, &np->controllers, link) {
+ retval = sbuf_printf(sb, "\t<host id=\"%u\">%s</host>\n",
+ ctrlr->cntlid, ctrlr->hostnqn);
+ if (retval != 0)
+ break;
+ }
+out:
+ mtx_unlock(&np->lock);
+ return (retval);
}
static int
@@ -97,7 +126,7 @@ nvmft_lun_enable(void *arg, int lun_id)
struct nvmft_controller *ctrlr;
uint32_t *old_ns, *new_ns;
uint32_t nsid;
- u_int i;
+ u_int i, new_count;
if (lun_id >= le32toh(np->cdata.nn)) {
printf("NVMFT: %s lun %d larger than maximum nsid %u\n",
@@ -106,14 +135,22 @@ nvmft_lun_enable(void *arg, int lun_id)
}
nsid = lun_id + 1;
- sx_xlock(&np->lock);
- new_ns = mallocarray(np->num_ns + 1, sizeof(*new_ns), M_NVMFT,
- M_WAITOK);
+ mtx_lock(&np->lock);
+ for (;;) {
+ new_count = np->num_ns + 1;
+ mtx_unlock(&np->lock);
+ new_ns = mallocarray(new_count, sizeof(*new_ns), M_NVMFT,
+ M_WAITOK);
+ mtx_lock(&np->lock);
+ if (np->num_ns + 1 <= new_count)
+ break;
+ free(new_ns, M_NVMFT);
+ }
for (i = 0; i < np->num_ns; i++) {
if (np->active_ns[i] < nsid)
continue;
if (np->active_ns[i] == nsid) {
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
free(new_ns, M_NVMFT);
printf("NVMFT: %s duplicate lun %d\n",
np->cdata.subnqn, lun_id);
@@ -140,7 +177,7 @@ nvmft_lun_enable(void *arg, int lun_id)
nvmft_controller_lun_changed(ctrlr, lun_id);
}
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
free(old_ns, M_NVMFT);
return (0);
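
Since a mutex, unlike the sx lock it replaces, cannot be held across a sleeping M_WAITOK allocation, the enable path retries in a loop: drop the lock, allocate for the size observed, relock, and start over if the array grew meanwhile. A generic sketch of the pattern (the sc names and M_TEMP tag are hypothetical):

    for (;;) {
    	count = sc->nitems + 1;
    	mtx_unlock(&sc->lock);
    	p = mallocarray(count, sizeof(*p), M_TEMP, M_WAITOK);
    	mtx_lock(&sc->lock);
    	if (sc->nitems + 1 <= count)
    		break;		/* allocation still large enough */
    	free(p, M_TEMP);	/* lost the race; retry with larger size */
    }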
@@ -158,12 +195,12 @@ nvmft_lun_disable(void *arg, int lun_id)
return (0);
nsid = lun_id + 1;
- sx_xlock(&np->lock);
+ mtx_lock(&np->lock);
for (i = 0; i < np->num_ns; i++) {
if (np->active_ns[i] == nsid)
goto found;
}
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
printf("NVMFT: %s request to disable nonexistent lun %d\n",
np->cdata.subnqn, lun_id);
return (EINVAL);
@@ -180,7 +217,7 @@ found:
nvmft_controller_lun_changed(ctrlr, lun_id);
}
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
return (0);
}
@@ -191,7 +228,7 @@ nvmft_populate_active_nslist(struct nvmft_port *np, uint32_t nsid,
{
u_int i, count;
- sx_slock(&np->lock);
+ mtx_lock(&np->lock);
count = 0;
for (i = 0; i < np->num_ns; i++) {
if (np->active_ns[i] <= nsid)
@@ -201,7 +238,7 @@ nvmft_populate_active_nslist(struct nvmft_port *np, uint32_t nsid,
if (count == nitems(nslist->ns))
break;
}
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
}
void
@@ -458,8 +495,8 @@ nvmft_datamove_in(struct ctl_nvmeio *ctnio, struct nvmft_qpair *qp,
ctl_datamove_done((union ctl_io *)ctnio, true);
}
-static void
-nvmft_datamove(union ctl_io *io)
+void
+nvmft_handle_datamove(union ctl_io *io)
{
struct nvmf_capsule *nc;
struct nvmft_qpair *qp;
@@ -478,6 +515,35 @@ nvmft_datamove(union ctl_io *io)
nvmft_datamove_out(&io->nvmeio, qp, nc);
}
+void
+nvmft_abort_datamove(union ctl_io *io)
+{
+ io->io_hdr.port_status = 1;
+ io->io_hdr.flags |= CTL_FLAG_ABORT;
+ ctl_datamove_done(io, true);
+}
+
+static void
+nvmft_datamove(union ctl_io *io)
+{
+ struct nvmft_qpair *qp;
+
+ qp = NVMFT_QP(io);
+ nvmft_qpair_datamove(qp, io);
+}
+
+void
+nvmft_enqueue_task(struct task *task)
+{
+ taskqueue_enqueue(nvmft_taskq, task);
+}
+
+void
+nvmft_drain_task(struct task *task)
+{
+ taskqueue_drain(nvmft_taskq, task);
+}
+
static void
hip_add(uint64_t pair[2], uint64_t addend)
{
@@ -561,6 +627,17 @@ end:
static int
nvmft_init(void)
{
+ int error;
+
+ nvmft_taskq = taskqueue_create("nvmft", M_WAITOK,
+ taskqueue_thread_enqueue, &nvmft_taskq);
+ error = taskqueue_start_threads_in_proc(&nvmft_taskq, mp_ncpus, PWAIT,
+ control_softc->ctl_proc, "nvmft");
+ if (error != 0) {
+ taskqueue_free(nvmft_taskq);
+ return (error);
+ }
+
TAILQ_INIT(&nvmft_ports);
sx_init(&nvmft_ports_lock, "nvmft ports");
return (0);
@@ -580,7 +657,7 @@ nvmft_port_free(struct nvmft_port *np)
free(np->active_ns, M_NVMFT);
clean_unrhdr(np->ids);
delete_unrhdr(np->ids);
- sx_destroy(&np->lock);
+ mtx_destroy(&np->lock);
free(np, M_NVMFT);
}
@@ -750,9 +827,10 @@ nvmft_port_create(struct ctl_req *req)
np = malloc(sizeof(*np), M_NVMFT, M_WAITOK | M_ZERO);
refcount_init(&np->refs, 1);
+ np->portid = portid;
np->max_io_qsize = max_io_qsize;
np->cap = _nvmf_controller_cap(max_io_qsize, enable_timeout / 500);
- sx_init(&np->lock, "nvmft port");
+ mtx_init(&np->lock, "nvmft port", NULL, MTX_DEF);
np->ids = new_unrhdr(0, MIN(CTL_MAX_INIT_PER_PORT - 1,
NVMF_CNTLID_STATIC_MAX), UNR_NO_MTX);
TAILQ_INIT(&np->controllers);
@@ -781,6 +859,7 @@ nvmft_port_create(struct ctl_req *req)
port->virtual_port = 0;
port->port_online = nvmft_online;
port->port_offline = nvmft_offline;
+ port->port_info = nvmft_info;
port->onoff_arg = np;
port->lun_enable = nvmft_lun_enable;
port->lun_disable = nvmft_lun_disable;
@@ -870,7 +949,13 @@ nvmft_port_remove(struct ctl_req *req)
TAILQ_REMOVE(&nvmft_ports, np, link);
sx_xunlock(&nvmft_ports_lock);
- ctl_port_offline(&np->port);
+ mtx_lock(&np->lock);
+ if (np->online) {
+ mtx_unlock(&np->lock);
+ ctl_port_offline(&np->port);
+ } else
+ mtx_unlock(&np->lock);
+
nvmft_port_rele(np);
req->status = CTL_LUN_OK;
}
@@ -878,29 +963,55 @@ nvmft_port_remove(struct ctl_req *req)
static void
nvmft_handoff(struct ctl_nvmf *cn)
{
- struct nvmf_fabric_connect_cmd cmd;
- struct nvmf_handoff_controller_qpair *handoff;
- struct nvmf_fabric_connect_data *data;
+ const struct nvmf_fabric_connect_cmd *cmd;
+ const struct nvmf_fabric_connect_data *data;
+ const nvlist_t *params;
struct nvmft_port *np;
+ nvlist_t *nvl;
+ size_t len;
+ enum nvmf_trtype trtype;
int error;
np = NULL;
- data = NULL;
- handoff = &cn->data.handoff;
- error = copyin(handoff->cmd, &cmd, sizeof(cmd));
+ error = nvmf_unpack_ioc_nvlist(&cn->data.handoff, &nvl);
if (error != 0) {
cn->status = CTL_NVMF_ERROR;
snprintf(cn->error_str, sizeof(cn->error_str),
- "Failed to copyin CONNECT SQE");
+ "Failed to copyin and unpack handoff arguments");
return;
}
- data = malloc(sizeof(*data), M_NVMFT, M_WAITOK);
- error = copyin(handoff->data, data, sizeof(*data));
- if (error != 0) {
+ if (!nvlist_exists_number(nvl, "trtype") ||
+ !nvlist_exists_nvlist(nvl, "params") ||
+ !nvlist_exists_binary(nvl, "cmd") ||
+ !nvlist_exists_binary(nvl, "data")) {
cn->status = CTL_NVMF_ERROR;
snprintf(cn->error_str, sizeof(cn->error_str),
- "Failed to copyin CONNECT data");
+ "Handoff arguments missing required value");
+ goto out;
+ }
+
+ params = nvlist_get_nvlist(nvl, "params");
+ if (!nvmf_validate_qpair_nvlist(params, true)) {
+ cn->status = CTL_NVMF_ERROR;
+ snprintf(cn->error_str, sizeof(cn->error_str),
+ "Invalid queue pair parameters");
+ goto out;
+ }
+
+ cmd = nvlist_get_binary(nvl, "cmd", &len);
+ if (len != sizeof(*cmd)) {
+ cn->status = CTL_NVMF_ERROR;
+ snprintf(cn->error_str, sizeof(cn->error_str),
+ "Wrong size for CONNECT SQE");
+ goto out;
+ }
+
+ data = nvlist_get_binary(nvl, "data", &len);
+ if (len != sizeof(*data)) {
+ cn->status = CTL_NVMF_ERROR;
+ snprintf(cn->error_str, sizeof(cn->error_str),
+ "Wrong size for CONNECT data");
goto out;
}
@@ -931,8 +1042,10 @@ nvmft_handoff(struct ctl_nvmf *cn)
nvmft_port_ref(np);
sx_sunlock(&nvmft_ports_lock);
- if (handoff->params.admin) {
- error = nvmft_handoff_admin_queue(np, handoff, &cmd, data);
+ trtype = nvlist_get_number(nvl, "trtype");
+ if (nvlist_get_bool(params, "admin")) {
+ error = nvmft_handoff_admin_queue(np, trtype, params, cmd,
+ data);
if (error != 0) {
cn->status = CTL_NVMF_ERROR;
snprintf(cn->error_str, sizeof(cn->error_str),
@@ -940,11 +1053,11 @@ nvmft_handoff(struct ctl_nvmf *cn)
goto out;
}
} else {
- error = nvmft_handoff_io_queue(np, handoff, &cmd, data);
+ error = nvmft_handoff_io_queue(np, trtype, params, cmd, data);
if (error != 0) {
cn->status = CTL_NVMF_ERROR;
snprintf(cn->error_str, sizeof(cn->error_str),
- "Failed to handoff admin queue: %d", error);
+ "Failed to handoff I/O queue: %d", error);
goto out;
}
}
@@ -953,7 +1066,7 @@ nvmft_handoff(struct ctl_nvmf *cn)
out:
if (np != NULL)
nvmft_port_rele(np);
- free(data, M_NVMFT);
+ nvlist_destroy(nvl);
}
static void
@@ -979,7 +1092,7 @@ nvmft_list(struct ctl_nvmf *cn)
sbuf_printf(sb, "<ctlnvmflist>\n");
sx_slock(&nvmft_ports_lock);
TAILQ_FOREACH(np, &nvmft_ports, link) {
- sx_slock(&np->lock);
+ mtx_lock(&np->lock);
TAILQ_FOREACH(ctrlr, &np->controllers, link) {
sbuf_printf(sb, "<connection id=\"%d\">"
"<hostnqn>%s</hostnqn>"
@@ -991,7 +1104,7 @@ nvmft_list(struct ctl_nvmf *cn)
np->cdata.subnqn,
ctrlr->trtype);
}
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
}
sx_sunlock(&nvmft_ports_lock);
sbuf_printf(sb, "</ctlnvmflist>\n");
@@ -1029,7 +1142,7 @@ nvmft_terminate(struct ctl_nvmf *cn)
found = false;
sx_slock(&nvmft_ports_lock);
TAILQ_FOREACH(np, &nvmft_ports, link) {
- sx_slock(&np->lock);
+ mtx_lock(&np->lock);
TAILQ_FOREACH(ctrlr, &np->controllers, link) {
if (tp->all != 0)
match = true;
@@ -1047,7 +1160,7 @@ nvmft_terminate(struct ctl_nvmf *cn)
nvmft_controller_error(ctrlr, NULL, ECONNABORTED);
found = true;
}
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
}
sx_sunlock(&nvmft_ports_lock);
@@ -1115,6 +1228,7 @@ nvmft_shutdown(void)
if (!TAILQ_EMPTY(&nvmft_ports))
return (EBUSY);
+ taskqueue_free(nvmft_taskq);
sx_destroy(&nvmft_ports_lock);
return (0);
}
diff --git a/sys/dev/nvmf/controller/nvmft_controller.c b/sys/dev/nvmf/controller/nvmft_controller.c
index f3783eac1275..390467534ca2 100644
--- a/sys/dev/nvmf/controller/nvmft_controller.c
+++ b/sys/dev/nvmf/controller/nvmft_controller.c
@@ -14,7 +14,6 @@
#include <sys/memdesc.h>
#include <sys/mutex.h>
#include <sys/sbuf.h>
-#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <dev/nvmf/nvmf_transport.h>
@@ -55,8 +54,6 @@ nvmft_controller_alloc(struct nvmft_port *np, uint16_t cntlid,
ctrlr = malloc(sizeof(*ctrlr), M_NVMFT, M_WAITOK | M_ZERO);
ctrlr->cntlid = cntlid;
- nvmft_port_ref(np);
- TAILQ_INSERT_TAIL(&np->controllers, ctrlr, link);
ctrlr->np = np;
mtx_init(&ctrlr->lock, "nvmft controller", NULL, MTX_DEF);
callout_init(&ctrlr->ka_timer, 1);
@@ -107,9 +104,8 @@ nvmft_keep_alive_timer(void *arg)
}
int
-nvmft_handoff_admin_queue(struct nvmft_port *np,
- const struct nvmf_handoff_controller_qpair *handoff,
- const struct nvmf_fabric_connect_cmd *cmd,
+nvmft_handoff_admin_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
+ const nvlist_t *params, const struct nvmf_fabric_connect_cmd *cmd,
const struct nvmf_fabric_connect_data *data)
{
struct nvmft_controller *ctrlr;
@@ -120,13 +116,17 @@ nvmft_handoff_admin_queue(struct nvmft_port *np,
if (cmd->qid != htole16(0))
return (EINVAL);
- qp = nvmft_qpair_init(handoff->trtype, &handoff->params, 0,
- "admin queue");
+ qp = nvmft_qpair_init(trtype, params, 0, "admin queue");
+ if (qp == NULL) {
+ printf("NVMFT: Failed to setup admin queue from %.*s\n",
+ (int)sizeof(data->hostnqn), data->hostnqn);
+ return (ENXIO);
+ }
- sx_xlock(&np->lock);
+ mtx_lock(&np->lock);
cntlid = alloc_unr(np->ids);
if (cntlid == -1) {
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
printf("NVMFT: Unable to allocate controller for %.*s\n",
(int)sizeof(data->hostnqn), data->hostnqn);
nvmft_connect_error(qp, cmd, NVME_SCT_COMMAND_SPECIFIC,
@@ -141,12 +141,25 @@ nvmft_handoff_admin_queue(struct nvmft_port *np,
("%s: duplicate controllers with id %d", __func__, cntlid));
}
#endif
+ mtx_unlock(&np->lock);
ctrlr = nvmft_controller_alloc(np, cntlid, data);
+
+ mtx_lock(&np->lock);
+ if (!np->online) {
+ mtx_unlock(&np->lock);
+ nvmft_controller_free(ctrlr);
+ free_unr(np->ids, cntlid);
+ nvmft_qpair_destroy(qp);
+ return (ENXIO);
+ }
+ nvmft_port_ref(np);
+ TAILQ_INSERT_TAIL(&np->controllers, ctrlr, link);
+
nvmft_printf(ctrlr, "associated with %.*s\n",
(int)sizeof(data->hostnqn), data->hostnqn);
ctrlr->admin = qp;
- ctrlr->trtype = handoff->trtype;
+ ctrlr->trtype = trtype;
/*
* The spec requires a non-zero KeepAlive timer, but allow a
@@ -162,17 +175,16 @@ nvmft_handoff_admin_queue(struct nvmft_port *np,
callout_reset_sbt(&ctrlr->ka_timer, ctrlr->ka_sbt, 0,
nvmft_keep_alive_timer, ctrlr, C_HARDCLOCK);
}
+ mtx_unlock(&np->lock);
nvmft_finish_accept(qp, cmd, ctrlr);
- sx_xunlock(&np->lock);
return (0);
}
int
-nvmft_handoff_io_queue(struct nvmft_port *np,
- const struct nvmf_handoff_controller_qpair *handoff,
- const struct nvmf_fabric_connect_cmd *cmd,
+nvmft_handoff_io_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
+ const nvlist_t *params, const struct nvmf_fabric_connect_cmd *cmd,
const struct nvmf_fabric_connect_data *data)
{
struct nvmft_controller *ctrlr;
@@ -186,15 +198,20 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
cntlid = le16toh(data->cntlid);
snprintf(name, sizeof(name), "I/O queue %u", qid);
- qp = nvmft_qpair_init(handoff->trtype, &handoff->params, qid, name);
+ qp = nvmft_qpair_init(trtype, params, qid, name);
+ if (qp == NULL) {
+ printf("NVMFT: Failed to setup I/O queue %u from %.*s\n", qid,
+ (int)sizeof(data->hostnqn), data->hostnqn);
+ return (ENXIO);
+ }
- sx_slock(&np->lock);
+ mtx_lock(&np->lock);
TAILQ_FOREACH(ctrlr, &np->controllers, link) {
if (ctrlr->cntlid == cntlid)
break;
}
if (ctrlr == NULL) {
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
printf("NVMFT: Nonexistent controller %u for I/O queue %u from %.*s\n",
cntlid, qid, (int)sizeof(data->hostnqn),
data->hostnqn);
@@ -205,7 +222,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
}
if (memcmp(ctrlr->hostid, data->hostid, sizeof(ctrlr->hostid)) != 0) {
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"hostid mismatch for I/O queue %u from %.*s\n", qid,
(int)sizeof(data->hostnqn), data->hostnqn);
@@ -215,7 +232,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
return (EINVAL);
}
if (memcmp(ctrlr->hostnqn, data->hostnqn, sizeof(ctrlr->hostnqn)) != 0) {
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"hostnqn mismatch for I/O queue %u from %.*s\n", qid,
(int)sizeof(data->hostnqn), data->hostnqn);
@@ -225,12 +242,12 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
return (EINVAL);
}
- /* XXX: Require handoff->trtype == ctrlr->trtype? */
+ /* XXX: Require trtype == ctrlr->trtype? */
mtx_lock(&ctrlr->lock);
if (ctrlr->shutdown) {
mtx_unlock(&ctrlr->lock);
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"attempt to create I/O queue %u on disabled controller from %.*s\n",
qid, (int)sizeof(data->hostnqn), data->hostnqn);
@@ -241,7 +258,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
}
if (ctrlr->num_io_queues == 0) {
mtx_unlock(&ctrlr->lock);
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"attempt to create I/O queue %u without enabled queues from %.*s\n",
qid, (int)sizeof(data->hostnqn), data->hostnqn);
@@ -252,7 +269,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
}
if (cmd->qid > ctrlr->num_io_queues) {
mtx_unlock(&ctrlr->lock);
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"attempt to create invalid I/O queue %u from %.*s\n", qid,
(int)sizeof(data->hostnqn), data->hostnqn);
@@ -263,7 +280,7 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
}
if (ctrlr->io_qpairs[qid - 1].qp != NULL) {
mtx_unlock(&ctrlr->lock);
- sx_sunlock(&np->lock);
+ mtx_unlock(&np->lock);
nvmft_printf(ctrlr,
"attempt to re-create I/O queue %u from %.*s\n", qid,
(int)sizeof(data->hostnqn), data->hostnqn);
@@ -275,8 +292,8 @@ nvmft_handoff_io_queue(struct nvmft_port *np,
ctrlr->io_qpairs[qid - 1].qp = qp;
mtx_unlock(&ctrlr->lock);
+ mtx_unlock(&np->lock);
nvmft_finish_accept(qp, cmd, ctrlr);
- sx_sunlock(&np->lock);
return (0);
}
@@ -375,11 +392,11 @@ nvmft_controller_terminate(void *arg, int pending)
/* Remove association (CNTLID). */
np = ctrlr->np;
- sx_xlock(&np->lock);
+ mtx_lock(&np->lock);
TAILQ_REMOVE(&np->controllers, ctrlr, link);
- free_unr(np->ids, ctrlr->cntlid);
wakeup_np = (!np->online && TAILQ_EMPTY(&np->controllers));
- sx_xunlock(&np->lock);
+ mtx_unlock(&np->lock);
+ free_unr(np->ids, ctrlr->cntlid);
if (wakeup_np)
wakeup(np);
@@ -770,6 +787,7 @@ handle_set_features(struct nvmft_controller *ctrlr,
ctrlr->aer_mask = aer_mask;
mtx_unlock(&ctrlr->lock);
nvmft_send_success(ctrlr->admin, nc);
+ nvmf_free_capsule(nc);
return;
}
default:
@@ -944,7 +962,7 @@ nvmft_handle_admin_command(struct nvmft_controller *ctrlr,
if (NVMEV(NVME_CC_REG_EN, ctrlr->cc) == 0 &&
cmd->opc != NVME_OPC_FABRICS_COMMANDS) {
nvmft_printf(ctrlr,
- "Unsupported admin opcode %#x whiled disabled\n", cmd->opc);
+ "Unsupported admin opcode %#x while disabled\n", cmd->opc);
nvmft_send_generic_error(ctrlr->admin, nc,
NVME_SC_COMMAND_SEQUENCE_ERROR);
nvmf_free_capsule(nc);
diff --git a/sys/dev/nvmf/controller/nvmft_qpair.c b/sys/dev/nvmf/controller/nvmft_qpair.c
index 6cb3ebd76884..73c7bb280780 100644
--- a/sys/dev/nvmf/controller/nvmft_qpair.c
+++ b/sys/dev/nvmf/controller/nvmft_qpair.c
@@ -31,9 +31,11 @@ struct nvmft_qpair {
uint16_t qid;
u_int qsize;
uint16_t sqhd;
- uint16_t sqtail;
volatile u_int qp_refs; /* Internal references on 'qp'. */
+ struct task datamove_task;
+ STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
+
struct mtx lock;
char name[16];
@@ -41,6 +43,7 @@ struct nvmft_qpair {
static int _nvmft_send_generic_error(struct nvmft_qpair *qp,
struct nvmf_capsule *nc, uint8_t sc_status);
+static void nvmft_datamove_task(void *context, int pending);
static void
nvmft_qpair_error(void *arg, int error)
@@ -98,24 +101,24 @@ nvmft_receive_capsule(void *arg, struct nvmf_capsule *nc)
}
struct nvmft_qpair *
-nvmft_qpair_init(enum nvmf_trtype trtype,
- const struct nvmf_handoff_qpair_params *handoff, uint16_t qid,
+nvmft_qpair_init(enum nvmf_trtype trtype, const nvlist_t *params, uint16_t qid,
const char *name)
{
struct nvmft_qpair *qp;
qp = malloc(sizeof(*qp), M_NVMFT, M_WAITOK | M_ZERO);
- qp->admin = handoff->admin;
- qp->sq_flow_control = handoff->sq_flow_control;
- qp->qsize = handoff->qsize;
+ qp->admin = nvlist_get_bool(params, "admin");
+ qp->sq_flow_control = nvlist_get_bool(params, "sq_flow_control");
+ qp->qsize = nvlist_get_number(params, "qsize");
qp->qid = qid;
- qp->sqhd = handoff->sqhd;
- qp->sqtail = handoff->sqtail;
+ qp->sqhd = nvlist_get_number(params, "sqhd");
strlcpy(qp->name, name, sizeof(qp->name));
mtx_init(&qp->lock, "nvmft qp", NULL, MTX_DEF);
qp->cids = BITSET_ALLOC(NUM_CIDS, M_NVMFT, M_WAITOK | M_ZERO);
+ STAILQ_INIT(&qp->datamove_queue);
+ TASK_INIT(&qp->datamove_task, 0, nvmft_datamove_task, qp);
- qp->qp = nvmf_allocate_qpair(trtype, true, handoff, nvmft_qpair_error,
+ qp->qp = nvmf_allocate_qpair(trtype, true, params, nvmft_qpair_error,
qp, nvmft_receive_capsule, qp);
if (qp->qp == NULL) {
mtx_destroy(&qp->lock);
@@ -131,14 +134,25 @@ nvmft_qpair_init(enum nvmf_trtype trtype,
void
nvmft_qpair_shutdown(struct nvmft_qpair *qp)
{
+ STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
struct nvmf_qpair *nq;
+ union ctl_io *io;
+ STAILQ_INIT(&datamove_queue);
mtx_lock(&qp->lock);
nq = qp->qp;
qp->qp = NULL;
+ STAILQ_CONCAT(&datamove_queue, &qp->datamove_queue);
mtx_unlock(&qp->lock);
if (nq != NULL && refcount_release(&qp->qp_refs))
nvmf_free_qpair(nq);
+
+ while (!STAILQ_EMPTY(&datamove_queue)) {
+ io = (union ctl_io *)STAILQ_FIRST(&datamove_queue);
+ STAILQ_REMOVE_HEAD(&datamove_queue, links);
+ nvmft_abort_datamove(io);
+ }
+ nvmft_drain_task(&qp->datamove_task);
}
void
@@ -359,3 +373,43 @@ nvmft_finish_accept(struct nvmft_qpair *qp,
rsp.status_code_specific.success.cntlid = htole16(ctrlr->cntlid);
return (nvmft_send_connect_response(qp, &rsp));
}
+
+void
+nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io)
+{
+ bool enqueue_task;
+
+ mtx_lock(&qp->lock);
+ if (qp->qp == NULL) {
+ mtx_unlock(&qp->lock);
+ nvmft_abort_datamove(io);
+ return;
+ }
+ enqueue_task = STAILQ_EMPTY(&qp->datamove_queue);
+ STAILQ_INSERT_TAIL(&qp->datamove_queue, &io->io_hdr, links);
+ mtx_unlock(&qp->lock);
+ if (enqueue_task)
+ nvmft_enqueue_task(&qp->datamove_task);
+}
+
+static void
+nvmft_datamove_task(void *context, int pending __unused)
+{
+ struct nvmft_qpair *qp = context;
+ union ctl_io *io;
+ bool abort;
+
+ mtx_lock(&qp->lock);
+ while (!STAILQ_EMPTY(&qp->datamove_queue)) {
+ io = (union ctl_io *)STAILQ_FIRST(&qp->datamove_queue);
+ STAILQ_REMOVE_HEAD(&qp->datamove_queue, links);
+ abort = (qp->qp == NULL);
+ mtx_unlock(&qp->lock);
+ if (abort)
+ nvmft_abort_datamove(io);
+ else
+ nvmft_handle_datamove(io);
+ mtx_lock(&qp->lock);
+ }
+ mtx_unlock(&qp->lock);
+}
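
Note how nvmft_qpair_datamove() schedules the task only when the queue transitions from empty to non-empty, so a burst of datamove requests costs a single taskqueue enqueue while nvmft_datamove_task() drains everything queued so far. A generic, hedged sketch of this coalescing idiom (all names hypothetical):

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>
	#include <sys/queue.h>
	#include <sys/taskqueue.h>

	struct work {
		STAILQ_ENTRY(work)	link;
	};

	struct workq {
		struct mtx		lock;
		STAILQ_HEAD(, work)	queue;
		struct taskqueue	*tq;
		struct task		task;	/* drains 'queue' */
	};

	static void
	work_submit(struct workq *wq, struct work *w)
	{
		bool first;

		mtx_lock(&wq->lock);
		first = STAILQ_EMPTY(&wq->queue);
		STAILQ_INSERT_TAIL(&wq->queue, w, link);
		mtx_unlock(&wq->lock);

		/* Only the empty -> non-empty transition needs an enqueue. */
		if (first)
			taskqueue_enqueue(wq->tq, &wq->task);
	}

The same reasoning is why nvmft_qpair_shutdown() above must both abort the queued requests and drain the task: a task already in flight may still be walking the list.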
diff --git a/sys/dev/nvmf/controller/nvmft_subr.c b/sys/dev/nvmf/controller/nvmft_subr.c
index bb2bc0988e81..245971813854 100644
--- a/sys/dev/nvmf/controller/nvmft_subr.c
+++ b/sys/dev/nvmf/controller/nvmft_subr.c
@@ -26,46 +26,6 @@ nvmf_nqn_valid(const char *nqn)
len = strnlen(nqn, NVME_NQN_FIELD_SIZE);
if (len == 0 || len > NVMF_NQN_MAX_LEN)
return (false);
-
-#ifdef STRICT_CHECKS
- /*
- * Stricter checks from the spec. Linux does not seem to
- * require these.
- */
-
- /*
- * NVMF_NQN_MIN_LEN does not include '.', and require at least
- * one character of a domain name.
- */
- if (len < NVMF_NQN_MIN_LEN + 2)
- return (false);
- if (memcmp("nqn.", nqn, strlen("nqn.")) != 0)
- return (false);
- nqn += strlen("nqn.");
-
- /* Next 4 digits must be a year. */
- for (u_int i = 0; i < 4; i++) {
- if (!isdigit(nqn[i]))
- return (false);
- }
- nqn += 4;
-
- /* '-' between year and month. */
- if (nqn[0] != '-')
- return (false);
- nqn++;
-
- /* 2 digit month. */
- for (u_int i = 0; i < 2; i++) {
- if (!isdigit(nqn[i]))
- return (false);
- }
- nqn += 2;
-
- /* '.' between month and reverse domain name. */
- if (nqn[0] != '.')
- return (false);
-#endif
return (true);
}
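
For reference, the format the deleted STRICT_CHECKS block enforced is "nqn.<yyyy>-<mm>." followed by a reverse domain name, e.g.:

	nqn.2014-08.org.nvmexpress.discovery		(well-known discovery NQN)
	nqn.2001-03.com.example:storage:target0		(hypothetical subsystem NQN)

With the block gone, only the length bounds above are enforced, which matches the Linux behavior the deleted comment refers to.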
diff --git a/sys/dev/nvmf/controller/nvmft_var.h b/sys/dev/nvmf/controller/nvmft_var.h
index fc1f86754382..85032b2dc55f 100644
--- a/sys/dev/nvmf/controller/nvmft_var.h
+++ b/sys/dev/nvmf/controller/nvmft_var.h
@@ -9,6 +9,7 @@
#define __NVMFT_VAR_H__
#include <sys/_callout.h>
+#include <sys/_nv.h>
#include <sys/refcount.h>
#include <sys/taskqueue.h>
@@ -32,9 +33,10 @@ struct nvmft_port {
struct nvme_firmware_page fp;
uint64_t cap;
uint32_t max_io_qsize;
+ uint16_t portid;
bool online;
- struct sx lock;
+ struct mtx lock;
struct unrhdr *ids;
TAILQ_HEAD(, nvmft_controller) controllers;
@@ -110,6 +112,10 @@ void nvmft_populate_active_nslist(struct nvmft_port *np, uint32_t nsid,
void nvmft_dispatch_command(struct nvmft_qpair *qp,
struct nvmf_capsule *nc, bool admin);
void nvmft_terminate_commands(struct nvmft_controller *ctrlr);
+void nvmft_abort_datamove(union ctl_io *io);
+void nvmft_handle_datamove(union ctl_io *io);
+void nvmft_drain_task(struct task *task);
+void nvmft_enqueue_task(struct task *task);
/* nvmft_controller.c */
void nvmft_controller_error(struct nvmft_controller *ctrlr,
@@ -121,23 +127,22 @@ void nvmft_handle_admin_command(struct nvmft_controller *ctrlr,
void nvmft_handle_io_command(struct nvmft_qpair *qp, uint16_t qid,
struct nvmf_capsule *nc);
int nvmft_handoff_admin_queue(struct nvmft_port *np,
- const struct nvmf_handoff_controller_qpair *handoff,
+ enum nvmf_trtype trtype, const nvlist_t *params,
const struct nvmf_fabric_connect_cmd *cmd,
const struct nvmf_fabric_connect_data *data);
-int nvmft_handoff_io_queue(struct nvmft_port *np,
- const struct nvmf_handoff_controller_qpair *handoff,
- const struct nvmf_fabric_connect_cmd *cmd,
+int nvmft_handoff_io_queue(struct nvmft_port *np, enum nvmf_trtype trtype,
+ const nvlist_t *params, const struct nvmf_fabric_connect_cmd *cmd,
const struct nvmf_fabric_connect_data *data);
int nvmft_printf(struct nvmft_controller *ctrlr, const char *fmt, ...)
__printflike(2, 3);
/* nvmft_qpair.c */
struct nvmft_qpair *nvmft_qpair_init(enum nvmf_trtype trtype,
- const struct nvmf_handoff_qpair_params *handoff, uint16_t qid,
- const char *name);
+ const nvlist_t *params, uint16_t qid, const char *name);
void nvmft_qpair_shutdown(struct nvmft_qpair *qp);
void nvmft_qpair_destroy(struct nvmft_qpair *qp);
struct nvmft_controller *nvmft_qpair_ctrlr(struct nvmft_qpair *qp);
+void nvmft_qpair_datamove(struct nvmft_qpair *qp, union ctl_io *io);
uint16_t nvmft_qpair_id(struct nvmft_qpair *qp);
const char *nvmft_qpair_name(struct nvmft_qpair *qp);
void nvmft_command_completed(struct nvmft_qpair *qp,
diff --git a/sys/dev/nvmf/host/nvmf.c b/sys/dev/nvmf/host/nvmf.c
index 0902bc78a7b5..1ac0d142443b 100644
--- a/sys/dev/nvmf/host/nvmf.c
+++ b/sys/dev/nvmf/host/nvmf.c
@@ -8,13 +8,18 @@
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/conf.h>
+#include <sys/dnv.h>
+#include <sys/eventhandler.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/memdesc.h>
#include <sys/module.h>
#include <sys/mutex.h>
+#include <sys/nv.h>
+#include <sys/reboot.h>
#include <sys/sx.h>
+#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
@@ -22,10 +27,20 @@
#include <dev/nvmf/host/nvmf_var.h>
static struct cdevsw nvmf_cdevsw;
+static struct taskqueue *nvmf_tq;
+
+bool nvmf_fail_disconnect = false;
+SYSCTL_BOOL(_kern_nvmf, OID_AUTO, fail_on_disconnection, CTLFLAG_RWTUN,
+ &nvmf_fail_disconnect, 0, "Fail I/O requests on connection failure");
MALLOC_DEFINE(M_NVMF, "nvmf", "NVMe over Fabrics host");
+static void nvmf_controller_loss_task(void *arg, int pending);
static void nvmf_disconnect_task(void *arg, int pending);
+static void nvmf_request_reconnect(struct nvmf_softc *sc);
+static void nvmf_request_reconnect_task(void *arg, int pending);
+static void nvmf_shutdown_pre_sync(void *arg, int howto);
+static void nvmf_shutdown_post_sync(void *arg, int howto);
void
nvmf_complete(void *arg, const struct nvme_completion *cqe)
@@ -187,104 +202,132 @@ nvmf_send_keep_alive(void *arg)
}
int
-nvmf_init_ivars(struct nvmf_ivars *ivars, struct nvmf_handoff_host *hh)
+nvmf_copyin_handoff(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp)
{
- size_t len;
- u_int i;
+ const struct nvme_discovery_log_entry *dle;
+ const struct nvme_controller_data *cdata;
+ const nvlist_t *const *io;
+ const nvlist_t *admin, *rparams;
+ nvlist_t *nvl;
+ size_t i, num_io_queues;
+ uint32_t qsize;
int error;
- memset(ivars, 0, sizeof(*ivars));
-
- if (!hh->admin.admin || hh->num_io_queues < 1)
- return (EINVAL);
-
- ivars->cdata = malloc(sizeof(*ivars->cdata), M_NVMF, M_WAITOK);
- error = copyin(hh->cdata, ivars->cdata, sizeof(*ivars->cdata));
+ error = nvmf_unpack_ioc_nvlist(nv, &nvl);
if (error != 0)
- goto out;
- nvme_controller_data_swapbytes(ivars->cdata);
+ return (error);
- len = hh->num_io_queues * sizeof(*ivars->io_params);
- ivars->io_params = malloc(len, M_NVMF, M_WAITOK);
- error = copyin(hh->io, ivars->io_params, len);
- if (error != 0)
- goto out;
- for (i = 0; i < hh->num_io_queues; i++) {
- if (ivars->io_params[i].admin) {
- error = EINVAL;
- goto out;
- }
+ if (!nvlist_exists_number(nvl, "trtype") ||
+ !nvlist_exists_nvlist(nvl, "admin") ||
+ !nvlist_exists_nvlist_array(nvl, "io") ||
+ !nvlist_exists_binary(nvl, "cdata") ||
+ !nvlist_exists_nvlist(nvl, "rparams"))
+ goto invalid;
+
+ rparams = nvlist_get_nvlist(nvl, "rparams");
+ if (!nvlist_exists_binary(rparams, "dle") ||
+ !nvlist_exists_string(rparams, "hostnqn") ||
+ !nvlist_exists_number(rparams, "num_io_queues") ||
+ !nvlist_exists_number(rparams, "io_qsize"))
+ goto invalid;
+
+ admin = nvlist_get_nvlist(nvl, "admin");
+ if (!nvmf_validate_qpair_nvlist(admin, false))
+ goto invalid;
+ if (!nvlist_get_bool(admin, "admin"))
+ goto invalid;
+
+ io = nvlist_get_nvlist_array(nvl, "io", &num_io_queues);
+ if (num_io_queues < 1 ||
+ num_io_queues != nvlist_get_number(rparams, "num_io_queues"))
+ goto invalid;
+ for (i = 0; i < num_io_queues; i++) {
+ if (!nvmf_validate_qpair_nvlist(io[i], false))
+ goto invalid;
+ }
- /* Require all I/O queues to be the same size. */
- if (ivars->io_params[i].qsize != ivars->io_params[0].qsize) {
- error = EINVAL;
- goto out;
- }
+ /* Require all I/O queues to be the same size. */
+ qsize = nvlist_get_number(rparams, "io_qsize");
+ for (i = 0; i < num_io_queues; i++) {
+ if (nvlist_get_number(io[i], "qsize") != qsize)
+ goto invalid;
}
- ivars->hh = hh;
- return (0);
+ cdata = nvlist_get_binary(nvl, "cdata", &i);
+ if (i != sizeof(*cdata))
+ goto invalid;
+ dle = nvlist_get_binary(rparams, "dle", &i);
+ if (i != sizeof(*dle))
+ goto invalid;
-out:
- free(ivars->io_params, M_NVMF);
- free(ivars->cdata, M_NVMF);
- return (error);
-}
+ if (memcmp(dle->subnqn, cdata->subnqn, sizeof(cdata->subnqn)) != 0)
+ goto invalid;
-void
-nvmf_free_ivars(struct nvmf_ivars *ivars)
-{
- free(ivars->io_params, M_NVMF);
- free(ivars->cdata, M_NVMF);
+ *nvlp = nvl;
+ return (0);
+invalid:
+ nvlist_destroy(nvl);
+ return (EINVAL);
}
static int
nvmf_probe(device_t dev)
{
- struct nvmf_ivars *ivars = device_get_ivars(dev);
- char desc[260];
+ const nvlist_t *nvl = device_get_ivars(dev);
+ const struct nvme_controller_data *cdata;
- if (ivars == NULL)
+ if (nvl == NULL)
return (ENXIO);
- snprintf(desc, sizeof(desc), "Fabrics: %.256s", ivars->cdata->subnqn);
- device_set_desc_copy(dev, desc);
+ cdata = nvlist_get_binary(nvl, "cdata", NULL);
+ device_set_descf(dev, "Fabrics: %.256s", cdata->subnqn);
return (BUS_PROBE_DEFAULT);
}
static int
-nvmf_establish_connection(struct nvmf_softc *sc, struct nvmf_ivars *ivars)
+nvmf_establish_connection(struct nvmf_softc *sc, nvlist_t *nvl)
{
+ const nvlist_t *const *io;
+ const nvlist_t *admin;
+ uint64_t kato;
+ size_t num_io_queues;
+ enum nvmf_trtype trtype;
char name[16];
+ trtype = nvlist_get_number(nvl, "trtype");
+ admin = nvlist_get_nvlist(nvl, "admin");
+ io = nvlist_get_nvlist_array(nvl, "io", &num_io_queues);
+ kato = dnvlist_get_number(nvl, "kato", 0);
+ sc->reconnect_delay = dnvlist_get_number(nvl, "reconnect_delay", 0);
+ sc->controller_loss_timeout = dnvlist_get_number(nvl,
+ "controller_loss_timeout", 0);
+
/* Setup the admin queue. */
- sc->admin = nvmf_init_qp(sc, ivars->hh->trtype, &ivars->hh->admin,
- "admin queue");
+ sc->admin = nvmf_init_qp(sc, trtype, admin, "admin queue", 0);
if (sc->admin == NULL) {
device_printf(sc->dev, "Failed to setup admin queue\n");
return (ENXIO);
}
/* Setup I/O queues. */
- sc->io = malloc(ivars->hh->num_io_queues * sizeof(*sc->io), M_NVMF,
+ sc->io = malloc(num_io_queues * sizeof(*sc->io), M_NVMF,
M_WAITOK | M_ZERO);
- sc->num_io_queues = ivars->hh->num_io_queues;
+ sc->num_io_queues = num_io_queues;
for (u_int i = 0; i < sc->num_io_queues; i++) {
snprintf(name, sizeof(name), "I/O queue %u", i);
- sc->io[i] = nvmf_init_qp(sc, ivars->hh->trtype,
- &ivars->io_params[i], name);
+ sc->io[i] = nvmf_init_qp(sc, trtype, io[i], name, i);
if (sc->io[i] == NULL) {
device_printf(sc->dev, "Failed to setup I/O queue %u\n",
- i + 1);
+ i);
return (ENXIO);
}
}
/* Start KeepAlive timers. */
- if (ivars->hh->kato != 0) {
+ if (kato != 0) {
sc->ka_traffic = NVMEV(NVME_CTRLR_DATA_CTRATT_TBKAS,
sc->cdata->ctratt) != 0;
- sc->ka_rx_sbt = mstosbt(ivars->hh->kato);
+ sc->ka_rx_sbt = mstosbt(kato);
sc->ka_tx_sbt = sc->ka_rx_sbt / 2;
callout_reset_sbt(&sc->ka_rx_timer, sc->ka_rx_sbt, 0,
nvmf_check_keep_alive, sc, C_HARDCLOCK);
@@ -292,12 +335,23 @@ nvmf_establish_connection(struct nvmf_softc *sc, struct nvmf_ivars *ivars)
nvmf_send_keep_alive, sc, C_HARDCLOCK);
}
+ memcpy(sc->cdata, nvlist_get_binary(nvl, "cdata", NULL),
+ sizeof(*sc->cdata));
+
+ /* Save reconnect parameters. */
+ nvlist_destroy(sc->rparams);
+ sc->rparams = nvlist_take_nvlist(nvl, "rparams");
+
return (0);
}
+typedef bool nvmf_scan_active_ns_cb(struct nvmf_softc *, uint32_t,
+ const struct nvme_namespace_data *, void *);
+
static bool
-nvmf_scan_nslist(struct nvmf_softc *sc, struct nvme_ns_list *nslist,
- struct nvme_namespace_data *data, uint32_t *nsidp)
+nvmf_scan_active_nslist(struct nvmf_softc *sc, struct nvme_ns_list *nslist,
+ struct nvme_namespace_data *data, uint32_t *nsidp,
+ nvmf_scan_active_ns_cb *cb, void *cb_arg)
{
struct nvmf_completion_status status;
uint32_t nsid;
@@ -333,13 +387,6 @@ nvmf_scan_nslist(struct nvmf_softc *sc, struct nvme_ns_list *nslist,
return (true);
}
- if (sc->ns[nsid - 1] != NULL) {
- device_printf(sc->dev,
- "duplicate namespace %u in active namespace list\n",
- nsid);
- return (false);
- }
-
nvmf_status_init(&status);
nvmf_status_wait_io(&status);
if (!nvmf_cmd_identify_namespace(sc, nsid, data, nvmf_complete,
@@ -365,49 +412,37 @@ nvmf_scan_nslist(struct nvmf_softc *sc, struct nvme_ns_list *nslist,
return (false);
}
- /*
- * As in nvme_ns_construct, a size of zero indicates an
- * invalid namespace.
- */
nvme_namespace_data_swapbytes(data);
- if (data->nsze == 0) {
- device_printf(sc->dev,
- "ignoring active namespace %u with zero size\n",
- nsid);
- continue;
- }
-
- sc->ns[nsid - 1] = nvmf_init_ns(sc, nsid, data);
-
- nvmf_sim_rescan_ns(sc, nsid);
+ if (!cb(sc, nsid, data, cb_arg))
+ return (false);
}
MPASS(nsid == nslist->ns[nitems(nslist->ns) - 1] && nsid != 0);
- if (nsid >= 0xfffffffd)
+ if (nsid >= NVME_GLOBAL_NAMESPACE_TAG - 1)
*nsidp = 0;
else
- *nsidp = nsid + 1;
+ *nsidp = nsid;
return (true);
}
static bool
-nvmf_add_namespaces(struct nvmf_softc *sc)
+nvmf_scan_active_namespaces(struct nvmf_softc *sc, nvmf_scan_active_ns_cb *cb,
+ void *cb_arg)
{
struct nvme_namespace_data *data;
struct nvme_ns_list *nslist;
uint32_t nsid;
bool retval;
- sc->ns = mallocarray(sc->cdata->nn, sizeof(*sc->ns), M_NVMF,
- M_WAITOK | M_ZERO);
nslist = malloc(sizeof(*nslist), M_NVMF, M_WAITOK);
data = malloc(sizeof(*data), M_NVMF, M_WAITOK);
nsid = 0;
retval = true;
for (;;) {
- if (!nvmf_scan_nslist(sc, nslist, data, &nsid)) {
+ if (!nvmf_scan_active_nslist(sc, nslist, data, &nsid, cb,
+ cb_arg)) {
retval = false;
break;
}
@@ -420,36 +455,77 @@ nvmf_add_namespaces(struct nvmf_softc *sc)
return (retval);
}
+static bool
+nvmf_add_ns(struct nvmf_softc *sc, uint32_t nsid,
+ const struct nvme_namespace_data *data, void *arg __unused)
+{
+ if (sc->ns[nsid - 1] != NULL) {
+ device_printf(sc->dev,
+ "duplicate namespace %u in active namespace list\n",
+ nsid);
+ return (false);
+ }
+
+ /*
+ * As in nvme_ns_construct, a size of zero indicates an
+ * invalid namespace.
+ */
+ if (data->nsze == 0) {
+ device_printf(sc->dev,
+ "ignoring active namespace %u with zero size\n", nsid);
+ return (true);
+ }
+
+ sc->ns[nsid - 1] = nvmf_init_ns(sc, nsid, data);
+
+ nvmf_sim_rescan_ns(sc, nsid);
+ return (true);
+}
+
+static bool
+nvmf_add_namespaces(struct nvmf_softc *sc)
+{
+ sc->ns = mallocarray(sc->cdata->nn, sizeof(*sc->ns), M_NVMF,
+ M_WAITOK | M_ZERO);
+ return (nvmf_scan_active_namespaces(sc, nvmf_add_ns, NULL));
+}
+
static int
nvmf_attach(device_t dev)
{
struct make_dev_args mda;
struct nvmf_softc *sc = device_get_softc(dev);
- struct nvmf_ivars *ivars = device_get_ivars(dev);
+ nvlist_t *nvl = device_get_ivars(dev);
+ const nvlist_t * const *io;
+ struct sysctl_oid *oid;
uint64_t val;
u_int i;
int error;
- if (ivars == NULL)
+ if (nvl == NULL)
return (ENXIO);
sc->dev = dev;
- sc->trtype = ivars->hh->trtype;
+ sc->trtype = nvlist_get_number(nvl, "trtype");
callout_init(&sc->ka_rx_timer, 1);
callout_init(&sc->ka_tx_timer, 1);
sx_init(&sc->connection_lock, "nvmf connection");
TASK_INIT(&sc->disconnect_task, 0, nvmf_disconnect_task, sc);
+ TIMEOUT_TASK_INIT(nvmf_tq, &sc->controller_loss_task, 0,
+ nvmf_controller_loss_task, sc);
+ TIMEOUT_TASK_INIT(nvmf_tq, &sc->request_reconnect_task, 0,
+ nvmf_request_reconnect_task, sc);
- /* Claim the cdata pointer from ivars. */
- sc->cdata = ivars->cdata;
- ivars->cdata = NULL;
+ oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "ioq",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "I/O Queues");
+ sc->ioq_oid_list = SYSCTL_CHILDREN(oid);
- nvmf_init_aer(sc);
+ sc->cdata = malloc(sizeof(*sc->cdata), M_NVMF, M_WAITOK);
- /* TODO: Multiqueue support. */
- sc->max_pending_io = ivars->io_params[0].qsize /* * sc->num_io_queues */;
+ nvmf_init_aer(sc);
- error = nvmf_establish_connection(sc, ivars);
+ error = nvmf_establish_connection(sc, nvl);
if (error != 0)
goto out;
@@ -476,6 +552,10 @@ nvmf_attach(device_t dev)
NVME_CAP_HI_MPSMIN(sc->cap >> 32)));
}
+ io = nvlist_get_nvlist_array(nvl, "io", NULL);
+ sc->max_pending_io = nvlist_get_number(io[0], "qsize") *
+ sc->num_io_queues;
+
error = nvmf_init_sim(sc);
if (error != 0)
goto out;
@@ -503,6 +583,11 @@ nvmf_attach(device_t dev)
goto out;
}
+ sc->shutdown_pre_sync_eh = EVENTHANDLER_REGISTER(shutdown_pre_sync,
+ nvmf_shutdown_pre_sync, sc, SHUTDOWN_PRI_FIRST);
+ sc->shutdown_post_sync_eh = EVENTHANDLER_REGISTER(shutdown_post_sync,
+ nvmf_shutdown_post_sync, sc, SHUTDOWN_PRI_LAST);
+
return (0);
out:
if (sc->ns != NULL) {
@@ -529,8 +614,11 @@ out:
nvmf_destroy_aer(sc);
- taskqueue_drain(taskqueue_thread, &sc->disconnect_task);
+ taskqueue_drain_timeout(nvmf_tq, &sc->request_reconnect_task);
+ taskqueue_drain_timeout(nvmf_tq, &sc->controller_loss_task);
+ taskqueue_drain(nvmf_tq, &sc->disconnect_task);
sx_destroy(&sc->connection_lock);
+ nvlist_destroy(sc->rparams);
free(sc->cdata, M_NVMF);
return (error);
}
@@ -538,7 +626,7 @@ out:
void
nvmf_disconnect(struct nvmf_softc *sc)
{
- taskqueue_enqueue(taskqueue_thread, &sc->disconnect_task);
+ taskqueue_enqueue(nvmf_tq, &sc->disconnect_task);
}
static void
@@ -579,6 +667,7 @@ nvmf_disconnect_task(void *arg, int pending __unused)
return;
}
+ nanotime(&sc->last_disconnect);
callout_drain(&sc->ka_tx_timer);
callout_drain(&sc->ka_rx_timer);
sc->ka_traffic = false;
@@ -600,29 +689,98 @@ nvmf_disconnect_task(void *arg, int pending __unused)
nvmf_destroy_qp(sc->admin);
sc->admin = NULL;
+ if (sc->reconnect_delay != 0)
+ nvmf_request_reconnect(sc);
+ if (sc->controller_loss_timeout != 0)
+ taskqueue_enqueue_timeout(nvmf_tq,
+ &sc->controller_loss_task, sc->controller_loss_timeout *
+ hz);
+
+ sx_xunlock(&sc->connection_lock);
+}
+
+static void
+nvmf_controller_loss_task(void *arg, int pending)
+{
+ struct nvmf_softc *sc = arg;
+ device_t dev;
+ int error;
+
+ bus_topo_lock();
+ sx_xlock(&sc->connection_lock);
+ if (sc->admin != NULL || sc->detaching) {
+ /* Reconnected or already detaching. */
+ sx_xunlock(&sc->connection_lock);
+ bus_topo_unlock();
+ return;
+ }
+
+ sc->controller_timedout = true;
+ sx_xunlock(&sc->connection_lock);
+
+ /*
+ * XXX: Doing this from here is a bit ugly. We don't have an
+ * extra reference on `dev` but bus_topo_lock should block any
+ * concurrent device_delete_child invocations.
+ */
+ dev = sc->dev;
+ error = device_delete_child(root_bus, dev);
+ if (error != 0)
+ device_printf(dev,
+ "failed to detach after controller loss: %d\n", error);
+ bus_topo_unlock();
+}
+
+static void
+nvmf_request_reconnect(struct nvmf_softc *sc)
+{
+ char buf[64];
+
+ sx_assert(&sc->connection_lock, SX_LOCKED);
+
+ snprintf(buf, sizeof(buf), "name=\"%s\"", device_get_nameunit(sc->dev));
+ devctl_notify("nvme", "controller", "RECONNECT", buf);
+ taskqueue_enqueue_timeout(nvmf_tq, &sc->request_reconnect_task,
+ sc->reconnect_delay * hz);
+}
+
+static void
+nvmf_request_reconnect_task(void *arg, int pending)
+{
+ struct nvmf_softc *sc = arg;
+
+ sx_xlock(&sc->connection_lock);
+ if (sc->admin != NULL || sc->detaching || sc->controller_timedout) {
+ /* Reconnected or already detaching. */
+ sx_xunlock(&sc->connection_lock);
+ return;
+ }
+
+ nvmf_request_reconnect(sc);
sx_xunlock(&sc->connection_lock);
}
static int
-nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_handoff_host *hh)
+nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_ioc_nv *nv)
{
- struct nvmf_ivars ivars;
+ const struct nvme_controller_data *cdata;
+ nvlist_t *nvl;
u_int i;
int error;
+ error = nvmf_copyin_handoff(nv, &nvl);
+ if (error != 0)
+ return (error);
+
/* XXX: Should we permit changing the transport type? */
- if (sc->trtype != hh->trtype) {
+ if (sc->trtype != nvlist_get_number(nvl, "trtype")) {
device_printf(sc->dev,
"transport type mismatch on reconnect\n");
return (EINVAL);
}
- error = nvmf_init_ivars(&ivars, hh);
- if (error != 0)
- return (error);
-
sx_xlock(&sc->connection_lock);
- if (sc->admin != NULL || sc->detaching) {
+ if (sc->admin != NULL || sc->detaching || sc->controller_timedout) {
error = EBUSY;
goto out;
}
@@ -634,8 +792,9 @@ nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_handoff_host *hh)
* ensures the new association is connected to the same NVMe
* subsystem.
*/
- if (memcmp(sc->cdata->subnqn, ivars.cdata->subnqn,
- sizeof(ivars.cdata->subnqn)) != 0) {
+ cdata = nvlist_get_binary(nvl, "cdata", NULL);
+ if (memcmp(sc->cdata->subnqn, cdata->subnqn,
+ sizeof(cdata->subnqn)) != 0) {
device_printf(sc->dev,
"controller subsystem NQN mismatch on reconnect\n");
error = EINVAL;
@@ -647,7 +806,7 @@ nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_handoff_host *hh)
* max_pending_io is still correct?
*/
- error = nvmf_establish_connection(sc, &ivars);
+ error = nvmf_establish_connection(sc, nvl);
if (error != 0)
goto out;
@@ -665,12 +824,85 @@ nvmf_reconnect_host(struct nvmf_softc *sc, struct nvmf_handoff_host *hh)
nvmf_reconnect_ns(sc->ns[i]);
}
nvmf_reconnect_sim(sc);
+
+ nvmf_rescan_all_ns(sc);
+
+ taskqueue_cancel_timeout(nvmf_tq, &sc->request_reconnect_task, NULL);
+ taskqueue_cancel_timeout(nvmf_tq, &sc->controller_loss_task, NULL);
out:
sx_xunlock(&sc->connection_lock);
- nvmf_free_ivars(&ivars);
+ nvlist_destroy(nvl);
return (error);
}
+static void
+nvmf_shutdown_pre_sync(void *arg, int howto)
+{
+ struct nvmf_softc *sc = arg;
+
+ if ((howto & RB_NOSYNC) != 0 || SCHEDULER_STOPPED())
+ return;
+
+ /*
+ * If this association is disconnected, abort any pending
+ * requests with an error to permit filesystems to unmount
+ * without hanging.
+ */
+ sx_xlock(&sc->connection_lock);
+ if (sc->admin != NULL || sc->detaching) {
+ sx_xunlock(&sc->connection_lock);
+ return;
+ }
+
+ for (u_int i = 0; i < sc->cdata->nn; i++) {
+ if (sc->ns[i] != NULL)
+ nvmf_shutdown_ns(sc->ns[i]);
+ }
+ nvmf_shutdown_sim(sc);
+ sx_xunlock(&sc->connection_lock);
+}
+
+static void
+nvmf_shutdown_post_sync(void *arg, int howto)
+{
+ struct nvmf_softc *sc = arg;
+
+ if ((howto & RB_NOSYNC) != 0 || SCHEDULER_STOPPED())
+ return;
+
+ /*
+ * If this association is connected, disconnect gracefully.
+ */
+ sx_xlock(&sc->connection_lock);
+ if (sc->admin == NULL || sc->detaching) {
+ sx_xunlock(&sc->connection_lock);
+ return;
+ }
+
+ callout_drain(&sc->ka_tx_timer);
+ callout_drain(&sc->ka_rx_timer);
+
+ nvmf_shutdown_controller(sc);
+
+ /*
+ * Quiesce consumers so that any commands submitted after this
+ * fail with an error. Notably, nda(4) calls nda_flush() from
+ * a post_sync handler that might be ordered after this one.
+ */
+ for (u_int i = 0; i < sc->cdata->nn; i++) {
+ if (sc->ns[i] != NULL)
+ nvmf_shutdown_ns(sc->ns[i]);
+ }
+ nvmf_shutdown_sim(sc);
+
+ for (u_int i = 0; i < sc->num_io_queues; i++) {
+ nvmf_destroy_qp(sc->io[i]);
+ }
+ nvmf_destroy_qp(sc->admin);
+ sc->admin = NULL;
+ sx_xunlock(&sc->connection_lock);
+}
+
static int
nvmf_detach(device_t dev)
{
@@ -683,6 +915,9 @@ nvmf_detach(device_t dev)
sc->detaching = true;
sx_xunlock(&sc->connection_lock);
+ EVENTHANDLER_DEREGISTER(shutdown_pre_sync, sc->shutdown_pre_sync_eh);
+ EVENTHANDLER_DEREGISTER(shutdown_post_sync, sc->shutdown_post_sync_eh);
+
nvmf_destroy_sim(sc);
for (i = 0; i < sc->cdata->nn; i++) {
if (sc->ns[i] != NULL)
@@ -701,7 +936,21 @@ nvmf_detach(device_t dev)
}
free(sc->io, M_NVMF);
- taskqueue_drain(taskqueue_thread, &sc->disconnect_task);
+ taskqueue_drain(nvmf_tq, &sc->disconnect_task);
+ if (taskqueue_cancel_timeout(nvmf_tq, &sc->request_reconnect_task,
+ NULL) != 0)
+ taskqueue_drain_timeout(nvmf_tq, &sc->request_reconnect_task);
+
+ /*
+ * Don't cancel/drain the controller loss task if that task
+ * has fired and is triggering the detach.
+ */
+ if (!sc->controller_timedout) {
+ if (taskqueue_cancel_timeout(nvmf_tq, &sc->controller_loss_task,
+ NULL) != 0)
+ taskqueue_drain_timeout(nvmf_tq,
+ &sc->controller_loss_task);
+ }
if (sc->admin != NULL)
nvmf_destroy_qp(sc->admin);
@@ -709,16 +958,45 @@ nvmf_detach(device_t dev)
nvmf_destroy_aer(sc);
sx_destroy(&sc->connection_lock);
+ nvlist_destroy(sc->rparams);
free(sc->cdata, M_NVMF);
return (0);
}
+static void
+nvmf_rescan_ns_1(struct nvmf_softc *sc, uint32_t nsid,
+ const struct nvme_namespace_data *data)
+{
+ struct nvmf_namespace *ns;
+
+ /* XXX: Needs locking around sc->ns[]. */
+ ns = sc->ns[nsid - 1];
+ if (data->nsze == 0) {
+ /* XXX: Needs locking */
+ if (ns != NULL) {
+ nvmf_destroy_ns(ns);
+ sc->ns[nsid - 1] = NULL;
+ }
+ } else {
+ /* XXX: Needs locking */
+ if (ns == NULL) {
+ sc->ns[nsid - 1] = nvmf_init_ns(sc, nsid, data);
+ } else {
+ if (!nvmf_update_ns(ns, data)) {
+ nvmf_destroy_ns(ns);
+ sc->ns[nsid - 1] = NULL;
+ }
+ }
+ }
+
+ nvmf_sim_rescan_ns(sc, nsid);
+}
+
void
nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid)
{
struct nvmf_completion_status status;
struct nvme_namespace_data *data;
- struct nvmf_namespace *ns;
data = malloc(sizeof(*data), M_NVMF, M_WAITOK);
@@ -751,29 +1029,58 @@ nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid)
nvme_namespace_data_swapbytes(data);
- /* XXX: Needs locking around sc->ns[]. */
- ns = sc->ns[nsid - 1];
- if (data->nsze == 0) {
- /* XXX: Needs locking */
+ nvmf_rescan_ns_1(sc, nsid, data);
+
+ free(data, M_NVMF);
+}
+
+static void
+nvmf_purge_namespaces(struct nvmf_softc *sc, uint32_t first_nsid,
+ uint32_t next_valid_nsid)
+{
+ struct nvmf_namespace *ns;
+
+ for (uint32_t nsid = first_nsid; nsid < next_valid_nsid; nsid++) {
+ /* XXX: Needs locking around sc->ns[]. */
+ ns = sc->ns[nsid - 1];
if (ns != NULL) {
nvmf_destroy_ns(ns);
sc->ns[nsid - 1] = NULL;
- }
- } else {
- /* XXX: Needs locking */
- if (ns == NULL) {
- sc->ns[nsid - 1] = nvmf_init_ns(sc, nsid, data);
- } else {
- if (!nvmf_update_ns(ns, data)) {
- nvmf_destroy_ns(ns);
- sc->ns[nsid - 1] = NULL;
- }
+
+ nvmf_sim_rescan_ns(sc, nsid);
}
}
+}
- free(data, M_NVMF);
+static bool
+nvmf_rescan_ns_cb(struct nvmf_softc *sc, uint32_t nsid,
+ const struct nvme_namespace_data *data, void *arg)
+{
+ uint32_t *last_nsid = arg;
- nvmf_sim_rescan_ns(sc, nsid);
+ /* Check for any gaps prior to this namespace. */
+ nvmf_purge_namespaces(sc, *last_nsid + 1, nsid);
+ *last_nsid = nsid;
+
+ nvmf_rescan_ns_1(sc, nsid, data);
+ return (true);
+}
+
+void
+nvmf_rescan_all_ns(struct nvmf_softc *sc)
+{
+ uint32_t last_nsid;
+
+ last_nsid = 0;
+ if (!nvmf_scan_active_namespaces(sc, nvmf_rescan_ns_cb, &last_nsid))
+ return;
+
+ /*
+ * Check for any namespace devices after the last active
+ * namespace.
+ */
+ nvmf_purge_namespaces(sc, last_nsid + 1, sc->cdata->nn + 1);
}
int
@@ -822,12 +1129,21 @@ nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
cmd.cdw14 = pt->cmd.cdw14;
cmd.cdw15 = pt->cmd.cdw15;
+ sx_slock(&sc->connection_lock);
+ if (sc->admin == NULL || sc->detaching) {
+ device_printf(sc->dev,
+ "failed to send passthrough command\n");
+ error = ECONNABORTED;
+ sx_sunlock(&sc->connection_lock);
+ goto error;
+ }
if (admin)
qp = sc->admin;
else
qp = nvmf_select_io_queue(sc);
nvmf_status_init(&status);
req = nvmf_allocate_request(qp, &cmd, nvmf_complete, &status, M_WAITOK);
+ sx_sunlock(&sc->connection_lock);
if (req == NULL) {
device_printf(sc->dev, "failed to send passthrough command\n");
error = ECONNABORTED;
@@ -857,14 +1173,46 @@ error:
}
static int
+nvmf_reconnect_params(struct nvmf_softc *sc, struct nvmf_ioc_nv *nv)
+{
+ int error;
+
+ sx_slock(&sc->connection_lock);
+ error = nvmf_pack_ioc_nvlist(sc->rparams, nv);
+ sx_sunlock(&sc->connection_lock);
+
+ return (error);
+}
+
+static int
+nvmf_connection_status(struct nvmf_softc *sc, struct nvmf_ioc_nv *nv)
+{
+ nvlist_t *nvl, *nvl_ts;
+ int error;
+
+ nvl = nvlist_create(0);
+ nvl_ts = nvlist_create(0);
+
+ sx_slock(&sc->connection_lock);
+ nvlist_add_bool(nvl, "connected", sc->admin != NULL);
+ nvlist_add_number(nvl_ts, "tv_sec", sc->last_disconnect.tv_sec);
+ nvlist_add_number(nvl_ts, "tv_nsec", sc->last_disconnect.tv_nsec);
+ sx_sunlock(&sc->connection_lock);
+ nvlist_move_nvlist(nvl, "last_disconnect", nvl_ts);
+
+ error = nvmf_pack_ioc_nvlist(nvl, nv);
+ nvlist_destroy(nvl);
+ return (error);
+}
+
+static int
nvmf_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
struct thread *td)
{
struct nvmf_softc *sc = cdev->si_drv1;
struct nvme_get_nsid *gnsid;
struct nvme_pt_command *pt;
- struct nvmf_reconnect_params *rp;
- struct nvmf_handoff_host *hh;
+ struct nvmf_ioc_nv *nv;
switch (cmd) {
case NVME_PASSTHROUGH_CMD:
@@ -872,25 +1220,25 @@ nvmf_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag,
return (nvmf_passthrough_cmd(sc, pt, true));
case NVME_GET_NSID:
gnsid = (struct nvme_get_nsid *)arg;
- strncpy(gnsid->cdev, device_get_nameunit(sc->dev),
+ strlcpy(gnsid->cdev, device_get_nameunit(sc->dev),
sizeof(gnsid->cdev));
- gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
gnsid->nsid = 0;
return (0);
case NVME_GET_MAX_XFER_SIZE:
*(uint64_t *)arg = sc->max_xfer_size;
return (0);
- case NVMF_RECONNECT_PARAMS:
- rp = (struct nvmf_reconnect_params *)arg;
- if ((sc->cdata->fcatt & 1) == 0)
- rp->cntlid = NVMF_CNTLID_DYNAMIC;
- else
- rp->cntlid = sc->cdata->ctrlr_id;
- memcpy(rp->subnqn, sc->cdata->subnqn, sizeof(rp->subnqn));
+ case NVME_GET_CONTROLLER_DATA:
+ memcpy(arg, sc->cdata, sizeof(*sc->cdata));
return (0);
+ case NVMF_RECONNECT_PARAMS:
+ nv = (struct nvmf_ioc_nv *)arg;
+ return (nvmf_reconnect_params(sc, nv));
case NVMF_RECONNECT_HOST:
- hh = (struct nvmf_handoff_host *)arg;
- return (nvmf_reconnect_host(sc, hh));
+ nv = (struct nvmf_ioc_nv *)arg;
+ return (nvmf_reconnect_host(sc, nv));
+ case NVMF_CONNECTION_STATUS:
+ nv = (struct nvmf_ioc_nv *)arg;
+ return (nvmf_connection_status(sc, nv));
default:
return (ENOTTY);
}
@@ -904,14 +1252,25 @@ static struct cdevsw nvmf_cdevsw = {
static int
nvmf_modevent(module_t mod, int what, void *arg)
{
+ int error;
+
switch (what) {
case MOD_LOAD:
- return (nvmf_ctl_load());
+ error = nvmf_ctl_load();
+ if (error != 0)
+ return (error);
+
+ nvmf_tq = taskqueue_create("nvmf", M_WAITOK | M_ZERO,
+ taskqueue_thread_enqueue, &nvmf_tq);
+ taskqueue_start_threads(&nvmf_tq, 1, PWAIT, "nvmf taskq");
+ return (0);
case MOD_QUIESCE:
return (0);
case MOD_UNLOAD:
nvmf_ctl_unload();
destroy_dev_drain(&nvmf_cdevsw);
+ if (nvmf_tq != NULL)
+ taskqueue_free(nvmf_tq);
return (0);
default:
return (EOPNOTSUPP);
@@ -923,9 +1282,6 @@ static device_method_t nvmf_methods[] = {
DEVMETHOD(device_probe, nvmf_probe),
DEVMETHOD(device_attach, nvmf_attach),
DEVMETHOD(device_detach, nvmf_detach),
-#if 0
- DEVMETHOD(device_shutdown, nvmf_shutdown),
-#endif
DEVMETHOD_END
};
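
Two new knobs drive the reconnect machinery added above: reconnect_delay re-arms nvmf_request_reconnect_task() and emits a devctl(4) notification each period, while controller_loss_timeout eventually detaches the device. Since nvmf_request_reconnect() announces itself via devctl_notify("nvme", "controller", "RECONNECT", ...), a devd(8) hook can react to it; a hedged configuration sketch (the action is hypothetical):

	notify 10 {
		match "system"		"nvme";
		match "subsystem"	"controller";
		match "type"		"RECONNECT";
		action "logger -t nvmf 'reconnect requested for $name'";
	};

The name=... key packed into the notification body becomes the $name variable in the action.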
diff --git a/sys/dev/nvmf/host/nvmf_aer.c b/sys/dev/nvmf/host/nvmf_aer.c
index 4c950f1518d0..2f7f177d0421 100644
--- a/sys/dev/nvmf/host/nvmf_aer.c
+++ b/sys/dev/nvmf/host/nvmf_aer.c
@@ -62,7 +62,7 @@ nvmf_handle_changed_namespaces(struct nvmf_softc *sc,
* probably just rescan the entire set of namespaces.
*/
if (ns_list->ns[0] == 0xffffffff) {
- device_printf(sc->dev, "too many changed namespaces\n");
+ nvmf_rescan_all_ns(sc);
return;
}
diff --git a/sys/dev/nvmf/host/nvmf_ctldev.c b/sys/dev/nvmf/host/nvmf_ctldev.c
index f40005a2a666..275d5e9c932a 100644
--- a/sys/dev/nvmf/host/nvmf_ctldev.c
+++ b/sys/dev/nvmf/host/nvmf_ctldev.c
@@ -9,6 +9,7 @@
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/malloc.h>
+#include <sys/nv.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_transport.h>
@@ -17,25 +18,25 @@
static struct cdev *nvmf_cdev;
static int
-nvmf_handoff_host(struct nvmf_handoff_host *hh)
+nvmf_handoff_host(struct nvmf_ioc_nv *nv)
{
- struct nvmf_ivars ivars;
+ nvlist_t *nvl;
device_t dev;
int error;
- error = nvmf_init_ivars(&ivars, hh);
+ error = nvmf_copyin_handoff(nv, &nvl);
if (error != 0)
return (error);
bus_topo_lock();
- dev = device_add_child(root_bus, "nvme", -1);
+ dev = device_add_child(root_bus, "nvme", DEVICE_UNIT_ANY);
if (dev == NULL) {
bus_topo_unlock();
error = ENXIO;
goto out;
}
- device_set_ivars(dev, &ivars);
+ device_set_ivars(dev, nvl);
error = device_probe_and_attach(dev);
device_set_ivars(dev, NULL);
if (error != 0)
@@ -43,7 +44,7 @@ nvmf_handoff_host(struct nvmf_handoff_host *hh)
bus_topo_unlock();
out:
- nvmf_free_ivars(&ivars);
+ nvlist_destroy(nvl);
return (error);
}
@@ -117,7 +118,7 @@ nvmf_ctl_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
{
switch (cmd) {
case NVMF_HANDOFF_HOST:
- return (nvmf_handoff_host((struct nvmf_handoff_host *)arg));
+ return (nvmf_handoff_host((struct nvmf_ioc_nv *)arg));
case NVMF_DISCONNECT_HOST:
return (nvmf_disconnect_host((const char **)arg));
case NVMF_DISCONNECT_ALL:
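
All of these ioctls now exchange a packed nvlist through struct nvmf_ioc_nv (documented at the end of this diff): the caller may probe with size == 0 to learn the required length, then repeat the call with a buffer. A hedged userspace sketch of that two-pass convention (error handling abbreviated; assumes libnv and a suitable device node):

	#include <sys/nv.h>
	#include <sys/ioctl.h>
	#include <dev/nvmf/nvmf.h>
	#include <stdlib.h>

	static nvlist_t *
	fetch_nvlist(int fd, u_long req)
	{
		struct nvmf_ioc_nv nv = { .data = NULL, .size = 0 };
		nvlist_t *nvl;

		if (ioctl(fd, req, &nv) == -1)		/* size query */
			return (NULL);
		nv.data = malloc(nv.len);
		nv.size = nv.len;
		if (ioctl(fd, req, &nv) == -1) {	/* real copyout */
			free(nv.data);
			return (NULL);
		}
		nvl = nvlist_unpack(nv.data, nv.len, 0);
		free(nv.data);
		return (nvl);
	}

e.g. fetch_nvlist(fd, NVMF_RECONNECT_PARAMS) against a host controller node.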
diff --git a/sys/dev/nvmf/host/nvmf_ns.c b/sys/dev/nvmf/host/nvmf_ns.c
index 3ce434bf7c50..4215c8295d2e 100644
--- a/sys/dev/nvmf/host/nvmf_ns.c
+++ b/sys/dev/nvmf/host/nvmf_ns.c
@@ -18,7 +18,7 @@
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sbuf.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/host/nvmf_var.h>
@@ -29,6 +29,7 @@ struct nvmf_namespace {
u_int flags;
uint32_t lba_size;
bool disconnected;
+ bool shutdown;
TAILQ_HEAD(, bio) pending_bios;
struct mtx lock;
@@ -49,7 +50,7 @@ ns_printf(struct nvmf_namespace *ns, const char *fmt, ...)
sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
sbuf_set_drain(&sb, sbuf_printf_drain, NULL);
- sbuf_printf(&sb, "%sns%u: ", device_get_nameunit(ns->sc->dev),
+ sbuf_printf(&sb, "%sn%u: ", device_get_nameunit(ns->sc->dev),
ns->id);
va_start(ap, fmt);
@@ -84,13 +85,22 @@ nvmf_ns_biodone(struct bio *bio)
ns = bio->bio_dev->si_drv1;
/* If a request is aborted, resubmit or queue it for resubmission. */
- if (bio->bio_error == ECONNABORTED) {
+ if (bio->bio_error == ECONNABORTED && !nvmf_fail_disconnect) {
bio->bio_error = 0;
bio->bio_driver2 = 0;
mtx_lock(&ns->lock);
if (ns->disconnected) {
- TAILQ_INSERT_TAIL(&ns->pending_bios, bio, bio_queue);
- mtx_unlock(&ns->lock);
+ if (nvmf_fail_disconnect || ns->shutdown) {
+ mtx_unlock(&ns->lock);
+ bio->bio_error = ECONNABORTED;
+ bio->bio_flags |= BIO_ERROR;
+ bio->bio_resid = bio->bio_bcount;
+ biodone(bio);
+ } else {
+ TAILQ_INSERT_TAIL(&ns->pending_bios, bio,
+ bio_queue);
+ mtx_unlock(&ns->lock);
+ }
} else {
mtx_unlock(&ns->lock);
nvmf_ns_strategy(bio);
@@ -163,6 +173,7 @@ nvmf_ns_submit_bio(struct nvmf_namespace *ns, struct bio *bio)
struct nvme_dsm_range *dsm_range;
struct memdesc mem;
uint64_t lba, lba_count;
+ int error;
dsm_range = NULL;
memset(&cmd, 0, sizeof(cmd));
@@ -201,10 +212,15 @@ nvmf_ns_submit_bio(struct nvmf_namespace *ns, struct bio *bio)
mtx_lock(&ns->lock);
if (ns->disconnected) {
- TAILQ_INSERT_TAIL(&ns->pending_bios, bio, bio_queue);
+ if (nvmf_fail_disconnect || ns->shutdown) {
+ error = ECONNABORTED;
+ } else {
+ TAILQ_INSERT_TAIL(&ns->pending_bios, bio, bio_queue);
+ error = 0;
+ }
mtx_unlock(&ns->lock);
free(dsm_range, M_NVMF);
- return (0);
+ return (error);
}
req = nvmf_allocate_request(nvmf_select_io_queue(ns->sc), &cmd,
@@ -258,9 +274,8 @@ nvmf_ns_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag,
return (nvmf_passthrough_cmd(ns->sc, pt, false));
case NVME_GET_NSID:
gnsid = (struct nvme_get_nsid *)arg;
- strncpy(gnsid->cdev, device_get_nameunit(ns->sc->dev),
+ strlcpy(gnsid->cdev, device_get_nameunit(ns->sc->dev),
sizeof(gnsid->cdev));
- gnsid->cdev[sizeof(gnsid->cdev) - 1] = '\0';
gnsid->nsid = ns->id;
return (0);
case DIOCGMEDIASIZE:
@@ -314,7 +329,7 @@ static struct cdevsw nvmf_ns_cdevsw = {
struct nvmf_namespace *
nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
- struct nvme_namespace_data *data)
+ const struct nvme_namespace_data *data)
{
struct make_dev_args mda;
struct nvmf_namespace *ns;
@@ -372,10 +387,12 @@ nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
mda.mda_gid = GID_WHEEL;
mda.mda_mode = 0600;
mda.mda_si_drv1 = ns;
- error = make_dev_s(&mda, &ns->cdev, "%sns%u",
+ error = make_dev_s(&mda, &ns->cdev, "%sn%u",
device_get_nameunit(sc->dev), id);
if (error != 0)
goto fail;
+ ns->cdev->si_drv2 = make_dev_alias(ns->cdev, "%sns%u",
+ device_get_nameunit(sc->dev), id);
ns->cdev->si_flags |= SI_UNMAPPED;
@@ -414,11 +431,35 @@ nvmf_reconnect_ns(struct nvmf_namespace *ns)
}
void
+nvmf_shutdown_ns(struct nvmf_namespace *ns)
+{
+ TAILQ_HEAD(, bio) bios;
+ struct bio *bio;
+
+ mtx_lock(&ns->lock);
+ ns->shutdown = true;
+ TAILQ_INIT(&bios);
+ TAILQ_CONCAT(&bios, &ns->pending_bios, bio_queue);
+ mtx_unlock(&ns->lock);
+
+ while (!TAILQ_EMPTY(&bios)) {
+ bio = TAILQ_FIRST(&bios);
+ TAILQ_REMOVE(&bios, bio, bio_queue);
+ bio->bio_error = ECONNABORTED;
+ bio->bio_flags |= BIO_ERROR;
+ bio->bio_resid = bio->bio_bcount;
+ biodone(bio);
+ }
+}
+
+void
nvmf_destroy_ns(struct nvmf_namespace *ns)
{
TAILQ_HEAD(, bio) bios;
struct bio *bio;
+ if (ns->cdev->si_drv2 != NULL)
+ destroy_dev(ns->cdev->si_drv2);
destroy_dev(ns->cdev);
/*
@@ -451,7 +492,8 @@ nvmf_destroy_ns(struct nvmf_namespace *ns)
}
bool
-nvmf_update_ns(struct nvmf_namespace *ns, struct nvme_namespace_data *data)
+nvmf_update_ns(struct nvmf_namespace *ns,
+ const struct nvme_namespace_data *data)
{
uint8_t lbads, lbaf;
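
The fail-fast branches above are opt-in: with the new kern.nvmf.fail_on_disconnection sysctl (declared in nvmf.c earlier in this diff) set to 1, e.g.

	sysctl kern.nvmf.fail_on_disconnection=1

pending and newly submitted bios complete with ECONNABORTED while the association is down rather than being parked on pending_bios for the eventual reconnect; ns->shutdown forces the same failure path during system shutdown regardless of the sysctl.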
diff --git a/sys/dev/nvmf/host/nvmf_qpair.c b/sys/dev/nvmf/host/nvmf_qpair.c
index 96cb5a8b0465..2f511cf0406d 100644
--- a/sys/dev/nvmf/host/nvmf_qpair.c
+++ b/sys/dev/nvmf/host/nvmf_qpair.c
@@ -10,6 +10,8 @@
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
+#include <sys/nv.h>
+#include <sys/sysctl.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf.h>
#include <dev/nvmf/nvmf_transport.h>
@@ -31,6 +33,7 @@ struct nvmf_host_qpair {
u_int num_commands;
uint16_t sqhd;
uint16_t sqtail;
+ uint64_t submitted;
struct mtx lock;
@@ -41,6 +44,7 @@ struct nvmf_host_qpair {
struct nvmf_host_command **active_commands;
char name[16];
+ struct sysctl_ctx_list sysctl_ctx;
};
struct nvmf_request *
@@ -112,8 +116,23 @@ nvmf_dispatch_command(struct nvmf_host_qpair *qp, struct nvmf_host_command *cmd)
struct nvmf_softc *sc = qp->sc;
struct nvme_command *sqe;
struct nvmf_capsule *nc;
+ uint16_t new_sqtail;
int error;
+ mtx_assert(&qp->lock, MA_OWNED);
+
+ qp->submitted++;
+
+ /*
+ * Update flow control tracking. This is just a sanity check.
+ * Since num_commands == qsize - 1, there can never be too
+ * many commands in flight.
+ */
+ new_sqtail = (qp->sqtail + 1) % (qp->num_commands + 1);
+ KASSERT(new_sqtail != qp->sqhd, ("%s: qp %p is full", __func__, qp));
+ qp->sqtail = new_sqtail;
+ mtx_unlock(&qp->lock);
+
nc = cmd->req->nc;
sqe = nvmf_capsule_sqe(nc);
@@ -177,11 +196,23 @@ nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
return;
}
+ /* Update flow control tracking. */
+ mtx_lock(&qp->lock);
+ if (qp->sq_flow_control) {
+ if (nvmf_sqhd_valid(nc))
+ qp->sqhd = le16toh(cqe->sqhd);
+ } else {
+ /*
+ * If SQ FC is disabled, just advance the head for
+ * each response capsule received.
+ */
+ qp->sqhd = (qp->sqhd + 1) % (qp->num_commands + 1);
+ }
+
/*
* If the queue has been shutdown due to an error, silently
* drop the response.
*/
- mtx_lock(&qp->lock);
if (qp->qp == NULL) {
device_printf(sc->dev,
"received completion for CID %u on shutdown %s\n", cid,
@@ -212,7 +243,6 @@ nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
} else {
cmd->req = STAILQ_FIRST(&qp->pending_requests);
STAILQ_REMOVE_HEAD(&qp->pending_requests, link);
- mtx_unlock(&qp->lock);
nvmf_dispatch_command(qp, cmd);
}
@@ -221,28 +251,61 @@ nvmf_receive_capsule(void *arg, struct nvmf_capsule *nc)
nvmf_free_request(req);
}
+static void
+nvmf_sysctls_qp(struct nvmf_softc *sc, struct nvmf_host_qpair *qp,
+ bool admin, u_int qid)
+{
+ struct sysctl_ctx_list *ctx = &qp->sysctl_ctx;
+ struct sysctl_oid *oid;
+ struct sysctl_oid_list *list;
+ char name[8];
+
+ if (admin) {
+ oid = SYSCTL_ADD_NODE(ctx,
+ SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
+ "adminq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Admin Queue");
+ } else {
+ snprintf(name, sizeof(name), "%u", qid);
+ oid = SYSCTL_ADD_NODE(ctx, sc->ioq_oid_list, OID_AUTO, name,
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "I/O Queue");
+ }
+ list = SYSCTL_CHILDREN(oid);
+
+ SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "num_entries", CTLFLAG_RD,
+ NULL, qp->num_commands + 1, "Number of entries in queue");
+ SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_head", CTLFLAG_RD, &qp->sqhd,
+ 0, "Current head of submission queue (as observed by driver)");
+ SYSCTL_ADD_U16(ctx, list, OID_AUTO, "sq_tail", CTLFLAG_RD, &qp->sqtail,
+ 0, "Current tail of submission queue (as observed by driver)");
+ SYSCTL_ADD_U64(ctx, list, OID_AUTO, "num_cmds", CTLFLAG_RD,
+ &qp->submitted, 0, "Number of commands submitted");
+}
+
struct nvmf_host_qpair *
nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
- struct nvmf_handoff_qpair_params *handoff, const char *name)
+ const nvlist_t *nvl, const char *name, u_int qid)
{
struct nvmf_host_command *cmd, *ncmd;
struct nvmf_host_qpair *qp;
u_int i;
+ bool admin;
+ admin = nvlist_get_bool(nvl, "admin");
qp = malloc(sizeof(*qp), M_NVMF, M_WAITOK | M_ZERO);
qp->sc = sc;
- qp->sq_flow_control = handoff->sq_flow_control;
- qp->sqhd = handoff->sqhd;
- qp->sqtail = handoff->sqtail;
+ qp->sq_flow_control = nvlist_get_bool(nvl, "sq_flow_control");
+ qp->sqhd = nvlist_get_number(nvl, "sqhd");
+ qp->sqtail = nvlist_get_number(nvl, "sqtail");
strlcpy(qp->name, name, sizeof(qp->name));
mtx_init(&qp->lock, "nvmf qp", NULL, MTX_DEF);
+ (void)sysctl_ctx_init(&qp->sysctl_ctx);
/*
* Allocate a spare command slot for each pending AER command
* on the admin queue.
*/
- qp->num_commands = handoff->qsize - 1;
- if (handoff->admin)
+ qp->num_commands = nvlist_get_number(nvl, "qsize") - 1;
+ if (admin)
qp->num_commands += sc->num_aer;
qp->active_commands = malloc(sizeof(*qp->active_commands) *
@@ -255,9 +318,10 @@ nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
}
STAILQ_INIT(&qp->pending_requests);
- qp->qp = nvmf_allocate_qpair(trtype, false, handoff, nvmf_qp_error,
- qp, nvmf_receive_capsule, qp);
+ qp->qp = nvmf_allocate_qpair(trtype, false, nvl, nvmf_qp_error, qp,
+ nvmf_receive_capsule, qp);
if (qp->qp == NULL) {
+ (void)sysctl_ctx_free(&qp->sysctl_ctx);
TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
TAILQ_REMOVE(&qp->free_commands, cmd, link);
free(cmd, M_NVMF);
@@ -268,6 +332,8 @@ nvmf_init_qp(struct nvmf_softc *sc, enum nvmf_trtype trtype,
return (NULL);
}
+ nvmf_sysctls_qp(sc, qp, admin, qid);
+
return (qp);
}
@@ -339,6 +405,7 @@ nvmf_destroy_qp(struct nvmf_host_qpair *qp)
struct nvmf_host_command *cmd, *ncmd;
nvmf_shutdown_qp(qp);
+ (void)sysctl_ctx_free(&qp->sysctl_ctx);
TAILQ_FOREACH_SAFE(cmd, &qp->free_commands, link, ncmd) {
TAILQ_REMOVE(&qp->free_commands, cmd, link);
@@ -381,6 +448,5 @@ nvmf_submit_request(struct nvmf_request *req)
("%s: CID already busy", __func__));
qp->active_commands[cmd->cid] = cmd;
cmd->req = req;
- mtx_unlock(&qp->lock);
nvmf_dispatch_command(qp, cmd);
}
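
A worked example (a sketch, not from the patch) of the submission-queue ring arithmetic above for an I/O queue with qsize = 4, so num_commands = 3 and the modulus num_commands + 1 = 4:

	sqhd = 0, sqtail = 0		ring empty
	three dispatches -> sqtail = 3	ring full: (3 + 1) % 4 == sqhd
	one completion   -> sqhd = 1	one slot free again

The KASSERT in nvmf_dispatch_command() can only fire if a fourth command were dispatched before any completion arrived, which the free-command list (num_commands entries) already prevents; for the admin queue the modulus grows by the num_aer spare slots reserved above.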
diff --git a/sys/dev/nvmf/host/nvmf_sim.c b/sys/dev/nvmf/host/nvmf_sim.c
index b097b04d64c3..de9e958d8afd 100644
--- a/sys/dev/nvmf/host/nvmf_sim.c
+++ b/sys/dev/nvmf/host/nvmf_sim.c
@@ -40,7 +40,13 @@ nvmf_ccb_done(union ccb *ccb)
return;
if (nvmf_cqe_aborted(&ccb->nvmeio.cpl)) {
- ccb->ccb_h.status = CAM_REQUEUE_REQ;
+ struct cam_sim *sim = xpt_path_sim(ccb->ccb_h.path);
+ struct nvmf_softc *sc = cam_sim_softc(sim);
+
+ if (nvmf_fail_disconnect || sc->sim_shutdown)
+ ccb->ccb_h.status = CAM_DEV_NOT_THERE;
+ else
+ ccb->ccb_h.status = CAM_REQUEUE_REQ;
xpt_done(ccb);
} else if (ccb->nvmeio.cpl.status != 0) {
ccb->ccb_h.status = CAM_NVME_STATUS_ERROR;
@@ -52,7 +58,7 @@ nvmf_ccb_done(union ccb *ccb)
xpt_done(ccb);
} else {
ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done_direct(ccb);
+ xpt_done(ccb);
}
}
@@ -106,7 +112,10 @@ nvmf_sim_io(struct nvmf_softc *sc, union ccb *ccb)
mtx_lock(&sc->sim_mtx);
if (sc->sim_disconnected) {
mtx_unlock(&sc->sim_mtx);
- nvmeio->ccb_h.status = CAM_REQUEUE_REQ;
+ if (nvmf_fail_disconnect || sc->sim_shutdown)
+ nvmeio->ccb_h.status = CAM_DEV_NOT_THERE;
+ else
+ nvmeio->ccb_h.status = CAM_REQUEUE_REQ;
xpt_done(ccb);
return;
}
@@ -116,8 +125,8 @@ nvmf_sim_io(struct nvmf_softc *sc, union ccb *ccb)
qp = sc->admin;
req = nvmf_allocate_request(qp, &nvmeio->cmd, nvmf_ccb_complete,
ccb, M_NOWAIT);
+ mtx_unlock(&sc->sim_mtx);
if (req == NULL) {
- mtx_unlock(&sc->sim_mtx);
nvmeio->ccb_h.status = CAM_RESRC_UNAVAIL;
xpt_done(ccb);
return;
@@ -141,7 +150,6 @@ nvmf_sim_io(struct nvmf_softc *sc, union ccb *ccb)
("%s: incoming CCB is not in-progress", __func__));
ccb->ccb_h.status |= CAM_SIM_QUEUED;
nvmf_submit_request(req);
- mtx_unlock(&sc->sim_mtx);
}
static void
@@ -183,7 +191,7 @@ nvmf_sim_action(struct cam_sim *sim, union ccb *ccb)
cpi->xport_specific.nvmf.nsid =
xpt_path_lun_id(ccb->ccb_h.path);
cpi->xport_specific.nvmf.trtype = sc->trtype;
- strncpy(cpi->xport_specific.nvmf.dev_name,
+ strlcpy(cpi->xport_specific.nvmf.dev_name,
device_get_nameunit(sc->dev),
sizeof(cpi->xport_specific.nvmf.dev_name));
cpi->maxio = sc->max_xfer_size;
@@ -320,6 +328,15 @@ nvmf_reconnect_sim(struct nvmf_softc *sc)
}
void
+nvmf_shutdown_sim(struct nvmf_softc *sc)
+{
+ mtx_lock(&sc->sim_mtx);
+ sc->sim_shutdown = true;
+ mtx_unlock(&sc->sim_mtx);
+ xpt_release_simq(sc->sim, 1);
+}
+
+void
nvmf_destroy_sim(struct nvmf_softc *sc)
{
xpt_async(AC_LOST_DEVICE, sc->path, NULL);
diff --git a/sys/dev/nvmf/host/nvmf_var.h b/sys/dev/nvmf/host/nvmf_var.h
index 64525851631e..606245b3969c 100644
--- a/sys/dev/nvmf/host/nvmf_var.h
+++ b/sys/dev/nvmf/host/nvmf_var.h
@@ -9,10 +9,13 @@
#define __NVMF_VAR_H__
#include <sys/_callout.h>
+#include <sys/_eventhandler.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
+#include <sys/_nv.h>
#include <sys/_sx.h>
#include <sys/_task.h>
+#include <sys/smp.h>
#include <sys/queue.h>
#include <dev/nvme/nvme.h>
#include <dev/nvmf/nvmf_transport.h>
@@ -21,15 +24,10 @@ struct nvmf_aer;
struct nvmf_capsule;
struct nvmf_host_qpair;
struct nvmf_namespace;
+struct sysctl_oid_list;
typedef void nvmf_request_complete_t(void *, const struct nvme_completion *);
-struct nvmf_ivars {
- struct nvmf_handoff_host *hh;
- struct nvmf_handoff_qpair_params *io_params;
- struct nvme_controller_data *cdata;
-};
-
struct nvmf_softc {
device_t dev;
@@ -42,6 +40,7 @@ struct nvmf_softc {
struct cam_path *path;
struct mtx sim_mtx;
bool sim_disconnected;
+ bool sim_shutdown;
struct nvmf_namespace **ns;
@@ -76,12 +75,27 @@ struct nvmf_softc {
struct callout ka_rx_timer;
sbintime_t ka_rx_sbt;
+ struct timeout_task request_reconnect_task;
+ struct timeout_task controller_loss_task;
+ uint32_t reconnect_delay;
+ uint32_t controller_loss_timeout;
+
struct sx connection_lock;
struct task disconnect_task;
bool detaching;
+ bool controller_timedout;
u_int num_aer;
struct nvmf_aer *aer;
+
+ struct sysctl_oid_list *ioq_oid_list;
+
+ nvlist_t *rparams;
+
+ struct timespec last_disconnect;
+
+ eventhandler_tag shutdown_pre_sync_eh;
+ eventhandler_tag shutdown_post_sync_eh;
};
struct nvmf_request {
@@ -104,8 +118,8 @@ struct nvmf_completion_status {
static __inline struct nvmf_host_qpair *
nvmf_select_io_queue(struct nvmf_softc *sc)
{
- /* TODO: Support multiple queues? */
- return (sc->io[0]);
+ u_int idx = curcpu * sc->num_io_queues / (mp_maxid + 1);
+ return (sc->io[idx]);
}
static __inline bool
@@ -140,14 +154,17 @@ extern driver_t nvme_nvmf_driver;
MALLOC_DECLARE(M_NVMF);
#endif
+/* If true, I/O requests will fail while the host is disconnected. */
+extern bool nvmf_fail_disconnect;
+
/* nvmf.c */
void nvmf_complete(void *arg, const struct nvme_completion *cqe);
void nvmf_io_complete(void *arg, size_t xfered, int error);
void nvmf_wait_for_reply(struct nvmf_completion_status *status);
-int nvmf_init_ivars(struct nvmf_ivars *ivars, struct nvmf_handoff_host *hh);
-void nvmf_free_ivars(struct nvmf_ivars *ivars);
+int nvmf_copyin_handoff(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp);
void nvmf_disconnect(struct nvmf_softc *sc);
void nvmf_rescan_ns(struct nvmf_softc *sc, uint32_t nsid);
+void nvmf_rescan_all_ns(struct nvmf_softc *sc);
int nvmf_passthrough_cmd(struct nvmf_softc *sc, struct nvme_pt_command *pt,
bool admin);
@@ -180,17 +197,17 @@ void nvmf_ctl_unload(void);
/* nvmf_ns.c */
struct nvmf_namespace *nvmf_init_ns(struct nvmf_softc *sc, uint32_t id,
- struct nvme_namespace_data *data);
+ const struct nvme_namespace_data *data);
void nvmf_disconnect_ns(struct nvmf_namespace *ns);
void nvmf_reconnect_ns(struct nvmf_namespace *ns);
+void nvmf_shutdown_ns(struct nvmf_namespace *ns);
void nvmf_destroy_ns(struct nvmf_namespace *ns);
bool nvmf_update_ns(struct nvmf_namespace *ns,
- struct nvme_namespace_data *data);
+ const struct nvme_namespace_data *data);
/* nvmf_qpair.c */
struct nvmf_host_qpair *nvmf_init_qp(struct nvmf_softc *sc,
- enum nvmf_trtype trtype, struct nvmf_handoff_qpair_params *handoff,
- const char *name);
+ enum nvmf_trtype trtype, const nvlist_t *nvl, const char *name, u_int qid);
void nvmf_shutdown_qp(struct nvmf_host_qpair *qp);
void nvmf_destroy_qp(struct nvmf_host_qpair *qp);
struct nvmf_request *nvmf_allocate_request(struct nvmf_host_qpair *qp,
@@ -202,6 +219,7 @@ void nvmf_free_request(struct nvmf_request *req);
int nvmf_init_sim(struct nvmf_softc *sc);
void nvmf_disconnect_sim(struct nvmf_softc *sc);
void nvmf_reconnect_sim(struct nvmf_softc *sc);
+void nvmf_shutdown_sim(struct nvmf_softc *sc);
void nvmf_destroy_sim(struct nvmf_softc *sc);
void nvmf_sim_rescan_ns(struct nvmf_softc *sc, uint32_t id);
diff --git a/sys/dev/nvmf/nvmf.h b/sys/dev/nvmf/nvmf.h
index 1f1ecd437c7e..9b2b4c1dea40 100644
--- a/sys/dev/nvmf/nvmf.h
+++ b/sys/dev/nvmf/nvmf.h
@@ -26,54 +26,107 @@
#define NVMF_NN (1024)
-struct nvmf_handoff_qpair_params {
- bool admin;
- bool sq_flow_control;
- u_int qsize;
- uint16_t sqhd;
- uint16_t sqtail; /* host only */
- union {
- struct {
- int fd;
- uint8_t rxpda;
- uint8_t txpda;
- bool header_digests;
- bool data_digests;
- uint32_t maxr2t;
- uint32_t maxh2cdata;
- uint32_t max_icd;
- } tcp;
- };
-};
+/*
+ * Default timeouts for Fabrics hosts. These match values used by
+ * Linux.
+ */
+#define NVMF_DEFAULT_RECONNECT_DELAY 10
+#define NVMF_DEFAULT_CONTROLLER_LOSS 600
-struct nvmf_handoff_host {
- u_int trtype;
- u_int num_io_queues;
- u_int kato;
- struct nvmf_handoff_qpair_params admin;
- struct nvmf_handoff_qpair_params *io;
- const struct nvme_controller_data *cdata;
+/*
+ * (data, size) is the userspace buffer for a packed nvlist.
+ *
+ * For requests that copyout an nvlist, len is the amount of data
+ * copied out to *data. If size is zero, no data is copied and len is
+ * set to the required buffer size.
+ */
+struct nvmf_ioc_nv {
+ void *data;
+ size_t len;
+ size_t size;
};
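A minimal userspace sketch of the size-query/fetch pattern described above, using the NVMF_CONNECTION_STATUS ioctl defined later in this header (fd is assumed to be an open /dev/nvmeX descriptor; includes of <sys/ioctl.h>, <sys/nv.h>, <err.h>, <stdlib.h>, and <string.h> plus full error handling are abbreviated):

	struct nvmf_ioc_nv nv;
	nvlist_t *nvl;

	memset(&nv, 0, sizeof(nv));
	if (ioctl(fd, NVMF_CONNECTION_STATUS, &nv) == -1)
		err(1, "size query");	/* kernel sets nv.len, copies no data */
	nv.data = malloc(nv.len);
	nv.size = nv.len;
	if (ioctl(fd, NVMF_CONNECTION_STATUS, &nv) == -1)
		err(1, "fetch");	/* kernel packs the nvlist into nv.data */
	nvl = nvlist_unpack(nv.data, nv.len, 0);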
-struct nvmf_reconnect_params {
- uint16_t cntlid;
- char subnqn[256];
-};
+/*
+ * The fields in a qpair handoff nvlist are:
+ *
+ * Transport independent:
+ *
+ * bool admin
+ * bool sq_flow_control
+ * number qsize
+ * number sqhd
+ * number sqtail host only
+ *
+ * TCP transport:
+ *
+ * number fd
+ * number rxpda
+ * number txpda
+ * bool header_digests
+ * bool data_digests
+ * number maxr2t
+ * number maxh2cdata
+ * number max_icd
+ */
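As a sketch of how a host-side TCP qpair handoff nvlist carrying these fields might be assembled with the libnv API (the local variables are assumed to have been computed during connection setup; this is illustrative, not the actual handoff code):

	nvlist_t *nvl;

	nvl = nvlist_create(0);
	nvlist_add_bool(nvl, "admin", false);
	nvlist_add_bool(nvl, "sq_flow_control", sq_flow_control);
	nvlist_add_number(nvl, "qsize", qsize);
	nvlist_add_number(nvl, "sqhd", sqhd);
	nvlist_add_number(nvl, "sqtail", sqtail);	/* host only */
	nvlist_add_number(nvl, "fd", s);		/* connected socket */
	nvlist_add_number(nvl, "rxpda", rxpda);
	nvlist_add_number(nvl, "txpda", txpda);
	nvlist_add_bool(nvl, "header_digests", header_digests);
	nvlist_add_bool(nvl, "data_digests", data_digests);
	nvlist_add_number(nvl, "maxr2t", maxr2t);
	nvlist_add_number(nvl, "maxh2cdata", maxh2cdata);
	nvlist_add_number(nvl, "max_icd", max_icd);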
-struct nvmf_handoff_controller_qpair {
- u_int trtype;
- struct nvmf_handoff_qpair_params params;
- const struct nvmf_fabric_connect_cmd *cmd;
- const struct nvmf_fabric_connect_data *data;
-};
+/*
+ * The fields in the nvlist for NVMF_HANDOFF_HOST and
+ * NVMF_RECONNECT_HOST are:
+ *
+ * number trtype
+ * number kato (optional)
+ * number reconnect_delay (optional)
+ * number controller_loss_timeout (optional)
+ * qpair handoff nvlist admin
+ * qpair handoff nvlist array io
+ * binary cdata struct nvme_controller_data
+ * NVMF_RECONNECT_PARAMS nvlist rparams
+ */
+
+/*
+ * The fields in the nvlist for NVMF_RECONNECT_PARAMS are:
+ *
+ * binary dle struct nvme_discovery_log_entry
+ * string hostnqn
+ * number num_io_queues
+ * number kato (optional)
+ * number reconnect_delay (optional)
+ * number controller_loss_timeout (optional)
+ * number io_qsize
+ * bool sq_flow_control
+ *
+ * TCP transport:
+ *
+ * bool header_digests
+ * bool data_digests
+ */
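A sketch of consuming this nvlist after unpacking it (field names as listed above; size checking of the binary blob is elided):

	const struct nvme_discovery_log_entry *dle;
	const char *hostnqn;
	uint64_t num_io_queues;
	size_t dlelen;

	dle = nvlist_get_binary(nvl, "dle", &dlelen);
	hostnqn = nvlist_get_string(nvl, "hostnqn");
	num_io_queues = nvlist_get_number(nvl, "num_io_queues");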
+
+/*
+ * The fields in the nvlist for NVMF_CONNECTION_STATUS are:
+ *
+ * bool connected
+ * timespec nvlist last_disconnect
+ * number tv_sec
+ * number tv_nsec
+ */
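The nested timespec nvlist could be read back as in this sketch (again assuming an already-unpacked nvl):

	const nvlist_t *ts;
	struct timespec last;

	if (!nvlist_get_bool(nvl, "connected")) {
		ts = nvlist_get_nvlist(nvl, "last_disconnect");
		last.tv_sec = nvlist_get_number(ts, "tv_sec");
		last.tv_nsec = nvlist_get_number(ts, "tv_nsec");
	}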
+
+/*
+ * The fields in the nvlist for handing off a controller qpair are:
+ *
+ * number trtype
+ * qpair handoff nvlist params
+ * binary cmd struct nvmf_fabric_connect_cmd
+ * binary data struct nvmf_fabric_connect_data
+ */
/* Operations on /dev/nvmf */
-#define NVMF_HANDOFF_HOST _IOW('n', 200, struct nvmf_handoff_host)
+#define NVMF_HANDOFF_HOST _IOW('n', 200, struct nvmf_ioc_nv)
#define NVMF_DISCONNECT_HOST _IOW('n', 201, const char *)
#define NVMF_DISCONNECT_ALL _IO('n', 202)
/* Operations on /dev/nvmeX */
-#define NVMF_RECONNECT_PARAMS _IOR('n', 203, struct nvmf_reconnect_params)
-#define NVMF_RECONNECT_HOST _IOW('n', 204, struct nvmf_handoff_host)
+#define NVMF_RECONNECT_PARAMS _IOWR('n', 203, struct nvmf_ioc_nv)
+#define NVMF_RECONNECT_HOST _IOW('n', 204, struct nvmf_ioc_nv)
+#define NVMF_CONNECTION_STATUS _IOWR('n', 205, struct nvmf_ioc_nv)
#endif /* !__NVMF_H__ */
diff --git a/sys/dev/nvmf/nvmf_proto.h b/sys/dev/nvmf/nvmf_proto.h
index b0be236f77fa..f67c34acbf95 100644
--- a/sys/dev/nvmf/nvmf_proto.h
+++ b/sys/dev/nvmf/nvmf_proto.h
@@ -22,8 +22,6 @@
* NVMe over Fabrics specification definitions
*/
-#pragma pack(push, 1)
-
#define NVME_NQN_FIELD_SIZE 256
struct nvmf_capsule_cmd {
@@ -174,7 +172,7 @@ struct nvmf_fabric_cmd {
uint16_t cid;
uint8_t fctype;
uint8_t reserved2[59];
-};
+} __aligned(8);
struct nvmf_fabric_auth_recv_cmd {
uint8_t opcode;
@@ -764,6 +762,4 @@ _Static_assert(offsetof(struct nvme_tcp_r2t_hdr, ttag) == 10, "Incorrect offset"
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2to) == 12, "Incorrect offset");
_Static_assert(offsetof(struct nvme_tcp_r2t_hdr, r2tl) == 16, "Incorrect offset");
-#pragma pack(pop)
-
#endif /* __NVMF_PROTO_H__ */
diff --git a/sys/dev/nvmf/nvmf_tcp.c b/sys/dev/nvmf/nvmf_tcp.c
index 57c81eceee02..e50d7ff48d2b 100644
--- a/sys/dev/nvmf/nvmf_tcp.c
+++ b/sys/dev/nvmf/nvmf_tcp.c
@@ -18,6 +18,7 @@
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
+#include <sys/nv.h>
#include <sys/protosw.h>
#include <sys/refcount.h>
#include <sys/socket.h>
@@ -138,7 +139,7 @@ static void tcp_free_qpair(struct nvmf_qpair *nq);
SYSCTL_NODE(_kern_nvmf, OID_AUTO, tcp, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"TCP transport");
static u_int tcp_max_transmit_data = 256 * 1024;
-SYSCTL_UINT(_kern_nvmf_tcp, OID_AUTO, max_c2hdata, CTLFLAG_RWTUN,
+SYSCTL_UINT(_kern_nvmf_tcp, OID_AUTO, max_transmit_data, CTLFLAG_RWTUN,
&tcp_max_transmit_data, 0,
"Maximum size of data payload in a transmitted PDU");
@@ -442,7 +443,7 @@ nvmf_tcp_construct_pdu(struct nvmf_tcp_qpair *qp, void *hdr, size_t hlen,
plen += sizeof(digest);
if (data_len != 0) {
KASSERT(m_length(data, NULL) == data_len, ("length mismatch"));
- pdo = roundup2(plen, qp->txpda);
+ pdo = roundup(plen, qp->txpda);
pad = pdo - plen;
plen = pdo + data_len;
if (qp->data_digests)
@@ -623,10 +624,7 @@ mbuf_copyto_io(struct mbuf *m, u_int skip, u_int len,
while (len != 0) {
MPASS((m->m_flags & M_EXTPG) == 0);
- todo = m->m_len - skip;
- if (todo > len)
- todo = len;
-
+ todo = min(m->m_len - skip, len);
memdesc_copyback(&io->io_mem, io_offset, todo, mtodo(m, skip));
skip = 0;
io_offset += todo;
@@ -887,7 +885,7 @@ nvmf_tcp_mext_pg(void *arg, int how)
struct nvmf_tcp_command_buffer *cb = arg;
struct mbuf *m;
- m = mb_alloc_ext_pgs(how, nvmf_tcp_free_mext_pg);
+ m = mb_alloc_ext_pgs(how, nvmf_tcp_free_mext_pg, M_RDONLY);
m->m_ext.ext_arg1 = cb;
tcp_hold_command_buffer(cb);
return (m);
@@ -972,7 +970,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
}
/*
- * XXX: The spec does not specify how to handle R2T tranfers
+ * XXX: The spec does not specify how to handle R2T transfers
* out of range of the original command.
*/
data_len = le32toh(r2t->r2tl);
@@ -1000,9 +998,7 @@ nvmf_tcp_handle_r2t(struct nvmf_tcp_qpair *qp, struct nvmf_tcp_rxpdu *pdu)
struct mbuf *m;
uint32_t sent, todo;
- todo = data_len;
- if (todo > qp->max_tx_data)
- todo = qp->max_tx_data;
+ todo = min(data_len, qp->max_tx_data);
m = nvmf_tcp_command_buffer_mbuf(cb, data_offset, todo, &sent,
todo < data_len);
tcp_send_h2c_pdu(qp, r2t->cccid, r2t->ttag, data_offset, m,
@@ -1418,8 +1414,7 @@ nvmf_soupcall_send(struct socket *so, void *arg, int waitflag)
}
static struct nvmf_qpair *
-tcp_allocate_qpair(bool controller,
- const struct nvmf_handoff_qpair_params *params)
+tcp_allocate_qpair(bool controller, const nvlist_t *nvl)
{
struct nvmf_tcp_qpair *qp;
struct socket *so;
@@ -1427,8 +1422,18 @@ tcp_allocate_qpair(bool controller,
cap_rights_t rights;
int error;
- error = fget(curthread, params->tcp.fd, cap_rights_init_one(&rights,
- CAP_SOCK_CLIENT), &fp);
+ if (!nvlist_exists_number(nvl, "fd") ||
+ !nvlist_exists_number(nvl, "rxpda") ||
+ !nvlist_exists_number(nvl, "txpda") ||
+ !nvlist_exists_bool(nvl, "header_digests") ||
+ !nvlist_exists_bool(nvl, "data_digests") ||
+ !nvlist_exists_number(nvl, "maxr2t") ||
+ !nvlist_exists_number(nvl, "maxh2cdata") ||
+ !nvlist_exists_number(nvl, "max_icd"))
+ return (NULL);
+
+ error = fget(curthread, nvlist_get_number(nvl, "fd"),
+ cap_rights_init_one(&rights, CAP_SOCK_CLIENT), &fp);
if (error != 0)
return (NULL);
if (fp->f_type != DTYPE_SOCKET) {
@@ -1450,26 +1455,28 @@ tcp_allocate_qpair(bool controller,
qp = malloc(sizeof(*qp), M_NVMF_TCP, M_WAITOK | M_ZERO);
qp->so = so;
refcount_init(&qp->refs, 1);
- qp->txpda = params->tcp.txpda;
- qp->rxpda = params->tcp.rxpda;
- qp->header_digests = params->tcp.header_digests;
- qp->data_digests = params->tcp.data_digests;
- qp->maxr2t = params->tcp.maxr2t;
- qp->maxh2cdata = params->tcp.maxh2cdata;
+ qp->txpda = nvlist_get_number(nvl, "txpda");
+ qp->rxpda = nvlist_get_number(nvl, "rxpda");
+ qp->header_digests = nvlist_get_bool(nvl, "header_digests");
+ qp->data_digests = nvlist_get_bool(nvl, "data_digests");
+ qp->maxr2t = nvlist_get_number(nvl, "maxr2t");
+ if (controller)
+ qp->maxh2cdata = nvlist_get_number(nvl, "maxh2cdata");
qp->max_tx_data = tcp_max_transmit_data;
if (!controller) {
- if (qp->max_tx_data > params->tcp.maxh2cdata)
- qp->max_tx_data = params->tcp.maxh2cdata;
+ qp->max_tx_data = min(qp->max_tx_data,
+ nvlist_get_number(nvl, "maxh2cdata"));
+ qp->max_icd = nvlist_get_number(nvl, "max_icd");
}
- qp->max_icd = params->tcp.max_icd;
if (controller) {
/* Use the SUCCESS flag if SQ flow control is disabled. */
- qp->send_success = !params->sq_flow_control;
+ qp->send_success = !nvlist_get_bool(nvl, "sq_flow_control");
/* NB: maxr2t is 0's based. */
qp->num_ttags = MIN((u_int)UINT16_MAX + 1,
- (uint64_t)params->qsize * (uint64_t)qp->maxr2t + 1);
+ nvlist_get_number(nvl, "qsize") *
+ ((uint64_t)qp->maxr2t + 1));
qp->open_ttags = mallocarray(qp->num_ttags,
sizeof(*qp->open_ttags), M_NVMF_TCP, M_WAITOK | M_ZERO);
}
@@ -1558,6 +1565,7 @@ tcp_free_qpair(struct nvmf_qpair *nq)
for (u_int i = 0; i < qp->num_ttags; i++) {
cb = qp->open_ttags[i];
if (cb != NULL) {
+ cb->tc->active_r2ts--;
cb->error = ECONNABORTED;
tcp_release_command_buffer(cb);
}
@@ -1569,6 +1577,10 @@ tcp_free_qpair(struct nvmf_qpair *nq)
TAILQ_FOREACH_SAFE(cb, &qp->rx_buffers.head, link, ncb) {
tcp_remove_command_buffer(&qp->rx_buffers, cb);
mtx_unlock(&qp->rx_buffers.lock);
+#ifdef INVARIANTS
+ if (cb->tc != NULL)
+ cb->tc->pending_r2ts--;
+#endif
cb->error = ECONNABORTED;
tcp_release_command_buffer(cb);
mtx_lock(&qp->rx_buffers.lock);
@@ -1784,7 +1796,6 @@ tcp_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
{
struct nvmf_tcp_qpair *qp = TQP(nc->nc_qpair);
struct nvme_sgl_descriptor *sgl;
- struct mbuf *n, *p;
uint32_t data_len;
bool last_pdu, last_xfer;
@@ -1813,21 +1824,29 @@ tcp_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
	/* Queue one or more C2H_DATA PDUs containing the data from 'm'. */
while (m != NULL) {
+ struct mbuf *n;
uint32_t todo;
- todo = m->m_len;
- p = m;
- n = p->m_next;
- while (n != NULL) {
- if (todo + n->m_len > qp->max_tx_data) {
- p->m_next = NULL;
- break;
- }
- todo += n->m_len;
- p = n;
+ if (m->m_len > qp->max_tx_data) {
+ n = m_split(m, qp->max_tx_data, M_WAITOK);
+ todo = m->m_len;
+ } else {
+ struct mbuf *p;
+
+ todo = m->m_len;
+ p = m;
n = p->m_next;
+ while (n != NULL) {
+ if (todo + n->m_len > qp->max_tx_data) {
+ p->m_next = NULL;
+ break;
+ }
+ todo += n->m_len;
+ p = n;
+ n = p->m_next;
+ }
+ MPASS(m_length(m, NULL) == todo);
}
- MPASS(m_length(m, NULL) == todo);
last_pdu = (n == NULL && last_xfer);
tcp_send_c2h_pdu(qp, nc->nc_sqe.cid, data_offset, m, todo,
diff --git a/sys/dev/nvmf/nvmf_tcp.h b/sys/dev/nvmf/nvmf_tcp.h
index 00b0917f75a4..03b5d2445928 100644
--- a/sys/dev/nvmf/nvmf_tcp.h
+++ b/sys/dev/nvmf/nvmf_tcp.h
@@ -9,7 +9,6 @@
#define __NVMF_TCP_H__
#ifndef _KERNEL
-#define __assert_unreachable __unreachable
#define MPASS assert
#endif
@@ -41,6 +40,13 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
uint8_t digest_flags, valid_flags;
plen = le32toh(ch->plen);
+ full_hlen = ch->hlen;
+ if ((ch->flags & NVME_TCP_CH_FLAGS_HDGSTF) != 0)
+ full_hlen += sizeof(uint32_t);
+ if (plen == full_hlen)
+ data_len = 0;
+ else
+ data_len = plen - ch->pdo;
/*
* Errors must be reported for the lowest incorrect field
@@ -50,7 +56,7 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
/* Validate pdu_type. */
/* Controllers only receive PDUs with a PDU direction of 0. */
- if (controller != (ch->pdu_type & 0x01) == 0) {
+ if (controller != ((ch->pdu_type & 0x01) == 0)) {
printf("NVMe/TCP: Invalid PDU type %u\n", ch->pdu_type);
*fes = NVME_TCP_TERM_REQ_FES_INVALID_HEADER_FIELD;
*fei = offsetof(struct nvme_tcp_common_pdu_hdr, pdu_type);
@@ -125,11 +131,15 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
return (EBADMSG);
}
- /* Verify that digests are present iff enabled. */
+ /*
+ * Verify that digests are present iff enabled. Note that the
+ * data digest will not be present if there is no data
+ * payload.
+ */
digest_flags = 0;
if (header_digests)
digest_flags |= NVME_TCP_CH_FLAGS_HDGSTF;
- if (data_digests)
+ if (data_digests && data_len != 0)
digest_flags |= NVME_TCP_CH_FLAGS_DDGSTF;
if ((digest_flags & valid_flags) !=
(ch->flags & (NVME_TCP_CH_FLAGS_HDGSTF |
@@ -184,9 +194,6 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
}
/* Validate pdo. */
- full_hlen = ch->hlen;
- if ((ch->flags & NVME_TCP_CH_FLAGS_HDGSTF) != 0)
- full_hlen += sizeof(uint32_t);
switch (ch->pdu_type) {
default:
__assert_unreachable();
@@ -207,7 +214,7 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
case NVME_TCP_PDU_TYPE_H2C_DATA:
case NVME_TCP_PDU_TYPE_C2H_DATA:
/* Permit PDO of 0 if there is no data. */
- if (full_hlen == plen && ch->pdo == 0)
+ if (data_len == 0 && ch->pdo == 0)
break;
if (ch->pdo < full_hlen || ch->pdo > plen ||
@@ -229,10 +236,6 @@ nvmf_tcp_validate_pdu_header(const struct nvme_tcp_common_pdu_hdr *ch,
return (EBADMSG);
}
- if (plen == full_hlen)
- data_len = 0;
- else
- data_len = plen - ch->pdo;
switch (ch->pdu_type) {
default:
__assert_unreachable();
diff --git a/sys/dev/nvmf/nvmf_transport.c b/sys/dev/nvmf/nvmf_transport.c
index 14d526192270..1d3f5ea4cf69 100644
--- a/sys/dev/nvmf/nvmf_transport.c
+++ b/sys/dev/nvmf/nvmf_transport.c
@@ -12,6 +12,7 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
+#include <sys/nv.h>
#include <sys/refcount.h>
#include <sys/sysctl.h>
#include <sys/sx.h>
@@ -47,8 +48,7 @@ nvmf_supported_trtype(enum nvmf_trtype trtype)
struct nvmf_qpair *
nvmf_allocate_qpair(enum nvmf_trtype trtype, bool controller,
- const struct nvmf_handoff_qpair_params *params,
- nvmf_qpair_error_t *error_cb, void *error_cb_arg,
+ const nvlist_t *params, nvmf_qpair_error_t *error_cb, void *error_cb_arg,
nvmf_capsule_receive_t *receive_cb, void *receive_cb_arg)
{
struct nvmf_transport *nt;
@@ -76,7 +76,7 @@ nvmf_allocate_qpair(enum nvmf_trtype trtype, bool controller,
qp->nq_error_arg = error_cb_arg;
qp->nq_receive = receive_cb;
qp->nq_receive_arg = receive_cb_arg;
- qp->nq_admin = params->admin;
+ qp->nq_admin = nvlist_get_bool(params, "admin");
return (qp);
}
@@ -180,6 +180,14 @@ nvmf_capsule_cqe(struct nvmf_capsule *nc)
return (&nc->nc_cqe);
}
+bool
+nvmf_sqhd_valid(struct nvmf_capsule *nc)
+{
+ KASSERT(nc->nc_qe_len == sizeof(struct nvme_completion),
+ ("%s: capsule %p is not a response capsule", __func__, nc));
+ return (nc->nc_sqhd_valid);
+}
+
uint8_t
nvmf_validate_command_capsule(struct nvmf_capsule *nc)
{
@@ -223,6 +231,92 @@ nvmf_send_controller_data(struct nvmf_capsule *nc, uint32_t data_offset,
}
int
+nvmf_pack_ioc_nvlist(const nvlist_t *nvl, struct nvmf_ioc_nv *nv)
+{
+ void *packed;
+ int error;
+
+ error = nvlist_error(nvl);
+ if (error != 0)
+ return (error);
+
+ if (nv->size == 0) {
+ nv->len = nvlist_size(nvl);
+ } else {
+ packed = nvlist_pack(nvl, &nv->len);
+ if (packed == NULL)
+ error = ENOMEM;
+ else if (nv->len > nv->size)
+ error = EFBIG;
+ else
+ error = copyout(packed, nv->data, nv->len);
+ free(packed, M_NVLIST);
+ }
+ return (error);
+}
+
+int
+nvmf_unpack_ioc_nvlist(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp)
+{
+ void *packed;
+ nvlist_t *nvl;
+ int error;
+
+ packed = malloc(nv->size, M_NVMF_TRANSPORT, M_WAITOK);
+ error = copyin(nv->data, packed, nv->size);
+ if (error != 0) {
+ free(packed, M_NVMF_TRANSPORT);
+ return (error);
+ }
+
+ nvl = nvlist_unpack(packed, nv->size, 0);
+ free(packed, M_NVMF_TRANSPORT);
+ if (nvl == NULL)
+ return (EINVAL);
+
+ *nvlp = nvl;
+ return (0);
+}
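A sketch of how these helpers compose in a hypothetical ioctl handler for a host qpair handoff (the real callers live in the host code; this fragment is illustrative only):

	nvlist_t *nvl;
	int error;

	error = nvmf_unpack_ioc_nvlist(nv, &nvl);
	if (error != 0)
		return (error);
	if (!nvmf_validate_qpair_nvlist(nvl, false)) {	/* host side */
		nvlist_destroy(nvl);
		return (EINVAL);
	}
	/* ... consume the validated qpair nvlist ... */
	nvlist_destroy(nvl);
	return (0);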
+
+bool
+nvmf_validate_qpair_nvlist(const nvlist_t *nvl, bool controller)
+{
+ uint64_t value, qsize;
+ bool admin, valid;
+
+ valid = true;
+ valid &= nvlist_exists_bool(nvl, "admin");
+ valid &= nvlist_exists_bool(nvl, "sq_flow_control");
+ valid &= nvlist_exists_number(nvl, "qsize");
+ valid &= nvlist_exists_number(nvl, "sqhd");
+ if (!controller)
+ valid &= nvlist_exists_number(nvl, "sqtail");
+ if (!valid)
+ return (false);
+
+ admin = nvlist_get_bool(nvl, "admin");
+ qsize = nvlist_get_number(nvl, "qsize");
+ if (admin) {
+ if (qsize < NVME_MIN_ADMIN_ENTRIES ||
+ qsize > NVME_MAX_ADMIN_ENTRIES)
+ return (false);
+ } else {
+ if (qsize < NVME_MIN_IO_ENTRIES || qsize > NVME_MAX_IO_ENTRIES)
+ return (false);
+ }
+ value = nvlist_get_number(nvl, "sqhd");
+ if (value > qsize - 1)
+ return (false);
+ if (!controller) {
+ value = nvlist_get_number(nvl, "sqtail");
+ if (value > qsize - 1)
+ return (false);
+ }
+
+ return (true);
+}
+
+int
nvmf_transport_module_handler(struct module *mod, int what, void *arg)
{
struct nvmf_transport_ops *ops = arg;
@@ -292,8 +386,6 @@ nvmf_transport_module_handler(struct module *mod, int what, void *arg)
prev = nt;
}
if (nt == NULL) {
- KASSERT(nt->nt_active_qpairs == 0,
- ("unregistered transport has connections"));
sx_xunlock(&nvmf_transports_lock);
return (0);
}
diff --git a/sys/dev/nvmf/nvmf_transport.h b/sys/dev/nvmf/nvmf_transport.h
index 549170b25940..b192baeaccc1 100644
--- a/sys/dev/nvmf/nvmf_transport.h
+++ b/sys/dev/nvmf/nvmf_transport.h
@@ -13,6 +13,7 @@
* (target) to send and receive capsules and associated data.
*/
+#include <sys/_nv.h>
#include <sys/sysctl.h>
#include <dev/nvmf/nvmf_proto.h>
@@ -20,8 +21,8 @@ struct mbuf;
struct memdesc;
struct nvmf_capsule;
struct nvmf_connection;
+struct nvmf_ioc_nv;
struct nvmf_qpair;
-struct nvmf_handoff_qpair_params;
SYSCTL_DECL(_kern_nvmf);
@@ -54,7 +55,7 @@ typedef void nvmf_io_complete_t(void *, size_t, int);
* independent.
*/
struct nvmf_qpair *nvmf_allocate_qpair(enum nvmf_trtype trtype,
- bool controller, const struct nvmf_handoff_qpair_params *params,
+ bool controller, const nvlist_t *params,
nvmf_qpair_error_t *error_cb, void *error_cb_arg,
nvmf_capsule_receive_t *receive_cb, void *receive_cb_arg);
void nvmf_free_qpair(struct nvmf_qpair *qp);
@@ -78,6 +79,7 @@ int nvmf_transmit_capsule(struct nvmf_capsule *nc);
void nvmf_abort_capsule_data(struct nvmf_capsule *nc, int error);
void *nvmf_capsule_sqe(struct nvmf_capsule *nc);
void *nvmf_capsule_cqe(struct nvmf_capsule *nc);
+bool nvmf_sqhd_valid(struct nvmf_capsule *nc);
/* Controller-specific APIs. */
@@ -137,4 +139,23 @@ u_int nvmf_send_controller_data(struct nvmf_capsule *nc,
#define NVMF_SUCCESS_SENT 0x100
#define NVMF_MORE 0x101
+/* Helper APIs for nvlists used in ioctls. */
+
+/*
+ * Pack the nvlist nvl and copyout to the buffer described by nv.
+ */
+int nvmf_pack_ioc_nvlist(const nvlist_t *nvl, struct nvmf_ioc_nv *nv);
+
+/*
+ * Copyin and unpack an nvlist described by nv. The unpacked nvlist
+ * is returned in *nvlp on success.
+ */
+int nvmf_unpack_ioc_nvlist(const struct nvmf_ioc_nv *nv, nvlist_t **nvlp);
+
+/*
+ * Returns true if a qpair handoff nvlist has all the required
+ * transport-independent values.
+ */
+bool nvmf_validate_qpair_nvlist(const nvlist_t *nvl, bool controller);
+
#endif /* !__NVMF_TRANSPORT_H__ */
diff --git a/sys/dev/nvmf/nvmf_transport_internal.h b/sys/dev/nvmf/nvmf_transport_internal.h
index 0be427ee0690..eb819a5c83b9 100644
--- a/sys/dev/nvmf/nvmf_transport_internal.h
+++ b/sys/dev/nvmf/nvmf_transport_internal.h
@@ -8,6 +8,7 @@
#ifndef __NVMF_TRANSPORT_INTERNAL_H__
#define __NVMF_TRANSPORT_INTERNAL_H__
+#include <sys/_nv.h>
#include <sys/memdesc.h>
/*
@@ -21,7 +22,7 @@ struct nvmf_io_request;
struct nvmf_transport_ops {
/* Queue pair management. */
struct nvmf_qpair *(*allocate_qpair)(bool controller,
- const struct nvmf_handoff_qpair_params *params);
+ const nvlist_t *nvl);
void (*free_qpair)(struct nvmf_qpair *qp);
/* Capsule operations. */
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
index d7ec58ec1f5e..1906933fa75a 100644
--- a/sys/dev/oce/oce_if.c
+++ b/sys/dev/oce/oce_if.c
@@ -170,7 +170,7 @@ static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, s
static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
/* Helper function prototypes in this file */
-static int oce_attach_ifp(POCE_SOFTC sc);
+static void oce_attach_ifp(POCE_SOFTC sc);
static void oce_add_vlan(void *arg, if_t ifp, uint16_t vtag);
static void oce_del_vlan(void *arg, if_t ifp, uint16_t vtag);
static int oce_vid_config(POCE_SOFTC sc);
@@ -252,7 +252,6 @@ oce_probe(device_t dev)
uint16_t vendor = 0;
uint16_t device = 0;
int i = 0;
- char str[256] = {0};
POCE_SOFTC sc;
sc = device_get_softc(dev);
@@ -265,9 +264,9 @@ oce_probe(device_t dev)
for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) {
if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) {
if (device == (supportedDevices[i] & 0xffff)) {
- sprintf(str, "%s:%s", "Emulex CNA NIC function",
- component_revision);
- device_set_desc_copy(dev, str);
+ device_set_descf(dev,
+ "%s:%s", "Emulex CNA NIC function",
+ component_revision);
switch (device) {
case PCI_PRODUCT_BE2:
@@ -335,9 +334,7 @@ oce_attach(device_t dev)
if (rc)
goto intr_free;
- rc = oce_attach_ifp(sc);
- if (rc)
- goto queues_free;
+ oce_attach_ifp(sc);
#if defined(INET6) || defined(INET)
rc = oce_init_lro(sc);
@@ -373,8 +370,6 @@ oce_attach(device_t dev)
}
softc_tail = sc;
- gone_in_dev(dev, 15, "relatively uncommon 10GbE NIC");
-
return 0;
stats_free:
@@ -393,7 +388,6 @@ ifp_free:
#endif
ether_ifdetach(sc->ifp);
if_free(sc->ifp);
-queues_free:
oce_queue_release_all(sc);
intr_free:
oce_intr_free(sc);
@@ -1498,7 +1492,7 @@ oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_
/* correct tcp header */
tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
if(cqe2->push) {
- tcp_hdr->th_flags |= TH_PUSH;
+ tcp_set_flags(tcp_hdr, tcp_get_flags(tcp_hdr) | TH_PUSH);
}
tcp_hdr->th_win = htons(cqe2->tcp_window);
tcp_hdr->th_sum = 0xffff;
@@ -2100,13 +2094,11 @@ oce_rq_handler(void *arg)
* Helper function prototypes in this file *
*****************************************************************************/
-static int
+static void
oce_attach_ifp(POCE_SOFTC sc)
{
sc->ifp = if_alloc(IFT_ETHER);
- if (!sc->ifp)
- return ENOMEM;
ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status);
ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
@@ -2149,8 +2141,6 @@ oce_attach_ifp(POCE_SOFTC sc)
if_sethwtsomaxsegsize(sc->ifp, 4096);
ether_ifattach(sc->ifp, sc->macaddr.mac_addr);
-
- return 0;
}
static void
diff --git a/sys/dev/ocs_fc/ocs_cam.c b/sys/dev/ocs_fc/ocs_cam.c
index a944c7628d85..772e7ca1c383 100644
--- a/sys/dev/ocs_fc/ocs_cam.c
+++ b/sys/dev/ocs_fc/ocs_cam.c
@@ -700,8 +700,8 @@ int32_t ocs_scsi_recv_tmf(ocs_io_t *tmfio, uint64_t lun, ocs_scsi_tmf_cmd_e cmd,
trsrc = &fcp->targ_rsrc_wildcard;
}
- device_printf(tmfio->ocs->dev, "%s: io=%p cmd=%#x LU=%lx en=%s\n",
- __func__, tmfio, cmd, (unsigned long)lun,
+ device_printf(tmfio->ocs->dev, "%s: io=%u(index) cmd=%#x LU=%lx en=%s\n",
+ __func__, tmfio->instance_index, cmd, (unsigned long)lun,
trsrc ? (trsrc->enabled ? "T" : "F") : "X");
if (trsrc) {
inot = (struct ccb_immediate_notify *)STAILQ_FIRST(&trsrc->inot);
diff --git a/sys/dev/ocs_fc/ocs_hw.c b/sys/dev/ocs_fc/ocs_hw.c
index cfb9d4f8f536..62ef3cd49be3 100644
--- a/sys/dev/ocs_fc/ocs_hw.c
+++ b/sys/dev/ocs_fc/ocs_hw.c
@@ -11214,7 +11214,7 @@ target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
getmicrouptime(&cur_time);
timevalsub(&cur_time, &io->submit_time);
if (cur_time.tv_sec > io->wqe_timeout) {
- ocs_log_info(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d elasped time:%u\n",
+ ocs_log_info(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d elapsed time:%u\n",
io->indicator, io->reqtag, io->type, cur_time.tv_sec);
/* remove from active_wqe list so won't try to abort again */
@@ -12192,7 +12192,7 @@ ocs_hw_set_persistent_topology(ocs_hw_t *hw, uint32_t topology, uint32_t opts)
param.persistent_topo = SLI4_INIT_LINK_F_P2P_FAIL_OVER;
param.topo_failover = 1;
} else {
- param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;;
+ param.persistent_topo = SLI4_INIT_LINK_F_P2P_ONLY;
param.topo_failover = 0;
}
break;
diff --git a/sys/dev/ocs_fc/ocs_mgmt.c b/sys/dev/ocs_fc/ocs_mgmt.c
index 726b499f28ba..5b7f6557c017 100644
--- a/sys/dev/ocs_fc/ocs_mgmt.c
+++ b/sys/dev/ocs_fc/ocs_mgmt.c
@@ -226,7 +226,7 @@ ocs_mgmt_get_list(ocs_t *ocs, ocs_textbuf_t *textbuf)
ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
access = 0;
if (mgmt_table[i].get_handler) {
access |= MGMT_MODE_RD;
@@ -305,7 +305,7 @@ ocs_mgmt_get(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
char *unqualified_name = name + strlen(qualifier) + 1;
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].get_handler) {
mgmt_table[i].get_handler(ocs, name, textbuf);
@@ -387,7 +387,7 @@ ocs_mgmt_set(ocs_t *ocs, char *name, char *value)
char *unqualified_name = name + strlen(qualifier) +1;
/* See if it's a value I can set */
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].set_handler) {
return mgmt_table[i].set_handler(ocs, name, value);
@@ -469,7 +469,7 @@ ocs_mgmt_exec(ocs_t *ocs, char *action, void *arg_in,
char *unqualified_name = action + strlen(qualifier) +1;
/* See if it's an action I can perform */
- for (i=0;i<ARRAY_SIZE(mgmt_table); i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (ocs_strcmp(unqualified_name, mgmt_table[i].name) == 0) {
if (mgmt_table[i].action_handler) {
return mgmt_table[i].action_handler(ocs, action, arg_in, arg_in_length,
@@ -527,7 +527,7 @@ ocs_mgmt_get_all(ocs_t *ocs, ocs_textbuf_t *textbuf)
ocs_mgmt_start_unnumbered_section(textbuf, "ocs");
- for (i=0;i<ARRAY_SIZE(mgmt_table);i++) {
+ for (i = 0; i < ARRAY_SIZE(mgmt_table); i++) {
if (mgmt_table[i].get_handler) {
mgmt_table[i].get_handler(ocs, mgmt_table[i].name, textbuf);
} else if (mgmt_table[i].action_handler) {
@@ -1212,7 +1212,7 @@ get_sfp_a2(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
int buffer_remaining = (SFP_PAGE_SIZE * 3) + 1;
int bytes_added;
- for (i=0; i < bytes_read; i++) {
+ for (i = 0; i < bytes_read; i++) {
bytes_added = ocs_snprintf(d, buffer_remaining, "%02x ", *s);
++s;
d += bytes_added;
@@ -2040,7 +2040,7 @@ get_profile_list(ocs_t *ocs, char *name, ocs_textbuf_t *textbuf)
result_buf = ocs_malloc(ocs, BUFFER_SIZE, OCS_M_ZERO);
bytes_left = BUFFER_SIZE;
- for (i=0; i<result.list->num_descriptors; i++) {
+ for (i = 0; i < result.list->num_descriptors; i++) {
sprintf(result_line, "0x%02x:%s\n", result.list->descriptors[i].profile_id,
result.list->descriptors[i].profile_description);
if (strlen(result_line) < bytes_left) {
diff --git a/sys/dev/ocs_fc/ocs_os.h b/sys/dev/ocs_fc/ocs_os.h
index 9e3beff35548..331d5b07aecd 100644
--- a/sys/dev/ocs_fc/ocs_os.h
+++ b/sys/dev/ocs_fc/ocs_os.h
@@ -56,10 +56,10 @@
#include <sys/taskqueue.h>
#include <sys/bitstring.h>
#include <sys/stack.h>
+#include <sys/stdarg.h>
#include <machine/atomic.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <dev/pci/pcivar.h>
diff --git a/sys/dev/ofw/ofw_bus_subr.c b/sys/dev/ofw/ofw_bus_subr.c
index d63e89e2d677..b99d784929bc 100644
--- a/sys/dev/ofw/ofw_bus_subr.c
+++ b/sys/dev/ofw/ofw_bus_subr.c
@@ -210,7 +210,7 @@ ofw_bus_node_status_okay(phandle_t node)
OF_getprop(node, "status", status, OFW_STATUS_LEN);
if ((len == 5 && (bcmp(status, "okay", len) == 0)) ||
- (len == 3 && (bcmp(status, "ok", len))))
+ (len == 3 && (bcmp(status, "ok", len) == 0)))
return (1);
return (0);
@@ -634,27 +634,106 @@ ofw_bus_find_iparent(phandle_t node)
return (iparent);
}
+static phandle_t
+ofw_bus_search_iparent(phandle_t node)
+{
+ phandle_t iparent;
+
+ do {
+ if (OF_getencprop(node, "interrupt-parent", &iparent,
+ sizeof(iparent)) > 0) {
+ node = OF_node_from_xref(iparent);
+ } else {
+ node = OF_parent(node);
+ }
+ if (node == 0)
+ return (0);
+ } while (!OF_hasprop(node, "#interrupt-cells"));
+
+ return (OF_xref_from_node(node));
+}
+
+static int
+ofw_bus_traverse_imap(phandle_t inode, phandle_t node, uint32_t *intr,
+ int intrsz, pcell_t *res, int ressz, phandle_t *iparentp)
+{
+ struct ofw_bus_iinfo ii;
+ void *reg;
+ uint32_t *intrp;
+ phandle_t iparent;
+ int rv = 0;
+
+ /* We already have an interrupt controller */
+ if (OF_hasprop(node, "interrupt-controller"))
+ return (0);
+
+ intrp = malloc(intrsz, M_OFWPROP, M_WAITOK);
+ memcpy(intrp, intr, intrsz);
+
+ while (true) {
+ /* There is no interrupt-map to follow */
+ if (!OF_hasprop(inode, "interrupt-map")) {
+ free(intrp, M_OFWPROP);
+ return (0);
+ }
+
+ memset(&ii, 0, sizeof(ii));
+ ofw_bus_setup_iinfo(inode, &ii, sizeof(cell_t));
+
+ reg = NULL;
+ if (ii.opi_addrc > 0)
+ reg = malloc(ii.opi_addrc, M_OFWPROP, M_WAITOK);
+
+ rv = ofw_bus_lookup_imap(node, &ii, reg, ii.opi_addrc, intrp,
+ intrsz, res, ressz, &iparent);
+
+ free(reg, M_OFWPROP);
+ free(ii.opi_imap, M_OFWPROP);
+ free(ii.opi_imapmsk, M_OFWPROP);
+ free(intrp, M_OFWPROP);
+
+ if (rv == 0)
+ return (0);
+
+ node = inode;
+ inode = OF_node_from_xref(iparent);
+
+ /* Stop when we have an interrupt controller */
+ if (OF_hasprop(inode, "interrupt-controller")) {
+ *iparentp = iparent;
+ return (rv);
+ }
+
+ intrsz = rv * sizeof(pcell_t);
+ intrp = malloc(intrsz, M_OFWPROP, M_WAITOK);
+ memcpy(intrp, res, intrsz);
+ }
+}
+
int
ofw_bus_intr_to_rl(device_t dev, phandle_t node,
struct resource_list *rl, int *rlen)
{
- phandle_t iparent;
+ phandle_t iparent, iparent_node;
+ uint32_t result[16];
+ uint32_t intrpcells, *intrp;
uint32_t icells, *intr;
int err, i, irqnum, nintr, rid;
- boolean_t extended;
+ bool extended;
nintr = OF_getencprop_alloc_multi(node, "interrupts", sizeof(*intr),
(void **)&intr);
if (nintr > 0) {
- iparent = ofw_bus_find_iparent(node);
+ iparent = ofw_bus_search_iparent(node);
if (iparent == 0) {
device_printf(dev, "No interrupt-parent found, "
"assuming direct parent\n");
iparent = OF_parent(node);
iparent = OF_xref_from_node(iparent);
}
- if (OF_searchencprop(OF_node_from_xref(iparent),
- "#interrupt-cells", &icells, sizeof(icells)) == -1) {
+ iparent_node = OF_node_from_xref(iparent);
+ if (OF_searchencprop(iparent_node, "#interrupt-cells", &icells,
+ sizeof(icells)) == -1) {
device_printf(dev, "Missing #interrupt-cells "
"property, assuming <1>\n");
icells = 1;
@@ -677,7 +756,8 @@ ofw_bus_intr_to_rl(device_t dev, phandle_t node,
for (i = 0; i < nintr; i += icells) {
if (extended) {
iparent = intr[i++];
- if (OF_searchencprop(OF_node_from_xref(iparent),
+ iparent_node = OF_node_from_xref(iparent);
+ if (OF_searchencprop(iparent_node,
"#interrupt-cells", &icells, sizeof(icells)) == -1) {
device_printf(dev, "Missing #interrupt-cells "
"property\n");
@@ -691,7 +771,16 @@ ofw_bus_intr_to_rl(device_t dev, phandle_t node,
break;
}
}
- irqnum = ofw_bus_map_intr(dev, iparent, icells, &intr[i]);
+
+ intrp = &intr[i];
+ intrpcells = ofw_bus_traverse_imap(iparent_node, node, intrp,
+ icells * sizeof(intr[0]), result, sizeof(result), &iparent);
+ if (intrpcells > 0)
+ intrp = result;
+ else
+ intrpcells = icells;
+
+ irqnum = ofw_bus_map_intr(dev, iparent, intrpcells, intrp);
resource_list_add(rl, SYS_RES_IRQ, rid++, irqnum, irqnum, 1);
}
if (rlen != NULL)
@@ -707,7 +796,7 @@ ofw_bus_intr_by_rid(device_t dev, phandle_t node, int wanted_rid,
phandle_t iparent;
uint32_t icells, *intr;
int err, i, nintr, rid;
- boolean_t extended;
+ bool extended;
nintr = OF_getencprop_alloc_multi(node, "interrupts", sizeof(*intr),
(void **)&intr);
diff --git a/sys/dev/ofw/ofw_cpu.c b/sys/dev/ofw/ofw_cpu.c
index e18004ae19d2..888af0440746 100644
--- a/sys/dev/ofw/ofw_cpu.c
+++ b/sys/dev/ofw/ofw_cpu.c
@@ -42,8 +42,9 @@
#include <dev/ofw/ofw_bus_subr.h>
#include <dev/ofw/ofw_cpu.h>
-#if defined(__arm__) || defined(__arm64__) || defined(__riscv__)
+#if defined(__arm__) || defined(__arm64__) || defined(__riscv)
#include <dev/clk/clk.h>
+#define HAS_CLK
#endif
static int ofw_cpulist_probe(device_t);
@@ -123,7 +124,7 @@ ofw_cpulist_attach(device_t dev)
free(dinfo, M_OFWCPU);
continue;
}
- cdev = device_add_child(dev, NULL, -1);
+ cdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (cdev == NULL) {
device_printf(dev, "<%s>: device_add_child failed\n",
dinfo->obd_name);
@@ -134,7 +135,8 @@ ofw_cpulist_attach(device_t dev)
device_set_ivars(cdev, dinfo);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static const struct ofw_bus_devinfo *
@@ -151,7 +153,7 @@ static int ofw_cpu_read_ivar(device_t dev, device_t child, int index,
struct ofw_cpu_softc {
struct pcpu *sc_cpu_pcpu;
uint32_t sc_nominal_mhz;
- boolean_t sc_reg_valid;
+ bool sc_reg_valid;
pcell_t sc_reg[2];
};
@@ -180,6 +182,24 @@ static driver_t ofw_cpu_driver = {
DRIVER_MODULE(ofw_cpu, cpulist, ofw_cpu_driver, 0, 0);
+static bool
+ofw_cpu_is_runnable(phandle_t node)
+{
+ /*
+ * Per the DeviceTree Specification, a cpu node (under /cpus) that
+ * has 'status = disabled' indicates that "the CPU is in a quiescent
+ * state."
+ *
+ * A quiescent CPU that specifies an "enable-method", such as
+ * "spin-table", can still be used by the kernel.
+ *
+ * Lacking this, any CPU marked "disabled" or other non-okay status
+ * should be excluded from the kernel's view.
+ */
+ return (ofw_bus_node_status_okay(node) ||
+ OF_hasprop(node, "enable-method"));
+}
+
static int
ofw_cpu_probe(device_t dev)
{
@@ -188,6 +208,9 @@ ofw_cpu_probe(device_t dev)
if (type == NULL || strcmp(type, "cpu") != 0)
return (ENXIO);
+ if (!ofw_cpu_is_runnable(ofw_bus_get_node(dev)))
+ return (ENXIO);
+
device_set_desc(dev, "Open Firmware CPU");
if (!bootverbose && device_get_unit(dev) != 0) {
device_quiet(dev);
@@ -198,6 +221,30 @@ ofw_cpu_probe(device_t dev)
}
static int
+get_freq_from_clk(device_t dev, struct ofw_cpu_softc *sc)
+{
+#ifdef HAS_CLK
+ clk_t cpuclk;
+ uint64_t freq;
+ int rv;
+
+ rv = clk_get_by_ofw_index(dev, 0, 0, &cpuclk);
+ if (rv == 0) {
+ rv = clk_get_freq(cpuclk, &freq);
+ if (rv != 0 && bootverbose)
+ device_printf(dev,
+ "Cannot get freq of property clocks\n");
+ else
+ sc->sc_nominal_mhz = freq / 1000000;
+ }
+
+ return (rv);
+#else
+ return (ENODEV);
+#endif
+}
+
+static int
ofw_cpu_attach(device_t dev)
{
struct ofw_cpulist_softc *psc;
@@ -205,10 +252,6 @@ ofw_cpu_attach(device_t dev)
phandle_t node;
pcell_t cell;
int rv;
-#if defined(__arm__) || defined(__arm64__) || defined(__riscv__)
- clk_t cpuclk;
- uint64_t freq;
-#endif
sc = device_get_softc(dev);
psc = device_get_softc(device_get_parent(dev));
@@ -275,18 +318,7 @@ ofw_cpu_attach(device_t dev)
sc->sc_cpu_pcpu = pcpu_find(device_get_unit(dev));
if (OF_getencprop(node, "clock-frequency", &cell, sizeof(cell)) < 0) {
-#if defined(__arm__) || defined(__arm64__) || defined(__riscv__)
- rv = clk_get_by_ofw_index(dev, 0, 0, &cpuclk);
- if (rv == 0) {
- rv = clk_get_freq(cpuclk, &freq);
- if (rv != 0 && bootverbose)
- device_printf(dev,
- "Cannot get freq of property clocks\n");
- else
- sc->sc_nominal_mhz = freq / 1000000;
- } else
-#endif
- {
+ if (get_freq_from_clk(dev, sc) != 0) {
if (bootverbose)
device_printf(dev,
"missing 'clock-frequency' property\n");
@@ -297,8 +329,10 @@ ofw_cpu_attach(device_t dev)
if (sc->sc_nominal_mhz != 0 && bootverbose)
device_printf(dev, "Nominal frequency %dMhz\n",
sc->sc_nominal_mhz);
- bus_generic_probe(dev);
- return (bus_generic_attach(dev));
+
+ bus_identify_children(dev);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -335,11 +369,10 @@ ofw_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
}
int
-ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, boolean_t only_runnable)
+ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, bool only_runnable)
{
phandle_t node, child;
pcell_t addr_cells, reg[2];
- char status[16];
char device_type[16];
u_int id, next_id;
int count, rv;
@@ -376,14 +409,8 @@ ofw_cpu_early_foreach(ofw_cpu_foreach_cb callback, boolean_t only_runnable)
* those that have been enabled, or do provide a method
* to enable them.
*/
- if (only_runnable) {
- status[0] = '\0';
- OF_getprop(child, "status", status, sizeof(status));
- if (status[0] != '\0' && strcmp(status, "okay") != 0 &&
- strcmp(status, "ok") != 0 &&
- !OF_hasprop(child, "enable-method"))
- continue;
- }
+ if (only_runnable && !ofw_cpu_is_runnable(child))
+ continue;
/*
* Check we have a register to identify the cpu
diff --git a/sys/dev/ofw/ofw_cpu.h b/sys/dev/ofw/ofw_cpu.h
index cb30dfb6e262..9f4e9e65aa61 100644
--- a/sys/dev/ofw/ofw_cpu.h
+++ b/sys/dev/ofw/ofw_cpu.h
@@ -30,6 +30,6 @@
#define _DEV_OFW_OFW_CPU_H_
typedef bool (*ofw_cpu_foreach_cb)(u_int, phandle_t, u_int, pcell_t *);
-int ofw_cpu_early_foreach(ofw_cpu_foreach_cb, boolean_t);
+int ofw_cpu_early_foreach(ofw_cpu_foreach_cb, bool);
#endif /* _DEV_OFW_OFW_CPU_H_ */
diff --git a/sys/dev/ofw/ofw_fdt.c b/sys/dev/ofw/ofw_fdt.c
index 4b0451824df5..fd9a9f80af39 100644
--- a/sys/dev/ofw/ofw_fdt.c
+++ b/sys/dev/ofw/ofw_fdt.c
@@ -32,12 +32,11 @@
#include <sys/ctype.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
+#include <sys/stdarg.h>
#include <sys/systm.h>
#include <contrib/libfdt/libfdt.h>
-#include <machine/stdarg.h>
-
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofwvar.h>
#include <dev/ofw/openfirm.h>
diff --git a/sys/dev/ofw/ofw_firmware.c b/sys/dev/ofw/ofw_firmware.c
index 8723965f74be..360f7ee56e8a 100644
--- a/sys/dev/ofw/ofw_firmware.c
+++ b/sys/dev/ofw/ofw_firmware.c
@@ -150,7 +150,8 @@ ofw_firmware_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t ofw_firmware_methods[] = {
diff --git a/sys/dev/ofw/ofw_pcib.c b/sys/dev/ofw/ofw_pcib.c
index ebc09fccd93e..0cfddd155e52 100644
--- a/sys/dev/ofw/ofw_pcib.c
+++ b/sys/dev/ofw/ofw_pcib.c
@@ -302,8 +302,9 @@ ofw_pcib_attach(device_t dev)
return (error);
}
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -421,17 +422,13 @@ static struct resource *
ofw_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct ofw_pci_softc *sc;
sc = device_get_softc(bus);
-#endif
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_alloc_bus(sc->sc_pci_domain, child, rid,
start, end, count, flags));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_alloc_resource(bus, child, type, rid,
@@ -445,16 +442,12 @@ ofw_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid,
static int
ofw_pcib_release_resource(device_t bus, device_t child, struct resource *res)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct ofw_pci_softc *sc;
sc = device_get_softc(bus);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_release_bus(sc->sc_pci_domain, child, res));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_release_resource(bus, child, res));
@@ -505,16 +498,12 @@ ofw_pcib_translate_resource(device_t bus, int type, rman_res_t start,
static int
ofw_pcib_activate_resource(device_t bus, device_t child, struct resource *res)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct ofw_pci_softc *sc;
sc = device_get_softc(bus);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_activate_bus(sc->sc_pci_domain, child, res));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_activate_resource(bus, child, res));
@@ -621,17 +610,13 @@ ofw_pcib_bus_get_bus_tag(device_t bus, device_t child)
static int
ofw_pcib_deactivate_resource(device_t bus, device_t child, struct resource *res)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct ofw_pci_softc *sc;
sc = device_get_softc(bus);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_deactivate_bus(sc->sc_pci_domain, child,
res));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_deactivate_resource(bus, child, res));
@@ -644,17 +629,13 @@ static int
ofw_pcib_adjust_resource(device_t bus, device_t child,
struct resource *res, rman_res_t start, rman_res_t end)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct ofw_pci_softc *sc;
sc = device_get_softc(bus);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_adjust_bus(sc->sc_pci_domain, child, res,
start, end));
-#endif
case SYS_RES_MEMORY:
case SYS_RES_IOPORT:
return (bus_generic_rman_adjust_resource(bus, child, res,
diff --git a/sys/dev/ofw/ofw_standard.c b/sys/dev/ofw/ofw_standard.c
index 47a2cabf44a6..8df768dffbcd 100644
--- a/sys/dev/ofw/ofw_standard.c
+++ b/sys/dev/ofw/ofw_standard.c
@@ -60,10 +60,9 @@
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
+#include <sys/stdarg.h>
#include <sys/systm.h>
-#include <machine/stdarg.h>
-
#include <dev/ofw/ofwvar.h>
#include <dev/ofw/openfirm.h>
diff --git a/sys/dev/ofw/ofwbus.c b/sys/dev/ofw/ofwbus.c
index 51e6072ad4ba..d66befcb7314 100644
--- a/sys/dev/ofw/ofwbus.c
+++ b/sys/dev/ofw/ofwbus.c
@@ -117,7 +117,7 @@ ofwbus_attach(device_t dev)
/*
* Allow devices to identify.
*/
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/*
* Now walk the OFW tree and attach top-level devices.
@@ -125,7 +125,8 @@ ofwbus_attach(device_t dev)
for (node = OF_child(node); node > 0; node = OF_peer(node))
simplebus_add_device(dev, node, 0, NULL, -1, NULL);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static struct resource *
diff --git a/sys/dev/ofw/openfirm.c b/sys/dev/ofw/openfirm.c
index 881a2cccb072..b5f58b86a9c3 100644
--- a/sys/dev/ofw/openfirm.c
+++ b/sys/dev/ofw/openfirm.c
@@ -68,8 +68,7 @@
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/endian.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <dev/ofw/ofwvar.h>
#include <dev/ofw/openfirm.h>
@@ -96,7 +95,7 @@ struct xrefinfo {
static SLIST_HEAD(, xrefinfo) xreflist = SLIST_HEAD_INITIALIZER(xreflist);
static struct mtx xreflist_lock;
-static boolean_t xref_init_done;
+static bool xref_init_done;
#define FIND_BY_XREF 0
#define FIND_BY_NODE 1
@@ -187,13 +186,22 @@ xrefinfo_add(phandle_t node, phandle_t xref, device_t dev)
return (xi);
}
+static void
+xrefinfo_remove(struct xrefinfo *xi)
+{
+
+ mtx_lock(&xreflist_lock);
+ SLIST_REMOVE(&xreflist, xi, xrefinfo, next_entry);
+ mtx_unlock(&xreflist_lock);
+}
+
/*
* OFW install routines. Highest priority wins, equal priority also
* overrides allowing last-set to win.
*/
SET_DECLARE(ofw_set, ofw_def_t);
-boolean_t
+bool
OF_install(char *name, int prio)
{
ofw_def_t *ofwp, **ofwpp;
@@ -202,7 +210,7 @@ OF_install(char *name, int prio)
/* Allow OF layer to be uninstalled */
if (name == NULL) {
ofw_def_impl = NULL;
- return (FALSE);
+ return (false);
}
/*
@@ -216,11 +224,11 @@ OF_install(char *name, int prio)
prio >= curr_prio) {
curr_prio = prio;
ofw_def_impl = ofwp;
- return (TRUE);
+ return (true);
}
}
- return (FALSE);
+ return (false);
}
/* Initializer */
@@ -704,6 +712,16 @@ OF_device_register_xref(phandle_t xref, device_t dev)
panic("Attempt to register device before xreflist_init");
}
+void
+OF_device_unregister_xref(phandle_t xref, device_t dev)
+{
+ struct xrefinfo *xi;
+
+ if ((xi = xrefinfo_find(xref, FIND_BY_XREF)) == NULL)
+ return;
+ xrefinfo_remove(xi);
+}
+
/* Call the method in the scope of a given instance. */
int
OF_call_method(const char *method, ihandle_t instance, int nargs, int nreturns,
diff --git a/sys/dev/ofw/openfirm.h b/sys/dev/ofw/openfirm.h
index 149f2a951745..4e2b035827cb 100644
--- a/sys/dev/ofw/openfirm.h
+++ b/sys/dev/ofw/openfirm.h
@@ -83,8 +83,8 @@ MALLOC_DECLARE(M_OFWPROP);
* interface as the Open Firmware access mechanism, OF_init initializes it.
*/
-boolean_t OF_install(char *name, int prio);
-int OF_init(void *cookie);
+bool OF_install(char *name, int prio);
+int OF_init(void *cookie);
/*
* Known Open Firmware interface names
@@ -149,6 +149,7 @@ phandle_t OF_xref_from_node(phandle_t node);
device_t OF_device_from_xref(phandle_t xref);
phandle_t OF_xref_from_device(device_t dev);
int OF_device_register_xref(phandle_t xref, device_t dev);
+void OF_device_unregister_xref(phandle_t xref, device_t dev);
/* Device I/O functions */
ihandle_t OF_open(const char *path);
diff --git a/sys/dev/otus/if_otus.c b/sys/dev/otus/if_otus.c
index dbb913d83ae8..f6c4a0118b68 100644
--- a/sys/dev/otus/if_otus.c
+++ b/sys/dev/otus/if_otus.c
@@ -728,6 +728,12 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_C_SWAMSDUTX | /* Do software A-MSDU TX */
IEEE80211_C_WPA; /* WPA/RSN. */
+ /*
+ * Although A-MPDU RX is fine, A-MPDU TX apparently has some
+ * hardware bugs. Looking at Linux carl9170, it has a work-around
+ * that forces all frames into the AC_BE queue regardless of
+ * the actual QoS queue.
+ */
ic->ic_htcaps =
IEEE80211_HTC_HT |
#if 0
@@ -737,6 +743,8 @@ otus_attachhook(struct otus_softc *sc)
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
otus_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1686,8 +1694,7 @@ otus_sub_rxeof(struct otus_softc *sc, uint8_t *buf, int len, struct mbufq *rxq)
* with invalid frame control values here. Just toss them
* rather than letting net80211 get angry and log.
*/
- if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
- IEEE80211_FC0_VERSION_0) {
+ if (!IEEE80211_IS_FC0_CHECK_VER(wh, IEEE80211_FC0_VERSION_0)) {
OTUS_DPRINTF(sc, OTUS_DEBUG_RXDONE,
"%s: invalid 802.11 fc version (firmware bug?)\n",
__func__);
@@ -2233,6 +2240,9 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
int hasqos, xferlen, type, ismcast;
wh = mtod(m, struct ieee80211_frame *);
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
@@ -2282,7 +2292,8 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
rate = otus_rate_to_hw_rate(sc, tp->ucastrate);
else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = otus_rate_to_hw_rate(sc, ni->ni_txrate);
+ rate = otus_rate_to_hw_rate(sc,
+ ieee80211_node_get_txrate_dot11rate(ni));
}
phyctl = 0;
@@ -2347,9 +2358,11 @@ otus_tx(struct otus_softc *sc, struct ieee80211_node *ni, struct mbuf *m,
data->m = m;
OTUS_DPRINTF(sc, OTUS_DEBUG_XMIT,
- "%s: tx: m=%p; data=%p; len=%d mac=0x%04x phy=0x%08x rate=0x%02x, ni_txrate=%d\n",
+ "%s: tx: m=%p; data=%p; len=%d mac=0x%04x phy=0x%08x "
+ "rate=0x%02x, dot11rate=%d\n",
__func__, m, data, le16toh(head->len), macctl, phyctl,
- (int) rate, (int) ni->ni_txrate);
+ (int) rate,
+ (int) ieee80211_node_get_txrate_dot11rate(ni));
/* Submit transfer */
STAILQ_INSERT_TAIL(&sc->sc_tx_pending[OTUS_BULK_TX], data, next);
diff --git a/sys/dev/ow/ow.c b/sys/dev/ow/ow.c
index 6d89cd445a3b..0325e6b324c8 100644
--- a/sys/dev/ow/ow.c
+++ b/sys/dev/ow/ow.c
@@ -321,7 +321,7 @@ ow_add_child(device_t dev, romid_t romid)
di = malloc(sizeof(*di), M_OW, M_WAITOK);
di->romid = romid;
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
free(di, M_OW);
return ENOMEM;
@@ -330,6 +330,12 @@ ow_add_child(device_t dev, romid_t romid)
return (0);
}
+static void
+ow_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_OW);
+}
+
static device_t
ow_child_by_romid(device_t dev, romid_t romid)
{
@@ -552,15 +558,13 @@ ow_attach(device_t ndev)
sc->dev = ndev;
mtx_init(&sc->mtx, device_get_nameunit(sc->dev), "ow", MTX_DEF);
ow_enumerate(ndev, ow_search_rom, ow_device_found);
- return bus_generic_attach(ndev);
+ bus_attach_children(ndev);
+ return (0);
}
static int
ow_detach(device_t ndev)
{
- device_t *children, child;
- int nkid, i;
- struct ow_devinfo *di;
struct ow_softc *sc;
sc = device_get_softc(ndev);
@@ -570,19 +574,6 @@ ow_detach(device_t ndev)
*/
bus_generic_detach(ndev);
- /*
- * We delete all the children, and free up the ivars
- */
- if (device_get_children(ndev, &children, &nkid) != 0)
- return ENOMEM;
- for (i = 0; i < nkid; i++) {
- child = children[i];
- di = device_get_ivars(child);
- free(di, M_OW);
- device_delete_child(ndev, child);
- }
- free(children, M_TEMP);
-
OW_LOCK_DESTROY(sc);
return 0;
}
@@ -703,6 +694,7 @@ static device_method_t ow_methods[] = {
DEVMETHOD(device_detach, ow_detach),
/* Bus interface */
+ DEVMETHOD(bus_child_deleted, ow_child_deleted),
DEVMETHOD(bus_child_pnpinfo, ow_child_pnpinfo),
DEVMETHOD(bus_read_ivar, ow_read_ivar),
DEVMETHOD(bus_write_ivar, ow_write_ivar),
diff --git a/sys/dev/ow/owc_gpiobus.c b/sys/dev/ow/owc_gpiobus.c
index f919c431c12a..f010a4dc75f1 100644
--- a/sys/dev/ow/owc_gpiobus.c
+++ b/sys/dev/ow/owc_gpiobus.c
@@ -133,8 +133,9 @@ owc_gpiobus_attach(device_t dev)
* interrupts work, because we can't do IO for them until we can read
* the system timecounter (which initializes after device attachments).
*/
- device_add_child(sc->sc_dev, "ow", -1);
- return (bus_delayed_attach_children(dev));
+ device_add_child(sc->sc_dev, "ow", DEVICE_UNIT_ANY);
+ bus_delayed_attach_children(dev);
+ return (0);
}
static int
@@ -145,7 +146,7 @@ owc_gpiobus_detach(device_t dev)
sc = device_get_softc(dev);
- if ((err = device_delete_children(dev)) != 0)
+ if ((err = bus_generic_detach(dev)) != 0)
return (err);
gpio_pin_release(sc->sc_pin);
diff --git a/sys/dev/p2sb/lewisburg_gpio.c b/sys/dev/p2sb/lewisburg_gpio.c
index b45d7767602c..3be777ab9524 100644
--- a/sys/dev/p2sb/lewisburg_gpio.c
+++ b/sys/dev/p2sb/lewisburg_gpio.c
@@ -217,10 +217,11 @@ lbggpio_attach(device_t dev)
}
/* support gpio */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL)
return (ENXIO);
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/p2sb/lewisburg_gpiocm.c b/sys/dev/p2sb/lewisburg_gpiocm.c
index e46575217d92..9dbbd84f2379 100644
--- a/sys/dev/p2sb/lewisburg_gpiocm.c
+++ b/sys/dev/p2sb/lewisburg_gpiocm.c
@@ -315,29 +315,18 @@ lbggpiocm_attach(device_t dev)
group->npins = npins < MAX_PAD_PER_GROUP ? npins :
MAX_PAD_PER_GROUP;
npins -= group->npins;
- group->dev = device_add_child(dev, "gpio", -1);
+ group->dev = device_add_child(dev, "gpio", DEVICE_UNIT_ANY);
}
sc->community->ngroups = i;
- return (bus_generic_attach(dev));
-}
-
-static int
-lbggpiocm_detach(device_t dev)
-{
- int error;
-
- error = device_delete_children(dev);
- if (error)
- return (error);
-
- return (bus_generic_detach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t lbggpiocm_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, lbggpiocm_probe),
DEVMETHOD(device_attach, lbggpiocm_attach),
- DEVMETHOD(device_detach, lbggpiocm_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD_END
};
diff --git a/sys/dev/p2sb/p2sb.c b/sys/dev/p2sb/p2sb.c
index 2f294f03158a..950ee4e86866 100644
--- a/sys/dev/p2sb/p2sb.c
+++ b/sys/dev/p2sb/p2sb.c
@@ -149,7 +149,8 @@ p2sb_attach(device_t dev)
for (i = 0; i < nitems(lbg_communities); ++i)
device_add_child(dev, "lbggpiocm", i);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
/* Detach device. */
@@ -158,9 +159,13 @@ static int
p2sb_detach(device_t dev)
{
struct p2sb_softc *sc;
+ int error;
/* Teardown the state in our softc created in our attach routine. */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
sc = device_get_softc(dev);
mtx_destroy(&sc->mutex);
if (sc->res != NULL)
diff --git a/sys/dev/pccbb/pccbb.c b/sys/dev/pccbb/pccbb.c
index c40261be4724..b8fb7827dfbf 100644
--- a/sys/dev/pccbb/pccbb.c
+++ b/sys/dev/pccbb/pccbb.c
@@ -275,42 +275,12 @@ int
cbb_detach(device_t brdev)
{
struct cbb_softc *sc = device_get_softc(brdev);
- device_t *devlist;
- int tmp, tries, error, numdevs;
+ int error;
- /*
- * Before we delete the children (which we have to do because
- * attach doesn't check for children busses correctly), we have
- * to detach the children. Even if we didn't need to delete the
- * children, we have to detach them.
- */
error = bus_generic_detach(brdev);
if (error != 0)
return (error);
- /*
- * Since the attach routine doesn't search for children before it
- * attaches them to this device, we must delete them here in order
- * for the kldload/unload case to work. If we failed to do that, then
- * we'd get duplicate devices when cbb.ko was reloaded.
- */
- tries = 10;
- do {
- error = device_get_children(brdev, &devlist, &numdevs);
- if (error == 0)
- break;
- /*
- * Try hard to cope with low memory.
- */
- if (error == ENOMEM) {
- pause("cbbnomem", 1);
- continue;
- }
- } while (tries-- > 0);
- for (tmp = 0; tmp < numdevs; tmp++)
- device_delete_child(brdev, devlist[tmp]);
- free(devlist, M_TEMP);
-
/* Turn off the interrupts */
cbb_set(sc, CBB_SOCKET_MASK, 0);
diff --git a/sys/dev/pccbb/pccbb_pci.c b/sys/dev/pccbb/pccbb_pci.c
index 3b66b3df3852..a3e4dfa9ccf7 100644
--- a/sys/dev/pccbb/pccbb_pci.c
+++ b/sys/dev/pccbb/pccbb_pci.c
@@ -276,10 +276,6 @@ cbb_print_config(device_t dev)
static int
cbb_pci_attach(device_t brdev)
{
-#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
- static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */
- uint32_t pribus;
-#endif
struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev);
struct sysctl_ctx_list *sctx;
struct sysctl_oid *soid;
@@ -293,13 +289,8 @@ cbb_pci_attach(device_t brdev)
sc->cbdev = NULL;
sc->domain = pci_get_domain(brdev);
sc->pribus = pcib_get_bus(parent);
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1);
pcib_setup_secbus(brdev, &sc->bus, 1);
-#else
- sc->bus.sec = pci_read_config(brdev, PCIR_SECBUS_2, 1);
- sc->bus.sub = pci_read_config(brdev, PCIR_SUBBUS_2, 1);
-#endif
SLIST_INIT(&sc->rl);
rid = CBBR_SOCKBASE;
@@ -315,7 +306,7 @@ cbb_pci_attach(device_t brdev)
}
/* attach children */
- sc->cbdev = device_add_child(brdev, "cardbus", -1);
+ sc->cbdev = device_add_child(brdev, "cardbus", DEVICE_UNIT_ANY);
if (sc->cbdev == NULL)
DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n"));
else if (device_probe_and_attach(sc->cbdev) != 0)
@@ -351,32 +342,6 @@ cbb_pci_attach(device_t brdev)
CTLFLAG_RD, &sc->subbus, 0, "io range 2 open");
#endif
-#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
- /*
- * This is a gross hack. We should be scanning the entire pci
- * tree, assigning bus numbers in a way such that we (1) can
- * reserve 1 extra bus just in case and (2) all sub buses
- * are in an appropriate range.
- */
- DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec));
- pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1);
- if (sc->bus.sec == 0 || sc->pribus != pribus) {
- if (curr_bus_number <= sc->pribus)
- curr_bus_number = sc->pribus + 1;
- if (pribus != sc->pribus) {
- DEVPRINTF((brdev, "Setting primary bus to %d\n",
- sc->pribus));
- pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1);
- }
- sc->bus.sec = curr_bus_number++;
- sc->bus.sub = curr_bus_number++;
- DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n",
- sc->bus.sec, sc->bus.sub));
- pci_write_config(brdev, PCIR_SECBUS_2, sc->bus.sec, 1);
- pci_write_config(brdev, PCIR_SUBBUS_2, sc->bus.sub, 1);
- }
-#endif
-
/* Map and establish the interrupt. */
rid = 0;
sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid,
@@ -429,16 +394,12 @@ err:
static int
cbb_pci_detach(device_t brdev)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct cbb_softc *sc = device_get_softc(brdev);
-#endif
int error;
error = cbb_detach(brdev);
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
if (error == 0)
pcib_free_secbus(brdev, &sc->bus);
-#endif
return (error);
}
@@ -787,7 +748,6 @@ cbb_pci_filt(void *arg)
return retval;
}
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static struct resource *
cbb_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
@@ -836,7 +796,6 @@ cbb_pci_release_resource(device_t bus, device_t child, struct resource *r)
}
return (cbb_release_resource(bus, child, r));
}
-#endif
/************************************************************************/
/* PCI compat methods */
@@ -931,14 +890,9 @@ static device_method_t cbb_methods[] = {
/* bus methods */
DEVMETHOD(bus_read_ivar, cbb_read_ivar),
DEVMETHOD(bus_write_ivar, cbb_write_ivar),
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
DEVMETHOD(bus_alloc_resource, cbb_pci_alloc_resource),
DEVMETHOD(bus_adjust_resource, cbb_pci_adjust_resource),
DEVMETHOD(bus_release_resource, cbb_pci_release_resource),
-#else
- DEVMETHOD(bus_alloc_resource, cbb_alloc_resource),
- DEVMETHOD(bus_release_resource, cbb_release_resource),
-#endif
DEVMETHOD(bus_activate_resource, cbb_activate_resource),
DEVMETHOD(bus_deactivate_resource, cbb_deactivate_resource),
DEVMETHOD(bus_driver_added, cbb_driver_added),
diff --git a/sys/dev/pcf/pcf_isa.c b/sys/dev/pcf/pcf_isa.c
index 173fc2c4170f..c797dc31e6d9 100644
--- a/sys/dev/pcf/pcf_isa.c
+++ b/sys/dev/pcf/pcf_isa.c
@@ -160,11 +160,11 @@ pcf_isa_attach(device_t dev)
}
}
- if ((sc->iicbus = device_add_child(dev, "iicbus", -1)) == NULL)
+ if ((sc->iicbus = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY)) == NULL)
device_printf(dev, "could not allocate iicbus instance\n");
/* probe and attach the iicbus */
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
@@ -192,9 +192,6 @@ pcf_isa_detach(device_t dev)
if ((rv = bus_generic_detach(dev)) != 0)
return (rv);
- if ((rv = device_delete_child(dev, sc->iicbus)) != 0)
- return (rv);
-
if (sc->res_irq != 0) {
bus_teardown_intr(dev, sc->res_irq, sc->intr_cookie);
bus_release_resource(dev, SYS_RES_IRQ, sc->rid_irq, sc->res_irq);
diff --git a/sys/dev/pci/controller/pci_n1sdp.c b/sys/dev/pci/controller/pci_n1sdp.c
index f5cd9d2b88f3..487041bc78e4 100644
--- a/sys/dev/pci/controller/pci_n1sdp.c
+++ b/sys/dev/pci/controller/pci_n1sdp.c
@@ -222,8 +222,9 @@ n1sdp_pcie_acpi_attach(device_t dev)
if (err)
return (err);
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/pci/fixup_pci.c b/sys/dev/pci/fixup_pci.c
index 4bccda90a040..cad175f301a3 100644
--- a/sys/dev/pci/fixup_pci.c
+++ b/sys/dev/pci/fixup_pci.c
@@ -55,7 +55,6 @@ static void fixc1_nforce2(device_t dev);
static device_method_t fixup_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, fixup_pci_probe),
- DEVMETHOD(device_attach, bus_generic_attach),
{ 0, 0 }
};
diff --git a/sys/dev/pci/hostb_pci.c b/sys/dev/pci/hostb_pci.c
index 3f6abf31b236..e6c10418a684 100644
--- a/sys/dev/pci/hostb_pci.c
+++ b/sys/dev/pci/hostb_pci.c
@@ -69,15 +69,15 @@ static int
pci_hostb_attach(device_t dev)
{
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/*
* If AGP capabilities are present on this device, then create
* an AGP child.
*/
if (pci_find_cap(dev, PCIY_AGP, NULL) == 0)
- device_add_child(dev, "agp", -1);
- bus_generic_attach(dev);
+ device_add_child(dev, "agp", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
return (0);
}
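Note: bus_identify_children() is the successor of bus_generic_probe(); both walk the eligible drivers and invoke their DEVICE_IDENTIFY methods so that drivers can add themselves as children before attach. A sketch of a typical identify method under the new naming, with "foo" hypothetical:

	static void
	foo_identify(driver_t *driver, device_t parent)
	{
		/* Add a single instance of ourselves if none exists yet. */
		if (device_find_child(parent, "foo", DEVICE_UNIT_ANY) == NULL)
			BUS_ADD_CHILD(parent, 0, "foo", DEVICE_UNIT_ANY);
	}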
diff --git a/sys/dev/pci/ignore_pci.c b/sys/dev/pci/ignore_pci.c
index 22964e55b81d..8a12e43bcae3 100644
--- a/sys/dev/pci/ignore_pci.c
+++ b/sys/dev/pci/ignore_pci.c
@@ -41,11 +41,12 @@
#include <dev/pci/pcivar.h>
static int ignore_pci_probe(device_t dev);
+static int ignore_pci_attach(device_t dev);
static device_method_t ignore_pci_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, ignore_pci_probe),
- DEVMETHOD(device_attach, bus_generic_attach),
+ DEVMETHOD(device_attach, ignore_pci_attach),
{ 0, 0 }
};
@@ -68,3 +69,9 @@ ignore_pci_probe(device_t dev)
}
return(ENXIO);
}
+
+static int
+ignore_pci_attach(device_t dev)
+{
+ return (0);
+}
diff --git a/sys/dev/pci/isa_pci.c b/sys/dev/pci/isa_pci.c
index c8ad277edb41..f63c63afc384 100644
--- a/sys/dev/pci/isa_pci.c
+++ b/sys/dev/pci/isa_pci.c
@@ -160,7 +160,7 @@ static int
isab_pci_attach(device_t dev)
{
- bus_generic_probe(dev);
+ bus_identify_children(dev);
return (isab_attach(dev));
}
diff --git a/sys/dev/pci/pci.c b/sys/dev/pci/pci.c
index cbab4b50392f..cde98cb62cef 100644
--- a/sys/dev/pci/pci.c
+++ b/sys/dev/pci/pci.c
@@ -45,6 +45,7 @@
#include <sys/module.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>
@@ -58,7 +59,6 @@
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
#include <machine/intr_machdep.h>
@@ -88,6 +88,16 @@
(((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
+static device_probe_t pci_probe;
+
+static bus_reset_post_t pci_reset_post;
+static bus_reset_prepare_t pci_reset_prepare;
+static bus_reset_child_t pci_reset_child;
+static bus_hint_device_unit_t pci_hint_device_unit;
+static bus_remap_intr_t pci_remap_intr_method;
+
+static pci_get_id_t pci_get_id_method;
+
static int pci_has_quirk(uint32_t devid, int quirk);
static pci_addr_t pci_mapbase(uint64_t mapreg);
static const char *pci_maptype(uint64_t mapreg);
@@ -103,7 +113,6 @@ static void pci_assign_interrupt(device_t bus, device_t dev,
int force_route);
static int pci_add_map(device_t bus, device_t dev, int reg,
struct resource_list *rl, int force, int prefetch);
-static int pci_probe(device_t dev);
static void pci_load_vendor_data(void);
static int pci_describe_parse_line(char **ptr, int *vendor,
int *device, char **desc);
@@ -125,17 +134,6 @@ static int pci_msi_blacklisted(void);
static int pci_msix_blacklisted(void);
static void pci_resume_msi(device_t dev);
static void pci_resume_msix(device_t dev);
-static int pci_remap_intr_method(device_t bus, device_t dev,
- u_int irq);
-static void pci_hint_device_unit(device_t acdev, device_t child,
- const char *name, int *unitp);
-static int pci_reset_post(device_t dev, device_t child);
-static int pci_reset_prepare(device_t dev, device_t child);
-static int pci_reset_child(device_t dev, device_t child,
- int flags);
-
-static int pci_get_id_method(device_t dev, device_t child,
- enum pci_id_type type, uintptr_t *rid);
static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
int b, int s, int f, uint16_t vid, uint16_t did);
@@ -166,10 +164,12 @@ static device_method_t pci_methods[] = {
DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
DEVMETHOD(bus_delete_resource, pci_delete_resource),
DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
- DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
+ DEVMETHOD(bus_adjust_resource, pci_adjust_resource),
DEVMETHOD(bus_release_resource, pci_release_resource),
DEVMETHOD(bus_activate_resource, pci_activate_resource),
DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
+ DEVMETHOD(bus_map_resource, pci_map_resource),
+ DEVMETHOD(bus_unmap_resource, pci_unmap_resource),
DEVMETHOD(bus_child_deleted, pci_child_deleted),
DEVMETHOD(bus_child_detached, pci_child_detached),
DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method),
@@ -240,6 +240,7 @@ struct pci_quirk {
#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
#define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */
+#define PCI_QUIRK_DISABLE_FLR 8 /* Function-Level Reset (FLR) not working. */
int arg1;
int arg2;
};
@@ -319,6 +320,13 @@ static const struct pci_quirk pci_quirks[] = {
* expected place.
*/
{ 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 },
+
+ /*
+ * On some MediaTek mt76 WiFi devices FLR does not work despite being advertised.
+ */
+ { 0x061614c3, PCI_QUIRK_DISABLE_FLR, 0, 0 }, /* mt76 7922 */
+
+ /* end of table */
{ 0 }
};
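Note: the quirk key packs the 16-bit device ID into the high half and the vendor ID into the low half, so 0x061614c3 is device 0x0616 from vendor 0x14c3 (MediaTek). The lookup itself is not in this hunk; judging from the pci_has_quirk() declaration above, it is presumably a linear scan along these lines:

	static int
	pci_has_quirk(uint32_t devid, int quirk)
	{
		const struct pci_quirk *q;

		for (q = &pci_quirks[0]; q->devid != 0; q++)
			if (q->devid == devid && q->type == quirk)
				return (1);
		return (0);
	}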
@@ -353,8 +361,8 @@ static int pci_do_power_nodriver = 0;
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
&pci_do_power_nodriver, 0,
"Place a function into D3 state when no driver attaches to it. 0 means"
- " disable. 1 means conservatively place devices into D3 state. 2 means"
- " aggressively place devices into D3 state. 3 means put absolutely"
+ " disable. 1 means conservatively place function into D3 state. 2 means"
+ " aggressively place function into D3 state. 3 means put absolutely"
" everything in D3 state.");
int pci_do_power_resume = 1;
@@ -399,17 +407,23 @@ static int pci_clear_bars;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
"Ignore firmware-assigned resources for BARs.");
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static int pci_clear_buses;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
"Ignore firmware-assigned bus numbers.");
-#endif
static int pci_enable_ari = 1;
SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
0, "Enable support for PCIe Alternative RID Interpretation");
+/*
+ * Some x86 firmware only enables PCIe hotplug if we claim to support ASPM;
+ * however, enabling it breaks some arm64 firmware, as it powers off devices.
+ */
+#if defined(__i386__) || defined(__amd64__)
int pci_enable_aspm = 1;
+#else
+int pci_enable_aspm = 0;
+#endif
SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm,
0, "Enable support for PCIe Active State Power Management");
@@ -423,6 +437,10 @@ SYSCTL_BOOL(_hw_pci, OID_AUTO, enable_mps_tune, CTLFLAG_RWTUN,
&pci_enable_mps_tune, 1,
"Enable tuning of MPS(maximum payload size)." );
+static bool pci_intx_reroute = true;
+SYSCTL_BOOL(_hw_pci, OID_AUTO, intx_reroute, CTLFLAG_RWTUN,
+ &pci_intx_reroute, 0, "Re-route INTx interrupts when scanning devices");
+
static int
pci_has_quirk(uint32_t devid, int quirk)
{
@@ -517,6 +535,27 @@ pci_find_class_from(uint8_t class, uint8_t subclass, device_t from)
return (NULL);
}
+device_t
+pci_find_base_class_from(uint8_t class, device_t from)
+{
+ struct pci_devinfo *dinfo;
+ bool found = false;
+
+ STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
+ if (from != NULL && !found) {
+ if (from != dinfo->cfg.dev)
+ continue;
+ found = true;
+ continue;
+ }
+ if (dinfo->cfg.baseclass == class)
+ return (dinfo->cfg.dev);
+ }
+
+ return (NULL);
+}
+
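Note: pci_find_base_class_from() mirrors the existing pci_find_class_from() but matches on the base class alone. A sketch of the intended usage, iterating every display-class function (PCIC_DISPLAY is the standard base-class code):

	device_t dev = NULL;

	while ((dev = pci_find_base_class_from(PCIC_DISPLAY, dev)) != NULL)
		device_printf(dev, "display-class function\n");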
static int
pci_printf(pcicfgregs *cfg, const char *fmt, ...)
{
@@ -881,13 +920,8 @@ pci_read_cap(device_t pcib, pcicfgregs *cfg)
/* Process this entry */
switch (REG(ptr + PCICAP_ID, 1)) {
case PCIY_PMG: /* PCI power management */
- if (cfg->pp.pp_cap == 0) {
- cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
- cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
- cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
- if ((nextptr - ptr) > PCIR_POWER_DATA)
- cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
- }
+ cfg->pp.pp_location = ptr;
+ cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
break;
case PCIY_HT: /* HyperTransport */
/* Determine HT-specific capability type. */
@@ -925,14 +959,10 @@ pci_read_cap(device_t pcib, pcicfgregs *cfg)
case PCIY_MSI: /* PCI MSI */
cfg->msi.msi_location = ptr;
cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
- cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
- PCIM_MSICTRL_MMC_MASK)>>1);
break;
case PCIY_MSIX: /* PCI MSI-X */
cfg->msix.msix_location = ptr;
cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
- cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
- PCIM_MSIXCTRL_TABLE_SIZE) + 1;
val = REG(ptr + PCIR_MSIX_TABLE, 4);
cfg->msix.msix_table_bar = PCIR_BAR(val &
PCIM_MSIX_BIR_MASK);
@@ -1184,7 +1214,7 @@ vpd_read_elem_data(struct vpd_readstate *vrs, char keyword[2], char **value, int
int len;
len = vpd_read_elem_head(vrs, keyword);
- if (len > maxlen)
+ if (len < 0 || len > maxlen)
return (-1);
*value = vpd_read_value(vrs, len);
@@ -1205,7 +1235,7 @@ vpd_fixup_cksum(struct vpd_readstate *vrs, char *rvstring, int len)
}
/* fetch one read-only element and return size of heading + data */
-static size_t
+static int
next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize)
{
struct pcicfg_vpd *vpd;
@@ -1239,7 +1269,7 @@ next_vpd_ro_elem(struct vpd_readstate *vrs, int maxsize)
}
/* fetch one writable element and return size of heading + data */
-static size_t
+static int
next_vpd_rw_elem(struct vpd_readstate *vrs, int maxsize)
{
struct pcicfg_vpd *vpd;
@@ -1513,6 +1543,7 @@ pci_find_cap_method(device_t dev, device_t child, int capability,
pcicfgregs *cfg = &dinfo->cfg;
uint32_t status;
uint8_t ptr;
+ int cnt;
/*
* Check the CAP_LIST bit of the PCI status register first.
@@ -1539,9 +1570,11 @@ pci_find_cap_method(device_t dev, device_t child, int capability,
ptr = pci_read_config(child, ptr, 1);
/*
- * Traverse the capabilities list.
+ * Traverse the capabilities list. Limit the walk by the theoretical
+ * maximum number of caps: each capability needs at least the id and
+ * next registers, and the standard config header (the first 0x40
+ * bytes) cannot contain caps.
*/
- while (ptr != 0) {
+ for (cnt = 0; ptr != 0 && cnt < (PCIE_REGMAX - 0x40) / 2; cnt++) {
if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
if (capreg != NULL)
*capreg = ptr;
@@ -1704,7 +1737,7 @@ pci_mask_msix(device_t dev, u_int index)
struct pcicfg_msix *msix = &dinfo->cfg.msix;
uint32_t offset, val;
- KASSERT(msix->msix_msgnum > index, ("bogus index"));
+ KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index"));
offset = msix->msix_table_offset + index * 16 + 12;
val = bus_read_4(msix->msix_table_res, offset);
val |= PCIM_MSIX_VCTRL_MASK;
@@ -1723,7 +1756,7 @@ pci_unmask_msix(device_t dev, u_int index)
struct pcicfg_msix *msix = &dinfo->cfg.msix;
uint32_t offset, val;
- KASSERT(msix->msix_table_len > index, ("bogus index"));
+ KASSERT(PCI_MSIX_MSGNUM(msix->msix_ctrl) > index, ("bogus index"));
offset = msix->msix_table_offset + index * 16 + 12;
val = bus_read_4(msix->msix_table_res, offset);
val &= ~PCIM_MSIX_VCTRL_MASK;
@@ -1760,11 +1793,13 @@ pci_resume_msix(device_t dev)
struct pcicfg_msix *msix = &dinfo->cfg.msix;
struct msix_table_entry *mte;
struct msix_vector *mv;
- int i;
+ u_int i, msgnum;
if (msix->msix_alloc > 0) {
+ msgnum = PCI_MSIX_MSGNUM(msix->msix_ctrl);
+
/* First, mask all vectors. */
- for (i = 0; i < msix->msix_msgnum; i++)
+ for (i = 0; i < msgnum; i++)
pci_mask_msix(dev, i);
/* Second, program any messages with at least one handler. */
@@ -1793,10 +1828,12 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
- int actual, error, i, irq, max;
+ u_int actual, i, max;
+ int error, irq;
+ uint16_t ctrl, msgnum;
/* Don't let count == 0 get us into trouble. */
- if (*count == 0)
+ if (*count < 1)
return (EINVAL);
/* If rid 0 is allocated, then fail. */
@@ -1832,11 +1869,14 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
}
cfg->msix.msix_pba_res = rle->res;
+ ctrl = pci_read_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
+ 2);
+ msgnum = PCI_MSIX_MSGNUM(ctrl);
if (bootverbose)
device_printf(child,
"attempting to allocate %d MSI-X vectors (%d supported)\n",
- *count, cfg->msix.msix_msgnum);
- max = min(*count, cfg->msix.msix_msgnum);
+ *count, msgnum);
+ max = min(*count, msgnum);
for (i = 0; i < max; i++) {
/* Allocate a message. */
error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
@@ -1856,7 +1896,7 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
device_printf(child, "using IRQ %ju for MSI-X\n",
rle->start);
else {
- int run;
+ bool run;
/*
* Be fancy and try to print contiguous runs of
@@ -1865,14 +1905,14 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
*/
device_printf(child, "using IRQs %ju", rle->start);
irq = rle->start;
- run = 0;
+ run = false;
for (i = 1; i < actual; i++) {
rle = resource_list_find(&dinfo->resources,
SYS_RES_IRQ, i + 1);
/* Still in a run? */
if (rle->start == irq + 1) {
- run = 1;
+ run = true;
irq++;
continue;
}
@@ -1880,7 +1920,7 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
/* Finish previous range. */
if (run) {
printf("-%d", irq);
- run = 0;
+ run = false;
}
/* Start new range. */
@@ -1895,15 +1935,19 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
}
}
- /* Mask all vectors. */
- for (i = 0; i < cfg->msix.msix_msgnum; i++)
+ /*
+ * Mask all vectors. Note that the message index assertion in
+ * pci_mask_msix requires msix_ctrl to be set.
+ */
+ cfg->msix.msix_ctrl = ctrl;
+ for (i = 0; i < msgnum; i++)
pci_mask_msix(child, i);
/* Allocate and initialize vector data and virtual table. */
- cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
- M_DEVBUF, M_WAITOK | M_ZERO);
- cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
+ cfg->msix.msix_vectors = mallocarray(actual, sizeof(struct msix_vector),
M_DEVBUF, M_WAITOK | M_ZERO);
+ cfg->msix.msix_table = mallocarray(actual,
+ sizeof(struct msix_table_entry), M_DEVBUF, M_WAITOK | M_ZERO);
for (i = 0; i < actual; i++) {
rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
cfg->msix.msix_vectors[i].mv_irq = rle->start;
@@ -1911,9 +1955,10 @@ pci_alloc_msix_method(device_t dev, device_t child, int *count)
}
/* Update control register to enable MSI-X. */
- cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
- cfg->msix.msix_ctrl, 2);
+ ctrl, 2);
+ cfg->msix.msix_ctrl = ctrl;
/* Update counts of alloc'd messages. */
cfg->msix.msix_alloc = actual;
@@ -1968,14 +2013,15 @@ pci_remap_msix_method(device_t dev, device_t child, int count,
struct pci_devinfo *dinfo = device_get_ivars(child);
struct pcicfg_msix *msix = &dinfo->cfg.msix;
struct resource_list_entry *rle;
- int i, irq, j, *used;
+ u_int i, irq, j;
+ bool *used;
/*
* Have to have at least one message in the table but the
* table can't be bigger than the actual MSI-X table in the
* device.
*/
- if (count == 0 || count > msix->msix_msgnum)
+ if (count < 1 || count > PCI_MSIX_MSGNUM(msix->msix_ctrl))
return (EINVAL);
/* Sanity check the vectors. */
@@ -1988,17 +2034,17 @@ pci_remap_msix_method(device_t dev, device_t child, int count,
* It's a big pain to support it, and it doesn't really make
* sense anyway. Also, at least one vector must be used.
*/
- used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
+ used = mallocarray(msix->msix_alloc, sizeof(*used), M_DEVBUF, M_WAITOK |
M_ZERO);
for (i = 0; i < count; i++)
if (vectors[i] != 0)
- used[vectors[i] - 1] = 1;
+ used[vectors[i] - 1] = true;
for (i = 0; i < msix->msix_alloc - 1; i++)
- if (used[i] == 0 && used[i + 1] == 1) {
+ if (!used[i] && used[i + 1]) {
free(used, M_DEVBUF);
return (EINVAL);
}
- if (used[0] != 1) {
+ if (!used[0]) {
free(used, M_DEVBUF);
return (EINVAL);
}
@@ -2031,7 +2077,7 @@ pci_remap_msix_method(device_t dev, device_t child, int count,
* used.
*/
free(msix->msix_table, M_DEVBUF);
- msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
+ msix->msix_table = mallocarray(count, sizeof(struct msix_table_entry),
M_DEVBUF, M_WAITOK | M_ZERO);
for (i = 0; i < count; i++)
msix->msix_table[i].mte_vector = vectors[i];
@@ -2039,15 +2085,15 @@ pci_remap_msix_method(device_t dev, device_t child, int count,
/* Free any unused IRQs and resize the vectors array if necessary. */
j = msix->msix_alloc - 1;
- if (used[j] == 0) {
+ if (!used[j]) {
struct msix_vector *vec;
- while (used[j] == 0) {
+ while (!used[j]) {
PCIB_RELEASE_MSIX(device_get_parent(dev), child,
msix->msix_vectors[j].mv_irq);
j--;
}
- vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
+ vec = mallocarray(j + 1, sizeof(struct msix_vector), M_DEVBUF,
M_WAITOK);
bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
(j + 1));
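Note: mallocarray(nmemb, size, type, flags) is the overflow-checked counterpart of malloc(nmemb * size, type, flags); it refuses allocations where the multiplication would wrap, which is why the count-times-sizeof call sites are being converted. For example:

	/* Checks that (j + 1) * sizeof(struct msix_vector) cannot
	 * overflow before allocating. */
	vec = mallocarray(j + 1, sizeof(struct msix_vector), M_DEVBUF,
	    M_WAITOK);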
@@ -2089,7 +2135,7 @@ pci_release_msix(device_t dev, device_t child)
struct pci_devinfo *dinfo = device_get_ivars(child);
struct pcicfg_msix *msix = &dinfo->cfg.msix;
struct resource_list_entry *rle;
- int i;
+ u_int i;
/* Do we have any messages to release? */
if (msix->msix_alloc == 0)
@@ -2141,9 +2187,13 @@ pci_msix_count_method(device_t dev, device_t child)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
struct pcicfg_msix *msix = &dinfo->cfg.msix;
+ uint16_t ctrl;
- if (pci_do_msix && msix->msix_location != 0)
- return (msix->msix_msgnum);
+ if (pci_do_msix && msix->msix_location != 0) {
+ ctrl = pci_read_config(child, msix->msix_location +
+ PCIR_MSIX_CTRL, 2);
+ return (PCI_MSIX_MSGNUM(ctrl));
+ }
return (0);
}
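Note: the cached msi_msgnum/msix_msgnum fields are gone; the message counts are now derived from the control register on demand. Inferred from the computations deleted in pci_read_cap() above, the macros are presumably:

	#define	PCI_MSI_MSGNUM(ctrl)	\
	    (1 << (((ctrl) & PCIM_MSICTRL_MMC_MASK) >> 1))
	#define	PCI_MSIX_MSGNUM(ctrl)	\
	    (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)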
@@ -2410,7 +2460,8 @@ pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
struct msix_vector *mv;
uint64_t addr;
uint32_t data;
- int error, i, j;
+ u_int i, j;
+ int error;
/*
* Handle MSI first. We try to find this IRQ among our list
@@ -2575,11 +2626,12 @@ pci_alloc_msi_method(device_t dev, device_t child, int *count)
struct pci_devinfo *dinfo = device_get_ivars(child);
pcicfgregs *cfg = &dinfo->cfg;
struct resource_list_entry *rle;
- int actual, error, i, irqs[32];
- uint16_t ctrl;
+ u_int actual, i;
+ int error, irqs[32];
+ uint16_t ctrl, msgnum;
/* Don't let count == 0 get us into trouble. */
- if (*count == 0)
+ if (*count < 1)
return (EINVAL);
/* If rid 0 is allocated, then fail. */
@@ -2599,13 +2651,15 @@ pci_alloc_msi_method(device_t dev, device_t child, int *count)
if (cfg->msi.msi_location == 0 || !pci_do_msi)
return (ENODEV);
+ ctrl = pci_read_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, 2);
+ msgnum = PCI_MSI_MSGNUM(ctrl);
if (bootverbose)
device_printf(child,
- "attempting to allocate %d MSI vectors (%d supported)\n",
- *count, cfg->msi.msi_msgnum);
+ "attempting to allocate %d MSI vectors (%u supported)\n",
+ *count, msgnum);
/* Don't ask for more than the device supports. */
- actual = min(*count, cfg->msi.msi_msgnum);
+ actual = min(*count, msgnum);
/* Don't ask for more than 32 messages. */
actual = min(actual, 32);
@@ -2640,7 +2694,7 @@ pci_alloc_msi_method(device_t dev, device_t child, int *count)
if (actual == 1)
device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
else {
- int run;
+ bool run;
/*
* Be fancy and try to print contiguous runs
@@ -2648,18 +2702,18 @@ pci_alloc_msi_method(device_t dev, device_t child, int *count)
* we are in a range.
*/
device_printf(child, "using IRQs %d", irqs[0]);
- run = 0;
+ run = false;
for (i = 1; i < actual; i++) {
/* Still in a run? */
if (irqs[i] == irqs[i - 1] + 1) {
- run = 1;
+ run = true;
continue;
}
/* Finish previous range. */
if (run) {
printf("-%d", irqs[i - 1]);
- run = 0;
+ run = false;
}
/* Start new range. */
@@ -2674,7 +2728,6 @@ pci_alloc_msi_method(device_t dev, device_t child, int *count)
}
/* Update control register with actual count. */
- ctrl = cfg->msi.msi_ctrl;
ctrl &= ~PCIM_MSICTRL_MME_MASK;
ctrl |= (ffs(actual) - 1) << 4;
cfg->msi.msi_ctrl = ctrl;
@@ -2694,7 +2747,8 @@ pci_release_msi_method(device_t dev, device_t child)
struct pci_devinfo *dinfo = device_get_ivars(child);
struct pcicfg_msi *msi = &dinfo->cfg.msi;
struct resource_list_entry *rle;
- int error, i, irqs[32];
+ u_int i, irqs[32];
+ int error;
/* Try MSI-X first. */
error = pci_release_msix(dev, child);
@@ -2747,9 +2801,13 @@ pci_msi_count_method(device_t dev, device_t child)
{
struct pci_devinfo *dinfo = device_get_ivars(child);
struct pcicfg_msi *msi = &dinfo->cfg.msi;
+ uint16_t ctrl;
- if (pci_do_msi && msi->msi_location != 0)
- return (msi->msi_msgnum);
+ if (pci_do_msi && msi->msi_location != 0) {
+ ctrl = pci_read_config(child, msi->msi_location + PCIR_MSI_CTRL,
+ 2);
+ return (PCI_MSI_MSGNUM(ctrl));
+ }
return (0);
}
@@ -2791,7 +2849,7 @@ pci_set_powerstate_method(device_t dev, device_t child, int state)
uint16_t status;
int oldstate, highest, delay;
- if (cfg->pp.pp_cap == 0)
+ if (cfg->pp.pp_location == 0)
return (EOPNOTSUPP);
/*
@@ -2822,8 +2880,8 @@ pci_set_powerstate_method(device_t dev, device_t child, int state)
delay = 200;
else
delay = 0;
- status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
- & ~PCIM_PSTAT_DMASK;
+ status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location +
+ PCIR_POWER_STATUS, 2) & ~PCIM_PSTAT_DMASK;
switch (state) {
case PCI_POWERSTATE_D0:
status |= PCIM_PSTAT_D0;
@@ -2846,10 +2904,12 @@ pci_set_powerstate_method(device_t dev, device_t child, int state)
}
if (bootverbose)
- pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
- state);
+ pci_printf(cfg, "Transition from %s to %s\n",
+ pci_powerstate_to_str(oldstate),
+ pci_powerstate_to_str(state));
- PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
+ PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_location + PCIR_POWER_STATUS,
+ status, 2);
if (delay)
DELAY(delay);
return (0);
@@ -2863,8 +2923,9 @@ pci_get_powerstate_method(device_t dev, device_t child)
uint16_t status;
int result;
- if (cfg->pp.pp_cap != 0) {
- status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
+ if (cfg->pp.pp_location != 0) {
+ status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_location +
+ PCIR_POWER_STATUS, 2);
switch (status & PCIM_PSTAT_DMASK) {
case PCIM_PSTAT_D0:
result = PCI_POWERSTATE_D0;
@@ -2889,6 +2950,50 @@ pci_get_powerstate_method(device_t dev, device_t child)
return (result);
}
+/* Clear any active PME# and disable PME# generation. */
+void
+pci_clear_pme(device_t dev)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(dev);
+ pcicfgregs *cfg = &dinfo->cfg;
+ uint16_t status;
+
+ if (cfg->pp.pp_location != 0) {
+ status = pci_read_config(dev, dinfo->cfg.pp.pp_location +
+ PCIR_POWER_STATUS, 2);
+ status &= ~PCIM_PSTAT_PMEENABLE;
+ status |= PCIM_PSTAT_PME;
+ pci_write_config(dev, dinfo->cfg.pp.pp_location +
+ PCIR_POWER_STATUS, status, 2);
+ }
+}
+
+/* Clear any active PME# and enable PME# generation. */
+void
+pci_enable_pme(device_t dev)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(dev);
+ pcicfgregs *cfg = &dinfo->cfg;
+ uint16_t status;
+
+ if (cfg->pp.pp_location != 0) {
+ status = pci_read_config(dev, dinfo->cfg.pp.pp_location +
+ PCIR_POWER_STATUS, 2);
+ status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
+ pci_write_config(dev, dinfo->cfg.pp.pp_location +
+ PCIR_POWER_STATUS, status, 2);
+ }
+}
+
+bool
+pci_has_pm(device_t dev)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(dev);
+ pcicfgregs *cfg = &dinfo->cfg;
+
+ return (cfg->pp.pp_location != 0);
+}
+
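Note: pci_has_pm(), pci_enable_pme() and pci_clear_pme() give drivers a small interface to the PM registers without tracking the capability location themselves. A hedged sketch of a driver suspend path arming wake-on-LAN with them; foo_wol_enabled() is hypothetical:

	static int
	foo_suspend(device_t dev)
	{
		/* Arm PME# generation only if the device has the PM cap. */
		if (foo_wol_enabled(dev) && pci_has_pm(dev))
			pci_enable_pme(dev);
		return (bus_generic_suspend(dev));
	}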
/*
* Some convenience functions for PCI device drivers.
*/
@@ -2992,10 +3097,11 @@ pci_print_verbose(struct pci_devinfo *dinfo)
if (cfg->intpin > 0)
printf("\tintpin=%c, irq=%d\n",
cfg->intpin +'a' -1, cfg->intline);
- if (cfg->pp.pp_cap) {
+ if (cfg->pp.pp_location) {
uint16_t status;
- status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
+ status = pci_read_config(cfg->dev, cfg->pp.pp_location +
+ PCIR_POWER_STATUS, 2);
printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
cfg->pp.pp_cap & PCIM_PCAP_SPEC,
cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
@@ -3003,19 +3109,21 @@ pci_print_verbose(struct pci_devinfo *dinfo)
status & PCIM_PSTAT_DMASK);
}
if (cfg->msi.msi_location) {
- int ctrl;
+ uint16_t ctrl, msgnum;
ctrl = cfg->msi.msi_ctrl;
+ msgnum = PCI_MSI_MSGNUM(ctrl);
printf("\tMSI supports %d message%s%s%s\n",
- cfg->msi.msi_msgnum,
- (cfg->msi.msi_msgnum == 1) ? "" : "s",
+ msgnum, (msgnum == 1) ? "" : "s",
(ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
(ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
}
if (cfg->msix.msix_location) {
+ uint16_t msgnum;
+
+ msgnum = PCI_MSIX_MSGNUM(cfg->msix.msix_ctrl);
printf("\tMSI-X supports %d message%s ",
- cfg->msix.msix_msgnum,
- (cfg->msix.msix_msgnum == 1) ? "" : "s");
+ msgnum, (msgnum == 1) ? "" : "s");
if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
printf("in map 0x%x\n",
cfg->msix.msix_table_bar);
@@ -3700,7 +3808,6 @@ xhci_early_takeover(device_t self)
bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
static void
pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
struct resource_list *rl)
@@ -3856,7 +3963,6 @@ pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start,
return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
end, count, flags));
}
-#endif
static int
pci_ea_bei_to_rid(device_t dev, int bei)
@@ -4086,8 +4192,8 @@ pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
pci_add_map(bus, dev, q->arg1, rl, force, 0);
- if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
-#ifdef __PCI_REROUTE_INTERRUPT
+ if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline) &&
+ pci_intx_reroute) {
/*
* Try to re-route interrupts. Sometimes the BIOS or
* firmware may leave bogus values in these registers.
@@ -4095,9 +4201,6 @@ pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
* have.
*/
pci_assign_interrupt(bus, dev, 1);
-#else
- pci_assign_interrupt(bus, dev, 0);
-#endif
}
if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
@@ -4112,13 +4215,11 @@ pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
uhci_early_takeover(dev);
}
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
/*
* Reserve resources for secondary bus ranges behind bridge
* devices.
*/
pci_reserve_secbus(bus, dev, cfg, rl);
-#endif
}
static struct pci_devinfo *
@@ -4426,11 +4527,12 @@ pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
device_t dev;
- dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1);
+ dinfo->cfg.dev = dev = device_add_child(bus, NULL, DEVICE_UNIT_ANY);
device_set_ivars(dev, dinfo);
resource_list_init(&dinfo->resources);
pci_cfg_save(dev, dinfo, 0);
pci_cfg_restore(dev, dinfo);
+ pci_clear_pme(dev);
pci_print_verbose(dinfo);
pci_add_resources(bus, dev, 0, 0);
if (pci_enable_mps_tune)
@@ -4464,14 +4566,11 @@ pci_attach_common(device_t dev)
{
struct pci_softc *sc;
int busno, domain;
-#ifdef PCI_RES_BUS
int rid;
-#endif
sc = device_get_softc(dev);
domain = pcib_get_domain(dev);
busno = pcib_get_bus(dev);
-#ifdef PCI_RES_BUS
rid = 0;
sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
1, 0);
@@ -4479,7 +4578,6 @@ pci_attach_common(device_t dev)
device_printf(dev, "failed to allocate bus number\n");
return (ENXIO);
}
-#endif
if (bootverbose)
device_printf(dev, "domain=%d, physical bus=%d\n",
domain, busno);
@@ -4505,27 +4603,22 @@ pci_attach(device_t dev)
domain = pcib_get_domain(dev);
busno = pcib_get_bus(dev);
pci_add_children(dev, domain, busno);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
int
pci_detach(device_t dev)
{
-#ifdef PCI_RES_BUS
struct pci_softc *sc;
-#endif
int error;
error = bus_generic_detach(dev);
if (error)
return (error);
-#ifdef PCI_RES_BUS
sc = device_get_softc(dev);
error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
- if (error)
- return (error);
-#endif
- return (device_delete_children(dev));
+ return (error);
}
static void
@@ -4630,6 +4723,7 @@ pci_resume_child(device_t dev, device_t child)
dinfo = device_get_ivars(child);
pci_cfg_restore(child, dinfo);
+ pci_clear_pme(child);
if (!device_is_attached(child))
pci_cfg_save(child, dinfo, 1);
@@ -5105,10 +5199,8 @@ pci_child_detached(device_t dev, device_t child)
pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
-#ifdef PCI_RES_BUS
if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
-#endif
pci_cfg_save(child, dinfo, 1);
}
@@ -5545,11 +5637,9 @@ pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
rl = &dinfo->resources;
cfg = &dinfo->cfg;
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_alloc_secbus(dev, child, rid, start, end, count,
flags));
-#endif
case SYS_RES_IRQ:
/*
* Can't alloc legacy interrupt once MSI messages have
@@ -5570,7 +5660,6 @@ pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
break;
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
-#ifdef NEW_PCIB
/*
* PCI-PCI bridge I/O window resources are not BARs.
* For those allocations just pass the request up the
@@ -5589,7 +5678,6 @@ pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
type, rid, start, end, count, flags));
}
}
-#endif
/* Reserve resources for this BAR if needed. */
rle = resource_list_find(rl, type, *rid);
if (rle == NULL) {
@@ -5662,7 +5750,6 @@ pci_release_resource(device_t dev, device_t child, struct resource *r)
}
#endif
-#ifdef NEW_PCIB
/*
* PCI-PCI bridge I/O window resources are not BARs. For
* those allocations just pass the request up the tree.
@@ -5677,7 +5764,6 @@ pci_release_resource(device_t dev, device_t child, struct resource *r)
return (bus_generic_release_resource(dev, child, r));
}
}
-#endif
rl = &dinfo->resources;
return (resource_list_release(rl, dev, child, r));
@@ -5689,25 +5775,44 @@ pci_activate_resource(device_t dev, device_t child, struct resource *r)
struct pci_devinfo *dinfo;
int error, rid, type;
- error = bus_generic_activate_resource(dev, child, r);
- if (error)
- return (error);
+ if (device_get_parent(child) != dev)
+ return (bus_generic_activate_resource(dev, child, r));
- /* Enable decoding in the command register when activating BARs. */
- if (device_get_parent(child) == dev) {
- /* Device ROMs need their decoding explicitly enabled. */
- dinfo = device_get_ivars(child);
- rid = rman_get_rid(r);
- type = rman_get_type(r);
- if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
- pci_write_bar(child, pci_find_bar(child, rid),
- rman_get_start(r) | PCIM_BIOS_ENABLE);
- switch (type) {
+ dinfo = device_get_ivars(child);
+#ifdef PCI_IOV
+ if (dinfo->cfg.flags & PCICFG_VF) {
+ switch (rman_get_type(r)) {
+ /* VFs can't have I/O BARs. */
case SYS_RES_IOPORT:
+ error = EINVAL;
+ break;
case SYS_RES_MEMORY:
- error = PCI_ENABLE_IO(dev, child, type);
+ error = pci_vf_activate_mem_resource(dev, child, r);
+ break;
+ default:
+ error = bus_generic_activate_resource(dev, child, r);
break;
}
+ } else
+#endif
+ error = bus_generic_activate_resource(dev, child, r);
+ if (error)
+ return (error);
+
+ rid = rman_get_rid(r);
+ type = rman_get_type(r);
+
+ /* Device ROMs need their decoding explicitly enabled. */
+ if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
+ pci_write_bar(child, pci_find_bar(child, rid),
+ rman_get_start(r) | PCIM_BIOS_ENABLE);
+
+ /* Enable decoding in the command register when activating BARs. */
+ switch (type) {
+ case SYS_RES_IOPORT:
+ case SYS_RES_MEMORY:
+ error = PCI_ENABLE_IO(dev, child, type);
+ break;
}
return (error);
}
@@ -5718,22 +5823,124 @@ pci_deactivate_resource(device_t dev, device_t child, struct resource *r)
struct pci_devinfo *dinfo;
int error, rid, type;
- error = bus_generic_deactivate_resource(dev, child, r);
+ if (device_get_parent(child) != dev)
+ return (bus_generic_deactivate_resource(dev, child, r));
+
+ dinfo = device_get_ivars(child);
+#ifdef PCI_IOV
+ if (dinfo->cfg.flags & PCICFG_VF) {
+ switch (rman_get_type(r)) {
+ /* VFs can't have I/O BARs. */
+ case SYS_RES_IOPORT:
+ error = EINVAL;
+ break;
+ case SYS_RES_MEMORY:
+ error = pci_vf_deactivate_mem_resource(dev, child, r);
+ break;
+ default:
+ error = bus_generic_deactivate_resource(dev, child, r);
+ break;
+ }
+ } else
+#endif
+ error = bus_generic_deactivate_resource(dev, child, r);
if (error)
return (error);
/* Disable decoding for device ROMs. */
- if (device_get_parent(child) == dev) {
- dinfo = device_get_ivars(child);
- rid = rman_get_rid(r);
- type = rman_get_type(r);
- if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
- pci_write_bar(child, pci_find_bar(child, rid),
- rman_get_start(r));
- }
+ rid = rman_get_rid(r);
+ type = rman_get_type(r);
+ if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
+ pci_write_bar(child, pci_find_bar(child, rid),
+ rman_get_start(r));
return (0);
}
+int
+pci_adjust_resource(device_t dev, device_t child, struct resource *r,
+ rman_res_t start, rman_res_t end)
+{
+#ifdef PCI_IOV
+ struct pci_devinfo *dinfo;
+
+ if (device_get_parent(child) != dev)
+ return (bus_generic_adjust_resource(dev, child, r, start,
+ end));
+
+ dinfo = device_get_ivars(child);
+ if (dinfo->cfg.flags & PCICFG_VF) {
+ switch (rman_get_type(r)) {
+ /* VFs can't have I/O BARs. */
+ case SYS_RES_IOPORT:
+ return (EINVAL);
+ case SYS_RES_MEMORY:
+ return (pci_vf_adjust_mem_resource(dev, child, r,
+ start, end));
+ }
+
+ /* Fall through for other types of resource allocations. */
+ }
+#endif
+
+ return (bus_generic_adjust_resource(dev, child, r, start, end));
+}
+
+int
+pci_map_resource(device_t dev, device_t child, struct resource *r,
+ struct resource_map_request *argsp, struct resource_map *map)
+{
+#ifdef PCI_IOV
+ struct pci_devinfo *dinfo;
+
+ if (device_get_parent(child) != dev)
+ return (bus_generic_map_resource(dev, child, r, argsp,
+ map));
+
+ dinfo = device_get_ivars(child);
+ if (dinfo->cfg.flags & PCICFG_VF) {
+ switch (rman_get_type(r)) {
+ /* VFs can't have I/O BARs. */
+ case SYS_RES_IOPORT:
+ return (EINVAL);
+ case SYS_RES_MEMORY:
+ return (pci_vf_map_mem_resource(dev, child, r, argsp,
+ map));
+ }
+
+ /* Fall through for other types of resource allocations. */
+ }
+#endif
+
+ return (bus_generic_map_resource(dev, child, r, argsp, map));
+}
+
+int
+pci_unmap_resource(device_t dev, device_t child, struct resource *r,
+ struct resource_map *map)
+{
+#ifdef PCI_IOV
+ struct pci_devinfo *dinfo;
+
+ if (device_get_parent(child) != dev)
+ return (bus_generic_unmap_resource(dev, child, r, map));
+
+ dinfo = device_get_ivars(child);
+ if (dinfo->cfg.flags & PCICFG_VF) {
+ switch (rman_get_type(r)) {
+ /* VFs can't have I/O BARs. */
+ case SYS_RES_IOPORT:
+ return (EINVAL);
+ case SYS_RES_MEMORY:
+ return (pci_vf_unmap_mem_resource(dev, child, r, map));
+ }
+
+ /* Fall through for other types of resource allocations. */
+ }
+#endif
+
+ return (bus_generic_unmap_resource(dev, child, r, map));
+}
+
void
pci_child_deleted(device_t dev, device_t child)
{
@@ -6541,6 +6748,8 @@ pcie_flr(device_t dev, u_int max_delay, bool force)
if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
return (false);
+ if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_FLR))
+ return (false);
/*
* Disable busmastering to prevent generation of new
diff --git a/sys/dev/pci/pci_dw.c b/sys/dev/pci/pci_dw.c
index f03e19dfe041..dcc9c0c5e369 100644
--- a/sys/dev/pci/pci_dw.c
+++ b/sys/dev/pci/pci_dw.c
@@ -31,7 +31,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
-#include <sys/devmap.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
@@ -821,7 +820,7 @@ pci_dw_init(device_t dev)
if (rv != 0)
goto out;
- device_add_child(dev, "pci", -1);
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
return (0);
out:
diff --git a/sys/dev/pci/pci_dw_mv.c b/sys/dev/pci/pci_dw_mv.c
index 97d957932d42..b67356fc0e1d 100644
--- a/sys/dev/pci/pci_dw_mv.c
+++ b/sys/dev/pci/pci_dw_mv.c
@@ -31,7 +31,6 @@
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
-#include <sys/devmap.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -306,7 +305,8 @@ pci_mv_attach(device_t dev)
goto out;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
out:
/* XXX Cleanup */
return (rv);
diff --git a/sys/dev/pci/pci_host_generic.c b/sys/dev/pci/pci_host_generic.c
index 82ed51460621..49b131cd2299 100644
--- a/sys/dev/pci/pci_host_generic.c
+++ b/sys/dev/pci/pci_host_generic.c
@@ -59,6 +59,12 @@
#define PCI_RF_FLAGS 0
#endif
+/*
+ * We allocate "ranges" specified mappings higher up in the rid space to avoid
+ * conflicts with various definitions in the wild that may have other registers
+ * attributed to the controller besides just the config space.
+ */
+#define RANGE_RID(idx) ((idx) + 100)
/* Forward prototypes */
@@ -67,8 +73,6 @@ static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
-static int generic_pcie_read_ivar(device_t dev, device_t child, int index,
- uintptr_t *result);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
uintptr_t value);
@@ -87,7 +91,7 @@ pci_host_generic_core_attach(device_t dev)
const char *range_descr;
char buf[64];
int domain, error;
- int flags, rid, tuple, type;
+ int flags, rid, tuple;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -175,7 +179,7 @@ pci_host_generic_core_attach(device_t dev)
phys_base = sc->ranges[tuple].phys_base;
pci_base = sc->ranges[tuple].pci_base;
size = sc->ranges[tuple].size;
- rid = tuple + 1;
+ rid = RANGE_RID(tuple);
if (size == 0)
continue; /* empty range element */
switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
@@ -183,19 +187,16 @@ pci_host_generic_core_attach(device_t dev)
sc->has_pmem = true;
range_descr = "prefetch";
flags = RF_PREFETCHABLE;
- type = SYS_RES_MEMORY;
rm = &sc->pmem_rman;
break;
case FLAG_TYPE_MEM:
range_descr = "memory";
flags = 0;
- type = SYS_RES_MEMORY;
rm = &sc->mem_rman;
break;
case FLAG_TYPE_IO:
range_descr = "I/O port";
flags = 0;
- type = SYS_RES_IOPORT;
rm = &sc->io_rman;
break;
default:
@@ -205,15 +206,17 @@ pci_host_generic_core_attach(device_t dev)
device_printf(dev,
"PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
pci_base, phys_base, size, range_descr);
- error = bus_set_resource(dev, type, rid, phys_base, size);
+ error = bus_set_resource(dev, SYS_RES_MEMORY, rid, phys_base,
+ size);
if (error != 0) {
device_printf(dev,
"failed to set resource for range %d: %d\n", tuple,
error);
continue;
}
- sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
- RF_ACTIVE | RF_UNMAPPED | flags);
+ sc->ranges[tuple].rid = rid;
+ sc->ranges[tuple].res = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE | RF_UNMAPPED | flags);
if (sc->ranges[tuple].res == NULL) {
device_printf(dev,
"failed to allocate resource for range %d\n", tuple);
@@ -248,7 +251,7 @@ int
pci_host_generic_core_detach(device_t dev)
{
struct generic_pcie_core_softc *sc;
- int error, tuple, type;
+ int error, rid, tuple;
sc = device_get_softc(dev);
@@ -257,23 +260,25 @@ pci_host_generic_core_detach(device_t dev)
return (error);
for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
- if (sc->ranges[tuple].size == 0)
+ rid = sc->ranges[tuple].rid;
+ if (sc->ranges[tuple].size == 0) {
+ MPASS(sc->ranges[tuple].res == NULL);
continue; /* empty range element */
+ }
+
+ MPASS(rid != -1);
switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
case FLAG_TYPE_PMEM:
case FLAG_TYPE_MEM:
- type = SYS_RES_MEMORY;
- break;
case FLAG_TYPE_IO:
- type = SYS_RES_IOPORT;
break;
default:
continue;
}
if (sc->ranges[tuple].res != NULL)
- bus_release_resource(dev, type, tuple + 1,
+ bus_release_resource(dev, SYS_RES_MEMORY, rid,
sc->ranges[tuple].res);
- bus_delete_resource(dev, type, tuple + 1);
+ bus_delete_resource(dev, SYS_RES_MEMORY, rid);
}
rman_fini(&sc->io_rman);
rman_fini(&sc->mem_rman);
@@ -362,20 +367,18 @@ generic_pcie_maxslots(device_t dev)
return (31); /* max slots per bus acc. to standard */
}
-static int
+int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result)
{
struct generic_pcie_core_softc *sc;
sc = device_get_softc(dev);
-
- if (index == PCIB_IVAR_BUS) {
+ switch (index) {
+ case PCIB_IVAR_BUS:
*result = sc->bus_start;
return (0);
- }
-
- if (index == PCIB_IVAR_DOMAIN) {
+ case PCIB_IVAR_DOMAIN:
*result = sc->ecam;
return (0);
}
@@ -416,16 +419,12 @@ int
pci_host_generic_core_release_resource(device_t dev, device_t child,
struct resource *res)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct generic_pcie_core_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_release_bus(sc->ecam, child, res));
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
return (bus_generic_rman_release_resource(dev, child, res));
@@ -506,22 +505,16 @@ struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct generic_pcie_core_softc *sc;
-#endif
struct resource *res;
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
sc = device_get_softc(dev);
-#endif
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
count, flags);
break;
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
res = bus_generic_rman_alloc_resource(dev, child, type, rid,
@@ -543,16 +536,12 @@ pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
static int
generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct generic_pcie_core_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(r)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_activate_bus(sc->ecam, child, r));
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
return (bus_generic_rman_activate_resource(dev, child, r));
@@ -565,16 +554,12 @@ static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
struct resource *r)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct generic_pcie_core_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(r)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_deactivate_bus(sc->ecam, child, r));
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
return (bus_generic_rman_deactivate_resource(dev, child, r));
@@ -587,17 +572,13 @@ static int
generic_pcie_adjust_resource(device_t dev, device_t child,
struct resource *res, rman_res_t start, rman_res_t end)
{
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct generic_pcie_core_softc *sc;
sc = device_get_softc(dev);
-#endif
switch (rman_get_type(res)) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (pci_domain_adjust_bus(sc->ecam, child, res, start,
end));
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
return (bus_generic_rman_adjust_resource(dev, child, res,
@@ -619,10 +600,8 @@ generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
type = rman_get_type(r);
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (EINVAL);
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
break;
@@ -646,7 +625,7 @@ generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
args.offset = start - range->pci_base;
args.length = length;
- return (bus_generic_map_resource(dev, child, range->res, &args, map));
+ return (bus_map_resource(dev, range->res, &args, map));
}
static int
@@ -658,22 +637,20 @@ generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
type = rman_get_type(r);
switch (type) {
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
case PCI_RES_BUS:
return (EINVAL);
-#endif
case SYS_RES_IOPORT:
case SYS_RES_MEMORY:
- range = generic_pcie_containing_range(dev, type,
- rman_get_start(r), rman_get_end(r));
- if (range == NULL || range->res == NULL)
- return (ENOENT);
- r = range->res;
break;
default:
- break;
+ return (bus_generic_unmap_resource(dev, child, r, map));
}
- return (bus_generic_unmap_resource(dev, child, r, map));
+
+ range = generic_pcie_containing_range(dev, type, rman_get_start(r),
+ rman_get_end(r));
+ if (range == NULL || range->res == NULL)
+ return (ENOENT);
+ return (bus_unmap_resource(dev, range->res, map));
}
static bus_dma_tag_t
diff --git a/sys/dev/pci/pci_host_generic.h b/sys/dev/pci/pci_host_generic.h
index 688345e92db5..6579cd0918c4 100644
--- a/sys/dev/pci/pci_host_generic.h
+++ b/sys/dev/pci/pci_host_generic.h
@@ -64,6 +64,7 @@ struct pcie_range {
#define FLAG_TYPE_MEM 0x2
#define FLAG_TYPE_PMEM 0x3
struct resource *res;
+ int rid;
};
struct generic_pcie_core_softc {
@@ -97,5 +98,6 @@ struct resource *pci_host_generic_core_alloc_resource(device_t, device_t, int,
int *, rman_res_t, rman_res_t, rman_res_t, u_int);
int pci_host_generic_core_release_resource(device_t, device_t,
struct resource *);
+int generic_pcie_read_ivar(device_t, device_t, int, uintptr_t *);
#endif /* __PCI_HOST_GENERIC_H_ */
diff --git a/sys/dev/pci/pci_host_generic_acpi.c b/sys/dev/pci/pci_host_generic_acpi.c
index 2191ec4d655a..324ead5e8b87 100644
--- a/sys/dev/pci/pci_host_generic_acpi.c
+++ b/sys/dev/pci/pci_host_generic_acpi.c
@@ -99,7 +99,6 @@ static struct {
{ "MVEBU ", "CN9130 ", PCIE_ECAM_DESIGNWARE_QUIRK },
{ "MVEBU ", "CN9131 ", PCIE_ECAM_DESIGNWARE_QUIRK },
{ "MVEBU ", "CN9132 ", PCIE_ECAM_DESIGNWARE_QUIRK },
- { 0 },
};
/* Forward prototypes */
@@ -183,6 +182,7 @@ pci_host_generic_acpi_parse_resource(ACPI_RESOURCE *res, void *arg)
/* Save detected ranges */
if (res->Data.Address.ResourceType == ACPI_MEMORY_RANGE ||
res->Data.Address.ResourceType == ACPI_IO_RANGE) {
+ sc->base.ranges[r].rid = -1;
sc->base.ranges[r].pci_base = min;
sc->base.ranges[r].phys_base = min + off;
sc->base.ranges[r].size = max - min + 1;
@@ -202,9 +202,9 @@ static void
pci_host_acpi_get_oem_quirks(struct generic_pcie_acpi_softc *sc,
ACPI_TABLE_HEADER *hdr)
{
- int i;
+ size_t i;
- for (i = 0; pci_acpi_quirks[i].quirks; i++) {
+ for (i = 0; i < nitems(pci_acpi_quirks); i++) {
if (memcmp(hdr->OemId, pci_acpi_quirks[i].oem_id,
ACPI_OEM_ID_SIZE) != 0)
continue;
@@ -288,6 +288,8 @@ pci_host_generic_acpi_init(device_t dev)
sc = device_get_softc(dev);
handle = acpi_get_handle(dev);
+ acpi_pcib_osc(dev, &sc->osc_ctl, 0);
+
/* Get the start bus number for the PCI host bus from the _BBN method */
status = acpi_GetInteger(handle, "_BBN", &sc->base.bus_start);
if (ACPI_FAILURE(status)) {
@@ -336,31 +338,25 @@ pci_host_generic_acpi_attach(device_t dev)
if (error != 0)
return (error);
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
generic_pcie_acpi_read_ivar(device_t dev, device_t child, int index,
uintptr_t *result)
{
- struct generic_pcie_acpi_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (index == PCIB_IVAR_BUS) {
- *result = sc->base.bus_start;
- return (0);
- }
+ ACPI_HANDLE handle;
- if (index == PCIB_IVAR_DOMAIN) {
- *result = sc->base.ecam;
+ switch (index) {
+ case ACPI_IVAR_HANDLE:
+ handle = acpi_get_handle(dev);
+ *result = (uintptr_t)handle;
return (0);
}
- if (bootverbose)
- device_printf(dev, "ERROR: Unknown index %d.\n", index);
- return (ENOENT);
+ return (generic_pcie_read_ivar(dev, child, index, result));
}
static int
@@ -415,7 +411,8 @@ generic_pcie_get_iommu(device_t pci, device_t child, uintptr_t *id)
{
struct generic_pcie_acpi_softc *sc;
struct pci_id_ofw_iommu *iommu;
- u_int iommu_sid, iommu_xref;
+ uint64_t iommu_xref;
+ u_int iommu_sid;
uintptr_t rid;
int err;
@@ -511,6 +508,30 @@ generic_pcie_acpi_get_id(device_t pci, device_t child, enum pci_id_type type,
return (pcib_get_id(pci, child, type, id));
}
+static int
+generic_pcie_acpi_request_feature(device_t pcib, device_t dev,
+ enum pci_feature feature)
+{
+ struct generic_pcie_acpi_softc *sc;
+ uint32_t osc_ctl;
+
+ sc = device_get_softc(pcib);
+
+ switch (feature) {
+ case PCI_FEATURE_HP:
+ osc_ctl = PCIM_OSC_CTL_PCIE_HP;
+ break;
+ case PCI_FEATURE_AER:
+ osc_ctl = PCIM_OSC_CTL_PCIE_AER;
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (acpi_pcib_osc(pcib, &sc->osc_ctl, osc_ctl));
+}
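Note: with this method in place, drivers above the bridge can negotiate _OSC control bits through the generic feature interface. A sketch of a caller, assuming the standard PCIB_REQUEST_FEATURE() kobj wrapper:

	/* Ask the host bridge for native PCIe hotplug control. */
	if (PCIB_REQUEST_FEATURE(device_get_parent(dev), dev,
	    PCI_FEATURE_HP) != 0)
		device_printf(dev, "firmware kept hotplug control\n");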
+
static device_method_t generic_pcie_acpi_methods[] = {
DEVMETHOD(device_probe, generic_pcie_acpi_probe),
DEVMETHOD(device_attach, pci_host_generic_acpi_attach),
@@ -524,6 +545,7 @@ static device_method_t generic_pcie_acpi_methods[] = {
DEVMETHOD(pcib_release_msix, generic_pcie_acpi_release_msix),
DEVMETHOD(pcib_map_msi, generic_pcie_acpi_map_msi),
DEVMETHOD(pcib_get_id, generic_pcie_acpi_get_id),
+ DEVMETHOD(pcib_request_feature, generic_pcie_acpi_request_feature),
DEVMETHOD_END
};
diff --git a/sys/dev/pci/pci_host_generic_acpi.h b/sys/dev/pci/pci_host_generic_acpi.h
index 802099abb9d7..5617da971306 100644
--- a/sys/dev/pci/pci_host_generic_acpi.h
+++ b/sys/dev/pci/pci_host_generic_acpi.h
@@ -35,6 +35,7 @@
struct generic_pcie_acpi_softc {
struct generic_pcie_core_softc base;
int segment;
+ uint32_t osc_ctl;
ACPI_BUFFER ap_prt; /* interrupt routing table */
};
diff --git a/sys/dev/pci/pci_host_generic_den0115.c b/sys/dev/pci/pci_host_generic_den0115.c
index cfef34824965..d8e3f9feaf18 100644
--- a/sys/dev/pci/pci_host_generic_den0115.c
+++ b/sys/dev/pci/pci_host_generic_den0115.c
@@ -124,8 +124,7 @@ pci_host_acpi_smccc_has_feature(uint32_t pci_func_id)
{
struct arm_smccc_res result;
- if (psci_callfn(SMCCC_PCI_FEATURES, pci_func_id, 0, 0, 0, 0, 0, 0,
- &result) < 0) {
+ if (arm_smccc_invoke(SMCCC_PCI_FEATURES, pci_func_id, &result) < 0) {
return (false);
}
@@ -137,7 +136,7 @@ pci_host_acpi_smccc_pci_version(uint32_t *versionp)
{
struct arm_smccc_res result;
- if (psci_callfn(SMCCC_PCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &result) < 0) {
+ if (arm_smccc_invoke(SMCCC_PCI_VERSION, &result) < 0) {
return (false);
}
@@ -185,8 +184,8 @@ pci_host_acpi_smccc_attach(device_t dev)
return (error);
if (pci_host_acpi_smccc_has_feature(SMCCC_PCI_GET_SEG_INFO) &&
- psci_callfn(SMCCC_PCI_GET_SEG_INFO, sc->base.ecam, 0, 0, 0, 0, 0,
- 0, &result) == SMCCC_RET_SUCCESS) {
+ arm_smccc_invoke(SMCCC_PCI_GET_SEG_INFO, sc->base.ecam,
+ &result) == SMCCC_RET_SUCCESS) {
start = SMCCC_PCI_SEG_START(result.a1);
end = SMCCC_PCI_SEG_END(result.a1);
@@ -194,8 +193,9 @@ pci_host_acpi_smccc_attach(device_t dev)
sc->base.bus_end = MIN(sc->base.bus_end, end);
}
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static uint32_t
@@ -215,8 +215,7 @@ pci_host_acpi_smccc_read_config(device_t dev, u_int bus, u_int slot,
return (~0U);
addr = (sc->base.ecam << 16) | (bus << 8) | (slot << 3) | (func << 0);
- if (psci_callfn(SMCCC_PCI_READ, addr, reg, bytes, 0, 0, 0, 0,
- &result) < 0) {
+ if (arm_smccc_invoke(SMCCC_PCI_READ, addr, reg, bytes, &result) < 0) {
return (~0U);
}
@@ -240,7 +239,7 @@ pci_host_acpi_smccc_write_config(device_t dev, u_int bus, u_int slot,
return;
addr = (sc->base.ecam << 16) | (bus << 8) | (slot << 3) | (func << 0);
- psci_callfn(SMCCC_PCI_WRITE, addr, reg, bytes, val, 0, 0, 0, &result);
+ arm_smccc_invoke(SMCCC_PCI_WRITE, addr, reg, bytes, val, &result);
}
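Note: arm_smccc_invoke() appears to be a variadic convenience wrapper over the SMCCC call interface that zero-fills the unused argument registers, so the conversions in this file are mechanical:

	struct arm_smccc_res res;

	/* Before: every argument register spelled out. */
	psci_callfn(SMCCC_PCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
	/* After: trailing zero arguments implied. */
	arm_smccc_invoke(SMCCC_PCI_VERSION, &res);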
static device_method_t generic_pcie_acpi_smccc_methods[] = {
diff --git a/sys/dev/pci/pci_host_generic_fdt.c b/sys/dev/pci/pci_host_generic_fdt.c
index 854ec0be8dfa..ffe63b82a234 100644
--- a/sys/dev/pci/pci_host_generic_fdt.c
+++ b/sys/dev/pci/pci_host_generic_fdt.c
@@ -170,8 +170,9 @@ pci_host_generic_fdt_attach(device_t dev)
if (error != 0)
return (error);
- device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -214,6 +215,7 @@ parse_pci_mem_ranges(device_t dev, struct generic_pcie_core_softc *sc)
sc->ranges[i].flags |= FLAG_TYPE_MEM;
}
+ sc->ranges[i].rid = -1;
sc->ranges[i].pci_base = 0;
for (k = 0; k < (pci_addr_cells - 1); k++) {
sc->ranges[i].pci_base <<= 32;
@@ -365,7 +367,7 @@ generic_pcie_get_iommu(device_t pci, device_t child, uintptr_t *id)
{
struct pci_id_ofw_iommu *iommu;
uint32_t iommu_rid;
- uint32_t iommu_xref;
+ phandle_t iommu_xref;
uint16_t pci_rid;
phandle_t node;
int err;
diff --git a/sys/dev/pci/pci_iov.c b/sys/dev/pci/pci_iov.c
index c8e139f043c9..0efcfeac9eff 100644
--- a/sys/dev/pci/pci_iov.c
+++ b/sys/dev/pci/pci_iov.c
@@ -43,10 +43,10 @@
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
-#include <machine/stdarg.h>
#include <sys/nv.h>
#include <sys/iov_schema.h>
@@ -670,7 +670,7 @@ pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config,
}
}
- bus_generic_attach(bus);
+ bus_attach_children(bus);
}
static int
@@ -734,10 +734,18 @@ pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
first_rid = pci_get_rid(dev) + rid_off;
last_rid = first_rid + (num_vfs - 1) * rid_stride;
- /* We don't yet support allocating extra bus numbers for VFs. */
if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
- error = ENOSPC;
- goto out;
+ int rid = 0;
+ uint16_t last_rid_bus = PCI_RID2BUS(last_rid);
+
+ iov->iov_bus_res = bus_alloc_resource(bus, PCI_RES_BUS, &rid,
+ last_rid_bus, last_rid_bus, 1, RF_ACTIVE);
+ if (iov->iov_bus_res == NULL) {
+ device_printf(dev,
+ "failed to allocate PCIe bus number for VFs\n");
+ error = ENOSPC;
+ goto out;
+ }
}
if (!ari_enabled && PCI_RID2SLOT(last_rid) != 0) {
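Note: the bus number is the high byte of a routing ID (PCI_RID2BUS(rid) == rid >> 8), so a large VF count or stride can push the last VF's RID past the PF's bus; the new code above reserves that extra bus number instead of failing with ENOSPC. A standalone sketch of the arithmetic, assuming the standard 8/5/3 RID split:

        /* Sketch of the VF routing-ID arithmetic above, assuming the
         * standard RID layout where bus = rid >> 8; values are
         * illustrative. */
        #include <stdint.h>
        #include <stdio.h>

        #define RID2BUS(rid)    ((uint8_t)((rid) >> 8))

        int
        main(void)
        {
                uint16_t pf_rid = 0x0300;       /* bus 3, slot 0, func 0 */
                uint16_t rid_off = 1, rid_stride = 2, num_vfs = 200;
                uint16_t first = pf_rid + rid_off;
                uint16_t last = first + (num_vfs - 1) * rid_stride;

                if (RID2BUS(pf_rid) != RID2BUS(last))
                        printf("VFs spill onto bus %u\n", (unsigned)RID2BUS(last));
                return (0);
        }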
@@ -785,6 +793,11 @@ out:
}
}
+ if (iov->iov_bus_res != NULL) {
+ bus_release_resource(bus, iov->iov_bus_res);
+ iov->iov_bus_res = NULL;
+ }
+
if (iov->iov_flags & IOV_RMAN_INITED) {
rman_fini(&iov->rman);
iov->iov_flags &= ~IOV_RMAN_INITED;
@@ -895,6 +908,11 @@ pci_iov_delete_iov_children(struct pci_devinfo *dinfo)
}
}
+ if (iov->iov_bus_res != NULL) {
+ bus_release_resource(bus, iov->iov_bus_res);
+ iov->iov_bus_res = NULL;
+ }
+
if (iov->iov_flags & IOV_RMAN_INITED) {
rman_fini(&iov->rman);
iov->iov_flags &= ~IOV_RMAN_INITED;
@@ -1070,6 +1088,12 @@ pci_vf_release_mem_resource(device_t dev, device_t child, struct resource *r)
dinfo = device_get_ivars(child);
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &dinfo->cfg.iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
if (rman_get_flags(r) & RF_ACTIVE) {
error = bus_deactivate_resource(child, r);
if (error != 0)
@@ -1086,3 +1110,148 @@ pci_vf_release_mem_resource(device_t dev, device_t child, struct resource *r)
return (rman_release_resource(r));
}
+
+int
+pci_vf_activate_mem_resource(device_t dev, device_t child, struct resource *r)
+{
+#ifdef INVARIANTS
+ struct pci_devinfo *dinfo = device_get_ivars(child);
+#endif
+ struct resource_map map;
+ int error;
+
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &dinfo->cfg.iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
+ error = rman_activate_resource(r);
+ if (error != 0)
+ return (error);
+
+ if ((rman_get_flags(r) & RF_UNMAPPED) == 0) {
+ error = BUS_MAP_RESOURCE(dev, child, r, NULL, &map);
+ if (error != 0) {
+ rman_deactivate_resource(r);
+ return (error);
+ }
+
+ rman_set_mapping(r, &map);
+ }
+ return (0);
+}
+
+int
+pci_vf_deactivate_mem_resource(device_t dev, device_t child, struct resource *r)
+{
+#ifdef INVARIANTS
+ struct pci_devinfo *dinfo = device_get_ivars(child);
+#endif
+ struct resource_map map;
+ int error;
+
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &dinfo->cfg.iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
+ error = rman_deactivate_resource(r);
+ if (error != 0)
+ return (error);
+
+ if ((rman_get_flags(r) & RF_UNMAPPED) == 0) {
+ rman_get_mapping(r, &map);
+ BUS_UNMAP_RESOURCE(dev, child, r, &map);
+ }
+ return (0);
+}
+
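Note: pci_vf_activate_mem_resource() pairs rman activation with an eager BUS_MAP_RESOURCE() unless the caller asked for RF_UNMAPPED, and rolls the activation back if the mapping fails; the deactivate path undoes the two steps in reverse order. A generic, self-contained sketch of that rollback idiom (the stage names are made up):

        /* Generic sketch of the activate-then-map rollback idiom above;
         * stage_one()/stage_two() stand in for rman_activate_resource()
         * and BUS_MAP_RESOURCE(). */
        #include <errno.h>
        #include <stdio.h>

        static int stage_one(void) { return (0); }
        static void stage_one_undo(void) { printf("rolled back\n"); }
        static int stage_two(void) { return (ENXIO); }  /* simulated failure */

        static int
        activate(void)
        {
                int error;

                error = stage_one();
                if (error != 0)
                        return (error);
                error = stage_two();
                if (error != 0) {
                        stage_one_undo();  /* keep state consistent on failure */
                        return (error);
                }
                return (0);
        }

        int
        main(void)
        {
                return (activate() != 0);
        }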
+int
+pci_vf_adjust_mem_resource(device_t dev, device_t child, struct resource *r,
+ rman_res_t start, rman_res_t end)
+{
+#ifdef INVARIANTS
+ struct pci_devinfo *dinfo = device_get_ivars(child);
+#endif
+
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &dinfo->cfg.iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
+ return (rman_adjust_resource(r, start, end));
+}
+
+static struct resource *
+pci_vf_find_parent_resource(struct pcicfg_iov *iov, struct resource *r)
+{
+ struct resource *pres;
+
+ for (u_int i = 0; i <= PCIR_MAX_BAR_0; i++) {
+ pres = iov->iov_bar[i].res;
+ if (pres != NULL) {
+ if (rman_get_start(pres) <= rman_get_start(r) &&
+ rman_get_end(pres) >= rman_get_end(r))
+ return (pres);
+ }
+ }
+ return (NULL);
+}
+
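Note: the parent lookup scans the PF's IOV BARs for the one whose address range fully contains the VF resource. A small sketch of the inclusive containment test:

        /* Sketch of the containment test in pci_vf_find_parent_resource();
         * ranges are inclusive, as with rman_get_start()/rman_get_end(). */
        #include <assert.h>
        #include <stdbool.h>
        #include <stdint.h>

        struct range { uint64_t start, end; };

        static bool
        contains(const struct range *parent, const struct range *child)
        {
                return (parent->start <= child->start &&
                    parent->end >= child->end);
        }

        int
        main(void)
        {
                struct range bar = { 0x1000, 0x1fff }, vf = { 0x1400, 0x14ff };

                assert(contains(&bar, &vf));
                return (0);
        }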
+int
+pci_vf_map_mem_resource(device_t dev, device_t child, struct resource *r,
+ struct resource_map_request *argsp, struct resource_map *map)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(child);
+ struct pcicfg_iov *iov = dinfo->cfg.iov;
+ struct resource_map_request args;
+ struct resource *pres;
+ rman_res_t length, start;
+ int error;
+
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
+ /* Resources must be active to be mapped. */
+ if (!(rman_get_flags(r) & RF_ACTIVE))
+ return (ENXIO);
+
+ resource_init_map_request(&args);
+ error = resource_validate_map_request(r, argsp, &args, &start, &length);
+ if (error)
+ return (error);
+
+ pres = pci_vf_find_parent_resource(dinfo->cfg.iov, r);
+ if (pres == NULL)
+ return (ENOENT);
+
+ args.offset = start - rman_get_start(pres);
+ args.length = length;
+ return (bus_map_resource(iov->iov_pf, pres, &args, map));
+}
+
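Note: the map request is re-expressed relative to the enclosing PF BAR: args.offset is the distance from the parent resource's start, and the actual mapping is then delegated to the PF via bus_map_resource(). A sketch of the offset math with illustrative addresses:

        /* Sketch of the child-to-parent offset math in
         * pci_vf_map_mem_resource(): the VF window is mapped as a
         * subrange of the PF's IOV BAR. Addresses are illustrative. */
        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint64_t parent_start = 0xf0000000;     /* PF IOV BAR base */
                uint64_t child_start = 0xf0020000;      /* VF BAR within it */
                uint64_t length = 0x4000;               /* span to map */

                printf("map offset %#jx, length %#jx\n",
                    (uintmax_t)(child_start - parent_start),
                    (uintmax_t)length);
                return (0);
        }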
+int
+pci_vf_unmap_mem_resource(device_t dev, device_t child, struct resource *r,
+ struct resource_map *map)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(child);
+ struct pcicfg_iov *iov = dinfo->cfg.iov;
+ struct resource *pres;
+
+ KASSERT(rman_get_type(r) == SYS_RES_MEMORY,
+ ("%s: invalid resource %p", __func__, r));
+ KASSERT(rman_is_region_manager(r, &iov->rman),
+ ("%s: rman %p doesn't match for resource %p", __func__,
+ &dinfo->cfg.iov->rman, r));
+
+ pres = pci_vf_find_parent_resource(iov, r);
+ if (pres == NULL)
+ return (ENOENT);
+ return (bus_unmap_resource(iov->iov_pf, pres, map));
+}
diff --git a/sys/dev/pci/pci_iov_private.h b/sys/dev/pci/pci_iov_private.h
index 7ae2219b936d..ecf0a9b21be5 100644
--- a/sys/dev/pci/pci_iov_private.h
+++ b/sys/dev/pci/pci_iov_private.h
@@ -39,6 +39,8 @@ struct pcicfg_iov {
struct cdev *iov_cdev;
nvlist_t *iov_schema;
+ struct resource *iov_bus_res;
+
struct pci_iov_bar iov_bar[PCIR_MAX_BAR_0 + 1];
struct rman rman;
char rman_name[64];
diff --git a/sys/dev/pci/pci_iov_schema.c b/sys/dev/pci/pci_iov_schema.c
index 3cec329d77a1..f60c687f9b87 100644
--- a/sys/dev/pci/pci_iov_schema.c
+++ b/sys/dev/pci/pci_iov_schema.c
@@ -33,8 +33,7 @@
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/queue.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <sys/dnv.h>
#include <sys/nv.h>
@@ -54,11 +53,13 @@ static validate_func pci_iov_schema_validate_bool;
static validate_func pci_iov_schema_validate_string;
static validate_func pci_iov_schema_validate_uint;
static validate_func pci_iov_schema_validate_unicast_mac;
+static validate_func pci_iov_schema_validate_vlan;
static default_validate_t pci_iov_validate_bool_default;
static default_validate_t pci_iov_validate_string_default;
static default_validate_t pci_iov_validate_uint_default;
static default_validate_t pci_iov_validate_unicast_mac_default;
+static default_validate_t pci_iov_validate_vlan_default;
struct config_type_validator {
const char *type_name;
@@ -107,6 +108,11 @@ static struct config_type_validator pci_iov_schema_validators[] = {
.validate = pci_iov_schema_validate_unicast_mac,
.default_validate = pci_iov_validate_unicast_mac_default,
},
+ {
+ .type_name = "vlan",
+ .validate = pci_iov_schema_validate_vlan,
+ .default_validate = pci_iov_validate_vlan_default,
+ },
};
static const struct config_type_validator *
@@ -261,6 +267,26 @@ pci_iov_schema_add_unicast_mac(nvlist_t *schema, const char *name,
nvlist_move_nvlist(schema, name, entry);
}
+void
+pci_iov_schema_add_vlan(nvlist_t *schema, const char *name,
+ uint32_t flags, const uint16_t defaultVal)
+{
+ nvlist_t *entry;
+
+ entry = nvlist_create(NV_FLAG_IGNORE_CASE);
+ if (entry == NULL) {
+ nvlist_set_error(schema, ENOMEM);
+ return;
+ }
+
+ pci_iov_schema_add_type(entry, "vlan");
+ if (flags & IOV_SCHEMA_HASDEFAULT)
+ nvlist_add_number(entry, "default", defaultVal);
+ pci_iov_schema_add_required(entry, flags);
+
+ nvlist_move_nvlist(schema, name, entry);
+}
+
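Note: a PF driver opts into the new parameter by adding it to its VF schema before attaching. A minimal sketch, with a hypothetical driver name and error handling omitted; pci_iov_schema_alloc_node() and pci_iov_attach() are the existing SR-IOV schema API:

        /* Hypothetical PF driver registering the new "vlan" VF parameter;
         * the value may be 0-4095 or VF_VLAN_TRUNK, defaulting here to
         * untagged (0). Error handling omitted for brevity. */
        static int
        mydrv_iov_init(device_t dev)
        {
                nvlist_t *pf_schema, *vf_schema;

                pf_schema = pci_iov_schema_alloc_node();
                vf_schema = pci_iov_schema_alloc_node();
                pci_iov_schema_add_vlan(vf_schema, "vlan",
                    IOV_SCHEMA_HASDEFAULT, 0);
                return (pci_iov_attach(dev, pf_schema, vf_schema));
        }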
static int
pci_iov_schema_validate_bool(const struct config_type_validator * validator,
const nvlist_t *config, const char *name)
@@ -320,6 +346,24 @@ pci_iov_schema_validate_unicast_mac(
return (0);
}
+static int
+pci_iov_schema_validate_vlan(
+ const struct config_type_validator * validator,
+ const nvlist_t *config, const char *name)
+{
+ uint16_t vlan;
+
+ if (!nvlist_exists_number(config, name))
+ return (EINVAL);
+
+ vlan = nvlist_get_number(config, name);
+
+ if (vlan > 4095 && vlan != VF_VLAN_TRUNK)
+ return (EINVAL);
+
+ return (0);
+}
+
static void
pci_iov_config_add_default(const nvlist_t *param_schema, const char *name,
nvlist_t *config)
@@ -401,6 +445,22 @@ pci_iov_validate_unicast_mac_default(
}
static int
+pci_iov_validate_vlan_default(
+ const struct config_type_validator * validator, const nvlist_t *param)
+{
+ uint16_t vlan;
+
+ if (! nvlist_exists_number(param, DEFAULT_SCHEMA_NAME))
+ return (EINVAL);
+
+ vlan = nvlist_get_number(param, DEFAULT_SCHEMA_NAME);
+ if (vlan > 4095 && vlan != VF_VLAN_TRUNK)
+ return (EINVAL);
+
+ return (0);
+}
+
+static int
pci_iov_validate_param_schema(const nvlist_t *schema)
{
const struct config_type_validator *validator;
diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c
index 35062d67050e..40ed5db4480e 100644
--- a/sys/dev/pci/pci_pci.c
+++ b/sys/dev/pci/pci_pci.c
@@ -58,19 +58,16 @@
#include "pcib_if.h"
static int pcib_probe(device_t dev);
-static int pcib_suspend(device_t dev);
static int pcib_resume(device_t dev);
static bus_child_present_t pcib_child_present;
static bus_alloc_resource_t pcib_alloc_resource;
-#ifdef NEW_PCIB
static bus_adjust_resource_t pcib_adjust_resource;
static bus_release_resource_t pcib_release_resource;
static bus_activate_resource_t pcib_activate_resource;
static bus_deactivate_resource_t pcib_deactivate_resource;
static bus_map_resource_t pcib_map_resource;
static bus_unmap_resource_t pcib_unmap_resource;
-#endif
static int pcib_reset_child(device_t dev, device_t child, int flags);
static int pcib_power_for_sleep(device_t pcib, device_t dev,
@@ -101,7 +98,7 @@ static device_method_t pcib_methods[] = {
DEVMETHOD(device_attach, pcib_attach),
DEVMETHOD(device_detach, pcib_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, pcib_suspend),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
DEVMETHOD(device_resume, pcib_resume),
/* Bus interface */
@@ -109,19 +106,12 @@ static device_method_t pcib_methods[] = {
DEVMETHOD(bus_read_ivar, pcib_read_ivar),
DEVMETHOD(bus_write_ivar, pcib_write_ivar),
DEVMETHOD(bus_alloc_resource, pcib_alloc_resource),
-#ifdef NEW_PCIB
DEVMETHOD(bus_adjust_resource, pcib_adjust_resource),
DEVMETHOD(bus_release_resource, pcib_release_resource),
DEVMETHOD(bus_activate_resource, pcib_activate_resource),
DEVMETHOD(bus_deactivate_resource, pcib_deactivate_resource),
DEVMETHOD(bus_map_resource, pcib_map_resource),
DEVMETHOD(bus_unmap_resource, pcib_unmap_resource),
-#else
- DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
- DEVMETHOD(bus_release_resource, bus_generic_release_resource),
- DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
- DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
-#endif
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
DEVMETHOD(bus_reset_child, pcib_reset_child),
@@ -150,11 +140,8 @@ static device_method_t pcib_methods[] = {
DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
EARLY_DRIVER_MODULE(pcib, pci, pcib_driver, NULL, NULL, BUS_PASS_BUS);
-#if defined(NEW_PCIB) || defined(PCI_HP)
SYSCTL_DECL(_hw_pci);
-#endif
-#ifdef NEW_PCIB
static int pci_clear_pcib;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
"Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");
@@ -191,10 +178,8 @@ static int
pcib_is_resource_managed(struct pcib_softc *sc, struct resource *r)
{
-#ifdef PCI_RES_BUS
if (rman_get_type(r) == PCI_RES_BUS)
return (rman_is_region_manager(r, &sc->bus.rman));
-#endif
return (pcib_get_resource_window(sc, r) != NULL);
}
@@ -618,7 +603,6 @@ pcib_free_windows(struct pcib_softc *sc)
pcib_release_window(sc, &sc->io, SYS_RES_IOPORT);
}
-#ifdef PCI_RES_BUS
/*
* Allocate a suitable secondary bus for this bridge if needed and
* initialize the resource manager for the secondary bus range. Note
@@ -806,141 +790,6 @@ pcib_alloc_subbus(struct pcib_secbus *bus, device_t child, int *rid,
flags));
return (NULL);
}
-#endif
-
-#else
-
-/*
- * Is the prefetch window open (eg, can we allocate memory in it?)
- */
-static int
-pcib_is_prefetch_open(struct pcib_softc *sc)
-{
- return (sc->pmembase > 0 && sc->pmembase < sc->pmemlimit);
-}
-
-/*
- * Is the nonprefetch window open (eg, can we allocate memory in it?)
- */
-static int
-pcib_is_nonprefetch_open(struct pcib_softc *sc)
-{
- return (sc->membase > 0 && sc->membase < sc->memlimit);
-}
-
-/*
- * Is the io window open (eg, can we allocate ports in it?)
- */
-static int
-pcib_is_io_open(struct pcib_softc *sc)
-{
- return (sc->iobase > 0 && sc->iobase < sc->iolimit);
-}
-
-/*
- * Get current I/O decode.
- */
-static void
-pcib_get_io_decode(struct pcib_softc *sc)
-{
- device_t dev;
- uint32_t iolow;
-
- dev = sc->dev;
-
- iolow = pci_read_config(dev, PCIR_IOBASEL_1, 1);
- if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
- sc->iobase = PCI_PPBIOBASE(
- pci_read_config(dev, PCIR_IOBASEH_1, 2), iolow);
- else
- sc->iobase = PCI_PPBIOBASE(0, iolow);
-
- iolow = pci_read_config(dev, PCIR_IOLIMITL_1, 1);
- if ((iolow & PCIM_BRIO_MASK) == PCIM_BRIO_32)
- sc->iolimit = PCI_PPBIOLIMIT(
- pci_read_config(dev, PCIR_IOLIMITH_1, 2), iolow);
- else
- sc->iolimit = PCI_PPBIOLIMIT(0, iolow);
-}
-
-/*
- * Get current memory decode.
- */
-static void
-pcib_get_mem_decode(struct pcib_softc *sc)
-{
- device_t dev;
- pci_addr_t pmemlow;
-
- dev = sc->dev;
-
- sc->membase = PCI_PPBMEMBASE(0,
- pci_read_config(dev, PCIR_MEMBASE_1, 2));
- sc->memlimit = PCI_PPBMEMLIMIT(0,
- pci_read_config(dev, PCIR_MEMLIMIT_1, 2));
-
- pmemlow = pci_read_config(dev, PCIR_PMBASEL_1, 2);
- if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
- sc->pmembase = PCI_PPBMEMBASE(
- pci_read_config(dev, PCIR_PMBASEH_1, 4), pmemlow);
- else
- sc->pmembase = PCI_PPBMEMBASE(0, pmemlow);
-
- pmemlow = pci_read_config(dev, PCIR_PMLIMITL_1, 2);
- if ((pmemlow & PCIM_BRPM_MASK) == PCIM_BRPM_64)
- sc->pmemlimit = PCI_PPBMEMLIMIT(
- pci_read_config(dev, PCIR_PMLIMITH_1, 4), pmemlow);
- else
- sc->pmemlimit = PCI_PPBMEMLIMIT(0, pmemlow);
-}
-
-/*
- * Restore previous I/O decode.
- */
-static void
-pcib_set_io_decode(struct pcib_softc *sc)
-{
- device_t dev;
- uint32_t iohi;
-
- dev = sc->dev;
-
- iohi = sc->iobase >> 16;
- if (iohi > 0)
- pci_write_config(dev, PCIR_IOBASEH_1, iohi, 2);
- pci_write_config(dev, PCIR_IOBASEL_1, sc->iobase >> 8, 1);
-
- iohi = sc->iolimit >> 16;
- if (iohi > 0)
- pci_write_config(dev, PCIR_IOLIMITH_1, iohi, 2);
- pci_write_config(dev, PCIR_IOLIMITL_1, sc->iolimit >> 8, 1);
-}
-
-/*
- * Restore previous memory decode.
- */
-static void
-pcib_set_mem_decode(struct pcib_softc *sc)
-{
- device_t dev;
- pci_addr_t pmemhi;
-
- dev = sc->dev;
-
- pci_write_config(dev, PCIR_MEMBASE_1, sc->membase >> 16, 2);
- pci_write_config(dev, PCIR_MEMLIMIT_1, sc->memlimit >> 16, 2);
-
- pmemhi = sc->pmembase >> 32;
- if (pmemhi > 0)
- pci_write_config(dev, PCIR_PMBASEH_1, pmemhi, 4);
- pci_write_config(dev, PCIR_PMBASEL_1, sc->pmembase >> 16, 2);
-
- pmemhi = sc->pmemlimit >> 32;
- if (pmemhi > 0)
- pci_write_config(dev, PCIR_PMLIMITH_1, pmemhi, 4);
- pci_write_config(dev, PCIR_PMLIMITL_1, sc->pmemlimit >> 16, 2);
-}
-#endif
#ifdef PCI_HP
/*
@@ -951,7 +800,10 @@ SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
&pci_enable_pcie_hp, 0,
"Enable support for native PCI-express HotPlug.");
-TASKQUEUE_DEFINE_THREAD(pci_hp);
+static sbintime_t pcie_hp_detach_timeout = 5 * SBT_1S;
+SYSCTL_SBINTIME_MSEC(_hw_pci, OID_AUTO, pcie_hp_detach_timeout, CTLFLAG_RWTUN,
+ &pcie_hp_detach_timeout,
+ "Attention Button delay for PCI-express Eject.");
static void
pcib_probe_hotplug(struct pcib_softc *sc)
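Note: pcie_hp_detach_timeout is an sbintime_t, FreeBSD's 32.32 fixed-point count of seconds, so 5 * SBT_1S is five seconds and SYSCTL_SBINTIME_MSEC exposes the tunable in milliseconds. An illustrative userland sketch of the fixed-point conversion:

        /* Illustrative userland sketch of FreeBSD's 32.32 fixed-point
         * sbintime_t; in the kernel these come from sys/time.h. */
        #include <stdint.h>
        #include <stdio.h>

        typedef int64_t sbintime_t;
        #define SBT_1S  ((sbintime_t)1 << 32)
        #define SBT_1MS (SBT_1S / 1000)

        int
        main(void)
        {
                sbintime_t timeout = 5 * SBT_1S;        /* default: 5 s */

                printf("%jd ms\n", (intmax_t)(timeout / SBT_1MS)); /* 5000 */
                return (0);
        }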
@@ -1039,7 +891,7 @@ pcib_pcie_hotplug_command(struct pcib_softc *sc, uint16_t val, uint16_t mask)
(ctl & new) & PCIEM_SLOT_CTL_CCIE) {
sc->flags |= PCIB_HOTPLUG_CMD_PENDING;
if (!cold)
- taskqueue_enqueue_timeout(taskqueue_pci_hp,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&sc->pcie_cc_task, hz);
}
}
@@ -1055,7 +907,7 @@ pcib_pcie_hotplug_command_completed(struct pcib_softc *sc)
device_printf(dev, "Command Completed\n");
if (!(sc->flags & PCIB_HOTPLUG_CMD_PENDING))
return;
- taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task, NULL);
+ taskqueue_cancel_timeout(taskqueue_bus, &sc->pcie_cc_task, NULL);
sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
wakeup(sc);
}
@@ -1078,7 +930,8 @@ pcib_hotplug_inserted(struct pcib_softc *sc)
return (false);
/* A power fault implicitly turns off power to the slot. */
- if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
+ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP &&
+ sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
return (false);
/* If the MRL is disengaged, the slot is powered off. */
@@ -1174,10 +1027,10 @@ pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
device_printf(sc->dev,
"Data Link Layer inactive\n");
else
- taskqueue_enqueue_timeout(taskqueue_pci_hp,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&sc->pcie_dll_task, hz);
} else if (sc->pcie_link_sta & PCIEM_LINK_STA_DL_ACTIVE)
- taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_dll_task,
+ taskqueue_cancel_timeout(taskqueue_bus, &sc->pcie_dll_task,
NULL);
pcib_pcie_hotplug_command(sc, val, mask);
@@ -1189,7 +1042,7 @@ pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
*/
if (schedule_task &&
(pcib_hotplug_present(sc) != 0) != (sc->child != NULL))
- taskqueue_enqueue(taskqueue_pci_hp, &sc->pcie_hp_task);
+ taskqueue_enqueue(taskqueue_bus, &sc->pcie_hp_task);
}
static void
@@ -1217,15 +1070,21 @@ pcib_pcie_intr_hotplug(void *arg)
device_printf(dev,
"Attention Button Pressed: Detach Cancelled\n");
sc->flags &= ~PCIB_DETACH_PENDING;
- taskqueue_cancel_timeout(taskqueue_pci_hp,
+ taskqueue_cancel_timeout(taskqueue_bus,
&sc->pcie_ab_task, NULL);
} else if (old_slot_sta & PCIEM_SLOT_STA_PDS) {
/* Only initiate detach sequence if device present. */
- device_printf(dev,
- "Attention Button Pressed: Detaching in 5 seconds\n");
- sc->flags |= PCIB_DETACH_PENDING;
- taskqueue_enqueue_timeout(taskqueue_pci_hp,
- &sc->pcie_ab_task, 5 * hz);
+ if (pcie_hp_detach_timeout != 0) {
+ device_printf(dev,
+ "Attention Button Pressed: Detaching in %ld ms\n",
+ (long)(pcie_hp_detach_timeout / SBT_1MS));
+ sc->flags |= PCIB_DETACH_PENDING;
+ taskqueue_enqueue_timeout_sbt(taskqueue_bus,
+ &sc->pcie_ab_task, pcie_hp_detach_timeout,
+ SBT_1S, 0);
+ } else {
+ sc->flags |= PCIB_DETACHING;
+ }
}
}
if (sc->pcie_slot_sta & PCIEM_SLOT_STA_PFD)
@@ -1264,8 +1123,8 @@ pcib_pcie_hotplug_task(void *context, int pending)
dev = sc->dev;
if (pcib_hotplug_present(sc) != 0) {
if (sc->child == NULL) {
- sc->child = device_add_child(dev, "pci", -1);
- bus_generic_attach(dev);
+ sc->child = device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
}
} else {
if (sc->child != NULL) {
@@ -1422,11 +1281,11 @@ pcib_setup_hotplug(struct pcib_softc *sc)
dev = sc->dev;
TASK_INIT(&sc->pcie_hp_task, 0, pcib_pcie_hotplug_task, sc);
- TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_ab_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &sc->pcie_ab_task, 0,
pcib_pcie_ab_timeout, sc);
- TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_cc_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &sc->pcie_cc_task, 0,
pcib_pcie_cc_timeout, sc);
- TIMEOUT_TASK_INIT(taskqueue_pci_hp, &sc->pcie_dll_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &sc->pcie_dll_task, 0,
pcib_pcie_dll_timeout, sc);
sc->pcie_hp_lock = bus_topo_mtx();
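Note: the hotplug work moves from the driver-private taskqueue_pci_hp (whose TASKQUEUE_DEFINE_THREAD is deleted above) to the generic taskqueue_bus; the lifecycle is init, enqueue with an sbintime deadline, cancel, and drain on detach. A condensed sketch of that pattern, with hypothetical softc and handler names:

        /* Condensed sketch of the timeout-task lifecycle used above;
         * sc->task and my_timeout are hypothetical. */
        TIMEOUT_TASK_INIT(taskqueue_bus, &sc->task, 0, my_timeout, sc);
        taskqueue_enqueue_timeout_sbt(taskqueue_bus, &sc->task,
            5 * SBT_1S, SBT_1S, 0);                     /* run in ~5 s */
        taskqueue_cancel_timeout(taskqueue_bus, &sc->task, NULL);
        taskqueue_drain_timeout(taskqueue_bus, &sc->task); /* on detach */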
@@ -1439,6 +1298,7 @@ pcib_setup_hotplug(struct pcib_softc *sc)
/* Clear any events previously pending. */
pcie_write_config(dev, PCIER_SLOT_STA, sc->pcie_slot_sta, 2);
+ sc->pcie_slot_sta = pcie_read_config(dev, PCIER_SLOT_STA, 2);
/* Enable HotPlug events. */
mask = PCIEM_SLOT_CTL_DLLSCE | PCIEM_SLOT_CTL_HPIE |
@@ -1472,13 +1332,13 @@ pcib_detach_hotplug(struct pcib_softc *sc)
/* Disable the card in the slot and force it to detach. */
if (sc->flags & PCIB_DETACH_PENDING) {
sc->flags &= ~PCIB_DETACH_PENDING;
- taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_ab_task,
+ taskqueue_cancel_timeout(taskqueue_bus, &sc->pcie_ab_task,
NULL);
}
sc->flags |= PCIB_DETACHING;
if (sc->flags & PCIB_HOTPLUG_CMD_PENDING) {
- taskqueue_cancel_timeout(taskqueue_pci_hp, &sc->pcie_cc_task,
+ taskqueue_cancel_timeout(taskqueue_bus, &sc->pcie_cc_task,
NULL);
tsleep(sc, 0, "hpcmd", hz);
sc->flags &= ~PCIB_HOTPLUG_CMD_PENDING;
@@ -1501,53 +1361,21 @@ pcib_detach_hotplug(struct pcib_softc *sc)
error = pcib_release_pcie_irq(sc);
if (error)
return (error);
- taskqueue_drain(taskqueue_pci_hp, &sc->pcie_hp_task);
- taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_ab_task);
- taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_cc_task);
- taskqueue_drain_timeout(taskqueue_pci_hp, &sc->pcie_dll_task);
+ taskqueue_drain(taskqueue_bus, &sc->pcie_hp_task);
+ taskqueue_drain_timeout(taskqueue_bus, &sc->pcie_ab_task);
+ taskqueue_drain_timeout(taskqueue_bus, &sc->pcie_cc_task);
+ taskqueue_drain_timeout(taskqueue_bus, &sc->pcie_dll_task);
return (0);
}
#endif
/*
- * Get current bridge configuration.
- */
-static void
-pcib_cfg_save(struct pcib_softc *sc)
-{
-#ifndef NEW_PCIB
- device_t dev;
- uint16_t command;
-
- dev = sc->dev;
-
- command = pci_read_config(dev, PCIR_COMMAND, 2);
- if (command & PCIM_CMD_PORTEN)
- pcib_get_io_decode(sc);
- if (command & PCIM_CMD_MEMEN)
- pcib_get_mem_decode(sc);
-#endif
-}
-
-/*
* Restore previous bridge configuration.
*/
static void
pcib_cfg_restore(struct pcib_softc *sc)
{
-#ifndef NEW_PCIB
- uint16_t command;
-#endif
-
-#ifdef NEW_PCIB
pcib_write_windows(sc, WIN_IO | WIN_MEM | WIN_PMEM);
-#else
- command = pci_read_config(sc->dev, PCIR_COMMAND, 2);
- if (command & PCIM_CMD_PORTEN)
- pcib_set_io_decode(sc);
- if (command & PCIM_CMD_MEMEN)
- pcib_set_mem_decode(sc);
-#endif
}
/*
@@ -1579,12 +1407,7 @@ pcib_attach_common(device_t dev)
* Get current bridge configuration.
*/
sc->domain = pci_get_domain(dev);
-#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
- sc->bus.sec = pci_read_config(dev, PCIR_SECBUS_1, 1);
- sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
-#endif
sc->bridgectl = pci_read_config(dev, PCIR_BRIDGECTL_1, 2);
- pcib_cfg_save(sc);
/*
* The primary bus register should always be the bus of the
@@ -1611,20 +1434,6 @@ pcib_attach_common(device_t dev)
* Quirk handling.
*/
switch (pci_get_devid(dev)) {
-#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
- case 0x12258086: /* Intel 82454KX/GX (Orion) */
- {
- uint8_t supbus;
-
- supbus = pci_read_config(dev, 0x41, 1);
- if (supbus != 0xff) {
- sc->bus.sec = supbus + 1;
- sc->bus.sub = supbus + 1;
- }
- break;
- }
-#endif
-
/*
* The i82380FB mobile docking controller is a PCI-PCI bridge,
* and it is a subtractive bridge. However, the ProgIf is wrong
@@ -1637,34 +1446,6 @@ pcib_attach_common(device_t dev)
case 0x060513d7: /* Toshiba ???? */
sc->flags |= PCIB_SUBTRACTIVE;
break;
-
-#if !(defined(NEW_PCIB) && defined(PCI_RES_BUS))
- /* Compaq R3000 BIOS sets wrong subordinate bus number. */
- case 0x00dd10de:
- {
- char *cp;
-
- if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
- break;
- if (strncmp(cp, "Compal", 6) != 0) {
- freeenv(cp);
- break;
- }
- freeenv(cp);
- if ((cp = kern_getenv("smbios.planar.product")) == NULL)
- break;
- if (strncmp(cp, "08A0", 4) != 0) {
- freeenv(cp);
- break;
- }
- freeenv(cp);
- if (sc->bus.sub < 0xa) {
- pci_write_config(dev, PCIR_SUBBUS_1, 0xa, 1);
- sc->bus.sub = pci_read_config(dev, PCIR_SUBBUS_1, 1);
- }
- break;
- }
-#endif
}
if (pci_msi_device_blacklisted(dev))
@@ -1688,12 +1469,8 @@ pcib_attach_common(device_t dev)
#ifdef PCI_HP
pcib_probe_hotplug(sc);
#endif
-#ifdef NEW_PCIB
-#ifdef PCI_RES_BUS
pcib_setup_secbus(dev, &sc->bus, 1);
-#endif
pcib_probe_windows(sc);
-#endif
#ifdef PCI_HP
if (sc->flags & PCIB_HOTPLUG)
pcib_setup_hotplug(sc);
@@ -1702,7 +1479,6 @@ pcib_attach_common(device_t dev)
device_printf(dev, " domain %d\n", sc->domain);
device_printf(dev, " secondary bus %d\n", sc->bus.sec);
device_printf(dev, " subordinate bus %d\n", sc->bus.sub);
-#ifdef NEW_PCIB
if (pcib_is_window_open(&sc->io))
device_printf(dev, " I/O decode 0x%jx-0x%jx\n",
(uintmax_t)sc->io.base, (uintmax_t)sc->io.limit);
@@ -1712,17 +1488,6 @@ pcib_attach_common(device_t dev)
if (pcib_is_window_open(&sc->pmem))
device_printf(dev, " prefetched decode 0x%jx-0x%jx\n",
(uintmax_t)sc->pmem.base, (uintmax_t)sc->pmem.limit);
-#else
- if (pcib_is_io_open(sc))
- device_printf(dev, " I/O decode 0x%x-0x%x\n",
- sc->iobase, sc->iolimit);
- if (pcib_is_nonprefetch_open(sc))
- device_printf(dev, " memory decode 0x%jx-0x%jx\n",
- (uintmax_t)sc->membase, (uintmax_t)sc->memlimit);
- if (pcib_is_prefetch_open(sc))
- device_printf(dev, " prefetched decode 0x%jx-0x%jx\n",
- (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
-#endif
if (sc->bridgectl & (PCIB_BCR_ISA_ENABLE | PCIB_BCR_VGA_ENABLE) ||
sc->flags & PCIB_SUBTRACTIVE) {
device_printf(dev, " special decode ");
@@ -1778,8 +1543,9 @@ pcib_attach_child(device_t dev)
}
#endif
- sc->child = device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ sc->child = device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
int
@@ -1793,15 +1559,11 @@ pcib_attach(device_t dev)
int
pcib_detach(device_t dev)
{
-#if defined(PCI_HP) || defined(NEW_PCIB)
struct pcib_softc *sc;
-#endif
int error;
-#if defined(PCI_HP) || defined(NEW_PCIB)
sc = device_get_softc(dev);
-#endif
- error = bus_generic_detach(dev);
+ error = bus_detach_children(dev);
if (error)
return (error);
#ifdef PCI_HP
@@ -1814,24 +1576,12 @@ pcib_detach(device_t dev)
error = device_delete_children(dev);
if (error)
return (error);
-#ifdef NEW_PCIB
pcib_free_windows(sc);
-#ifdef PCI_RES_BUS
pcib_free_secbus(dev, &sc->bus);
-#endif
-#endif
return (0);
}
int
-pcib_suspend(device_t dev)
-{
-
- pcib_cfg_save(device_get_softc(dev));
- return (bus_generic_suspend(dev));
-}
-
-int
pcib_resume(device_t dev)
{
@@ -1905,7 +1655,6 @@ pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
return(ENOENT);
}
-#ifdef NEW_PCIB
/*
* Attempt to allocate a resource from the existing resources assigned
* to a window.
@@ -2306,11 +2055,9 @@ pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
}
switch (type) {
-#ifdef PCI_RES_BUS
case PCI_RES_BUS:
return (pcib_alloc_subbus(&sc->bus, child, rid, start, end,
count, flags));
-#endif
case SYS_RES_IOPORT:
if (pcib_is_isa_range(sc, start, end, count))
return (NULL);
@@ -2390,7 +2137,6 @@ pcib_adjust_resource(device_t bus, device_t child, struct resource *r,
if (!pcib_is_resource_managed(sc, r))
return (bus_generic_adjust_resource(bus, child, r, start, end));
-#ifdef PCI_RES_BUS
if (type == PCI_RES_BUS) {
/*
* If our bus range isn't big enough to grow the sub-allocation
@@ -2404,9 +2150,7 @@ pcib_adjust_resource(device_t bus, device_t child, struct resource *r,
if (error != 0)
return (error);
}
- } else
-#endif
- {
+ } else {
/*
* Resource is managed and not a secondary bus number, must
* be from one of our windows.
@@ -2549,7 +2293,7 @@ pcib_map_resource(device_t dev, device_t child, struct resource *r,
args.offset = start - rman_get_start(pres);
args.length = length;
- return (bus_generic_map_resource(dev, child, pres, &args, map));
+ return (bus_map_resource(dev, pres, &args, map));
}
static int
@@ -2558,172 +2302,17 @@ pcib_unmap_resource(device_t dev, device_t child, struct resource *r,
{
struct pcib_softc *sc = device_get_softc(dev);
struct pcib_window *w;
+ struct resource *pres;
w = pcib_get_resource_window(sc, r);
- if (w != NULL) {
- r = pcib_find_parent_resource(w, r);
- if (r == NULL)
- return (ENOENT);
- }
- return (bus_generic_unmap_resource(dev, child, r, map));
-}
-#else
-/*
- * We have to trap resource allocation requests and ensure that the bridge
- * is set up to, or capable of handling them.
- */
-static struct resource *
-pcib_alloc_resource(device_t dev, device_t child, int type, int *rid,
- rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
-{
- struct pcib_softc *sc = device_get_softc(dev);
- const char *name, *suffix;
- int ok;
-
- /*
- * Fail the allocation for this range if it's not supported.
- */
- name = device_get_nameunit(child);
- if (name == NULL) {
- name = "";
- suffix = "";
- } else
- suffix = " ";
- switch (type) {
- case SYS_RES_IOPORT:
- ok = 0;
- if (!pcib_is_io_open(sc))
- break;
- ok = (start >= sc->iobase && end <= sc->iolimit);
-
- /*
- * Make sure we allow access to VGA I/O addresses when the
- * bridge has the "VGA Enable" bit set.
- */
- if (!ok && pci_is_vga_ioport_range(start, end))
- ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
-
- if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
- if (!ok) {
- if (start < sc->iobase)
- start = sc->iobase;
- if (end > sc->iolimit)
- end = sc->iolimit;
- if (start < end)
- ok = 1;
- }
- } else {
- ok = 1;
-#if 0
- /*
- * If we overlap with the subtractive range, then
- * pick the upper range to use.
- */
- if (start < sc->iolimit && end > sc->iobase)
- start = sc->iolimit + 1;
-#endif
- }
- if (end < start) {
- device_printf(dev, "ioport: end (%jx) < start (%jx)\n",
- end, start);
- start = 0;
- end = 0;
- ok = 0;
- }
- if (!ok) {
- device_printf(dev, "%s%srequested unsupported I/O "
- "range 0x%jx-0x%jx (decoding 0x%x-0x%x)\n",
- name, suffix, start, end, sc->iobase, sc->iolimit);
- return (NULL);
- }
- if (bootverbose)
- device_printf(dev,
- "%s%srequested I/O range 0x%jx-0x%jx: in range\n",
- name, suffix, start, end);
- break;
-
- case SYS_RES_MEMORY:
- ok = 0;
- if (pcib_is_nonprefetch_open(sc))
- ok = ok || (start >= sc->membase && end <= sc->memlimit);
- if (pcib_is_prefetch_open(sc))
- ok = ok || (start >= sc->pmembase && end <= sc->pmemlimit);
-
- /*
- * Make sure we allow access to VGA memory addresses when the
- * bridge has the "VGA Enable" bit set.
- */
- if (!ok && pci_is_vga_memory_range(start, end))
- ok = (sc->bridgectl & PCIB_BCR_VGA_ENABLE) ? 1 : 0;
-
- if ((sc->flags & PCIB_SUBTRACTIVE) == 0) {
- if (!ok) {
- ok = 1;
- if (flags & RF_PREFETCHABLE) {
- if (pcib_is_prefetch_open(sc)) {
- if (start < sc->pmembase)
- start = sc->pmembase;
- if (end > sc->pmemlimit)
- end = sc->pmemlimit;
- } else {
- ok = 0;
- }
- } else { /* non-prefetchable */
- if (pcib_is_nonprefetch_open(sc)) {
- if (start < sc->membase)
- start = sc->membase;
- if (end > sc->memlimit)
- end = sc->memlimit;
- } else {
- ok = 0;
- }
- }
- }
- } else if (!ok) {
- ok = 1; /* subtractive bridge: always ok */
-#if 0
- if (pcib_is_nonprefetch_open(sc)) {
- if (start < sc->memlimit && end > sc->membase)
- start = sc->memlimit + 1;
- }
- if (pcib_is_prefetch_open(sc)) {
- if (start < sc->pmemlimit && end > sc->pmembase)
- start = sc->pmemlimit + 1;
- }
-#endif
- }
- if (end < start) {
- device_printf(dev, "memory: end (%jx) < start (%jx)\n",
- end, start);
- start = 0;
- end = 0;
- ok = 0;
- }
- if (!ok && bootverbose)
- device_printf(dev,
- "%s%srequested unsupported memory range %#jx-%#jx "
- "(decoding %#jx-%#jx, %#jx-%#jx)\n",
- name, suffix, start, end,
- (uintmax_t)sc->membase, (uintmax_t)sc->memlimit,
- (uintmax_t)sc->pmembase, (uintmax_t)sc->pmemlimit);
- if (!ok)
- return (NULL);
- if (bootverbose)
- device_printf(dev,"%s%srequested memory range "
- "0x%jx-0x%jx: good\n",
- name, suffix, start, end);
- break;
+ if (w == NULL)
+ return (bus_generic_unmap_resource(dev, child, r, map));
- default:
- break;
- }
- /*
- * Bridge is OK decoding this resource, so pass it up.
- */
- return (bus_generic_alloc_resource(dev, child, type, rid, start, end,
- count, flags));
+ pres = pcib_find_parent_resource(w, r);
+ if (pres == NULL)
+ return (ENOENT);
+ return (bus_unmap_resource(dev, pres, map));
}
-#endif
/*
* If ARI is enabled on this downstream port, translate the function number
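Note: with ARI, the 5-bit device and 3-bit function fields of a routing ID merge into a single 8-bit function number and the device number reads back as 0. A sketch of that translation, assuming the standard encoding:

        /* Sketch of ARI function-number translation: the 5-bit slot and
         * 3-bit function merge into one 8-bit function; slot becomes 0. */
        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint8_t slot = 2, func = 5;
                uint8_t ari_func = (slot << 3) | func;  /* 0x15 */

                printf("ari function %u, slot 0\n", (unsigned)ari_func);
                return (0);
        }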
diff --git a/sys/dev/pci/pci_private.h b/sys/dev/pci/pci_private.h
index 6645488d4929..3577c04d7043 100644
--- a/sys/dev/pci/pci_private.h
+++ b/sys/dev/pci/pci_private.h
@@ -40,14 +40,80 @@ DECLARE_CLASS(pci_driver);
struct pci_softc {
bus_dma_tag_t sc_dma_tag;
-#ifdef PCI_RES_BUS
struct resource *sc_bus;
-#endif
};
extern int pci_do_power_resume;
extern int pci_do_power_suspend;
+
+device_attach_t pci_attach;
+device_detach_t pci_detach;
+device_resume_t pci_resume;
+
+bus_print_child_t pci_print_child;
+bus_probe_nomatch_t pci_probe_nomatch;
+bus_read_ivar_t pci_read_ivar;
+bus_write_ivar_t pci_write_ivar;
+bus_driver_added_t pci_driver_added;
+bus_setup_intr_t pci_setup_intr;
+bus_teardown_intr_t pci_teardown_intr;
+
+bus_get_dma_tag_t pci_get_dma_tag;
+bus_get_resource_list_t pci_get_resource_list;
+bus_delete_resource_t pci_delete_resource;
+bus_alloc_resource_t pci_alloc_resource;
+bus_adjust_resource_t pci_adjust_resource;
+bus_release_resource_t pci_release_resource;
+bus_activate_resource_t pci_activate_resource;
+bus_deactivate_resource_t pci_deactivate_resource;
+bus_map_resource_t pci_map_resource;
+bus_unmap_resource_t pci_unmap_resource;
+bus_child_deleted_t pci_child_deleted;
+bus_child_detached_t pci_child_detached;
+bus_child_pnpinfo_t pci_child_pnpinfo_method;
+bus_child_location_t pci_child_location_method;
+bus_get_device_path_t pci_get_device_path_method;
+bus_suspend_child_t pci_suspend_child;
+bus_resume_child_t pci_resume_child;
+bus_rescan_t pci_rescan_method;
+
+pci_read_config_t pci_read_config_method;
+pci_write_config_t pci_write_config_method;
+pci_enable_busmaster_t pci_enable_busmaster_method;
+pci_disable_busmaster_t pci_disable_busmaster_method;
+pci_enable_io_t pci_enable_io_method;
+pci_disable_io_t pci_disable_io_method;
+pci_get_vpd_ident_t pci_get_vpd_ident_method;
+pci_get_vpd_readonly_t pci_get_vpd_readonly_method;
+pci_get_powerstate_t pci_get_powerstate_method;
+pci_set_powerstate_t pci_set_powerstate_method;
+pci_assign_interrupt_t pci_assign_interrupt_method;
+pci_find_cap_t pci_find_cap_method;
+pci_find_next_cap_t pci_find_next_cap_method;
+pci_find_extcap_t pci_find_extcap_method;
+pci_find_next_extcap_t pci_find_next_extcap_method;
+pci_find_htcap_t pci_find_htcap_method;
+pci_find_next_htcap_t pci_find_next_htcap_method;
+pci_alloc_msi_t pci_alloc_msi_method;
+pci_alloc_msix_t pci_alloc_msix_method;
+pci_enable_msi_t pci_enable_msi_method;
+pci_enable_msix_t pci_enable_msix_method;
+pci_disable_msi_t pci_disable_msi_method;
+pci_remap_msix_t pci_remap_msix_method;
+pci_release_msi_t pci_release_msi_method;
+pci_msi_count_t pci_msi_count_method;
+pci_msix_count_t pci_msix_count_method;
+pci_msix_pba_bar_t pci_msix_pba_bar_method;
+pci_msix_table_bar_t pci_msix_table_bar_method;
+pci_alloc_devinfo_t pci_alloc_devinfo_method;
+pci_child_added_t pci_child_added_method;
+#ifdef PCI_IOV
+pci_iov_attach_t pci_iov_attach_method;
+pci_iov_detach_t pci_iov_detach_method;
+pci_create_iov_child_t pci_create_iov_child_method;
+#endif
+
void pci_add_children(device_t dev, int domain, int busno);
void pci_add_child(device_t bus, struct pci_devinfo *dinfo);
device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid,
@@ -55,95 +121,12 @@ device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid,
void pci_add_resources(device_t bus, device_t dev, int force,
uint32_t prefetchmask);
void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov);
-struct pci_devinfo *pci_alloc_devinfo_method(device_t dev);
-int pci_attach(device_t dev);
int pci_attach_common(device_t dev);
-int pci_detach(device_t dev);
-int pci_rescan_method(device_t dev);
-void pci_driver_added(device_t dev, driver_t *driver);
int pci_ea_is_enabled(device_t dev, int rid);
-int pci_print_child(device_t dev, device_t child);
-void pci_probe_nomatch(device_t dev, device_t child);
-int pci_read_ivar(device_t dev, device_t child, int which,
- uintptr_t *result);
-int pci_write_ivar(device_t dev, device_t child, int which,
- uintptr_t value);
-int pci_setup_intr(device_t dev, device_t child,
- struct resource *irq, int flags, driver_filter_t *filter,
- driver_intr_t *intr, void *arg, void **cookiep);
-int pci_teardown_intr(device_t dev, device_t child,
- struct resource *irq, void *cookie);
-int pci_get_vpd_ident_method(device_t dev, device_t child,
- const char **identptr);
-int pci_get_vpd_readonly_method(device_t dev, device_t child,
- const char *kw, const char **vptr);
-int pci_set_powerstate_method(device_t dev, device_t child,
- int state);
-int pci_get_powerstate_method(device_t dev, device_t child);
-uint32_t pci_read_config_method(device_t dev, device_t child,
- int reg, int width);
-void pci_write_config_method(device_t dev, device_t child,
- int reg, uint32_t val, int width);
-int pci_enable_busmaster_method(device_t dev, device_t child);
-int pci_disable_busmaster_method(device_t dev, device_t child);
-int pci_enable_io_method(device_t dev, device_t child, int space);
-int pci_disable_io_method(device_t dev, device_t child, int space);
-int pci_find_cap_method(device_t dev, device_t child,
- int capability, int *capreg);
-int pci_find_next_cap_method(device_t dev, device_t child,
- int capability, int start, int *capreg);
-int pci_find_extcap_method(device_t dev, device_t child,
- int capability, int *capreg);
-int pci_find_next_extcap_method(device_t dev, device_t child,
- int capability, int start, int *capreg);
-int pci_find_htcap_method(device_t dev, device_t child,
- int capability, int *capreg);
-int pci_find_next_htcap_method(device_t dev, device_t child,
- int capability, int start, int *capreg);
-int pci_alloc_msi_method(device_t dev, device_t child, int *count);
-int pci_alloc_msix_method(device_t dev, device_t child, int *count);
-void pci_enable_msi_method(device_t dev, device_t child,
- uint64_t address, uint16_t data);
-void pci_enable_msix_method(device_t dev, device_t child,
- u_int index, uint64_t address, uint32_t data);
-void pci_disable_msi_method(device_t dev, device_t child);
-int pci_remap_msix_method(device_t dev, device_t child,
- int count, const u_int *vectors);
-int pci_release_msi_method(device_t dev, device_t child);
-int pci_msi_count_method(device_t dev, device_t child);
-int pci_msix_count_method(device_t dev, device_t child);
-int pci_msix_pba_bar_method(device_t dev, device_t child);
-int pci_msix_table_bar_method(device_t dev, device_t child);
-struct resource *pci_alloc_resource(device_t dev, device_t child,
- int type, int *rid, rman_res_t start, rman_res_t end,
- rman_res_t count, u_int flags);
-int pci_release_resource(device_t dev, device_t child,
- struct resource *r);
-int pci_activate_resource(device_t dev, device_t child,
- struct resource *r);
-int pci_deactivate_resource(device_t dev, device_t child,
- struct resource *r);
-void pci_delete_resource(device_t dev, device_t child,
- int type, int rid);
-struct resource_list *pci_get_resource_list (device_t dev, device_t child);
struct pci_devinfo *pci_read_device(device_t pcib, device_t bus, int d, int b,
int s, int f);
void pci_print_verbose(struct pci_devinfo *dinfo);
int pci_freecfg(struct pci_devinfo *dinfo);
-void pci_child_deleted(device_t dev, device_t child);
-void pci_child_detached(device_t dev, device_t child);
-int pci_child_location_method(device_t cbdev, device_t child,
- struct sbuf *sb);
-int pci_child_pnpinfo_method(device_t cbdev, device_t child,
- struct sbuf *sb);
-int pci_get_device_path_method(device_t dev, device_t child,
- const char *locator, struct sbuf *sb);
-int pci_assign_interrupt_method(device_t dev, device_t child);
-int pci_resume(device_t dev);
-int pci_resume_child(device_t dev, device_t child);
-int pci_suspend_child(device_t dev, device_t child);
-bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev);
-void pci_child_added_method(device_t dev, device_t child);
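Note: the long list of hand-written prototypes above is replaced by declarations through the kobj method typedefs (device_attach_t and friends), which keep each prototype in lock-step with the interface definition generated from device_if.m and bus_if.m. A sketch of the idiom, with a hypothetical driver method:

        /* Sketch of the typedef-based declaration idiom; the generated
         * device_if.h provides: typedef int device_attach_t(device_t dev);
         * mydrv_attach is a hypothetical driver method. */
        device_attach_t mydrv_attach;   /* same as: int mydrv_attach(device_t); */

        int
        mydrv_attach(device_t dev)
        {
                return (0);
        }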
/** Restore the config register state. The state must be previously
* saved with pci_cfg_save. However, the pci bus driver takes care of
@@ -171,17 +154,21 @@ struct resource *pci_alloc_multi_resource(device_t dev, device_t child,
int type, int *rid, rman_res_t start, rman_res_t end,
rman_res_t count, u_long num, u_int flags);
-int pci_iov_attach_method(device_t bus, device_t dev,
- struct nvlist *pf_schema, struct nvlist *vf_schema,
- const char *name);
-int pci_iov_detach_method(device_t bus, device_t dev);
-
-device_t pci_create_iov_child_method(device_t bus, device_t pf,
- uint16_t rid, uint16_t vid, uint16_t did);
-
struct resource *pci_vf_alloc_mem_resource(device_t dev, device_t child,
int *rid, rman_res_t start, rman_res_t end,
rman_res_t count, u_int flags);
int pci_vf_release_mem_resource(device_t dev, device_t child,
struct resource *r);
+int pci_vf_activate_mem_resource(device_t dev, device_t child,
+ struct resource *r);
+int pci_vf_deactivate_mem_resource(device_t dev, device_t child,
+ struct resource *r);
+int pci_vf_adjust_mem_resource(device_t dev, device_t child,
+ struct resource *r, rman_res_t start, rman_res_t end);
+int pci_vf_map_mem_resource(device_t dev, device_t child,
+ struct resource *r, struct resource_map_request *argsp,
+ struct resource_map *map);
+int pci_vf_unmap_mem_resource(device_t dev, device_t child,
+ struct resource *r, struct resource_map *map);
+
#endif /* _PCI_PRIVATE_H_ */
diff --git a/sys/dev/pci/pci_subr.c b/sys/dev/pci/pci_subr.c
index f916e7a8da87..09633e56a24a 100644
--- a/sys/dev/pci/pci_subr.c
+++ b/sys/dev/pci/pci_subr.c
@@ -131,7 +131,6 @@ host_pcib_get_busno(pci_read_config_fn read_config, int bus, int slot, int func,
return 1;
}
-#ifdef NEW_PCIB
/*
* Return a pointer to a pretty name for a PCI device. If the device
* has a driver attached, the device's name is used, otherwise a name
@@ -284,7 +283,6 @@ pcib_host_res_adjust(struct pcib_host_resources *hr, device_t dev,
return (ERANGE);
}
-#ifdef PCI_RES_BUS
struct pci_domain {
int pd_domain;
struct rman pd_bus_rman;
@@ -412,6 +410,3 @@ pci_domain_deactivate_bus(int domain, device_t dev, struct resource *r)
#endif
return (rman_deactivate_resource(r));
}
-#endif /* PCI_RES_BUS */
-
-#endif /* NEW_PCIB */
diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c
index cdb893efa950..9768030995e7 100644
--- a/sys/dev/pci/pci_user.c
+++ b/sys/dev/pci/pci_user.c
@@ -79,6 +79,9 @@ struct pci_conf32 {
u_int8_t pc_revid; /* chip revision ID */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_int32_t pd_unit; /* device unit number */
+ int pd_numa_domain; /* device NUMA domain */
+ u_int32_t pc_reported_len;/* length of PCI data reported */
+ char pc_spare[64]; /* space for future fields */
};
struct pci_match_conf32 {
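Note: struct pci_conf32 (and its native counterpart) grows three appended fields; pc_reported_len tells userland how many bytes the kernel actually filled, and pc_spare reserves room for future fields without another ABI bump. A hypothetical consumer check, mirroring the copyout code later in this diff that sets pc_reported_len = offsetof(..., pc_spare):

        /* Hypothetical userland check: only trust pd_numa_domain when the
         * kernel reported enough bytes; pci_conf_x is a stand-in for the
         * real struct pci_conf. */
        #include <stddef.h>
        #include <stdio.h>

        struct pci_conf_x {
                int      pd_numa_domain;
                unsigned pc_reported_len;
                char     pc_spare[64];
        };

        int
        main(void)
        {
                struct pci_conf_x conf =
                    { 1, offsetof(struct pci_conf_x, pc_spare), { 0 } };

                if (conf.pc_reported_len >= offsetof(struct pci_conf_x, pc_spare))
                        printf("NUMA domain %d\n", conf.pd_numa_domain);
                return (0);
        }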
@@ -287,25 +290,25 @@ pci_conf_match32(struct pci_match_conf32 *matches, int num_matches,
#define PRE7_COMPAT
typedef enum {
- PCI_GETCONF_NO_MATCH_OLD = 0x00,
- PCI_GETCONF_MATCH_BUS_OLD = 0x01,
- PCI_GETCONF_MATCH_DEV_OLD = 0x02,
- PCI_GETCONF_MATCH_FUNC_OLD = 0x04,
- PCI_GETCONF_MATCH_NAME_OLD = 0x08,
- PCI_GETCONF_MATCH_UNIT_OLD = 0x10,
- PCI_GETCONF_MATCH_VENDOR_OLD = 0x20,
- PCI_GETCONF_MATCH_DEVICE_OLD = 0x40,
- PCI_GETCONF_MATCH_CLASS_OLD = 0x80
-} pci_getconf_flags_old;
-
-struct pcisel_old {
+ PCI_GETCONF_NO_MATCH_FREEBSD6 = 0x00,
+ PCI_GETCONF_MATCH_BUS_FREEBSD6 = 0x01,
+ PCI_GETCONF_MATCH_DEV_FREEBSD6 = 0x02,
+ PCI_GETCONF_MATCH_FUNC_FREEBSD6 = 0x04,
+ PCI_GETCONF_MATCH_NAME_FREEBSD6 = 0x08,
+ PCI_GETCONF_MATCH_UNIT_FREEBSD6 = 0x10,
+ PCI_GETCONF_MATCH_VENDOR_FREEBSD6 = 0x20,
+ PCI_GETCONF_MATCH_DEVICE_FREEBSD6 = 0x40,
+ PCI_GETCONF_MATCH_CLASS_FREEBSD6 = 0x80
+} pci_getconf_flags_freebsd6;
+
+struct pcisel_freebsd6 {
u_int8_t pc_bus; /* bus number */
u_int8_t pc_dev; /* device on this bus */
u_int8_t pc_func; /* function on this device */
};
-struct pci_conf_old {
- struct pcisel_old pc_sel; /* bus+slot+function */
+struct pci_conf_freebsd6 {
+ struct pcisel_freebsd6 pc_sel; /* bus+slot+function */
u_int8_t pc_hdr; /* PCI header type */
u_int16_t pc_subvendor; /* card vendor ID */
u_int16_t pc_subdevice; /* card device ID, assigned by
@@ -321,26 +324,26 @@ struct pci_conf_old {
u_long pd_unit; /* device unit number */
};
-struct pci_match_conf_old {
- struct pcisel_old pc_sel; /* bus+slot+function */
+struct pci_match_conf_freebsd6 {
+ struct pcisel_freebsd6 pc_sel; /* bus+slot+function */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
u_long pd_unit; /* Unit number */
u_int16_t pc_vendor; /* PCI Vendor ID */
u_int16_t pc_device; /* PCI Device ID */
u_int8_t pc_class; /* PCI class */
- pci_getconf_flags_old flags; /* Matching expression */
+ pci_getconf_flags_freebsd6 flags; /* Matching expression */
};
-struct pci_io_old {
- struct pcisel_old pi_sel; /* device to operate on */
+struct pci_io_freebsd6 {
+ struct pcisel_freebsd6 pi_sel; /* device to operate on */
int pi_reg; /* configuration register to examine */
int pi_width; /* width (in bytes) of read or write */
u_int32_t pi_data; /* data to write or result of read */
};
#ifdef COMPAT_FREEBSD32
-struct pci_conf_old32 {
- struct pcisel_old pc_sel; /* bus+slot+function */
+struct pci_conf_freebsd6_32 {
+ struct pcisel_freebsd6 pc_sel; /* bus+slot+function */
uint8_t pc_hdr; /* PCI header type */
uint16_t pc_subvendor; /* card vendor ID */
uint16_t pc_subdevice; /* card device ID, assigned by
@@ -356,25 +359,25 @@ struct pci_conf_old32 {
uint32_t pd_unit; /* device unit number (u_long) */
};
-struct pci_match_conf_old32 {
- struct pcisel_old pc_sel; /* bus+slot+function */
+struct pci_match_conf_freebsd6_32 {
+ struct pcisel_freebsd6 pc_sel; /* bus+slot+function */
char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
uint32_t pd_unit; /* Unit number (u_long) */
uint16_t pc_vendor; /* PCI Vendor ID */
uint16_t pc_device; /* PCI Device ID */
uint8_t pc_class; /* PCI class */
- pci_getconf_flags_old flags; /* Matching expression */
+ pci_getconf_flags_freebsd6 flags; /* Matching expression */
};
-#define PCIOCGETCONF_OLD32 _IOWR('p', 1, struct pci_conf_io32)
+#define PCIOCGETCONF_FREEBSD6_32 _IOWR('p', 1, struct pci_conf_io32)
#endif /* COMPAT_FREEBSD32 */
-#define PCIOCGETCONF_OLD _IOWR('p', 1, struct pci_conf_io)
-#define PCIOCREAD_OLD _IOWR('p', 2, struct pci_io_old)
-#define PCIOCWRITE_OLD _IOWR('p', 3, struct pci_io_old)
+#define PCIOCGETCONF_FREEBSD6 _IOWR('p', 1, struct pci_conf_io)
+#define PCIOCREAD_FREEBSD6 _IOWR('p', 2, struct pci_io_freebsd6)
+#define PCIOCWRITE_FREEBSD6 _IOWR('p', 3, struct pci_io_freebsd6)
static int
-pci_conf_match_old(struct pci_match_conf_old *matches, int num_matches,
+pci_conf_match_freebsd6(struct pci_match_conf_freebsd6 *matches, int num_matches,
struct pci_conf *match_buf)
{
int i;
@@ -389,7 +392,7 @@ pci_conf_match_old(struct pci_match_conf_old *matches, int num_matches,
/*
* I'm not sure why someone would do this...but...
*/
- if (matches[i].flags == PCI_GETCONF_NO_MATCH_OLD)
+ if (matches[i].flags == PCI_GETCONF_NO_MATCH_FREEBSD6)
continue;
/*
@@ -397,35 +400,35 @@ pci_conf_match_old(struct pci_match_conf_old *matches, int num_matches,
* comparison. If the comparison fails, we don't have a
* match, go on to the next item if there is one.
*/
- if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_FREEBSD6) != 0)
&& (match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_FREEBSD6) != 0)
&& (match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_FREEBSD6) != 0)
&& (match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_FREEBSD6) != 0)
&& (match_buf->pc_vendor != matches[i].pc_vendor))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_FREEBSD6) != 0)
&& (match_buf->pc_device != matches[i].pc_device))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_FREEBSD6) != 0)
&& (match_buf->pc_class != matches[i].pc_class))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_FREEBSD6) != 0)
&& (match_buf->pd_unit != matches[i].pd_unit))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_OLD) != 0)
+ if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_FREEBSD6) != 0)
&& (strncmp(matches[i].pd_name, match_buf->pd_name,
sizeof(match_buf->pd_name)) != 0))
continue;
@@ -438,7 +441,7 @@ pci_conf_match_old(struct pci_match_conf_old *matches, int num_matches,
#ifdef COMPAT_FREEBSD32
static int
-pci_conf_match_old32(struct pci_match_conf_old32 *matches, int num_matches,
+pci_conf_match_freebsd6_32(struct pci_match_conf_freebsd6_32 *matches, int num_matches,
struct pci_conf *match_buf)
{
int i;
@@ -453,7 +456,7 @@ pci_conf_match_old32(struct pci_match_conf_old32 *matches, int num_matches,
/*
* I'm not sure why someone would do this...but...
*/
- if (matches[i].flags == PCI_GETCONF_NO_MATCH_OLD)
+ if (matches[i].flags == PCI_GETCONF_NO_MATCH_FREEBSD6)
continue;
/*
@@ -461,35 +464,35 @@ pci_conf_match_old32(struct pci_match_conf_old32 *matches, int num_matches,
* comparison. If the comparison fails, we don't have a
* match, go on to the next item if there is one.
*/
- if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_BUS_FREEBSD6) != 0) &&
(match_buf->pc_sel.pc_bus != matches[i].pc_sel.pc_bus))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_DEV_FREEBSD6) != 0) &&
(match_buf->pc_sel.pc_dev != matches[i].pc_sel.pc_dev))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_FUNC_FREEBSD6) != 0) &&
(match_buf->pc_sel.pc_func != matches[i].pc_sel.pc_func))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_VENDOR_FREEBSD6) != 0) &&
(match_buf->pc_vendor != matches[i].pc_vendor))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_DEVICE_FREEBSD6) != 0) &&
(match_buf->pc_device != matches[i].pc_device))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_CLASS_FREEBSD6) != 0) &&
(match_buf->pc_class != matches[i].pc_class))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_UNIT_FREEBSD6) != 0) &&
((u_int32_t)match_buf->pd_unit != matches[i].pd_unit))
continue;
- if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_OLD) != 0) &&
+ if (((matches[i].flags & PCI_GETCONF_MATCH_NAME_FREEBSD6) != 0) &&
(strncmp(matches[i].pd_name, match_buf->pd_name,
sizeof(match_buf->pd_name)) != 0))
continue;
@@ -502,15 +505,62 @@ pci_conf_match_old32(struct pci_match_conf_old32 *matches, int num_matches,
#endif /* COMPAT_FREEBSD32 */
#endif /* !PRE7_COMPAT */
+#ifdef COMPAT_FREEBSD14
+struct pci_conf_freebsd14 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_long pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14 _IOWR('p', 5, struct pci_conf_io)
+
+#ifdef COMPAT_FREEBSD32
+struct pci_conf_freebsd14_32 {
+ struct pcisel pc_sel; /* domain+bus+slot+function */
+ u_int8_t pc_hdr; /* PCI header type */
+ u_int16_t pc_subvendor; /* card vendor ID */
+ u_int16_t pc_subdevice; /* card device ID, assigned by
+ card vendor */
+ u_int16_t pc_vendor; /* chip vendor ID */
+ u_int16_t pc_device; /* chip device ID, assigned by
+ chip vendor */
+ u_int8_t pc_class; /* chip PCI class */
+ u_int8_t pc_subclass; /* chip PCI subclass */
+ u_int8_t pc_progif; /* chip PCI programming interface */
+ u_int8_t pc_revid; /* chip revision ID */
+ char pd_name[PCI_MAXNAMELEN + 1]; /* device name */
+ u_int32_t pd_unit; /* device unit number */
+};
+#define PCIOCGETCONF_FREEBSD14_32 \
+ _IOC_NEWTYPE(PCIOCGETCONF_FREEBSD14, struct pci_conf_io32)
+#endif /* COMPAT_FREEBSD32 */
+#endif /* COMPAT_FREEBSD14 */
+
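Note: PCIOCGETCONF_FREEBSD14_32 reuses the FreeBSD 14 command's group and number but re-encodes the argument size via _IOC_NEWTYPE, FreeBSD's standard way to version an ioctl when only the argument struct changes. An illustrative sketch with made-up commands:

        /* Sketch of ioctl versioning with _IOC_NEWTYPE (FreeBSD
         * sys/ioccom.h): same group 'p' and number, different encoded
         * argument size. MYIOC_V1/MYIOC_V2 are made-up commands. */
        #include <stdio.h>
        #include <sys/ioccom.h>

        struct my_arg_v1 { int a; };
        struct my_arg_v2 { int a; long b; };

        #define MYIOC_V1 _IOWR('p', 5, struct my_arg_v1)
        #define MYIOC_V2 _IOC_NEWTYPE(MYIOC_V1, struct my_arg_v2)

        int
        main(void)
        {
                printf("v1 %#lx v2 %#lx\n",
                    (unsigned long)MYIOC_V1, (unsigned long)MYIOC_V2);
                return (0);
        }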
union pci_conf_union {
- struct pci_conf pc;
+ struct pci_conf pc;
#ifdef COMPAT_FREEBSD32
- struct pci_conf32 pc32;
+ struct pci_conf32 pc32;
+#endif
+#ifdef COMPAT_FREEBSD14
+ struct pci_conf_freebsd14 pc14;
+#ifdef COMPAT_FREEBSD32
+ struct pci_conf_freebsd14_32 pc14_32;
+#endif
#endif
#ifdef PRE7_COMPAT
- struct pci_conf_old pco;
+ struct pci_conf_freebsd6 pco;
#ifdef COMPAT_FREEBSD32
- struct pci_conf_old32 pco32;
+ struct pci_conf_freebsd6_32 pco32;
#endif
#endif
};
@@ -522,22 +572,28 @@ pci_conf_match(u_long cmd, struct pci_match_conf *matches, int num_matches,
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (pci_conf_match_native(
(struct pci_match_conf *)matches, num_matches, match_buf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (pci_conf_match32((struct pci_match_conf32 *)matches,
num_matches, match_buf));
#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
- return (pci_conf_match_old(
- (struct pci_match_conf_old *)matches, num_matches,
+ case PCIOCGETCONF_FREEBSD6:
+ return (pci_conf_match_freebsd6(
+ (struct pci_match_conf_freebsd6 *)matches, num_matches,
match_buf));
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
- return (pci_conf_match_old32(
- (struct pci_match_conf_old32 *)matches, num_matches,
+ case PCIOCGETCONF_FREEBSD6_32:
+ return (pci_conf_match_freebsd6_32(
+ (struct pci_match_conf_freebsd6_32 *)matches, num_matches,
match_buf));
#endif
#endif
@@ -645,17 +701,23 @@ pci_match_conf_size(u_long cmd)
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
return (sizeof(struct pci_match_conf));
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
return (sizeof(struct pci_match_conf32));
#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
- return (sizeof(struct pci_match_conf_old));
+ case PCIOCGETCONF_FREEBSD6:
+ return (sizeof(struct pci_match_conf_freebsd6));
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
- return (sizeof(struct pci_match_conf_old32));
+ case PCIOCGETCONF_FREEBSD6_32:
+ return (sizeof(struct pci_match_conf_freebsd6_32));
#endif
#endif
default:
@@ -675,12 +737,20 @@ pci_conf_size(u_long cmd)
case PCIOCGETCONF32:
return (sizeof(struct pci_conf32));
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ return (sizeof(struct pci_conf_freebsd14));
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+ return (sizeof(struct pci_conf_freebsd14_32));
+#endif
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
- return (sizeof(struct pci_conf_old));
+ case PCIOCGETCONF_FREEBSD6:
+ return (sizeof(struct pci_conf_freebsd6));
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
- return (sizeof(struct pci_conf_old32));
+ case PCIOCGETCONF_FREEBSD6_32:
+ return (sizeof(struct pci_conf_freebsd6_32));
#endif
#endif
default:
@@ -692,34 +762,40 @@ pci_conf_size(u_long cmd)
static void
pci_conf_io_init(struct pci_conf_io *cio, caddr_t data, u_long cmd)
{
-#if defined(COMPAT_FREEBSD32)
+#ifdef COMPAT_FREEBSD32
struct pci_conf_io32 *cio32;
#endif
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
+ case PCIOCGETCONF_FREEBSD6:
#endif
*cio = *(struct pci_conf_io *)data;
return;
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD32:
+ case PCIOCGETCONF_FREEBSD6_32:
#endif
- cio32 = (struct pci_conf_io32 *)data;
- cio->pat_buf_len = cio32->pat_buf_len;
- cio->num_patterns = cio32->num_patterns;
- cio->patterns = (void *)(uintptr_t)cio32->patterns;
- cio->match_buf_len = cio32->match_buf_len;
- cio->num_matches = cio32->num_matches;
- cio->matches = (void *)(uintptr_t)cio32->matches;
- cio->offset = cio32->offset;
- cio->generation = cio32->generation;
- cio->status = cio32->status;
- return;
+ cio32 = (struct pci_conf_io32 *)data;
+ cio->pat_buf_len = cio32->pat_buf_len;
+ cio->num_patterns = cio32->num_patterns;
+ cio->patterns = (void *)(uintptr_t)cio32->patterns;
+ cio->match_buf_len = cio32->match_buf_len;
+ cio->num_matches = cio32->num_matches;
+ cio->matches = (void *)(uintptr_t)cio32->matches;
+ cio->offset = cio32->offset;
+ cio->generation = cio32->generation;
+ cio->status = cio32->status;
+ return;
#endif
default:
@@ -733,14 +809,17 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
u_long cmd)
{
struct pci_conf_io *d_cio;
-#if defined(COMPAT_FREEBSD32)
+#ifdef COMPAT_FREEBSD32
struct pci_conf_io32 *cio32;
#endif
switch (cmd) {
case PCIOCGETCONF:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
+ case PCIOCGETCONF_FREEBSD6:
#endif
d_cio = (struct pci_conf_io *)data;
d_cio->status = cio->status;
@@ -751,8 +830,11 @@ pci_conf_io_update_data(const struct pci_conf_io *cio, caddr_t data,
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD32:
+ case PCIOCGETCONF_FREEBSD6_32:
#endif
cio32 = (struct pci_conf_io32 *)data;
@@ -781,8 +863,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
pcup->pc = *pcp;
return;
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+ memcpy(&pcup->pc14, pcp, sizeof(pcup->pc14));
+ return;
+#endif
+
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
pcup->pc32.pc_sel = pcp->pc_sel;
pcup->pc32.pc_hdr = pcp->pc_hdr;
pcup->pc32.pc_subvendor = pcp->pc_subvendor;
@@ -796,12 +887,17 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
strlcpy(pcup->pc32.pd_name, pcp->pd_name,
sizeof(pcup->pc32.pd_name));
pcup->pc32.pd_unit = (uint32_t)pcp->pd_unit;
+ if (cmd == PCIOCGETCONF32) {
+ pcup->pc32.pd_numa_domain = pcp->pd_numa_domain;
+ pcup->pc32.pc_reported_len =
+ (uint32_t)offsetof(struct pci_conf32, pc_spare);
+ }
return;
-#endif
+#endif /* COMPAT_FREEBSD32 */
#ifdef PRE7_COMPAT
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
+ case PCIOCGETCONF_FREEBSD6_32:
pcup->pco32.pc_sel.pc_bus = pcp->pc_sel.pc_bus;
pcup->pco32.pc_sel.pc_dev = pcp->pc_sel.pc_dev;
pcup->pco32.pc_sel.pc_func = pcp->pc_sel.pc_func;
@@ -820,7 +916,7 @@ pci_conf_for_copyout(const struct pci_conf *pcp, union pci_conf_union *pcup,
return;
#endif /* COMPAT_FREEBSD32 */
- case PCIOCGETCONF_OLD:
+ case PCIOCGETCONF_FREEBSD6:
pcup->pco.pc_sel.pc_bus = pcp->pc_sel.pc_bus;
pcup->pco.pc_sel.pc_dev = pcp->pc_sel.pc_dev;
pcup->pco.pc_sel.pc_func = pcp->pc_sel.pc_func;
@@ -1024,13 +1120,13 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
struct pci_map *pm;
struct pci_bar_mmap *pbm;
size_t confsz, iolen;
- int error, ionum, i, num_patterns;
+ int domain, error, ionum, i, num_patterns;
union pci_conf_union pcu;
#ifdef PRE7_COMPAT
struct pci_io iodata;
- struct pci_io_old *io_old;
+ struct pci_io_freebsd6 *io_freebsd6;
- io_old = NULL;
+ io_freebsd6 = NULL;
#endif
/*
@@ -1044,10 +1140,16 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
+ case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
+ case PCIOCGETCONF_FREEBSD6_32:
#endif
#endif
case PCIOCGETBAR:
@@ -1069,10 +1171,16 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
#ifdef COMPAT_FREEBSD32
case PCIOCGETCONF32:
#endif
+#ifdef COMPAT_FREEBSD14
+ case PCIOCGETCONF_FREEBSD14:
+#ifdef COMPAT_FREEBSD32
+ case PCIOCGETCONF_FREEBSD14_32:
+#endif
+#endif
#ifdef PRE7_COMPAT
- case PCIOCGETCONF_OLD:
+ case PCIOCGETCONF_FREEBSD6:
#ifdef COMPAT_FREEBSD32
- case PCIOCGETCONF_OLD32:
+ case PCIOCGETCONF_FREEBSD6_32:
#endif
#endif
cio = malloc(sizeof(struct pci_conf_io), M_TEMP,
@@ -1172,7 +1280,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
*/
cio->status = PCI_GETCONF_ERROR;
error = EINVAL;
- goto getconfexit;
+ goto getconfexit;
}
/*
@@ -1201,6 +1309,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
dinfo->conf.pd_unit = 0;
}
+ if (dinfo->cfg.dev != NULL &&
+ bus_get_domain(dinfo->cfg.dev, &domain) == 0)
+ dinfo->conf.pd_numa_domain = domain;
+ else
+ dinfo->conf.pd_numa_domain = 0;
+
if (pattern_buf == NULL ||
pci_conf_match(cmd, pattern_buf, num_patterns,
&dinfo->conf) == 0) {
@@ -1217,6 +1331,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
break;
}
+ dinfo->conf.pc_reported_len =
+ offsetof(struct pci_conf, pc_spare);
+
pci_conf_for_copyout(&dinfo->conf, &pcu, cmd);
error = copyout(&pcu,
(caddr_t)cio->matches +
@@ -1258,16 +1375,16 @@ getconfexit:
break;
#ifdef PRE7_COMPAT
- case PCIOCREAD_OLD:
- case PCIOCWRITE_OLD:
- io_old = (struct pci_io_old *)data;
+ case PCIOCREAD_FREEBSD6:
+ case PCIOCWRITE_FREEBSD6:
+ io_freebsd6 = (struct pci_io_freebsd6 *)data;
iodata.pi_sel.pc_domain = 0;
- iodata.pi_sel.pc_bus = io_old->pi_sel.pc_bus;
- iodata.pi_sel.pc_dev = io_old->pi_sel.pc_dev;
- iodata.pi_sel.pc_func = io_old->pi_sel.pc_func;
- iodata.pi_reg = io_old->pi_reg;
- iodata.pi_width = io_old->pi_width;
- iodata.pi_data = io_old->pi_data;
+ iodata.pi_sel.pc_bus = io_freebsd6->pi_sel.pc_bus;
+ iodata.pi_sel.pc_dev = io_freebsd6->pi_sel.pc_dev;
+ iodata.pi_sel.pc_func = io_freebsd6->pi_sel.pc_func;
+ iodata.pi_reg = io_freebsd6->pi_reg;
+ iodata.pi_width = io_freebsd6->pi_width;
+ iodata.pi_data = io_freebsd6->pi_data;
data = (caddr_t)&iodata;
/* FALLTHROUGH */
#endif
@@ -1295,7 +1412,7 @@ getconfexit:
io->pi_sel.pc_func);
if (pcidev) {
#ifdef PRE7_COMPAT
- if (cmd == PCIOCWRITE || cmd == PCIOCWRITE_OLD)
+ if (cmd == PCIOCWRITE || cmd == PCIOCWRITE_FREEBSD6)
#else
if (cmd == PCIOCWRITE)
#endif
@@ -1304,8 +1421,8 @@ getconfexit:
io->pi_data,
io->pi_width);
#ifdef PRE7_COMPAT
- else if (cmd == PCIOCREAD_OLD)
- io_old->pi_data =
+ else if (cmd == PCIOCREAD_FREEBSD6)
+ io_freebsd6->pi_data =
pci_read_config(pcidev,
io->pi_reg,
io->pi_width);
@@ -1318,8 +1435,8 @@ getconfexit:
error = 0;
} else {
#ifdef COMPAT_FREEBSD4
- if (cmd == PCIOCREAD_OLD) {
- io_old->pi_data = -1;
+ if (cmd == PCIOCREAD_FREEBSD6) {
+ io_freebsd6->pi_data = -1;
error = 0;
} else
#endif
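
A note for readers following the ioctl churn above: the renamed PCIOCGETCONF_FREEBSD6/PCIOCGETCONF_FREEBSD14 commands keep old binaries working while struct pci_conf grows the new pd_numa_domain and pc_reported_len fields. A minimal userland sketch of the native interface follows; it is not part of the patch, the field names come from <sys/pciio.h>, and the int type of pd_numa_domain is assumed.

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/pciio.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /*
     * Sketch only: walk all PCI devices via PCIOCGETCONF and print the
     * NUMA domain reported through the new pd_numa_domain field.
     */
    int
    main(void)
    {
    	struct pci_conf matches[32];
    	struct pci_conf_io pc = {
    		.match_buf_len = sizeof(matches),
    		.matches = matches,
    	};
    	int fd;

    	fd = open("/dev/pci", O_RDONLY);
    	if (fd < 0)
    		err(1, "open /dev/pci");
    	do {
    		if (ioctl(fd, PCIOCGETCONF, &pc) < 0)
    			err(1, "PCIOCGETCONF");
    		for (u_int i = 0; i < pc.num_matches; i++)
    			printf("%s%lu: NUMA domain %d\n",
    			    matches[i].pd_name,
    			    (u_long)matches[i].pd_unit,
    			    matches[i].pd_numa_domain);
    	} while (pc.status == PCI_GETCONF_MORE_DEVS);
    	close(fd);
    	return (0);
    }

The kernel updates pc.offset and pc.generation between calls, so reusing the same pci_conf_io across iterations continues the walk where the previous call stopped.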
diff --git a/sys/dev/pci/pcib_private.h b/sys/dev/pci/pcib_private.h
index 65b3ed31cf94..4c1f6e038ae1 100644
--- a/sys/dev/pci/pcib_private.h
+++ b/sys/dev/pci/pcib_private.h
@@ -35,7 +35,6 @@
#include <sys/taskqueue.h>
-#ifdef NEW_PCIB
/*
* Data structure and routines that Host to PCI bridge drivers can use
* to restrict allocations for child devices to ranges decoded by the
@@ -58,7 +57,6 @@ struct resource *pcib_host_res_alloc(struct pcib_host_resources *hr,
int pcib_host_res_adjust(struct pcib_host_resources *hr,
device_t dev, struct resource *r, rman_res_t start,
rman_res_t end);
-#endif
/*
* Export portions of generic PCI:PCI bridge support so that it can be
@@ -66,7 +64,6 @@ int pcib_host_res_adjust(struct pcib_host_resources *hr,
*/
DECLARE_CLASS(pcib_driver);
-#ifdef NEW_PCIB
#define WIN_IO 0x1
#define WIN_MEM 0x2
#define WIN_PMEM 0x4
@@ -83,18 +80,15 @@ struct pcib_window {
int step; /* log_2 of window granularity */
const char *name;
};
-#endif
struct pcib_secbus {
u_int sec;
u_int sub;
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
device_t dev;
struct rman rman;
struct resource *res;
const char *name;
int sub_reg;
-#endif
};
/*
@@ -116,18 +110,9 @@ struct pcib_softc
u_int domain; /* domain number */
u_int pribus; /* primary bus number */
struct pcib_secbus bus; /* secondary bus numbers */
-#ifdef NEW_PCIB
struct pcib_window io; /* I/O port window */
struct pcib_window mem; /* memory window */
struct pcib_window pmem; /* prefetchable memory window */
-#else
- pci_addr_t pmembase; /* base address of prefetchable memory */
- pci_addr_t pmemlimit; /* topmost address of prefetchable memory */
- pci_addr_t membase; /* base address of memory window */
- pci_addr_t memlimit; /* topmost address of memory window */
- uint32_t iobase; /* base address of port window */
- uint32_t iolimit; /* topmost address of port window */
-#endif
uint16_t bridgectl; /* bridge control register */
uint16_t pcie_link_sta;
uint16_t pcie_slot_sta;
@@ -153,7 +138,6 @@ typedef uint32_t pci_read_config_fn(int d, int b, int s, int f, int reg,
int host_pcib_get_busno(pci_read_config_fn read_config, int bus,
int slot, int func, uint8_t *busnum);
-#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
struct resource *pci_domain_alloc_bus(int domain, device_t dev, int *rid,
rman_res_t start, rman_res_t end, rman_res_t count, u_int flags);
int pci_domain_adjust_bus(int domain, device_t dev,
@@ -170,14 +154,11 @@ struct resource *pcib_alloc_subbus(struct pcib_secbus *bus, device_t child,
void pcib_free_secbus(device_t dev, struct pcib_secbus *bus);
void pcib_setup_secbus(device_t dev, struct pcib_secbus *bus,
int min_count);
-#endif
int pcib_attach(device_t dev);
int pcib_attach_child(device_t dev);
void pcib_attach_common(device_t dev);
void pcib_bridge_init(device_t dev);
-#ifdef NEW_PCIB
const char *pcib_child_name(device_t child);
-#endif
int pcib_detach(device_t dev);
int pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result);
int pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value);
diff --git a/sys/dev/pci/pcireg.h b/sys/dev/pci/pcireg.h
index 623deb8b4505..f6aaf30611e4 100644
--- a/sys/dev/pci/pcireg.h
+++ b/sys/dev/pci/pcireg.h
@@ -616,6 +616,8 @@
#define PCIM_MSICTRL_MMC_16 0x0008
#define PCIM_MSICTRL_MMC_32 0x000A
#define PCIM_MSICTRL_MSI_ENABLE 0x0001
+#define PCI_MSI_MSGNUM(ctrl) \
+ (1 << (((ctrl) & PCIM_MSICTRL_MMC_MASK) >> 1))
#define PCIR_MSI_ADDR 0x4
#define PCIR_MSI_ADDR_HIGH 0x8
#define PCIR_MSI_DATA 0x8
@@ -965,6 +967,7 @@
#define PCIM_MSIXCTRL_MSIX_ENABLE 0x8000
#define PCIM_MSIXCTRL_FUNCTION_MASK 0x4000
#define PCIM_MSIXCTRL_TABLE_SIZE 0x07FF
+#define PCI_MSIX_MSGNUM(ctrl) (((ctrl) & PCIM_MSIXCTRL_TABLE_SIZE) + 1)
#define PCIR_MSIX_TABLE 0x4
#define PCIR_MSIX_PBA 0x8
#define PCIM_MSIX_BIR_MASK 0x7
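
A quick worked decode of the two new helpers, with illustrative register values: the MSI Multiple Message Capable field stores log2 of the vector count in bits 3:1 of the control register, while the MSI-X table size field is encoded as N-1.

    uint16_t msi_ctrl = PCIM_MSICTRL_MMC_16;	/* MMC field = 0x0008 */
    uint16_t msix_ctrl = 0x0007;		/* table size field, N-1 */

    int msi_msgs = PCI_MSI_MSGNUM(msi_ctrl);	/* 1 << (0x8 >> 1) == 16 */
    int msix_msgs = PCI_MSIX_MSGNUM(msix_ctrl);	/* 7 + 1 == 8 */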
diff --git a/sys/dev/pci/pcivar.h b/sys/dev/pci/pcivar.h
index c2c1f055def9..4abb5e977346 100644
--- a/sys/dev/pci/pcivar.h
+++ b/sys/dev/pci/pcivar.h
@@ -51,9 +51,7 @@ struct pcicfg_bridge {
/* Interesting values for PCI power management */
struct pcicfg_pp {
uint16_t pp_cap; /* PCI power management capabilities */
- uint8_t pp_status; /* conf. space addr. of PM control/status reg */
- uint8_t pp_bse; /* conf. space addr. of PM BSE reg */
- uint8_t pp_data; /* conf. space addr. of PM data reg */
+ uint8_t pp_location; /* Offset of power management registers */
};
struct pci_map {
@@ -90,7 +88,6 @@ struct pcicfg_vpd {
struct pcicfg_msi {
uint16_t msi_ctrl; /* Message Control */
uint8_t msi_location; /* Offset of MSI capability registers. */
- uint8_t msi_msgnum; /* Number of messages */
int msi_alloc; /* Number of allocated messages. */
uint64_t msi_addr; /* Contents of address register. */
uint16_t msi_data; /* Contents of data register. */
@@ -111,14 +108,13 @@ struct msix_table_entry {
struct pcicfg_msix {
uint16_t msix_ctrl; /* Message Control */
- uint16_t msix_msgnum; /* Number of messages */
uint8_t msix_location; /* Offset of MSI-X capability registers. */
uint8_t msix_table_bar; /* BAR containing vector table. */
uint8_t msix_pba_bar; /* BAR containing PBA. */
uint32_t msix_table_offset;
uint32_t msix_pba_offset;
- int msix_alloc; /* Number of allocated vectors. */
- int msix_table_len; /* Length of virtual table. */
+ u_int msix_alloc; /* Number of allocated vectors. */
+ u_int msix_table_len; /* Length of virtual table. */
struct msix_table_entry *msix_table; /* Virtual table. */
struct msix_vector *msix_vectors; /* Array of allocated vectors. */
struct resource *msix_table_res; /* Resource containing vector table. */
@@ -126,8 +122,8 @@ struct pcicfg_msix {
};
struct pci_id_ofw_iommu {
+ uintptr_t xref;
uint32_t id;
- uint32_t xref;
};
/* Interesting values for HyperTransport */
@@ -501,24 +497,39 @@ pci_is_vga_memory_range(rman_res_t start, rman_res_t end)
/*
* PCI power states are as defined by ACPI:
*
- * D0 State in which device is on and running. It is receiving full
- * power from the system and delivering full functionality to the user.
- * D1 Class-specific low-power state in which device context may or may not
- * be lost. Buses in D1 cannot do anything to the bus that would force
- * devices on that bus to lose context.
- * D2 Class-specific low-power state in which device context may or may
- * not be lost. Attains greater power savings than D1. Buses in D2
- * can cause devices on that bus to lose some context. Devices in D2
- * must be prepared for the bus to be in D2 or higher.
- * D3 State in which the device is off and not running. Device context is
- * lost. Power can be removed from the device.
+ * D0 State in which device is on and running. It is receiving full
+ * power from the system and delivering full functionality to the user.
+ * D1 Class-specific low-power state in which device context may or may not
+ * be lost. Buses in D1 cannot do anything to the bus that would force
+ * devices on that bus to lose context.
+ * D2 Class-specific low-power state in which device context may or may
+ * not be lost. Attains greater power savings than D1. Buses in D2
+ * can cause devices on that bus to lose some context. Devices in D2
+ * must be prepared for the bus to be in D2 or higher.
+ * D3hot State in which the device is off and not running. Device context is
+ * lost. Power can be removed from the device.
+ * D3cold Same as D3hot, but power has been removed from the device.
*/
#define PCI_POWERSTATE_D0 0
#define PCI_POWERSTATE_D1 1
#define PCI_POWERSTATE_D2 2
-#define PCI_POWERSTATE_D3 3
+#define PCI_POWERSTATE_D3_HOT 3
+#define PCI_POWERSTATE_D3_COLD 4
+#define PCI_POWERSTATE_D3 PCI_POWERSTATE_D3_COLD
+#define PCI_POWERSTATE_MAX PCI_POWERSTATE_D3_COLD
+#define PCI_POWERSTATE_COUNT 5
#define PCI_POWERSTATE_UNKNOWN -1
+static __inline const char *
+pci_powerstate_to_str(int state)
+{
+ const char *strs[PCI_POWERSTATE_COUNT] = {"D0", "D1", "D2", "D3hot",
+ "D3cold"};
+
+ MPASS(state >= PCI_POWERSTATE_D0 && state <= PCI_POWERSTATE_MAX);
+ return (strs[state]);
+}
+
static __inline int
pci_set_powerstate(device_t dev, int state)
{
@@ -670,6 +681,7 @@ device_t pci_find_dbsf(uint32_t, uint8_t, uint8_t, uint8_t);
device_t pci_find_device(uint16_t, uint16_t);
device_t pci_find_class(uint8_t class, uint8_t subclass);
device_t pci_find_class_from(uint8_t class, uint8_t subclass, device_t devfrom);
+device_t pci_find_base_class_from(uint8_t class, device_t devfrom);
/* Can be used by drivers to manage the MSI-X table. */
int pci_pending_msix(device_t dev, u_int index);
@@ -687,6 +699,9 @@ void pci_restore_state(device_t dev);
void pci_save_state(device_t dev);
int pci_set_max_read_req(device_t dev, int size);
int pci_power_reset(device_t dev);
+void pci_clear_pme(device_t dev);
+void pci_enable_pme(device_t dev);
+bool pci_has_pm(device_t dev);
uint32_t pcie_read_config(device_t dev, int reg, int width);
void pcie_write_config(device_t dev, int reg, uint32_t value, int width);
uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask,
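
A hedged sketch of the new string helper in use, assuming dev is a PCI child device and using the existing pci_get_powerstate() accessor. Note that the MPASS in pci_powerstate_to_str() rejects PCI_POWERSTATE_UNKNOWN, so callers must filter that value first:

    int state = pci_get_powerstate(dev);

    if (state != PCI_POWERSTATE_UNKNOWN)
    	device_printf(dev, "power state %s\n",
    	    pci_powerstate_to_str(state));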
diff --git a/sys/dev/pci/vga_pci.c b/sys/dev/pci/vga_pci.c
index 16ee93bc8d99..09166c0cbea6 100644
--- a/sys/dev/pci/vga_pci.c
+++ b/sys/dev/pci/vga_pci.c
@@ -366,11 +366,11 @@ static int
vga_pci_attach(device_t dev)
{
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/* Always create a drmn child for now to make it easier on drm. */
- device_add_child(dev, "drmn", -1);
- bus_generic_attach(dev);
+ device_add_child(dev, "drmn", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
if (vga_pci_is_boot_display(dev))
device_printf(dev, "Boot video device\n");
@@ -378,31 +378,6 @@ vga_pci_attach(device_t dev)
return (0);
}
-static int
-vga_pci_suspend(device_t dev)
-{
-
- return (bus_generic_suspend(dev));
-}
-
-static int
-vga_pci_detach(device_t dev)
-{
- int error;
-
- error = bus_generic_detach(dev);
- if (error == 0)
- error = device_delete_children(dev);
- return (error);
-}
-
-static int
-vga_pci_resume(device_t dev)
-{
-
- return (bus_generic_resume(dev));
-}
-
/* Bus interface. */
static int
@@ -732,9 +707,9 @@ static device_method_t vga_pci_methods[] = {
DEVMETHOD(device_probe, vga_pci_probe),
DEVMETHOD(device_attach, vga_pci_attach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, vga_pci_suspend),
- DEVMETHOD(device_detach, vga_pci_detach),
- DEVMETHOD(device_resume, vga_pci_resume),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_resume, bus_generic_resume),
/* Bus interface */
DEVMETHOD(bus_read_ivar, vga_pci_read_ivar),
diff --git a/sys/dev/pms/RefTisa/sallsdk/spc/sampirsp.c b/sys/dev/pms/RefTisa/sallsdk/spc/sampirsp.c
index 541940c5fda8..6a45e954d315 100644
--- a/sys/dev/pms/RefTisa/sallsdk/spc/sampirsp.c
+++ b/sys/dev/pms/RefTisa/sallsdk/spc/sampirsp.c
@@ -7217,7 +7217,7 @@ GLOBAL bit32 mpiDekManagementRsp(
agEvent.encryptOperation = OSSA_HW_ENCRYPT_DEK_INVALIDTE;
}
agEvent.status = status;
- if (status == OSSA_MPI_ENC_ERR_ILLEGAL_DEK_PARAM || OSSA_MPI_ERR_DEK_MANAGEMENT_DEK_UNWRAP_FAIL)
+ if (status == OSSA_MPI_ENC_ERR_ILLEGAL_DEK_PARAM || status == OSSA_MPI_ERR_DEK_MANAGEMENT_DEK_UNWRAP_FAIL)
{
agEvent.eq = errorQualifier;
}
diff --git a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
index c24d03a178ee..cd1b80c3d712 100644
--- a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
+++ b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c
@@ -318,13 +318,6 @@ int agtiapi_getdevlist( struct agtiapi_softc *pCard,
sizeof(void *) );
AGTIAPI_PRINTK("agtiapi_getdevlist: portCount %d\n", pCard->portCount);
devList = malloc(memNeeded1, TEMP2, M_WAITOK);
- if (devList == NULL)
- {
- AGTIAPI_PRINTK("agtiapi_getdevlist: failed to allocate memory\n");
- ret_val = IOCTL_CALL_FAIL;
- agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR;
- return ret_val;
- }
osti_memset(devList, 0, memNeeded1);
pPortalData = &pCard->pPortalData[0];
pDeviceHandleList = (bit8*)devList;
@@ -970,13 +963,8 @@ static int agtiapi_attach( device_t devx )
}
else
{
- pmsc->pPortalData = (ag_portal_data_t *)
- malloc( sizeof(ag_portal_data_t) * pmsc->portCount,
+ pmsc->pPortalData = malloc( sizeof(ag_portal_data_t) * pmsc->portCount,
M_PMC_MPRT, M_ZERO | M_WAITOK );
- if (pmsc->pPortalData == NULL)
- {
- AGTIAPI_PRINTK( "agtiapi_attach: Portal memory allocation ERROR\n" );
- }
}
pPortalData = pmsc->pPortalData;
@@ -1227,32 +1215,14 @@ STATIC agBOOLEAN agtiapi_InitCardHW( struct agtiapi_softc *pmsc )
pmsc->flags |= AGTIAPI_SYS_INTR_ON;
numVal = sizeof(ag_device_t) * pmsc->devDiscover;
- pmsc->pDevList =
- (ag_device_t *)malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK );
- if( !pmsc->pDevList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d DevList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pDevList = malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK );
#ifdef LINUX_PERBI_SUPPORT
numVal = sizeof(ag_slr_map_t) * pmsc->devDiscover;
- pmsc->pSLRList =
- (ag_slr_map_t *)malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK );
- if( !pmsc->pSLRList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d SLRList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW SLRL\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pSLRList = malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK );
numVal = sizeof(ag_tgt_map_t) * pmsc->devDiscover;
- pmsc->pWWNList =
- (ag_tgt_map_t *)malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK );
- if( !pmsc->pWWNList ) {
- AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d WWNList ERROR\n", numVal );
- panic( "agtiapi_InitCardHW WWNL\n" );
- return AGTIAPI_FAIL;
- }
+ pmsc->pWWNList = malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK );
// Get the WWN_to_target_ID mappings from the
// holding area which contains the input of the
@@ -6478,7 +6448,6 @@ int agtiapi_ReleaseHBA( device_t dev )
pCard->pPortalData = NULL;
AGTIAPI_PRINTK("agtiapi_ReleaseHBA: PortalData released\n");
}
- //calls contigfree() or free()
agtiapi_MemFree(pCardInfo);
AGTIAPI_PRINTK("agtiapi_ReleaseHBA: low level resource released\n");
diff --git a/sys/dev/ppbus/if_plip.c b/sys/dev/ppbus/if_plip.c
index a9ef8dc016b7..16139139c0e7 100644
--- a/sys/dev/ppbus/if_plip.c
+++ b/sys/dev/ppbus/if_plip.c
@@ -214,9 +214,9 @@ lp_identify(driver_t *driver, device_t parent)
{
device_t dev;
- dev = device_find_child(parent, "plip", -1);
+ dev = device_find_child(parent, "plip", DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, "plip", -1);
+ BUS_ADD_CHILD(parent, 0, "plip", DEVICE_UNIT_ANY);
}
static int
@@ -249,10 +249,6 @@ lp_attach(device_t dev)
}
ifp = lp->sc_ifp = if_alloc(IFT_PARA);
- if (ifp == NULL) {
- return (ENOSPC);
- }
-
if_setsoftc(ifp, lp);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setmtu(ifp, LPMTU);
diff --git a/sys/dev/ppbus/lpbb.c b/sys/dev/ppbus/lpbb.c
index 560c7009b370..3d2253ed9378 100644
--- a/sys/dev/ppbus/lpbb.c
+++ b/sys/dev/ppbus/lpbb.c
@@ -61,9 +61,9 @@ lpbb_identify(driver_t *driver, device_t parent)
device_t dev;
- dev = device_find_child(parent, "lpbb", -1);
+ dev = device_find_child(parent, "lpbb", DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, "lpbb", -1);
+ BUS_ADD_CHILD(parent, 0, "lpbb", DEVICE_UNIT_ANY);
}
static int
@@ -85,7 +85,7 @@ lpbb_attach(device_t dev)
device_t bitbang;
/* add generic bit-banging code */
- bitbang = device_add_child(dev, "iicbb", -1);
+ bitbang = device_add_child(dev, "iicbb", DEVICE_UNIT_ANY);
device_probe_and_attach(bitbang);
return (0);
diff --git a/sys/dev/ppbus/lpt.c b/sys/dev/ppbus/lpt.c
index 644e0f08008b..401e94d25727 100644
--- a/sys/dev/ppbus/lpt.c
+++ b/sys/dev/ppbus/lpt.c
@@ -100,7 +100,7 @@ static int volatile lptflag = 1;
#define LPINITRDY 4 /* wait up to 4 seconds for a ready */
#define LPTOUTINITIAL 10 /* initial timeout to wait for ready 1/10 s */
#define LPTOUTMAX 1 /* maximal timeout 1 s */
-#define LPPRI (PZERO+8)
+#define LPPRI (PWAIT)
#define BUFSIZE 1024
#define BUFSTATSIZE 32
@@ -238,8 +238,7 @@ lpt_port_test(device_t ppbus, u_char data, u_char mask)
do {
DELAY(10);
temp = ppb_rdtr(ppbus) & mask;
- }
- while (temp != data && --timeout);
+ } while (temp != data && --timeout);
lprintf(("out=%x\tin=%x\ttout=%d\n", data, temp, timeout));
return (temp == data);
}
@@ -337,9 +336,9 @@ lpt_identify(driver_t *driver, device_t parent)
device_t dev;
- dev = device_find_child(parent, LPT_NAME, -1);
+ dev = device_find_child(parent, LPT_NAME, DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, LPT_NAME, -1);
+ BUS_ADD_CHILD(parent, 0, LPT_NAME, DEVICE_UNIT_ANY);
}
/*
@@ -560,9 +559,7 @@ lptopen(struct cdev *dev, int flags, int fmt, struct thread *td)
}
/* is printer online and ready for output */
- } while ((ppb_rstr(ppbus) &
- (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) !=
- (LPS_SEL|LPS_NBSY|LPS_NERR));
+ } while ((ppb_rstr(ppbus) & RDY_MASK) != LP_READY);
sc->sc_control = LPC_SEL|LPC_NINIT;
if (sc->sc_flags & LP_AUTOLF)
@@ -619,9 +616,7 @@ lptclose(struct cdev *dev, int flags, int fmt, struct thread *td)
/* if the last write was interrupted, don't complete it */
if ((!(sc->sc_state & INTERRUPTED)) && (sc->sc_irq & LP_USE_IRQ))
- while ((ppb_rstr(ppbus) &
- (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) !=
- (LPS_SEL|LPS_NBSY|LPS_NERR) || sc->sc_xfercnt)
+ while ((ppb_rstr(ppbus) & RDY_MASK) != LP_READY || sc->sc_xfercnt)
/* wait 1 second, give up if we get a signal */
if (ppb_sleep(ppbus, lptdev, LPPRI | PCATCH, "lpclose",
hz) != EWOULDBLOCK)
diff --git a/sys/dev/ppbus/pcfclock.c b/sys/dev/ppbus/pcfclock.c
index e0d2e71b49e5..47a1a010a311 100644
--- a/sys/dev/ppbus/pcfclock.c
+++ b/sys/dev/ppbus/pcfclock.c
@@ -111,9 +111,9 @@ pcfclock_identify(driver_t *driver, device_t parent)
device_t dev;
- dev = device_find_child(parent, PCFCLOCK_NAME, -1);
+ dev = device_find_child(parent, PCFCLOCK_NAME, DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, PCFCLOCK_NAME, -1);
+ BUS_ADD_CHILD(parent, 0, PCFCLOCK_NAME, DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/ppbus/ppb_msq.c b/sys/dev/ppbus/ppb_msq.c
index 1e2cb17640a2..d43f30374170 100644
--- a/sys/dev/ppbus/ppb_msq.c
+++ b/sys/dev/ppbus/ppb_msq.c
@@ -29,7 +29,7 @@
*/
#include <sys/cdefs.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <sys/param.h>
#include <sys/lock.h>
diff --git a/sys/dev/ppbus/ppbconf.c b/sys/dev/ppbus/ppbconf.c
index 469b2ecd045f..2c3f17318747 100644
--- a/sys/dev/ppbus/ppbconf.c
+++ b/sys/dev/ppbus/ppbconf.c
@@ -404,7 +404,7 @@ ppbus_attach(device_t dev)
}
/* Locate our children */
- bus_generic_probe(dev);
+ bus_identify_children(dev);
#ifndef DONTPROBE_1284
/* detect IEEE1284 compliant devices */
@@ -414,22 +414,7 @@ ppbus_attach(device_t dev)
#endif /* !DONTPROBE_1284 */
/* launch attachment of the added children */
- bus_generic_attach(dev);
-
- return (0);
-}
-
-static int
-ppbus_detach(device_t dev)
-{
- int error;
-
- error = bus_generic_detach(dev);
- if (error)
- return (error);
-
- /* detach & delete all children */
- device_delete_children(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -578,7 +563,7 @@ static device_method_t ppbus_methods[] = {
/* device interface */
DEVMETHOD(device_probe, ppbus_probe),
DEVMETHOD(device_attach, ppbus_attach),
- DEVMETHOD(device_detach, ppbus_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
/* bus interface */
DEVMETHOD(bus_add_child, ppbus_add_child),
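
The same conversion recurs across the ppbus, ppc, pst, puc, and vga_pci changes in this diff: bus_generic_probe() becomes bus_identify_children(), the magic unit -1 becomes DEVICE_UNIT_ANY, and bus_generic_attach() becomes bus_attach_children(). A condensed sketch of the target pattern for a hypothetical foo(4) bus driver:

    static int
    foo_attach(device_t dev)
    {
    	/* Let child drivers identify themselves, then attach them. */
    	bus_identify_children(dev);
    	(void)device_add_child(dev, "foochild", DEVICE_UNIT_ANY);
    	bus_attach_children(dev);
    	return (0);
    }

    static int
    foo_detach(device_t dev)
    {
    	/* bus_generic_detach() now detaches and deletes children. */
    	return (bus_generic_detach(dev));
    }

Because bus_generic_detach() now reaps children itself, the explicit device_delete_children() calls removed in this diff are redundant rather than load-bearing.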
diff --git a/sys/dev/ppbus/ppbconf.h b/sys/dev/ppbus/ppbconf.h
index 0f6395121fba..66f45634ea6d 100644
--- a/sys/dev/ppbus/ppbconf.h
+++ b/sys/dev/ppbus/ppbconf.h
@@ -63,7 +63,7 @@
/*
* Parallel Port Bus sleep/wakeup queue.
*/
-#define PPBPRI (PZERO+8)
+#define PPBPRI (PWAIT)
/*
* Parallel Port Chipset mode masks.
diff --git a/sys/dev/ppbus/ppi.c b/sys/dev/ppbus/ppi.c
index 3789da6fbed5..3fd5f43a4a3a 100644
--- a/sys/dev/ppbus/ppi.c
+++ b/sys/dev/ppbus/ppi.c
@@ -135,9 +135,9 @@ ppi_identify(driver_t *driver, device_t parent)
device_t dev;
- dev = device_find_child(parent, "ppi", -1);
+ dev = device_find_child(parent, "ppi", DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, "ppi", -1);
+ BUS_ADD_CHILD(parent, 0, "ppi", DEVICE_UNIT_ANY);
}
/*
diff --git a/sys/dev/ppbus/pps.c b/sys/dev/ppbus/pps.c
index a5c0a56d4f94..80581e3beae7 100644
--- a/sys/dev/ppbus/pps.c
+++ b/sys/dev/ppbus/pps.c
@@ -76,9 +76,9 @@ ppsidentify(driver_t *driver, device_t parent)
device_t dev;
- dev = device_find_child(parent, PPS_NAME, -1);
+ dev = device_find_child(parent, PPS_NAME, DEVICE_UNIT_ANY);
if (!dev)
- BUS_ADD_CHILD(parent, 0, PPS_NAME, -1);
+ BUS_ADD_CHILD(parent, 0, PPS_NAME, DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/ppc/ppc.c b/sys/dev/ppc/ppc.c
index e5ef392c4063..de75f4747709 100644
--- a/sys/dev/ppc/ppc.c
+++ b/sys/dev/ppc/ppc.c
@@ -1389,7 +1389,7 @@ ppc_exec_microseq(device_t dev, struct ppb_microseq **p_msq)
/* let's suppose the next instr. is the same */
prefetch:
- for (;mi->opcode == MS_OP_RASSERT; INCR_PC)
+ for (; mi->opcode == MS_OP_RASSERT; INCR_PC)
w_reg(mi->arg[0].i, ppc, (char)mi->arg[1].i);
if (mi->opcode == MS_OP_DELAY) {
@@ -1801,7 +1801,7 @@ ppc_attach(device_t dev)
}
/* add ppbus as a child of this isa to parallel bridge */
- ppc->ppbus = device_add_child(dev, "ppbus", -1);
+ ppc->ppbus = device_add_child(dev, "ppbus", DEVICE_UNIT_ANY);
/*
* Probe the ppbus and attach devices found.
@@ -1815,13 +1815,16 @@ int
ppc_detach(device_t dev)
{
struct ppc_data *ppc = DEVTOSOFTC(dev);
+ int error;
if (ppc->res_irq == 0) {
return (ENXIO);
}
/* detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (ppc->res_irq != 0) {
bus_teardown_intr(dev, ppc->res_irq, ppc->intr_cookie);
diff --git a/sys/dev/proto/proto_bus_isa.c b/sys/dev/proto/proto_bus_isa.c
index 0e4f2a29b429..39ad126bf7b7 100644
--- a/sys/dev/proto/proto_bus_isa.c
+++ b/sys/dev/proto/proto_bus_isa.c
@@ -32,7 +32,6 @@
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
-#include <sys/sbuf.h>
#include <isa/isavar.h>
#include <isa/pnpvar.h>
@@ -62,7 +61,6 @@ static char **proto_isa_devnames;
static int
proto_isa_probe(device_t dev)
{
- struct sbuf *sb;
struct resource *res;
int rid, type;
@@ -76,11 +74,7 @@ proto_isa_probe(device_t dev)
if (res == NULL)
return (ENODEV);
- sb = sbuf_new_auto();
- sbuf_printf(sb, "%s:%#jx", proto_isa_prefix, rman_get_start(res));
- sbuf_finish(sb);
- device_set_desc_copy(dev, sbuf_data(sb));
- sbuf_delete(sb);
+ device_set_descf(dev, "%s:%#jx", proto_isa_prefix, rman_get_start(res));
bus_release_resource(dev, type, rid, res);
return (proto_probe(dev, proto_isa_prefix, &proto_isa_devnames));
}
diff --git a/sys/dev/proto/proto_bus_pci.c b/sys/dev/proto/proto_bus_pci.c
index 9a98443845f5..ebcfd6d4711c 100644
--- a/sys/dev/proto/proto_bus_pci.c
+++ b/sys/dev/proto/proto_bus_pci.c
@@ -32,7 +32,6 @@
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
-#include <sys/sbuf.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -62,17 +61,12 @@ static char **proto_pci_devnames;
static int
proto_pci_probe(device_t dev)
{
- struct sbuf *sb;
-
if ((pci_read_config(dev, PCIR_HDRTYPE, 1) & PCIM_HDRTYPE) != 0)
return (ENXIO);
- sb = sbuf_new_auto();
- sbuf_printf(sb, "%s%d:%d:%d:%d", proto_pci_prefix, pci_get_domain(dev),
- pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev));
- sbuf_finish(sb);
- device_set_desc_copy(dev, sbuf_data(sb));
- sbuf_delete(sb);
+ device_set_descf(dev, "%s%d:%d:%d:%d", proto_pci_prefix,
+ pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
+ pci_get_function(dev));
return (proto_probe(dev, proto_pci_prefix, &proto_pci_devnames));
}
diff --git a/sys/dev/proto/proto_core.c b/sys/dev/proto/proto_core.c
index 2a7fe3bf9f33..88963f9154b6 100644
--- a/sys/dev/proto/proto_core.c
+++ b/sys/dev/proto/proto_core.c
@@ -38,11 +38,11 @@
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/reboot.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/uio.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/pci/pcivar.h>
diff --git a/sys/dev/psci/psci.c b/sys/dev/psci/psci.c
index e1e9c1880b54..497b23d2d4c3 100644
--- a/sys/dev/psci/psci.c
+++ b/sys/dev/psci/psci.c
@@ -71,6 +71,7 @@
struct psci_softc {
device_t dev;
+ device_t smccc_dev;
uint32_t psci_version;
uint32_t psci_fnids[PSCI_FN_MAX];
@@ -341,11 +342,16 @@ psci_attach(device_t dev, psci_initfn_t psci_init, int default_version)
if (psci_init(dev, default_version))
return (ENXIO);
+ psci_softc = sc;
+
#ifdef __aarch64__
smccc_init();
-#endif
+ sc->smccc_dev = device_add_child(dev, "smccc", DEVICE_UNIT_ANY);
+ if (sc->smccc_dev == NULL)
+ device_printf(dev, "Unable to add SMCCC device\n");
- psci_softc = sc;
+ bus_attach_children(dev);
+#endif
return (0);
}
@@ -378,12 +384,18 @@ psci_fdt_callfn(psci_callfn_t *callfn)
{
phandle_t node;
- node = ofw_bus_find_compatible(OF_peer(0), "arm,psci-0.2");
- if (node == 0) {
- node = ofw_bus_find_compatible(OF_peer(0), "arm,psci-1.0");
- if (node == 0)
- return (PSCI_MISSING);
+	/*
+	 * XXX: This is suboptimal; we should walk the tree and check each
+	 * node against compat_data, but we only have a few entries, so
+	 * it's OK for now.
+	 */
+ for (int i = 0; compat_data[i].ocd_str != NULL; i++) {
+ node = ofw_bus_find_compatible(OF_peer(0),
+ compat_data[i].ocd_str);
+ if (node != 0)
+ break;
}
+ if (node == 0)
+ return (PSCI_MISSING);
if (!ofw_bus_node_status_okay(node))
return (PSCI_MISSING);
diff --git a/sys/dev/psci/smccc.c b/sys/dev/psci/smccc.c
index 08ad6d84fc3c..e40a60336d98 100644
--- a/sys/dev/psci/smccc.c
+++ b/sys/dev/psci/smccc.c
@@ -34,7 +34,9 @@
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/bus.h>
#include <sys/kernel.h>
+#include <sys/module.h>
#include <dev/psci/psci.h>
#include <dev/psci/smccc.h>
@@ -66,6 +68,33 @@ smccc_init(void)
}
}
+static int
+smccc_probe(device_t dev)
+{
+ int32_t version;
+
+ /*
+ * If the version is not implemented then we treat it as SMCCC 1.0
+ */
+ if (psci_features(SMCCC_VERSION) == PSCI_RETVAL_NOT_SUPPORTED ||
+ (version = arm_smccc_invoke(SMCCC_VERSION, NULL)) <= 0) {
+ device_set_desc(dev, "ARM SMCCC v1.0");
+ return (0);
+ }
+
+ device_set_descf(dev, "ARM SMCCC v%d.%d", SMCCC_VERSION_MAJOR(version),
+ SMCCC_VERSION_MINOR(version));
+
+ return (0);
+}
+
+static int
+smccc_attach(device_t dev)
+{
+ bus_attach_children(dev);
+ return (0);
+}
+
uint32_t
smccc_get_version(void)
{
@@ -79,9 +108,9 @@ smccc_arch_features(uint32_t smccc_func_id)
MPASS(smccc_version != 0);
if (smccc_version == SMCCC_VERSION_1_0)
- return (PSCI_RETVAL_NOT_SUPPORTED);
+ return (SMCCC_RET_NOT_SUPPORTED);
- return (psci_call(SMCCC_ARCH_FEATURES, smccc_func_id, 0, 0));
+ return (arm_smccc_invoke(SMCCC_ARCH_FEATURES, smccc_func_id, NULL));
}
/*
@@ -95,7 +124,7 @@ smccc_arch_workaround_1(void)
MPASS(smccc_version != 0);
KASSERT(smccc_version != SMCCC_VERSION_1_0,
("SMCCC arch workaround 1 called with an invalid SMCCC interface"));
- return (psci_call(SMCCC_ARCH_WORKAROUND_1, 0, 0, 0));
+ return (arm_smccc_invoke(SMCCC_ARCH_WORKAROUND_1, NULL));
}
int
@@ -105,5 +134,23 @@ smccc_arch_workaround_2(int enable)
MPASS(smccc_version != 0);
KASSERT(smccc_version != SMCCC_VERSION_1_0,
("SMCCC arch workaround 2 called with an invalid SMCCC interface"));
- return (psci_call(SMCCC_ARCH_WORKAROUND_2, enable, 0, 0));
+ return (arm_smccc_invoke(SMCCC_ARCH_WORKAROUND_2, enable, NULL));
}
+
+static device_method_t smccc_methods[] = {
+ DEVMETHOD(device_probe, smccc_probe),
+ DEVMETHOD(device_attach, smccc_attach),
+
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+
+ DEVMETHOD_END
+};
+
+static driver_t smccc_driver = {
+ "smccc",
+ smccc_methods,
+ 0,
+};
+
+EARLY_DRIVER_MODULE(smccc, psci, smccc_driver, 0, 0,
+ BUS_PASS_CPU + BUS_PASS_ORDER_FIRST);
diff --git a/sys/dev/psci/smccc.h b/sys/dev/psci/smccc.h
index 96527f037d78..b9e878d7c8ea 100644
--- a/sys/dev/psci/smccc.h
+++ b/sys/dev/psci/smccc.h
@@ -32,6 +32,7 @@
#ifndef _PSCI_SMCCC_H_
#define _PSCI_SMCCC_H_
+#define SMCCC_MAKE_VERSION(maj, min) ((maj) << 16 | (min))
#define SMCCC_VERSION_MAJOR(ver) (((ver) >> 16) & 0x7fff)
#define SMCCC_VERSION_MINOR(ver) ((ver) & 0xffff)
@@ -91,6 +92,38 @@ int arm_smccc_smc(register_t, register_t, register_t, register_t, register_t,
int arm_smccc_hvc(register_t, register_t, register_t, register_t, register_t,
register_t, register_t, register_t, struct arm_smccc_res *res);
+#define arm_smccc_invoke_1(func, a0, res) \
+ func(a0, 0, 0, 0, 0, 0, 0, 0, res)
+#define arm_smccc_invoke_2(func, a0, a1, res) \
+ func(a0, a1, 0, 0, 0, 0, 0, 0, res)
+#define arm_smccc_invoke_3(func, a0, a1, a2, res) \
+ func(a0, a1, a2, 0, 0, 0, 0, 0, res)
+#define arm_smccc_invoke_4(func, a0, a1, a2, a3, res) \
+ func(a0, a1, a2, a3, 0, 0, 0, 0, res)
+#define arm_smccc_invoke_5(func, a0, a1, a2, a3, a4, res) \
+ func(a0, a1, a2, a3, a4, 0, 0, 0, res)
+#define arm_smccc_invoke_6(func, a0, a1, a2, a3, a4, a5, res) \
+ func(a0, a1, a2, a3, a4, a5, 0, 0, res)
+#define arm_smccc_invoke_7(func, a0, a1, a2, a3, a4, a5, a6, res) \
+ func(a0, a1, a2, a3, a4, a5, a6, 0, res)
+#define arm_smccc_invoke_8(func, a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ func(a0, a1, a2, a3, a4, a5, a6, a7, res)
+
+#define _arm_smccc_invoke_macro(_1, _2, _3, _4, _5, _6, _7, _8, NAME, ...) \
+ NAME
+#define _arm_smccc_invoke(func, a0, ...) \
+ _arm_smccc_invoke_macro(__VA_ARGS__, arm_smccc_invoke_8, \
+ arm_smccc_invoke_7, arm_smccc_invoke_6, arm_smccc_invoke_5, \
+ arm_smccc_invoke_4, arm_smccc_invoke_3, arm_smccc_invoke_2, \
+ arm_smccc_invoke_1)(func, a0, __VA_ARGS__)
+
+#define arm_smccc_invoke_hvc(a0, ...) \
+ _arm_smccc_invoke(arm_smccc_hvc, a0, __VA_ARGS__)
+#define arm_smccc_invoke_smc(a0, ...) \
+ _arm_smccc_invoke(arm_smccc_smc, a0, __VA_ARGS__)
+#define arm_smccc_invoke(a0, ...) \
+ _arm_smccc_invoke(psci_callfn, a0, __VA_ARGS__)
+
struct arm_smccc_1_2_regs {
register_t a0;
register_t a1;
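
The argument-counting machinery above selects arm_smccc_invoke_N from the number of variadic arguments and zero-pads the rest, so conduit calls no longer spell out unused registers by hand. Illustrative calls, mirroring the smccc.c conversions earlier in this diff:

    struct arm_smccc_res res;
    int32_t version;

    /* Expands to psci_callfn(SMCCC_VERSION, 0, 0, 0, 0, 0, 0, 0, &res). */
    version = arm_smccc_invoke(SMCCC_VERSION, &res);

    /* One explicit argument; the remaining registers are zero-padded. */
    (void)arm_smccc_invoke(SMCCC_ARCH_FEATURES, SMCCC_ARCH_WORKAROUND_1, &res);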
diff --git a/sys/dev/psci/smccc_arm64.S b/sys/dev/psci/smccc_arm64.S
index 3d3c9fc837b1..2a3c09ec26b2 100644
--- a/sys/dev/psci/smccc_arm64.S
+++ b/sys/dev/psci/smccc_arm64.S
@@ -30,7 +30,10 @@
* SUCH DAMAGE.
*/
+#include <sys/elf_common.h>
+
#include <machine/asm.h>
+
.macro arm_smccc_1_0 insn
ENTRY(arm_smccc_\insn)
\insn #0
@@ -77,10 +80,12 @@ ENTRY(arm_smccc_1_2_\insn)
stp x16, x17, [x19, #16 * 8]
1: ldp xzr, x19, [sp], #16
ret
-END(arm_smccc_1_2\insn)
+END(arm_smccc_1_2_\insn)
.endm
/* int arm_smccc_1_2_*(const struct arm_smccc_1_2_regs *args,
* struct arm_smccc_1_2_regs *res)
*/
arm_smccc_1_2 hvc
arm_smccc_1_2 smc
+
+GNU_PROPERTY_AARCH64_FEATURE_1_NOTE(GNU_PROPERTY_AARCH64_FEATURE_1_VAL)
diff --git a/sys/dev/psci/smccc_errata.c b/sys/dev/psci/smccc_errata.c
new file mode 100644
index 000000000000..ebfc0f8b67ee
--- /dev/null
+++ b/sys/dev/psci/smccc_errata.c
@@ -0,0 +1,139 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * A driver for the Arm Errata Management Firmware Interface (Errata ABI).
+ * This queries into the SMCCC firmware for the status of errata using the
+ * interface documented in den0100 [1].
+ *
+ * [1] https://developer.arm.com/documentation/den0100/latest
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/eventhandler.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/random.h>
+
+#include <dev/psci/psci.h>
+#include <dev/psci/smccc.h>
+
+#include <machine/cpu_feat.h>
+
+#define ERRATA_HIGHER_EL_MITIGATION 3
+#define ERRATA_NOT_AFFECTED 2
+#define ERRATA_AFFECTED 1
+
+#define EM_VERSION SMCCC_FUNC_ID(SMCCC_FAST_CALL, \
+ SMCCC_32BIT_CALL, SMCCC_STD_SECURE_SERVICE_CALLS, 0xf0u)
+#define EM_VERSION_MIN 0x10000L
+#define EM_FEATURES SMCCC_FUNC_ID(SMCCC_FAST_CALL, \
+ SMCCC_32BIT_CALL, SMCCC_STD_SECURE_SERVICE_CALLS, 0xf1u)
+#define EM_CPU_ERRATUM_FEATURES SMCCC_FUNC_ID(SMCCC_FAST_CALL, \
+ SMCCC_32BIT_CALL, SMCCC_STD_SECURE_SERVICE_CALLS, 0xf2u)
+
+static device_identify_t errata_identify;
+static device_probe_t errata_probe;
+static device_attach_t errata_attach;
+static cpu_feat_errata errata_cpu_feat_errata_check(const struct cpu_feat *,
+ u_int);
+
+static void
+errata_identify(driver_t *driver, device_t parent)
+{
+ int32_t version;
+
+ /* Check if Errata ABI is supported */
+ if (smccc_arch_features(EM_VERSION) != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Check we have Errata 1.0 or later */
+ version = psci_call(EM_VERSION, 0, 0, 0);
+ if (version < EM_VERSION_MIN)
+ return;
+
+ if (BUS_ADD_CHILD(parent, 0, "errata", DEVICE_UNIT_ANY) == NULL)
+ device_printf(parent, "add errata child failed\n");
+}
+
+static int
+errata_probe(device_t dev)
+{
+ device_set_desc(dev, "Arm SMCCC Errata Management");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+errata_attach(device_t dev)
+{
+ /* Check for EM_CPU_ERRATUM_FEATURES. It's mandatory, so should exist */
+ if (arm_smccc_invoke(EM_FEATURES, EM_CPU_ERRATUM_FEATURES, NULL) < 0) {
+ device_printf(dev,
+ "EM_CPU_ERRATUM_FEATURES is not implemented\n");
+ return (ENXIO);
+ }
+
+ cpu_feat_register_errata_check(errata_cpu_feat_errata_check);
+
+ return (0);
+}
+
+static cpu_feat_errata
+errata_cpu_feat_errata_check(const struct cpu_feat *feat __unused,
+    u_int errata_id)
+{
+ struct arm_smccc_res res;
+
+ switch (arm_smccc_invoke(EM_CPU_ERRATUM_FEATURES, errata_id, 0, &res)) {
+ default:
+ return (ERRATA_UNKNOWN);
+ case ERRATA_NOT_AFFECTED:
+ return (ERRATA_NONE);
+ case ERRATA_AFFECTED:
+ return (ERRATA_AFFECTED);
+ case ERRATA_HIGHER_EL_MITIGATION:
+ return (ERRATA_FW_MITIGAION);
+ }
+}
+
+static device_method_t errata_methods[] = {
+ DEVMETHOD(device_identify, errata_identify),
+ DEVMETHOD(device_probe, errata_probe),
+ DEVMETHOD(device_attach, errata_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t errata_driver = {
+ "errata",
+ errata_methods,
+ 0
+};
+
+DRIVER_MODULE(errata, smccc, errata_driver, 0, 0);
diff --git a/sys/dev/psci/smccc_trng.c b/sys/dev/psci/smccc_trng.c
new file mode 100644
index 000000000000..8a2e5508ef48
--- /dev/null
+++ b/sys/dev/psci/smccc_trng.c
@@ -0,0 +1,143 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024 Arm Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * A driver for the Arm True Random Number Generator Firmware Interface.
+ * This queries into the SMCCC firmware for random numbers using the
+ * interface documented in den0098 [1].
+ *
+ * [1] https://developer.arm.com/documentation/den0098/latest
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/random.h>
+
+#include <dev/psci/psci.h>
+#include <dev/psci/smccc.h>
+
+#include <dev/random/randomdev.h>
+
+#define TRNG_VERSION SMCCC_FUNC_ID(SMCCC_FAST_CALL, \
+ SMCCC_32BIT_CALL, SMCCC_STD_SECURE_SERVICE_CALLS, 0x50)
+#define TRNG_VERSION_MIN 0x10000L
+#define TRNG_RND64 SMCCC_FUNC_ID(SMCCC_FAST_CALL, \
+ SMCCC_64BIT_CALL, SMCCC_STD_SECURE_SERVICE_CALLS, 0x53)
+
+static device_identify_t trng_identify;
+static device_probe_t trng_probe;
+static device_attach_t trng_attach;
+
+static unsigned trng_read(void *, unsigned);
+
+static const struct random_source random_trng = {
+ .rs_ident = "Arm SMCCC TRNG",
+ .rs_source = RANDOM_PURE_ARM_TRNG,
+ .rs_read = trng_read,
+};
+
+static void
+trng_identify(driver_t *driver, device_t parent)
+{
+ int32_t version;
+
+ /* Check if TRNG is supported */
+ if (smccc_arch_features(TRNG_VERSION) != SMCCC_RET_SUCCESS)
+ return;
+
+ /* Check we have TRNG 1.0 or later */
+ version = psci_call(TRNG_VERSION, 0, 0, 0);
+ if (version < TRNG_VERSION_MIN)
+ return;
+
+ if (BUS_ADD_CHILD(parent, 0, "trng", DEVICE_UNIT_ANY) == NULL)
+ device_printf(parent, "add TRNG child failed\n");
+}
+
+static int
+trng_probe(device_t dev)
+{
+ device_set_desc(dev, "Arm SMCCC TRNG");
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+static int
+trng_attach(device_t dev)
+{
+ struct arm_smccc_res res;
+ int32_t ret;
+
+ ret = arm_smccc_invoke(TRNG_RND64, 192, &res);
+ if (ret < 0) {
+ device_printf(dev, "Failed to read fron TRNG\n");
+ } else {
+ random_source_register(&random_trng);
+ }
+
+ return (0);
+}
+
+static unsigned
+trng_read(void *buf, unsigned usz)
+{
+ struct arm_smccc_res res;
+ register_t len;
+ int32_t ret;
+
+ len = usz;
+ if (len > sizeof(uint64_t))
+ len = sizeof(uint64_t);
+ if (len == 0)
+ return (0);
+
+ ret = arm_smccc_invoke(TRNG_RND64, len * 8, &res);
+ if (ret < 0)
+ return (0);
+
+ memcpy(buf, &res.a3, len);
+ return (len);
+}
+
+static device_method_t trng_methods[] = {
+ DEVMETHOD(device_identify, trng_identify),
+ DEVMETHOD(device_probe, trng_probe),
+ DEVMETHOD(device_attach, trng_attach),
+
+ DEVMETHOD_END
+};
+
+static driver_t trng_driver = {
+ "trng",
+ trng_methods,
+ 0
+};
+
+DRIVER_MODULE(trng, smccc, trng_driver, 0, 0);
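
Since TRNG_RND64 returns at most 64 bits of entropy per conduit call, trng_read() caps each request at 8 bytes. random(4) drives the rs_read callback itself, but a consumer-style loop shows the contract; this is an illustration only, not part of the patch:

    uint8_t buf[32];
    unsigned off = 0, n;

    while (off < sizeof(buf)) {
    	n = trng_read(buf + off, sizeof(buf) - off);
    	if (n == 0)
    		break;		/* firmware call failed */
    	off += n;		/* at most 8 bytes per call */
    }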
diff --git a/sys/dev/pst/pst-iop.c b/sys/dev/pst/pst-iop.c
index 03eca4d96286..2afb989e8590 100644
--- a/sys/dev/pst/pst-iop.c
+++ b/sys/dev/pst/pst-iop.c
@@ -37,9 +37,9 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -136,7 +136,7 @@ iop_attach(void *arg)
ident->vendor, ident->product);
printf("pstiop: description=<%.16s> revision=<%.8s>\n",
ident->description, ident->revision);
- contigfree(reply, PAGE_SIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
}
#endif
@@ -317,18 +317,18 @@ iop_get_lct(struct iop_softc *sc)
msg->sgl.phys_addr[0] = vtophys(reply);
if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg)) {
- contigfree(reply, ALLOCSIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
return 0;
}
if (!(sc->lct = malloc(reply->table_size * sizeof(struct i2o_lct_entry),
M_PSTIOP, M_NOWAIT | M_ZERO))) {
- contigfree(reply, ALLOCSIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
return 0;
}
bcopy(&reply->entry[0], sc->lct,
reply->table_size * sizeof(struct i2o_lct_entry));
sc->lct_count = reply->table_size;
- contigfree(reply, ALLOCSIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
return 1;
}
@@ -374,10 +374,10 @@ iop_get_util_params(struct iop_softc *sc, int target, int operation, int group)
if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg) ||
reply->error_info_size) {
- contigfree(reply, PAGE_SIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
reply = NULL;
}
- contigfree(param, PAGE_SIZE, M_PSTIOP);
+ free(param, M_PSTIOP);
return reply;
}
diff --git a/sys/dev/pst/pst-pci.c b/sys/dev/pst/pst-pci.c
index 2b9295984845..3c1f740cf2f8 100644
--- a/sys/dev/pst/pst-pci.c
+++ b/sys/dev/pst/pst-pci.c
@@ -37,9 +37,9 @@
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -94,7 +94,8 @@ iop_pci_attach(device_t dev)
if (!iop_init(sc))
return 0;
- return bus_generic_attach(dev);
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/pst/pst-raid.c b/sys/dev/pst/pst-raid.c
index de152f611e3a..3e98ed9eb1d2 100644
--- a/sys/dev/pst/pst-raid.c
+++ b/sys/dev/pst/pst-raid.c
@@ -41,11 +41,11 @@
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
@@ -88,7 +88,7 @@ int
pst_add_raid(struct iop_softc *sc, struct i2o_lct_entry *lct)
{
struct pst_softc *psc;
- device_t child = device_add_child(sc->dev, "pst", -1);
+ device_t child = device_add_child(sc->dev, "pst", DEVICE_UNIT_ANY);
if (!child)
return ENOMEM;
@@ -126,11 +126,11 @@ pst_attach(device_t dev)
if (!(psc->info = (struct i2o_bsa_device *)
malloc(sizeof(struct i2o_bsa_device), M_PSTRAID, M_NOWAIT))) {
- contigfree(reply, PAGE_SIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
return ENOMEM;
}
bcopy(reply->result, psc->info, sizeof(struct i2o_bsa_device));
- contigfree(reply, PAGE_SIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
if (!(reply = iop_get_util_params(psc->iop, psc->lct->local_tid,
I2O_PARAMS_OPERATION_FIELD_GET,
@@ -148,7 +148,7 @@ pst_attach(device_t dev)
bpack(ident->vendor, ident->vendor, 16);
bpack(ident->product, ident->product, 16);
sprintf(name, "%s %s", ident->vendor, ident->product);
- contigfree(reply, PAGE_SIZE, M_PSTIOP);
+ free(reply, M_PSTIOP);
bioq_init(&psc->queue);
diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c
index a37016c80226..d55fdf63e70b 100644
--- a/sys/dev/puc/puc.c
+++ b/sys/dev/puc/puc.c
@@ -315,7 +315,7 @@ puc_bfe_attach(device_t dev)
goto fail;
port->p_rclk = res;
- port->p_dev = device_add_child(dev, NULL, -1);
+ port->p_dev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (port->p_dev != NULL)
device_set_ivars(port->p_dev, (void *)port);
}
@@ -373,10 +373,9 @@ puc_bfe_attach(device_t dev)
return (0);
fail:
+ device_delete_children(dev);
for (idx = 0; idx < sc->sc_nports; idx++) {
port = &sc->sc_port[idx];
- if (port->p_dev != NULL)
- device_delete_child(dev, port->p_dev);
if (port->p_rres != NULL)
rman_release_resource(port->p_rres);
if (port->p_ires != NULL)
@@ -409,21 +408,19 @@ puc_bfe_detach(device_t dev)
sc = device_get_softc(dev);
/* Detach our children. */
- error = 0;
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
+
for (idx = 0; idx < sc->sc_nports; idx++) {
port = &sc->sc_port[idx];
if (port->p_dev == NULL)
continue;
- if (device_delete_child(dev, port->p_dev) == 0) {
- if (port->p_rres != NULL)
- rman_release_resource(port->p_rres);
- if (port->p_ires != NULL)
- rman_release_resource(port->p_ires);
- } else
- error = ENXIO;
+ if (port->p_rres != NULL)
+ rman_release_resource(port->p_rres);
+ if (port->p_ires != NULL)
+ rman_release_resource(port->p_ires);
}
- if (error)
- return (error);
if (sc->sc_serdevs != 0UL)
bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie);
diff --git a/sys/dev/puc/pucdata.c b/sys/dev/puc/pucdata.c
index f127e27e7b08..436af76001da 100644
--- a/sys/dev/puc/pucdata.c
+++ b/sys/dev/puc/pucdata.c
@@ -64,6 +64,7 @@ static puc_config_f puc_config_quatech;
static puc_config_f puc_config_syba;
static puc_config_f puc_config_siig;
static puc_config_f puc_config_sunix;
+static puc_config_f puc_config_systembase;
static puc_config_f puc_config_timedia;
static puc_config_f puc_config_titan;
@@ -493,6 +494,428 @@ const struct puc_cfg puc_pci_devices[] = {
.config_function = puc_config_siig
},
+ { 0x135a, 0x0841, 0xffff, 0,
+ "Brainboxes UC-268",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0861, 0xffff, 0,
+ "Brainboxes UC-257",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0862, 0xffff, 0,
+ "Brainboxes UC-257",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0863, 0xffff, 0,
+ "Brainboxes UC-257",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0881, 0xffff, 0,
+ "Brainboxes UC-279",
+ DEFAULT_RCLK,
+ PUC_PORT_8S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08a1, 0xffff, 0,
+ "Brainboxes UC-313",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08a2, 0xffff, 0,
+ "Brainboxes UC-313",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08a3, 0xffff, 0,
+ "Brainboxes UC-313",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08c1, 0xffff, 0,
+ "Brainboxes UC-310",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08e1, 0xffff, 0,
+ "Brainboxes UC-302",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08e2, 0xffff, 0,
+ "Brainboxes UC-302",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x08e3, 0xffff, 0,
+ "Brainboxes UC-302",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0901, 0xffff, 0,
+ "Brainboxes UC-431",
+ DEFAULT_RCLK,
+ PUC_PORT_3S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0921, 0xffff, 0,
+ "Brainboxes UC-420",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0981, 0xffff, 0,
+ "Brainboxes UC-475",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0982, 0xffff, 0,
+ "Brainboxes UC-475",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x09a1, 0xffff, 0,
+ "Brainboxes UC-607",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x09a2, 0xffff, 0,
+ "Brainboxes UC-607",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x09a3, 0xffff, 0,
+ "Brainboxes UC-607",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0a81, 0xffff, 0,
+ "Brainboxes UC-357",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0a82, 0xffff, 0,
+ "Brainboxes UC-357",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0a83, 0xffff, 0,
+ "Brainboxes UC-357",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0ac1, 0xffff, 0,
+ "Brainboxes UP-189",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0ac2, 0xffff, 0,
+ "Brainboxes UP-189",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0ac3, 0xffff, 0,
+ "Brainboxes UP-189",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0b01, 0xffff, 0,
+ "Brainboxes UC-346",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0b02, 0xffff, 0,
+ "Brainboxes UC-346",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0b21, 0xffff, 0,
+ "Brainboxes UP-200",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0b22, 0xffff, 0,
+ "Brainboxes UP-200",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0b23, 0xffff, 0,
+ "Brainboxes UP-200",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0ba1, 0xffff, 0,
+ "Brainboxes UC-101",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0bc1, 0xffff, 0,
+ "Brainboxes UC-203",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0bc2, 0xffff, 0,
+ "Brainboxes UC-203",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c01, 0xffff, 0,
+ "Brainboxes UP-869",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c02, 0xffff, 0,
+ "Brainboxes UP-869",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c03, 0xffff, 0,
+ "Brainboxes UP-869",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c21, 0xffff, 0,
+ "Brainboxes UP-880",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c22, 0xffff, 0,
+ "Brainboxes UP-880",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c23, 0xffff, 0,
+ "Brainboxes UP-880",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0c41, 0xffff, 0,
+ "Brainboxes UC-368",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0ca1, 0xffff, 0,
+ "Brainboxes UC-253",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0d21, 0xffff, 0,
+ "Brainboxes UC-260",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0d41, 0xffff, 0,
+ "Brainboxes UC-836",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0d80, 0xffff, 0,
+ "Intashield IS-200",
+ DEFAULT_RCLK,
+ PUC_PORT_2S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0dc0, 0xffff, 0,
+ "Intashield IS-400",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0e41, 0xffff, 0,
+ "Brainboxes PX-279",
+ DEFAULT_RCLK,
+ PUC_PORT_8S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x0e61, 0xffff, 0,
+ "Brainboxes UC-414",
+ DEFAULT_RCLK,
+ PUC_PORT_4S, 0x18, 0, 8,
+ },
+
+ { 0x135a, 0x400a, 0xffff, 0,
+ "Brainboxes PX-260",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x400b, 0xffff, 0,
+ "Brainboxes PX-320",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x400c, 0xffff, 0,
+ "Brainboxes PX-313",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x400e, 0xffff, 0,
+ "Brainboxes PX-310",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x400f, 0xffff, 0,
+ "Brainboxes PX-346",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4010, 0xffff, 0,
+ "Brainboxes PX-368",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4011, 0xffff, 0,
+ "Brainboxes PX-420",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4012, 0xffff, 0,
+ "Brainboxes PX-431",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4013, 0xffff, 0,
+ "Brainboxes PX-820",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4014, 0xffff, 0,
+ "Brainboxes PX-831",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4015, 0xffff, 0,
+ "Brainboxes PX-257",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4016, 0xffff, 0,
+ "Brainboxes PX-246",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4017, 0xffff, 0,
+ "Brainboxes PX-846",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4018, 0xffff, 0,
+ "Brainboxes PX-857",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4019, 0xffff, 0,
+ "Brainboxes PX-101",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x401d, 0xffff, 0,
+ "Brainboxes PX-475",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x401e, 0xffff, 0,
+ "Brainboxes PX-803",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4027, 0xffff, 0,
+ "Intashield IX-100",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4028, 0xffff, 0,
+ "Intashield IX-200",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
+ { 0x135a, 0x4029, 0xffff, 0,
+ "Intashield IX-400",
+ DEFAULT_RCLK * 0x22,
+ PUC_PORT_NONSTANDARD, 0x10, 0, -1,
+ .config_function = puc_config_oxford_pcie
+ },
+
{ 0x135c, 0x0010, 0xffff, 0,
"Quatech QSC-100",
-3, /* max 8x clock rate */
@@ -1283,6 +1706,23 @@ const struct puc_cfg puc_pci_devices[] = {
PUC_PORT_4S, 0x10, 0, 8,
.config_function = puc_config_icbook
},
+
+ /*
+ * Systembase cards using SB16C1050 UARTs:
+ */
+ { 0x14a1, 0x0008, 0x14a1, 0x0008,
+ "Systembase SB16C1058",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_8S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+ { 0x14a1, 0x0004, 0x14a1, 0x0004,
+ "Systembase SB16C1054",
+ DEFAULT_RCLK * 8,
+ PUC_PORT_4S, 0x10, 0, 8,
+ .config_function = puc_config_systembase,
+ },
+
{ 0xffff, 0, 0xffff, 0, NULL, 0 }
};
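
For orientation, since the block of additions above is dense: each initializer in puc_pci_devices[] follows the struct puc_cfg field order, so the trailing three numbers are read here as the rid of the BAR holding the first port, the rid delta between successive BARs, and the byte spacing between UARTs that share a BAR. An annotated copy of one of the new entries, as a reading aid only:

	{ 0x135a, 0x0dc0, 0xffff, 0,	/* vendor, device, subvendor, subdevice */
	    "Intashield IS-400",	/* probe description */
	    DEFAULT_RCLK,		/* 1.8432 MHz, the stock 16550 clock */
	    PUC_PORT_4S,		/* four serial ports */
	    0x18, 0, 8,			/* all ports in BAR 0x18, 8 bytes apart */
	},

The PX/IX entries instead advertise DEFAULT_RCLK * 0x22, that is 1843200 * 34 = 62668800 Hz, consistent with the roughly 62.5 MHz reference clock of the Oxford OXPCIe parts those cards appear to be built on; PUC_PORT_NONSTANDARD plus puc_config_oxford_pcie then leaves the real port count and layout to the config function at setup time.
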
@@ -1872,3 +2312,28 @@ puc_config_titan(struct puc_softc *sc __unused, enum puc_cfg_cmd cmd,
}
return (ENXIO);
}
+
+static int
+puc_config_systembase(struct puc_softc *sc __unused,
+ enum puc_cfg_cmd cmd, int port, intptr_t *res)
+{
+ struct puc_bar *bar;
+
+ switch (cmd) {
+ case PUC_CFG_SETUP:
+ bar = puc_get_bar(sc, 0x14);
+ if (bar == NULL)
+ return (ENXIO);
+
+ /*
+ * The Systembase SB16C1058 (and probably other devices
+ * based on the SB16C1050 UART core) requires poking a
+ * register in the *other* RID to turn on interrupts.
+ */
+ bus_write_1(bar->b_res, /* OPT_IMRREG0 */ 0xc, 0xff);
+ return (0);
+ default:
+ break;
+ }
+ return (ENXIO);
+}
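
The .config_function hook wired up in the Systembase entries is consulted by the puc core for each enum puc_cfg_cmd before the core falls back to its table-driven defaults. A minimal sketch of that dispatch, with puc_config_default standing in for the built-in handling (a hypothetical name, shown only to illustrate the control flow):

	static int
	puc_config(struct puc_softc *sc, enum puc_cfg_cmd cmd, int port,
	    intptr_t *res)
	{
		/* Give the device-specific hook first refusal. */
		if (sc->sc_cfg->config_function != NULL &&
		    sc->sc_cfg->config_function(sc, cmd, port, res) == 0)
			return (0);
		/* Otherwise use defaults derived from the table entry. */
		return (puc_config_default(sc, cmd, port, res));
	}

The DEFAULT_RCLK * 8 in these entries works out to 1843200 * 8 = 14745600 Hz, matching the 14.7456 MHz crystal that 8x-clocked SB16C105x parts are commonly driven by.
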
diff --git a/sys/dev/pwm/controller/allwinner/aw_pwm.c b/sys/dev/pwm/controller/allwinner/aw_pwm.c
index 8d7e38834fdd..117f3ae17e1b 100644
--- a/sys/dev/pwm/controller/allwinner/aw_pwm.c
+++ b/sys/dev/pwm/controller/allwinner/aw_pwm.c
@@ -188,9 +188,10 @@ skipcfg:
node = ofw_bus_get_node(dev);
OF_device_register_xref(OF_xref_from_node(node), dev);
- sc->busdev = device_add_child(dev, "pwmbus", -1);
+ sc->busdev = device_add_child(dev, "pwmbus", DEVICE_UNIT_ANY);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
aw_pwm_detach(dev);
@@ -210,9 +211,6 @@ aw_pwm_detach(device_t dev)
return (error);
}
- if (sc->busdev != NULL)
- device_delete_child(dev, sc->busdev);
-
if (sc->res != NULL)
bus_release_resources(dev, aw_pwm_spec, &sc->res);
diff --git a/sys/dev/pwm/controller/rockchip/rk_pwm.c b/sys/dev/pwm/controller/rockchip/rk_pwm.c
index f1f3946e4d59..d05a51e9afb9 100644
--- a/sys/dev/pwm/controller/rockchip/rk_pwm.c
+++ b/sys/dev/pwm/controller/rockchip/rk_pwm.c
@@ -212,9 +212,10 @@ rk_pwm_attach(device_t dev)
node = ofw_bus_get_node(dev);
OF_device_register_xref(OF_xref_from_node(node), dev);
- sc->busdev = device_add_child(dev, "pwmbus", -1);
+ sc->busdev = device_add_child(dev, "pwmbus", DEVICE_UNIT_ANY);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
rk_pwm_detach(dev);
diff --git a/sys/dev/pwm/ofw_pwmbus.c b/sys/dev/pwm/ofw_pwmbus.c
index 2d2c47291bca..913792374fd9 100644
--- a/sys/dev/pwm/ofw_pwmbus.c
+++ b/sys/dev/pwm/ofw_pwmbus.c
@@ -150,7 +150,8 @@ ofw_pwmbus_attach(device_t dev)
if (chan >= sc->base.nchannels)
continue;
- if ((child = ofw_pwmbus_add_child(dev, 0, NULL, -1)) == NULL)
+ if ((child = ofw_pwmbus_add_child(dev, 0, NULL,
+ DEVICE_UNIT_ANY)) == NULL)
continue;
ivars = device_get_ivars(child);
@@ -173,7 +174,8 @@ ofw_pwmbus_attach(device_t dev)
*/
if (!any_children) {
for (chan = 0; chan < sc->base.nchannels; ++chan) {
- child = ofw_pwmbus_add_child(dev, 0, "pwmc", -1);
+ child = ofw_pwmbus_add_child(dev, 0, "pwmc",
+ DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "failed to add pwmc child "
" device for channel %u\n", chan);
@@ -184,9 +186,10 @@ ofw_pwmbus_attach(device_t dev)
}
}
bus_enumerate_hinted_children(dev);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
- return (bus_generic_attach(dev));
+ return (0);
}
static device_method_t ofw_pwmbus_methods[] = {
diff --git a/sys/dev/pwm/pwmbus.c b/sys/dev/pwm/pwmbus.c
index e4feaa95e590..c0c07a36c277 100644
--- a/sys/dev/pwm/pwmbus.c
+++ b/sys/dev/pwm/pwmbus.c
@@ -182,20 +182,10 @@ pwmbus_attach(device_t dev)
}
bus_enumerate_hinted_children(dev);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
- return (bus_generic_attach(dev));
-}
-
-static int
-pwmbus_detach(device_t dev)
-{
- int rv;
-
- if ((rv = bus_generic_detach(dev)) == 0)
- rv = device_delete_children(dev);
-
- return (rv);
+ return (0);
}
/*
@@ -248,7 +238,7 @@ static device_method_t pwmbus_methods[] = {
/* device_if */
DEVMETHOD(device_probe, pwmbus_probe),
DEVMETHOD(device_attach, pwmbus_attach),
- DEVMETHOD(device_detach, pwmbus_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
/* bus_if */
DEVMETHOD(bus_add_child, pwmbus_add_child),
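
The four pwm diffs above apply one mechanical modernization per driver: explicit unit numbers give way to DEVICE_UNIT_ANY, bus_generic_probe/bus_generic_attach give way to bus_identify_children/bus_attach_children, and the hand-rolled detach paths that deleted children are dropped, evidently because bus_generic_detach now takes care of that (an inference from the deletions, not stated in the diff). Condensed, the attach side changes shape like this:

	/* Old idiom: caller-chosen unit, child failures fail the parent. */
	sc->busdev = device_add_child(dev, "pwmbus", -1);
	return (bus_generic_attach(dev));

	/* New idiom: the framework picks the unit, children are attached
	 * explicitly, and attach reports success unconditionally. */
	sc->busdev = device_add_child(dev, "pwmbus", DEVICE_UNIT_ANY);
	bus_attach_children(dev);
	return (0);
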
diff --git a/sys/dev/qat/include/adf_cfg_device.h b/sys/dev/qat/include/adf_cfg_device.h
index f2891e4eb805..1419292a262f 100644
--- a/sys/dev/qat/include/adf_cfg_device.h
+++ b/sys/dev/qat/include/adf_cfg_device.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_CFG_DEVICE_H_
#define ADF_CFG_DEVICE_H_
@@ -79,4 +79,6 @@ int adf_cfg_device_init(struct adf_cfg_device *device,
void adf_cfg_device_clear(struct adf_cfg_device *device,
struct adf_accel_dev *accel_dev);
+void adf_cfg_device_clear_all(struct adf_accel_dev *accel_dev);
+
#endif
diff --git a/sys/dev/qat/include/adf_dbgfs.h b/sys/dev/qat/include/adf_dbgfs.h
new file mode 100644
index 000000000000..a07933c0f02d
--- /dev/null
+++ b/sys/dev/qat/include/adf_dbgfs.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
+
+#ifndef ADF_DBGFS_H
+#define ADF_DBGFS_H
+
+void adf_dbgfs_init(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_add(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_rm(struct adf_accel_dev *accel_dev);
+void adf_dbgfs_exit(struct adf_accel_dev *accel_dev);
+#endif
diff --git a/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h b/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h
index ed78ab54ec4e..5143b88907ba 100644
--- a/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h
+++ b/sys/dev/qat/include/adf_gen4vf_hw_csr_data.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_GEN4VF_HW_CSR_DATA_H_
#define ADF_GEN4VF_HW_CSR_DATA_H_
@@ -51,7 +51,7 @@
struct resource *_csr_base_addr = csr_base_addr; \
u32 _bank = bank; \
u32 _ring = ring; \
- dma_addr_t _value = value; \
+ bus_addr_t _value = value; \
u32 l_base = 0, u_base = 0; \
l_base = (u32)((_value)&0xFFFFFFFF); \
u_base = (u32)(((_value)&0xFFFFFFFF00000000ULL) >> 32); \
diff --git a/sys/dev/qat/include/adf_heartbeat.h b/sys/dev/qat/include/adf_heartbeat.h
index b2eab7139c1f..7a72678e77de 100644
--- a/sys/dev/qat/include/adf_heartbeat.h
+++ b/sys/dev/qat/include/adf_heartbeat.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_HEARTBEAT_H_
#define ADF_HEARTBEAT_H_
@@ -18,8 +18,8 @@ struct adf_heartbeat {
u64 last_hb_check_time;
enum adf_device_heartbeat_status last_hb_status;
struct qat_sysctl heartbeat;
- struct qat_sysctl *heartbeat_sent;
- struct qat_sysctl *heartbeat_failed;
+ struct qat_sysctl heartbeat_sent;
+ struct qat_sysctl heartbeat_failed;
};
int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
diff --git a/sys/dev/qat/include/adf_pfvf_vf_msg.h b/sys/dev/qat/include/adf_pfvf_vf_msg.h
index 3cf6d5ed0815..44066ca1fe85 100644
--- a/sys/dev/qat/include/adf_pfvf_vf_msg.h
+++ b/sys/dev/qat/include/adf_pfvf_vf_msg.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_PFVF_VF_MSG_H
#define ADF_PFVF_VF_MSG_H
@@ -8,5 +8,6 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev);
int adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev);
+void adf_vf2pf_restarting_complete(struct adf_accel_dev *accel_dev);
#endif /* ADF_PFVF_VF_MSG_H */
diff --git a/sys/dev/qat/include/common/adf_accel_devices.h b/sys/dev/qat/include/common/adf_accel_devices.h
index 0a1248b9a68e..eeffc6a9132c 100644
--- a/sys/dev/qat/include/common/adf_accel_devices.h
+++ b/sys/dev/qat/include/common/adf_accel_devices.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
@@ -7,6 +7,8 @@
#include "adf_cfg_common.h"
#include "adf_pfvf_msg.h"
+#include "opt_qat.h"
+
#define ADF_CFG_NUM_SERVICES 4
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
@@ -37,12 +39,16 @@
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
+#define ADF_402XX_PCI_DEVICE_ID 0x4944
+#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define IS_QAT_GEN3(ID) ({ (ID == ADF_C4XXX_PCI_DEVICE_ID); })
static inline bool
IS_QAT_GEN4(const unsigned int id)
{
return (id == ADF_4XXX_PCI_DEVICE_ID || id == ADF_401XX_PCI_DEVICE_ID ||
+ id == ADF_402XX_PCI_DEVICE_ID ||
+ id == ADF_402XXIOV_PCI_DEVICE_ID ||
id == ADF_4XXXIOV_PCI_DEVICE_ID ||
id == ADF_401XXIOV_PCI_DEVICE_ID);
}
@@ -446,6 +452,7 @@ struct adf_hw_device_data {
uint8_t num_accel;
uint8_t num_logical_accel;
uint8_t num_engines;
+ bool get_ring_to_svc_done;
int (*get_storage_enabled)(struct adf_accel_dev *accel_dev,
uint32_t *storage_enabled);
u8 query_storage_cap;
@@ -683,10 +690,18 @@ struct adf_accel_dev {
struct sysctl_oid *ras_reset;
struct sysctl_oid *pke_replay_dbgfile;
struct sysctl_oid *misc_error_dbgfile;
+ struct sysctl_oid *fw_version_oid;
+ struct sysctl_oid *mmp_version_oid;
+ struct sysctl_oid *hw_version_oid;
+ struct sysctl_oid *cnv_error_oid;
struct list_head list;
struct adf_accel_pci accel_pci_dev;
struct adf_accel_compat_manager *cm;
u8 compat_ver;
+#ifdef QAT_DISABLE_SAFE_DC_MODE
+ struct sysctl_oid *safe_dc_mode;
+ u8 disable_safe_dc_mode;
+#endif /* QAT_DISABLE_SAFE_DC_MODE */
union {
struct {
/* vf_info is non-zero when SR-IOV is init'ed */
@@ -711,5 +726,6 @@ struct adf_accel_dev {
bool is_vf;
u32 accel_id;
void *lac_dev;
+ struct mutex lock; /* protect accel_dev during start/stop, etc. */
};
#endif
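
The new accel_dev->lock indicates that per-device state transitions are now serialized. A hypothetical usage sketch, assuming the LinuxKPI-style mutex API this driver uses elsewhere and the adf_dev_* lifecycle helpers declared in adf_common_drv.h; this exact pairing is an assumption, not code from the diff:

	int ret;

	/* Serialize a device restart against concurrent start/stop. */
	mutex_lock(&accel_dev->lock);
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);
	mutex_unlock(&accel_dev->lock);
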
diff --git a/sys/dev/qat/include/common/adf_cfg_common.h b/sys/dev/qat/include/common/adf_cfg_common.h
index 4a85e021aeb7..eb3edec41742 100644
--- a/sys/dev/qat/include/common/adf_cfg_common.h
+++ b/sys/dev/qat/include/common/adf_cfg_common.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_CFG_COMMON_H_
#define ADF_CFG_COMMON_H_
@@ -62,7 +62,7 @@ struct adf_pci_address {
unsigned char bus;
unsigned char dev;
unsigned char func;
-} __packed;
+};
#define ADF_CFG_SERV_RING_PAIR_0_SHIFT 0
#define ADF_CFG_SERV_RING_PAIR_1_SHIFT 3
diff --git a/sys/dev/qat/include/common/adf_common_drv.h b/sys/dev/qat/include/common/adf_common_drv.h
index b6bc2511bfba..f9f4463f69c3 100644
--- a/sys/dev/qat/include/common/adf_common_drv.h
+++ b/sys/dev/qat/include/common/adf_common_drv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_DRV_H
#define ADF_DRV_H
@@ -304,6 +304,7 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
int adf_pf2vf_handle_pf_restarting(struct adf_accel_dev *accel_dev);
int adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
struct pfvf_message msg);
+int adf_pf2vf_handle_pf_error(struct adf_accel_dev *accel_dev);
bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev);
static inline int
adf_sriov_configure(device_t *pdev, int numvfs)
diff --git a/sys/dev/qat/include/common/adf_gen4_hw_data.h b/sys/dev/qat/include/common/adf_gen4_hw_data.h
index d0423eaa17cf..cde5ae1f4e10 100644
--- a/sys/dev/qat/include/common/adf_gen4_hw_data.h
+++ b/sys/dev/qat/include/common/adf_gen4_hw_data.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2021 Intel Corporation */
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_GEN4_HW_CSR_DATA_H_
#define ADF_GEN4_HW_CSR_DATA_H_
@@ -62,7 +62,7 @@
struct resource *_csr_base_addr = csr_base_addr; \
u32 _bank = bank; \
u32 _ring = ring; \
- dma_addr_t _value = value; \
+ bus_addr_t _value = value; \
u32 l_base = 0, u_base = 0; \
l_base = lower_32_bits(_value); \
u_base = upper_32_bits(_value); \
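
The dma_addr_t to bus_addr_t swap here (and in the gen4vf header earlier) replaces a Linux-flavored type with the native FreeBSD bus address type; the macro's behavior is unchanged, it still splits a 64-bit ring base into two 32-bit CSR writes:

	/* Illustrative only; value would come from the ring setup path. */
	bus_addr_t value = ring_base;
	u32 lo = lower_32_bits(value);
	u32 hi = upper_32_bits(value);
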
diff --git a/sys/dev/qat/include/common/adf_pfvf_msg.h b/sys/dev/qat/include/common/adf_pfvf_msg.h
index 349db9a13b22..abd9cd46014f 100644
--- a/sys/dev/qat/include/common/adf_pfvf_msg.h
+++ b/sys/dev/qat/include/common/adf_pfvf_msg.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_PFVF_MSG_H
#define ADF_PFVF_MSG_H
@@ -97,6 +97,7 @@ enum pf2vf_msgtype {
ADF_PF2VF_MSGTYPE_RESTARTING = 0x01,
ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02,
ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03,
+ ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in
Gen2 devices. */
ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10,
@@ -111,6 +112,7 @@ enum vf2pf_msgtype {
ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07,
ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08,
ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09,
+ ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a,
/* Values from 0x10 are Gen4 specific, message type is only 4 bits in
Gen2 devices. */
ADF_VF2PF_MSGTYPE_RP_RESET = 0x10,
@@ -124,8 +126,10 @@ enum pfvf_compatibility_version {
ADF_PFVF_COMPAT_FAST_ACK = 0x03,
/* Ring to service mapping support for non-standard mappings */
ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04,
+ /* Fallback compat */
+ ADF_PFVF_COMPAT_FALLBACK = 0x05,
/* Reference to the latest version */
- ADF_PFVF_COMPAT_THIS_VERSION = 0x04,
+ ADF_PFVF_COMPAT_THIS_VERSION = 0x05,
};
/* PF->VF Version Response */
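
The version bump pairs with the ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE message added above and the adf_vf2pf_restarting_complete() prototype added earlier in this series: a VF should only emit the new message to a PF that negotiated a fallback-capable protocol. A hypothetical guard, where compat_ver stands for the negotiated version (that plumbing is not shown in this diff):

	/* Only fallback-aware PFs (compat >= 0x05) understand this. */
	if (compat_ver >= ADF_PFVF_COMPAT_FALLBACK)
		adf_vf2pf_restarting_complete(accel_dev);
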
diff --git a/sys/dev/qat/include/common/adf_uio_cleanup.h b/sys/dev/qat/include/common/adf_uio_cleanup.h
index 8f1132181355..876843cd9aa8 100644
--- a/sys/dev/qat/include/common/adf_uio_cleanup.h
+++ b/sys/dev/qat/include/common/adf_uio_cleanup.h
@@ -1,10 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2023 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_UIO_CLEANUP_H
#define ADF_UIO_CLEANUP_H
void adf_uio_do_cleanup_orphan(int bank,
struct adf_uio_control_accel *accel);
-
#endif
diff --git a/sys/dev/qat/include/common/adf_uio_control.h b/sys/dev/qat/include/common/adf_uio_control.h
index 4662c02233b6..032baa9b54c2 100644
--- a/sys/dev/qat/include/common/adf_uio_control.h
+++ b/sys/dev/qat/include/common/adf_uio_control.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2023 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef QAT_UIO_CONTROL_H
#define QAT_UIO_CONTROL_H
#include <sys/condvar.h>
@@ -38,5 +38,4 @@ struct adf_uio_control_accel {
};
-
#endif /* end of include guard: QAT_UIO_CONTROL_H */
diff --git a/sys/dev/qat/include/icp_qat_fw_init_admin.h b/sys/dev/qat/include/icp_qat_fw_init_admin.h
index 3537fb3f8cfd..f3e0ff9d0064 100644
--- a/sys/dev/qat/include/icp_qat_fw_init_admin.h
+++ b/sys/dev/qat/include/icp_qat_fw_init_admin.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
#define _ICP_QAT_FW_INIT_ADMIN_H_
@@ -43,6 +43,8 @@ enum icp_qat_fw_cnv_error_type {
CNV_ERR_TYPE_UNKNOWN_ERROR
};
+#define ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG 0x02
+
#define CNV_ERROR_TYPE_GET(latest_error) \
({ \
__typeof__(latest_error) _lerror = latest_error; \
@@ -69,7 +71,8 @@ struct icp_qat_fw_init_admin_req {
struct {
u64 resrvd2;
u16 ibuf_size_in_kb;
- u16 resrvd3;
+ u8 fw_flags;
+ u8 resrvd3;
u32 resrvd4;
};
/* ICP_QAT_FW_CONSTANTS_CFG */
@@ -195,8 +198,8 @@ struct icp_qat_fw_init_admin_resp {
enum icp_qat_fw_init_admin_init_flag { ICP_QAT_FW_INIT_FLAG_PKE_DISABLED = 0 };
struct icp_qat_fw_init_admin_hb_cnt {
- u16 resp_heartbeat_cnt;
u16 req_heartbeat_cnt;
+ u16 resp_heartbeat_cnt;
};
#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
diff --git a/sys/dev/qat/qat/qat_ocf.c b/sys/dev/qat/qat/qat_ocf.c
index 8958c7b82e49..74f113e46884 100644
--- a/sys/dev/qat/qat/qat_ocf.c
+++ b/sys/dev/qat/qat/qat_ocf.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/* System headers */
#include <sys/param.h>
#include <sys/systm.h>
@@ -517,7 +517,7 @@ qat_ocf_session_init(device_t dev,
M_NOWAIT,
0,
~1UL,
- 1 << (bsrl(sessionCtxSize - 1) + 1),
+ 1 << (ilog2(sessionCtxSize - 1) + 1),
0);
if (NULL == sessionCtx) {
device_printf(dev, "unable to allocate memory for session\n");
@@ -544,7 +544,7 @@ qat_ocf_session_init(device_t dev,
fail:
/* Release resources if any */
if (sessionCtx)
- contigfree(sessionCtx, sessionCtxSize, M_QAT_OCF);
+ free(sessionCtx, M_QAT_OCF);
return status;
}
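
The contigfree() to free() substitutions in this file track contigfree(9) being retired: free(9) now recognizes allocations made with contigmalloc(9), so callers no longer need to carry the allocation size to the release site. The resulting pairing, sketched with hypothetical names:

	/* Physically contiguous, power-of-two aligned allocation... */
	void *ctx = contigmalloc(size, M_QAT_OCF, M_NOWAIT, 0, ~1UL,
	    1 << (ilog2(size - 1) + 1), 0);
	/* ...used, then released without remembering the size. */
	free(ctx, M_QAT_OCF);
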
@@ -610,9 +610,7 @@ qat_ocf_remove_session(device_t dev,
}
explicit_bzero(qat_session->sessionCtx, qat_session->sessionCtxSize);
- contigfree(qat_session->sessionCtx,
- qat_session->sessionCtxSize,
- M_QAT_OCF);
+ free(qat_session->sessionCtx, M_QAT_OCF);
qat_session->sessionCtx = NULL;
qat_session->sessionCtxSize = 0;
@@ -943,8 +941,8 @@ fail:
static void
qat_ocf_identify(driver_t *drv, device_t parent)
{
- if (device_find_child(parent, "qat_ocf", -1) == NULL &&
- BUS_ADD_CHILD(parent, 200, "qat_ocf", -1) == 0)
+ if (device_find_child(parent, "qat_ocf", DEVICE_UNIT_ANY) == NULL &&
+ BUS_ADD_CHILD(parent, 200, "qat_ocf", DEVICE_UNIT_ANY) == 0)
device_printf(parent, "qat_ocf: could not attach!");
}
@@ -1282,7 +1280,6 @@ static driver_t qat_ocf_driver = {
.size = sizeof(struct qat_ocf_softc),
};
-
DRIVER_MODULE_ORDERED(qat,
nexus,
qat_ocf_driver,
diff --git a/sys/dev/qat/qat_api/common/compression/dc_datapath.c b/sys/dev/qat/qat_api/common/compression/dc_datapath.c
index de14be2fdb0d..312b2d6749cc 100644
--- a/sys/dev/qat/qat_api/common/compression/dc_datapath.c
+++ b/sys/dev/qat/qat_api/common/compression/dc_datapath.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file dc_datapath.c
@@ -118,14 +118,19 @@ dcCompression_ProcessCallback(void *pRespMsg)
/* Cast response message to compression response message type */
pCompRespMsg = (icp_qat_fw_comp_resp_t *)pRespMsg;
-
+ if (!(pCompRespMsg)) {
+ QAT_UTILS_LOG("pCompRespMsg is NULL\n");
+ return;
+ }
/* Extract request data pointer from the opaque data */
LAC_MEM_SHARED_READ_TO_PTR(pCompRespMsg->opaque_data, pReqData);
+ if (!(pReqData)) {
+ QAT_UTILS_LOG("pReqData is NULL\n");
+ return;
+ }
/* Extract fields from the request data structure */
pCookie = (dc_compression_cookie_t *)pReqData;
- if (!pCookie)
- return;
pSessionDesc = DC_SESSION_DESC_FROM_CTX_GET(pCookie->pSessionHandle);
pService = (sal_compression_service_t *)(pCookie->dcInstance);
@@ -523,7 +528,7 @@ dcCheckOpData(sal_compression_service_t *pService, CpaDcOpData *pOpData)
if (CPA_TRUE == pOpData->integrityCrcCheck &&
NULL == pOpData->pCrcData) {
LAC_INVALID_PARAM_LOG("Integrity CRC data structure "
- "not intialized in CpaDcOpData");
+ "not initialized in CpaDcOpData");
return CPA_STATUS_INVALID_PARAM;
}
@@ -1401,7 +1406,6 @@ cpaDcCompressData(CpaInstanceHandle dcInstance,
CpaInstanceHandle insHandle = NULL;
Cpa64U srcBuffSize = 0;
-
if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
insHandle = dcGetFirstHandle();
} else {
@@ -1507,7 +1511,6 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
return CPA_STATUS_INVALID_PARAM;
}
-
if ((CPA_TRUE == pOpData->compressAndVerify) &&
(CPA_TRUE == pOpData->compressAndVerifyAndRecover) &&
(CPA_FALSE == pOpData->integrityCrcCheck)) {
@@ -1526,7 +1529,6 @@ cpaDcCompressData2(CpaInstanceHandle dcInstance,
return CPA_STATUS_UNSUPPORTED;
}
-
if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
insHandle = dcGetFirstHandle();
} else {
@@ -1703,7 +1705,6 @@ dcDecompressDataCheck(CpaInstanceHandle insHandle,
return CPA_STATUS_INVALID_PARAM;
}
-
*srcBufferSize = srcBuffSize;
return CPA_STATUS_SUCCESS;
@@ -1724,7 +1725,6 @@ cpaDcDecompressData(CpaInstanceHandle dcInstance,
Cpa64U srcBuffSize = 0;
CpaStatus status = CPA_STATUS_SUCCESS;
-
if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
insHandle = dcGetFirstHandle();
} else {
@@ -1783,7 +1783,6 @@ cpaDcDecompressData(CpaInstanceHandle dcInstance,
return CPA_STATUS_INVALID_PARAM;
}
-
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
/* Lock the session to check if there are in-flight stateful
* requests */
@@ -1864,7 +1863,6 @@ cpaDcDecompressData2(CpaInstanceHandle dcInstance,
callbackTag);
}
-
if (CPA_INSTANCE_HANDLE_SINGLE == dcInstance) {
insHandle = dcGetFirstHandle();
} else {
@@ -1931,7 +1929,6 @@ cpaDcDecompressData2(CpaInstanceHandle dcInstance,
return CPA_STATUS_INVALID_PARAM;
}
-
if (CPA_DC_STATEFUL == pSessionDesc->sessState) {
/* Lock the session to check if there are in-flight stateful
* requests */
diff --git a/sys/dev/qat/qat_api/common/compression/dc_session.c b/sys/dev/qat/qat_api/common/compression/dc_session.c
index c92d6eebdc47..60f4410dac32 100644
--- a/sys/dev/qat/qat_api/common/compression/dc_session.c
+++ b/sys/dev/qat/qat_api/common/compression/dc_session.c
@@ -151,7 +151,8 @@ dcCompHwBlockPopulate(sal_compression_service_t *pService,
}
/* Set delay match mode */
- if (CPA_TRUE == pService->comp_device_data.enableDmm) {
+ if (ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED ==
+ pService->comp_device_data.enableDmm) {
dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED;
} else {
dmm = ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED;
@@ -282,7 +283,8 @@ dcCompHwBlockPopulateGen4(sal_compression_service_t *pService,
hw_comp_lower_csr.hash_update =
ICP_QAT_HW_COMP_20_SKIP_HASH_UPDATE_DONT_ALLOW;
hw_comp_lower_csr.edmm =
- (CPA_TRUE == pService->comp_device_data.enableDmm) ?
+ (ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED ==
+ pService->comp_device_data.enableDmm) ?
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_ENABLED :
ICP_QAT_HW_COMP_20_EXTENDED_DELAY_MATCH_MODE_EDMM_DISABLED;
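
Both hunks above stop comparing enableDmm against CPA_TRUE and compare it against the hardware enum instead, which implies comp_device_data.enableDmm now stores ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_ENABLED or _DISABLED directly; that is an inference, since the field's declaration is not part of this diff. Under that assumption the first hunk could even collapse to a plain assignment:

	/* enableDmm is assumed to already hold the hardware enum value. */
	dmm = pService->comp_device_data.enableDmm;
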
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h
index afb3994daad7..6c9de34e7691 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_session.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -56,7 +56,7 @@
* while there are requests in flight.
*
* <b>Reference Count</b>\n
- * - The perform funcion increments the reference count for the session.
+ * - The perform function increments the reference count for the session.
* - The callback function decrements the reference count for the session.
* - The Remove function checks the reference count to ensure that it is 0.
*
@@ -256,14 +256,14 @@ typedef struct lac_session_desc_s {
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
* Because use can update session direction at any time,
* also forward key needs to be cached
*/
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
- * Reverse key always will be calcilated at session setup time and
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
+ * Reverse key always will be calculated at session setup time and
* cached to be used when needed */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
@@ -442,14 +442,14 @@ typedef struct lac_session_desc_d1_s {
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
* Because use can update session direction at any time,
* also forward key needs to be cached
*/
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
- * Reverse key always will be calcilated at session setup time and
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
+ * Reverse key always will be calculated at session setup time and
* cached to be used when needed */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
@@ -594,14 +594,14 @@ typedef struct lac_session_desc_d2_s {
/**< Cipher slice type to be used, set at init session time */
Cpa8U cipherAesXtsKey1Forward[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< Cached AES XTS Forward key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
* Because use can update session direction at any time,
* also forward key needs to be cached
*/
Cpa8U cipherAesXtsKey1Reverse[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< AES XTS Reverse key
- * For CPM2.0 AES XTS key convertion need to be done in SW.
- * Reverse key always will be calcilated at session setup time and
+ * For CPM2.0 AES XTS key conversions need to be done in SW.
+ * Reverse key always will be calculated at session setup time and
* cached to be used when needed */
Cpa8U cipherAesXtsKey2[LAC_CIPHER_AES_XTS_KEY_MAX_LENGTH];
/**< For AES XTS session need to store Key2 value in order to generate
@@ -625,7 +625,7 @@ typedef struct lac_session_desc_d2_s {
sizeof(LAC_ARCH_UINT))
/**< @ingroup LacSym_Session
* Size of the memory that the client has to allocate for a session. Extra
- * memory is needed to internally re-align the data. The pointer to the algined
+ * memory is needed to internally re-align the data. The pointer to the aligned
* data is stored at the start of the user allocated memory hence the extra
* space for an LAC_ARCH_UINT */
@@ -669,7 +669,7 @@ typedef struct lac_session_desc_d2_s {
*
* @param[in] instanceHandle_in Instance Handle
* @param[in] pSymCb callback function
-* @param[in] pSessionSetupData pointer to the strucutre containing the setup
+* @param[in] pSessionSetupData pointer to the structure containing the setup
*data
* @param[in] isDpSession CPA_TRUE for a data plane session
* @param[out] pSessionCtx Pointer to session context
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym.h
index 3eb41432aced..f7468b5d20dd 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -18,7 +18,7 @@
* The symmetric component demuliplexes the following crypto operations to
* the appropriate sub-components: cipher, hash, algorithm chaining and
* authentication encryption. It is a common layer between the above
- * mentioned components where common resources are allocated and paramater
+ * mentioned components where common resources are allocated and parameter
* checks are done. The operation specific resource allocation and parameter
* checks are done in the sub-component itself.
*
@@ -32,15 +32,15 @@
* chain to ensure it is valid.
* - \ref LacSymStats "Statistics": Manages statistics for symmetric
* - \ref LacSymQat "Symmetric QAT": The symmetric qat component is
- * initialiased by the symmetric component.
- * - \ref LacCipher "Cipher" : demultiplex cipher opertions to this component.
- * - \ref LacHash "Hash" : demultiplex hash opertions to this component.
+ * initialised by the symmetric component.
+ * - \ref LacCipher "Cipher" : demultiplex cipher operations to this component.
+ * - \ref LacHash "Hash" : demultiplex hash operations to this component.
* to this component.
* - \ref LacAlgChain "Algorithm Chaining": The algorithm chaining component
* - OSAL : Memory allocation, Mutex's, atomics
*
* @lld_initialisation
- * This component is initialied during the LAC initialisation sequence. It
+ * This component is initialized during the LAC initialisation sequence. It
* initialises the session table, statistics, symmetric QAT, initialises the
* hash definitions lookup table, the hash alg supported lookup table and
* registers a callback function with the symmetric response handler to process
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_alg_chain.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_alg_chain.h
index 1750fd0bebf4..095b7a426732 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_alg_chain.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_alg_chain.h
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cb.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cb.h
index 58caa321c410..703fa92b614a 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cb.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cb.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -21,7 +21,7 @@
* Dequeue pending requests
* @description
* This function is called by a callback function of a blocking
- * operation (either a partial packet or a hash precompute operaion)
+ * operation (either a partial packet or a hash precompute operation)
* in softIRQ context. It dequeues requests for the following reasons:
* 1. All pre-computes that happened when initialising a session
* have completed. Dequeue any requests that were queued on the
@@ -40,7 +40,7 @@ CpaStatus LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc);
/**
*****************************************************************************
* @ingroup LacSym
- * Register symmetric callback funcion handlers
+ * Register symmetric callback function handlers
*
* @description
* This function registers the symmetric callback handler functions with
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cipher.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cipher.h
index 3ae237939b2c..c3d8203dd8b0 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cipher.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -23,14 +23,14 @@
* and Triple-DES cipher algorithms, in ECB, CBC and CTR modes. The ARC4 stream
* cipher algorithm is also supported. Data may be provided as a full packet,
* or as a sequence of partial packets. The result of the operation can be
- * written back to the source buffer (in-place) or to a seperate output buffer
+ * written back to the source buffer (in-place) or to a separate output buffer
* (out-of-place). Data must be encapsulated in ICP buffers.
*
* The cipher component is responsible for implementing the cipher-specific
* functionality for registering and de-registering a session, for the perform
* operation and for processing the QAT responses to cipher requests. Statistics
* are maintained for cipher in the symmetric \ref CpaCySymStats64 "stats"
- * structure. This module has been seperated out into two. The cipher QAT module
+ * structure. This module has been separated out into two. The cipher QAT module
* deals entirely with QAT data structures. The cipher module itself has minimal
* exposure to the QAT data structures.
*
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash.h
index 4451e91ea5d6..19f6f5ddc69a 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash.h
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -24,7 +21,7 @@
*
* The hash component supports hashing in 3 modes. PLAIN, AUTH and NESTED.
* Plain mode is used to provide data integrity while auth mode is used to
- * provide integrity as well as its authenticity. Nested mode is inteded
+ * provide integrity as well as its authenticity. Nested mode is intended
* for use by non standard HMAC like algorithms such as for the SSL master
* key secret. Partial packets is supported for both plain and auth modes.
* In-place and out-of-place processing is supported for all modes. The
@@ -33,7 +30,7 @@
* The hash component is responsible for implementing the hash specific
* functionality for initialising a session and for a perform operation.
* Statistics are maintained in the symmetric \ref CpaCySymStats64 "stats"
- * structure. This module has been seperated out into two. The hash QAT module
+ * structure. This module has been separated out into two. The hash QAT module
* deals entirely with QAT data structures. The hash module itself has minimal
* exposure to the QAT data structures.
*
@@ -62,7 +59,7 @@
* the data path by the length of time it takes to do two hashes on a block
* size of data. Note: a partial packet operation generates an intermediate
* state. The final operation on a partial packet or when a full packet is
- * used applies padding and gives the final hash result. Esentially for the
+ * used applies padding and gives the final hash result. Essentially for the
* inner hash, a partial packet final is issued on the data, using the
* precomputed intermediate state and returns the digest.
*
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_defs.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_defs.h
index 6ebdcf659360..de9e74b31577 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_defs.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_defs.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -313,7 +313,7 @@
/**< @ingroup LacSymQatHash
* Macro to check for qat hash mode is set to 2 and the hash mode is
* Auth. This applies to HMAC algorithms (no pre compute). This is used
- * to differntiate between TLS and HMAC */
+ * to differentiate between TLS and HMAC */
#define IS_HASH_MODE_2_NESTED(qatHashMode, hashMode) \
((IS_HASH_MODE_2(qatHashMode)) && \
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_precomputes.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_precomputes.h
index cdb252fb5b46..cf5e21a14c5f 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_precomputes.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_hash_precomputes.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -34,7 +34,7 @@
/**< maximum size of the working data for the HMAC precompute operations
*
* Maximum size of lac_sym_hash_precomp_op_data_t is 264 bytes. For hash
- * precomputes there are 2 of these structrues and a further
+ * precomputes there are 2 of these structures and a further
* lac_sym_hash_precomp_op_t structure required. This comes to a total of 536
* bytes.
* For the asynchronous version of the precomputes, the memory for the hash
@@ -122,7 +122,7 @@ typedef struct lac_sym_hash_aes_precomp_qat_s {
Cpa8U contentDesc[LAC_SYM_QAT_MAX_CIPHER_SETUP_BLK_SZ];
/**< Content descriptor for a cipher operation */
Cpa8U data[LAC_SYM_HASH_PRECOMP_MAX_AES_ECB_DATA];
- /**< The data to be ciphered is conatined here and the result is
+ /**< The data to be ciphered is contained here and the result is
* written in place back into this buffer */
icp_qat_fw_la_cipher_req_params_t cipherReqParams;
/**< Request parameters as read in by the QAT */
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_key.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_key.h
index bae0d8faabc7..25c919d9b38d 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_key.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_key.h
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -16,7 +13,7 @@
*
* @lld_overview
*
- * Key generation component is reponsible for SSL, TLS & MGF operations. All
+ * Key generation component is responsible for SSL, TLS & MGF operations. All
* memory required for the keygen operations is got from the keygen cookie
* structure which is carved up as required.
*
@@ -24,8 +21,8 @@
* outer hash and SHA1 as the inner hash.
*
* Refer to sections in draft-freier-ssl-version3-02.txt:
- * 6.1 Asymmetric cryptographic computations - This refers to coverting
- * the pre master secret to the master secret.
+ * 6.1 Asymmetric cryptographic computations - This refers to converting
+ * the pre-master secret to the master secret.
* 6.2.2 Converting the master secret into keys and MAC secrets - Using
* the master secret to generate the key material.
*
@@ -40,11 +37,11 @@
*
* @lld_dependencies
* \ref LacSymQatHash: for building up hash content descriptor
- * \ref LacMem: for virt to phys coversions
+ * \ref LacMem: for virt to phys conversions
*
* @lld_initialisation
- * The reponse handler is registered with Symmetric. The Maximum SSL is
- * allocated. A structure is allocated containing all the TLS lables that
+ * The response handler is registered with Symmetric. The Maximum SSL is
+ * allocated. A structure is allocated containing all the TLS labels that
* are supported. On shutdown the memory for these structures are freed.
*
* @lld_module_algorithms
@@ -115,7 +112,7 @@
*
* @description
* This structure is used to hold the various TLS labels. Each field is
- * on an 8 byte boundary provided the structure itslef is 8 bytes aligned.
+ * on an 8 byte boundary provided the structure itself is 8 bytes aligned.
*****************************************************************************/
typedef struct lac_sym_key_tls_labels_s {
Cpa8U masterSecret[ICP_QAT_FW_LA_TLS_LABEL_LEN_MAX];
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_partial.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_partial.h
index 3f6c75ca7fb7..633d1c7afa96 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_partial.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_partial.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -20,7 +20,7 @@
* proceed where they would get an incorrect digest, cipher result.
*
* Maintain a SpinLock for partials in flight per session. Try and acquire this
- * SpinLock. If it cant be acquired return an error straight away to the client
+ * SpinLock. If it can't be acquired return an error straight away to the client
* as there is already a partial in flight. There is no blocking in the data
* path for this.
*
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat.h
index 986e230dc1ff..dc72601bae44 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -24,7 +24,7 @@
* - \ref LacMem "Memory" - Inline memory functions
*
* @lld_initialisation
- * This component is initialied during the LAC initialisation sequence. It
+ * This component is initialized during the LAC initialisation sequence. It
* is called by the Symmetric Initialisation function.
*
* @lld_module_algorithms
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_cipher.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_cipher.h
index 2f1d36dcd669..2f4a4511f4fc 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_cipher.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_cipher.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -172,7 +172,7 @@ void LacSymQat_CipherXTSModeUpdateKeyLen(lac_session_desc_t *pSessionDesc,
* LacSymQat_CipherCtrlBlockInitialize()
*
* @description
- * intialize the cipher control block with all zeros
+ * initialize the cipher control block with all zeros
*
* @param[in] pMsg Pointer to the common request message
*
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h
index 38c5892b0cc4..51a215ffc72a 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -42,7 +42,7 @@
* hash precomputes
*
* @description
- * This structure contains infomation on the hash precomputes
+ * This structure contains information on the hash precomputes
*
*****************************************************************************/
typedef struct lac_sym_qat_hash_precompute_info_s {
@@ -62,7 +62,7 @@ typedef struct lac_sym_qat_hash_precompute_info_s {
* hash state prefix buffer info
*
* @description
- * This structure contains infomation on the hash state prefix aad buffer
+ * This structure contains information on the hash state prefix aad buffer
*
*****************************************************************************/
typedef struct lac_sym_qat_hash_state_buffer_info_s {
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash_defs_lookup.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash_defs_lookup.h
index decc5d8c491b..050dd42ba9b2 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash_defs_lookup.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_qat_hash_defs_lookup.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -117,7 +117,7 @@ void LacSymQat_HashAlgLookupGet(CpaInstanceHandle instanceHandle,
/**
*******************************************************************************
* @ingroup LacSymQatHashDefsLookup
-* get hash defintions from lookup table.
+* get hash definitions from lookup table.
*
* @description
* This function looks up the hash lookup array for a structure
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_stats.h b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_stats.h
index 96f579b26c4c..ac7439713681 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_stats.h
+++ b/sys/dev/qat/qat_api/common/crypto/sym/include/lac_sym_stats.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -9,7 +9,7 @@
*
* @ingroup LacSym
*
- * Symetric Common consists of common statistics, buffer and partial packet
+ * Symmetric Common consists of common statistics, buffer and partial packet
* functionality.
*
***************************************************************************/
@@ -123,7 +123,7 @@ void LacSym_StatsFree(CpaInstanceHandle instanceHandle);
/**
*******************************************************************************
* @ingroup LacSymStats
-* Inrement a stat
+* Increment a stat
*
* @description
* This function incrementes a stat for a specific engine.
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c
index cab8d6c7796c..36e0175f988a 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/key/lac_sym_key.c
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -365,7 +362,6 @@ cpaCyKeyGenQueryStats(CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -393,7 +389,6 @@ cpaCyKeyGenQueryStats64(CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -917,7 +912,6 @@ cpaCyKeyGenMgf(const CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -956,7 +950,6 @@ cpaCyKeyGenMgfExt(const CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -2167,6 +2160,14 @@ LacSymKey_CheckParamSslTls(const void *pKeyGenOpData,
}
}
+ /* Reject a zero secret length; it is not valid for an SSL3
+ * Key Gen request */
+ if (0 == uSecretLen) {
+ LAC_INVALID_PARAM_LOG1("%u secret.dataLenInBytes",
+ uSecretLen);
+ return CPA_STATUS_INVALID_PARAM;
+ }
+
/* Only seed length for SSL3 Key Gen request */
if (maxSeedLen != uSeedLen) {
LAC_INVALID_PARAM_LOG("seed.dataLenInBytes");
@@ -2194,11 +2195,11 @@ LacSymKey_CheckParamSslTls(const void *pKeyGenOpData,
/* Api max value */
/* ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX needs to be
* multiplied
- * by 4 in order to verifiy the 512 conditions. We did
+ * by 4 in order to verify the 512 conditions. We did
* not change
* ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX as it
* represents
- * the max value tha firmware can handle.
+ * the max value that firmware can handle.
*/
maxSecretLen =
ICP_QAT_FW_LA_TLS_V1_1_SECRET_LEN_MAX * 4;
@@ -2206,11 +2207,11 @@ LacSymKey_CheckParamSslTls(const void *pKeyGenOpData,
/* Api max value */
/* ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX needs to be
* multiplied
- * by 8 in order to verifiy the 512 conditions. We did
+ * by 8 in order to verify the 512 conditions. We did
* not change
* ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX as it
* represents
- * the max value tha firmware can handle.
+ * the max value that firmware can handle.
*/
maxSecretLen =
ICP_QAT_FW_LA_TLS_V1_2_SECRET_LEN_MAX * 8;
@@ -2596,7 +2597,6 @@ cpaCyKeyGenTls(const CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -2669,7 +2669,6 @@ cpaCyKeyGenTls2(const CpaInstanceHandle instanceHandle_in,
{
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -2756,7 +2755,6 @@ cpaCyKeyGenTls3(const CpaInstanceHandle instanceHandle_in,
return CPA_STATUS_INVALID_PARAM;
}
-
return LacSymKey_KeyGenSslTls(instanceHandle_in,
pKeyGenCb,
pCallbackTag,
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c
index 5b4ebdc85654..56f211025103 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_alg_chain.c
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -403,7 +400,6 @@ LacAlgChain_HashCDBuild(
&sizeInBytes);
}
}
-
static Cpa16U
LacAlgChain_GetCipherConfigSize(lac_session_desc_t *pSessionDesc)
{
@@ -1178,7 +1174,7 @@ LacAlgChain_SessionInit(const CpaInstanceHandle instanceHandle,
&cipherOffsetInConstantsTable,
&hashOffsetInConstantsTable);
- /* for a certain combination of Algorthm Chaining we want to
+ /* for a certain combination of Algorithm Chaining we want to
use an optimised cd block */
if (pSessionDesc->symOperation ==
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c
index a0891278cb52..6f330835902c 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_api.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -336,7 +336,6 @@ LacSymSession_ParamCheck(const CpaInstanceHandle instanceHandle,
return CPA_STATUS_SUCCESS;
}
-
/**
* @ingroup LacSym
* Function which perform parameter checks on data buffers for symmetric
@@ -445,7 +444,7 @@ LacSymPerform_BufferParamCheck(const CpaBufferList *const pSrcBuffer,
}
}
- /* check for partial packet suport for the session operation */
+ /* check for partial packet support for the session operation */
if (CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType) {
if (CPA_FALSE == pSessionDesc->isPartialSupported) {
/* return out here to simplify cleanup */
@@ -569,7 +568,6 @@ LacSym_InitSession(const CpaInstanceHandle instanceHandle,
return CPA_STATUS_INVALID_PARAM;
}
-
pCipherSetupData = &pSessionSetupData->cipherSetupData;
pHashSetupData = &pSessionSetupData->hashSetupData;
@@ -673,7 +671,6 @@ cpaCySymRemoveSession(const CpaInstanceHandle instanceHandle_in,
CpaInstanceHandle instanceHandle = NULL;
Cpa64U numPendingRequests = 0;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -794,7 +791,6 @@ LacSym_Perform(const CpaInstanceHandle instanceHandle,
}
}
-
/* If synchronous Operation - Callback function stored in the session
* descriptor so a flag is set in the perform to indicate that
* the perform is being re-called for the synchronous operation */
@@ -872,7 +868,7 @@ LacSym_Perform(const CpaInstanceHandle instanceHandle,
pVerifyResult);
if (CPA_STATUS_SUCCESS == status) {
- /* check for partial packet suport for the session operation */
+ /* check for partial packet support for the session operation */
if (CPA_CY_SYM_PACKET_TYPE_FULL != pOpData->packetType) {
LacSym_PartialPacketStateUpdate(
pOpData->packetType, &pSessionDesc->partialState);
@@ -923,7 +919,6 @@ cpaCySymQueryStats(const CpaInstanceHandle instanceHandle_in,
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -955,7 +950,6 @@ cpaCySymQueryStats64(const CpaInstanceHandle instanceHandle_in,
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle =
Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO_SYM);
@@ -1037,7 +1031,6 @@ cpaCySymSessionCtxGetDynamicSize(
/* Choose Session Context size */
getCtxSize(pSessionSetupData, pSessionCtxSizeInBytes);
-
return CPA_STATUS_SUCCESS;
}
@@ -1116,6 +1109,5 @@ cpaCyBufferListGetMetaSize(const CpaInstanceHandle instanceHandle_in,
(sizeof(icp_flat_buffer_desc_t) * numBuffers) +
ICP_DESCRIPTOR_ALIGNMENT_BYTES;
-
return CPA_STATUS_SUCCESS;
}
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_auth_enc.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_auth_enc.c
index dd018a25a88c..67ffeafcd48d 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_auth_enc.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_auth_enc.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -91,7 +91,6 @@ LacSymAlgChain_CheckCCMData(Cpa8U *pAdditionalAuthData,
return CPA_STATUS_SUCCESS;
}
-
/**
* @ingroup LacAuthEnc
*/
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c
index 4e47de384a1a..d88c6707b9df 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cb.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -296,7 +296,7 @@ LacSymCb_ProcessDpCallback(CpaCySymDpOpData *pResponse,
/* For CCM and GCM, if qatRespStatusOkFlag is false, the data has to be
* cleaned as stated in RFC 3610; in DP mode, it is the user
- * responsability to do so */
+ * responsibility to do so */
if (((CPA_CY_SYM_OP_CIPHER == pSessionDesc->symOperation) &&
SPC != pSessionDesc->singlePassState) ||
@@ -450,12 +450,11 @@ LacSymCb_PendingReqsDequeue(lac_session_desc_t *pSessionDesc)
}
/*
- * Now we'll attempt to send the message directly to QAT. We'll
- * keep looing until it succeeds (or at least a very high number
- * of retries), as the failure only happens when the ring is
- * full, and this is only a temporary situation. After a few
- * retries, space will become availble, allowing the putMsg to
- * succeed.
+ * Now we'll attempt to send the message directly to QAT. We'll keep
+ * looking until it succeeds (or at least a very high number of
+ * retries), as the failure only happens when the ring is full,
+ * and this is only a temporary situation. After a few retries,
+ * space will become available, allowing the putMsg to succeed.
*/
retries = 0;
do {
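
The reworded comment above describes a bounded busy-retry: keep re-issuing the put until the ring has space or a retry cap is hit. A minimal sketch of that pattern, with invented names (qat_ring_put(), RING_FULL and MAX_PUT_RETRIES are illustrative placeholders, not the driver's real identifiers):

#define MAX_PUT_RETRIES	100000		/* illustrative cap, not the real value */
#define RING_FULL	(-1)		/* illustrative "ring full" status */

extern int qat_ring_put(void *ring, void *msg);	/* hypothetical send call */

static int
put_with_retries(void *ring, void *msg)
{
	unsigned int retries = 0;
	int status;

	/* Failure only happens while the ring is full, and space frees
	 * up as responses drain, so the condition is temporary. */
	do {
		status = qat_ring_put(ring, msg);
	} while (status == RING_FULL && ++retries < MAX_PUT_RETRIES);

	return (status);
}
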
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c
index 1c24c13a5ebe..0c01ceac2e19 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_cipher.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -143,7 +143,7 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
CPA_TRUE;
}
/* For subsequent partials in a sequence, we'll
- * re-use the IV that was written back by the
+ * reuse the IV that was written back by the
* QAT, using internal request queueing if
* necessary to ensure that the next partial
* request isn't issued to the QAT until the
@@ -215,7 +215,6 @@ LacCipher_PerformIvCheck(sal_service_t *pService,
return CPA_STATUS_SUCCESS;
}
-
CpaStatus
LacCipher_SessionSetupDataCheck(const CpaCySymCipherSetupData *pCipherSetupData,
Cpa32U capabilitiesMask)
@@ -440,7 +439,7 @@ LacCipher_GetCipherSliceType(sal_crypto_service_t *pService,
Cpa32U capabilitiesMask =
pService->generic_service_info.capabilitiesMask;
- /* UCS Slice is supproted only in Gen4 */
+ /* UCS Slice is supported only in Gen4 */
if (isCyGen4x(pService)) {
if (LAC_CIPHER_IS_XTS_MODE(cipherAlgorithm) ||
LAC_CIPHER_IS_CHACHA(cipherAlgorithm) ||
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_compile_check.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_compile_check.c
index d732add29c76..0313032aac84 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_compile_check.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_compile_check.c
@@ -1,8 +1,5 @@
-/***************************************************************************
- *
- * <COPYRIGHT_TAG>
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
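
As in lac_sym_alg_chain.c above, this file swaps the old <COPYRIGHT_TAG> banner for the project's standard two-line SPDX header; the complete replacement is simply:

/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
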
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c
index 1957126e0f1c..65a0d17d307f 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_dp.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -962,7 +962,6 @@ cpaCySymDpInitSession(CpaInstanceHandle instanceHandle,
CpaStatus status = CPA_STATUS_FAIL;
sal_service_t *pService = NULL;
-
LAC_CHECK_INSTANCE_HANDLE(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
@@ -999,7 +998,6 @@ cpaCySymDpRegCbFunc(const CpaInstanceHandle instanceHandle,
{
sal_crypto_service_t *pService = (sal_crypto_service_t *)instanceHandle;
-
LAC_CHECK_INSTANCE_HANDLE(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
@@ -1021,7 +1019,6 @@ cpaCySymDpEnqueueOp(CpaCySymDpOpData *pRequest, const CpaBoolean performOpNow)
CpaStatus status = CPA_STATUS_SUCCESS;
-
LAC_CHECK_NULL_PARAM(pRequest);
status = LacDp_EnqueueParamCheck(pRequest);
if (CPA_STATUS_SUCCESS != status) {
@@ -1064,7 +1061,6 @@ cpaCySymDpPerformOpNow(const CpaInstanceHandle instanceHandle)
{
icp_comms_trans_handle trans_handle = NULL;
-
LAC_CHECK_INSTANCE_HANDLE(instanceHandle);
SAL_CHECK_INSTANCE_TYPE(instanceHandle,
(SAL_SERVICE_TYPE_CRYPTO |
@@ -1097,7 +1093,6 @@ cpaCySymDpEnqueueOpBatch(const Cpa32U numberRequests,
CpaStatus status = CPA_STATUS_SUCCESS;
sal_crypto_service_t *pService = NULL;
-
LAC_CHECK_NULL_PARAM(pRequests);
LAC_CHECK_NULL_PARAM(pRequests[0]);
LAC_CHECK_NULL_PARAM(pRequests[0]->instanceHandle);
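
Each hunk in this file only drops the blank line separating the local declarations from the LAC_CHECK_* guards. Those guards validate arguments and fail fast; their definitions are not shown in this diff, so the following is an assumed shape for a NULL-pointer check, not the real macro:

/* Assumed shape of a LAC_CHECK_NULL_PARAM-style guard; the real
 * macro lives in the LAC common headers and may differ in detail. */
#define CHECK_NULL_PARAM(param)					\
	do {							\
		if (NULL == (param))				\
			return CPA_STATUS_INVALID_PARAM;	\
	} while (0)
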
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c
index a2d313c956ba..46f652cfd5c6 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -366,7 +366,6 @@ LacHash_PrecomputeDataCreate(const CpaInstanceHandle instanceHandle,
return status;
}
-
/** @ingroup LacHash */
CpaStatus
LacHash_HashContextCheck(CpaInstanceHandle instanceHandle,
@@ -752,10 +751,13 @@ LacHash_PerformParamCheck(CpaInstanceHandle instanceHandle,
&pHashAlgInfo);
/* check if the message is a multiple of the block size. */
- if ((pOpData->messageLenToHashInBytes %
- pHashAlgInfo->blockLength) != 0) {
- LAC_INVALID_PARAM_LOG(
- "messageLenToHashInBytes not block size");
+ if (pOpData->messageLenToHashInBytes %
+ pHashAlgInfo->blockLength !=
+ 0) {
+ LAC_INVALID_PARAM_LOG2(
+ "message(%d) not block-size(%d) multiple",
+ pOpData->messageLenToHashInBytes,
+ pHashAlgInfo->blockLength);
return CPA_STATUS_INVALID_PARAM;
}
}
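
The validation hunk above now reports both the offending message length and the expected block size instead of a bare one-line error. A self-contained sketch of the same check, with the LOG2-style macro stubbed out via stderr (block_len is assumed nonzero, as it is for the hash algorithms this guards):

#include <stdio.h>

/* Illustrative stand-in for LAC_INVALID_PARAM_LOG2(); the real macro
 * goes through the LAC logging machinery. */
#define INVALID_PARAM_LOG2(fmt, a, b) \
	fprintf(stderr, "Invalid API param: " fmt "\n", (a), (b))

static int
check_block_multiple(unsigned int msg_len, unsigned int block_len)
{
	if (msg_len % block_len != 0) {
		INVALID_PARAM_LOG2("message(%u) not block-size(%u) multiple",
		    msg_len, block_len);
		return (-1);	/* stands in for CPA_STATUS_INVALID_PARAM */
	}
	return (0);		/* stands in for CPA_STATUS_SUCCESS */
}
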
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash_sw_precomputes.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash_sw_precomputes.c
index 05bbd3a52d6e..e29de6f1c729 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash_sw_precomputes.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_hash_sw_precomputes.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -53,7 +53,7 @@ LacSymHash_Compute(CpaCySymHashAlgorithm hashAlgorithm,
* Note: from SHA hashes appropriate endian swapping is required.
* For sha1, sha224 and sha256 double words based swapping.
* For sha384 and sha512 quad words swapping.
- * No endianes swapping for md5 is required.
+ * No endianness swapping for md5 is required.
*/
CpaStatus status = CPA_STATUS_FAIL;
Cpa32U i = 0;
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c
index a840882acaf9..5c76d5d81324 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/lac_sym_queue.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -111,7 +111,7 @@ LacSymQueue_RequestSend(const CpaInstanceHandle instanceHandle,
*/
if (CPA_CY_SYM_PACKET_TYPE_FULL !=
pRequest->pOpData->packetType) {
- /* Select blocking operations which this reqest will
+ /* Select blocking operations which this request will
* complete */
pSessionDesc->nonBlockingOpsInProgress = CPA_FALSE;
}
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c
index 6b436edf16cd..7735d07bc620 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -244,7 +244,7 @@ LacSymQat_UseSymConstantsTable(lac_session_desc_t *pSession,
*pCipherOffset = 0;
*pHashOffset = 0;
- /* for chaining can we use the optimised content descritor */
+ /* for chaining can we use the optimised content descriptor */
if (pSession->laCmdId == ICP_QAT_FW_LA_CMD_CIPHER_HASH ||
pSession->laCmdId == ICP_QAT_FW_LA_CMD_HASH_CIPHER) {
useOptimisedContentDesc =
@@ -297,7 +297,7 @@ LacSymQat_UseSymConstantsTable(lac_session_desc_t *pSession,
(pSession->qatHashMode == ICP_QAT_HW_AUTH_MODE1)) {
/* we can only use the SHA1-mode1 in the SHRAM constants
* table when
- * we are using the opimised content desc */
+ * we are using the optimised content desc */
return CPA_FALSE;
}
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c
index 4900cf7996aa..d8a7ac75aec1 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_cipher.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -214,6 +214,11 @@ static const uint8_t key_size_f8[] = {
ICP_QAT_HW_CIPHER_ALGO_AES256 /* ICP_QAT_HW_AES_256_F8_KEY_SZ */
};
+/* This array must be kept aligned with the CpaCySymCipherAlgorithm enum,
+ * offset by -1, as that enum starts at 1. LacSymQat_CipherGetCfgData()
+ * below relies on that alignment and uses the enum value minus 1 to
+ * index into this array.
+ */
typedef struct _icp_qat_hw_cipher_info {
icp_qat_hw_cipher_algo_t algorithm;
icp_qat_hw_cipher_mode_t mode;
@@ -478,7 +483,7 @@ LacSymQat_CipherCtrlBlockWrite(icp_qat_la_bulk_req_ftr_t *pMsg,
in this case, and add padding. It makes no sense
to force applications to provide such key length for couple reasons:
1. It won't be possible to distinguish between AES 192 and 256 based
- on key lenght only
+ on key length only
2. Only some modes of AES will use UCS slice, then application will
have to know which ones */
if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType &&
@@ -542,7 +547,7 @@ LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession,
sal_crypto_service_t *pService =
(sal_crypto_service_t *)pSession->pInstance;
- CpaCySymCipherAlgorithm cipherAlgorithm = 0;
+ int cipherIdx = 0;
icp_qat_hw_cipher_dir_t cipherDirection = 0;
/* Set defaults */
@@ -551,21 +556,33 @@ LacSymQat_CipherGetCfgData(lac_session_desc_t *pSession,
*pMode = ICP_QAT_HW_CIPHER_ECB_MODE;
*pDir = ICP_QAT_HW_CIPHER_ENCRYPT;
- /* decrease since it's numbered from 1 instead of 0 */
- cipherAlgorithm = pSession->cipherAlgorithm - 1;
+ /* offset the index, as the CpaCySymCipherAlgorithm enum starts from
+ * 1, not from 0 */
+ cipherIdx = pSession->cipherAlgorithm - 1;
cipherDirection =
pSession->cipherDirection == CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT ?
ICP_QAT_HW_CIPHER_ENCRYPT :
ICP_QAT_HW_CIPHER_DECRYPT;
- *pAlgorithm = icp_qat_alg_info[cipherAlgorithm].algorithm;
- *pMode = icp_qat_alg_info[cipherAlgorithm].mode;
- *pDir = icp_qat_alg_info[cipherAlgorithm].dir[cipherDirection];
+ /* Boundary check against the last value in the algorithm enum */
+ if (!(pSession->cipherAlgorithm <= CPA_CY_SYM_CIPHER_SM4_CTR)) {
+ QAT_UTILS_LOG("Invalid cipherAlgorithm value\n");
+ return;
+ }
+
+ if (!(cipherDirection <= ICP_QAT_HW_CIPHER_DECRYPT)) {
+ QAT_UTILS_LOG("Invalid cipherDirection value\n");
+ return;
+ }
+
+ *pAlgorithm = icp_qat_alg_info[cipherIdx].algorithm;
+ *pMode = icp_qat_alg_info[cipherIdx].mode;
+ *pDir = icp_qat_alg_info[cipherIdx].dir[cipherDirection];
*pKey_convert =
- icp_qat_alg_info[cipherAlgorithm].key_convert[cipherDirection];
+ icp_qat_alg_info[cipherIdx].key_convert[cipherDirection];
- if (IS_KEY_DEP_NO != icp_qat_alg_info[cipherAlgorithm].isKeyLenDepend) {
- *pAlgorithm = icp_qat_alg_info[cipherAlgorithm]
+ if (IS_KEY_DEP_NO != icp_qat_alg_info[cipherIdx].isKeyLenDepend) {
+ *pAlgorithm = icp_qat_alg_info[cipherIdx]
.pAlgByKeySize[pSession->cipherKeyLenInBytes];
}
@@ -657,12 +674,11 @@ LacSymQat_CipherHwBlockPopulateKeySetup(
/* Special handling of AES 192 key for UCS slice.
UCS requires it to have 32 bytes - set is as targetKeyLen
in this case, and add padding. It makes no sense
- to force applications to provide such key length for couple
- reasons:
- 1. It won't be possible to distinguish between AES 192 and
- 256 based on key lenght only
- 2. Only some modes of AES will use UCS slice, then
- application will have to know which ones */
+ to force applications to provide such a key length, for a couple of reasons:
+ 1. It won't be possible to distinguish between AES 192 and 256 based
+ on key length only
+ 2. Only some modes of AES will use UCS slice, then application will
+ have to know which ones */
if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE == sliceType &&
ICP_QAT_HW_AES_192_KEY_SZ == targetKeyLenInBytes) {
targetKeyLenInBytes = ICP_QAT_HW_UCS_AES_192_KEY_SZ;
@@ -918,10 +934,10 @@ LacSymQat_CipherRequestParamsPopulate(lac_session_desc_t *pSessionDesc,
/* Populate the field with the contents of the buffer,
* zero field first as data may be smaller than the field */
- /* In case of XTS mode using UCS slice always embedd IV.
- * IV provided by user needs to be encrypted to calculate
- * initial tweak, use pCipherReqParams->u.cipher_IV_array as
- * destination buffer for tweak value */
+ /* In case of XTS mode using UCS slice always encrypt the embedded IV.
+ * IV provided by user needs to be encrypted to calculate initial tweak,
+ * use pCipherReqParams->u.cipher_IV_array as destination buffer for
+ * tweak value */
if (ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE ==
pSessionDesc->cipherSliceType &&
LAC_CIPHER_IS_XTS_MODE(pSessionDesc->cipherAlgorithm)) {
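
The LacSymQat_CipherGetCfgData() changes above rename cipherAlgorithm to cipherIdx and, more importantly, bounds-check both the algorithm and the direction before using them to index icp_qat_alg_info[], closing an out-of-range table read. A self-contained sketch of the enum-minus-one lookup pattern, using an invented enum and table rather than the real QAT ones:

#include <stdio.h>

/* Hypothetical 1-based enum, mirroring how CpaCySymCipherAlgorithm
 * starts at 1 rather than 0. */
enum alg { ALG_A = 1, ALG_B, ALG_C, ALG_LAST = ALG_C };

struct alg_info { const char *name; };

/* Table indexed by (enum value - 1); must stay aligned with enum alg. */
static const struct alg_info alg_info[] = {
	{ "alg-a" }, { "alg-b" }, { "alg-c" },
};

static const struct alg_info *
get_cfg(enum alg a)
{
	/* Boundary check against the first and last enum values
	 * before the -1 offset is applied. */
	if (a < ALG_A || a > ALG_LAST) {
		fprintf(stderr, "Invalid algorithm value\n");
		return (NULL);
	}
	return (&alg_info[a - 1]);
}
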
diff --git a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
index 40ae7b1d1de7..e99f264ccd67 100644
--- a/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
+++ b/sys/dev/qat/qat_api/common/crypto/sym/qat/lac_sym_qat_hash_defs_lookup.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -42,7 +42,7 @@ typedef struct lac_sym_qat_hash_def_map_s {
CpaCySymHashAlgorithm hashAlgorithm;
/* hash algorithm */
lac_sym_qat_hash_defs_t hashDefs;
- /* hash defintions pointers */
+ /* hash definitions pointers */
} lac_sym_qat_hash_def_map_t;
/*
@@ -219,7 +219,7 @@ static lac_sym_qat_hash_alg_info_t sm3Info = { LAC_HASH_SM3_DIGEST_SIZE,
static lac_sym_qat_hash_alg_info_t polyInfo = { LAC_HASH_POLY_DIGEST_SIZE,
LAC_HASH_POLY_BLOCK_SIZE,
- NULL, /* intial state */
+ NULL, /* initial state */
LAC_HASH_POLY_STATE_SIZE };
static lac_sym_qat_hash_alg_info_t xcbcMacInfo = {
@@ -239,7 +239,7 @@ static lac_sym_qat_hash_alg_info_t aesCmacInfo = {
static lac_sym_qat_hash_alg_info_t aesCcmInfo = {
LAC_HASH_AES_CCM_DIGEST_SIZE,
LAC_HASH_AES_CCM_BLOCK_SIZE,
- NULL, /* intial state */
+ NULL, /* initial state */
0 /* state size */
};
diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
index c0f5a411d87e..e8ae47f0f0d3 100644
--- a/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
+++ b/sys/dev/qat/qat_api/common/ctrl/sal_compression.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file sal_compression.c
@@ -371,9 +371,6 @@ SalCtrl_CompressionInit(icp_accel_dev_t *device, sal_service_t *service)
SAL_SERVICE_GOOD_FOR_INIT(pCompressionService);
- pCompressionService->generic_service_info.state =
- SAL_SERVICE_STATE_INITIALIZING;
-
if (CPA_FALSE == pCompressionService->generic_service_info.is_dyn) {
section = icpGetProcessName();
}
@@ -1438,7 +1435,8 @@ cpaDcInstanceGetInfo2(const CpaInstanceHandle instanceHandle,
pInstanceInfo2->isOffloaded = CPA_TRUE;
/* Get the instance name and part name from the config file */
dev = icp_adf_getAccelDevByAccelId(pCompressionService->pkgID);
- if (NULL == dev) {
+ if (NULL == dev ||
+ 0 == strnlen(dev->deviceName, ADF_DEVICE_TYPE_LENGTH + 1)) {
QAT_UTILS_LOG("Can not find device for the instance.\n");
LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2));
return CPA_STATUS_FAIL;
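
Both this hunk and the matching one in sal_crypto.c below tighten the device lookup: a device with an empty deviceName is now rejected alongside a NULL device, since the name is about to be copied into the instance info. A reduced sketch of that guard (the struct and length bound are illustrative stand-ins for icp_accel_dev_t and ADF_DEVICE_TYPE_LENGTH):

#include <string.h>

#define DEV_TYPE_LEN 16				/* illustrative bound */

struct accel_dev { char deviceName[DEV_TYPE_LEN + 1]; };

static int
dev_usable(const struct accel_dev *dev)
{
	/* Reject a missing device and one whose name is empty;
	 * strnlen() keeps the scan inside the fixed-size field. */
	if (dev == NULL ||
	    strnlen(dev->deviceName, DEV_TYPE_LEN + 1) == 0)
		return (0);
	return (1);
}
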
diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c
index cba75eb41c17..3e134f43af6e 100644
--- a/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c
+++ b/sys/dev/qat/qat_api/common/ctrl/sal_crypto.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -73,6 +73,15 @@
#define NUM_CRYPTO_ASYM_RX_RINGS 1
#define NUM_CRYPTO_NRBG_RX_RINGS 1
+CpaStatus Lac_GetCyInstancesByType(
+ const CpaAccelerationServiceType accelerationServiceType,
+ Cpa16U numInstances,
+ CpaInstanceHandle *pInstances);
+
+CpaStatus Lac_GetCyNumInstancesByType(
+ const CpaAccelerationServiceType accelerationServiceType,
+ Cpa16U *pNumInstances);
+
static CpaInstanceHandle
Lac_CryptoGetFirstHandle(void)
{
@@ -88,7 +97,6 @@ Lac_CryptoGetFirstHandle(void)
return instHandle;
}
-
/* Function to release the sym handles. */
static CpaStatus
SalCtrl_SymReleaseTransHandle(sal_service_t *service)
@@ -116,7 +124,6 @@ SalCtrl_SymReleaseTransHandle(sal_service_t *service)
return ret_status;
}
-
/*
* @ingroup sal_crypto
* Frees resources (memory and transhandles) if allocated
@@ -153,7 +160,6 @@ SalCtrl_SymFreeResources(sal_crypto_service_t *pCryptoService)
return status;
}
-
/**
***********************************************************************
* @ingroup SalCtrl
@@ -175,7 +181,6 @@ SalCtrl_SymFreeResources(sal_crypto_service_t *pCryptoService)
} \
} while (0)
-
/* Function that creates the Sym Handles. */
static CpaStatus
SalCtrl_SymCreateTransHandle(icp_accel_dev_t *device,
@@ -325,7 +330,6 @@ SalCtrl_CryptoDebug(void *private_data, char *data, int size, int offset)
return ++offset;
}
-
static CpaStatus
SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service)
{
@@ -342,7 +346,6 @@ SalCtrl_SymInit(icp_accel_dev_t *device, sal_service_t *service)
section = icpGetProcessName();
}
-
/* Register callbacks for the symmetric services
* (Hash, Cipher, Algorithm-Chaining) (returns void)*/
LacSymCb_CallbacksRegister();
@@ -581,7 +584,6 @@ SalCtr_InstInit(icp_accel_dev_t *device, sal_service_t *service)
section = icpGetProcessName();
}
-
/* Get Config Info: Accel Num, bank Num, packageID,
coreAffinity, nodeAffinity and response mode */
@@ -847,7 +849,6 @@ SalCtrl_CryptoShutdown(icp_accel_dev_t *device, sal_service_t *service)
return CPA_STATUS_FAIL;
}
-
/* Free memory and transhandles */
switch (svc_type) {
case SAL_SERVICE_TYPE_CRYPTO_ASYM:
@@ -886,7 +887,6 @@ cpaCyGetStatusText(const CpaInstanceHandle instanceHandle,
{
CpaStatus status = CPA_STATUS_SUCCESS;
-
LAC_CHECK_NULL_PARAM(pStatusText);
switch (errStatus) {
@@ -969,7 +969,6 @@ cpaCyStartInstance(CpaInstanceHandle instanceHandle_in)
CpaStatus status = CPA_STATUS_SUCCESS;
sal_crypto_service_t *pService = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle = Lac_GetFirstHandle(SAL_SERVICE_TYPE_CRYPTO);
if (!instanceHandle) {
@@ -1021,7 +1020,6 @@ cpaCyStopInstance(CpaInstanceHandle instanceHandle_in)
CpaStatus status = CPA_STATUS_SUCCESS;
sal_crypto_service_t *pService = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle = Lac_CryptoGetFirstHandle();
} else {
@@ -1066,7 +1064,6 @@ cpaCyInstanceSetNotificationCb(
CpaStatus status = CPA_STATUS_SUCCESS;
sal_service_t *gen_handle = instanceHandle;
-
LAC_CHECK_NULL_PARAM(gen_handle);
gen_handle->notification_cb = pInstanceNotificationCb;
gen_handle->cb_tag = pCallbackTag;
@@ -1080,84 +1077,8 @@ cpaCyInstanceSetNotificationCb(
CpaStatus
cpaCyGetNumInstances(Cpa16U *pNumInstances)
{
- CpaStatus status = CPA_STATUS_SUCCESS;
- CpaInstanceHandle cyInstanceHandle;
- CpaInstanceInfo2 info;
- icp_accel_dev_t **pAdfInsts = NULL;
- icp_accel_dev_t *dev_addr = NULL;
- sal_t *base_addr = NULL;
- sal_list_t *list_temp = NULL;
- Cpa16U num_accel_dev = 0;
- Cpa16U num_inst = 0;
- Cpa16U i = 0;
-
- LAC_CHECK_NULL_PARAM(pNumInstances);
-
- /* Get the number of accel_dev in the system */
- status = icp_amgr_getNumInstances(&num_accel_dev);
- LAC_CHECK_STATUS(status);
-
- /* Allocate memory to store addr of accel_devs */
- pAdfInsts =
- malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK);
- num_accel_dev = 0;
- /* Get ADF to return all accel_devs that support either
- * symmetric or asymmetric crypto */
- status = icp_amgr_getAllAccelDevByCapabilities(
- (ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC),
- pAdfInsts,
- &num_accel_dev);
- if (CPA_STATUS_SUCCESS != status) {
- LAC_LOG_ERROR("No support for crypto\n");
- *pNumInstances = 0;
- free(pAdfInsts, M_QAT);
- return status;
- }
-
- for (i = 0; i < num_accel_dev; i++) {
- dev_addr = (icp_accel_dev_t *)pAdfInsts[i];
- if (NULL == dev_addr || NULL == dev_addr->pSalHandle) {
- continue;
- }
-
- base_addr = dev_addr->pSalHandle;
- list_temp = base_addr->crypto_services;
- while (NULL != list_temp) {
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- if (CPA_STATUS_SUCCESS == status &&
- CPA_TRUE == info.isPolled) {
- num_inst++;
- }
- list_temp = SalList_next(list_temp);
- }
- list_temp = base_addr->asym_services;
- while (NULL != list_temp) {
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- if (CPA_STATUS_SUCCESS == status &&
- CPA_TRUE == info.isPolled) {
- num_inst++;
- }
- list_temp = SalList_next(list_temp);
- }
- list_temp = base_addr->sym_services;
- while (NULL != list_temp) {
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- if (CPA_STATUS_SUCCESS == status &&
- CPA_TRUE == info.isPolled) {
- num_inst++;
- }
- list_temp = SalList_next(list_temp);
- }
- }
- *pNumInstances = num_inst;
- free(pAdfInsts, M_QAT);
-
-
- return status;
+ return Lac_GetCyNumInstancesByType(CPA_ACC_SVC_TYPE_CRYPTO,
+ pNumInstances);
}
/**
@@ -1167,119 +1088,9 @@ cpaCyGetNumInstances(Cpa16U *pNumInstances)
CpaStatus
cpaCyGetInstances(Cpa16U numInstances, CpaInstanceHandle *pCyInstances)
{
- CpaStatus status = CPA_STATUS_SUCCESS;
- CpaInstanceHandle cyInstanceHandle;
- CpaInstanceInfo2 info;
- icp_accel_dev_t **pAdfInsts = NULL;
- icp_accel_dev_t *dev_addr = NULL;
- sal_t *base_addr = NULL;
- sal_list_t *list_temp = NULL;
- Cpa16U num_accel_dev = 0;
- Cpa16U num_allocated_instances = 0;
- Cpa16U index = 0;
- Cpa16U i = 0;
-
-
- LAC_CHECK_NULL_PARAM(pCyInstances);
- if (0 == numInstances) {
- LAC_INVALID_PARAM_LOG("NumInstances is 0");
- return CPA_STATUS_INVALID_PARAM;
- }
-
- /* Get the number of crypto instances */
- status = cpaCyGetNumInstances(&num_allocated_instances);
- if (CPA_STATUS_SUCCESS != status) {
- return status;
- }
-
- if (numInstances > num_allocated_instances) {
- QAT_UTILS_LOG("Only %d crypto instances available\n",
- num_allocated_instances);
- return CPA_STATUS_RESOURCE;
- }
-
- /* Get the number of accel devices in the system */
- status = icp_amgr_getNumInstances(&num_accel_dev);
- LAC_CHECK_STATUS(status);
-
- /* Allocate memory to store addr of accel_devs */
- pAdfInsts =
- malloc(num_accel_dev * sizeof(icp_accel_dev_t *), M_QAT, M_WAITOK);
-
- num_accel_dev = 0;
- /* Get ADF to return all accel_devs that support either
- * symmetric or asymmetric crypto */
- status = icp_amgr_getAllAccelDevByCapabilities(
- (ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC),
- pAdfInsts,
- &num_accel_dev);
- if (CPA_STATUS_SUCCESS != status) {
- LAC_LOG_ERROR("No support for crypto\n");
- free(pAdfInsts, M_QAT);
- return status;
- }
-
- for (i = 0; i < num_accel_dev; i++) {
- dev_addr = (icp_accel_dev_t *)pAdfInsts[i];
- /* Note dev_addr cannot be NULL here as numInstances = 0
- * is not valid and if dev_addr = NULL then index = 0 (which
- * is less than numInstances and status is set to _RESOURCE
- * above
- */
- base_addr = dev_addr->pSalHandle;
- if (NULL == base_addr) {
- continue;
- }
- list_temp = base_addr->crypto_services;
- while (NULL != list_temp) {
- if (index > (numInstances - 1)) {
- break;
- }
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- list_temp = SalList_next(list_temp);
- if (CPA_STATUS_SUCCESS != status ||
- CPA_TRUE != info.isPolled) {
- continue;
- }
- pCyInstances[index] = cyInstanceHandle;
- index++;
- }
- list_temp = base_addr->asym_services;
- while (NULL != list_temp) {
- if (index > (numInstances - 1)) {
- break;
- }
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- list_temp = SalList_next(list_temp);
- if (CPA_STATUS_SUCCESS != status ||
- CPA_TRUE != info.isPolled) {
- continue;
- }
- pCyInstances[index] = cyInstanceHandle;
- index++;
- }
- list_temp = base_addr->sym_services;
- while (NULL != list_temp) {
- if (index > (numInstances - 1)) {
- break;
- }
- cyInstanceHandle = SalList_getObject(list_temp);
- status = cpaCyInstanceGetInfo2(cyInstanceHandle, &info);
- list_temp = SalList_next(list_temp);
- if (CPA_STATUS_SUCCESS != status ||
- CPA_TRUE != info.isPolled) {
- continue;
- }
- pCyInstances[index] = cyInstanceHandle;
- index++;
- }
- }
- free(pAdfInsts, M_QAT);
-
- return status;
+ return Lac_GetCyInstancesByType(CPA_ACC_SVC_TYPE_CRYPTO,
+ numInstances,
+ pCyInstances);
}
/**
@@ -1359,7 +1170,6 @@ cpaCyInstanceGetInfo2(const CpaInstanceHandle instanceHandle_in,
char valStr[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
char *section = DYN_SEC;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle = Lac_CryptoGetFirstHandle();
} else {
@@ -1423,9 +1233,10 @@ cpaCyInstanceGetInfo2(const CpaInstanceHandle instanceHandle_in,
}
pInstanceInfo2->isOffloaded = CPA_TRUE;
- /* Get the instance name and part name*/
+ /* Get the instance name and part name */
dev = icp_adf_getAccelDevByAccelId(pCryptoService->pkgID);
- if (NULL == dev) {
+ if (NULL == dev ||
+ 0 == strnlen(dev->deviceName, ADF_DEVICE_TYPE_LENGTH + 1)) {
LAC_LOG_ERROR("Can not find device for the instance\n");
LAC_OS_BZERO(pInstanceInfo2, sizeof(CpaInstanceInfo2));
return CPA_STATUS_FAIL;
@@ -1474,7 +1285,6 @@ cpaCyQueryCapabilities(const CpaInstanceHandle instanceHandle_in,
/* Verify Instance exists */
CpaInstanceHandle instanceHandle = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle = Lac_CryptoGetFirstHandle();
} else {
@@ -1634,7 +1444,6 @@ cpaCySetAddressTranslation(const CpaInstanceHandle instanceHandle_in,
CpaInstanceHandle instanceHandle = NULL;
sal_service_t *pService = NULL;
-
if (CPA_INSTANCE_HANDLE_SINGLE == instanceHandle_in) {
instanceHandle = Lac_CryptoGetFirstHandle();
} else {
@@ -1756,7 +1565,6 @@ icp_sal_CyPollSymRing(CpaInstanceHandle instanceHandle_in,
return status;
}
-
/**
******************************************************************************
* @ingroup cpaCyCommon
@@ -1910,7 +1718,6 @@ Lac_GetFirstHandle(sal_service_type_t svc_type)
default:
LAC_LOG_ERROR("Invalid service type\n");
return NULL;
- break;
}
/* Only need 1 dev with crypto enabled - so check all devices*/
status = icp_amgr_getAllAccelDevByEachCapability(capabilities,
@@ -1978,7 +1785,6 @@ icp_sal_SymGetInflightRequests(CpaInstanceHandle instanceHandle,
numInflightRequests);
}
-
CpaStatus
icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle,
Cpa32U *maxInflightRequests,
@@ -1994,7 +1800,6 @@ icp_sal_dp_SymGetInflightRequests(CpaInstanceHandle instanceHandle,
numInflightRequests);
}
-
CpaStatus
icp_sal_setForceAEADMACVerify(CpaInstanceHandle instanceHandle,
CpaBoolean forceAEADMacVerify)
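
With this change cpaCyGetNumInstances() and cpaCyGetInstances() shrink to thin wrappers over the generic by-type helpers, removing roughly two hundred lines of duplicated list walking. The caller-side pattern is unchanged: count first, then fetch exactly that many handles. A sketch of that usage, assuming the usual CPA headers are in scope and using plain malloc()/free() for illustration instead of the kernel malloc(9) seen in the diff:

#include <stdlib.h>

static CpaStatus
discover_cy_instances(CpaInstanceHandle **out, Cpa16U *count)
{
	CpaStatus status;
	CpaInstanceHandle *handles;
	Cpa16U n = 0;

	*out = NULL;
	*count = 0;

	status = cpaCyGetNumInstances(&n);
	if (CPA_STATUS_SUCCESS != status || 0 == n)
		return (status);

	handles = malloc(n * sizeof(*handles));
	if (NULL == handles)
		return (CPA_STATUS_RESOURCE);

	status = cpaCyGetInstances(n, handles);
	if (CPA_STATUS_SUCCESS != status) {
		free(handles);
		return (status);
	}
	*out = handles;
	*count = n;
	return (CPA_STATUS_SUCCESS);
}
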
diff --git a/sys/dev/qat/qat_api/common/ctrl/sal_get_instances.c b/sys/dev/qat/qat_api/common/ctrl/sal_get_instances.c
index 27037e99d1ac..f68853dc43a8 100644
--- a/sys/dev/qat/qat_api/common/ctrl/sal_get_instances.c
+++ b/sys/dev/qat/qat_api/common/ctrl/sal_get_instances.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
@@ -10,7 +10,9 @@
* @ingroup SalCtrl
*
* @description
- * This file contains the main function to get SAL instances.
+ * This file contains generic functions to get instances of a specified
+ * service type. Note that these complement the existing
+ * service-specific functions.
*
*****************************************************************************/
@@ -34,19 +36,22 @@
#include "lac_mem.h"
#include "lac_list.h"
#include "lac_sal_types.h"
+#include "lac_sal_types_crypto.h"
/**
******************************************************************************
* @ingroup SalCtrl
* @description
- * Get either sym or asym instance number
+ * Get the total number of either sym, asym or cy instances
*****************************************************************************/
-static CpaStatus
-Lac_GetSingleCyNumInstances(
+CpaStatus
+Lac_GetCyNumInstancesByType(
const CpaAccelerationServiceType accelerationServiceType,
Cpa16U *pNumInstances)
{
CpaStatus status = CPA_STATUS_SUCCESS;
+ CpaInstanceHandle instanceHandle;
+ CpaInstanceInfo2 info;
icp_accel_dev_t **pAdfInsts = NULL;
icp_accel_dev_t *dev_addr = NULL;
sal_t *base_addr = NULL;
@@ -71,6 +76,12 @@ Lac_GetSingleCyNumInstances(
service = "sym";
break;
+ case CPA_ACC_SVC_TYPE_CRYPTO:
+ accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ service = "cy";
+ break;
+
default:
QAT_UTILS_LOG("Invalid service type\n");
return CPA_STATUS_INVALID_PARAM;
@@ -106,14 +117,48 @@ Lac_GetSingleCyNumInstances(
}
base_addr = dev_addr->pSalHandle;
- if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType) {
+ if (CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
+ list_temp = base_addr->crypto_services;
+ while (NULL != list_temp) {
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ if (CPA_STATUS_SUCCESS == status &&
+ CPA_TRUE == info.isPolled) {
+ num_inst++;
+ }
+ list_temp = SalList_next(list_temp);
+ }
+ }
+
+ if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType ||
+ CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
list_temp = base_addr->asym_services;
- } else {
- list_temp = base_addr->sym_services;
+ while (NULL != list_temp) {
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ if (CPA_STATUS_SUCCESS == status &&
+ CPA_TRUE == info.isPolled) {
+ num_inst++;
+ }
+ list_temp = SalList_next(list_temp);
+ }
}
- while (NULL != list_temp) {
- num_inst++;
- list_temp = SalList_next(list_temp);
+
+ if (CPA_ACC_SVC_TYPE_CRYPTO_SYM == accelerationServiceType ||
+ CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
+ list_temp = base_addr->sym_services;
+ while (NULL != list_temp) {
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ if (CPA_STATUS_SUCCESS == status &&
+ CPA_TRUE == info.isPolled) {
+ num_inst++;
+ }
+ list_temp = SalList_next(list_temp);
+ }
}
}
@@ -127,15 +172,17 @@ Lac_GetSingleCyNumInstances(
******************************************************************************
* @ingroup SalCtrl
* @description
- * Get either sym or asym instance
+ * Get either sym, asym or cy instance
*****************************************************************************/
-static CpaStatus
-Lac_GetSingleCyInstances(
+CpaStatus
+Lac_GetCyInstancesByType(
const CpaAccelerationServiceType accelerationServiceType,
Cpa16U numInstances,
CpaInstanceHandle *pInstances)
{
CpaStatus status = CPA_STATUS_SUCCESS;
+ CpaInstanceHandle instanceHandle = NULL;
+ CpaInstanceInfo2 info;
icp_accel_dev_t **pAdfInsts = NULL;
icp_accel_dev_t *dev_addr = NULL;
sal_t *base_addr = NULL;
@@ -163,14 +210,21 @@ Lac_GetSingleCyInstances(
accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
service = "sym";
break;
+
+ case CPA_ACC_SVC_TYPE_CRYPTO:
+ accel_capability = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ service = "cy";
+ break;
+
default:
QAT_UTILS_LOG("Invalid service type\n");
return CPA_STATUS_INVALID_PARAM;
}
/* Get the number of instances */
- status = cpaGetNumInstances(accelerationServiceType,
- &num_allocated_instances);
+ status = Lac_GetCyNumInstancesByType(accelerationServiceType,
+ &num_allocated_instances);
if (CPA_STATUS_SUCCESS != status) {
return status;
}
@@ -216,17 +270,63 @@ Lac_GetSingleCyInstances(
continue;
}
- if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType)
+ if (CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
+ list_temp = base_addr->crypto_services;
+ while (NULL != list_temp) {
+ if (index > (numInstances - 1))
+ break;
+
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ list_temp = SalList_next(list_temp);
+ if (CPA_STATUS_SUCCESS != status ||
+ CPA_TRUE != info.isPolled) {
+ continue;
+ }
+ pInstances[index] = instanceHandle;
+ index++;
+ }
+ }
+
+ if (CPA_ACC_SVC_TYPE_CRYPTO_ASYM == accelerationServiceType ||
+ CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
list_temp = base_addr->asym_services;
- else
- list_temp = base_addr->sym_services;
- while (NULL != list_temp) {
- if (index > (numInstances - 1))
- break;
+ while (NULL != list_temp) {
+ if (index > (numInstances - 1))
+ break;
+
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ list_temp = SalList_next(list_temp);
+ if (CPA_STATUS_SUCCESS != status ||
+ CPA_TRUE != info.isPolled) {
+ continue;
+ }
+ pInstances[index] = instanceHandle;
+ index++;
+ }
+ }
- pInstances[index] = SalList_getObject(list_temp);
- list_temp = SalList_next(list_temp);
- index++;
+ if (CPA_ACC_SVC_TYPE_CRYPTO_SYM == accelerationServiceType ||
+ CPA_ACC_SVC_TYPE_CRYPTO == accelerationServiceType) {
+ list_temp = base_addr->sym_services;
+ while (NULL != list_temp) {
+ if (index > (numInstances - 1))
+ break;
+
+ instanceHandle = SalList_getObject(list_temp);
+ status = cpaCyInstanceGetInfo2(instanceHandle,
+ &info);
+ list_temp = SalList_next(list_temp);
+ if (CPA_STATUS_SUCCESS != status ||
+ CPA_TRUE != info.isPolled) {
+ continue;
+ }
+ pInstances[index] = instanceHandle;
+ index++;
+ }
}
}
free(pAdfInsts, M_QAT);
@@ -242,16 +342,24 @@ CpaStatus
cpaGetNumInstances(const CpaAccelerationServiceType accelerationServiceType,
Cpa16U *pNumInstances)
{
+ LAC_CHECK_NULL_PARAM(pNumInstances);
+
switch (accelerationServiceType) {
case CPA_ACC_SVC_TYPE_CRYPTO_ASYM:
case CPA_ACC_SVC_TYPE_CRYPTO_SYM:
- return Lac_GetSingleCyNumInstances(accelerationServiceType,
- pNumInstances);
case CPA_ACC_SVC_TYPE_CRYPTO:
- return cpaCyGetNumInstances(pNumInstances);
+ return Lac_GetCyNumInstancesByType(accelerationServiceType,
+ pNumInstances);
+
case CPA_ACC_SVC_TYPE_DATA_COMPRESSION:
return cpaDcGetNumInstances(pNumInstances);
+ case CPA_ACC_SVC_TYPE_PATTERN_MATCH:
+ case CPA_ACC_SVC_TYPE_RAID:
+ case CPA_ACC_SVC_TYPE_XML:
+ QAT_UTILS_LOG("Unsupported service type\n");
+ return CPA_STATUS_UNSUPPORTED;
+
default:
QAT_UTILS_LOG("Invalid service type\n");
*pNumInstances = 0;
@@ -268,18 +376,25 @@ cpaGetInstances(const CpaAccelerationServiceType accelerationServiceType,
Cpa16U numInstances,
CpaInstanceHandle *pInstances)
{
+ LAC_CHECK_NULL_PARAM(pInstances);
+
switch (accelerationServiceType) {
case CPA_ACC_SVC_TYPE_CRYPTO_ASYM:
case CPA_ACC_SVC_TYPE_CRYPTO_SYM:
- return Lac_GetSingleCyInstances(accelerationServiceType,
+ case CPA_ACC_SVC_TYPE_CRYPTO:
+ return Lac_GetCyInstancesByType(accelerationServiceType,
numInstances,
pInstances);
- case CPA_ACC_SVC_TYPE_CRYPTO:
- return cpaCyGetInstances(numInstances, pInstances);
case CPA_ACC_SVC_TYPE_DATA_COMPRESSION:
return cpaDcGetInstances(numInstances, pInstances);
+ case CPA_ACC_SVC_TYPE_PATTERN_MATCH:
+ case CPA_ACC_SVC_TYPE_RAID:
+ case CPA_ACC_SVC_TYPE_XML:
+ QAT_UTILS_LOG("Unsupported service type\n");
+ return CPA_STATUS_UNSUPPORTED;
+
default:
QAT_UTILS_LOG("Invalid service type\n");
return CPA_STATUS_INVALID_PARAM;
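
The net effect in this file: one pair of exported helpers now walks whichever of the crypto_services, asym_services and sym_services lists match the requested type, counting or collecting only instances that report isPolled via cpaCyInstanceGetInfo2(). For CPA_ACC_SVC_TYPE_CRYPTO all three lists are visited; for the sym- or asym-only types just the matching one, which is exactly the branching in the three if blocks above. A compact sketch of the counting shape, with invented stand-ins for the SAL list and the polled check:

#include <stdbool.h>
#include <stddef.h>

/* Stand-ins for sal_list_t traversal and the isPolled test that the
 * real code performs through cpaCyInstanceGetInfo2(). */
struct node { struct node *next; bool polled; };

static unsigned int
count_polled(const struct node *const heads[], size_t nheads)
{
	unsigned int n = 0;
	const struct node *p;
	size_t i;

	for (i = 0; i < nheads; i++)
		for (p = heads[i]; p != NULL; p = p->next)
			if (p->polled)
				n++;
	return (n);
}
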
diff --git a/sys/dev/qat/qat_api/common/include/lac_common.h b/sys/dev/qat/qat_api/common/include/lac_common.h
index 18ab5d049a47..6962e6f43a6d 100644
--- a/sys/dev/qat/qat_api/common/include/lac_common.h
+++ b/sys/dev/qat/qat_api/common/include/lac_common.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file lac_common.h Common macros
@@ -574,7 +574,6 @@ typedef enum lac_expected_size_s {
******************************************************************************/
#define LAC_QUADWORDS_TO_BYTES(x) ((x) << 3)
-
/******************************************************************************/
/*
diff --git a/sys/dev/qat/qat_api/common/include/lac_hooks.h b/sys/dev/qat/qat_api/common/include/lac_hooks.h
index 37ea65396086..6386eed53f8c 100644
--- a/sys/dev/qat/qat_api/common/include/lac_hooks.h
+++ b/sys/dev/qat/qat_api/common/include/lac_hooks.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*******************************************************************************
* @file lac_hooks.h
@@ -9,7 +9,7 @@
* @ingroup LacCommon
*
* Component Init/Shutdown functions. These are:
- * - an init function which is called during the intialisation sequence,
+ * - an init function which is called during the initialisation sequence,
* - a shutdown function which is called by the overall shutdown function,
*
******************************************************************************/
diff --git a/sys/dev/qat/qat_api/common/include/lac_mem.h b/sys/dev/qat/qat_api/common/include/lac_mem.h
index ce4c8045a27c..5392606a4c49 100644
--- a/sys/dev/qat/qat_api/common/include/lac_mem.h
+++ b/sys/dev/qat/qat_api/common/include/lac_mem.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file lac_mem.h
@@ -351,7 +351,7 @@ LacMem_OsContigAlignMemAlloc(void **ppMemAddr,
* results in following entry:
* static const unsigned int highest_bit_of_lac_mem_blk_t = 3
*
- * CAUTION!!
+ * CAUTION!
* Macro is prepared only for type names NOT-containing ANY
* special characters. Types as amongst others:
* - void *
@@ -481,7 +481,7 @@ LacMem_OsContigAlignMemFree(void **ppMemAddr)
* This is because pInternalMem describes the memory that will be sent to
* QAT.
*
- * The caller must keep the original buffer pointer. The alllocated buffer
+ * The caller must keep the original buffer pointer. The allocated buffer
*is
* freed (as necessary) using icp_LacBufferRestore().
*
diff --git a/sys/dev/qat/qat_api/common/include/lac_mem_pools.h b/sys/dev/qat/qat_api/common/include/lac_mem_pools.h
index cbc3c787637a..406516509ed2 100644
--- a/sys/dev/qat/qat_api/common/include/lac_mem_pools.h
+++ b/sys/dev/qat/qat_api/common/include/lac_mem_pools.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file lac_mem_pools.h
@@ -15,20 +15,20 @@
* This component is designed as a set of utility functions for the
* creation of pre-allocated memory pools. Each pool will be created using OS
* memory with a user specified number of elements, element size and element
- * alignment(alignmnet is at byte granularity).
+ * alignment(alignment is at byte granularity).
* @lld_dependencies
* These utilities rely on QAT Utils for locking mechanisms and memory
- *allocation
+ * allocation
* @lld_initialisation
* Pool creation needs to be done by each component. There is no specific
* initialisation required for this feature.
* @lld_module_algorithms
- * The following is a diagram of how the memory is layed out for each block
+ * The following is a diagram of how the memory is laid out for each block
* in a pool. Each element must be aligned on the boundary requested for in the
* create call. In order to hide the management of the pools from the user,
* the memory block data is hidden prior to the
* data pointer. This way it can be accessed easily on a free call with pointer
- * arithmatic. The Padding at the start is simply there for alignment and is
+ * arithmetic. The Padding at the start is simply there for alignment and is
* unused in the pools.
*
* -------------------------------------------------------
@@ -73,7 +73,7 @@ typedef struct lac_mem_blk_s {
CpaBoolean isInUse;
/**< indicates if the pool item is in use */
struct lac_mem_blk_s *pNext;
- /**< link to next blcok in the pool */
+ /**< link to next block in the pool */
struct lac_mem_pool_hdr_s *pPoolID;
/**< identifier of the pool that this block was allocated from */
} lac_mem_blk_t;
@@ -82,7 +82,7 @@ typedef struct lac_mem_blk_s {
(((lac_mem_blk_t *)((LAC_ARCH_UINT)pVirtAddr - sizeof(lac_mem_blk_t))) \
->physDataPtr)
/**< @ingroup LacMemPool
- * macro for retreiving the physical address of the memory block. */
+ * macro for retrieving the physical address of the memory block. */
#define LAC_MEM_POOL_INIT_POOL_ID 0
/**< @ingroup LacMemPool
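
The layout sketched in this header hides a lac_mem_blk_t bookkeeping record immediately below the pointer handed back to the caller, so the pool code can recover it with pointer arithmetic on free or lookup; LAC_MEM_POOL_VIRT_TO_PHYS in the hunk above does exactly that to fetch physDataPtr. A minimal sketch of the recovery step, with a simplified header struct and uintptr_t in place of LAC_ARCH_UINT:

#include <stdint.h>

/* Simplified stand-in for lac_mem_blk_t; the real block header also
 * carries the in-use flag, free-list link and owning pool ID. */
struct mem_blk_hdr {
	uint64_t phys_addr;
};

/* Recover the hidden header sitting just below the user pointer. */
static uint64_t
virt_to_phys(const void *user_ptr)
{
	const struct mem_blk_hdr *hdr;

	hdr = (const struct mem_blk_hdr *)
	    ((uintptr_t)user_ptr - sizeof(struct mem_blk_hdr));
	return (hdr->phys_addr);
}
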
diff --git a/sys/dev/qat/qat_api/common/include/lac_sal.h b/sys/dev/qat/qat_api/common/include/lac_sal.h
index 4c2e4347a16c..dff6fa8c7265 100644
--- a/sys/dev/qat/qat_api/common/include/lac_sal.h
+++ b/sys/dev/qat/qat_api/common/include/lac_sal.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file lac_sal.h
@@ -57,7 +57,7 @@ CpaStatus SalCtrl_ServiceCreate(sal_service_type_t service,
* @ingroup SalCtl
* @description
* This macro goes through the 'list' passed in as a parameter. For each
- * element found in the list, it peforms a cast to the type of the element
+ * element found in the list, it performs a cast to the type of the element
* given by the 'type' parameter. Finally, it calls the function given by
* the 'function' parameter, passing itself and the device as parameters.
*
@@ -114,7 +114,7 @@ CpaStatus SalCtrl_ServiceCreate(sal_service_type_t service,
* @ingroup SalCtl
* @description
* This macro goes through the 'list' passed in as a parameter. For each
- * element found in the list, it peforms a cast to the type of the element
+ * element found in the list, it performs a cast to the type of the element
* given by the 'type' parameter. Finally, it checks the state of the
* element and if it is in state 'state_check' then it calls the
* function given by the 'function' parameter, passing itself
@@ -212,7 +212,7 @@ CpaStatus SalCtrl_CryptoStart(icp_accel_dev_t *device, sal_service_t *service);
* @ingroup SalCtrl
* @description
* This function is used to stop an instance of crypto service.
- * It checks for inflight messages to the FW. If no messages are pending
+ * It checks for in-flight messages to the FW. If no messages are pending
* it returns success. If messages are pending it returns retry.
*
* @context
@@ -340,7 +340,7 @@ CpaStatus SalCtrl_CompressionStart(icp_accel_dev_t *device,
* @ingroup SalCtrl
* @description
* This function is used to stop an instance of compression service.
- * It checks for inflight messages to the FW. If no messages are pending
+ * It checks for in-flight messages to the FW. If no messages are pending
* it returns success. If messages are pending it returns retry.
*
* @context
diff --git a/sys/dev/qat/qat_api/common/include/lac_sal_types.h b/sys/dev/qat/qat_api/common/include/lac_sal_types.h
index 8eff818d93cc..3960c4b94d01 100644
--- a/sys/dev/qat/qat_api/common/include/lac_sal_types.h
+++ b/sys/dev/qat/qat_api/common/include/lac_sal_types.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file lac_sal_types.h
@@ -195,8 +195,8 @@ typedef struct sal_service_debug_s {
* @param[in] pService pointer to service instance
* @param[in] service_type service type to check againstx.
*
- * @return CPA_STATUS_FAIL Parameter is incorrect type
- *
+ * @return CPA_STATUS_FAIL Parameter is incorrect type
+ *
******************************************************************************/
#define SAL_CHECK_INSTANCE_TYPE(pService, service_type) \
do { \
diff --git a/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h b/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h
index c26603e4b582..952c174adfec 100644
--- a/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h
+++ b/sys/dev/qat/qat_api/common/include/lac_sal_types_crypto.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
@@ -187,4 +187,25 @@ typedef struct sal_crypto_service_s {
CpaInstanceHandle Lac_GetFirstHandle(sal_service_type_t svc_type);
+/**
+ ******************************************************************************
+ * @ingroup SalCtrl
+ * @description
+ * Get the total number of either sym, asym or cy instances
+ *****************************************************************************/
+CpaStatus Lac_GetCyNumInstancesByType(
+ const CpaAccelerationServiceType accelerationServiceType,
+ Cpa16U *pNumInstances);
+
+/**
+ ******************************************************************************
+ * @ingroup SalCtrl
+ * @description
+ * Get either sym, asym or cy instance
+ *****************************************************************************/
+CpaStatus Lac_GetCyInstancesByType(
+ const CpaAccelerationServiceType accelerationServiceType,
+ Cpa16U numInstances,
+ CpaInstanceHandle *pInstances);
+
#endif /*LAC_SAL_TYPES_CRYPTO_H_*/
diff --git a/sys/dev/qat/qat_api/common/include/sal_qat_cmn_msg.h b/sys/dev/qat/qat_api/common/include/sal_qat_cmn_msg.h
index 127ef6039197..7837d3c61240 100644
--- a/sys/dev/qat/qat_api/common/include/sal_qat_cmn_msg.h
+++ b/sys/dev/qat/qat_api/common/include/sal_qat_cmn_msg.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file sal_qat_cmn_msg.c
@@ -147,7 +147,7 @@ void SalQatMsg_CmnMidWrite(icp_qat_fw_la_bulk_req_t *pReq,
* section of the Request Msg.
*
* @param[in] pMsg Pointer to 128B Request Msg buffer.
- * @param[in] pContentDescInfo content descripter info.
+ * @param[in] pContentDescInfo content descriptor info.
*
* @return
* none
diff --git a/sys/dev/qat/qat_api/common/include/sal_types_compression.h b/sys/dev/qat/qat_api/common/include/sal_types_compression.h
index d7bfe33cab08..4b17438b3258 100644
--- a/sys/dev/qat/qat_api/common/include/sal_types_compression.h
+++ b/sys/dev/qat/qat_api/common/include/sal_types_compression.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file sal_types_compression.h
@@ -24,6 +24,8 @@
#define DC_NUM_RX_RINGS (1)
#define DC_NUM_COMPRESSION_LEVELS (CPA_DC_L12)
+#define MAX_SGL_NUM 0x10000
+
/**
*****************************************************************************
* @ingroup SalCtrl
diff --git a/sys/dev/qat/qat_api/common/qat_comms/sal_qat_cmn_msg.c b/sys/dev/qat/qat_api/common/qat_comms/sal_qat_cmn_msg.c
index 8b7c5160f712..4b7ec2d0e1aa 100644
--- a/sys/dev/qat/qat_api/common/qat_comms/sal_qat_cmn_msg.c
+++ b/sys/dev/qat/qat_api/common/qat_comms/sal_qat_cmn_msg.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file sal_qat_cmn_msg.h
@@ -145,7 +145,7 @@ void inline SalQatMsg_CmnMidWrite(icp_qat_fw_la_bulk_req_t *pReq,
* icp_qat_fw_comn_req_hdr_cd_pars_t section of the Request Msg.
*
* @param[in] pMsg Pointer to 128B Request Msg buffer.
- * @param[in] pContentDescInfo content descripter info.
+ * @param[in] pContentDescInfo content descriptor info.
*
* @return
* none
diff --git a/sys/dev/qat/qat_api/common/utils/lac_buffer_desc.c b/sys/dev/qat/qat_api/common/utils/lac_buffer_desc.c
index 4867e4ea0f15..40b239174cf1 100644
--- a/sys/dev/qat/qat_api/common/utils/lac_buffer_desc.c
+++ b/sys/dev/qat/qat_api/common/utils/lac_buffer_desc.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file lac_buffer_desc.c Utility functions for setting buffer descriptors
@@ -31,7 +31,7 @@
/* Invalid physical address value */
#define INVALID_PHYSICAL_ADDRESS 0
-/* Indicates what type of buffer writes need to be perfomed */
+/* Indicates what type of buffer writes need to be performed */
typedef enum lac_buff_write_op_e {
WRITE_NORMAL = 0,
WRITE_AND_GET_SIZE,
@@ -153,7 +153,7 @@ LacBuffDesc_CommonBufferListDescWrite(const CpaBufferList *pUserBufferList,
/* This function implements the buffer description writes for the traditional
* APIs Zero length buffers are allowed, should be used for CHA-CHA-POLY and
- * GCM aglorithms */
+ * GCM algorithms */
CpaStatus
LacBuffDesc_BufferListDescWriteAndAllowZeroBuffer(
const CpaBufferList *pUserBufferList,
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
index b4d1f5829ba2..de743987863f 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file icp_qat_fw.h
@@ -241,13 +241,12 @@ typedef struct icp_qat_fw_comn_req_mid_s {
* field */
uint32_t src_length;
- /** < Length of source flat buffer incase src buffer
+ /** < Length of source flat buffer in case src buffer
* type is flat */
uint32_t dst_length;
- /** < Length of source flat buffer incase dst buffer
+ /** < Length of source flat buffer in case dst buffer
* type is flat */
-
} icp_qat_fw_comn_req_mid_t;
/**
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h
index fe1b7ad55de8..834359c66c03 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_comp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file icp_qat_fw_comp.h
@@ -47,7 +47,6 @@ typedef enum {
} icp_qat_fw_comp_cmd_id_t;
-
/*
* REQUEST FLAGS IN COMMON COMPRESSION
* In common message it is named as SERVICE SPECIFIC FLAGS.
@@ -65,7 +64,6 @@ typedef enum {
* are don't care. i.e., these features are removed from QAT 2.0.
*/
-
/**< Flag usage */
#define ICP_QAT_FW_COMP_STATELESS_SESSION 0
@@ -410,7 +408,6 @@ typedef struct icp_qat_fw_comp_req_params_s {
((crc & ICP_QAT_FW_COMP_CRC_MODE_MASK) \
<< ICP_QAT_FW_COMP_CRC_MODE_BITPOS))
-
/*
* REQUEST FLAGS IN REQUEST PARAMETERS COMPRESSION
*
@@ -423,7 +420,6 @@ typedef struct icp_qat_fw_comp_req_params_s {
* +=====+-----+----- + --- + ----+-----+ --- + ----- + --- + ---- + -- + -- +
*/
-
/**
*****************************************************************************
* @ingroup icp_qat_fw_comp
@@ -446,18 +442,17 @@ typedef enum {
ICP_QAT_FW_COMP_20_CMD_LZ4S_DECOMPRESS = 6,
/*!< LZ4S Decompress Request */
- ICP_QAT_FW_COMP_20_CMD_XP10_COMPRESS = 7,
- /*!< XP10 Compress Request -- Placeholder */
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_1 = 7,
+ /*!< Placeholder */
- ICP_QAT_FW_COMP_20_CMD_XP10_DECOMPRESS = 8,
- /*!< XP10 Decompress Request -- Placeholder */
+ ICP_QAT_FW_COMP_20_CMD_RESERVED_2 = 8,
+ /*!< Placeholder */
ICP_QAT_FW_COMP_20_CMD_DELIMITER
/**< Delimiter type */
} icp_qat_fw_comp_20_cmd_id_t;
-
/*
* REQUEST FLAGS IN REQUEST PARAMETERS COMPRESSION
*
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_la.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_la.h
index b0942d206aa1..76b8eca98ece 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_la.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_la.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file icp_qat_fw_la.h
@@ -451,7 +451,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
* the case of partial processing. See the HLD for further details
*
* + ====== + ------------------------- + ----------------------- +
- * | Parial | Prefix Addr | Hash State Sz |
+ * | Partial| Prefix Addr | Hash State Sz |
* | State | | |
* + ====== + ------------------------- + ----------------------- +
* | FULL | Points to the prefix data | Prefix size as below. |
@@ -509,7 +509,7 @@ typedef struct icp_qat_fw_la_bulk_req_s {
* is required
* @param ciphIV Cipher IV field contents
* @param ciphcfg Cipher/Auth Config offset type
- * @param partial Inidicate if the packet is a partial part
+ * @param partial Indicate if the packet is a partial part
*
*****************************************************************************/
#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, \
@@ -1042,7 +1042,7 @@ typedef struct icp_qat_fw_cipher_cd_ctrl_hdr_s {
/**< LW 27 */
uint8_t cipher_state_sz;
/**< State size in quad words of the cipher algorithm used in this
- * session. Set to zero if the algorithm doesnt provide any state */
+ * session. Set to zero if the algorithm doesn't provide any state */
uint8_t cipher_key_sz;
/**< Key size in quad words of the cipher algorithm used in this session
@@ -1179,7 +1179,7 @@ typedef struct icp_qat_fw_cipher_auth_cd_ctrl_hdr_s {
/**< LW 27 */
uint8_t cipher_state_sz;
/**< State size in quad words of the cipher algorithm used in this
- * session. Set to zero if the algorithm doesnt provide any state */
+ * session. Set to zero if the algorithm doesn't provide any state */
uint8_t cipher_key_sz;
/**< Key size in quad words of the cipher algorithm used in this session
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp.h
index f9471acadba2..5abb0cbdb30e 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp.h
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/* --- (Automatically generated (build v. 2.7), do not modify manually) --- */
-
/**
* @file icp_qat_fw_mmp.h
* @defgroup icp_qat_fw_mmp ICP QAT FW MMP Processing Definitions
@@ -12,10 +11,9 @@
* @brief
* This file documents the external interfaces that the QAT FW running
* on the QAT Acceleration Engine provides to clients wanting to
- * accelerate crypto assymetric applications
+ * accelerate crypto asymmetric applications
*/
-
#ifndef __ICP_QAT_FW_MMP__
#define __ICP_QAT_FW_MMP__
@@ -33,12 +31,12 @@
* Local constants
**************************************************************************
*/
-#define ICP_QAT_FW_PKE_INPUT_COUNT_MAX 7
+#define ICP_QAT_FW_PKE_INPUT_COUNT_MAX 7
/**< @ingroup icp_qat_fw_pke
- * Maximum number of input paramaters in all PKE request */
-#define ICP_QAT_FW_PKE_OUTPUT_COUNT_MAX 5
+ * Maximum number of input parameters in all PKE request */
+#define ICP_QAT_FW_PKE_OUTPUT_COUNT_MAX 5
/**< @ingroup icp_qat_fw_pke
- * Maximum number of output paramaters in all PKE request */
+ * Maximum number of output parameters in all PKE request */
/**
* @ingroup icp_qat_fw_mmp
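These counts bound the input and output parameter lists of any PKE request, so caller-side staging arrays can be sized statically. A minimal sketch under that assumption; the flat-buffer element type is illustrative, not mandated by this header:

    /* Sketch: fixed-size staging for PKE request parameters. */
    CpaFlatBuffer pkeIn[ICP_QAT_FW_PKE_INPUT_COUNT_MAX];    /* at most 7 inputs */
    CpaFlatBuffer pkeOut[ICP_QAT_FW_PKE_OUTPUT_COUNT_MAX];  /* at most 5 outputs */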
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp_ids.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp_ids.h
index a57de52a9f10..c68d6dbd0157 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp_ids.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_mmp_ids.h
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
-
+/* Copyright(c) 2007-2025 Intel Corporation */
/* --- (Automatically generated (relocation v. 1.3), do not modify manually) --- */
@@ -11,7 +10,7 @@
* @brief
* This file documents the external interfaces that the QAT FW running
* on the QAT Acceleration Engine provides to clients wanting to
- * accelerate crypto assymetric applications
+ * accelerate crypto asymmetric applications
*/
#ifndef __ICP_QAT_FW_MMP_IDS__
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_pke.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_pke.h
index 1ba7e81c52a9..aa642cc23a57 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_pke.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_fw_pke.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
* @file icp_qat_fw_pke.h
* @defgroup icp_qat_fw_pke ICP QAT FW PKE Processing Definitions
@@ -8,7 +8,7 @@
* @brief
* This file documents the external interfaces that the QAT FW running
* on the QAT Acceleration Engine provides to clients wanting to
- * accelerate crypto assymetric applications
+ * accelerate crypto asymmetric applications
*/
#ifndef _ICP_QAT_FW_PKE_
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h
index 2af0684206ab..5882199cf1a9 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file icp_qat_hw.h
@@ -163,7 +163,7 @@ typedef struct icp_qat_hw_auth_config_s {
#define QAT_AUTH_MODE_MASK 0xF
/**< @ingroup icp_qat_hw_defs
- * Four bit mask used for determing the Auth mode */
+ * Four bit mask used for determining the Auth mode */
#define QAT_AUTH_ALGO_BITPOS 0
/**< @ingroup icp_qat_hw_defs
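The mask/bitpos pairs in this header follow one pattern: shift the config word down by the BITPOS constant, then apply the mask. A sketch, assuming a companion QAT_AUTH_MODE_BITPOS constant defined alongside the mask (only the mask appears in this hunk):

    /* Sketch of the usual mask/bitpos extraction idiom.
     * QAT_AUTH_MODE_BITPOS is an assumed companion constant. */
    uint32_t mode = (auth_config >> QAT_AUTH_MODE_BITPOS) & QAT_AUTH_MODE_MASK;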
@@ -1277,7 +1277,7 @@ typedef struct icp_qat_hw_trng_test_status_s {
#define QAT_TRNG_TEST_STATUS_MASK 0x1
/**< @ingroup icp_qat_hw_defs
- * Mask of one bit used to determine the TRNG Test staus */
+ * Mask of one bit used to determine the TRNG Test status */
#define QAT_TRNG_TEST_STATUS_BITPOS 1
/**< @ingroup icp_qat_hw_defs
diff --git a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h
index 8a149edd8d59..df1de387ce42 100644
--- a/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h
+++ b/sys/dev/qat/qat_api/firmware/include/icp_qat_hw_20_comp.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
*****************************************************************************
* @file icp_qat_hw_2x_comp.h
@@ -17,7 +17,6 @@
#include "icp_qat_hw_20_comp_defs.h" /* For HW definitions */
#include "icp_qat_fw.h" /* For Set Field Macros. */
-
#define BYTE_SWAP_32 __builtin_bswap32
/**
diff --git a/sys/dev/qat/qat_api/include/cpa.h b/sys/dev/qat/qat_api/include/cpa.h
index f4baa90c45cf..a3e32e4ef1d4 100644
--- a/sys/dev/qat/qat_api/include/cpa.h
+++ b/sys/dev/qat/qat_api/include/cpa.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/cpa_dev.h b/sys/dev/qat/qat_api/include/cpa_dev.h
index 2d548e8a9541..9c1c7ccff45e 100644
--- a/sys/dev/qat/qat_api/include/cpa_dev.h
+++ b/sys/dev/qat/qat_api/include/cpa_dev.h
@@ -1,38 +1,5 @@
-/****************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -101,9 +68,9 @@ typedef struct _CpaDeviceInfo {
CpaBoolean dcEnabled;
/**< Compression service enabled */
CpaBoolean cySymEnabled;
- /**< Symetric crypto service enabled */
+ /**< Symmetric crypto service enabled */
CpaBoolean cyAsymEnabled;
- /**< Asymetric crypto service enabled */
+ /**< Asymmetric crypto service enabled */
CpaBoolean inlineEnabled;
/**< Inline service enabled */
Cpa32U deviceMemorySizeAvailable;
diff --git a/sys/dev/qat/qat_api/include/cpa_types.h b/sys/dev/qat/qat_api/include/cpa_types.h
index 00ed3c60fce6..712f1cf88b93 100644
--- a/sys/dev/qat/qat_api/include/cpa_types.h
+++ b/sys/dev/qat/qat_api/include/cpa_types.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc.h b/sys/dev/qat/qat_api/include/dc/cpa_dc.h
index 7094747bc83e..d1751fc7ee16 100644
--- a/sys/dev/qat/qat_api/include/dc/cpa_dc.h
+++ b/sys/dev/qat/qat_api/include/dc/cpa_dc.h
@@ -1,38 +1,5 @@
-/****************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -135,6 +102,7 @@ extern"C" {
(CPA_DC_API_VERSION_NUM_MAJOR == major && \
CPA_DC_API_VERSION_NUM_MINOR < minor))
+
/**
*****************************************************************************
* @ingroup cpaDc
@@ -999,7 +967,7 @@ typedef struct _CpaCrcData {
/**< CRC32 calculated on the input buffer during compression
* requests and on the output buffer during decompression requests. */
Cpa32U adler32;
- /**< ADLER32 calculated on the input buffer during compression
+ /**< Adler32 calculated on the input buffer during compression
* requests and on the output buffer during decompression requests. */
CpaIntegrityCrc integrityCrc;
/**< 32bit Integrity CRCs */
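After a request completes, both checksums describe the same data stream: the input for compression requests, the output for decompression requests. A hedged sketch of reading them back; how the structure is attached to a request (via op data) is assumed rather than shown in this hunk:

    /* Sketch: inspect the CRCs after a completed request.
     * Assumes cpa_dc.h and stdio.h are included. */
    CpaCrcData crcData = { 0 };
    /* ... attach &crcData to the request via its op data, then submit ... */
    printf("crc32=0x%08x adler32=0x%08x\n", crcData.crc32, crcData.adler32);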
@@ -1289,7 +1257,7 @@ cpaDcResetSession(const CpaInstanceHandle dcInstance,
* This function will reset the internal xxHash state maintained within a
* session. This would be used in conjunction with the
* CpaDcSessionSetupData.accumulateXXHash flag being set to TRUE for this
- * session. It will enable reseting (reinitialising) just the xxHash
+ * session. It will enable resetting (reinitialising) just the xxHash
* calculation back to the state when the session was first initialised.
*
* @context
@@ -1761,7 +1729,7 @@ cpaDcLZ4SCompressBound(const CpaInstanceHandle dcInstance,
* session, the checksum passed to cpaDcCompressData should be set to the
* checksum value produced by the previous call to cpaDcCompressData().
* When the last block of input data is passed to cpaDcCompressData(), the
- * flush flag should be set to CP_DC_FLUSH_FINAL. This will cause the BFINAL
+ * flush flag should be set to CPA_DC_FLUSH_FINAL. This will cause the BFINAL
* bit to be set in a deflate stream. It is the responsibility of the calling
* application to maintain overall lengths across the stateless requests
* and to pass the checksum produced by one request into the next request.
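In other words, stateless multi-request deflate threads the checksum through CpaDcRqResults and marks only the last submission as final. A minimal sketch assuming the classic cpaDcCompressData() signature, a two-block input, and pre-built buffer lists; status checks are omitted for brevity:

    /* Sketch: two stateless requests forming one deflate stream.
     * Assumes cpa_dc.h is included and inst/sess are initialised. */
    CpaDcRqResults results = { 0 };

    /* Block 1: intermediate block, not flushed as final. */
    cpaDcCompressData(inst, sess, &srcList1, &dstList1, &results,
                      CPA_DC_FLUSH_FULL, NULL);
    Cpa32U running = results.checksum;   /* carry the checksum forward */

    /* Block 2: last block; CPA_DC_FLUSH_FINAL sets the BFINAL bit. */
    results.checksum = running;
    cpaDcCompressData(inst, sess, &srcList2, &dstList2, &results,
                      CPA_DC_FLUSH_FINAL, NULL);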
@@ -2286,7 +2254,7 @@ cpaDcNsDecompressData( CpaInstanceHandle dcInstance,
* - Content size = 0
* - Dictionary ID = 0
* - Header checksum = 1 byte representing the second byte of the
- * XXH32 of the frame decriptor field.
+ * XXH32 of the frame descriptor field.
*
* The counter parameter will be set to the number of bytes added to the
* buffer. The pData will be not be changed.
diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc_bp.h b/sys/dev/qat/qat_api/include/dc/cpa_dc_bp.h
index 97bcf8c5613b..754992f65e67 100644
--- a/sys/dev/qat/qat_api/include/dc/cpa_dc_bp.h
+++ b/sys/dev/qat/qat_api/include/dc/cpa_dc_bp.h
@@ -1,38 +1,5 @@
-/****************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc_chain.h b/sys/dev/qat/qat_api/include/dc/cpa_dc_chain.h
index 0baab2547f18..7c0102d8d838 100644
--- a/sys/dev/qat/qat_api/include/dc/cpa_dc_chain.h
+++ b/sys/dev/qat/qat_api/include/dc/cpa_dc_chain.h
@@ -1,38 +1,5 @@
-/****************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -77,21 +44,21 @@ extern"C" {
*****************************************************************************/
typedef enum _CpaDcChainOperations
{
- CPA_DC_CHAIN_COMPRESS_THEN_HASH,
+ CPA_DC_CHAIN_COMPRESS_THEN_HASH = 0,
/**< 2 operations for chaining:
* 1st operation is to perform compression on plain text
* 2nd operation is to perform hash on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for hash setup data*/
- CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT,
+ CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT = 1,
/**< 2 operations for chaining:
* 1st operation is to perform compression on plain text
* 2nd operation is to perform encryption on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for encryption setup data*/
- CPA_DC_CHAIN_COMPRESS_THEN_HASH_ENCRYPT,
+ CPA_DC_CHAIN_COMPRESS_THEN_HASH_ENCRYPT = 2,
/**< 2 operations for chaining:
* 1st operation is to perform compression on plain text
* 2nd operation is to perform hash on compressed text and
@@ -99,7 +66,7 @@ typedef enum _CpaDcChainOperations
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for hash and encryption setup data*/
- CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT_HASH,
+ CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT_HASH = 3,
/**< 2 operations for chaining:
* 1st operation is to perform compression on plain text
* 2nd operation is to perform encryption on compressed text and
@@ -107,35 +74,35 @@ typedef enum _CpaDcChainOperations
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for encryption and hash setup data*/
- CPA_DC_CHAIN_COMPRESS_THEN_AEAD,
+ CPA_DC_CHAIN_COMPRESS_THEN_AEAD = 4,
/**< 2 operations for chaining:
* 1st operation is to perform compression on plain text
* 2nd operation is to perform AEAD encryption on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for AEAD encryption setup data*/
- CPA_DC_CHAIN_HASH_THEN_COMPRESS,
+ CPA_DC_CHAIN_HASH_THEN_COMPRESS = 5,
/**< 2 operations for chaining:
* 1st operation is to perform hash on plain text
* 2nd operation is to perform compression on plain text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for hash setup data
* 2nd entry is for compression setup data*/
- CPA_DC_CHAIN_HASH_VERIFY_THEN_DECOMPRESS,
+ CPA_DC_CHAIN_HASH_VERIFY_THEN_DECOMPRESS = 6,
/**< 2 operations for chaining:
* 1st operation is to perform hash verify on compressed text
* 2nd operation is to perform decompression on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for hash setup data
* 2nd entry is for decompression setup data*/
- CPA_DC_CHAIN_DECRYPT_THEN_DECOMPRESS,
+ CPA_DC_CHAIN_DECRYPT_THEN_DECOMPRESS = 7,
/**< 2 operations for chaining:
* 1st operation is to perform decryption on compressed & encrypted text
* 2nd operation is to perform decompression on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for decryption setup data
* 2nd entry is for decompression setup data*/
- CPA_DC_CHAIN_HASH_VERIFY_DECRYPT_THEN_DECOMPRESS,
+ CPA_DC_CHAIN_HASH_VERIFY_DECRYPT_THEN_DECOMPRESS = 8,
/**< 2 operations for chaining:
* 1st operation is to perform hash verify on compressed & encrypted text
* and decryption on compressed & encrypted text
@@ -143,7 +110,7 @@ typedef enum _CpaDcChainOperations
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for hash and decryption setup data
* 2nd entry is for decompression setup data*/
- CPA_DC_CHAIN_DECRYPT_HASH_VERIFY_THEN_DECOMPRESS,
+ CPA_DC_CHAIN_DECRYPT_HASH_VERIFY_THEN_DECOMPRESS = 9,
/**< 2 operations for chaining:
* 1st operation is to perform decryption on compressed & encrypted text
* and hash verify on compressed text
@@ -151,25 +118,25 @@ typedef enum _CpaDcChainOperations
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for decryption and hash setup data
* 2nd entry is for decompression setup data*/
- CPA_DC_CHAIN_AEAD_THEN_DECOMPRESS,
+ CPA_DC_CHAIN_AEAD_THEN_DECOMPRESS = 10,
/**< 2 operations for chaining:
* 1st operation is to perform AEAD decryption on compressed & encrypted text
* 2nd operation is to perform decompression on compressed text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for AEAD decryption setup data
* 2nd entry is for decompression setup data*/
- CPA_DC_CHAIN_DECOMPRESS_THEN_HASH_VERIFY,
+ CPA_DC_CHAIN_DECOMPRESS_THEN_HASH_VERIFY = 11,
/**< 2 operations for chaining:
* 1st operation is to perform decompression on compressed text
* 2nd operation is to perform hash verify on plain text
**< 2 entries in CpaDcChainSessionSetupData array:
* 1st entry is for decompression setup data
* 2nd entry is for hash setup data*/
- CPA_DC_CHAIN_COMPRESS_THEN_AEAD_THEN_HASH,
+ CPA_DC_CHAIN_COMPRESS_THEN_AEAD_THEN_HASH = 12,
/**< 3 operations for chaining:
* 1st operation is to perform compression on plain text
- * 2nd operation is to perform AEAD encryption compressed text
- * 3rd operation is to perfom hash on compressed & encrypted text
+ * 2nd operation is to perform AEAD encryption on compressed text
+ * 3rd operation is to perform hash on compressed & encrypted text
**< 3 entries in CpaDcChainSessionSetupData array:
* 1st entry is for compression setup data
* 2nd entry is for AEAD encryption setup data
@@ -187,9 +154,9 @@ typedef enum _CpaDcChainOperations
*****************************************************************************/
typedef enum _CpaDcChainSessionType
{
- CPA_DC_CHAIN_COMPRESS_DECOMPRESS,
+ CPA_DC_CHAIN_COMPRESS_DECOMPRESS = 0,
/**< Indicate the session is for compression or decompression */
- CPA_DC_CHAIN_SYMMETRIC_CRYPTO,
+ CPA_DC_CHAIN_SYMMETRIC_CRYPTO
/**< Indicate the session is for symmetric crypto */
} CpaDcChainSessionType;
@@ -210,7 +177,7 @@ typedef struct _CpaDcChainSessionSetupData {
CpaDcSessionSetupData *pDcSetupData;
/**< Pointer to compression session setup data */
CpaCySymSessionSetupData *pCySetupData;
- /**< Pointer to symmectric crypto session setup data */
+ /**< Pointer to symmetric crypto session setup data */
};
} CpaDcChainSessionSetupData;
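Each chain stage gets one setup entry whose session type selects which union arm is valid. A hedged sketch of the two-entry array for CPA_DC_CHAIN_HASH_THEN_COMPRESS; the sessType member name and entry order follow the comments above and are assumptions, not confirmed by this hunk:

    /* Sketch: setup array handed to the chain session init call. */
    CpaCySymSessionSetupData hashSetup = { 0 };   /* hash stage config */
    CpaDcSessionSetupData    dcSetup   = { 0 };   /* compression stage config */

    CpaDcChainSessionSetupData chain[2];
    /* 1st entry: hash setup data (order mandated by the enum comments). */
    chain[0].sessType = CPA_DC_CHAIN_SYMMETRIC_CRYPTO;
    chain[0].pCySetupData = &hashSetup;
    /* 2nd entry: compression setup data. */
    chain[1].sessType = CPA_DC_CHAIN_COMPRESS_DECOMPRESS;
    chain[1].pDcSetupData = &dcSetup;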
@@ -230,7 +197,7 @@ typedef struct _CpaDcChainOpData {
CpaDcOpData *pDcOp;
/**< Pointer to compression operation data */
CpaCySymOpData *pCySymOp;
- /**< Pointer to symmectric crypto operation data */
+ /**< Pointer to symmetric crypto operation data */
};
} CpaDcChainOpData;
@@ -259,7 +226,7 @@ typedef struct _CpaDcChainRqResults {
Cpa32U crc32;
/**< crc32 checksum produced by chaining operations */
Cpa32U adler32;
- /**< adler32 checksum produced by chaining operations */
+ /**< Adler32 checksum produced by chaining operations */
}CpaDcChainRqResults;
/**
@@ -366,8 +333,6 @@ cpaDcChainGetSessionSize(CpaInstanceHandle dcInstance,
* @retval CPA_STATUS_FAIL Function failed.
* @retval CPA_STATUS_INVALID_PARAM Invalid parameter passed in.
* @retval CPA_STATUS_RESOURCE Error related to system resources.
- * @retval CPA_STATUS_RESTARTING API implementation is restarting. Resubmit
- * the request.
* @retval CPA_STATUS_UNSUPPORTED Function is not supported.
*
* @pre
@@ -563,7 +528,7 @@ cpaDcChainRemoveSession(const CpaInstanceHandle dcInstance,
* @param[in] pChainOpData Pointer to an array of CpaDcChainOpData
* structures. There should be numOpDatas
* entries in the array.
- * @param[in,out] pResults Pointer to CpaDcChainRqResults structure.
+ * @param[in,out] pResults Pointer to CpaDcChainRqResults
* @param[in] callbackTag User supplied value to help correlate
* the callback with its associated request.
*
@@ -593,9 +558,9 @@ cpaDcChainRemoveSession(const CpaInstanceHandle dcInstance,
* -# The order of entries in pChainOpData[] must be consistent with the
* order of operations described for the chaining operation in
* CpaDcChainOperations.
- * As an example, for CPA_DC_CHAIN_COMPRESS_THEN_ENCRYPT, pChainOpData[0]
- * must contain the compression operation data and pChainOpData[1] must
- * contain the encryption operation data.
+ * As an example, for CPA_DC_CHAIN_HASH_THEN_COMPRESS, pChainOpData[0]
+ * must contain the hash operation data and pChainOpData[1] must
+ * contain the compress operation data.
*
* -# The numOpDatas for each chaining operation are specified in the
* comments for the operation in CpaDcChainOperations.
@@ -610,8 +575,6 @@ cpaDcChainRemoveSession(const CpaInstanceHandle dcInstance,
* CPA_DC_CHAIN_SYMMETRIC_CRYPTO and pChainOpData[]->pCySymOp should
* point to a CpaCySymOpData structure.
*
- * -# Stateful compression is not supported for chaining.
- *
* -# Partial packet processing is not supported.
*
* This function has identical buffer processing rules as
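The per-request analogue of the setup ordering: pChainOpData entries must follow the operation order of the chain. A hedged sketch for CPA_DC_CHAIN_HASH_THEN_COMPRESS; the opType member name mirrors the CpaDcChainOpData hunk above, and the argument order of the call is assumed from the parameter list documented here:

    /* Sketch: per-request op data in chain order, then submission. */
    CpaCySymOpData hashOp = { 0 };    /* 1st op: hash on plain text */
    CpaDcOpData    dcOp   = { 0 };    /* 2nd op: compression */

    CpaDcChainOpData ops[2];
    ops[0].opType   = CPA_DC_CHAIN_SYMMETRIC_CRYPTO;
    ops[0].pCySymOp = &hashOp;
    ops[1].opType   = CPA_DC_CHAIN_COMPRESS_DECOMPRESS;
    ops[1].pDcOp    = &dcOp;

    cpaDcChainPerformOp(inst, sess, pSrc, pDst,
                        CPA_DC_CHAIN_HASH_THEN_COMPRESS,
                        2 /* numOpDatas */, ops, &chainResults, NULL);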
diff --git a/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h b/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h
index 680e021f95d6..95c34e631b9e 100644
--- a/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h
+++ b/sys/dev/qat/qat_api/include/dc/cpa_dc_dp.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/icp_buffer_desc.h b/sys/dev/qat/qat_api/include/icp_buffer_desc.h
index 18ec7042c7e9..ef433495935d 100644
--- a/sys/dev/qat/qat_api/include/icp_buffer_desc.h
+++ b/sys/dev/qat/qat_api/include/icp_buffer_desc.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
+
/**
*****************************************************************************
* @file icp_buffer_desc.h
@@ -20,7 +21,7 @@
typedef Cpa64U icp_qat_addr_width_t; // hi32 first, lo32 second
-// Alignement constraint of the buffer list.
+/* Alignment constraint of the buffer list. */
#define ICP_DESCRIPTOR_ALIGNMENT_BYTES 8
/**
@@ -31,7 +32,7 @@ typedef Cpa64U icp_qat_addr_width_t; // hi32 first, lo32 second
*
* @description
* A QAT friendly buffer descriptor.
- * All buffer descriptor described in this structure are physcial
+ * All buffer descriptor described in this structure are physical
* and are 64 bit wide.
*
* Updates in the CpaFlatBuffer should be also reflected in this
@@ -55,7 +56,7 @@ typedef struct icp_flat_buffer_desc_s {
*
* @description
* A QAT friendly buffer descriptor.
- * All buffer descriptor described in this structure are physcial
+ * All buffer descriptor described in this structure are physical
* and are 64 bit wide.
*
* Updates in the CpaBufferList should be also reflected in this structure
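The 8-byte alignment constraint applies to the descriptor memory handed to the device, so a defensive check before submission is cheap. A hedged fragment; pDesc is assumed to come from a DMA-capable allocator:

    /* Sketch: reject a descriptor that violates the alignment constraint. */
    if (((uintptr_t)pDesc & (ICP_DESCRIPTOR_ALIGNMENT_BYTES - 1)) != 0)
        return CPA_STATUS_INVALID_PARAM;  /* must be 8-byte aligned */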
diff --git a/sys/dev/qat/qat_api/include/icp_sal_user.h b/sys/dev/qat/qat_api/include/icp_sal_user.h
index fd01fa97d344..6dd8b2a26746 100644
--- a/sys/dev/qat/qat_api/include/icp_sal_user.h
+++ b/sys/dev/qat/qat_api/include/icp_sal_user.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file icp_sal_user.h
@@ -75,7 +75,7 @@ CpaStatus icp_sal_userStart(const char *pProcessName);
*
* @param[in] limitDevAccess Specifies if the address space is limited
* to one device (true) or if it spans
- * accross multiple devices.
+ * across multiple devices.
*
* @retval CPA_STATUS_SUCCESS No error
* @retval CPA_STATUS_FAIL Operation failed. In this case user
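The limitDevAccess flag belongs to the multi-process start call. A hedged usage sketch; that icp_sal_userStartMultiProcess takes exactly this (name, flag) pair is assumed from the parameter list above:

    /* Sketch: start a user-space section confined to one device. */
    if (icp_sal_userStartMultiProcess("SSL", CPA_TRUE) != CPA_STATUS_SUCCESS)
        return CPA_STATUS_FAIL;
    /* ... acquire and use instances ... */
    icp_sal_userStop();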
@@ -478,7 +478,7 @@ CpaStatus icp_sal_find_new_devices(void);
* @assumptions
* None
* @sideEffects
- * In case a device has beed stoped or restarted the application
+ * In case a device has been stopped or restarted the application
* will get restarting/stop/shutdown events
* @reentrant
* No
diff --git a/sys/dev/qat/qat_api/include/icp_sal_versions.h b/sys/dev/qat/qat_api/include/icp_sal_versions.h
index db1ba297adc6..0eb227ade09c 100644
--- a/sys/dev/qat/qat_api/include/icp_sal_versions.h
+++ b/sys/dev/qat/qat_api/include/icp_sal_versions.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/**
***************************************************************************
* @file icp_sal_versions.h
@@ -26,7 +26,7 @@
/* Part name and number of the accelerator device */
#define SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER 3
-#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 14
+#define SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER 16
#define SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER 0
/**
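The bump takes the reported SAL driver software version from 3.14.0 to 3.16.0. A trivial sketch of how the three macros compose into a version string (the composition itself is illustrative):

    /* Sketch: render the SAL driver version. Assumes stdio.h is included. */
    char ver[16];
    snprintf(ver, sizeof(ver), "%d.%d.%d",
             SAL_INFO2_DRIVER_SW_VERSION_MAJ_NUMBER,
             SAL_INFO2_DRIVER_SW_VERSION_MIN_NUMBER,
             SAL_INFO2_DRIVER_SW_VERSION_PATCH_NUMBER);   /* "3.16.0" */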
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_common.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_common.h
index 92c262356e95..87c73fe7f05d 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_common.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_common.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_dh.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_dh.h
index 57a77b8559f2..b82bf63642a9 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_dh.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_dh.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_dsa.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_dsa.h
index f7f51bf2aa7b..149a888f1744 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_dsa.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_dsa.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_ec.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_ec.h
index 8f72bd669229..45f724a8d629 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_ec.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_ec.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -266,7 +233,7 @@ typedef struct _CpaCyEcCurveParametersWeierstrass
*
* @description
* This union allows for the characterisation of different curve types
- * encapsulted in one data type. The intention is that new curve types
+ * encapsulated in one data type. The intention is that new curve types
* will be added in the future.
*
* @note
@@ -451,7 +418,7 @@ typedef struct _CpaCyEcGenericPointVerifyOpData {
*****************************************************************************
* @ingroup cpaCyEc
* EC Point Multiplication Operation Data for Edwards or
- * Montgomery curves as specificied in RFC#7748.
+ * Montgomery curves as specified in RFC#7748.
*
* @description
* This structure contains the operation data for the
@@ -486,11 +453,11 @@ typedef struct _CpaCyEcMontEdwdsPointMultiplyOpData {
/**< field type for the operation */
CpaBoolean generator;
/**< True if the operation is a generator multiplication (kG)
- * False if it is a variable point multiplcation (kP). */
+ * False if it is a variable point multiplication (kP). */
CpaFlatBuffer k;
/**< k scalar multiplier for the operation */
CpaFlatBuffer x;
- /**< x value. Used in scalar varable point multiplication operations.
+ /**< x value. Used in scalar variable point multiplication operations.
* Not required if the generator is True. Must be NULL if not required.
* The size of the buffer MUST be 32B for 25519 curves and 64B for 448
* curves */
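Generator multiplies omit the x input entirely. A hedged sketch for a Curve25519 generator multiply (kG); the 32-byte scalar size follows the comment above, and kBytes is an illustrative caller-supplied buffer:

    /* Sketch: kG on a 25519 curve; no variable point is supplied. */
    CpaCyEcMontEdwdsPointMultiplyOpData op = { 0 };
    op.generator = CPA_TRUE;      /* generator multiply, so x must be NULL */
    op.k.pData = kBytes;          /* 32-byte scalar for 25519 curves */
    op.k.dataLenInBytes = 32;
    op.x.pData = NULL;
    op.x.dataLenInBytes = 0;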
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdh.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdh.h
index fcf969093136..3a7ce1a820c0 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdh.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdh.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdsa.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdsa.h
index a3ecbbe88026..af1eea223e89 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdsa.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecdsa.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecsm2.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecsm2.h
index c7c89e3c6e8a..5d5e2b209aa1 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_ecsm2.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_ecsm2.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_im.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_im.h
index 2225e364f64a..a9410523367f 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_im.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_im.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_key.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_key.h
index 3d013271e80b..c51d57fd8ca6 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_key.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_key.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_kpt.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_kpt.h
index 612b86dbe488..e3efc2163848 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_kpt.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_kpt.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -96,14 +63,14 @@ typedef Cpa64U CpaCyKptHandle;
typedef enum CpaCyKptKeyManagementStatus_t
{
CPA_CY_KPT_SUCCESS = 0,
- /**< Generic success status for all KPT wrapping key handling functions*/
+ /**< Generic success status for all KPT wrapping key handling functions */
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_VFID,
- /**< SWK count exceeds the configured maxmium value per VFID*/
+ /**< SWK count exceeds the configured maximum value per VFID */
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_PASID,
- /**< SWK count exceeds the configured maxmium value per PASID*/
+ /**< SWK count exceeds the configured maximum value per PASID */
CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED,
- /**< SWK count exceeds the configured maxmium value when not scoped to
- * VFID or PASID*/
+ /**< SWK count exceeds the configured maximum value when not scoped to
+ * VFID or PASID */
CPA_CY_KPT_SWK_FAIL_NOT_FOUND,
/**< Unable to find SWK entry by handle */
CPA_CY_KPT_FAILED,
@@ -277,7 +244,7 @@ typedef struct CpaCyKptRsaPrivateKeyRep1_t
* describing the RSA private key. The quintuple of p, q, dP, dQ, and qInv
* (explained below and in the spec) are required for the second
 * representation. For KPT the parameters are encrypted
- * with the assoicated SWK as follows:
+ * with the associated SWK as follows:
* Encrypt - AES-256-GCM (Key, AAD, Input)
* "||" - denotes concatenation
* Key = SWK
@@ -584,11 +551,11 @@ typedef struct CpaCyKptEcdsaSignRSOpData_t
* enumerate type CpaCyKptKeyManagementStatus
* CPA_CY_KPT_SUCCESS Key Loaded successfully
* CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_VFID
- * SWK count exceeds the configured maxmium value per VFID
+ * SWK count exceeds the configured maximum value per VFID
* CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_PASID
- * SWK count exceeds the configured maxmium value per PASID
+ * SWK count exceeds the configured maximum value per PASID
* CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED
- * SWK count exceeds the configured maxmium value when not scoped to
+ * SWK count exceeds the configured maximum value when not scoped to
* VFID or PASID
* CPA_CY_KPT_FAILED Operation failed due to unspecified reason
*
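The KPT status values above distinguish per-VFID, per-PASID, and global SWK quota exhaustion. A minimal sketch of acting on them after a key load; the cpaCyKptLoadKey call shape, instanceHandle, and swk are assumptions rather than quotes from this header:

CpaCyKptHandle keyHandle = 0;
CpaCyKptKeyManagementStatus kptStatus = CPA_CY_KPT_FAILED;

/* Hypothetical load of a wrapped SWK; check the KPT status on success. */
if (cpaCyKptLoadKey(instanceHandle, &swk, &keyHandle, &kptStatus) ==
    CPA_STATUS_SUCCESS) {
        switch (kptStatus) {
        case CPA_CY_KPT_SUCCESS:
                break;  /* keyHandle is now valid */
        case CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_VFID:
        case CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED_PER_PASID:
        case CPA_CY_KPT_LOADKEY_FAIL_QUOTA_EXCEEDED:
                /* Quota exhausted in some scope: unload an unused SWK
                 * and retry. */
                break;
        default:
                /* CPA_CY_KPT_SWK_FAIL_NOT_FOUND, CPA_CY_KPT_FAILED, ... */
                break;
        }
}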
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_ln.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_ln.h
index 43550cdb0fed..1049bb114948 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_ln.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_ln.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_prime.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_prime.h
index 7065304f69e8..313a2aca7649 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_prime.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_prime.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_rsa.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_rsa.h
index a72950ecd970..bd85b7ee178d 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_rsa.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_rsa.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_sym.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_sym.h
index 370b7e2397c4..45f456d4a31d 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_sym.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_sym.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -71,7 +38,7 @@ extern "C" {
* is allocated by the client. The size of the memory that the client needs
* to allocate is determined by a call to the @ref
* cpaCySymSessionCtxGetSize or @ref cpaCySymSessionCtxGetDynamicSize
- * functions. The session context memory is initialized with a call to
+ * functions. The session context memory is initialized with a call to
* the @ref cpaCySymInitSession function.
* This memory MUST not be freed until a call to @ref
* cpaCySymRemoveSession has completed successfully.
@@ -253,7 +220,7 @@ typedef enum _CpaCySymCipherDirection
* Symmetric Cipher Setup Data.
* @description
* This structure contains data relating to Cipher (Encryption and
- * Decryption) to set up a session.
+ * Decryption) to set up a session.
*
*****************************************************************************/
typedef struct _CpaCySymCipherSetupData {
@@ -272,7 +239,8 @@ typedef struct _CpaCySymCipherSetupData {
* - Two keys must be provided and cipherKeyLenInBytes refers to total
* length of the two keys.
* - Each key can be either 128 bits (16 bytes) or 256 bits (32 bytes).
- * - Both keys must have the same size. */
+ * - Both keys must have the same size.
+ */
Cpa8U *pCipherKey;
/**< Cipher key
* For the CPA_CY_SYM_CIPHER_AES_F8 mode of operation, pCipherKey will
@@ -281,7 +249,9 @@ typedef struct _CpaCySymCipherSetupData {
* bytes to match the length of the encryption key used.
* For AES-XTS mode of operation, two keys must be provided and pCipherKey
* must point to the two keys concatenated together (Key1 || Key2).
- * cipherKeyLenInBytes will contain the total size of both keys. */
+ * cipherKeyLenInBytes will contain the total size of both keys.
+ * These fields are set to NULL if key derivation will be used.
+ */
CpaCySymCipherDirection cipherDirection;
/**< This parameter determines if the cipher operation is an encrypt or
* a decrypt operation.
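Per the AES-XTS key layout above, both keys travel in one buffer, with cipherKeyLenInBytes covering the pair. A minimal sketch, assuming two caller-supplied 256-bit keys key1 and key2:

Cpa8U xtsKey[64];                       /* Key1 || Key2, 256 bits each */
CpaCySymCipherSetupData cipherSetup = { 0 };

memcpy(xtsKey, key1, 32);               /* key1/key2 are assumptions */
memcpy(xtsKey + 32, key2, 32);

cipherSetup.cipherAlgorithm = CPA_CY_SYM_CIPHER_AES_XTS;
cipherSetup.pCipherKey = xtsKey;        /* both keys, concatenated */
cipherSetup.cipherKeyLenInBytes = sizeof(xtsKey); /* total of both keys */
cipherSetup.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_ENCRYPT;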
@@ -651,11 +621,11 @@ typedef enum _CpaCySymAlgChainOrder
* setup a session.
*
****************************************************************************/
-typedef struct _CpaCySymSessionSetupData {
+typedef struct _CpaCySymSessionSetupData {
CpaCyPriority sessionPriority;
/**< Priority of this session */
CpaCySymOp symOperation;
- /**< Operation to perfom */
+ /**< Operation to perform */
CpaCySymCipherSetupData cipherSetupData;
/**< Cipher Setup Data for the session. This member is ignored for the
* CPA_CY_SYM_OP_HASH operation. */
@@ -665,7 +635,7 @@ typedef struct _CpaCySymSessionSetupData {
CpaCySymAlgChainOrder algChainOrder;
/**< If this operation data structure relates to an algorithm chaining
* session then this parameter determines the order in which the chained
- * operations are performed. If this structure does not relate to an
+ * operations are performed. If this structure does not relate to an
* algorithm chaining session then this parameter will be ignored.
*
* @note In the case of authenticated ciphers (GCM and CCM), which are
@@ -674,7 +644,7 @@ typedef struct _CpaCySymSessionSetupData {
* cases. */
CpaBoolean digestIsAppended;
/**< Flag indicating whether the digest is appended immediately following
- * the region over which the digest is computed. This is true for both
+ * the region over which the digest is computed. This is true for both
* IPsec packets and SSL/TLS records.
*
* If this flag is set, then the value of the pDigestResult field of
@@ -689,7 +659,7 @@ typedef struct _CpaCySymSessionSetupData {
*/
CpaBoolean verifyDigest;
/**< This flag is relevant only for operations which generate a message
- * digest. If set to true, the computed digest will not be written back
+ * digest. If set to true, the computed digest will not be written back
* to the buffer location specified by other parameters, but instead will
* be verified (i.e. compared to the value passed in at that location).
* The number of bytes to be written or compared is indicated by the
@@ -697,7 +667,7 @@ typedef struct _CpaCySymSessionSetupData {
* @note This option is only valid for full packets and for final
* partial packets when using partials without algorithm chaining.
* @note The value of this field is ignored for the authenticated ciphers
- * (AES_CCM and AES_GCM). Digest verification is always done for these
+ * (AES_CCM and AES_GCM). Digest verification is always done for these
* (when the direction is decrypt) and unless the DP API is used,
* the message buffer will be zeroed if verification fails. When using the
 * DP API, it is the API client's responsibility to clear the message
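verifyDigest, as documented above, turns digest generation into an in-place comparison. A sketch of a verify-on-decrypt session setup under that reading; the operation choice is illustrative:

CpaCySymSessionSetupData setup = { 0 };

setup.symOperation = CPA_CY_SYM_OP_ALGORITHM_CHAINING;
setup.cipherSetupData.cipherDirection = CPA_CY_SYM_CIPHER_DIRECTION_DECRYPT;
setup.digestIsAppended = CPA_TRUE;  /* digest sits right after the data */
setup.verifyDigest = CPA_TRUE;      /* compare in place, do not write back */
/* Note: for AES_CCM/AES_GCM verifyDigest is ignored; verification is
 * always performed on decrypt, as the comment above states. */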
@@ -938,6 +908,7 @@ typedef struct _CpaCySymOpData {
* operation, this field is not used and should be set to 0. Instead
* the AAD data should be placed in the source buffer.
*/
+
} CpaCySymOpData;
/**
@@ -961,7 +932,7 @@ typedef struct _CpaCySymOpData {
* @description
* This macro sets the additional authentication data in the
 * appropriate location of the @ref CpaCySymOpData struct for the
- * authenticated encryptionalgorithm @ref CPA_CY_SYM_HASH_AES_CCM.
+ * authenticated encryption algorithm @ref CPA_CY_SYM_HASH_AES_CCM.
****************************************************************************/
#define CPA_CY_SYM_CCM_SET_AAD(pOpData, pAad, aadLen) do { \
memcpy(&pOpData->pAdditionalAuthData[18], pAad, aadLen); \
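As the macro body shows, the caller's AAD lands at offset 18 of pAdditionalAuthData. A usage sketch; aad and aadLen are caller-supplied assumptions:

/* The buffer behind pOpData->pAdditionalAuthData must hold at least
 * 18 + aadLen bytes before this is invoked. */
CPA_CY_SYM_CCM_SET_AAD(pOpData, aad, aadLen);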
@@ -1122,10 +1093,10 @@ typedef void (*CpaCySymCbFunc)(void *pCallbackTag,
* implementations)
* (2) between different releases of the same API implementation.
*
- * The size returned by this function is the smallest size needed to
+ * The size returned by this function is the smallest size needed to
* support all possible combinations of setup data parameters. Some
- * setup data parameter combinations may fit within a smaller session
- * context size. The alternate cpaCySymSessionCtxGetDynamicSize()
+ * setup data parameter combinations may fit within a smaller session
+ * context size. The alternate cpaCySymSessionCtxGetDynamicSize()
* function will return the smallest size needed to fit the
* provided setup data parameters.
*
@@ -1183,17 +1154,17 @@ cpaCySymSessionCtxGetSize(const CpaInstanceHandle instanceHandle,
* Gets the minimum size required to store a session context.
*
* @description
- * This function is used by the client to determine the smallest size of
- * the memory it must allocate in order to store the session context.
- * This MUST be called before the client allocates the memory for the
- * session context and before the client calls the @ref cpaCySymInitSession
+ * This function is used by the client to determine the smallest size of
+ * the memory it must allocate in order to store the session context.
+ * This MUST be called before the client allocates the memory for the
+ * session context and before the client calls the @ref cpaCySymInitSession
* function.
*
 * This function is an alternate to cpaCySymSessionCtxGetSize().
- * cpaCySymSessionCtxGetSize() will return a fixed size which is the
- * minimum memory size needed to support all possible setup data parameter
- * combinations. cpaCySymSessionCtxGetDynamicSize() will return the
- * minimum memory size needed to support the specific session setup
+ * cpaCySymSessionCtxGetSize() will return a fixed size which is the
+ * minimum memory size needed to support all possible setup data parameter
+ * combinations. cpaCySymSessionCtxGetDynamicSize() will return the
+ * minimum memory size needed to support the specific session setup
* data parameters provided. This size may be different for different setup
* data parameters.
*
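A sketch of the dynamic-sizing path contrasted above; instanceHandle, setup, and symCallback are assumed to exist, and allocContigMem() is an illustrative stand-in for the client's physically-contiguous allocator:

Cpa32U ctxSize = 0;
CpaCySymSessionCtx sessionCtx = NULL;

/* Smallest context that fits exactly these setup parameters. */
if (cpaCySymSessionCtxGetDynamicSize(instanceHandle, &setup, &ctxSize) ==
    CPA_STATUS_SUCCESS) {
        sessionCtx = allocContigMem(ctxSize);   /* assumed helper */
        if (sessionCtx != NULL)
                (void)cpaCySymInitSession(instanceHandle, symCallback,
                                          &setup, sessionCtx);
}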
@@ -1564,7 +1535,7 @@ cpaCySymSessionInUse(CpaCySymSessionCtx sessionCtx,
* a multiple of the relevant block size.
* i.e. padding WILL NOT be applied to the data.
* For optimum performance, the buffer should
- * only contain the data region that the
+ * only contain the data region that the
* cryptographic operation(s) must be performed on.
* Any additional data in the source buffer may be
* copied to the destination buffer and this copy
diff --git a/sys/dev/qat/qat_api/include/lac/cpa_cy_sym_dp.h b/sys/dev/qat/qat_api/include/lac/cpa_cy_sym_dp.h
index 7f103ec98e51..24c1eaeeab4b 100644
--- a/sys/dev/qat/qat_api/include/lac/cpa_cy_sym_dp.h
+++ b/sys/dev/qat/qat_api/include/lac/cpa_cy_sym_dp.h
@@ -1,38 +1,5 @@
-/***************************************************************************
- *
- * BSD LICENSE
- *
- * Copyright(c) 2007-2023 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- ***************************************************************************/
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*
*****************************************************************************
@@ -376,7 +343,7 @@ typedef struct _CpaCySymDpOpData {
* @description
* This is the callback function prototype. The callback function is
* registered by the application using the @ref cpaCySymDpRegCbFunc
- * function call, and called back on completion of asycnhronous
+ * function call, and called back on completion of asynchronous
* requests made via calls to @ref cpaCySymDpEnqueueOp or @ref
* cpaCySymDpEnqueueOpBatch.
*
@@ -428,7 +395,7 @@ typedef void (*CpaCySymDpCbFunc)(CpaCySymDpOpData *pOpData,
* @description
* This function allows a completion callback function to be registered.
* The registered callback function is invoked on completion of
- * asycnhronous requests made via calls to @ref cpaCySymDpEnqueueOp
+ * asynchronous requests made via calls to @ref cpaCySymDpEnqueueOp
* or @ref cpaCySymDpEnqueueOpBatch.
*
* If a callback function was previously registered, it is overwritten.
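A minimal sketch of registering the data-plane completion callback described above; instanceHandle is an assumption, and the callback signature follows the CpaCySymDpCbFunc prototype in this header:

static void
symDpDone(CpaCySymDpOpData *pOpData, CpaStatus status,
    CpaBoolean verifyResult)
{
        /* pOpData identifies the request enqueued via
         * cpaCySymDpEnqueueOp() or cpaCySymDpEnqueueOpBatch(). */
}

/* Once per instance, before any enqueue: */
CpaStatus rc = cpaCySymDpRegCbFunc(instanceHandle, symDpDone);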
diff --git a/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h b/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h
index abfab512193c..395d235a367e 100644
--- a/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h
+++ b/sys/dev/qat/qat_api/qat_direct/include/icp_accel_devices.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*****************************************************************************
* @file icp_accel_devices.h
*
@@ -126,7 +126,7 @@ typedef struct accel_dev_s {
QatUtilsAtomic usageCounter; /* Usage counter. Prevents
shutting down the dev if not 0*/
Cpa32U deviceMemAvail; /* Device memory for intermediate buffers */
- /* Component specific fields - cast to relevent layer */
+ /* Component specific fields - cast to relevant layer */
void *pRingInflight; /* For offload optimization */
void *pSalHandle; /* For SAL*/
void *pQatStats; /* For QATAL/SAL stats */
diff --git a/sys/dev/qat/qat_api/qat_direct/include/icp_adf_init.h b/sys/dev/qat/qat_api/qat_direct/include/icp_adf_init.h
index 8c494d9445e0..67b3bdc5718f 100644
--- a/sys/dev/qat/qat_api/qat_direct/include/icp_adf_init.h
+++ b/sys/dev/qat/qat_api/qat_direct/include/icp_adf_init.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
/*****************************************************************************
* @file icp_adf_init.h
*
@@ -40,7 +40,7 @@ typedef enum icp_adf_ringInfoOperation_e {
} icp_adf_ringInfoOperation_t;
/*
- * Ring generic serivce info private data
+ * Ring generic service info private data
*/
typedef enum icp_adf_ringInfoService_e {
ICP_ADF_RING_SERVICE_0 = 0,
diff --git a/sys/dev/qat/qat_api/qat_kernel/src/lac_adf_interface_freebsd.c b/sys/dev/qat/qat_api/qat_kernel/src/lac_adf_interface_freebsd.c
index b869a8eb934b..12cce62d7806 100644
--- a/sys/dev/qat/qat_api/qat_kernel/src/lac_adf_interface_freebsd.c
+++ b/sys/dev/qat/qat_api/qat_kernel/src/lac_adf_interface_freebsd.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_cfg.h"
#include "cpa.h"
#include "icp_accel_devices.h"
@@ -288,8 +288,8 @@ icp_amgr_getAccelDevByCapabilities(Cpa32U capabilitiesMask,
/*
 * icp_amgr_getAllAccelDevByEachCapability
- * Returns table of accel devices that are started and implement
- * each of the capabilities specified in capabilitiesMask.
+ * Returns a table of accel devices that are started and that implement
+ * at least one of the capabilities specified in capabilitiesMask.
*/
CpaStatus
icp_amgr_getAllAccelDevByEachCapability(Cpa32U capabilitiesMask,
diff --git a/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h b/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h
index 778be17e841a..a2dea054f753 100644
--- a/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h
+++ b/sys/dev/qat/qat_api/qat_utils/include/qat_utils.h
@@ -1,9 +1,8 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef QAT_UTILS_H
#define QAT_UTILS_H
-
#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/endian.h>
@@ -830,7 +829,7 @@ CpaStatus qatUtilsHashSHA512Full(uint8_t *in, uint8_t *out, uint32_t len);
*
* @brief Single block AES encrypt
*
- * @param key - pointer to symetric key.
+ * @param key - pointer to symmetric key.
* keyLenInBytes - key length
* in - pointer to data to encrypt
* out - pointer to output buffer for encrypted text
@@ -853,7 +852,7 @@ CpaStatus qatUtilsAESEncrypt(uint8_t *key,
*
* @brief Converts AES forward key to reverse key
*
- * @param key - pointer to symetric key.
+ * @param key - pointer to symmetric key.
* keyLenInBytes - key length
* out - pointer to output buffer for reversed key
* The in and out buffers need to be at least AES block size long
diff --git a/sys/dev/qat/qat_api/qat_utils/src/QatUtilsServices.c b/sys/dev/qat/qat_api/qat_utils/src/QatUtilsServices.c
index 3134c2d375eb..27cfde01c7ae 100644
--- a/sys/dev/qat/qat_api/qat_utils/src/QatUtilsServices.c
+++ b/sys/dev/qat/qat_api/qat_utils/src/QatUtilsServices.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_utils.h"
#include <sys/param.h>
@@ -10,7 +10,7 @@
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/time.h>
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -20,7 +20,7 @@
*
* Data struct to store the information on the
* memory allocated. This structure is stored at the beginning of
- * the allocated chunck of memory
+ * the allocated chunk of memory
 * size is the number of bytes passed to the memory allocation functions
 * mSize is the real size of the memory requested from the OS
*
@@ -91,7 +91,7 @@ qatUtilsMemFreeNUMA(void *ptr)
"QatUtilsMemAlignedFree: Detected corrupted data: memory leak!\n");
return;
}
- contigfree(memInfo->mAllocMemPtr, memInfo->mSize, M_QAT);
+ free(memInfo->mAllocMemPtr, M_QAT);
}
CpaStatus
diff --git a/sys/dev/qat/qat_common/adf_aer.c b/sys/dev/qat/qat_common/adf_aer.c
index 7fdeba873420..0f402ba255ed 100644
--- a/sys/dev/qat/qat_common/adf_aer.c
+++ b/sys/dev/qat/qat_common/adf_aer.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -28,17 +28,13 @@ static struct workqueue_struct *device_reset_wq;
void
linux_complete_common(struct completion *c, int all)
{
- int wakeup_swapper;
-
sleepq_lock(c);
c->done++;
if (all)
- wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
+ sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
else
- wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
+ sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
sleepq_release(c);
- if (wakeup_swapper)
- kick_proc0();
}
/* reset dev data */
@@ -280,6 +276,15 @@ adf_notify_fatal_error_work(struct work_struct *work)
struct adf_fatal_error_data *wq_data =
container_of(work, struct adf_fatal_error_data, work);
struct adf_accel_dev *accel_dev = wq_data->accel_dev;
+ struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+
+ if (adf_dev_in_use(accel_dev)) {
+ if (hw_device->pre_reset) {
+ device_printf(GET_DEV(accel_dev),
+ "Performing pre reset save\n");
+ hw_device->pre_reset(accel_dev);
+ }
+ }
adf_error_notifier((uintptr_t)accel_dev);
if (!accel_dev->is_vf) {
diff --git a/sys/dev/qat/qat_common/adf_cfg.c b/sys/dev/qat/qat_common/adf_cfg.c
index 736ede860840..37ab44fdb1f6 100644
--- a/sys/dev/qat/qat_common/adf_cfg.c
+++ b/sys/dev/qat/qat_common/adf_cfg.c
@@ -1,15 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
-#include "adf_cfg_dev_dbg.h"
#include "adf_cfg_device.h"
#include "adf_cfg_sysctl.h"
-#include "adf_heartbeat_dbg.h"
-#include "adf_ver_dbg.h"
-#include "adf_fw_counters.h"
-#include "adf_cnvnr_freq_counters.h"
/**
* adf_cfg_dev_add() - Create an acceleration device configuration table.
@@ -73,31 +68,13 @@ adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
ADF_CFG_MAX_VAL);
}
- if (adf_cfg_sysctl_add(accel_dev))
- goto err;
-
- if (adf_cfg_dev_dbg_add(accel_dev))
- goto err;
-
- if (!accel_dev->is_vf) {
- if (adf_heartbeat_dbg_add(accel_dev))
- goto err;
-
- if (adf_ver_dbg_add(accel_dev))
- goto err;
-
- if (adf_fw_counters_add(accel_dev))
- goto err;
-
- if (adf_cnvnr_freq_counters_add(accel_dev))
- goto err;
+ if (adf_cfg_sysctl_add(accel_dev)) {
+ free(dev_cfg_data, M_QAT);
+ accel_dev->cfg = NULL;
+ return EFAULT;
}
- return 0;
-err:
- free(dev_cfg_data, M_QAT);
- accel_dev->cfg = NULL;
- return EFAULT;
+ return 0;
}
static void adf_cfg_section_del_all(struct list_head *head);
@@ -142,13 +119,6 @@ adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
sx_xunlock(&dev_cfg_data->lock);
adf_cfg_sysctl_remove(accel_dev);
- adf_cfg_dev_dbg_remove(accel_dev);
- if (!accel_dev->is_vf) {
- adf_ver_dbg_del(accel_dev);
- adf_heartbeat_dbg_del(accel_dev);
- adf_fw_counters_remove(accel_dev);
- adf_cnvnr_freq_counters_remove(accel_dev);
- }
free(dev_cfg_data, M_QAT);
accel_dev->cfg = NULL;
diff --git a/sys/dev/qat/qat_common/adf_cfg_device.c b/sys/dev/qat/qat_common/adf_cfg_device.c
index a26d2fdfd32e..4860a4064b97 100644
--- a/sys/dev/qat/qat_common/adf_cfg_device.c
+++ b/sys/dev/qat/qat_common/adf_cfg_device.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_cfg_instance.h"
#include "adf_cfg_section.h"
#include "adf_cfg_device.h"
@@ -677,6 +677,18 @@ adf_cfg_device_clear(struct adf_cfg_device *device,
device->instances = NULL;
}
+void
+adf_cfg_device_clear_all(struct adf_accel_dev *accel_dev)
+{
+ sx_xlock(&accel_dev->cfg->lock);
+ if (accel_dev->cfg->dev) {
+ adf_cfg_device_clear(accel_dev->cfg->dev, accel_dev);
+ free(accel_dev->cfg->dev, M_QAT);
+ accel_dev->cfg->dev = NULL;
+ }
+ sx_xunlock(&accel_dev->cfg->lock);
+}
+
/*
* Static configuration for userspace
*/
diff --git a/sys/dev/qat/qat_common/adf_cfg_sysctl.c b/sys/dev/qat/qat_common/adf_cfg_sysctl.c
index 621c3cc5b6c6..1a836765c94a 100644
--- a/sys/dev/qat/qat_common/adf_cfg_sysctl.c
+++ b/sys/dev/qat/qat_common/adf_cfg_sysctl.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -10,6 +10,7 @@
#include "adf_common_drv.h"
#include <sys/mutex.h>
#include <sys/sbuf.h>
+#include <sys/priv.h>
#define ADF_CFG_SYSCTL_BUF_SZ ADF_CFG_MAX_VAL
#define ADF_CFG_UP_STR "up"
@@ -105,6 +106,9 @@ static int adf_cfg_sysctl_services_handle(SYSCTL_HANDLER_ARGS)
int ret = 0;
int i = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
accel_dev = arg1;
if (!accel_dev)
return ENXIO;
@@ -156,6 +160,9 @@ static int adf_cfg_sysctl_mode_handle(SYSCTL_HANDLER_ARGS)
int ret = 0;
int i = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
accel_dev = arg1;
if (!accel_dev)
return ENXIO;
@@ -204,6 +211,9 @@ static int adf_cfg_sysctl_handle(SYSCTL_HANDLER_ARGS)
unsigned int len;
int ret = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
accel_dev = arg1;
if (!accel_dev)
return ENXIO;
@@ -245,6 +255,9 @@ static int adf_cfg_sysctl_num_processes_handle(SYSCTL_HANDLER_ARGS)
uint32_t num_user_processes = 0;
int ret = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
accel_dev = arg1;
if (!accel_dev)
return ENXIO;
diff --git a/sys/dev/qat/qat_common/adf_clock.c b/sys/dev/qat/qat_common/adf_clock.c
index 36204c9939ac..f5d4116505b6 100644
--- a/sys/dev/qat/qat_common/adf_clock.c
+++ b/sys/dev/qat/qat_common/adf_clock.c
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include <linux/delay.h>
+#include <sys/priv.h>
#define MEASURE_CLOCK_RETRIES 10
#define MEASURE_CLOCK_DELTA_THRESHOLD 100
@@ -21,11 +22,30 @@
} \
} while (0)
+static int adf_clock_read_frequency(SYSCTL_HANDLER_ARGS)
+{
+ struct adf_accel_dev *accel_dev = arg1;
+ struct adf_hw_device_data *hw_data;
+ int error = EFAULT;
+
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
+ if (accel_dev == NULL)
+ return EINVAL;
+
+ hw_data = accel_dev->hw_device;
+
+ error = sysctl_handle_int(oidp, &hw_data->clock_frequency, 0, req);
+ if (error || !req->newptr)
+ return error;
+
+ return (0);
+}
+
int
adf_clock_debugfs_add(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
-
struct sysctl_ctx_list *qat_sysctl_ctx;
struct sysctl_oid *qat_sysctl_tree;
struct sysctl_oid *rc = 0;
@@ -35,13 +55,15 @@ adf_clock_debugfs_add(struct adf_accel_dev *accel_dev)
qat_sysctl_tree =
device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
- rc = SYSCTL_ADD_UINT(qat_sysctl_ctx,
+ rc = SYSCTL_ADD_PROC(qat_sysctl_ctx,
SYSCTL_CHILDREN(qat_sysctl_tree),
OID_AUTO,
CLK_DBGFS_FILE,
- CTLFLAG_RD,
- &hw_data->clock_frequency,
+ CTLTYPE_INT | CTLFLAG_RD,
+ accel_dev,
0,
+ adf_clock_read_frequency,
+ "IU",
"clock frequency");
HB_SYSCTL_ERR(rc);
return 0;
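The change above, from a static SYSCTL_ADD_UINT node to a proc handler, is the pattern this diff applies across the qat sysctl entries: only a handler can run priv_check(9) on each read. Distilled to a minimal read-only shape (the function name is illustrative):

static int
qat_ro_int_sysctl(SYSCTL_HANDLER_ARGS)
{
        int *valp = arg1;

        /* Refuse unprivileged readers before touching driver state. */
        if (priv_check(curthread, PRIV_DRIVER) != 0)
                return (EPERM);
        if (valp == NULL)
                return (EINVAL);
        return (sysctl_handle_int(oidp, valp, 0, req));
}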
diff --git a/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c
index 7585dd9b29d4..6068d7d99496 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_cfg_dev_dbg.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_common_drv.h"
#include "adf_cfg_device.h"
@@ -12,6 +12,7 @@
#include <sys/sx.h>
#include <sys/systm.h>
#include <sys/malloc.h>
+#include <sys/priv.h>
static int qat_dev_cfg_show(SYSCTL_HANDLER_ARGS)
{
@@ -21,6 +22,9 @@ static int qat_dev_cfg_show(SYSCTL_HANDLER_ARGS)
struct sbuf sb;
int error;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
sbuf_new_for_sysctl(&sb, NULL, 128, req);
dev_cfg = arg1;
sx_slock(&dev_cfg->lock);
diff --git a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c
index 960c71472bc8..e4ddbf489192 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_cnvnr_ctrs_dbg.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/priv.h>
#include "adf_cnvnr_freq_counters.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
@@ -45,6 +46,9 @@ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS)
char report[MAX_REPORT_SIZE];
char *report_ptr = report;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
/* Defensive check */
if (!accel_dev || accel_dev->accel_id > ADF_MAX_DEVICES)
return EINVAL;
@@ -89,6 +93,7 @@ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS)
/* Extracting number of Acceleration Engines */
num_aes = hw_device->get_num_aes(hw_device);
+ explicit_bzero(&request, sizeof(struct icp_qat_fw_init_admin_req));
for (ae = 0; ae < num_aes; ae++) {
if (accel_dev->au_info && !test_bit(ae, &dc_ae_msk))
continue;
@@ -123,10 +128,12 @@ static int qat_cnvnr_ctrs_dbg_read(SYSCTL_HANDLER_ARGS)
cnvnr_err_str[error_type],
latest_error);
if (bytes_written <= 0) {
- printf("ERROR: No space left in CnV ctrs line buffer\n"
- "\tAcceleration ID: %d, Engine: %d\n",
- accel_dev->accel_id,
- ae);
+ device_printf(
+ GET_DEV(accel_dev),
+ "ERROR: No space left in CnV ctrs line buffer\n"
+ "\tAcceleration ID: %d, Engine: %d\n",
+ accel_dev->accel_id,
+ ae);
break;
}
report_ptr += bytes_written;
@@ -141,7 +148,6 @@ adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev)
{
struct sysctl_ctx_list *qat_sysctl_ctx;
struct sysctl_oid *qat_cnvnr_ctrs_sysctl_tree;
- struct sysctl_oid *oid_rc;
/* Defensive checks */
if (!accel_dev)
@@ -154,19 +160,22 @@ adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev)
device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
/* Create "cnv_error" string type leaf - with callback */
- oid_rc = SYSCTL_ADD_PROC(qat_sysctl_ctx,
- SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree),
- OID_AUTO,
- "cnv_error",
- CTLTYPE_STRING | CTLFLAG_RD,
- accel_dev,
- 0,
- qat_cnvnr_ctrs_dbg_read,
- "IU",
- "QAT CnVnR status");
-
- if (!oid_rc) {
- printf("ERROR: Memory allocation failed\n");
+ accel_dev->cnv_error_oid =
+ SYSCTL_ADD_PROC(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_cnvnr_ctrs_sysctl_tree),
+ OID_AUTO,
+ "cnv_error",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ qat_cnvnr_ctrs_dbg_read,
+ "IU",
+ "QAT CnVnR status");
+
+ if (!accel_dev->cnv_error_oid) {
+ device_printf(
+ GET_DEV(accel_dev),
+ "Failed to create qat cnvnr freq counters sysctl entry.\n");
return ENOMEM;
}
return 0;
@@ -175,4 +184,17 @@ adf_cnvnr_freq_counters_add(struct adf_accel_dev *accel_dev)
void
adf_cnvnr_freq_counters_remove(struct adf_accel_dev *accel_dev)
{
+ struct sysctl_ctx_list *qat_sysctl_ctx;
+
+ if (!accel_dev)
+ return;
+
+ qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+
+ if (accel_dev->cnv_error_oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->cnv_error_oid);
+ sysctl_remove_oid(accel_dev->cnv_error_oid, 1, 1);
+ accel_dev->cnv_error_oid = NULL;
+ }
}
diff --git a/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c b/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c
new file mode 100644
index 000000000000..21ffb0adf559
--- /dev/null
+++ b/sys/dev/qat/qat_common/adf_freebsd_dbgfs.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
+
+#include "adf_accel_devices.h"
+#include "adf_cfg_dev_dbg.h"
+#include "adf_cnvnr_freq_counters.h"
+#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
+#include "adf_fw_counters.h"
+#include "adf_freebsd_pfvf_ctrs_dbg.h"
+#include "adf_heartbeat_dbg.h"
+#include "adf_ver_dbg.h"
+
+/**
+ * adf_dbgfs_init() - add persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are persistent through a device
+ * state change (from up to down or vice versa).
+ */
+void
+adf_dbgfs_init(struct adf_accel_dev *accel_dev)
+{
+ adf_cfg_dev_dbg_add(accel_dev);
+}
+
+/**
+ * adf_dbgfs_exit() - remove persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void
+adf_dbgfs_exit(struct adf_accel_dev *accel_dev)
+{
+ adf_cfg_dev_dbg_remove(accel_dev);
+}
+
+/**
+ * adf_dbgfs_add() - add non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function creates debugfs entries that are not persistent through
+ * a device state change (from up to down or vice versa).
+ */
+void
+adf_dbgfs_add(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->is_vf) {
+ adf_heartbeat_dbg_add(accel_dev);
+ adf_ver_dbg_add(accel_dev);
+ adf_fw_counters_add(accel_dev);
+ adf_cnvnr_freq_counters_add(accel_dev);
+ }
+}
+
+/**
+ * adf_dbgfs_rm() - remove non-persistent debugfs entries
+ * @accel_dev: Pointer to acceleration device.
+ */
+void
+adf_dbgfs_rm(struct adf_accel_dev *accel_dev)
+{
+ if (!accel_dev->is_vf) {
+ adf_cnvnr_freq_counters_remove(accel_dev);
+ adf_fw_counters_remove(accel_dev);
+ adf_ver_dbg_del(accel_dev);
+ adf_heartbeat_dbg_del(accel_dev);
+ }
+}
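The four entry points above imply a pairing across the device lifecycle; this ordering is inferred from the comments, not shown in this diff:

adf_dbgfs_init(accel_dev);   /* attach: entries that survive up/down */
adf_dbgfs_add(accel_dev);    /* device brought up */
adf_dbgfs_rm(accel_dev);     /* device taken down */
adf_dbgfs_exit(accel_dev);   /* detach */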
diff --git a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
index b8a17344bdea..67e1d4ad2cab 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_dev_processes.c
@@ -1,6 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
-
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -146,8 +145,6 @@ adf_processes_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
return ENXIO;
}
prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
- if (!prv_data)
- return ENOMEM;
INIT_LIST_HEAD(&prv_data->list);
error = devfs_set_cdevpriv(prv_data, adf_processes_release);
if (error) {
@@ -412,17 +409,6 @@ adf_state_set(int dev, enum adf_event event)
state->state.dev_state = event;
state->state.dev_id = dev;
STAILQ_INSERT_TAIL(head, state, entries_state);
- if (event == ADF_EVENT_STOP) {
- state = NULL;
- state = malloc(sizeof(struct entry_state),
- M_QAT,
- M_NOWAIT | M_ZERO);
- if (!state)
- continue;
- state->state.dev_state = ADF_EVENT_SHUTDOWN;
- state->state.dev_id = dev;
- STAILQ_INSERT_TAIL(head, state, entries_state);
- }
}
mtx_unlock(&mtx);
callout_schedule(&callout, ADF_STATE_CALLOUT_TIME);
@@ -453,7 +439,7 @@ adf_state_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
case ADF_EVENT_START:
return ret;
case ADF_EVENT_STOP:
- break;
+ return ret;
case ADF_EVENT_ERROR:
break;
#if defined(QAT_UIO) && defined(QAT_DBG)
@@ -550,6 +536,7 @@ adf_state_destroy(void)
struct entry_proc_events *proc_events = NULL;
adf_service_unregister(&adf_state_hndl);
+ destroy_dev(adf_state_dev);
mtx_lock(&callout_mtx);
callout_stop(&callout);
mtx_unlock(&callout_mtx);
@@ -562,7 +549,6 @@ adf_state_destroy(void)
}
mtx_unlock(&mtx);
mtx_destroy(&mtx);
- destroy_dev(adf_state_dev);
}
static int
@@ -573,14 +559,8 @@ adf_state_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
int ret = 0;
prv_data = malloc(sizeof(*prv_data), M_QAT, M_WAITOK | M_ZERO);
- if (!prv_data)
- return -ENOMEM;
entry_proc_events =
malloc(sizeof(struct entry_proc_events), M_QAT, M_WAITOK | M_ZERO);
- if (!entry_proc_events) {
- free(prv_data, M_QAT);
- return -ENOMEM;
- }
mtx_lock(&mtx);
prv_data->cdev = dev;
prv_data->cdev->si_drv1 = prv_data;
diff --git a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c
index c22640045fda..e7b4840600e1 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_heartbeat_dbg.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/priv.h>
#include "adf_heartbeat_dbg.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
@@ -17,6 +18,49 @@
} \
} while (0)
+
+static int qat_dev_hb_read_sent(SYSCTL_HANDLER_ARGS)
+{
+ struct adf_accel_dev *accel_dev = arg1;
+ struct adf_heartbeat *hb;
+ int error = EFAULT;
+
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
+ if (accel_dev == NULL)
+ return EINVAL;
+
+ hb = accel_dev->heartbeat;
+
+ error = sysctl_handle_int(oidp, &hb->hb_sent_counter, 0, req);
+ if (error || !req->newptr)
+ return error;
+
+ return (0);
+}
+
+static int qat_dev_hb_read_failed(SYSCTL_HANDLER_ARGS)
+{
+ struct adf_accel_dev *accel_dev = arg1;
+ struct adf_heartbeat *hb;
+ int error = EFAULT;
+
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
+ if (accel_dev == NULL)
+ return EINVAL;
+
+ hb = accel_dev->heartbeat;
+
+ error = sysctl_handle_int(oidp, &hb->hb_failed_counter, 0, req);
+ if (error || !req->newptr)
+ return error;
+
+ return (0);
+}
+
/* Handler for HB status check */
static int qat_dev_hb_read(SYSCTL_HANDLER_ARGS)
{
@@ -24,6 +68,10 @@ static int qat_dev_hb_read(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
struct adf_heartbeat *hb;
int ret = 0;
+
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (accel_dev == NULL) {
return EINVAL;
}
@@ -48,7 +96,6 @@ adf_heartbeat_dbg_add(struct adf_accel_dev *accel_dev)
struct sysctl_ctx_list *qat_hb_sysctl_ctx;
struct sysctl_oid *qat_hb_sysctl_tree;
struct adf_heartbeat *hb;
- struct sysctl_oid *rc = 0;
if (accel_dev == NULL) {
return EINVAL;
@@ -63,43 +110,80 @@ adf_heartbeat_dbg_add(struct adf_accel_dev *accel_dev)
qat_hb_sysctl_tree =
device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
- rc = SYSCTL_ADD_UINT(qat_hb_sysctl_ctx,
- SYSCTL_CHILDREN(qat_hb_sysctl_tree),
- OID_AUTO,
- "heartbeat_sent",
- CTLFLAG_RD,
- &hb->hb_sent_counter,
- 0,
- "HB sent count");
- HB_SYSCTL_ERR(rc);
-
- rc = SYSCTL_ADD_UINT(qat_hb_sysctl_ctx,
- SYSCTL_CHILDREN(qat_hb_sysctl_tree),
- OID_AUTO,
- "heartbeat_failed",
- CTLFLAG_RD,
- &hb->hb_failed_counter,
- 0,
- "HB failed count");
- HB_SYSCTL_ERR(rc);
-
- rc = SYSCTL_ADD_PROC(qat_hb_sysctl_ctx,
- SYSCTL_CHILDREN(qat_hb_sysctl_tree),
- OID_AUTO,
- "heartbeat",
- CTLTYPE_INT | CTLFLAG_RD,
- accel_dev,
- 0,
- qat_dev_hb_read,
- "IU",
- "QAT device status");
- HB_SYSCTL_ERR(rc);
+ hb->heartbeat_sent.oid =
+ SYSCTL_ADD_PROC(qat_hb_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_hb_sysctl_tree),
+ OID_AUTO,
+ "heartbeat_sent",
+ CTLTYPE_INT | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ qat_dev_hb_read_sent,
+ "IU",
+ "HB sent count");
+ HB_SYSCTL_ERR(hb->heartbeat_sent.oid);
+
+ hb->heartbeat_failed.oid =
+ SYSCTL_ADD_PROC(qat_hb_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_hb_sysctl_tree),
+ OID_AUTO,
+ "heartbeat_failed",
+ CTLTYPE_INT | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ qat_dev_hb_read_failed,
+ "IU",
+ "HB failed count");
+ HB_SYSCTL_ERR(hb->heartbeat_failed.oid);
+
+ hb->heartbeat.oid = SYSCTL_ADD_PROC(qat_hb_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_hb_sysctl_tree),
+ OID_AUTO,
+ "heartbeat",
+ CTLTYPE_INT | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ qat_dev_hb_read,
+ "IU",
+ "QAT device status");
+ HB_SYSCTL_ERR(hb->heartbeat.oid);
return 0;
}
int
adf_heartbeat_dbg_del(struct adf_accel_dev *accel_dev)
{
+ struct sysctl_ctx_list *qat_sysctl_ctx;
+ struct adf_heartbeat *hb;
+
+ if (!accel_dev) {
+ return EINVAL;
+ }
+
+ hb = accel_dev->heartbeat;
+
+ qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+
+ if (hb->heartbeat.oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat.oid);
+ sysctl_remove_oid(hb->heartbeat.oid, 1, 1);
+ hb->heartbeat.oid = NULL;
+ }
+
+ if (hb->heartbeat_failed.oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_failed.oid);
+ sysctl_remove_oid(hb->heartbeat_failed.oid, 1, 1);
+ hb->heartbeat_failed.oid = NULL;
+ }
+
+ if (hb->heartbeat_sent.oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, hb->heartbeat_sent.oid);
+ sysctl_remove_oid(hb->heartbeat_sent.oid, 1, 1);
+ hb->heartbeat_sent.oid = NULL;
+ }
+
adf_heartbeat_clean(accel_dev);
+
return 0;
}
diff --git a/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c
index 76830e2920c3..a50e5fa62a18 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_pfvf_ctrs_dbg.c
@@ -1,9 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_dev_err.h"
#include "adf_freebsd_pfvf_ctrs_dbg.h"
+#include <sys/priv.h>
#define MAX_REPORT_LINES (14)
#define MAX_REPORT_LINE_LEN (64)
@@ -92,6 +93,9 @@ static int adf_pfvf_ctrs_show(SYSCTL_HANDLER_ARGS)
struct pfvf_stats *pfvf_counters = arg1;
char report[MAX_REPORT_SIZE];
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (!pfvf_counters)
return EINVAL;
diff --git a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c
index 35375bb20014..78ea6a7a5083 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_transport_debug.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -14,6 +14,7 @@
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/priv.h>
static int adf_ring_show(SYSCTL_HANDLER_ARGS)
{
@@ -25,6 +26,9 @@ static int adf_ring_show(SYSCTL_HANDLER_ARGS)
int error, word;
uint32_t *wp, *end;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
sbuf_new_for_sysctl(&sb, NULL, 128, req);
{
int head, tail, empty;
@@ -125,6 +129,9 @@ static int adf_bank_show(SYSCTL_HANDLER_ARGS)
struct sbuf sb;
int error, ring_id;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
sbuf_new_for_sysctl(&sb, NULL, 128, req);
bank = arg1;
accel_dev = bank->accel_dev;
diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio.c b/sys/dev/qat/qat_common/adf_freebsd_uio.c
index c109fc79b0f4..64efde72b4b8 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_uio.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_uio.c
@@ -199,10 +199,6 @@ adf_alloc_bundle(struct adf_accel_dev *accel_dev, int bundle_nr)
accel = accel_dev->accel;
handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);
- if (!handle) {
- printf("ERROR in adf_alloc_bundle %d\n", __LINE__);
- return ENOMEM;
- }
handle->accel = accel;
handle->bundle = bundle_nr;
@@ -294,10 +290,6 @@ adf_uio_mmap_single(struct cdev *dev,
/* Adding pid to bundle list */
instance_rings =
malloc(sizeof(*instance_rings), M_QAT, M_WAITOK | M_ZERO);
- if (!instance_rings) {
- printf("QAT: Memory allocation error - line: %d\n", __LINE__);
- return -ENOMEM;
- }
instance_rings->user_pid = curproc->p_pid;
instance_rings->ring_mask = 0;
mutex_lock(&bundle->list_lock);
diff --git a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
index 6fb4cf0bf2f7..954e31c683ce 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_uio_cleanup.c
@@ -123,9 +123,6 @@ get_orphan_bundle(int bank,
orphan_bundle =
malloc(sizeof(*orphan_bundle), M_QAT, M_WAITOK | M_ZERO);
- if (!orphan_bundle)
- return ENOMEM;
-
csr_base = accel->bar->virt_addr;
orphan_bundle->csr_base = csr_base;
orphan_bundle->bank = bank;
diff --git a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c
index 31805d5fb91e..041481435426 100644
--- a/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c
+++ b/sys/dev/qat/qat_common/adf_freebsd_ver_dbg.c
@@ -1,16 +1,20 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "adf_ver_dbg.h"
+#include <sys/priv.h>
static int adf_sysctl_read_fw_versions(SYSCTL_HANDLER_ARGS)
{
struct adf_accel_dev *accel_dev = arg1;
char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (!accel_dev)
return -EINVAL;
@@ -34,6 +38,9 @@ static int adf_sysctl_read_hw_versions(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (!accel_dev)
return -EINVAL;
@@ -55,6 +62,9 @@ static int adf_sysctl_read_mmp_versions(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (!accel_dev)
return -EINVAL;
@@ -86,7 +96,6 @@ adf_ver_dbg_add(struct adf_accel_dev *accel_dev)
{
struct sysctl_ctx_list *qat_sysctl_ctx;
struct sysctl_oid *qat_sysctl_tree;
- struct sysctl_oid *rc = 0;
if (!accel_dev)
return -EINVAL;
@@ -96,43 +105,46 @@ adf_ver_dbg_add(struct adf_accel_dev *accel_dev)
qat_sysctl_tree =
device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
- rc = SYSCTL_ADD_OID(qat_sysctl_ctx,
- SYSCTL_CHILDREN(qat_sysctl_tree),
- OID_AUTO,
- "fw_version",
- CTLTYPE_STRING | CTLFLAG_RD,
- accel_dev,
- 0,
- adf_sysctl_read_fw_versions,
- "A",
- "QAT FW version");
- if (!rc)
+ accel_dev->fw_version_oid =
+ SYSCTL_ADD_OID(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_sysctl_tree),
+ OID_AUTO,
+ "fw_version",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ adf_sysctl_read_fw_versions,
+ "A",
+ "QAT FW version");
+ if (!accel_dev->fw_version_oid)
goto err;
- rc = SYSCTL_ADD_OID(qat_sysctl_ctx,
- SYSCTL_CHILDREN(qat_sysctl_tree),
- OID_AUTO,
- "hw_version",
- CTLTYPE_STRING | CTLFLAG_RD,
- accel_dev,
- 0,
- adf_sysctl_read_hw_versions,
- "A",
- "QAT HW version");
- if (!rc)
+ accel_dev->hw_version_oid =
+ SYSCTL_ADD_OID(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_sysctl_tree),
+ OID_AUTO,
+ "hw_version",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ adf_sysctl_read_hw_versions,
+ "A",
+ "QAT HW version");
+ if (!accel_dev->hw_version_oid)
goto err;
- rc = SYSCTL_ADD_OID(qat_sysctl_ctx,
- SYSCTL_CHILDREN(qat_sysctl_tree),
- OID_AUTO,
- "mmp_version",
- CTLTYPE_STRING | CTLFLAG_RD,
- accel_dev,
- 0,
- adf_sysctl_read_mmp_versions,
- "A",
- "QAT MMP version");
- if (!rc)
+ accel_dev->mmp_version_oid =
+ SYSCTL_ADD_OID(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_sysctl_tree),
+ OID_AUTO,
+ "mmp_version",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ adf_sysctl_read_mmp_versions,
+ "A",
+ "QAT MMP version");
+ if (!accel_dev->mmp_version_oid)
goto err;
return 0;
@@ -145,4 +157,30 @@ err:
void
adf_ver_dbg_del(struct adf_accel_dev *accel_dev)
{
+ struct sysctl_ctx_list *qat_sysctl_ctx;
+
+ if (!accel_dev)
+ return;
+
+ qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+
+ if (accel_dev->mmp_version_oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx,
+ accel_dev->mmp_version_oid);
+ sysctl_remove_oid(accel_dev->mmp_version_oid, 1, 1);
+ accel_dev->mmp_version_oid = NULL;
+ }
+
+ if (accel_dev->hw_version_oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->hw_version_oid);
+ sysctl_remove_oid(accel_dev->hw_version_oid, 1, 1);
+ accel_dev->hw_version_oid = NULL;
+ }
+
+ if (accel_dev->fw_version_oid) {
+ sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->fw_version_oid);
+ sysctl_remove_oid(accel_dev->fw_version_oid, 1, 1);
+ accel_dev->fw_version_oid = NULL;
+ }
}
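
adf_ver_dbg_add() now keeps each returned struct sysctl_oid pointer so that adf_ver_dbg_del() can tear the nodes down individually. The three near-identical blocks could be folded into one helper; a sketch, with a hypothetical helper name:

    #include <sys/param.h>
    #include <sys/sysctl.h>

    static void
    remove_one_oid(struct sysctl_ctx_list *ctx, struct sysctl_oid **oidp)
    {
        if (*oidp == NULL)
            return;
        /* Detach the context bookkeeping entry, then destroy the node. */
        sysctl_ctx_entry_del(ctx, *oidp);
        sysctl_remove_oid(*oidp, 1 /* delete */, 1 /* recurse */);
        *oidp = NULL;
    }

adf_ver_dbg_del() would then reduce to three remove_one_oid() calls against the device's sysctl context.
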
diff --git a/sys/dev/qat/qat_common/adf_fw_counters.c b/sys/dev/qat/qat_common/adf_fw_counters.c
index ea674b27bd0f..1356fa89e775 100644
--- a/sys/dev/qat/qat_common/adf_fw_counters.c
+++ b/sys/dev/qat/qat_common/adf_fw_counters.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
@@ -9,6 +9,7 @@
#include "icp_qat_fw_init_admin.h"
#include <sys/mutex.h>
#include <sys/sbuf.h>
+#include <sys/priv.h>
#define ADF_FW_COUNTERS_BUF_SZ 4096
#define ADF_RAS_EVENT_STR "RAS events"
@@ -126,6 +127,9 @@ int adf_read_fw_counters(SYSCTL_HANDLER_ARGS)
struct sbuf *sbuf = NULL;
char *cbuf = NULL;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (accel_dev == NULL) {
return EINVAL;
}
@@ -211,7 +215,6 @@ adf_fw_counters_add(struct adf_accel_dev *accel_dev)
struct adf_fw_counters_data *fw_counters_data;
struct sysctl_ctx_list *qat_sysctl_ctx;
struct sysctl_oid *qat_sysctl_tree;
- struct sysctl_oid *rc = 0;
fw_counters_data =
malloc(sizeof(*fw_counters_data), M_QAT, M_WAITOK | M_ZERO);
@@ -225,20 +228,24 @@ adf_fw_counters_add(struct adf_accel_dev *accel_dev)
device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
qat_sysctl_tree =
device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
- rc = SYSCTL_ADD_OID(qat_sysctl_ctx,
- SYSCTL_CHILDREN(qat_sysctl_tree),
- OID_AUTO,
- "fw_counters",
- CTLTYPE_STRING | CTLFLAG_RD,
- accel_dev,
- 0,
- adf_read_fw_counters,
- "A",
- "QAT FW counters");
- if (!rc)
+ fw_counters_data->debug =
+ SYSCTL_ADD_OID(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_sysctl_tree),
+ OID_AUTO,
+ "fw_counters",
+ CTLTYPE_STRING | CTLFLAG_RD,
+ accel_dev,
+ 0,
+ adf_read_fw_counters,
+ "A",
+ "QAT FW counters");
+ if (!fw_counters_data->debug) {
+ free(fw_counters_data, M_QAT);
+ accel_dev->fw_counters_data = NULL;
return ENOMEM;
- else
- return 0;
+ }
+
+ return 0;
}
static void
@@ -396,12 +403,21 @@ adf_fw_counters_section_add(struct adf_accel_dev *accel_dev,
void
adf_fw_counters_remove(struct adf_accel_dev *accel_dev)
{
+ struct sysctl_ctx_list *qat_sysctl_ctx;
struct adf_fw_counters_data *fw_counters_data =
accel_dev->fw_counters_data;
if (!fw_counters_data)
return;
+ if (fw_counters_data->debug) {
+ qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+ sysctl_ctx_entry_del(qat_sysctl_ctx, fw_counters_data->debug);
+ sysctl_remove_oid(fw_counters_data->debug, 1, 1);
+ fw_counters_data->debug = NULL;
+ }
+
down_write(&fw_counters_data->lock);
adf_fw_counters_section_del_all(&fw_counters_data->ae_sec_list);
up_write(&fw_counters_data->lock);
diff --git a/sys/dev/qat/qat_common/adf_gen2_hw_data.c b/sys/dev/qat/qat_common/adf_gen2_hw_data.c
index 10e86f8cd218..92fd2b9bc8e5 100644
--- a/sys/dev/qat/qat_common/adf_gen2_hw_data.c
+++ b/sys/dev/qat/qat_common/adf_gen2_hw_data.c
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2021 Intel Corporation */
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
@@ -54,7 +54,7 @@ write_csr_ring_config(struct resource *csr_base_addr,
WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
}
-static dma_addr_t
+static bus_addr_t
read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_BASE(csr_base_addr, bank, ring);
diff --git a/sys/dev/qat/qat_common/adf_gen4_pfvf.c b/sys/dev/qat/qat_common/adf_gen4_pfvf.c
index 03bb90938e85..122abb301d31 100644
--- a/sys/dev/qat/qat_common/adf_gen4_pfvf.c
+++ b/sys/dev/qat/qat_common/adf_gen4_pfvf.c
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/iopoll.h>
-#include <linux/mutex.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
diff --git a/sys/dev/qat/qat_common/adf_gen4_timer.c b/sys/dev/qat/qat_common/adf_gen4_timer.c
index 96b65cdff181..2c74d09418e5 100644
--- a/sys/dev/qat/qat_common/adf_gen4_timer.c
+++ b/sys/dev/qat/qat_common/adf_gen4_timer.c
@@ -57,7 +57,7 @@ end:
static void
timer_handler(struct timer_list *tl)
{
- struct adf_int_timer *int_timer = from_timer(int_timer, tl, timer);
+ struct adf_int_timer *int_timer = timer_container_of(int_timer, tl, timer);
struct adf_accel_dev *accel_dev = int_timer->accel_dev;
struct adf_hb_timer_data *hb_timer_data = NULL;
u64 timeout_val = adf_get_next_timeout(int_timer->timeout_val);
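
timer_container_of() is the newer LinuxKPI spelling of from_timer(); both recover the structure embedding a timer_list via the container_of idiom. A self-contained sketch of that idiom in plain C, with a hypothetical structure name:

    #include <stddef.h>

    struct timer_list {
        int pending;
    };

    struct int_timer_example {
        int timeout_val;
        struct timer_list timer;
    };

    /* Step back from a member pointer to the structure holding it. */
    #define example_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct int_timer_example *
    timer_owner(struct timer_list *tl)
    {
        return example_container_of(tl, struct int_timer_example, timer);
    }
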
diff --git a/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c b/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c
index 5c06b413b528..bfa778ea301b 100644
--- a/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c
+++ b/sys/dev/qat/qat_common/adf_gen4vf_hw_csr_data.c
@@ -1,10 +1,10 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_accel_devices.h"
#include "adf_gen4vf_hw_csr_data.h"
static u64
-build_csr_ring_base_addr(dma_addr_t addr, u32 size)
+build_csr_ring_base_addr(bus_addr_t addr, u32 size)
{
return BUILD_RING_BASE_ADDR_GEN4(addr, size);
}
@@ -54,7 +54,7 @@ write_csr_ring_config(struct resource *csr_base_addr,
WRITE_CSR_RING_CONFIG_GEN4VF(csr_base_addr, bank, ring, value);
}
-static dma_addr_t
+static bus_addr_t
read_csr_ring_base(struct resource *csr_base_addr, u32 bank, u32 ring)
{
return READ_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring);
@@ -64,7 +64,7 @@ static void
write_csr_ring_base(struct resource *csr_base_addr,
u32 bank,
u32 ring,
- dma_addr_t addr)
+ bus_addr_t addr)
{
WRITE_CSR_RING_BASE_GEN4VF(csr_base_addr, bank, ring, addr);
}
diff --git a/sys/dev/qat/qat_common/adf_init.c b/sys/dev/qat/qat_common/adf_init.c
index f0b75db3f6ed..a7ebb70dde3d 100644
--- a/sys/dev/qat/qat_common/adf_init.c
+++ b/sys/dev/qat/qat_common/adf_init.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#include "adf_dbgfs.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
@@ -19,6 +20,10 @@
#include "adf_common_drv.h"
#include "icp_qat_fw.h"
+#if defined(QAT_UIO)
+#include "adf_cfg_device.h"
+#endif /* QAT_UIO */
+
/* Mask used to check the CompressAndVerify capability bit */
#define DC_CNV_EXTENDED_CAPABILITY (0x01)
@@ -28,6 +33,11 @@
static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);
+static int adf_dev_init_locked(struct adf_accel_dev *accel_dev);
+static int adf_dev_start_locked(struct adf_accel_dev *accel_dev);
+static int adf_dev_stop_locked(struct adf_accel_dev *accel_dev);
+static void adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev);
+
static void
adf_service_add(struct service_hndl *service)
{
@@ -261,6 +271,18 @@ adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
int
adf_dev_init(struct adf_accel_dev *accel_dev)
{
+ int ret = 0;
+
+ mutex_lock(&accel_dev->lock);
+ ret = adf_dev_init_locked(accel_dev);
+ mutex_unlock(&accel_dev->lock);
+
+ return ret;
+}
+
+static int
+adf_dev_init_locked(struct adf_accel_dev *accel_dev)
+{
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
@@ -410,17 +432,23 @@ adf_dev_init(struct adf_accel_dev *accel_dev)
int
adf_dev_start(struct adf_accel_dev *accel_dev)
{
+ int ret = 0;
+
+ mutex_lock(&accel_dev->lock);
+ ret = adf_dev_start_locked(accel_dev);
+ mutex_unlock(&accel_dev->lock);
+
+ return ret;
+}
+
+static int
+adf_dev_start_locked(struct adf_accel_dev *accel_dev)
+{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
set_bit(ADF_STATUS_STARTING, &accel_dev->status);
- if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
- device_printf(GET_DEV(accel_dev),
- "QAT: Device %d not found\n",
- accel_dev->accel_id);
- return ENODEV;
- }
if (adf_ae_start(accel_dev)) {
device_printf(GET_DEV(accel_dev), "AE Start Failed\n");
return EFAULT;
@@ -489,6 +517,8 @@ adf_dev_start(struct adf_accel_dev *accel_dev)
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+ adf_dbgfs_add(accel_dev);
+
return 0;
}
@@ -505,15 +535,24 @@ adf_dev_start(struct adf_accel_dev *accel_dev)
int
adf_dev_stop(struct adf_accel_dev *accel_dev)
{
+ int ret = 0;
+
+ mutex_lock(&accel_dev->lock);
+ ret = adf_dev_stop_locked(accel_dev);
+ mutex_unlock(&accel_dev->lock);
+
+ return ret;
+}
+
+static int
+adf_dev_stop_locked(struct adf_accel_dev *accel_dev)
+{
struct service_hndl *service;
struct list_head *list_itr;
- if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
- device_printf(GET_DEV(accel_dev),
- "QAT: Device %d not found\n",
- accel_dev->accel_id);
- return ENODEV;
- }
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status))
+ return 0;
+
if (!adf_dev_started(accel_dev) &&
!test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
return 0;
@@ -526,6 +565,8 @@ adf_dev_stop(struct adf_accel_dev *accel_dev)
return EBUSY;
}
+ adf_dbgfs_rm(accel_dev);
+
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -566,10 +607,21 @@ adf_dev_stop(struct adf_accel_dev *accel_dev)
void
adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
+ mutex_lock(&accel_dev->lock);
+ adf_dev_shutdown_locked(accel_dev);
+ mutex_unlock(&accel_dev->lock);
+}
+
+static void
+adf_dev_shutdown_locked(struct adf_accel_dev *accel_dev)
+{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
+ if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status))
+ return;
+
if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) {
sysctl_ctx_free(&accel_dev->sysctl_ctx);
clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED,
@@ -618,8 +670,12 @@ adf_dev_shutdown(struct adf_accel_dev *accel_dev)
}
/* Delete configuration only if not restarting */
- if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) {
adf_cfg_del_all(accel_dev);
+#ifdef QAT_UIO
+ adf_cfg_device_clear_all(accel_dev);
+#endif
+ }
if (hw_data->remove_pke_stats)
hw_data->remove_pke_stats(accel_dev);
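
The init/start/stop/shutdown entry points now share one shape: a public wrapper takes accel_dev->lock and delegates to a _locked worker, serializing device state transitions against each other. A minimal sketch of the split with hypothetical names, using the LinuxKPI mutex the driver already relies on:

    #include <linux/mutex.h>

    struct dev_example {
        struct mutex lock;    /* assumed initialized via mutex_init() */
        int started;
    };

    static int
    dev_start_locked(struct dev_example *d)
    {
        /* Caller holds d->lock, so state mutation is safe. */
        if (d->started)
            return 0;
        d->started = 1;
        return 0;
    }

    int
    dev_start(struct dev_example *d)
    {
        int ret;

        mutex_lock(&d->lock);
        ret = dev_start_locked(d);
        mutex_unlock(&d->lock);
        return ret;
    }

Internal paths that already hold the lock call the _locked variant directly and so avoid self-deadlock.
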
diff --git a/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c b/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c
index dc0cb0110ec3..8c0221d344ff 100644
--- a/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c
+++ b/sys/dev/qat/qat_common/adf_pfvf_vf_msg.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/bitfield.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -98,6 +98,22 @@ adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
return 0;
}
+void
+adf_vf2pf_restarting_complete(struct adf_accel_dev *accel_dev)
+{
+ struct pfvf_message msg = { .type =
+ ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE };
+
+ if (accel_dev->u1.vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK)
+ return;
+
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
+ device_printf(
+ GET_DEV(accel_dev),
+ "Failed to send Restarting complete event to PF\n");
+ }
+}
+
int
adf_vf2pf_get_capabilities(struct adf_accel_dev *accel_dev)
{
@@ -180,5 +196,7 @@ adf_vf2pf_get_ring_to_svc(struct adf_accel_dev *accel_dev)
/* Only v1 at present */
accel_dev->hw_device->ring_to_svc_map = rts_map_msg.map;
+ accel_dev->hw_device->get_ring_to_svc_done = true;
+
return 0;
}
diff --git a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c
index a09ddb819831..2a338b96a5f3 100644
--- a/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c
+++ b/sys/dev/qat/qat_common/adf_pfvf_vf_proto.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/kernel.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
@@ -89,9 +89,15 @@ adf_send_vf2pf_req(struct adf_accel_dev *accel_dev,
}
/* Wait for response, if it times out retry */
- ret =
- wait_for_completion_timeout(&accel_dev->u1.vf.msg_received,
- timeout);
+ if (!cold) {
+ ret = wait_for_completion_timeout(
+ &accel_dev->u1.vf.msg_received, timeout);
+ } else {
+		/* In cold start, timers may not be initialized yet */
+ DELAY(ADF_PFVF_MSG_RESP_TIMEOUT * 1000);
+ ret = try_wait_for_completion(
+ &accel_dev->u1.vf.msg_received);
+ }
if (ret) {
if (likely(resp))
*resp = accel_dev->u1.vf.response;
@@ -346,6 +352,9 @@ adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)
case ADF_PF2VF_MSGTYPE_RP_RESET_RESP:
adf_pf2vf_handle_pf_rp_reset(accel_dev, msg);
return true;
+ case ADF_PF2VF_MSGTYPE_FATAL_ERROR:
+ adf_pf2vf_handle_pf_error(accel_dev);
+ return true;
case ADF_PF2VF_MSGTYPE_VERSION_RESP:
case ADF_PF2VF_MSGTYPE_BLKMSG_RESP:
accel_dev->u1.vf.response = msg;
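
The cold branch above covers VF2PF messages sent before the scheduler and callout wheel are running, when wait_for_completion_timeout() cannot make progress. A sketch of the fallback, with a hypothetical timeout constant; a nonzero return means the completion fired:

    #include <sys/param.h>
    #include <sys/systm.h>        /* cold, DELAY() */
    #include <linux/completion.h>

    #define EXAMPLE_RESP_TIMEOUT_MS 100    /* hypothetical value */

    static int
    wait_for_response(struct completion *done, unsigned long timeout)
    {
        if (!cold)
            return wait_for_completion_timeout(done, timeout) != 0;

        /* Boot-time path: busy-wait, then poll the completion once. */
        DELAY(EXAMPLE_RESP_TIMEOUT_MS * 1000);
        return try_wait_for_completion(done);
    }
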
diff --git a/sys/dev/qat/qat_common/adf_vf_isr.c b/sys/dev/qat/qat_common/adf_vf_isr.c
index a34e23b8fb4b..9f98ecd09d6e 100644
--- a/sys/dev/qat/qat_common/adf_vf_isr.c
+++ b/sys/dev/qat/qat_common/adf_vf_isr.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include <sys/kernel.h>
#include <sys/systm.h>
@@ -17,6 +17,7 @@
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_utils.h"
+#include "adf_pfvf_vf_msg.h"
static TASKQUEUE_DEFINE_THREAD(qat_vf);
static TASKQUEUE_DEFINE_THREAD(qat_bank_handler);
@@ -65,6 +66,7 @@ adf_dev_stop_async(struct work_struct *work)
/* Re-enable PF2VF interrupts */
hw_data->enable_pf2vf_interrupt(accel_dev);
+ adf_vf2pf_restarting_complete(accel_dev);
kfree(stop_data);
}
@@ -123,6 +125,17 @@ adf_pf2vf_handle_pf_rp_reset(struct adf_accel_dev *accel_dev,
return 0;
}
+int
+adf_pf2vf_handle_pf_error(struct adf_accel_dev *accel_dev)
+{
+ device_printf(GET_DEV(accel_dev), "Fatal error received from PF\n");
+
+ if (adf_notify_fatal_error(accel_dev))
+ device_printf(GET_DEV(accel_dev), "Couldn't notify fatal error\n");
+
+ return 0;
+}
+
static void
adf_pf2vf_bh_handler(void *data, int pending)
{
diff --git a/sys/dev/qat/qat_common/qat_hal.c b/sys/dev/qat/qat_common/qat_hal.c
index b1e35e77272a..75190246ee1d 100644
--- a/sys/dev/qat/qat_common/qat_hal.c
+++ b/sys/dev/qat/qat_common/qat_hal.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -1052,8 +1052,7 @@ qat_hal_init(struct adf_accel_dev *accel_dev)
handle->hal_cap_ae_xfer_csr_addr_v = ae_offset;
handle->hal_ep_csr_addr_v = ep_offset;
handle->hal_cap_ae_local_csr_addr_v =
- ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
- LOCAL_TO_XFER_REG_OFFSET);
+ ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET);
handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) ==
ADF_DH895XCC_PCI_DEVICE_ID) ?
false :
@@ -1283,7 +1282,7 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
unsigned int max_cycle,
unsigned int *endpc)
{
- uint64_t savuwords[MAX_EXEC_INST];
+ u64 *savuwords = NULL;
unsigned int ind_lm_addr0, ind_lm_addr1;
unsigned int ind_lm_addr2, ind_lm_addr3;
unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
@@ -1300,6 +1299,11 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
pr_err("QAT: invalid instruction num %d\n", inst_num);
return EINVAL;
}
+
+ savuwords = kzalloc(sizeof(u64) * MAX_EXEC_INST, GFP_KERNEL);
+ if (!savuwords)
+ return ENOMEM;
+
/* save current context */
qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
@@ -1360,8 +1364,10 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
qat_hal_enable_ctx(handle, ae, (1 << ctx));
/* wait for micro codes to finish */
- if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+ if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0) {
+ kfree(savuwords);
return EFAULT;
+ }
if (endpc) {
unsigned int ctx_status;
@@ -1429,6 +1435,7 @@ qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig);
qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+ kfree(savuwords);
return 0;
}
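
Moving savuwords off the stack matters because MAX_EXEC_INST u64 slots amount to hundreds of bytes, more than a kernel stack frame should carry; the cost is that every exit path must free the buffer, as the added kfree() calls do. A reduced sketch with a hypothetical instruction count:

    #include <linux/slab.h>
    #include <linux/types.h>

    #define EXAMPLE_MAX_EXEC_INST 100    /* hypothetical */

    static int
    exec_with_saved_uwords(void)
    {
        u64 *savuwords;

        savuwords = kzalloc(sizeof(u64) * EXAMPLE_MAX_EXEC_INST, GFP_KERNEL);
        if (!savuwords)
            return ENOMEM;

        /* ... save context, run the microcode, restore context ... */

        kfree(savuwords);    /* every return path must free this */
        return 0;
    }
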
diff --git a/sys/dev/qat/qat_common/qat_uclo.c b/sys/dev/qat/qat_common/qat_uclo.c
index faf3154e80b8..b17020286d24 100644
--- a/sys/dev/qat/qat_common/qat_uclo.c
+++ b/sys/dev/qat/qat_common/qat_uclo.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -892,6 +892,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
return ICP_QAT_AC_C4XXX_DEV_TYPE;
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
return ICP_QAT_AC_4XXX_A_DEV_TYPE;
default:
pr_err("QAT: unsupported device 0x%x\n",
@@ -1618,11 +1619,17 @@ qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
unsigned int length, simg_offset = sizeof(*auth_chunk);
unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev));
+ if (size <= ICP_QAT_AE_IMG_OFFSET(device_id)) {
+ pr_err("QAT: error, input image size too small %d\n", size);
+ return EINVAL;
+ }
+
if (size >
(ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
pr_err("QAT: error, input image size overflow %d\n", size);
return EINVAL;
}
+
length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset :
size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset;
@@ -1824,11 +1831,6 @@ qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
GET_DEV(handle->accel_dev)));
return status;
}
- if (pci_get_device(GET_DEV(handle->accel_dev)) ==
- ADF_C3XXX_PCI_DEVICE_ID) {
- pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
- return EINVAL;
- }
status = qat_uclo_wr_sram_by_words(handle,
handle->hal_sram_offset,
addr_ptr,
diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h
index 67560a7a7d1c..f8adc0dba935 100644
--- a/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_200XX_HW_DATA_H_
#define ADF_200XX_HW_DATA_H_
diff --git a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c
index f9b8c742d339..816010dc06f8 100644
--- a/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_200xx/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -7,13 +7,12 @@
#include "adf_200xx_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_200XX, "qat_200xx", "qat_200xx");
@@ -73,6 +72,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_200XX);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -84,7 +84,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i = 0, bar_nr = 0, reg_val = 0;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
@@ -96,6 +96,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -108,9 +109,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called
*/
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -213,16 +215,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -241,8 +247,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -258,7 +269,9 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
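
The reworked adf_attach() error handling is the classic goto-ladder unwind: each acquired resource gets a label, and a failure jumps to the label that releases everything obtained so far, in reverse order. A generic sketch with hypothetical resource helpers:

    int acquire_a(void);    /* hypothetical */
    void release_a(void);
    int acquire_b(void);

    static int
    attach_example(void)
    {
        int ret;

        ret = acquire_a();
        if (ret != 0)
            goto out;
        ret = acquire_b();
        if (ret != 0)
            goto out_release_a;
        return 0;

    out_release_a:
        /* Unwind strictly in reverse order of acquisition. */
        release_a();
    out:
        return ret;
    }

Here the new out_err_disable and out_err_lock labels slot pci_disable_busmaster() and mutex_destroy() into that ladder.
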
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
index 3326d7cb4ffb..49e1e1859e78 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
@@ -212,57 +212,77 @@ adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
{
device_t pdev = accel_dev->accel_pci_dev.pci_dev;
u32 fusectl1;
- u32 capabilities;
+ u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth,
+ capabilities_asym, capabilities_dc, capabilities_other;
+
+ capabilities_other = ICP_ACCEL_CAPABILITIES_RL;
/* Read accelerator capabilities mask */
fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4);
- capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_CIPHER |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
- ICP_ACCEL_CAPABILITIES_COMPRESSION |
- ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
- ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
- ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_HKDF |
- ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 |
+
+ capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF |
ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
- ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2 |
- ICP_ACCEL_CAPABILITIES_RL | ICP_ACCEL_CAPABILITIES_ECEDMONT |
- ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+ /* A set bit in fusectl1 means the feature is OFF in this SKU */
if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_HKDF;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
- }
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
- if (fusectl1 & ICP_ACCEL_MASK_PKE_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
}
+
if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
}
- if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+
+ if (capabilities_sym_cipher)
+ capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER;
+
+ if (capabilities_sym_auth)
+ capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+
+ capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth;
+
+ if (capabilities_sym)
+ capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
- return capabilities;
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+ }
+
+ return capabilities_sym | capabilities_dc | capabilities_asym |
+ capabilities_other;
}
static u32
@@ -516,8 +536,8 @@ adf_exit_accel_units(struct adf_accel_dev *accel_dev)
}
static const char *
-get_obj_name(struct adf_accel_dev *accel_dev,
- enum adf_accel_unit_services service)
+get_obj_name_4xxx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
{
switch (service) {
case ADF_ACCEL_ASYM:
@@ -533,6 +553,24 @@ get_obj_name(struct adf_accel_dev *accel_dev,
}
}
+static const char *
+get_obj_name_402xx(struct adf_accel_dev *accel_dev,
+ enum adf_accel_unit_services service)
+{
+ switch (service) {
+ case ADF_ACCEL_ASYM:
+ return ADF_402XX_ASYM_OBJ;
+ case ADF_ACCEL_CRYPTO:
+ return ADF_402XX_SYM_OBJ;
+ case ADF_ACCEL_COMPRESSION:
+ return ADF_402XX_DC_OBJ;
+ case ADF_ACCEL_ADMIN:
+ return ADF_402XX_ADMIN_OBJ;
+ default:
+ return NULL;
+ }
+}
+
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
@@ -709,6 +747,10 @@ adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
req.cmd_id = ICP_QAT_FW_INIT_ME;
+#ifdef QAT_DISABLE_SAFE_DC_MODE
+ if (accel_dev->disable_safe_dc_mode)
+ req.fw_flags = ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG;
+#endif /* QAT_DISABLE_SAFE_DC_MODE */
if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) {
device_printf(GET_DEV(accel_dev),
"Error sending init message\n");
@@ -958,8 +1000,23 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
hw_data->get_sku = get_sku;
hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
- hw_data->fw_name = ADF_4XXX_FW;
- hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_402XX_FW;
+ hw_data->fw_mmp_name = ADF_402XX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ break;
+ case ADF_401XX_PCI_DEVICE_ID:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
+ break;
+
+ default:
+ hw_data->fw_name = ADF_4XXX_FW;
+ hw_data->fw_mmp_name = ADF_4XXX_MMP;
+ hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
+ }
hw_data->init_admin_comms = adf_init_admin_comms;
hw_data->exit_admin_comms = adf_exit_admin_comms;
hw_data->send_admin_init = adf_4xxx_send_admin_init;
@@ -978,7 +1035,13 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->get_objs_num = get_objs_num;
- hw_data->get_obj_name = get_obj_name;
+ switch (id) {
+ case ADF_402XX_PCI_DEVICE_ID:
+ hw_data->get_obj_name = get_obj_name_402xx;
+ break;
+ default:
+ hw_data->get_obj_name = get_obj_name_4xxx;
+ }
hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
hw_data->get_service_type = adf_4xxx_get_service_type;
hw_data->set_msix_rttable = set_msix_default_rttable;
@@ -989,21 +1052,15 @@ adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
hw_data->get_hb_clock = get_hb_clock;
hw_data->int_timer_init = adf_int_timer_init;
hw_data->int_timer_exit = adf_int_timer_exit;
+ hw_data->pre_reset = adf_dev_pre_reset;
+ hw_data->post_reset = adf_dev_post_reset;
+ hw_data->disable_arb = adf_disable_arb;
hw_data->get_heartbeat_status = adf_get_heartbeat_status;
hw_data->get_ae_clock = get_ae_clock;
hw_data->measure_clock = measure_clock;
hw_data->query_storage_cap = 1;
hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
- switch (id) {
- case ADF_401XX_PCI_DEVICE_ID:
- hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
- break;
- case ADF_4XXX_PCI_DEVICE_ID:
- default:
- hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
- }
-
adf_gen4_init_hw_csr_info(&hw_data->csr_info);
adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
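
The capability refactor splits the single mask into per-domain words (cipher, auth, asym, dc) so a fused-off slice clears only its own features, and the umbrella bits (CIPHER, AUTHENTICATION, CRYPTO_SYMMETRIC) are derived afterwards instead of asserted up front. A reduced sketch with hypothetical bit values:

    #include <linux/types.h>

    /* Hypothetical bit assignments, for the sketch only. */
    #define EX_CAP_HKDF          0x01
    #define EX_CAP_SM3           0x02
    #define EX_CAP_CIPHER        0x04
    #define EX_CAP_AUTH          0x08
    #define EX_CAP_SYM           0x10
    #define EX_FUSE_CIPHER_SLICE 0x01    /* set bit == slice fused off */

    static u32
    build_sym_caps(u32 fusectl)
    {
        u32 cipher = EX_CAP_HKDF;
        u32 auth = EX_CAP_SM3;
        u32 sym;

        if (fusectl & EX_FUSE_CIPHER_SLICE)
            cipher &= ~EX_CAP_HKDF;

        /* Umbrella bits follow from whatever survived the fuses. */
        if (cipher)
            cipher |= EX_CAP_CIPHER;
        if (auth)
            auth |= EX_CAP_AUTH;
        sym = cipher | auth;
        if (sym)
            sym |= EX_CAP_SYM;
        return sym;
    }
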
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
index b6a5da92fdb5..fa7249dca596 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_4xxx_hw_data.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007 - 2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_4XXX_HW_DATA_H_
#define ADF_4XXX_HW_DATA_H_
@@ -87,6 +87,12 @@
#define ADF_4XXX_SYM_OBJ "qat_4xxx_sym.bin"
#define ADF_4XXX_ASYM_OBJ "qat_4xxx_asym.bin"
#define ADF_4XXX_ADMIN_OBJ "qat_4xxx_admin.bin"
+#define ADF_402XX_FW "qat_402xx_fw"
+#define ADF_402XX_MMP "qat_402xx_mmp_fw"
+#define ADF_402XX_DC_OBJ "qat_402xx_dc.bin"
+#define ADF_402XX_SYM_OBJ "qat_402xx_sym.bin"
+#define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin"
+#define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin"
/* Only 3 types of images can be loaded including the admin image */
#define ADF_4XXX_MAX_OBJ 3
@@ -105,7 +111,7 @@ enum icp_qat_4xxx_slice_mask {
ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3),
ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4),
ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5),
- ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(6),
+ ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7),
};
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id);
diff --git a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
index f4a673e25a40..f9ad39fa45f0 100644
--- a/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxx/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007 - 2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -8,13 +8,12 @@
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");
@@ -23,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XX_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -47,6 +48,74 @@ adf_probe(device_t dev)
return ENXIO;
}
+#ifdef QAT_DISABLE_SAFE_DC_MODE
+static int adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS)
+{
+ struct adf_accel_dev *accel_dev = arg1;
+ int error, value = accel_dev->disable_safe_dc_mode;
+
+ error = sysctl_handle_int(oidp, &value, 0, req);
+ if (error || !req->newptr)
+ return error;
+
+ if (value != 1 && value != 0)
+ return EINVAL;
+
+ if (adf_dev_started(accel_dev)) {
+ device_printf(
+ GET_DEV(accel_dev),
+ "QAT: configuration can only be changed in \"down\" device state\n");
+ return EBUSY;
+ }
+
+ accel_dev->disable_safe_dc_mode = (u8)value;
+
+ return 0;
+}
+
+static void
+adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev *accel_dev)
+{
+ struct sysctl_ctx_list *qat_sysctl_ctx;
+ struct sysctl_oid *qat_sysctl_tree;
+
+ qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+ qat_sysctl_tree =
+ device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
+ accel_dev->safe_dc_mode =
+ SYSCTL_ADD_OID(qat_sysctl_ctx,
+ SYSCTL_CHILDREN(qat_sysctl_tree),
+ OID_AUTO,
+ "disable_safe_dc_mode",
+ CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_TUN |
+ CTLFLAG_SKIP,
+ accel_dev,
+ 0,
+ adf_4xxx_sysctl_disable_safe_dc_mode,
+		    "I",
+ "Disable QAT safe data compression mode");
+}
+
+static void
+adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev *accel_dev)
+{
+ int ret;
+ struct sysctl_ctx_list *qat_sysctl_ctx =
+ device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
+
+ ret = sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->safe_dc_mode);
+ if (ret) {
+ device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");
+ } else {
+ ret = sysctl_remove_oid(accel_dev->safe_dc_mode, 1, 1);
+ if (ret)
+ device_printf(GET_DEV(accel_dev),
+ "Failed to delete oid\n");
+ }
+}
+#endif /* QAT_DISABLE_SAFE_DC_MODE */
+
static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
@@ -68,6 +137,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXX_PCI_DEVICE_ID:
case ADF_401XX_PCI_DEVICE_ID:
+ case ADF_402XX_PCI_DEVICE_ID:
adf_clean_hw_data_4xxx(accel_dev->hw_device);
break;
default:
@@ -76,6 +146,10 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_4XXX);
accel_dev->hw_device = NULL;
}
+#ifdef QAT_DISABLE_SAFE_DC_MODE
+ adf_4xxx_disable_safe_dc_sysctl_remove(accel_dev);
+#endif /* QAT_DISABLE_SAFE_DC_MODE */
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -87,7 +161,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int bar_nr;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 512. Implemented to avoid the issue of
@@ -99,6 +173,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -109,9 +184,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called
*/
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -121,11 +197,6 @@ adf_attach(device_t dev)
adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev));
accel_pci_dev->revid = pci_get_revid(dev);
hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);
- if (accel_pci_dev->revid == 0x00) {
- device_printf(dev, "A0 stepping is not supported.\n");
- ret = ENODEV;
- goto out_err;
- }
/* Get PPAERUCM values and store */
ret = adf_aer_store_ppaerucm_reg(dev, hw_data);
@@ -153,6 +224,10 @@ adf_attach(device_t dev)
if (ret)
goto out_err;
+#ifdef QAT_DISABLE_SAFE_DC_MODE
+ adf_4xxx_disable_safe_dc_sysctl_add(accel_dev);
+#endif /* QAT_DISABLE_SAFE_DC_MODE */
+
pci_set_max_read_req(dev, 4096);
ret = bus_dma_tag_create(bus_get_dma_tag(dev),
@@ -203,16 +278,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -231,8 +310,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -248,7 +332,9 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
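
The disable_safe_dc_mode handler follows the standard FreeBSD read-modify sysctl shape: copy the current value out, let sysctl_handle_int() service the request, and act only when a new value was actually supplied. A minimal standalone sketch with hypothetical backing storage:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/sysctl.h>

    static int example_flag;    /* hypothetical backing storage */

    static int
    example_flag_handler(SYSCTL_HANDLER_ARGS)
    {
        int error, value = example_flag;

        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error != 0 || req->newptr == NULL)
            return error;    /* plain read, or copyin failed */
        if (value != 0 && value != 1)
            return EINVAL;
        example_flag = value;
        return 0;
    }
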
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c
index 27e5ef8162ab..37de24ba9e23 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
@@ -133,45 +133,74 @@ adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev)
{
device_t pdev = accel_dev->accel_pci_dev.pci_dev;
u32 vffusectl1;
- u32 capabilities;
-
- capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC +
- ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC +
- ICP_ACCEL_CAPABILITIES_CIPHER +
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION +
- ICP_ACCEL_CAPABILITIES_COMPRESSION +
- ICP_ACCEL_CAPABILITIES_SHA3_EXT + ICP_ACCEL_CAPABILITIES_SM2 +
- ICP_ACCEL_CAPABILITIES_SM3 + ICP_ACCEL_CAPABILITIES_SM4 +
- ICP_ACCEL_CAPABILITIES_CHACHA_POLY +
- ICP_ACCEL_CAPABILITIES_AESGCM_SPC +
- ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 +
- ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION +
- ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth,
+ capabilities_asym, capabilities_dc;
/* Get fused capabilities */
vffusectl1 = pci_read_config(pdev, ADF_4XXXIOV_VFFUSECTL1_OFFSET, 4);
- if (vffusectl1 & BIT(7)) {
- capabilities &=
- ~(ICP_ACCEL_CAPABILITIES_SM3 + ICP_ACCEL_CAPABILITIES_SM4);
+ capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF |
+ ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
+ ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2;
+ capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 |
+ ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT;
+
+ /* A set bit in vffusectl1 means the feature is OFF in this SKU */
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_CIPHER_SLICE) {
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
}
- if (vffusectl1 & BIT(6)) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
+
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_UCS_SLICE) {
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
}
- if (vffusectl1 & BIT(3)) {
- capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION +
- ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64);
+
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_AUTH_SLICE) {
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
}
- if (vffusectl1 & BIT(2)) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_SMX_SLICE) {
+ capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
+ capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
}
- if (vffusectl1 & BIT(1)) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+
+ if (capabilities_sym_cipher)
+ capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER;
+
+ if (capabilities_sym_auth)
+ capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
+
+ capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth;
+
+ if (capabilities_sym)
+ capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
+
+ capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT;
+
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_PKE_SLICE) {
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
+ capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
}
- if (vffusectl1 & BIT(0)) {
- capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
+
+ capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
+ ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
+
+ if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_COMPRESS_SLICE) {
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
+ capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
}
- return capabilities;
+
+ return capabilities_sym | capabilities_dc | capabilities_asym;
}
static void
@@ -265,6 +294,9 @@ get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
u32 i = 0;
+ if (accel_dev->hw_device->get_ring_to_svc_done)
+ return 0;
+
/* Get the services enabled by user if provided.
* The function itself will also be called during the driver probe
* procedure where no ServicesEnable is provided. Then the device
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.h b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.h
index 83c034f543c9..a702fc69dde7 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_4xxxvf_hw_data.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_4XXXVF_HW_DATA_H_
#define ADF_4XXXVF_HW_DATA_H_
@@ -27,6 +27,18 @@
#define ADF_4XXXIOV_VFFUSECTL4_OFFSET (0x1C4)
#define ADF_4XXXIOV_VFFUSECTL5_OFFSET (0x1C8)
+/* qat_4xxxvf fuse bits are the same as qat_4xxx */
+enum icp_qat_4xxxvf_slice_mask {
+ ICP_ACCEL_4XXXVF_MASK_CIPHER_SLICE = 0x01,
+ ICP_ACCEL_4XXXVF_MASK_AUTH_SLICE = 0x02,
+ ICP_ACCEL_4XXXVF_MASK_PKE_SLICE = 0x04,
+ ICP_ACCEL_4XXXVF_MASK_COMPRESS_SLICE = 0x08,
+ ICP_ACCEL_4XXXVF_MASK_UCS_SLICE = 0x10,
+ ICP_ACCEL_4XXXVF_MASK_EIA3_SLICE = 0x20,
+	/* SM3 and SM4 are indicated by the same bit */
+ ICP_ACCEL_4XXXVF_MASK_SMX_SLICE = 0x80,
+};
+
void adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data);
void adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data);
u32 adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev);
diff --git a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
index 05a99ae43ab7..dbe40835ccbf 100644
--- a/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_4xxxvf/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
@@ -8,6 +8,7 @@
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
@@ -21,12 +22,14 @@ static MALLOC_DEFINE(M_QAT_4XXXVF, "qat_4xxxvf", "qat_4xxxvf");
PCI_VENDOR_ID_INTEL, device_id \
}
-static const struct pci_device_id adf_pci_tbl[] =
- { ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
- ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
- {
- 0,
- } };
+static const struct pci_device_id adf_pci_tbl[] = {
+ ADF_SYSTEM_DEVICE(ADF_4XXXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_401XXIOV_PCI_DEVICE_ID),
+ ADF_SYSTEM_DEVICE(ADF_402XXIOV_PCI_DEVICE_ID),
+ {
+ 0,
+ }
+};
static int
adf_probe(device_t dev)
@@ -75,6 +78,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
switch (pci_get_device(accel_pci_dev->pci_dev)) {
case ADF_4XXXIOV_PCI_DEVICE_ID:
case ADF_401XXIOV_PCI_DEVICE_ID:
+ case ADF_402XXIOV_PCI_DEVICE_ID:
adf_clean_hw_data_4xxxiov(accel_dev->hw_device);
break;
default:
@@ -83,6 +87,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_4XXXVF);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
}
@@ -99,6 +104,7 @@ adf_attach(device_t dev)
struct adf_cfg_device *cfg_dev = NULL;
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
accel_dev->is_vf = true;
pf = adf_devmgr_pci_to_accel_dev(pci_find_pf(dev));
@@ -110,18 +116,15 @@ adf_attach(device_t dev)
accel_pci_dev->node = 0;
/* Add accel device to accel table */
- if (adf_devmgr_add_dev(accel_dev, pf)) {
+ ret = adf_devmgr_add_dev(accel_dev, pf);
+ if (ret) {
device_printf(GET_DEV(accel_dev),
"Failed to add new accelerator device.\n");
- return -EFAULT;
+ goto out_err_lock;
}
+
/* Allocate and configure device configuration structure */
hw_data = malloc(sizeof(*hw_data), M_QAT_4XXXVF, M_WAITOK | M_ZERO);
- if (!hw_data) {
- ret = -ENOMEM;
- goto out_err;
- }
-
accel_dev->hw_device = hw_data;
adf_init_hw_data_4xxxiov(accel_dev->hw_device);
accel_pci_dev->revid = pci_get_revid(dev);
@@ -155,6 +158,8 @@ adf_attach(device_t dev)
NULL,
NULL,
&accel_dev->dma_tag);
+ if (ret)
+ goto out_err;
hw_data->accel_capabilities_mask = adf_4xxxvf_get_hw_cap(accel_dev);
@@ -183,7 +188,11 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
/* Completion for VF2PF request/response message exchange */
init_completion(&accel_dev->u1.vf.msg_received);
@@ -191,7 +200,7 @@ adf_attach(device_t dev)
ret = hw_data->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (!ret)
@@ -213,8 +222,13 @@ adf_attach(device_t dev)
return ret;
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -232,7 +246,9 @@ adf_detach(device_t dev)
clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
adf_dev_stop(accel_dev);
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h
index bfc5db1f5e5c..486c46a1cac7 100644
--- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#ifndef ADF_C3XXX_HW_DATA_H_
#define ADF_C3XXX_HW_DATA_H_
diff --git a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c
index 74e9e2292623..b91daefb45d4 100644
--- a/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_c3xxx/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -7,13 +7,12 @@
#include "adf_c3xxx_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_C3XXX, "qat_c3xxx", "qat_c3xxx");
@@ -73,6 +72,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_C3XXX);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -84,7 +84,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
@@ -95,6 +95,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -106,9 +107,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -202,16 +204,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -230,8 +236,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -247,7 +258,9 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c
index e68d0bca80fc..a2bb36727fd4 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ae_config.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_c4xxx_hw_data.h"
#include <linux/kernel.h>
#include <linux/types.h>
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
+#include <sys/priv.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
@@ -59,6 +60,10 @@ static int adf_ae_config_show(SYSCTL_HANDLER_ARGS)
u8 ae_index;
u8 num_aes;
int ret = 0;
+
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
u32 num_au = hw_data->get_num_accel_units(hw_data);
sbuf_new_for_sysctl(&sb, NULL, 2048, req);
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c
index 36bdbe82d5a9..d2969c1b06ed 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <adf_accel_devices.h>
@@ -858,10 +858,10 @@ update_hw_capability(struct adf_accel_dev *accel_dev)
if (!au_info->asym_ae_msk)
disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
- ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
-
+ ICP_ACCEL_CAPABILITIES_ECEDMONT;
if (!au_info->sym_ae_msk)
disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
+ ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC |
ICP_ACCEL_CAPABILITIES_SHA3_EXT |
ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c
index 3821e60df746..4fdbec791ce6 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_misc_error_stats.c
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_c4xxx_hw_data.h"
#include "adf_c4xxx_misc_error_stats.h"
#include "adf_common_drv.h"
#include "adf_cfg_common.h"
#include <sys/sbuf.h>
#include <sys/sysctl.h>
+#include <sys/priv.h>
#define MISC_ERROR_DBG_FILE "misc_error_stats"
#define LINE \
@@ -23,6 +24,9 @@ static int qat_misc_error_show(SYSCTL_HANDLER_ARGS)
{
struct sbuf sb;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
sbuf_new_for_sysctl(&sb, NULL, 256, req);
sbuf_printf(&sb, "\n");
sbuf_printf(&sb, LINE);
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c
index 61a879900f9c..06145a3d7906 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_pke_replay_stats.c
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_c4xxx_hw_data.h"
#include "adf_c4xxx_pke_replay_stats.h"
#include "adf_common_drv.h"
#include "icp_qat_fw_init_admin.h"
#include <sys/sbuf.h>
#include <sys/sysctl.h>
+#include <sys/priv.h>
#define PKE_REPLAY_DBG_FILE "pke_replay_stats"
#define LINE \
@@ -21,6 +22,9 @@ static int qat_pke_replay_counters_show(SYSCTL_HANDLER_ARGS)
u64 suc_counter = 0;
u64 unsuc_counter = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
sbuf_new_for_sysctl(&sb, NULL, 256, req);
sbuf_printf(&sb, "\n");
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c
index d7cf8e350fa4..af4c6d123c84 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_ras.c
@@ -1,10 +1,11 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "adf_c4xxx_ras.h"
#include "adf_accel_devices.h"
#include "adf_c4xxx_hw_data.h"
#include <adf_dev_err.h>
#include "adf_c4xxx_inline.h"
+#include <sys/priv.h>
#define ADF_RAS_STR_LEN 64
@@ -13,6 +14,9 @@ static int adf_sysctl_read_ras_correctable(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
unsigned long counter = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (accel_dev->ras_counters)
counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_CORR]);
@@ -24,6 +28,9 @@ static int adf_sysctl_read_ras_uncorrectable(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
unsigned long counter = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (accel_dev->ras_counters)
counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
@@ -35,6 +42,9 @@ static int adf_sysctl_read_ras_fatal(SYSCTL_HANDLER_ARGS)
struct adf_accel_dev *accel_dev = arg1;
unsigned long counter = 0;
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (accel_dev->ras_counters)
counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_FATAL]);
@@ -47,6 +57,9 @@ static int adf_sysctl_write_ras_reset(SYSCTL_HANDLER_ARGS)
int value = 0;
int ret = SYSCTL_IN(req, &value, sizeof(value));
+ if (priv_check(curthread, PRIV_DRIVER) != 0)
+ return EPERM;
+
if (!ret && value != 0 && accel_dev->ras_counters) {
}
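Every QAT debug sysctl handler touched here gains the same guard: reject unprivileged threads with priv_check(9) before any device state is read. The shape of a guarded handler, assembled from the hunks (the SYSCTL_OUT tail is an assumption, since the hunks do not show how each handler returns its data):

	static int
	adf_sysctl_read_ras_example(SYSCTL_HANDLER_ARGS)
	{
		struct adf_accel_dev *accel_dev = arg1;
		unsigned long counter = 0;

		/* Refuse unprivileged readers up front. */
		if (priv_check(curthread, PRIV_DRIVER) != 0)
			return EPERM;

		if (accel_dev->ras_counters)
			counter = atomic_read(&accel_dev->ras_counters[ADF_RAS_CORR]);

		/* Assumed tail: hand the value back to the caller. */
		return SYSCTL_OUT(req, &counter, sizeof(counter));
	}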
diff --git a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c
index 0e206e960a6c..f333101cce50 100644
--- a/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_c4xxx/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -7,13 +7,12 @@
#include "adf_c4xxx_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_C4XXX, "qat_c4xxx", "qat_c4xxx");
@@ -73,6 +72,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_C4XXX);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -84,7 +84,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
@@ -96,6 +96,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -108,9 +109,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called
*/
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -201,16 +203,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -229,8 +235,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -246,7 +257,9 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
diff --git a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c
index 4035a8385bd5..955f6c48a071 100644
--- a/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_c62x/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -7,13 +7,12 @@
#include "adf_c62x_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_C62X, "qat_c62x", "qat_c62x");
@@ -73,6 +72,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_C62X);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -84,7 +84,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
@@ -95,6 +95,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -106,9 +107,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -203,16 +205,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -231,8 +237,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -248,7 +259,9 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
return 0;
}
diff --git a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c
index f7a31da23ba9..a74de5eb592d 100644
--- a/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c
+++ b/sys/dev/qat/qat_hw/qat_dh895xcc/adf_drv.c
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2007-2022 Intel Corporation */
+/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
@@ -7,13 +7,12 @@
#include "adf_dh895xcc_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
+#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
-#include "adf_heartbeat_dbg.h"
-#include "adf_cnvnr_freq_counters.h"
static MALLOC_DEFINE(M_QAT_DH895XCC, "qat_dh895xcc", "qat_dh895xcc");
@@ -73,6 +72,7 @@ adf_cleanup_accel(struct adf_accel_dev *accel_dev)
free(accel_dev->hw_device, M_QAT_DH895XCC);
accel_dev->hw_device = NULL;
}
+ adf_dbgfs_exit(accel_dev);
adf_cfg_dev_remove(accel_dev);
adf_devmgr_rm_dev(accel_dev, NULL);
}
@@ -84,7 +84,7 @@ adf_attach(device_t dev)
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
unsigned int i, bar_nr;
- int ret, rid;
+ int ret = 0, rid;
struct adf_cfg_device *cfg_dev = NULL;
/* Set pci MaxPayLoad to 256. Implemented to avoid the issue of
@@ -95,6 +95,7 @@ adf_attach(device_t dev)
accel_dev = device_get_softc(dev);
+ mutex_init(&accel_dev->lock);
INIT_LIST_HEAD(&accel_dev->crypto_list);
accel_pci_dev = &accel_dev->accel_pci_dev;
accel_pci_dev->pci_dev = dev;
@@ -104,9 +105,10 @@ adf_attach(device_t dev)
/* Add accel device to accel table.
* This should be called before adf_cleanup_accel is called */
- if (adf_devmgr_add_dev(accel_dev, NULL)) {
+ ret = adf_devmgr_add_dev(accel_dev, NULL);
+ if (ret) {
device_printf(dev, "Failed to add new accelerator device.\n");
- return ENXIO;
+ goto out_err_lock;
}
/* Allocate and configure device configuration structure */
@@ -191,16 +193,20 @@ adf_attach(device_t dev)
bar->base_addr = rman_get_start(bar->virt_addr);
bar->size = rman_get_size(bar->virt_addr);
}
- pci_enable_busmaster(dev);
+ ret = pci_enable_busmaster(dev);
+ if (ret)
+ goto out_err;
+
+ adf_dbgfs_init(accel_dev);
if (!accel_dev->hw_device->config_device) {
ret = EFAULT;
- goto out_err;
+ goto out_err_disable;
}
ret = accel_dev->hw_device->config_device(accel_dev);
if (ret)
- goto out_err;
+ goto out_err_disable;
ret = adf_dev_init(accel_dev);
if (ret)
@@ -219,8 +225,13 @@ out_dev_stop:
adf_dev_stop(accel_dev);
out_dev_shutdown:
adf_dev_shutdown(accel_dev);
+out_err_disable:
+ pci_disable_busmaster(dev);
out_err:
adf_cleanup_accel(accel_dev);
+out_err_lock:
+ mutex_destroy(&accel_dev->lock);
+
return ret;
}
@@ -236,7 +247,10 @@ adf_detach(device_t dev)
adf_dev_shutdown(accel_dev);
+ pci_disable_busmaster(dev);
adf_cleanup_accel(accel_dev);
+ mutex_destroy(&accel_dev->lock);
+
return 0;
}
diff --git a/sys/dev/qcom_dwc3/qcom_dwc3.c b/sys/dev/qcom_dwc3/qcom_dwc3.c
index 5fc93b871ad1..7352d658d326 100644
--- a/sys/dev/qcom_dwc3/qcom_dwc3.c
+++ b/sys/dev/qcom_dwc3/qcom_dwc3.c
@@ -58,7 +58,7 @@ static struct ofw_compat_data compat_data[] = {
struct qcom_dwc3_softc {
struct simplebus_softc sc;
device_t dev;
- clk_t clk_master;
+ clk_t clk_core;
clk_t clk_sleep;
clk_t clk_mock_utmi;
int type;
@@ -98,8 +98,8 @@ qcom_dwc3_attach(device_t dev)
sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
/* Mandatory clocks */
- if (clk_get_by_ofw_name(dev, 0, "master", &sc->clk_master) != 0) {
- device_printf(dev, "Cannot get master clock\n");
+ if (clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core) != 0) {
+ device_printf(dev, "Cannot get core clock\n");
return (ENXIO);
}
@@ -121,10 +121,10 @@ qcom_dwc3_attach(device_t dev)
/*
* Now, iterate over the clocks and enable them.
*/
- err = clk_enable(sc->clk_master);
+ err = clk_enable(sc->clk_core);
if (err != 0) {
device_printf(dev, "Could not enable clock %s\n",
- clk_get_name(sc->clk_master));
+ clk_get_name(sc->clk_core));
return (ENXIO);
}
err = clk_enable(sc->clk_sleep);
@@ -156,7 +156,8 @@ qcom_dwc3_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t qcom_dwc3_methods[] = {
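bus_generic_attach() returned an error code, so drivers habitually tail-called it; its replacement bus_attach_children() returns void, which forces the explicit success return seen above:

	/* ... per-OFW-node child creation and device_probe_and_attach() ... */
	bus_attach_children(dev);	/* replaces bus_generic_attach(dev) */
	return (0);			/* attach itself has already succeeded */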
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma.c b/sys/dev/qcom_ess_edma/qcom_ess_edma.c
new file mode 100644
index 000000000000..990bfe5ee074
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma.c
@@ -0,0 +1,985 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_gmac.h>
+
+static int
+qcom_ess_edma_probe(device_t dev)
+{
+
+ if (! ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_is_compatible(dev, "qcom,ess-edma") == 0)
+ return (ENXIO);
+
+ device_set_desc(dev,
+ "Qualcomm Atheros IPQ4018/IPQ4019 Ethernet driver");
+ return (0);
+}
+
+static int
+qcom_ess_edma_release_intr(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_intr *intr)
+{
+
+ if (intr->irq_res == NULL)
+ return (0);
+
+ if (intr->irq_intr != NULL)
+ bus_teardown_intr(sc->sc_dev, intr->irq_res, intr->irq_intr);
+ bus_release_resource(sc->sc_dev, SYS_RES_IRQ, intr->irq_rid,
+ intr->irq_res);
+
+ return (0);
+}
+
+static void
+qcom_ess_edma_tx_queue_xmit(struct qcom_ess_edma_softc *sc, int queue_id)
+{
+ struct qcom_ess_edma_tx_state *txs = &sc->sc_tx_state[queue_id];
+ int n = 0;
+ int ret;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_TASK,
+ "%s: called; TX queue %d\n", __func__, queue_id);
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[queue_id]);
+
+ sc->sc_tx_ring[queue_id].stats.num_tx_xmit_defer++;
+
+ (void) atomic_cmpset_int(&txs->enqueue_is_running, 1, 0);
+
+ /* Don't do any work if the ring is empty */
+ if (buf_ring_empty(txs->br))
+ return;
+
+ /*
+ * The ring isn't empty, dequeue frames and hand
+ * them to the hardware; defer updating the
+ * transmit ring pointer until we're done.
+ */
+ while (! buf_ring_empty(txs->br)) {
+ if_t ifp;
+ struct qcom_ess_edma_gmac *gmac;
+ struct mbuf *m;
+
+ m = buf_ring_peek_clear_sc(txs->br);
+ if (m == NULL)
+ break;
+
+ ifp = m->m_pkthdr.rcvif;
+ gmac = if_getsoftc(ifp);
+
+ /*
+ * The only way we'll know if we have space is
+ * to try to transmit it.
+ */
+ ret = qcom_ess_edma_tx_ring_frame(sc, queue_id, &m,
+ gmac->port_mask, gmac->vlan_id);
+ if (ret == 0) {
+ if_inc_counter(gmac->ifp, IFCOUNTER_OPACKETS, 1);
+ buf_ring_advance_sc(txs->br);
+ } else {
+ /* Put whatever we tried to transmit back */
+ if_inc_counter(gmac->ifp, IFCOUNTER_OERRORS, 1);
+ buf_ring_putback_sc(txs->br, m);
+ break;
+ }
+ n++;
+ }
+
+ /*
+ * Only push the updated descriptor ring state to the hardware
+ * if we actually queued something.
+ */
+ if (n != 0)
+ (void) qcom_ess_edma_tx_ring_frame_update(sc, queue_id);
+}
+
+/*
+ * Enqueued when a deferred TX needs to happen.
+ */
+static void
+qcom_ess_edma_tx_queue_xmit_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_tx_state *txs = arg;
+ struct qcom_ess_edma_softc *sc = txs->sc;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; TX queue %d\n", __func__, txs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);
+
+ sc->sc_tx_ring[txs->queue_id].stats.num_tx_xmit_task++;
+ qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);
+
+ EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
+}
+
+/*
+ * Enqueued when a TX completion interrupt occurs.
+ */
+static void
+qcom_ess_edma_tx_queue_complete_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_tx_state *txs = arg;
+ struct qcom_ess_edma_softc *sc = txs->sc;
+
+ /* Transmit queue */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; TX queue %d\n", __func__, txs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_tx_ring[txs->queue_id]);
+
+ /*
+ * Complete/free tx mbufs.
+ */
+ (void) qcom_ess_edma_tx_ring_complete(sc, txs->queue_id);
+
+ /*
+ * ACK the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_ack(sc, txs->queue_id);
+
+ /*
+ * Re-enable the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, txs->queue_id,
+ true);
+
+ /*
+ * Do any pending TX work if there's any buffers in the ring.
+ */
+ if (! buf_ring_empty(txs->br))
+ qcom_ess_edma_tx_queue_xmit(sc, txs->queue_id);
+
+ EDMA_RING_UNLOCK(&sc->sc_tx_ring[txs->queue_id]);
+}
+
+static int
+qcom_ess_edma_setup_tx_state(struct qcom_ess_edma_softc *sc, int txq, int cpu)
+{
+ struct qcom_ess_edma_tx_state *txs;
+ struct qcom_ess_edma_desc_ring *ring;
+ cpuset_t mask;
+
+ txs = &sc->sc_tx_state[txq];
+ ring = &sc->sc_tx_ring[txq];
+
+ snprintf(txs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "txq%d_compl", txq);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ txs->queue_id = txq;
+ txs->sc = sc;
+ txs->completion_tq = taskqueue_create_fast(txs->label, M_NOWAIT,
+ taskqueue_thread_enqueue, &txs->completion_tq);
+#if 0
+ taskqueue_start_threads_cpuset(&txs->completion_tq, 1, PI_NET,
+ &mask, "%s", txs->label);
+#else
+ taskqueue_start_threads(&txs->completion_tq, 1, PI_NET,
+ "%s", txs->label);
+#endif
+
+ TASK_INIT(&txs->completion_task, 0,
+ qcom_ess_edma_tx_queue_complete_task, txs);
+ TASK_INIT(&txs->xmit_task, 0,
+ qcom_ess_edma_tx_queue_xmit_task, txs);
+
+ txs->br = buf_ring_alloc(EDMA_TX_BUFRING_SIZE, M_DEVBUF, M_WAITOK,
+ &ring->mtx);
+
+ return (0);
+}
+
+/*
+ * Free the transmit ring state.
+ *
+ * This assumes that the taskqueues have been drained and DMA has
+ * stopped - all we're doing here is freeing the allocated resources.
+ */
+static int
+qcom_ess_edma_free_tx_state(struct qcom_ess_edma_softc *sc, int txq)
+{
+ struct qcom_ess_edma_tx_state *txs;
+
+ txs = &sc->sc_tx_state[txq];
+
+ taskqueue_free(txs->completion_tq);
+
+ while (! buf_ring_empty(txs->br)) {
+ struct mbuf *m;
+
+ m = buf_ring_dequeue_sc(txs->br);
+ m_freem(m);
+ }
+
+ buf_ring_free(txs->br, M_DEVBUF);
+
+ return (0);
+}
+
+static void
+qcom_ess_edma_rx_queue_complete_task(void *arg, int npending)
+{
+ struct qcom_ess_edma_rx_state *rxs = arg;
+ struct qcom_ess_edma_softc *sc = rxs->sc;
+ struct mbufq mq;
+
+ mbufq_init(&mq, EDMA_RX_RING_SIZE);
+
+ /* Receive queue */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: called; RX queue %d\n",
+ __func__, rxs->queue_id);
+
+ EDMA_RING_LOCK(&sc->sc_rx_ring[rxs->queue_id]);
+
+ /*
+ * Do receive work, get completed mbufs.
+ */
+ (void) qcom_ess_edma_rx_ring_complete(sc, rxs->queue_id, &mq);
+
+ /*
+ * ACK the interrupt.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_ack(sc, rxs->queue_id);
+
+ /*
+ * Re-enable interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rxs->queue_id,
+ true);
+
+ EDMA_RING_UNLOCK(&sc->sc_rx_ring[rxs->queue_id]);
+
+ /* Push frames into networking stack */
+ (void) qcom_ess_edma_gmac_receive_frames(sc, rxs->queue_id, &mq);
+}
+
+static int
+qcom_ess_edma_setup_rx_state(struct qcom_ess_edma_softc *sc, int rxq, int cpu)
+{
+ struct qcom_ess_edma_rx_state *rxs;
+ cpuset_t mask;
+
+ rxs = &sc->sc_rx_state[rxq];
+
+ snprintf(rxs->label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rxq%d_compl", rxq);
+
+ CPU_ZERO(&mask);
+ CPU_SET(cpu, &mask);
+
+ rxs->queue_id = rxq;
+ rxs->sc = sc;
+ rxs->completion_tq = taskqueue_create_fast(rxs->label, M_NOWAIT,
+ taskqueue_thread_enqueue, &rxs->completion_tq);
+#if 0
+ taskqueue_start_threads_cpuset(&rxs->completion_tq, 1, PI_NET,
+ &mask, "%s", rxs->label);
+#else
+ taskqueue_start_threads(&rxs->completion_tq, 1, PI_NET,
+ "%s", rxs->label);
+#endif
+
+ TASK_INIT(&rxs->completion_task, 0,
+ qcom_ess_edma_rx_queue_complete_task, rxs);
+ return (0);
+}
+
+/*
+ * Free the receive ring state.
+ *
+ * This assumes that the taskqueues have been drained and DMA has
+ * stopped - all we're doing here is freeing the allocated resources.
+ */
+static int
+qcom_ess_edma_free_rx_state(struct qcom_ess_edma_softc *sc, int rxq)
+{
+ struct qcom_ess_edma_rx_state *rxs;
+
+ rxs = &sc->sc_rx_state[rxq];
+
+ taskqueue_free(rxs->completion_tq);
+
+ return (0);
+}
+
+
+static int
+qcom_ess_edma_detach(device_t dev)
+{
+ struct qcom_ess_edma_softc *sc = device_get_softc(dev);
+ int i;
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ (void) qcom_ess_edma_release_intr(sc, &sc->sc_tx_irq[i]);
+ }
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ (void) qcom_ess_edma_release_intr(sc, &sc->sc_rx_irq[i]);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ (void) qcom_ess_edma_free_tx_state(sc, i);
+ (void) qcom_ess_edma_tx_ring_clean(sc, &sc->sc_tx_ring[i]);
+ (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_tx_ring[i]);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ (void) qcom_ess_edma_free_rx_state(sc, i);
+ (void) qcom_ess_edma_rx_ring_clean(sc, &sc->sc_rx_ring[i]);
+ (void) qcom_ess_edma_desc_ring_free(sc, &sc->sc_rx_ring[i]);
+ }
+
+ if (sc->sc_dma_tag) {
+ bus_dma_tag_destroy(sc->sc_dma_tag);
+ sc->sc_dma_tag = NULL;
+ }
+
+ if (sc->sc_mem_res)
+ bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
+ sc->sc_mem_res);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_filter(void *arg)
+{
+ struct qcom_ess_edma_intr *intr = arg;
+ struct qcom_ess_edma_softc *sc = intr->sc;
+
+ if (intr->irq_rid < QCOM_ESS_EDMA_NUM_TX_IRQS) {
+ int tx_queue = intr->irq_rid;
+
+ intr->stats.num_intr++;
+
+ /*
+ * Disable the interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_tx_intr_set_enable(sc, tx_queue,
+ false);
+
+ /*
+ * Schedule taskqueue to run for this queue.
+ */
+ taskqueue_enqueue(sc->sc_tx_state[tx_queue].completion_tq,
+ &sc->sc_tx_state[tx_queue].completion_task);
+
+ return (FILTER_HANDLED);
+ } else {
+ int rx_queue = intr->irq_rid - QCOM_ESS_EDMA_NUM_TX_IRQS;
+
+ intr->stats.num_intr++;
+
+ /*
+ * Disable the interrupt for this ring.
+ */
+ (void) qcom_ess_edma_hw_intr_rx_intr_set_enable(sc, rx_queue,
+ false);
+
+ /*
+ * Schedule taskqueue to run for this queue.
+ */
+ taskqueue_enqueue(sc->sc_rx_state[rx_queue].completion_tq,
+ &sc->sc_rx_state[rx_queue].completion_task);
+
+ return (FILTER_HANDLED);
+ }
+}
+
+static int
+qcom_ess_edma_setup_intr(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_intr *intr, int rid, int cpu_id)
+{
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: setting up interrupt id %d\n", __func__, rid);
+ intr->sc = sc;
+ intr->irq_rid = rid;
+ intr->irq_res = bus_alloc_resource_any(sc->sc_dev,
+ SYS_RES_IRQ, &intr->irq_rid, RF_ACTIVE);
+ if (intr->irq_res == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: couldn't allocate IRQ %d\n",
+ rid);
+ return (ENXIO);
+ }
+
+ if ((bus_setup_intr(sc->sc_dev, intr->irq_res,
+ INTR_TYPE_NET | INTR_MPSAFE,
+ qcom_ess_edma_filter, NULL, intr,
+ &intr->irq_intr))) {
+ device_printf(sc->sc_dev,
+ "ERROR: unable to register interrupt handler for"
+ " IRQ %d\n", rid);
+ return (ENXIO);
+ }
+
+ /* If requested, bind the interrupt to the given CPU. */
+ if (cpu_id != -1) {
+ if (intr_bind_irq(sc->sc_dev, intr->irq_res, cpu_id) != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: unable to bind IRQ %d to CPU %d\n",
+ rid, cpu_id);
+ }
+ /* Note: don't completely error out here */
+ }
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_sysctl_dump_state(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ int val = 0;
+ int error;
+ int i;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (val == 0)
+ return (0);
+
+ EDMA_LOCK(sc);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "RXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u,"
+ " REG_SW_CONS_IDX=0x%08x\n",
+ i,
+ sc->sc_rx_ring[i].next_to_fill,
+ sc->sc_rx_ring[i].next_to_clean,
+ EDMA_REG_READ(sc,
+ EDMA_REG_RFD_IDX_Q(i)) & EDMA_RFD_PROD_IDX_BITS,
+ qcom_ess_edma_hw_rfd_get_cons_index(sc, i),
+ EDMA_REG_READ(sc, EDMA_REG_RX_SW_CONS_IDX_Q(i)));
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "TXQ[%d]: prod=%u, cons=%u, hw prod=%u, hw cons=%u\n",
+ i,
+ sc->sc_tx_ring[i].next_to_fill,
+ sc->sc_tx_ring[i].next_to_clean,
+ (EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i))
+ >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK,
+ EDMA_REG_READ(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i)));
+ }
+
+ device_printf(sc->sc_dev, "EDMA_REG_TXQ_CTRL=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL));
+ device_printf(sc->sc_dev, "EDMA_REG_RXQ_CTRL=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_DESC0=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_DESC0));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_DESC1=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_DESC1));
+ device_printf(sc->sc_dev, "EDMA_REG_RX_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_RX_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_TX_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_TX_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_MISC_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_MISC_ISR));
+ device_printf(sc->sc_dev, "EDMA_REG_WOL_ISR=0x%08x\n",
+ EDMA_REG_READ(sc, EDMA_REG_WOL_ISR));
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_sysctl_dump_stats(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ int val = 0;
+ int error;
+ int i;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ if (val == 0)
+ return (0);
+
+ EDMA_LOCK(sc);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "RXQ[%d]: num_added=%llu, num_cleaned=%llu,"
+ " num_dropped=%llu, num_enqueue_full=%llu,"
+ " num_rx_no_gmac=%llu, tx_mapfail=%llu,"
+ " num_tx_maxfrags=%llu, num_rx_ok=%llu\n",
+ i,
+ sc->sc_rx_ring[i].stats.num_added,
+ sc->sc_rx_ring[i].stats.num_cleaned,
+ sc->sc_rx_ring[i].stats.num_dropped,
+ sc->sc_rx_ring[i].stats.num_enqueue_full,
+ sc->sc_rx_ring[i].stats.num_rx_no_gmac,
+ sc->sc_rx_ring[i].stats.num_tx_mapfail,
+ sc->sc_rx_ring[i].stats.num_tx_maxfrags,
+ sc->sc_rx_ring[i].stats.num_rx_ok);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ device_printf(sc->sc_dev,
+ "TXQ[%d]: num_added=%llu, num_cleaned=%llu,"
+ " num_dropped=%llu, num_enqueue_full=%llu,"
+ " tx_mapfail=%llu, tx_complete=%llu, tx_xmit_defer=%llu,"
+ " tx_xmit_task=%llu, num_tx_maxfrags=%llu,"
+ " num_tx_ok=%llu\n",
+ i,
+ sc->sc_tx_ring[i].stats.num_added,
+ sc->sc_tx_ring[i].stats.num_cleaned,
+ sc->sc_tx_ring[i].stats.num_dropped,
+ sc->sc_tx_ring[i].stats.num_enqueue_full,
+ sc->sc_tx_ring[i].stats.num_tx_mapfail,
+ sc->sc_tx_ring[i].stats.num_tx_complete,
+ sc->sc_tx_ring[i].stats.num_tx_xmit_defer,
+ sc->sc_tx_ring[i].stats.num_tx_xmit_task,
+ sc->sc_tx_ring[i].stats.num_tx_maxfrags,
+ sc->sc_tx_ring[i].stats.num_tx_ok);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ device_printf(sc->sc_dev, "INTR_RXQ[%d]: num_intr=%llu\n",
+ i,
+ sc->sc_rx_irq[i].stats.num_intr);
+ }
+
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ device_printf(sc->sc_dev, "INTR_TXQ[%d]: num_intr=%llu\n",
+ i,
+ sc->sc_tx_irq[i].stats.num_intr);
+ }
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+}
+
+
+static int
+qcom_ess_edma_sysctl_tx_intmit(SYSCTL_HANDLER_ARGS)
+{
+ struct qcom_ess_edma_softc *sc = arg1;
+ uint32_t usec;
+ int val = 0;
+ int error;
+
+ EDMA_LOCK(sc);
+ (void) qcom_ess_edma_hw_get_tx_intr_moderation(sc, &usec);
+ EDMA_UNLOCK(sc);
+
+ val = usec;
+
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ goto finish;
+
+ EDMA_LOCK(sc);
+ error = qcom_ess_edma_hw_set_tx_intr_moderation(sc, (uint32_t) val);
+ EDMA_UNLOCK(sc);
+finish:
+ return (error);
+}
+
+
+static int
+qcom_ess_edma_attach_sysctl(struct qcom_ess_edma_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
+
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "debug", CTLFLAG_RW, &sc->sc_debug, 0,
+ "debugging flags");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "state", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_dump_state, "I", "");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "stats", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_dump_stats, "I", "");
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "tx_intmit", CTLTYPE_INT | CTLFLAG_RW, sc,
+ 0, qcom_ess_edma_sysctl_tx_intmit, "I", "");
+
+ return (0);
+}
+
+static int
+qcom_ess_edma_attach(device_t dev)
+{
+ struct qcom_ess_edma_softc *sc = device_get_softc(dev);
+ int i, ret;
+
+ mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ sc->sc_dev = dev;
+ sc->sc_debug = 0;
+
+ (void) qcom_ess_edma_attach_sysctl(sc);
+
+ /* Create parent DMA tag. */
+ ret = bus_dma_tag_create(
+ bus_get_dma_tag(sc->sc_dev), /* parent */
+ 1, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
+ 0, /* nsegments */
+ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->sc_dma_tag);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create parent DMA tag\n");
+ goto error;
+ }
+
+ /* Map control/status registers. */
+ sc->sc_mem_rid = 0;
+ sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &sc->sc_mem_rid, RF_ACTIVE);
+
+ if (sc->sc_mem_res == NULL) {
+ device_printf(dev, "ERROR: couldn't map MMIO space\n");
+ goto error;
+ }
+
+ sc->sc_mem_res_size = (size_t) bus_get_resource_count(dev,
+ SYS_RES_MEMORY, sc->sc_mem_rid);
+ if (sc->sc_mem_res_size == 0) {
+ device_printf(dev, "%s: failed to get device memory size\n",
+ __func__);
+ goto error;
+ }
+
+ /*
+ * How many TX queues per CPU, for figuring out flowid/CPU
+ * mapping.
+ */
+ sc->sc_config.num_tx_queue_per_cpu =
+ QCOM_ESS_EDMA_NUM_TX_RINGS / mp_ncpus;
+
+ /* Allocate TX IRQs */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ int cpu_id;
+
+ /*
+ * The current mapping in the if_transmit() path
+ * will map mp_ncpu groups of flowids to the TXQs.
+ * So for a 4 CPU system the first four will be CPU 0,
+ * the second four will be CPU 1, etc.
+ */
+ cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);
+ if (qcom_ess_edma_setup_intr(sc, &sc->sc_tx_irq[i],
+ i, cpu_id) != 0)
+ goto error;
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "mapping TX IRQ %d to CPU %d\n",
+ i, cpu_id);
+ }
+
+ /* Allocate RX IRQs */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ int cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);
+ if (qcom_ess_edma_setup_intr(sc, &sc->sc_rx_irq[i],
+ i + QCOM_ESS_EDMA_NUM_TX_IRQS, cpu_id) != 0)
+ goto error;
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "mapping RX IRQ %d to CPU %d\n",
+ i, cpu_id);
+ }
+
+ /* Default receive frame size - before ETHER_ALIGN hack */
+ sc->sc_config.rx_buf_size = 2048;
+ sc->sc_config.rx_buf_ether_align = true;
+
+ /* Default RSS parameters */
+ sc->sc_config.rss_type =
+ EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP
+ | EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP
+ | EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
+
+ /* Default queue parameters */
+ sc->sc_config.tx_ring_count = EDMA_TX_RING_SIZE;
+ sc->sc_config.rx_ring_count = EDMA_RX_RING_SIZE;
+
+ /* Default interrupt masks */
+ sc->sc_config.rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
+ sc->sc_config.tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
+ sc->sc_state.misc_intr_mask = 0;
+ sc->sc_state.wol_intr_mask = 0;
+ sc->sc_state.intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
+
+ /*
+ * Parse out the gmac count so we can start parsing out
+ * the gmac list and create us some ifnets.
+ */
+ if (OF_getencprop(ofw_bus_get_node(dev), "qcom,num_gmac",
+ &sc->sc_config.num_gmac, sizeof(uint32_t)) > 0) {
+ device_printf(sc->sc_dev, "Creating %d GMACs\n",
+ sc->sc_config.num_gmac);
+ } else {
+ device_printf(sc->sc_dev, "Defaulting to 1 GMAC\n");
+ sc->sc_config.num_gmac = 1;
+ }
+ if (sc->sc_config.num_gmac > QCOM_ESS_EDMA_MAX_NUM_GMACS) {
+ device_printf(sc->sc_dev, "Capping GMACs to %d\n",
+ QCOM_ESS_EDMA_MAX_NUM_GMACS);
+ sc->sc_config.num_gmac = QCOM_ESS_EDMA_MAX_NUM_GMACS;
+ }
+
+ /*
+ * And now, create some gmac entries here; we'll create the
+ * ifnet's once this is all done.
+ */
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_parse(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to parse gmac%d\n", i);
+ goto error;
+ }
+ }
+
+ /* allocate tx rings */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int cpu_id;
+
+ snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "tx_ring%d", i);
+ if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_tx_ring[i],
+ label,
+ sc->sc_config.tx_ring_count,
+ sizeof(struct qcom_ess_edma_sw_desc_tx),
+ sizeof(struct qcom_ess_edma_tx_desc),
+ QCOM_ESS_EDMA_MAX_TXFRAGS,
+ ESS_EDMA_TX_BUFFER_ALIGN) != 0)
+ goto error;
+ if (qcom_ess_edma_tx_ring_setup(sc, &sc->sc_tx_ring[i]) != 0)
+ goto error;
+
+ /* Same CPU as the interrupts for now */
+ cpu_id = qcom_ess_edma_tx_queue_to_cpu(sc, i);
+
+ if (qcom_ess_edma_setup_tx_state(sc, i, cpu_id) != 0)
+ goto error;
+ }
+
+ /* allocate rx rings */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int cpu_id;
+
+ snprintf(label, QCOM_ESS_EDMA_LABEL_SZ - 1, "rx_ring%d", i);
+ if (qcom_ess_edma_desc_ring_setup(sc, &sc->sc_rx_ring[i],
+ label,
+ sc->sc_config.rx_ring_count,
+ sizeof(struct qcom_ess_edma_sw_desc_rx),
+ sizeof(struct qcom_ess_edma_rx_free_desc),
+ 1,
+ ESS_EDMA_RX_BUFFER_ALIGN) != 0)
+ goto error;
+ if (qcom_ess_edma_rx_ring_setup(sc, &sc->sc_rx_ring[i]) != 0)
+ goto error;
+
+ /* Same CPU as the interrupts for now */
+ cpu_id = qcom_ess_edma_rx_queue_to_cpu(sc, i);
+
+ if (qcom_ess_edma_setup_rx_state(sc, i, cpu_id) != 0)
+ goto error;
+ }
+
+ /*
+ * map the gmac instances <-> port masks, so incoming frames know
+ * where they need to be forwarded to.
+ */
+ for (i = 0; i < QCOM_ESS_EDMA_MAX_NUM_PORTS; i++)
+ sc->sc_gmac_port_map[i] = -1;
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_setup_port_mapping(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to setup port mpapping for gmac%d\n", i);
+ goto error;
+ }
+ }
+
+ /* Create ifnets */
+ for (i = 0; i < sc->sc_config.num_gmac; i++) {
+ ret = qcom_ess_edma_gmac_create_ifnet(sc, i);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to create ifnet for gmac%d\n", i);
+ goto error;
+ }
+ }
+
+ /*
+ * NOTE: If there's no ess-switch / we're a single phy, we
+ * still need to reset the ess fabric to a fixed useful state.
+ * Otherwise we won't be able to pass packets to anything.
+ *
+ * Worry about this later.
+ */
+
+ EDMA_LOCK(sc);
+
+ /* disable all interrupts */
+ ret = qcom_ess_edma_hw_intr_disable(sc);
+ if (ret != 0) {
+ device_printf(sc->sc_dev,
+ "Failed to disable interrupts (%d)\n",
+ ret);
+ goto error_locked;
+ }
+
+ /* reset edma */
+ ret = qcom_ess_edma_hw_stop(sc);
+
+ /* fill RX ring here, explicitly */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ EDMA_RING_LOCK(&sc->sc_rx_ring[i]);
+ (void) qcom_ess_edma_rx_ring_fill(sc, i,
+ sc->sc_config.rx_ring_count);
+ EDMA_RING_UNLOCK(&sc->sc_rx_ring[i]);
+ }
+
+ /* configure TX/RX rings; RSS config; initial interrupt rates, etc */
+ ret = qcom_ess_edma_hw_setup(sc);
+ ret = qcom_ess_edma_hw_setup_tx(sc);
+ ret = qcom_ess_edma_hw_setup_rx(sc);
+ ret = qcom_ess_edma_hw_setup_txrx_desc_rings(sc);
+
+ /* setup rss indirection table */
+ ret = qcom_ess_edma_hw_configure_rss_table(sc);
+
+ /* setup load balancing table */
+ ret = qcom_ess_edma_hw_configure_load_balance_table(sc);
+
+ /* configure virtual queue */
+ ret = qcom_ess_edma_hw_configure_tx_virtual_queue(sc);
+
+ /* configure AXI burst max */
+ ret = qcom_ess_edma_hw_configure_default_axi_transaction_size(sc);
+
+ /* enable IRQs */
+ ret = qcom_ess_edma_hw_intr_enable(sc);
+
+ /* enable TX control */
+ ret = qcom_ess_edma_hw_tx_enable(sc);
+
+ /* enable RX control */
+ ret = qcom_ess_edma_hw_rx_enable(sc);
+
+ EDMA_UNLOCK(sc);
+
+ return (0);
+
+error_locked:
+ EDMA_UNLOCK(sc);
+error:
+ qcom_ess_edma_detach(dev);
+ return (ENXIO);
+}
+
+static device_method_t qcom_ess_edma_methods[] = {
+ /* Driver */
+ DEVMETHOD(device_probe, qcom_ess_edma_probe),
+ DEVMETHOD(device_attach, qcom_ess_edma_attach),
+ DEVMETHOD(device_detach, qcom_ess_edma_detach),
+
+ {0, 0},
+};
+
+static driver_t qcom_ess_edma_driver = {
+ "essedma",
+ qcom_ess_edma_methods,
+ sizeof(struct qcom_ess_edma_softc),
+};
+
+DRIVER_MODULE(qcom_ess_edma, simplebus, qcom_ess_edma_driver, NULL, 0);
+DRIVER_MODULE(qcom_ess_edma, ofwbus, qcom_ess_edma_driver, NULL, 0);
+MODULE_DEPEND(qcom_ess_edma, ether, 1, 1, 1);
+MODULE_VERSION(qcom_ess_edma, 1);
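The transmit path in this file follows the usual FreeBSD if_transmit/buf_ring split: producers enqueue mbufs onto a per-queue buf_ring, and only the drain path takes the ring lock. The consumer contract in qcom_ess_edma_tx_queue_xmit() is peek, try to ring the frame, then either advance or put the mbuf back. A condensed sketch of that loop (hw_frame_queued() is a hypothetical stand-in for qcom_ess_edma_tx_ring_frame() succeeding):

	/* Consumer side; caller holds EDMA_RING_LOCK on this queue. */
	while (!buf_ring_empty(txs->br)) {
		struct mbuf *m;

		m = buf_ring_peek_clear_sc(txs->br);	/* look, don't consume */
		if (m == NULL)
			break;
		if (hw_frame_queued(m)) {
			buf_ring_advance_sc(txs->br);	/* commit the dequeue */
		} else {
			buf_ring_putback_sc(txs->br, m); /* no room; retry later */
			break;
		}
	}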
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h
new file mode 100644
index 000000000000..325f03f12cb0
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_debug.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef __QCOM_ESS_EDMA_DEBUG_H__
+#define __QCOM_ESS_EDMA_DEBUG_H__
+
+#define QCOM_ESS_EDMA_DBG_INTERRUPT 0x00000001
+#define QCOM_ESS_EDMA_DBG_DESCRIPTOR_SETUP 0x00000002
+#define QCOM_ESS_EDMA_DBG_RX_RING_MGMT 0x00000004
+#define QCOM_ESS_EDMA_DBG_TX_RING_MGMT 0x00000008
+#define QCOM_ESS_EDMA_DBG_RX_FRAME 0x00000010
+#define QCOM_ESS_EDMA_DBG_RX_RING 0x00000020
+#define QCOM_ESS_EDMA_DBG_TX_FRAME 0x00000040
+#define QCOM_ESS_EDMA_DBG_TX_RING 0x00000080
+#define QCOM_ESS_EDMA_DBG_TX_RING_COMPLETE 0x00000100
+#define QCOM_ESS_EDMA_DBG_TX_TASK 0x00000200
+#define QCOM_ESS_EDMA_DBG_TX_FRAME_ERROR 0x00000400
+#define QCOM_ESS_EDMA_DBG_STATE 0x00000800
+
+#define QCOM_ESS_EDMA_DPRINTF(sc, flags, ...) \
+ do { \
+ if ((sc)->sc_debug & (flags)) \
+ device_printf((sc)->sc_dev, __VA_ARGS__); \
+ } while (0)
+
+#endif /* __QCOM_ESS_EDMA_DEBUG_H__ */
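Each value above is one bit in the sc_debug word that qcom_ess_edma_attach_sysctl() exposes read/write; ORing flags together selects the trace categories QCOM_ESS_EDMA_DPRINTF() will emit. For example, sysctl dev.essedma.0.debug=0x201 would enable interrupt (0x1) and TX-task (0x200) tracing; the node name here is an assumption based on the "essedma" driver declaration.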
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c
new file mode 100644
index 000000000000..11ce74137c32
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.c
@@ -0,0 +1,351 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+static void
+qcom_ess_edma_desc_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs,
+ int error)
+{
+ if (error != 0)
+ return;
+ KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
+ *(bus_addr_t *)arg = segs[0].ds_addr;
+}
+
+/*
+ * Initialise the given descriptor ring.
+ */
+int
+qcom_ess_edma_desc_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ char *label,
+ int count,
+ int sw_desc_size,
+ int hw_desc_size,
+ int num_segments,
+ int buffer_align)
+{
+ int error;
+ int hw_ring_size;
+
+ ring->label = strdup(label, M_TEMP);
+ if (ring->label == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to strdup label\n");
+ error = ENOMEM;
+ goto error;
+ }
+
+ mtx_init(&ring->mtx, ring->label, NULL, MTX_DEF);
+
+ hw_ring_size = count * hw_desc_size;
+
+ /*
+ * Round the hardware ring size up to a page so we
+ * don't end up with partial-page allocations that
+ * would force bounce buffers to be used.
+ */
+ hw_ring_size = ((hw_ring_size + PAGE_SIZE) / PAGE_SIZE) * PAGE_SIZE;
+
+ /*
+ * For now set it to 4 byte alignment, no max size.
+ */
+ ring->ring_align = EDMA_DESC_RING_ALIGN;
+ error = bus_dma_tag_create(
+ sc->sc_dma_tag, /* parent */
+ EDMA_DESC_RING_ALIGN, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ hw_ring_size, /* maxsize */
+ 1, /* nsegments */
+ hw_ring_size, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ring->hw_ring_dma_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create descriptor DMA tag (%d)\n",
+ error);
+ goto error;
+ }
+
+ /*
+ * Buffer ring - used passed in value
+ */
+ ring->buffer_align = buffer_align;
+ error = bus_dma_tag_create(
+ sc->sc_dma_tag, /* parent */
+ buffer_align, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ EDMA_DESC_MAX_BUFFER_SIZE * num_segments, /* maxsize */
+ num_segments, /* nsegments */
+ EDMA_DESC_MAX_BUFFER_SIZE, /* maxsegsize */
+ 0, /* flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &ring->buffer_dma_tag);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to create buffer DMA tag (%d)\n",
+ error);
+ goto error;
+ }
+
+ /*
+ * Allocate software descriptors
+ */
+ ring->sw_desc = mallocarray(count, sw_desc_size, M_TEMP,
+ M_NOWAIT | M_ZERO);
+ if (ring->sw_desc == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: failed to allocate sw_desc\n");
+ goto error;
+ }
+
+ /*
+ * Allocate hardware descriptors, initialise map, get
+ * physical address.
+ */
+ error = bus_dmamem_alloc(ring->hw_ring_dma_tag,
+ (void **)&ring->hw_desc,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
+ &ring->hw_desc_map);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "failed to allocate DMA'able memory for hw_desc ring\n");
+ goto error;
+ }
+ ring->hw_desc_paddr = 0;
+ error = bus_dmamap_load(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ ring->hw_desc, hw_ring_size, qcom_ess_edma_desc_map_addr,
+ &ring->hw_desc_paddr, BUS_DMA_NOWAIT);
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_PREWRITE);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_DESCRIPTOR_SETUP,
+ "%s: PADDR=0x%08lx\n", __func__, ring->hw_desc_paddr);
+
+ /*
+ * All done, initialise state.
+ */
+ ring->hw_entry_size = hw_desc_size;
+ ring->sw_entry_size = sw_desc_size;
+ ring->ring_count = count;
+
+ return (0);
+error:
+ mtx_destroy(&ring->mtx);
+ if (ring->label != NULL)
+ free(ring->label, M_TEMP);
+ if (ring->hw_desc != NULL) {
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->hw_ring_dma_tag, ring->hw_desc_map);
+ bus_dmamem_free(ring->hw_ring_dma_tag, ring->hw_desc,
+ ring->hw_desc_map);
+ ring->hw_desc = NULL;
+ }
+ if (ring->sw_desc != NULL) {
+ free(ring->sw_desc, M_TEMP);
+ ring->sw_desc = NULL;
+ }
+ if (ring->hw_ring_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->hw_ring_dma_tag);
+ ring->hw_ring_dma_tag = NULL;
+ }
+ if (ring->buffer_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->buffer_dma_tag);
+ ring->buffer_dma_tag = NULL;
+ }
+
+ return (error);
+}
+
+/*
+ * Free/clean the given descriptor ring.
+ *
+ * The ring itself right now is static; so we don't free it.
+ * We just free the resources it has.
+ */
+int
+qcom_ess_edma_desc_ring_free(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ mtx_destroy(&ring->mtx);
+ if (ring->label != NULL)
+ free(ring->label, M_TEMP);
+
+ if (ring->hw_desc != NULL) {
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(ring->hw_ring_dma_tag, ring->hw_desc_map);
+ bus_dmamem_free(ring->hw_ring_dma_tag, ring->hw_desc,
+ ring->hw_desc_map);
+ ring->hw_desc = NULL;
+ }
+
+ if (ring->sw_desc != NULL) {
+ free(ring->sw_desc, M_TEMP);
+ ring->sw_desc = NULL;
+ }
+
+ if (ring->hw_ring_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->hw_ring_dma_tag);
+ ring->hw_ring_dma_tag = NULL;
+ }
+ if (ring->buffer_dma_tag != NULL) {
+ bus_dma_tag_destroy(ring->buffer_dma_tag);
+ ring->buffer_dma_tag = NULL;
+ }
+
+ return (0);
+}
+
+/*
+ * Fetch the given software descriptor pointer by index.
+ *
+ * Returns NULL if the index is out of bounds.
+ */
+void *
+qcom_ess_edma_desc_ring_get_sw_desc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t index)
+{
+ char *p;
+
+ if (index >= ring->ring_count)
+ return (NULL);
+
+ p = (char *) ring->sw_desc;
+
+ return (void *) (p + (ring->sw_entry_size * index));
+}
+
+/*
+ * Fetch the given hardware descriptor pointer by index.
+ *
+ * Returns NULL if the index is out of bounds.
+ */
+void *
+qcom_ess_edma_desc_ring_get_hw_desc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t index)
+{
+ char *p;
+
+ if (index >= ring->ring_count)
+ return (NULL);
+
+ p = (char *) ring->hw_desc;
+
+ return (void *) (p + (ring->hw_entry_size * index));
+}
+
+/*
+ * Flush the hardware ring after a write, before the hardware
+ * gets to it.
+ */
+int
+qcom_ess_edma_desc_ring_flush_preupdate(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+
+/*
+ * Flush the hardware ring after the hardware writes into it,
+ * before a read.
+ */
+int
+qcom_ess_edma_desc_ring_flush_postupdate(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+
+ bus_dmamap_sync(ring->hw_ring_dma_tag, ring->hw_desc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ return (0);
+}
+
+/*
+ * Get how many descriptor slots are available.
+ */
+int
+qcom_ess_edma_desc_ring_get_num_available(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ uint16_t sw_next_to_fill;
+ uint16_t sw_next_to_clean;
+ uint16_t count = 0;
+
+ sw_next_to_clean = ring->next_to_clean;
+ sw_next_to_fill = ring->next_to_fill;
+
+ if (sw_next_to_clean <= sw_next_to_fill)
+ count = ring->ring_count;
+
+ return (count + sw_next_to_clean - sw_next_to_fill - 1);
+}
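The final expression reserves one slot so that next_to_fill == next_to_clean unambiguously means an empty ring. A worked example with ring_count = 16: for next_to_clean = 2 and next_to_fill = 5, clean <= fill so count = 16 and 16 + 2 - 5 - 1 = 12 slots are free (3 of the 15 usable slots are occupied); after fill wraps, say next_to_fill = 1 and next_to_clean = 5, count stays 0 and 0 + 5 - 1 - 1 = 3 slots are free.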
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h
new file mode 100644
index 000000000000..b7213d94da5d
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_desc.h
@@ -0,0 +1,63 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_DESC_H__
+#define __QCOM_ESS_EDMA_DESC_H__
+
+extern int qcom_ess_edma_desc_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ char *label,
+ int count,
+ int sw_desc_size,
+ int hw_desc_size,
+ int num_segments,
+ int buffer_alignment);
+extern int qcom_ess_edma_desc_ring_free(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern void * qcom_ess_edma_desc_ring_get_sw_desc(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ uint16_t index);
+extern void * qcom_ess_edma_desc_ring_get_hw_desc(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring,
+ uint16_t index);
+extern int qcom_ess_edma_desc_ring_flush_preupdate(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_desc_ring_flush_postupdate(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_desc_ring_get_num_available(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+
+#endif /* __QCOM_ESS_EDMA_DESC_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c
new file mode 100644
index 000000000000..6510dcc74ca2
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.c
@@ -0,0 +1,462 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/gpio.h>
+#include <sys/mbuf.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/smp.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/queue.h>
+#include <sys/taskqueue.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_vlan_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+#include <net/if_types.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/atomic.h>
+
+#include <dev/gpio/gpiobusvar.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_gmac.h>
+
+static int
+qcom_ess_edma_gmac_mediachange(if_t ifp)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct ifmedia *ifm = &gmac->ifm;
+ struct ifmedia_entry *ife = ifm->ifm_cur;
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) {
+ device_printf(sc->sc_dev,
+ "AUTO is not supported this MAC");
+ return (EINVAL);
+ }
+
+	/*
+	 * Otherwise ignore the request; the media is hard-coded.
+	 */
+ return (0);
+}
+
+static void
+qcom_ess_edma_gmac_mediastatus(if_t ifp, struct ifmediareq *ifmr)
+{
+
+ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ ifmr->ifm_active = IFM_ETHER | IFM_1000_T | IFM_FDX;
+}
+
+static int
+qcom_ess_edma_gmac_ioctl(if_t ifp, u_long command, caddr_t data)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct ifreq *ifr = (struct ifreq *) data;
+ int error, mask;
+
+ switch (command) {
+ case SIOCSIFFLAGS:
+ if ((if_getflags(ifp) & IFF_UP) != 0) {
+ /* up */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: IFF_UP\n",
+ __func__,
+ gmac->id);
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING,
+ IFF_DRV_OACTIVE);
+ if_link_state_change(ifp, LINK_STATE_UP);
+
+ } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) {
+ /* down */
+ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: IF down\n",
+ __func__,
+ gmac->id);
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ }
+ error = 0;
+ break;
+ case SIOCGIFMEDIA:
+ case SIOCSIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &gmac->ifm, command);
+ break;
+ case SIOCSIFCAP:
+ mask = ifr->ifr_reqcap ^ if_getcapenable(ifp);
+ error = 0;
+
+ if ((mask & IFCAP_RXCSUM) != 0 &&
+ (if_getcapabilities(ifp) & IFCAP_RXCSUM) != 0)
+ if_togglecapenable(ifp, IFCAP_RXCSUM);
+
+ if ((mask & IFCAP_VLAN_HWTAGGING) != 0 &&
+ (if_getcapabilities(ifp) & IFCAP_VLAN_HWTAGGING) != 0)
+ if_togglecapenable(ifp, IFCAP_VLAN_HWTAGGING);
+
+ VLAN_CAPABILITIES(ifp);
+ break;
+ default:
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static void
+qcom_ess_edma_gmac_init(void *arg)
+{
+ struct qcom_ess_edma_gmac *gmac = arg;
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_STATE,
+ "%s: gmac%d: called\n",
+ __func__,
+ gmac->id);
+
+ if_setdrvflagbits(gmac->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE);
+ if_link_state_change(gmac->ifp, LINK_STATE_UP);
+}
+
+static int
+qcom_ess_edma_gmac_transmit(if_t ifp, struct mbuf *m)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+ struct qcom_ess_edma_tx_state *txs;
+ int ret;
+ int q;
+
+ /* Make sure our CPU doesn't change whilst we're running */
+ sched_pin();
+
+ /*
+ * Map flowid / curcpu to a given transmit queue.
+ *
+ * Since we're running on a platform with either two
+ * or four CPUs, we want to distribute the load to a set
+ * of TX queues that won't clash with any other CPU TX queue
+ * use.
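+	 *
+	 * For example (illustrative numbers): with 4 TX queues per
+	 * CPU, a frame with flowid 5 sent from CPU 1 maps to
+	 * q = (5 % 4) + (4 * 1) = 5, the second queue in CPU 1's
+	 * group.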
+ */
+ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) {
+ /* Map flowid to a queue */
+ q = m->m_pkthdr.flowid % sc->sc_config.num_tx_queue_per_cpu;
+
+		/* Now, offset into this CPU's group of TX queues */
+		q = q + (sc->sc_config.num_tx_queue_per_cpu * curcpu);
+
+ /* And ensure we're not overflowing */
+ q = q % QCOM_ESS_EDMA_NUM_TX_RINGS;
+ } else {
+ /*
+ * Use the first TXQ in each CPU group, so we don't
+ * hit lock contention with traffic that has flowids.
+ */
+		q = (sc->sc_config.num_tx_queue_per_cpu * curcpu) %
+		    QCOM_ESS_EDMA_NUM_TX_RINGS;
+ }
+
+ /* Attempt to enqueue in the buf_ring. */
+ /*
+ * XXX TODO: maybe move this into *tx.c so gmac.c doesn't
+ * need to reach into the tx_state stuff?
+ */
+ txs = &sc->sc_tx_state[q];
+
+	/* XXX TODO: use an mbuf tag instead to track the transmit gmac/ifp? */
+ m->m_pkthdr.rcvif = ifp;
+
+ ret = buf_ring_enqueue(txs->br, m);
+
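+	/*
+	 * Kick the transmit task only if it isn't already scheduled
+	 * or running; the flag is (presumably) cleared by the task
+	 * itself once it has drained the ring.
+	 */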
+ if (ret == 0) {
+ if (atomic_cmpset_int(&txs->enqueue_is_running, 0, 1) == 1) {
+ taskqueue_enqueue(txs->completion_tq, &txs->xmit_task);
+ }
+ }
+
+ sched_unpin();
+
+	/* Don't free the mbuf here; the if_transmit caller handles errors */
+ return (ret);
+}
+
+static void
+qcom_ess_edma_gmac_qflush(if_t ifp)
+{
+ struct qcom_ess_edma_gmac *gmac = if_getsoftc(ifp);
+ struct qcom_ess_edma_softc *sc = gmac->sc;
+
+ /* XXX TODO */
+ device_printf(sc->sc_dev, "%s: gmac%d: called\n",
+ __func__,
+ gmac->id);
+
+ /*
+ * Flushing the ifnet would, sigh, require walking each buf_ring
+ * and then removing /only/ the entries matching that ifnet.
+ * Which is a complete pain to do right now.
+ */
+}
+
+int
+qcom_ess_edma_gmac_parse(struct qcom_ess_edma_softc *sc, int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ char gmac_name[10];
+ uint32_t vlan_tag[2];
+ phandle_t p;
+ int len;
+
+	snprintf(gmac_name, sizeof(gmac_name), "gmac%d", gmac_id);
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Find our sub-device */
+ p = ofw_bus_find_child(ofw_bus_get_node(sc->sc_dev), gmac_name);
+ if (p <= 0) {
+ device_printf(sc->sc_dev,
+ "%s: couldn't find %s\n", __func__,
+ gmac_name);
+ return (ENOENT);
+ }
+
+ /* local-mac-address */
+ len = OF_getprop(p, "local-mac-address", (void *) &gmac->eaddr,
+ sizeof(struct ether_addr));
+ if (len != sizeof(struct ether_addr)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: Couldn't parse local-mac-address\n",
+ gmac_id);
+ memset(&gmac->eaddr, 0, sizeof(gmac->eaddr));
+ }
+
+	/* vlan_tag - <id portmask> tuple */
+ len = OF_getproplen(p, "vlan_tag");
+ if (len != sizeof(vlan_tag)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: no vlan_tag field or invalid size/values\n",
+ gmac_id);
+ return (EINVAL);
+ }
+ len = OF_getencprop(p, "vlan_tag", (void *) &vlan_tag,
+ sizeof(vlan_tag));
+ if (len != sizeof(vlan_tag)) {
+ device_printf(sc->sc_dev,
+ "gmac%d: couldn't parse vlan_tag field\n", gmac_id);
+ return (EINVAL);
+ }
+
+ /*
+ * Setup the given gmac entry.
+ */
+ gmac->sc = sc;
+ gmac->id = gmac_id;
+ gmac->enabled = true;
+ gmac->vlan_id = vlan_tag[0];
+ gmac->port_mask = vlan_tag[1];
+
+ device_printf(sc->sc_dev,
+ "gmac%d: MAC=%6D, vlan id=%d, port_mask=0x%04x\n",
+ gmac_id,
+ &gmac->eaddr, ":",
+ gmac->vlan_id,
+ gmac->port_mask);
+
+ return (0);
+}
+
+int
+qcom_ess_edma_gmac_create_ifnet(struct qcom_ess_edma_softc *sc, int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ char gmac_name[10];
+
+	snprintf(gmac_name, sizeof(gmac_name), "gmac%d", gmac_id);
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Skip non-setup gmacs */
+ if (gmac->enabled == false)
+ return (0);
+
+ gmac->ifp = if_alloc(IFT_ETHER);
+ if (gmac->ifp == NULL) {
+ device_printf(sc->sc_dev, "gmac%d: couldn't allocate ifnet\n",
+ gmac_id);
+ return (ENOSPC);
+ }
+
+ if_setsoftc(gmac->ifp, gmac);
+
+ if_initname(gmac->ifp, "gmac", gmac_id);
+
+ if (ETHER_IS_ZERO(gmac->eaddr.octet)) {
+ device_printf(sc->sc_dev, "gmac%d: generating random MAC\n",
+ gmac_id);
+ ether_gen_addr(gmac->ifp, (void *) &gmac->eaddr.octet);
+ }
+
+ if_setflags(gmac->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
+
+ if_setioctlfn(gmac->ifp, qcom_ess_edma_gmac_ioctl);
+ if_setinitfn(gmac->ifp, qcom_ess_edma_gmac_init);
+ if_settransmitfn(gmac->ifp, qcom_ess_edma_gmac_transmit);
+ if_setqflushfn(gmac->ifp, qcom_ess_edma_gmac_qflush);
+
+ if_setcapabilitiesbit(gmac->ifp, IFCAP_VLAN_MTU |
+ IFCAP_VLAN_HWTAGGING, 0);
+
+ if_setcapabilitiesbit(gmac->ifp, IFCAP_RXCSUM, 0);
+
+	/* No TX checksum offload yet; CSUM_TCP | CSUM_UDP would go here */
+ if_clearhwassist(gmac->ifp);
+
+ /* Configure a hard-coded media */
+ ifmedia_init(&gmac->ifm, 0, qcom_ess_edma_gmac_mediachange,
+ qcom_ess_edma_gmac_mediastatus);
+ ifmedia_add(&gmac->ifm, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL);
+ ifmedia_set(&gmac->ifm, IFM_ETHER | IFM_1000_T | IFM_FDX);
+
+ ether_ifattach(gmac->ifp, (char *) &gmac->eaddr);
+
+ if_setcapenable(gmac->ifp, if_getcapabilities(gmac->ifp));
+
+ return (0);
+}
+
+/*
+ * Setup the port mapping for the given GMAC.
+ *
+ * This populates sc->sc_gmac_port_map[] to point the given port
+ * entry to this gmac index. The receive path code can then use
+ * this to figure out which gmac ifp to push a receive frame into.
+ */
+int
+qcom_ess_edma_gmac_setup_port_mapping(struct qcom_ess_edma_softc *sc,
+ int gmac_id)
+{
+ struct qcom_ess_edma_gmac *gmac;
+ int i;
+
+ gmac = &sc->sc_gmac[gmac_id];
+
+ /* Skip non-setup gmacs */
+ if (gmac->enabled == false)
+ return (0);
+
+ for (i = 0; i < QCOM_ESS_EDMA_MAX_NUM_PORTS; i++) {
+ if ((gmac->port_mask & (1U << i)) == 0)
+ continue;
+ if (sc->sc_gmac_port_map[i] != -1) {
+ device_printf(sc->sc_dev,
+ "DUPLICATE GMAC port map (port %d)\n",
+ i);
+ return (ENXIO);
+ }
+
+ sc->sc_gmac_port_map[i] = gmac_id;
+
+ if (bootverbose)
+ device_printf(sc->sc_dev,
+ "ESS port %d maps to gmac%d\n",
+ i, gmac_id);
+ }
+
+ return (0);
+}
+
+/*
+ * Receive frames into the network stack.
+ *
+ * This takes the list of mbufs in the mbufq and passes them
+ * up into the appropriate ifnet context. It takes care of
+ * the network epoch as well.
+ *
+ * This must be called with no locks held.
+ */
+int
+qcom_ess_edma_gmac_receive_frames(struct qcom_ess_edma_softc *sc,
+ int rx_queue, struct mbufq *mq)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ struct epoch_tracker et;
+ struct mbuf *m;
+ if_t ifp;
+
+ ring = &sc->sc_rx_ring[rx_queue];
+
+ NET_EPOCH_ENTER(et);
+ while ((m = mbufq_dequeue(mq)) != NULL) {
+ if (m->m_pkthdr.rcvif == NULL) {
+ ring->stats.num_rx_no_gmac++;
+ m_freem(m);
+ } else {
+ ring->stats.num_rx_ok++;
+ ifp = m->m_pkthdr.rcvif;
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+ if_input(ifp, m);
+ }
+ }
+ NET_EPOCH_EXIT(et);
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h
new file mode 100644
index 000000000000..48862d058d99
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_gmac.h
@@ -0,0 +1,46 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_GMAC_H__
+#define __QCOM_ESS_EDMA_GMAC_H__
+
+extern int qcom_ess_edma_gmac_parse(struct qcom_ess_edma_softc *sc,
+ int gmac_id);
+extern int qcom_ess_edma_gmac_create_ifnet(struct qcom_ess_edma_softc *sc,
+ int gmac_id);
+extern int qcom_ess_edma_gmac_setup_port_mapping(
+ struct qcom_ess_edma_softc *sc, int gmac_id);
+
+extern int qcom_ess_edma_gmac_receive_frames(struct qcom_ess_edma_softc *sc,
+ int rx_queue,
+ struct mbufq *mq);
+
+#endif /* __QCOM_ESS_EDMA_GMAC_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
new file mode 100644
index 000000000000..1ba11db248e5
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.c
@@ -0,0 +1,752 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Reset the ESS EDMA core.
+ *
+ * This is ... problematic. There's only a single clock control
+ * for the ESS core - and that includes both the EDMA (ethernet)
+ * and switch hardware.
+ *
+ * It's also a placeholder for what the linux ess-edma driver
+ * does directly to the ess core: in some instances where only
+ * a single PHY is hooked up, ess-switch may never be
+ * initialised, and the core falls back to a very minimal
+ * switch config. That's honestly pretty bad; that kind of
+ * awareness belongs in ar40xx_switch instead.
+ *
+ * So, for now this is a big no-op, at least until everything
+ * is implemented enough that I can get the switch/phy code and
+ * this EDMA driver code to co-exist.
+ */
+int
+qcom_ess_edma_hw_reset(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_LOCK_ASSERT(sc);
+
+ device_printf(sc->sc_dev, "%s: called, TODO!\n", __func__);
+
+ /*
+ * This is where the linux ess-edma driver would reset the
+ * ESS core.
+ */
+
+ /*
+ * and here's where the linux ess-edma driver would program
+ * in the initial port config, rgmii control, traffic
+ * port forwarding and broadcast/multicast traffic forwarding.
+ *
+ * instead, this should be done by the ar40xx_switch driver!
+ */
+
+ return (0);
+}
+
+/*
+ * Get the TX interrupt moderation timer.
+ *
+ * The resolution of this register is 2uS.
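+ * (For example, the initial EDMA_TX_IMT value 0x50 (80 ticks) reads
+ * back here as 160uS.)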
+ */
+int
+qcom_ess_edma_hw_get_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t *usec)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg = reg >> EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
+ reg &= EDMA_IRQ_MODRT_TIMER_MASK;
+
+ *usec = reg * 2;
+
+ return (0);
+}
+
+
+/*
+ * Set the TX interrupt moderation timer.
+ *
+ * The resolution of this register is 2uS.
+ */
+int
+qcom_ess_edma_hw_set_tx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t usec)
+{
+ uint32_t reg;
+
+ usec = usec / 2;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
+ reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
+ << EDMA_IRQ_MODRT_TX_TIMER_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Set the RX interrupt moderation timer.
+ *
+ * The resolution of this register is 2uS.
+ */
+int
+qcom_ess_edma_hw_set_rx_intr_moderation(struct qcom_ess_edma_softc *sc,
+ uint32_t usec)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT);
+ reg &= ~(EDMA_IRQ_MODRT_TIMER_MASK << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
+ reg |= (usec & EDMA_IRQ_MODRT_TIMER_MASK)
+ << EDMA_IRQ_MODRT_RX_TIMER_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Disable all interrupts.
+ */
+int
+qcom_ess_edma_hw_intr_disable(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
+ /* Disable TX interrupts */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i), 0);
+ }
+
+ /* Disable RX interrupts */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i), 0);
+ }
+
+ /* Disable misc/WOL interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable/disable the given RX ring interrupt.
+ */
+int
+qcom_ess_edma_hw_intr_rx_intr_set_enable(struct qcom_ess_edma_softc *sc,
+ int rxq, bool state)
+{
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(rxq), state ? 1 : 0);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable/disable the given TX ring interrupt.
+ */
+int
+qcom_ess_edma_hw_intr_tx_intr_set_enable(struct qcom_ess_edma_softc *sc,
+ int txq, bool state)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(txq), state ? 1 : 0);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable interrupts.
+ */
+int
+qcom_ess_edma_hw_intr_enable(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
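+	/*
+	 * The ISR registers appear to be write-one-to-clear
+	 * (EDMA_INTR_CLEAR_TYPE_W1), so ACK any stale status before
+	 * unmasking to avoid taking an immediate interrupt for
+	 * events that were already handled.
+	 */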
+ /* ACK, then Enable TX interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_INT_MASK_Q(i),
+ sc->sc_config.tx_intr_mask);
+ }
+
+ /* ACK, then Enable RX interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_IRQS; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_INT_MASK_Q(i),
+ sc->sc_config.rx_intr_mask);
+ }
+
+ /* Disable misc/WOL interrupts */
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_IMR, 0);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_IMR, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Clear interrupt status.
+ */
+int
+qcom_ess_edma_hw_intr_status_clear(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, 0xff);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, 0xffff);
+ EDMA_REG_WRITE(sc, EDMA_REG_MISC_ISR, 0x1fff);
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_ISR, 0x1);
+
+ return (0);
+}
+
+/*
+ * ACK the given RX queue ISR.
+ *
+ * Must be called with the RX ring lock held!
+ */
+int
+qcom_ess_edma_hw_intr_rx_ack(struct qcom_ess_edma_softc *sc, int rx_queue)
+{
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[rx_queue]);
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_ISR, (1U << rx_queue));
+ (void) EDMA_REG_READ(sc, EDMA_REG_RX_ISR);
+
+ return (0);
+}
+
+/*
+ * ACK the given TX queue ISR.
+ *
+ * Must be called with the TX ring lock held!
+ */
+int
+qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc, int tx_queue)
+{
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_tx_ring[tx_queue]);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_ISR, (1U << tx_queue));
+ (void) EDMA_REG_READ(sc, EDMA_REG_TX_ISR);
+
+ return (0);
+}
+
+/*
+ * Configure the default RSS indirection table.
+ */
+int
+qcom_ess_edma_hw_configure_rss_table(struct qcom_ess_edma_softc *sc)
+{
+ int i;
+
+ /*
+ * The default IDT value configures the hash buckets
+ * to a repeating pattern of q0, q2, q4, q6.
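+	 *
+	 * (EDMA_RSS_IDT_VALUE is 0x64206420; read as eight 4-bit
+	 * entries from the low nibble up, that's 0,2,4,6,0,2,4,6 -
+	 * presumably one RX queue id per entry.)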
+ */
+ for (i = 0; i < EDMA_NUM_IDT; i++) {
+ EDMA_REG_WRITE(sc, EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
+ }
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Configure the default load balance mapping table.
+ */
+int
+qcom_ess_edma_hw_configure_load_balance_table(struct qcom_ess_edma_softc *sc)
+{
+
+ /*
+ * I think this is mapping things to queues 0,2,4,6.
+ * Linux says it's 0,1,3,4 but that doesn't match the
+ * EDMA_LB_REG_VALUE field.
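+	 *
+	 * Decoding EDMA_LB_REG_VALUE (0x6040200) as four 8-bit
+	 * entries gives 0x00, 0x02, 0x04, 0x06, which supports
+	 * the 0,2,4,6 reading.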
+ */
+ EDMA_REG_WRITE(sc, EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Configure the default virtual tx ring queues.
+ */
+int
+qcom_ess_edma_hw_configure_tx_virtual_queue(struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
+ EDMA_REG_WRITE(sc, EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Configure the default maximum AXI bus transaction size.
+ */
+int
+qcom_ess_edma_hw_configure_default_axi_transaction_size(
+ struct qcom_ess_edma_softc *sc)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_AXIW_CTRL_MAXWRSIZE,
+ EDMA_AXIW_MAXWRSIZE_VALUE);
+ return (0);
+}
+
+/*
+ * Stop the TX/RX queues.
+ */
+int
+qcom_ess_edma_hw_stop_txrx_queues(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
+ reg &= ~EDMA_RXQ_CTRL_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
+ reg &= ~EDMA_TXQ_CTRL_TXQ_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * Stop the EDMA block, disable interrupts.
+ */
+int
+qcom_ess_edma_hw_stop(struct qcom_ess_edma_softc *sc)
+{
+ int ret;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ ret = qcom_ess_edma_hw_intr_disable(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_intr_disable failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ ret = qcom_ess_edma_hw_intr_status_clear(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_intr_status_clear failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ ret = qcom_ess_edma_hw_stop_txrx_queues(sc);
+ if (ret != 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_INTERRUPT,
+ "%s: hw_stop_txrx_queues failed (%d)\n",
+ __func__,
+ ret);
+ }
+
+ return (0);
+}
+
+/*
+ * Update the producer index for the given receive queue.
+ *
+ * Note: the RX ring lock must be held!
+ *
+ * Return 0 if OK, an error number if there's an error.
+ */
+int
+qcom_ess_edma_hw_rfd_prod_index_update(struct qcom_ess_edma_softc *sc,
+ int queue, int idx)
+{
+ uint32_t reg;
+
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: called; q=%d idx=0x%x\n",
+ __func__, queue, idx);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: q=%d reg was 0x%08x\n", __func__, queue, reg);
+ reg &= ~EDMA_RFD_PROD_IDX_BITS;
+ reg |= idx;
+ EDMA_REG_WRITE(sc, EDMA_REG_RFD_IDX_Q(queue), reg);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "%s: q=%d reg now 0x%08x\n", __func__, queue, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Fetch the consumer index for the given receive queue.
+ *
+ * Returns the current consumer index.
+ *
+ * Note - since it's used in statistics/debugging it isn't asserting the
+ * RX ring lock, so be careful when/how you use this!
+ */
+int
+qcom_ess_edma_hw_rfd_get_cons_index(struct qcom_ess_edma_softc *sc, int queue)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RFD_IDX_Q(queue));
+	return ((reg >> EDMA_RFD_CONS_IDX_SHIFT) & EDMA_RFD_CONS_IDX_MASK);
+}
+
+/*
+ * Update the software consumed index to the hardware, so
+ * it knows what we've read.
+ *
+ * Note: the RX ring lock must be held when calling this!
+ *
+ * Returns 0 if OK, error number if error.
+ */
+int
+qcom_ess_edma_hw_rfd_sw_cons_index_update(struct qcom_ess_edma_softc *sc,
+ int queue, int idx)
+{
+ EDMA_RING_LOCK_ASSERT(&sc->sc_rx_ring[queue]);
+
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_SW_CONS_IDX_Q(queue), idx);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup initial hardware configuration.
+ */
+int
+qcom_ess_edma_hw_setup(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_INTR_CTRL);
+ reg &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT);
+ reg |= sc->sc_state.intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_INTR_CTRL, reg);
+
+ /* Clear wake-on-lan config */
+ EDMA_REG_WRITE(sc, EDMA_REG_WOL_CTRL, 0);
+
+ /* configure initial interrupt moderation config */
+ reg = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT);
+ reg |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_IRQ_MODRT_TIMER_INIT, reg);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup TX DMA burst configuration.
+ */
+int
+qcom_ess_edma_hw_setup_tx(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ reg = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT);
+ reg |= EDMA_TXQ_CTRL_TPD_BURST_EN;
+ reg |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Setup default RSS, RX burst/prefetch/interrupt thresholds.
+ *
+ * Strip VLANs, those are offloaded in the RX descriptor.
+ */
+int
+qcom_ess_edma_hw_setup_rx(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ /* Configure RSS types */
+ EDMA_REG_WRITE(sc, EDMA_REG_RSS_TYPE, sc->sc_config.rss_type);
+
+ /* Configure RFD burst */
+ reg = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT);
+ /* .. and RFD prefetch threshold */
+ reg |= (EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT);
+ /* ... and threshold to generate RFD interrupt */
+ reg |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT);
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC1, reg);
+
+ /* Set RX FIFO threshold to begin DMAing data to host */
+ reg = EDMA_FIFO_THRESH_128_BYTE;
+	/* Strip VLAN tags; the tag is delivered in the RX descriptor */
+ reg |= EDMA_RXQ_CTRL_RMV_VLAN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+ return (0);
+}
+
+/*
+ * XXX TODO: this particular routine is a bit big and likely should be split
+ * across main, hw, desc, rx and tx. But to expedite initial bring-up,
+ * let's just commit the sins here and get receive up and going.
+ */
+int
+qcom_ess_edma_hw_setup_txrx_desc_rings(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg, i, idx;
+ int len;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ /*
+ * setup base addresses for each transmit ring, and
+ * read in the initial index to use for transmit.
+ */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_TX_RINGS; i++) {
+ /* Descriptor ring based address */
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_MGMT,
+ "TXQ[%d]: ring paddr=0x%08lx\n",
+ i, sc->sc_tx_ring[i].hw_desc_paddr);
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_BASE_ADDR_Q(i),
+ sc->sc_tx_ring[i].hw_desc_paddr);
+
+ /* And now, grab the consumer index */
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(i));
+		idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
+
+ sc->sc_tx_ring[i].next_to_fill = idx;
+ sc->sc_tx_ring[i].next_to_clean = idx;
+
+ /* Update prod and sw consumer indexes */
+ reg &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT);
+ reg |= idx;
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(i), reg);
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(i), idx);
+
+ /* Set the ring size */
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_RING_SIZE,
+ sc->sc_config.tx_ring_count & EDMA_TPD_RING_SIZE_MASK);
+ }
+
+ /* Set base addresses for each RFD ring */
+ for (i = 0; i < QCOM_ESS_EDMA_NUM_RX_RINGS; i++) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING_MGMT,
+ "RXQ[%d]: ring paddr=0x%08lx\n",
+ i, sc->sc_rx_ring[i].hw_desc_paddr);
+ EDMA_REG_WRITE(sc, EDMA_REG_RFD_BASE_ADDR_Q(i),
+ sc->sc_rx_ring[i].hw_desc_paddr);
+ }
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ /* Configure RX buffer size */
+ len = sc->sc_config.rx_buf_size;
+ if (sc->sc_config.rx_buf_ether_align)
+ len -= ETHER_ALIGN;
+ reg = (len & EDMA_RX_BUF_SIZE_MASK)
+ << EDMA_RX_BUF_SIZE_SHIFT;
+ /* .. and RFD ring size */
+ reg |= (sc->sc_config.rx_ring_count & EDMA_RFD_RING_SIZE_MASK)
+ << EDMA_RFD_RING_SIZE_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_RX_DESC0, reg);
+
+ /* Disable the TX low/high watermark (for interrupts?) */
+ EDMA_REG_WRITE(sc, EDMA_REG_TXF_WATER_MARK, 0);
+
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ /* Load all the ring base addresses into the hardware */
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TX_SRAM_PART);
+ reg |= 1 << EDMA_LOAD_PTR_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SRAM_PART, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable general MAC TX DMA.
+ */
+int
+qcom_ess_edma_hw_tx_enable(struct qcom_ess_edma_softc *sc)
+{
+ uint32_t reg;
+
+ EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TXQ_CTRL);
+ reg |= EDMA_TXQ_CTRL_TXQ_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_TXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Enable general MAC RX DMA.
+ */
+int
+qcom_ess_edma_hw_rx_enable(struct qcom_ess_edma_softc *sc)
+{
+	uint32_t reg;
+
+	EDMA_LOCK_ASSERT(sc);
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_RXQ_CTRL);
+ reg |= EDMA_RXQ_CTRL_EN;
+ EDMA_REG_WRITE(sc, EDMA_REG_RXQ_CTRL, reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Read the TPD consumer index register for the given transmit ring.
+ */
+int
+qcom_ess_edma_hw_tx_read_tpd_cons_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t *idx)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
+ *idx = (reg >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK;
+
+ return (0);
+}
+
+/*
+ * Update the TPD producer index for the given transmit ring.
+ */
+int
+qcom_ess_edma_hw_tx_update_tpd_prod_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t idx)
+{
+ uint32_t reg;
+
+ EDMA_REG_BARRIER_READ(sc);
+ reg = EDMA_REG_READ(sc, EDMA_REG_TPD_IDX_Q(queue_id));
+ reg &= ~EDMA_TPD_PROD_IDX_BITS;
+ reg |= (idx & EDMA_TPD_PROD_IDX_MASK) << EDMA_TPD_PROD_IDX_SHIFT;
+ EDMA_REG_WRITE(sc, EDMA_REG_TPD_IDX_Q(queue_id), reg);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
+
+/*
+ * Update the TPD software consumer index register for the given
+ * transmit ring - ie, what software has cleaned.
+ */
+int
+qcom_ess_edma_hw_tx_update_cons_idx(struct qcom_ess_edma_softc *sc,
+ int queue_id, uint16_t idx)
+{
+
+ EDMA_REG_WRITE(sc, EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), idx);
+ EDMA_REG_BARRIER_WRITE(sc);
+
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
new file mode 100644
index 000000000000..3ee3bc64b658
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_hw.h
@@ -0,0 +1,86 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_HW_H__
+#define __QCOM_ESS_EDMA_HW_H__
+
+extern int qcom_ess_edma_hw_reset(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_get_tx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t *usec);
+extern int qcom_ess_edma_hw_set_tx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t usec);
+extern int qcom_ess_edma_hw_set_rx_intr_moderation(
+ struct qcom_ess_edma_softc *sc, uint32_t usec);
+extern int qcom_ess_edma_hw_intr_disable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_rx_intr_set_enable(
+ struct qcom_ess_edma_softc *sc, int rxq, bool state);
+extern int qcom_ess_edma_hw_intr_tx_intr_set_enable(
+ struct qcom_ess_edma_softc *sc, int txq, bool state);
+extern int qcom_ess_edma_hw_intr_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_status_clear(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_intr_rx_ack(struct qcom_ess_edma_softc *sc,
+ int rx_queue);
+extern int qcom_ess_edma_hw_intr_tx_ack(struct qcom_ess_edma_softc *sc,
+ int tx_queue);
+extern int qcom_ess_edma_hw_configure_rss_table(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_load_balance_table(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_tx_virtual_queue(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_configure_default_axi_transaction_size(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_stop_txrx_queues(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_stop(struct qcom_ess_edma_softc *sc);
+
+extern int qcom_ess_edma_hw_rfd_prod_index_update(
+ struct qcom_ess_edma_softc *sc, int queue, int idx);
+extern int qcom_ess_edma_hw_rfd_get_cons_index(
+ struct qcom_ess_edma_softc *sc, int queue);
+extern int qcom_ess_edma_hw_rfd_sw_cons_index_update(
+ struct qcom_ess_edma_softc *sc, int queue, int idx);
+
+extern int qcom_ess_edma_hw_setup(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_tx(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_rx(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_setup_txrx_desc_rings(
+ struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_tx_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_rx_enable(struct qcom_ess_edma_softc *sc);
+extern int qcom_ess_edma_hw_tx_read_tpd_cons_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t *idx);
+extern int qcom_ess_edma_hw_tx_update_tpd_prod_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t idx);
+extern int qcom_ess_edma_hw_tx_update_cons_idx(
+ struct qcom_ess_edma_softc *sc, int queue_id, uint16_t idx);
+
+#endif	/* __QCOM_ESS_EDMA_HW_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h
new file mode 100644
index 000000000000..0fa1e37f7e5b
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_reg.h
@@ -0,0 +1,429 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __QCOM_ESS_EDMA_REG_H__
+#define __QCOM_ESS_EDMA_REG_H__
+
+/*
+ * Alignment of descriptor ring memory allocation.
+ */
+#define EDMA_DESC_RING_ALIGN PAGE_SIZE
+
+/* Not sure if this is really valid or not */
+#define EDMA_DESC_MAX_BUFFER_SIZE 4096
+
+/* The hardware can accept both of these, so we don't need bounce buffers! */
+#define ESS_EDMA_TX_BUFFER_ALIGN 1
+#define ESS_EDMA_RX_BUFFER_ALIGN 1
+
+/* register definition */
+#define EDMA_REG_MAS_CTRL 0x0
+#define EDMA_REG_TIMEOUT_CTRL 0x004
+#define EDMA_REG_DBG0 0x008
+#define EDMA_REG_DBG1 0x00C
+#define EDMA_REG_SW_CTRL0 0x100
+#define EDMA_REG_SW_CTRL1 0x104
+
+/* Interrupt Status Register */
+#define EDMA_REG_RX_ISR 0x200
+#define EDMA_REG_TX_ISR 0x208
+#define EDMA_REG_MISC_ISR 0x210
+#define EDMA_REG_WOL_ISR 0x218
+
+#define EDMA_MISC_ISR_RX_URG_Q(x)	(1U << (x))
+
+#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100
+#define EDMA_MISC_ISR_AXIR_ERR 0x00000200
+#define EDMA_MISC_ISR_TXF_DEAD 0x00000400
+#define EDMA_MISC_ISR_AXIW_ERR 0x00000800
+#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000
+
+#define EDMA_WOL_ISR 0x00000001
+
+/* Interrupt Mask Register */
+#define EDMA_REG_MISC_IMR 0x214
+#define EDMA_REG_WOL_IMR 0x218
+
+#define EDMA_RX_IMR_NORMAL_MASK 0x1
+#define EDMA_TX_IMR_NORMAL_MASK 0x1
+#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF
+#define EDMA_WOL_IMR_NORMAL_MASK 0x1
+
+/* Edma receive consumer index */
+#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */
+/* Edma transmit consumer index */
+#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */
+
+/* IRQ Moderator Initial Timer Register */
+#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280
+#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF
+#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0
+#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16
+
+/* Interrupt Control Register */
+#define EDMA_REG_INTR_CTRL 0x284
+#define EDMA_INTR_CLR_TYP_SHIFT 0
+#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1
+#define EDMA_INTR_CLEAR_TYPE_W1 0
+#define EDMA_INTR_CLEAR_TYPE_R 1
+
+/* RX Interrupt Mask Register */
+#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */
+
+/* TX Interrupt mask register */
+#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */
+
+/* Load Ptr Register
+ * Software sets this bit after initializing the head and tail pointers.
+ */
+#define EDMA_REG_TX_SRAM_PART 0x400
+#define EDMA_LOAD_PTR_SHIFT 16
+
+/* TXQ Control Register */
+#define EDMA_REG_TXQ_CTRL 0x404
+#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10
+#define EDMA_TXQ_CTRL_TXQ_EN 0x20
+#define EDMA_TXQ_CTRL_ENH_MODE 0x40
+#define EDMA_TXQ_CTRL_LS_8023_EN 0x80
+#define EDMA_TXQ_CTRL_TPD_BURST_EN 0x100
+#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200
+#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF
+#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF
+#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0
+#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16
+
+#define EDMA_REG_TXF_WATER_MARK	0x408	/* in units of 8 bytes */
+#define EDMA_TXF_WATER_MARK_MASK 0x0FFF
+#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0
+#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16
+#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000
+
+/* WRR Control Register */
+#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c
+#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410
+#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414
+#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418
+
+/* Weighted round robin (WRR): takes a queue as input and computes
+ * the starting bit at which that queue's 5-bit weight is written
+ * within the relevant WRR control register.
+ */
+#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20)
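+/* e.g. queue 6: EDMA_WRR_SHIFT(6) = 10, so its 5-bit weight lives
+ * at bits 10..14 of EDMA_REG_WRR_CTRL_Q4_Q7.
+ */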
+
+/* Tx Descriptor Control Register */
+#define EDMA_REG_TPD_RING_SIZE 0x41C
+#define EDMA_TPD_RING_SIZE_SHIFT 0
+#define EDMA_TPD_RING_SIZE_MASK 0xFFFF
+
+/* Transmit descriptor base address */
+#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */
+
+/* TPD Index Register */
+#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */
+
+#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF
+#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000
+#define EDMA_TPD_PROD_IDX_MASK 0xFFFF
+#define EDMA_TPD_CONS_IDX_MASK 0xFFFF
+#define EDMA_TPD_PROD_IDX_SHIFT 0
+#define EDMA_TPD_CONS_IDX_SHIFT 16
+
+/* TX Virtual Queue Mapping Control Register */
+#define EDMA_REG_VQ_CTRL0 0x4A0
+#define EDMA_REG_VQ_CTRL1 0x4A4
+
+/* Virtual QID shift: takes a queue as input and computes the
+ * position of that queue's virtual QID within the virtual QID
+ * control registers.
+ */
+#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24)
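+/* e.g. queue 9: EDMA_VQ_ID_SHIFT(9) = 3, so its 3-bit virtual QID
+ * lives at bits 3..5 of EDMA_REG_VQ_CTRL1.
+ */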
+
+/* Virtual Queue Default Value */
+#define EDMA_VQ_REG_VALUE 0x240240
+
+/* Tx side Port Interface Control Register */
+#define EDMA_REG_PORT_CTRL 0x4A8
+#define EDMA_PAD_EN_SHIFT 15
+
+/* Tx side VLAN Configuration Register */
+#define EDMA_REG_VLAN_CFG 0x4AC
+
+#define EDMA_TX_CVLAN 16
+#define EDMA_TX_INS_CVLAN 17
+#define EDMA_TX_CVLAN_TAG_SHIFT 0
+
+#define EDMA_TX_SVLAN 14
+#define EDMA_TX_INS_SVLAN 15
+#define EDMA_TX_SVLAN_TAG_SHIFT 16
+
+/* Tx Queue Packet Statistic Register */
+#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */
+
+#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF
+
+/* Tx Queue Byte Statistic Register */
+#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */
+
+/* Load Balance Based Ring Offset Register */
+#define EDMA_REG_LB_RING 0x800
+#define EDMA_LB_RING_ENTRY_MASK 0xff
+#define EDMA_LB_RING_ID_MASK 0x7
+#define EDMA_LB_RING_PROFILE_ID_MASK 0x3
+#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8
+#define EDMA_LB_RING_ID_OFFSET 0
+#define EDMA_LB_RING_PROFILE_ID_OFFSET 3
+#define EDMA_LB_REG_VALUE 0x6040200
+
+/* Load Balance Priority Mapping Register */
+#define EDMA_REG_LB_PRI_START 0x804
+#define EDMA_REG_LB_PRI_END 0x810
+#define EDMA_LB_PRI_REG_INC 4
+#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4
+#define EDMA_LB_PRI_ENTRY_MASK 0xf
+
+/* RSS Priority Mapping Register */
+#define EDMA_REG_RSS_PRI 0x820
+#define EDMA_RSS_PRI_ENTRY_MASK 0xf
+#define EDMA_RSS_RING_ID_MASK 0x7
+#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4
+
+/* RSS Indirection Register */
+#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. of indirection table */
+#define EDMA_NUM_IDT 16
+#define EDMA_RSS_IDT_VALUE 0x64206420
+
+/* Default RSS Ring Register */
+#define EDMA_REG_DEF_RSS 0x890
+#define EDMA_DEF_RSS_MASK 0x7
+
+/* RSS Hash Function Type Register */
+#define EDMA_REG_RSS_TYPE 0x894
+#define EDMA_RSS_TYPE_NONE 0x01
+#define EDMA_RSS_TYPE_IPV4TCP 0x02
+#define EDMA_RSS_TYPE_IPV6_TCP 0x04
+#define EDMA_RSS_TYPE_IPV4_UDP 0x08
+#define EDMA_RSS_TYPE_IPV6UDP 0x10
+#define EDMA_RSS_TYPE_IPV4 0x20
+#define EDMA_RSS_TYPE_IPV6 0x40
+#define EDMA_RSS_HASH_MODE_MASK 0x7f
+
+#define EDMA_REG_RSS_HASH_VALUE 0x8C0
+
+#define EDMA_REG_RSS_TYPE_RESULT 0x8C4
+
+/* rrd5 */
+#define EDMA_HASH_TYPE_SHIFT 12
+#define EDMA_HASH_TYPE_MASK 0xf
+#define EDMA_RRD_RSS_TYPE_NONE 0
+#define EDMA_RRD_RSS_TYPE_IPV4TCP 1
+#define EDMA_RRD_RSS_TYPE_IPV6_TCP 2
+#define EDMA_RRD_RSS_TYPE_IPV4_UDP 3
+#define EDMA_RRD_RSS_TYPE_IPV6UDP 4
+#define EDMA_RRD_RSS_TYPE_IPV4 5
+#define EDMA_RRD_RSS_TYPE_IPV6 6
+
+#define EDMA_RFS_FLOW_ENTRIES 1024
+#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1)
+#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128
+
+/* RFD Base Address Register */
+#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */
+
+/* RFD Index Register */
+#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2))
+
+#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF
+#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000
+#define EDMA_RFD_PROD_IDX_MASK 0xFFF
+#define EDMA_RFD_CONS_IDX_MASK 0xFFF
+#define EDMA_RFD_PROD_IDX_SHIFT 0
+#define EDMA_RFD_CONS_IDX_SHIFT 16
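+
+/* e.g. an EDMA_REG_RFD_IDX_Q() read of 0x01230004 decodes to a
+ * consumer index of 0x123 and a producer index of 0x004.
+ */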
+
+/* Rx Descriptor Control Register */
+#define EDMA_REG_RX_DESC0 0xA10
+#define EDMA_RFD_RING_SIZE_MASK 0xFFF
+#define EDMA_RX_BUF_SIZE_MASK 0xFFFF
+#define EDMA_RFD_RING_SIZE_SHIFT 0
+#define EDMA_RX_BUF_SIZE_SHIFT 16
+
+#define EDMA_REG_RX_DESC1 0xA14
+#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F
+#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F
+#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF
+#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0
+#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8
+#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16
+
+/* RXQ Control Register */
+#define EDMA_REG_RXQ_CTRL 0xA18
+#define EDMA_FIFO_THRESH_TYPE_SHIF 0
+#define EDMA_FIFO_THRESH_128_BYTE 0x0
+#define EDMA_FIFO_THRESH_64_BYTE 0x1
+#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002
+#define EDMA_RXQ_CTRL_EN 0x0000FF00
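+/* (EDMA_RXQ_CTRL_EN: presumably one enable bit per RX queue, bits 8..15) */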
+
+/* AXI Burst Size Config */
+#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C
+#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0
+
+/* Rx Statistics Register */
+#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */
+#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */
+
+/* WoL Pattern Length Register */
+#define EDMA_REG_WOL_PATTERN_LEN0 0xC00
+#define EDMA_WOL_PT_LEN_MASK 0xFF
+#define EDMA_WOL_PT0_LEN_SHIFT 0
+#define EDMA_WOL_PT1_LEN_SHIFT 8
+#define EDMA_WOL_PT2_LEN_SHIFT 16
+#define EDMA_WOL_PT3_LEN_SHIFT 24
+
+#define EDMA_REG_WOL_PATTERN_LEN1 0xC04
+#define EDMA_WOL_PT4_LEN_SHIFT 0
+#define EDMA_WOL_PT5_LEN_SHIFT 8
+#define EDMA_WOL_PT6_LEN_SHIFT 16
+
+/* WoL Control Register */
+#define EDMA_REG_WOL_CTRL 0xC08
+#define EDMA_WOL_WK_EN 0x00000001
+#define EDMA_WOL_MG_EN 0x00000002
+#define EDMA_WOL_PT0_EN 0x00000004
+#define EDMA_WOL_PT1_EN 0x00000008
+#define EDMA_WOL_PT2_EN 0x00000010
+#define EDMA_WOL_PT3_EN 0x00000020
+#define EDMA_WOL_PT4_EN 0x00000040
+#define EDMA_WOL_PT5_EN 0x00000080
+#define EDMA_WOL_PT6_EN 0x00000100
+
+/* MAC Control Register */
+#define EDMA_REG_MAC_CTRL0 0xC20
+#define EDMA_REG_MAC_CTRL1 0xC24
+
+/* WoL Pattern Register */
+#define EDMA_REG_WOL_PATTERN_START 0x5000
+#define EDMA_PATTERN_PART_REG_OFFSET 0x40
+
+/* TX descriptor fields */
+#define EDMA_TPD_HDR_SHIFT 0
+#define EDMA_TPD_PPPOE_EN 0x00000100
+#define EDMA_TPD_IP_CSUM_EN 0x00000200
+#define EDMA_TPD_TCP_CSUM_EN	0x00000400
+#define EDMA_TPD_UDP_CSUM_EN 0x00000800
+#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00
+#define EDMA_TPD_LSO_EN 0x00001000
+#define EDMA_TPD_LSO_V2_EN 0x00002000
+#define EDMA_TPD_IPV4_EN 0x00010000
+#define EDMA_TPD_MSS_MASK 0x1FFF
+#define EDMA_TPD_MSS_SHIFT 18
+#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18
+#define EDMA_TPD_EOP 0x80000000
+
+/* word3 */
+#define EDMA_TPD_PORT_BITMAP_SHIFT 18
+#define EDMA_TPD_FROM_CPU_SHIFT 25
+#define EDMA_FROM_CPU_MASK 0x80
+
+/* TX descriptor - little endian */
+struct qcom_ess_edma_tx_desc {
+ uint16_t len; /* full packet including CRC */
+ uint16_t svlan_tag; /* vlan tag */
+ uint32_t word1; /* byte 4-7 */
+ uint32_t addr; /* address of buffer */
+ uint32_t word3; /* byte 12 */
+} __packed;
+
+/* RRD descriptor fields */
+#define EDMA_RRD_NUM_RFD_MASK 0x000F
+#define EDMA_RRD_SVLAN 0x8000
+#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF
+
+#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF
+#define EDMA_RRD_CSUM_FAIL_MASK 0xC000
+#define EDMA_RRD_CVLAN 0x0001
+#define EDMA_RRD_DESC_VALID 0x8000
+
+#define EDMA_RRD_PRIORITY_SHIFT 4
+#define EDMA_RRD_PRIORITY_MASK 0x7
+#define EDMA_RRD_PORT_TYPE_SHIFT 7
+#define EDMA_RRD_PORT_TYPE_MASK 0x1F
+
+#define EDMA_PORT_ID_SHIFT 12
+#define EDMA_PORT_ID_MASK 0x7
+
+/* RX RRD descriptor - 16 bytes */
+struct qcom_edma_rx_return_desc {
+ uint16_t rrd0;
+ uint16_t rrd1;
+ uint16_t rrd2;
+ uint16_t rrd3;
+ uint16_t rrd4;
+ uint16_t rrd5;
+ uint16_t rrd6;
+ uint16_t rrd7;
+} __packed;
+
+/* RX RFD descriptor - little endian */
+struct qcom_ess_edma_rx_free_desc {
+ uint32_t addr; /* buffer addr */
+} __packed;
+
+#define ESS_RGMII_CTRL 0x0004
+
+/* Configurations */
+#define EDMA_INTR_CLEAR_TYPE 0
+#define EDMA_INTR_SW_IDX_W_TYPE 0
+#define EDMA_FIFO_THRESH_TYPE 0
+#define EDMA_RSS_TYPE 0
+#define EDMA_RX_IMT 0x0020
+#define EDMA_TX_IMT 0x0050
+#define EDMA_TPD_BURST 5
+#define EDMA_TXF_BURST 0x100
+#define EDMA_RFD_BURST 8
+#define EDMA_RFD_THR 16
+#define EDMA_RFD_LTHR 0
+
+#endif /* __QCOM_ESS_EDMA_REG_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
new file mode 100644
index 000000000000..d39c0117133a
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.c
@@ -0,0 +1,514 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/endian.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_rx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Map the given RX queue to a given CPU.
+ */
+int
+qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
+{
+ return (queue % mp_ncpus);
+}
+
+int
+qcom_ess_edma_rx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ int i, ret;
+
+ for (i = 0; i < EDMA_RX_RING_SIZE; i++) {
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", i);
+ return (EINVAL);
+ }
+ rxd->m = NULL;
+ ret = bus_dmamap_create(ring->buffer_dma_tag,
+ BUS_DMA_NOWAIT,
+ &rxd->m_dmamap);
+		if (ret != 0) {
+			device_printf(sc->sc_dev,
+			    "%s: failed to create dmamap (%d)\n",
+			    __func__, ret);
+			return (ret);
+		}
+ }
+
+ return (0);
+}
+
+int
+qcom_ess_edma_rx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ device_printf(sc->sc_dev, "%s: TODO\n", __func__);
+ return (0);
+}
+
+/*
+ * Allocate a receive buffer for the given ring/index, setup DMA.
+ *
+ * The caller must have called the ring prewrite routine in order
+ * to flush the ring memory if needed before writing to it.
+ * It's not done here so we don't do it on /every/ ring update.
+ *
+ * Returns an error if the slot is full or unable to fill it;
+ * the caller should then figure out how to cope.
+ */
+int
+qcom_ess_edma_rx_buf_alloc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx)
+{
+ struct mbuf *m;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ struct qcom_ess_edma_rx_free_desc *ds;
+ bus_dma_segment_t segs[1];
+ int error;
+ int nsegs;
+
+ /* Get the software/hardware descriptors we're going to update */
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR; couldn't get sw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+		    "ERROR: couldn't get hw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ /* If this ring has an mbuf already then return error */
+ if (rxd->m != NULL) {
+ device_printf(sc->sc_dev,
+ "ERROR: sw desc idx %d already has an mbuf\n",
+ idx);
+ return (EINVAL); /* XXX */
+ }
+
+ /* Allocate mbuf */
+ m = m_get2(sc->sc_config.rx_buf_size, M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (m == NULL) {
+ /* XXX keep statistics */
+ device_printf(sc->sc_dev, "ERROR: failed to allocate mbuf\n");
+ return (ENOMEM);
+ }
+
+ /* Load dma map, get physical memory address of mbuf */
+ nsegs = 1;
+ m->m_pkthdr.len = m->m_len = sc->sc_config.rx_buf_size;
+
+ /* ETHER_ALIGN hack */
+ if (sc->sc_config.rx_buf_ether_align)
+ m_adj(m, ETHER_ALIGN);
+ error = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag, rxd->m_dmamap,
+ m, segs, &nsegs, 0);
+	if (error != 0 || nsegs != 1) {
+		device_printf(sc->sc_dev,
+		    "ERROR: couldn't load mbuf dmamap (%d) (nsegs=%d)\n",
+		    error, nsegs);
+		m_freem(m);
+		return (error != 0 ? error : EINVAL);
+	}
+
+ /* Populate sw and hw desc */
+ rxd->m = m;
+ rxd->m_physaddr = segs[0].ds_addr;
+
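+	/*
+	 * As populated here, the hardware RX free descriptor is simply
+	 * the 32-bit little-endian physical address of the buffer.
+	 */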
+ ds->addr = htole32(segs[0].ds_addr);
+
+ ring->stats.num_added++;
+
+ return (0);
+}
+
+/*
+ * Remove a receive buffer from the given ring/index.
+ *
+ * This clears the software/hardware index and unmaps the mbuf;
+ * the returned mbuf will be owned by the caller.
+ */
+struct mbuf *
+qcom_ess_edma_rx_buf_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx)
+{
+ struct mbuf *m;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ struct qcom_ess_edma_rx_free_desc *ds;
+
+ /* Get the software/hardware descriptors we're going to update */
+ rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+		    "ERROR: couldn't get sw desc (idx %d)\n", idx);
+ return (NULL);
+ }
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+		    "ERROR: couldn't get hw desc (idx %d)\n", idx);
+ return (NULL);
+ }
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+	    "%s: idx=%u, rxd=%p, ds=%p, maddr=0x%08x/0x%08lx\n",
+ __func__, idx, rxd, ds, ds->addr, rxd->m_physaddr);
+
+ /* No mbuf? return null; it's fine */
+ if (rxd->m == NULL) {
+ return (NULL);
+ }
+
+ /* Flush mbuf */
+ bus_dmamap_sync(ring->buffer_dma_tag, rxd->m_dmamap,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ /* Unload */
+ bus_dmamap_unload(ring->buffer_dma_tag, rxd->m_dmamap);
+
+ /* Remove sw/hw desc entries */
+ m = rxd->m;
+ rxd->m = NULL;
+
+#ifdef ESS_EDMA_DEBUG_CLEAR_DESC
+ /*
+ * Note: removing hw entries is purely for correctness; it may be
+ * VERY SLOW!
+ */
+ ds->addr = 0;
+#endif
+
+ ring->stats.num_cleaned++;
+
+ return (m);
+}
+
+/*
+ * Fill the given RX ring with up to 'num' entries, or until the
+ * ring is full.
+ * It will also update the producer index for the given queue.
+ *
+ * Returns 0 if OK, error if there's a problem.
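+ *
+ * Must be called with the ring lock held.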
+ */
+int
+qcom_ess_edma_rx_ring_fill(struct qcom_ess_edma_softc *sc,
+ int queue, int num)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ int num_fill;
+ int idx;
+ int error;
+ int prod_index;
+ int n = 0;
+
+ ring = &sc->sc_rx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+	num_fill = num;
+	if (num_fill > ring->ring_count - 1)
+		num_fill = ring->ring_count - 1;
+ idx = ring->next_to_fill;
+
+ while (num_fill != 0) {
+ error = qcom_ess_edma_rx_buf_alloc(sc, ring, idx);
+ if (error != 0) {
+ device_printf(sc->sc_dev,
+ "ERROR: queue %d: failed to alloc rx buf (%d)\n",
+ queue, error);
+ break;
+ }
+ num_fill--;
+
+ /* Update ring index, wrap at ring_count */
+ idx++;
+ if (idx >= ring->ring_count)
+ idx = 0;
+ n++;
+ }
+
+ ring->next_to_fill = idx;
+
+ /* Flush ring updates before HW index is updated */
+ qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);
+
+	/*
+	 * The producer index is the slot BEFORE the next slot to be
+	 * filled, i.e. the fill index minus one, wrapped at zero.
+	 */
+ if (idx == 0)
+ prod_index = ring->ring_count - 1;
+ else
+ prod_index = idx - 1;
+ (void) qcom_ess_edma_hw_rfd_prod_index_update(sc, queue, prod_index);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+ "%s: queue %d: added %d bufs, prod_idx=%u\n",
+ __func__, queue, n, prod_index);
+
+ return (0);
+}
+
+/*
+ * Run through the RX ring, completing received frames.
+ *
+ * Completed frames are placed on the caller-supplied mbufq, ready
+ * to be dispatched up to the network stack; the ring is then
+ * re-filled.
+ *
+ * Must be called with the ring lock held.
+ */
+int
+qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc, int queue,
+ struct mbufq *mq)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ struct qcom_ess_edma_sw_desc_rx *rxd;
+ int n, cleaned_count, len;
+ uint16_t sw_next_to_clean, hw_next_to_clean;
+ struct mbuf *m;
+ struct qcom_edma_rx_return_desc *rrd;
+ int num_rfds, port_id, priority, hash_type, hash_val, flow_cookie, vlan;
+	bool rx_checksum;
+	int port_vlan;
+
+ ring = &sc->sc_rx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);
+
+ sw_next_to_clean = ring->next_to_clean;
+ hw_next_to_clean = 0;
+ cleaned_count = 0;
+
+	for (n = 0; n < EDMA_RX_RING_SIZE - 1; n++) {
+		/* Reset per-frame state */
+		rx_checksum = false;
+		port_vlan = -1;
+
+		rxd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
+		    sw_next_to_clean);
+ if (rxd == NULL) {
+ device_printf(sc->sc_dev,
+			    "ERROR: couldn't get sw desc (idx %d)\n",
+ sw_next_to_clean);
+ return (EINVAL);
+ }
+
+ hw_next_to_clean = qcom_ess_edma_hw_rfd_get_cons_index(sc,
+ queue);
+ if (hw_next_to_clean == sw_next_to_clean)
+ break;
+
+		/* Unmap the mbuf at this index */
+		m = qcom_ess_edma_rx_buf_clean(sc, ring, sw_next_to_clean);
+		sw_next_to_clean = (sw_next_to_clean + 1) % ring->ring_count;
+		cleaned_count++;
+
+		/* If the slot was somehow empty, move along */
+		if (m == NULL)
+			continue;
+
+ /* Get the RRD header */
+ rrd = mtod(m, struct qcom_edma_rx_return_desc *);
+ if (rrd->rrd7 & EDMA_RRD_DESC_VALID) {
+ len = rrd->rrd6 & EDMA_RRD_PKT_SIZE_MASK;
+			num_rfds = rrd->rrd1 & EDMA_RRD_NUM_RFD_MASK;
+ port_id = (rrd->rrd1 >> EDMA_PORT_ID_SHIFT)
+ & EDMA_PORT_ID_MASK;
+ priority = (rrd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT)
+ & EDMA_RRD_PRIORITY_MASK;
+ hash_type = (rrd->rrd5 >> EDMA_HASH_TYPE_SHIFT)
+ & EDMA_HASH_TYPE_MASK;
+ hash_val = rrd->rrd2;
+ flow_cookie = rrd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK;
+ vlan = rrd->rrd4;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: len=%d, num_rfds=%d, port_id=%d,"
+ " priority=%d, hash_type=%d, hash_val=%d,"
+ " flow_cookie=%d, vlan=%d\n",
+ __func__,
+ len,
+ num_rfds,
+ port_id,
+ priority,
+ hash_type,
+ hash_val,
+ flow_cookie,
+ vlan);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: flags: L4 checksum"
+ " fail=%d, 802.1q vlan=%d, 802.1ad vlan=%d\n",
+ __func__,
+ !! (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK),
+ !! (rrd->rrd7 & EDMA_RRD_CVLAN),
+ !! (rrd->rrd1 & EDMA_RRD_SVLAN));
+		} else {
+			/*
+			 * No valid RRD header; we can't size or route
+			 * this frame, so drop it.
+			 */
+			m_freem(m);
+			continue;
+		}
+
+ /* Payload starts after the RRD header */
+ m_adj(m, sizeof(struct qcom_edma_rx_return_desc));
+
+ /* Set mbuf length now */
+ m->m_len = m->m_pkthdr.len = len;
+
+		/*
+		 * Set rcvif to the relevant GMAC ifp; the GMAC receive
+		 * path checks this field to dispatch the frame to the
+		 * right interface, and will drop the frame for us if
+		 * it's left NULL.
+		 */
+ m->m_pkthdr.rcvif = NULL;
+ if (sc->sc_gmac_port_map[port_id] != -1) {
+ struct qcom_ess_edma_gmac *gmac;
+ gmac = &sc->sc_gmac[sc->sc_gmac_port_map[port_id]];
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_FRAME,
+ "%s: port_id=%d gmac=%d\n", __func__,
+ port_id, gmac->id);
+			if (gmac->enabled) {
+ m->m_pkthdr.rcvif = gmac->ifp;
+ if ((if_getcapenable(gmac->ifp) & IFCAP_RXCSUM) != 0)
+ rx_checksum = true;
+ }
+ port_vlan = gmac->vlan_id;
+ }
+
+ /* XXX TODO: handle multi-frame packets (ie, jumbos!) */
+ /* XXX TODO: handle 802.1ad VLAN offload field */
+ /* XXX TODO: flow offload */
+
+ /*
+ * For now we don't support disabling VLAN offload.
+ * Instead, tags are stripped by the hardware.
+ * Handle the outer VLAN tag; worry about 802.1ad
+ * later on (and hopefully by something other than
+ * adding another mbuf.)
+ */
+ if ((rrd->rrd7 & EDMA_RRD_CVLAN) != 0) {
+ /*
+ * There's an outer VLAN tag that has been
+ * decaped by the hardware. Compare it to the
+ * current port vlan, and if they don't match,
+ * add an offloaded VLAN tag to the mbuf.
+ *
+ * And yes, care about the priority field too.
+ */
+			if ((port_vlan == -1) || (port_vlan != vlan)) {
+				/* 802.1Q PCP lives in the top 3 bits */
+				m->m_pkthdr.ether_vtag = (vlan & 0xfff)
+				    | ((priority & 0x7) << 13);
+				m->m_flags |= M_VLANTAG;
+			}
+ }
+
+ /*
+ * Store the hash info in the mbuf if it's there.
+ *
+ * XXX TODO: decode the RSS field and translate it to
+ * the mbuf hash entry. For now, just treat as OPAQUE.
+ */
+ if (hash_type != EDMA_RRD_RSS_TYPE_NONE) {
+ m->m_pkthdr.flowid = hash_val;
+ M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
+ }
+
+ /*
+ * Check the RX checksum flag if the destination ifp
+ * has the RXCSUM flag set.
+ */
+ if (rx_checksum) {
+ if (rrd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) {
+ /* Fail */
+ ring->stats.num_rx_csum_fail++;
+ } else {
+ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED
+ | CSUM_IP_VALID
+ | CSUM_DATA_VALID
+ | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xffff;
+ ring->stats.num_rx_csum_ok++;
+ }
+ }
+
+ /*
+ * Finally enqueue into the incoming receive queue
+ * to push up into the networking stack.
+ */
+ if (mbufq_enqueue(mq, m) != 0) {
+ ring->stats.num_enqueue_full++;
+ m_freem(m);
+ }
+ }
+ ring->next_to_clean = sw_next_to_clean;
+
+ /* Refill ring if needed */
+ if (cleaned_count > 0) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_RX_RING,
+ "%s: ring=%d, cleaned=%d\n",
+ __func__, queue, cleaned_count);
+ (void) qcom_ess_edma_rx_ring_fill(sc, queue, cleaned_count);
+ (void) qcom_ess_edma_hw_rfd_sw_cons_index_update(sc, queue,
+ ring->next_to_clean);
+ }
+
+ return (0);
+}
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
new file mode 100644
index 000000000000..e23d7f326b1d
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_rx.h
@@ -0,0 +1,51 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_RX_H__
+#define __QCOM_ESS_EDMA_RX_H__
+
+extern int qcom_ess_edma_rx_queue_to_cpu(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_rx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_rx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_rx_buf_alloc(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx);
+extern struct mbuf * qcom_ess_edma_rx_buf_clean(
+ struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, int idx);
+extern int qcom_ess_edma_rx_ring_fill(struct qcom_ess_edma_softc *sc,
+ int queue, int num);
+extern int qcom_ess_edma_rx_ring_complete(struct qcom_ess_edma_softc *sc,
+ int queue, struct mbufq *mq);
+
+#endif /* __QCOM_ESS_EDMA_RX_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
new file mode 100644
index 000000000000..a86ac1dfdc31
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.c
@@ -0,0 +1,454 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/mbuf.h>
+#include <sys/endian.h>
+#include <sys/smp.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/ethernet.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/qcom_ess_edma/qcom_ess_edma_var.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_reg.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_hw.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_desc.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_tx.h>
+#include <dev/qcom_ess_edma/qcom_ess_edma_debug.h>
+
+/*
+ * Map the given TX queue to a given CPU.
+ *
+ * The current mapping in the if_transmit() path
+ * will map mp_ncpu groups of flowids to the TXQs.
+ * So for a 4 CPU system the first four will be CPU 0,
+ * the second four will be CPU 1, etc.
+ */
+int
+qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc, int queue)
+{
+
+ return (queue / mp_ncpus);
+}
+
+int
+qcom_ess_edma_tx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ int i, ret;
+
+ for (i = 0; i < EDMA_TX_RING_SIZE; i++) {
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, i);
+ if (txd == NULL) {
+ device_printf(sc->sc_dev,
+			    "ERROR: couldn't get sw desc (idx %d)\n", i);
+ return (EINVAL);
+ }
+ txd->m = NULL;
+ ret = bus_dmamap_create(ring->buffer_dma_tag,
+ BUS_DMA_NOWAIT,
+ &txd->m_dmamap);
+		if (ret != 0) {
+			device_printf(sc->sc_dev,
+			    "%s: failed to create dmamap (%d)\n",
+			    __func__, ret);
+			return (ret);
+		}
+ }
+
+ return (0);
+}
+
+int
+qcom_ess_edma_tx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring)
+{
+ device_printf(sc->sc_dev, "%s: TODO\n", __func__);
+ return (0);
+}
+
+/*
+ * Clear the sw/hw descriptor entries, unmap/free the mbuf chain that's
+ * part of this.
+ */
+static int
+qcom_ess_edma_tx_unmap_and_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring, uint16_t idx)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ struct qcom_ess_edma_tx_desc *ds;
+
+ /* Get the software/hardware descriptors we're going to update */
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, idx);
+ if (txd == NULL) {
+ device_printf(sc->sc_dev,
+		    "ERROR: couldn't get sw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, idx);
+ if (ds == NULL) {
+ device_printf(sc->sc_dev,
+		    "ERROR: couldn't get hw desc (idx %d)\n", idx);
+ return (EINVAL);
+ }
+
+ if (txd->m != NULL) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+ "%s: idx %d, unmap/free\n", __func__, idx);
+ bus_dmamap_unload(ring->buffer_dma_tag, txd->m_dmamap);
+ m_freem(txd->m);
+ txd->m = NULL;
+ txd->is_first = txd->is_last = 0;
+ }
+
+#ifdef ESS_EDMA_DEBUG_CLEAR_DESC
+ /* This is purely for debugging/testing right now; it's slow! */
+ memset(ds, 0, sizeof(struct qcom_ess_edma_tx_desc));
+#endif
+
+ return (0);
+}
+
+/*
+ * Run through the TX ring, complete/free frames.
+ */
+int
+qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc, int queue)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+ uint32_t n;
+ uint16_t sw_next_to_clean, hw_next_to_clean;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_postupdate(sc, ring);
+
+ sw_next_to_clean = ring->next_to_clean;
+ hw_next_to_clean = 0;
+ n = 0;
+
+ /* Get the current hardware completion index */
+ (void) qcom_ess_edma_hw_tx_read_tpd_cons_idx(sc, queue,
+ &hw_next_to_clean);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+ "%s: called; sw=%d, hw=%d\n", __func__,
+ sw_next_to_clean, hw_next_to_clean);
+
+ /* clean the buffer chain and descriptor(s) here */
+ while (sw_next_to_clean != hw_next_to_clean) {
+ qcom_ess_edma_tx_unmap_and_clean(sc, ring, sw_next_to_clean);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING,
+		    "%s: cleaning %d\n", __func__, sw_next_to_clean);
+ sw_next_to_clean++;
+ if (sw_next_to_clean >= ring->ring_count)
+ sw_next_to_clean = 0;
+ n++;
+ }
+
+ ring->stats.num_cleaned += n;
+ ring->stats.num_tx_complete++;
+
+ ring->next_to_clean = sw_next_to_clean;
+
+ /* update the TPD consumer index register */
+ qcom_ess_edma_hw_tx_update_cons_idx(sc, queue, sw_next_to_clean);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_RING_COMPLETE,
+ "%s: cleaned %d descriptors\n", __func__, n);
+
+ return (0);
+}
+
+/*
+ * Attempt to enqueue a single frame.
+ *
+ * This is the minimum required to send a single Ethernet mbuf /
+ * mbuf chain.  VLAN tags are added as required, since the default
+ * switch configuration from the device tree uses both the port
+ * bitmap and VLAN IDs to steer LAN/WAN/etc interface traffic.
+ *
+ * Note, this does NOT update the transmit pointer in the hardware;
+ * that must be done after calling this function one or more times.
+ *
+ * The mbuf is either consumed into the ring or it is returned
+ * unsent.  If we've modified it in any way then the caller should
+ * use what's returned back in m0 (e.g. to push it back on a queue.)
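+ *
+ * A sketch of the expected calling pattern; tx_next_frame() here is
+ * just a placeholder for however the caller dequeues frames:
+ *
+ *	EDMA_RING_LOCK(&sc->sc_tx_ring[queue]);
+ *	while ((m = tx_next_frame()) != NULL) {
+ *		if (qcom_ess_edma_tx_ring_frame(sc, queue, &m,
+ *		    port_bitmap, default_vlan) != 0)
+ *			break;
+ *	}
+ *	qcom_ess_edma_tx_ring_frame_update(sc, queue);
+ *	EDMA_RING_UNLOCK(&sc->sc_tx_ring[queue]);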
+ */
+int
+qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc, int queue,
+ struct mbuf **m0, uint16_t port_bitmap, int default_vlan)
+{
+ struct qcom_ess_edma_sw_desc_tx *txd_first;
+ struct qcom_ess_edma_desc_ring *ring;
+ struct ether_vlan_header *eh;
+ bus_dma_segment_t txsegs[QCOM_ESS_EDMA_MAX_TXFRAGS];
+ uint32_t word1, word3;
+ uint32_t eop;
+ int vlan_id;
+ int num_left, ret, nsegs, i;
+ uint16_t next_to_fill;
+ uint16_t svlan_tag;
+ struct mbuf *m;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ m = *m0;
+
+ /*
+ * Do we have ANY space? If not, return ENOBUFS, let the
+ * caller decide what to do with the mbuf.
+ */
+ num_left = qcom_ess_edma_desc_ring_get_num_available(sc, ring);
+ if (num_left < 2) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: num_left=%d\n", __func__, num_left);
+ ring->stats.num_enqueue_full++;
+ return (ENOBUFS);
+ }
+
+ /*
+ * Get the current sw/hw descriptor offset; we'll use its
+ * dmamap and then switch it out with the last one when
+ * the mbuf is put there.
+ */
+ next_to_fill = ring->next_to_fill;
+ txd_first = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring,
+ next_to_fill);
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: starting at idx %d\n", __func__, next_to_fill);
+
+ /*
+ * Do the initial mbuf load; see how many fragments we
+ * have. If we don't have enough descriptors available
+ * then immediately unmap and return an error.
+ */
+ ret = bus_dmamap_load_mbuf_sg(ring->buffer_dma_tag,
+ txd_first->m_dmamap,
+ m,
+ txsegs,
+ &nsegs,
+ BUS_DMA_NOWAIT);
+ if (ret != 0) {
+ ring->stats.num_tx_mapfail++;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: map failed (%d)\n", __func__, ret);
+ return (ENOBUFS);
+ }
+	if (nsegs == 0) {
+		ring->stats.num_tx_maxfrags++;
+		QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+		    "%s: no segments returned\n", __func__);
+		bus_dmamap_unload(ring->buffer_dma_tag, txd_first->m_dmamap);
+		return (ENOBUFS);
+	}
+
+ if (nsegs + 2 > num_left) {
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: nsegs=%d, num_left=%d\n", __func__, nsegs, num_left);
+ bus_dmamap_unload(ring->buffer_dma_tag, txd_first->m_dmamap);
+ ring->stats.num_enqueue_full++;
+ return (ENOBUFS);
+ }
+
+ bus_dmamap_sync(ring->buffer_dma_tag, txd_first->m_dmamap,
+ BUS_DMASYNC_PREWRITE);
+
+ /*
+ * At this point we're committed to sending the frame.
+ *
+	 * Clear the rcvif field, which was being used to track the
+	 * /transmit/ ifnet.
+ */
+ m->m_pkthdr.rcvif = NULL;
+
+	/*
+	 * Configure the various header fields that are shared
+	 * between descriptors.
+	 */
+	svlan_tag = 0;	/* 802.1ad tag? */
+	/* word1 - tx checksum, v4/v6 TSO, pppoe, 802.1ad vlan flag */
+ word1 = 0;
+ /*
+ * word3 - insert default vlan; vlan tag/flag, CPU/STP/RSTP stuff,
+ * port map
+ */
+ word3 = 0;
+ word3 |= (port_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT);
+
+ /*
+ * If VLAN offload is enabled, we can enable inserting a CVLAN
+ * tag here for the default VLAN, or the VLAN interface.
+ * The default switch configuration requires both a port_bitmap
+ * and 802.1q VLANs configured.
+ *
+ * If there's a VLAN tag on the mbuf then we leave it alone.
+ * I don't want to try and strip out the VLAN header from a packet
+ * here.
+ *
+ * There's no 802.1ad support in here yet.
+ */
+ eh = mtod(m, struct ether_vlan_header *);
+ if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
+ /* Don't add a tag, just use what's here */
+ vlan_id = -1;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: no vlan id\n", __func__);
+
+ } else if ((m->m_flags & M_VLANTAG) != 0) {
+ /* We have an offload VLAN tag, use it */
+ vlan_id = m->m_pkthdr.ether_vtag & 0x0fff;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: header tag vlan id=%d\n", __func__, vlan_id);
+ } else {
+ /* No VLAN tag, no VLAN header; default VLAN */
+ vlan_id = default_vlan;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: no vlan tag/hdr; vlan id=%d\n", __func__,
+ vlan_id);
+ }
+
+ /*
+ * Only add the offload tag if we need to.
+ */
+ if (vlan_id != -1) {
+ word3 |= (1U << EDMA_TX_INS_CVLAN);
+ word3 |= (vlan_id << EDMA_TX_CVLAN_TAG_SHIFT);
+ }
+
+ /* End of frame flag */
+ eop = 0;
+
+ /*
+ * Walk the mbuf segment list, and allocate descriptor
+ * entries. Put the mbuf in the last descriptor entry
+ * and then switch out the first/last dmamap entries.
+ */
+ for (i = 0; i < nsegs; i++) {
+ struct qcom_ess_edma_sw_desc_tx *txd;
+ struct qcom_ess_edma_tx_desc *ds;
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: filling idx %d\n", __func__, next_to_fill);
+ txd = qcom_ess_edma_desc_ring_get_sw_desc(sc, ring, next_to_fill);
+ ds = qcom_ess_edma_desc_ring_get_hw_desc(sc, ring, next_to_fill);
+		txd->m = NULL;
+		txd->is_first = txd->is_last = 0;
+ if (i == 0) {
+ txd->is_first = 1;
+ }
+ if (i == (nsegs - 1)) {
+ bus_dmamap_t dm;
+
+ txd->is_last = 1;
+ eop = EDMA_TPD_EOP;
+ /*
+ * Put the txmap and the mbuf in the last swdesc.
+ * That way it isn't freed until we've transmitted
+ * all the descriptors of this frame, in case the
+ * hardware decides to notify us of some half-sent
+ * stuff.
+ *
+ * Moving the pointers around here sucks a little
+ * but it DOES beat not freeing the dmamap entries
+ * correctly.
+ */
+ txd->m = m;
+ dm = txd_first->m_dmamap;
+ txd_first->m_dmamap = txd->m_dmamap;
+ txd->m_dmamap = dm;
+ }
+ ds->word1 = word1 | eop;
+ ds->word3 = word3;
+ ds->svlan_tag = svlan_tag;
+ ds->addr = htole32(txsegs[i].ds_addr);
+ ds->len = htole16(txsegs[i].ds_len);
+
+ QCOM_ESS_EDMA_DPRINTF(sc, QCOM_ESS_EDMA_DBG_TX_FRAME,
+ "%s: addr=0x%lx len=%ld eop=0x%x\n",
+ __func__,
+ txsegs[i].ds_addr,
+ txsegs[i].ds_len,
+ eop);
+
+ next_to_fill++;
+ if (next_to_fill >= ring->ring_count)
+ next_to_fill = 0;
+ }
+
+ ring->stats.num_added += nsegs;
+
+ /* Finish, update ring tracking */
+ ring->next_to_fill = next_to_fill;
+
+ ring->stats.num_tx_ok++;
+
+ return (0);
+}
+
+/*
+ * Update the hardware with the new state of the transmit ring.
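+ *
+ * This flushes the updated descriptors to memory and then writes the
+ * TPD producer index, so it should be called (with the ring lock
+ * still held) after one or more qcom_ess_edma_tx_ring_frame() calls.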
+ */
+int
+qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc, int queue)
+{
+ struct qcom_ess_edma_desc_ring *ring;
+
+ ring = &sc->sc_tx_ring[queue];
+
+ EDMA_RING_LOCK_ASSERT(ring);
+
+ qcom_ess_edma_desc_ring_flush_preupdate(sc, ring);
+
+ (void) qcom_ess_edma_hw_tx_update_tpd_prod_idx(sc, queue,
+ ring->next_to_fill);
+
+ /* XXX keep stats for this specific call? */
+ return (0);
+}
diff --git a/sys/dev/bhnd/cores/usb/bhnd_usbvar.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
index ec169e1b0543..cb1dc02e0bd1 100644
--- a/sys/dev/bhnd/cores/usb/bhnd_usbvar.h
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_tx.h
@@ -1,8 +1,7 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2010, Aleksandr Rybalko <ray@ddteam.net>
- * All rights reserved.
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,35 +25,26 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
+ * $FreeBSD$
+ *
*/
-#ifndef _BHND_USBVAR_H_
-#define _BHND_USBVAR_H_
-
-struct bhnd_usb_softc {
- bus_space_tag_t sc_bt;
- bus_space_handle_t sc_bh;
- bus_addr_t sc_maddr;
- bus_size_t sc_msize;
- bus_addr_t sc_irqn;
- struct intr_event *sc_events; /* IRQ events structs */
-
- struct resource *sc_mem;
- struct resource *sc_irq;
- struct rman mem_rman;
- struct rman irq_rman;
- int devid;
+#ifndef __QCOM_ESS_EDMA_TX_H__
+#define __QCOM_ESS_EDMA_TX_H__
-};
-struct bhnd_usb_devinfo {
- struct resource_list sdi_rl;
- uint8_t sdi_unit; /* core index on bus */
- rman_res_t sdi_irq; /**< child IRQ, if mapped */
- bool sdi_irq_mapped; /**< true if IRQ mapped, false otherwise */
- char sdi_name[8];
- rman_res_t sdi_maddr;
- rman_res_t sdi_msize;
-};
+extern int qcom_ess_edma_tx_queue_to_cpu(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_tx_ring_setup(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_tx_ring_clean(struct qcom_ess_edma_softc *sc,
+ struct qcom_ess_edma_desc_ring *ring);
+extern int qcom_ess_edma_tx_ring_complete(struct qcom_ess_edma_softc *sc,
+ int queue);
+extern int qcom_ess_edma_tx_ring_frame(struct qcom_ess_edma_softc *sc,
+ int queue, struct mbuf **m0, uint16_t port_bitmap,
+ int default_vlan);
+extern int qcom_ess_edma_tx_ring_frame_update(struct qcom_ess_edma_softc *sc,
+ int queue);
-#endif /* _BHND_USBVAR_H_ */
+#endif /* __QCOM_ESS_EDMA_TX_H__ */
diff --git a/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h b/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h
new file mode 100644
index 000000000000..0e7afcfbf1c5
--- /dev/null
+++ b/sys/dev/qcom_ess_edma/qcom_ess_edma_var.h
@@ -0,0 +1,258 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2021 Adrian Chadd <adrian@FreeBSD.org>.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ *
+ */
+
+#ifndef __QCOM_ESS_EDMA_VAR_H__
+#define __QCOM_ESS_EDMA_VAR_H__
+
+#define EDMA_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
+#define EDMA_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx)
+#define EDMA_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sc_mtx, MA_OWNED)
+
+#define EDMA_RING_LOCK(_ring) mtx_lock(&(_ring)->mtx)
+#define EDMA_RING_UNLOCK(_ring) mtx_unlock(&(_ring)->mtx)
+#define EDMA_RING_LOCK_ASSERT(_ring) mtx_assert(&(_ring)->mtx, MA_OWNED)
+
+/*
+ * register space access macros
+ */
+#define EDMA_REG_WRITE(sc, reg, val) do { \
+ bus_write_4(sc->sc_mem_res, (reg), (val)); \
+ } while (0)
+
+#define EDMA_REG_READ(sc, reg) bus_read_4(sc->sc_mem_res, (reg))
+
+#define EDMA_REG_SET_BITS(sc, reg, bits) \
+ EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) | (bits))
+
+#define EDMA_REG_CLEAR_BITS(sc, reg, bits) \
+ EDMA_REG_WRITE(sc, reg, EDMA_REG_READ(sc, (reg)) & ~(bits))
+
+#define EDMA_REG_BARRIER_WRITE(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_WRITE)
+#define EDMA_REG_BARRIER_READ(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, BUS_SPACE_BARRIER_READ)
+#define EDMA_REG_BARRIER_RW(sc) bus_barrier((sc)->sc_mem_res, \
+ 0, (sc)->sc_mem_res_size, \
+ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
+
+/*
+ * Fixed number of interrupts - 16 TX, 8 RX.
+ *
+ * The Linux driver supports 4 or 8 RX queues.
+ */
+
+#define QCOM_ESS_EDMA_NUM_TX_IRQS 16
+#define QCOM_ESS_EDMA_NUM_RX_IRQS 8
+
+#define QCOM_ESS_EDMA_NUM_TX_RINGS 16
+#define QCOM_ESS_EDMA_NUM_RX_RINGS 8
+
+#define EDMA_TX_RING_SIZE 128
+#define EDMA_RX_RING_SIZE 128
+
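+/* Entries in each TX queue's if_transmit staging buf_ring */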
+#define EDMA_TX_BUFRING_SIZE 512
+
+/* Maximum number of GMAC instances */
+#define QCOM_ESS_EDMA_MAX_NUM_GMACS 5
+
+/* Maximum number of ports to support mapping to GMACs */
+#define QCOM_ESS_EDMA_MAX_NUM_PORTS 8
+
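+/* Maximum number of DMA segments a single transmitted frame may use */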
+#define QCOM_ESS_EDMA_MAX_TXFRAGS 8
+
+struct qcom_ess_edma_softc;
+
+/*
+ * An instance of an interrupt queue.
+ */
+struct qcom_ess_edma_intr {
+ struct qcom_ess_edma_softc *sc;
+ struct resource *irq_res;
+ int irq_rid;
+ void *irq_intr;
+
+ struct {
+ uint64_t num_intr;
+ } stats;
+};
+
+/*
+ * A TX/RX descriptor ring.
+ */
+struct qcom_ess_edma_desc_ring {
+ bus_dma_tag_t hw_ring_dma_tag; /* tag for hw ring */
+ bus_dma_tag_t buffer_dma_tag; /* tag for mbufs */
+ char *label;
+
+ struct mtx mtx;
+
+ bus_dmamap_t hw_desc_map;
+ bus_addr_t hw_desc_paddr;
+ void *hw_desc;
+
+ void *sw_desc;
+ int hw_entry_size; /* hw desc entry size */
+ int sw_entry_size; /* sw desc entry size */
+ int ring_count; /* Number of entries */
+ int buffer_align;
+ int ring_align;
+
+ uint16_t next_to_fill;
+ uint16_t next_to_clean;
+ uint16_t pending_fill;
+
+ struct {
+ uint64_t num_added;
+ uint64_t num_cleaned;
+ uint64_t num_dropped;
+ uint64_t num_enqueue_full;
+ uint64_t num_rx_no_gmac;
+ uint64_t num_rx_ok;
+ uint64_t num_tx_ok;
+ uint64_t num_tx_maxfrags;
+ uint64_t num_tx_mapfail;
+ uint64_t num_rx_csum_ok;
+ uint64_t num_rx_csum_fail;
+ uint64_t num_tx_complete;
+ uint64_t num_tx_xmit_defer;
+ uint64_t num_tx_xmit_task;
+ } stats;
+};
+
+/*
+ * Structs for transmit and receive software
+ * ring entries.
+ */
+struct qcom_ess_edma_sw_desc_tx {
+ struct mbuf *m;
+ bus_dmamap_t m_dmamap;
+ uint32_t is_first:1;
+ uint32_t is_last:1;
+};
+
+struct qcom_ess_edma_sw_desc_rx {
+ struct mbuf *m;
+ bus_dmamap_t m_dmamap;
+ bus_addr_t m_physaddr;
+};
+
+#define QCOM_ESS_EDMA_LABEL_SZ 16
+
+/*
+ * Per transmit ring TX state for TX queue / buf_ring stuff.
+ */
+struct qcom_ess_edma_tx_state {
+ struct task completion_task;
+ struct task xmit_task;
+ struct buf_ring *br;
+ struct taskqueue *completion_tq;
+ struct qcom_ess_edma_softc *sc;
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int enqueue_is_running;
+ int queue_id;
+};
+
+/*
+ * Per receive ring RX state for taskqueue stuff.
+ */
+struct qcom_ess_edma_rx_state {
+ struct task completion_task;
+ struct taskqueue *completion_tq;
+ struct qcom_ess_edma_softc *sc;
+ char label[QCOM_ESS_EDMA_LABEL_SZ];
+ int queue_id;
+};
+
+struct qcom_ess_edma_gmac {
+ struct qcom_ess_edma_softc *sc;
+ int id;
+ bool enabled;
+ /* Native VLAN ID */
+ int vlan_id;
+ /* Switch portmask for this instance */
+ int port_mask;
+ /* MAC address for this ifnet (from device tree) */
+ struct ether_addr eaddr;
+ /* ifnet interface! */
+ if_t ifp;
+ /* media interface */
+ struct ifmedia ifm;
+};
+
+struct qcom_ess_edma_softc {
+ device_t sc_dev;
+ struct mtx sc_mtx;
+ struct resource *sc_mem_res;
+ size_t sc_mem_res_size;
+ int sc_mem_rid;
+ uint32_t sc_debug;
+ bus_dma_tag_t sc_dma_tag;
+
+ struct qcom_ess_edma_intr sc_tx_irq[QCOM_ESS_EDMA_NUM_TX_IRQS];
+ struct qcom_ess_edma_intr sc_rx_irq[QCOM_ESS_EDMA_NUM_RX_IRQS];
+
+ struct qcom_ess_edma_desc_ring sc_tx_ring[QCOM_ESS_EDMA_NUM_TX_RINGS];
+ struct qcom_ess_edma_desc_ring sc_rx_ring[QCOM_ESS_EDMA_NUM_RX_RINGS];
+ struct qcom_ess_edma_tx_state sc_tx_state[QCOM_ESS_EDMA_NUM_TX_RINGS];
+ struct qcom_ess_edma_rx_state sc_rx_state[QCOM_ESS_EDMA_NUM_RX_RINGS];
+ struct qcom_ess_edma_gmac sc_gmac[QCOM_ESS_EDMA_MAX_NUM_GMACS];
+
+ int sc_gmac_port_map[QCOM_ESS_EDMA_MAX_NUM_PORTS];
+
+ struct {
+ uint32_t num_gmac;
+ uint32_t mdio_supported;
+ uint32_t poll_required;
+ uint32_t rss_type;
+
+ uint32_t rx_buf_size;
+ bool rx_buf_ether_align;
+
+ uint32_t tx_intr_mask;
+ uint32_t rx_intr_mask;
+
+ /* number of tx/rx descriptor entries in each ring */
+ uint32_t rx_ring_count;
+ uint32_t tx_ring_count;
+
+ /* how many queues for each CPU */
+ uint32_t num_tx_queue_per_cpu;
+ } sc_config;
+
+ struct {
+ uint32_t misc_intr_mask;
+ uint32_t wol_intr_mask;
+ uint32_t intr_sw_idx_w;
+ } sc_state;
+};
+
+#endif /* __QCOM_ESS_EDMA_VAR_H__ */
diff --git a/sys/dev/qcom_gcc/qcom_gcc_clock.c b/sys/dev/qcom_gcc/qcom_gcc_clock.c
new file mode 100644
index 000000000000..c8c10b0c5172
--- /dev/null
+++ b/sys/dev/qcom_gcc/qcom_gcc_clock.c
@@ -0,0 +1,98 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025, Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/random.h>
+#include <sys/stdatomic.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include "qcom_gcc_var.h"
+
+int
+qcom_gcc_clock_read(device_t dev, bus_addr_t addr, uint32_t *val)
+{
+ struct qcom_gcc_softc *sc;
+
+ sc = device_get_softc(dev);
+ *val = bus_read_4(sc->reg, addr);
+ return (0);
+}
+
+int
+qcom_gcc_clock_write(device_t dev, bus_addr_t addr, uint32_t val)
+{
+ struct qcom_gcc_softc *sc;
+
+ sc = device_get_softc(dev);
+ bus_write_4(sc->reg, addr, val);
+ return (0);
+}
+
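+/*
+ * Read-modify-write a clock register.  Note that clear_mask is ANDed
+ * with the register value as-is (i.e. callers pass the mask already
+ * inverted), matching the IPQ4018-specific code this replaces.
+ */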
+int
+qcom_gcc_clock_modify(device_t dev, bus_addr_t addr,
+ uint32_t clear_mask, uint32_t set_mask)
+{
+ struct qcom_gcc_softc *sc;
+ uint32_t reg;
+
+ sc = device_get_softc(dev);
+ reg = bus_read_4(sc->reg, addr);
+ reg &= clear_mask;
+ reg |= set_mask;
+ bus_write_4(sc->reg, addr, reg);
+ return (0);
+}
+
+void
+qcom_gcc_clock_lock(device_t dev)
+{
+ struct qcom_gcc_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_lock(&sc->mtx);
+}
+
+void
+qcom_gcc_clock_unlock(device_t dev)
+{
+ struct qcom_gcc_softc *sc;
+
+ sc = device_get_softc(dev);
+ mtx_unlock(&sc->mtx);
+}
diff --git a/sys/dev/sound/pcm/ac97_patch.h b/sys/dev/qcom_gcc/qcom_gcc_ipq4018.h
index 997b10dbd02c..2b5bfa453766 100644
--- a/sys/dev/sound/pcm/ac97_patch.h
+++ b/sys/dev/qcom_gcc/qcom_gcc_ipq4018.h
@@ -1,8 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2003 Orion Hodson
- * All rights reserved.
+ * Copyright (c) 2025 Adrian Chadd <adrian@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,10 +25,17 @@
* SUCH DAMAGE.
*/
-typedef void (*ac97_patch)(struct ac97_info*);
+#ifndef __QCOM_GCC_IPQ4018_H__
+#define __QCOM_GCC_IPQ4018_H__
-void ad1886_patch(struct ac97_info*);
-void ad198x_patch(struct ac97_info*);
-void ad1981b_patch(struct ac97_info*);
-void cmi9739_patch(struct ac97_info*);
-void alc655_patch(struct ac97_info*);
+/*
+ * reset block
+ */
+extern void qcom_gcc_ipq4018_hwreset_init(struct qcom_gcc_softc *);
+
+/*
+ * clock block
+ */
+extern void qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_softc *);
+
+#endif /* __QCOM_GCC_IPQ4018_H__ */
diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c
index 6441cf3e6ae5..ce67b2898efb 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_clock.c
@@ -59,8 +59,8 @@
#include <dev/qcom_clk/qcom_clk_branch2.h>
#include <dev/qcom_clk/qcom_clk_ro_div.h>
-#include "qcom_gcc_ipq4018_var.h"
-
+#include "qcom_gcc_var.h"
+#include "qcom_gcc_ipq4018.h"
/* Fixed rate clock. */
#define F_RATE(_id, cname, _freq) \
@@ -578,7 +578,7 @@ static struct qcom_clk_branch2_def branch2_tbl[] = {
0x1e00c, 0, 0, 0, 0x1e00c, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
F_BRANCH2(GCC_USB2_SLEEP_CLK, "gcc_usb2_sleep_clk",
- "gcc_sleep_clk_src", 0x1e010, 0, 0, 0, 0x1e010,
+ "sleep_clk", 0x1e010, 0, 0, 0, 0x1e010,
QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
F_BRANCH2(GCC_USB2_MOCK_UTMI_CLK, "gcc_usb2_mock_utmi_clk",
@@ -588,7 +588,7 @@ static struct qcom_clk_branch2_def branch2_tbl[] = {
F_BRANCH2(GCC_USB3_MASTER_CLK, "gcc_usb3_master_clk", "fepll125",
0x1e028, 0, 0, 0, 0x1e028, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
- F_BRANCH2(GCC_USB3_SLEEP_CLK, "gcc_usb3_sleep_clk", "gcc_sleep_clk_src",
+ F_BRANCH2(GCC_USB3_SLEEP_CLK, "gcc_usb3_sleep_clk", "sleep_clk",
0x1e02c, 0, 0, 0, 0x1e02c, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
F_BRANCH2(GCC_USB3_MOCK_UTMI_CLK, "gcc_usb3_mock_utmi_clk",
@@ -602,7 +602,12 @@ static struct qcom_clk_branch2_def branch2_tbl[] = {
F_BRANCH2(GCC_WCSS2G_REF_CLK, "gcc_wcss2g_ref_clk", "xo",
0x1f00c, 0, 0, 0, 0x1f00c, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT),
- F_BRANCH2(GCC_WCSS2G_RTC_CLK, "gcc_wcss2g_rtc_clk", "gcc_sleep_clk_src",
+ /*
+ * TODO: figure out whether gcc_sleep_clk_src -> sleep_clk is right;
+ * will need to go consult the openwrt ipq4018 device tree / code
+ * again!
+ */
+ F_BRANCH2(GCC_WCSS2G_RTC_CLK, "gcc_wcss2g_rtc_clk", "sleep_clk",
0x1f010, 0, 0, 0, 0x1f010, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
@@ -613,7 +618,7 @@ static struct qcom_clk_branch2_def branch2_tbl[] = {
F_BRANCH2(GCC_WCSS5G_REF_CLK, "gcc_wcss5g_ref_clk", "xo",
0x1f00c, 0, 0, 0, 0x2000c, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, QCOM_CLK_BRANCH2_FLAGS_SET_RATE_PARENT),
- F_BRANCH2(GCC_WCSS5G_RTC_CLK, "gcc_wcss5g_rtc_clk", "gcc_sleep_clk_src",
+ F_BRANCH2(GCC_WCSS5G_RTC_CLK, "gcc_wcss5g_rtc_clk", "sleep_clk",
0x1f010, 0, 0, 0, 0x20010, QCOM_CLK_BRANCH2_BRANCH_HALT,
false, 0),
@@ -624,7 +629,7 @@ static struct qcom_clk_branch2_def branch2_tbl[] = {
};
static void
-qcom_gcc_ipq4018_clock_init_fepll(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_fepll(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -636,7 +641,7 @@ qcom_gcc_ipq4018_clock_init_fepll(struct qcom_gcc_ipq4018_softc *sc)
}
static void
-qcom_gcc_ipq4018_clock_init_fdiv(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_fdiv(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -648,7 +653,7 @@ qcom_gcc_ipq4018_clock_init_fdiv(struct qcom_gcc_ipq4018_softc *sc)
}
static void
-qcom_gcc_ipq4018_clock_init_apssdiv(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_apssdiv(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -660,7 +665,7 @@ qcom_gcc_ipq4018_clock_init_apssdiv(struct qcom_gcc_ipq4018_softc *sc)
}
static void
-qcom_gcc_ipq4018_clock_init_rcg2(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_rcg2(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -672,7 +677,7 @@ qcom_gcc_ipq4018_clock_init_rcg2(struct qcom_gcc_ipq4018_softc *sc)
}
static void
-qcom_gcc_ipq4018_clock_init_branch2(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_branch2(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -684,7 +689,7 @@ qcom_gcc_ipq4018_clock_init_branch2(struct qcom_gcc_ipq4018_softc *sc)
}
static void
-qcom_gcc_ipq4018_clock_init_ro_div(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_init_ro_div(struct qcom_gcc_softc *sc)
{
int i, rv;
@@ -695,43 +700,8 @@ qcom_gcc_ipq4018_clock_init_ro_div(struct qcom_gcc_ipq4018_softc *sc)
}
}
-int
-qcom_gcc_ipq4018_clock_read(device_t dev, bus_addr_t addr, uint32_t *val)
-{
- struct qcom_gcc_ipq4018_softc *sc;
-
- sc = device_get_softc(dev);
- *val = bus_read_4(sc->reg, addr);
- return (0);
-}
-
-int
-qcom_gcc_ipq4018_clock_write(device_t dev, bus_addr_t addr, uint32_t val)
-{
- struct qcom_gcc_ipq4018_softc *sc;
-
- sc = device_get_softc(dev);
- bus_write_4(sc->reg, addr, val);
- return (0);
-}
-
-int
-qcom_gcc_ipq4018_clock_modify(device_t dev, bus_addr_t addr,
- uint32_t clear_mask, uint32_t set_mask)
-{
- struct qcom_gcc_ipq4018_softc *sc;
- uint32_t reg;
-
- sc = device_get_softc(dev);
- reg = bus_read_4(sc->reg, addr);
- reg &= clear_mask;
- reg |= set_mask;
- bus_write_4(sc->reg, addr, reg);
- return (0);
-}
-
void
-qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_ipq4018_softc *sc)
+qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_softc *sc)
{
sc->clkdom = clkdom_create(sc->dev);
@@ -747,21 +717,3 @@ qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_ipq4018_softc *sc)
/* Finalise clock tree */
clkdom_finit(sc->clkdom);
}
-
-void
-qcom_gcc_ipq4018_clock_lock(device_t dev)
-{
- struct qcom_gcc_ipq4018_softc *sc;
-
- sc = device_get_softc(dev);
- mtx_lock(&sc->mtx);
-}
-
-void
-qcom_gcc_ipq4018_clock_unlock(device_t dev)
-{
- struct qcom_gcc_ipq4018_softc *sc;
-
- sc = device_get_softc(dev);
- mtx_unlock(&sc->mtx);
-}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
index ae2236d7fca7..f99d1d9ad9f1 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_ipq4018_reset.c
@@ -50,10 +50,10 @@
#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
-#include "qcom_gcc_ipq4018_var.h"
+#include "qcom_gcc_var.h"
+#include "qcom_gcc_ipq4018.h"
-
-static const struct qcom_gcc_ipq4018_reset_entry gcc_ipq4019_reset_list[] = {
+static const struct qcom_gcc_reset_entry gcc_ipq4019_reset_list[] = {
[WIFI0_CPU_INIT_RESET] = { 0x1f008, 5 },
[WIFI0_RADIO_SRIF_RESET] = { 0x1f008, 4 },
[WIFI0_RADIO_WARM_RESET] = { 0x1f008, 3 },
@@ -127,10 +127,10 @@ static const struct qcom_gcc_ipq4018_reset_entry gcc_ipq4019_reset_list[] = {
[GCC_SPDM_BCR] = {0x25000, 0},
};
-int
+static int
qcom_gcc_ipq4018_hwreset_assert(device_t dev, intptr_t id, bool reset)
{
- struct qcom_gcc_ipq4018_softc *sc;
+ struct qcom_gcc_softc *sc;
uint32_t reg;
sc = device_get_softc(dev);
@@ -151,10 +151,10 @@ qcom_gcc_ipq4018_hwreset_assert(device_t dev, intptr_t id, bool reset)
return (0);
}
-int
+static int
qcom_gcc_ipq4018_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
{
- struct qcom_gcc_ipq4018_softc *sc;
+ struct qcom_gcc_softc *sc;
uint32_t reg;
sc = device_get_softc(dev);
@@ -175,3 +175,9 @@ qcom_gcc_ipq4018_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
return (0);
}
+void
+qcom_gcc_ipq4018_hwreset_init(struct qcom_gcc_softc *sc)
+{
+ sc->sc_cb.hw_reset_assert = qcom_gcc_ipq4018_hwreset_assert;
+ sc->sc_cb.hw_reset_is_asserted = qcom_gcc_ipq4018_hwreset_is_asserted;
+}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018.c b/sys/dev/qcom_gcc/qcom_gcc_main.c
index 5980d8ebe893..3950bd985feb 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018.c
+++ b/sys/dev/qcom_gcc/qcom_gcc_main.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2021, Adrian Chadd <adrian@FreeBSD.org>
+ * Copyright (c) 2025, Adrian Chadd <adrian@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,7 +25,7 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-/* Driver for Qualcomm IPQ4018 clock and reset device */
+/* Driver for Qualcomm clock/reset trees */
#include <sys/param.h>
#include <sys/kernel.h>
@@ -49,19 +49,29 @@
#include "clkdev_if.h"
#include "hwreset_if.h"
-#include <dt-bindings/clock/qcom,gcc-ipq4019.h>
+#include "qcom_gcc_var.h"
+#include "qcom_gcc_ipq4018.h"
-#include "qcom_gcc_ipq4018_var.h"
+static int qcom_gcc_modevent(module_t, int, void *);
+static int qcom_gcc_probe(device_t);
+static int qcom_gcc_attach(device_t);
+static int qcom_gcc_detach(device_t);
-static int qcom_gcc_ipq4018_modevent(module_t, int, void *);
+struct qcom_gcc_chipset_list_entry {
+ const char *ofw;
+ const char *desc;
+ qcom_gcc_chipset_t chipset;
+};
-static int qcom_gcc_ipq4018_probe(device_t);
-static int qcom_gcc_ipq4018_attach(device_t);
-static int qcom_gcc_ipq4018_detach(device_t);
+static struct qcom_gcc_chipset_list_entry qcom_gcc_chipset_list[] = {
+ { "qcom,gcc-ipq4019", "Qualcomm IPQ4018 Clock/Reset Controller",
+ QCOM_GCC_CHIPSET_IPQ4018 },
+ { NULL, NULL, 0 },
+};
static int
-qcom_gcc_ipq4018_modevent(module_t mod, int type, void *unused)
+qcom_gcc_modevent(module_t mod, int type, void *unused)
{
int error;
@@ -81,37 +91,64 @@ qcom_gcc_ipq4018_modevent(module_t mod, int type, void *unused)
}
static int
-qcom_gcc_ipq4018_probe(device_t dev)
+qcom_gcc_probe(device_t dev)
{
+ struct qcom_gcc_softc *sc;
+ int i;
+
+ sc = device_get_softc(dev);
+
if (! ofw_bus_status_okay(dev))
return (ENXIO);
- if (ofw_bus_is_compatible(dev, "qcom,gcc-ipq4019") == 0)
- return (ENXIO);
+ for (i = 0; qcom_gcc_chipset_list[i].ofw != NULL; i++) {
+ const struct qcom_gcc_chipset_list_entry *ce;
- return (0);
+ ce = &qcom_gcc_chipset_list[i];
+ if (ofw_bus_is_compatible(dev, ce->ofw) == 0)
+ continue;
+ device_set_desc(dev, ce->desc);
+ sc->sc_chipset = ce->chipset;
+ return (0);
+ }
+
+ return (ENXIO);
}
static int
-qcom_gcc_ipq4018_attach(device_t dev)
+qcom_gcc_attach(device_t dev)
{
- struct qcom_gcc_ipq4018_softc *sc;
+ struct qcom_gcc_softc *sc;
+ size_t mem_sz;
sc = device_get_softc(dev);
/* Found a compatible device! */
sc->dev = dev;
+ /*
+ * Setup the hardware callbacks, before any further initialisation
+ * is performed.
+ */
+ switch (sc->sc_chipset) {
+ case QCOM_GCC_CHIPSET_IPQ4018:
+ qcom_gcc_ipq4018_hwreset_init(sc);
+ mem_sz = 0x60000;
+ break;
+ case QCOM_GCC_CHIPSET_NONE:
+ device_printf(dev, "Invalid chipset (%d)\n", sc->sc_chipset);
+ return (ENXIO);
+ }
+
sc->reg_rid = 0;
+
sc->reg = bus_alloc_resource_anywhere(dev, SYS_RES_MEMORY,
- &sc->reg_rid, 0x60000, RF_ACTIVE);
+ &sc->reg_rid, mem_sz, RF_ACTIVE);
if (sc->reg == NULL) {
device_printf(dev, "Couldn't allocate memory resource!\n");
return (ENXIO);
}
- device_set_desc(dev, "Qualcomm IPQ4018 Clock/Reset Controller");
-
mtx_init(&sc->mtx, device_get_nameunit(dev), NULL, MTX_DEF);
/*
@@ -122,15 +159,22 @@ qcom_gcc_ipq4018_attach(device_t dev)
/*
* Setup and register as a clock provider.
*/
- qcom_gcc_ipq4018_clock_setup(sc);
+ switch (sc->sc_chipset) {
+ case QCOM_GCC_CHIPSET_IPQ4018:
+ qcom_gcc_ipq4018_clock_setup(sc);
+ break;
+ case QCOM_GCC_CHIPSET_NONE:
+ device_printf(dev, "Invalid chipset (%d)\n", sc->sc_chipset);
+ return (ENXIO);
+ }
return (0);
}
static int
-qcom_gcc_ipq4018_detach(device_t dev)
+qcom_gcc_detach(device_t dev)
{
- struct qcom_gcc_ipq4018_softc *sc;
+ struct qcom_gcc_softc *sc;
sc = device_get_softc(dev);
@@ -145,34 +189,34 @@ qcom_gcc_ipq4018_detach(device_t dev)
return (0);
}
-static device_method_t qcom_gcc_ipq4018_methods[] = {
+static device_method_t qcom_gcc_methods[] = {
/* Device methods. */
- DEVMETHOD(device_probe, qcom_gcc_ipq4018_probe),
- DEVMETHOD(device_attach, qcom_gcc_ipq4018_attach),
- DEVMETHOD(device_detach, qcom_gcc_ipq4018_detach),
+ DEVMETHOD(device_probe, qcom_gcc_probe),
+ DEVMETHOD(device_attach, qcom_gcc_attach),
+ DEVMETHOD(device_detach, qcom_gcc_detach),
/* Reset interface */
- DEVMETHOD(hwreset_assert, qcom_gcc_ipq4018_hwreset_assert),
- DEVMETHOD(hwreset_is_asserted, qcom_gcc_ipq4018_hwreset_is_asserted),
+ DEVMETHOD(hwreset_assert, qcom_gcc_hwreset_assert),
+ DEVMETHOD(hwreset_is_asserted, qcom_gcc_hwreset_is_asserted),
/* Clock interface */
- DEVMETHOD(clkdev_read_4, qcom_gcc_ipq4018_clock_read),
- DEVMETHOD(clkdev_write_4, qcom_gcc_ipq4018_clock_write),
- DEVMETHOD(clkdev_modify_4, qcom_gcc_ipq4018_clock_modify),
- DEVMETHOD(clkdev_device_lock, qcom_gcc_ipq4018_clock_lock),
- DEVMETHOD(clkdev_device_unlock, qcom_gcc_ipq4018_clock_unlock),
+ DEVMETHOD(clkdev_read_4, qcom_gcc_clock_read),
+ DEVMETHOD(clkdev_write_4, qcom_gcc_clock_write),
+ DEVMETHOD(clkdev_modify_4, qcom_gcc_clock_modify),
+ DEVMETHOD(clkdev_device_lock, qcom_gcc_clock_lock),
+ DEVMETHOD(clkdev_device_unlock, qcom_gcc_clock_unlock),
DEVMETHOD_END
};
-static driver_t qcom_gcc_ipq4018_driver = {
+static driver_t qcom_gcc_driver = {
"qcom_gcc",
- qcom_gcc_ipq4018_methods,
- sizeof(struct qcom_gcc_ipq4018_softc)
+ qcom_gcc_methods,
+ sizeof(struct qcom_gcc_softc)
};
-EARLY_DRIVER_MODULE(qcom_gcc_ipq4018, simplebus, qcom_gcc_ipq4018_driver,
- qcom_gcc_ipq4018_modevent, NULL, BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
-EARLY_DRIVER_MODULE(qcom_gcc_ipq4018, ofwbus, qcom_gcc_ipq4018_driver,
- qcom_gcc_ipq4018_modevent, NULL, BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
-MODULE_VERSION(qcom_gcc_ipq4018, 1);
+EARLY_DRIVER_MODULE(qcom_gcc, simplebus, qcom_gcc_driver,
+ qcom_gcc_modevent, NULL, BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
+EARLY_DRIVER_MODULE(qcom_gcc, ofwbus, qcom_gcc_driver,
+ qcom_gcc_modevent, NULL, BUS_PASS_CPU + BUS_PASS_ORDER_EARLY);
+MODULE_VERSION(qcom_gcc, 1);
diff --git a/sys/dev/qcom_gcc/qcom_gcc_reset.c b/sys/dev/qcom_gcc/qcom_gcc_reset.c
new file mode 100644
index 000000000000..05ea817fbcc4
--- /dev/null
+++ b/sys/dev/qcom_gcc/qcom_gcc_reset.c
@@ -0,0 +1,64 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2021, Adrian Chadd <adrian@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/random.h>
+#include <sys/stdatomic.h>
+#include <sys/mutex.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/bus.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/hwreset/hwreset.h>
+
+#include "hwreset_if.h"
+
+#include "qcom_gcc_var.h"
+
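+/*
+ * Thin wrappers around the per-chipset reset callbacks installed at
+ * attach time (e.g. by qcom_gcc_ipq4018_hwreset_init()).
+ */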
+int
+qcom_gcc_hwreset_assert(device_t dev, intptr_t id, bool reset)
+{
+	struct qcom_gcc_softc *sc = device_get_softc(dev);
+
+	return (sc->sc_cb.hw_reset_assert(dev, id, reset));
+}
+
+int
+qcom_gcc_hwreset_is_asserted(device_t dev, intptr_t id, bool *reset)
+{
+ struct qcom_gcc_softc *sc = device_get_softc(dev);
+
+ return (sc->sc_cb.hw_reset_is_asserted(dev, id, reset));
+}
diff --git a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_var.h b/sys/dev/qcom_gcc/qcom_gcc_var.h
index b3c54f1a7f73..2d4e969e1134 100644
--- a/sys/dev/qcom_gcc/qcom_gcc_ipq4018_var.h
+++ b/sys/dev/qcom_gcc/qcom_gcc_var.h
@@ -25,41 +25,56 @@
* SUCH DAMAGE.
*/
-#ifndef __QCOM_GCC_IPQ4018_VAR_H__
-#define __QCOM_GCC_IPQ4018_VAR_H__
+#ifndef __QCOM_GCC_VAR_H__
+#define __QCOM_GCC_VAR_H__
-struct qcom_gcc_ipq4018_reset_entry {
+typedef enum {
+ QCOM_GCC_CHIPSET_NONE = 0,
+ QCOM_GCC_CHIPSET_IPQ4018 = 1,
+} qcom_gcc_chipset_t;
+
+struct qcom_gcc_reset_entry {
uint32_t reg;
uint32_t bit;
};
-struct qcom_gcc_ipq4018_softc {
+struct qcom_gcc_hw_callbacks {
+ /* Reset block */
+ int (*hw_reset_assert)(device_t, intptr_t, bool);
+ int (*hw_reset_is_asserted)(device_t, intptr_t, bool *);
+
+ /* Clock block */
+};
+
+struct qcom_gcc_softc {
device_t dev;
int reg_rid;
struct resource *reg;
struct mtx mtx;
struct clkdom *clkdom;
+ qcom_gcc_chipset_t sc_chipset;
+ struct qcom_gcc_hw_callbacks sc_cb;
};
/*
* reset block
*/
-extern int qcom_gcc_ipq4018_hwreset_assert(device_t dev, intptr_t id,
+extern int qcom_gcc_hwreset_assert(device_t dev, intptr_t id,
bool reset);
-extern int qcom_gcc_ipq4018_hwreset_is_asserted(device_t dev, intptr_t id,
+extern int qcom_gcc_hwreset_is_asserted(device_t dev, intptr_t id,
bool *reset);
/*
* clock block
*/
-extern int qcom_gcc_ipq4018_clock_read(device_t dev, bus_addr_t addr,
+extern int qcom_gcc_clock_read(device_t dev, bus_addr_t addr,
uint32_t *val);
-extern int qcom_gcc_ipq4018_clock_write(device_t dev, bus_addr_t addr,
+extern int qcom_gcc_clock_write(device_t dev, bus_addr_t addr,
uint32_t val);
-extern int qcom_gcc_ipq4018_clock_modify(device_t dev, bus_addr_t addr,
+extern int qcom_gcc_clock_modify(device_t dev, bus_addr_t addr,
uint32_t clear_mask, uint32_t set_mask);
-extern void qcom_gcc_ipq4018_clock_setup(struct qcom_gcc_ipq4018_softc *sc);
-extern void qcom_gcc_ipq4018_clock_lock(device_t dev);
-extern void qcom_gcc_ipq4018_clock_unlock(device_t dev);
+extern void qcom_gcc_clock_setup(struct qcom_gcc_softc *sc);
+extern void qcom_gcc_clock_lock(device_t dev);
+extern void qcom_gcc_clock_unlock(device_t dev);
-#endif /* __QCOM_GCC_IPQ4018_VAR_H__ */
+#endif /* __QCOM_GCC_VAR_H__ */
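A note on the shape of this refactor: the chipset-neutral wrappers above (qcom_gcc_hwreset_assert() and friends) dispatch through the new sc_cb callback table, so each supported chipset only has to fill in its own implementations at attach time. A minimal sketch of how an IPQ4018 attach path might populate the table; the qcom_gcc_ipq4018_hw_* names here are hypothetical stand-ins for the chipset-specific routines:

static int
qcom_gcc_ipq4018_attach(device_t dev)
{
	struct qcom_gcc_softc *sc = device_get_softc(dev);

	sc->dev = dev;
	sc->sc_chipset = QCOM_GCC_CHIPSET_IPQ4018;

	/* Wire up the chipset-specific reset handlers (hypothetical names). */
	sc->sc_cb.hw_reset_assert = qcom_gcc_ipq4018_hw_reset_assert;
	sc->sc_cb.hw_reset_is_asserted = qcom_gcc_ipq4018_hw_reset_is_asserted;

	return (0);
}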
diff --git a/sys/dev/qcom_qup/qcom_spi.c b/sys/dev/qcom_qup/qcom_spi.c
index b7e4d6519a36..87e70d531324 100644
--- a/sys/dev/qcom_qup/qcom_spi.c
+++ b/sys/dev/qcom_qup/qcom_spi.c
@@ -420,7 +420,7 @@ qcom_spi_attach(device_t dev)
}
QCOM_SPI_UNLOCK(sc);
- sc->spibus = device_add_child(dev, "spibus", -1);
+ sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
/* We're done, so shut down the interface clock for now */
device_printf(dev, "DONE: shutting down interface clock for now\n");
@@ -429,7 +429,8 @@ qcom_spi_attach(device_t dev)
/* Register for debug sysctl */
qcom_spi_sysctl_attach(sc);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
error:
if (sc->sc_irq_h)
bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h);
@@ -839,8 +840,6 @@ qcom_spi_detach(device_t dev)
int i;
bus_generic_detach(sc->sc_dev);
- if (sc->spibus != NULL)
- device_delete_child(dev, sc->spibus);
if (sc->sc_irq_h)
bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_h);
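The qcom_spi hunks above follow the tree-wide newbus conversion: children are added with DEVICE_UNIT_ANY rather than -1, attach finishes with bus_attach_children() and returns 0 instead of returning bus_generic_attach(), and bus_generic_detach() now deletes children itself, which is why the explicit device_delete_child() call is dropped. A minimal sketch of the resulting pattern, using hypothetical foo_* names:

static int
foo_attach(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);

	sc->child = device_add_child(dev, "foobus", DEVICE_UNIT_ANY);
	bus_attach_children(dev);
	return (0);
}

static int
foo_detach(device_t dev)
{
	int error;

	/* Detaches and deletes all children; no device_delete_child(). */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);
	/* ...tear down interrupts and release remaining resources... */
	return (0);
}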
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index fdd0b553523e..a5ece7e00f28 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -63,7 +63,7 @@ static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);
-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
.rs_ident = "Qualcomm Entropy Adapter",
.rs_source = RANDOM_PURE_QUALCOMM,
.rs_read = qcom_rnd_read,
diff --git a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
index 2d390cd449af..50f54b896748 100644
--- a/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
+++ b/sys/dev/qcom_tlmm/qcom_tlmm_ipq4018.c
@@ -346,13 +346,14 @@ qcom_tlmm_ipq4018_attach(device_t dev)
fdt_pinctrl_register(dev, NULL);
fdt_pinctrl_configure_by_name(dev, "default");
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
device_printf(dev, "%s: failed to attach bus\n", __func__);
qcom_tlmm_ipq4018_detach(dev);
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/qlnx/qlnxe/bcm_osal.h b/sys/dev/qlnx/qlnxe/bcm_osal.h
index 5d940d3272d6..7148fd3f6215 100644
--- a/sys/dev/qlnx/qlnxe/bcm_osal.h
+++ b/sys/dev/qlnx/qlnxe/bcm_osal.h
@@ -72,7 +72,7 @@ extern void qlnx_dma_free_coherent(void *ecore_dev, void *v_addr,
bus_addr_t phys, uint32_t size);
extern void qlnx_link_update(void *p_hwfn);
-extern void qlnx_barrier(void *p_hwfn);
+extern void qlnx_barrier(void *p_dev);
extern void *qlnx_zalloc(uint32_t size);
@@ -102,24 +102,6 @@ extern void qlnx_vf_flr_update(void *p_hwfn);
#ifndef QLNX_RDMA
-static __inline unsigned long
-roundup_pow_of_two(unsigned long x)
-{
- return (1UL << flsl(x - 1));
-}
-
-static __inline int
-is_power_of_2(unsigned long n)
-{
- return (n == roundup_pow_of_two(n));
-}
-
-static __inline unsigned long
-rounddown_pow_of_two(unsigned long x)
-{
- return (1UL << (flsl(x) - 1));
-}
-
#define max_t(type, val1, val2) \
((type)(val1) > (type)(val2) ? (type)(val1) : (val2))
#define min_t(type, val1, val2) \
@@ -213,14 +195,14 @@ typedef struct osal_list_t
#define OSAL_SPIN_LOCK_ALLOC(p_hwfn, mutex)
#define OSAL_SPIN_LOCK_DEALLOC(mutex) mtx_destroy(mutex)
#define OSAL_SPIN_LOCK_INIT(lock) {\
- mtx_init(lock, __func__, MTX_NETWORK_LOCK, MTX_SPIN); \
+ mtx_init(lock, __func__, "OSAL spin lock", MTX_SPIN); \
}
#define OSAL_SPIN_UNLOCK(lock) {\
- mtx_unlock(lock); \
+ mtx_unlock_spin(lock); \
}
#define OSAL_SPIN_LOCK(lock) {\
- mtx_lock(lock); \
+ mtx_lock_spin(lock); \
}
#define OSAL_MUTEX_ALLOC(p_hwfn, mutex)
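The OSAL_SPIN_LOCK fix above matters for correctness, not just style: a FreeBSD mutex initialized with MTX_SPIN must be taken and released with the _spin variants, and calling plain mtx_lock() on a spin mutex trips an assertion on INVARIANTS kernels. A minimal sketch of the required pairing, with hypothetical example_* names:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_lock;

static void
example_init(void)
{
	mtx_init(&example_lock, "example spin lock", NULL, MTX_SPIN);
}

static void
example_use(void)
{
	mtx_lock_spin(&example_lock);	/* not mtx_lock() for MTX_SPIN */
	/* ...short critical section; sleeping here is not allowed... */
	mtx_unlock_spin(&example_lock);
}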
diff --git a/sys/dev/qlnx/qlnxe/ecore.h b/sys/dev/qlnx/qlnxe/ecore.h
index 8fcbc1f8d8a0..eda7c260ba99 100644
--- a/sys/dev/qlnx/qlnxe/ecore.h
+++ b/sys/dev/qlnx/qlnxe/ecore.h
@@ -790,6 +790,7 @@ struct ecore_dev {
u8 dp_level;
char name[NAME_SIZE];
void *dp_ctx;
+ void *ha;
enum ecore_dev_type type;
/* Translate type/revision combo into the proper conditions */
diff --git a/sys/dev/qlnx/qlnxe/ecore_dev.c b/sys/dev/qlnx/qlnxe/ecore_dev.c
index 6187ecdbc446..389a95a4164c 100644
--- a/sys/dev/qlnx/qlnxe/ecore_dev.c
+++ b/sys/dev/qlnx/qlnxe/ecore_dev.c
@@ -5268,7 +5268,7 @@ ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
}
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Read default link: Speed 0x%08x, Adv. Speed 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%08x usec]\n",
+ "Read default link: Speed %u Mb/sec, Adv. Speeds 0x%08x, AN: 0x%02x, PAUSE AN: 0x%02x EEE: %02x [%u usec]\n",
link->speed.forced_speed, link->speed.advertised_speeds,
link->speed.autoneg, link->pause.autoneg,
p_caps->default_eee, p_caps->eee_lpi_timer);
@@ -6860,7 +6860,7 @@ int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
p_hwfn->qm_info.pf_rl);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
+ "Configured MAX bandwidth to be %u Mb/sec\n",
p_link->speed);
return rc;
@@ -6918,7 +6918,7 @@ int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configured MIN bandwidth to be %d Mb/sec\n",
+ "Configured MIN bandwidth to be %u Mb/sec\n",
p_link->min_pf_rate);
return rc;
diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.c b/sys/dev/qlnx/qlnxe/ecore_mcp.c
index d94c7382edc5..6d1e5fe24d06 100644
--- a/sys/dev/qlnx/qlnxe/ecore_mcp.c
+++ b/sys/dev/qlnx/qlnxe/ecore_mcp.c
@@ -191,17 +191,17 @@ enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_cmd_elem, p_tmp,
&p_hwfn->mcp_info->cmd_list, list,
struct ecore_mcp_cmd_elem) {
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
}
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
#ifdef CONFIG_ECORE_LOCK_ALLOC
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
- OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->link_lock);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_DEALLOC(&p_hwfn->mcp_info->link_lock);
#endif
}
@@ -308,18 +308,18 @@ enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
/* Initialize the MFW spinlocks */
#ifdef CONFIG_ECORE_LOCK_ALLOC
- if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
+ if (OSAL_MUTEX_LOCK_ALLOC(p_hwfn, &p_info->cmd_lock)) {
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_NOMEM;
}
- if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->link_lock)) {
- OSAL_SPIN_LOCK_DEALLOC(&p_info->cmd_lock);
+ if (OSAL_MUTEX_ALLOC(p_hwfn, &p_info->link_lock)) {
+ OSAL_MUTEX_DEALLOC(&p_info->cmd_lock);
OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
return ECORE_NOMEM;
}
#endif
- OSAL_SPIN_LOCK_INIT(&p_info->cmd_lock);
- OSAL_SPIN_LOCK_INIT(&p_info->link_lock);
+ OSAL_MUTEX_INIT(&p_info->cmd_lock);
+ OSAL_MUTEX_INIT(&p_info->link_lock);
OSAL_LIST_INIT(&p_info->cmd_list);
@@ -381,7 +381,7 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
}
/* Ensure that only a single thread is accessing the mailbox */
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
@@ -407,7 +407,7 @@ enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
rc = ECORE_AGAIN;
}
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
@@ -551,7 +551,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
* The spinlock stays locked until the command is sent.
*/
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
if (!ecore_mcp_has_pending_cmd(p_hwfn))
break;
@@ -562,7 +562,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
else if (rc != ECORE_AGAIN)
goto err;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
if (ECORE_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
OSAL_MSLEEP(msecs);
} else {
@@ -588,7 +588,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
__ecore_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
/* Wait for the MFW response */
do {
@@ -602,7 +602,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
} else {
OSAL_UDELAY(usecs);
}
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
if (p_cmd_elem->b_is_completed)
break;
@@ -613,7 +613,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
else if (rc != ECORE_AGAIN)
goto err;
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
OSAL_MFW_CMD_PREEMPT(p_hwfn);
} while (++cnt < max_retries);
@@ -623,9 +623,9 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
p_mb_params->cmd, p_mb_params->param);
ecore_mcp_print_cpu_info(p_hwfn, p_ptt);
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->cmd_lock);
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
if (!ECORE_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
ecore_mcp_cmd_set_blocking(p_hwfn, true);
@@ -634,7 +634,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
}
ecore_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
"MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
@@ -647,7 +647,7 @@ _ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
return ECORE_SUCCESS;
err:
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->cmd_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->cmd_lock);
return rc;
}
@@ -1439,7 +1439,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
u32 status = 0;
/* Prevent SW/attentions from doing this at the same time */
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock);
p_link = &p_hwfn->mcp_info->link_output;
OSAL_MEMSET(p_link, 0, sizeof(*p_link));
@@ -1585,7 +1585,7 @@ static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
OSAL_LINK_UPDATE(p_hwfn, p_ptt);
out:
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock);
}
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
@@ -1638,7 +1638,7 @@ enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
if (b_up)
DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
- "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
+ "Configuring Link: Speed %u Mb/sec, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x\n",
phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
phy_cfg.loopback_mode);
else
@@ -1774,7 +1774,7 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
struct public_func shmem_info;
u32 resp = 0, param = 0;
- OSAL_SPIN_LOCK(&p_hwfn->mcp_info->link_lock);
+ OSAL_MUTEX_ACQUIRE(&p_hwfn->mcp_info->link_lock);
ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
MCP_PF_ID(p_hwfn));
@@ -1787,7 +1787,7 @@ ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
- OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->link_lock);
+ OSAL_MUTEX_RELEASE(&p_hwfn->mcp_info->link_lock);
/* Acknowledge the MFW */
ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
diff --git a/sys/dev/qlnx/qlnxe/ecore_mcp.h b/sys/dev/qlnx/qlnxe/ecore_mcp.h
index c94583cdfba3..edb1f9083467 100644
--- a/sys/dev/qlnx/qlnxe/ecore_mcp.h
+++ b/sys/dev/qlnx/qlnxe/ecore_mcp.h
@@ -51,10 +51,10 @@ struct ecore_mcp_info {
/* List for mailbox commands which were sent and wait for a response */
osal_list_t cmd_list;
- /* Spinlock used for protecting the access to the mailbox commands list
+ /* Lock used for protecting the access to the mailbox commands list
* and the sending of the commands.
*/
- osal_spinlock_t cmd_lock;
+ osal_mutex_t cmd_lock;
/* Flag to indicate whether sending a MFW mailbox command is blocked */
bool b_block_cmd;
@@ -62,7 +62,7 @@ struct ecore_mcp_info {
/* Spinlock used for syncing SW link-changes and link-changes
* originating from attention context.
*/
- osal_spinlock_t link_lock;
+ osal_mutex_t link_lock;
/* Address of the MCP public area */
u32 public_base;
diff --git a/sys/dev/qlnx/qlnxe/qlnx_def.h b/sys/dev/qlnx/qlnxe/qlnx_def.h
index 8ac403ab49dc..796845f3f8c6 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_def.h
+++ b/sys/dev/qlnx/qlnxe/qlnx_def.h
@@ -391,7 +391,7 @@ struct qlnx_host {
int msix_count;
- struct mtx hw_lock;
+ struct sx hw_lock;
/* debug */
@@ -696,22 +696,6 @@ extern int qlnx_alloc_mem_sb(qlnx_host_t *ha, struct ecore_sb_info *sb_info,
* Some OS specific stuff
*/
-#if (defined IFM_100G_SR4)
-#define QLNX_IFM_100G_SR4 IFM_100G_SR4
-#define QLNX_IFM_100G_LR4 IFM_100G_LR4
-#define QLNX_IFM_100G_CR4 IFM_100G_CR4
-#else
-#define QLNX_IFM_100G_SR4 IFM_UNKNOWN
-#define QLNX_IFM_100G_LR4 IFM_UNKNOWN
-#endif /* #if (defined IFM_100G_SR4) */
-
-#if (defined IFM_25G_SR)
-#define QLNX_IFM_25G_SR IFM_25G_SR
-#define QLNX_IFM_25G_CR IFM_25G_CR
-#else
-#define QLNX_IFM_25G_SR IFM_UNKNOWN
-#define QLNX_IFM_25G_CR IFM_UNKNOWN
-#endif /* #if (defined IFM_25G_SR) */
#define QLNX_INC_IERRORS(ifp) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1)
#define QLNX_INC_IQDROPS(ifp) if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1)
diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.c b/sys/dev/qlnx/qlnxe/qlnx_os.c
index 0ef3c93580b6..9963f472c615 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_os.c
+++ b/sys/dev/qlnx/qlnxe/qlnx_os.c
@@ -30,6 +30,8 @@
* Author : David C Somayajulu, Cavium, Inc., San Jose, CA 95131.
*/
+#include "opt_inet.h"
+
#include <sys/cdefs.h>
#include "qlnx_os.h"
#include "bcm_osal.h"
@@ -90,8 +92,8 @@ static void qlnx_init_ifnet(device_t dev, qlnx_host_t *ha);
static void qlnx_init(void *arg);
static void qlnx_init_locked(qlnx_host_t *ha);
static int qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi);
-static int qlnx_set_promisc(qlnx_host_t *ha);
-static int qlnx_set_allmulti(qlnx_host_t *ha);
+static int qlnx_set_promisc(qlnx_host_t *ha, int enabled);
+static int qlnx_set_allmulti(qlnx_host_t *ha, int enabled);
static int qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int qlnx_media_change(if_t ifp);
static void qlnx_media_status(if_t ifp, struct ifmediareq *ifmr);
@@ -227,7 +229,6 @@ MODULE_DEPEND(if_qlnxev, ether, 1, 1, 1);
MALLOC_DEFINE(M_QLNXBUF, "qlnxbuf", "Buffers for qlnx driver");
-char qlnx_dev_str[128];
char qlnx_ver_str[VER_SIZE];
char qlnx_name_str[NAME_SIZE];
@@ -374,60 +375,48 @@ qlnx_pci_probe(device_t dev)
#ifndef QLNX_VF
case QLOGIC_PCI_DEVICE_ID_1644:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic 100GbE PCI CNA Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
case QLOGIC_PCI_DEVICE_ID_1634:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic 40GbE PCI CNA Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
case QLOGIC_PCI_DEVICE_ID_1656:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic 25GbE PCI CNA Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
case QLOGIC_PCI_DEVICE_ID_1654:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic 50GbE PCI CNA Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
case QLOGIC_PCI_DEVICE_ID_8070:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic 10GbE/25GbE/40GbE PCI CNA (AH)"
" Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
#else
case QLOGIC_PCI_DEVICE_ID_8090:
- snprintf(qlnx_dev_str, sizeof(qlnx_dev_str), "%s v%d.%d.%d",
+ device_set_descf(dev, "%s v%d.%d.%d",
"Qlogic SRIOV PCI CNA (AH) "
"Adapter-Ethernet Function",
QLNX_VERSION_MAJOR, QLNX_VERSION_MINOR,
QLNX_VERSION_BUILD);
- device_set_desc_copy(dev, qlnx_dev_str);
-
break;
#endif /* #ifndef QLNX_VF */
@@ -763,7 +752,7 @@ qlnx_pci_attach(device_t dev)
ha->pci_dev = dev;
- mtx_init(&ha->hw_lock, "qlnx_hw_lock", MTX_NETWORK_LOCK, MTX_DEF);
+ sx_init(&ha->hw_lock, "qlnx_hw_lock");
ha->flags.lock_init = 1;
@@ -1207,6 +1196,7 @@ qlnx_init_hw(qlnx_host_t *ha)
int rval = 0;
struct ecore_hw_prepare_params params;
+ ha->cdev.ha = ha;
ecore_init_struct(&ha->cdev);
/* ha->dp_module = ECORE_MSG_PROBE |
@@ -1351,7 +1341,7 @@ qlnx_release(qlnx_host_t *ha)
pci_release_msi(dev);
if (ha->flags.lock_init) {
- mtx_destroy(&ha->hw_lock);
+ sx_destroy(&ha->hw_lock);
}
if (ha->pci_reg)
@@ -2304,10 +2294,6 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
if_t ifp;
ifp = ha->ifp = if_alloc(IFT_ETHER);
-
- if (ifp == NULL)
- panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
device_id = pci_get_device(ha->pci_dev);
@@ -2322,8 +2308,6 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
else if (device_id == QLOGIC_PCI_DEVICE_ID_1644)
if_setbaudrate(ifp, IF_Gbps(100));
- if_setcapabilities(ifp, IFCAP_LINKSTATE);
-
if_setinitfn(ifp, qlnx_init);
if_setsoftc(ifp, ha);
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -2355,12 +2339,8 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ha->primary_mac[5] = (rnd >> 16) & 0xFF;
}
- ether_ifattach(ifp, ha->primary_mac);
- bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
-
if_setcapabilities(ifp, IFCAP_HWCSUM);
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
-
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWTAGGING, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWFILTER, 0);
@@ -2369,6 +2349,8 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
if_setcapabilitiesbit(ifp, IFCAP_TSO4, 0);
if_setcapabilitiesbit(ifp, IFCAP_TSO6, 0);
if_setcapabilitiesbit(ifp, IFCAP_LRO, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_LINKSTATE, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
if_sethwtsomax(ifp, QLNX_MAX_TSO_FRAME_SIZE -
(ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN));
@@ -2393,18 +2375,15 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ifmedia_add(&ha->media, (IFM_ETHER | IFM_40G_CR4), 0, NULL);
} else if ((device_id == QLOGIC_PCI_DEVICE_ID_1656) ||
(device_id == QLOGIC_PCI_DEVICE_ID_8070)) {
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_SR), 0, NULL);
- ifmedia_add(&ha->media, (IFM_ETHER | QLNX_IFM_25G_CR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_SR), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_25G_CR), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1654) {
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_KR2), 0, NULL);
ifmedia_add(&ha->media, (IFM_ETHER | IFM_50G_CR2), 0, NULL);
} else if (device_id == QLOGIC_PCI_DEVICE_ID_1644) {
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_LR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_SR4), 0, NULL);
- ifmedia_add(&ha->media,
- (IFM_ETHER | QLNX_IFM_100G_CR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_LR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_SR4), 0, NULL);
+ ifmedia_add(&ha->media, (IFM_ETHER | IFM_100G_CR4), 0, NULL);
}
ifmedia_add(&ha->media, (IFM_ETHER | IFM_FDX), 0, NULL);
@@ -2412,6 +2391,9 @@ qlnx_init_ifnet(device_t dev, qlnx_host_t *ha)
ifmedia_set(&ha->media, (IFM_ETHER | IFM_AUTO));
+ ether_ifattach(ifp, ha->primary_mac);
+ bcopy(if_getlladdr(ha->ifp), ha->primary_mac, ETHER_ADDR_LEN);
+
QL_DPRINT2(ha, "exit\n");
return;
@@ -2595,7 +2577,7 @@ qlnx_set_multi(qlnx_host_t *ha, uint32_t add_multi)
}
static int
-qlnx_set_promisc(qlnx_host_t *ha)
+qlnx_set_promisc(qlnx_host_t *ha, int enabled)
{
int rc = 0;
uint8_t filter;
@@ -2604,15 +2586,20 @@ qlnx_set_promisc(qlnx_host_t *ha)
return (0);
filter = ha->filter;
- filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
- filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ if (enabled) {
+ filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
+ } else {
+ filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
+ filter &= ~ECORE_ACCEPT_UCAST_UNMATCHED;
+ }
rc = qlnx_set_rx_accept_filter(ha, filter);
return (rc);
}
static int
-qlnx_set_allmulti(qlnx_host_t *ha)
+qlnx_set_allmulti(qlnx_host_t *ha, int enabled)
{
int rc = 0;
uint8_t filter;
@@ -2621,7 +2608,11 @@ qlnx_set_allmulti(qlnx_host_t *ha)
return (0);
filter = ha->filter;
- filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ if (enabled) {
+ filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else {
+ filter &= ~ECORE_ACCEPT_MCAST_UNMATCHED;
+ }
rc = qlnx_set_rx_accept_filter(ha, filter);
return (rc);
@@ -2631,6 +2622,7 @@ static int
qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
int ret = 0, mask;
+ int flags;
struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
struct ifaddr *ifa = (struct ifaddr *)data;
@@ -2684,15 +2676,16 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
QL_DPRINT4(ha, "SIOCSIFFLAGS (0x%lx)\n", cmd);
QLNX_LOCK(ha);
+ flags = if_getflags(ifp);
- if (if_getflags(ifp) & IFF_UP) {
+ if (flags & IFF_UP) {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) {
- if ((if_getflags(ifp) ^ ha->if_flags) &
+ if ((flags ^ ha->if_flags) &
IFF_PROMISC) {
- ret = qlnx_set_promisc(ha);
+ ret = qlnx_set_promisc(ha, flags & IFF_PROMISC);
} else if ((if_getflags(ifp) ^ ha->if_flags) &
IFF_ALLMULTI) {
- ret = qlnx_set_allmulti(ha);
+ ret = qlnx_set_allmulti(ha, flags & IFF_ALLMULTI);
}
} else {
ha->max_frame_size = if_getmtu(ifp) +
@@ -2702,9 +2695,9 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
} else {
if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
qlnx_stop(ha);
- ha->if_flags = if_getflags(ifp);
}
+ ha->if_flags = if_getflags(ifp);
QLNX_UNLOCK(ha);
break;
@@ -2728,7 +2721,9 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
case SIOCSIFMEDIA:
case SIOCGIFMEDIA:
- QL_DPRINT4(ha, "SIOCSIFMEDIA/SIOCGIFMEDIA (0x%lx)\n", cmd);
+ case SIOCGIFXMEDIA:
+ QL_DPRINT4(ha,
+ "SIOCSIFMEDIA/SIOCGIFMEDIA/SIOCGIFXMEDIA (0x%lx)\n", cmd);
ret = ifmedia_ioctl(ifp, ifr, &ha->media, cmd);
break;
@@ -2783,7 +2778,7 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
if (!p_ptt) {
QL_DPRINT1(ha, "ecore_ptt_acquire failed\n");
- ret = -1;
+ ret = ERESTART;
break;
}
@@ -2794,7 +2789,7 @@ qlnx_ioctl(if_t ifp, u_long cmd, caddr_t data)
ecore_ptt_release(p_hwfn, p_ptt);
if (ret) {
- ret = -1;
+ ret = ENODEV;
break;
}
@@ -3812,11 +3807,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_MODULE_FIBER:
case MEDIA_UNSPECIFIED:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_SR4;
+ ifm_type = IFM_100G_SR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_SR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_SR;
+ ifm_type = IFM_25G_SR;
else if (if_link->speed == (10 * 1000))
ifm_type = (IFM_10G_LR | IFM_10G_SR);
else if (if_link->speed == (1 * 1000))
@@ -3826,11 +3821,11 @@ qlnx_get_optics(qlnx_host_t *ha, struct qlnx_link_output *if_link)
case MEDIA_DA_TWINAX:
if (if_link->speed == (100 * 1000))
- ifm_type = QLNX_IFM_100G_CR4;
+ ifm_type = IFM_100G_CR4;
else if (if_link->speed == (40 * 1000))
ifm_type = IFM_40G_CR4;
else if (if_link->speed == (25 * 1000))
- ifm_type = QLNX_IFM_25G_CR;
+ ifm_type = IFM_25G_CR;
else if (if_link->speed == (10 * 1000))
ifm_type = IFM_10G_TWINAX;
@@ -5387,11 +5382,11 @@ qlnx_zalloc(uint32_t size)
}
void
-qlnx_barrier(void *p_hwfn)
+qlnx_barrier(void *p_dev)
{
qlnx_host_t *ha;
- ha = (qlnx_host_t *)((struct ecore_hwfn *)p_hwfn)->p_dev;
+ ha = ((struct ecore_dev *) p_dev)->ha;
bus_barrier(ha->pci_reg, 0, 0, BUS_SPACE_BARRIER_WRITE);
}
@@ -7062,8 +7057,19 @@ qlnx_set_rx_mode(qlnx_host_t *ha)
{
int rc = 0;
uint8_t filter;
+ const if_t ifp = ha->ifp;
+ const struct ifaddr *ifa;
+ struct sockaddr_dl *sdl;
- rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
+ ifa = if_getifaddr(ifp);
+ if (if_gettype(ifp) == IFT_ETHER && ifa != NULL &&
+ ifa->ifa_addr != NULL) {
+ sdl = (struct sockaddr_dl *) ifa->ifa_addr;
+
+ rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, LLADDR(sdl));
+ } else {
+ rc = qlnx_set_ucast_rx_mac(ha, ECORE_FILTER_REPLACE, ha->primary_mac);
+ }
if (rc)
return rc;
@@ -7075,9 +7081,11 @@ qlnx_set_rx_mode(qlnx_host_t *ha)
ECORE_ACCEPT_MCAST_MATCHED |
ECORE_ACCEPT_BCAST;
- if (qlnx_vf_device(ha) == 0) {
+ if (qlnx_vf_device(ha) == 0 || (if_getflags(ha->ifp) & IFF_PROMISC)) {
filter |= ECORE_ACCEPT_UCAST_UNMATCHED;
filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
+ } else if (if_getflags(ha->ifp) & IFF_ALLMULTI) {
+ filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
}
ha->filter = filter;
diff --git a/sys/dev/qlnx/qlnxe/qlnx_os.h b/sys/dev/qlnx/qlnxe/qlnx_os.h
index 261283fb6eaf..6d717d0e70bf 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_os.h
+++ b/sys/dev/qlnx/qlnxe/qlnx_os.h
@@ -130,8 +130,8 @@ MALLOC_DECLARE(M_QLNXBUF);
/*
* Locks
*/
-#define QLNX_LOCK(ha) mtx_lock(&ha->hw_lock)
-#define QLNX_UNLOCK(ha) mtx_unlock(&ha->hw_lock)
+#define QLNX_LOCK(ha) sx_xlock(&ha->hw_lock)
+#define QLNX_UNLOCK(ha) sx_xunlock(&ha->hw_lock)
/*
* structure encapsulating a DMA buffer
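Converting hw_lock from a mutex to an sx lock (here and in qlnx_def.h earlier) makes the paths under QLNX_LOCK() sleepable: unlike a default mutex, an sx lock may be held while sleeping, which suits ioctl paths that wait on firmware. A minimal sketch of the pairing, with hypothetical example_* names:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct sx example_hw_lock;

static void
example_init(void)
{
	sx_init(&example_hw_lock, "example hw lock");
}

static void
example_slow_op(void)
{
	sx_xlock(&example_hw_lock);
	pause("exwait", hz / 10);	/* sleeping while holding an sx is legal */
	sx_xunlock(&example_hw_lock);
}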
diff --git a/sys/dev/qlnx/qlnxe/qlnx_rdma.c b/sys/dev/qlnx/qlnxe/qlnx_rdma.c
index 77b25a017ade..a69f0efd095a 100644
--- a/sys/dev/qlnx/qlnxe/qlnx_rdma.c
+++ b/sys/dev/qlnx/qlnxe/qlnx_rdma.c
@@ -253,8 +253,6 @@ qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if)
int ret = 0;
qlnx_host_t *ha;
- printf("%s: enter rdma_if = %p\n", __func__, rdma_if);
-
if (mtx_initialized(&qlnx_rdma_dev_lock)) {
mtx_lock(&qlnx_rdma_dev_lock);
@@ -285,7 +283,6 @@ qlnx_rdma_deregister_if(qlnx_rdma_if_t *rdma_if)
mtx_unlock(&qlnx_rdma_dev_lock);
}
- printf("%s: exit rdma_if = %p\n", __func__, rdma_if);
return (ret);
}
diff --git a/sys/dev/qlxgb/qla_os.c b/sys/dev/qlxgb/qla_os.c
index b9a0c1ec07a3..87e504a83c79 100644
--- a/sys/dev/qlxgb/qla_os.c
+++ b/sys/dev/qlxgb/qla_os.c
@@ -658,9 +658,6 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
ifp = ha->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setmtu(ifp, ETHERMTU);
diff --git a/sys/dev/qlxgbe/ql_ioctl.c b/sys/dev/qlxgbe/ql_ioctl.c
index b34a9cc508c0..c95cbd9df688 100644
--- a/sys/dev/qlxgbe/ql_ioctl.c
+++ b/sys/dev/qlxgbe/ql_ioctl.c
@@ -652,8 +652,8 @@ ql_slowpath_log(qla_host_t *ha, qla_sp_log_t *log)
log->num_entries = ha->hw.sp_log_num_entries;
}
device_printf(ha->pci_dev,
- "%s: exit [rval = %d][%p, next_idx = %d, %d entries, %d bytes]\n",
- __func__, rval, log->buffer, log->next_idx, log->num_entries, size);
+ "%s: exit [rval = %d][next_idx = %d, %d entries, %d bytes]\n",
+ __func__, rval, log->next_idx, log->num_entries, size);
mtx_unlock(&ha->sp_log_lock);
return (rval);
diff --git a/sys/dev/qlxgbe/ql_isr.c b/sys/dev/qlxgbe/ql_isr.c
index 62ad81b2a607..076cad567801 100644
--- a/sys/dev/qlxgbe/ql_isr.c
+++ b/sys/dev/qlxgbe/ql_isr.c
@@ -280,7 +280,7 @@ qla_lro_intr(qla_host_t *ha, qla_sgl_lro_t *sgc, uint32_t sds_idx)
th = (struct tcphdr *)(mpf->m_data + sgc->l4_offset);
if (sgc->flags & Q8_LRO_COMP_PUSH_BIT)
- th->th_flags |= TH_PUSH;
+ tcp_set_flags(th, tcp_get_flags(th) | TH_PUSH);
m_adj(mpf, sgc->l2_offset);
diff --git a/sys/dev/qlxgbe/ql_os.c b/sys/dev/qlxgbe/ql_os.c
index d20e7a103d02..a12f1efec082 100644
--- a/sys/dev/qlxgbe/ql_os.c
+++ b/sys/dev/qlxgbe/ql_os.c
@@ -322,7 +322,7 @@ static int
qla_pci_attach(device_t dev)
{
qla_host_t *ha = NULL;
- uint32_t rsrc_len;
+ uint32_t rsrc_len __unused;
int i;
uint32_t num_rcvq = 0;
@@ -405,10 +405,10 @@ qla_pci_attach(device_t dev)
__func__);
goto qla_pci_attach_err;
}
- device_printf(dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
+ QL_DPRINT2(ha, (dev, "%s: ha %p pci_func 0x%x rsrc_count 0x%08x"
" msix_count 0x%x pci_reg %p pci_reg1 %p num_rcvq = %d\n",
__func__, ha, ha->pci_func, rsrc_len, ha->msix_count,
- ha->pci_reg, ha->pci_reg1, num_rcvq);
+ ha->pci_reg, ha->pci_reg1, num_rcvq));
if ((ha->msix_count < 64) || (num_rcvq != 32)) {
if (ha->hw.num_sds_rings > 15) {
@@ -852,10 +852,6 @@ qla_init_ifnet(device_t dev, qla_host_t *ha)
QL_DPRINT2(ha, (dev, "%s: enter\n", __func__));
ifp = ha->ifp = if_alloc(IFT_ETHER);
-
- if (ifp == NULL)
- panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setbaudrate(ifp, IF_Gbps(10));
@@ -1274,9 +1270,9 @@ qla_send(qla_host_t *ha, struct mbuf **m_headp, uint32_t txr_idx,
"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,\
ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
- device_printf(ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
+ QL_DPRINT2(ha, (ha->pci_dev, "%s [%d]: txr_idx = %d tx_idx = %d "
"mbuf = %p\n", __func__, __LINE__, txr_idx, tx_idx,
- ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head);
+ ha->tx_ring[txr_idx].tx_buf[tx_idx].m_head));
if (m_head)
m_freem(m_head);
diff --git a/sys/dev/qlxge/qls_os.c b/sys/dev/qlxge/qls_os.c
index 8c110540a042..eca7006850e0 100644
--- a/sys/dev/qlxge/qls_os.c
+++ b/sys/dev/qlxge/qls_os.c
@@ -160,20 +160,20 @@ qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
ha = (qla_host_t *)arg1;
for (i = 0; i < ha->num_tx_rings; i++) {
- device_printf(ha->pci_dev,
+ QL_DPRINT2((ha->pci_dev,
"%s: tx_ring[%d].tx_frames= %p\n",
__func__, i,
- (void *)ha->tx_ring[i].tx_frames);
+ (void *)ha->tx_ring[i].tx_frames));
- device_printf(ha->pci_dev,
+ QL_DPRINT2((ha->pci_dev,
"%s: tx_ring[%d].tx_tso_frames= %p\n",
__func__, i,
- (void *)ha->tx_ring[i].tx_tso_frames);
+ (void *)ha->tx_ring[i].tx_tso_frames));
- device_printf(ha->pci_dev,
+ QL_DPRINT2((ha->pci_dev,
"%s: tx_ring[%d].tx_vlan_frames= %p\n",
__func__, i,
- (void *)ha->tx_ring[i].tx_vlan_frames);
+ (void *)ha->tx_ring[i].tx_vlan_frames));
device_printf(ha->pci_dev,
"%s: tx_ring[%d].txr_free= 0x%08x\n",
@@ -197,15 +197,15 @@ qls_sysctl_get_drvr_stats(SYSCTL_HANDLER_ARGS)
}
for (i = 0; i < ha->num_rx_rings; i++) {
- device_printf(ha->pci_dev,
+ QL_DPRINT2((ha->pci_dev,
"%s: rx_ring[%d].rx_int= %p\n",
__func__, i,
- (void *)ha->rx_ring[i].rx_int);
+ (void *)ha->rx_ring[i].rx_int));
- device_printf(ha->pci_dev,
+ QL_DPRINT2((ha->pci_dev,
"%s: rx_ring[%d].rss_int= %p\n",
__func__, i,
- (void *)ha->rx_ring[i].rss_int);
+ (void *)ha->rx_ring[i].rss_int));
device_printf(ha->pci_dev,
"%s: rx_ring[%d].lbq_next= 0x%08x\n",
@@ -383,9 +383,9 @@ qls_pci_attach(device_t dev)
ha->msix_count = qls_get_msix_count(ha);
- device_printf(dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
+ QL_DPRINT2((dev, "\n%s: ha %p pci_func 0x%x msix_count 0x%x"
" pci_reg %p pci_reg1 %p\n", __func__, ha,
- ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1);
+ ha->pci_func, ha->msix_count, ha->pci_reg, ha->pci_reg1));
if (pci_alloc_msix(dev, &ha->msix_count)) {
device_printf(dev, "%s: pci_alloc_msi[%d] failed\n", __func__,
@@ -719,10 +719,6 @@ qls_init_ifnet(device_t dev, qla_host_t *ha)
QL_DPRINT2((dev, "%s: enter\n", __func__));
ifp = ha->ifp = if_alloc(IFT_ETHER);
-
- if (ifp == NULL)
- panic("%s: cannot if_alloc()\n", device_get_nameunit(dev));
-
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setbaudrate(ifp, IF_Gbps(10));
if_setinitfn(ifp, qls_init);
diff --git a/sys/dev/quicc/quicc_core.c b/sys/dev/quicc/quicc_core.c
index 134481b1c0a1..48c6a6be65f9 100644
--- a/sys/dev/quicc/quicc_core.c
+++ b/sys/dev/quicc/quicc_core.c
@@ -186,7 +186,7 @@ quicc_bfe_attach(device_t dev)
rle = resource_list_find(&qd->qd_rlist, SYS_RES_IRQ, 0);
rle->res = sc->sc_ires;
- qd->qd_dev = device_add_child(dev, NULL, -1);
+ qd->qd_dev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
device_set_ivars(qd->qd_dev, (void *)qd);
error = device_probe_and_attach(qd->qd_dev);
diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c
index d7cafe1994c9..7feb324eb21d 100644
--- a/sys/dev/ral/rt2560.c
+++ b/sys/dev/ral/rt2560.c
@@ -281,6 +281,8 @@ rt2560_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1516,6 +1518,8 @@ rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
wh = mtod(m0, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
@@ -1558,10 +1562,7 @@ rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0,
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp for probe responses */
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
- IEEE80211_FC0_TYPE_MGT &&
- (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
- IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
flags |= RT2560_TX_TIMESTAMP;
}
@@ -1743,7 +1744,7 @@ rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0,
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
@@ -1824,7 +1825,7 @@ rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0,
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
- data->rix = ni->ni_txrate;
+ data->rix = ieee80211_node_get_txrate_dot11rate(ni);
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c
index 1194ef12189f..c9c86d4f089a 100644
--- a/sys/dev/ral/rt2661.c
+++ b/sys/dev/ral/rt2661.c
@@ -282,6 +282,8 @@ rt2661_attach(device_t dev, int id)
#endif
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1284,7 +1286,7 @@ rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
rate = ni->ni_txparms->mgmtrate;
wh = mtod(m0, struct ieee80211_frame *);
-
+ ieee80211_output_seqno_assign(ni, -1, m0);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
@@ -1326,9 +1328,7 @@ rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0,
*(uint16_t *)wh->i_dur = htole16(dur);
/* tell hardware to add timestamp in probe responses */
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
flags |= RT2661_TX_TIMESTAMP;
}
@@ -1433,7 +1433,7 @@ rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0,
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
rate &= IEEE80211_RATE_VAL;
@@ -1517,7 +1517,7 @@ rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0,
/* remember link conditions for rate adaptation algorithm */
if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) {
- data->rix = ni->ni_txrate;
+ data->rix = ieee80211_node_get_txrate_dot11rate(ni);
/* XXX probably need last rssi value and not avg */
data->rssi = ic->ic_node_getrssi(ni);
} else
diff --git a/sys/dev/ral/rt2860.c b/sys/dev/ral/rt2860.c
index ab5b32b4e026..76fe4652839d 100644
--- a/sys/dev/ral/rt2860.c
+++ b/sys/dev/ral/rt2860.c
@@ -323,6 +323,8 @@ rt2860_attach(device_t dev, int id)
| IEEE80211_C_WME /* 802.11e */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
rt2860_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1471,6 +1473,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
+ ieee80211_output_seqno_assign(ni, -1, m);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m);
if (k == NULL) {
@@ -1493,7 +1496,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
rate = tp->ucastrate;
} else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
rate &= IEEE80211_RATE_VAL;
@@ -1559,9 +1562,7 @@ rt2860_tx(struct rt2860_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
*(uint16_t *)wh->i_dur = htole16(dur);
}
/* ask MAC to insert timestamp into probe responses */
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
/* NOTE: beacons do not pass through tx_data() */
txwi->flags |= RT2860_TX_TS;
@@ -1802,9 +1803,7 @@ rt2860_tx_raw(struct rt2860_softc *sc, struct mbuf *m,
*(uint16_t *)wh->i_dur = htole16(dur);
}
/* ask MAC to insert timestamp into probe responses */
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
/* NOTE: beacons do not pass through tx_data() */
txwi->flags |= RT2860_TX_TS;
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -44,7 +44,7 @@
static u_int random_rndr_read(void *, u_int);
static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
.rs_ident = "Armv8 rndr RNG",
.rs_source = RANDOM_PURE_ARMV8,
.rs_read = random_rndr_read,
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index a66754e095fb..9bb4991df82f 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -56,7 +56,7 @@
static u_int random_darn_read(void *, u_int);
-static struct random_source random_darn = {
+static const struct random_source random_darn = {
.rs_ident = "PowerISA DARN random number generator",
.rs_source = RANDOM_PURE_DARN,
.rs_read = random_darn_read
diff --git a/sys/dev/random/fenestrasX/fx_pool.c b/sys/dev/random/fenestrasX/fx_pool.c
index d2e6f0db71ee..f4ad1e295d54 100644
--- a/sys/dev/random/fenestrasX/fx_pool.c
+++ b/sys/dev/random/fenestrasX/fx_pool.c
@@ -164,6 +164,9 @@ static const struct fxrng_ent_char {
[RANDOM_CALLOUT] = {
.entc_cls = &fxrng_lo_push,
},
+ [RANDOM_RANDOMDEV] = {
+ .entc_cls = &fxrng_lo_push,
+ },
[RANDOM_PURE_OCTEON] = {
.entc_cls = &fxrng_hi_push, /* Could be made pull. */
},
diff --git a/sys/dev/random/fenestrasX/fx_rng.c b/sys/dev/random/fenestrasX/fx_rng.c
index eb6c19b9179c..5faaea1cca3a 100644
--- a/sys/dev/random/fenestrasX/fx_rng.c
+++ b/sys/dev/random/fenestrasX/fx_rng.c
@@ -34,11 +34,11 @@
#include <sys/mutex.h>
#include <sys/random.h>
#include <sys/sdt.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <machine/cpu.h>
-#include <machine/stdarg.h>
#define CHACHA_EMBED
#define KEYSTREAM_ONLY
diff --git a/sys/dev/random/fortuna.c b/sys/dev/random/fortuna.c
index 53b629ac378c..8363de99a60a 100644
--- a/sys/dev/random/fortuna.c
+++ b/sys/dev/random/fortuna.c
@@ -71,8 +71,6 @@
#include <dev/random/fortuna.h>
/* Defined in FS&K */
-#define RANDOM_FORTUNA_NPOOLS 32 /* The number of accumulation pools */
-#define RANDOM_FORTUNA_DEFPOOLSIZE 64 /* The default pool size/length for a (re)seed */
#define RANDOM_FORTUNA_MAX_READ (1 << 20) /* Max bytes from AES before rekeying */
#define RANDOM_FORTUNA_BLOCKS_PER_KEY (1 << 16) /* Max blocks from AES before rekeying */
CTASSERT(RANDOM_FORTUNA_BLOCKS_PER_KEY * RANDOM_BLOCKSIZE ==
@@ -343,6 +341,13 @@ random_fortuna_process_event(struct harvest_event *event)
u_int pl;
RANDOM_RESEED_LOCK();
+ /*
+ * Run SP 800-90B health tests on the source if so configured.
+ */
+ if (!random_harvest_healthtest(event)) {
+ RANDOM_RESEED_UNLOCK();
+ return;
+ }
/*-
* FS&K - P_i = P_i|<harvested stuff>
* Accumulate the event into the appropriate pool
diff --git a/sys/dev/random/fortuna.h b/sys/dev/random/fortuna.h
index cb4683514989..7378edb9238c 100644
--- a/sys/dev/random/fortuna.h
+++ b/sys/dev/random/fortuna.h
@@ -27,6 +27,10 @@
#ifndef SYS_DEV_RANDOM_FORTUNA_H_INCLUDED
#define SYS_DEV_RANDOM_FORTUNA_H_INCLUDED
+/* Defined in FS&K */
+#define RANDOM_FORTUNA_NPOOLS 32 /* The number of accumulation pools */
+#define RANDOM_FORTUNA_DEFPOOLSIZE 64 /* The default pool size/length for a (re)seed */
+
#ifdef _KERNEL
typedef struct mtx mtx_t;
#define RANDOM_RESEED_INIT_LOCK(x) mtx_init(&fortuna_state.fs_mtx, "reseed mutex", NULL, MTX_DEF)
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 05474d977276..fa1e4831f1b9 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -51,7 +51,7 @@
static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);
-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
.rs_ident = "Intel Secure Key RNG",
.rs_source = RANDOM_PURE_RDRAND,
.rs_read = random_ivy_read
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index f76071290b8f..56f144169dae 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -44,7 +44,7 @@
static u_int random_nehemiah_read(void *, u_int);
-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
.rs_ident = "VIA Nehemiah Padlock RNG",
.rs_source = RANDOM_PURE_NEHEMIAH,
.rs_read = random_nehemiah_read
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index 4605d811a239..2d7af254c52c 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -54,6 +54,7 @@
#include <crypto/rijndael/rijndael-api-fst.h>
#include <crypto/sha2/sha256.h>
+#include <dev/random/fortuna.h>
#include <dev/random/hash.h>
#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
@@ -87,6 +88,8 @@ static void random_sources_feed(void);
static __read_mostly bool epoch_inited;
static __read_mostly epoch_t rs_epoch;
+static const char *random_source_descr[ENTROPYSOURCE];
+
/*
* How many events to queue up. We create this many items in
* an 'empty' queue, then transfer them to the 'harvest' queue with
@@ -100,14 +103,16 @@ static __read_mostly epoch_t rs_epoch;
volatile int random_kthread_control;
-/* Allow the sysadmin to select the broad category of
- * entropy types to harvest.
+/*
+ * Allow the sysadmin to select the broad category of entropy types to harvest.
+ *
+ * Updates are synchronized by the harvest mutex.
*/
__read_frequently u_int hc_source_mask;
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
- struct random_source *rrs_source;
+ const struct random_source *rrs_source;
};
static CK_LIST_HEAD(sources_head, random_sources) source_list =
@@ -130,36 +135,25 @@ static struct harvest_context {
/* The context of the kernel thread processing harvested entropy */
struct proc *hc_kthread_proc;
/*
- * Lockless ring buffer holding entropy events
- * If ring.in == ring.out,
- * the buffer is empty.
- * If ring.in != ring.out,
- * the buffer contains harvested entropy.
- * If (ring.in + 1) == ring.out (mod RANDOM_RING_MAX),
- * the buffer is full.
- *
- * NOTE: ring.in points to the last added element,
- * and ring.out points to the last consumed element.
- *
- * The ring.in variable needs locking as there are multiple
- * sources to the ring. Only the sources may change ring.in,
- * but the consumer may examine it.
- *
- * The ring.out variable does not need locking as there is
- * only one consumer. Only the consumer may change ring.out,
- * but the sources may examine it.
+ * A pair of buffers for queued events. New events are added to the
+ * active queue while the kthread processes the other one in parallel.
*/
- struct entropy_ring {
+ struct entropy_buffer {
struct harvest_event ring[RANDOM_RING_MAX];
- volatile u_int in;
- volatile u_int out;
- } hc_entropy_ring;
+ u_int pos;
+ } hc_entropy_buf[2];
+ u_int hc_active_buf;
struct fast_entropy_accumulator {
volatile u_int pos;
uint32_t buf[RANDOM_ACCUM_MAX];
} hc_entropy_fast_accumulator;
} harvest_context;
+#define RANDOM_HARVEST_INIT_LOCK() mtx_init(&harvest_context.hc_mtx, \
+ "entropy harvest mutex", NULL, MTX_SPIN)
+#define RANDOM_HARVEST_LOCK() mtx_lock_spin(&harvest_context.hc_mtx)
+#define RANDOM_HARVEST_UNLOCK() mtx_unlock_spin(&harvest_context.hc_mtx)
+
static struct kproc_desc random_proc_kp = {
"rand_harvestq",
random_kthread,
@@ -177,43 +171,48 @@ random_harvestq_fast_process_event(struct harvest_event *event)
static void
random_kthread(void)
{
- u_int maxloop, ring_out, i;
+ struct harvest_context *hc;
- /*
- * Locking is not needed as this is the only place we modify ring.out, and
- * we only examine ring.in without changing it. Both of these are volatile,
- * and this is a unique thread.
- */
+ hc = &harvest_context;
for (random_kthread_control = 1; random_kthread_control;) {
- /* Deal with events, if any. Restrict the number we do in one go. */
- maxloop = RANDOM_RING_MAX;
- while (harvest_context.hc_entropy_ring.out != harvest_context.hc_entropy_ring.in) {
- ring_out = (harvest_context.hc_entropy_ring.out + 1)%RANDOM_RING_MAX;
- random_harvestq_fast_process_event(harvest_context.hc_entropy_ring.ring + ring_out);
- harvest_context.hc_entropy_ring.out = ring_out;
- if (!--maxloop)
- break;
- }
+ struct entropy_buffer *buf;
+ u_int entries;
+
+ /* Deal with queued events. */
+ RANDOM_HARVEST_LOCK();
+ buf = &hc->hc_entropy_buf[hc->hc_active_buf];
+ entries = buf->pos;
+ buf->pos = 0;
+ hc->hc_active_buf = (hc->hc_active_buf + 1) %
+ nitems(hc->hc_entropy_buf);
+ RANDOM_HARVEST_UNLOCK();
+ for (u_int i = 0; i < entries; i++)
+ random_harvestq_fast_process_event(&buf->ring[i]);
+
+ /* Poll sources of noise. */
random_sources_feed();
+
/* XXX: FIX!! Increase the high-performance data rate? Need some measurements first. */
- for (i = 0; i < RANDOM_ACCUM_MAX; i++) {
- if (harvest_context.hc_entropy_fast_accumulator.buf[i]) {
- random_harvest_direct(harvest_context.hc_entropy_fast_accumulator.buf + i, sizeof(harvest_context.hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA);
- harvest_context.hc_entropy_fast_accumulator.buf[i] = 0;
+ for (u_int i = 0; i < RANDOM_ACCUM_MAX; i++) {
+ if (hc->hc_entropy_fast_accumulator.buf[i]) {
+ random_harvest_direct(&hc->hc_entropy_fast_accumulator.buf[i],
+ sizeof(hc->hc_entropy_fast_accumulator.buf[0]), RANDOM_UMA);
+ hc->hc_entropy_fast_accumulator.buf[i] = 0;
}
}
/* XXX: FIX!! This is a *great* place to pass hardware/live entropy to random(9) */
- tsleep_sbt(&harvest_context.hc_kthread_proc, 0, "-",
+ tsleep_sbt(&hc->hc_kthread_proc, 0, "-",
SBT_1S/RANDOM_KTHREAD_HZ, 0, C_PREL(1));
}
random_kthread_control = -1;
- wakeup(&harvest_context.hc_kthread_proc);
+ wakeup(&hc->hc_kthread_proc);
kproc_exit(0);
/* NOTREACHED */
}
-/* This happens well after SI_SUB_RANDOM */
SYSINIT(random_device_h_proc, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, kproc_start,
&random_proc_kp);
+_Static_assert(SI_SUB_KICK_SCHEDULER > SI_SUB_RANDOM,
+ "random kthread starting before subsystem initialization");
static void
rs_epoch_init(void *dummy __unused)
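For context on the new two-buffer scheme: the kthread above swaps hc_active_buf under the spin mutex and then drains the now-inactive buffer without holding the lock, so producers only contend for the brief append. The producer side is not in this hunk; a sketch of its expected shape, with the random_harvest_enqueue name being a hypothetical stand-in:

static void
random_harvest_enqueue(const struct harvest_event *event)
{
	struct harvest_context *hc = &harvest_context;
	struct entropy_buffer *buf;

	RANDOM_HARVEST_LOCK();
	buf = &hc->hc_entropy_buf[hc->hc_active_buf];
	if (buf->pos < RANDOM_RING_MAX)
		buf->ring[buf->pos++] = *event;
	/* else: the active buffer is full and the event is dropped */
	RANDOM_HARVEST_UNLOCK();
}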
@@ -259,8 +258,8 @@ random_sources_feed(void)
* stuck for a few seconds with random_kthread gradually collecting a
* small chunk of entropy every 1 / RANDOM_KTHREAD_HZ seconds.
*
- * The value 64 below is RANDOM_FORTUNA_DEFPOOLSIZE, i.e. chosen to
- * fill Fortuna's pools in the default configuration. With another
+ * We collect RANDOM_FORTUNA_DEFPOOLSIZE bytes per pool, i.e. enough
+ * to fill Fortuna's pools in the default configuration. With another
* PRNG or smaller pools for Fortuna, we might collect more entropy
* than needed to fill the pools, but this is harmless; alternatively,
* a different PRNG, larger pools, or fast entropy sources which are
@@ -270,8 +269,8 @@ random_sources_feed(void)
* try again for a large amount of entropy.
*/
if (!p_random_alg_context->ra_seeded())
- npools = howmany(p_random_alg_context->ra_poolcount * 64,
- sizeof(entropy));
+ npools = howmany(p_random_alg_context->ra_poolcount *
+ RANDOM_FORTUNA_DEFPOOLSIZE, sizeof(entropy));
/*
* Step over all of live entropy sources, and feed their output
@@ -281,8 +280,15 @@ random_sources_feed(void)
epoch_enter_preempt(rs_epoch, &et);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries) {
for (i = 0; i < npools; i++) {
+ if (rrs->rrs_source->rs_read == NULL) {
+ /* Source pushes entropy asynchronously. */
+ continue;
+ }
n = rrs->rrs_source->rs_read(entropy, sizeof(entropy));
- KASSERT((n <= sizeof(entropy)), ("%s: rs_read returned too much data (%u > %zu)", __func__, n, sizeof(entropy)));
+ KASSERT((n <= sizeof(entropy)),
+ ("%s: rs_read returned too much data (%u > %zu)",
+ __func__, n, sizeof(entropy)));
+
/*
* Sometimes the HW entropy source doesn't have anything
* ready for us. This isn't necessarily untrustworthy.
@@ -304,7 +310,283 @@ random_sources_feed(void)
explicit_bzero(entropy, sizeof(entropy));
}
-/* ARGSUSED */
+/*
+ * State used for conducting NIST SP 800-90B health tests on entropy sources.
+ */
+static struct health_test_softc {
+ uint32_t ht_rct_value[HARVESTSIZE + 1];
+ u_int ht_rct_count; /* number of samples with the same value */
+ u_int ht_rct_limit; /* constant after init */
+
+ uint32_t ht_apt_value[HARVESTSIZE + 1];
+ u_int ht_apt_count; /* number of samples with the same value */
+ u_int ht_apt_seq; /* sequence number of the last sample */
+ u_int ht_apt_cutoff; /* constant after init */
+
+ uint64_t ht_total_samples;
+ bool ondemand; /* Set to true to restart the state machine */
+ enum {
+ INIT = 0, /* initial state */
+ DISABLED, /* health checking is disabled */
+ STARTUP, /* doing startup tests, samples are discarded */
+ STEADY, /* steady-state operation */
+ FAILED, /* health check failed, discard samples */
+ } ht_state;
+} healthtest[ENTROPYSOURCE];
+
+#define RANDOM_SELFTEST_STARTUP_SAMPLES 1024 /* 4.3, requirement 4 */
+#define RANDOM_SELFTEST_APT_WINDOW 512 /* 4.4.2 */
+
+static void
+copy_event(uint32_t dst[static HARVESTSIZE + 1],
+ const struct harvest_event *event)
+{
+ memset(dst, 0, sizeof(uint32_t) * (HARVESTSIZE + 1));
+ memcpy(dst, event->he_entropy, event->he_size);
+ if (event->he_source <= RANDOM_ENVIRONMENTAL_END) {
+ /*
+ * For pure entropy sources the timestamp counter is generally
+	 * quite deterministic since samples are taken at regular
+	 * intervals, so it does not contribute much to the entropy. To
+ * make health tests more effective, exclude it from the sample,
+ * since it might otherwise defeat the health tests in a
+ * scenario where the source is stuck.
+ */
+ dst[HARVESTSIZE] = event->he_somecounter;
+ }
+}
+
+static void
+random_healthtest_rct_init(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ ht->ht_rct_count = 1;
+ copy_event(ht->ht_rct_value, event);
+}
+
+/*
+ * Apply the repetition count test to a sample.
+ *
+ * Return false if the test failed, i.e., we observed >= C consecutive samples
+ * with the same value, and true otherwise.
+ */
+static bool
+random_healthtest_rct_next(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ uint32_t val[HARVESTSIZE + 1];
+
+ copy_event(val, event);
+ if (memcmp(val, ht->ht_rct_value, sizeof(ht->ht_rct_value)) != 0) {
+ ht->ht_rct_count = 1;
+ memcpy(ht->ht_rct_value, val, sizeof(ht->ht_rct_value));
+ return (true);
+ } else {
+ ht->ht_rct_count++;
+ return (ht->ht_rct_count < ht->ht_rct_limit);
+ }
+}
+
+static void
+random_healthtest_apt_init(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ ht->ht_apt_count = 1;
+ ht->ht_apt_seq = 1;
+ copy_event(ht->ht_apt_value, event);
+}
+
+static bool
+random_healthtest_apt_next(struct health_test_softc *ht,
+ const struct harvest_event *event)
+{
+ uint32_t val[HARVESTSIZE + 1];
+
+ if (ht->ht_apt_seq == 0) {
+ random_healthtest_apt_init(ht, event);
+ return (true);
+ }
+
+ copy_event(val, event);
+ if (memcmp(val, ht->ht_apt_value, sizeof(ht->ht_apt_value)) == 0) {
+ ht->ht_apt_count++;
+ if (ht->ht_apt_count >= ht->ht_apt_cutoff)
+ return (false);
+ }
+
+ ht->ht_apt_seq++;
+ if (ht->ht_apt_seq == RANDOM_SELFTEST_APT_WINDOW)
+ ht->ht_apt_seq = 0;
+
+ return (true);
+}
+
+/*
+ * Run the health tests for the given event. This is assumed to be called from
+ * a serialized context.
+ */
+bool
+random_harvest_healthtest(const struct harvest_event *event)
+{
+ struct health_test_softc *ht;
+
+ ht = &healthtest[event->he_source];
+
+ /*
+ * Was on-demand testing requested? Restart the state machine if so,
+ * restarting the startup tests.
+ */
+ if (atomic_load_bool(&ht->ondemand)) {
+ atomic_store_bool(&ht->ondemand, false);
+ ht->ht_state = INIT;
+ }
+
+ switch (ht->ht_state) {
+ case __predict_false(INIT):
+ /* Store the first sample and initialize test state. */
+ random_healthtest_rct_init(ht, event);
+ random_healthtest_apt_init(ht, event);
+ ht->ht_total_samples = 0;
+ ht->ht_state = STARTUP;
+ return (false);
+ case DISABLED:
+ /* No health testing for this source. */
+ return (true);
+ case STEADY:
+ case STARTUP:
+ ht->ht_total_samples++;
+ if (random_healthtest_rct_next(ht, event) &&
+ random_healthtest_apt_next(ht, event)) {
+ if (ht->ht_state == STARTUP &&
+ ht->ht_total_samples >=
+ RANDOM_SELFTEST_STARTUP_SAMPLES) {
+ printf(
+ "random: health test passed for source %s\n",
+ random_source_descr[event->he_source]);
+ ht->ht_state = STEADY;
+ }
+ return (ht->ht_state == STEADY);
+ }
+ ht->ht_state = FAILED;
+ printf(
+ "random: health test failed for source %s, discarding samples\n",
+ random_source_descr[event->he_source]);
+ /* FALLTHROUGH */
+ case FAILED:
+ return (false);
+ }
+}
+
+static bool nist_healthtest_enabled = false;
+SYSCTL_BOOL(_kern_random, OID_AUTO, nist_healthtest_enabled,
+ CTLFLAG_RDTUN, &nist_healthtest_enabled, 0,
+ "Enable NIST SP 800-90B health tests for noise sources");
+
+static void
+random_healthtest_init(enum random_entropy_source source, int min_entropy)
+{
+ struct health_test_softc *ht;
+
+ ht = &healthtest[source];
+ memset(ht, 0, sizeof(*ht));
+ KASSERT(ht->ht_state == INIT,
+ ("%s: health test state is %d for source %d",
+ __func__, ht->ht_state, source));
+
+ /*
+ * If health-testing is enabled, validate all sources except CACHED and
+ * VMGENID: they are deterministic sources used only a small, fixed
+ * number of times, so statistical testing is not applicable.
+ */
+ if (!nist_healthtest_enabled ||
+ source == RANDOM_CACHED || source == RANDOM_PURE_VMGENID) {
+ ht->ht_state = DISABLED;
+ return;
+ }
+
+ /*
+ * Set cutoff values for the two tests, given a min-entropy estimate for
+ * the source and allowing for an error rate of 1 in 2^{34}. With a
+ * min-entropy estimate of 1 bit and a sample rate of RANDOM_KTHREAD_HZ,
+ * we expect to see a false positive once in ~54.5 years.
+ *
+ * The RCT limit comes from the formula in section 4.4.1.
+ *
+ * The APT cutoffs are calculated using the formula in section 4.4.2
+ * footnote 10 with the number of Bernoulli trials changed from W to
+ * W-1, since the test as written counts the number of samples equal to
+ * the first sample in the window, and thus tests W-1 samples. We
+ * provide cutoffs for estimates up to sizeof(uint32_t)*HARVESTSIZE*8
+ * bits.
+ */
+ const int apt_cutoffs[] = {
+ [1] = 329,
+ [2] = 195,
+ [3] = 118,
+ [4] = 73,
+ [5] = 48,
+ [6] = 33,
+ [7] = 23,
+ [8] = 17,
+ [9] = 13,
+ [10] = 11,
+ [11] = 9,
+ [12] = 8,
+ [13] = 7,
+ [14] = 6,
+ [15] = 5,
+ [16] = 5,
+ [17 ... 19] = 4,
+ [20 ... 25] = 3,
+ [26 ... 42] = 2,
+ [43 ... 64] = 1,
+ };
+ const int error_rate = 34;
+
+ if (min_entropy == 0) {
+ /*
+ * For environmental sources, the main source of entropy is the
+ * associated timecounter value. Since these sources can be
+ * influenced by unprivileged users, we conservatively use a
+ * min-entropy estimate of 1 bit per sample. For "pure"
+ * sources, we assume 8 bits per sample, as such sources provide
+ * a variable amount of data per read and in particular might
+ * only provide a single byte at a time.
+ */
+ min_entropy = source >= RANDOM_PURE_START ? 8 : 1;
+ } else if (min_entropy < 0 || min_entropy >= nitems(apt_cutoffs)) {
+ panic("invalid min_entropy %d for %s", min_entropy,
+ random_source_descr[source]);
+ }
+
+ ht->ht_rct_limit = 1 + howmany(error_rate, min_entropy);
+ ht->ht_apt_cutoff = apt_cutoffs[min_entropy];
+}
+
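The cutoff arithmetic above is easy to sanity-check outside the kernel. A minimal userland sketch (assuming nothing beyond the standard C library; the kernel's howmany() is re-derived as a local macro) reproducing the section 4.4.1 RCT limit for a few min-entropy estimates:

    #include <stdio.h>

    /* Same round-up division as the kernel's howmany() macro. */
    #define HOWMANY(x, y)   (((x) + ((y) - 1)) / (y))

    int
    main(void)
    {
            const int error_rate = 34;      /* alpha = 2^-34 */
            const int estimates[] = { 1, 4, 8 };
            unsigned int i;

            for (i = 0; i < sizeof(estimates) / sizeof(estimates[0]); i++) {
                    /* C = 1 + ceil(error_rate / H), SP 800-90B section 4.4.1. */
                    printf("H=%d bits -> RCT limit %d\n", estimates[i],
                        1 + HOWMANY(error_rate, estimates[i]));
            }
            return (0);
    }

With H = 1 this yields a limit of 35 repeated samples and with H = 8 a limit of 6, matching what random_healthtest_init() computes at registration time; the APT cutoffs come from the precomputed table rather than a closed form.
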
+static int
+random_healthtest_ondemand(SYSCTL_HANDLER_ARGS)
+{
+ u_int mask, source;
+ int error;
+
+ mask = 0;
+ error = sysctl_handle_int(oidp, &mask, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ while (mask != 0) {
+ source = ffs(mask) - 1;
+ if (source < nitems(healthtest))
+ atomic_store_bool(&healthtest[source].ondemand, true);
+ mask &= ~(1u << source);
+ }
+ return (0);
+}
+SYSCTL_PROC(_kern_random, OID_AUTO, nist_healthtest_ondemand,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
+ random_healthtest_ondemand, "I",
+ "Re-run NIST SP 800-90B startup health tests for a noise source");
+
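A usage note: the written value is interpreted as a bit mask over source numbers, so, assuming the bit positions follow the enum random_entropy_source numbering used throughout this file, "sysctl kern.random.nist_healthtest_ondemand=8" (that is, 1 << 3) re-runs the startup tests for source 3 only; several bits may be set at once to restart several sources.
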
static int
random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
{
@@ -313,9 +595,9 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
_RANDOM_HARVEST_ETHER_OFF | _RANDOM_HARVEST_UMA_OFF;
int error;
- u_int value, orig_value;
+ u_int value;
- orig_value = value = hc_source_mask;
+ value = atomic_load_int(&hc_source_mask);
error = sysctl_handle_int(oidp, &value, 0, req);
if (error != 0 || req->newptr == NULL)
return (error);
@@ -326,16 +608,17 @@ random_check_uint_harvestmask(SYSCTL_HANDLER_ARGS)
/*
* Disallow userspace modification of pure entropy sources.
*/
+ RANDOM_HARVEST_LOCK();
hc_source_mask = (value & ~user_immutable_mask) |
- (orig_value & user_immutable_mask);
+ (hc_source_mask & user_immutable_mask);
+ RANDOM_HARVEST_UNLOCK();
return (0);
}
SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask,
- CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, NULL, 0,
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 0,
random_check_uint_harvestmask, "IU",
"Entropy harvesting mask");
-/* ARGSUSED */
static int
random_print_harvestmask(SYSCTL_HANDLER_ARGS)
{
@@ -344,9 +627,16 @@ random_print_harvestmask(SYSCTL_HANDLER_ARGS)
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
- for (i = ENTROPYSOURCE - 1; i >= 0; i--)
- sbuf_cat(&sbuf, (hc_source_mask & (1 << i)) ? "1" : "0");
+ mask = atomic_load_int(&hc_source_mask);
+ for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ sbuf_cat(&sbuf, present ? "1" : "0");
+ }
error = sbuf_finish(&sbuf);
sbuf_delete(&sbuf);
}
@@ -369,7 +659,8 @@ static const char *random_source_descr[ENTROPYSOURCE] = {
[RANDOM_SWI] = "SWI",
[RANDOM_FS_ATIME] = "FS_ATIME",
[RANDOM_UMA] = "UMA",
- [RANDOM_CALLOUT] = "CALLOUT", /* ENVIRONMENTAL_END */
+ [RANDOM_CALLOUT] = "CALLOUT",
+ [RANDOM_RANDOMDEV] = "RANDOMDEV", /* ENVIRONMENTAL_END */
[RANDOM_PURE_OCTEON] = "PURE_OCTEON", /* PURE_START */
[RANDOM_PURE_SAFE] = "PURE_SAFE",
[RANDOM_PURE_GLXSB] = "PURE_GLXSB",
@@ -385,10 +676,10 @@ static const char *random_source_descr[ENTROPYSOURCE] = {
[RANDOM_PURE_VMGENID] = "PURE_VMGENID",
[RANDOM_PURE_QUALCOMM] = "PURE_QUALCOMM",
[RANDOM_PURE_ARMV8] = "PURE_ARMV8",
+ [RANDOM_PURE_ARM_TRNG] = "PURE_ARM_TRNG",
/* "ENTROPYSOURCE" */
};
-/* ARGSUSED */
static int
random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
{
@@ -399,16 +690,21 @@ random_print_harvestmask_symbolic(SYSCTL_HANDLER_ARGS)
first = true;
error = sysctl_wire_old_buffer(req, 0);
if (error == 0) {
+ u_int mask;
+
sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
+ mask = atomic_load_int(&hc_source_mask);
for (i = ENTROPYSOURCE - 1; i >= 0; i--) {
- if (i >= RANDOM_PURE_START &&
- (hc_source_mask & (1 << i)) == 0)
+ bool present;
+
+ present = (mask & (1u << i)) != 0;
+ if (i >= RANDOM_PURE_START && !present)
continue;
if (!first)
sbuf_cat(&sbuf, ",");
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "[" : "");
+ sbuf_cat(&sbuf, !present ? "[" : "");
sbuf_cat(&sbuf, random_source_descr[i]);
- sbuf_cat(&sbuf, !(hc_source_mask & (1 << i)) ? "]" : "");
+ sbuf_cat(&sbuf, !present ? "]" : "");
first = false;
}
error = sbuf_finish(&sbuf);
@@ -421,7 +717,6 @@ SYSCTL_PROC(_kern_random_harvest, OID_AUTO, mask_symbolic,
random_print_harvestmask_symbolic, "A",
"Entropy harvesting mask (symbolic)");
-/* ARGSUSED */
static void
random_harvestq_init(void *unused __unused)
{
@@ -431,7 +726,10 @@ random_harvestq_init(void *unused __unused)
hc_source_mask = almost_everything_mask;
RANDOM_HARVEST_INIT_LOCK();
- harvest_context.hc_entropy_ring.in = harvest_context.hc_entropy_ring.out = 0;
+ harvest_context.hc_active_buf = 0;
+
+ for (int i = RANDOM_START; i <= RANDOM_ENVIRONMENTAL_END; i++)
+ random_healthtest_init(i, 0);
}
SYSINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_init, NULL);
@@ -451,7 +749,7 @@ random_early_prime(char *entropy, size_t len)
return (0);
for (i = 0; i < len; i += sizeof(event.he_entropy)) {
- event.he_somecounter = (uint32_t)get_cyclecount();
+ event.he_somecounter = random_get_cyclecount();
event.he_size = sizeof(event.he_entropy);
event.he_source = RANDOM_CACHED;
event.he_destination =
@@ -491,7 +789,6 @@ random_prime_loader_file(const char *type)
* known to the kernel, and inserting it directly into the hashing
* module, currently Fortuna.
*/
-/* ARGSUSED */
static void
random_harvestq_prime(void *unused __unused)
{
@@ -520,7 +817,6 @@ random_harvestq_prime(void *unused __unused)
}
SYSINIT(random_device_prime, SI_SUB_RANDOM, SI_ORDER_MIDDLE, random_harvestq_prime, NULL);
-/* ARGSUSED */
static void
random_harvestq_deinit(void *unused __unused)
{
@@ -538,9 +834,9 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d
* This is supposed to be fast; do not do anything slow in here!
* It is also illegal (and morally reprehensible) to insert any
* high-rate data here. "High-rate" is defined as a data source
- * that will usually cause lots of failures of the "Lockless read"
- * check a few lines below. This includes the "always-on" sources
- * like the Intel "rdrand" or the VIA Nehamiah "xstore" sources.
+ * that is likely to fill up the buffer in much less than 100ms.
+ * This includes the "always-on" sources like the Intel "rdrand"
+ * or the VIA Nehemiah "xstore" sources.
*/
/* XXXRW: get_cyclecount() is cheap on most modern hardware, where cycle
* counters are built in, but on older hardware it will do a real time clock
@@ -549,28 +845,29 @@ SYSUNINIT(random_device_h_init, SI_SUB_RANDOM, SI_ORDER_THIRD, random_harvestq_d
void
random_harvest_queue_(const void *entropy, u_int size, enum random_entropy_source origin)
{
+ struct harvest_context *hc;
+ struct entropy_buffer *buf;
struct harvest_event *event;
- u_int ring_in;
- KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
+ KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE,
+ ("%s: origin %d invalid", __func__, origin));
+
+ hc = &harvest_context;
RANDOM_HARVEST_LOCK();
- ring_in = (harvest_context.hc_entropy_ring.in + 1)%RANDOM_RING_MAX;
- if (ring_in != harvest_context.hc_entropy_ring.out) {
- /* The ring is not full */
- event = harvest_context.hc_entropy_ring.ring + ring_in;
- event->he_somecounter = (uint32_t)get_cyclecount();
+ buf = &hc->hc_entropy_buf[hc->hc_active_buf];
+ if (buf->pos < RANDOM_RING_MAX) {
+ event = &buf->ring[buf->pos++];
+ event->he_somecounter = random_get_cyclecount();
event->he_source = origin;
- event->he_destination = harvest_context.hc_destination[origin]++;
+ event->he_destination = hc->hc_destination[origin]++;
if (size <= sizeof(event->he_entropy)) {
event->he_size = size;
memcpy(event->he_entropy, entropy, size);
- }
- else {
+ } else {
/* Big event, so squash it */
event->he_size = sizeof(event->he_entropy[0]);
event->he_entropy[0] = jenkins_hash(entropy, size, (uint32_t)(uintptr_t)event);
}
- harvest_context.hc_entropy_ring.in = ring_in;
}
RANDOM_HARVEST_UNLOCK();
}
@@ -587,7 +884,8 @@ random_harvest_fast_(const void *entropy, u_int size)
u_int pos;
pos = harvest_context.hc_entropy_fast_accumulator.pos;
- harvest_context.hc_entropy_fast_accumulator.buf[pos] ^= jenkins_hash(entropy, size, (uint32_t)get_cyclecount());
+ harvest_context.hc_entropy_fast_accumulator.buf[pos] ^=
+ jenkins_hash(entropy, size, random_get_cyclecount());
harvest_context.hc_entropy_fast_accumulator.pos = (pos + 1)%RANDOM_ACCUM_MAX;
}
@@ -604,7 +902,7 @@ random_harvest_direct_(const void *entropy, u_int size, enum random_entropy_sour
KASSERT(origin >= RANDOM_START && origin < ENTROPYSOURCE, ("%s: origin %d invalid\n", __func__, origin));
size = MIN(size, sizeof(event.he_entropy));
- event.he_somecounter = (uint32_t)get_cyclecount();
+ event.he_somecounter = random_get_cyclecount();
event.he_size = size;
event.he_source = origin;
event.he_destination = harvest_context.hc_destination[origin]++;
@@ -613,21 +911,7 @@ random_harvest_direct_(const void *entropy, u_int size, enum random_entropy_sour
}
void
-random_harvest_register_source(enum random_entropy_source source)
-{
-
- hc_source_mask |= (1 << source);
-}
-
-void
-random_harvest_deregister_source(enum random_entropy_source source)
-{
-
- hc_source_mask &= ~(1 << source);
-}
-
-void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -636,25 +920,25 @@ random_source_register(struct random_source *rsource)
rrs = malloc(sizeof(*rrs), M_ENTROPY, M_WAITOK);
rrs->rrs_source = rsource;
- random_harvest_register_source(rsource->rs_source);
-
printf("random: registering fast source %s\n", rsource->rs_ident);
+ random_healthtest_init(rsource->rs_source, rsource->rs_min_entropy);
+
RANDOM_HARVEST_LOCK();
+ hc_source_mask |= (1 << rsource->rs_source);
CK_LIST_INSERT_HEAD(&source_list, rrs, rrs_entries);
RANDOM_HARVEST_UNLOCK();
}
void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
struct random_sources *rrs = NULL;
KASSERT(rsource != NULL, ("invalid input to %s", __func__));
- random_harvest_deregister_source(rsource->rs_source);
-
RANDOM_HARVEST_LOCK();
+ hc_source_mask &= ~(1 << rsource->rs_source);
CK_LIST_FOREACH(rrs, &source_list, rrs_entries)
if (rrs->rrs_source == rsource) {
CK_LIST_REMOVE(rrs, rrs_entries);
diff --git a/sys/dev/random/random_harvestq.h b/sys/dev/random/random_harvestq.h
index 69a9dfabd44a..1d462500df85 100644
--- a/sys/dev/random/random_harvestq.h
+++ b/sys/dev/random/random_harvestq.h
@@ -27,6 +27,9 @@
#ifndef SYS_DEV_RANDOM_RANDOM_HARVESTQ_H_INCLUDED
#define SYS_DEV_RANDOM_RANDOM_HARVESTQ_H_INCLUDED
+#include <sys/types.h>
+#include <machine/cpu.h>
+
#define HARVESTSIZE 2 /* Max length in words of each harvested entropy unit */
/* These are used to queue harvested packets of entropy. The entropy
@@ -40,8 +43,12 @@ struct harvest_event {
uint8_t he_source; /* origin of the entropy */
};
-#define RANDOM_HARVEST_INIT_LOCK(x) mtx_init(&harvest_context.hc_mtx, "entropy harvest mutex", NULL, MTX_SPIN)
-#define RANDOM_HARVEST_LOCK(x) mtx_lock_spin(&harvest_context.hc_mtx)
-#define RANDOM_HARVEST_UNLOCK(x) mtx_unlock_spin(&harvest_context.hc_mtx)
+static inline uint32_t
+random_get_cyclecount(void)
+{
+ return ((uint32_t)get_cyclecount());
+}
+
+bool random_harvest_healthtest(const struct harvest_event *event);
#endif /* SYS_DEV_RANDOM_RANDOM_HARVESTQ_H_INCLUDED */
diff --git a/sys/dev/random/randomdev.c b/sys/dev/random/randomdev.c
index 6d637ab5a53e..ced4dd8067d9 100644
--- a/sys/dev/random/randomdev.c
+++ b/sys/dev/random/randomdev.c
@@ -303,16 +303,16 @@ randomdev_accumulate(uint8_t *buf, u_int count)
/* Extra timing here is helpful to scrape scheduler jitter entropy */
randomdev_hash_init(&hash);
- timestamp = (uint32_t)get_cyclecount();
+ timestamp = random_get_cyclecount();
randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
randomdev_hash_iterate(&hash, buf, count);
- timestamp = (uint32_t)get_cyclecount();
+ timestamp = random_get_cyclecount();
randomdev_hash_iterate(&hash, &timestamp, sizeof(timestamp));
randomdev_hash_finish(&hash, entropy_data);
for (i = 0; i < RANDOM_KEYSIZE_WORDS; i += sizeof(event.he_entropy)/sizeof(event.he_entropy[0])) {
- event.he_somecounter = (uint32_t)get_cyclecount();
+ event.he_somecounter = random_get_cyclecount();
event.he_size = sizeof(event.he_entropy);
- event.he_source = RANDOM_CACHED;
+ event.he_source = RANDOM_RANDOMDEV;
event.he_destination = destination++; /* Harmless cheating */
memcpy(event.he_entropy, entropy_data + i, sizeof(event.he_entropy));
p_random_alg_context->ra_event_processor(&event);
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..a6ca66c7d92e 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -52,7 +52,9 @@ random_check_uint_##name(SYSCTL_HANDLER_ARGS) \
}
#endif /* SYSCTL_DECL */
+#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_ENTROPY);
+#endif
extern bool random_bypass_before_seeding;
extern bool read_random_bypassed_before_seeding;
@@ -101,10 +103,11 @@ struct random_source {
const char *rs_ident;
enum random_entropy_source rs_source;
random_source_read_t *rs_read;
+ int rs_min_entropy;
};
-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);
#endif /* _KERNEL */
diff --git a/sys/dev/rccgpio/rccgpio.c b/sys/dev/rccgpio/rccgpio.c
index b2b775b879ad..dafd0b511fa9 100644
--- a/sys/dev/rccgpio/rccgpio.c
+++ b/sys/dev/rccgpio/rccgpio.c
@@ -308,7 +308,7 @@ rcc_gpio_attach(device_t dev)
RCC_WRITE(sc, RCC_GPIO_GP_LVL, sc->sc_output);
/* Attach the gpiobus. */
- sc->sc_busdev = gpiobus_attach_bus(dev);
+ sc->sc_busdev = gpiobus_add_bus(dev);
if (sc->sc_busdev == NULL) {
bus_release_resource(dev, SYS_RES_IOPORT, sc->sc_io_rid,
sc->sc_io_res);
@@ -316,6 +316,7 @@ rcc_gpio_attach(device_t dev)
return (ENXIO);
}
+ bus_attach_children(dev);
return (0);
}
diff --git a/sys/dev/re/if_re.c b/sys/dev/re/if_re.c
index 594ed9d60379..d56c975a43d2 100644
--- a/sys/dev/re/if_re.c
+++ b/sys/dev/re/if_re.c
@@ -353,6 +353,8 @@ static driver_t re_driver = {
DRIVER_MODULE(re, pci, re_driver, 0, 0);
DRIVER_MODULE(miibus, re, miibus_driver, 0, 0);
+MODULE_PNP_INFO("U16:vendor;U16:device;U32:#;D:#", pci, re, re_devs,
+ nitems(re_devs) - 1);
#define EE_SET(x) \
CSR_WRITE_1(sc, RL_EECMD, \
@@ -1606,11 +1608,6 @@ re_attach(device_t dev)
re_add_sysctls(sc);
ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
/* Take controller out of deep sleep mode. */
if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) {
@@ -1690,7 +1687,7 @@ re_attach(device_t dev)
if (if_getcapabilities(ifp) & IFCAP_HWCSUM)
if_setcapabilitiesbit(ifp, IFCAP_VLAN_HWCSUM, 0);
/* Enable WOL if PM is supported. */
- if (pci_find_cap(sc->rl_dev, PCIY_PMG, &reg) == 0)
+ if (pci_has_pm(sc->rl_dev))
if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
if_setcapenablebit(ifp, 0, (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST));
@@ -1808,8 +1805,6 @@ re_detach(device_t dev)
if_setflagbits(ifp, 0, IFF_UP);
ether_ifdetach(ifp);
}
- if (sc->rl_miibus)
- device_delete_child(dev, sc->rl_miibus);
bus_generic_detach(dev);
/*
@@ -3565,6 +3560,7 @@ re_ioctl(if_t ifp, u_long command, caddr_t data)
static void
re_watchdog(struct rl_softc *sc)
{
+ struct epoch_tracker et;
if_t ifp;
RL_LOCK_ASSERT(sc);
@@ -3585,7 +3581,9 @@ re_watchdog(struct rl_softc *sc)
if_printf(ifp, "watchdog timeout\n");
if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ NET_EPOCH_ENTER(et);
re_rxeof(sc, NULL);
+ NET_EPOCH_EXIT(et);
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
re_init_locked(sc);
if (!if_sendq_empty(ifp))
@@ -3866,13 +3864,11 @@ static void
re_setwol(struct rl_softc *sc)
{
if_t ifp;
- int pmc;
- uint16_t pmstat;
uint8_t v;
RL_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(sc->rl_dev))
return;
ifp = sc->rl_ifp;
@@ -3934,22 +3930,18 @@ re_setwol(struct rl_softc *sc)
*/
/* Request PME if WOL is requested. */
- pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->rl_dev);
}
static void
re_clrwol(struct rl_softc *sc)
{
- int pmc;
uint8_t v;
RL_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
+ if (!pci_has_pm(sc->rl_dev))
return;
/* Enable config register write. */
diff --git a/sys/dev/regulator/regulator_bus.c b/sys/dev/regulator/regulator_bus.c
index 99081c792b90..15b6d71f0c46 100644
--- a/sys/dev/regulator/regulator_bus.c
+++ b/sys/dev/regulator/regulator_bus.c
@@ -64,7 +64,8 @@ ofw_regulator_bus_attach(device_t dev)
simplebus_add_device(dev, child, 0, NULL, -1, NULL);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t ofw_regulator_bus_methods[] = {
diff --git a/sys/dev/regulator/regulator_fixed.c b/sys/dev/regulator/regulator_fixed.c
index d9ebbe017e11..55cdb5e4aeae 100644
--- a/sys/dev/regulator/regulator_fixed.c
+++ b/sys/dev/regulator/regulator_fixed.c
@@ -100,12 +100,8 @@ static struct gpio_entry *
regnode_get_gpio_entry(struct gpiobus_pin *gpio_pin)
{
struct gpio_entry *entry, *tmp;
- device_t busdev;
int rv;
- busdev = GPIO_GET_BUS(gpio_pin->dev);
- if (busdev == NULL)
- return (NULL);
entry = malloc(sizeof(struct gpio_entry), M_FIXEDREGULATOR,
M_WAITOK | M_ZERO);
@@ -122,8 +118,8 @@ regnode_get_gpio_entry(struct gpiobus_pin *gpio_pin)
}
/* Reserve pin. */
- /* XXX Can we call gpiobus_acquire_pin() with gpio_list_mtx held? */
- rv = gpiobus_acquire_pin(busdev, gpio_pin->pin);
+ /* XXX Can we call gpio_pin_acquire() with gpio_list_mtx held? */
+ rv = gpio_pin_acquire(gpio_pin);
if (rv != 0) {
mtx_unlock(&gpio_list_mtx);
free(entry, M_FIXEDREGULATOR);
@@ -485,14 +481,17 @@ regfix_attach(device_t dev)
/* Try to get and configure GPIO. */
rv = regfix_get_gpio(sc);
- if (rv != 0)
- return (bus_generic_attach(dev));
+ if (rv != 0) {
+ bus_attach_children(dev);
+ return (0);
+ }
/* Register regulator. */
regnode_fixed_register(sc->dev, &sc->init_def);
sc->attach_done = true;
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t regfix_methods[] = {
diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c
index b8c21c3265de..c045e57fb79a 100644
--- a/sys/dev/rl/if_rl.c
+++ b/sys/dev/rl/if_rl.c
@@ -640,7 +640,7 @@ rl_attach(device_t dev)
const struct rl_type *t;
struct sysctl_ctx_list *ctx;
struct sysctl_oid_list *children;
- int error = 0, hwrev, i, phy, pmc, rid;
+ int error = 0, hwrev, i, phy, rid;
int prefer_iomap, unit;
uint16_t rl_did = 0;
char tn[32];
@@ -780,11 +780,6 @@ rl_attach(device_t dev)
goto fail;
ifp = sc->rl_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
#define RL_PHYAD_INTERNAL 0
@@ -808,8 +803,7 @@ rl_attach(device_t dev)
if_setinitfn(ifp, rl_init);
if_setcapabilities(ifp, IFCAP_VLAN_MTU);
/* Check WOL for RTL8139B or newer controllers. */
- if (sc->rl_type == RL_8139 &&
- pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
+ if (sc->rl_type == RL_8139 && pci_has_pm(sc->rl_dev)) {
hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV;
switch (hwrev) {
case RL_HWREV_8139B:
@@ -887,8 +881,6 @@ rl_detach(device_t dev)
#if 0
sc->suspended = 1;
#endif
- if (sc->rl_miibus)
- device_delete_child(dev, sc->rl_miibus);
bus_generic_detach(dev);
if (sc->rl_intrhand[0])
@@ -1979,24 +1971,13 @@ rl_resume(device_t dev)
{
struct rl_softc *sc;
if_t ifp;
- int pmc;
- uint16_t pmstat;
sc = device_get_softc(dev);
ifp = sc->rl_ifp;
RL_LOCK(sc);
- if ((if_getcapabilities(ifp) & IFCAP_WOL) != 0 &&
- pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->rl_dev,
- pmc + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->rl_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
+ if ((if_getcapabilities(ifp) & IFCAP_WOL) != 0) {
/*
* Clear WOL matching such that normal Rx filtering
* wouldn't interfere with WOL patterns.
@@ -2044,8 +2025,6 @@ static void
rl_setwol(struct rl_softc *sc)
{
if_t ifp;
- int pmc;
- uint16_t pmstat;
uint8_t v;
RL_LOCK_ASSERT(sc);
@@ -2053,8 +2032,6 @@ rl_setwol(struct rl_softc *sc)
ifp = sc->rl_ifp;
if ((if_getcapabilities(ifp) & IFCAP_WOL) == 0)
return;
- if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0)
- return;
/* Enable config register write. */
CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE);
@@ -2087,11 +2064,8 @@ rl_setwol(struct rl_softc *sc)
CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);
/* Request PME if WOL is requested. */
- pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->rl_dev);
}
static void
diff --git a/sys/dev/rndtest/rndtest.c b/sys/dev/rndtest/rndtest.c
index 47dd418aa2de..6a4a08a5c727 100644
--- a/sys/dev/rndtest/rndtest.c
+++ b/sys/dev/rndtest/rndtest.c
@@ -41,8 +41,8 @@
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/random.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
-#include <machine/stdarg.h>
#include <dev/rndtest/rndtest.h>
diff --git a/sys/dev/rtsx/rtsx.c b/sys/dev/rtsx/rtsx.c
index a2f124f6c30d..aed0bd6c8b8c 100644
--- a/sys/dev/rtsx/rtsx.c
+++ b/sys/dev/rtsx/rtsx.c
@@ -633,10 +633,10 @@ rtsx_handle_card_present(struct rtsx_softc *sc)
* (sometimes the card detect pin stabilizes
* before the other pins have made good contact).
*/
- taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&sc->rtsx_card_insert_task, -hz);
} else if (was_present && !is_present) {
- taskqueue_enqueue(taskqueue_swi_giant, &sc->rtsx_card_remove_task);
+ taskqueue_enqueue(taskqueue_bus, &sc->rtsx_card_remove_task);
}
}
@@ -648,6 +648,9 @@ rtsx_card_task(void *arg, int pending __unused)
{
struct rtsx_softc *sc = arg;
+#ifndef MMCCAM
+ bus_topo_lock();
+#endif
if (rtsx_is_card_present(sc)) {
sc->rtsx_flags |= RTSX_F_CARD_PRESENT;
/* Card is present, attach if necessary. */
@@ -664,9 +667,7 @@ rtsx_card_task(void *arg, int pending __unused)
sc->rtsx_cam_status = 1;
mmc_cam_sim_discover(&sc->rtsx_mmc_sim);
#else /* !MMCCAM */
- RTSX_LOCK(sc);
- sc->rtsx_mmc_dev = device_add_child(sc->rtsx_dev, "mmc", -1);
- RTSX_UNLOCK(sc);
+ sc->rtsx_mmc_dev = device_add_child(sc->rtsx_dev, "mmc", DEVICE_UNIT_ANY);
if (sc->rtsx_mmc_dev == NULL) {
device_printf(sc->rtsx_dev, "Adding MMC bus failed\n");
} else {
@@ -699,6 +700,9 @@ rtsx_card_task(void *arg, int pending __unused)
#endif /* MMCCAM */
}
}
+#ifndef MMCCAM
+ bus_topo_unlock();
+#endif
}
static bool
@@ -3630,6 +3634,7 @@ rtsx_attach(device_t dev)
device_printf(dev, "If a card is detected without an SD card present,"
" add dev.rtsx.0.inversion=0 in loader.conf(5)\n");
sc->rtsx_inversion = 1;
+ break;
}
}
@@ -3689,7 +3694,7 @@ rtsx_attach(device_t dev)
sc->rtsx_mem_btag = rman_get_bustag(sc->rtsx_mem_res);
sc->rtsx_mem_bhandle = rman_get_bushandle(sc->rtsx_mem_res);
- TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->rtsx_card_insert_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &sc->rtsx_card_insert_task, 0,
rtsx_card_task, sc);
TASK_INIT(&sc->rtsx_card_remove_task, 0, rtsx_card_task, sc);
@@ -3783,13 +3788,13 @@ rtsx_detach(device_t dev)
WRITE4(sc, RTSX_BIER, sc->rtsx_intr_enabled);
/* Stop device. */
- error = device_delete_children(sc->rtsx_dev);
- sc->rtsx_mmc_dev = NULL;
+ error = bus_generic_detach(sc->rtsx_dev);
if (error)
return (error);
+ sc->rtsx_mmc_dev = NULL;
- taskqueue_drain_timeout(taskqueue_swi_giant, &sc->rtsx_card_insert_task);
- taskqueue_drain(taskqueue_swi_giant, &sc->rtsx_card_remove_task);
+ taskqueue_drain_timeout(taskqueue_bus, &sc->rtsx_card_insert_task);
+ taskqueue_drain(taskqueue_bus, &sc->rtsx_card_remove_task);
/* Teardown the state in our softc created in our attach routine. */
rtsx_dma_free(sc);
diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c
index 4334d5700e51..c5889937fb08 100644
--- a/sys/dev/rtwn/if_rtwn.c
+++ b/sys/dev/rtwn/if_rtwn.c
@@ -232,6 +232,7 @@ rtwn_attach(struct rtwn_softc *sc)
| IEEE80211_C_WME /* 802.11e */
| IEEE80211_C_SWAMSDUTX /* Do software A-MSDU TX */
| IEEE80211_C_FF /* Atheros fast-frames */
+ | IEEE80211_C_TXPMGT /* TX power control */
;
if (sc->sc_hwcrypto != RTWN_CRYPTO_SW) {
@@ -247,6 +248,7 @@ rtwn_attach(struct rtwn_softc *sc)
| IEEE80211_HTCAP_SMPS_OFF /* SM PS mode disabled */
/* s/w capabilities */
| IEEE80211_HTC_HT /* HT operation */
+ | IEEE80211_HTC_RX_AMSDU_AMPDU /* A-MSDU in A-MPDU */
| IEEE80211_HTC_AMPDU /* A-MPDU tx */
| IEEE80211_HTC_AMSDU /* A-MSDU tx */
;
@@ -266,6 +268,14 @@ rtwn_attach(struct rtwn_softc *sc)
ic->ic_flags_ext |= IEEE80211_FEXT_WATCHDOG;
#endif
+ /* Enable seqno offload */
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
+#ifndef RTWN_WITHOUT_UCODE
+ /* Don't originate NULL data frames - let firmware do this */
+ ic->ic_flags_ext |= IEEE80211_FEXT_NO_NULLDATA;
+#endif
+
/* Adjust capabilities. */
rtwn_adj_devcaps(sc);
@@ -296,6 +306,11 @@ rtwn_attach(struct rtwn_softc *sc)
sc->sc_node_free = ic->ic_node_free;
ic->ic_node_free = rtwn_node_free;
+ /* Note: this has to happen AFTER ieee80211_ifattach() */
+ ieee80211_set_software_ciphers(ic, IEEE80211_CRYPTO_WEP |
+ IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM |
+ IEEE80211_CRYPTO_AES_GCM_128);
+
rtwn_postattach(sc);
rtwn_radiotap_attach(sc);
@@ -319,18 +334,55 @@ rtwn_radiotap_attach(struct rtwn_softc *sc)
&rxtap->wr_ihdr, sizeof(*rxtap), RTWN_RX_RADIOTAP_PRESENT);
}
+#ifdef RTWN_DEBUG
+static int
+rtwn_sysctl_reg_readwrite(SYSCTL_HANDLER_ARGS)
+{
+ struct rtwn_softc *sc = arg1;
+ int error;
+ uint32_t val;
+
+ if (sc->sc_reg_addr > 0xffff)
+ return (EINVAL);
+
+ RTWN_LOCK(sc);
+ val = rtwn_read_4(sc, sc->sc_reg_addr);
+ RTWN_UNLOCK(sc);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return (error);
+ RTWN_LOCK(sc);
+ rtwn_write_4(sc, sc->sc_reg_addr, val);
+ RTWN_UNLOCK(sc);
+ return (0);
+}
+#endif /* RTWN_DEBUG */
+
void
rtwn_sysctlattach(struct rtwn_softc *sc)
{
struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
-#if 1
+ sc->sc_reg_addr = 0;
+#ifdef RTWN_DEBUG
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "reg_addr", CTLFLAG_RW, &sc->sc_reg_addr,
+ sc->sc_reg_addr, "debug register address");
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "reg_val", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
+ rtwn_sysctl_reg_readwrite, "I", "debug register read/write");
+#endif /* RTWN_DEBUG */
+
sc->sc_ht40 = 0;
SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
"ht40", CTLFLAG_RDTUN, &sc->sc_ht40,
sc->sc_ht40, "Enable 40 MHz mode support");
-#endif
+
+ sc->sc_ena_tsf64 = 0;
+ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "ena_tsf64", CTLFLAG_RWTUN, &sc->sc_ena_tsf64,
+ sc->sc_ena_tsf64, "Enable/disable per-packet TSF64 reporting");
#ifdef RTWN_DEBUG
SYSCTL_ADD_U32(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
@@ -397,6 +449,29 @@ rtwn_resume(struct rtwn_softc *sc)
ieee80211_resume_all(ic);
}
+void
+rtwn_attach_vht_cap_info_mcs(struct rtwn_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint32_t rx_mcs = 0, tx_mcs = 0;
+
+ for (int i = 0; i < 8; i++) {
+ if (i < sc->ntxchains)
+ tx_mcs |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2));
+ else
+ tx_mcs |= (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
+
+ if (i < sc->nrxchains)
+ rx_mcs |= (IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2));
+ else
+ rx_mcs |= (IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2));
+ }
+ ic->ic_vht_cap.supp_mcs.rx_mcs_map = rx_mcs;
+ ic->ic_vht_cap.supp_mcs.rx_highest = 0;
+ ic->ic_vht_cap.supp_mcs.tx_mcs_map = tx_mcs;
+ ic->ic_vht_cap.supp_mcs.tx_highest = 0;
+}
+
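A quick host-side check of the 2-bit-per-stream packing above; this is a sketch, not driver code, and it assumes the usual 802.11 field encodings behind the net80211 constants (2 = MCS0-9 supported, 3 = stream not supported):

    #include <assert.h>
    #include <stdint.h>

    #define VHT_MCS_SUPPORT_0_9     2       /* assumed field encodings */
    #define VHT_MCS_NOT_SUPPORTED   3

    int
    main(void)
    {
            uint32_t map = 0;
            int i, nchains = 2;

            /* One 2-bit field per spatial stream, stream 0 in bits 0-1. */
            for (i = 0; i < 8; i++)
                    map |= (uint32_t)(i < nchains ?
                        VHT_MCS_SUPPORT_0_9 : VHT_MCS_NOT_SUPPORTED) << (i * 2);
            assert(map == 0xfffa);
            return (0);
    }

So a 2x2 part advertises the familiar 0xfffa MCS map in both the RX and TX directions.
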
static void
rtwn_vap_decrement_counters(struct rtwn_softc *sc,
enum ieee80211_opmode opmode, int id)
@@ -570,6 +645,7 @@ rtwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
/* 802.11n parameters */
vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_16;
vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
+ vap->iv_ampdu_limit = IEEE80211_HTCAP_MAXRXAMPDU_64K;
TIMEOUT_TASK_INIT(taskqueue_thread, &uvp->tx_beacon_csa, 0,
rtwn_tx_beacon_csa, vap);
@@ -691,6 +767,14 @@ rtwn_ioctl_reset(struct ieee80211vap *vap, u_long cmd)
case IEEE80211_IOC_LDPC:
error = 0;
break;
+ case IEEE80211_IOC_TXPOWER:
+ {
+ struct rtwn_softc *sc = vap->iv_ic->ic_softc;
+ RTWN_LOCK(sc);
+ error = rtwn_set_tx_power(sc, vap);
+ RTWN_UNLOCK(sc);
+ }
+ break;
default:
error = ENETRESET;
break;
@@ -954,6 +1038,8 @@ rtwn_tsf_sync_enable(struct rtwn_softc *sc, struct ieee80211vap *vap)
/* Enable TSF synchronization. */
rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id),
R92C_BCN_CTRL_DIS_TSF_UDT0, 0);
+ /* Enable TSF beacon handling, needed for RA */
+ rtwn_sta_beacon_enable(sc, uvp->id, true);
break;
case IEEE80211_M_IBSS:
ieee80211_runtask(ic, &uvp->tsf_sync_adhoc_task);
@@ -1095,6 +1181,7 @@ rtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
/* Disable TSF synchronization / beaconing. */
rtwn_beacon_enable(sc, uvp->id, 0);
+ rtwn_sta_beacon_enable(sc, uvp->id, false);
rtwn_setbits_1(sc, R92C_BCN_CTRL(uvp->id),
0, R92C_BCN_CTRL_DIS_TSF_UDT0);
@@ -1124,6 +1211,9 @@ rtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
/* Stop Rx of data frames. */
rtwn_write_2(sc, R92C_RXFLTMAP2, 0);
+ /* Stop Rx of control frames. */
+ rtwn_write_2(sc, R92C_RXFLTMAP1, 0);
+
/* Reset EDCA parameters. */
rtwn_write_4(sc, R92C_EDCA_VO_PARAM, 0x002f3217);
rtwn_write_4(sc, R92C_EDCA_VI_PARAM, 0x005e4317);
@@ -1188,7 +1278,8 @@ rtwn_calc_basicrates(struct rtwn_softc *sc)
struct rtwn_vap *rvp;
struct ieee80211vap *vap;
struct ieee80211_node *ni;
- uint32_t rates;
+ struct ieee80211_htrateset *rs_ht;
+ uint32_t rates = 0, htrates = 0;
rvp = sc->vaps[i];
if (rvp == NULL || rvp->curr_mode == R92C_MSR_NOLINK)
@@ -1199,16 +1290,48 @@ rtwn_calc_basicrates(struct rtwn_softc *sc)
continue;
ni = ieee80211_ref_node(vap->iv_bss);
- rtwn_get_rates(sc, &ni->ni_rates, NULL, &rates, NULL, 1);
+ if (ni->ni_flags & IEEE80211_NODE_HT)
+ rs_ht = &ni->ni_htrates;
+ else
+ rs_ht = NULL;
+ /*
+ * Fetch only the basic rates: both the 802.11abg and the
+ * 11n (MCS) basic rate sets.
+ */
+ rtwn_get_rates(sc, &ni->ni_rates, rs_ht, &rates, &htrates,
+ NULL, 1);
+
+ /*
+ * We need at least /an/ OFDM and/or MCS rate for HT
+ * operation, or the MAC will generate MCS7 ACK/Block-ACK
+ * frames and thus performance will suffer.
+ */
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+ htrates |= 0x01; /* MCS0 */
+ rates |= (1 << RTWN_RIDX_OFDM6);
+ }
+
basicrates |= rates;
+ basicrates |= (htrates << RTWN_RIDX_HT_MCS_SHIFT);
+
+ /* Filter out undesired high rates */
+ if (ni->ni_chan != IEEE80211_CHAN_ANYC &&
+ IEEE80211_IS_CHAN_5GHZ(ni->ni_chan))
+ basicrates &= R92C_RRSR_RATE_MASK_5GHZ;
+ else
+ basicrates &= R92C_RRSR_RATE_MASK_2GHZ;
+
ieee80211_free_node(ni);
}
- if (basicrates == 0)
+ if (basicrates == 0) {
+ device_printf(sc->sc_dev,
+ "WARNING: no configured basic rates!\n");
return;
+ }
- /* XXX initial RTS rate? */
rtwn_set_basicrates(sc, basicrates);
+ rtwn_set_rts_rate(sc, basicrates);
}
static int
@@ -1262,6 +1385,11 @@ rtwn_run(struct rtwn_softc *sc, struct ieee80211vap *vap)
rtwn_write_2(sc, R92C_BCN_INTERVAL(uvp->id), ni->ni_intval);
if (sc->vaps_running == sc->monvaps_running) {
+ /* Enable Rx of BAR control frames. */
+ rtwn_write_2(sc, R92C_RXFLTMAP1,
+ 1 << (IEEE80211_FC0_SUBTYPE_BAR >>
+ IEEE80211_FC0_SUBTYPE_SHIFT));
+
/* Enable Rx of data frames. */
rtwn_write_2(sc, R92C_RXFLTMAP2, 0xffff);
@@ -1539,6 +1667,14 @@ rtwn_getradiocaps(struct ieee80211com *ic,
/* XXX workaround add_channel_list() limitations */
setbit(bands, IEEE80211_MODE_11A);
setbit(bands, IEEE80211_MODE_11NA);
+
+ if (IEEE80211_CONF_VHT(ic)) {
+ setbit(bands, IEEE80211_MODE_VHT_5GHZ);
+ /* Only enable VHT80 if HT40/VHT40 is available */
+ if (sc->sc_ht40)
+ cbw_flags |= NET80211_CBW_FLAG_VHT80;
+ }
+
for (i = 0; i < nitems(sc->chan_num_5ghz); i++) {
if (sc->chan_num_5ghz[i] == 0)
continue;
diff --git a/sys/dev/rtwn/if_rtwn_cam.c b/sys/dev/rtwn/if_rtwn_cam.c
index 864c13d78285..d142cd0476e4 100644
--- a/sys/dev/rtwn/if_rtwn_cam.c
+++ b/sys/dev/rtwn/if_rtwn_cam.c
@@ -113,8 +113,7 @@ rtwn_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
struct rtwn_softc *sc = vap->iv_ic->ic_softc;
int i, start;
- if (&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (ieee80211_is_key_global(vap, k)) {
*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
if (sc->sc_hwcrypto != RTWN_CRYPTO_FULL)
k->wk_flags |= IEEE80211_KEY_SWCRYPT;
@@ -308,8 +307,7 @@ rtwn_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
return (1);
}
- if (&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (ieee80211_is_key_global(vap, k)) {
if (sc->sc_hwcrypto == RTWN_CRYPTO_FULL) {
struct rtwn_vap *rvp = RTWN_VAP(vap);
diff --git a/sys/dev/rtwn/if_rtwn_fw.c b/sys/dev/rtwn/if_rtwn_fw.c
index 551adbdb8704..0fc5bac75c82 100644
--- a/sys/dev/rtwn/if_rtwn_fw.c
+++ b/sys/dev/rtwn/if_rtwn_fw.c
@@ -141,7 +141,8 @@ rtwn_load_firmware(struct rtwn_softc *sc)
sc->fwver = le16toh(hdr->version);
RTWN_DPRINTF(sc, RTWN_DEBUG_FIRMWARE,
- "FW V%u.%u %02u-%02u %02u:%02u\n",
+ "FW (%s) V%u.%u %02u-%02u %02u:%02u\n",
+ sc->fwname,
le16toh(hdr->version), le16toh(hdr->subversion),
hdr->month, hdr->date, hdr->hour, hdr->minute);
ptr += sizeof(*hdr);
diff --git a/sys/dev/rtwn/if_rtwn_nop.h b/sys/dev/rtwn/if_rtwn_nop.h
index 4d7c63c87cd8..5e205617a12d 100644
--- a/sys/dev/rtwn/if_rtwn_nop.h
+++ b/sys/dev/rtwn/if_rtwn_nop.h
@@ -54,6 +54,12 @@ rtwn_nop_softc_vap(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
}
+static __inline int
+rtwn_nop_int_softc_vap(struct rtwn_softc *sc, struct ieee80211vap *vap)
+{
+ return (0);
+}
+
static __inline void
rtwn_nop_softc_uint8_int(struct rtwn_softc *sc, uint8_t *buf, int len)
{
diff --git a/sys/dev/rtwn/if_rtwn_ridx.h b/sys/dev/rtwn/if_rtwn_ridx.h
index dd93f2db4ccf..616ba8d6d724 100644
--- a/sys/dev/rtwn/if_rtwn_ridx.h
+++ b/sys/dev/rtwn/if_rtwn_ridx.h
@@ -22,6 +22,11 @@
#define IF_RTWN_RIDX_H
/* HW rate indices. */
+
+/*
+ * Note - these are also used as offsets into the TX power table
+ * array.
+ */
#define RTWN_RIDX_CCK1 0
#define RTWN_RIDX_CCK2 1
#define RTWN_RIDX_CCK55 2
@@ -37,13 +42,34 @@
#define RTWN_RIDX_HT_MCS_SHIFT 12
#define RTWN_RIDX_HT_MCS(i) (RTWN_RIDX_HT_MCS_SHIFT + (i))
+#define RTWN_RIDX_TO_MCS(ridx) ((ridx) - RTWN_RIDX_HT_MCS_SHIFT)
+
+/* HT supports up to MCS31, so goes from 12 -> 43 */
+
+#define RTWN_RIDX_LEGACY_HT_COUNT 44
+
+/*
+ * VHT supports MCS0..9 for up to 4 spatial streams, so
+ * goes from 44 -> 83.
+ */
+#define RTWN_RIDX_VHT_MCS_SHIFT 44
+#define RTWN_RIDX_VHT_MCS(s, i) (RTWN_RIDX_VHT_MCS_SHIFT + ((10*(s)) + (i)))
+
+/*
+ * The total amount of rate indexes, CCK, OFDM, HT MCS0..31,
+ * VHT MCS0..9 for 1-4 streams.
+ */
+#define RTWN_RIDX_COUNT 84
-#define RTWN_RIDX_COUNT 28
#define RTWN_RIDX_UNKNOWN (uint8_t)-1
-#define RTWN_RATE_IS_CCK(rate) ((rate) <= RTWN_RIDX_CCK11)
+#define RTWN_RATE_IS_CCK(rate) ((rate) <= RTWN_RIDX_CCK11)
#define RTWN_RATE_IS_OFDM(rate) \
- ((rate) >= RTWN_RIDX_OFDM6 && (rate) != RTWN_RIDX_UNKNOWN)
+ ((rate) >= RTWN_RIDX_OFDM6 && (rate) <= RTWN_RIDX_OFDM54)
+#define RTWN_RATE_IS_HT(rate) \
+ ((rate) >= RTWN_RIDX_HT_MCS_SHIFT && (rate) < RTWN_RIDX_VHT_MCS_SHIFT)
+#define RTWN_RATE_IS_VHT(rate) \
+ ((rate) >= RTWN_RIDX_VHT_MCS_SHIFT && (rate) < RTWN_RIDX_COUNT)
static const uint8_t ridx2rate[] =
{ 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 };
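As a worked instance of this layout: RTWN_RIDX_VHT_MCS(1, 7), i.e. the second spatial stream at MCS7, expands to 44 + 10*1 + 7 = 61, and the last valid entry, RTWN_RIDX_VHT_MCS(3, 9) = 83, is exactly RTWN_RIDX_COUNT - 1.
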
@@ -56,20 +82,22 @@ rate2ridx(uint8_t rate)
}
switch (rate) {
/* 11g */
- case 12: return 4;
- case 18: return 5;
- case 24: return 6;
- case 36: return 7;
- case 48: return 8;
- case 72: return 9;
- case 96: return 10;
- case 108: return 11;
+ case 12: return (RTWN_RIDX_OFDM6);
+ case 18: return (RTWN_RIDX_OFDM9);
+ case 24: return (RTWN_RIDX_OFDM12);
+ case 36: return (RTWN_RIDX_OFDM18);
+ case 48: return (RTWN_RIDX_OFDM24);
+ case 72: return (RTWN_RIDX_OFDM36);
+ case 96: return (RTWN_RIDX_OFDM48);
+ case 108: return (RTWN_RIDX_OFDM54);
/* 11b */
- case 2: return 0;
- case 4: return 1;
- case 11: return 2;
- case 22: return 3;
- default: return RTWN_RIDX_UNKNOWN;
+ case 2: return (RTWN_RIDX_CCK1);
+ case 4: return (RTWN_RIDX_CCK2);
+ case 11: return (RTWN_RIDX_CCK55);
+ case 22: return (RTWN_RIDX_CCK11);
+ default:
+ printf("%s: called; unknown rate (%d)\n", __func__, rate);
+ return (RTWN_RIDX_UNKNOWN);
}
}
@@ -80,13 +108,25 @@ rtwn_ctl_mcsrate(const struct ieee80211_rate_table *rt, uint8_t ridx)
uint8_t cix, rate;
/* Check if we are using MCS rate. */
- KASSERT(ridx >= RTWN_RIDX_HT_MCS(0) && ridx != RTWN_RIDX_UNKNOWN,
- ("bad mcs rate index %d", ridx));
+ KASSERT(RTWN_RATE_IS_HT(ridx), ("bad mcs rate index %d", ridx));
- rate = (ridx - RTWN_RIDX_HT_MCS(0)) | IEEE80211_RATE_MCS;
+ rate = RTWN_RIDX_TO_MCS(ridx) | IEEE80211_RATE_MCS;
cix = rt->info[rt->rateCodeToIndex[rate]].ctlRateIndex;
KASSERT(cix != (uint8_t)-1, ("rate %d (%d) has no info", rate, ridx));
- return rt->info[cix].dot11Rate;
+ return (rt->info[cix].dot11Rate);
+}
+
+/* VHT version of rtwn_ctl_mcsrate */
+/* XXX TODO: also should move this to net80211 */
+static __inline uint8_t
+rtwn_ctl_vhtrate(const struct ieee80211_rate_table *rt, uint8_t ridx)
+{
+
+ /* Check if we are using VHT MCS rate. */
+ KASSERT(RTWN_RATE_IS_VHT(ridx), ("bad mcs rate index %d", ridx));
+
+ /* TODO: there are no VHT rate tables, so for now just stick to OFDM12 */
+ return (24);
}
#endif /* IF_RTWN_RIDX_H */
diff --git a/sys/dev/rtwn/if_rtwn_rx.c b/sys/dev/rtwn/if_rtwn_rx.c
index 762472eca440..9b7c93fee9b6 100644
--- a/sys/dev/rtwn/if_rtwn_rx.c
+++ b/sys/dev/rtwn/if_rtwn_rx.c
@@ -52,12 +52,24 @@
#include <dev/rtwn/rtl8192c/r92c_reg.h>
+/*
+ * Get the driver rate set for the current operating rateset(s).
+ *
+ * rates_p is set to a mask of 11abg ridx values (not HW rate values).
+ * htrates_p is set to a mask of 11n ridx values (not HW rate values),
+ * starting at MCS0 == bit 0.
+ *
+ * maxrate_p is set to the highest ridx seen across both rate sets.
+ *
+ * If basic_rates is 1 then only rates flagged as basic are included,
+ * for both the 11abg and the 11n rate sets.
+ */
void
rtwn_get_rates(struct rtwn_softc *sc, const struct ieee80211_rateset *rs,
const struct ieee80211_htrateset *rs_ht, uint32_t *rates_p,
- int *maxrate_p, int basic_rates)
+ uint32_t *htrates_p, int *maxrate_p, int basic_rates)
{
- uint32_t rates;
+ uint32_t rates = 0, htrates = 0;
uint8_t ridx;
int i, maxrate;
@@ -65,7 +77,7 @@ rtwn_get_rates(struct rtwn_softc *sc, const struct ieee80211_rateset *rs,
rates = 0;
maxrate = 0;
- /* This is for 11bg */
+ /* This is for 11abg */
for (i = 0; i < rs->rs_nrates; i++) {
/* Convert 802.11 rate to HW rate index. */
ridx = rate2ridx(IEEE80211_RV(rs->rs_rates[i]));
@@ -80,25 +92,35 @@ rtwn_get_rates(struct rtwn_softc *sc, const struct ieee80211_rateset *rs,
}
/* If we're doing 11n, enable 11n rates */
- if (rs_ht != NULL && !basic_rates) {
+ if (rs_ht != NULL) {
for (i = 0; i < rs_ht->rs_nrates; i++) {
- if ((rs_ht->rs_rates[i] & 0x7f) > 0xf)
+ uint8_t rate = rs_ht->rs_rates[i] & 0x7f;
+ bool is_basic = rs_ht->rs_rates[i] &
+ IEEE80211_RATE_BASIC;
+ /* Only do up to 2-stream rates for now */
+ if (rate > 0xf)
continue;
- /* 11n rates start at index 12 */
- ridx = RTWN_RIDX_HT_MCS((rs_ht->rs_rates[i]) & 0xf);
- rates |= (1 << ridx);
+
+ if (basic_rates && is_basic == false)
+ continue;
+
+ ridx = rate & 0xf;
+ htrates |= (1 << ridx);
/* Guard against the rate table being oddly ordered */
- if (ridx > maxrate)
- maxrate = ridx;
+ if (RTWN_RIDX_HT_MCS(ridx) > maxrate)
+ maxrate = RTWN_RIDX_HT_MCS(ridx);
}
}
RTWN_DPRINTF(sc, RTWN_DEBUG_RA,
- "%s: rates 0x%08X, maxrate %d\n", __func__, rates, maxrate);
+ "%s: rates 0x%08X htrates 0x%08X, maxrate %d\n",
+ __func__, rates, htrates, maxrate);
if (rates_p != NULL)
*rates_p = rates;
+ if (htrates_p != NULL)
+ *htrates_p = htrates;
if (maxrate_p != NULL)
*maxrate_p = maxrate;
}
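As a worked example of the resulting masks: an 11g association whose basic rates are 1, 2, 5.5 and 11 Mbit/s CCK plus 6, 12 and 24 Mbit/s OFDM sets bits RTWN_RIDX_CCK1 through RTWN_RIDX_CCK11 together with RTWN_RIDX_OFDM6/OFDM12/OFDM24, i.e. rates = 0x15f, and htrates = 0 for a non-HT peer; an HT peer with an empty basic MCS set instead relies on the MCS0/OFDM6 floor added by rtwn_calc_basicrates() above.
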
@@ -112,6 +134,41 @@ rtwn_set_basicrates(struct rtwn_softc *sc, uint32_t rates)
rtwn_setbits_4(sc, R92C_RRSR, R92C_RRSR_RATE_BITMAP_M, rates);
}
+/*
+ * Configure the initial RTS rate to use.
+ */
+void
+rtwn_set_rts_rate(struct rtwn_softc *sc, uint32_t rates)
+{
+ uint8_t ridx;
+
+ /*
+ * We shouldn't set the initial RTS/CTS generation rate
+ * as the highest available rate - that may end up
+ * with trying to configure something like MCS1 RTS/CTS.
+ *
+ * Instead, choose a suitable low OFDM/CCK rate based
+ * on the basic rate bitmask. Assume the caller
+ * has filtered out CCK modes in 5GHz.
+ */
+ rates &= (1 << RTWN_RIDX_CCK1) | (1 << RTWN_RIDX_CCK55) |
+ (1 << RTWN_RIDX_CCK11) | (1 << RTWN_RIDX_OFDM6) |
+ (1 << RTWN_RIDX_OFDM9) | (1 << RTWN_RIDX_OFDM12) |
+ (1 << RTWN_RIDX_OFDM18) | (1 << RTWN_RIDX_OFDM24);
+ if (rates == 0) {
+ device_printf(sc->sc_dev,
+ "WARNING: no configured basic RTS rate!\n");
+ return;
+ }
+ ridx = fls(rates) - 1;
+
+ RTWN_DPRINTF(sc, RTWN_DEBUG_RA,
+ "%s: mask=0x%08x, ridx=%d\n",
+ __func__, rates, ridx);
+
+ rtwn_write_1(sc, R92C_INIRTS_RATE_SEL, ridx);
+}
+
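Worked example: with the 0x15f basic-rate mask from the 11g case above, intersecting with the allowed set leaves CCK1, CCK55, CCK11, OFDM6, OFDM12 and OFDM24; fls() then selects the topmost bit, so the initial RTS rate becomes RTWN_RIDX_OFDM24 (24 Mbit/s), the fastest of the permitted conservative rates.
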
static void
rtwn_update_avgrssi(struct rtwn_softc *sc, struct rtwn_node *un, int8_t rssi,
int is_cck)
@@ -285,8 +342,18 @@ rtwn_rx_common(struct rtwn_softc *sc, struct mbuf *m, void *desc)
rxs.c_pktflags |= IEEE80211_RX_F_FAIL_FCSCRC;
rxs.r_flags |= IEEE80211_R_TSF_START; /* XXX undocumented */
- rxs.r_flags |= IEEE80211_R_TSF64;
- rxs.c_rx_tsf = rtwn_extend_rx_tsf(sc, stat);
+
+ /*
+ * Doing the TSF64 extension on USB is expensive, especially
+ * if it's being done on every MPDU in an AMPDU burst.
+ */
+ if (sc->sc_ena_tsf64) {
+ rxs.r_flags |= IEEE80211_R_TSF64;
+ rxs.c_rx_tsf = rtwn_extend_rx_tsf(sc, stat);
+ } else {
+ rxs.r_flags |= IEEE80211_R_TSF32;
+ rxs.c_rx_tsf = le32toh(stat->tsf_low);
+ }
/* Get RSSI from PHY status descriptor. */
is_cck = (rxs.c_pktflags & IEEE80211_RX_F_CCK) != 0;
@@ -318,6 +385,10 @@ rtwn_rx_common(struct rtwn_softc *sc, struct mbuf *m, void *desc)
/* Drop PHY descriptor. */
m_adj(m, infosz + shift);
+ /* If APPFCS, drop FCS */
+ if (sc->rcr & R92C_RCR_APPFCS)
+ m_adj(m, -IEEE80211_CRC_LEN);
+
return (ni);
}
@@ -456,6 +527,15 @@ rtwn_rxfilter_init(struct rtwn_softc *sc)
R92C_RCR_HTC_LOC_CTRL | R92C_RCR_APP_PHYSTS |
R92C_RCR_APP_ICV | R92C_RCR_APP_MIC;
+ /*
+ * Add FCS, to work around occasional 4-byte truncation
+ * with some frames. This is more problematic on RTL8812/
+ * RTL8821 because they're also doing L3/L4 checksum offload
+ * and hardware encryption, so both are tagged as "passed"
+ * before the frame is truncated.
+ */
+ sc->rcr |= R92C_RCR_APPFCS;
+
/* Update dynamic Rx filter parts. */
rtwn_rxfilter_update(sc);
}
@@ -506,5 +586,12 @@ rtwn_set_promisc(struct rtwn_softc *sc)
sc->rcr &= ~mask_min;
sc->rcr |= mask_all;
}
+
+ /*
+ * Add FCS, to work around occasional 4-byte truncation.
+ * See the previous comment above R92C_RCR_APPFCS.
+ */
+ sc->rcr |= R92C_RCR_APPFCS;
+
rtwn_rxfilter_set(sc);
}
diff --git a/sys/dev/rtwn/if_rtwn_rx.h b/sys/dev/rtwn/if_rtwn_rx.h
index 73bdf0d7a0de..751173d0f80f 100644
--- a/sys/dev/rtwn/if_rtwn_rx.h
+++ b/sys/dev/rtwn/if_rtwn_rx.h
@@ -20,8 +20,10 @@
#define RTWN_NOISE_FLOOR -95
void rtwn_get_rates(struct rtwn_softc *, const struct ieee80211_rateset *,
- const struct ieee80211_htrateset *, uint32_t *, int *, int);
+ const struct ieee80211_htrateset *, uint32_t *, uint32_t *,
+ int *, int);
void rtwn_set_basicrates(struct rtwn_softc *, uint32_t);
+void rtwn_set_rts_rate(struct rtwn_softc *, uint32_t);
struct ieee80211_node * rtwn_rx_common(struct rtwn_softc *, struct mbuf *,
void *);
void rtwn_adhoc_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
diff --git a/sys/dev/rtwn/if_rtwn_tx.c b/sys/dev/rtwn/if_rtwn_tx.c
index f5e97933b314..fa7f35f2de83 100644
--- a/sys/dev/rtwn/if_rtwn_tx.c
+++ b/sys/dev/rtwn/if_rtwn_tx.c
@@ -105,6 +105,33 @@ rtwn_get_cipher(u_int ic_cipher)
return (cipher);
}
+static uint8_t
+rtwn_tx_ratectl_to_ridx(struct rtwn_softc *sc, struct ieee80211_node *ni,
+ struct ieee80211_node_txrate *txr)
+{
+ /* TODO: this should be based on the node channel */
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint8_t ridx;
+
+ switch (txr->type) {
+ case IEEE80211_NODE_TXRATE_LEGACY:
+ case IEEE80211_NODE_TXRATE_HT:
+ ridx = rate2ridx(txr->dot11rate);
+ break;
+ case IEEE80211_NODE_TXRATE_VHT:
+ ridx = RTWN_RIDX_VHT_MCS(txr->nss - 1, txr->mcs);
+ break;
+ default:
+ if (ic->ic_curmode != IEEE80211_MODE_11B)
+ ridx = RTWN_RIDX_OFDM36;
+ else
+ ridx = RTWN_RIDX_CCK55;
+ break;
+ }
+
+ return (ridx);
+}
+
static int
rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
struct mbuf *m)
@@ -116,7 +143,8 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
struct ieee80211_frame *wh;
struct rtwn_tx_desc_common *txd;
struct rtwn_tx_buf buf;
- uint8_t rate, ridx, type;
+ uint8_t ridx, type;
+ bool force_rate = false;
u_int cipher;
int ismcast;
@@ -129,28 +157,35 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
/* Choose a TX rate index. */
if (type == IEEE80211_FC0_TYPE_MGT ||
type == IEEE80211_FC0_TYPE_CTL ||
- (m->m_flags & M_EAPOL) != 0)
- rate = tp->mgmtrate;
- else if (ismcast)
- rate = tp->mcastrate;
- else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE)
- rate = tp->ucastrate;
- else {
+ (m->m_flags & M_EAPOL) != 0) {
+ ridx = rate2ridx(tp->mgmtrate);
+ force_rate = true;
+ } else if (ismcast) {
+ ridx = rate2ridx(tp->mcastrate);
+ force_rate = true;
+ } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
+ ridx = rate2ridx(tp->ucastrate);
+ force_rate = true;
+ } else {
if (sc->sc_ratectl == RTWN_RATECTL_NET80211) {
+ struct ieee80211_node_txrate txr = { 0 };
/* XXX pass pktlen */
- (void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ ieee80211_ratectl_rate(ni, NULL, 0);
+ ieee80211_node_get_txrate(ni, &txr);
+ ridx = rtwn_tx_ratectl_to_ridx(sc, ni, &txr);
} else {
if (ni->ni_flags & IEEE80211_NODE_HT)
- rate = IEEE80211_RATE_MCS | 0x4; /* MCS4 */
+ ridx = rate2ridx(IEEE80211_RATE_MCS | 0x4); /* MCS4 */
else if (ic->ic_curmode != IEEE80211_MODE_11B)
- rate = ridx2rate[RTWN_RIDX_OFDM36];
+ ridx = RTWN_RIDX_OFDM36;
else
- rate = ridx2rate[RTWN_RIDX_CCK55];
+ ridx = RTWN_RIDX_CCK55;
}
}
- ridx = rate2ridx(rate);
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
cipher = IEEE80211_CIPHER_NONE;
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
@@ -172,7 +207,7 @@ rtwn_tx_data(struct rtwn_softc *sc, struct ieee80211_node *ni,
memset(txd, 0, sc->txdesc_len);
txd->txdw1 = htole32(SM(RTWN_TXDW1_CIPHER, rtwn_get_cipher(cipher)));
- rtwn_fill_tx_desc(sc, ni, m, txd, ridx, tp->maxretry);
+ rtwn_fill_tx_desc(sc, ni, m, txd, ridx, force_rate, tp->maxretry);
if (ieee80211_radiotap_active_vap(vap)) {
struct rtwn_tx_radiotap_header *tap = &sc->sc_txtap;
@@ -198,6 +233,10 @@ rtwn_tx_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
uint8_t type;
u_int cipher;
+ /* seqno allocate, only if AMPDU isn't running */
+ if ((m->m_flags & M_AMPDU_MPDU) == 0)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/* Encrypt the frame if need be. */
cipher = IEEE80211_CIPHER_NONE;
if (params->ibp_flags & IEEE80211_BPF_CRYPTO) {
@@ -263,6 +302,11 @@ rtwn_start(struct rtwn_softc *sc)
struct mbuf *m;
RTWN_ASSERT_LOCKED(sc);
+
+ /* Ensure no work is scheduled during reset/teardown */
+ if ((sc->sc_flags & RTWN_RUNNING) == 0)
+ return;
+
while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
if (sc->qfullmsk != 0) {
mbufq_prepend(&sc->sc_snd, m);
diff --git a/sys/dev/rtwn/if_rtwnreg.h b/sys/dev/rtwn/if_rtwnreg.h
index 9762f0482b1c..f1c0a7a6ff55 100644
--- a/sys/dev/rtwn/if_rtwnreg.h
+++ b/sys/dev/rtwn/if_rtwnreg.h
@@ -158,6 +158,9 @@ rtwn_chan2centieee(const struct ieee80211_channel *c)
{
int chan;
+ if (IEEE80211_IS_CHAN_VHT(c))
+ return c->ic_vht_ch_freq1;
+
chan = c->ic_ieee;
if (c->ic_extieee != 0)
chan = (chan + c->ic_extieee) / 2;
diff --git a/sys/dev/rtwn/if_rtwnvar.h b/sys/dev/rtwn/if_rtwnvar.h
index 6a44b7b73902..aa42715b1674 100644
--- a/sys/dev/rtwn/if_rtwnvar.h
+++ b/sys/dev/rtwn/if_rtwnvar.h
@@ -32,7 +32,7 @@
#define RTWN_MACID_VALID 0x8000
#define RTWN_MACID_LIMIT 128
-#define RTWN_TX_TIMEOUT 5000 /* ms */
+#define RTWN_TX_TIMEOUT 1000 /* ms */
#define RTWN_MAX_EPOUT 4
#define RTWN_PORT_COUNT 2
@@ -133,7 +133,8 @@ struct rtwn_vap {
*/
enum {
RTWN_RX_DATA,
- RTWN_RX_TX_REPORT,
+ RTWN_RX_TX_REPORT, /* Per-packet */
+ RTWN_RX_TX_REPORT2, /* Per-MACID summary */
RTWN_RX_OTHER
};
@@ -171,13 +172,13 @@ struct rtwn_softc {
struct mbufq sc_snd;
device_t sc_dev;
-#if 1
int sc_ht40;
-#endif
+ int sc_ena_tsf64;
uint32_t sc_debug;
int sc_hwcrypto;
int sc_ratectl_sysctl;
int sc_ratectl;
+ uint32_t sc_reg_addr;
uint8_t sc_detached;
uint8_t sc_flags;
@@ -318,7 +319,7 @@ struct rtwn_softc {
void (*sc_detach_private)(struct rtwn_softc *);
void (*sc_fill_tx_desc)(struct rtwn_softc *,
struct ieee80211_node *, struct mbuf *,
- void *, uint8_t, int);
+ void *, uint8_t, bool, int);
void (*sc_fill_tx_desc_raw)(struct rtwn_softc *,
struct ieee80211_node *, struct mbuf *,
void *, const struct ieee80211_bpf_params *);
@@ -329,6 +330,8 @@ struct rtwn_softc {
uint8_t (*sc_rx_radiotap_flags)(const void *);
void (*sc_beacon_init)(struct rtwn_softc *, void *, int);
void (*sc_beacon_enable)(struct rtwn_softc *, int, int);
+ void (*sc_sta_beacon_enable)(struct rtwn_softc *, int,
+ bool);
void (*sc_beacon_set_rate)(void *, int);
void (*sc_beacon_select)(struct rtwn_softc *, int);
void (*sc_set_chan)(struct rtwn_softc *,
@@ -349,6 +352,8 @@ struct rtwn_softc {
int (*sc_classify_intr)(struct rtwn_softc *, void *, int);
void (*sc_handle_tx_report)(struct rtwn_softc *, uint8_t *,
int);
+ void (*sc_handle_tx_report2)(struct rtwn_softc *, uint8_t *,
+ int);
void (*sc_handle_c2h_report)(struct rtwn_softc *,
uint8_t *, int);
int (*sc_check_frame)(struct rtwn_softc *, struct mbuf *);
@@ -364,6 +369,8 @@ struct rtwn_softc {
void (*sc_init_antsel)(struct rtwn_softc *);
void (*sc_post_init)(struct rtwn_softc *);
int (*sc_init_bcnq1_boundary)(struct rtwn_softc *);
+ int (*sc_set_tx_power)(struct rtwn_softc *,
+ struct ieee80211vap *);
const uint8_t *chan_list_5ghz[3];
int chan_num_5ghz[3];
@@ -394,6 +401,7 @@ struct rtwn_softc {
uint16_t rx_dma_size;
int macid_limit;
+ int macid_rpt2_max_num;
int cam_entry_limit;
int fwsize_limit;
int temp_delta;
@@ -428,6 +436,8 @@ void rtwn_detach(struct rtwn_softc *);
void rtwn_resume(struct rtwn_softc *);
void rtwn_suspend(struct rtwn_softc *);
+void rtwn_attach_vht_cap_info_mcs(struct rtwn_softc *);
+
/* Interface-specific. */
#define rtwn_write_1(_sc, _addr, _val) \
(((_sc)->sc_write_1)((_sc), (_addr), (_val)))
@@ -519,9 +529,9 @@ void rtwn_suspend(struct rtwn_softc *);
#define rtwn_detach_private(_sc) \
(((_sc)->sc_detach_private)((_sc)))
#define rtwn_fill_tx_desc(_sc, _ni, _m, \
- _buf, _ridx, _maxretry) \
+ _buf, _ridx, _force, _maxretry) \
(((_sc)->sc_fill_tx_desc)((_sc), (_ni), \
- (_m), (_buf), (_ridx), (_maxretry)))
+ (_m), (_buf), (_ridx), (_force), (_maxretry)))
#define rtwn_fill_tx_desc_raw(_sc, _ni, _m, \
_buf, _params) \
(((_sc)->sc_fill_tx_desc_raw)((_sc), (_ni), \
@@ -550,6 +560,8 @@ void rtwn_suspend(struct rtwn_softc *);
(((_sc)->sc_classify_intr)((_sc), (_buf), (_len)))
#define rtwn_handle_tx_report(_sc, _buf, _len) \
(((_sc)->sc_handle_tx_report)((_sc), (_buf), (_len)))
+#define rtwn_handle_tx_report2(_sc, _buf, _len) \
+ (((_sc)->sc_handle_tx_report2)((_sc), (_buf), (_len)))
#define rtwn_handle_c2h_report(_sc, _buf, _len) \
(((_sc)->sc_handle_c2h_report)((_sc), (_buf), (_len)))
#define rtwn_check_frame(_sc, _m) \
@@ -558,6 +570,8 @@ void rtwn_suspend(struct rtwn_softc *);
(((_sc)->sc_beacon_init)((_sc), (_buf), (_id)))
#define rtwn_beacon_enable(_sc, _id, _enable) \
(((_sc)->sc_beacon_enable)((_sc), (_id), (_enable)))
+#define rtwn_sta_beacon_enable(_sc, _id, _enable) \
+ (((_sc)->sc_sta_beacon_enable)((_sc), (_id), (_enable)))
#define rtwn_beacon_set_rate(_sc, _buf, _is5ghz) \
(((_sc)->sc_beacon_set_rate)((_buf), (_is5ghz)))
#define rtwn_beacon_select(_sc, _id) \
@@ -586,6 +600,8 @@ void rtwn_suspend(struct rtwn_softc *);
(((_sc)->sc_post_init)((_sc)))
#define rtwn_init_bcnq1_boundary(_sc) \
(((_sc)->sc_init_bcnq1_boundary)((_sc)))
+#define rtwn_set_tx_power(_sc, _vap) \
+ (((_sc)->sc_set_tx_power)((_sc), (_vap)))
/*
* Methods to access subfields in registers.
diff --git a/sys/dev/rtwn/pci/rtwn_pci_rx.c b/sys/dev/rtwn/pci/rtwn_pci_rx.c
index 7f06725afb0e..4ef879f1c947 100644
--- a/sys/dev/rtwn/pci/rtwn_pci_rx.c
+++ b/sys/dev/rtwn/pci/rtwn_pci_rx.c
@@ -237,6 +237,33 @@ rtwn_pci_tx_report(struct rtwn_pci_softc *pc, int len)
}
static void
+rtwn_pci_tx_report2(struct rtwn_pci_softc *pc, int len)
+{
+ struct rtwn_softc *sc = &pc->pc_sc;
+
+ if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
+ /* shouldn't happen */
+ device_printf(sc->sc_dev,
+ "%s called while ratectl = %d!\n",
+ __func__, sc->sc_ratectl);
+ return;
+ }
+
+ RTWN_NT_LOCK(sc);
+ rtwn_handle_tx_report2(sc, pc->pc_rx_buf, len);
+ RTWN_NT_UNLOCK(sc);
+
+#ifdef IEEE80211_SUPPORT_SUPERG
+ /*
+ * NB: this will be executed only when the 'report' bit is set.
+ */
+ if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
+ rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
+#endif
+}
+
+static void
rtwn_pci_c2h_report(struct rtwn_pci_softc *pc, int len)
{
rtwn_handle_c2h_report(&pc->pc_sc, pc->pc_rx_buf, len);
@@ -341,6 +368,9 @@ rtwn_pci_rx_done(struct rtwn_softc *sc)
case RTWN_RX_TX_REPORT:
rtwn_pci_tx_report(pc, len);
break;
+ case RTWN_RX_TX_REPORT2:
+ rtwn_pci_tx_report2(pc, len);
+ break;
case RTWN_RX_OTHER:
rtwn_pci_c2h_report(pc, len);
break;
diff --git a/sys/dev/rtwn/rtl8188e/pci/r88ee_attach.c b/sys/dev/rtwn/rtl8188e/pci/r88ee_attach.c
index 190a0dcd0e2b..5bcd4a81b50d 100644
--- a/sys/dev/rtwn/rtl8188e/pci/r88ee_attach.c
+++ b/sys/dev/rtwn/rtl8188e/pci/r88ee_attach.c
@@ -145,6 +145,7 @@ r88ee_attach(struct rtwn_pci_softc *pc)
sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
sc->sc_classify_intr = r88e_classify_intr;
sc->sc_handle_tx_report = r88e_ratectl_tx_complete;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = r88e_handle_c2h_report;
sc->sc_check_frame = rtwn_nop_int_softc_mbuf;
sc->sc_rf_read = r92c_rf_read;
@@ -176,6 +177,7 @@ r88ee_attach(struct rtwn_pci_softc *pc)
#endif
sc->sc_beacon_init = r92c_beacon_init;
sc->sc_beacon_enable = r88e_beacon_enable;
+ sc->sc_sta_beacon_enable = r88e_sta_beacon_enable;
sc->sc_beacon_set_rate = rtwn_nop_void_int;
sc->sc_beacon_select = rtwn_nop_softc_int;
sc->sc_temp_measure = r88e_temp_measure;
@@ -190,6 +192,7 @@ r88ee_attach(struct rtwn_pci_softc *pc)
sc->sc_init_antsel = rtwn_nop_softc;
sc->sc_post_init = r88ee_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = r92c_set_tx_power;
sc->mac_prog = &rtl8188e_mac[0];
sc->mac_size = nitems(rtl8188e_mac);
diff --git a/sys/dev/rtwn/rtl8188e/r88e.h b/sys/dev/rtwn/rtl8188e/r88e.h
index 33c6fa3432f5..6569b014a5c6 100644
--- a/sys/dev/rtwn/rtl8188e/r88e.h
+++ b/sys/dev/rtwn/rtl8188e/r88e.h
@@ -39,6 +39,7 @@
*/
/* r88e_beacon.c */
void r88e_beacon_enable(struct rtwn_softc *, int, int);
+void r88e_sta_beacon_enable(struct rtwn_softc *, int, bool);
/* r88e_calib.c */
void r88e_iq_calib(struct rtwn_softc *);
@@ -85,6 +86,7 @@ int8_t r88e_get_rssi_cck(struct rtwn_softc *, void *);
int8_t r88e_get_rssi_ofdm(struct rtwn_softc *, void *);
void r88e_get_rx_stats(struct rtwn_softc *, struct ieee80211_rx_stats *,
const void *, const void *);
+void r88e_ratectl_tx_complete_periodic(struct rtwn_softc *, uint8_t *, int);
/* r88e_tx.c */
void r88e_tx_enable_ampdu(void *, int);
diff --git a/sys/dev/rtwn/rtl8188e/r88e_beacon.c b/sys/dev/rtwn/rtl8188e/r88e_beacon.c
index 941e41151b59..74b23359e1a3 100644
--- a/sys/dev/rtwn/rtl8188e/r88e_beacon.c
+++ b/sys/dev/rtwn/rtl8188e/r88e_beacon.c
@@ -43,6 +43,9 @@
#include <dev/rtwn/rtl8188e/r88e.h>
#include <dev/rtwn/rtl8188e/r88e_reg.h>
+/*
+ * Enable/disable beaconing in AP/IBSS/Mesh modes.
+ */
void
r88e_beacon_enable(struct rtwn_softc *sc, int id, int enable)
{
@@ -57,3 +60,12 @@ r88e_beacon_enable(struct rtwn_softc *sc, int id, int enable)
R92C_BCN_CTRL_EN_BCN, 0);
}
}
+
+/*
+ * There's no firmware rate control, so beacon processing isn't
+ * needed in STA mode.
+ */
+void
+r88e_sta_beacon_enable(struct rtwn_softc *sc, int id, bool enable)
+{
+}
diff --git a/sys/dev/rtwn/rtl8188e/r88e_chan.c b/sys/dev/rtwn/rtl8188e/r88e_chan.c
index c072b3554083..f91862720639 100644
--- a/sys/dev/rtwn/rtl8188e/r88e_chan.c
+++ b/sys/dev/rtwn/rtl8188e/r88e_chan.c
@@ -84,6 +84,7 @@ void
r88e_get_txpower(struct rtwn_softc *sc, int chain,
struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
{
+ const struct ieee80211com *ic = &sc->sc_ic;
struct r92c_softc *rs = sc->sc_priv;
const struct rtwn_r88e_txpwr *rt = rs->rs_txpwr;
uint8_t cckpow, ofdmpow, bw20pow, htpow = 0;
@@ -96,15 +97,36 @@ r88e_get_txpower(struct rtwn_softc *sc, int chain,
return;
}
- /* XXX net80211 regulatory */
+ /*
+ * Treat the entries as being in 1/2 dBm resolution, where 0 = 0 dBm.
+ * Apply the adjustments afterwards; assume that the vendor
+ * driver applies these offsets to arrive at the actual
+ * target power in dBm.
+ */
max_mcs = RTWN_RIDX_HT_MCS(sc->ntxchains * 8 - 1);
- KASSERT(max_mcs <= RTWN_RIDX_COUNT, ("increase ridx limit\n"));
+ KASSERT(max_mcs <= RTWN_RIDX_LEGACY_HT_COUNT, ("increase ridx limit\n"));
/* Compute per-CCK rate Tx power. */
- cckpow = rt->cck_tx_pwr[group];
for (ridx = RTWN_RIDX_CCK1; ridx <= RTWN_RIDX_CCK11; ridx++) {
- power[ridx] = (ridx == RTWN_RIDX_CCK2) ? cckpow - 9 : cckpow;
+ /*
+ * Note: the regulatory limit is applied to cckpow before
+ * the CCK2 offset is subtracted.
+ */
+ cckpow = rt->cck_tx_pwr[group];
+ if (cckpow > ic->ic_txpowlimit)
+ cckpow = ic->ic_txpowlimit;
+
+ /*
+ * If it's CCK2 then we subtract the 9 (4.5dB?) offset
+ * and make sure we aren't going to underflow.
+ */
+ if (ridx == RTWN_RIDX_CCK2 && cckpow < 9)
+ cckpow = 0;
+ else if (ridx == RTWN_RIDX_CCK2)
+ cckpow = cckpow - 9;
+
+ power[ridx] = cckpow;
}
if (group < 5)
@@ -112,14 +134,18 @@ r88e_get_txpower(struct rtwn_softc *sc, int chain,
/* Compute per-OFDM rate Tx power. */
ofdmpow = htpow + rt->ofdm_tx_pwr_diff;
+ if (ofdmpow > ic->ic_txpowlimit)
+ ofdmpow = ic->ic_txpowlimit;
for (ridx = RTWN_RIDX_OFDM6; ridx <= RTWN_RIDX_OFDM54; ridx++)
power[ridx] = ofdmpow;
bw20pow = htpow + rt->bw20_tx_pwr_diff;
+ if (bw20pow > ic->ic_txpowlimit)
+ bw20pow = ic->ic_txpowlimit;
for (ridx = RTWN_RIDX_HT_MCS(0); ridx <= max_mcs; ridx++)
power[ridx] = bw20pow;
- /* Apply max limit. */
+ /* Apply max limit */
for (ridx = RTWN_RIDX_CCK1; ridx <= max_mcs; ridx++) {
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
diff --git a/sys/dev/rtwn/rtl8188e/r88e_rx.c b/sys/dev/rtwn/rtl8188e/r88e_rx.c
index 4f8517f1e490..7ea7b2f9e496 100644
--- a/sys/dev/rtwn/rtl8188e/r88e_rx.c
+++ b/sys/dev/rtwn/rtl8188e/r88e_rx.c
@@ -63,8 +63,9 @@ r88e_classify_intr(struct rtwn_softc *sc, void *buf, int len)
case R88E_RXDW3_RPT_RX:
return (RTWN_RX_DATA);
case R88E_RXDW3_RPT_TX1: /* per-packet Tx report */
- case R88E_RXDW3_RPT_TX2: /* periodical Tx report */
return (RTWN_RX_TX_REPORT);
+ case R88E_RXDW3_RPT_TX2: /* periodical Tx report */
+ return (RTWN_RX_TX_REPORT2);
case R88E_RXDW3_RPT_HIS:
return (RTWN_RX_OTHER);
default: /* shut up the compiler */
@@ -119,9 +120,8 @@ r88e_ratectl_tx_complete(struct rtwn_softc *sc, uint8_t *buf, int len)
txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY |
IEEE80211_RATECTL_STATUS_FINAL_RATE;
txs.long_retries = ntries;
- if (rpt->final_rate > RTWN_RIDX_OFDM54) { /* MCS */
- txs.final_rate =
- rpt->final_rate - RTWN_RIDX_HT_MCS_SHIFT;
+ if (RTWN_RATE_IS_HT(rpt->final_rate)) { /* MCS */
+ txs.final_rate = RTWN_RIDX_TO_MCS(rpt->final_rate);
txs.final_rate |= IEEE80211_RATE_MCS;
} else
txs.final_rate = ridx2rate[rpt->final_rate];
@@ -232,3 +232,91 @@ r88e_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
rxs->c_band = IEEE80211_CHAN_2GHZ;
}
}
+
+void
+r88e_ratectl_tx_complete_periodic(struct rtwn_softc *sc, uint8_t *buf,
+ int len)
+{
+ const struct r92c_rx_stat *rxs;
+ uint64_t mac_bitmap;
+ int macid;
+
+ if (len < sizeof(struct r92c_rx_stat))
+ return;
+
+ rxs = (const struct r92c_rx_stat *) buf;
+
+ /* Skip Rx descriptor. */
+ buf += sizeof(struct r92c_rx_stat);
+ len -= sizeof(struct r92c_rx_stat);
+
+ /*
+ * Note: the valid macid bitmap is rx_desc[5] << 32 | rx_desc[4].
+ * rx_desc[5] normally holds the TSF, which isn't valid for this
+ * report type, so it's reused for the upper bitmap word here.
+ */
+ mac_bitmap = ((uint64_t) le32toh(rxs->tsf_low) << 32)
+ | le32toh(rxs->rxdw4);
+
+#if 0
+ RTWN_DPRINTF(sc, RTWN_DEBUG_RA,
+ "%s: mac bitmap: 0x%lx\n", __func__, mac_bitmap);
+#endif
+
+ /*
+ * Note: the RX reports aren't sparse - invalid entries (i.e.,
+ * those whose macid bit is clear in the bitmap) are just
+ * populated with random data.
+ */
+ for (macid = 0; (macid < 64) && (macid < sc->macid_rpt2_max_num) &&
+ (len >= sizeof(struct r88e_fw_c2h_txreport2_entry)); macid++) {
+ struct ieee80211_ratectl_tx_stats txs = { 0 };
+ const struct r88e_fw_c2h_txreport2_entry *rpt;
+ uint32_t ntotal, nsuccess, ndrop, nretry, nframes;
+
+ rpt = (const struct r88e_fw_c2h_txreport2_entry *) buf;
+ buf += sizeof(struct r88e_fw_c2h_txreport2_entry);
+ len -= sizeof(struct r88e_fw_c2h_txreport2_entry);
+
+ if ((mac_bitmap & (1ULL << macid)) == 0)
+ continue;
+
+ txs.flags = IEEE80211_RATECTL_TX_STATS_NODE |
+ IEEE80211_RATECTL_TX_STATS_RETRIES;
+
+ /* Derive the total/success/drop/retry counters. */
+ nframes = le16toh(rpt->retry0);
+ ntotal = nframes + rpt->retry1 + rpt->retry2
+ + rpt->retry3 + rpt->retry4 + rpt->drop;
+ /*
+ * Note: sometimes nframes is zero or 1 while the retry
+ * counters are all capped out at 255!  That means the
+ * frame transmissions are all failing.
+ */
+ nsuccess = ntotal - rpt->drop;
+ ndrop = rpt->drop;
+ nretry = rpt->retry1 + rpt->retry2 + rpt->retry3
+ + rpt->retry4;
+
+ txs.nretries = nretry + ndrop;
+ txs.nsuccess = nsuccess;
+ txs.nframes = ntotal;
+
+ RTWN_DPRINTF(sc, RTWN_DEBUG_RA,
+ "%s: MAC %d rpt retries %d %d %d %d %d, "
+ "drop %d\n",
+ __func__,
+ macid,
+ le16toh(rpt->retry0),
+ rpt->retry1,
+ rpt->retry2,
+ rpt->retry3,
+ rpt->retry4,
+ rpt->drop);
+ if (sc->node_list[macid] != NULL) {
+ struct ieee80211_node *ni;
+ ni = sc->node_list[macid];
+ txs.ni = ni;
+ ieee80211_ratectl_tx_update(ni->ni_vap, &txs);
+ }
+ }
+}
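/*
 * Illustrative sketch: how the 64-bit macid bitmap above is
 * assembled and tested.  The helper names are hypothetical;
 * assumes <sys/endian.h>, <stdbool.h> and <stdint.h>.
 */
static inline uint64_t
rpt2_macid_bitmap(uint32_t rxdw4_le, uint32_t tsf_low_le)
{
	/* rx_desc[4] carries the low 32 bits, rx_desc[5] the high. */
	return (((uint64_t)le32toh(tsf_low_le) << 32) | le32toh(rxdw4_le));
}

static inline bool
rpt2_macid_valid(uint64_t bitmap, int macid)
{
	/* 1ULL keeps the shift well-defined for macid >= 32 on ILP32. */
	return ((bitmap & (1ULL << macid)) != 0);
}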
diff --git a/sys/dev/rtwn/rtl8188e/r88e_rx_desc.h b/sys/dev/rtwn/rtl8188e/r88e_rx_desc.h
index f3e1a3c1b9bc..59e885eb4821 100644
--- a/sys/dev/rtwn/rtl8188e/r88e_rx_desc.h
+++ b/sys/dev/rtwn/rtl8188e/r88e_rx_desc.h
@@ -81,6 +81,20 @@ struct r88e_tx_rpt_ccx {
uint8_t rptb7;
} __packed;
+/*
+ * The 8188E periodic TX report entries (type 2 report).
+ */
+struct r88e_fw_c2h_txreport2_entry {
+ uint16_t retry0;
+ uint8_t retry1;
+ uint8_t retry2;
+ uint8_t retry3;
+ uint8_t retry4;
+ uint8_t drop;
+ uint8_t reserved;
+} __packed;
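/*
 * Illustrative compile-time check: one uint16_t plus six uint8_t
 * fields must pack to exactly 8 bytes per report entry, matching
 * the firmware wire format.
 */
_Static_assert(sizeof(struct r88e_fw_c2h_txreport2_entry) == 8,
    "r88e_fw_c2h_txreport2_entry must be 8 bytes");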
+
/* Interrupt message format. */
/* XXX recheck */
struct r88e_intr_msg {
diff --git a/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c b/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c
index 77193321bb9e..9ace2396d712 100644
--- a/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c
+++ b/sys/dev/rtwn/rtl8188e/usb/r88eu_attach.c
@@ -138,6 +138,7 @@ r88eu_attach(struct rtwn_usb_softc *uc)
sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
sc->sc_classify_intr = r88e_classify_intr;
sc->sc_handle_tx_report = r88e_ratectl_tx_complete;
+ sc->sc_handle_tx_report2 = r88e_ratectl_tx_complete_periodic;
sc->sc_handle_c2h_report = r88e_handle_c2h_report;
sc->sc_check_frame = rtwn_nop_int_softc_mbuf;
sc->sc_rf_read = r92c_rf_read;
@@ -169,6 +170,7 @@ r88eu_attach(struct rtwn_usb_softc *uc)
#endif
sc->sc_beacon_init = r92c_beacon_init;
sc->sc_beacon_enable = r88e_beacon_enable;
+ sc->sc_sta_beacon_enable = r88e_sta_beacon_enable;
sc->sc_beacon_set_rate = rtwn_nop_void_int;
sc->sc_beacon_select = rtwn_nop_softc_int;
sc->sc_temp_measure = r88e_temp_measure;
@@ -183,6 +185,7 @@ r88eu_attach(struct rtwn_usb_softc *uc)
sc->sc_init_antsel = rtwn_nop_softc;
sc->sc_post_init = r88eu_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = r92c_set_tx_power;
sc->mac_prog = &rtl8188e_mac[0];
sc->mac_size = nitems(rtl8188e_mac);
@@ -209,6 +212,8 @@ r88eu_attach(struct rtwn_usb_softc *uc)
sc->rx_dma_size = R88E_RX_DMA_BUFFER_SIZE;
sc->macid_limit = R88E_MACID_MAX + 1;
+ /* XXX this limit may be expanded to R88E_MACID_MAX */
+ sc->macid_rpt2_max_num = 2;
sc->cam_entry_limit = R92C_CAM_ENTRY_COUNT;
sc->fwsize_limit = R92C_MAX_FW_SIZE;
sc->temp_delta = R88E_CALIB_THRESHOLD;
diff --git a/sys/dev/rtwn/rtl8188e/usb/r88eu_init.c b/sys/dev/rtwn/rtl8188e/usb/r88eu_init.c
index f4f936493cda..312e437958ec 100644
--- a/sys/dev/rtwn/rtl8188e/usb/r88eu_init.c
+++ b/sys/dev/rtwn/rtl8188e/usb/r88eu_init.c
@@ -279,9 +279,22 @@ void
r88eu_post_init(struct rtwn_softc *sc)
{
- /* Enable per-packet TX report. */
+ /* Enable per-packet TX report (RPT1) */
rtwn_setbits_1(sc, R88E_TX_RPT_CTRL, 0, R88E_TX_RPT1_ENA);
+#ifndef RTWN_WITHOUT_UCODE
+ /* Enable timer report (RPT2) if requested */
+ if (sc->macid_rpt2_max_num > 0) {
+ rtwn_setbits_1(sc, R88E_TX_RPT_CTRL, 0,
+ R88E_TX_RPT2_ENA);
+
+ /* Configure how many TX RPT2 entries to populate */
+ rtwn_write_1(sc, R88E_TX_RPT_MACID_MAX,
+ sc->macid_rpt2_max_num);
+ /* Enable periodic TX report; 32us units. */
+ rtwn_write_2(sc, R88E_TX_RPT_TIME, 0xcdf0);
+ }
+#endif
/* Disable Tx if MACID is not associated. */
rtwn_write_4(sc, R88E_MACID_NO_LINK, 0xffffffff);
rtwn_write_4(sc, R88E_MACID_NO_LINK + 4, 0xffffffff);
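/*
 * Editor's note: the RPT2 timer value above is in 32us units, so
 * 0xcdf0 = 52720 * 32us, i.e. roughly 1.69 seconds between periodic
 * reports.
 */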
diff --git a/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c b/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c
index 423bf2af1845..ef18edceabc2 100644
--- a/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c
+++ b/sys/dev/rtwn/rtl8192c/pci/r92ce_attach.c
@@ -175,6 +175,7 @@ r92ce_attach(struct rtwn_pci_softc *pc)
sc->sc_get_rssi_ofdm = r92c_get_rssi_ofdm;
sc->sc_classify_intr = r92c_classify_intr;
sc->sc_handle_tx_report = rtwn_nop_softc_uint8_int;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = rtwn_nop_softc_uint8_int;
sc->sc_check_frame = rtwn_nop_int_softc_mbuf;
sc->sc_rf_read = r92c_rf_read;
@@ -206,6 +207,7 @@ r92ce_attach(struct rtwn_pci_softc *pc)
#endif
sc->sc_beacon_init = r92c_beacon_init;
sc->sc_beacon_enable = r92c_beacon_enable;
+ sc->sc_sta_beacon_enable = r92c_sta_beacon_enable;
sc->sc_beacon_set_rate = rtwn_nop_void_int;
sc->sc_beacon_select = rtwn_nop_softc_int;
sc->sc_temp_measure = r92c_temp_measure;
@@ -220,6 +222,7 @@ r92ce_attach(struct rtwn_pci_softc *pc)
sc->sc_init_antsel = rtwn_nop_softc;
sc->sc_post_init = r92ce_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = r92c_set_tx_power;
sc->mac_prog = &rtl8192ce_mac[0];
sc->mac_size = nitems(rtl8192ce_mac);
diff --git a/sys/dev/rtwn/rtl8192c/r92c.h b/sys/dev/rtwn/rtl8192c/r92c.h
index f73e92f7c932..cab7393caf39 100644
--- a/sys/dev/rtwn/rtl8192c/r92c.h
+++ b/sys/dev/rtwn/rtl8192c/r92c.h
@@ -46,6 +46,7 @@ void r92c_read_chipid_vendor(struct rtwn_softc *, uint32_t);
/* r92c_beacon.c */
void r92c_beacon_init(struct rtwn_softc *, void *, int);
void r92c_beacon_enable(struct rtwn_softc *, int, int);
+void r92c_sta_beacon_enable(struct rtwn_softc *, int, bool);
/* r92c_calib.c */
void r92c_iq_calib(struct rtwn_softc *);
@@ -54,10 +55,12 @@ void r92c_temp_measure(struct rtwn_softc *);
uint8_t r92c_temp_read(struct rtwn_softc *);
/* r92c_chan.c */
+void r92c_dump_txpower(struct rtwn_softc *, int, uint8_t[RTWN_RIDX_COUNT]);
void r92c_get_txpower(struct rtwn_softc *, int,
struct ieee80211_channel *, uint8_t[RTWN_RIDX_COUNT]);
void r92c_write_txpower(struct rtwn_softc *, int,
uint8_t power[RTWN_RIDX_COUNT]);
+int r92c_set_tx_power(struct rtwn_softc *, struct ieee80211vap *);
void r92c_set_bw20(struct rtwn_softc *, uint8_t);
void r92c_set_chan(struct rtwn_softc *, struct ieee80211_channel *);
void r92c_set_gain(struct rtwn_softc *, uint8_t);
@@ -114,7 +117,7 @@ void r92c_tx_enable_ampdu(void *, int);
void r92c_tx_setup_hwseq(void *);
void r92c_tx_setup_macid(void *, int);
void r92c_fill_tx_desc(struct rtwn_softc *, struct ieee80211_node *,
- struct mbuf *, void *, uint8_t, int);
+ struct mbuf *, void *, uint8_t, bool, int);
void r92c_fill_tx_desc_raw(struct rtwn_softc *, struct ieee80211_node *,
struct mbuf *, void *, const struct ieee80211_bpf_params *);
void r92c_fill_tx_desc_null(struct rtwn_softc *, void *, int, int, int);
diff --git a/sys/dev/rtwn/rtl8192c/r92c_beacon.c b/sys/dev/rtwn/rtl8192c/r92c_beacon.c
index 9e4cdb5f1399..4646b9317c2f 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_beacon.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_beacon.c
@@ -64,11 +64,13 @@ r92c_beacon_init(struct rtwn_softc *sc, void *buf, int id)
rtwn_r92c_tx_setup_macid(sc, buf, id);
txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
- txd->txdw4 |= htole32(SM(R92C_TXDW4_SEQ_SEL, id));
txd->txdw4 |= htole32(SM(R92C_TXDW4_PORT_ID, id));
txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, RTWN_RIDX_CCK1));
}
+/*
+ * Enable/disable beacon generation in AP/IBSS/mesh modes.
+ */
void
r92c_beacon_enable(struct rtwn_softc *sc, int id, int enable)
{
@@ -81,3 +83,20 @@ r92c_beacon_enable(struct rtwn_softc *sc, int id, int enable)
R92C_BCN_CTRL_EN_BCN, 0);
}
}
+
+/*
+ * Enable/disable beacon processing in STA mode.
+ *
+ * This is required for firmware rate control.
+ */
+void
+r92c_sta_beacon_enable(struct rtwn_softc *sc, int id, bool enable)
+{
+ if (enable) {
+ rtwn_setbits_1(sc, R92C_BCN_CTRL(id),
+ 0, R92C_BCN_CTRL_EN_BCN);
+ } else {
+ rtwn_setbits_1(sc, R92C_BCN_CTRL(id),
+ R92C_BCN_CTRL_EN_BCN, 0);
+ }
+}
diff --git a/sys/dev/rtwn/rtl8192c/r92c_chan.c b/sys/dev/rtwn/rtl8192c/r92c_chan.c
index 0edb32a97b4d..f93159a3c94e 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_chan.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_chan.c
@@ -53,6 +53,56 @@
#include <dev/rtwn/rtl8192c/r92c_reg.h>
#include <dev/rtwn/rtl8192c/r92c_var.h>
+void
+r92c_dump_txpower(struct rtwn_softc *sc, int chain,
+ uint8_t power[RTWN_RIDX_COUNT])
+{
+
+#ifdef RTWN_DEBUG
+ if (sc->sc_debug & RTWN_DEBUG_TXPWR) {
+ int i;
+
+ /* Print CCK */
+ RTWN_DPRINTF(sc, RTWN_DEBUG_TXPWR,
+ "TX [%d]: CCK: 1M: %d 2M: %d 5.5M: %d 11M: %d\n",
+ chain,
+ power[RTWN_RIDX_CCK1],
+ power[RTWN_RIDX_CCK2],
+ power[RTWN_RIDX_CCK55],
+ power[RTWN_RIDX_CCK11]);
+ /* Print OFDM */
+ RTWN_DPRINTF(sc, RTWN_DEBUG_TXPWR,
+ "TX [%d]: OFDM: 6M: %d 9M: %d 12M: %d 18M: %d 24M: %d "
+ "36M: %d 48M: %d 54M: %d\n",
+ chain,
+ power[RTWN_RIDX_OFDM6],
+ power[RTWN_RIDX_OFDM9],
+ power[RTWN_RIDX_OFDM12],
+ power[RTWN_RIDX_OFDM18],
+ power[RTWN_RIDX_OFDM24],
+ power[RTWN_RIDX_OFDM36],
+ power[RTWN_RIDX_OFDM48],
+ power[RTWN_RIDX_OFDM54]);
+ /* Print HT, 1 and 2 stream */
+ for (i = 0; i < sc->ntxchains; i++) {
+ RTWN_DPRINTF(sc, RTWN_DEBUG_TXPWR,
+ "TX [%d]: MCS%d-%d: %d %d %d %d %d %d %d %d\n",
+ chain,
+ i * 8,
+ i * 8 + 7,
+ power[RTWN_RIDX_HT_MCS(i * 8 + 0)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 1)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 2)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 3)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 4)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 5)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 6)],
+ power[RTWN_RIDX_HT_MCS(i * 8 + 7)]);
+ }
+ }
+#endif
+}
+
static int
r92c_get_power_group(struct rtwn_softc *sc, struct ieee80211_channel *c)
{
@@ -81,6 +131,7 @@ void
r92c_get_txpower(struct rtwn_softc *sc, int chain,
struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
{
+ const struct ieee80211com *ic = &sc->sc_ic;
struct r92c_softc *rs = sc->sc_priv;
struct rtwn_r92c_txpwr *rt = rs->rs_txpwr;
const struct rtwn_r92c_txagc *base = rs->rs_txagc;
@@ -94,16 +145,21 @@ r92c_get_txpower(struct rtwn_softc *sc, int chain,
return;
}
- /* XXX net80211 regulatory */
+ /*
+ * Treat the entries as being in 1/2 dBm resolution, where 0 = 0 dBm.
+ * Apply the adjustments afterwards; assume that the vendor
+ * driver applies these offsets to arrive at the actual
+ * target power in dBm.
+ */
max_mcs = RTWN_RIDX_HT_MCS(sc->ntxchains * 8 - 1);
- KASSERT(max_mcs <= RTWN_RIDX_COUNT, ("increase ridx limit\n"));
+ KASSERT(max_mcs <= RTWN_RIDX_LEGACY_HT_COUNT, ("increase ridx limit\n"));
if (rs->regulatory == 0) {
for (ridx = RTWN_RIDX_CCK1; ridx <= RTWN_RIDX_CCK11; ridx++)
power[ridx] = base[chain].pwr[0][ridx];
}
- for (ridx = RTWN_RIDX_OFDM6; ridx < RTWN_RIDX_COUNT; ridx++) {
+ for (ridx = RTWN_RIDX_OFDM6; ridx < RTWN_RIDX_LEGACY_HT_COUNT; ridx++) {
if (rs->regulatory == 3) {
power[ridx] = base[chain].pwr[0][ridx];
/* Apply vendor limits. */
@@ -149,6 +205,10 @@ r92c_get_txpower(struct rtwn_softc *sc, int chain,
for (ridx = RTWN_RIDX_CCK1; ridx <= max_mcs; ridx++) {
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
+ /* Apply net80211 limits */
+ if (power[ridx] > ic->ic_txpowlimit)
+ power[ridx] = ic->ic_txpowlimit;
+
}
}
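/*
 * Illustrative sketch: the clamping pattern used in the loop above,
 * folded into a single hypothetical helper.  Values are hardware
 * power indexes in 0.5 dBm steps.
 */
static inline uint8_t
txpwr_clamp(int pwr, uint8_t hw_max, uint16_t net80211_limit)
{
	if (pwr < 0)
		pwr = 0;
	if (pwr > hw_max)
		pwr = hw_max;
	if (pwr > net80211_limit)
		pwr = net80211_limit;
	return ((uint8_t)pwr);
}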
@@ -224,23 +284,32 @@ r92c_set_txpower(struct rtwn_softc *sc, struct ieee80211_channel *c)
memset(power, 0, sizeof(power));
/* Compute per-rate Tx power values. */
rtwn_r92c_get_txpower(sc, i, c, power);
-#ifdef RTWN_DEBUG
- if (sc->sc_debug & RTWN_DEBUG_TXPWR) {
- int max_mcs, ridx;
-
- max_mcs = RTWN_RIDX_HT_MCS(sc->ntxchains * 8 - 1);
-
- /* Dump per-rate Tx power values. */
- printf("Tx power for chain %d:\n", i);
- for (ridx = RTWN_RIDX_CCK1; ridx <= max_mcs; ridx++)
- printf("Rate %d = %u\n", ridx, power[ridx]);
- }
-#endif
+ /* Optionally print out the power table */
+ r92c_dump_txpower(sc, i, power);
/* Write per-rate Tx power values to hardware. */
r92c_write_txpower(sc, i, power);
}
}
+/*
+ * Only reconfigure the transmit power if there's a valid BSS node and
+ * channel. Otherwise just let the next call to r92c_set_chan()
+ * configure the transmit power.
+ */
+int
+r92c_set_tx_power(struct rtwn_softc *sc, struct ieee80211vap *vap)
+{
+ if (vap->iv_bss == NULL)
+ return (EINVAL);
+ if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC)
+ return (EINVAL);
+
+ /* Set it for the current channel */
+ r92c_set_txpower(sc, vap->iv_bss->ni_chan);
+
+ return (0);
+}
+
static void
r92c_set_bw40(struct rtwn_softc *sc, uint8_t chan, int prichlo)
{
@@ -262,7 +331,8 @@ r92c_set_bw40(struct rtwn_softc *sc, uint8_t chan, int prichlo)
rtwn_bb_setbits(sc, R92C_FPGA0_ANAPARAM2,
R92C_FPGA0_ANAPARAM2_CBW20, 0);
- rtwn_bb_setbits(sc, 0x818, 0x0c000000, (prichlo ? 2 : 1) << 26);
+ rtwn_bb_setbits(sc, R92C_FPGA0_POWER_SAVE,
+ R92C_FPGA0_POWER_SAVE_PS_MASK, (prichlo ? 2 : 1) << 26);
/* Select 40MHz bandwidth. */
rtwn_rf_write(sc, 0, R92C_RF_CHNLBW,
diff --git a/sys/dev/rtwn/rtl8192c/r92c_fw.c b/sys/dev/rtwn/rtl8192c/r92c_fw.c
index 5ab56a5f454e..58584b3cd020 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_fw.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_fw.c
@@ -164,44 +164,22 @@ r92c_fw_download_enable(struct rtwn_softc *sc, int enable)
#ifndef RTWN_WITHOUT_UCODE
static int
r92c_send_ra_cmd(struct rtwn_softc *sc, int macid, uint32_t rates,
- int maxrate)
+ int maxrate, bool shortgi)
{
struct r92c_fw_cmd_macid_cfg cmd;
uint8_t mode;
int error = 0;
- /* XXX should be called directly from iv_newstate() for MACID_BC */
- /* XXX joinbss, not send_ra_cmd() */
-#ifdef RTWN_TODO
- /* NB: group addressed frames are done at 11bg rates for now */
- if (ic->ic_curmode == IEEE80211_MODE_11B)
- mode = R92C_RAID_11B;
- else
- mode = R92C_RAID_11BG;
- /* XXX misleading 'mode' value here for unicast frames */
- RTWN_DPRINTF(sc, RTWN_DEBUG_RA,
- "%s: mode 0x%x, rates 0x%08x, basicrates 0x%08x\n", __func__,
- mode, rates, basicrates);
-
- /* Set rates mask for group addressed frames. */
- cmd.macid = RTWN_MACID_BC | R92C_CMD_MACID_VALID;
- cmd.mask = htole32(mode << 28 | basicrates);
- error = rtwn_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd));
- if (error != 0) {
- device_printf(sc->sc_dev,
- "could not set RA mask for broadcast station\n");
- return (error);
- }
-#endif
-
/* Set rates mask for unicast frames. */
- if (maxrate >= RTWN_RIDX_HT_MCS(0))
- mode = R92C_RAID_11GN;
- else if (maxrate >= RTWN_RIDX_OFDM6)
+ if (RTWN_RATE_IS_HT(maxrate))
+ mode = R92C_RAID_11BGN;
+ else if (RTWN_RATE_IS_OFDM(maxrate))
mode = R92C_RAID_11BG;
else
mode = R92C_RAID_11B;
cmd.macid = macid | R92C_CMD_MACID_VALID;
+ if (shortgi)
+ cmd.macid |= R92C_CMD_MACID_SGI;
cmd.mask = htole32(mode << 28 | rates);
error = r92c_fw_cmd(sc, R92C_CMD_MACID_CONFIG, &cmd, sizeof(cmd));
if (error != 0) {
@@ -220,7 +198,7 @@ r92c_init_ra(struct rtwn_softc *sc, int macid)
{
struct ieee80211_htrateset *rs_ht;
struct ieee80211_node *ni;
- uint32_t rates;
+ uint32_t rates, htrates;
int maxrate;
RTWN_NT_LOCK(sc);
@@ -236,17 +214,37 @@ r92c_init_ra(struct rtwn_softc *sc, int macid)
rs_ht = &ni->ni_htrates;
else
rs_ht = NULL;
- /* XXX MACID_BC */
- rtwn_get_rates(sc, &ni->ni_rates, rs_ht, &rates, &maxrate, 0);
+ /*
+ * Note: this pushes the rate bitmap and maxrate into the
+ * firmware; for this chipset, 2-stream 11n support is enough.
+ */
+ rtwn_get_rates(sc, &ni->ni_rates, rs_ht, &rates, &htrates, &maxrate, 0);
RTWN_NT_UNLOCK(sc);
#ifndef RTWN_WITHOUT_UCODE
if (sc->sc_ratectl == RTWN_RATECTL_FW) {
- r92c_send_ra_cmd(sc, macid, rates, maxrate);
+ uint32_t fw_rates;
+ bool shortgi;
+ /* Add HT rates after normal rates; limit to MCS0..15 */
+ fw_rates = rates |
+ ((htrates & 0xffff) << RTWN_RIDX_HT_MCS_SHIFT);
+ /* Recalculate short-GI based on the operating mode. */
+ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan))
+ shortgi = ieee80211_ht_check_tx_shortgi_40(ni);
+ else if (IEEE80211_IS_CHAN_HT20(ni->ni_chan))
+ shortgi = ieee80211_ht_check_tx_shortgi_20(ni);
+ else
+ shortgi = false;
+ r92c_send_ra_cmd(sc, macid, fw_rates, maxrate, shortgi);
}
#endif
- rtwn_write_1(sc, R92C_INIDATA_RATE_SEL(macid), maxrate);
+ /*
+ * There's no need to set this if firmware rate control is
+ * enabled - the firmware will be controlling this per MACID.
+ */
+ if (sc->sc_ratectl != RTWN_RATECTL_FW)
+ rtwn_write_1(sc, R92C_INIDATA_RATE_SEL(macid), maxrate);
ieee80211_free_node(ni);
}
@@ -283,7 +281,6 @@ r92c_joinbss_rpt(struct rtwn_softc *sc, int macid)
end:
#endif
- /* TODO: init rates for RTWN_MACID_BC. */
if (macid & RTWN_MACID_VALID)
r92c_init_ra(sc, macid & ~RTWN_MACID_VALID);
}
diff --git a/sys/dev/rtwn/rtl8192c/r92c_fw_cmd.h b/sys/dev/rtwn/rtl8192c/r92c_fw_cmd.h
index b16e2819624f..e19c091fa54d 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_fw_cmd.h
+++ b/sys/dev/rtwn/rtl8192c/r92c_fw_cmd.h
@@ -80,6 +80,7 @@ struct r92c_fw_cmd_macid_cfg {
uint32_t mask;
uint8_t macid;
#define R92C_CMD_MACID_VALID 0x80
+#define R92C_CMD_MACID_SGI 0x20
} __packed;
/*
diff --git a/sys/dev/rtwn/rtl8192c/r92c_reg.h b/sys/dev/rtwn/rtl8192c/r92c_reg.h
index e640b6b560b1..e6d232a88834 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_reg.h
+++ b/sys/dev/rtwn/rtl8192c/r92c_reg.h
@@ -519,7 +519,25 @@
#define R92C_RRSR_RATE_BITMAP_M 0x000fffff
#define R92C_RRSR_RATE_BITMAP_S 0
#define R92C_RRSR_RATE_CCK_ONLY_1M 0xffff1
+/* Suitable low-rate defaults for 2/5GHz CTS/ACK/Block-ACK */
+/*
+ * Note: the RTL8192CU vendor driver disables 2M CCK as a
+ * basic rate due to "Low TXEVM" causing issues with other
+ * vendor devices. Since we want to maximise basic rate
+ * reliability to prevent retries (due to missing RTS/CTS
+ * and ACK/Block-ACK), do the same here.
+ *
+ * And, unfortunately, enabling MCS rates for self-generated
+ * and management/control frames can result in the peer AP
+ * just plainly ignoring you. This happened with older
+ * D-Link 802.11n era APs. These masks therefore exclude MCS
+ * rates; it's easy to add one in rtwn_set_basicrates().
+ * (Just |= 0x1000; bit 12 == MCS 0.)
+ */
+#define R92C_RRSR_RATE_MASK_2GHZ 0x015d
+#define R92C_RRSR_RATE_MASK_5GHZ 0x0150
#define R92C_RRSR_RATE_ALL 0xfffff
+#define R92C_RRSR_RSC_SUBCHNL_MASK 0x00600000
#define R92C_RRSR_RSC_LOWSUBCHNL 0x00200000
#define R92C_RRSR_RSC_UPSUBCHNL 0x00400000
#define R92C_RRSR_SHORT 0x00800000
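/*
 * Editor's note: decoding the masks above with the RRSR bitmap layout
 * (bits 0..3 = CCK 1/2/5.5/11M, bits 4..11 = OFDM 6..54M): 0x015d
 * enables CCK 1/5.5/11M plus OFDM 6/12/24M for 2GHz, while 0x0150
 * enables only OFDM 6/12/24M for 5GHz.
 */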
@@ -534,6 +552,15 @@
#define R92C_EDCA_PARAM_TXOP_M 0xffff0000
#define R92C_EDCA_PARAM_TXOP_S 16
+/* Bits for R92C_INIRTS_RATE_SEL. */
+#define R92C_INIRTS_RATE_SEL_RATE_M 0x3f
+#define R92C_INIRTS_RATE_SEL_RATE_S 0
+
+/* Bits for R92C_INIDATA_RATE_SEL. */
+#define R92C_INIDATA_RATE_SEL_RATE_M 0x3f
+#define R92C_INIDATA_RATE_SEL_RATE_S 0
+#define R92C_INIDATA_RATE_SEL_SHORTGI 0x40
+
/* Bits for R92C_HWSEQ_CTRL / R92C_TXPAUSE. */
#define R92C_TX_QUEUE_VO 0x01
#define R92C_TX_QUEUE_VI 0x02
@@ -668,6 +695,7 @@
*/
#define R92C_FPGA0_RFMOD 0x800
#define R92C_FPGA0_TXINFO 0x804
+#define R92C_FPGA0_POWER_SAVE 0x818
#define R92C_HSSI_PARAM1(chain) (0x820 + (chain) * 8)
#define R92C_HSSI_PARAM2(chain) (0x824 + (chain) * 8)
#define R92C_TXAGC_RATE18_06(i) (((i) == 0) ? 0xe00 : 0x830)
@@ -725,6 +753,11 @@
#define R92C_RFMOD_CCK_EN 0x01000000
#define R92C_RFMOD_OFDM_EN 0x02000000
+/* Bits for R92C_FPGA0_POWER_SAVE. */
+#define R92C_FPGA0_POWER_SAVE_PS_MASK 0x0c000000
+#define R92C_FPGA0_POWER_SAVE_PS_LOWER_CHANNEL 0x04000000
+#define R92C_FPGA0_POWER_SAVE_PS_UPPER_CHANNEL 0x08000000
+
/* Bits for R92C_HSSI_PARAM1(i). */
#define R92C_HSSI_PARAM1_PI 0x00000100
diff --git a/sys/dev/rtwn/rtl8192c/r92c_rx.c b/sys/dev/rtwn/rtl8192c/r92c_rx.c
index 72f726a24550..9af3b1ebc2c9 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_rx.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_rx.c
@@ -121,7 +121,7 @@ r92c_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
rxs->c_pktflags |= IEEE80211_RX_F_AMPDU;
else if (rxdw1 & R92C_RXDW1_AMPDU_MORE)
rxs->c_pktflags |= IEEE80211_RX_F_AMPDU_MORE;
- if ((rxdw3 & R92C_RXDW3_SPLCP) && rate >= RTWN_RIDX_HT_MCS(0))
+ if ((rxdw3 & R92C_RXDW3_SPLCP) && RTWN_RATE_IS_HT(rate))
rxs->c_pktflags |= IEEE80211_RX_F_SHORTGI;
if (rxdw3 & R92C_RXDW3_HT40)
@@ -131,13 +131,13 @@ r92c_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
if (RTWN_RATE_IS_CCK(rate))
rxs->c_phytype = IEEE80211_RX_FP_11B;
- else if (rate < RTWN_RIDX_HT_MCS(0))
+ else if (RTWN_RATE_IS_OFDM(rate))
rxs->c_phytype = IEEE80211_RX_FP_11G;
else
rxs->c_phytype = IEEE80211_RX_FP_11NG;
/* Map HW rate index to 802.11 rate. */
- if (rate < RTWN_RIDX_HT_MCS(0)) {
+ if (RTWN_RATE_IS_CCK(rate) || RTWN_RATE_IS_OFDM(rate)) {
rxs->c_rate = ridx2rate[rate];
if (RTWN_RATE_IS_CCK(rate))
rxs->c_pktflags |= IEEE80211_RX_F_CCK;
@@ -145,7 +145,7 @@ r92c_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
rxs->c_pktflags |= IEEE80211_RX_F_OFDM;
} else { /* MCS0~15. */
rxs->c_rate =
- IEEE80211_RATE_MCS | (rate - RTWN_RIDX_HT_MCS_SHIFT);
+ IEEE80211_RATE_MCS | (RTWN_RIDX_TO_MCS(rate));
rxs->c_pktflags |= IEEE80211_RX_F_HT;
}
}
diff --git a/sys/dev/rtwn/rtl8192c/r92c_tx.c b/sys/dev/rtwn/rtl8192c/r92c_tx.c
index 15beca776b61..ba2f60bd9295 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_tx.c
+++ b/sys/dev/rtwn/rtl8192c/r92c_tx.c
@@ -66,8 +66,7 @@ r92c_tx_set_ht40(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
{
struct r92c_tx_desc *txd = (struct r92c_tx_desc *)buf;
- if (ni->ni_chan != IEEE80211_CHAN_ANYC &&
- IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+ if (ieee80211_ht_check_tx_ht40(ni)) {
int extc_offset;
extc_offset = r92c_tx_get_sco(sc, ni->ni_chan);
@@ -78,10 +77,14 @@ r92c_tx_set_ht40(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
static void
r92c_tx_protection(struct rtwn_softc *sc, struct r92c_tx_desc *txd,
- enum ieee80211_protmode mode, uint8_t ridx)
+ enum ieee80211_protmode mode, uint8_t ridx, bool force_rate)
{
struct ieee80211com *ic = &sc->sc_ic;
uint8_t rate;
+ bool use_fw_ratectl;
+
+ use_fw_ratectl =
+ (sc->sc_ratectl == RTWN_RATECTL_FW && !force_rate);
switch (mode) {
case IEEE80211_PROT_CTSONLY:
@@ -96,17 +99,27 @@ r92c_tx_protection(struct rtwn_softc *sc, struct r92c_tx_desc *txd,
if (mode == IEEE80211_PROT_CTSONLY ||
mode == IEEE80211_PROT_RTSCTS) {
- if (ridx >= RTWN_RIDX_HT_MCS(0))
+ if (use_fw_ratectl) {
+ /*
+ * If we're not forcing the driver rate then this
+ * field actually doesn't matter; what matters is
+ * the RRSR and INIRTS configuration.
+ */
+ ridx = RTWN_RIDX_OFDM24;
+ } else if (RTWN_RATE_IS_HT(ridx)) {
rate = rtwn_ctl_mcsrate(ic->ic_rt, ridx);
- else
+ ridx = rate2ridx(IEEE80211_RV(rate));
+ } else {
rate = ieee80211_ctl_rate(ic->ic_rt, ridx2rate[ridx]);
- ridx = rate2ridx(IEEE80211_RV(rate));
+ ridx = rate2ridx(IEEE80211_RV(rate));
+ }
txd->txdw4 |= htole32(SM(R92C_TXDW4_RTSRATE, ridx));
/* RTS rate fallback limit (max). */
txd->txdw5 |= htole32(SM(R92C_TXDW5_RTSRATE_FB_LMT, 0xf));
- if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
+ if (!use_fw_ratectl && RTWN_RATE_IS_CCK(ridx) &&
+ ridx != RTWN_RIDX_CCK1 &&
(ic->ic_flags & IEEE80211_F_SHPREAMBLE))
txd->txdw4 |= htole32(R92C_TXDW4_RTS_SHORT);
}
@@ -171,16 +184,21 @@ static void
r92c_tx_set_sgi(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
{
struct r92c_tx_desc *txd = (struct r92c_tx_desc *)buf;
- struct ieee80211vap *vap = ni->ni_vap;
- if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) && /* HT20 */
- (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20))
- txd->txdw5 |= htole32(R92C_TXDW5_SGI);
- else if (ni->ni_chan != IEEE80211_CHAN_ANYC && /* HT40 */
- IEEE80211_IS_CHAN_HT40(ni->ni_chan) &&
- (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) &&
- (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40))
- txd->txdw5 |= htole32(R92C_TXDW5_SGI);
+ /*
+ * Only enable short-GI if we're transmitting in that
+ * width to that node.
+ *
+ * Specifically, do not enable shortgi for 20MHz if
+ * we're attempting to transmit at 40MHz.
+ */
+ if (ieee80211_ht_check_tx_ht40(ni)) {
+ if (ieee80211_ht_check_tx_shortgi_40(ni))
+ txd->txdw5 |= htole32(R92C_TXDW5_SGI);
+ } else if (ieee80211_ht_check_tx_ht(ni)) {
+ if (ieee80211_ht_check_tx_shortgi_20(ni))
+ txd->txdw5 |= htole32(R92C_TXDW5_SGI);
+ }
}
void
@@ -208,22 +226,129 @@ r92c_tx_setup_macid(void *buf, int id)
struct r92c_tx_desc *txd = (struct r92c_tx_desc *)buf;
txd->txdw1 |= htole32(SM(R92C_TXDW1_MACID, id));
+}
+
+static int
+r92c_calculate_tx_agg_window(struct rtwn_softc *sc,
+ const struct ieee80211_node *ni, int tid)
+{
+ const struct ieee80211_tx_ampdu *tap;
+ int wnd;
+
+ tap = &ni->ni_tx_ampdu[tid];
+
+ /*
+ * BAW is (MAX_AGG * 2) + 1, hence the /2 here.
+ * Ensure we don't program 0, and cap at the 0x1f field maximum.
+ */
+ wnd = tap->txa_wnd / 2;
+ if (wnd == 0)
+ wnd = 1;
+ else if (wnd > 0x1f)
+ wnd = 0x1f;
+
+ return (wnd);
+}
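/*
 * Editor's note: a worked example of the window calculation above.
 * For a negotiated BAW of txa_wnd = 64, wnd = 64 / 2 = 32, which is
 * then capped to the 0x1f (31) field maximum; txa_wnd = 1 yields
 * wnd = 0, which is bumped up to 1.
 */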
+
+/*
+ * Check whether to enable the per-packet TX CCX report.
+ *
+ * For chipsets that do the RPT2 reports, enabling the TX
+ * CCX report results in the packet not being counted in
+ * the RPT2 counts.
+ */
+static bool
+r92c_check_enable_ccx_report(struct rtwn_softc *sc, int macid)
+{
+ if (sc->sc_ratectl != RTWN_RATECTL_NET80211)
+ return (false);
+
+#ifndef RTWN_WITHOUT_UCODE
+ if ((sc->macid_rpt2_max_num != 0) &&
+ (macid < sc->macid_rpt2_max_num))
+ return (false);
+#endif
+ return (true);
+}
+
+static void
+r92c_fill_tx_desc_datarate(struct rtwn_softc *sc, struct r92c_tx_desc *txd,
+ uint8_t ridx, bool force_rate)
+{
+
+ /* Force this rate if needed. */
+ if (sc->sc_ratectl == RTWN_RATECTL_FW && !force_rate) {
+ txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, 0));
+ } else {
+ txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx));
+ txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
+ }
+
+ /* Data rate fallback limit (max). */
+ txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FB_LMT, 0x1f));
+}
+
+static void
+r92c_fill_tx_desc_shpreamble(struct rtwn_softc *sc, struct r92c_tx_desc *txd,
+ uint8_t ridx, bool force_rate)
+{
+ const struct ieee80211com *ic = &sc->sc_ic;
+
+ if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
+ (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
+ txd->txdw4 |= htole32(R92C_TXDW4_DATA_SHPRE);
+}
+
+static enum ieee80211_protmode
+r92c_tx_get_protmode(struct rtwn_softc *sc, const struct ieee80211vap *vap,
+ const struct ieee80211_node *ni, const struct mbuf *m,
+ uint8_t ridx, bool force_rate)
+{
+ const struct ieee80211com *ic = &sc->sc_ic;
+ enum ieee80211_protmode prot;
+
+ prot = IEEE80211_PROT_NONE;
+
+ /*
+ * If doing firmware rate control, base it on the configured channel.
+ * This ensures that for HT operation the RTS/CTS or CTS-to-self
+ * configuration is obeyed.
+ */
+ if (sc->sc_ratectl == RTWN_RATECTL_FW && !force_rate) {
+ struct ieee80211_channel *chan;
+ enum ieee80211_phymode mode;
+
+ chan = (ni->ni_chan != IEEE80211_CHAN_ANYC) ?
+ ni->ni_chan : ic->ic_curchan;
+ mode = ieee80211_chan2mode(chan);
+ if (mode == IEEE80211_MODE_11NG)
+ prot = ic->ic_htprotmode;
+ else if (ic->ic_flags & IEEE80211_F_USEPROT)
+ prot = ic->ic_protmode;
+ } else {
+ if (RTWN_RATE_IS_HT(ridx))
+ prot = ic->ic_htprotmode;
+ else if (ic->ic_flags & IEEE80211_F_USEPROT)
+ prot = ic->ic_protmode;
+ }
- /* XXX does not belong here */
- /* XXX temporary (I hope) */
- /* Force CCK1 for RTS / CTS frames (driver bug) */
- txd->txdw4 &= ~htole32(SM(R92C_TXDW4_RTSRATE, R92C_TXDW4_RTSRATE_M));
- txd->txdw4 &= ~htole32(R92C_TXDW4_RTS_SHORT);
+ /* XXX fix last comparison for A-MSDU (in net80211) */
+ /* XXX A-MPDU? */
+ if (m->m_pkthdr.len + IEEE80211_CRC_LEN >
+ vap->iv_rtsthreshold &&
+ vap->iv_rtsthreshold != IEEE80211_RTS_MAX)
+ prot = IEEE80211_PROT_RTSCTS;
+
+ return (prot);
}
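/*
 * Editor's note: with the net80211 default iv_rtsthreshold (2346,
 * i.e. IEEE80211_RTS_MAX), the size check above never forces RTS/CTS;
 * with a threshold of, say, 1000, a 1500-byte frame plus the 4-byte
 * IEEE80211_CRC_LEN would switch to RTS/CTS protection.
 */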
void
r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
- struct mbuf *m, void *buf, uint8_t ridx, int maxretry)
+ struct mbuf *m, void *buf, uint8_t ridx, bool force_rate, int maxretry)
{
#ifndef RTWN_WITHOUT_UCODE
struct r92c_softc *rs = sc->sc_priv;
#endif
- struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
struct rtwn_vap *uvp = RTWN_VAP(vap);
struct ieee80211_frame *wh;
@@ -252,7 +377,13 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
if (ismcast)
txd->flags0 |= R92C_FLAGS0_BMCAST;
+ if (IEEE80211_IS_QOSDATA(wh))
+ txd->txdw4 |= htole32(R92C_TXDW4_QOS);
+
if (!ismcast) {
+ struct rtwn_node *un = RTWN_NODE(ni);
+ macid = un->id;
+
/* Unicast frame, check if an ACK is expected. */
if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) !=
IEEE80211_QOS_ACKPOLICY_NOACK) {
@@ -261,9 +392,6 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
maxretry));
}
- struct rtwn_node *un = RTWN_NODE(ni);
- macid = un->id;
-
if (type == IEEE80211_FC0_TYPE_DATA) {
qsel = tid % RTWN_MAX_TID;
@@ -271,11 +399,11 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
(m->m_flags & M_AMPDU_MPDU) != 0);
if (m->m_flags & M_AMPDU_MPDU) {
txd->txdw2 |= htole32(SM(R92C_TXDW2_AMPDU_DEN,
- vap->iv_ampdu_density));
+ ieee80211_ht_get_node_ampdu_density(ni)));
txd->txdw6 |= htole32(SM(R92C_TXDW6_MAX_AGG,
- 0x1f)); /* XXX */
+ r92c_calculate_tx_agg_window(sc, ni, tid)));
}
- if (sc->sc_ratectl == RTWN_RATECTL_NET80211) {
+ if (r92c_check_enable_ccx_report(sc, macid)) {
txd->txdw2 |= htole32(R92C_TXDW2_CCX_RPT);
sc->sc_tx_n_active++;
#ifndef RTWN_WITHOUT_UCODE
@@ -283,28 +411,24 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
#endif
}
- if (RTWN_RATE_IS_CCK(ridx) && ridx != RTWN_RIDX_CCK1 &&
- (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
- txd->txdw4 |= htole32(R92C_TXDW4_DATA_SHPRE);
+ r92c_fill_tx_desc_shpreamble(sc, txd, ridx, force_rate);
+
+ prot = r92c_tx_get_protmode(sc, vap, ni, m, ridx,
+ force_rate);
- prot = IEEE80211_PROT_NONE;
- if (ridx >= RTWN_RIDX_HT_MCS(0)) {
+ /*
+ * Note: Firmware rate control will enable short-GI
+ * based on the configured rate mask; however, HT40
+ * may not be enabled.
+ */
+ if (sc->sc_ratectl != RTWN_RATECTL_FW &&
+ RTWN_RATE_IS_HT(ridx)) {
r92c_tx_set_ht40(sc, txd, ni);
r92c_tx_set_sgi(sc, txd, ni);
- prot = ic->ic_htprotmode;
- } else if (ic->ic_flags & IEEE80211_F_USEPROT)
- prot = ic->ic_protmode;
-
- /* XXX fix last comparison for A-MSDU (in net80211) */
- /* XXX A-MPDU? */
- if (m->m_pkthdr.len + IEEE80211_CRC_LEN >
- vap->iv_rtsthreshold &&
- vap->iv_rtsthreshold != IEEE80211_RTS_MAX)
- prot = IEEE80211_PROT_RTSCTS;
+ }
/* NB: checks for ht40 / short bits (set above). */
- if (prot != IEEE80211_PROT_NONE)
- r92c_tx_protection(sc, txd, prot, ridx);
+ r92c_tx_protection(sc, txd, prot, ridx, force_rate);
} else /* IEEE80211_FC0_TYPE_MGT */
qsel = R92C_TXDW1_QSEL_MGNT;
} else {
@@ -315,28 +439,23 @@ r92c_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw1 |= htole32(SM(R92C_TXDW1_QSEL, qsel));
rtwn_r92c_tx_setup_macid(sc, txd, macid);
- txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx));
- /* Data rate fallback limit (max). */
- txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FB_LMT, 0x1f));
+
+ /* Fill in data rate, data retry */
+ r92c_fill_tx_desc_datarate(sc, txd, ridx, force_rate);
+
txd->txdw4 |= htole32(SM(R92C_TXDW4_PORT_ID, uvp->id));
r92c_tx_raid(sc, txd, ni, ismcast);
- /* Force this rate if needed. */
- if (sc->sc_ratectl != RTWN_RATECTL_FW)
- txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
-
if (!hasqos) {
/* Use HW sequence numbering for non-QoS frames. */
rtwn_r92c_tx_setup_hwseq(sc, txd);
- txd->txdw4 |= htole32(SM(R92C_TXDW4_SEQ_SEL, uvp->id));
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid] % IEEE80211_SEQ_RANGE;
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdseq = htole16(seqno);
@@ -372,27 +491,26 @@ r92c_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
params->ibp_try0));
}
if (params->ibp_flags & IEEE80211_BPF_RTS)
- r92c_tx_protection(sc, txd, IEEE80211_PROT_RTSCTS, ridx);
+ r92c_tx_protection(sc, txd, IEEE80211_PROT_RTSCTS, ridx,
+ true);
if (params->ibp_flags & IEEE80211_BPF_CTS)
- r92c_tx_protection(sc, txd, IEEE80211_PROT_CTSONLY, ridx);
+ r92c_tx_protection(sc, txd, IEEE80211_PROT_CTSONLY, ridx,
+ true);
rtwn_r92c_tx_setup_macid(sc, txd, RTWN_MACID_BC);
txd->txdw1 |= htole32(SM(R92C_TXDW1_QSEL, R92C_TXDW1_QSEL_MGNT));
/* Set TX rate index. */
- txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE, ridx));
- txd->txdw5 |= htole32(SM(R92C_TXDW5_DATARATE_FB_LMT, 0x1f));
+ r92c_fill_tx_desc_datarate(sc, txd, ridx, true); /* force rate */
txd->txdw4 |= htole32(SM(R92C_TXDW4_PORT_ID, uvp->id));
- txd->txdw4 |= htole32(R92C_TXDW4_DRVRATE);
r92c_tx_raid(sc, txd, ni, ismcast);
if (!IEEE80211_QOS_HAS_SEQ(wh)) {
/* Use HW sequence numbering for non-QoS frames. */
rtwn_r92c_tx_setup_hwseq(sc, txd);
- txd->txdw4 |= htole32(SM(R92C_TXDW4_SEQ_SEL, uvp->id));
} else {
/* Set sequence number. */
- txd->txdseq |= htole16(M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE);
+ txd->txdseq |= htole16(M_SEQNO_GET(m));
}
}
@@ -418,7 +536,6 @@ r92c_fill_tx_desc_null(struct rtwn_softc *sc, void *buf, int is11b,
if (!qos) {
rtwn_r92c_tx_setup_hwseq(sc, txd);
- txd->txdw4 |= htole32(SM(R92C_TXDW4_SEQ_SEL, id));
}
}
diff --git a/sys/dev/rtwn/rtl8192c/r92c_tx_desc.h b/sys/dev/rtwn/rtl8192c/r92c_tx_desc.h
index 6e546c3da236..33edb975f062 100644
--- a/sys/dev/rtwn/rtl8192c/r92c_tx_desc.h
+++ b/sys/dev/rtwn/rtl8192c/r92c_tx_desc.h
@@ -69,8 +69,7 @@ struct r92c_tx_desc {
uint32_t txdw4;
#define R92C_TXDW4_RTSRATE_M 0x0000001f
#define R92C_TXDW4_RTSRATE_S 0
-#define R92C_TXDW4_SEQ_SEL_M 0x00000040
-#define R92C_TXDW4_SEQ_SEL_S 6
+#define R92C_TXDW4_QOS 0x00000040 /* BIT(6) for 8188cu/8192cu/8723au */
#define R92C_TXDW4_HWSEQ_EN 0x00000080
#define R92C_TXDW4_DRVRATE 0x00000100
#define R92C_TXDW4_CTS2SELF 0x00000800
diff --git a/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c b/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c
index 585610225193..cd350c7fcd8a 100644
--- a/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c
+++ b/sys/dev/rtwn/rtl8192c/usb/r92cu_attach.c
@@ -167,6 +167,7 @@ r92cu_attach(struct rtwn_usb_softc *uc)
sc->sc_get_rssi_ofdm = r92c_get_rssi_ofdm;
sc->sc_classify_intr = r92c_classify_intr;
sc->sc_handle_tx_report = rtwn_nop_softc_uint8_int;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = rtwn_nop_softc_uint8_int;
sc->sc_check_frame = rtwn_nop_int_softc_mbuf;
sc->sc_rf_read = r92c_rf_read;
@@ -198,6 +199,7 @@ r92cu_attach(struct rtwn_usb_softc *uc)
#endif
sc->sc_beacon_init = r92c_beacon_init;
sc->sc_beacon_enable = r92c_beacon_enable;
+ sc->sc_sta_beacon_enable = r92c_sta_beacon_enable;
sc->sc_beacon_set_rate = rtwn_nop_void_int;
sc->sc_beacon_select = rtwn_nop_softc_int;
sc->sc_temp_measure = r92c_temp_measure;
@@ -212,6 +214,7 @@ r92cu_attach(struct rtwn_usb_softc *uc)
sc->sc_init_antsel = r92c_init_antsel;
sc->sc_post_init = r92cu_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = r92c_set_tx_power;
sc->mac_prog = &rtl8192cu_mac[0];
sc->mac_size = nitems(rtl8192cu_mac);
diff --git a/sys/dev/rtwn/rtl8192c/usb/r92cu_init.c b/sys/dev/rtwn/rtl8192c/usb/r92cu_init.c
index 42e50ff9f8bd..91b1b78edb01 100644
--- a/sys/dev/rtwn/rtl8192c/usb/r92cu_init.c
+++ b/sys/dev/rtwn/rtl8192c/usb/r92cu_init.c
@@ -354,8 +354,6 @@ void
r92cu_post_init(struct rtwn_softc *sc)
{
- rtwn_write_4(sc, R92C_POWER_STATUS, 0x5);
-
/* Perform LO and IQ calibrations. */
r92c_iq_calib(sc);
/* Perform LC calibration. */
@@ -375,11 +373,7 @@ r92cu_post_init(struct rtwn_softc *sc)
if (sc->sc_flags & RTWN_FW_LOADED) {
struct r92c_softc *rs = sc->sc_priv;
- if (sc->sc_ratectl_sysctl == RTWN_RATECTL_FW) {
- /* XXX firmware RA does not work yet */
- sc->sc_ratectl = RTWN_RATECTL_NET80211;
- } else
- sc->sc_ratectl = sc->sc_ratectl_sysctl;
+ sc->sc_ratectl = sc->sc_ratectl_sysctl;
/* Start C2H event handling. */
callout_reset(&rs->rs_c2h_report, rs->rs_c2h_timeout,
diff --git a/sys/dev/rtwn/rtl8192e/r92e.h b/sys/dev/rtwn/rtl8192e/r92e.h
index 331750c48726..280cc1464ac6 100644
--- a/sys/dev/rtwn/rtl8192e/r92e.h
+++ b/sys/dev/rtwn/rtl8192e/r92e.h
@@ -46,6 +46,7 @@ void r92e_detach_private(struct rtwn_softc *);
/* r92e_chan.c */
void r92e_set_chan(struct rtwn_softc *, struct ieee80211_channel *);
+int r92e_set_tx_power(struct rtwn_softc *, struct ieee80211vap *);
/* r92e_fw.c */
#ifndef RTWN_WITHOUT_UCODE
diff --git a/sys/dev/rtwn/rtl8192e/r92e_chan.c b/sys/dev/rtwn/rtl8192e/r92e_chan.c
index b28462873c09..4cf17433d257 100644
--- a/sys/dev/rtwn/rtl8192e/r92e_chan.c
+++ b/sys/dev/rtwn/rtl8192e/r92e_chan.c
@@ -90,6 +90,7 @@ static void
r92e_get_txpower(struct rtwn_softc *sc, int chain, struct ieee80211_channel *c,
uint8_t power[RTWN_RIDX_COUNT])
{
+ const struct ieee80211com *ic = &sc->sc_ic;
struct r92e_softc *rs = sc->sc_priv;
int i, ridx, group, max_mcs;
@@ -103,19 +104,32 @@ r92e_get_txpower(struct rtwn_softc *sc, int chain, struct ieee80211_channel *c,
max_mcs = RTWN_RIDX_HT_MCS(sc->ntxchains * 8 - 1);
/* XXX regulatory */
- /* XXX net80211 regulatory */
- for (ridx = RTWN_RIDX_CCK1; ridx <= RTWN_RIDX_CCK11; ridx++)
+ for (ridx = RTWN_RIDX_CCK1; ridx <= RTWN_RIDX_CCK11; ridx++) {
power[ridx] = rs->cck_tx_pwr[chain][group];
- for (ridx = RTWN_RIDX_OFDM6; ridx <= max_mcs; ridx++)
+ if (power[ridx] > ic->ic_txpowlimit)
+ power[ridx] = ic->ic_txpowlimit;
+ }
+ for (ridx = RTWN_RIDX_OFDM6; ridx <= max_mcs; ridx++) {
power[ridx] = rs->ht40_tx_pwr_2g[chain][group];
+ if (power[ridx] > ic->ic_txpowlimit)
+ power[ridx] = ic->ic_txpowlimit;
+ }
- for (ridx = RTWN_RIDX_OFDM6; ridx <= RTWN_RIDX_OFDM54; ridx++)
- power[ridx] += rs->ofdm_tx_pwr_diff_2g[chain][0];
+ for (ridx = RTWN_RIDX_OFDM6; ridx <= RTWN_RIDX_OFDM54; ridx++) {
+ /* Ensure we don't underflow if the power delta is negative. */
+ int8_t pwr;
+
+ pwr = power[ridx] + rs->ofdm_tx_pwr_diff_2g[chain][0];
+ if (pwr < 0)
+ pwr = 0;
+
+ power[ridx] = pwr;
+ }
for (i = 0; i < sc->ntxchains; i++) {
uint8_t min_mcs;
- uint8_t pwr_diff;
+ int8_t pwr_diff, pwr;
if (IEEE80211_IS_CHAN_HT40(c))
pwr_diff = rs->bw40_tx_pwr_diff_2g[chain][i];
@@ -123,8 +137,13 @@ r92e_get_txpower(struct rtwn_softc *sc, int chain, struct ieee80211_channel *c,
pwr_diff = rs->bw20_tx_pwr_diff_2g[chain][i];
min_mcs = RTWN_RIDX_HT_MCS(i * 8);
- for (ridx = min_mcs; ridx <= max_mcs; ridx++)
- power[ridx] += pwr_diff;
+ for (ridx = min_mcs; ridx <= max_mcs; ridx++) {
+ /* Ensure we don't underflow */
+ pwr = power[ridx] + pwr_diff;
+ if (pwr < 0)
+ pwr = 0;
+ power[ridx] = pwr;
+ }
}
/* Apply max limit. */
@@ -132,15 +151,6 @@ r92e_get_txpower(struct rtwn_softc *sc, int chain, struct ieee80211_channel *c,
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
-
-#ifdef RTWN_DEBUG
- if (sc->sc_debug & RTWN_DEBUG_TXPWR) {
- /* Dump per-rate Tx power values. */
- printf("Tx power for chain %d:\n", chain);
- for (ridx = RTWN_RIDX_CCK1; ridx < RTWN_RIDX_COUNT; ridx++)
- printf("Rate %d = %u\n", ridx, power[ridx]);
- }
-#endif
}
static void
@@ -153,11 +163,26 @@ r92e_set_txpower(struct rtwn_softc *sc, struct ieee80211_channel *c)
memset(power, 0, sizeof(power));
/* Compute per-rate Tx power values. */
r92e_get_txpower(sc, i, c, power);
+ /* Optionally print out the power table */
+ r92c_dump_txpower(sc, i, power);
/* Write per-rate Tx power values to hardware. */
r92c_write_txpower(sc, i, power);
}
}
+int
+r92e_set_tx_power(struct rtwn_softc *sc, struct ieee80211vap *vap)
+{
+
+ if (vap->iv_bss == NULL)
+ return (EINVAL);
+ if (vap->iv_bss->ni_chan == IEEE80211_CHAN_ANYC)
+ return (EINVAL);
+
+ r92e_set_txpower(sc, vap->iv_bss->ni_chan);
+ return (0);
+}
+
static void
r92e_set_bw40(struct rtwn_softc *sc, uint8_t chan, int prichlo)
{
@@ -227,4 +252,28 @@ r92e_set_chan(struct rtwn_softc *sc, struct ieee80211_channel *c)
/* Set Tx power for this new channel. */
r92e_set_txpower(sc, c);
+
+ /*
+ * Work around some timing issues with RTL8192EU on faster
+ * CPUs / USB-3 ports by sleeping for 10ms.
+ *
+ * Without this delay the initial frame send during authentication
+ * doesn't occur.
+ *
+ * My (adrian) guess is that there's a race condition between
+ * everything being programmed into the hardware and the first
+ * send. Notably, TXPAUSE isn't 0x0 after rf init,
+ * which the rtl8xxxu driver has a commit to address (c6015bf3ff1ff)
+ * - wifi: rtl8xxxu: fixing transmission failure for rtl8192eu
+ *
+ * Although it's hard to do due to locking constraints, reading
+ * TXPAUSE during scan / association shows it's non-zero, which
+ * needs to be looked at in more depth.
+ *
+ * Linux doesn't have a delay here, however it does try multiple
+ * times to send an authentication frame.
+ *
+ * See PR/247528 for more info.
+ */
+ rtwn_delay(sc, 10000);
}
diff --git a/sys/dev/rtwn/rtl8192e/r92e_init.c b/sys/dev/rtwn/rtl8192e/r92e_init.c
index 925221bdabb1..077c64626f8c 100644
--- a/sys/dev/rtwn/rtl8192e/r92e_init.c
+++ b/sys/dev/rtwn/rtl8192e/r92e_init.c
@@ -368,6 +368,7 @@ r92e_power_off(struct rtwn_softc *sc)
return;
}
+#if 0
/* SOP option to disable BG/MB. */
rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0xff,
R92C_APS_FSMCO_SOP_RCK, 3);
@@ -377,12 +378,15 @@ r92e_power_off(struct rtwn_softc *sc)
/* Disable small LDO. */
rtwn_setbits_1(sc, R92C_SPS0_CTRL, 0x1, 0);
+#endif
/* Enable WL suspend. */
rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, R92C_APS_FSMCO_AFSM_PCIE,
R92C_APS_FSMCO_AFSM_HSUS, 1);
+#if 0
/* Enable SW LPS. */
rtwn_setbits_1_shift(sc, R92C_APS_FSMCO, 0,
R92C_APS_FSMCO_APFM_RSM, 1);
+#endif
}
diff --git a/sys/dev/rtwn/rtl8192e/r92e_rf.c b/sys/dev/rtwn/rtl8192e/r92e_rf.c
index 8e05a827262b..478945d13c85 100644
--- a/sys/dev/rtwn/rtl8192e/r92e_rf.c
+++ b/sys/dev/rtwn/rtl8192e/r92e_rf.c
@@ -64,7 +64,9 @@ r92e_rf_read(struct rtwn_softc *sc, int chain, uint8_t addr)
RW(val, R92C_HSSI_PARAM2_READ_ADDR, addr) &
~R92C_HSSI_PARAM2_READ_EDGE);
+ rtwn_delay(sc, 10);
rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(0), R92C_HSSI_PARAM2_READ_EDGE, 0);
+ rtwn_delay(sc, 100);
rtwn_bb_setbits(sc, R92C_HSSI_PARAM2(0), 0, R92C_HSSI_PARAM2_READ_EDGE);
rtwn_delay(sc, 20);
@@ -82,5 +84,6 @@ r92e_rf_write(struct rtwn_softc *sc, int chain, uint8_t addr, uint32_t val)
rtwn_bb_setbits(sc, 0x818, 0x20000, 0);
rtwn_bb_write(sc, R92C_LSSI_PARAM(chain),
SM(R88E_LSSI_PARAM_ADDR, addr) | SM(R92C_LSSI_PARAM_DATA, val));
+ rtwn_delay(sc, 1);
rtwn_bb_setbits(sc, 0x818, 0, 0x20000);
}
diff --git a/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c b/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c
index a0a93925358c..a11a6bb79c5d 100644
--- a/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c
+++ b/sys/dev/rtwn/rtl8192e/usb/r92eu_attach.c
@@ -116,6 +116,7 @@ r92eu_attach(struct rtwn_usb_softc *uc)
sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
sc->sc_classify_intr = r12au_classify_intr;
sc->sc_handle_tx_report = r12a_ratectl_tx_complete;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = r92e_handle_c2h_report;
sc->sc_check_frame = rtwn_nop_int_softc_mbuf;
sc->sc_rf_read = r92e_rf_read;
@@ -149,6 +150,7 @@ r92eu_attach(struct rtwn_usb_softc *uc)
#endif
sc->sc_beacon_init = r12a_beacon_init;
sc->sc_beacon_enable = r92c_beacon_enable;
+ sc->sc_sta_beacon_enable = r92c_sta_beacon_enable;
sc->sc_beacon_set_rate = rtwn_nop_void_int;
sc->sc_beacon_select = r21a_beacon_select;
sc->sc_temp_measure = r88e_temp_measure;
@@ -163,6 +165,7 @@ r92eu_attach(struct rtwn_usb_softc *uc)
sc->sc_init_antsel = rtwn_nop_softc;
sc->sc_post_init = r92eu_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = r92e_set_tx_power;
sc->mac_prog = &rtl8192eu_mac[0];
sc->mac_size = nitems(rtl8192eu_mac);
diff --git a/sys/dev/rtwn/rtl8812a/r12a.h b/sys/dev/rtwn/rtl8812a/r12a.h
index 19dbd1569e6d..e5c5c7cd6a80 100644
--- a/sys/dev/rtwn/rtl8812a/r12a.h
+++ b/sys/dev/rtwn/rtl8812a/r12a.h
@@ -60,6 +60,7 @@ void r12a_detach_private(struct rtwn_softc *);
/* r12a_beacon.c */
void r12a_beacon_init(struct rtwn_softc *, void *, int);
void r12a_beacon_set_rate(void *, int);
+void r12a_sta_beacon_enable(struct rtwn_softc *, int, bool);
/* r12a_calib.c */
void r12a_save_bb_afe_vals(struct rtwn_softc *, uint32_t[],
@@ -130,7 +131,7 @@ void r12a_get_rx_stats(struct rtwn_softc *, struct ieee80211_rx_stats *,
/* r12a_tx.c */
void r12a_fill_tx_desc(struct rtwn_softc *, struct ieee80211_node *,
- struct mbuf *, void *, uint8_t, int);
+ struct mbuf *, void *, uint8_t, bool, int);
void r12a_fill_tx_desc_raw(struct rtwn_softc *, struct ieee80211_node *,
struct mbuf *, void *, const struct ieee80211_bpf_params *);
void r12a_fill_tx_desc_null(struct rtwn_softc *, void *, int, int, int);
diff --git a/sys/dev/rtwn/rtl8812a/r12a_beacon.c b/sys/dev/rtwn/rtl8812a/r12a_beacon.c
index b4458d60a0fa..93b4e25a50ed 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_beacon.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_beacon.c
@@ -91,3 +91,12 @@ r12a_beacon_set_rate(void *buf, int is5ghz)
} else
txd->txdw4 = htole32(SM(R12A_TXDW4_DATARATE, RTWN_RIDX_CCK1));
}
+
+/*
+ * For now (no rate control) don't change the beacon configuration
+ * in STA mode.
+ */
+void
+r12a_sta_beacon_enable(struct rtwn_softc *sc, int id, bool enable)
+{
+}
diff --git a/sys/dev/rtwn/rtl8812a/r12a_chan.c b/sys/dev/rtwn/rtl8812a/r12a_chan.c
index 6bad6345f27f..f900d1ef7b2d 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_chan.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_chan.c
@@ -60,30 +60,10 @@
#include <dev/rtwn/rtl8812a/r12a_var.h>
static void
-r12a_write_txpower(struct rtwn_softc *sc, int chain,
+r12a_write_txpower_ht(struct rtwn_softc *sc, int chain,
struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
{
- if (IEEE80211_IS_CHAN_2GHZ(c)) {
- /* Write per-CCK rate Tx power. */
- rtwn_bb_write(sc, R12A_TXAGC_CCK11_1(chain),
- SM(R12A_TXAGC_CCK1, power[RTWN_RIDX_CCK1]) |
- SM(R12A_TXAGC_CCK2, power[RTWN_RIDX_CCK2]) |
- SM(R12A_TXAGC_CCK55, power[RTWN_RIDX_CCK55]) |
- SM(R12A_TXAGC_CCK11, power[RTWN_RIDX_CCK11]));
- }
-
- /* Write per-OFDM rate Tx power. */
- rtwn_bb_write(sc, R12A_TXAGC_OFDM18_6(chain),
- SM(R12A_TXAGC_OFDM06, power[RTWN_RIDX_OFDM6]) |
- SM(R12A_TXAGC_OFDM09, power[RTWN_RIDX_OFDM9]) |
- SM(R12A_TXAGC_OFDM12, power[RTWN_RIDX_OFDM12]) |
- SM(R12A_TXAGC_OFDM18, power[RTWN_RIDX_OFDM18]));
- rtwn_bb_write(sc, R12A_TXAGC_OFDM54_24(chain),
- SM(R12A_TXAGC_OFDM24, power[RTWN_RIDX_OFDM24]) |
- SM(R12A_TXAGC_OFDM36, power[RTWN_RIDX_OFDM36]) |
- SM(R12A_TXAGC_OFDM48, power[RTWN_RIDX_OFDM48]) |
- SM(R12A_TXAGC_OFDM54, power[RTWN_RIDX_OFDM54]));
/* Write per-MCS Tx power. */
rtwn_bb_write(sc, R12A_TXAGC_MCS3_0(chain),
SM(R12A_TXAGC_MCS0, power[RTWN_RIDX_HT_MCS(0)]) |
@@ -108,7 +88,139 @@ r12a_write_txpower(struct rtwn_softc *sc, int chain,
SM(R12A_TXAGC_MCS15, power[RTWN_RIDX_HT_MCS(15)]));
}
- /* TODO: VHT rates */
+ /* TODO: HT MCS 16 -> 31 */
+}
+
+static void
+r12a_write_txpower_vht(struct rtwn_softc *sc, int chain,
+ struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
+{
+
+ /* 1SS, MCS 0..3 */
+ rtwn_bb_write(sc, R12A_TXAGC_NSS1IX3_1IX0(chain),
+ SM(R12A_TXAGC_NSS1_MCS0, power[RTWN_RIDX_VHT_MCS(0, 0)]) |
+ SM(R12A_TXAGC_NSS1_MCS1, power[RTWN_RIDX_VHT_MCS(0, 1)]) |
+ SM(R12A_TXAGC_NSS1_MCS2, power[RTWN_RIDX_VHT_MCS(0, 2)]) |
+ SM(R12A_TXAGC_NSS1_MCS3, power[RTWN_RIDX_VHT_MCS(0, 3)]));
+
+ /* 1SS, MCS 4..7 */
+ rtwn_bb_write(sc, R12A_TXAGC_NSS1IX7_1IX4(chain),
+ SM(R12A_TXAGC_NSS1_MCS4, power[RTWN_RIDX_VHT_MCS(0, 4)]) |
+ SM(R12A_TXAGC_NSS1_MCS5, power[RTWN_RIDX_VHT_MCS(0, 5)]) |
+ SM(R12A_TXAGC_NSS1_MCS6, power[RTWN_RIDX_VHT_MCS(0, 6)]) |
+ SM(R12A_TXAGC_NSS1_MCS7, power[RTWN_RIDX_VHT_MCS(0, 7)]));
+
+ /* 1SS MCS 8..9; 2SS MCS 0..1 */
+ if (sc->ntxchains == 1) {
+ rtwn_bb_write(sc, R12A_TXAGC_NSS2IX1_1IX8(chain),
+ SM(R12A_TXAGC_NSS1_MCS8, power[RTWN_RIDX_VHT_MCS(0, 8)]) |
+ SM(R12A_TXAGC_NSS1_MCS9, power[RTWN_RIDX_VHT_MCS(0, 9)]) |
+ SM(R12A_TXAGC_NSS2_MCS0, 0) |
+ SM(R12A_TXAGC_NSS2_MCS1, 0));
+ } else {
+ rtwn_bb_write(sc, R12A_TXAGC_NSS2IX1_1IX8(chain),
+ SM(R12A_TXAGC_NSS1_MCS8, power[RTWN_RIDX_VHT_MCS(0, 8)]) |
+ SM(R12A_TXAGC_NSS1_MCS9, power[RTWN_RIDX_VHT_MCS(0, 9)]) |
+ SM(R12A_TXAGC_NSS2_MCS0, power[RTWN_RIDX_VHT_MCS(1, 0)]) |
+ SM(R12A_TXAGC_NSS2_MCS1, power[RTWN_RIDX_VHT_MCS(1, 1)]));
+ }
+
+ /* 2SS MCS 2..5 */
+ if (sc->ntxchains > 1) {
+ rtwn_bb_write(sc, R12A_TXAGC_NSS2IX5_2IX2(chain),
+ SM(R12A_TXAGC_NSS2_MCS2, power[RTWN_RIDX_VHT_MCS(1, 2)]) |
+ SM(R12A_TXAGC_NSS2_MCS3, power[RTWN_RIDX_VHT_MCS(1, 3)]) |
+ SM(R12A_TXAGC_NSS2_MCS4, power[RTWN_RIDX_VHT_MCS(1, 4)]) |
+ SM(R12A_TXAGC_NSS2_MCS5, power[RTWN_RIDX_VHT_MCS(1, 5)]));
+ }
+
+ /*
+ * 2SS MCS 6..9.  NB: the NSS2_MCS2..MCS5 field macros are
+ * reused here since this register packs its four rates with
+ * the same byte-lane shifts.
+ */
+ if (sc->ntxchains > 1) {
+ rtwn_bb_write(sc, R12A_TXAGC_NSS2IX9_2IX6(chain),
+ SM(R12A_TXAGC_NSS2_MCS2, power[RTWN_RIDX_VHT_MCS(1, 6)]) |
+ SM(R12A_TXAGC_NSS2_MCS3, power[RTWN_RIDX_VHT_MCS(1, 7)]) |
+ SM(R12A_TXAGC_NSS2_MCS4, power[RTWN_RIDX_VHT_MCS(1, 8)]) |
+ SM(R12A_TXAGC_NSS2_MCS5, power[RTWN_RIDX_VHT_MCS(1, 9)]));
+ }
+
+ /* TODO: 3SS, 4SS VHT rates */
+}
+
+static void
+r12a_write_txpower_cck(struct rtwn_softc *sc, int chain,
+ struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
+{
+
+ if (IEEE80211_IS_CHAN_2GHZ(c)) {
+ /* Write per-CCK rate Tx power. */
+ rtwn_bb_write(sc, R12A_TXAGC_CCK11_1(chain),
+ SM(R12A_TXAGC_CCK1, power[RTWN_RIDX_CCK1]) |
+ SM(R12A_TXAGC_CCK2, power[RTWN_RIDX_CCK2]) |
+ SM(R12A_TXAGC_CCK55, power[RTWN_RIDX_CCK55]) |
+ SM(R12A_TXAGC_CCK11, power[RTWN_RIDX_CCK11]));
+ }
+}
+
+static void
+r12a_write_txpower_ofdm(struct rtwn_softc *sc, int chain,
+ struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
+{
+
+ /* Write per-OFDM rate Tx power. */
+ rtwn_bb_write(sc, R12A_TXAGC_OFDM18_6(chain),
+ SM(R12A_TXAGC_OFDM06, power[RTWN_RIDX_OFDM6]) |
+ SM(R12A_TXAGC_OFDM09, power[RTWN_RIDX_OFDM9]) |
+ SM(R12A_TXAGC_OFDM12, power[RTWN_RIDX_OFDM12]) |
+ SM(R12A_TXAGC_OFDM18, power[RTWN_RIDX_OFDM18]));
+ rtwn_bb_write(sc, R12A_TXAGC_OFDM54_24(chain),
+ SM(R12A_TXAGC_OFDM24, power[RTWN_RIDX_OFDM24]) |
+ SM(R12A_TXAGC_OFDM36, power[RTWN_RIDX_OFDM36]) |
+ SM(R12A_TXAGC_OFDM48, power[RTWN_RIDX_OFDM48]) |
+ SM(R12A_TXAGC_OFDM54, power[RTWN_RIDX_OFDM54]));
+}
+
+static void
+r12a_tx_power_training(struct rtwn_softc *sc, int chain,
+ const struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
+{
+ uint32_t write_data;
+ int32_t power_level;
+ int i;
+
+ write_data = 0;
+
+ power_level = (int32_t) power[RTWN_RIDX_HT_MCS(7)];
+ for (i = 0; i < 3; i++) {
+ if (i == 0)
+ power_level -= 10;
+ else if (i == 1)
+ power_level -= 8;
+ else
+ power_level -= 6;
+
+ /* Clamp to the minimum allowed value (2). */
+ if (power_level < 2)
+ power_level = 2;
+
+ write_data |= ((power_level & 0xff) << (i * 8));
+ }
+
+ rtwn_bb_setbits(sc, R12A_TX_PWR_TRAINING(chain),
+ 0x00ffffff, write_data);
+}
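/*
 * Editor's note: the three training levels above are cumulative.
 * E.g. with power[RTWN_RIDX_HT_MCS(7)] = 40 (0.5 dBm units), the
 * byte lanes become 40 - 10 = 30, 30 - 8 = 22 and 22 - 6 = 16, so
 * write_data = 0x0010161e before the masked register update.
 */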
+
+static void
+r12a_write_txpower(struct rtwn_softc *sc, int chain,
+ struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
+{
+
+ r12a_write_txpower_cck(sc, chain, c, power);
+ r12a_write_txpower_ofdm(sc, chain, c, power);
+ r12a_write_txpower_ht(sc, chain, c, power);
+ r12a_write_txpower_vht(sc, chain, c, power);
+
+ r12a_tx_power_training(sc, chain, c, power);
}
static int
@@ -163,7 +275,7 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
struct ieee80211_channel *c, uint8_t power[RTWN_RIDX_COUNT])
{
struct r12a_softc *rs = sc->sc_priv;
- int i, ridx, group, max_mcs;
+ int i, ridx, group, max_mcs, max_vht_mcs;
/* Determine channel group. */
group = r12a_get_power_group(sc, c);
@@ -172,8 +284,8 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
return;
}
- /* TODO: VHT rates. */
max_mcs = RTWN_RIDX_HT_MCS(sc->ntxchains * 8 - 1);
+ max_vht_mcs = RTWN_RIDX_VHT_MCS(sc->ntxchains, 9) - 1;
/* XXX regulatory */
/* XXX net80211 regulatory */
@@ -191,13 +303,11 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
uint8_t min_mcs;
uint8_t pwr_diff;
-#ifdef notyet
- if (IEEE80211_IS_CHAN_HT80(c)) {
+ if (IEEE80211_IS_CHAN_VHT80(c)) {
/* Vendor driver uses HT40 values here. */
pwr_diff = rs->bw40_tx_pwr_diff_2g[chain][i];
} else
-#endif
- if (IEEE80211_IS_CHAN_HT40(c))
+ if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_VHT40(c))
pwr_diff = rs->bw40_tx_pwr_diff_2g[chain][i];
else
pwr_diff = rs->bw20_tx_pwr_diff_2g[chain][i];
@@ -207,9 +317,14 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
power[ridx] += pwr_diff;
}
} else { /* 5GHz */
+ /* OFDM + HT */
for (ridx = RTWN_RIDX_OFDM6; ridx <= max_mcs; ridx++)
power[ridx] = rs->ht40_tx_pwr_5g[chain][group];
+ /* VHT */
+ for (ridx = RTWN_RIDX_VHT_MCS_SHIFT; ridx <= max_vht_mcs; ridx++)
+ power[ridx] = rs->ht40_tx_pwr_5g[chain][group];
+ /* Add power for OFDM rates */
for (ridx = RTWN_RIDX_OFDM6; ridx <= RTWN_RIDX_OFDM54; ridx++)
power[ridx] += rs->ofdm_tx_pwr_diff_5g[chain][0];
@@ -217,20 +332,26 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
uint8_t min_mcs;
uint8_t pwr_diff;
-#ifdef notyet
- if (IEEE80211_IS_CHAN_HT80(c)) {
+ if (IEEE80211_IS_CHAN_VHT80(c)) {
/* TODO: calculate base value. */
pwr_diff = rs->bw80_tx_pwr_diff_5g[chain][i];
} else
-#endif
- if (IEEE80211_IS_CHAN_HT40(c))
+ if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_VHT40(c))
pwr_diff = rs->bw40_tx_pwr_diff_5g[chain][i];
else
pwr_diff = rs->bw20_tx_pwr_diff_5g[chain][i];
+ /* Adjust HT rates */
min_mcs = RTWN_RIDX_HT_MCS(i * 8);
for (ridx = min_mcs; ridx <= max_mcs; ridx++)
power[ridx] += pwr_diff;
+
+ /* Adjust VHT rates */
+ for (ridx = RTWN_RIDX_VHT_MCS(i, 0);
+ ridx <= RTWN_RIDX_VHT_MCS(i, 9);
+ ridx++)
+ power[ridx] += pwr_diff;
}
}
@@ -239,6 +360,12 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
if (power[ridx] > R92C_MAX_TX_PWR)
power[ridx] = R92C_MAX_TX_PWR;
}
+ for (ridx = RTWN_RIDX_VHT_MCS(0, 0);
+ ridx <= RTWN_RIDX_VHT_MCS(3, 9);
+ ridx++) {
+ if (power[ridx] > R92C_MAX_TX_PWR)
+ power[ridx] = R92C_MAX_TX_PWR;
+ }
#ifdef RTWN_DEBUG
if (sc->sc_debug & RTWN_DEBUG_TXPWR) {
@@ -246,6 +373,7 @@ r12a_get_txpower(struct rtwn_softc *sc, int chain,
printf("Tx power for chain %d:\n", chain);
for (ridx = RTWN_RIDX_CCK1; ridx <= max_mcs; ridx++)
printf("Rate %d = %u\n", ridx, power[ridx]);
+ /* TODO: dump VHT 0..9 for each spatial stream */
}
#endif
}
@@ -278,12 +406,20 @@ r12a_fix_spur(struct rtwn_softc *sc, struct ieee80211_channel *c)
} else {
rtwn_bb_setbits(sc, R12A_RFMOD, 0x400, 0x800);
- if (!IEEE80211_IS_CHAN_HT40(c) && /* 20 MHz */
+ if ((IEEE80211_IS_CHAN_B(c) ||
+ IEEE80211_IS_CHAN_ANYG(c) ||
+ IEEE80211_IS_CHAN_HT20(c)) && /* 2GHz, 20 MHz */
(chan == 13 || chan == 14)) {
rtwn_bb_setbits(sc, R12A_RFMOD, 0, 0x300);
rtwn_bb_setbits(sc, R12A_ADC_BUF_CLK,
0, 0x40000000);
- } else { /* !80 Mhz */
+ } else if (IEEE80211_IS_CHAN_HT40(c) ||
+ IEEE80211_IS_CHAN_VHT40(c)) {
+ /* XXX double check! */
+ rtwn_bb_setbits(sc, R12A_ADC_BUF_CLK,
+ 0, 0x40000000);
+ } else if (IEEE80211_IS_CHAN_VHT80(c)) {
+ /* XXX double check! */
rtwn_bb_setbits(sc, R12A_RFMOD, 0x100, 0x200);
rtwn_bb_setbits(sc, R12A_ADC_BUF_CLK,
0x40000000, 0);
@@ -291,7 +427,9 @@ r12a_fix_spur(struct rtwn_softc *sc, struct ieee80211_channel *c)
}
} else {
/* Set ADC clock to 160M to resolve 2480 MHz spur. */
- if (!IEEE80211_IS_CHAN_HT40(c) && /* 20 MHz */
+ if ((IEEE80211_IS_CHAN_B(c) ||
+ IEEE80211_IS_CHAN_ANYG(c) ||
+ IEEE80211_IS_CHAN_HT20(c)) && /* 2GHz, 20 MHz */
(chan == 13 || chan == 14))
rtwn_bb_setbits(sc, R12A_RFMOD, 0, 0x300);
else if (IEEE80211_IS_CHAN_2GHZ(c))
@@ -314,8 +452,9 @@ r12a_set_band(struct rtwn_softc *sc, struct ieee80211_channel *c)
!(rtwn_read_1(sc, R12A_CCK_CHECK) & R12A_CCK_CHECK_5GHZ))
return;
+ /* Note: this only fetches the basic rates, not the full rateset */
rtwn_get_rates(sc, ieee80211_get_suprates(ic, c), NULL, &basicrates,
- NULL, 1);
+ NULL, NULL, 1);
if (IEEE80211_IS_CHAN_2GHZ(c)) {
rtwn_r12a_set_band_2ghz(sc, basicrates);
swing = rs->tx_bbswing_2g;
@@ -393,16 +532,67 @@ r12a_set_chan(struct rtwn_softc *sc, struct ieee80211_channel *c)
rtwn_rf_setbits(sc, i, R92C_RF_CHNLBW, 0xff, chan);
}
-#ifdef notyet
- if (IEEE80211_IS_CHAN_HT80(c)) { /* 80 MHz */
- rtwn_setbits_2(sc, R92C_WMAC_TRXPTCL_CTL, 0x80, 0x100);
+ if (IEEE80211_IS_CHAN_VHT80(c)) { /* 80 MHz */
+ uint8_t ext20 = 0, ext40 = 0;
+ uint8_t txsc;
+ /* calculate ext20/ext40 */
+ if (c->ic_ieee > c->ic_vht_ch_freq1) {
+ if (c->ic_ieee - c->ic_vht_ch_freq1 == 2) {
+ ext20 = R12A_DATA_SEC_PRIM_UP_20;
+ ext40 = R12A_DATA_SEC_PRIM_UP_40;
+ } else {
+ ext20 = R12A_DATA_SEC_PRIM_UPPER_20;
+ ext40 = R12A_DATA_SEC_PRIM_UP_40;
+ }
+ } else {
+ if (c->ic_vht_ch_freq1 - c->ic_ieee == 2) {
+ ext20 = R12A_DATA_SEC_PRIM_DOWN_20;
+ ext40 = R12A_DATA_SEC_PRIM_DOWN_40;
+ } else {
+ ext20 = R12A_DATA_SEC_PRIM_LOWER_20;
+ ext40 = R12A_DATA_SEC_PRIM_DOWN_40;
+ }
+ }
+ /* Form txsc from sec20/sec40 config */
+ txsc = SM(R12A_DATA_SEC_TXSC_20M, ext20);
+ txsc |= SM(R12A_DATA_SEC_TXSC_40M, ext40);
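+		/*
+		 * Worked example: primary channel 36 in an 80 MHz block
+		 * centred on channel 42 gives ic_vht_ch_freq1 - ic_ieee
+		 * == 6, so ext20 = R12A_DATA_SEC_PRIM_LOWER_20 (0x04)
+		 * and ext40 = R12A_DATA_SEC_PRIM_DOWN_40 (0x0a); txsc
+		 * then packs to 0xa4.
+		 */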
+
+ rtwn_setbits_2(sc, R92C_WMAC_TRXPTCL_CTL, 0x180, 0x100);
+
+ /* DATA_SEC, for ext20/ext40 */
+ rtwn_write_1(sc, R12A_DATA_SEC, txsc);
+
+ /* ADCCLK */
+ rtwn_bb_setbits(sc, R12A_RFMOD, 0x003003c3, 0x00300202);
+
+ /* ADC160 - Set bit 30 */
+ rtwn_bb_setbits(sc, R12A_ADC_BUF_CLK, 0, 0x40000000);
- /* TODO */
+ /* ADCCLK, ext20 */
+ /* discard high 4 bits */
+ val = rtwn_bb_read(sc, R12A_RFMOD);
+ val = RW(val, R12A_RFMOD_EXT_CHAN, ext20);
+ rtwn_bb_write(sc, R12A_RFMOD, val);
+
+ /* CCA2ND, ext20 */
+ val = rtwn_bb_read(sc, R12A_CCA_ON_SEC);
+ val = RW(val, R12A_CCA_ON_SEC_EXT_CHAN, ext20);
+ rtwn_bb_write(sc, R12A_CCA_ON_SEC, val);
+ /* PEAK_TH */
+ if (rtwn_read_1(sc, 0x837) & 0x04)
+ val = 0x01400000;
+ else if (sc->nrxchains == 2 && sc->ntxchains == 2)
+ val = 0x01800000;
+ else
+ val = 0x01c00000;
+
+ rtwn_bb_setbits(sc, R12A_L1_PEAK_TH, 0x03c00000, val);
+ /* BWMASK */
val = 0x0;
- } else
-#endif
- if (IEEE80211_IS_CHAN_HT40(c)) { /* 40 MHz */
+
+ } else if (IEEE80211_IS_CHAN_HT40(c) ||
+ IEEE80211_IS_CHAN_VHT40(c)) { /* 40 MHz */
uint8_t ext_chan;
if (IEEE80211_IS_CHAN_HT40U(c))
diff --git a/sys/dev/rtwn/rtl8812a/r12a_fw.c b/sys/dev/rtwn/rtl8812a/r12a_fw.c
index 6d383fe2f5ad..c2c12c2a2ee2 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_fw.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_fw.c
@@ -171,8 +171,10 @@ r12a_iq_calib_fw(struct rtwn_softc *sc)
else
cmd.band_bw = RTWN_CMD_IQ_BAND_2GHZ;
- /* TODO: 80/160 MHz. */
- if (IEEE80211_IS_CHAN_HT40(c))
+ /* TODO: 160MHz */
+ if (IEEE80211_IS_CHAN_VHT80(c))
+ cmd.band_bw |= RTWN_CMD_IQ_CHAN_WIDTH_80;
+ else if (IEEE80211_IS_CHAN_HT40(c) || IEEE80211_IS_CHAN_VHT40(c))
cmd.band_bw |= RTWN_CMD_IQ_CHAN_WIDTH_40;
else
cmd.band_bw |= RTWN_CMD_IQ_CHAN_WIDTH_20;
diff --git a/sys/dev/rtwn/rtl8812a/r12a_reg.h b/sys/dev/rtwn/rtl8812a/r12a_reg.h
index f9acc2047490..f1fc4ee2f302 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_reg.h
+++ b/sys/dev/rtwn/rtl8812a/r12a_reg.h
@@ -45,6 +45,10 @@
#define R12A_AMPDU_MAX_TIME 0x456
#define R12A_AMPDU_MAX_LENGTH R92C_AGGLEN_LMT
#define R12A_DATA_SEC 0x483
+#define R12A_DATA_SEC_TXSC_20M_M 0x0000000f
+#define R12A_DATA_SEC_TXSC_20M_S 0
+#define R12A_DATA_SEC_TXSC_40M_M 0x000000f0
+#define R12A_DATA_SEC_TXSC_40M_S 4
#define R12A_ARFR_2G(i) (0x48c + (i) * 8)
#define R12A_HT_SINGLE_AMPDU 0x4c7
@@ -75,8 +79,8 @@
#define R12A_DATA_SEC_PRIM_DOWN_20 0x02
#define R12A_DATA_SEC_PRIM_UPPER_20 0x03
#define R12A_DATA_SEC_PRIM_LOWER_20 0x04
-#define R12A_DATA_SEC_PRIM_UP_40 0x90
-#define R12A_DATA_SEC_PRIM_DOWN_40 0xa0
+#define R12A_DATA_SEC_PRIM_UP_40 0x09
+#define R12A_DATA_SEC_PRIM_DOWN_40 0x0a
/* Bits for R12A_HT_SINGLE_AMPDU. */
#define R12A_HT_SINGLE_AMPDU_PKT_ENA 0x80
@@ -120,6 +124,7 @@
#define R12A_TXAGC_NSS2IX5_2IX2(chain) (0xc48 + (chain) * 0x200)
#define R12A_TXAGC_NSS2IX9_2IX6(chain) (0xc4c + (chain) * 0x200)
#define R12A_INITIAL_GAIN(chain) (0xc50 + (chain) * 0x200)
+#define R12A_TX_PWR_TRAINING(chain) (0xc54 + (chain) * 0x200)
#define R12A_AFE_POWER_1(chain) (0xc60 + (chain) * 0x200)
#define R12A_AFE_POWER_2(chain) (0xc64 + (chain) * 0x200)
#define R12A_SLEEP_NAV(chain) (0xc80 + (chain) * 0x200)
@@ -230,6 +235,56 @@
#define R12A_TXAGC_MCS15_M 0xff000000
#define R12A_TXAGC_MCS15_S 24
+/* Bits for R12A_TXAGC_NSS1IX3_1IX0(i) */
+#define R12A_TXAGC_NSS1_MCS0_M 0x000000ff
+#define R12A_TXAGC_NSS1_MCS0_S 0
+#define R12A_TXAGC_NSS1_MCS1_M 0x0000ff00
+#define R12A_TXAGC_NSS1_MCS1_S 8
+#define R12A_TXAGC_NSS1_MCS2_M 0x00ff0000
+#define R12A_TXAGC_NSS1_MCS2_S 16
+#define R12A_TXAGC_NSS1_MCS3_M 0xff000000
+#define R12A_TXAGC_NSS1_MCS3_S 24
+
+/* Bits for R12A_TXAGC_NSS1IX7_1IX4(i) */
+#define R12A_TXAGC_NSS1_MCS4_M 0x000000ff
+#define R12A_TXAGC_NSS1_MCS4_S 0
+#define R12A_TXAGC_NSS1_MCS5_M 0x0000ff00
+#define R12A_TXAGC_NSS1_MCS5_S 8
+#define R12A_TXAGC_NSS1_MCS6_M 0x00ff0000
+#define R12A_TXAGC_NSS1_MCS6_S 16
+#define R12A_TXAGC_NSS1_MCS7_M 0xff000000
+#define R12A_TXAGC_NSS1_MCS7_S 24
+
+/* Bits for R12A_TXAGC_NSS2IX1_1IX8(i) */
+#define R12A_TXAGC_NSS1_MCS8_M 0x000000ff
+#define R12A_TXAGC_NSS1_MCS8_S 0
+#define R12A_TXAGC_NSS1_MCS9_M 0x0000ff00
+#define R12A_TXAGC_NSS1_MCS9_S 8
+#define R12A_TXAGC_NSS2_MCS0_M 0x00ff0000
+#define R12A_TXAGC_NSS2_MCS0_S 16
+#define R12A_TXAGC_NSS2_MCS1_M 0xff000000
+#define R12A_TXAGC_NSS2_MCS1_S 24
+
+/* Bits for R12A_TXAGC_NSS2IX5_2IX2(i) */
+#define R12A_TXAGC_NSS2_MCS2_M 0x000000ff
+#define R12A_TXAGC_NSS2_MCS2_S 0
+#define R12A_TXAGC_NSS2_MCS3_M 0x0000ff00
+#define R12A_TXAGC_NSS2_MCS3_S 8
+#define R12A_TXAGC_NSS2_MCS4_M 0x00ff0000
+#define R12A_TXAGC_NSS2_MCS4_S 16
+#define R12A_TXAGC_NSS2_MCS5_M 0xff000000
+#define R12A_TXAGC_NSS2_MCS5_S 24
+
+/* Bits for R12A_TXAGC_NSS2IX9_2IX6(i) */
+#define R12A_TXAGC_NSS2_MCS6_M 0x000000ff
+#define R12A_TXAGC_NSS2_MCS6_S 0
+#define R12A_TXAGC_NSS2_MCS7_M 0x0000ff00
+#define R12A_TXAGC_NSS2_MCS7_S 8
+#define R12A_TXAGC_NSS2_MCS8_M 0x00ff0000
+#define R12A_TXAGC_NSS2_MCS8_S 16
+#define R12A_TXAGC_NSS2_MCS9_M 0xff000000
+#define R12A_TXAGC_NSS2_MCS9_S 24
+
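+/*
+ * These TXAGC fields follow the driver-wide _M (mask) / _S (shift)
+ * convention consumed by the SM()/MS()/RW() helpers.  A minimal sketch
+ * of those helpers (see if_rtwnreg.h for the driver's definitions):
+ *
+ *	#define MS(val, field)	(((val) & field##_M) >> field##_S)
+ *	#define SM(field, val)	(((val) << field##_S) & field##_M)
+ *	#define RW(var, field, val)	(((var) & ~field##_M) | SM(field, val))
+ *
+ * e.g. SM(R12A_TXAGC_NSS1_MCS1, pwr) places an 8-bit power value into
+ * bits 8..15 of the corresponding TXAGC register word.
+ */
+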
/*
* RF (6052) registers.
*/
diff --git a/sys/dev/rtwn/rtl8812a/r12a_rx.c b/sys/dev/rtwn/rtl8812a/r12a_rx.c
index 763397636ac9..b2e02998de49 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_rx.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_rx.c
@@ -107,9 +107,8 @@ r12a_ratectl_tx_complete(struct rtwn_softc *sc, uint8_t *buf, int len)
txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY |
IEEE80211_RATECTL_STATUS_FINAL_RATE;
txs.long_retries = ntries;
- if (rpt->final_rate > RTWN_RIDX_OFDM54) { /* MCS */
- txs.final_rate =
- rpt->final_rate - RTWN_RIDX_HT_MCS_SHIFT;
+ if (RTWN_RATE_IS_HT(rpt->final_rate)) { /* MCS */
+ txs.final_rate = RTWN_RIDX_TO_MCS(rpt->final_rate);
txs.final_rate |= IEEE80211_RATE_MCS;
} else
txs.final_rate = ridx2rate[rpt->final_rate];
@@ -191,8 +190,16 @@ r12a_check_frame_checksum(struct rtwn_softc *sc, struct mbuf *m)
(rxdw1 & R12A_RXDW1_IPV6) ? "IPv6" : "IP",
(rxdw1 & R12A_RXDW1_CKSUM_ERR) ? "invalid" : "valid");
+ /*
+ * There seems to be a problem with UDP checksum processing
+	 * when the checksum field is 0 (i.e., no checksum).
+	 * So, don't treat it as a permanent failure; just let
+	 * the IP stack take a crack at validating such frames.
+ *
+ * See kern/285837 for more details.
+ */
if (rxdw1 & R12A_RXDW1_CKSUM_ERR)
- return (-1);
+ return (0);
if ((rxdw1 & R12A_RXDW1_IPV6) ?
(rs->rs_flags & R12A_RXCKSUM6_EN) :
@@ -247,7 +254,8 @@ r12a_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
rxs->c_pktflags |= IEEE80211_RX_F_AMPDU_MORE;
}
- if ((rxdw4 & R12A_RXDW4_SPLCP) && rate >= RTWN_RIDX_HT_MCS(0))
+ if ((rxdw4 & R12A_RXDW4_SPLCP) &&
+ (RTWN_RATE_IS_HT(rate) || RTWN_RATE_IS_VHT(rate)))
rxs->c_pktflags |= IEEE80211_RX_F_SHORTGI;
switch (MS(rxdw4, R12A_RXDW4_BW)) {
@@ -273,31 +281,38 @@ r12a_get_rx_stats(struct rtwn_softc *sc, struct ieee80211_rx_stats *rxs,
/* XXX check with RTL8812AU */
is5ghz = (physt->cfosho[2] != 0x01);
- if (rate < RTWN_RIDX_HT_MCS(0)) {
+ if (RTWN_RATE_IS_CCK(rate) || RTWN_RATE_IS_OFDM(rate)) {
if (is5ghz)
rxs->c_phytype = IEEE80211_RX_FP_11A;
else
rxs->c_phytype = IEEE80211_RX_FP_11G;
- } else {
+ } else if (RTWN_RATE_IS_HT(rate)) {
if (is5ghz)
rxs->c_phytype = IEEE80211_RX_FP_11NA;
else
rxs->c_phytype = IEEE80211_RX_FP_11NG;
+ } else if (RTWN_RATE_IS_VHT(rate)) {
+ /* TODO: there's no FP_VHT_5GHZ yet */
+ rxs->c_phytype = IEEE80211_RX_FP_11NA;
}
}
/* Map HW rate index to 802.11 rate. */
- if (rate < RTWN_RIDX_HT_MCS(0)) {
+ if (RTWN_RATE_IS_CCK(rate) || RTWN_RATE_IS_OFDM(rate)) {
rxs->c_rate = ridx2rate[rate];
if (RTWN_RATE_IS_CCK(rate))
rxs->c_pktflags |= IEEE80211_RX_F_CCK;
else
rxs->c_pktflags |= IEEE80211_RX_F_OFDM;
- } else { /* MCS0~15. */
- /* TODO: VHT rates */
+ } else if (RTWN_RATE_IS_HT(rate)) { /* MCS0~15. */
rxs->c_rate =
- IEEE80211_RATE_MCS | (rate - RTWN_RIDX_HT_MCS_SHIFT);
+ IEEE80211_RATE_MCS | RTWN_RIDX_TO_MCS(rate);
rxs->c_pktflags |= IEEE80211_RX_F_HT;
+ } else if (RTWN_RATE_IS_VHT(rate)) {
+ /* XXX: need to revisit VHT rate representation */
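+		/*
+		 * The VHT ridx space is laid out as 10 MCS entries per
+		 * spatial stream, so e.g. rate == RTWN_RIDX_VHT_MCS_SHIFT
+		 * + 14 decodes to stream index 1 (the second stream),
+		 * MCS 4.
+		 */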
+ rxs->c_vhtnss = (rate - RTWN_RIDX_VHT_MCS_SHIFT) / 10;
+ rxs->c_rate = (rate - RTWN_RIDX_VHT_MCS_SHIFT) % 10;
+ rxs->c_pktflags |= IEEE80211_RX_F_VHT;
}
/*
diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx.c b/sys/dev/rtwn/rtl8812a/r12a_tx.c
index 9e0d8e85c0cf..6a7af0a9b674 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_tx.c
+++ b/sys/dev/rtwn/rtl8812a/r12a_tx.c
@@ -47,6 +47,7 @@
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_vht.h>
#include <dev/rtwn/if_rtwnreg.h>
#include <dev/rtwn/if_rtwnvar.h>
@@ -56,24 +57,74 @@
#include <dev/rtwn/rtl8812a/r12a.h>
#include <dev/rtwn/rtl8812a/r12a_tx_desc.h>
+/*
+ * This function actually handles the secondary channel mapping,
+ * not the primary channel mapping.  It hints to the MAC where to
+ * place the duplicated RTS/CTS and payload transmissions when the
+ * requested transmit channel width is narrower than the configured
+ * channel width.
+ *
+ * Note: the vendor driver and the Linux rtw88 driver both currently
+ * leave this field set to 0.
+ *
+ * See the rtl8812au vendor driver, hal/rtl8812a_xmit.c:SCMapping_8812(),
+ * and where it's used (and ignored).
+ */
static int
r12a_get_primary_channel(struct rtwn_softc *sc, struct ieee80211_channel *c)
{
- /* XXX 80 MHz */
+#if 0
+ /* XXX VHT80; VHT40 */
if (IEEE80211_IS_CHAN_HT40U(c))
return (R12A_TXDW5_PRIM_CHAN_20_80_2);
else
return (R12A_TXDW5_PRIM_CHAN_20_80_3);
+#endif
+
+ /*
+ * For now just return the VHT_DATA_SC_DONOT_CARE value
+ * from the reference driver.
+ */
+ return (0);
}
+/*
+ * Configure VHT20/VHT40/VHT80 as appropriate.
+ *
+ * This is only called for VHT, not for HT.
+ */
+static void
+r12a_tx_set_vht_bw(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
+{
+ struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
+ int prim_chan;
+
+ prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
+
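+	/*
+	 * Use the per-node negotiated bandwidth (rather than the raw
+	 * channel flags) so a frame is never sent wider than what the
+	 * peer has advertised it can receive.
+	 */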
+ if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_80)) {
+ txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
+ R12A_TXDW5_DATA_BW80));
+ txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
+ prim_chan));
+ } else if (ieee80211_vht_check_tx_bw(ni, NET80211_STA_RX_BW_40)) {
+ txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_BW,
+ R12A_TXDW5_DATA_BW40));
+ txd->txdw5 |= htole32(SM(R12A_TXDW5_DATA_PRIM_CHAN,
+ prim_chan));
+ }
+}
+
+/*
+ * Configure HT20/HT40 as appropriate.
+ *
+ * This is only called for HT, not for VHT.
+ */
static void
r12a_tx_set_ht40(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
{
struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
- /* XXX 80 Mhz */
- if (ni->ni_chan != IEEE80211_CHAN_ANYC &&
- IEEE80211_IS_CHAN_HT40(ni->ni_chan)) {
+ if (ieee80211_ht_check_tx_ht40(ni)) {
int prim_chan;
prim_chan = r12a_get_primary_channel(sc, ni->ni_chan);
@@ -104,10 +155,17 @@ r12a_tx_protection(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
if (mode == IEEE80211_PROT_CTSONLY ||
mode == IEEE80211_PROT_RTSCTS) {
- if (ridx >= RTWN_RIDX_HT_MCS(0))
+ /*
+		 * Note: this code assumes basic-rate protection for
+		 * both 802.11abg and 802.11n rates.
+ */
+ if (RTWN_RATE_IS_VHT(ridx))
+ rate = rtwn_ctl_vhtrate(ic->ic_rt, ridx);
+ else if (RTWN_RATE_IS_HT(ridx))
rate = rtwn_ctl_mcsrate(ic->ic_rt, ridx);
else
rate = ieee80211_ctl_rate(ic->ic_rt, ridx2rate[ridx]);
+ /* Map basic rate back to ridx */
ridx = rate2ridx(IEEE80211_RV(rate));
txd->txdw4 |= htole32(SM(R12A_TXDW4_RTSRATE, ridx));
@@ -147,6 +205,9 @@ r12a_tx_raid(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
case IEEE80211_MODE_11NG:
mode = IEEE80211_MODE_11G;
break;
+ case IEEE80211_MODE_VHT_5GHZ:
+ mode = IEEE80211_MODE_VHT_5GHZ;
+ break;
default:
device_printf(sc->sc_dev, "unknown mode(1) %d!\n",
ic->ic_curmode);
@@ -186,8 +247,13 @@ r12a_tx_raid(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
raid = R12A_RAID_11BGN_2;
}
break;
+ case IEEE80211_MODE_VHT_5GHZ:
+ if (sc->ntxchains == 1)
+ raid = R12A_RAID_11AC_1;
+ else
+ raid = R12A_RAID_11AC_2;
+ break;
default:
- /* TODO: 80 MHz / 11ac */
device_printf(sc->sc_dev, "unknown mode(2) %d!\n", mode);
return;
}
@@ -199,16 +265,23 @@ static void
r12a_tx_set_sgi(struct rtwn_softc *sc, void *buf, struct ieee80211_node *ni)
{
struct r12a_tx_desc *txd = (struct r12a_tx_desc *)buf;
- struct ieee80211vap *vap = ni->ni_vap;
- if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) && /* HT20 */
- (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20))
- txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
- else if (ni->ni_chan != IEEE80211_CHAN_ANYC && /* HT40 */
- IEEE80211_IS_CHAN_HT40(ni->ni_chan) &&
- (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) &&
- (vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40))
- txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
+ /* TODO: VHT 20/40/80 checks */
+
+ /*
+ * Only enable short-GI if we're transmitting in that
+ * width to that node.
+ *
+	 * Specifically, do not enable short-GI for 20 MHz if
+	 * we're attempting to transmit at 40 MHz.
+ */
+ if (ieee80211_ht_check_tx_ht40(ni)) {
+ if (ieee80211_ht_check_tx_shortgi_40(ni))
+ txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
+ } else if (ieee80211_ht_check_tx_ht(ni)) {
+ if (ieee80211_ht_check_tx_shortgi_20(ni))
+ txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
+ }
}
static void
@@ -222,9 +295,31 @@ r12a_tx_set_ldpc(struct rtwn_softc *sc, struct r12a_tx_desc *txd,
txd->txdw5 |= htole32(R12A_TXDW5_DATA_LDPC);
}
+static int
+r12a_calculate_tx_agg_window(struct rtwn_softc *sc,
+ const struct ieee80211_node *ni, int tid)
+{
+ const struct ieee80211_tx_ampdu *tap;
+ int wnd;
+
+ tap = &ni->ni_tx_ampdu[tid];
+
+ /*
+ * BAW is (MAX_AGG * 2) + 1, hence the /2 here.
+	 * Ensure we don't program 0, and clamp at the 5-bit
+	 * MAX_AGG field maximum (0x1f, i.e. 31 subframes).
+ */
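+	/*
+	 * e.g. a 64-frame BAW (txa_wnd == 64) yields wnd = 32, which is
+	 * then clamped to the 5-bit MAX_AGG field limit of 0x1f (31).
+	 */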
+ wnd = tap->txa_wnd / 2;
+ if (wnd == 0)
+ wnd = 1;
+ else if (wnd > 0x1f)
+ wnd = 0x1f;
+
+ return (wnd);
+}
+
void
r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
- struct mbuf *m, void *buf, uint8_t ridx, int maxretry)
+ struct mbuf *m, void *buf, uint8_t ridx, bool force_rate, int maxretry)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = ni->ni_vap;
@@ -273,9 +368,9 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
if (m->m_flags & M_AMPDU_MPDU) {
txd->txdw2 |= htole32(R12A_TXDW2_AGGEN);
txd->txdw2 |= htole32(SM(R12A_TXDW2_AMPDU_DEN,
- vap->iv_ampdu_density));
+ ieee80211_ht_get_node_ampdu_density(ni)));
txd->txdw3 |= htole32(SM(R12A_TXDW3_MAX_AGG,
- 0x1f)); /* XXX */
+ r12a_calculate_tx_agg_window(sc, ni, tid)));
} else
txd->txdw2 |= htole32(R12A_TXDW2_AGGBK);
@@ -289,7 +384,12 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw5 |= htole32(R12A_TXDW5_DATA_SHORT);
prot = IEEE80211_PROT_NONE;
- if (ridx >= RTWN_RIDX_HT_MCS(0)) {
+ if (RTWN_RATE_IS_VHT(ridx)) {
+ r12a_tx_set_vht_bw(sc, txd, ni);
+ /* XXX TODO: sgi */
+ /* XXX TODO: ldpc */
+ prot = ic->ic_htprotmode;
+ } else if (RTWN_RATE_IS_HT(ridx)) {
r12a_tx_set_ht40(sc, txd, ni);
r12a_tx_set_sgi(sc, txd, ni);
r12a_tx_set_ldpc(sc, txd, ni);
@@ -333,12 +433,9 @@ r12a_fill_tx_desc(struct rtwn_softc *sc, struct ieee80211_node *ni,
} else {
uint16_t seqno;
- if (m->m_flags & M_AMPDU_MPDU) {
- seqno = ni->ni_txseqs[tid];
- ni->ni_txseqs[tid]++;
- } else
- seqno = M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE;
-
+ if (m->m_flags & M_AMPDU_MPDU)
+ ieee80211_output_seqno_assign(ni, -1, m);
+ seqno = M_SEQNO_GET(m);
/* Set sequence number. */
txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, seqno));
}
@@ -393,8 +490,7 @@ r12a_fill_tx_desc_raw(struct rtwn_softc *sc, struct ieee80211_node *ni,
txd->txdw3 |= htole32(SM(R12A_TXDW3_SEQ_SEL, uvp->id));
} else {
/* Set sequence number. */
- txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ,
- M_SEQNO_GET(m) % IEEE80211_SEQ_RANGE));
+ txd->txdw9 |= htole32(SM(R12A_TXDW9_SEQ, M_SEQNO_GET(m)));
}
}
diff --git a/sys/dev/rtwn/rtl8812a/r12a_tx_desc.h b/sys/dev/rtwn/rtl8812a/r12a_tx_desc.h
index 93cc9b867401..7d0793a0fe10 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_tx_desc.h
+++ b/sys/dev/rtwn/rtl8812a/r12a_tx_desc.h
@@ -138,9 +138,7 @@ struct r12a_tx_desc {
#define R12A_RAID_11BG 6
#define R12A_RAID_11G 7 /* "pure" 11g */
#define R12A_RAID_11B 8
-#define R12A_RAID_11AC_2_80 9
-#define R12A_RAID_11AC_1_80 10
-#define R12A_RAID_11AC_1 11
-#define R12A_RAID_11AC_2 12
+#define R12A_RAID_11AC_2 9
+#define R12A_RAID_11AC_1 10
#endif /* R12A_TX_DESC_H */
diff --git a/sys/dev/rtwn/rtl8812a/r12a_var.h b/sys/dev/rtwn/rtl8812a/r12a_var.h
index 182e6b902758..0a76e013b6a7 100644
--- a/sys/dev/rtwn/rtl8812a/r12a_var.h
+++ b/sys/dev/rtwn/rtl8812a/r12a_var.h
@@ -99,6 +99,7 @@ struct r12a_softc {
int ac_usb_dma_size;
int ac_usb_dma_time;
int ampdu_max_time;
+ int ampdu_max_size;
};
#define R12A_SOFTC(_sc) ((struct r12a_softc *)((_sc)->sc_priv))
diff --git a/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c b/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
index 52a8e3d7ccf5..b6850eb9fa23 100644
--- a/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
+++ b/sys/dev/rtwn/rtl8812a/usb/r12au_attach.c
@@ -141,6 +141,7 @@ r12a_attach_private(struct rtwn_softc *sc)
rs->rs_iq_calib_sw = r12a_iq_calib_sw;
rs->ampdu_max_time = 0x70;
	rs->ampdu_max_size = 0x1ffff;	/* 128 KiB - 1 */
sc->sc_priv = rs;
}
@@ -173,7 +174,25 @@ r12au_adj_devcaps(struct rtwn_softc *sc)
IEEE80211_HTC_TXLDPC;
}
- /* TODO: STBC, VHT etc */
+ ic->ic_htcaps |=
+ IEEE80211_HTCAP_CHWIDTH40 | /* 40 MHz channel width */
+ IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
+ ;
+
+ /* TODO: STBC */
+
+ /* VHT config */
+ ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
+ ic->ic_vht_cap.vht_cap_info =
+ IEEE80211_VHTCAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHTCAP_SHORT_GI_80 |
+ IEEE80211_VHTCAP_TXSTBC |
+ IEEE80211_VHTCAP_RXSTBC_1 |
+ IEEE80211_VHTCAP_HTC_VHT |
+ _IEEE80211_SHIFTMASK(7,
+ IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+
+ rtwn_attach_vht_cap_info_mcs(sc);
}
void
@@ -200,6 +219,7 @@ r12au_attach(struct rtwn_usb_softc *uc)
sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
sc->sc_classify_intr = r12au_classify_intr;
sc->sc_handle_tx_report = r12a_ratectl_tx_complete;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = r12a_handle_c2h_report;
sc->sc_check_frame = r12a_check_frame_checksum;
sc->sc_rf_write = r12a_rf_write;
@@ -231,6 +251,7 @@ r12au_attach(struct rtwn_usb_softc *uc)
#endif
sc->sc_beacon_init = r12a_beacon_init;
sc->sc_beacon_enable = r92c_beacon_enable;
+ sc->sc_sta_beacon_enable = r12a_sta_beacon_enable;
sc->sc_beacon_set_rate = r12a_beacon_set_rate;
sc->sc_beacon_select = rtwn_nop_softc_int;
sc->sc_temp_measure = r88e_temp_measure;
@@ -245,6 +266,7 @@ r12au_attach(struct rtwn_usb_softc *uc)
sc->sc_init_antsel = r12a_init_antsel;
sc->sc_post_init = r12au_post_init;
sc->sc_init_bcnq1_boundary = rtwn_nop_int_softc;
+ sc->sc_set_tx_power = rtwn_nop_int_softc_vap;
sc->chan_list_5ghz[0] = r12a_chan_5ghz_0;
sc->chan_list_5ghz[1] = r12a_chan_5ghz_1;
@@ -291,5 +313,7 @@ r12au_attach(struct rtwn_usb_softc *uc)
sc->ntxchains = 2;
sc->nrxchains = 2;
+ sc->sc_ht40 = 1;
+
r12a_attach_private(sc);
}
diff --git a/sys/dev/rtwn/rtl8812a/usb/r12au_init.c b/sys/dev/rtwn/rtl8812a/usb/r12au_init.c
index ac6a599895ac..1bee2c665657 100644
--- a/sys/dev/rtwn/rtl8812a/usb/r12au_init.c
+++ b/sys/dev/rtwn/rtl8812a/usb/r12au_init.c
@@ -142,7 +142,17 @@ r12au_init_ampdu(struct rtwn_softc *sc)
/* Setup AMPDU aggregation. */
rtwn_write_1(sc, R12A_AMPDU_MAX_TIME, rs->ampdu_max_time);
- rtwn_write_4(sc, R12A_AMPDU_MAX_LENGTH, 0xffffffff);
+ /*
+ * Note: The vendor driver (hal/rtl8812a_hal_init.c:SetHwReg8812A())
+ * also sets bit 31.
+ */
+ /*
+	 * TODO: this should be limited to the peer's A-MPDU limit
+	 * in STA mode, and perhaps to the minimum A-MPDU limit
+	 * across all VAPs/peers in multi-STA / other operating modes.
+ */
+ rtwn_write_4(sc, R12A_AMPDU_MAX_LENGTH,
+	    rs->ampdu_max_size | (1U << 31));
/* 80 MHz clock (again?) */
rtwn_write_1(sc, R92C_USTIME_TSF, 0x50);
diff --git a/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c b/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c
index 480b1ae36b11..60cb6d3fc61d 100644
--- a/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c
+++ b/sys/dev/rtwn/rtl8821a/usb/r21au_attach.c
@@ -141,6 +141,7 @@ r21a_attach_private(struct rtwn_softc *sc)
rs->rs_iq_calib_sw = r21a_iq_calib_sw;
rs->ampdu_max_time = 0x5e;
+	rs->ampdu_max_size = 0xffff;	/* 64 KiB - 1 */
rs->ac_usb_dma_size = 0x01;
rs->ac_usb_dma_time = 0x10;
@@ -158,7 +159,23 @@ r21au_adj_devcaps(struct rtwn_softc *sc)
if (rs->rs_radar != 0)
ic->ic_caps |= IEEE80211_C_DFS;
- /* TODO: VHT */
+ ic->ic_htcaps |=
+ IEEE80211_HTCAP_CHWIDTH40 | /* 40 MHz channel width */
+ IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */
+ ;
+
+ /* VHT config */
+ ic->ic_flags_ext |= IEEE80211_FEXT_VHT;
+ ic->ic_vht_cap.vht_cap_info =
+ IEEE80211_VHTCAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHTCAP_SHORT_GI_80 |
+ IEEE80211_VHTCAP_TXSTBC |
+ IEEE80211_VHTCAP_RXSTBC_1 |
+ IEEE80211_VHTCAP_HTC_VHT |
+ _IEEE80211_SHIFTMASK(7,
+ IEEE80211_VHTCAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+
+ rtwn_attach_vht_cap_info_mcs(sc);
}
void
@@ -185,6 +202,7 @@ r21au_attach(struct rtwn_usb_softc *uc)
sc->sc_get_rssi_ofdm = r88e_get_rssi_ofdm;
sc->sc_classify_intr = r12au_classify_intr;
sc->sc_handle_tx_report = r12a_ratectl_tx_complete;
+ sc->sc_handle_tx_report2 = rtwn_nop_softc_uint8_int;
sc->sc_handle_c2h_report = r12a_handle_c2h_report;
sc->sc_check_frame = r12a_check_frame_checksum;
sc->sc_rf_read = r12a_c_cut_rf_read;
@@ -217,6 +235,7 @@ r21au_attach(struct rtwn_usb_softc *uc)
#endif
sc->sc_beacon_init = r21a_beacon_init;
sc->sc_beacon_enable = r92c_beacon_enable;
+ sc->sc_sta_beacon_enable = r12a_sta_beacon_enable;
sc->sc_beacon_set_rate = r12a_beacon_set_rate;
sc->sc_beacon_select = r21a_beacon_select;
sc->sc_temp_measure = r88e_temp_measure;
@@ -231,6 +250,7 @@ r21au_attach(struct rtwn_usb_softc *uc)
sc->sc_init_antsel = r12a_init_antsel;
sc->sc_post_init = r12au_post_init;
sc->sc_init_bcnq1_boundary = r21a_init_bcnq1_boundary;
+ sc->sc_set_tx_power = rtwn_nop_int_softc_vap;
sc->chan_list_5ghz[0] = r12a_chan_5ghz_0;
sc->chan_list_5ghz[1] = r12a_chan_5ghz_1;
@@ -277,5 +297,7 @@ r21au_attach(struct rtwn_usb_softc *uc)
sc->ntxchains = 1;
sc->nrxchains = 1;
+ sc->sc_ht40 = 1;
+
r21a_attach_private(sc);
}
diff --git a/sys/dev/rtwn/usb/rtwn_usb_attach.c b/sys/dev/rtwn/usb/rtwn_usb_attach.c
index 71798ffc14f9..4958939a768a 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_attach.c
+++ b/sys/dev/rtwn/usb/rtwn_usb_attach.c
@@ -156,10 +156,12 @@ rtwn_usb_alloc_tx_list(struct rtwn_softc *sc)
if (error != 0)
return (error);
- STAILQ_INIT(&uc->uc_tx_active);
- STAILQ_INIT(&uc->uc_tx_inactive);
- STAILQ_INIT(&uc->uc_tx_pending);
+ for (i = RTWN_BULK_TX_FIRST; i < RTWN_BULK_EP_COUNT; i++) {
+ STAILQ_INIT(&uc->uc_tx_active[i]);
+ STAILQ_INIT(&uc->uc_tx_pending[i]);
+ }
+ STAILQ_INIT(&uc->uc_tx_inactive);
for (i = 0; i < RTWN_USB_TX_LIST_COUNT; i++)
STAILQ_INSERT_HEAD(&uc->uc_tx_inactive, &uc->uc_tx[i], next);
@@ -207,23 +209,29 @@ static void
rtwn_usb_free_tx_list(struct rtwn_softc *sc)
{
struct rtwn_usb_softc *uc = RTWN_USB_SOFTC(sc);
+ int i;
rtwn_usb_free_list(sc, uc->uc_tx, RTWN_USB_TX_LIST_COUNT);
- STAILQ_INIT(&uc->uc_tx_active);
+ for (i = RTWN_BULK_TX_FIRST; i < RTWN_BULK_EP_COUNT; i++) {
+ STAILQ_INIT(&uc->uc_tx_active[i]);
+ STAILQ_INIT(&uc->uc_tx_pending[i]);
+ }
STAILQ_INIT(&uc->uc_tx_inactive);
- STAILQ_INIT(&uc->uc_tx_pending);
}
static void
rtwn_usb_reset_lists(struct rtwn_softc *sc, struct ieee80211vap *vap)
{
struct rtwn_usb_softc *uc = RTWN_USB_SOFTC(sc);
+ int i;
RTWN_ASSERT_LOCKED(sc);
- rtwn_usb_reset_tx_list(uc, &uc->uc_tx_active, vap);
- rtwn_usb_reset_tx_list(uc, &uc->uc_tx_pending, vap);
+ for (i = RTWN_BULK_TX_FIRST; i < RTWN_BULK_EP_COUNT; i++) {
+ rtwn_usb_reset_tx_list(uc, &uc->uc_tx_active[i], vap);
+ rtwn_usb_reset_tx_list(uc, &uc->uc_tx_pending[i], vap);
+ }
if (vap == NULL) {
rtwn_usb_reset_rx_list(uc);
sc->qfullmsk = 0;
@@ -295,7 +303,7 @@ rtwn_usb_abort_xfers(struct rtwn_softc *sc)
/* abort any pending transfers */
RTWN_UNLOCK(sc);
- for (i = 0; i < RTWN_N_TRANSFER; i++)
+ for (i = 0; i < RTWN_BULK_EP_COUNT; i++)
usbd_transfer_drain(uc->uc_xfer[i]);
RTWN_LOCK(sc);
}
@@ -432,7 +440,7 @@ rtwn_usb_detach(device_t self)
rtwn_usb_free_rx_list(sc);
/* Detach all USB transfers. */
- usbd_transfer_unsetup(uc->uc_xfer, RTWN_N_TRANSFER);
+ usbd_transfer_unsetup(uc->uc_xfer, RTWN_BULK_EP_COUNT);
rtwn_detach_private(sc);
mtx_destroy(&sc->sc_mtx);
diff --git a/sys/dev/rtwn/usb/rtwn_usb_attach.h b/sys/dev/rtwn/usb/rtwn_usb_attach.h
index 728a0a6a3b24..cd5485b36678 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_attach.h
+++ b/sys/dev/rtwn/usb/rtwn_usb_attach.h
@@ -96,6 +96,7 @@ static const STRUCT_USB_HOST_ID rtwn_devs[] = {
RTWN_RTL8192CU_DEV(SITECOMEU, RTL8188CU_1),
RTWN_RTL8192CU_DEV(SITECOMEU, RTL8188CU_2),
RTWN_RTL8192CU_DEV(SITECOMEU, RTL8192CU),
+ RTWN_RTL8192CU_DEV(TPLINK, RTL8192CU),
RTWN_RTL8192CU_DEV(TRENDNET, RTL8188CU),
RTWN_RTL8192CU_DEV(TRENDNET, RTL8192CU),
RTWN_RTL8192CU_DEV(ZYXEL, RTL8192CU),
diff --git a/sys/dev/rtwn/usb/rtwn_usb_ep.c b/sys/dev/rtwn/usb/rtwn_usb_ep.c
index 0848a45a9f86..f9b0672324fe 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_ep.c
+++ b/sys/dev/rtwn/usb/rtwn_usb_ep.c
@@ -55,7 +55,7 @@
#include <dev/rtwn/rtl8192c/usb/r92cu_reg.h>
-static const struct usb_config rtwn_config_common[RTWN_N_TRANSFER] = {
+static const struct usb_config rtwn_config_common[RTWN_BULK_EP_COUNT] = {
[RTWN_BULK_RX] = {
.type = UE_BULK,
.endpoint = UE_ADDR_ANY,
@@ -76,7 +76,7 @@ static const struct usb_config rtwn_config_common[RTWN_N_TRANSFER] = {
.pipe_bof = 1,
.force_short_xfer = 1,
},
- .callback = rtwn_bulk_tx_callback,
+ .callback = rtwn_bulk_tx_callback_be,
.timeout = RTWN_TX_TIMEOUT, /* ms */
},
[RTWN_BULK_TX_BK] = {
@@ -89,7 +89,7 @@ static const struct usb_config rtwn_config_common[RTWN_N_TRANSFER] = {
.pipe_bof = 1,
.force_short_xfer = 1,
},
- .callback = rtwn_bulk_tx_callback,
+ .callback = rtwn_bulk_tx_callback_bk,
.timeout = RTWN_TX_TIMEOUT, /* ms */
},
[RTWN_BULK_TX_VI] = {
@@ -102,7 +102,7 @@ static const struct usb_config rtwn_config_common[RTWN_N_TRANSFER] = {
.pipe_bof = 1,
.force_short_xfer = 1
},
- .callback = rtwn_bulk_tx_callback,
+ .callback = rtwn_bulk_tx_callback_vi,
.timeout = RTWN_TX_TIMEOUT, /* ms */
},
[RTWN_BULK_TX_VO] = {
@@ -115,7 +115,7 @@ static const struct usb_config rtwn_config_common[RTWN_N_TRANSFER] = {
.pipe_bof = 1,
.force_short_xfer = 1
},
- .callback = rtwn_bulk_tx_callback,
+ .callback = rtwn_bulk_tx_callback_vo,
.timeout = RTWN_TX_TIMEOUT, /* ms */
},
};
@@ -200,22 +200,33 @@ rtwn_usb_setup_endpoints(struct rtwn_usb_softc *uc)
/* NB: keep in sync with rtwn_dma_init(). */
rtwn_config[RTWN_BULK_TX_VO].endpoint = addr[0];
+ uc->wme2qid[WME_AC_VO] = RTWN_BULK_TX_VO;
switch (uc->ntx) {
case 4:
case 3:
rtwn_config[RTWN_BULK_TX_BE].endpoint = addr[2];
rtwn_config[RTWN_BULK_TX_BK].endpoint = addr[2];
rtwn_config[RTWN_BULK_TX_VI].endpoint = addr[1];
+ uc->wme2qid[WME_AC_BE] = RTWN_BULK_TX_BE;
+ uc->wme2qid[WME_AC_BK] = RTWN_BULK_TX_BE;
+ uc->wme2qid[WME_AC_VI] = RTWN_BULK_TX_VI;
break;
case 2:
rtwn_config[RTWN_BULK_TX_BE].endpoint = addr[1];
rtwn_config[RTWN_BULK_TX_BK].endpoint = addr[1];
rtwn_config[RTWN_BULK_TX_VI].endpoint = addr[0];
+ uc->wme2qid[WME_AC_BE] = RTWN_BULK_TX_VI;
+ uc->wme2qid[WME_AC_BK] = RTWN_BULK_TX_VI;
+ uc->wme2qid[WME_AC_VI] = RTWN_BULK_TX_VO;
break;
case 1:
rtwn_config[RTWN_BULK_TX_BE].endpoint = addr[0];
rtwn_config[RTWN_BULK_TX_BK].endpoint = addr[0];
rtwn_config[RTWN_BULK_TX_VI].endpoint = addr[0];
+
+ uc->wme2qid[WME_AC_BE] = RTWN_BULK_TX_VO;
+ uc->wme2qid[WME_AC_BK] = RTWN_BULK_TX_VO;
+ uc->wme2qid[WME_AC_VI] = RTWN_BULK_TX_VO;
break;
default:
KASSERT(0, ("unhandled number of endpoints %d\n", uc->ntx));
@@ -225,7 +236,7 @@ rtwn_usb_setup_endpoints(struct rtwn_usb_softc *uc)
rtwn_config[RTWN_BULK_RX].bufsize =
uc->uc_rx_buf_size * RTWN_USB_RXBUFSZ_UNIT;
error = usbd_transfer_setup(uc->uc_udev, &iface_index,
- uc->uc_xfer, rtwn_config, RTWN_N_TRANSFER, uc, &sc->sc_mtx);
+ uc->uc_xfer, rtwn_config, RTWN_BULK_EP_COUNT, uc, &sc->sc_mtx);
free(rtwn_config, M_TEMP);
if (error) {
diff --git a/sys/dev/rtwn/usb/rtwn_usb_rx.c b/sys/dev/rtwn/usb/rtwn_usb_rx.c
index 5db967ddcc18..4a4294c0d890 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_rx.c
+++ b/sys/dev/rtwn/usb/rtwn_usb_rx.c
@@ -124,10 +124,15 @@ rtwn_rx_copy_to_mbuf(struct rtwn_softc *sc, struct rtwn_rx_stat_common *stat,
if (rtwn_rx_check_pre_alloc(sc, stat) != 0)
goto fail;
- m = m_get2(totlen, M_NOWAIT, MT_DATA, M_PKTHDR);
+ /*
+	 * Note: this can require >4 KiB (e.g. when de-aggregating an
+	 * A-MSDU from a USB transfer).  See kern/286366 for more
+	 * information.
+ */
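+	/*
+	 * (m_get2() tops out at a 4 KiB MJUMPAGESIZE cluster, while
+	 * m_get3() can also allocate 9 KiB and 16 KiB jumbo clusters,
+	 * which covers the aggregated transfers seen here.)
+	 */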
+ m = m_get3(totlen, M_NOWAIT, MT_DATA, M_PKTHDR);
if (__predict_false(m == NULL)) {
- device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n",
- __func__);
+ device_printf(sc->sc_dev,
+ "%s: could not allocate RX mbuf (%d bytes)\n",
+ __func__, totlen);
goto fail;
}
@@ -334,6 +339,27 @@ rtwn_report_intr(struct rtwn_usb_softc *uc, struct usb_xfer *xfer,
rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
#endif
break;
+ case RTWN_RX_TX_REPORT2:
+ if (sc->sc_ratectl != RTWN_RATECTL_NET80211) {
+ /* shouldn't happen */
+ device_printf(sc->sc_dev,
+ "%s called while ratectl = %d!\n",
+ __func__, sc->sc_ratectl);
+ break;
+ }
+
+ RTWN_NT_LOCK(sc);
+ rtwn_handle_tx_report2(sc, buf, len);
+ RTWN_NT_UNLOCK(sc);
+
+#ifdef IEEE80211_SUPPORT_SUPERG
+ /*
+		 * NB: this will be executed only when the 'report' bit
+		 * is set.
+ */
+ if (sc->sc_tx_n_active > 0 && --sc->sc_tx_n_active <= 1)
+ rtwn_cmd_sleepable(sc, NULL, 0, rtwn_ff_flush_all);
+#endif
+ break;
case RTWN_RX_OTHER:
rtwn_handle_c2h_report(sc, buf, len);
break;
diff --git a/sys/dev/rtwn/usb/rtwn_usb_tx.c b/sys/dev/rtwn/usb/rtwn_usb_tx.c
index 0fb8632d9a16..86d41ed10d91 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_tx.c
+++ b/sys/dev/rtwn/usb/rtwn_usb_tx.c
@@ -65,10 +65,6 @@ static struct rtwn_data * rtwn_usb_getbuf(struct rtwn_usb_softc *);
static void rtwn_usb_txeof(struct rtwn_usb_softc *,
struct rtwn_data *, int);
-static const uint8_t wme2qid[] =
- { RTWN_BULK_TX_BE, RTWN_BULK_TX_BK,
- RTWN_BULK_TX_VI, RTWN_BULK_TX_VO };
-
static struct rtwn_data *
_rtwn_usb_getbuf(struct rtwn_usb_softc *uc)
{
@@ -105,6 +101,7 @@ static void
rtwn_usb_txeof(struct rtwn_usb_softc *uc, struct rtwn_data *data, int status)
{
struct rtwn_softc *sc = &uc->uc_sc;
+ bool is_empty = true;
RTWN_ASSERT_LOCKED(sc);
@@ -120,42 +117,54 @@ rtwn_usb_txeof(struct rtwn_usb_softc *uc, struct rtwn_data *data, int status)
STAILQ_INSERT_TAIL(&uc->uc_tx_inactive, data, next);
sc->qfullmsk = 0;
+
#ifndef D4054
- if (STAILQ_EMPTY(&uc->uc_tx_active) && STAILQ_EMPTY(&uc->uc_tx_pending))
+ for (int i = RTWN_BULK_TX_FIRST; i < RTWN_BULK_EP_COUNT; i++) {
+ if (!STAILQ_EMPTY(&uc->uc_tx_active[i]) ||
+ !STAILQ_EMPTY(&uc->uc_tx_pending[i]))
+ is_empty = false;
+ }
+
+ if (is_empty)
sc->sc_tx_timer = 0;
else
sc->sc_tx_timer = 5;
#endif
}
-void
-rtwn_bulk_tx_callback(struct usb_xfer *xfer, usb_error_t error)
+static void
+rtwn_bulk_tx_callback_qid(struct usb_xfer *xfer, usb_error_t error, int qid)
{
struct rtwn_usb_softc *uc = usbd_xfer_softc(xfer);
struct rtwn_softc *sc = &uc->uc_sc;
struct rtwn_data *data;
+ bool do_is_empty_check = false;
+ int i;
+
+ RTWN_DPRINTF(sc, RTWN_DEBUG_XMIT,
+ "%s: called, qid=%d\n", __func__, qid);
RTWN_ASSERT_LOCKED(sc);
switch (USB_GET_STATE(xfer)){
case USB_ST_TRANSFERRED:
- data = STAILQ_FIRST(&uc->uc_tx_active);
+ data = STAILQ_FIRST(&uc->uc_tx_active[qid]);
if (data == NULL)
goto tr_setup;
- STAILQ_REMOVE_HEAD(&uc->uc_tx_active, next);
+ STAILQ_REMOVE_HEAD(&uc->uc_tx_active[qid], next);
rtwn_usb_txeof(uc, data, 0);
/* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
- data = STAILQ_FIRST(&uc->uc_tx_pending);
+ data = STAILQ_FIRST(&uc->uc_tx_pending[qid]);
if (data == NULL) {
RTWN_DPRINTF(sc, RTWN_DEBUG_XMIT,
"%s: empty pending queue\n", __func__);
- sc->sc_tx_n_active = 0;
+ do_is_empty_check = true;
goto finish;
}
- STAILQ_REMOVE_HEAD(&uc->uc_tx_pending, next);
- STAILQ_INSERT_TAIL(&uc->uc_tx_active, data, next);
+ STAILQ_REMOVE_HEAD(&uc->uc_tx_pending[qid], next);
+ STAILQ_INSERT_TAIL(&uc->uc_tx_active[qid], data, next);
/*
* Note: if this is a beacon frame, ensure that it will go
@@ -169,11 +178,17 @@ tr_setup:
sc->sc_tx_n_active++;
break;
default:
- data = STAILQ_FIRST(&uc->uc_tx_active);
+ data = STAILQ_FIRST(&uc->uc_tx_active[qid]);
if (data == NULL)
goto tr_setup;
- STAILQ_REMOVE_HEAD(&uc->uc_tx_active, next);
+ STAILQ_REMOVE_HEAD(&uc->uc_tx_active[qid], next);
rtwn_usb_txeof(uc, data, 1);
+ if (error != 0)
+ device_printf(sc->sc_dev,
+ "%s: called; txeof qid=%d, error=%s\n",
+ __func__,
+ qid,
+ usbd_errstr(error));
if (error != USB_ERR_CANCELLED) {
usbd_xfer_set_stall(xfer);
goto tr_setup;
@@ -181,6 +196,19 @@ tr_setup:
break;
}
finish:
+
+ /*
+	 * Clear sc_tx_n_active once every endpoint's pending queue
+	 * is empty.
+	 *
+	 * This is currently a crutch because net80211 doesn't provide
+	 * a way to defer all of the FF checks, or just one of them.
+ * Eventually this should just be tracked per-endpoint.
+ */
+ for (i = RTWN_BULK_TX_FIRST; i < RTWN_BULK_EP_COUNT; i++)
+ if (STAILQ_FIRST(&uc->uc_tx_pending[i]) != NULL)
+ do_is_empty_check = false;
+ if (do_is_empty_check)
+ sc->sc_tx_n_active = 0;
#ifdef IEEE80211_SUPPORT_SUPERG
/*
* If the TX active queue drops below a certain
@@ -210,6 +238,34 @@ finish:
rtwn_start(sc);
}
+void
+rtwn_bulk_tx_callback_be(struct usb_xfer *xfer, usb_error_t error)
+{
+
+ rtwn_bulk_tx_callback_qid(xfer, error, RTWN_BULK_TX_BE);
+}
+
+void
+rtwn_bulk_tx_callback_bk(struct usb_xfer *xfer, usb_error_t error)
+{
+
+ rtwn_bulk_tx_callback_qid(xfer, error, RTWN_BULK_TX_BK);
+}
+
+void
+rtwn_bulk_tx_callback_vi(struct usb_xfer *xfer, usb_error_t error)
+{
+
+ rtwn_bulk_tx_callback_qid(xfer, error, RTWN_BULK_TX_VI);
+}
+
+void
+rtwn_bulk_tx_callback_vo(struct usb_xfer *xfer, usb_error_t error)
+{
+
+ rtwn_bulk_tx_callback_qid(xfer, error, RTWN_BULK_TX_VO);
+}
+
static void
rtwn_usb_tx_checksum(struct rtwn_tx_desc_common *txd)
{
@@ -226,6 +282,7 @@ rtwn_usb_tx_start(struct rtwn_softc *sc, struct ieee80211_node *ni,
struct rtwn_data *data;
struct usb_xfer *xfer;
uint16_t ac;
+ int qid = 0;
RTWN_ASSERT_LOCKED(sc);
@@ -236,17 +293,23 @@ rtwn_usb_tx_start(struct rtwn_softc *sc, struct ieee80211_node *ni,
if (data == NULL)
return (ENOBUFS);
+ /* TODO: should really get a consistent AC/TID, ath(4) style */
ac = M_WME_GETAC(m);
switch (type) {
case IEEE80211_FC0_TYPE_CTL:
case IEEE80211_FC0_TYPE_MGT:
- xfer = uc->uc_xfer[RTWN_BULK_TX_VO];
+ qid = RTWN_BULK_TX_VO;
break;
default:
- xfer = uc->uc_xfer[wme2qid[ac]];
+ qid = uc->wme2qid[ac];
break;
}
+ xfer = uc->uc_xfer[qid];
+
+ RTWN_DPRINTF(sc, RTWN_DEBUG_XMIT,
+ "%s: called, ac=%d, qid=%d, xfer=%p\n",
+ __func__, ac, qid, xfer);
txd = (struct rtwn_tx_desc_common *)tx_desc;
txd->pktlen = htole16(m->m_pkthdr.len);
@@ -264,6 +327,7 @@ rtwn_usb_tx_start(struct rtwn_softc *sc, struct ieee80211_node *ni,
data->buflen = m->m_pkthdr.len + sc->txdesc_len;
data->id = id;
data->ni = ni;
+ data->qid = qid;
if (data->ni != NULL) {
data->m = m;
#ifndef D4054
@@ -271,7 +335,7 @@ rtwn_usb_tx_start(struct rtwn_softc *sc, struct ieee80211_node *ni,
#endif
}
- STAILQ_INSERT_TAIL(&uc->uc_tx_pending, data, next);
+ STAILQ_INSERT_TAIL(&uc->uc_tx_pending[qid], data, next);
if (STAILQ_EMPTY(&uc->uc_tx_inactive))
sc->qfullmsk = 1;
diff --git a/sys/dev/rtwn/usb/rtwn_usb_tx.h b/sys/dev/rtwn/usb/rtwn_usb_tx.h
index 7b762cc01a00..193103f32707 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_tx.h
+++ b/sys/dev/rtwn/usb/rtwn_usb_tx.h
@@ -17,7 +17,10 @@
#ifndef RTWN_USB_TX_H
#define RTWN_USB_TX_H
-void rtwn_bulk_tx_callback(struct usb_xfer *, usb_error_t);
+void rtwn_bulk_tx_callback_bk(struct usb_xfer *, usb_error_t);
+void rtwn_bulk_tx_callback_be(struct usb_xfer *, usb_error_t);
+void rtwn_bulk_tx_callback_vi(struct usb_xfer *, usb_error_t);
+void rtwn_bulk_tx_callback_vo(struct usb_xfer *, usb_error_t);
int rtwn_usb_tx_start(struct rtwn_softc *, struct ieee80211_node *,
struct mbuf *, uint8_t *, uint8_t, int);
diff --git a/sys/dev/rtwn/usb/rtwn_usb_var.h b/sys/dev/rtwn/usb/rtwn_usb_var.h
index bad697bfa1db..27cd2b4e2762 100644
--- a/sys/dev/rtwn/usb/rtwn_usb_var.h
+++ b/sys/dev/rtwn/usb/rtwn_usb_var.h
@@ -30,13 +30,14 @@
#define RTWN_IFACE_INDEX 0
-#define RTWN_USB_RX_LIST_COUNT 1
+#define RTWN_USB_RX_LIST_COUNT 16
#define RTWN_USB_TX_LIST_COUNT 16
struct rtwn_data {
uint8_t *buf;
/* 'id' is meaningful for beacons only */
int id;
+ int qid;
uint16_t buflen;
struct mbuf *m;
struct ieee80211_node *ni;
@@ -50,15 +51,16 @@ enum {
RTWN_BULK_TX_BK, /* = WME_AC_BK */
RTWN_BULK_TX_VI, /* = WME_AC_VI */
RTWN_BULK_TX_VO, /* = WME_AC_VO */
- RTWN_N_TRANSFER = 5,
+ RTWN_BULK_EP_COUNT = 5,
};
#define RTWN_EP_QUEUES RTWN_BULK_RX
+#define RTWN_BULK_TX_FIRST RTWN_BULK_TX_BE
struct rtwn_usb_softc {
struct rtwn_softc uc_sc; /* must be the first */
struct usb_device *uc_udev;
- struct usb_xfer *uc_xfer[RTWN_N_TRANSFER];
+ struct usb_xfer *uc_xfer[RTWN_BULK_EP_COUNT];
struct rtwn_data uc_rx[RTWN_USB_RX_LIST_COUNT];
rtwn_datahead uc_rx_active;
@@ -70,14 +72,16 @@ struct rtwn_usb_softc {
int uc_rx_off;
struct rtwn_data uc_tx[RTWN_USB_TX_LIST_COUNT];
- rtwn_datahead uc_tx_active;
+ rtwn_datahead uc_tx_active[RTWN_BULK_EP_COUNT];
rtwn_datahead uc_tx_inactive;
- rtwn_datahead uc_tx_pending;
+ rtwn_datahead uc_tx_pending[RTWN_BULK_EP_COUNT];
int (*uc_align_rx)(int, int);
int ntx;
int tx_agg_desc_num;
+
+ uint8_t wme2qid[4];
};
#define RTWN_USB_SOFTC(sc) ((struct rtwn_usb_softc *)(sc))
diff --git a/sys/dev/safe/safe.c b/sys/dev/safe/safe.c
index da2afbd38fad..c512f3fc62c0 100644
--- a/sys/dev/safe/safe.c
+++ b/sys/dev/safe/safe.c
@@ -464,7 +464,6 @@ safe_detach(device_t dev)
mtx_destroy(&sc->sc_ringmtx);
safe_dma_free(sc, &sc->sc_ringalloc);
- bus_generic_detach(dev);
bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
diff --git a/sys/dev/sbni/if_sbni.c b/sys/dev/sbni/if_sbni.c
index 4dbfae7777ef..ffbce54a56f2 100644
--- a/sys/dev/sbni/if_sbni.c
+++ b/sys/dev/sbni/if_sbni.c
@@ -212,7 +212,7 @@ sbni_probe(struct sbni_softc *sc)
/*
* Install interface into kernel networking data structures
*/
-int
+void
sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags)
{
if_t ifp;
@@ -220,8 +220,6 @@ sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags)
uint64_t baudrate;
ifp = sc->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOMEM);
sbni_outb(sc, CSR0, 0);
set_initial_values(sc, flags);
@@ -250,7 +248,6 @@ sbni_attach(struct sbni_softc *sc, int unit, struct sbni_flags flags)
printf("auto\n");
else
printf("%d (fixed)\n", sc->cur_rxl_index);
- return (0);
}
void
diff --git a/sys/dev/sbni/if_sbni_isa.c b/sys/dev/sbni/if_sbni_isa.c
index 6f8c91a0b3bc..113ff3d954e4 100644
--- a/sys/dev/sbni/if_sbni_isa.c
+++ b/sys/dev/sbni/if_sbni_isa.c
@@ -136,12 +136,7 @@ sbni_attach_isa(device_t dev)
*(u_int32_t*)&flags = device_get_flags(dev);
- error = sbni_attach(sc, device_get_unit(dev) * 2, flags);
- if (error) {
- device_printf(dev, "cannot initialize driver\n");
- sbni_release_resources(sc);
- return (error);
- }
+ sbni_attach(sc, device_get_unit(dev) * 2, flags);
if (sc->irq_res) {
error = bus_setup_intr(
diff --git a/sys/dev/sbni/if_sbni_pci.c b/sys/dev/sbni/if_sbni_pci.c
index 60c70ec492f9..b1b0614613eb 100644
--- a/sys/dev/sbni/if_sbni_pci.c
+++ b/sys/dev/sbni/if_sbni_pci.c
@@ -131,20 +131,9 @@ sbni_pci_attach(device_t dev)
memset(&flags, 0, sizeof(flags));
- error = sbni_attach(sc, device_get_unit(dev) * 2, flags);
- if (error) {
- device_printf(dev, "cannot initialize driver\n");
- goto attach_failed;
- }
- if (sc->slave_sc) {
- error = sbni_attach(sc->slave_sc, device_get_unit(dev) * 2 + 1,
- flags);
- if (error) {
- device_printf(dev, "cannot initialize slave\n");
- sbni_detach(sc);
- goto attach_failed;
- }
- }
+ sbni_attach(sc, device_get_unit(dev) * 2, flags);
+ if (sc->slave_sc)
+ sbni_attach(sc->slave_sc, device_get_unit(dev) * 2 + 1, flags);
if (sc->irq_res) {
error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET |
diff --git a/sys/dev/sbni/if_sbnivar.h b/sys/dev/sbni/if_sbnivar.h
index 92678899a5d9..211d2bc56b0e 100644
--- a/sys/dev/sbni/if_sbnivar.h
+++ b/sys/dev/sbni/if_sbnivar.h
@@ -125,7 +125,7 @@ struct sbni_softc {
void sbni_intr(void *);
int sbni_probe(struct sbni_softc *);
-int sbni_attach(struct sbni_softc *, int, struct sbni_flags);
+void sbni_attach(struct sbni_softc *, int, struct sbni_flags);
void sbni_detach(struct sbni_softc *);
void sbni_release_resources(struct sbni_softc *);
diff --git a/sys/dev/scc/scc_core.c b/sys/dev/scc/scc_core.c
index 376cc25d11b4..48331ee5f42a 100644
--- a/sys/dev/scc/scc_core.c
+++ b/sys/dev/scc/scc_core.c
@@ -195,7 +195,7 @@ scc_bfe_attach(device_t dev, u_int ipc)
m->m_mode = 1U << mode;
if ((cl->cl_modes & m->m_mode) == 0 || ch->ch_sysdev)
continue;
- m->m_dev = device_add_child(dev, NULL, -1);
+ m->m_dev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
device_set_ivars(m->m_dev, (void *)m);
error = device_probe_child(dev, m->m_dev);
if (!error) {
diff --git a/sys/dev/sdhci/fsl_sdhci.c b/sys/dev/sdhci/fsl_sdhci.c
index c3c1b6207a23..2f3f89f3d925 100644
--- a/sys/dev/sdhci/fsl_sdhci.c
+++ b/sys/dev/sdhci/fsl_sdhci.c
@@ -925,8 +925,8 @@ fsl_sdhci_attach(device_t dev)
sdhci_init_slot(dev, &sc->slot, 0);
sc->slot_init_done = true;
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
sdhci_start_slot(&sc->slot);
diff --git a/sys/dev/sdhci/sdhci.c b/sys/dev/sdhci/sdhci.c
index cdaab4d5886f..b53b85bf44c2 100644
--- a/sys/dev/sdhci/sdhci.c
+++ b/sys/dev/sdhci/sdhci.c
@@ -40,13 +40,13 @@
#include <sys/mutex.h>
#include <sys/resource.h>
#include <sys/rman.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/sbuf.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/mmc/bridge.h>
#include <dev/mmc/mmcreg.h>
@@ -72,10 +72,10 @@ static int sdhci_debug = 0;
SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RWTUN, &sdhci_debug, 0,
"Debug level");
u_int sdhci_quirk_clear = 0;
-SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
+SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_clear, CTLFLAG_RWTUN, &sdhci_quirk_clear,
0, "Mask of quirks to clear");
u_int sdhci_quirk_set = 0;
-SYSCTL_INT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
+SYSCTL_UINT(_hw_sdhci, OID_AUTO, quirk_set, CTLFLAG_RWTUN, &sdhci_quirk_set, 0,
"Mask of quirks to set");
#define RD1(slot, off) SDHCI_READ_1((slot)->bus, (slot), (off))
@@ -700,12 +700,14 @@ sdhci_card_task(void *arg, int pending __unused)
mmccam_start_discovery(slot->sim);
SDHCI_UNLOCK(slot);
#else
- d = slot->dev = device_add_child(slot->bus, "mmc", -1);
SDHCI_UNLOCK(slot);
+ bus_topo_lock();
+ d = slot->dev = device_add_child(slot->bus, "mmc", DEVICE_UNIT_ANY);
if (d) {
device_set_ivars(d, slot);
(void)device_probe_and_attach(d);
}
+ bus_topo_unlock();
#endif
} else
SDHCI_UNLOCK(slot);
@@ -731,7 +733,9 @@ sdhci_card_task(void *arg, int pending __unused)
slot->opt &= ~SDHCI_TUNING_ENABLED;
SDHCI_UNLOCK(slot);
callout_drain(&slot->retune_callout);
+ bus_topo_lock();
device_delete_child(slot->bus, d);
+ bus_topo_unlock();
#endif
} else
SDHCI_UNLOCK(slot);
@@ -760,10 +764,10 @@ sdhci_handle_card_present_locked(struct sdhci_slot *slot, bool is_present)
was_present = slot->dev != NULL;
#endif
if (!was_present && is_present) {
- taskqueue_enqueue_timeout(taskqueue_swi_giant,
+ taskqueue_enqueue_timeout(taskqueue_bus,
&slot->card_delayed_task, -SDHCI_INSERT_DELAY_TICKS);
} else if (was_present && !is_present) {
- taskqueue_enqueue(taskqueue_swi_giant, &slot->card_task);
+ taskqueue_enqueue(taskqueue_bus, &slot->card_task);
}
}
@@ -1129,7 +1133,7 @@ no_tuning:
"timeout", CTLFLAG_RWTUN, &slot->timeout, 0,
"Maximum timeout for SDHCI transfers (in secs)");
TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot);
- TIMEOUT_TASK_INIT(taskqueue_swi_giant, &slot->card_delayed_task, 0,
+ TIMEOUT_TASK_INIT(taskqueue_bus, &slot->card_delayed_task, 0,
sdhci_card_task, slot);
callout_init(&slot->card_poll_callout, 1);
callout_init_mtx(&slot->timeout_callout, &slot->mtx, 0);
@@ -1149,6 +1153,9 @@ no_tuning:
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
OID_AUTO, node_name, CTLFLAG_RW, 0, "slot specific node");
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(node_oid),
+ OID_AUTO, "quirks", CTLFLAG_RD, &slot->quirks, 0, "Slot quirks");
+
node_oid = SYSCTL_ADD_NODE(device_get_sysctl_ctx(dev),
SYSCTL_CHILDREN(node_oid), OID_AUTO, "debug", CTLFLAG_RW, 0,
"Debugging node");
@@ -1183,8 +1190,8 @@ sdhci_cleanup_slot(struct sdhci_slot *slot)
callout_drain(&slot->timeout_callout);
callout_drain(&slot->card_poll_callout);
callout_drain(&slot->retune_callout);
- taskqueue_drain(taskqueue_swi_giant, &slot->card_task);
- taskqueue_drain_timeout(taskqueue_swi_giant, &slot->card_delayed_task);
+ taskqueue_drain(taskqueue_bus, &slot->card_task);
+ taskqueue_drain_timeout(taskqueue_bus, &slot->card_delayed_task);
SDHCI_LOCK(slot);
d = slot->dev;
diff --git a/sys/dev/sdhci/sdhci_acpi.c b/sys/dev/sdhci/sdhci_acpi.c
index 7ec85a5a4839..75b7e98ea970 100644
--- a/sys/dev/sdhci/sdhci_acpi.c
+++ b/sys/dev/sdhci/sdhci_acpi.c
@@ -420,6 +420,7 @@ static device_method_t sdhci_methods[] = {
/* Bus interface */
DEVMETHOD(bus_read_ivar, sdhci_generic_read_ivar),
DEVMETHOD(bus_write_ivar, sdhci_generic_write_ivar),
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
/* mmcbr_if */
DEVMETHOD(mmcbr_update_ios, sdhci_generic_update_ios),
diff --git a/sys/dev/sdhci/sdhci_fdt.c b/sys/dev/sdhci/sdhci_fdt.c
index e7dda316539c..efc12b54e10f 100644
--- a/sys/dev/sdhci/sdhci_fdt.c
+++ b/sys/dev/sdhci/sdhci_fdt.c
@@ -47,106 +47,37 @@
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
-#include <dev/ofw/ofw_bus_subr.h>
-#include <dev/ofw/ofw_subr.h>
#include <dev/clk/clk.h>
#include <dev/clk/clk_fixed.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/ofw/ofw_subr.h>
+#include <dev/ofw/openfirm.h>
#include <dev/syscon/syscon.h>
#include <dev/phy/phy.h>
#include <dev/mmc/bridge.h>
#include <dev/sdhci/sdhci.h>
+#include <dev/sdhci/sdhci_fdt.h>
#include "mmcbr_if.h"
#include "sdhci_if.h"
#include "opt_mmccam.h"
-#include "clkdev_if.h"
-#include "syscon_if.h"
-
-#define MAX_SLOTS 6
#define SDHCI_FDT_ARMADA38X 1
#define SDHCI_FDT_XLNX_ZY7 2
#define SDHCI_FDT_QUALCOMM 3
-#define SDHCI_FDT_RK3399 4
-#define SDHCI_FDT_RK3568 5
-#define SDHCI_FDT_XLNX_ZMP 6
-
-#define RK3399_GRF_EMMCCORE_CON0 0xf000
-#define RK3399_CORECFG_BASECLKFREQ 0xff00
-#define RK3399_CORECFG_TIMEOUTCLKUNIT (1 << 7)
-#define RK3399_CORECFG_TUNINGCOUNT 0x3f
-#define RK3399_GRF_EMMCCORE_CON11 0xf02c
-#define RK3399_CORECFG_CLOCKMULTIPLIER 0xff
-
-#define RK3568_EMMC_HOST_CTRL 0x0508
-#define RK3568_EMMC_EMMC_CTRL 0x052c
-#define RK3568_EMMC_ATCTRL 0x0540
-#define RK3568_EMMC_DLL_CTRL 0x0800
-#define DLL_CTRL_SRST 0x00000001
-#define DLL_CTRL_START 0x00000002
-#define DLL_CTRL_START_POINT_DEFAULT 0x00050000
-#define DLL_CTRL_INCREMENT_DEFAULT 0x00000200
-
-#define RK3568_EMMC_DLL_RXCLK 0x0804
-#define DLL_RXCLK_DELAY_ENABLE 0x08000000
-#define DLL_RXCLK_NO_INV 0x20000000
-
-#define RK3568_EMMC_DLL_TXCLK 0x0808
-#define DLL_TXCLK_DELAY_ENABLE 0x08000000
-#define DLL_TXCLK_TAPNUM_DEFAULT 0x00000008
-#define DLL_TXCLK_TAPNUM_FROM_SW 0x01000000
-
-#define RK3568_EMMC_DLL_STRBIN 0x080c
-#define DLL_STRBIN_DELAY_ENABLE 0x08000000
-#define DLL_STRBIN_TAPNUM_DEFAULT 0x00000008
-#define DLL_STRBIN_TAPNUM_FROM_SW 0x01000000
-
-#define RK3568_EMMC_DLL_STATUS0 0x0840
-#define DLL_STATUS0_DLL_LOCK 0x00000100
-#define DLL_STATUS0_DLL_TIMEOUT 0x00000200
-
-#define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask))
-#define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask))
static struct ofw_compat_data compat_data[] = {
{ "marvell,armada-380-sdhci", SDHCI_FDT_ARMADA38X },
{ "qcom,sdhci-msm-v4", SDHCI_FDT_QUALCOMM },
- { "rockchip,rk3399-sdhci-5.1", SDHCI_FDT_RK3399 },
{ "xlnx,zy7_sdhci", SDHCI_FDT_XLNX_ZY7 },
- { "rockchip,rk3568-dwcmshc", SDHCI_FDT_RK3568 },
- { "xlnx,zynqmp-8.9a", SDHCI_FDT_XLNX_ZMP },
{ NULL, 0 }
};
-struct sdhci_fdt_softc {
- device_t dev; /* Controller device */
- u_int quirks; /* Chip specific quirks */
- u_int caps; /* If we override SDHCI_CAPABILITIES */
- uint32_t max_clk; /* Max possible freq */
- uint8_t sdma_boundary; /* If we override the SDMA boundary */
- struct resource *irq_res; /* IRQ resource */
- void *intrhand; /* Interrupt handle */
-
- int num_slots; /* Number of slots on this controller*/
- struct sdhci_slot slots[MAX_SLOTS];
- struct resource *mem_res[MAX_SLOTS]; /* Memory resource */
-
- bool wp_inverted; /* WP pin is inverted */
- bool wp_disabled; /* WP pin is not supported */
- bool no_18v; /* No 1.8V support */
-
- clk_t clk_xin; /* xin24m fixed clock */
- clk_t clk_ahb; /* ahb clock */
- clk_t clk_core; /* core clock */
- phy_t phy; /* phy to be used */
-
- struct syscon *syscon; /* Handle to the syscon */
-};
-
struct sdhci_exported_clocks_sc {
device_t clkdev;
};
@@ -168,7 +99,7 @@ DEFINE_CLASS_1(sdhci_exported_clocks_clknode, sdhci_exported_clocks_clknode_clas
sdhci_exported_clocks_clknode_methods, sizeof(struct sdhci_exported_clocks_sc),
clknode_class);
-static int
+int
sdhci_clock_ofw_map(struct clkdom *clkdom, uint32_t ncells,
phandle_t *cells, struct clknode **clk)
{
@@ -183,7 +114,7 @@ sdhci_clock_ofw_map(struct clkdom *clkdom, uint32_t ncells,
return (0);
}
-static void
+void
sdhci_export_clocks(struct sdhci_fdt_softc *sc)
{
struct clknode_init_def def;
@@ -248,7 +179,7 @@ sdhci_export_clocks(struct sdhci_fdt_softc *sc)
clkdom_dump(clkdom);
}
-static int
+int
sdhci_init_clocks(device_t dev)
{
struct sdhci_fdt_softc *sc = device_get_softc(dev);
@@ -279,7 +210,7 @@ sdhci_init_clocks(device_t dev)
return (0);
}
-static int
+int
sdhci_init_phy(struct sdhci_fdt_softc *sc)
{
int error;
@@ -301,7 +232,7 @@ sdhci_init_phy(struct sdhci_fdt_softc *sc)
return (0);
}
-static int
+int
sdhci_get_syscon(struct sdhci_fdt_softc *sc)
{
phandle_t node;
@@ -318,34 +249,6 @@ sdhci_get_syscon(struct sdhci_fdt_softc *sc)
return (0);
}
-static int
-sdhci_init_rk3399(device_t dev)
-{
- struct sdhci_fdt_softc *sc = device_get_softc(dev);
- uint64_t freq;
- uint32_t mask, val;
- int error;
-
- error = clk_get_freq(sc->clk_xin, &freq);
- if (error != 0) {
- device_printf(dev, "cannot get xin clock frequency\n");
- return (ENXIO);
- }
-
- /* Disable clock multiplier */
- mask = RK3399_CORECFG_CLOCKMULTIPLIER;
- val = 0;
- SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON11, (mask << 16) | val);
-
- /* Set base clock frequency */
- mask = RK3399_CORECFG_BASECLKFREQ;
- val = SHIFTIN((freq + (1000000 / 2)) / 1000000,
- RK3399_CORECFG_BASECLKFREQ);
- SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON0, (mask << 16) | val);
-
- return (0);
-}
-
static uint8_t
sdhci_fdt_read_1(device_t dev, struct sdhci_slot *slot, bus_size_t off)
{
@@ -441,79 +344,14 @@ sdhci_fdt_get_ro(device_t bus, device_t dev)
}
static int
-sdhci_fdt_set_clock(device_t dev, struct sdhci_slot *slot, int clock)
-{
- struct sdhci_fdt_softc *sc = device_get_softc(dev);
- int32_t val;
- int i;
-
- if (ofw_bus_search_compatible(dev, compat_data)->ocd_data ==
- SDHCI_FDT_RK3568) {
- if (clock == 400000)
- clock = 375000;
-
- if (clock) {
- clk_set_freq(sc->clk_core, clock, 0);
-
- if (clock <= 52000000) {
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_CTRL, 0x0);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_RXCLK, DLL_RXCLK_NO_INV);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_TXCLK, 0x0);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_STRBIN, 0x0);
- return (clock);
- }
-
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_CTRL, DLL_CTRL_START);
- DELAY(1000);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_CTRL, 0);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_CTRL, DLL_CTRL_START_POINT_DEFAULT |
- DLL_CTRL_INCREMENT_DEFAULT | DLL_CTRL_START);
- for (i = 0; i < 500; i++) {
- val = bus_read_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_STATUS0);
- if (val & DLL_STATUS0_DLL_LOCK &&
- !(val & DLL_STATUS0_DLL_TIMEOUT))
- break;
- DELAY(1000);
- }
- bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_ATCTRL,
- (0x1 << 16 | 0x2 << 17 | 0x3 << 19));
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_RXCLK,
- DLL_RXCLK_DELAY_ENABLE | DLL_RXCLK_NO_INV);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_TXCLK, DLL_TXCLK_DELAY_ENABLE |
- DLL_TXCLK_TAPNUM_DEFAULT|DLL_TXCLK_TAPNUM_FROM_SW);
- bus_write_4(sc->mem_res[slot->num],
- RK3568_EMMC_DLL_STRBIN, DLL_STRBIN_DELAY_ENABLE |
- DLL_STRBIN_TAPNUM_DEFAULT |
- DLL_STRBIN_TAPNUM_FROM_SW);
- }
- }
- return (clock);
-}
-
-static int
sdhci_fdt_probe(device_t dev)
{
struct sdhci_fdt_softc *sc = device_get_softc(dev);
- phandle_t node;
- pcell_t cid;
-
- sc->quirks = 0;
- sc->num_slots = 1;
- sc->max_clk = 0;
if (!ofw_bus_status_okay(dev))
return (ENXIO);
+ sc->quirks = 0;
switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
case SDHCI_FDT_ARMADA38X:
sc->quirks = SDHCI_QUIRK_BROKEN_AUTO_STOP;
@@ -525,25 +363,33 @@ sdhci_fdt_probe(device_t dev)
sc->sdma_boundary = SDHCI_BLKSZ_SDMA_BNDRY_4K;
device_set_desc(dev, "Qualcomm FDT SDHCI controller");
break;
- case SDHCI_FDT_RK3399:
- device_set_desc(dev, "Rockchip RK3399 fdt SDHCI controller");
- break;
case SDHCI_FDT_XLNX_ZY7:
sc->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;
device_set_desc(dev, "Zynq-7000 generic fdt SDHCI controller");
break;
- case SDHCI_FDT_RK3568:
- device_set_desc(dev, "Rockchip RK3568 fdt SDHCI controller");
- break;
- case SDHCI_FDT_XLNX_ZMP:
- device_set_desc(dev, "ZynqMP generic fdt SDHCI controller");
- break;
default:
return (ENXIO);
}
+ return (0);
+}
+
+int
+sdhci_fdt_attach(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ struct sdhci_slot *slot;
+ int err, slots, rid, i;
+ phandle_t node;
+ pcell_t cid;
+
+ sc->dev = dev;
+
node = ofw_bus_get_node(dev);
+ sc->num_slots = 1;
+ sc->max_clk = 0;
+
/* Allow dts to patch quirks, slots, and max-frequency. */
if ((OF_getencprop(node, "quirks", &cid, sizeof(cid))) > 0)
sc->quirks = cid;
@@ -558,18 +404,6 @@ sdhci_fdt_probe(device_t dev)
if (OF_hasprop(node, "disable-wp"))
sc->wp_disabled = true;
- return (0);
-}
-
-static int
-sdhci_fdt_attach(device_t dev)
-{
- struct sdhci_fdt_softc *sc = device_get_softc(dev);
- struct sdhci_slot *slot;
- int err, slots, rid, i, compat;
-
- sc->dev = dev;
-
/* Allocate IRQ. */
rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
@@ -579,44 +413,6 @@ sdhci_fdt_attach(device_t dev)
return (ENOMEM);
}
- compat = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
- switch (compat) {
- case SDHCI_FDT_RK3399:
- case SDHCI_FDT_XLNX_ZMP:
- err = sdhci_init_clocks(dev);
- if (err != 0) {
- device_printf(dev, "Cannot init clocks\n");
- return (err);
- }
- sdhci_export_clocks(sc);
- if ((err = sdhci_init_phy(sc)) != 0) {
- device_printf(dev, "Cannot init phy\n");
- return (err);
- }
- if ((err = sdhci_get_syscon(sc)) != 0) {
- device_printf(dev, "Cannot get syscon handle\n");
- return (err);
- }
- if (compat == SDHCI_FDT_RK3399) {
- err = sdhci_init_rk3399(dev);
- if (err != 0) {
- device_printf(dev, "Cannot init RK3399 SDHCI\n");
- return (err);
- }
- }
- break;
- case SDHCI_FDT_RK3568:
- /* setup & enable clocks */
- if (clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core)) {
- device_printf(dev, "cannot get core clock\n");
- return (ENXIO);
- }
- clk_enable(sc->clk_core);
- break;
- default:
- break;
- }
-
/* Scan all slots. */
	slots = sc->num_slots;	/* number of slots determined above. */
sc->num_slots = 0;
@@ -640,7 +436,6 @@ sdhci_fdt_attach(device_t dev)
if (sdhci_init_slot(dev, slot, i) != 0)
continue;
-
sc->num_slots++;
}
device_printf(dev, "%d slot(s) allocated\n", sc->num_slots);
@@ -660,13 +455,13 @@ sdhci_fdt_attach(device_t dev)
return (0);
}
-static int
+int
sdhci_fdt_detach(device_t dev)
{
struct sdhci_fdt_softc *sc = device_get_softc(dev);
int i;
- bus_generic_detach(dev);
+ bus_detach_children(dev);
bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res),
sc->irq_res);
@@ -680,6 +475,14 @@ sdhci_fdt_detach(device_t dev)
return (0);
}
+int
+sdhci_fdt_set_clock(device_t dev, struct sdhci_slot *slot, int clock)
+{
+
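+	/* Generic driver: no SoC-specific clock setup; subclasses override. */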
+ return (clock);
+}
+
static device_method_t sdhci_fdt_methods[] = {
/* device_if */
DEVMETHOD(device_probe, sdhci_fdt_probe),
@@ -711,7 +514,7 @@ static device_method_t sdhci_fdt_methods[] = {
DEVMETHOD_END
};
-static driver_t sdhci_fdt_driver = {
+driver_t sdhci_fdt_driver = {
"sdhci_fdt",
sdhci_fdt_methods,
sizeof(struct sdhci_fdt_softc),
diff --git a/sys/dev/sdhci/sdhci_fdt.h b/sys/dev/sdhci/sdhci_fdt.h
new file mode 100644
index 000000000000..740fd375edd9
--- /dev/null
+++ b/sys/dev/sdhci/sdhci_fdt.h
@@ -0,0 +1,66 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SDHCI_FDT_H_
+#define _SDHCI_FDT_H_
+
+#define SDHCI_FDT_MAX_SLOTS 6
+
+struct sdhci_fdt_softc {
+ device_t dev; /* Controller device */
+ u_int quirks; /* Chip specific quirks */
+ u_int caps; /* If we override SDHCI_CAPABILITIES */
+ uint32_t max_clk; /* Max possible freq */
+ uint8_t sdma_boundary; /* If we override the SDMA boundary */
+ struct resource *irq_res; /* IRQ resource */
+ void *intrhand; /* Interrupt handle */
+
+	int num_slots; /* Number of slots on this controller */
+ struct sdhci_slot slots[SDHCI_FDT_MAX_SLOTS];
+ struct resource *mem_res[SDHCI_FDT_MAX_SLOTS]; /* Memory resource */
+
+ bool wp_inverted; /* WP pin is inverted */
+ bool wp_disabled; /* WP pin is not supported */
+ bool no_18v; /* No 1.8V support */
+
+ clk_t clk_xin; /* xin24m fixed clock */
+ clk_t clk_ahb; /* ahb clock */
+ clk_t clk_core; /* core clock */
+ phy_t phy; /* phy to be used */
+
+ struct syscon *syscon; /* Handle to the syscon */
+};
+
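+/* Shared between the generic sdhci_fdt driver and SoC-specific front-ends. */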
+int sdhci_fdt_attach(device_t dev);
+int sdhci_fdt_detach(device_t dev);
+int sdhci_get_syscon(struct sdhci_fdt_softc *sc);
+int sdhci_init_phy(struct sdhci_fdt_softc *sc);
+void sdhci_export_clocks(struct sdhci_fdt_softc *sc);
+int sdhci_clock_ofw_map(struct clkdom *clkdom, uint32_t ncells,
+ phandle_t *cells, struct clknode **clk);
+int sdhci_init_clocks(device_t dev);
+int sdhci_fdt_set_clock(device_t dev, struct sdhci_slot *slot,
+ int clock);
+#endif
diff --git a/sys/dev/sdhci/sdhci_fdt_cvitek.c b/sys/dev/sdhci/sdhci_fdt_cvitek.c
new file mode 100644
index 000000000000..e13e10df0dc2
--- /dev/null
+++ b/sys/dev/sdhci/sdhci_fdt_cvitek.c
@@ -0,0 +1,144 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2025 Bojan Novković <bnovkov@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/types.h>
+#include <sys/taskqueue.h>
+#include <sys/module.h>
+
+#include <machine/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/ofw_subr.h>
+#include <dev/clk/clk.h>
+#include <dev/clk/clk_fixed.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/syscon/syscon.h>
+
+#include <dev/phy/phy.h>
+#include <dev/mmc/bridge.h>
+#include <dev/mmc/mmcbrvar.h>
+#include <dev/mmc/mmcreg.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/mmc/mmc_fdt_helpers.h>
+
+#include <dev/sdhci/sdhci.h>
+#include <dev/sdhci/sdhci_fdt_gpio.h>
+#include <dev/sdhci/sdhci_fdt.h>
+
+#include "syscon_if.h"
+#include "mmcbr_if.h"
+#include "sdhci_if.h"
+
+#include "opt_mmccam.h"
+#include "opt_soc.h"
+
+#define CV181X_SYSCTRL_SD_PWRSW_CTRL 0x1F4
+#define SD_PWRSW_CTRL_RESET_MASK 0x9
+#define CVI_CV181X_SDHCI_VENDOR_OFFSET 0x200
+#define CVI_CV181X_SDHCI_EMMC_CTRL (CVI_CV181X_SDHCI_VENDOR_OFFSET + 0x0)
+#define EMMC_CTRL_RESET_MASK 0x302
+#define CVI_CV181X_SDHCI_PHY_TX_RX_DLY (CVI_CV181X_SDHCI_VENDOR_OFFSET + 0x40)
+#define PHY_TX_RX_DLY_RESET_MASK 0x1000100
+#define CVI_CV181X_SDHCI_PHY_CONFIG (CVI_CV181X_SDHCI_VENDOR_OFFSET + 0x4C)
+#define PHY_CONFIG_RESET_MASK 0x1
+
+#define SDHCI_FDT_CVITEK_CV181X_SD 1
+
+static struct ofw_compat_data compat_data[] = {
+ { "cvitek,cv181x-sd", SDHCI_FDT_CVITEK_CV181X_SD },
+ { NULL, 0 }
+};
+
+static int
+sdhci_fdt_cvitek_probe(device_t dev)
+{
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
+ return (ENXIO);
+
+ device_set_desc(dev, "Cvitek CV181x SDHCI controller");
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+sdhci_fdt_cvitek_attach(device_t dev)
+{
+ int error;
+ uint32_t reg;
+ phandle_t node;
+ struct resource *res;
+ struct syscon *syscon;
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+
+ if (sdhci_fdt_attach(dev))
+ return (ENXIO);
+
+ res = sc->mem_res[0];
+ node = ofw_bus_find_compatible(OF_finddevice("/"), "syscon");
+ error = syscon_get_by_ofw_node(dev, node, &syscon);
+ if (error != 0) {
+ device_printf(dev, "Couldn't get syscon handle\n");
+ return (error);
+ }
+
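+	/* Reset the SD power switch via the system controller, then settle. */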
+ SYSCON_WRITE_4(syscon, CV181X_SYSCTRL_SD_PWRSW_CTRL,
+ SD_PWRSW_CTRL_RESET_MASK);
+ DELAY(1000);
+
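+	/* Restore the vendor eMMC control and PHY delay registers to defaults. */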
+ reg = bus_read_4(res, CVI_CV181X_SDHCI_EMMC_CTRL);
+ reg |= EMMC_CTRL_RESET_MASK;
+ bus_write_4(res, CVI_CV181X_SDHCI_EMMC_CTRL, reg);
+ bus_write_4(res, CVI_CV181X_SDHCI_PHY_TX_RX_DLY,
+ PHY_TX_RX_DLY_RESET_MASK);
+ bus_write_4(res, CVI_CV181X_SDHCI_PHY_CONFIG,
+ PHY_CONFIG_RESET_MASK);
+
+ return (0);
+}
+
+static device_method_t sdhci_fdt_cvitek_methods[] = {
+ /* device_if */
+ DEVMETHOD(device_probe, sdhci_fdt_cvitek_probe),
+ DEVMETHOD(device_attach, sdhci_fdt_cvitek_attach),
+
+ DEVMETHOD_END
+};
+extern driver_t sdhci_fdt_driver;
+
+DEFINE_CLASS_1(sdhci_cvitek, sdhci_fdt_cvitek_driver, sdhci_fdt_cvitek_methods,
+ sizeof(struct sdhci_fdt_softc), sdhci_fdt_driver);
+DRIVER_MODULE(sdhci_cvitek, simplebus, sdhci_fdt_cvitek_driver, NULL, NULL);
+
+#ifndef MMCCAM
+MMC_DECLARE_BRIDGE(sdhci_fdt_cvitek);
+#endif
diff --git a/sys/dev/sdhci/sdhci_fdt_rockchip.c b/sys/dev/sdhci/sdhci_fdt_rockchip.c
new file mode 100644
index 000000000000..44a5e2ffe271
--- /dev/null
+++ b/sys/dev/sdhci/sdhci_fdt_rockchip.c
@@ -0,0 +1,282 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2019 Ganbold Tsagaankhuu <ganbold@freebsd.org>
+ * Copyright (c) 2022 Søren Schmidt <sos@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/ofw/ofw_subr.h>
+#include <dev/clk/clk.h>
+#include <dev/clk/clk_fixed.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/syscon/syscon.h>
+#include <dev/phy/phy.h>
+
+#include <dev/mmc/bridge.h>
+
+#include <dev/sdhci/sdhci.h>
+#include <dev/sdhci/sdhci_fdt.h>
+
+#include "mmcbr_if.h"
+#include "sdhci_if.h"
+
+#include "opt_mmccam.h"
+
+#include "clkdev_if.h"
+#include "syscon_if.h"
+
+#define SDHCI_FDT_RK3399 1
+#define SDHCI_FDT_RK3568 2
+
+#define RK3399_GRF_EMMCCORE_CON0 0xf000
+#define RK3399_CORECFG_BASECLKFREQ 0xff00
+#define RK3399_CORECFG_TIMEOUTCLKUNIT (1 << 7)
+#define RK3399_CORECFG_TUNINGCOUNT 0x3f
+#define RK3399_GRF_EMMCCORE_CON11 0xf02c
+#define RK3399_CORECFG_CLOCKMULTIPLIER 0xff
+
+#define RK3568_EMMC_HOST_CTRL 0x0508
+#define RK3568_EMMC_EMMC_CTRL 0x052c
+#define RK3568_EMMC_ATCTRL 0x0540
+#define RK3568_EMMC_DLL_CTRL 0x0800
+#define DLL_CTRL_SRST 0x00000001
+#define DLL_CTRL_START 0x00000002
+#define DLL_CTRL_START_POINT_DEFAULT 0x00050000
+#define DLL_CTRL_INCREMENT_DEFAULT 0x00000200
+
+#define RK3568_EMMC_DLL_RXCLK 0x0804
+#define DLL_RXCLK_DELAY_ENABLE 0x08000000
+#define DLL_RXCLK_NO_INV 0x20000000
+
+#define RK3568_EMMC_DLL_TXCLK 0x0808
+#define DLL_TXCLK_DELAY_ENABLE 0x08000000
+#define DLL_TXCLK_TAPNUM_DEFAULT 0x00000008
+#define DLL_TXCLK_TAPNUM_FROM_SW 0x01000000
+
+#define RK3568_EMMC_DLL_STRBIN 0x080c
+#define DLL_STRBIN_DELAY_ENABLE 0x08000000
+#define DLL_STRBIN_TAPNUM_DEFAULT 0x00000008
+#define DLL_STRBIN_TAPNUM_FROM_SW 0x01000000
+
+#define RK3568_EMMC_DLL_STATUS0 0x0840
+#define DLL_STATUS0_DLL_LOCK 0x00000100
+#define DLL_STATUS0_DLL_TIMEOUT 0x00000200
+
+#define LOWEST_SET_BIT(mask) ((((mask) - 1) & (mask)) ^ (mask))
+#define SHIFTIN(x, mask) ((x) * LOWEST_SET_BIT(mask))
+
+static struct ofw_compat_data compat_data[] = {
+ { "rockchip,rk3399-sdhci-5.1", SDHCI_FDT_RK3399 },
+ { "rockchip,rk3568-dwcmshc", SDHCI_FDT_RK3568 },
+ { NULL, 0 }
+};
+
+static int
+sdhci_fdt_rockchip_probe(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	sc->quirks = 0;
+	switch (ofw_bus_search_compatible(dev, compat_data)->ocd_data) {
+	case SDHCI_FDT_RK3399:
+ device_set_desc(dev, "Rockchip RK3399 fdt SDHCI controller");
+ break;
+ case SDHCI_FDT_RK3568:
+ device_set_desc(dev, "Rockchip RK3568 fdt SDHCI controller");
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+sdhci_init_rk3399(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ uint64_t freq;
+ uint32_t mask, val;
+ int error;
+
+ error = clk_get_freq(sc->clk_xin, &freq);
+ if (error != 0) {
+ device_printf(dev, "cannot get xin clock frequency\n");
+ return (ENXIO);
+ }
+
+ /* Disable clock multiplier */
+ mask = RK3399_CORECFG_CLOCKMULTIPLIER;
+ val = 0;
+ SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON11, (mask << 16) | val);
+
+ /* Set base clock frequency */
+ mask = RK3399_CORECFG_BASECLKFREQ;
+ val = SHIFTIN((freq + (1000000 / 2)) / 1000000,
+ RK3399_CORECFG_BASECLKFREQ);
+ SYSCON_WRITE_4(sc->syscon, RK3399_GRF_EMMCCORE_CON0, (mask << 16) | val);
+
+ return (0);
+}
+
+static int
+sdhci_fdt_rockchip_set_clock(device_t dev, struct sdhci_slot *slot, int clock)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ int32_t val;
+ int i;
+
+ if (ofw_bus_search_compatible(dev, compat_data)->ocd_data ==
+ SDHCI_FDT_RK3568) {
+ if (clock == 400000)
+ clock = 375000;
+
+ if (clock) {
+ clk_set_freq(sc->clk_core, clock, 0);
+
+ if (clock <= 52000000) {
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_CTRL, 0x0);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_RXCLK, DLL_RXCLK_NO_INV);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_TXCLK, 0x0);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_STRBIN, 0x0);
+ return (clock);
+ }
+
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_CTRL, DLL_CTRL_START);
+ DELAY(1000);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_CTRL, 0);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_CTRL, DLL_CTRL_START_POINT_DEFAULT |
+ DLL_CTRL_INCREMENT_DEFAULT | DLL_CTRL_START);
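+			/* Poll up to 500 ms for the DLL to lock. */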
+ for (i = 0; i < 500; i++) {
+ val = bus_read_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_STATUS0);
+ if (val & DLL_STATUS0_DLL_LOCK &&
+ !(val & DLL_STATUS0_DLL_TIMEOUT))
+ break;
+ DELAY(1000);
+ }
+ bus_write_4(sc->mem_res[slot->num], RK3568_EMMC_ATCTRL,
+ (0x1 << 16 | 0x2 << 17 | 0x3 << 19));
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_RXCLK,
+ DLL_RXCLK_DELAY_ENABLE | DLL_RXCLK_NO_INV);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_TXCLK, DLL_TXCLK_DELAY_ENABLE |
+ DLL_TXCLK_TAPNUM_DEFAULT|DLL_TXCLK_TAPNUM_FROM_SW);
+ bus_write_4(sc->mem_res[slot->num],
+ RK3568_EMMC_DLL_STRBIN, DLL_STRBIN_DELAY_ENABLE |
+ DLL_STRBIN_TAPNUM_DEFAULT |
+ DLL_STRBIN_TAPNUM_FROM_SW);
+ }
+ }
+ return (sdhci_fdt_set_clock(dev, slot, clock));
+}
+
+static int
+sdhci_fdt_rockchip_attach(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ int err, compat;
+
+ sc->dev = dev;
+ compat = ofw_bus_search_compatible(dev, compat_data)->ocd_data;
+ switch (compat) {
+ case SDHCI_FDT_RK3399:
+ err = sdhci_init_clocks(dev);
+ if (err != 0) {
+ device_printf(dev, "Cannot init clocks\n");
+ return (err);
+ }
+ sdhci_export_clocks(sc);
+ if ((err = sdhci_init_phy(sc)) != 0) {
+ device_printf(dev, "Cannot init phy\n");
+ return (err);
+ }
+ if ((err = sdhci_get_syscon(sc)) != 0) {
+ device_printf(dev, "Cannot get syscon handle\n");
+ return (err);
+ }
+ err = sdhci_init_rk3399(dev);
+ if (err != 0) {
+ device_printf(dev, "Cannot init RK3399 SDHCI\n");
+ return (err);
+ }
+ break;
+ case SDHCI_FDT_RK3568:
+ /* setup & enable clocks */
+ if (clk_get_by_ofw_name(dev, 0, "core", &sc->clk_core)) {
+ device_printf(dev, "cannot get core clock\n");
+ return (ENXIO);
+ }
+ clk_enable(sc->clk_core);
+ break;
+ default:
+ break;
+ }
+
+ return (sdhci_fdt_attach(dev));
+}
+
+static device_method_t sdhci_fdt_rockchip_methods[] = {
+ /* device_if */
+ DEVMETHOD(device_probe, sdhci_fdt_rockchip_probe),
+ DEVMETHOD(device_attach, sdhci_fdt_rockchip_attach),
+
+ /* SDHCI methods */
+ DEVMETHOD(sdhci_set_clock, sdhci_fdt_rockchip_set_clock),
+
+ DEVMETHOD_END
+};
+extern driver_t sdhci_fdt_driver;
+
+DEFINE_CLASS_1(sdhci_rockchip, sdhci_fdt_rockchip_driver, sdhci_fdt_rockchip_methods,
+ sizeof(struct sdhci_fdt_softc), sdhci_fdt_driver);
+DRIVER_MODULE(sdhci_rockchip, simplebus, sdhci_fdt_rockchip_driver, NULL, NULL);
diff --git a/sys/dev/sdhci/sdhci_fdt_xilinx.c b/sys/dev/sdhci/sdhci_fdt_xilinx.c
new file mode 100644
index 000000000000..1800e756dd33
--- /dev/null
+++ b/sys/dev/sdhci/sdhci_fdt_xilinx.c
@@ -0,0 +1,115 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2023 Emmanuel Vadot <manu@freebsd.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <dev/ofw/ofw_subr.h>
+#include <dev/clk/clk.h>
+#include <dev/clk/clk_fixed.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/syscon/syscon.h>
+#include <dev/phy/phy.h>
+
+#include <dev/mmc/bridge.h>
+
+#include <dev/sdhci/sdhci.h>
+#include <dev/sdhci/sdhci_fdt.h>
+
+#include "mmcbr_if.h"
+#include "sdhci_if.h"
+
+#include "opt_mmccam.h"
+
+#include "clkdev_if.h"
+#include "syscon_if.h"
+
+static int
+sdhci_fdt_xilinx_probe(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+
+ if (!ofw_bus_is_compatible(dev, "xlnx,zynqmp-8.9a"))
+ return (ENXIO);
+
+ sc->quirks = 0;
+ device_set_desc(dev, "ZynqMP generic fdt SDHCI controller");
+
+ return (0);
+}
+
+static int
+sdhci_fdt_xilinx_attach(device_t dev)
+{
+ struct sdhci_fdt_softc *sc = device_get_softc(dev);
+ int err;
+
+ err = sdhci_init_clocks(dev);
+ if (err != 0) {
+ device_printf(dev, "Cannot init clocks\n");
+ return (err);
+ }
+ sdhci_export_clocks(sc);
+ if ((err = sdhci_init_phy(sc)) != 0) {
+ device_printf(dev, "Cannot init phy\n");
+ return (err);
+ }
+ if ((err = sdhci_get_syscon(sc)) != 0) {
+ device_printf(dev, "Cannot get syscon handle\n");
+ return (err);
+ }
+
+ return (sdhci_fdt_attach(dev));
+}
+
+static device_method_t sdhci_fdt_xilinx_methods[] = {
+ /* device_if */
+ DEVMETHOD(device_probe, sdhci_fdt_xilinx_probe),
+ DEVMETHOD(device_attach, sdhci_fdt_xilinx_attach),
+
+ DEVMETHOD_END
+};
+extern driver_t sdhci_fdt_driver;
+
+DEFINE_CLASS_1(sdhci_xilinx, sdhci_fdt_xilinx_driver, sdhci_fdt_xilinx_methods,
+ sizeof(struct sdhci_fdt_softc), sdhci_fdt_driver);
+DRIVER_MODULE(sdhci_xilinx, simplebus, sdhci_fdt_xilinx_driver, NULL, NULL);
diff --git a/sys/dev/sdhci/sdhci_fsl_fdt.c b/sys/dev/sdhci/sdhci_fsl_fdt.c
index 3220f00f277d..185b53a6d2c9 100644
--- a/sys/dev/sdhci/sdhci_fsl_fdt.c
+++ b/sys/dev/sdhci/sdhci_fsl_fdt.c
@@ -995,7 +995,8 @@ sdhci_fsl_fdt_attach(device_t dev)
sc->slot_init_done = true;
sdhci_start_slot(&sc->slot);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
err_free_gpio:
sdhci_fdt_gpio_teardown(sc->gpio);
diff --git a/sys/dev/sdhci/sdhci_xenon.c b/sys/dev/sdhci/sdhci_xenon.c
index 4a823b896ca8..098412a81148 100644
--- a/sys/dev/sdhci/sdhci_xenon.c
+++ b/sys/dev/sdhci/sdhci_xenon.c
@@ -592,7 +592,7 @@ sdhci_xenon_detach(device_t dev)
{
struct sdhci_xenon_softc *sc = device_get_softc(dev);
- bus_generic_detach(dev);
+ bus_detach_children(dev);
bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq_res),
sc->irq_res);
diff --git a/sys/dev/sdhci/sdhci_xenon_acpi.c b/sys/dev/sdhci/sdhci_xenon_acpi.c
index 01b6c14dc5f2..3e8b2c4a349c 100644
--- a/sys/dev/sdhci/sdhci_xenon_acpi.c
+++ b/sys/dev/sdhci/sdhci_xenon_acpi.c
@@ -86,8 +86,6 @@ sdhci_xenon_acpi_attach(device_t dev)
memset(&mmc_helper, 0, sizeof(mmc_helper));
slot = malloc(sizeof(*slot), M_DEVBUF, M_ZERO | M_WAITOK);
- if (!slot)
- return (ENOMEM);
/*
	 * Don't use regulators.
diff --git a/sys/dev/sdio/sdiob.c b/sys/dev/sdio/sdiob.c
index 60389c419e35..cb2cc0da6b77 100644
--- a/sys/dev/sdio/sdiob.c
+++ b/sys/dev/sdio/sdiob.c
@@ -150,7 +150,7 @@ sdiob_rw_direct_sc(struct sdiob_softc *sc, uint8_t fn, uint32_t addr, bool wr,
sc->ccb = xpt_alloc_ccb();
else
memset(sc->ccb, 0, sizeof(*sc->ccb));
- xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NONE);
+ xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NORMAL);
CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_TRACE,
("%s(fn=%d, addr=%#02x, wr=%d, *val=%#02x)\n", __func__,
fn, addr, wr, *val));
@@ -250,7 +250,7 @@ sdiob_rw_extended_cam(struct sdiob_softc *sc, uint8_t fn, uint32_t addr,
sc->ccb = xpt_alloc_ccb();
else
memset(sc->ccb, 0, sizeof(*sc->ccb));
- xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NONE);
+ xpt_setup_ccb(&sc->ccb->ccb_h, sc->periph->path, CAM_PRIORITY_NORMAL);
CAM_DEBUG(sc->ccb->ccb_h.path, CAM_DEBUG_TRACE,
("%s(fn=%d addr=%#0x wr=%d b_count=%u blksz=%u buf=%p incr=%d)\n",
__func__, fn, addr, wr, b_count, blksz, buffer, incaddr));
@@ -558,7 +558,7 @@ sdiob_attach(device_t dev)
* Do this before any child gets a chance to attach.
*/
for (i = 0; i < sc->cardinfo.num_funcs; i++) {
- sc->child[i] = device_add_child(dev, NULL, -1);
+ sc->child[i] = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (sc->child[i] == NULL) {
device_printf(dev, "%s: failed to add child\n", __func__);
return (ENXIO);
@@ -923,7 +923,7 @@ sdio_newbus_sim_add(struct sdiob_softc *sc)
__func__, sc, sc->periph, sc->periph->sim));
if (sc->dev == NULL)
- sc->dev = BUS_ADD_CHILD(pdev, 0, SDIOB_NAME_S, -1);
+ sc->dev = BUS_ADD_CHILD(pdev, 0, SDIOB_NAME_S, DEVICE_UNIT_ANY);
if (sc->dev == NULL)
return (ENXIO);
device_set_softc(sc->dev, sc);
@@ -977,9 +977,6 @@ sdiobdiscover(void *context, int pending)
if (sc->ccb == NULL)
sc->ccb = xpt_alloc_ccb();
- else
- memset(sc->ccb, 0, sizeof(*sc->ccb));
- xpt_setup_ccb(&sc->ccb->ccb_h, periph->path, CAM_PRIORITY_NONE);
/*
* Read CCCR and FBR of each function, get manufacturer and device IDs,
diff --git a/sys/dev/sdio/sdiodevs b/sys/dev/sdio/sdiodevs
index 194ef8e5d901..8c341e77d9f9 100644
--- a/sys/dev/sdio/sdiodevs
+++ b/sys/dev/sdio/sdiodevs
@@ -42,8 +42,11 @@
* List of TPLMID_MANF "vendor ID"s.
* Please sort by vendor ID ascending.
*/
+vendor REALTEK 0x024c Realtek
+vendor ATHEROS 0x0271 Atheros
vendor BROADCOM 0x02d0 Broadcom
vendor CYPRESS 0x02d0 Cypress/Broadcom
+vendor MEDIATEK 0x037a MediaTek
/*
* --------------------------------------------------------------------------
@@ -51,6 +54,21 @@ vendor CYPRESS 0x02d0 Cypress/Broadcom
* Please group by vendor in same order as above.
*/
+/* Realtek products */
+/* PR 251063 */
+product REALTEK RTW8723BS 0xb723 802.11bgn SDIO WLAN with Bluetooth 4.0 Single-Chip Controller
+/* rtw88 */
+product REALTEK RTW8821BS 0xb821
+product REALTEK RTW8822BS 0xb822 802.11ac/abgn SDIO WLAN with Bluetooth 4.1 Single-Chip Controller
+product REALTEK RTW8821CS 0xc821 802.11ac/abgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+product REALTEK RTW8822CS 0xc822 802.11ac/abgn SDIO WLAN with Bluetooth x.x Single-Chip Controller
+product REALTEK RTW8723DS_1ANT 0xd724 802.11bgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+product REALTEK RTW8723DS_2ANT 0xd723 802.11bgn SDIO WLAN with Bluetooth 4.2 Single-Chip Controller
+
+/* Atheros/QCA products */
+product ATHEROS AR6005 0x050a Qualcomm Atheros 802.11ac WLAN SDIO
+product ATHEROS QCA9377 0x0701 Qualcomm Atheros 802.11ac WLAN SDIO
+
/* Broadcom products */
product BROADCOM 43241 0x4324 BCM43241 fullmac SDIO WiFi
product BROADCOM 4329 0x4329 BCM4329 fullmac SDIO WiFi
@@ -61,13 +79,25 @@ product BROADCOM 4339 0x4339 BCM4339 fullmac SDIO WiFi
product BROADCOM 4345 0x4345 BCM4345 fullmac SDIO WiFi
product BROADCOM 4354 0x4354 BCM4354 fullmac SDIO WiFi
product BROADCOM 4356 0x4356 BCM4356 fullmac SDIO WiFi
+product BROADCOM 4359 0x4359 BCM4359 fullmac SDIO WiFi
product BROADCOM 43143 0xa887 BCM43143 fullmac SDIO WiFi
product BROADCOM 43340 0xa94c BCM43340 fullmac SDIO WiFi
product BROADCOM 43341 0xa94d BCM43341 fullmac SDIO WiFi
product BROADCOM 43362 0xa962 BCM43362 fullmac SDIO WiFi
product BROADCOM 43364 0xa9a4 BCM43364 fullmac SDIO WiFi
product BROADCOM 43430 0xa9a6 BCM43430 fullmac SDIO WiFi
+product BROADCOM 43439 0xa9af BCM43439 fullmac SDIO WiFi
product BROADCOM 43455 0xa9bf BCM43455 fullmac SDIO WiFi
-product CYPRESS 4373 0x4373 CY4373 fullmac SDIO WiFi
+product BROADCOM CYPRESS_4373 0x4373 BCMCY4373 fullmac SDIO WiFi
+product BROADCOM CYPRESS_43012 0xa804 BCMCY43012 fullmac SDIO WiFi
+product BROADCOM CYPRESS_43752 0xaae8 BCMCY43752 fullmac SDIO WiFi
+product BROADCOM CYPRESS_89359 0x4355 BCMCY89359 fullmac SDIO WiFi
+
+product CYPRESS 43439 0xbd3d CY43439 fullmac SDIO WiFi
+palias BROADCOM_CYPRESS_43439 CYPRESS_43439
+
+/* MediaTek products */
+product MEDIATEK MT7663S 0x7603 MediaTek MT7663S SDIO WiFi
+product MEDIATEK MT7921S 0x7901 MediaTek MT7921S SDIO WiFi
/* end */
diff --git a/sys/dev/sff/sfp_fdt.c b/sys/dev/sff/sfp_fdt.c
index 7430282ede70..e566d8ced78c 100644
--- a/sys/dev/sff/sfp_fdt.c
+++ b/sys/dev/sff/sfp_fdt.c
@@ -138,7 +138,6 @@ static device_method_t sfp_fdt_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, sfp_fdt_probe),
DEVMETHOD(device_attach, sfp_fdt_attach),
- DEVMETHOD(device_detach, bus_generic_detach),
/* SFF */
DEVMETHOD(sff_get_i2c_bus, sfp_fdt_get_i2c_bus),
diff --git a/sys/dev/sfxge/sfxge.c b/sys/dev/sfxge/sfxge.c
index 23294f90f517..7d3217fb50de 100644
--- a/sys/dev/sfxge/sfxge.c
+++ b/sys/dev/sfxge/sfxge.c
@@ -611,8 +611,6 @@ sfxge_ifnet_init(if_t ifp, struct sfxge_softc *sc)
if_sethwassistbits(ifp, CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO |
CSUM_TCP_IPV6 | CSUM_UDP_IPV6, 0);
- ether_ifattach(ifp, encp->enc_mac_addr);
-
if_settransmitfn(ifp, sfxge_if_transmit);
if_setqflushfn(ifp, sfxge_if_qflush);
@@ -620,13 +618,11 @@ sfxge_ifnet_init(if_t ifp, struct sfxge_softc *sc)
DBGPRINT(sc->dev, "ifmedia_init");
if ((rc = sfxge_port_ifmedia_init(sc)) != 0)
- goto fail;
+ return (rc);
- return (0);
+ ether_ifattach(ifp, encp->enc_mac_addr);
-fail:
- ether_ifdetach(sc->ifnet);
- return (rc);
+ return (0);
}
void
@@ -1081,11 +1077,6 @@ sfxge_attach(device_t dev)
/* Allocate ifnet. */
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "Couldn't allocate ifnet\n");
- error = ENOMEM;
- goto fail;
- }
sc->ifnet = ifp;
/* Initialize hardware. */
@@ -1122,8 +1113,6 @@ fail3:
fail2:
if_free(sc->ifnet);
-
-fail:
DBGPRINT(sc->dev, "failed %d", error);
return (error);
}
diff --git a/sys/dev/sfxge/sfxge_rx.c b/sys/dev/sfxge/sfxge_rx.c
index 28f9a42b0d22..7e0948425d77 100644
--- a/sys/dev/sfxge/sfxge_rx.c
+++ b/sys/dev/sfxge/sfxge_rx.c
@@ -483,7 +483,7 @@ sfxge_lro_merge(struct sfxge_lro_state *st, struct sfxge_lro_conn *c,
iph->ip6_plen += mbuf->m_len;
c_th = (struct tcphdr *)(iph + 1);
}
- c_th->th_flags |= (th->th_flags & TH_PUSH);
+ tcp_set_flags(c_th, tcp_get_flags(c_th) | (tcp_get_flags(th) & TH_PUSH));
c->th_last = th;
++st->n_merges;
@@ -545,7 +545,7 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
hdr_length);
th_seq = ntohl(th->th_seq);
dont_merge = ((data_length <= 0)
- | (th->th_flags & (TH_URG | TH_SYN | TH_RST | TH_FIN)));
+ | (tcp_get_flags(th) & (TH_URG | TH_SYN | TH_RST | TH_FIN)));
/* Check for options other than aligned timestamp. */
if (th->th_off != 5) {
@@ -592,7 +592,7 @@ sfxge_lro_try_merge(struct sfxge_rxq *rxq, struct sfxge_lro_conn *c)
if (__predict_false(dont_merge)) {
if (c->mbuf != NULL)
sfxge_lro_deliver(&rxq->lro, c);
- if (th->th_flags & (TH_FIN | TH_RST)) {
+ if (tcp_get_flags(th) & (TH_FIN | TH_RST)) {
++rxq->lro.n_drop_closed;
sfxge_lro_drop(rxq, c);
return (0);
diff --git a/sys/dev/sfxge/sfxge_tx.c b/sys/dev/sfxge/sfxge_tx.c
index 511222f656e5..dcc3dd4cd100 100644
--- a/sys/dev/sfxge/sfxge_tx.c
+++ b/sys/dev/sfxge/sfxge_tx.c
@@ -859,10 +859,10 @@ static void sfxge_parse_tx_packet(struct mbuf *mbuf)
* generates TSO packets with RST flag. So, do not assert
* its absence.
*/
- KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
+ KASSERT(!(tcp_get_flags(th) & (TH_URG | TH_SYN)),
("incompatible TCP flag 0x%x on TSO packet",
- th->th_flags & (TH_URG | TH_SYN)));
- TSO_MBUF_FLAGS(mbuf) = th->th_flags;
+ tcp_get_flags(th) & (TH_URG | TH_SYN)));
+ TSO_MBUF_FLAGS(mbuf) = tcp_get_flags(th);
}
#endif
@@ -1117,10 +1117,10 @@ static void tso_start(struct sfxge_txq *txq, struct sfxge_tso_state *tso,
* generates TSO packets with RST flag. So, do not assert
* its absence.
*/
- KASSERT(!(th->th_flags & (TH_URG | TH_SYN)),
+ KASSERT(!(tcp_get_flags(th) & (TH_URG | TH_SYN)),
("incompatible TCP flag 0x%x on TSO packet",
- th->th_flags & (TH_URG | TH_SYN)));
- tso->tcp_flags = th->th_flags;
+ tcp_get_flags(th) & (TH_URG | TH_SYN)));
+ tso->tcp_flags = tcp_get_flags(th);
#else
tso->seqnum = TSO_MBUF_SEQNUM(mbuf);
tso->tcp_flags = TSO_MBUF_FLAGS(mbuf);
@@ -1319,7 +1319,7 @@ static int tso_start_new_packet(struct sfxge_txq *txq,
if (tso->out_len > tso->seg_size) {
/* This packet will not finish the TSO burst. */
ip_length = tso->header_len - tso->nh_off + tso->seg_size;
- tsoh_th->th_flags &= ~(TH_FIN | TH_PUSH);
+ tcp_set_flags(tsoh_th, tcp_get_flags(tsoh_th) & ~(TH_FIN | TH_PUSH));
} else {
/* This packet will be the last in the TSO burst. */
ip_length = tso->header_len - tso->nh_off + tso->out_len;
diff --git a/sys/dev/sge/if_sge.c b/sys/dev/sge/if_sge.c
index 8146e7cf2e18..80ec0c435ead 100644
--- a/sys/dev/sge/if_sge.c
+++ b/sys/dev/sge/if_sge.c
@@ -603,11 +603,6 @@ sge_attach(device_t dev)
goto fail;
ifp = sc->sge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -680,8 +675,6 @@ sge_detach(device_t dev)
SGE_UNLOCK(sc);
callout_drain(&sc->sge_stat_ch);
}
- if (sc->sge_miibus)
- device_delete_child(dev, sc->sge_miibus);
bus_generic_detach(dev);
if (sc->sge_intrhand)
diff --git a/sys/dev/siis/siis.c b/sys/dev/siis/siis.c
index 54ef7ff440aa..eb6848eddee2 100644
--- a/sys/dev/siis/siis.c
+++ b/sys/dev/siis/siis.c
@@ -38,9 +38,9 @@
#include <sys/mutex.h>
#include <sys/sbuf.h>
#include <sys/sema.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
-#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
@@ -120,15 +120,13 @@ static struct {
static int
siis_probe(device_t dev)
{
- char buf[64];
int i;
uint32_t devid = pci_get_devid(dev);
for (i = 0; siis_ids[i].id != 0; i++) {
if (siis_ids[i].id == devid) {
- snprintf(buf, sizeof(buf), "%s SATA controller",
+ device_set_descf(dev, "%s SATA controller",
siis_ids[i].name);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_DEFAULT);
}
}
@@ -191,13 +189,13 @@ siis_attach(device_t dev)
}
/* Attach all channels on this controller */
for (unit = 0; unit < ctlr->channels; unit++) {
- child = device_add_child(dev, "siisch", -1);
+ child = device_add_child(dev, "siisch", DEVICE_UNIT_ANY);
if (child == NULL)
device_printf(dev, "failed to add channel device\n");
else
device_set_ivars(child, (void *)(intptr_t)unit);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return 0;
}
@@ -205,9 +203,12 @@ static int
siis_detach(device_t dev)
{
struct siis_controller *ctlr = device_get_softc(dev);
+ int error;
/* Detach & delete all children */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
/* Free interrupts. */
if (ctlr->irq.r_irq) {
@@ -449,7 +450,7 @@ static int
siis_ch_probe(device_t dev)
{
- device_set_desc_copy(dev, "SIIS channel");
+ device_set_desc(dev, "SIIS channel");
return (BUS_PROBE_DEFAULT);
}
@@ -1396,7 +1397,7 @@ completeall:
}
xpt_setup_ccb(&ccb->ccb_h, ch->hold[i]->ccb_h.path,
ch->hold[i]->ccb_h.pinfo.priority);
- if (ccb->ccb_h.func_code == XPT_ATA_IO) {
+ if (ch->hold[i]->ccb_h.func_code == XPT_ATA_IO) {
/* READ LOG */
ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
ccb->ccb_h.func_code = XPT_ATA_IO;
diff --git a/sys/dev/sis/if_sis.c b/sys/dev/sis/if_sis.c
index bf96383e9a4a..d00cf0a8128c 100644
--- a/sys/dev/sis/if_sis.c
+++ b/sys/dev/sis/if_sis.c
@@ -898,7 +898,7 @@ sis_attach(device_t dev)
u_char eaddr[ETHER_ADDR_LEN];
struct sis_softc *sc;
if_t ifp;
- int error = 0, pmc;
+ int error = 0;
sc = device_get_softc(dev);
@@ -1057,11 +1057,6 @@ sis_attach(device_t dev)
goto fail;
ifp = sc->sis_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1071,7 +1066,7 @@ sis_attach(device_t dev)
if_setsendqlen(ifp, SIS_TX_LIST_CNT - 1);
if_setsendqready(ifp);
- if (pci_find_cap(sc->sis_dev, PCIY_PMG, &pmc) == 0) {
+ if (pci_has_pm(sc->sis_dev)) {
if (sc->sis_type == SIS_TYPE_83815)
if_setcapabilitiesbit(ifp, IFCAP_WOL, 0);
else
@@ -1151,8 +1146,6 @@ sis_detach(device_t dev)
callout_drain(&sc->sis_stat_ch);
ether_ifdetach(ifp);
}
- if (sc->sis_miibus)
- device_delete_child(dev, sc->sis_miibus);
bus_generic_detach(dev);
if (sc->sis_intrhand)
@@ -2318,8 +2311,6 @@ sis_wol(struct sis_softc *sc)
{
if_t ifp;
uint32_t val;
- uint16_t pmstat;
- int pmc;
ifp = sc->sis_ifp;
if ((if_getcapenable(ifp) & IFCAP_WOL) == 0)
@@ -2346,20 +2337,13 @@ sis_wol(struct sis_softc *sc)
/* Enable silent RX mode. */
SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);
} else {
- if (pci_find_cap(sc->sis_dev, PCIY_PMG, &pmc) != 0)
- return;
val = 0;
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
val |= SIS_PWRMAN_WOL_MAGIC;
CSR_WRITE_4(sc, SIS_PWRMAN_CTL, val);
/* Request PME. */
- pmstat = pci_read_config(sc->sis_dev,
- pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->sis_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->sis_dev);
}
}
diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c
index 7505ef58cfe4..094211cc3c34 100644
--- a/sys/dev/sk/if_sk.c
+++ b/sys/dev/sk/if_sk.c
@@ -185,6 +185,7 @@ static const struct sk_type sk_devs[] = {
static int skc_probe(device_t);
static int skc_attach(device_t);
+static void skc_child_deleted(device_t, device_t);
static int skc_detach(device_t);
static int skc_shutdown(device_t);
static int skc_suspend(device_t);
@@ -291,6 +292,7 @@ static device_method_t skc_methods[] = {
DEVMETHOD(device_resume, skc_resume),
DEVMETHOD(device_shutdown, skc_shutdown),
+ DEVMETHOD(bus_child_deleted, skc_child_deleted),
DEVMETHOD(bus_get_dma_tag, skc_get_dma_tag),
DEVMETHOD_END
@@ -1279,11 +1281,6 @@ sk_attach(device_t dev)
sk_dma_jumbo_alloc(sc_if);
ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc_if->sk_if_dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc_if);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1684,7 +1681,7 @@ skc_attach(device_t dev)
device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize);
}
- sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
+ sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", DEVICE_UNIT_ANY);
if (sc->sk_devs[SK_PORT_A] == NULL) {
device_printf(dev, "failed to add child for PORT_A\n");
error = ENXIO;
@@ -1701,7 +1698,7 @@ skc_attach(device_t dev)
device_set_ivars(sc->sk_devs[SK_PORT_A], port);
if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
- sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
+ sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", DEVICE_UNIT_ANY);
if (sc->sk_devs[SK_PORT_B] == NULL) {
device_printf(dev, "failed to add child for PORT_B\n");
error = ENXIO;
@@ -1721,11 +1718,7 @@ skc_attach(device_t dev)
/* Turn on the 'driver is loaded' LED. */
CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);
- error = bus_generic_attach(dev);
- if (error) {
- device_printf(dev, "failed to attach port(s)\n");
- goto fail;
- }
+ bus_attach_children(dev);
/* Hook interrupt last to avoid having to lock softc */
error = bus_setup_intr(dev, sc->sk_res[1], INTR_TYPE_NET|INTR_MPSAFE,
@@ -1743,6 +1736,12 @@ fail:
return(error);
}
+static void
+skc_child_deleted(device_t dev, device_t child)
+{
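+	/* Free the per-port ivars allocated in skc_attach(). */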
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
/*
* Shutdown hardware and free up resources. This can be called any
* time after the mutex has been initialized. It is called in both
@@ -1772,15 +1771,6 @@ sk_detach(device_t dev)
ether_ifdetach(ifp);
SK_IF_LOCK(sc_if);
}
- /*
- * We're generally called from skc_detach() which is using
- * device_delete_child() to get to here. It's already trashed
- * miibus for us, so don't do it here or we'll panic.
- */
- /*
- if (sc_if->sk_miibus != NULL)
- device_delete_child(dev, sc_if->sk_miibus);
- */
bus_generic_detach(dev);
sk_dma_jumbo_free(sc_if);
sk_dma_free(sc_if);
@@ -1799,17 +1789,7 @@ skc_detach(device_t dev)
sc = device_get_softc(dev);
KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized"));
- if (device_is_alive(dev)) {
- if (sc->sk_devs[SK_PORT_A] != NULL) {
- free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF);
- device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
- }
- if (sc->sk_devs[SK_PORT_B] != NULL) {
- free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF);
- device_delete_child(dev, sc->sk_devs[SK_PORT_B]);
- }
- bus_generic_detach(dev);
- }
+ bus_generic_detach(dev);
if (sc->sk_intrhand)
bus_teardown_intr(dev, sc->sk_res[1], sc->sk_intrhand);
diff --git a/sys/dev/smartpqi/smartpqi_event.c b/sys/dev/smartpqi/smartpqi_event.c
index f000d9ce9db3..88dcf45dd08a 100644
--- a/sys/dev/smartpqi/smartpqi_event.c
+++ b/sys/dev/smartpqi/smartpqi_event.c
@@ -115,7 +115,7 @@ pqisrc_ack_all_events(void *arg1)
pending_event = &softs->pending_events[0];
- for (i=0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
+ for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
if (pending_event->pending == true) {
pending_event->pending = false;
pqisrc_acknowledge_event(softs, pending_event);
@@ -417,7 +417,7 @@ pqisrc_report_event_config(pqisrc_softstate_t *softs)
softs->event_config.num_event_descriptors = MIN(event_config_p->num_event_descriptors,
PQI_MAX_EVENT_DESCRIPTORS) ;
- for (i=0; i < softs->event_config.num_event_descriptors ;i++){
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
softs->event_config.descriptors[i].event_type =
event_config_p->descriptors[i].event_type;
}
@@ -477,7 +477,7 @@ pqisrc_set_event_config(pqisrc_softstate_t *softs)
event_config_p->num_event_descriptors = softs->event_config.num_event_descriptors;
- for (i=0; i < softs->event_config.num_event_descriptors ; i++){
+ for (i = 0; i < softs->event_config.num_event_descriptors; i++) {
event_config_p->descriptors[i].event_type =
softs->event_config.descriptors[i].event_type;
if( pqisrc_event_type_to_event_index(event_config_p->descriptors[i].event_type) != -1)
diff --git a/sys/dev/smartpqi/smartpqi_queue.c b/sys/dev/smartpqi/smartpqi_queue.c
index 2e80b01b5436..f05c951cd4f9 100644
--- a/sys/dev/smartpqi/smartpqi_queue.c
+++ b/sys/dev/smartpqi/smartpqi_queue.c
@@ -700,7 +700,7 @@ pqisrc_create_op_obq(pqisrc_softstate_t *softs,
} else {
int i = 0;
DBG_WARN("Error Status Descriptors\n");
- for(i = 0; i < 4;i++)
+ for (i = 0; i < 4; i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_oq.status_desc[i]);
}
@@ -743,7 +743,7 @@ pqisrc_create_op_ibq(pqisrc_softstate_t *softs,
} else {
int i = 0;
DBG_WARN("Error Status Decsriptors\n");
- for(i = 0; i < 4;i++)
+ for (i = 0; i < 4; i++)
DBG_WARN(" %x ",admin_resp.resp_type.create_op_iq.status_desc[i]);
}
diff --git a/sys/dev/smartpqi/smartpqi_request.c b/sys/dev/smartpqi/smartpqi_request.c
index 1c0bbc2a1db5..d3c4fb989a99 100644
--- a/sys/dev/smartpqi/smartpqi_request.c
+++ b/sys/dev/smartpqi/smartpqi_request.c
@@ -1284,7 +1284,7 @@ pqisrc_calc_disk_params(pqisrc_softstate_t *softs, aio_req_locator_t *l, rcb_t
return true;
}
-/* Not AIO-eligible if it isnt' a single row/column. */
+/* Not AIO-eligible if it isn't a single row/column. */
static inline boolean_t
pqisrc_is_single_row_column(pqisrc_softstate_t *softs, aio_req_locator_t *l)
{
diff --git a/sys/dev/smbios/smbios.c b/sys/dev/smbios/smbios.c
index 883e8f501b59..8f2b4a56b37b 100644
--- a/sys/dev/smbios/smbios.c
+++ b/sys/dev/smbios/smbios.c
@@ -76,8 +76,8 @@ static void
smbios_identify (driver_t *driver, device_t parent)
{
#ifdef ARCH_MAY_USE_EFI
- struct uuid efi_smbios = EFI_TABLE_SMBIOS;
- struct uuid efi_smbios3 = EFI_TABLE_SMBIOS3;
+ efi_guid_t efi_smbios = EFI_TABLE_SMBIOS;
+ efi_guid_t efi_smbios3 = EFI_TABLE_SMBIOS3;
void *addr_efi;
#endif
struct smbios_eps *eps;
@@ -85,8 +85,8 @@ smbios_identify (driver_t *driver, device_t parent)
void *ptr;
device_t child;
vm_paddr_t addr = 0;
- size_t map_size = sizeof (*eps);
- int length;
+ size_t map_size = sizeof(*eps);
+ uint8_t length;
if (!device_is_alive(parent))
return;
@@ -94,7 +94,7 @@ smbios_identify (driver_t *driver, device_t parent)
#ifdef ARCH_MAY_USE_EFI
if (!efi_get_table(&efi_smbios3, &addr_efi)) {
addr = (vm_paddr_t)addr_efi;
- map_size = sizeof (*eps3);
+ map_size = sizeof(*eps3);
} else if (!efi_get_table(&efi_smbios, &addr_efi)) {
addr = (vm_paddr_t)addr_efi;
}
@@ -102,59 +102,88 @@ smbios_identify (driver_t *driver, device_t parent)
#endif
#if defined(__amd64__) || defined(__i386__)
- if (addr == 0)
- addr = bios_sigsearch(SMBIOS_START, SMBIOS_SIG, SMBIOS_LEN,
+ if (addr == 0) {
+ addr = bios_sigsearch(SMBIOS_START, SMBIOS3_SIG, SMBIOS3_LEN,
SMBIOS_STEP, SMBIOS_OFF);
+ if (addr != 0)
+ map_size = sizeof(*eps3);
+ else
+ addr = bios_sigsearch(SMBIOS_START,
+ SMBIOS_SIG, SMBIOS_LEN, SMBIOS_STEP, SMBIOS_OFF);
+ }
#endif
- if (addr != 0) {
- ptr = pmap_mapbios(addr, map_size);
- if (ptr == NULL)
- return;
- if (map_size == sizeof (*eps3)) {
- eps3 = ptr;
- length = eps3->length;
- if (memcmp(eps3->anchor_string,
- SMBIOS3_SIG, SMBIOS3_LEN) != 0) {
- printf("smbios3: corrupt sig %s found\n",
- eps3->anchor_string);
- return;
- }
- } else {
- eps = ptr;
- length = eps->length;
- if (memcmp(eps->anchor_string,
- SMBIOS_SIG, SMBIOS_LEN) != 0) {
- printf("smbios: corrupt sig %s found\n",
- eps->anchor_string);
- return;
- }
- }
- if (length != map_size) {
- u_int8_t major, minor;
-
- major = eps->major_version;
- minor = eps->minor_version;
-
- /* SMBIOS v2.1 implementation might use 0x1e. */
- if (length == 0x1e && major == 2 && minor == 1) {
- length = 0x1f;
- } else {
- pmap_unmapbios(eps, map_size);
- return;
- }
+ if (addr == 0)
+ return;
+
+ ptr = pmap_mapbios(addr, map_size);
+ if (ptr == NULL) {
+ printf("smbios: Unable to map memory.\n");
+ return;
+ }
+ if (map_size == sizeof(*eps3)) {
+ eps3 = ptr;
+ length = eps3->length;
+ if (memcmp(eps3->anchor_string, SMBIOS3_SIG, SMBIOS3_LEN) != 0)
+ goto corrupt_sig;
+ } else {
+ eps = ptr;
+ length = eps->length;
+ if (memcmp(eps->anchor_string, SMBIOS_SIG, SMBIOS_LEN) != 0)
+ goto corrupt_sig;
+ }
+ if (length != map_size) {
+ /*
+ * SMBIOS v2.1 implementations might use 0x1e because the
+ * standard was then erroneous.
+ */
+ if (length == 0x1e && map_size == sizeof(*eps) &&
+ eps->major_version == 2 && eps->minor_version == 1)
+ length = map_size;
+ else {
+ printf("smbios: %s-bit Entry Point: Invalid length: "
+ "Got %hhu, expected %zu\n",
+ map_size == sizeof(*eps3) ? "64" : "32",
+ length, map_size);
+ goto unmap_return;
}
+ }
- child = BUS_ADD_CHILD(parent, 5, "smbios", -1);
- device_set_driver(child, driver);
+ child = BUS_ADD_CHILD(parent, 5, "smbios", DEVICE_UNIT_ANY);
+ device_set_driver(child, driver);
- /* smuggle the phys addr into probe and attach */
- bus_set_resource(child, SYS_RES_MEMORY, 0, addr, length);
- device_set_desc(child, "System Management BIOS");
- pmap_unmapbios(ptr, map_size);
- }
+ /* smuggle the phys addr into probe and attach */
+ bus_set_resource(child, SYS_RES_MEMORY, 0, addr, length);
+ device_set_desc(child, "System Management BIOS");
+unmap_return:
+ pmap_unmapbios(ptr, map_size);
return;
+
+corrupt_sig:
+ {
+ const char *sig;
+ const char *table_ver_str;
+ size_t i, end;
+
+ if (map_size == sizeof(*eps3)) {
+ sig = eps3->anchor_string;
+ table_ver_str = "64";
+ end = SMBIOS3_LEN;
+ } else {
+ sig = eps->anchor_string;
+ table_ver_str = "32";
+ end = SMBIOS_LEN;
+ }
+
+ /* Space after ':' printed by the loop. */
+ printf("smbios: %s-bit Entry Point: Corrupt signature (hex):",
+ table_ver_str);
+ for (i = 0; i < end; ++i)
+ printf(" %02hhx", sig[i]);
+ printf("\n");
+ }
+ goto unmap_return;
}
static int
@@ -205,18 +234,28 @@ smbios_attach (device_t dev)
if (sc->is_eps3) {
sc->eps3 = va;
- device_printf(dev, "Version: %u.%u",
+ device_printf(dev, "Entry point: v3 (64-bit), Version: %u.%u\n",
sc->eps3->major_version, sc->eps3->minor_version);
+ if (bootverbose)
+ device_printf(dev,
+ "Docrev: %u, Entry Point Revision: %u\n",
+ sc->eps3->docrev, sc->eps3->entry_point_revision);
} else {
+ const struct smbios_eps *const eps = va;
+ const uint8_t bcd = eps->BCD_revision;
+
sc->eps = va;
- device_printf(dev, "Version: %u.%u",
- sc->eps->major_version, sc->eps->minor_version);
- if (bcd2bin(sc->eps->BCD_revision))
- printf(", BCD Revision: %u.%u",
- bcd2bin(sc->eps->BCD_revision >> 4),
- bcd2bin(sc->eps->BCD_revision & 0x0f));
+ device_printf(dev, "Entry point: v2.1 (32-bit), Version: %u.%u",
+ eps->major_version, eps->minor_version);
+ if (bcd < LIBKERN_LEN_BCD2BIN && bcd2bin(bcd) != 0)
+ printf(", BCD Revision: %u.%u\n",
+ bcd2bin(bcd >> 4), bcd2bin(bcd & 0x0f));
+ else
+ printf("\n");
+ if (bootverbose)
+ device_printf(dev, "Entry Point Revision: %u\n",
+ eps->entry_point_revision);
}
- printf("\n");
return (0);
}
@@ -299,25 +338,24 @@ smbios_eps3 (void *v)
static int
smbios_cksum (void *v)
{
- struct smbios3_eps *eps3;
- struct smbios_eps *eps;
- u_int8_t *ptr;
+ const u_int8_t *ptr;
u_int8_t cksum;
u_int8_t length;
int i;
if (smbios_eps3(v)) {
- eps3 = (struct smbios3_eps *)v;
+ const struct smbios3_eps *eps3 = v;
+
length = eps3->length;
} else {
- eps = (struct smbios_eps *)v;
+ const struct smbios_eps *eps = v;
+
length = eps->length;
}
- ptr = (u_int8_t *)v;
+ ptr = v;
cksum = 0;
- for (i = 0; i < length; i++) {
+ for (i = 0; i < length; i++)
cksum += ptr[i];
- }
return (cksum);
}
diff --git a/sys/dev/smbios/smbios.h b/sys/dev/smbios/smbios.h
index 42b7e1181486..01e67556cfc0 100644
--- a/sys/dev/smbios/smbios.h
+++ b/sys/dev/smbios/smbios.h
@@ -80,11 +80,13 @@ struct smbios_structure_header {
typedef void (*smbios_callback_t)(struct smbios_structure_header *, void *);
static inline void
-smbios_walk_table(uint8_t *p, int entries, smbios_callback_t cb, void *arg)
+smbios_walk_table(uint8_t *p, int entries, vm_size_t len,
+ smbios_callback_t cb, void *arg)
{
struct smbios_structure_header *s;
+ uint8_t *endp = p + len;
- while (entries--) {
+ while (entries-- && p < endp) {
s = (struct smbios_structure_header *)p;
cb(s, arg);
@@ -93,7 +95,7 @@ smbios_walk_table(uint8_t *p, int entries, smbios_callback_t cb, void *arg)
* formatted area of this structure.
*/
p += s->length;
- while (!(p[0] == 0 && p[1] == 0))
+ while (p + 1 < endp && !(p[0] == 0 && p[1] == 0))
p++;
/*
diff --git a/sys/dev/smbus/smb.c b/sys/dev/smbus/smb.c
index ee323c835f10..514c42b88131 100644
--- a/sys/dev/smbus/smb.c
+++ b/sys/dev/smbus/smb.c
@@ -126,8 +126,8 @@ static void
smb_identify(driver_t *driver, device_t parent)
{
- if (device_find_child(parent, "smb", -1) == NULL)
- BUS_ADD_CHILD(parent, 0, "smb", -1);
+ if (device_find_child(parent, "smb", DEVICE_UNIT_ANY) == NULL)
+ BUS_ADD_CHILD(parent, 0, "smb", DEVICE_UNIT_ANY);
}
static int
diff --git a/sys/dev/smbus/smbconf.h b/sys/dev/smbus/smbconf.h
index 4c646df20a30..a6c320594064 100644
--- a/sys/dev/smbus/smbconf.h
+++ b/sys/dev/smbus/smbconf.h
@@ -30,7 +30,7 @@
#include <sys/queue.h>
-#define SMBPRI (PZERO+8) /* XXX sleep/wakeup queue priority */
+#define SMBPRI (PWAIT) /* XXX sleep/wakeup queue priority */
#define n(flags) (~(flags) & (flags))
diff --git a/sys/dev/smbus/smbus.c b/sys/dev/smbus/smbus.c
index eed1aa6efce0..9a37c482654b 100644
--- a/sys/dev/smbus/smbus.c
+++ b/sys/dev/smbus/smbus.c
@@ -67,9 +67,9 @@ smbus_attach(device_t dev)
struct smbus_softc *sc = device_get_softc(dev);
mtx_init(&sc->lock, device_get_nameunit(dev), "smbus", MTX_DEF);
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -83,7 +83,6 @@ smbus_detach(device_t dev)
error = bus_generic_detach(dev);
if (error)
return (error);
- device_delete_children(dev);
mtx_destroy(&sc->lock);
return (0);
@@ -113,6 +112,12 @@ smbus_add_child(device_t dev, u_int order, const char *name, int unit)
}
static void
+smbus_child_deleted(device_t dev, device_t child)
+{
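+	/* Free the ivars allocated when the child was added. */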
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
+static void
smbus_hinted_child(device_t bus, const char *dname, int dunit)
{
struct smbus_ivar *devi;
@@ -222,6 +227,7 @@ static device_method_t smbus_methods[] = {
/* bus interface */
DEVMETHOD(bus_add_child, smbus_add_child),
+ DEVMETHOD(bus_child_deleted, smbus_child_deleted),
DEVMETHOD(bus_hinted_child, smbus_hinted_child),
DEVMETHOD(bus_probe_nomatch, smbus_probe_nomatch),
DEVMETHOD(bus_child_location, smbus_child_location),
diff --git a/sys/dev/smc/if_smc.c b/sys/dev/smc/if_smc.c
index cc7a7b1bb12b..cc05cfd45f67 100644
--- a/sys/dev/smc/if_smc.c
+++ b/sys/dev/smc/if_smc.c
@@ -313,10 +313,6 @@ smc_attach(device_t dev)
sc->smc_dev = dev;
ifp = sc->smc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- error = ENOSPC;
- goto done;
- }
mtx_init(&sc->smc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
@@ -454,10 +450,7 @@ smc_detach(device_t dev)
if_free(sc->smc_ifp);
}
- if (sc->smc_miibus != NULL) {
- device_delete_child(sc->smc_dev, sc->smc_miibus);
- bus_generic_detach(sc->smc_dev);
- }
+ bus_generic_detach(sc->smc_dev);
if (sc->smc_reg != NULL) {
type = SYS_RES_IOPORT;
diff --git a/sys/dev/sound/driver.c b/sys/dev/sound/driver.c
index 927941ab3d01..c9219261e08f 100644
--- a/sys/dev/sound/driver.c
+++ b/sys/dev/sound/driver.c
@@ -55,18 +55,27 @@ static moduledata_t snd_mod = {
DECLARE_MODULE(snd_driver, snd_mod, SI_SUB_DRIVERS, SI_ORDER_MIDDLE);
MODULE_VERSION(snd_driver, 1);
+#if defined(__powerpc__)
+MODULE_DEPEND(snd_driver, snd_ai2s, 1, 1, 1);
+#endif
MODULE_DEPEND(snd_driver, snd_als4000, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_atiixp, 1, 1, 1);
+#if defined(__i386__) || defined(__amd64__)
MODULE_DEPEND(snd_driver, snd_cmi, 1, 1, 1);
+#endif
MODULE_DEPEND(snd_driver, snd_cs4281, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_csa, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_csapcm, 1, 1, 1);
+#if defined(__powerpc__)
+MODULE_DEPEND(snd_driver, snd_davbus, 1, 1, 1);
+#endif
MODULE_DEPEND(snd_driver, snd_emu10kx, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_envy24, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_envy24ht, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_es137x, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_fm801, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_hda, 1, 1, 1);
+MODULE_DEPEND(snd_driver, snd_hdsp, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_hdspe, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_ich, 1, 1, 1);
MODULE_DEPEND(snd_driver, snd_maestro3, 1, 1, 1);
diff --git a/sys/dev/sound/dummy.c b/sys/dev/sound/dummy.c
new file mode 100644
index 000000000000..4df5b112d3f4
--- /dev/null
+++ b/sys/dev/sound/dummy.c
@@ -0,0 +1,385 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * This software was developed by Christos Margiolis <christos@FreeBSD.org>
+ * under sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+
+#ifdef HAVE_KERNEL_OPTION_HEADERS
+#include "opt_snd.h"
+#endif
+
+#include <dev/sound/pcm/sound.h>
+#include <mixer_if.h>
+
+#define DUMMY_NPCHAN 1
+#define DUMMY_NRCHAN 1
+#define DUMMY_NCHAN (DUMMY_NPCHAN + DUMMY_NRCHAN)
+
+struct dummy_chan {
+ struct dummy_softc *sc;
+ struct pcm_channel *chan;
+ struct snd_dbuf *buf;
+ struct pcmchan_caps *caps;
+ uint32_t ptr;
+ int dir;
+ int run;
+};
+
+struct dummy_softc {
+ struct snddev_info info;
+ device_t dev;
+ uint32_t cap_fmts[4];
+ struct pcmchan_caps caps;
+ int chnum;
+ struct dummy_chan chans[DUMMY_NCHAN];
+ struct callout callout;
+ struct mtx *lock;
+ bool stopped;
+};
+
+static bool
+dummy_active(struct dummy_softc *sc)
+{
+ struct dummy_chan *ch;
+ int i;
+
+ snd_mtxassert(sc->lock);
+
+ for (i = 0; i < sc->chnum; i++) {
+ ch = &sc->chans[i];
+ if (ch->run)
+ return (true);
+ }
+
+ /* No channel is running at the moment. */
+ return (false);
+}
+
+static void
+dummy_chan_io(void *arg)
+{
+ struct dummy_softc *sc = arg;
+ struct dummy_chan *ch;
+ int i = 0;
+
+ if (sc->stopped)
+ return;
+
+ /* Do not reschedule if no channel is running. */
+ if (!dummy_active(sc))
+ return;
+
+ for (i = 0; i < sc->chnum; i++) {
+ ch = &sc->chans[i];
+ if (!ch->run)
+ continue;
+ if (ch->dir == PCMDIR_PLAY)
+ ch->ptr += sndbuf_getblksz(ch->buf);
+ else
+ sndbuf_fillsilence(ch->buf);
+ snd_mtxunlock(sc->lock);
+ chn_intr(ch->chan);
+ snd_mtxlock(sc->lock);
+ }
+ if (!sc->stopped)
+ callout_schedule(&sc->callout, 1);
+}
+
+static int
+dummy_chan_free(kobj_t obj, void *data)
+{
+	struct dummy_chan *ch = data;
+ uint8_t *buf;
+
+ buf = sndbuf_getbuf(ch->buf);
+ if (buf != NULL)
+ free(buf, M_DEVBUF);
+
+ return (0);
+}
+
+static void *
+dummy_chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
+ struct pcm_channel *c, int dir)
+{
+ struct dummy_softc *sc;
+ struct dummy_chan *ch;
+ uint8_t *buf;
+ size_t bufsz;
+
+ sc = devinfo;
+
+ snd_mtxlock(sc->lock);
+
+ ch = &sc->chans[sc->chnum++];
+ ch->sc = sc;
+ ch->dir = dir;
+ ch->chan = c;
+ ch->buf = b;
+ ch->caps = &sc->caps;
+
+ snd_mtxunlock(sc->lock);
+
+ bufsz = pcm_getbuffersize(sc->dev, 2048, 2048, 65536);
+ buf = malloc(bufsz, M_DEVBUF, M_WAITOK | M_ZERO);
+ if (sndbuf_setup(ch->buf, buf, bufsz) != 0) {
+ dummy_chan_free(obj, ch);
+ return (NULL);
+ }
+
+ return (ch);
+}
+
+static int
+dummy_chan_setformat(kobj_t obj, void *data, uint32_t format)
+{
+ struct dummy_chan *ch = data;
+ int i;
+
+ for (i = 0; ch->caps->fmtlist[i]; i++)
+ if (format == ch->caps->fmtlist[i])
+ return (0);
+
+ return (EINVAL);
+}
+
+static uint32_t
+dummy_chan_setspeed(kobj_t obj, void *data, uint32_t speed)
+{
+ struct dummy_chan *ch = data;
+
+ RANGE(speed, ch->caps->minspeed, ch->caps->maxspeed);
+
+ return (speed);
+}
+
+static uint32_t
+dummy_chan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
+{
+ struct dummy_chan *ch = data;
+
+ return (sndbuf_getblksz(ch->buf));
+}
+
+static int
+dummy_chan_trigger(kobj_t obj, void *data, int go)
+{
+ struct dummy_chan *ch = data;
+ struct dummy_softc *sc = ch->sc;
+
+ snd_mtxlock(sc->lock);
+
+ if (sc->stopped) {
+ snd_mtxunlock(sc->lock);
+ return (0);
+ }
+
+ switch (go) {
+ case PCMTRIG_START:
+ ch->ptr = 0;
+ ch->run = 1;
+ callout_reset(&sc->callout, 1, dummy_chan_io, sc);
+ break;
+ case PCMTRIG_STOP:
+ case PCMTRIG_ABORT:
+ ch->run = 0;
+ /* If all channels are stopped, stop the callout as well. */
+ if (!dummy_active(sc))
+ callout_stop(&sc->callout);
+ default:
+ break;
+ }
+
+ snd_mtxunlock(sc->lock);
+
+ return (0);
+}
+
+static uint32_t
+dummy_chan_getptr(kobj_t obj, void *data)
+{
+ struct dummy_chan *ch = data;
+
+ return (ch->run ? ch->ptr : 0);
+}
+
+static struct pcmchan_caps *
+dummy_chan_getcaps(kobj_t obj, void *data)
+{
+ struct dummy_chan *ch = data;
+
+ return (ch->caps);
+}
+
+static kobj_method_t dummy_chan_methods[] = {
+ KOBJMETHOD(channel_init, dummy_chan_init),
+ KOBJMETHOD(channel_free, dummy_chan_free),
+ KOBJMETHOD(channel_setformat, dummy_chan_setformat),
+ KOBJMETHOD(channel_setspeed, dummy_chan_setspeed),
+	KOBJMETHOD(channel_setblocksize, dummy_chan_setblocksize),
+ KOBJMETHOD(channel_trigger, dummy_chan_trigger),
+ KOBJMETHOD(channel_getptr, dummy_chan_getptr),
+ KOBJMETHOD(channel_getcaps, dummy_chan_getcaps),
+ KOBJMETHOD_END
+};
+
+CHANNEL_DECLARE(dummy_chan);
+
+static int
+dummy_mixer_init(struct snd_mixer *m)
+{
+ struct dummy_softc *sc;
+
+ sc = mix_getdevinfo(m);
+ if (sc == NULL)
+ return (-1);
+
+ pcm_setflags(sc->dev, pcm_getflags(sc->dev) | SD_F_SOFTPCMVOL);
+ mix_setdevs(m, SOUND_MASK_PCM | SOUND_MASK_VOLUME | SOUND_MASK_RECLEV);
+ mix_setrecdevs(m, SOUND_MASK_RECLEV);
+
+ return (0);
+}
+
+static int
+dummy_mixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right)
+{
+ return (0);
+}
+
+static uint32_t
+dummy_mixer_setrecsrc(struct snd_mixer *m, uint32_t src)
+{
+ return (src == SOUND_MASK_RECLEV ? src : 0);
+}
+
+static kobj_method_t dummy_mixer_methods[] = {
+ KOBJMETHOD(mixer_init, dummy_mixer_init),
+ KOBJMETHOD(mixer_set, dummy_mixer_set),
+ KOBJMETHOD(mixer_setrecsrc, dummy_mixer_setrecsrc),
+ KOBJMETHOD_END
+};
+
+MIXER_DECLARE(dummy_mixer);
+
+static void
+dummy_identify(driver_t *driver, device_t parent)
+{
+ if (device_find_child(parent, driver->name, DEVICE_UNIT_ANY) != NULL)
+ return;
+ if (BUS_ADD_CHILD(parent, 0, driver->name, DEVICE_UNIT_ANY) == NULL)
+ device_printf(parent, "add child failed\n");
+}
+
+static int
+dummy_probe(device_t dev)
+{
+ device_set_desc(dev, "Dummy Audio Device");
+
+ return (0);
+}
+
+static int
+dummy_attach(device_t dev)
+{
+ struct dummy_softc *sc;
+ char status[SND_STATUSLEN];
+ int i = 0;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_dummy softc");
+ callout_init_mtx(&sc->callout, sc->lock, 0);
+
+ sc->cap_fmts[0] = SND_FORMAT(AFMT_S32_LE, 2, 0);
+ sc->cap_fmts[1] = SND_FORMAT(AFMT_S24_LE, 2, 0);
+ sc->cap_fmts[2] = SND_FORMAT(AFMT_S16_LE, 2, 0);
+ sc->cap_fmts[3] = 0;
+ sc->caps = (struct pcmchan_caps){
+ 8000, /* minspeed */
+ 96000, /* maxspeed */
+ sc->cap_fmts, /* fmtlist */
+ 0, /* caps */
+ };
+
+ pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);
+ pcm_init(dev, sc);
+ for (i = 0; i < DUMMY_NPCHAN; i++)
+ pcm_addchan(dev, PCMDIR_PLAY, &dummy_chan_class, sc);
+ for (i = 0; i < DUMMY_NRCHAN; i++)
+ pcm_addchan(dev, PCMDIR_REC, &dummy_chan_class, sc);
+
+ snprintf(status, SND_STATUSLEN, "on %s",
+ device_get_nameunit(device_get_parent(dev)));
+ if (pcm_register(dev, status))
+ return (ENXIO);
+ mixer_init(dev, &dummy_mixer_class, sc);
+
+ return (0);
+}
+
+static int
+dummy_detach(device_t dev)
+{
+ struct dummy_softc *sc = device_get_softc(dev);
+ int err;
+
+ snd_mtxlock(sc->lock);
+ sc->stopped = true;
+ snd_mtxunlock(sc->lock);
+ callout_drain(&sc->callout);
+ err = pcm_unregister(dev);
+ snd_mtxfree(sc->lock);
+
+ return (err);
+}
+
+static device_method_t dummy_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_identify, dummy_identify),
+ DEVMETHOD(device_probe, dummy_probe),
+ DEVMETHOD(device_attach, dummy_attach),
+ DEVMETHOD(device_detach, dummy_detach),
+ DEVMETHOD_END
+};
+
+static driver_t dummy_driver = {
+ "pcm",
+ dummy_methods,
+ sizeof(struct dummy_softc),
+};
+
+DRIVER_MODULE(snd_dummy, nexus, dummy_driver, 0, 0);
+MODULE_DEPEND(snd_dummy, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
+MODULE_VERSION(snd_dummy, 1);
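Worth noting how the new driver paces itself without hardware. The sketch below restates the timing logic of dummy_chan_io()/dummy_chan_trigger() above; dummy_kick_example is a hypothetical name, not part of the file:

static void
dummy_kick_example(struct dummy_softc *sc)
{
	/*
	 * A delay of 1 fires dummy_chan_io() roughly hz times per
	 * second. Each run advances every playing channel by one
	 * sndbuf block, fills capture channels with silence, and
	 * chn_intr() reports the elapsed "period" to the PCM layer.
	 */
	callout_reset(&sc->callout, 1, dummy_chan_io, sc);
}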
diff --git a/sys/dev/sound/fdt/audio_soc.c b/sys/dev/sound/fdt/audio_soc.c
index 3e937720bb5b..c2bdea399364 100644
--- a/sys/dev/sound/fdt/audio_soc.c
+++ b/sys/dev/sound/fdt/audio_soc.c
@@ -367,7 +367,7 @@ audio_soc_init(void *arg)
auxdev = OF_device_from_xref(aux_devs[i]);
if (auxdev == NULL)
device_printf(sc->dev, "warning: no driver attached to aux node\n");
- aux_node = (struct audio_soc_aux_node *)malloc(sizeof(*aux_node), M_DEVBUF, M_NOWAIT);
+ aux_node = malloc(sizeof(*aux_node), M_DEVBUF, M_NOWAIT);
if (aux_node == NULL) {
device_printf(sc->dev, "failed to allocate aux node struct\n");
return;
@@ -397,10 +397,7 @@ audio_soc_init(void *arg)
}
}
- if (pcm_register(sc->dev, sc, 1, 1)) {
- device_printf(sc->dev, "failed to register PCM\n");
- return;
- }
+ pcm_init(sc->dev, sc);
sc->play_channel.sc = sc;
sc->rec_channel.sc = sc;
@@ -408,7 +405,10 @@ audio_soc_init(void *arg)
pcm_addchan(sc->dev, PCMDIR_PLAY, &audio_soc_chan_class, &sc->play_channel);
pcm_addchan(sc->dev, PCMDIR_REC, &audio_soc_chan_class, &sc->rec_channel);
- pcm_setstatus(sc->dev, "at simplebus");
+ if (pcm_register(sc->dev, "at simplebus")) {
+ device_printf(sc->dev, "failed to register PCM\n");
+ return;
+ }
AUDIO_DAI_SETUP_INTR(sc->cpu_dev, audio_soc_intr, sc);
AUDIO_DAI_SETUP_MIXER(sc->codec_dev, sc->dev);
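This hunk shows the reworked pcm(4) bring-up sequence used across the sound changes in this diff. In the sketch below, "dev", "sc" and "chan_class" are placeholders; the point is the new ordering: pcm_init() replaces the old pcm_register(dev, sc, play, rec), channels are added in between, and pcm_register(dev, status) finishes the job pcm_setstatus() used to do:

	pcm_init(dev, sc);
	pcm_addchan(dev, PCMDIR_PLAY, &chan_class, sc);
	pcm_addchan(dev, PCMDIR_REC, &chan_class, sc);
	if (pcm_register(dev, "at mybus") != 0)
		return (ENXIO);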
diff --git a/sys/dev/sound/macio/aoa.c b/sys/dev/sound/macio/aoa.c
index 27626b3d570a..9861bbd92a0c 100644
--- a/sys/dev/sound/macio/aoa.c
+++ b/sys/dev/sound/macio/aoa.c
@@ -372,8 +372,7 @@ aoa_attach(void *xsc)
sc = xsc;
self = sc->sc_dev;
- if (pcm_register(self, sc, 1, 0))
- return (ENXIO);
+ pcm_init(self, sc);
err = pcm_getbuffersize(self, AOA_BUFFER_SIZE, AOA_BUFFER_SIZE,
AOA_BUFFER_SIZE);
@@ -382,7 +381,6 @@ aoa_attach(void *xsc)
pcm_addchan(self, PCMDIR_PLAY, &aoa_chan_class, sc);
snprintf(status, sizeof(status), "at %s", ofw_bus_get_name(self));
- pcm_setstatus(self, status);
- return (0);
+ return (pcm_register(self, status));
}
diff --git a/sys/dev/sound/macio/i2s.c b/sys/dev/sound/macio/i2s.c
index 5f8cb3aa15f7..647d66c27bba 100644
--- a/sys/dev/sound/macio/i2s.c
+++ b/sys/dev/sound/macio/i2s.c
@@ -241,10 +241,8 @@ i2s_attach(device_t self)
* Register a hook for delayed attach in order to allow
* the I2C controller to attach.
*/
- if ((i2s_delayed_attach = malloc(sizeof(struct intr_config_hook),
- M_TEMP, M_WAITOK | M_ZERO)) == NULL)
- return (ENOMEM);
-
+ i2s_delayed_attach = malloc(sizeof(struct intr_config_hook),
+ M_TEMP, M_WAITOK | M_ZERO);
i2s_delayed_attach->ich_func = i2s_postattach;
i2s_delayed_attach->ich_arg = sc;
diff --git a/sys/dev/sound/macio/onyx.c b/sys/dev/sound/macio/onyx.c
index 00c7b826f142..d13f3da92db6 100644
--- a/sys/dev/sound/macio/onyx.c
+++ b/sys/dev/sound/macio/onyx.c
@@ -197,7 +197,6 @@ onyx_probe(device_t dev)
if (strcmp(name, "codec") == 0) {
if (iicbus_get_addr(dev) != PCM3052_IICADDR)
return (ENXIO);
- } else if (strcmp(name, "codec") == 0) {
compat = ofw_bus_get_compat(dev);
if (compat == NULL || strcmp(compat, "pcm3052") != 0)
return (ENXIO);
diff --git a/sys/dev/sound/midi/midi.c b/sys/dev/sound/midi/midi.c
index 81c20580f7b8..6753f864ba9c 100644
--- a/sys/dev/sound/midi/midi.c
+++ b/sys/dev/sound/midi/midi.c
@@ -30,12 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
- /*
- * Parts of this file started out as NetBSD: midi.c 1.31
- * They are mostly gone. Still the most obvious will be the state
- * machine midi_in
- */
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
@@ -66,34 +60,18 @@
#include "mpu_if.h"
#include <dev/sound/midi/midiq.h>
-#include "synth_if.h"
MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area");
#ifndef KOBJMETHOD_END
#define KOBJMETHOD_END { NULL, NULL }
#endif
-#define PCMMKMINOR(u, d, c) ((((c) & 0xff) << 16) | (((u) & 0x0f) << 4) | ((d) & 0x0f))
-#define MIDIMKMINOR(u, d, c) PCMMKMINOR(u, d, c)
-
-#define MIDI_DEV_RAW 2
#define MIDI_DEV_MIDICTL 12
enum midi_states {
MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA
};
-/*
- * The MPU interface current has init() uninit() inqsize() outqsize()
- * callback() : fiddle with the tx|rx status.
- */
-
-#include "mpu_if.h"
-
-/*
- * /dev/rmidi Structure definitions
- */
-
#define MIDI_NAMELEN 16
struct snd_midi {
KOBJ_FIELDS;
@@ -119,93 +97,12 @@ struct snd_midi {
* complete command packets. */
struct proc *async;
struct cdev *dev;
- struct synth_midi *synth;
- int synth_flags;
TAILQ_ENTRY(snd_midi) link;
};
-struct synth_midi {
- KOBJ_FIELDS;
- struct snd_midi *m;
-};
-
-static synth_open_t midisynth_open;
-static synth_close_t midisynth_close;
-static synth_writeraw_t midisynth_writeraw;
-static synth_killnote_t midisynth_killnote;
-static synth_startnote_t midisynth_startnote;
-static synth_setinstr_t midisynth_setinstr;
-static synth_alloc_t midisynth_alloc;
-static synth_controller_t midisynth_controller;
-static synth_bender_t midisynth_bender;
-
-static kobj_method_t midisynth_methods[] = {
- KOBJMETHOD(synth_open, midisynth_open),
- KOBJMETHOD(synth_close, midisynth_close),
- KOBJMETHOD(synth_writeraw, midisynth_writeraw),
- KOBJMETHOD(synth_setinstr, midisynth_setinstr),
- KOBJMETHOD(synth_startnote, midisynth_startnote),
- KOBJMETHOD(synth_killnote, midisynth_killnote),
- KOBJMETHOD(synth_alloc, midisynth_alloc),
- KOBJMETHOD(synth_controller, midisynth_controller),
- KOBJMETHOD(synth_bender, midisynth_bender),
- KOBJMETHOD_END
-};
-
-DEFINE_CLASS(midisynth, midisynth_methods, 0);
-
-/*
- * Module Exports & Interface
- *
- * struct midi_chan *midi_init(MPU_CLASS cls, int unit, int chan,
- * void *cookie)
- * int midi_uninit(struct snd_midi *)
- *
- * 0 == no error
- * EBUSY or other error
- *
- * int midi_in(struct snd_midi *, char *buf, int count)
- * int midi_out(struct snd_midi *, char *buf, int count)
- *
- * midi_{in,out} return actual size transfered
- *
- */
-
-/*
- * midi_devs tailq, holder of all rmidi instances protected by midistat_lock
- */
-
TAILQ_HEAD(, snd_midi) midi_devs;
-/*
- * /dev/midistat variables and declarations, protected by midistat_lock
- */
-
-static struct sx midistat_lock;
-static int midistat_isopen = 0;
-static struct sbuf midistat_sbuf;
-static struct cdev *midistat_dev;
-
-/*
- * /dev/midistat dev_t declarations
- */
-
-static d_open_t midistat_open;
-static d_close_t midistat_close;
-static d_read_t midistat_read;
-
-static struct cdevsw midistat_cdevsw = {
- .d_version = D_VERSION,
- .d_open = midistat_open,
- .d_close = midistat_close,
- .d_read = midistat_read,
- .d_name = "midistat",
-};
-
-/*
- * /dev/rmidi dev_t declarations, struct variable access is protected by
- * locks contained within the structure.
- */
+struct sx mstat_lock;
static d_open_t midi_open;
static d_close_t midi_close;
@@ -225,41 +122,36 @@ static struct cdevsw midi_cdevsw = {
.d_name = "rmidi",
};
-/*
- * Prototypes of library functions
- */
-
static int midi_destroy(struct snd_midi *, int);
-static int midistat_prepare(struct sbuf * s);
static int midi_load(void);
static int midi_unload(void);
-/*
- * Misc declr.
- */
SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Midi driver");
-static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "Status device");
int midi_debug;
/* XXX: should this be moved into debug.midi? */
SYSCTL_INT(_hw_midi, OID_AUTO, debug, CTLFLAG_RW, &midi_debug, 0, "");
-int midi_dumpraw;
-SYSCTL_INT(_hw_midi, OID_AUTO, dumpraw, CTLFLAG_RW, &midi_dumpraw, 0, "");
+#define MIDI_DEBUG(l,a) if(midi_debug>=l) a
-int midi_instroff;
-SYSCTL_INT(_hw_midi, OID_AUTO, instroff, CTLFLAG_RW, &midi_instroff, 0, "");
+void
+midistat_lock(void)
+{
+ sx_xlock(&mstat_lock);
+}
-int midistat_verbose;
-SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW,
- &midistat_verbose, 0, "");
+void
+midistat_unlock(void)
+{
+ sx_xunlock(&mstat_lock);
+}
-#define MIDI_DEBUG(l,a) if(midi_debug>=l) a
-/*
- * CODE START
- */
+void
+midistat_lockassert(void)
+{
+ sx_assert(&mstat_lock, SA_XLOCKED);
+}
/*
* Register a new rmidi device. cls midi_if interface unit == 0 means
@@ -270,9 +162,6 @@ SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW,
* what unit number is used.
*
* It is an error to call midi_init with an already used unit/channel combo.
- *
- * Returns NULL on error
- *
*/
struct snd_midi *
midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
@@ -280,10 +169,10 @@ midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
struct snd_midi *m;
int i;
int inqsize, outqsize;
- MIDI_TYPE *buf;
+ uint8_t *buf;
MIDI_DEBUG(1, printf("midiinit: unit %d/%d.\n", unit, channel));
- sx_xlock(&midistat_lock);
+ midistat_lock();
/*
* Protect against call with existing unit/channel or auto-allocate a
* new unit number.
@@ -311,9 +200,6 @@ midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
MIDI_DEBUG(1, printf("midiinit #2: unit %d/%d.\n", unit, channel));
m = malloc(sizeof(*m), M_MIDI, M_WAITOK | M_ZERO);
- m->synth = malloc(sizeof(*m->synth), M_MIDI, M_WAITOK | M_ZERO);
- kobj_init((kobj_t)m->synth, &midisynth_class);
- m->synth->m = m;
kobj_init((kobj_t)m, cls);
inqsize = MPU_INQSIZE(m, cookie);
outqsize = MPU_OUTQSIZE(m, cookie);
@@ -329,14 +215,14 @@ midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
mtx_lock(&m->qlock);
if (inqsize)
- buf = malloc(sizeof(MIDI_TYPE) * inqsize, M_MIDI, M_NOWAIT);
+ buf = malloc(sizeof(uint8_t) * inqsize, M_MIDI, M_NOWAIT);
else
buf = NULL;
MIDIQ_INIT(m->inq, buf, inqsize);
if (outqsize)
- buf = malloc(sizeof(MIDI_TYPE) * outqsize, M_MIDI, M_NOWAIT);
+ buf = malloc(sizeof(uint8_t) * outqsize, M_MIDI, M_NOWAIT);
else
buf = NULL;
m->hiwat = outqsize / 2;
@@ -361,11 +247,10 @@ midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
TAILQ_INSERT_TAIL(&midi_devs, m, link);
- sx_xunlock(&midistat_lock);
+ midistat_unlock();
- m->dev = make_dev(&midi_cdevsw,
- MIDIMKMINOR(unit, MIDI_DEV_RAW, channel),
- UID_ROOT, GID_WHEEL, 0666, "midi%d.%d", unit, channel);
+ m->dev = make_dev(&midi_cdevsw, unit, UID_ROOT, GID_WHEEL, 0666,
+ "midi%d.%d", unit, channel);
m->dev->si_drv1 = m;
return m;
@@ -379,10 +264,9 @@ err2:
if (MIDIQ_BUF(m->outq))
free(MIDIQ_BUF(m->outq), M_MIDI);
err1:
- free(m->synth, M_MIDI);
free(m, M_MIDI);
err0:
- sx_xunlock(&midistat_lock);
+ midistat_unlock();
MIDI_DEBUG(1, printf("midi_init ended in error\n"));
return NULL;
}
@@ -391,16 +275,14 @@ err0:
 * midi_uninit does not call MIDI_UNINIT, since this is the implementor's
 * entry point. midi_uninit, in fact, does not send any methods. A call to
 * midi_uninit is a de facto promise that you won't manipulate ch anymore
- *
*/
-
int
midi_uninit(struct snd_midi *m)
{
int err;
err = EBUSY;
- sx_xlock(&midistat_lock);
+ midistat_lock();
mtx_lock(&m->lock);
if (m->busy) {
if (!(m->rchan || m->wchan))
@@ -422,17 +304,10 @@ midi_uninit(struct snd_midi *m)
err:
mtx_unlock(&m->lock);
exit:
- sx_xunlock(&midistat_lock);
+ midistat_unlock();
return err;
}
-/*
- * midi_in: process all data until the queue is full, then discards the rest.
- * Since midi_in is a state machine, data discards can cause it to get out of
- * whack. Process as much as possible. It calls, wakeup, selnotify and
- * psignal at most once.
- */
-
#ifdef notdef
static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};
@@ -446,13 +321,19 @@ static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};
#define MIDI_SYSEX_START 0xF0
#define MIDI_SYSEX_END 0xF7
+/*
+ * midi_in: process all data until the queue is full, then discards the rest.
+ * Since midi_in is a state machine, data discards can cause it to get out of
+ * whack. Process as much as possible. It calls wakeup, selnotify and
+ * psignal at most once.
+ */
int
-midi_in(struct snd_midi *m, MIDI_TYPE *buf, int size)
+midi_in(struct snd_midi *m, uint8_t *buf, int size)
{
/* int i, sig, enq; */
int used;
- /* MIDI_TYPE data; */
+ /* uint8_t data; */
MIDI_DEBUG(5, printf("midi_in: m=%p size=%d\n", m, size));
/*
@@ -577,7 +458,7 @@ midi_in(struct snd_midi *m, MIDI_TYPE *buf, int size)
* midi_out: The only clearer of the M_TXEN flag.
*/
int
-midi_out(struct snd_midi *m, MIDI_TYPE *buf, int size)
+midi_out(struct snd_midi *m, uint8_t *buf, int size)
{
int used;
@@ -613,9 +494,6 @@ midi_out(struct snd_midi *m, MIDI_TYPE *buf, int size)
return used;
}
-/*
- * /dev/rmidi#.# device access functions
- */
int
midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
@@ -920,444 +798,12 @@ midi_poll(struct cdev *i_dev, int events, struct thread *td)
}
/*
- * /dev/midistat device functions
- *
- */
-static int
-midistat_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
-{
- int error;
-
- MIDI_DEBUG(1, printf("midistat_open\n"));
-
- sx_xlock(&midistat_lock);
- if (midistat_isopen) {
- sx_xunlock(&midistat_lock);
- return EBUSY;
- }
- midistat_isopen = 1;
- if (sbuf_new(&midistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) {
- error = ENXIO;
- goto out;
- }
- error = (midistat_prepare(&midistat_sbuf) > 0) ? 0 : ENOMEM;
-out:
- if (error)
- midistat_isopen = 0;
- sx_xunlock(&midistat_lock);
- return error;
-}
-
-static int
-midistat_close(struct cdev *i_dev, int flags, int mode, struct thread *td)
-{
- MIDI_DEBUG(1, printf("midistat_close\n"));
- sx_xlock(&midistat_lock);
- if (!midistat_isopen) {
- sx_xunlock(&midistat_lock);
- return EBADF;
- }
- sbuf_delete(&midistat_sbuf);
- midistat_isopen = 0;
- sx_xunlock(&midistat_lock);
- return 0;
-}
-
-static int
-midistat_read(struct cdev *i_dev, struct uio *uio, int flag)
-{
- long l;
- int err;
-
- MIDI_DEBUG(4, printf("midistat_read\n"));
- sx_xlock(&midistat_lock);
- if (!midistat_isopen) {
- sx_xunlock(&midistat_lock);
- return EBADF;
- }
- if (uio->uio_offset < 0 || uio->uio_offset > sbuf_len(&midistat_sbuf)) {
- sx_xunlock(&midistat_lock);
- return EINVAL;
- }
- err = 0;
- l = lmin(uio->uio_resid, sbuf_len(&midistat_sbuf) - uio->uio_offset);
- if (l > 0) {
- err = uiomove(sbuf_data(&midistat_sbuf) + uio->uio_offset, l,
- uio);
- }
- sx_xunlock(&midistat_lock);
- return err;
-}
-
-/*
- * Module library functions
- */
-
-static int
-midistat_prepare(struct sbuf *s)
-{
- struct snd_midi *m;
-
- sx_assert(&midistat_lock, SA_XLOCKED);
-
- sbuf_printf(s, "FreeBSD Midi Driver (midi2)\n");
- if (TAILQ_EMPTY(&midi_devs)) {
- sbuf_printf(s, "No devices installed.\n");
- sbuf_finish(s);
- return sbuf_len(s);
- }
- sbuf_printf(s, "Installed devices:\n");
-
- TAILQ_FOREACH(m, &midi_devs, link) {
- mtx_lock(&m->lock);
- sbuf_printf(s, "%s [%d/%d:%s]", m->name, m->unit, m->channel,
- MPU_PROVIDER(m, m->cookie));
- sbuf_printf(s, "%s", MPU_DESCR(m, m->cookie, midistat_verbose));
- sbuf_printf(s, "\n");
- mtx_unlock(&m->lock);
- }
-
- sbuf_finish(s);
- return sbuf_len(s);
-}
-
-#ifdef notdef
-/*
- * Convert IOCTL command to string for debugging
- */
-
-static char *
-midi_cmdname(int cmd)
-{
- static struct {
- int cmd;
- char *name;
- } *tab, cmdtab_midiioctl[] = {
-#define A(x) {x, ## x}
- /*
- * Once we have some real IOCTLs define, the following will
- * be relavant.
- *
- * A(SNDCTL_MIDI_PRETIME), A(SNDCTL_MIDI_MPUMODE),
- * A(SNDCTL_MIDI_MPUCMD), A(SNDCTL_SYNTH_INFO),
- * A(SNDCTL_MIDI_INFO), A(SNDCTL_SYNTH_MEMAVL),
- * A(SNDCTL_FM_LOAD_INSTR), A(SNDCTL_FM_4OP_ENABLE),
- * A(MIOSPASSTHRU), A(MIOGPASSTHRU), A(AIONWRITE),
- * A(AIOGSIZE), A(AIOSSIZE), A(AIOGFMT), A(AIOSFMT),
- * A(AIOGMIX), A(AIOSMIX), A(AIOSTOP), A(AIOSYNC),
- * A(AIOGCAP),
- */
-#undef A
- {
- -1, "unknown"
- },
- };
-
- for (tab = cmdtab_midiioctl; tab->cmd != cmd && tab->cmd != -1; tab++);
- return tab->name;
-}
-
-#endif /* notdef */
-
-/*
- * midisynth
- */
-
-int
-midisynth_open(void *n, void *arg, int flags)
-{
- struct snd_midi *m = ((struct synth_midi *)n)->m;
- int retval;
-
- MIDI_DEBUG(1, printf("midisynth_open %s %s\n",
- flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : ""));
-
- if (m == NULL)
- return ENXIO;
-
- mtx_lock(&m->lock);
- mtx_lock(&m->qlock);
-
- retval = 0;
-
- if (flags & FREAD) {
- if (MIDIQ_SIZE(m->inq) == 0)
- retval = ENXIO;
- else if (m->flags & M_RX)
- retval = EBUSY;
- if (retval)
- goto err;
- }
- if (flags & FWRITE) {
- if (MIDIQ_SIZE(m->outq) == 0)
- retval = ENXIO;
- else if (m->flags & M_TX)
- retval = EBUSY;
- if (retval)
- goto err;
- }
- m->busy++;
-
- /*
- * TODO: Consider m->async = 0;
- */
-
- if (flags & FREAD) {
- m->flags |= M_RX | M_RXEN;
- /*
- * Only clear the inq, the outq might still have data to drain
- * from a previous session
- */
- MIDIQ_CLEAR(m->inq);
- m->rchan = 0;
- }
-
- if (flags & FWRITE) {
- m->flags |= M_TX;
- m->wchan = 0;
- }
- m->synth_flags = flags & (FREAD | FWRITE);
-
- MPU_CALLBACK(m, m->cookie, m->flags);
-
-err: mtx_unlock(&m->qlock);
- mtx_unlock(&m->lock);
- MIDI_DEBUG(2, printf("midisynth_open: return %d.\n", retval));
- return retval;
-}
-
-int
-midisynth_close(void *n)
-{
- struct snd_midi *m = ((struct synth_midi *)n)->m;
- int retval;
- int oldflags;
-
- MIDI_DEBUG(1, printf("midisynth_close %s %s\n",
- m->synth_flags & FREAD ? "M_RX" : "",
- m->synth_flags & FWRITE ? "M_TX" : ""));
-
- if (m == NULL)
- return ENXIO;
-
- mtx_lock(&m->lock);
- mtx_lock(&m->qlock);
-
- if ((m->synth_flags & FREAD && !(m->flags & M_RX)) ||
- (m->synth_flags & FWRITE && !(m->flags & M_TX))) {
- retval = ENXIO;
- goto err;
- }
- m->busy--;
-
- oldflags = m->flags;
-
- if (m->synth_flags & FREAD)
- m->flags &= ~(M_RX | M_RXEN);
- if (m->synth_flags & FWRITE)
- m->flags &= ~M_TX;
-
- if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN)))
- MPU_CALLBACK(m, m->cookie, m->flags);
-
- MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy));
-
- mtx_unlock(&m->qlock);
- mtx_unlock(&m->lock);
- retval = 0;
-err: return retval;
-}
-
-/*
- * Always blocking.
- */
-
-int
-midisynth_writeraw(void *n, uint8_t *buf, size_t len)
-{
- struct snd_midi *m = ((struct synth_midi *)n)->m;
- int retval;
- int used;
- int i;
-
- MIDI_DEBUG(4, printf("midisynth_writeraw\n"));
-
- retval = 0;
-
- if (m == NULL)
- return ENXIO;
-
- mtx_lock(&m->lock);
- mtx_lock(&m->qlock);
-
- if (!(m->flags & M_TX))
- goto err1;
-
- if (midi_dumpraw)
- printf("midi dump: ");
-
- while (len > 0) {
- while (MIDIQ_AVAIL(m->outq) == 0) {
- if (!(m->flags & M_TXEN)) {
- m->flags |= M_TXEN;
- MPU_CALLBACK(m, m->cookie, m->flags);
- }
- mtx_unlock(&m->lock);
- m->wchan = 1;
- MIDI_DEBUG(3, printf("midisynth_writeraw msleep\n"));
- retval = msleep(&m->wchan, &m->qlock,
- PCATCH | PDROP, "midi TX", 0);
- /*
- * We slept, maybe things have changed since last
- * dying check
- */
- if (retval == EINTR)
- goto err0;
-
- if (retval)
- goto err0;
- mtx_lock(&m->lock);
- mtx_lock(&m->qlock);
- m->wchan = 0;
- if (!m->busy)
- goto err1;
- }
-
- /*
- * We are certain than data can be placed on the queue
- */
-
- used = MIN(MIDIQ_AVAIL(m->outq), len);
- used = MIN(used, MIDI_WSIZE);
- MIDI_DEBUG(5,
- printf("midi_synth: resid %zu len %jd avail %jd\n",
- len, (intmax_t)MIDIQ_LEN(m->outq),
- (intmax_t)MIDIQ_AVAIL(m->outq)));
-
- if (midi_dumpraw)
- for (i = 0; i < used; i++)
- printf("%x ", buf[i]);
-
- MIDIQ_ENQ(m->outq, buf, used);
- len -= used;
-
- /*
- * Inform the bottom half that data can be written
- */
- if (!(m->flags & M_TXEN)) {
- m->flags |= M_TXEN;
- MPU_CALLBACK(m, m->cookie, m->flags);
- }
- }
- /*
- * If we Made it here then transfer is good
- */
- if (midi_dumpraw)
- printf("\n");
-
- retval = 0;
-err1: mtx_unlock(&m->qlock);
- mtx_unlock(&m->lock);
-err0: return retval;
-}
-
-static int
-midisynth_killnote(void *n, uint8_t chn, uint8_t note, uint8_t vel)
-{
- u_char c[3];
-
- if (note > 127 || chn > 15)
- return (EINVAL);
-
- if (vel > 127)
- vel = 127;
-
- if (vel == 64) {
- c[0] = 0x90 | (chn & 0x0f); /* Note on. */
- c[1] = (u_char)note;
- c[2] = 0;
- } else {
- c[0] = 0x80 | (chn & 0x0f); /* Note off. */
- c[1] = (u_char)note;
- c[2] = (u_char)vel;
- }
-
- return midisynth_writeraw(n, c, 3);
-}
-
-static int
-midisynth_setinstr(void *n, uint8_t chn, uint16_t instr)
-{
- u_char c[2];
-
- if (instr > 127 || chn > 15)
- return EINVAL;
-
- c[0] = 0xc0 | (chn & 0x0f); /* Progamme change. */
- c[1] = instr + midi_instroff;
-
- return midisynth_writeraw(n, c, 2);
-}
-
-static int
-midisynth_startnote(void *n, uint8_t chn, uint8_t note, uint8_t vel)
-{
- u_char c[3];
-
- if (note > 127 || chn > 15)
- return EINVAL;
-
- if (vel > 127)
- vel = 127;
-
- c[0] = 0x90 | (chn & 0x0f); /* Note on. */
- c[1] = (u_char)note;
- c[2] = (u_char)vel;
-
- return midisynth_writeraw(n, c, 3);
-}
-static int
-midisynth_alloc(void *n, uint8_t chan, uint8_t note)
-{
- return chan;
-}
-
-static int
-midisynth_controller(void *n, uint8_t chn, uint8_t ctrlnum, uint16_t val)
-{
- u_char c[3];
-
- if (ctrlnum > 127 || chn > 15)
- return EINVAL;
-
- c[0] = 0xb0 | (chn & 0x0f); /* Control Message. */
- c[1] = ctrlnum;
- c[2] = val;
- return midisynth_writeraw(n, c, 3);
-}
-
-static int
-midisynth_bender(void *n, uint8_t chn, uint16_t val)
-{
- u_char c[3];
-
- if (val > 16383 || chn > 15)
- return EINVAL;
-
- c[0] = 0xe0 | (chn & 0x0f); /* Pitch bend. */
- c[1] = (u_char)val & 0x7f;
- c[2] = (u_char)(val >> 7) & 0x7f;
-
- return midisynth_writeraw(n, c, 3);
-}
-
-/*
* Single point of midi destructions.
*/
static int
midi_destroy(struct snd_midi *m, int midiuninit)
{
- sx_assert(&midistat_lock, SA_XLOCKED);
+ midistat_lockassert();
mtx_assert(&m->lock, MA_OWNED);
MIDI_DEBUG(3, printf("midi_destroy\n"));
@@ -1371,25 +817,16 @@ midi_destroy(struct snd_midi *m, int midiuninit)
free(MIDIQ_BUF(m->outq), M_MIDI);
mtx_destroy(&m->qlock);
mtx_destroy(&m->lock);
- free(m->synth, M_MIDI);
free(m, M_MIDI);
return 0;
}
-/*
- * Load and unload functions, creates the /dev/midistat device
- */
-
static int
midi_load(void)
{
- sx_init(&midistat_lock, "midistat lock");
+ sx_init(&mstat_lock, "midistat lock");
TAILQ_INIT(&midi_devs);
- midistat_dev = make_dev(&midistat_cdevsw,
- MIDIMKMINOR(0, MIDI_DEV_MIDICTL, 0),
- UID_ROOT, GID_WHEEL, 0666, "midistat");
-
return 0;
}
@@ -1401,10 +838,7 @@ midi_unload(void)
MIDI_DEBUG(1, printf("midi_unload()\n"));
retval = EBUSY;
- sx_xlock(&midistat_lock);
- if (midistat_isopen)
- goto exit0;
-
+ midistat_lock();
TAILQ_FOREACH_SAFE(m, &midi_devs, link, tmp) {
mtx_lock(&m->lock);
if (m->busy)
@@ -1412,28 +846,21 @@ midi_unload(void)
else
retval = midi_destroy(m, 1);
if (retval)
- goto exit1;
+ goto exit;
}
- sx_xunlock(&midistat_lock);
- destroy_dev(midistat_dev);
+ midistat_unlock();
- /*
- * Made it here then unload is complete
- */
- sx_destroy(&midistat_lock);
+ sx_destroy(&mstat_lock);
return 0;
-exit1:
+exit:
mtx_unlock(&m->lock);
-exit0:
- sx_xunlock(&midistat_lock);
+ midistat_unlock();
if (retval)
MIDI_DEBUG(2, printf("midi_unload: failed\n"));
return retval;
}
-extern int seq_modevent(module_t mod, int type, void *data);
-
static int
midi_modevent(module_t mod, int type, void *data)
{
@@ -1444,14 +871,10 @@ midi_modevent(module_t mod, int type, void *data)
switch (type) {
case MOD_LOAD:
retval = midi_load();
- if (retval == 0)
- retval = seq_modevent(mod, type, data);
break;
case MOD_UNLOAD:
retval = midi_unload();
- if (retval == 0)
- retval = seq_modevent(mod, type, data);
break;
default:
@@ -1461,51 +884,5 @@ midi_modevent(module_t mod, int type, void *data)
return retval;
}
-kobj_t
-midimapper_addseq(void *arg1, int *unit, void **cookie)
-{
- unit = NULL;
-
- return (kobj_t)arg1;
-}
-
-int
-midimapper_open(void *arg1, void **cookie)
-{
- int retval = 0;
- struct snd_midi *m;
-
- sx_xlock(&midistat_lock);
- TAILQ_FOREACH(m, &midi_devs, link) {
- retval++;
- }
- sx_xunlock(&midistat_lock);
- return retval;
-}
-
-int
-midimapper_close(void *arg1, void *cookie)
-{
- return 0;
-}
-
-kobj_t
-midimapper_fetch_synth(void *arg, void *cookie, int unit)
-{
- struct snd_midi *m;
- int retval = 0;
-
- sx_xlock(&midistat_lock);
- TAILQ_FOREACH(m, &midi_devs, link) {
- if (unit == retval) {
- sx_xunlock(&midistat_lock);
- return (kobj_t)m->synth;
- }
- retval++;
- }
- sx_xunlock(&midistat_lock);
- return NULL;
-}
-
DEV_MODULE(midi, midi_modevent, NULL);
MODULE_VERSION(midi, 1);
diff --git a/sys/dev/sound/midi/midi.h b/sys/dev/sound/midi/midi.h
index 567279d1e654..286e84264ef3 100644
--- a/sys/dev/sound/midi/midi.h
+++ b/sys/dev/sound/midi/midi.h
@@ -39,19 +39,16 @@ MALLOC_DECLARE(M_MIDI);
#define M_RXEN 0x04
#define M_TXEN 0x08
-#define MIDI_TYPE unsigned char
-
struct snd_midi;
+void midistat_lock(void);
+void midistat_unlock(void);
+void midistat_lockassert(void);
+
struct snd_midi *
midi_init(kobj_class_t _mpu_cls, int _unit, int _channel, void *cookie);
int midi_uninit(struct snd_midi *_m);
-int midi_out(struct snd_midi *_m, MIDI_TYPE *_buf, int _size);
-int midi_in(struct snd_midi *_m, MIDI_TYPE *_buf, int _size);
-
-kobj_t midimapper_addseq(void *arg1, int *unit, void **cookie);
-int midimapper_open(void *arg1, void **cookie);
-int midimapper_close(void *arg1, void *cookie);
-kobj_t midimapper_fetch_synth(void *arg, void *cookie, int unit);
+int midi_out(struct snd_midi *_m, uint8_t *_buf, int _size);
+int midi_in(struct snd_midi *_m, uint8_t *_buf, int _size);
#endif
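With the /dev/midistat device gone, midi.c now exports its sx lock only through the wrapper functions declared above. A sketch of a consumer serializing against the rmidi list; count_rmidi_example is hypothetical and assumes visibility of the midi_devs tailq, which lives in midi.c:

static int
count_rmidi_example(void)
{
	struct snd_midi *m;
	int n = 0;

	midistat_lock();		/* was sx_xlock(&midistat_lock) */
	TAILQ_FOREACH(m, &midi_devs, link)
		n++;
	midistat_unlock();
	return (n);
}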
diff --git a/sys/dev/sound/midi/mpu401.c b/sys/dev/sound/midi/mpu401.c
index a344801d1982..224ebb1b01f4 100644
--- a/sys/dev/sound/midi/mpu401.c
+++ b/sys/dev/sound/midi/mpu401.c
@@ -88,8 +88,6 @@ static int mpu401_minqsize(struct snd_midi *, void *);
static int mpu401_moutqsize(struct snd_midi *, void *);
static void mpu401_mcallback(struct snd_midi *, void *, int);
static void mpu401_mcallbackp(struct snd_midi *, void *, int);
-static const char *mpu401_mdescr(struct snd_midi *, void *, int);
-static const char *mpu401_mprovider(struct snd_midi *, void *);
static kobj_method_t mpu401_methods[] = {
KOBJMETHOD(mpu_init, mpu401_minit),
@@ -98,8 +96,6 @@ static kobj_method_t mpu401_methods[] = {
KOBJMETHOD(mpu_outqsize, mpu401_moutqsize),
KOBJMETHOD(mpu_callback, mpu401_mcallback),
KOBJMETHOD(mpu_callbackp, mpu401_mcallbackp),
- KOBJMETHOD(mpu_descr, mpu401_mdescr),
- KOBJMETHOD(mpu_provider, mpu401_mprovider),
KOBJMETHOD_END
};
@@ -118,28 +114,16 @@ static int
mpu401_intr(struct mpu401 *m)
{
#define MPU_INTR_BUF 16
- MIDI_TYPE b[MPU_INTR_BUF];
+ uint8_t b[MPU_INTR_BUF];
int i;
int s;
-/*
- printf("mpu401_intr\n");
-*/
#define RXRDY(m) ( (STATUS(m) & MPU_INPUTBUSY) == 0)
#define TXRDY(m) ( (STATUS(m) & MPU_OUTPUTBUSY) == 0)
-#if 0
-#define D(x,l) printf("mpu401_intr %d %x %s %s\n",l, x, x&MPU_INPUTBUSY?"RX":"", x&MPU_OUTPUTBUSY?"TX":"")
-#else
-#define D(x,l)
-#endif
i = 0;
s = STATUS(m);
- D(s, 1);
while ((s & MPU_INPUTBUSY) == 0 && i < MPU_INTR_BUF) {
b[i] = READ(m);
-/*
- printf("mpu401_intr in i %d d %d\n", i, b[i]);
-*/
i++;
s = STATUS(m);
}
@@ -148,15 +132,9 @@ mpu401_intr(struct mpu401 *m)
i = 0;
while (!(s & MPU_OUTPUTBUSY) && i < MPU_INTR_BUF) {
if (midi_out(m->mid, b, 1)) {
-/*
- printf("mpu401_intr out i %d d %d\n", i, b[0]);
-*/
WRITE(m, *b);
} else {
-/*
- printf("mpu401_intr write: no output\n");
-*/
return 0;
}
i++;
@@ -262,13 +240,7 @@ static void
mpu401_mcallback(struct snd_midi *sm, void *arg, int flags)
{
struct mpu401 *m = arg;
-#if 0
- printf("mpu401_callback %s %s %s %s\n",
- flags & M_RX ? "M_RX" : "",
- flags & M_TX ? "M_TX" : "",
- flags & M_RXEN ? "M_RXEN" : "",
- flags & M_TXEN ? "M_TXEN" : "");
-#endif
+
if (flags & M_TXEN && m->si) {
callout_reset(&m->timer, 1, mpu401_timeout, m);
}
@@ -278,19 +250,5 @@ mpu401_mcallback(struct snd_midi *sm, void *arg, int flags)
static void
mpu401_mcallbackp(struct snd_midi *sm, void *arg, int flags)
{
-/* printf("mpu401_callbackp\n"); */
mpu401_mcallback(sm, arg, flags);
}
-
-static const char *
-mpu401_mdescr(struct snd_midi *sm, void *arg, int verbosity)
-{
-
- return "descr mpu401";
-}
-
-static const char *
-mpu401_mprovider(struct snd_midi *m, void *arg)
-{
- return "provider mpu401";
-}
diff --git a/sys/dev/sound/midi/mpu_if.m b/sys/dev/sound/midi/mpu_if.m
index b7cb586c5dd0..835d887f703a 100644
--- a/sys/dev/sound/midi/mpu_if.m
+++ b/sys/dev/sound/midi/mpu_if.m
@@ -56,17 +56,6 @@ METHOD void callback {
int _flags;
};
-METHOD const char * provider {
- struct snd_midi *_kobj;
- void *_cookie;
-};
-
-METHOD const char * descr {
- struct snd_midi *_kobj;
- void *_cookie;
- int _verbosity;
-};
-
METHOD int uninit {
struct snd_midi *_kobj;
void *_cookie;
diff --git a/sys/dev/sound/midi/sequencer.c b/sys/dev/sound/midi/sequencer.c
deleted file mode 100644
index 817540f1545a..000000000000
--- a/sys/dev/sound/midi/sequencer.c
+++ /dev/null
@@ -1,2102 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2003 Mathew Kanner
- * Copyright (c) 1993 Hannu Savolainen
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/*
- * The sequencer personality manager.
- */
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/ioccom.h>
-
-#include <sys/filio.h>
-#include <sys/lock.h>
-#include <sys/sockio.h>
-#include <sys/fcntl.h>
-#include <sys/proc.h>
-#include <sys/sysctl.h>
-
-#include <sys/kernel.h> /* for DATA_SET */
-
-#include <sys/module.h>
-#include <sys/conf.h>
-#include <sys/file.h>
-#include <sys/uio.h>
-#include <sys/syslog.h>
-#include <sys/errno.h>
-#include <sys/malloc.h>
-#include <sys/bus.h>
-#include <machine/resource.h>
-#include <machine/bus.h>
-#include <machine/clock.h> /* for DELAY */
-#include <sys/soundcard.h>
-#include <sys/rman.h>
-#include <sys/mman.h>
-#include <sys/poll.h>
-#include <sys/mutex.h>
-#include <sys/condvar.h>
-#include <sys/kthread.h>
-#include <sys/unistd.h>
-#include <sys/selinfo.h>
-
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_snd.h"
-#endif
-
-#include <dev/sound/midi/midi.h>
-#include <dev/sound/midi/midiq.h>
-#include "synth_if.h"
-
-#include <dev/sound/midi/sequencer.h>
-
-#define TMR_TIMERBASE 13
-
-#define SND_DEV_SEQ 1 /* Sequencer output /dev/sequencer (FM
- * synthesizer and MIDI output) */
-#define SND_DEV_MUSIC 8 /* /dev/music, level 2 interface */
-
-/* Length of a sequencer event. */
-#define EV_SZ 8
-#define IEV_SZ 8
-
-/* Lookup modes */
-#define LOOKUP_EXIST (0)
-#define LOOKUP_OPEN (1)
-#define LOOKUP_CLOSE (2)
-
-#define PCMMKMINOR(u, d, c) \
- ((((c) & 0xff) << 16) | (((u) & 0x0f) << 4) | ((d) & 0x0f))
-#define MIDIMKMINOR(u, d, c) PCMMKMINOR(u, d, c)
-#define MIDIUNIT(y) ((dev2unit(y) >> 4) & 0x0f)
-#define MIDIDEV(y) (dev2unit(y) & 0x0f)
-
-/* These are the entries to the sequencer driver. */
-static d_open_t mseq_open;
-static d_close_t mseq_close;
-static d_ioctl_t mseq_ioctl;
-static d_read_t mseq_read;
-static d_write_t mseq_write;
-static d_poll_t mseq_poll;
-
-static struct cdevsw seq_cdevsw = {
- .d_version = D_VERSION,
- .d_open = mseq_open,
- .d_close = mseq_close,
- .d_read = mseq_read,
- .d_write = mseq_write,
- .d_ioctl = mseq_ioctl,
- .d_poll = mseq_poll,
- .d_name = "sequencer",
-};
-
-struct seq_softc {
- KOBJ_FIELDS;
-
- struct mtx seq_lock, q_lock;
- struct cv empty_cv, reset_cv, in_cv, out_cv, state_cv, th_cv;
-
- MIDIQ_HEAD(, u_char) in_q, out_q;
-
- u_long flags;
- /* Flags (protected by flag_mtx of mididev_info) */
- int fflags; /* Access mode */
- int music;
-
- int out_water; /* Sequence output threshould */
- snd_sync_parm sync_parm; /* AIOSYNC parameter set */
- struct thread *sync_thread; /* AIOSYNCing thread */
- struct selinfo in_sel, out_sel;
- int midi_number;
- struct cdev *seqdev, *musicdev;
- int unit;
- int maxunits;
- kobj_t *midis;
- int *midi_flags;
- kobj_t mapper;
- void *mapper_cookie;
- struct timeval timerstop, timersub;
- int timerbase, tempo;
- int timerrun;
- int done;
- int playing;
- int recording;
- int busy;
- int pre_event_timeout;
- int waiting;
-};
-
-/*
- * Module specific stuff, including how many sequecers
- * we currently own.
- */
-
-SYSCTL_NODE(_hw_midi, OID_AUTO, seq, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "Midi sequencer");
-
-int seq_debug;
-/* XXX: should this be moved into debug.midi? */
-SYSCTL_INT(_hw_midi_seq, OID_AUTO, debug, CTLFLAG_RW, &seq_debug, 0, "");
-
-midi_cmdtab cmdtab_seqevent[] = {
- {SEQ_NOTEOFF, "SEQ_NOTEOFF"},
- {SEQ_NOTEON, "SEQ_NOTEON"},
- {SEQ_WAIT, "SEQ_WAIT"},
- {SEQ_PGMCHANGE, "SEQ_PGMCHANGE"},
- {SEQ_SYNCTIMER, "SEQ_SYNCTIMER"},
- {SEQ_MIDIPUTC, "SEQ_MIDIPUTC"},
- {SEQ_DRUMON, "SEQ_DRUMON"},
- {SEQ_DRUMOFF, "SEQ_DRUMOFF"},
- {SEQ_ECHO, "SEQ_ECHO"},
- {SEQ_AFTERTOUCH, "SEQ_AFTERTOUCH"},
- {SEQ_CONTROLLER, "SEQ_CONTROLLER"},
- {SEQ_BALANCE, "SEQ_BALANCE"},
- {SEQ_VOLMODE, "SEQ_VOLMODE"},
- {SEQ_FULLSIZE, "SEQ_FULLSIZE"},
- {SEQ_PRIVATE, "SEQ_PRIVATE"},
- {SEQ_EXTENDED, "SEQ_EXTENDED"},
- {EV_SEQ_LOCAL, "EV_SEQ_LOCAL"},
- {EV_TIMING, "EV_TIMING"},
- {EV_CHN_COMMON, "EV_CHN_COMMON"},
- {EV_CHN_VOICE, "EV_CHN_VOICE"},
- {EV_SYSEX, "EV_SYSEX"},
- {-1, NULL},
-};
-
-midi_cmdtab cmdtab_seqioctl[] = {
- {SNDCTL_SEQ_RESET, "SNDCTL_SEQ_RESET"},
- {SNDCTL_SEQ_SYNC, "SNDCTL_SEQ_SYNC"},
- {SNDCTL_SYNTH_INFO, "SNDCTL_SYNTH_INFO"},
- {SNDCTL_SEQ_CTRLRATE, "SNDCTL_SEQ_CTRLRATE"},
- {SNDCTL_SEQ_GETOUTCOUNT, "SNDCTL_SEQ_GETOUTCOUNT"},
- {SNDCTL_SEQ_GETINCOUNT, "SNDCTL_SEQ_GETINCOUNT"},
- {SNDCTL_SEQ_PERCMODE, "SNDCTL_SEQ_PERCMODE"},
- {SNDCTL_FM_LOAD_INSTR, "SNDCTL_FM_LOAD_INSTR"},
- {SNDCTL_SEQ_TESTMIDI, "SNDCTL_SEQ_TESTMIDI"},
- {SNDCTL_SEQ_RESETSAMPLES, "SNDCTL_SEQ_RESETSAMPLES"},
- {SNDCTL_SEQ_NRSYNTHS, "SNDCTL_SEQ_NRSYNTHS"},
- {SNDCTL_SEQ_NRMIDIS, "SNDCTL_SEQ_NRMIDIS"},
- {SNDCTL_SEQ_GETTIME, "SNDCTL_SEQ_GETTIME"},
- {SNDCTL_MIDI_INFO, "SNDCTL_MIDI_INFO"},
- {SNDCTL_SEQ_THRESHOLD, "SNDCTL_SEQ_THRESHOLD"},
- {SNDCTL_SYNTH_MEMAVL, "SNDCTL_SYNTH_MEMAVL"},
- {SNDCTL_FM_4OP_ENABLE, "SNDCTL_FM_4OP_ENABLE"},
- {SNDCTL_PMGR_ACCESS, "SNDCTL_PMGR_ACCESS"},
- {SNDCTL_SEQ_PANIC, "SNDCTL_SEQ_PANIC"},
- {SNDCTL_SEQ_OUTOFBAND, "SNDCTL_SEQ_OUTOFBAND"},
- {SNDCTL_TMR_TIMEBASE, "SNDCTL_TMR_TIMEBASE"},
- {SNDCTL_TMR_START, "SNDCTL_TMR_START"},
- {SNDCTL_TMR_STOP, "SNDCTL_TMR_STOP"},
- {SNDCTL_TMR_CONTINUE, "SNDCTL_TMR_CONTINUE"},
- {SNDCTL_TMR_TEMPO, "SNDCTL_TMR_TEMPO"},
- {SNDCTL_TMR_SOURCE, "SNDCTL_TMR_SOURCE"},
- {SNDCTL_TMR_METRONOME, "SNDCTL_TMR_METRONOME"},
- {SNDCTL_TMR_SELECT, "SNDCTL_TMR_SELECT"},
- {SNDCTL_MIDI_PRETIME, "SNDCTL_MIDI_PRETIME"},
- {AIONWRITE, "AIONWRITE"},
- {AIOGSIZE, "AIOGSIZE"},
- {AIOSSIZE, "AIOSSIZE"},
- {AIOGFMT, "AIOGFMT"},
- {AIOSFMT, "AIOSFMT"},
- {AIOGMIX, "AIOGMIX"},
- {AIOSMIX, "AIOSMIX"},
- {AIOSTOP, "AIOSTOP"},
- {AIOSYNC, "AIOSYNC"},
- {AIOGCAP, "AIOGCAP"},
- {-1, NULL},
-};
-
-midi_cmdtab cmdtab_timer[] = {
- {TMR_WAIT_REL, "TMR_WAIT_REL"},
- {TMR_WAIT_ABS, "TMR_WAIT_ABS"},
- {TMR_STOP, "TMR_STOP"},
- {TMR_START, "TMR_START"},
- {TMR_CONTINUE, "TMR_CONTINUE"},
- {TMR_TEMPO, "TMR_TEMPO"},
- {TMR_ECHO, "TMR_ECHO"},
- {TMR_CLOCK, "TMR_CLOCK"},
- {TMR_SPP, "TMR_SPP"},
- {TMR_TIMESIG, "TMR_TIMESIG"},
- {-1, NULL},
-};
-
-midi_cmdtab cmdtab_seqcv[] = {
- {MIDI_NOTEOFF, "MIDI_NOTEOFF"},
- {MIDI_NOTEON, "MIDI_NOTEON"},
- {MIDI_KEY_PRESSURE, "MIDI_KEY_PRESSURE"},
- {-1, NULL},
-};
-
-midi_cmdtab cmdtab_seqccmn[] = {
- {MIDI_CTL_CHANGE, "MIDI_CTL_CHANGE"},
- {MIDI_PGM_CHANGE, "MIDI_PGM_CHANGE"},
- {MIDI_CHN_PRESSURE, "MIDI_CHN_PRESSURE"},
- {MIDI_PITCH_BEND, "MIDI_PITCH_BEND"},
- {MIDI_SYSTEM_PREFIX, "MIDI_SYSTEM_PREFIX"},
- {-1, NULL},
-};
-
-#ifndef KOBJMETHOD_END
-#define KOBJMETHOD_END { NULL, NULL }
-#endif
-
-/*
- * static const char *mpu401_mprovider(kobj_t obj, struct mpu401 *m);
- */
-
-static kobj_method_t seq_methods[] = {
- /* KOBJMETHOD(mpu_provider,mpu401_mprovider), */
- KOBJMETHOD_END
-};
-
-DEFINE_CLASS(sequencer, seq_methods, 0);
-
-/* The followings are the local function. */
-static int seq_convertold(u_char *event, u_char *out);
-
-/*
- * static void seq_midiinput(struct seq_softc * scp, void *md);
- */
-static void seq_reset(struct seq_softc *scp);
-static int seq_sync(struct seq_softc *scp);
-
-static int seq_processevent(struct seq_softc *scp, u_char *event);
-
-static int seq_timing(struct seq_softc *scp, u_char *event);
-static int seq_local(struct seq_softc *scp, u_char *event);
-
-static int seq_chnvoice(struct seq_softc *scp, kobj_t md, u_char *event);
-static int seq_chncommon(struct seq_softc *scp, kobj_t md, u_char *event);
-static int seq_sysex(struct seq_softc *scp, kobj_t md, u_char *event);
-
-static int seq_fetch_mid(struct seq_softc *scp, int unit, kobj_t *md);
-void seq_copytoinput(struct seq_softc *scp, u_char *event, int len);
-int seq_modevent(module_t mod, int type, void *data);
-struct seq_softc *seqs[10];
-static struct mtx seqinfo_mtx;
-static u_long nseq = 0;
-
-static void timer_start(struct seq_softc *t);
-static void timer_stop(struct seq_softc *t);
-static void timer_setvals(struct seq_softc *t, int tempo, int timerbase);
-static void timer_wait(struct seq_softc *t, int ticks, int wait_abs);
-static int timer_now(struct seq_softc *t);
-
-static void
-timer_start(struct seq_softc *t)
-{
- t->timerrun = 1;
- getmicrotime(&t->timersub);
-}
-
-static void
-timer_continue(struct seq_softc *t)
-{
- struct timeval now;
-
- if (t->timerrun == 1)
- return;
- t->timerrun = 1;
- getmicrotime(&now);
- timevalsub(&now, &t->timerstop);
- timevaladd(&t->timersub, &now);
-}
-
-static void
-timer_stop(struct seq_softc *t)
-{
- t->timerrun = 0;
- getmicrotime(&t->timerstop);
-}
-
-static void
-timer_setvals(struct seq_softc *t, int tempo, int timerbase)
-{
- t->tempo = tempo;
- t->timerbase = timerbase;
-}
-
-static void
-timer_wait(struct seq_softc *t, int ticks, int wait_abs)
-{
- struct timeval now, when;
- int ret;
- unsigned long long i;
-
- while (t->timerrun == 0) {
- SEQ_DEBUG(2, printf("Timer wait when timer isn't running\n"));
- /*
- * The old sequencer used timeouts that only increased
- * the timer when the timer was running.
- * Hence the sequencer would stick (?) if the
- * timer was disabled.
- */
- cv_wait(&t->reset_cv, &t->seq_lock);
- if (t->playing == 0)
- return;
- }
-
- i = ticks * 60ull * 1000000ull / (t->tempo * t->timerbase);
-
- when.tv_sec = i / 1000000;
- when.tv_usec = i % 1000000;
-
-#if 0
- printf("timer_wait tempo %d timerbase %d ticks %d abs %d u_sec %llu\n",
- t->tempo, t->timerbase, ticks, wait_abs, i);
-#endif
-
- if (wait_abs != 0) {
- getmicrotime(&now);
- timevalsub(&now, &t->timersub);
- timevalsub(&when, &now);
- }
- if (when.tv_sec < 0 || when.tv_usec < 0) {
- SEQ_DEBUG(3,
- printf("seq_timer error negative time %lds.%06lds\n",
- (long)when.tv_sec, (long)when.tv_usec));
- return;
- }
- i = when.tv_sec * 1000000ull;
- i += when.tv_usec;
- i *= hz;
- i /= 1000000ull;
-#if 0
- printf("seq_timer usec %llu ticks %llu\n",
- when.tv_sec * 1000000ull + when.tv_usec, i);
-#endif
- t->waiting = 1;
- ret = cv_timedwait(&t->reset_cv, &t->seq_lock, i + 1);
- t->waiting = 0;
-
- if (ret != EWOULDBLOCK)
- SEQ_DEBUG(3, printf("seq_timer didn't timeout\n"));
-
-}
-
-static int
-timer_now(struct seq_softc *t)
-{
- struct timeval now;
- unsigned long long i;
- int ret;
-
- if (t->timerrun == 0)
- now = t->timerstop;
- else
- getmicrotime(&now);
-
- timevalsub(&now, &t->timersub);
-
- i = now.tv_sec * 1000000ull;
- i += now.tv_usec;
- i *= t->timerbase;
-/* i /= t->tempo; */
- i /= 1000000ull;
-
- ret = i;
- /*
- * printf("timer_now: %llu %d\n", i, ret);
- */
-
- return ret;
-}
-
-static void
-seq_eventthread(void *arg)
-{
- struct seq_softc *scp = arg;
- u_char event[EV_SZ];
-
- mtx_lock(&scp->seq_lock);
- SEQ_DEBUG(2, printf("seq_eventthread started\n"));
- while (scp->done == 0) {
-restart:
- while (scp->playing == 0) {
- cv_wait(&scp->state_cv, &scp->seq_lock);
- if (scp->done)
- goto done;
- }
-
- while (MIDIQ_EMPTY(scp->out_q)) {
- cv_broadcast(&scp->empty_cv);
- cv_wait(&scp->out_cv, &scp->seq_lock);
- if (scp->playing == 0)
- goto restart;
- if (scp->done)
- goto done;
- }
-
- MIDIQ_DEQ(scp->out_q, event, EV_SZ);
-
- if (MIDIQ_AVAIL(scp->out_q) < scp->out_water) {
- cv_broadcast(&scp->out_cv);
- selwakeup(&scp->out_sel);
- }
- seq_processevent(scp, event);
- }
-
-done:
- cv_broadcast(&scp->th_cv);
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(2, printf("seq_eventthread finished\n"));
- kproc_exit(0);
-}
-
-/*
- * seq_processevent: This maybe called by the event thread or the IOCTL
- * handler for queued and out of band events respectively.
- */
-static int
-seq_processevent(struct seq_softc *scp, u_char *event)
-{
- int ret;
- kobj_t m;
-
- ret = 0;
-
- if (event[0] == EV_SEQ_LOCAL)
- ret = seq_local(scp, event);
- else if (event[0] == EV_TIMING)
- ret = seq_timing(scp, event);
- else if (event[0] != EV_CHN_VOICE &&
- event[0] != EV_CHN_COMMON &&
- event[0] != EV_SYSEX &&
- event[0] != SEQ_MIDIPUTC) {
- ret = 1;
- SEQ_DEBUG(2, printf("seq_processevent not known %d\n",
- event[0]));
- } else if (seq_fetch_mid(scp, event[1], &m) != 0) {
- ret = 1;
- SEQ_DEBUG(2, printf("seq_processevent midi unit not found %d\n",
- event[1]));
- } else
- switch (event[0]) {
- case EV_CHN_VOICE:
- ret = seq_chnvoice(scp, m, event);
- break;
- case EV_CHN_COMMON:
- ret = seq_chncommon(scp, m, event);
- break;
- case EV_SYSEX:
- ret = seq_sysex(scp, m, event);
- break;
- case SEQ_MIDIPUTC:
- mtx_unlock(&scp->seq_lock);
- ret = SYNTH_WRITERAW(m, &event[2], 1);
- mtx_lock(&scp->seq_lock);
- break;
- }
- return ret;
-}
-
-static int
-seq_addunit(void)
-{
- struct seq_softc *scp;
- int ret;
- u_char *buf;
-
- /* Allocate the softc. */
- ret = ENOMEM;
- scp = malloc(sizeof(*scp), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (scp == NULL) {
- SEQ_DEBUG(1, printf("seq_addunit: softc allocation failed.\n"));
- goto err;
- }
- kobj_init((kobj_t)scp, &sequencer_class);
-
- buf = malloc(sizeof(*buf) * EV_SZ * 1024, M_TEMP, M_NOWAIT | M_ZERO);
- if (buf == NULL)
- goto err;
- MIDIQ_INIT(scp->in_q, buf, EV_SZ * 1024);
- buf = malloc(sizeof(*buf) * EV_SZ * 1024, M_TEMP, M_NOWAIT | M_ZERO);
- if (buf == NULL)
- goto err;
- MIDIQ_INIT(scp->out_q, buf, EV_SZ * 1024);
- ret = EINVAL;
-
- scp->midis = malloc(sizeof(kobj_t) * 32, M_TEMP, M_NOWAIT | M_ZERO);
- scp->midi_flags = malloc(sizeof(*scp->midi_flags) * 32, M_TEMP,
- M_NOWAIT | M_ZERO);
-
- if (scp->midis == NULL || scp->midi_flags == NULL)
- goto err;
-
- scp->flags = 0;
-
- mtx_init(&scp->seq_lock, "seqflq", NULL, 0);
- cv_init(&scp->state_cv, "seqstate");
- cv_init(&scp->empty_cv, "seqempty");
- cv_init(&scp->reset_cv, "seqtimer");
- cv_init(&scp->out_cv, "seqqout");
- cv_init(&scp->in_cv, "seqqin");
- cv_init(&scp->th_cv, "seqstart");
-
- /*
- * Init the damn timer
- */
-
- scp->mapper = midimapper_addseq(scp, &scp->unit, &scp->mapper_cookie);
- if (scp->mapper == NULL)
- goto err;
-
- scp->seqdev = make_dev(&seq_cdevsw,
- MIDIMKMINOR(scp->unit, SND_DEV_SEQ, 0), UID_ROOT,
- GID_WHEEL, 0666, "sequencer%d", scp->unit);
-
- scp->musicdev = make_dev(&seq_cdevsw,
- MIDIMKMINOR(scp->unit, SND_DEV_MUSIC, 0), UID_ROOT,
- GID_WHEEL, 0666, "music%d", scp->unit);
-
- if (scp->seqdev == NULL || scp->musicdev == NULL)
- goto err;
- /*
- * TODO: Add to list of sequencers this module provides
- */
-
- ret =
- kproc_create
- (seq_eventthread, scp, NULL, RFHIGHPID, 0,
- "sequencer %02d", scp->unit);
-
- if (ret)
- goto err;
-
- scp->seqdev->si_drv1 = scp->musicdev->si_drv1 = scp;
-
- SEQ_DEBUG(2, printf("sequencer %d created scp %p\n", scp->unit, scp));
-
- ret = 0;
-
- mtx_lock(&seqinfo_mtx);
- seqs[nseq++] = scp;
- mtx_unlock(&seqinfo_mtx);
-
- goto ok;
-
-err:
- if (scp != NULL) {
- if (scp->seqdev != NULL)
- destroy_dev(scp->seqdev);
- if (scp->musicdev != NULL)
- destroy_dev(scp->musicdev);
- /*
- * TODO: Destroy mutex and cv
- */
- if (scp->midis != NULL)
- free(scp->midis, M_TEMP);
- if (scp->midi_flags != NULL)
- free(scp->midi_flags, M_TEMP);
- if (scp->out_q.b)
- free(scp->out_q.b, M_TEMP);
- if (scp->in_q.b)
- free(scp->in_q.b, M_TEMP);
- free(scp, M_DEVBUF);
- }
-ok:
- return ret;
-}
-
-static int
-seq_delunit(int unit)
-{
- struct seq_softc *scp = seqs[unit];
- int i;
-
- //SEQ_DEBUG(4, printf("seq_delunit: %d\n", unit));
- SEQ_DEBUG(1, printf("seq_delunit: 1 \n"));
- mtx_lock(&scp->seq_lock);
-
- scp->playing = 0;
- scp->done = 1;
- cv_broadcast(&scp->out_cv);
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->reset_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 2 \n"));
- cv_wait(&scp->th_cv, &scp->seq_lock);
- SEQ_DEBUG(1, printf("seq_delunit: 3.0 \n"));
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(1, printf("seq_delunit: 3.1 \n"));
-
- cv_destroy(&scp->state_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 4 \n"));
- cv_destroy(&scp->empty_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 5 \n"));
- cv_destroy(&scp->reset_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 6 \n"));
- cv_destroy(&scp->out_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 7 \n"));
- cv_destroy(&scp->in_cv);
- SEQ_DEBUG(1, printf("seq_delunit: 8 \n"));
- cv_destroy(&scp->th_cv);
-
- SEQ_DEBUG(1, printf("seq_delunit: 10 \n"));
- if (scp->seqdev)
- destroy_dev(scp->seqdev);
- SEQ_DEBUG(1, printf("seq_delunit: 11 \n"));
- if (scp->musicdev)
- destroy_dev(scp->musicdev);
- SEQ_DEBUG(1, printf("seq_delunit: 12 \n"));
- scp->seqdev = scp->musicdev = NULL;
- if (scp->midis != NULL)
- free(scp->midis, M_TEMP);
- SEQ_DEBUG(1, printf("seq_delunit: 13 \n"));
- if (scp->midi_flags != NULL)
- free(scp->midi_flags, M_TEMP);
- SEQ_DEBUG(1, printf("seq_delunit: 14 \n"));
- free(scp->out_q.b, M_TEMP);
- SEQ_DEBUG(1, printf("seq_delunit: 15 \n"));
- free(scp->in_q.b, M_TEMP);
-
- SEQ_DEBUG(1, printf("seq_delunit: 16 \n"));
-
- mtx_destroy(&scp->seq_lock);
- SEQ_DEBUG(1, printf("seq_delunit: 17 \n"));
- free(scp, M_DEVBUF);
-
- mtx_lock(&seqinfo_mtx);
- for (i = unit; i < (nseq - 1); i++)
- seqs[i] = seqs[i + 1];
- nseq--;
- mtx_unlock(&seqinfo_mtx);
-
- return 0;
-}
-
-int
-seq_modevent(module_t mod, int type, void *data)
-{
- int retval, r;
-
- retval = 0;
-
- switch (type) {
- case MOD_LOAD:
- mtx_init(&seqinfo_mtx, "seqmod", NULL, 0);
- retval = seq_addunit();
- break;
-
- case MOD_UNLOAD:
- while (nseq) {
- r = seq_delunit(nseq - 1);
- if (r) {
- retval = r;
- break;
- }
- }
- if (nseq == 0) {
- retval = 0;
- mtx_destroy(&seqinfo_mtx);
- }
- break;
-
- default:
- break;
- }
-
- return retval;
-}
-
-static int
-seq_fetch_mid(struct seq_softc *scp, int unit, kobj_t *md)
-{
-
- if (unit >= scp->midi_number || unit < 0)
- return EINVAL;
-
- *md = scp->midis[unit];
-
- return 0;
-}
-
-int
-mseq_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
-{
- struct seq_softc *scp = i_dev->si_drv1;
- int i;
-
- if (scp == NULL)
- return ENXIO;
-
- SEQ_DEBUG(3, printf("seq_open: scp %p unit %d, flags 0x%x.\n",
- scp, scp->unit, flags));
-
- /*
- * Mark this device busy.
- */
-
- mtx_lock(&scp->seq_lock);
- if (scp->busy) {
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(2, printf("seq_open: unit %d is busy.\n", scp->unit));
- return EBUSY;
- }
- scp->fflags = flags;
- /*
- if ((scp->fflags & O_NONBLOCK) != 0)
- scp->flags |= SEQ_F_NBIO;
- */
- scp->music = MIDIDEV(i_dev) == SND_DEV_MUSIC;
-
- /*
- * Enumerate the available midi devices
- */
- scp->midi_number = 0;
- scp->maxunits = midimapper_open(scp->mapper, &scp->mapper_cookie);
-
- if (scp->maxunits == 0)
- SEQ_DEBUG(2, printf("seq_open: no midi devices\n"));
-
- for (i = 0; i < scp->maxunits; i++) {
- scp->midis[scp->midi_number] =
- midimapper_fetch_synth(scp->mapper, scp->mapper_cookie, i);
- if (scp->midis[scp->midi_number]) {
- if (SYNTH_OPEN(scp->midis[scp->midi_number], scp,
- scp->fflags) != 0)
- scp->midis[scp->midi_number] = NULL;
- else {
- scp->midi_flags[scp->midi_number] =
- SYNTH_QUERY(scp->midis[scp->midi_number]);
- scp->midi_number++;
- }
- }
- }
-
- timer_setvals(scp, 60, 100);
-
- timer_start(scp);
- timer_stop(scp);
- /*
- * actually, if we're in rdonly mode, we should start the timer
- */
- /*
- * TODO: Handle recording now
- */
-
- scp->out_water = MIDIQ_SIZE(scp->out_q) / 2;
-
- scp->busy = 1;
- mtx_unlock(&scp->seq_lock);
-
- SEQ_DEBUG(2, printf("seq_open: opened, mode %s.\n",
- scp->music ? "music" : "sequencer"));
- SEQ_DEBUG(2,
- printf("Sequencer %d %p opened maxunits %d midi_number %d:\n",
- scp->unit, scp, scp->maxunits, scp->midi_number));
- for (i = 0; i < scp->midi_number; i++)
- SEQ_DEBUG(3, printf(" midi %d %p\n", i, scp->midis[i]));
-
- return 0;
-}
-
-/*
- * mseq_close
- */
-int
-mseq_close(struct cdev *i_dev, int flags, int mode, struct thread *td)
-{
- int i;
- struct seq_softc *scp = i_dev->si_drv1;
- int ret;
-
- if (scp == NULL)
- return ENXIO;
-
- SEQ_DEBUG(2, printf("seq_close: unit %d.\n", scp->unit));
-
- mtx_lock(&scp->seq_lock);
-
- ret = ENXIO;
- if (scp->busy == 0)
- goto err;
-
- seq_reset(scp);
- seq_sync(scp);
-
- for (i = 0; i < scp->midi_number; i++)
- if (scp->midis[i])
- SYNTH_CLOSE(scp->midis[i]);
-
- midimapper_close(scp->mapper, scp->mapper_cookie);
-
- timer_stop(scp);
-
- scp->busy = 0;
- ret = 0;
-
-err:
- SEQ_DEBUG(3, printf("seq_close: closed ret = %d.\n", ret));
- mtx_unlock(&scp->seq_lock);
- return ret;
-}
-
-int
-mseq_read(struct cdev *i_dev, struct uio *uio, int ioflag)
-{
- int retval, used;
- struct seq_softc *scp = i_dev->si_drv1;
-
-#define SEQ_RSIZE 32
- u_char buf[SEQ_RSIZE];
-
- if (scp == NULL)
- return ENXIO;
-
- SEQ_DEBUG(7, printf("mseq_read: unit %d, resid %zd.\n",
- scp->unit, uio->uio_resid));
-
- mtx_lock(&scp->seq_lock);
- if ((scp->fflags & FREAD) == 0) {
- SEQ_DEBUG(2, printf("mseq_read: unit %d is not for reading.\n",
- scp->unit));
- retval = EIO;
- goto err1;
- }
- /*
- * Begin recording.
- */
- /*
- * if ((scp->flags & SEQ_F_READING) == 0)
- */
- /*
-	 * TODO: start recording if not already
- */
-
- /*
- * I think the semantics are to return as soon
- * as possible.
-	 * On second thought, it doesn't seem like midimountain
-	 * expects that at all.
- * TODO: Look up in some sort of spec
- */
-
- while (uio->uio_resid > 0) {
- while (MIDIQ_EMPTY(scp->in_q)) {
- retval = EWOULDBLOCK;
- /*
- * I wish I knew which one to care about
- */
-
- if (scp->fflags & O_NONBLOCK)
- goto err1;
- if (ioflag & O_NONBLOCK)
- goto err1;
-
- retval = cv_wait_sig(&scp->in_cv, &scp->seq_lock);
- if (retval != 0)
- goto err1;
- }
-
- used = MIN(MIDIQ_LEN(scp->in_q), uio->uio_resid);
- used = MIN(used, SEQ_RSIZE);
-
- SEQ_DEBUG(8, printf("midiread: uiomove cc=%d\n", used));
- MIDIQ_DEQ(scp->in_q, buf, used);
- mtx_unlock(&scp->seq_lock);
- retval = uiomove(buf, used, uio);
- mtx_lock(&scp->seq_lock);
- if (retval)
- goto err1;
- }
-
- retval = 0;
-err1:
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(6, printf("mseq_read: ret %d, resid %zd.\n",
- retval, uio->uio_resid));
-
- return retval;
-}
-
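mseq_read and mseq_write lean entirely on the MIDIQ_* macros (EMPTY, LEN, AVAIL, ENQ, DEQ). The real macros live elsewhere in the midi code; the following user-space ring buffer is only a sketch of the semantics the two routines assume:

/* Hedged user-space analogue of the MIDIQ_* semantics; layout illustrative. */
#include <stdio.h>
#include <string.h>

#define QSIZE 16

struct byteq {
	unsigned char b[QSIZE];
	int head;	/* next slot to dequeue */
	int len;	/* bytes currently queued */
};

#define Q_EMPTY(q)	((q).len == 0)
#define Q_LEN(q)	((q).len)
#define Q_AVAIL(q)	(QSIZE - (q).len)

static void
q_enq(struct byteq *q, const unsigned char *src, int n)
{
	int i;

	for (i = 0; i < n; i++)
		q->b[(q->head + q->len + i) % QSIZE] = src[i];
	q->len += n;
}

static void
q_deq(struct byteq *q, unsigned char *dst, int n)
{
	int i;

	for (i = 0; i < n; i++)
		dst[i] = q->b[(q->head + i) % QSIZE];
	q->head = (q->head + n) % QSIZE;
	q->len -= n;
}

int
main(void)
{
	struct byteq q;
	unsigned char ev[4] = { 0x90, 60, 100, 0 }, out[4];

	memset(&q, 0, sizeof(q));
	q_enq(&q, ev, 4);
	printf("len %d avail %d\n", Q_LEN(q), Q_AVAIL(q));
	q_deq(&q, out, 4);
	printf("dequeued note %d, empty=%d\n", out[1], Q_EMPTY(q));
	return (0);
}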
-int
-mseq_write(struct cdev *i_dev, struct uio *uio, int ioflag)
-{
- u_char event[EV_SZ], newevent[EV_SZ], ev_code;
- struct seq_softc *scp = i_dev->si_drv1;
- int retval;
- int used;
-
-	if (scp == NULL)
-		return ENXIO;
-
-	SEQ_DEBUG(7, printf("seq_write: unit %d, resid %zd.\n",
-	    scp->unit, uio->uio_resid));
-
- mtx_lock(&scp->seq_lock);
-
- if ((scp->fflags & FWRITE) == 0) {
- SEQ_DEBUG(2, printf("seq_write: unit %d is not for writing.\n",
- scp->unit));
- retval = EIO;
- goto err0;
- }
- while (uio->uio_resid > 0) {
- while (MIDIQ_AVAIL(scp->out_q) == 0) {
- retval = EWOULDBLOCK;
- if (scp->fflags & O_NONBLOCK)
- goto err0;
- if (ioflag & O_NONBLOCK)
- goto err0;
- SEQ_DEBUG(8, printf("seq_write cvwait\n"));
-
- scp->playing = 1;
- cv_broadcast(&scp->out_cv);
- cv_broadcast(&scp->state_cv);
-
- retval = cv_wait_sig(&scp->out_cv, &scp->seq_lock);
- /*
-			 * We slept; things may have changed since the last
-			 * dying check.
- */
- if (retval != 0)
- goto err0;
-#if 0
- /*
- * Useless test
- */
- if (scp != i_dev->si_drv1)
- retval = ENXIO;
-#endif
- }
-
- used = MIN(uio->uio_resid, 4);
-
- SEQ_DEBUG(8, printf("seqout: resid %zd len %jd avail %jd\n",
- uio->uio_resid, (intmax_t)MIDIQ_LEN(scp->out_q),
- (intmax_t)MIDIQ_AVAIL(scp->out_q)));
-
- if (used != 4) {
- retval = ENXIO;
- goto err0;
- }
- mtx_unlock(&scp->seq_lock);
- retval = uiomove(event, used, uio);
- mtx_lock(&scp->seq_lock);
- if (retval)
- goto err0;
-
- ev_code = event[0];
- SEQ_DEBUG(8, printf("seq_write: unit %d, event %s.\n",
- scp->unit, midi_cmdname(ev_code, cmdtab_seqevent)));
-
- /* Have a look at the event code. */
- if (ev_code == SEQ_FULLSIZE) {
- /*
- * TODO: restore code for SEQ_FULLSIZE
- */
-#if 0
- /*
- * A long event, these are the patches/samples for a
- * synthesizer.
- */
- midiunit = *(u_short *)&event[2];
- mtx_lock(&sd->seq_lock);
- ret = lookup_mididev(scp, midiunit, LOOKUP_OPEN, &md);
- mtx_unlock(&sd->seq_lock);
- if (ret != 0)
- return (ret);
-
- SEQ_DEBUG(printf("seq_write: loading a patch to the unit %d.\n", midiunit));
-
- ret = md->synth.loadpatch(md, *(short *)&event[0], buf,
- p + 4, count, 0);
- return (ret);
-#else
- /*
- * For now, just flush the darn buffer
- */
- SEQ_DEBUG(2,
-			    printf("seq_write: SEQ_FULLSIZE flushing buffer.\n"));
- while (uio->uio_resid > 0) {
- mtx_unlock(&scp->seq_lock);
- retval = uiomove(event, MIN(EV_SZ, uio->uio_resid), uio);
- mtx_lock(&scp->seq_lock);
- if (retval)
- goto err0;
- }
- retval = 0;
- goto err0;
-#endif
- }
- retval = EINVAL;
- if (ev_code >= 128) {
- int error;
-
- /*
- * Some sort of an extended event. The size is eight
-			 * bytes.  Scoop up the extra info.
- */
- if (scp->music && ev_code == SEQ_EXTENDED) {
- SEQ_DEBUG(2, printf("seq_write: invalid level two event %x.\n", ev_code));
- goto err0;
- }
- mtx_unlock(&scp->seq_lock);
- if (uio->uio_resid < 4)
- error = EINVAL;
- else
- error = uiomove((caddr_t)&event[4], 4, uio);
- mtx_lock(&scp->seq_lock);
- if (error) {
- SEQ_DEBUG(2,
- printf("seq_write: user memory mangled?\n"));
- goto err0;
- }
- } else {
- /*
- * Size four event.
- */
- if (scp->music) {
- SEQ_DEBUG(2, printf("seq_write: four byte event in music mode.\n"));
- goto err0;
- }
- }
- if (ev_code == SEQ_MIDIPUTC) {
- /*
- * TODO: event[2] is unit number to receive char.
- * Range check it.
- */
- }
- if (scp->music) {
-#ifdef not_ever_ever
- if (event[0] == EV_TIMING &&
- (event[1] == TMR_START || event[1] == TMR_STOP)) {
- /*
-				 * For now, try to make midimountain work by
- * forcing these events to be processed
- * immediately.
- */
- seq_processevent(scp, event);
- } else
- MIDIQ_ENQ(scp->out_q, event, EV_SZ);
-#else
- MIDIQ_ENQ(scp->out_q, event, EV_SZ);
-#endif
- } else {
- if (seq_convertold(event, newevent) > 0)
- MIDIQ_ENQ(scp->out_q, newevent, EV_SZ);
-#if 0
- else
- goto err0;
-#endif
- }
- }
-
- scp->playing = 1;
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->out_cv);
-
- retval = 0;
-
-err0:
- SEQ_DEBUG(6,
- printf("seq_write done: leftover buffer length %zd retval %d\n",
- uio->uio_resid, retval));
- mtx_unlock(&scp->seq_lock);
- return retval;
-}
-
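The write loop above always transfers events in 4-byte chunks: codes below 128 are complete old-style events, while codes of 128 and up pull in a second 4-byte chunk to form the full 8-byte record. A sketch of that framing over an in-memory stream (the event codes here are illustrative, not the soundcard.h values):

/* Sketch of the 4-byte / 8-byte framing rule; stream contents made up. */
#include <stdio.h>
#include <string.h>

#define EV_SZ 8

int
main(void)
{
	/* One old-style 4-byte event, then one 8-byte extended event. */
	unsigned char stream[] = {
		5, 1, 60, 100,			/* code < 128: 4-byte event */
		0x92, 0, 1, 60, 100, 0, 0, 0	/* code >= 128: 8-byte event */
	};
	unsigned char event[EV_SZ];
	size_t off = 0;

	while (off + 4 <= sizeof(stream)) {
		memcpy(event, &stream[off], 4);
		off += 4;
		if (event[0] >= 128) {	/* extended: scoop four more bytes */
			memcpy(&event[4], &stream[off], 4);
			off += 4;
		}
		printf("event code %u (%s)\n", event[0],
		    event[0] >= 128 ? "8-byte" : "4-byte");
	}
	return (0);
}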
-int
-mseq_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
- struct thread *td)
-{
- int midiunit, ret, tmp;
- struct seq_softc *scp = i_dev->si_drv1;
- struct synth_info *synthinfo;
- struct midi_info *midiinfo;
- u_char event[EV_SZ];
- u_char newevent[EV_SZ];
-
- kobj_t md;
-
- /*
- * struct snd_size *sndsize;
- */
-
- if (scp == NULL)
- return ENXIO;
-
- SEQ_DEBUG(6, printf("seq_ioctl: unit %d, cmd %s.\n",
- scp->unit, midi_cmdname(cmd, cmdtab_seqioctl)));
-
- ret = 0;
-
- switch (cmd) {
- case SNDCTL_SEQ_GETTIME:
- /*
- * ioctl needed by libtse
- */
- mtx_lock(&scp->seq_lock);
- *(int *)arg = timer_now(scp);
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(6, printf("seq_ioctl: gettime %d.\n", *(int *)arg));
- ret = 0;
- break;
- case SNDCTL_TMR_METRONOME:
- /* fallthrough */
- case SNDCTL_TMR_SOURCE:
- /*
- * Not implemented
- */
- ret = 0;
- break;
- case SNDCTL_TMR_TEMPO:
- event[1] = TMR_TEMPO;
- event[4] = *(int *)arg & 0xFF;
- event[5] = (*(int *)arg >> 8) & 0xFF;
- event[6] = (*(int *)arg >> 16) & 0xFF;
- event[7] = (*(int *)arg >> 24) & 0xFF;
- goto timerevent;
- case SNDCTL_TMR_TIMEBASE:
- event[1] = TMR_TIMERBASE;
- event[4] = *(int *)arg & 0xFF;
- event[5] = (*(int *)arg >> 8) & 0xFF;
- event[6] = (*(int *)arg >> 16) & 0xFF;
- event[7] = (*(int *)arg >> 24) & 0xFF;
- goto timerevent;
- case SNDCTL_TMR_START:
- event[1] = TMR_START;
- goto timerevent;
- case SNDCTL_TMR_STOP:
- event[1] = TMR_STOP;
- goto timerevent;
- case SNDCTL_TMR_CONTINUE:
- event[1] = TMR_CONTINUE;
-timerevent:
- event[0] = EV_TIMING;
- mtx_lock(&scp->seq_lock);
- if (!scp->music) {
- ret = EINVAL;
- mtx_unlock(&scp->seq_lock);
- break;
- }
- seq_processevent(scp, event);
- mtx_unlock(&scp->seq_lock);
- break;
- case SNDCTL_TMR_SELECT:
- SEQ_DEBUG(2,
- printf("seq_ioctl: SNDCTL_TMR_SELECT not supported\n"));
- ret = EINVAL;
- break;
- case SNDCTL_SEQ_SYNC:
- if (mode == O_RDONLY) {
- ret = 0;
- break;
- }
- mtx_lock(&scp->seq_lock);
- ret = seq_sync(scp);
- mtx_unlock(&scp->seq_lock);
- break;
- case SNDCTL_SEQ_PANIC:
- /* fallthrough */
- case SNDCTL_SEQ_RESET:
- /*
- * SNDCTL_SEQ_PANIC == SNDCTL_SEQ_RESET
- */
- mtx_lock(&scp->seq_lock);
- seq_reset(scp);
- mtx_unlock(&scp->seq_lock);
- ret = 0;
- break;
- case SNDCTL_SEQ_TESTMIDI:
- mtx_lock(&scp->seq_lock);
- /*
- * TODO: SNDCTL_SEQ_TESTMIDI now means "can I write to the
- * device?".
- */
- mtx_unlock(&scp->seq_lock);
- break;
-#if 0
- case SNDCTL_SEQ_GETINCOUNT:
- if (mode == O_WRONLY)
- *(int *)arg = 0;
- else {
- mtx_lock(&scp->seq_lock);
- *(int *)arg = scp->in_q.rl;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(printf("seq_ioctl: incount %d.\n",
- *(int *)arg));
- }
- ret = 0;
- break;
- case SNDCTL_SEQ_GETOUTCOUNT:
- if (mode == O_RDONLY)
- *(int *)arg = 0;
- else {
- mtx_lock(&scp->seq_lock);
- *(int *)arg = scp->out_q.fl;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(printf("seq_ioctl: outcount %d.\n",
- *(int *)arg));
- }
- ret = 0;
- break;
-#endif
- case SNDCTL_SEQ_CTRLRATE:
- if (*(int *)arg != 0) {
- ret = EINVAL;
- break;
- }
- mtx_lock(&scp->seq_lock);
- *(int *)arg = scp->timerbase;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(3, printf("seq_ioctl: ctrlrate %d.\n", *(int *)arg));
- ret = 0;
- break;
- /*
- * TODO: ioctl SNDCTL_SEQ_RESETSAMPLES
- */
-#if 0
- case SNDCTL_SEQ_RESETSAMPLES:
- mtx_lock(&scp->seq_lock);
- ret = lookup_mididev(scp, *(int *)arg, LOOKUP_OPEN, &md);
- mtx_unlock(&scp->seq_lock);
- if (ret != 0)
- break;
- ret = midi_ioctl(MIDIMKDEV(major(i_dev), *(int *)arg,
- SND_DEV_MIDIN), cmd, arg, mode, td);
- break;
-#endif
- case SNDCTL_SEQ_NRSYNTHS:
- mtx_lock(&scp->seq_lock);
- *(int *)arg = scp->midi_number;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(3, printf("seq_ioctl: synths %d.\n", *(int *)arg));
- ret = 0;
- break;
- case SNDCTL_SEQ_NRMIDIS:
- mtx_lock(&scp->seq_lock);
- if (scp->music)
- *(int *)arg = 0;
- else {
- /*
-			 * TODO: count the number of devices that can WRITERAW
- */
- *(int *)arg = scp->midi_number;
- }
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(3, printf("seq_ioctl: midis %d.\n", *(int *)arg));
- ret = 0;
- break;
- /*
- * TODO: ioctl SNDCTL_SYNTH_MEMAVL
- */
-#if 0
- case SNDCTL_SYNTH_MEMAVL:
- mtx_lock(&scp->seq_lock);
- ret = lookup_mididev(scp, *(int *)arg, LOOKUP_OPEN, &md);
- mtx_unlock(&scp->seq_lock);
- if (ret != 0)
- break;
- ret = midi_ioctl(MIDIMKDEV(major(i_dev), *(int *)arg,
- SND_DEV_MIDIN), cmd, arg, mode, td);
- break;
-#endif
- case SNDCTL_SEQ_OUTOFBAND:
- for (ret = 0; ret < EV_SZ; ret++)
-			event[ret] = (u_char)arg[ret];
-
- mtx_lock(&scp->seq_lock);
- if (scp->music)
- ret = seq_processevent(scp, event);
- else {
- if (seq_convertold(event, newevent) > 0)
- ret = seq_processevent(scp, newevent);
- else
- ret = EINVAL;
- }
- mtx_unlock(&scp->seq_lock);
- break;
- case SNDCTL_SYNTH_INFO:
- synthinfo = (struct synth_info *)arg;
- midiunit = synthinfo->device;
- mtx_lock(&scp->seq_lock);
- if (seq_fetch_mid(scp, midiunit, &md) == 0) {
- bzero(synthinfo, sizeof(*synthinfo));
- synthinfo->name[0] = 'f';
- synthinfo->name[1] = 'a';
- synthinfo->name[2] = 'k';
- synthinfo->name[3] = 'e';
- synthinfo->name[4] = 's';
- synthinfo->name[5] = 'y';
- synthinfo->name[6] = 'n';
- synthinfo->name[7] = 't';
- synthinfo->name[8] = 'h';
- synthinfo->device = midiunit;
- synthinfo->synth_type = SYNTH_TYPE_MIDI;
- synthinfo->capabilities = scp->midi_flags[midiunit];
- ret = 0;
- } else
- ret = EINVAL;
- mtx_unlock(&scp->seq_lock);
- break;
- case SNDCTL_MIDI_INFO:
- midiinfo = (struct midi_info *)arg;
- midiunit = midiinfo->device;
- mtx_lock(&scp->seq_lock);
- if (seq_fetch_mid(scp, midiunit, &md) == 0) {
- bzero(midiinfo, sizeof(*midiinfo));
- midiinfo->name[0] = 'f';
- midiinfo->name[1] = 'a';
- midiinfo->name[2] = 'k';
- midiinfo->name[3] = 'e';
- midiinfo->name[4] = 'm';
- midiinfo->name[5] = 'i';
- midiinfo->name[6] = 'd';
- midiinfo->name[7] = 'i';
- midiinfo->device = midiunit;
- midiinfo->capabilities = scp->midi_flags[midiunit];
- /*
- * TODO: What devtype?
- */
- midiinfo->dev_type = 0x01;
- ret = 0;
- } else
- ret = EINVAL;
- mtx_unlock(&scp->seq_lock);
- break;
- case SNDCTL_SEQ_THRESHOLD:
- mtx_lock(&scp->seq_lock);
- RANGE(*(int *)arg, 1, MIDIQ_SIZE(scp->out_q) - 1);
- scp->out_water = *(int *)arg;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(3, printf("seq_ioctl: water %d.\n", *(int *)arg));
- ret = 0;
- break;
- case SNDCTL_MIDI_PRETIME:
- tmp = *(int *)arg;
- if (tmp < 0)
- tmp = 0;
- mtx_lock(&scp->seq_lock);
- scp->pre_event_timeout = (hz * tmp) / 10;
- *(int *)arg = scp->pre_event_timeout;
- mtx_unlock(&scp->seq_lock);
- SEQ_DEBUG(3, printf("seq_ioctl: pretime %d.\n", *(int *)arg));
- ret = 0;
- break;
- case SNDCTL_FM_4OP_ENABLE:
- case SNDCTL_PMGR_IFACE:
- case SNDCTL_PMGR_ACCESS:
- /*
- * Patch manager and fm are ded, ded, ded.
- */
- /* fallthrough */
- default:
- /*
- * TODO: Consider ioctl default case.
- * Old code used to
- * if ((scp->fflags & O_ACCMODE) == FREAD) {
- * ret = EIO;
- * break;
- * }
- * Then pass on the ioctl to device 0
- */
- SEQ_DEBUG(2,
-		    printf("seq_ioctl: unsupported IOCTL %lu.\n", cmd));
- ret = EINVAL;
- break;
- }
-
- return ret;
-}
-
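SNDCTL_TMR_TEMPO and SNDCTL_TMR_TIMEBASE above pack their int argument into event[4..7] least-significant byte first, and seq_timing later reassembles it with matching shifts. A minimal round-trip of that packing:

/* Round-trip demo of the LSB-first int packing used by the timer ioctls. */
#include <stdio.h>

int
main(void)
{
	unsigned char event[8] = { 0 };
	int arg = 120, param;

	event[4] = arg & 0xFF;
	event[5] = (arg >> 8) & 0xFF;
	event[6] = (arg >> 16) & 0xFF;
	event[7] = (arg >> 24) & 0xFF;

	param = event[4] + (event[5] << 8) + (event[6] << 16) +
	    (event[7] << 24);
	printf("packed %d, unpacked %d\n", arg, param);
	return (0);
}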
-int
-mseq_poll(struct cdev *i_dev, int events, struct thread *td)
-{
- int ret, lim;
- struct seq_softc *scp = i_dev->si_drv1;
-
-	SEQ_DEBUG(3, printf("seq_poll: unit %d.\n", scp->unit));
-
- mtx_lock(&scp->seq_lock);
-
- ret = 0;
-
- /* Look up the appropriate queue and select it. */
- if ((events & (POLLOUT | POLLWRNORM)) != 0) {
- /* Start playing. */
- scp->playing = 1;
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->out_cv);
-
- lim = scp->out_water;
-
- if (MIDIQ_AVAIL(scp->out_q) < lim)
-			/* Not enough space, record select. */
- selrecord(td, &scp->out_sel);
- else
- /* We can write now. */
- ret |= events & (POLLOUT | POLLWRNORM);
- }
- if ((events & (POLLIN | POLLRDNORM)) != 0) {
- /* TODO: Start recording. */
-
- /* Find out the boundary. */
- lim = 1;
- if (MIDIQ_LEN(scp->in_q) < lim)
- /* No data ready, record select. */
- selrecord(td, &scp->in_sel);
- else
- /* We can read now. */
- ret |= events & (POLLIN | POLLRDNORM);
- }
- mtx_unlock(&scp->seq_lock);
-
- return (ret);
-}
-
-#if 0
-static void
-sein_qtr(void *p, void /* mididev_info */ *md)
-{
- struct seq_softc *scp;
-
- scp = (struct seq_softc *)p;
-
- mtx_lock(&scp->seq_lock);
-
- /* Restart playing if we have the data to output. */
- if (scp->queueout_pending)
- seq_callback(scp, SEQ_CB_START | SEQ_CB_WR);
- /* Check the midi device if we are reading. */
- if ((scp->flags & SEQ_F_READING) != 0)
- seq_midiinput(scp, md);
-
- mtx_unlock(&scp->seq_lock);
-}
-
-#endif
-/*
- * seq_convertold
- * Was the old playevent.  Use this to convert an old-style
- * /dev/sequencer event to a /dev/music event.
- */
-static int
-seq_convertold(u_char *event, u_char *out)
-{
- int used;
- u_char dev, chn, note, vel;
-
- out[0] = out[1] = out[2] = out[3] = out[4] = out[5] = out[6] =
- out[7] = 0;
-
- dev = 0;
- chn = event[1];
- note = event[2];
- vel = event[3];
-
- used = 0;
-
-restart:
- /*
- * TODO: Debug statement
- */
- switch (event[0]) {
- case EV_TIMING:
- case EV_CHN_VOICE:
- case EV_CHN_COMMON:
- case EV_SYSEX:
- case EV_SEQ_LOCAL:
- out[0] = event[0];
- out[1] = event[1];
- out[2] = event[2];
- out[3] = event[3];
- out[4] = event[4];
- out[5] = event[5];
- out[6] = event[6];
- out[7] = event[7];
- used += 8;
- break;
- case SEQ_NOTEOFF:
- out[0] = EV_CHN_VOICE;
- out[1] = dev;
- out[2] = MIDI_NOTEOFF;
- out[3] = chn;
- out[4] = note;
- out[5] = 255;
- used += 4;
- break;
-
- case SEQ_NOTEON:
- out[0] = EV_CHN_VOICE;
- out[1] = dev;
- out[2] = MIDI_NOTEON;
- out[3] = chn;
- out[4] = note;
- out[5] = vel;
- used += 4;
- break;
-
- /*
-	 * wait delay = event[2] + (event[3] << 8) + (event[4] << 16)
- */
-
- case SEQ_PGMCHANGE:
- out[0] = EV_CHN_COMMON;
- out[1] = dev;
- out[2] = MIDI_PGM_CHANGE;
- out[3] = chn;
- out[4] = note;
- out[5] = vel;
- used += 4;
- break;
-/*
- out[0] = EV_TIMING;
- out[1] = dev;
- out[2] = MIDI_PGM_CHANGE;
- out[3] = chn;
- out[4] = note;
- out[5] = vel;
- SEQ_DEBUG(4,printf("seq_playevent: synctimer\n"));
- break;
-*/
-
- case SEQ_MIDIPUTC:
- SEQ_DEBUG(4,
- printf("seq_playevent: put data 0x%02x, unit %d.\n",
- event[1], event[2]));
- /*
- * Pass through to the midi device.
- * device = event[2]
- * data = event[1]
- */
- out[0] = SEQ_MIDIPUTC;
- out[1] = dev;
- out[2] = chn;
- used += 4;
- break;
-#ifdef notyet
- case SEQ_ECHO:
- /*
- * This isn't handled here yet because I don't know if I can
-		 * just use four-byte events.  There might be consequences
-		 * in the _read routine.
- */
- if (seq_copytoinput(scp, event, 4) == EAGAIN) {
- ret = QUEUEFULL;
- break;
- }
- ret = MORE;
- break;
-#endif
- case SEQ_EXTENDED:
- switch (event[1]) {
- case SEQ_NOTEOFF:
- case SEQ_NOTEON:
- case SEQ_PGMCHANGE:
- event++;
- used = 4;
- goto restart;
- break;
- case SEQ_AFTERTOUCH:
- /*
- * SYNTH_AFTERTOUCH(md, event[3], event[4])
- */
- case SEQ_BALANCE:
- /*
- * SYNTH_PANNING(md, event[3], (char)event[4])
- */
- case SEQ_CONTROLLER:
- /*
- * SYNTH_CONTROLLER(md, event[3], event[4], *(short *)&event[5])
- */
- case SEQ_VOLMODE:
- /*
- * SYNTH_VOLUMEMETHOD(md, event[3])
- */
- default:
- SEQ_DEBUG(2,
-			    printf("seq_convertold: SEQ_EXTENDED type %d "
-			    "not handled\n", event[1]));
- break;
- }
- break;
- case SEQ_WAIT:
- out[0] = EV_TIMING;
- out[1] = TMR_WAIT_REL;
- out[4] = event[2];
- out[5] = event[3];
- out[6] = event[4];
-
- SEQ_DEBUG(5, printf("SEQ_WAIT %d",
-		    event[2] + (event[3] << 8) + (event[4] << 16)));
-
- used += 4;
- break;
-
- case SEQ_ECHO:
- case SEQ_SYNCTIMER:
- case SEQ_PRIVATE:
- default:
- SEQ_DEBUG(2,
- printf("seq_convertold: event type %d not handled %d %d %d\n",
- event[0], event[1], event[2], event[3]));
- break;
- }
- return used;
-}
-
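For the common SEQ_NOTEON case, the conversion above maps a 4-byte [cmd, chn, note, vel] record onto an 8-byte EV_CHN_VOICE record with the device defaulting to 0. A stand-alone rendering of just that case; the SEQ_/EV_ constants are defined locally as placeholders rather than taken from soundcard.h:

/* Stand-alone sketch of the SEQ_NOTEON arm of seq_convertold. */
#include <stdio.h>
#include <string.h>

#define SEQ_NOTEON	1	/* placeholder value */
#define EV_CHN_VOICE	0x93	/* placeholder value */
#define MIDI_NOTEON	0x90

static int
convert_noteon(const unsigned char *event, unsigned char *out)
{
	memset(out, 0, 8);
	if (event[0] != SEQ_NOTEON)
		return (0);
	out[0] = EV_CHN_VOICE;
	out[1] = 0;		/* device defaults to 0 */
	out[2] = MIDI_NOTEON;
	out[3] = event[1];	/* channel */
	out[4] = event[2];	/* note */
	out[5] = event[3];	/* velocity */
	return (4);		/* four input bytes consumed */
}

int
main(void)
{
	unsigned char ev[4] = { SEQ_NOTEON, 0, 60, 100 }, out[8];
	int used = convert_noteon(ev, out);

	printf("used %d: cmd 0x%02x chn %d note %d vel %d\n",
	    used, out[2], out[3], out[4], out[5]);
	return (0);
}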
-/*
- * Writing to the sequencer buffer never blocks; input which
- * cannot be queued is dropped.
- */
-void
-seq_copytoinput(struct seq_softc *scp, u_char *event, int len)
-{
-
- mtx_assert(&scp->seq_lock, MA_OWNED);
-
- if (MIDIQ_AVAIL(scp->in_q) < len) {
- /*
- * ENOROOM? EINPUTDROPPED? ETOUGHLUCK?
- */
- SEQ_DEBUG(2, printf("seq_copytoinput: queue full\n"));
- } else {
- MIDIQ_ENQ(scp->in_q, event, len);
- selwakeup(&scp->in_sel);
- cv_broadcast(&scp->in_cv);
- }
-
-}
-
-static int
-seq_chnvoice(struct seq_softc *scp, kobj_t md, u_char *event)
-{
- int ret, voice;
- u_char cmd, chn, note, parm;
-
- ret = 0;
- cmd = event[2];
- chn = event[3];
- note = event[4];
- parm = event[5];
-
- mtx_assert(&scp->seq_lock, MA_OWNED);
-
- SEQ_DEBUG(5, printf("seq_chnvoice: unit %d, dev %d, cmd %s,"
- " chn %d, note %d, parm %d.\n", scp->unit, event[1],
- midi_cmdname(cmd, cmdtab_seqcv), chn, note, parm));
-
- voice = SYNTH_ALLOC(md, chn, note);
-
- mtx_unlock(&scp->seq_lock);
-
- switch (cmd) {
- case MIDI_NOTEON:
- if (note < 128 || note == 255) {
-#if 0
- if (scp->music && chn == 9) {
- /*
- * This channel is a percussion. The note
- * number is the patch number.
- */
- /*
- mtx_unlock(&scp->seq_lock);
- if (SYNTH_SETINSTR(md, voice, 128 + note)
- == EAGAIN) {
- mtx_lock(&scp->seq_lock);
- return (QUEUEFULL);
- }
- mtx_lock(&scp->seq_lock);
- */
- note = 60; /* Middle C. */
- }
-#endif
- if (scp->music) {
- /*
- mtx_unlock(&scp->seq_lock);
- if (SYNTH_SETUPVOICE(md, voice, chn)
- == EAGAIN) {
- mtx_lock(&scp->seq_lock);
- return (QUEUEFULL);
- }
- mtx_lock(&scp->seq_lock);
- */
- }
- SYNTH_STARTNOTE(md, voice, note, parm);
- }
- break;
- case MIDI_NOTEOFF:
- SYNTH_KILLNOTE(md, voice, note, parm);
- break;
- case MIDI_KEY_PRESSURE:
- SYNTH_AFTERTOUCH(md, voice, parm);
- break;
- default:
- ret = 1;
- SEQ_DEBUG(2, printf("seq_chnvoice event type %d not handled\n",
- event[1]));
- break;
- }
-
- mtx_lock(&scp->seq_lock);
- return ret;
-}
-
-static int
-seq_chncommon(struct seq_softc *scp, kobj_t md, u_char *event)
-{
- int ret;
- u_short w14;
- u_char cmd, chn, p1;
-
- ret = 0;
- cmd = event[2];
- chn = event[3];
- p1 = event[4];
- w14 = *(u_short *)&event[6];
-
- SEQ_DEBUG(5, printf("seq_chncommon: unit %d, dev %d, cmd %s, chn %d,"
- " p1 %d, w14 %d.\n", scp->unit, event[1],
- midi_cmdname(cmd, cmdtab_seqccmn), chn, p1, w14));
- mtx_unlock(&scp->seq_lock);
- switch (cmd) {
- case MIDI_PGM_CHANGE:
- SEQ_DEBUG(4, printf("seq_chncommon pgmchn chn %d pg %d\n",
- chn, p1));
- SYNTH_SETINSTR(md, chn, p1);
- break;
- case MIDI_CTL_CHANGE:
- SEQ_DEBUG(4, printf("seq_chncommon ctlch chn %d pg %d %d\n",
- chn, p1, w14));
- SYNTH_CONTROLLER(md, chn, p1, w14);
- break;
- case MIDI_PITCH_BEND:
- if (scp->music) {
- /*
- * TODO: MIDI_PITCH_BEND
- */
-#if 0
- mtx_lock(&md->synth.vc_mtx);
- md->synth.chn_info[chn].bender_value = w14;
- if (md->midiunit >= 0) {
- /*
- * Handle all of the notes playing on this
- * channel.
- */
- key = ((int)chn << 8);
- for (i = 0; i < md->synth.alloc.max_voice; i++)
- if ((md->synth.alloc.map[i] & 0xff00) == key) {
- mtx_unlock(&md->synth.vc_mtx);
- mtx_unlock(&scp->seq_lock);
- if (md->synth.bender(md, i, w14) == EAGAIN) {
- mtx_lock(&scp->seq_lock);
- return (QUEUEFULL);
- }
- mtx_lock(&scp->seq_lock);
- }
- } else {
- mtx_unlock(&md->synth.vc_mtx);
- mtx_unlock(&scp->seq_lock);
- if (md->synth.bender(md, chn, w14) == EAGAIN) {
- mtx_lock(&scp->seq_lock);
- return (QUEUEFULL);
- }
- mtx_lock(&scp->seq_lock);
- }
-#endif
- } else
- SYNTH_BENDER(md, chn, w14);
- break;
- default:
- ret = 1;
- SEQ_DEBUG(2,
- printf("seq_chncommon event type %d not handled.\n",
- event[1]));
- break;
- }
- mtx_lock(&scp->seq_lock);
- return ret;
-}
-
-static int
-seq_timing(struct seq_softc *scp, u_char *event)
-{
- int param;
- int ret;
-
- ret = 0;
- param = event[4] + (event[5] << 8) +
- (event[6] << 16) + (event[7] << 24);
-
- SEQ_DEBUG(5, printf("seq_timing: unit %d, cmd %d, param %d.\n",
- scp->unit, event[1], param));
- switch (event[1]) {
- case TMR_WAIT_REL:
- timer_wait(scp, param, 0);
- break;
- case TMR_WAIT_ABS:
- timer_wait(scp, param, 1);
- break;
- case TMR_START:
- timer_start(scp);
- cv_broadcast(&scp->reset_cv);
- break;
- case TMR_STOP:
- timer_stop(scp);
- /*
- * The following cv_broadcast isn't needed since we only
-		 * wait for 0->1 transitions.  It probably won't hurt.
- */
- cv_broadcast(&scp->reset_cv);
- break;
- case TMR_CONTINUE:
- timer_continue(scp);
- cv_broadcast(&scp->reset_cv);
- break;
- case TMR_TEMPO:
- if (param < 8)
- param = 8;
- if (param > 360)
- param = 360;
- SEQ_DEBUG(4, printf("Timer set tempo %d\n", param));
- timer_setvals(scp, param, scp->timerbase);
- break;
- case TMR_TIMERBASE:
- if (param < 1)
- param = 1;
- if (param > 1000)
- param = 1000;
- SEQ_DEBUG(4, printf("Timer set timerbase %d\n", param));
- timer_setvals(scp, scp->tempo, param);
- break;
- case TMR_ECHO:
- /*
- * TODO: Consider making 4-byte events for /dev/sequencer
- * PRO: Maybe needed by legacy apps
-		 * CON: soundcard.h has been warning for many years
-		 * to expect 8-byte events.
- */
-#if 0
- if (scp->music)
- seq_copytoinput(scp, event, 8);
- else {
- param = (param << 8 | SEQ_ECHO);
- seq_copytoinput(scp, (u_char *)&param, 4);
- }
-#else
- seq_copytoinput(scp, event, 8);
-#endif
- break;
- default:
- SEQ_DEBUG(2, printf("seq_timing event type %d not handled.\n",
- event[1]));
- ret = 1;
- break;
- }
- return ret;
-}
-
-static int
-seq_local(struct seq_softc *scp, u_char *event)
-{
- int ret;
-
- ret = 0;
- mtx_assert(&scp->seq_lock, MA_OWNED);
-
- SEQ_DEBUG(5, printf("seq_local: unit %d, cmd %d\n", scp->unit,
- event[1]));
- switch (event[1]) {
- default:
- SEQ_DEBUG(1, printf("seq_local event type %d not handled\n",
- event[1]));
- ret = 1;
- break;
- }
- return ret;
-}
-
-static int
-seq_sysex(struct seq_softc *scp, kobj_t md, u_char *event)
-{
- int i, l;
-
- mtx_assert(&scp->seq_lock, MA_OWNED);
- SEQ_DEBUG(5, printf("seq_sysex: unit %d device %d\n", scp->unit,
- event[1]));
- l = 0;
- for (i = 0; i < 6 && event[i + 2] != 0xff; i++)
- l = i + 1;
- if (l > 0) {
- mtx_unlock(&scp->seq_lock);
- if (SYNTH_SENDSYSEX(md, &event[2], l) == EAGAIN) {
- mtx_lock(&scp->seq_lock);
- return 1;
- }
- mtx_lock(&scp->seq_lock);
- }
- return 0;
-}
-
-/*
- * Reset no longer closes the raw devices nor calls seq_sync.
- * Callers are the ioctl handler and seq_close.
- */
-static void
-seq_reset(struct seq_softc *scp)
-{
- int chn, i;
- kobj_t m;
-
- mtx_assert(&scp->seq_lock, MA_OWNED);
-
- SEQ_DEBUG(5, printf("seq_reset: unit %d.\n", scp->unit));
-
- /*
- * Stop reading and writing.
- */
-
- /* scp->recording = 0; */
- scp->playing = 0;
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->out_cv);
- cv_broadcast(&scp->reset_cv);
-
- /*
- * For now, don't reset the timers.
- */
- MIDIQ_CLEAR(scp->in_q);
- MIDIQ_CLEAR(scp->out_q);
-
- for (i = 0; i < scp->midi_number; i++) {
- m = scp->midis[i];
- mtx_unlock(&scp->seq_lock);
- SYNTH_RESET(m);
- for (chn = 0; chn < 16; chn++) {
- SYNTH_CONTROLLER(m, chn, 123, 0);
- SYNTH_CONTROLLER(m, chn, 121, 0);
- SYNTH_BENDER(m, chn, 1 << 13);
- }
- mtx_lock(&scp->seq_lock);
- }
-}
-
-/*
- * seq_sync
- * *really* flush the output queue:
- * flush the event queue, then flush the synthesizers.
- * Callers are the ioctl handler and close.
- */
-
-#define SEQ_SYNC_TIMEOUT 8
-static int
-seq_sync(struct seq_softc *scp)
-{
- int i, rl, sync[16], done;
-
- mtx_assert(&scp->seq_lock, MA_OWNED);
-
- SEQ_DEBUG(4, printf("seq_sync: unit %d.\n", scp->unit));
-
- /*
- * Wait until output queue is empty. Check every so often to see if
-	 * the queue is moving along.  If it isn't, just abort.
- */
- while (!MIDIQ_EMPTY(scp->out_q)) {
- if (!scp->playing) {
- scp->playing = 1;
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->out_cv);
- }
- rl = MIDIQ_LEN(scp->out_q);
-
- i = cv_timedwait_sig(&scp->out_cv,
- &scp->seq_lock, SEQ_SYNC_TIMEOUT * hz);
-
- if (i == EINTR || i == ERESTART) {
- if (i == EINTR) {
- /*
- * XXX: I don't know why we stop playing
- */
- scp->playing = 0;
- cv_broadcast(&scp->out_cv);
- }
- return i;
- }
- if (i == EWOULDBLOCK && rl == MIDIQ_LEN(scp->out_q) &&
- scp->waiting == 0) {
- /*
-			 * The queue seems to be stuck.  Give up and clear
- * queues.
- */
- MIDIQ_CLEAR(scp->out_q);
- scp->playing = 0;
- cv_broadcast(&scp->state_cv);
- cv_broadcast(&scp->out_cv);
- cv_broadcast(&scp->reset_cv);
-
- /*
- * TODO: Consider if the raw devices need to be flushed
- */
-
- SEQ_DEBUG(1, printf("seq_sync queue stuck, aborting\n"));
-
- return i;
- }
- }
-
- scp->playing = 0;
- /*
- * Since syncing a midi device might block, unlock scp->seq_lock.
- */
-
- mtx_unlock(&scp->seq_lock);
- for (i = 0; i < scp->midi_number; i++)
- sync[i] = 1;
-
- do {
- done = 1;
- for (i = 0; i < scp->midi_number; i++)
- if (sync[i]) {
- if (SYNTH_INSYNC(scp->midis[i]) == 0)
- sync[i] = 0;
- else
- done = 0;
- }
- if (!done)
- DELAY(5000);
-
- } while (!done);
-
- mtx_lock(&scp->seq_lock);
- return 0;
-}
-
-char *
-midi_cmdname(int cmd, midi_cmdtab *tab)
-{
- while (tab->name != NULL) {
- if (cmd == tab->cmd)
- return (tab->name);
- tab++;
- }
-
- return ("unknown");
-}
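midi_cmdname is a plain sentinel-terminated table walk. A self-contained copy of the idiom with a made-up table:

/* The lookup idiom midi_cmdname uses; table contents here are made up. */
#include <stdio.h>

typedef struct {
	int cmd;
	char *name;
} midi_cmdtab;

static midi_cmdtab demo_tab[] = {
	{ 1, "SEQ_NOTEON" },
	{ 0, "SEQ_NOTEOFF" },
	{ -1, NULL },		/* sentinel terminates the walk */
};

static char *
cmdname(int cmd, midi_cmdtab *tab)
{
	while (tab->name != NULL) {
		if (cmd == tab->cmd)
			return (tab->name);
		tab++;
	}
	return ("unknown");
}

int
main(void)
{
	printf("%s / %s\n", cmdname(1, demo_tab), cmdname(99, demo_tab));
	return (0);
}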
diff --git a/sys/dev/sound/midi/synth_if.m b/sys/dev/sound/midi/synth_if.m
deleted file mode 100644
index a763b3422bc6..000000000000
--- a/sys/dev/sound/midi/synth_if.m
+++ /dev/null
@@ -1,312 +0,0 @@
-#-
-# Copyright (c) 2003 Mathew Kanner
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-#
-
-INTERFACE synth;
-
-#include <sys/systm.h>
-
-CODE {
-
-synth_killnote_t nokillnote;
-synth_startnote_t nostartnote;
-synth_setinstr_t nosetinstr;
-synth_hwcontrol_t nohwcontrol;
-synth_aftertouch_t noaftertouch;
-synth_panning_t nopanning;
-synth_controller_t nocontroller;
-synth_volumemethod_t novolumemethod;
-synth_bender_t nobender;
-synth_setupvoice_t nosetupvoice;
-synth_sendsysex_t nosendsysex;
-synth_allocvoice_t noallocvoice;
-synth_writeraw_t nowriteraw;
-synth_reset_t noreset;
-synth_shortname_t noshortname;
-synth_open_t noopen;
-synth_close_t noclose;
-synth_query_t noquery;
-synth_insync_t noinsync;
-synth_alloc_t noalloc;
-
- int
- nokillnote(void *_kobj, uint8_t _chn, uint8_t _note, uint8_t _vel)
- {
- printf("nokillnote\n");
- return 0;
- }
-
- int
- noopen(void *_kobj, void *_arg, int mode)
- {
- printf("noopen\n");
- return 0;
- }
-
- int
- noquery(void *_kboj)
- {
- printf("noquery\n");
- return 0;
- }
-
- int
- nostartnote(void *_kb, uint8_t _voice, uint8_t _note, uint8_t _parm)
- {
- printf("nostartnote\n");
- return 0;
- }
-
- int
- nosetinstr(void *_kb, uint8_t _chn, uint16_t _patchno)
- {
- printf("nosetinstr\n");
- return 0;
- }
-
- int
- nohwcontrol(void *_kb, uint8_t *_event)
- {
- printf("nohwcontrol\n");
- return 0;
- }
-
- int
- noaftertouch ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2)
- {
- printf("noaftertouch\n");
- return 0;
- }
-
- int
- nopanning ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2)
- {
- printf("nopanning\n");
- return 0;
- }
-
- int
- nocontroller ( void /* X */ * _kobj, uint8_t _x1, uint8_t _x2, uint16_t _x3)
- {
- printf("nocontroller\n");
- return 0;
- }
-
- int
- novolumemethod (
- void /* X */ * _kobj,
- uint8_t _x1)
- {
- printf("novolumemethod\n");
- return 0;
- }
-
- int
- nobender ( void /* X */ * _kobj, uint8_t _voice, uint16_t _bend)
- {
- printf("nobender\n");
- return 0;
- }
-
- int
- nosetupvoice ( void /* X */ * _kobj, uint8_t _voice, uint8_t _chn)
- {
-
- printf("nosetupvoice\n");
- return 0;
- }
-
- int
- nosendsysex ( void /* X */ * _kobj, void * _buf, size_t _len)
- {
- printf("nosendsysex\n");
- return 0;
- }
-
- int
- noallocvoice ( void /* X */ * _kobj, uint8_t _chn, uint8_t _note, void *_x)
- {
- printf("noallocvoice\n");
- return 0;
- }
-
- int
- nowriteraw ( void /* X */ * _kobjt, uint8_t * _buf, size_t _len)
- {
- printf("nowriteraw\n");
- return 1;
- }
-
- int
- noreset ( void /* X */ * _kobjt)
- {
-
- printf("noreset\n");
- return 0;
- }
-
- char *
- noshortname (void /* X */ * _kobjt)
- {
- printf("noshortname\n");
- return "noshortname";
- }
-
- int
- noclose ( void /* X */ * _kobjt)
- {
-
- printf("noclose\n");
- return 0;
- }
-
- int
- noinsync (void /* X */ * _kobjt)
- {
-
- printf("noinsync\n");
- return 0;
- }
-
- int
- noalloc ( void /* x */ * _kbojt, uint8_t _chn, uint8_t _note)
- {
- printf("noalloc\n");
- return 0;
- }
-}
-
-METHOD int killnote {
- void /* X */ *_kobj;
- uint8_t _chan;
- uint8_t _note;
- uint8_t _vel;
-} DEFAULT nokillnote;
-
-METHOD int startnote {
- void /* X */ *_kobj;
- uint8_t _voice;
- uint8_t _note;
- uint8_t _parm;
-} DEFAULT nostartnote;
-
-METHOD int setinstr {
- void /* X */ *_kobj;
- uint8_t _chn;
- uint16_t _patchno;
-} DEFAULT nosetinstr;
-
-METHOD int hwcontrol {
- void /* X */ *_kobj;
- uint8_t *_event;
-} DEFAULT nohwcontrol;
-
-METHOD int aftertouch {
- void /* X */ *_kobj;
- uint8_t _x1;
- uint8_t _x2;
-} DEFAULT noaftertouch;
-
-METHOD int panning {
- void /* X */ *_kobj;
- uint8_t _x1;
- uint8_t _x2;
-} DEFAULT nopanning;
-
-METHOD int controller {
- void /* X */ *_kobj;
- uint8_t _x1;
- uint8_t _x2;
- uint16_t _x3;
-} DEFAULT nocontroller;
-
-METHOD int volumemethod {
- void /* X */ *_kobj;
- uint8_t _x1;
-} DEFAULT novolumemethod;
-
-METHOD int bender {
- void /* X */ *_kobj;
- uint8_t _voice;
- uint16_t _bend;
-} DEFAULT nobender;
-
-METHOD int setupvoice {
- void /* X */ *_kobj;
- uint8_t _voice;
- uint8_t _chn;
-} DEFAULT nosetupvoice;
-
-METHOD int sendsysex {
- void /* X */ *_kobj;
- void *_buf;
- size_t _len;
-} DEFAULT nosendsysex;
-
-METHOD int allocvoice {
- void /* X */ *_kobj;
- uint8_t _chn;
- uint8_t _note;
- void *_x;
-} DEFAULT noallocvoice;
-
-METHOD int writeraw {
- void /* X */ *_kobjt;
- uint8_t *_buf;
- size_t _len;
-} DEFAULT nowriteraw;
-
-METHOD int reset {
- void /* X */ *_kobjt;
-} DEFAULT noreset;
-
-METHOD char * shortname {
- void /* X */ *_kobjt;
-} DEFAULT noshortname;
-
-METHOD int open {
- void /* X */ *_kobjt;
- void *_sythn;
- int _mode;
-} DEFAULT noopen;
-
-METHOD int close {
- void /* X */ *_kobjt;
-} DEFAULT noclose;
-
-METHOD int query {
- void /* X */ *_kobjt;
-} DEFAULT noquery;
-
-METHOD int insync {
- void /* X */ *_kobjt;
-} DEFAULT noinsync;
-
-METHOD int alloc {
- void /* x */ *_kbojt;
- uint8_t _chn;
- uint8_t _note;
-} DEFAULT noalloc;
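Each METHOD above carries a DEFAULT stub, so a synth driver only has to implement the calls it actually supports. The kobj(9) machinery generates the real dispatch; the following user-space table of function pointers is only a rough analogue of the pattern:

/* Rough user-space analogue of METHOD ... DEFAULT; not the real kobj(9). */
#include <stdio.h>
#include <stdint.h>

struct synth_ops {
	int (*killnote)(void *obj, uint8_t chn, uint8_t note, uint8_t vel);
	int (*startnote)(void *obj, uint8_t voice, uint8_t note, uint8_t parm);
};

static int
nokillnote(void *obj, uint8_t chn, uint8_t note, uint8_t vel)
{
	printf("nokillnote\n");
	return (0);
}

static int
nostartnote(void *obj, uint8_t voice, uint8_t note, uint8_t parm)
{
	printf("nostartnote\n");
	return (0);
}

/* A driver that only implements startnote. */
static int
mystartnote(void *obj, uint8_t voice, uint8_t note, uint8_t parm)
{
	printf("mystartnote: voice %u note %u\n", voice, note);
	return (0);
}

int
main(void)
{
	struct synth_ops ops = { nokillnote, nostartnote };

	ops.startnote = mystartnote;	/* override one method */
	ops.startnote(NULL, 0, 60, 100);
	ops.killnote(NULL, 0, 60, 0);	/* falls through to the default */
	return (0);
}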
diff --git a/sys/dev/sound/pci/als4000.c b/sys/dev/sound/pci/als4000.c
index b1376d2b6e5a..9d86713b379e 100644
--- a/sys/dev/sound/pci/als4000.c
+++ b/sys/dev/sound/pci/als4000.c
@@ -839,10 +839,7 @@ als_pci_attach(device_t dev)
goto bad_attach;
}
- if (pcm_register(dev, sc, 1, 1)) {
- device_printf(dev, "failed to register pcm entries\n");
- goto bad_attach;
- }
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_PLAY, &alspchan_class, sc);
pcm_addchan(dev, PCMDIR_REC, &alsrchan_class, sc);
@@ -850,7 +847,11 @@ als_pci_attach(device_t dev)
snprintf(status, SND_STATUSLEN, "port 0x%jx irq %jd on %s",
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status)) {
+ device_printf(dev, "failed to register pcm entries\n");
+ goto bad_attach;
+ }
+
return 0;
bad_attach:
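This als4000 hunk shows the migration pattern repeated across the rest of this commit: the old flow registered channel counts up front with pcm_register(dev, sc, nplay, nrec) and finished with pcm_setstatus(), while the new flow calls pcm_init(dev, sc) before pcm_addchan() and moves the fallible pcm_register(dev, status) to the end. A sketch of the new ordering only; driver names are placeholders and the exact prototypes live in the pcm headers:

/*
 * Shape of the new attach order, assuming a hypothetical foo(4) driver.
 */
static int
foo_attach(device_t dev)
{
	struct foo_softc *sc;
	char status[SND_STATUSLEN];

	sc = device_get_softc(dev);
	/* ... resource setup elided ... */

	pcm_init(dev, sc);		/* was: pcm_register(dev, sc, np, nr) */
	pcm_addchan(dev, PCMDIR_PLAY, &foochan_class, sc);
	pcm_addchan(dev, PCMDIR_REC, &foochan_class, sc);

	snprintf(status, SND_STATUSLEN, "on %s",
	    device_get_nameunit(device_get_parent(dev)));
	if (pcm_register(dev, status)) {	/* was: pcm_setstatus() */
		device_printf(dev, "failed to register pcm entries\n");
		return (ENXIO);
	}
	return (0);
}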
diff --git a/sys/dev/sound/pci/atiixp.c b/sys/dev/sound/pci/atiixp.c
index dcbf041f9605..90e5742e6523 100644
--- a/sys/dev/sound/pci/atiixp.c
+++ b/sys/dev/sound/pci/atiixp.c
@@ -1084,8 +1084,7 @@ atiixp_chip_post_init(void *arg)
mixer_init(sc->dev, ac97_getmixerclass(), sc->codec);
- if (pcm_register(sc->dev, sc, ATI_IXP_NPCHAN, ATI_IXP_NRCHAN))
- goto postinitbad;
+ pcm_init(sc->dev, sc);
for (i = 0; i < ATI_IXP_NPCHAN; i++)
pcm_addchan(sc->dev, PCMDIR_PLAY, &atiixp_chan_class, sc);
@@ -1101,7 +1100,8 @@ atiixp_chip_post_init(void *arg)
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(sc->dev)));
- pcm_setstatus(sc->dev, status);
+ if (pcm_register(sc->dev, status))
+ goto postinitbad;
atiixp_lock(sc);
if (sc->polling == 0)
@@ -1168,12 +1168,12 @@ atiixp_release_resource(struct atiixp_info *sc)
static int
atiixp_pci_probe(device_t dev)
{
- int i;
+ size_t i;
uint16_t devid, vendor;
vendor = pci_get_vendor(dev);
devid = pci_get_device(dev);
- for (i = 0; i < sizeof(atiixp_hw) / sizeof(atiixp_hw[0]); i++) {
+ for (i = 0; i < nitems(atiixp_hw); i++) {
if (vendor == atiixp_hw[i].vendor &&
devid == atiixp_hw[i].devid) {
device_set_desc(dev, atiixp_hw[i].desc);
diff --git a/sys/dev/sound/pci/cmi.c b/sys/dev/sound/pci/cmi.c
index 9a92066c51a4..22f1e76a4d1f 100644
--- a/sys/dev/sound/pci/cmi.c
+++ b/sys/dev/sound/pci/cmi.c
@@ -982,8 +982,7 @@ cmi_attach(device_t dev)
if (mixer_init(dev, &cmi_mixer_class, sc))
goto bad;
- if (pcm_register(dev, sc, 1, 1))
- goto bad;
+ pcm_init(dev, sc);
cmi_initsys(sc);
@@ -993,7 +992,8 @@ cmi_attach(device_t dev)
snprintf(status, SND_STATUSLEN, "port 0x%jx irq %jd on %s",
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
DEB(printf("cmi_attach: succeeded\n"));
return 0;
diff --git a/sys/dev/sound/pci/cs4281.c b/sys/dev/sound/pci/cs4281.c
index 972b83efff8f..7a25f7f4c08d 100644
--- a/sys/dev/sound/pci/cs4281.c
+++ b/sys/dev/sound/pci/cs4281.c
@@ -839,8 +839,7 @@ cs4281_pci_attach(device_t dev)
mixer_init(dev, ac97_getmixerclass(), codec);
- if (pcm_register(dev, sc, 1, 1))
- goto bad;
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_PLAY, &cs4281chan_class, sc);
pcm_addchan(dev, PCMDIR_REC, &cs4281chan_class, sc);
@@ -849,7 +848,8 @@ cs4281_pci_attach(device_t dev)
(sc->regtype == SYS_RES_IOPORT)? "port" : "mem",
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/csa.c b/sys/dev/sound/pci/csa.c
index 7191f0cc4bf9..4bd8ff029f43 100644
--- a/sys/dev/sound/pci/csa.c
+++ b/sys/dev/sound/pci/csa.c
@@ -45,7 +45,6 @@
#endif
#include <dev/sound/pcm/sound.h>
-#include <dev/sound/chip.h>
#include <dev/sound/pci/csareg.h>
#include <dev/sound/pci/csavar.h>
@@ -109,46 +108,25 @@ static int
clkrun_hack(int run)
{
#ifdef __i386__
- devclass_t pci_devclass;
- device_t *pci_devices, *pci_children, *busp, *childp;
- int pci_count = 0, pci_childcount = 0;
- int i, j, port;
+ device_t child;
+ int port;
u_int16_t control;
bus_space_tag_t btag;
- if ((pci_devclass = devclass_find("pci")) == NULL) {
- return ENXIO;
- }
+ child = pci_find_device(0x8086, 0x7113);
+ if (child == NULL)
+ return (ENXIO);
- devclass_get_devices(pci_devclass, &pci_devices, &pci_count);
-
- for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
- pci_childcount = 0;
- if (device_get_children(*busp, &pci_children, &pci_childcount))
- continue;
- for (j = 0, childp = pci_children; j < pci_childcount; j++, childp++) {
- if (pci_get_vendor(*childp) == 0x8086 && pci_get_device(*childp) == 0x7113) {
- port = (pci_read_config(*childp, 0x41, 1) << 8) + 0x10;
- /* XXX */
- btag = X86_BUS_SPACE_IO;
-
- control = bus_space_read_2(btag, 0x0, port);
- control &= ~0x2000;
- control |= run? 0 : 0x2000;
- bus_space_write_2(btag, 0x0, port, control);
- free(pci_devices, M_TEMP);
- free(pci_children, M_TEMP);
- return 0;
- }
- }
- free(pci_children, M_TEMP);
- }
+ port = (pci_read_config(child, 0x41, 1) << 8) + 0x10;
+ /* XXX */
+ btag = X86_BUS_SPACE_IO;
- free(pci_devices, M_TEMP);
- return ENXIO;
-#else
- return 0;
+ control = bus_space_read_2(btag, 0x0, port);
+ control &= ~0x2000;
+ control |= run? 0 : 0x2000;
+ bus_space_write_2(btag, 0x0, port, control);
#endif
+ return (0);
}
static struct csa_card cards_4610[] = {
@@ -295,28 +273,20 @@ csa_attach(device_t dev)
/* Attach the children. */
/* PCM Audio */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto err_teardown;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
func->varinfo = &scp->binfo;
func->func = SCF_PCM;
- scp->pcm = device_add_child(dev, "pcm", -1);
+ scp->pcm = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(scp->pcm, func);
/* Midi Interface */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto err_teardown;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
func->varinfo = &scp->binfo;
func->func = SCF_MIDI;
- scp->midi = device_add_child(dev, "midi", -1);
+ scp->midi = device_add_child(dev, "midi", DEVICE_UNIT_ANY);
device_set_ivars(scp->midi, func);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
@@ -331,43 +301,32 @@ err_io:
return (error);
}
+static void
+csa_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
static int
csa_detach(device_t dev)
{
csa_res *resp;
sc_p scp;
- struct sndcard_func *func;
int err;
scp = device_get_softc(dev);
resp = &scp->res;
- if (scp->midi != NULL) {
- func = device_get_ivars(scp->midi);
- err = device_delete_child(dev, scp->midi);
- if (err != 0)
- return err;
- if (func != NULL)
- free(func, M_DEVBUF);
- scp->midi = NULL;
- }
-
- if (scp->pcm != NULL) {
- func = device_get_ivars(scp->pcm);
- err = device_delete_child(dev, scp->pcm);
- if (err != 0)
- return err;
- if (func != NULL)
- free(func, M_DEVBUF);
- scp->pcm = NULL;
- }
+ err = bus_generic_detach(dev);
+ if (err != 0)
+ return err;
bus_teardown_intr(dev, resp->irq, scp->ih);
bus_release_resource(dev, SYS_RES_IRQ, resp->irq_rid, resp->irq);
bus_release_resource(dev, SYS_RES_MEMORY, resp->mem_rid, resp->mem);
bus_release_resource(dev, SYS_RES_MEMORY, resp->io_rid, resp->io);
- return bus_generic_detach(dev);
+ return (0);
}
static int
@@ -1082,6 +1041,7 @@ static device_method_t csa_methods[] = {
DEVMETHOD(device_resume, csa_resume),
/* Bus interface */
+ DEVMETHOD(bus_child_deleted, csa_child_deleted),
DEVMETHOD(bus_alloc_resource, csa_alloc_resource),
DEVMETHOD(bus_release_resource, csa_release_resource),
DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
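The csa change above (and the emu10kx change below) replace hand-rolled child teardown with a bus_child_deleted method: detach defers to bus_generic_detach(), and the child's ivars are freed when the bus deletes each child. A sketch of the pairing, with placeholder driver names:

/* Sketch of the deferred-cleanup pattern; foo_* names are hypothetical. */
static void
foo_child_deleted(device_t dev, device_t child)
{
	free(device_get_ivars(child), M_DEVBUF);
}

static int
foo_detach(device_t dev)
{
	int err;

	/* Deletes the children, firing foo_child_deleted for each one. */
	err = bus_generic_detach(dev);
	if (err != 0)
		return (err);
	/* ... release interrupts and resources ... */
	return (0);
}

static device_method_t foo_methods[] = {
	DEVMETHOD(device_detach,	foo_detach),
	DEVMETHOD(bus_child_deleted,	foo_child_deleted),
	DEVMETHOD_END
};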
diff --git a/sys/dev/sound/pci/csamidi.c b/sys/dev/sound/pci/csamidi.c
index df1699092990..29d5548b0954 100644
--- a/sys/dev/sound/pci/csamidi.c
+++ b/sys/dev/sound/pci/csamidi.c
@@ -43,7 +43,6 @@
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
-#include <dev/sound/chip.h>
#include <dev/sound/pcm/sound.h>
#include <dev/sound/midi/midi.h>
diff --git a/sys/dev/sound/pci/csapcm.c b/sys/dev/sound/pci/csapcm.c
index 71b9a0253cdb..a966a2e66402 100644
--- a/sys/dev/sound/pci/csapcm.c
+++ b/sys/dev/sound/pci/csapcm.c
@@ -36,7 +36,6 @@
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pcm/ac97.h>
-#include <dev/sound/chip.h>
#include <dev/sound/pci/csareg.h>
#include <dev/sound/pci/csavar.h>
@@ -833,14 +832,14 @@ pcmcsa_attach(device_t dev)
csa_writemem(resp, BA1_CIE, (csa_readmem(resp, BA1_CIE) & ~0x0000003f) | 0x00000001);
csa_active(csa, -1);
- if (pcm_register(dev, csa, 1, 1)) {
+ pcm_init(dev, csa);
+ pcm_addchan(dev, PCMDIR_REC, &csachan_class, csa);
+ pcm_addchan(dev, PCMDIR_PLAY, &csachan_class, csa);
+ if (pcm_register(dev, status)) {
ac97_destroy(codec);
csa_releaseres(csa, dev);
return (ENXIO);
}
- pcm_addchan(dev, PCMDIR_REC, &csachan_class, csa);
- pcm_addchan(dev, PCMDIR_PLAY, &csachan_class, csa);
- pcm_setstatus(dev, status);
return (0);
}
diff --git a/sys/dev/sound/pci/csareg.h b/sys/dev/sound/pci/csareg.h
index a36e36177f5c..3341390f618b 100644
--- a/sys/dev/sound/pci/csareg.h
+++ b/sys/dev/sound/pci/csareg.h
@@ -34,7 +34,7 @@
#define _CSA_REG_H
/*
- * The following constats are orginally in the sample by Crystal Semiconductor.
+ * The following constants are originally in the sample by Crystal Semiconductor.
* Copyright (c) 1996-1998 Crystal Semiconductor Corp.
*/
diff --git a/sys/dev/sound/pci/emu10k1.c b/sys/dev/sound/pci/emu10k1.c
index 0813f89c87b8..e4b2c22f4f07 100644
--- a/sys/dev/sound/pci/emu10k1.c
+++ b/sys/dev/sound/pci/emu10k1.c
@@ -2130,13 +2130,14 @@ emu_pci_attach(device_t dev)
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, sc, sc->nchans, gotmic ? 3 : 2)) goto bad;
+ pcm_init(dev, sc);
for (i = 0; i < sc->nchans; i++)
pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
for (i = 0; i < (gotmic ? 3 : 2); i++)
pcm_addchan(dev, PCMDIR_REC, &emurchan_class, sc);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/emu10kx-midi.c b/sys/dev/sound/pci/emu10kx-midi.c
index 4ed8e6c1dd9c..2a98562f8f39 100644
--- a/sys/dev/sound/pci/emu10kx-midi.c
+++ b/sys/dev/sound/pci/emu10kx-midi.c
@@ -43,7 +43,6 @@
#include "opt_snd.h"
#endif
-#include <dev/sound/chip.h>
#include <dev/sound/pcm/sound.h>
#include <dev/sound/midi/midi.h>
diff --git a/sys/dev/sound/pci/emu10kx-pcm.c b/sys/dev/sound/pci/emu10kx-pcm.c
index 825a39fc4e63..c280b64892f6 100644
--- a/sys/dev/sound/pci/emu10kx-pcm.c
+++ b/sys/dev/sound/pci/emu10kx-pcm.c
@@ -43,7 +43,6 @@
#include "opt_snd.h"
#endif
-#include <dev/sound/chip.h>
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pcm/ac97.h>
@@ -1460,10 +1459,7 @@ emu_pcm_attach(device_t dev)
pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);
/* XXX we should better get number of available channels from parent */
- if (pcm_register(dev, sc, (route == RT_FRONT) ? MAX_CHANNELS : 1, (route == RT_FRONT) ? 1 : 0)) {
- device_printf(dev, "can't register PCM channels!\n");
- goto bad;
- }
+ pcm_init(dev, sc);
sc->pnum = 0;
if (route != RT_MCHRECORD)
pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
@@ -1477,7 +1473,8 @@ emu_pcm_attach(device_t dev)
snprintf(status, SND_STATUSLEN, "on %s",
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return (0);
diff --git a/sys/dev/sound/pci/emu10kx.c b/sys/dev/sound/pci/emu10kx.c
index d17f5fb16d34..9cd7dbca1cb2 100644
--- a/sys/dev/sound/pci/emu10kx.c
+++ b/sys/dev/sound/pci/emu10kx.c
@@ -49,7 +49,6 @@
#include "opt_snd.h"
#endif
-#include <dev/sound/chip.h>
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pcm/ac97.h>
@@ -3212,120 +3211,72 @@ emu_pci_attach(device_t dev)
sc->pcm[i] = NULL;
/* FRONT */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_FRONT;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_FRONT] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_FRONT] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_FRONT], func);
if (!(sc->mch_disabled)) {
/* REAR */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_REAR;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_REAR] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_REAR] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_REAR], func);
if (sc->has_51) {
/* CENTER */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_CENTER;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_CENTER] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_CENTER] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_CENTER], func);
/* SUB */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_SUB;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_SUB] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_SUB] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_SUB], func);
}
if (sc->has_71) {
/* SIDE */
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_SIDE;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_SIDE] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_SIDE] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_SIDE], func);
}
} /* mch_disabled */
if (sc->mch_rec) {
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (pcminfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
pcminfo->card = sc;
pcminfo->route = RT_MCHRECORD;
func->func = SCF_PCM;
func->varinfo = pcminfo;
- sc->pcm[RT_MCHRECORD] = device_add_child(dev, "pcm", -1);
+ sc->pcm[RT_MCHRECORD] = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(sc->pcm[RT_MCHRECORD], func);
} /*mch_rec */
@@ -3336,16 +3287,8 @@ emu_pci_attach(device_t dev)
#if 0
/* Midi Interface 1: Live!, Audigy, Audigy 2 */
if ((sc->is_emu10k1) || (sc->is_emu10k2) || (sc->is_ca0102)) {
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (midiinfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_WAITOK | M_ZERO);
midiinfo->card = sc;
if (sc->is_emu10k2 || (sc->is_ca0102)) {
midiinfo->port = EMU_A_MUDATA1;
@@ -3357,21 +3300,13 @@ emu_pci_attach(device_t dev)
}
func->func = SCF_MIDI;
func->varinfo = midiinfo;
- sc->midi[0] = device_add_child(dev, "midi", -1);
+ sc->midi[0] = device_add_child(dev, "midi", DEVICE_UNIT_ANY);
device_set_ivars(sc->midi[0], func);
}
/* Midi Interface 2: Audigy, Audigy 2 (on AudigyDrive) */
if (sc->is_emu10k2 || (sc->is_ca0102)) {
- func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (func == NULL) {
- error = ENOMEM;
- goto bad;
- }
- midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (midiinfo == NULL) {
- error = ENOMEM;
- goto bad;
- }
+ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_WAITOK | M_ZERO);
+ midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_WAITOK | M_ZERO);
midiinfo->card = sc;
midiinfo->port = EMU_A_MUDATA2;
@@ -3379,11 +3314,12 @@ emu_pci_attach(device_t dev)
func->func = SCF_MIDI;
func->varinfo = midiinfo;
- sc->midi[1] = device_add_child(dev, "midi", -1);
+ sc->midi[1] = device_add_child(dev, "midi", DEVICE_UNIT_ANY);
device_set_ivars(sc->midi[1], func);
}
#endif
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
bad:
/* XXX can we just call emu_pci_detach here? */
@@ -3402,65 +3338,29 @@ bad:
return (error);
}
+static void
+emu_pci_child_deleted(device_t dev, device_t child)
+{
+ struct sndcard_func *func;
+
+ func = device_get_ivars(child);
+ if (func != NULL) {
+ free(func->varinfo, M_DEVBUF);
+ free(func, M_DEVBUF);
+ }
+}
+
static int
emu_pci_detach(device_t dev)
{
struct emu_sc_info *sc;
- struct sndcard_func *func;
- int devcount, i;
- device_t *childlist;
int r = 0;
sc = device_get_softc(dev);
- for (i = 0; i < RT_COUNT; i++) {
- if (sc->pcm[i] != NULL) {
- func = device_get_ivars(sc->pcm[i]);
- if (func != NULL && func->func == SCF_PCM) {
- device_set_ivars(sc->pcm[i], NULL);
- free(func->varinfo, M_DEVBUF);
- free(func, M_DEVBUF);
- }
- r = device_delete_child(dev, sc->pcm[i]);
- if (r) return (r);
- }
- }
-
- if (sc->midi[0] != NULL) {
- func = device_get_ivars(sc->midi[0]);
- if (func != NULL && func->func == SCF_MIDI) {
- device_set_ivars(sc->midi[0], NULL);
- free(func->varinfo, M_DEVBUF);
- free(func, M_DEVBUF);
- }
- r = device_delete_child(dev, sc->midi[0]);
- if (r) return (r);
- }
-
- if (sc->midi[1] != NULL) {
- func = device_get_ivars(sc->midi[1]);
- if (func != NULL && func->func == SCF_MIDI) {
- device_set_ivars(sc->midi[1], NULL);
- free(func->varinfo, M_DEVBUF);
- free(func, M_DEVBUF);
- }
- r = device_delete_child(dev, sc->midi[1]);
- if (r) return (r);
- }
-
- if (device_get_children(dev, &childlist, &devcount) == 0)
- for (i = 0; i < devcount - 1; i++) {
- device_printf(dev, "removing stale child %d (unit %d)\n", i, device_get_unit(childlist[i]));
- func = device_get_ivars(childlist[i]);
- if (func != NULL && (func->func == SCF_MIDI || func->func == SCF_PCM)) {
- device_set_ivars(childlist[i], NULL);
- free(func->varinfo, M_DEVBUF);
- free(func, M_DEVBUF);
- }
- device_delete_child(dev, childlist[i]);
- }
- if (childlist != NULL)
- free(childlist, M_TEMP);
+ r = bus_generic_detach(dev);
+ if (r != 0)
+ return (r);
r = emu10kx_dev_uninit(sc);
if (r)
@@ -3480,7 +3380,7 @@ emu_pci_detach(device_t dev)
mtx_destroy(&sc->rw);
mtx_destroy(&sc->lock);
- return (bus_generic_detach(dev));
+ return (0);
}
/* add suspend, resume */
static device_method_t emu_methods[] = {
@@ -3489,6 +3389,7 @@ static device_method_t emu_methods[] = {
DEVMETHOD(device_attach, emu_pci_attach),
DEVMETHOD(device_detach, emu_pci_detach),
/* Bus methods */
+ DEVMETHOD(bus_child_deleted, emu_pci_child_deleted),
DEVMETHOD(bus_read_ivar, emu_read_ivar),
DEVMETHOD(bus_write_ivar, emu_write_ivar),
diff --git a/sys/dev/sound/pci/envy24.c b/sys/dev/sound/pci/envy24.c
index f7cc7ff5724d..51842bfdb480 100644
--- a/sys/dev/sound/pci/envy24.c
+++ b/sys/dev/sound/pci/envy24.c
@@ -2575,9 +2575,7 @@ envy24_pci_attach(device_t dev)
mixer_init(dev, &envy24mixer_class, sc);
/* set channel information */
- err = pcm_register(dev, sc, 5, 2 + sc->adcn);
- if (err)
- goto bad;
+ pcm_init(dev, sc);
sc->chnum = 0;
for (i = 0; i < 5; i++) {
pcm_addchan(dev, PCMDIR_PLAY, &envy24chan_class, sc);
@@ -2601,7 +2599,8 @@ envy24_pci_attach(device_t dev)
rman_get_end(sc->mt) - rman_get_start(sc->mt) + 1,
rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/envy24ht.c b/sys/dev/sound/pci/envy24ht.c
index 65c67b84ce53..b8202a9fa7cd 100644
--- a/sys/dev/sound/pci/envy24ht.c
+++ b/sys/dev/sound/pci/envy24ht.c
@@ -2480,10 +2480,7 @@ envy24ht_pci_attach(device_t dev)
mixer_init(dev, &envy24htmixer_class, sc);
/* set channel information */
- /* err = pcm_register(dev, sc, 5, 2 + sc->adcn); */
- err = pcm_register(dev, sc, 1, 2 + sc->adcn);
- if (err)
- goto bad;
+ pcm_init(dev, sc);
sc->chnum = 0;
/* for (i = 0; i < 5; i++) { */
pcm_addchan(dev, PCMDIR_PLAY, &envy24htchan_class, sc);
@@ -2503,7 +2500,8 @@ envy24ht_pci_attach(device_t dev)
rman_get_end(sc->mt) - rman_get_start(sc->mt) + 1,
rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/es137x.c b/sys/dev/sound/pci/es137x.c
index 8f832d899dd3..3c1bea09b5d1 100644
--- a/sys/dev/sound/pci/es137x.c
+++ b/sys/dev/sound/pci/es137x.c
@@ -1861,13 +1861,13 @@ es_pci_attach(device_t dev)
rman_get_start(es->reg), rman_get_start(es->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, es, numplay, 1))
- goto bad;
+ pcm_init(dev, es);
for (i = 0; i < numplay; i++)
pcm_addchan(dev, PCMDIR_PLAY, ct, es);
pcm_addchan(dev, PCMDIR_REC, ct, es);
es_init_sysctls(dev);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
es->escfg = ES_SET_GP(es->escfg, 0);
if (numplay == 1)
device_printf(dev, "<Playback: DAC%d / Record: ADC>\n",
diff --git a/sys/dev/sound/pci/fm801.c b/sys/dev/sound/pci/fm801.c
index cbc74249c04d..3537c7807ded 100644
--- a/sys/dev/sound/pci/fm801.c
+++ b/sys/dev/sound/pci/fm801.c
@@ -642,13 +642,14 @@ fm801_pci_attach(device_t dev)
device_get_nameunit(device_get_parent(dev)));
#define FM801_MAXPLAYCH 1
- if (pcm_register(dev, fm801, FM801_MAXPLAYCH, 1)) goto oops;
+ pcm_init(dev, fm801);
pcm_addchan(dev, PCMDIR_PLAY, &fm801ch_class, fm801);
pcm_addchan(dev, PCMDIR_REC, &fm801ch_class, fm801);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto oops;
- fm801->radio = device_add_child(dev, "radio", -1);
- bus_generic_attach(dev);
+ fm801->radio = device_add_child(dev, "radio", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
return 0;
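Two more API migrations visible in this fm801 hunk recur across the commit: device_add_child() now takes the symbolic DEVICE_UNIT_ANY instead of the magic unit number -1, and bus_attach_children() supersedes bus_generic_attach() for attaching a device's children.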
@@ -675,12 +676,6 @@ fm801_pci_detach(device_t dev)
r = bus_generic_detach(dev);
if (r)
return r;
- if (fm801->radio != NULL) {
- r = device_delete_child(dev, fm801->radio);
- if (r)
- return r;
- fm801->radio = NULL;
- }
r = pcm_unregister(dev);
if (r)
diff --git a/sys/dev/sound/pci/hda/hdaa.c b/sys/dev/sound/pci/hda/hdaa.c
index dcd10cb36510..5dbb5c4f4453 100644
--- a/sys/dev/sound/pci/hda/hdaa.c
+++ b/sys/dev/sound/pci/hda/hdaa.c
@@ -267,7 +267,8 @@ hdaa_channels_handler(struct hdaa_audio_as *as)
struct hdaa_chan *ch = &devinfo->chans[as->chans[0]];
struct hdaa_widget *w;
uint8_t *eld;
- int i, total, sub, assume, channels;
+ int total, sub, assume, channels;
+ size_t i;
uint16_t cpins, upins, tpins;
cpins = upins = 0;
@@ -347,7 +348,7 @@ hdaa_channels_handler(struct hdaa_audio_as *as)
printf("\n");
);
/* Look for maximal fitting matrix. */
- for (i = 0; i < sizeof(matrixes) / sizeof(struct matrix); i++) {
+ for (i = 0; i < nitems(matrixes); i++) {
if (as->pinset != 0 && matrixes[i].analog == 0)
continue;
if ((matrixes[i].m.mask & ~channels) == 0) {
@@ -531,9 +532,11 @@ static void
hdaa_presence_handler(struct hdaa_widget *w)
{
struct hdaa_devinfo *devinfo = w->devinfo;
- struct hdaa_audio_as *as;
+ struct hdaa_audio_as *as, *asp;
+ char buf[32];
uint32_t res;
- int connected, old;
+ int connected, old, i;
+ bool active;
if (w->enable == 0 || w->type !=
HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)
@@ -551,13 +554,6 @@ hdaa_presence_handler(struct hdaa_widget *w)
if (connected == old)
return;
w->wclass.pin.connected = connected;
- HDA_BOOTVERBOSE(
- if (connected || old != 2) {
- device_printf(devinfo->dev,
- "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
- w->nid, res, !connected ? "dis" : "");
- }
- );
as = &devinfo->as[w->bindas];
if (as->hpredir >= 0 && as->pins[15] == w->nid)
@@ -566,6 +562,38 @@ hdaa_presence_handler(struct hdaa_widget *w)
hdaa_autorecsrc_handler(as, w);
if (old != 2)
hdaa_channels_handler(as);
+
+ if (connected || old != 2) {
+ HDA_BOOTVERBOSE(
+ device_printf(devinfo->dev,
+ "Pin sense: nid=%d sense=0x%08x (%sconnected)\n",
+ w->nid, res, !connected ? "dis" : "");
+ );
+ if (as->hpredir >= 0)
+ return;
+ for (i = 0, active = false; i < devinfo->num_devs; i++) {
+ if (device_get_unit(devinfo->devs[i].dev) == snd_unit) {
+ active = true;
+ break;
+ }
+ }
+ /* Proceed only if we are currently using this codec. */
+ if (!active)
+ return;
+ for (i = 0; i < devinfo->ascnt; i++) {
+ asp = &devinfo->as[i];
+ if (!asp->enable)
+ continue;
+ if ((connected && asp->index == as->index) ||
+ (!connected && asp->dir == as->dir)) {
+ snprintf(buf, sizeof(buf), "cdev=dsp%d",
+ device_get_unit(asp->pdevinfo->dev));
+ devctl_notify("SND", "CONN",
+ asp->dir == HDAA_CTL_IN ? "IN" : "OUT", buf);
+ break;
+ }
+ }
+ }
}
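The replacement code above does more than relocate the boot-verbose message: when a jack on the currently active codec is plugged or unplugged, the handler now emits a devctl(4) notification with system "SND", subsystem "CONN", type "IN" or "OUT", and a "cdev=dspN" payload, so userland (for example a devd(8) rule) can react to the connection change.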
/*
@@ -1252,7 +1280,8 @@ hdaa_sysctl_config(SYSCTL_HANDLER_ARGS)
static void
hdaa_config_fetch(const char *str, uint32_t *on, uint32_t *off)
{
- int i = 0, j, k, len, inv;
+ size_t k;
+ int i = 0, j, len, inv;
for (;;) {
while (str[i] != '\0' &&
@@ -1292,7 +1321,8 @@ static int
hdaa_sysctl_quirks(SYSCTL_HANDLER_ARGS)
{
char buf[256];
- int error, n = 0, i;
+ int error, n = 0;
+ size_t i;
uint32_t quirks, quirks_off;
quirks = *(uint32_t *)oidp->oid_arg1;
@@ -2909,7 +2939,7 @@ hdaa_dump_gpo(struct hdaa_devinfo *devinfo)
data = hda_command(dev,
HDA_CMD_GET_GPO_DATA(0, devinfo->nid));
for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); i++) {
- device_printf(dev, " GPO%d: state=%d", i,
+ device_printf(dev, " GPO%d: state=%d\n", i,
(data >> i) & 1);
}
}
@@ -3030,8 +3060,7 @@ hdaa_audio_ctl_parse(struct hdaa_devinfo *devinfo)
if (max < 1)
return;
- ctls = (struct hdaa_audio_ctl *)malloc(
- sizeof(*ctls) * max, M_HDAA, M_ZERO | M_NOWAIT);
+ ctls = malloc(sizeof(*ctls) * max, M_HDAA, M_ZERO | M_NOWAIT);
if (ctls == NULL) {
/* Blekh! */
@@ -3183,8 +3212,7 @@ hdaa_audio_as_parse(struct hdaa_devinfo *devinfo)
if (max < 1)
return;
- as = (struct hdaa_audio_as *)malloc(
- sizeof(*as) * max, M_HDAA, M_ZERO | M_NOWAIT);
+ as = malloc(sizeof(*as) * max, M_HDAA, M_ZERO | M_NOWAIT);
if (as == NULL) {
/* Blekh! */
@@ -3217,7 +3245,7 @@ hdaa_audio_as_parse(struct hdaa_devinfo *devinfo)
continue;
}
KASSERT(cnt < max,
- ("%s: Associations owerflow (%d of %d)",
+ ("%s: Associations overflow (%d of %d)",
__func__, cnt, max));
type = w->wclass.pin.config &
HDA_CONFIG_DEFAULTCONF_DEVICE_MASK;
@@ -4074,8 +4102,7 @@ hdaa_audio_bind_as(struct hdaa_devinfo *devinfo)
cnt += as[j].num_chans;
}
if (devinfo->num_chans == 0) {
- devinfo->chans = (struct hdaa_chan *)malloc(
- sizeof(struct hdaa_chan) * cnt,
+ devinfo->chans = malloc(sizeof(struct hdaa_chan) * cnt,
M_HDAA, M_ZERO | M_NOWAIT);
if (devinfo->chans == NULL) {
device_printf(devinfo->dev,
@@ -5487,10 +5514,8 @@ hdaa_prepare_pcms(struct hdaa_devinfo *devinfo)
}
devinfo->num_devs =
max(ardev, apdev) + max(drdev, dpdev);
- devinfo->devs =
- (struct hdaa_pcm_devinfo *)malloc(
- devinfo->num_devs * sizeof(struct hdaa_pcm_devinfo),
- M_HDAA, M_ZERO | M_NOWAIT);
+ devinfo->devs = malloc(devinfo->num_devs *
+ sizeof(struct hdaa_pcm_devinfo), M_HDAA, M_ZERO | M_NOWAIT);
if (devinfo->devs == NULL) {
device_printf(devinfo->dev,
"Unable to allocate memory for devices\n");
@@ -5539,7 +5564,7 @@ hdaa_create_pcms(struct hdaa_devinfo *devinfo)
for (i = 0; i < devinfo->num_devs; i++) {
struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i];
- pdevinfo->dev = device_add_child(devinfo->dev, "pcm", -1);
+ pdevinfo->dev = device_add_child(devinfo->dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(pdevinfo->dev, (void *)pdevinfo);
}
}
@@ -6196,13 +6221,15 @@ hdaa_configure(device_t dev)
);
hdaa_patch_direct(devinfo);
HDA_BOOTHVERBOSE(
- device_printf(dev, "Pin sense init...\n");
- );
- hdaa_sense_init(devinfo);
- HDA_BOOTHVERBOSE(
device_printf(dev, "Creating PCM devices...\n");
);
+ hdaa_unlock(devinfo);
hdaa_create_pcms(devinfo);
+ hdaa_lock(devinfo);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Pin sense init...\n");
+ );
+ hdaa_sense_init(devinfo);
HDA_BOOTVERBOSE(
if (devinfo->quirks != 0) {
@@ -6470,7 +6497,7 @@ hdaa_sysctl_reconfig(SYSCTL_HANDLER_ARGS)
hdaa_unconfigure(dev);
hdaa_configure(dev);
hdaa_unlock(devinfo);
- bus_generic_attach(dev);
+ bus_attach_children(dev);
HDA_BOOTHVERBOSE(
device_printf(dev, "Reconfiguration done\n");
);
@@ -6622,9 +6649,8 @@ hdaa_attach(device_t dev)
);
if (devinfo->nodecnt > 0)
- devinfo->widget = (struct hdaa_widget *)malloc(
- sizeof(*(devinfo->widget)) * devinfo->nodecnt, M_HDAA,
- M_WAITOK | M_ZERO);
+ devinfo->widget = malloc(sizeof(*(devinfo->widget)) *
+ devinfo->nodecnt, M_HDAA, M_WAITOK | M_ZERO);
else
devinfo->widget = NULL;
@@ -6677,7 +6703,7 @@ hdaa_attach(device_t dev)
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
"init_clear", CTLFLAG_RW,
&devinfo->init_clear, 1,"Clear initial pin widget configuration");
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -6687,7 +6713,7 @@ hdaa_detach(device_t dev)
struct hdaa_devinfo *devinfo = device_get_softc(dev);
int error;
- if ((error = device_delete_children(dev)) != 0)
+ if ((error = bus_generic_detach(dev)) != 0)
return (error);
hdaa_lock(devinfo);
@@ -7056,9 +7082,7 @@ hdaa_pcm_attach(device_t dev)
HDA_BOOTHVERBOSE(
device_printf(dev, "Registering PCM channels...\n");
);
- if (pcm_register(dev, pdevinfo, (pdevinfo->playas >= 0)?1:0,
- (pdevinfo->recas >= 0)?1:0) != 0)
- device_printf(dev, "Can't register PCM\n");
+ pcm_init(dev, pdevinfo);
pdevinfo->registered++;
@@ -7111,9 +7135,8 @@ hdaa_pcm_attach(device_t dev)
snprintf(status, SND_STATUSLEN, "on %s",
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
- return (0);
+ return (pcm_register(dev, status));
}
static int
diff --git a/sys/dev/sound/pci/hda/hdaa_patches.c b/sys/dev/sound/pci/hda/hdaa_patches.c
index 3a7dfc63637a..91bb244578c7 100644
--- a/sys/dev/sound/pci/hda/hdaa_patches.c
+++ b/sys/dev/sound/pci/hda/hdaa_patches.c
@@ -113,6 +113,12 @@ static const struct {
{ APPLE_MACBOOKAIR31, HDA_CODEC_CS4206, HDA_MATCH_ALL,
0, 0,
HDAA_GPIO_SET(1) | HDAA_GPIO_SET(3) },
+ { HDA_MATCH_ALL, HDA_CODEC_CS4208, APPLE_MACBOOKAIR61,
+ 0, 0,
+ HDAA_GPIO_SET(0) },
+ { HDA_MATCH_ALL, HDA_CODEC_CS4208, APPLE_MACBOOKAIR62,
+ 0, 0,
+ HDAA_GPIO_SET(0) },
{ APPLE_MACBOOKPRO55, HDA_CODEC_CS4206, HDA_MATCH_ALL,
0, 0,
HDAA_GPIO_SET(1) | HDAA_GPIO_SET(3) },
@@ -300,6 +306,22 @@ hdac_pin_patch(struct hdaa_widget *w)
patch_str = "as=2";
break;
}
+ } else if (id == HDA_CODEC_CX20590 &&
+ subid == LENOVO_T420S_SUBVENDOR) {
+ switch (nid) {
+ case 25:
+ patch_str = "as=1 seq=15";
+ break;
+ case 27:
+ patch_str = "as=2 seq=15";
+ break;
+ case 31:
+ patch_str = "as=1 seq=0";
+ break;
+ case 35:
+ patch_str = "as=2 seq=0";
+ break;
+ }
} else if (id == HDA_CODEC_ALC235 && subid == ASUS_GL553VE_SUBVENDOR) {
switch (nid) {
case 33:
@@ -318,7 +340,8 @@ hdac_pin_patch(struct hdaa_widget *w)
}
} else if (id == HDA_CODEC_ALC257 &&
(subid == LENOVO_L5AMD_SUBVENDOR ||
- subid == LENOVO_L5INTEL_SUBVENDOR)) {
+ subid == LENOVO_L5INTEL_SUBVENDOR ||
+ subid == LENOVO_IDEAPAD3_SUBVENDOR)) {
switch (nid) {
case 20:
patch_str = "as=1 seq=0";
@@ -329,7 +352,8 @@ hdac_pin_patch(struct hdaa_widget *w)
}
} else if (id == HDA_CODEC_IDT92HD95B &&
(subid == FRAMEWORK_LAPTOP_0001_SUBVENDOR ||
- subid == FRAMEWORK_LAPTOP_0002_SUBVENDOR)) {
+ subid == FRAMEWORK_LAPTOP_0002_SUBVENDOR ||
+ subid == FRAMEWORK_LAPTOP_0003_SUBVENDOR)) {
switch (nid) {
case 10:
patch_str = "as=1 seq=15 color=Black loc=Left";
@@ -338,8 +362,31 @@ hdac_pin_patch(struct hdaa_widget *w)
patch_str = "as=3 seq=15 color=Black loc=Left";
break;
}
+ } else if ((id == HDA_CODEC_ALC295 &&
+ subid == FRAMEWORK_LAPTOP_0005_SUBVENDOR) ||
+ (id == HDA_CODEC_ALC285 &&
+ subid == FRAMEWORK_LAPTOP_000D_SUBVENDOR)) {
+ switch (nid) {
+ case 20:
+ /*
+ * This pin is a duplicate of pin 23 (both as=1 seq=0),
+ * which ends up in the driver disabling the
+ * association altogether. Since sound quality from pin
+ * 23 seems to be better, configure this one as a back
+ * speaker.
+ */
+ patch_str = "as=1 seq=2";
+ break;
+ }
+ } else if (id == HDA_CODEC_ALC295 &&
+ subid == FRAMEWORK_LAPTOP_0006_SUBVENDOR) {
+ switch (nid) {
+ case 33:
+ patch_str = "as=1 seq=15 color=Black loc=Left";
+ break;
+ }
} else if (id == HDA_CODEC_ALC230 &&
- subid == LENOVO_I330_SUBVENDOR) {
+ subid == LENOVO_IDEAPAD330_SUBVENDOR) {
switch (nid) {
case 20:
patch_str = "as=1 seq=0 device=Speaker";
@@ -358,6 +405,17 @@ hdac_pin_patch(struct hdaa_widget *w)
patch_str = "as=4 seq=15";
break;
}
+ } else if (id == HDA_CODEC_ALC294 &&
+ subid == ASUS_UX331_SUBVENDOR) {
+ switch (nid) {
+ case 25:
+ /* XXX You are not expected to understand this. */
+ config = 0x01a1103c;
+ break;
+ case 33:
+ patch_str = "as=1 seq=15";
+ break;
+ }
} else {
/*
* loop over hdaa_model_pin_patch
diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c
index f3dff2052b51..80028063bb0d 100644
--- a/sys/dev/sound/pci/hda/hdac.c
+++ b/sys/dev/sound/pci/hda/hdac.c
@@ -106,9 +106,8 @@ static const struct {
{ HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 },
{ HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 },
{ HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 },
- { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
- { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
{ HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 },
+ { HDA_INTEL_TGLKH, "Intel Tiger Lake-H", 0, 0 },
{ HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 },
{ HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 },
{ HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 },
@@ -118,6 +117,7 @@ static const struct {
{ HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 },
{ HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 },
{ HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 },
+ { HDA_INTEL_RPTLK3, "Intel Raptor Lake-S", 0, 0 },
{ HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 },
{ HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 },
{ HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 },
@@ -133,6 +133,7 @@ static const struct {
{ HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
{ HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
+ { HDA_INTEL_ELLK2, "Intel Elkhart Lake", 0, 0 },
{ HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
{ HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
{ HDA_INTEL_SCH, "Intel SCH", 0, 0 },
@@ -193,6 +194,7 @@ static const struct {
{ HDA_ATI_RV940, "ATI RV940", 0, 0 },
{ HDA_ATI_RV970, "ATI RV970", 0, 0 },
{ HDA_ATI_R1000, "ATI R1000", 0, 0 },
+ { HDA_ATI_OLAND, "ATI Oland", 0, 0 },
{ HDA_ATI_KABINI, "ATI Kabini", 0, 0 },
{ HDA_ATI_TRINITY, "ATI Trinity", 0, 0 },
{ HDA_AMD_X370, "AMD X370", 0, 0 },
@@ -1277,6 +1279,7 @@ hdac_attach(device_t dev)
goto hdac_attach_fail;
/* Get Capabilities */
+ hdac_reset(sc, 1);
result = hdac_get_capabilities(sc);
if (result != 0)
goto hdac_attach_fail;
@@ -1615,7 +1618,7 @@ hdac_attach2(void *arg)
HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
sc->codecs[i].stepping_id =
HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
- child = device_add_child(sc->dev, "hdacc", -1);
+ child = device_add_child(sc->dev, "hdacc", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(sc->dev,
"Failed to add CODEC device\n");
@@ -1625,7 +1628,7 @@ hdac_attach2(void *arg)
sc->codecs[i].dev = child;
}
}
- bus_generic_attach(sc->dev);
+ bus_attach_children(sc->dev);
SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
@@ -1638,6 +1641,35 @@ hdac_attach2(void *arg)
}
/****************************************************************************
+ * int hdac_shutdown(device_t)
+ *
+ * Power down HDA bus and codecs.
+ ****************************************************************************/
+static int
+hdac_shutdown(device_t dev)
+{
+ struct hdac_softc *sc = device_get_softc(dev);
+
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Shutdown...\n");
+ );
+ callout_drain(&sc->poll_callout);
+ taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
+ bus_generic_shutdown(dev);
+
+ hdac_lock(sc);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Reset controller...\n");
+ );
+ hdac_reset(sc, false);
+ hdac_unlock(sc);
+ HDA_BOOTHVERBOSE(
+ device_printf(dev, "Shutdown done\n");
+ );
+ return (0);
+}
+
+/****************************************************************************
* int hdac_suspend(device_t)
*
* Suspend and power down HDA bus and codecs.
@@ -1740,26 +1772,19 @@ static int
hdac_detach(device_t dev)
{
struct hdac_softc *sc = device_get_softc(dev);
- device_t *devlist;
- int cad, i, devcount, error;
+ int i, error;
- if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
+ callout_drain(&sc->poll_callout);
+ hdac_irq_free(sc);
+ taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
return (error);
- for (i = 0; i < devcount; i++) {
- cad = (intptr_t)device_get_ivars(devlist[i]);
- if ((error = device_delete_child(dev, devlist[i])) != 0) {
- free(devlist, M_TEMP);
- return (error);
- }
- sc->codecs[cad].dev = NULL;
- }
- free(devlist, M_TEMP);
hdac_lock(sc);
hdac_reset(sc, false);
hdac_unlock(sc);
- taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
- hdac_irq_free(sc);
for (i = 0; i < sc->num_ss; i++)
hdac_dma_free(sc, &sc->streams[i].bdl);
@@ -2154,6 +2179,7 @@ static device_method_t hdac_methods[] = {
DEVMETHOD(device_probe, hdac_probe),
DEVMETHOD(device_attach, hdac_attach),
DEVMETHOD(device_detach, hdac_detach),
+ DEVMETHOD(device_shutdown, hdac_shutdown),
DEVMETHOD(device_suspend, hdac_suspend),
DEVMETHOD(device_resume, hdac_resume),
/* Bus interface */
@@ -2181,4 +2207,4 @@ static driver_t hdac_driver = {
sizeof(struct hdac_softc),
};
-DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL);
+DRIVER_MODULE_ORDERED(snd_hda, pci, hdac_driver, NULL, NULL, SI_ORDER_ANY);
diff --git a/sys/dev/sound/pci/hda/hdac.h b/sys/dev/sound/pci/hda/hdac.h
index 4dd589ed2a09..c11e6b2d6810 100644
--- a/sys/dev/sound/pci/hda/hdac.h
+++ b/sys/dev/sound/pci/hda/hdac.h
@@ -66,6 +66,7 @@
#define HDA_INTEL_PCH HDA_MODEL_CONSTRUCT(INTEL, 0x3b56)
#define HDA_INTEL_PCH2 HDA_MODEL_CONSTRUCT(INTEL, 0x3b57)
#define HDA_INTEL_ELLK HDA_MODEL_CONSTRUCT(INTEL, 0x4b55)
+#define HDA_INTEL_ELLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4b58)
#define HDA_INTEL_JLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x4dc8)
#define HDA_INTEL_BXTNP HDA_MODEL_CONSTRUCT(INTEL, 0x5a98)
#define HDA_INTEL_MACBOOKPRO92 HDA_MODEL_CONSTRUCT(INTEL, 0x7270)
@@ -77,6 +78,7 @@
#define HDA_INTEL_ALLKPS HDA_MODEL_CONSTRUCT(INTEL, 0x51c9)
#define HDA_INTEL_RPTLK1 HDA_MODEL_CONSTRUCT(INTEL, 0x51ca)
#define HDA_INTEL_RPTLK2 HDA_MODEL_CONSTRUCT(INTEL, 0x51cb)
+#define HDA_INTEL_RPTLK3 HDA_MODEL_CONSTRUCT(INTEL, 0x7a50)
#define HDA_INTEL_SCH HDA_MODEL_CONSTRUCT(INTEL, 0x811b)
#define HDA_INTEL_LPT1 HDA_MODEL_CONSTRUCT(INTEL, 0x8c20)
#define HDA_INTEL_LPT2 HDA_MODEL_CONSTRUCT(INTEL, 0x8c21)
@@ -95,9 +97,8 @@
#define HDA_INTEL_CMLKS HDA_MODEL_CONSTRUCT(INTEL, 0xa3f0)
#define HDA_INTEL_CNLK HDA_MODEL_CONSTRUCT(INTEL, 0x9dc8)
#define HDA_INTEL_ICLK HDA_MODEL_CONSTRUCT(INTEL, 0x34c8)
-#define HDA_INTEL_CMLKLP HDA_MODEL_CONSTRUCT(INTEL, 0x02c8)
-#define HDA_INTEL_CMLKH HDA_MODEL_CONSTRUCT(INTEL, 0x06c8)
#define HDA_INTEL_TGLK HDA_MODEL_CONSTRUCT(INTEL, 0xa0c8)
+#define HDA_INTEL_TGLKH HDA_MODEL_CONSTRUCT(INTEL, 0x43c8)
#define HDA_INTEL_MTL HDA_MODEL_CONSTRUCT(INTEL, 0x7e28)
#define HDA_INTEL_ARLS HDA_MODEL_CONSTRUCT(INTEL, 0x7f50)
#define HDA_INTEL_ARL HDA_MODEL_CONSTRUCT(INTEL, 0x7728)
@@ -178,6 +179,7 @@
#define HDA_ATI_RV930 HDA_MODEL_CONSTRUCT(ATI, 0xaa90)
#define HDA_ATI_RV910 HDA_MODEL_CONSTRUCT(ATI, 0xaa98)
#define HDA_ATI_R1000 HDA_MODEL_CONSTRUCT(ATI, 0xaaa0)
+#define HDA_ATI_OLAND HDA_MODEL_CONSTRUCT(ATI, 0xaab0)
#define HDA_ATI_KABINI HDA_MODEL_CONSTRUCT(ATI, 0x9840)
#define HDA_ATI_TRINITY HDA_MODEL_CONSTRUCT(ATI, 0x9902)
#define HDA_ATI_ALL HDA_MODEL_CONSTRUCT(ATI, 0xffff)
@@ -332,6 +334,7 @@
#define ASUS_G2K_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x1339)
#define ASUS_Z550SA_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x13b0)
#define ASUS_A7T_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x13c2)
+#define ASUS_UX331_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x14de)
#define ASUS_UX31A_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x1517)
#define ASUS_GL553VE_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x15e0)
#define ASUS_Z71V_SUBVENDOR HDA_MODEL_CONSTRUCT(ASUS, 0x1964)
@@ -371,6 +374,7 @@
#define LENOVO_X300_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x20ac)
#define LENOVO_T400_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x20f2)
#define LENOVO_T420_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21ce)
+#define LENOVO_T420S_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21d2)
#define LENOVO_T430_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21f3)
#define LENOVO_T430S_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21fb)
#define LENOVO_T520_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21cf)
@@ -378,11 +382,13 @@
#define LENOVO_X230_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x21fa)
#define LENOVO_X230T_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x2203)
#define LENOVO_T431S_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x2208)
+#define LENOVO_X1CRBNG11_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x2315)
#define LENOVO_G580_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x3977)
#define LENOVO_L5AMD_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x381b)
#define LENOVO_L5INTEL_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x380f)
#define LENOVO_3000_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x384e)
-#define LENOVO_I330_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x3808)
+#define LENOVO_IDEAPAD330_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x3808)
+#define LENOVO_IDEAPAD3_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0x3881)
#define LENOVO_ALL_SUBVENDOR HDA_MODEL_CONSTRUCT(LENOVO, 0xffff)
/* Samsung */
@@ -422,6 +428,8 @@
*/
#define APPLE_INTEL_MAC 0x76808384
#define APPLE_MACBOOKAIR31 0x0d9410de
+#define APPLE_MACBOOKAIR61 0x106b7100
+#define APPLE_MACBOOKAIR62 0x106b7200
#define APPLE_MACBOOKPRO55 0xcb7910de
#define APPLE_MACBOOKPRO71 0xcb8910de
@@ -525,6 +533,10 @@
#define FRAMEWORK_VENDORID 0xf111
#define FRAMEWORK_LAPTOP_0001_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0001)
#define FRAMEWORK_LAPTOP_0002_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0002)
+#define FRAMEWORK_LAPTOP_0003_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0003)
+#define FRAMEWORK_LAPTOP_0005_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0005)
+#define FRAMEWORK_LAPTOP_0006_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x0006)
+#define FRAMEWORK_LAPTOP_000D_SUBVENDOR HDA_MODEL_CONSTRUCT(FRAMEWORK, 0x000d)
/* All codecs you can eat... */
#define HDA_CODEC_CONSTRUCT(vendor, id) \
@@ -534,6 +546,7 @@
#define CIRRUSLOGIC_VENDORID 0x1013
#define HDA_CODEC_CS4206 HDA_CODEC_CONSTRUCT(CIRRUSLOGIC, 0x4206)
#define HDA_CODEC_CS4207 HDA_CODEC_CONSTRUCT(CIRRUSLOGIC, 0x4207)
+#define HDA_CODEC_CS4208 HDA_CODEC_CONSTRUCT(CIRRUSLOGIC, 0x4208)
#define HDA_CODEC_CS4210 HDA_CODEC_CONSTRUCT(CIRRUSLOGIC, 0x4210)
#define HDA_CODEC_CSXXXX HDA_CODEC_CONSTRUCT(CIRRUSLOGIC, 0xffff)
@@ -571,6 +584,7 @@
#define HDA_CODEC_ALC284 HDA_CODEC_CONSTRUCT(REALTEK, 0x0284)
#define HDA_CODEC_ALC285 HDA_CODEC_CONSTRUCT(REALTEK, 0x0285)
#define HDA_CODEC_ALC286 HDA_CODEC_CONSTRUCT(REALTEK, 0x0286)
+#define HDA_CODEC_ALC287 HDA_CODEC_CONSTRUCT(REALTEK, 0x0287)
#define HDA_CODEC_ALC288 HDA_CODEC_CONSTRUCT(REALTEK, 0x0288)
#define HDA_CODEC_ALC289 HDA_CODEC_CONSTRUCT(REALTEK, 0x0289)
#define HDA_CODEC_ALC290 HDA_CODEC_CONSTRUCT(REALTEK, 0x0290)
@@ -580,8 +594,6 @@
#define HDA_CODEC_ALC295 HDA_CODEC_CONSTRUCT(REALTEK, 0x0295)
#define HDA_CODEC_ALC298 HDA_CODEC_CONSTRUCT(REALTEK, 0x0298)
#define HDA_CODEC_ALC299 HDA_CODEC_CONSTRUCT(REALTEK, 0x0299)
-#define HDA_CODEC_ALC292 HDA_CODEC_CONSTRUCT(REALTEK, 0x0292)
-#define HDA_CODEC_ALC295 HDA_CODEC_CONSTRUCT(REALTEK, 0x0295)
#define HDA_CODEC_ALC300 HDA_CODEC_CONSTRUCT(REALTEK, 0x0300)
#define HDA_CODEC_ALC623 HDA_CODEC_CONSTRUCT(REALTEK, 0x0623)
#define HDA_CODEC_ALC660 HDA_CODEC_CONSTRUCT(REALTEK, 0x0660)
@@ -605,6 +617,7 @@
#define HDA_CODEC_ALC889 HDA_CODEC_CONSTRUCT(REALTEK, 0x0889)
#define HDA_CODEC_ALC892 HDA_CODEC_CONSTRUCT(REALTEK, 0x0892)
#define HDA_CODEC_ALC897 HDA_CODEC_CONSTRUCT(REALTEK, 0x0897)
+#define HDA_CODEC_ALC898 HDA_CODEC_CONSTRUCT(REALTEK, 0x0898)
#define HDA_CODEC_ALC899 HDA_CODEC_CONSTRUCT(REALTEK, 0x0899)
#define HDA_CODEC_ALC1150 HDA_CODEC_CONSTRUCT(REALTEK, 0x0900)
#define HDA_CODEC_ALCS1200A HDA_CODEC_CONSTRUCT(REALTEK, 0x0b00)
@@ -881,6 +894,7 @@
#define HDA_CODEC_NVIDIATEGRA124 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0028)
#define HDA_CODEC_NVIDIATEGRA210 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0029)
#define HDA_CODEC_NVIDIAMCP67 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0067)
+#define HDA_CODEC_NVIDIAGM204 HDA_CODEC_CONSTRUCT(NVIDIA, 0x0071)
#define HDA_CODEC_NVIDIAMCP73 HDA_CODEC_CONSTRUCT(NVIDIA, 0x8001)
#define HDA_CODEC_NVIDIAXXXX HDA_CODEC_CONSTRUCT(NVIDIA, 0xffff)
@@ -906,6 +920,7 @@
#define HDA_CODEC_INTELGMLK1 HDA_CODEC_CONSTRUCT(INTEL, 0x280d)
#define HDA_CODEC_INTELICLK HDA_CODEC_CONSTRUCT(INTEL, 0x280f)
#define HDA_CODEC_INTELTGLK HDA_CODEC_CONSTRUCT(INTEL, 0x2812)
+#define HDA_CODEC_INTELTGLKH HDA_CODEC_CONSTRUCT(INTEL, 0x2814)
#define HDA_CODEC_INTELALLK HDA_CODEC_CONSTRUCT(INTEL, 0x2815)
#define HDA_CODEC_INTELJLK HDA_CODEC_CONSTRUCT(INTEL, 0x281a)
#define HDA_CODEC_INTELELLK HDA_CODEC_CONSTRUCT(INTEL, 0x281b)
diff --git a/sys/dev/sound/pci/hda/hdacc.c b/sys/dev/sound/pci/hda/hdacc.c
index 009c9098ac3b..4198982c9c2a 100644
--- a/sys/dev/sound/pci/hda/hdacc.c
+++ b/sys/dev/sound/pci/hda/hdacc.c
@@ -74,6 +74,7 @@ static const struct {
} hdacc_codecs[] = {
{ HDA_CODEC_CS4206, 0, "Cirrus Logic CS4206" },
{ HDA_CODEC_CS4207, 0, "Cirrus Logic CS4207" },
+ { HDA_CODEC_CS4208, 0, "Cirrus Logic CS4208" },
{ HDA_CODEC_CS4210, 0, "Cirrus Logic CS4210" },
{ HDA_CODEC_ALC215, 0, "Realtek ALC215" },
{ HDA_CODEC_ALC221, 0, "Realtek ALC221" },
@@ -147,6 +148,7 @@ static const struct {
{ HDA_CODEC_ALC889, 0, "Realtek ALC889" },
{ HDA_CODEC_ALC892, 0, "Realtek ALC892" },
{ HDA_CODEC_ALC897, 0, "Realtek ALC897" },
+ { HDA_CODEC_ALC898, 0, "Realtek ALC898" },
{ HDA_CODEC_ALC899, 0, "Realtek ALC899" },
{ HDA_CODEC_ALC1150, 0, "Realtek ALC1150" },
{ HDA_CODEC_ALCS1200A, 0, "Realtek ALCS1200A" },
@@ -358,6 +360,7 @@ static const struct {
{ HDA_CODEC_NVIDIAMCP78_3, 0, "NVIDIA MCP78" },
{ HDA_CODEC_NVIDIAMCP78_4, 0, "NVIDIA MCP78" },
{ HDA_CODEC_NVIDIAMCP7A, 0, "NVIDIA MCP7A" },
+ { HDA_CODEC_NVIDIAGM204, 0, "NVIDIA GM204" },
{ HDA_CODEC_NVIDIAGT220, 0, "NVIDIA GT220" },
{ HDA_CODEC_NVIDIAGT21X, 0, "NVIDIA GT21x" },
{ HDA_CODEC_NVIDIAMCP89, 0, "NVIDIA MCP89" },
@@ -393,6 +396,7 @@ static const struct {
{ HDA_CODEC_INTELGMLK1, 0, "Intel Gemini Lake" },
{ HDA_CODEC_INTELICLK, 0, "Intel Ice Lake" },
{ HDA_CODEC_INTELTGLK, 0, "Intel Tiger Lake" },
+ { HDA_CODEC_INTELTGLKH, 0, "Intel Tiger Lake-H" },
{ HDA_CODEC_INTELALLK, 0, "Intel Alder Lake" },
{ HDA_CODEC_SII1390, 0, "Silicon Image SiI1390" },
{ HDA_CODEC_SII1392, 0, "Silicon Image SiI1392" },
@@ -520,7 +524,7 @@ hdacc_attach(device_t dev)
codec->fgs[n].subsystem_id = hda_command(dev,
HDA_CMD_GET_SUBSYSTEM_ID(0, i));
hdacc_unlock(codec);
- codec->fgs[n].dev = child = device_add_child(dev, NULL, -1);
+ codec->fgs[n].dev = child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "Failed to add function device\n");
continue;
@@ -528,7 +532,7 @@ hdacc_attach(device_t dev)
device_set_ivars(child, &codec->fgs[n]);
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
}
@@ -539,7 +543,7 @@ hdacc_detach(device_t dev)
struct hdacc_softc *codec = device_get_softc(dev);
int error;
- if ((error = device_delete_children(dev)) != 0)
+ if ((error = bus_generic_detach(dev)) != 0)
return (error);
free(codec->fgs, M_HDACC);
return (0);
diff --git a/sys/dev/sound/pci/hda/pin_patch_realtek.h b/sys/dev/sound/pci/hda/pin_patch_realtek.h
index 49afdedea8a1..abb03c92571b 100644
--- a/sys/dev/sound/pci/hda/pin_patch_realtek.h
+++ b/sys/dev/sound/pci/hda/pin_patch_realtek.h
@@ -582,6 +582,21 @@ static struct hdaa_model_pin_patch_t realtek_model_pin_patches[] = {
}
}, { }
}
+ }, { /**** CODEC: HDA_CODEC_ALC287 ****/
+ .id = HDA_CODEC_ALC287,
+ .patches = (struct model_pin_patch_t[]){
+ {
+ .models = (struct pin_machine_model_t[]){
+ PIN_SUBVENDOR(LENOVO_X1CRBNG11_SUBVENDOR),
+ { }
+ },
+ .pin_patches = (struct pin_patch_t[]){
+ PIN_PATCH_NOT_APPLICABLE(20),
+ PIN_PATCH_STRING(33, "as=1 seq=15 device=Headphones loc=Right"),
+ { }
+ }
+ }, { }
+ }
}, { /**** CODEC: HDA_CODEC_ALC288 ****/
.id = HDA_CODEC_ALC288,
.patches = (struct model_pin_patch_t[]){
diff --git a/sys/dev/sound/pci/hdsp-pcm.c b/sys/dev/sound/pci/hdsp-pcm.c
new file mode 100644
index 000000000000..5ac571e64fde
--- /dev/null
+++ b/sys/dev/sound/pci/hdsp-pcm.c
@@ -0,0 +1,1136 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2012-2021 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RME HDSP driver for FreeBSD (pcm-part).
+ * Supported cards: HDSP 9632, HDSP 9652.
+ */
+
+#include <sys/libkern.h>
+
+#include <dev/sound/pcm/sound.h>
+#include <dev/sound/pci/hdsp.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <mixer_if.h>
+
+#define HDSP_MATRIX_MAX 8
+
+struct hdsp_latency {
+ uint32_t n;
+ uint32_t period;
+ float ms;
+};
+
+static struct hdsp_latency latency_map[] = {
+ { 7, 32, 0.7 },
+ { 0, 64, 1.5 },
+ { 1, 128, 3 },
+ { 2, 256, 6 },
+ { 3, 512, 12 },
+ { 4, 1024, 23 },
+ { 5, 2048, 46 },
+ { 6, 4096, 93 },
+
+ { 0, 0, 0 },
+};
+
+struct hdsp_rate {
+ uint32_t speed;
+ uint32_t reg;
+};
+
+static struct hdsp_rate rate_map[] = {
+ { 32000, (HDSP_FREQ_32000) },
+ { 44100, (HDSP_FREQ_44100) },
+ { 48000, (HDSP_FREQ_48000) },
+ { 64000, (HDSP_FREQ_32000 | HDSP_FREQ_DOUBLE) },
+ { 88200, (HDSP_FREQ_44100 | HDSP_FREQ_DOUBLE) },
+ { 96000, (HDSP_FREQ_48000 | HDSP_FREQ_DOUBLE) },
+ { 128000, (HDSP_FREQ_32000 | HDSP_FREQ_QUAD) },
+ { 176400, (HDSP_FREQ_44100 | HDSP_FREQ_QUAD) },
+ { 192000, (HDSP_FREQ_48000 | HDSP_FREQ_QUAD) },
+
+ { 0, 0 },
+};
+
+static uint32_t
+hdsp_adat_slot_map(uint32_t speed)
+{
+ /* ADAT slot bitmap depends on sample rate. */
+ if (speed <= 48000)
+ return (0x000000ff); /* 8 channels single speed. */
+ else if (speed <= 96000)
+ return (0x000000aa); /* 4 channels (1,3,5,7) double speed. */
+ else
+ return (0x00000000); /* ADAT disabled at quad speed. */
+}
+
+static uint32_t
+hdsp_port_slot_map(uint32_t ports, uint32_t speed)
+{
+ uint32_t slot_map = 0;
+
+ if (ports & HDSP_CHAN_9632_ALL) {
+ /* Map HDSP 9632 ports to slot bitmap. */
+ if (ports & HDSP_CHAN_9632_ADAT)
+ slot_map |= (hdsp_adat_slot_map(speed) << 0);
+ if (ports & HDSP_CHAN_9632_SPDIF)
+ slot_map |= (0x03 << 8); /* 2 channels SPDIF. */
+ if (ports & HDSP_CHAN_9632_LINE)
+ slot_map |= (0x03 << 10); /* 2 channels line. */
+ if (ports & HDSP_CHAN_9632_EXT)
+ slot_map |= (0x0f << 12); /* 4 channels extension. */
+ } else if ((ports & HDSP_CHAN_9652_ALL) && (speed <= 96000)) {
+ /* Map HDSP 9652 ports to slot bitmap, no quad speed. */
+ if (ports & HDSP_CHAN_9652_ADAT1)
+ slot_map |= (hdsp_adat_slot_map(speed) << 0);
+ if (ports & HDSP_CHAN_9652_ADAT2)
+ slot_map |= (hdsp_adat_slot_map(speed) << 8);
+ if (ports & HDSP_CHAN_9652_ADAT3)
+ slot_map |= (hdsp_adat_slot_map(speed) << 16);
+ if (ports & HDSP_CHAN_9652_SPDIF)
+ slot_map |= (0x03 << 24); /* 2 channels SPDIF. */
+ }
+
+ return (slot_map);
+}
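For a feel of the slot bitmaps this produces, here is a standalone userspace recomputation of the 9632 layout using the shifts from the function above (a sketch; the real HDSP_CHAN_* port masks live in hdsp.h and are not reproduced here):

#include <assert.h>
#include <stdint.h>

static uint32_t
adat_slots(uint32_t speed)
{
	if (speed <= 48000)
		return (0x000000ff);	/* 8 ADAT slots at single speed. */
	else if (speed <= 96000)
		return (0x000000aa);	/* Slots 1,3,5,7 at double speed. */
	return (0x00000000);		/* ADAT disabled at quad speed. */
}

int
main(void)
{
	/* 9632 with ADAT, SPDIF, line and extension ports all selected. */
	uint32_t map48 = adat_slots(48000) |
	    (0x03 << 8) | (0x03 << 10) | (0x0f << 12);
	uint32_t map96 = adat_slots(96000) |
	    (0x03 << 8) | (0x03 << 10) | (0x0f << 12);

	assert(map48 == 0x0000ffff);	/* 16 contiguous slots. */
	assert(map96 == 0x0000ffaa);	/* 12 slots, ADAT thinned out. */
	return (0);
}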
+
+static uint32_t
+hdsp_slot_first(uint32_t slots)
+{
+ return (slots & (~(slots - 1))); /* Extract first bit set. */
+}
+
+static uint32_t
+hdsp_slot_first_row(uint32_t slots)
+{
+ uint32_t ends;
+
+ /* Ends of slot rows are followed by a slot which is not in the set. */
+ ends = slots & (~(slots >> 1));
+ /* First row of contiguous slots ends in the first row end. */
+ return (slots & (ends ^ (ends - 1)));
+}
+
+static uint32_t
+hdsp_slot_first_n(uint32_t slots, unsigned int n)
+{
+ /* Clear all but the first n slots. */
+ for (uint32_t slot = 1; slot != 0; slot <<= 1) {
+ if ((slots & slot) && n > 0)
+ --n;
+ else
+ slots &= ~slot;
+ }
+ return (slots);
+}
+
+static unsigned int
+hdsp_slot_count(uint32_t slots)
+{
+ return (bitcount32(slots));
+}
+
+static unsigned int
+hdsp_slot_offset(uint32_t slots)
+{
+ return (hdsp_slot_count(hdsp_slot_first(slots) - 1));
+}
+
+static unsigned int
+hdsp_slot_channel_offset(uint32_t subset, uint32_t slots)
+{
+ uint32_t preceding;
+
+ /* Make sure we have a subset of slots. */
+ subset &= slots;
+ /* Include all slots preceding the first one of the subset. */
+ preceding = slots & (hdsp_slot_first(subset) - 1);
+
+ return (hdsp_slot_count(preceding));
+}
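These helpers are branch-free bit tricks: slots & ~(slots - 1) isolates the lowest set bit, and the first_row variant masks off everything above the first gap in the bitmap. A standalone sanity check of the arithmetic (a sketch, with __builtin_popcount standing in for the kernel-only bitcount32):

#include <assert.h>
#include <stdint.h>

static uint32_t
slot_first(uint32_t s)
{
	return (s & ~(s - 1));
}

static uint32_t
slot_first_row(uint32_t s)
{
	uint32_t ends = s & ~(s >> 1);	/* Slots whose successor is absent. */

	return (s & (ends ^ (ends - 1)));
}

int
main(void)
{
	uint32_t slots = 0xdc;	/* Slots 2-4 and 6-7: two rows. */

	assert(slot_first(slots) == 0x04);	/* Lowest slot: slot 2. */
	assert(slot_first_row(slots) == 0x1c);	/* First row: slots 2-4. */
	/* Slot offset of the first slot, as hdsp_slot_offset() computes. */
	assert(__builtin_popcount(slot_first(slots) - 1) == 2);
	return (0);
}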
+
+static uint32_t
+hdsp_port_first(uint32_t ports)
+{
+ return (ports & (~(ports - 1))); /* Extract first bit set. */
+}
+
+static unsigned int
+hdsp_port_slot_count(uint32_t ports, uint32_t speed)
+{
+ return (hdsp_slot_count(hdsp_port_slot_map(ports, speed)));
+}
+
+static unsigned int
+hdsp_port_slot_count_max(uint32_t ports)
+{
+ return (hdsp_slot_count(hdsp_port_slot_map(ports, 48000)));
+}
+
+static uint32_t
+hdsp_channel_play_ports(struct hdsp_channel *hc)
+{
+ return (hc->ports & (HDSP_CHAN_9632_ALL | HDSP_CHAN_9652_ALL));
+}
+
+static uint32_t
+hdsp_channel_rec_ports(struct hdsp_channel *hc)
+{
+ return (hc->ports & (HDSP_CHAN_9632_ALL | HDSP_CHAN_9652_ALL));
+}
+
+static int
+hdsp_hw_mixer(struct sc_chinfo *ch, unsigned int dst,
+ unsigned int src, unsigned short data)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ uint32_t value;
+ int offset;
+
+ scp = ch->parent;
+ sc = scp->sc;
+
+ offset = 0;
+ value = (HDSP_MIN_GAIN << 16) | (uint16_t) data;
+
+ if (ch->dir != PCMDIR_PLAY)
+ return (0);
+
+ switch (sc->type) {
+ case HDSP_9632:
+ /* Mixer is 2 rows of sources (inputs, playback) per output. */
+ offset = dst * (2 * HDSP_MIX_SLOTS_9632);
+ /* Source index in the second row (playback). */
+ offset += HDSP_MIX_SLOTS_9632 + src;
+ break;
+ case HDSP_9652:
+ /* Mixer is 2 rows of sources (inputs, playback) per output. */
+ offset = dst * (2 * HDSP_MIX_SLOTS_9652);
+ /* Source index in the second row (playback). */
+ offset += HDSP_MIX_SLOTS_9652 + src;
+ break;
+ default:
+ return (0);
+ }
+
+ /*
+ * We have to write mixer matrix values in pairs, with the second
+ * (odd) value in the upper 16 bits of the 32 bit value.
+ * Make value offset even and shift value accordingly.
+ * Assume the paired value to be silenced, since we only set gain
+ * on the diagonal where src and dst are the same.
+ */
+ if (offset % 2) {
+ offset -= 1;
+ value = (value << 16) | HDSP_MIN_GAIN;
+ }
+
+ hdsp_write_4(sc, HDSP_MIXER_BASE + offset * sizeof(uint16_t), value);
+
+ return (0);
+}
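The pairing trick above exists because the mixer memory is an array of 16-bit gains that must be written 32 bits at a time: an even offset selects the low half-word and offset+1 the high half-word, so an odd offset is rounded down and the gain shifted into the upper 16 bits, with HDSP_MIN_GAIN silencing the untouched partner slot. For instance, a source/destination pair that computes to offset 115 is written at offset 114 with the gain placed in bits 31:16.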
+
+static int
+hdspchan_setgain(struct sc_chinfo *ch)
+{
+ uint32_t port, ports;
+ uint32_t slot, slots;
+ unsigned int offset;
+ unsigned short volume;
+
+ /* Iterate through all physical ports of the channel. */
+ ports = ch->ports;
+ port = hdsp_port_first(ports);
+ while (port != 0) {
+ /*
+ * Get slot map from physical port.
+ * Unlike DMA buffers, the hardware mixer's channel mapping
+ * does not change with double or quad speed sample rates.
+ */
+ slots = hdsp_port_slot_map(port, 48000);
+ slot = hdsp_slot_first(slots);
+
+ /* Treat first slot as left channel. */
+ volume = ch->lvol * HDSP_MAX_GAIN / 100;
+ while (slot != 0) {
+ offset = hdsp_slot_offset(slot);
+ hdsp_hw_mixer(ch, offset, offset, volume);
+
+ slots &= ~slot;
+ slot = hdsp_slot_first(slots);
+
+ /* Subsequent slots all get the right channel volume. */
+ volume = ch->rvol * HDSP_MAX_GAIN / 100;
+ }
+
+ ports &= ~port;
+ port = hdsp_port_first(ports);
+ }
+
+ return (0);
+}
+
+static int
+hdspmixer_init(struct snd_mixer *m)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ int mask;
+
+ scp = mix_getdevinfo(m);
+ sc = scp->sc;
+ if (sc == NULL)
+ return (-1);
+
+ mask = SOUND_MASK_PCM;
+
+ if (hdsp_channel_play_ports(scp->hc))
+ mask |= SOUND_MASK_VOLUME;
+
+ if (hdsp_channel_rec_ports(scp->hc))
+ mask |= SOUND_MASK_RECLEV;
+
+ snd_mtxlock(sc->lock);
+ pcm_setflags(scp->dev, pcm_getflags(scp->dev) | SD_F_SOFTPCMVOL);
+ mix_setdevs(m, mask);
+ snd_mtxunlock(sc->lock);
+
+ return (0);
+}
+
+static int
+hdspmixer_set(struct snd_mixer *m, unsigned dev,
+ unsigned left, unsigned right)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ int i;
+
+ scp = mix_getdevinfo(m);
+
+#if 0
+ device_printf(scp->dev, "hdspmixer_set() %d %d\n",
+ left, right);
+#endif
+
+ for (i = 0; i < scp->chnum; i++) {
+ ch = &scp->chan[i];
+ if ((dev == SOUND_MIXER_VOLUME && ch->dir == PCMDIR_PLAY) ||
+ (dev == SOUND_MIXER_RECLEV && ch->dir == PCMDIR_REC)) {
+ ch->lvol = left;
+ ch->rvol = right;
+ if (ch->run)
+ hdspchan_setgain(ch);
+ }
+ }
+
+ return (0);
+}
+
+static kobj_method_t hdspmixer_methods[] = {
+ KOBJMETHOD(mixer_init, hdspmixer_init),
+ KOBJMETHOD(mixer_set, hdspmixer_set),
+ KOBJMETHOD_END
+};
+MIXER_DECLARE(hdspmixer);
+
+static void
+hdspchan_enable(struct sc_chinfo *ch, int value)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ uint32_t slot, slots;
+ unsigned int offset;
+ int reg;
+
+ scp = ch->parent;
+ sc = scp->sc;
+
+ if (ch->dir == PCMDIR_PLAY)
+ reg = HDSP_OUT_ENABLE_BASE;
+ else
+ reg = HDSP_IN_ENABLE_BASE;
+
+ ch->run = value;
+
+ /* Iterate through all slots of the channel's physical ports. */
+ slots = hdsp_port_slot_map(ch->ports, sc->speed);
+ slot = hdsp_slot_first(slots);
+ while (slot != 0) {
+ /* Set register to enable or disable slot. */
+ offset = hdsp_slot_offset(slot);
+ hdsp_write_1(sc, reg + (4 * offset), value);
+
+ slots &= ~slot;
+ slot = hdsp_slot_first(slots);
+ }
+}
+
+static int
+hdsp_running(struct sc_info *sc)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ device_t *devlist;
+ int devcount;
+ int i, j;
+ int running;
+
+ running = 0;
+
+ devlist = NULL;
+ devcount = 0;
+
+ if (device_get_children(sc->dev, &devlist, &devcount) != 0)
+ running = 1; /* On error, avoid channel config changes. */
+
+ for (i = 0; running == 0 && i < devcount; i++) {
+ scp = device_get_ivars(devlist[i]);
+ for (j = 0; j < scp->chnum; j++) {
+ ch = &scp->chan[j];
+ if (ch->run) {
+ running = 1;
+ break;
+ }
+ }
+ }
+
+#if 0
+ if (running == 1)
+ device_printf(sc->dev, "hdsp is running\n");
+#endif
+
+ free(devlist, M_TEMP);
+
+ return (running);
+}
+
+static void
+hdsp_start_audio(struct sc_info *sc)
+{
+
+ sc->ctrl_register |= (HDSP_AUDIO_INT_ENABLE | HDSP_ENABLE);
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+}
+
+static void
+hdsp_stop_audio(struct sc_info *sc)
+{
+
+ if (hdsp_running(sc) == 1)
+ return;
+
+ sc->ctrl_register &= ~(HDSP_AUDIO_INT_ENABLE | HDSP_ENABLE);
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+}
+
+static void
+buffer_mux_write(uint32_t *dma, uint32_t *pcm, unsigned int pos,
+ unsigned int pos_end, unsigned int width, unsigned int channels)
+{
+ unsigned int slot;
+
+ for (; pos < pos_end; ++pos) {
+ for (slot = 0; slot < width; slot++) {
+ dma[slot * HDSP_CHANBUF_SAMPLES + pos] =
+ pcm[pos * channels + slot];
+ }
+ }
+}
+
+static void
+buffer_mux_port(uint32_t *dma, uint32_t *pcm, uint32_t subset, uint32_t slots,
+ unsigned int pos, unsigned int samples, unsigned int channels)
+{
+ unsigned int slot_offset, width;
+ unsigned int chan_pos;
+
+ /* Translate DMA slot offset to DMA buffer offset. */
+ slot_offset = hdsp_slot_offset(subset);
+ dma += slot_offset * HDSP_CHANBUF_SAMPLES;
+
+ /* Channel position of the slot subset. */
+ chan_pos = hdsp_slot_channel_offset(subset, slots);
+ pcm += chan_pos;
+
+ /* Only copy channels supported by both hardware and pcm format. */
+ width = hdsp_slot_count(subset);
+
+ /* Let the compiler inline and loop unroll common cases. */
+ if (width == 1)
+ buffer_mux_write(dma, pcm, pos, pos + samples, 1, channels);
+ else if (width == 2)
+ buffer_mux_write(dma, pcm, pos, pos + samples, 2, channels);
+ else if (width == 4)
+ buffer_mux_write(dma, pcm, pos, pos + samples, 4, channels);
+ else if (width == 8)
+ buffer_mux_write(dma, pcm, pos, pos + samples, 8, channels);
+ else
+ buffer_mux_write(dma, pcm, pos, pos + samples, width, channels);
+}
+
+static void
+buffer_demux_read(uint32_t *dma, uint32_t *pcm, unsigned int pos,
+ unsigned int pos_end, unsigned int width, unsigned int channels)
+{
+ unsigned int slot;
+
+ for (; pos < pos_end; ++pos) {
+ for (slot = 0; slot < width; slot++) {
+ pcm[pos * channels + slot] =
+ dma[slot * HDSP_CHANBUF_SAMPLES + pos];
+ }
+ }
+}
+
+static void
+buffer_demux_port(uint32_t *dma, uint32_t *pcm, uint32_t subset, uint32_t slots,
+ unsigned int pos, unsigned int samples, unsigned int channels)
+{
+ unsigned int slot_offset, width;
+ unsigned int chan_pos;
+
+ /* Translate DMA slot offset to DMA buffer offset. */
+ slot_offset = hdsp_slot_offset(subset);
+ dma += slot_offset * HDSP_CHANBUF_SAMPLES;
+
+ /* Channel position of the slot subset. */
+ chan_pos = hdsp_slot_channel_offset(subset, slots);
+ pcm += chan_pos;
+
+ /* Only copy channels supported by both hardware and pcm format. */
+ width = hdsp_slot_count(subset);
+
+ /* Let the compiler inline and loop unroll common cases. */
+ if (width == 1)
+ buffer_demux_read(dma, pcm, pos, pos + samples, 1, channels);
+ else if (width == 2)
+ buffer_demux_read(dma, pcm, pos, pos + samples, 2, channels);
+ else if (width == 4)
+ buffer_demux_read(dma, pcm, pos, pos + samples, 4, channels);
+ else if (width == 8)
+ buffer_demux_read(dma, pcm, pos, pos + samples, 8, channels);
+ else
+ buffer_demux_read(dma, pcm, pos, pos + samples, width, channels);
+}
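The mux/demux pair converts between the interleaved frames pcm(7) delivers and the planar per-slot layout of the card's DMA buffer (pcm[pos * channels + slot] versus dma[slot * HDSP_CHANBUF_SAMPLES + pos]). A tiny standalone check of that index mapping, with HDSP_CHANBUF_SAMPLES shrunk to 4 for readability:

#include <assert.h>
#include <stdint.h>

#define CHANBUF_SAMPLES 4	/* Stand-in for HDSP_CHANBUF_SAMPLES. */

int
main(void)
{
	uint32_t pcm[] = { 10, 20, 11, 21 };	/* L0 R0 L1 R1, interleaved. */
	uint32_t dma[2 * CHANBUF_SAMPLES] = { 0 };

	/* Same index mapping as buffer_mux_write() above. */
	for (unsigned pos = 0; pos < 2; pos++)
		for (unsigned slot = 0; slot < 2; slot++)
			dma[slot * CHANBUF_SAMPLES + pos] =
			    pcm[pos * 2 + slot];

	assert(dma[0] == 10 && dma[1] == 11);	/* Slot 0: left, planar. */
	assert(dma[CHANBUF_SAMPLES] == 20 &&
	    dma[CHANBUF_SAMPLES + 1] == 21);	/* Slot 1: right, planar. */
	return (0);
}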
+
+/* Copy data between DMA and PCM buffers. */
+static void
+buffer_copy(struct sc_chinfo *ch)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ uint32_t row, slots;
+ uint32_t dma_pos;
+ unsigned int pos, length, remainder, offset, buffer_size;
+ unsigned int channels;
+
+ scp = ch->parent;
+ sc = scp->sc;
+
+ channels = AFMT_CHANNEL(ch->format); /* Number of PCM channels. */
+
+ /* HDSP cards read / write a double buffer, twice the latency period. */
+ buffer_size = 2 * sc->period * sizeof(uint32_t);
+
+ /* Derive buffer position and length to be copied. */
+ if (ch->dir == PCMDIR_PLAY) {
+ /* Buffer position scaled down to a single channel. */
+ pos = sndbuf_getreadyptr(ch->buffer) / channels;
+ length = sndbuf_getready(ch->buffer) / channels;
+ /* Copy no more than 2 periods in advance. */
+ if (length > buffer_size)
+ length = buffer_size;
+ /* Skip what was already copied last time. */
+ offset = (ch->position + buffer_size) - pos;
+ offset %= buffer_size;
+ if (offset <= length) {
+ pos = (pos + offset) % buffer_size;
+ length -= offset;
+ }
+ } else {
+ /* Buffer position scaled down to a single channel. */
+ pos = sndbuf_getfreeptr(ch->buffer) / channels;
+ /* Get DMA buffer write position. */
+ dma_pos = hdsp_read_2(sc, HDSP_STATUS_REG);
+ dma_pos &= HDSP_BUF_POSITION_MASK;
+ dma_pos %= buffer_size;
+ /* Copy what is newly available. */
+ length = (dma_pos + buffer_size) - pos;
+ length %= buffer_size;
+ }
+
+ /* Position and length in samples (4 bytes). */
+ pos /= 4;
+ length /= 4;
+ buffer_size /= sizeof(uint32_t);
+
+ /* Split copy length to wrap around at buffer end. */
+ remainder = 0;
+ if (pos + length > buffer_size)
+ remainder = (pos + length) - buffer_size;
+
+ /* Iterate through rows of contiguous slots. */
+ slots = hdsp_port_slot_map(ch->ports, sc->speed);
+ slots = hdsp_slot_first_n(slots, channels);
+ row = hdsp_slot_first_row(slots);
+
+ while (row != 0) {
+ if (ch->dir == PCMDIR_PLAY) {
+ buffer_mux_port(sc->pbuf, ch->data, row, slots, pos,
+ length - remainder, channels);
+ buffer_mux_port(sc->pbuf, ch->data, row, slots, 0,
+ remainder, channels);
+ } else {
+ buffer_demux_port(sc->rbuf, ch->data, row, slots, pos,
+ length - remainder, channels);
+ buffer_demux_port(sc->rbuf, ch->data, row, slots, 0,
+ remainder, channels);
+ }
+
+ slots &= ~row;
+ row = hdsp_slot_first_row(slots);
+ }
+
+ ch->position = ((pos + length) * 4) % buffer_size;
+}
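The position bookkeeping above reduces to a single wrap-around split; a standalone check of that arithmetic with a toy 8-sample double buffer:

#include <assert.h>

int
main(void)
{
	unsigned pos = 6, length = 4, buffer_size = 8, remainder = 0;

	/* Same split as buffer_copy(): tail chunk, then wrapped head. */
	if (pos + length > buffer_size)
		remainder = (pos + length) - buffer_size;

	assert(length - remainder == 2);	/* Copy samples 6..7 first, */
	assert(remainder == 2);			/* then samples 0..1. */
	assert((pos + length) % buffer_size == 2);	/* Resulting position. */
	return (0);
}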
+
+static int
+clean(struct sc_chinfo *ch)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ uint32_t *buf;
+ uint32_t slot, slots;
+ unsigned int offset;
+
+ scp = ch->parent;
+ sc = scp->sc;
+ buf = sc->rbuf;
+
+ if (ch->dir == PCMDIR_PLAY)
+ buf = sc->pbuf;
+
+ /* Iterate through all of the channel's slots. */
+ slots = hdsp_port_slot_map(ch->ports, sc->speed);
+ slot = hdsp_slot_first(slots);
+ while (slot != 0) {
+ /* Clear the slot's buffer. */
+ offset = hdsp_slot_offset(slot);
+ bzero(buf + offset * HDSP_CHANBUF_SAMPLES, HDSP_CHANBUF_SIZE);
+
+ slots &= ~slot;
+ slot = hdsp_slot_first(slots);
+ }
+
+ ch->position = 0;
+
+ return (0);
+}
+
+/* Channel interface. */
+static int
+hdspchan_free(kobj_t obj, void *data)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+
+#if 0
+ device_printf(scp->dev, "hdspchan_free()\n");
+#endif
+
+ snd_mtxlock(sc->lock);
+ if (ch->data != NULL) {
+ free(ch->data, M_HDSP);
+ ch->data = NULL;
+ }
+ if (ch->caps != NULL) {
+ free(ch->caps, M_HDSP);
+ ch->caps = NULL;
+ }
+ snd_mtxunlock(sc->lock);
+
+ return (0);
+}
+
+static void *
+hdspchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
+ struct pcm_channel *c, int dir)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+ int num;
+
+ scp = devinfo;
+ sc = scp->sc;
+
+ snd_mtxlock(sc->lock);
+ num = scp->chnum;
+
+ ch = &scp->chan[num];
+
+ if (dir == PCMDIR_PLAY)
+ ch->ports = hdsp_channel_play_ports(scp->hc);
+ else
+ ch->ports = hdsp_channel_rec_ports(scp->hc);
+
+ ch->run = 0;
+ ch->lvol = 0;
+ ch->rvol = 0;
+
+ /* Support all possible ADAT widths as channel formats. */
+ ch->cap_fmts[0] =
+ SND_FORMAT(AFMT_S32_LE, hdsp_port_slot_count(ch->ports, 48000), 0);
+ ch->cap_fmts[1] =
+ SND_FORMAT(AFMT_S32_LE, hdsp_port_slot_count(ch->ports, 96000), 0);
+ ch->cap_fmts[2] =
+ SND_FORMAT(AFMT_S32_LE, hdsp_port_slot_count(ch->ports, 192000), 0);
+ ch->cap_fmts[3] = 0;
+
+	ch->caps = malloc(sizeof(struct pcmchan_caps), M_HDSP, M_NOWAIT);
+	if (ch->caps == NULL) {
+		/* M_NOWAIT allocation may fail; bail out before the deref. */
+		snd_mtxunlock(sc->lock);
+		return (NULL);
+	}
+	*(ch->caps) = (struct pcmchan_caps) {32000, 192000, ch->cap_fmts, 0};
+
+ /* HDSP 9652 does not support quad speed sample rates. */
+ if (sc->type == HDSP_9652) {
+ ch->cap_fmts[2] = SND_FORMAT(AFMT_S32_LE, 2, 0);
+ ch->caps->maxspeed = 96000;
+ }
+
+ /* Allocate maximum buffer size. */
+ ch->size = HDSP_CHANBUF_SIZE * hdsp_port_slot_count_max(ch->ports);
+ ch->data = malloc(ch->size, M_HDSP, M_NOWAIT);
+ ch->position = 0;
+
+ ch->buffer = b;
+ ch->channel = c;
+ ch->parent = scp;
+
+ ch->dir = dir;
+
+ snd_mtxunlock(sc->lock);
+
+ if (sndbuf_setup(ch->buffer, ch->data, ch->size) != 0) {
+ device_printf(scp->dev, "Can't setup sndbuf.\n");
+ hdspchan_free(obj, ch);
+ return (NULL);
+ }
+
+ return (ch);
+}
+
+static int
+hdspchan_trigger(kobj_t obj, void *data, int go)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+
+ snd_mtxlock(sc->lock);
+ switch (go) {
+ case PCMTRIG_START:
+#if 0
+ device_printf(scp->dev, "hdspchan_trigger(): start\n");
+#endif
+ hdspchan_enable(ch, 1);
+ hdspchan_setgain(ch);
+ hdsp_start_audio(sc);
+ break;
+
+ case PCMTRIG_STOP:
+ case PCMTRIG_ABORT:
+#if 0
+ device_printf(scp->dev, "hdspchan_trigger(): stop or abort\n");
+#endif
+ clean(ch);
+ hdspchan_enable(ch, 0);
+ hdsp_stop_audio(sc);
+ break;
+
+ case PCMTRIG_EMLDMAWR:
+ case PCMTRIG_EMLDMARD:
+		if (ch->run)
+ buffer_copy(ch);
+ break;
+ }
+
+ snd_mtxunlock(sc->lock);
+
+ return (0);
+}
+
+static uint32_t
+hdspchan_getptr(kobj_t obj, void *data)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+ uint32_t ret, pos;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+
+ snd_mtxlock(sc->lock);
+ ret = hdsp_read_2(sc, HDSP_STATUS_REG);
+ snd_mtxunlock(sc->lock);
+
+ pos = ret & HDSP_BUF_POSITION_MASK;
+ pos %= (2 * sc->period * sizeof(uint32_t)); /* Double buffer. */
+ pos *= AFMT_CHANNEL(ch->format); /* Hardbuf with multiple channels. */
+
+ return (pos);
+}
+
+static int
+hdspchan_setformat(kobj_t obj, void *data, uint32_t format)
+{
+ struct sc_chinfo *ch;
+
+ ch = data;
+
+#if 0
+ struct sc_pcminfo *scp = ch->parent;
+ device_printf(scp->dev, "hdspchan_setformat(%d)\n", format);
+#endif
+
+ ch->format = format;
+
+ return (0);
+}
+
+static uint32_t
+hdspchan_setspeed(kobj_t obj, void *data, uint32_t speed)
+{
+ struct sc_pcminfo *scp;
+ struct hdsp_rate *hr;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+ int threshold;
+ int i;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+ hr = NULL;
+
+#if 0
+ device_printf(scp->dev, "hdspchan_setspeed(%d)\n", speed);
+#endif
+
+ if (hdsp_running(sc) == 1)
+ goto end;
+
+ /* HDSP 9652 only supports sample rates up to 96kHz. */
+ if (sc->type == HDSP_9652 && speed > 96000)
+ speed = 96000;
+
+ if (sc->force_speed > 0)
+ speed = sc->force_speed;
+
+ /* First look for equal frequency. */
+ for (i = 0; rate_map[i].speed != 0; i++) {
+ if (rate_map[i].speed == speed)
+ hr = &rate_map[i];
+ }
+
+ /* If no match, just find nearest. */
+ if (hr == NULL) {
+ for (i = 0; rate_map[i].speed != 0; i++) {
+ hr = &rate_map[i];
+ threshold = hr->speed + ((rate_map[i + 1].speed != 0) ?
+ ((rate_map[i + 1].speed - hr->speed) >> 1) : 0);
+ if (speed < threshold)
+ break;
+ }
+ }
+
+ /* Write frequency on the device. */
+ sc->ctrl_register &= ~HDSP_FREQ_MASK;
+ sc->ctrl_register |= hr->reg;
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+
+ if (sc->type == HDSP_9632) {
+ /* Set DDS value. */
+ hdsp_write_4(sc, HDSP_FREQ_REG, hdsp_freq_reg_value(hr->speed));
+ }
+
+ sc->speed = hr->speed;
+end:
+
+ return (sc->speed);
+}
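The nearest-match fallback picks the table entry whose midpoint threshold the requested speed falls under; a standalone check against the rate table above:

#include <assert.h>
#include <stdint.h>

static uint32_t rates[] = { 32000, 44100, 48000, 64000, 88200, 96000,
    128000, 176400, 192000, 0 };

static uint32_t
nearest(uint32_t speed)
{
	uint32_t sel, threshold;
	int i;

	sel = 0;
	for (i = 0; rates[i] != 0; i++) {
		sel = rates[i];
		/* Midpoint between this entry and the next one. */
		threshold = sel + ((rates[i + 1] != 0) ?
		    ((rates[i + 1] - sel) >> 1) : 0);
		if (speed < threshold)
			break;
	}
	return (sel);
}

int
main(void)
{
	assert(nearest(50000) == 48000);	/* Below 48k/64k midpoint. */
	assert(nearest(90000) == 88200);	/* Below 88.2k/96k midpoint. */
	assert(nearest(500000) == 192000);	/* Clamped to the last entry. */
	return (0);
}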
+
+static uint32_t
+hdspchan_setblocksize(kobj_t obj, void *data, uint32_t blocksize)
+{
+ struct hdsp_latency *hl;
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+ int threshold;
+ int i;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+ hl = NULL;
+
+#if 0
+ device_printf(scp->dev, "hdspchan_setblocksize(%d)\n", blocksize);
+#endif
+
+ if (hdsp_running(sc) == 1)
+ goto end;
+
+ if (blocksize > HDSP_LAT_BYTES_MAX)
+ blocksize = HDSP_LAT_BYTES_MAX;
+ else if (blocksize < HDSP_LAT_BYTES_MIN)
+ blocksize = HDSP_LAT_BYTES_MIN;
+
+ blocksize /= 4 /* samples */;
+
+ if (sc->force_period > 0)
+ blocksize = sc->force_period;
+
+ /* First look for equal latency. */
+ for (i = 0; latency_map[i].period != 0; i++) {
+ if (latency_map[i].period == blocksize)
+ hl = &latency_map[i];
+ }
+
+ /* If no match, just find nearest. */
+ if (hl == NULL) {
+ for (i = 0; latency_map[i].period != 0; i++) {
+ hl = &latency_map[i];
+ threshold = hl->period + ((latency_map[i + 1].period != 0) ?
+ ((latency_map[i + 1].period - hl->period) >> 1) : 0);
+ if (blocksize < threshold)
+ break;
+ }
+ }
+
+ snd_mtxlock(sc->lock);
+ sc->ctrl_register &= ~HDSP_LAT_MASK;
+ sc->ctrl_register |= hdsp_encode_latency(hl->n);
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+ sc->period = hl->period;
+ snd_mtxunlock(sc->lock);
+
+#if 0
+ device_printf(scp->dev, "New period=%d\n", sc->period);
+#endif
+
+ sndbuf_resize(ch->buffer, 2,
+ (sc->period * AFMT_CHANNEL(ch->format) * sizeof(uint32_t)));
+
+ /* Reset pointer, rewrite frequency (same register) for 9632. */
+ hdsp_write_4(sc, HDSP_RESET_POINTER, 0);
+ if (sc->type == HDSP_9632)
+ hdsp_write_4(sc, HDSP_FREQ_REG, hdsp_freq_reg_value(sc->speed));
+end:
+
+ return (sndbuf_getblksz(ch->buffer));
+}
+
+static uint32_t hdsp_bkp_fmt[] = {
+ SND_FORMAT(AFMT_S32_LE, 2, 0),
+ 0
+};
+
+/* Capabilities fallback, no quad speed for HDSP 9652 compatibility. */
+static struct pcmchan_caps hdsp_bkp_caps = {32000, 96000, hdsp_bkp_fmt, 0};
+
+static struct pcmchan_caps *
+hdspchan_getcaps(kobj_t obj, void *data)
+{
+ struct sc_chinfo *ch;
+
+ ch = data;
+
+#if 0
+ device_printf(ch->parent->dev, "hdspchan_getcaps()\n");
+#endif
+
+ if (ch->caps != NULL)
+ return (ch->caps);
+
+ return (&hdsp_bkp_caps);
+}
+
+static kobj_method_t hdspchan_methods[] = {
+ KOBJMETHOD(channel_init, hdspchan_init),
+ KOBJMETHOD(channel_free, hdspchan_free),
+ KOBJMETHOD(channel_setformat, hdspchan_setformat),
+ KOBJMETHOD(channel_setspeed, hdspchan_setspeed),
+ KOBJMETHOD(channel_setblocksize, hdspchan_setblocksize),
+ KOBJMETHOD(channel_trigger, hdspchan_trigger),
+ KOBJMETHOD(channel_getptr, hdspchan_getptr),
+ KOBJMETHOD(channel_getcaps, hdspchan_getcaps),
+ KOBJMETHOD_END
+};
+CHANNEL_DECLARE(hdspchan);
+
+static int
+hdsp_pcm_probe(device_t dev)
+{
+
+#if 0
+ device_printf(dev,"hdsp_pcm_probe()\n");
+#endif
+
+ return (0);
+}
+
+static uint32_t
+hdsp_pcm_intr(struct sc_pcminfo *scp)
+{
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+ int i;
+
+ sc = scp->sc;
+
+ for (i = 0; i < scp->chnum; i++) {
+ ch = &scp->chan[i];
+ snd_mtxunlock(sc->lock);
+ chn_intr(ch->channel);
+ snd_mtxlock(sc->lock);
+ }
+
+ return (0);
+}
+
+static int
+hdsp_pcm_attach(device_t dev)
+{
+ char status[SND_STATUSLEN];
+ struct sc_pcminfo *scp;
+ const char *buf;
+ uint32_t pcm_flags;
+ int err;
+ int play, rec;
+
+ scp = device_get_ivars(dev);
+ scp->ih = &hdsp_pcm_intr;
+
+ if (scp->hc->ports & HDSP_CHAN_9632_ALL)
+ buf = "9632";
+ else if (scp->hc->ports & HDSP_CHAN_9652_ALL)
+ buf = "9652";
+ else
+ buf = "?";
+ device_set_descf(dev, "HDSP %s [%s]", buf, scp->hc->descr);
+
+	/*
+	 * We don't register an interrupt handler with snd_setup_intr
+	 * in the pcm device, so mark the pcm device as MPSAFE manually.
+	 */
+ pcm_flags = pcm_getflags(dev) | SD_F_MPSAFE;
+ if (hdsp_port_slot_count_max(scp->hc->ports) > HDSP_MATRIX_MAX)
+ /* Disable vchan conversion, too many channels. */
+ pcm_flags |= SD_F_BITPERFECT;
+ pcm_setflags(dev, pcm_flags);
+
+ pcm_init(dev, scp);
+
+ play = (hdsp_channel_play_ports(scp->hc)) ? 1 : 0;
+ rec = (hdsp_channel_rec_ports(scp->hc)) ? 1 : 0;
+
+ scp->chnum = 0;
+ if (play) {
+ pcm_addchan(dev, PCMDIR_PLAY, &hdspchan_class, scp);
+ scp->chnum++;
+ }
+
+ if (rec) {
+ pcm_addchan(dev, PCMDIR_REC, &hdspchan_class, scp);
+ scp->chnum++;
+ }
+
+ snprintf(status, SND_STATUSLEN, "port 0x%jx irq %jd on %s",
+ rman_get_start(scp->sc->cs),
+ rman_get_start(scp->sc->irq),
+ device_get_nameunit(device_get_parent(dev)));
+ err = pcm_register(dev, status);
+ if (err) {
+ device_printf(dev, "Can't register pcm.\n");
+ return (ENXIO);
+ }
+
+ mixer_init(dev, &hdspmixer_class, scp);
+
+ return (0);
+}
+
+static int
+hdsp_pcm_detach(device_t dev)
+{
+ int err;
+
+ err = pcm_unregister(dev);
+ if (err) {
+ device_printf(dev, "Can't unregister device.\n");
+ return (err);
+ }
+
+ return (0);
+}
+
+static device_method_t hdsp_pcm_methods[] = {
+ DEVMETHOD(device_probe, hdsp_pcm_probe),
+ DEVMETHOD(device_attach, hdsp_pcm_attach),
+ DEVMETHOD(device_detach, hdsp_pcm_detach),
+ { 0, 0 }
+};
+
+static driver_t hdsp_pcm_driver = {
+ "pcm",
+ hdsp_pcm_methods,
+ PCM_SOFTC_SIZE,
+};
+
+DRIVER_MODULE(snd_hdsp_pcm, hdsp, hdsp_pcm_driver, 0, 0);
+MODULE_DEPEND(snd_hdsp, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
+MODULE_VERSION(snd_hdsp, 1);
diff --git a/sys/dev/sound/pci/hdsp.c b/sys/dev/sound/pci/hdsp.c
new file mode 100644
index 000000000000..4ba23d22ebce
--- /dev/null
+++ b/sys/dev/sound/pci/hdsp.c
@@ -0,0 +1,1022 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2012-2016 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * RME HDSP driver for FreeBSD.
+ * Supported cards: HDSP 9632, HDSP 9652.
+ */
+
+#include <sys/types.h>
+#include <sys/sysctl.h>
+
+#include <dev/sound/pcm/sound.h>
+#include <dev/sound/pci/hdsp.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include <mixer_if.h>
+
+static bool hdsp_unified_pcm = false;
+
+static SYSCTL_NODE(_hw, OID_AUTO, hdsp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "PCI HDSP");
+
+SYSCTL_BOOL(_hw_hdsp, OID_AUTO, unified_pcm, CTLFLAG_RWTUN,
+ &hdsp_unified_pcm, 0, "Combine physical ports in one unified pcm device");
+
+static struct hdsp_clock_source hdsp_clock_source_table_9632[] = {
+ { "internal", HDSP_CLOCK_INTERNAL },
+ { "adat", HDSP_CLOCK_ADAT1 },
+ { "spdif", HDSP_CLOCK_SPDIF },
+ { "word", HDSP_CLOCK_WORD },
+ { NULL, HDSP_CLOCK_INTERNAL }
+};
+
+static struct hdsp_clock_source hdsp_clock_source_table_9652[] = {
+ { "internal", HDSP_CLOCK_INTERNAL },
+ { "adat1", HDSP_CLOCK_ADAT1 },
+ { "adat2", HDSP_CLOCK_ADAT2 },
+ { "adat3", HDSP_CLOCK_ADAT3 },
+ { "spdif", HDSP_CLOCK_SPDIF },
+ { "word", HDSP_CLOCK_WORD },
+ { "adat_sync", HDSP_CLOCK_ADAT_SYNC },
+ { NULL, HDSP_CLOCK_INTERNAL }
+};
+
+static struct hdsp_channel chan_map_9632[] = {
+ { HDSP_CHAN_9632_ADAT, "adat" },
+ { HDSP_CHAN_9632_SPDIF, "s/pdif" },
+ { HDSP_CHAN_9632_LINE, "line" },
+ { HDSP_CHAN_9632_EXT, "ext" },
+ { 0, NULL },
+};
+
+static struct hdsp_channel chan_map_9632_uni[] = {
+ { HDSP_CHAN_9632_ALL, "all" },
+ { 0, NULL },
+};
+
+static struct hdsp_channel chan_map_9652[] = {
+ { HDSP_CHAN_9652_ADAT1, "adat1" },
+ { HDSP_CHAN_9652_ADAT2, "adat2" },
+ { HDSP_CHAN_9652_ADAT3, "adat3" },
+ { HDSP_CHAN_9652_SPDIF, "s/pdif" },
+ { 0, NULL },
+};
+
+static struct hdsp_channel chan_map_9652_uni[] = {
+ { HDSP_CHAN_9652_ALL, "all" },
+ { 0, NULL },
+};
+
+static void
+hdsp_intr(void *p)
+{
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ device_t *devlist;
+ int devcount;
+ int status;
+ int err;
+ int i;
+
+ sc = (struct sc_info *)p;
+
+ snd_mtxlock(sc->lock);
+
+ status = hdsp_read_1(sc, HDSP_STATUS_REG);
+ if (status & HDSP_AUDIO_IRQ_PENDING) {
+ if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) {
+ snd_mtxunlock(sc->lock);
+ return;
+ }
+
+ for (i = 0; i < devcount; i++) {
+ scp = device_get_ivars(devlist[i]);
+ if (scp->ih != NULL)
+ scp->ih(scp);
+ }
+
+ hdsp_write_1(sc, HDSP_INTERRUPT_ACK, 0);
+ free(devlist, M_TEMP);
+ }
+
+ snd_mtxunlock(sc->lock);
+}
+
+static void
+hdsp_dmapsetmap(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+#if 0
+ struct sc_info *sc = arg;
+
+ device_printf(sc->dev, "hdsp_dmapsetmap()\n");
+#endif
+}
+
+static int
+hdsp_alloc_resources(struct sc_info *sc)
+{
+
+ /* Allocate resource. */
+ sc->csid = PCIR_BAR(0);
+ sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->csid, RF_ACTIVE);
+
+ if (!sc->cs) {
+ device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n");
+ return (ENXIO);
+ }
+
+ sc->cst = rman_get_bustag(sc->cs);
+ sc->csh = rman_get_bushandle(sc->cs);
+
+ /* Allocate interrupt resource. */
+ sc->irqid = 0;
+ sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid,
+ RF_ACTIVE | RF_SHAREABLE);
+
+ if (!sc->irq ||
+ bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV,
+ NULL, hdsp_intr, sc, &sc->ih)) {
+ device_printf(sc->dev, "Unable to alloc interrupt resource.\n");
+ return (ENXIO);
+ }
+
+ /* Allocate DMA resources. */
+ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(sc->dev),
+ /*alignment*/4,
+ /*boundary*/0,
+ /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
+ /*highaddr*/BUS_SPACE_MAXADDR,
+ /*filter*/NULL,
+ /*filterarg*/NULL,
+ /*maxsize*/2 * HDSP_DMASEGSIZE,
+ /*nsegments*/2,
+ /*maxsegsz*/HDSP_DMASEGSIZE,
+ /*flags*/0,
+ /*lockfunc*/NULL,
+ /*lockarg*/NULL,
+ /*dmatag*/&sc->dmat) != 0) {
+ device_printf(sc->dev, "Unable to create dma tag.\n");
+ return (ENXIO);
+ }
+
+ sc->bufsize = HDSP_DMASEGSIZE;
+
+ /* pbuf (play buffer). */
+ if (bus_dmamem_alloc(sc->dmat, (void **)&sc->pbuf, BUS_DMA_WAITOK,
+ &sc->pmap)) {
+ device_printf(sc->dev, "Can't alloc pbuf.\n");
+ return (ENXIO);
+ }
+
+ if (bus_dmamap_load(sc->dmat, sc->pmap, sc->pbuf, sc->bufsize,
+ hdsp_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
+ device_printf(sc->dev, "Can't load pbuf.\n");
+ return (ENXIO);
+ }
+
+ /* rbuf (rec buffer). */
+ if (bus_dmamem_alloc(sc->dmat, (void **)&sc->rbuf, BUS_DMA_WAITOK,
+ &sc->rmap)) {
+ device_printf(sc->dev, "Can't alloc rbuf.\n");
+ return (ENXIO);
+ }
+
+ if (bus_dmamap_load(sc->dmat, sc->rmap, sc->rbuf, sc->bufsize,
+ hdsp_dmapsetmap, sc, BUS_DMA_NOWAIT)) {
+ device_printf(sc->dev, "Can't load rbuf.\n");
+ return (ENXIO);
+ }
+
+ bzero(sc->pbuf, sc->bufsize);
+ bzero(sc->rbuf, sc->bufsize);
+
+ return (0);
+}
+
+static void
+hdsp_map_dmabuf(struct sc_info *sc)
+{
+ uint32_t paddr, raddr;
+
+ paddr = vtophys(sc->pbuf);
+ raddr = vtophys(sc->rbuf);
+
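+ /*
+ * Both buffers come from the DMA tag set up in hdsp_alloc_resources(),
+ * which is bounded by BUS_SPACE_MAXADDR_32BIT, so these physical
+ * addresses are known to fit the 32 bit page address registers.
+ */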
+ hdsp_write_4(sc, HDSP_PAGE_ADDR_BUF_OUT, paddr);
+ hdsp_write_4(sc, HDSP_PAGE_ADDR_BUF_IN, raddr);
+}
+
+static const char *
+hdsp_control_input_level(uint32_t control)
+{
+ switch (control & HDSP_INPUT_LEVEL_MASK) {
+ case HDSP_INPUT_LEVEL_LOWGAIN:
+ return ("LowGain");
+ case HDSP_INPUT_LEVEL_PLUS4DBU:
+ return ("+4dBu");
+ case HDSP_INPUT_LEVEL_MINUS10DBV:
+ return ("-10dBV");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdsp_sysctl_input_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t control;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSP 9632. */
+ if (sc->type != HDSP_9632)
+ return (ENXIO);
+
+ /* Extract current input level from control register. */
+ control = sc->ctrl_register & HDSP_INPUT_LEVEL_MASK;
+ label = hdsp_control_input_level(control);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find input level matching the sysctl string. */
+ label = hdsp_control_input_level(HDSP_INPUT_LEVEL_LOWGAIN);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_INPUT_LEVEL_LOWGAIN;
+ label = hdsp_control_input_level(HDSP_INPUT_LEVEL_PLUS4DBU);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_INPUT_LEVEL_PLUS4DBU;
+ label = hdsp_control_input_level(HDSP_INPUT_LEVEL_MINUS10DBV);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_INPUT_LEVEL_MINUS10DBV;
+
+ /* Set input level in control register. */
+ control &= HDSP_INPUT_LEVEL_MASK;
+ if (control != (sc->ctrl_register & HDSP_INPUT_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->ctrl_register &= ~HDSP_INPUT_LEVEL_MASK;
+ sc->ctrl_register |= control;
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static const char *
+hdsp_control_output_level(uint32_t control)
+{
+ switch (control & HDSP_OUTPUT_LEVEL_MASK) {
+ case HDSP_OUTPUT_LEVEL_MINUS10DBV:
+ return ("-10dBV");
+ case HDSP_OUTPUT_LEVEL_PLUS4DBU:
+ return ("+4dBu");
+ case HDSP_OUTPUT_LEVEL_HIGHGAIN:
+ return ("HighGain");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdsp_sysctl_output_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t control;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSP 9632. */
+ if (sc->type != HDSP_9632)
+ return (ENXIO);
+
+ /* Extract current output level from control register. */
+ control = sc->ctrl_register & HDSP_OUTPUT_LEVEL_MASK;
+ label = hdsp_control_output_level(control);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find output level matching the sysctl string. */
+ label = hdsp_control_output_level(HDSP_OUTPUT_LEVEL_MINUS10DBV);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_OUTPUT_LEVEL_MINUS10DBV;
+ label = hdsp_control_output_level(HDSP_OUTPUT_LEVEL_PLUS4DBU);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_OUTPUT_LEVEL_PLUS4DBU;
+ label = hdsp_control_output_level(HDSP_OUTPUT_LEVEL_HIGHGAIN);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_OUTPUT_LEVEL_HIGHGAIN;
+
+ /* Set output level in control register. */
+ control &= HDSP_OUTPUT_LEVEL_MASK;
+ if (control != (sc->ctrl_register & HDSP_OUTPUT_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->ctrl_register &= ~HDSP_OUTPUT_LEVEL_MASK;
+ sc->ctrl_register |= control;
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static const char *
+hdsp_control_phones_level(uint32_t control)
+{
+ switch (control & HDSP_PHONES_LEVEL_MASK) {
+ case HDSP_PHONES_LEVEL_MINUS12DB:
+ return ("-12dB");
+ case HDSP_PHONES_LEVEL_MINUS6DB:
+ return ("-6dB");
+ case HDSP_PHONES_LEVEL_0DB:
+ return ("0dB");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdsp_sysctl_phones_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t control;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSP 9632. */
+ if (sc->type != HDSP_9632)
+ return (ENXIO);
+
+ /* Extract current phones level from control register. */
+ control = sc->ctrl_register & HDSP_PHONES_LEVEL_MASK;
+ label = hdsp_control_phones_level(control);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find phones level matching the sysctl string. */
+ label = hdsp_control_phones_level(HDSP_PHONES_LEVEL_MINUS12DB);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_PHONES_LEVEL_MINUS12DB;
+ label = hdsp_control_phones_level(HDSP_PHONES_LEVEL_MINUS6DB);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_PHONES_LEVEL_MINUS6DB;
+ label = hdsp_control_phones_level(HDSP_PHONES_LEVEL_0DB);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ control = HDSP_PHONES_LEVEL_0DB;
+
+ /* Set phones level in control register. */
+ control &= HDSP_PHONES_LEVEL_MASK;
+ if (control != (sc->ctrl_register & HDSP_PHONES_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->ctrl_register &= ~HDSP_PHONES_LEVEL_MASK;
+ sc->ctrl_register |= control;
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static int
+hdsp_sysctl_sample_rate(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc = oidp->oid_arg1;
+ int error;
+ unsigned int speed, multiplier;
+
+ speed = sc->force_speed;
+
+ /* Process sysctl (unsigned) integer request. */
+ error = sysctl_handle_int(oidp, &speed, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Speed from 32000 to 192000, 0 falls back to pcm speed setting. */
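+ /*
+ * For example, a request of 50000 keeps multiplier 1 and rounds to
+ * 48000, while 88200 lies above (48000 + 64000) / 2, so multiplier 2
+ * applies and the rate rounds to 44100 * 2 = 88200.
+ */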
+ sc->force_speed = 0;
+ if (speed > 0) {
+ multiplier = 1;
+ if ((speed > (96000 + 128000) / 2) && sc->type == HDSP_9632)
+ multiplier = 4;
+ else if (speed > (48000 + 64000) / 2)
+ multiplier = 2;
+
+ if (speed < ((32000 + 44100) / 2) * multiplier)
+ sc->force_speed = 32000 * multiplier;
+ else if (speed < ((44100 + 48000) / 2) * multiplier)
+ sc->force_speed = 44100 * multiplier;
+ else
+ sc->force_speed = 48000 * multiplier;
+ }
+
+ return (0);
+}
+
+
+static int
+hdsp_sysctl_period(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc = oidp->oid_arg1;
+ int error;
+ unsigned int period;
+
+ period = sc->force_period;
+
+ /* Process sysctl (unsigned) integer request. */
+ error = sysctl_handle_int(oidp, &period, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Period is from 2^5 to 2^12 (32 to 4096), 0 falls back to pcm latency settings. */
+ sc->force_period = 0;
+ if (period > 0) {
+ sc->force_period = 32;
+ while (sc->force_period < period && sc->force_period < 4096)
+ sc->force_period <<= 1;
+ }
+
+ return (0);
+}
+
+static uint32_t
+hdsp_control_clock_preference(enum hdsp_clock_type type)
+{
+ switch (type) {
+ case HDSP_CLOCK_INTERNAL:
+ return (HDSP_CONTROL_MASTER);
+ case HDSP_CLOCK_ADAT1:
+ return (HDSP_CONTROL_CLOCK(0));
+ case HDSP_CLOCK_ADAT2:
+ return (HDSP_CONTROL_CLOCK(1));
+ case HDSP_CLOCK_ADAT3:
+ return (HDSP_CONTROL_CLOCK(2));
+ case HDSP_CLOCK_SPDIF:
+ return (HDSP_CONTROL_CLOCK(3));
+ case HDSP_CLOCK_WORD:
+ return (HDSP_CONTROL_CLOCK(4));
+ case HDSP_CLOCK_ADAT_SYNC:
+ return (HDSP_CONTROL_CLOCK(5));
+ default:
+ return (HDSP_CONTROL_MASTER);
+ }
+}
+
+static int
+hdsp_sysctl_clock_preference(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ struct hdsp_clock_source *clock_table, *clock;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t control;
+
+ sc = oidp->oid_arg1;
+
+ /* Select sync ports table for device type. */
+ if (sc->type == HDSP_9632)
+ clock_table = hdsp_clock_source_table_9632;
+ else if (sc->type == HDSP_9652)
+ clock_table = hdsp_clock_source_table_9652;
+ else
+ return (ENXIO);
+
+ /* Extract preferred clock source from control register. */
+ control = sc->ctrl_register & HDSP_CONTROL_CLOCK_MASK;
+ for (clock = clock_table; clock->name != NULL; ++clock) {
+ if (hdsp_control_clock_preference(clock->type) == control)
+ break;
+ }
+ if (clock->name != NULL)
+ strlcpy(buf, clock->name, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find clock source matching the sysctl string. */
+ for (clock = clock_table; clock->name != NULL; ++clock) {
+ if (strncasecmp(buf, clock->name, sizeof(buf)) == 0)
+ break;
+ }
+
+ /* Set preferred clock source in control register. */
+ if (clock->name != NULL) {
+ control = hdsp_control_clock_preference(clock->type);
+ control &= HDSP_CONTROL_CLOCK_MASK;
+ snd_mtxlock(sc->lock);
+ sc->ctrl_register &= ~HDSP_CONTROL_CLOCK_MASK;
+ sc->ctrl_register |= control;
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static uint32_t
+hdsp_status2_clock_source(enum hdsp_clock_type type)
+{
+ switch (type) {
+ case HDSP_CLOCK_INTERNAL:
+ return (0);
+ case HDSP_CLOCK_ADAT1:
+ return (HDSP_STATUS2_CLOCK(0));
+ case HDSP_CLOCK_ADAT2:
+ return (HDSP_STATUS2_CLOCK(1));
+ case HDSP_CLOCK_ADAT3:
+ return (HDSP_STATUS2_CLOCK(2));
+ case HDSP_CLOCK_SPDIF:
+ return (HDSP_STATUS2_CLOCK(3));
+ case HDSP_CLOCK_WORD:
+ return (HDSP_STATUS2_CLOCK(4));
+ case HDSP_CLOCK_ADAT_SYNC:
+ return (HDSP_STATUS2_CLOCK(5));
+ default:
+ return (0);
+ }
+}
+
+static int
+hdsp_sysctl_clock_source(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ struct hdsp_clock_source *clock_table, *clock;
+ char buf[16] = "invalid";
+ uint32_t status2;
+
+ sc = oidp->oid_arg1;
+
+ /* Select sync ports table for device type. */
+ if (sc->type == HDSP_9632)
+ clock_table = hdsp_clock_source_table_9632;
+ else if (sc->type == HDSP_9652)
+ clock_table = hdsp_clock_source_table_9652;
+ else
+ return (ENXIO);
+
+ /* Read current (autosync) clock source from status2 register. */
+ snd_mtxlock(sc->lock);
+ status2 = hdsp_read_4(sc, HDSP_STATUS2_REG);
+ status2 &= HDSP_STATUS2_CLOCK_MASK;
+ snd_mtxunlock(sc->lock);
+
+ /* Translate status2 register value to clock source. */
+ for (clock = clock_table; clock->name != NULL; ++clock) {
+ /* In clock master mode, override with internal clock source. */
+ if (sc->ctrl_register & HDSP_CONTROL_MASTER) {
+ if (clock->type == HDSP_CLOCK_INTERNAL)
+ break;
+ } else if (hdsp_status2_clock_source(clock->type) == status2)
+ break;
+ }
+
+ /* Process sysctl string request. */
+ if (clock->name != NULL)
+ strlcpy(buf, clock->name, sizeof(buf));
+ return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
+}
+
+static int
+hdsp_sysctl_clock_list(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ struct hdsp_clock_source *clock_table, *clock;
+ char buf[256];
+ int n;
+
+ sc = oidp->oid_arg1;
+ n = 0;
+
+ /* Select clock source table for device type. */
+ if (sc->type == HDSP_9632)
+ clock_table = hdsp_clock_source_table_9632;
+ else if (sc->type == HDSP_9652)
+ clock_table = hdsp_clock_source_table_9652;
+ else
+ return (ENXIO);
+
+ /* List available clock sources. */
+ buf[0] = 0;
+ for (clock = clock_table; clock->name != NULL; ++clock) {
+ if (n > 0)
+ n += strlcpy(buf + n, ",", sizeof(buf) - n);
+ n += strlcpy(buf + n, clock->name, sizeof(buf) - n);
+ }
+ return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
+}
+
+static bool
+hdsp_clock_source_locked(enum hdsp_clock_type type, uint32_t status,
+ uint32_t status2)
+{
+ switch (type) {
+ case HDSP_CLOCK_INTERNAL:
+ return (true);
+ case HDSP_CLOCK_ADAT1:
+ return ((status >> 3) & 0x01);
+ case HDSP_CLOCK_ADAT2:
+ return ((status >> 2) & 0x01);
+ case HDSP_CLOCK_ADAT3:
+ return ((status >> 1) & 0x01);
+ case HDSP_CLOCK_SPDIF:
+ return (!((status >> 25) & 0x01));
+ case HDSP_CLOCK_WORD:
+ return ((status2 >> 3) & 0x01);
+ case HDSP_CLOCK_ADAT_SYNC:
+ return ((status >> 5) & 0x01);
+ default:
+ return (false);
+ }
+}
+
+static bool
+hdsp_clock_source_synced(enum hdsp_clock_type type, uint32_t status,
+ uint32_t status2)
+{
+ switch (type) {
+ case HDSP_CLOCK_INTERNAL:
+ return (true);
+ case HDSP_CLOCK_ADAT1:
+ return ((status >> 18) & 0x01);
+ case HDSP_CLOCK_ADAT2:
+ return ((status >> 17) & 0x01);
+ case HDSP_CLOCK_ADAT3:
+ return ((status >> 16) & 0x01);
+ case HDSP_CLOCK_SPDIF:
+ return (((status >> 4) & 0x01) && !((status >> 25) & 0x01));
+ case HDSP_CLOCK_WORD:
+ return ((status2 >> 4) & 0x01);
+ case HDSP_CLOCK_ADAT_SYNC:
+ return ((status >> 27) & 0x01);
+ default:
+ return (false);
+ }
+}
+
+static int
+hdsp_sysctl_sync_status(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ struct hdsp_clock_source *clock_table, *clock;
+ char buf[256];
+ char *state;
+ int n;
+ uint32_t status, status2;
+
+ sc = oidp->oid_arg1;
+ n = 0;
+
+ /* Select sync ports table for device type. */
+ if (sc->type == HDSP_9632)
+ clock_table = hdsp_clock_source_table_9632;
+ else if (sc->type == HDSP_9652)
+ clock_table = hdsp_clock_source_table_9652;
+ else
+ return (ENXIO);
+
+ /* Read current lock and sync bits from status registers. */
+ snd_mtxlock(sc->lock);
+ status = hdsp_read_4(sc, HDSP_STATUS_REG);
+ status2 = hdsp_read_4(sc, HDSP_STATUS2_REG);
+ snd_mtxunlock(sc->lock);
+
+ /* List clock sources with lock and sync state. */
+ for (clock = clock_table; clock->name != NULL; ++clock) {
+ if (clock->type == HDSP_CLOCK_INTERNAL)
+ continue;
+ if (n > 0)
+ n += strlcpy(buf + n, ",", sizeof(buf) - n);
+ state = "none";
+ if (hdsp_clock_source_locked(clock->type, status, status2)) {
+ if (hdsp_clock_source_synced(clock->type, status,
+ status2))
+ state = "sync";
+ else
+ state = "lock";
+ }
+ n += snprintf(buf + n, sizeof(buf) - n, "%s(%s)",
+ clock->name, state);
+ }
+ return (sysctl_handle_string(oidp, buf, sizeof(buf), req));
+}
+
+static int
+hdsp_probe(device_t dev)
+{
+ uint32_t rev;
+
+ if (pci_get_vendor(dev) == PCI_VENDOR_XILINX &&
+ pci_get_device(dev) == PCI_DEVICE_XILINX_HDSP) {
+ rev = pci_get_revid(dev);
+ switch (rev) {
+ case PCI_REVISION_9632:
+ device_set_desc(dev, "RME HDSP 9632");
+ return (0);
+ case PCI_REVISION_9652:
+ device_set_desc(dev, "RME HDSP 9652");
+ return (0);
+ }
+ }
+
+ return (ENXIO);
+}
+
+static int
+hdsp_init(struct sc_info *sc)
+{
+ unsigned mixer_controls;
+
+ /* Set latency. */
+ sc->period = 256;
+ /*
+ * The pcm channel latency settings propagate unreliable blocksizes,
+ * different for recording and playback, and skewed due to rounding
+ * and total buffer size limits.
+ * Force period to a consistent default until these issues are fixed.
+ */
+ sc->force_period = 256;
+ sc->ctrl_register = hdsp_encode_latency(2);
+
+ /* Set rate. */
+ sc->speed = HDSP_SPEED_DEFAULT;
+ sc->force_speed = 0;
+ sc->ctrl_register &= ~HDSP_FREQ_MASK;
+ sc->ctrl_register |= HDSP_FREQ_MASK_DEFAULT;
+
+ /* Set internal clock source (master). */
+ sc->ctrl_register &= ~HDSP_CONTROL_CLOCK_MASK;
+ sc->ctrl_register |= HDSP_CONTROL_MASTER;
+
+ /* SPDIF from coax in, line out. */
+ sc->ctrl_register &= ~HDSP_CONTROL_SPDIF_COAX;
+ sc->ctrl_register |= HDSP_CONTROL_SPDIF_COAX;
+ sc->ctrl_register &= ~HDSP_CONTROL_LINE_OUT;
+ sc->ctrl_register |= HDSP_CONTROL_LINE_OUT;
+
+ /* Default gain levels. */
+ sc->ctrl_register &= ~HDSP_INPUT_LEVEL_MASK;
+ sc->ctrl_register |= HDSP_INPUT_LEVEL_LOWGAIN;
+ sc->ctrl_register &= ~HDSP_OUTPUT_LEVEL_MASK;
+ sc->ctrl_register |= HDSP_OUTPUT_LEVEL_MINUS10DBV;
+ sc->ctrl_register &= ~HDSP_PHONES_LEVEL_MASK;
+ sc->ctrl_register |= HDSP_PHONES_LEVEL_MINUS12DB;
+
+ hdsp_write_4(sc, HDSP_CONTROL_REG, sc->ctrl_register);
+
+ if (sc->type == HDSP_9652)
+ hdsp_write_4(sc, HDSP_CONTROL2_REG, HDSP_CONTROL2_9652_MIXER);
+ else
+ hdsp_write_4(sc, HDSP_CONTROL2_REG, 0);
+
+ switch (sc->type) {
+ case HDSP_9632:
+ /* Mixer matrix is 2 source rows (input, playback) per output. */
+ mixer_controls = 2 * HDSP_MIX_SLOTS_9632 * HDSP_MIX_SLOTS_9632;
+ break;
+ case HDSP_9652:
+ /* Mixer matrix is 2 source rows (input, playback) per output. */
+ mixer_controls = 2 * HDSP_MIX_SLOTS_9652 * HDSP_MIX_SLOTS_9652;
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ /* Initialize mixer matrix by silencing all controls. */
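+ /* For the 9632 that is 2 * 16 * 16 = 512 controls, i.e. 256 stores. */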
+ for (unsigned offset = 0; offset < mixer_controls * 2; offset += 4) {
+ /* Only accepts 4 byte values, pairs of 16 bit volume controls. */
+ hdsp_write_4(sc, HDSP_MIXER_BASE + offset,
+ (HDSP_MIN_GAIN << 16) | HDSP_MIN_GAIN);
+ }
+
+ /* Reset pointer, rewrite frequency (same register) for 9632. */
+ hdsp_write_4(sc, HDSP_RESET_POINTER, 0);
+ if (sc->type == HDSP_9632) {
+ /* Set DDS value. */
+ hdsp_write_4(sc, HDSP_FREQ_REG, hdsp_freq_reg_value(sc->speed));
+ }
+
+ return (0);
+}
+
+static int
+hdsp_attach(device_t dev)
+{
+ struct hdsp_channel *chan_map;
+ struct sc_pcminfo *scp;
+ struct sc_info *sc;
+ uint32_t rev;
+ int i, err;
+
+#if 0
+ device_printf(dev, "hdsp_attach()\n");
+#endif
+
+ sc = device_get_softc(dev);
+ sc->lock = snd_mtxcreate(device_get_nameunit(dev),
+ "snd_hdsp softc");
+ sc->dev = dev;
+
+ pci_enable_busmaster(dev);
+ rev = pci_get_revid(dev);
+ switch (rev) {
+ case PCI_REVISION_9632:
+ sc->type = HDSP_9632;
+ chan_map = hdsp_unified_pcm ? chan_map_9632_uni : chan_map_9632;
+ break;
+ case PCI_REVISION_9652:
+ sc->type = HDSP_9652;
+ chan_map = hdsp_unified_pcm ? chan_map_9652_uni : chan_map_9652;
+ break;
+ default:
+ return (ENXIO);
+ }
+
+ /* Allocate resources. */
+ err = hdsp_alloc_resources(sc);
+ if (err) {
+ device_printf(dev, "Unable to allocate system resources.\n");
+ return (ENXIO);
+ }
+
+ if (hdsp_init(sc) != 0)
+ return (ENXIO);
+
+ for (i = 0; i < HDSP_MAX_CHANS && chan_map[i].descr != NULL; i++) {
+ scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
+ scp->hc = &chan_map[i];
+ scp->sc = sc;
+ scp->dev = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
+ device_set_ivars(scp->dev, scp);
+ }
+
+ hdsp_map_dmabuf(sc);
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "sync_status", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_sync_status, "A",
+ "List clock source signal lock and sync status");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "clock_source", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_clock_source, "A",
+ "Currently effective clock source");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "clock_preference", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_clock_preference, "A",
+ "Set 'internal' (master) or preferred autosync clock source");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "clock_list", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_clock_list, "A",
+ "List of supported clock sources");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "period", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_period, "IU",
+ "Force period of samples per interrupt (32, 64, ... 4096)");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "sample_rate", CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_sample_rate, "IU",
+ "Force sample rate (32000, 44100, 48000, ... 192000)");
+
+ if (sc->type == HDSP_9632) {
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "phones_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_phones_level, "A",
+ "Phones output level ('0dB', '-6dB', '-12dB')");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "output_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_output_level, "A",
+ "Analog output level ('HighGain', '+4dBU', '-10dBV')");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "input_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdsp_sysctl_input_level, "A",
+ "Analog input level ('LowGain', '+4dBU', '-10dBV')");
+ }
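+
+ /*
+ * These all appear under the device sysctl tree, e.g. (unit number
+ * hypothetical): sysctl dev.hdsp.0.clock_preference=spdif
+ */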
+
+ bus_attach_children(dev);
+ return (0);
+}
+
+static void
+hdsp_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
+static void
+hdsp_dmafree(struct sc_info *sc)
+{
+
+ bus_dmamap_unload(sc->dmat, sc->rmap);
+ bus_dmamap_unload(sc->dmat, sc->pmap);
+ bus_dmamem_free(sc->dmat, sc->rbuf, sc->rmap);
+ bus_dmamem_free(sc->dmat, sc->pbuf, sc->pmap);
+ sc->rbuf = sc->pbuf = NULL;
+}
+
+static int
+hdsp_detach(device_t dev)
+{
+ struct sc_info *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+ if (sc == NULL) {
+ device_printf(dev,"Can't detach: softc is null.\n");
+ return (0);
+ }
+
+ err = bus_generic_detach(dev);
+ if (err)
+ return (err);
+
+ hdsp_dmafree(sc);
+
+ if (sc->ih)
+ bus_teardown_intr(dev, sc->irq, sc->ih);
+ if (sc->dmat)
+ bus_dma_tag_destroy(sc->dmat);
+ if (sc->irq)
+ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
+ if (sc->cs)
+ bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->cs);
+ if (sc->lock)
+ snd_mtxfree(sc->lock);
+
+ return (0);
+}
+
+static device_method_t hdsp_methods[] = {
+ DEVMETHOD(device_probe, hdsp_probe),
+ DEVMETHOD(device_attach, hdsp_attach),
+ DEVMETHOD(device_detach, hdsp_detach),
+ DEVMETHOD(bus_child_deleted, hdsp_child_deleted),
+ { 0, 0 }
+};
+
+static driver_t hdsp_driver = {
+ "hdsp",
+ hdsp_methods,
+ PCM_SOFTC_SIZE,
+};
+
+DRIVER_MODULE(snd_hdsp, pci, hdsp_driver, 0, 0);
diff --git a/sys/dev/sound/pci/hdsp.h b/sys/dev/sound/pci/hdsp.h
new file mode 100644
index 000000000000..8ac438cd79f9
--- /dev/null
+++ b/sys/dev/sound/pci/hdsp.h
@@ -0,0 +1,266 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2012-2016 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2023-2024 Florian Walpen <dev@submerge.ch>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#define PCI_VENDOR_XILINX 0x10ee
+#define PCI_DEVICE_XILINX_HDSP 0x3fc5 /* HDSP 9632 and 9652, told apart by revid */
+#define PCI_REVISION_9632 0x9b
+#define PCI_REVISION_9652 0x6c
+
+#define HDSP_9632 0
+#define HDSP_9652 1
+
+/* Hardware mixer */
+#define HDSP_OUT_ENABLE_BASE 128
+#define HDSP_IN_ENABLE_BASE 384
+#define HDSP_MIXER_BASE 4096
+#define HDSP_MAX_GAIN 32768
+#define HDSP_MIN_GAIN 0
+#define HDSP_MIX_SLOTS_9632 16
+#define HDSP_MIX_SLOTS_9652 26
+#define HDSP_CONTROL2_9652_MIXER (1 << 11)
+
+/* Buffer */
+#define HDSP_PAGE_ADDR_BUF_OUT 32
+#define HDSP_PAGE_ADDR_BUF_IN 36
+#define HDSP_BUF_POSITION_MASK 0x000FFC0
+
+/* Frequency */
+#define HDSP_FREQ_0 (1 << 6)
+#define HDSP_FREQ_1 (1 << 7)
+#define HDSP_FREQ_DOUBLE (1 << 8)
+#define HDSP_FREQ_QUAD (1 << 31)
+
+#define HDSP_FREQ_32000 HDSP_FREQ_0
+#define HDSP_FREQ_44100 HDSP_FREQ_1
+#define HDSP_FREQ_48000 (HDSP_FREQ_0 | HDSP_FREQ_1)
+#define HDSP_FREQ_MASK (HDSP_FREQ_0 | HDSP_FREQ_1 | \
+ HDSP_FREQ_DOUBLE | HDSP_FREQ_QUAD)
+#define HDSP_FREQ_MASK_DEFAULT HDSP_FREQ_48000
+#define HDSP_FREQ_REG 0
+#define HDSP_FREQ_9632 104857600000000ULL
+#define hdsp_freq_multiplier(s) (((s) > 96000) ? 4 : \
+ (((s) > 48000) ? 2 : 1))
+#define hdsp_freq_single(s) ((s) / hdsp_freq_multiplier(s))
+#define hdsp_freq_reg_value(s) (HDSP_FREQ_9632 / hdsp_freq_single(s))
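+/*
+ * Example: at 96000 Hz the multiplier is 2, the single speed rate is
+ * 48000 Hz, and the DDS register value is 104857600000000 / 48000.
+ */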
+
+#define HDSP_SPEED_DEFAULT 48000
+
+/* Latency */
+#define HDSP_LAT_0 (1 << 1)
+#define HDSP_LAT_1 (1 << 2)
+#define HDSP_LAT_2 (1 << 3)
+#define HDSP_LAT_MASK (HDSP_LAT_0 | HDSP_LAT_1 | HDSP_LAT_2)
+#define HDSP_LAT_BYTES_MAX (4096 * 4)
+#define HDSP_LAT_BYTES_MIN (32 * 4)
+#define hdsp_encode_latency(x) (((x)<<1) & HDSP_LAT_MASK)
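+/*
+ * hdsp_init() pairs sc->period = 256 with hdsp_encode_latency(2), which
+ * suggests latency setting n selects a period of 64 << n frames.
+ */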
+
+/* Register addresses */
+#define HDSP_RESET_POINTER 0
+#define HDSP_CONTROL_REG 64
+#define HDSP_CONTROL2_REG 256
+#define HDSP_STATUS_REG 0
+#define HDSP_STATUS2_REG 192
+
+/* Control register flags */
+#define HDSP_ENABLE (1 << 0)
+#define HDSP_CONTROL_SPDIF_COAX (1 << 14)
+#define HDSP_CONTROL_LINE_OUT (1 << 24)
+#define HDSP_CONTROL_INPUT_GAIN0 (1 << 25)
+#define HDSP_CONTROL_INPUT_GAIN1 (1 << 26)
+#define HDSP_CONTROL_OUTPUT_GAIN0 (1 << 27)
+#define HDSP_CONTROL_OUTPUT_GAIN1 (1 << 28)
+#define HDSP_CONTROL_PHONES_GAIN0 (1 << 29)
+#define HDSP_CONTROL_PHONES_GAIN1 (1 << 30)
+
+/* Analog input gain level */
+#define HDSP_INPUT_LEVEL_MASK (HDSP_CONTROL_INPUT_GAIN0 | \
+ HDSP_CONTROL_INPUT_GAIN1)
+#define HDSP_INPUT_LEVEL_LOWGAIN 0
+#define HDSP_INPUT_LEVEL_PLUS4DBU (HDSP_CONTROL_INPUT_GAIN0)
+#define HDSP_INPUT_LEVEL_MINUS10DBV (HDSP_CONTROL_INPUT_GAIN0 | \
+ HDSP_CONTROL_INPUT_GAIN1)
+
+/* Analog output gain level */
+#define HDSP_OUTPUT_LEVEL_MASK (HDSP_CONTROL_OUTPUT_GAIN0 | \
+ HDSP_CONTROL_OUTPUT_GAIN1)
+#define HDSP_OUTPUT_LEVEL_MINUS10DBV 0
+#define HDSP_OUTPUT_LEVEL_PLUS4DBU (HDSP_CONTROL_OUTPUT_GAIN0)
+#define HDSP_OUTPUT_LEVEL_HIGHGAIN (HDSP_CONTROL_OUTPUT_GAIN0 | \
+ HDSP_CONTROL_OUTPUT_GAIN1)
+
+/* Phones output gain level */
+#define HDSP_PHONES_LEVEL_MASK (HDSP_CONTROL_PHONES_GAIN0 | \
+ HDSP_CONTROL_PHONES_GAIN1)
+#define HDSP_PHONES_LEVEL_MINUS12DB 0
+#define HDSP_PHONES_LEVEL_MINUS6DB (HDSP_CONTROL_PHONES_GAIN0)
+#define HDSP_PHONES_LEVEL_0DB (HDSP_CONTROL_PHONES_GAIN0 | \
+ HDSP_CONTROL_PHONES_GAIN1)
+
+/* Interrupts */
+#define HDSP_AUDIO_IRQ_PENDING (1 << 0)
+#define HDSP_AUDIO_INT_ENABLE (1 << 5)
+#define HDSP_INTERRUPT_ACK 96
+
+/* Channels */
+#define HDSP_MAX_SLOTS 64 /* Mono channels */
+#define HDSP_MAX_CHANS (HDSP_MAX_SLOTS / 2) /* Stereo pairs */
+
+#define HDSP_CHANBUF_SAMPLES (16 * 1024)
+#define HDSP_CHANBUF_SIZE (4 * HDSP_CHANBUF_SAMPLES)
+#define HDSP_DMASEGSIZE (HDSP_CHANBUF_SIZE * HDSP_MAX_SLOTS)
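+/*
+ * Size check: HDSP_CHANBUF_SIZE is 4 * 16384 = 64 KiB per slot, so
+ * HDSP_DMASEGSIZE is 64 KiB * 64 slots = 4 MiB per direction.
+ */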
+
+#define HDSP_CHAN_9632_ADAT (1 << 0)
+#define HDSP_CHAN_9632_SPDIF (1 << 1)
+#define HDSP_CHAN_9632_LINE (1 << 2)
+#define HDSP_CHAN_9632_EXT (1 << 3) /* Extension boards */
+#define HDSP_CHAN_9632_ALL (HDSP_CHAN_9632_ADAT | \
+ HDSP_CHAN_9632_SPDIF | \
+ HDSP_CHAN_9632_LINE | \
+ HDSP_CHAN_9632_EXT)
+
+#define HDSP_CHAN_9652_ADAT1 (1 << 5)
+#define HDSP_CHAN_9652_ADAT2 (1 << 6)
+#define HDSP_CHAN_9652_ADAT3 (1 << 7)
+#define HDSP_CHAN_9652_ADAT_ALL (HDSP_CHAN_9652_ADAT1 | \
+ HDSP_CHAN_9652_ADAT2 | \
+ HDSP_CHAN_9652_ADAT3)
+#define HDSP_CHAN_9652_SPDIF (1 << 8)
+#define HDSP_CHAN_9652_ALL (HDSP_CHAN_9652_ADAT_ALL | \
+ HDSP_CHAN_9652_SPDIF)
+
+struct hdsp_channel {
+ uint32_t ports;
+ char *descr;
+};
+
+enum hdsp_clock_type {
+ HDSP_CLOCK_INTERNAL,
+ HDSP_CLOCK_ADAT1,
+ HDSP_CLOCK_ADAT2,
+ HDSP_CLOCK_ADAT3,
+ HDSP_CLOCK_SPDIF,
+ HDSP_CLOCK_WORD,
+ HDSP_CLOCK_ADAT_SYNC
+};
+
+/* Preferred clock source. */
+#define HDSP_CONTROL_MASTER (1 << 4)
+#define HDSP_CONTROL_CLOCK_MASK (HDSP_CONTROL_MASTER | (1 << 13) | \
+ (1 << 16) | (1 << 17))
+#define HDSP_CONTROL_CLOCK(n) ((((n) & 0x04) << 11) | (((n) & 0x03) << 16))
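+/*
+ * Example: HDSP_CONTROL_CLOCK(5) sets bit 13 ((5 & 0x04) << 11) and
+ * bit 16 ((5 & 0x03) << 16), selecting ADAT sync as preferred clock.
+ */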
+
+/* Autosync selected clock source. */
+#define HDSP_STATUS2_CLOCK(n) (((n) & 0x07) << 8)
+#define HDSP_STATUS2_CLOCK_MASK HDSP_STATUS2_CLOCK(0x07)
+
+struct hdsp_clock_source {
+ char *name;
+ enum hdsp_clock_type type;
+};
+
+static MALLOC_DEFINE(M_HDSP, "hdsp", "hdsp audio");
+
+/* Channel registers */
+struct sc_chinfo {
+ struct snd_dbuf *buffer;
+ struct pcm_channel *channel;
+ struct sc_pcminfo *parent;
+
+ /* Channel information */
+ struct pcmchan_caps *caps;
+ uint32_t cap_fmts[4];
+ uint32_t dir;
+ uint32_t format;
+ uint32_t ports;
+ uint32_t lvol;
+ uint32_t rvol;
+
+ /* Buffer */
+ uint32_t *data;
+ uint32_t size;
+ uint32_t position;
+
+ /* Flags */
+ uint32_t run;
+};
+
+/* PCM device private data */
+struct sc_pcminfo {
+ device_t dev;
+ uint32_t (*ih) (struct sc_pcminfo *scp);
+ uint32_t chnum;
+ struct sc_chinfo chan[HDSP_MAX_CHANS];
+ struct sc_info *sc;
+ struct hdsp_channel *hc;
+};
+
+/* HDSP device private data */
+struct sc_info {
+ device_t dev;
+ struct mtx *lock;
+
+ uint32_t ctrl_register;
+ uint32_t type;
+
+ /* Control/Status register */
+ struct resource *cs;
+ int csid;
+ bus_space_tag_t cst;
+ bus_space_handle_t csh;
+
+ struct resource *irq;
+ int irqid;
+ void *ih;
+ bus_dma_tag_t dmat;
+
+ /* Play/Record DMA buffers */
+ uint32_t *pbuf;
+ uint32_t *rbuf;
+ uint32_t bufsize;
+ bus_dmamap_t pmap;
+ bus_dmamap_t rmap;
+ uint32_t period;
+ uint32_t speed;
+ uint32_t force_period;
+ uint32_t force_speed;
+};
+
+#define hdsp_read_1(sc, regno) \
+ bus_space_read_1((sc)->cst, (sc)->csh, (regno))
+#define hdsp_read_2(sc, regno) \
+ bus_space_read_2((sc)->cst, (sc)->csh, (regno))
+#define hdsp_read_4(sc, regno) \
+ bus_space_read_4((sc)->cst, (sc)->csh, (regno))
+
+#define hdsp_write_1(sc, regno, data) \
+ bus_space_write_1((sc)->cst, (sc)->csh, (regno), (data))
+#define hdsp_write_2(sc, regno, data) \
+ bus_space_write_2((sc)->cst, (sc)->csh, (regno), (data))
+#define hdsp_write_4(sc, regno, data) \
+ bus_space_write_4((sc)->cst, (sc)->csh, (regno), (data))
diff --git a/sys/dev/sound/pci/hdspe-pcm.c b/sys/dev/sound/pci/hdspe-pcm.c
index 0e78be113a66..09bbbe22dacf 100644
--- a/sys/dev/sound/pci/hdspe-pcm.c
+++ b/sys/dev/sound/pci/hdspe-pcm.c
@@ -34,7 +34,6 @@
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pci/hdspe.h>
-#include <dev/sound/chip.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -115,10 +114,8 @@ hdspe_port_first_row(uint32_t ports)
uint32_t ends;
/* Restrict ports to one set with contiguous slots. */
- if (ports & HDSPE_CHAN_AIO_LINE)
- ports = HDSPE_CHAN_AIO_LINE; /* Gap in the AIO slots here. */
- else if (ports & HDSPE_CHAN_AIO_ALL)
- ports &= HDSPE_CHAN_AIO_ALL; /* Rest of the AIO slots. */
+ if (ports & HDSPE_CHAN_AIO_ALL)
+ ports &= HDSPE_CHAN_AIO_ALL; /* All AIO slots. */
else if (ports & HDSPE_CHAN_RAY_ALL)
ports &= HDSPE_CHAN_RAY_ALL; /* All RayDAT slots. */
@@ -137,6 +134,8 @@ hdspe_channel_count(uint32_t ports, uint32_t adat_width)
/* AIO ports. */
if (ports & HDSPE_CHAN_AIO_LINE)
count += 2;
+ if (ports & HDSPE_CHAN_AIO_EXT)
+ count += 4;
if (ports & HDSPE_CHAN_AIO_PHONE)
count += 2;
if (ports & HDSPE_CHAN_AIO_AES)
@@ -190,6 +189,8 @@ hdspe_port_slot_offset(uint32_t port, unsigned int adat_width)
/* AIO ports */
case HDSPE_CHAN_AIO_LINE:
return (0);
+ case HDSPE_CHAN_AIO_EXT:
+ return (2);
case HDSPE_CHAN_AIO_PHONE:
return (6);
case HDSPE_CHAN_AIO_AES:
@@ -651,6 +652,35 @@ clean(struct sc_chinfo *ch)
}
/* Channel interface. */
+static int
+hdspechan_free(kobj_t obj, void *data)
+{
+ struct sc_pcminfo *scp;
+ struct sc_chinfo *ch;
+ struct sc_info *sc;
+
+ ch = data;
+ scp = ch->parent;
+ sc = scp->sc;
+
+#if 0
+ device_printf(scp->dev, "hdspechan_free()\n");
+#endif
+
+ snd_mtxlock(sc->lock);
+ if (ch->data != NULL) {
+ free(ch->data, M_HDSPE);
+ ch->data = NULL;
+ }
+ if (ch->caps != NULL) {
+ free(ch->caps, M_HDSPE);
+ ch->caps = NULL;
+ }
+ snd_mtxunlock(sc->lock);
+
+ return (0);
+}
+
static void *
hdspechan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
struct pcm_channel *c, int dir)
@@ -703,6 +733,7 @@ hdspechan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
if (sndbuf_setup(ch->buffer, ch->data, ch->size) != 0) {
device_printf(scp->dev, "Can't setup sndbuf.\n");
+ hdspechan_free(obj, ch);
return (NULL);
}
@@ -776,35 +807,6 @@ hdspechan_getptr(kobj_t obj, void *data)
}
static int
-hdspechan_free(kobj_t obj, void *data)
-{
- struct sc_pcminfo *scp;
- struct sc_chinfo *ch;
- struct sc_info *sc;
-
- ch = data;
- scp = ch->parent;
- sc = scp->sc;
-
-#if 0
- device_printf(scp->dev, "hdspechan_free()\n");
-#endif
-
- snd_mtxlock(sc->lock);
- if (ch->data != NULL) {
- free(ch->data, M_HDSPE);
- ch->data = NULL;
- }
- if (ch->caps != NULL) {
- free(ch->caps, M_HDSPE);
- ch->caps = NULL;
- }
- snd_mtxunlock(sc->lock);
-
- return (0);
-}
-
-static int
hdspechan_setformat(kobj_t obj, void *data, uint32_t format)
{
struct sc_chinfo *ch;
@@ -1062,13 +1064,10 @@ hdspe_pcm_attach(device_t dev)
pcm_flags |= SD_F_BITPERFECT;
pcm_setflags(dev, pcm_flags);
+ pcm_init(dev, scp);
+
play = (hdspe_channel_play_ports(scp->hc)) ? 1 : 0;
rec = (hdspe_channel_rec_ports(scp->hc)) ? 1 : 0;
- err = pcm_register(dev, scp, play, rec);
- if (err) {
- device_printf(dev, "Can't register pcm.\n");
- return (ENXIO);
- }
scp->chnum = 0;
if (play) {
@@ -1085,7 +1084,11 @@ hdspe_pcm_attach(device_t dev)
rman_get_start(scp->sc->cs),
rman_get_start(scp->sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ err = pcm_register(dev, status);
+ if (err) {
+ device_printf(dev, "Can't register pcm.\n");
+ return (ENXIO);
+ }
mixer_init(dev, &hdspemixer_class, scp);
diff --git a/sys/dev/sound/pci/hdspe.c b/sys/dev/sound/pci/hdspe.c
index f9c36df359e3..c292b2ddef56 100644
--- a/sys/dev/sound/pci/hdspe.c
+++ b/sys/dev/sound/pci/hdspe.c
@@ -37,7 +37,6 @@
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pci/hdspe.h>
-#include <dev/sound/chip.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
@@ -79,6 +78,7 @@ static struct hdspe_clock_source hdspe_clock_source_table_aio[] = {
static struct hdspe_channel chan_map_aio[] = {
{ HDSPE_CHAN_AIO_LINE, "line" },
+ { HDSPE_CHAN_AIO_EXT, "ext" },
{ HDSPE_CHAN_AIO_PHONE, "phone" },
{ HDSPE_CHAN_AIO_AES, "aes" },
{ HDSPE_CHAN_AIO_SPDIF, "s/pdif" },
@@ -246,6 +246,198 @@ hdspe_map_dmabuf(struct sc_info *sc)
}
}
+static const char *
+hdspe_settings_input_level(uint32_t settings)
+{
+ switch (settings & HDSPE_INPUT_LEVEL_MASK) {
+ case HDSPE_INPUT_LEVEL_LOWGAIN:
+ return ("LowGain");
+ case HDSPE_INPUT_LEVEL_PLUS4DBU:
+ return ("+4dBu");
+ case HDSPE_INPUT_LEVEL_MINUS10DBV:
+ return ("-10dBV");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdspe_sysctl_input_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t settings;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSPE AIO. */
+ if (sc->type != HDSPE_AIO)
+ return (ENXIO);
+
+ /* Extract current input level from settings register. */
+ settings = sc->settings_register & HDSPE_INPUT_LEVEL_MASK;
+ label = hdspe_settings_input_level(settings);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find input level matching the sysctl string. */
+ label = hdspe_settings_input_level(HDSPE_INPUT_LEVEL_LOWGAIN);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_INPUT_LEVEL_LOWGAIN;
+ label = hdspe_settings_input_level(HDSPE_INPUT_LEVEL_PLUS4DBU);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_INPUT_LEVEL_PLUS4DBU;
+ label = hdspe_settings_input_level(HDSPE_INPUT_LEVEL_MINUS10DBV);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_INPUT_LEVEL_MINUS10DBV;
+
+ /* Set input level in settings register. */
+ settings &= HDSPE_INPUT_LEVEL_MASK;
+ if (settings != (sc->settings_register & HDSPE_INPUT_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->settings_register &= ~HDSPE_INPUT_LEVEL_MASK;
+ sc->settings_register |= settings;
+ hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static const char *
+hdspe_settings_output_level(uint32_t settings)
+{
+ switch (settings & HDSPE_OUTPUT_LEVEL_MASK) {
+ case HDSPE_OUTPUT_LEVEL_HIGHGAIN:
+ return ("HighGain");
+ case HDSPE_OUTPUT_LEVEL_PLUS4DBU:
+ return ("+4dBu");
+ case HDSPE_OUTPUT_LEVEL_MINUS10DBV:
+ return ("-10dBV");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdspe_sysctl_output_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t settings;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSPE AIO. */
+ if (sc->type != HDSPE_AIO)
+ return (ENXIO);
+
+ /* Extract current output level from settings register. */
+ settings = sc->settings_register & HDSPE_OUTPUT_LEVEL_MASK;
+ label = hdspe_settings_output_level(settings);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find output level matching the sysctl string. */
+ label = hdspe_settings_output_level(HDSPE_OUTPUT_LEVEL_HIGHGAIN);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_OUTPUT_LEVEL_HIGHGAIN;
+ label = hdspe_settings_output_level(HDSPE_OUTPUT_LEVEL_PLUS4DBU);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_OUTPUT_LEVEL_PLUS4DBU;
+ label = hdspe_settings_output_level(HDSPE_OUTPUT_LEVEL_MINUS10DBV);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_OUTPUT_LEVEL_MINUS10DBV;
+
+ /* Set output level in settings register. */
+ settings &= HDSPE_OUTPUT_LEVEL_MASK;
+ if (settings != (sc->settings_register & HDSPE_OUTPUT_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->settings_register &= ~HDSPE_OUTPUT_LEVEL_MASK;
+ sc->settings_register |= settings;
+ hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
+static const char *
+hdspe_settings_phones_level(uint32_t settings)
+{
+ switch (settings & HDSPE_PHONES_LEVEL_MASK) {
+ case HDSPE_PHONES_LEVEL_HIGHGAIN:
+ return ("HighGain");
+ case HDSPE_PHONES_LEVEL_PLUS4DBU:
+ return ("+4dBu");
+ case HDSPE_PHONES_LEVEL_MINUS10DBV:
+ return ("-10dBV");
+ default:
+ return (NULL);
+ }
+}
+
+static int
+hdspe_sysctl_phones_level(SYSCTL_HANDLER_ARGS)
+{
+ struct sc_info *sc;
+ const char *label;
+ char buf[16] = "invalid";
+ int error;
+ uint32_t settings;
+
+ sc = oidp->oid_arg1;
+
+ /* Only available on HDSPE AIO. */
+ if (sc->type != HDSPE_AIO)
+ return (ENXIO);
+
+ /* Extract current phones level from settings register. */
+ settings = sc->settings_register & HDSPE_PHONES_LEVEL_MASK;
+ label = hdspe_settings_phones_level(settings);
+ if (label != NULL)
+ strlcpy(buf, label, sizeof(buf));
+
+ /* Process sysctl string request. */
+ error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ /* Find phones level matching the sysctl string. */
+ label = hdspe_settings_phones_level(HDSPE_PHONES_LEVEL_HIGHGAIN);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_PHONES_LEVEL_HIGHGAIN;
+ label = hdspe_settings_phones_level(HDSPE_PHONES_LEVEL_PLUS4DBU);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_PHONES_LEVEL_PLUS4DBU;
+ label = hdspe_settings_phones_level(HDSPE_PHONES_LEVEL_MINUS10DBV);
+ if (strncasecmp(buf, label, sizeof(buf)) == 0)
+ settings = HDSPE_PHONES_LEVEL_MINUS10DBV;
+
+ /* Set phones level in settings register. */
+ settings &= HDSPE_PHONES_LEVEL_MASK;
+ if (settings != (sc->settings_register & HDSPE_PHONES_LEVEL_MASK)) {
+ snd_mtxlock(sc->lock);
+ sc->settings_register &= ~HDSPE_PHONES_LEVEL_MASK;
+ sc->settings_register |= settings;
+ hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
+ snd_mtxunlock(sc->lock);
+ }
+ return (0);
+}
+
static int
hdspe_sysctl_sample_rate(SYSCTL_HANDLER_ARGS)
{
@@ -529,6 +721,15 @@ hdspe_init(struct sc_info *sc)
/* Other settings. */
sc->settings_register = 0;
+
+ /* Default gain levels. */
+ sc->settings_register &= ~HDSPE_INPUT_LEVEL_MASK;
+ sc->settings_register |= HDSPE_INPUT_LEVEL_LOWGAIN;
+ sc->settings_register &= ~HDSPE_OUTPUT_LEVEL_MASK;
+ sc->settings_register |= HDSPE_OUTPUT_LEVEL_MINUS10DBV;
+ sc->settings_register &= ~HDSPE_PHONES_LEVEL_MASK;
+ sc->settings_register |= HDSPE_PHONES_LEVEL_MINUS10DBV;
+
hdspe_write_4(sc, HDSPE_SETTINGS_REG, sc->settings_register);
return (0);
@@ -578,10 +779,10 @@ hdspe_attach(device_t dev)
return (ENXIO);
for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
- scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO);
+ scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF, M_WAITOK | M_ZERO);
scp->hc = &chan_map[i];
scp->sc = sc;
- scp->dev = device_add_child(dev, "pcm", -1);
+ scp->dev = device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
device_set_ivars(scp->dev, scp);
}
@@ -623,7 +824,34 @@ hdspe_attach(device_t dev)
sc, 0, hdspe_sysctl_sample_rate, "A",
"Force sample rate (32000, 44100, 48000, ... 192000)");
- return (bus_generic_attach(dev));
+ if (sc->type == HDSPE_AIO) {
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "phones_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdspe_sysctl_phones_level, "A",
+ "Phones output level ('HighGain', '+4dBU', '-10dBV')");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "output_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdspe_sysctl_output_level, "A",
+ "Analog output level ('HighGain', '+4dBU', '-10dBV')");
+
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "input_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
+ sc, 0, hdspe_sysctl_input_level, "A",
+ "Analog input level ('LowGain', '+4dBU', '-10dBV')");
+ }
+
+ bus_attach_children(dev);
+ return (0);
+}
+
+static void
+hdspe_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_DEVBUF);
}
static void
@@ -649,7 +877,7 @@ hdspe_detach(device_t dev)
return (0);
}
- err = device_delete_children(dev);
+ err = bus_generic_detach(dev);
if (err)
return (err);
@@ -673,6 +901,7 @@ static device_method_t hdspe_methods[] = {
DEVMETHOD(device_probe, hdspe_probe),
DEVMETHOD(device_attach, hdspe_attach),
DEVMETHOD(device_detach, hdspe_detach),
+ DEVMETHOD(bus_child_deleted, hdspe_child_deleted),
{ 0, 0 }
};
diff --git a/sys/dev/sound/pci/hdspe.h b/sys/dev/sound/pci/hdspe.h
index daffeb4ddebc..bced78758068 100644
--- a/sys/dev/sound/pci/hdspe.h
+++ b/sys/dev/sound/pci/hdspe.h
@@ -74,35 +74,43 @@
#define HDSPE_LAT_BYTES_MIN (32 * 4)
#define hdspe_encode_latency(x) (((x)<<1) & HDSPE_LAT_MASK)
-/* Gain */
-#define HDSP_ADGain0 (1 << 25)
-#define HDSP_ADGain1 (1 << 26)
-#define HDSP_DAGain0 (1 << 27)
-#define HDSP_DAGain1 (1 << 28)
-#define HDSP_PhoneGain0 (1 << 29)
-#define HDSP_PhoneGain1 (1 << 30)
-
-#define HDSP_ADGainMask (HDSP_ADGain0 | HDSP_ADGain1)
-#define HDSP_ADGainMinus10dBV (HDSP_ADGainMask)
-#define HDSP_ADGainPlus4dBu (HDSP_ADGain0)
-#define HDSP_ADGainLowGain 0
-
-#define HDSP_DAGainMask (HDSP_DAGain0 | HDSP_DAGain1)
-#define HDSP_DAGainHighGain (HDSP_DAGainMask)
-#define HDSP_DAGainPlus4dBu (HDSP_DAGain0)
-#define HDSP_DAGainMinus10dBV 0
-
-#define HDSP_PhoneGainMask (HDSP_PhoneGain0|HDSP_PhoneGain1)
-#define HDSP_PhoneGain0dB HDSP_PhoneGainMask
-#define HDSP_PhoneGainMinus6dB (HDSP_PhoneGain0)
-#define HDSP_PhoneGainMinus12dB 0
-
-/* Settings */
+/* Register addresses */
#define HDSPE_SETTINGS_REG 0
#define HDSPE_CONTROL_REG 64
#define HDSPE_STATUS_REG 0
#define HDSPE_STATUS1_REG 64
#define HDSPE_STATUS2_REG 192
+
+/* Settings register flags */
+#define HDSPE_SETTINGS_INPUT_GAIN0 (1 << 20)
+#define HDSPE_SETTINGS_INPUT_GAIN1 (1 << 21)
+#define HDSPE_SETTINGS_OUTPUT_GAIN0 (1 << 22)
+#define HDSPE_SETTINGS_OUTPUT_GAIN1 (1 << 23)
+#define HDSPE_SETTINGS_PHONES_GAIN0 (1 << 24)
+#define HDSPE_SETTINGS_PHONES_GAIN1 (1 << 25)
+
+/* Analog input gain level */
+#define HDSPE_INPUT_LEVEL_MASK (HDSPE_SETTINGS_INPUT_GAIN0 | \
+ HDSPE_SETTINGS_INPUT_GAIN1)
+#define HDSPE_INPUT_LEVEL_LOWGAIN 0
+#define HDSPE_INPUT_LEVEL_PLUS4DBU (HDSPE_SETTINGS_INPUT_GAIN0)
+#define HDSPE_INPUT_LEVEL_MINUS10DBV (HDSPE_SETTINGS_INPUT_GAIN1)
+
+/* Analog output gain level */
+#define HDSPE_OUTPUT_LEVEL_MASK (HDSPE_SETTINGS_OUTPUT_GAIN0 | \
+ HDSPE_SETTINGS_OUTPUT_GAIN1)
+#define HDSPE_OUTPUT_LEVEL_HIGHGAIN 0
+#define HDSPE_OUTPUT_LEVEL_PLUS4DBU (HDSPE_SETTINGS_OUTPUT_GAIN0)
+#define HDSPE_OUTPUT_LEVEL_MINUS10DBV (HDSPE_SETTINGS_OUTPUT_GAIN1)
+
+/* Phones output gain level */
+#define HDSPE_PHONES_LEVEL_MASK (HDSPE_SETTINGS_PHONES_GAIN0 | \
+ HDSPE_SETTINGS_PHONES_GAIN1)
+#define HDSPE_PHONES_LEVEL_HIGHGAIN 0
+#define HDSPE_PHONES_LEVEL_PLUS4DBU (HDSPE_SETTINGS_PHONES_GAIN0)
+#define HDSPE_PHONES_LEVEL_MINUS10DBV (HDSPE_SETTINGS_PHONES_GAIN1)
+
+/* Control register flags */
#define HDSPE_ENABLE (1 << 0)
/* Interrupts */
@@ -119,23 +127,25 @@
#define HDSPE_DMASEGSIZE (HDSPE_CHANBUF_SIZE * HDSPE_MAX_SLOTS)
#define HDSPE_CHAN_AIO_LINE (1 << 0)
-#define HDSPE_CHAN_AIO_PHONE (1 << 1)
-#define HDSPE_CHAN_AIO_AES (1 << 2)
-#define HDSPE_CHAN_AIO_SPDIF (1 << 3)
-#define HDSPE_CHAN_AIO_ADAT (1 << 4)
+#define HDSPE_CHAN_AIO_EXT (1 << 1)
+#define HDSPE_CHAN_AIO_PHONE (1 << 2)
+#define HDSPE_CHAN_AIO_AES (1 << 3)
+#define HDSPE_CHAN_AIO_SPDIF (1 << 4)
+#define HDSPE_CHAN_AIO_ADAT (1 << 5)
#define HDSPE_CHAN_AIO_ALL_REC (HDSPE_CHAN_AIO_LINE | \
+ HDSPE_CHAN_AIO_EXT | \
HDSPE_CHAN_AIO_AES | \
HDSPE_CHAN_AIO_SPDIF | \
HDSPE_CHAN_AIO_ADAT)
#define HDSPE_CHAN_AIO_ALL (HDSPE_CHAN_AIO_ALL_REC | \
HDSPE_CHAN_AIO_PHONE) \
-#define HDSPE_CHAN_RAY_AES (1 << 5)
-#define HDSPE_CHAN_RAY_SPDIF (1 << 6)
-#define HDSPE_CHAN_RAY_ADAT1 (1 << 7)
-#define HDSPE_CHAN_RAY_ADAT2 (1 << 8)
-#define HDSPE_CHAN_RAY_ADAT3 (1 << 9)
-#define HDSPE_CHAN_RAY_ADAT4 (1 << 10)
+#define HDSPE_CHAN_RAY_AES (1 << 6)
+#define HDSPE_CHAN_RAY_SPDIF (1 << 7)
+#define HDSPE_CHAN_RAY_ADAT1 (1 << 8)
+#define HDSPE_CHAN_RAY_ADAT2 (1 << 9)
+#define HDSPE_CHAN_RAY_ADAT3 (1 << 10)
+#define HDSPE_CHAN_RAY_ADAT4 (1 << 11)
#define HDSPE_CHAN_RAY_ALL (HDSPE_CHAN_RAY_AES | \
HDSPE_CHAN_RAY_SPDIF | \
HDSPE_CHAN_RAY_ADAT1 | \
diff --git a/sys/dev/sound/pci/ich.c b/sys/dev/sound/pci/ich.c
index fbde0accfd28..500d6d95daac 100644
--- a/sys/dev/sound/pci/ich.c
+++ b/sys/dev/sound/pci/ich.c
@@ -695,7 +695,7 @@ ich_setstatus(struct sc_info *sc)
device_printf(sc->dev,
"PCI Master abort workaround enabled\n");
- pcm_setstatus(sc->dev, status);
+ pcm_register(sc->dev, status);
}
/* -------------------------------------------------------------------- */
@@ -860,12 +860,12 @@ ich_init(struct sc_info *sc)
static int
ich_pci_probe(device_t dev)
{
- int i;
+ size_t i;
uint16_t devid, vendor;
vendor = pci_get_vendor(dev);
devid = pci_get_device(dev);
- for (i = 0; i < sizeof(ich_devs)/sizeof(ich_devs[0]); i++) {
+ for (i = 0; i < nitems(ich_devs); i++) {
if (vendor == ich_devs[i].vendor &&
devid == ich_devs[i].devid) {
device_set_desc(dev, ich_devs[i].name);
@@ -1066,8 +1066,7 @@ ich_pci_attach(device_t dev)
ich_setmap, sc, 0))
goto bad;
- if (pcm_register(dev, sc, 1, (sc->hasmic) ? 2 : 1))
- goto bad;
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_PLAY, &ichchan_class, sc); /* play */
pcm_addchan(dev, PCMDIR_REC, &ichchan_class, sc); /* record */
diff --git a/sys/dev/sound/pci/maestro3.c b/sys/dev/sound/pci/maestro3.c
index 6dd54a66f683..2d102fcd6dbe 100644
--- a/sys/dev/sound/pci/maestro3.c
+++ b/sys/dev/sound/pci/maestro3.c
@@ -488,7 +488,7 @@ m3_pchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel
DMAC_BLOCKF_SELECTOR);
/* set an armload of static initializers */
- for(i = 0 ; i < (sizeof(pv) / sizeof(pv[0])) ; i++) {
+ for(i = 0 ; i < nitems(pv); i++) {
m3_wr_assp_data(sc, ch->dac_data + pv[i].addr, pv[i].val);
}
@@ -862,7 +862,7 @@ m3_rchan_init(kobj_t kobj, void *devinfo, struct snd_dbuf *b, struct pcm_channel
DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);
/* set an armload of static initializers */
- for(i = 0 ; i < (sizeof(rv) / sizeof(rv[0])) ; i++) {
+ for(i = 0 ; i < nitems(rv); i++) {
m3_wr_assp_data(sc, ch->adc_data + rv[i].addr, rv[i].val);
}
@@ -1423,10 +1423,7 @@ m3_pci_attach(device_t dev)
m3_enable_ints(sc);
- if (pcm_register(dev, sc, dacn, adcn)) {
- device_printf(dev, "pcm_register error\n");
- goto bad;
- }
+ pcm_init(dev, sc);
for (i=0 ; i<dacn ; i++) {
if (pcm_addchan(dev, PCMDIR_PLAY, &m3_pch_class, sc)) {
device_printf(dev, "pcm_addchan (play) error\n");
@@ -1443,8 +1440,8 @@ m3_pci_attach(device_t dev)
(sc->regtype == SYS_RES_IOPORT)? "port" : "mem",
rman_get_start(sc->reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_setstatus(dev, status)) {
- device_printf(dev, "attach: pcm_setstatus error\n");
+ if (pcm_register(dev, status)) {
+ device_printf(dev, "pcm_register error\n");
goto bad;
}
@@ -1453,7 +1450,7 @@ m3_pci_attach(device_t dev)
/* Create the buffer for saving the card state during suspend */
len = sizeof(u_int16_t) * (REV_B_CODE_MEMORY_LENGTH +
REV_B_DATA_MEMORY_LENGTH);
- sc->savemem = (u_int16_t*)malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
+ sc->savemem = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
return 0;
diff --git a/sys/dev/sound/pci/neomagic.c b/sys/dev/sound/pci/neomagic.c
index 25273633ff18..d7824c990a52 100644
--- a/sys/dev/sound/pci/neomagic.c
+++ b/sys/dev/sound/pci/neomagic.c
@@ -707,10 +707,11 @@ nm_pci_attach(device_t dev)
rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, sc, 1, 1)) goto bad;
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_REC, &nmchan_class, sc);
pcm_addchan(dev, PCMDIR_PLAY, &nmchan_class, sc);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/solo.c b/sys/dev/sound/pci/solo.c
index bee79e723696..90dd2e26ad41 100644
--- a/sys/dev/sound/pci/solo.c
+++ b/sys/dev/sound/pci/solo.c
@@ -35,7 +35,6 @@
#include <dev/pci/pcivar.h>
#include <dev/sound/isa/sb.h>
-#include <dev/sound/chip.h>
#include "mixer_if.h"
@@ -1027,11 +1026,11 @@ ess_attach(device_t dev)
rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, sc, 1, 1))
- goto no;
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_REC, &esschan_class, sc);
pcm_addchan(dev, PCMDIR_PLAY, &esschan_class, sc);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto no;
return 0;
diff --git a/sys/dev/sound/pci/spicds.c b/sys/dev/sound/pci/spicds.c
index 3b67101b8df0..da0e8d9da6d5 100644
--- a/sys/dev/sound/pci/spicds.c
+++ b/sys/dev/sound/pci/spicds.c
@@ -144,7 +144,7 @@ spicds_create(device_t dev, void *devinfo, int num, spicds_ctrl ctrl)
#if(0)
device_printf(dev, "spicds_create(dev, devinfo, %d, ctrl)\n", num);
#endif
- codec = (struct spicds_info *)malloc(sizeof *codec, M_SPICDS, M_NOWAIT);
+ codec = malloc(sizeof(*codec), M_SPICDS, M_NOWAIT);
if (codec == NULL)
return NULL;
diff --git a/sys/dev/sound/pci/t4dwave.c b/sys/dev/sound/pci/t4dwave.c
index 653e610febbe..07b9e1004573 100644
--- a/sys/dev/sound/pci/t4dwave.c
+++ b/sys/dev/sound/pci/t4dwave.c
@@ -921,12 +921,12 @@ tr_pci_attach(device_t dev)
rman_get_start(tr->reg), rman_get_start(tr->irq),
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, tr, dacn, 1))
- goto bad;
+ pcm_init(dev, tr);
pcm_addchan(dev, PCMDIR_REC, &trrchan_class, tr);
for (i = 0; i < dacn; i++)
pcm_addchan(dev, PCMDIR_PLAY, &trpchan_class, tr);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
diff --git a/sys/dev/sound/pci/via8233.c b/sys/dev/sound/pci/via8233.c
index 9f3b312e4365..243353805b94 100644
--- a/sys/dev/sound/pci/via8233.c
+++ b/sys/dev/sound/pci/via8233.c
@@ -1352,8 +1352,7 @@ via_attach(device_t dev)
device_get_nameunit(device_get_parent(dev)));
/* Register */
- if (pcm_register(dev, via, via_dxs_chnum + via_sgd_chnum, NWRCHANS))
- goto bad;
+ pcm_init(dev, via);
for (i = 0; i < via_dxs_chnum; i++)
pcm_addchan(dev, PCMDIR_PLAY, &via8233dxs_class, via);
for (i = 0; i < via_sgd_chnum; i++)
@@ -1366,7 +1365,8 @@ via_attach(device_t dev)
(via_dxs_chnum > 0) ? "En" : "Dis", (via->dxs_src) ? "(SRC)" : "",
via_dxs_chnum, via_sgd_chnum, NWRCHANS);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return (0);
bad:
diff --git a/sys/dev/sound/pci/via82c686.c b/sys/dev/sound/pci/via82c686.c
index 44f846b684d7..40f3521a57a2 100644
--- a/sys/dev/sound/pci/via82c686.c
+++ b/sys/dev/sound/pci/via82c686.c
@@ -585,10 +585,11 @@ via_attach(device_t dev)
device_get_nameunit(device_get_parent(dev)));
/* Register */
- if (pcm_register(dev, via, 1, 1)) goto bad;
+ pcm_init(dev, via);
pcm_addchan(dev, PCMDIR_PLAY, &viachan_class, via);
pcm_addchan(dev, PCMDIR_REC, &viachan_class, via);
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto bad;
return 0;
bad:
if (via->codec) ac97_destroy(via->codec);
diff --git a/sys/dev/sound/pci/vibes.c b/sys/dev/sound/pci/vibes.c
index e587f0113b5d..7e908f188614 100644
--- a/sys/dev/sound/pci/vibes.c
+++ b/sys/dev/sound/pci/vibes.c
@@ -866,18 +866,17 @@ sv_attach(device_t dev) {
if (bootverbose)
printf("Sonicvibes: revision %d.\n", sc->rev);
- if (pcm_register(dev, sc, 1, 1)) {
- device_printf(dev, "sv_attach: pcm_register fail\n");
- goto fail;
- }
-
+ pcm_init(dev, sc);
pcm_addchan(dev, PCMDIR_PLAY, &svpchan_class, sc);
pcm_addchan(dev, PCMDIR_REC, &svrchan_class, sc);
snprintf(status, SND_STATUSLEN, "port 0x%jx irq %jd on %s",
rman_get_start(sc->enh_reg), rman_get_start(sc->irq),
device_get_nameunit(device_get_parent(dev)));
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status)) {
+ device_printf(dev, "sv_attach: pcm_register fail\n");
+ goto fail;
+ }
DEB(printf("sv_attach: succeeded\n"));
diff --git a/sys/dev/sound/pcm/ac97.c b/sys/dev/sound/pcm/ac97.c
index d04ec2d8271c..f5ca06cd3942 100644
--- a/sys/dev/sound/pcm/ac97.c
+++ b/sys/dev/sound/pcm/ac97.c
@@ -32,7 +32,6 @@
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pcm/ac97.h>
-#include <dev/sound/pcm/ac97_patch.h>
#include <dev/pci/pcivar.h>
@@ -40,6 +39,8 @@
static MALLOC_DEFINE(M_AC97, "ac97", "ac97 codec");
+typedef void (*ac97_patch)(struct ac97_info *);
+
struct ac97mixtable_entry {
int reg; /* register index */
/* reg < 0 if inverted polarity */
@@ -133,6 +134,12 @@ static const struct ac97_vendorid ac97vendorid[] = {
{ 0x00000000, NULL }
};
+static void ad1886_patch(struct ac97_info *);
+static void ad198x_patch(struct ac97_info *);
+static void ad1981b_patch(struct ac97_info *);
+static void cmi9739_patch(struct ac97_info *);
+static void alc655_patch(struct ac97_info *);
+
static struct ac97_codecid ac97codecid[] = {
{ 0x41445303, 0x00, 0, "AD1819", 0 },
{ 0x41445340, 0x00, 0, "AD1881", 0 },
@@ -315,12 +322,6 @@ ac97_rdcd(struct ac97_info *codec, int reg)
i[1] = AC97_READ(codec->methods, codec->devinfo, reg);
while (i[0] != i[1] && j)
i[j-- & 1] = AC97_READ(codec->methods, codec->devinfo, reg);
-#if 0
- if (j < 100) {
- device_printf(codec->dev, "%s(): Inconsistent register value at"
- " 0x%08x (retry: %d)\n", __func__, reg, 100 - j);
- }
-#endif
return i[!(j & 1)];
}
return AC97_READ(codec->methods, codec->devinfo, reg);
@@ -505,9 +506,6 @@ ac97_setmixer(struct ac97_info *codec, unsigned channel, unsigned left, unsigned
snd_mtxunlock(codec->lock);
return left | (right << 8);
} else {
-#if 0
- printf("ac97_setmixer: reg=%d, bits=%d, enable=%d\n", e->reg, e->bits, e->enable);
-#endif
return -1;
}
}
@@ -730,10 +728,6 @@ ac97_initmixer(struct ac97_info *codec)
for (j = 0; k >> j; j++)
;
if (j != 0) {
-#if 0
- device_printf(codec->dev, "%2d: [ac97_rdcd() = %d] [Testbit = %d] %d -> %d\n",
- i, k, bit, codec->mix[i].bits, j);
-#endif
codec->mix[i].enable = 1;
codec->mix[i].bits = j;
} else if (reg == AC97_MIX_BEEP) {
@@ -749,9 +743,6 @@ ac97_initmixer(struct ac97_info *codec)
codec->mix[i].enable = 0;
ac97_wrcd(codec, reg, old);
}
-#if 0
- printf("mixch %d, en=%d, b=%d\n", i, codec->mix[i].enable, codec->mix[i].bits);
-#endif
}
device_printf(codec->dev, "<%s>\n",
@@ -872,6 +863,93 @@ ac97_getflags(struct ac97_info *codec)
return codec->flags;
}
+static void
+ad1886_patch(struct ac97_info *codec)
+{
+#define AC97_AD_JACK_SPDIF 0x72
+ /*
+ * Presario700 workaround
+ * for Jack Sense/SPDIF Register mis-setting causing
+ * no audible output
+ * by Santiago Nullo 04/05/2002
+ */
+ ac97_wrcd(codec, AC97_AD_JACK_SPDIF, 0x0010);
+}
+
+static void
+ad198x_patch(struct ac97_info *codec)
+{
+ switch (ac97_getsubvendor(codec)) {
+ case 0x11931043: /* Not for ASUS A9T (probably else too). */
+ break;
+ default:
+ ac97_wrcd(codec, 0x76, ac97_rdcd(codec, 0x76) | 0x0420);
+ break;
+ }
+}
+
+static void
+ad1981b_patch(struct ac97_info *codec)
+{
+ /*
+ * Enable headphone jack sensing.
+ */
+ switch (ac97_getsubvendor(codec)) {
+ case 0x02d91014: /* IBM Thinkcentre */
+ case 0x099c103c: /* HP nx6110 */
+ ac97_wrcd(codec, AC97_AD_JACK_SPDIF,
+ ac97_rdcd(codec, AC97_AD_JACK_SPDIF) | 0x0800);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+cmi9739_patch(struct ac97_info *codec)
+{
+ /*
+ * A few laptops need extra register initialization
+ * to power up the internal speakers.
+ */
+ switch (ac97_getsubvendor(codec)) {
+ case 0x18431043: /* ASUS W1000N */
+ ac97_wrcd(codec, AC97_REG_POWER, 0x000f);
+ ac97_wrcd(codec, AC97_MIXEXT_CLFE, 0x0000);
+ ac97_wrcd(codec, 0x64, 0x7110);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+alc655_patch(struct ac97_info *codec)
+{
+ /*
+ * MSI (Micro-Star International) specific EAPD quirk.
+ */
+ switch (ac97_getsubvendor(codec)) {
+ case 0x00611462: /* MSI S250 */
+ case 0x01311462: /* MSI S270 */
+ case 0x01611462: /* LG K1 Express */
+ case 0x03511462: /* MSI L725 */
+ ac97_wrcd(codec, 0x7a, ac97_rdcd(codec, 0x7a) & 0xfffd);
+ break;
+ case 0x10ca1734:
+ /*
+ * Amilo Pro V2055 with ALC655 has phone out by default
+ * disabled (surround on), leaving us only with internal
+ * speakers. This should really go to mixer. We write the
+ * Data Flow Control reg.
+ */
+ ac97_wrcd(codec, 0x6a, ac97_rdcd(codec, 0x6a) | 0x0001);
+ break;
+ default:
+ break;
+ }
+}
+
/* -------------------------------------------------------------------- */
static int
@@ -1003,13 +1081,6 @@ ac97mix_init(struct snd_mixer *m)
if (pcm_getflags(codec->dev) & SD_F_SOFTPCMVOL)
ac97_wrcd(codec, AC97_MIX_PCM, 0);
-#if 0
- /* XXX For the sake of debugging purposes */
- mix_setparentchild(m, SOUND_MIXER_VOLUME,
- SOUND_MASK_PCM | SOUND_MASK_CD);
- mix_setrealdev(m, SOUND_MIXER_VOLUME, SOUND_MIXER_NONE);
- ac97_wrcd(codec, AC97_MIX_MASTER, 0);
-#endif
mask = 0;
for (i = 0; i < AC97_MIXER_SIZE; i++)
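With ac97_patch.c folded into ac97.c (the file deletion follows below), the quirk routines become static and the ac97_patch function-pointer typedef is declared locally, so the ac97_patch.h include can be dropped. A hedged sketch of the dispatch this keeps working; the helper is illustrative, the exact struct ac97_codecid layout is not shown in the hunks (its visible entries end in a 0, presumably the NULL patch hook):

	static void
	ac97_apply_patch(struct ac97_info *codec, ac97_patch patch)
	{
		/* invoked at codec-probe time when the ID table carries a hook */
		if (patch != NULL)
			patch(codec);	/* e.g. ad1886_patch, alc655_patch, ... */
	}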
diff --git a/sys/dev/sound/pcm/ac97_patch.c b/sys/dev/sound/pcm/ac97_patch.c
deleted file mode 100644
index 671b6598f51a..000000000000
--- a/sys/dev/sound/pcm/ac97_patch.c
+++ /dev/null
@@ -1,117 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2002 Orion Hodson
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifdef HAVE_KERNEL_OPTION_HEADERS
-#include "opt_snd.h"
-#endif
-
-#include <dev/sound/pcm/sound.h>
-#include <dev/sound/pcm/ac97.h>
-#include <dev/sound/pcm/ac97_patch.h>
-
-void ad1886_patch(struct ac97_info* codec)
-{
-#define AC97_AD_JACK_SPDIF 0x72
- /*
- * Presario700 workaround
- * for Jack Sense/SPDIF Register misetting causing
- * no audible output
- * by Santiago Nullo 04/05/2002
- */
- ac97_wrcd(codec, AC97_AD_JACK_SPDIF, 0x0010);
-}
-
-void ad198x_patch(struct ac97_info* codec)
-{
- switch (ac97_getsubvendor(codec)) {
- case 0x11931043: /* Not for ASUS A9T (probably else too). */
- break;
- default:
- ac97_wrcd(codec, 0x76, ac97_rdcd(codec, 0x76) | 0x0420);
- break;
- }
-}
-
-void ad1981b_patch(struct ac97_info* codec)
-{
- /*
- * Enable headphone jack sensing.
- */
- switch (ac97_getsubvendor(codec)) {
- case 0x02d91014: /* IBM Thinkcentre */
- case 0x099c103c: /* HP nx6110 */
- ac97_wrcd(codec, AC97_AD_JACK_SPDIF,
- ac97_rdcd(codec, AC97_AD_JACK_SPDIF) | 0x0800);
- break;
- default:
- break;
- }
-}
-
-void cmi9739_patch(struct ac97_info* codec)
-{
- /*
- * Few laptops need extra register initialization
- * to power up the internal speakers.
- */
- switch (ac97_getsubvendor(codec)) {
- case 0x18431043: /* ASUS W1000N */
- ac97_wrcd(codec, AC97_REG_POWER, 0x000f);
- ac97_wrcd(codec, AC97_MIXEXT_CLFE, 0x0000);
- ac97_wrcd(codec, 0x64, 0x7110);
- break;
- default:
- break;
- }
-}
-
-void alc655_patch(struct ac97_info* codec)
-{
- /*
- * MSI (Micro-Star International) specific EAPD quirk.
- */
- switch (ac97_getsubvendor(codec)) {
- case 0x00611462: /* MSI S250 */
- case 0x01311462: /* MSI S270 */
- case 0x01611462: /* LG K1 Express */
- case 0x03511462: /* MSI L725 */
- ac97_wrcd(codec, 0x7a, ac97_rdcd(codec, 0x7a) & 0xfffd);
- break;
- case 0x10ca1734:
- /*
- * Amilo Pro V2055 with ALC655 has phone out by default
- * disabled (surround on), leaving us only with internal
- * speakers. This should really go to mixer. We write the
- * Data Flow Control reg.
- */
- ac97_wrcd(codec, 0x6a, ac97_rdcd(codec, 0x6a) | 0x0001);
- break;
- default:
- break;
- }
-}
diff --git a/sys/dev/sound/pcm/buffer.c b/sys/dev/sound/pcm/buffer.c
index afb4b95e357a..de535ec2dcba 100644
--- a/sys/dev/sound/pcm/buffer.c
+++ b/sys/dev/sound/pcm/buffer.c
@@ -356,15 +356,6 @@ sndbuf_setfmt(struct snd_dbuf *b, u_int32_t fmt)
b->fmt = fmt;
b->bps = AFMT_BPS(b->fmt);
b->align = AFMT_ALIGN(b->fmt);
-#if 0
- b->bps = AFMT_CHANNEL(b->fmt);
- if (b->fmt & AFMT_16BIT)
- b->bps <<= 1;
- else if (b->fmt & AFMT_24BIT)
- b->bps *= 3;
- else if (b->fmt & AFMT_32BIT)
- b->bps <<= 2;
-#endif
return 0;
}
@@ -470,39 +461,30 @@ sndbuf_getsel(struct snd_dbuf *b)
unsigned int
sndbuf_getxrun(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->xrun;
}
void
sndbuf_setxrun(struct snd_dbuf *b, unsigned int xrun)
{
- SNDBUF_LOCKASSERT(b);
-
b->xrun = xrun;
}
unsigned int
sndbuf_gethwptr(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->hp;
}
void
sndbuf_sethwptr(struct snd_dbuf *b, unsigned int ptr)
{
- SNDBUF_LOCKASSERT(b);
-
b->hp = ptr;
}
unsigned int
sndbuf_getready(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
KASSERT((b->rl >= 0) && (b->rl <= b->bufsize), ("%s: b->rl invalid %d", __func__, b->rl));
return b->rl;
@@ -511,7 +493,6 @@ sndbuf_getready(struct snd_dbuf *b)
unsigned int
sndbuf_getreadyptr(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
KASSERT((b->rp >= 0) && (b->rp <= b->bufsize), ("%s: b->rp invalid %d", __func__, b->rp));
return b->rp;
@@ -520,7 +501,6 @@ sndbuf_getreadyptr(struct snd_dbuf *b)
unsigned int
sndbuf_getfree(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
KASSERT((b->rl >= 0) && (b->rl <= b->bufsize), ("%s: b->rl invalid %d", __func__, b->rl));
return b->bufsize - b->rl;
@@ -529,7 +509,6 @@ sndbuf_getfree(struct snd_dbuf *b)
unsigned int
sndbuf_getfreeptr(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
KASSERT((b->rp >= 0) && (b->rp <= b->bufsize), ("%s: b->rp invalid %d", __func__, b->rp));
KASSERT((b->rl >= 0) && (b->rl <= b->bufsize), ("%s: b->rl invalid %d", __func__, b->rl));
@@ -539,40 +518,30 @@ sndbuf_getfreeptr(struct snd_dbuf *b)
u_int64_t
sndbuf_getblocks(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->total / b->blksz;
}
u_int64_t
sndbuf_getprevblocks(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->prev_total / b->blksz;
}
u_int64_t
sndbuf_gettotal(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->total;
}
u_int64_t
sndbuf_getprevtotal(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
return b->prev_total;
}
void
sndbuf_updateprevtotal(struct snd_dbuf *b)
{
- SNDBUF_LOCKASSERT(b);
-
b->prev_total = b->total;
}
diff --git a/sys/dev/sound/pcm/buffer.h b/sys/dev/sound/pcm/buffer.h
index 2c5d6e7c214b..ddf4083ec19f 100644
--- a/sys/dev/sound/pcm/buffer.h
+++ b/sys/dev/sound/pcm/buffer.h
@@ -26,8 +26,6 @@
* SUCH DAMAGE.
*/
-#define SNDBUF_LOCKASSERT(b)
-
#define SNDBUF_F_MANAGED 0x00000008
#define SNDBUF_NAMELEN 48
diff --git a/sys/dev/sound/pcm/channel.c b/sys/dev/sound/pcm/channel.c
index 859476f212ae..4d13f20a5262 100644
--- a/sys/dev/sound/pcm/channel.c
+++ b/sys/dev/sound/pcm/channel.c
@@ -6,6 +6,10 @@
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* Portions Copyright (c) Luigi Rizzo <luigi@FreeBSD.org> - 1997-99
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -128,6 +132,7 @@ chn_vpc_proc(int reset, int db)
struct pcm_channel *c;
int i;
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
@@ -146,6 +151,7 @@ chn_vpc_proc(int reset, int db)
PCM_RELEASE(d);
PCM_UNLOCK(d);
}
+ bus_topo_unlock();
}
static int
@@ -166,7 +172,7 @@ sysctl_hw_snd_vpc_0db(SYSCTL_HANDLER_ARGS)
return (0);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, vpc_0db,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, sizeof(int),
sysctl_hw_snd_vpc_0db, "I",
"0db relative level");
@@ -186,7 +192,7 @@ sysctl_hw_snd_vpc_reset(SYSCTL_HANDLER_ARGS)
return (0);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, vpc_reset,
- CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, sizeof(int),
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof(int),
sysctl_hw_snd_vpc_reset, "I",
"reset volume on all channels");
@@ -309,14 +315,7 @@ chn_wakeup(struct pcm_channel *c)
if (CHN_EMPTY(c, children.busy)) {
if (SEL_WAITING(sndbuf_getsel(bs)) && chn_polltrigger(c))
selwakeuppri(sndbuf_getsel(bs), PRIBIO);
- if (c->flags & CHN_F_SLEEPING) {
- /*
- * Ok, I can just panic it right here since it is
- * quite obvious that we never allow multiple waiters
- * from userland. I'm too generous...
- */
- CHN_BROADCAST(&c->intr_cv);
- }
+ CHN_BROADCAST(&c->intr_cv);
} else {
CHN_FOREACH(ch, c, children.busy) {
CHN_LOCK(ch);
@@ -332,15 +331,13 @@ chn_sleep(struct pcm_channel *c, int timeout)
int ret;
CHN_LOCKASSERT(c);
- KASSERT((c->flags & CHN_F_SLEEPING) == 0,
- ("%s(): entered with CHN_F_SLEEPING", __func__));
if (c->flags & CHN_F_DEAD)
return (EINVAL);
- c->flags |= CHN_F_SLEEPING;
+ c->sleeping++;
ret = cv_timedwait_sig(&c->intr_cv, c->lock, timeout);
- c->flags &= ~CHN_F_SLEEPING;
+ c->sleeping--;
return ((c->flags & CHN_F_DEAD) ? EINVAL : ret);
}
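The two hunks above replace the CHN_F_SLEEPING flag with a sleeping counter (added to struct pcm_channel in the channel.h hunk below), so any number of userland waiters may block on the channel condvar and chn_wakeup() can broadcast unconditionally. The waiter/waker pair, reduced to its essentials:

	/* waiter, i.e. chn_sleep(), entered with the channel lock held */
	c->sleeping++;
	ret = cv_timedwait_sig(&c->intr_cv, c->lock, timeout);
	c->sleeping--;

	/* waker, i.e. chn_wakeup(); CHN_BROADCAST is now a bare broadcast */
	cv_broadcastpri(&c->intr_cv, PRIBIO);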
@@ -416,23 +413,6 @@ chn_wrfeed(struct pcm_channel *c)
chn_wakeup(c);
}
-#if 0
-static void
-chn_wrupdate(struct pcm_channel *c)
-{
-
- CHN_LOCKASSERT(c);
- KASSERT(c->direction == PCMDIR_PLAY, ("%s(): bad channel", __func__));
-
- if ((c->flags & (CHN_F_MMAP | CHN_F_VIRTUAL)) || CHN_STOPPED(c))
- return;
- chn_dmaupdate(c);
- chn_wrfeed(c);
- /* tell the driver we've updated the primary buffer */
- chn_trigger(c, PCMTRIG_EMLDMAWR);
-}
-#endif
-
static void
chn_wrintr(struct pcm_channel *c)
{
@@ -546,22 +526,6 @@ chn_rdfeed(struct pcm_channel *c)
chn_wakeup(c);
}
-#if 0
-static void
-chn_rdupdate(struct pcm_channel *c)
-{
-
- CHN_LOCKASSERT(c);
- KASSERT(c->direction == PCMDIR_REC, ("chn_rdupdate on bad channel"));
-
- if ((c->flags & (CHN_F_MMAP | CHN_F_VIRTUAL)) || CHN_STOPPED(c))
- return;
- chn_trigger(c, PCMTRIG_EMLDMARD);
- chn_dmaupdate(c);
- chn_rdfeed(c);
-}
-#endif
-
/* read interrupt routine. Must be called with interrupts blocked. */
static void
chn_rdintr(struct pcm_channel *c)
@@ -999,29 +963,38 @@ static const struct {
{ "mulaw", NULL, NULL, AFMT_MU_LAW },
{ "u8", "8", NULL, AFMT_U8 },
{ "s8", NULL, NULL, AFMT_S8 },
+ { "ac3", NULL, NULL, AFMT_AC3 },
#if BYTE_ORDER == LITTLE_ENDIAN
{ "s16le", "s16", "16", AFMT_S16_LE },
{ "s16be", NULL, NULL, AFMT_S16_BE },
-#else
- { "s16le", NULL, NULL, AFMT_S16_LE },
- { "s16be", "s16", "16", AFMT_S16_BE },
-#endif
- { "u16le", NULL, NULL, AFMT_U16_LE },
- { "u16be", NULL, NULL, AFMT_U16_BE },
- { "s24le", NULL, NULL, AFMT_S24_LE },
+ { "s24le", "s24", "24", AFMT_S24_LE },
{ "s24be", NULL, NULL, AFMT_S24_BE },
- { "u24le", NULL, NULL, AFMT_U24_LE },
- { "u24be", NULL, NULL, AFMT_U24_BE },
-#if BYTE_ORDER == LITTLE_ENDIAN
{ "s32le", "s32", "32", AFMT_S32_LE },
{ "s32be", NULL, NULL, AFMT_S32_BE },
+ { "f32le", "f32", NULL, AFMT_F32_LE },
+ { "f32be", NULL, NULL, AFMT_F32_BE },
+ { "u16le", "u16", NULL, AFMT_U16_LE },
+ { "u16be", NULL, NULL, AFMT_U16_BE },
+ { "u24le", "u24", NULL, AFMT_U24_LE },
+ { "u24be", NULL, NULL, AFMT_U24_BE },
+ { "u32le", "u32", NULL, AFMT_U32_LE },
+ { "u32be", NULL, NULL, AFMT_U32_BE },
#else
+ { "s16le", NULL, NULL, AFMT_S16_LE },
+ { "s16be", "s16", "16", AFMT_S16_BE },
+ { "s24le", NULL, NULL, AFMT_S24_LE },
+ { "s24be", "s24", "24", AFMT_S24_BE },
{ "s32le", NULL, NULL, AFMT_S32_LE },
{ "s32be", "s32", "32", AFMT_S32_BE },
-#endif
+ { "f32le", NULL, NULL, AFMT_F32_LE },
+ { "f32be", "f32", NULL, AFMT_F32_BE },
+ { "u16le", NULL, NULL, AFMT_U16_LE },
+ { "u16be", "u16", NULL, AFMT_U16_BE },
+ { "u24le", NULL, NULL, AFMT_U24_LE },
+ { "u24be", "u24", NULL, AFMT_U24_BE },
{ "u32le", NULL, NULL, AFMT_U32_LE },
- { "u32be", NULL, NULL, AFMT_U32_BE },
- { "ac3", NULL, NULL, AFMT_AC3 },
+ { "u32be", "u32", NULL, AFMT_U32_BE },
+#endif
{ NULL, NULL, NULL, 0 }
};
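The format-name table above is regrouped per byte order, moves the ac3 entry out of the endian-specific blocks, and adds float (f32) entries plus short aliases for the 24-bit and unsigned formats, so a bare alias such as "s24" or "u32" always resolves to the native-endian variant. A sketch of the lookup the table serves; the helper and field names here are illustrative, the real consumer being snd_str2afmt() declared in channel.h:

	static uint32_t
	afmt_lookup(const char *s)
	{
		int i;

		/* match the canonical name first, then the two aliases */
		for (i = 0; afmt_tab[i].name != NULL; i++) {
			if (strcasecmp(s, afmt_tab[i].name) == 0 ||
			    (afmt_tab[i].alias1 != NULL &&
			    strcasecmp(s, afmt_tab[i].alias1) == 0) ||
			    (afmt_tab[i].alias2 != NULL &&
			    strcasecmp(s, afmt_tab[i].alias2) == 0))
				return (afmt_tab[i].afmt);
		}
		return (0);	/* unknown; e.g. "s16" -> AFMT_S16_LE on LE hosts */
	}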
@@ -1157,89 +1130,171 @@ chn_reset(struct pcm_channel *c, uint32_t fmt, uint32_t spd)
return r;
}
-int
-chn_init(struct pcm_channel *c, void *devinfo, int dir, int direction)
+static struct unrhdr *
+chn_getunr(struct snddev_info *d, int type)
+{
+ switch (type) {
+ case PCMDIR_PLAY:
+ return (d->p_unr);
+ case PCMDIR_PLAY_VIRTUAL:
+ return (d->vp_unr);
+ case PCMDIR_REC:
+ return (d->r_unr);
+ case PCMDIR_REC_VIRTUAL:
+ return (d->vr_unr);
+ default:
+ __assert_unreachable();
+ }
+
+}
+
+char *
+chn_mkname(char *buf, size_t len, struct pcm_channel *c)
+{
+ const char *str;
+
+ KASSERT(buf != NULL && len != 0,
+ ("%s(): bogus buf=%p len=%zu", __func__, buf, len));
+
+ switch (c->type) {
+ case PCMDIR_PLAY:
+ str = "play";
+ break;
+ case PCMDIR_PLAY_VIRTUAL:
+ str = "virtual_play";
+ break;
+ case PCMDIR_REC:
+ str = "record";
+ break;
+ case PCMDIR_REC_VIRTUAL:
+ str = "virtual_record";
+ break;
+ default:
+ __assert_unreachable();
+ }
+
+ snprintf(buf, len, "dsp%d.%s.%d",
+ device_get_unit(c->dev), str, c->unit);
+
+ return (buf);
+}
+
+struct pcm_channel *
+chn_init(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls,
+ int dir, void *devinfo)
{
+ struct pcm_channel *c;
struct feeder_class *fc;
struct snd_dbuf *b, *bs;
- int i, ret;
+ char buf[CHN_NAMELEN];
+ int err, i, direction, *vchanrate, *vchanformat;
- chn_lockinit(c, dir);
+ PCM_BUSYASSERT(d);
+ PCM_LOCKASSERT(d);
+
+ switch (dir) {
+ case PCMDIR_PLAY:
+ d->playcount++;
+ /* FALLTHROUGH */
+ case PCMDIR_PLAY_VIRTUAL:
+ if (dir == PCMDIR_PLAY_VIRTUAL)
+ d->pvchancount++;
+ direction = PCMDIR_PLAY;
+ vchanrate = &d->pvchanrate;
+ vchanformat = &d->pvchanformat;
+ break;
+ case PCMDIR_REC:
+ d->reccount++;
+ /* FALLTHROUGH */
+ case PCMDIR_REC_VIRTUAL:
+ if (dir == PCMDIR_REC_VIRTUAL)
+ d->rvchancount++;
+ direction = PCMDIR_REC;
+ vchanrate = &d->rvchanrate;
+ vchanformat = &d->rvchanformat;
+ break;
+ default:
+ device_printf(d->dev,
+ "%s(): invalid channel direction: %d\n",
+ __func__, dir);
+ return (NULL);
+ }
+ PCM_UNLOCK(d);
b = NULL;
bs = NULL;
+
+ c = malloc(sizeof(*c), M_DEVBUF, M_WAITOK | M_ZERO);
+ c->methods = kobj_create(cls, M_DEVBUF, M_WAITOK | M_ZERO);
+ chn_lockinit(c, dir);
CHN_INIT(c, children);
CHN_INIT(c, children.busy);
- c->devinfo = NULL;
- c->feeder = NULL;
+ c->direction = direction;
+ c->type = dir;
+ c->unit = alloc_unr(chn_getunr(d, c->type));
+ c->format = SND_FORMAT(AFMT_S16_LE, 2, 0);
+ c->speed = 48000;
+ c->pid = -1;
c->latency = -1;
c->timeout = 1;
-
- ret = ENOMEM;
- b = sndbuf_create(c->dev, c->name, "primary", c);
- if (b == NULL)
- goto out;
- bs = sndbuf_create(c->dev, c->name, "secondary", c);
- if (bs == NULL)
- goto out;
-
- CHN_LOCK(c);
-
- ret = EINVAL;
- fc = feeder_getclass(NULL);
- if (fc == NULL)
- goto out;
- if (chn_addfeeder(c, fc, NULL))
- goto out;
-
- /*
- * XXX - sndbuf_setup() & sndbuf_resize() expect to be called
- * with the channel unlocked because they are also called
- * from driver methods that don't know about locking
- */
- CHN_UNLOCK(c);
- sndbuf_setup(bs, NULL, 0);
- CHN_LOCK(c);
- c->bufhard = b;
- c->bufsoft = bs;
- c->flags = 0;
- c->feederflags = 0;
- c->sm = NULL;
- c->format = SND_FORMAT(AFMT_U8, 1, 0);
- c->speed = DSP_DEFAULT_SPEED;
+ strlcpy(c->comm, CHN_COMM_UNUSED, sizeof(c->comm));
+ c->parentsnddev = d;
+ c->parentchannel = parent;
+ c->dev = d->dev;
+ c->trigger = PCMTRIG_STOP;
+ strlcpy(c->name, chn_mkname(buf, sizeof(buf), c), sizeof(c->name));
c->matrix = *feeder_matrix_id_map(SND_CHN_MATRIX_1_0);
c->matrix.id = SND_CHN_MATRIX_PCMCHANNEL;
- for (i = 0; i < SND_CHN_T_MAX; i++) {
+ for (i = 0; i < SND_CHN_T_MAX; i++)
c->volume[SND_VOL_C_MASTER][i] = SND_VOL_0DB_MASTER;
- }
c->volume[SND_VOL_C_MASTER][SND_CHN_T_VOL_0DB] = SND_VOL_0DB_MASTER;
c->volume[SND_VOL_C_PCM][SND_CHN_T_VOL_0DB] = chn_vol_0db_pcm;
- memset(c->muted, 0, sizeof(c->muted));
-
+ CHN_LOCK(c);
chn_vpc_reset(c, SND_VOL_C_PCM, 1);
+ CHN_UNLOCK(c);
- ret = ENODEV;
- CHN_UNLOCK(c); /* XXX - Unlock for CHANNEL_INIT() malloc() call */
- c->devinfo = CHANNEL_INIT(c->methods, devinfo, b, c, direction);
- CHN_LOCK(c);
- if (c->devinfo == NULL)
- goto out;
+ fc = feeder_getclass(NULL);
+ if (fc == NULL) {
+ device_printf(d->dev, "%s(): failed to get feeder class\n",
+ __func__);
+ goto fail;
+ }
+ if (feeder_add(c, fc, NULL)) {
+ device_printf(d->dev, "%s(): failed to add feeder\n", __func__);
+ goto fail;
+ }
- ret = ENOMEM;
- if ((sndbuf_getsize(b) == 0) && ((c->flags & CHN_F_VIRTUAL) == 0))
- goto out;
+ b = sndbuf_create(c->dev, c->name, "primary", c);
+ bs = sndbuf_create(c->dev, c->name, "secondary", c);
+ if (b == NULL || bs == NULL) {
+ device_printf(d->dev, "%s(): failed to create %s buffer\n",
+ __func__, b == NULL ? "hardware" : "software");
+ goto fail;
+ }
+ c->bufhard = b;
+ c->bufsoft = bs;
- ret = 0;
- c->direction = direction;
+ c->devinfo = CHANNEL_INIT(c->methods, devinfo, b, c, direction);
+ if (c->devinfo == NULL) {
+ device_printf(d->dev, "%s(): CHANNEL_INIT() failed\n", __func__);
+ goto fail;
+ }
+
+ if ((sndbuf_getsize(b) == 0) && ((c->flags & CHN_F_VIRTUAL) == 0)) {
+ device_printf(d->dev, "%s(): hardware buffer's size is 0\n",
+ __func__);
+ goto fail;
+ }
sndbuf_setfmt(b, c->format);
sndbuf_setspd(b, c->speed);
sndbuf_setfmt(bs, c->format);
sndbuf_setspd(bs, c->speed);
+ sndbuf_setup(bs, NULL, 0);
/**
* @todo Should this be moved somewhere else? The primary buffer
@@ -1248,53 +1303,80 @@ chn_init(struct pcm_channel *c, void *devinfo, int dir, int direction)
*/
if (c->direction == PCMDIR_PLAY) {
bs->sl = sndbuf_getmaxsize(bs);
- bs->shadbuf = malloc(bs->sl, M_DEVBUF, M_NOWAIT);
- if (bs->shadbuf == NULL) {
- ret = ENOMEM;
- goto out;
- }
+ bs->shadbuf = malloc(bs->sl, M_DEVBUF, M_WAITOK);
}
-out:
- CHN_UNLOCK(c);
- if (ret) {
- if (c->devinfo) {
- if (CHANNEL_FREE(c->methods, c->devinfo))
- sndbuf_free(b);
- }
- if (bs)
- sndbuf_destroy(bs);
- if (b)
- sndbuf_destroy(b);
+ if ((c->flags & CHN_F_VIRTUAL) == 0) {
CHN_LOCK(c);
- c->flags |= CHN_F_DEAD;
- chn_lockdestroy(c);
+ err = chn_reset(c, c->format, c->speed);
+ CHN_UNLOCK(c);
+ if (err != 0)
+ goto fail;
+ }
- return ret;
+ PCM_LOCK(d);
+ CHN_INSERT_SORT_ASCEND(d, c, channels.pcm);
+ if ((c->flags & CHN_F_VIRTUAL) == 0) {
+ CHN_INSERT_SORT_ASCEND(d, c, channels.pcm.primary);
+ /* Initialize the *vchanrate/vchanformat parameters. */
+ *vchanrate = sndbuf_getspd(c->bufsoft);
+ *vchanformat = sndbuf_getfmt(c->bufsoft);
}
- return 0;
+ return (c);
+
+fail:
+ chn_kill(c);
+ PCM_LOCK(d);
+
+ return (NULL);
}
void
chn_kill(struct pcm_channel *c)
{
+ struct snddev_info *d = c->parentsnddev;
struct snd_dbuf *b = c->bufhard;
struct snd_dbuf *bs = c->bufsoft;
PCM_BUSYASSERT(c->parentsnddev);
+ PCM_LOCK(d);
+ CHN_REMOVE(d, c, channels.pcm);
+ if ((c->flags & CHN_F_VIRTUAL) == 0)
+ CHN_REMOVE(d, c, channels.pcm.primary);
+
+ switch (c->type) {
+ case PCMDIR_PLAY:
+ d->playcount--;
+ break;
+ case PCMDIR_PLAY_VIRTUAL:
+ d->pvchancount--;
+ break;
+ case PCMDIR_REC:
+ d->reccount--;
+ break;
+ case PCMDIR_REC_VIRTUAL:
+ d->rvchancount--;
+ break;
+ default:
+ __assert_unreachable();
+ }
+ PCM_UNLOCK(d);
+
if (CHN_STARTED(c)) {
CHN_LOCK(c);
chn_trigger(c, PCMTRIG_ABORT);
CHN_UNLOCK(c);
}
- while (chn_removefeeder(c) == 0)
- ;
- if (CHANNEL_FREE(c->methods, c->devinfo))
+ free_unr(chn_getunr(d, c->type), c->unit);
+ feeder_remove(c);
+ if (c->devinfo && CHANNEL_FREE(c->methods, c->devinfo))
sndbuf_free(b);
- sndbuf_destroy(bs);
- sndbuf_destroy(b);
+ if (bs)
+ sndbuf_destroy(bs);
+ if (b)
+ sndbuf_destroy(b);
CHN_LOCK(c);
c->flags |= CHN_F_DEAD;
chn_lockdestroy(c);
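chn_init() now allocates the channel itself, names it via chn_mkname(), assigns a unit number, links it onto the device lists and returns the channel (or NULL) instead of filling in a caller-supplied structure; chn_kill() undoes all of that, including the unr and list bookkeeping. Judging by the PCM_BUSYASSERT()/PCM_LOCKASSERT() at entry and the PCM_LOCK() on both exit paths, the call protocol looks roughly like this sketch (caller names hypothetical, device assumed already marked busy):

	struct pcm_channel *ch;

	PCM_LOCK(d);	/* chn_init() is entered and left with the pcm lock */
	ch = chn_init(d, NULL, &foo_chan_class, PCMDIR_PLAY, devinfo);
	PCM_UNLOCK(d);
	if (ch == NULL)
		return (ENODEV);
	/* ... channel in use; teardown is symmetric ... */
	chn_kill(ch);	/* takes the pcm lock internally */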
@@ -1327,19 +1409,6 @@ chn_release(struct pcm_channel *c)
}
int
-chn_ref(struct pcm_channel *c, int ref)
-{
- PCM_BUSYASSERT(c->parentsnddev);
- CHN_LOCKASSERT(c);
- KASSERT((c->refcount + ref) >= 0,
- ("%s(): new refcount will be negative", __func__));
-
- c->refcount += ref;
-
- return (c->refcount);
-}
-
-int
chn_setvolume_multi(struct pcm_channel *c, int vc, int left, int right,
int center)
{
@@ -1895,12 +1964,6 @@ chn_resizebuf(struct pcm_channel *c, int latency,
hblksz -= hblksz % sndbuf_getalign(b);
-#if 0
- hblksz = sndbuf_getmaxsize(b) >> 1;
- hblksz -= hblksz % sndbuf_getalign(b);
- hblkcnt = 2;
-#endif
-
CHN_UNLOCK(c);
if (chn_usefrags == 0 ||
CHANNEL_SETFRAGMENTS(c->methods, c->devinfo,
@@ -1931,14 +1994,6 @@ chn_resizebuf(struct pcm_channel *c, int latency,
if (limit > CHN_2NDBUFMAXSIZE)
limit = CHN_2NDBUFMAXSIZE;
-#if 0
- while (limit > 0 && (sblksz * sblkcnt) > limit) {
- if (sblkcnt < 4)
- break;
- sblkcnt >>= 1;
- }
-#endif
-
while ((sblksz * sblkcnt) < limit)
sblkcnt <<= 1;
@@ -2052,27 +2107,23 @@ chn_setparam(struct pcm_channel *c, uint32_t format, uint32_t speed)
int
chn_setspeed(struct pcm_channel *c, uint32_t speed)
{
- uint32_t oldformat, oldspeed, format;
+ uint32_t oldformat, oldspeed;
int ret;
-#if 0
- /* XXX force 48k */
- if (c->format & AFMT_PASSTHROUGH)
- speed = AFMT_PASSTHROUGH_RATE;
-#endif
-
oldformat = c->format;
oldspeed = c->speed;
- format = oldformat;
- ret = chn_setparam(c, format, speed);
+ if (c->speed == speed)
+ return (0);
+
+ ret = chn_setparam(c, c->format, speed);
if (ret != 0) {
if (snd_verbose > 3)
device_printf(c->dev,
"%s(): Setting speed %d failed, "
"falling back to %d\n",
__func__, speed, oldspeed);
- chn_setparam(c, c->format, oldspeed);
+ chn_setparam(c, oldformat, oldspeed);
}
return (ret);
@@ -2081,7 +2132,7 @@ chn_setspeed(struct pcm_channel *c, uint32_t speed)
int
chn_setformat(struct pcm_channel *c, uint32_t format)
{
- uint32_t oldformat, oldspeed, speed;
+ uint32_t oldformat, oldspeed;
int ret;
/* XXX force stereo */
@@ -2092,9 +2143,11 @@ chn_setformat(struct pcm_channel *c, uint32_t format)
oldformat = c->format;
oldspeed = c->speed;
- speed = oldspeed;
- ret = chn_setparam(c, format, speed);
+ if (c->format == format)
+ return (0);
+
+ ret = chn_setparam(c, format, c->speed);
if (ret != 0) {
if (snd_verbose > 3)
device_printf(c->dev,
@@ -2182,7 +2235,7 @@ chn_syncstate(struct pcm_channel *c)
else
bass = ((bass & 0x7f) + ((bass >> 8) & 0x7f)) >> 1;
- f = chn_findfeeder(c, FEEDER_EQ);
+ f = feeder_find(c, FEEDER_EQ);
if (f != NULL) {
if (FEEDER_SET(f, FEEDEQ_TREBLE, treble) != 0)
device_printf(c->dev,
@@ -2222,44 +2275,46 @@ chn_trigger(struct pcm_channel *c, int go)
if (go == c->trigger)
return (0);
+ if (snd_verbose > 3) {
+ device_printf(c->dev, "%s() %s: calling go=0x%08x , "
+ "prev=0x%08x\n", __func__, c->name, go, c->trigger);
+ }
+
+ c->trigger = go;
ret = CHANNEL_TRIGGER(c->methods, c->devinfo, go);
if (ret != 0)
return (ret);
+ CHN_UNLOCK(c);
+ PCM_LOCK(d);
+ CHN_LOCK(c);
+
+ /*
+ * Do nothing if another thread set a different trigger while we had
+ * dropped the mutex.
+ */
+ if (go != c->trigger) {
+ PCM_UNLOCK(d);
+ return (0);
+ }
+
+ /*
+ * Use the SAFE variants to prevent inserting/removing an already
+ * existing/missing element.
+ */
switch (go) {
case PCMTRIG_START:
- if (snd_verbose > 3)
- device_printf(c->dev,
- "%s() %s: calling go=0x%08x , "
- "prev=0x%08x\n", __func__, c->name, go,
- c->trigger);
- if (c->trigger != PCMTRIG_START) {
- c->trigger = go;
- CHN_UNLOCK(c);
- PCM_LOCK(d);
- CHN_INSERT_HEAD(d, c, channels.pcm.busy);
- PCM_UNLOCK(d);
- CHN_LOCK(c);
- chn_syncstate(c);
- }
+ CHN_INSERT_HEAD_SAFE(d, c, channels.pcm.busy);
+ PCM_UNLOCK(d);
+ chn_syncstate(c);
break;
case PCMTRIG_STOP:
case PCMTRIG_ABORT:
- if (snd_verbose > 3)
- device_printf(c->dev,
- "%s() %s: calling go=0x%08x , "
- "prev=0x%08x\n", __func__, c->name, go,
- c->trigger);
- if (c->trigger == PCMTRIG_START) {
- c->trigger = go;
- CHN_UNLOCK(c);
- PCM_LOCK(d);
- CHN_REMOVE(d, c, channels.pcm.busy);
- PCM_UNLOCK(d);
- CHN_LOCK(c);
- }
+ CHN_REMOVE(d, c, channels.pcm.busy);
+ PCM_UNLOCK(d);
break;
default:
+ PCM_UNLOCK(d);
break;
}
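The chn_trigger() rework above commits c->trigger before calling into the driver, then drops the channel lock so the device lock can be taken in the correct order, and re-checks the trigger afterwards to detect a racing caller; the SAFE insert variant makes the busy-list update idempotent. The lock dance in isolation:

	/* commit, then reorder locks: CHN must be dropped before taking PCM */
	c->trigger = go;
	CHN_UNLOCK(c);
	PCM_LOCK(d);
	CHN_LOCK(c);
	if (go != c->trigger) {	/* a concurrent chn_trigger() superseded us */
		PCM_UNLOCK(d);
		return (0);
	}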
@@ -2324,7 +2379,7 @@ chn_notify(struct pcm_channel *c, u_int32_t flags)
CHN_LOCKASSERT(c);
if (CHN_EMPTY(c, children))
- return (ENODEV);
+ return (0);
err = 0;
diff --git a/sys/dev/sound/pcm/channel.h b/sys/dev/sound/pcm/channel.h
index 698a1186924f..9ad21d219001 100644
--- a/sys/dev/sound/pcm/channel.h
+++ b/sys/dev/sound/pcm/channel.h
@@ -5,6 +5,10 @@
* Portions Copyright (c) Ryan Beasley <ryan.beasley@gmail.com> - GSoC 2006
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -84,7 +88,6 @@ struct pcm_channel {
kobj_t methods;
pid_t pid;
- int refcount;
struct pcm_feeder *feeder;
u_int32_t align;
@@ -118,6 +121,8 @@ struct pcm_channel {
* lock.
*/
unsigned int inprog;
+ /* Increment/decrement around cv_timedwait_sig() in chn_sleep(). */
+ unsigned int sleeping;
/**
* Special channel operations should examine @c inprog after acquiring
* lock. If zero, operations may continue. Else, thread should
@@ -159,6 +164,9 @@ struct pcm_channel {
struct {
SLIST_ENTRY(pcm_channel) link;
} opened;
+ struct {
+ SLIST_ENTRY(pcm_channel) link;
+ } primary;
} pcm;
} channels;
@@ -167,8 +175,6 @@ struct pcm_channel {
int16_t volume[SND_VOL_C_MAX][SND_CHN_T_VOL_MAX];
int8_t muted[SND_VOL_C_MAX][SND_CHN_T_VOL_MAX];
-
- void *data1, *data2;
};
#define CHN_HEAD(x, y) &(x)->y.head
@@ -176,6 +182,7 @@ struct pcm_channel {
#define CHN_LINK(y) y.link
#define CHN_EMPTY(x, y) SLIST_EMPTY(CHN_HEAD(x, y))
#define CHN_FIRST(x, y) SLIST_FIRST(CHN_HEAD(x, y))
+#define CHN_NEXT(elm, list) SLIST_NEXT((elm), CHN_LINK(list))
#define CHN_FOREACH(x, y, z) \
SLIST_FOREACH(x, CHN_HEAD(y, z), CHN_LINK(z))
@@ -189,9 +196,6 @@ struct pcm_channel {
#define CHN_INSERT_AFTER(x, y, z) \
SLIST_INSERT_AFTER(x, y, CHN_LINK(z))
-#define CHN_REMOVE(x, y, z) \
- SLIST_REMOVE(CHN_HEAD(x, z), y, pcm_channel, CHN_LINK(z))
-
#define CHN_INSERT_HEAD_SAFE(x, y, z) do { \
struct pcm_channel *t = NULL; \
CHN_FOREACH(t, x, z) { \
@@ -212,20 +216,25 @@ struct pcm_channel {
CHN_INSERT_AFTER(x, y, z); \
} while (0)
-#define CHN_REMOVE_SAFE(x, y, z) do { \
- struct pcm_channel *t = NULL; \
- CHN_FOREACH(t, x, z) { \
- if (t == y) \
- break; \
- } \
- if (t == y) \
- CHN_REMOVE(x, y, z); \
+#define CHN_REMOVE(holder, elm, list) do { \
+ if (CHN_FIRST(holder, list) == (elm)) { \
+ SLIST_REMOVE_HEAD(CHN_HEAD(holder, list), CHN_LINK(list)); \
+ } else { \
+ struct pcm_channel *t = NULL; \
+ CHN_FOREACH(t, holder, list) { \
+ if (CHN_NEXT(t, list) == (elm)) { \
+ SLIST_REMOVE_AFTER(t, CHN_LINK(list)); \
+ break; \
+ } \
+ } \
+ } \
} while (0)
#define CHN_INSERT_SORT(w, x, y, z) do { \
struct pcm_channel *t, *a = NULL; \
CHN_FOREACH(t, x, z) { \
- if ((y)->type w t->type) \
+ if (((y)->type w t->type) || \
+ (((y)->type == t->type) && ((y)->unit w t->unit))) \
a = t; \
else \
break; \
@@ -236,7 +245,7 @@ struct pcm_channel {
CHN_INSERT_HEAD(x, y, z); \
} while (0)
-#define CHN_INSERT_SORT_ASCEND(x, y, z) CHN_INSERT_SORT(>=, x, y, z)
+#define CHN_INSERT_SORT_ASCEND(x, y, z) CHN_INSERT_SORT(>, x, y, z)
#define CHN_INSERT_SORT_DESCEND(x, y, z) CHN_INSERT_SORT(<, x, y, z)
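CHN_REMOVE() now subsumes the old CHN_REMOVE_SAFE(): a channel that is neither the list head nor found by the CHN_NEXT() walk simply falls out of the loop with nothing removed. CHN_INSERT_SORT() additionally breaks type ties by unit, which is why the ascending variant switches from >= to >. The resulting ordering predicate, written out as a function purely for clarity:

	/* "a sorts before b" for CHN_INSERT_SORT_ASCEND after this change */
	static inline int
	chn_sorts_before(const struct pcm_channel *a, const struct pcm_channel *b)
	{
		return (a->type < b->type ||
		    (a->type == b->type && a->unit < b->unit));
	}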
#define CHN_BUF_PARENT(x, y) \
@@ -244,11 +253,6 @@ struct pcm_channel {
(x)->parentchannel->bufhard != NULL) ? \
(x)->parentchannel->bufhard : (y))
-#define CHN_BROADCAST(x) do { \
- if ((x)->cv_waiters != 0) \
- cv_broadcastpri(x, PRIBIO); \
-} while (0)
-
#include "channel_if.h"
int chn_reinit(struct pcm_channel *c);
@@ -259,11 +263,12 @@ int chn_sync(struct pcm_channel *c, int threshold);
int chn_flush(struct pcm_channel *c);
int chn_poll(struct pcm_channel *c, int ev, struct thread *td);
-int chn_init(struct pcm_channel *c, void *devinfo, int dir, int direction);
+char *chn_mkname(char *buf, size_t len, struct pcm_channel *c);
+struct pcm_channel *chn_init(struct snddev_info *d, struct pcm_channel *parent,
+ kobj_class_t cls, int dir, void *devinfo);
void chn_kill(struct pcm_channel *c);
void chn_shutdown(struct pcm_channel *c);
int chn_release(struct pcm_channel *c);
-int chn_ref(struct pcm_channel *c, int ref);
int chn_reset(struct pcm_channel *c, u_int32_t fmt, u_int32_t spd);
int chn_setvolume_multi(struct pcm_channel *c, int vc, int left, int right,
int center);
@@ -321,6 +326,8 @@ int chn_getpeaks(struct pcm_channel *c, int *lpeak, int *rpeak);
#define CHN_LOCKASSERT(c) mtx_assert((c)->lock, MA_OWNED)
#define CHN_UNLOCKASSERT(c) mtx_assert((c)->lock, MA_NOTOWNED)
+#define CHN_BROADCAST(x) cv_broadcastpri(x, PRIBIO)
+
int snd_fmtvalid(uint32_t fmt, uint32_t *fmtlist);
uint32_t snd_str2afmt(const char *);
@@ -333,10 +340,12 @@ extern int chn_latency_profile;
extern int report_soft_formats;
extern int report_soft_matrix;
-#define PCMDIR_PLAY 1
-#define PCMDIR_PLAY_VIRTUAL 2
-#define PCMDIR_REC -1
-#define PCMDIR_REC_VIRTUAL -2
+enum {
+ PCMDIR_PLAY = 1,
+ PCMDIR_PLAY_VIRTUAL,
+ PCMDIR_REC,
+ PCMDIR_REC_VIRTUAL,
+};
#define PCMTRIG_START 1
#define PCMTRIG_EMLDMAWR 2
@@ -353,7 +362,7 @@ extern int report_soft_matrix;
#define CHN_F_RUNNING 0x00000004 /* dma is running */
#define CHN_F_TRIGGERED 0x00000008
#define CHN_F_NOTRIGGER 0x00000010
-#define CHN_F_SLEEPING 0x00000020
+/* unused 0x00000020 */
#define CHN_F_NBIO 0x00000040 /* do non-blocking i/o */
#define CHN_F_MMAP 0x00000080 /* has been mmap()ed */
@@ -361,7 +370,7 @@ extern int report_soft_matrix;
#define CHN_F_BUSY 0x00000100 /* has been opened */
#define CHN_F_DIRTY 0x00000200 /* need re-config */
#define CHN_F_DEAD 0x00000400 /* too many errors, dead, mdk */
-#define CHN_F_SILENCE 0x00000800 /* silence, nil, null, yada */
+/* unused 0x00000800 */
#define CHN_F_HAS_SIZE 0x00001000 /* user set block size */
#define CHN_F_HAS_VCHAN 0x00002000 /* vchan master */
@@ -381,13 +390,13 @@ extern int report_soft_matrix;
"\003RUNNING" \
"\004TRIGGERED" \
"\005NOTRIGGER" \
- "\006SLEEPING" \
+ /* \006 */ \
"\007NBIO" \
"\010MMAP" \
"\011BUSY" \
"\012DIRTY" \
"\013DEAD" \
- "\014SILENCE" \
+ /* \014 */ \
"\015HAS_SIZE" \
"\016HAS_VCHAN" \
"\017VCHAN_PASSTHROUGH" \
@@ -399,7 +408,7 @@ extern int report_soft_matrix;
#define CHN_F_RESET (CHN_F_BUSY | CHN_F_DEAD | \
CHN_F_VIRTUAL | CHN_F_HAS_VCHAN | \
- CHN_F_VCHAN_DYNAMIC | \
+ CHN_F_VCHAN_DYNAMIC | CHN_F_NBIO | \
CHN_F_PASSTHROUGH | CHN_F_EXCLUSIVE)
#define CHN_F_MMAP_INVALID (CHN_F_DEAD | CHN_F_RUNNING)
diff --git a/sys/dev/sound/pcm/dsp.c b/sys/dev/sound/pcm/dsp.c
index 9c31fff7e8cd..fe5576baf017 100644
--- a/sys/dev/sound/pcm/dsp.c
+++ b/sys/dev/sound/pcm/dsp.c
@@ -5,7 +5,7 @@
* Portions Copyright (c) Ryan Beasley <ryan.beasley@gmail.com> - GSoC 2006
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
- * Copyright (c) 2024 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
*
* Portions of this software were developed by Christos Margiolis
* <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
@@ -37,6 +37,7 @@
#endif
#include <dev/sound/pcm/sound.h>
+#include <dev/sound/pcm/vchan.h>
#include <sys/ctype.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
@@ -51,8 +52,6 @@ struct dsp_cdevpriv {
struct snddev_info *sc;
struct pcm_channel *rdch;
struct pcm_channel *wrch;
- struct pcm_channel *volch;
- int simplex;
};
static int dsp_mmap_allow_prot_exec = 0;
@@ -67,6 +66,12 @@ SYSCTL_INT(_hw_snd, OID_AUTO, basename_clone, CTLFLAG_RWTUN,
#define DSP_REGISTERED(x) (PCM_REGISTERED(x) && (x)->dsp_dev != NULL)
+#define DSP_F_VALID(x) ((x) & (FREAD | FWRITE))
+#define DSP_F_DUPLEX(x) (((x) & (FREAD | FWRITE)) == (FREAD | FWRITE))
+#define DSP_F_SIMPLEX(x) (!DSP_F_DUPLEX(x))
+#define DSP_F_READ(x) ((x) & FREAD)
+#define DSP_F_WRITE(x) ((x) & FWRITE)
+
#define OLDPCM_IOCTL
static d_open_t dsp_open;
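The DSP_F_* helpers above (hoisted out of dsp.c's middle, see the removal further down) classify the open(2) flags once, replacing the SD_F_PRIO_* juggling. Evaluated over the possible combinations:

	flags          VALID  DUPLEX  SIMPLEX  READ  WRITE
	FREAD          yes    no      yes      yes   no
	FWRITE         yes    no      yes      no    yes
	FREAD|FWRITE   yes    yes     no       yes   yes
	(neither)      no     no      yes      no    no

Note that DSP_F_SIMPLEX() is merely !DSP_F_DUPLEX(), so it also holds for an invalid open; dsp_open() guards with DSP_F_VALID() first.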
@@ -138,147 +143,116 @@ dsp_destroy_dev(device_t dev)
struct snddev_info *d;
d = device_get_softc(dev);
- destroy_dev_sched(d->dsp_dev);
+ destroy_dev(d->dsp_dev);
}
static void
-getchns(struct dsp_cdevpriv *priv, uint32_t prio)
+dsp_lock_chans(struct dsp_cdevpriv *priv, uint32_t prio)
{
- struct snddev_info *d;
- struct pcm_channel *ch;
- uint32_t flags;
-
- if (priv->simplex) {
- d = priv->sc;
- if (!PCM_REGISTERED(d))
- return;
- PCM_LOCK(d);
- PCM_WAIT(d);
- PCM_ACQUIRE(d);
- /*
- * Note: order is important -
- * pcm flags -> prio query flags -> wild guess
- */
- ch = NULL;
- flags = pcm_getflags(d->dev);
- if (flags & SD_F_PRIO_WR) {
- ch = priv->rdch;
- } else if (flags & SD_F_PRIO_RD) {
- ch = priv->wrch;
- } else if (prio & SD_F_PRIO_WR) {
- ch = priv->rdch;
- flags |= SD_F_PRIO_WR;
- } else if (prio & SD_F_PRIO_RD) {
- ch = priv->wrch;
- flags |= SD_F_PRIO_RD;
- } else if (priv->wrch != NULL) {
- ch = priv->rdch;
- flags |= SD_F_PRIO_WR;
- } else if (priv->rdch != NULL) {
- ch = priv->wrch;
- flags |= SD_F_PRIO_RD;
- }
- pcm_setflags(d->dev, flags);
- if (ch != NULL) {
- CHN_LOCK(ch);
- chn_ref(ch, -1);
- chn_release(ch);
- }
- PCM_RELEASE(d);
- PCM_UNLOCK(d);
- }
-
- if (priv->rdch != NULL && (prio & SD_F_PRIO_RD))
+ if (priv->rdch != NULL && DSP_F_READ(prio))
CHN_LOCK(priv->rdch);
- if (priv->wrch != NULL && (prio & SD_F_PRIO_WR))
+ if (priv->wrch != NULL && DSP_F_WRITE(prio))
CHN_LOCK(priv->wrch);
}
static void
-relchns(struct dsp_cdevpriv *priv, uint32_t prio)
+dsp_unlock_chans(struct dsp_cdevpriv *priv, uint32_t prio)
{
- if (priv->rdch != NULL && (prio & SD_F_PRIO_RD))
+ if (priv->rdch != NULL && DSP_F_READ(prio))
CHN_UNLOCK(priv->rdch);
- if (priv->wrch != NULL && (prio & SD_F_PRIO_WR))
+ if (priv->wrch != NULL && DSP_F_WRITE(prio))
CHN_UNLOCK(priv->wrch);
}
-/* duplex / simplex cdev type */
-enum {
- DSP_CDEV_TYPE_RDONLY, /* simplex read-only (record) */
- DSP_CDEV_TYPE_WRONLY, /* simplex write-only (play) */
- DSP_CDEV_TYPE_RDWR /* duplex read, write, or both */
-};
+static int
+dsp_chn_alloc(struct snddev_info *d, struct pcm_channel **ch, int direction,
+ int flags, struct thread *td)
+{
+ struct pcm_channel *c;
+ char *comm;
+ pid_t pid;
+ int err;
+ bool vdir_enabled;
-#define DSP_F_VALID(x) ((x) & (FREAD | FWRITE))
-#define DSP_F_DUPLEX(x) (((x) & (FREAD | FWRITE)) == (FREAD | FWRITE))
-#define DSP_F_SIMPLEX(x) (!DSP_F_DUPLEX(x))
-#define DSP_F_READ(x) ((x) & FREAD)
-#define DSP_F_WRITE(x) ((x) & FWRITE)
+ KASSERT(d != NULL && ch != NULL &&
+ (direction == PCMDIR_PLAY || direction == PCMDIR_REC),
+ ("%s(): invalid d=%p ch=%p direction=%d",
+ __func__, d, ch, direction));
+ PCM_BUSYASSERT(d);
-static const struct {
- int type;
- char *name;
- char *sep;
- char *alias;
- int use_sep;
- int hw;
- int max;
- int volctl;
- uint32_t fmt, spd;
- int query;
-} dsp_cdevs[] = {
- { SND_DEV_DSP, "dsp", ".", NULL, 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_AUDIO, "audio", ".", NULL, 0, 0, 0, 0,
- SND_FORMAT(AFMT_MU_LAW, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSP16, "dspW", ".", NULL, 0, 0, 0, 0,
- SND_FORMAT(AFMT_S16_LE, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSPHW_PLAY, "dsp", ".p", NULL, 1, 1, SND_MAXHWCHAN, 1,
- SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_WRONLY },
- { SND_DEV_DSPHW_VPLAY, "dsp", ".vp", NULL, 1, 1, SND_MAXVCHANS, 1,
- SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_WRONLY },
- { SND_DEV_DSPHW_REC, "dsp", ".r", NULL, 1, 1, SND_MAXHWCHAN, 1,
- SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_RDONLY },
- { SND_DEV_DSPHW_VREC, "dsp", ".vr", NULL, 1, 1, SND_MAXVCHANS, 1,
- SND_FORMAT(AFMT_S16_LE, 2, 0), 48000, DSP_CDEV_TYPE_RDONLY },
- { SND_DEV_DSPHW_CD, "dspcd", ".", NULL, 0, 0, 0, 0,
- SND_FORMAT(AFMT_S16_LE, 2, 0), 44100, DSP_CDEV_TYPE_RDWR },
- /* Low priority, OSSv4 aliases. */
- { SND_DEV_DSP, "dsp_ac3", ".", "dsp", 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSP, "dsp_mmap", ".", "dsp", 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSP, "dsp_multich", ".", "dsp", 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSP, "dsp_spdifout", ".", "dsp", 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
- { SND_DEV_DSP, "dsp_spdifin", ".", "dsp", 0, 0, 0, 0,
- SND_FORMAT(AFMT_U8, 1, 0), DSP_DEFAULT_SPEED,
- DSP_CDEV_TYPE_RDWR },
-};
+ pid = td->td_proc->p_pid;
+ comm = td->td_proc->p_comm;
+
+ vdir_enabled = (direction == PCMDIR_PLAY && d->flags & SD_F_PVCHANS) ||
+ (direction == PCMDIR_REC && d->flags & SD_F_RVCHANS);
+
+ *ch = NULL;
+ CHN_FOREACH(c, d, channels.pcm.primary) {
+ CHN_LOCK(c);
+ if (c->direction != direction) {
+ CHN_UNLOCK(c);
+ continue;
+ }
+ /* Find an available primary channel to use. */
+ if ((c->flags & CHN_F_BUSY) == 0 ||
+ (vdir_enabled && (c->flags & CHN_F_HAS_VCHAN)))
+ break;
+ CHN_UNLOCK(c);
+ }
+ if (c == NULL)
+ return (EBUSY);
+
+ /*
+ * We can have the following cases:
+ * - vchans are enabled, add a new vchan to the primary channel.
+ * - vchans are disabled, use the primary channel directly.
+ */
+ if (vdir_enabled && ((c->flags & CHN_F_BUSY) == 0 ||
+ c->flags & CHN_F_HAS_VCHAN)) {
+ err = vchan_create(c, ch);
+ CHN_UNLOCK(c);
+ if (err != 0)
+ return (err);
+ CHN_LOCK(*ch);
+ } else if ((c->flags & CHN_F_BUSY) == 0) {
+ *ch = c;
+ } else {
+ CHN_UNLOCK(c);
+ return (ENODEV);
+ }
+
+ (*ch)->flags |= CHN_F_BUSY;
+ if (flags & O_NONBLOCK)
+ (*ch)->flags |= CHN_F_NBIO;
+ if (flags & O_EXCL)
+ (*ch)->flags |= CHN_F_EXCLUSIVE;
+ (*ch)->pid = pid;
+ strlcpy((*ch)->comm, (comm != NULL) ? comm : CHN_COMM_UNKNOWN,
+ sizeof((*ch)->comm));
+
+ if ((err = chn_reset(*ch, (*ch)->format, (*ch)->speed)) != 0)
+ return (err);
+ chn_vpc_reset(*ch, SND_VOL_C_PCM, 0);
+
+ CHN_UNLOCK(*ch);
+
+ return (0);
+}
static void
dsp_close(void *data)
{
struct dsp_cdevpriv *priv = data;
- struct pcm_channel *rdch, *wrch, *volch;
+ struct pcm_channel *rdch, *wrch, *parent;
struct snddev_info *d;
- int sg_ids, rdref, wdref;
+ int sg_ids;
if (priv == NULL)
return;
d = priv->sc;
/* At this point pcm_unregister() will destroy all channels anyway. */
- if (PCM_DETACHING(d))
+ if (!DSP_REGISTERED(d))
goto skip;
PCM_GIANT_ENTER(d);
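dsp_chn_alloc() in the hunk above centralizes what dsp_open() previously did through pcm_chnalloc(): walk the primary channels of the requested direction and either hang a new vchan off a free (or already vchan-mastering) primary, or hand out the primary itself when vchans are disabled for that direction. The selection policy, condensed into an illustrative helper:

	static int
	dsp_pick_channel(struct pcm_channel *c, bool vdir_enabled,
	    struct pcm_channel **ch)
	{
		if (vdir_enabled &&
		    ((c->flags & CHN_F_BUSY) == 0 || (c->flags & CHN_F_HAS_VCHAN)))
			return (vchan_create(c, ch));	/* add a vchan to the primary */
		if ((c->flags & CHN_F_BUSY) == 0) {
			*ch = c;			/* use the primary directly */
			return (0);
		}
		return (ENODEV);			/* busy, and no vchans allowed */
	}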
@@ -289,22 +263,6 @@ dsp_close(void *data)
rdch = priv->rdch;
wrch = priv->wrch;
- volch = priv->volch;
-
- rdref = -1;
- wdref = -1;
-
- if (volch != NULL) {
- if (volch == rdch)
- rdref--;
- else if (volch == wrch)
- wdref--;
- else {
- CHN_LOCK(volch);
- chn_ref(volch, -1);
- CHN_UNLOCK(volch);
- }
- }
if (rdch != NULL)
CHN_REMOVE(d, rdch, channels.pcm.opened);
@@ -331,13 +289,26 @@ dsp_close(void *data)
if (sg_ids != 0)
free_unr(pcmsg_unrhdr, sg_ids);
+ /*
+ * Go through the channel abort/flush path for both
+ * primary and virtual channels to ensure that, in the
+ * case of vchans, the stream is always properly
+ * stopped, and the primary channels do not keep being
+ * interrupted even if all vchans are gone.
+ */
CHN_LOCK(rdch);
- chn_ref(rdch, rdref);
chn_abort(rdch); /* won't sleep */
rdch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(rdch, 0, 0);
chn_release(rdch);
+ if (rdch->flags & CHN_F_VIRTUAL) {
+ parent = rdch->parentchannel;
+ CHN_LOCK(parent);
+ CHN_LOCK(rdch);
+ vchan_destroy(rdch);
+ CHN_UNLOCK(parent);
+ }
}
if (wrch != NULL) {
/*
@@ -350,12 +321,18 @@ dsp_close(void *data)
free_unr(pcmsg_unrhdr, sg_ids);
CHN_LOCK(wrch);
- chn_ref(wrch, wdref);
chn_flush(wrch); /* may sleep */
wrch->flags &= ~(CHN_F_RUNNING | CHN_F_MMAP |
- CHN_F_DEAD | CHN_F_EXCLUSIVE);
+ CHN_F_DEAD | CHN_F_EXCLUSIVE | CHN_F_NBIO);
chn_reset(wrch, 0, 0);
chn_release(wrch);
+ if (wrch->flags & CHN_F_VIRTUAL) {
+ parent = wrch->parentchannel;
+ CHN_LOCK(parent);
+ CHN_LOCK(wrch);
+ vchan_destroy(wrch);
+ CHN_UNLOCK(parent);
+ }
}
PCM_LOCK(d);
}
@@ -369,43 +346,27 @@ skip:
priv = NULL;
}
-#define DSP_FIXUP_ERROR() do { \
- prio = pcm_getflags(d->dev); \
- if (!DSP_F_VALID(flags)) \
- error = EINVAL; \
- if (!DSP_F_DUPLEX(flags) && \
- ((DSP_F_READ(flags) && d->reccount == 0) || \
- (DSP_F_WRITE(flags) && d->playcount == 0))) \
- error = ENOTSUP; \
- else if (!DSP_F_DUPLEX(flags) && (prio & SD_F_SIMPLEX) && \
- ((DSP_F_READ(flags) && (prio & SD_F_PRIO_WR)) || \
- (DSP_F_WRITE(flags) && (prio & SD_F_PRIO_RD)))) \
- error = EBUSY; \
-} while (0)
-
static int
dsp_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
{
struct dsp_cdevpriv *priv;
- struct pcm_channel *rdch, *wrch;
+ struct pcm_channel *ch;
struct snddev_info *d;
- uint32_t fmt, spd, prio;
- int error, rderror, wrerror;
+ int error, dir;
/* Kind of impossible.. */
if (i_dev == NULL || td == NULL)
return (ENODEV);
d = i_dev->si_drv1;
- if (PCM_DETACHING(d) || !PCM_REGISTERED(d))
+ if (!DSP_REGISTERED(d))
return (EBADF);
+ if (PCM_CHANCOUNT(d) >= PCM_MAXCHANS)
+ return (ENOMEM);
+
priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
priv->sc = d;
- priv->rdch = NULL;
- priv->wrch = NULL;
- priv->volch = NULL;
- priv->simplex = (pcm_getflags(d->dev) & SD_F_SIMPLEX) ? 1 : 0;
error = devfs_set_cdevpriv(priv, dsp_close);
if (error != 0)
@@ -418,7 +379,42 @@ dsp_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
PCM_WAIT(d);
error = 0;
- DSP_FIXUP_ERROR();
+ if (!DSP_F_VALID(flags))
+ error = EINVAL;
+ else if (!DSP_F_DUPLEX(flags) &&
+ ((DSP_F_READ(flags) && d->reccount == 0) ||
+ (DSP_F_WRITE(flags) && d->playcount == 0)))
+ error = ENOTSUP;
+ if (pcm_getflags(d->dev) & SD_F_SIMPLEX) {
+ if (DSP_F_DUPLEX(flags)) {
+ /*
+ * If no channels are opened yet, and we request
+ * DUPLEX, limit to playback only, otherwise open one
+ * channel in a direction that already exists.
+ */
+ if (CHN_EMPTY(d, channels.pcm.opened)) {
+ if (d->playcount > 0)
+ flags &= ~FREAD;
+ else if (d->reccount > 0)
+ flags &= ~FWRITE;
+ } else {
+ ch = CHN_FIRST(d, channels.pcm.opened);
+ if (ch->direction == PCMDIR_PLAY)
+ flags &= ~FREAD;
+ else if (ch->direction == PCMDIR_REC)
+ flags &= ~FWRITE;
+ }
+ } else if (!CHN_EMPTY(d, channels.pcm.opened)) {
+ /*
+ * If we requested SIMPLEX, make sure we do not open a
+ * channel in the opposite direction.
+ */
+ ch = CHN_FIRST(d, channels.pcm.opened);
+ dir = DSP_F_READ(flags) ? PCMDIR_REC : PCMDIR_PLAY;
+ if (ch->direction != dir)
+ error = ENOTSUP;
+ }
+ }
if (error != 0) {
PCM_UNLOCK(d);
PCM_GIANT_EXIT(d);
@@ -433,101 +429,30 @@ dsp_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- fmt = SND_FORMAT(AFMT_U8, 1, 0);
- spd = DSP_DEFAULT_SPEED;
-
- rdch = NULL;
- wrch = NULL;
- rderror = 0;
- wrerror = 0;
-
- if (DSP_F_READ(flags)) {
- /* open for read */
- rderror = pcm_chnalloc(d, &rdch, PCMDIR_REC,
- td->td_proc->p_pid, td->td_proc->p_comm);
-
- if (rderror == 0 && chn_reset(rdch, fmt, spd) != 0)
- rderror = ENXIO;
-
- if (rderror != 0) {
- if (rdch != NULL)
- chn_release(rdch);
- if (!DSP_F_DUPLEX(flags)) {
- PCM_RELEASE_QUICK(d);
- PCM_GIANT_EXIT(d);
- return (rderror);
- }
- rdch = NULL;
- } else {
- if (flags & O_NONBLOCK)
- rdch->flags |= CHN_F_NBIO;
- if (flags & O_EXCL)
- rdch->flags |= CHN_F_EXCLUSIVE;
- chn_ref(rdch, 1);
- chn_vpc_reset(rdch, SND_VOL_C_PCM, 0);
- CHN_UNLOCK(rdch);
- }
- }
-
if (DSP_F_WRITE(flags)) {
- /* open for write */
- wrerror = pcm_chnalloc(d, &wrch, PCMDIR_PLAY,
- td->td_proc->p_pid, td->td_proc->p_comm);
-
- if (wrerror == 0 && chn_reset(wrch, fmt, spd) != 0)
- wrerror = ENXIO;
-
- if (wrerror != 0) {
- if (wrch != NULL)
- chn_release(wrch);
- if (!DSP_F_DUPLEX(flags)) {
- if (rdch != NULL) {
- /*
- * Lock, deref and release previously
- * created record channel
- */
- CHN_LOCK(rdch);
- chn_ref(rdch, -1);
- chn_release(rdch);
- }
- PCM_RELEASE_QUICK(d);
- PCM_GIANT_EXIT(d);
- return (wrerror);
- }
- wrch = NULL;
- } else {
- if (flags & O_NONBLOCK)
- wrch->flags |= CHN_F_NBIO;
- if (flags & O_EXCL)
- wrch->flags |= CHN_F_EXCLUSIVE;
- chn_ref(wrch, 1);
- chn_vpc_reset(wrch, SND_VOL_C_PCM, 0);
- CHN_UNLOCK(wrch);
+ error = dsp_chn_alloc(d, &priv->wrch, PCMDIR_PLAY, flags, td);
+ if (error != 0) {
+ PCM_RELEASE_QUICK(d);
+ PCM_GIANT_EXIT(d);
+ return (error);
}
+ PCM_LOCK(d);
+ CHN_INSERT_HEAD(d, priv->wrch, channels.pcm.opened);
+ PCM_UNLOCK(d);
}
-
- PCM_LOCK(d);
-
- if (wrch == NULL && rdch == NULL) {
- PCM_RELEASE(d);
+ if (DSP_F_READ(flags)) {
+ error = dsp_chn_alloc(d, &priv->rdch, PCMDIR_REC, flags, td);
+ if (error != 0) {
+ PCM_RELEASE_QUICK(d);
+ PCM_GIANT_EXIT(d);
+ return (error);
+ }
+ PCM_LOCK(d);
+ CHN_INSERT_HEAD(d, priv->rdch, channels.pcm.opened);
PCM_UNLOCK(d);
- PCM_GIANT_EXIT(d);
- if (wrerror != 0)
- return (wrerror);
- if (rderror != 0)
- return (rderror);
- return (EINVAL);
}
- if (rdch != NULL)
- CHN_INSERT_HEAD(d, rdch, channels.pcm.opened);
- if (wrch != NULL)
- CHN_INSERT_HEAD(d, wrch, channels.pcm.opened);
- priv->rdch = rdch;
- priv->wrch = wrch;
-
- PCM_RELEASE(d);
- PCM_UNLOCK(d);
+ PCM_RELEASE_QUICK(d);
PCM_GIANT_LEAVE(d);
return (0);
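The rewritten dsp_open() above also changes the simplex policy in a way visible from userland: instead of the SD_F_PRIO_* bookkeeping, an O_RDWR open of an SD_F_SIMPLEX device is narrowed to one direction up front, and a later open in the opposite direction fails. A hedged userland illustration, assuming a simplex device that exposes both play and record channels, with no prior opens:

	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		/* narrowed to playback only; FREAD is stripped by dsp_open() */
		int duplex_fd = open("/dev/dsp", O_RDWR);
		/* opposite direction already taken: expected to fail, ENOTSUP */
		int rec_fd = open("/dev/dsp", O_RDONLY);

		if (rec_fd >= 0)
			close(rec_fd);
		if (duplex_fd >= 0)
			close(duplex_fd);
		return (0);
	}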
@@ -547,19 +472,19 @@ dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
("%s(): io train wreck!", __func__));
d = priv->sc;
- if (PCM_DETACHING(d) || !DSP_REGISTERED(d))
+ if (!DSP_REGISTERED(d))
return (EBADF);
PCM_GIANT_ENTER(d);
switch (buf->uio_rw) {
case UIO_READ:
- prio = SD_F_PRIO_RD;
+ prio = FREAD;
ch = &priv->rdch;
chn_io = chn_read;
break;
case UIO_WRITE:
- prio = SD_F_PRIO_WR;
+ prio = FWRITE;
ch = &priv->wrch;
chn_io = chn_write;
break;
@@ -570,18 +495,18 @@ dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
runpid = buf->uio_td->td_proc->p_pid;
- getchns(priv, prio);
+ dsp_lock_chans(priv, prio);
if (*ch == NULL || !((*ch)->flags & CHN_F_BUSY)) {
if (priv->rdch != NULL || priv->wrch != NULL)
- relchns(priv, prio);
+ dsp_unlock_chans(priv, prio);
PCM_GIANT_EXIT(d);
return (EBADF);
}
if (((*ch)->flags & (CHN_F_MMAP | CHN_F_DEAD)) ||
(((*ch)->flags & CHN_F_RUNNING) && (*ch)->pid != runpid)) {
- relchns(priv, prio);
+ dsp_unlock_chans(priv, prio);
PCM_GIANT_EXIT(d);
return (EINVAL);
} else if (!((*ch)->flags & CHN_F_RUNNING)) {
@@ -600,7 +525,7 @@ dsp_io_ops(struct dsp_cdevpriv *priv, struct uio *buf)
CHN_BROADCAST(&(*ch)->cv);
- relchns(priv, prio);
+ dsp_unlock_chans(priv, prio);
PCM_GIANT_LEAVE(d);
@@ -630,7 +555,7 @@ dsp_write(struct cdev *i_dev, struct uio *buf, int flag)
}
static int
-dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *volch,
+dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *ch,
u_long cmd, caddr_t arg)
{
struct snddev_info *d;
@@ -648,25 +573,19 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *volch,
rdch = priv->rdch;
wrch = priv->wrch;
- /* No specific channel, look into cache */
- if (volch == NULL)
- volch = priv->volch;
-
- /* Look harder */
- if (volch == NULL) {
+ if (ch == NULL) {
if (j == SOUND_MIXER_RECLEV && rdch != NULL)
- volch = rdch;
+ ch = rdch;
else if (j == SOUND_MIXER_PCM && wrch != NULL)
- volch = wrch;
+ ch = wrch;
}
- /* Final validation */
- if (volch == NULL)
+ if (ch == NULL)
return (EINVAL);
- CHN_LOCK(volch);
- if (!(volch->feederflags & (1 << FEEDER_VOLUME))) {
- CHN_UNLOCK(volch);
+ CHN_LOCK(ch);
+ if (!(ch->feederflags & (1 << FEEDER_VOLUME))) {
+ CHN_UNLOCK(ch);
return (EINVAL);
}
@@ -674,28 +593,28 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *volch,
case MIXER_WRITE(0):
switch (j) {
case SOUND_MIXER_MUTE:
- if (volch->direction == PCMDIR_REC) {
- chn_setmute_multi(volch, SND_VOL_C_PCM, (*(int *)arg & SOUND_MASK_RECLEV) != 0);
+ if (ch->direction == PCMDIR_REC) {
+ chn_setmute_multi(ch, SND_VOL_C_PCM, (*(int *)arg & SOUND_MASK_RECLEV) != 0);
} else {
- chn_setmute_multi(volch, SND_VOL_C_PCM, (*(int *)arg & SOUND_MASK_PCM) != 0);
+ chn_setmute_multi(ch, SND_VOL_C_PCM, (*(int *)arg & SOUND_MASK_PCM) != 0);
}
break;
case SOUND_MIXER_PCM:
- if (volch->direction != PCMDIR_PLAY)
+ if (ch->direction != PCMDIR_PLAY)
break;
left = *(int *)arg & 0x7f;
right = ((*(int *)arg) >> 8) & 0x7f;
center = (left + right) >> 1;
- chn_setvolume_multi(volch, SND_VOL_C_PCM,
+ chn_setvolume_multi(ch, SND_VOL_C_PCM,
left, right, center);
break;
case SOUND_MIXER_RECLEV:
- if (volch->direction != PCMDIR_REC)
+ if (ch->direction != PCMDIR_REC)
break;
left = *(int *)arg & 0x7f;
right = ((*(int *)arg) >> 8) & 0x7f;
center = (left + right) >> 1;
- chn_setvolume_multi(volch, SND_VOL_C_PCM,
+ chn_setvolume_multi(ch, SND_VOL_C_PCM,
left, right, center);
break;
default:
@@ -707,34 +626,34 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *volch,
case MIXER_READ(0):
switch (j) {
case SOUND_MIXER_MUTE:
- mute = CHN_GETMUTE(volch, SND_VOL_C_PCM, SND_CHN_T_FL) ||
- CHN_GETMUTE(volch, SND_VOL_C_PCM, SND_CHN_T_FR);
- if (volch->direction == PCMDIR_REC) {
+ mute = CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FL) ||
+ CHN_GETMUTE(ch, SND_VOL_C_PCM, SND_CHN_T_FR);
+ if (ch->direction == PCMDIR_REC) {
*(int *)arg = mute << SOUND_MIXER_RECLEV;
} else {
*(int *)arg = mute << SOUND_MIXER_PCM;
}
break;
case SOUND_MIXER_PCM:
- if (volch->direction != PCMDIR_PLAY)
+ if (ch->direction != PCMDIR_PLAY)
break;
- *(int *)arg = CHN_GETVOLUME(volch,
+ *(int *)arg = CHN_GETVOLUME(ch,
SND_VOL_C_PCM, SND_CHN_T_FL);
- *(int *)arg |= CHN_GETVOLUME(volch,
+ *(int *)arg |= CHN_GETVOLUME(ch,
SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
break;
case SOUND_MIXER_RECLEV:
- if (volch->direction != PCMDIR_REC)
+ if (ch->direction != PCMDIR_REC)
break;
- *(int *)arg = CHN_GETVOLUME(volch,
+ *(int *)arg = CHN_GETVOLUME(ch,
SND_VOL_C_PCM, SND_CHN_T_FL);
- *(int *)arg |= CHN_GETVOLUME(volch,
+ *(int *)arg |= CHN_GETVOLUME(ch,
SND_VOL_C_PCM, SND_CHN_T_FR) << 8;
break;
case SOUND_MIXER_DEVMASK:
case SOUND_MIXER_CAPS:
case SOUND_MIXER_STEREODEVS:
- if (volch->direction == PCMDIR_REC)
+ if (ch->direction == PCMDIR_REC)
*(int *)arg = SOUND_MASK_RECLEV;
else
*(int *)arg = SOUND_MASK_PCM;
@@ -748,10 +667,47 @@ dsp_ioctl_channel(struct dsp_cdevpriv *priv, struct pcm_channel *volch,
default:
break;
}
- CHN_UNLOCK(volch);
+ CHN_UNLOCK(ch);
return (0);
}
+#ifdef COMPAT_FREEBSD32
+typedef struct _snd_chan_param32 {
+ uint32_t play_rate;
+ uint32_t rec_rate;
+ uint32_t play_format;
+ uint32_t rec_format;
+} snd_chan_param32;
+#define AIOGFMT32 _IOC_NEWTYPE(AIOGFMT, snd_chan_param32)
+#define AIOSFMT32 _IOC_NEWTYPE(AIOSFMT, snd_chan_param32)
+
+typedef struct _snd_capabilities32 {
+ uint32_t rate_min, rate_max;
+ uint32_t formats;
+ uint32_t bufsize;
+ uint32_t mixers;
+ uint32_t inputs;
+ uint16_t left, right;
+} snd_capabilities32;
+#define AIOGCAP32 _IOC_NEWTYPE(AIOGCAP, snd_capabilities32)
+
+typedef struct audio_errinfo32
+{
+ int32_t play_underruns;
+ int32_t rec_overruns;
+ uint32_t play_ptradjust;
+ uint32_t rec_ptradjust;
+ int32_t play_errorcount;
+ int32_t rec_errorcount;
+ int32_t play_lasterror;
+ int32_t rec_lasterror;
+ int32_t play_errorparm;
+ int32_t rec_errorparm;
+ int32_t filler[16];
+} audio_errinfo32;
+#define SNDCTL_DSP_GETERROR32 _IOC_NEWTYPE(SNDCTL_DSP_GETERROR, audio_errinfo32)
+#endif
+
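[Editorial note: the typedefs above follow the standard FreeBSD pattern for servicing 32-bit callers from a 64-bit kernel. A minimal sketch of that pattern, assuming a hypothetical FOOIOC ioctl that is not part of this driver:]

/*
 * Sketch only: the 32-bit layout gets its own ioctl number via
 * _IOC_NEWTYPE() so the size encoded in the command matches what a
 * 32-bit libc generates; the handler bounces through a native temporary.
 */
#include <sys/types.h>
#include <sys/ioccom.h>

struct foo { long value; };
struct foo32 { int32_t value; };
#define FOOIOC		_IOWR('F', 1, struct foo)
#ifdef COMPAT_FREEBSD32
#define FOOIOC32	_IOC_NEWTYPE(FOOIOC, struct foo32)
#endif

static int
foo_ioctl(u_long cmd, caddr_t arg)
{
	struct foo *p = (struct foo *)arg;
#ifdef COMPAT_FREEBSD32
	struct foo32 *p32 = (struct foo32 *)arg;
	struct foo tmp;

	if (cmd == FOOIOC32) {
		p = &tmp;
		p->value = p32->value;		/* copy in */
	}
#endif
	p->value++;				/* act on the native layout */
#ifdef COMPAT_FREEBSD32
	if (cmd == FOOIOC32)
		p32->value = p->value;		/* copy out */
#endif
	return (0);
}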
static int
dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct thread *td)
@@ -766,7 +722,7 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
return (err);
d = priv->sc;
- if (PCM_DETACHING(d) || !DSP_REGISTERED(d))
+ if (!DSP_REGISTERED(d))
return (EBADF);
PCM_GIANT_ENTER(d);
@@ -782,7 +738,7 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
PCM_GIANT_EXIT(d);
return (0);
}
- ret = dsp_ioctl_channel(priv, priv->volch, cmd, arg);
+ ret = dsp_ioctl_channel(priv, NULL, cmd, arg);
if (ret != -1) {
PCM_GIANT_EXIT(d);
return (ret);
@@ -815,9 +771,15 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
ret = sound_oss_card_info((oss_card_info *)arg);
break;
case SNDCTL_AUDIOINFO:
+ ret = dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg,
+ false);
+ break;
case SNDCTL_AUDIOINFO_EX:
+ ret = dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg,
+ true);
+ break;
case SNDCTL_ENGINEINFO:
- ret = dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg);
+ ret = dsp_oss_engineinfo(i_dev, (oss_audioinfo *)arg);
break;
case SNDCTL_MIXERINFO:
ret = mixer_oss_mixerinfo(i_dev, (oss_mixerinfo *)arg);
@@ -830,7 +792,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
return (ret);
}
- getchns(priv, 0);
rdch = priv->rdch;
wrch = priv->wrch;
@@ -905,9 +866,25 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
case AIOSFMT:
case AIOGFMT:
+#ifdef COMPAT_FREEBSD32
+ case AIOSFMT32:
+ case AIOGFMT32:
+#endif
{
snd_chan_param *p = (snd_chan_param *)arg;
+#ifdef COMPAT_FREEBSD32
+ snd_chan_param32 *p32 = (snd_chan_param32 *)arg;
+ snd_chan_param param;
+
+ if (cmd == AIOSFMT32) {
+ p = &param;
+ p->play_rate = p32->play_rate;
+ p->rec_rate = p32->rec_rate;
+ p->play_format = p32->play_format;
+ p->rec_format = p32->rec_format;
+ }
+#endif
if (cmd == AIOSFMT &&
((p->play_format != 0 && p->play_rate == 0) ||
(p->rec_format != 0 && p->rec_rate == 0))) {
@@ -948,15 +925,41 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
p->rec_format = 0;
}
PCM_RELEASE_QUICK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOSFMT32 || cmd == AIOGFMT32) {
+ p32->play_rate = p->play_rate;
+ p32->rec_rate = p->rec_rate;
+ p32->play_format = p->play_format;
+ p32->rec_format = p->rec_format;
+ }
+#endif
}
break;
case AIOGCAP: /* get capabilities */
+#ifdef COMPAT_FREEBSD32
+ case AIOGCAP32:
+#endif
{
snd_capabilities *p = (snd_capabilities *)arg;
struct pcmchan_caps *pcaps = NULL, *rcaps = NULL;
struct cdev *pdev;
-
+#ifdef COMPAT_FREEBSD32
+ snd_capabilities32 *p32 = (snd_capabilities32 *)arg;
+ snd_capabilities capabilities;
+
+ if (cmd == AIOGCAP32) {
+ p = &capabilities;
+ p->rate_min = p32->rate_min;
+ p->rate_max = p32->rate_max;
+ p->formats = p32->formats;
+ p->bufsize = p32->bufsize;
+ p->mixers = p32->mixers;
+ p->inputs = p32->inputs;
+ p->left = p32->left;
+ p->right = p32->right;
+ }
+#endif
PCM_LOCK(d);
if (rdch) {
CHN_LOCK(rdch);
@@ -989,6 +992,18 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
if (rdch)
CHN_UNLOCK(rdch);
PCM_UNLOCK(d);
+#ifdef COMPAT_FREEBSD32
+ if (cmd == AIOGCAP32) {
+ p32->rate_min = p->rate_min;
+ p32->rate_max = p->rate_max;
+ p32->formats = p->formats;
+ p32->bufsize = p->bufsize;
+ p32->mixers = p->mixers;
+ p32->inputs = p->inputs;
+ p32->left = p->left;
+ p32->right = p->right;
+ }
+#endif
}
break;
@@ -1354,7 +1369,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = wrch->bufsoft;
CHN_LOCK(wrch);
- /* XXX abusive DMA update: chn_wrupdate(wrch); */
a->bytes = sndbuf_getfree(bs);
a->fragments = a->bytes / sndbuf_getblksz(bs);
a->fragstotal = sndbuf_getblkcnt(bs);
@@ -1372,7 +1386,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = rdch->bufsoft;
CHN_LOCK(rdch);
- /* XXX abusive DMA update: chn_rdupdate(rdch); */
a->bytes = sndbuf_gettotal(bs);
a->blocks = sndbuf_getblocks(bs) - rdch->blocks;
a->ptr = sndbuf_getfreeptr(bs);
@@ -1390,7 +1403,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = wrch->bufsoft;
CHN_LOCK(wrch);
- /* XXX abusive DMA update: chn_wrupdate(wrch); */
a->bytes = sndbuf_gettotal(bs);
a->blocks = sndbuf_getblocks(bs) - wrch->blocks;
a->ptr = sndbuf_getreadyptr(bs);
@@ -1482,7 +1494,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
struct snd_dbuf *bs = wrch->bufsoft;
CHN_LOCK(wrch);
- /* XXX abusive DMA update: chn_wrupdate(wrch); */
*arg_i = sndbuf_getready(bs);
CHN_UNLOCK(wrch);
} else
@@ -1679,14 +1690,8 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
CHN_LOCK(chn);
bs = chn->bufsoft;
-#if 0
- tmp = (sndbuf_getsize(b) + chn_getptr(chn) - sndbuf_gethwptr(b)) % sndbuf_getsize(b);
- oc->samples = (sndbuf_gettotal(b) + tmp) / sndbuf_getalign(b);
- oc->fifo_samples = (sndbuf_getready(b) - tmp) / sndbuf_getalign(b);
-#else
oc->samples = sndbuf_gettotal(bs) / sndbuf_getalign(bs);
oc->fifo_samples = sndbuf_getready(bs) / sndbuf_getalign(bs);
-#endif
CHN_UNLOCK(chn);
}
break;
@@ -1721,6 +1726,9 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
break;
case SNDCTL_DSP_GETERROR:
+#ifdef COMPAT_FREEBSD32
+ case SNDCTL_DSP_GETERROR32:
+#endif
/*
* OSSv4 docs: "All errors and counters will automatically be
* cleared to zeroes after the call so each call will return only
@@ -1730,6 +1738,14 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
*/
{
audio_errinfo *ei = (audio_errinfo *)arg;
+#ifdef COMPAT_FREEBSD32
+ audio_errinfo errinfo;
+ audio_errinfo32 *ei32 = (audio_errinfo32 *)arg;
+
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ ei = &errinfo;
+ }
+#endif
bzero((void *)ei, sizeof(*ei));
@@ -1745,6 +1761,21 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
rdch->xruns = 0;
CHN_UNLOCK(rdch);
}
+#ifdef COMPAT_FREEBSD32
+ if (cmd == SNDCTL_DSP_GETERROR32) {
+ bzero((void *)ei32, sizeof(*ei32));
+ ei32->play_underruns = ei->play_underruns;
+ ei32->rec_overruns = ei->rec_overruns;
+ ei32->play_ptradjust = ei->play_ptradjust;
+ ei32->rec_ptradjust = ei->rec_ptradjust;
+ ei32->play_errorcount = ei->play_errorcount;
+ ei32->rec_errorcount = ei->rec_errorcount;
+ ei32->play_lasterror = ei->play_lasterror;
+ ei32->rec_lasterror = ei->rec_lasterror;
+ ei32->play_errorparm = ei->play_errorparm;
+ ei32->rec_errorparm = ei->rec_errorparm;
+ }
+#endif
}
break;
@@ -1835,18 +1866,6 @@ dsp_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
case SNDCTL_SETNAME:
ret = dsp_oss_setname(wrch, rdch, (oss_longname_t *)arg);
break;
-#if 0
- /**
- * @note The S/PDIF interface ioctls, @c SNDCTL_DSP_READCTL and
- * @c SNDCTL_DSP_WRITECTL have been omitted at the suggestion of
- * 4Front Technologies.
- */
- case SNDCTL_DSP_READCTL:
- case SNDCTL_DSP_WRITECTL:
- ret = EINVAL;
- break;
-#endif /* !0 (explicitly omitted ioctls) */
-
#endif /* !OSSV4_EXPERIMENT */
case SNDCTL_DSP_MAPINBUF:
case SNDCTL_DSP_MAPOUTBUF:
@@ -1880,7 +1899,7 @@ dsp_poll(struct cdev *i_dev, int events, struct thread *td)
if ((err = devfs_get_cdevpriv((void **)&priv)) != 0)
return (err);
d = priv->sc;
- if (PCM_DETACHING(d) || !DSP_REGISTERED(d)) {
+ if (!DSP_REGISTERED(d)) {
/* XXX many clients don't understand POLLNVAL */
return (events & (POLLHUP | POLLPRI | POLLIN |
POLLRDNORM | POLLOUT | POLLWRNORM));
@@ -1889,7 +1908,7 @@ dsp_poll(struct cdev *i_dev, int events, struct thread *td)
ret = 0;
- getchns(priv, SD_F_PRIO_RD | SD_F_PRIO_WR);
+ dsp_lock_chans(priv, FREAD | FWRITE);
wrch = priv->wrch;
rdch = priv->rdch;
@@ -1905,7 +1924,7 @@ dsp_poll(struct cdev *i_dev, int events, struct thread *td)
ret |= chn_poll(rdch, e, td);
}
- relchns(priv, SD_F_PRIO_RD | SD_F_PRIO_WR);
+ dsp_unlock_chans(priv, FREAD | FWRITE);
PCM_GIANT_LEAVE(d);
@@ -1962,12 +1981,12 @@ dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset,
if ((err = devfs_get_cdevpriv((void **)&priv)) != 0)
return (err);
d = priv->sc;
- if (PCM_DETACHING(d) || !DSP_REGISTERED(d))
+ if (!DSP_REGISTERED(d))
return (EINVAL);
PCM_GIANT_ENTER(d);
- getchns(priv, SD_F_PRIO_RD | SD_F_PRIO_WR);
+ dsp_lock_chans(priv, FREAD | FWRITE);
wrch = priv->wrch;
rdch = priv->rdch;
@@ -1976,7 +1995,7 @@ dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset,
(*offset + size) > sndbuf_getallocsize(c->bufsoft) ||
(wrch != NULL && (wrch->flags & CHN_F_MMAP_INVALID)) ||
(rdch != NULL && (rdch->flags & CHN_F_MMAP_INVALID))) {
- relchns(priv, SD_F_PRIO_RD | SD_F_PRIO_WR);
+ dsp_unlock_chans(priv, FREAD | FWRITE);
PCM_GIANT_EXIT(d);
return (EINVAL);
}
@@ -1987,7 +2006,7 @@ dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset,
rdch->flags |= CHN_F_MMAP;
*offset = (uintptr_t)sndbuf_getbufofs(c->bufsoft, *offset);
- relchns(priv, SD_F_PRIO_RD | SD_F_PRIO_WR);
+ dsp_unlock_chans(priv, FREAD | FWRITE);
*object = vm_pager_allocate(OBJT_DEVICE, i_dev,
size, nprot, *offset, curthread->td_ucred);
@@ -1998,20 +2017,27 @@ dsp_mmap_single(struct cdev *i_dev, vm_ooffset_t *offset,
return (0);
}
+static const char *dsp_aliases[] = {
+ "dsp_ac3",
+ "dsp_mmap",
+ "dsp_multich",
+ "dsp_spdifout",
+ "dsp_spdifin",
+};
+
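[Editorial note: the alias list above is only effective once the clone handler is hooked into devfs. A one-line sketch of the assumed registration, which happens elsewhere in this file and is not shown in this hunk:]

	/* Assumed wiring; dev_clone is the standard devfs clone event. */
	eh_tag = EVENTHANDLER_REGISTER(dev_clone, dsp_clone, 0, 1000);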
static void
dsp_clone(void *arg, struct ucred *cred, char *name, int namelen,
struct cdev **dev)
{
struct snddev_info *d;
- int i;
+ size_t i;
if (*dev != NULL)
return;
if (strcmp(name, "dsp") == 0 && dsp_basename_clone)
goto found;
- for (i = 0; i < nitems(dsp_cdevs); i++) {
- if (dsp_cdevs[i].alias != NULL &&
- strcmp(name, dsp_cdevs[i].name) == 0)
+ for (i = 0; i < nitems(dsp_aliases); i++) {
+ if (strcmp(name, dsp_aliases[i]) == 0)
goto found;
}
return;
@@ -2024,7 +2050,7 @@ found:
* have returned already, meaning it will have set snd_unit to -1, and
* thus devclass_get_softc() will return NULL here.
*/
- if (d != NULL && PCM_REGISTERED(d) && d->dsp_dev != NULL) {
+ if (DSP_REGISTERED(d)) {
*dev = d->dsp_dev;
dev_ref(*dev);
}
@@ -2051,28 +2077,189 @@ dsp_sysuninit(void *p)
SYSINIT(dsp_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, dsp_sysinit, NULL);
SYSUNINIT(dsp_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, dsp_sysuninit, NULL);
-char *
-dsp_unit2name(char *buf, size_t len, struct pcm_channel *ch)
+static void
+dsp_oss_audioinfo_unavail(oss_audioinfo *ai, int unit)
{
- int i;
+ bzero(ai, sizeof(*ai));
+ ai->dev = unit;
+ snprintf(ai->name, sizeof(ai->name), "pcm%d (unavailable)", unit);
+ ai->pid = -1;
+ strlcpy(ai->cmd, CHN_COMM_UNUSED, sizeof(ai->cmd));
+ ai->card_number = unit;
+ ai->port_number = unit;
+ ai->mixer_dev = -1;
+ ai->legacy_device = unit;
+}
- KASSERT(buf != NULL && len != 0,
- ("bogus buf=%p len=%ju", buf, (uintmax_t)len));
+/**
+ * @brief Handler for SNDCTL_AUDIOINFO.
+ *
+ * Gathers information about the audio device specified in ai->dev. If
+ * ai->dev == -1, then this function gathers information about the current
+ * device. If the call comes in on a non-audio device and ai->dev == -1,
+ * return EINVAL.
+ *
+ * This routine is supposed to go practically straight to the hardware,
+ * getting capabilities directly from the sound card driver, side-stepping
+ * the intermediate channel interface.
+ *
+ * @note
+ * Calling threads must not hold any snddev_info or pcm_channel locks.
+ *
+ * @param	i_dev	device on which the ioctl was issued
+ * @param ai ioctl request data container
+ * @param	ex	flag to distinguish between SNDCTL_AUDIOINFO and
+ *			SNDCTL_AUDIOINFO_EX
+ *
+ * @retval 0 success
+ * @retval EINVAL ai->dev specifies an invalid device
+ */
+int
+dsp_oss_audioinfo(struct cdev *i_dev, oss_audioinfo *ai, bool ex)
+{
+ struct pcmchan_caps *caps;
+ struct pcm_channel *ch;
+ struct snddev_info *d;
+ uint32_t fmts;
+ int i, minch, maxch, unit;
- for (i = 0; i < nitems(dsp_cdevs); i++) {
- if (ch->type != dsp_cdevs[i].type || dsp_cdevs[i].alias != NULL)
+ /*
+ * If probing the device that received the ioctl, make sure it's a
+ * DSP device. (Users may use this ioctl with /dev/mixer and
+ * /dev/midi.)
+ */
+ if (ai->dev == -1 && i_dev->si_devsw != &dsp_cdevsw)
+ return (EINVAL);
+
+ bus_topo_lock();
+ for (unit = 0; pcm_devclass != NULL &&
+ unit < devclass_get_maxunit(pcm_devclass); unit++) {
+ d = devclass_get_softc(pcm_devclass, unit);
+ if (!PCM_REGISTERED(d)) {
+ if ((ai->dev == -1 && unit == snd_unit) ||
+ ai->dev == unit) {
+ dsp_oss_audioinfo_unavail(ai, unit);
+ bus_topo_unlock();
+ return (0);
+ } else {
+ d = NULL;
+ continue;
+ }
+ }
+
+ PCM_UNLOCKASSERT(d);
+ PCM_LOCK(d);
+ if ((ai->dev == -1 && d->dsp_dev == i_dev) ||
+ (ai->dev == unit)) {
+ PCM_UNLOCK(d);
+ break;
+ } else {
+ PCM_UNLOCK(d);
+ d = NULL;
+ }
+ }
+ bus_topo_unlock();
+
+ /* Exhausted the search -- nothing is locked, so return. */
+ if (d == NULL)
+ return (EINVAL);
+
+ /* XXX Need Giant magic entry ??? */
+
+ PCM_UNLOCKASSERT(d);
+ PCM_LOCK(d);
+
+ bzero((void *)ai, sizeof(oss_audioinfo));
+ ai->dev = unit;
+ strlcpy(ai->name, device_get_desc(d->dev), sizeof(ai->name));
+ ai->pid = -1;
+ strlcpy(ai->cmd, CHN_COMM_UNKNOWN, sizeof(ai->cmd));
+ ai->card_number = unit;
+ ai->port_number = unit;
+ ai->mixer_dev = (d->mixer_dev != NULL) ? unit : -1;
+ ai->legacy_device = unit;
+ snprintf(ai->devnode, sizeof(ai->devnode), "/dev/dsp%d", unit);
+ ai->enabled = device_is_attached(d->dev) ? 1 : 0;
+ ai->next_play_engine = 0;
+ ai->next_rec_engine = 0;
+ ai->busy = 0;
+ ai->caps = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER;
+ ai->iformats = 0;
+ ai->oformats = 0;
+ ai->min_rate = INT_MAX;
+ ai->max_rate = 0;
+ ai->min_channels = INT_MAX;
+ ai->max_channels = 0;
+
+ /* Gather global information about the device. */
+ CHN_FOREACH(ch, d, channels.pcm) {
+ CHN_UNLOCKASSERT(ch);
+ CHN_LOCK(ch);
+
+ /*
+ * Skip physical channels if we are servicing SNDCTL_AUDIOINFO,
+ * or VCHANs if we are servicing SNDCTL_AUDIOINFO_EX.
+ *
+ * For SNDCTL_AUDIOINFO do not skip the physical channels if
+ * there are no VCHANs.
+ */
+ if ((ex && (ch->flags & CHN_F_VIRTUAL) != 0) ||
+ ((!ex && (ch->flags & CHN_F_VIRTUAL) == 0) &&
+ (d->pvchancount > 0 || d->rvchancount > 0))) {
+ CHN_UNLOCK(ch);
continue;
- snprintf(buf, len, "%s%d%s%d",
- dsp_cdevs[i].name, device_get_unit(ch->dev),
- dsp_cdevs[i].sep, ch->unit);
- return (buf);
+ }
+
+		if ((ch->flags & CHN_F_BUSY) != 0) {
+			ai->busy |= (ch->direction == PCMDIR_PLAY) ?
+			    OPEN_WRITE : OPEN_READ;
+		}
+
+ ai->caps |=
+ ((ch->flags & CHN_F_VIRTUAL) ? PCM_CAP_VIRTUAL : 0) |
+ ((ch->direction == PCMDIR_PLAY) ? PCM_CAP_OUTPUT :
+ PCM_CAP_INPUT);
+
+ caps = chn_getcaps(ch);
+
+ minch = INT_MAX;
+ maxch = 0;
+ fmts = 0;
+ for (i = 0; caps->fmtlist[i]; i++) {
+ fmts |= AFMT_ENCODING(caps->fmtlist[i]);
+ minch = min(AFMT_CHANNEL(caps->fmtlist[i]), minch);
+ maxch = max(AFMT_CHANNEL(caps->fmtlist[i]), maxch);
+ }
+
+ if (ch->direction == PCMDIR_PLAY)
+ ai->oformats |= fmts;
+ else
+ ai->iformats |= fmts;
+
+ if (ex || (pcm_getflags(d->dev) & SD_F_BITPERFECT)) {
+ ai->min_rate = min(ai->min_rate, caps->minspeed);
+ ai->max_rate = max(ai->max_rate, caps->maxspeed);
+ } else {
+ ai->min_rate = min(ai->min_rate, feeder_rate_min);
+ ai->max_rate = max(ai->max_rate, feeder_rate_max);
+ }
+ ai->min_channels = min(ai->min_channels, minch);
+ ai->max_channels = max(ai->max_channels, maxch);
+
+ CHN_UNLOCK(ch);
}
+ if (ai->min_rate == INT_MAX)
+ ai->min_rate = 0;
+ if (ai->min_channels == INT_MAX)
+ ai->min_channels = 0;
- return (NULL);
+ PCM_UNLOCK(d);
+
+ return (0);
}
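[Editorial note: as a usage illustration for the handler above, a minimal user-space sketch against the standard OSSv4 API; cleanup trimmed:]

#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	oss_audioinfo ai;
	int fd;

	if ((fd = open("/dev/dsp", O_RDWR)) < 0)
		err(1, "open");
	ai.dev = -1;	/* -1: describe the device backing this fd */
	if (ioctl(fd, SNDCTL_AUDIOINFO, &ai) < 0)
		err(1, "SNDCTL_AUDIOINFO");
	printf("%s: %d-%d Hz, %d-%d channels\n", ai.name, ai.min_rate,
	    ai.max_rate, ai.min_channels, ai.max_channels);
	return (0);
}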
static int
-dsp_oss_audioinfo_cb(void *data, void *arg)
+dsp_oss_engineinfo_cb(void *data, void *arg)
{
struct dsp_cdevpriv *priv = data;
struct pcm_channel *ch = arg;
@@ -2084,10 +2271,10 @@ dsp_oss_audioinfo_cb(void *data, void *arg)
}
/**
- * @brief Handler for SNDCTL_AUDIOINFO.
+ * @brief Handler for SNDCTL_ENGINEINFO.
*
- * Gathers information about the audio device specified in ai->dev. If
- * ai->dev == -1, then this function gathers information about the current
+ * Gathers information about the audio engine specified in ai->dev. If
+ * If ai->dev == -1, then this function gathers information about the current
* device. If the call comes in on a non-audio device and ai->dev == -1,
* return EINVAL.
*
@@ -2103,18 +2290,15 @@ dsp_oss_audioinfo_cb(void *data, void *arg)
*
* @retval 0 success
* @retval EINVAL ai->dev specifies an invalid device
- *
- * @todo Verify correctness of Doxygen tags. ;)
*/
int
-dsp_oss_audioinfo(struct cdev *i_dev, oss_audioinfo *ai)
+dsp_oss_engineinfo(struct cdev *i_dev, oss_audioinfo *ai)
{
struct pcmchan_caps *caps;
struct pcm_channel *ch;
struct snddev_info *d;
uint32_t fmts;
int i, nchan, *rates, minch, maxch, unit;
- char *devname, buf[CHN_NAMELEN];
/*
* If probing the device that received the ioctl, make sure it's a
@@ -2125,14 +2309,13 @@ dsp_oss_audioinfo(struct cdev *i_dev, oss_audioinfo *ai)
return (EINVAL);
ch = NULL;
- devname = NULL;
nchan = 0;
- bzero(buf, sizeof(buf));
/*
* Search for the requested audio device (channel). Start by
* iterating over pcm devices.
*/
+ bus_topo_lock();
for (unit = 0; pcm_devclass != NULL &&
unit < devclass_get_maxunit(pcm_devclass); unit++) {
d = devclass_get_softc(pcm_devclass, unit);
@@ -2148,158 +2331,145 @@ dsp_oss_audioinfo(struct cdev *i_dev, oss_audioinfo *ai)
CHN_FOREACH(ch, d, channels.pcm) {
CHN_UNLOCKASSERT(ch);
CHN_LOCK(ch);
- if (ai->dev == -1) {
- if (devfs_foreach_cdevpriv(i_dev,
- dsp_oss_audioinfo_cb, ch) != 0) {
- devname = dsp_unit2name(buf,
- sizeof(buf), ch);
- }
- } else if (ai->dev == nchan)
- devname = dsp_unit2name(buf, sizeof(buf), ch);
- if (devname != NULL)
+ if ((ai->dev == -1 && devfs_foreach_cdevpriv(
+ i_dev, dsp_oss_engineinfo_cb, ch) != 0) ||
+ ai->dev == nchan)
break;
CHN_UNLOCK(ch);
++nchan;
}
- if (devname != NULL) {
- /*
- * At this point, the following synchronization stuff
- * has happened:
- * - a specific PCM device is locked.
- * - a specific audio channel has been locked, so be
- * sure to unlock when exiting;
- */
+ if (ch == NULL) {
+ PCM_UNLOCK(d);
+ continue;
+ }
- caps = chn_getcaps(ch);
+ /*
+ * At this point, the following synchronization stuff
+ * has happened:
+ * - a specific PCM device is locked.
+ * - a specific audio channel has been locked, so be
+ * sure to unlock when exiting;
+ */
- /*
- * With all handles collected, zero out the user's
- * container and begin filling in its fields.
- */
- bzero((void *)ai, sizeof(oss_audioinfo));
+ caps = chn_getcaps(ch);
- ai->dev = nchan;
- strlcpy(ai->name, ch->name, sizeof(ai->name));
+ /*
+ * With all handles collected, zero out the user's
+ * container and begin filling in its fields.
+ */
+ bzero((void *)ai, sizeof(oss_audioinfo));
- if ((ch->flags & CHN_F_BUSY) == 0)
- ai->busy = 0;
- else
- ai->busy = (ch->direction == PCMDIR_PLAY) ? OPEN_WRITE : OPEN_READ;
-
- /**
- * @note
- * @c cmd - OSSv4 docs: "Only supported under Linux at
- * this moment." Cop-out, I know, but I'll save
- * running around in the process table for later.
- * Is there a risk of leaking information?
- */
- ai->pid = ch->pid;
+ ai->dev = nchan;
+ strlcpy(ai->name, ch->name, sizeof(ai->name));
- /*
- * These flags stolen from SNDCTL_DSP_GETCAPS handler.
- * Note, however, that a single channel operates in
- * only one direction, so PCM_CAP_DUPLEX is out.
- */
- /**
- * @todo @c SNDCTL_AUDIOINFO::caps - Make drivers keep
- * these in pcmchan::caps?
- */
- ai->caps = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER |
- ((ch->flags & CHN_F_VIRTUAL) ? PCM_CAP_VIRTUAL : 0) |
- ((ch->direction == PCMDIR_PLAY) ? PCM_CAP_OUTPUT : PCM_CAP_INPUT);
+ if ((ch->flags & CHN_F_BUSY) == 0)
+ ai->busy = 0;
+ else
+ ai->busy = (ch->direction == PCMDIR_PLAY) ? OPEN_WRITE : OPEN_READ;
- /*
- * Collect formats supported @b natively by the
- * device. Also determine min/max channels. (I.e.,
- * mono, stereo, or both?)
- *
- * If any channel is stereo, maxch = 2;
- * if all channels are stereo, minch = 2, too;
- * if any channel is mono, minch = 1;
- * and if all channels are mono, maxch = 1.
- */
- minch = 0;
- maxch = 0;
- fmts = 0;
- for (i = 0; caps->fmtlist[i]; i++) {
- fmts |= caps->fmtlist[i];
- if (AFMT_CHANNEL(caps->fmtlist[i]) > 1) {
- minch = (minch == 0) ? 2 : minch;
- maxch = 2;
- } else {
- minch = 1;
- maxch = (maxch == 0) ? 1 : maxch;
- }
- }
+ ai->pid = ch->pid;
+ strlcpy(ai->cmd, ch->comm, sizeof(ai->cmd));
- if (ch->direction == PCMDIR_PLAY)
- ai->oformats = fmts;
- else
- ai->iformats = fmts;
-
- /**
- * @note
- * @c magic - OSSv4 docs: "Reserved for internal use
- * by OSS."
- *
- * @par
- * @c card_number - OSSv4 docs: "Number of the sound
- * card where this device belongs or -1 if this
- * information is not available. Applications
- * should normally not use this field for any
- * purpose."
- */
- ai->card_number = -1;
- /**
- * @todo @c song_name - depends first on
- * SNDCTL_[GS]ETSONG @todo @c label - depends
- * on SNDCTL_[GS]ETLABEL
- * @todo @c port_number - routing information?
- */
- ai->port_number = -1;
- ai->mixer_dev = (d->mixer_dev != NULL) ? unit : -1;
- /**
- * @note
- * @c real_device - OSSv4 docs: "Obsolete."
- */
- ai->real_device = -1;
- snprintf(ai->devnode, sizeof(ai->devnode), "/dev/dsp%d", unit);
- ai->enabled = device_is_attached(d->dev) ? 1 : 0;
- /**
- * @note
- * @c flags - OSSv4 docs: "Reserved for future use."
- *
- * @note
- * @c binding - OSSv4 docs: "Reserved for future use."
- *
- * @todo @c handle - haven't decided how to generate
- * this yet; bus, vendor, device IDs?
- */
+ /*
+ * These flags stolen from SNDCTL_DSP_GETCAPS handler.
+ * Note, however, that a single channel operates in
+ * only one direction, so PCM_CAP_DUPLEX is out.
+ */
+ /**
+ * @todo @c SNDCTL_AUDIOINFO::caps - Make drivers keep
+ * these in pcmchan::caps?
+ */
+ ai->caps = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER |
+ ((ch->flags & CHN_F_VIRTUAL) ? PCM_CAP_VIRTUAL : 0) |
+ ((ch->direction == PCMDIR_PLAY) ? PCM_CAP_OUTPUT : PCM_CAP_INPUT);
+
+ /*
+ * Collect formats supported @b natively by the
+ * device. Also determine min/max channels.
+ */
+ minch = INT_MAX;
+ maxch = 0;
+ fmts = 0;
+ for (i = 0; caps->fmtlist[i]; i++) {
+ fmts |= AFMT_ENCODING(caps->fmtlist[i]);
+ minch = min(AFMT_CHANNEL(caps->fmtlist[i]), minch);
+ maxch = max(AFMT_CHANNEL(caps->fmtlist[i]), maxch);
+ }
+
+ if (ch->direction == PCMDIR_PLAY)
+ ai->oformats = fmts;
+ else
+ ai->iformats = fmts;
+
+ /**
+ * @note
+ * @c magic - OSSv4 docs: "Reserved for internal use
+ * by OSS."
+ *
+ * @par
+ * @c card_number - OSSv4 docs: "Number of the sound
+ * card where this device belongs or -1 if this
+ * information is not available. Applications
+ * should normally not use this field for any
+ * purpose."
+ */
+ ai->card_number = unit;
+ /**
+ * @todo @c song_name - depends first on
+ * SNDCTL_[GS]ETSONG @todo @c label - depends
+ * on SNDCTL_[GS]ETLABEL
+ * @todo @c port_number - routing information?
+ */
+ ai->port_number = unit;
+ ai->mixer_dev = (d->mixer_dev != NULL) ? unit : -1;
+ /**
+ * @note
+ * @c legacy_device - OSSv4 docs: "Obsolete."
+ */
+ ai->legacy_device = unit;
+ snprintf(ai->devnode, sizeof(ai->devnode), "/dev/dsp%d", unit);
+ ai->enabled = device_is_attached(d->dev) ? 1 : 0;
+ /**
+ * @note
+ * @c flags - OSSv4 docs: "Reserved for future use."
+ *
+ * @note
+ * @c binding - OSSv4 docs: "Reserved for future use."
+ *
+ * @todo @c handle - haven't decided how to generate
+ * this yet; bus, vendor, device IDs?
+ */
+
+ if ((ch->flags & CHN_F_EXCLUSIVE) ||
+ (pcm_getflags(d->dev) & SD_F_BITPERFECT)) {
ai->min_rate = caps->minspeed;
ai->max_rate = caps->maxspeed;
+ } else {
+ ai->min_rate = feeder_rate_min;
+ ai->max_rate = feeder_rate_max;
+ }
- ai->min_channels = minch;
- ai->max_channels = maxch;
+ ai->min_channels = minch;
+ ai->max_channels = maxch;
- ai->nrates = chn_getrates(ch, &rates);
- if (ai->nrates > OSS_MAX_SAMPLE_RATES)
- ai->nrates = OSS_MAX_SAMPLE_RATES;
+ ai->nrates = chn_getrates(ch, &rates);
+ if (ai->nrates > OSS_MAX_SAMPLE_RATES)
+ ai->nrates = OSS_MAX_SAMPLE_RATES;
- for (i = 0; i < ai->nrates; i++)
- ai->rates[i] = rates[i];
-
- ai->next_play_engine = 0;
- ai->next_rec_engine = 0;
+ for (i = 0; i < ai->nrates; i++)
+ ai->rates[i] = rates[i];
- CHN_UNLOCK(ch);
- }
+ ai->next_play_engine = 0;
+ ai->next_rec_engine = 0;
+ CHN_UNLOCK(ch);
PCM_UNLOCK(d);
+ bus_topo_unlock();
- if (devname != NULL)
- return (0);
+ return (0);
}
+ bus_topo_unlock();
/* Exhausted the search -- nothing is locked, so return. */
return (EINVAL);
@@ -2384,7 +2554,7 @@ dsp_oss_syncgroup(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_syncgr
* syncgroup.
*/
if (group->id == 0) {
- sg = (struct pcmchan_syncgroup *)malloc(sizeof(*sg), M_DEVBUF, M_NOWAIT);
+ sg = malloc(sizeof(*sg), M_DEVBUF, M_NOWAIT);
if (sg != NULL) {
SLIST_INIT(&sg->members);
sg->id = alloc_unr(pcmsg_unrhdr);
@@ -2411,7 +2581,7 @@ dsp_oss_syncgroup(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_syncgr
* insert into syncgroup.
*/
if (group->mode & PCM_ENABLE_INPUT) {
- smrd = (struct pcmchan_syncmember *)malloc(sizeof(*smrd), M_DEVBUF, M_NOWAIT);
+ smrd = malloc(sizeof(*smrd), M_DEVBUF, M_NOWAIT);
if (smrd == NULL) {
ret = ENOMEM;
goto out;
@@ -2427,7 +2597,7 @@ dsp_oss_syncgroup(struct pcm_channel *wrch, struct pcm_channel *rdch, oss_syncgr
}
if (group->mode & PCM_ENABLE_OUTPUT) {
- smwr = (struct pcmchan_syncmember *)malloc(sizeof(*smwr), M_DEVBUF, M_NOWAIT);
+ smwr = malloc(sizeof(*smwr), M_DEVBUF, M_NOWAIT);
if (smwr == NULL) {
ret = ENOMEM;
goto out;
diff --git a/sys/dev/sound/pcm/dsp.h b/sys/dev/sound/pcm/dsp.h
index b81e60dc19b5..8c0786aad474 100644
--- a/sys/dev/sound/pcm/dsp.h
+++ b/sys/dev/sound/pcm/dsp.h
@@ -5,6 +5,10 @@
* Portions Copyright (c) Ryan Beasley <ryan.beasley@gmail.com> - GSoC 2006
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,11 +35,9 @@
#ifndef _PCMDSP_H_
#define _PCMDSP_H_
-extern struct cdevsw dsp_cdevsw;
-
int dsp_make_dev(device_t);
void dsp_destroy_dev(device_t);
-char *dsp_unit2name(char *, size_t, struct pcm_channel *);
-int dsp_oss_audioinfo(struct cdev *, oss_audioinfo *);
+int dsp_oss_audioinfo(struct cdev *, oss_audioinfo *, bool);
+int dsp_oss_engineinfo(struct cdev *, oss_audioinfo *);
#endif /* !_PCMDSP_H_ */
diff --git a/sys/dev/sound/pcm/feeder.c b/sys/dev/sound/pcm/feeder.c
index 78443ad76140..af3ada441e48 100644
--- a/sys/dev/sound/pcm/feeder.c
+++ b/sys/dev/sound/pcm/feeder.c
@@ -4,6 +4,10 @@
* Copyright (c) 2005-2009 Ariff Abdullah <ariff@FreeBSD.org>
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -32,13 +36,13 @@
#endif
#include <dev/sound/pcm/sound.h>
+#include <dev/sound/pcm/vchan.h>
#include "feeder_if.h"
static MALLOC_DEFINE(M_FEEDER, "feeder", "pcm feeder");
#define MAXFEEDERS 256
-#undef FEEDER_DEBUG
struct feedertab_entry {
SLIST_ENTRY(feedertab_entry) link;
@@ -48,96 +52,44 @@ struct feedertab_entry {
int idx;
};
static SLIST_HEAD(, feedertab_entry) feedertab;
+static int feedercnt = 0;
/*****************************************************************************/
-void
-feeder_register(void *p)
+static void
+feeder_register_root(void *p)
{
- static int feedercnt = 0;
-
struct feeder_class *fc = p;
struct feedertab_entry *fte;
- int i;
-
- if (feedercnt == 0) {
- KASSERT(fc->desc == NULL, ("first feeder not root: %s", fc->name));
-
- SLIST_INIT(&feedertab);
- fte = malloc(sizeof(*fte), M_FEEDER, M_NOWAIT | M_ZERO);
- if (fte == NULL) {
- printf("can't allocate memory for root feeder: %s\n",
- fc->name);
-
- return;
- }
- fte->feederclass = fc;
- fte->desc = NULL;
- fte->idx = feedercnt;
- SLIST_INSERT_HEAD(&feedertab, fte, link);
- feedercnt++;
-
- /* initialize global variables */
-
- if (snd_verbose < 0 || snd_verbose > 4)
- snd_verbose = 1;
-
- if (snd_unit < 0)
- snd_unit = -1;
-
- if (snd_maxautovchans < 0 ||
- snd_maxautovchans > SND_MAXVCHANS)
- snd_maxautovchans = 0;
-
- if (chn_latency < CHN_LATENCY_MIN ||
- chn_latency > CHN_LATENCY_MAX)
- chn_latency = CHN_LATENCY_DEFAULT;
-
- if (chn_latency_profile < CHN_LATENCY_PROFILE_MIN ||
- chn_latency_profile > CHN_LATENCY_PROFILE_MAX)
- chn_latency_profile = CHN_LATENCY_PROFILE_DEFAULT;
-
- if (feeder_rate_min < FEEDRATE_MIN ||
- feeder_rate_max < FEEDRATE_MIN ||
- feeder_rate_min > FEEDRATE_MAX ||
- feeder_rate_max > FEEDRATE_MAX ||
- !(feeder_rate_min < feeder_rate_max)) {
- feeder_rate_min = FEEDRATE_RATEMIN;
- feeder_rate_max = FEEDRATE_RATEMAX;
- }
- if (feeder_rate_round < FEEDRATE_ROUNDHZ_MIN ||
- feeder_rate_round > FEEDRATE_ROUNDHZ_MAX)
- feeder_rate_round = FEEDRATE_ROUNDHZ;
+ MPASS(feedercnt == 0);
+ KASSERT(fc->desc == NULL, ("first feeder not root: %s", fc->name));
- if (bootverbose)
- printf("%s: snd_unit=%d snd_maxautovchans=%d "
- "latency=%d "
- "feeder_rate_min=%d feeder_rate_max=%d "
- "feeder_rate_round=%d\n",
- __func__, snd_unit, snd_maxautovchans,
- chn_latency,
- feeder_rate_min, feeder_rate_max,
- feeder_rate_round);
-
- /* we've got our root feeder so don't veto pcm loading anymore */
- pcm_veto_load = 0;
+ SLIST_INIT(&feedertab);
+ fte = malloc(sizeof(*fte), M_FEEDER, M_WAITOK | M_ZERO);
+ fte->feederclass = fc;
+ fte->desc = NULL;
+ fte->idx = feedercnt;
+ SLIST_INSERT_HEAD(&feedertab, fte, link);
+ feedercnt++;
+}
- return;
- }
+void
+feeder_register(void *p)
+{
+ struct feeder_class *fc = p;
+ struct feedertab_entry *fte;
+ int i;
KASSERT(fc->desc != NULL, ("feeder '%s' has no descriptor", fc->name));
- /* beyond this point failure is non-fatal but may result in some translations being unavailable */
+ /*
+	 * Beyond this point, failure is non-fatal but may result in some
+	 * translations being unavailable.
+ */
i = 0;
while ((feedercnt < MAXFEEDERS) && (fc->desc[i].type > 0)) {
- /* printf("adding feeder %s, %x -> %x\n", fc->name, fc->desc[i].in, fc->desc[i].out); */
- fte = malloc(sizeof(*fte), M_FEEDER, M_NOWAIT | M_ZERO);
- if (fte == NULL) {
- printf("can't allocate memory for feeder '%s', %x -> %x\n", fc->name, fc->desc[i].in, fc->desc[i].out);
-
- return;
- }
+ fte = malloc(sizeof(*fte), M_FEEDER, M_WAITOK | M_ZERO);
fte->feederclass = fc;
fte->desc = &fc->desc[i];
fte->idx = feedercnt;
@@ -146,8 +98,10 @@ feeder_register(void *p)
i++;
}
feedercnt++;
- if (feedercnt >= MAXFEEDERS)
- printf("MAXFEEDERS (%d >= %d) exceeded\n", feedercnt, MAXFEEDERS);
+ if (feedercnt >= MAXFEEDERS) {
+ printf("MAXFEEDERS (%d >= %d) exceeded\n",
+ feedercnt, MAXFEEDERS);
+ }
}
static void
@@ -231,7 +185,7 @@ feeder_getclass(struct pcm_feederdesc *desc)
}
int
-chn_addfeeder(struct pcm_channel *c, struct feeder_class *fc, struct pcm_feederdesc *desc)
+feeder_add(struct pcm_channel *c, struct feeder_class *fc, struct pcm_feederdesc *desc)
{
struct pcm_feeder *nf;
@@ -248,22 +202,20 @@ chn_addfeeder(struct pcm_channel *c, struct feeder_class *fc, struct pcm_feederd
return 0;
}
-int
-chn_removefeeder(struct pcm_channel *c)
+void
+feeder_remove(struct pcm_channel *c)
{
struct pcm_feeder *f;
- if (c->feeder == NULL)
- return -1;
- f = c->feeder;
- c->feeder = c->feeder->source;
- feeder_destroy(f);
-
- return 0;
+ while (c->feeder != NULL) {
+ f = c->feeder;
+ c->feeder = c->feeder->source;
+ feeder_destroy(f);
+ }
}
struct pcm_feeder *
-chn_findfeeder(struct pcm_channel *c, u_int32_t type)
+feeder_find(struct pcm_channel *c, u_int32_t type)
{
struct pcm_feeder *f;
@@ -514,5 +466,10 @@ static struct feeder_class feeder_root_class = {
.desc = NULL,
.data = NULL,
};
-SYSINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_register, &feeder_root_class);
+/*
+ * Register the root feeder first so that pcm_addchan() and subsequent
+ * functions can use it.
+ */
+SYSINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_register_root,
+ &feeder_root_class);
SYSUNINIT(feeder_root, SI_SUB_DRIVERS, SI_ORDER_FIRST, feeder_unregisterall, NULL);
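[Editorial note: a sketch of the ordering contract the comment above relies on, using a hypothetical example_init: within SI_SUB_DRIVERS, SI_ORDER_FIRST handlers run before SI_ORDER_MIDDLE ones, so the root feeder exists before any driver SYSINIT at SI_ORDER_MIDDLE executes.]

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>

static void
example_init(void *arg __unused)
{
	/* feeder_register_root() has already run at this point. */
	printf("root feeder is registered\n");
}
SYSINIT(example_init, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, example_init, NULL);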
diff --git a/sys/dev/sound/pcm/feeder.h b/sys/dev/sound/pcm/feeder.h
index c3d6f89d102e..60b8280e59ef 100644
--- a/sys/dev/sound/pcm/feeder.h
+++ b/sys/dev/sound/pcm/feeder.h
@@ -58,10 +58,10 @@ u_int32_t snd_fmtbestbit(u_int32_t fmt, u_int32_t *fmts);
u_int32_t snd_fmtbestchannel(u_int32_t fmt, u_int32_t *fmts);
u_int32_t snd_fmtbest(u_int32_t fmt, u_int32_t *fmts);
-int chn_addfeeder(struct pcm_channel *c, struct feeder_class *fc,
+int feeder_add(struct pcm_channel *c, struct feeder_class *fc,
struct pcm_feederdesc *desc);
-int chn_removefeeder(struct pcm_channel *c);
-struct pcm_feeder *chn_findfeeder(struct pcm_channel *c, u_int32_t type);
+void feeder_remove(struct pcm_channel *c);
+struct pcm_feeder *feeder_find(struct pcm_channel *c, u_int32_t type);
void feeder_printchain(struct pcm_feeder *head);
int feeder_chain(struct pcm_channel *);
@@ -167,32 +167,6 @@ int feeder_matrix_oss_get_channel_order(struct pcmchan_matrix *,
int feeder_matrix_oss_set_channel_order(struct pcmchan_matrix *,
unsigned long long *);
-#if 0
-/* feeder_matrix */
-enum {
- FEEDMATRIX_TYPE,
- FEEDMATRIX_RESET,
- FEEDMATRIX_CHANNELS_IN,
- FEEDMATRIX_CHANNELS_OUT,
- FEEDMATRIX_SET_MAP
-};
-
-enum {
- FEEDMATRIX_TYPE_NONE,
- FEEDMATRIX_TYPE_AUTO,
- FEEDMATRIX_TYPE_2X1,
- FEEDMATRIX_TYPE_1X2,
- FEEDMATRIX_TYPE_2X2
-};
-
-#define FEEDMATRIX_TYPE_STEREO_TO_MONO FEEDMATRIX_TYPE_2X1
-#define FEEDMATRIX_TYPE_MONO_TO_STEREO FEEDMATRIX_TYPE_1X2
-#define FEEDMATRIX_TYPE_SWAP_STEREO FEEDMATRIX_TYPE_2X2
-#define FEEDMATRIX_MAP(x, y) ((((x) & 0x3f) << 6) | ((y) & 0x3f))
-#define FEEDMATRIX_MAP_SRC(x) ((x) & 0x3f)
-#define FEEDMATRIX_MAP_DST(x) (((x) >> 6) & 0x3f)
-#endif
-
/*
* By default, various feeders only deal with sign 16/32 bit native-endian
* since it should provide the fastest processing path. Processing 8bit samples
diff --git a/sys/dev/sound/pcm/feeder_chain.c b/sys/dev/sound/pcm/feeder_chain.c
index 52351ef58510..56de32441de7 100644
--- a/sys/dev/sound/pcm/feeder_chain.c
+++ b/sys/dev/sound/pcm/feeder_chain.c
@@ -102,6 +102,7 @@ static uint32_t feeder_chain_formats_multi[] = {
AFMT_S16_LE, AFMT_S16_BE, AFMT_U16_LE, AFMT_U16_BE,
AFMT_S24_LE, AFMT_S24_BE, AFMT_U24_LE, AFMT_U24_BE,
AFMT_S32_LE, AFMT_S32_BE, AFMT_U32_LE, AFMT_U32_BE,
+ AFMT_F32_LE, AFMT_F32_BE,
0
};
@@ -111,6 +112,7 @@ static uint32_t feeder_chain_formats_fullmulti[] = {
AFMT_S16_LE, AFMT_S16_BE, AFMT_U16_LE, AFMT_U16_BE,
AFMT_S24_LE, AFMT_S24_BE, AFMT_U24_LE, AFMT_U24_BE,
AFMT_S32_LE, AFMT_S32_BE, AFMT_U32_LE, AFMT_U32_BE,
+ AFMT_F32_LE, AFMT_F32_BE,
0
};
@@ -157,7 +159,7 @@ feeder_build_format(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->in = cdesc->current.afmt;
desc->out = cdesc->target.afmt;
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_format\n", __func__);
@@ -230,7 +232,7 @@ feeder_build_rate(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->in = cdesc->current.afmt;
desc->out = desc->in;
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_rate\n", __func__);
@@ -309,7 +311,7 @@ feeder_build_matrix(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->out = SND_FORMAT(cdesc->current.afmt,
cdesc->target.matrix->channels, cdesc->target.matrix->ext);
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_matrix\n", __func__);
@@ -365,7 +367,7 @@ feeder_build_volume(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->in = cdesc->current.afmt;
desc->out = desc->in;
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_volume\n", __func__);
@@ -433,7 +435,7 @@ feeder_build_eq(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->in = cdesc->current.afmt;
desc->out = desc->in;
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_eq\n", __func__);
@@ -472,7 +474,7 @@ feeder_build_root(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
return (ENOTSUP);
}
- ret = chn_addfeeder(c, fc, NULL);
+ ret = feeder_add(c, fc, NULL);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_root\n", __func__);
@@ -513,7 +515,7 @@ feeder_build_mixer(struct pcm_channel *c, struct feeder_chain_desc *cdesc)
desc->in = cdesc->current.afmt;
desc->out = desc->in;
- ret = chn_addfeeder(c, fc, desc);
+ ret = feeder_add(c, fc, desc);
if (ret != 0) {
device_printf(c->dev,
"%s(): can't add feeder_mixer\n", __func__);
@@ -588,8 +590,7 @@ feeder_chain(struct pcm_channel *c)
CHN_LOCKASSERT(c);
/* Remove everything first. */
- while (chn_removefeeder(c) == 0)
- ;
+ feeder_remove(c);
KASSERT(c->feeder == NULL, ("feeder chain not empty"));
@@ -719,6 +720,17 @@ feeder_chain(struct pcm_channel *c)
c->format = cdesc.target.afmt;
c->speed = cdesc.target.rate;
} else {
+ /*
+ * Bail out early if we do not support either of those formats.
+ */
+ if ((cdesc.origin.afmt & AFMT_CONVERTIBLE) == 0 ||
+ (cdesc.target.afmt & AFMT_CONVERTIBLE) == 0) {
+ device_printf(c->dev,
+ "%s(): unsupported formats: in=0x%08x, out=0x%08x\n",
+ __func__, cdesc.origin.afmt, cdesc.target.afmt);
+ return (ENODEV);
+ }
+
/* hwfmt is not convertible, so 'dummy' it. */
if (hwfmt & AFMT_PASSTHROUGH)
cdesc.dummy = 1;
diff --git a/sys/dev/sound/pcm/feeder_eq.c b/sys/dev/sound/pcm/feeder_eq.c
index a097b13cd986..23e27b922486 100644
--- a/sys/dev/sound/pcm/feeder_eq.c
+++ b/sys/dev/sound/pcm/feeder_eq.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2008-2009 Ariff Abdullah <ariff@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -101,10 +105,6 @@ SYSCTL_INT(_hw_snd, OID_AUTO, feeder_eq_exact_rate, CTLFLAG_RWTUN,
&feeder_eq_exact_rate, 0, "force exact rate validation");
#endif
-struct feed_eq_info;
-
-typedef void (*feed_eq_t)(struct feed_eq_info *, uint8_t *, uint32_t);
-
struct feed_eq_tone {
intpcm_t o1[SND_CHN_MAX];
intpcm_t o2[SND_CHN_MAX];
@@ -117,7 +117,7 @@ struct feed_eq_info {
struct feed_eq_tone treble;
struct feed_eq_tone bass;
struct feed_eq_coeff *coeff;
- feed_eq_t biquad;
+ uint32_t fmt;
uint32_t channels;
uint32_t rate;
uint32_t align;
@@ -135,137 +135,74 @@ struct feed_eq_info {
#define FEEDEQ_ERR_CLIP_CHECK(...)
#endif
-#define FEEDEQ_CLAMP(v) (((v) > PCM_S32_MAX) ? PCM_S32_MAX : \
- (((v) < PCM_S32_MIN) ? PCM_S32_MIN : \
- (v)))
-
-#define FEEDEQ_DECLARE(SIGN, BIT, ENDIAN) \
-static void \
-feed_eq_biquad_##SIGN##BIT##ENDIAN(struct feed_eq_info *info, \
- uint8_t *dst, uint32_t count) \
-{ \
- struct feed_eq_coeff_tone *treble, *bass; \
- intpcm64_t w; \
- intpcm_t v; \
- uint32_t i, j; \
- int32_t pmul, pshift; \
- \
- pmul = feed_eq_preamp[info->preamp].mul; \
- pshift = feed_eq_preamp[info->preamp].shift; \
- \
- if (info->state == FEEDEQ_DISABLE) { \
- j = count * info->channels; \
- dst += j * PCM_##BIT##_BPS; \
- do { \
- dst -= PCM_##BIT##_BPS; \
- v = _PCM_READ_##SIGN##BIT##_##ENDIAN(dst); \
- v = ((intpcm64_t)pmul * v) >> pshift; \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, v); \
- } while (--j != 0); \
- \
- return; \
- } \
- \
- treble = &(info->coeff[info->treble.gain].treble); \
- bass = &(info->coeff[info->bass.gain].bass); \
- \
- do { \
- i = 0; \
- j = info->channels; \
- do { \
- v = _PCM_READ_##SIGN##BIT##_##ENDIAN(dst); \
- v <<= 32 - BIT; \
- v = ((intpcm64_t)pmul * v) >> pshift; \
- \
- w = (intpcm64_t)v * treble->b0; \
- w += (intpcm64_t)info->treble.i1[i] * treble->b1; \
- w += (intpcm64_t)info->treble.i2[i] * treble->b2; \
- w -= (intpcm64_t)info->treble.o1[i] * treble->a1; \
- w -= (intpcm64_t)info->treble.o2[i] * treble->a2; \
- info->treble.i2[i] = info->treble.i1[i]; \
- info->treble.i1[i] = v; \
- info->treble.o2[i] = info->treble.o1[i]; \
- w >>= FEEDEQ_COEFF_SHIFT; \
- FEEDEQ_ERR_CLIP_CHECK(treble, w); \
- v = FEEDEQ_CLAMP(w); \
- info->treble.o1[i] = v; \
- \
- w = (intpcm64_t)v * bass->b0; \
- w += (intpcm64_t)info->bass.i1[i] * bass->b1; \
- w += (intpcm64_t)info->bass.i2[i] * bass->b2; \
- w -= (intpcm64_t)info->bass.o1[i] * bass->a1; \
- w -= (intpcm64_t)info->bass.o2[i] * bass->a2; \
- info->bass.i2[i] = info->bass.i1[i]; \
- info->bass.i1[i] = v; \
- info->bass.o2[i] = info->bass.o1[i]; \
- w >>= FEEDEQ_COEFF_SHIFT; \
- FEEDEQ_ERR_CLIP_CHECK(bass, w); \
- v = FEEDEQ_CLAMP(w); \
- info->bass.o1[i] = v; \
- \
- v >>= 32 - BIT; \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, v); \
- dst += PCM_##BIT##_BPS; \
- i++; \
- } while (--j != 0); \
- } while (--count != 0); \
-}
-
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDEQ_DECLARE(S, 16, LE)
-FEEDEQ_DECLARE(S, 32, LE)
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDEQ_DECLARE(S, 16, BE)
-FEEDEQ_DECLARE(S, 32, BE)
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
-FEEDEQ_DECLARE(S, 8, NE)
-FEEDEQ_DECLARE(S, 24, LE)
-FEEDEQ_DECLARE(S, 24, BE)
-FEEDEQ_DECLARE(U, 8, NE)
-FEEDEQ_DECLARE(U, 16, LE)
-FEEDEQ_DECLARE(U, 24, LE)
-FEEDEQ_DECLARE(U, 32, LE)
-FEEDEQ_DECLARE(U, 16, BE)
-FEEDEQ_DECLARE(U, 24, BE)
-FEEDEQ_DECLARE(U, 32, BE)
-#endif
-
-#define FEEDEQ_ENTRY(SIGN, BIT, ENDIAN) \
- { \
- AFMT_##SIGN##BIT##_##ENDIAN, \
- feed_eq_biquad_##SIGN##BIT##ENDIAN \
+__always_inline static void
+feed_eq_biquad(struct feed_eq_info *info, uint8_t *dst, uint32_t count,
+ const uint32_t fmt)
+{
+ struct feed_eq_coeff_tone *treble, *bass;
+ intpcm64_t w;
+ intpcm_t v;
+ uint32_t i, j;
+ int32_t pmul, pshift;
+
+ pmul = feed_eq_preamp[info->preamp].mul;
+ pshift = feed_eq_preamp[info->preamp].shift;
+
+ if (info->state == FEEDEQ_DISABLE) {
+ j = count * info->channels;
+ dst += j * AFMT_BPS(fmt);
+ do {
+ dst -= AFMT_BPS(fmt);
+ v = pcm_sample_read(dst, fmt);
+ v = ((intpcm64_t)pmul * v) >> pshift;
+ pcm_sample_write(dst, v, fmt);
+ } while (--j != 0);
+
+ return;
}
-static const struct {
- uint32_t format;
- feed_eq_t biquad;
-} feed_eq_biquad_tab[] = {
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDEQ_ENTRY(S, 16, LE),
- FEEDEQ_ENTRY(S, 32, LE),
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDEQ_ENTRY(S, 16, BE),
- FEEDEQ_ENTRY(S, 32, BE),
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
- FEEDEQ_ENTRY(S, 8, NE),
- FEEDEQ_ENTRY(S, 24, LE),
- FEEDEQ_ENTRY(S, 24, BE),
- FEEDEQ_ENTRY(U, 8, NE),
- FEEDEQ_ENTRY(U, 16, LE),
- FEEDEQ_ENTRY(U, 24, LE),
- FEEDEQ_ENTRY(U, 32, LE),
- FEEDEQ_ENTRY(U, 16, BE),
- FEEDEQ_ENTRY(U, 24, BE),
- FEEDEQ_ENTRY(U, 32, BE)
-#endif
-};
+ treble = &(info->coeff[info->treble.gain].treble);
+ bass = &(info->coeff[info->bass.gain].bass);
-#define FEEDEQ_BIQUAD_TAB_SIZE \
- ((int32_t)(sizeof(feed_eq_biquad_tab) / sizeof(feed_eq_biquad_tab[0])))
+ do {
+ i = 0;
+ j = info->channels;
+ do {
+ v = pcm_sample_read_norm(dst, fmt);
+ v = ((intpcm64_t)pmul * v) >> pshift;
+
+ w = (intpcm64_t)v * treble->b0;
+ w += (intpcm64_t)info->treble.i1[i] * treble->b1;
+ w += (intpcm64_t)info->treble.i2[i] * treble->b2;
+ w -= (intpcm64_t)info->treble.o1[i] * treble->a1;
+ w -= (intpcm64_t)info->treble.o2[i] * treble->a2;
+ info->treble.i2[i] = info->treble.i1[i];
+ info->treble.i1[i] = v;
+ info->treble.o2[i] = info->treble.o1[i];
+ w >>= FEEDEQ_COEFF_SHIFT;
+ FEEDEQ_ERR_CLIP_CHECK(treble, w);
+ v = pcm_clamp(w, AFMT_S32_NE);
+ info->treble.o1[i] = v;
+
+ w = (intpcm64_t)v * bass->b0;
+ w += (intpcm64_t)info->bass.i1[i] * bass->b1;
+ w += (intpcm64_t)info->bass.i2[i] * bass->b2;
+ w -= (intpcm64_t)info->bass.o1[i] * bass->a1;
+ w -= (intpcm64_t)info->bass.o2[i] * bass->a2;
+ info->bass.i2[i] = info->bass.i1[i];
+ info->bass.i1[i] = v;
+ info->bass.o2[i] = info->bass.o1[i];
+ w >>= FEEDEQ_COEFF_SHIFT;
+ FEEDEQ_ERR_CLIP_CHECK(bass, w);
+ v = pcm_clamp(w, AFMT_S32_NE);
+ info->bass.o1[i] = v;
+
+ pcm_sample_write_norm(dst, v, fmt);
+ dst += AFMT_BPS(fmt);
+ i++;
+ } while (--j != 0);
+ } while (--count != 0);
+}
static struct feed_eq_coeff *
feed_eq_coeff_rate(uint32_t rate)
@@ -337,26 +274,15 @@ static int
feed_eq_init(struct pcm_feeder *f)
{
struct feed_eq_info *info;
- feed_eq_t biquad_op;
- int i;
if (f->desc->in != f->desc->out)
return (EINVAL);
- biquad_op = NULL;
-
- for (i = 0; i < FEEDEQ_BIQUAD_TAB_SIZE && biquad_op == NULL; i++) {
- if (AFMT_ENCODING(f->desc->in) == feed_eq_biquad_tab[i].format)
- biquad_op = feed_eq_biquad_tab[i].biquad;
- }
-
- if (biquad_op == NULL)
- return (EINVAL);
-
info = malloc(sizeof(*info), M_DEVBUF, M_NOWAIT | M_ZERO);
if (info == NULL)
return (ENOMEM);
+ info->fmt = AFMT_ENCODING(f->desc->in);
info->channels = AFMT_CHANNEL(f->desc->in);
info->align = info->channels * AFMT_BPS(f->desc->in);
@@ -366,8 +292,6 @@ feed_eq_init(struct pcm_feeder *f)
info->preamp = FEEDEQ_PREAMP2IDX(FEEDEQ_PREAMP_DEFAULT);
info->state = FEEDEQ_UNKNOWN;
- info->biquad = biquad_op;
-
f->data = info;
return (feed_eq_setup(info));
@@ -470,7 +394,21 @@ feed_eq_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
if (j == 0)
break;
- info->biquad(info, dst, j);
+ /* Optimize some common formats. */
+ switch (info->fmt) {
+ case AFMT_S16_NE:
+ feed_eq_biquad(info, dst, j, AFMT_S16_NE);
+ break;
+ case AFMT_S24_NE:
+ feed_eq_biquad(info, dst, j, AFMT_S24_NE);
+ break;
+ case AFMT_S32_NE:
+ feed_eq_biquad(info, dst, j, AFMT_S32_NE);
+ break;
+ default:
+ feed_eq_biquad(info, dst, j, info->fmt);
+ break;
+ }
j *= info->align;
dst += j;
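[Editorial note: the switch above looks redundant, since every arm calls the same function, but it is what enables specialization: the callee is __always_inline and takes the format as a const parameter, so each arm passing a compile-time constant gets a constant-folded copy of the loop, while the default arm compiles one generic fallback. A standalone sketch of the idiom with a generic fill loop, not driver code:]

#include <sys/cdefs.h>
#include <stdint.h>
#include <string.h>

__always_inline static void
fill(uint8_t *dst, size_t n, const size_t bps)
{
	while (n-- != 0) {
		memset(dst, 0, bps);	/* bps folds to a constant here */
		dst += bps;
	}
}

static void
fill_dispatch(uint8_t *dst, size_t n, size_t bps)
{
	switch (bps) {
	case 2:
		fill(dst, n, 2);	/* specialized 16-bit body */
		break;
	case 4:
		fill(dst, n, 4);	/* specialized 32-bit body */
		break;
	default:
		fill(dst, n, bps);	/* single generic body */
		break;
	}
}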
@@ -584,7 +522,7 @@ sysctl_dev_pcm_eq(SYSCTL_HANDLER_ARGS)
CHN_FOREACH(c, d, channels.pcm.busy) {
CHN_LOCK(c);
- f = chn_findfeeder(c, FEEDER_EQ);
+ f = feeder_find(c, FEEDER_EQ);
if (f != NULL)
(void)FEEDER_SET(f, FEEDEQ_STATE, val);
CHN_UNLOCK(c);
@@ -643,7 +581,7 @@ sysctl_dev_pcm_eq_preamp(SYSCTL_HANDLER_ARGS)
CHN_FOREACH(c, d, channels.pcm.busy) {
CHN_LOCK(c);
- f = chn_findfeeder(c, FEEDER_EQ);
+ f = feeder_find(c, FEEDER_EQ);
if (f != NULL)
(void)FEEDER_SET(f, FEEDEQ_PREAMP, val);
CHN_UNLOCK(c);
diff --git a/sys/dev/sound/pcm/feeder_format.c b/sys/dev/sound/pcm/feeder_format.c
index 1e18e3e07450..0feac43374b8 100644
--- a/sys/dev/sound/pcm/feeder_format.c
+++ b/sys/dev/sound/pcm/feeder_format.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2008-2009 Ariff Abdullah <ariff@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -37,8 +41,6 @@
#endif
#include <dev/sound/pcm/sound.h>
#include <dev/sound/pcm/pcm.h>
-#include <dev/sound/pcm/g711.h>
-#include <dev/sound/pcm/intpcm.h>
#include "feeder_if.h"
#define SND_USE_FXDIV
@@ -47,109 +49,22 @@
#define FEEDFORMAT_RESERVOIR (SND_CHN_MAX * PCM_32_BPS)
-INTPCM_DECLARE(intpcm_conv_tables)
-
struct feed_format_info {
uint32_t ibps, obps;
uint32_t ialign, oalign, channels;
- intpcm_read_t *read;
- intpcm_write_t *write;
+ uint32_t rdfmt, wrfmt;
uint8_t reservoir[FEEDFORMAT_RESERVOIR];
};
-/*
- * dummy ac3/dts passthrough, etc.
- * XXX assume as s16le.
- */
-static __inline intpcm_t
-intpcm_read_null(uint8_t *src __unused)
-{
-
- return (0);
-}
-
-static __inline void
-intpcm_write_null(uint8_t *dst, intpcm_t v __unused)
-{
-
- _PCM_WRITE_S16_LE(dst, 0);
-}
-
-#define FEEDFORMAT_ENTRY(SIGN, BIT, ENDIAN) \
- { \
- AFMT_##SIGN##BIT##_##ENDIAN, \
- intpcm_read_##SIGN##BIT##ENDIAN, \
- intpcm_write_##SIGN##BIT##ENDIAN \
- }
-
-static const struct {
- uint32_t format;
- intpcm_read_t *read;
- intpcm_write_t *write;
-} feed_format_ops[] = {
- FEEDFORMAT_ENTRY(S, 8, NE),
- FEEDFORMAT_ENTRY(S, 16, LE),
- FEEDFORMAT_ENTRY(S, 24, LE),
- FEEDFORMAT_ENTRY(S, 32, LE),
- FEEDFORMAT_ENTRY(S, 16, BE),
- FEEDFORMAT_ENTRY(S, 24, BE),
- FEEDFORMAT_ENTRY(S, 32, BE),
- FEEDFORMAT_ENTRY(U, 8, NE),
- FEEDFORMAT_ENTRY(U, 16, LE),
- FEEDFORMAT_ENTRY(U, 24, LE),
- FEEDFORMAT_ENTRY(U, 32, LE),
- FEEDFORMAT_ENTRY(U, 16, BE),
- FEEDFORMAT_ENTRY(U, 24, BE),
- FEEDFORMAT_ENTRY(U, 32, BE),
- {
- AFMT_MU_LAW,
- intpcm_read_ulaw, intpcm_write_ulaw
- },
- {
- AFMT_A_LAW,
- intpcm_read_alaw, intpcm_write_alaw
- },
- {
- AFMT_AC3,
- intpcm_read_null, intpcm_write_null
- }
-};
-
-#define FEEDFORMAT_TAB_SIZE \
- ((int32_t)(sizeof(feed_format_ops) / sizeof(feed_format_ops[0])))
-
static int
feed_format_init(struct pcm_feeder *f)
{
struct feed_format_info *info;
- intpcm_read_t *rd_op;
- intpcm_write_t *wr_op;
- int i;
if (f->desc->in == f->desc->out ||
AFMT_CHANNEL(f->desc->in) != AFMT_CHANNEL(f->desc->out))
return (EINVAL);
- rd_op = NULL;
- wr_op = NULL;
-
- for (i = 0; i < FEEDFORMAT_TAB_SIZE &&
- (rd_op == NULL || wr_op == NULL); i++) {
- if (rd_op == NULL &&
- AFMT_ENCODING(f->desc->in) == feed_format_ops[i].format)
- rd_op = feed_format_ops[i].read;
- if (wr_op == NULL &&
- AFMT_ENCODING(f->desc->out) == feed_format_ops[i].format)
- wr_op = feed_format_ops[i].write;
- }
-
- if (rd_op == NULL || wr_op == NULL) {
- printf("%s(): failed to initialize io ops "
- "in=0x%08x out=0x%08x\n",
- __func__, f->desc->in, f->desc->out);
- return (EINVAL);
- }
-
info = malloc(sizeof(*info), M_DEVBUF, M_NOWAIT | M_ZERO);
if (info == NULL)
return (ENOMEM);
@@ -158,11 +73,11 @@ feed_format_init(struct pcm_feeder *f)
info->ibps = AFMT_BPS(f->desc->in);
info->ialign = info->ibps * info->channels;
- info->read = rd_op;
+ info->rdfmt = AFMT_ENCODING(f->desc->in);
info->obps = AFMT_BPS(f->desc->out);
info->oalign = info->obps * info->channels;
- info->write = wr_op;
+ info->wrfmt = AFMT_ENCODING(f->desc->out);
f->data = info;
@@ -246,8 +161,8 @@ feed_format_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
count -= j * info->obps;
do {
- v = info->read(src);
- info->write(dst, v);
+ v = pcm_sample_read_norm(src, info->rdfmt);
+ pcm_sample_write_norm(dst, v, info->wrfmt);
dst += info->obps;
src += info->ibps;
} while (--j != 0);
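[Editorial note: a minimal kernel-context sketch of the per-sample path above, assuming the pcm_sample_*_norm helpers this patch uses: one unsigned 8-bit sample converted to signed 16-bit little-endian through the common 32-bit intermediate.]

/* Kernel context; helpers come from <dev/sound/pcm/sound.h>. */
static void
convert_one(const uint8_t *in, uint8_t *out)
{
	intpcm_t v;

	v = pcm_sample_read_norm(in, AFMT_U8);		/* widen to 32 bits */
	pcm_sample_write_norm(out, v, AFMT_S16_LE);	/* narrow to target */
}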
@@ -271,30 +186,3 @@ static kobj_method_t feeder_format_methods[] = {
};
FEEDER_DECLARE(feeder_format, NULL);
-
-/* Extern */
-intpcm_read_t *
-feeder_format_read_op(uint32_t format)
-{
- int i;
-
- for (i = 0; i < FEEDFORMAT_TAB_SIZE; i++) {
- if (AFMT_ENCODING(format) == feed_format_ops[i].format)
- return (feed_format_ops[i].read);
- }
-
- return (NULL);
-}
-
-intpcm_write_t *
-feeder_format_write_op(uint32_t format)
-{
- int i;
-
- for (i = 0; i < FEEDFORMAT_TAB_SIZE; i++) {
- if (AFMT_ENCODING(format) == feed_format_ops[i].format)
- return (feed_format_ops[i].write);
- }
-
- return (NULL);
-}
diff --git a/sys/dev/sound/pcm/feeder_matrix.c b/sys/dev/sound/pcm/feeder_matrix.c
index f5f02e2bf4f5..43258a311d82 100644
--- a/sys/dev/sound/pcm/feeder_matrix.c
+++ b/sys/dev/sound/pcm/feeder_matrix.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2008-2009 Ariff Abdullah <ariff@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -60,20 +64,11 @@
#define SND_CHN_T_EOF 0x00e0fe0f
#define SND_CHN_T_NULL 0x0e0e0e0e
-struct feed_matrix_info;
-
-typedef void (*feed_matrix_t)(struct feed_matrix_info *, uint8_t *,
- uint8_t *, uint32_t);
-
struct feed_matrix_info {
+ uint32_t fmt;
uint32_t bps;
uint32_t ialign, oalign;
uint32_t in, out;
- feed_matrix_t apply;
-#ifdef FEEDMATRIX_GENERIC
- intpcm_read_t *rd;
- intpcm_write_t *wr;
-#endif
struct {
int chn[SND_CHN_T_MAX + 1];
int mul, shift;
@@ -119,174 +114,64 @@ static int feeder_matrix_default_ids[9] = {
} while (0)
#endif
-#define FEEDMATRIX_DECLARE(SIGN, BIT, ENDIAN) \
-static void \
-feed_matrix_##SIGN##BIT##ENDIAN(struct feed_matrix_info *info, \
- uint8_t *src, uint8_t *dst, uint32_t count) \
-{ \
- intpcm64_t accum; \
- intpcm_t v; \
- int i, j; \
- \
- do { \
- for (i = 0; info->matrix[i].chn[0] != SND_CHN_T_EOF; \
- i++) { \
- if (info->matrix[i].chn[0] == SND_CHN_T_NULL) { \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, \
- 0); \
- dst += PCM_##BIT##_BPS; \
- continue; \
- } else if (info->matrix[i].chn[1] == \
- SND_CHN_T_EOF) { \
- v = _PCM_READ_##SIGN##BIT##_##ENDIAN( \
- src + info->matrix[i].chn[0]); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, \
- v); \
- dst += PCM_##BIT##_BPS; \
- continue; \
- } \
- \
- accum = 0; \
- for (j = 0; \
- info->matrix[i].chn[j] != SND_CHN_T_EOF; \
- j++) { \
- v = _PCM_READ_##SIGN##BIT##_##ENDIAN( \
- src + info->matrix[i].chn[j]); \
- accum += v; \
- } \
- \
- accum = (accum * info->matrix[i].mul) >> \
- info->matrix[i].shift; \
- \
- FEEDMATRIX_CLIP_CHECK(accum, BIT); \
- \
- v = (accum > PCM_S##BIT##_MAX) ? \
- PCM_S##BIT##_MAX : \
- ((accum < PCM_S##BIT##_MIN) ? \
- PCM_S##BIT##_MIN : \
- accum); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, v); \
- dst += PCM_##BIT##_BPS; \
- } \
- src += info->ialign; \
- } while (--count != 0); \
-}
-
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDMATRIX_DECLARE(S, 16, LE)
-FEEDMATRIX_DECLARE(S, 32, LE)
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDMATRIX_DECLARE(S, 16, BE)
-FEEDMATRIX_DECLARE(S, 32, BE)
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
-FEEDMATRIX_DECLARE(S, 8, NE)
-FEEDMATRIX_DECLARE(S, 24, LE)
-FEEDMATRIX_DECLARE(S, 24, BE)
-FEEDMATRIX_DECLARE(U, 8, NE)
-FEEDMATRIX_DECLARE(U, 16, LE)
-FEEDMATRIX_DECLARE(U, 24, LE)
-FEEDMATRIX_DECLARE(U, 32, LE)
-FEEDMATRIX_DECLARE(U, 16, BE)
-FEEDMATRIX_DECLARE(U, 24, BE)
-FEEDMATRIX_DECLARE(U, 32, BE)
-#endif
-
-#define FEEDMATRIX_ENTRY(SIGN, BIT, ENDIAN) \
- { \
- AFMT_##SIGN##BIT##_##ENDIAN, \
- feed_matrix_##SIGN##BIT##ENDIAN \
- }
-
-static const struct {
- uint32_t format;
- feed_matrix_t apply;
-} feed_matrix_tab[] = {
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDMATRIX_ENTRY(S, 16, LE),
- FEEDMATRIX_ENTRY(S, 32, LE),
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDMATRIX_ENTRY(S, 16, BE),
- FEEDMATRIX_ENTRY(S, 32, BE),
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
- FEEDMATRIX_ENTRY(S, 8, NE),
- FEEDMATRIX_ENTRY(S, 24, LE),
- FEEDMATRIX_ENTRY(S, 24, BE),
- FEEDMATRIX_ENTRY(U, 8, NE),
- FEEDMATRIX_ENTRY(U, 16, LE),
- FEEDMATRIX_ENTRY(U, 24, LE),
- FEEDMATRIX_ENTRY(U, 32, LE),
- FEEDMATRIX_ENTRY(U, 16, BE),
- FEEDMATRIX_ENTRY(U, 24, BE),
- FEEDMATRIX_ENTRY(U, 32, BE)
-#endif
-};
-
-static void
-feed_matrix_reset(struct feed_matrix_info *info)
-{
- uint32_t i, j;
-
- for (i = 0; i < (sizeof(info->matrix) / sizeof(info->matrix[0])); i++) {
- for (j = 0;
- j < (sizeof(info->matrix[i].chn) /
- sizeof(info->matrix[i].chn[0])); j++) {
- info->matrix[i].chn[j] = SND_CHN_T_EOF;
- }
- info->matrix[i].mul = 1;
- info->matrix[i].shift = 0;
- }
-}
-
-#ifdef FEEDMATRIX_GENERIC
-static void
-feed_matrix_apply_generic(struct feed_matrix_info *info,
- uint8_t *src, uint8_t *dst, uint32_t count)
+__always_inline static void
+feed_matrix_apply(struct feed_matrix_info *info, uint8_t *src, uint8_t *dst,
+ uint32_t count, const uint32_t fmt)
{
intpcm64_t accum;
intpcm_t v;
int i, j;
do {
- for (i = 0; info->matrix[i].chn[0] != SND_CHN_T_EOF;
- i++) {
+ for (i = 0; info->matrix[i].chn[0] != SND_CHN_T_EOF; i++) {
if (info->matrix[i].chn[0] == SND_CHN_T_NULL) {
- info->wr(dst, 0);
+ pcm_sample_write(dst, 0, fmt);
dst += info->bps;
continue;
- } else if (info->matrix[i].chn[1] ==
- SND_CHN_T_EOF) {
- v = info->rd(src + info->matrix[i].chn[0]);
- info->wr(dst, v);
+ } else if (info->matrix[i].chn[1] == SND_CHN_T_EOF) {
+ v = pcm_sample_read(src +
+ info->matrix[i].chn[0], fmt);
+ pcm_sample_write(dst, v, fmt);
dst += info->bps;
continue;
}
accum = 0;
- for (j = 0;
- info->matrix[i].chn[j] != SND_CHN_T_EOF;
+ for (j = 0; info->matrix[i].chn[j] != SND_CHN_T_EOF;
j++) {
- v = info->rd(src + info->matrix[i].chn[j]);
+ v = pcm_sample_read(src +
+ info->matrix[i].chn[j], fmt);
accum += v;
}
accum = (accum * info->matrix[i].mul) >>
info->matrix[i].shift;
- FEEDMATRIX_CLIP_CHECK(accum, 32);
+ FEEDMATRIX_CLIP_CHECK(accum, AFMT_BIT(fmt));
- v = (accum > PCM_S32_MAX) ? PCM_S32_MAX :
- ((accum < PCM_S32_MIN) ? PCM_S32_MIN : accum);
- info->wr(dst, v);
+ v = pcm_clamp(accum, fmt);
+ pcm_sample_write(dst, v, fmt);
dst += info->bps;
}
src += info->ialign;
} while (--count != 0);
}
-#endif
+
+static void
+feed_matrix_reset(struct feed_matrix_info *info)
+{
+ uint32_t i, j;
+
+ for (i = 0; i < nitems(info->matrix); i++) {
+ for (j = 0;
+ j < (sizeof(info->matrix[i].chn) /
+ sizeof(info->matrix[i].chn[0])); j++) {
+ info->matrix[i].chn[j] = SND_CHN_T_EOF;
+ }
+ info->matrix[i].mul = 1;
+ info->matrix[i].shift = 0;
+ }
+}
static int
feed_matrix_setup(struct feed_matrix_info *info, struct pcmchan_matrix *m_in,
@@ -396,7 +281,6 @@ feed_matrix_init(struct pcm_feeder *f)
{
struct feed_matrix_info *info;
struct pcmchan_matrix *m_in, *m_out;
- uint32_t i;
int ret;
if (AFMT_ENCODING(f->desc->in) != AFMT_ENCODING(f->desc->out))
@@ -408,31 +292,10 @@ feed_matrix_init(struct pcm_feeder *f)
info->in = f->desc->in;
info->out = f->desc->out;
+ info->fmt = AFMT_ENCODING(info->in);
info->bps = AFMT_BPS(info->in);
info->ialign = AFMT_ALIGN(info->in);
info->oalign = AFMT_ALIGN(info->out);
- info->apply = NULL;
-
- for (i = 0; info->apply == NULL &&
- i < (sizeof(feed_matrix_tab) / sizeof(feed_matrix_tab[0])); i++) {
- if (AFMT_ENCODING(info->in) == feed_matrix_tab[i].format)
- info->apply = feed_matrix_tab[i].apply;
- }
-
- if (info->apply == NULL) {
-#ifdef FEEDMATRIX_GENERIC
- info->rd = feeder_format_read_op(info->in);
- info->wr = feeder_format_write_op(info->out);
- if (info->rd == NULL || info->wr == NULL) {
- free(info, M_DEVBUF);
- return (EINVAL);
- }
- info->apply = feed_matrix_apply_generic;
-#else
- free(info, M_DEVBUF);
- return (EINVAL);
-#endif
- }
m_in = feeder_matrix_format_map(info->in);
m_out = feeder_matrix_format_map(info->out);
@@ -510,7 +373,21 @@ feed_matrix_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
if (j == 0)
break;
- info->apply(info, src, dst, j);
+ /* Optimize some common formats. */
+ switch (info->fmt) {
+ case AFMT_S16_NE:
+ feed_matrix_apply(info, src, dst, j, AFMT_S16_NE);
+ break;
+ case AFMT_S24_NE:
+ feed_matrix_apply(info, src, dst, j, AFMT_S24_NE);
+ break;
+ case AFMT_S32_NE:
+ feed_matrix_apply(info, src, dst, j, AFMT_S32_NE);
+ break;
+ default:
+ feed_matrix_apply(info, src, dst, j, info->fmt);
+ break;
+ }
j *= info->oalign;
dst += j;
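
The switch above looks redundant, since every case calls feed_matrix_apply(), but it is the heart of the rewrite: feed_matrix_apply() is __always_inline and takes the format as a const parameter, so each case with a compile-time-constant format lets the compiler constant-fold pcm_sample_read()/pcm_sample_write() into the same tight per-format loops the old FEEDMATRIX_DECLARE() macros expanded to, while the default case keeps a single generic fallback. A minimal sketch of the pattern, with hypothetical FMT_* constants and transform() standing in for the real sample helpers:

    #include <stdint.h>

    #ifndef __always_inline			/* from sys/cdefs.h on FreeBSD */
    #define	__always_inline	inline __attribute__((__always_inline__))
    #endif

    #define	FMT_U8	0x01
    #define	FMT_S16	0x02

    /* Folds to a constant operation when fmt is a compile-time constant. */
    static inline uint8_t
    transform(uint8_t v, const uint32_t fmt)
    {
    	return ((fmt == FMT_U8) ? (v ^ 0x80) : v);
    }

    __always_inline static void
    process(uint8_t *buf, uint32_t n, const uint32_t fmt)
    {
    	while (n-- != 0)
    		buf[n] = transform(buf[n], fmt);
    }

    static void
    dispatch(uint8_t *buf, uint32_t n, uint32_t fmt)
    {
    	switch (fmt) {
    	case FMT_U8:		/* compiler emits a specialized copy */
    		process(buf, n, FMT_U8);
    		break;
    	default:		/* one generic fallback */
    		process(buf, n, fmt);
    		break;
    	}
    }

feed_mixer_feed() below uses the same dispatch for AFMT_S16_NE, AFMT_S24_NE and AFMT_S32_NE.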
@@ -679,7 +556,7 @@ feeder_matrix_compare(struct pcmchan_matrix *m_in, struct pcmchan_matrix *m_out)
m_in->mask != m_out->mask)
return (1);
- for (i = 0; i < (sizeof(m_in->map) / sizeof(m_in->map[0])); i++) {
+ for (i = 0; i < nitems(m_in->map); i++) {
if (m_in->map[i].type != m_out->map[i].type)
return (1);
if (m_in->map[i].type == SND_CHN_T_MAX)
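
Several open-coded sizeof(a)/sizeof(a[0]) array-length computations in this patch (here and in feed_matrix_reset(), feeder_volume_apply_matrix() and mixer_obj_create()) are replaced with nitems() from <sys/param.h>:

    #define	nitems(x)	(sizeof((x)) / sizeof((x)[0]))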
diff --git a/sys/dev/sound/pcm/feeder_mixer.c b/sys/dev/sound/pcm/feeder_mixer.c
index 9f6b653effa3..b6b81ad9a51c 100644
--- a/sys/dev/sound/pcm/feeder_mixer.c
+++ b/sys/dev/sound/pcm/feeder_mixer.c
@@ -3,6 +3,10 @@
*
* Copyright (c) 2008-2009 Ariff Abdullah <ariff@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -42,133 +46,83 @@
#undef SND_FEEDER_MULTIFORMAT
#define SND_FEEDER_MULTIFORMAT 1
-typedef void (*feed_mixer_t)(uint8_t *, uint8_t *, uint32_t);
-
-#define FEEDMIXER_DECLARE(SIGN, BIT, ENDIAN) \
-static void \
-feed_mixer_##SIGN##BIT##ENDIAN(uint8_t *src, uint8_t *dst, \
- uint32_t count) \
-{ \
- intpcm##BIT##_t z; \
- intpcm_t x, y; \
- \
- src += count; \
- dst += count; \
- \
- do { \
- src -= PCM_##BIT##_BPS; \
- dst -= PCM_##BIT##_BPS; \
- count -= PCM_##BIT##_BPS; \
- x = PCM_READ_##SIGN##BIT##_##ENDIAN(src); \
- y = PCM_READ_##SIGN##BIT##_##ENDIAN(dst); \
- z = INTPCM##BIT##_T(x) + y; \
- x = PCM_CLAMP_##SIGN##BIT(z); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, x); \
- } while (count != 0); \
-}
-
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDMIXER_DECLARE(S, 16, LE)
-FEEDMIXER_DECLARE(S, 32, LE)
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
-FEEDMIXER_DECLARE(S, 16, BE)
-FEEDMIXER_DECLARE(S, 32, BE)
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
-FEEDMIXER_DECLARE(S, 8, NE)
-FEEDMIXER_DECLARE(S, 24, LE)
-FEEDMIXER_DECLARE(S, 24, BE)
-FEEDMIXER_DECLARE(U, 8, NE)
-FEEDMIXER_DECLARE(U, 16, LE)
-FEEDMIXER_DECLARE(U, 24, LE)
-FEEDMIXER_DECLARE(U, 32, LE)
-FEEDMIXER_DECLARE(U, 16, BE)
-FEEDMIXER_DECLARE(U, 24, BE)
-FEEDMIXER_DECLARE(U, 32, BE)
-#endif
-
struct feed_mixer_info {
uint32_t format;
+ uint32_t channels;
int bps;
- feed_mixer_t mix;
};
-#define FEEDMIXER_ENTRY(SIGN, BIT, ENDIAN) \
- { \
- AFMT_##SIGN##BIT##_##ENDIAN, PCM_##BIT##_BPS, \
- feed_mixer_##SIGN##BIT##ENDIAN \
- }
-
-static struct feed_mixer_info feed_mixer_info_tab[] = {
- FEEDMIXER_ENTRY(S, 8, NE),
-#if BYTE_ORDER == LITTLE_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDMIXER_ENTRY(S, 16, LE),
- FEEDMIXER_ENTRY(S, 32, LE),
-#endif
-#if BYTE_ORDER == BIG_ENDIAN || defined(SND_FEEDER_MULTIFORMAT)
- FEEDMIXER_ENTRY(S, 16, BE),
- FEEDMIXER_ENTRY(S, 32, BE),
-#endif
-#ifdef SND_FEEDER_MULTIFORMAT
- FEEDMIXER_ENTRY(S, 24, LE),
- FEEDMIXER_ENTRY(S, 24, BE),
- FEEDMIXER_ENTRY(U, 8, NE),
- FEEDMIXER_ENTRY(U, 16, LE),
- FEEDMIXER_ENTRY(U, 24, LE),
- FEEDMIXER_ENTRY(U, 32, LE),
- FEEDMIXER_ENTRY(U, 16, BE),
- FEEDMIXER_ENTRY(U, 24, BE),
- FEEDMIXER_ENTRY(U, 32, BE),
-#endif
- { AFMT_AC3, PCM_16_BPS, NULL },
- { AFMT_MU_LAW, PCM_8_BPS, feed_mixer_U8NE }, /* dummy */
- { AFMT_A_LAW, PCM_8_BPS, feed_mixer_U8NE } /* dummy */
-};
+__always_inline static void
+feed_mixer_apply(uint8_t *src, uint8_t *dst, uint32_t count, const uint32_t fmt)
+{
+ intpcm32_t z;
+ intpcm_t x, y;
-#define FEEDMIXER_TAB_SIZE ((int32_t) \
- (sizeof(feed_mixer_info_tab) / \
- sizeof(feed_mixer_info_tab[0])))
+ src += count;
+ dst += count;
-#define FEEDMIXER_DATA(i, c) ((void *) \
- ((uintptr_t)((((i) & 0x1f) << 7) | \
- ((c) & 0x7f))))
-#define FEEDMIXER_INFOIDX(d) ((uint32_t)((uintptr_t)(d) >> 7) & 0x1f)
-#define FEEDMIXER_CHANNELS(d) ((uint32_t)((uintptr_t)(d)) & 0x7f)
+ do {
+ src -= AFMT_BPS(fmt);
+ dst -= AFMT_BPS(fmt);
+ count -= AFMT_BPS(fmt);
+ x = pcm_sample_read_calc(src, fmt);
+ y = pcm_sample_read_calc(dst, fmt);
+ z = INTPCM_T(x) + y;
+ x = pcm_clamp_calc(z, fmt);
+ pcm_sample_write(dst, x, fmt);
+ } while (count != 0);
+}
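
feed_mixer_apply() walks both buffers back to front and widens the sum into intpcm32_t before clamping, so overflow saturates instead of wrapping. The per-sample arithmetic reduces to a saturating add; a standalone sketch for 16-bit samples (the driver's pcm_clamp_calc() plays the role of the explicit clamp here):

    #include <stdint.h>

    /* Saturating add of two 16-bit PCM samples. */
    static int16_t
    mix_s16(int16_t x, int16_t y)
    {
    	int32_t z = (int32_t)x + y;	/* widen so the sum cannot wrap */

    	if (z > INT16_MAX)
    		z = INT16_MAX;
    	else if (z < INT16_MIN)
    		z = INT16_MIN;
    	return ((int16_t)z);
    }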
static int
feed_mixer_init(struct pcm_feeder *f)
{
- int i;
+ struct feed_mixer_info *info;
if (f->desc->in != f->desc->out)
return (EINVAL);
- for (i = 0; i < FEEDMIXER_TAB_SIZE; i++) {
- if (AFMT_ENCODING(f->desc->in) ==
- feed_mixer_info_tab[i].format) {
- f->data =
- FEEDMIXER_DATA(i, AFMT_CHANNEL(f->desc->in));
- return (0);
- }
- }
+ info = malloc(sizeof(*info), M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (info == NULL)
+ return (ENOMEM);
+
+ info->format = AFMT_ENCODING(f->desc->in);
+ info->channels = AFMT_CHANNEL(f->desc->in);
+ info->bps = AFMT_BPS(f->desc->in);
+
+ f->data = info;
- return (EINVAL);
+ return (0);
+}
+
+static int
+feed_mixer_free(struct pcm_feeder *f)
+{
+ struct feed_mixer_info *info;
+
+ info = f->data;
+ if (info != NULL)
+ free(info, M_DEVBUF);
+
+ f->data = NULL;
+
+ return (0);
}
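
The deleted FEEDMIXER_DATA()/FEEDMIXER_INFOIDX()/FEEDMIXER_CHANNELS() macros packed a 5-bit table index and a 7-bit channel count directly into the f->data pointer to avoid an allocation; feed_mixer_init() now trades that trick for a plain malloc'ed feed_mixer_info, which is easier to read and leaves room for extra state. For reference, the old encoding decoded like this (reconstructed from the removed macros):

    #include <stdint.h>

    /* Old encoding: data = (void *)(((idx & 0x1f) << 7) | (ch & 0x7f)). */
    static void
    feedmixer_data_decode(void *data, uint32_t *idx, uint32_t *channels)
    {
    	uintptr_t d = (uintptr_t)data;

    	*idx = (uint32_t)(d >> 7) & 0x1f;	/* feed_mixer_info_tab index */
    	*channels = (uint32_t)d & 0x7f;		/* channel count */
    }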
static int
feed_mixer_set(struct pcm_feeder *f, int what, int value)
{
+ struct feed_mixer_info *info;
+
+ info = f->data;
switch (what) {
case FEEDMIXER_CHANNELS:
if (value < SND_CHN_MIN || value > SND_CHN_MAX)
return (EINVAL);
- f->data = FEEDMIXER_DATA(FEEDMIXER_INFOIDX(f->data), value);
+ info->channels = (uint32_t)value;
break;
default:
return (EINVAL);
- break;
}
return (0);
@@ -294,8 +248,8 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
if (sz < count)
count = sz;
- info = &feed_mixer_info_tab[FEEDMIXER_INFOIDX(f->data)];
- sz = info->bps * FEEDMIXER_CHANNELS(f->data);
+ info = f->data;
+ sz = info->bps * info->channels;
count = SND_FXROUND(count, sz);
if (count < sz)
return (0);
@@ -328,7 +282,7 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
if ((ch->flags & CHN_F_MMAP) && !(ch->flags & CHN_F_CLOSING))
sndbuf_acquire(ch->bufsoft, NULL,
sndbuf_getfree(ch->bufsoft));
- if (info->mix == NULL) {
+ if (c->flags & CHN_F_PASSTHROUGH) {
/*
* Passthrough. Dump the first digital/passthrough
* channel into destination buffer, and the rest into
@@ -370,7 +324,24 @@ feed_mixer_feed(struct pcm_feeder *f, struct pcm_channel *c, uint8_t *b,
f->desc->out), mcnt);
mcnt = 0;
}
- info->mix(tmp, b, cnt);
+ switch (info->format) {
+ case AFMT_S16_NE:
+ feed_mixer_apply(tmp, b, cnt,
+ AFMT_S16_NE);
+ break;
+ case AFMT_S24_NE:
+ feed_mixer_apply(tmp, b, cnt,
+ AFMT_S24_NE);
+ break;
+ case AFMT_S32_NE:
+ feed_mixer_apply(tmp, b, cnt,
+ AFMT_S32_NE);
+ break;
+ default:
+ feed_mixer_apply(tmp, b, cnt,
+ info->format);
+ break;
+ }
if (cnt > rcnt)
rcnt = cnt;
}
@@ -394,6 +365,7 @@ static struct pcm_feederdesc feeder_mixer_desc[] = {
static kobj_method_t feeder_mixer_methods[] = {
KOBJMETHOD(feeder_init, feed_mixer_init),
+ KOBJMETHOD(feeder_free, feed_mixer_free),
KOBJMETHOD(feeder_set, feed_mixer_set),
KOBJMETHOD(feeder_feed, feed_mixer_feed),
KOBJMETHOD_END
diff --git a/sys/dev/sound/pcm/feeder_rate.c b/sys/dev/sound/pcm/feeder_rate.c
index c8cc67e8fa80..9c29142b9d6b 100644
--- a/sys/dev/sound/pcm/feeder_rate.c
+++ b/sys/dev/sound/pcm/feeder_rate.c
@@ -258,6 +258,7 @@ sysctl_hw_snd_feeder_rate_quality(SYSCTL_HANDLER_ARGS)
  * set resampler quality if and only if it exists as
  * part of the feeder chain and the channel is idle.
*/
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
@@ -268,7 +269,7 @@ sysctl_hw_snd_feeder_rate_quality(SYSCTL_HANDLER_ARGS)
PCM_ACQUIRE(d);
CHN_FOREACH(c, d, channels.pcm) {
CHN_LOCK(c);
- f = chn_findfeeder(c, FEEDER_RATE);
+ f = feeder_find(c, FEEDER_RATE);
if (f == NULL || f->data == NULL || CHN_STARTED(c)) {
CHN_UNLOCK(c);
continue;
@@ -279,11 +280,12 @@ sysctl_hw_snd_feeder_rate_quality(SYSCTL_HANDLER_ARGS)
PCM_RELEASE(d);
PCM_UNLOCK(d);
}
+ bus_topo_unlock();
return (0);
}
SYSCTL_PROC(_hw_snd, OID_AUTO, feeder_rate_quality,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, sizeof(int),
sysctl_hw_snd_feeder_rate_quality, "I",
"sample rate converter quality ("__XSTRING(Z_QUALITY_MIN)"=low .. "
__XSTRING(Z_QUALITY_MAX)"=high)");
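
Dropping CTLFLAG_NEEDGIANT in favor of CTLFLAG_MPSAFE means the handler is now responsible for its own synchronization, hence the explicit bus_topo_lock()/bus_topo_unlock() pair around the devclass iteration above. The general shape of an MPSAFE handler, as a minimal sketch (the value, mutex and sysctl names are hypothetical, and mutex initialization is omitted):

    static struct mtx example_mtx;
    static int example_value;

    static int
    sysctl_example(SYSCTL_HANDLER_ARGS)
    {
    	int err, val;

    	val = example_value;
    	err = sysctl_handle_int(oidp, &val, 0, req);
    	if (err != 0 || req->newptr == NULL)
    		return (err);

    	mtx_lock(&example_mtx);		/* handler supplies its own locking */
    	example_value = val;
    	mtx_unlock(&example_mtx);

    	return (0);
    }

    SYSCTL_PROC(_hw, OID_AUTO, example,
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, 0,
        sysctl_example, "I", "minimal MPSAFE handler sketch");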
@@ -431,11 +433,6 @@ z_roundpow2(int32_t v)
static void
z_feed_zoh(struct z_info *info, uint8_t *dst)
{
-#if 0
- z_copy(info->z_delay +
- (info->z_start * info->channels * info->bps), dst,
- info->channels * info->bps);
-#else
uint32_t cnt;
uint8_t *src;
@@ -449,7 +446,6 @@ z_feed_zoh(struct z_info *info, uint8_t *dst)
do {
*dst++ = *src++;
} while (--cnt != 0);
-#endif
}
/*
@@ -477,10 +473,10 @@ z_feed_linear_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
ch = info->channels; \
\
do { \
- x = _PCM_READ_##SIGN##BIT##_##ENDIAN(sx); \
- y = _PCM_READ_##SIGN##BIT##_##ENDIAN(sy); \
+ x = pcm_sample_read(sx, AFMT_##SIGN##BIT##_##ENDIAN); \
+ y = pcm_sample_read(sy, AFMT_##SIGN##BIT##_##ENDIAN); \
x = Z_LINEAR_INTERPOLATE_##BIT(z, x, y); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, x); \
+ pcm_sample_write(dst, x, AFMT_##SIGN##BIT##_##ENDIAN); \
sx += PCM_##BIT##_BPS; \
sy += PCM_##BIT##_BPS; \
dst += PCM_##BIT##_BPS; \
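
Z_LINEAR_INTERPOLATE_##BIT blends the two neighboring samples x and y using the fractional phase z. The usual fixed-point form of that blend, as a sketch (FRAC_BITS is hypothetical; the driver's Z_SHIFT/Z_ONE constants play the same role):

    #include <stdint.h>

    #define	FRAC_BITS	14

    /* frac is the phase in [0, 1) scaled by (1 << FRAC_BITS). */
    static int32_t
    lerp(int32_t x, int32_t y, uint32_t frac)
    {
    	return (x + (int32_t)(((int64_t)(y - x) * frac) >> FRAC_BITS));
    }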
@@ -508,10 +504,6 @@ z_feed_linear_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
#define Z_CLIP_CHECK(...)
#endif
-#define Z_CLAMP(v, BIT) \
- (((v) > PCM_S##BIT##_MAX) ? PCM_S##BIT##_MAX : \
- (((v) < PCM_S##BIT##_MIN) ? PCM_S##BIT##_MIN : (v)))
-
/*
* Sine Cardinal (SINC) Interpolation. Scaling is done in 64 bit, so
* there's no point to hold the plate any longer. All samples will be
@@ -522,7 +514,7 @@ z_feed_linear_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
c += z >> Z_SHIFT; \
z &= Z_MASK; \
coeff = Z_COEFF_INTERPOLATE(z, z_coeff[c], z_dcoeff[c]); \
- x = _PCM_READ_##SIGN##BIT##_##ENDIAN(p); \
+ x = pcm_sample_read(p, AFMT_##SIGN##BIT##_##ENDIAN); \
v += Z_NORM_##BIT((intpcm64_t)x * coeff); \
z += info->z_dy; \
p adv##= info->channels * PCM_##BIT##_BPS
@@ -580,7 +572,8 @@ z_feed_sinc_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
else \
v >>= Z_COEFF_SHIFT - Z_GUARD_BIT_##BIT; \
Z_CLIP_CHECK(v, BIT); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, Z_CLAMP(v, BIT)); \
+ pcm_sample_write(dst, pcm_clamp(v, AFMT_##SIGN##BIT##_##ENDIAN),\
+ AFMT_##SIGN##BIT##_##ENDIAN); \
} while (ch != 0); \
}
@@ -605,11 +598,11 @@ z_feed_sinc_polyphase_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
z_pcoeff = info->z_pcoeff + \
((info->z_alpha * info->z_size) << 1); \
for (i = info->z_size; i != 0; i--) { \
- x = _PCM_READ_##SIGN##BIT##_##ENDIAN(p); \
+ x = pcm_sample_read(p, AFMT_##SIGN##BIT##_##ENDIAN); \
v += Z_NORM_##BIT((intpcm64_t)x * *z_pcoeff); \
z_pcoeff++; \
p += info->channels * PCM_##BIT##_BPS; \
- x = _PCM_READ_##SIGN##BIT##_##ENDIAN(p); \
+ x = pcm_sample_read(p, AFMT_##SIGN##BIT##_##ENDIAN); \
v += Z_NORM_##BIT((intpcm64_t)x * *z_pcoeff); \
z_pcoeff++; \
p += info->channels * PCM_##BIT##_BPS; \
@@ -619,7 +612,8 @@ z_feed_sinc_polyphase_##SIGN##BIT##ENDIAN(struct z_info *info, uint8_t *dst) \
else \
v >>= Z_COEFF_SHIFT - Z_GUARD_BIT_##BIT; \
Z_CLIP_CHECK(v, BIT); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, Z_CLAMP(v, BIT)); \
+ pcm_sample_write(dst, pcm_clamp(v, AFMT_##SIGN##BIT##_##ENDIAN),\
+ AFMT_##SIGN##BIT##_##ENDIAN); \
} while (ch != 0); \
}
@@ -647,6 +641,8 @@ Z_DECLARE(U, 32, LE)
Z_DECLARE(U, 16, BE)
Z_DECLARE(U, 24, BE)
Z_DECLARE(U, 32, BE)
+Z_DECLARE(F, 32, LE)
+Z_DECLARE(F, 32, BE)
#endif
enum {
@@ -695,6 +691,8 @@ static const struct {
Z_RESAMPLER_ENTRY(U, 16, BE),
Z_RESAMPLER_ENTRY(U, 24, BE),
Z_RESAMPLER_ENTRY(U, 32, BE),
+ Z_RESAMPLER_ENTRY(F, 32, LE),
+ Z_RESAMPLER_ENTRY(F, 32, BE),
#endif
};
@@ -1171,14 +1169,6 @@ z_setup_adaptive_sinc:
info->z_scale = Z_ONE;
}
-#if 0
-#define Z_SCALE_DIV 10000
-#define Z_SCALE_LIMIT(s, v) \
- ((((uint64_t)(s) * (v)) + (Z_SCALE_DIV >> 1)) / Z_SCALE_DIV)
-
- info->z_scale = Z_SCALE_LIMIT(info->z_scale, 9780);
-#endif
-
/* Smallest drift increment. */
info->z_dx = info->z_dy / info->z_gy;
@@ -1672,12 +1662,6 @@ z_resampler_feed_internal(struct pcm_feeder *f, struct pcm_channel *c,
*/
do {
info->z_resample(info, dst);
-#if 0
- startdrift = z_gy2gx(info, 1);
- alphadrift = z_drift(info, startdrift, 1);
- info->z_start += startdrift;
- info->z_alpha += alphadrift;
-#else
info->z_alpha += alphadrift;
if (info->z_alpha < info->z_gy)
info->z_start += startdrift;
@@ -1685,7 +1669,6 @@ z_resampler_feed_internal(struct pcm_feeder *f, struct pcm_channel *c,
info->z_start += startdrift - 1;
info->z_alpha -= info->z_gy;
}
-#endif
dst += align;
#ifdef Z_DIAGNOSTIC
info->z_cycle++;
diff --git a/sys/dev/sound/pcm/feeder_volume.c b/sys/dev/sound/pcm/feeder_volume.c
index 452d8788a5a5..ddcbf29804f3 100644
--- a/sys/dev/sound/pcm/feeder_volume.c
+++ b/sys/dev/sound/pcm/feeder_volume.c
@@ -63,10 +63,13 @@ feed_volume_##SIGN##BIT##ENDIAN(int *vol, int *matrix, \
do { \
dst -= PCM_##BIT##_BPS; \
i--; \
- x = PCM_READ_##SIGN##BIT##_##ENDIAN(dst); \
+ x = pcm_sample_read_calc(dst, \
+ AFMT_##SIGN##BIT##_##ENDIAN); \
v = FEEDVOLUME_CALC##BIT(x, vol[matrix[i]]); \
- x = PCM_CLAMP_##SIGN##BIT(v); \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, x); \
+ x = pcm_clamp_calc(v, \
+ AFMT_##SIGN##BIT##_##ENDIAN); \
+ pcm_sample_write(dst, x, \
+ AFMT_##SIGN##BIT##_##ENDIAN); \
} while (i != 0); \
} while (--count != 0); \
}
@@ -90,6 +93,8 @@ FEEDVOLUME_DECLARE(U, 32, LE)
FEEDVOLUME_DECLARE(U, 16, BE)
FEEDVOLUME_DECLARE(U, 24, BE)
FEEDVOLUME_DECLARE(U, 32, BE)
+FEEDVOLUME_DECLARE(F, 32, LE)
+FEEDVOLUME_DECLARE(F, 32, BE)
#endif
struct feed_volume_info {
@@ -128,7 +133,9 @@ static const struct {
FEEDVOLUME_ENTRY(U, 32, LE),
FEEDVOLUME_ENTRY(U, 16, BE),
FEEDVOLUME_ENTRY(U, 24, BE),
- FEEDVOLUME_ENTRY(U, 32, BE)
+ FEEDVOLUME_ENTRY(U, 32, BE),
+ FEEDVOLUME_ENTRY(F, 32, LE),
+ FEEDVOLUME_ENTRY(F, 32, BE),
#endif
};
@@ -337,7 +344,7 @@ feeder_volume_apply_matrix(struct pcm_feeder *f, struct pcmchan_matrix *m)
info = f->data;
- for (i = 0; i < (sizeof(info->matrix) / sizeof(info->matrix[0])); i++) {
+ for (i = 0; i < nitems(info->matrix); i++) {
if (i < m->channels)
info->matrix[i] = m->map[i].type;
else
diff --git a/sys/dev/sound/pcm/g711.h b/sys/dev/sound/pcm/g711.h
index fe9ddb651c8b..481ef368e144 100644
--- a/sys/dev/sound/pcm/g711.h
+++ b/sys/dev/sound/pcm/g711.h
@@ -176,48 +176,4 @@
#define _INTPCM_TO_G711(t, v) ((t)[(uint8_t)((v) ^ 0x80)])
-#define G711_DECLARE_TABLE(t) \
-static const struct { \
- const uint8_t ulaw_to_u8[G711_TABLE_SIZE]; \
- const uint8_t alaw_to_u8[G711_TABLE_SIZE]; \
- const uint8_t u8_to_ulaw[G711_TABLE_SIZE]; \
- const uint8_t u8_to_alaw[G711_TABLE_SIZE]; \
-} t = { \
- ULAW_TO_U8, ALAW_TO_U8, \
- U8_TO_ULAW, U8_TO_ALAW \
-}
-
-#define G711_DECLARE_OP(t) \
-static __inline intpcm_t \
-pcm_read_ulaw(uint8_t v) \
-{ \
- \
- return (_G711_TO_INTPCM((t).ulaw_to_u8, v)); \
-} \
- \
-static __inline intpcm_t \
-pcm_read_alaw(uint8_t v) \
-{ \
- \
- return (_G711_TO_INTPCM((t).alaw_to_u8, v)); \
-} \
- \
-static __inline void \
-pcm_write_ulaw(uint8_t *dst, intpcm_t v) \
-{ \
- \
- *dst = _INTPCM_TO_G711((t).u8_to_ulaw, v); \
-} \
- \
-static __inline void \
-pcm_write_alaw(uint8_t *dst, intpcm_t v) \
-{ \
- \
- *dst = _INTPCM_TO_G711((t).u8_to_alaw, v); \
-}
-
-#define G711_DECLARE(t) \
- G711_DECLARE_TABLE(t); \
- G711_DECLARE_OP(t)
-
#endif /* !_SND_G711_H_ */
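
With the declaration macros gone, g711.h keeps only the 256-entry tables and the two index macros, so conversion remains a single table lookup in each direction. A sketch of the mu-law read path (the widening shift matches the removed intpcm.h code; the signedness flip via ^ 0x80 is an assumption about the table encoding, mirroring _INTPCM_TO_G711 above):

    #include <stdint.h>

    /* Decode one mu-law byte to a 32-bit-range PCM sample via the table. */
    static inline int32_t
    g711_read_ulaw(uint8_t v, const uint8_t *ulaw_to_u8)
    {
    	return (((int32_t)(int8_t)(ulaw_to_u8[v] ^ 0x80)) << 24);
    }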
diff --git a/sys/dev/sound/pcm/intpcm.h b/sys/dev/sound/pcm/intpcm.h
deleted file mode 100644
index 1e85535feec1..000000000000
--- a/sys/dev/sound/pcm/intpcm.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2008-2009 Ariff Abdullah <ariff@FreeBSD.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _SND_INTPCM_H_
-#define _SND_INTPCM_H_
-
-typedef intpcm_t intpcm_read_t(uint8_t *);
-typedef void intpcm_write_t(uint8_t *, intpcm_t);
-
-extern intpcm_read_t *feeder_format_read_op(uint32_t);
-extern intpcm_write_t *feeder_format_write_op(uint32_t);
-
-#define INTPCM_DECLARE_OP_WRITE(SIGN, BIT, ENDIAN, SHIFT) \
-static __inline void \
-intpcm_write_##SIGN##BIT##ENDIAN(uint8_t *dst, intpcm_t v) \
-{ \
- \
- _PCM_WRITE_##SIGN##BIT##_##ENDIAN(dst, v >> SHIFT); \
-}
-
-#define INTPCM_DECLARE_OP_8(SIGN, ENDIAN) \
-static __inline intpcm_t \
-intpcm_read_##SIGN##8##ENDIAN(uint8_t *src) \
-{ \
- \
- return (_PCM_READ_##SIGN##8##_##ENDIAN(src) << 24); \
-} \
-INTPCM_DECLARE_OP_WRITE(SIGN, 8, ENDIAN, 24)
-
-#define INTPCM_DECLARE_OP_16(SIGN, ENDIAN) \
-static __inline intpcm_t \
-intpcm_read_##SIGN##16##ENDIAN(uint8_t *src) \
-{ \
- \
- return (_PCM_READ_##SIGN##16##_##ENDIAN(src) << 16); \
-} \
-INTPCM_DECLARE_OP_WRITE(SIGN, 16, ENDIAN, 16)
-
-#define INTPCM_DECLARE_OP_24(SIGN, ENDIAN) \
-static __inline intpcm_t \
-intpcm_read_##SIGN##24##ENDIAN(uint8_t *src) \
-{ \
- \
- return (_PCM_READ_##SIGN##24##_##ENDIAN(src) << 8); \
-} \
-INTPCM_DECLARE_OP_WRITE(SIGN, 24, ENDIAN, 8)
-
-#define INTPCM_DECLARE_OP_32(SIGN, ENDIAN) \
-static __inline intpcm_t \
-intpcm_read_##SIGN##32##ENDIAN(uint8_t *src) \
-{ \
- \
- return (_PCM_READ_##SIGN##32##_##ENDIAN(src)); \
-} \
- \
-static __inline void \
-intpcm_write_##SIGN##32##ENDIAN(uint8_t *dst, intpcm_t v) \
-{ \
- \
- _PCM_WRITE_##SIGN##32##_##ENDIAN(dst, v); \
-}
-
-#define INTPCM_DECLARE(t) \
- \
-G711_DECLARE_TABLE(t); \
- \
-static __inline intpcm_t \
-intpcm_read_ulaw(uint8_t *src) \
-{ \
- \
- return (_G711_TO_INTPCM((t).ulaw_to_u8, *src) << 24); \
-} \
- \
-static __inline intpcm_t \
-intpcm_read_alaw(uint8_t *src) \
-{ \
- \
- return (_G711_TO_INTPCM((t).alaw_to_u8, *src) << 24); \
-} \
- \
-static __inline void \
-intpcm_write_ulaw(uint8_t *dst, intpcm_t v) \
-{ \
- \
- *dst = _INTPCM_TO_G711((t).u8_to_ulaw, v >> 24); \
-} \
- \
-static __inline void \
-intpcm_write_alaw(uint8_t *dst, intpcm_t v) \
-{ \
- \
- *dst = _INTPCM_TO_G711((t).u8_to_alaw, v >> 24); \
-} \
- \
-INTPCM_DECLARE_OP_8(S, NE) \
-INTPCM_DECLARE_OP_16(S, LE) \
-INTPCM_DECLARE_OP_16(S, BE) \
-INTPCM_DECLARE_OP_24(S, LE) \
-INTPCM_DECLARE_OP_24(S, BE) \
-INTPCM_DECLARE_OP_32(S, LE) \
-INTPCM_DECLARE_OP_32(S, BE) \
-INTPCM_DECLARE_OP_8(U, NE) \
-INTPCM_DECLARE_OP_16(U, LE) \
-INTPCM_DECLARE_OP_16(U, BE) \
-INTPCM_DECLARE_OP_24(U, LE) \
-INTPCM_DECLARE_OP_24(U, BE) \
-INTPCM_DECLARE_OP_32(U, LE) \
-INTPCM_DECLARE_OP_32(U, BE)
-
-#endif /* !_SND_INTPCM_H_ */
diff --git a/sys/dev/sound/pcm/matrix.h b/sys/dev/sound/pcm/matrix.h
index 14f3a3410a70..e2798c651536 100644
--- a/sys/dev/sound/pcm/matrix.h
+++ b/sys/dev/sound/pcm/matrix.h
@@ -217,4 +217,644 @@
(x)[SND_VOL_C_MASTER][z]) / \
(x)[SND_VOL_C_MASTER][SND_CHN_T_VOL_0DB]) \
+/*
+ * Standard matrix maps:
+ *
+ * struct pcmchan_matrix {
+ *	.id = Matrix identity (see matrix.h). Custom-defined maps should
+ *	      use one of SND_CHN_MATRIX_MISC (for general purposes) or
+ *	      SND_CHN_MATRIX_DRV (hardware drivers).
+ *	.channels = Total number of channels, including any 'extended'
+ *		    channels (the X.ext notion, mostly LFE).
+ * .ext = Total number of extended channels (LFE).
+ * .map = {
+ * Sequences of channel type and interleave structure.
+ * [interleave offset] = {
+ *		.members = Masks of channels that are acceptable as
+ *			   members of this channel type.
+ * member of this channel type.
+ * },
+ * [total channels] = {
+ * .type = Maximum channels marker (SND_CHN_T_MAX).
+ * .members = 0 (no channels allowed here).
+ * },
+ * },
+ * .mask = Mask of channels that exist in this map.
+ * .offset = {
+ *		channel offsets that directly translate to the above
+ *		interleave offsets according to the SND_CHN_T_* definitions.
+ * }
+ * };
+ *
+ * Rule of thumb: Avoid using SND_CHN_T_* entries that are marked with XXX
+ * (matrix.h), or be prepared for the horror to come.
+ *
+ */
+
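+
Each SND_CHN_MATRIX_MAP_* macro below expands to a complete struct pcmchan_matrix initializer, so a standard layout can be instantiated directly, for example:

    /* A 5.1 channel map built from the standard set below. */
    static struct pcmchan_matrix map_5dot1 = SND_CHN_MATRIX_MAP_5_1;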
+#define SND_CHN_MATRIX_MAP_1_0 { \
+ .id = SND_CHN_MATRIX_1_0, \
+ .channels = 1, \
+ .ext = 0, \
+ .map = { \
+ /* Mono, center, etc. */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ [1] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_FC, \
+ .offset = { 0, 0, 0, 0, 0, 0, -1, -1, 0, \
+ 0, 0, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_2_0 { \
+ .id = SND_CHN_MATRIX_2_0, \
+ .channels = 2, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
+ }, \
+ [2] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR, \
+ .offset = { 0, 1, -1, -1, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_2_1 { \
+ .id = SND_CHN_MATRIX_2_1, \
+ .channels = 3, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* LFE */ \
+ [2] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ [3] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_LF, \
+ .offset = { 0, 1, -1, 2, -1, -1, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_3_0 { /* 3 channels default */ \
+ .id = SND_CHN_MATRIX_3_0, \
+ .channels = 3, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Center */ \
+ [2] = { \
+ .type = SND_CHN_T_BC, \
+ .members = \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR \
+ }, \
+ [3] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BC, \
+ .offset = { 0, 1, -1, -1, -1, -1, -1, -1, 2, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_3_1 { \
+ .id = SND_CHN_MATRIX_3_1, \
+ .channels = 4, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* LFE */ \
+ [2] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ /* Rear Center */ \
+ [3] = { \
+ .type = SND_CHN_T_BC, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR \
+ }, \
+ [4] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BC, \
+ .offset = { 0, 1, -1, 2, -1, -1, -1, -1, 3, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_4_0 { \
+ .id = SND_CHN_MATRIX_4_0, \
+ .channels = 4, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
+ }, \
+ [4] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR, \
+ .offset = { 0, 1, -1, -1, 2, 3, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_4_1 { \
+ .id = SND_CHN_MATRIX_4_1, \
+ .channels = 5, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* LFE */ \
+ [4] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ [5] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_LF, \
+ .offset = { 0, 1, -1, 4, 2, 3, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_5_0 { /* 5 channels default */ \
+ .id = SND_CHN_MATRIX_5_0, \
+ .channels = 5, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = SND_CHN_T_MASK_FC \
+ }, \
+ [5] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC, \
+ .offset = { 0, 1, 4, -1, 2, 3, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_5_1 { /* 6 channels default */ \
+ .id = SND_CHN_MATRIX_5_1, \
+ .channels = 6, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = SND_CHN_T_MASK_FC \
+ }, \
+ /* LFE */ \
+ [5] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ [6] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF, \
+ .offset = { 0, 1, 4, 5, 2, 3, -1, -1, -1, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_6_0 { \
+ .id = SND_CHN_MATRIX_6_0, \
+ .channels = 6, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SR \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = SND_CHN_T_MASK_FC \
+ }, \
+ /* Rear Center */ \
+ [5] = { \
+ .type = SND_CHN_T_BC, \
+ .members = SND_CHN_T_MASK_BC \
+ }, \
+ [6] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_BC, \
+ .offset = { 0, 1, 4, -1, 2, 3, -1, -1, 5, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_6_1 { \
+ .id = SND_CHN_MATRIX_6_1, \
+ .channels = 7, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_SL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_SR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_SL \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_SR \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = SND_CHN_T_MASK_FC \
+ }, \
+ /* LFE */ \
+ [5] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ /* Rear Center */ \
+ [6] = { \
+ .type = SND_CHN_T_BC, \
+ .members = SND_CHN_T_MASK_BC \
+ }, \
+ [7] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_BC, \
+ .offset = { 0, 1, 4, 5, 2, 3, -1, -1, 6, \
+ -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_7_0 { \
+ .id = SND_CHN_MATRIX_7_0, \
+ .channels = 7, \
+ .ext = 0, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = \
+ SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = \
+ SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_LF \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
+ SND_CHN_T_MASK_LF \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF \
+ }, \
+ /* Side Left */ \
+ [5] = { \
+ .type = SND_CHN_T_SL, \
+ .members = \
+ SND_CHN_T_MASK_SL | SND_CHN_T_MASK_LF \
+ }, \
+ /* Side Right */ \
+ [6] = { \
+ .type = SND_CHN_T_SR, \
+ .members = \
+ SND_CHN_T_MASK_SR | SND_CHN_T_MASK_LF \
+ }, \
+ [7] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC | \
+ SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR, \
+ .offset = { 0, 1, 4, -1, 2, 3, -1, -1, -1, \
+ 5, 6, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
+#define SND_CHN_MATRIX_MAP_7_1 { \
+ .id = SND_CHN_MATRIX_7_1, \
+ .channels = 8, \
+ .ext = 1, \
+ .map = { \
+ /* Left */ \
+ [0] = { \
+ .type = SND_CHN_T_FL, \
+ .members = SND_CHN_T_MASK_FL \
+ }, \
+ /* Right */ \
+ [1] = { \
+ .type = SND_CHN_T_FR, \
+ .members = SND_CHN_T_MASK_FR \
+ }, \
+ /* Rear Left */ \
+ [2] = { \
+ .type = SND_CHN_T_BL, \
+ .members = \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC \
+ }, \
+ /* Rear Right */ \
+ [3] = { \
+ .type = SND_CHN_T_BR, \
+ .members = \
+ SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC \
+ }, \
+ /* Center */ \
+ [4] = { \
+ .type = SND_CHN_T_FC, \
+ .members = SND_CHN_T_MASK_FC \
+ }, \
+ /* LFE */ \
+ [5] = { \
+ .type = SND_CHN_T_LF, \
+ .members = SND_CHN_T_MASK_LF \
+ }, \
+ /* Side Left */ \
+ [6] = { \
+ .type = SND_CHN_T_SL, \
+ .members = SND_CHN_T_MASK_SL \
+ }, \
+ /* Side Right */ \
+ [7] = { \
+ .type = SND_CHN_T_SR, \
+ .members = SND_CHN_T_MASK_SR \
+ }, \
+ [8] = { \
+ .type = SND_CHN_T_MAX, \
+ .members = 0 \
+ } \
+ }, \
+ .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
+ SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
+ SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
+ SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR, \
+ .offset = { 0, 1, 4, 5, 2, 3, -1, -1, -1, \
+ 6, 7, -1, -1, -1, -1, -1, -1, -1 } \
+}
+
#endif /* !_SND_MATRIX_H_ */
diff --git a/sys/dev/sound/pcm/matrix_map.h b/sys/dev/sound/pcm/matrix_map.h
deleted file mode 100644
index ec0619614273..000000000000
--- a/sys/dev/sound/pcm/matrix_map.h
+++ /dev/null
@@ -1,672 +0,0 @@
-/*-
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (c) 2009 Ariff Abdullah <ariff@FreeBSD.org>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifndef _SND_MATRIX_MAP_H_
-#define _SND_MATRIX_MAP_H_
-
-/*
- * Standard matrix maps:
- *
- * struct pcmchan_matrix {
- * .id = Matrix identity (see matrix.h). Custom defined should use
- * one of SND_CHN_MATRIX_MISC (for whatever purposes) or
- * SND_CHN_MATRIX_DRV (hardware driver).
- * .channels = Total number of channels, including whatever 'extended'
- * (the X.ext notions, mostly LFE).
- * .ext = Total number of extended channels (LFE).
- * .map = {
- * Sequences of channel type and interleave structure.
- * [interleave offset] = {
- * .type = channel type (see matrix.h).
- * .members = Masks of channels that is acceptable as a
- * member of this channel type.
- * },
- * [total channels] = {
- * .type = Maximum channels marker (SND_CHN_T_MAX).
- * .members = 0 (no channels allowed here).
- * },
- * },
- * .mask = Mask of channels that exist in this map.
- * .offset = {
- * channel offset that directly translate to the above interleave
- * offset according to SND_CHN_T_* definitions.
- * }
- * };
- *
- * Rule of thumb: Avoid using SND_CHN_T_* that is marked with XXX (matrix.h),
- * or be prepared for the horror to come.
- *
- */
-
-#define SND_CHN_MATRIX_MAP_1_0 { \
- .id = SND_CHN_MATRIX_1_0, \
- .channels = 1, \
- .ext = 0, \
- .map = { \
- /* Mono, center, etc. */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL | \
- SND_CHN_T_MASK_SR \
- }, \
- [1] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_FC, \
- .offset = { 0, 0, 0, 0, 0, 0, -1, -1, 0, \
- 0, 0, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_2_0 { \
- .id = SND_CHN_MATRIX_2_0, \
- .channels = 2, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
- }, \
- [2] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR, \
- .offset = { 0, 1, -1, -1, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_2_1 { \
- .id = SND_CHN_MATRIX_2_1, \
- .channels = 3, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SR \
- }, \
- /* LFE */ \
- [2] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- [3] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_LF, \
- .offset = { 0, 1, -1, 2, -1, -1, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_3_0 { /* 3 channels default */ \
- .id = SND_CHN_MATRIX_3_0, \
- .channels = 3, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SR \
- }, \
- /* Rear Center */ \
- [2] = { \
- .type = SND_CHN_T_BC, \
- .members = \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR \
- }, \
- [3] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BC, \
- .offset = { 0, 1, -1, -1, -1, -1, -1, -1, 2, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_3_1 { \
- .id = SND_CHN_MATRIX_3_1, \
- .channels = 4, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_SR \
- }, \
- /* LFE */ \
- [2] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- /* Rear Center */ \
- [3] = { \
- .type = SND_CHN_T_BC, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR \
- }, \
- [4] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BC, \
- .offset = { 0, 1, -1, 2, -1, -1, -1, -1, 3, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_4_0 { \
- .id = SND_CHN_MATRIX_4_0, \
- .channels = 4, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
- }, \
- [4] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR, \
- .offset = { 0, 1, -1, -1, 2, 3, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_4_1 { \
- .id = SND_CHN_MATRIX_4_1, \
- .channels = 5, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SR \
- }, \
- /* LFE */ \
- [4] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- [5] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_LF, \
- .offset = { 0, 1, -1, 4, 2, 3, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_5_0 { /* 5 channels default */ \
- .id = SND_CHN_MATRIX_5_0, \
- .channels = 5, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BL | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_LF | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_BC | SND_CHN_T_MASK_SR \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = SND_CHN_T_MASK_FC \
- }, \
- [5] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC, \
- .offset = { 0, 1, 4, -1, 2, 3, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_5_1 { /* 6 channels default */ \
- .id = SND_CHN_MATRIX_5_1, \
- .channels = 6, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_SR \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = SND_CHN_T_MASK_FC \
- }, \
- /* LFE */ \
- [5] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- [6] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF, \
- .offset = { 0, 1, 4, 5, 2, 3, -1, -1, -1, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_6_0 { \
- .id = SND_CHN_MATRIX_6_0, \
- .channels = 6, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SR \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = SND_CHN_T_MASK_FC \
- }, \
- /* Rear Center */ \
- [5] = { \
- .type = SND_CHN_T_BC, \
- .members = SND_CHN_T_MASK_BC \
- }, \
- [6] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_BC, \
- .offset = { 0, 1, 4, -1, 2, 3, -1, -1, 5, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_6_1 { \
- .id = SND_CHN_MATRIX_6_1, \
- .channels = 7, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_SL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_SR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_SL \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_SR \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = SND_CHN_T_MASK_FC \
- }, \
- /* LFE */ \
- [5] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- /* Rear Center */ \
- [6] = { \
- .type = SND_CHN_T_BC, \
- .members = SND_CHN_T_MASK_BC \
- }, \
- [7] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_BC, \
- .offset = { 0, 1, 4, 5, 2, 3, -1, -1, 6, \
- -1, -1, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_7_0 { \
- .id = SND_CHN_MATRIX_7_0, \
- .channels = 7, \
- .ext = 0, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = \
- SND_CHN_T_MASK_FL | SND_CHN_T_MASK_LF \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = \
- SND_CHN_T_MASK_FR | SND_CHN_T_MASK_LF \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_LF \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC | \
- SND_CHN_T_MASK_LF \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF \
- }, \
- /* Side Left */ \
- [5] = { \
- .type = SND_CHN_T_SL, \
- .members = \
- SND_CHN_T_MASK_SL | SND_CHN_T_MASK_LF \
- }, \
- /* Side Right */ \
- [6] = { \
- .type = SND_CHN_T_SR, \
- .members = \
- SND_CHN_T_MASK_SR | SND_CHN_T_MASK_LF \
- }, \
- [7] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC | \
- SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR, \
- .offset = { 0, 1, 4, -1, 2, 3, -1, -1, -1, \
- 5, 6, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#define SND_CHN_MATRIX_MAP_7_1 { \
- .id = SND_CHN_MATRIX_7_1, \
- .channels = 8, \
- .ext = 1, \
- .map = { \
- /* Left */ \
- [0] = { \
- .type = SND_CHN_T_FL, \
- .members = SND_CHN_T_MASK_FL \
- }, \
- /* Right */ \
- [1] = { \
- .type = SND_CHN_T_FR, \
- .members = SND_CHN_T_MASK_FR \
- }, \
- /* Rear Left */ \
- [2] = { \
- .type = SND_CHN_T_BL, \
- .members = \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BC \
- }, \
- /* Rear Right */ \
- [3] = { \
- .type = SND_CHN_T_BR, \
- .members = \
- SND_CHN_T_MASK_BR | SND_CHN_T_MASK_BC \
- }, \
- /* Center */ \
- [4] = { \
- .type = SND_CHN_T_FC, \
- .members = SND_CHN_T_MASK_FC \
- }, \
- /* LFE */ \
- [5] = { \
- .type = SND_CHN_T_LF, \
- .members = SND_CHN_T_MASK_LF \
- }, \
- /* Side Left */ \
- [6] = { \
- .type = SND_CHN_T_SL, \
- .members = SND_CHN_T_MASK_SL \
- }, \
- /* Side Right */ \
- [7] = { \
- .type = SND_CHN_T_SR, \
- .members = SND_CHN_T_MASK_SR \
- }, \
- [8] = { \
- .type = SND_CHN_T_MAX, \
- .members = 0 \
- } \
- }, \
- .mask = SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR | \
- SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR | \
- SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF | \
- SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR, \
- .offset = { 0, 1, 4, 5, 2, 3, -1, -1, -1, \
- 6, 7, -1, -1, -1, -1, -1, -1, -1 } \
-}
-
-#endif /* !_SND_MATRIX_MAP_H_ */
diff --git a/sys/dev/sound/pcm/mixer.c b/sys/dev/sound/pcm/mixer.c
index 3e197b120c9d..f281dff36248 100644
--- a/sys/dev/sound/pcm/mixer.c
+++ b/sys/dev/sound/pcm/mixer.c
@@ -105,11 +105,6 @@ static struct cdevsw mixer_cdevsw = {
.d_name = "mixer",
};
-/**
- * Keeps a count of mixer devices; used only by OSSv4 SNDCTL_SYSINFO ioctl.
- */
-int mixer_count = 0;
-
static eventhandler_tag mixer_ehtag = NULL;
static struct cdev *
@@ -151,7 +146,7 @@ mixer_set_softpcmvol(struct snd_mixer *m, struct snddev_info *d,
struct pcm_channel *c;
int dropmtx, acquiremtx;
- if (!PCM_REGISTERED(d) || PCM_DETACHING(d))
+ if (!PCM_REGISTERED(d))
return (EINVAL);
if (mtx_owned(m->lock))
@@ -204,7 +199,7 @@ mixer_set_eq(struct snd_mixer *m, struct snddev_info *d,
else
return (EINVAL);
- if (!PCM_REGISTERED(d) || PCM_DETACHING(d))
+ if (!PCM_REGISTERED(d))
return (EINVAL);
if (mtx_owned(m->lock))
@@ -229,7 +224,7 @@ mixer_set_eq(struct snd_mixer *m, struct snddev_info *d,
CHN_FOREACH(c, d, channels.pcm.busy) {
CHN_LOCK(c);
- f = chn_findfeeder(c, FEEDER_EQ);
+ f = feeder_find(c, FEEDER_EQ);
if (f != NULL)
(void)FEEDER_SET(f, tone, level);
CHN_UNLOCK(c);
@@ -650,7 +645,7 @@ mixer_obj_create(device_t dev, kobj_class_t cls, void *devinfo,
int type, const char *desc)
{
struct snd_mixer *m;
- int i;
+ size_t i;
KASSERT(dev != NULL && cls != NULL && devinfo != NULL,
("%s(): NULL data dev=%p cls=%p devinfo=%p",
@@ -671,7 +666,7 @@ mixer_obj_create(device_t dev, kobj_class_t cls, void *devinfo,
m->devinfo = devinfo;
m->busy = 0;
m->dev = dev;
- for (i = 0; i < (sizeof(m->parent) / sizeof(m->parent[0])); i++) {
+ for (i = 0; i < nitems(m->parent); i++) {
m->parent[i] = SOUND_MIXER_NONE;
m->child[i] = 0;
m->realdev[i] = i;
@@ -701,22 +696,13 @@ mixer_delete(struct snd_mixer *m)
snd_mtxfree(m->lock);
kobj_delete((kobj_t)m, M_MIXER);
- --mixer_count;
-
return (0);
}
struct snd_mixer *
mixer_create(device_t dev, kobj_class_t cls, void *devinfo, const char *desc)
{
- struct snd_mixer *m;
-
- m = mixer_obj_create(dev, cls, devinfo, MIXER_TYPE_SECONDARY, desc);
-
- if (m != NULL)
- ++mixer_count;
-
- return (m);
+ return (mixer_obj_create(dev, cls, devinfo, MIXER_TYPE_SECONDARY, desc));
}
int
@@ -764,13 +750,11 @@ mixer_init(device_t dev, kobj_class_t cls, void *devinfo)
mixer_setrecsrc(m, 0); /* Set default input. */
- pdev = make_dev(&mixer_cdevsw, SND_DEV_CTL, UID_ROOT, GID_WHEEL, 0666,
- "mixer%d", unit);
+ pdev = make_dev(&mixer_cdevsw, 0, UID_ROOT, GID_WHEEL, 0666, "mixer%d",
+ unit);
pdev->si_drv1 = m;
snddev->mixer_dev = pdev;
- ++mixer_count;
-
if (bootverbose) {
for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) {
if (!(m->devs & (1 << i)))
@@ -839,8 +823,6 @@ mixer_uninit(device_t dev)
d->mixer_dev = NULL;
- --mixer_count;
-
return 0;
}
@@ -1071,7 +1053,7 @@ mixer_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
m = i_dev->si_drv1;
d = device_get_softc(m->dev);
- if (!PCM_REGISTERED(d) || PCM_DETACHING(d))
+ if (!PCM_REGISTERED(d))
return (EBADF);
/* XXX Need Giant magic entry ??? */
@@ -1227,7 +1209,7 @@ mixer_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
return (EBADF);
d = device_get_softc(((struct snd_mixer *)i_dev->si_drv1)->dev);
- if (!PCM_REGISTERED(d) || PCM_DETACHING(d))
+ if (!PCM_REGISTERED(d))
return (EBADF);
PCM_GIANT_ENTER(d);
@@ -1282,9 +1264,13 @@ mixer_ioctl_cmd(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode,
case SNDCTL_CARDINFO:
return (sound_oss_card_info((oss_card_info *)arg));
case SNDCTL_AUDIOINFO:
+ return (dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg,
+ false));
case SNDCTL_AUDIOINFO_EX:
- case SNDCTL_ENGINEINFO:
- return (dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg));
+ return (dsp_oss_audioinfo(i_dev, (oss_audioinfo *)arg,
+ true));
+ case SNDCTL_ENGINEINFO:
+ return (dsp_oss_engineinfo(i_dev, (oss_audioinfo *)arg));
case SNDCTL_MIXERINFO:
return (mixer_oss_mixerinfo(i_dev, (oss_mixerinfo *)arg));
}
@@ -1379,7 +1365,7 @@ mixer_clone(void *arg,
bus_topo_lock();
d = devclass_get_softc(pcm_devclass, snd_unit);
/* See related comment in dsp_clone(). */
- if (d != NULL && PCM_REGISTERED(d) && d->mixer_dev != NULL) {
+ if (PCM_REGISTERED(d) && d->mixer_dev != NULL) {
*dev = d->mixer_dev;
dev_ref(*dev);
}
@@ -1407,6 +1393,17 @@ mixer_sysuninit(void *p)
SYSINIT(mixer_sysinit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, mixer_sysinit, NULL);
SYSUNINIT(mixer_sysuninit, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, mixer_sysuninit, NULL);
+static void
+mixer_oss_mixerinfo_unavail(oss_mixerinfo *mi, int unit)
+{
+ bzero(mi, sizeof(*mi));
+ mi->dev = unit;
+ snprintf(mi->id, sizeof(mi->id), "mixer%d (n/a)", unit);
+ snprintf(mi->name, sizeof(mi->name), "pcm%d:mixer (unavailable)", unit);
+ mi->card_number = unit;
+ mi->legacy_device = unit;
+}
+
/**
* @brief Handler for SNDCTL_MIXERINFO
*
@@ -1431,7 +1428,7 @@ mixer_oss_mixerinfo(struct cdev *i_dev, oss_mixerinfo *mi)
{
struct snddev_info *d;
struct snd_mixer *m;
- int nmix, i;
+ int i;
/*
* If probing the device handling the ioctl, make sure it's a mixer
@@ -1442,17 +1439,23 @@ mixer_oss_mixerinfo(struct cdev *i_dev, oss_mixerinfo *mi)
d = NULL;
m = NULL;
- nmix = 0;
/*
* There's a 1:1 relationship between mixers and PCM devices, so
* begin by iterating over PCM devices and search for our mixer.
*/
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
- if (!PCM_REGISTERED(d) || PCM_DETACHING(d))
- continue;
+ if (!PCM_REGISTERED(d)) {
+ if ((mi->dev == -1 && i == snd_unit) || mi->dev == i) {
+ mixer_oss_mixerinfo_unavail(mi, i);
+ bus_topo_unlock();
+ return (0);
+ } else
+ continue;
+ }
/* XXX Need Giant magic entry */
@@ -1460,92 +1463,100 @@ mixer_oss_mixerinfo(struct cdev *i_dev, oss_mixerinfo *mi)
PCM_UNLOCKASSERT(d);
PCM_LOCK(d);
- if (d->mixer_dev != NULL && d->mixer_dev->si_drv1 != NULL &&
- ((mi->dev == -1 && d->mixer_dev == i_dev) ||
- mi->dev == nmix)) {
- m = d->mixer_dev->si_drv1;
- mtx_lock(m->lock);
-
- /*
- * At this point, the following synchronization stuff
- * has happened:
- * - a specific PCM device is locked.
- * - a specific mixer device has been locked, so be
- * sure to unlock when existing.
- */
- bzero((void *)mi, sizeof(*mi));
- mi->dev = nmix;
- snprintf(mi->id, sizeof(mi->id), "mixer%d", i);
- strlcpy(mi->name, m->name, sizeof(mi->name));
- mi->modify_counter = m->modify_counter;
- mi->card_number = i;
- /*
- * Currently, FreeBSD assumes 1:1 relationship between
- * a pcm and mixer devices, so this is hardcoded to 0.
- */
- mi->port_number = 0;
-
- /**
- * @todo Fill in @sa oss_mixerinfo::mixerhandle.
- * @note From 4Front: "mixerhandle is an arbitrary
- * string that identifies the mixer better than
- * the device number (mixerinfo.dev). Device
- * numbers may change depending on the order the
- * drivers are loaded. However the handle should
- * remain the same provided that the sound card
- * is not moved to another PCI slot."
- */
+ if (!((d->mixer_dev == i_dev && mi->dev == -1) ||
+ mi->dev == i)) {
+ PCM_UNLOCK(d);
+ continue;
+ }
- /**
- * @note
- * @sa oss_mixerinfo::magic is a reserved field.
- *
- * @par
- * From 4Front: "magic is usually 0. However some
- * devices may have dedicated setup utilities and the
- * magic field may contain an unique driver specific
- * value (managed by [4Front])."
- */
+ if (d->mixer_dev->si_drv1 == NULL) {
+ mixer_oss_mixerinfo_unavail(mi, i);
+ PCM_UNLOCK(d);
+ bus_topo_unlock();
+ return (0);
+ }
- mi->enabled = device_is_attached(m->dev) ? 1 : 0;
- /**
- * The only flag for @sa oss_mixerinfo::caps is
- * currently MIXER_CAP_VIRTUAL, which I'm not sure we
- * really worry about.
- */
- /**
- * Mixer extensions currently aren't supported, so
- * leave @sa oss_mixerinfo::nrext blank for now.
- */
- /**
- * @todo Fill in @sa oss_mixerinfo::priority (requires
- * touching drivers?)
- * @note The priority field is for mixer applets to
- * determine which mixer should be the default, with 0
- * being least preferred and 10 being most preferred.
- * From 4Front: "OSS drivers like ICH use higher
- * values (10) because such chips are known to be used
- * only on motherboards. Drivers for high end pro
- * devices use 0 because they will never be the
- * default mixer. Other devices use values 1 to 9
- * depending on the estimated probability of being the
- * default device.
- *
- * XXX Described by Hannu@4Front, but not found in
- * soundcard.h.
- strlcpy(mi->devnode, devtoname(d->mixer_dev),
- sizeof(mi->devnode));
- mi->legacy_device = i;
- */
- mtx_unlock(m->lock);
- } else
- ++nmix;
+ m = d->mixer_dev->si_drv1;
+ mtx_lock(m->lock);
+
+ /*
+ * At this point, the following synchronization stuff
+ * has happened:
+ * - a specific PCM device is locked.
+ * - a specific mixer device has been locked, so be
+	 * sure to unlock when exiting.
+ */
+ bzero((void *)mi, sizeof(*mi));
+ mi->dev = i;
+ snprintf(mi->id, sizeof(mi->id), "mixer%d", i);
+ strlcpy(mi->name, m->name, sizeof(mi->name));
+ mi->modify_counter = m->modify_counter;
+ mi->card_number = i;
+ /*
+ * Currently, FreeBSD assumes 1:1 relationship between
+ * a pcm and mixer devices, so this is hardcoded to 0.
+ */
+ mi->port_number = 0;
+
+ /**
+ * @todo Fill in @sa oss_mixerinfo::mixerhandle.
+ * @note From 4Front: "mixerhandle is an arbitrary
+ * string that identifies the mixer better than
+ * the device number (mixerinfo.dev). Device
+ * numbers may change depending on the order the
+ * drivers are loaded. However the handle should
+ * remain the same provided that the sound card
+ * is not moved to another PCI slot."
+ */
+
+ /**
+ * @note
+ * @sa oss_mixerinfo::magic is a reserved field.
+ *
+ * @par
+ * From 4Front: "magic is usually 0. However some
+ * devices may have dedicated setup utilities and the
+ * magic field may contain an unique driver specific
+ * value (managed by [4Front])."
+ */
+
+ mi->enabled = device_is_attached(m->dev) ? 1 : 0;
+ /**
+ * The only flag for @sa oss_mixerinfo::caps is
+ * currently MIXER_CAP_VIRTUAL, which I'm not sure we
+ * really worry about.
+ */
+ /**
+ * Mixer extensions currently aren't supported, so
+ * leave @sa oss_mixerinfo::nrext blank for now.
+ */
+
+ /**
+ * @todo Fill in @sa oss_mixerinfo::priority (requires
+ * touching drivers?)
+ * @note The priority field is for mixer applets to
+ * determine which mixer should be the default, with 0
+ * being least preferred and 10 being most preferred.
+ * From 4Front: "OSS drivers like ICH use higher
+ * values (10) because such chips are known to be used
+ * only on motherboards. Drivers for high end pro
+ * devices use 0 because they will never be the
+ * default mixer. Other devices use values 1 to 9
+ * depending on the estimated probability of being the
+ * default device.
+ */
+
+ snprintf(mi->devnode, sizeof(mi->devnode), "/dev/mixer%d", i);
+ mi->legacy_device = i;
+
+ mtx_unlock(m->lock);
PCM_UNLOCK(d);
- if (m != NULL)
- return (0);
+ bus_topo_unlock();
+ return (0);
}
+ bus_topo_unlock();
return (EINVAL);
}
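
[Editor's note: the rewritten mixer_oss_mixerinfo() above maps mixer numbers 1:1 to
pcm unit numbers under bus_topo_lock, and answers for unavailable units with a
"mixerN (n/a)" placeholder entry instead of renumbering the remaining mixers. A
minimal userland sketch of how an OSSv4 application would enumerate mixers against
this handler follows; it uses only the standard <sys/soundcard.h> ioctl API and is
not part of this diff. The /dev/mixer path and the fields printed are the usual
OSSv4 conventions, assumed here rather than taken from the change itself.]

    #include <sys/soundcard.h>
    #include <sys/ioctl.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
    	oss_sysinfo si;
    	oss_mixerinfo mi;
    	int fd, i;

    	fd = open("/dev/mixer", O_RDWR);
    	if (fd < 0)
    		err(1, "open(/dev/mixer)");
    	if (ioctl(fd, SNDCTL_SYSINFO, &si) < 0)
    		err(1, "SNDCTL_SYSINFO");
    	for (i = 0; i < si.nummixers; i++) {
    		memset(&mi, 0, sizeof(mi));
    		mi.dev = i;		/* probe unit i explicitly */
    		if (ioctl(fd, SNDCTL_MIXERINFO, &mi) < 0)
    			continue;
    		/* Unavailable units now report "mixerN (n/a)". */
    		printf("%d: %s (%s)\n", mi.dev, mi.id, mi.name);
    	}
    	close(fd);
    	return (0);
    }
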
diff --git a/sys/dev/sound/pcm/mixer.h b/sys/dev/sound/pcm/mixer.h
index 6c5c8f3ec3fe..7139a766b392 100644
--- a/sys/dev/sound/pcm/mixer.h
+++ b/sys/dev/sound/pcm/mixer.h
@@ -69,8 +69,6 @@ u_int32_t mix_getchild(struct snd_mixer *m, u_int32_t dev);
void *mix_getdevinfo(struct snd_mixer *m);
struct mtx *mixer_get_lock(struct snd_mixer *m);
-extern int mixer_count;
-
#define MIXER_CMD_DIRECT 0 /* send command within driver */
#define MIXER_CMD_CDEV 1 /* send command from cdev/ioctl */
diff --git a/sys/dev/sound/pcm/pcm.h b/sys/dev/sound/pcm/pcm.h
index 3165822e3c85..7d0a8f0f431b 100644
--- a/sys/dev/sound/pcm/pcm.h
+++ b/sys/dev/sound/pcm/pcm.h
@@ -3,6 +3,10 @@
*
* Copyright (c) 2006-2009 Ariff Abdullah <ariff@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,14 +35,11 @@
#include <sys/param.h>
-/*
- * Macros for reading/writing PCM sample / int values from bytes array.
- * Since every process is done using signed integer (and to make our life
- * less miserable), unsigned sample will be converted to its signed
- * counterpart and restored during writing back. To avoid overflow,
- * we truncate 32bit (and only 32bit) samples down to 24bit (see below
- * for the reason), unless SND_PCM_64 is defined.
- */
+#include <dev/sound/pcm/g711.h>
+
+#ifndef _KERNEL
+#include <assert.h> /* for __assert_unreachable() */
+#endif
/*
* Automatically turn on 64bit arithmetic on suitable archs
@@ -106,333 +107,344 @@ typedef uint64_t uintpcm64_t;
#define INTPCM24_T(v) ((intpcm24_t)(v))
#define INTPCM32_T(v) ((intpcm32_t)(v))
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define _PCM_READ_S16_LE(b8) INTPCM_T(*((int16_t *)(b8)))
-#define _PCM_READ_S32_LE(b8) INTPCM_T(*((int32_t *)(b8)))
-#define _PCM_READ_S16_BE(b8) \
- INTPCM_T((b8)[1] | (((int8_t)((b8)[0])) << 8))
-#define _PCM_READ_S32_BE(b8) \
- INTPCM_T((b8)[3] | ((b8)[2] << 8) | ((b8)[1] << 16) | \
- (((int8_t)((b8)[0])) << 24))
-
-#define _PCM_WRITE_S16_LE(b8, val) do { \
- *((int16_t *)(b8)) = (val); \
-} while (0)
-#define _PCM_WRITE_S32_LE(b8, val) do { \
- *((int32_t *)(b8)) = (val); \
-} while (0)
-#define _PCM_WRITE_S16_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[1] = val; \
- b8[0] = val >> 8; \
-} while (0)
-#define _PCM_WRITE_S32_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[3] = val; \
- b8[2] = val >> 8; \
- b8[1] = val >> 16; \
- b8[0] = val >> 24; \
-} while (0)
-
-#define _PCM_READ_U16_LE(b8) \
- INTPCM_T((int16_t)(*((uint16_t *)(b8)) ^ 0x8000))
-#define _PCM_READ_U32_LE(b8) \
- INTPCM_T((int32_t)(*((uint32_t *)(b8)) ^ 0x80000000))
-#define _PCM_READ_U16_BE(b8) \
- INTPCM_T((b8)[1] | (((int8_t)((b8)[0] ^ 0x80)) << 8))
-#define _PCM_READ_U32_BE(b8) \
- INTPCM_T((b8)[3] | ((b8)[2] << 8) | ((b8)[1] << 16) | \
- (((int8_t)((b8)[0] ^ 0x80)) << 24))
-
-#define _PCM_WRITE_U16_LE(b8, val) do { \
- *((uint16_t *)(b8)) = (val) ^ 0x8000; \
-} while (0)
-#define _PCM_WRITE_U32_LE(b8, val) do { \
- *((uint32_t *)(b8)) = (val) ^ 0x80000000; \
-} while (0)
-#define _PCM_WRITE_U16_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[1] = val; \
- b8[0] = (val >> 8) ^ 0x80; \
-} while (0)
-#define _PCM_WRITE_U32_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[3] = val; \
- b8[2] = val >> 8; \
- b8[1] = val >> 16; \
- b8[0] = (val >> 24) ^ 0x80; \
-} while (0)
-
-#define _PCM_READ_S16_NE(b8) _PCM_READ_S16_LE(b8)
-#define _PCM_READ_U16_NE(b8) _PCM_READ_U16_LE(b8)
-#define _PCM_READ_S32_NE(b8) _PCM_READ_S32_LE(b8)
-#define _PCM_READ_U32_NE(b8) _PCM_READ_U32_LE(b8)
-#define _PCM_WRITE_S16_NE(b6) _PCM_WRITE_S16_LE(b8)
-#define _PCM_WRITE_U16_NE(b6) _PCM_WRITE_U16_LE(b8)
-#define _PCM_WRITE_S32_NE(b6) _PCM_WRITE_S32_LE(b8)
-#define _PCM_WRITE_U32_NE(b6) _PCM_WRITE_U32_LE(b8)
-#else /* !LITTLE_ENDIAN */
-#define _PCM_READ_S16_LE(b8) \
- INTPCM_T((b8)[0] | (((int8_t)((b8)[1])) << 8))
-#define _PCM_READ_S32_LE(b8) \
- INTPCM_T((b8)[0] | ((b8)[1] << 8) | ((b8)[2] << 16) | \
- (((int8_t)((b8)[3])) << 24))
-#define _PCM_READ_S16_BE(b8) INTPCM_T(*((int16_t *)(b8)))
-#define _PCM_READ_S32_BE(b8) INTPCM_T(*((int32_t *)(b8)))
-
-#define _PCM_WRITE_S16_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = val >> 8; \
-} while (0)
-#define _PCM_WRITE_S32_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = val >> 8; \
- b8[2] = val >> 16; \
- b8[3] = val >> 24; \
-} while (0)
-#define _PCM_WRITE_S16_BE(b8, val) do { \
- *((int16_t *)(b8)) = (val); \
-} while (0)
-#define _PCM_WRITE_S32_BE(b8, val) do { \
- *((int32_t *)(b8)) = (val); \
-} while (0)
-
-#define _PCM_READ_U16_LE(b8) \
- INTPCM_T((b8)[0] | (((int8_t)((b8)[1] ^ 0x80)) << 8))
-#define _PCM_READ_U32_LE(b8) \
- INTPCM_T((b8)[0] | ((b8)[1] << 8) | ((b8)[2] << 16) | \
- (((int8_t)((b8)[3] ^ 0x80)) << 24))
-#define _PCM_READ_U16_BE(b8) \
- INTPCM_T((int16_t)(*((uint16_t *)(b8)) ^ 0x8000))
-#define _PCM_READ_U32_BE(b8) \
- INTPCM_T((int32_t)(*((uint32_t *)(b8)) ^ 0x80000000))
-
-#define _PCM_WRITE_U16_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = (val >> 8) ^ 0x80; \
-} while (0)
-#define _PCM_WRITE_U32_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = val >> 8; \
- b8[2] = val >> 16; \
- b8[3] = (val >> 24) ^ 0x80; \
-} while (0)
-#define _PCM_WRITE_U16_BE(b8, val) do { \
- *((uint16_t *)(b8)) = (val) ^ 0x8000; \
-} while (0)
-#define _PCM_WRITE_U32_BE(b8, val) do { \
- *((uint32_t *)(b8)) = (val) ^ 0x80000000; \
-} while (0)
-
-#define _PCM_READ_S16_NE(b8) _PCM_READ_S16_BE(b8)
-#define _PCM_READ_U16_NE(b8) _PCM_READ_U16_BE(b8)
-#define _PCM_READ_S32_NE(b8) _PCM_READ_S32_BE(b8)
-#define _PCM_READ_U32_NE(b8) _PCM_READ_U32_BE(b8)
-#define _PCM_WRITE_S16_NE(b6) _PCM_WRITE_S16_BE(b8)
-#define _PCM_WRITE_U16_NE(b6) _PCM_WRITE_U16_BE(b8)
-#define _PCM_WRITE_S32_NE(b6) _PCM_WRITE_S32_BE(b8)
-#define _PCM_WRITE_U32_NE(b6) _PCM_WRITE_U32_BE(b8)
-#endif /* LITTLE_ENDIAN */
-
-#define _PCM_READ_S24_LE(b8) \
- INTPCM_T((b8)[0] | ((b8)[1] << 8) | (((int8_t)((b8)[2])) << 16))
-#define _PCM_READ_S24_BE(b8) \
- INTPCM_T((b8)[2] | ((b8)[1] << 8) | (((int8_t)((b8)[0])) << 16))
-
-#define _PCM_WRITE_S24_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = val >> 8; \
- b8[2] = val >> 16; \
-} while (0)
-#define _PCM_WRITE_S24_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[2] = val; \
- b8[1] = val >> 8; \
- b8[0] = val >> 16; \
-} while (0)
-
-#define _PCM_READ_U24_LE(b8) \
- INTPCM_T((b8)[0] | ((b8)[1] << 8) | \
- (((int8_t)((b8)[2] ^ 0x80)) << 16))
-#define _PCM_READ_U24_BE(b8) \
- INTPCM_T((b8)[2] | ((b8)[1] << 8) | \
- (((int8_t)((b8)[0] ^ 0x80)) << 16))
-
-#define _PCM_WRITE_U24_LE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[0] = val; \
- b8[1] = val >> 8; \
- b8[2] = (val >> 16) ^ 0x80; \
-} while (0)
-#define _PCM_WRITE_U24_BE(bb8, vval) do { \
- intpcm_t val = (vval); \
- uint8_t *b8 = (bb8); \
- b8[2] = val; \
- b8[1] = val >> 8; \
- b8[0] = (val >> 16) ^ 0x80; \
-} while (0)
-
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define _PCM_READ_S24_NE(b8) _PCM_READ_S24_LE(b8)
-#define _PCM_READ_U24_NE(b8) _PCM_READ_U24_LE(b8)
-#define _PCM_WRITE_S24_NE(b6) _PCM_WRITE_S24_LE(b8)
-#define _PCM_WRITE_U24_NE(b6) _PCM_WRITE_U24_LE(b8)
-#else /* !LITTLE_ENDIAN */
-#define _PCM_READ_S24_NE(b8) _PCM_READ_S24_BE(b8)
-#define _PCM_READ_U24_NE(b8) _PCM_READ_U24_BE(b8)
-#define _PCM_WRITE_S24_NE(b6) _PCM_WRITE_S24_BE(b8)
-#define _PCM_WRITE_U24_NE(b6) _PCM_WRITE_U24_BE(b8)
-#endif /* LITTLE_ENDIAN */
+static const struct {
+ const uint8_t ulaw_to_u8[G711_TABLE_SIZE];
+ const uint8_t alaw_to_u8[G711_TABLE_SIZE];
+ const uint8_t u8_to_ulaw[G711_TABLE_SIZE];
+ const uint8_t u8_to_alaw[G711_TABLE_SIZE];
+} xlaw_conv_tables = {
+ ULAW_TO_U8,
+ ALAW_TO_U8,
+ U8_TO_ULAW,
+ U8_TO_ALAW
+};
+
/*
- * 8bit sample is pretty much useless since it doesn't provide
- * sufficient dynamic range throughout our filtering process.
- * For the sake of completeness, declare it anyway.
+ * Functions for reading/writing PCM integer sample values from a byte
+ * array. Since all processing is done using signed integers (and to make
+ * our lives less miserable), unsigned samples are converted to their
+ * signed counterparts and restored when written back.
*/
-#define _PCM_READ_S8_NE(b8) INTPCM_T(*((int8_t *)(b8)))
-#define _PCM_READ_U8_NE(b8) \
- INTPCM_T((int8_t)(*((uint8_t *)(b8)) ^ 0x80))
+static __always_inline __unused intpcm_t
+pcm_sample_read(const uint8_t *src, uint32_t fmt)
+{
+ intpcm_t v, e, m;
+ bool s;
+
+ fmt = AFMT_ENCODING(fmt);
+
+ switch (fmt) {
+ case AFMT_AC3:
+ v = 0;
+ break;
+ case AFMT_MU_LAW:
+ v = _G711_TO_INTPCM(xlaw_conv_tables.ulaw_to_u8, *src);
+ break;
+ case AFMT_A_LAW:
+ v = _G711_TO_INTPCM(xlaw_conv_tables.alaw_to_u8, *src);
+ break;
+ case AFMT_S8:
+ v = INTPCM_T((int8_t)*src);
+ break;
+ case AFMT_U8:
+ v = INTPCM_T((int8_t)(*src ^ 0x80));
+ break;
+ case AFMT_S16_LE:
+ v = INTPCM_T(src[0] | (int8_t)src[1] << 8);
+ break;
+ case AFMT_S16_BE:
+ v = INTPCM_T(src[1] | (int8_t)src[0] << 8);
+ break;
+ case AFMT_U16_LE:
+ v = INTPCM_T(src[0] | (int8_t)(src[1] ^ 0x80) << 8);
+ break;
+ case AFMT_U16_BE:
+ v = INTPCM_T(src[1] | (int8_t)(src[0] ^ 0x80) << 8);
+ break;
+ case AFMT_S24_LE:
+ v = INTPCM_T(src[0] | src[1] << 8 | (int8_t)src[2] << 16);
+ break;
+ case AFMT_S24_BE:
+ v = INTPCM_T(src[2] | src[1] << 8 | (int8_t)src[0] << 16);
+ break;
+ case AFMT_U24_LE:
+ v = INTPCM_T(src[0] | src[1] << 8 |
+ (int8_t)(src[2] ^ 0x80) << 16);
+ break;
+ case AFMT_U24_BE:
+ v = INTPCM_T(src[2] | src[1] << 8 |
+ (int8_t)(src[0] ^ 0x80) << 16);
+ break;
+ case AFMT_S32_LE:
+ v = INTPCM_T(src[0] | src[1] << 8 | src[2] << 16 |
+ (int8_t)src[3] << 24);
+ break;
+ case AFMT_S32_BE:
+ v = INTPCM_T(src[3] | src[2] << 8 | src[1] << 16 |
+ (int8_t)src[0] << 24);
+ break;
+ case AFMT_U32_LE:
+ v = INTPCM_T(src[0] | src[1] << 8 | src[2] << 16 |
+ (int8_t)(src[3] ^ 0x80) << 24);
+ break;
+ case AFMT_U32_BE:
+ v = INTPCM_T(src[3] | src[2] << 8 | src[1] << 16 |
+ (int8_t)(src[0] ^ 0x80) << 24);
+ break;
+ case AFMT_F32_LE: /* FALLTHROUGH */
+ case AFMT_F32_BE:
+ if (fmt == AFMT_F32_LE) {
+ v = INTPCM_T(src[0] | src[1] << 8 | src[2] << 16 |
+ (int8_t)src[3] << 24);
+ } else {
+ v = INTPCM_T(src[3] | src[2] << 8 | src[1] << 16 |
+ (int8_t)src[0] << 24);
+ }
+ e = (v >> 23) & 0xff;
+ /* NaN, +/- Inf or too small */
+ if (e == 0xff || e < 96) {
+ v = INTPCM_T(0);
+ break;
+ }
+ s = v & 0x80000000U;
+ if (e > 126) {
+ v = INTPCM_T((s == 0) ? PCM_S32_MAX : PCM_S32_MIN);
+ break;
+ }
+ m = 0x800000 | (v & 0x7fffff);
+ e += 8 - 127;
+ if (e < 0)
+ m >>= -e;
+ else
+ m <<= e;
+ v = INTPCM_T((s == 0) ? m : -m);
+ break;
+ default:
+ v = 0;
+ printf("%s(): unknown format: 0x%08x\n", __func__, fmt);
+ __assert_unreachable();
+ }
+
+ return (v);
+}
-#define _PCM_WRITE_S8_NE(b8, val) do { \
- *((int8_t *)(b8)) = (val); \
-} while (0)
-#define _PCM_WRITE_U8_NE(b8, val) do { \
- *((uint8_t *)(b8)) = (val) ^ 0x80; \
-} while (0)
+/*
+ * Read sample and normalize to 32-bit magnitude.
+ */
+static __always_inline __unused intpcm_t
+pcm_sample_read_norm(const uint8_t *src, uint32_t fmt)
+{
+ return (pcm_sample_read(src, fmt) << (32 - AFMT_BIT(fmt)));
+}
/*
- * Common macross. Use this instead of "_", unless we want
- * the real sample value.
+ * Read sample and restrict magnitude to 24 bits.
*/
+static __always_inline __unused intpcm_t
+pcm_sample_read_calc(const uint8_t *src, uint32_t fmt)
+{
+ intpcm_t v;
+
+ v = pcm_sample_read(src, fmt);
+
+#ifndef SND_PCM_64
+ /*
+ * Dynamic range for humans: ~140db.
+ *
+ * 16bit = 96db (close enough)
+ * 24bit = 144db (perfect)
+ * 32bit = 196db (way too much)
+ *
+ * 24bit is pretty much sufficient for our signed integer processing.
+ * Also, to avoid overflow, we truncate 32bit (and only 32bit) samples
+	 * down to 24bit, unless SND_PCM_64 is defined.
+ */
+ if (fmt & AFMT_32BIT)
+ v >>= PCM_FXSHIFT;
+#endif
+
+ return (v);
+}
+
+static __always_inline __unused void
+pcm_sample_write(uint8_t *dst, intpcm_t v, uint32_t fmt)
+{
+ intpcm_t r, e;
+
+ fmt = AFMT_ENCODING(fmt);
+
+ if (fmt & (AFMT_F32_LE | AFMT_F32_BE)) {
+ if (v == 0)
+ r = 0;
+ else if (v == PCM_S32_MAX)
+ r = 0x3f800000;
+ else if (v == PCM_S32_MIN)
+ r = 0x80000000U | 0x3f800000;
+ else {
+ r = 0;
+ if (v < 0) {
+ r |= 0x80000000U;
+ v = -v;
+ }
+ e = 127 - 8;
+ while ((v & 0x7f000000) != 0) {
+ v >>= 1;
+ e++;
+ }
+ while ((v & 0x7f800000) == 0) {
+ v <<= 1;
+ e--;
+ }
+ r |= (e & 0xff) << 23;
+ r |= v & 0x7fffff;
+ }
+ v = r;
+ }
+
+ switch (fmt) {
+ case AFMT_AC3:
+ *(int16_t *)dst = 0;
+ break;
+ case AFMT_MU_LAW:
+ *dst = _INTPCM_TO_G711(xlaw_conv_tables.u8_to_ulaw, v);
+ break;
+ case AFMT_A_LAW:
+ *dst = _INTPCM_TO_G711(xlaw_conv_tables.u8_to_alaw, v);
+ break;
+ case AFMT_S8:
+ *(int8_t *)dst = v;
+ break;
+ case AFMT_U8:
+ *(int8_t *)dst = v ^ 0x80;
+ break;
+ case AFMT_S16_LE:
+ dst[0] = v;
+ dst[1] = v >> 8;
+ break;
+ case AFMT_S16_BE:
+ dst[1] = v;
+ dst[0] = v >> 8;
+ break;
+ case AFMT_U16_LE:
+ dst[0] = v;
+ dst[1] = (v >> 8) ^ 0x80;
+ break;
+ case AFMT_U16_BE:
+ dst[1] = v;
+ dst[0] = (v >> 8) ^ 0x80;
+ break;
+ case AFMT_S24_LE:
+ dst[0] = v;
+ dst[1] = v >> 8;
+ dst[2] = v >> 16;
+ break;
+ case AFMT_S24_BE:
+ dst[2] = v;
+ dst[1] = v >> 8;
+ dst[0] = v >> 16;
+ break;
+ case AFMT_U24_LE:
+ dst[0] = v;
+ dst[1] = v >> 8;
+ dst[2] = (v >> 16) ^ 0x80;
+ break;
+ case AFMT_U24_BE:
+ dst[2] = v;
+ dst[1] = v >> 8;
+ dst[0] = (v >> 16) ^ 0x80;
+ break;
+ case AFMT_S32_LE: /* FALLTHROUGH */
+ case AFMT_F32_LE:
+ dst[0] = v;
+ dst[1] = v >> 8;
+ dst[2] = v >> 16;
+ dst[3] = v >> 24;
+ break;
+ case AFMT_S32_BE: /* FALLTHROUGH */
+ case AFMT_F32_BE:
+ dst[3] = v;
+ dst[2] = v >> 8;
+ dst[1] = v >> 16;
+ dst[0] = v >> 24;
+ break;
+ case AFMT_U32_LE:
+ dst[0] = v;
+ dst[1] = v >> 8;
+ dst[2] = v >> 16;
+ dst[3] = (v >> 24) ^ 0x80;
+ break;
+ case AFMT_U32_BE:
+ dst[3] = v;
+ dst[2] = v >> 8;
+ dst[1] = v >> 16;
+ dst[0] = (v >> 24) ^ 0x80;
+ break;
+ default:
+ printf("%s(): unknown format: 0x%08x\n", __func__, fmt);
+ __assert_unreachable();
+ }
+}
-/* 8bit */
-#define PCM_READ_S8_NE(b8) _PCM_READ_S8_NE(b8)
-#define PCM_READ_U8_NE(b8) _PCM_READ_U8_NE(b8)
-#define PCM_WRITE_S8_NE(b8, val) _PCM_WRITE_S8_NE(b8, val)
-#define PCM_WRITE_U8_NE(b8, val) _PCM_WRITE_U8_NE(b8, val)
-
-/* 16bit */
-#define PCM_READ_S16_LE(b8) _PCM_READ_S16_LE(b8)
-#define PCM_READ_S16_BE(b8) _PCM_READ_S16_BE(b8)
-#define PCM_READ_U16_LE(b8) _PCM_READ_U16_LE(b8)
-#define PCM_READ_U16_BE(b8) _PCM_READ_U16_BE(b8)
-
-#define PCM_WRITE_S16_LE(b8, val) _PCM_WRITE_S16_LE(b8, val)
-#define PCM_WRITE_S16_BE(b8, val) _PCM_WRITE_S16_BE(b8, val)
-#define PCM_WRITE_U16_LE(b8, val) _PCM_WRITE_U16_LE(b8, val)
-#define PCM_WRITE_U16_BE(b8, val) _PCM_WRITE_U16_BE(b8, val)
-
-#define PCM_READ_S16_NE(b8) _PCM_READ_S16_NE(b8)
-#define PCM_READ_U16_NE(b8) _PCM_READ_U16_NE(b8)
-#define PCM_WRITE_S16_NE(b8) _PCM_WRITE_S16_NE(b8)
-#define PCM_WRITE_U16_NE(b8) _PCM_WRITE_U16_NE(b8)
-
-/* 24bit */
-#define PCM_READ_S24_LE(b8) _PCM_READ_S24_LE(b8)
-#define PCM_READ_S24_BE(b8) _PCM_READ_S24_BE(b8)
-#define PCM_READ_U24_LE(b8) _PCM_READ_U24_LE(b8)
-#define PCM_READ_U24_BE(b8) _PCM_READ_U24_BE(b8)
-
-#define PCM_WRITE_S24_LE(b8, val) _PCM_WRITE_S24_LE(b8, val)
-#define PCM_WRITE_S24_BE(b8, val) _PCM_WRITE_S24_BE(b8, val)
-#define PCM_WRITE_U24_LE(b8, val) _PCM_WRITE_U24_LE(b8, val)
-#define PCM_WRITE_U24_BE(b8, val) _PCM_WRITE_U24_BE(b8, val)
-
-#define PCM_READ_S24_NE(b8) _PCM_READ_S24_NE(b8)
-#define PCM_READ_U24_NE(b8) _PCM_READ_U24_NE(b8)
-#define PCM_WRITE_S24_NE(b8) _PCM_WRITE_S24_NE(b8)
-#define PCM_WRITE_U24_NE(b8) _PCM_WRITE_U24_NE(b8)
-
-/* 32bit */
-#ifdef SND_PCM_64
-#define PCM_READ_S32_LE(b8) _PCM_READ_S32_LE(b8)
-#define PCM_READ_S32_BE(b8) _PCM_READ_S32_BE(b8)
-#define PCM_READ_U32_LE(b8) _PCM_READ_U32_LE(b8)
-#define PCM_READ_U32_BE(b8) _PCM_READ_U32_BE(b8)
-
-#define PCM_WRITE_S32_LE(b8, val) _PCM_WRITE_S32_LE(b8, val)
-#define PCM_WRITE_S32_BE(b8, val) _PCM_WRITE_S32_BE(b8, val)
-#define PCM_WRITE_U32_LE(b8, val) _PCM_WRITE_U32_LE(b8, val)
-#define PCM_WRITE_U32_BE(b8, val) _PCM_WRITE_U32_BE(b8, val)
-
-#define PCM_READ_S32_NE(b8) _PCM_READ_S32_NE(b8)
-#define PCM_READ_U32_NE(b8) _PCM_READ_U32_NE(b8)
-#define PCM_WRITE_S32_NE(b8) _PCM_WRITE_S32_NE(b8)
-#define PCM_WRITE_U32_NE(b8) _PCM_WRITE_U32_NE(b8)
-#else /* !SND_PCM_64 */
/*
- * 24bit integer ?!? This is quite unfortunate, eh? Get the fact straight:
- * Dynamic range for:
- * 1) Human =~ 140db
- * 2) 16bit = 96db (close enough)
- * 3) 24bit = 144db (perfect)
- * 4) 32bit = 196db (way too much)
- * 5) Bugs Bunny = Gazillion!@%$Erbzzztt-EINVAL db
- * Since we're not Bugs Bunny ..uh..err.. avoiding 64bit arithmetic, 24bit
- * is pretty much sufficient for our signed integer processing.
+ * Write sample and normalize to original magnitude.
*/
-#define PCM_READ_S32_LE(b8) (_PCM_READ_S32_LE(b8) >> PCM_FXSHIFT)
-#define PCM_READ_S32_BE(b8) (_PCM_READ_S32_BE(b8) >> PCM_FXSHIFT)
-#define PCM_READ_U32_LE(b8) (_PCM_READ_U32_LE(b8) >> PCM_FXSHIFT)
-#define PCM_READ_U32_BE(b8) (_PCM_READ_U32_BE(b8) >> PCM_FXSHIFT)
-
-#define PCM_READ_S32_NE(b8) (_PCM_READ_S32_NE(b8) >> PCM_FXSHIFT)
-#define PCM_READ_U32_NE(b8) (_PCM_READ_U32_NE(b8) >> PCM_FXSHIFT)
-
-#define PCM_WRITE_S32_LE(b8, val) \
- _PCM_WRITE_S32_LE(b8, (val) << PCM_FXSHIFT)
-#define PCM_WRITE_S32_BE(b8, val) \
- _PCM_WRITE_S32_BE(b8, (val) << PCM_FXSHIFT)
-#define PCM_WRITE_U32_LE(b8, val) \
- _PCM_WRITE_U32_LE(b8, (val) << PCM_FXSHIFT)
-#define PCM_WRITE_U32_BE(b8, val) \
- _PCM_WRITE_U32_BE(b8, (val) << PCM_FXSHIFT)
-
-#define PCM_WRITE_S32_NE(b8, val) \
- _PCM_WRITE_S32_NE(b8, (val) << PCM_FXSHIFT)
-#define PCM_WRITE_U32_NE(b8, val) \
- _PCM_WRITE_U32_NE(b8, (val) << PCM_FXSHIFT)
-#endif /* SND_PCM_64 */
-
-#define PCM_CLAMP_S8(val) \
- (((val) > PCM_S8_MAX) ? PCM_S8_MAX : \
- (((val) < PCM_S8_MIN) ? PCM_S8_MIN : (val)))
-#define PCM_CLAMP_S16(val) \
- (((val) > PCM_S16_MAX) ? PCM_S16_MAX : \
- (((val) < PCM_S16_MIN) ? PCM_S16_MIN : (val)))
-#define PCM_CLAMP_S24(val) \
- (((val) > PCM_S24_MAX) ? PCM_S24_MAX : \
- (((val) < PCM_S24_MIN) ? PCM_S24_MIN : (val)))
+static __always_inline __unused void
+pcm_sample_write_norm(uint8_t *dst, intpcm_t v, uint32_t fmt)
+{
+ pcm_sample_write(dst, v >> (32 - AFMT_BIT(fmt)), fmt);
+}
-#ifdef SND_PCM_64
-#define PCM_CLAMP_S32(val) \
- (((val) > PCM_S32_MAX) ? PCM_S32_MAX : \
- (((val) < PCM_S32_MIN) ? PCM_S32_MIN : (val)))
-#else /* !SND_PCM_64 */
-#define PCM_CLAMP_S32(val) \
- (((val) > PCM_S24_MAX) ? PCM_S32_MAX : \
- (((val) < PCM_S24_MIN) ? PCM_S32_MIN : \
- ((val) << PCM_FXSHIFT)))
-#endif /* SND_PCM_64 */
-
-#define PCM_CLAMP_U8(val) PCM_CLAMP_S8(val)
-#define PCM_CLAMP_U16(val) PCM_CLAMP_S16(val)
-#define PCM_CLAMP_U24(val) PCM_CLAMP_S24(val)
-#define PCM_CLAMP_U32(val) PCM_CLAMP_S32(val)
+/*
+ * To be used with pcm_sample_read_calc().
+ */
+static __always_inline __unused void
+pcm_sample_write_calc(uint8_t *dst, intpcm_t v, uint32_t fmt)
+{
+#ifndef SND_PCM_64
+ /* Shift back to 32-bit magnitude. */
+ if (fmt & AFMT_32BIT)
+ v <<= PCM_FXSHIFT;
+#endif
+ pcm_sample_write(dst, v, fmt);
+}
+
+static __always_inline __unused intpcm_t
+pcm_clamp(intpcm32_t sample, uint32_t fmt)
+{
+ fmt = AFMT_ENCODING(fmt);
+
+ switch (AFMT_BIT(fmt)) {
+ case 8:
+ return ((sample > PCM_S8_MAX) ? PCM_S8_MAX :
+ ((sample < PCM_S8_MIN) ? PCM_S8_MIN : sample));
+ case 16:
+ return ((sample > PCM_S16_MAX) ? PCM_S16_MAX :
+ ((sample < PCM_S16_MIN) ? PCM_S16_MIN : sample));
+ case 24:
+ return ((sample > PCM_S24_MAX) ? PCM_S24_MAX :
+ ((sample < PCM_S24_MIN) ? PCM_S24_MIN : sample));
+ case 32:
+ return ((sample > PCM_S32_MAX) ? PCM_S32_MAX :
+ ((sample < PCM_S32_MIN) ? PCM_S32_MIN : sample));
+ default:
+ printf("%s(): unknown format: 0x%08x\n", __func__, fmt);
+ __assert_unreachable();
+ }
+}
+
+static __always_inline __unused intpcm_t
+pcm_clamp_calc(intpcm32_t sample, uint32_t fmt)
+{
+#ifndef SND_PCM_64
+ if (fmt & AFMT_32BIT) {
+ return ((sample > PCM_S24_MAX) ? PCM_S32_MAX :
+ ((sample < PCM_S24_MIN) ? PCM_S32_MIN :
+ sample << PCM_FXSHIFT));
+ }
+#endif
+
+ return (pcm_clamp(sample, fmt));
+}
#endif /* !_SND_PCM_H_ */
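
[Editor's note: the macro jungle above is replaced by the pcm_sample_read()/
pcm_sample_write() inline functions, and the interesting new piece is the
integer-only IEEE-754 binary32 decode in the AFMT_F32 path: biased exponents
below 96 (magnitudes under 2^-31) collapse to silence, magnitudes of 1.0 and
above clamp to the signed 32-bit rails, and everything in between shifts the
24-bit mantissa by (exponent - 119) to scale the sample to a 2^31 magnitude.
The following self-contained userland sketch of the same decode may make the
arithmetic easier to follow; it is an illustration, not the kernel code, which
assembles the bit pattern from raw little- or big-endian bytes rather than
taking a host float.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int32_t
    f32_to_s32(float x)
    {
    	uint32_t bits;
    	int32_t e, m;

    	memcpy(&bits, &x, sizeof(bits));
    	e = (bits >> 23) & 0xff;
    	if (e == 0xff || e < 96)	/* NaN, +/-Inf, or below 2^-31 */
    		return (0);
    	if (e > 126)			/* |x| >= 1.0: clamp to the rails */
    		return ((bits & 0x80000000U) ? INT32_MIN : INT32_MAX);
    	m = 0x800000 | (bits & 0x7fffff);	/* restore the implicit 1 */
    	e += 8 - 127;			/* scale the 24-bit mantissa to 2^31 */
    	m = (e < 0) ? (m >> -e) : (m << e);
    	return ((bits & 0x80000000U) ? -m : m);
    }

    int
    main(void)
    {
    	printf("%d\n", f32_to_s32(0.5f));	/* 1073741824 (2^30) */
    	printf("%d\n", f32_to_s32(-1.0f));	/* INT32_MIN */
    	printf("%d\n", f32_to_s32(1e-12f));	/* 0, below the noise floor */
    	return (0);
    }
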
diff --git a/sys/dev/sound/pcm/sndstat.c b/sys/dev/sound/pcm/sndstat.c
index ef006a580d40..51d0fb3bb686 100644
--- a/sys/dev/sound/pcm/sndstat.c
+++ b/sys/dev/sound/pcm/sndstat.c
@@ -5,7 +5,7 @@
* Copyright (c) 2001 Cameron Grant <cg@FreeBSD.org>
* Copyright (c) 2020 The FreeBSD Foundation
* All rights reserved.
- * Copyright (c) 2024 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
*
* Portions of this software were developed by Christos Margiolis
* <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
@@ -45,19 +45,13 @@
#include <sys/nv.h>
#include <sys/dnv.h>
#include <sys/sx.h>
-#ifdef COMPAT_FREEBSD32
-#include <sys/sysent.h>
-#endif
#include <dev/sound/pcm/sound.h>
-#include <dev/sound/pcm/pcm.h>
-#include <dev/sound/version.h>
#include "feeder_if.h"
#define SS_TYPE_PCM 1
#define SS_TYPE_MIDI 2
-#define SS_TYPE_SEQUENCER 3
static d_open_t sndstat_open;
static void sndstat_close(void *);
@@ -154,10 +148,7 @@ sndstat_open(struct cdev *i_dev, int flags, int mode, struct thread *td)
pf = malloc(sizeof(*pf), M_DEVBUF, M_WAITOK | M_ZERO);
- if (sbuf_new(&pf->sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) {
- free(pf, M_DEVBUF);
- return (ENOMEM);
- }
+ sbuf_new(&pf->sbuf, NULL, 4096, SBUF_AUTOEXTEND);
pf->fflags = flags;
TAILQ_INIT(&pf->userdev_list);
@@ -327,47 +318,36 @@ sndstat_write(struct cdev *i_dev, struct uio *buf, int flag)
}
static void
-sndstat_get_caps(struct snddev_info *d, bool play, uint32_t *min_rate,
+sndstat_get_caps(struct snddev_info *d, int dir, uint32_t *min_rate,
uint32_t *max_rate, uint32_t *fmts, uint32_t *minchn, uint32_t *maxchn)
{
struct pcm_channel *c;
- unsigned int encoding;
- int dir;
-
- dir = play ? PCMDIR_PLAY : PCMDIR_REC;
-
- if (play && d->pvchancount > 0) {
- *min_rate = *max_rate = d->pvchanrate;
- *fmts = AFMT_ENCODING(d->pvchanformat);
- *minchn = *maxchn = AFMT_CHANNEL(d->pvchanformat);
- return;
- } else if (!play && d->rvchancount > 0) {
- *min_rate = *max_rate = d->rvchanrate;
- *fmts = AFMT_ENCODING(d->rvchanformat);
- *minchn = *maxchn = AFMT_CHANNEL(d->rvchanformat);
- return;
- }
+ struct pcmchan_caps *caps;
+ int i;
+ *fmts = 0;
*min_rate = UINT32_MAX;
*max_rate = 0;
*minchn = UINT32_MAX;
*maxchn = 0;
- encoding = 0;
- CHN_FOREACH(c, d, channels.pcm) {
- struct pcmchan_caps *caps;
- int i;
- if (c->direction != dir || (c->flags & CHN_F_VIRTUAL) != 0)
+ CHN_FOREACH(c, d, channels.pcm) {
+ if (c->direction != dir)
continue;
-
CHN_LOCK(c);
caps = chn_getcaps(c);
- *min_rate = min(caps->minspeed, *min_rate);
- *max_rate = max(caps->maxspeed, *max_rate);
for (i = 0; caps->fmtlist[i]; i++) {
- encoding |= AFMT_ENCODING(caps->fmtlist[i]);
- *minchn = min(AFMT_CHANNEL(encoding), *minchn);
- *maxchn = max(AFMT_CHANNEL(encoding), *maxchn);
+ *fmts |= AFMT_ENCODING(caps->fmtlist[i]);
+ *minchn = min(AFMT_CHANNEL(caps->fmtlist[i]), *minchn);
+ *maxchn = max(AFMT_CHANNEL(caps->fmtlist[i]), *maxchn);
+ }
+ if ((c->flags & CHN_F_EXCLUSIVE) ||
+ (pcm_getflags(d->dev) & SD_F_BITPERFECT)) {
+ *min_rate = min(*min_rate, caps->minspeed);
+ *max_rate = max(*max_rate, caps->maxspeed);
+ } else {
+ *min_rate = min(*min_rate, feeder_rate_min);
+ *max_rate = max(*max_rate, feeder_rate_max);
}
CHN_UNLOCK(c);
}
@@ -397,9 +377,13 @@ sndstat_create_diinfo_nv(uint32_t min_rate, uint32_t max_rate, uint32_t formats,
static int
sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
{
- uint32_t maxrate, minrate, fmts, minchn, maxchn;
- nvlist_t *di = NULL, *sound4di = NULL, *diinfo = NULL;
- int err;
+ struct pcm_channel *c;
+ struct pcm_feeder *f;
+ struct sbuf sb;
+ uint32_t maxrate, minrate, fmts, minchn, maxchn, caps;
+ nvlist_t *di = NULL, *sound4di = NULL, *diinfo = NULL, *cdi = NULL;
+ int err, nchan;
+ char buf[AFMTSTR_LEN];
di = nvlist_create(0);
if (di == NULL) {
@@ -424,8 +408,8 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
nvlist_add_number(di, SNDST_DSPS_PCHAN, d->playcount);
nvlist_add_number(di, SNDST_DSPS_RCHAN, d->reccount);
if (d->playcount > 0) {
- sndstat_get_caps(d, true, &minrate, &maxrate, &fmts, &minchn,
- &maxchn);
+ sndstat_get_caps(d, PCMDIR_PLAY, &minrate, &maxrate, &fmts,
+ &minchn, &maxchn);
nvlist_add_number(di, "pminrate", minrate);
nvlist_add_number(di, "pmaxrate", maxrate);
nvlist_add_number(di, "pfmts", fmts);
@@ -437,8 +421,8 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
nvlist_move_nvlist(di, SNDST_DSPS_INFO_PLAY, diinfo);
}
if (d->reccount > 0) {
- sndstat_get_caps(d, false, &minrate, &maxrate, &fmts, &minchn,
- &maxchn);
+ sndstat_get_caps(d, PCMDIR_REC, &minrate, &maxrate, &fmts,
+ &minchn, &maxchn);
nvlist_add_number(di, "rminrate", minrate);
nvlist_add_number(di, "rmaxrate", maxrate);
nvlist_add_number(di, "rfmts", fmts);
@@ -452,12 +436,153 @@ sndstat_build_sound4_nvlist(struct snddev_info *d, nvlist_t **dip)
nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_UNIT,
device_get_unit(d->dev)); // XXX: I want signed integer here
+ nvlist_add_string(sound4di, SNDST_DSPS_SOUND4_STATUS, d->status);
nvlist_add_bool(
sound4di, SNDST_DSPS_SOUND4_BITPERFECT, d->flags & SD_F_BITPERFECT);
- nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_PVCHAN, d->pvchancount);
- nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_RVCHAN, d->rvchancount);
+ nvlist_add_bool(sound4di, SNDST_DSPS_SOUND4_PVCHAN,
+ d->flags & SD_F_PVCHANS);
+ nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_PVCHANRATE,
+ d->pvchanrate);
+ nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_PVCHANFORMAT,
+ d->pvchanformat);
+ nvlist_add_bool(sound4di, SNDST_DSPS_SOUND4_RVCHAN,
+ d->flags & SD_F_RVCHANS);
+ nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_RVCHANRATE,
+ d->rvchanrate);
+ nvlist_add_number(sound4di, SNDST_DSPS_SOUND4_RVCHANFORMAT,
+ d->rvchanformat);
+
+ nchan = 0;
+ CHN_FOREACH(c, d, channels.pcm) {
+ sbuf_new(&sb, NULL, 4096, SBUF_AUTOEXTEND);
+ cdi = nvlist_create(0);
+ if (cdi == NULL) {
+ sbuf_delete(&sb);
+ PCM_RELEASE_QUICK(d);
+ err = ENOMEM;
+ goto done;
+ }
+
+ CHN_LOCK(c);
+
+ caps = PCM_CAP_REALTIME | PCM_CAP_MMAP | PCM_CAP_TRIGGER |
+ ((c->flags & CHN_F_VIRTUAL) ? PCM_CAP_VIRTUAL : 0) |
+ ((c->direction == PCMDIR_PLAY) ? PCM_CAP_OUTPUT : PCM_CAP_INPUT);
+
+ nvlist_add_string(cdi, SNDST_DSPS_SOUND4_CHAN_NAME, c->name);
+ nvlist_add_string(cdi, SNDST_DSPS_SOUND4_CHAN_PARENTCHAN,
+ c->parentchannel != NULL ? c->parentchannel->name : "");
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_UNIT, nchan++);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_CAPS, caps);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_LATENCY,
+ c->latency);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_RATE, c->speed);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_FORMAT,
+ c->format);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_PID, c->pid);
+ nvlist_add_string(cdi, SNDST_DSPS_SOUND4_CHAN_COMM, c->comm);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_INTR,
+ c->interrupts);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_FEEDCNT,
+ c->feedcount);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_XRUNS, c->xruns);
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_LEFTVOL,
+ CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FL));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_RIGHTVOL,
+ CHN_GETVOLUME(c, SND_VOL_C_PCM, SND_CHN_T_FR));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FORMAT,
+ sndbuf_getfmt(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_RATE,
+ sndbuf_getspd(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_SIZE,
+ sndbuf_getsize(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_BLKSZ,
+ sndbuf_getblksz(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_BLKCNT,
+ sndbuf_getblkcnt(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_FREE,
+ sndbuf_getfree(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_HWBUF_READY,
+ sndbuf_getready(c->bufhard));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_FORMAT,
+ sndbuf_getfmt(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_RATE,
+ sndbuf_getspd(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_SIZE,
+ sndbuf_getsize(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_BLKSZ,
+ sndbuf_getblksz(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_BLKCNT,
+ sndbuf_getblkcnt(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_FREE,
+ sndbuf_getfree(c->bufsoft));
+ nvlist_add_number(cdi, SNDST_DSPS_SOUND4_CHAN_SWBUF_READY,
+ sndbuf_getready(c->bufsoft));
+
+ if (c->parentchannel != NULL) {
+ sbuf_printf(&sb, "[%s", (c->direction == PCMDIR_REC) ?
+ c->parentchannel->name : "userland");
+ } else {
+ sbuf_printf(&sb, "[%s", (c->direction == PCMDIR_REC) ?
+ "hardware" : "userland");
+ }
+ sbuf_printf(&sb, " -> ");
+ f = c->feeder;
+ while (f->source != NULL)
+ f = f->source;
+ while (f != NULL) {
+ sbuf_printf(&sb, "%s", f->class->name);
+ if (f->desc->type == FEEDER_FORMAT) {
+ snd_afmt2str(f->desc->in, buf, sizeof(buf));
+ sbuf_printf(&sb, "(%s -> ", buf);
+ snd_afmt2str(f->desc->out, buf, sizeof(buf));
+ sbuf_printf(&sb, "%s)", buf);
+ } else if (f->desc->type == FEEDER_MATRIX) {
+ sbuf_printf(&sb, "(%d.%dch -> %d.%dch)",
+ AFMT_CHANNEL(f->desc->in) -
+ AFMT_EXTCHANNEL(f->desc->in),
+ AFMT_EXTCHANNEL(f->desc->in),
+ AFMT_CHANNEL(f->desc->out) -
+ AFMT_EXTCHANNEL(f->desc->out),
+ AFMT_EXTCHANNEL(f->desc->out));
+ } else if (f->desc->type == FEEDER_RATE) {
+ sbuf_printf(&sb, "(%d -> %d)",
+ FEEDER_GET(f, FEEDRATE_SRC),
+ FEEDER_GET(f, FEEDRATE_DST));
+ } else {
+ snd_afmt2str(f->desc->out, buf, sizeof(buf));
+ sbuf_printf(&sb, "(%s)", buf);
+ }
+ sbuf_printf(&sb, " -> ");
+ f = f->parent;
+ }
+ if (c->parentchannel != NULL) {
+ sbuf_printf(&sb, "%s]", (c->direction == PCMDIR_REC) ?
+ "userland" : c->parentchannel->name);
+ } else {
+ sbuf_printf(&sb, "%s]", (c->direction == PCMDIR_REC) ?
+ "userland" : "hardware");
+ }
+
+ CHN_UNLOCK(c);
+
+ sbuf_finish(&sb);
+ nvlist_add_string(cdi, SNDST_DSPS_SOUND4_CHAN_FEEDERCHAIN,
+ sbuf_data(&sb));
+ sbuf_delete(&sb);
+
+ nvlist_append_nvlist_array(sound4di,
+ SNDST_DSPS_SOUND4_CHAN_INFO, cdi);
+ nvlist_destroy(cdi);
+ err = nvlist_error(sound4di);
+ if (err) {
+ PCM_RELEASE_QUICK(d);
+ goto done;
+ }
+ }
nvlist_move_nvlist(di, SNDST_DSPS_PROVIDER_INFO, sound4di);
sound4di = NULL;
+
PCM_RELEASE_QUICK(d);
nvlist_add_string(di, SNDST_DSPS_PROVIDER, SNDST_DSPS_SOUND4_PROVIDER);
@@ -624,10 +749,9 @@ sndstat_refresh_devs(struct sndstat_file *pf)
}
static int
-sndstat_get_devs(struct sndstat_file *pf, caddr_t data)
+sndstat_get_devs(struct sndstat_file *pf, void *arg_buf, size_t *arg_nbytes)
{
int err;
- struct sndstioc_nv_arg *arg = (struct sndstioc_nv_arg *)data;
SNDSTAT_LOCK();
sx_xlock(&pf->lock);
@@ -666,22 +790,22 @@ sndstat_get_devs(struct sndstat_file *pf, caddr_t data)
SNDSTAT_UNLOCK();
- if (!arg->nbytes) {
- arg->nbytes = pf->devs_nbytes;
+ if (*arg_nbytes == 0) {
+ *arg_nbytes = pf->devs_nbytes;
err = 0;
goto done;
}
- if (arg->nbytes < pf->devs_nbytes) {
- arg->nbytes = 0;
+ if (*arg_nbytes < pf->devs_nbytes) {
+ *arg_nbytes = 0;
err = 0;
goto done;
}
- err = copyout(pf->devs_nvlbuf, arg->buf, pf->devs_nbytes);
+ err = copyout(pf->devs_nvlbuf, arg_buf, pf->devs_nbytes);
if (err)
goto done;
- arg->nbytes = pf->devs_nbytes;
+ *arg_nbytes = pf->devs_nbytes;
free(pf->devs_nvlbuf, M_NVLIST);
pf->devs_nvlbuf = NULL;
@@ -706,7 +830,7 @@ sndstat_unpack_user_nvlbuf(const void *unvlbuf, size_t nbytes, nvlist_t **nvl)
}
*nvl = nvlist_unpack(nvlbuf, nbytes, 0);
free(nvlbuf, M_DEVBUF);
- if (nvl == NULL) {
+ if (*nvl == NULL) {
return (EINVAL);
}
@@ -852,20 +976,24 @@ sndstat_dsp_unpack_nvlist(const nvlist_t *nvlist, struct sndstat_userdev *ud)
}
static int
-sndstat_add_user_devs(struct sndstat_file *pf, caddr_t data)
+sndstat_add_user_devs(struct sndstat_file *pf, void *nvlbuf, size_t nbytes)
{
int err;
nvlist_t *nvl = NULL;
const nvlist_t * const *dsps;
size_t i, ndsps;
- struct sndstioc_nv_arg *arg = (struct sndstioc_nv_arg *)data;
if ((pf->fflags & FWRITE) == 0) {
err = EPERM;
goto done;
}
- err = sndstat_unpack_user_nvlbuf(arg->buf, arg->nbytes, &nvl);
+ if (nbytes > SNDST_UNVLBUF_MAX) {
+ err = ENOMEM;
+ goto done;
+ }
+
+ err = sndstat_unpack_user_nvlbuf(nvlbuf, nbytes, &nvl);
if (err != 0)
goto done;
@@ -911,52 +1039,17 @@ sndstat_flush_user_devs(struct sndstat_file *pf)
return (0);
}
-#ifdef COMPAT_FREEBSD32
-static int
-compat_sndstat_get_devs32(struct sndstat_file *pf, caddr_t data)
-{
- struct sndstioc_nv_arg32 *arg32 = (struct sndstioc_nv_arg32 *)data;
- struct sndstioc_nv_arg arg;
- int err;
-
- arg.buf = (void *)(uintptr_t)arg32->buf;
- arg.nbytes = arg32->nbytes;
-
- err = sndstat_get_devs(pf, (caddr_t)&arg);
- if (err == 0) {
- arg32->buf = (uint32_t)(uintptr_t)arg.buf;
- arg32->nbytes = arg.nbytes;
- }
-
- return (err);
-}
-
-static int
-compat_sndstat_add_user_devs32(struct sndstat_file *pf, caddr_t data)
-{
- struct sndstioc_nv_arg32 *arg32 = (struct sndstioc_nv_arg32 *)data;
- struct sndstioc_nv_arg arg;
- int err;
-
- arg.buf = (void *)(uintptr_t)arg32->buf;
- arg.nbytes = arg32->nbytes;
-
- err = sndstat_add_user_devs(pf, (caddr_t)&arg);
- if (err == 0) {
- arg32->buf = (uint32_t)(uintptr_t)arg.buf;
- arg32->nbytes = arg.nbytes;
- }
-
- return (err);
-}
-#endif
-
static int
sndstat_ioctl(
struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
{
int err;
struct sndstat_file *pf;
+ struct sndstioc_nv_arg *arg;
+#ifdef COMPAT_FREEBSD32
+ struct sndstioc_nv_arg32 *arg32;
+ size_t nbytes;
+#endif
err = devfs_get_cdevpriv((void **)&pf);
if (err != 0)
@@ -964,27 +1057,30 @@ sndstat_ioctl(
switch (cmd) {
case SNDSTIOC_GET_DEVS:
- err = sndstat_get_devs(pf, data);
+ arg = (struct sndstioc_nv_arg *)data;
+ err = sndstat_get_devs(pf, arg->buf, &arg->nbytes);
break;
#ifdef COMPAT_FREEBSD32
case SNDSTIOC_GET_DEVS32:
- if (!SV_CURPROC_FLAG(SV_ILP32)) {
- err = ENODEV;
- break;
+ arg32 = (struct sndstioc_nv_arg32 *)data;
+ nbytes = arg32->nbytes;
+ err = sndstat_get_devs(pf, (void *)(uintptr_t)arg32->buf,
+ &nbytes);
+ if (err == 0) {
+ KASSERT(nbytes < UINT_MAX, ("impossibly many bytes"));
+ arg32->nbytes = nbytes;
}
- err = compat_sndstat_get_devs32(pf, data);
break;
#endif
case SNDSTIOC_ADD_USER_DEVS:
- err = sndstat_add_user_devs(pf, data);
+ arg = (struct sndstioc_nv_arg *)data;
+ err = sndstat_add_user_devs(pf, arg->buf, arg->nbytes);
break;
#ifdef COMPAT_FREEBSD32
case SNDSTIOC_ADD_USER_DEVS32:
- if (!SV_CURPROC_FLAG(SV_ILP32)) {
- err = ENODEV;
- break;
- }
- err = compat_sndstat_add_user_devs32(pf, data);
+ arg32 = (struct sndstioc_nv_arg32 *)data;
+ err = sndstat_add_user_devs(pf, (void *)(uintptr_t)arg32->buf,
+ arg32->nbytes);
break;
#endif
case SNDSTIOC_REFRESH_DEVS:
@@ -1014,7 +1110,7 @@ sndstat_line2userdev(struct sndstat_file *pf, const char *line, int n)
if (e == NULL)
goto fail;
ud->nameunit = strndup(line, e - line, M_DEVBUF);
- ud->devnode = (char *)malloc(e - line + 1, M_DEVBUF, M_WAITOK | M_ZERO);
+ ud->devnode = malloc(e - line + 1, M_DEVBUF, M_WAITOK | M_ZERO);
strlcat(ud->devnode, ud->nameunit, e - line + 1);
line = e + 1;
@@ -1068,8 +1164,6 @@ sndstat_register(device_t dev, char *str)
type = SS_TYPE_PCM;
else if (!strcmp(devtype, "midi"))
type = SS_TYPE_MIDI;
- else if (!strcmp(devtype, "sequencer"))
- type = SS_TYPE_SEQUENCER;
else
return (EINVAL);
@@ -1163,6 +1257,8 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
KASSERT(c->bufhard != NULL && c->bufsoft != NULL,
("hosed pcm channel setup"));
+ CHN_LOCK(c);
+
sbuf_printf(s, "\n\t");
sbuf_printf(s, "%s[%s]: ",
@@ -1186,12 +1282,12 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
}
sbuf_printf(s, "\n\t");
- sbuf_printf(s, "interrupts %d, ", c->interrupts);
+ sbuf_printf(s, "\tinterrupts %d, ", c->interrupts);
if (c->direction == PCMDIR_REC) {
sbuf_printf(s,
"overruns %d, feed %u, hfree %d, "
- "sfree %d [b:%d/%d/%d|bs:%d/%d/%d]",
+ "sfree %d\n\t\t[b:%d/%d/%d|bs:%d/%d/%d]",
c->xruns, c->feedcount,
sndbuf_getfree(c->bufhard),
sndbuf_getfree(c->bufsoft),
@@ -1204,7 +1300,7 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
} else {
sbuf_printf(s,
"underruns %d, feed %u, ready %d "
- "[b:%d/%d/%d|bs:%d/%d/%d]",
+ "\n\t\t[b:%d/%d/%d|bs:%d/%d/%d]",
c->xruns, c->feedcount,
sndbuf_getready(c->bufsoft),
sndbuf_getsize(c->bufhard),
@@ -1216,11 +1312,16 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
}
sbuf_printf(s, "\n\t");
- sbuf_printf(s, "channel flags=0x%b", c->flags, CHN_F_BITS);
+ sbuf_printf(s, "\tchannel flags=0x%b", c->flags, CHN_F_BITS);
sbuf_printf(s, "\n\t");
- sbuf_printf(s, "{%s}",
- (c->direction == PCMDIR_REC) ? "hardware" : "userland");
+ if (c->parentchannel != NULL) {
+ sbuf_printf(s, "\t{%s}", (c->direction == PCMDIR_REC) ?
+ c->parentchannel->name : "userland");
+ } else {
+ sbuf_printf(s, "\t{%s}", (c->direction == PCMDIR_REC) ?
+ "hardware" : "userland");
+ }
sbuf_printf(s, " -> ");
f = c->feeder;
while (f->source != NULL)
@@ -1252,8 +1353,15 @@ sndstat_prepare_pcm(struct sbuf *s, device_t dev, int verbose)
sbuf_printf(s, " -> ");
f = f->parent;
}
- sbuf_printf(s, "{%s}",
- (c->direction == PCMDIR_REC) ? "userland" : "hardware");
+ if (c->parentchannel != NULL) {
+ sbuf_printf(s, "{%s}", (c->direction == PCMDIR_REC) ?
+ "userland" : c->parentchannel->name);
+ } else {
+ sbuf_printf(s, "{%s}", (c->direction == PCMDIR_REC) ?
+ "userland" : "hardware");
+ }
+
+ CHN_UNLOCK(c);
}
return (0);
@@ -1271,11 +1379,8 @@ sndstat_prepare(struct sndstat_file *pf_self)
/* make sure buffer is reset */
sbuf_clear(s);
- if (snd_verbose > 0) {
- sbuf_printf(s, "FreeBSD Audio Driver (%ubit %d/%s)\n",
- (u_int)sizeof(intpcm32_t) << 3, SND_DRV_VERSION,
- MACHINE_ARCH);
- }
+ if (snd_verbose > 0)
+ sbuf_printf(s, "FreeBSD Audio Driver\n");
/* generate list of installed devices */
k = 0;
@@ -1333,8 +1438,8 @@ static void
sndstat_sysinit(void *p)
{
sx_init(&sndstat_lock, "sndstat lock");
- sndstat_dev = make_dev(&sndstat_cdevsw, SND_DEV_STATUS,
- UID_ROOT, GID_WHEEL, 0644, "sndstat");
+ sndstat_dev = make_dev(&sndstat_cdevsw, 0, UID_ROOT, GID_WHEEL, 0644,
+ "sndstat");
}
SYSINIT(sndstat_sysinit, SI_SUB_DRIVERS, SI_ORDER_FIRST, sndstat_sysinit, NULL);
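
[Editor's note: with the COMPAT_FREEBSD32 wrapper functions folded into
sndstat_ioctl() itself, both native and 32-bit callers now funnel into
sndstat_get_devs() with a plain buffer pointer and byte count. The two-step
sizing handshake is unchanged: a call with nbytes == 0 only reports the space
needed, and a second call copies the packed nvlist out. A hedged userland
sketch follows, assuming the sndstioc_nv_arg layout from <sys/sndstat.h> and
libnv (link with -lnv) for unpacking; it is not part of the diff.]

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <sys/nv.h>
    #include <sys/sndstat.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
    	struct sndstioc_nv_arg arg;
    	nvlist_t *nvl;
    	int fd;

    	fd = open("/dev/sndstat", O_RDONLY);
    	if (fd < 0)
    		err(1, "open(/dev/sndstat)");
    	/* First call sizes the buffer... */
    	arg.buf = NULL;
    	arg.nbytes = 0;
    	if (ioctl(fd, SNDSTIOC_GET_DEVS, &arg) < 0)
    		err(1, "SNDSTIOC_GET_DEVS (size)");
    	arg.buf = malloc(arg.nbytes);
    	if (arg.buf == NULL)
    		err(1, "malloc");
    	/* ...second call fetches the packed nvlist.  The kernel
    	 * signals a buffer that became too small by returning
    	 * nbytes == 0, which nvlist_unpack() below would reject. */
    	if (ioctl(fd, SNDSTIOC_GET_DEVS, &arg) < 0)
    		err(1, "SNDSTIOC_GET_DEVS (fetch)");
    	nvl = nvlist_unpack(arg.buf, arg.nbytes, 0);
    	if (nvl == NULL)
    		err(1, "nvlist_unpack");
    	printf("unpacked %zu bytes of device info\n", arg.nbytes);
    	nvlist_destroy(nvl);
    	free(arg.buf);
    	close(fd);
    	return (0);
    }
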
diff --git a/sys/dev/sound/pcm/sound.c b/sys/dev/sound/pcm/sound.c
index 9d5eaf3f5ad7..cb510d526fa8 100644
--- a/sys/dev/sound/pcm/sound.c
+++ b/sys/dev/sound/pcm/sound.c
@@ -6,7 +6,7 @@
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* Copyright (c) 1997 Luigi Rizzo
* All rights reserved.
- * Copyright (c) 2024 The FreeBSD Foundation
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
*
* Portions of this software were developed by Christos Margiolis
* <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
@@ -41,7 +41,6 @@
#include <dev/sound/pcm/ac97.h>
#include <dev/sound/pcm/vchan.h>
#include <dev/sound/pcm/dsp.h>
-#include <dev/sound/version.h>
#include <sys/limits.h>
#include <sys/sysctl.h>
@@ -49,30 +48,15 @@
devclass_t pcm_devclass;
-int pcm_veto_load = 1;
-
int snd_unit = -1;
static int snd_unit_auto = -1;
SYSCTL_INT(_hw_snd, OID_AUTO, default_auto, CTLFLAG_RWTUN,
&snd_unit_auto, 0, "assign default unit to a newly attached device");
-int snd_maxautovchans = 16;
-
SYSCTL_NODE(_hw, OID_AUTO, snd, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Sound driver");
-static void pcm_sysinit(device_t);
-
-/*
- * XXX I've had enough with people not telling proper version/arch
- * while reporting problems, not after 387397913213th questions/requests.
- */
-static char snd_driver_version[] =
- __XSTRING(SND_DRV_VERSION)"/"MACHINE_ARCH;
-SYSCTL_STRING(_hw_snd, OID_AUTO, version, CTLFLAG_RD, &snd_driver_version,
- 0, "driver version/arch");
-
/**
* @brief Unit number allocator for syncgroup IDs
*/
@@ -121,210 +105,6 @@ snd_setup_intr(device_t dev, struct resource *res, int flags, driver_intr_t hand
return bus_setup_intr(dev, res, flags, NULL, hand, param, cookiep);
}
-int
-pcm_setvchans(struct snddev_info *d, int direction, int newcnt, int num)
-{
- struct pcm_channel *c, *ch, *nch;
- struct pcmchan_caps *caps;
- int i, err, vcnt;
-
- PCM_BUSYASSERT(d);
-
- if ((direction == PCMDIR_PLAY && d->playcount < 1) ||
- (direction == PCMDIR_REC && d->reccount < 1))
- return (ENODEV);
-
- if (!(d->flags & SD_F_AUTOVCHAN))
- return (EINVAL);
-
- if (newcnt < 0 || newcnt > SND_MAXVCHANS)
- return (E2BIG);
-
- if (direction == PCMDIR_PLAY)
- vcnt = d->pvchancount;
- else if (direction == PCMDIR_REC)
- vcnt = d->rvchancount;
- else
- return (EINVAL);
-
- if (newcnt > vcnt) {
- KASSERT(num == -1 ||
- (num >= 0 && num < SND_MAXVCHANS && (newcnt - 1) == vcnt),
- ("bogus vchan_create() request num=%d newcnt=%d vcnt=%d",
- num, newcnt, vcnt));
- /* add new vchans - find a parent channel first */
- ch = NULL;
- CHN_FOREACH(c, d, channels.pcm) {
- CHN_LOCK(c);
- if (c->direction == direction &&
- ((c->flags & CHN_F_HAS_VCHAN) || (vcnt == 0 &&
- c->refcount < 1 &&
- !(c->flags & (CHN_F_BUSY | CHN_F_VIRTUAL))))) {
- /*
- * Reuse hw channel with vchans already
- * created.
- */
- if (c->flags & CHN_F_HAS_VCHAN) {
- ch = c;
- break;
- }
- /*
- * No vchans ever created, look for
- * channels with supported formats.
- */
- caps = chn_getcaps(c);
- if (caps == NULL) {
- CHN_UNLOCK(c);
- continue;
- }
- for (i = 0; caps->fmtlist[i] != 0; i++) {
- if (caps->fmtlist[i] & AFMT_CONVERTIBLE)
- break;
- }
- if (caps->fmtlist[i] != 0) {
- ch = c;
- break;
- }
- }
- CHN_UNLOCK(c);
- }
- if (ch == NULL)
- return (EBUSY);
- ch->flags |= CHN_F_BUSY;
- err = 0;
- while (err == 0 && newcnt > vcnt) {
- err = vchan_create(ch, num);
- if (err == 0)
- vcnt++;
- else if (err == E2BIG && newcnt > vcnt)
- device_printf(d->dev,
- "%s: err=%d Maximum channel reached.\n",
- __func__, err);
- }
- if (vcnt == 0)
- ch->flags &= ~CHN_F_BUSY;
- CHN_UNLOCK(ch);
- if (err != 0)
- return (err);
- } else if (newcnt < vcnt) {
- KASSERT(num == -1,
- ("bogus vchan_destroy() request num=%d", num));
- CHN_FOREACH(c, d, channels.pcm) {
- CHN_LOCK(c);
- if (c->direction != direction ||
- CHN_EMPTY(c, children) ||
- !(c->flags & CHN_F_HAS_VCHAN)) {
- CHN_UNLOCK(c);
- continue;
- }
- CHN_FOREACH_SAFE(ch, c, nch, children) {
- CHN_LOCK(ch);
- if (vcnt == 1 && c->refcount > 0) {
- CHN_UNLOCK(ch);
- break;
- }
- if (!(ch->flags & CHN_F_BUSY) &&
- ch->refcount < 1) {
- err = vchan_destroy(ch);
- if (err == 0)
- vcnt--;
- } else
- CHN_UNLOCK(ch);
- if (vcnt == newcnt)
- break;
- }
- CHN_UNLOCK(c);
- break;
- }
- }
-
- return (0);
-}
-
-/* return error status and a locked channel */
-int
-pcm_chnalloc(struct snddev_info *d, struct pcm_channel **ch, int direction,
- pid_t pid, char *comm)
-{
- struct pcm_channel *c;
- int err, vchancount, vchan_num;
- bool retry;
-
- KASSERT(d != NULL && ch != NULL &&
- (direction == PCMDIR_PLAY || direction == PCMDIR_REC),
- ("%s(): invalid d=%p ch=%p direction=%d pid=%d",
- __func__, d, ch, direction, pid));
- PCM_BUSYASSERT(d);
-
- *ch = NULL;
- vchan_num = 0;
- vchancount = (direction == PCMDIR_PLAY) ? d->pvchancount :
- d->rvchancount;
-
- retry = false;
-retry_chnalloc:
- err = ENOTSUP;
- /* scan for a free channel */
- CHN_FOREACH(c, d, channels.pcm) {
- CHN_LOCK(c);
- if (c->direction == direction && (c->flags & CHN_F_VIRTUAL)) {
- if (vchancount < snd_maxautovchans &&
- vchan_num < c->unit) {
- CHN_UNLOCK(c);
- goto vchan_alloc;
- }
- vchan_num++;
- }
- if (c->direction == direction && !(c->flags & CHN_F_BUSY)) {
- c->flags |= CHN_F_BUSY;
- c->pid = pid;
- strlcpy(c->comm, (comm != NULL) ? comm :
- CHN_COMM_UNKNOWN, sizeof(c->comm));
- *ch = c;
- return (0);
- } else if (c->direction == direction && (c->flags & CHN_F_BUSY))
- err = EBUSY;
- CHN_UNLOCK(c);
- }
-
- /*
- * We came from retry_chnalloc and still didn't find a free channel.
- */
- if (retry)
- return (err);
-
-vchan_alloc:
- /* no channel available */
- if (!(vchancount > 0 && vchancount < snd_maxautovchans))
- return (err);
- err = pcm_setvchans(d, direction, vchancount + 1, -1);
- if (err == 0) {
- retry = true;
- goto retry_chnalloc;
- }
-
- return (err);
-}
-
-static void
-pcm_setmaxautovchans(struct snddev_info *d, int num)
-{
- PCM_BUSYASSERT(d);
-
- if (num < 0)
- return;
-
- if (num >= 0 && d->pvchancount > num)
- (void)pcm_setvchans(d, PCMDIR_PLAY, num, -1);
- else if (num > 0 && d->pvchancount == 0)
- (void)pcm_setvchans(d, PCMDIR_PLAY, 1, -1);
-
- if (num >= 0 && d->rvchancount > num)
- (void)pcm_setvchans(d, PCMDIR_REC, num, -1);
- else if (num > 0 && d->rvchancount == 0)
- (void)pcm_setvchans(d, PCMDIR_REC, 1, -1);
-}
-
static int
sysctl_hw_snd_default_unit(SYSCTL_HANDLER_ARGS)
{
@@ -334,256 +114,42 @@ sysctl_hw_snd_default_unit(SYSCTL_HANDLER_ARGS)
unit = snd_unit;
error = sysctl_handle_int(oidp, &unit, 0, req);
if (error == 0 && req->newptr != NULL) {
+ bus_topo_lock();
d = devclass_get_softc(pcm_devclass, unit);
- if (!PCM_REGISTERED(d) || CHN_EMPTY(d, channels.pcm))
+ if (!PCM_REGISTERED(d) || CHN_EMPTY(d, channels.pcm)) {
+ bus_topo_unlock();
return EINVAL;
+ }
snd_unit = unit;
snd_unit_auto = 0;
+ bus_topo_unlock();
}
return (error);
}
/* XXX: do we need a way to let the user change the default unit? */
SYSCTL_PROC(_hw_snd, OID_AUTO, default_unit,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_ANYBODY | CTLFLAG_NEEDGIANT, 0,
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_ANYBODY | CTLFLAG_MPSAFE, 0,
sizeof(int), sysctl_hw_snd_default_unit, "I",
"default sound device");
-static int
-sysctl_hw_snd_maxautovchans(SYSCTL_HANDLER_ARGS)
-{
- struct snddev_info *d;
- int i, v, error;
-
- v = snd_maxautovchans;
- error = sysctl_handle_int(oidp, &v, 0, req);
- if (error == 0 && req->newptr != NULL) {
- if (v < 0)
- v = 0;
- if (v > SND_MAXVCHANS)
- v = SND_MAXVCHANS;
- snd_maxautovchans = v;
- for (i = 0; pcm_devclass != NULL &&
- i < devclass_get_maxunit(pcm_devclass); i++) {
- d = devclass_get_softc(pcm_devclass, i);
- if (!PCM_REGISTERED(d))
- continue;
- PCM_ACQUIRE_QUICK(d);
- pcm_setmaxautovchans(d, v);
- PCM_RELEASE_QUICK(d);
- }
- }
- return (error);
-}
-SYSCTL_PROC(_hw_snd, OID_AUTO, maxautovchans,
- CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_NEEDGIANT, 0, sizeof(int),
- sysctl_hw_snd_maxautovchans, "I",
- "maximum virtual channel");
-
-struct pcm_channel *
-pcm_chn_create(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls, int dir, int num, void *devinfo)
-{
- struct pcm_channel *ch;
- int direction, err, rpnum, *pnum, max;
- int type, unit;
- char *dirs, *devname, buf[CHN_NAMELEN];
-
- PCM_BUSYASSERT(d);
- PCM_LOCKASSERT(d);
- KASSERT(num >= -1, ("invalid num=%d", num));
-
- switch (dir) {
- case PCMDIR_PLAY:
- dirs = "play";
- direction = PCMDIR_PLAY;
- pnum = &d->playcount;
- type = SND_DEV_DSPHW_PLAY;
- max = SND_MAXHWCHAN;
- break;
- case PCMDIR_PLAY_VIRTUAL:
- dirs = "virtual_play";
- direction = PCMDIR_PLAY;
- pnum = &d->pvchancount;
- type = SND_DEV_DSPHW_VPLAY;
- max = SND_MAXVCHANS;
- break;
- case PCMDIR_REC:
- dirs = "record";
- direction = PCMDIR_REC;
- pnum = &d->reccount;
- type = SND_DEV_DSPHW_REC;
- max = SND_MAXHWCHAN;
- break;
- case PCMDIR_REC_VIRTUAL:
- dirs = "virtual_record";
- direction = PCMDIR_REC;
- pnum = &d->rvchancount;
- type = SND_DEV_DSPHW_VREC;
- max = SND_MAXVCHANS;
- break;
- default:
- return (NULL);
- }
-
- unit = (num == -1) ? 0 : num;
-
- if (*pnum >= max || unit >= max)
- return (NULL);
-
- rpnum = 0;
-
- CHN_FOREACH(ch, d, channels.pcm) {
- if (ch->type != type)
- continue;
- if (unit == ch->unit && num != -1) {
- device_printf(d->dev,
- "channel num=%d allocated!\n", unit);
- return (NULL);
- }
- unit++;
- if (unit >= max) {
- device_printf(d->dev,
- "chan=%d > %d\n", unit, max);
- return (NULL);
- }
- rpnum++;
- }
-
- if (*pnum != rpnum) {
- device_printf(d->dev,
- "%s(): WARNING: pnum screwed : dirs=%s pnum=%d rpnum=%d\n",
- __func__, dirs, *pnum, rpnum);
- return (NULL);
- }
-
- PCM_UNLOCK(d);
- ch = malloc(sizeof(*ch), M_DEVBUF, M_WAITOK | M_ZERO);
- ch->methods = kobj_create(cls, M_DEVBUF, M_WAITOK | M_ZERO);
- ch->type = type;
- ch->unit = unit;
- ch->pid = -1;
- strlcpy(ch->comm, CHN_COMM_UNUSED, sizeof(ch->comm));
- ch->parentsnddev = d;
- ch->parentchannel = parent;
- ch->dev = d->dev;
- ch->trigger = PCMTRIG_STOP;
- devname = dsp_unit2name(buf, sizeof(buf), ch);
- if (devname == NULL) {
- device_printf(d->dev, "Failed to query device name");
- kobj_delete(ch->methods, M_DEVBUF);
- free(ch, M_DEVBUF);
- return (NULL);
- }
- snprintf(ch->name, sizeof(ch->name), "%s:%s:%s",
- device_get_nameunit(ch->dev), dirs, devname);
-
- err = chn_init(ch, devinfo, dir, direction);
- PCM_LOCK(d);
- if (err) {
- device_printf(d->dev, "chn_init(%s) failed: err = %d\n",
- ch->name, err);
- kobj_delete(ch->methods, M_DEVBUF);
- free(ch, M_DEVBUF);
- return (NULL);
- }
-
- return (ch);
-}
-
-int
-pcm_chn_add(struct snddev_info *d, struct pcm_channel *ch)
-{
- PCM_BUSYASSERT(d);
- PCM_LOCKASSERT(d);
- KASSERT(ch != NULL && (ch->direction == PCMDIR_PLAY ||
- ch->direction == PCMDIR_REC), ("Invalid pcm channel"));
-
- CHN_INSERT_SORT_ASCEND(d, ch, channels.pcm);
-
- switch (ch->type) {
- case SND_DEV_DSPHW_PLAY:
- d->playcount++;
- break;
- case SND_DEV_DSPHW_VPLAY:
- d->pvchancount++;
- break;
- case SND_DEV_DSPHW_REC:
- d->reccount++;
- break;
- case SND_DEV_DSPHW_VREC:
- d->rvchancount++;
- break;
- default:
- break;
- }
-
- return (0);
-}
-
-int
-pcm_chn_remove(struct snddev_info *d, struct pcm_channel *ch)
-{
- struct pcm_channel *tmp;
-
- PCM_BUSYASSERT(d);
- PCM_LOCKASSERT(d);
-
- tmp = NULL;
-
- CHN_FOREACH(tmp, d, channels.pcm) {
- if (tmp == ch)
- break;
- }
-
- if (tmp != ch)
- return (EINVAL);
-
- CHN_REMOVE(d, ch, channels.pcm);
-
- switch (ch->type) {
- case SND_DEV_DSPHW_PLAY:
- d->playcount--;
- break;
- case SND_DEV_DSPHW_VPLAY:
- d->pvchancount--;
- break;
- case SND_DEV_DSPHW_REC:
- d->reccount--;
- break;
- case SND_DEV_DSPHW_VREC:
- d->rvchancount--;
- break;
- default:
- break;
- }
-
- return (0);
-}
-
int
pcm_addchan(device_t dev, int dir, kobj_class_t cls, void *devinfo)
{
struct snddev_info *d = device_get_softc(dev);
struct pcm_channel *ch;
- int err;
-
- PCM_BUSYASSERT(d);
+ int err = 0;
PCM_LOCK(d);
- ch = pcm_chn_create(d, NULL, cls, dir, -1, devinfo);
+ PCM_WAIT(d);
+ PCM_ACQUIRE(d);
+ ch = chn_init(d, NULL, cls, dir, devinfo);
if (!ch) {
- device_printf(d->dev, "pcm_chn_create(%s, %d, %p) failed\n",
+ device_printf(d->dev, "chn_init(%s, %d, %p) failed\n",
cls->name, dir, devinfo);
- PCM_UNLOCK(d);
- return (ENODEV);
+ err = ENODEV;
}
-
- err = pcm_chn_add(d, ch);
+ PCM_RELEASE(d);
PCM_UNLOCK(d);
- if (err) {
- device_printf(d->dev, "pcm_chn_add(%s) failed, err=%d\n",
- ch->name, err);
- chn_kill(ch);
- }
return (err);
}
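
The PCM_LOCK/PCM_WAIT/PCM_ACQUIRE sequence above is the serialization idiom used throughout sound.c; a minimal sketch of the pattern, with the protected work left hypothetical:

	PCM_LOCK(d);
	PCM_WAIT(d);	/* sleep until SD_F_BUSY is clear */
	PCM_ACQUIRE(d);	/* mark the softc busy */
	/* ... manipulate the channel lists here ... */
	PCM_RELEASE(d);	/* clear SD_F_BUSY, cv_broadcast() any waiters */
	PCM_UNLOCK(d);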
@@ -592,46 +158,53 @@ static void
pcm_killchans(struct snddev_info *d)
{
struct pcm_channel *ch;
- int error;
- bool found;
+ bool again;
PCM_BUSYASSERT(d);
- do {
- found = false;
+ KASSERT(!PCM_REGISTERED(d), ("%s(): still registered\n", __func__));
+
+ for (;;) {
+ again = false;
+ /* Make sure all channels are stopped. */
CHN_FOREACH(ch, d, channels.pcm) {
CHN_LOCK(ch);
- /*
- * Make sure no channel has went to sleep in the
- * meantime.
- */
- chn_shutdown(ch);
- /*
- * We have to give a thread sleeping in chn_sleep() a
- * chance to observe that the channel is dead.
- */
- if ((ch->flags & CHN_F_SLEEPING) == 0) {
- found = true;
+ if (ch->inprog == 0 && ch->sleeping == 0 &&
+ CHN_STOPPED(ch)) {
CHN_UNLOCK(ch);
- break;
+ continue;
}
+ chn_shutdown(ch);
+ if (ch->direction == PCMDIR_PLAY)
+ chn_flush(ch);
+ else
+ chn_abort(ch);
CHN_UNLOCK(ch);
+ again = true;
}
-
/*
- * All channels are still sleeping. Sleep for a bit and try
- * again to see if any of them is awake now.
+ * Some channels are still active. Sleep for a bit and try
+ * again.
*/
- if (!found) {
- pause_sbt("pcmkillchans", SBT_1MS * 5, 0, 0);
- continue;
- }
+ if (again)
+ pause_sbt("pcmkillchans", mstosbt(5), 0, 0);
+ else
+ break;
+ }
- PCM_LOCK(d);
- error = pcm_chn_remove(d, ch);
- PCM_UNLOCK(d);
- if (error == 0)
- chn_kill(ch);
- } while (!CHN_EMPTY(d, channels.pcm));
+ /* All channels are finally dead. */
+ while (!CHN_EMPTY(d, channels.pcm)) {
+ ch = CHN_FIRST(d, channels.pcm);
+ chn_kill(ch);
+ }
+
+ if (d->p_unr != NULL)
+ delete_unrhdr(d->p_unr);
+ if (d->vp_unr != NULL)
+ delete_unrhdr(d->vp_unr);
+ if (d->r_unr != NULL)
+ delete_unrhdr(d->r_unr);
+ if (d->vr_unr != NULL)
+ delete_unrhdr(d->vr_unr);
}
static int
@@ -642,6 +215,7 @@ pcm_best_unit(int old)
best = -1;
bestprio = -100;
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
@@ -657,53 +231,9 @@ pcm_best_unit(int old)
bestprio = prio;
}
}
- return (best);
-}
+ bus_topo_unlock();
-int
-pcm_setstatus(device_t dev, char *str)
-{
- struct snddev_info *d = device_get_softc(dev);
-
- /* should only be called once */
- if (d->flags & SD_F_REGISTERED)
- return (EINVAL);
-
- PCM_BUSYASSERT(d);
-
- if (d->playcount == 0 || d->reccount == 0)
- d->flags |= SD_F_SIMPLEX;
-
- if (d->playcount > 0 || d->reccount > 0)
- d->flags |= SD_F_AUTOVCHAN;
-
- pcm_setmaxautovchans(d, snd_maxautovchans);
-
- strlcpy(d->status, str, SND_STATUSLEN);
-
- PCM_LOCK(d);
-
- /* Done, we're ready.. */
- d->flags |= SD_F_REGISTERED;
-
- PCM_RELEASE(d);
-
- PCM_UNLOCK(d);
-
- /*
- * Create all sysctls once SD_F_REGISTERED is set else
- * tunable sysctls won't work:
- */
- pcm_sysinit(dev);
-
- if (snd_unit_auto < 0)
- snd_unit_auto = (snd_unit < 0) ? 1 : 0;
- if (snd_unit < 0 || snd_unit_auto > 1)
- snd_unit = device_get_unit(dev);
- else if (snd_unit_auto == 1)
- snd_unit = pcm_best_unit(snd_unit);
-
- return (0);
+ return (best);
}
uint32_t
@@ -797,75 +327,43 @@ sysctl_dev_pcm_bitperfect(SYSCTL_HANDLER_ARGS)
return (err);
}
-static u_int8_t
-pcm_mode_init(struct snddev_info *d)
+static int
+sysctl_dev_pcm_mode(SYSCTL_HANDLER_ARGS)
{
- u_int8_t mode = 0;
+ struct snddev_info *d;
+ int mode = 0;
+
+ d = oidp->oid_arg1;
+ if (!PCM_REGISTERED(d))
+ return (ENODEV);
+ PCM_LOCK(d);
if (d->playcount > 0)
mode |= PCM_MODE_PLAY;
if (d->reccount > 0)
mode |= PCM_MODE_REC;
if (d->mixer_dev != NULL)
mode |= PCM_MODE_MIXER;
+ PCM_UNLOCK(d);
- return (mode);
+ return (sysctl_handle_int(oidp, &mode, 0, req));
}
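
The mode word packs the bits described in the sysctl help string (1=mixer, 2=play, 4=rec); a small userland sketch that decodes it, assuming unit 0:

	#include <sys/types.h>
	#include <sys/sysctl.h>
	#include <stdio.h>

	int
	main(void)
	{
		int mode;
		size_t len = sizeof(mode);

		if (sysctlbyname("dev.pcm.0.mode", &mode, &len, NULL, 0) != 0)
			return (1);
		printf("mixer=%d play=%d rec=%d\n",
		    !!(mode & 1), !!(mode & 2), !!(mode & 4));
		return (0);
	}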
-static void
-pcm_sysinit(device_t dev)
-{
- struct snddev_info *d = device_get_softc(dev);
- u_int8_t mode;
-
- mode = pcm_mode_init(d);
-
- /* XXX: a user should be able to set this with a control tool, the
- sysadmin then needs min+max sysctls for this */
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "buffersize", CTLFLAG_RD, &d->bufsz, 0, "allocated buffer size");
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
- "bitperfect", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, d,
- sizeof(d), sysctl_dev_pcm_bitperfect, "I",
- "bit-perfect playback/recording (0=disable, 1=enable)");
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "mode", CTLFLAG_RD, NULL, mode,
- "mode (1=mixer, 2=play, 4=rec. The values are OR'ed if more than "
- "one mode is supported)");
- if (d->flags & SD_F_AUTOVCHAN)
- vchan_initsys(dev);
- if (d->flags & SD_F_EQ)
- feeder_eq_initsys(dev);
-}
-
-int
-pcm_register(device_t dev, void *devinfo, int numplay, int numrec)
+/*
+ * Basic initialization so that drivers can use pcm_addchan() before
+ * pcm_register().
+ */
+void
+pcm_init(device_t dev, void *devinfo)
{
struct snddev_info *d;
int i;
- if (pcm_veto_load) {
- device_printf(dev, "disabled due to an error while initialising: %d\n", pcm_veto_load);
-
- return EINVAL;
- }
-
d = device_get_softc(dev);
d->dev = dev;
d->lock = snd_mtxcreate(device_get_nameunit(dev), "sound cdev");
cv_init(&d->cv, device_get_nameunit(dev));
- PCM_ACQUIRE_QUICK(d);
-#if 0
- /*
- * d->flags should be cleared by the allocator of the softc.
- * We cannot clear this field here because several devices set
- * this flag before calling pcm_register().
- */
- d->flags = 0;
-#endif
+
i = 0;
if (resource_int_value(device_get_name(dev), device_get_unit(dev),
"vpc", &i) != 0 || i != 0)
@@ -884,15 +382,42 @@ pcm_register(device_t dev, void *devinfo, int numplay, int numrec)
d->pvchanformat = 0;
d->rvchanrate = 0;
d->rvchanformat = 0;
+ d->p_unr = new_unrhdr(0, INT_MAX, NULL);
+ d->vp_unr = new_unrhdr(0, INT_MAX, NULL);
+ d->r_unr = new_unrhdr(0, INT_MAX, NULL);
+ d->vr_unr = new_unrhdr(0, INT_MAX, NULL);
CHN_INIT(d, channels.pcm);
CHN_INIT(d, channels.pcm.busy);
CHN_INIT(d, channels.pcm.opened);
+ CHN_INIT(d, channels.pcm.primary);
+}
+
+int
+pcm_register(device_t dev, char *str)
+{
+ struct snddev_info *d = device_get_softc(dev);
+
+ /* should only be called once */
+ if (d->flags & SD_F_REGISTERED)
+ return (EINVAL);
- /* XXX This is incorrect, but lets play along for now. */
- if ((numplay == 0 || numrec == 0) && numplay != numrec)
+ if (d->playcount == 0 || d->reccount == 0)
d->flags |= SD_F_SIMPLEX;
+ if (d->playcount > 0)
+ d->flags |= SD_F_PVCHANS;
+ if (d->reccount > 0)
+ d->flags |= SD_F_RVCHANS;
+ strlcpy(d->status, str, SND_STATUSLEN);
+
+ /* Done, we're ready. */
+ d->flags |= SD_F_REGISTERED;
+
+ /*
+ * Create all sysctls once SD_F_REGISTERED is set, otherwise
+ * tunable sysctls won't work.
+ */
sysctl_ctx_init(&d->play_sysctl_ctx);
d->play_sysctl_tree = SYSCTL_ADD_NODE(&d->play_sysctl_ctx,
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "play",
@@ -902,8 +427,33 @@ pcm_register(device_t dev, void *devinfo, int numplay, int numrec)
SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rec",
CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "recording channels node");
- if (numplay > 0 || numrec > 0)
- d->flags |= SD_F_AUTOVCHAN;
+ /* XXX: a user should be able to set this with a control tool; the
+ sysadmin then needs min+max sysctls for this */
+ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
+ OID_AUTO, "buffersize", CTLFLAG_RD, &d->bufsz, 0,
+ "allocated buffer size");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "bitperfect", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, d,
+ sizeof(d), sysctl_dev_pcm_bitperfect, "I",
+ "bit-perfect playback/recording (0=disable, 1=enable)");
+ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "mode", CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_MPSAFE, d, sizeof(d),
+ sysctl_dev_pcm_mode, "I",
+ "mode (1=mixer, 2=play, 4=rec. The values are OR'ed if more than "
+ "one mode is supported)");
+ vchan_initsys(dev);
+ if (d->flags & SD_F_EQ)
+ feeder_eq_initsys(dev);
+
+ if (snd_unit_auto < 0)
+ snd_unit_auto = (snd_unit < 0) ? 1 : 0;
+ if (snd_unit < 0 || snd_unit_auto > 1)
+ snd_unit = device_get_unit(dev);
+ else if (snd_unit_auto == 1)
+ snd_unit = pcm_best_unit(snd_unit);
sndstat_register(dev, d->status);
@@ -914,7 +464,6 @@ int
pcm_unregister(device_t dev)
{
struct snddev_info *d;
- struct pcm_channel *ch;
d = device_get_softc(dev);
@@ -926,29 +475,14 @@ pcm_unregister(device_t dev)
PCM_LOCK(d);
PCM_WAIT(d);
- d->flags |= SD_F_DETACHING;
+ d->flags &= ~SD_F_REGISTERED;
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- CHN_FOREACH(ch, d, channels.pcm) {
- CHN_LOCK(ch);
- /*
- * Do not wait for the timeout in chn_read()/chn_write(). Wake
- * up the sleeping thread and kill the channel.
- */
- chn_shutdown(ch);
- chn_abort(ch);
- CHN_UNLOCK(ch);
- }
+ pcm_killchans(d);
- /* remove /dev/sndstat entry first */
- sndstat_unregister(dev);
-
- PCM_LOCK(d);
- d->flags |= SD_F_DYING;
- d->flags &= ~SD_F_REGISTERED;
- PCM_UNLOCK(d);
+ PCM_RELEASE_QUICK(d);
if (d->play_sysctl_tree != NULL) {
sysctl_ctx_free(&d->play_sysctl_ctx);
@@ -959,15 +493,11 @@ pcm_unregister(device_t dev)
d->rec_sysctl_tree = NULL;
}
+ sndstat_unregister(dev);
+ mixer_uninit(dev);
dsp_destroy_dev(dev);
- (void)mixer_uninit(dev);
- pcm_killchans(d);
-
- PCM_LOCK(d);
- PCM_RELEASE(d);
cv_destroy(&d->cv);
- PCM_UNLOCK(d);
snd_mtxfree(d->lock);
if (snd_unit == device_get_unit(dev)) {
@@ -1008,9 +538,8 @@ sound_oss_sysinfo(oss_sysinfo *si)
struct snddev_info *d;
struct pcm_channel *c;
- int i, j, ncards;
-
- ncards = 0;
+ int j;
+ size_t i;
strlcpy(si->product, si_product, sizeof(si->product));
strlcpy(si->version, si_version, sizeof(si->version));
@@ -1019,13 +548,14 @@ sound_oss_sysinfo(oss_sysinfo *si)
/*
* Iterate over PCM devices and their channels, gathering up data
- * for the numaudios, ncards, and openedaudio fields.
+ * for the numaudioengines and openedaudio fields.
*/
- si->numaudios = 0;
+ si->numaudioengines = 0;
bzero((void *)&si->openedaudio, sizeof(si->openedaudio));
j = 0;
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
@@ -1038,8 +568,7 @@ sound_oss_sysinfo(oss_sysinfo *si)
PCM_UNLOCKASSERT(d);
PCM_LOCK(d);
- si->numaudios += PCM_CHANCOUNT(d);
- ++ncards;
+ si->numaudioengines += PCM_CHANCOUNT(d);
CHN_FOREACH(c, d, channels.pcm) {
CHN_UNLOCKASSERT(c);
@@ -1053,7 +582,7 @@ sound_oss_sysinfo(oss_sysinfo *si)
PCM_UNLOCK(d);
}
- si->numaudioengines = si->numaudios;
+ bus_topo_unlock();
si->numsynths = 0; /* OSSv4 docs: this field is obsolete */
/**
@@ -1069,8 +598,16 @@ sound_oss_sysinfo(oss_sysinfo *si)
*/
si->nummidis = 0;
si->numtimers = 0;
- si->nummixers = mixer_count;
- si->numcards = ncards;
+ /*
+ * Set this to the maximum unit number so that applications will not
+ * break if they try to loop through all mixers and some of them are
+ * not available.
+ */
+ bus_topo_lock();
+ si->nummixers = devclass_get_maxunit(pcm_devclass);
+ si->numcards = devclass_get_maxunit(pcm_devclass);
+ si->numaudios = devclass_get_maxunit(pcm_devclass);
+ bus_topo_unlock();
/* OSSv4 docs: Intended only for test apps; API doesn't
really have much of a concept of cards. Shouldn't be
used by applications. */
@@ -1086,7 +623,7 @@ sound_oss_sysinfo(oss_sysinfo *si)
* Si->filler is a reserved array, but according to docs each
* element should be set to -1.
*/
- for (i = 0; i < sizeof(si->filler)/sizeof(si->filler[0]); i++)
+ for (i = 0; i < nitems(si->filler); i++)
si->filler[i] = -1;
}
@@ -1094,38 +631,89 @@ int
sound_oss_card_info(oss_card_info *si)
{
struct snddev_info *d;
- int i, ncards;
-
- ncards = 0;
+ int i;
+ bus_topo_lock();
for (i = 0; pcm_devclass != NULL &&
i < devclass_get_maxunit(pcm_devclass); i++) {
d = devclass_get_softc(pcm_devclass, i);
- if (!PCM_REGISTERED(d))
- continue;
-
- if (ncards++ != si->card)
+ if (i != si->card)
continue;
- PCM_UNLOCKASSERT(d);
- PCM_LOCK(d);
-
- strlcpy(si->shortname, device_get_nameunit(d->dev),
- sizeof(si->shortname));
- strlcpy(si->longname, device_get_desc(d->dev),
- sizeof(si->longname));
- strlcpy(si->hw_info, d->status, sizeof(si->hw_info));
- si->intr_count = si->ack_count = 0;
-
- PCM_UNLOCK(d);
+ if (!PCM_REGISTERED(d)) {
+ snprintf(si->shortname, sizeof(si->shortname),
+ "pcm%d (n/a)", i);
+ strlcpy(si->longname, "Device unavailable",
+ sizeof(si->longname));
+ si->hw_info[0] = '\0';
+ si->intr_count = si->ack_count = 0;
+ } else {
+ PCM_UNLOCKASSERT(d);
+ PCM_LOCK(d);
+
+ strlcpy(si->shortname, device_get_nameunit(d->dev),
+ sizeof(si->shortname));
+ strlcpy(si->longname, device_get_desc(d->dev),
+ sizeof(si->longname));
+ strlcpy(si->hw_info, d->status, sizeof(si->hw_info));
+ si->intr_count = si->ack_count = 0;
+
+ PCM_UNLOCK(d);
+ }
+ bus_topo_unlock();
return (0);
}
+ bus_topo_unlock();
+
return (ENXIO);
}
/************************************************************************/
+static void
+sound_global_init(void)
+{
+ if (snd_verbose < 0 || snd_verbose > 4)
+ snd_verbose = 1;
+
+ if (snd_unit < 0)
+ snd_unit = -1;
+
+ snd_vchans_enable = true;
+
+ if (chn_latency < CHN_LATENCY_MIN ||
+ chn_latency > CHN_LATENCY_MAX)
+ chn_latency = CHN_LATENCY_DEFAULT;
+
+ if (chn_latency_profile < CHN_LATENCY_PROFILE_MIN ||
+ chn_latency_profile > CHN_LATENCY_PROFILE_MAX)
+ chn_latency_profile = CHN_LATENCY_PROFILE_DEFAULT;
+
+ if (feeder_rate_min < FEEDRATE_MIN ||
+ feeder_rate_max < FEEDRATE_MIN ||
+ feeder_rate_min > FEEDRATE_MAX ||
+ feeder_rate_max > FEEDRATE_MAX ||
+ !(feeder_rate_min < feeder_rate_max)) {
+ feeder_rate_min = FEEDRATE_RATEMIN;
+ feeder_rate_max = FEEDRATE_RATEMAX;
+ }
+
+ if (feeder_rate_round < FEEDRATE_ROUNDHZ_MIN ||
+ feeder_rate_round > FEEDRATE_ROUNDHZ_MAX)
+ feeder_rate_round = FEEDRATE_ROUNDHZ;
+
+ if (bootverbose)
+ printf("%s: snd_unit=%d snd_vchans_enable=%d "
+ "latency=%d "
+ "feeder_rate_min=%d feeder_rate_max=%d "
+ "feeder_rate_round=%d\n",
+ __func__, snd_unit, snd_vchans_enable,
+ chn_latency,
+ feeder_rate_min, feeder_rate_max,
+ feeder_rate_round);
+}
+
static int
sound_modevent(module_t mod, int type, void *data)
{
@@ -1133,20 +721,21 @@ sound_modevent(module_t mod, int type, void *data)
ret = 0;
switch (type) {
- case MOD_LOAD:
- pcm_devclass = devclass_create("pcm");
- pcmsg_unrhdr = new_unrhdr(1, INT_MAX, NULL);
- break;
- case MOD_UNLOAD:
- if (pcmsg_unrhdr != NULL) {
- delete_unrhdr(pcmsg_unrhdr);
- pcmsg_unrhdr = NULL;
- }
- break;
- case MOD_SHUTDOWN:
- break;
- default:
- ret = ENOTSUP;
+ case MOD_LOAD:
+ pcm_devclass = devclass_create("pcm");
+ pcmsg_unrhdr = new_unrhdr(1, INT_MAX, NULL);
+ sound_global_init();
+ break;
+ case MOD_UNLOAD:
+ if (pcmsg_unrhdr != NULL) {
+ delete_unrhdr(pcmsg_unrhdr);
+ pcmsg_unrhdr = NULL;
+ }
+ break;
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ ret = ENOTSUP;
}
return ret;
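
Taken together, the sound.c hunks split the old pcm_register(dev, devinfo, numplay, numrec)/pcm_setstatus() pair into pcm_init() plus pcm_register(dev, str); a minimal sketch of the new attach order in a hypothetical driver (the foo_* names are assumptions):

	static int
	foo_attach(device_t dev)
	{
		struct foo_softc *sc = device_get_softc(dev);
		char status[SND_STATUSLEN];

		pcm_init(dev, sc);	/* basic softc setup, no registration yet */
		pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE);
		if (pcm_addchan(dev, PCMDIR_PLAY, &foo_chan_class, sc) != 0)
			return (ENXIO);
		snprintf(status, SND_STATUSLEN, "at %s",
		    device_get_nameunit(device_get_parent(dev)));
		return (pcm_register(dev, status));	/* sets SD_F_REGISTERED */
	}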
diff --git a/sys/dev/sound/pcm/sound.h b/sys/dev/sound/pcm/sound.h
index 08aa56cc96f7..6bd435d0ea25 100644
--- a/sys/dev/sound/pcm/sound.h
+++ b/sys/dev/sound/pcm/sound.h
@@ -5,6 +5,10 @@
* Copyright (c) 1999 Cameron Grant <cg@FreeBSD.org>
* Copyright (c) 1995 Hannu Savolainen
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -83,7 +87,6 @@ struct snd_mixer;
#include <dev/sound/pcm/buffer.h>
#include <dev/sound/pcm/matrix.h>
-#include <dev/sound/pcm/matrix_map.h>
#include <dev/sound/pcm/channel.h>
#include <dev/sound/pcm/feeder.h>
#include <dev/sound/pcm/mixer.h>
@@ -99,213 +102,70 @@ struct snd_mixer;
#define SOUND_PREFVER SOUND_MODVER
#define SOUND_MAXVER SOUND_MODVER
-/*
- * By design, limit possible channels for each direction.
- */
-#define SND_MAXHWCHAN 256
-#define SND_MAXVCHANS SND_MAXHWCHAN
-
#define SD_F_SIMPLEX 0x00000001
-#define SD_F_AUTOVCHAN 0x00000002
+/* unused 0x00000002 */
#define SD_F_SOFTPCMVOL 0x00000004
-#define SD_F_DYING 0x00000008
-#define SD_F_DETACHING 0x00000010
-#define SD_F_BUSY 0x00000020
-#define SD_F_MPSAFE 0x00000040
-#define SD_F_REGISTERED 0x00000080
-#define SD_F_BITPERFECT 0x00000100
-#define SD_F_VPC 0x00000200 /* volume-per-channel */
-#define SD_F_EQ 0x00000400 /* EQ */
-#define SD_F_EQ_ENABLED 0x00000800 /* EQ enabled */
-#define SD_F_EQ_BYPASSED 0x00001000 /* EQ bypassed */
-#define SD_F_EQ_PC 0x00002000 /* EQ per-channel */
+#define SD_F_BUSY 0x00000008
+#define SD_F_MPSAFE 0x00000010
+#define SD_F_REGISTERED 0x00000020
+#define SD_F_BITPERFECT 0x00000040
+#define SD_F_VPC 0x00000080 /* volume-per-channel */
+#define SD_F_EQ 0x00000100 /* EQ */
+#define SD_F_EQ_ENABLED 0x00000200 /* EQ enabled */
+#define SD_F_EQ_BYPASSED 0x00000400 /* EQ bypassed */
+#define SD_F_EQ_PC 0x00000800 /* EQ per-channel */
+#define SD_F_PVCHANS 0x00001000 /* Playback vchans enabled */
+#define SD_F_RVCHANS 0x00002000 /* Recording vchans enabled */
#define SD_F_EQ_DEFAULT (SD_F_EQ | SD_F_EQ_ENABLED)
#define SD_F_EQ_MASK (SD_F_EQ | SD_F_EQ_ENABLED | \
SD_F_EQ_BYPASSED | SD_F_EQ_PC)
-#define SD_F_PRIO_RD 0x10000000
-#define SD_F_PRIO_WR 0x20000000
-#define SD_F_PRIO_SET (SD_F_PRIO_RD | SD_F_PRIO_WR)
-#define SD_F_DIR_SET 0x40000000
-#define SD_F_TRANSIENT 0xf0000000
-
#define SD_F_BITS "\020" \
"\001SIMPLEX" \
- "\002AUTOVCHAN" \
+ /* "\002 */ \
"\003SOFTPCMVOL" \
- "\004DYING" \
- "\005DETACHING" \
- "\006BUSY" \
- "\007MPSAFE" \
- "\010REGISTERED" \
- "\011BITPERFECT" \
- "\012VPC" \
- "\013EQ" \
- "\014EQ_ENABLED" \
- "\015EQ_BYPASSED" \
- "\016EQ_PC" \
- "\035PRIO_RD" \
- "\036PRIO_WR" \
- "\037DIR_SET"
-
-#define PCM_ALIVE(x) ((x) != NULL && (x)->lock != NULL && \
- !((x)->flags & SD_F_DYING))
-#define PCM_REGISTERED(x) (PCM_ALIVE(x) && \
- ((x)->flags & SD_F_REGISTERED))
-
-#define PCM_DETACHING(x) ((x)->flags & SD_F_DETACHING)
-
+ "\004BUSY" \
+ "\005MPSAFE" \
+ "\006REGISTERED" \
+ "\007BITPERFECT" \
+ "\010VPC" \
+ "\011EQ" \
+ "\012EQ_ENABLED" \
+ "\013EQ_BYPASSED" \
+ "\014EQ_PC" \
+ "\015PVCHANS" \
+ "\016RVCHANS"
+
+#define PCM_ALIVE(x) ((x) != NULL && (x)->lock != NULL)
+#define PCM_REGISTERED(x) (PCM_ALIVE(x) && ((x)->flags & SD_F_REGISTERED))
+
+#define PCM_MAXCHANS 10000
#define PCM_CHANCOUNT(d) \
(d->playcount + d->pvchancount + d->reccount + d->rvchancount)
/* many variables should be reduced to a range. Here define a macro */
#define RANGE(var, low, high) (var) = \
(((var)<(low))? (low) : ((var)>(high))? (high) : (var))
-#define DSP_BUFFSIZE (8192)
-
-/* make figuring out what a format is easier. got AFMT_STEREO already */
-#define AFMT_32BIT (AFMT_S32_LE | AFMT_S32_BE | AFMT_U32_LE | AFMT_U32_BE)
-#define AFMT_24BIT (AFMT_S24_LE | AFMT_S24_BE | AFMT_U24_LE | AFMT_U24_BE)
-#define AFMT_16BIT (AFMT_S16_LE | AFMT_S16_BE | AFMT_U16_LE | AFMT_U16_BE)
-#define AFMT_G711 (AFMT_MU_LAW | AFMT_A_LAW)
-#define AFMT_8BIT (AFMT_G711 | AFMT_U8 | AFMT_S8)
-#define AFMT_SIGNED (AFMT_S32_LE | AFMT_S32_BE | AFMT_S24_LE | AFMT_S24_BE | \
- AFMT_S16_LE | AFMT_S16_BE | AFMT_S8)
-#define AFMT_BIGENDIAN (AFMT_S32_BE | AFMT_U32_BE | AFMT_S24_BE | AFMT_U24_BE | \
- AFMT_S16_BE | AFMT_U16_BE)
-
-#define AFMT_CONVERTIBLE (AFMT_8BIT | AFMT_16BIT | AFMT_24BIT | \
- AFMT_32BIT)
-
-/* Supported vchan mixing formats */
-#define AFMT_VCHAN (AFMT_CONVERTIBLE & ~AFMT_G711)
-
-#define AFMT_PASSTHROUGH AFMT_AC3
-#define AFMT_PASSTHROUGH_RATE 48000
-#define AFMT_PASSTHROUGH_CHANNEL 2
-#define AFMT_PASSTHROUGH_EXTCHANNEL 0
-
-/*
- * We're simply using unused, contiguous bits from various AFMT_ definitions.
- * ~(0xb00ff7ff)
- */
-#define AFMT_ENCODING_MASK 0xf00fffff
-#define AFMT_CHANNEL_MASK 0x07f00000
-#define AFMT_CHANNEL_SHIFT 20
-#define AFMT_CHANNEL_MAX 0x7f
-#define AFMT_EXTCHANNEL_MASK 0x08000000
-#define AFMT_EXTCHANNEL_SHIFT 27
-#define AFMT_EXTCHANNEL_MAX 1
-
-#define AFMT_ENCODING(v) ((v) & AFMT_ENCODING_MASK)
-
-#define AFMT_EXTCHANNEL(v) (((v) & AFMT_EXTCHANNEL_MASK) >> \
- AFMT_EXTCHANNEL_SHIFT)
-
-#define AFMT_CHANNEL(v) (((v) & AFMT_CHANNEL_MASK) >> \
- AFMT_CHANNEL_SHIFT)
-
-#define AFMT_BIT(v) (((v) & AFMT_32BIT) ? 32 : \
- (((v) & AFMT_24BIT) ? 24 : \
- ((((v) & AFMT_16BIT) || \
- ((v) & AFMT_PASSTHROUGH)) ? 16 : 8)))
-
-#define AFMT_BPS(v) (AFMT_BIT(v) >> 3)
-#define AFMT_ALIGN(v) (AFMT_BPS(v) * AFMT_CHANNEL(v))
-
-#define SND_FORMAT(f, c, e) (AFMT_ENCODING(f) | \
- (((c) << AFMT_CHANNEL_SHIFT) & \
- AFMT_CHANNEL_MASK) | \
- (((e) << AFMT_EXTCHANNEL_SHIFT) & \
- AFMT_EXTCHANNEL_MASK))
-
-#define AFMT_U8_NE AFMT_U8
-#define AFMT_S8_NE AFMT_S8
-
-#define AFMT_SIGNED_NE (AFMT_S8_NE | AFMT_S16_NE | AFMT_S24_NE | AFMT_S32_NE)
-
-#define AFMT_NE (AFMT_SIGNED_NE | AFMT_U8_NE | AFMT_U16_NE | \
- AFMT_U24_NE | AFMT_U32_NE)
-
-/*
- * Minor numbers for the sound driver.
- *
- * Unfortunately Creative called the codec chip of SB as a DSP. For this
- * reason the /dev/dsp is reserved for digitized audio use. There is a
- * device for true DSP processors but it will be called something else.
- * In v3.0 it's /dev/sndproc but this could be a temporary solution.
- */
-
-#define SND_DEV_CTL 0 /* Control port /dev/mixer */
-#define SND_DEV_SEQ 1 /* Sequencer /dev/sequencer */
-#define SND_DEV_MIDIN 2 /* Raw midi access */
-#define SND_DEV_DSP 3 /* Digitized voice /dev/dsp */
-#define SND_DEV_AUDIO 4 /* Sparc compatible /dev/audio */
-#define SND_DEV_DSP16 5 /* Like /dev/dsp but 16 bits/sample */
-#define SND_DEV_STATUS 6 /* /dev/sndstat */
- /* #7 not in use now. */
-#define SND_DEV_SEQ2 8 /* /dev/sequencer, level 2 interface */
-#define SND_DEV_SNDPROC 9 /* /dev/sndproc for programmable devices */
-#define SND_DEV_PSS SND_DEV_SNDPROC /* ? */
-#define SND_DEV_NORESET 10
-
-#define SND_DEV_DSPHW_PLAY 11 /* specific playback channel */
-#define SND_DEV_DSPHW_VPLAY 12 /* specific virtual playback channel */
-#define SND_DEV_DSPHW_REC 13 /* specific record channel */
-#define SND_DEV_DSPHW_VREC 14 /* specific virtual record channel */
-
-#define SND_DEV_DSPHW_CD 15 /* s16le/stereo 44100Hz CD */
-
-/*
- * OSSv4 compatible device. For now, it serve no purpose and
- * the cloning itself will forward the request to ordinary /dev/dsp
- * instead.
- */
-#define SND_DEV_DSP_MMAP 16 /* /dev/dsp_mmap */
-#define SND_DEV_DSP_AC3 17 /* /dev/dsp_ac3 */
-#define SND_DEV_DSP_MULTICH 18 /* /dev/dsp_multich */
-#define SND_DEV_DSP_SPDIFOUT 19 /* /dev/dsp_spdifout */
-#define SND_DEV_DSP_SPDIFIN 20 /* /dev/dsp_spdifin */
#define DSP_DEFAULT_SPEED 8000
-#define ON 1
-#define OFF 0
-
-extern int pcm_veto_load;
extern int snd_unit;
-extern int snd_maxautovchans;
extern int snd_verbose;
extern devclass_t pcm_devclass;
extern struct unrhdr *pcmsg_unrhdr;
-/*
- * some macros for debugging purposes
- * DDB/DEB to enable/disable debugging stuff
- * BVDDB to enable debugging when bootverbose
- */
-#define BVDDB(x) if (bootverbose) x
-
#ifndef DEB
#define DEB(x)
#endif
SYSCTL_DECL(_hw_snd);
-int pcm_setvchans(struct snddev_info *d, int direction, int newcnt, int num);
-int pcm_chnalloc(struct snddev_info *d, struct pcm_channel **ch, int direction,
- pid_t pid, char *comm);
-
-struct pcm_channel *pcm_chn_create(struct snddev_info *d, struct pcm_channel *parent, kobj_class_t cls, int dir, int num, void *devinfo);
-int pcm_chn_add(struct snddev_info *d, struct pcm_channel *ch);
-int pcm_chn_remove(struct snddev_info *d, struct pcm_channel *ch);
-
int pcm_addchan(device_t dev, int dir, kobj_class_t cls, void *devinfo);
unsigned int pcm_getbuffersize(device_t dev, unsigned int minbufsz, unsigned int deflt, unsigned int maxbufsz);
-int pcm_register(device_t dev, void *devinfo, int numplay, int numrec);
+void pcm_init(device_t dev, void *devinfo);
+int pcm_register(device_t dev, char *str);
int pcm_unregister(device_t dev);
-int pcm_setstatus(device_t dev, char *str);
u_int32_t pcm_getflags(device_t dev);
void pcm_setflags(device_t dev, u_int32_t val);
void *pcm_getdevinfo(device_t dev);
@@ -322,6 +182,22 @@ void snd_mtxassert(void *m);
int sndstat_register(device_t dev, char *str);
int sndstat_unregister(device_t dev);
+/* These are the function codes assigned to the children of sound cards. */
+enum {
+ SCF_PCM,
+ SCF_MIDI,
+ SCF_SYNTH,
+};
+
+/*
+ * This is the device information struct, used by a bridge device to pass the
+ * device function code to the children.
+ */
+struct sndcard_func {
+ int func; /* The function code. */
+ void *varinfo; /* Bridge-specific information. */
+};
+
/*
* this is rather kludgey- we need to duplicate these struct def'ns from sound.c
* so that the macro versions of pcm_{,un}lock can dereference them.
@@ -338,6 +214,9 @@ struct snddev_info {
struct {
SLIST_HEAD(, pcm_channel) head;
} opened;
+ struct {
+ SLIST_HEAD(, pcm_channel) head;
+ } primary;
} pcm;
} channels;
unsigned playcount, reccount, pvchancount, rvchancount;
@@ -349,12 +228,16 @@ struct snddev_info {
struct mtx *lock;
struct cdev *mixer_dev;
struct cdev *dsp_dev;
- uint32_t pvchanrate, pvchanformat;
- uint32_t rvchanrate, rvchanformat;
+ uint32_t pvchanrate, pvchanformat, pvchanmode;
+ uint32_t rvchanrate, rvchanformat, rvchanmode;
int32_t eqpreamp;
struct sysctl_ctx_list play_sysctl_ctx, rec_sysctl_ctx;
struct sysctl_oid *play_sysctl_tree, *rec_sysctl_tree;
struct cv cv;
+ struct unrhdr *p_unr;
+ struct unrhdr *vp_unr;
+ struct unrhdr *r_unr;
+ struct unrhdr *vr_unr;
};
void sound_oss_sysinfo(oss_sysinfo *);
@@ -405,15 +288,7 @@ int sound_oss_card_info(oss_card_info *);
__func__, __LINE__); \
if ((x)->flags & SD_F_BUSY) { \
(x)->flags &= ~SD_F_BUSY; \
- if ((x)->cv.cv_waiters != 0) { \
- if ((x)->cv.cv_waiters > 1 && snd_verbose > 3) \
- device_printf((x)->dev, \
- "%s(%d): [PCM RELEASE] " \
- "cv_waiters=%d > 1!\n", \
- __func__, __LINE__, \
- (x)->cv.cv_waiters); \
- cv_broadcast(&(x)->cv); \
- } \
+ cv_broadcast(&(x)->cv); \
} else \
panic("%s(%d): [PCM RELEASE] Releasing non-BUSY cv!", \
__func__, __LINE__); \
@@ -505,8 +380,7 @@ int sound_oss_card_info(oss_card_info *);
("%s(%d): [PCM RELEASE] Releasing non-BUSY cv!", \
__func__, __LINE__)); \
(x)->flags &= ~SD_F_BUSY; \
- if ((x)->cv.cv_waiters != 0) \
- cv_broadcast(&(x)->cv); \
+ cv_broadcast(&(x)->cv); \
} while (0)
/* Quick version, for shorter path. */
@@ -563,4 +437,71 @@ int sound_oss_card_info(oss_card_info *);
#endif /* _KERNEL */
+/* Make figuring out what a format is easier; we already have AFMT_STEREO. */
+#define AFMT_32BIT (AFMT_S32_LE | AFMT_S32_BE | AFMT_U32_LE | AFMT_U32_BE | \
+ AFMT_F32_LE | AFMT_F32_BE)
+#define AFMT_24BIT (AFMT_S24_LE | AFMT_S24_BE | AFMT_U24_LE | AFMT_U24_BE)
+#define AFMT_16BIT (AFMT_S16_LE | AFMT_S16_BE | AFMT_U16_LE | AFMT_U16_BE)
+#define AFMT_G711 (AFMT_MU_LAW | AFMT_A_LAW)
+#define AFMT_8BIT (AFMT_G711 | AFMT_U8 | AFMT_S8)
+#define AFMT_SIGNED (AFMT_S32_LE | AFMT_S32_BE | AFMT_F32_LE | AFMT_F32_BE | \
+ AFMT_S24_LE | AFMT_S24_BE | \
+ AFMT_S16_LE | AFMT_S16_BE | AFMT_S8)
+#define AFMT_BIGENDIAN (AFMT_S32_BE | AFMT_U32_BE | AFMT_F32_BE | \
+ AFMT_S24_BE | AFMT_U24_BE | AFMT_S16_BE | AFMT_U16_BE)
+
+#define AFMT_CONVERTIBLE (AFMT_8BIT | AFMT_16BIT | AFMT_24BIT | \
+ AFMT_32BIT)
+
+/* Supported vchan mixing formats */
+#define AFMT_VCHAN (AFMT_CONVERTIBLE & ~AFMT_G711)
+
+#define AFMT_PASSTHROUGH AFMT_AC3
+#define AFMT_PASSTHROUGH_RATE 48000
+#define AFMT_PASSTHROUGH_CHANNEL 2
+#define AFMT_PASSTHROUGH_EXTCHANNEL 0
+
+/*
+ * We're simply using unused, contiguous bits from various AFMT_ definitions.
+ * ~(0xb00ff7ff)
+ */
+#define AFMT_ENCODING_MASK 0xf00fffff
+#define AFMT_CHANNEL_MASK 0x07f00000
+#define AFMT_CHANNEL_SHIFT 20
+#define AFMT_CHANNEL_MAX 0x7f
+#define AFMT_EXTCHANNEL_MASK 0x08000000
+#define AFMT_EXTCHANNEL_SHIFT 27
+#define AFMT_EXTCHANNEL_MAX 1
+
+#define AFMT_ENCODING(v) ((v) & AFMT_ENCODING_MASK)
+
+#define AFMT_EXTCHANNEL(v) (((v) & AFMT_EXTCHANNEL_MASK) >> \
+ AFMT_EXTCHANNEL_SHIFT)
+
+#define AFMT_CHANNEL(v) (((v) & AFMT_CHANNEL_MASK) >> \
+ AFMT_CHANNEL_SHIFT)
+
+#define AFMT_BIT(v) (((v) & AFMT_32BIT) ? 32 : \
+ (((v) & AFMT_24BIT) ? 24 : \
+ ((((v) & AFMT_16BIT) || \
+ ((v) & AFMT_PASSTHROUGH)) ? 16 : 8)))
+
+#define AFMT_BPS(v) (AFMT_BIT(v) >> 3)
+#define AFMT_ALIGN(v) (AFMT_BPS(v) * AFMT_CHANNEL(v))
+
+#define SND_FORMAT(f, c, e) (AFMT_ENCODING(f) | \
+ (((c) << AFMT_CHANNEL_SHIFT) & \
+ AFMT_CHANNEL_MASK) | \
+ (((e) << AFMT_EXTCHANNEL_SHIFT) & \
+ AFMT_EXTCHANNEL_MASK))
+
+#define AFMT_U8_NE AFMT_U8
+#define AFMT_S8_NE AFMT_S8
+
+#define AFMT_SIGNED_NE (AFMT_S8_NE | AFMT_S16_NE | AFMT_S24_NE | \
+ AFMT_S32_NE | AFMT_F32_NE)
+
+#define AFMT_NE (AFMT_SIGNED_NE | AFMT_U8_NE | AFMT_U16_NE | \
+ AFMT_U24_NE | AFMT_U32_NE)
+
#endif /* _OS_H_ */
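
A worked example of the AFMT bit-packing above; each value follows directly from the masks (the format chosen is illustrative):

	uint32_t fmt = SND_FORMAT(AFMT_S16_LE, 2, 0);	/* s16le, stereo */

	/* AFMT_ENCODING(fmt) == AFMT_S16_LE			*/
	/* AFMT_CHANNEL(fmt)  == 2	(bits 20-26)		*/
	/* AFMT_BIT(fmt)      == 16	(fmt & AFMT_16BIT)	*/
	/* AFMT_BPS(fmt)      == 2	bytes per sample	*/
	/* AFMT_ALIGN(fmt)    == 4	bytes per sample frame	*/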
diff --git a/sys/dev/sound/pcm/vchan.c b/sys/dev/sound/pcm/vchan.c
index c3bc36d924bd..31a4f7db8d70 100644
--- a/sys/dev/sound/pcm/vchan.c
+++ b/sys/dev/sound/pcm/vchan.c
@@ -4,6 +4,10 @@
* Copyright (c) 2006-2009 Ariff Abdullah <ariff@FreeBSD.org>
* Copyright (c) 2001 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -57,6 +61,8 @@ struct vchan_info {
int trigger;
};
+bool snd_vchans_enable = true;
+
static void *
vchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b,
struct pcm_channel *c, int dir)
@@ -140,20 +146,19 @@ vchan_trigger(kobj_t obj, void *data, int go)
int ret, otrigger;
info = data;
-
- if (!PCMTRIG_COMMON(go) || go == info->trigger)
- return (0);
-
c = info->channel;
p = c->parentchannel;
- otrigger = info->trigger;
- info->trigger = go;
CHN_LOCKASSERT(c);
+ if (!PCMTRIG_COMMON(go) || go == info->trigger)
+ return (0);
CHN_UNLOCK(c);
CHN_LOCK(p);
+ otrigger = info->trigger;
+ info->trigger = go;
+
switch (go) {
case PCMTRIG_START:
if (otrigger != PCMTRIG_START)
@@ -248,101 +253,63 @@ static kobj_method_t vchan_methods[] = {
};
CHANNEL_DECLARE(vchan);
-static void
-pcm_getparentchannel(struct snddev_info *d,
- struct pcm_channel **wrch, struct pcm_channel **rdch)
-{
- struct pcm_channel **ch, *wch, *rch, *c;
-
- KASSERT(d != NULL, ("%s(): NULL snddev_info", __func__));
-
- PCM_BUSYASSERT(d);
- PCM_UNLOCKASSERT(d);
-
- wch = NULL;
- rch = NULL;
-
- CHN_FOREACH(c, d, channels.pcm) {
- CHN_LOCK(c);
- ch = (c->direction == PCMDIR_PLAY) ? &wch : &rch;
- if (c->flags & CHN_F_VIRTUAL) {
- /* Sanity check */
- if (*ch != NULL && *ch != c->parentchannel) {
- CHN_UNLOCK(c);
- *ch = NULL;
- break;
- }
- } else if (c->flags & CHN_F_HAS_VCHAN) {
- /* No way!! */
- if (*ch != NULL) {
- CHN_UNLOCK(c);
- *ch = NULL;
- break;
- }
- *ch = c;
- }
- CHN_UNLOCK(c);
- }
-
- if (wrch != NULL)
- *wrch = wch;
- if (rdch != NULL)
- *rdch = rch;
-}
-
static int
sysctl_dev_pcm_vchans(SYSCTL_HANDLER_ARGS)
{
struct snddev_info *d;
- int direction, vchancount;
- int err, cnt;
+ int err, enabled, flag;
+ bus_topo_lock();
d = devclass_get_softc(pcm_devclass, VCHAN_SYSCTL_UNIT(oidp->oid_arg1));
- if (!PCM_REGISTERED(d) || !(d->flags & SD_F_AUTOVCHAN))
+ if (!PCM_REGISTERED(d)) {
+ bus_topo_unlock();
return (EINVAL);
+ }
+ bus_topo_unlock();
PCM_LOCK(d);
PCM_WAIT(d);
switch (VCHAN_SYSCTL_DIR(oidp->oid_arg1)) {
case VCHAN_PLAY:
- direction = PCMDIR_PLAY;
- vchancount = d->pvchancount;
- cnt = d->playcount;
+ /* Exit if we do not support this direction. */
+ if (d->playcount < 1) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
+ flag = SD_F_PVCHANS;
break;
case VCHAN_REC:
- direction = PCMDIR_REC;
- vchancount = d->rvchancount;
- cnt = d->reccount;
+ if (d->reccount < 1) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
+ flag = SD_F_RVCHANS;
break;
default:
PCM_UNLOCK(d);
return (EINVAL);
- break;
}
- if (cnt < 1) {
- PCM_UNLOCK(d);
- return (ENODEV);
- }
+ enabled = (d->flags & flag) != 0;
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- cnt = vchancount;
- err = sysctl_handle_int(oidp, &cnt, 0, req);
-
- if (err == 0 && req->newptr != NULL && vchancount != cnt) {
- if (cnt < 0)
- cnt = 0;
- if (cnt > SND_MAXVCHANS)
- cnt = SND_MAXVCHANS;
- err = pcm_setvchans(d, direction, cnt, -1);
+ err = sysctl_handle_int(oidp, &enabled, 0, req);
+ if (err != 0 || req->newptr == NULL) {
+ PCM_RELEASE_QUICK(d);
+ return (err);
}
+ if (enabled <= 0)
+ d->flags &= ~flag;
+ else
+ d->flags |= flag;
+
PCM_RELEASE_QUICK(d);
- return err;
+ return (0);
}
static int
@@ -351,79 +318,81 @@ sysctl_dev_pcm_vchanmode(SYSCTL_HANDLER_ARGS)
struct snddev_info *d;
struct pcm_channel *c;
uint32_t dflags;
- int direction, ret;
+ int *vchanmode, direction, ret;
char dtype[16];
+ bus_topo_lock();
d = devclass_get_softc(pcm_devclass, VCHAN_SYSCTL_UNIT(oidp->oid_arg1));
- if (!PCM_REGISTERED(d) || !(d->flags & SD_F_AUTOVCHAN))
+ if (!PCM_REGISTERED(d)) {
+ bus_topo_unlock();
return (EINVAL);
+ }
+ bus_topo_unlock();
PCM_LOCK(d);
PCM_WAIT(d);
switch (VCHAN_SYSCTL_DIR(oidp->oid_arg1)) {
case VCHAN_PLAY:
+ if ((d->flags & SD_F_PVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_PLAY;
+ vchanmode = &d->pvchanmode;
break;
case VCHAN_REC:
+ if ((d->flags & SD_F_RVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_REC;
+ vchanmode = &d->rvchanmode;
break;
default:
PCM_UNLOCK(d);
return (EINVAL);
- break;
}
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- if (direction == PCMDIR_PLAY)
- pcm_getparentchannel(d, &c, NULL);
- else
- pcm_getparentchannel(d, NULL, &c);
-
- if (c == NULL) {
- PCM_RELEASE_QUICK(d);
- return (EINVAL);
- }
-
- KASSERT(direction == c->direction, ("%s(): invalid direction %d/%d",
- __func__, direction, c->direction));
-
- CHN_LOCK(c);
- if (c->flags & CHN_F_VCHAN_PASSTHROUGH)
+ if (*vchanmode & CHN_F_VCHAN_PASSTHROUGH)
strlcpy(dtype, "passthrough", sizeof(dtype));
- else if (c->flags & CHN_F_VCHAN_ADAPTIVE)
+ else if (*vchanmode & CHN_F_VCHAN_ADAPTIVE)
strlcpy(dtype, "adaptive", sizeof(dtype));
else
strlcpy(dtype, "fixed", sizeof(dtype));
- CHN_UNLOCK(c);
ret = sysctl_handle_string(oidp, dtype, sizeof(dtype), req);
- if (ret == 0 && req->newptr != NULL) {
- if (strcasecmp(dtype, "passthrough") == 0 ||
- strcmp(dtype, "1") == 0)
- dflags = CHN_F_VCHAN_PASSTHROUGH;
- else if (strcasecmp(dtype, "adaptive") == 0 ||
- strcmp(dtype, "2") == 0)
- dflags = CHN_F_VCHAN_ADAPTIVE;
- else if (strcasecmp(dtype, "fixed") == 0 ||
- strcmp(dtype, "0") == 0)
- dflags = 0;
- else {
- PCM_RELEASE_QUICK(d);
- return (EINVAL);
- }
+ if (ret != 0 || req->newptr == NULL) {
+ PCM_RELEASE_QUICK(d);
+ return (ret);
+ }
+
+ if (strcasecmp(dtype, "passthrough") == 0 || strcmp(dtype, "1") == 0)
+ dflags = CHN_F_VCHAN_PASSTHROUGH;
+ else if (strcasecmp(dtype, "adaptive") == 0 || strcmp(dtype, "2") == 0)
+ dflags = CHN_F_VCHAN_ADAPTIVE;
+ else if (strcasecmp(dtype, "fixed") == 0 || strcmp(dtype, "0") == 0)
+ dflags = 0;
+ else {
+ PCM_RELEASE_QUICK(d);
+ return (EINVAL);
+ }
+
+ CHN_FOREACH(c, d, channels.pcm.primary) {
CHN_LOCK(c);
- if (dflags == (c->flags & CHN_F_VCHAN_DYNAMIC) ||
+ if (c->direction != direction ||
+ dflags == (c->flags & CHN_F_VCHAN_DYNAMIC) ||
(c->flags & CHN_F_PASSTHROUGH)) {
CHN_UNLOCK(c);
- PCM_RELEASE_QUICK(d);
- return (0);
+ continue;
}
c->flags &= ~CHN_F_VCHAN_DYNAMIC;
c->flags |= dflags;
CHN_UNLOCK(c);
+ *vchanmode = dflags;
}
PCM_RELEASE_QUICK(d);
@@ -444,57 +413,45 @@ sysctl_dev_pcm_vchanrate(SYSCTL_HANDLER_ARGS)
{
struct snddev_info *d;
struct pcm_channel *c, *ch;
- struct pcmchan_caps *caps;
- int *vchanrate, vchancount, direction, ret, newspd, restart;
+ int *vchanrate, direction, ret, newspd, restart;
+ bus_topo_lock();
d = devclass_get_softc(pcm_devclass, VCHAN_SYSCTL_UNIT(oidp->oid_arg1));
- if (!PCM_REGISTERED(d) || !(d->flags & SD_F_AUTOVCHAN))
+ if (!PCM_REGISTERED(d)) {
+ bus_topo_unlock();
return (EINVAL);
+ }
+ bus_topo_unlock();
PCM_LOCK(d);
PCM_WAIT(d);
switch (VCHAN_SYSCTL_DIR(oidp->oid_arg1)) {
case VCHAN_PLAY:
+ if ((d->flags & SD_F_PVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_PLAY;
- vchancount = d->pvchancount;
vchanrate = &d->pvchanrate;
break;
case VCHAN_REC:
+ if ((d->flags & SD_F_RVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_REC;
- vchancount = d->rvchancount;
vchanrate = &d->rvchanrate;
break;
default:
PCM_UNLOCK(d);
return (EINVAL);
- break;
- }
-
- if (vchancount < 1) {
- PCM_UNLOCK(d);
- return (EINVAL);
}
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- if (direction == PCMDIR_PLAY)
- pcm_getparentchannel(d, &c, NULL);
- else
- pcm_getparentchannel(d, NULL, &c);
-
- if (c == NULL) {
- PCM_RELEASE_QUICK(d);
- return (EINVAL);
- }
-
- KASSERT(direction == c->direction, ("%s(): invalid direction %d/%d",
- __func__, direction, c->direction));
-
- CHN_LOCK(c);
- newspd = c->speed;
- CHN_UNLOCK(c);
+ newspd = *vchanrate;
ret = sysctl_handle_int(oidp, &newspd, 0, req);
if (ret != 0 || req->newptr == NULL) {
@@ -502,45 +459,43 @@ sysctl_dev_pcm_vchanrate(SYSCTL_HANDLER_ARGS)
return (ret);
}
- if (newspd < 1 || newspd < feeder_rate_min ||
- newspd > feeder_rate_max) {
+ if (newspd < feeder_rate_min || newspd > feeder_rate_max) {
PCM_RELEASE_QUICK(d);
return (EINVAL);
}
- CHN_LOCK(c);
-
- if (newspd != c->speed && VCHAN_ACCESSIBLE(c)) {
- if (CHN_STARTED(c)) {
- chn_abort(c);
- restart = 1;
- } else
- restart = 0;
-
- if (feeder_rate_round) {
- caps = chn_getcaps(c);
- RANGE(newspd, caps->minspeed, caps->maxspeed);
- newspd = CHANNEL_SETSPEED(c->methods,
- c->devinfo, newspd);
+ CHN_FOREACH(c, d, channels.pcm.primary) {
+ CHN_LOCK(c);
+ if (c->direction != direction) {
+ CHN_UNLOCK(c);
+ continue;
}
- ret = chn_reset(c, c->format, newspd);
- if (ret == 0) {
- *vchanrate = c->speed;
- if (restart != 0) {
- CHN_FOREACH(ch, c, children.busy) {
- CHN_LOCK(ch);
- if (VCHAN_SYNC_REQUIRED(ch))
- vchan_sync(ch);
- CHN_UNLOCK(ch);
+ if (newspd != c->speed && VCHAN_ACCESSIBLE(c)) {
+ if (CHN_STARTED(c)) {
+ chn_abort(c);
+ restart = 1;
+ } else
+ restart = 0;
+
+ ret = chn_reset(c, c->format, newspd);
+ if (ret == 0) {
+ if (restart != 0) {
+ CHN_FOREACH(ch, c, children.busy) {
+ CHN_LOCK(ch);
+ if (VCHAN_SYNC_REQUIRED(ch))
+ vchan_sync(ch);
+ CHN_UNLOCK(ch);
+ }
+ c->flags |= CHN_F_DIRTY;
+ ret = chn_start(c, 1);
}
- c->flags |= CHN_F_DIRTY;
- ret = chn_start(c, 1);
}
}
- }
+ *vchanrate = sndbuf_getspd(c->bufsoft);
- CHN_UNLOCK(c);
+ CHN_UNLOCK(c);
+ }
PCM_RELEASE_QUICK(d);
@@ -553,63 +508,50 @@ sysctl_dev_pcm_vchanformat(SYSCTL_HANDLER_ARGS)
struct snddev_info *d;
struct pcm_channel *c, *ch;
uint32_t newfmt;
- int *vchanformat, vchancount, direction, ret, restart;
+ int *vchanformat, direction, ret, restart;
char fmtstr[AFMTSTR_LEN];
+ bus_topo_lock();
d = devclass_get_softc(pcm_devclass, VCHAN_SYSCTL_UNIT(oidp->oid_arg1));
- if (!PCM_REGISTERED(d) || !(d->flags & SD_F_AUTOVCHAN))
+ if (!PCM_REGISTERED(d)) {
+ bus_topo_unlock();
return (EINVAL);
+ }
+ bus_topo_unlock();
PCM_LOCK(d);
PCM_WAIT(d);
switch (VCHAN_SYSCTL_DIR(oidp->oid_arg1)) {
case VCHAN_PLAY:
+ if ((d->flags & SD_F_PVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_PLAY;
- vchancount = d->pvchancount;
vchanformat = &d->pvchanformat;
break;
case VCHAN_REC:
+ if ((d->flags & SD_F_RVCHANS) == 0) {
+ PCM_UNLOCK(d);
+ return (ENODEV);
+ }
direction = PCMDIR_REC;
- vchancount = d->rvchancount;
vchanformat = &d->rvchanformat;
break;
default:
PCM_UNLOCK(d);
return (EINVAL);
- break;
- }
-
- if (vchancount < 1) {
- PCM_UNLOCK(d);
- return (EINVAL);
}
PCM_ACQUIRE(d);
PCM_UNLOCK(d);
- if (direction == PCMDIR_PLAY)
- pcm_getparentchannel(d, &c, NULL);
- else
- pcm_getparentchannel(d, NULL, &c);
-
- if (c == NULL) {
- PCM_RELEASE_QUICK(d);
- return (EINVAL);
- }
-
- KASSERT(direction == c->direction, ("%s(): invalid direction %d/%d",
- __func__, direction, c->direction));
-
- CHN_LOCK(c);
-
bzero(fmtstr, sizeof(fmtstr));
- if (snd_afmt2str(c->format, fmtstr, sizeof(fmtstr)) != c->format)
+ if (snd_afmt2str(*vchanformat, fmtstr, sizeof(fmtstr)) != *vchanformat)
strlcpy(fmtstr, "<ERROR>", sizeof(fmtstr));
- CHN_UNLOCK(c);
-
ret = sysctl_handle_string(oidp, fmtstr, sizeof(fmtstr), req);
if (ret != 0 || req->newptr == NULL) {
PCM_RELEASE_QUICK(d);
@@ -622,32 +564,37 @@ sysctl_dev_pcm_vchanformat(SYSCTL_HANDLER_ARGS)
return (EINVAL);
}
- CHN_LOCK(c);
-
- if (newfmt != c->format && VCHAN_ACCESSIBLE(c)) {
- if (CHN_STARTED(c)) {
- chn_abort(c);
- restart = 1;
- } else
- restart = 0;
-
- ret = chn_reset(c, newfmt, c->speed);
- if (ret == 0) {
- *vchanformat = c->format;
- if (restart != 0) {
- CHN_FOREACH(ch, c, children.busy) {
- CHN_LOCK(ch);
- if (VCHAN_SYNC_REQUIRED(ch))
- vchan_sync(ch);
- CHN_UNLOCK(ch);
+ CHN_FOREACH(c, d, channels.pcm.primary) {
+ CHN_LOCK(c);
+ if (c->direction != direction) {
+ CHN_UNLOCK(c);
+ continue;
+ }
+ if (newfmt != c->format && VCHAN_ACCESSIBLE(c)) {
+ if (CHN_STARTED(c)) {
+ chn_abort(c);
+ restart = 1;
+ } else
+ restart = 0;
+
+ ret = chn_reset(c, newfmt, c->speed);
+ if (ret == 0) {
+ if (restart != 0) {
+ CHN_FOREACH(ch, c, children.busy) {
+ CHN_LOCK(ch);
+ if (VCHAN_SYNC_REQUIRED(ch))
+ vchan_sync(ch);
+ CHN_UNLOCK(ch);
+ }
+ c->flags |= CHN_F_DIRTY;
+ ret = chn_start(c, 1);
}
- c->flags |= CHN_F_DIRTY;
- ret = chn_start(c, 1);
}
}
- }
+ *vchanformat = sndbuf_getfmt(c->bufsoft);
- CHN_UNLOCK(c);
+ CHN_UNLOCK(c);
+ }
PCM_RELEASE_QUICK(d);
@@ -662,28 +609,24 @@ sysctl_dev_pcm_vchanformat(SYSCTL_HANDLER_ARGS)
"play.vchanrate" : "rec.vchanrate"
int
-vchan_create(struct pcm_channel *parent, int num)
+vchan_create(struct pcm_channel *parent, struct pcm_channel **child)
{
struct snddev_info *d;
struct pcm_channel *ch;
struct pcmchan_caps *parent_caps;
uint32_t vchanfmt, vchanspd;
- int ret, direction, r, save;
+ int ret, direction;
+ ret = 0;
d = parent->parentsnddev;
PCM_BUSYASSERT(d);
CHN_LOCKASSERT(parent);
- if (!(parent->flags & CHN_F_BUSY))
- return (EBUSY);
-
if (!(parent->direction == PCMDIR_PLAY ||
parent->direction == PCMDIR_REC))
return (EINVAL);
- d = parent->parentsnddev;
-
CHN_UNLOCK(parent);
PCM_LOCK(d);
@@ -698,150 +641,48 @@ vchan_create(struct pcm_channel *parent, int num)
}
/* create a new playback channel */
- ch = pcm_chn_create(d, parent, &vchan_class, direction, num, parent);
+ ch = chn_init(d, parent, &vchan_class, direction, parent);
if (ch == NULL) {
PCM_UNLOCK(d);
CHN_LOCK(parent);
return (ENODEV);
}
-
- /* add us to our grandparent's channel list */
- ret = pcm_chn_add(d, ch);
PCM_UNLOCK(d);
- if (ret != 0) {
- chn_kill(ch);
- CHN_LOCK(parent);
- return (ret);
- }
CHN_LOCK(parent);
- /*
- * Add us to our parent channel's children in reverse order
- * so future destruction will pick the last (biggest number)
- * channel.
- */
- CHN_INSERT_SORT_DESCEND(parent, ch, children);
+ CHN_INSERT_SORT_ASCEND(parent, ch, children);
+
+ *child = ch;
if (parent->flags & CHN_F_HAS_VCHAN)
return (0);
- parent->flags |= CHN_F_HAS_VCHAN;
+ parent->flags |= CHN_F_HAS_VCHAN | CHN_F_BUSY;
parent_caps = chn_getcaps(parent);
- if (parent_caps == NULL)
+ if (parent_caps == NULL) {
ret = EINVAL;
-
- save = 0;
-
- if (ret == 0 && vchanfmt == 0) {
- const char *vfmt;
-
- CHN_UNLOCK(parent);
- r = resource_string_value(device_get_name(parent->dev),
- device_get_unit(parent->dev), VCHAN_FMT_HINT(direction),
- &vfmt);
- CHN_LOCK(parent);
- if (r != 0)
- vfmt = NULL;
- if (vfmt != NULL) {
- vchanfmt = snd_str2afmt(vfmt);
- if (vchanfmt != 0 && !(vchanfmt & AFMT_VCHAN))
- vchanfmt = 0;
- }
- if (vchanfmt == 0)
- vchanfmt = VCHAN_DEFAULT_FORMAT;
- save = 1;
- }
-
- if (ret == 0 && vchanspd == 0) {
- /*
- * This is very sad. Few soundcards advertised as being
- * able to do (insanely) higher/lower speed, but in
- * reality, they simply can't. At least, we give user chance
- * to set sane value via kernel hints or sysctl.
- */
- CHN_UNLOCK(parent);
- r = resource_int_value(device_get_name(parent->dev),
- device_get_unit(parent->dev), VCHAN_SPD_HINT(direction),
- &vchanspd);
- CHN_LOCK(parent);
- if (r != 0) {
- /*
- * No saved value, no hint, NOTHING.
- *
- * Workaround for sb16 running
- * poorly at 45k / 49k.
- */
- switch (parent_caps->maxspeed) {
- case 45000:
- case 49000:
- vchanspd = 44100;
- break;
- default:
- vchanspd = VCHAN_DEFAULT_RATE;
- if (vchanspd > parent_caps->maxspeed)
- vchanspd = parent_caps->maxspeed;
- break;
- }
- if (vchanspd < parent_caps->minspeed)
- vchanspd = parent_caps->minspeed;
- }
- save = 1;
+ goto fail;
}
- if (ret == 0) {
- /*
- * Limit the speed between feeder_rate_min <-> feeder_rate_max.
- */
- if (vchanspd < feeder_rate_min)
- vchanspd = feeder_rate_min;
- if (vchanspd > feeder_rate_max)
- vchanspd = feeder_rate_max;
-
- if (feeder_rate_round) {
- RANGE(vchanspd, parent_caps->minspeed,
- parent_caps->maxspeed);
- vchanspd = CHANNEL_SETSPEED(parent->methods,
- parent->devinfo, vchanspd);
- }
-
- ret = chn_reset(parent, vchanfmt, vchanspd);
- }
-
- if (ret == 0 && save) {
- /*
- * Save new value.
- */
- if (direction == PCMDIR_PLAY_VIRTUAL) {
- d->pvchanformat = parent->format;
- d->pvchanrate = parent->speed;
- } else {
- d->rvchanformat = parent->format;
- d->rvchanrate = parent->speed;
- }
- }
+ if ((ret = chn_reset(parent, vchanfmt, vchanspd)) != 0)
+ goto fail;
/*
* If the parent channel supports digital format,
* enable passthrough mode.
*/
- if (ret == 0 && snd_fmtvalid(AFMT_PASSTHROUGH, parent_caps->fmtlist)) {
+ if (snd_fmtvalid(AFMT_PASSTHROUGH, parent_caps->fmtlist)) {
parent->flags &= ~CHN_F_VCHAN_DYNAMIC;
parent->flags |= CHN_F_VCHAN_PASSTHROUGH;
}
- if (ret != 0) {
- CHN_REMOVE(parent, ch, children);
- parent->flags &= ~CHN_F_HAS_VCHAN;
- CHN_UNLOCK(parent);
- PCM_LOCK(d);
- if (pcm_chn_remove(d, ch) == 0) {
- PCM_UNLOCK(d);
- chn_kill(ch);
- } else
- PCM_UNLOCK(d);
- CHN_LOCK(parent);
- }
+ return (ret);
+
+fail:
+ CHN_LOCK(ch);
+ vchan_destroy(ch);
+ *child = NULL;
return (ret);
}
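
With the new signature, the caller receives the vchan through the output pointer instead of choosing a unit number; a minimal call-site sketch, with locking as the asserts above require:

	struct pcm_channel *child;
	int err;

	/* d has been PCM_ACQUIRE()d; parent is a primary channel. */
	CHN_LOCK(parent);
	err = vchan_create(parent, &child);
	CHN_UNLOCK(parent);
	if (err != 0)
		device_printf(d->dev, "vchan_create() failed: %d\n", err);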
@@ -850,8 +691,6 @@ int
vchan_destroy(struct pcm_channel *c)
{
struct pcm_channel *parent;
- struct snddev_info *d;
- int ret;
KASSERT(c != NULL && c->parentchannel != NULL &&
c->parentsnddev != NULL, ("%s(): invalid channel=%p",
@@ -859,23 +698,15 @@ vchan_destroy(struct pcm_channel *c)
CHN_LOCKASSERT(c);
- d = c->parentsnddev;
parent = c->parentchannel;
- PCM_BUSYASSERT(d);
+ PCM_BUSYASSERT(c->parentsnddev);
CHN_LOCKASSERT(parent);
CHN_UNLOCK(c);
- if (!(parent->flags & CHN_F_BUSY))
- return (EBUSY);
-
- if (CHN_EMPTY(parent, children))
- return (EINVAL);
-
/* remove us from our parent's children list */
CHN_REMOVE(parent, c, children);
-
if (CHN_EMPTY(parent, children)) {
parent->flags &= ~(CHN_F_BUSY | CHN_F_HAS_VCHAN);
chn_reset(parent, parent->format, parent->speed);
@@ -883,18 +714,12 @@ vchan_destroy(struct pcm_channel *c)
CHN_UNLOCK(parent);
- /* remove us from our grandparent's channel list */
- PCM_LOCK(d);
- ret = pcm_chn_remove(d, c);
- PCM_UNLOCK(d);
-
/* destroy ourselves */
- if (ret == 0)
- chn_kill(c);
+ chn_kill(c);
CHN_LOCK(parent);
- return (ret);
+ return (0);
}
int
@@ -920,20 +745,52 @@ vchan_sync(struct pcm_channel *c)
c->flags |= CHN_F_DIRTY;
#ifdef SND_DEBUG
- if (snd_passthrough_verbose != 0) {
- char *devname, buf[CHN_NAMELEN];
-
- devname = dsp_unit2name(buf, sizeof(buf), c);
- device_printf(c->dev,
- "%s(%s/%s) %s() -> re-sync err=%d\n",
- __func__, (devname != NULL) ? devname : "dspX", c->comm,
- caller, ret);
+ if (snd_passthrough_verbose) {
+ device_printf(c->dev, "%s(%s/%s) %s() -> re-sync err=%d\n",
+ __func__, c->name, c->comm, caller, ret);
}
#endif
return (ret);
}
+static int
+sysctl_hw_snd_vchans_enable(SYSCTL_HANDLER_ARGS)
+{
+ struct snddev_info *d;
+ int i, v, error;
+
+ v = snd_vchans_enable;
+ error = sysctl_handle_int(oidp, &v, 0, req);
+ if (error != 0 || req->newptr == NULL)
+ return (error);
+
+ bus_topo_lock();
+ snd_vchans_enable = v >= 1;
+
+ for (i = 0; pcm_devclass != NULL &&
+ i < devclass_get_maxunit(pcm_devclass); i++) {
+ d = devclass_get_softc(pcm_devclass, i);
+ if (!PCM_REGISTERED(d))
+ continue;
+ PCM_ACQUIRE_QUICK(d);
+ if (snd_vchans_enable) {
+ if (d->playcount > 0)
+ d->flags |= SD_F_PVCHANS;
+ if (d->reccount > 0)
+ d->flags |= SD_F_RVCHANS;
+ } else
+ d->flags &= ~(SD_F_PVCHANS | SD_F_RVCHANS);
+ PCM_RELEASE_QUICK(d);
+ }
+ bus_topo_unlock();
+
+ return (0);
+}
+SYSCTL_PROC(_hw_snd, OID_AUTO, vchans_enable,
+ CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, 0, sizeof(int),
+ sysctl_hw_snd_vchans_enable, "I", "global virtual channel switch");
+
void
vchan_initsys(device_t dev)
{
@@ -948,7 +805,7 @@ vchan_initsys(device_t dev)
SYSCTL_CHILDREN(d->play_sysctl_tree),
OID_AUTO, "vchans", CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
VCHAN_SYSCTL_DATA(unit, PLAY), VCHAN_SYSCTL_DATA_SIZE,
- sysctl_dev_pcm_vchans, "I", "total allocated virtual channel");
+ sysctl_dev_pcm_vchans, "I", "virtual channels enabled");
SYSCTL_ADD_PROC(&d->play_sysctl_ctx,
SYSCTL_CHILDREN(d->play_sysctl_tree),
OID_AUTO, "vchanmode",
@@ -974,7 +831,7 @@ vchan_initsys(device_t dev)
OID_AUTO, "vchans",
CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
VCHAN_SYSCTL_DATA(unit, REC), VCHAN_SYSCTL_DATA_SIZE,
- sysctl_dev_pcm_vchans, "I", "total allocated virtual channel");
+ sysctl_dev_pcm_vchans, "I", "virtual channels enabled");
SYSCTL_ADD_PROC(&d->rec_sysctl_ctx,
SYSCTL_CHILDREN(d->rec_sysctl_tree),
OID_AUTO, "vchanmode",
diff --git a/sys/dev/sound/pcm/vchan.h b/sys/dev/sound/pcm/vchan.h
index e2dcc9761261..8c1de9496ef3 100644
--- a/sys/dev/sound/pcm/vchan.h
+++ b/sys/dev/sound/pcm/vchan.h
@@ -4,6 +4,10 @@
* Copyright (c) 2005-2009 Ariff Abdullah <ariff@FreeBSD.org>
* Copyright (c) 2001 Cameron Grant <cg@FreeBSD.org>
* All rights reserved.
+ * Copyright (c) 2024-2025 The FreeBSD Foundation
+ *
+ * Portions of this software were developed by Christos Margiolis
+ * <christos@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +34,9 @@
#ifndef _SND_VCHAN_H_
#define _SND_VCHAN_H_
-int vchan_create(struct pcm_channel *, int);
+extern bool snd_vchans_enable;
+
+int vchan_create(struct pcm_channel *, struct pcm_channel **);
int vchan_destroy(struct pcm_channel *);
#ifdef SND_DEBUG
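
The per-device vchans sysctls now act as boolean switches instead of channel counts; illustrative usage, assuming unit 0:

	# sysctl hw.snd.vchans_enable=1		(global switch)
	# sysctl dev.pcm.0.play.vchans=0	(disable playback vchans on pcm0)
	# sysctl dev.pcm.0.play.vchanmode=adaptive
	# sysctl dev.pcm.0.play.vchanrate=48000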
diff --git a/sys/dev/sound/usb/uaudio.c b/sys/dev/sound/usb/uaudio.c
index 2351c2522021..ff9f59fe42ab 100644
--- a/sys/dev/sound/usb/uaudio.c
+++ b/sys/dev/sound/usb/uaudio.c
@@ -90,7 +90,6 @@
#include <dev/sound/pcm/sound.h>
#include <dev/sound/usb/uaudioreg.h>
#include <dev/sound/usb/uaudio.h>
-#include <dev/sound/chip.h>
#include "feeder_if.h"
static int uaudio_default_rate = 0; /* use rate list */
@@ -369,7 +368,6 @@ struct uaudio_softc_child {
};
struct uaudio_softc {
- struct sbuf sc_sndstat;
struct sndcard_func sc_sndcard_func;
struct uaudio_chan sc_rec_chan[UAUDIO_MAX_CHILD];
struct uaudio_chan sc_play_chan[UAUDIO_MAX_CHILD];
@@ -392,7 +390,6 @@ struct uaudio_softc {
uint8_t sc_mixer_iface_index;
uint8_t sc_mixer_iface_no;
uint8_t sc_mixer_chan;
- uint8_t sc_sndstat_valid:1;
uint8_t sc_uq_audio_swap_lr:1;
uint8_t sc_uq_au_inp_async:1;
uint8_t sc_uq_au_no_xu:1;
@@ -1128,7 +1125,7 @@ uaudio_attach(device_t dev)
sc->sc_child[i].mix_info == 0)
continue;
sc->sc_child[i].pcm_device =
- device_add_child(dev, "pcm", -1);
+ device_add_child(dev, "pcm", DEVICE_UNIT_ANY);
if (sc->sc_child[i].pcm_device == NULL) {
DPRINTF("out of memory\n");
@@ -1138,10 +1135,7 @@ uaudio_attach(device_t dev)
&sc->sc_sndcard_func);
}
- if (bus_generic_attach(dev)) {
- DPRINTF("child attach failed\n");
- goto detach;
- }
+ bus_attach_children(dev);
if (uaudio_handle_hid) {
if (uaudio_hid_probe(sc, uaa) == 0) {
@@ -1212,14 +1206,9 @@ uaudio_attach_sub(device_t dev, kobj_class_t mixer_class, kobj_class_t chan_clas
snprintf(status, sizeof(status), "on %s",
device_get_nameunit(device_get_parent(dev)));
- if (pcm_register(dev, sc,
- (sc->sc_play_chan[i].num_alt > 0) ? 1 : 0,
- (sc->sc_rec_chan[i].num_alt > 0) ? 1 : 0)) {
- goto detach;
- }
+ pcm_init(dev, sc);
uaudio_pcm_setflags(dev, SD_F_MPSAFE);
- sc->sc_child[i].pcm_registered = 1;
if (sc->sc_play_chan[i].num_alt > 0) {
sc->sc_play_chan[i].priv_sc = sc;
@@ -1232,7 +1221,9 @@ uaudio_attach_sub(device_t dev, kobj_class_t mixer_class, kobj_class_t chan_clas
pcm_addchan(dev, PCMDIR_REC, chan_class,
&sc->sc_rec_chan[i]);
}
- pcm_setstatus(dev, status);
+ if (pcm_register(dev, status))
+ goto detach;
+ sc->sc_child[i].pcm_registered = 1;
uaudio_mixer_register_sysctl(sc, dev, i);
@@ -1294,8 +1285,6 @@ uaudio_detach(device_t dev)
if (bus_generic_detach(dev) != 0) {
DPRINTF("detach failed!\n");
}
- sbuf_delete(&sc->sc_sndstat);
- sc->sc_sndstat_valid = 0;
umidi_detach(dev);
@@ -2150,15 +2139,6 @@ uaudio_chan_fill_info_sub(struct uaudio_softc *sc, struct usb_device *udev,
if (rate > chan->pcm_cap.maxspeed || chan->pcm_cap.maxspeed == 0)
chan->pcm_cap.maxspeed = rate;
- if (sc->sc_sndstat_valid != 0) {
- sbuf_printf(&sc->sc_sndstat, "\n\t"
- "mode %d.%d:(%s) %dch, %dbit, %s, %dHz",
- curidx, alt_index,
- (ep_dir == UE_DIR_IN) ? "input" : "output",
- channels, p_fmt->bPrecision,
- p_fmt->description, rate);
- }
-
next_ep:
sed.v1 = NULL;
ed1 = NULL;
@@ -2231,9 +2211,6 @@ uaudio_chan_fill_info(struct uaudio_softc *sc, struct usb_device *udev)
if (channels == 0)
channels = channels_max;
- if (sbuf_new(&sc->sc_sndstat, NULL, 4096, SBUF_AUTOEXTEND))
- sc->sc_sndstat_valid = 1;
-
/* try to search for a valid config */
for (x = channels; x; x--) {
@@ -2264,8 +2241,6 @@ uaudio_chan_fill_info(struct uaudio_softc *sc, struct usb_device *udev)
if (x == (channels + 1))
x--;
}
- if (sc->sc_sndstat_valid)
- sbuf_finish(&sc->sc_sndstat);
}
static void
@@ -2706,8 +2681,6 @@ uaudio_chan_init(struct uaudio_chan *ch, struct snd_dbuf *b,
DPRINTF("Worst case buffer is %d bytes\n", (int)buf_size);
ch->buf = malloc(buf_size, M_DEVBUF, M_WAITOK | M_ZERO);
- if (ch->buf == NULL)
- goto error;
if (sndbuf_setup(b, ch->buf, buf_size) != 0)
goto error;
@@ -3275,31 +3248,27 @@ uaudio_mixer_add_ctl_sub(struct uaudio_softc *sc, struct uaudio_mixer_node *mc)
malloc(sizeof(*p_mc_new), M_USBDEV, M_WAITOK);
int ch;
- if (p_mc_new != NULL) {
- memcpy(p_mc_new, mc, sizeof(*p_mc_new));
- p_mc_new->next = sc->sc_mixer_root;
- sc->sc_mixer_root = p_mc_new;
- sc->sc_mixer_count++;
+ memcpy(p_mc_new, mc, sizeof(*p_mc_new));
+ p_mc_new->next = sc->sc_mixer_root;
+ sc->sc_mixer_root = p_mc_new;
+ sc->sc_mixer_count++;
- /* set default value for all channels */
- for (ch = 0; ch < p_mc_new->nchan; ch++) {
- switch (p_mc_new->val_default) {
- case 1:
- /* 50% */
- p_mc_new->wData[ch] = (p_mc_new->maxval + p_mc_new->minval) / 2;
- break;
- case 2:
- /* 100% */
- p_mc_new->wData[ch] = p_mc_new->maxval;
- break;
- default:
- /* 0% */
- p_mc_new->wData[ch] = p_mc_new->minval;
- break;
- }
+ /* set default value for all channels */
+ for (ch = 0; ch < p_mc_new->nchan; ch++) {
+ switch (p_mc_new->val_default) {
+ case 1:
+ /* 50% */
+ p_mc_new->wData[ch] = (p_mc_new->maxval + p_mc_new->minval) / 2;
+ break;
+ case 2:
+ /* 100% */
+ p_mc_new->wData[ch] = p_mc_new->maxval;
+ break;
+ default:
+ /* 0% */
+ p_mc_new->wData[ch] = p_mc_new->minval;
+ break;
}
- } else {
- DPRINTF("out of memory\n");
}
}
diff --git a/sys/dev/sound/usb/uaudio.h b/sys/dev/sound/usb/uaudio.h
index 08144701efe5..54b31a4e7bd2 100644
--- a/sys/dev/sound/usb/uaudio.h
+++ b/sys/dev/sound/usb/uaudio.h
@@ -64,8 +64,4 @@ extern void uaudio_mixer_set(struct uaudio_softc *, struct snd_mixer *,
extern uint32_t uaudio_mixer_setrecsrc(struct uaudio_softc *, struct snd_mixer *,
uint32_t src);
-int uaudio_get_vendor(device_t dev);
-int uaudio_get_product(device_t dev);
-int uaudio_get_release(device_t dev);
-
#endif /* _UAUDIO_H_ */
diff --git a/sys/dev/sound/usb/uaudio_pcm.c b/sys/dev/sound/usb/uaudio_pcm.c
index 9b17cb232907..0b3da9b20440 100644
--- a/sys/dev/sound/usb/uaudio_pcm.c
+++ b/sys/dev/sound/usb/uaudio_pcm.c
@@ -32,7 +32,6 @@
#endif
#include <dev/sound/pcm/sound.h>
-#include <dev/sound/chip.h>
#include <dev/sound/usb/uaudio.h>
#include "mixer_if.h"
diff --git a/sys/dev/spibus/acpi_spibus.c b/sys/dev/spibus/acpi_spibus.c
index 7241ad15bdab..a3280ffa567f 100644
--- a/sys/dev/spibus/acpi_spibus.c
+++ b/sys/dev/spibus/acpi_spibus.c
@@ -302,7 +302,7 @@ acpi_spibus_enumerate_child(ACPI_HANDLE handle, UINT32 level,
if (acpi_spibus_delete_acpi_child(handle) != 0)
return (AE_OK);
- child = BUS_ADD_CHILD(spibus, 0, NULL, -1);
+ child = BUS_ADD_CHILD(spibus, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(spibus, "add child failed\n");
return (AE_OK);
@@ -394,7 +394,7 @@ acpi_spibus_detach(device_t dev)
{
acpi_spibus_set_power_children(dev, ACPI_STATE_D3, false);
- return (spibus_detach(dev));
+ return (bus_generic_detach(dev));
}
static int
@@ -561,6 +561,7 @@ static device_method_t acpi_spibus_methods[] = {
DEVMETHOD(bus_alloc_resource, acpi_spibus_alloc_resource),
#endif
DEVMETHOD(bus_add_child, acpi_spibus_add_child),
+ DEVMETHOD(bus_child_deleted, spibus_child_deleted),
DEVMETHOD(bus_probe_nomatch, acpi_spibus_probe_nomatch),
DEVMETHOD(bus_driver_added, acpi_spibus_driver_added),
DEVMETHOD(bus_child_deleted, acpi_spibus_child_deleted),
diff --git a/sys/dev/spibus/controller/allwinner/aw_spi.c b/sys/dev/spibus/controller/allwinner/aw_spi.c
index 382f44345250..34461ab2ba9c 100644
--- a/sys/dev/spibus/controller/allwinner/aw_spi.c
+++ b/sys/dev/spibus/controller/allwinner/aw_spi.c
@@ -235,9 +235,10 @@ aw_spi_attach(device_t dev)
goto fail;
}
- sc->spibus = device_add_child(dev, "spibus", -1);
+ sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
aw_spi_detach(dev);
@@ -252,8 +253,6 @@ aw_spi_detach(device_t dev)
sc = device_get_softc(dev);
bus_generic_detach(sc->dev);
- if (sc->spibus != NULL)
- device_delete_child(dev, sc->spibus);
if (sc->clk_mod != NULL)
clk_release(sc->clk_mod);
diff --git a/sys/dev/spibus/controller/rockchip/rk_spi.c b/sys/dev/spibus/controller/rockchip/rk_spi.c
index f25ec77ead5b..db650763f6e1 100644
--- a/sys/dev/spibus/controller/rockchip/rk_spi.c
+++ b/sys/dev/spibus/controller/rockchip/rk_spi.c
@@ -332,13 +332,14 @@ rk_spi_attach(device_t dev)
goto fail;
}
- sc->spibus = device_add_child(dev, "spibus", -1);
+ sc->spibus = device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
RK_SPI_WRITE_4(sc, RK_SPI_IMR, 0);
RK_SPI_WRITE_4(sc, RK_SPI_TXFTLR, sc->fifo_size/2 - 1);
RK_SPI_WRITE_4(sc, RK_SPI_RXFTLR, sc->fifo_size/2 - 1);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
fail:
rk_spi_detach(dev);
@@ -353,8 +354,6 @@ rk_spi_detach(device_t dev)
sc = device_get_softc(dev);
bus_generic_detach(sc->dev);
- if (sc->spibus != NULL)
- device_delete_child(dev, sc->spibus);
if (sc->clk_spi != NULL)
clk_release(sc->clk_spi);
diff --git a/sys/dev/spibus/ofw_spibus.c b/sys/dev/spibus/ofw_spibus.c
index 654086d0679b..cbf87b7b356d 100644
--- a/sys/dev/spibus/ofw_spibus.c
+++ b/sys/dev/spibus/ofw_spibus.c
@@ -83,7 +83,7 @@ ofw_spibus_attach(device_t dev)
sc->dev = dev;
- bus_generic_probe(dev);
+ bus_identify_children(dev);
bus_enumerate_hinted_children(dev);
/*
@@ -148,7 +148,7 @@ ofw_spibus_attach(device_t dev)
free(dinfo, M_DEVBUF);
continue;
}
- childdev = device_add_child(dev, NULL, -1);
+ childdev = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
resource_list_init(&dinfo->opd_dinfo.rl);
ofw_bus_intr_to_rl(childdev, child,
@@ -156,7 +156,8 @@ ofw_spibus_attach(device_t dev)
device_set_ivars(childdev, dinfo);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_t
@@ -190,6 +191,12 @@ ofw_spibus_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static void
+ofw_spibus_child_deleted(device_t dev, device_t child)
+{
+ free(device_get_ivars(child), M_DEVBUF);
+}
+
static const struct ofw_bus_devinfo *
ofw_spibus_get_devinfo(device_t bus, device_t dev)
{
@@ -216,6 +223,7 @@ static device_method_t ofw_spibus_methods[] = {
/* Bus interface */
DEVMETHOD(bus_child_pnpinfo, ofw_bus_gen_child_pnpinfo),
DEVMETHOD(bus_add_child, ofw_spibus_add_child),
+ DEVMETHOD(bus_child_deleted, ofw_spibus_child_deleted),
DEVMETHOD(bus_get_resource_list, ofw_spibus_get_resource_list),
/* ofw_bus interface */
diff --git a/sys/dev/spibus/spibus.c b/sys/dev/spibus/spibus.c
index 3db3c58b4ef1..a2e2d884851b 100644
--- a/sys/dev/spibus/spibus.c
+++ b/sys/dev/spibus/spibus.c
@@ -57,30 +57,8 @@ spibus_attach(device_t dev)
sc->dev = dev;
bus_enumerate_hinted_children(dev);
- return (bus_generic_attach(dev));
-}
-
-/*
- * Since this is not a self-enumerating bus, and since we always add
- * children in attach, we have to always delete children here.
- */
-int
-spibus_detach(device_t dev)
-{
- return (device_delete_children(dev));
-}
-
-static int
-spibus_suspend(device_t dev)
-{
- return (bus_generic_suspend(dev));
-}
-
-static
-int
-spibus_resume(device_t dev)
-{
- return (bus_generic_resume(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -198,6 +176,18 @@ spibus_add_child_common(device_t dev, u_int order, const char *name, int unit,
return (child);
}
+void
+spibus_child_deleted(device_t dev, device_t child)
+{
+ struct spibus_ivar *devi;
+
+ devi = device_get_ivars(child);
+ if (devi == NULL)
+ return;
+ resource_list_free(&devi->rl);
+ free(devi, M_DEVBUF);
+}
+
static device_t
spibus_add_child(device_t dev, u_int order, const char *name, int unit)
{
@@ -244,10 +234,10 @@ static device_method_t spibus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, spibus_probe),
DEVMETHOD(device_attach, spibus_attach),
- DEVMETHOD(device_detach, spibus_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, spibus_suspend),
- DEVMETHOD(device_resume, spibus_resume),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
/* Bus interface */
DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
@@ -262,6 +252,7 @@ static device_method_t spibus_methods[] = {
DEVMETHOD(bus_get_resource_list, spibus_get_resource_list),
DEVMETHOD(bus_add_child, spibus_add_child),
+ DEVMETHOD(bus_child_deleted, spibus_child_deleted),
DEVMETHOD(bus_print_child, spibus_print_child),
DEVMETHOD(bus_probe_nomatch, spibus_probe_nomatch),
DEVMETHOD(bus_read_ivar, spibus_read_ivar),
diff --git a/sys/dev/spibus/spibusvar.h b/sys/dev/spibus/spibusvar.h
index 338bacd82dcf..25cd380173ad 100644
--- a/sys/dev/spibus/spibusvar.h
+++ b/sys/dev/spibus/spibusvar.h
@@ -77,8 +77,8 @@ extern driver_t spibus_driver;
extern driver_t ofw_spibus_driver;
int spibus_attach(device_t);
-int spibus_detach(device_t);
device_t spibus_add_child_common(device_t, u_int, const char *, int, size_t);
+void spibus_child_deleted(device_t, device_t);
void spibus_probe_nomatch(device_t, device_t);
int spibus_child_location(device_t, device_t, struct sbuf *);
int spibus_read_ivar(device_t, device_t, int, uintptr_t *);
diff --git a/sys/dev/sram/mmio_sram.c b/sys/dev/sram/mmio_sram.c
index c1d024459650..4409386d9ad1 100644
--- a/sys/dev/sram/mmio_sram.c
+++ b/sys/dev/sram/mmio_sram.c
@@ -94,7 +94,7 @@ mmio_sram_attach(device_t dev)
/*
* Allow devices to identify.
*/
- bus_generic_probe(dev);
+ bus_identify_children(dev);
/*
* Now walk the OFW tree and attach top-level devices.
@@ -102,7 +102,8 @@ mmio_sram_attach(device_t dev)
for (node = OF_child(node); node > 0; node = OF_peer(node))
simplebus_add_device(dev, node, 0, NULL, -1, NULL);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/ste/if_ste.c b/sys/dev/ste/if_ste.c
index 7c7eb02ccfb7..bf8f6fafec11 100644
--- a/sys/dev/ste/if_ste.c
+++ b/sys/dev/ste/if_ste.c
@@ -905,7 +905,7 @@ ste_attach(device_t dev)
struct ste_softc *sc;
if_t ifp;
uint16_t eaddr[ETHER_ADDR_LEN / 2];
- int error = 0, phy, pmc, prefer_iomap, rid;
+ int error = 0, phy, prefer_iomap, rid;
sc = device_get_softc(dev);
sc->ste_dev = dev;
@@ -987,11 +987,6 @@ ste_attach(device_t dev)
goto fail;
ifp = sc->ste_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
/* Do MII setup. */
phy = MII_PHY_ANY;
@@ -1025,7 +1020,7 @@ ste_attach(device_t dev)
*/
if_setifheaderlen(ifp, sizeof(struct ether_vlan_header));
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
+ if (pci_has_pm(dev))
if_setcapabilitiesbit(ifp, IFCAP_WOL_MAGIC, 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
#ifdef DEVICE_POLLING
@@ -1079,8 +1074,6 @@ ste_detach(device_t dev)
STE_UNLOCK(sc);
callout_drain(&sc->ste_callout);
}
- if (sc->ste_miibus)
- device_delete_child(dev, sc->ste_miibus);
bus_generic_detach(dev);
if (sc->ste_intrhand)
@@ -1999,21 +1992,9 @@ ste_resume(device_t dev)
{
struct ste_softc *sc;
if_t ifp;
- int pmc;
- uint16_t pmstat;
sc = device_get_softc(dev);
STE_LOCK(sc);
- if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) == 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->ste_dev,
- pmc + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->ste_dev,
- pmc + PCIR_POWER_STATUS, pmstat, 2);
- }
- }
ifp = sc->ste_ifp;
if ((if_getflags(ifp) & IFF_UP) != 0) {
if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING);
@@ -2102,13 +2083,11 @@ static void
ste_setwol(struct ste_softc *sc)
{
if_t ifp;
- uint16_t pmstat;
uint8_t val;
- int pmc;
STE_LOCK_ASSERT(sc);
- if (pci_find_cap(sc->ste_dev, PCIY_PMG, &pmc) != 0) {
+ if (!pci_has_pm(sc->ste_dev)) {
/* Disable WOL. */
CSR_READ_1(sc, STE_WAKE_EVENT);
CSR_WRITE_1(sc, STE_WAKE_EVENT, 0);
@@ -2123,9 +2102,6 @@ ste_setwol(struct ste_softc *sc)
val |= STE_WAKEEVENT_MAGICPKT_ENB | STE_WAKEEVENT_WAKEONLAN_ENB;
CSR_WRITE_1(sc, STE_WAKE_EVENT, val);
/* Request PME. */
- pmstat = pci_read_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->ste_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->ste_dev);
}
diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c
index 170d9e4da94c..a9a30332358c 100644
--- a/sys/dev/stge/if_stge.c
+++ b/sys/dev/stge/if_stge.c
@@ -560,12 +560,6 @@ stge_attach(device_t dev)
}
ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->sc_dev, "failed to if_alloc()\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -678,10 +672,6 @@ stge_detach(device_t dev)
ether_ifdetach(ifp);
}
- if (sc->sc_miibus != NULL) {
- device_delete_child(dev, sc->sc_miibus);
- sc->sc_miibus = NULL;
- }
bus_generic_detach(dev);
stge_dma_free(sc);
diff --git a/sys/dev/sume/if_sume.c b/sys/dev/sume/if_sume.c
index 8d6223cb54d1..7b2a27135e43 100644
--- a/sys/dev/sume/if_sume.c
+++ b/sys/dev/sume/if_sume.c
@@ -1143,7 +1143,7 @@ check_tx_queues(struct sume_adapter *adapter)
}
}
-static int
+static void
sume_ifp_alloc(struct sume_adapter *adapter, uint32_t port)
{
if_t ifp;
@@ -1151,11 +1151,6 @@ sume_ifp_alloc(struct sume_adapter *adapter, uint32_t port)
M_ZERO | M_WAITOK);
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(adapter->dev, "cannot allocate ifnet\n");
- return (ENOMEM);
- }
-
adapter->ifp[port] = ifp;
if_setsoftc(ifp, nf_priv);
@@ -1181,8 +1176,6 @@ sume_ifp_alloc(struct sume_adapter *adapter, uint32_t port)
ifmedia_set(&nf_priv->media, IFM_ETHER | IFM_10G_SR);
if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
-
- return (0);
}
static void
@@ -1202,16 +1195,11 @@ sume_probe_riffa_buffer(const struct sume_adapter *adapter,
{
struct riffa_chnl_dir **rp;
bus_addr_t hw_addr;
- int error, ch;
+ int ch;
device_t dev = adapter->dev;
- error = ENOMEM;
*p = malloc(SUME_RIFFA_CHANNELS * sizeof(struct riffa_chnl_dir *),
M_SUME, M_ZERO | M_WAITOK);
- if (*p == NULL) {
- device_printf(dev, "malloc(%s) failed.\n", dir);
- return (error);
- }
rp = *p;
/* Allocate the chnl_dir structs themselves. */
@@ -1219,11 +1207,6 @@ sume_probe_riffa_buffer(const struct sume_adapter *adapter,
/* One direction. */
rp[ch] = malloc(sizeof(struct riffa_chnl_dir), M_SUME,
M_ZERO | M_WAITOK);
- if (rp[ch] == NULL) {
- device_printf(dev, "malloc(%s[%d]) riffa_chnl_dir "
- "failed.\n", dir, ch);
- return (error);
- }
int err = bus_dma_tag_create(bus_get_dma_tag(dev),
4, 0,
@@ -1452,11 +1435,8 @@ sume_attach(device_t dev)
goto error;
/* Now do the network interfaces. */
- for (i = 0; i < SUME_NPORTS; i++) {
- error = sume_ifp_alloc(adapter, i);
- if (error != 0)
- goto error;
- }
+ for (i = 0; i < SUME_NPORTS; i++)
+ sume_ifp_alloc(adapter, i);
/* Register stats and register sysctls. */
sume_sysctl_init(adapter);
diff --git a/sys/dev/superio/superio.c b/sys/dev/superio/superio.c
index 35b15ac4d62e..24d40eb7a208 100644
--- a/sys/dev/superio/superio.c
+++ b/sys/dev/superio/superio.c
@@ -37,11 +37,11 @@
#include <sys/proc.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
+#include <sys/stdarg.h>
#include <sys/time.h>
#include <machine/bus.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <isa/isavar.h>
@@ -615,12 +615,9 @@ superio_detect(device_t dev, bool claim, struct siosc *sc)
if (superio_table[i].descr != NULL) {
device_set_desc(dev, superio_table[i].descr);
} else if (sc->vendor == SUPERIO_VENDOR_ITE) {
- char descr[64];
-
- snprintf(descr, sizeof(descr),
+ device_set_descf(dev,
"ITE IT%4x SuperIO (revision 0x%02x)",
sc->devid, sc->revid);
- device_set_desc_copy(dev, descr);
}
return (0);
}
@@ -636,7 +633,7 @@ superio_identify(driver_t *driver, device_t parent)
* Those could be created via isa hints or if this
* driver is loaded, unloaded and then loaded again.
*/
- if (device_find_child(parent, "superio", -1)) {
+ if (device_find_child(parent, "superio", DEVICE_UNIT_ANY)) {
if (bootverbose)
printf("superio: device(s) already created\n");
return;
@@ -688,7 +685,7 @@ superio_add_known_child(device_t dev, superio_dev_type_t type, uint8_t ldn)
struct superio_devinfo *dinfo;
device_t child;
- child = BUS_ADD_CHILD(dev, 0, NULL, -1);
+ child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "failed to add child for ldn %d, type %s\n",
ldn, devtype_to_str(type));
@@ -720,8 +717,8 @@ superio_attach(device_t dev)
sc->known_devices[i].ldn);
}
- bus_generic_probe(dev);
- bus_generic_attach(dev);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
sc->chardev = make_dev(&superio_cdevsw, device_get_unit(dev),
UID_ROOT, GID_WHEEL, 0600, "superio%d", device_get_unit(dev));
@@ -743,7 +740,6 @@ superio_detach(device_t dev)
return (error);
if (sc->chardev != NULL)
destroy_dev(sc->chardev);
- device_delete_children(dev);
bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res);
mtx_destroy(&sc->conf_lock);
return (0);
@@ -771,6 +767,18 @@ superio_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static void
+superio_child_deleted(device_t dev, device_t child)
+{
+ struct superio_devinfo *dinfo;
+
+ dinfo = device_get_ivars(child);
+ if (dinfo == NULL)
+ return;
+ resource_list_free(&dinfo->resources);
+ free(dinfo, M_DEVBUF);
+}
+
static int
superio_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
@@ -1081,6 +1089,7 @@ static device_method_t superio_methods[] = {
DEVMETHOD(device_resume, bus_generic_resume),
DEVMETHOD(bus_add_child, superio_add_child),
+ DEVMETHOD(bus_child_deleted, superio_child_deleted),
DEVMETHOD(bus_child_detached, superio_child_detached),
DEVMETHOD(bus_child_location, superio_child_location),
DEVMETHOD(bus_child_pnpinfo, superio_child_pnp),
diff --git a/sys/dev/sym/sym_hipd.c b/sys/dev/sym/sym_hipd.c
index fa65d544e17d..b4e5c1075fb4 100644
--- a/sys/dev/sym/sym_hipd.c
+++ b/sys/dev/sym/sym_hipd.c
@@ -3266,7 +3266,7 @@ static void sym_init (hcb_p np, int reason)
* Reinitialize usrwide.
* Prepare sync negotiation according to actual SCSI bus mode.
*/
- for (i=0;i<SYM_CONF_MAX_TARGET;i++) {
+ for (i = 0; i < SYM_CONF_MAX_TARGET; i++) {
tcb_p tp = &np->target[i];
tp->to_reset = 0;
@@ -3715,7 +3715,7 @@ static void sym_log_hard_error(hcb_p np, u_short sist, u_char dstat)
}
printf ("%s: regdump:", sym_name(np));
- for (i=0; i<24;i++)
+ for (i = 0; i < 24; i++)
printf (" %02x", (unsigned)INB_OFF(i));
printf (".\n");
@@ -5527,8 +5527,8 @@ static int sym_show_msg (u_char * msg)
u_char i;
printf ("%x",*msg);
if (*msg==M_EXTENDED) {
- for (i=1;i<8;i++) {
- if (i-1>msg[1]) break;
+ for (i = 1; i < 8; i++) {
+ if (i - 1 > msg[1]) break;
printf ("-%x",msg[i]);
}
return (i+1);
@@ -6744,10 +6744,10 @@ restart_test:
/*
* Wait 'til done (with timeout)
*/
- for (i=0; i<SYM_SNOOP_TIMEOUT; i++)
+ for (i = 0; i < SYM_SNOOP_TIMEOUT; i++)
if (INB(nc_istat) & (INTF|SIP|DIP))
break;
- if (i>=SYM_SNOOP_TIMEOUT) {
+ if (i >= SYM_SNOOP_TIMEOUT) {
printf ("CACHE TEST FAILED: timeout.\n");
return (0x20);
}
diff --git a/sys/dev/syscon/syscon_generic.c b/sys/dev/syscon/syscon_generic.c
index be3d093915cc..6f039a680644 100644
--- a/sys/dev/syscon/syscon_generic.c
+++ b/sys/dev/syscon/syscon_generic.c
@@ -194,7 +194,8 @@ syscon_generic_attach(device_t dev)
sc->simplebus_attached = true;
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static int
diff --git a/sys/dev/syscons/syscons.c b/sys/dev/syscons/syscons.c
index 0985f03e1cb7..e9d664f51a1f 100644
--- a/sys/dev/syscons/syscons.c
+++ b/sys/dev/syscons/syscons.c
@@ -58,6 +58,7 @@
#include <sys/serial.h>
#include <sys/signalvar.h>
#include <sys/smp.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <sys/tty.h>
#include <sys/power.h>
@@ -72,7 +73,6 @@
#include <machine/psl.h>
#include <machine/frame.h>
#endif
-#include <machine/stdarg.h>
#if defined(__amd64__) || defined(__i386__)
#include <machine/vmparam.h>
@@ -1310,7 +1310,7 @@ sctty_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
if (i == sc->cur_scp->index)
return 0;
error =
- tsleep(VTY_WCHAN(sc, i), (PZERO + 1) | PCATCH, "waitvt", 0);
+ tsleep(VTY_WCHAN(sc, i), PZERO | PCATCH, "waitvt", 0);
return error;
case VT_GETACTIVE: /* get active vty # */
@@ -3571,7 +3571,7 @@ sc_alloc_scr_buffer(scr_stat *scp, int wait, int discard)
old = scp->vtb;
sc_vtb_init(&new, VTB_MEMORY, scp->xsize, scp->ysize, NULL, wait);
if (!discard && (old.vtb_flags & VTB_VALID)) {
- /* retain the current cursor position and buffer contants */
+ /* retain the current cursor position and buffer constants */
scp->cursor_oldpos = scp->cursor_pos;
/*
* This works only if the old buffer has the same size as or
diff --git a/sys/dev/syscons/sysmouse.c b/sys/dev/syscons/sysmouse.c
index 0e38070d613c..05008c50b950 100644
--- a/sys/dev/syscons/sysmouse.c
+++ b/sys/dev/syscons/sysmouse.c
@@ -94,7 +94,15 @@ smdev_evdev_write(int x, int y, int z, int buttons)
evdev_push_event(sysmouse_evdev, EV_REL, REL_X, x);
evdev_push_event(sysmouse_evdev, EV_REL, REL_Y, y);
switch (evdev_sysmouse_t_axis) {
- case EVDEV_SYSMOUSE_T_AXIS_PSM:
+ case EVDEV_SYSMOUSE_T_AXIS_WSP: /* 3 */
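+ /*
+ * Assumption: wsp(4) flags a horizontal scroll report via
+ * button bit 5, mirroring the UMS T-axis encoding below.
+ */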
+ if (buttons & (1 << 5)) {
+ evdev_push_rel(sysmouse_evdev, REL_HWHEEL, z);
+ buttons &= ~(1 << 5);
+ } else {
+ evdev_push_rel(sysmouse_evdev, REL_WHEEL, -z);
+ }
+ break;
+ case EVDEV_SYSMOUSE_T_AXIS_PSM: /* 2 */
switch (z) {
case 1:
case -1:
@@ -106,14 +114,14 @@ smdev_evdev_write(int x, int y, int z, int buttons)
break;
}
break;
- case EVDEV_SYSMOUSE_T_AXIS_UMS:
+ case EVDEV_SYSMOUSE_T_AXIS_UMS: /* 1 */
if (buttons & (1 << 6))
evdev_push_rel(sysmouse_evdev, REL_HWHEEL, 1);
else if (buttons & (1 << 5))
evdev_push_rel(sysmouse_evdev, REL_HWHEEL, -1);
buttons &= ~((1 << 5)|(1 << 6));
/* PASSTHROUGH */
- case EVDEV_SYSMOUSE_T_AXIS_NONE:
+ case EVDEV_SYSMOUSE_T_AXIS_NONE: /* 0 */
default:
evdev_push_rel(sysmouse_evdev, REL_WHEEL, -z);
}
diff --git a/sys/dev/thunderbolt/hcm.c b/sys/dev/thunderbolt/hcm.c
new file mode 100644
index 000000000000..b8f703fc3b52
--- /dev/null
+++ b/sys/dev/thunderbolt/hcm.c
@@ -0,0 +1,223 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Host Configuration Manager (HCM) for USB4 and later TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/hcm_var.h>
+
+static void hcm_cfg_task(void *, int);
+
+int
+hcm_attach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ tb_debug(nsc, DBG_HCM|DBG_EXTRA, "hcm_attach called\n");
+
+ hcm = malloc(sizeof(struct hcm_softc), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (hcm == NULL) {
+ tb_debug(nsc, DBG_HCM, "Cannot allocate hcm object\n");
+ return (ENOMEM);
+ }
+
+ hcm->dev = nsc->dev;
+ hcm->nsc = nsc;
+ nsc->hcm = hcm;
+
+ hcm->taskqueue = taskqueue_create("hcm_event", M_NOWAIT,
+ taskqueue_thread_enqueue, &hcm->taskqueue);
+ if (hcm->taskqueue == NULL) {
+ free(hcm, M_THUNDERBOLT);
+ nsc->hcm = NULL;
+ return (ENOMEM);
+ }
+ taskqueue_start_threads(&hcm->taskqueue, 1, PI_DISK, "tbhcm%d_tq",
+ device_get_unit(nsc->dev));
+ TASK_INIT(&hcm->cfg_task, 0, hcm_cfg_task, hcm);
+
+ return (0);
+}
+
+int
+hcm_detach(struct nhi_softc *nsc)
+{
+ struct hcm_softc *hcm;
+
+ hcm = nsc->hcm;

+ if (hcm->taskqueue)
+ taskqueue_free(hcm->taskqueue);
+ free(hcm, M_THUNDERBOLT);
+ nsc->hcm = NULL;
+
+ return (0);
+}
+
+int
+hcm_router_discover(struct hcm_softc *hcm)
+{
+
+ taskqueue_enqueue(hcm->taskqueue, &hcm->cfg_task);
+
+ return (0);
+}
+
+static void
+hcm_cfg_task(void *arg, int pending)
+{
+ struct hcm_softc *hcm;
+ struct router_softc *rsc;
+ struct router_cfg_cap cap;
+ struct tb_cfg_router *cfg;
+ struct tb_cfg_adapter *adp;
+ struct tb_cfg_cap_lane *lane;
+ uint32_t *buf;
+ uint8_t *u;
+ u_int error, i, offset;
+
+ hcm = (struct hcm_softc *)arg;
+
+ tb_debug(hcm, DBG_HCM|DBG_EXTRA, "hcm_cfg_task called\n");
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL) {
+ tb_debug(hcm, DBG_HCM, "Cannot alloc memory for discovery\n");
+ return;
+ }
+
+ rsc = hcm->nsc->root_rsc;
+ error = tb_config_router_read(rsc, 0, 5, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return;
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+
+ cap.space = TB_CFG_CS_ROUTER;
+ cap.adap = 0;
+ cap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+ while (cap.next_cap != 0) {
+ error = tb_config_next_cap(rsc, &cap);
+ if (error != 0)
+ break;
+
+ if ((cap.cap_id == TB_CFG_CAP_VSEC) && (cap.vsc_len == 0)) {
+ tb_debug(hcm, DBG_HCM, "Router Cap= %d, vsec= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsec_len, cap.next_cap);
+ } else if (cap.cap_id == TB_CFG_CAP_VSC) {
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, vsc= %d, "
+ "len= %d, next_cap= %d\n", cap.cap_id,
+ cap.vsc_id, cap.vsc_len, cap.next_cap);
+ } else
+ tb_debug(hcm, DBG_HCM, "Router cap= %d, "
+ "next_cap= %d\n", cap.cap_id, cap.next_cap);
+ if (cap.next_cap > TB_CFG_CAP_OFFSET_MAX)
+ cap.next_cap = 0;
+ }
+
+ u = (uint8_t *)buf;
+ error = tb_config_get_lc_uuid(rsc, u);
+ if (error == 0) {
+ tb_debug(hcm, DBG_HCM, "Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[0], u[1], u[2], u[3], u[4], u[5], u[6], u[7], u[8],
+ u[9], u[10], u[11], u[12], u[13], u[14], u[15]);
+ } else
+ tb_printf(hcm, "Error finding LC registers: %d\n", error);
+
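+ /*
+ * Walk every adapter on the root router; a lane adapter that has
+ * trained to CL0 indicates a connected device whose router can be
+ * attached.
+ */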
+ for (i = 1; i <= rsc->max_adap; i++) {
+ error = tb_config_adapter_read(rsc, i, 0, 8, buf);
+ if (error != 0) {
+ tb_debug(hcm, DBG_HCM, "Adapter %d: no adapter\n", i);
+ continue;
+ }
+ adp = (struct tb_cfg_adapter *)buf;
+ tb_debug(hcm, DBG_HCM, "Adapter %d: %s, max_counters= 0x%08x,"
+ " adapter_num= %d\n", i,
+ tb_get_string(GET_ADP_CS_TYPE(adp), tb_adapter_type),
+ GET_ADP_CS_MAX_COUNTERS(adp), GET_ADP_CS_ADP_NUM(adp));
+
+ if (GET_ADP_CS_TYPE(adp) != ADP_CS2_LANE)
+ continue;
+
+ error = tb_config_find_adapter_cap(rsc, i, TB_CFG_CAP_LANE,
+ &offset);
+ if (error)
+ continue;
+
+ error = tb_config_adapter_read(rsc, i, offset, 3, buf);
+ if (error)
+ continue;
+
+ lane = (struct tb_cfg_cap_lane *)buf;
+ tb_debug(hcm, DBG_HCM, "Lane Adapter State= %s %s\n",
+ tb_get_string((lane->current_lws & CAP_LANE_STATE_MASK),
+ tb_adapter_state), (lane->targ_lwp & CAP_LANE_DISABLE) ?
+ "disabled" : "enabled");
+
+ if ((lane->current_lws & CAP_LANE_STATE_MASK) ==
+ CAP_LANE_STATE_CL0) {
+ tb_route_t newr;
+
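+ /*
+ * Route strings encode one hop per byte: append this
+ * adapter number at the current depth to address the
+ * router behind the lane.
+ */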
+ newr.hi = rsc->route.hi;
+ newr.lo = rsc->route.lo | (i << rsc->depth * 8);
+
+ tb_printf(hcm, "want to add router at 0x%08x%08x\n",
+ newr.hi, newr.lo);
+ error = tb_router_attach(rsc, newr);
+ tb_printf(rsc, "tb_router_attach returned %d\n", error);
+ }
+ }
+
+ free(buf, M_THUNDERBOLT);
+}
diff --git a/sys/dev/sound/chip.h b/sys/dev/thunderbolt/hcm_var.h
index bb40d2809a00..a11c8e9b6a92 100644
--- a/sys/dev/sound/chip.h
+++ b/sys/dev/thunderbolt/hcm_var.h
@@ -1,7 +1,7 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 1999 Seigo Tanimura
+ * Copyright (c) 2022 Scott Long
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -24,24 +24,24 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-/*
- * These are the function codes assigned to the children of
- * sound cards.
- */
-enum {
- SCF_PCM,
- SCF_MIDI,
- SCF_SYNTH,
-};
+#ifndef _HCM_VAR_H
+#define _HCM_VAR_H
-/*
- * This is the device information struct, used by
- * a bridge device to pass the device function code
- * to the children.
- */
-struct sndcard_func {
- int func; /* The function code. */
- void *varinfo; /* Bridge-specific information. */
+struct hcm_softc {
+ u_int debug;
+ device_t dev;
+ struct nhi_softc *nsc;
+
+ struct task cfg_task;
+ struct taskqueue *taskqueue;
};
+
+int hcm_attach(struct nhi_softc *);
+int hcm_detach(struct nhi_softc *);
+int hcm_router_discover(struct hcm_softc *);
+
+#endif /* _HCM_VAR_H */
diff --git a/sys/dev/thunderbolt/nhi.c b/sys/dev/thunderbolt/nhi.c
new file mode 100644
index 000000000000..205e69c16253
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi.c
@@ -0,0 +1,1170 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface (nhi) */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/hcm_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include "tb_if.h"
+
+static int nhi_alloc_ring(struct nhi_softc *, int, int, int,
+ struct nhi_ring_pair **);
+static void nhi_free_ring(struct nhi_ring_pair *);
+static void nhi_free_rings(struct nhi_softc *);
+static int nhi_configure_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_activate_ring(struct nhi_ring_pair *);
+static int nhi_deactivate_ring(struct nhi_ring_pair *);
+static int nhi_alloc_ring0(struct nhi_softc *);
+static void nhi_free_ring0(struct nhi_softc *);
+static void nhi_fill_rx_ring(struct nhi_softc *, struct nhi_ring_pair *);
+static int nhi_init(struct nhi_softc *);
+static void nhi_post_init(void *);
+static int nhi_tx_enqueue(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+static int nhi_setup_sysctl(struct nhi_softc *);
+
+SYSCTL_NODE(_hw, OID_AUTO, nhi, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "NHI Driver Parameters");
+
+MALLOC_DEFINE(M_NHI, "nhi", "nhi driver memory");
+
+#ifndef NHI_DEBUG_LEVEL
+#define NHI_DEBUG_LEVEL 0
+#endif
+
+/* 0 = default, 1 = force-on, 2 = force-off */
+#ifndef NHI_FORCE_HCM
+#define NHI_FORCE_HCM 0
+#endif
+
+void
+nhi_get_tunables(struct nhi_softc *sc)
+{
+ devclass_t dc;
+ device_t ufp;
+ char tmpstr[80], oid[80];
+ u_int val;
+
+ /* Set local defaults */
+ sc->debug = NHI_DEBUG_LEVEL;
+ sc->max_ring_count = NHI_DEFAULT_NUM_RINGS;
+ sc->force_hcm = NHI_FORCE_HCM;
+
+ /* Inherit setting from the upstream thunderbolt switch node */
+ val = TB_GET_DEBUG(sc->dev, &sc->debug);
+ if (val != 0) {
+ dc = devclass_find("tbolt");
+ if (dc != NULL) {
+ ufp = devclass_get_device(dc, device_get_unit(sc->dev));
+ if (ufp != NULL)
+ TB_GET_DEBUG(ufp, &sc->debug);
+ } else {
+ if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid,
+ 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ }
+ }
+
+ /*
+ * Grab global variables. Allow nhi debug flags to override
+ * thunderbolt debug flags, if present.
+ */
+ bzero(oid, 80);
+ if (TUNABLE_STR_FETCH("hw.nhi.debug_level", oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ if (TUNABLE_INT_FETCH("hw.nhi.max_rings", &val) != 0) {
+ val = min(val, NHI_MAX_NUM_RINGS);
+ sc->max_ring_count = max(val, 1);
+ }
+ if (TUNABLE_INT_FETCH("hw.nhi.force_hcm", &val) != 0)
+ sc->force_hcm = val;
+
+ /* Grab instance variables */
+ bzero(oid, 80);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.debug_level",
+ device_get_unit(sc->dev));
+ if (TUNABLE_STR_FETCH(tmpstr, oid, 80) != 0)
+ tb_parse_debug(&sc->debug, oid);
+ snprintf(tmpstr, sizeof(tmpstr), "dev.nhi.%d.max_rings",
+ device_get_unit(sc->dev));
+ if (TUNABLE_INT_FETCH(tmpstr, &val) != 0) {
+ val = min(val, NHI_MAX_NUM_RINGS);
+ sc->max_ring_count = max(val, 1);
+ }
+ snprintf(tmpstr, sizeof(tmpstr), "dev, nhi.%d.force_hcm",
+ device_get_unit(sc->dev));
+ if (TUNABLE_INT_FETCH(tmpstr, &val) != 0)
+ sc->force_hcm = val;
+
+ return;
+}
+
+static void
+nhi_configure_caps(struct nhi_softc *sc)
+{
+
+ if (NHI_IS_USB4(sc) || (sc->force_hcm == NHI_FORCE_HCM_ON))
+ sc->caps |= NHI_CAP_HCM;
+ if (sc->force_hcm == NHI_FORCE_HCM_OFF)
+ sc->caps &= ~NHI_CAP_HCM;
+}
+
+struct nhi_cmd_frame *
+nhi_alloc_tx_frame(struct nhi_ring_pair *r)
+{
+ struct nhi_cmd_frame *cmd;
+
+ mtx_lock(&r->mtx);
+ cmd = nhi_alloc_tx_frame_locked(r);
+ mtx_unlock(&r->mtx);
+
+ return (cmd);
+}
+
+void
+nhi_free_tx_frame(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ mtx_lock(&r->mtx);
+ nhi_free_tx_frame_locked(r, cmd);
+ mtx_unlock(&r->mtx);
+}
+
+/*
+ * Push a command and data dword through the mailbox to the firmware.
+ * Response is either good, error, or timeout. Commands that return data
+ * do so by reading OUTMAILDATA.
+ */
+int
+nhi_inmail_cmd(struct nhi_softc *sc, uint32_t cmd, uint32_t data)
+{
+ uint32_t val;
+ u_int error, timeout;
+
+ mtx_lock(&sc->nhi_mtx);
+ /*
+ * XXX Should a defer/reschedule happen here, or is it not worth
+ * worrying about?
+ */
+ if (sc->hwflags & NHI_MBOX_BUSY) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX, "Driver busy with mailbox\n");
+ return (EBUSY);
+ }
+ sc->hwflags |= NHI_MBOX_BUSY;
+
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_FULL, "Reading INMAILCMD= 0x%08x\n", val);
+ if (val & INMAILCMD_ERROR)
+ tb_debug(sc, DBG_MBOX, "Error already set in INMAILCMD\n");
+ if (val & INMAILCMD_OPREQ) {
+ mtx_unlock(&sc->nhi_mtx);
+ tb_debug(sc, DBG_MBOX,
+ "INMAILCMD request already in progress\n");
+ return (EBUSY);
+ }
+
+ nhi_write_reg(sc, TBT_INMAILDATA, data);
+ nhi_write_reg(sc, TBT_INMAILCMD, cmd | INMAILCMD_OPREQ);
+
+ /* Poll at 1s intervals */
+ timeout = NHI_MAILBOX_TIMEOUT;
+ while (timeout--) {
+ DELAY(1000000);
+ val = nhi_read_reg(sc, TBT_INMAILCMD);
+ tb_debug(sc, DBG_MBOX|DBG_EXTRA,
+ "Polling INMAILCMD= 0x%08x\n", val);
+ if ((val & INMAILCMD_OPREQ) == 0)
+ break;
+ }
+ sc->hwflags &= ~NHI_MBOX_BUSY;
+ mtx_unlock(&sc->nhi_mtx);
+
+ error = 0;
+ if (val & INMAILCMD_OPREQ) {
+ tb_printf(sc, "Timeout waiting for mailbox\n");
+ error = ETIMEDOUT;
+ }
+ if (val & INMAILCMD_ERROR) {
+ tb_printf(sc, "Firmware reports error in mailbox\n");
+ error = EINVAL;
+ }
+
+ return (error);
+}
+
+/*
+ * Pull command status and data from the firmware mailbox.
+ */
+int
+nhi_outmail_cmd(struct nhi_softc *sc, uint32_t *val)
+{
+
+ if (val == NULL)
+ return (EINVAL);
+ *val = nhi_read_reg(sc, TBT_OUTMAILCMD);
+ return (0);
+}
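+
+/*
+ * Sketch of a hypothetical caller pairing the two mailbox halves
+ * (INMAILCMD_SOME_OP is illustrative; real opcodes live in nhi_reg.h):
+ *
+ *	if (nhi_inmail_cmd(sc, INMAILCMD_SOME_OP, data) == 0)
+ *		error = nhi_outmail_cmd(sc, &status);
+ */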
+
+int
+nhi_attach(struct nhi_softc *sc)
+{
+ uint32_t val;
+ int error = 0;
+
+ if ((error = nhi_setup_sysctl(sc)) != 0)
+ return (error);
+
+ mtx_init(&sc->nhi_mtx, "nhimtx", "NHI Control Mutex", MTX_DEF);
+
+ nhi_configure_caps(sc);
+
+ /*
+ * Get the number of TX/RX paths. This sizes some of the register
+ * arrays during allocation and initialization. USB4 spec says that
+ * the max is 21. Alpine Ridge appears to default to 12.
+ */
+ val = GET_HOST_CAPS_PATHS(nhi_read_reg(sc, NHI_HOST_CAPS));
+ tb_debug(sc, DBG_INIT|DBG_NOISY, "Total Paths= %d\n", val);
+ if ((val == 0) || (val > 21) || ((NHI_IS_AR(sc) && val != 12))) {
+ tb_printf(sc, "WARN: unexpected number of paths: %d\n", val);
+ /* return (ENXIO); */
+ }
+ sc->path_count = val;
+
+ SLIST_INIT(&sc->ring_list);
+
+ error = nhi_pci_configure_interrupts(sc);
+ if (error == 0)
+ error = nhi_alloc_ring0(sc);
+ if (error == 0) {
+ nhi_configure_ring(sc, sc->ring0);
+ nhi_activate_ring(sc->ring0);
+ nhi_fill_rx_ring(sc, sc->ring0);
+ }
+
+ if (error == 0)
+ error = tbdev_add_interface(sc);
+
+ if ((error == 0) && (NHI_USE_ICM(sc)))
+ tb_printf(sc, "WARN: device uses an internal connection manager\n");
+ if ((error == 0) && (NHI_USE_HCM(sc)))
+ ;
+ error = hcm_attach(sc);
+
+ if (error == 0)
+ error = nhi_init(sc);
+
+ return (error);
+}
+
+int
+nhi_detach(struct nhi_softc *sc)
+{
+
+ if (NHI_USE_HCM(sc))
+ hcm_detach(sc);
+
+ if (sc->root_rsc != NULL)
+ tb_router_detach(sc->root_rsc);
+
+ tbdev_remove_interface(sc);
+
+ nhi_pci_disable_interrupts(sc);
+
+ nhi_free_ring0(sc);
+
+ /* XXX Should the rings be marked as !VALID in the descriptors? */
+ nhi_free_rings(sc);
+
+ mtx_destroy(&sc->nhi_mtx);
+
+ return (0);
+}
+
+static void
+nhi_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ bus_addr_t *addr;
+
+ addr = arg;
+ if (error == 0 && nsegs == 1) {
+ *addr = segs[0].ds_addr;
+ } else
+ *addr = 0;
+}
+
+static int
+nhi_alloc_ring(struct nhi_softc *sc, int ringnum, int tx_depth, int rx_depth,
+ struct nhi_ring_pair **rp)
+{
+ bus_dma_template_t t;
+ bus_addr_t ring_busaddr;
+ struct nhi_ring_pair *r;
+ int ring_size, error;
+ u_int rxring_len, txring_len;
+ char *ring;
+
+ if (ringnum >= sc->max_ring_count) {
+ tb_debug(sc, DBG_INIT, "Tried to allocate ring number %d\n",
+ ringnum);
+ return (EINVAL);
+ }
+
+ /* Allocate the ring structure and the RX/TX command trackers together. */
+ rxring_len = rx_depth * sizeof(void *);
+ txring_len = tx_depth * sizeof(void *);
+ r = malloc(sizeof(struct nhi_ring_pair) + rxring_len + txring_len,
+ M_NHI, M_NOWAIT|M_ZERO);
+ if (r == NULL) {
+ tb_printf(sc, "ERROR: Cannot allocate ring memory\n");
+ return (ENOMEM);
+ }
+
+ r->sc = sc;
+ TAILQ_INIT(&r->tx_head);
+ TAILQ_INIT(&r->rx_head);
+ r->ring_num = ringnum;
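+ /*
+ * Ring depths are assumed to be powers of two, so the masks below
+ * implement index wrap-around with a cheap AND.
+ */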
+ r->tx_ring_depth = tx_depth;
+ r->tx_ring_mask = tx_depth - 1;
+ r->rx_ring_depth = rx_depth;
+ r->rx_ring_mask = rx_depth - 1;
+ r->rx_pici_reg = NHI_RX_RING_PICI + ringnum * 16;
+ r->tx_pici_reg = NHI_TX_RING_PICI + ringnum * 16;
+ r->rx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r + sizeof (*r));
+ r->tx_cmd_ring = (struct nhi_cmd_frame **)((uint8_t *)r->rx_cmd_ring +
+ rxring_len);
+
+ snprintf(r->name, NHI_RING_NAMELEN, "nhiring%d", ringnum);
+ mtx_init(&r->mtx, r->name, "NHI Ring Lock", MTX_DEF);
+ tb_debug(sc, DBG_INIT | DBG_FULL, "Allocated ring context at %p, "
+ "mutex %p\n", r, &r->mtx);
+
+ /* Allocate the RX and TX buffer descriptor rings */
+ ring_size = sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+ ring_size += sizeof(struct nhi_rx_buffer_desc) * r->rx_ring_depth;
+ tb_debug(sc, DBG_INIT | DBG_FULL, "Ring %d ring_size= %d\n",
+ ringnum, ring_size);
+
+ bus_dma_template_init(&t, sc->parent_dmat);
+ t.alignment = 4;
+ t.maxsize = t.maxsegsize = ring_size;
+ t.nsegments = 1;
+ if ((error = bus_dma_template_tag(&t, &r->ring_dmat)) != 0) {
+ tb_printf(sc, "Cannot allocate ring %d DMA tag: %d\n",
+ ringnum, error);
+ return (ENOMEM);
+ }
+ if (bus_dmamem_alloc(r->ring_dmat, (void **)&ring, BUS_DMA_NOWAIT,
+ &r->ring_map)) {
+ tb_printf(sc, "Cannot allocate ring memory\n");
+ return (ENOMEM);
+ }
+ bzero(ring, ring_size);
+ bus_dmamap_load(r->ring_dmat, r->ring_map, ring, ring_size,
+ nhi_memaddr_cb, &ring_busaddr, 0);
+
+ r->ring = ring;
+
+ r->tx_ring = (union nhi_ring_desc *)(ring);
+ r->tx_ring_busaddr = ring_busaddr;
+ ring += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+ ring_busaddr += sizeof(struct nhi_tx_buffer_desc) * r->tx_ring_depth;
+
+ r->rx_ring = (union nhi_ring_desc *)(ring);
+ r->rx_ring_busaddr = ring_busaddr;
+
+ tb_debug(sc, DBG_INIT | DBG_EXTRA, "Ring %d: RX %p [0x%jx] "
+ "TX %p [0x%jx]\n", ringnum, r->tx_ring, r->tx_ring_busaddr,
+ r->rx_ring, r->rx_ring_busaddr);
+
+ *rp = r;
+ return (0);
+}
+
+static void
+nhi_free_ring(struct nhi_ring_pair *r)
+{
+
+ tb_debug(r->sc, DBG_INIT, "Freeing ring %d resources\n", r->ring_num);
+ nhi_deactivate_ring(r);
+
+ if (r->tx_ring_busaddr != 0) {
+ bus_dmamap_unload(r->ring_dmat, r->ring_map);
+ r->tx_ring_busaddr = 0;
+ }
+ if (r->ring != NULL) {
+ bus_dmamem_free(r->ring_dmat, r->ring, r->ring_map);
+ r->ring = NULL;
+ }
+ if (r->ring_dmat != NULL) {
+ bus_dma_tag_destroy(r->ring_dmat);
+ r->ring_dmat = NULL;
+ }
+ mtx_destroy(&r->mtx);
+}
+
+static void
+nhi_free_rings(struct nhi_softc *sc)
+{
+ struct nhi_ring_pair *r;
+
+ while ((r = SLIST_FIRST(&sc->ring_list)) != NULL) {
+ nhi_free_ring(r);
+ mtx_lock(&sc->nhi_mtx);
+ SLIST_REMOVE_HEAD(&sc->ring_list, ring_link);
+ mtx_unlock(&sc->nhi_mtx);
+ free(r, M_NHI);
+ }
+
+ return;
+}
+
+static int
+nhi_configure_ring(struct nhi_softc *sc, struct nhi_ring_pair *ring)
+{
+ bus_addr_t busaddr;
+ uint32_t val;
+ int idx;
+
+ idx = ring->ring_num * 16;
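+ /* Each ring owns a 16-byte window of address/size registers. */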
+
+ /* Program the TX ring address and size */
+ busaddr = ring->tx_ring_busaddr;
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_TX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, ring->tx_ring_depth);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_TIMESTAMP + idx, 0x0);
+ tb_debug(sc, DBG_INIT, "TX Ring %d TX_RING_SIZE= 0x%x\n",
+ ring->ring_num, ring->tx_ring_depth);
+
+ /* Program the RX ring address and size */
+ busaddr = ring->rx_ring_busaddr;
+ val = (ring->rx_buffer_size << 16) | ring->rx_ring_depth;
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_LO + idx, busaddr & 0xffffffff);
+ nhi_write_reg(sc, NHI_RX_RING_ADDR_HI + idx, busaddr >> 32);
+ nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, val);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE1 + idx, 0xffffffff);
+ tb_debug(sc, DBG_INIT, "RX Ring %d RX_RING_SIZE= 0x%x\n",
+ ring->ring_num, val);
+
+ return (0);
+}
+
+static int
+nhi_activate_ring(struct nhi_ring_pair *ring)
+{
+ struct nhi_softc *sc = ring->sc;
+ int idx;
+
+ nhi_pci_enable_interrupt(ring);
+
+ idx = ring->ring_num * 32;
+ tb_debug(sc, DBG_INIT, "Activating ring %d at idx %d\n",
+ ring->ring_num, idx);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx,
+ TX_TABLE_RAW | TX_TABLE_VALID);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx,
+ RX_TABLE_RAW | RX_TABLE_VALID);
+
+ return (0);
+}
+
+static int
+nhi_deactivate_ring(struct nhi_ring_pair *r)
+{
+ struct nhi_softc *sc = r->sc;
+ int idx;
+
+ idx = r->ring_num * 32;
+ tb_debug(sc, DBG_INIT, "Deactiving ring %d at idx %d\n",
+ r->ring_num, idx);
+ nhi_write_reg(sc, NHI_TX_RING_TABLE_BASE0 + idx, 0);
+ nhi_write_reg(sc, NHI_RX_RING_TABLE_BASE0 + idx, 0);
+
+ idx = r->ring_num * 16;
+ tb_debug(sc, DBG_INIT, "Setting ring %d sizes to 0\n", r->ring_num);
+ nhi_write_reg(sc, NHI_TX_RING_SIZE + idx, 0);
+ nhi_write_reg(sc, NHI_RX_RING_SIZE + idx, 0);
+
+ return (0);
+}
+
+static int
+nhi_alloc_ring0(struct nhi_softc *sc)
+{
+ bus_addr_t frames_busaddr;
+ bus_dma_template_t t;
+ struct nhi_intr_tracker *trkr;
+ struct nhi_ring_pair *r;
+ struct nhi_cmd_frame *cmd;
+ char *frames;
+ int error, size, i;
+
+ if ((error = nhi_alloc_ring(sc, 0, NHI_RING0_TX_DEPTH,
+ NHI_RING0_RX_DEPTH, &r)) != 0) {
+ tb_printf(sc, "Error allocating control ring\n");
+ return (error);
+ }
+
+ r->rx_buffer_size = NHI_RING0_FRAME_SIZE; /* Control packets are small */
+
+ /* Allocate the RX and TX buffers that are used for Ring0 comms */
+ size = r->tx_ring_depth * NHI_RING0_FRAME_SIZE;
+ size += r->rx_ring_depth * NHI_RING0_FRAME_SIZE;
+
+ bus_dma_template_init(&t, sc->parent_dmat);
+ t.maxsize = t.maxsegsize = size;
+ t.nsegments = 1;
+ if (bus_dma_template_tag(&t, &sc->ring0_dmat)) {
+ tb_printf(sc, "Error allocating control ring buffer tag\n");
+ return (ENOMEM);
+ }
+
+ if (bus_dmamem_alloc(sc->ring0_dmat, (void **)&frames, BUS_DMA_NOWAIT,
+ &sc->ring0_map) != 0) {
+ tb_printf(sc, "Error allocating control ring memory\n");
+ return (ENOMEM);
+ }
+ bzero(frames, size);
+ bus_dmamap_load(sc->ring0_dmat, sc->ring0_map, frames, size,
+ nhi_memaddr_cb, &frames_busaddr, 0);
+ sc->ring0_frames_busaddr = frames_busaddr;
+ sc->ring0_frames = frames;
+
+ /* Allocate the driver command trackers */
+ sc->ring0_cmds = malloc(sizeof(struct nhi_cmd_frame) *
+ (r->tx_ring_depth + r->rx_ring_depth), M_NHI, M_NOWAIT | M_ZERO);
+ if (sc->ring0_cmds == NULL)
+ return (ENOMEM);
+
+ /* Initialize the RX frames so they can be used */
+ mtx_lock(&r->mtx);
+ for (i = 0; i < r->rx_ring_depth; i++) {
+ cmd = &sc->ring0_cmds[i];
+ cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+ cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+ cmd->flags = CMD_MAPPED;
+ cmd->idx = i;
+ TAILQ_INSERT_TAIL(&r->rx_head, cmd, cm_link);
+ }
+
+ /* Initialize the TX frames */
+ for ( ; i < r->tx_ring_depth + r->rx_ring_depth - 1; i++) {
+ cmd = &sc->ring0_cmds[i];
+ cmd->data = (uint32_t *)(frames + NHI_RING0_FRAME_SIZE * i);
+ cmd->data_busaddr = frames_busaddr + NHI_RING0_FRAME_SIZE * i;
+ cmd->flags = CMD_MAPPED;
+ cmd->idx = i;
+ nhi_free_tx_frame_locked(r, cmd);
+ }
+ mtx_unlock(&r->mtx);
+
+ /* Do a 1:1 mapping of rings to interrupt vectors. */
+ /* XXX Should be abstracted */
+ trkr = &sc->intr_trackers[0];
+ trkr->ring = r;
+ r->tracker = trkr;
+
+ /* XXX Should be an array */
+ sc->ring0 = r;
+ SLIST_INSERT_HEAD(&sc->ring_list, r, ring_link);
+
+ return (0);
+}
+
+static void
+nhi_free_ring0(struct nhi_softc *sc)
+{
+ if (sc->ring0_cmds != NULL) {
+ free(sc->ring0_cmds, M_NHI);
+ sc->ring0_cmds = NULL;
+ }
+
+ if (sc->ring0_frames_busaddr != 0) {
+ bus_dmamap_unload(sc->ring0_dmat, sc->ring0_map);
+ sc->ring0_frames_busaddr = 0;
+ }
+
+ if (sc->ring0_frames != NULL) {
+ bus_dmamem_free(sc->ring0_dmat, sc->ring0_frames,
+ sc->ring0_map);
+ sc->ring0_frames = NULL;
+ }
+
+ if (sc->ring0_dmat != NULL)
+ bus_dma_tag_destroy(sc->ring0_dmat);
+
+ return;
+}
+
+static void
+nhi_fill_rx_ring(struct nhi_softc *sc, struct nhi_ring_pair *rp)
+{
+ struct nhi_cmd_frame *cmd;
+ struct nhi_rx_buffer_desc *desc;
+ u_int ci;
+
+ /* Assume that we never grow or shrink the ring population */
+ rp->rx_ci = ci = 0;
+ rp->rx_pi = 0;
+
+ do {
+ cmd = TAILQ_FIRST(&rp->rx_head);
+ if (cmd == NULL)
+ break;
+ TAILQ_REMOVE(&rp->rx_head, cmd, cm_link);
+ desc = &rp->rx_ring[ci].rx;
+ if ((cmd->flags & CMD_MAPPED) == 0)
+ panic("Need rx buffer mapping code");
+
+ desc->addr_lo = cmd->data_busaddr & 0xffffffff;
+ desc->addr_hi = (cmd->data_busaddr >> 32) & 0xffffffff;
+ desc->offset = 0;
+ desc->flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ rp->rx_ci = ci;
+ rp->rx_cmd_ring[ci] = cmd;
+ tb_debug(sc, DBG_RXQ | DBG_FULL,
+ "Updating ring%d ci= %d cmd= %p, busaddr= 0x%jx\n",
+ rp->ring_num, ci, cmd, cmd->data_busaddr);
+
+ ci = (rp->rx_ci + 1) & rp->rx_ring_mask;
+ } while (ci != rp->rx_pi);
+
+ /* Update the CI in one shot */
+ tb_debug(sc, DBG_RXQ, "Writing RX CI= %d\n", rp->rx_ci);
+ nhi_write_reg(sc, rp->rx_pici_reg, rp->rx_ci);
+
+ return;
+}
+
+static int
+nhi_init(struct nhi_softc *sc)
+{
+ tb_route_t root_route = {0x0, 0x0};
+ uint32_t val;
+ int error;
+
+ tb_debug(sc, DBG_INIT, "Initializing NHI\n");
+
+ /* Set interrupt Auto-ACK */
+ val = nhi_read_reg(sc, NHI_DMA_MISC);
+ tb_debug(sc, DBG_INIT|DBG_FULL, "Read NHI_DMA_MISC= 0x%08x\n", val);
+ val |= DMA_MISC_INT_AUTOCLEAR;
+ tb_debug(sc, DBG_INIT, "Setting interrupt auto-ACK, 0x%08x\n", val);
+ nhi_write_reg(sc, NHI_DMA_MISC, val);
+
+ if (NHI_IS_AR(sc) || NHI_IS_TR(sc) || NHI_IS_ICL(sc))
+ tb_printf(sc, "WARN: device uses an internal connection manager\n");
+
+ /*
+ * Populate the controller (local) UUID, necessary for cross-domain
+ * communications.
+ if (NHI_IS_ICL(sc))
+ nhi_pci_get_uuid(sc);
+ */
+
+ /*
+ * Attach the router to the root thunderbolt bridge now that the DMA
+ * channel is configured and ready.
+ * The root router always has a route of 0x0...0, so set it statically
+ * here.
+ */
+ if ((error = tb_router_attach_root(sc, root_route)) != 0)
+ tb_printf(sc, "tb_router_attach_root() error."
+ " The driver should be loaded at boot\n");
+
+ if (error == 0) {
+ sc->ich.ich_func = nhi_post_init;
+ sc->ich.ich_arg = sc;
+ error = config_intrhook_establish(&sc->ich);
+ if (error)
+ tb_printf(sc, "Failed to establish config hook\n");
+ }
+
+ return (error);
+}
+
+static void
+nhi_post_init(void *arg)
+{
+ struct nhi_softc *sc;
+ uint8_t *u;
+ int error;
+
+ sc = (struct nhi_softc *)arg;
+ tb_debug(sc, DBG_INIT | DBG_EXTRA, "nhi_post_init\n");
+
+ bzero(sc->lc_uuid, 16);
+ error = tb_config_get_lc_uuid(sc->root_rsc, sc->lc_uuid);
+ if (error == 0) {
+ u = sc->lc_uuid;
+ tb_printf(sc, "Root Router LC UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+ } else
+ tb_printf(sc, "Error finding LC registers: %d\n", error);
+
+ u = sc->uuid;
+ tb_printf(sc, "Root Router UUID: %02x%02x%02x%02x-"
+ "%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x\n",
+ u[15], u[14], u[13], u[12], u[11], u[10], u[9], u[8], u[7],
+ u[6], u[5], u[4], u[3], u[2], u[1], u[0]);
+
+ config_intrhook_disestablish(&sc->ich);
+}
+
+static int
+nhi_tx_enqueue(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_tx_buffer_desc *desc;
+ uint16_t pi;
+
+ sc = r->sc;
+
+ /* A length of 0 means 4096. Can't have longer lengths */
+ if (cmd->req_len > TX_BUFFER_DESC_LEN_MASK + 1) {
+ tb_debug(sc, DBG_TXQ, "Error: TX frame too big\n");
+ return (EINVAL);
+ }
+ cmd->req_len &= TX_BUFFER_DESC_LEN_MASK;
+
+ mtx_lock(&r->mtx);
+ desc = &r->tx_ring[r->tx_pi].tx;
+ pi = (r->tx_pi + 1) & r->tx_ring_mask;
+ if (pi == r->tx_ci) {
+ mtx_unlock(&r->mtx);
+ return (EBUSY);
+ }
+ r->tx_cmd_ring[r->tx_pi] = cmd;
+ r->tx_pi = pi;
+
+ desc->addr_lo = htole32(cmd->data_busaddr & 0xffffffff);
+ desc->addr_hi = htole32(cmd->data_busaddr >> 32);
+ desc->eof_len = htole16((cmd->pdf << TX_BUFFER_DESC_EOF_SHIFT) |
+ cmd->req_len);
+ desc->flags_sof = cmd->pdf | TX_BUFFER_DESC_IE | TX_BUFFER_DESC_RS;
+ desc->offset = 0;
+ desc->payload_time = 0;
+
+ tb_debug(sc, DBG_TXQ, "enqueue TXdescIdx= %d cmdidx= %d len= %d, "
+ "busaddr= 0x%jx\n", r->tx_pi, cmd->idx, cmd->req_len,
+ cmd->data_busaddr);
+
+ nhi_write_reg(sc, r->tx_pici_reg, pi << TX_RING_PI_SHIFT | r->tx_ci);
+ mtx_unlock(&r->mtx);
+ return (0);
+}
+
+/*
+ * No scheduling happens for now. Ring0 scheduling is done in the TB
+ * layer.
+ */
+int
+nhi_tx_schedule(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ int error;
+
+ error = nhi_tx_enqueue(r, cmd);
+ if (error == EBUSY)
+ nhi_write_reg(r->sc, r->tx_pici_reg,
+ r->tx_pi << TX_RING_PI_SHIFT | r->tx_ci);
+ return (error);
+}
+
+int
+nhi_tx_synchronous(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ int error, count;
+
+ if ((error = nhi_tx_schedule(r, cmd)) != 0)
+ return (error);
+
+ if (cmd->flags & CMD_POLLED) {
+ error = 0;
+ count = cmd->timeout * 100;
+
+ /* Enter the loop at least once */
+ while ((count-- > 0) && (cmd->flags & CMD_REQ_COMPLETE) == 0) {
+ DELAY(10000);
+ rmb();
+ nhi_intr(r->tracker);
+ }
+ } else {
+ error = msleep(cmd, &r->mtx, PCATCH, "nhi_tx", cmd->timeout);
+ if ((error == 0) && (cmd->flags & CMD_REQ_COMPLETE) != 0)
+ error = EWOULDBLOCK;
+ }
+
+ if ((cmd->flags & CMD_REQ_COMPLETE) == 0)
+ error = ETIMEDOUT;
+
+ tb_debug(r->sc, DBG_TXQ|DBG_FULL, "tx_synchronous done waiting, "
+ "err= %d, TX_COMPLETE= %d\n", error,
+ !!(cmd->flags & CMD_REQ_COMPLETE));
+
+ if (error == ERESTART) {
+ tb_printf(r->sc, "TX command interrupted\n");
+ } else if ((error == EWOULDBLOCK) || (error == ETIMEDOUT)) {
+ tb_printf(r->sc, "TX command timed out\n");
+ } else if (error != 0) {
+ tb_printf(r->sc, "TX command failed error= %d\n", error);
+ }
+
+ return (error);
+}
+
+static int
+nhi_tx_complete(struct nhi_ring_pair *r, struct nhi_tx_buffer_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *txpdf;
+ u_int sof;
+
+ sc = r->sc;
+ sof = desc->flags_sof & TX_BUFFER_DESC_SOF_MASK;
+ tb_debug(sc, DBG_TXQ, "Recovered TX pdf= %s cmdidx= %d flags= 0x%x\n",
+ tb_get_string(sof, nhi_frame_pdf), cmd->idx, desc->flags_sof);
+
+ if ((desc->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ tb_debug(sc, DBG_TXQ,
+ "warning, TX descriptor DONE flag not set\n");
+
+ /* XXX Atomics */
+ cmd->flags |= CMD_REQ_COMPLETE;
+
+ txpdf = &r->tracker->txpdf[sof];
+ if (txpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_TXQ, "Calling PDF TX callback\n");
+ txpdf->cb(txpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_TXQ, "Unhandled TX complete %s\n",
+ tb_get_string(sof, nhi_frame_pdf));
+ nhi_free_tx_frame(r, cmd);
+
+ return (0);
+}
+
+static int
+nhi_rx_complete(struct nhi_ring_pair *r, struct nhi_rx_post_desc *desc,
+ struct nhi_cmd_frame *cmd)
+{
+ struct nhi_softc *sc;
+ struct nhi_pdf_dispatch *rxpdf;
+ u_int eof, len;
+
+ sc = r->sc;
+ eof = desc->eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+ len = desc->eof_len & RX_BUFFER_DESC_LEN_MASK;
+ tb_debug(sc, DBG_INTR|DBG_RXQ,
+ "Recovered RX pdf= %s len= %d cmdidx= %d, busaddr= 0x%jx\n",
+ tb_get_string(eof, nhi_frame_pdf), len, cmd->idx,
+ cmd->data_busaddr);
+
+ rxpdf = &r->tracker->rxpdf[eof];
+ if (rxpdf->cb != NULL) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ, "Calling PDF RX callback\n");
+ rxpdf->cb(rxpdf->context, (union nhi_ring_desc *)desc, cmd);
+ return (0);
+ }
+
+ tb_debug(sc, DBG_INTR, "Unhandled RX frame %s\n",
+ tb_get_string(eof, nhi_frame_pdf));
+
+ return (0);
+}
+
+int
+nhi_register_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+ struct nhi_dispatch *rx)
+{
+ struct nhi_intr_tracker *trkr;
+ struct nhi_pdf_dispatch *slot;
+
+ KASSERT(rp != NULL, ("ring_pair is null\n"));
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");
+
+ trkr = rp->tracker;
+ if (trkr == NULL) {
+ tb_debug(rp->sc, DBG_INTR, "Invalid tracker\n");
+ return (EINVAL);
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering TX interrupts\n");
+ if (tx != NULL) {
+ while (tx->cb != NULL) {
+ if ((tx->pdf < 0) || (tx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->txpdf[tx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = tx->cb;
+ slot->context = tx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered TX callback for PDF %d\n", tx->pdf);
+ tx++;
+ }
+ }
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "Registering RX interrupts\n");
+ if (rx != NULL) {
+ while (rx->cb != NULL) {
+ if ((rx->pdf < 0) || (rx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->rxpdf[rx->pdf];
+ if (slot->cb != NULL) {
+ tb_debug(rp->sc, DBG_INTR,
+ "Attempted to register busy callback\n");
+ return (EBUSY);
+ }
+ slot->cb = rx->cb;
+ slot->context = rx->context;
+ tb_debug(rp->sc, DBG_INTR,
+ "Registered RX callback for PDF %d\n", rx->pdf);
+ rx++;
+ }
+ }
+
+ return (0);
+}
+
+int
+nhi_deregister_pdf(struct nhi_ring_pair *rp, struct nhi_dispatch *tx,
+ struct nhi_dispatch *rx)
+{
+ struct nhi_intr_tracker *trkr;
+ struct nhi_pdf_dispatch *slot;
+
+ tb_debug(rp->sc, DBG_INTR|DBG_EXTRA, "nhi_register_pdf called\n");
+
+ trkr = rp->tracker;
+
+ if (tx != NULL) {
+ while (tx->cb != NULL) {
+ if ((tx->pdf < 0) || (tx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->txpdf[tx->pdf];
+ slot->cb = NULL;
+ slot->context = NULL;
+ tx++;
+ }
+ }
+
+ if (rx != NULL) {
+ while (rx->cb != NULL) {
+ if ((rx->pdf < 0) || (rx->pdf > 15))
+ return (EINVAL);
+ slot = &trkr->rxpdf[rx->pdf];
+ slot->cb = NULL;
+ slot->context = NULL;
+ rx++;
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * The CI and PI indexes are not read from the hardware. We track them in
+ * software, so we know where in the ring to start a scan on an interrupt.
+ * All we have to do is check for the appropriate Done bit in the next
+ * descriptor, and we know if we have reached the last descriptor that the
+ * hardware touched. This technique saves at least 2 MEMIO reads per
+ * interrupt.
+ */
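+
+/*
+ * Note: the ring index advances below use (index + 1) & mask, which
+ * assumes each ring depth is a power of two with mask = depth - 1.
+ */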
+void
+nhi_intr(void *data)
+{
+ union nhi_ring_desc *rxd;
+ struct nhi_cmd_frame *cmd;
+ struct nhi_intr_tracker *trkr = data;
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *r;
+ struct nhi_tx_buffer_desc *txd;
+ uint32_t val, old_ci;
+ u_int count;
+
+ sc = trkr->sc;
+
+ tb_debug(sc, DBG_INTR|DBG_FULL, "Interrupt @ vector %d\n",
+ trkr->vector);
+ if ((r = trkr->ring) == NULL)
+ return;
+
+ /*
+ * Process TX completions from the adapter. Only go through
+ * the ring once to prevent unbounded looping.
+ */
+ count = r->tx_ring_depth;
+ while (count-- > 0) {
+ txd = &r->tx_ring[r->tx_ci].tx;
+ if ((txd->flags_sof & TX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->tx_cmd_ring[r->tx_ci];
+ tb_debug(sc, DBG_INTR|DBG_TXQ|DBG_FULL,
+ "Found tx cmdidx= %d cmd= %p\n", r->tx_ci, cmd);
+
+ /* Pass the completion up the stack */
+ nhi_tx_complete(r, txd, cmd);
+
+ /*
+ * Advance to the next item in the ring via the cached
+ * copy of the CI. Clear the flags so we can detect
+ * a new done condition the next time the ring wraps
+ * around. Anything higher up the stack that needs this
+ * field should have already copied it.
+ *
+ * XXX is a memory barrier needed?
+ */
+ txd->flags_sof = 0;
+ r->tx_ci = (r->tx_ci + 1) & r->tx_ring_mask;
+ }
+
+ /* Process RX packets from the adapter */
+ count = r->rx_ring_depth;
+ old_ci = r->rx_ci;
+
+ while (count-- > 0) {
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Checking RX descriptor at %d\n", r->rx_pi);
+
+ /* Look up RX descriptor and cmd */
+ rxd = &r->rx_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "rx desc len= 0x%04x flags= 0x%04x\n", rxd->rxpost.eof_len,
+ rxd->rxpost.flags_sof);
+ if ((rxd->rxpost.flags_sof & RX_BUFFER_DESC_DONE) == 0)
+ break;
+ cmd = r->rx_cmd_ring[r->rx_pi];
+ tb_debug(sc, DBG_INTR|DBG_RXQ|DBG_FULL,
+ "Found rx cmdidx= %d cmd= %p\n", r->rx_pi, cmd);
+
+ /*
+ * Pass the RX frame up the stack. RX frames are re-used
+ * in-place, so their contents must be copied before this
+ * function returns.
+ *
+ * XXX Rings other than Ring0 might want to have a different
+ * re-use and re-populate policy
+ */
+ nhi_rx_complete(r, &rxd->rxpost, cmd);
+
+ /*
+ * Advance the CI and move forward to the next item in the
+ * ring via our cached copy of the PI. Clear out the
+ * length field so we can detect a new RX frame when the
+ * ring wraps around. Reset the flags of the descriptor.
+ */
+ rxd->rxpost.eof_len = 0;
+ rxd->rx.flags = RX_BUFFER_DESC_RS | RX_BUFFER_DESC_IE;
+ r->rx_ci = (r->rx_ci + 1) & r->rx_ring_mask;
+ r->rx_pi = (r->rx_pi + 1) & r->rx_ring_mask;
+ }
+
+ /*
+ * Tell the firmware about the new RX CI
+ *
+ * XXX There's a chance this will overwrite an update to the PI.
+ * Is that OK? We keep our own copy of the PI and never read it from
+ * hardware. However, will overwriting it result in a missed
+ * interrupt?
+ */
+ if (r->rx_ci != old_ci) {
+ val = r->rx_pi << RX_RING_PI_SHIFT | r->rx_ci;
+ tb_debug(sc, DBG_INTR | DBG_RXQ,
+ "Writing new RX PICI= 0x%08x\n", val);
+ nhi_write_reg(sc, r->rx_pici_reg, val);
+ }
+}
+
+static int
+nhi_setup_sysctl(struct nhi_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = NULL;
+ struct sysctl_oid *tree = NULL;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ if (ctx != NULL)
+ tree = device_get_sysctl_tree(sc->dev);
+
+ /*
+ * Not being able to create sysctls is going to hamper other
+ * parts of the driver.
+ */
+ if (tree == NULL) {
+ tb_printf(sc, "Error: cannot create sysctl nodes\n");
+ return (EINVAL);
+ }
+ sc->sysctl_tree = tree;
+ sc->sysctl_ctx = ctx;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+ SYSCTL_ADD_U16(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "max_rings", CTLFLAG_RD, &sc->max_ring_count, 0,
+ "Max number of rings available");
+ SYSCTL_ADD_U8(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "force_hcm", CTLFLAG_RD, &sc->force_hcm, 0,
+ "Force on/off the function of the host connection manager");
+
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_pci.c b/sys/dev/thunderbolt/nhi_pci.c
new file mode 100644
index 000000000000..7dacff523cef
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_pci.c
@@ -0,0 +1,529 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe interface for Thunderbolt Native Host Interface */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/endian.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pci_private.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int nhi_pci_probe(device_t);
+static int nhi_pci_attach(device_t);
+static int nhi_pci_detach(device_t);
+static int nhi_pci_suspend(device_t);
+static int nhi_pci_resume(device_t);
+static void nhi_pci_free(struct nhi_softc *);
+static int nhi_pci_allocate_interrupts(struct nhi_softc *);
+static void nhi_pci_free_interrupts(struct nhi_softc *);
+static int nhi_pci_icl_poweron(struct nhi_softc *);
+
+static device_method_t nhi_methods[] = {
+ DEVMETHOD(device_probe, nhi_pci_probe),
+ DEVMETHOD(device_attach, nhi_pci_attach),
+ DEVMETHOD(device_detach, nhi_pci_detach),
+ DEVMETHOD(device_suspend, nhi_pci_suspend),
+ DEVMETHOD(device_resume, nhi_pci_resume),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_pci_driver = {
+ "nhi",
+ nhi_methods,
+ sizeof(struct nhi_softc)
+};
+
+struct nhi_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags;
+ const char *desc;
+} nhi_identifiers[] = {
+ { VENDOR_INTEL, DEVICE_AR_2C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_B_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev B)" },
+ { VENDOR_INTEL, DEVICE_AR_DP_C_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge 4C Rev C)" },
+ { VENDOR_INTEL, DEVICE_AR_LP_NHI, 0xffff, 0xffff, NHI_TYPE_AR,
+ "Thunderbolt 3 NHI (Alpine Ridge LP 2C)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_0, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 0 (IceLake)" },
+ { VENDOR_INTEL, DEVICE_ICL_NHI_1, 0xffff, 0xffff, NHI_TYPE_ICL,
+ "Thunderbolt 3 NHI Port 1 (IceLake)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_0, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 0 (Pink Sardine)" },
+ { VENDOR_AMD, DEVICE_PINK_SARDINE_1, 0xffff, 0xffff, NHI_TYPE_USB4,
+ "USB4 NHI Port 1 (Pink Sardine)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+DRIVER_MODULE_ORDERED(nhi, pci, nhi_pci_driver, NULL, NULL,
+ SI_ORDER_ANY);
+MODULE_PNP_INFO("U16:vendor;U16:device;V16:subvendor;V16:subdevice;U32:#;D:#",
+ pci, nhi, nhi_identifiers, nitems(nhi_identifiers) - 1);
+
+static struct nhi_ident *
+nhi_find_ident(device_t dev)
+{
+ struct nhi_ident *n;
+
+ for (n = nhi_identifiers; n->vendor != 0; n++) {
+ if (n->vendor != pci_get_vendor(dev))
+ continue;
+ if (n->device != pci_get_device(dev))
+ continue;
+ if ((n->subvendor != 0xffff) &&
+ (n->subvendor != pci_get_subvendor(dev)))
+ continue;
+ if ((n->subdevice != 0xffff) &&
+ (n->subdevice != pci_get_subdevice(dev)))
+ continue;
+ return (n);
+ }
+
+ return (NULL);
+}
+
+static int
+nhi_pci_probe(device_t dev)
+{
+ struct nhi_ident *n;
+
+ if (resource_disabled("tb", 0))
+ return (ENXIO);
+ if ((n = nhi_find_ident(dev)) != NULL) {
+ device_set_desc(dev, n->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+ return (ENXIO);
+}
+
+static int
+nhi_pci_attach(device_t dev)
+{
+ devclass_t dc;
+ bus_dma_template_t t;
+ struct nhi_softc *sc;
+ struct nhi_ident *n;
+ int error = 0;
+
+ sc = device_get_softc(dev);
+ bzero(sc, sizeof(*sc));
+ sc->dev = dev;
+ n = nhi_find_ident(dev);
+ sc->hwflags = n->flags;
+ nhi_get_tunables(sc);
+
+ tb_debug(sc, DBG_INIT|DBG_FULL, "busmaster status was %s\n",
+ (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+ ? "enabled" : "disabled");
+ pci_enable_busmaster(dev);
+
+ sc->ufp = NULL;
+ if ((TB_FIND_UFP(dev, &sc->ufp) != 0) || (sc->ufp == NULL)) {
+ dc = devclass_find("tbolt");
+ if (dc != NULL)
+ sc->ufp = devclass_get_device(dc, device_get_unit(dev));
+ }
+ if (sc->ufp == NULL)
+ tb_printf(sc, "Cannot find Upstream Facing Port\n");
+ else
+ tb_printf(sc, "Upstream Facing Port is %s\n",
+ device_get_nameunit(sc->ufp));
+
+ if (NHI_IS_ICL(sc)) {
+ if ((error = nhi_pci_icl_poweron(sc)) != 0)
+ return (error);
+ }
+
+ /* Allocate BAR0 DMA registers */
+ sc->regs_rid = PCIR_BAR(0);
+ if ((sc->regs_resource = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) {
+ tb_printf(sc, "Cannot allocate PCI registers\n");
+ return (ENXIO);
+ }
+ sc->regs_btag = rman_get_bustag(sc->regs_resource);
+ sc->regs_bhandle = rman_get_bushandle(sc->regs_resource);
+
+ /* Allocate parent DMA tag */
+ bus_dma_template_init(&t, bus_get_dma_tag(dev));
+ if (bus_dma_template_tag(&t, &sc->parent_dmat) != 0) {
+ tb_printf(sc, "Cannot allocate parent DMA tag\n");
+ nhi_pci_free(sc);
+ return (ENOMEM);
+ }
+
+ error = nhi_pci_allocate_interrupts(sc);
+ if (error == 0)
+ error = nhi_attach(sc);
+ if (error != 0)
+ nhi_pci_detach(sc->dev);
+ return (error);
+}
+
+static int
+nhi_pci_detach(device_t dev)
+{
+ struct nhi_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ nhi_detach(sc);
+ nhi_pci_free(sc);
+
+ return (0);
+}
+
+static int
+nhi_pci_suspend(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+nhi_pci_resume(device_t dev)
+{
+
+ return (0);
+}
+
+static void
+nhi_pci_free(struct nhi_softc *sc)
+{
+
+ nhi_pci_free_interrupts(sc);
+
+ if (sc->parent_dmat != NULL) {
+ bus_dma_tag_destroy(sc->parent_dmat);
+ sc->parent_dmat = NULL;
+ }
+
+ if (sc->regs_resource != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->regs_rid, sc->regs_resource);
+ sc->regs_resource = NULL;
+ }
+
+ return;
+}
+
+static int
+nhi_pci_allocate_interrupts(struct nhi_softc *sc)
+{
+ int msgs, error = 0;
+
+ /* Map the Pending Bit Array and Vector Table BARs for MSI-X */
+ sc->irq_pba_rid = pci_msix_pba_bar(sc->dev);
+ sc->irq_table_rid = pci_msix_table_bar(sc->dev);
+
+ if (sc->irq_pba_rid != -1)
+ sc->irq_pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->irq_pba_rid, RF_ACTIVE);
+ if (sc->irq_table_rid != -1)
+ sc->irq_table = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
+ &sc->irq_table_rid, RF_ACTIVE);
+
+ msgs = pci_msix_count(sc->dev);
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "Counted %d MSI-X messages\n", msgs);
+ msgs = min(msgs, NHI_MSIX_MAX);
+ msgs = max(msgs, 1);
+ if (msgs != 0) {
+ tb_debug(sc, DBG_INIT|DBG_INTR, "Attempting to allocate %d "
+ "MSI-X interrupts\n", msgs);
+ error = pci_alloc_msix(sc->dev, &msgs);
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "pci_alloc_msix return msgs= %d, error= %d\n", msgs, error);
+ }
+
+ if ((error != 0) || (msgs <= 0)) {
+ tb_printf(sc, "Failed to allocate any interrupts\n");
+ msgs = 0;
+ }
+
+ sc->msix_count = msgs;
+ return (error);
+}
+
+static void
+nhi_pci_free_interrupts(struct nhi_softc *sc)
+{
+ int i;
+
+ for (i = 0; i < sc->msix_count; i++) {
+ bus_teardown_intr(sc->dev, sc->irqs[i], sc->intrhand[i]);
+ bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid[i],
+ sc->irqs[i]);
+ }
+
+ pci_release_msi(sc->dev);
+
+ if (sc->irq_table != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->irq_table_rid, sc->irq_table);
+ sc->irq_table = NULL;
+ }
+
+ if (sc->irq_pba != NULL) {
+ bus_release_resource(sc->dev, SYS_RES_MEMORY,
+ sc->irq_pba_rid, sc->irq_pba);
+ sc->irq_pba = NULL;
+ }
+
+ if (sc->intr_trackers != NULL)
+ free(sc->intr_trackers, M_NHI);
+ return;
+}
+
+int
+nhi_pci_configure_interrupts(struct nhi_softc *sc)
+{
+ struct nhi_intr_tracker *trkr;
+ int rid, i, error = 0;
+
+ nhi_pci_disable_interrupts(sc);
+
+ sc->intr_trackers = malloc(sizeof(struct nhi_intr_tracker) *
+ sc->msix_count, M_NHI, M_ZERO | M_NOWAIT);
+ if (sc->intr_trackers == NULL) {
+ tb_debug(sc, DBG_INIT, "Cannot allocate intr trackers\n");
+ return (ENOMEM);
+ }
+
+ for (i = 0; i < sc->msix_count; i++) {
+ rid = i + 1;
+ trkr = &sc->intr_trackers[i];
+ trkr->sc = sc;
+ trkr->ring = NULL;
+ trkr->vector = i;
+
+ sc->irq_rid[i] = rid;
+ sc->irqs[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
+ &sc->irq_rid[i], RF_ACTIVE);
+ if (sc->irqs[i] == NULL) {
+ tb_debug(sc, DBG_INIT,
+ "Cannot allocate interrupt RID %d\n",
+ sc->irq_rid[i]);
+ error = ENOMEM;
+ break;
+ }
+ error = bus_setup_intr(sc->dev, sc->irqs[i], INTR_TYPE_BIO |
+ INTR_MPSAFE, NULL, nhi_intr, trkr, &sc->intrhand[i]);
+ if (error) {
+ tb_debug(sc, DBG_INIT,
+ "cannot setup interrupt RID %d\n", sc->irq_rid[i]);
+ break;
+ }
+ }
+
+ tb_debug(sc, DBG_INIT, "Set up %d interrupts\n", sc->msix_count);
+
+ /*
+ * Set the interrupt throttle rate to 128us. The ITR interval field
+ * is in units of 256ns, and 0x1f4 (500) * 256ns = 128us.
+ */
+ for (i = 0; i < 16; i++)
+ nhi_write_reg(sc, NHI_ITR0 + i * 4, 0x1f4);
+
+ return (error);
+}
+
+#define NHI_SET_INTERRUPT(offset, mask, val) \
+do { \
+ reg = offset / 32; \
+ offset %= 32; \
+ ivr[reg] &= ~(mask << offset); \
+ ivr[reg] |= (val << offset); \
+} while (0)
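+
+/*
+ * Example: for ring 1 RX, offset = (1 + IVR_RX_OFFSET) * 4 = 52, which
+ * selects ivr[1] (NHI_IVR1) and bit offset 20, so vector 1 is written to
+ * IVR1 bits 20-23. Note that the macro modifies both 'reg' and 'offset'.
+ */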
+
+void
+nhi_pci_enable_interrupt(struct nhi_ring_pair *r)
+{
+ struct nhi_softc *sc = r->sc;
+ uint32_t ivr[5];
+ u_int offset, reg;
+
+ tb_debug(sc, DBG_INIT|DBG_INTR, "Enabling interrupts for ring %d\n",
+ r->ring_num);
+ /*
+ * Compute the routing between event type and MSI-X vector.
+ * 4 bits per descriptor.
+ */
+ ivr[0] = nhi_read_reg(sc, NHI_IVR0);
+ ivr[1] = nhi_read_reg(sc, NHI_IVR1);
+ ivr[2] = nhi_read_reg(sc, NHI_IVR2);
+ ivr[3] = nhi_read_reg(sc, NHI_IVR3);
+ ivr[4] = nhi_read_reg(sc, NHI_IVR4);
+
+ /* Program TX */
+ offset = (r->ring_num + IVR_TX_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+ /* Now program RX */
+ offset = (r->ring_num + IVR_RX_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, r->ring_num);
+
+ /* Last, program Nearly Empty. This one always goes to vector 15 */
+ offset = (r->ring_num + IVR_NE_OFFSET) * 4;
+ NHI_SET_INTERRUPT(offset, 0x0f, 0x0f);
+
+ nhi_write_reg(sc, NHI_IVR0, ivr[0]);
+ nhi_write_reg(sc, NHI_IVR1, ivr[1]);
+ nhi_write_reg(sc, NHI_IVR2, ivr[2]);
+ nhi_write_reg(sc, NHI_IVR3, ivr[3]);
+ nhi_write_reg(sc, NHI_IVR4, ivr[4]);
+
+ tb_debug(sc, DBG_INIT|DBG_INTR|DBG_FULL,
+ "Wrote IVR 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ ivr[0], ivr[1], ivr[2], ivr[3], ivr[4]);
+
+ /* Now do the Interrupt Mask Register, 1 bit per descriptor */
+ ivr[0] = nhi_read_reg(sc, NHI_IMR0);
+ ivr[1] = nhi_read_reg(sc, NHI_IMR1);
+
+ /* Tx */
+ offset = r->ring_num + IMR_TX_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ /* Rx */
+ offset = r->ring_num + IMR_RX_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ /* NE */
+ offset = r->ring_num + IMR_NE_OFFSET;
+ NHI_SET_INTERRUPT(offset, 0x01, 1);
+
+ nhi_write_reg(sc, NHI_IMR0, ivr[0]);
+ nhi_write_reg(sc, NHI_IMR1, ivr[1]);
+ tb_debug(sc, DBG_INIT|DBG_FULL,
+ "Wrote IMR 0x%08x 0x%08x\n", ivr[0], ivr[1]);
+}
+
+void
+nhi_pci_disable_interrupts(struct nhi_softc *sc)
+{
+
+ tb_debug(sc, DBG_INIT, "Disabling interrupts\n");
+ nhi_write_reg(sc, NHI_IMR0, 0);
+ nhi_write_reg(sc, NHI_IMR1, 0);
+ nhi_write_reg(sc, NHI_IVR0, 0);
+ nhi_write_reg(sc, NHI_IVR1, 0);
+ nhi_write_reg(sc, NHI_IVR2, 0);
+ nhi_write_reg(sc, NHI_IVR3, 0);
+ nhi_write_reg(sc, NHI_IVR4, 0);
+
+ /* Dummy reads to clear pending bits */
+ nhi_read_reg(sc, NHI_ISR0);
+ nhi_read_reg(sc, NHI_ISR1);
+}
+
+/*
+ * Icelake controllers need to be notified of power-on
+ */
+static int
+nhi_pci_icl_poweron(struct nhi_softc *sc)
+{
+ device_t dev;
+ uint32_t val;
+ int i, error = 0;
+
+ dev = sc->dev;
+ val = pci_read_config(dev, ICL_VSCAP_9, 4);
+ tb_debug(sc, DBG_INIT, "icl_poweron val= 0x%x\n", val);
+ if (val & ICL_VSCAP9_FWREADY)
+ return (0);
+
+ val = pci_read_config(dev, ICL_VSCAP_22, 4);
+ val |= ICL_VSCAP22_FORCEPWR;
+ tb_debug(sc, DBG_INIT|DBG_FULL, "icl_poweron writing 0x%x\n", val);
+ pci_write_config(dev, ICL_VSCAP_22, val, 4);
+
+ error = ETIMEDOUT;
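+ /* Poll FWREADY for up to 15 seconds, one second per attempt */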
+ for (i = 0; i < 15; i++) {
+ DELAY(1000000);
+ val = pci_read_config(dev, ICL_VSCAP_9, 4);
+ if (val & ICL_VSCAP9_FWREADY) {
+ error = 0;
+ break;
+ }
+ }
+
+ return (error);
+}
+
+/*
+ * Icelake and Alderlake controllers store their UUID in PCI config space
+ */
+int
+nhi_pci_get_uuid(struct nhi_softc *sc)
+{
+ device_t dev;
+ uint32_t val[4];
+
+ dev = sc->dev;
+ val[0] = pci_read_config(dev, ICL_VSCAP_10, 4);
+ val[1] = pci_read_config(dev, ICL_VSCAP_11, 4);
+ val[2] = 0xffffffff;
+ val[3] = 0xffffffff;
+
+ bcopy(val, &sc->uuid, 16);
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/nhi_reg.h b/sys/dev/thunderbolt/nhi_reg.h
new file mode 100644
index 000000000000..6e71f4c9646b
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_reg.h
@@ -0,0 +1,332 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 register definitions
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _NHI_REG_H
+#define _NHI_REG_H
+
+/* Some common definitions */
+#define TBT_SEC_NONE 0x00
+#define TBT_SEC_USER 0x01
+#define TBT_SEC_SECURE 0x02
+#define TBT_SEC_DP 0x03
+
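+/*
+ * GENMASK(h, l) sets bits l through h, inclusive. For example,
+ * GENMASK(15, 0) == 0x0000ffff and GENMASK(31, 16) == 0xffff0000.
+ */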
+#define GENMASK(h, l) (((~0U) >> (31 - (h))) ^ ((~0U) >> (31 - (l)) >> 1))
+
+/* PCI Vendor and Device ID's */
+#define VENDOR_INTEL 0x8086
+#define DEVICE_AR_2C_NHI 0x1575
+#define DEVICE_AR_DP_B_NHI 0x1577
+#define DEVICE_AR_DP_C_NHI 0x15d2
+#define DEVICE_AR_LP_NHI 0x15bf
+#define DEVICE_ICL_NHI_0 0x8a17
+#define DEVICE_ICL_NHI_1 0x8a0d
+
+#define VENDOR_AMD 0x1022
+#define DEVICE_PINK_SARDINE_0 0x1668
+#define DEVICE_PINK_SARDINE_1 0x1669
+
+/*
+ * MMIO Registers
+ *
+ * Ring buffer registers
+ *
+ * 32 transmit and receive rings are available, with Ring 0 being the most
+ * important one. The ring descriptors are 16 bytes each, and each set of
+ * TX and RX descriptors is packed together. Only the Ring 0 addresses are
+ * defined here; the others can be computed directly.
+ */
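+
+/*
+ * Each ring's register block spans 16 bytes (ADDR_LO through SIZE), so
+ * ring N's TX registers are expected at NHI_TX_RING_ADDR_LO + N * 16 and
+ * its RX registers at the same stride from NHI_RX_RING_ADDR_LO.
+ */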
+#define NHI_TX_RING_ADDR_LO 0x00000
+#define NHI_TX_RING_ADDR_HI 0x00004
+#define NHI_TX_RING_PICI 0x00008
+#define TX_RING_CI_MASK GENMASK(15, 0)
+#define TX_RING_PI_SHIFT 16
+#define NHI_TX_RING_SIZE 0x0000c
+
+#define NHI_RX_RING_ADDR_LO 0x08000
+#define NHI_RX_RING_ADDR_HI 0x08004
+#define NHI_RX_RING_PICI 0x08008
+#define RX_RING_CI_MASK GENMASK(15, 0)
+#define RX_RING_PI_SHIFT 16
+#define NHI_RX_RING_SIZE 0x0800c
+#define RX_RING_BUF_SIZE_SHIFT 16
+
+/*
+ * One 32-bit status register encodes one status bit per ring, indicating
+ * that the watermark from the control descriptor has been reached.
+ */
+#define NHI_RX_RING_STATUS 0x19400
+
+/*
+ * TX and RX Tables. These are 32 byte control fields for each ring.
+ * Only 8 bytes are controllable by the host software, the rest are a
+ * shadow copy by the controller of the current packet that's being
+ * processed.
+ */
+#define NHI_TX_RING_TABLE_BASE0 0x19800
+#define TX_TABLE_INTERVAL_MASK GENMASK(23,0) /* Isoch interval 256ns */
+#define TX_TABLE_ITE (1 << 27) /* Isoch tx enable */
+#define TX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define TX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define TX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define TX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_TX_RING_TABLE_TIMESTAMP 0x19804
+
+#define NHI_RX_RING_TABLE_BASE0 0x29800
+#define RX_TABLE_TX_E2E_HOPID_SHIFT (1 << 12)
+#define RX_TABLE_E2E (1 << 28) /* End-to-end flow control */
+#define RX_TABLE_NS (1 << 29) /* PCIe No Snoop */
+#define RX_TABLE_RAW (1 << 30) /* Raw (1)/frame(0) mode */
+#define RX_TABLE_VALID (1 << 31) /* Table entry is valid */
+#define NHI_RX_RING_TABLE_BASE1 0x29804
+#define RX_TABLE_EOF_MASK (1 << 0)
+#define RX_TABLE_SOF_MASK (1 << 16)
+
+/*
+ * Interrupt Control/Status Registers
+ *
+ * Interrupt Status Register (ISR)
+ * Interrupt status for RX, TX, and Nearly Empty events, one bit per
+ * MSI-X vector. Clear on read.
+ * Only 12 bits per operation, instead of 16? It presumably relates to the
+ * number of paths advertised in the HOST_CAPS register, which is wired to
+ * 0x0c for Alpine Ridge.
+ */
+#define NHI_ISR0 0x37800
+#define ISR0_TX_DESC_SHIFT 0
+#define ISR0_RX_DESC_SHIFT 12
+#define ISR0_RX_EMPTY_SHIFT 24
+#define NHI_ISR1 0x37804
+#define ISR1_RX_EMPTY_SHIFT 0
+
+/* Interrupt Status Clear, corresponds to ISR0/ISR1. Write-only */
+#define NHI_ISC0 0x37808
+#define NHI_ISC1 0x3780c
+
+/* Interrupt Status Set, corresponds to ISR0/ISR1. Write-only */
+#define NHI_ISS0 0x37810
+#define NHI_ISS1 0x37814
+
+/* Interrupt Mask, corresponds to ISR0/ISR1. Read-write */
+#define NHI_IMR0 0x38200
+#define NHI_IMR1 0x38204
+#define IMR_TX_OFFSET 0
+#define IMR_RX_OFFSET 12
+#define IMR_NE_OFFSET 24
+
+/* Interrupt Mask Clear, corresponds to ISR0/ISR1. Write-only */
+#define NHI_IMC0 0x38208
+#define NHI_IMC1 0x3820c
+
+/* Interrupt Mask Set, corresponds to ISR0/ISR1. Write-only */
+#define NHI_IMS0 0x38210
+#define NHI_IMS1 0x38214
+
+/*
+ * Interrupt Throttle Rate. One 32 bit register per interrupt,
+ * 16 registers for the 16 MSI-X interrupts. Interval is in 256ns
+ * increments.
+ */
+#define NHI_ITR0 0x38c00
+#define ITR_INTERVAL_SHIFT 0
+#define ITR_COUNTER_SHIFT 16
+
+/*
+ * Interrupt Vector Allocation.
+ * There are 12 4-bit descriptors for TX, 12 4-bit descriptors for RX,
+ * and 12 4-bit descriptors for Nearly Empty. Each descriptor holds
+ * the numerical value of the MSI-X vector that will receive the
+ * corresponding interrupt.
+ * Bits 0-31 of IVR0 and 0-15 of IVR1 are for TX
+ * Bits 16-31 of IVR1 and 0-31 of IVR2 are for RX
+ * Bits 0-31 of IVR3 and 0-15 of IVR4 are for Nearly Empty
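+ * For example, the Nearly Empty entry for ring 0 is descriptor index 24,
+ * i.e. bits 96-99 of the combined field, which lands in bits 0-3 of
+ * NHI_IVR3.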
+ */
+#define NHI_IVR0 0x38c40
+#define NHI_IVR1 0x38c44
+#define NHI_IVR2 0x38c48
+#define NHI_IVR3 0x38c4c
+#define NHI_IVR4 0x38c50
+#define IVR_TX_OFFSET 0
+#define IVR_RX_OFFSET 12
+#define IVR_NE_OFFSET 24
+
+/* Native Host Interface Control registers */
+#define NHI_HOST_CAPS 0x39640
+#define GET_HOST_CAPS_PATHS(val) ((val) & 0x3f)
+
+/*
+ * This definition comes from the Linux driver. In the USB4 spec, this
+ * register is named Host Interface Control, and the Interrupt Autoclear bit
+ * is at bit 17, not bit 2. The Linux driver doesn't seem to acknowledge
+ * this.
+ */
+#define NHI_DMA_MISC 0x39864
+#define DMA_MISC_INT_AUTOCLEAR (1 << 2)
+
+/* Thunderbolt firmware mailbox registers */
+#define TBT_INMAILDATA 0x39900
+
+#define TBT_INMAILCMD 0x39904
+#define INMAILCMD_CMD_MASK 0xff
+#define INMAILCMD_SAVE_CONNECTED 0x05
+#define INMAILCMD_DISCONNECT_PCIE 0x06
+#define INMAILCMD_DRIVER_UNLOAD_DISCONNECT 0x07
+#define INMAILCMD_DISCONNECT_PORTA 0x10
+#define INMAILCMD_DISCONNECT_PORTB 0x11
+#define INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH 0x20
+#define INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH 0x21
+#define INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH 0x22
+#define INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH 0x23
+#define INMAILCMD_CIO_RESET 0xf0
+#define INMAILCMD_ERROR (1 << 30)
+#define INMAILCMD_OPREQ (1 << 31)
+
+#define TBT_OUTMAILCMD 0x3990c
+#define OUTMAILCMD_STATUS_BUSY (1 << 12)
+#define OUTMAILCMD_OPMODE_MASK 0xf00
+#define OUTMAILCMD_OPMODE_SAFE 0x000
+#define OUTMAILCMD_OPMODE_AUTH 0x100
+#define OUTMAILCMD_OPMODE_ENDPOINT 0x200
+#define OUTMAILCMD_OPMODE_CM_FULL 0x300
+
+#define TBT_FW_STATUS 0x39944
+#define FWSTATUS_ENABLE (1 << 0)
+#define FWSTATUS_INVERT (1 << 1)
+#define FWSTATUS_START (1 << 2)
+#define FWSTATUS_CIO_RESET (1 << 30)
+#define FWSTATUS_CM_READY (1 << 31)
+
+/*
+ * Link Controller (LC) registers. These are in the Vendor Specific
+ * Extended Capability registers in PCICFG.
+ */
+#define AR_LC_MBOX_OUT 0x4c
+#define ICL_LC_MBOX_OUT 0xf0
+#define LC_MBOXOUT_VALID (1 << 0)
+#define LC_MBOXOUT_CMD_SHIFT 1
+#define LC_MBOXOUT_CMD_MASK (0x7f << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX (0x02 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_GO2SX_NOWAKE (0x03 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_TBT (0x04 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_SXEXIT_NOTBT (0x05 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_CMD_OS_UP (0x06 << LC_MBOXOUT_CMD_SHIFT)
+#define LC_MBOXOUT_DATA_SHIFT 8
+#define SET_LC_MBOXOUT_DATA(val) ((val) << LC_MBOXOUT_DATA_SHIFT)
+
+#define AR_LC_MBOX_IN 0x48
+#define ICL_LC_MBOX_IN 0xec
+#define LC_MBOXIN_DONE (1 << 0)
+#define LC_MBOXIN_CMD_SHIFT 1
+#define LC_MBOXIN_CMD_MASK (0x7f << LC_MBOXIN_CMD_SHIFT)
+#define LC_MBOXIN_DATA_SHIFT 8
+#define GET_LC_MBOXIN_DATA(val) ((val) >> LC_MBOXIN_DATA_SHIFT)
+
+/* Other Vendor Specific registers */
+#define AR_VSCAP_1C 0x1c
+#define AR_VSCAP_B0 0xb0
+
+#define ICL_VSCAP_9 0xc8
+#define ICL_VSCAP9_FWREADY (1 << 31)
+#define ICL_VSCAP_10 0xcc
+#define ICL_VSCAP_11 0xd0
+#define ICL_VSCAP_22 0xfc
+#define ICL_VSCAP22_FORCEPWR (1 << 1)
+
+/*
+ * Data structures
+ *
+ * Transmit buffer descriptor, 12.3.1. Must be aligned on a 4-byte boundary.
+ */
+struct nhi_tx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define TX_BUFFER_DESC_LEN_MASK 0xfff
+#define TX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define TX_BUFFER_DESC_SOF_MASK 0xf
+#define TX_BUFFER_DESC_IDE (1 << 4) /* Isoch DMA enable */
+#define TX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define TX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define TX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
+
+/*
+ * Receive buffer descriptor, 12.4.1. 4-byte aligned. This goes into
+ * the descriptor ring, but changes into the _post form when the
+ * controller uses it.
+ */
+struct nhi_rx_buffer_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t reserved0;
+ uint8_t flags;
+#define RX_BUFFER_DESC_RS (1 << 6) /* Request Status/Done */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t reserved1;
+} __packed;
+
+/*
+ * Receive buffer descriptor, after the controller fills it in
+ */
+struct nhi_rx_post_desc {
+ uint32_t addr_lo;
+ uint32_t addr_hi;
+ uint16_t eof_len;
+#define RX_BUFFER_DESC_LEN_MASK 0xfff
+#define RX_BUFFER_DESC_EOF_SHIFT 12
+ uint8_t flags_sof;
+#define RX_BUFFER_DESC_SOF_MASK 0xf
+#define RX_BUFFER_DESC_CRC_ERROR (1 << 4) /* CRC error (frame mode) */
+#define RX_BUFFER_DESC_DONE (1 << 5) /* Descriptor Done */
+#define RX_BUFFER_DESC_OVERRUN (1 << 6) /* Buffer overrun */
+#define RX_BUFFER_DESC_IE (1 << 7) /* Interrupt Enable */
+ uint8_t offset;
+ uint32_t payload_time;
+} __packed;
+
+union nhi_ring_desc {
+ struct nhi_tx_buffer_desc tx;
+ struct nhi_rx_buffer_desc rx;
+ struct nhi_rx_post_desc rxpost;
+ uint32_t dword[4];
+};
+
+/* Protocol Defined Field (PDF) */
+#define PDF_READ 0x01
+#define PDF_WRITE 0x02
+#define PDF_NOTIFY 0x03
+#define PDF_NOTIFY_ACK 0x04
+#define PDF_HOTPLUG 0x05
+#define PDF_XDOMAIN_REQ 0x06
+#define PDF_XDOMAIN_RESP 0x07
+/* Thunderbolt-only */
+#define PDF_CM_EVENT 0x0a
+#define PDF_CM_REQ 0x0b
+#define PDF_CM_RESP 0x0c
+
+#endif /* _NHI_REG_H */
diff --git a/sys/dev/thunderbolt/nhi_var.h b/sys/dev/thunderbolt/nhi_var.h
new file mode 100644
index 000000000000..2b9e878af47d
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_var.h
@@ -0,0 +1,277 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 / Native Host Interface driver variables
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _NHI_VAR
+#define _NHI_VAR
+
+MALLOC_DECLARE(M_NHI);
+
+#define NHI_MSIX_MAX 32
+#define NHI_RING0_TX_DEPTH 16
+#define NHI_RING0_RX_DEPTH 16
+#define NHI_DEFAULT_NUM_RINGS 1
+#define NHI_MAX_NUM_RINGS 32 /* XXX 2? */
+#define NHI_RING0_FRAME_SIZE 256
+#define NHI_MAILBOX_TIMEOUT 15
+
+#define NHI_CMD_TIMEOUT 3 /* 3 seconds */
+
+struct nhi_softc;
+struct nhi_ring_pair;
+struct nhi_intr_tracker;
+struct nhi_cmd_frame;
+struct hcm_softc;
+struct router_softc;
+
+struct nhi_cmd_frame {
+ TAILQ_ENTRY(nhi_cmd_frame) cm_link;
+ uint32_t *data;
+ bus_addr_t data_busaddr;
+ u_int req_len;
+ uint16_t flags;
+#define CMD_MAPPED (1 << 0)
+#define CMD_POLLED (1 << 1)
+#define CMD_REQ_COMPLETE (1 << 2)
+#define CMD_RESP_COMPLETE (1 << 3)
+#define CMD_RESP_OVERRUN (1 << 4)
+ uint16_t retries;
+ uint16_t pdf;
+ uint16_t idx;
+
+ void *context;
+ u_int timeout;
+
+ uint32_t *resp_buffer;
+ u_int resp_len;
+};
+
+#define NHI_RING_NAMELEN 16
+struct nhi_ring_pair {
+ struct nhi_softc *sc;
+
+ union nhi_ring_desc *tx_ring;
+ union nhi_ring_desc *rx_ring;
+
+ uint16_t tx_pi;
+ uint16_t tx_ci;
+ uint16_t rx_pi;
+ uint16_t rx_ci;
+
+ uint16_t rx_pici_reg;
+ uint16_t tx_pici_reg;
+
+ struct nhi_cmd_frame **rx_cmd_ring;
+ struct nhi_cmd_frame **tx_cmd_ring;
+
+ struct mtx mtx;
+ char name[NHI_RING_NAMELEN];
+ struct nhi_intr_tracker *tracker;
+ SLIST_ENTRY(nhi_ring_pair) ring_link;
+
+ TAILQ_HEAD(, nhi_cmd_frame) tx_head;
+ TAILQ_HEAD(, nhi_cmd_frame) rx_head;
+
+ uint16_t tx_ring_depth;
+ uint16_t tx_ring_mask;
+ uint16_t rx_ring_depth;
+ uint16_t rx_ring_mask;
+ uint16_t rx_buffer_size;
+ u_char ring_num;
+
+ bus_dma_tag_t ring_dmat;
+ bus_dmamap_t ring_map;
+ void *ring;
+ bus_addr_t tx_ring_busaddr;
+ bus_addr_t rx_ring_busaddr;
+
+ bus_dma_tag_t frames_dmat;
+ bus_dmamap_t frames_map;
+ void *frames;
+ bus_addr_t tx_frames_busaddr;
+ bus_addr_t rx_frames_busaddr;
+};
+
+/* PDF-indexed array of dispatch routines for interrupts */
+typedef void (nhi_ring_cb_t)(void *, union nhi_ring_desc *,
+ struct nhi_cmd_frame *);
+struct nhi_pdf_dispatch {
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+struct nhi_intr_tracker {
+ struct nhi_softc *sc;
+ struct nhi_ring_pair *ring;
+ struct nhi_pdf_dispatch txpdf[16];
+ struct nhi_pdf_dispatch rxpdf[16];
+ u_int vector;
+};
+
+struct nhi_softc {
+ device_t dev;
+ device_t ufp;
+ u_int debug;
+ u_int hwflags;
+#define NHI_TYPE_UNKNOWN 0x00
+#define NHI_TYPE_AR 0x01 /* Alpine Ridge */
+#define NHI_TYPE_TR 0x02 /* Titan Ridge */
+#define NHI_TYPE_ICL 0x03 /* IceLake */
+#define NHI_TYPE_MR 0x04 /* Maple Ridge */
+#define NHI_TYPE_ADL 0x05 /* AlderLake */
+#define NHI_TYPE_USB4 0x0f
+#define NHI_TYPE_MASK 0x0f
+#define NHI_MBOX_BUSY 0x10
+ u_int caps;
+#define NHI_CAP_ICM 0x01
+#define NHI_CAP_HCM 0x02
+#define NHI_USE_ICM(sc) ((sc)->caps & NHI_CAP_ICM)
+#define NHI_USE_HCM(sc) ((sc)->caps & NHI_CAP_HCM)
+ struct hcm_softc *hcm;
+ struct router_softc *root_rsc;
+
+ struct nhi_ring_pair *ring0;
+ struct nhi_intr_tracker *intr_trackers;
+
+ uint16_t path_count;
+ uint16_t max_ring_count;
+
+ struct mtx nhi_mtx;
+ SLIST_HEAD(, nhi_ring_pair) ring_list;
+
+ int msix_count;
+ struct resource *irqs[NHI_MSIX_MAX];
+ void *intrhand[NHI_MSIX_MAX];
+ int irq_rid[NHI_MSIX_MAX];
+ struct resource *irq_pba;
+ int irq_pba_rid;
+ struct resource *irq_table;
+ int irq_table_rid;
+
+ struct resource *regs_resource;
+ bus_space_handle_t regs_bhandle;
+ bus_space_tag_t regs_btag;
+ int regs_rid;
+
+ bus_dma_tag_t parent_dmat;
+
+ bus_dma_tag_t ring0_dmat;
+ bus_dmamap_t ring0_map;
+ void *ring0_frames;
+ bus_addr_t ring0_frames_busaddr;
+ struct nhi_cmd_frame *ring0_cmds;
+
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+
+ struct intr_config_hook ich;
+
+ uint8_t force_hcm;
+#define NHI_FORCE_HCM_DEFAULT 0x00
+#define NHI_FORCE_HCM_ON 0x01
+#define NHI_FORCE_HCM_OFF 0x02
+
+ uint8_t uuid[16];
+ uint8_t lc_uuid[16];
+};
+
+struct nhi_dispatch {
+ uint8_t pdf;
+ nhi_ring_cb_t *cb;
+ void *context;
+};
+
+#define NHI_IS_AR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_AR)
+#define NHI_IS_TR(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_TR)
+#define NHI_IS_ICL(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_ICL)
+#define NHI_IS_USB4(sc) (((sc)->hwflags & NHI_TYPE_MASK) == NHI_TYPE_USB4)
+
+int nhi_pci_configure_interrupts(struct nhi_softc *sc);
+void nhi_pci_enable_interrupt(struct nhi_ring_pair *r);
+void nhi_pci_disable_interrupts(struct nhi_softc *sc);
+int nhi_pci_get_uuid(struct nhi_softc *sc);
+int nhi_read_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t *val);
+int nhi_write_lc_mailbox(struct nhi_softc *, u_int reg, uint32_t val);
+
+void nhi_get_tunables(struct nhi_softc *);
+int nhi_attach(struct nhi_softc *);
+int nhi_detach(struct nhi_softc *);
+
+struct nhi_cmd_frame * nhi_alloc_tx_frame(struct nhi_ring_pair *);
+void nhi_free_tx_frame(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+
+int nhi_inmail_cmd(struct nhi_softc *, uint32_t, uint32_t);
+int nhi_outmail_cmd(struct nhi_softc *, uint32_t *);
+
+int nhi_tx_schedule(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+int nhi_tx_synchronous(struct nhi_ring_pair *, struct nhi_cmd_frame *);
+void nhi_intr(void *);
+
+int nhi_register_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+int nhi_deregister_pdf(struct nhi_ring_pair *, struct nhi_dispatch *,
+ struct nhi_dispatch *);
+
+/* Low level read/write MMIO registers */
+static __inline uint32_t
+nhi_read_reg(struct nhi_softc *sc, u_int offset)
+{
+ return (le32toh(bus_space_read_4(sc->regs_btag, sc->regs_bhandle,
+ offset)));
+}
+
+static __inline void
+nhi_write_reg(struct nhi_softc *sc, u_int offset, uint32_t val)
+{
+ bus_space_write_4(sc->regs_btag, sc->regs_bhandle, offset,
+ htole32(val));
+}
+
+static __inline struct nhi_cmd_frame *
+nhi_alloc_tx_frame_locked(struct nhi_ring_pair *r)
+{
+ struct nhi_cmd_frame *cmd;
+
+ if ((cmd = TAILQ_FIRST(&r->tx_head)) != NULL)
+ TAILQ_REMOVE(&r->tx_head, cmd, cm_link);
+ return (cmd);
+}
+
+static __inline void
+nhi_free_tx_frame_locked(struct nhi_ring_pair *r, struct nhi_cmd_frame *cmd)
+{
+ /* Clear all flags except for MAPPED */
+ cmd->flags &= CMD_MAPPED;
+ cmd->resp_buffer = NULL;
+ TAILQ_INSERT_TAIL(&r->tx_head, cmd, cm_link);
+}
+
+#endif /* _NHI_VAR */
diff --git a/sys/dev/thunderbolt/nhi_wmi.c b/sys/dev/thunderbolt/nhi_wmi.c
new file mode 100644
index 000000000000..3feba3bcd8d1
--- /dev/null
+++ b/sys/dev/thunderbolt/nhi_wmi.c
@@ -0,0 +1,198 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/uio.h>
+#include <sys/proc.h>
+#include <sys/kernel.h>
+#include <sys/bus.h>
+#include <sys/sbuf.h>
+#include <sys/module.h>
+#include <sys/sysctl.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include "acpi_wmi_if.h"
+
+ACPI_MODULE_NAME("THUNDERBOLT-NHI-WMI")
+
+#define ACPI_INTEL_THUNDERBOLT_GUID "86CCFD48-205E-4A77-9C48-2021CBEDE341"
+
+struct nhi_wmi_softc {
+ device_t dev;
+ device_t wmi_dev;
+ u_int state;
+ char *guid;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+ACPI_SERIAL_DECL(nhi_wmi, "Thunderbolt NHI WMI device");
+
+static void nhi_wmi_identify(driver_t *driver, device_t parent);
+static int nhi_wmi_probe(device_t dev);
+static int nhi_wmi_attach(device_t dev);
+static int nhi_wmi_detach(device_t dev);
+static int nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS);
+static int nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc,
+ int method, uint32_t arg0, uint32_t *retval);
+
+static device_method_t nhi_wmi_methods[] = {
+ DEVMETHOD(device_identify, nhi_wmi_identify),
+ DEVMETHOD(device_probe, nhi_wmi_probe),
+ DEVMETHOD(device_attach, nhi_wmi_attach),
+ DEVMETHOD(device_detach, nhi_wmi_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t nhi_wmi_driver = {
+ "nhi_wmi",
+ nhi_wmi_methods,
+ sizeof(struct nhi_wmi_softc)
+};
+
+DRIVER_MODULE(nhi_wmi, acpi_wmi, nhi_wmi_driver,
+ NULL, NULL);
+MODULE_DEPEND(nhi_wmi, acpi_wmi, 1, 1, 1);
+MODULE_DEPEND(nhi_wmi, acpi, 1, 1, 1);
+
+static void
+nhi_wmi_identify(driver_t *driver, device_t parent)
+{
+
+ if (acpi_disabled("nhi_wmi") != 0)
+ return;
+
+ if (device_find_child(parent, "nhi_wmi", -1) != NULL)
+ return;
+
+ if (ACPI_WMI_PROVIDES_GUID_STRING(parent,
+ ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+ return;
+
+ if (BUS_ADD_CHILD(parent, 0, "nhi_wmi", -1) == NULL)
+ device_printf(parent, "failed to add nhi_wmi\n");
+}
+
+static int
+nhi_wmi_probe(device_t dev)
+{
+
+ if (ACPI_WMI_PROVIDES_GUID_STRING(device_get_parent(dev),
+ ACPI_INTEL_THUNDERBOLT_GUID) == 0)
+ return (EINVAL);
+ device_set_desc(dev, "Thunderbolt WMI Endpoint");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+nhi_wmi_attach(device_t dev)
+{
+ struct nhi_wmi_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->wmi_dev = device_get_parent(dev);
+
+ sc->sysctl_ctx = device_get_sysctl_ctx(dev);
+ sc->sysctl_tree = device_get_sysctl_tree(dev);
+ sc->state = 0;
+ sc->guid = ACPI_INTEL_THUNDERBOLT_GUID;
+
+ SYSCTL_ADD_STRING(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ OID_AUTO, "GUID", CTLFLAG_RD, sc->guid, 0, "WMI GUID");
+ SYSCTL_ADD_PROC(sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
+ OID_AUTO, "force_power", CTLTYPE_INT|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ sc, 0, nhi_wmi_sysctl, "I", "Force controller power on");
+
+ return (0);
+}
+
+static int
+nhi_wmi_detach(device_t dev)
+{
+
+ return (0);
+}
+
+static int
+nhi_wmi_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct nhi_wmi_softc *sc;
+ int error, arg;
+
+ sc = (struct nhi_wmi_softc *)arg1;
+ arg = !!sc->state;
+ error = sysctl_handle_int(oidp, &arg, 0, req);
+ if (!error && req->newptr != NULL) {
+ ACPI_SERIAL_BEGIN(nhi_wmi);
+ error = nhi_wmi_evaluate_method(sc, 1, arg, NULL);
+ ACPI_SERIAL_END(nhi_wmi);
+ if (error == 0)
+ sc->state = arg;
+ }
+ return (error);
+}
+
+static int
+nhi_wmi_evaluate_method(struct nhi_wmi_softc *sc, int method, uint32_t arg0,
+ uint32_t *retval)
+{
+ ACPI_OBJECT *obj;
+ ACPI_BUFFER in, out;
+ uint32_t val, params[1];
+
+ params[0] = arg0;
+ in.Pointer = &params;
+ in.Length = sizeof(params);
+ out.Pointer = NULL;
+ out.Length = ACPI_ALLOCATE_BUFFER;
+
+ if (ACPI_FAILURE(ACPI_WMI_EVALUATE_CALL(sc->wmi_dev,
+ ACPI_INTEL_THUNDERBOLT_GUID, 0, method, &in, &out))) {
+ AcpiOsFree(out.Pointer);
+ return (EINVAL);
+ }
+
+ obj = out.Pointer;
+ if (obj != NULL && obj->Type == ACPI_TYPE_INTEGER)
+ val = (uint32_t)obj->Integer.Value;
+ else
+ val = 0;
+
+ AcpiOsFree(out.Pointer);
+ if (retval)
+ *retval = val;
+
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/router.c b/sys/dev/thunderbolt/router.c
new file mode 100644
index 000000000000..a3b418d77fac
--- /dev/null
+++ b/sys/dev/thunderbolt/router.c
@@ -0,0 +1,939 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Config space access for switches, ports, and devices in TB3 and USB4 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int router_alloc_cmd(struct router_softc *, struct router_command **);
+static void router_free_cmd(struct router_softc *, struct router_command *);
+static int _tb_router_attach(struct router_softc *);
+static void router_prepare_read(struct router_softc *, struct router_command *,
+ int);
+static int _tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *, struct router_command **);
+static int router_schedule(struct router_softc *, struct router_command *);
+static int router_schedule_locked(struct router_softc *,
+ struct router_command *);
+static nhi_ring_cb_t router_complete_intr;
+static nhi_ring_cb_t router_response_intr;
+static nhi_ring_cb_t router_notify_intr;
+
+#define CFG_DEFAULT_RETRIES 3
+#define CFG_DEFAULT_TIMEOUT 2
+
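+/*
+ * A route address is a string of adapter numbers, one byte per hop, with
+ * byte 0 being the first hop out of the root router. For example, route
+ * 0x0000000000000302 exits the root at adapter 2, then exits that router
+ * at adapter 3.
+ */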
+static int
+router_lookup_device(struct router_softc *sc, tb_route_t route,
+ struct router_softc **dev)
+{
+ struct router_softc *cursor;
+ uint64_t search_rt, remainder_rt, this_rt;
+ uint8_t hop;
+
+ KASSERT(dev != NULL, ("dev cannot be NULL\n"));
+
+ cursor = tb_config_get_root(sc);
+ remainder_rt = search_rt = route.lo | ((uint64_t)route.hi << 32);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "%s: Searching for router 0x%016jx\n", __func__, search_rt);
+
+ while (cursor != NULL) {
+ this_rt = TB_ROUTE(cursor);
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Comparing cursor route 0x%016jx\n", this_rt);
+ if (this_rt == search_rt)
+ break;
+
+ /* Prepare to go to the next hop node in the route */
+ hop = remainder_rt & 0xff;
+ remainder_rt >>= 8;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "hop= 0x%02x, remainder= 0x%016jx\n", hop, remainder_rt);
+
+ /*
+ * An adapter index of 0x0 is only for the host interface
+ * adapter on the root route. The only time that
+ * it's valid for searches is when you're looking for the
+ * root route, and that case has already been handled.
+ */
+ if (hop == 0) {
+ tb_debug(sc, DBG_ROUTER,
+ "End of route chain, route not found\n");
+ return (ENOENT);
+ }
+
+ if (hop > cursor->max_adap) {
+ tb_debug(sc, DBG_ROUTER,
+ "Route hop out of range for parent\n");
+ return (EINVAL);
+ }
+
+ if (cursor->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Error, router not fully initialized\n");
+ return (EINVAL);
+ }
+
+ cursor = cursor->adapters[hop];
+ }
+
+ if (cursor == NULL)
+ return (ENOENT);
+
+ *dev = cursor;
+ return (0);
+}
+
+static int
+router_insert(struct router_softc *sc, struct router_softc *parent)
+{
+ uint64_t this_rt;
+ uint8_t this_hop;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_insert called\n");
+
+ if (parent == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Parent cannot be NULL in insert\n");
+ return (EINVAL);
+ }
+
+ this_rt = TB_ROUTE(sc);
+ if (((this_rt >> (sc->depth * 8)) > 0xffULL) ||
+ (parent->depth + 1 != sc->depth)) {
+ tb_debug(sc, DBG_ROUTER, "Added route 0x%08x%08x is not a "
+ "direct child of the parent route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (EINVAL);
+ }
+
+ this_hop = (uint8_t)(this_rt >> (sc->depth * 8));
+
+ tb_debug(sc, DBG_ROUTER, "Inserting route 0x%08x%08x with last hop "
+ "of 0x%02x and depth of %d\n", sc->route.hi, sc->route.lo,
+ this_hop, sc->depth);
+
+ if (this_hop > parent->max_adap) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route is out of range of the parent\n");
+ return (EINVAL);
+ }
+
+ if (parent->adapters[this_hop] != NULL) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Inserted route already exists\n");
+ return (EEXIST);
+ }
+
+ parent->adapters[this_hop] = sc;
+
+ tb_debug(sc, DBG_ROUTER, "Added router 0x%08x%08x to parent "
+ "0x%08x%08x\n", sc->route.hi, sc->route.lo, parent->route.hi,
+ parent->route.lo);
+ return (0);
+}
+
+static int
+router_register_interrupts(struct router_softc *sc)
+{
+ struct nhi_dispatch tx[] = { { PDF_READ, router_complete_intr, sc },
+ { PDF_WRITE, router_complete_intr, sc },
+ { 0, NULL, NULL } };
+ struct nhi_dispatch rx[] = { { PDF_READ, router_response_intr, sc },
+ { PDF_WRITE, router_response_intr, sc },
+ { PDF_NOTIFY, router_notify_intr, sc },
+ { 0, NULL, NULL } };
+
+ return (nhi_register_pdf(sc->ring0, tx, rx));
+}
+
+int
+tb_router_attach(struct router_softc *parent, tb_route_t route)
+{
+ struct router_softc *sc;
+ int error;
+
+ tb_debug(parent, DBG_ROUTER|DBG_EXTRA, "tb_router_attach called\n");
+
+ sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (sc == NULL) {
+ tb_debug(parent, DBG_ROUTER, "Cannot allocate root router\n");
+ return (ENOMEM);
+ }
+
+ sc->dev = parent->dev;
+ sc->debug = parent->debug;
+ sc->ring0 = parent->ring0;
+ sc->route = route;
+ sc->nsc = parent->nsc;
+
+ mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+ TAILQ_INIT(&sc->cmd_queue);
+
+ error = router_insert(sc, parent);
+ if (error != 0) {
+ mtx_destroy(&sc->mtx);
+ free(sc, M_THUNDERBOLT);
+ return (error);
+ }
+
+ return (_tb_router_attach(sc));
+}
+
+int
+tb_router_attach_root(struct nhi_softc *nsc, tb_route_t route)
+{
+ struct router_softc *sc;
+ int error;
+
+ tb_debug(nsc, DBG_ROUTER|DBG_EXTRA, "tb_router_attach_root called\n");
+
+ sc = malloc(sizeof(*sc), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (sc == NULL) {
+ tb_debug(nsc, DBG_ROUTER, "Cannot allocate root router\n");
+ return (ENOMEM);
+ }
+
+ sc->dev = nsc->dev;
+ sc->debug = nsc->debug;
+ sc->ring0 = nsc->ring0;
+ sc->route = route;
+ sc->nsc = nsc;
+
+ mtx_init(&sc->mtx, "tbcfg", "Thunderbolt Router Config", MTX_DEF);
+ TAILQ_INIT(&sc->cmd_queue);
+
+ /*
+ * This router is semi-virtual and represents the router that's part
+ * of the NHI DMA engine. Commands can't be issued to the topology
+ * until the NHI is initialized and this router is initialized, so
+ * there's no point in registering router interrupts earlier than this,
+ * even if other routers are found first.
+ */
+ tb_config_set_root(sc);
+ error = router_register_interrupts(sc);
+ if (error) {
+ tb_router_detach(sc);
+ return (error);
+ }
+
+ error = _tb_router_attach(sc);
+ if (error)
+ return (error);
+
+ bcopy((uint8_t *)sc->uuid, nsc->uuid, 16);
+ return (0);
+}
+
+static int
+_tb_router_attach(struct router_softc *sc)
+{
+ struct tb_cfg_router *cfg;
+ uint32_t *buf;
+ int error, up;
+
+ buf = malloc(9 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_router_read_polled(sc, 0, 9, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+ up = GET_ROUTER_CS_UPSTREAM_ADAP(cfg);
+ sc->max_adap = GET_ROUTER_CS_MAX_ADAP(cfg);
+ sc->depth = GET_ROUTER_CS_DEPTH(cfg);
+ sc->uuid[0] = cfg->uuid_lo;
+ sc->uuid[1] = cfg->uuid_hi;
+ sc->uuid[2] = 0xffffffff;
+ sc->uuid[3] = 0xffffffff;
+ tb_debug(sc, DBG_ROUTER, "Router upstream_port= %d, max_port= %d, "
+ "depth= %d\n", up, sc->max_adap, sc->depth);
+ free(buf, M_THUNDERBOLT);
+
+ /* Downstream adapters are indexed in the array allocated here. */
+ sc->max_adap = MIN(sc->max_adap, ROUTER_CS1_MAX_ADAPTERS);
+ sc->adapters = malloc((1 + sc->max_adap) * sizeof(void *),
+ M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (sc->adapters == NULL) {
+ tb_debug(sc, DBG_ROUTER,
+ "Cannot allocate downstream adapter memory\n");
+ return (ENOMEM);
+ }
+
+ tb_debug(sc, DBG_ROUTER, "Router created, route 0x%08x%08x\n",
+ sc->route.hi, sc->route.lo);
+
+ return (0);
+}
+
+int
+tb_router_detach(struct router_softc *sc)
+{
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_router_deattach called\n");
+
+ if (TAILQ_FIRST(&sc->cmd_queue) != NULL)
+ return (EBUSY);
+
+ mtx_destroy(&sc->mtx);
+
+ if (sc->adapters != NULL)
+ free(sc->adapters, M_THUNDERBOLT);
+
+ free(sc, M_THUNDERBOLT);
+
+ return (0);
+}
+
+static void
+router_get_config_cb(struct router_softc *sc, struct router_command *cmd,
+ void *arg)
+{
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_get_config_cb called\n");
+
+ /*
+ * Only do the copy if the command didn't have a notify event thrown.
+ * These events serve as asynchronous exception signals, which is
+ * cumbersome.
+ */
+ if (cmd->ev == 0)
+ bcopy((uint8_t *)cmd->resp_buffer,
+ (uint8_t *)cmd->callback_arg, cmd->dwlen * 4);
+
+ mtx_lock(&sc->mtx);
+ sc->inflight_cmd = NULL;
+
+ if ((cmd->flags & RCMD_POLLED) == 0)
+ wakeup(cmd);
+ else
+ cmd->flags |= RCMD_POLL_COMPLETE;
+
+ router_schedule_locked(sc, NULL);
+ mtx_unlock(&sc->mtx);
+}
+
+int
+tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+ struct router_command *cmd;
+ int error, retries;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ router_get_config_cb, &cmd)) != 0)
+ return (error);
+
+ retries = cmd->retries;
+ mtx_lock(&sc->mtx);
+ while (retries-- >= 0) {
+ error = router_schedule_locked(sc, cmd);
+ if (error)
+ break;
+
+ error = msleep(cmd, &sc->mtx, 0, "tbtcfg", cmd->timeout * hz);
+ if (error != EWOULDBLOCK)
+ break;
+ sc->inflight_cmd = NULL;
+ tb_debug(sc, DBG_ROUTER, "Config command timed out, retries=%d\n", retries);
+ }
+
+ if (cmd->ev != 0)
+ error = EINVAL;
+ router_free_cmd(sc, cmd);
+ mtx_unlock(&sc->mtx);
+ return (error);
+}
+
+int
+tb_config_read_polled(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+ struct router_command *cmd;
+ int error, retries, timeout;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ router_get_config_cb, &cmd)) != 0)
+ return (error);
+
+ retries = cmd->retries;
+ cmd->flags |= RCMD_POLLED;
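+ /* cmd->timeout is in seconds; convert to us and poll in 100ms steps */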
+ timeout = cmd->timeout * 1000000;
+
+ mtx_lock(&sc->mtx);
+ while (retries-- >= 0) {
+ error = router_schedule_locked(sc, cmd);
+ if (error)
+ break;
+ mtx_unlock(&sc->mtx);
+
+ while (timeout > 0) {
+ DELAY(100 * 1000);
+ if ((cmd->flags & RCMD_POLL_COMPLETE) != 0)
+ break;
+ timeout -= 100000;
+ }
+
+ mtx_lock(&sc->mtx);
+ if ((cmd->flags & RCMD_POLL_COMPLETE) == 0) {
+ error = ETIMEDOUT;
+ sc->inflight_cmd = NULL;
+ tb_debug(sc, DBG_ROUTER, "Config command timed out, retries=%d\n", retries);
+ continue;
+ } else
+ break;
+ }
+
+ if (cmd->ev != 0)
+ error = EINVAL;
+ router_free_cmd(sc, cmd);
+ mtx_unlock(&sc->mtx);
+ return (error);
+}
+
+int
+tb_config_read_async(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf, void *cb)
+{
+ struct router_command *cmd;
+ int error;
+
+ if ((error = _tb_config_read(sc, space, adapter, offset, dwlen, buf,
+ cb, &cmd)) != 0)
+ return (error);
+
+ error = router_schedule(sc, cmd);
+
+ return (error);
+}
+
+static int
+_tb_config_read(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf, void *cb,
+ struct router_command **rcmd)
+{
+ struct router_command *cmd;
+ struct tb_cfg_read *msg;
+ int error;
+
+ if ((error = router_alloc_cmd(sc, &cmd)) != 0)
+ return (error);
+
+ msg = router_get_frame_data(cmd);
+ bzero(msg, sizeof(*msg));
+ msg->route.hi = sc->route.hi;
+ msg->route.lo = sc->route.lo;
+ msg->addr_attrs = TB_CONFIG_ADDR(0, space, adapter, dwlen, offset);
+ cmd->callback = cb;
+ cmd->callback_arg = buf;
+ cmd->dwlen = dwlen;
+ router_prepare_read(sc, cmd, sizeof(*msg));
+
+ if (rcmd != NULL)
+ *rcmd = cmd;
+
+ return (0);
+}
+
+int
+tb_config_write(struct router_softc *sc, u_int space, u_int adapter,
+ u_int offset, u_int dwlen, uint32_t *buf)
+{
+
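+	/* XXX Config space writes are not yet implemented. */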
+	return (0);
+}
+
+static int
+router_alloc_cmd(struct router_softc *sc, struct router_command **rcmd)
+{
+ struct router_command *cmd;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_alloc_cmd\n");
+
+ cmd = malloc(sizeof(*cmd), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (cmd == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot allocate cmd/response\n");
+ return (ENOMEM);
+ }
+
+ cmd->nhicmd = nhi_alloc_tx_frame(sc->ring0);
+ if (cmd->nhicmd == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot allocate command frame\n");
+ free(cmd, M_THUNDERBOLT);
+ return (EBUSY);
+ }
+
+ cmd->sc = sc;
+ *rcmd = cmd;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Allocated command with index %d\n",
+ cmd->nhicmd->idx);
+
+ return (0);
+}
+
+static void
+router_free_cmd(struct router_softc *sc, struct router_command *cmd)
+{
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_free_cmd\n");
+
+ if (cmd == NULL)
+ return;
+
+ if (cmd->nhicmd != NULL) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Freeing nhi command %d\n",
+ cmd->nhicmd->idx);
+ nhi_free_tx_frame(sc->ring0, cmd->nhicmd);
+ }
+ free(cmd, M_THUNDERBOLT);
+
+ return;
+}
+
+static void
+router_prepare_read(struct router_softc *sc, struct router_command *cmd,
+ int len)
+{
+ struct nhi_cmd_frame *nhicmd;
+ uint32_t *msg;
+ int msglen, i;
+
+ KASSERT(cmd != NULL, ("cmd cannot be NULL\n"));
+ KASSERT(len != 0, ("Invalid zero-length command\n"));
+ KASSERT(len % 4 == 0, ("Message must be 32bit padded\n"));
+
+ nhicmd = cmd->nhicmd;
+ msglen = (len - 4) / 4;
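+	/* Byte-swap the payload to big-endian and append the CRC dword. */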
+ for (i = 0; i < msglen; i++)
+ nhicmd->data[i] = htobe32(nhicmd->data[i]);
+
+ msg = (uint32_t *)nhicmd->data;
+ msg[msglen] = htobe32(tb_calc_crc(nhicmd->data, len-4));
+
+ nhicmd->pdf = PDF_READ;
+ nhicmd->req_len = len;
+
+ nhicmd->timeout = NHI_CMD_TIMEOUT;
+ nhicmd->retries = 0;
+ nhicmd->resp_buffer = (uint32_t *)cmd->resp_buffer;
+ nhicmd->resp_len = (cmd->dwlen + 3) * 4;
+ nhicmd->context = cmd;
+
+ cmd->retries = CFG_DEFAULT_RETRIES;
+ cmd->timeout = CFG_DEFAULT_TIMEOUT;
+
+ return;
+}
+
+static int
+router_schedule(struct router_softc *sc, struct router_command *cmd)
+{
+ int error;
+
+ mtx_lock(&sc->mtx);
+ error = router_schedule_locked(sc, cmd);
+ mtx_unlock(&sc->mtx);
+
+	return (error);
+}
+
+static int
+router_schedule_locked(struct router_softc *sc, struct router_command *cmd)
+{
+ struct nhi_cmd_frame *nhicmd;
+ int error;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_schedule\n");
+
+ if (cmd != NULL)
+ TAILQ_INSERT_TAIL(&sc->cmd_queue, cmd, link);
+
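+	/*
+	 * Only one command may be in flight at a time.  If the ring is
+	 * busy, requeue at the head and let the next completion kick the
+	 * queue again.
+	 */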
+ while ((sc->inflight_cmd == NULL) &&
+ ((cmd = TAILQ_FIRST(&sc->cmd_queue)) != NULL)) {
+ TAILQ_REMOVE(&sc->cmd_queue, cmd, link);
+ nhicmd = cmd->nhicmd;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Scheduling command with index %d\n", nhicmd->idx);
+ sc->inflight_cmd = cmd;
+ if ((error = nhi_tx_schedule(sc->ring0, nhicmd)) != 0) {
+ tb_debug(sc, DBG_ROUTER, "nhi ring error "
+ "%d\n", error);
+ sc->inflight_cmd = NULL;
+ if (error == EBUSY) {
+ TAILQ_INSERT_HEAD(&sc->cmd_queue, cmd, link);
+ error = 0;
+ }
+ break;
+ }
+ }
+
+ return (error);
+}
+
+static void
+router_complete_intr(void *context, union nhi_ring_desc *ring,
+ struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc;
+ struct router_command *cmd;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+ KASSERT(nhicmd != NULL, ("nhicmd cannot be NULL\n"));
+
+ cmd = (struct router_command *)(nhicmd->context);
+ sc = cmd->sc;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_complete_intr called\n");
+
+ if (nhicmd->flags & CMD_RESP_COMPLETE) {
+ cmd->callback(sc, cmd, cmd->callback_arg);
+ }
+
+ return;
+}
+
+static void
+router_response_intr(void *context, union nhi_ring_desc *ring,
+    struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc, *dev;
+ struct tb_cfg_read_resp *read;
+ struct tb_cfg_write_resp *write;
+ struct router_command *cmd;
+ tb_route_t route;
+ u_int error, i, eof, len;
+ uint32_t attrs;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_response_intr called\n");
+
+ eof = ring->rxpost.eof_len >> RX_BUFFER_DESC_EOF_SHIFT;
+
+ if (eof == PDF_WRITE) {
+ write = (struct tb_cfg_write_resp *)nhicmd->data;
+ route.hi = be32toh(write->route.hi);
+ route.lo = be32toh(write->route.lo);
+ } else {
+ read = (struct tb_cfg_read_resp *)nhicmd->data;
+ route.hi = be32toh(read->route.hi);
+ route.lo = be32toh(read->route.lo);
+ attrs = be32toh(read->addr_attrs);
+ len = (attrs & TB_CFG_SIZE_MASK) >> TB_CFG_SIZE_SHIFT;
+ }
+
+ /* XXX Is this a problem? */
+ if ((route.hi & 0x80000000) == 0)
+ tb_debug(sc, DBG_ROUTER, "Invalid route\n");
+ route.hi &= ~0x80000000;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Looking up route 0x%08x%08x\n",
+ route.hi, route.lo);
+
+ error = router_lookup_device(sc, route, &dev);
+ if (error != 0 || dev == NULL) {
+ tb_debug(sc, DBG_ROUTER, "Cannot find device, error= %d\n",
+ error);
+ return;
+ }
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "Found device %s route 0x%08x%08x, "
+ "inflight_cmd= %p\n", device_get_nameunit(dev->dev), dev->route.hi,
+ dev->route.lo, dev->inflight_cmd);
+
+ cmd = dev->inflight_cmd;
+ if (cmd == NULL) {
+ tb_debug(dev, DBG_ROUTER, "Null inflight cmd\n");
+ return;
+ }
+
+ if (eof == PDF_READ) {
+ for (i = 0; i < len; i++)
+ cmd->nhicmd->resp_buffer[i] = be32toh(read->data[i]);
+ }
+
+ cmd->nhicmd->flags |= CMD_RESP_COMPLETE;
+ if (cmd->nhicmd->flags & CMD_REQ_COMPLETE) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "TX_COMPLETE set\n");
+ cmd->callback(dev, cmd, cmd->callback_arg);
+ }
+
+ return;
+}
+
+static void
+router_notify_intr(void *context, union nhi_ring_desc *ring,
+    struct nhi_cmd_frame *nhicmd)
+{
+ struct router_softc *sc;
+ struct router_command *cmd;
+ struct tb_cfg_notify event;
+ u_int ev, adap;
+
+ KASSERT(context != NULL, ("context cannot be NULL\n"));
+
+ sc = (struct router_softc *)context;
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "router_notify_intr called\n");
+
+ event.route.hi = be32toh(nhicmd->data[0]);
+ event.route.lo = be32toh(nhicmd->data[1]);
+ event.event_adap = be32toh(nhicmd->data[2]);
+
+ ev = GET_NOTIFY_EVENT(&event);
+ adap = GET_NOTIFY_ADAPTER(&event);
+
+ tb_debug(sc, DBG_ROUTER, "Event route 0x%08x%08x adap %d code %s\n",
+ event.route.hi, event.route.lo, adap,
+ tb_get_string(ev, tb_notify_event));
+
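+	/*
+	 * Error events act as asynchronous exceptions for the command in
+	 * flight; record the event and complete the command via its
+	 * callback.
+	 */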
+ switch (ev) {
+ case TB_CFG_ERR_CONN:
+ case TB_CFG_ERR_LINK:
+ case TB_CFG_ERR_ADDR:
+ case TB_CFG_ERR_ADP:
+ case TB_CFG_ERR_ENUM:
+ case TB_CFG_ERR_NUA:
+ case TB_CFG_ERR_LEN:
+ case TB_CFG_ERR_HEC:
+ case TB_CFG_ERR_FC:
+ case TB_CFG_ERR_PLUG:
+ case TB_CFG_ERR_LOCK:
+ case TB_CFG_HP_ACK:
+ case TB_CFG_DP_BW:
+ if (sc->inflight_cmd != NULL) {
+ cmd = sc->inflight_cmd;
+ cmd->ev = ev;
+ cmd->callback(sc, cmd, cmd->callback_arg);
+ }
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
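+/*
+ * Read the capability header at cap->next_cap and advance the iterator.
+ * Router config space and VSC capabilities carry extended headers, so a
+ * second read is needed to pick up their length and next-cap fields.
+ */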
+int
+tb_config_next_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+ union tb_cfg_cap *tbcap;
+ uint32_t *buf;
+ uint16_t current;
+ int error;
+
+ KASSERT(cap != NULL, ("cap cannot be NULL\n"));
+ KASSERT(cap->next_cap != 0, ("next_cap cannot be 0\n"));
+
+	buf = malloc(sizeof(*tbcap), M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+	if (buf == NULL)
+		return (ENOMEM);
+
+ current = cap->next_cap;
+ error = tb_config_read(sc, cap->space, cap->adap, current, 1, buf);
+ if (error)
+ return (error);
+
+ tbcap = (union tb_cfg_cap *)buf;
+ cap->cap_id = tbcap->hdr.cap_id;
+ cap->next_cap = tbcap->hdr.next_cap;
+ cap->current_cap = current;
+
+ if ((cap->space != TB_CFG_CS_ROUTER) &&
+ (tbcap->hdr.cap_id != TB_CFG_CAP_VSC)) {
+ free(buf, M_THUNDERBOLT);
+ return (0);
+ }
+
+	error = tb_config_read(sc, cap->space, cap->adap, current, 2, buf);
+ if (error) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cap->vsc_id = tbcap->vsc.vsc_id;
+ cap->vsc_len = tbcap->vsc.len;
+ if (tbcap->vsc.len == 0) {
+ cap->next_cap = tbcap->vsec.vsec_next_cap;
+ cap->vsec_len = tbcap->vsec.vsec_len;
+ }
+
+ free(buf, M_THUNDERBOLT);
+ return (0);
+}
+
+int
+tb_config_find_cap(struct router_softc *sc, struct router_cfg_cap *cap)
+{
+ u_int cap_id, vsc_id;
+ int error;
+
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA, "tb_config_find_cap called\n");
+
+ cap_id = cap->cap_id;
+ vsc_id = cap->vsc_id;
+
+	error = 0;
+	cap->cap_id = cap->vsc_id = 0;
+ while ((cap->cap_id != cap_id) || (cap->vsc_id != vsc_id)) {
+ tb_debug(sc, DBG_ROUTER|DBG_EXTRA,
+ "Looking for cap %d at offset %d\n", cap->cap_id,
+ cap->next_cap);
+ if ((cap->next_cap == 0) ||
+ (cap->next_cap > TB_CFG_CAP_OFFSET_MAX))
+ return (EINVAL);
+ error = tb_config_next_cap(sc, cap);
+ if (error)
+ break;
+ }
+
+	return (error);
+}
+
+int
+tb_config_find_router_cap(struct router_softc *sc, u_int cap, u_int vsc,
+    u_int *offset)
+{
+ struct router_cfg_cap rcap;
+ struct tb_cfg_router *cfg;
+ uint32_t *buf;
+ int error;
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_router_read(sc, 0, 5, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_router *)buf;
+ rcap.space = TB_CFG_CS_ROUTER;
+ rcap.adap = 0;
+ rcap.next_cap = GET_ROUTER_CS_NEXT_CAP(cfg);
+ rcap.cap_id = cap;
+ rcap.vsc_id = vsc;
+ error = tb_config_find_cap(sc, &rcap);
+ if (error == 0)
+ *offset = rcap.current_cap;
+
+ free(buf, M_THUNDERBOLT);
+ return (error);
+}
+
+int
+tb_config_find_router_vsc(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSC, cap, offset));
+}
+
+int
+tb_config_find_router_vsec(struct router_softc *sc, u_int cap, u_int *offset)
+{
+
+ return (tb_config_find_router_cap(sc, TB_CFG_CAP_VSEC, cap, offset));
+}
+
+int
+tb_config_find_adapter_cap(struct router_softc *sc, u_int adap, u_int cap,
+    u_int *offset)
+{
+ struct router_cfg_cap rcap;
+ struct tb_cfg_adapter *cfg;
+ uint32_t *buf;
+ int error;
+
+ buf = malloc(8 * 4, M_THUNDERBOLT, M_NOWAIT|M_ZERO);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ error = tb_config_adapter_read(sc, adap, 0, 8, buf);
+ if (error != 0) {
+ free(buf, M_THUNDERBOLT);
+ return (error);
+ }
+
+ cfg = (struct tb_cfg_adapter *)buf;
+ rcap.space = TB_CFG_CS_ADAPTER;
+ rcap.adap = adap;
+ rcap.next_cap = GET_ADP_CS_NEXT_CAP(cfg);
+ rcap.cap_id = cap;
+ rcap.vsc_id = 0;
+ error = tb_config_find_cap(sc, &rcap);
+ if (error == 0)
+ *offset = rcap.current_cap;
+
+ free(buf, M_THUNDERBOLT);
+ return (error);
+}
+
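+/*
+ * Fetch the 16-byte link controller UUID by locating the LC VSEC
+ * capability and reading four dwords from its UUID registers.
+ */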
+int
+tb_config_get_lc_uuid(struct router_softc *rsc, uint8_t *uuid)
+{
+ u_int error, offset;
+ uint32_t buf[8];
+
+ bzero(buf, sizeof(buf));
+
+ error = tb_config_find_router_vsec(rsc, TB_CFG_VSEC_LC, &offset);
+ if (error != 0) {
+ tb_debug(rsc, DBG_ROUTER, "Error finding LC registers: %d\n",
+ error);
+ return (error);
+ }
+
+ error = tb_config_router_read(rsc, offset + TB_LC_UUID, 4, buf);
+ if (error != 0) {
+ tb_debug(rsc, DBG_ROUTER, "Error fetching UUID: %d\n", error);
+ return (error);
+ }
+
+ bcopy(buf, uuid, 16);
+ return (0);
+}
diff --git a/sys/dev/thunderbolt/router_var.h b/sys/dev/thunderbolt/router_var.h
new file mode 100644
index 000000000000..8366ede852e7
--- /dev/null
+++ b/sys/dev/thunderbolt/router_var.h
@@ -0,0 +1,242 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _ROUTER_VAR_H
+#define _ROUTER_VAR_H
+
+struct router_softc;
+struct router_command;
+struct router_topo;
+
+typedef void (*router_callback_t)(struct router_softc *,
+ struct router_command *, void *);
+
+struct router_command {
+ TAILQ_ENTRY(router_command) link;
+ struct router_softc *sc;
+ struct nhi_cmd_frame *nhicmd;
+ u_int flags;
+#define RCMD_POLLED (1 << 0)
+#define RCMD_POLL_COMPLETE (1 << 1)
+ int resp_len;
+ router_callback_t callback;
+ void *callback_arg;
+ u_int dwlen;
+ u_int timeout;
+ int retries;
+ u_int ev;
+ uint8_t resp_buffer[NHI_RING0_FRAME_SIZE];
+};
+
+struct router_softc {
+ TAILQ_ENTRY(router_softc) link;
+ u_int debug;
+ tb_route_t route;
+ device_t dev;
+ struct nhi_softc *nsc;
+
+ struct mtx mtx;
+ struct nhi_ring_pair *ring0;
+ TAILQ_HEAD(,router_command) cmd_queue;
+
+ struct router_command *inflight_cmd;
+
+ uint8_t depth;
+ uint8_t max_adap;
+
+ struct router_softc **adapters;
+
+ uint32_t uuid[4];
+};
+
+struct router_cfg_cap {
+ uint16_t current_cap;
+ uint16_t next_cap;
+ uint32_t space;
+ uint8_t adap;
+ uint8_t cap_id;
+ uint8_t vsc_id;
+ uint8_t vsc_len;
+ uint16_t vsec_len;
+};
+
+int tb_router_attach(struct router_softc *, tb_route_t);
+int tb_router_attach_root(struct nhi_softc *, tb_route_t);
+int tb_router_detach(struct router_softc *);
+int tb_config_read(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_polled(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_read_async(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *, void *);
+int tb_config_write(struct router_softc *, u_int, u_int, u_int, u_int,
+ uint32_t *);
+int tb_config_next_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_cap(struct router_softc *, struct router_cfg_cap *);
+int tb_config_find_router_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_find_router_vsc(struct router_softc *, u_int, u_int *);
+int tb_config_find_router_vsec(struct router_softc *, u_int, u_int *);
+int tb_config_find_adapter_cap(struct router_softc *, u_int, u_int, u_int *);
+int tb_config_get_lc_uuid(struct router_softc *, uint8_t *);
+
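+/*
+ * Pack the fields of a config request address dword: sequence number,
+ * config space, adapter number, dword length, and dword offset.
+ */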
+#define TB_CONFIG_ADDR(seq, space, adapter, dwlen, offset) \
+	(((seq) << TB_CFG_SEQ_SHIFT) | (space) | \
+	((adapter) << TB_CFG_ADAPTER_SHIFT) | ((dwlen) << TB_CFG_SIZE_SHIFT) | \
+	((offset) & TB_CFG_ADDR_MASK))
+
+#define TB_ROUTE(router) \
+	(((uint64_t)(router)->route.hi << 32) | (router)->route.lo)
+
+static __inline void *
+router_get_frame_data(struct router_command *cmd)
+{
+ return ((void *)cmd->nhicmd->data);
+}
+
+/*
+ * Read the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_read(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+static __inline int
+tb_config_router_read_polled(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+	return (tb_config_read_polled(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen,
+	    buf));
+}
+
+/*
+ * Write the Router config space for the router referred to in the softc.
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_router_write(struct router_softc *sc, u_int addr, u_int dwlen,
+ uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ROUTER, 0, addr, dwlen, buf));
+}
+
+/*
+ * Read the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_adapter_read(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Write the Adapter config space for the router referred to in the softc.
+ * adap - Adapter number
+ * addr - The dword offset in the config space
+ * dwlen - The number of dwords
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_adapter_write(struct router_softc *sc, u_int adap, u_int addr,
+ u_int dwlen, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_ADAPTER, adap, addr, dwlen, buf));
+}
+
+/*
+ * Read the Path config space for the router referred to in the softc.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent paths to read
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_path_read(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+ num * 2, buf));
+}
+
+/*
+ * Write the Path config space for the router referred to in the softc.
+ * adap - Adapter number
+ * hopid - HopID of the path
+ * num - The number of adjacent paths to write
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_path_write(struct router_softc *sc, u_int adap, u_int hopid,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_write(sc, TB_CFG_CS_PATH, adap, hopid * 2,
+ num * 2, buf));
+}
+
+/*
+ * Read the Counters config space for the router referred to in the softc.
+ * Counters come in sets of 3 dwords.
+ * adap - Adapter number
+ * set - The counter set index
+ * num - The number of adjacent counter sets to read
+ * buf - must be large enough to hold the number of dwords requested.
+ */
+static __inline int
+tb_config_counters_read(struct router_softc *sc, u_int adap, u_int set,
+ u_int num, uint32_t *buf)
+{
+ return (tb_config_read(sc, TB_CFG_CS_COUNTERS, adap, set * 3,
+ num * 3, buf));
+}
+
+static __inline void
+tb_config_set_root(struct router_softc *sc)
+{
+ sc->nsc->root_rsc = sc;
+}
+
+static __inline void *
+tb_config_get_root(struct router_softc *sc)
+{
+ return (sc->nsc->root_rsc);
+}
+
+#endif /* _ROUTER_VAR_H */
diff --git a/sys/dev/thunderbolt/tb_acpi_pcib.c b/sys/dev/thunderbolt/tb_acpi_pcib.c
new file mode 100644
index 000000000000..947df3688535
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_acpi_pcib.c
@@ -0,0 +1,181 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* ACPI identified PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+static int tb_acpi_pcib_probe(device_t);
+static int tb_acpi_pcib_attach(device_t);
+static int tb_acpi_pcib_detach(device_t);
+
+/* ACPI attachment for Thunderbolt Bridges */
+
+static int
+tb_acpi_pcib_probe(device_t dev)
+{
+ char desc[TB_DESC_MAX], desc1[TB_DESC_MAX];
+ int val;
+
+ if (pci_get_class(dev) != PCIC_BRIDGE ||
+ pci_get_subclass(dev) != PCIS_BRIDGE_PCI ||
+ acpi_disabled("pci"))
+ return (ENXIO);
+ if (acpi_get_handle(dev) == NULL)
+ return (ENXIO);
+ if (pci_cfgregopen() == 0)
+ return (ENXIO);
+
+	/*
+	 * On success, claim the device at a slightly higher probe priority
+	 * than the conventional Thunderbolt PCIb driver so that the ACPI
+	 * attachment wins.
+	 */
+ if ((val = tb_pcib_probe_common(dev, desc)) < 0) {
+ val++;
+ snprintf(desc1, TB_DESC_MAX, "ACPI %s", desc);
+ device_set_desc_copy(dev, desc1);
+ }
+
+ return (val);
+}
+
+static int
+tb_acpi_pcib_attach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ error = tb_pcib_attach_common(dev);
+ if (error)
+ return (error);
+
+ sc = device_get_softc(dev);
+ sc->ap_handle = acpi_get_handle(dev);
+ KASSERT(sc->ap_handle != NULL, ("ACPI handle cannot be NULL\n"));
+
+ /* Execute OSUP in case the BIOS didn't */
+ if (TB_IS_ROOT(sc)) {
+ ACPI_OBJECT_LIST list;
+ ACPI_OBJECT arg;
+ ACPI_BUFFER buf;
+ ACPI_STATUS s;
+
+ tb_debug(sc, DBG_BRIDGE, "Executing OSUP\n");
+
+ list.Pointer = &arg;
+ list.Count = 1;
+ arg.Integer.Value = 0;
+ arg.Type = ACPI_TYPE_INTEGER;
+ buf.Length = ACPI_ALLOCATE_BUFFER;
+ buf.Pointer = NULL;
+
+ s = AcpiEvaluateObject(sc->ap_handle, "\\_GPE.OSUP", &list,
+ &buf);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL,
+ "ACPI returned %d, buf= %p\n", s, buf.Pointer);
+		if (buf.Pointer != NULL) {
+			tb_debug(sc, DBG_BRIDGE|DBG_FULL, "buffer= 0x%x\n",
+			    *(uint32_t *)buf.Pointer);
+			AcpiOsFree(buf.Pointer);
+		}
+ }
+
+ pcib_attach_common(dev);
+ acpi_pcib_fetch_prt(dev, &sc->ap_prt);
+
+ return (pcib_attach_child(dev));
+}
+
+static int
+tb_acpi_pcib_detach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+ tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_acpi_pcib_detach\n");
+
+ error = pcib_detach(dev);
+ if (error == 0)
+ AcpiOsFree(sc->ap_prt.Pointer);
+ return (error);
+}
+
+static device_method_t tb_acpi_pcib_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, tb_acpi_pcib_probe),
+ DEVMETHOD(device_attach, tb_acpi_pcib_attach),
+ DEVMETHOD(device_detach, tb_acpi_pcib_detach),
+
+ /* Thunderbolt interface is inherited */
+
+ DEVMETHOD_END
+};
+
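+/*
+ * Inherit from both the generic pcib driver and the Thunderbolt pcib
+ * driver so that ACPI-discovered bridges pick up the TB interface
+ * methods.
+ */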
+DEFINE_CLASS_2(tbolt, tb_acpi_pcib_driver, tb_acpi_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver, tb_pcib_driver);
+DRIVER_MODULE_ORDERED(tb_acpi_pcib, pci, tb_acpi_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_acpi_pcib, acpi, 1, 1, 1);
diff --git a/sys/dev/thunderbolt/tb_debug.c b/sys/dev/thunderbolt/tb_debug.c
new file mode 100644
index 000000000000..f455ee72e9f6
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.c
@@ -0,0 +1,334 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sbuf.h>
+#include <sys/sysctl.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+
+tb_string_t nhi_outmailcmd_opmode[] = {
+ { 0x000, "Safe Mode" },
+ { 0x100, "Authentication Mode" },
+ { 0x200, "Endpoint Mode" },
+ { 0x300, "Connection Manager Fully Functional" },
+ { 0, NULL }
+};
+
+tb_string_t nhi_frame_pdf[] = {
+ { 0x01, "PDF_READ" },
+ { 0x02, "PDF_WRITE" },
+ { 0x03, "PDF_NOTIFY" },
+ { 0x04, "PDF_NOTIFY_ACK" },
+ { 0x05, "PDF_HOTPLUG" },
+ { 0x06, "PDF_XDOMAIN_REQ" },
+ { 0x07, "PDF_XDOMAIN_RESP" },
+ { 0x0a, "PDF_CM_EVENT" },
+ { 0x0b, "PDF_CM_REQ" },
+ { 0x0c, "PDF_CM_RESP" },
+ { 0, NULL }
+};
+
+tb_string_t tb_security_level[] = {
+ { TBSEC_NONE, "None" },
+ { TBSEC_USER, "User" },
+ { TBSEC_SECURE, "Secure Authorization" },
+ { TBSEC_DP, "Display Port" },
+ { TBSEC_UNKNOWN,"Unknown" },
+ { 0, NULL }
+};
+
+tb_string_t tb_mbox_connmode[] = {
+ { INMAILCMD_SETMODE_CERT_TB_1ST_DEPTH, "Certified/1st" },
+ { INMAILCMD_SETMODE_ANY_TB_1ST_DEPTH, "Any/1st" },
+ { INMAILCMD_SETMODE_CERT_TB_ANY_DEPTH, "Certified/Any" },
+ { INMAILCMD_SETMODE_ANY_TB_ANY_DEPTH, "Any/Any" },
+ { 0, NULL }
+};
+
+tb_string_t tb_device_power[] = {
+ { 0x0, "Self-powered" },
+ { 0x1, "Normal power" },
+ { 0x2, "High power" },
+ { 0x3, "Unknown power draw" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_code[] = {
+ { 0x03, "DEVCONN" },
+ { 0x04, "DISCONN" },
+ { 0x05, "DPCONN" },
+ { 0x06, "DOMCONN" },
+ { 0x07, "DOMDISCONN" },
+ { 0x08, "DPCHANGE" },
+ { 0x09, "I2C" },
+ { 0x0a, "RTD3" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_type[] = {
+ { ADP_CS2_UNSUPPORTED, "Unsupported Adapter" },
+ { ADP_CS2_LANE, "Lane Adapter" },
+ { ADP_CS2_HOSTIF, "Host Interface Adapter" },
+ { ADP_CS2_PCIE_DFP, "Downstream PCIe Adapter" },
+ { ADP_CS2_PCIE_UFP, "Upstream PCIe Adapter" },
+ { ADP_CS2_DP_OUT, "DP OUT Adapter" },
+ { ADP_CS2_DP_IN, "DP IN Adapter" },
+ { ADP_CS2_USB3_DFP, "Downstream USB3 Adapter" },
+ { ADP_CS2_USB3_UFP, "Upstream USB3 Adapter" },
+ { 0, NULL }
+};
+
+tb_string_t tb_adapter_state[] = {
+ { CAP_LANE_STATE_DISABLE, "Disabled" },
+ { CAP_LANE_STATE_TRAINING, "Training" },
+ { CAP_LANE_STATE_CL0, "CL0" },
+ { CAP_LANE_STATE_TXCL0, "TX CL0s" },
+ { CAP_LANE_STATE_RXCL0, "RX CL0s" },
+ { CAP_LANE_STATE_CL1, "CL1" },
+ { CAP_LANE_STATE_CL2, "CL2" },
+ { CAP_LANE_STATE_CLD, "CLd" },
+ { 0, NULL }
+};
+
+tb_string_t tb_notify_event[] = {
+ { TB_CFG_ERR_CONN, "Connection error" },
+ { TB_CFG_ERR_LINK, "Link error" },
+ { TB_CFG_ERR_ADDR, "Addressing error" },
+ { TB_CFG_ERR_ADP, "Invalid adapter" },
+ { TB_CFG_ERR_ENUM, "Enumeration error" },
+ { TB_CFG_ERR_NUA, "Adapter not enumerated" },
+ { TB_CFG_ERR_LEN, "Invalid request length" },
+ { TB_CFG_ERR_HEC, "Invalid packet header" },
+ { TB_CFG_ERR_FC, "Flow control error" },
+ { TB_CFG_ERR_PLUG, "Hot plug error" },
+ { TB_CFG_ERR_LOCK, "Adapter locked" },
+ { TB_CFG_HP_ACK, "Hotplug acknowledgement" },
+ { TB_CFG_DP_BW, "Display port bandwidth change" },
+ { 0, NULL }
+};
+
+const char *
+tb_get_string(uintmax_t key, tb_string_t *table)
+{
+
+ if (table == NULL)
+ return ("<null>");
+
+ while (table->value != NULL) {
+ if (table->key == key)
+ return (table->value);
+ table++;
+ }
+
+ return ("<unknown>");
+}
+
+static struct tb_debug_string {
+ char *name;
+ int flag;
+} tb_debug_strings[] = {
+ {"info", DBG_INFO},
+ {"init", DBG_INIT},
+ {"info", DBG_INFO},
+ {"rxq", DBG_RXQ},
+ {"txq", DBG_TXQ},
+ {"intr", DBG_INTR},
+ {"tb", DBG_TB},
+ {"mbox", DBG_MBOX},
+ {"bridge", DBG_BRIDGE},
+ {"cfg", DBG_CFG},
+ {"router", DBG_ROUTER},
+ {"port", DBG_PORT},
+ {"hcm", DBG_HCM},
+ {"extra", DBG_EXTRA},
+ {"noisy", DBG_NOISY},
+ {"full", DBG_FULL}
+};
+
+enum tb_debug_level_combiner {
+ COMB_NONE,
+ COMB_ADD,
+ COMB_SUB
+};
+
+int
+tb_debug_sysctl(SYSCTL_HANDLER_ARGS)
+{
+ struct sbuf *sbuf;
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ struct tb_debug_string *string;
+ char *buffer;
+ size_t sz;
+ u_int *debug;
+ int i, len;
+#endif
+ int error;
+
+ error = sysctl_wire_old_buffer(req, 0);
+ if (error != 0)
+ return (error);
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+#if defined (THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ debug = (u_int *)arg1;
+
+ sbuf_printf(sbuf, "%#x", *debug);
+
+ sz = sizeof(tb_debug_strings) / sizeof(tb_debug_strings[0]);
+ for (i = 0; i < sz; i++) {
+ string = &tb_debug_strings[i];
+ if (*debug & string->flag)
+ sbuf_printf(sbuf, ",%s", string->name);
+ }
+
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ if (error || req->newptr == NULL)
+ return (error);
+
+ len = req->newlen - req->newidx;
+ if (len == 0)
+ return (0);
+
+	buffer = malloc(len + 1, M_THUNDERBOLT, M_ZERO|M_WAITOK);
+	error = SYSCTL_IN(req, buffer, len);
+	if (error == 0)
+		tb_parse_debug(debug, buffer);
+
+ free(buffer, M_THUNDERBOLT);
+#else
+ sbuf_printf(sbuf, "debugging unavailable");
+ error = sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+#endif
+
+ return (error);
+}
+
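+/*
+ * Parse a debug string of comma- or colon-separated tokens.  A leading
+ * '+' adds the parsed flags to the current mask, '-' clears them, and
+ * no prefix replaces the mask outright.  Tokens may be symbolic names
+ * or numeric values.
+ */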
+void
+tb_parse_debug(u_int *debug, char *list)
+{
+ struct tb_debug_string *string;
+ enum tb_debug_level_combiner op;
+ char *token, *endtoken;
+ size_t sz;
+ int flags, i;
+
+ if (list == NULL || *list == '\0')
+ return;
+
+ if (*list == '+') {
+ op = COMB_ADD;
+ list++;
+ } else if (*list == '-') {
+ op = COMB_SUB;
+ list++;
+ } else
+ op = COMB_NONE;
+ if (*list == '\0')
+ return;
+
+ flags = 0;
+ sz = sizeof(tb_debug_strings) / sizeof(tb_debug_strings[0]);
+ while ((token = strsep(&list, ":,")) != NULL) {
+ /* Handle integer flags */
+ flags |= strtol(token, &endtoken, 0);
+ if (token != endtoken)
+ continue;
+
+ /* Handle text flags */
+ for (i = 0; i < sz; i++) {
+ string = &tb_debug_strings[i];
+ if (strcasecmp(token, string->name) == 0) {
+ flags |= string->flag;
+ break;
+ }
+ }
+ }
+
+ switch (op) {
+ case COMB_NONE:
+ *debug = flags;
+ break;
+ case COMB_ADD:
+ *debug |= flags;
+ break;
+ case COMB_SUB:
+ *debug &= (~flags);
+ break;
+ }
+ return;
+}
+
+void
+tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...)
+{
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+ va_list ap;
+ u_int lvl, dbg;
+
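+	/*
+	 * The top two bits of the mask select the verbosity level; the
+	 * remaining bits select subsystems.  Print only when the level is
+	 * sufficient and at least one subsystem bit matches.
+	 */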
+ lvl = debug & 0xc0000000;
+ dbg = debug & 0x3fffffff;
+ va_start(ap, fmt);
+ if ((lvl >= (val & 0xc0000000)) &&
+ ((dbg & (val & 0x3fffffff)) != 0)) {
+ device_printf(dev, "");
+ vprintf(fmt, ap);
+ }
+ va_end(ap);
+#endif
+}
diff --git a/sys/dev/thunderbolt/tb_debug.h b/sys/dev/thunderbolt/tb_debug.h
new file mode 100644
index 000000000000..4f5584420882
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_debug.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt 3 driver debug strings
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_DEBUG_H
+#define _TB_DEBUG_H
+
+typedef struct {
+ uintmax_t key;
+ const char * value;
+} tb_string_t;
+
+const char * tb_get_string(uintmax_t, tb_string_t *);
+int tb_debug_sysctl(SYSCTL_HANDLER_ARGS);
+void tb_parse_debug(u_int *, char *);
+
+extern tb_string_t nhi_outmailcmd_opmode[];
+extern tb_string_t nhi_frame_pdf[];
+extern tb_string_t tb_security_level[];
+extern tb_string_t tb_rdy_connmode[];
+extern tb_string_t tb_mbox_connmode[];
+extern tb_string_t tb_device_power[];
+extern tb_string_t tb_notify_code[];
+extern tb_string_t tb_adapter_type[];
+extern tb_string_t tb_adapter_state[];
+extern tb_string_t tb_notify_event[];
+
+enum {
+ /* Debug subsystems */
+ DBG_NONE = 0,
+ DBG_INIT = (1 << 0),
+ DBG_INFO = (1 << 1),
+ DBG_RXQ = (1 << 2),
+ DBG_TXQ = (1 << 3),
+ DBG_INTR = (1 << 4),
+ DBG_TB = (1 << 5),
+ DBG_MBOX = (1 << 6),
+ DBG_BRIDGE = (1 << 7),
+ DBG_CFG = (1 << 8),
+ DBG_ROUTER = (1 << 9),
+ DBG_PORT = (1 << 10),
+ DBG_HCM = (1 << 11),
+ /* Debug levels */
+ DBG_EXTRA = (1 << 30),
+ DBG_NOISY = (1 << 31),
+ DBG_FULL = DBG_EXTRA | DBG_NOISY
+};
+
+/*
+ * Macros to wrap printing.
+ * Each softc type needs a `dev` and `debug` field. Do tbdbg_printf as a
+ * function to make format errors more clear during compile.
+ */
+void tbdbg_dprintf(device_t dev, u_int debug, u_int val, const char *fmt, ...) __printflike(4, 5);
+
+#if defined(THUNDERBOLT_DEBUG) && (THUNDERBOLT_DEBUG > 0)
+#define tb_debug(sc, level, fmt...) \
+ tbdbg_dprintf((sc)->dev, (sc)->debug, level, ##fmt)
+#else
+#define tb_debug(sc, level, fmt...)
+#endif
+#define tb_printf(sc, fmt...) \
+ device_printf((sc)->dev, ##fmt)
+
+#endif /* _TB_DEBUG_H */
diff --git a/sys/dev/thunderbolt/tb_dev.c b/sys/dev/thunderbolt/tb_dev.c
new file mode 100644
index 000000000000..7ea545dee0c3
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_dev.c
@@ -0,0 +1,331 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_thunderbolt.h"
+
+/* Userspace control device for USB4 / TB3 */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/queue.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/nv.h>
+#include <sys/taskqueue.h>
+#include <sys/gsb_crc32.h>
+#include <sys/endian.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/stdarg.h>
+
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_var.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/router_var.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include <dev/thunderbolt/tb_dev.h>
+#include <dev/thunderbolt/tb_ioctl.h>
+
+struct tbdev_if;
+struct tbdev_dm;
+struct tbdev_rt;
+
+struct tbdev_if {
+ TAILQ_ENTRY(tbdev_if) dev_next;
+ char name[SPECNAMELEN];
+};
+
+struct tbdev_dm {
+ TAILQ_ENTRY(tbdev_dm) dev_next;
+ char uid[16];
+};
+
+struct tbdev_rt {
+ TAILQ_ENTRY(tbdev_rt) dev_next;
+ uint64_t route;
+};
+
+static int tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
+    int flag, struct thread *td);
+
+static struct cdevsw tbdev_static_devsw = {
+ .d_version = D_VERSION,
+ .d_ioctl = tbdev_static_ioctl,
+ .d_name = "tbt"
+};
+static struct cdev *tb_dev = NULL;
+
+static TAILQ_HEAD(, tbdev_if) tbdev_head = TAILQ_HEAD_INITIALIZER(tbdev_head);
+static TAILQ_HEAD(, tbdev_dm) tbdomain_head = TAILQ_HEAD_INITIALIZER(tbdomain_head);
+static TAILQ_HEAD(, tbdev_rt) tbrouter_head = TAILQ_HEAD_INITIALIZER(tbrouter_head);
+
+static struct mtx tbdev_mtx;
+MTX_SYSINIT(tbdev_mtx, &tbdev_mtx, "TBT Device Mutex", MTX_DEF);
+
+MALLOC_DEFINE(M_THUNDERBOLT, "thunderbolt", "memory for thunderbolt");
+
+static void
+tbdev_init(void *arg)
+{
+
+ tb_dev = make_dev(&tbdev_static_devsw, 0, UID_ROOT, GID_OPERATOR,
+ 0644, TBT_DEVICE_NAME);
+ if (tb_dev == NULL)
+ printf("Cannot create Thunderbolt system device\n");
+
+ return;
+}
+
+SYSINIT(tbdev_init, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, tbdev_init, NULL);
+
+static void
+tbdev_uninit(void *arg)
+{
+ if (tb_dev != NULL) {
+ destroy_dev(tb_dev);
+ tb_dev = NULL;
+ }
+}
+
+SYSUNINIT(tbdev_uninit, SI_SUB_KICK_SCHEDULER, SI_ORDER_ANY, tbdev_uninit, NULL);
+
+int
+tbdev_add_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce;
+
+ ifce = malloc(sizeof(struct tbdev_if), M_THUNDERBOLT, M_ZERO|M_NOWAIT);
+ if (ifce == NULL)
+ return (ENOMEM);
+
+ strlcpy(ifce->name, device_get_nameunit(nhi->dev), SPECNAMELEN);
+ mtx_lock(&tbdev_mtx);
+ TAILQ_INSERT_TAIL(&tbdev_head, ifce, dev_next);
+ mtx_unlock(&tbdev_mtx);
+
+ return (0);
+}
+
+int
+tbdev_remove_interface(struct nhi_softc *nhi)
+{
+ struct tbdev_if *ifce = NULL, *if_back;
+ const char *name;
+
+ name = device_get_nameunit(nhi->dev);
+ mtx_lock(&tbdev_mtx);
+ TAILQ_FOREACH_SAFE(ifce, &tbdev_head, dev_next, if_back) {
+ if (strncmp(name, ifce->name, SPECNAMELEN) == 0) {
+ TAILQ_REMOVE(&tbdev_head, ifce, dev_next);
+ break;
+ }
+ }
+ mtx_unlock(&tbdev_mtx);
+
+ if (ifce != NULL)
+ free(ifce, M_THUNDERBOLT);
+
+ return (0);
+}
+
+int
+tbdev_add_domain(void *domain)
+{
+
+ return (0);
+}
+
+int
+tbdev_remove_domain(void *domain)
+{
+
+ return (0);
+}
+
+int
+tbdev_add_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
+int
+tbdev_remove_router(struct router_softc *rt)
+{
+
+ return (0);
+}
+
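+/*
+ * Service the TBT_DISCOVER ioctl: unpack the caller's nvlist, look up
+ * the requested object type (interface, domain, or router), and return
+ * the matching names and routes in a re-packed nvlist.
+ */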
+static int
+tbdev_discover(caddr_t addr)
+{
+ nvlist_t *nvl = NULL;
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ struct tbdev_if *dev;
+ struct tbdev_dm *dm;
+ struct tbdev_rt *rt;
+ void *nvlpacked = NULL;
+ const char *cmd = NULL;
+ int error = 0;
+
+ if ((ioc->data == NULL) || (ioc->size == 0)) {
+ printf("data or size is 0\n");
+ return (EINVAL);
+ }
+
+ if ((ioc->len == 0) || (ioc->len > TBT_IOCMAXLEN) ||
+ (ioc->len > ioc->size)) {
+ printf("len is wrong\n");
+ return (EINVAL);
+ }
+
+ nvlpacked = malloc(ioc->len, M_THUNDERBOLT, M_NOWAIT);
+ if (nvlpacked == NULL) {
+ printf("cannot allocate nvlpacked\n");
+ return (ENOMEM);
+ }
+
+ error = copyin(ioc->data, nvlpacked, ioc->len);
+ if (error) {
+ free(nvlpacked, M_THUNDERBOLT);
+ printf("error %d from copyin\n", error);
+ return (error);
+ }
+
+ nvl = nvlist_unpack(nvlpacked, ioc->len, NV_FLAG_NO_UNIQUE);
+ if (nvl == NULL) {
+ free(nvlpacked, M_THUNDERBOLT);
+ printf("cannot unpack nvlist\n");
+ return (EINVAL);
+ }
+ free(nvlpacked, M_THUNDERBOLT);
+ nvlpacked = NULL;
+
+ if (nvlist_exists_string(nvl, TBT_DISCOVER_TYPE))
+ cmd = nvlist_get_string(nvl, TBT_DISCOVER_TYPE);
+ if (cmd == NULL) {
+ printf("cannot find type string\n");
+ error = EINVAL;
+ goto out;
+ }
+
+ mtx_lock(&tbdev_mtx);
+ if (strncmp(cmd, TBT_DISCOVER_IFACE, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(dev, &tbdev_head, dev_next)
+ nvlist_add_string(nvl, TBT_DISCOVER_IFACE, dev->name);
+ } else if (strncmp(cmd, TBT_DISCOVER_DOMAIN, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(dm, &tbdomain_head, dev_next)
+ nvlist_add_string(nvl, TBT_DISCOVER_DOMAIN, dm->uid);
+ } else if (strncmp(cmd, TBT_DISCOVER_ROUTER, TBT_NAMLEN) == 0) {
+ TAILQ_FOREACH(rt, &tbrouter_head, dev_next)
+ nvlist_add_number(nvl, TBT_DISCOVER_ROUTER, rt->route);
+	} else {
+		mtx_unlock(&tbdev_mtx);
+		printf("cannot find supported type\n");
+		error = EINVAL;
+		goto out;
+	}
+ mtx_unlock(&tbdev_mtx);
+
+	error = nvlist_error(nvl);
+	if (error != 0) {
+		printf("error %d state in nvlist\n", error);
+		goto out;
+	}
+
+	nvlpacked = nvlist_pack(nvl, &ioc->len);
+	if (nvlpacked == NULL) {
+		printf("cannot allocate new packed buffer\n");
+		error = ENOMEM;
+		goto out;
+	}
+	if (ioc->size < ioc->len) {
+		printf("packed buffer is too big to copyout\n");
+		error = ENOSPC;
+		goto out;
+	}
+
+ error = copyout(nvlpacked, ioc->data, ioc->len);
+ if (error)
+ printf("error %d on copyout\n", error);
+
+out:
+ if (nvlpacked != NULL)
+ free(nvlpacked, M_NVLIST);
+ if (nvl != NULL)
+ nvlist_destroy(nvl);
+
+ return (error);
+}
+
+static int
+tbdev_request(caddr_t addr)
+{
+ struct tbt_ioc *ioc = (struct tbt_ioc *)addr;
+ nvlist_t *nvl = NULL;
+ void *nvlpacked = NULL;
+ int error = 0;
+
+	if ((ioc->data == NULL) || (ioc->size == 0))
+		return (EINVAL);
+
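+	/* XXX No request nvlist is constructed yet; this is a placeholder. */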
+ nvlpacked = nvlist_pack(nvl, &ioc->len);
+ if (nvlpacked == NULL)
+ return (ENOMEM);
+ if (ioc->size < ioc->len)
+ return (ENOSPC);
+
+ error = copyout(nvlpacked, ioc->data, ioc->len);
+ return (error);
+}
+
+static int
+tbdev_static_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
+ struct thread *td)
+{
+ int error = 0;
+
+ switch (cmd) {
+ case TBT_DISCOVER:
+ error = tbdev_discover(addr);
+ break;
+ case TBT_REQUEST:
+ error = tbdev_request(addr);
+ break;
+ default:
+ error = EINVAL;
+ }
+
+ return (error);
+}
diff --git a/sys/dev/sound/version.h b/sys/dev/thunderbolt/tb_dev.h
index b816bbf964e8..c40a7fbc3d5a 100644
--- a/sys/dev/sound/version.h
+++ b/sys/dev/thunderbolt/tb_dev.h
@@ -1,7 +1,7 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
*
- * Copyright (c) 2007 Ariff Abdullah <ariff@FreeBSD.org>
+ * Copyright (c) 2022 Scott Long
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -24,19 +24,18 @@
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
+ *
+ * $FreeBSD$
*/
-#ifndef _SND_VERSION_H_
-#define _SND_VERSION_H_
+#ifndef _TB_DEV_H
+#define _TB_DEV_H
-/*
- * FreeBSD sound driver internal versioning, nothing to do
- * with OSS whatsoever. Dear future maintainer, please revisit
- * this _before_ Jan 1 2148
- *
- * Last 2 decimal places reserved for daily versioning, starting
- * with 0.
- */
-#define SND_DRV_VERSION 2009061500
+int tbdev_add_interface(struct nhi_softc *);
+int tbdev_remove_interface(struct nhi_softc *);
+int tbdev_add_domain(void *);
+int tbdev_remove_domain(void *);
+int tbdev_add_router(struct router_softc *);
+int tbdev_remove_router(struct router_softc *);
-#endif /* !_SND_VERSION_H_ */
+#endif /* _TB_DEV_H */
diff --git a/sys/dev/virtio/mmio/virtio_mmio_if.m b/sys/dev/thunderbolt/tb_if.m
index baebbd9a0b1c..8b0918811a5d 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_if.m
+++ b/sys/dev/thunderbolt/tb_if.m
@@ -1,10 +1,8 @@
#-
-# Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
-# All rights reserved.
+# SPDX-License-Identifier: BSD-2-Clause-FreeBSD
#
-# This software was developed by SRI International and the University of
-# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
-# ("CTSRD"), as part of the DARPA CRASH research programme.
+# Copyright (c) 2022 Scott Long
+# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -27,73 +25,97 @@
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
+# $FreeBSD$
#
+#include <sys/bus.h>
#include <sys/types.h>
+#include <dev/thunderbolt/tb_reg.h>
-#
-# This is optional interface to virtio mmio backend.
-# Useful when backend is implemented not by the hardware but software, e.g.
-# by using another cpu core.
-#
-
-INTERFACE virtio_mmio;
+INTERFACE tb;
CODE {
- static int
- virtio_mmio_prewrite(device_t dev, size_t offset, int val)
+ struct nhi_softc;
+
+ int
+ tb_generic_find_ufp(device_t dev, device_t *ufp)
{
+ device_t parent;
+
+ parent = device_get_parent(dev);
+ if (parent == NULL)
+ return (EOPNOTSUPP);
- return (1);
+ return (TB_FIND_UFP(parent, ufp));
}
- static int
- virtio_mmio_note(device_t dev, size_t offset, int val)
+ int
+ tb_generic_get_debug(device_t dev, u_int *debug)
{
+ device_t parent;
- return (1);
+ parent = device_get_parent(dev);
+ if (parent == NULL)
+ return (EOPNOTSUPP);
+
+ return (TB_GET_DEBUG(parent, debug));
}
- static int
- virtio_mmio_setup_intr(device_t dev, device_t mmio_dev,
- void *handler, void *ih_user)
- {
+}
- return (1);
- }
-};
+HEADER {
+ struct nhi_softc;
+
+ struct tb_lcmbox_cmd {
+ uint32_t cmd;
+ uint32_t cmd_resp;
+ uint32_t data_in;
+ uint32_t data_out;
+ };
+
+ int tb_generic_find_ufp(device_t, device_t *);
+ int tb_generic_get_debug(device_t, u_int *);
+}
#
-# Inform backend we are going to write data at offset.
+# Read the LC Mailbox
#
-METHOD int prewrite {
+METHOD int lc_mailbox {
device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_prewrite;
+ struct tb_lcmbox_cmd *cmd;
+};
#
-# Inform backend we have data wrotten to offset.
+# Read from the PCIE2CIO port
#
-METHOD int note {
+METHOD int pcie2cio_read {
device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_note;
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t *val;
+}
#
-# Inform backend we are going to poll virtqueue.
+# Write to the PCIE2CIO port
#
-METHOD int poll {
+METHOD int pcie2cio_write {
device_t dev;
-};
+ u_int space;
+ u_int port;
+ u_int index;
+ uint32_t val;
+}
#
-# Setup backend-specific interrupts.
+# Return the device that's the upstream facing port
#
-METHOD int setup_intr {
- device_t dev;
- device_t mmio_dev;
- void *handler;
- void *ih_user;
-} DEFAULT virtio_mmio_setup_intr;
+METHOD int find_ufp {
+ device_t dev;
+ device_t *ufp;
+} DEFAULT tb_generic_find_ufp;
+
+METHOD int get_debug {
+ device_t dev;
+ u_int *debug;
+} DEFAULT tb_generic_get_debug;
diff --git a/sys/dev/thunderbolt/tb_ioctl.h b/sys/dev/thunderbolt/tb_ioctl.h
new file mode 100644
index 000000000000..60fafb091cef
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_ioctl.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_IOCTL_H
+#define _TB_IOCTL_H
+
+struct tbt_ioc {
+ void *data; /* user-supplied buffer for the nvlist */
+ size_t size; /* size of the user-supplied buffer */
+ size_t len; /* amount of data in the nvlist */
+};
+
+#define TBT_NAMLEN 16
+#define TBT_DEVICE_NAME "tbtctl"
+#define TBT_IOCMAXLEN 4096
+
+#define TBT_DISCOVER _IOWR('h', 1, struct tbt_ioc)
+#define TBT_DISCOVER_TYPE "type"
+#define TBT_DISCOVER_IFACE "iface"
+#define TBT_DISCOVER_DOMAIN "domain"
+#define TBT_DISCOVER_ROUTER "router"
+
+#define TBT_REQUEST _IOWR('h', 2, struct tbt_ioc)
+
+#endif /* _TB_IOCTL_H */
diff --git a/sys/dev/thunderbolt/tb_pcib.c b/sys/dev/thunderbolt/tb_pcib.c
new file mode 100644
index 000000000000..00738984ad1c
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.c
@@ -0,0 +1,614 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_acpi.h"
+#include "opt_thunderbolt.h"
+
+/* PCIe bridge for Thunderbolt */
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/sysctl.h>
+#include <sys/lock.h>
+#include <sys/param.h>
+#include <sys/endian.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/stdarg.h>
+#include <sys/rman.h>
+
+#include <machine/pci_cfgreg.h>
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+#include <dev/pci/pcib_private.h>
+#include <dev/pci/pci_private.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+#include <dev/acpica/acpi_pcibvar.h>
+#include <machine/md_var.h>
+
+#include <dev/thunderbolt/tb_reg.h>
+#include <dev/thunderbolt/tb_pcib.h>
+#include <dev/thunderbolt/nhi_var.h>
+#include <dev/thunderbolt/nhi_reg.h>
+#include <dev/thunderbolt/tbcfg_reg.h>
+#include <dev/thunderbolt/tb_debug.h>
+#include "tb_if.h"
+
+static int tb_pcib_probe(device_t);
+static int tb_pcib_attach(device_t);
+static int tb_pcib_detach(device_t);
+static int tb_pcib_lc_mailbox(device_t, struct tb_lcmbox_cmd *);
+static int tb_pcib_pcie2cio_read(device_t, u_int, u_int, u_int,
+ uint32_t *);
+static int tb_pcib_pcie2cio_write(device_t, u_int, u_int, u_int, uint32_t);
+static int tb_pcib_find_ufp(device_t, device_t *);
+static int tb_pcib_get_debug(device_t, u_int *);
+
+static int tb_pci_probe(device_t);
+static int tb_pci_attach(device_t);
+static int tb_pci_detach(device_t);
+
+struct tb_pcib_ident {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint32_t flags; /* This follows the tb_softc flags */
+ const char *desc;
+} tb_pcib_identifiers[] = {
+ { VENDOR_INTEL, TB_DEV_AR_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge 2C)" },
+ { VENDOR_INTEL, TB_DEV_AR_LP, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge LP)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_4C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge (Alpine Ridge C 4C)" },
+ { VENDOR_INTEL, TB_DEV_AR_C_2C, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_AR,
+ "Thunderbolt 3 PCI-PCI Bridge C (Alpine Ridge C 2C)" },
+ { VENDOR_INTEL, TB_DEV_ICL_0, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { VENDOR_INTEL, TB_DEV_ICL_1, 0xffff, 0xffff, TB_GEN_TB3|TB_HWIF_ICL,
+ "Thunderbolt 3 PCI-PCI Bridge (IceLake)" },
+ { 0, 0, 0, 0, 0, NULL }
+};
+
+static struct tb_pcib_ident *
+tb_pcib_find_ident(device_t dev)
+{
+ struct tb_pcib_ident *n;
+ uint16_t v, d, sv, sd;
+
+ v = pci_get_vendor(dev);
+ d = pci_get_device(dev);
+ sv = pci_get_subvendor(dev);
+ sd = pci_get_subdevice(dev);
+
+ for (n = tb_pcib_identifiers; n->vendor != 0; n++) {
+ if ((n->vendor != v) || (n->device != d))
+ continue;
+ if (((n->subvendor != 0xffff) && (n->subvendor != sv)) ||
+ ((n->subdevice != 0xffff) && (n->subdevice != sd)))
+ continue;
+ return (n);
+ }
+
+ return (NULL);
+}
+
+static void
+tb_pcib_get_tunables(struct tb_pcib_softc *sc)
+{
+ char tmpstr[80], oid[80];
+
+ /* Set the default */
+ sc->debug = 0;
+
+ /* Grab global variables */
+	bzero(oid, sizeof(oid));
+	if (TUNABLE_STR_FETCH("hw.tbolt.debug_level", oid, sizeof(oid)) != 0)
+ tb_parse_debug(&sc->debug, oid);
+
+ /* Grab instance variables */
+	bzero(oid, sizeof(oid));
+	snprintf(tmpstr, sizeof(tmpstr), "dev.tbolt.%d.debug_level",
+	    device_get_unit(sc->dev));
+	if (TUNABLE_STR_FETCH(tmpstr, oid, sizeof(oid)) != 0)
+ tb_parse_debug(&sc->debug, oid);
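+
+	/*
+	 * Example (loader.conf), assuming tb_parse_debug() accepts these
+	 * names:
+	 *   hw.tbolt.debug_level="bridge"
+	 *   dev.tbolt.0.debug_level="bridge,full"
+	 */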
+}
+
+static int
+tb_pcib_setup_sysctl(struct tb_pcib_softc *sc)
+{
+ struct sysctl_ctx_list *ctx = NULL;
+ struct sysctl_oid *tree = NULL;
+
+ ctx = device_get_sysctl_ctx(sc->dev);
+ if (ctx != NULL)
+ tree = device_get_sysctl_tree(sc->dev);
+
+ if (tree == NULL) {
+ tb_printf(sc, "Error: cannot create sysctl nodes\n");
+ return (EINVAL);
+ }
+ sc->sysctl_tree = tree;
+ sc->sysctl_ctx = ctx;
+
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree),
+ OID_AUTO, "debug_level", CTLTYPE_STRING|CTLFLAG_RW|CTLFLAG_MPSAFE,
+ &sc->debug, 0, tb_debug_sysctl, "A", "Thunderbolt debug level");
+
+ return (0);
+}
+
+/*
+ * This is used for both the PCI and ACPI attachments. It must not return
+ * 0; doing so would force the ACPI attachment to fail.
+ */
+int
+tb_pcib_probe_common(device_t dev, char *desc)
+{
+ device_t ufp;
+ struct tb_pcib_ident *n;
+	const char *suffix;
+
+ if ((n = tb_pcib_find_ident(dev)) != NULL) {
+ ufp = NULL;
+ if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp == dev))
+ suffix = "(Upstream port)";
+ else
+ suffix = "(Downstream port)";
+ snprintf(desc, TB_DESC_MAX, "%s %s", n->desc, suffix);
+ return (BUS_PROBE_VENDOR);
+ }
+ return (ENXIO);
+}
+
+static int
+tb_pcib_probe(device_t dev)
+{
+ char desc[TB_DESC_MAX];
+ int val;
+
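+	/*
+	 * BUS_PROBE_* success values are <= 0; a positive errno means
+	 * the device didn't match.
+	 */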
+ if ((val = tb_pcib_probe_common(dev, desc)) <= 0)
+ device_set_desc_copy(dev, desc);
+
+ return (val);
+}
+
+int
+tb_pcib_attach_common(device_t dev)
+{
+ device_t ufp;
+ struct tb_pcib_ident *n;
+ struct tb_pcib_softc *sc;
+ uint32_t val;
+ int error;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+ sc->vsec = -1;
+
+ n = tb_pcib_find_ident(dev);
+ KASSERT(n != NULL, ("Cannot find TB ident"));
+ sc->flags = n->flags;
+
+ tb_pcib_get_tunables(sc);
+ tb_pcib_setup_sysctl(sc);
+
+ /* XXX Is this necessary for ACPI attachments? */
+ tb_debug(sc, DBG_BRIDGE, "busmaster status was %s\n",
+ (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_BUSMASTEREN)
+ ? "enabled" : "disabled");
+ pci_enable_busmaster(dev);
+
+ /*
+ * Determine if this is an upstream or downstream facing device, and
+	 * whether it's the root of the Thunderbolt topology. It's too bad
+	 * that there aren't unique PCI IDs to help with this.
+ */
+ ufp = NULL;
+ if ((TB_FIND_UFP(dev, &ufp) == 0) && (ufp != NULL)) {
+ if (ufp == dev) {
+ sc->flags |= TB_FLAGS_ISUFP;
+ if (TB_FIND_UFP(device_get_parent(dev), NULL) ==
+ EOPNOTSUPP) {
+ sc->flags |= TB_FLAGS_ISROOT;
+ }
+ }
+ }
+
+ /*
+ * Find the PCI Vendor Specific Extended Capability. It's the magic
+	 * wand for configuring the Thunderbolt root bridges.
+ */
+ if (TB_IS_AR(sc) || TB_IS_TR(sc)) {
+ error = pci_find_extcap(dev, PCIZ_VENDOR, &sc->vsec);
+ if (error) {
+ tb_printf(sc, "Cannot find VSEC capability: %d\n",
+ error);
+ return (ENXIO);
+ }
+ }
+
+ /*
+ * Take the AR bridge out of low-power mode.
+ * XXX AR only?
+ */
+ if ((1 || TB_IS_AR(sc)) && TB_IS_ROOT(sc)) {
+ struct tb_lcmbox_cmd cmd;
+
+ cmd.cmd = LC_MBOXOUT_CMD_SXEXIT_TBT;
+ cmd.data_in = 0;
+
+ error = TB_LC_MAILBOX(dev, &cmd);
+ tb_debug(sc, DBG_BRIDGE, "SXEXIT returned error= %d resp= 0x%x "
+ "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+ }
+
+ /* The downstream facing port on AR needs some help */
+ if (TB_IS_AR(sc) && TB_IS_DFP(sc)) {
+ tb_debug(sc, DBG_BRIDGE, "Doing AR L1 fixup\n");
+ val = pci_read_config(dev, sc->vsec + AR_VSCAP_1C, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0x1c= 0x%08x\n", val);
+ val |= (1 << 8);
+ pci_write_config(dev, sc->vsec + AR_VSCAP_1C, val, 4);
+
+ val = pci_read_config(dev, sc->vsec + AR_VSCAP_B0, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "VSEC+0xb0= 0x%08x\n", val);
+ val |= (1 << 12);
+ pci_write_config(dev, sc->vsec + AR_VSCAP_B0, val, 4);
+ }
+
+ return (0);
+}
+
+static int
+tb_pcib_attach(device_t dev)
+{
+ int error;
+
+ error = tb_pcib_attach_common(dev);
+ if (error)
+ return (error);
+ return (pcib_attach(dev));
+}
+
+static int
+tb_pcib_detach(device_t dev)
+{
+ struct tb_pcib_softc *sc;
+ int error;
+
+ sc = device_get_softc(dev);
+
+ tb_debug(sc, DBG_BRIDGE|DBG_ROUTER|DBG_EXTRA, "tb_pcib_detach\n");
+
+ /* Put the AR bridge back to sleep */
+ /* XXX disable this until power control for downstream switches works */
+ if (0 && TB_IS_ROOT(sc)) {
+ struct tb_lcmbox_cmd cmd;
+
+ cmd.cmd = LC_MBOXOUT_CMD_GO2SX;
+ cmd.data_in = 0;
+
+ error = TB_LC_MAILBOX(dev, &cmd);
+		tb_debug(sc, DBG_BRIDGE, "GO2SX returned error= %d resp= 0x%x "
+ "data= 0x%x\n", error, cmd.cmd_resp, cmd.data_out);
+ }
+
+ return (pcib_detach(dev));
+}
+
+/* Read/write the Link Controller registers in CFG space */
+static int
+tb_pcib_lc_mailbox(device_t dev, struct tb_lcmbox_cmd *cmd)
+{
+ struct tb_pcib_softc *sc;
+ uint32_t regcmd, result;
+ uint16_t m_in, m_out;
+ int vsec, i;
+
+ sc = device_get_softc(dev);
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+ if (TB_IS_AR(sc)) {
+ m_in = AR_LC_MBOX_IN;
+ m_out = AR_LC_MBOX_OUT;
+ } else if (TB_IS_ICL(sc)) {
+ m_in = ICL_LC_MBOX_IN;
+ m_out = ICL_LC_MBOX_OUT;
+ } else
+ return (EOPNOTSUPP);
+
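+	/*
+	 * Mailbox handshake: write the command with the VALID bit set,
+	 * poll the inbound register until DONE (up to ~10s), then clear
+	 * VALID to complete the exchange.
+	 */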
+ /* Set the valid bit to signal we're sending a command */
+ regcmd = LC_MBOXOUT_VALID | (cmd->cmd & LC_MBOXOUT_CMD_MASK);
+ regcmd |= (cmd->data_in << LC_MBOXOUT_DATA_SHIFT);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "Writing LC cmd 0x%x\n", regcmd);
+ pci_write_config(dev, vsec + m_out, regcmd, 4);
+
+ for (i = 0; i < 10; i++) {
+ pause("nhi", 1 * hz);
+ result = pci_read_config(dev, vsec + m_in, 4);
+ tb_debug(sc, DBG_BRIDGE|DBG_FULL, "LC Mailbox= 0x%08x\n",
+ result);
+ if ((result & LC_MBOXIN_DONE) != 0)
+ break;
+ }
+
+ /* Clear the valid bit to signal we're done sending the command */
+ pci_write_config(dev, vsec + m_out, 0, 4);
+
+ cmd->cmd_resp = result & LC_MBOXIN_CMD_MASK;
+ cmd->data_out = result >> LC_MBOXIN_CMD_SHIFT;
+
+ if ((result & LC_MBOXIN_DONE) == 0)
+ return (ETIMEDOUT);
+
+ return (0);
+}
+
+static int
+tb_pcib_pcie2cio_wait(device_t dev, u_int timeout)
+{
+#if 0	/* Disabled sketch: poll the PCIE2CIO handshake until done. */
+	uint32_t val;
+	int vsec;
+	u_int t;
+
+	vsec = TB_PCIB_VSEC(dev);
+	for (t = 0; t < timeout; t += 50) {
+		val = pci_read_config(dev, vsec + PCIE2CIO_CMD, 4);
+		if ((val & PCIE2CIO_CMD_START) == 0) {
+			if (val & PCIE2CIO_CMD_TIMEOUT)
+				break;
+			return (0);
+		}
+		pause("p2cio", MAX(1, hz / 20));	/* ~50ms */
+	}
+#endif
+	return (ETIMEDOUT);
+}
+
+static int
+tb_pcib_pcie2cio_read(device_t dev, u_int space, u_int port, u_int offset,
+ uint32_t *val)
+{
+#if 0
+ uint32_t cmd;
+ int ret, vsec;
+
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+	cmd = offset;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_START;
+ pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd, 4);
+
+	if ((ret = tb_pcib_pcie2cio_wait(dev, 5000)) != 0)
+ return (ret);
+
+ *val = pci_read_config(dev, vsec + PCIE2CIO_RDDATA, 4);
+#endif
+ return (0);
+}
+
+static int
+tb_pcib_pcie2cio_write(device_t dev, u_int space, u_int port, u_int offset,
+ uint32_t val)
+{
+#if 0
+ uint32_t cmd;
+ int ret, vsec;
+
+ vsec = TB_PCIB_VSEC(dev);
+ if (vsec == -1)
+ return (EOPNOTSUPP);
+
+ pci_write_config(dev, vsec + PCIE2CIO_WRDATA, val, 4);
+
+	cmd = offset;
+ cmd |= (port << PCIE2CIO_CMD_PORT_SHIFT) & PCIE2CIO_CMD_PORT_MASK;
+ cmd |= (space << PCIE2CIO_CMD_CS_SHIFT) & PCIE2CIO_CMD_CS_MASK;
+ cmd |= PCIE2CIO_CMD_WRITE | PCIE2CIO_CMD_START;
+	pci_write_config(dev, vsec + PCIE2CIO_CMD, cmd, 4);
+
+#endif
+ return (tb_pcib_pcie2cio_wait(dev, 5000));
+}
+
+/*
+ * The Upstream Facing Port (UFP) in a switch is special, it's the function
+ * that responds to some of the special programming mailboxes. It can't be
+ * differentiated by PCI ID, so a heuristic approach to identifying it is
+ * required.
+ */
+static int
+tb_pcib_find_ufp(device_t dev, device_t *ufp)
+{
+ device_t upstream;
+ struct tb_pcib_softc *sc;
+	uint32_t val;
+	int error, vsec;
+
+ upstream = NULL;
+ sc = device_get_softc(dev);
+ if (sc == NULL)
+ return (EOPNOTSUPP);
+
+ if (TB_IS_UFP(sc)) {
+ upstream = dev;
+ error = 0;
+ goto out;
+ }
+
+ /*
+ * This register is supposed to be filled in on the upstream port
+ * and tells how many downstream ports there are. It doesn't seem
+ * to get filled in on AR host controllers, but is on various
+ * peripherals.
+ */
+ error = pci_find_extcap(dev, PCIZ_VENDOR, &vsec);
+ if (error == 0) {
+ val = pci_read_config(dev, vsec + 0x18, 4);
+ if ((val & 0x1f) > 0) {
+ upstream = dev;
+ goto out;
+ }
+ }
+
+ /*
+ * Since we can't trust that the VSEC register is filled in, the only
+ * other option is to see if we're at the top of the topology, which
+ * implies that we're at the upstream port of the host controller.
+ */
+ error = TB_FIND_UFP(device_get_parent(dev), ufp);
+ if (error == EOPNOTSUPP) {
+ upstream = dev;
+ error = 0;
+ goto out;
+ } else
+ return (error);
+
+out:
+ if (ufp != NULL)
+ *ufp = upstream;
+
+ return (error);
+}
+
+static int
+tb_pcib_get_debug(device_t dev, u_int *debug)
+{
+ struct tb_pcib_softc *sc;
+
+ sc = device_get_softc(dev);
+ if ((sc == NULL) || (debug == NULL))
+ return (EOPNOTSUPP);
+
+ *debug = sc->debug;
+ return (0);
+}
+
+static device_method_t tb_pcib_methods[] = {
+ DEVMETHOD(device_probe, tb_pcib_probe),
+ DEVMETHOD(device_attach, tb_pcib_attach),
+ DEVMETHOD(device_detach, tb_pcib_detach),
+
+ DEVMETHOD(tb_lc_mailbox, tb_pcib_lc_mailbox),
+ DEVMETHOD(tb_pcie2cio_read, tb_pcib_pcie2cio_read),
+ DEVMETHOD(tb_pcie2cio_write, tb_pcib_pcie2cio_write),
+
+ DEVMETHOD(tb_find_ufp, tb_pcib_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_pcib_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(tbolt, tb_pcib_driver, tb_pcib_methods,
+ sizeof(struct tb_pcib_softc), pcib_driver);
+DRIVER_MODULE_ORDERED(tb_pcib, pci, tb_pcib_driver,
+ NULL, NULL, SI_ORDER_MIDDLE);
+MODULE_DEPEND(tb_pcib, pci, 1, 1, 1);
+MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice;U32:#;D:#",
+ pci, tb_pcib, tb_pcib_identifiers, nitems(tb_pcib_identifiers) - 1);
+
+static int
+tb_pci_probe(device_t dev)
+{
+ struct tb_pcib_ident *n;
+
+ if ((n = tb_pcib_find_ident(device_get_parent(dev))) != NULL) {
+ switch (n->flags & TB_GEN_MASK) {
+ case TB_GEN_TB1:
+ device_set_desc(dev, "Thunderbolt 1 Link");
+ break;
+ case TB_GEN_TB2:
+ device_set_desc(dev, "Thunderbolt 2 Link");
+ break;
+ case TB_GEN_TB3:
+ device_set_desc(dev, "Thunderbolt 3 Link");
+ break;
+ case TB_GEN_USB4:
+ device_set_desc(dev, "USB4 Link");
+ break;
+ case TB_GEN_UNK:
+ /* Fallthrough */
+ default:
+ device_set_desc(dev, "Thunderbolt Link");
+ }
+ return (BUS_PROBE_VENDOR);
+ }
+ return (ENXIO);
+}
+
+static int
+tb_pci_attach(device_t dev)
+{
+
+ return (pci_attach(dev));
+}
+
+static int
+tb_pci_detach(device_t dev)
+{
+
+ return (pci_detach(dev));
+}
+
+static device_method_t tb_pci_methods[] = {
+ DEVMETHOD(device_probe, tb_pci_probe),
+ DEVMETHOD(device_attach, tb_pci_attach),
+ DEVMETHOD(device_detach, tb_pci_detach),
+
+ DEVMETHOD(tb_find_ufp, tb_generic_find_ufp),
+ DEVMETHOD(tb_get_debug, tb_generic_get_debug),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(pci, tb_pci_driver, tb_pci_methods, sizeof(struct pci_softc),
+ pci_driver);
+DRIVER_MODULE(tb_pci, pcib, tb_pci_driver, NULL, NULL);
+MODULE_DEPEND(tb_pci, pci, 1, 1, 1);
+MODULE_VERSION(tb_pci, 1);
diff --git a/sys/dev/thunderbolt/tb_pcib.h b/sys/dev/thunderbolt/tb_pcib.h
new file mode 100644
index 000000000000..6928e866a083
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_pcib.h
@@ -0,0 +1,93 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt PCIe bridge/switch definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_PCIB_H
+#define _TB_PCIB_H
+
+DECLARE_CLASS(tb_pcib_driver);
+
+/*
+ * The order of the fields is very important. Class inheritance relies on
+ * implicitly knowing the location of the first three fields.
+ */
+struct tb_pcib_softc {
+ struct pcib_softc pcibsc;
+ ACPI_HANDLE ap_handle;
+ ACPI_BUFFER ap_prt;
+ device_t dev;
+ u_int debug;
+ int vsec;
+ int flags;
+ struct sysctl_ctx_list *sysctl_ctx;
+ struct sysctl_oid *sysctl_tree;
+};
+
+/* Flags for tb_softc */
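+/* Bits 0-3: generation; bits 4-7: hardware interface; bits 8+: state */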
+#define TB_GEN_UNK 0x00
+#define TB_GEN_TB1 0x01
+#define TB_GEN_TB2 0x02
+#define TB_GEN_TB3 0x03
+#define TB_GEN_USB4 0x04
+#define TB_GEN_MASK 0x0f
+#define TB_HWIF_UNK 0x00
+#define TB_HWIF_AR 0x10
+#define TB_HWIF_TR 0x20
+#define TB_HWIF_ICL 0x30
+#define TB_HWIF_USB4 0x40
+#define TB_HWIF_MASK 0xf0
+#define TB_FLAGS_ISROOT 0x100
+#define TB_FLAGS_ISUFP 0x200
+
+#define TB_IS_AR(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_AR)
+#define TB_IS_TR(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_TR)
+#define TB_IS_ICL(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_ICL)
+#define TB_IS_USB4(sc) (((sc)->flags & TB_HWIF_MASK) == TB_HWIF_USB4)
+
+#define TB_IS_ROOT(sc) (((sc)->flags & TB_FLAGS_ISROOT) != 0)
+#define TB_IS_UFP(sc) (((sc)->flags & TB_FLAGS_ISUFP) != 0)
+#define TB_IS_DFP(sc) (((sc)->flags & TB_FLAGS_ISUFP) == 0)
+
+/* PCI IDs for the TB bridges */
+#define TB_DEV_AR_2C 0x1576
+#define TB_DEV_AR_LP 0x15c0
+#define TB_DEV_AR_C_4C 0x15d3
+#define TB_DEV_AR_C_2C 0x15da
+#define TB_DEV_ICL_0 0x8a1d
+#define TB_DEV_ICL_1 0x8a21
+
+#define TB_PCIB_VSEC(dev)	(((struct tb_pcib_softc *)device_get_softc(dev))->vsec)
+#define TB_DESC_MAX 80
+
+int tb_pcib_probe_common(device_t, char *);
+int tb_pcib_attach_common(device_t dev);
+
+#endif /* _TB_PCIB_H */
diff --git a/sys/dev/thunderbolt/tb_reg.h b/sys/dev/thunderbolt/tb_reg.h
new file mode 100644
index 000000000000..b065e01e6972
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_reg.h
@@ -0,0 +1,52 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt register definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_REG_H
+#define _TB_REG_H
+
+#define TBSEC_NONE 0x00
+#define TBSEC_USER 0x01
+#define TBSEC_SECURE 0x02
+#define TBSEC_DP 0x03
+#define TBSEC_UNKNOWN 0xff
+
+/*
+ * SW-FW commands and responses. These are sent over Ring0 to communicate
+ * with the fabric and the TBT Connection Manager firmware.
+ */
+
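+/*
+ * 64-bit route string addressing a router in the topology; per the
+ * USB4 convention, each hop down from the host router is encoded as
+ * an adapter number.
+ */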
+typedef struct {
+ uint32_t hi;
+ uint32_t lo;
+} __packed tb_route_t;
+
+#endif /* _TB_REG_H */
diff --git a/sys/dev/thunderbolt/tb_var.h b/sys/dev/thunderbolt/tb_var.h
new file mode 100644
index 000000000000..4874c420300e
--- /dev/null
+++ b/sys/dev/thunderbolt/tb_var.h
@@ -0,0 +1,54 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt firmware connection manager functions.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TB_VAR_H
+#define _TB_VAR_H
+
+typedef struct {
+ int8_t link;
+ int8_t depth;
+} tb_addr_t;
+
+MALLOC_DECLARE(M_THUNDERBOLT);
+
+#define TB_VENDOR_LEN 48
+#define TB_MODEL_LEN 48
+#define TB_MAX_LINKS 4
+#define TB_MAX_DEPTH 6
+
+static __inline uint32_t
+tb_calc_crc(void *data, u_int len)
+{
+	return (~calculate_crc32c(~0L, data, len));
+}
+
+#endif /* _TB_VAR_H */
diff --git a/sys/dev/thunderbolt/tbcfg_reg.h b/sys/dev/thunderbolt/tbcfg_reg.h
new file mode 100644
index 000000000000..bb68faa543b0
--- /dev/null
+++ b/sys/dev/thunderbolt/tbcfg_reg.h
@@ -0,0 +1,363 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
+ *
+ * Copyright (c) 2022 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * Thunderbolt3/USB4 config space register definitions
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _TBCFG_REG_H
+#define _TBCFG_REG_H
+
+/* Config space read request, 6.4.2.3 */
+struct tb_cfg_read {
+ tb_route_t route;
+ uint32_t addr_attrs;
+#define TB_CFG_ADDR_SHIFT 0
+#define TB_CFG_ADDR_MASK GENMASK(12,0)
+#define TB_CFG_SIZE_SHIFT 13
+#define TB_CFG_SIZE_MASK GENMASK(18,13)
+#define TB_CFG_ADAPTER_SHIFT 19
+#define TB_CFG_ADAPTER_MASK GENMASK(24,19)
+#define TB_CFG_CS_PATH (0x00 << 25)
+#define TB_CFG_CS_ADAPTER (0x01 << 25)
+#define TB_CFG_CS_ROUTER (0x02 << 25)
+#define TB_CFG_CS_COUNTERS (0x03 << 25)
+#define TB_CFG_SEQ_SHIFT 27
+#define TB_CFG_SEQ_MASK		GENMASK(28,27)
+ uint32_t crc;
+} __packed;
+
+/* Config space read response, 6.4.2.4 */
+struct tb_cfg_read_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write request, 6.4.2.5 */
+struct tb_cfg_write {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t data[0]; /* Up to 60 dwords */
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space write response, 6.4.2.6 */
+struct tb_cfg_write_resp {
+ tb_route_t route;
+ uint32_t addr_attrs;
+ uint32_t crc;
+} __packed;
+
+/* Config space event, 6.4.2.7 */
+struct tb_cfg_notify {
+ tb_route_t route;
+ uint32_t event_adap;
+#define TB_CFG_EVENT_MASK GENMASK(7,0)
+#define GET_NOTIFY_EVENT(n) ((n)->event_adap & TB_CFG_EVENT_MASK)
+#define TB_CFG_ERR_CONN 0x00
+#define TB_CFG_ERR_LINK 0x01
+#define TB_CFG_ERR_ADDR 0x02
+#define TB_CFG_ERR_ADP 0x04
+#define TB_CFG_ERR_ENUM 0x08
+#define TB_CFG_ERR_NUA 0x09
+#define TB_CFG_ERR_LEN 0x0b
+#define TB_CFG_ERR_HEC 0x0c
+#define TB_CFG_ERR_FC 0x0d
+#define TB_CFG_ERR_PLUG 0x0e
+#define TB_CFG_ERR_LOCK 0x0f
+#define TB_CFG_HP_ACK 0x07
+#define TB_CFG_DP_BW 0x20
+#define TB_CFG_EVENT_ADAPTER_SHIFT 8
+#define TB_CFG_EVENT_ADAPTER_MASK GENMASK(13,8)
+#define GET_NOTIFY_ADAPTER(n) (((n)->event_adap & \
+ TB_CFG_EVENT_ADAPTER_MASK) >> \
+ TB_CFG_EVENT_ADAPTER_SHIFT)
+#define TB_CFG_PG_NONE 0x00000000
+#define TB_CFG_PG_PLUG 0x80000000
+#define TB_CFG_PG_UNPLUG 0xc0000000
+ uint32_t crc;
+} __packed;
+
+/* Config space event acknowledgement, 6.4.2.8 */
+struct tb_cfg_notify_ack {
+ tb_route_t route;
+ uint32_t crc;
+} __packed;
+
+/* Config space hot plug event, 6.4.2.10 */
+struct tb_cfg_hotplug {
+ tb_route_t route;
+ uint32_t adapter_attrs;
+#define TB_CFG_ADPT_MASK GENMASK(5,0)
+#define TB_CFG_UPG_PLUG (0x0 << 31)
+#define TB_CFG_UPG_UNPLUG (0x1 << 31)
+ uint32_t crc;
+} __packed;
+
+/* Config space inter-domain request, 6.4.2.11 */
+struct tb_cfg_xdomain {
+ tb_route_t route;
+ uint32_t data[0];
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space inter-domain response, 6.4.2.12 */
+struct tb_cfg_xdomain_resp {
+ tb_route_t route;
+ uint32_t data[0];
+ /* uint32_t crc is at the end */
+} __packed;
+
+/* Config space router basic registers 8.2.1.1 */
+struct tb_cfg_router {
+ uint16_t vendor_id; /* ROUTER_CS_0 */
+ uint16_t product_id;
+ uint32_t router_cs_1; /* ROUTER_CS_1 */
+#define ROUTER_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ROUTER_CS_NEXT_CAP(r) (r->router_cs_1 & \
+ ROUTER_CS1_NEXT_CAP_MASK)
+#define ROUTER_CS1_UPSTREAM_SHIFT 8
+#define ROUTER_CS1_UPSTREAM_MASK GENMASK(13,8)
+#define GET_ROUTER_CS_UPSTREAM_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_UPSTREAM_MASK) >> \
+ ROUTER_CS1_UPSTREAM_SHIFT)
+#define ROUTER_CS1_MAX_SHIFT 14
+#define ROUTER_CS1_MAX_MASK GENMASK(19,14)
+#define GET_ROUTER_CS_MAX_ADAP(r) ((r->router_cs_1 & \
+ ROUTER_CS1_MAX_MASK) >> \
+ ROUTER_CS1_MAX_SHIFT)
+#define ROUTER_CS1_MAX_ADAPTERS 64
+#define ROUTER_CS1_DEPTH_SHIFT 20
+#define ROUTER_CS1_DEPTH_MASK GENMASK(22,20)
+#define GET_ROUTER_CS_DEPTH(r) ((r->router_cs_1 & \
+ ROUTER_CS1_DEPTH_MASK) >> \
+ ROUTER_CS1_DEPTH_SHIFT)
+#define ROUTER_CS1_REVISION_SHIFT 24
+#define ROUTER_CS1_REVISION_MASK GENMASK(31,24)
+#define GET_ROUTER_CS_REVISION(r)	((r->router_cs_1 & \
+ ROUTER_CS1_REVISION_MASK) >> \
+ ROUTER_CS1_REVISION_SHIFT)
+ uint32_t topology_lo; /* ROUTER_CS_2 */
+ uint32_t topology_hi; /* ROUTER_CS_3 */
+#define CFG_TOPOLOGY_VALID (1 << 31)
+ uint8_t notification_timeout; /* ROUTER_CS_4 */
+ uint8_t cm_version;
+#define CFG_CM_USB4 0x10
+ uint8_t rsrvd1;
+ uint8_t usb4_version;
+#define CFG_USB4_V1_0 0x10
+ uint32_t flags_cs5; /* ROUTER_CS_5 */
+#define CFG_CS5_SLP (1 << 0)
+#define CFG_CS5_WOP (1 << 1)
+#define CFG_CS5_WOU (1 << 2)
+#define CFG_CS5_DP (1 << 3)
+#define CFG_CS5_C3S (1 << 23)
+#define CFG_CS5_PTO (1 << 24)
+#define CFG_CS5_UTO (1 << 25)
+#define CFG_CS5_HCO (1 << 26)
+#define CFG_CS5_CV (1 << 31)
+ uint32_t flags_cs6; /* ROUTER_CS_6 */
+#define CFG_CS6_SLPR (1 << 0)
+#define CFG_CS6_TNS (1 << 1)
+#define CFG_CS6_WAKE_PCIE (1 << 2)
+#define CFG_CS6_WAKE_USB3 (1 << 3)
+#define CFG_CS6_WAKE_DP (1 << 4)
+#define CFG_CS6_HCI (1 << 18)
+#define CFG_CS6_RR (1 << 24)
+#define CFG_CS6_CR (1 << 25)
+ uint32_t uuid_hi; /* ROUTER_CS_7 */
+ uint32_t uuid_lo; /* ROUTER_CS_8 */
+ uint32_t data[16]; /* ROUTER_CS_9-24 */
+ uint32_t metadata; /* ROUTER_CS_25 */
+ uint32_t opcode_status; /* ROUTER_CS_26 */
+/* TBD: Opcodes and status */
+#define CFG_ONS (1 << 30)
+#define CFG_OV (1 << 31)
+} __packed;
+
+#define TB_CFG_CAP_OFFSET_MAX 0xfff
+
+/* Config space router capability header 8.2.1.3/8.2.1.4 */
+struct tb_cfg_cap_hdr {
+ uint8_t next_cap;
+ uint8_t cap_id;
+} __packed;
+
+/* Config space router TMU registers 8.2.1.2 */
+struct tb_cfg_cap_tmu {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_TMU 0x03
+} __packed;
+
+struct tb_cfg_vsc_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSC 0x05
+ uint8_t vsc_id;
+ uint8_t len;
+} __packed;
+
+struct tb_cfg_vsec_cap {
+ struct tb_cfg_cap_hdr hdr;
+#define TB_CFG_CAP_VSEC 0x05
+ uint8_t vsec_id;
+ uint8_t len;
+ uint16_t vsec_next_cap;
+ uint16_t vsec_len;
+} __packed;
+
+union tb_cfg_cap {
+ struct tb_cfg_cap_hdr hdr;
+ struct tb_cfg_cap_tmu tmu;
+ struct tb_cfg_vsc_cap vsc;
+ struct tb_cfg_vsec_cap vsec;
+} __packed;
+
+#define TB_CFG_VSC_PLUG 0x01 /* Hot Plug and DROM */
+
+#define TB_CFG_VSEC_LC 0x06 /* Link Controller */
+#define TB_LC_DESC 0x02 /* LC Descriptor fields */
+#define TB_LC_DESC_NUM_LC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SHIFT 16
+#define TB_LC_DESC_PORT_MASK GENMASK(27, 16)
+#define TB_LC_UUID 0x03
+#define TB_LC_DP_SINK 0x10 /* Display Port config */
+#define TB_LC_PORT_ATTR 0x8d /* Port attributes */
+#define TB_LC_PORT_ATTR_BE (1 << 12) /* Bonding enabled */
+#define TB_LC_SX_CTRL 0x96 /* Sleep control */
+#define TB_LC_SX_CTRL_WOC (1 << 1)
+#define TB_LC_SX_CTRL_WOD (1 << 2)
+#define TB_LC_SX_CTRL_WOU4 (1 << 5)
+#define TB_LC_SX_CTRL_WOP (1 << 6)
+#define TB_LC_SX_CTRL_L1C (1 << 16)
+#define TB_LC_SX_CTRL_L1D (1 << 17)
+#define TB_LC_SX_CTRL_L2C (1 << 20)
+#define TB_LC_SX_CTRL_L2D (1 << 21)
+#define TB_LC_SX_CTRL_UFP (1 << 30)
+#define TB_LC_SX_CTRL_SLP (1 << 31)
+#define TB_LC_POWER 0x740
+
+/* Config space adapter basic registers 8.2.2.1 */
+struct tb_cfg_adapter {
+ uint16_t vendor_id; /* ADP CS0 */
+ uint16_t product_id;
+ uint32_t adp_cs1; /* ADP CS1 */
+#define ADP_CS1_NEXT_CAP_MASK GENMASK(7,0)
+#define GET_ADP_CS_NEXT_CAP(a) (a->adp_cs1 & \
+ ADP_CS1_NEXT_CAP_MASK)
+#define ADP_CS1_COUNTER_SHIFT 8
+#define ADP_CS1_COUNTER_MASK GENMASK(18,8)
+#define GET_ADP_CS_MAX_COUNTERS(a) ((a->adp_cs1 & \
+ ADP_CS1_COUNTER_MASK) >> \
+ ADP_CS1_COUNTER_SHIFT)
+#define CFG_COUNTER_CONFIG_FLAG (1 << 19)
+ uint32_t adp_cs2; /* ADP CS2 */
+#define ADP_CS2_TYPE_MASK GENMASK(23,0)
+#define GET_ADP_CS_TYPE(a) (a->adp_cs2 & ADP_CS2_TYPE_MASK)
+#define ADP_CS2_UNSUPPORTED 0x000000
+#define ADP_CS2_LANE 0x000001
+#define ADP_CS2_HOSTIF 0x000002
+#define ADP_CS2_PCIE_DFP 0x100101
+#define ADP_CS2_PCIE_UFP 0x100102
+#define ADP_CS2_DP_OUT 0x0e0102
+#define ADP_CS2_DP_IN 0x0e0101
+#define ADP_CS2_USB3_DFP 0x200101
+#define ADP_CS2_USB3_UFP 0x200102
+ uint32_t adp_cs3; /* ADP CS 3 */
+#define ADP_CS3_ADP_NUM_SHIFT 20
+#define ADP_CS3_ADP_NUM_MASK GENMASK(25,20)
+#define GET_ADP_CS_ADP_NUM(a) ((a->adp_cs3 & \
+ ADP_CS3_ADP_NUM_MASK) >> \
+ ADP_CS3_ADP_NUM_SHIFT)
+#define CFG_ADP_HEC_ERROR (1 << 29)
+#define CFG_ADP_FC_ERROR (1 << 30)
+#define CFG_ADP_SBC (1 << 31)
+} __packed;
+
+/* Config space lane adapter capability 8.2.2.3 */
+struct tb_cfg_cap_lane {
+ struct tb_cfg_cap_hdr hdr; /* LANE_ADP_CS_0 */
+#define TB_CFG_CAP_LANE 0x01
+ /* Supported link/width/power */
+ uint16_t supp_lwp;
+#define CAP_LANE_LINK_MASK GENMASK(3,0)
+#define CAP_LANE_LINK_GEN3 0x0004
+#define CAP_LANE_LINK_GEN2 0x0008
+#define CAP_LANE_WIDTH_MASK GENMASK(9,4)
+#define CAP_LANE_WIDTH_1X 0x0010
+#define CAP_LANE_WIDTH_2X 0x0020
+#define CAP_LANE_POWER_CL0 0x0400
+#define CAP_LANE_POWER_CL1 0x0800
+#define CAP_LANE_POWER_CL2 0x1000
+ /* Target link/width/power */
+ uint16_t targ_lwp; /* LANE_ADP_CS_1 */
+#define CAP_LANE_TARGET_GEN2 0x0008
+#define CAP_LANE_TARGET_GEN3 0x000c
+#define CAP_LANE_TARGET_SINGLE 0x0010
+#define CAP_LANE_TARGET_DUAL 0x0030
+#define CAP_LANE_DISABLE 0x4000
+#define CAP_LANE_BONDING 0x8000
+ /* Current link/width/state */
+ uint16_t current_lws;
+/* Same definitions as supp_lwp for bits 0-9 */
+#define CAP_LANE_STATE_SHIFT 10
+#define CAP_LANE_STATE_MASK GENMASK(13,10)
+#define CAP_LANE_STATE_DISABLE (0x0 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TRAINING (0x1 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL0 (0x2 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_TXCL0 (0x3 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_RXCL0 (0x4 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL1 (0x5 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CL2 (0x6 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_STATE_CLD (0x7 << CAP_LANE_STATE_SHIFT)
+#define CAP_LANE_PMS 0x4000
+ /* Logical Layer Errors */
+ uint16_t lle; /* LANE_ADP_CS_2 */
+#define CAP_LANE_LLE_MASK GENMASK(6,0)
+#define CAP_LANE_LLE_ALE 0x01
+#define CAP_LANE_LLE_OSE 0x02
+#define CAP_LANE_LLE_TE 0x04
+#define CAP_LANE_LLE_EBE 0x08
+#define CAP_LANE_LLE_DBE 0x10
+#define CAP_LANE_LLE_RDE 0x20
+#define CAP_LANE_LLE_RST 0x40
+ uint16_t lle_enable;
+} __packed;
+
+/* Config space path registers 8.2.3.1 */
+struct tb_cfg_path {
+} __packed;
+
+/* Config space counter registers 8.2.4 */
+struct tb_cfg_counters {
+} __packed;
+
+#endif /* _TBCFG_REG_H */
diff --git a/sys/dev/ti/if_ti.c b/sys/dev/ti/if_ti.c
index 6f88862d8009..14f7d353303f 100644
--- a/sys/dev/ti/if_ti.c
+++ b/sys/dev/ti/if_ti.c
@@ -2384,11 +2384,6 @@ ti_attach(device_t dev)
callout_init_mtx(&sc->ti_watchdog, &sc->ti_mtx, 0);
ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_sethwassist(ifp, TI_CSUM_FEATURES);
if_setcapabilities(ifp, IFCAP_TXCSUM | IFCAP_RXCSUM);
if_setcapenable(ifp, if_getcapabilities(sc->ti_ifp));
diff --git a/sys/dev/tpm/tpm20.c b/sys/dev/tpm/tpm20.c
index 3399e17f53aa..067e7ccae8f9 100644
--- a/sys/dev/tpm/tpm20.c
+++ b/sys/dev/tpm/tpm20.c
@@ -25,8 +25,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include <sys/random.h>
+#include <dev/random/randomdev.h>
#include "tpm20.h"
@@ -68,6 +68,7 @@ tpm20_read(struct cdev *dev, struct uio *uio, int flags)
{
struct tpm_sc *sc;
size_t bytes_to_transfer;
+ size_t offset;
int result = 0;
sc = (struct tpm_sc *)dev->si_drv1;
@@ -80,10 +81,10 @@ tpm20_read(struct cdev *dev, struct uio *uio, int flags)
}
bytes_to_transfer = MIN(sc->pending_data_length, uio->uio_resid);
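+	/* A read may be partial; resume where the previous one stopped. */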
+ offset = sc->total_length - sc->pending_data_length;
if (bytes_to_transfer > 0) {
- result = uiomove((caddr_t) sc->buf, bytes_to_transfer, uio);
- memset(sc->buf, 0, TPM_BUFSIZE);
- sc->pending_data_length = 0;
+ result = uiomove((caddr_t) sc->buf + offset, bytes_to_transfer, uio);
+ sc->pending_data_length -= bytes_to_transfer;
cv_signal(&sc->buf_cv);
} else {
result = ETIMEDOUT;
@@ -152,6 +153,7 @@ tpm20_discard_buffer(void *arg)
memset(sc->buf, 0, TPM_BUFSIZE);
sc->pending_data_length = 0;
+ sc->total_length = 0;
cv_signal(&sc->buf_cv);
sx_xunlock(&sc->dev_lock);
@@ -182,6 +184,13 @@ tpm20_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
return (ENOTTY);
}
+#ifdef TPM_HARVEST
+static const struct random_source random_tpm = {
+ .rs_ident = "TPM",
+ .rs_source = RANDOM_PURE_TPM,
+};
+#endif
+
int
tpm20_init(struct tpm_sc *sc)
{
@@ -191,6 +200,7 @@ tpm20_init(struct tpm_sc *sc)
cv_init(&sc->buf_cv, "TPM buffer cv");
callout_init(&sc->discard_buffer_callout, 1);
sc->pending_data_length = 0;
+ sc->total_length = 0;
make_dev_args_init(&args);
args.mda_devsw = &tpm20_cdevsw;
@@ -203,6 +213,7 @@ tpm20_init(struct tpm_sc *sc)
tpm20_release(sc);
#ifdef TPM_HARVEST
+ random_source_register(&random_tpm);
TIMEOUT_TASK_INIT(taskqueue_thread, &sc->harvest_task, 0,
tpm20_harvest, sc);
taskqueue_enqueue_timeout(taskqueue_thread, &sc->harvest_task, 0);
@@ -219,6 +230,7 @@ tpm20_release(struct tpm_sc *sc)
#ifdef TPM_HARVEST
if (device_is_attached(sc->dev))
taskqueue_drain_timeout(taskqueue_thread, &sc->harvest_task);
+ random_source_deregister(&random_tpm);
#endif
if (sc->buf != NULL)
@@ -275,6 +287,7 @@ tpm20_harvest(void *arg, int unused)
/* Ignore response size */
sc->pending_data_length = 0;
+ sc->total_length = 0;
/* The number of random bytes we got is placed right after the header */
entropy_size = (uint16_t) sc->buf[TPM_HEADER_SIZE + 1];
diff --git a/sys/dev/tpm/tpm20.h b/sys/dev/tpm/tpm20.h
index 683cd7549bd4..7c2ccd30143a 100644
--- a/sys/dev/tpm/tpm20.h
+++ b/sys/dev/tpm/tpm20.h
@@ -124,6 +124,7 @@ struct tpm_sc {
uint8_t *buf;
size_t pending_data_length;
+ size_t total_length;
lwpid_t owner_tid;
struct callout discard_buffer_callout;
diff --git a/sys/dev/tpm/tpm_bus.c b/sys/dev/tpm/tpm_bus.c
index f0d3c26c33f1..6e2cc5d81cb6 100644
--- a/sys/dev/tpm/tpm_bus.c
+++ b/sys/dev/tpm/tpm_bus.c
@@ -77,7 +77,7 @@ tpm_write_4(device_t dev, bus_size_t off, uint32_t val)
}
static void
-tpm_write_barrier(device_t dev, bus_addr_t off, bus_size_t length)
+tpm_write_barrier(device_t dev, bus_size_t off, bus_size_t length)
{
struct tpm_sc *sc = device_get_softc(dev);
diff --git a/sys/dev/tpm/tpm_crb.c b/sys/dev/tpm/tpm_crb.c
index b9ddcf0dd3e1..017ebd45c7ea 100644
--- a/sys/dev/tpm/tpm_crb.c
+++ b/sys/dev/tpm/tpm_crb.c
@@ -398,6 +398,7 @@ tpmcrb_transmit(device_t dev, size_t length)
tpmcrb_relinquish_locality(sc);
sc->pending_data_length = bytes_available;
+ sc->total_length = bytes_available;
return (0);
}
diff --git a/sys/dev/tpm/tpm_if.m b/sys/dev/tpm/tpm_if.m
index 2b6afe22f3e8..b0149ba163a6 100644
--- a/sys/dev/tpm/tpm_if.m
+++ b/sys/dev/tpm/tpm_if.m
@@ -41,7 +41,7 @@ METHOD int transmit {
METHOD uint64_t read_8 {
device_t dev;
- bus_addr_t addr;
+ bus_size_t addr;
}
#
@@ -49,28 +49,28 @@ METHOD uint64_t read_8 {
#
METHOD uint32_t read_4 {
device_t dev;
- bus_addr_t addr;
+ bus_size_t addr;
};
METHOD uint8_t read_1 {
device_t dev;
- bus_addr_t addr;
+ bus_size_t addr;
};
METHOD void write_4 {
device_t dev;
- bus_addr_t addr;
+ bus_size_t addr;
uint32_t value;
};
METHOD void write_1 {
device_t dev;
- bus_addr_t addr;
+ bus_size_t addr;
uint8_t value;
};
METHOD void write_barrier {
device_t dev;
- bus_addr_t off;
+ bus_size_t off;
bus_size_t length;
}
diff --git a/sys/dev/tpm/tpm_tis_acpi.c b/sys/dev/tpm/tpm_tis_acpi.c
index 32c73e5f2483..1d76d4f73266 100644
--- a/sys/dev/tpm/tpm_tis_acpi.c
+++ b/sys/dev/tpm/tpm_tis_acpi.c
@@ -83,4 +83,4 @@ static device_method_t tpmtis_methods[] = {
DEFINE_CLASS_2(tpmtis, tpmtis_acpi_driver, tpmtis_methods,
sizeof(struct tpm_sc), tpmtis_driver, tpm_bus_driver);
-DRIVER_MODULE(tpmtis, acpi, tpmtis_driver, 0, 0);
+DRIVER_MODULE(tpmtis, acpi, tpmtis_acpi_driver, 0, 0);
diff --git a/sys/dev/tpm/tpm_tis_core.c b/sys/dev/tpm/tpm_tis_core.c
index 230eb12d2acd..4159de4daf3b 100644
--- a/sys/dev/tpm/tpm_tis_core.c
+++ b/sys/dev/tpm/tpm_tis_core.c
@@ -97,6 +97,7 @@ tpmtis_attach(device_t dev)
{
struct tpm_sc *sc;
int result;
+ int poll = 0;
sc = device_get_softc(dev);
sc->dev = dev;
@@ -105,6 +106,12 @@ tpmtis_attach(device_t dev)
sx_init(&sc->dev_lock, "TPM driver lock");
sc->buf = malloc(TPM_BUFSIZE, M_TPM20, M_WAITOK);
+ resource_int_value("tpm", device_get_unit(dev), "use_polling", &poll);
+ if (poll != 0) {
+		device_printf(dev, "Using poll method to get TPM operation status\n");
+ goto skip_irq;
+ }
+
sc->irq_rid = 0;
sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
RF_ACTIVE | RF_SHAREABLE);
@@ -469,6 +476,7 @@ tpmtis_transmit(device_t dev, size_t length)
}
tpmtis_relinquish_locality(sc);
sc->pending_data_length = bytes_available;
+ sc->total_length = bytes_available;
return (0);
}
diff --git a/sys/dev/tsec/if_tsec.c b/sys/dev/tsec/if_tsec.c
index 1c03110e1889..80e42116ed9f 100644
--- a/sys/dev/tsec/if_tsec.c
+++ b/sys/dev/tsec/if_tsec.c
@@ -239,12 +239,6 @@ tsec_attach(struct tsec_softc *sc)
/* Create network interface for upper layers */
ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(sc->dev, "if_alloc() failed\n");
- tsec_detach(sc);
- return (ENOMEM);
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST);
diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c
index af151c8c4f06..fccd6689a6aa 100644
--- a/sys/dev/tws/tws.c
+++ b/sys/dev/tws/tws.c
@@ -311,7 +311,7 @@ attach_fail_4:
if (sc->cmd_tag)
bus_dma_tag_destroy(sc->cmd_tag);
attach_fail_3:
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if ( sc->irq_res[i] ){
if (bus_release_resource(sc->tws_dev,
SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
@@ -369,7 +369,7 @@ tws_detach(device_t dev)
tws_teardown_intr(sc);
/* Release irq resource */
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if ( sc->irq_res[i] ){
if (bus_release_resource(sc->tws_dev,
SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
@@ -402,7 +402,7 @@ tws_detach(device_t dev)
TWS_TRACE(sc, "bus release mem resource", 0, sc->reg_res_id);
}
- for ( i=0; i< tws_queue_depth; i++) {
+ for (i = 0; i < tws_queue_depth; i++) {
if (sc->reqs[i].dma_map)
bus_dmamap_destroy(sc->data_tag, sc->reqs[i].dma_map);
callout_drain(&sc->reqs[i].timeout);
@@ -432,7 +432,7 @@ tws_setup_intr(struct tws_softc *sc, int irqs)
{
int i, error;
- for(i=0;i<irqs;i++) {
+ for (i = 0; i < irqs; i++) {
if (!(sc->intr_handle[i])) {
if ((error = bus_setup_intr(sc->tws_dev, sc->irq_res[i],
INTR_TYPE_CAM | INTR_MPSAFE,
@@ -452,7 +452,7 @@ tws_teardown_intr(struct tws_softc *sc)
{
int i;
- for(i=0;i<sc->irqs;i++) {
+ for (i = 0; i < sc->irqs; i++) {
if (sc->intr_handle[i]) {
bus_teardown_intr(sc->tws_dev,
sc->irq_res[i], sc->intr_handle[i]);
@@ -669,8 +669,7 @@ tws_init_reqs(struct tws_softc *sc, u_int32_t dma_mem_size)
bzero(cmd_buf, dma_mem_size);
TWS_TRACE_DEBUG(sc, "phy cmd", sc->dma_mem_phys, 0);
mtx_lock(&sc->q_lock);
- for ( i=0; i< tws_queue_depth; i++)
- {
+ for (i = 0; i < tws_queue_depth; i++) {
if (bus_dmamap_create(sc->data_tag, 0, &sc->reqs[i].dma_map)) {
/* log a ENOMEM failure msg here */
mtx_unlock(&sc->q_lock);
diff --git a/sys/dev/tws/tws_services.c b/sys/dev/tws/tws_services.c
index da8bbacc39f7..e5c3d45c533f 100644
--- a/sys/dev/tws/tws_services.c
+++ b/sys/dev/tws/tws_services.c
@@ -200,7 +200,7 @@ tws_init_qs(struct tws_softc *sc)
{
mtx_lock(&sc->q_lock);
- for(int i=0;i<TWS_MAX_QS;i++) {
+ for (int i = 0; i < TWS_MAX_QS; i++) {
sc->q_head[i] = NULL;
sc->q_tail[i] = NULL;
}
diff --git a/sys/dev/uart/uart.h b/sys/dev/uart/uart.h
index 4cdec00c9829..b9401996e655 100644
--- a/sys/dev/uart/uart.h
+++ b/sys/dev/uart/uart.h
@@ -40,11 +40,13 @@
struct uart_bas {
bus_space_tag_t bst;
bus_space_handle_t bsh;
+ void *driver1;
u_int chan;
u_int rclk;
u_int regshft;
u_int regiowidth;
u_int busy_detect;
+	u_int rclk_guess;	/* if rclk == 0, use baud + divisor to compute rclk */
};
#define uart_regofs(bas, reg) ((reg) << (bas)->regshft)
diff --git a/sys/dev/uart/uart_bus.h b/sys/dev/uart/uart_bus.h
index ccf8ad06a8ec..a605e3d20be7 100644
--- a/sys/dev/uart/uart_bus.h
+++ b/sys/dev/uart/uart_bus.h
@@ -56,7 +56,6 @@
/* UART quirk flags */
#define UART_F_BUSY_DETECT 0x1
-#define UART_F_IGNORE_SPCR_REGSHFT 0x2
/*
* UART class & instance (=softc)
diff --git a/sys/dev/uart/uart_bus_fdt.c b/sys/dev/uart/uart_bus_fdt.c
index 7725d09e212b..431f2962adb2 100644
--- a/sys/dev/uart/uart_bus_fdt.c
+++ b/sys/dev/uart/uart_bus_fdt.c
@@ -234,7 +234,8 @@ uart_cpu_fdt_probe(struct uart_class **classp, bus_space_tag_t *bst,
(struct uart_class *)uart_fdt_find_by_node(node, 1);
if (class == NULL)
return (ENXIO);
- clk = 0;
+ if (uart_fdt_get_clock(node, &clk) != 0)
+ clk = 0;
}
/*
diff --git a/sys/dev/uart/uart_bus_pci.c b/sys/dev/uart/uart_bus_pci.c
index 5f82ef9307d0..22af8ee8663c 100644
--- a/sys/dev/uart/uart_bus_pci.c
+++ b/sys/dev/uart/uart_bus_pci.c
@@ -106,6 +106,20 @@ static const struct pci_id pci_ns8250_ids[] = {
{ 0x131f, 0x2000, 0xffff, 0, "Siig CyberSerial (1-port) 16550", 0x10 },
{ 0x131f, 0x2001, 0xffff, 0, "Siig CyberSerial (1-port) 16650", 0x10 },
{ 0x131f, 0x2002, 0xffff, 0, "Siig CyberSerial (1-port) 16850", 0x10 },
+{ 0x135a, 0x0a61, 0xffff, 0, "Brainboxes UC-324", 0x18 },
+{ 0x135a, 0x0aa1, 0xffff, 0, "Brainboxes UC-246", 0x18 },
+{ 0x135a, 0x0aa2, 0xffff, 0, "Brainboxes UC-246", 0x18 },
+{ 0x135a, 0x0d60, 0xffff, 0, "Intashield IS-100", 0x18 },
+{ 0x135a, 0x0da0, 0xffff, 0, "Intashield IS-300", 0x18 },
+{ 0x135a, 0x4000, 0xffff, 0, "Brainboxes PX-420", 0x10 },
+{ 0x135a, 0x4001, 0xffff, 0, "Brainboxes PX-431", 0x10 },
+{ 0x135a, 0x4002, 0xffff, 0, "Brainboxes PX-820", 0x10 },
+{ 0x135a, 0x4003, 0xffff, 0, "Brainboxes PX-831", 0x10 },
+{ 0x135a, 0x4004, 0xffff, 0, "Brainboxes PX-246", 0x10 },
+{ 0x135a, 0x4005, 0xffff, 0, "Brainboxes PX-101", 0x10 },
+{ 0x135a, 0x4006, 0xffff, 0, "Brainboxes PX-257", 0x10 },
+{ 0x135a, 0x4008, 0xffff, 0, "Brainboxes PX-846", 0x10 },
+{ 0x135a, 0x4009, 0xffff, 0, "Brainboxes PX-857", 0x10 },
{ 0x135c, 0x0190, 0xffff, 0, "Quatech SSCLP-100", 0x18 },
{ 0x135c, 0x01c0, 0xffff, 0, "Quatech SSCLP-200/300", 0x18 },
{ 0x135e, 0x7101, 0xffff, 0, "Sealevel Systems Single Port RS-232/422/485/530",
@@ -127,6 +141,8 @@ static const struct pci_id pci_ns8250_ids[] = {
0x10, 16384000 },
{ 0x1415, 0xc120, 0xffff, 0, "Oxford Semiconductor OXPCIe952 PCIe 16950 UART",
0x10 },
+{ 0x14a1, 0x0008, 0x14a1, 0x0008, "Systembase SB16C1058",
+ 0x10, 8 * DEFAULT_RCLK, },
{ 0x14e4, 0x160a, 0xffff, 0, "Broadcom TruManage UART", 0x10,
128 * DEFAULT_RCLK, 2},
{ 0x14e4, 0x4344, 0xffff, 0, "Sony Ericsson GC89 PC Card", 0x10},
diff --git a/sys/dev/uart/uart_core.c b/sys/dev/uart/uart_core.c
index c2bc818a6fc2..0ee43f6d2d94 100644
--- a/sys/dev/uart/uart_core.c
+++ b/sys/dev/uart/uart_core.c
@@ -38,11 +38,11 @@
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/reboot.h>
+#include <sys/stdarg.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/uart/uart.h>
#include <dev/uart/uart_bus.h>
@@ -559,13 +559,19 @@ uart_bus_probe(device_t dev, int regshft, int regiowidth, int rclk, int rid, int
uart_cpu_eqres(&sc->sc_bas, &sysdev->bas)) {
/* XXX check if ops matches class. */
sc->sc_sysdev = sysdev;
- sysdev->bas.rclk = sc->sc_bas.rclk;
- }
+ if (sysdev->bas.rclk != 0) {
+				/* Let the boot sequence's value win */
+ sc->sc_bas.rclk = sysdev->bas.rclk;
+ } else {
+				/* Boot didn't set it, use the class default */
+ sysdev->bas.rclk = sc->sc_bas.rclk;
+ }
+ }
}
error = UART_PROBE(sc);
bus_release_resource(dev, sc->sc_rtype, sc->sc_rrid, sc->sc_rres);
- return ((error) ? error : BUS_PROBE_DEFAULT);
+ return ((error) ? error : 0);
}
int
@@ -746,6 +752,11 @@ uart_bus_attach(device_t dev)
"rx_overruns", CTLFLAG_RD, &sc->sc_rxoverruns, 0,
"Receive overruns");
+ SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
+ "rclk", CTLFLAG_RD, &sc->sc_bas.rclk, 0,
+ "Baud clock for device");
+
return (0);
fail:
diff --git a/sys/dev/uart/uart_cpu_acpi.c b/sys/dev/uart/uart_cpu_acpi.c
index 9c9ffc1e3194..da77603f0093 100644
--- a/sys/dev/uart/uart_cpu_acpi.c
+++ b/sys/dev/uart/uart_cpu_acpi.c
@@ -44,23 +44,15 @@
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/actables.h>
-static struct acpi_uart_compat_data *
+static struct acpi_spcr_compat_data *
uart_cpu_acpi_scan(uint8_t interface_type)
{
- struct acpi_uart_compat_data **cd, *curcd;
+ struct acpi_spcr_compat_data **cd, *curcd;
int i;
- SET_FOREACH(cd, uart_acpi_class_and_device_set) {
+ SET_FOREACH(cd, uart_acpi_spcr_class_set) {
curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
- if (curcd[i].cd_port_subtype == interface_type)
- return (&curcd[i]);
- }
- }
-
- SET_FOREACH(cd, uart_acpi_class_set) {
- curcd = *cd;
- for (i = 0; curcd[i].cd_hid != NULL; i++) {
+ for (i = 0; curcd[i].cd_class != NULL; i++) {
if (curcd[i].cd_port_subtype == interface_type)
return (&curcd[i]);
}
@@ -143,7 +135,7 @@ uart_cpu_acpi_spcr(int devtype, struct uart_devinfo *di)
{
vm_paddr_t spcr_physaddr;
ACPI_TABLE_SPCR *spcr;
- struct acpi_uart_compat_data *cd;
+ struct acpi_spcr_compat_data *cd;
struct uart_class *class;
int error = ENXIO;
@@ -167,40 +159,60 @@ uart_cpu_acpi_spcr(int devtype, struct uart_devinfo *di)
if (error != 0)
goto out;
- switch (spcr->BaudRate) {
- case 0:
- /* Special value; means "keep current value unchanged". */
- di->baudrate = 0;
- break;
- case 3:
- di->baudrate = 9600;
- break;
- case 4:
- di->baudrate = 19200;
- break;
- case 6:
- di->baudrate = 57600;
- break;
- case 7:
- di->baudrate = 115200;
- break;
- default:
- printf("SPCR has reserved BaudRate value: %d!\n",
- (int)spcr->BaudRate);
- goto out;
+ /*
+ * SPCR Rev 4 and newer allow a precise baudrate to be passed in for
+ * things like 1.5M or 2.0M. If we have that, then use that value,
+ * otherwise try to decode the older enumeration.
+ */
+ if (spcr->Header.Revision >= 4 && spcr->PreciseBaudrate != 0) {
+ di->baudrate = spcr->PreciseBaudrate;
+ } else {
+ switch (spcr->BaudRate) {
+ case 0:
+ /* Special value; means "keep current value unchanged". */
+ di->baudrate = 0;
+ break;
+ case 3:
+ di->baudrate = 9600;
+ break;
+ case 4:
+ di->baudrate = 19200;
+ break;
+ case 6:
+ di->baudrate = 57600;
+ break;
+ case 7:
+ di->baudrate = 115200;
+ break;
+ default:
+ printf("SPCR has reserved BaudRate value: %d!\n",
+ (int)spcr->BaudRate);
+ goto out;
+ }
}
+
+ /*
+ * Rev 3 and newer can specify a rclk, use it if it's there. It's
+ * defined to be 0 when it's not known, and we've initialized rclk to 0
+ * in uart_cpu_acpi_init_devinfo, so we don't have to test for it.
+ */
+ if (spcr->Header.Revision >= 3)
+ di->bas.rclk = spcr->UartClkFreq;
+
+ /*
+ * If no rclk is set, then we will assume the BIOS has configured the
+ * hardware at the stated baudrate, so we can use it to guess the rclk
+ * relatively accurately, so make a note for later.
+ */
+ if (di->bas.rclk == 0)
+ di->bas.rclk_guess = 1;
+
if (spcr->PciVendorId != PCIV_INVALID &&
spcr->PciDeviceId != PCIV_INVALID) {
di->pci_info.vendor = spcr->PciVendorId;
di->pci_info.device = spcr->PciDeviceId;
}
- /* Apply device tweaks. */
- if ((cd->cd_quirks & UART_F_IGNORE_SPCR_REGSHFT) ==
- UART_F_IGNORE_SPCR_REGSHFT) {
- di->bas.regshft = cd->cd_regshft;
- }
-
/* Create a bus space handle. */
error = bus_space_map(di->bas.bst, spcr->SerialPort.Address,
uart_getrange(class), 0, &di->bas.bsh);
@@ -210,12 +222,89 @@ out:
return (error);
}
+static int
+uart_cpu_acpi_dbg2(struct uart_devinfo *di)
+{
+ vm_paddr_t dbg2_physaddr;
+ ACPI_TABLE_DBG2 *dbg2;
+ ACPI_DBG2_DEVICE *dbg2_dev;
+ ACPI_GENERIC_ADDRESS *base_address;
+ struct acpi_spcr_compat_data *cd;
+ struct uart_class *class;
+ int error;
+ bool found;
+
+ /* Look for the DBG2 table. */
+ dbg2_physaddr = acpi_find_table(ACPI_SIG_DBG2);
+ if (dbg2_physaddr == 0)
+ return (ENXIO);
+
+ dbg2 = acpi_map_table(dbg2_physaddr, ACPI_SIG_DBG2);
+ if (dbg2 == NULL) {
+ printf("Unable to map the DBG2 table!\n");
+ return (ENXIO);
+ }
+
+ error = ENXIO;
+
+ dbg2_dev = (ACPI_DBG2_DEVICE *)((uintptr_t)dbg2 + dbg2->InfoOffset);
+ found = false;
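+	/*
+	 * The table is a header followed by variable-length device
+	 * entries starting at InfoOffset; walk them looking for a
+	 * serial port with a class we recognize.
+	 */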
+ while ((uintptr_t)dbg2_dev + dbg2_dev->Length <=
+ (uintptr_t)dbg2 + dbg2->Header.Length) {
+ if (dbg2_dev->PortType != ACPI_DBG2_SERIAL_PORT)
+ goto next;
+
+ /* XXX: Too restrictive? */
+ if (dbg2_dev->RegisterCount != 1)
+ goto next;
+
+ cd = uart_cpu_acpi_scan(dbg2_dev->PortSubtype);
+ if (cd == NULL)
+ goto next;
+
+ class = cd->cd_class;
+ base_address = (ACPI_GENERIC_ADDRESS *)
+ ((uintptr_t)dbg2_dev + dbg2_dev->BaseAddressOffset);
+
+ error = uart_cpu_acpi_init_devinfo(di, class, base_address);
+ if (error == 0) {
+ found = true;
+ break;
+ }
+
+next:
+ dbg2_dev = (ACPI_DBG2_DEVICE *)
+ ((uintptr_t)dbg2_dev + dbg2_dev->Length);
+ }
+ if (!found)
+ goto out;
+
+ /* XXX: Find the correct value */
+ di->baudrate = 115200;
+
+ /* Create a bus space handle. */
+ error = bus_space_map(di->bas.bst, base_address->Address,
+ uart_getrange(class), 0, &di->bas.bsh);
+
+out:
+ acpi_unmap_table(dbg2);
+ return (error);
+}
+
int
uart_cpu_acpi_setup(int devtype, struct uart_devinfo *di)
{
+ char *cp;
+
switch(devtype) {
case UART_DEV_CONSOLE:
return (uart_cpu_acpi_spcr(devtype, di));
+ case UART_DEV_DBGPORT:
+ /* Use the Debug Port Table 2 (DBG2) to find a debug uart */
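+		/* Opt-in only, e.g. hw.acpi.enable_dbg2="yes" in loader.conf */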
+ cp = kern_getenv("hw.acpi.enable_dbg2");
+ if (cp != NULL && strcasecmp(cp, "yes") == 0)
+ return (uart_cpu_acpi_dbg2(di));
+ break;
}
return (ENXIO);
}
diff --git a/sys/dev/uart/uart_cpu_acpi.h b/sys/dev/uart/uart_cpu_acpi.h
index 94329e1f1349..218f643c7621 100644
--- a/sys/dev/uart/uart_cpu_acpi.h
+++ b/sys/dev/uart/uart_cpu_acpi.h
@@ -35,11 +35,18 @@
struct uart_class;
+struct acpi_spcr_compat_data {
+ struct uart_class *cd_class;
+ uint16_t cd_port_subtype;
+};
+SET_DECLARE(uart_acpi_spcr_class_set, struct acpi_spcr_compat_data);
+#define UART_ACPI_SPCR_CLASS(data) \
+ DATA_SET(uart_acpi_spcr_class_set, data)
+
struct acpi_uart_compat_data {
const char *cd_hid;
struct uart_class *cd_class;
- uint16_t cd_port_subtype;
int cd_regshft;
int cd_regiowidth;
int cd_rclk;
@@ -56,14 +63,6 @@ SET_DECLARE(uart_acpi_class_and_device_set, struct acpi_uart_compat_data);
#define UART_ACPI_CLASS_AND_DEVICE(data) \
DATA_SET(uart_acpi_class_and_device_set, data)
-/*
- * If your UART driver implements uart_class and custom device layer,
- * then use UART_ACPI_CLASS for its declaration
- */
-SET_DECLARE(uart_acpi_class_set, struct acpi_uart_compat_data);
-#define UART_ACPI_CLASS(data) \
- DATA_SET(uart_acpi_class_set, data)
-
/* Try to initialize UART device from ACPI tables */
int uart_cpu_acpi_setup(int devtype, struct uart_devinfo *di);
diff --git a/sys/dev/uart/uart_cpu_fdt.c b/sys/dev/uart/uart_cpu_fdt.c
index 1cc1c795f29f..fd1647cd78aa 100644
--- a/sys/dev/uart/uart_cpu_fdt.c
+++ b/sys/dev/uart/uart_cpu_fdt.c
@@ -81,7 +81,7 @@ uart_cpu_getdev(int devtype, struct uart_devinfo *di)
/* Allow overriding the FDT using the environment. */
class = &uart_ns8250_class;
err = uart_getenv(devtype, di, class);
- if (!err)
+ if (err == 0)
return (0);
err = uart_cpu_fdt_probe(&class, &bst, &bsh, &br, &rclk,
diff --git a/sys/dev/uart/uart_dev_ns8250.c b/sys/dev/uart/uart_dev_ns8250.c
index 16c3cb2fc5a9..c38d50e54ad8 100644
--- a/sys/dev/uart/uart_dev_ns8250.c
+++ b/sys/dev/uart/uart_dev_ns8250.c
@@ -77,6 +77,11 @@ static int broken_txfifo = 0;
SYSCTL_INT(_hw, OID_AUTO, broken_txfifo, CTLFLAG_RWTUN,
&broken_txfifo, 0, "UART FIFO has QEMU emulation bug");
+static int uart_noise_threshold = 0;
+SYSCTL_INT(_hw, OID_AUTO, uart_noise_threshold, CTLFLAG_RWTUN,
+ &uart_noise_threshold, 0,
+ "Number of UART RX interrupts where TX is not ready, before data is discarded");
+
/*
* To use early printf on x86, add the following to your kernel config:
*
@@ -126,11 +131,11 @@ ns8250_clrint(struct uart_bas *bas)
}
}
-static int
-ns8250_delay(struct uart_bas *bas)
+static uint32_t
+ns8250_get_divisor(struct uart_bas *bas)
{
- int divisor;
- u_char lcr;
+ uint32_t divisor;
+ uint8_t lcr;
lcr = uart_getreg(bas, REG_LCR);
uart_setreg(bas, REG_LCR, lcr | LCR_DLAB);
@@ -140,6 +145,16 @@ ns8250_delay(struct uart_bas *bas)
uart_setreg(bas, REG_LCR, lcr);
uart_barrier(bas);
+ return (divisor);
+}
+
+static int
+ns8250_delay(struct uart_bas *bas)
+{
+ int divisor;
+
+ divisor = ns8250_get_divisor(bas);
+
/* 1/10th the time to transmit 1 character (estimate). */
if (divisor <= 134)
return (16000000 * divisor / bas->rclk);
@@ -187,7 +202,7 @@ ns8250_drain(struct uart_bas *bas, int what)
while ((uart_getreg(bas, REG_LSR) & LSR_TEMT) == 0 && --limit)
DELAY(delay);
if (limit == 0) {
- /* printf("ns8250: transmitter appears stuck... "); */
+ /* printf("uart: ns8250: transmitter appears stuck... "); */
return (EIO);
}
}
@@ -215,7 +230,7 @@ ns8250_drain(struct uart_bas *bas, int what)
DELAY(delay << 2);
}
if (limit == 0) {
- /* printf("ns8250: receiver appears broken... "); */
+ /* printf("uart: ns8250: receiver appears broken... "); */
return (EIO);
}
}
@@ -250,12 +265,12 @@ ns8250_flush(struct uart_bas *bas, int what)
* https://github.com/rust-vmm/vm-superio/issues/83
*/
lsr = uart_getreg(bas, REG_LSR);
- if (((lsr & LSR_TEMT) == 0) && (what & UART_FLUSH_TRANSMITTER))
+ if (((lsr & LSR_THRE) == 0) && (what & UART_FLUSH_TRANSMITTER))
drain |= UART_DRAIN_TRANSMITTER;
if ((lsr & LSR_RXRDY) && (what & UART_FLUSH_RECEIVER))
drain |= UART_DRAIN_RECEIVER;
if (drain != 0) {
- printf("ns8250: UART FCR is broken\n");
+ printf("uart: ns8250: UART FCR is broken (%#x)\n", drain);
ns8250_drain(bas, drain);
}
}
@@ -284,8 +299,8 @@ ns8250_param(struct uart_bas *bas, int baudrate, int databits, int stopbits,
lcr |= LCR_STOPB;
lcr |= parity << 3;
- /* Set baudrate. */
- if (baudrate > 0) {
+	/* Set the baudrate only if both it and the rclk are nonzero. */
+ if (baudrate > 0 && bas->rclk > 0) {
divisor = ns8250_divisor(bas->rclk, baudrate);
if (divisor == 0)
return (EINVAL);
@@ -349,10 +364,6 @@ ns8250_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
{
u_char ier;
- if (bas->rclk == 0)
- bas->rclk = DEFAULT_RCLK;
- ns8250_param(bas, baudrate, databits, stopbits, parity);
-
/* Disable all interrupt sources. */
/*
* We use 0xe0 instead of 0xf0 as the mask because the XScale PXA
@@ -363,6 +374,30 @@ ns8250_init(struct uart_bas *bas, int baudrate, int databits, int stopbits,
uart_setreg(bas, REG_IER, ier);
uart_barrier(bas);
+ /*
+ * Loader tells us to infer the rclk when it sets xo to 0 in
+ * hw.uart.console. We know the baudrate was set by the firmware, so
+ * calculate rclk from baudrate and the divisor register. If 'div' is
+ * actually 0, the resulting 0 value will have us fall back to other
+ * rclk methods.
+ */
+ if (bas->rclk_guess && bas->rclk == 0 && baudrate != 0) {
+ uint32_t div;
+
+ div = ns8250_get_divisor(bas);
+ bas->rclk = baudrate * div * 16;
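+		/* e.g. divisor 1 at 115200 baud gives the classic 1.8432 MHz */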
+ }
+
+ /*
+	 * Pick a default because we just don't know. This likely needs future
+	 * refinement, but outside of consoles it's hard to know what to use.
+	 * Defer as long as possible if there's no defined baud rate.
+ */
+ if (bas->rclk == 0 && baudrate != 0)
+ bas->rclk = DEFAULT_RCLK;
+
+ ns8250_param(bas, baudrate, databits, stopbits, parity);
+
/* Disable the FIFO (if present). */
uart_setreg(bas, REG_FCR, 0);
uart_barrier(bas);
@@ -457,22 +492,32 @@ UART_CLASS(uart_ns8250_class);
* XXX -- refactor out ACPI and FDT ifdefs
*/
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_ns8250_class, ACPI_DBG2_16550_COMPATIBLE },
+ { &uart_ns8250_class, ACPI_DBG2_16550_SUBSET },
+ { &uart_ns8250_class, ACPI_DBG2_16550_WITH_GAS },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"AMD0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"AMDI0020", &uart_ns8250_class, 0, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
- {"MRVL0001", &uart_ns8250_class, ACPI_DBG2_16550_SUBSET, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
- {"SCX0006", &uart_ns8250_class, 0, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
- {"HISI0031", &uart_ns8250_class, 0, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
- {"NXP0018", &uart_ns8250_class, 0, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
- {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, 0, "Standard PC COM port"},
- {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, 0, "16550A-compatible COM port"},
- {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
- {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, 0, "Generic IRDA-compatible device"},
- {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
- {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
- {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
- {NULL, NULL, 0, 0 , 0, 0, 0, NULL},
+ {"AMD0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"AMDI0020", &uart_ns8250_class, 2, 0, 48000000, UART_F_BUSY_DETECT, "AMD / Synopsys Designware UART"},
+ {"APMC0D08", &uart_ns8250_class, 2, 4, 0, 0, "APM compatible UART"},
+ {"MRVL0001", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "Marvell / Synopsys Designware UART"},
+ {"SCX0006", &uart_ns8250_class, 2, 0, 62500000, UART_F_BUSY_DETECT, "SynQuacer / Synopsys Designware UART"},
+ {"HISI0031", &uart_ns8250_class, 2, 0, 200000000, UART_F_BUSY_DETECT, "HiSilicon / Synopsys Designware UART"},
+ {"INTC1006", &uart_ns8250_class, 2, 0, 25000000, 0, "Intel ARM64 UART"},
+ {"NXP0018", &uart_ns8250_class, 0, 0, 350000000, UART_F_BUSY_DETECT, "NXP / Synopsys Designware UART"},
+ {"PNP0500", &uart_ns8250_class, 0, 0, 0, 0, "Standard PC COM port"},
+ {"PNP0501", &uart_ns8250_class, 0, 0, 0, 0, "16550A-compatible COM port"},
+ {"PNP0502", &uart_ns8250_class, 0, 0, 0, 0, "Multiport serial device (non-intelligent 16550)"},
+ {"PNP0510", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"PNP0511", &uart_ns8250_class, 0, 0, 0, 0, "Generic IRDA-compatible device"},
+ {"WACF004", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen"},
+ {"WACF00E", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet PC Screen 00e"},
+ {"FUJ02E5", &uart_ns8250_class, 0, 0, 0, 0, "Wacom Tablet at FuS Lifebook T"},
+ {NULL, NULL, 0 , 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
@@ -725,14 +770,7 @@ ns8250_bus_ioctl(struct uart_softc *sc, int request, intptr_t data)
uart_barrier(bas);
break;
case UART_IOCTL_BAUD:
- lcr = uart_getreg(bas, REG_LCR);
- uart_setreg(bas, REG_LCR, lcr | LCR_DLAB);
- uart_barrier(bas);
- divisor = uart_getreg(bas, REG_DLL) |
- (uart_getreg(bas, REG_DLH) << 8);
- uart_barrier(bas);
- uart_setreg(bas, REG_LCR, lcr);
- uart_barrier(bas);
+ divisor = ns8250_get_divisor(bas);
baudrate = (divisor > 0) ? bas->rclk / divisor / 16 : 0;
if (baudrate > 0)
*(int*)data = baudrate;
@@ -987,6 +1025,7 @@ int
ns8250_bus_receive(struct uart_softc *sc)
{
struct uart_bas *bas;
+ struct ns8250_softc *ns8250 = (struct ns8250_softc *)sc;
int xc;
uint8_t lsr;
@@ -998,6 +1037,17 @@ ns8250_bus_receive(struct uart_softc *sc)
sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN;
break;
}
+		/*
+		 * Filter out possible noise on the line. Expect that the
+		 * device should be able to transmit as well as receive, so if
+		 * we receive too many characters before transmit is ready,
+		 * it's probably noise.
+		 */
+ if ((lsr & (LSR_TXRDY | LSR_TEMT)) == 0 &&
+ uart_noise_threshold > 0) {
+ if (++ns8250->noise_count >= uart_noise_threshold)
+ break;
+ } else
+ ns8250->noise_count = 0;
xc = uart_getreg(bas, REG_DATA);
if (lsr & LSR_FE)
xc |= UART_STAT_FRAMERR;
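[A minimal standalone sketch of the heuristic above, illustrative only: LSR_TXRDY/LSR_TEMT are the line-status bits used in this file, and the real code uses the uart_noise_threshold tunable plus a per-softc counter rather than a parameter and a static.

	/* Count characters that arrive while TX has never been ready. */
	static int noise_count;

	static bool
	rx_is_noise(uint8_t lsr, int threshold)
	{
		if ((lsr & (LSR_TXRDY | LSR_TEMT)) == 0 && threshold > 0)
			return (++noise_count >= threshold);
		noise_count = 0;	/* real traffic resets the counter */
		return (false);
	}
]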
diff --git a/sys/dev/uart/uart_dev_ns8250.h b/sys/dev/uart/uart_dev_ns8250.h
index 324ff72f6e5d..e8a17e96c268 100644
--- a/sys/dev/uart/uart_dev_ns8250.h
+++ b/sys/dev/uart/uart_dev_ns8250.h
@@ -41,6 +41,7 @@ struct ns8250_softc {
uint8_t ier_mask;
uint8_t ier_rxbits;
uint8_t busy_detect;
+ int noise_count;
};
extern struct uart_ops uart_ns8250_ops;
diff --git a/sys/dev/uart/uart_dev_pl011.c b/sys/dev/uart/uart_dev_pl011.c
index daba9d19704c..6afc693cd347 100644
--- a/sys/dev/uart/uart_dev_pl011.c
+++ b/sys/dev/uart/uart_dev_pl011.c
@@ -172,6 +172,27 @@ static int
uart_pl011_probe(struct uart_bas *bas)
{
+ /*
+ * Versions of QEMU before 41f7b58b634e (8.3) reported bogus values for
+	 * this table. The PL011 IP is always 32 bits wide and should be shifted
+ * 2 to match the 4-byte size of the data. QEMU reported these values
+ * incorrectly before that.
+ * https://github.com/qemu/qemu/commit/41f7b58b634ec3b60ae874375d2bbb61d790971e
+ *
+	 * In addition, other hardware vendors also reported this value
+ * incorrectly. It's not tied to what the ACPI device node is, but was a
+ * misunderstanding coupled with a Linux driver that didn't need the
+	 * right values. We used to rely on quirks to ignore the bad values;
+	 * now we detect the historic mistake and override it (to allow for a
+	 * future where we may need to override these values).
+ *
+ * PL011 Docs: https://developer.arm.com/documentation/ddi0183/latest/
+ */
+ if (bas->regshft == 0 || bas->regiowidth == 1) {
+ bas->regshft = 2;
+ bas->regiowidth = 4;
+ }
+
return (0);
}
@@ -231,6 +252,24 @@ uart_pl011_param(struct uart_bas *bas, int baudrate, int databits, int stopbits,
__uart_setreg(bas, UART_IFLS, FIFO_IFLS_BITS);
__uart_setreg(bas, UART_CR, ctrl);
+
+ /*
+ * Loader tells us to infer the rclk when it sets xo to 0 in
+ * hw.uart.console. The APCI SPCR code does likewise. We know the
+ * baudrate was set by the firmware, so calculate rclk from baudrate and
+ * the divisor register. If 'div' is actually 0, the resulting 0 value
+ * will have us fall back to other rclk methods. This method should be
+ * good to 5% or better because the error in baud rates needs to be
+ * below this for devices to communicate.
+ */
+ if (bas->rclk == 0 && baudrate > 0 && bas->rclk_guess) {
+ uint32_t div;
+
+ div = ((__uart_getreg(bas, UART_IBRD) & IBRD_BDIVINT) << 6) |
+ (__uart_getreg(bas, UART_FBRD) & FBRD_BDIVFRAC);
+ bas->rclk = (div * baudrate) / 4;
+ }
+
}
static void
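[Worked example of the PL011 arithmetic above, illustrative only: for a 24 MHz rclk at 115200 baud the firmware programs IBRD = 13 and FBRD = 1, so div = 13 * 64 + 1 = 833 and the recovered clock is 833 * 115200 / 4 = 23990400, about 0.04% off the true 24 MHz and comfortably inside the 5% tolerance mentioned in the comment.]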
@@ -338,7 +377,8 @@ static struct uart_class uart_pl011_class = {
.uc_ops = &uart_pl011_ops,
.uc_range = 0x48,
.uc_rclk = 0,
- .uc_rshift = 2
+ .uc_rshift = 2,
+ .uc_riowidth = 4,
};
UART_CLASS(uart_pl011_class);
@@ -351,11 +391,19 @@ UART_FDT_CLASS_AND_DEVICE(fdt_compat_data);
#endif
#ifdef DEV_ACPI
+static struct acpi_spcr_compat_data acpi_spcr_compat_data[] = {
+ { &uart_pl011_class, ACPI_DBG2_ARM_PL011 },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC },
+ { &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT },
+ { NULL, 0 },
+};
+UART_ACPI_SPCR_CLASS(acpi_spcr_compat_data);
+
static struct acpi_uart_compat_data acpi_compat_data[] = {
- {"ARMH0011", &uart_pl011_class, ACPI_DBG2_ARM_PL011, 2, 0, 0, UART_F_IGNORE_SPCR_REGSHFT, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_GENERIC, 2, 0, 0, UART_F_IGNORE_SPCR_REGSHFT, "uart pl011"},
- {"ARMHB000", &uart_pl011_class, ACPI_DBG2_ARM_SBSA_32BIT, 2, 0, 0, UART_F_IGNORE_SPCR_REGSHFT, "uart pl011"},
- {NULL, NULL, 0, 0, 0, 0, 0, NULL},
+ {"ARMH0011", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {"ARMHB000", &uart_pl011_class, 2, 0, 0, 0, "uart pl011"},
+ {NULL, NULL, 0, 0, 0, 0, NULL},
};
UART_ACPI_CLASS_AND_DEVICE(acpi_compat_data);
#endif
diff --git a/sys/dev/uart/uart_dev_quicc.c b/sys/dev/uart/uart_dev_quicc.c
index bd735f2da6f4..d6a8846b874e 100644
--- a/sys/dev/uart/uart_dev_quicc.c
+++ b/sys/dev/uart/uart_dev_quicc.c
@@ -412,7 +412,6 @@ quicc_bus_param(struct uart_softc *sc, int baudrate, int databits,
static int
quicc_bus_probe(struct uart_softc *sc)
{
- char buf[80];
int error;
error = quicc_probe(&sc->sc_bas);
@@ -422,8 +421,7 @@ quicc_bus_probe(struct uart_softc *sc)
sc->sc_rxfifosz = 1;
sc->sc_txfifosz = 1;
- snprintf(buf, sizeof(buf), "quicc, channel %d", sc->sc_bas.chan);
- device_set_desc_copy(sc->sc_dev, buf);
+ device_set_descf(sc->sc_dev, "quicc, channel %d", sc->sc_bas.chan);
return (0);
}
diff --git a/sys/dev/uart/uart_dev_z8530.c b/sys/dev/uart/uart_dev_z8530.c
index 2ca480a5690d..45bf63f20bb2 100644
--- a/sys/dev/uart/uart_dev_z8530.c
+++ b/sys/dev/uart/uart_dev_z8530.c
@@ -509,7 +509,6 @@ z8530_bus_param(struct uart_softc *sc, int baudrate, int databits,
static int
z8530_bus_probe(struct uart_softc *sc)
{
- char buf[80];
int error;
char ch;
@@ -522,8 +521,7 @@ z8530_bus_probe(struct uart_softc *sc)
ch = sc->sc_bas.chan - 1 + 'A';
- snprintf(buf, sizeof(buf), "z8530, channel %c", ch);
- device_set_desc_copy(sc->sc_dev, buf);
+ device_set_descf(sc->sc_dev, "z8530, channel %c", ch);
return (0);
}
diff --git a/sys/dev/uart/uart_subr.c b/sys/dev/uart/uart_subr.c
index 03c7fd8caea9..ca127b3a956e 100644
--- a/sys/dev/uart/uart_subr.c
+++ b/sys/dev/uart/uart_subr.c
@@ -185,6 +185,7 @@ out:
* mm = Memory mapped I/O address
* pa = Parity
* rs = Register shift
+ * rw = Register width
* sb = Stopbits
* xo = Device clock (xtal oscillator)
*
@@ -200,13 +201,6 @@ uart_getenv(int devtype, struct uart_devinfo *di, struct uart_class *class)
int error;
/*
- * All uart_class references are weak. Make sure the default
- * device class has been compiled-in.
- */
- if (class == NULL)
- return (ENXIO);
-
- /*
* Check the environment variables "hw.uart.console" and
* "hw.uart.dbgport". These variables, when present, specify
* which UART port is to be used as serial console or debug
@@ -278,6 +272,8 @@ uart_getenv(int devtype, struct uart_devinfo *di, struct uart_class *class)
break;
case UART_TAG_XO:
di->bas.rclk = uart_parse_long(&spec);
+ if (di->bas.rclk == 0)
+ di->bas.rclk_guess = 1;
break;
default:
goto inval;
@@ -298,6 +294,13 @@ uart_getenv(int devtype, struct uart_devinfo *di, struct uart_class *class)
freeenv(cp);
/*
+ * The default uart_class reference is weak. Make sure the default
+ * device class has been compiled-in or we've set one with dt=.
+ */
+ if (class == NULL)
+ return (ENXIO);
+
+ /*
* Accept only the well-known baudrates. Any invalid baudrate
* is silently replaced with a 0-valued baudrate. The 0 baudrate
* has special meaning. It means that we're not supposed to
diff --git a/sys/dev/uart/uart_tty.c b/sys/dev/uart/uart_tty.c
index faae077916f3..d15d1d0c6ac2 100644
--- a/sys/dev/uart/uart_tty.c
+++ b/sys/dev/uart/uart_tty.c
@@ -36,11 +36,11 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/reboot.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/tty.h>
#include <machine/resource.h>
-#include <machine/stdarg.h>
#include <dev/uart/uart.h>
#include <dev/uart/uart_bus.h>
diff --git a/sys/dev/ufshci/ufshci.c b/sys/dev/ufshci/ufshci.c
new file mode 100644
index 000000000000..84a9629e74b0
--- /dev/null
+++ b/sys/dev/ufshci/ufshci.c
@@ -0,0 +1,76 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/module.h>
+
+#include "ufshci_private.h"
+
+MALLOC_DEFINE(M_UFSHCI, "ufshci", "ufshci(4) memory allocations");
+
+int
+ufshci_attach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int status;
+
+ status = ufshci_ctrlr_construct(ctrlr, dev);
+ if (status != 0) {
+ ufshci_ctrlr_destruct(ctrlr, dev);
+ return (status);
+ }
+
+ ctrlr->config_hook.ich_func = ufshci_ctrlr_start_config_hook;
+ ctrlr->config_hook.ich_arg = ctrlr;
+
+ if (config_intrhook_establish(&ctrlr->config_hook) != 0)
+ return (ENOMEM);
+
+ return (0);
+}
+
+int
+ufshci_detach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+
+ config_intrhook_drain(&ctrlr->config_hook);
+
+ ufshci_ctrlr_destruct(ctrlr, dev);
+
+ return (0);
+}
+
+void
+ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
+ bool error)
+{
+ struct ufshci_completion_poll_status *status = arg;
+
+ /*
+ * Copy status into the argument passed by the caller, so that the
+ * caller can check the status to determine if the request passed
+ * or failed.
+ */
+ memcpy(&status->cpl.response_upiu, &cpl->response_upiu, cpl->size);
+ status->error = error;
+ atomic_store_rel_int(&status->done, 1);
+}
+
+static int
+ufshci_modevent(module_t mod __unused, int type __unused, void *argp __unused)
+{
+ return (0);
+}
+
+static moduledata_t ufshci_mod = { "ufshci", ufshci_modevent, 0 };
+
+DECLARE_MODULE(ufshci, ufshci_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
+MODULE_VERSION(ufshci, 1);
+MODULE_DEPEND(ufshci, cam, 1, 1, 1);
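[The matching ufshci_completion_poll() lives in ufshci_private.h (not shown in this diff); presumably it spins on status->done with an acquire load that pairs with the release store above. An illustrative caller sketch using only names from this file:

	struct ufshci_completion_poll_status status;

	status.done = 0;
	/* ...submit a request with ufshci_completion_poll_cb, &status... */
	while (atomic_load_acq_int(&status.done) == 0)
		pause("ufshci", 1);	/* wait for the callback to fire */
	if (status.error)
		printf("request failed\n");
]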
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
new file mode 100644
index 000000000000..b055d2d2d769
--- /dev/null
+++ b/sys/dev/ufshci/ufshci.h
@@ -0,0 +1,1086 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __UFSHCI_H__
+#define __UFSHCI_H__
+
+#include <sys/param.h>
+#include <sys/endian.h>
+
+/*
+ * Note: This driver currently assumes a little-endian architecture.
+ * Big-endian support is not yet implemented.
+ */
+
+/* MIPI UniPro spec 2.0, section 5.8.1 "PHY Adapter Common Attributes" */
+#define PA_AvailTxDataLanes 0x1520
+#define PA_AvailRxDataLanes 0x1540
+
+/*
+ * MIPI UniPro spec 2.0, section 5.8.2 "PHY Adapter M-PHY-Specific
+ * Attributes"
+ */
+#define PA_ConnectedTxDataLanes 0x1561
+#define PA_ConnectedRxDataLanes 0x1581
+#define PA_MaxRxHSGear 0x1587
+#define PA_Granularity 0x15AA
+#define PA_TActivate 0x15A8
+
+#define PA_RemoteVerInfo 0x15A0
+#define PA_LocalVerInfo 0x15A9
+
+/* UFSHCI spec 4.1, section 7.4 "UIC Power Mode Change" */
+#define PA_ActiveTxDataLanes 0x1560
+#define PA_ActiveRxDataLanes 0x1580
+#define PA_TxGear 0x1568
+#define PA_RxGear 0x1583
+#define PA_TxTermination 0x1569
+#define PA_RxTermination 0x1584
+#define PA_HSSeries 0x156A
+#define PA_PWRModeUserData0 0x15B0
+#define PA_PWRModeUserData1 0x15B1
+#define PA_PWRModeUserData2 0x15B2
+#define PA_PWRModeUserData3 0x15B3
+#define PA_PWRModeUserData4 0x15B4
+#define PA_PWRModeUserData5 0x15B5
+
+#define PA_TxHsAdaptType 0x15D4
+#define PA_PWRMode 0x1571
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
+/* Currently, UFS uses TC0 only. */
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+
+/* UFS Spec 4.1, section 6.4 "Reference Clock" */
+enum ufshci_attribute_reference_clock {
+ UFSHCI_REF_CLK_19_2MHz = 0x0,
+ UFSHCI_REF_CLK_26MHz = 0x1,
+ UFSHCI_REF_CLK_38_4MHz = 0x2,
+ UFSHCI_REF_CLK_OBSOLETE = 0x3,
+};
+
+/* UFS spec 4.1, section 9 "UFS UIC Layer: MIPI Unipro" */
+enum ufshci_uic_cmd_opcode {
+ /* Configuration */
+ UFSHCI_DME_GET = 0x01,
+ UFSHCI_DME_SET = 0x02,
+ UFSHCI_DME_PEER_GET = 0x03,
+ UFSHCI_DME_PEER_SET = 0x04,
+	/* Control */
+ UFSHCI_DME_POWER_ON = 0x10,
+ UFSHCI_DME_POWER_OFF = 0x11,
+ UFSHCI_DME_ENABLE = 0x12,
+ UFSHCI_DME_RESET = 0x14,
+ UFSHCI_DME_ENDPOINT_RESET = 0x15,
+ UFSHCI_DME_LINK_STARTUP = 0x16,
+ UFSHCI_DME_HIBERNATE_ENTER = 0x17,
+ UFSHCI_DME_HIBERNATE_EXIT = 0x18,
+ UFSHCI_DME_TEST_MODE = 0x1a,
+};
+
+/*
+ * UFSHCI spec 4.1, section 5.6.3 "Offset 98h: UICCMDARG2 – UIC Command
+ * Argument"
+ */
+enum ufshci_uic_cmd_attr_set_type {
+ UFSHCI_ATTR_SET_TYPE_NORMAL = 0, /* volatile value */
+ UFSHCI_ATTR_SET_TYPE_STATIC = 1, /* non-volatile reset value */
+};
+
+struct ufshci_uic_cmd {
+ uint8_t opcode;
+ uint32_t argument1;
+ uint32_t argument2;
+ uint32_t argument3;
+};
+
+/* UFS spec 4.1, section 10.5 "UPIU Transactions" */
+enum transaction_code {
+ UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT = 0x00,
+ UFSHCI_UPIU_TRANSACTION_CODE_COMMAND = 0x01,
+ UFSHCI_UPIU_TRANSACTION_CODE_DATA_OUT = 0x02,
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST = 0x04,
+ UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST = 0x16,
+ UFSHCI_UPIU_TRANSACTION_CODE_NOP_IN = 0x20,
+ UFSHCI_UPIU_TRANSACTION_CODE_RESPONSE = 0x21,
+ UFSHCI_UPIU_TRANSACTION_CODE_DATA_IN = 0x22,
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_RESPONSE = 0x24,
+ UFSHCI_UPIU_TRANSACTION_CODE_READY_TO_TRANSFER = 0x31,
+ UFSHCI_UPIU_TRANSACTION_CODE_QUERY_RESPONSE = 0x36,
+ UFSHCI_UPIU_TRANSACTION_CODE_REJECT_UPIU = 0x3f,
+};
+
+enum overall_command_status {
+ UFSHCI_DESC_SUCCESS = 0x0,
+ UFSHCI_DESC_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_DESC_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_DESC_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_DESC_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_DESC_ABORTED = 0x06,
+ UFSHCI_DESC_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_DESC_DEVICEFATALERROR = 0x08,
+ UFSHCI_DESC_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_DESC_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_DESC_INVALID = 0x0F,
+};
+
+enum response_code {
+ UFSHCI_RESPONSE_CODE_TARGET_SUCCESS = 0x00,
+ UFSHCI_RESPONSE_CODE_TARGET_FAILURE = 0x01,
+ UFSHCI_RESPONSE_CODE_PARAMETER_NOTREADABLE = 0xF6,
+ UFSHCI_RESPONSE_CODE_PARAMETER_NOTWRITEABLE = 0xF7,
+ UFSHCI_RESPONSE_CODE_PARAMETER_ALREADYWRITTEN = 0xF8,
+ UFSHCI_RESPONSE_CODE_INVALID_LENGTH = 0xF9,
+ UFSHCI_RESPONSE_CODE_INVALID_VALUE = 0xFA,
+ UFSHCI_RESPONSE_CODE_INVALID_SELECTOR = 0xFB,
+ UFSHCI_RESPONSE_CODE_INVALID_INDEX = 0xFC,
+ UFSHCI_RESPONSE_CODE_INVALID_IDN = 0xFD,
+ UFSHCI_RESPONSE_CODE_INVALID_OPCODE = 0xFE,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE = 0xFF,
+};
+
+/* UFSHCI spec 4.1, section 6.1.1 "UTP Transfer Request Descriptor" */
+enum ufshci_command_type {
+ UFSHCI_COMMAND_TYPE_UFS_STORAGE = 0x01,
+ UFSHCI_COMMAND_TYPE_NULLIFIED_UTRD = 0x0F,
+};
+
+enum ufshci_data_direction {
+ UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER = 0x00,
+ UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT = 0x01,
+	UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS = 0b10,
+ UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
+};
+
+enum ufshci_utr_overall_command_status {
+ UFSHCI_UTR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_UTR_OCS_ABORTED = 0x06,
+ UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_UTR_OCS_INVALID = 0xF,
+};
+
+struct ufshci_utp_xfer_req_desc {
+ /* dword 0 */
+ uint32_t cci : 8; /* [7:0] */
+ uint32_t total_ehs_length : 8; /* [15:8] */
+ uint32_t reserved0 : 7; /* [22:16] */
+ uint32_t ce : 1; /* [23] */
+ uint32_t interrupt : 1; /* [24] */
+ uint32_t data_direction : 2; /* [26:25] */
+ uint32_t reserved1 : 1; /* [27] */
+ uint32_t command_type : 4; /* [31:28] */
+
+ /* dword 1 */
+ uint32_t data_unit_number_lower; /* [31:0] */
+
+ /* dword 2 */
+ uint8_t overall_command_status; /* [7:0] */
+ uint8_t common_data_size; /* [15:8] */
+ uint16_t last_data_byte_count; /* [31:16] */
+
+ /* dword 3 */
+ uint32_t data_unit_number_upper; /* [31:0] */
+
+ /* dword 4 */
+ uint32_t utp_command_descriptor_base_address; /* [31:0] */
+
+ /* dword 5 */
+ uint32_t utp_command_descriptor_base_address_upper; /* [31:0] */
+
+ /* dword 6 */
+ uint16_t response_upiu_length; /* [15:0] */
+ uint16_t response_upiu_offset; /* [31:16] */
+
+ /* dword 7 */
+ uint16_t prdt_length; /* [15:0] */
+ uint16_t prdt_offset; /* [31:16] */
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_utp_xfer_req_desc) == 32,
+ "ufshci_utp_xfer_req_desc must be 32 bytes");
+
+/*
+ * According to the UFSHCI specification, the UTP command descriptor is
+ * sized as follows: the transfer request size is not limited, a transfer
+ * response can be as long as 65535 dwords, and a PRDT can be as long as
+ * 65535 PRDT entries (16 bytes each). However, for ease of use, this
+ * UFSHCI driver imposes the following limits: the transfer request and
+ * the transfer response together take at most 1024 bytes, and the PRDT
+ * region limits the number of scatter/gather entries to 256 + 1, using a
+ * total of 4096 + 16 bytes. Therefore, only 8KB is allocated for the UTP
+ * command descriptor.
+ */
+#define UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE 8192
+#define UFSHCI_UTP_XFER_REQ_SIZE 512
+#define UFSHCI_UTP_XFER_RESP_SIZE 512
+
+/*
+ * To keep the UTP command descriptor at 8KB, we must use only 256 + 1
+ * PRDT entries. The extra entry covers the case where the data buffer is
+ * not aligned, which requires one additional PRDT entry.
+ */
+#define UFSHCI_MAX_PRDT_ENTRY_COUNT (256 + 1)
+
+/* UFSHCI spec 4.1, section 6.1.2 "UTP Command Descriptor" */
+struct ufshci_prdt_entry {
+ /* dword 0 */
+ uint32_t data_base_address; /* [31:0] */
+
+ /* dword 1 */
+ uint32_t data_base_address_upper; /* [31:0] */
+
+ /* dword 2 */
+ uint32_t reserved; /* [31:0] */
+
+ /* dword 3 */
+ uint32_t data_byte_count; /* [17:0] Maximum byte
+ * count is 256KB */
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_prdt_entry) == 16,
+ "ufshci_prdt_entry must be 16 bytes");
+
+struct ufshci_utp_cmd_desc {
+ uint8_t command_upiu[UFSHCI_UTP_XFER_REQ_SIZE];
+ uint8_t response_upiu[UFSHCI_UTP_XFER_RESP_SIZE];
+ uint8_t prd_table[sizeof(struct ufshci_prdt_entry) *
+ UFSHCI_MAX_PRDT_ENTRY_COUNT];
+ uint8_t padding[3072 - sizeof(struct ufshci_prdt_entry)];
+} __packed __aligned(128);
+
+_Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
+ UFSHCI_UTP_COMMAND_DESCRIPTOR_SIZE,
+ "ufshci_utp_cmd_desc must be 8192 bytes");
+
+#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
+#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+
+enum ufshci_utmr_overall_command_status {
+ UFSHCI_UTMR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
+ UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
+ UFSHCI_UTMR_OCS_ABORTED = 0x05,
+ UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
+ UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
+ UFSHCI_UTMR_OCS_INVALID = 0xF,
+};
+
+/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
+struct ufshci_utp_task_mgmt_req_desc {
+ /* dword 0 */
+ uint32_t reserved0 : 24; /* [23:0] */
+ uint32_t interrupt : 1; /* [24] */
+ uint32_t reserved1 : 7; /* [31:25] */
+
+ /* dword 1 */
+ uint32_t reserved2; /* [31:0] */
+
+ /* dword 2 */
+ uint8_t overall_command_status; /* [7:0] */
+ uint8_t reserved3; /* [15:8] */
+ uint16_t reserved4; /* [31:16] */
+
+ /* dword 3 */
+ uint32_t reserved5; /* [31:0] */
+
+ /* dword 4-11 */
+ uint8_t request_upiu[UFSHCI_UTP_TASK_MGMT_REQ_SIZE];
+
+ /* dword 12-19 */
+ uint8_t response_upiu[UFSHCI_UTP_TASK_MGMT_RESP_SIZE];
+
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_utp_task_mgmt_req_desc) == 80,
+ "ufshci_utp_task_mgmt_req_desc must be 80 bytes");
+
+/* UFS spec 4.1, section 10.6.2 "Basic Header Format" */
+struct ufshci_upiu_header {
+ /* dword 0 */
+ union {
+ struct {
+ uint8_t trans_code : 6; /* [5:0] */
+ uint8_t dd : 1; /* [6] */
+ uint8_t hd : 1; /* [7] */
+ };
+ uint8_t trans_type;
+ };
+ union {
+ struct {
+ uint8_t task_attribute : 2; /* [1:0] */
+ uint8_t cp : 1; /* [2] */
+ uint8_t retransmit_indicator : 1; /* [3] */
+#define UFSHCI_OPERATIONAL_FLAG_W 0x2
+#define UFSHCI_OPERATIONAL_FLAG_R 0x4
+ uint8_t operational_flags : 4; /* [7:4] */
+ };
+ uint8_t flags;
+ };
+ uint8_t lun;
+ uint8_t task_tag;
+
+ /* dword 1 */
+#define UFSHCI_COMMAND_SET_TYPE_SCSI 0
+ uint8_t cmd_set_type : 4; /* [3:0] */
+ uint8_t iid : 4; /* [7:4] */
+ uint8_t ext_iid_or_function;
+ uint8_t response;
+ uint8_t ext_iid_or_status;
+
+ /* dword 2 */
+ uint8_t ehs_length;
+ uint8_t device_infomation;
+ uint16_t data_segment_length; /* (Big-endian) */
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_upiu_header) == 12,
+ "ufshci_upiu_header must be 12 bytes");
+
+#define UFSHCI_MAX_UPIU_SIZE 512
+#define UFSHCI_UPIU_ALIGNMENT 8 /* UPIU requires 64-bit alignment. */
+
+struct ufshci_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-127 */
+ uint8_t
+ reserved[UFSHCI_MAX_UPIU_SIZE - sizeof(struct ufshci_upiu_header)];
+} __packed __aligned(8);
+
+_Static_assert(sizeof(struct ufshci_upiu) == 512,
+ "ufshci_upiu must be 512 bytes");
+
+/* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */
+struct ufshci_cmd_command_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t expected_data_transfer_length; /* (Big-endian) */
+
+ /* dword 4-7 */
+ uint8_t cdb[16];
+
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) == 32,
+ "bad size for ufshci_cmd_command_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) <=
+ UFSHCI_UTP_XFER_REQ_SIZE,
+ "bad size for ufshci_cmd_command_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */
+struct ufshci_cmd_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t residual_transfer_count; /* (Big-endian) */
+
+ /* dword 4-7 */
+ uint8_t reserved[16];
+
+ /* Sense Data */
+ uint16_t sense_data_len; /* (Big-endian) */
+ uint8_t sense_data[18];
+
+	/* Add padding to satisfy the UPIU alignment requirement. */
+ uint8_t padding[4];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) == 56,
+ "bad size for ufshci_cmd_response_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_cmd_response_upiu");
+_Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_function {
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02,
+ UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04,
+ UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81,
+};
+
+/* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */
+struct ufshci_task_mgmt_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t input_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t input_param2; /* (Big-endian) */
+ /* dword 5 */
+ uint32_t input_param3; /* (Big-endian) */
+ /* dword 6-7 */
+ uint8_t reserved[8];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_service_response {
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09,
+};
+
+/* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */
+struct ufshci_task_mgmt_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t output_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t output_param2; /* (Big-endian) */
+ /* dword 5-7 */
+ uint8_t reserved[12];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
+enum ufshci_query_function {
+ UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+enum ufshci_query_opcode {
+ UFSHCI_QUERY_OPCODE_NOP = 0,
+ UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR,
+ UFSHCI_QUERY_OPCODE_WRITE_DESCRIPTOR,
+ UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE,
+ UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE,
+ UFSHCI_QUERY_OPCODE_READ_FLAG,
+ UFSHCI_QUERY_OPCODE_SET_FLAG,
+ UFSHCI_QUERY_OPCODE_CLEAR_FLAG,
+ UFSHCI_QUERY_OPCODE_TOGGLE_FLAG,
+};
+
+struct ufshci_query_param {
+ enum ufshci_query_function function;
+ enum ufshci_query_opcode opcode;
+ uint8_t type;
+ uint8_t index;
+ uint8_t selector;
+ uint64_t value;
+ size_t desc_size;
+};
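[An illustrative fill of ufshci_query_param for a standard device-descriptor read, using only the enums defined in this header (the code that consumes this struct lives elsewhere in the driver):

	struct ufshci_query_param p = {
		.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST,
		.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR,
		.type = UFSHCI_DESC_TYPE_DEVICE,
		.index = 0,
		.selector = 0,
		.desc_size = sizeof(struct ufshci_device_descriptor),
	};
]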
+
+struct ufshci_query_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+
+ /* dword 4-5 */
+ union {
+		/* The Write Attribute opcode uses a 64-bit value. */
+ uint64_t value_64; /* (Big-endian) */
+ struct {
+ uint8_t reserved1[2];
+ uint16_t length; /* (Big-endian) */
+ uint32_t value_32; /* (Big-endian) */
+ };
+ } __packed __aligned(4);
+
+ /* dword 6 */
+ uint32_t reserved2;
+
+ /* dword 7 */
+ uint32_t reserved3;
+
+ uint8_t command_data[256];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_query_request_upiu) == 288,
+ "bad size for ufshci_query_request_upiu");
+_Static_assert(sizeof(struct ufshci_query_request_upiu) <=
+ UFSHCI_UTP_XFER_REQ_SIZE,
+ "bad size for ufshci_query_request_upiu");
+_Static_assert(sizeof(struct ufshci_query_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS Spec 4.1, section 10.7.9 "QUERY RESPONSE UPIU" */
+enum ufshci_query_response_code {
+ UFSHCI_QUERY_RESP_CODE_SUCCESS = 0x00,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_READABLE = 0xf6,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_NOT_WRITEABLE = 0xf7,
+ UFSHCI_QUERY_RESP_CODE_PARAMETER_ALREADY_WRITTEN = 0xf8,
+ UFSHCI_QUERY_RESP_CODE_INVALID_LENGTH = 0xf9,
+ UFSHCI_QUERY_RESP_CODE_INVALID_VALUE = 0xfa,
+ UFSHCI_QUERY_RESP_CODE_INVALID_SELECTOR = 0xfb,
+ UFSHCI_QUERY_RESP_CODE_INVALID_INDEX = 0xfc,
+ UFSHCI_QUERY_RESP_CODE_INVALID_IDN = 0xfd,
+ UFSHCI_QUERY_RESP_CODE_INVALID_OPCODE = 0xfe,
+ UFSHCI_QUERY_RESP_CODE_GENERAL_FAILURE = 0xff,
+};
+
+struct ufshci_query_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint8_t opcode;
+ uint8_t idn;
+ uint8_t index;
+ uint8_t selector;
+
+ /* dword 4-5 */
+ union {
+		/* The Read/Write Attribute opcodes use a 64-bit value. */
+ uint64_t value_64; /* (Big-endian) */
+ struct {
+ uint8_t reserved1[2];
+ uint16_t length; /* (Big-endian) */
+ union {
+ uint32_t value_32; /* (Big-endian) */
+ struct {
+ uint8_t reserved2[3];
+ uint8_t flag_value;
+ };
+ };
+ };
+ } __packed __aligned(4);
+
+ /* dword 6 */
+ uint8_t reserved3[4];
+
+ /* dword 7 */
+ uint8_t reserved4[4];
+
+ uint8_t command_data[256];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_query_response_upiu) == 288,
+ "bad size for ufshci_query_response_upiu");
+_Static_assert(sizeof(struct ufshci_query_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_query_response_upiu");
+_Static_assert(sizeof(struct ufshci_query_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+/* UFS 4.1, section 10.7.11 "NOP OUT UPIU" */
+struct ufshci_nop_out_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-7 */
+ uint8_t reserved[20];
+} __packed __aligned(8);
+_Static_assert(sizeof(struct ufshci_nop_out_upiu) == 32,
+ "ufshci_upiu_nop_out must be 32 bytes");
+
+/* UFS 4.1, section 10.7.12 "NOP IN UPIU" */
+struct ufshci_nop_in_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3-7 */
+ uint8_t reserved[20];
+} __packed __aligned(8);
+_Static_assert(sizeof(struct ufshci_nop_in_upiu) == 32,
+ "ufshci_upiu_nop_in must be 32 bytes");
+
+union ufshci_reponse_upiu {
+ struct ufshci_upiu_header header;
+ struct ufshci_cmd_response_upiu cmd_response_upiu;
+ struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu;
+ struct ufshci_nop_in_upiu nop_in_upiu;
+};
+
+struct ufshci_completion {
+ union ufshci_reponse_upiu response_upiu;
+ size_t size;
+};
+
+typedef void (*ufshci_cb_fn_t)(void *, const struct ufshci_completion *, bool);
+
+/*
+ * UFS Spec 4.1, section 14.1 "UFS Descriptors"
+ * All descriptors use big-endian byte ordering.
+ */
+enum ufshci_descriptor_type {
+ UFSHCI_DESC_TYPE_DEVICE = 0x00,
+ UFSHCI_DESC_TYPE_CONFIGURATION = 0x01,
+ UFSHCI_DESC_TYPE_UNIT = 0x02,
+ UFSHCI_DESC_TYPE_INTERCONNECT = 0x04,
+ UFSHCI_DESC_TYPE_STRING = 0x05,
+	UFSHCI_DESC_TYPE_GEOMETRY = 0x07,
+ UFSHCI_DESC_TYPE_POWER = 0x08,
+ UFSHCI_DESC_TYPE_DEVICE_HEALTH = 0x09,
+ UFSHCI_DESC_TYPE_FBO_EXTENSION_SPECIFICATION = 0x0a,
+};
+
+/*
+ * UFS Spec 4.1, section 14.1.5.2 "Device Descriptor"
+ * DeviceDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_device_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bDevice;
+ uint8_t bDeviceClass;
+ uint8_t bDeviceSubClass;
+ uint8_t bProtocol;
+ uint8_t bNumberLU;
+ uint8_t bNumberWLU;
+ uint8_t bBootEnable;
+ uint8_t bDescrAccessEn;
+ uint8_t bInitPowerMode;
+ uint8_t bHighPriorityLUN;
+ uint8_t bSecureRemovalType;
+ uint8_t bSecurityLU;
+ uint8_t bBackgroundOpsTermLat;
+ uint8_t bInitActiveICCLevel;
+ /* 0x10 */
+ uint16_t wSpecVersion;
+ uint16_t wManufactureDate;
+ uint8_t iManufacturerName;
+ uint8_t iProductName;
+ uint8_t iSerialNumber;
+ uint8_t iOemID;
+ uint16_t wManufacturerID;
+ uint8_t bUD0BaseOffset;
+ uint8_t bUDConfigPLength;
+ uint8_t bDeviceRTTCap;
+ uint16_t wPeriodicRTCUpdate;
+ uint8_t bUfsFeaturesSupport;
+ /* 0x20 */
+ uint8_t bFFUTimeout;
+ uint8_t bQueueDepth;
+ uint16_t wDeviceVersion;
+ uint8_t bNumSecureWPArea;
+ uint32_t dPSAMaxDataSize;
+ uint8_t bPSAStateTimeout;
+ uint8_t iProductRevisionLevel;
+ uint8_t Reserved[5];
+ /* 0x2a */
+ /* 0x30 */
+ uint8_t ReservedUME[16];
+ /* 0x40 */
+ uint8_t ReservedHpb[3];
+ uint8_t Reserved2[12];
+ uint32_t dExtendedUfsFeaturesSupport;
+ uint8_t bWriteBoosterBufferPreserveUserSpaceEn;
+ uint8_t bWriteBoosterBufferType;
+ uint32_t dNumSharedWriteBoosterBufferAllocUnits;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_device_descriptor) == 89,
+ "bad size for ufshci_device_descriptor");
+
+/* Defines the bit field of dExtendedUfsFeaturesSupport. */
+enum ufshci_desc_wb_ext_ufs_feature {
+ UFSHCI_DESC_EXT_UFS_FEATURE_FFU = (1 << 0),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PSA = (1 << 1),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LIFE_SPAN = (1 << 2),
+ UFSHCI_DESC_EXT_UFS_FEATURE_REFRESH_OP = (1 << 3),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_HIGH_TEMP = (1 << 4),
+ UFSHCI_DESC_EXT_UFS_FEATURE_TOO_LOW_TEMP = (1 << 5),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_TEMP = (1 << 6),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HPB_SUPPORT = (1 << 7),
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER = (1 << 8),
+ UFSHCI_DESC_EXT_UFS_FEATURE_PERF_THROTTLING = (1 << 9),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ADVANCED_RPMB = (1 << 10),
+ UFSHCI_DESC_EXT_UFS_FEATURE_ZONED_UFS_EXTENSION = (1 << 11),
+ UFSHCI_DESC_EXT_UFS_FEATURE_DEV_LEVEL_EXCEPTION = (1 << 12),
+ UFSHCI_DESC_EXT_UFS_FEATURE_HID = (1 << 13),
+ UFSHCI_DESC_EXT_UFS_FEATURE_BARRIER = (1 << 14),
+ UFSHCI_DESC_EXT_UFS_FEATURE_CLEAR_ERROR_HISTORY = (1 << 15),
+ UFSHCI_DESC_EXT_UFS_FEATURE_EXT_IID = (1 << 16),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FBO = (1 << 17),
+ UFSHCI_DESC_EXT_UFS_FEATURE_FAST_RECOVERY_MODE = (1 << 18),
+ UFSHCI_DESC_EXT_UFS_FEATURE_RPMB_VENDOR_CMD = (1 << 19),
+};
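[Since descriptors are big-endian, an illustrative test of one of these bits, with desc being a struct ufshci_device_descriptor populated from the device:

	uint32_t feats;

	feats = be32toh(desc->dExtendedUfsFeaturesSupport);
	if ((feats & UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER) != 0)
		printf("device supports WriteBooster\n");
]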
+
+/* Defines the bit field of bWriteBoosterBufferType. */
+enum ufshci_desc_wb_buffer_type {
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED = 0x00,
+ UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED = 0x01,
+};
+
+/* Defines the bit field of bWriteBoosterBufferPreserveUserSpaceEn. */
+enum ufshci_desc_user_space_config {
+ UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION = 0x00,
+ UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE = 0x01,
+};
+
+/*
+ * UFS Spec 4.1, section 14.1.5.3 "Configuration Descriptor"
+ * ConfigurationDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_unit_descriptor_configurable_parameters {
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bMemoryType;
+ uint32_t dNumAllocUnits;
+ uint8_t bDataReliability;
+ uint8_t bLogicalBlockSize;
+ uint8_t bProvisioningType;
+ uint16_t wContextCapabilities;
+ union {
+ struct {
+ uint8_t Reserved[3];
+ uint8_t ReservedHpb[6];
+ } __packed;
+ uint16_t wZoneBufferAllocUnits;
+ };
+ uint32_t dLUNumWriteBoosterBufferAllocUnits;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_unit_descriptor_configurable_parameters) ==
+ 27,
+ "bad size for ufshci_unit_descriptor_configurable_parameters");
+
+#define UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM 8
+
+struct ufshci_configuration_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bConfDescContinue;
+ uint8_t bBootEnable;
+ uint8_t bDescrAccessEn;
+ uint8_t bInitPowerMode;
+ uint8_t bHighPriorityLUN;
+ uint8_t bSecureRemovalType;
+ uint8_t bInitActiveICCLevel;
+ uint16_t wPeriodicRTCUpdate;
+ uint8_t Reserved;
+ uint8_t bRPMBRegionEnable;
+ uint8_t bRPMBRegion1Size;
+ uint8_t bRPMBRegion2Size;
+ uint8_t bRPMBRegion3Size;
+ uint8_t bWriteBoosterBufferPreserveUserSpaceEn;
+ uint8_t bWriteBoosterBufferType;
+ uint32_t dNumSharedWriteBoosterBufferAllocUnits;
+ /* 0x16 */
+ struct ufshci_unit_descriptor_configurable_parameters
+ unit_config_params[UFSHCI_CONFIGURATION_DESCEIPTOR_LU_NUM];
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_configuration_descriptor) == (22 + 27 * 8),
+ "bad size for ufshci_configuration_descriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.4 "Geometry Descriptor"
+ * GeometryDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_geometry_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bMediaTechnology;
+ uint8_t Reserved;
+ uint64_t qTotalRawDeviceCapacity;
+ uint8_t bMaxNumberLU;
+ uint32_t dSegmentSize;
+ /* 0x11 */
+ uint8_t bAllocationUnitSize;
+ uint8_t bMinAddrBlockSize;
+ uint8_t bOptimalReadBlockSize;
+ uint8_t bOptimalWriteBlockSize;
+ uint8_t bMaxInBufferSize;
+ uint8_t bMaxOutBufferSize;
+ uint8_t bRPMB_ReadWriteSize;
+ uint8_t bDynamicCapacityResourcePolicy;
+ uint8_t bDataOrdering;
+ uint8_t bMaxContexIDNumber;
+ uint8_t bSysDataTagUnitSize;
+ uint8_t bSysDataTagResSize;
+ uint8_t bSupportedSecRTypes;
+ uint16_t wSupportedMemoryTypes;
+ /* 0x20 */
+ uint32_t dSystemCodeMaxNAllocU;
+ uint16_t wSystemCodeCapAdjFac;
+ uint32_t dNonPersistMaxNAllocU;
+ uint16_t wNonPersistCapAdjFac;
+ uint32_t dEnhanced1MaxNAllocU;
+ /* 0x30 */
+ uint16_t wEnhanced1CapAdjFac;
+ uint32_t dEnhanced2MaxNAllocU;
+ uint16_t wEnhanced2CapAdjFac;
+ uint32_t dEnhanced3MaxNAllocU;
+ uint16_t wEnhanced3CapAdjFac;
+ uint32_t dEnhanced4MaxNAllocU;
+ /* 0x42 */
+ uint16_t wEnhanced4CapAdjFac;
+ uint32_t dOptimalLogicalBlockSize;
+ uint8_t ReservedHpb[5];
+ uint8_t Reserved2[2];
+ uint32_t dWriteBoosterBufferMaxNAllocUnits;
+ uint8_t bDeviceMaxWriteBoosterLUs;
+ uint8_t bWriteBoosterBufferCapAdjFac;
+ uint8_t bSupportedWriteBoosterBufferUserSpaceReductionTypes;
+ uint8_t bSupportedWriteBoosterBufferTypes;
+} __packed;
+
+_Static_assert(sizeof(struct ufshci_geometry_descriptor) == 87,
+ "bad size for ufshci_geometry_descriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.5 "Unit Descriptor"
+ * UnitDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_unit_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bUnitIndex;
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bLUQueueDepth;
+ uint8_t bPSASensitive;
+ uint8_t bMemoryType;
+ uint8_t bDataReliability;
+ uint8_t bLogicalBlockSize;
+ uint64_t qLogicalBlockCount;
+ /* 0x13 */
+ uint32_t dEraseBlockSize;
+ uint8_t bProvisioningType;
+ uint64_t qPhyMemResourceCount;
+ /* 0x20 */
+ uint16_t wContextCapabilities;
+ uint8_t bLargeUnitGranularity_M1;
+ uint8_t ReservedHpb[6];
+ uint32_t dLUNumWriteBoosterBufferAllocUnits;
+} __packed;
+_Static_assert(sizeof(struct ufshci_unit_descriptor) == 45,
+ "bad size for ufshci_unit_descriptor");
+
+enum LUWriteProtect {
+ kNoWriteProtect = 0x00,
+ kPowerOnWriteProtect = 0x01,
+ kPermanentWriteProtect = 0x02,
+};
+
+/*
+ * UFS Spec 4.1, section 14.1.5.6 "RPMB Unit Descriptor"
+ * RpmbUnitDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_rpmb_unit_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bUnitIndex;
+ uint8_t bLUEnable;
+ uint8_t bBootLunID;
+ uint8_t bLUWriteProtect;
+ uint8_t bLUQueueDepth;
+ uint8_t bPSASensitive;
+ uint8_t bMemoryType;
+ uint8_t Reserved;
+ uint8_t bLogicalBlockSize;
+ uint64_t qLogicalBlockCount;
+ /* 0x13 */
+ uint32_t dEraseBlockSize;
+ uint8_t bProvisioningType;
+ uint64_t qPhyMemResourceCount;
+ /* 0x20 */
+ uint8_t Reserved1[3];
+} __packed;
+_Static_assert(sizeof(struct ufshci_rpmb_unit_descriptor) == 35,
+ "bad size for RpmbUnitDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.7 "Power Parameters Descriptor"
+ * PowerParametersDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_power_parameters_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t wActiveICCLevelsVCC[16];
+ uint16_t wActiveICCLevelsVCCQ[16];
+ uint16_t wActiveICCLevelsVCCQ2[16];
+} __packed;
+_Static_assert(sizeof(struct ufshci_power_parameters_descriptor) == 98,
+ "bad size for PowerParametersDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.8 "Interconnect Descriptor"
+ * InterconnectDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_interconnect_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t bcdUniproVersion;
+ uint16_t bcdMphyVersion;
+} __packed;
+_Static_assert(sizeof(struct ufshci_interconnect_descriptor) == 6,
+ "bad size for InterconnectDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.9-13 "String Descriptor"
+ * StringDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_string_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint16_t UC[126];
+} __packed;
+_Static_assert(sizeof(struct ufshci_string_descriptor) == 254,
+ "bad size for StringDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.14 "Device Health Descriptor"
+ * DeviceHealthDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_device_healthd_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t bPreEOLInfo;
+ uint8_t bDeviceLifeTimeEstA;
+ uint8_t bDeviceLifeTimeEstB;
+ uint8_t VendorPropInfo[32];
+ uint32_t dRefreshTotalCount;
+ uint32_t dRefreshProgress;
+} __packed;
+_Static_assert(sizeof(struct ufshci_device_healthd_descriptor) == 45,
+ "bad size for DeviceHealthDescriptor");
+
+/*
+ * UFS Spec 4.1, section 14.1.5.15 "Vendor Specific Descriptor"
+ * VendorSpecificDescriptor uses big-endian byte ordering.
+ */
+struct ufshci_vendor_specific_descriptor {
+ uint8_t bLength;
+ uint8_t bDescriptorIDN;
+ uint8_t DATA[254];
+} __packed;
+_Static_assert(sizeof(struct ufshci_vendor_specific_descriptor) == 256,
+ "bad size for VendorSpecificDescriptor");
+
+/* UFS Spec 4.1, section 14.2 "Flags" */
+enum ufshci_flags {
+ UFSHCI_FLAG_F_RESERVED = 0x00,
+ UFSHCI_FLAG_F_DEVICE_INIT = 0x01,
+ UFSHCI_FLAG_F_PERMANENT_WP_EN = 0x02,
+ UFSHCI_FLAS_F_POWER_ON_WP_EN = 0x03,
+ UFSHCI_FLAG_F_BACKGROUND_OPS_EN = 0x04,
+ UFSHCI_FLAG_F_DEVICE_LIFE_SPAN_MODE_EN = 0x05,
+ UFSHCI_FLAG_F_PURGE_ENABLE = 0x06,
+ UFSHCI_FLAG_F_REFRESH_ENABLE = 0x07,
+ UFSHCI_FLAG_F_PHY_RESOURCE_REMOVAL = 0x08,
+ UFSHCI_FLAG_F_BUSY_RTC = 0x09,
+ UFSHCI_FLAG_F_PERMANENTLY_DISABLE_FW_UPDATE = 0x0b,
+ UFSHCI_FLAG_F_WRITE_BOOSTER_EN = 0x0e,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN = 0x0f,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE = 0x10,
+ UFSHCI_FLAG_F_UNPIN_EN = 0x13,
+};
+
+/* UFS Spec 4.1, section 14.3 "Attributes" */
+enum ufshci_attributes {
+ UFSHCI_ATTR_B_BOOT_LUN_EN = 0x00,
+ UFSHCI_ATTR_B_CURRENT_POWER_MODE = 0x02,
+ UFSHCI_ATTR_B_ACTIVE_ICC_LEVEL = 0x03,
+ UFSHCI_ATTR_B_OUT_OF_ORDER_DATA_EN = 0x04,
+ UFSHCI_ATTR_B_BACKGROUND_OP_STATUS = 0x05,
+ UFSHCI_ATTR_B_PURGE_STATUS = 0x06,
+ UFSHCI_ATTR_B_MAX_DATA_IN_SIZE = 0x07,
+ UFSHCI_ATTR_B_MAX_DATA_OUT_SIZE = 0x08,
+ UFSHCI_ATTR_D_DYN_CAP_NEEDED = 0x09,
+ UFSHCI_ATTR_B_REF_CLK_FREQ = 0x0a,
+ UFSHCI_ATTR_B_CONFIG_DESCR_LOCK = 0x0b,
+ UFSHCI_ATTR_B_MAX_NUM_OF_RTT = 0x0c,
+ UFSHCI_ATTR_W_EXCEPTION_EVENT_CONTROL = 0x0d,
+ UFSHCI_ATTR_W_EXCEPTION_EVENT_STATUS = 0x0e,
+ UFSHCI_ATTR_D_SECONDS_PASSED = 0x0f,
+ UFSHCI_ATTR_W_CONTEXT_CONF = 0x10,
+ UFSHCI_ATTR_B_DEVICE_FFU_STATUS = 0x14,
+ UFSHCI_ATTR_B_PSA_STATE = 0x15,
+ UFSHCI_ATTR_D_PSA_DATA_SIZE = 0x16,
+ UFSHCI_ATTR_B_REF_CLK_GATING_WAIT_TIME = 0x17,
+ UFSHCI_ATTR_B_DEVICE_CASE_ROUGH_TEMPERAURE = 0x18,
+ UFSHCI_ATTR_B_DEVICE_TOO_HIGH_TEMP_BOUNDARY = 0x19,
+ UFSHCI_ATTR_B_DEVICE_TOO_LOW_TEMP_BOUNDARY = 0x1a,
+ UFSHCI_ATTR_B_THROTTLING_STATUS = 0x1b,
+ UFSHCI_ATTR_B_WB_BUFFER_FLUSH_STATUS = 0x1c,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE = 0x1d,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST = 0x1e,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE = 0x1f,
+ UFSHCI_ATTR_B_REFRESH_STATUS = 0x2c,
+ UFSHCI_ATTR_B_REFRESH_FREQ = 0x2d,
+ UFSHCI_ATTR_B_REFRESH_UNIT = 0x2e,
+ UFSHCI_ATTR_B_REFRESH_METHOD = 0x2f,
+};
+
+/*
+ * bAvailableWriteBoosterBufferSize codes (% of UFS WriteBooster buffer
+ * remaining)
+ */
+enum ufshci_wb_available_buffer_Size {
+ UFSHCI_ATTR_WB_AVAILABLE_0 = 0x00, /* 0% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_10 = 0x01, /* 10% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_20 = 0x02, /* 20% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_30 = 0x03, /* 30% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_40 = 0x04, /* 40% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_50 = 0x05, /* 50% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_60 = 0x06, /* 60% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_70 = 0x07, /* 70% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_80 = 0x08, /* 80% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_90 = 0x09, /* 90% buffer remains */
+ UFSHCI_ATTR_WB_AVAILABLE_100 = 0x0A, /* 100% buffer remains */
+};
+
+/* bWriteBoosterBufferLifeTimeEst codes (UFS WriteBooster buffer life %) */
+enum ufshci_wb_lifetime {
+ UFSHCI_ATTR_WB_LIFE_DISABLED = 0x00, /* Info not available */
+ UFSHCI_ATTR_WB_LIFE_0_10 = 0x01, /* 0%–10% used */
+ UFSHCI_ATTR_WB_LIFE_10_20 = 0x02, /* 10%–20% used */
+ UFSHCI_ATTR_WB_LIFE_20_30 = 0x03, /* 20%–30% used */
+ UFSHCI_ATTR_WB_LIFE_30_40 = 0x04, /* 30%–40% used */
+ UFSHCI_ATTR_WB_LIFE_40_50 = 0x05, /* 40%–50% used */
+ UFSHCI_ATTR_WB_LIFE_50_60 = 0x06, /* 50%–60% used */
+ UFSHCI_ATTR_WB_LIFE_60_70 = 0x07, /* 60%–70% used */
+ UFSHCI_ATTR_WB_LIFE_70_80 = 0x08, /* 70%–80% used */
+ UFSHCI_ATTR_WB_LIFE_80_90 = 0x09, /* 80%–90% used */
+ UFSHCI_ATTR_WB_LIFE_90_100 = 0x0A, /* 90%–100% used */
+ UFSHCI_ATTR_WB_LIFE_EXCEEDED =
+ 0x0B, /* Exceeded estimated life (treat as WB disabled) */
+};
+
+#endif /* __UFSHCI_H__ */
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
new file mode 100644
index 000000000000..35663b480cfa
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -0,0 +1,612 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static void
+ufshci_ctrlr_fail(struct ufshci_controller *ctrlr)
+{
+ ctrlr->is_failed = true;
+
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->task_mgmt_req_queue.qops.get_hw_queue(
+ &ctrlr->task_mgmt_req_queue));
+ ufshci_req_queue_fail(ctrlr,
+ ctrlr->transfer_req_queue.qops.get_hw_queue(
+ &ctrlr->transfer_req_queue));
+}
+
+static void
+ufshci_ctrlr_start(struct ufshci_controller *ctrlr, bool resetting)
+{
+ TSENTER();
+
+ /*
+ * If `resetting` is true, we are on the reset path.
+ * Re-enable request queues here because ufshci_ctrlr_reset_task()
+ * disables them during reset.
+ */
+ if (resetting) {
+ if (ufshci_utmr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ if (ufshci_utr_req_queue_enable(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+ }
+
+ if (ufshci_ctrlr_send_nop(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+	/* Initialize UFS target device */
+ if (ufshci_dev_init(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize Reference Clock */
+ if (ufshci_dev_init_reference_clock(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+	/* Initialize UniPro */
+ if (ufshci_dev_init_unipro(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /*
+ * Initialize UIC Power Mode
+	 * QEMU UFS devices do not support UniPro and power mode.
+ */
+ if (!(ctrlr->quirks & UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE) &&
+ ufshci_dev_init_uic_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Initialize UFS Power Mode */
+ if (ufshci_dev_init_ufs_power_mode(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* Read Controller Descriptor (Device, Geometry) */
+ if (ufshci_dev_get_descriptor(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ if (ufshci_dev_config_write_booster(ctrlr)) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ /* TODO: Configure Write Protect */
+
+ /* TODO: Configure Background Operations */
+
+ /*
+ * If the reset is due to a timeout, it is already attached to the SIM
+ * and does not need to be attached again.
+ */
+ if (!resetting && ufshci_sim_attach(ctrlr) != 0) {
+ ufshci_ctrlr_fail(ctrlr);
+ return;
+ }
+
+ TSEXIT();
+}
+
+static int
+ufshci_ctrlr_disable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
+ /* If UFS host controller is already enabled, disable it. */
+ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce)) {
+ hce &= ~UFSHCIM(UFSHCI_HCE_REG_HCE);
+ ufshci_mmio_write_4(ctrlr, hce, hce);
+ }
+
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (!UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to disable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_disable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_enable_host_ctrlr(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint32_t hce;
+
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+
+ /* Enable UFS host controller */
+ hce |= UFSHCIM(UFSHCI_HCE_REG_HCE);
+ ufshci_mmio_write_4(ctrlr, hce, hce);
+
+ /*
+	 * During controller initialization, the value of the HCE bit is
+	 * unstable, so wait a short time before reading it back.
+ */
+ pause_sbt("ufshci_enable_hce", ustosbt(100), 0, C_PREL(1));
+
+ /* Wait for the HCE flag to change */
+ while (1) {
+ hce = ufshci_mmio_read_4(ctrlr, hce);
+ if (UFSHCIV(UFSHCI_HCE_REG_HCE, hce))
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "host controller failed to enable "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ pause_sbt("ufshci_enable_hce", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
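[Both wait loops above back off exponentially: delta_t starts at 1 us and grows by 3/2 each pass, capped at 1 ms, so the cap is reached after about 18 iterations (roughly 3 ms of cumulative sleep) and the loop then polls once per millisecond until the timeout expires.]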
+
+static int
+ufshci_ctrlr_disable(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ /* Disable all interrupts */
+ ufshci_mmio_write_4(ctrlr, ie, 0);
+
+ error = ufshci_ctrlr_disable_host_ctrlr(ctrlr);
+ return (error);
+}
+
+static int
+ufshci_ctrlr_enable(struct ufshci_controller *ctrlr)
+{
+ uint32_t ie, hcs;
+ int error;
+
+ error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
+ if (error)
+ return (error);
+
+ /* Send DME_LINKSTARTUP command to start the link startup procedure */
+ error = ufshci_uic_send_dme_link_startup(ctrlr);
+ if (error)
+ return (error);
+
+ /*
+ * The device_present(UFSHCI_HCS_REG_DP) bit becomes true if the host
+ * controller has successfully received a Link Startup UIC command
+ * response and the UFS device has found a physical link to the
+ * controller.
+ */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!UFSHCIV(UFSHCI_HCS_REG_DP, hcs)) {
+ ufshci_printf(ctrlr, "UFS device not found\n");
+ return (ENXIO);
+ }
+
+ /* Enable additional interrupts by programming the IE register. */
+ ie = ufshci_mmio_read_4(ctrlr, ie);
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTRCE); /* UTR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UEE); /* UIC Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTMRCE); /* UTMR Completion */
+ ie |= UFSHCIM(UFSHCI_IE_REG_DFEE); /* Device Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_UTPEE); /* UTP Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_HCFEE); /* Host Ctrlr Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_SBFEE); /* System Bus Fatal Error */
+ ie |= UFSHCIM(UFSHCI_IE_REG_CEFEE); /* Crypto Engine Fatal Error */
+ ufshci_mmio_write_4(ctrlr, ie, ie);
+
+ /* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
+
+ return (0);
+}
+
+static int
+ufshci_ctrlr_hw_reset(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ error = ufshci_ctrlr_disable(ctrlr);
+ if (error)
+ return (error);
+
+ error = ufshci_ctrlr_enable(ctrlr);
+ return (error);
+}
+
+static void
+ufshci_ctrlr_reset_task(void *arg, int pending)
+{
+ struct ufshci_controller *ctrlr = arg;
+ int error;
+
+ /* Release resources */
+ ufshci_utmr_req_queue_disable(ctrlr);
+ ufshci_utr_req_queue_disable(ctrlr);
+
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (ufshci_ctrlr_fail(ctrlr));
+
+ ufshci_ctrlr_start(ctrlr, true);
+}
+
+int
+ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
+{
+ uint32_t ver, cap, ahit;
+ uint32_t timeout_period, retry_count;
+ int error;
+
+ ctrlr->device_init_timeout_in_ms = UFSHCI_DEVICE_INIT_TIMEOUT_MS;
+ ctrlr->uic_cmd_timeout_in_ms = UFSHCI_UIC_CMD_TIMEOUT_MS;
+ ctrlr->dev = dev;
+ ctrlr->sc_unit = device_get_unit(dev);
+
+ snprintf(ctrlr->sc_name, sizeof(ctrlr->sc_name), "%s",
+ device_get_nameunit(dev));
+
+ mtx_init(&ctrlr->sc_mtx, device_get_nameunit(dev), NULL,
+ MTX_DEF | MTX_RECURSE);
+
+ mtx_init(&ctrlr->uic_cmd_lock, "ufshci ctrlr uic cmd lock", NULL,
+ MTX_DEF);
+
+ ver = ufshci_mmio_read_4(ctrlr, ver);
+ ctrlr->major_version = UFSHCIV(UFSHCI_VER_REG_MJR, ver);
+ ctrlr->minor_version = UFSHCIV(UFSHCI_VER_REG_MNR, ver);
+ ufshci_printf(ctrlr, "UFSHCI Version: %d.%d\n", ctrlr->major_version,
+ ctrlr->minor_version);
+
+ /* Read Device Capabilities */
+ ctrlr->cap = cap = ufshci_mmio_read_4(ctrlr, cap);
+ ctrlr->is_single_db_supported = UFSHCIV(UFSHCI_CAP_REG_LSDBS, cap);
+ /*
+ * TODO: This driver does not yet support multi-queue.
+ * Check the UFSHCI_CAP_REG_MCQS bit in the future to determine if
+ * multi-queue support is available.
+ */
+ ctrlr->is_mcq_supported = false;
+ if (!(ctrlr->is_single_db_supported == 0 || ctrlr->is_mcq_supported))
+ return (ENXIO);
+ /*
+	 * The maximum transfer size supported by the UFSHCI spec is
+	 * 65535 * 256 KiB. However, we limit the maximum transfer size to
+	 * 1 MiB (256 * 4 KiB) for performance reasons.
+ */
+ ctrlr->page_size = PAGE_SIZE;
+ ctrlr->max_xfer_size = ctrlr->page_size * UFSHCI_MAX_PRDT_ENTRY_COUNT;
+
+ timeout_period = UFSHCI_DEFAULT_TIMEOUT_PERIOD;
+ TUNABLE_INT_FETCH("hw.ufshci.timeout_period", &timeout_period);
+ timeout_period = min(timeout_period, UFSHCI_MAX_TIMEOUT_PERIOD);
+ timeout_period = max(timeout_period, UFSHCI_MIN_TIMEOUT_PERIOD);
+ ctrlr->timeout_period = timeout_period;
+
+ retry_count = UFSHCI_DEFAULT_RETRY_COUNT;
+ TUNABLE_INT_FETCH("hw.ufshci.retry_count", &retry_count);
+ ctrlr->retry_count = retry_count;
+
+ ctrlr->enable_aborts = 1;
+ if (ctrlr->quirks & UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK)
+ ctrlr->enable_aborts = 0;
+ else
+ TUNABLE_INT_FETCH("hw.ufshci.enable_aborts",
+ &ctrlr->enable_aborts);
+
+ /* Reset the UFSHCI controller */
+ error = ufshci_ctrlr_hw_reset(ctrlr);
+ if (error)
+ return (error);
+
+ /* Read the UECPA register to clear it */
+ ufshci_mmio_read_4(ctrlr, uecpa);
+
+ /* Disable Auto-Hibernate */
+ ahit = 0;
+ ufshci_mmio_write_4(ctrlr, ahit, ahit);
+
+ /* Allocate and initialize UTP Task Management Request List. */
+ error = ufshci_utmr_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
+ error = ufshci_utr_req_queue_construct(ctrlr);
+ if (error)
+ return (error);
+
+ /* TODO: Separate IO and Admin slot */
+
+ /*
+ * max_hw_pend_io is the number of slots in the transfer_req_queue.
+ * Reduce num_entries by one to reserve an admin slot.
+ */
+ ctrlr->max_hw_pend_io = ctrlr->transfer_req_queue.num_entries - 1;
+
+ /* Create a thread for the taskqueue. */
+ ctrlr->taskqueue = taskqueue_create("ufshci_taskq", M_WAITOK,
+ taskqueue_thread_enqueue, &ctrlr->taskqueue);
+ taskqueue_start_threads(&ctrlr->taskqueue, 1, PI_DISK, "ufshci taskq");
+
+ TASK_INIT(&ctrlr->reset_task, 0, ufshci_ctrlr_reset_task, ctrlr);
+
+ return (0);
+}
+
+void
+ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
+{
+ if (ctrlr->resource == NULL)
+ goto nores;
+
+ /* TODO: Flush In-flight IOs */
+
+ /* Release resources */
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
+
+ if (ctrlr->tag)
+ bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(ctrlr->dev, SYS_RES_IRQ,
+ rman_get_rid(ctrlr->res), ctrlr->res);
+
+ mtx_lock(&ctrlr->sc_mtx);
+
+ ufshci_sim_detach(ctrlr);
+
+ mtx_unlock(&ctrlr->sc_mtx);
+
+ bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
+ ctrlr->resource);
+nores:
+ KASSERT(!mtx_owned(&ctrlr->uic_cmd_lock),
+ ("destroying uic_cmd_lock while still owned"));
+ mtx_destroy(&ctrlr->uic_cmd_lock);
+
+ KASSERT(!mtx_owned(&ctrlr->sc_mtx),
+ ("destroying sc_mtx while still owned"));
+ mtx_destroy(&ctrlr->sc_mtx);
+
+ return;
+}
+
+void
+ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
+{
+ taskqueue_enqueue(ctrlr->taskqueue, &ctrlr->reset_task);
+}
+
+int
+ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (
+ ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
+ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
+ /*is_admin*/ true));
+}
+
+int
+ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (ufshci_req_queue_submit_request(&ctrlr->transfer_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
+ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_completion_poll_status status;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_ctrlr_send_nop failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+void
+ufshci_ctrlr_start_config_hook(void *arg)
+{
+ struct ufshci_controller *ctrlr = arg;
+
+ TSENTER();
+
+ if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
+ ufshci_utr_req_queue_enable(ctrlr) == 0)
+ ufshci_ctrlr_start(ctrlr, false);
+ else
+ ufshci_ctrlr_fail(ctrlr);
+
+ ufshci_sysctl_initialize_ctrlr(ctrlr);
+ config_intrhook_disestablish(&ctrlr->config_hook);
+
+ TSEXIT();
+}
+
+/*
+ * Poll all the queues enabled on the device for completion. Each handled
+ * Interrupt Status bit is write-1-to-clear and is acknowledged by writing it
+ * back to the IS register.
+ */
+void
+ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
+{
+ uint32_t is;
+
+ is = ufshci_mmio_read_4(ctrlr, is);
+
+ /* UIC error */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UE)) {
+ uint32_t uecpa, uecdl, uecn, uect, uecdme;
+
+ /* UECPA for Host UIC Error Code within PHY Adapter Layer */
+ uecpa = ufshci_mmio_read_4(ctrlr, uecpa);
+ if (uecpa & UFSHCIM(UFSHCI_UECPA_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECPA error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECPA_REG_EC, uecpa));
+ }
+ /* UECDL for Host UIC Error Code within Data Link Layer */
+ uecdl = ufshci_mmio_read_4(ctrlr, uecdl);
+ if (uecdl & UFSHCIM(UFSHCI_UECDL_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECDL error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECDL_REG_EC, uecdl));
+ }
+ /* UECN for Host UIC Error Code within Network Layer */
+ uecn = ufshci_mmio_read_4(ctrlr, uecn);
+ if (uecn & UFSHCIM(UFSHCI_UECN_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECN error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECN_REG_EC, uecn));
+ }
+ /* UECT for Host UIC Error Code within Transport Layer */
+ uect = ufshci_mmio_read_4(ctrlr, uect);
+ if (uect & UFSHCIM(UFSHCI_UECT_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECT error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECT_REG_EC, uect));
+ }
+ /* UECDME for Host UIC Error Code within DME subcomponent */
+ uecdme = ufshci_mmio_read_4(ctrlr, uecdme);
+ if (uecdme & UFSHCIM(UFSHCI_UECDME_REG_ERR)) {
+ ufshci_printf(ctrlr, "UECDME error code: 0x%x\n",
+ UFSHCIV(UFSHCI_UECDME_REG_EC, uecdme));
+ }
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UE));
+ }
+ /* Device Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_DFES)) {
+ ufshci_printf(ctrlr, "Device fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_DFES));
+ }
+ /* UTP Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTPES)) {
+ ufshci_printf(ctrlr, "UTP error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTPES));
+ }
+ /* Host Controller Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_HCFES)) {
+ ufshci_printf(ctrlr, "Host controller fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_HCFES));
+ }
+ /* System Bus Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_SBFES)) {
+ ufshci_printf(ctrlr, "System bus fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_SBFES));
+ }
+ /* Crypto Engine Fatal Error Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_CEFES)) {
+ ufshci_printf(ctrlr, "Crypto engine fatal error on ISR\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CEFES));
+ }
+ /* UTP Task Management Request Completion Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
+ ufshci_req_queue_process_completions(
+ &ctrlr->task_mgmt_req_queue);
+ }
+ /* UTP Transfer Request Completion Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTRCS));
+ ufshci_req_queue_process_completions(
+ &ctrlr->transfer_req_queue);
+ }
+ /* MCQ CQ Event Status */
+ if (is & UFSHCIM(UFSHCI_IS_REG_CQES)) {
+ /* TODO: We need to process completion Queue Pairs */
+ ufshci_printf(ctrlr, "MCQ completion not yet implemented\n");
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_CQES));
+ }
+}
+
+/*
+ * Handle the shared (single-vector) interrupt case: num_io_queues will be 1
+ * and there is only a single vector, so poll every enabled queue for
+ * completions.
+ */
+void
+ufshci_ctrlr_shared_handler(void *arg)
+{
+ struct ufshci_controller *ctrlr = arg;
+
+ ufshci_ctrlr_poll(ctrlr);
+}
+
+void
+ufshci_reg_dump(struct ufshci_controller *ctrlr)
+{
+ ufshci_printf(ctrlr, "========= UFSHCI Register Dump =========\n");
+
+ UFSHCI_DUMP_REG(ctrlr, cap);
+ UFSHCI_DUMP_REG(ctrlr, mcqcap);
+ UFSHCI_DUMP_REG(ctrlr, ver);
+ UFSHCI_DUMP_REG(ctrlr, ext_cap);
+ UFSHCI_DUMP_REG(ctrlr, hcpid);
+ UFSHCI_DUMP_REG(ctrlr, hcmid);
+ UFSHCI_DUMP_REG(ctrlr, ahit);
+ UFSHCI_DUMP_REG(ctrlr, is);
+ UFSHCI_DUMP_REG(ctrlr, ie);
+ UFSHCI_DUMP_REG(ctrlr, hcsext);
+ UFSHCI_DUMP_REG(ctrlr, hcs);
+ UFSHCI_DUMP_REG(ctrlr, hce);
+ UFSHCI_DUMP_REG(ctrlr, uecpa);
+ UFSHCI_DUMP_REG(ctrlr, uecdl);
+ UFSHCI_DUMP_REG(ctrlr, uecn);
+ UFSHCI_DUMP_REG(ctrlr, uect);
+ UFSHCI_DUMP_REG(ctrlr, uecdme);
+
+ ufshci_printf(ctrlr, "========================================\n");
+}
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
new file mode 100644
index 000000000000..253f31a93c2e
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -0,0 +1,79 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include "ufshci_private.h"
+
+void
+ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid)
+{
+ struct ufshci_request *req;
+ struct ufshci_task_mgmt_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_NOWAIT, cb_fn, cb_arg);
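+ /* XXX: With M_NOWAIT this allocation can fail and return NULL. */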
+
+ req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
+ req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
+
+ upiu = (struct ufshci_task_mgmt_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type =
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST;
+ upiu->header.lun = lun;
+ upiu->header.ext_iid_or_function = function;
+ upiu->input_param1 = lun;
+ upiu->input_param2 = task_tag;
+ upiu->input_param3 = iid;
+
+ ufshci_ctrlr_submit_task_mgmt_request(ctrlr, req);
+}
+
+void
+ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
+ void *cb_arg)
+{
+ struct ufshci_request *req;
+ struct ufshci_nop_out_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_nop_out_upiu);
+ req->response_size = sizeof(struct ufshci_nop_in_upiu);
+
+ upiu = (struct ufshci_nop_out_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_NOP_OUT;
+
+ ufshci_ctrlr_submit_admin_request(ctrlr, req);
+}
+
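+/*
+ * Build and submit a Query Request UPIU from the caller-supplied parameters.
+ * Callers typically pair this with ufshci_completion_poll(); see
+ * ufshci_dev_read_descriptor() for a complete polled example.
+ */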
+void
+ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param)
+{
+ struct ufshci_request *req;
+ struct ufshci_query_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_query_request_upiu);
+ req->response_size = sizeof(struct ufshci_query_response_upiu);
+
+ upiu = (struct ufshci_query_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_QUERY_REQUEST;
+ upiu->header.ext_iid_or_function = param.function;
+ upiu->opcode = param.opcode;
+ upiu->idn = param.type;
+ upiu->index = param.index;
+ upiu->selector = param.selector;
+ upiu->value_64 = param.value;
+ upiu->length = param.desc_size;
+
+ ufshci_ctrlr_submit_admin_request(ctrlr, req);
+}
diff --git a/sys/dev/ufshci/ufshci_dev.c b/sys/dev/ufshci/ufshci_dev.c
new file mode 100644
index 000000000000..975468e5156f
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_dev.c
@@ -0,0 +1,776 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static int
+ufshci_dev_read_descriptor(struct ufshci_controller *ctrlr,
+ enum ufshci_descriptor_type desc_type, uint8_t index, uint8_t selector,
+ void *desc, size_t desc_size)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_DESCRIPTOR;
+ param.type = desc_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+ param.desc_size = desc_size;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_descriptor failed!\n");
+ return (ENXIO);
+ }
+
+ memcpy(desc, status.cpl.response_upiu.query_response_upiu.command_data,
+ desc_size);
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_device_descriptor(struct ufshci_controller *ctrlr,
+ struct ufshci_device_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_DEVICE, 0, 0,
+ desc, sizeof(struct ufshci_device_descriptor)));
+}
+
+static int
+ufshci_dev_read_geometry_descriptor(struct ufshci_controller *ctrlr,
+ struct ufshci_geometry_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_GEOMETRY, 0,
+ 0, desc, sizeof(struct ufshci_geometry_descriptor)));
+}
+
+static int
+ufshci_dev_read_unit_descriptor(struct ufshci_controller *ctrlr, uint8_t lun,
+ struct ufshci_unit_descriptor *desc)
+{
+ return (ufshci_dev_read_descriptor(ctrlr, UFSHCI_DESC_TYPE_UNIT, lun, 0,
+ desc, sizeof(struct ufshci_unit_descriptor)));
+}
+
+static int
+ufshci_dev_read_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type, uint8_t *flag)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_flag failed!\n");
+ return (ENXIO);
+ }
+
+ *flag = status.cpl.response_upiu.query_response_upiu.flag_value;
+
+ return (0);
+}
+
+static int
+ufshci_dev_set_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_SET_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_set_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_clear_flag(struct ufshci_controller *ctrlr,
+ enum ufshci_flags flag_type)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_CLEAR_FLAG;
+ param.type = flag_type;
+ param.index = 0;
+ param.selector = 0;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_clear_flag failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_dev_read_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t *value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_READ_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = 0;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_read_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ *value = status.cpl.response_upiu.query_response_upiu.value_64;
+
+ return (0);
+}
+
+static int
+ufshci_dev_write_attribute(struct ufshci_controller *ctrlr,
+ enum ufshci_attributes attr_type, uint8_t index, uint8_t selector,
+ uint64_t value)
+{
+ struct ufshci_completion_poll_status status;
+ struct ufshci_query_param param;
+
+ param.function = UFSHCI_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ param.opcode = UFSHCI_QUERY_OPCODE_WRITE_ATTRIBUTE;
+ param.type = attr_type;
+ param.index = index;
+ param.selector = selector;
+ param.value = value;
+
+ status.done = 0;
+ ufshci_ctrlr_cmd_send_query_request(ctrlr, ufshci_completion_poll_cb,
+ &status, param);
+ ufshci_completion_poll(&status);
+ if (status.error) {
+ ufshci_printf(ctrlr, "ufshci_dev_write_attribute failed!\n");
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init(struct ufshci_controller *ctrlr)
+{
+ int timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+ sbintime_t delta_t = SBT_1US;
+ uint8_t flag;
+ int error;
+ const uint8_t device_init_completed = 0;
+
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT);
+ if (error)
+ return (error);
+
+ /* Wait for the device to clear the UFSHCI_FLAG_F_DEVICE_INIT flag */
+ while (1) {
+ error = ufshci_dev_read_flag(ctrlr, UFSHCI_FLAG_F_DEVICE_INIT,
+ &flag);
+ if (error)
+ return (error);
+ if (flag == device_init_completed)
+ break;
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "device init did not become %d "
+ "within %d ms\n",
+ device_init_completed,
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
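+ /*
+ * Sleep with exponential backoff between probes of the flag,
+ * starting at 1 us and capped at 1 ms.
+ */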
+ pause_sbt("ufshciinit", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_reset(struct ufshci_controller *ctrlr)
+{
+ if (ufshci_uic_send_dme_endpoint_reset(ctrlr))
+ return (ENXIO);
+
+ return (ufshci_dev_init(ctrlr));
+}
+
+int
+ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr)
+{
+ int error;
+ uint8_t index, selector;
+
+ index = 0; /* bRefClkFreq is a device-type attribute */
+ selector = 0; /* bRefClkFreq is a device-type attribute */
+
+ error = ufshci_dev_write_attribute(ctrlr, UFSHCI_ATTR_B_REF_CLK_FREQ,
+ index, selector, ctrlr->ref_clk);
+ if (error)
+ return (error);
+
+ return (0);
+}
+
+int
+ufshci_dev_init_unipro(struct ufshci_controller *ctrlr)
+{
+ uint32_t pa_granularity, peer_pa_granularity;
+ uint32_t t_activate, peer_t_activate;
+
+ /*
+ * Unipro Version:
+ * - 7~15 = Above 2.0, 6 = 2.0, 5 = 1.8, 4 = 1.61, 3 = 1.6, 2 = 1.41,
+ * 1 = 1.40, 0 = Reserved
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_LocalVerInfo,
+ &ctrlr->unipro_version))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_get(ctrlr, PA_RemoteVerInfo,
+ &ctrlr->ufs_dev.unipro_version))
+ return (ENXIO);
+
+ /*
+ * PA_Granularity: Granularity for PA_TActivate and PA_Hibern8Time
+ * - 1=1us, 2=4us, 3=8us, 4=16us, 5=32us, 6=100us
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_Granularity, &pa_granularity))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_pa_granularity))
+ return (ENXIO);
+
+ /*
+ * PA_TActivate: Time to wait before activating a burst in order to
+ * wake up the peer M-RX.
+ * UniPro automatically sets timing information such as PA_TActivate
+ * through the PACP_CAP_EXT1_ind command during the Link Startup
+ * operation.
+ */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_TActivate, &t_activate))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_TActivate, &peer_t_activate))
+ return (ENXIO);
+
+ if (ctrlr->quirks & UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE) {
+ /*
+ * Intel Lakefield UFSHCI has a quirk: we need to add 200 us to the
+ * peer's PA_TActivate.
+ */
+ if (pa_granularity == peer_pa_granularity) {
+ peer_t_activate = t_activate + 2;
+ if (ufshci_uic_send_dme_peer_set(ctrlr, PA_TActivate,
+ peer_t_activate))
+ return (ENXIO);
+ }
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr)
+{
+ /* HSSeries: A = 1, B = 2 */
+ const uint32_t hs_series = 2;
+ /*
+ * TX/RX PWRMode:
+ * - TX[3:0], RX[7:4]
+ * - Fast Mode = 1, Slow Mode = 2, FastAuto Mode = 4, SlowAuto Mode = 5
+ */
+ const uint32_t fast_mode = 1;
+ const uint32_t rx_bit_shift = 4;
+ uint32_t power_mode, peer_granularity;
+
+ /* Update lanes with available TX/RX lanes */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailTxDataLanes,
+ &ctrlr->max_tx_lanes))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_get(ctrlr, PA_AvailRxDataLanes,
+ &ctrlr->max_rx_lanes))
+ return (ENXIO);
+
+ /* Get max HS-GEAR value */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_MaxRxHSGear,
+ &ctrlr->max_rx_hs_gear))
+ return (ENXIO);
+
+ /* Set the data lane to max */
+ ctrlr->tx_lanes = ctrlr->max_tx_lanes;
+ ctrlr->rx_lanes = ctrlr->max_rx_lanes;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveTxDataLanes,
+ ctrlr->tx_lanes))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_ActiveRxDataLanes,
+ ctrlr->rx_lanes))
+ return (ENXIO);
+
+ if (ctrlr->quirks & UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY) {
+ /* Before changing gears, first change the number of lanes. */
+ if (ufshci_uic_send_dme_get(ctrlr, PA_PWRMode, &power_mode))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for the power mode change to complete. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+ }
+
+ /* Set HS-GEAR to max gear */
+ ctrlr->hs_gear = ctrlr->max_rx_hs_gear;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_TxGear, ctrlr->hs_gear))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_RxGear, ctrlr->hs_gear))
+ return (ENXIO);
+
+ /*
+ * Set termination
+ * - HS-MODE = ON / LS-MODE = OFF
+ */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_TxTermination, true))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_RxTermination, true))
+ return (ENXIO);
+
+ /* Set HSSeries (A = 1, B = 2) */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_HSSeries, hs_series))
+ return (ENXIO);
+
+ /* Set Timeout values */
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData0,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData1,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData2,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData3,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData4,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRModeUserData5,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalFC0ProtectionTimeOutVal,
+ DL_FC0ProtectionTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalTC0ReplayTimeOutVal,
+ DL_TC0ReplayTimeOutVal_Default))
+ return (ENXIO);
+ if (ufshci_uic_send_dme_set(ctrlr, DME_LocalAFC0ReqTimeOutVal,
+ DL_AFC0ReqTimeOutVal_Default))
+ return (ENXIO);
+
+ /* Set TX/RX PWRMode */
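+ /* Fast Mode in both directions: (1 << 4) | 1 == 0x11 */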
+ power_mode = (fast_mode << rx_bit_shift) | fast_mode;
+ if (ufshci_uic_send_dme_set(ctrlr, PA_PWRMode, power_mode))
+ return (ENXIO);
+
+ /* Wait for the power mode change to complete. */
+ if (ufshci_uic_power_mode_ready(ctrlr)) {
+ ufshci_reg_dump(ctrlr);
+ return (ENXIO);
+ }
+
+ /* Clear 'Power Mode completion status' */
+ ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UPMS));
+
+ if (ctrlr->quirks & UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE) {
+ /*
+ * Intel Lakefield UFSHCI has a quirk: we need to wait 1250 us
+ * and then clear the DME error.
+ */
+ pause_sbt("ufshci", ustosbt(1250), 0, C_PREL(1));
+
+ /* Test with dme_peer_get to make sure there are no errors. */
+ if (ufshci_uic_send_dme_peer_get(ctrlr, PA_Granularity,
+ &peer_granularity))
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+int
+ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr)
+{
+ /* TODO: Need to implement */
+
+ return (0);
+}
+
+int
+ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *device = &ctrlr->ufs_dev;
+ /*
+ * The kDeviceDensityUnit is defined in the spec as 512.
+ * qTotalRawDeviceCapacity uses big-endian byte ordering.
+ */
+ const uint32_t device_density_unit = 512;
+ uint32_t ver;
+ int error;
+
+ error = ufshci_dev_read_device_descriptor(ctrlr, &device->dev_desc);
+ if (error)
+ return (error);
+
+ ver = be16toh(device->dev_desc.wSpecVersion);
+ ufshci_printf(ctrlr, "UFS device spec version %u.%u.%u\n",
+ UFSHCIV(UFSHCI_VER_REG_MJR, ver), UFSHCIV(UFSHCI_VER_REG_MNR, ver),
+ UFSHCIV(UFSHCI_VER_REG_VS, ver));
+ ufshci_printf(ctrlr, "%u enabled LUNs found\n",
+ device->dev_desc.bNumberLU);
+
+ error = ufshci_dev_read_geometry_descriptor(ctrlr, &device->geo_desc);
+ if (error)
+ return (error);
+
+ if (device->geo_desc.bMaxNumberLU == 0) {
+ device->max_lun_count = 8;
+ } else if (device->geo_desc.bMaxNumberLU == 1) {
+ device->max_lun_count = 32;
+ } else {
+ ufshci_printf(ctrlr,
+ "Invalid Geometry Descriptor bMaxNumberLU value=%d\n",
+ device->geo_desc.bMaxNumberLU);
+ return (ENXIO);
+ }
+ ctrlr->max_lun_count = device->max_lun_count;
+
+ ufshci_printf(ctrlr, "UFS device total size is %lu bytes\n",
+ be64toh(device->geo_desc.qTotalRawDeviceCapacity) *
+ device_density_unit);
+
+ return (0);
+}
+
+static int
+ufshci_dev_enable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Enable WriteBooster */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to enable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = true;
+
+ /* Enable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_set_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Enable WriteBooster buffer flush */
+ error = ufshci_dev_set_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to enable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = true;
+
+ return (0);
+}
+
+static int
+ufshci_dev_disable_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ int error;
+
+ /* Disable WriteBooster buffer flush */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WB_BUFFER_FLUSH_EN);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush\n");
+ return (error);
+ }
+ dev->is_wb_flush_enabled = false;
+
+ /* Disable WriteBooster buffer flush during hibernate */
+ error = ufshci_dev_clear_flag(ctrlr,
+ UFSHCI_FLAG_F_WB_BUFFER_FLUSH_DURING_HIBERNATE);
+ if (error) {
+ ufshci_printf(ctrlr,
+ "Failed to disable WriteBooster buffer flush during hibernate\n");
+ return (error);
+ }
+
+ /* Disable WriteBooster */
+ error = ufshci_dev_clear_flag(ctrlr, UFSHCI_FLAG_F_WRITE_BOOSTER_EN);
+ if (error) {
+ ufshci_printf(ctrlr, "Failed to disable WriteBooster\n");
+ return (error);
+ }
+ dev->is_wb_enabled = false;
+
+ return (0);
+}
+
+static int
+ufshci_dev_is_write_booster_buffer_life_time_left(
+ struct ufshci_controller *ctrlr, bool *is_life_time_left)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint8_t buffer_lun;
+ uint64_t life_time;
+ uint32_t error;
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_WB_BUFFER_LIFE_TIME_EST, buffer_lun, 0, &life_time);
+ if (error)
+ return (error);
+
+ *is_life_time_left = (life_time != UFSHCI_ATTR_WB_LIFE_EXCEEDED);
+
+ return (0);
+}
+
+/*
+ * This function is not yet in use. It will be used when suspend/resume is
+ * implemented.
+ */
+static __unused int
+ufshci_dev_need_write_booster_buffer_flush(struct ufshci_controller *ctrlr,
+ bool *need_flush)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ bool is_life_time_left = false;
+ uint64_t available_buffer_size, current_buffer_size;
+ uint8_t buffer_lun;
+ uint32_t error;
+
+ *need_flush = false;
+
+ if (!dev->is_wb_enabled)
+ return (0);
+
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ return (error);
+
+ if (!is_life_time_left)
+ return (ufshci_dev_disable_write_booster(ctrlr));
+
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED)
+ buffer_lun = dev->wb_dedicated_lu;
+ else
+ buffer_lun = 0;
+
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_B_AVAILABLE_WB_BUFFER_SIZE, buffer_lun, 0,
+ &available_buffer_size);
+ if (error)
+ return (error);
+
+ switch (dev->wb_user_space_config_option) {
+ case UFSHCI_DESC_WB_BUF_USER_SPACE_REDUCTION:
+ *need_flush = (available_buffer_size <=
+ UFSHCI_ATTR_WB_AVAILABLE_10);
+ break;
+ case UFSHCI_DESC_WB_BUF_PRESERVE_USER_SPACE:
+ /*
+ * In PRESERVE USER SPACE mode, a flush should be performed when
+ * the current buffer size is greater than 0 and the available
+ * buffer size has fallen below write_booster_flush_threshold.
+ */
+ error = ufshci_dev_read_attribute(ctrlr,
+ UFSHCI_ATTR_D_CURRENT_WB_BUFFER_SIZE, buffer_lun, 0,
+ &current_buffer_size);
+ if (error)
+ return (error);
+
+ if (current_buffer_size == 0)
+ return (0);
+
+ *need_flush = (available_buffer_size <
+ dev->write_booster_flush_threshold);
+ break;
+ default:
+ ufshci_printf(ctrlr,
+ "Invalid bWriteBoosterBufferPreserveUserSpaceEn value");
+ return (EINVAL);
+ }
+
+ /*
+ * TODO: Need to handle WRITEBOOSTER_FLUSH_NEEDED exception case from
+ * wExceptionEventStatus attribute.
+ */
+
+ return (0);
+}
+
+int
+ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+ uint32_t extended_ufs_feature_support;
+ uint32_t alloc_units = 0; /* may stay 0 if no dedicated LU is found */
+ struct ufshci_unit_descriptor unit_desc;
+ uint8_t lun;
+ bool is_life_time_left;
+ uint32_t mega_byte = 1024 * 1024;
+ uint32_t error = 0;
+
+ extended_ufs_feature_support = be32toh(
+ dev->dev_desc.dExtendedUfsFeaturesSupport);
+ if (!(extended_ufs_feature_support &
+ UFSHCI_DESC_EXT_UFS_FEATURE_WRITE_BOOSTER)) {
+ /* This device does not support Write Booster */
+ return (0);
+ }
+
+ if (ufshci_dev_enable_write_booster(ctrlr))
+ return (0);
+
+ /* Get WriteBooster buffer parameters */
+ dev->wb_buffer_type = dev->dev_desc.bWriteBoosterBufferType;
+ dev->wb_user_space_config_option =
+ dev->dev_desc.bWriteBoosterBufferPreserveUserSpaceEn;
+
+ /*
+ * Find the size of the write buffer.
+ * With LU-dedicated (00h), the WriteBooster buffer is assigned
+ * exclusively to one chosen LU (not one-per-LU), whereas Shared (01h)
+ * uses a single device-wide buffer shared by multiple LUs.
+ */
+ if (dev->wb_buffer_type == UFSHCI_DESC_WB_BUF_TYPE_SINGLE_SHARED) {
+ alloc_units = be32toh(
+ dev->dev_desc.dNumSharedWriteBoosterBufferAllocUnits);
+ ufshci_printf(ctrlr,
+ "WriteBooster buffer type = Shared, alloc_units=%d\n",
+ alloc_units);
+ } else if (dev->wb_buffer_type ==
+ UFSHCI_DESC_WB_BUF_TYPE_LU_DEDICATED) {
+ ufshci_printf(ctrlr, "WriteBooster buffer type = Dedicated\n");
+ for (lun = 0; lun < ctrlr->max_lun_count; lun++) {
+ /* Find a dedicated buffer using a unit descriptor */
+ if (ufshci_dev_read_unit_descriptor(ctrlr, lun,
+ &unit_desc))
+ continue;
+
+ alloc_units = be32toh(
+ unit_desc.dLUNumWriteBoosterBufferAllocUnits);
+ if (alloc_units) {
+ dev->wb_dedicated_lu = lun;
+ break;
+ }
+ }
+ } else {
+ ufshci_printf(ctrlr,
+ "Not supported WriteBooster buffer type: 0x%x\n",
+ dev->wb_buffer_type);
+ goto out;
+ }
+
+ if (alloc_units == 0) {
+ ufshci_printf(ctrlr, "The WriteBooster buffer size is zero\n");
+ goto out;
+ }
+
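+ /*
+ * Convert allocation units to MiB: an allocation unit is
+ * bAllocationUnitSize segments, and dSegmentSize is expressed in
+ * 512-byte sectors, so divide by the number of sectors per MiB.
+ * With hypothetical values alloc_units = 2048, bAllocationUnitSize = 1
+ * and dSegmentSize = 1024, this yields 2048 * 1 * 1024 / 2048 = 1024 MiB.
+ */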
+ dev->wb_buffer_size_mb = alloc_units *
+ dev->geo_desc.bAllocationUnitSize *
+ (be32toh(dev->geo_desc.dSegmentSize)) /
+ (mega_byte / UFSHCI_SECTOR_SIZE);
+
+ /* Set to flush when 40% of the available buffer size remains */
+ dev->write_booster_flush_threshold = UFSHCI_ATTR_WB_AVAILABLE_40;
+
+ /*
+ * Check if WriteBooster Buffer lifetime is left.
+ * WriteBooster Buffer lifetime is the percent of life used based on P/E
+ * cycles. If "preserve user space" is enabled, writes to normal user
+ * space also consume WB life since the area is shared.
+ */
+ error = ufshci_dev_is_write_booster_buffer_life_time_left(ctrlr,
+ &is_life_time_left);
+ if (error)
+ goto out;
+
+ if (!is_life_time_left) {
+ ufshci_printf(ctrlr,
+ "There is no WriteBooster buffer life time left.\n");
+ goto out;
+ }
+
+ ufshci_printf(ctrlr, "WriteBooster Enabled\n");
+ return (0);
+out:
+ ufshci_dev_disable_write_booster(ctrlr);
+ return (error);
+}
diff --git a/sys/dev/ufshci/ufshci_pci.c b/sys/dev/ufshci/ufshci_pci.c
new file mode 100644
index 000000000000..992026fd4f4d
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_pci.c
@@ -0,0 +1,262 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/proc.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+
+#include <dev/pci/pcireg.h>
+#include <dev/pci/pcivar.h>
+
+#include "ufshci_private.h"
+
+static int ufshci_pci_probe(device_t);
+static int ufshci_pci_attach(device_t);
+static int ufshci_pci_detach(device_t);
+
+static int ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr);
+
+static device_method_t ufshci_pci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, ufshci_pci_probe),
+ DEVMETHOD(device_attach, ufshci_pci_attach),
+ DEVMETHOD(device_detach, ufshci_pci_detach),
+ /* TODO: Implement Suspend, Resume */
+ { 0, 0 }
+};
+
+static driver_t ufshci_pci_driver = {
+ "ufshci",
+ ufshci_pci_methods,
+ sizeof(struct ufshci_controller),
+};
+
+DRIVER_MODULE(ufshci, pci, ufshci_pci_driver, 0, 0);
+
+static struct _pcsid {
+ uint32_t devid;
+ const char *desc;
+ uint32_t ref_clk;
+ uint32_t quirks;
+} pci_ids[] = { { 0x131b36, "QEMU UFS Host Controller", UFSHCI_REF_CLK_19_2MHz,
+ UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE |
+ UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK },
+ { 0x98fa8086, "Intel Lakefield UFS Host Controller",
+ UFSHCI_REF_CLK_19_2MHz,
+ UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE |
+ UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE |
+ UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY },
+ { 0x54ff8086, "Intel UFS Host Controller", UFSHCI_REF_CLK_19_2MHz },
+ { 0x00000000, NULL } };
+
+static int
+ufshci_pci_probe(device_t device)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(device);
+ uint32_t devid = pci_get_devid(device);
+ struct _pcsid *ep = pci_ids;
+
+ while (ep->devid && ep->devid != devid)
+ ++ep;
+
+ if (ep->devid) {
+ ctrlr->quirks = ep->quirks;
+ ctrlr->ref_clk = ep->ref_clk;
+ }
+
+ if (ep->desc) {
+ device_set_desc(device, ep->desc);
+ return (BUS_PROBE_DEFAULT);
+ }
+
+ return (ENXIO);
+}
+
+static int
+ufshci_pci_allocate_bar(struct ufshci_controller *ctrlr)
+{
+ ctrlr->resource_id = PCIR_BAR(0);
+
+ ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY,
+ &ctrlr->resource_id, RF_ACTIVE);
+
+ if (ctrlr->resource == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate pci resource\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->bus_tag = rman_get_bustag(ctrlr->resource);
+ ctrlr->bus_handle = rman_get_bushandle(ctrlr->resource);
+ ctrlr->regs = (struct ufshci_registers *)ctrlr->bus_handle;
+
+ return (0);
+}
+
+static int
+ufshci_pci_attach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int status;
+
+ ctrlr->dev = dev;
+ status = ufshci_pci_allocate_bar(ctrlr);
+ if (status != 0)
+ goto bad;
+ pci_enable_busmaster(dev);
+ status = ufshci_pci_setup_interrupts(ctrlr);
+ if (status != 0)
+ goto bad;
+
+ return (ufshci_attach(dev));
+bad:
+ if (ctrlr->resource != NULL) {
+ bus_release_resource(dev, SYS_RES_MEMORY, ctrlr->resource_id,
+ ctrlr->resource);
+ }
+
+ if (ctrlr->tag)
+ bus_teardown_intr(dev, ctrlr->res, ctrlr->tag);
+
+ if (ctrlr->res)
+ bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(ctrlr->res),
+ ctrlr->res);
+
+ if (ctrlr->msi_count > 0)
+ pci_release_msi(dev);
+
+ return (status);
+}
+
+static int
+ufshci_pci_detach(device_t dev)
+{
+ struct ufshci_controller *ctrlr = device_get_softc(dev);
+ int error;
+
+ error = ufshci_detach(dev);
+ if (ctrlr->msi_count > 0)
+ pci_release_msi(dev);
+ pci_disable_busmaster(dev);
+ return (error);
+}
+
+static int
+ufshci_pci_setup_shared(struct ufshci_controller *ctrlr, int rid)
+{
+ int error;
+
+ ctrlr->num_io_queues = 1;
+ ctrlr->rid = rid;
+ ctrlr->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
+ &ctrlr->rid, RF_SHAREABLE | RF_ACTIVE);
+ if (ctrlr->res == NULL) {
+ ufshci_printf(ctrlr, "unable to allocate shared interrupt\n");
+ return (ENOMEM);
+ }
+
+ error = bus_setup_intr(ctrlr->dev, ctrlr->res,
+ INTR_TYPE_MISC | INTR_MPSAFE, NULL, ufshci_ctrlr_shared_handler,
+ ctrlr, &ctrlr->tag);
+ if (error) {
+ ufshci_printf(ctrlr, "unable to setup shared interrupt\n");
+ return (error);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_pci_setup_interrupts(struct ufshci_controller *ctrlr)
+{
+ device_t dev = ctrlr->dev;
+ int force_intx = 0;
+ int num_io_queues, per_cpu_io_queues, min_cpus_per_ioq;
+ int num_vectors_requested;
+
+ TUNABLE_INT_FETCH("hw.ufshci.force_intx", &force_intx);
+ if (force_intx)
+ goto intx;
+
+ if (pci_msix_count(dev) == 0)
+ goto msi;
+
+ /*
+ * Try to allocate one MSI-X vector per core for the I/O queues, plus
+ * one for the admin queue, but accept a single shared MSI-X vector if
+ * we have to. Fall back to MSI if we can't get any MSI-X.
+ */
+
+ /*
+ * TODO: Need to implement MCQ (Multi-Circular Queue).
+ * Example: num_io_queues = mp_ncpus;
+ */
+ num_io_queues = 1;
+
+ TUNABLE_INT_FETCH("hw.ufshci.num_io_queues", &num_io_queues);
+ if (num_io_queues < 1 || num_io_queues > mp_ncpus)
+ num_io_queues = mp_ncpus;
+
+ per_cpu_io_queues = 1;
+ TUNABLE_INT_FETCH("hw.ufshci.per_cpu_io_queues", &per_cpu_io_queues);
+ if (per_cpu_io_queues == 0)
+ num_io_queues = 1;
+
+ min_cpus_per_ioq = smp_threads_per_core;
+ TUNABLE_INT_FETCH("hw.ufshci.min_cpus_per_ioq", &min_cpus_per_ioq);
+ if (min_cpus_per_ioq > 1) {
+ num_io_queues = min(num_io_queues,
+ max(1, mp_ncpus / min_cpus_per_ioq));
+ }
+
+ num_io_queues = min(num_io_queues, max(1, pci_msix_count(dev) - 1));
+
+again:
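+ /*
+ * Keep the I/O queue count a multiple of the number of memory domains
+ * so the queues can be spread evenly across domains.
+ */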
+ if (num_io_queues > vm_ndomains)
+ num_io_queues -= num_io_queues % vm_ndomains;
+ num_vectors_requested = min(num_io_queues + 1, pci_msix_count(dev));
+ ctrlr->msi_count = num_vectors_requested;
+ if (pci_alloc_msix(dev, &ctrlr->msi_count) != 0) {
+ ufshci_printf(ctrlr, "unable to allocate MSI-X\n");
+ ctrlr->msi_count = 0;
+ goto msi;
+ }
+ if (ctrlr->msi_count == 1)
+ return (ufshci_pci_setup_shared(ctrlr, 1));
+ if (ctrlr->msi_count != num_vectors_requested) {
+ pci_release_msi(dev);
+ num_io_queues = ctrlr->msi_count - 1;
+ goto again;
+ }
+
+ ctrlr->num_io_queues = num_io_queues;
+ return (0);
+
+msi:
+ /*
+ * Try to allocate 2 MSIs (admin and I/O queues), but accept a single
+ * shared one if we have to. Fall back to INTx if we can't get any MSI.
+ */
+ ctrlr->msi_count = min(pci_msi_count(dev), 2);
+ if (ctrlr->msi_count > 0) {
+ if (pci_alloc_msi(dev, &ctrlr->msi_count) != 0) {
+ ufshci_printf(ctrlr, "unable to allocate MSI\n");
+ ctrlr->msi_count = 0;
+ } else if (ctrlr->msi_count == 2) {
+ ctrlr->num_io_queues = 1;
+ return (0);
+ }
+ }
+
+intx:
+ return (ufshci_pci_setup_shared(ctrlr, ctrlr->msi_count > 0 ? 1 : 0));
+}
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
new file mode 100644
index 000000000000..ec388c06e248
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -0,0 +1,570 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#ifndef __UFSHCI_PRIVATE_H__
+#define __UFSHCI_PRIVATE_H__
+
+#ifdef _KERNEL
+#include <sys/types.h>
+#else /* !_KERNEL */
+#include <stdbool.h>
+#include <stdint.h>
+#endif /* _KERNEL */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bio.h>
+#include <sys/bus.h>
+#include <sys/counter.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/memdesc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/rman.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include "ufshci.h"
+
+MALLOC_DECLARE(M_UFSHCI);
+
+#define UFSHCI_DEVICE_INIT_TIMEOUT_MS (2000) /* in milliseconds */
+#define UFSHCI_UIC_CMD_TIMEOUT_MS (500) /* in milliseconds */
+#define UFSHCI_DEFAULT_TIMEOUT_PERIOD (10) /* in seconds */
+#define UFSHCI_MIN_TIMEOUT_PERIOD (5) /* in seconds */
+#define UFSHCI_MAX_TIMEOUT_PERIOD (120) /* in seconds */
+
+#define UFSHCI_DEFAULT_RETRY_COUNT (4)
+
+#define UFSHCI_UTR_ENTRIES (32)
+#define UFSHCI_UTRM_ENTRIES (8)
+
+#define UFSHCI_SECTOR_SIZE (512)
+
+struct ufshci_controller;
+
+struct ufshci_completion_poll_status {
+ struct ufshci_completion cpl;
+ int done;
+ bool error;
+};
+
+struct ufshci_request {
+ struct ufshci_upiu request_upiu;
+ size_t request_size;
+ size_t response_size;
+
+ struct memdesc payload;
+ enum ufshci_data_direction data_direction;
+ ufshci_cb_fn_t cb_fn;
+ void *cb_arg;
+ bool is_admin;
+ int32_t retries;
+ bool payload_valid;
+ bool spare[2]; /* Future use */
+ STAILQ_ENTRY(ufshci_request) stailq;
+};
+
+enum ufshci_slot_state {
+ UFSHCI_SLOT_STATE_FREE = 0x0,
+ UFSHCI_SLOT_STATE_RESERVED = 0x1,
+ UFSHCI_SLOT_STATE_SCHEDULED = 0x2,
+ UFSHCI_SLOT_STATE_TIMEOUT = 0x3,
+ UFSHCI_SLOT_STATE_NEED_ERROR_HANDLING = 0x4,
+};
+
+struct ufshci_tracker {
+ TAILQ_ENTRY(ufshci_tracker) tailq;
+ struct ufshci_request *req;
+ struct ufshci_req_queue *req_queue;
+ struct ufshci_hw_queue *hwq;
+ uint8_t slot_num;
+ enum ufshci_slot_state slot_state;
+ size_t response_size;
+ sbintime_t deadline;
+
+ bus_dmamap_t payload_dma_map;
+ uint64_t payload_addr;
+
+ struct ufshci_utp_cmd_desc *ucd;
+ bus_addr_t ucd_bus_addr;
+
+ uint16_t prdt_off;
+ uint16_t prdt_entry_cnt;
+};
+
+enum ufshci_queue_mode {
+ UFSHCI_Q_MODE_SDB = 0x00, /* Single Doorbell Mode */
+ UFSHCI_Q_MODE_MCQ = 0x01, /* Multi-Circular Queue Mode */
+};
+
+/*
+ * UFS uses slot-based Single Doorbell (SDB) mode for request submission by
+ * default and additionally supports Multi-Circular Queue (MCQ) in UFS 4.0. To
+ * minimize duplicated code between SDB and MCQ, mode dependent operations are
+ * extracted into ufshci_qops.
+ */
+struct ufshci_qops {
+ int (*construct)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries,
+ bool is_task_mgmt);
+ void (*destroy)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+ struct ufshci_hw_queue *(*get_hw_queue)(
+ struct ufshci_req_queue *req_queue);
+ int (*enable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+ void (*disable)(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+ int (*reserve_slot)(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+ int (*reserve_admin_slot)(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+ void (*ring_doorbell)(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+ bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+ void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+ bool (*process_cpl)(struct ufshci_req_queue *req_queue);
+ int (*get_inflight_io)(struct ufshci_controller *ctrlr);
+};
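+
+/*
+ * Request-queue code dispatches through these operations, e.g.
+ * req_queue->qops.ring_doorbell(ctrlr, tr), so the SDB and MCQ paths can
+ * share the same submission and completion logic.
+ */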
+
+#define UFSHCI_SDB_Q 0 /* Queue number for a single doorbell queue */
+
+enum ufshci_recovery {
+ RECOVERY_NONE = 0, /* Normal operations */
+ RECOVERY_WAITING, /* waiting for the reset to complete */
+};
+
+/*
+ * Generic queue container used by both SDB (fixed 32-slot bitmap) and MCQ
+ * (ring buffer) modes. Fields are shared; some, such as sq_head, sq_tail,
+ * and cq_head, are unused in SDB mode but used in MCQ mode.
+ */
+struct ufshci_hw_queue {
+ struct ufshci_controller *ctrlr;
+ struct ufshci_req_queue *req_queue;
+ uint32_t id;
+ int domain;
+ int cpu;
+
+ struct callout timer; /* recovery lock */
+ bool timer_armed; /* recovery lock */
+ enum ufshci_recovery recovery_state; /* recovery lock */
+
+ union {
+ struct ufshci_utp_xfer_req_desc *utrd;
+ struct ufshci_utp_task_mgmt_req_desc *utmrd;
+ };
+
+ bus_dma_tag_t dma_tag_queue;
+ bus_dmamap_t queuemem_map;
+ bus_addr_t req_queue_addr;
+
+ bus_addr_t *ucd_bus_addr;
+
+ uint32_t num_entries;
+ uint32_t num_trackers;
+
+ TAILQ_HEAD(, ufshci_tracker) free_tr;
+ TAILQ_HEAD(, ufshci_tracker) outstanding_tr;
+
+ /*
+ * A Request List using the single doorbell method uses a dedicated
+ * ufshci_tracker, one per slot.
+ */
+ struct ufshci_tracker **act_tr;
+
+ uint32_t sq_head; /* MCQ mode */
+ uint32_t sq_tail; /* MCQ mode */
+ uint32_t cq_head; /* MCQ mode */
+
+ uint32_t phase;
+ int64_t num_cmds;
+ int64_t num_intr_handler_calls;
+ int64_t num_retries;
+ int64_t num_failures;
+
+ /*
+ * Each lock may be acquired independently.
+ * When both are required, acquire them in this order to avoid
+ * deadlocks. (recovery_lock -> qlock)
+ */
+ struct mtx_padalign qlock;
+ struct mtx_padalign recovery_lock;
+};
+
+struct ufshci_req_queue {
+ struct ufshci_controller *ctrlr;
+ int domain;
+
+ /*
+ * queue_mode: active transfer scheme
+ * UFSHCI_Q_MODE_SDB - legacy single-doorbell list
+ * UFSHCI_Q_MODE_MCQ - modern multi-circular queue (UFSHCI 4.0+)
+ */
+ enum ufshci_queue_mode queue_mode;
+
+ uint8_t num_q;
+ struct ufshci_hw_queue *hwq;
+
+ struct ufshci_qops qops;
+
+ bool is_task_mgmt;
+ uint32_t num_entries;
+ uint32_t num_trackers;
+
+ /* Shared DMA resource */
+ struct ufshci_utp_cmd_desc *ucd;
+
+ bus_dma_tag_t dma_tag_ucd;
+ bus_dma_tag_t dma_tag_payload;
+
+ bus_dmamap_t ucdmem_map;
+};
+
+struct ufshci_device {
+ uint32_t max_lun_count;
+
+ struct ufshci_device_descriptor dev_desc;
+ struct ufshci_geometry_descriptor geo_desc;
+
+ uint32_t unipro_version;
+
+ /* WriteBooster */
+ bool is_wb_enabled;
+ bool is_wb_flush_enabled;
+ uint32_t wb_buffer_type;
+ uint32_t wb_buffer_size_mb;
+ uint32_t wb_user_space_config_option;
+ uint8_t wb_dedicated_lu;
+ uint32_t write_booster_flush_threshold;
+};
+
+/*
+ * One of these per allocated device.
+ */
+struct ufshci_controller {
+ device_t dev;
+
+ uint32_t quirks;
+#define UFSHCI_QUIRK_IGNORE_UIC_POWER_MODE \
+ 1 /* QEMU does not support UIC POWER MODE */
+#define UFSHCI_QUIRK_LONG_PEER_PA_TACTIVATE \
+ 2 /* Need an additional 200 us of PA_TActivate */
+#define UFSHCI_QUIRK_WAIT_AFTER_POWER_MODE_CHANGE \
+ 4 /* Need to wait 1250us after power mode change */
+#define UFSHCI_QUIRK_CHANGE_LANE_AND_GEAR_SEPARATELY \
+ 8 /* Need to change the number of lanes before changing HS-GEAR. */
+#define UFSHCI_QUIRK_NOT_SUPPORT_ABORT_TASK \
+ 16 /* QEMU does not support Task Management Request */
+
+ uint32_t ref_clk;
+
+ struct cam_sim *ufshci_sim;
+ struct cam_path *ufshci_path;
+
+ struct mtx sc_mtx;
+ uint32_t sc_unit;
+ uint8_t sc_name[16];
+
+ struct ufshci_device ufs_dev;
+
+ bus_space_tag_t bus_tag;
+ bus_space_handle_t bus_handle;
+ int resource_id;
+ struct resource *resource;
+
+ /* Currently, there is no UFSHCI controller that supports MSI or MSI-X. */
+ int msi_count;
+
+ /* Fields for tracking progress during controller initialization. */
+ struct intr_config_hook config_hook;
+
+ struct task reset_task;
+ struct taskqueue *taskqueue;
+
+ /* For shared legacy interrupt. */
+ int rid;
+ struct resource *res;
+ void *tag;
+
+ uint32_t major_version;
+ uint32_t minor_version;
+
+ uint32_t enable_aborts;
+
+ uint32_t num_io_queues;
+ uint32_t max_hw_pend_io;
+
+ /* Maximum logical unit number */
+ uint32_t max_lun_count;
+
+ /* Maximum i/o size in bytes */
+ uint32_t max_xfer_size;
+
+ /* Controller capabilities (CAP register) */
+ uint32_t cap;
+
+ /* Page size that we're currently using */
+ uint32_t page_size;
+
+ /* Timeout value on device initialization */
+ uint32_t device_init_timeout_in_ms;
+
+ /* Timeout value on UIC command */
+ uint32_t uic_cmd_timeout_in_ms;
+
+ /* UTMR/UTR queue timeout period in seconds */
+ uint32_t timeout_period;
+
+ /* UTMR/UTR queue retry count */
+ uint32_t retry_count;
+
+ /* UFS Host Controller Interface Registers */
+ struct ufshci_registers *regs;
+
+ /* UFS Transport Protocol Layer (UTP) */
+ struct ufshci_req_queue task_mgmt_req_queue;
+ struct ufshci_req_queue transfer_req_queue;
+ bool is_single_db_supported; /* LSDBS cap bit: 0 = supported */
+ bool is_mcq_supported; /* MCQS cap bit: 1 = supported */
+
+ /* UFS Interconnect Layer (UIC) */
+ struct mtx uic_cmd_lock;
+ uint32_t unipro_version;
+ uint8_t hs_gear;
+ uint32_t tx_lanes;
+ uint32_t rx_lanes;
+ uint32_t max_rx_hs_gear;
+ uint32_t max_tx_lanes;
+ uint32_t max_rx_lanes;
+
+ bool is_failed;
+};
+
+#define ufshci_mmio_offsetof(reg) offsetof(struct ufshci_registers, reg)
+
+#define ufshci_mmio_read_4(sc, reg) \
+ bus_space_read_4((sc)->bus_tag, (sc)->bus_handle, \
+ ufshci_mmio_offsetof(reg))
+
+#define ufshci_mmio_write_4(sc, reg, val) \
+ bus_space_write_4((sc)->bus_tag, (sc)->bus_handle, \
+ ufshci_mmio_offsetof(reg), val)
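+
+/*
+ * These macros take the register's field name in struct ufshci_registers;
+ * e.g. ufshci_mmio_read_4(ctrlr, hcs) reads the 32-bit HCS register at
+ * offsetof(struct ufshci_registers, hcs).
+ */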
+
+#define ufshci_printf(ctrlr, fmt, args...) \
+ device_printf(ctrlr->dev, fmt, ##args)
+
+/* UFSHCI */
+void ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,
+ bool error);
+
+/* SIM */
+int ufshci_sim_attach(struct ufshci_controller *ctrlr);
+void ufshci_sim_detach(struct ufshci_controller *ctrlr);
+
+/* Controller */
+int ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev);
+void ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev);
+void ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
+/* The ctrlr argument is a void * so it can be used with config_intrhook. */
+void ufshci_ctrlr_start_config_hook(void *arg);
+void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+
+int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
+int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
+int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
+int ufshci_ctrlr_send_nop(struct ufshci_controller *ctrlr);
+
+void ufshci_reg_dump(struct ufshci_controller *ctrlr);
+
+/* Device */
+int ufshci_dev_init(struct ufshci_controller *ctrlr);
+int ufshci_dev_reset(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_reference_clock(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_unipro(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_uic_power_mode(struct ufshci_controller *ctrlr);
+int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
+int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
+int ufshci_dev_config_write_booster(struct ufshci_controller *ctrlr);
+
+/* Controller Command */
+void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid);
+void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg);
+void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, struct ufshci_query_param param);
+void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t *cmd_ptr, uint8_t cmd_len,
+ uint32_t data_len, uint8_t lun, bool is_write);
+
+/* Request Queue */
+bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
+int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
+void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq);
+int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, bool is_admin);
+void ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr);
+
+/* Request Single Doorbell Queue */
+int ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries,
+ bool is_task_mgmt);
+void ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+struct ufshci_hw_queue *ufshci_req_sdb_get_hw_queue(
+ struct ufshci_req_queue *req_queue);
+void ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr);
+void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
+int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
+
+/* UIC Command */
+int ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr);
+int ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr);
+int ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr);
+int ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t *return_value);
+int ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t value);
+int ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t *return_value);
+int ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t value);
+int ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr);
+
+/* SYSCTL */
+void ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr);
+
+int ufshci_attach(device_t dev);
+int ufshci_detach(device_t dev);
+
+/*
+ * Wait for a command to complete using the ufshci_completion_poll_cb. Used in
+ * limited contexts where the caller knows it's OK to block briefly while the
+ * command runs. The ISR will run the callback which will set status->done to
+ * true, usually within microseconds. If not, then after one second the
+ * timeout handler should reset the controller and abort all outstanding
+ * requests, including this polled one. If it still hasn't completed after ten
+ * seconds, then something is wrong with the driver, and panic is the only way
+ * to recover.
+ *
+ * Most commands using this interface aren't actual I/O to the drive's media,
+ * so they complete within a few microseconds. Adaptively spin for one tick to
+ * catch the vast majority of these without waiting for a tick plus scheduling
+ * delays. Since these run at startup, this drastically reduces startup time.
+ */
+static __inline void
+ufshci_completion_poll(struct ufshci_completion_poll_status *status)
+{
+ int timeout = ticks + 10 * hz;
+ sbintime_t delta_t = SBT_1US;
+
+ while (!atomic_load_acq_int(&status->done)) {
+ if (timeout - ticks < 0)
+ panic(
+ "UFSHCI polled command failed to complete within 10s.");
+ pause_sbt("ufshci_cpl", delta_t, 0, C_PREL(1));
+ delta_t = min(SBT_1MS, delta_t * 3 / 2);
+ }
+}
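+
+/*
+ * Typical polled-command pattern (see ufshci_ctrlr_send_nop()):
+ *
+ *	struct ufshci_completion_poll_status status;
+ *
+ *	status.done = 0;
+ *	ufshci_ctrlr_cmd_send_nop(ctrlr, ufshci_completion_poll_cb, &status);
+ *	ufshci_completion_poll(&status);
+ *	if (status.error)
+ *		handle the failure;
+ */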
+
+static __inline void
+ufshci_single_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ uint64_t *bus_addr = (uint64_t *)arg;
+
+ KASSERT(nseg == 1, ("number of segments (%d) is not 1", nseg));
+ if (error != 0)
+ printf("ufshci_single_map err %d\n", error);
+ *bus_addr = seg[0].ds_addr;
+}
+
+static __inline struct ufshci_request *
+_ufshci_allocate_request(const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ KASSERT(how == M_WAITOK || how == M_NOWAIT,
+ ("ufshci_allocate_request: invalid how %d", how));
+
+ req = malloc(sizeof(*req), M_UFSHCI, how | M_ZERO);
+ if (req != NULL) {
+ req->cb_fn = cb_fn;
+ req->cb_arg = cb_arg;
+ }
+ return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_vaddr(void *payload, uint32_t payload_size,
+ const int how, ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+ if (req != NULL) {
+ if (payload_size) {
+ req->payload = memdesc_vaddr(payload, payload_size);
+ req->payload_valid = true;
+ }
+ }
+ return (req);
+}
+
+static __inline struct ufshci_request *
+ufshci_allocate_request_bio(struct bio *bio, const int how,
+ ufshci_cb_fn_t cb_fn, void *cb_arg)
+{
+ struct ufshci_request *req;
+
+ req = _ufshci_allocate_request(how, cb_fn, cb_arg);
+ if (req != NULL) {
+ req->payload = memdesc_bio(bio);
+ req->payload_valid = true;
+ }
+ return (req);
+}
+
+#define ufshci_free_request(req) free(req, M_UFSHCI)
+
+void ufshci_ctrlr_shared_handler(void *arg);
+
+#endif /* __UFSHCI_PRIVATE_H__ */
diff --git a/sys/dev/ufshci/ufshci_reg.h b/sys/dev/ufshci/ufshci_reg.h
new file mode 100644
index 000000000000..6d5768505102
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_reg.h
@@ -0,0 +1,469 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#ifndef __UFSHCI_REG_H__
+#define __UFSHCI_REG_H__
+
+#include <sys/param.h>
+#include <sys/endian.h>
+
+/* UFSHCI 4.1, section 5.1 Register Map */
+struct ufshci_registers {
+ /* Host Capabilities (00h) */
+ uint32_t cap; /* Host Controller Capabilities */
+ uint32_t mcqcap; /* Multi-Circular Queue Capability Register */
+ uint32_t ver; /* UFS Version */
+ uint32_t ext_cap; /* Extended Controller Capabilities */
+ uint32_t hcpid; /* Product ID */
+ uint32_t hcmid; /* Manufacturer ID */
+ uint32_t ahit; /* Auto-Hibernate Idle Timer */
+ uint32_t reserved1;
+ /* Operation and Runtime (20h) */
+ uint32_t is; /* Interrupt Status */
+ uint32_t ie; /* Interrupt Enable */
+ uint32_t reserved2;
+ uint32_t hcsext; /* Host Controller Status Extended */
+ uint32_t hcs; /* Host Controller Status */
+ uint32_t hce; /* Host Controller Enable */
+ uint32_t uecpa; /* Host UIC Error Code PHY Adapter Layer */
+ uint32_t uecdl; /* Host UIC Error Code Data Link Layer */
+ uint32_t uecn; /* Host UIC Error Code Network Layer */
+ uint32_t uect; /* Host UIC Error Code Transport Layer */
+ uint32_t uecdme; /* Host UIC Error Code DME */
+ uint32_t utriacr; /* Interrupt Aggregation Control */
+ /* UTP Transfer (50h) */
+ uint32_t utrlba; /* UTRL Base Address */
+ uint32_t utrlbau; /* UTRL Base Address Upper 32-Bits */
+ uint32_t utrldbr; /* UTRL DoorBell Register */
+ uint32_t utrlclr; /* UTRL Clear Register */
+ uint32_t utrlrsr; /* UTRL Run-Stop Register */
+ uint32_t utrlcnr; /* UTRL Completion Notification */
+ uint64_t reserved3;
+ /* UTP Task Management (70h) */
+ uint32_t utmrlba; /* UTMRL Base Address */
+ uint32_t utmrlbau; /* UTMRL Base Address Upper 32-Bits */
+ uint32_t utmrldbr; /* UTMRL DoorBell Register */
+ uint32_t utmrlclr; /* UTMRL Clear Register */
+ uint32_t utmrlrsr; /* UTMRL Run-Stop Register */
+ uint8_t reserved4[12];
+ /* UIC Command (90h) */
+ uint32_t uiccmd; /* UIC Command Register */
+ uint32_t ucmdarg1; /* UIC Command Argument 1 */
+ uint32_t ucmdarg2; /* UIC Command Argument 2 */
+ uint32_t ucmdarg3; /* UIC Command Argument 3 */
+ uint8_t reserved5[16];
+ /* UMA (B0h) */
+ uint8_t reserved6[16]; /* Reserved for Unified Memory Extension */
+ /* Vendor Specific (C0h) */
+ uint8_t vendor[64]; /* Vendor Specific Registers */
+ /* Crypto (100h) */
+ uint32_t ccap; /* Crypto Capability */
+ uint32_t reserved7[511];
+ /* Config (300h) */
+ uint32_t config; /* Global Configuration */
+ uint8_t reserved9[124];
+ /* MCQ Configuration (380h) */
+ uint32_t mcqconfig; /* MCQ Config Register */
+ /* Event Specific Interrupt Lower Base Address */
+ uint32_t esilba;
+ /* Event Specific Interrupt Upper Base Address */
+ uint32_t esiuba;
+ /* TODO: Need to define SQ/CQ registers */
+};
+
+/* Register field definitions */
+#define UFSHCI__REG__SHIFT (0)
+#define UFSHCI__REG__MASK (0)
+
+/*
+ * UFSHCI 4.1, section 5.2.1, Offset 00h: CAP
+ * Controller Capabilities
+ */
+#define UFSHCI_CAP_REG_NUTRS_SHIFT (0)
+#define UFSHCI_CAP_REG_NUTRS_MASK (0xFF)
+#define UFSHCI_CAP_REG_NORTT_SHIFT (8)
+#define UFSHCI_CAP_REG_NORTT_MASK (0xFF)
+#define UFSHCI_CAP_REG_NUTMRS_SHIFT (16)
+#define UFSHCI_CAP_REG_NUTMRS_MASK (0x7)
+#define UFSHCI_CAP_REG_EHSLUTRDS_SHIFT (22)
+#define UFSHCI_CAP_REG_EHSLUTRDS_MASK (0x1)
+#define UFSHCI_CAP_REG_AUTOH8_SHIFT (23)
+#define UFSHCI_CAP_REG_AUTOH8_MASK (0x1)
+#define UFSHCI_CAP_REG_64AS_SHIFT (24)
+#define UFSHCI_CAP_REG_64AS_MASK (0x1)
+#define UFSHCI_CAP_REG_OODDS_SHIFT (25)
+#define UFSHCI_CAP_REG_OODDS_MASK (0x1)
+#define UFSHCI_CAP_REG_UICDMETMS_SHIFT (26)
+#define UFSHCI_CAP_REG_UICDMETMS_MASK (0x1)
+#define UFSHCI_CAP_REG_CS_SHIFT (28)
+#define UFSHCI_CAP_REG_CS_MASK (0x1)
+#define UFSHCI_CAP_REG_LSDBS_SHIFT (29)
+#define UFSHCI_CAP_REG_LSDBS_MASK (0x1)
+#define UFSHCI_CAP_REG_MCQS_SHIFT (30)
+#define UFSHCI_CAP_REG_MCQS_MASK (0x1)
+#define UFSHCI_CAP_REG_EIS_SHIFT (31)
+#define UFSHCI_CAP_REG_EIS_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.2.2, Offset 04h: MCQCAP
+ * Multi-Circular Queue Capability Register
+ */
+#define UFSHCI_MCQCAP_REG_MAXQ_SHIFT (0)
+#define UFSHCI_MCQCAP_REG_MAXQ_MASK (0xFF)
+#define UFSHCI_MCQCAP_REG_SP_SHIFT (8)
+#define UFSHCI_MCQCAP_REG_SP_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_RRP_SHIFT (9)
+#define UFSHCI_MCQCAP_REG_RRP_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_EIS_SHIFT (10)
+#define UFSHCI_MCQCAP_REG_EIS_MASK (0x1)
+#define UFSHCI_MCQCAP_REG_QCFGPTR_SHIFT (16)
+#define UFSHCI_MCQCAP_REG_QCFGPTR_MASK (0xFF)
+#define UFSHCI_MCQCAP_REG_MIAG_SHIFT (24)
+#define UFSHCI_MCQCAP_REG_MIAG_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.3, Offset 08h: VER
+ * UFS Version
+ */
+#define UFSHCI_VER_REG_VS_SHIFT (0)
+#define UFSHCI_VER_REG_VS_MASK (0xF)
+#define UFSHCI_VER_REG_MNR_SHIFT (4)
+#define UFSHCI_VER_REG_MNR_MASK (0xF)
+#define UFSHCI_VER_REG_MJR_SHIFT (8)
+#define UFSHCI_VER_REG_MJR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.4, Offset 0Ch: EXT_CAP
+ * Extended Controller Capabilities
+ */
+#define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_SHIFT (0)
+#define UFSHCI_EXTCAP_REG_HOST_HINT_CACHE_SIZE_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.5, Offset 10h: HCPID
+ * Host Controller Identification Descriptor – Product ID
+ */
+#define UFSHCI_HCPID_REG_PID_SHIFT (0)
+#define UFSHCI_HCPID_REG_PID_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.6, Offset 14h: HCMID
+ * Host Controller Identification Descriptor – Manufacturer ID
+ */
+#define UFSHCI_HCMID_REG_MIC_SHIFT (0)
+#define UFSHCI_HCMID_REG_MIC_MASK (0xFFFF)
+#define UFSHCI_HCMID_REG_BI_SHIFT (8)
+#define UFSHCI_HCMID_REG_BI_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.2.7, Offset 18h: AHIT
+ * Auto-Hibernate Idle Timer
+ */
+#define UFSHCI_AHIT_REG_AH8ITV_SHIFT (0)
+#define UFSHCI_AHIT_REG_AH8ITV_MASK (0x3FF)
+#define UFSHCI_AHIT_REG_TS_SHIFT (10)
+#define UFSHCI_AHIT_REG_TS_MASK (0x7)
+
+/*
+ * UFSHCI 4.1, section 5.3.1, Offset 20h: IS
+ * Interrupt Status
+ */
+#define UFSHCI_IS_REG_UTRCS_SHIFT (0)
+#define UFSHCI_IS_REG_UTRCS_MASK (0x1)
+#define UFSHCI_IS_REG_UDEPRI_SHIFT (1)
+#define UFSHCI_IS_REG_UDEPRI_MASK (0x1)
+#define UFSHCI_IS_REG_UE_SHIFT (2)
+#define UFSHCI_IS_REG_UE_MASK (0x1)
+#define UFSHCI_IS_REG_UTMS_SHIFT (3)
+#define UFSHCI_IS_REG_UTMS_MASK (0x1)
+#define UFSHCI_IS_REG_UPMS_SHIFT (4)
+#define UFSHCI_IS_REG_UPMS_MASK (0x1)
+#define UFSHCI_IS_REG_UHXS_SHIFT (5)
+#define UFSHCI_IS_REG_UHXS_MASK (0x1)
+#define UFSHCI_IS_REG_UHES_SHIFT (6)
+#define UFSHCI_IS_REG_UHES_MASK (0x1)
+#define UFSHCI_IS_REG_ULLS_SHIFT (7)
+#define UFSHCI_IS_REG_ULLS_MASK (0x1)
+#define UFSHCI_IS_REG_ULSS_SHIFT (8)
+#define UFSHCI_IS_REG_ULSS_MASK (0x1)
+#define UFSHCI_IS_REG_UTMRCS_SHIFT (9)
+#define UFSHCI_IS_REG_UTMRCS_MASK (0x1)
+#define UFSHCI_IS_REG_UCCS_SHIFT (10)
+#define UFSHCI_IS_REG_UCCS_MASK (0x1)
+#define UFSHCI_IS_REG_DFES_SHIFT (11)
+#define UFSHCI_IS_REG_DFES_MASK (0x1)
+#define UFSHCI_IS_REG_UTPES_SHIFT (12)
+#define UFSHCI_IS_REG_UTPES_MASK (0x1)
+#define UFSHCI_IS_REG_HCFES_SHIFT (16)
+#define UFSHCI_IS_REG_HCFES_MASK (0x1)
+#define UFSHCI_IS_REG_SBFES_SHIFT (17)
+#define UFSHCI_IS_REG_SBFES_MASK (0x1)
+#define UFSHCI_IS_REG_CEFES_SHIFT (18)
+#define UFSHCI_IS_REG_CEFES_MASK (0x1)
+#define UFSHCI_IS_REG_SQES_SHIFT (19)
+#define UFSHCI_IS_REG_SQES_MASK (0x1)
+#define UFSHCI_IS_REG_CQES_SHIFT (20)
+#define UFSHCI_IS_REG_CQES_MASK (0x1)
+#define UFSHCI_IS_REG_IAGES_SHIFT (21)
+#define UFSHCI_IS_REG_IAGES_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.2, Offset 24h: IE
+ * Interrupt Enable
+ */
+#define UFSHCI_IE_REG_UTRCE_SHIFT (0)
+#define UFSHCI_IE_REG_UTRCE_MASK (0x1)
+#define UFSHCI_IE_REG_UDEPRIE_SHIFT (1)
+#define UFSHCI_IE_REG_UDEPRIE_MASK (0x1)
+#define UFSHCI_IE_REG_UEE_SHIFT (2)
+#define UFSHCI_IE_REG_UEE_MASK (0x1)
+#define UFSHCI_IE_REG_UTMSE_SHIFT (3)
+#define UFSHCI_IE_REG_UTMSE_MASK (0x1)
+#define UFSHCI_IE_REG_UPMSE_SHIFT (4)
+#define UFSHCI_IE_REG_UPMSE_MASK (0x1)
+#define UFSHCI_IE_REG_UHXSE_SHIFT (5)
+#define UFSHCI_IE_REG_UHXSE_MASK (0x1)
+#define UFSHCI_IE_REG_UHESE_SHIFT (6)
+#define UFSHCI_IE_REG_UHESE_MASK (0x1)
+#define UFSHCI_IE_REG_ULLSE_SHIFT (7)
+#define UFSHCI_IE_REG_ULLSE_MASK (0x1)
+#define UFSHCI_IE_REG_ULSSE_SHIFT (8)
+#define UFSHCI_IE_REG_ULSSE_MASK (0x1)
+#define UFSHCI_IE_REG_UTMRCE_SHIFT (9)
+#define UFSHCI_IE_REG_UTMRCE_MASK (0x1)
+#define UFSHCI_IE_REG_UCCE_SHIFT (10)
+#define UFSHCI_IE_REG_UCCE_MASK (0x1)
+#define UFSHCI_IE_REG_DFEE_SHIFT (11)
+#define UFSHCI_IE_REG_DFEE_MASK (0x1)
+#define UFSHCI_IE_REG_UTPEE_SHIFT (12)
+#define UFSHCI_IE_REG_UTPEE_MASK (0x1)
+#define UFSHCI_IE_REG_HCFEE_SHIFT (16)
+#define UFSHCI_IE_REG_HCFEE_MASK (0x1)
+#define UFSHCI_IE_REG_SBFEE_SHIFT (17)
+#define UFSHCI_IE_REG_SBFEE_MASK (0x1)
+#define UFSHCI_IE_REG_CEFEE_SHIFT (18)
+#define UFSHCI_IE_REG_CEFEE_MASK (0x1)
+#define UFSHCI_IE_REG_SQEE_SHIFT (19)
+#define UFSHCI_IE_REG_SQEE_MASK (0x1)
+#define UFSHCI_IE_REG_CQEE_SHIFT (20)
+#define UFSHCI_IE_REG_CQEE_MASK (0x1)
+#define UFSHCI_IE_REG_IAGEE_SHIFT (21)
+#define UFSHCI_IE_REG_IAGEE_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.3, Offset 2Ch: HCSEXT
+ * Host Controller Status Extended
+ */
+#define UFSHCI_HCSEXT_IIDUTPE_SHIFT (0)
+#define UFSHCI_HCSEXT_IIDUTPE_MASK (0xF)
+#define UFSHCI_HCSEXT_EXT_IIDUTPE_SHIFT (4)
+#define UFSHCI_HCSEXT_EXT_IIDUTPE_MASK (0xF)
+
+/*
+ * UFSHCI 4.1, section 5.3.4, Offset 30h: HCS
+ * Host Controller Status
+ */
+#define UFSHCI_HCS_REG_DP_SHIFT (0)
+#define UFSHCI_HCS_REG_DP_MASK (0x1)
+#define UFSHCI_HCS_REG_UTRLRDY_SHIFT (1)
+#define UFSHCI_HCS_REG_UTRLRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UTMRLRDY_SHIFT (2)
+#define UFSHCI_HCS_REG_UTMRLRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UCRDY_SHIFT (3)
+#define UFSHCI_HCS_REG_UCRDY_MASK (0x1)
+#define UFSHCI_HCS_REG_UPMCRS_SHIFT (8)
+#define UFSHCI_HCS_REG_UPMCRS_MASK (0x7)
+#define UFSHCI_HCS_REG_UTPEC_SHIFT (12)
+#define UFSHCI_HCS_REG_UTPEC_MASK (0xF)
+#define UFSHCI_HCS_REG_TTAGUTPE_SHIFT (16)
+#define UFSHCI_HCS_REG_TTAGUTPE_MASK (0xFF)
+#define UFSHCI_HCS_REG_TLUNUTPE_SHIFT (24)
+#define UFSHCI_HCS_REG_TLUNUTPE_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.3.5, Offset 34h: HCE
+ * Host Controller Enable
+ */
+#define UFSHCI_HCE_REG_HCE_SHIFT (0)
+#define UFSHCI_HCE_REG_HCE_MASK (0x1)
+#define UFSHCI_HCE_REG_CGE_SHIFT (1)
+#define UFSHCI_HCE_REG_CGE_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.6, Offset 38h: UECPA
+ * Host UIC Error Code PHY Adapter Layer
+ */
+#define UFSHCI_UECPA_REG_EC_SHIFT (0)
+#define UFSHCI_UECPA_REG_EC_MASK (0xF)
+#define UFSHCI_UECPA_REG_ERR_SHIFT (31)
+#define UFSHCI_UECPA_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.7, Offset 3Ch: UECDL
+ * Host UIC Error Code Data Link Layer
+ */
+#define UFSHCI_UECDL_REG_EC_SHIFT (0)
+#define UFSHCI_UECDL_REG_EC_MASK (0xFFFF)
+#define UFSHCI_UECDL_REG_ERR_SHIFT (31)
+#define UFSHCI_UECDL_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.8, Offset 40h: UECN
+ * Host UIC Error Code Network Layer
+ */
+#define UFSHCI_UECN_REG_EC_SHIFT (0)
+#define UFSHCI_UECN_REG_EC_MASK (0x7)
+#define UFSHCI_UECN_REG_ERR_SHIFT (31)
+#define UFSHCI_UECN_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.9, Offset 44h: UECT
+ * Host UIC Error Code Transport Layer
+ */
+#define UFSHCI_UECT_REG_EC_SHIFT (0)
+#define UFSHCI_UECT_REG_EC_MASK (0x7F)
+#define UFSHCI_UECT_REG_ERR_SHIFT (31)
+#define UFSHCI_UECT_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.3.10, Offset 48h: UECDME
+ * Host UIC Error Code
+ */
+#define UFSHCI_UECDME_REG_EC_SHIFT (0)
+#define UFSHCI_UECDME_REG_EC_MASK (0xF)
+#define UFSHCI_UECDME_REG_ERR_SHIFT (31)
+#define UFSHCI_UECDME_REG_ERR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.4.1, Offset 50h: UTRLBA
+ * UTP Transfer Request List Base Address
+ */
+#define UFSHCI_UTRLBA_REG_UTRLBA_SHIFT (0)
+#define UFSHCI_UTRLBA_REG_UTRLBA_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.2, Offset 54h: UTRLBAU
+ * UTP Transfer Request List Base Address Upper 32-bits
+ */
+#define UFSHCI_UTRLBAU_REG_UTRLBAU_SHIFT (0)
+#define UFSHCI_UTRLBAU_REG_UTRLBAU_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.3, Offset 58h: UTRLDBR
+ * UTP Transfer Request List Door Bell Register
+ */
+#define UFSHCI_UTRLDBR_REG_UTRLDBR_SHIFT (0)
+#define UFSHCI_UTRLDBR_REG_UTRLDBR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.4, Offset 5Ch: UTRLCLR
+ * UTP Transfer Request List Clear Register
+ */
+#define UFSHCI_UTRLCLR_REG_UTRLCLR_SHIFT (0)
+#define UFSHCI_UTRLCLR_REG_UTRLCLR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.4.5, Offset 60h: UTRLRSR
+ * UTP Transfer Request List Run Stop Register
+ */
+#define UFSHCI_UTRLRSR_REG_UTRLRSR_SHIFT (0)
+#define UFSHCI_UTRLRSR_REG_UTRLRSR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.4.6, Offset 64h: UTRLCNR
+ * UTP Transfer Request List Completion Notification Register
+ */
+#define UFSHCI_UTRLCNR_REG_UTRLCNR_SHIFT (0)
+#define UFSHCI_UTRLCNR_REG_UTRLCNR_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.1, Offset 70h: UTMRLBA
+ * UTP Task Management Request List Base Address
+ */
+#define UFSHCI_UTMRLBA_REG_UTMRLBA_SHIFT (0)
+#define UFSHCI_UTMRLBA_REG_UTMRLBA_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.2, Offset 74h: UTMRLBAU
+ * UTP Task Management Request List Base Address Upper 32-bits
+ */
+#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_SHIFT (0)
+#define UFSHCI_UTMRLBAU_REG_UTMRLBAU_MASK (0xFFFFFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.3, Offset 78h: UTMRLDBR
+ * UTP Task Management Request List Door Bell Register
+ */
+#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_SHIFT (0)
+#define UFSHCI_UTMRLDBR_REG_UTMRLDBR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.4, Offset 7Ch: UTMRLCLR
+ * UTP Task Management Request List CLear Register
+ */
+#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_SHIFT (0)
+#define UFSHCI_UTMRLCLR_REG_UTMRLCLR_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.5.5, Offset 80h: UTMRLRSR
+ * UTP Task Management Request List Run Stop Register
+ */
+#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_SHIFT (0)
+#define UFSHCI_UTMRLRSR_REG_UTMRLRSR_MASK (0x1)
+
+/*
+ * UFSHCI 4.1, section 5.6.1
+ * Offset 90h: UICCMD – UIC Command
+ */
+#define UFSHCI_UICCMD_REG_CMDOP_SHIFT (0)
+#define UFSHCI_UICCMD_REG_CMDOP_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.2
+ * Offset 94h: UICCMDARG1 – UIC Command Argument 1
+ */
+#define UFSHCI_UICCMDARG1_REG_ARG1_SHIFT (0)
+#define UFSHCI_UICCMDARG1_REG_ARG1_MASK (0xFFFFFFFF)
+#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_SHIFT (0)
+#define UFSHCI_UICCMDARG1_REG_GEN_SELECTOR_INDEX_MASK (0xFFFF)
+#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_SHIFT (16)
+#define UFSHCI_UICCMDARG1_REG_MIB_ATTR_MASK (0xFFFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.3
+ * Offset 98h: UICCMDARG2 – UIC Command Argument 2
+ */
+#define UFSHCI_UICCMDARG2_REG_ARG2_SHIFT (0)
+#define UFSHCI_UICCMDARG2_REG_ARG2_MASK (0xFFFFFFFF)
+#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_SHIFT (0)
+#define UFSHCI_UICCMDARG2_REG_ERROR_CODE_MASK (0xFF)
+#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_SHIFT (16)
+#define UFSHCI_UICCMDARG2_REG_ATTR_SET_TYPE_MASK (0xFF)
+
+/*
+ * UFSHCI 4.1, section 5.6.4
+ * Offset 9Ch: UICCMDARG3 – UIC Command Argument 3
+ */
+#define UFSHCI_UICCMDARG3_REG_ARG3_SHIFT (0)
+#define UFSHCI_UICCMDARG3_REG_ARG3_MASK (0xFFFFFFFF)
+
+/* Helper macro to combine *_MASK and *_SHIFT defines */
+#define UFSHCIM(name) (name##_MASK << name##_SHIFT)
+
+/* Helper macro to extract value from x */
+#define UFSHCIV(name, x) (((x) >> name##_SHIFT) & name##_MASK)
+
+/* Helper macro to construct a field value */
+#define UFSHCIF(name, x) (((x) & name##_MASK) << name##_SHIFT)
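+
+/*
+ * Example: UFSHCIV(UFSHCI_VER_REG_MJR, ver) extracts the major version field
+ * from a VER register value, and UFSHCIF(UFSHCI_HCE_REG_HCE, 1) builds an
+ * enable value for the HCE register.
+ */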
+
+#define UFSHCI_DUMP_REG(ctrlr, member) \
+ do { \
+ uint32_t _val = ufshci_mmio_read_4(ctrlr, member); \
+ ufshci_printf(ctrlr, " %-15s (0x%03lx) : 0x%08x\n", #member, \
+ ufshci_mmio_offsetof(member), _val); \
+ } while (0)
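+
+/* Example: UFSHCI_DUMP_REG(ctrlr, hcs) prints the HCS register offset and value. */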
+
+#endif /* __UFSHCI_REG_H__ */
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
new file mode 100644
index 000000000000..7aa164d00bec
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -0,0 +1,799 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/domainset.h>
+#include <sys/module.h>
+
+#include <cam/scsi/scsi_all.h>
+
+#include "sys/kassert.h"
+#include "ufshci_private.h"
+
+static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
+
+static const struct ufshci_qops sdb_utmr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+static const struct ufshci_qops sdb_utr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .disable = ufshci_req_sdb_disable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+int
+ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_req_queue *req_queue;
+ int error;
+
+ /*
+ * UTP Task Management Request only supports Legacy Single Doorbell
+ * Queue.
+ */
+ req_queue = &ctrlr->task_mgmt_req_queue;
+ req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
+ req_queue->qops = sdb_utmr_qops;
+
+ error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
+ /*is_task_mgmt*/ true);
+
+ return (error);
+}
+
+void
+ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
+void
+ufshci_utmr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->task_mgmt_req_queue.qops.disable(ctrlr,
+ &ctrlr->task_mgmt_req_queue);
+}
+
+int
+ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
+{
+ return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
+ &ctrlr->task_mgmt_req_queue));
+}
+
+int
+ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_req_queue *req_queue;
+ int error;
+
+ /*
+ * Currently, it does not support MCQ mode, so it should be set to SDB
+ * mode by default.
+ * TODO: Determine queue mode by checking Capability Registers
+ */
+ req_queue = &ctrlr->transfer_req_queue;
+ req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
+ req_queue->qops = sdb_utr_qops;
+
+ error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
+ /*is_task_mgmt*/ false);
+
+ return (error);
+}
+
+void
+ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.destroy(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
+void
+ufshci_utr_req_queue_disable(struct ufshci_controller *ctrlr)
+{
+ ctrlr->transfer_req_queue.qops.disable(ctrlr,
+ &ctrlr->transfer_req_queue);
+}
+
+int
+ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
+{
+ return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
+ &ctrlr->transfer_req_queue));
+}
+
+static bool
+ufshci_req_queue_response_is_error(struct ufshci_req_queue *req_queue,
+ uint8_t ocs, union ufshci_reponse_upiu *response)
+{
+ bool is_error = false;
+
+ /* Check request descriptor */
+ if (ocs != UFSHCI_DESC_SUCCESS) {
+ ufshci_printf(req_queue->ctrlr, "Invalid OCS = 0x%x\n", ocs);
+ is_error = true;
+ }
+
+ /* Check response UPIU header */
+ if (response->header.response != UFSHCI_RESPONSE_CODE_TARGET_SUCCESS) {
+ ufshci_printf(req_queue->ctrlr,
+ "Invalid response code = 0x%x\n",
+ response->header.response);
+ is_error = true;
+ }
+
+ return (is_error);
+}
+
+static void
+ufshci_req_queue_manual_complete_tracker(struct ufshci_tracker *tr, uint8_t ocs,
+ uint8_t rc)
+{
+ struct ufshci_utp_xfer_req_desc *desc;
+ struct ufshci_upiu_header *resp_header;
+
+ mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
+
+ resp_header = (struct ufshci_upiu_header *)tr->ucd->response_upiu;
+ resp_header->response = rc;
+
+ desc = &tr->hwq->utrd[tr->slot_num];
+ desc->overall_command_status = ocs;
+
+ ufshci_req_queue_complete_tracker(tr);
+}
+
+static void
+ufshci_req_queue_manual_complete_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, uint8_t ocs, uint8_t rc)
+{
+ struct ufshci_completion cpl;
+ bool error;
+
+ memset(&cpl, 0, sizeof(cpl));
+ cpl.response_upiu.header.response = rc;
+ error = ufshci_req_queue_response_is_error(req_queue, ocs,
+ &cpl.response_upiu);
+
+ if (error) {
+ ufshci_printf(req_queue->ctrlr,
+ "Manual complete request error:0x%x", error);
+ }
+
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl, error);
+
+ ufshci_free_request(req);
+}
+
+void
+ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ struct ufshci_req_queue *req_queue;
+ struct ufshci_tracker *tr;
+ struct ufshci_request *req;
+ int i;
+
+ if (!mtx_initialized(&hwq->qlock))
+ return;
+
+ mtx_lock(&hwq->qlock);
+
+ req_queue = &ctrlr->transfer_req_queue;
+
+ for (i = 0; i < req_queue->num_entries; i++) {
+ tr = hwq->act_tr[i];
+ req = tr->req;
+
+ if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED) {
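+ /*
+ * Manual completion runs the request callback, which must
+ * not be done with the queue lock held, so drop it around
+ * the call.
+ */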
+ mtx_unlock(&hwq->qlock);
+ ufshci_req_queue_manual_complete_request(req_queue, req,
+ UFSHCI_DESC_ABORTED,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&hwq->qlock);
+ } else if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED) {
+ /*
+ * Do not remove the tracker. The abort_tracker path
+ * will do that for us.
+ */
+ mtx_unlock(&hwq->qlock);
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&hwq->qlock);
+ }
+ }
+
+ mtx_unlock(&hwq->qlock);
+}
+
+void
+ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
+{
+ struct ufshci_req_queue *req_queue = tr->req_queue;
+ struct ufshci_hw_queue *hwq = tr->hwq;
+ struct ufshci_request *req = tr->req;
+ struct ufshci_completion cpl;
+ uint8_t ocs;
+ bool retry, error, retriable;
+
+ mtx_assert(&hwq->qlock, MA_NOTOWNED);
+
+ /*
+ * Copy the response from the Request Descriptor or UTP Command
+ * Descriptor.
+ */
+ cpl.size = tr->response_size;
+ if (req_queue->is_task_mgmt) {
+ memcpy(&cpl.response_upiu,
+ (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);
+
+ ocs = hwq->utmrd[tr->slot_num].overall_command_status;
+ } else {
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
+ cpl.size);
+
+ ocs = hwq->utrd[tr->slot_num].overall_command_status;
+ }
+
+ error = ufshci_req_queue_response_is_error(req_queue, ocs,
+ &cpl.response_upiu);
+
+ /* TODO: Implement retry */
+ /* retriable = ufshci_completion_is_retry(cpl); */
+ retriable = false;
+ retry = error && retriable &&
+ req->retries < req_queue->ctrlr->retry_count;
+ if (retry)
+ hwq->num_retries++;
+ if (error && req->retries >= req_queue->ctrlr->retry_count && retriable)
+ hwq->num_failures++;
+
+ KASSERT(tr->req, ("there is no request assigned to the tracker\n"));
+ KASSERT(cpl.response_upiu.header.task_tag ==
+ req->request_upiu.header.task_tag,
+ ("response task_tag does not match request task_tag\n"));
+
+ if (!retry) {
+ if (req->payload_valid) {
+ bus_dmamap_sync(req_queue->dma_tag_payload,
+ tr->payload_dma_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ }
+ /* Copy response from the command descriptor */
+ if (req->cb_fn)
+ req->cb_fn(req->cb_arg, &cpl, error);
+ }
+
+ mtx_lock(&hwq->qlock);
+
+ /* Clear the UTRL Completion Notification register */
+ req_queue->qops.clear_cpl_ntf(req_queue->ctrlr, tr);
+
+ if (retry) {
+ req->retries++;
+ ufshci_req_queue_submit_tracker(req_queue, tr,
+ req->data_direction);
+ } else {
+ if (req->payload_valid) {
+ bus_dmamap_unload(req_queue->dma_tag_payload,
+ tr->payload_dma_map);
+ }
+
+ /* Clear tracker */
+ ufshci_free_request(req);
+ tr->req = NULL;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ TAILQ_REMOVE(&hwq->outstanding_tr, tr, tailq);
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
+ }
+
+ mtx_unlock(&tr->hwq->qlock);
+}
+
+bool
+ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq;
+ bool done;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_lock(&hwq->recovery_lock);
+ done = req_queue->qops.process_cpl(req_queue);
+ mtx_unlock(&hwq->recovery_lock);
+
+ return (done);
+}
+
+static void
+ufshci_payload_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ struct ufshci_tracker *tr = arg;
+ struct ufshci_prdt_entry *prdt_entry;
+ int i;
+
+ /*
+ * If the mapping operation failed, return immediately. The caller
+ * is responsible for detecting the error status and failing the
+ * tracker manually.
+ */
+ if (error != 0) {
+ ufshci_printf(tr->req_queue->ctrlr,
+ "Failed to map payload %d\n", error);
+ return;
+ }
+
+ prdt_entry = (struct ufshci_prdt_entry *)tr->ucd->prd_table;
+
+ tr->prdt_entry_cnt = nseg;
+
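+ /*
+ * Each PRDT entry describes one DMA segment: the 64-bit segment
+ * address is split into lower/upper dwords, and the byte count field
+ * stores the segment length minus one, as the UFSHCI spec requires.
+ */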
+ for (i = 0; i < nseg; i++) {
+ prdt_entry->data_base_address = htole64(seg[i].ds_addr) &
+ 0xffffffff;
+ prdt_entry->data_base_address_upper = htole64(seg[i].ds_addr) >>
+ 32;
+ prdt_entry->data_byte_count = htole32(seg[i].ds_len - 1);
+
+ ++prdt_entry;
+ }
+
+ bus_dmamap_sync(tr->req_queue->dma_tag_payload, tr->payload_dma_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+}
+
+static void
+ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
+{
+ struct ufshci_request *req = tr->req;
+ struct ufshci_utp_cmd_desc *cmd_desc = tr->ucd;
+ int error;
+
+ tr->prdt_off = UFSHCI_UTP_XFER_REQ_SIZE + UFSHCI_UTP_XFER_RESP_SIZE;
+
+ memset(cmd_desc->prd_table, 0, sizeof(cmd_desc->prd_table));
+
+ /* Fill the PRDT entries with the payload */
+ error = bus_dmamap_load_mem(tr->req_queue->dma_tag_payload,
+ tr->payload_dma_map, &req->payload, ufshci_payload_map, tr,
+ BUS_DMA_NOWAIT);
+ if (error != 0) {
+ /*
+ * The dmamap operation failed, so we manually fail the
+ * tracker here with UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES.
+ *
+ * ufshci_req_queue_manual_complete_tracker must not be called
+ * with the req_queue lock held.
+ */
+ ufshci_printf(tr->req_queue->ctrlr,
+ "bus_dmamap_load_mem returned with error:0x%x!\n", error);
+
+ mtx_unlock(&tr->hwq->qlock);
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_INVALID_PRDT_ATTRIBUTES,
+ UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+ mtx_lock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_fill_utmr_descriptor(
+ struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
+{
+ memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;
+
+ memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
+}
+
+static void
+ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
+ const uint16_t response_len, const uint16_t prdt_off,
+ const uint16_t prdt_entry_cnt)
+{
+ uint8_t command_type;
+ /* Value to convert bytes to dwords */
+ const uint16_t dword_size = 4;
+
+ /*
+ * Set command type to UFS storage.
+ * The UFS 4.1 spec only defines 'UFS Storage' as a command type.
+ */
+ command_type = UFSHCI_COMMAND_TYPE_UFS_STORAGE;
+
+ memset(desc, 0, sizeof(struct ufshci_utp_xfer_req_desc));
+ desc->command_type = command_type;
+ desc->data_direction = data_direction;
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
+ desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
+ 0xffffffff);
+ desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
+ 32);
+
+ desc->response_upiu_offset = response_off / dword_size;
+ desc->response_upiu_length = response_len / dword_size;
+ desc->prdt_offset = prdt_off / dword_size;
+ desc->prdt_length = prdt_entry_cnt;
+}
+
+static void
+ufshci_req_queue_timeout_recovery(struct ufshci_controller *ctrlr,
+ struct ufshci_hw_queue *hwq)
+{
+ /* TODO: Step 2. Logical unit reset */
+ /* TODO: Step 3. Target device reset */
+ /* TODO: Step 4. Bus reset */
+
+ /*
+ * Step 5. All previous commands were timeout.
+ * Recovery failed, reset the host controller.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 5: Resetting controller due to a timeout.\n");
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ ufshci_ctrlr_reset(ctrlr);
+}
+
+static void
+ufshci_abort_complete(void *arg, const struct ufshci_completion *status,
+ bool error)
+{
+ struct ufshci_tracker *tr = arg;
+
+ /*
+ * We still need to check the active tracker array, to cover the race
+ * where the I/O timed out at the same time the controller was
+ * completing it. An abort request is always issued on the Task
+ * Management Request queue, but it affects either a Task Management
+ * Request or an I/O (UTRL) queue, so take the lock of the original
+ * command's queue; we need it to avoid races with the completion code
+ * and to complete the command manually.
+ */
+ mtx_lock(&tr->hwq->qlock);
+ if (tr->slot_state != UFSHCI_SLOT_STATE_FREE) {
+ mtx_unlock(&tr->hwq->qlock);
+ /*
+ * An I/O has timed out, and the controller was unable to abort
+ * it for some reason. And we've not processed a completion for
+ * it yet. Construct a fake completion status, and then complete
+ * the I/O's tracker manually.
+ */
+ ufshci_printf(tr->hwq->ctrlr,
+ "abort task request failed, aborting task manually\n");
+ ufshci_req_queue_manual_complete_tracker(tr,
+ UFSHCI_DESC_ABORTED, UFSHCI_RESPONSE_CODE_GENERAL_FAILURE);
+
+ if ((status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE) ||
+ (status->response_upiu.task_mgmt_response_upiu
+ .output_param1 ==
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED)) {
+ ufshci_printf(tr->hwq->ctrlr,
+ "Warning: the abort task request completed \
+ successfully, but the original task is still incomplete.");
+ return;
+ }
+
+ /* Abort Task failed. Perform recovery steps 2-5 */
+ ufshci_req_queue_timeout_recovery(tr->hwq->ctrlr, tr->hwq);
+ } else {
+ mtx_unlock(&tr->hwq->qlock);
+ }
+}
+
+static void
+ufshci_req_queue_timeout(void *arg)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ struct ufshci_controller *ctrlr = hwq->ctrlr;
+ struct ufshci_tracker *tr;
+ sbintime_t now;
+ bool idle = true;
+ bool fast;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ /*
+ * If the controller is failed, then stop polling. This ensures that any
+ * failure processing that races with the hwq timeout will fail safely.
+ */
+ if (ctrlr->is_failed) {
+ ufshci_printf(ctrlr,
+ "Failed controller, stopping watchdog timeout.\n");
+ hwq->timer_armed = false;
+ return;
+ }
+
+ /*
+ * Shutdown condition: We set hwq->timer_armed to false in
+ * ufshci_req_sdb_destroy before calling callout_drain. When we call
+ * that, this routine might get called one last time. Exit w/o setting a
+ * timeout. None of the watchdog stuff needs to be done since we're
+ * destroying the hwq.
+ */
+ if (!hwq->timer_armed) {
+ ufshci_printf(ctrlr,
+ "Timeout fired during ufshci_utr_req_queue_destroy\n");
+ return;
+ }
+
+ switch (hwq->recovery_state) {
+ case RECOVERY_NONE:
+ /*
+ * See if there's any recovery needed. First, do a fast check to
+ * see if anything could have timed out. If not, then skip
+ * everything else.
+ */
+ fast = false;
+ mtx_lock(&hwq->qlock);
+ now = getsbinuptime();
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If the first real transaction is not in timeout, then
+ * we're done. Otherwise, we try recovery.
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ fast = true;
+ break;
+ }
+ mtx_unlock(&hwq->qlock);
+ if (idle || fast)
+ break;
+
+ /*
+ * There's a stale transaction at the start of the queue whose
+ * deadline has passed. Poll the completions as a last-ditch
+ * effort in case an interrupt has been missed.
+ */
+ hwq->req_queue->qops.process_cpl(hwq->req_queue);
+
+ /*
+ * Now that we've run the ISR, re-check to see if there are any
+ * timed-out commands and abort them or reset the card if so.
+ */
+ mtx_lock(&hwq->qlock);
+ idle = true;
+ TAILQ_FOREACH(tr, &hwq->outstanding_tr, tailq) {
+ /*
+ * If we know this tracker hasn't timed out, we also
+ * know all subsequent ones haven't timed out. The tr
+ * queue is in submission order and all normal commands
+ * in a queue have the same timeout (or the timeout was
+ * changed by the user, but we eventually timeout then).
+ */
+ idle = false;
+ if (now <= tr->deadline)
+ break;
+
+ /*
+ * Timeout recovery is performed in five steps. If
+ * recovery fails at any step, the process continues to
+ * the next one:
+ * Step 1. Abort task
+ * Step 2. Logical unit reset (TODO)
+ * Step 3. Target device reset (TODO)
+ * Step 4. Bus reset (TODO)
+ * Step 5. Host controller reset
+ *
+ * If the timeout occurred in the Task Management
+ * Request queue, ignore Step 1.
+ */
+ if (ctrlr->enable_aborts &&
+ !hwq->req_queue->is_task_mgmt &&
+ tr->req->cb_fn != ufshci_abort_complete) {
+ /*
+ * Step 1. Timeout expired, abort the task.
+ *
+ * This isn't an abort command, ask for a
+ * hardware abort. This goes to the Task
+ * Management Request queue which will reset the
+ * task if it times out.
+ */
+ ufshci_printf(ctrlr,
+ "Recovery step 1: Timeout occurred. aborting the task(%d).\n",
+ tr->req->request_upiu.header.task_tag);
+ ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr,
+ ufshci_abort_complete, tr,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK,
+ tr->req->request_upiu.header.lun,
+ tr->req->request_upiu.header.task_tag, 0);
+ } else {
+ /* Recovery Step 2-5 */
+ ufshci_req_queue_timeout_recovery(ctrlr, hwq);
+ idle = false;
+ break;
+ }
+ }
+ mtx_unlock(&hwq->qlock);
+ break;
+
+ case RECOVERY_WAITING:
+ /*
+ * These messages aren't interesting while we're suspended. We
+ * put the queues into waiting state while suspending.
+ * Suspending takes a while, so we'll see these during that time
+ * and they aren't diagnostic. At other times, they indicate a
+ * problem that's worth complaining about.
+ */
+ if (!device_is_suspended(ctrlr->dev))
+ ufshci_printf(ctrlr, "Waiting for reset to complete\n");
+ idle = false; /* We want to keep polling */
+ break;
+ }
+
+ /*
+ * Rearm the timeout.
+ */
+ if (!idle) {
+ callout_schedule_sbt(&hwq->timer, SBT_1S / 2, SBT_1S / 2, 0);
+ } else {
+ hwq->timer_armed = false;
+ }
+}
+
+/*
+ * Submit the tracker to the hardware.
+ */
+static void
+ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker *tr, enum ufshci_data_direction data_direction)
+{
+ struct ufshci_controller *ctrlr = req_queue->ctrlr;
+ struct ufshci_request *req = tr->req;
+ struct ufshci_hw_queue *hwq;
+ uint64_t ucd_paddr;
+ uint16_t request_len, response_off, response_len;
+ uint8_t slot_num = tr->slot_num;
+ int timeout;
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
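+ /*
+ * Polled commands get a short one-second deadline so the watchdog can
+ * recover quickly, matching the expectation documented above
+ * ufshci_completion_poll(); other commands use the configured timeout
+ * period.
+ */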
+ if (req->cb_fn == ufshci_completion_poll_cb)
+ timeout = 1;
+ else
+ timeout = ctrlr->timeout_period;
+ tr->deadline = getsbinuptime() + timeout * SBT_1S;
+ if (!hwq->timer_armed) {
+ hwq->timer_armed = true;
+ /*
+ * It wakes up once every 0.5 seconds to check if the deadline
+ * has passed.
+ */
+ callout_reset_sbt_on(&hwq->timer, SBT_1S / 2, SBT_1S / 2,
+ ufshci_req_queue_timeout, hwq, hwq->cpu, 0);
+ }
+
+ if (req_queue->is_task_mgmt) {
+ /* Prepare UTP Task Management Request Descriptor. */
+ ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
+ req);
+ } else {
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len,
+ tr->prdt_off, tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
+
+ bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
+ tr->slot_state = UFSHCI_SLOT_STATE_SCHEDULED;
+
+ /* Ring the doorbell */
+ req_queue->qops.ring_doorbell(ctrlr, tr);
+}
+
+static int
+_ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req)
+{
+ struct ufshci_tracker *tr = NULL;
+ int error;
+
+ mtx_assert(&req_queue->qops.get_hw_queue(req_queue)->qlock, MA_OWNED);
+
+ error = req_queue->qops.reserve_slot(req_queue, &tr);
+ if (error != 0) {
+ ufshci_printf(req_queue->ctrlr, "Failed to get tracker");
+ return (error);
+ }
+ KASSERT(tr, ("There is no tracker allocated."));
+
+ if (tr->slot_state == UFSHCI_SLOT_STATE_RESERVED ||
+ tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED)
+ return (EBUSY);
+
+ /* Set the task_tag value to slot_num for traceability. */
+ req->request_upiu.header.task_tag = tr->slot_num;
+
+ tr->slot_state = UFSHCI_SLOT_STATE_RESERVED;
+ tr->response_size = req->response_size;
+ tr->deadline = SBT_MAX;
+ tr->req = req;
+
+ TAILQ_REMOVE(&tr->hwq->free_tr, tr, tailq);
+ TAILQ_INSERT_TAIL(&tr->hwq->outstanding_tr, tr, tailq);
+
+ ufshci_req_queue_submit_tracker(req_queue, tr, req->data_direction);
+
+ return (0);
+}
+
+int
+ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
+ struct ufshci_request *req, bool is_admin)
+{
+ struct ufshci_hw_queue *hwq;
+ int error;
+
+ /* TODO: MCQs should use a separate Admin queue. */
+
+ hwq = req_queue->qops.get_hw_queue(req_queue);
+ KASSERT(hwq, ("There is no HW queue allocated."));
+
+ mtx_lock(&hwq->qlock);
+ error = _ufshci_req_queue_submit_request(req_queue, req);
+ mtx_unlock(&hwq->qlock);
+
+ return (error);
+}
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
new file mode 100644
index 000000000000..ca47aa159c5b
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -0,0 +1,562 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/domainset.h>
+#include <sys/module.h>
+
+#include "sys/kassert.h"
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+static void
+ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ bus_dmamap_destroy(req_queue->dma_tag_payload,
+ tr->payload_dma_map);
+ }
+
+ if (req_queue->ucd) {
+ bus_dmamap_unload(req_queue->dma_tag_ucd,
+ req_queue->ucdmem_map);
+ bus_dmamem_free(req_queue->dma_tag_ucd, req_queue->ucd,
+ req_queue->ucdmem_map);
+ req_queue->ucd = NULL;
+ }
+
+ if (req_queue->dma_tag_ucd) {
+ bus_dma_tag_destroy(req_queue->dma_tag_ucd);
+ req_queue->dma_tag_ucd = NULL;
+ }
+
+ free(req_queue->hwq->ucd_bus_addr, M_UFSHCI);
+}
+
+static void
+ufshci_ucd_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
+{
+ struct ufshci_hw_queue *hwq = arg;
+ int i;
+
+ if (error != 0) {
+ printf("ufshci: Failed to map UCD, error = %d\n", error);
+ return;
+ }
+
+ if (hwq->num_trackers != nseg) {
+ printf(
+ "ufshci: Failed to map UCD, num_trackers = %d, nseg = %d\n",
+ hwq->num_trackers, nseg);
+ return;
+ }
+
+ for (i = 0; i < nseg; i++) {
+ hwq->ucd_bus_addr[i] = seg[i].ds_addr;
+ }
+}
+
+static int
+ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
+ uint32_t num_entries, struct ufshci_controller *ctrlr)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ size_t ucd_allocsz, payload_allocsz;
+ uint8_t *ucdmem;
+ int i, error;
+
+ req_queue->hwq->ucd_bus_addr = malloc(sizeof(bus_addr_t) *
+ req_queue->num_trackers, M_UFSHCI, M_ZERO | M_WAITOK);
+
+ /*
+ * Each component must be page aligned, and individual PRDTs
+ * cannot cross a page boundary.
+ */
+ ucd_allocsz = num_entries * sizeof(struct ufshci_utp_cmd_desc);
+ ucd_allocsz = roundup2(ucd_allocsz, ctrlr->page_size);
+ payload_allocsz = num_entries * ctrlr->max_xfer_size;
+
+ /*
+ * Allocate physical memory for UTP Command Descriptor (UCD)
+ * Note: UFSHCI UCD format is restricted to 128-byte alignment.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 128, 0,
+ BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, ucd_allocsz,
+ howmany(ucd_allocsz, sizeof(struct ufshci_utp_cmd_desc)),
+ sizeof(struct ufshci_utp_cmd_desc), 0, NULL, NULL,
+ &req_queue->dma_tag_ucd);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request cmd desc tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(req_queue->dma_tag_ucd, (void **)&ucdmem,
+ BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &req_queue->ucdmem_map)) {
+ ufshci_printf(ctrlr, "failed to allocate cmd desc memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ ucdmem, ucd_allocsz, ufshci_ucd_map, hwq, 0) != 0) {
+ ufshci_printf(ctrlr, "failed to load cmd desc memory\n");
+ bus_dmamem_free(req_queue->dma_tag_ucd, ucdmem,
+ req_queue->ucdmem_map);
+ goto out;
+ }
+
+ req_queue->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+
+ /*
+ * Allocate physical memory for PRDT
+ * Note: UFSHCI PRDT format is restricted to 8-byte alignment.
+ */
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 8,
+ ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ payload_allocsz, howmany(payload_allocsz, ctrlr->page_size) + 1,
+ ctrlr->page_size, 0, NULL, NULL, &req_queue->dma_tag_payload);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request prdt tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ bus_dmamap_create(req_queue->dma_tag_payload, 0,
+ &hwq->act_tr[i]->payload_dma_map);
+
+ hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];
+
+ ucdmem += sizeof(struct ufshci_utp_cmd_desc);
+ }
+
+ return (0);
+out:
+ ufshci_req_sdb_cmd_desc_destroy(req_queue);
+ return (ENOMEM);
+}
+
+int
+ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
+{
+ struct ufshci_hw_queue *hwq;
+ size_t desc_size, alloc_size;
+ uint64_t queuemem_phys;
+ uint8_t *queuemem;
+ struct ufshci_tracker *tr;
+ const size_t lock_name_len = 32;
+ char qlock_name[lock_name_len], recovery_lock_name[lock_name_len];
+ char *base;
+ int i, error;
+
+ req_queue->ctrlr = ctrlr;
+ req_queue->is_task_mgmt = is_task_mgmt;
+ req_queue->num_entries = num_entries;
+ /*
+ * In Single Doorbell mode, the number of queue entries and the number
+ * of trackers are the same.
+ */
+ req_queue->num_trackers = num_entries;
+
+ /* Single Doorbell mode uses only one queue. (UFSHCI_SDB_Q = 0) */
+ req_queue->hwq = malloc(sizeof(struct ufshci_hw_queue), M_UFSHCI,
+ M_ZERO | M_WAITOK);
+ hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ hwq->num_entries = req_queue->num_entries;
+ hwq->num_trackers = req_queue->num_trackers;
+ hwq->ctrlr = ctrlr;
+ hwq->req_queue = req_queue;
+
+ base = is_task_mgmt ? "ufshci utmrq" : "ufshci utrq";
+ snprintf(qlock_name, sizeof(qlock_name), "%s #%d lock", base,
+ UFSHCI_SDB_Q);
+ snprintf(recovery_lock_name, sizeof(recovery_lock_name),
+ "%s #%d recovery lock", base, UFSHCI_SDB_Q);
+
+ mtx_init(&hwq->qlock, qlock_name, NULL, MTX_DEF);
+ mtx_init(&hwq->recovery_lock, recovery_lock_name, NULL, MTX_DEF);
+
+ callout_init_mtx(&hwq->timer, &hwq->recovery_lock, 0);
+ hwq->timer_armed = false;
+ hwq->recovery_state = RECOVERY_WAITING;
+
+ /*
+ * Allocate physical memory for request queue (UTP Transfer Request
+ * Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
+ * Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
+ */
+ desc_size = is_task_mgmt ?
+ sizeof(struct ufshci_utp_task_mgmt_req_desc) :
+ sizeof(struct ufshci_utp_xfer_req_desc);
+ alloc_size = num_entries * desc_size;
+ error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
+ ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
+ alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
+ if (error != 0) {
+ ufshci_printf(ctrlr, "request queue tag create failed %d\n",
+ error);
+ goto out;
+ }
+
+ if (bus_dmamem_alloc(hwq->dma_tag_queue, (void **)&queuemem,
+ BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &hwq->queuemem_map)) {
+ ufshci_printf(ctrlr,
+ "failed to allocate request queue memory\n");
+ goto out;
+ }
+
+ if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
+ alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ ufshci_printf(ctrlr, "failed to load request queue memory\n");
+ bus_dmamem_free(hwq->dma_tag_queue, queuemem,
+ hwq->queuemem_map);
+ goto out;
+ }
+
+ hwq->num_cmds = 0;
+ hwq->num_intr_handler_calls = 0;
+ hwq->num_retries = 0;
+ hwq->num_failures = 0;
+ hwq->req_queue_addr = queuemem_phys;
+
+ /* Allocate trackers */
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ TAILQ_INIT(&hwq->free_tr);
+ TAILQ_INIT(&hwq->outstanding_tr);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+ TAILQ_INSERT_HEAD(&hwq->free_tr, tr, tailq);
+
+ hwq->act_tr[i] = tr;
+ }
+
+ if (is_task_mgmt) {
+ /* UTP Task Management Request (UTMR) */
+ uint32_t utmrlba, utmrlbau;
+
+ hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;
+
+ utmrlba = hwq->req_queue_addr & 0xffffffff;
+ utmrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
+ ufshci_mmio_write_4(ctrlr, utmrlbau, utmrlbau);
+ } else {
+ /* UTP Transfer Request (UTR) */
+ uint32_t utrlba, utrlbau;
+
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+
+ /*
+ * Allocate physical memory for the command descriptor.
+ * UTP Transfer Request (UTR) requires memory for a separate
+ * command in addition to the queue.
+ */
+ if (ufshci_req_sdb_cmd_desc_construct(req_queue, num_entries,
+ ctrlr) != 0) {
+ ufshci_printf(ctrlr,
+ "failed to construct cmd descriptor memory\n");
+ goto out;
+ }
+
+ utrlba = hwq->req_queue_addr & 0xffffffff;
+ utrlbau = hwq->req_queue_addr >> 32;
+ ufshci_mmio_write_4(ctrlr, utrlba, utrlba);
+ ufshci_mmio_write_4(ctrlr, utrlbau, utrlbau);
+ }
+
+ return (0);
+out:
+ ufshci_req_sdb_destroy(ctrlr, req_queue);
+ return (ENOMEM);
+}
+
+void
+ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
+
+ mtx_lock(&hwq->recovery_lock);
+ hwq->timer_armed = false;
+ mtx_unlock(&hwq->recovery_lock);
+ callout_drain(&hwq->timer);
+
+ if (!req_queue->is_task_mgmt)
+ ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ free(tr, M_UFSHCI);
+ }
+
+ if (hwq->act_tr) {
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
+ if (hwq->utrd != NULL) {
+ bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
+ bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
+ hwq->queuemem_map);
+ hwq->utrd = NULL;
+ }
+
+ if (hwq->dma_tag_queue) {
+ bus_dma_tag_destroy(hwq->dma_tag_queue);
+ hwq->dma_tag_queue = NULL;
+ }
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_destroy(&hwq->recovery_lock);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_destroy(&hwq->qlock);
+
+ free(req_queue->hwq, M_UFSHCI);
+}
+
+struct ufshci_hw_queue *
+ufshci_req_sdb_get_hw_queue(struct ufshci_req_queue *req_queue)
+{
+ return &req_queue->hwq[UFSHCI_SDB_Q];
+}
+
+void
+ufshci_req_sdb_disable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr, *tr_temp;
+
+ mtx_lock(&hwq->recovery_lock);
+ mtx_lock(&hwq->qlock);
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+
+ hwq->recovery_state = RECOVERY_WAITING;
+ TAILQ_FOREACH_SAFE(tr, &hwq->outstanding_tr, tailq, tr_temp) {
+ tr->deadline = SBT_MAX;
+ }
+
+ mtx_unlock(&hwq->qlock);
+ mtx_unlock(&hwq->recovery_lock);
+}
+
+int
+ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
+ struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+
+ if (req_queue->is_task_mgmt) {
+ uint32_t hcs, utmrldbr, utmrlrsr;
+
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTMRLRDY))) {
+ ufshci_printf(ctrlr,
+ "UTP task management request list is not ready\n");
+ return (ENXIO);
+ }
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ if (utmrldbr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP task management request list door bell is not ready\n");
+ return (ENXIO);
+ }
+
+ utmrlrsr = UFSHCIM(UFSHCI_UTMRLRSR_REG_UTMRLRSR);
+ ufshci_mmio_write_4(ctrlr, utmrlrsr, utmrlrsr);
+ } else {
+ uint32_t hcs, utrldbr, utrlcnr, utrlrsr;
+
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (!(hcs & UFSHCIM(UFSHCI_HCS_REG_UTRLRDY))) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list is not ready\n");
+ return (ENXIO);
+ }
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ if (utrldbr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list door bell is not ready\n");
+ ufshci_printf(ctrlr,
+ "Clear the UTP transfer request list door bell\n");
+ ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
+ }
+
+ utrlcnr = ufshci_mmio_read_4(ctrlr, utrlcnr);
+ if (utrlcnr != 0) {
+ ufshci_printf(ctrlr,
+ "UTP transfer request list notification is not ready\n");
+ ufshci_printf(ctrlr,
+ "Clear the UTP transfer request list notification\n");
+ ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
+ }
+
+ utrlrsr = UFSHCIM(UFSHCI_UTRLRSR_REG_UTRLRSR);
+ ufshci_mmio_write_4(ctrlr, utrlrsr, utrlrsr);
+ }
+
+ if (mtx_initialized(&hwq->recovery_lock))
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+ if (mtx_initialized(&hwq->qlock))
+ mtx_assert(&hwq->qlock, MA_OWNED);
+ KASSERT(!req_queue->ctrlr->is_failed, ("Enabling a failed hwq\n"));
+
+ hwq->recovery_state = RECOVERY_NONE;
+
+ return (0);
+}
+
+int
+ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
+ struct ufshci_tracker **tr)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ uint8_t i;
+
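+ /*
+ * Single Doorbell mode has one tracker per doorbell bit, so a linear
+ * scan for a free slot is sufficient.
+ */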
+ for (i = 0; i < req_queue->num_entries; i++) {
+ if (hwq->act_tr[i]->slot_state == UFSHCI_SLOT_STATE_FREE) {
+ *tr = hwq->act_tr[i];
+ (*tr)->hwq = hwq;
+ return (0);
+ }
+ }
+ return (EBUSY);
+}
+
+void
+ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ /*
+ * NOP
+ * UTP Task Management does not have a Completion Notification
+ * Register.
+ */
+}
+
+void
+ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utrlcnr;
+
+ utrlcnr = 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utrlcnr, utrlcnr);
+}
+
+void
+ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utmrldbr = 0;
+
+ utmrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+void
+ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utrldbr = 0;
+
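+ /*
+ * Writing 1 to bit N of UTRLDBR hands slot N to the controller; the
+ * controller clears the bit when the request completes, which is what
+ * is_doorbell_cleared() polls for.
+ */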
+ utrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utmrldbr;
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ return (!(utmrldbr & (1 << slot)));
+}
+
+bool
+ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
+}
+
+bool
+ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
+{
+ struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ uint8_t slot;
+ bool done = false;
+
+ mtx_assert(&hwq->recovery_lock, MA_OWNED);
+
+ hwq->num_intr_handler_calls++;
+
+ bus_dmamap_sync(hwq->dma_tag_queue, hwq->queuemem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ for (slot = 0; slot < req_queue->num_entries; slot++) {
+ tr = hwq->act_tr[slot];
+
+ KASSERT(tr, ("there is no tracker assigned to the slot"));
+ /*
+ * When the response is delivered from the device, the doorbell
+ * is cleared.
+ */
+ if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
+ req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
+ slot)) {
+ ufshci_req_queue_complete_tracker(tr);
+ done = true;
+ }
+ }
+
+ return (done);
+}
+
+int
+ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr)
+{
+ /* TODO: Implement inflight io*/
+
+ return (0);
+}
diff --git a/sys/dev/ufshci/ufshci_sim.c b/sys/dev/ufshci/ufshci_sim.c
new file mode 100644
index 000000000000..828b520614a5
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_sim.c
@@ -0,0 +1,371 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+
+#include <cam/cam.h>
+#include <cam/cam_ccb.h>
+#include <cam/cam_debug.h>
+#include <cam/cam_sim.h>
+#include <cam/cam_xpt_sim.h>
+#include <cam/scsi/scsi_all.h>
+
+#include "ufshci_private.h"
+
+#define sim2ctrlr(sim) ((struct ufshci_controller *)cam_sim_softc(sim))
+
+static void
+ufshci_sim_scsiio_done(void *ccb_arg, const struct ufshci_completion *cpl,
+ bool error)
+{
+ const uint8_t *sense_data;
+ uint16_t sense_data_max_size;
+ uint16_t sense_data_len;
+
+ union ccb *ccb = (union ccb *)ccb_arg;
+
+ /*
+ * Let the periph know about the completion and let it sort out what
+ * it means. Report an error or success based on the OCS and UPIU
+ * response code, and copy the sense data so that CAM can handle it.
+ */
+ sense_data = cpl->response_upiu.cmd_response_upiu.sense_data;
+ sense_data_max_size = sizeof(
+ cpl->response_upiu.cmd_response_upiu.sense_data);
+ sense_data_len = be16toh(
+ cpl->response_upiu.cmd_response_upiu.sense_data_len);
+ memcpy(&ccb->csio.sense_data, sense_data,
+ min(sense_data_len, sense_data_max_size));
+
+ ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+ if (error) {
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ xpt_done(ccb);
+ } else {
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ xpt_done_direct(ccb);
+ }
+}
+
+/*
+ * Complete the command as an illegal command with invalid field
+ */
+static void
+ufshci_sim_illegal_request(union ccb *ccb)
+{
+ scsi_set_sense_data(&ccb->csio.sense_data,
+ /*sense_format*/ SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x24, /* 24h/00h INVALID FIELD IN CDB */
+ /*ascq*/ 0x00,
+ /*extra args*/ SSD_ELEM_NONE);
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID |
+ CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+}
+
+static void
+ufshci_sim_scsiio(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct ufshci_request *req;
+ void *payload;
+ struct ufshci_cmd_command_upiu *upiu;
+ uint8_t *cdb;
+ uint32_t payload_len;
+ bool is_write;
+ struct ufshci_controller *ctrlr;
+ uint8_t data_direction;
+ int error;
+
+ /* UFS device cannot process these commands */
+ if (csio->cdb_io.cdb_bytes[0] == MODE_SENSE_6 ||
+ csio->cdb_io.cdb_bytes[0] == MODE_SELECT_6 ||
+ csio->cdb_io.cdb_bytes[0] == READ_12 ||
+ csio->cdb_io.cdb_bytes[0] == WRITE_12) {
+ ufshci_sim_illegal_request(ccb);
+ return;
+ }
+
+ ctrlr = sim2ctrlr(sim);
+ payload = csio->data_ptr;
+
+ payload_len = csio->dxfer_len;
+ is_write = csio->ccb_h.flags & CAM_DIR_OUT;
+
+ /* TODO: Handle the remaining CAM data transfer types. */
+ if ((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+ req = ufshci_allocate_request_bio((struct bio *)payload,
+ M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+ else
+ req = ufshci_allocate_request_vaddr(payload, payload_len,
+ M_NOWAIT, ufshci_sim_scsiio_done, ccb);
+
+ /* M_NOWAIT allocation may fail; report the resource shortage to CAM. */
+ if (req == NULL) {
+ ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
+ xpt_done(ccb);
+ return;
+ }
+
+ req->request_size = sizeof(struct ufshci_cmd_command_upiu);
+ req->response_size = sizeof(struct ufshci_cmd_response_upiu);
+
+ switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
+ case CAM_DIR_IN:
+ data_direction = UFSHCI_DATA_DIRECTION_FROM_TGT_TO_SYS;
+ break;
+ case CAM_DIR_OUT:
+ data_direction = UFSHCI_DATA_DIRECTION_FROM_SYS_TO_TGT;
+ break;
+ default:
+ data_direction = UFSHCI_DATA_DIRECTION_NO_DATA_TRANSFER;
+ }
+ req->data_direction = data_direction;
+
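+ /* Build the SCSI Command UPIU in the request's UPIU buffer. */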
+ upiu = (struct ufshci_cmd_command_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type = UFSHCI_UPIU_TRANSACTION_CODE_COMMAND;
+ upiu->header.operational_flags = is_write ? UFSHCI_OPERATIONAL_FLAG_W :
+ UFSHCI_OPERATIONAL_FLAG_R;
+ upiu->header.lun = csio->ccb_h.target_lun;
+ upiu->header.cmd_set_type = UFSHCI_COMMAND_SET_TYPE_SCSI;
+
+ upiu->expected_data_transfer_length = htobe32(payload_len);
+
+ ccb->ccb_h.status |= CAM_SIM_QUEUED;
+
+ if (csio->ccb_h.flags & CAM_CDB_POINTER)
+ cdb = csio->cdb_io.cdb_ptr;
+ else
+ cdb = csio->cdb_io.cdb_bytes;
+
+ if (cdb == NULL || csio->cdb_len > sizeof(upiu->cdb)) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+ memcpy(upiu->cdb, cdb, csio->cdb_len);
+
+ error = ufshci_ctrlr_submit_io_request(ctrlr, req);
+ if (error == EBUSY) {
+ ccb->ccb_h.status = CAM_SCSI_BUSY;
+ xpt_done(ccb);
+ return;
+ } else if (error) {
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ xpt_done(ccb);
+ return;
+ }
+}
+
+static uint32_t
+ufshci_link_kBps(struct ufshci_controller *ctrlr)
+{
+ uint32_t gear = ctrlr->hs_gear;
+ uint32_t lanes = ctrlr->rx_lanes;
+
+ /*
+ * Per-lane effective bandwidth (KB/s, SI: 1 KB = 1000 B).
+ * All HS-Gears use 8b/10b line coding, i.e. 80% efficiency, so
+ * KB/s per lane = raw rate (kbps) * 0.8 (8b/10b) / 8 (bits per byte).
+ */
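+ /*
+ * e.g. HS-Gear3 x 2 lanes: 5,836,800 kbps * 0.8 / 8 = 583,680 KB/s
+ * per lane, giving 1,167,360 KB/s in total.
+ */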
+ static const uint32_t kbps_per_lane[] = {
+ 0, /* unused */
+ 145920, /* HS-Gear1 : 1459.2 Mbps */
+ 291840, /* HS-Gear2 : 2918.4 Mbps */
+ 583680, /* HS-Gear3 : 5836.8 Mbps */
+ 1167360, /* HS-Gear4 : 11673.6 Mbps */
+ 2334720 /* HS-Gear5 : 23347.2 Mbps */
+ };
+
+ /* Sanity checks */
+ if (gear >= nitems(kbps_per_lane))
+ gear = 0; /* out-of-range -> treat as invalid */
+
+ if (lanes == 0 || lanes > 2)
+ lanes = 1; /* UFS spec allows 1–2 data lanes */
+
+ return (kbps_per_lane[gear] * lanes);
+}
+
+static void
+ufshci_cam_action(struct cam_sim *sim, union ccb *ccb)
+{
+ struct ufshci_controller *ctrlr = sim2ctrlr(sim);
+
+ if (ctrlr == NULL) {
+ ccb->ccb_h.status = CAM_SEL_TIMEOUT;
+ xpt_done(ccb);
+ return;
+ }
+
+ /* Perform the requested action */
+ switch (ccb->ccb_h.func_code) {
+ case XPT_SCSI_IO:
+ ufshci_sim_scsiio(sim, ccb);
+ return;
+ case XPT_PATH_INQ: {
+ struct ccb_pathinq *cpi = &ccb->cpi;
+
+ cpi->version_num = 1;
+ cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE;
+ cpi->target_sprt = 0;
+ cpi->hba_misc = PIM_UNMAPPED | PIM_NO_6_BYTE;
+ cpi->hba_eng_cnt = 0;
+ cpi->max_target = 0;
+ cpi->max_lun = ctrlr->max_lun_count;
+ cpi->async_flags = 0;
+ cpi->maxio = ctrlr->max_xfer_size;
+ cpi->initiator_id = 1;
+ strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
+ strlcpy(cpi->hba_vid, "UFSHCI", HBA_IDLEN);
+ strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
+ cpi->unit_number = cam_sim_unit(sim);
+ cpi->base_transfer_speed = ufshci_link_kBps(ctrlr);
+ cpi->transport = XPORT_UFSHCI;
+ cpi->transport_version = 1;
+ cpi->protocol = PROTO_SCSI;
+ cpi->protocol_version = SCSI_REV_SPC5;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_RESET_BUS:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_RESET_DEV:
+ if (ufshci_dev_reset(ctrlr))
+ ccb->ccb_h.status = CAM_REQ_CMP_ERR;
+ else
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ case XPT_ABORT:
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ break;
+ case XPT_SET_TRAN_SETTINGS:
+ ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
+ break;
+ case XPT_GET_TRAN_SETTINGS: {
+ struct ccb_trans_settings *cts;
+ struct ccb_trans_settings_ufshci *ufshcix;
+
+ cts = &ccb->cts;
+ ufshcix = &cts->xport_specific.ufshci;
+
+ ufshcix->hs_gear = ctrlr->hs_gear;
+ ufshcix->tx_lanes = ctrlr->tx_lanes;
+ ufshcix->rx_lanes = ctrlr->rx_lanes;
+ ufshcix->max_hs_gear = ctrlr->max_rx_hs_gear;
+ ufshcix->max_tx_lanes = ctrlr->max_tx_lanes;
+ ufshcix->max_rx_lanes = ctrlr->max_rx_lanes;
+ ufshcix->valid = CTS_UFSHCI_VALID_LINK;
+
+ cts->transport = XPORT_UFSHCI;
+ cts->transport_version = 1;
+ cts->protocol = PROTO_SCSI;
+ cts->protocol_version = SCSI_REV_SPC5;
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ }
+ case XPT_CALC_GEOMETRY:
+ cam_calc_geometry(&ccb->ccg, 1);
+ break;
+ case XPT_NOOP:
+ ccb->ccb_h.status = CAM_REQ_CMP;
+ break;
+ default:
+ printf("invalid ccb=%p func=%#x\n", ccb, ccb->ccb_h.func_code);
+ ccb->ccb_h.status = CAM_REQ_INVALID;
+ break;
+ }
+ xpt_done(ccb);
+
+ return;
+}
+
+static void
+ufshci_cam_poll(struct cam_sim *sim)
+{
+ struct ufshci_controller *ctrlr = sim2ctrlr(sim);
+
+ ufshci_ctrlr_poll(ctrlr);
+}
+
+int
+ufshci_sim_attach(struct ufshci_controller *ctrlr)
+{
+ device_t dev;
+ struct cam_devq *devq;
+ int max_trans;
+
+ dev = ctrlr->dev;
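+ /* Bound the SIM queue depth by the controller's maximum outstanding I/O. */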
+ max_trans = ctrlr->max_hw_pend_io;
+ if ((devq = cam_simq_alloc(max_trans)) == NULL) {
+ printf("Failed to allocate a simq\n");
+ return (ENOMEM);
+ }
+
+ ctrlr->ufshci_sim = cam_sim_alloc(ufshci_cam_action, ufshci_cam_poll,
+ "ufshci", ctrlr, device_get_unit(dev), &ctrlr->sc_mtx, max_trans,
+ max_trans, devq);
+ if (ctrlr->ufshci_sim == NULL) {
+ printf("Failed to allocate a sim\n");
+ cam_simq_free(devq);
+ return (ENOMEM);
+ }
+
+ mtx_lock(&ctrlr->sc_mtx);
+ if (xpt_bus_register(ctrlr->ufshci_sim, ctrlr->dev, 0) != CAM_SUCCESS) {
+ cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
+ cam_simq_free(devq);
+ mtx_unlock(&ctrlr->sc_mtx);
+ printf("Failed to create a bus\n");
+ return (ENOMEM);
+ }
+
+ if (xpt_create_path(&ctrlr->ufshci_path, /*periph*/ NULL,
+ cam_sim_path(ctrlr->ufshci_sim), CAM_TARGET_WILDCARD,
+ CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
+ xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
+ cam_sim_free(ctrlr->ufshci_sim, /*free_devq*/ TRUE);
+ cam_simq_free(devq);
+ mtx_unlock(&ctrlr->sc_mtx);
+ printf("Failed to create a path\n");
+ return (ENOMEM);
+ }
+ mtx_unlock(&ctrlr->sc_mtx);
+
+ return (0);
+}
+
+void
+ufshci_sim_detach(struct ufshci_controller *ctrlr)
+{
+ int error;
+
+ if (ctrlr->ufshci_path != NULL) {
+ xpt_free_path(ctrlr->ufshci_path);
+ ctrlr->ufshci_path = NULL;
+ }
+
+ if (ctrlr->ufshci_sim != NULL) {
+ error = xpt_bus_deregister(cam_sim_path(ctrlr->ufshci_sim));
+ if (error == 0) {
+ /* accessing the softc is not possible after this */
+ ctrlr->ufshci_sim->softc = NULL;
+ ufshci_printf(ctrlr,
+ "%s: %s:%d:%d calling "
+ "cam_sim_free sim %p refc %u mtx %p\n",
+ __func__, ctrlr->sc_name,
+ cam_sim_path(ctrlr->ufshci_sim), ctrlr->sc_unit,
+ ctrlr->ufshci_sim, ctrlr->ufshci_sim->refcount,
+ ctrlr->ufshci_sim->mtx);
+ } else {
+ panic("%s: %s: CAM layer is busy: errno %d\n", __func__,
+ ctrlr->sc_name, error);
+ }
+
+ cam_sim_free(ctrlr->ufshci_sim, /* free_devq */ TRUE);
+ ctrlr->ufshci_sim = NULL;
+ }
+}
diff --git a/sys/dev/ufshci/ufshci_sysctl.c b/sys/dev/ufshci/ufshci_sysctl.c
new file mode 100644
index 000000000000..56bc06b13f3c
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_sysctl.c
@@ -0,0 +1,253 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/sysctl.h>
+
+#include "ufshci_private.h"
+
+static int
+ufshci_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
+{
+ uint32_t *ptr = arg1;
+ uint32_t newval = *ptr;
+ int error = sysctl_handle_int(oidp, &newval, 0, req);
+
+ if (error || (req->newptr == NULL))
+ return (error);
+
+ if (newval > UFSHCI_MAX_TIMEOUT_PERIOD ||
+ newval < UFSHCI_MIN_TIMEOUT_PERIOD) {
+ return (EINVAL);
+ } else {
+ *ptr = newval;
+ }
+
+ return (0);
+}
+
+static int
+ufshci_sysctl_num_cmds(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_cmds = 0;
+ int i;
+
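+ /* Sum the counters of the task-management queue and every I/O queue. */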
+ num_cmds = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_cmds;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_cmds += ctrlr->transfer_req_queue.hwq[i].num_cmds;
+ }
+
+ return (sysctl_handle_64(oidp, &num_cmds, 0, req));
+}
+
+static int
+ufshci_sysctl_num_intr_handler_calls(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_intr_handler_calls = 0;
+ int i;
+
+ num_intr_handler_calls =
+ ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_intr_handler_calls;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_intr_handler_calls +=
+     ctrlr->transfer_req_queue.hwq[i].num_intr_handler_calls;
+ }
+
+ return (sysctl_handle_64(oidp, &num_intr_handler_calls, 0, req));
+}
+
+static int
+ufshci_sysctl_num_retries(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_retries = 0;
+ int i;
+
+ num_retries = ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_retries;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_retries +=
+ ctrlr->transfer_req_queue.hwq[i].num_retries;
+ }
+
+ return (sysctl_handle_64(oidp, &num_retries, 0, req));
+}
+
+static int
+ufshci_sysctl_num_failures(SYSCTL_HANDLER_ARGS)
+{
+ struct ufshci_controller *ctrlr = arg1;
+ int64_t num_failures = 0;
+ int i;
+
+ num_failures =
+ ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q].num_failures;
+
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ for (i = 0; i < ctrlr->num_io_queues; i++)
+ num_failures +=
+ ctrlr->transfer_req_queue.hwq[i].num_failures;
+ }
+
+ return (sysctl_handle_64(oidp, &num_failures, 0, req));
+}
+
+static void
+ufshci_sysctl_initialize_queue(struct ufshci_hw_queue *hwq,
+ struct sysctl_ctx_list *ctrlr_ctx, struct sysctl_oid *que_tree)
+{
+ struct sysctl_oid_list *que_list = SYSCTL_CHILDREN(que_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_entries",
+ CTLFLAG_RD, &hwq->num_entries, 0,
+ "Number of entries in hardware queue");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "num_trackers",
+ CTLFLAG_RD, &hwq->num_trackers, 0,
+ "Number of trackers pre-allocated for this queue pair");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_head", CTLFLAG_RD,
+ &hwq->sq_head, 0,
+ "Current head of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "sq_tail", CTLFLAG_RD,
+ &hwq->sq_tail, 0,
+ "Current tail of submission queue (as observed by driver)");
+ SYSCTL_ADD_UINT(ctrlr_ctx, que_list, OID_AUTO, "cq_head", CTLFLAG_RD,
+ &hwq->cq_head, 0,
+ "Current head of completion queue (as observed by driver)");
+
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_cmds", CTLFLAG_RD,
+ &hwq->num_cmds, "Number of commands submitted");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_intr_handler_calls",
+ CTLFLAG_RD, &hwq->num_intr_handler_calls,
+ "Number of times interrupt handler was invoked (will typically be "
+ "less than number of actual interrupts generated due to "
+ "interrupt aggregation)");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_retries",
+ CTLFLAG_RD, &hwq->num_retries, "Number of commands retried");
+ SYSCTL_ADD_QUAD(ctrlr_ctx, que_list, OID_AUTO, "num_failures",
+ CTLFLAG_RD, &hwq->num_failures,
+ "Number of commands ending in failure after all retries");
+
+ /* TODO: Implement num_ignored */
+ /* TODO: Implement recovery state */
+ /* TODO: Implement dump debug */
+}
+
+void
+ufshci_sysctl_initialize_ctrlr(struct ufshci_controller *ctrlr)
+{
+ struct sysctl_ctx_list *ctrlr_ctx;
+ struct sysctl_oid *ctrlr_tree, *que_tree, *ioq_tree;
+ struct sysctl_oid_list *ctrlr_list, *ioq_list;
+ struct ufshci_device *dev = &ctrlr->ufs_dev;
+#define QUEUE_NAME_LENGTH 16
+ char queue_name[QUEUE_NAME_LENGTH];
+ int i;
+
+ ctrlr_ctx = device_get_sysctl_ctx(ctrlr->dev);
+ ctrlr_tree = device_get_sysctl_tree(ctrlr->dev);
+ ctrlr_list = SYSCTL_CHILDREN(ctrlr_tree);
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "major_version",
+ CTLFLAG_RD, &ctrlr->major_version, 0, "UFS spec major version");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "minor_version",
+ CTLFLAG_RD, &ctrlr->minor_version, 0, "UFS spec minor version");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "io_queue_mode",
+ CTLFLAG_RD, &ctrlr->transfer_req_queue.queue_mode, 0,
+ "Active host-side queuing scheme "
+ "(Single-Doorbell or Multi-Circular-Queue)");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_io_queues",
+ CTLFLAG_RD, &ctrlr->num_io_queues, 0, "Number of I/O queue pairs");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "cap", CTLFLAG_RD,
+ &ctrlr->cap, 0, "Controller capabilities register");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_enabled",
+ CTLFLAG_RD, &dev->is_wb_enabled, 0, "WriteBooster enable/disable");
+
+ SYSCTL_ADD_BOOL(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_flush_enabled",
+ CTLFLAG_RD, &dev->is_wb_flush_enabled, 0,
+ "WriteBooster flush enable/disable");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_type",
+ CTLFLAG_RD, &dev->wb_buffer_type, 0, "WriteBooster type");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO, "wb_buffer_size_mb",
+ CTLFLAG_RD, &dev->wb_buffer_size_mb, 0,
+ "WriteBooster buffer size in MB");
+
+ SYSCTL_ADD_UINT(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "wb_user_space_config_option", CTLFLAG_RD,
+ &dev->wb_user_space_config_option, 0,
+ "WriteBooster preserve user space mode");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "timeout_period",
+ CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, &ctrlr->timeout_period,
+ 0, ufshci_sysctl_timeout_period, "IU",
+ "Timeout period for I/O queues (in seconds)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_cmds",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_cmds, "IU", "Number of commands submitted");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "num_intr_handler_calls", CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ ctrlr, 0, ufshci_sysctl_num_intr_handler_calls, "IU",
+ "Number of times interrupt handler was invoked (will "
+ "typically be less than number of actual interrupts "
+ "generated due to coalescing)");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_retries",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_retries, "IU", "Number of commands retried");
+
+ SYSCTL_ADD_PROC(ctrlr_ctx, ctrlr_list, OID_AUTO, "num_failures",
+ CTLTYPE_S64 | CTLFLAG_RD | CTLFLAG_MPSAFE, ctrlr, 0,
+ ufshci_sysctl_num_failures, "IU",
+ "Number of commands ending in failure after all retries");
+
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO, "utmrq",
+ CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "UTP Task Management Request Queue");
+
+ ufshci_sysctl_initialize_queue(
+ &ctrlr->task_mgmt_req_queue.hwq[UFSHCI_SDB_Q], ctrlr_ctx, que_tree);
+
+ /*
+ * Make sure that we've constructed the I/O queues before setting up the
+ * sysctls. Failed controllers won't allocate them, but we still want
+ * the rest of the sysctls available for diagnosis.
+ */
+ if (ctrlr->transfer_req_queue.hwq != NULL) {
+ ioq_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ctrlr_list, OID_AUTO,
+ "ioq", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "UTP Transfer Request Queue (I/O Queue)");
+ ioq_list = SYSCTL_CHILDREN(ioq_tree);
+
+ for (i = 0; i < ctrlr->num_io_queues; i++) {
+ snprintf(queue_name, QUEUE_NAME_LENGTH, "%d", i);
+ que_tree = SYSCTL_ADD_NODE(ctrlr_ctx, ioq_list,
+ OID_AUTO, queue_name, CTLFLAG_RD | CTLFLAG_MPSAFE,
+ NULL, "IO Queue");
+ ufshci_sysctl_initialize_queue(
+ &ctrlr->transfer_req_queue.hwq[i], ctrlr_ctx,
+ que_tree);
+ }
+ }
+}
diff --git a/sys/dev/ufshci/ufshci_uic_cmd.c b/sys/dev/ufshci/ufshci_uic_cmd.c
new file mode 100644
index 000000000000..b9c867ff7065
--- /dev/null
+++ b/sys/dev/ufshci/ufshci_uic_cmd.c
@@ -0,0 +1,241 @@
+/*-
+ * Copyright (c) 2025, Samsung Electronics Co., Ltd.
+ * Written by Jaeyoon Choi
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+
+#include "ufshci_private.h"
+#include "ufshci_reg.h"
+
+int
+ufshci_uic_power_mode_ready(struct ufshci_controller *ctrlr)
+{
+ uint32_t is, hcs;
+ int timeout;
+
+ /* Wait for the IS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->device_init_timeout_in_ms);
+
+ while (1) {
+ is = ufshci_mmio_read_4(ctrlr, is);
+ if (UFSHCIV(UFSHCI_IS_REG_UPMS, is)) {
+ ufshci_mmio_write_4(ctrlr, is,
+ UFSHCIM(UFSHCI_IS_REG_UPMS));
+ break;
+ }
+
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "Power mode is not changed "
+ "within %d ms\n",
+ ctrlr->device_init_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ /* TODO: Replace busy-wait with interrupt-based pause. */
+ DELAY(10);
+ }
+
+ /* Check HCS power mode change request status */
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs) != 0x01) {
+ ufshci_printf(ctrlr,
+ "Power mode change request status error: 0x%x\n",
+ UFSHCIV(UFSHCI_HCS_REG_UPMCRS, hcs));
+ return (ENXIO);
+ }
+
+ return (0);
+}
+
+int
+ufshci_uic_cmd_ready(struct ufshci_controller *ctrlr)
+{
+ uint32_t hcs;
+ int timeout;
+
+ /* Wait for the HCS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+
+ while (1) {
+ hcs = ufshci_mmio_read_4(ctrlr, hcs);
+ if (UFSHCIV(UFSHCI_HCS_REG_UCRDY, hcs))
+ break;
+
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "UIC command is not ready "
+ "within %d ms\n",
+ ctrlr->uic_cmd_timeout_in_ms);
+ return (ENXIO);
+ }
+
+ /* TODO: Replace busy-wait with interrupt-based pause. */
+ DELAY(10);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_uic_wait_cmd(struct ufshci_controller *ctrlr,
+ struct ufshci_uic_cmd *uic_cmd)
+{
+ uint32_t is;
+ int timeout, delta;
+
+ mtx_assert(&ctrlr->uic_cmd_lock, MA_OWNED);
+
+ /* Wait for the IS flag to change */
+ timeout = ticks + MSEC_2_TICKS(ctrlr->uic_cmd_timeout_in_ms);
+ delta = 10;
+
+ while (1) {
+ is = ufshci_mmio_read_4(ctrlr, is);
+ if (UFSHCIV(UFSHCI_IS_REG_UCCS, is)) {
+ ufshci_mmio_write_4(ctrlr, is,
+ UFSHCIM(UFSHCI_IS_REG_UCCS));
+ break;
+ }
+ if (timeout - ticks < 0) {
+ ufshci_printf(ctrlr,
+ "UIC command is not completed "
+ "within %d ms\n",
+ ctrlr->uic_cmd_timeout_in_ms);
+ return (ENXIO);
+ }
+
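+ /* Exponential backoff: start at 10 us and cap the poll interval at 1 ms. */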
+ DELAY(delta);
+ delta = min(1000, delta * 2);
+ }
+
+ return (0);
+}
+
+static int
+ufshci_uic_send_cmd(struct ufshci_controller *ctrlr,
+ struct ufshci_uic_cmd *uic_cmd, uint32_t *return_value)
+{
+ int error;
+ uint32_t config_result_code;
+
+ mtx_lock(&ctrlr->uic_cmd_lock);
+
+ error = ufshci_uic_cmd_ready(ctrlr);
+ if (error) {
+ mtx_unlock(&ctrlr->uic_cmd_lock);
+ return (ENXIO);
+ }
+
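+ /* Program the UIC command arguments before issuing the opcode. */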
+ ufshci_mmio_write_4(ctrlr, ucmdarg1, uic_cmd->argument1);
+ ufshci_mmio_write_4(ctrlr, ucmdarg2, uic_cmd->argument2);
+ ufshci_mmio_write_4(ctrlr, ucmdarg3, uic_cmd->argument3);
+
+ ufshci_mmio_write_4(ctrlr, uiccmd, uic_cmd->opcode);
+
+ error = ufshci_uic_wait_cmd(ctrlr, uic_cmd);
+
+ mtx_unlock(&ctrlr->uic_cmd_lock);
+
+ if (error)
+ return (ENXIO);
+
+ config_result_code = ufshci_mmio_read_4(ctrlr, ucmdarg2);
+ if (config_result_code) {
+ ufshci_printf(ctrlr,
+ "Failed to send UIC command. (config result code = 0x%x)\n",
+ config_result_code);
+ }
+
+ if (return_value != NULL)
+ *return_value = ufshci_mmio_read_4(ctrlr, ucmdarg3);
+
+ return (0);
+}
+
+int
+ufshci_uic_send_dme_link_startup(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_uic_cmd uic_cmd;
+ uic_cmd.opcode = UFSHCI_DME_LINK_STARTUP;
+ uic_cmd.argument1 = 0;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_get(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t *return_value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_GET;
+ uic_cmd.argument1 = attribute << 16;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_set(struct ufshci_controller *ctrlr, uint16_t attribute,
+ uint32_t value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_SET;
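+ /* Argument1[31:16] carries the MIB attribute ID; GenSelectorIndex is 0. */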
+ uic_cmd.argument1 = attribute << 16;
+ /* This driver only sets volatile attribute values. */
+ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+ uic_cmd.argument3 = value;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_peer_get(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t *return_value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_PEER_GET;
+ uic_cmd.argument1 = attribute << 16;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, return_value));
+}
+
+int
+ufshci_uic_send_dme_peer_set(struct ufshci_controller *ctrlr,
+ uint16_t attribute, uint32_t value)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_PEER_SET;
+ uic_cmd.argument1 = attribute << 16;
+ /* This driver only sets volatile attribute values. */
+ uic_cmd.argument2 = UFSHCI_ATTR_SET_TYPE_NORMAL << 16;
+ uic_cmd.argument3 = value;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
+
+int
+ufshci_uic_send_dme_endpoint_reset(struct ufshci_controller *ctrlr)
+{
+ struct ufshci_uic_cmd uic_cmd;
+
+ uic_cmd.opcode = UFSHCI_DME_ENDPOINT_RESET;
+ uic_cmd.argument1 = 0;
+ uic_cmd.argument2 = 0;
+ uic_cmd.argument3 = 0;
+
+ return (ufshci_uic_send_cmd(ctrlr, &uic_cmd, NULL));
+}
diff --git a/sys/dev/usb/controller/dwc3/aw_dwc3.c b/sys/dev/usb/controller/dwc3/aw_dwc3.c
index 802c46bdae28..be941ca2148f 100644
--- a/sys/dev/usb/controller/dwc3/aw_dwc3.c
+++ b/sys/dev/usb/controller/dwc3/aw_dwc3.c
@@ -125,7 +125,8 @@ aw_dwc3_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t aw_dwc3_methods[] = {
diff --git a/sys/dev/usb/controller/dwc3/dwc3.c b/sys/dev/usb/controller/dwc3/dwc3.c
index a44c2371b891..39b5d3ae4cb1 100644
--- a/sys/dev/usb/controller/dwc3/dwc3.c
+++ b/sys/dev/usb/controller/dwc3/dwc3.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_platform.h"
#include "opt_acpi.h"
@@ -129,7 +128,7 @@ snps_dwc3_attach_xhci(device_t dev)
return (ENXIO);
}
- sc->sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (sc->sc_bus.bdev == NULL) {
device_printf(dev, "Failed to add USB device\n");
return (ENXIO);
diff --git a/sys/dev/usb/controller/dwc3/rk_dwc3.c b/sys/dev/usb/controller/dwc3/rk_dwc3.c
index f53f446a29f3..16fc5f73f922 100644
--- a/sys/dev/usb/controller/dwc3/rk_dwc3.c
+++ b/sys/dev/usb/controller/dwc3/rk_dwc3.c
@@ -182,7 +182,8 @@ rk_dwc3_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t rk_dwc3_methods[] = {
diff --git a/sys/dev/usb/controller/dwc_otg.c b/sys/dev/usb/controller/dwc_otg.c
index c888d4c48be5..6c44eebd0616 100644
--- a/sys/dev/usb/controller/dwc_otg.c
+++ b/sys/dev/usb/controller/dwc_otg.c
@@ -3856,7 +3856,7 @@ dwc_otg_init(struct dwc_otg_softc *sc)
return (ENOMEM);
}
- sc->sc_bus.bdev = device_add_child(sc->sc_bus.parent, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(sc->sc_bus.parent, "usbus", DEVICE_UNIT_ANY);
if (sc->sc_bus.bdev == NULL)
return (ENXIO);
diff --git a/sys/dev/usb/controller/dwc_otg_acpi.c b/sys/dev/usb/controller/dwc_otg_acpi.c
index 9b982dfd6e41..d8deaa80e94e 100644
--- a/sys/dev/usb/controller/dwc_otg_acpi.c
+++ b/sys/dev/usb/controller/dwc_otg_acpi.c
@@ -25,7 +25,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
@@ -127,9 +126,12 @@ static int
dwc_otg_detach(device_t dev)
{
struct dwc_otg_softc *sc = device_get_softc(dev);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (sc->sc_irq_res && sc->sc_intr_hdl) {
/*
diff --git a/sys/dev/usb/controller/dwc_otg_fdt.c b/sys/dev/usb/controller/dwc_otg_fdt.c
index ea46494df8d9..2ed94b23212c 100644
--- a/sys/dev/usb/controller/dwc_otg_fdt.c
+++ b/sys/dev/usb/controller/dwc_otg_fdt.c
@@ -140,10 +140,6 @@ dwc_otg_attach(device_t dev)
if (sc->sc_otg.sc_irq_res == NULL)
goto error;
- sc->sc_otg.sc_bus.bdev = device_add_child(dev, "usbus", -1);
- if (sc->sc_otg.sc_bus.bdev == NULL)
- goto error;
-
err = dwc_otg_init(&sc->sc_otg);
if (err == 0) {
err = device_probe_and_attach(sc->sc_otg.sc_bus.bdev);
@@ -162,9 +158,12 @@ int
dwc_otg_detach(device_t dev)
{
struct dwc_otg_fdt_softc *sc = device_get_softc(dev);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
if (sc->sc_otg.sc_irq_res && sc->sc_otg.sc_intr_hdl) {
/*
diff --git a/sys/dev/usb/controller/ehci_fsl.c b/sys/dev/usb/controller/ehci_fsl.c
index 668a5b44e4c3..ce1749775ab2 100644
--- a/sys/dev/usb/controller/ehci_fsl.c
+++ b/sys/dev/usb/controller/ehci_fsl.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
@@ -306,7 +305,7 @@ fsl_ehci_attach(device_t self)
}
/* Add USB device */
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
err = fsl_ehci_detach(self);
@@ -372,10 +371,14 @@ fsl_ehci_attach(device_t self)
static int
fsl_ehci_detach(device_t self)
{
-
int err;
ehci_softc_t *sc;
+ /* During module unload there are lots of children leftover */
+ err = bus_generic_detach(self);
+ if (err != 0)
+ return (err);
+
sc = device_get_softc(self);
/*
* only call ehci_detach() after ehci_init()
@@ -399,14 +402,6 @@ fsl_ehci_detach(device_t self)
sc->sc_intr_hdl = NULL;
}
- if (sc->sc_bus.bdev) {
- device_delete_child(self, sc->sc_bus.bdev);
- sc->sc_bus.bdev = NULL;
- }
-
- /* During module unload there are lots of children leftover */
- device_delete_children(self);
-
if (sc->sc_irq_res) {
bus_release_resource(self, SYS_RES_IRQ, 0, sc->sc_irq_res);
sc->sc_irq_res = NULL;
diff --git a/sys/dev/usb/controller/ehci_imx.c b/sys/dev/usb/controller/ehci_imx.c
index 1fa2d1dab737..149b26f04760 100644
--- a/sys/dev/usb/controller/ehci_imx.c
+++ b/sys/dev/usb/controller/ehci_imx.c
@@ -31,7 +31,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* EHCI driver for Freescale i.MX SoCs which incorporate the USBOH3 controller.
*/
@@ -314,7 +313,7 @@ imx_ehci_detach(device_t dev)
esc = &sc->ehci_softc;
/* First detach all children; we can't detach if that fails. */
- if ((err = device_delete_children(dev)) != 0)
+ if ((err = bus_generic_detach(dev)) != 0)
return (err);
if (esc->sc_flags & EHCI_SCFLG_DONEINIT)
@@ -438,7 +437,7 @@ imx_ehci_attach(device_t dev)
imx_ehci_disable_oc(sc);
/* Add USB bus device. */
- esc->sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ esc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (esc->sc_bus.bdev == NULL) {
device_printf(dev, "Could not add USB device\n");
goto out;
diff --git a/sys/dev/usb/controller/ehci_msm.c b/sys/dev/usb/controller/ehci_msm.c
index 37e330fd6ea7..2586df634b3c 100644
--- a/sys/dev/usb/controller/ehci_msm.c
+++ b/sys/dev/usb/controller/ehci_msm.c
@@ -31,7 +31,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
@@ -125,7 +124,7 @@ ehci_msm_attach(device_t dev)
panic("%s: unable to subregion USB host registers",
device_get_name(dev));
- sc->sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(dev, "Could not add USB device\n");
goto error;
@@ -171,13 +170,9 @@ ehci_msm_detach(device_t dev)
sc = device_get_softc(dev);
- if (sc->sc_bus.bdev) {
- bdev = sc->sc_bus.bdev;
- device_detach(bdev);
- device_delete_child(dev, bdev);
- }
-
- device_delete_children(dev);
+ err = bus_generic_detach(dev);
+ if (err != 0)
+ return (err);
if (sc->sc_irq_res && sc->sc_intr_hdl) {
/* only call ehci_detach() after ehci_init() */
diff --git a/sys/dev/usb/controller/ehci_mv.c b/sys/dev/usb/controller/ehci_mv.c
index b5096e5f2fb6..5dc72d4af3d8 100644
--- a/sys/dev/usb/controller/ehci_mv.c
+++ b/sys/dev/usb/controller/ehci_mv.c
@@ -35,7 +35,6 @@
* FDT attachment driver for the USB Enhanced Host Controller.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/stdint.h>
@@ -213,7 +212,7 @@ mv_ehci_attach(device_t self)
goto error;
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -283,7 +282,9 @@ mv_ehci_detach(device_t self)
int err;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ err = bus_generic_detach(self);
+ if (err != 0)
+ return (err);
/*
* disable interrupts that might have been switched on in mv_ehci_attach
diff --git a/sys/dev/usb/controller/ehci_pci.c b/sys/dev/usb/controller/ehci_pci.c
index 2aa0cd2fbcd8..d7298ab89df7 100644
--- a/sys/dev/usb/controller/ehci_pci.c
+++ b/sys/dev/usb/controller/ehci_pci.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* USB Enhanced Host Controller Driver, a.k.a. USB 2.0 controller.
*
@@ -360,7 +359,7 @@ ehci_pci_attach(device_t self)
device_printf(self, "Could not allocate irq\n");
goto error;
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -505,9 +504,12 @@ static int
ehci_pci_detach(device_t self)
{
ehci_softc_t *sc = device_get_softc(self);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ error = bus_generic_detach(self);
+ if (error != 0)
+ return (error);
pci_disable_busmaster(self);
diff --git a/sys/dev/usb/controller/generic_ehci.c b/sys/dev/usb/controller/generic_ehci.c
index 471ce72776d4..bd7dc32b1ea8 100644
--- a/sys/dev/usb/controller/generic_ehci.c
+++ b/sys/dev/usb/controller/generic_ehci.c
@@ -32,7 +32,6 @@
* Generic EHCI driver based on the Allwinner A10 EHCI driver
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
@@ -100,7 +99,7 @@ generic_ehci_attach(device_t self)
device_printf(self, "Could not allocate irq\n");
goto error;
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -139,7 +138,9 @@ generic_ehci_detach(device_t self)
int err;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ err = bus_generic_detach(self);
+ if (err != 0)
+ return (err);
if (sc->sc_irq_res && sc->sc_intr_hdl) {
/*
diff --git a/sys/dev/usb/controller/generic_ehci_acpi.c b/sys/dev/usb/controller/generic_ehci_acpi.c
index f565590fa09a..d947215ad355 100644
--- a/sys/dev/usb/controller/generic_ehci_acpi.c
+++ b/sys/dev/usb/controller/generic_ehci_acpi.c
@@ -28,7 +28,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
diff --git a/sys/dev/usb/controller/generic_ehci_fdt.c b/sys/dev/usb/controller/generic_ehci_fdt.c
index af22d0bdef73..df2dc7fba4b9 100644
--- a/sys/dev/usb/controller/generic_ehci_fdt.c
+++ b/sys/dev/usb/controller/generic_ehci_fdt.c
@@ -28,7 +28,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
diff --git a/sys/dev/usb/controller/generic_ohci.c b/sys/dev/usb/controller/generic_ohci.c
index f3a9e0481bb2..5c0de59074d2 100644
--- a/sys/dev/usb/controller/generic_ohci.c
+++ b/sys/dev/usb/controller/generic_ohci.c
@@ -141,7 +141,7 @@ generic_ohci_attach(device_t dev)
err = ENXIO;
goto error;
}
- sc->ohci_sc.sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ sc->ohci_sc.sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (sc->ohci_sc.sc_bus.bdev == 0) {
err = ENXIO;
goto error;
@@ -231,7 +231,9 @@ generic_ohci_detach(device_t dev)
struct hwrst_list *rst, *rst_tmp;
/* during module unload there are lots of children leftover */
- device_delete_children(dev);
+ err = bus_generic_detach(dev);
+ if (err != 0)
+ return (err);
/*
* Put the controller into reset, then disable clocks and do
diff --git a/sys/dev/usb/controller/generic_xhci.c b/sys/dev/usb/controller/generic_xhci.c
index e89d1bc84497..16bda77e043d 100644
--- a/sys/dev/usb/controller/generic_xhci.c
+++ b/sys/dev/usb/controller/generic_xhci.c
@@ -27,7 +27,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -99,7 +98,7 @@ generic_xhci_attach(device_t dev)
return (ENXIO);
}
- sc->sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (sc->sc_bus.bdev == NULL) {
device_printf(dev, "Failed to add USB device\n");
generic_xhci_detach(dev);
@@ -152,7 +151,9 @@ generic_xhci_detach(device_t dev)
int err;
/* during module unload there are lots of children leftover */
- device_delete_children(dev);
+ err = bus_generic_detach(dev);
+ if (err != 0)
+ return (err);
if (sc->sc_irq_res != NULL && sc->sc_intr_hdl != NULL) {
err = bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intr_hdl);
diff --git a/sys/dev/usb/controller/generic_xhci_acpi.c b/sys/dev/usb/controller/generic_xhci_acpi.c
index 2cb5977e0cf1..e24fe1b1bcc3 100644
--- a/sys/dev/usb/controller/generic_xhci_acpi.c
+++ b/sys/dev/usb/controller/generic_xhci_acpi.c
@@ -25,7 +25,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_acpi.h"
#include <sys/param.h>
diff --git a/sys/dev/usb/controller/generic_xhci_fdt.c b/sys/dev/usb/controller/generic_xhci_fdt.c
index 66fc1ab65a23..8aab938cbc77 100644
--- a/sys/dev/usb/controller/generic_xhci_fdt.c
+++ b/sys/dev/usb/controller/generic_xhci_fdt.c
@@ -27,7 +27,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_bus.h"
#include <sys/param.h>
diff --git a/sys/dev/usb/controller/musb_otg_allwinner.c b/sys/dev/usb/controller/musb_otg_allwinner.c
index 3bfe2b525138..781b4d7e33fa 100644
--- a/sys/dev/usb/controller/musb_otg_allwinner.c
+++ b/sys/dev/usb/controller/musb_otg_allwinner.c
@@ -77,7 +77,7 @@
#if defined(__arm__)
#define bs_parent_space(bs) ((bs)->bs_parent)
typedef bus_space_tag_t awusb_bs_tag;
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
#define bs_parent_space(bs) (bs)
typedef void * awusb_bs_tag;
#endif
@@ -89,6 +89,7 @@ static struct ofw_compat_data compat_data[] = {
{ "allwinner,sun6i-a31-musb", AWUSB_OKAY },
{ "allwinner,sun8i-a33-musb", AWUSB_OKAY | AWUSB_NO_CONFDATA },
{ "allwinner,sun8i-h3-musb", AWUSB_OKAY | AWUSB_NO_CONFDATA },
+ { "allwinner,sun20i-d1-musb", AWUSB_OKAY | AWUSB_NO_CONFDATA },
{ NULL, 0 }
};
@@ -474,7 +475,7 @@ awusbdrd_attach(device_t dev)
#if defined(__arm__)
sc->bs.bs_parent = rman_get_bustag(sc->res[0]);
-#elif defined(__aarch64__)
+#elif defined(__aarch64__) || defined(__riscv)
sc->bs.bs_cookie = rman_get_bustag(sc->res[0]);
#endif
@@ -494,7 +495,7 @@ awusbdrd_attach(device_t dev)
sc->sc.sc_io_hdl = rman_get_bushandle(sc->res[0]);
sc->sc.sc_io_size = rman_get_size(sc->res[0]);
- sc->sc.sc_bus.bdev = device_add_child(dev, "usbus", -1);
+ sc->sc.sc_bus.bdev = device_add_child(dev, "usbus", DEVICE_UNIT_ANY);
if (sc->sc.sc_bus.bdev == NULL) {
error = ENXIO;
goto fail;
@@ -561,16 +562,13 @@ static int
awusbdrd_detach(device_t dev)
{
struct awusbdrd_softc *sc;
- device_t bdev;
int error;
- sc = device_get_softc(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
- if (sc->sc.sc_bus.bdev != NULL) {
- bdev = sc->sc.sc_bus.bdev;
- device_detach(bdev);
- device_delete_child(dev, bdev);
- }
+ sc = device_get_softc(dev);
musbotg_uninit(&sc->sc);
error = bus_teardown_intr(dev, sc->res[1], sc->sc.sc_intr_hdl);
@@ -594,8 +592,6 @@ awusbdrd_detach(device_t dev)
bus_release_resources(dev, awusbdrd_spec, sc->res);
- device_delete_children(dev);
-
return (0);
}
diff --git a/sys/dev/usb/controller/ohci_pci.c b/sys/dev/usb/controller/ohci_pci.c
index 12bf55785215..0edcebcb0b38 100644
--- a/sys/dev/usb/controller/ohci_pci.c
+++ b/sys/dev/usb/controller/ohci_pci.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* USB Open Host Controller driver.
*
@@ -240,7 +239,7 @@ ohci_pci_attach(device_t self)
device_printf(self, "Could not allocate irq\n");
goto error;
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -320,9 +319,12 @@ static int
ohci_pci_detach(device_t self)
{
ohci_softc_t *sc = device_get_softc(self);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ error = bus_generic_detach(self);
+ if (error != 0)
+ return (error);
pci_disable_busmaster(self);
diff --git a/sys/dev/usb/controller/uhci_pci.c b/sys/dev/usb/controller/uhci_pci.c
index 250e2a7b31c6..97f6d09f9e65 100644
--- a/sys/dev/usb/controller/uhci_pci.c
+++ b/sys/dev/usb/controller/uhci_pci.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/* Universal Host Controller Interface
*
* UHCI spec: http://www.intel.com/
@@ -323,7 +322,7 @@ uhci_pci_attach(device_t self)
device_printf(self, "Could not allocate irq\n");
goto error;
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (!sc->sc_bus.bdev) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -414,9 +413,12 @@ int
uhci_pci_detach(device_t self)
{
uhci_softc_t *sc = device_get_softc(self);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ error = bus_generic_detach(self);
+ if (error != 0)
+ return (error);
/*
* disable interrupts that might have been switched on in
diff --git a/sys/dev/usb/controller/usb_controller.c b/sys/dev/usb/controller/usb_controller.c
index 163ee14bd097..7e89a5ab0155 100644
--- a/sys/dev/usb/controller/usb_controller.c
+++ b/sys/dev/usb/controller/usb_controller.c
@@ -135,7 +135,6 @@ DRIVER_MODULE(usbus, octusb, usb_driver, 0, 0);
/* Dual Mode Drivers */
DRIVER_MODULE(usbus, dwcotg, usb_driver, 0, 0);
-DRIVER_MODULE(usbus, saf1761otg, usb_driver, 0, 0);
/*------------------------------------------------------------------------*
* usb_probe
@@ -439,7 +438,7 @@ usb_bus_detach(struct usb_proc_msg *pm)
/* detach children first */
bus_topo_lock();
- bus_generic_detach(dev);
+ bus_detach_children(dev);
bus_topo_unlock();
/*
@@ -654,8 +653,8 @@ usb_bus_cleanup(struct usb_proc_msg *pm)
bus = ((struct usb_bus_msg *)pm)->bus;
- while ((pd = LIST_FIRST(&bus->pd_cleanup_list)) != NULL) {
- LIST_REMOVE(pd, pd_next);
+ while ((pd = SLIST_FIRST(&bus->pd_cleanup_list)) != NULL) {
+ SLIST_REMOVE(&bus->pd_cleanup_list, pd, usb_fs_privdata, pd_next);
USB_BUS_UNLOCK(bus);
usb_destroy_dev_sync(pd);
@@ -848,7 +847,7 @@ usb_attach_sub(device_t dev, struct usb_bus *bus)
bus->shutdown_msg[1].bus = bus;
#if USB_HAVE_UGEN
- LIST_INIT(&bus->pd_cleanup_list);
+ SLIST_INIT(&bus->pd_cleanup_list);
bus->cleanup_msg[0].hdr.pm_callback = &usb_bus_cleanup;
bus->cleanup_msg[0].bus = bus;
bus->cleanup_msg[1].hdr.pm_callback = &usb_bus_cleanup;
diff --git a/sys/dev/usb/controller/xhci.c b/sys/dev/usb/controller/xhci.c
index 5be592512196..788b2b718062 100644
--- a/sys/dev/usb/controller/xhci.c
+++ b/sys/dev/usb/controller/xhci.c
@@ -156,6 +156,7 @@ struct xhci_std_temp {
static void xhci_do_poll(struct usb_bus *);
static void xhci_device_done(struct usb_xfer *, usb_error_t);
+static void xhci_get_xecp(struct xhci_softc *);
static void xhci_root_intr(struct xhci_softc *);
static void xhci_free_device_ext(struct usb_device *);
static struct xhci_endpoint_ext *xhci_get_endpoint_ext(struct usb_device *,
@@ -566,6 +567,8 @@ xhci_init(struct xhci_softc *sc, device_t self, uint8_t dma32)
device_printf(self, "%d bytes context size, %d-bit DMA\n",
sc->sc_ctx_is_64_byte ? 64 : 32, (int)sc->sc_bus.dma_bits);
+ xhci_get_xecp(sc);
+
/* enable 64Kbyte control endpoint quirk */
sc->sc_bus.control_ep_quirk = (xhcictlquirk ? 1 : 0);
@@ -654,6 +657,88 @@ xhci_uninit(struct xhci_softc *sc)
}
static void
+xhci_get_xecp(struct xhci_softc *sc)
+{
+ uint32_t hccp1;
+ uint32_t eec;
+ uint32_t eecp;
+ bool first = true;
+
+ hccp1 = XREAD4(sc, capa, XHCI_HCSPARAMS0);
+
+ if (XHCI_HCS0_XECP(hccp1) == 0) {
+ device_printf(sc->sc_bus.parent,
+ "xECP: no capabilities found\n");
+ return;
+ }
+
+ /*
+ * Parse the xECP Capabilities table and print known caps.
+ * Implemented, vendor and reserved xECP Capabilities values are
+ * documented in Table 7.2 of eXtensible Host Controller Interface for
+ * Universal Serial Bus (xHCI) Rev 1.2b 2023.
+ */
+ device_printf(sc->sc_bus.parent, "xECP capabilities <");
+
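+ /* Prime eec so that XHCI_XECP_NEXT(eec) is non-zero on the first pass. */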
+ eec = -1;
+ for (eecp = XHCI_HCS0_XECP(hccp1) << 2;
+ eecp != 0 && XHCI_XECP_NEXT(eec) != 0;
+ eecp += XHCI_XECP_NEXT(eec) << 2) {
+ eec = XREAD4(sc, capa, eecp);
+
+ uint8_t xecpid = XHCI_XECP_ID(eec);
+
+ if ((xecpid >= 11 && xecpid <= 16) ||
+ (xecpid >= 19 && xecpid <= 191)) {
+ if (!first)
+ printf(",");
+ printf("RES(%x)", xecpid);
+ } else if (xecpid > 191) {
+ if (!first)
+ printf(",");
+ printf("VEND(%x)", xecpid);
+ } else {
+ if (!first)
+ printf(",");
+ switch (xecpid) {
+ case XHCI_ID_USB_LEGACY:
+ printf("LEGACY");
+ break;
+ case XHCI_ID_PROTOCOLS:
+ printf("PROTO");
+ break;
+ case XHCI_ID_POWER_MGMT:
+ printf("POWER");
+ break;
+ case XHCI_ID_VIRTUALIZATION:
+ printf("VIRT");
+ break;
+ case XHCI_ID_MSG_IRQ:
+ printf("MSG IRQ");
+ break;
+ case XHCI_ID_USB_LOCAL_MEM:
+ printf("LOCAL MEM");
+ break;
+ case XHCI_ID_USB_DEBUG:
+ printf("DEBUG");
+ break;
+ case XHCI_ID_EXT_MSI:
+ printf("EXT MSI");
+ break;
+ case XHCI_ID_USB3_TUN:
+ printf("TUN");
+ break;
+ }
+ }
+ first = false;
+ }
+ printf(">\n");
+}
+
+static void
xhci_set_hw_power_sleep(struct usb_bus *bus, uint32_t state)
{
struct xhci_softc *sc = XHCI_BUS2SC(bus);
diff --git a/sys/dev/usb/controller/xhci_pci.c b/sys/dev/usb/controller/xhci_pci.c
index 359f14bb1e3c..820fb2f738a1 100644
--- a/sys/dev/usb/controller/xhci_pci.c
+++ b/sys/dev/usb/controller/xhci_pci.c
@@ -25,7 +25,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -100,6 +99,14 @@ xhci_pci_match(device_t self)
return ("AMD Starship USB 3.0 controller");
case 0x149c1022:
return ("AMD Matisse USB 3.0 controller");
+ case 0x15b61022:
+ case 0x15b71022:
+ return ("AMD Raphael/Granite Ridge USB 3.1 controller");
+ case 0x15b81022:
+ return ("AMD Raphael/Granite Ridge USB 2.0 controller");
+ case 0x15e01022:
+ case 0x15e11022:
+ return ("AMD Raven USB 3.1 controller");
case 0x43ba1022:
return ("AMD X399 USB 3.0 controller");
case 0x43b91022: /* X370 */
@@ -107,6 +114,8 @@ xhci_pci_match(device_t self)
return ("AMD 300 Series USB 3.1 controller");
case 0x43d51022:
return ("AMD 400 Series USB 3.1 controller");
+ case 0x43f71022:
+ return ("AMD 600 Series USB 3.2 controller");
case 0x78121022:
case 0x78141022:
case 0x79141022:
@@ -169,6 +178,8 @@ xhci_pci_match(device_t self)
return ("Intel Tiger Lake-H USB 3.2 controller");
case 0x461e8086:
return ("Intel Alder Lake-P Thunderbolt 4 USB controller");
+ case 0x4b7d8086:
+ return ("Intel Elkhart Lake USB 3.1 controller");
case 0x51ed8086:
return ("Intel Alder Lake USB 3.2 controller");
case 0x5aa88086:
@@ -386,7 +397,7 @@ xhci_pci_attach(device_t self)
device_printf(self, "Could not allocate IRQ\n");
/* goto error; FALLTHROUGH - use polling */
}
- sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ sc->sc_bus.bdev = device_add_child(self, "usbus", DEVICE_UNIT_ANY);
if (sc->sc_bus.bdev == NULL) {
device_printf(self, "Could not add USB device\n");
goto error;
@@ -462,9 +473,12 @@ static int
xhci_pci_detach(device_t self)
{
struct xhci_softc *sc = device_get_softc(self);
+ int error;
/* during module unload there are lots of children leftover */
- device_delete_children(self);
+ error = bus_generic_detach(self);
+ if (error != 0)
+ return (error);
usb_callout_drain(&sc->sc_callout);
xhci_halt_controller(sc);
diff --git a/sys/dev/usb/controller/xhcireg.h b/sys/dev/usb/controller/xhcireg.h
index 9d0b6e2f4b4b..821897155544 100644
--- a/sys/dev/usb/controller/xhcireg.h
+++ b/sys/dev/usb/controller/xhcireg.h
@@ -205,6 +205,11 @@
#define XHCI_ID_VIRTUALIZATION 0x0004
#define XHCI_ID_MSG_IRQ 0x0005
#define XHCI_ID_USB_LOCAL_MEM 0x0006
+/* values 7-9 are reserved */
+#define XHCI_ID_USB_DEBUG 0x000a
+/* values 11-16 are reserved */
+#define XHCI_ID_EXT_MSI 0x0011
+#define XHCI_ID_USB3_TUN 0x0012
/* XHCI register R/W wrappers */
#define XREAD1(sc, what, a) \
diff --git a/sys/dev/usb/controller/xlnx_dwc3.c b/sys/dev/usb/controller/xlnx_dwc3.c
index b0680db97d22..c450734e4225 100644
--- a/sys/dev/usb/controller/xlnx_dwc3.c
+++ b/sys/dev/usb/controller/xlnx_dwc3.c
@@ -29,8 +29,6 @@
* Xilinx DWC3 glue
*/
-#include <sys/cdefs.h>
-
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
@@ -133,7 +131,8 @@ xlnx_dwc3_attach(device_t dev)
device_probe_and_attach(cdev);
}
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
static device_method_t xlnx_dwc3_methods[] = {
diff --git a/sys/dev/usb/input/atp.c b/sys/dev/usb/input/atp.c
index fa78f7d7221b..41ab37c6f1cc 100644
--- a/sys/dev/usb/input/atp.c
+++ b/sys/dev/usb/input/atp.c
@@ -58,7 +58,6 @@
* giving me an opportunity to do this work.
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index 863e04f4e52b..e2b97f5accac 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -4,7 +4,6 @@
* $NetBSD: uhid.c,v 1.54 2002/09/23 05:51:21 simonb Exp $
*/
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@@ -41,8 +40,6 @@
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
-#include "opt_hid.h"
-
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -633,11 +630,13 @@ uhid_ioctl(struct usb_fifo *fifo, u_long cmd, void *addr,
default:
return (EINVAL);
}
+ size = imin(ugd->ugd_maxlen, size);
if (id != 0)
error = copyin(ugd->ugd_data, &id, 1);
if (error == 0)
error = uhid_get_report(sc, ugd->ugd_report_type, id,
- NULL, ugd->ugd_data, imin(ugd->ugd_maxlen, size));
+ NULL, ugd->ugd_data, size);
+ ugd->ugd_actlen = size;
break;
case USB_SET_REPORT:
@@ -927,11 +926,7 @@ static device_method_t uhid_methods[] = {
};
static driver_t uhid_driver = {
-#ifdef HIDRAW_MAKE_UHID_ALIAS
- .name = "hidraw",
-#else
.name = "uhid",
-#endif
.methods = uhid_methods,
.size = sizeof(struct uhid_softc),
};
diff --git a/sys/dev/usb/input/ukbd.c b/sys/dev/usb/input/ukbd.c
index f33ae6e8a620..57e9beac34b6 100644
--- a/sys/dev/usb/input/ukbd.c
+++ b/sys/dev/usb/input/ukbd.c
@@ -1,4 +1,3 @@
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@@ -72,6 +71,8 @@
#include <dev/usb/quirk/usb_quirk.h>
+#include "usbdevs.h"
+
#ifdef EVDEV_SUPPORT
#include <dev/evdev/input.h>
#include <dev/evdev/evdev.h>
@@ -173,13 +174,19 @@ struct ukbd_softc {
#define UKBD_FLAG_ATTACHED 0x00000010
#define UKBD_FLAG_GONE 0x00000020
-#define UKBD_FLAG_HID_MASK 0x003fffc0
-#define UKBD_FLAG_APPLE_EJECT 0x00000040
-#define UKBD_FLAG_APPLE_FN 0x00000080
-#define UKBD_FLAG_APPLE_SWAP 0x00000100
+/* set in ukbd_attach */
+#define UKBD_FLAG_APPLE_SWAP 0x00000040
+/* set in ukbd_parse_hid */
+#define UKBD_FLAG_APPLE_EJECT 0x00000080
+#define UKBD_FLAG_APPLE_FN 0x00000100
#define UKBD_FLAG_NUMLOCK 0x00080000
#define UKBD_FLAG_CAPSLOCK 0x00100000
#define UKBD_FLAG_SCROLLLOCK 0x00200000
+#define UKBD_FLAG_HID_MASK (UKBD_FLAG_APPLE_EJECT | \
+ UKBD_FLAG_APPLE_FN | \
+ UKBD_FLAG_NUMLOCK | \
+ UKBD_FLAG_CAPSLOCK | \
+ UKBD_FLAG_SCROLLLOCK)
int sc_mode; /* input mode (K_XLATE,K_RAW,K_CODE) */
int sc_state; /* shift/lock key state */
@@ -296,6 +303,48 @@ static const uint8_t ukbd_boot_desc[] = {
0xff, 0x00, 0x81, 0x00, 0xc0
};
+static const STRUCT_USB_HOST_ID ukbd_apple_iso_models[] = {
+ /* PowerBooks Feb 2005, iBooks G4 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_FOUNTAIN_ISO) },
+ /* PowerBooks Oct 2005 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_GEYSER_ISO) },
+ /* Core Duo MacBook & MacBook Pro */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_GEYSER3_ISO) },
+ /* Core2 Duo MacBook & MacBook Pro */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_GEYSER4_ISO) },
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_GEYSER4_HF_ISO) },
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_ALU_MINI_ISO) },
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_ALU_ISO) },
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_ALU_REVB_ISO) },
+ /* MacbookAir, aka wellspring */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING_ISO) },
+ /* MacbookProPenryn, aka wellspring2 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING2_ISO) },
+ /* Macbook5,1 (unibody), aka wellspring3 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING3_ISO) },
+ /* MacbookAir3,2 (unibody), aka wellspring4 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING4_ISO) },
+ /* MacbookAir3,1 (unibody), aka wellspring4 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING4A_ISO) },
+ /* Macbook8 (unibody, March 2011) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING5_ISO) },
+ /* Macbook8,2 (unibody) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING5A_ISO) },
+ /* MacbookAir4,2 (unibody, July 2011) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING6_ISO) },
+ /* MacbookAir4,1 (unibody, July 2011) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING6A_ISO) },
+ /* MacbookPro10,1 (unibody, June 2012) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING7_ISO) },
+ /* MacbookPro10,2 (unibody, October 2012) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING7A_ISO) },
+ /* MacbookAir6,2 (unibody, June 2013) */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING8_ISO) },
+ /* MacbookPro12,1 */
+ { USB_VP(USB_VENDOR_APPLE, USB_PRODUCT_APPLE_WELLSPRING9_ISO) },
+};
+
/* prototypes */
static void ukbd_timeout(void *);
static void ukbd_set_leds(struct ukbd_softc *, uint8_t);
@@ -1001,8 +1050,7 @@ ukbd_parse_hid(struct ukbd_softc *sc, const uint8_t *ptr, uint32_t len)
hid_input, 0, &sc->sc_loc_apple_eject, &flags,
&sc->sc_id_apple_eject)) {
if (flags & HIO_VARIABLE)
- sc->sc_flags |= UKBD_FLAG_APPLE_EJECT |
- UKBD_FLAG_APPLE_SWAP;
+ sc->sc_flags |= UKBD_FLAG_APPLE_EJECT;
DPRINTFN(1, "Found Apple eject-key\n");
}
if (hid_locate(ptr, len,
@@ -1138,6 +1186,17 @@ ukbd_attach(device_t dev)
sc->sc_fkeymap[n] = fkey_tab[n];
}
+ /*
+ * Check if this is an Apple keyboard with swapped key codes;
+ * apparently, these are the ISO layout models.
+ */
+ DPRINTF("uaa vendor: 0x%04x, uaa product 0x%04x\n",
+     uaa->info.idVendor, uaa->info.idProduct);
+ if (usbd_lookup_id_by_uaa(ukbd_apple_iso_models,
+     sizeof(ukbd_apple_iso_models), uaa) == 0) {
+ sc->sc_flags |= UKBD_FLAG_APPLE_SWAP;
+ DPRINTF("UKBD_FLAG_APPLE_SWAP set\n");
+ } else {
+ DPRINTF("UKBD_FLAG_APPLE_SWAP not set\n");
+ }
+
kbd_set_maps(kbd, &sc->sc_keymap, &sc->sc_accmap,
sc->sc_fkeymap, UKBD_NFKEY);
diff --git a/sys/dev/usb/input/ums.c b/sys/dev/usb/input/ums.c
index 8416be656f81..523ec4d05db9 100644
--- a/sys/dev/usb/input/ums.c
+++ b/sys/dev/usb/input/ums.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
@@ -320,11 +319,12 @@ ums_intr_callback(struct usb_xfer *xfer, usb_error_t error)
if (++info != &sc->sc_info[UMS_INFO_MAX])
goto repeat;
+ /* keep old button value(s) for non-detected buttons */
+ buttons |= sc->sc_status.button & ~buttons_found;
+
#ifdef EVDEV_SUPPORT
buttons_reported = buttons;
#endif
- /* keep old button value(s) for non-detected buttons */
- buttons |= sc->sc_status.button & ~buttons_found;
if (dx || dy || dz || dt || dw ||
(buttons != sc->sc_status.button)) {
diff --git a/sys/dev/usb/input/usbhid.c b/sys/dev/usb/input/usbhid.c
index 0832d657e521..cba3f34053e5 100644
--- a/sys/dev/usb/input/usbhid.c
+++ b/sys/dev/usb/input/usbhid.c
@@ -30,7 +30,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* HID spec: https://www.usb.org/sites/default/files/documents/hid1_11.pdf
*/
@@ -77,7 +76,7 @@
#include "hid_if.h"
static SYSCTL_NODE(_hw_usb, OID_AUTO, usbhid, CTLFLAG_RW, 0, "USB usbhid");
-static int usbhid_enable = 0;
+static int usbhid_enable = 1;
SYSCTL_INT(_hw_usb_usbhid, OID_AUTO, enable, CTLFLAG_RWTUN,
&usbhid_enable, 0, "Enable usbhid and prefer it to other USB HID drivers");
#ifdef USB_DEBUG
@@ -115,6 +114,7 @@ struct usbhid_xfer_ctx {
void *cb_ctx;
int waiters;
bool influx;
+ bool no_readahead;
};
struct usbhid_softc {
@@ -273,7 +273,7 @@ usbhid_intr_handler_cb(struct usbhid_xfer_ctx *xfer_ctx)
sc->sc_intr_handler(sc->sc_intr_ctx, xfer_ctx->buf,
xfer_ctx->req.intr.actlen);
- return (0);
+ return (xfer_ctx->no_readahead ? ECANCELED : 0);
}
static int
@@ -431,6 +431,7 @@ usbhid_intr_start(device_t dev, device_t child __unused)
.cb = usbhid_intr_handler_cb,
.cb_ctx = sc,
.buf = sc->sc_intr_buf,
+ .no_readahead = hid_test_quirk(&sc->sc_hw, HQ_NO_READAHEAD),
};
sc->sc_xfer_ctx[POLL_XFER(USBHID_INTR_IN_DT)] = (struct usbhid_xfer_ctx) {
.req.intr.maxlen =
@@ -706,6 +707,10 @@ usbhid_ioctl(device_t dev, device_t child __unused, unsigned long cmd,
if (error == 0)
ucr->ucr_actlen = UGETW(req.ctrl.wLength);
break;
+ case USB_GET_DEVICEINFO:
+ error = usbd_fill_deviceinfo(sc->sc_udev,
+ (struct usb_device_info *)data);
+ break;
default:
error = EINVAL;
}
@@ -834,7 +839,7 @@ usbhid_attach(device_t dev)
mtx_init(&sc->sc_mtx, "usbhid lock", NULL, MTX_DEF);
- child = device_add_child(dev, "hidbus", -1);
+ child = device_add_child(dev, "hidbus", DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "Could not add hidbus device\n");
usbhid_detach(dev);
@@ -842,12 +847,7 @@ usbhid_attach(device_t dev)
}
device_set_ivars(child, &sc->sc_hw);
- error = bus_generic_attach(dev);
- if (error) {
- device_printf(dev, "failed to attach child: %d\n", error);
- usbhid_detach(dev);
- return (error);
- }
+ bus_attach_children(dev);
return (0); /* success */
}
@@ -856,8 +856,12 @@ static int
usbhid_detach(device_t dev)
{
struct usbhid_softc *sc = device_get_softc(dev);
+ int error;
+
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
- device_delete_children(dev);
mtx_destroy(&sc->sc_mtx);
return (0);
diff --git a/sys/dev/usb/input/wmt.c b/sys/dev/usb/input/wmt.c
index 23692e77a0fa..03e4da35a9fe 100644
--- a/sys/dev/usb/input/wmt.c
+++ b/sys/dev/usb/input/wmt.c
@@ -24,7 +24,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* MS Windows 7/8/10 compatible USB HID Multi-touch Device driver.
* https://msdn.microsoft.com/en-us/library/windows/hardware/jj151569(v=vs.85).aspx
diff --git a/sys/dev/usb/input/wsp.c b/sys/dev/usb/input/wsp.c
index f1931c9e03c0..f78d64f69c08 100644
--- a/sys/dev/usb/input/wsp.c
+++ b/sys/dev/usb/input/wsp.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
#include "opt_evdev.h"
#include <sys/param.h>
@@ -74,7 +73,7 @@
} while (0)
/* Tunables */
-static SYSCTL_NODE(_hw_usb, OID_AUTO, wsp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+static SYSCTL_NODE(_hw_usb, OID_AUTO, wsp, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"USB wsp");
#ifdef USB_DEBUG
@@ -87,60 +86,101 @@ enum wsp_log_level {
static int wsp_debug = WSP_LLEVEL_ERROR;/* the default is to only log errors */
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, debug, CTLFLAG_RWTUN,
- &wsp_debug, WSP_LLEVEL_ERROR, "WSP debug level");
+ &wsp_debug, WSP_LLEVEL_ERROR, "WSP debug level (0-3)");
#endif /* USB_DEBUG */
static struct wsp_tuning {
int scale_factor;
+ int scroll_finger_count;
+ int horizontal_swipe_finger_count;
int z_factor;
int z_invert;
+ int t_factor;
+ int t_invert;
int pressure_touch_threshold;
int pressure_untouch_threshold;
int pressure_tap_threshold;
- int scr_hor_threshold;
+ int scr_threshold;
+ int max_finger_diameter;
+ int max_scroll_finger_distance;
+ int max_double_tap_distance;
int enable_single_tap_clicks;
+ int enable_single_tap_movement;
}
wsp_tuning =
{
.scale_factor = 12,
+ .scroll_finger_count = 2,
+ .horizontal_swipe_finger_count = 3,
.z_factor = 5,
.z_invert = 0,
+ .t_factor = 0,
+ .t_invert = 0,
.pressure_touch_threshold = 50,
.pressure_untouch_threshold = 10,
.pressure_tap_threshold = 120,
- .scr_hor_threshold = 20,
+ .scr_threshold = 20,
+ .max_finger_diameter = 1900,
+ .max_scroll_finger_distance = 8192,
+ .max_double_tap_distance = 2500,
.enable_single_tap_clicks = 1,
+ .enable_single_tap_movement = 1,
};
static void
-wsp_runing_rangecheck(struct wsp_tuning *ptun)
+wsp_running_rangecheck(struct wsp_tuning *ptun)
{
WSP_CLAMP(ptun->scale_factor, 1, 63);
- WSP_CLAMP(ptun->z_factor, 1, 63);
+ WSP_CLAMP(ptun->scroll_finger_count, 0, 3);
+ WSP_CLAMP(ptun->horizontal_swipe_finger_count, 0, 3);
+ WSP_CLAMP(ptun->z_factor, 0, 63);
WSP_CLAMP(ptun->z_invert, 0, 1);
+ WSP_CLAMP(ptun->t_factor, 0, 63);
+ WSP_CLAMP(ptun->t_invert, 0, 1);
WSP_CLAMP(ptun->pressure_touch_threshold, 1, 255);
WSP_CLAMP(ptun->pressure_untouch_threshold, 1, 255);
WSP_CLAMP(ptun->pressure_tap_threshold, 1, 255);
- WSP_CLAMP(ptun->scr_hor_threshold, 1, 255);
+ WSP_CLAMP(ptun->max_finger_diameter, 1, 2400);
+ WSP_CLAMP(ptun->max_scroll_finger_distance, 1, 16384);
+ WSP_CLAMP(ptun->max_double_tap_distance, 1, 16384);
+ WSP_CLAMP(ptun->scr_threshold, 1, 255);
WSP_CLAMP(ptun->enable_single_tap_clicks, 0, 1);
+ WSP_CLAMP(ptun->enable_single_tap_movement, 0, 1);
}
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scale_factor, CTLFLAG_RWTUN,
&wsp_tuning.scale_factor, 0, "movement scale factor");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scroll_finger_count, CTLFLAG_RWTUN,
+ &wsp_tuning.scroll_finger_count, 0, "number of fingers for the scrolling gesture");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, horizontal_swipe_finger_count, CTLFLAG_RWTUN,
+ &wsp_tuning.horizontal_swipe_finger_count, 0, "number of fingers for the horizontal swipe gesture");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, z_factor, CTLFLAG_RWTUN,
- &wsp_tuning.z_factor, 0, "Z-axis scale factor");
+ &wsp_tuning.z_factor, 0, "Z-axis (vertical) scale factor");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, z_invert, CTLFLAG_RWTUN,
- &wsp_tuning.z_invert, 0, "enable Z-axis inversion");
+ &wsp_tuning.z_invert, 0, "enable (vertical) Z-axis inversion");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, t_factor, CTLFLAG_RWTUN,
+ &wsp_tuning.t_factor, 0, "T-axis (horizontal) scale factor");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, t_invert, CTLFLAG_RWTUN,
+ &wsp_tuning.t_invert, 0, "enable T-axis (horizontal) inversion");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, pressure_touch_threshold, CTLFLAG_RWTUN,
&wsp_tuning.pressure_touch_threshold, 0, "touch pressure threshold");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, pressure_untouch_threshold, CTLFLAG_RWTUN,
&wsp_tuning.pressure_untouch_threshold, 0, "untouch pressure threshold");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, pressure_tap_threshold, CTLFLAG_RWTUN,
&wsp_tuning.pressure_tap_threshold, 0, "tap pressure threshold");
-SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scr_hor_threshold, CTLFLAG_RWTUN,
- &wsp_tuning.scr_hor_threshold, 0, "horizontal scrolling threshold");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, max_finger_diameter, CTLFLAG_RWTUN,
+ &wsp_tuning.max_finger_diameter, 0, "maximum finger diameter");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, max_scroll_finger_distance, CTLFLAG_RWTUN,
+ &wsp_tuning.max_scroll_finger_distance, 0, "maximum scroll finger distance");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, max_double_tap_distance, CTLFLAG_RWTUN,
+ &wsp_tuning.max_double_tap_distance, 0, "maximum double-finger click distance");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, scr_threshold, CTLFLAG_RWTUN,
+ &wsp_tuning.scr_threshold, 0, "scrolling threshold");
SYSCTL_INT(_hw_usb_wsp, OID_AUTO, enable_single_tap_clicks, CTLFLAG_RWTUN,
&wsp_tuning.enable_single_tap_clicks, 0, "enable single tap clicks");
+SYSCTL_INT(_hw_usb_wsp, OID_AUTO, enable_single_tap_movement, CTLFLAG_RWTUN,
+ &wsp_tuning.enable_single_tap_movement, 0, "enable single tap movement");
+
/*
* Some tables, structures, definitions and constant values for the
@@ -289,7 +329,7 @@ struct tp_finger {
int16_t unused[2]; /* zeros */
int16_t pressure; /* pressure on forcetouch touchpad */
int16_t multi; /* one finger: varies, more fingers:
- * constant */
+ * constant */
} __packed;
/* trackpad finger data size, empirically at least ten fingers */
@@ -567,13 +607,13 @@ struct wsp_softc {
struct tp_finger *index[MAX_FINGERS]; /* finger index data */
int16_t pos_x[MAX_FINGERS]; /* position array */
int16_t pos_y[MAX_FINGERS]; /* position array */
+ int16_t pre_pos_x[MAX_FINGERS]; /* previous position array */
+ int16_t pre_pos_y[MAX_FINGERS]; /* previous position array */
u_int sc_touch; /* touch status */
#define WSP_UNTOUCH 0x00
#define WSP_FIRST_TOUCH 0x01
#define WSP_SECOND_TOUCH 0x02
#define WSP_TOUCHING 0x04
- int16_t pre_pos_x; /* previous position array */
- int16_t pre_pos_y; /* previous position array */
int dx_sum; /* x axis cumulative movement */
int dy_sum; /* y axis cumulative movement */
int dz_sum; /* z axis cumulative movement */
@@ -590,7 +630,6 @@ struct wsp_softc {
#define WSP_TAP_THRESHOLD 3
#define WSP_TAP_MAX_COUNT 20
int distance; /* the distance of 2 fingers */
-#define MAX_DISTANCE 2500 /* the max allowed distance */
uint8_t ibtn; /* button status in tapping */
uint8_t ntaps; /* finger status in tapping */
uint8_t scr_mode; /* scroll status in movement */
@@ -858,10 +897,10 @@ wsp_attach(device_t dev)
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_POSITION_Y, sc->sc_params->y);
/* finger pressure */
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_PRESSURE, sc->sc_params->p);
- /* finger touch area */
+ /* finger major/minor axis */
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_TOUCH_MAJOR, sc->sc_params->w);
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_TOUCH_MINOR, sc->sc_params->w);
- /* finger approach area */
+ /* finger major/minor approach */
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_WIDTH_MAJOR, sc->sc_params->w);
WSP_SUPPORT_ABS(sc->sc_evdev, ABS_MT_WIDTH_MINOR, sc->sc_params->w);
/* finger orientation */
@@ -939,7 +978,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
int slot = 0;
#endif
- wsp_runing_rangecheck(&tun);
+ wsp_running_rangecheck(&tun);
if (sc->dz_count == 0)
sc->dz_count = WSP_DZ_MAX_COUNT;
@@ -994,7 +1033,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
f->pressure = le16toh((uint16_t)f->pressure);
f->multi = le16toh((uint16_t)f->multi);
}
- DPRINTFN(WSP_LLEVEL_INFO,
+ DPRINTFN(WSP_LLEVEL_INFO,
"[%d]ibt=%d, taps=%d, o=%4d, ax=%5d, ay=%5d, "
"rx=%5d, ry=%5d, tlmaj=%4d, tlmin=%4d, ot=%4x, "
"tchmaj=%4d, tchmin=%4d, presure=%4d, m=%4x\n",
@@ -1034,13 +1073,35 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
sc->sc_status.obutton = sc->sc_status.button;
sc->sc_status.button = 0;
+ if (ntouch == 2) {
+ sc->distance = max(sc->distance, max(
+ abs(sc->pos_x[0] - sc->pos_x[1]),
+ abs(sc->pos_y[0] - sc->pos_y[1])));
+ }
+
if (ibt != 0) {
- if ((params->tp->caps & HAS_INTEGRATED_BUTTON) && ntouch == 2)
- sc->sc_status.button |= MOUSE_BUTTON3DOWN;
- else if ((params->tp->caps & HAS_INTEGRATED_BUTTON) && ntouch == 3)
- sc->sc_status.button |= MOUSE_BUTTON2DOWN;
- else
+ if (params->tp->caps & HAS_INTEGRATED_BUTTON) {
+ switch (ntouch) {
+ case 1:
+ sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ break;
+ case 2:
+ if (sc->distance < tun.max_double_tap_distance && abs(sc->dx_sum) < 5 &&
+ abs(sc->dy_sum) < 5)
+ sc->sc_status.button |= MOUSE_BUTTON3DOWN;
+ else
+ sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ break;
+ case 3:
+ sc->sc_status.button |= MOUSE_BUTTON2DOWN;
+ break;
+ default:
+ break;
+ }
+ } else {
sc->sc_status.button |= MOUSE_BUTTON1DOWN;
+ }
+
sc->ibtn = 1;
}
sc->intr_count++;
@@ -1049,7 +1110,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
switch (ntouch) {
case 1:
if (sc->index[0]->touch_major > tun.pressure_tap_threshold &&
- sc->index[0]->tool_major <= 1200)
+ sc->index[0]->tool_major <= tun.max_finger_diameter)
sc->ntaps = 1;
break;
case 2:
@@ -1067,11 +1128,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
- if (ntouch == 2) {
- sc->distance = max(sc->distance, max(
- abs(sc->pos_x[0] - sc->pos_x[1]),
- abs(sc->pos_y[0] - sc->pos_y[1])));
- }
+
if (sc->index[0]->touch_major < tun.pressure_untouch_threshold &&
sc->sc_status.button == 0) {
sc->sc_touch = WSP_UNTOUCH;
@@ -1092,7 +1149,7 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
case 2:
DPRINTFN(WSP_LLEVEL_INFO, "sum_x=%5d, sum_y=%5d\n",
sc->dx_sum, sc->dy_sum);
- if (sc->distance < MAX_DISTANCE && abs(sc->dx_sum) < 5 &&
+ if (sc->distance < tun.max_double_tap_distance && abs(sc->dx_sum) < 5 &&
abs(sc->dy_sum) < 5) {
wsp_add_to_queue(sc, 0, 0, 0, MOUSE_BUTTON3DOWN);
DPRINTFN(WSP_LLEVEL_INFO, "RIGHT CLICK!\n");
@@ -1107,17 +1164,19 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
}
wsp_add_to_queue(sc, 0, 0, 0, 0); /* button release */
}
- if ((sc->dt_sum / tun.scr_hor_threshold) != 0 &&
- sc->ntaps == 2 && sc->scr_mode == WSP_SCR_HOR) {
+
+ if (sc->scr_mode == WSP_SCR_HOR && sc->ntaps == tun.horizontal_swipe_finger_count &&
+     tun.horizontal_swipe_finger_count > 0 && (sc->dt_sum / tun.scr_threshold) != 0) {
/*
- * translate T-axis into button presses
- * until further
+ * translate T-axis swipe into button
+ * presses 3 and 4 (forward/back)
*/
if (sc->dt_sum > 0)
wsp_add_to_queue(sc, 0, 0, 0, 1UL << 3);
else if (sc->dt_sum < 0)
wsp_add_to_queue(sc, 0, 0, 0, 1UL << 4);
}
+
sc->dz_count = WSP_DZ_MAX_COUNT;
sc->dz_sum = 0;
sc->intr_count = 0;
@@ -1138,38 +1197,38 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
} else if (sc->index[0]->touch_major >= tun.pressure_touch_threshold &&
sc->sc_touch == WSP_FIRST_TOUCH) { /* ignore second touch */
sc->sc_touch = WSP_SECOND_TOUCH;
- DPRINTFN(WSP_LLEVEL_INFO, "Fist pre_x=%5d, pre_y=%5d\n",
- sc->pre_pos_x, sc->pre_pos_y);
+ DPRINTFN(WSP_LLEVEL_INFO, "First pre_x[0]=%5d, pre_y[0]=%5d\n",
+ sc->pre_pos_x[0], sc->pre_pos_y[0]);
} else {
if (sc->sc_touch == WSP_SECOND_TOUCH)
sc->sc_touch = WSP_TOUCHING;
if (ntouch != 0 &&
sc->index[0]->touch_major >= tun.pressure_touch_threshold) {
- dx = sc->pos_x[0] - sc->pre_pos_x;
- dy = sc->pos_y[0] - sc->pre_pos_y;
+ dx = sc->pos_x[0] - sc->pre_pos_x[0];
+ dy = sc->pos_y[0] - sc->pre_pos_y[0];
- /* Ignore movement during button is releasing */
- if (sc->ibtn != 0 && sc->sc_status.button == 0)
+ /* Optionally ignore movement while the button is being released */
+ if (tun.enable_single_tap_movement != 1 && sc->ibtn != 0 && sc->sc_status.button == 0)
dx = dy = 0;
/* Ignore movement if ntouch changed */
if (sc->o_ntouch != ntouch)
dx = dy = 0;
- /* Ignore unexpeted movement when typing */
- if (ntouch == 1 && sc->index[0]->tool_major > 1200)
+ /* Ignore unexpected movement when typing (palm detection) */
+ if (ntouch == 1 && sc->index[0]->tool_major > tun.max_finger_diameter)
dx = dy = 0;
- if (sc->ibtn != 0 && ntouch == 1 &&
- sc->intr_count < WSP_TAP_MAX_COUNT &&
+ if (sc->ibtn != 0 && ntouch == 1 &&
+ sc->intr_count < WSP_TAP_MAX_COUNT &&
abs(sc->dx_sum) < 1 && abs(sc->dy_sum) < 1 )
dx = dy = 0;
if (ntouch == 2 && sc->sc_status.button != 0) {
- dx = sc->pos_x[sc->finger] - sc->pre_pos_x;
- dy = sc->pos_y[sc->finger] - sc->pre_pos_y;
-
+ dx = sc->pos_x[sc->finger] - sc->pre_pos_x[sc->finger];
+ dy = sc->pos_y[sc->finger] - sc->pre_pos_y[sc->finger];
+
/*
* Ignore movement of switch finger or
* movement from ibt=0 to ibt=1
@@ -1197,11 +1256,18 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
dx, dy, sc->finger);
}
if (sc->dz_count--) {
- rdz = (dy + sc->rdz) % tun.scale_factor;
- sc->dz_sum -= (dy + sc->rdz) / tun.scale_factor;
+ if (sc->scr_mode == WSP_SCR_HOR) {
+ rdz = (dx + sc->rdz) % tun.scale_factor;
+ sc->dz_sum -= (dx + sc->rdz) / tun.scale_factor;
+ } else if (sc->scr_mode == WSP_SCR_VER) {
+ rdz = (dy + sc->rdz) % tun.scale_factor;
+ sc->dz_sum -= (dy + sc->rdz) / tun.scale_factor;
+ }
sc->rdz = rdz;
}
- if ((sc->dz_sum / tun.z_factor) != 0)
+ if (sc->scr_mode == WSP_SCR_VER && (tun.z_factor == 0 || (sc->dz_sum / tun.z_factor) != 0))
+ sc->dz_count = 0;
+ else if (sc->scr_mode == WSP_SCR_HOR && (tun.t_factor == 0 || (sc->dz_sum / tun.t_factor) != 0))
sc->dz_count = 0;
}
rdx = (dx + sc->rdx) % tun.scale_factor;
@@ -1215,28 +1281,49 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
sc->dx_sum += dx;
sc->dy_sum += dy;
- if (ntouch == 2 && sc->sc_status.button == 0) {
- if (sc->scr_mode == WSP_SCR_NONE &&
- abs(sc->dx_sum) + abs(sc->dy_sum) > tun.scr_hor_threshold)
- sc->scr_mode = abs(sc->dx_sum) >
- abs(sc->dy_sum) * 2 ? WSP_SCR_HOR : WSP_SCR_VER;
- DPRINTFN(WSP_LLEVEL_INFO, "scr_mode=%5d, count=%d, dx_sum=%d, dy_sum=%d\n",
- sc->scr_mode, sc->intr_count, sc->dx_sum, sc->dy_sum);
- if (sc->scr_mode == WSP_SCR_HOR)
- sc->dt_sum += dx;
- else
- sc->dt_sum = 0;
+ if (sc->sc_status.button == 0 && ntouch > 0) {
+ if (ntouch == tun.scroll_finger_count || ntouch == tun.horizontal_swipe_finger_count) {
+ if (sc->scr_mode == WSP_SCR_NONE && abs(sc->dx_sum) + abs(sc->dy_sum) > tun.scr_threshold)
+ sc->scr_mode = abs(sc->dx_sum) > abs(sc->dy_sum) * 2 ? WSP_SCR_HOR : WSP_SCR_VER;
+
+ DPRINTFN(WSP_LLEVEL_INFO, "scr_mode=%5d, count=%d, dx_sum=%d, dy_sum=%d\n", sc->scr_mode, sc->intr_count, sc->dx_sum, sc->dy_sum);
+ }
- dx = dy = 0;
- if (sc->dz_count == 0)
- dz = (sc->dz_sum / tun.z_factor) * (tun.z_invert ? -1 : 1);
- if (sc->scr_mode == WSP_SCR_HOR ||
- abs(sc->pos_x[0] - sc->pos_x[1]) > MAX_DISTANCE ||
- abs(sc->pos_y[0] - sc->pos_y[1]) > MAX_DISTANCE)
+ if (ntouch == tun.scroll_finger_count) { /* prefer scrolling over swipe when tun.scroll_finger_count == tun.horizontal_swipe_finger_count */
+ if (sc->scr_mode == WSP_SCR_HOR) {
+ sc->sc_status.button = 1 << 5;
+ }
+ dx = dy = dz = 0;
dz = 0;
+ sc->dt_sum = 0;
+ if (sc->distance <= tun.max_scroll_finger_distance && sc->dz_count == 0) {
+ if (sc->scr_mode == WSP_SCR_VER) {
+ if (tun.z_factor > 0)
+ dz = (sc->dz_sum / tun.z_factor) * (tun.z_invert ? -1 : 1);
+ } else if (sc->scr_mode == WSP_SCR_HOR) {
+ if (tun.t_factor > 0)
+ dz = (sc->dz_sum / tun.t_factor) * (tun.t_invert ? -1 : 1);
+ }
+ }
+ } else if (ntouch == tun.horizontal_swipe_finger_count) {
+ if (sc->scr_mode == WSP_SCR_HOR) {
+ sc->dt_sum += dx * (tun.t_invert ? -1 : 1);
+ } else {
+ sc->dt_sum = 0;
+ }
+ dx = dy = dz = 0;
+ }
}
+
if (ntouch == 3)
dx = dy = dz = 0;
+
+ if (ntouch != tun.horizontal_swipe_finger_count)
+ sc->dt_sum = 0;
+
+ if (ntouch == 0)
+ sc->scr_mode = WSP_SCR_NONE;
+
if (sc->intr_count < WSP_TAP_MAX_COUNT &&
abs(dx) < 3 && abs(dy) < 3 && abs(dz) < 3)
dx = dy = dz = 0;
@@ -1256,12 +1343,12 @@ wsp_intr_callback(struct usb_xfer *xfer, usb_error_t error)
sc->rdz = 0;
}
}
- sc->pre_pos_x = sc->pos_x[0];
- sc->pre_pos_y = sc->pos_y[0];
+ sc->pre_pos_x[0] = sc->pos_x[0];
+ sc->pre_pos_y[0] = sc->pos_y[0];
if (ntouch == 2 && sc->sc_status.button != 0) {
- sc->pre_pos_x = sc->pos_x[sc->finger];
- sc->pre_pos_y = sc->pos_y[sc->finger];
+ sc->pre_pos_x[sc->finger] = sc->pos_x[sc->finger];
+ sc->pre_pos_y[sc->finger] = sc->pos_y[sc->finger];
}
sc->o_ntouch = ntouch;
@@ -1321,6 +1408,7 @@ wsp_add_to_queue(struct wsp_softc *sc, int dx, int dy, int dz,
buf[6] = dz - (dz >> 1);/* dz - (dz / 2) */
buf[7] = (((~buttons_in) >> 3) & MOUSE_SYS_EXTBUTTONS);
}
+
usb_fifo_put_data_linear(sc->sc_fifo.fp[USB_FIFO_RX], buf,
sc->sc_mode.packetsize, 1);
}
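
All of the new wsp knobs above are runtime-tunable sysctls under hw.usb.wsp (e.g. sysctl hw.usb.wsp.scroll_finger_count=3). The gesture classification itself reduces to a small decision rule: once the summed travel exceeds scr_threshold, horizontal mode is chosen only when x-travel clearly dominates y-travel, otherwise vertical. A standalone sketch of that rule (hypothetical helper, mirroring the logic in wsp_intr_callback()):

	enum scr_mode { SCR_NONE, SCR_HOR, SCR_VER };

	/* Sketch of the scroll-mode decision used in wsp_intr_callback(). */
	static enum scr_mode
	classify_scroll(int dx_sum, int dy_sum, int threshold)
	{
		if (abs(dx_sum) + abs(dy_sum) <= threshold)
			return (SCR_NONE);
		/* Horizontal only when x travel dominates y travel 2:1. */
		return (abs(dx_sum) > abs(dy_sum) * 2 ? SCR_HOR : SCR_VER);
	}
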
diff --git a/sys/dev/usb/misc/cp2112.c b/sys/dev/usb/misc/cp2112.c
index 9fccfc5a3fef..201a3ec51ce4 100644
--- a/sys/dev/usb/misc/cp2112.c
+++ b/sys/dev/usb/misc/cp2112.c
@@ -252,7 +252,6 @@ struct cp2112iic_softc {
} io;
};
-static int cp2112_detach(device_t dev);
static int cp2112gpio_detach(device_t dev);
static int cp2112iic_detach(device_t dev);
@@ -328,7 +327,7 @@ cp2112_attach(device_t dev)
goto detach;
}
sc->sc_version = vdata.version;
- sc->sc_gpio_dev = device_add_child(dev, "gpio", -1);
+ sc->sc_gpio_dev = device_add_child(dev, "gpio", DEVICE_UNIT_ANY);
if (sc->sc_gpio_dev != NULL) {
err = device_probe_and_attach(sc->sc_gpio_dev);
if (err != 0) {
@@ -338,7 +337,7 @@ cp2112_attach(device_t dev)
device_printf(dev, "failed to create gpio child\n");
}
- sc->sc_iic_dev = device_add_child(dev, "iichb", -1);
+ sc->sc_iic_dev = device_add_child(dev, "iichb", DEVICE_UNIT_ANY);
if (sc->sc_iic_dev != NULL) {
err = device_probe_and_attach(sc->sc_iic_dev);
if (err != 0) {
@@ -351,23 +350,11 @@ cp2112_attach(device_t dev)
return (0);
detach:
- cp2112_detach(dev);
+ bus_generic_detach(dev);
return (ENXIO);
}
static int
-cp2112_detach(device_t dev)
-{
- int err;
-
- err = bus_generic_detach(dev);
- if (err != 0)
- return (err);
- device_delete_children(dev);
- return (0);
-}
-
-static int
cp2112_gpio_read_pin(device_t dev, uint32_t pin_num, bool *on)
{
struct gpio_get_req data;
@@ -721,11 +708,12 @@ cp2112gpio_attach(device_t dev)
}
}
- sc->busdev = gpiobus_attach_bus(dev);
+ sc->busdev = gpiobus_add_bus(dev);
if (sc->busdev == NULL) {
- device_printf(dev, "gpiobus_attach_bus failed\n");
+ device_printf(dev, "gpiobus_add_bus failed\n");
goto detach;
}
+ bus_attach_children(dev);
return (0);
detach:
@@ -1334,13 +1322,13 @@ cp2112iic_attach(device_t dev)
usbd_transfer_start(sc->xfers[CP2112_INTR_IN]);
mtx_unlock(&sc->io.lock);
- sc->iicbus_dev = device_add_child(dev, "iicbus", -1);
+ sc->iicbus_dev = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus_dev == NULL) {
device_printf(dev, "iicbus creation failed\n");
err = ENXIO;
goto detach;
}
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
detach:
@@ -1358,7 +1346,6 @@ cp2112iic_detach(device_t dev)
err = bus_generic_detach(dev);
if (err != 0)
return (err);
- device_delete_children(dev);
mtx_lock(&sc->io.lock);
usbd_transfer_stop(sc->xfers[CP2112_INTR_IN]);
@@ -1374,7 +1361,7 @@ cp2112iic_detach(device_t dev)
static device_method_t cp2112hid_methods[] = {
DEVMETHOD(device_probe, cp2112_probe),
DEVMETHOD(device_attach, cp2112_attach),
- DEVMETHOD(device_detach, cp2112_detach),
+ DEVMETHOD(device_detach, bus_generic_detach),
DEVMETHOD_END
};
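
The cp2112 hunks (like the usbhid and i2ctinyusb ones) follow the same newbus migration seen throughout this change: the magic unit -1 becomes DEVICE_UNIT_ANY, bus_generic_attach() becomes bus_attach_children(), and explicit device_delete_children() calls disappear because bus_generic_detach() now detaches and deletes child devices itself, which is also why cp2112_detach() could be replaced by bus_generic_detach directly in the method table. A condensed sketch of the resulting pattern (hypothetical "foo" driver with one child):

	static int
	foo_attach(device_t dev)
	{
		struct foo_softc *sc = device_get_softc(dev);

		sc->child = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
		if (sc->child == NULL)
			return (ENXIO);
		bus_attach_children(dev);	/* probe and attach children */
		return (0);
	}

	static int
	foo_detach(device_t dev)
	{
		int error;

		/* Detaches and deletes children; no device_delete_children(). */
		error = bus_generic_detach(dev);
		if (error != 0)
			return (error);
		/* ...driver-specific teardown... */
		return (0);
	}
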
diff --git a/sys/dev/usb/misc/i2ctinyusb.c b/sys/dev/usb/misc/i2ctinyusb.c
index cae20880e441..c6e8f946d78e 100644
--- a/sys/dev/usb/misc/i2ctinyusb.c
+++ b/sys/dev/usb/misc/i2ctinyusb.c
@@ -167,13 +167,13 @@ i2ctinyusb_attach(device_t dev)
sc->sc_udev = uaa->device;
mtx_init(&sc->sc_mtx, "i2ctinyusb lock", NULL, MTX_DEF | MTX_RECURSE);
- sc->iicbus_dev = device_add_child(dev, "iicbus", -1);
+ sc->iicbus_dev = device_add_child(dev, "iicbus", DEVICE_UNIT_ANY);
if (sc->iicbus_dev == NULL) {
device_printf(dev, "iicbus creation failed\n");
err = ENXIO;
goto detach;
}
- err = bus_generic_attach(dev);
+ bus_attach_children(dev);
return (0);
@@ -193,7 +193,6 @@ i2ctinyusb_detach(device_t dev)
err = bus_generic_detach(dev);
if (err != 0)
return (err);
- device_delete_children(dev);
mtx_destroy(&sc->sc_mtx);
diff --git a/sys/dev/usb/misc/udbp.c b/sys/dev/usb/misc/udbp.c
index 61f77837ffbd..1d348557e1a1 100644
--- a/sys/dev/usb/misc/udbp.c
+++ b/sys/dev/usb/misc/udbp.c
@@ -30,7 +30,6 @@
*
*/
-#include <sys/cdefs.h>
/* Driver for arbitrary double bulk pipe devices.
* The driver assumes that there will be the same driver on the other side.
*
diff --git a/sys/dev/usb/misc/ugold.c b/sys/dev/usb/misc/ugold.c
index d322348ee333..1b5f54bc679b 100644
--- a/sys/dev/usb/misc/ugold.c
+++ b/sys/dev/usb/misc/ugold.c
@@ -19,7 +19,6 @@
/* Driver for Microdia's HID based TEMPer Temperature sensor */
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/misc/uled.c b/sys/dev/usb/misc/uled.c
index 6fc325522f20..7608524cddee 100644
--- a/sys/dev/usb/misc/uled.c
+++ b/sys/dev/usb/misc/uled.c
@@ -25,7 +25,6 @@
*
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/net/if_aue.c b/sys/dev/usb/net/if_aue.c
index 3bd6dd50f7b7..84268c60a780 100644
--- a/sys/dev/usb/net/if_aue.c
+++ b/sys/dev/usb/net/if_aue.c
@@ -35,7 +35,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* ADMtek AN986 Pegasus and AN8511 Pegasus II USB to ethernet driver.
* Datasheet is available from http://www.admtek.com.tw.
diff --git a/sys/dev/usb/net/if_axe.c b/sys/dev/usb/net/if_axe.c
index ed40a1e415c3..117a3daa170f 100644
--- a/sys/dev/usb/net/if_axe.c
+++ b/sys/dev/usb/net/if_axe.c
@@ -32,7 +32,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* ASIX Electronics AX88172/AX88178/AX88778 USB 2.0 ethernet driver.
* Used in the LinkSys USB200M and various other adapters.
diff --git a/sys/dev/usb/net/if_axge.c b/sys/dev/usb/net/if_axge.c
index cae1d61feb34..cb8f0fafff45 100644
--- a/sys/dev/usb/net/if_axge.c
+++ b/sys/dev/usb/net/if_axge.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* ASIX Electronics AX88178A/AX88179/AX88179A USB 2.0/3.0 gigabit ethernet
* driver.
diff --git a/sys/dev/usb/net/if_cdce.c b/sys/dev/usb/net/if_cdce.c
index d53d861cfb00..25697c8ec4c9 100644
--- a/sys/dev/usb/net/if_cdce.c
+++ b/sys/dev/usb/net/if_cdce.c
@@ -47,7 +47,6 @@
* http://www.usb.org/developers/devclass_docs/NCM10.zip
*/
-#include <sys/cdefs.h>
#include <sys/gsb_crc32.h>
#include <sys/eventhandler.h>
#include <sys/stdint.h>
@@ -281,7 +280,6 @@ static const STRUCT_USB_HOST_ID cdce_host_devs[] = {
{USB_VPI(USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLA300, CDCE_FLAG_ZAURUS | CDCE_FLAG_NO_UNION)},
{USB_VPI(USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLC700, CDCE_FLAG_ZAURUS | CDCE_FLAG_NO_UNION)},
{USB_VPI(USB_VENDOR_SHARP, USB_PRODUCT_SHARP_SLC750, CDCE_FLAG_ZAURUS | CDCE_FLAG_NO_UNION)},
- {USB_VPI(USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_RTL8156, 0)},
{USB_VENDOR(USB_VENDOR_HUAWEI), USB_IFACE_CLASS(UICLASS_VENDOR),
USB_IFACE_SUBCLASS(0x02), USB_IFACE_PROTOCOL(0x16),
diff --git a/sys/dev/usb/net/if_cdceem.c b/sys/dev/usb/net/if_cdceem.c
index 07ce7328a3ca..b4978e5ea394 100644
--- a/sys/dev/usb/net/if_cdceem.c
+++ b/sys/dev/usb/net/if_cdceem.c
@@ -37,7 +37,6 @@
* https://usb.org/sites/default/files/CDC_EEM10.pdf
*/
-#include <sys/cdefs.h>
#include <sys/gsb_crc32.h>
#include <sys/eventhandler.h>
#include <sys/stdint.h>
diff --git a/sys/dev/usb/net/if_cue.c b/sys/dev/usb/net/if_cue.c
index e7b6d6cbbca4..a65bafee066f 100644
--- a/sys/dev/usb/net/if_cue.c
+++ b/sys/dev/usb/net/if_cue.c
@@ -32,7 +32,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* CATC USB-EL1210A USB to ethernet driver. Used in the CATC Netmate
* adapters and others.
diff --git a/sys/dev/usb/net/if_ipheth.c b/sys/dev/usb/net/if_ipheth.c
index dfb9463cf024..cfa800707391 100644
--- a/sys/dev/usb/net/if_ipheth.c
+++ b/sys/dev/usb/net/if_ipheth.c
@@ -31,7 +31,6 @@
* the Apple iPhone Ethernet driver.
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -56,6 +55,7 @@
#include <net/if_var.h>
#include <dev/usb/usb.h>
+#include <dev/usb/usb_cdc.h>
#include <dev/usb/usbdi.h>
#include <dev/usb/usbdi_util.h>
#include "usbdevs.h"
@@ -82,6 +82,9 @@ static uether_fn_t ipheth_start;
static uether_fn_t ipheth_setmulti;
static uether_fn_t ipheth_setpromisc;
+static ipheth_consumer_t ipheth_consume_read;
+static ipheth_consumer_t ipheth_consume_read_ncm;
+
#ifdef USB_DEBUG
static int ipheth_debug = 0;
@@ -97,7 +100,31 @@ static const struct usb_config ipheth_config[IPHETH_N_TRANSFER] = {
.direction = UE_DIR_RX,
.frames = IPHETH_RX_FRAMES_MAX,
.bufsize = (IPHETH_RX_FRAMES_MAX * MCLBYTES),
- .flags = {.short_frames_ok = 1,.short_xfer_ok = 1,.ext_buffer = 1,},
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1, .ext_buffer = 1,},
+ .callback = ipheth_bulk_read_callback,
+ .timeout = 0, /* no timeout */
+ },
+
+ [IPHETH_BULK_TX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_TX,
+ .frames = IPHETH_TX_FRAMES_MAX,
+ .bufsize = (IPHETH_TX_FRAMES_MAX * IPHETH_BUF_SIZE),
+ .flags = {.force_short_xfer = 1,},
+ .callback = ipheth_bulk_write_callback,
+ .timeout = IPHETH_TX_TIMEOUT,
+ },
+};
+
+static const struct usb_config ipheth_config_ncm[IPHETH_N_TRANSFER] = {
+ [IPHETH_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_RX,
+ .frames = 1,
+ .bufsize = IPHETH_RX_NCM_BUF_SIZE,
+ .flags = {.short_frames_ok = 1, .short_xfer_ok = 1,},
.callback = ipheth_bulk_read_callback,
.timeout = 0, /* no timeout */
},
@@ -205,6 +232,21 @@ ipheth_get_mac_addr(struct ipheth_softc *sc)
return (0);
}
+static bool
+ipheth_enable_ncm(struct ipheth_softc *sc)
+{
+ struct usb_device_request req;
+
+ req.bmRequestType = UT_WRITE_VENDOR_INTERFACE;
+ req.bRequest = IPHETH_CMD_ENABLE_NCM;
+ USETW(req.wValue, 0);
+ req.wIndex[0] = sc->sc_iface_no;
+ req.wIndex[1] = 0;
+ USETW(req.wLength, 0);
+
+ return (usbd_do_request(sc->sc_ue.ue_udev, NULL, &req, NULL) == 0);
+}
+
static int
ipheth_probe(device_t dev)
{
@@ -222,6 +264,7 @@ ipheth_attach(device_t dev)
struct ipheth_softc *sc = device_get_softc(dev);
struct usb_ether *ue = &sc->sc_ue;
struct usb_attach_arg *uaa = device_get_ivars(dev);
+ const struct usb_config *config;
int error;
sc->sc_iface_no = uaa->info.bIfaceIndex;
@@ -236,18 +279,29 @@ ipheth_attach(device_t dev)
device_printf(dev, "Cannot set alternate setting\n");
goto detach;
}
- error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no,
- sc->sc_xfer, ipheth_config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
- if (error) {
- device_printf(dev, "Cannot setup USB transfers\n");
- goto detach;
- }
+
ue->ue_sc = sc;
ue->ue_dev = dev;
ue->ue_udev = uaa->device;
ue->ue_mtx = &sc->sc_mtx;
ue->ue_methods = &ipheth_ue_methods;
+ if (ipheth_enable_ncm(sc)) {
+ config = ipheth_config_ncm;
+ sc->is_ncm = true;
+ sc->consume = &ipheth_consume_read_ncm;
+ } else {
+ config = ipheth_config;
+ sc->consume = &ipheth_consume_read;
+ }
+
+ error = usbd_transfer_setup(uaa->device, &sc->sc_iface_no, sc->sc_xfer,
+ config, IPHETH_N_TRANSFER, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "Cannot setup USB transfers\n");
+ goto detach;
+ }
+
error = ipheth_get_mac_addr(sc);
if (error) {
device_printf(dev, "Cannot get MAC address\n");
@@ -390,12 +444,9 @@ ipheth_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error)
int actlen;
int aframes;
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
-
- DPRINTFN(1, "\n");
-
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTFN(11, "transfer complete: %u bytes in %u frames\n",
actlen, aframes);
@@ -472,53 +523,40 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
uint8_t x;
int actlen;
int aframes;
- int len;
-
- usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
switch (USB_GET_STATE(xfer)) {
case USB_ST_TRANSFERRED:
-
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
DPRINTF("received %u bytes in %u frames\n", actlen, aframes);
- for (x = 0; x != aframes; x++) {
- m = sc->sc_rx_buf[x];
- sc->sc_rx_buf[x] = NULL;
- len = usbd_xfer_frame_len(xfer, x);
-
- if (len < (int)(sizeof(struct ether_header) +
- IPHETH_RX_ADJ)) {
- m_freem(m);
- continue;
- }
-
- m_adj(m, IPHETH_RX_ADJ);
-
- /* queue up mbuf */
- uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
- }
+ for (x = 0; x != aframes; x++)
+ sc->consume(xfer, x);
/* FALLTHROUGH */
case USB_ST_SETUP:
-
- for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
- if (sc->sc_rx_buf[x] == NULL) {
- m = uether_newbuf();
- if (m == NULL)
- goto tr_stall;
-
- /* cancel alignment for ethernet */
- m_adj(m, ETHER_ALIGN);
-
- sc->sc_rx_buf[x] = m;
- } else {
- m = sc->sc_rx_buf[x];
+ if (!sc->is_ncm) {
+ for (x = 0; x != IPHETH_RX_FRAMES_MAX; x++) {
+ if (sc->sc_rx_buf[x] == NULL) {
+ m = uether_newbuf();
+ if (m == NULL)
+ goto tr_stall;
+
+ /* cancel alignment for ethernet */
+ m_adj(m, ETHER_ALIGN);
+
+ sc->sc_rx_buf[x] = m;
+ } else {
+ m = sc->sc_rx_buf[x];
+ }
+ usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
}
-
- usbd_xfer_set_frame_data(xfer, x, m->m_data, m->m_len);
+ usbd_xfer_set_frames(xfer, x);
+ } else {
+ usbd_xfer_set_frame_len(xfer, 0,
+ IPHETH_RX_NCM_BUF_SIZE);
+ usbd_xfer_set_frames(xfer, 1);
}
- /* set number of frames and start hardware */
- usbd_xfer_set_frames(xfer, x);
+
usbd_transfer_submit(xfer);
/* flush any received frames */
uether_rxflush(&sc->sc_ue);
@@ -540,3 +578,86 @@ ipheth_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
break;
}
}
+
+static void
+ipheth_consume_read(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct mbuf *m = sc->sc_rx_buf[x];
+ int len;
+
+ sc->sc_rx_buf[x] = NULL;
+ len = usbd_xfer_frame_len(xfer, x);
+
+ if (len < (int)(sizeof(struct ether_header) + IPHETH_RX_ADJ)) {
+ m_freem(m);
+ return;
+ }
+
+ m_adj(m, IPHETH_RX_ADJ);
+
+ /* queue up mbuf */
+ uether_rxmbuf(&sc->sc_ue, m, len - IPHETH_RX_ADJ);
+}
+
+static void
+ipheth_consume_read_ncm(struct usb_xfer *xfer, int x)
+{
+ struct ipheth_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc = usbd_xfer_get_frame(xfer, 0);
+ struct ncm_data_cache ncm;
+ if_t ifp = uether_getifp(&sc->sc_ue);
+ struct mbuf *new_buf;
+ int i, actlen;
+ uint16_t dp_offset, dp_len;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+
+ if (actlen < IPHETH_NCM_HEADER_SIZE)
+ return;
+
+ usbd_copy_out(pc, 0, &ncm.hdr, sizeof(ncm.hdr));
+
+ if (UGETDW(ncm.hdr.dwSignature) != 0x484D434E)
+ return;
+
+ /* Dpt follows the hdr on iOS */
+ if (UGETW(ncm.hdr.wDptIndex) != (int)(sizeof(struct usb_ncm16_hdr)))
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex), &ncm.dpt, sizeof(ncm.dpt));
+
+ if (UGETDW(ncm.dpt.dwSignature) != 0x304D434E)
+ return;
+
+ usbd_copy_out(pc, UGETW(ncm.hdr.wDptIndex) + sizeof(ncm.dpt), &ncm.dp,
+ sizeof(ncm.dp));
+
+ for (i = 0; i < IPHETH_NCM_DPT_DP_NUM; ++i) {
+ dp_offset = UGETW(ncm.dp[i].wFrameIndex);
+ dp_len = UGETW(ncm.dp[i].wFrameLength);
+
+ /* (3.3.1 USB CDC NCM spec v1.0) */
+ if (dp_offset == 0 && dp_len == 0)
+ break;
+
+ if (dp_offset < IPHETH_NCM_HEADER_SIZE || dp_offset >= actlen ||
+ actlen < (dp_len + dp_offset) ||
+ dp_len < sizeof(struct ether_header)) {
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ continue;
+ }
+ if (dp_len > (MCLBYTES - ETHER_ALIGN)) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+
+ new_buf = uether_newbuf();
+ if (new_buf == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+ usbd_copy_out(pc, dp_offset, new_buf->m_data, dp_len);
+ uether_rxmbuf(&sc->sc_ue, new_buf, dp_len);
+ }
+}
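
The two magic numbers checked in ipheth_consume_read_ncm() are the little-endian NTH16 and NDP16 signatures from CDC NCM 1.0 framing: 0x484D434E is the byte sequence 'N','C','M','H' and 0x304D434E is 'N','C','M','0'. A small sketch making that explicit (hypothetical names):

	#define NCM_NTH16_SIG 0x484D434EUL	/* bytes 'N' 'C' 'M' 'H', LE */
	#define NCM_NDP16_SIG 0x304D434EUL	/* bytes 'N' 'C' 'M' '0', LE */

	/* Sketch: validate an NCM16 transfer block before walking datagrams. */
	static bool
	ncm16_sigs_ok(uint32_t nth_sig, uint32_t ndp_sig)
	{
		return (nth_sig == NCM_NTH16_SIG && ndp_sig == NCM_NDP16_SIG);
	}
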
diff --git a/sys/dev/usb/net/if_iphethvar.h b/sys/dev/usb/net/if_iphethvar.h
index 203bb96b6f22..d637e8f67d01 100644
--- a/sys/dev/usb/net/if_iphethvar.h
+++ b/sys/dev/usb/net/if_iphethvar.h
@@ -41,6 +41,7 @@
#define IPHETH_BUF_SIZE 1514
#define IPHETH_TX_TIMEOUT 5000 /* ms */
+#define IPHETH_RX_NCM_BUF_SIZE 65536
#define IPHETH_RX_FRAMES_MAX 1
#define IPHETH_TX_FRAMES_MAX 8
@@ -55,10 +56,20 @@
#define IPHETH_CTRL_TIMEOUT 5000 /* ms */
#define IPHETH_CMD_GET_MACADDR 0x00
+#define IPHETH_CMD_ENABLE_NCM 0x04
#define IPHETH_CMD_CARRIER_CHECK 0x45
#define IPHETH_CARRIER_ON 0x04
+#define IPHETH_NCM_DPT_DP_NUM 22
+#define IPHETH_NCM_DPT_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_dpt) + \
+ IPHETH_NCM_DPT_DP_NUM * sizeof(struct usb_ncm16_dp))
+#define IPHETH_NCM_HEADER_SIZE \
+ (sizeof(struct usb_ncm16_hdr) + IPHETH_NCM_DPT_HEADER_SIZE)
+
+typedef void (ipheth_consumer_t)(struct usb_xfer *xfer, int idx);
+
enum {
IPHETH_BULK_TX,
IPHETH_BULK_RX,
@@ -76,6 +87,16 @@ struct ipheth_softc {
uint8_t sc_data[IPHETH_CTRL_BUF_SIZE];
uint8_t sc_iface_no;
uint8_t sc_carrier_on;
+
+ bool is_ncm;
+
+ ipheth_consumer_t *consume;
+};
+
+struct ncm_data_cache {
+ struct usb_ncm16_hdr hdr;
+ struct usb_ncm16_dpt dpt;
+ struct usb_ncm16_dp dp[IPHETH_NCM_DPT_DP_NUM];
};
#define IPHETH_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx)
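
Assuming the standard CDC NCM16 structure sizes (12-byte usb_ncm16_hdr, 8-byte usb_ncm16_dpt, 4 bytes per usb_ncm16_dp entry, all padding-free byte arrays), the macros above work out to IPHETH_NCM_DPT_HEADER_SIZE = 8 + 22 * 4 = 96 and IPHETH_NCM_HEADER_SIZE = 12 + 96 = 108 bytes, so any transfer shorter than 108 bytes is dropped early by the NCM RX path. A compile-time check of that assumption could look like:

	/* Sketch: the NCM16 structures are byte arrays, so no padding applies. */
	_Static_assert(IPHETH_NCM_HEADER_SIZE == 108,
	    "unexpected NCM16 header size");
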
diff --git a/sys/dev/usb/net/if_kue.c b/sys/dev/usb/net/if_kue.c
index 9398b7ac98ee..55c531e278fb 100644
--- a/sys/dev/usb/net/if_kue.c
+++ b/sys/dev/usb/net/if_kue.c
@@ -32,7 +32,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* Kawasaki LSI KL5KUSB101B USB to ethernet adapter driver.
*
diff --git a/sys/dev/usb/net/if_mos.c b/sys/dev/usb/net/if_mos.c
index b0ad55e726b5..41881fb778a5 100644
--- a/sys/dev/usb/net/if_mos.c
+++ b/sys/dev/usb/net/if_mos.c
@@ -1,6 +1,6 @@
+/* SPDX-License-Identifier: ISC AND BSD-4-Clause */
+
/*-
- * SPDX-License-Identifier: (BSD-1-Clause AND BSD-4-Clause)
- *
* Copyright (c) 2011 Rick van der Zwet <info@rickvanderzwet.nl>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -80,7 +80,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* Moschip MCS7730/MCS7830/MCS7832 USB to Ethernet controller
* The datasheet is available at the following URL:
diff --git a/sys/dev/usb/net/if_muge.c b/sys/dev/usb/net/if_muge.c
index dd4a89aa8b05..a39343b2e3c9 100644
--- a/sys/dev/usb/net/if_muge.c
+++ b/sys/dev/usb/net/if_muge.c
@@ -29,7 +29,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* USB-To-Ethernet adapter driver for Microchip's LAN78XX and related families.
*
diff --git a/sys/dev/usb/net/if_rue.c b/sys/dev/usb/net/if_rue.c
index 1c11e70e63a6..d1b46887cd20 100644
--- a/sys/dev/usb/net/if_rue.c
+++ b/sys/dev/usb/net/if_rue.c
@@ -58,7 +58,6 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* RealTek RTL8150 USB to fast ethernet controller driver.
* Datasheet is available from
diff --git a/sys/dev/usb/net/if_smsc.c b/sys/dev/usb/net/if_smsc.c
index a59501b6bbff..0ebbf8482446 100644
--- a/sys/dev/usb/net/if_smsc.c
+++ b/sys/dev/usb/net/if_smsc.c
@@ -26,9 +26,8 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
- * SMSC LAN9xxx devices (http://www.smsc.com/)
+ * Microchip LAN9xxx devices (https://www.microchip.com/en-us/product/lan9500a)
*
* The LAN9500 & LAN9500A devices are stand-alone USB to Ethernet chips that
* support USB 2.0 and 10/100 Mbps Ethernet.
@@ -38,7 +37,7 @@
* supports the hub part.
*
* This driver is closely modelled on the Linux driver written and copyrighted
- * by SMSC.
+ * by SMSC (later acquired by Microchip).
*
*
*
diff --git a/sys/dev/usb/net/if_udav.c b/sys/dev/usb/net/if_udav.c
index 6517a4a0e7b7..1554f0a4cd57 100644
--- a/sys/dev/usb/net/if_udav.c
+++ b/sys/dev/usb/net/if_udav.c
@@ -44,7 +44,6 @@
* External PHYs
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/net/if_umb.c b/sys/dev/usb/net/if_umb.c
new file mode 100644
index 000000000000..b1082b117259
--- /dev/null
+++ b/sys/dev/usb/net/if_umb.c
@@ -0,0 +1,2928 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Original copyright (c) 2016 genua mbH (OpenBSD version)
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Copyright (c) 2022 ADISTA SAS (re-write for FreeBSD)
+ *
+ * Re-write for FreeBSD by Pierre Pronchery <pierre@defora.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $NetBSD: if_umb.c,v 1.5 2018/09/20 09:45:16 khorben Exp $
+ * $OpenBSD: if_umb.c,v 1.18 2018/02/19 08:59:52 mpi Exp $
+ */
+
+/*
+ * Mobile Broadband Interface Model specification:
+ * http://www.usb.org/developers/docs/devclass_docs/MBIM10Errata1_073013.zip
+ * Compliance testing guide
+ * http://www.usb.org/developers/docs/devclass_docs/MBIM-Compliance-1.0.pdf
+ */
+
+#include <sys/param.h>
+#include <sys/module.h>
+#include <sys/endian.h>
+#include <sys/kernel.h>
+#include <sys/mbuf.h>
+#include <sys/priv.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/systm.h>
+#include <sys/syslog.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/mutex.h>
+#include <sys/condvar.h>
+#include <sys/taskqueue.h>
+
+#include <machine/_inttypes.h>
+
+#include <net/bpf.h>
+#include <net/if.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/netisr.h>
+#include <net/route.h>
+
+#include <netinet/in.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usb_cdc.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usb_device.h>
+#include <dev/usb/usb_process.h>
+#include <dev/usb/usbdi_util.h>
+#include "usb_if.h"
+
+#include "mbim.h"
+#include "if_umbreg.h"
+
+MALLOC_DECLARE(M_MBIM_CID_CONNECT);
+MALLOC_DEFINE(M_MBIM_CID_CONNECT, "mbim_cid_connect",
+ "Connection parameters for MBIM");
+
+#ifdef UMB_DEBUG
+#define DPRINTF(x...) \
+ do { if (umb_debug) log(LOG_DEBUG, x); } while (0)
+
+#define DPRINTFN(n, x...) \
+ do { if (umb_debug >= (n)) log(LOG_DEBUG, x); } while (0)
+
+#define DDUMPN(n, b, l) \
+ do { \
+ if (umb_debug >= (n)) \
+ umb_dump((b), (l)); \
+ } while (0)
+
+const int umb_debug = 1;
+static char *umb_uuid2str(uint8_t [MBIM_UUID_LEN]);
+static void umb_dump(void *, int);
+
+#else
+#define DPRINTF(x...) do { } while (0)
+#define DPRINTFN(n, x...) do { } while (0)
+#define DDUMPN(n, b, l) do { } while (0)
+#endif
+
+#define DEVNAM(sc) device_get_nameunit((sc)->sc_dev)
+
+/*
+ * State change timeout
+ */
+#define UMB_STATE_CHANGE_TIMEOUT 30
+
+/*
+ * State change flags
+ */
+#define UMB_NS_DONT_DROP 0x0001 /* do not drop below current state */
+#define UMB_NS_DONT_RAISE 0x0002 /* do not raise above current state */
+
+/*
+ * Diagnostic macros
+ */
+const struct umb_valdescr umb_regstates[] = MBIM_REGSTATE_DESCRIPTIONS;
+const struct umb_valdescr umb_dataclasses[] = MBIM_DATACLASS_DESCRIPTIONS;
+const struct umb_valdescr umb_simstate[] = MBIM_SIMSTATE_DESCRIPTIONS;
+const struct umb_valdescr umb_messages[] = MBIM_MESSAGES_DESCRIPTIONS;
+const struct umb_valdescr umb_status[] = MBIM_STATUS_DESCRIPTIONS;
+const struct umb_valdescr umb_cids[] = MBIM_CID_DESCRIPTIONS;
+const struct umb_valdescr umb_pktstate[] = MBIM_PKTSRV_STATE_DESCRIPTIONS;
+const struct umb_valdescr umb_actstate[] = MBIM_ACTIVATION_STATE_DESCRIPTIONS;
+const struct umb_valdescr umb_error[] = MBIM_ERROR_DESCRIPTIONS;
+const struct umb_valdescr umb_pintype[] = MBIM_PINTYPE_DESCRIPTIONS;
+const struct umb_valdescr umb_istate[] = UMB_INTERNAL_STATE_DESCRIPTIONS;
+
+#define umb_regstate(c) umb_val2descr(umb_regstates, (c))
+#define umb_dataclass(c) umb_val2descr(umb_dataclasses, (c))
+#define umb_simstate(s) umb_val2descr(umb_simstate, (s))
+#define umb_request2str(m) umb_val2descr(umb_messages, (m))
+#define umb_status2str(s) umb_val2descr(umb_status, (s))
+#define umb_cid2str(c) umb_val2descr(umb_cids, (c))
+#define umb_packet_state(s) umb_val2descr(umb_pktstate, (s))
+#define umb_activation(s) umb_val2descr(umb_actstate, (s))
+#define umb_error2str(e) umb_val2descr(umb_error, (e))
+#define umb_pin_type(t) umb_val2descr(umb_pintype, (t))
+#define umb_istate(s) umb_val2descr(umb_istate, (s))
+
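The umb_val2descr() helper these macros expand to is not shown in this hunk; in the OpenBSD driver this code derives from, it is a linear scan of a { value, description } table terminated by a NULL description. A hedged sketch of that shape (an assumption, not the verbatim FreeBSD definition):

	struct umb_valdescr {
		int		 val;
		const char	*descr;
	};

	/* Sketch: map a protocol value to a human-readable description. */
	static inline const char *
	umb_val2descr(const struct umb_valdescr *vdp, int val)
	{
		for (; vdp->descr != NULL; vdp++)
			if (vdp->val == val)
				return (vdp->descr);
		return ("unknown");
	}
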
+static device_probe_t umb_probe;
+static device_attach_t umb_attach;
+static device_detach_t umb_detach;
+static device_suspend_t umb_suspend;
+static device_resume_t umb_resume;
+static void umb_attach_task(struct usb_proc_msg *);
+static usb_handle_request_t umb_handle_request;
+static int umb_deactivate(device_t);
+static void umb_ncm_setup(struct umb_softc *, struct usb_config *);
+static void umb_close_bulkpipes(struct umb_softc *);
+static int umb_ioctl(if_t, u_long, caddr_t);
+static void umb_init(void *);
+static void umb_input(if_t, struct mbuf *);
+static int umb_output(if_t, struct mbuf *,
+ const struct sockaddr *, struct route *);
+static void umb_start(if_t);
+static void umb_start_task(struct usb_proc_msg *);
+#if 0
+static void umb_watchdog(if_t);
+#endif
+static void umb_statechg_timeout(void *);
+
+static int umb_mediachange(if_t);
+static void umb_mediastatus(if_t, struct ifmediareq *);
+
+static void umb_add_task(struct umb_softc *sc, usb_proc_callback_t,
+ struct usb_proc_msg *, struct usb_proc_msg *, int);
+static void umb_newstate(struct umb_softc *, enum umb_state, int);
+static void umb_state_task(struct usb_proc_msg *);
+static void umb_up(struct umb_softc *);
+static void umb_down(struct umb_softc *, int);
+
+static void umb_get_response_task(struct usb_proc_msg *);
+
+static void umb_decode_response(struct umb_softc *, void *, int);
+static void umb_handle_indicate_status_msg(struct umb_softc *, void *,
+ int);
+static void umb_handle_opendone_msg(struct umb_softc *, void *, int);
+static void umb_handle_closedone_msg(struct umb_softc *, void *, int);
+static int umb_decode_register_state(struct umb_softc *, void *, int);
+static int umb_decode_devices_caps(struct umb_softc *, void *, int);
+static int umb_decode_subscriber_status(struct umb_softc *, void *, int);
+static int umb_decode_radio_state(struct umb_softc *, void *, int);
+static int umb_decode_pin(struct umb_softc *, void *, int);
+static int umb_decode_packet_service(struct umb_softc *, void *, int);
+static int umb_decode_signal_state(struct umb_softc *, void *, int);
+static int umb_decode_connect_info(struct umb_softc *, void *, int);
+static int umb_decode_ip_configuration(struct umb_softc *, void *, int);
+static void umb_rx(struct umb_softc *);
+static usb_callback_t umb_rxeof;
+static void umb_rxflush(struct umb_softc *);
+static int umb_encap(struct umb_softc *, struct mbuf *, struct usb_xfer *);
+static usb_callback_t umb_txeof;
+static void umb_txflush(struct umb_softc *);
+static void umb_decap(struct umb_softc *, struct usb_xfer *, int);
+
+static usb_error_t umb_send_encap_command(struct umb_softc *, void *, int);
+static int umb_get_encap_response(struct umb_softc *, void *, int *);
+static void umb_ctrl_msg(struct umb_softc *, uint32_t, void *, int);
+
+static void umb_open(struct umb_softc *);
+static void umb_close(struct umb_softc *);
+
+static int umb_setpin(struct umb_softc *, int, int, void *, int, void *,
+ int);
+static void umb_setdataclass(struct umb_softc *);
+static void umb_radio(struct umb_softc *, int);
+static void umb_allocate_cid(struct umb_softc *);
+static void umb_send_fcc_auth(struct umb_softc *);
+static void umb_packet_service(struct umb_softc *, int);
+static void umb_connect(struct umb_softc *);
+static void umb_disconnect(struct umb_softc *);
+static void umb_send_connect(struct umb_softc *, int);
+
+static void umb_qry_ipconfig(struct umb_softc *);
+static void umb_cmd(struct umb_softc *, int, int, const void *, int);
+static void umb_cmd1(struct umb_softc *, int, int, const void *, int, uint8_t *);
+static void umb_command_done(struct umb_softc *, void *, int);
+static void umb_decode_cid(struct umb_softc *, uint32_t, void *, int);
+static void umb_decode_qmi(struct umb_softc *, uint8_t *, int);
+
+static usb_callback_t umb_intr;
+
+static char *umb_ntop(struct sockaddr *);
+
+static const int umb_xfer_tout = USB_DEFAULT_TIMEOUT;
+
+static uint8_t umb_uuid_basic_connect[] = MBIM_UUID_BASIC_CONNECT;
+static uint8_t umb_uuid_context_internet[] = MBIM_UUID_CONTEXT_INTERNET;
+static uint8_t umb_uuid_qmi_mbim[] = MBIM_UUID_QMI_MBIM;
+static uint32_t umb_session_id = 0;
+
+static const struct usb_config umb_config[UMB_N_TRANSFER] = {
+ [UMB_INTR_RX] = {
+ .type = UE_INTERRUPT,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .if_index = 1,
+ .callback = umb_intr,
+ .bufsize = sizeof (struct usb_cdc_notification),
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1},
+ .usb_mode = USB_MODE_HOST,
+ },
+ [UMB_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .if_index = 0,
+ .callback = umb_rxeof,
+ .bufsize = 8 * 1024,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,.ext_buffer = 1},
+ .usb_mode = USB_MODE_HOST,
+ },
+ [UMB_BULK_TX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .if_index = 0,
+ .callback = umb_txeof,
+ .bufsize = 8 * 1024,
+ .flags = {.pipe_bof = 1,.force_short_xfer = 1,.ext_buffer = 1},
+ .timeout = umb_xfer_tout,
+ .usb_mode = USB_MODE_HOST,
+ },
+};
+
+static device_method_t umb_methods[] = {
+ /* USB interface */
+ DEVMETHOD(usb_handle_request, umb_handle_request),
+
+ /* Device interface */
+ DEVMETHOD(device_probe, umb_probe),
+ DEVMETHOD(device_attach, umb_attach),
+ DEVMETHOD(device_detach, umb_detach),
+ DEVMETHOD(device_suspend, umb_suspend),
+ DEVMETHOD(device_resume, umb_resume),
+
+ DEVMETHOD_END
+};
+
+static driver_t umb_driver = {
+ .name = "umb",
+ .methods = umb_methods,
+ .size = sizeof (struct umb_softc),
+};
+
+MALLOC_DEFINE(M_USB_UMB, "USB UMB", "USB MBIM driver");
+
+const int umb_delay = 4000;
+
+/*
+ * These devices require an "FCC Authentication" command.
+ */
+#ifndef USB_VENDOR_SIERRA
+# define USB_VENDOR_SIERRA 0x1199
+#endif
+#ifndef USB_PRODUCT_SIERRA_EM7455
+# define USB_PRODUCT_SIERRA_EM7455 0x9079
+#endif
+const struct usb_device_id umb_fccauth_devs[] = {
+ {
+ .match_flag_vendor = 1,
+ .match_flag_product = 1,
+ .idVendor = USB_VENDOR_SIERRA,
+ .idProduct = USB_PRODUCT_SIERRA_EM7455
+ }
+};
+
+static const uint8_t umb_qmi_alloc_cid[] = {
+ 0x01,
+ 0x0f, 0x00, /* len */
+ 0x00, /* QMUX flags */
+ 0x00, /* service "ctl" */
+ 0x00, /* CID */
+ 0x00, /* QMI flags */
+ 0x01, /* transaction */
+ 0x22, 0x00, /* msg "Allocate CID" */
+ 0x04, 0x00, /* TLV len */
+ 0x01, 0x01, 0x00, 0x02 /* TLV */
+};
+
+static const uint8_t umb_qmi_fcc_auth[] = {
+ 0x01,
+ 0x0c, 0x00, /* len */
+ 0x00, /* QMUX flags */
+ 0x02, /* service "dms" */
+#define UMB_QMI_CID_OFFS 5
+ 0x00, /* CID (filled in later) */
+ 0x00, /* QMI flags */
+ 0x01, 0x00, /* transaction */
+ 0x5f, 0x55, /* msg "Send FCC Authentication" */
+ 0x00, 0x00 /* TLV len */
+};
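
UMB_QMI_CID_OFFS marks the one byte of the FCC-authentication template that cannot be filled statically: the client ID handed back by the "Allocate CID" exchange. A sketch of how the template is presumably consumed (an assumption; the actual send path lives in umb_send_fcc_auth(), which is not part of this hunk):

	/* Sketch (assumption): patch the allocated CID into the template. */
	uint8_t msg[sizeof(umb_qmi_fcc_auth)];

	memcpy(msg, umb_qmi_fcc_auth, sizeof(msg));
	msg[UMB_QMI_CID_OFFS] = sc->sc_cid;	/* from the Allocate CID reply */
	/* ...hand msg to the QMI-over-MBIM command path... */
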
+
+static int
+umb_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ usb_interface_descriptor_t *id;
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if ((id = usbd_get_interface_descriptor(uaa->iface)) == NULL)
+ return (ENXIO);
+
+ /*
+ * If this function implements NCM, check if alternate setting
+ * 1 implements MBIM.
+ */
+ if (id->bInterfaceClass == UICLASS_CDC &&
+ id->bInterfaceSubClass ==
+ UISUBCLASS_NETWORK_CONTROL_MODEL) {
+ id = usbd_get_interface_descriptor(
+ usbd_get_iface(uaa->device,
+ uaa->info.bIfaceIndex + 1));
+ if (id == NULL || id->bAlternateSetting != 1)
+ return (ENXIO);
+ }
+
+#ifndef UISUBCLASS_MOBILE_BROADBAND_INTERFACE_MODEL
+# define UISUBCLASS_MOBILE_BROADBAND_INTERFACE_MODEL 14
+#endif
+ if (id->bInterfaceClass == UICLASS_CDC &&
+ id->bInterfaceSubClass ==
+ UISUBCLASS_MOBILE_BROADBAND_INTERFACE_MODEL &&
+ id->bInterfaceProtocol == 0)
+ return (BUS_PROBE_SPECIFIC);
+
+ return (ENXIO);
+}
+
+static int
+umb_attach(device_t dev)
+{
+ struct umb_softc *sc = device_get_softc(dev);
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct usb_config config[UMB_N_TRANSFER];
+ int v;
+ const struct usb_cdc_union_descriptor *ud;
+ const struct mbim_descriptor *md;
+ int i;
+ usb_interface_descriptor_t *id;
+ struct usb_interface *iface;
+ int data_ifaceno = -1;
+ usb_error_t error;
+
+ sc->sc_dev = dev;
+ sc->sc_udev = uaa->device;
+
+ memcpy(config, umb_config, sizeof (config));
+
+ device_set_usb_desc(dev);
+
+ sc->sc_ctrl_ifaceno = uaa->info.bIfaceNum;
+
+ mtx_init(&sc->sc_mutex, device_get_nameunit(dev), NULL, MTX_DEF);
+
+ /*
+ * Some MBIM hardware does not provide the mandatory CDC Union
+ * Descriptor, so we also look at matching Interface
+ * Association Descriptors to find out the MBIM Data Interface
+ * number.
+ */
+ sc->sc_ver_maj = sc->sc_ver_min = -1;
+ sc->sc_maxpktlen = MBIM_MAXSEGSZ_MINVAL;
+ id = usbd_get_interface_descriptor(uaa->iface);
+
+ ud = usbd_find_descriptor(sc->sc_udev, id, uaa->info.bIfaceIndex,
+ UDESC_CS_INTERFACE, 0xff, UDESCSUB_CDC_UNION, 0xff);
+ if (ud != NULL) {
+ data_ifaceno = ud->bSlaveInterface[0];
+ }
+
+ md = usbd_find_descriptor(sc->sc_udev, id, uaa->info.bIfaceIndex,
+ UDESC_CS_INTERFACE, 0xff, UDESCSUB_MBIM, 0xff);
+ if (md != NULL) {
+ v = UGETW(md->bcdMBIMVersion);
+ sc->sc_ver_maj = MBIM_VER_MAJOR(v);
+ sc->sc_ver_min = MBIM_VER_MINOR(v);
+ sc->sc_ctrl_len = UGETW(md->wMaxControlMessage);
+ /* Never trust a USB device! Could try to exploit us */
+ if (sc->sc_ctrl_len < MBIM_CTRLMSG_MINLEN ||
+ sc->sc_ctrl_len > MBIM_CTRLMSG_MAXLEN) {
+ DPRINTF("control message len %d out of "
+ "bounds [%d .. %d]\n",
+ sc->sc_ctrl_len, MBIM_CTRLMSG_MINLEN,
+ MBIM_CTRLMSG_MAXLEN);
+ /* continue anyway */
+ }
+ sc->sc_maxpktlen = UGETW(md->wMaxSegmentSize);
+ DPRINTFN(2, "ctrl_len=%d, maxpktlen=%d, cap=0x%x\n",
+ sc->sc_ctrl_len, sc->sc_maxpktlen,
+ md->bmNetworkCapabilities);
+ }
+ if (sc->sc_ver_maj < 0) {
+ device_printf(dev, "error: missing MBIM descriptor\n");
+ goto fail;
+ }
+
+ device_printf(dev, "version %d.%d\n", sc->sc_ver_maj,
+ sc->sc_ver_min);
+
+ if (usbd_lookup_id_by_uaa(umb_fccauth_devs, sizeof (umb_fccauth_devs),
+ uaa)) {
+ sc->sc_flags |= UMBFLG_FCC_AUTH_REQUIRED;
+ sc->sc_cid = -1;
+ }
+
+ for (i = 0; i < sc->sc_udev->ifaces_max; i++) {
+ iface = usbd_get_iface(sc->sc_udev, i);
+ id = usbd_get_interface_descriptor(iface);
+ if (id == NULL)
+ break;
+
+ if (id->bInterfaceNumber == data_ifaceno) {
+ sc->sc_data_iface = iface;
+ sc->sc_ifaces_index[0] = i;
+ sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex;
+ break;
+ }
+ }
+ if (sc->sc_data_iface == NULL) {
+ device_printf(dev, "error: no data interface found\n");
+ goto fail;
+ }
+
+ /*
+ * If this is a combined NCM/MBIM function, switch to
+ * alternate setting one to enable MBIM.
+ */
+ id = usbd_get_interface_descriptor(uaa->iface);
+ if (id != NULL && id->bInterfaceClass == UICLASS_CDC &&
+ id->bInterfaceSubClass == UISUBCLASS_NETWORK_CONTROL_MODEL) {
+ device_printf(sc->sc_dev, "combined NCM/MBIM\n");
+ error = usbd_req_set_alt_interface_no(sc->sc_udev,
+ NULL, uaa->info.bIfaceIndex, 1);
+ if (error != USB_ERR_NORMAL_COMPLETION) {
+ device_printf(dev, "error: Could not switch to"
+ " alternate setting for MBIM\n");
+ goto fail;
+ }
+ sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex + 1;
+ }
+
+ if (usb_proc_create(&sc->sc_taskqueue, &sc->sc_mutex,
+ device_get_nameunit(sc->sc_dev),
+ USB_PRI_MED) != 0)
+ goto fail;
+
+ DPRINTFN(2, "ctrl-ifno#%d: data-ifno#%d\n", sc->sc_ctrl_ifaceno,
+ data_ifaceno);
+
+ usb_callout_init_mtx(&sc->sc_statechg_timer, &sc->sc_mutex, 0);
+
+ umb_ncm_setup(sc, config);
+ DPRINTFN(2, "%s: rx/tx size %d/%d\n", DEVNAM(sc),
+ sc->sc_rx_bufsz, sc->sc_tx_bufsz);
+
+ sc->sc_rx_buf = malloc(sc->sc_rx_bufsz, M_DEVBUF, M_WAITOK);
+ sc->sc_tx_buf = malloc(sc->sc_tx_bufsz, M_DEVBUF, M_WAITOK);
+
+ for (i = 0; i != 32; i++) {
+ error = usbd_set_alt_interface_index(sc->sc_udev,
+ sc->sc_ifaces_index[0], i);
+ if (error)
+ break;
+
+ error = usbd_transfer_setup(sc->sc_udev, sc->sc_ifaces_index,
+ sc->sc_xfer, config, UMB_N_TRANSFER,
+ sc, &sc->sc_mutex);
+ if (error == USB_ERR_NORMAL_COMPLETION)
+ break;
+ }
+ if (error || (i == 32)) {
+ device_printf(sc->sc_dev, "error: failed to setup xfers\n");
+ goto fail;
+ }
+
+ sc->sc_resp_buf = malloc(sc->sc_ctrl_len, M_DEVBUF, M_WAITOK);
+ sc->sc_ctrl_msg = malloc(sc->sc_ctrl_len, M_DEVBUF, M_WAITOK);
+
+ sc->sc_info.regstate = MBIM_REGSTATE_UNKNOWN;
+ sc->sc_info.pin_attempts_left = UMB_VALUE_UNKNOWN;
+ sc->sc_info.rssi = UMB_VALUE_UNKNOWN;
+ sc->sc_info.ber = UMB_VALUE_UNKNOWN;
+
+ /* defer attaching the interface */
+ mtx_lock(&sc->sc_mutex);
+ umb_add_task(sc, umb_attach_task,
+ &sc->sc_proc_attach_task[0].hdr,
+ &sc->sc_proc_attach_task[1].hdr, 0);
+ mtx_unlock(&sc->sc_mutex);
+
+ return (0);
+
+fail:
+ umb_detach(sc->sc_dev);
+ return (ENXIO);
+}
+
+static void
+umb_attach_task(struct usb_proc_msg *msg)
+{
+ struct umb_task *task = (struct umb_task *)msg;
+ struct umb_softc *sc = task->sc;
+ if_t ifp;
+
+ mtx_unlock(&sc->sc_mutex);
+
+ CURVNET_SET_QUIET(vnet0);
+
+ /* initialize the interface */
+ sc->sc_if = ifp = if_alloc(IFT_MBIM);
+ if_initname(ifp, "umb", device_get_unit(sc->sc_dev));
+
+ if_setsoftc(ifp, sc);
+ if_setflags(ifp, IFF_SIMPLEX | IFF_MULTICAST | IFF_POINTOPOINT);
+ if_setioctlfn(ifp, umb_ioctl);
+ if_setinputfn(ifp, umb_input);
+ if_setoutputfn(ifp, umb_output);
+ if_setstartfn(ifp, umb_start);
+ if_setinitfn(ifp, umb_init);
+
+#if 0
+ if_setwatchdog(ifp, umb_watchdog);
+#endif
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ ifmedia_init(&sc->sc_im, 0, umb_mediachange, umb_mediastatus);
+ ifmedia_add(&sc->sc_im, IFM_NONE | IFM_AUTO, 0, NULL);
+
+ if_setifheaderlen(ifp, sizeof (struct ncm_header16) +
+ sizeof (struct ncm_pointer16)); /* XXX - IFAPI */
+ /* XXX hard-coded atm */
+ if_setmtu(ifp, MIN(2048, sc->sc_maxpktlen));
+ if_setsendqlen(ifp, ifqmaxlen);
+ if_setsendqready(ifp);
+
+ /* attach the interface */
+ if_attach(ifp);
+ bpfattach(ifp, DLT_RAW, 0);
+
+ sc->sc_attached = 1;
+
+ CURVNET_RESTORE();
+
+ umb_init(sc);
+ mtx_lock(&sc->sc_mutex);
+}
+
+static int
+umb_detach(device_t dev)
+{
+ struct umb_softc *sc = device_get_softc(dev);
+ if_t ifp = GET_IFP(sc);
+
+ usb_proc_drain(&sc->sc_taskqueue);
+
+ mtx_lock(&sc->sc_mutex);
+ if (ifp != NULL && (if_getdrvflags(ifp) & IFF_DRV_RUNNING))
+ umb_down(sc, 1);
+ umb_close(sc);
+ mtx_unlock(&sc->sc_mutex);
+
+ usbd_transfer_unsetup(sc->sc_xfer, UMB_N_TRANSFER);
+
+ free(sc->sc_tx_buf, M_DEVBUF);
+ free(sc->sc_rx_buf, M_DEVBUF);
+
+ usb_callout_drain(&sc->sc_statechg_timer);
+
+ usb_proc_free(&sc->sc_taskqueue);
+
+ mtx_destroy(&sc->sc_mutex);
+
+ free(sc->sc_ctrl_msg, M_DEVBUF);
+ free(sc->sc_resp_buf, M_DEVBUF);
+
+ if (ifp != NULL && if_getsoftc(ifp)) {
+ ifmedia_removeall(&sc->sc_im);
+ }
+ if (sc->sc_attached) {
+ bpfdetach(ifp);
+ if_detach(ifp);
+ if_free(ifp);
+ sc->sc_if = NULL;
+ }
+
+ return (0);
+}
+
+static void
+umb_ncm_setup(struct umb_softc *sc, struct usb_config * config)
+{
+ usb_device_request_t req;
+ struct ncm_ntb_parameters np;
+ usb_error_t error;
+
+ /* Query NTB transfers sizes */
+ req.bmRequestType = UT_READ_CLASS_INTERFACE;
+ req.bRequest = NCM_GET_NTB_PARAMETERS;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, sc->sc_ctrl_ifaceno);
+ USETW(req.wLength, sizeof (np));
+ mtx_lock(&sc->sc_mutex);
+ error = usbd_do_request(sc->sc_udev, &sc->sc_mutex, &req, &np);
+ mtx_unlock(&sc->sc_mutex);
+ if (error == USB_ERR_NORMAL_COMPLETION &&
+ UGETW(np.wLength) == sizeof (np)) {
+ config[UMB_BULK_RX].bufsize = UGETDW(np.dwNtbInMaxSize);
+ config[UMB_BULK_TX].bufsize = UGETDW(np.dwNtbOutMaxSize);
+ }
+ sc->sc_rx_bufsz = config[UMB_BULK_RX].bufsize;
+ sc->sc_tx_bufsz = config[UMB_BULK_TX].bufsize;
+}
+
+static int
+umb_handle_request(device_t dev,
+ const void *preq, void **pptr, uint16_t *plen,
+ uint16_t offset, uint8_t *pstate)
+{
+ /* FIXME really implement */
+
+ return (ENXIO);
+}
+
+static int
+umb_suspend(device_t dev)
+{
+ device_printf(dev, "Suspending\n");
+ return (0);
+}
+
+static int
+umb_resume(device_t dev)
+{
+ device_printf(dev, "Resuming\n");
+ return (0);
+}
+
+static int
+umb_deactivate(device_t dev)
+{
+ struct umb_softc *sc = device_get_softc(dev);
+ if_t ifp = GET_IFP(sc);
+
+ if (ifp != NULL) {
+ if_dead(ifp);
+ }
+ sc->sc_dying = 1;
+ return 0;
+}
+
+static void
+umb_close_bulkpipes(struct umb_softc *sc)
+{
+ if_t ifp = GET_IFP(sc);
+
+ if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
+
+ umb_rxflush(sc);
+ umb_txflush(sc);
+
+ usbd_transfer_stop(sc->sc_xfer[UMB_BULK_RX]);
+ usbd_transfer_stop(sc->sc_xfer[UMB_BULK_TX]);
+}
+
+static int
+umb_ioctl(if_t ifp, u_long cmd, caddr_t data)
+{
+ struct umb_softc *sc = if_getsoftc(ifp);
+ struct in_ifaddr *ia = (struct in_ifaddr *)data;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int error = 0;
+ struct umb_parameter mp;
+
+ if (sc->sc_dying)
+ return EIO;
+
+ switch (cmd) {
+ case SIOCSIFADDR:
+ switch (ia->ia_ifa.ifa_addr->sa_family) {
+ case AF_INET:
+ break;
+#ifdef INET6
+ case AF_INET6:
+ break;
+#endif /* INET6 */
+ default:
+ error = EAFNOSUPPORT;
+ break;
+ }
+ break;
+ case SIOCSIFFLAGS:
+ mtx_lock(&sc->sc_mutex);
+ umb_add_task(sc, umb_state_task,
+ &sc->sc_proc_state_task[0].hdr,
+ &sc->sc_proc_state_task[1].hdr, 1);
+ mtx_unlock(&sc->sc_mutex);
+ break;
+ case SIOCGUMBINFO:
+ error = copyout(&sc->sc_info, ifr->ifr_ifru.ifru_data,
+ sizeof (sc->sc_info));
+ break;
+ case SIOCSUMBPARAM:
+ error = priv_check(curthread, PRIV_NET_SETIFPHYS);
+ if (error)
+ break;
+
+ if ((error = copyin(ifr->ifr_ifru.ifru_data, &mp, sizeof (mp))) != 0)
+ break;
+
+ if ((error = umb_setpin(sc, mp.op, mp.is_puk, mp.pin, mp.pinlen,
+ mp.newpin, mp.newpinlen)) != 0)
+ break;
+
+		if (mp.apnlen < 0 || mp.apnlen > sizeof (sc->sc_info.apn) ||
+		    mp.usernamelen < 0 ||
+		    mp.usernamelen > sizeof (sc->sc_info.username) ||
+		    mp.passwordlen < 0 ||
+		    mp.passwordlen > sizeof (sc->sc_info.password)) {
+			error = EINVAL;
+			break;
+		}
+ sc->sc_roaming = mp.roaming ? 1 : 0;
+ memset(sc->sc_info.apn, 0, sizeof (sc->sc_info.apn));
+ memcpy(sc->sc_info.apn, mp.apn, mp.apnlen);
+ sc->sc_info.apnlen = mp.apnlen;
+ memset(sc->sc_info.username, 0, sizeof (sc->sc_info.username));
+ memcpy(sc->sc_info.username, mp.username, mp.usernamelen);
+ sc->sc_info.usernamelen = mp.usernamelen;
+ memset(sc->sc_info.password, 0, sizeof (sc->sc_info.password));
+ memcpy(sc->sc_info.password, mp.password, mp.passwordlen);
+ sc->sc_info.passwordlen = mp.passwordlen;
+ sc->sc_info.preferredclasses = mp.preferredclasses;
+ umb_setdataclass(sc);
+ break;
+ case SIOCGUMBPARAM:
+ memset(&mp, 0, sizeof (mp));
+ memcpy(mp.apn, sc->sc_info.apn, sc->sc_info.apnlen);
+ mp.apnlen = sc->sc_info.apnlen;
+ mp.roaming = sc->sc_roaming;
+ mp.preferredclasses = sc->sc_info.preferredclasses;
+ error = copyout(&mp, ifr->ifr_ifru.ifru_data, sizeof (mp));
+ break;
+ case SIOCSIFMTU:
+ /* Does this include the NCM headers and tail? */
+ if (ifr->ifr_mtu > if_getmtu(ifp)) {
+ error = EINVAL;
+ break;
+ }
+ if_setmtu(ifp, ifr->ifr_mtu);
+ break;
+ case SIOCAIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ break;
+ case SIOCGIFMEDIA:
+ error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
+ break;
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
+}
+
+static void
+umb_init(void *arg)
+{
+ struct umb_softc *sc = arg;
+
+ mtx_lock(&sc->sc_mutex);
+ umb_add_task(sc, umb_start_task,
+ &sc->sc_proc_start_task[0].hdr,
+ &sc->sc_proc_start_task[1].hdr, 0);
+ mtx_unlock(&sc->sc_mutex);
+}
+
+static void
+umb_input(if_t ifp, struct mbuf *m)
+{
+ struct mbuf *mn;
+ struct epoch_tracker et;
+
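+	/*
+	 * The connection is brought up with MBIM_CONTEXT_IPTYPE_IPV4 (see
+	 * umb_send_connect()), so every datagram is handed to the IPv4
+	 * netisr here.
+	 */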
+ while (m) {
+ mn = m->m_nextpkt;
+ m->m_nextpkt = NULL;
+
+ NET_EPOCH_ENTER(et);
+ BPF_MTAP(ifp, m);
+
+ CURVNET_SET_QUIET(if_getvnet(ifp));
+
+ netisr_dispatch(NETISR_IP, m);
+ m = mn;
+
+ CURVNET_RESTORE();
+ NET_EPOCH_EXIT(et);
+ }
+}
+
+static int
+umb_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
+ struct route *rtp)
+{
+ int error;
+
+ DPRINTFN(10, "%s: enter\n", __func__);
+
+ switch (dst->sa_family) {
+#ifdef INET6
+ case AF_INET6:
+ /* fall through */
+#endif
+ case AF_INET:
+ break;
+
+ /* silently drop dhclient packets */
+ case AF_UNSPEC:
+ m_freem(m);
+ return (0);
+
+ /* drop other packet types */
+ default:
+ m_freem(m);
+ return (EAFNOSUPPORT);
+ }
+
+ /*
+ * Queue message on interface, and start output if interface
+ * not yet active.
+ */
+ error = if_transmit(ifp, m);
+ if (error) {
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ return (ENOBUFS);
+ }
+
+ return (0);
+}
+
+static void
+umb_start(if_t ifp)
+{
+ struct umb_softc *sc = if_getsoftc(ifp);
+
+ if (sc->sc_dying || !(if_getdrvflags(ifp) & IFF_DRV_RUNNING))
+ return;
+
+ mtx_lock(&sc->sc_mutex);
+ usbd_transfer_start(sc->sc_xfer[UMB_BULK_TX]);
+ mtx_unlock(&sc->sc_mutex);
+}
+
+static void
+umb_start_task(struct usb_proc_msg *msg)
+{
+ struct umb_task *task = (struct umb_task *)msg;
+ struct umb_softc *sc = task->sc;
+ if_t ifp = GET_IFP(sc);
+
+ DPRINTF("%s()\n", __func__);
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
+
+ /* start interrupt transfer */
+ usbd_transfer_start(sc->sc_xfer[UMB_INTR_RX]);
+
+ umb_open(sc);
+}
+
+#if 0
+static void
+umb_watchdog(if_t ifp)
+{
+ struct umb_softc *sc = if_getsoftc(ifp);
+
+ if (sc->sc_dying)
+ return;
+
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ device_printf(sc->sc_dev, "watchdog timeout\n");
+ usbd_transfer_drain(sc->sc_xfer[UMB_BULK_TX]);
+ return;
+}
+#endif
+
+static void
+umb_statechg_timeout(void *arg)
+{
+ struct umb_softc *sc = arg;
+ if_t ifp = GET_IFP(sc);
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ if (sc->sc_info.regstate != MBIM_REGSTATE_ROAMING || sc->sc_roaming)
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: state change timeout\n",
+ DEVNAM(sc));
+
+ umb_add_task(sc, umb_state_task,
+ &sc->sc_proc_state_task[0].hdr,
+ &sc->sc_proc_state_task[1].hdr, 0);
+}
+
+static int
+umb_mediachange(if_t ifp)
+{
+ return 0;
+}
+
+static void
+umb_mediastatus(if_t ifp, struct ifmediareq *imr)
+{
+ switch (if_getlinkstate(ifp)) {
+ case LINK_STATE_UP:
+ imr->ifm_status = IFM_AVALID | IFM_ACTIVE;
+ break;
+ case LINK_STATE_DOWN:
+ imr->ifm_status = IFM_AVALID;
+ break;
+ default:
+ imr->ifm_status = 0;
+ break;
+ }
+}
+
+static void
+umb_add_task(struct umb_softc *sc, usb_proc_callback_t callback,
+ struct usb_proc_msg *t0, struct usb_proc_msg *t1, int sync)
+{
+	struct umb_task *task;
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ if (usb_proc_is_gone(&sc->sc_taskqueue)) {
+ return;
+ }
+
+ task = usb_proc_msignal(&sc->sc_taskqueue, t0, t1);
+
+ task->hdr.pm_callback = callback;
+ task->sc = sc;
+
+ if (sync) {
+ usb_proc_mwait(&sc->sc_taskqueue, t0, t1);
+ }
+}
+
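+/*
+ * Move the state machine to "newstate" and schedule the state task.
+ * With UMB_NS_DONT_DROP or UMB_NS_DONT_RAISE the transition is only
+ * accepted in one direction, so a stale response cannot move the state
+ * backwards (or forwards, respectively).
+ */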
+static void
+umb_newstate(struct umb_softc *sc, enum umb_state newstate, int flags)
+{
+ if_t ifp = GET_IFP(sc);
+
+ if (newstate == sc->sc_state)
+ return;
+ if (((flags & UMB_NS_DONT_DROP) && newstate < sc->sc_state) ||
+ ((flags & UMB_NS_DONT_RAISE) && newstate > sc->sc_state))
+ return;
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: state going %s from '%s' to '%s'\n",
+ DEVNAM(sc), newstate > sc->sc_state ? "up" : "down",
+ umb_istate(sc->sc_state), umb_istate(newstate));
+ sc->sc_state = newstate;
+ umb_add_task(sc, umb_state_task,
+ &sc->sc_proc_state_task[0].hdr,
+ &sc->sc_proc_state_task[1].hdr, 0);
+}
+
+static void
+umb_state_task(struct usb_proc_msg *msg)
+{
+ struct umb_task *task = (struct umb_task *)msg;
+ struct umb_softc *sc = task->sc;
+ if_t ifp = GET_IFP(sc);
+ struct ifreq ifr;
+ int state;
+
+ DPRINTF("%s()\n", __func__);
+
+ if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING && !sc->sc_roaming) {
+ /*
+ * Query the registration state until we're with the home
+ * network again.
+ */
+ umb_cmd(sc, MBIM_CID_REGISTER_STATE, MBIM_CMDOP_QRY, NULL, 0);
+ return;
+ }
+
+ if (if_getflags(ifp) & IFF_UP)
+ umb_up(sc);
+ else
+ umb_down(sc, 0);
+
+ state = (sc->sc_state == UMB_S_UP) ? LINK_STATE_UP : LINK_STATE_DOWN;
+ if (if_getlinkstate(ifp) != state) {
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: link state changed from %s to %s\n",
+ DEVNAM(sc),
+ (if_getlinkstate(ifp) == LINK_STATE_UP)
+ ? "up" : "down",
+ (state == LINK_STATE_UP) ? "up" : "down");
+ if_link_state_change(ifp, state); /* XXX - IFAPI */
+ if (state != LINK_STATE_UP) {
+ /*
+ * Purge any existing addresses
+ */
+ memset(sc->sc_info.ipv4dns, 0,
+ sizeof (sc->sc_info.ipv4dns));
+ mtx_unlock(&sc->sc_mutex);
+ CURVNET_SET_QUIET(if_getvnet(ifp));
+ if (in_control(NULL, SIOCGIFADDR, (caddr_t)&ifr, ifp,
+ curthread) == 0 &&
+ satosin(&ifr.ifr_addr)->sin_addr.s_addr !=
+ INADDR_ANY) {
+ in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr,
+ ifp, curthread);
+ }
+ CURVNET_RESTORE();
+ mtx_lock(&sc->sc_mutex);
+ }
+ }
+}
+
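+/*
+ * Advance the connection state machine by one step.  Every decoded
+ * response calls umb_newstate(), which re-queues the state task, so
+ * repeated passes through here walk the device from DOWN up to UP.
+ */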
+static void
+umb_up(struct umb_softc *sc)
+{
+ if_t ifp = GET_IFP(sc);
+
+ switch (sc->sc_state) {
+ case UMB_S_DOWN:
+ DPRINTF("init: opening ...\n");
+ umb_open(sc);
+ break;
+ case UMB_S_OPEN:
+ if (sc->sc_flags & UMBFLG_FCC_AUTH_REQUIRED) {
+ if (sc->sc_cid == -1) {
+ DPRINTF("init: allocating CID ...\n");
+ umb_allocate_cid(sc);
+ break;
+ } else
+ umb_newstate(sc, UMB_S_CID, UMB_NS_DONT_DROP);
+ } else {
+ DPRINTF("init: turning radio on ...\n");
+ umb_radio(sc, 1);
+ break;
+ }
+ /*FALLTHROUGH*/
+ case UMB_S_CID:
+ DPRINTF("init: sending FCC auth ...\n");
+ umb_send_fcc_auth(sc);
+ break;
+ case UMB_S_RADIO:
+ DPRINTF("init: checking SIM state ...\n");
+ umb_cmd(sc, MBIM_CID_SUBSCRIBER_READY_STATUS, MBIM_CMDOP_QRY,
+ NULL, 0);
+ break;
+ case UMB_S_SIMREADY:
+ DPRINTF("init: attaching ...\n");
+ umb_packet_service(sc, 1);
+ break;
+ case UMB_S_ATTACHED:
+ sc->sc_tx_seq = 0;
+ DPRINTF("init: connecting ...\n");
+ umb_connect(sc);
+ break;
+ case UMB_S_CONNECTED:
+ DPRINTF("init: getting IP config ...\n");
+ umb_qry_ipconfig(sc);
+ break;
+ case UMB_S_UP:
+ DPRINTF("init: reached state UP\n");
+		if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) {
+ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0);
+ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
+ umb_rx(sc);
+ }
+ break;
+ }
+ if (sc->sc_state < UMB_S_UP)
+ usb_callout_reset(&sc->sc_statechg_timer,
+ UMB_STATE_CHANGE_TIMEOUT * hz, umb_statechg_timeout, sc);
+ else {
+ usb_callout_stop(&sc->sc_statechg_timer);
+ }
+ return;
+}
+
+static void
+umb_down(struct umb_softc *sc, int force)
+{
+ umb_close_bulkpipes(sc);
+
+ switch (sc->sc_state) {
+ case UMB_S_UP:
+ case UMB_S_CONNECTED:
+ DPRINTF("stop: disconnecting ...\n");
+ umb_disconnect(sc);
+ if (!force)
+ break;
+ /*FALLTHROUGH*/
+ case UMB_S_ATTACHED:
+ DPRINTF("stop: detaching ...\n");
+ umb_packet_service(sc, 0);
+ if (!force)
+ break;
+ /*FALLTHROUGH*/
+ case UMB_S_SIMREADY:
+ case UMB_S_RADIO:
+ DPRINTF("stop: turning radio off ...\n");
+ umb_radio(sc, 0);
+ if (!force)
+ break;
+ /*FALLTHROUGH*/
+ case UMB_S_CID:
+ case UMB_S_OPEN:
+ case UMB_S_DOWN:
+ /* Do not close the device */
+ DPRINTF("stop: reached state DOWN\n");
+ break;
+ }
+ if (force)
+ sc->sc_state = UMB_S_OPEN;
+
+ if (sc->sc_state > UMB_S_OPEN)
+ usb_callout_reset(&sc->sc_statechg_timer,
+ UMB_STATE_CHANGE_TIMEOUT * hz, umb_statechg_timeout, sc);
+ else
+ usb_callout_stop(&sc->sc_statechg_timer);
+}
+
+static void
+umb_get_response_task(struct usb_proc_msg *msg)
+{
+ struct umb_task *task = (struct umb_task *)msg;
+ struct umb_softc *sc = task->sc;
+ int len;
+
+ DPRINTF("%s()\n", __func__);
+ /*
+	 * The function is required to send one RESPONSE_AVAILABLE
+	 * notification for each encapsulated response that is to be
+	 * processed by the host.
+ * But of course, we can receive multiple notifications before the
+ * response task is run.
+ */
+ while (sc->sc_nresp > 0) {
+ --sc->sc_nresp;
+ len = sc->sc_ctrl_len;
+ if (umb_get_encap_response(sc, sc->sc_resp_buf, &len))
+ umb_decode_response(sc, sc->sc_resp_buf, len);
+ }
+}
+
+static void
+umb_decode_response(struct umb_softc *sc, void *response, int len)
+{
+ struct mbim_msghdr *hdr = response;
+ struct mbim_fragmented_msg_hdr *fraghdr;
+ uint32_t type;
+
+ DPRINTFN(3, "got response: len %d\n", len);
+ DDUMPN(4, response, len);
+
+ if (len < sizeof (*hdr) || le32toh(hdr->len) != len) {
+ /*
+ * We should probably cancel a transaction, but since the
+ * message is too short, we cannot decode the transaction
+		 * id (tid) and hence don't know whom to cancel. Must wait
+ * for the timeout.
+ */
+ DPRINTF("received short response (len %d)\n",
+ len);
+ return;
+ }
+
+ /*
+ * XXX FIXME: if message is fragmented, store it until last frag
+ * is received and then re-assemble all fragments.
+ */
+ type = le32toh(hdr->type);
+ switch (type) {
+ case MBIM_INDICATE_STATUS_MSG:
+ case MBIM_COMMAND_DONE:
+ fraghdr = response;
+ if (le32toh(fraghdr->frag.nfrag) != 1) {
+ DPRINTF("discarding fragmented messages\n");
+ return;
+ }
+ break;
+ default:
+ break;
+ }
+
+ DPRINTF("<- rcv %s (tid %u)\n", umb_request2str(type),
+ le32toh(hdr->tid));
+ switch (type) {
+ case MBIM_FUNCTION_ERROR_MSG:
+ case MBIM_HOST_ERROR_MSG:
+ {
+ struct mbim_f2h_hosterr *e;
+ int err;
+
+ if (len >= sizeof (*e)) {
+ e = response;
+ err = le32toh(e->err);
+
+ DPRINTF("%s message, error %s (tid %u)\n",
+ umb_request2str(type),
+ umb_error2str(err), le32toh(hdr->tid));
+ if (err == MBIM_ERROR_NOT_OPENED)
+ umb_newstate(sc, UMB_S_DOWN, 0);
+ }
+ break;
+ }
+ case MBIM_INDICATE_STATUS_MSG:
+ umb_handle_indicate_status_msg(sc, response, len);
+ break;
+ case MBIM_OPEN_DONE:
+ umb_handle_opendone_msg(sc, response, len);
+ break;
+ case MBIM_CLOSE_DONE:
+ umb_handle_closedone_msg(sc, response, len);
+ break;
+ case MBIM_COMMAND_DONE:
+ umb_command_done(sc, response, len);
+ break;
+ default:
+ DPRINTF("discard message %s\n",
+ umb_request2str(type));
+ break;
+ }
+}
+
+static void
+umb_handle_indicate_status_msg(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_f2h_indicate_status *m = data;
+ uint32_t infolen;
+ uint32_t cid;
+
+ if (len < sizeof (*m)) {
+ DPRINTF("discard short %s message\n",
+ umb_request2str(le32toh(m->hdr.type)));
+ return;
+ }
+ if (memcmp(m->devid, umb_uuid_basic_connect, sizeof (m->devid))) {
+ DPRINTF("discard %s message for other UUID '%s'\n",
+ umb_request2str(le32toh(m->hdr.type)),
+ umb_uuid2str(m->devid));
+ return;
+ }
+ infolen = le32toh(m->infolen);
+ if (len < sizeof (*m) + infolen) {
+ DPRINTF("discard truncated %s message (want %d, got %d)\n",
+ umb_request2str(le32toh(m->hdr.type)),
+ (int)sizeof (*m) + infolen, len);
+ return;
+ }
+
+ cid = le32toh(m->cid);
+ DPRINTF("indicate %s status\n", umb_cid2str(cid));
+ umb_decode_cid(sc, cid, m->info, infolen);
+}
+
+static void
+umb_handle_opendone_msg(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_f2h_openclosedone *resp = data;
+ if_t ifp = GET_IFP(sc);
+ uint32_t status;
+
+ status = le32toh(resp->status);
+ if (status == MBIM_STATUS_SUCCESS) {
+ if (sc->sc_maxsessions == 0) {
+ umb_cmd(sc, MBIM_CID_DEVICE_CAPS, MBIM_CMDOP_QRY, NULL,
+ 0);
+ umb_cmd(sc, MBIM_CID_PIN, MBIM_CMDOP_QRY, NULL, 0);
+ umb_cmd(sc, MBIM_CID_REGISTER_STATE, MBIM_CMDOP_QRY,
+ NULL, 0);
+ }
+ umb_newstate(sc, UMB_S_OPEN, UMB_NS_DONT_DROP);
+ } else if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_ERR, "%s: open error: %s\n", DEVNAM(sc),
+ umb_status2str(status));
+ return;
+}
+
+static void
+umb_handle_closedone_msg(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_f2h_openclosedone *resp = data;
+ uint32_t status;
+
+ status = le32toh(resp->status);
+ if (status == MBIM_STATUS_SUCCESS)
+ umb_newstate(sc, UMB_S_DOWN, 0);
+ else
+ DPRINTF("close error: %s\n",
+ umb_status2str(status));
+ return;
+}
+
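+/*
+ * Copy a variable-length information element out of a message, given
+ * its little-endian offset/size descriptor.  The 64-bit arithmetic
+ * avoids integer overflow on hostile offset and size values.
+ */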
+static inline void
+umb_getinfobuf(char *in, int inlen, uint32_t offs, uint32_t sz,
+ void *out, size_t outlen)
+{
+ offs = le32toh(offs);
+ sz = le32toh(sz);
+ memset(out, 0, outlen);
+ if ((uint64_t)inlen >= (uint64_t)offs + (uint64_t)sz)
+ memcpy(out, in + offs, MIN(sz, outlen));
+}
+
+static inline int
+umb_padding(void *data, int len, size_t sz)
+{
+ char *p = data;
+ int np = 0;
+
+ while (len < sz && (len % 4) != 0) {
+ *p++ = '\0';
+ len++;
+ np++;
+ }
+ return np;
+}
+
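+/*
+ * Append a string to the data area of an MBIM message and fill in its
+ * offset/size descriptor.  Offsets are relative to the start of the
+ * message, and the data area is kept padded to 32-bit boundaries.
+ */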
+static inline int
+umb_addstr(void *buf, size_t bufsz, int *offs, void *str, int slen,
+ uint32_t *offsmember, uint32_t *sizemember)
+{
+ if (*offs + slen > bufsz)
+ return 0;
+
+ *sizemember = htole32((uint32_t)slen);
+ if (slen && str) {
+ *offsmember = htole32((uint32_t)*offs);
+ memcpy((char *)buf + *offs, str, slen);
+ *offs += slen;
+ *offs += umb_padding(buf, *offs, bufsz);
+ } else
+ *offsmember = htole32(0);
+ return 1;
+}
+
+static void
+umb_in_len2mask(struct in_addr *mask, int len)
+{
+ int i;
+ u_char *p;
+
+ p = (u_char *)mask;
+ memset(mask, 0, sizeof (*mask));
+ for (i = 0; i < len / 8; i++)
+ p[i] = 0xff;
+ if (len % 8)
+ p[i] = (0xff00 >> (len % 8)) & 0xff;
+}
+
+static int
+umb_decode_register_state(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_registration_state_info *rs = data;
+ if_t ifp = GET_IFP(sc);
+
+ if (len < sizeof (*rs))
+ return 0;
+ sc->sc_info.nwerror = le32toh(rs->nwerror);
+ sc->sc_info.regstate = le32toh(rs->regstate);
+ sc->sc_info.regmode = le32toh(rs->regmode);
+ sc->sc_info.cellclass = le32toh(rs->curcellclass);
+
+ /* XXX should we remember the provider_id? */
+ umb_getinfobuf(data, len, rs->provname_offs, rs->provname_size,
+ sc->sc_info.provider, sizeof (sc->sc_info.provider));
+ umb_getinfobuf(data, len, rs->roamingtxt_offs, rs->roamingtxt_size,
+ sc->sc_info.roamingtxt, sizeof (sc->sc_info.roamingtxt));
+
+ DPRINTFN(2, "%s, availclass 0x%x, class 0x%x, regmode %d\n",
+ umb_regstate(sc->sc_info.regstate),
+ le32toh(rs->availclasses), sc->sc_info.cellclass,
+ sc->sc_info.regmode);
+
+ if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING &&
+ !sc->sc_roaming &&
+ sc->sc_info.activation == MBIM_ACTIVATION_STATE_ACTIVATED) {
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO,
+ "%s: disconnecting from roaming network\n",
+ DEVNAM(sc));
+ umb_disconnect(sc);
+ }
+ return 1;
+}
+
+static int
+umb_decode_devices_caps(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_device_caps *dc = data;
+
+ if (len < sizeof (*dc))
+ return 0;
+ sc->sc_maxsessions = le32toh(dc->max_sessions);
+ sc->sc_info.supportedclasses = le32toh(dc->dataclass);
+ umb_getinfobuf(data, len, dc->devid_offs, dc->devid_size,
+ sc->sc_info.devid, sizeof (sc->sc_info.devid));
+ umb_getinfobuf(data, len, dc->fwinfo_offs, dc->fwinfo_size,
+ sc->sc_info.fwinfo, sizeof (sc->sc_info.fwinfo));
+ umb_getinfobuf(data, len, dc->hwinfo_offs, dc->hwinfo_size,
+ sc->sc_info.hwinfo, sizeof (sc->sc_info.hwinfo));
+ DPRINTFN(2, "max sessions %d, supported classes 0x%x\n",
+ sc->sc_maxsessions, sc->sc_info.supportedclasses);
+ return 1;
+}
+
+static int
+umb_decode_subscriber_status(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_subscriber_ready_info *si = data;
+ if_t ifp = GET_IFP(sc);
+ int npn;
+
+ if (len < sizeof (*si))
+ return 0;
+ sc->sc_info.sim_state = le32toh(si->ready);
+
+ umb_getinfobuf(data, len, si->sid_offs, si->sid_size,
+ sc->sc_info.sid, sizeof (sc->sc_info.sid));
+ umb_getinfobuf(data, len, si->icc_offs, si->icc_size,
+ sc->sc_info.iccid, sizeof (sc->sc_info.iccid));
+
+ npn = le32toh(si->no_pn);
+ if (npn > 0)
+ umb_getinfobuf(data, len, si->pn[0].offs, si->pn[0].size,
+ sc->sc_info.pn, sizeof (sc->sc_info.pn));
+ else
+ memset(sc->sc_info.pn, 0, sizeof (sc->sc_info.pn));
+
+ if (sc->sc_info.sim_state == MBIM_SIMSTATE_LOCKED)
+ sc->sc_info.pin_state = UMB_PIN_REQUIRED;
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: SIM %s\n", DEVNAM(sc),
+ umb_simstate(sc->sc_info.sim_state));
+ if (sc->sc_info.sim_state == MBIM_SIMSTATE_INITIALIZED)
+ umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_DROP);
+ return 1;
+}
+
+static int
+umb_decode_radio_state(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_radio_state_info *rs = data;
+ if_t ifp = GET_IFP(sc);
+
+ if (len < sizeof (*rs))
+ return 0;
+
+ sc->sc_info.hw_radio_on =
+ (le32toh(rs->hw_state) == MBIM_RADIO_STATE_ON) ? 1 : 0;
+ sc->sc_info.sw_radio_on =
+ (le32toh(rs->sw_state) == MBIM_RADIO_STATE_ON) ? 1 : 0;
+ if (!sc->sc_info.hw_radio_on) {
+ device_printf(sc->sc_dev, "radio is disabled by hardware switch\n");
+ /*
+		 * XXX do we need a timer to poll the state of the rfkill switch
+ * or will the device send an unsolicited notification
+ * in case the state changes?
+ */
+ umb_newstate(sc, UMB_S_OPEN, 0);
+ } else if (!sc->sc_info.sw_radio_on) {
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: radio is off\n", DEVNAM(sc));
+ umb_newstate(sc, UMB_S_OPEN, 0);
+ } else
+ umb_newstate(sc, UMB_S_RADIO, UMB_NS_DONT_DROP);
+ return 1;
+}
+
+static int
+umb_decode_pin(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_pin_info *pi = data;
+ if_t ifp = GET_IFP(sc);
+ uint32_t attempts_left;
+
+ if (len < sizeof (*pi))
+ return 0;
+
+ attempts_left = le32toh(pi->remaining_attempts);
+ if (attempts_left != 0xffffffff)
+ sc->sc_info.pin_attempts_left = attempts_left;
+
+ switch (le32toh(pi->state)) {
+ case MBIM_PIN_STATE_UNLOCKED:
+ sc->sc_info.pin_state = UMB_PIN_UNLOCKED;
+ break;
+ case MBIM_PIN_STATE_LOCKED:
+ switch (le32toh(pi->type)) {
+ case MBIM_PIN_TYPE_PIN1:
+ sc->sc_info.pin_state = UMB_PIN_REQUIRED;
+ break;
+ case MBIM_PIN_TYPE_PUK1:
+ sc->sc_info.pin_state = UMB_PUK_REQUIRED;
+ break;
+ case MBIM_PIN_TYPE_PIN2:
+ case MBIM_PIN_TYPE_PUK2:
+ /* Assume that PIN1 was accepted */
+ sc->sc_info.pin_state = UMB_PIN_UNLOCKED;
+ break;
+ }
+ break;
+ }
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: %s state %s (%d attempts left)\n",
+ DEVNAM(sc), umb_pin_type(le32toh(pi->type)),
+ (le32toh(pi->state) == MBIM_PIN_STATE_UNLOCKED) ?
+ "unlocked" : "locked",
+ le32toh(pi->remaining_attempts));
+
+ /*
+ * In case the PIN was set after IFF_UP, retrigger the state machine
+ */
+ umb_add_task(sc, umb_state_task,
+ &sc->sc_proc_state_task[0].hdr,
+ &sc->sc_proc_state_task[1].hdr, 0);
+ return 1;
+}
+
+static int
+umb_decode_packet_service(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_packet_service_info *psi = data;
+ int state, highestclass;
+ uint64_t up_speed, down_speed;
+ if_t ifp = GET_IFP(sc);
+
+ if (len < sizeof (*psi))
+ return 0;
+
+ sc->sc_info.nwerror = le32toh(psi->nwerror);
+ state = le32toh(psi->state);
+ highestclass = le32toh(psi->highest_dataclass);
+ up_speed = le64toh(psi->uplink_speed);
+ down_speed = le64toh(psi->downlink_speed);
+ if (sc->sc_info.packetstate != state ||
+ sc->sc_info.uplink_speed != up_speed ||
+ sc->sc_info.downlink_speed != down_speed) {
+ if (if_getflags(ifp) & IFF_DEBUG) {
+ log(LOG_INFO, "%s: packet service ", DEVNAM(sc));
+ if (sc->sc_info.packetstate != state)
+ log(LOG_INFO, "changed from %s to ",
+ umb_packet_state(sc->sc_info.packetstate));
+ log(LOG_INFO, "%s, class %s, speed: %" PRIu64 " up / %" PRIu64 " down\n",
+ umb_packet_state(state),
+ umb_dataclass(highestclass), up_speed, down_speed);
+ }
+ }
+ sc->sc_info.packetstate = state;
+ sc->sc_info.highestclass = highestclass;
+ sc->sc_info.uplink_speed = up_speed;
+ sc->sc_info.downlink_speed = down_speed;
+
+ if (sc->sc_info.regmode == MBIM_REGMODE_AUTOMATIC) {
+ /*
+		 * For devices using automatic registration mode, just
+		 * proceed once registration has completed.
+ */
+ if (if_getflags(ifp) & IFF_UP) {
+ switch (sc->sc_info.regstate) {
+ case MBIM_REGSTATE_HOME:
+ case MBIM_REGSTATE_ROAMING:
+ case MBIM_REGSTATE_PARTNER:
+ umb_newstate(sc, UMB_S_ATTACHED,
+ UMB_NS_DONT_DROP);
+ break;
+ default:
+ break;
+ }
+ } else
+ umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_RAISE);
+ } else switch (sc->sc_info.packetstate) {
+ case MBIM_PKTSERVICE_STATE_ATTACHED:
+ umb_newstate(sc, UMB_S_ATTACHED, UMB_NS_DONT_DROP);
+ break;
+ case MBIM_PKTSERVICE_STATE_DETACHED:
+ umb_newstate(sc, UMB_S_SIMREADY, UMB_NS_DONT_RAISE);
+ break;
+ }
+ return 1;
+}
+
+static int
+umb_decode_signal_state(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_signal_state *ss = data;
+ if_t ifp = GET_IFP(sc);
+ int rssi;
+
+ if (len < sizeof (*ss))
+ return 0;
+
+ if (le32toh(ss->rssi) == 99)
+ rssi = UMB_VALUE_UNKNOWN;
+ else {
+ rssi = -113 + 2 * le32toh(ss->rssi);
+ if ((if_getflags(ifp) & IFF_DEBUG) && sc->sc_info.rssi != rssi &&
+ sc->sc_state >= UMB_S_CONNECTED)
+ log(LOG_INFO, "%s: rssi %d dBm\n", DEVNAM(sc), rssi);
+ }
+ sc->sc_info.rssi = rssi;
+ sc->sc_info.ber = le32toh(ss->err_rate);
+	if (sc->sc_info.ber == 99)
+ sc->sc_info.ber = UMB_VALUE_UNKNOWN;
+ return 1;
+}
+
+static int
+umb_decode_connect_info(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_connect_info *ci = data;
+ if_t ifp = GET_IFP(sc);
+ int act;
+
+ if (len < sizeof (*ci))
+ return 0;
+
+ if (le32toh(ci->sessionid) != umb_session_id) {
+ DPRINTF("discard connection info for session %u\n",
+ le32toh(ci->sessionid));
+ return 1;
+ }
+ if (memcmp(ci->context, umb_uuid_context_internet,
+ sizeof (ci->context))) {
+ DPRINTF("discard connection info for other context\n");
+ return 1;
+ }
+ act = le32toh(ci->activation);
+ if (sc->sc_info.activation != act) {
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: connection %s\n", DEVNAM(sc),
+ umb_activation(act));
+ if ((if_getflags(ifp) & IFF_DEBUG) &&
+ le32toh(ci->iptype) != MBIM_CONTEXT_IPTYPE_DEFAULT &&
+ le32toh(ci->iptype) != MBIM_CONTEXT_IPTYPE_IPV4)
+ log(LOG_DEBUG, "%s: got iptype %d connection\n",
+ DEVNAM(sc), le32toh(ci->iptype));
+
+ sc->sc_info.activation = act;
+ sc->sc_info.nwerror = le32toh(ci->nwerror);
+
+ if (sc->sc_info.activation == MBIM_ACTIVATION_STATE_ACTIVATED)
+ umb_newstate(sc, UMB_S_CONNECTED, UMB_NS_DONT_DROP);
+ else if (sc->sc_info.activation ==
+ MBIM_ACTIVATION_STATE_DEACTIVATED)
+ umb_newstate(sc, UMB_S_ATTACHED, 0);
+ /* else: other states are purely transitional */
+ }
+ return 1;
+}
+
+static int
+umb_add_inet_config(struct umb_softc *sc, struct in_addr ip, u_int prefixlen,
+ struct in_addr gw)
+{
+ if_t ifp = GET_IFP(sc);
+ struct in_aliasreq ifra;
+ struct sockaddr_in *sin;
+ int rv;
+
+ memset(&ifra, 0, sizeof (ifra));
+ sin = (struct sockaddr_in *)&ifra.ifra_addr;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof (*sin);
+ sin->sin_addr = ip;
+
+ sin = (struct sockaddr_in *)&ifra.ifra_dstaddr;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof (*sin);
+ sin->sin_addr = gw;
+
+ sin = (struct sockaddr_in *)&ifra.ifra_mask;
+ sin->sin_family = AF_INET;
+ sin->sin_len = sizeof (*sin);
+ umb_in_len2mask(&sin->sin_addr,
+ MIN(prefixlen, sizeof (struct in_addr) * 8));
+
+ mtx_unlock(&sc->sc_mutex);
+ CURVNET_SET_QUIET(if_getvnet(ifp));
+ rv = in_control(NULL, SIOCAIFADDR, (caddr_t)&ifra, ifp, curthread);
+ CURVNET_RESTORE();
+ mtx_lock(&sc->sc_mutex);
+ if (rv != 0) {
+ device_printf(sc->sc_dev, "unable to set IPv4 address, error %d\n",
+ rv);
+ return rv;
+ }
+
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: IPv4 addr %s, mask %s, "
+ "gateway %s\n", DEVNAM(sc),
+ umb_ntop(sintosa(&ifra.ifra_addr)),
+ umb_ntop(sintosa(&ifra.ifra_mask)),
+ umb_ntop(sintosa(&ifra.ifra_dstaddr)));
+
+ return 0;
+}
+
+static int
+umb_decode_ip_configuration(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_cid_ip_configuration_info *ic = data;
+ if_t ifp = GET_IFP(sc);
+ uint32_t avail_v4;
+ uint32_t val;
+ int n, i;
+ int off;
+ struct mbim_cid_ipv4_element ipv4elem;
+ struct in_addr addr, gw;
+ int state = -1;
+ int rv;
+
+ if (len < sizeof (*ic))
+ return 0;
+ if (le32toh(ic->sessionid) != umb_session_id) {
+ DPRINTF("ignore IP configuration for session id %d\n",
+ le32toh(ic->sessionid));
+ return 0;
+ }
+
+ /*
+ * IPv4 configuration
+ */
+ avail_v4 = le32toh(ic->ipv4_available);
+ if ((avail_v4 & (MBIM_IPCONF_HAS_ADDRINFO | MBIM_IPCONF_HAS_GWINFO)) ==
+ (MBIM_IPCONF_HAS_ADDRINFO | MBIM_IPCONF_HAS_GWINFO)) {
+ n = le32toh(ic->ipv4_naddr);
+ off = le32toh(ic->ipv4_addroffs);
+
+ if (n == 0 || off + sizeof (ipv4elem) > len)
+ goto tryv6;
+ if (n != 1 && if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: more than one IPv4 addr: %d\n",
+ DEVNAM(sc), n);
+
+ /* Only pick the first one */
+ memcpy(&ipv4elem, (char *)data + off, sizeof (ipv4elem));
+ ipv4elem.prefixlen = le32toh(ipv4elem.prefixlen);
+ addr.s_addr = ipv4elem.addr;
+
+ off = le32toh(ic->ipv4_gwoffs);
+ if (off + sizeof (gw) > len)
+ goto done;
+ memcpy(&gw, (char *)data + off, sizeof (gw));
+
+ rv = umb_add_inet_config(sc, addr, ipv4elem.prefixlen, gw);
+ if (rv == 0)
+ state = UMB_S_UP;
+ }
+
+ memset(sc->sc_info.ipv4dns, 0, sizeof (sc->sc_info.ipv4dns));
+ if (avail_v4 & MBIM_IPCONF_HAS_DNSINFO) {
+ n = le32toh(ic->ipv4_ndnssrv);
+ off = le32toh(ic->ipv4_dnssrvoffs);
+ i = 0;
+ while (n-- > 0) {
+ if (off + sizeof (addr) > len)
+ break;
+ memcpy(&addr, (char *)data + off, sizeof(addr));
+ if (i < UMB_MAX_DNSSRV)
+ sc->sc_info.ipv4dns[i++] = addr;
+ off += sizeof(addr);
+ }
+ }
+
+ if ((avail_v4 & MBIM_IPCONF_HAS_MTUINFO)) {
+ val = le32toh(ic->ipv4_mtu);
+		if (if_getmtu(ifp) != val && val <= sc->sc_maxpktlen) {
+			if_setmtu(ifp, val);
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_INFO, "%s: MTU %d\n", DEVNAM(sc), val);
+ }
+ }
+
+	if ((if_getflags(ifp) & IFF_DEBUG) &&
+	    (le32toh(ic->ipv6_available) & MBIM_IPCONF_HAS_ADDRINFO)) {
+		/* XXX FIXME: IPv6 configuration missing */
+		log(LOG_INFO, "%s: ignoring IPv6 configuration\n", DEVNAM(sc));
+	}
+ if (state != -1)
+ umb_newstate(sc, state, 0);
+
+tryv6:
+done:
+ return 1;
+}
+
+static void
+umb_rx(struct umb_softc *sc)
+{
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ usbd_transfer_start(sc->sc_xfer[UMB_BULK_RX]);
+}
+
+static void
+umb_rxeof(struct usb_xfer *xfer, usb_error_t status)
+{
+ struct umb_softc *sc = usbd_xfer_softc(xfer);
+ if_t ifp = GET_IFP(sc);
+ int actlen;
+ int aframes;
+ int i;
+
+ DPRINTF("%s(%u): state=%u\n", __func__, status, USB_GET_STATE(xfer));
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ usbd_xfer_status(xfer, &actlen, NULL, &aframes, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ DPRINTF("received %u bytes in %u frames\n", actlen, aframes);
+
+ if (actlen == 0) {
+ if (sc->sc_rx_nerr >= 4)
+ /* throttle transfers */
+ usbd_xfer_set_interval(xfer, 500);
+ else
+ sc->sc_rx_nerr++;
+		} else {
+			/* disable throttling */
+			usbd_xfer_set_interval(xfer, 0);
+			sc->sc_rx_nerr = 0;
+		}
+
+		for (i = 0; i < aframes; i++) {
+ umb_decap(sc, xfer, i);
+ }
+
+ /* fall through */
+ case USB_ST_SETUP:
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_rx_buf,
+ sc->sc_rx_bufsz);
+ usbd_xfer_set_frames(xfer, 1);
+ usbd_transfer_submit(xfer);
+
+ umb_rxflush(sc);
+ break;
+ default:
+ DPRINTF("rx error: %s\n", usbd_errstr(status));
+
+ /* disable throttling */
+ usbd_xfer_set_interval(xfer, 0);
+
+ if (status != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ usbd_xfer_set_frames(xfer, 0);
+ usbd_transfer_submit(xfer);
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+		} else if (++sc->sc_rx_nerr > 100) {
+ log(LOG_ERR, "%s: too many rx errors, disabling\n",
+ DEVNAM(sc));
+ umb_deactivate(sc->sc_dev);
+ }
+ break;
+ }
+}
+
+static void
+umb_rxflush(struct umb_softc *sc)
+{
+ if_t ifp = GET_IFP(sc);
+ struct mbuf *m;
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ for (;;) {
+ _IF_DEQUEUE(&sc->sc_rx_queue, m);
+ if (m == NULL)
+ break;
+
+ /*
+ * The USB xfer has been resubmitted so it's safe to unlock now.
+ */
+ mtx_unlock(&sc->sc_mutex);
+ CURVNET_SET_QUIET(if_getvnet(ifp));
+ if (if_getdrvflags(ifp) & IFF_DRV_RUNNING)
+ if_input(ifp, m);
+ else
+ m_freem(m);
+ CURVNET_RESTORE();
+ mtx_lock(&sc->sc_mutex);
+ }
+}
+
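+/*
+ * Wrap the packet in a 16-bit NCM transfer block (NTB): an NTH16
+ * header immediately followed by an NDP16 with one datagram entry
+ * plus the zero terminator, then the datagram itself.
+ */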
+static int
+umb_encap(struct umb_softc *sc, struct mbuf *m, struct usb_xfer *xfer)
+{
+ struct ncm_header16 *hdr;
+ struct ncm_pointer16 *ptr;
+ int len;
+
+	KASSERT(sc->sc_tx_m == NULL,
+	    ("%s: previous tx mbuf still pending", __func__));
+
+ /* All size constraints have been validated by the caller! */
+ hdr = (struct ncm_header16 *)sc->sc_tx_buf;
+ ptr = (struct ncm_pointer16 *)(hdr + 1);
+
+ USETDW(hdr->dwSignature, NCM_HDR16_SIG);
+ USETW(hdr->wHeaderLength, sizeof (*hdr));
+ USETW(hdr->wSequence, sc->sc_tx_seq);
+ sc->sc_tx_seq++;
+ USETW(hdr->wNdpIndex, sizeof (*hdr));
+
+ len = m->m_pkthdr.len;
+ USETDW(ptr->dwSignature, MBIM_NCM_NTH16_SIG(umb_session_id));
+ USETW(ptr->wLength, sizeof (*ptr));
+ USETW(ptr->wNextNdpIndex, 0);
+ USETW(ptr->dgram[0].wDatagramIndex, MBIM_HDR16_LEN);
+ USETW(ptr->dgram[0].wDatagramLen, len);
+ USETW(ptr->dgram[1].wDatagramIndex, 0);
+ USETW(ptr->dgram[1].wDatagramLen, 0);
+
+	KASSERT(len + MBIM_HDR16_LEN <= sc->sc_tx_bufsz,
+	    ("%s: tx buffer overflow", __func__));
+ m_copydata(m, 0, len, (char *)(ptr + 1));
+ sc->sc_tx_m = m;
+ len += MBIM_HDR16_LEN;
+ USETW(hdr->wBlockLength, len);
+
+ usbd_xfer_set_frame_data(xfer, 0, sc->sc_tx_buf, len);
+ usbd_xfer_set_interval(xfer, 0);
+ usbd_xfer_set_frames(xfer, 1);
+
+ DPRINTFN(3, "%s: encap %d bytes\n", DEVNAM(sc), len);
+ DDUMPN(5, sc->sc_tx_buf, len);
+ return 0;
+}
+
+static void
+umb_txeof(struct usb_xfer *xfer, usb_error_t status)
+{
+ struct umb_softc *sc = usbd_xfer_softc(xfer);
+ if_t ifp = GET_IFP(sc);
+ struct mbuf *m;
+
+ DPRINTF("%s(%u) state=%u\n", __func__, status, USB_GET_STATE(xfer));
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
+
+ umb_txflush(sc);
+
+ /* fall through */
+ case USB_ST_SETUP:
+tr_setup:
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
+ break;
+
+ m = if_dequeue(ifp); /* XXX - IFAPI */
+ if (m == NULL)
+ break;
+
+ if (umb_encap(sc, m, xfer)) {
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ umb_txflush(sc);
+ break;
+ }
+
+ BPF_MTAP(ifp, m);
+
+ if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0);
+ usbd_transfer_submit(xfer);
+
+ break;
+
+ default:
+ umb_txflush(sc);
+
+ /* count output errors */
+ if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
+ DPRINTF("tx error: %s\n",
+ usbd_errstr(status));
+
+ if (status != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ break;
+ }
+}
+
+static void
+umb_txflush(struct umb_softc *sc)
+{
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ if (sc->sc_tx_m != NULL) {
+ m_freem(sc->sc_tx_m);
+ sc->sc_tx_m = NULL;
+ }
+}
+
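+/*
+ * Take a received NTB apart: validate the NTH16/NTH32 header, locate
+ * the NDP via its index field and walk the datagram table, queueing
+ * each datagram so it can be delivered once the lock is dropped.
+ */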
+static void
+umb_decap(struct umb_softc *sc, struct usb_xfer *xfer, int frame)
+{
+ if_t ifp = GET_IFP(sc);
+ char *buf;
+ int len;
+ char *dp;
+ struct ncm_header16 *hdr16;
+ struct ncm_header32 *hdr32;
+ struct ncm_pointer16 *ptr16;
+ struct ncm_pointer16_dgram *dgram16;
+ struct ncm_pointer32_dgram *dgram32;
+ uint32_t hsig, psig;
+ int hlen, blen;
+ int ptrlen, ptroff, dgentryoff;
+ uint32_t doff, dlen;
+ struct mbuf *m;
+
+ usbd_xfer_frame_data(xfer, frame, (void **)&buf, &len);
+ DPRINTFN(4, "recv %d bytes\n", len);
+ DDUMPN(5, buf, len);
+ if (len < sizeof (*hdr16))
+ goto toosmall;
+
+ hdr16 = (struct ncm_header16 *)buf;
+ hsig = UGETDW(hdr16->dwSignature);
+ hlen = UGETW(hdr16->wHeaderLength);
+ if (len < hlen)
+ goto toosmall;
+ if (len > sc->sc_rx_bufsz) {
+ DPRINTF("packet too large (%d)\n", len);
+ goto fail;
+ }
+ switch (hsig) {
+ case NCM_HDR16_SIG:
+ blen = UGETW(hdr16->wBlockLength);
+ ptroff = UGETW(hdr16->wNdpIndex);
+ if (hlen != sizeof (*hdr16)) {
+ DPRINTF("%s: bad header len %d for NTH16 (exp %zu)\n",
+ DEVNAM(sc), hlen, sizeof (*hdr16));
+ goto fail;
+ }
+ break;
+ case NCM_HDR32_SIG:
+ hdr32 = (struct ncm_header32 *)hdr16;
+ blen = UGETDW(hdr32->dwBlockLength);
+ ptroff = UGETDW(hdr32->dwNdpIndex);
+ if (hlen != sizeof (*hdr32)) {
+ DPRINTF("%s: bad header len %d for NTH32 (exp %zu)\n",
+ DEVNAM(sc), hlen, sizeof (*hdr32));
+ goto fail;
+ }
+ break;
+ default:
+ DPRINTF("%s: unsupported NCM header signature (0x%08x)\n",
+ DEVNAM(sc), hsig);
+ goto fail;
+ }
+ if (len < blen) {
+ DPRINTF("%s: bad NTB len (%d) for %d bytes of data\n",
+ DEVNAM(sc), blen, len);
+ goto fail;
+ }
+
+ if (len < ptroff)
+ goto toosmall;
+ ptr16 = (struct ncm_pointer16 *)(buf + ptroff);
+ psig = UGETDW(ptr16->dwSignature);
+ ptrlen = UGETW(ptr16->wLength);
+ if ((uint64_t)len < (uint64_t)ptrlen + (uint64_t)ptroff)
+ goto toosmall;
+ if (!MBIM_NCM_NTH16_ISISG(psig) && !MBIM_NCM_NTH32_ISISG(psig)) {
+ DPRINTF("%s: unsupported NCM pointer signature (0x%08x)\n",
+ DEVNAM(sc), psig);
+ goto fail;
+ }
+
+ switch (hsig) {
+ case NCM_HDR16_SIG:
+ dgentryoff = offsetof(struct ncm_pointer16, dgram);
+ break;
+ case NCM_HDR32_SIG:
+ dgentryoff = offsetof(struct ncm_pointer32, dgram);
+ break;
+ default:
+ goto fail;
+ }
+
+ while (dgentryoff < ptrlen) {
+ switch (hsig) {
+ case NCM_HDR16_SIG:
+ if (ptroff + dgentryoff < sizeof (*dgram16))
+ goto done;
+ dgram16 = (struct ncm_pointer16_dgram *)
+ (buf + ptroff + dgentryoff);
+ dgentryoff += sizeof (*dgram16);
+ dlen = UGETW(dgram16->wDatagramLen);
+ doff = UGETW(dgram16->wDatagramIndex);
+ break;
+ case NCM_HDR32_SIG:
+ if (ptroff + dgentryoff < sizeof (*dgram32))
+ goto done;
+ dgram32 = (struct ncm_pointer32_dgram *)
+ (buf + ptroff + dgentryoff);
+ dgentryoff += sizeof (*dgram32);
+ dlen = UGETDW(dgram32->dwDatagramLen);
+ doff = UGETDW(dgram32->dwDatagramIndex);
+ break;
+ default:
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+ goto done;
+ }
+
+ /* Terminating zero entry */
+ if (dlen == 0 || doff == 0)
+ break;
+ if ((uint64_t)len < (uint64_t)dlen + (uint64_t)doff) {
+ /* Skip giant datagram but continue processing */
+ DPRINTF("%s: datagram too large (%d @ off %d)\n",
+ DEVNAM(sc), dlen, doff);
+ continue;
+ }
+
+ dp = buf + doff;
+ DPRINTFN(3, "%s: decap %d bytes\n", DEVNAM(sc), dlen);
+ m = m_devget(dp, dlen, 0, ifp, NULL);
+ if (m == NULL) {
+ if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
+ continue;
+ }
+
+ /* enqueue for later when the lock can be released */
+ _IF_ENQUEUE(&sc->sc_rx_queue, m);
+
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
+
+ }
+done:
+ sc->sc_rx_nerr = 0;
+ return;
+toosmall:
+ DPRINTF("%s: packet too small (%d)\n", DEVNAM(sc), len);
+fail:
+ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
+}
+
+static usb_error_t
+umb_send_encap_command(struct umb_softc *sc, void *data, int len)
+{
+ usb_device_request_t req;
+
+ if (len > sc->sc_ctrl_len)
+ return USB_ERR_INVAL;
+
+ /* XXX FIXME: if (total len > sc->sc_ctrl_len) => must fragment */
+ req.bmRequestType = UT_WRITE_CLASS_INTERFACE;
+ req.bRequest = UCDC_SEND_ENCAPSULATED_COMMAND;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, sc->sc_ctrl_ifaceno);
+ USETW(req.wLength, len);
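+	/*
+	 * Drop the lock around the delay; the pause between consecutive
+	 * control transfers presumably accommodates firmware that cannot
+	 * keep up with back-to-back encapsulated commands.
+	 */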
+ mtx_unlock(&sc->sc_mutex);
+ DELAY(umb_delay);
+ mtx_lock(&sc->sc_mutex);
+ return usbd_do_request_flags(sc->sc_udev, &sc->sc_mutex, &req, data, 0,
+ NULL, umb_xfer_tout);
+}
+
+static int
+umb_get_encap_response(struct umb_softc *sc, void *buf, int *len)
+{
+ usb_device_request_t req;
+ usb_error_t err;
+ uint16_t l = *len;
+
+ req.bmRequestType = UT_READ_CLASS_INTERFACE;
+ req.bRequest = UCDC_GET_ENCAPSULATED_RESPONSE;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, sc->sc_ctrl_ifaceno);
+ USETW(req.wLength, l);
+ /* XXX FIXME: re-assemble fragments */
+
+ mtx_unlock(&sc->sc_mutex);
+ DELAY(umb_delay);
+ mtx_lock(&sc->sc_mutex);
+ err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mutex, &req, buf,
+ USB_SHORT_XFER_OK, &l, umb_xfer_tout);
+ if (err == USB_ERR_NORMAL_COMPLETION) {
+ *len = l;
+ return 1;
+ }
+ DPRINTF("ctrl recv: %s\n", usbd_errstr(err));
+ return 0;
+}
+
+static void
+umb_ctrl_msg(struct umb_softc *sc, uint32_t req, void *data, int len)
+{
+ if_t ifp = GET_IFP(sc);
+ uint32_t tid;
+ struct mbim_msghdr *hdr = data;
+ usb_error_t err;
+
+ if (sc->sc_dying)
+ return;
+ if (len < sizeof (*hdr))
+ return;
+ tid = ++sc->sc_tid;
+
+ hdr->type = htole32(req);
+ hdr->len = htole32(len);
+ hdr->tid = htole32(tid);
+
+#ifdef UMB_DEBUG
+ if (umb_debug) {
+ const char *op, *str;
+ if (req == MBIM_COMMAND_MSG) {
+ struct mbim_h2f_cmd *c = data;
+ if (le32toh(c->op) == MBIM_CMDOP_SET)
+ op = "set";
+ else
+ op = "qry";
+ str = umb_cid2str(le32toh(c->cid));
+ } else {
+ op = "snd";
+ str = umb_request2str(req);
+ }
+ DPRINTF("-> %s %s (tid %u)\n", op, str, tid);
+ }
+#endif
+ err = umb_send_encap_command(sc, data, len);
+ if (err != USB_ERR_NORMAL_COMPLETION) {
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_ERR, "%s: send %s msg (tid %u) failed: %s\n",
+ DEVNAM(sc), umb_request2str(req), tid,
+ usbd_errstr(err));
+
+ /* will affect other transactions, too */
+ usbd_transfer_stop(sc->sc_xfer[UMB_INTR_RX]);
+ } else {
+ DPRINTFN(2, "sent %s (tid %u)\n",
+ umb_request2str(req), tid);
+ DDUMPN(3, data, len);
+ }
+ return;
+}
+
+static void
+umb_open(struct umb_softc *sc)
+{
+ struct mbim_h2f_openmsg msg;
+
+ memset(&msg, 0, sizeof (msg));
+ msg.maxlen = htole32(sc->sc_ctrl_len);
+ umb_ctrl_msg(sc, MBIM_OPEN_MSG, &msg, sizeof (msg));
+ return;
+}
+
+static void
+umb_close(struct umb_softc *sc)
+{
+ struct mbim_h2f_closemsg msg;
+
+ memset(&msg, 0, sizeof (msg));
+ umb_ctrl_msg(sc, MBIM_CLOSE_MSG, &msg, sizeof (msg));
+}
+
+static int
+umb_setpin(struct umb_softc *sc, int op, int is_puk, void *pin, int pinlen,
+ void *newpin, int newpinlen)
+{
+ struct mbim_cid_pin cp;
+ int off;
+
+ if (pinlen == 0)
+ return 0;
+ if (pinlen < 0 || pinlen > MBIM_PIN_MAXLEN ||
+ newpinlen < 0 || newpinlen > MBIM_PIN_MAXLEN ||
+ op < 0 || op > MBIM_PIN_OP_CHANGE ||
+ (is_puk && op != MBIM_PIN_OP_ENTER))
+ return EINVAL;
+
+ memset(&cp, 0, sizeof (cp));
+ cp.type = htole32(is_puk ? MBIM_PIN_TYPE_PUK1 : MBIM_PIN_TYPE_PIN1);
+
+ off = offsetof(struct mbim_cid_pin, data);
+ if (!umb_addstr(&cp, sizeof (cp), &off, pin, pinlen,
+ &cp.pin_offs, &cp.pin_size))
+ return EINVAL;
+
+ cp.op = htole32(op);
+ if (newpinlen) {
+ if (!umb_addstr(&cp, sizeof (cp), &off, newpin, newpinlen,
+ &cp.newpin_offs, &cp.newpin_size))
+ return EINVAL;
+ } else {
+ if ((op == MBIM_PIN_OP_CHANGE) || is_puk)
+ return EINVAL;
+ if (!umb_addstr(&cp, sizeof (cp), &off, NULL, 0,
+ &cp.newpin_offs, &cp.newpin_size))
+ return EINVAL;
+ }
+ mtx_lock(&sc->sc_mutex);
+ umb_cmd(sc, MBIM_CID_PIN, MBIM_CMDOP_SET, &cp, off);
+ mtx_unlock(&sc->sc_mutex);
+ return 0;
+}
+
+static void
+umb_setdataclass(struct umb_softc *sc)
+{
+ struct mbim_cid_registration_state rs;
+ uint32_t classes;
+
+ if (sc->sc_info.supportedclasses == MBIM_DATACLASS_NONE)
+ return;
+
+ memset(&rs, 0, sizeof (rs));
+ rs.regaction = htole32(MBIM_REGACTION_AUTOMATIC);
+ classes = sc->sc_info.supportedclasses;
+ if (sc->sc_info.preferredclasses != MBIM_DATACLASS_NONE)
+ classes &= sc->sc_info.preferredclasses;
+ rs.data_class = htole32(classes);
+ mtx_lock(&sc->sc_mutex);
+ umb_cmd(sc, MBIM_CID_REGISTER_STATE, MBIM_CMDOP_SET, &rs, sizeof (rs));
+ mtx_unlock(&sc->sc_mutex);
+}
+
+static void
+umb_radio(struct umb_softc *sc, int on)
+{
+ struct mbim_cid_radio_state s;
+
+ DPRINTF("set radio %s\n", on ? "on" : "off");
+ memset(&s, 0, sizeof (s));
+ s.state = htole32(on ? MBIM_RADIO_STATE_ON : MBIM_RADIO_STATE_OFF);
+ umb_cmd(sc, MBIM_CID_RADIO_STATE, MBIM_CMDOP_SET, &s, sizeof (s));
+}
+
+static void
+umb_allocate_cid(struct umb_softc *sc)
+{
+ umb_cmd1(sc, MBIM_CID_DEVICE_CAPS, MBIM_CMDOP_SET,
+ umb_qmi_alloc_cid, sizeof (umb_qmi_alloc_cid), umb_uuid_qmi_mbim);
+}
+
+static void
+umb_send_fcc_auth(struct umb_softc *sc)
+{
+ uint8_t fccauth[sizeof (umb_qmi_fcc_auth)];
+
+ if (sc->sc_cid == -1) {
+ DPRINTF("missing CID, cannot send FCC auth\n");
+ umb_allocate_cid(sc);
+ return;
+ }
+ memcpy(fccauth, umb_qmi_fcc_auth, sizeof (fccauth));
+ fccauth[UMB_QMI_CID_OFFS] = sc->sc_cid;
+ umb_cmd1(sc, MBIM_CID_DEVICE_CAPS, MBIM_CMDOP_SET,
+ fccauth, sizeof (fccauth), umb_uuid_qmi_mbim);
+}
+
+static void
+umb_packet_service(struct umb_softc *sc, int attach)
+{
+ struct mbim_cid_packet_service s;
+
+ DPRINTF("%s packet service\n",
+ attach ? "attach" : "detach");
+ memset(&s, 0, sizeof (s));
+ s.action = htole32(attach ?
+ MBIM_PKTSERVICE_ACTION_ATTACH : MBIM_PKTSERVICE_ACTION_DETACH);
+ umb_cmd(sc, MBIM_CID_PACKET_SERVICE, MBIM_CMDOP_SET, &s, sizeof (s));
+}
+
+static void
+umb_connect(struct umb_softc *sc)
+{
+ if_t ifp = GET_IFP(sc);
+
+ if (sc->sc_info.regstate == MBIM_REGSTATE_ROAMING && !sc->sc_roaming) {
+ log(LOG_INFO, "%s: connection disabled in roaming network\n",
+ DEVNAM(sc));
+ return;
+ }
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: connecting ...\n", DEVNAM(sc));
+ umb_send_connect(sc, MBIM_CONNECT_ACTIVATE);
+}
+
+static void
+umb_disconnect(struct umb_softc *sc)
+{
+ if_t ifp = GET_IFP(sc);
+
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: disconnecting ...\n", DEVNAM(sc));
+ umb_send_connect(sc, MBIM_CONNECT_DEACTIVATE);
+}
+
+static void
+umb_send_connect(struct umb_softc *sc, int command)
+{
+ struct mbim_cid_connect *c;
+ int off;
+
+ /* Too large for the stack */
+ mtx_unlock(&sc->sc_mutex);
+ c = malloc(sizeof (*c), M_MBIM_CID_CONNECT, M_WAITOK | M_ZERO);
+ mtx_lock(&sc->sc_mutex);
+ c->sessionid = htole32(umb_session_id);
+ c->command = htole32(command);
+ off = offsetof(struct mbim_cid_connect, data);
+ if (!umb_addstr(c, sizeof (*c), &off, sc->sc_info.apn,
+ sc->sc_info.apnlen, &c->access_offs, &c->access_size))
+ goto done;
+ if (!umb_addstr(c, sizeof (*c), &off, sc->sc_info.username,
+ sc->sc_info.usernamelen, &c->user_offs, &c->user_size))
+ goto done;
+ if (!umb_addstr(c, sizeof (*c), &off, sc->sc_info.password,
+ sc->sc_info.passwordlen, &c->passwd_offs, &c->passwd_size))
+ goto done;
+ c->authprot = htole32(MBIM_AUTHPROT_NONE);
+ c->compression = htole32(MBIM_COMPRESSION_NONE);
+ c->iptype = htole32(MBIM_CONTEXT_IPTYPE_IPV4);
+ memcpy(c->context, umb_uuid_context_internet, sizeof (c->context));
+ umb_cmd(sc, MBIM_CID_CONNECT, MBIM_CMDOP_SET, c, off);
+done:
+ free(c, M_MBIM_CID_CONNECT);
+ return;
+}
+
+static void
+umb_qry_ipconfig(struct umb_softc *sc)
+{
+ struct mbim_cid_ip_configuration_info ipc;
+
+ memset(&ipc, 0, sizeof (ipc));
+ ipc.sessionid = htole32(umb_session_id);
+ umb_cmd(sc, MBIM_CID_IP_CONFIGURATION, MBIM_CMDOP_QRY,
+ &ipc, sizeof (ipc));
+}
+
+static void
+umb_cmd(struct umb_softc *sc, int cid, int op, const void *data, int len)
+{
+ umb_cmd1(sc, cid, op, data, len, umb_uuid_basic_connect);
+}
+
+static void
+umb_cmd1(struct umb_softc *sc, int cid, int op, const void *data, int len,
+ uint8_t *uuid)
+{
+ struct mbim_h2f_cmd *cmd;
+ int totlen;
+
+ /* XXX FIXME support sending fragments */
+ if (sizeof (*cmd) + len > sc->sc_ctrl_len) {
+ DPRINTF("set %s msg too long: cannot send\n",
+ umb_cid2str(cid));
+ return;
+ }
+ cmd = sc->sc_ctrl_msg;
+ memset(cmd, 0, sizeof (*cmd));
+ cmd->frag.nfrag = htole32(1);
+ memcpy(cmd->devid, uuid, sizeof (cmd->devid));
+ cmd->cid = htole32(cid);
+ cmd->op = htole32(op);
+ cmd->infolen = htole32(len);
+ totlen = sizeof (*cmd);
+ if (len > 0) {
+ memcpy(cmd + 1, data, len);
+ totlen += len;
+ }
+ umb_ctrl_msg(sc, MBIM_COMMAND_MSG, cmd, totlen);
+}
+
+static void
+umb_command_done(struct umb_softc *sc, void *data, int len)
+{
+ struct mbim_f2h_cmddone *cmd = data;
+ if_t ifp = GET_IFP(sc);
+ uint32_t status;
+ uint32_t cid;
+ uint32_t infolen;
+ int qmimsg = 0;
+
+ if (len < sizeof (*cmd)) {
+ DPRINTF("discard short %s message\n",
+ umb_request2str(le32toh(cmd->hdr.type)));
+ return;
+ }
+ cid = le32toh(cmd->cid);
+ if (memcmp(cmd->devid, umb_uuid_basic_connect, sizeof (cmd->devid))) {
+ if (memcmp(cmd->devid, umb_uuid_qmi_mbim,
+ sizeof (cmd->devid))) {
+ DPRINTF("discard %s message for other UUID '%s'\n",
+ umb_request2str(le32toh(cmd->hdr.type)),
+ umb_uuid2str(cmd->devid));
+ return;
+ } else
+ qmimsg = 1;
+ }
+
+ status = le32toh(cmd->status);
+ switch (status) {
+ case MBIM_STATUS_SUCCESS:
+ break;
+ case MBIM_STATUS_NOT_INITIALIZED:
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_ERR, "%s: SIM not initialized (PIN missing)\n",
+ DEVNAM(sc));
+ return;
+ case MBIM_STATUS_PIN_REQUIRED:
+ sc->sc_info.pin_state = UMB_PIN_REQUIRED;
+ /*FALLTHROUGH*/
+ default:
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_ERR, "%s: set/qry %s failed: %s\n", DEVNAM(sc),
+ umb_cid2str(cid), umb_status2str(status));
+ return;
+ }
+
+ infolen = le32toh(cmd->infolen);
+ if (len < sizeof (*cmd) + infolen) {
+ DPRINTF("discard truncated %s message (want %d, got %d)\n",
+ umb_cid2str(cid),
+ (int)sizeof (*cmd) + infolen, len);
+ return;
+ }
+ if (qmimsg) {
+ if (sc->sc_flags & UMBFLG_FCC_AUTH_REQUIRED)
+ umb_decode_qmi(sc, cmd->info, infolen);
+ } else {
+ DPRINTFN(2, "set/qry %s done\n",
+ umb_cid2str(cid));
+ umb_decode_cid(sc, cid, cmd->info, infolen);
+ }
+}
+
+static void
+umb_decode_cid(struct umb_softc *sc, uint32_t cid, void *data, int len)
+{
+ int ok = 1;
+
+ switch (cid) {
+ case MBIM_CID_DEVICE_CAPS:
+ ok = umb_decode_devices_caps(sc, data, len);
+ break;
+ case MBIM_CID_SUBSCRIBER_READY_STATUS:
+ ok = umb_decode_subscriber_status(sc, data, len);
+ break;
+ case MBIM_CID_RADIO_STATE:
+ ok = umb_decode_radio_state(sc, data, len);
+ break;
+ case MBIM_CID_PIN:
+ ok = umb_decode_pin(sc, data, len);
+ break;
+ case MBIM_CID_REGISTER_STATE:
+ ok = umb_decode_register_state(sc, data, len);
+ break;
+ case MBIM_CID_PACKET_SERVICE:
+ ok = umb_decode_packet_service(sc, data, len);
+ break;
+ case MBIM_CID_SIGNAL_STATE:
+ ok = umb_decode_signal_state(sc, data, len);
+ break;
+ case MBIM_CID_CONNECT:
+ ok = umb_decode_connect_info(sc, data, len);
+ break;
+ case MBIM_CID_IP_CONFIGURATION:
+ ok = umb_decode_ip_configuration(sc, data, len);
+ break;
+ default:
+ /*
+ * Note: the above list is incomplete and only contains
+ * mandatory CIDs from the BASIC_CONNECT set.
+ * So alternate values are not unusual.
+ */
+ DPRINTFN(4, "ignore %s\n", umb_cid2str(cid));
+ break;
+ }
+ if (!ok)
+ DPRINTF("discard %s with bad info length %d\n",
+ umb_cid2str(cid), len);
+ return;
+}
+
+static void
+umb_decode_qmi(struct umb_softc *sc, uint8_t *data, int len)
+{
+ uint8_t srv;
+ uint16_t msg, tlvlen;
+ uint32_t val;
+
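+	/*
+	 * Minimal QMUX parser: byte 4 of the QMUX header carries the
+	 * service type; the per-service header that follows carries the
+	 * message id and the total length of the TLV area.
+	 */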
+#define UMB_QMI_QMUXLEN 6
+ if (len < UMB_QMI_QMUXLEN)
+ goto tooshort;
+
+ srv = data[4];
+ data += UMB_QMI_QMUXLEN;
+ len -= UMB_QMI_QMUXLEN;
+
+#define UMB_GET16(p)	((uint16_t)*(p) | (uint16_t)*((p) + 1) << 8)
+#define UMB_GET32(p)	((uint32_t)*(p) | (uint32_t)*((p) + 1) << 8 | \
+			(uint32_t)*((p) + 2) << 16 | (uint32_t)*((p) + 3) << 24)
+ switch (srv) {
+ case 0: /* ctl */
+#define UMB_QMI_CTLLEN 6
+ if (len < UMB_QMI_CTLLEN)
+ goto tooshort;
+ msg = UMB_GET16(&data[2]);
+ tlvlen = UMB_GET16(&data[4]);
+ data += UMB_QMI_CTLLEN;
+ len -= UMB_QMI_CTLLEN;
+ break;
+ case 2: /* dms */
+#define UMB_QMI_DMSLEN 7
+ if (len < UMB_QMI_DMSLEN)
+ goto tooshort;
+ msg = UMB_GET16(&data[3]);
+ tlvlen = UMB_GET16(&data[5]);
+ data += UMB_QMI_DMSLEN;
+ len -= UMB_QMI_DMSLEN;
+ break;
+ default:
+ DPRINTF("discard QMI message for unknown service type %d\n",
+ srv);
+ return;
+ }
+
+ if (len < tlvlen)
+ goto tooshort;
+
+#define UMB_QMI_TLVLEN 3
+ while (len > 0) {
+ if (len < UMB_QMI_TLVLEN)
+ goto tooshort;
+ tlvlen = UMB_GET16(&data[1]);
+ if (len < UMB_QMI_TLVLEN + tlvlen)
+ goto tooshort;
+ switch (data[0]) {
+ case 1: /* allocation info */
+ if (msg == 0x0022) { /* Allocate CID */
+ if (tlvlen != 2 || data[3] != 2) /* dms */
+ break;
+ sc->sc_cid = data[4];
+ DPRINTF("QMI CID %d allocated\n",
+ sc->sc_cid);
+ umb_newstate(sc, UMB_S_CID, UMB_NS_DONT_DROP);
+ }
+ break;
+ case 2: /* response */
+ if (tlvlen != sizeof (val))
+ break;
+ val = UMB_GET32(&data[3]);
+ switch (msg) {
+ case 0x0022: /* Allocate CID */
+ if (val != 0) {
+ log(LOG_ERR, "%s: allocation of QMI CID"
+ " failed, error 0x%x\n", DEVNAM(sc),
+ val);
+ /* XXX how to proceed? */
+ return;
+ }
+ break;
+ case 0x555f: /* Send FCC Authentication */
+ if (val == 0)
+ DPRINTF("%s: send FCC "
+ "Authentication succeeded\n",
+ DEVNAM(sc));
+ else if (val == 0x001a0001)
+ DPRINTF("%s: FCC Authentication "
+ "not required\n", DEVNAM(sc));
+ else
+ log(LOG_INFO, "%s: send FCC "
+ "Authentication failed, "
+ "error 0x%x\n", DEVNAM(sc), val);
+
+				/* FCC Auth is needed only once after power-on */
+ sc->sc_flags &= ~UMBFLG_FCC_AUTH_REQUIRED;
+
+ /* Try to proceed anyway */
+ DPRINTF("init: turning radio on ...\n");
+ umb_radio(sc, 1);
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ data += UMB_QMI_TLVLEN + tlvlen;
+ len -= UMB_QMI_TLVLEN + tlvlen;
+ }
+ return;
+
+tooshort:
+ DPRINTF("discard short QMI message\n");
+ return;
+}
+
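+/*
+ * Interrupt endpoint handler: CDC notifications drive the control
+ * channel.  A RESPONSE_AVAILABLE notification schedules the response
+ * task, which fetches and decodes the encapsulated response(s).
+ */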
+static void
+umb_intr(struct usb_xfer *xfer, usb_error_t status)
+{
+ struct umb_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_cdc_notification notification;
+ struct usb_page_cache *pc;
+ if_t ifp = GET_IFP(sc);
+ int total_len;
+
+ mtx_assert(&sc->sc_mutex, MA_OWNED);
+
+ /* FIXME use actlen or total_len? */
+ usbd_xfer_status(xfer, &total_len, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ DPRINTF("Received %d bytes\n", total_len);
+
+ if (total_len < UCDC_NOTIFICATION_LENGTH) {
+ DPRINTF("short notification (%d<%d)\n",
+ total_len, UCDC_NOTIFICATION_LENGTH);
+ return;
+ }
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_out(pc, 0, &notification, sizeof (notification));
+
+ if (notification.bmRequestType != UCDC_NOTIFICATION) {
+ DPRINTF("unexpected notification (type=0x%02x)\n",
+ notification.bmRequestType);
+ return;
+ }
+
+ switch (notification.bNotification) {
+ case UCDC_N_NETWORK_CONNECTION:
+ if (if_getflags(ifp) & IFF_DEBUG)
+ log(LOG_DEBUG, "%s: network %sconnected\n",
+ DEVNAM(sc),
+ UGETW(notification.wValue)
+ ? "" : "dis");
+ break;
+ case UCDC_N_RESPONSE_AVAILABLE:
+ DPRINTFN(2, "umb_intr: response available\n");
+ ++sc->sc_nresp;
+ umb_add_task(sc, umb_get_response_task,
+ &sc->sc_proc_get_response_task[0].hdr,
+ &sc->sc_proc_get_response_task[1].hdr,
+ 0);
+ break;
+ case UCDC_N_CONNECTION_SPEED_CHANGE:
+ DPRINTFN(2, "umb_intr: connection speed changed\n");
+ break;
+ default:
+ DPRINTF("unexpected notification (0x%02x)\n",
+ notification.bNotification);
+ break;
+ }
+ /* fallthrough */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ break;
+ default:
+ if (status != USB_ERR_CANCELLED) {
+ /* start clear stall */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ break;
+ }
+}
+
+/*
+ * Diagnostic routines
+ */
+static char *
+umb_ntop(struct sockaddr *sa)
+{
+#define NUMBUFS 4
+	static char astr[NUMBUFS][INET6_ADDRSTRLEN];
+ static unsigned nbuf = 0;
+ char *s;
+
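+	/*
+	 * Rotate through a few static buffers so that a single log
+	 * statement can format several addresses at once.
+	 */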
+ s = astr[nbuf++];
+ if (nbuf >= NUMBUFS)
+ nbuf = 0;
+
+ switch (sa->sa_family) {
+ case AF_INET:
+ default:
+ inet_ntop(AF_INET, &satosin(sa)->sin_addr, s, sizeof (astr[0]));
+ break;
+ case AF_INET6:
+ inet_ntop(AF_INET6, &satosin6(sa)->sin6_addr, s,
+ sizeof (astr[0]));
+ break;
+ }
+ return s;
+}
+
+#ifdef UMB_DEBUG
+static char *
+umb_uuid2str(uint8_t uuid[MBIM_UUID_LEN])
+{
+ static char uuidstr[2 * MBIM_UUID_LEN + 5];
+
+#define UUID_BFMT "%02X"
+#define UUID_SEP "-"
+ snprintf(uuidstr, sizeof (uuidstr),
+ UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_SEP
+ UUID_BFMT UUID_BFMT UUID_SEP
+ UUID_BFMT UUID_BFMT UUID_SEP
+ UUID_BFMT UUID_BFMT UUID_SEP
+ UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT UUID_BFMT,
+ uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
+ uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ return uuidstr;
+}
+
+static void
+umb_dump(void *buf, int len)
+{
+ int i = 0;
+ uint8_t *c = buf;
+
+ if (len == 0)
+ return;
+ while (i < len) {
+ if ((i % 16) == 0) {
+ if (i > 0)
+ log(LOG_DEBUG, "\n");
+ log(LOG_DEBUG, "%4d: ", i);
+ }
+ log(LOG_DEBUG, " %02x", *c);
+ c++;
+ i++;
+ }
+ log(LOG_DEBUG, "\n");
+}
+#endif /* UMB_DEBUG */
+
+DRIVER_MODULE(umb, uhub, umb_driver, NULL, NULL);
+MODULE_DEPEND(umb, usb, 1, 1, 1);
diff --git a/sys/dev/usb/net/if_umbreg.h b/sys/dev/usb/net/if_umbreg.h
new file mode 100644
index 000000000000..1f3a4aff5d54
--- /dev/null
+++ b/sys/dev/usb/net/if_umbreg.h
@@ -0,0 +1,443 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Original copyright (c) 2016 genua mbH (OpenBSD version)
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Copyright (c) 2022 ADISTA SAS (re-write for FreeBSD)
+ *
+ * Re-write for FreeBSD by Pierre Pronchery <pierre@defora.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $OpenBSD: if_umb.h,v 1.4 2017/04/18 13:27:55 gerhard Exp $
+ */
+
+/*
+ * Mobile Broadband Interface Model
+ * http://www.usb.org/developers/docs/devclass_docs/MBIM-Compliance-1.0.pdf
+ */
+
+struct umb_valdescr {
+ int val;
+ char const *descr;
+};
+
+static const char *
+umb_val2descr(const struct umb_valdescr *vdp, int val)
+{
+ static char sval[32];
+
+ while (vdp->descr != NULL) {
+ if (vdp->val == val)
+ return vdp->descr;
+ vdp++;
+ }
+ snprintf(sval, sizeof (sval), "#%d", val);
+ return sval;
+}
+
+#define MBIM_REGSTATE_DESCRIPTIONS { \
+ { MBIM_REGSTATE_UNKNOWN, "unknown" }, \
+ { MBIM_REGSTATE_DEREGISTERED, "not registered" }, \
+ { MBIM_REGSTATE_SEARCHING, "searching" }, \
+ { MBIM_REGSTATE_HOME, "home network" }, \
+ { MBIM_REGSTATE_ROAMING, "roaming network" }, \
+ { MBIM_REGSTATE_PARTNER, "partner network" }, \
+ { MBIM_REGSTATE_DENIED, "access denied" }, \
+ { 0, NULL } }
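+
+/*
+ * Typical use of the description tables in this file (a minimal
+ * sketch; the variable names are illustrative):
+ *
+ *	static const struct umb_valdescr umb_regstates[] =
+ *	    MBIM_REGSTATE_DESCRIPTIONS;
+ *
+ *	printf("registration: %s\n",
+ *	    umb_val2descr(umb_regstates, regstate));
+ *
+ * Values missing from a table fall back to the "#%d" form produced
+ * by umb_val2descr().
+ */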
+
+#define MBIM_DATACLASS_DESCRIPTIONS { \
+ { MBIM_DATACLASS_NONE, "none" }, \
+ { MBIM_DATACLASS_GPRS, "GPRS" }, \
+ { MBIM_DATACLASS_EDGE, "EDGE" }, \
+ { MBIM_DATACLASS_UMTS, "UMTS" }, \
+ { MBIM_DATACLASS_HSDPA, "HSDPA" }, \
+ { MBIM_DATACLASS_HSUPA, "HSUPA" }, \
+ { MBIM_DATACLASS_HSDPA|MBIM_DATACLASS_HSUPA, "HSPA" }, \
+ { MBIM_DATACLASS_LTE, "LTE" }, \
+ { MBIM_DATACLASS_1XRTT, "CDMA2000" }, \
+ { MBIM_DATACLASS_1XEVDO, "CDMA2000" }, \
+ { MBIM_DATACLASS_1XEVDO_REV_A, "CDMA2000" }, \
+ { MBIM_DATACLASS_1XEVDV, "CDMA2000" }, \
+ { MBIM_DATACLASS_3XRTT, "CDMA2000" }, \
+ { MBIM_DATACLASS_1XEVDO_REV_B, "CDMA2000" }, \
+ { MBIM_DATACLASS_UMB, "CDMA2000" }, \
+ { MBIM_DATACLASS_CUSTOM, "custom" }, \
+ { 0, NULL } }
+
+#define MBIM_1TO1_DESCRIPTION(m) { (m), #m }
+#define MBIM_MESSAGES_DESCRIPTIONS { \
+ MBIM_1TO1_DESCRIPTION(MBIM_OPEN_MSG), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CLOSE_MSG), \
+ MBIM_1TO1_DESCRIPTION(MBIM_COMMAND_MSG), \
+ MBIM_1TO1_DESCRIPTION(MBIM_HOST_ERROR_MSG), \
+ MBIM_1TO1_DESCRIPTION(MBIM_OPEN_DONE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CLOSE_DONE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_COMMAND_DONE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_FUNCTION_ERROR_MSG), \
+ MBIM_1TO1_DESCRIPTION(MBIM_INDICATE_STATUS_MSG), \
+ { 0, NULL } }
+
+#define MBIM_STATUS_DESCRIPTION(m) { MBIM_STATUS_ ## m, #m }
+#define MBIM_STATUS_DESCRIPTIONS { \
+ MBIM_STATUS_DESCRIPTION(SUCCESS), \
+ MBIM_STATUS_DESCRIPTION(BUSY), \
+ MBIM_STATUS_DESCRIPTION(FAILURE), \
+ MBIM_STATUS_DESCRIPTION(SIM_NOT_INSERTED), \
+ MBIM_STATUS_DESCRIPTION(BAD_SIM), \
+ MBIM_STATUS_DESCRIPTION(PIN_REQUIRED), \
+ MBIM_STATUS_DESCRIPTION(PIN_DISABLED), \
+ MBIM_STATUS_DESCRIPTION(NOT_REGISTERED), \
+ MBIM_STATUS_DESCRIPTION(PROVIDERS_NOT_FOUND), \
+ MBIM_STATUS_DESCRIPTION(NO_DEVICE_SUPPORT), \
+ MBIM_STATUS_DESCRIPTION(PROVIDER_NOT_VISIBLE), \
+ MBIM_STATUS_DESCRIPTION(DATA_CLASS_NOT_AVAILABLE), \
+ MBIM_STATUS_DESCRIPTION(PACKET_SERVICE_DETACHED), \
+ MBIM_STATUS_DESCRIPTION(MAX_ACTIVATED_CONTEXTS), \
+ MBIM_STATUS_DESCRIPTION(NOT_INITIALIZED), \
+ MBIM_STATUS_DESCRIPTION(VOICE_CALL_IN_PROGRESS), \
+ MBIM_STATUS_DESCRIPTION(CONTEXT_NOT_ACTIVATED), \
+ MBIM_STATUS_DESCRIPTION(SERVICE_NOT_ACTIVATED), \
+ MBIM_STATUS_DESCRIPTION(INVALID_ACCESS_STRING), \
+ MBIM_STATUS_DESCRIPTION(INVALID_USER_NAME_PWD), \
+ MBIM_STATUS_DESCRIPTION(RADIO_POWER_OFF), \
+ MBIM_STATUS_DESCRIPTION(INVALID_PARAMETERS), \
+ MBIM_STATUS_DESCRIPTION(READ_FAILURE), \
+ MBIM_STATUS_DESCRIPTION(WRITE_FAILURE), \
+ MBIM_STATUS_DESCRIPTION(NO_PHONEBOOK), \
+ MBIM_STATUS_DESCRIPTION(PARAMETER_TOO_LONG), \
+ MBIM_STATUS_DESCRIPTION(STK_BUSY), \
+ MBIM_STATUS_DESCRIPTION(OPERATION_NOT_ALLOWED), \
+ MBIM_STATUS_DESCRIPTION(MEMORY_FAILURE), \
+ MBIM_STATUS_DESCRIPTION(INVALID_MEMORY_INDEX), \
+ MBIM_STATUS_DESCRIPTION(MEMORY_FULL), \
+ MBIM_STATUS_DESCRIPTION(FILTER_NOT_SUPPORTED), \
+ MBIM_STATUS_DESCRIPTION(DSS_INSTANCE_LIMIT), \
+ MBIM_STATUS_DESCRIPTION(INVALID_DEVICE_SERVICE_OPERATION), \
+ MBIM_STATUS_DESCRIPTION(AUTH_INCORRECT_AUTN), \
+ MBIM_STATUS_DESCRIPTION(AUTH_SYNC_FAILURE), \
+ MBIM_STATUS_DESCRIPTION(AUTH_AMF_NOT_SET), \
+ MBIM_STATUS_DESCRIPTION(CONTEXT_NOT_SUPPORTED), \
+ MBIM_STATUS_DESCRIPTION(SMS_UNKNOWN_SMSC_ADDRESS), \
+ MBIM_STATUS_DESCRIPTION(SMS_NETWORK_TIMEOUT), \
+ MBIM_STATUS_DESCRIPTION(SMS_LANG_NOT_SUPPORTED), \
+ MBIM_STATUS_DESCRIPTION(SMS_ENCODING_NOT_SUPPORTED), \
+ MBIM_STATUS_DESCRIPTION(SMS_FORMAT_NOT_SUPPORTED), \
+ { 0, NULL } }
+
+#define MBIM_ERROR_DESCRIPTION(m) { MBIM_ERROR_ ## m, #m }
+#define MBIM_ERROR_DESCRIPTIONS { \
+ MBIM_ERROR_DESCRIPTION(TIMEOUT_FRAGMENT), \
+ MBIM_ERROR_DESCRIPTION(FRAGMENT_OUT_OF_SEQUENCE), \
+ MBIM_ERROR_DESCRIPTION(LENGTH_MISMATCH), \
+ MBIM_ERROR_DESCRIPTION(DUPLICATED_TID), \
+ MBIM_ERROR_DESCRIPTION(NOT_OPENED), \
+ MBIM_ERROR_DESCRIPTION(UNKNOWN), \
+ MBIM_ERROR_DESCRIPTION(CANCEL), \
+ MBIM_ERROR_DESCRIPTION(MAX_TRANSFER), \
+ { 0, NULL } }
+
+#define MBIM_CID_DESCRIPTIONS { \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_DEVICE_CAPS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_SUBSCRIBER_READY_STATUS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_RADIO_STATE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PIN), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PIN_LIST), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_HOME_PROVIDER), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PREFERRED_PROVIDERS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_VISIBLE_PROVIDERS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_REGISTER_STATE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PACKET_SERVICE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_SIGNAL_STATE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_CONNECT), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PROVISIONED_CONTEXTS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_SERVICE_ACTIVATION), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_IP_CONFIGURATION), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_DEVICE_SERVICES), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_PACKET_STATISTICS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_NETWORK_IDLE_HINT), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_EMERGENCY_MODE), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_IP_PACKET_FILTERS), \
+ MBIM_1TO1_DESCRIPTION(MBIM_CID_MULTICARRIER_PROVIDERS), \
+ { 0, NULL } }
+
+#define MBIM_SIMSTATE_DESCRIPTIONS { \
+ { MBIM_SIMSTATE_NOTINITIALIZED, "not initialized" }, \
+ { MBIM_SIMSTATE_INITIALIZED, "initialized" }, \
+ { MBIM_SIMSTATE_NOTINSERTED, "not inserted" }, \
+	{ MBIM_SIMSTATE_BADSIM, "bad SIM" }, \
+ { MBIM_SIMSTATE_FAILURE, "failed" }, \
+ { MBIM_SIMSTATE_NOTACTIVATED, "not activated" }, \
+ { MBIM_SIMSTATE_LOCKED, "locked" }, \
+ { 0, NULL } }
+
+#define MBIM_PINTYPE_DESCRIPTIONS { \
+ { MBIM_PIN_TYPE_NONE, "none" }, \
+ { MBIM_PIN_TYPE_CUSTOM, "custom" }, \
+ { MBIM_PIN_TYPE_PIN1, "PIN1" }, \
+ { MBIM_PIN_TYPE_PIN2, "PIN2" }, \
+ { MBIM_PIN_TYPE_DEV_SIM_PIN, "device PIN" }, \
+ { MBIM_PIN_TYPE_DEV_FIRST_SIM_PIN, "device 1st PIN" }, \
+ { MBIM_PIN_TYPE_NETWORK_PIN, "network PIN" }, \
+ { MBIM_PIN_TYPE_NETWORK_SUBSET_PIN, "network subset PIN" }, \
+ { MBIM_PIN_TYPE_SERVICE_PROVIDER_PIN, "provider PIN" }, \
+ { MBIM_PIN_TYPE_CORPORATE_PIN, "corporate PIN" }, \
+ { MBIM_PIN_TYPE_SUBSIDY_LOCK, "subsidy lock" }, \
+ { MBIM_PIN_TYPE_PUK1, "PUK" }, \
+ { MBIM_PIN_TYPE_PUK2, "PUK2" }, \
+ { MBIM_PIN_TYPE_DEV_FIRST_SIM_PUK, "device 1st PUK" }, \
+ { MBIM_PIN_TYPE_NETWORK_PUK, "network PUK" }, \
+ { MBIM_PIN_TYPE_NETWORK_SUBSET_PUK, "network subset PUK" }, \
+ { MBIM_PIN_TYPE_SERVICE_PROVIDER_PUK, "provider PUK" }, \
+ { MBIM_PIN_TYPE_CORPORATE_PUK, "corporate PUK" }, \
+ { 0, NULL } }
+
+#define MBIM_PKTSRV_STATE_DESCRIPTIONS { \
+ { MBIM_PKTSERVICE_STATE_UNKNOWN, "unknown" }, \
+ { MBIM_PKTSERVICE_STATE_ATTACHING, "attaching" }, \
+ { MBIM_PKTSERVICE_STATE_ATTACHED, "attached" }, \
+ { MBIM_PKTSERVICE_STATE_DETACHING, "detaching" }, \
+ { MBIM_PKTSERVICE_STATE_DETACHED, "detached" }, \
+ { 0, NULL } }
+
+#define MBIM_ACTIVATION_STATE_DESCRIPTIONS { \
+ { MBIM_ACTIVATION_STATE_UNKNOWN, "unknown" }, \
+ { MBIM_ACTIVATION_STATE_ACTIVATED, "activated" }, \
+ { MBIM_ACTIVATION_STATE_ACTIVATING, "activating" }, \
+ { MBIM_ACTIVATION_STATE_DEACTIVATED, "deactivated" }, \
+ { MBIM_ACTIVATION_STATE_DEACTIVATING, "deactivating" }, \
+ { 0, NULL } }
+
+/*
+ * Driver internal state
+ */
+enum umb_state {
+ UMB_S_DOWN = 0, /* interface down */
+ UMB_S_OPEN, /* MBIM device has been opened */
+ UMB_S_CID, /* QMI client id allocated */
+ UMB_S_RADIO, /* radio is on */
+ UMB_S_SIMREADY, /* SIM is ready */
+ UMB_S_ATTACHED, /* packet service is attached */
+ UMB_S_CONNECTED, /* connected to provider */
+ UMB_S_UP, /* have IP configuration */
+};
+
+#define UMB_INTERNAL_STATE_DESCRIPTIONS { \
+ { UMB_S_DOWN, "down" }, \
+ { UMB_S_OPEN, "open" }, \
+ { UMB_S_CID, "CID allocated" }, \
+ { UMB_S_RADIO, "radio on" }, \
+ { UMB_S_SIMREADY, "SIM is ready" }, \
+ { UMB_S_ATTACHED, "attached" }, \
+ { UMB_S_CONNECTED, "connected" }, \
+ { UMB_S_UP, "up" }, \
+ { 0, NULL } }
+
+/*
+ * UMB parameters (SIOC[GS]UMBPARAM ioctls)
+ */
+struct umb_parameter {
+ int op;
+ int is_puk;
+ uint16_t pin[MBIM_PIN_MAXLEN];
+ int pinlen;
+
+ uint16_t newpin[MBIM_PIN_MAXLEN];
+ int newpinlen;
+
+#define UMB_APN_MAXLEN 100
+ uint16_t apn[UMB_APN_MAXLEN];
+ int apnlen;
+
+#define UMB_USERNAME_MAXLEN 205
+ uint16_t username[UMB_USERNAME_MAXLEN];
+ int usernamelen;
+
+#define UMB_PASSWORD_MAXLEN 205
+ uint16_t password[UMB_PASSWORD_MAXLEN];
+ int passwordlen;
+
+ int roaming;
+ uint32_t preferredclasses;
+};
+
+/*
+ * UMB device status info (SIOCGUMBINFO ioctl)
+ */
+struct umb_info {
+ enum umb_state state;
+ int enable_roaming;
+#define UMB_PIN_REQUIRED 0
+#define UMB_PIN_UNLOCKED 1
+#define UMB_PUK_REQUIRED 2
+ int pin_state;
+ int pin_attempts_left;
+ int activation;
+ int sim_state;
+ int regstate;
+ int regmode;
+ int nwerror;
+ int packetstate;
+ uint32_t supportedclasses; /* what the hw supports */
+ uint32_t preferredclasses; /* what the user prefers */
+ uint32_t highestclass; /* what the network offers */
+ uint32_t cellclass;
+#define UMB_PROVIDERNAME_MAXLEN 20
+ uint16_t provider[UMB_PROVIDERNAME_MAXLEN];
+#define UMB_PHONENR_MAXLEN 22
+ uint16_t pn[UMB_PHONENR_MAXLEN];
+#define UMB_SUBSCRIBERID_MAXLEN 15
+ uint16_t sid[UMB_SUBSCRIBERID_MAXLEN];
+#define UMB_ICCID_MAXLEN 20
+ uint16_t iccid[UMB_ICCID_MAXLEN];
+#define UMB_ROAMINGTEXT_MAXLEN 63
+ uint16_t roamingtxt[UMB_ROAMINGTEXT_MAXLEN];
+
+#define UMB_DEVID_MAXLEN 18
+ uint16_t devid[UMB_DEVID_MAXLEN];
+#define UMB_FWINFO_MAXLEN 30
+ uint16_t fwinfo[UMB_FWINFO_MAXLEN];
+#define UMB_HWINFO_MAXLEN 30
+ uint16_t hwinfo[UMB_HWINFO_MAXLEN];
+
+ uint16_t apn[UMB_APN_MAXLEN];
+ int apnlen;
+
+ uint16_t username[UMB_USERNAME_MAXLEN];
+ int usernamelen;
+
+ uint16_t password[UMB_PASSWORD_MAXLEN];
+ int passwordlen;
+
+#define UMB_VALUE_UNKNOWN -999
+ int rssi;
+#define UMB_BER_EXCELLENT 0
+#define UMB_BER_VERYGOOD 1
+#define UMB_BER_GOOD 2
+#define UMB_BER_OK 3
+#define UMB_BER_MEDIUM 4
+#define UMB_BER_BAD 5
+#define UMB_BER_VERYBAD 6
+#define UMB_BER_EXTREMELYBAD 7
+ int ber;
+
+ int hw_radio_on;
+ int sw_radio_on;
+
+ uint64_t uplink_speed;
+ uint64_t downlink_speed;
+
+#define UMB_MAX_DNSSRV 2
+ struct in_addr ipv4dns[UMB_MAX_DNSSRV];
+};
+
+#if !defined(ifr_mtu)
+#define ifr_mtu ifr_ifru.ifru_metric
+#endif
+
+#ifdef _KERNEL
+/*
+ * UMB device
+ */
+enum {
+ UMB_INTR_RX,
+ UMB_BULK_RX,
+ UMB_BULK_TX,
+ UMB_N_TRANSFER,
+};
+
+struct umb_task {
+ struct usb_proc_msg hdr;
+ struct umb_softc *sc;
+};
+
+struct umb_softc {
+ device_t sc_dev;
+ struct ifnet *sc_if;
+#define GET_IFP(sc) ((sc)->sc_if)
+ struct ifmedia sc_im;
+ struct usb_device *sc_udev;
+ struct usb_xfer *sc_xfer[UMB_N_TRANSFER];
+ uint8_t sc_ifaces_index[2];
+
+ int sc_ver_maj;
+ int sc_ver_min;
+ int sc_ctrl_len;
+ int sc_maxpktlen;
+ int sc_maxsessions;
+
+#define UMBFLG_FCC_AUTH_REQUIRED 0x0001
+#define UMBFLG_NO_INET6 0x0002
+ uint32_t sc_flags;
+ int sc_cid;
+
+ struct usb_process sc_taskqueue;
+ struct umb_task sc_proc_attach_task[2];
+ struct umb_task sc_proc_start_task[2];
+ struct umb_task sc_proc_state_task[2];
+ struct umb_task sc_proc_get_response_task[2];
+
+ int sc_nresp;
+ struct mtx sc_mutex;
+ struct usb_callout sc_statechg_timer;
+ char sc_dying;
+ char sc_attached;
+
+ uint8_t sc_ctrl_ifaceno;
+ struct usb_interface *sc_data_iface;
+
+ void *sc_resp_buf;
+ void *sc_ctrl_msg;
+
+ void *sc_rx_buf;
+ uint32_t sc_rx_bufsz;
+ unsigned sc_rx_nerr;
+ struct ifqueue sc_rx_queue;
+
+ void *sc_tx_buf;
+ struct mbuf *sc_tx_m;
+ uint32_t sc_tx_bufsz;
+ uint32_t sc_tx_seq;
+
+ uint32_t sc_tid;
+
+#define sc_state sc_info.state
+#define sc_roaming sc_info.enable_roaming
+ struct umb_info sc_info;
+};
+#endif /* _KERNEL */
diff --git a/sys/dev/usb/net/if_ure.c b/sys/dev/usb/net/if_ure.c
index e9112f403ef5..c3f7b622d687 100644
--- a/sys/dev/usb/net/if_ure.c
+++ b/sys/dev/usb/net/if_ure.c
@@ -96,10 +96,12 @@ static const STRUCT_USB_HOST_ID ure_devs[] = {
USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, i), \
USB_IFACE_CLASS(UICLASS_VENDOR), \
USB_IFACE_SUBCLASS(UISUBCLASS_VENDOR) }
+ URE_DEV(ELECOM, EDCQUA3C, 0),
URE_DEV(LENOVO, RTL8153, URE_FLAG_8153),
URE_DEV(LENOVO, TBT3LANGEN2, 0),
URE_DEV(LENOVO, ONELINK, 0),
URE_DEV(LENOVO, RTL8153_04, URE_FLAG_8153),
+ URE_DEV(LENOVO, ONELINKPLUS, URE_FLAG_8153),
URE_DEV(LENOVO, USBCLAN, 0),
URE_DEV(LENOVO, USBCLANGEN2, 0),
URE_DEV(LENOVO, USBCLANHYBRID, 0),
@@ -478,7 +480,7 @@ done:
}
/*
- * Probe for a RTL8152/RTL8153 chip.
+ * Probe for an RTL8152/RTL8153/RTL8156 chip.
*/
static int
ure_probe(device_t dev)
@@ -705,7 +707,13 @@ ure_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error)
/* set the necessary flags for rx checksum */
ure_rxcsum(caps, &pkt, m);
- uether_rxmbuf(ue, m, len - ETHER_CRC_LEN);
+ /*
+ * len has been known to be bogus at times,
+ * which leads to problems when passed to
+			 * uether_rxmbuf(). A better understanding of why
+			 * that happens would make for good future work.
+ */
+ uether_rxmbuf(ue, m, 0);
}
off += roundup(len, URE_RXPKT_ALIGN);
diff --git a/sys/dev/usb/net/if_urndis.c b/sys/dev/usb/net/if_urndis.c
index 824609aa869f..4b0582442e30 100644
--- a/sys/dev/usb/net/if_urndis.c
+++ b/sys/dev/usb/net/if_urndis.c
@@ -20,7 +20,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -214,15 +213,15 @@ urndis_attach_post(struct usb_ether *ue)
static int
urndis_attach(device_t dev)
{
- static struct {
- union {
+ union {
+ struct {
struct rndis_query_req query;
+ uint8_t addr[ETHER_ADDR_LEN];
+ } eaddr;
+ struct {
struct rndis_set_req set;
- } hdr;
- union {
- uint8_t eaddr[ETHER_ADDR_LEN];
uint32_t filter;
- } ibuf;
+ } filter;
} msg;
struct urndis_softc *sc = device_get_softc(dev);
struct usb_ether *ue = &sc->sc_ue;
@@ -278,10 +277,10 @@ urndis_attach(device_t dev)
}
/* Determine MAC address */
- memset(msg.ibuf.eaddr, 0, sizeof(msg.ibuf.eaddr));
+ memset(msg.eaddr.addr, 0, sizeof(msg.eaddr.addr));
URNDIS_LOCK(sc);
error = urndis_ctrl_query(sc, OID_802_3_PERMANENT_ADDRESS,
- &msg.hdr.query, sizeof(msg.hdr.query) + sizeof(msg.ibuf.eaddr),
+ (struct rndis_query_req *)&msg.eaddr, sizeof(msg.eaddr),
&buf, &bufsz);
URNDIS_UNLOCK(sc);
if (error != (int)RNDIS_STATUS_SUCCESS) {
@@ -297,10 +296,10 @@ urndis_attach(device_t dev)
/* Initialize packet filter */
sc->sc_filter = NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST;
- msg.ibuf.filter = htole32(sc->sc_filter);
+ msg.filter.filter = htole32(sc->sc_filter);
URNDIS_LOCK(sc);
error = urndis_ctrl_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
- &msg.hdr.set, sizeof(msg.hdr.set) + sizeof(msg.ibuf.filter));
+ (struct rndis_set_req *)&msg.filter, sizeof(msg.filter));
URNDIS_UNLOCK(sc);
if (error != (int)RNDIS_STATUS_SUCCESS) {
device_printf(dev, "Unable to set data filters\n");
@@ -641,7 +640,7 @@ urndis_ctrl_handle_reset(struct urndis_softc *sc,
msg_filter.filter = htole32(sc->sc_filter);
rval = urndis_ctrl_set(sc, OID_GEN_CURRENT_PACKET_FILTER,
- &msg_filter.hdr, sizeof(msg_filter));
+ (struct rndis_set_req *)&msg_filter, sizeof(msg_filter));
if (rval != RNDIS_STATUS_SUCCESS) {
DPRINTF("unable to reset data filters\n");
diff --git a/sys/dev/usb/net/if_usie.c b/sys/dev/usb/net/if_usie.c
index 6f5c207ce42c..bcbf056f3844 100644
--- a/sys/dev/usb/net/if_usie.c
+++ b/sys/dev/usb/net/if_usie.c
@@ -27,6 +27,8 @@
* SUCH DAMAGE.
*/
+#include "opt_inet6.h"
+
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
@@ -430,11 +432,6 @@ usie_attach(device_t self)
/* setup ifnet (Direct IP) */
sc->sc_ifp = ifp = if_alloc(IFT_OTHER);
-
- if (ifp == NULL) {
- device_printf(self, "Could not allocate a network interface\n");
- goto detach;
- }
if_initname(ifp, "usie", device_get_unit(self));
if_setsoftc(ifp, sc);
@@ -1195,7 +1192,7 @@ usie_if_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst,
switch (dst->sa_family) {
#ifdef INET6
- case AF_INET6;
+ case AF_INET6:
		/* fall through */
#endif
case AF_INET:
diff --git a/sys/dev/usb/net/mbim.h b/sys/dev/usb/net/mbim.h
new file mode 100644
index 000000000000..b8b54f72e282
--- /dev/null
+++ b/sys/dev/usb/net/mbim.h
@@ -0,0 +1,727 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Original copyright (c) 2016 genua mbH (OpenBSD version)
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Copyright (c) 2022 ADISTA SAS (re-write for FreeBSD)
+ *
+ * Re-write for FreeBSD by Pierre Pronchery <pierre@defora.net>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $OpenBSD: mbim.h,v 1.4 2017/04/18 13:27:55 gerhard Exp $
+ */
+
+/*
+ * Mobile Broadband Interface Model
+ * http://www.usb.org/developers/docs/devclass_docs/MBIM-Compliance-1.0.pdf
+ */
+
+#ifndef _MBIM_H_
+#define _MBIM_H_
+
+#define UDESCSUB_MBIM 27
+#define MBIM_INTERFACE_ALTSETTING 1
+
+#define MBIM_RESET_FUNCTION 0x05
+
+/*
+ * Registration state (MBIM_REGISTER_STATE)
+ */
+#define MBIM_REGSTATE_UNKNOWN 0
+#define MBIM_REGSTATE_DEREGISTERED 1
+#define MBIM_REGSTATE_SEARCHING 2
+#define MBIM_REGSTATE_HOME 3
+#define MBIM_REGSTATE_ROAMING 4
+#define MBIM_REGSTATE_PARTNER 5
+#define MBIM_REGSTATE_DENIED 6
+
+/*
+ * Data classes mask (MBIM_DATA_CLASS)
+ */
+#define MBIM_DATACLASS_NONE 0x00000000
+#define MBIM_DATACLASS_GPRS 0x00000001
+#define MBIM_DATACLASS_EDGE 0x00000002
+#define MBIM_DATACLASS_UMTS 0x00000004
+#define MBIM_DATACLASS_HSDPA 0x00000008
+#define MBIM_DATACLASS_HSUPA 0x00000010
+#define MBIM_DATACLASS_LTE 0x00000020
+#define MBIM_DATACLASS_1XRTT 0x00010000
+#define MBIM_DATACLASS_1XEVDO 0x00020000
+#define MBIM_DATACLASS_1XEVDO_REV_A 0x00040000
+#define MBIM_DATACLASS_1XEVDV 0x00080000
+#define MBIM_DATACLASS_3XRTT 0x00100000
+#define MBIM_DATACLASS_1XEVDO_REV_B 0x00200000
+#define MBIM_DATACLASS_UMB 0x00400000
+#define MBIM_DATACLASS_CUSTOM 0x80000000
+
+/*
+ * Cell classes mask (MBIM_CELLULAR_CLASS)
+ */
+#define MBIM_CELLCLASS_GSM 0x00000001
+#define MBIM_CELLCLASS_CDMA 0x00000002
+
+/*
+ * UUIDs
+ */
+#define MBIM_UUID_LEN 16
+
+#define MBIM_UUID_BASIC_CONNECT { \
+ 0xa2, 0x89, 0xcc, 0x33, 0xbc, 0xbb, 0x8b, 0x4f, \
+ 0xb6, 0xb0, 0x13, 0x3e, 0xc2, 0xaa, 0xe6, 0xdf \
+ }
+
+#define MBIM_UUID_CONTEXT_INTERNET { \
+ 0x7e, 0x5e, 0x2a, 0x7e, 0x4e, 0x6f, 0x72, 0x72, \
+ 0x73, 0x6b, 0x65, 0x6e, 0x7e, 0x5e, 0x2a, 0x7e \
+ }
+
+#define MBIM_UUID_CONTEXT_VPN { \
+ 0x9b, 0x9f, 0x7b, 0xbe, 0x89, 0x52, 0x44, 0xb7, \
+ 0x83, 0xac, 0xca, 0x41, 0x31, 0x8d, 0xf7, 0xa0 \
+ }
+
+#define MBIM_UUID_QMI_MBIM { \
+ 0xd1, 0xa3, 0x0b, 0xc2, 0xf9, 0x7a, 0x6e, 0x43, \
+ 0xbf, 0x65, 0xc7, 0xe2, 0x4f, 0xb0, 0xf0, 0xd3 \
+ }
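+
+/*
+ * Service UUIDs are handled as raw 16-byte arrays and compared with
+ * memcmp(), e.g. (an illustrative sketch, "devid" hypothetical):
+ *
+ *	static const uint8_t basic_connect[MBIM_UUID_LEN] =
+ *	    MBIM_UUID_BASIC_CONNECT;
+ *
+ *	if (memcmp(devid, basic_connect, MBIM_UUID_LEN) == 0)
+ *		...
+ */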
+
+#define MBIM_CTRLMSG_MINLEN 64
+#define MBIM_CTRLMSG_MAXLEN		(4 * 1024)
+
+#define MBIM_MAXSEGSZ_MINVAL (2 * 1024)
+
+/*
+ * Control messages (host to function)
+ */
+#define MBIM_OPEN_MSG 1U
+#define MBIM_CLOSE_MSG 2U
+#define MBIM_COMMAND_MSG 3U
+#define MBIM_HOST_ERROR_MSG 4U
+
+/*
+ * Control messages (function to host)
+ */
+#define MBIM_OPEN_DONE 0x80000001U
+#define MBIM_CLOSE_DONE 0x80000002U
+#define MBIM_COMMAND_DONE 0x80000003U
+#define MBIM_FUNCTION_ERROR_MSG 0x80000004U
+#define MBIM_INDICATE_STATUS_MSG 0x80000007U
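+
+/*
+ * The OPEN/CLOSE/COMMAND completion codes are the corresponding
+ * host-to-function codes with bit 31 set, e.g. MBIM_COMMAND_DONE ==
+ * (MBIM_COMMAND_MSG | 0x80000000U).
+ */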
+
+/*
+ * Generic status codes
+ */
+#define MBIM_STATUS_SUCCESS 0
+#define MBIM_STATUS_BUSY 1
+#define MBIM_STATUS_FAILURE 2
+#define MBIM_STATUS_SIM_NOT_INSERTED 3
+#define MBIM_STATUS_BAD_SIM 4
+#define MBIM_STATUS_PIN_REQUIRED 5
+#define MBIM_STATUS_PIN_DISABLED 6
+#define MBIM_STATUS_NOT_REGISTERED 7
+#define MBIM_STATUS_PROVIDERS_NOT_FOUND 8
+#define MBIM_STATUS_NO_DEVICE_SUPPORT 9
+#define MBIM_STATUS_PROVIDER_NOT_VISIBLE 10
+#define MBIM_STATUS_DATA_CLASS_NOT_AVAILABLE 11
+#define MBIM_STATUS_PACKET_SERVICE_DETACHED 12
+#define MBIM_STATUS_MAX_ACTIVATED_CONTEXTS 13
+#define MBIM_STATUS_NOT_INITIALIZED 14
+#define MBIM_STATUS_VOICE_CALL_IN_PROGRESS 15
+#define MBIM_STATUS_CONTEXT_NOT_ACTIVATED 16
+#define MBIM_STATUS_SERVICE_NOT_ACTIVATED 17
+#define MBIM_STATUS_INVALID_ACCESS_STRING 18
+#define MBIM_STATUS_INVALID_USER_NAME_PWD 19
+#define MBIM_STATUS_RADIO_POWER_OFF 20
+#define MBIM_STATUS_INVALID_PARAMETERS 21
+#define MBIM_STATUS_READ_FAILURE 22
+#define MBIM_STATUS_WRITE_FAILURE 23
+#define MBIM_STATUS_NO_PHONEBOOK 25
+#define MBIM_STATUS_PARAMETER_TOO_LONG 26
+#define MBIM_STATUS_STK_BUSY 27
+#define MBIM_STATUS_OPERATION_NOT_ALLOWED 28
+#define MBIM_STATUS_MEMORY_FAILURE 29
+#define MBIM_STATUS_INVALID_MEMORY_INDEX 30
+#define MBIM_STATUS_MEMORY_FULL 31
+#define MBIM_STATUS_FILTER_NOT_SUPPORTED 32
+#define MBIM_STATUS_DSS_INSTANCE_LIMIT 33
+#define MBIM_STATUS_INVALID_DEVICE_SERVICE_OPERATION 34
+#define MBIM_STATUS_AUTH_INCORRECT_AUTN 35
+#define MBIM_STATUS_AUTH_SYNC_FAILURE 36
+#define MBIM_STATUS_AUTH_AMF_NOT_SET 37
+#define MBIM_STATUS_CONTEXT_NOT_SUPPORTED 38
+#define MBIM_STATUS_SMS_UNKNOWN_SMSC_ADDRESS 100
+#define MBIM_STATUS_SMS_NETWORK_TIMEOUT 101
+#define MBIM_STATUS_SMS_LANG_NOT_SUPPORTED 102
+#define MBIM_STATUS_SMS_ENCODING_NOT_SUPPORTED 103
+#define MBIM_STATUS_SMS_FORMAT_NOT_SUPPORTED 104
+
+/*
+ * Message formats
+ */
+struct mbim_msghdr {
+ /* Msg header */
+ uint32_t type; /* message type */
+ uint32_t len; /* message length */
+ uint32_t tid; /* transaction id */
+} __packed;
+
+struct mbim_fraghdr {
+ uint32_t nfrag; /* total # of fragments */
+ uint32_t currfrag; /* current fragment */
+} __packed;
+
+struct mbim_fragmented_msg_hdr {
+ struct mbim_msghdr hdr;
+ struct mbim_fraghdr frag;
+} __packed;
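+
+/*
+ * Control messages larger than the negotiated maximum are split into
+ * nfrag fragments; each fragment repeats the message header (with the
+ * same tid) followed by a fragment header, currfrag counting up from 0.
+ */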
+
+struct mbim_h2f_openmsg {
+ struct mbim_msghdr hdr;
+ uint32_t maxlen;
+} __packed;
+
+struct mbim_h2f_closemsg {
+ struct mbim_msghdr hdr;
+} __packed;
+
+struct mbim_h2f_cmd {
+ struct mbim_msghdr hdr;
+ struct mbim_fraghdr frag;
+ uint8_t devid[MBIM_UUID_LEN];
+ uint32_t cid; /* command id */
+#define MBIM_CMDOP_QRY 0
+#define MBIM_CMDOP_SET 1
+ uint32_t op;
+ uint32_t infolen;
+ uint8_t info[];
+} __packed;
+
+struct mbim_f2h_indicate_status {
+ struct mbim_msghdr hdr;
+ struct mbim_fraghdr frag;
+ uint8_t devid[MBIM_UUID_LEN];
+ uint32_t cid; /* command id */
+ uint32_t infolen;
+ uint8_t info[];
+} __packed;
+
+struct mbim_f2h_hosterr {
+ struct mbim_msghdr hdr;
+
+#define MBIM_ERROR_TIMEOUT_FRAGMENT 1
+#define MBIM_ERROR_FRAGMENT_OUT_OF_SEQUENCE 2
+#define MBIM_ERROR_LENGTH_MISMATCH 3
+#define MBIM_ERROR_DUPLICATED_TID 4
+#define MBIM_ERROR_NOT_OPENED 5
+#define MBIM_ERROR_UNKNOWN 6
+#define MBIM_ERROR_CANCEL 7
+#define MBIM_ERROR_MAX_TRANSFER 8
+ uint32_t err;
+} __packed;
+
+struct mbim_f2h_openclosedone {
+ struct mbim_msghdr hdr;
+ int32_t status;
+} __packed;
+
+struct mbim_f2h_cmddone {
+ struct mbim_msghdr hdr;
+ struct mbim_fraghdr frag;
+ uint8_t devid[MBIM_UUID_LEN];
+ uint32_t cid; /* command id */
+ int32_t status;
+ uint32_t infolen;
+ uint8_t info[];
+} __packed;
+
+/*
+ * Messages and commands for MBIM_UUID_BASIC_CONNECT
+ */
+#define MBIM_CID_DEVICE_CAPS 1
+#define MBIM_CID_SUBSCRIBER_READY_STATUS 2
+#define MBIM_CID_RADIO_STATE 3
+#define MBIM_CID_PIN 4
+#define MBIM_CID_PIN_LIST 5
+#define MBIM_CID_HOME_PROVIDER 6
+#define MBIM_CID_PREFERRED_PROVIDERS 7
+#define MBIM_CID_VISIBLE_PROVIDERS 8
+#define MBIM_CID_REGISTER_STATE 9
+#define MBIM_CID_PACKET_SERVICE 10
+#define MBIM_CID_SIGNAL_STATE 11
+#define MBIM_CID_CONNECT 12
+#define MBIM_CID_PROVISIONED_CONTEXTS 13
+#define MBIM_CID_SERVICE_ACTIVATION 14
+#define MBIM_CID_IP_CONFIGURATION 15
+#define MBIM_CID_DEVICE_SERVICES 16
+#define MBIM_CID_DEVICE_SERVICE_SUBSCRIBE_LIST 19
+#define MBIM_CID_PACKET_STATISTICS 20
+#define MBIM_CID_NETWORK_IDLE_HINT 21
+#define MBIM_CID_EMERGENCY_MODE 22
+#define MBIM_CID_IP_PACKET_FILTERS 23
+#define MBIM_CID_MULTICARRIER_PROVIDERS 24
+
+struct mbim_cid_subscriber_ready_info {
+#define MBIM_SIMSTATE_NOTINITIALIZED 0
+#define MBIM_SIMSTATE_INITIALIZED 1
+#define MBIM_SIMSTATE_NOTINSERTED 2
+#define MBIM_SIMSTATE_BADSIM 3
+#define MBIM_SIMSTATE_FAILURE 4
+#define MBIM_SIMSTATE_NOTACTIVATED 5
+#define MBIM_SIMSTATE_LOCKED 6
+ uint32_t ready;
+
+ uint32_t sid_offs;
+ uint32_t sid_size;
+
+ uint32_t icc_offs;
+ uint32_t icc_size;
+
+#define MBIM_SIMUNIQEID_NONE 0
+#define MBIM_SIMUNIQEID_PROTECT 1
+ uint32_t info;
+
+ uint32_t no_pn;
+ struct {
+ uint32_t offs;
+ uint32_t size;
+ }
+ pn[];
+} __packed;
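+
+/*
+ * Note on the offs/size pairs used here and in the structures below:
+ * per the MBIM specification the offsets are byte offsets relative to
+ * the beginning of the information buffer (i.e. of the structure
+ * itself), and the referenced strings are little-endian UTF-16.
+ */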
+
+struct mbim_cid_radio_state {
+#define MBIM_RADIO_STATE_OFF 0
+#define MBIM_RADIO_STATE_ON 1
+ uint32_t state;
+} __packed;
+
+struct mbim_cid_radio_state_info {
+ uint32_t hw_state;
+ uint32_t sw_state;
+} __packed;
+
+struct mbim_cid_pin {
+#define MBIM_PIN_TYPE_NONE 0
+#define MBIM_PIN_TYPE_CUSTOM 1
+#define MBIM_PIN_TYPE_PIN1 2
+#define MBIM_PIN_TYPE_PIN2 3
+#define MBIM_PIN_TYPE_DEV_SIM_PIN 4
+#define MBIM_PIN_TYPE_DEV_FIRST_SIM_PIN 5
+#define MBIM_PIN_TYPE_NETWORK_PIN 6
+#define MBIM_PIN_TYPE_NETWORK_SUBSET_PIN 7
+#define MBIM_PIN_TYPE_SERVICE_PROVIDER_PIN 8
+#define MBIM_PIN_TYPE_CORPORATE_PIN 9
+#define MBIM_PIN_TYPE_SUBSIDY_LOCK 10
+#define MBIM_PIN_TYPE_PUK1 11
+#define MBIM_PIN_TYPE_PUK2 12
+#define MBIM_PIN_TYPE_DEV_FIRST_SIM_PUK 13
+#define MBIM_PIN_TYPE_NETWORK_PUK 14
+#define MBIM_PIN_TYPE_NETWORK_SUBSET_PUK 15
+#define MBIM_PIN_TYPE_SERVICE_PROVIDER_PUK 16
+#define MBIM_PIN_TYPE_CORPORATE_PUK 17
+ uint32_t type;
+
+#define MBIM_PIN_OP_ENTER 0
+#define MBIM_PIN_OP_ENABLE 1
+#define MBIM_PIN_OP_DISABLE 2
+#define MBIM_PIN_OP_CHANGE 3
+ uint32_t op;
+ uint32_t pin_offs;
+ uint32_t pin_size;
+ uint32_t newpin_offs;
+ uint32_t newpin_size;
+#define MBIM_PIN_MAXLEN 32
+ uint8_t data[2 * MBIM_PIN_MAXLEN];
+} __packed;
+
+struct mbim_cid_pin_info {
+ uint32_t type;
+
+#define MBIM_PIN_STATE_UNLOCKED 0
+#define MBIM_PIN_STATE_LOCKED 1
+ uint32_t state;
+ uint32_t remaining_attempts;
+} __packed;
+
+struct mbim_cid_pin_list_info {
+ struct mbim_pin_desc {
+
+#define MBIM_PINMODE_NOTSUPPORTED 0
+#define MBIM_PINMODE_ENABLED 1
+#define MBIM_PINMODE_DISABLED 2
+ uint32_t mode;
+
+#define MBIM_PINFORMAT_UNKNOWN 0
+#define MBIM_PINFORMAT_NUMERIC 1
+#define MBIM_PINFORMAT_ALPHANUMERIC 2
+ uint32_t format;
+
+ uint32_t minlen;
+ uint32_t maxlen;
+ }
+ pin1,
+ pin2,
+ dev_sim_pin,
+ first_dev_sim_pin,
+ net_pin,
+ net_sub_pin,
+ svp_pin,
+ corp_pin,
+ subsidy_lock,
+ custom;
+} __packed;
+
+struct mbim_cid_device_caps {
+#define MBIM_DEVTYPE_UNKNOWN 0
+#define MBIM_DEVTYPE_EMBEDDED 1
+#define MBIM_DEVTYPE_REMOVABLE 2
+#define MBIM_DEVTYPE_REMOTE 3
+ uint32_t devtype;
+
+ uint32_t cellclass; /* values: MBIM_CELLULAR_CLASS */
+ uint32_t voiceclass;
+ uint32_t simclass;
+ uint32_t dataclass; /* values: MBIM_DATA_CLASS */
+ uint32_t smscaps;
+ uint32_t cntrlcaps;
+ uint32_t max_sessions;
+
+ uint32_t custdataclass_offs;
+ uint32_t custdataclass_size;
+
+ uint32_t devid_offs;
+ uint32_t devid_size;
+
+ uint32_t fwinfo_offs;
+ uint32_t fwinfo_size;
+
+ uint32_t hwinfo_offs;
+ uint32_t hwinfo_size;
+
+ uint32_t data[];
+} __packed;
+
+struct mbim_cid_registration_state {
+ uint32_t provid_offs;
+ uint32_t provid_size;
+
+#define MBIM_REGACTION_AUTOMATIC 0
+#define MBIM_REGACTION_MANUAL 1
+ uint32_t regaction;
+ uint32_t data_class;
+
+ uint32_t data[];
+} __packed;
+
+struct mbim_cid_registration_state_info {
+ uint32_t nwerror;
+
+ uint32_t regstate; /* values: MBIM_REGISTER_STATE */
+
+#define MBIM_REGMODE_UNKNOWN 0
+#define MBIM_REGMODE_AUTOMATIC 1
+#define MBIM_REGMODE_MANUAL 2
+ uint32_t regmode;
+
+ uint32_t availclasses; /* values: MBIM_DATA_CLASS */
+ uint32_t curcellclass; /* values: MBIM_CELLULAR_CLASS */
+
+ uint32_t provid_offs;
+ uint32_t provid_size;
+
+ uint32_t provname_offs;
+ uint32_t provname_size;
+
+ uint32_t roamingtxt_offs;
+ uint32_t roamingtxt_size;
+
+#define MBIM_REGFLAGS_NONE 0
+#define MBIM_REGFLAGS_MANUAL_NOT_AVAILABLE 1
+#define MBIM_REGFLAGS_PACKETSERVICE_AUTOATTACH 2
+ uint32_t regflag;
+
+ uint32_t data[];
+} __packed;
+
+struct mbim_cid_packet_service {
+#define MBIM_PKTSERVICE_ACTION_ATTACH 0
+#define MBIM_PKTSERVICE_ACTION_DETACH 1
+ uint32_t action;
+} __packed;
+
+struct mbim_cid_packet_service_info {
+ uint32_t nwerror;
+
+#define MBIM_PKTSERVICE_STATE_UNKNOWN 0
+#define MBIM_PKTSERVICE_STATE_ATTACHING 1
+#define MBIM_PKTSERVICE_STATE_ATTACHED 2
+#define MBIM_PKTSERVICE_STATE_DETACHING 3
+#define MBIM_PKTSERVICE_STATE_DETACHED 4
+ uint32_t state;
+
+ uint32_t highest_dataclass;
+ uint64_t uplink_speed;
+ uint64_t downlink_speed;
+} __packed;
+
+struct mbim_cid_signal_state {
+ uint32_t rssi;
+ uint32_t err_rate;
+ uint32_t ss_intvl;
+ uint32_t rssi_thr;
+ uint32_t err_thr;
+} __packed;
+
+struct mbim_cid_connect {
+ uint32_t sessionid;
+
+#define MBIM_CONNECT_DEACTIVATE 0
+#define MBIM_CONNECT_ACTIVATE 1
+ uint32_t command;
+
+#define MBIM_ACCESS_MAXLEN 200
+ uint32_t access_offs;
+ uint32_t access_size;
+
+#define MBIM_USER_MAXLEN 510
+ uint32_t user_offs;
+ uint32_t user_size;
+
+#define MBIM_PASSWD_MAXLEN 510
+ uint32_t passwd_offs;
+ uint32_t passwd_size;
+
+#define MBIM_COMPRESSION_NONE 0
+#define MBIM_COMPRESSION_ENABLE 1
+ uint32_t compression;
+
+#define MBIM_AUTHPROT_NONE 0
+#define MBIM_AUTHPROT_PAP 1
+#define MBIM_AUTHPROT_CHAP 2
+#define MBIM_AUTHPROT_MSCHAP 3
+ uint32_t authprot;
+
+#define MBIM_CONTEXT_IPTYPE_DEFAULT 0
+#define MBIM_CONTEXT_IPTYPE_IPV4 1
+#define MBIM_CONTEXT_IPTYPE_IPV6 2
+#define MBIM_CONTEXT_IPTYPE_IPV4V6 3
+#define MBIM_CONTEXT_IPTYPE_IPV4ANDV6 4
+ uint32_t iptype;
+
+ uint8_t context[MBIM_UUID_LEN];
+
+ uint8_t data[MBIM_ACCESS_MAXLEN + MBIM_USER_MAXLEN +
+ MBIM_PASSWD_MAXLEN];
+
+} __packed;
+
+struct mbim_cid_connect_info {
+ uint32_t sessionid;
+
+#define MBIM_ACTIVATION_STATE_UNKNOWN 0
+#define MBIM_ACTIVATION_STATE_ACTIVATED 1
+#define MBIM_ACTIVATION_STATE_ACTIVATING 2
+#define MBIM_ACTIVATION_STATE_DEACTIVATED 3
+#define MBIM_ACTIVATION_STATE_DEACTIVATING 4
+ uint32_t activation;
+
+ uint32_t voice;
+ uint32_t iptype;
+ uint8_t context[MBIM_UUID_LEN];
+ uint32_t nwerror;
+} __packed;
+
+struct mbim_cid_ipv4_element {
+ uint32_t prefixlen;
+ uint32_t addr;
+} __packed;
+
+struct mbim_cid_ipv6_element {
+ uint32_t prefixlen;
+ uint8_t addr[16];
+} __packed;
+
+struct mbim_cid_ip_configuration_info {
+ uint32_t sessionid;
+
+#define MBIM_IPCONF_HAS_ADDRINFO 0x0001
+#define MBIM_IPCONF_HAS_GWINFO 0x0002
+#define MBIM_IPCONF_HAS_DNSINFO 0x0004
+#define MBIM_IPCONF_HAS_MTUINFO 0x0008
+ uint32_t ipv4_available;
+ uint32_t ipv6_available;
+
+ uint32_t ipv4_naddr;
+ uint32_t ipv4_addroffs;
+ uint32_t ipv6_naddr;
+ uint32_t ipv6_addroffs;
+
+ uint32_t ipv4_gwoffs;
+ uint32_t ipv6_gwoffs;
+
+ uint32_t ipv4_ndnssrv;
+ uint32_t ipv4_dnssrvoffs;
+ uint32_t ipv6_ndnssrv;
+ uint32_t ipv6_dnssrvoffs;
+
+ uint32_t ipv4_mtu;
+ uint32_t ipv6_mtu;
+
+ uint32_t data[];
+} __packed;
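+
+/*
+ * The *_available words are bitmasks of the MBIM_IPCONF_HAS_* flags; a
+ * consumer is expected to test them before trusting the corresponding
+ * offsets, e.g. (an illustrative sketch, "info" hypothetical):
+ *
+ *	if (le32toh(info->ipv4_available) & MBIM_IPCONF_HAS_DNSINFO)
+ *		... parse the ipv4_ndnssrv entries at ipv4_dnssrvoffs ...
+ */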
+
+struct mbim_cid_packet_statistics_info {
+ uint32_t in_discards;
+ uint32_t in_errors;
+ uint64_t in_octets;
+ uint64_t in_packets;
+ uint64_t out_octets;
+ uint64_t out_packets;
+ uint32_t out_errors;
+ uint32_t out_discards;
+} __packed;
+
+#ifdef _KERNEL
+
+struct mbim_descriptor {
+ uByte bLength;
+ uByte bDescriptorType;
+ uByte bDescriptorSubtype;
+#define MBIM_VER_MAJOR(v) (((v) >> 8) & 0x0f)
+#define MBIM_VER_MINOR(v) ((v) & 0x0f)
+ uWord bcdMBIMVersion;
+ uWord wMaxControlMessage;
+ uByte bNumberFilters;
+ uByte bMaxFilterSize;
+ uWord wMaxSegmentSize;
+ uByte bmNetworkCapabilities;
+} __packed;
+
+/*
+ * NCM Parameters
+ */
+#define NCM_GET_NTB_PARAMETERS 0x80
+
+struct ncm_ntb_parameters {
+ uWord wLength;
+ uWord bmNtbFormatsSupported;
+#define NCM_FORMAT_NTB16 0x0001
+#define NCM_FORMAT_NTB32 0x0002
+ uDWord dwNtbInMaxSize;
+ uWord wNdpInDivisor;
+ uWord wNdpInPayloadRemainder;
+ uWord wNdpInAlignment;
+ uWord wReserved1;
+ uDWord dwNtbOutMaxSize;
+ uWord wNdpOutDivisor;
+ uWord wNdpOutPayloadRemainder;
+ uWord wNdpOutAlignment;
+ uWord wNtbOutMaxDatagrams;
+} __packed;
+
+/*
+ * NCM Encoding
+ */
+#define MBIM_HDR16_LEN \
+ (sizeof(struct ncm_header16) + sizeof(struct ncm_pointer16))
+#define MBIM_HDR32_LEN \
+ (sizeof(struct ncm_header32) + sizeof(struct ncm_pointer32))
+
+struct ncm_header16 {
+#define NCM_HDR16_SIG 0x484d434e
+ uDWord dwSignature;
+ uWord wHeaderLength;
+ uWord wSequence;
+ uWord wBlockLength;
+ uWord wNdpIndex;
+} __packed;
+
+struct ncm_header32 {
+#define NCM_HDR32_SIG 0x686d636e
+ uDWord dwSignature;
+ uWord wHeaderLength;
+ uWord wSequence;
+ uDWord dwBlockLength;
+ uDWord dwNdpIndex;
+} __packed;
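+
+/*
+ * The header signatures are the ASCII tags "NCMH" (0x484d434e) and
+ * "ncmh" (0x686d636e) stored little-endian, matching the NCM 1.0
+ * NTH16/NTH32 signatures.
+ */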
+
+#define MBIM_NCM_NTH_SIDSHIFT 24
+#define MBIM_NCM_NTH_GETSID(s)	(((s) >> MBIM_NCM_NTH_SIDSHIFT) & 0xff)
+
+struct ncm_pointer16_dgram {
+ uWord wDatagramIndex;
+ uWord wDatagramLen;
+} __packed;
+
+struct ncm_pointer16 {
+#define MBIM_NCM_NTH16_IPS 0x00535049
+#define MBIM_NCM_NTH16_ISISG(s) (((s) & 0x00ffffff) == MBIM_NCM_NTH16_IPS)
+#define MBIM_NCM_NTH16_SIG(s) \
+ ((((s) & 0xff) << MBIM_NCM_NTH_SIDSHIFT) | MBIM_NCM_NTH16_IPS)
+ uDWord dwSignature;
+ uWord wLength;
+ uWord wNextNdpIndex;
+
+ /* Minimum is two datagrams, but can be more */
+ struct ncm_pointer16_dgram dgram[2];
+} __packed;
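+
+/*
+ * Illustration of the session-id encoding: for MBIM session 3 the
+ * NDP16 signature is MBIM_NCM_NTH16_SIG(3) == 0x03535049, i.e. "IPS"
+ * with the session id in the top byte, and MBIM_NCM_NTH_GETSID()
+ * recovers the 3.
+ */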
+
+struct ncm_pointer32_dgram {
+ uDWord dwDatagramIndex;
+ uDWord dwDatagramLen;
+} __packed;
+
+struct ncm_pointer32 {
+#define MBIM_NCM_NTH32_IPS 0x00737069
+#define MBIM_NCM_NTH32_ISISG(s) \
+ (((s) & 0x00ffffff) == MBIM_NCM_NTH32_IPS)
+#define MBIM_NCM_NTH32_SIG(s) \
+ ((((s) & 0xff) << MBIM_NCM_NTH_SIDSHIFT) | MBIM_NCM_NTH32_IPS)
+ uDWord dwSignature;
+ uWord wLength;
+ uWord wReserved6;
+ uDWord dwNextNdpIndex;
+ uDWord dwReserved12;
+
+ /* Minimum is two datagrams, but can be more */
+ struct ncm_pointer32_dgram dgram[2];
+} __packed;
+
+#endif /* _KERNEL */
+
+#endif /* _MBIM_H_ */
diff --git a/sys/dev/usb/net/ruephy.c b/sys/dev/usb/net/ruephy.c
index 05ed5cc6586a..2b5358ab8d0c 100644
--- a/sys/dev/usb/net/ruephy.c
+++ b/sys/dev/usb/net/ruephy.c
@@ -27,7 +27,6 @@
*
*/
-#include <sys/cdefs.h>
/*
* driver for RealTek RTL8150 internal PHY
*/
diff --git a/sys/dev/usb/net/uhso.c b/sys/dev/usb/net/uhso.c
index 300f982abeae..24135f6ccd5a 100644
--- a/sys/dev/usb/net/uhso.c
+++ b/sys/dev/usb/net/uhso.c
@@ -1566,10 +1566,6 @@ uhso_attach_ifnet(struct uhso_softc *sc, struct usb_interface *iface, int type)
}
sc->sc_ifp = ifp = if_alloc(IFT_OTHER);
- if (sc->sc_ifp == NULL) {
- device_printf(sc->sc_dev, "if_alloc() failed\n");
- return (-1);
- }
callout_init_mtx(&sc->sc_c, &sc->sc_mtx, 0);
mtx_lock(&sc->sc_mtx);
diff --git a/sys/dev/usb/net/usb_ethernet.c b/sys/dev/usb/net/usb_ethernet.c
index 6025412ebc9c..692ea64128b9 100644
--- a/sys/dev/usb/net/usb_ethernet.c
+++ b/sys/dev/usb/net/usb_ethernet.c
@@ -220,11 +220,6 @@ ue_attach_post_task(struct usb_proc_msg *_task)
error = 0;
CURVNET_SET_QUIET(vnet0);
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(ue->ue_dev, "could not allocate ifnet\n");
- goto fail;
- }
-
if_setsoftc(ifp, ue);
if_initname(ifp, "ue", ue->ue_unit);
if (ue->ue_methods->ue_attach_post_sub != NULL) {
@@ -320,11 +315,9 @@ uether_ifdetach(struct usb_ether *ue)
ether_ifdetach(ifp);
/* detach miibus */
- if (ue->ue_miibus != NULL) {
- bus_topo_lock();
- device_delete_child(ue->ue_dev, ue->ue_miibus);
- bus_topo_unlock();
- }
+ bus_topo_lock();
+ bus_generic_detach(ue->ue_dev);
+ bus_topo_unlock();
/* free interface instance */
if_free(ifp);
@@ -600,7 +593,14 @@ uether_rxmbuf(struct usb_ether *ue, struct mbuf *m,
/* finalize mbuf */
if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
m->m_pkthdr.rcvif = ifp;
- m->m_pkthdr.len = m->m_len = len;
+ if (len != 0) {
+ /*
+ * This is going to get it wrong for an mbuf chain, so let's
+ * make sure we're not doing that.
+ */
+ MPASS(m->m_next == NULL);
+ m->m_pkthdr.len = m->m_len = len;
+ }
/* enqueue for later when the lock can be released */
(void)mbufq_enqueue(&ue->ue_rxq, m);
diff --git a/sys/dev/usb/quirk/usb_quirk.c b/sys/dev/usb/quirk/usb_quirk.c
index a0a7b3fc75a5..802ea2b2ae6a 100644
--- a/sys/dev/usb/quirk/usb_quirk.c
+++ b/sys/dev/usb/quirk/usb_quirk.c
@@ -74,572 +74,509 @@ struct usb_quirk_entry {
static struct mtx usb_quirk_mtx;
-#define USB_QUIRK_VP(v,p,l,h,...) \
- { .vid = (v), .pid = (p), .lo_rev = (l), .hi_rev = (h), \
- .quirks = { __VA_ARGS__ } }
-#define USB_QUIRK(v,p,l,h,...) \
- USB_QUIRK_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, l, h, __VA_ARGS__)
+#define USB_QUIRK(v, p, ...) { \
+ .vid = USB_VENDOR_##v, .pid = USB_PRODUCT_##v##_##p, .lo_rev = 0x0000, \
+ .hi_rev = 0xffff, .quirks = { __VA_ARGS__ } \
+}
+
+/* Vendor only */
+#define USB_QUIRK_VO(v, ...) { \
+ .vid = USB_VENDOR_##v, .pid = 0x0000, .lo_rev = 0x0000, .hi_rev = 0xffff, \
+ .quirks = { UQ_MATCH_VENDOR_ONLY, __VA_ARGS__ } \
+}
+
+/* Specific revision(s) */
+#define USB_QUIRK_REV(v, p, l, h, ...) { \
+ .vid = USB_VENDOR_##v, .pid = USB_PRODUCT_##v##_##p, .lo_rev = (l), \
+ .hi_rev = (h), .quirks = { __VA_ARGS__ } \
+}
static struct usb_quirk_entry usb_quirks[USB_DEV_QUIRKS_MAX] = {
- USB_QUIRK(ASUS, LCM, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(INSIDEOUT, EDGEPORT4, 0x094, 0x094, UQ_SWAP_UNICODE),
- USB_QUIRK(DALLAS, J6502, 0x0a2, 0x0a2, UQ_BAD_ADC),
- USB_QUIRK(DALLAS, J6502, 0x0a2, 0x0a2, UQ_AU_NO_XU),
- USB_QUIRK(ALTEC, ADA70, 0x103, 0x103, UQ_BAD_ADC),
- USB_QUIRK(ALTEC, ASC495, 0x000, 0x000, UQ_BAD_AUDIO),
- USB_QUIRK(QTRONIX, 980N, 0x110, 0x110, UQ_SPUR_BUT_UP),
- USB_QUIRK(ALCOR2, KBD_HUB, 0x001, 0x001, UQ_SPUR_BUT_UP),
- USB_QUIRK(MCT, HUB0100, 0x102, 0x102, UQ_BUS_POWERED),
- USB_QUIRK(MCT, USB232, 0x102, 0x102, UQ_BUS_POWERED),
- USB_QUIRK(TI, UTUSB41, 0x110, 0x110, UQ_POWER_CLAIM),
- USB_QUIRK(TELEX, MIC1, 0x009, 0x009, UQ_AU_NO_FRAC),
- USB_QUIRK(SILICONPORTALS, YAPPHONE, 0x100, 0x100, UQ_AU_INP_ASYNC),
- USB_QUIRK(LOGITECH, UN53B, 0x0000, 0xffff, UQ_NO_STRINGS),
- USB_QUIRK(LOGITECH, G510S, 0x0000, 0xFFFF, UQ_KBD_BOOTPROTO),
- USB_QUIRK(REALTEK, RTL8196EU, 0x0000, 0xffff, UQ_CFG_INDEX_1),
- USB_QUIRK(ELSA, MODEM1, 0x0000, 0xffff, UQ_CFG_INDEX_1),
- USB_QUIRK(PLANEX2, MZKUE150N, 0x0000, 0xffff, UQ_CFG_INDEX_1),
- USB_QUIRK(CISCOLINKSYS, USB3GIGV1, 0x0000, 0xffff, UQ_CFG_INDEX_1),
- USB_QUIRK(REALTEK, RTL8156, 0x0000, 0xffff, UQ_CFG_INDEX_2),
+ USB_QUIRK(ASUS, LCM, UQ_HID_IGNORE),
+ USB_QUIRK_REV(INSIDEOUT, EDGEPORT4, 0x094, 0x094, UQ_SWAP_UNICODE),
+ USB_QUIRK_REV(DALLAS, J6502, 0x0a2, 0x0a2, UQ_BAD_ADC),
+ USB_QUIRK_REV(DALLAS, J6502, 0x0a2, 0x0a2, UQ_AU_NO_XU),
+ USB_QUIRK_REV(ALTEC, ADA70, 0x103, 0x103, UQ_BAD_ADC),
+ USB_QUIRK_REV(ALTEC, ASC495, 0x000, 0x000, UQ_BAD_AUDIO),
+ USB_QUIRK_REV(QTRONIX, 980N, 0x110, 0x110, UQ_SPUR_BUT_UP),
+ USB_QUIRK_REV(ALCOR2, KBD_HUB, 0x001, 0x001, UQ_SPUR_BUT_UP),
+ USB_QUIRK_REV(MCT, HUB0100, 0x102, 0x102, UQ_BUS_POWERED),
+ USB_QUIRK_REV(MCT, USB232, 0x102, 0x102, UQ_BUS_POWERED),
+ USB_QUIRK_REV(TI, UTUSB41, 0x110, 0x110, UQ_POWER_CLAIM),
+ USB_QUIRK_REV(TELEX, MIC1, 0x009, 0x009, UQ_AU_NO_FRAC),
+ USB_QUIRK_REV(SILICONPORTALS, YAPPHONE, 0x100, 0x100, UQ_AU_INP_ASYNC),
+ USB_QUIRK(LOGITECH, UN53B, UQ_NO_STRINGS),
+ USB_QUIRK(LOGITECH, G510S, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(REALTEK, RTL8196EU, UQ_CFG_INDEX_1),
+ USB_QUIRK(ELSA, MODEM1, UQ_CFG_INDEX_1),
+ USB_QUIRK(PLANEX2, MZKUE150N, UQ_CFG_INDEX_1),
+ USB_QUIRK(CISCOLINKSYS, USB3GIGV1, UQ_CFG_INDEX_1),
/* Quirks for printer devices */
- USB_QUIRK(HP, 895C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(HP, 880C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(HP, 815C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(HP, 810C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(HP, 830C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(HP, 1220C, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
- USB_QUIRK(XEROX, WCM15, 0x0000, 0xffff, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 895C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 880C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 815C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 810C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 830C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(HP, 1220C, UQ_BROKEN_BIDIR),
+ USB_QUIRK(XEROX, WCM15, UQ_BROKEN_BIDIR),
/* Devices which should be ignored by uhid */
- USB_QUIRK(APC, UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(APC, UPS1000, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6H375USB, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C550AVR, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C1250TWRK, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C1500TWRK, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C900UNV, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C100UNV, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C120UNV, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C800UNV, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(BELKIN, F6C1100UNV, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(CYBERPOWER, BC900D, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(CYBERPOWER, 1500CAVRLCD, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(CYBERPOWER, OR2200LCDRM2U, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(DELL2, VARIOUS_UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(CYPRESS, SILVERSHIELD, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(DELORME, EARTHMATE, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(DREAMLINK, DL100B, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(MICROCHIP, PICOLCD20X2, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(MICROCHIP, PICOLCD4X20, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(LIEBERT, POWERSURE_PXT, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(LIEBERT2, PSI1000, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(LIEBERT2, POWERSURE_PSA, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(MGE, UPS1, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(MGE, UPS2, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(POWERCOM, IMPERIAL_SERIES, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(POWERCOM, SMART_KING_PRO, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(POWERCOM, WOW, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(POWERCOM, VANGUARD, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(POWERCOM, BLACK_KNIGHT_PRO, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, AVR550U, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, AVR750U, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, ECO550UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, T750_INTL, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, RT_2200_INTL, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, OMNI1000LCD, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, OMNI900LCD, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, SMART_2200RMXL2U, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, UPS_3014, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, SU1500RTXL2UA, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, SU6000RT4U, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(TRIPPLITE2, SU1500RTXL2UA_2, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(APPLE, IPHONE, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(APPLE, IPHONE_3G, 0x0000, 0xffff, UQ_HID_IGNORE),
- USB_QUIRK(MEGATEC, UPS, 0x0000, 0xffff, UQ_HID_IGNORE),
+ USB_QUIRK(APC, UPS, UQ_HID_IGNORE),
+ USB_QUIRK(APC, UPS1000, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6H375USB, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C550AVR, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C1250TWRK, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C1500TWRK, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C900UNV, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C100UNV, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C120UNV, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C800UNV, UQ_HID_IGNORE),
+ USB_QUIRK(BELKIN, F6C1100UNV, UQ_HID_IGNORE),
+ USB_QUIRK(CYBERPOWER, BC900D, UQ_HID_IGNORE),
+ USB_QUIRK(CYBERPOWER, 1500CAVRLCD, UQ_HID_IGNORE),
+ USB_QUIRK(CYBERPOWER, OR2200LCDRM2U, UQ_HID_IGNORE),
+ USB_QUIRK(DELL2, VARIOUS_UPS, UQ_HID_IGNORE),
+ USB_QUIRK(CYPRESS, SILVERSHIELD, UQ_HID_IGNORE),
+ USB_QUIRK(DELORME, EARTHMATE, UQ_HID_IGNORE),
+ USB_QUIRK(DREAMLINK, DL100B, UQ_HID_IGNORE),
+ USB_QUIRK(MICROCHIP, PICOLCD20X2, UQ_HID_IGNORE),
+ USB_QUIRK(MICROCHIP, PICOLCD4X20, UQ_HID_IGNORE),
+ USB_QUIRK(LIEBERT, POWERSURE_PXT, UQ_HID_IGNORE),
+ USB_QUIRK(LIEBERT2, PSI1000, UQ_HID_IGNORE),
+ USB_QUIRK(LIEBERT2, POWERSURE_PSA, UQ_HID_IGNORE),
+ USB_QUIRK(MGE, UPS1, UQ_HID_IGNORE),
+ USB_QUIRK(MGE, UPS2, UQ_HID_IGNORE),
+ USB_QUIRK(POWERCOM, IMPERIAL_SERIES, UQ_HID_IGNORE),
+ USB_QUIRK(POWERCOM, SMART_KING_PRO, UQ_HID_IGNORE),
+ USB_QUIRK(POWERCOM, WOW, UQ_HID_IGNORE),
+ USB_QUIRK(POWERCOM, VANGUARD, UQ_HID_IGNORE),
+ USB_QUIRK(POWERCOM, BLACK_KNIGHT_PRO, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, AVR550U, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, AVR750U, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, ECO550UPS, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, T750_INTL, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, RT_2200_INTL, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, OMNI1000LCD, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, OMNI900LCD, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, SMART_2200RMXL2U, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, UPS_3014, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, SU1500RTXL2UA, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, SU6000RT4U, UQ_HID_IGNORE),
+ USB_QUIRK(TRIPPLITE2, SU1500RTXL2UA_2, UQ_HID_IGNORE),
+ USB_QUIRK(APPLE, IPHONE, UQ_HID_IGNORE),
+ USB_QUIRK(APPLE, IPHONE_3G, UQ_HID_IGNORE),
+ USB_QUIRK(MEGATEC, UPS, UQ_HID_IGNORE),
/* Devices which should be ignored by both ukbd and uhid */
- USB_QUIRK(CYPRESS, WISPY1A, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
- USB_QUIRK(METAGEEK, WISPY1B, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
- USB_QUIRK(METAGEEK, WISPY24X, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
- USB_QUIRK(METAGEEK2, WISPYDBX, 0x0000, 0xffff, UQ_KBD_IGNORE, UQ_HID_IGNORE),
- USB_QUIRK(TENX, UAUDIO0, 0x0101, 0x0101, UQ_AUDIO_SWAP_LR),
+ USB_QUIRK(CYPRESS, WISPY1A, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK, WISPY1B, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK, WISPY24X, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK(METAGEEK2, WISPYDBX, UQ_KBD_IGNORE, UQ_HID_IGNORE),
+ USB_QUIRK_REV(TENX, UAUDIO0, 0x0101, 0x0101, UQ_AUDIO_SWAP_LR),
/* MS keyboards do weird things */
- USB_QUIRK(MICROSOFT, NATURAL4000, 0x0000, 0xFFFF, UQ_KBD_BOOTPROTO),
- USB_QUIRK(MICROSOFT, WLINTELLIMOUSE, 0x0000, 0xffff, UQ_MS_LEADING_BYTE),
+ USB_QUIRK(MICROSOFT, NATURAL4000, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(MICROSOFT, WLINTELLIMOUSE, UQ_MS_LEADING_BYTE),
/* Quirk for Corsair Vengeance K60 keyboard */
- USB_QUIRK(CORSAIR, K60, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, K60, UQ_KBD_BOOTPROTO),
/* Quirk for Corsair Gaming K68 keyboard */
- USB_QUIRK(CORSAIR, K68, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, K68, UQ_KBD_BOOTPROTO),
/* Quirk for Corsair Vengeance K70 keyboard */
- USB_QUIRK(CORSAIR, K70, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, K70, UQ_KBD_BOOTPROTO),
/* Quirk for Corsair K70 RGB keyboard */
- USB_QUIRK(CORSAIR, K70_RGB, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, K70_RGB, UQ_KBD_BOOTPROTO),
/* Quirk for Corsair STRAFE Gaming keyboard */
- USB_QUIRK(CORSAIR, STRAFE, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
- USB_QUIRK(CORSAIR, STRAFE2, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, STRAFE, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(CORSAIR, STRAFE2, UQ_KBD_BOOTPROTO),
/* Quirk for Kensington Slimblade Trackball */
- USB_QUIRK(KENSINGTON, SLIMBLADE, 0x0000, 0xffff, UQ_MS_VENDOR_BTN),
+ USB_QUIRK(KENSINGTON, SLIMBLADE, UQ_MS_VENDOR_BTN),
/* umodem(4) device quirks */
- USB_QUIRK(METRICOM, RICOCHET_GS, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(SANYO, SCP4900, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(MOTOROLA2, T720C, 0x001, 0x001, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(EICON, DIVA852, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(SIEMENS2, ES75, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(QUALCOMM, CDMA_MSM, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(QUALCOMM2, CDMA_MSM, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(CURITEL, UM150, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(CURITEL, UM175, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(VERTEX, VW110L, 0x0000, 0xffff, UQ_ASSUME_CM_OVER_DATA),
- USB_QUIRK(BALTECH, SMARTCARDREADER, 0x0000, 0xffff, UQ_IGNORE_CDC_CM),
+ USB_QUIRK_REV(METRICOM, RICOCHET_GS, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK_REV(SANYO, SCP4900, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK_REV(MOTOROLA2, T720C, 0x001, 0x001, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK_REV(EICON, DIVA852, 0x100, 0x100, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK_REV(SIEMENS2, ES75, 0x000, 0x000, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(QUALCOMM, CDMA_MSM, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(QUALCOMM2, CDMA_MSM, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(CURITEL, UM150, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(CURITEL, UM175, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(VERTEX, VW110L, UQ_ASSUME_CM_OVER_DATA),
+ USB_QUIRK(BALTECH, SMARTCARDREADER, UQ_IGNORE_CDC_CM),
/* USB Mass Storage Class Quirks */
- USB_QUIRK_VP(USB_VENDOR_ASAHIOPTICAL, 0, UQ_MSC_NO_RS_CLEAR_UA,
- UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(ADDON, ATTACHE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK_VO(ASAHIOPTICAL, UQ_MSC_NO_RS_CLEAR_UA),
+ USB_QUIRK(ADDON, ATTACHE, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(ADDON, A256MB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ADDON, A256MB, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(ADDON, DISKPRO512, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ADDON, DISKPRO512, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(ADDONICS2, CABLE_205, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ADDONICS2, CABLE_205, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(AIPTEK, POCKETCAM3M, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(AIPTEK, POCKETCAM3M, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ALCOR, UMCR_9361, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ALCOR, UMCR_9361, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(ALCOR, TRANSCEND, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_TEST_UNIT_READY),
- USB_QUIRK(APACER, HT202, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(ASAHIOPTICAL, OPTIO230, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(APACER, HT202, UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(ASAHIOPTICAL, OPTIO230, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(ASAHIOPTICAL, OPTIO330, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ASAHIOPTICAL, OPTIO330, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(ATP, EUSB, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(BELKIN, USB2SCSI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(CASIO, QV_DIGICAM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(ATP, EUSB, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(BELKIN, USB2SCSI, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(CASIO, QV_DIGICAM, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(CCYU, ED1064, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(CENTURY, EX35QUAT, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(CREATIVE, NOMAD, 0x0001, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(CCYU, ED1064, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(CENTURY, EX35QUAT, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK_REV(CREATIVE, NOMAD, 0x0001, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1),
- USB_QUIRK(CYPRESS, XX6830XX, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(EMTEC, DANEELEC4GB, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(DESKNOTE, UCR_61S2B, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(CREATIVE, STAGE_SE_MINI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(CYPRESS, XX6830XX, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(EMTEC, DANEELEC4GB, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(DESKNOTE, UCR_61S2B, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(DMI, CFSM_RW, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(EMTEC, RUF2PS, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(EPSON, STYLUS_875DC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(DMI, CFSM_RW, UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(EMTEC, RUF2PS, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(EPSON, STYLUS_875DC, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(EPSON, STYLUS_895, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(EPSON, STYLUS_895, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(FEIYA, 5IN1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(FEIYA, ELANGO, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(FREECOM, DVD, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(FUJIPHOTO, MASS0100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ USB_QUIRK(FEIYA, 5IN1, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(FEIYA, ELANGO, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(FREECOM, DVD, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(FUJIPHOTO, MASS0100, UQ_MSC_FORCE_WIRE_CBI_I,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(GARMIN, DAKOTA20, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(GARMIN, FORERUNNER230, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(GARMIN, GPSMAP62S, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(GARMIN, EDGETOURINGPLUS, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(GARMIN, INSTINCTSOLAR, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(GENESYS, GL641USB2IDE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(GENESYS, GL641USB2IDE_2, 0x0000, 0xffff,
- UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_ATAPI,
- UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP,
- UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(GENESYS, GL641USB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
+ USB_QUIRK(GARMIN, DAKOTA20, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(GARMIN, FORERUNNER230, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(GARMIN, GPSMAP62S, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(GARMIN, EDGETOURINGPLUS, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(GARMIN, INSTINCTSOLAR, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(GENESYS, GL641USB2IDE, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP,
+ UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(GENESYS, GL641USB2IDE_2, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_FORCE_SHORT_INQ,
UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(GENESYS, GL641USB_2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(GENESYS, GL641USB, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(GENESYS, GL641USB_2, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
- USB_QUIRK(GENESYS, GL3220, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY,
- UQ_MSC_NO_RS_CLEAR_UA),
- USB_QUIRK(HAGIWARA, FG, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(HAGIWARA, FGSM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(GENESYS, GL3220, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_RS_CLEAR_UA),
+ USB_QUIRK(HAGIWARA, FG, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(HAGIWARA, FGSM, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(HITACHI, DVDCAM_DZ_MV100A, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(HITACHI, DVDCAM_DZ_MV100A, 0x0000, 0xffff,
- UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_SCSI,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(HITACHI, DVDCAM_USB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ USB_QUIRK(HITACHI, DVDCAM_USB, UQ_MSC_FORCE_WIRE_CBI_I,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(HP, CDW4E, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(HP, CDW8200, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
- UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_START_STOP),
- USB_QUIRK(HUAWEI, E3372_INIT, 0, 0xffff, UQ_MSC_NO_INQUIRY,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(IMAGINATION, DBX1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(HP, CDW4E, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(HP, CDW8200, UQ_MSC_FORCE_WIRE_CBI_I, UQ_MSC_FORCE_PROTO_ATAPI,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(HUAWEI, E3372_INIT, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(IMAGINATION, DBX1, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
- USB_QUIRK(INSYSTEM, USBCABLE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_START_STOP, UQ_MSC_ALT_IFACE_1),
- USB_QUIRK(INSYSTEM, ATAPI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(INSYSTEM, USBCABLE, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_ATAPI,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_START_STOP, UQ_MSC_ALT_IFACE_1),
+ USB_QUIRK(INSYSTEM, ATAPI, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(INSYSTEM, STORAGE_V2, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(INSYSTEM, STORAGE_V2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(VIALABS, VL701, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(IODATA, IU_CD2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(IODATA, DVR_UEH8, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(IOMEGA, ZIP100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI,
+ USB_QUIRK(VIALABS, VL701, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(IODATA, IU_CD2, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(IODATA, DVR_UEH8, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(IOMEGA, ZIP100, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
UQ_MSC_NO_TEST_UNIT_READY), /* XXX ZIP drives can also use ATAPI */
- USB_QUIRK(JMICRON, JMS566, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(JMICRON, JMS567, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(JMICRON, JM20337, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI,
+ USB_QUIRK(JMICRON, JMS566, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(JMICRON, JMS567, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(JMICRON, JM20337, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(KINGSTON, HYPERX3_0, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(KINGSTON, DATATRAVELER3_0, UQ_MSC_NO_PREVENT_ALLOW,
UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(KINGSTON, HYPERX3_0, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(KINGSTON, DATATRAVELER3_0, 0x0000, 0xffff, UQ_MSC_NO_PREVENT_ALLOW, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(KYOCERA, FINECAM_L3, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(KYOCERA, FINECAM_L3, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(KYOCERA, FINECAM_S3X, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(KYOCERA, FINECAM_S3X, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(KYOCERA, FINECAM_S4, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(KYOCERA, FINECAM_S4, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(KYOCERA, FINECAM_S5, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(KYOCERA, FINECAM_S5, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(LACIE, HD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(LEXAR, CF_READER, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(LACIE, HD, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(LEXAR, CF_READER, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(LEXAR, JUMPSHOT, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(LEXAR, JUMPDRIVE, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(LOGITEC, LDR_H443SU2, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(LOGITEC, LDR_H443U2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI,),
- USB_QUIRK(MELCO, DUBPXXG, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(MICROTECH, DPCM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_START_STOP),
- USB_QUIRK(MICRON, REALSSD, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(MICROTECH, SCSIDB25, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(LEXAR, JUMPSHOT, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(LEXAR, JUMPDRIVE, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(LOGITEC, LDR_H443SU2, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(LOGITEC, LDR_H443U2, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(MICROTECH, SCSIHD50, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(MELCO, DUBPXXG, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(MICROTECH, DPCM, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(MICRON, REALSSD, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(MICROTECH, SCSIDB25, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(MINOLTA, E223, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(MINOLTA, F300, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(MICROTECH, SCSIHD50, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(MITSUMI, CDRRW, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI |
- UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(MOTOROLA2, E398, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_INQUIRY_EVPD, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK_VP(USB_VENDOR_MPMAN, 0, UQ_MSC_NO_SYNC_CACHE,
- UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(MSYSTEMS, DISKONKEY, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(MINOLTA, E223, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MINOLTA, F300, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(MITSUMI, CDRRW, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(MOTOROLA2, E398, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_INQUIRY_EVPD, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK_VO(MPMAN, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(MSYSTEMS, DISKONKEY, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_GETMAXLUN,
UQ_MSC_NO_RS_CLEAR_UA),
- USB_QUIRK(MSYSTEMS, DISKONKEY2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(MSYSTEMS, DISKONKEY2, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(MYSON, HEDEN, 0x0000, 0xffff, UQ_MSC_IGNORE_RESIDUE,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(NEODIO, ND3260, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(MYSON, HEDEN, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(NEODIO, ND3260, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ),
- USB_QUIRK(NETAC, CF_CARD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(NETAC, CF_CARD, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(NETAC, ONLYDISK, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(NETAC, ONLYDISK, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(NETCHIP, CLIK_40, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_ATAPI,
- UQ_MSC_NO_INQUIRY),
- USB_QUIRK(NETCHIP, POCKETBOOK, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(NIKON, D300, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(NORELSYS, NS1081, 0x0000, 0xffff, UQ_MSC_NO_RS_CLEAR_UA,
- UQ_MSC_NO_INQUIRY),
- USB_QUIRK(OLYMPUS, C1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_WRONG_CSWSIG),
- USB_QUIRK(OLYMPUS, C700, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(NETCHIP, CLIK_40, UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(NETCHIP, POCKETBOOK, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(NIKON, D300, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(NORELSYS, NS1081, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(OLYMPUS, C1, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_WRONG_CSWSIG),
+ USB_QUIRK(OLYMPUS, C700, UQ_MSC_NO_GETMAXLUN),
/* Selected Olympus DSLR and EVIL models. See ../usbdevs for more
* details.
*
* Not all quirks apply to all models. The commented-out entries are
 * correct for the models in question.
*/
- USB_QUIRK(OLYMPUS, E_1, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
- UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(OLYMPUS, E_1, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_PREVENT_ALLOW, UQ_MSC_NO_SYNC_CACHE),
/*
* Product code 0x118.
- * USB_QUIRK(OLYMPUS, E_300, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
+ * USB_QUIRK(OLYMPUS, E_300, UQ_MSC_NO_GETMAXLUN,
* UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
* UQ_MSC_NO_SYNC_CACHE),
- * USB_QUIRK(OLYMPUS, E_30, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
+ * USB_QUIRK(OLYMPUS, E_30, UQ_MSC_NO_GETMAXLUN,
* UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
* UQ_MSC_NO_SYNC_CACHE), */
- USB_QUIRK(OLYMPUS, E_330, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
- USB_QUIRK(OLYMPUS, E_PM1, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(OLYMPUS, E_330, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_PREVENT_ALLOW, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(OLYMPUS, E_PM1, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_PREVENT_ALLOW, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
/* Product code 0x12e.
- * USB_QUIRK(OLYMPUS, E_PM2, 0x0000, 0xffff, 0),
- * USB_QUIRK(OLYMPUS, E_M1MarkII, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
+ * USB_QUIRK(OLYMPUS, E_PM2, 0),
+ * USB_QUIRK(OLYMPUS, E_M1MarkII, UQ_MSC_NO_GETMAXLUN,
* UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
* UQ_MSC_NO_SYNC_CACHE),
- * USB_QUIRK(OLYMPUS, E_M5MarkIII, 0x0000, 0xffff, 0),
+ * USB_QUIRK(OLYMPUS, E_M5MarkIII, 0),
*/
- USB_QUIRK(OLYMPUS, E_M1, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_PREVENT_ALLOW,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
- USB_QUIRK(ONSPEC, SDS_HOTFIND_D, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(OLYMPUS, E_M1, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_TEST_UNIT_READY,
+ UQ_MSC_NO_PREVENT_ALLOW, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(ONSPEC, SDS_HOTFIND_D, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(ONSPEC, CFMS_RW, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, CFSM_COMBO, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, CFSM_READER, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, CFSM_READER2, 0x0000, 0xffff,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, MDCFE_B_CF_READER, 0x0000, 0xffff,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, MDSM_B_READER, 0x0000, 0xffff,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(ONSPEC, READER, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(ONSPEC, UCF100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ONSPEC, CFMS_RW, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_COMBO, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_READER, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, CFSM_READER2, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, MDCFE_B_CF_READER, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, MDSM_B_READER, UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(ONSPEC, READER, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(ONSPEC, UCF100, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(ONSPEC2, IMAGEMATE_SDDR55, 0x0000, 0xffff,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(PANASONIC, KXL840AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(ONSPEC2, IMAGEMATE_SDDR55, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(PANASONIC, KXL840AN, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(PANASONIC, KXLCB20AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(PANASONIC, KXLCB20AN, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(PANASONIC, KXLCB35AN, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(PANASONIC, KXLCB35AN, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(PANASONIC, LS120CAM, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_UFI),
- USB_QUIRK(PLEXTOR, 40_12_40U, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(PANASONIC, LS120CAM, UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(PLEXTOR, 40_12_40U, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_TEST_UNIT_READY),
- USB_QUIRK(PNY, ATTACHE2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE,
- UQ_MSC_NO_START_STOP),
- USB_QUIRK(PROLIFIC, PL2506, 0x0000, 0xffff,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_PREVENT_ALLOW),
- USB_QUIRK_VP(USB_VENDOR_SAMSUNG_TECHWIN,
- USB_PRODUCT_SAMSUNG_TECHWIN_DIGIMAX_410, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(PNY, ATTACHE2, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_START_STOP),
+ USB_QUIRK(PROLIFIC, PL2506, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_PREVENT_ALLOW),
+ USB_QUIRK(SAMSUNG_TECHWIN, DIGIMAX_410, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SANDISK, SDDR05A, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SANDISK, SDDR09, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
+ USB_QUIRK(SANDISK, SDDR05A, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1),
+ USB_QUIRK(SANDISK, SDDR09, UQ_MSC_FORCE_PROTO_SCSI,
UQ_MSC_READ_CAP_OFFBY1, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SANDISK, SDDR12, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SANDISK, SDCZ2_128, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(SANDISK, SDCZ2_256, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SANDISK, SDDR12, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_READ_CAP_OFFBY1),
+ USB_QUIRK(SANDISK, SDCZ2_128, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(SANDISK, SDCZ2_256, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(SANDISK, SDCZ4_128, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SANDISK, SDCZ4_128, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(SANDISK, SDCZ4_256, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SANDISK, SDCZ4_256, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(SANDISK, SDCZ48_32, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE,
+ USB_QUIRK(SANDISK, SDCZ48_32, UQ_MSC_NO_SYNC_CACHE,
UQ_MSC_NO_TEST_UNIT_READY),
- USB_QUIRK(SANDISK, SDDR31, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SANDISK, SDDR31, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_READ_CAP_OFFBY1),
- USB_QUIRK(SANDISK, IMAGEMATE_SDDR289, 0x0000, 0xffff,
- UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SCANLOGIC, SL11R, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SANDISK, IMAGEMATE_SDDR289, UQ_MSC_NO_SYNC_CACHE,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SCANLOGIC, SL11R, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SHUTTLE, EUSB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
- UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_START_STOP, UQ_MSC_SHUTTLE_INIT),
- USB_QUIRK(SHUTTLE, CDRW, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(SHUTTLE, CF, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK(SHUTTLE, EUSB, UQ_MSC_FORCE_WIRE_CBI_I, UQ_MSC_FORCE_PROTO_ATAPI,
+ UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_START_STOP, UQ_MSC_SHUTTLE_INIT),
+ USB_QUIRK(SHUTTLE, CDRW, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(SHUTTLE, CF, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(SHUTTLE, EUSBATAPI, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(SHUTTLE, EUSBATAPI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(SHUTTLE, EUSBCFSM, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(SHUTTLE, EUSCSI, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SHUTTLE, EUSBCFSM, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SHUTTLE, EUSCSI, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SHUTTLE, HIFD, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(SHUTTLE, SDDR09, UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SHUTTLE, ZIOMMC, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(SHUTTLE, HIFD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SHUTTLE, SDDR09, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SHUTTLE, ZIOMMC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SIGMATEL, I_BEAD100, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SIGMATEL, I_BEAD100, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_SHUTTLE_INIT),
- USB_QUIRK(SIIG, WINTERREADER, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SIIG, WINTERREADER, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(SKANHEX, MD_7425, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SKANHEX, MD_7425, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SKANHEX, SX_520Z, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SKANHEX, SX_520Z, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SONY, HANDYCAM, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK_REV(SONY, HANDYCAM, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
- USB_QUIRK(SONY, CLIE_40_MS, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SONY, CLIE_40_MS, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SONY, DSC, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK_REV(SONY, DSC, 0x0500, 0x0500, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
- USB_QUIRK(SONY, DSC, 0x0600, 0x0600, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK_REV(SONY, DSC, 0x0600, 0x0600, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_RBC, UQ_MSC_RBC_PAD_TO_12),
- USB_QUIRK(SONY, DSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(SONY, HANDYCAM, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(SONY, MSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(SONY, MS_MSC_U03, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SONY, MS_NW_MS7, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SONY, DSC, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, HANDYCAM, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, MSC, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(SONY, MS_MSC_U03, UQ_MSC_FORCE_WIRE_CBI,
+ UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(SONY, MS_NW_MS7, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SONY, MS_PEG_N760C, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SONY, MS_PEG_N760C, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(SONY, MSACUS1, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(SONY, PORTABLE_HDD_V2, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(SONY, MSACUS1, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_GETMAXLUN),
+ USB_QUIRK(SONY, PORTABLE_HDD_V2, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(STMICRO, ST72682, 0x0000, 0xffff, UQ_MSC_NO_PREVENT_ALLOW),
- USB_QUIRK(SUPERTOP, IDE, 0x0000, 0xffff, UQ_MSC_IGNORE_RESIDUE,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(SUPERTOP, FLASHDRIVE, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
+ USB_QUIRK(STMICRO, ST72682, UQ_MSC_NO_PREVENT_ALLOW),
+ USB_QUIRK(SUPERTOP, IDE, UQ_MSC_IGNORE_RESIDUE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(SUPERTOP, FLASHDRIVE, UQ_MSC_NO_TEST_UNIT_READY,
UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(TAUGA, CAMERAMATE, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(TEAC, FD05PUB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_UFI),
- USB_QUIRK(TECLAST, TLC300, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(TREK, MEMKEY, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(TREK, THUMBDRIVE_8MB, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(TAUGA, CAMERAMATE, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(TEAC, FD05PUB, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(TECLAST, TLC300, UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(TREK, MEMKEY, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(TREK, THUMBDRIVE_8MB, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(TRUMPION, C3310, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_UFI),
- USB_QUIRK(TRUMPION, MP3, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_RBC),
- USB_QUIRK(TRUMPION, T33520, 0x0000, 0xffff, UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(TWINMOS, MDIV, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI),
- USB_QUIRK(VIA, USB2IDEBRIDGE, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
+ USB_QUIRK(TRUMPION, C3310, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_UFI),
+ USB_QUIRK(TRUMPION, MP3, UQ_MSC_FORCE_PROTO_RBC),
+ USB_QUIRK(TRUMPION, T33520, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(TWINMOS, MDIV, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(VIA, USB2IDEBRIDGE, UQ_MSC_FORCE_WIRE_BBB,
UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(VIVITAR, 35XX, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(WESTERN, COMBO, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(WESTERN, EXTHDD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(WESTERN, MYBOOK, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY_EVPD,
- UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_00, 0x0000, 0xffff, UQ_MSC_FORCE_SHORT_INQ),
- USB_QUIRK(WESTERN, MYPASSPORT_01, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_02, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_03, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_04, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_05, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_06, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_07, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_08, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_09, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_10, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORT_11, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_00, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_01, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_02, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_03, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_04, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_05, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_06, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_07, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_08, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTES_09, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(WESTERN, MYPASSPORTUL_00, 0x0000, 0xffff, UQ_MSC_NO_TEST_UNIT_READY),
- USB_QUIRK(WINMAXGROUP, FLASH64MC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(YANO, FW800HD, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_FORCE_SHORT_INQ,
- UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
- USB_QUIRK(YANO, U640MO, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI_I,
+ USB_QUIRK(VIVITAR, 35XX, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_INQUIRY),
+ USB_QUIRK(WESTERN, COMBO, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(WESTERN, EXTHDD, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(WESTERN, MYBOOK, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_INQUIRY_EVPD, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_00, UQ_MSC_FORCE_SHORT_INQ),
+ USB_QUIRK(WESTERN, MYPASSPORT_01, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_02, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_03, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_04, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_05, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_06, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_07, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_08, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_09, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_10, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORT_11, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_00, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_01, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_02, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_03, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_04, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_05, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_06, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_07, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_08, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTES_09, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(WESTERN, MYPASSPORTUL_00, UQ_MSC_NO_TEST_UNIT_READY),
+ USB_QUIRK(WINMAXGROUP, FLASH64MC, UQ_MSC_FORCE_WIRE_BBB,
+ UQ_MSC_NO_INQUIRY, UQ_MSC_FORCE_PROTO_SCSI),
+ USB_QUIRK(YANO, FW800HD, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_FORCE_SHORT_INQ, UQ_MSC_NO_START_STOP, UQ_MSC_IGNORE_RESIDUE),
+ USB_QUIRK(YANO, U640MO, UQ_MSC_FORCE_WIRE_CBI_I,
UQ_MSC_FORCE_PROTO_ATAPI, UQ_MSC_FORCE_SHORT_INQ),
- USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0000, 0x007F, UQ_MSC_FORCE_WIRE_CBI,
+ USB_QUIRK_REV(YEDATA, FLASHBUSTERU, 0x0000, 0x007F, UQ_MSC_FORCE_WIRE_CBI,
UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0080, 0x0080, UQ_MSC_FORCE_WIRE_CBI_I,
- UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
- UQ_MSC_NO_TEST_UNIT_READY, UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(YEDATA, FLASHBUSTERU, 0x0081, 0xFFFF, UQ_MSC_FORCE_WIRE_CBI_I,
- UQ_MSC_FORCE_PROTO_UFI, UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
- UQ_MSC_NO_GETMAXLUN),
- USB_QUIRK(ZORAN, EX20DSC, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_CBI,
- UQ_MSC_FORCE_PROTO_ATAPI),
- USB_QUIRK(MEIZU, M6_SL, 0x0000, 0xffff, UQ_MSC_FORCE_WIRE_BBB,
- UQ_MSC_FORCE_PROTO_SCSI, UQ_MSC_NO_INQUIRY, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(TOSHIBA, TRANSMEMORY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE,
+ UQ_MSC_NO_TEST_UNIT_READY),
+ USB_QUIRK_REV(YEDATA, FLASHBUSTERU, 0x0080, 0x0080,
+ UQ_MSC_FORCE_WIRE_CBI_I, UQ_MSC_FORCE_PROTO_UFI,
+ UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED,
+ UQ_MSC_NO_TEST_UNIT_READY),
+ USB_QUIRK_REV(YEDATA, FLASHBUSTERU, 0x0081, 0xFFFF,
+ UQ_MSC_FORCE_WIRE_CBI_I, UQ_MSC_FORCE_PROTO_UFI,
+ UQ_MSC_NO_RS_CLEAR_UA, UQ_MSC_FLOPPY_SPEED),
+ USB_QUIRK(ZORAN, EX20DSC, UQ_MSC_FORCE_WIRE_CBI, UQ_MSC_FORCE_PROTO_ATAPI),
+ USB_QUIRK(MEIZU, M6_SL, UQ_MSC_FORCE_WIRE_BBB, UQ_MSC_FORCE_PROTO_SCSI,
+ UQ_MSC_NO_INQUIRY, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(TOSHIBA, TRANSMEMORY, UQ_MSC_NO_SYNC_CACHE,
UQ_MSC_NO_PREVENT_ALLOW),
- USB_QUIRK(VIALABS, USB30SATABRIDGE, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE),
- USB_QUIRK(QUALCOMMINC, ZTE_MF730M, 0x0000, 0xffff, UQ_MSC_NO_GETMAXLUN,
+ USB_QUIRK(VIALABS, USB30SATABRIDGE, UQ_MSC_NO_SYNC_CACHE),
+ USB_QUIRK(QUALCOMMINC, ZTE_MF730M, UQ_MSC_NO_GETMAXLUN,
UQ_MSC_NO_INQUIRY, UQ_CFG_INDEX_0),
- USB_QUIRK(SMART2, G2MEMKEY, 0x0000, 0xffff, UQ_MSC_NO_INQUIRY),
- USB_QUIRK(RALINK, RT_STOR, 0x0001, 0x0001, UQ_MSC_IGNORE),
- USB_QUIRK(REALTEK, RTW8821CU_CD, 0x0001, 0x0001, UQ_MSC_IGNORE),
+ USB_QUIRK(SMART2, G2MEMKEY, UQ_MSC_NO_INQUIRY),
+ USB_QUIRK_REV(RALINK, RT_STOR, 0x0001, 0x0001, UQ_MSC_IGNORE),
+ USB_QUIRK(REALTEK, RTW8821CU_CD, UQ_MSC_IGNORE),
/* Non-standard USB MIDI devices */
- USB_QUIRK(ROLAND, UM1, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SC8850, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SD90, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UM880N, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UA100, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UM4, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, U8, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UM2, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SC8820, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, PC300, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SK500, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SCD70, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UM550, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SD20, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, SD80, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, UA700, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(ROLAND, PCR300, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(EGO, M4U, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
- USB_QUIRK(LOGILINK, U2M, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
- USB_QUIRK(MEDELI, DD305, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI, UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(REDOCTANE, GHMIDI, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
- USB_QUIRK(TEXTECH, U2M_1, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
- USB_QUIRK(TEXTECH, U2M_2, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
- USB_QUIRK(WCH2, U2M, 0x0000, 0xffff, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(ROLAND, UM1, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SC8850, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SD90, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UM880N, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UA100, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UM4, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, U8, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UM2, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SC8820, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, PC300, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SK500, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SCD70, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UM550, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SD20, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, SD80, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, UA700, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(ROLAND, PCR300, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(EGO, M4U, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(LOGILINK, U2M, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(MEDELI, DD305, UQ_SINGLE_CMD_MIDI, UQ_MATCH_VENDOR_ONLY),
+ USB_QUIRK(REDOCTANE, GHMIDI, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(TEXTECH, U2M_1, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(TEXTECH, U2M_2, UQ_SINGLE_CMD_MIDI),
+ USB_QUIRK(WCH2, U2M, UQ_SINGLE_CMD_MIDI),
/* Non-standard USB AUDIO devices */
- USB_QUIRK(MAUDIO, FASTTRACKULTRA, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(MAUDIO, FASTTRACKULTRA8R, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
- USB_QUIRK(CMEDIA, CM6206, 0x0000, 0xffff, UQ_AU_SET_SPDIF_CM6206),
- USB_QUIRK(PLOYTEC, SPL_CRIMSON_1, 0x0000, 0xffff, UQ_CFG_INDEX_1),
- USB_QUIRK(ROLAND, UA25EX_AD, 0x0000, 0xffff, UQ_AU_VENDOR_CLASS),
-
- /*
- * Quirks for manufacturers which USB devices does not respond
- * after issuing non-supported commands:
- */
- USB_QUIRK(ALCOR, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MSC_NO_TEST_UNIT_READY, UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(APPLE, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(FEIYA, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(REALTEK, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
- USB_QUIRK(INITIO, DUMMY, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE, UQ_MATCH_VENDOR_ONLY),
+ USB_QUIRK(MAUDIO, FASTTRACKULTRA, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(MAUDIO, FASTTRACKULTRA8R, UQ_AU_VENDOR_CLASS),
+ USB_QUIRK(CMEDIA, CM6206, UQ_AU_SET_SPDIF_CM6206),
+ USB_QUIRK(PLOYTEC, SPL_CRIMSON_1, UQ_CFG_INDEX_1),
+ USB_QUIRK(ROLAND, UA25EX_AD, UQ_AU_VENDOR_CLASS),
/* DYMO LabelManager Pnp */
- USB_QUIRK(DYMO, LABELMANAGERPNP, 0x0000, 0xffff, UQ_MSC_DYMO_EJECT),
+ USB_QUIRK(DYMO, LABELMANAGERPNP, UQ_MSC_DYMO_EJECT),
/* Holtek USB gaming keyboard */
- USB_QUIRK(HOLTEK, F85, 0x0000, 0xffff, UQ_KBD_BOOTPROTO),
+ USB_QUIRK(HOLTEK, F85, UQ_KBD_BOOTPROTO),
/* This works much better with if_cdce than if_ure */
- USB_QUIRK(LENOVO, TBT3LAN, 0x0000, 0xffff, UQ_CFG_INDEX_1),
+ USB_QUIRK(LENOVO, TBT3LAN, UQ_CFG_INDEX_1),
};
-#undef USB_QUIRK_VP
+#undef USB_QUIRK_VO
+#undef USB_QUIRK_REV
#undef USB_QUIRK
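For context, the three macros consumed by the table above are defined, and #undef'd here, earlier in usb_quirk.c; their definitions are outside this hunk. A minimal sketch of the reworked entry points, assuming the pre-existing USB_QUIRK_VP initializer is unchanged (an illustration, not the committed definitions):

/*
 * Sketch only -- assumed, not taken from this diff.  USB_QUIRK() now
 * implies the full 0x0000..0xffff device revision range, USB_QUIRK_REV()
 * keeps explicit low/high revision bounds, and USB_QUIRK_VO() matches on
 * the vendor ID alone.
 */
#define	USB_QUIRK_REV(v,p,l,h,...) \
	USB_QUIRK_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, (l), (h), \
	    __VA_ARGS__)
#define	USB_QUIRK(v,p,...) \
	USB_QUIRK_REV(v, p, 0x0000, 0xffff, __VA_ARGS__)
#define	USB_QUIRK_VO(v,...) \
	USB_QUIRK_VP(USB_VENDOR_##v, 0, 0x0000, 0xffff, \
	    UQ_MATCH_VENDOR_ONLY, __VA_ARGS__)

Under that reading, USB_QUIRK(ATP, EUSB, UQ_MSC_NO_SYNC_CACHE) expands to the same table entry as the old USB_QUIRK(ATP, EUSB, 0x0000, 0xffff, UQ_MSC_NO_SYNC_CACHE), which is why most of this hunk is a mechanical drop of the two revision arguments.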
static const char *usb_quirk_str[USB_QUIRK_MAX] = {
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index e0a1ff29b0a4..a549f93b2af1 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -205,6 +205,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(ALINK, 3GU, 0),
U3G_DEV(ALINK, DWM652U5, 0),
U3G_DEV(ALINK, SIM7600E, 0),
+ U3G_DEV(ALINK, SIM7600G, 0),
U3G_DEV(AMOI, H01, 0),
U3G_DEV(AMOI, H01A, 0),
U3G_DEV(AMOI, H02, 0),
@@ -511,21 +512,21 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(QUANTA, GLX, 0),
U3G_DEV(QUANTA, Q101, 0),
U3G_DEV(QUANTA, Q111, 0),
+ U3G_DEV(QUECTEL, EC21, 0),
U3G_DEV(QUECTEL, EC25, 0),
U3G_DEV(QUECTEL, EM05, 0),
- U3G_DEV(QUECTEL, EC21, 0),
U3G_DEV(QUECTEL, EG91, 0),
U3G_DEV(QUECTEL, EG95, 0),
+ U3G_DEV(QUECTEL, BG96, 0),
U3G_DEV(QUECTEL, EP06, 0),
U3G_DEV(QUECTEL, EG065K, 0),
- U3G_DEV(QUECTEL, EM12, 0),
- U3G_DEV(QUECTEL, BG96, 0),
- U3G_DEV(QUECTEL, BG95, 0),
- U3G_DEV(QUECTEL, AG35, 0),
U3G_DEV(QUECTEL, AG15, 0),
+ U3G_DEV(QUECTEL, AG35, 0),
U3G_DEV(QUECTEL, AG520, 0),
U3G_DEV(QUECTEL, AG550, 0),
+ U3G_DEV(QUECTEL, EM12, 0),
U3G_DEV(QUECTEL, EM160R, 0),
+ U3G_DEV(QUECTEL, BG95, 0),
U3G_DEV(QUECTEL, RG500, 0),
U3G_DEV(QUECTEL, RG520, 0),
U3G_DEV(QUECTEL, EC200, 0),
@@ -567,6 +568,7 @@ static const STRUCT_USB_HOST_ID u3g_devs[] = {
U3G_DEV(SIERRA, MC5728, 0),
U3G_DEV(SIERRA, MC7354, 0),
U3G_DEV(SIERRA, MC7355, 0),
+ U3G_DEV(SIERRA, AC340U, 0),
U3G_DEV(SIERRA, MC7430, 0),
U3G_DEV(SIERRA, MC8700, 0),
U3G_DEV(SIERRA, MC8755, 0),
diff --git a/sys/dev/usb/serial/ubsa.c b/sys/dev/usb/serial/ubsa.c
index 5ff207a17c62..38782d5aef11 100644
--- a/sys/dev/usb/serial/ubsa.c
+++ b/sys/dev/usb/serial/ubsa.c
@@ -26,7 +26,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*-
* Copyright (c) 2001 The NetBSD Foundation, Inc.
* All rights reserved.
diff --git a/sys/dev/usb/serial/ubser.c b/sys/dev/usb/serial/ubser.c
index 91fefceb8240..978639a809be 100644
--- a/sys/dev/usb/serial/ubser.c
+++ b/sys/dev/usb/serial/ubser.c
@@ -64,7 +64,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* BWCT serial adapter driver
*/
diff --git a/sys/dev/usb/serial/uchcom.c b/sys/dev/usb/serial/uchcom.c
index a61b5a92364c..fdc5515fa722 100644
--- a/sys/dev/usb/serial/uchcom.c
+++ b/sys/dev/usb/serial/uchcom.c
@@ -57,10 +57,8 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
- * Driver for WinChipHead CH341/340, the worst USB-serial chip in the
- * world.
+ * Driver for WinChipHead CH9102/343/341/340.
*/
#include <sys/stdint.h>
@@ -102,17 +100,19 @@ SYSCTL_INT(_hw_usb_uchcom, OID_AUTO, debug, CTLFLAG_RWTUN,
&uchcom_debug, 0, "uchcom debug level");
#endif
-#define UCHCOM_IFACE_INDEX 0
-#define UCHCOM_CONFIG_INDEX 0
+#define UCHCOM_IFACE_INDEX 0
+#define UCHCOM_CONFIG_INDEX 0
+#define UCHCOM_SECOND_IFACE_INDEX 1
#define UCHCOM_REV_CH340 0x0250
#define UCHCOM_INPUT_BUF_SIZE 8
-#define UCHCOM_REQ_GET_VERSION 0x5F
-#define UCHCOM_REQ_READ_REG 0x95
-#define UCHCOM_REQ_WRITE_REG 0x9A
-#define UCHCOM_REQ_RESET 0xA1
-#define UCHCOM_REQ_SET_DTRRTS 0xA4
+#define UCHCOM_REQ_GET_VERSION 0x5F
+#define UCHCOM_REQ_READ_REG 0x95
+#define UCHCOM_REQ_WRITE_REG 0x9A
+#define UCHCOM_REQ_RESET 0xA1
+#define UCHCOM_REQ_SET_DTRRTS 0xA4
+#define UCHCOM_REQ_CH343_WRITE_REG 0xA8
#define UCHCOM_REG_STAT1 0x06
#define UCHCOM_REG_STAT2 0x07
@@ -135,13 +135,21 @@ SYSCTL_INT(_hw_usb_uchcom, OID_AUTO, debug, CTLFLAG_RWTUN,
#define UCHCOM_RTS_MASK 0x40
#define UCHCOM_BRK_MASK 0x01
+#define UCHCOM_ABRK_MASK 0x10
+#define UCHCOM_CH343_BRK_MASK 0x80
#define UCHCOM_LCR1_MASK 0xAF
#define UCHCOM_LCR2_MASK 0x07
#define UCHCOM_LCR1_RX 0x80
#define UCHCOM_LCR1_TX 0x40
#define UCHCOM_LCR1_PARENB 0x08
+#define UCHCOM_LCR1_CS5 0x00
+#define UCHCOM_LCR1_CS6 0x01
+#define UCHCOM_LCR1_CS7 0x02
#define UCHCOM_LCR1_CS8 0x03
+#define UCHCOM_LCR1_STOPB 0x04
+#define UCHCOM_LCR1_PARODD 0x00
+#define UCHCOM_LCR1_PAREVEN 0x10
#define UCHCOM_LCR2_PAREVEN 0x07
#define UCHCOM_LCR2_PARODD 0x06
#define UCHCOM_LCR2_PARMARK 0x05
@@ -151,12 +159,18 @@ SYSCTL_INT(_hw_usb_uchcom, OID_AUTO, debug, CTLFLAG_RWTUN,
#define UCHCOM_INTR_STAT2 0x03
#define UCHCOM_INTR_LEAST 4
-#define UCHCOM_BULK_BUF_SIZE 1024 /* bytes */
+#define UCHCOM_T 0x08
+#define UCHCOM_CL 0x04
+#define UCHCOM_CH343_CT 0x80
+#define UCHCOM_CT 0x90
+
+#define UCHCOM_BULK_BUF_SIZE 1024 /* bytes */
+
+#define TYPE_CH343 1
enum {
UCHCOM_BULK_DT_WR,
UCHCOM_BULK_DT_RD,
- UCHCOM_INTR_DT_RD,
UCHCOM_N_TRANSFER,
};
@@ -165,6 +179,7 @@ struct uchcom_softc {
struct ucom_softc sc_ucom;
struct usb_xfer *sc_xfer[UCHCOM_N_TRANSFER];
+ struct usb_xfer *sc_intr_xfer; /* Interrupt endpoint */
struct usb_device *sc_udev;
struct mtx sc_mtx;
@@ -172,39 +187,19 @@ struct uchcom_softc {
uint8_t sc_rts; /* local copy */
uint8_t sc_version;
uint8_t sc_msr;
- uint8_t sc_lsr; /* local status register */
-};
-
-struct uchcom_divider {
- uint8_t dv_prescaler;
- uint8_t dv_div;
- uint8_t dv_mod;
-};
-
-struct uchcom_divider_record {
- uint32_t dvr_high;
- uint32_t dvr_low;
- uint32_t dvr_base_clock;
- struct uchcom_divider dvr_divider;
-};
-
-static const struct uchcom_divider_record dividers[] =
-{
- {307200, 307200, UCHCOM_BASE_UNKNOWN, {7, 0xD9, 0}},
- {921600, 921600, UCHCOM_BASE_UNKNOWN, {7, 0xF3, 0}},
- {2999999, 23530, 6000000, {3, 0, 0}},
- {23529, 2942, 750000, {2, 0, 0}},
- {2941, 368, 93750, {1, 0, 0}},
- {367, 1, 11719, {0, 0, 0}},
+ uint8_t sc_lsr; /* local status register */
+ uint8_t sc_chiptype; /* type of chip */
+ uint8_t sc_ctrl_iface_no;
+ uint8_t sc_iface_index;
};
-#define NUM_DIVIDERS nitems(dividers)
-
static const STRUCT_USB_HOST_ID uchcom_devs[] = {
{USB_VPI(USB_VENDOR_WCH, USB_PRODUCT_WCH_CH341SER, 0)},
{USB_VPI(USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH341SER, 0)},
{USB_VPI(USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH341SER_2, 0)},
{USB_VPI(USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH341SER_3, 0)},
+ {USB_VPI(USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH343SER, 0)},
+ {USB_VPI(USB_VENDOR_WCH2, USB_PRODUCT_WCH2_CH9102SER, 0)},
};
/* prototypes */
@@ -226,8 +221,9 @@ static void uchcom_update_version(struct uchcom_softc *);
static void uchcom_convert_status(struct uchcom_softc *, uint8_t);
static void uchcom_update_status(struct uchcom_softc *);
static void uchcom_set_dtr_rts(struct uchcom_softc *);
-static int uchcom_calc_divider_settings(struct uchcom_divider *, uint32_t);
-static void uchcom_set_baudrate(struct uchcom_softc *, uint32_t);
+static void uchcom_calc_baudrate(struct uchcom_softc *, uint32_t, uint8_t *,
+ uint8_t *);
+static void uchcom_set_baudrate(struct uchcom_softc *, uint32_t, uint16_t);
static void uchcom_poll(struct ucom_softc *ucom);
static device_probe_t uchcom_probe;
@@ -245,7 +241,7 @@ static const struct usb_config uchcom_config_data[UCHCOM_N_TRANSFER] = {
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_OUT,
.bufsize = UCHCOM_BULK_BUF_SIZE,
- .flags = {.pipe_bof = 1,.force_short_xfer = 1,},
+ .flags = {.pipe_bof = 1,},
.callback = &uchcom_write_callback,
},
@@ -257,8 +253,10 @@ static const struct usb_config uchcom_config_data[UCHCOM_N_TRANSFER] = {
.flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
.callback = &uchcom_read_callback,
},
+};
- [UCHCOM_INTR_DT_RD] = {
+static const struct usb_config uchcom_intr_config_data[1] = {
+ [0] = {
.type = UE_INTERRUPT,
.endpoint = UE_ADDR_ANY,
.direction = UE_DIR_IN,
@@ -312,8 +310,9 @@ uchcom_attach(device_t dev)
{
struct uchcom_softc *sc = device_get_softc(dev);
struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct usb_interface *iface;
+ struct usb_interface_descriptor *id;
int error;
- uint8_t iface_index;
DPRINTFN(11, "\n");
@@ -331,20 +330,49 @@ uchcom_attach(device_t dev)
case USB_PRODUCT_WCH2_CH341SER_3:
device_printf(dev, "CH341 detected\n");
break;
+ case USB_PRODUCT_WCH2_CH343SER:
+ device_printf(dev, "CH343 detected\n");
+ break;
+ case USB_PRODUCT_WCH2_CH9102SER:
+ device_printf(dev, "CH9102 detected\n");
+ break;
default:
- device_printf(dev, "New CH340/CH341 product 0x%04x detected\n",
- uaa->info.idProduct);
+ device_printf(dev, "New CH340/CH341/CH343/CH9102 product "
+ "0x%04x detected\n", uaa->info.idProduct);
break;
}
- iface_index = UCHCOM_IFACE_INDEX;
- error = usbd_transfer_setup(uaa->device,
- &iface_index, sc->sc_xfer, uchcom_config_data,
- UCHCOM_N_TRANSFER, sc, &sc->sc_mtx);
+ /* CH343/CH9102 have two interfaces. */
+ sc->sc_ctrl_iface_no = uaa->info.bIfaceNum;
+ iface = usbd_get_iface(uaa->device, UCHCOM_SECOND_IFACE_INDEX);
+ if (iface) {
+ id = usbd_get_interface_descriptor(iface);
+ if (id == NULL) {
+ device_printf(dev, "no interface descriptor\n");
+ goto detach;
+ }
+ sc->sc_iface_index = UCHCOM_SECOND_IFACE_INDEX;
+ usbd_set_parent_iface(uaa->device, UCHCOM_SECOND_IFACE_INDEX,
+ uaa->info.bIfaceIndex);
+ sc->sc_chiptype = TYPE_CH343;
+ } else {
+ sc->sc_iface_index = UCHCOM_IFACE_INDEX;
+ }
+
+ /* Setup all transfers. */
+ error = usbd_transfer_setup(uaa->device, &sc->sc_iface_index,
+ sc->sc_xfer, uchcom_config_data, UCHCOM_N_TRANSFER, sc,
+ &sc->sc_mtx);
+ if (error) {
+ device_printf(dev, "could not allocate all pipes\n");
+ goto detach;
+ }
+ error = usbd_transfer_setup(uaa->device, &sc->sc_ctrl_iface_no,
+ &sc->sc_intr_xfer, uchcom_intr_config_data, 1, sc, &sc->sc_mtx);
if (error) {
- DPRINTF("one or more missing USB endpoints, "
- "error=%s\n", usbd_errstr(error));
+ device_printf(dev, "allocating USB transfers failed for "
+ "interrupt\n");
goto detach;
}
@@ -450,7 +478,9 @@ uchcom_write_reg(struct uchcom_softc *sc,
(unsigned)reg1, (unsigned)val1,
(unsigned)reg2, (unsigned)val2);
uchcom_ctrl_write(
- sc, UCHCOM_REQ_WRITE_REG,
+ sc,
+ (sc->sc_chiptype != TYPE_CH343) ?
+ UCHCOM_REQ_WRITE_REG : UCHCOM_REQ_CH343_WRITE_REG,
reg1 | ((uint16_t)reg2 << 8), val1 | ((uint16_t)val2 << 8));
}
@@ -517,9 +547,6 @@ uchcom_update_version(struct uchcom_softc *sc)
static void
uchcom_convert_status(struct uchcom_softc *sc, uint8_t cur)
{
- sc->sc_dtr = !(cur & UCHCOM_DTR_MASK);
- sc->sc_rts = !(cur & UCHCOM_RTS_MASK);
-
cur = ~cur & 0x0F;
sc->sc_msr = (cur << 4) | ((sc->sc_msr >> 4) ^ cur);
}
@@ -556,78 +583,69 @@ uchcom_cfg_set_break(struct ucom_softc *ucom, uint8_t onoff)
uint8_t brk1;
uint8_t brk2;
- uchcom_read_reg(sc, UCHCOM_REG_BREAK1, &brk1, UCHCOM_REG_LCR1, &brk2);
- if (onoff) {
- /* on - clear bits */
- brk1 &= ~UCHCOM_BRK_MASK;
- brk2 &= ~UCHCOM_LCR1_TX;
+ if (sc->sc_chiptype == TYPE_CH343) {
+ brk1 = UCHCOM_CH343_BRK_MASK;
+ if (!onoff)
+ brk1 |= UCHCOM_ABRK_MASK;
+ uchcom_write_reg(sc, brk1, 0, 0, 0);
} else {
- /* off - set bits */
- brk1 |= UCHCOM_BRK_MASK;
- brk2 |= UCHCOM_LCR1_TX;
+ uchcom_read_reg(sc, UCHCOM_REG_BREAK1, &brk1, UCHCOM_REG_LCR1,
+ &brk2);
+ if (onoff) {
+ /* on - clear bits */
+ brk1 &= ~UCHCOM_BRK_MASK;
+ brk2 &= ~UCHCOM_LCR1_TX;
+ } else {
+ /* off - set bits */
+ brk1 |= UCHCOM_BRK_MASK;
+ brk2 |= UCHCOM_LCR1_TX;
+ }
+ uchcom_write_reg(sc, UCHCOM_REG_BREAK1, brk1, UCHCOM_REG_LCR1,
+ brk2);
}
- uchcom_write_reg(sc, UCHCOM_REG_BREAK1, brk1, UCHCOM_REG_LCR1, brk2);
}
-static int
-uchcom_calc_divider_settings(struct uchcom_divider *dp, uint32_t rate)
-{
- const struct uchcom_divider_record *rp;
- uint32_t div;
- uint32_t rem;
- uint32_t mod;
- uint8_t i;
-
- /* find record */
- for (i = 0; i != NUM_DIVIDERS; i++) {
- if (dividers[i].dvr_high >= rate &&
- dividers[i].dvr_low <= rate) {
- rp = &dividers[i];
- goto found;
- }
- }
- return (-1);
-
-found:
- dp->dv_prescaler = rp->dvr_divider.dv_prescaler;
- if (rp->dvr_base_clock == UCHCOM_BASE_UNKNOWN)
- dp->dv_div = rp->dvr_divider.dv_div;
- else {
- div = rp->dvr_base_clock / rate;
- rem = rp->dvr_base_clock % rate;
- if (div == 0 || div >= 0xFF)
- return (-1);
- if ((rem << 1) >= rate)
- div += 1;
- dp->dv_div = (uint8_t)-div;
+static void
+uchcom_calc_baudrate(struct uchcom_softc *sc, uint32_t rate, uint8_t *divisor,
+ uint8_t *factor)
+{
+ uint32_t clk = 12000000;
+
+ if (rate >= 256000 && sc->sc_chiptype == TYPE_CH343)
+ *divisor = 7;
+ else if (rate > 23529) {
+ clk /= 2;
+ *divisor = 3;
+ } else if (rate > 2941) {
+ clk /= 16;
+ *divisor = 2;
+ } else if (rate > 367) {
+ clk /= 128;
+ *divisor = 1;
+ } else {
+ clk = 11719;
+ *divisor = 0;
}
- mod = (UCHCOM_BPS_MOD_BASE / rate) + UCHCOM_BPS_MOD_BASE_OFS;
- mod = mod + (mod / 2);
+ *factor = 256 - clk / rate;
- dp->dv_mod = (mod + 0xFF) / 0x100;
-
- return (0);
+ if (rate == 921600 && sc->sc_chiptype != TYPE_CH343) {
+ *divisor = 7;
+ *factor = 243;
+ }
}
static void
-uchcom_set_baudrate(struct uchcom_softc *sc, uint32_t rate)
+uchcom_set_baudrate(struct uchcom_softc *sc, uint32_t rate, uint16_t lcr)
{
- struct uchcom_divider dv;
+ uint16_t idx;
+ uint8_t factor, div;
- if (uchcom_calc_divider_settings(&dv, rate))
- return;
+ uchcom_calc_baudrate(sc, rate, &div, &factor);
+ div |= (sc->sc_chiptype != TYPE_CH343) ? 0x80 : 0x00;
+ idx = (factor << 8) | div;
- /*
- * According to linux code we need to set bit 7 of UCHCOM_REG_BPS_PRE,
- * otherwise the chip will buffer data.
- */
- uchcom_write_reg(sc,
- UCHCOM_REG_BPS_PRE, dv.dv_prescaler | 0x80,
- UCHCOM_REG_BPS_DIV, dv.dv_div);
- uchcom_write_reg(sc,
- UCHCOM_REG_BPS_MOD, dv.dv_mod,
- UCHCOM_REG_BPS_PAD, 0);
+ uchcom_ctrl_write(sc, UCHCOM_REQ_RESET, lcr, idx);
}
/* ----------------------------------------------------------------------
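The replacement above drops the static divider table in favor of computing the prescaler and factor directly from a 12 MHz base clock. A standalone sketch of the non-CH343 arithmetic, for reference (ch340_baud_index() is an invented name; the constants mirror uchcom_calc_baudrate() and uchcom_set_baudrate() above):

#include <stdint.h>
#include <stdio.h>

/* Sketch only: CH340/CH341 baud register pair, as computed above. */
static uint16_t
ch340_baud_index(uint32_t rate)
{
	uint32_t clk = 12000000;
	uint8_t divisor, factor;

	if (rate > 23529) {
		clk /= 2;		/* 6 MHz */
		divisor = 3;
	} else if (rate > 2941) {
		clk /= 16;		/* 750 kHz */
		divisor = 2;
	} else if (rate > 367) {
		clk /= 128;		/* 93.75 kHz */
		divisor = 1;
	} else {
		clk = 11719;
		divisor = 0;
	}
	factor = 256 - clk / rate;
	if (rate == 921600) {		/* special-cased for CH340/CH341 */
		divisor = 7;
		factor = 243;
	}
	/* Bit 7 keeps the chip from buffering data (see the old comment). */
	return ((uint16_t)factor << 8) | (divisor | 0x80);
}

int
main(void)
{
	/* 115200: divisor 3, factor 256 - 6000000/115200 = 204 -> 0xcc83 */
	printf("115200 -> 0x%04x\n", ch340_baud_index(115200));
	return (0);
}

This agrees with the removed divider-table code, which for 115200 picked prescaler 3 and dv_div = (uint8_t)-52 = 204, so the values programmed into the chip are unchanged.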
@@ -674,6 +692,14 @@ uchcom_cfg_open(struct ucom_softc *ucom)
DPRINTF("\n");
+ if (sc->sc_chiptype != TYPE_CH343) {
+ /* Set default configuration. */
+ uchcom_get_version(sc, NULL);
+ uchcom_ctrl_write(sc, UCHCOM_REQ_RESET, 0, 0);
+ uchcom_write_reg(sc, UCHCOM_REG_BPS_PRE, 0x82,
+ UCHCOM_REG_BPS_DIV, 0xd9);
+ uchcom_write_reg(sc, 0x2c, 0x07, UCHCOM_REG_BPS_PAD, 0);
+ }
uchcom_update_version(sc);
uchcom_update_status(sc);
}
@@ -681,53 +707,69 @@ uchcom_cfg_open(struct ucom_softc *ucom)
static int
uchcom_pre_param(struct ucom_softc *ucom, struct termios *t)
{
- struct uchcom_divider dv;
+ struct uchcom_softc *sc = ucom->sc_parent;
- switch (t->c_cflag & CSIZE) {
- case CS8:
+ /*
+ * Check requested baud rate.
+ * The CH340/CH341 can set any baud rate up to 2Mb.
+ * The CH9102/CH343 can set any baud rate up to 6Mb.
+ */
+ switch (sc->sc_chiptype) {
+ case TYPE_CH343:
+ if (t->c_ospeed <= 6000000)
+ return (0);
break;
default:
- return (EIO);
+ if (t->c_ospeed <= 2000000)
+ return (0);
+ break;
}
- if ((t->c_cflag & CSTOPB) != 0)
- return (EIO);
- if ((t->c_cflag & PARENB) != 0)
- return (EIO);
- if (uchcom_calc_divider_settings(&dv, t->c_ospeed)) {
- return (EIO);
- }
- return (0); /* success */
+ return (EIO);
}
static void
uchcom_cfg_param(struct ucom_softc *ucom, struct termios *t)
{
struct uchcom_softc *sc = ucom->sc_parent;
+ uint8_t lcr;
- uchcom_get_version(sc, NULL);
- uchcom_ctrl_write(sc, UCHCOM_REQ_RESET, 0, 0);
- uchcom_set_baudrate(sc, t->c_ospeed);
- if (sc->sc_version < UCHCOM_VER_30) {
- uchcom_read_reg(sc, UCHCOM_REG_LCR1, NULL,
- UCHCOM_REG_LCR2, NULL);
- uchcom_write_reg(sc, UCHCOM_REG_LCR1, 0x50,
- UCHCOM_REG_LCR2, 0x00);
- } else {
- /*
- * Set up line control:
- * - enable transmit and receive
- * - set 8n1 mode
- * To do: support other sizes, parity, stop bits.
- */
- uchcom_write_reg(sc,
- UCHCOM_REG_LCR1,
- UCHCOM_LCR1_RX | UCHCOM_LCR1_TX | UCHCOM_LCR1_CS8,
- UCHCOM_REG_LCR2, 0x00);
+ lcr = UCHCOM_LCR1_RX | UCHCOM_LCR1_TX;
+
+ if (t->c_cflag & CSTOPB)
+ lcr |= UCHCOM_LCR1_STOPB;
+
+ if (t->c_cflag & PARENB) {
+ lcr |= UCHCOM_LCR1_PARENB;
+ if (t->c_cflag & PARODD)
+ lcr |= UCHCOM_LCR1_PARODD;
+ else
+ lcr |= UCHCOM_LCR1_PAREVEN;
}
- uchcom_update_status(sc);
- uchcom_ctrl_write(sc, UCHCOM_REQ_RESET, 0x501f, 0xd90a);
- uchcom_set_baudrate(sc, t->c_ospeed);
+
+ switch (t->c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UCHCOM_LCR1_CS5;
+ break;
+ case CS6:
+ lcr |= UCHCOM_LCR1_CS6;
+ break;
+ case CS7:
+ lcr |= UCHCOM_LCR1_CS7;
+ break;
+ case CS8:
+ default:
+ lcr |= UCHCOM_LCR1_CS8;
+ break;
+ }
+
+ if (sc->sc_chiptype == TYPE_CH343)
+ uchcom_set_baudrate(sc, t->c_ospeed,
+ UCHCOM_T | UCHCOM_CL | UCHCOM_CH343_CT | lcr << 8);
+ else
+ uchcom_set_baudrate(sc, t->c_ospeed,
+ UCHCOM_T | UCHCOM_CL | UCHCOM_CT | lcr << 8);
+
uchcom_set_dtr_rts(sc);
uchcom_update_status(sc);
}
@@ -738,7 +780,7 @@ uchcom_start_read(struct ucom_softc *ucom)
struct uchcom_softc *sc = ucom->sc_parent;
/* start interrupt endpoint */
- usbd_transfer_start(sc->sc_xfer[UCHCOM_INTR_DT_RD]);
+ usbd_transfer_start(sc->sc_intr_xfer);
/* start read endpoint */
usbd_transfer_start(sc->sc_xfer[UCHCOM_BULK_DT_RD]);
@@ -750,7 +792,7 @@ uchcom_stop_read(struct ucom_softc *ucom)
struct uchcom_softc *sc = ucom->sc_parent;
/* stop interrupt endpoint */
- usbd_transfer_stop(sc->sc_xfer[UCHCOM_INTR_DT_RD]);
+ usbd_transfer_stop(sc->sc_intr_xfer);
/* stop read endpoint */
usbd_transfer_stop(sc->sc_xfer[UCHCOM_BULK_DT_RD]);
@@ -780,7 +822,8 @@ uchcom_intr_callback(struct usb_xfer *xfer, usb_error_t error)
{
struct uchcom_softc *sc = usbd_xfer_softc(xfer);
struct usb_page_cache *pc;
- uint8_t buf[UCHCOM_INTR_LEAST];
+ uint32_t intrstat;
+ uint8_t buf[16];
int actlen;
usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
@@ -792,13 +835,12 @@ uchcom_intr_callback(struct usb_xfer *xfer, usb_error_t error)
if (actlen >= UCHCOM_INTR_LEAST) {
pc = usbd_xfer_get_frame(xfer, 0);
- usbd_copy_out(pc, 0, buf, UCHCOM_INTR_LEAST);
+ usbd_copy_out(pc, 0, buf, sizeof(buf));
- DPRINTF("data = 0x%02X 0x%02X 0x%02X 0x%02X\n",
- (unsigned)buf[0], (unsigned)buf[1],
- (unsigned)buf[2], (unsigned)buf[3]);
+ intrstat = (sc->sc_chiptype == TYPE_CH343) ?
+ actlen - 1 : UCHCOM_INTR_STAT1;
- uchcom_convert_status(sc, buf[UCHCOM_INTR_STAT1]);
+ uchcom_convert_status(sc, buf[intrstat]);
ucom_status_change(&sc->sc_ucom);
}
case USB_ST_SETUP:
diff --git a/sys/dev/usb/serial/ucycom.c b/sys/dev/usb/serial/ucycom.c
index 664cb7f05263..5ab1810a0d11 100644
--- a/sys/dev/usb/serial/ucycom.c
+++ b/sys/dev/usb/serial/ucycom.c
@@ -1,4 +1,3 @@
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
diff --git a/sys/dev/usb/serial/udbc.c b/sys/dev/usb/serial/udbc.c
new file mode 100644
index 000000000000..d7ca6b25bf32
--- /dev/null
+++ b/sys/dev/usb/serial/udbc.c
@@ -0,0 +1,404 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause-NetBSD
+ *
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * Copyright (c) 2016-2024 Hiroki Sato <hrs@FreeBSD.org>
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Lennart Augustsson (lennart@augustsson.net).
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/callout.h>
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/priv.h>
+#include <sys/queue.h>
+#include <sys/stddef.h>
+#include <sys/stdint.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/unistd.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usb_ioctl.h>
+#include <dev/usb/usbdi.h>
+#include <dev/usb/usbdi_util.h>
+#include <dev/usb/usb_core.h>
+
+#include "usbdevs.h"
+
+#define USB_DEBUG_VAR udbc_debug
+#include <dev/usb/usb_process.h>
+#include <dev/usb/serial/usb_serial.h>
+#include <dev/usb/usb_debug.h>
+
+static SYSCTL_NODE(_hw_usb, OID_AUTO, udbc, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "USB DbC Client");
+
+#ifdef USB_DEBUG
+static int udbc_debug = 0;
+SYSCTL_INT(_hw_usb_udbc, OID_AUTO, debug, CTLFLAG_RWTUN, &udbc_debug, 0,
+ "Debug level");
+#endif
+
+#define UDBC_CONFIG_INDEX 0
+
+#define UDBC_IBUFSIZE 1024
+#define UDBC_OBUFSIZE 1024
+
+enum {
+ UDBC_BULK_DT_WR,
+ UDBC_BULK_DT_RD,
+ UDBC_N_TRANSFER, /* n of EP */
+};
+
+struct udbc_softc {
+ struct ucom_super_softc sc_super_ucom;
+ struct ucom_softc sc_ucom;
+
+ struct usb_device *sc_udev;
+ struct usb_xfer *sc_xfer[UDBC_N_TRANSFER];
+ device_t sc_dev;
+ struct mtx sc_mtx;
+
+ uint32_t sc_unit;
+};
+
+/* prototypes */
+
+static device_probe_t udbc_probe;
+static device_attach_t udbc_attach;
+static device_detach_t udbc_detach;
+static void udbc_free_softc(struct udbc_softc *);
+
+static usb_callback_t udbc_write_callback;
+static usb_callback_t udbc_read_callback;
+
+static void udbc_free(struct ucom_softc *);
+static void udbc_cfg_open(struct ucom_softc *);
+static void udbc_cfg_close(struct ucom_softc *);
+static int udbc_pre_param(struct ucom_softc *, struct termios *);
+static int udbc_ioctl(struct ucom_softc *, uint32_t, caddr_t, int,
+ struct thread *);
+static void udbc_start_read(struct ucom_softc *);
+static void udbc_stop_read(struct ucom_softc *);
+static void udbc_start_write(struct ucom_softc *);
+static void udbc_stop_write(struct ucom_softc *);
+static void udbc_poll(struct ucom_softc *ucom);
+
+static const struct usb_config udbc_config[UDBC_N_TRANSFER] = {
+ [UDBC_BULK_DT_WR] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = UDBC_OBUFSIZE,
+ .flags = {.pipe_bof = 1,},
+ .callback = &udbc_write_callback,
+ },
+
+ [UDBC_BULK_DT_RD] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = UDBC_IBUFSIZE,
+ .flags = {.pipe_bof = 1,.short_xfer_ok = 1,},
+ .callback = &udbc_read_callback,
+ },
+};
+
+static const struct ucom_callback udbc_callback = {
+ .ucom_cfg_open = &udbc_cfg_open,
+ .ucom_cfg_close = &udbc_cfg_close,
+ .ucom_pre_param = &udbc_pre_param,
+ .ucom_ioctl = &udbc_ioctl,
+ .ucom_start_read = &udbc_start_read,
+ .ucom_stop_read = &udbc_stop_read,
+ .ucom_start_write = &udbc_start_write,
+ .ucom_stop_write = &udbc_stop_write,
+ .ucom_poll = &udbc_poll,
+ .ucom_free = &udbc_free,
+};
+
+static device_method_t udbc_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, udbc_probe),
+ DEVMETHOD(device_attach, udbc_attach),
+ DEVMETHOD(device_detach, udbc_detach),
+ DEVMETHOD_END
+};
+
+static int
+udbc_probe(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if (uaa->info.bConfigIndex != UDBC_CONFIG_INDEX)
+ return (ENXIO);
+ if (uaa->info.bInterfaceClass != UICLASS_DIAGNOSTIC)
+ return (ENXIO);
+ if (uaa->info.bDeviceProtocol != 0x00) /* GNU GDB == 1 */
+ return (ENXIO);
+
+ return (BUS_PROBE_SPECIFIC);
+}
+
+static int
+udbc_attach(device_t dev)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(dev);
+ struct udbc_softc *sc = device_get_softc(dev);
+ int error;
+
+ DPRINTF("\n");
+
+ sc->sc_udev = uaa->device;
+ sc->sc_dev = dev;
+ sc->sc_unit = device_get_unit(dev);
+
+ device_set_usb_desc(dev);
+ mtx_init(&sc->sc_mtx, "udbc", NULL, MTX_DEF);
+ ucom_ref(&sc->sc_super_ucom);
+
+ sc->sc_ucom.sc_portno = 0;
+
+ error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex,
+ sc->sc_xfer, udbc_config, UDBC_N_TRANSFER, sc, &sc->sc_mtx);
+
+ if (error) {
+ device_printf(dev,
+ "allocating USB transfers failed\n");
+ goto detach;
+ }
+ /* clear stall at first run */
+ mtx_lock(&sc->sc_mtx);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_WR]);
+ usbd_xfer_set_stall(sc->sc_xfer[UDBC_BULK_DT_RD]);
+ mtx_unlock(&sc->sc_mtx);
+
+ error = ucom_attach(&sc->sc_super_ucom, &sc->sc_ucom, 1, sc,
+ &udbc_callback, &sc->sc_mtx);
+ if (error)
+ goto detach;
+ ucom_set_pnpinfo_usb(&sc->sc_super_ucom, dev);
+
+ return (0); /* success */
+
+detach:
+ udbc_detach(dev);
+ return (ENXIO);
+}
+
+static int
+udbc_detach(device_t dev)
+{
+ struct udbc_softc *sc = device_get_softc(dev);
+
+ ucom_detach(&sc->sc_super_ucom, &sc->sc_ucom);
+ usbd_transfer_unsetup(sc->sc_xfer, UDBC_N_TRANSFER);
+
+ device_claim_softc(dev);
+
+ udbc_free_softc(sc);
+
+ return (0);
+}
+
+UCOM_UNLOAD_DRAIN(udbc);
+
+static void
+udbc_free_softc(struct udbc_softc *sc)
+{
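+	/* Free only once the last ucom reference is gone. */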
+ if (ucom_unref(&sc->sc_super_ucom)) {
+ mtx_destroy(&sc->sc_mtx);
+ device_free_softc(sc);
+ }
+}
+
+static void
+udbc_free(struct ucom_softc *ucom)
+{
+ udbc_free_softc(ucom->sc_parent);
+}
+
+static void
+udbc_cfg_open(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing open routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which open gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_cfg_close(struct ucom_softc *ucom)
+{
+ /*
+ * This do-nothing close routine exists for the sole purpose of this
+ * DPRINTF() so that you can see the point at which close gets called
+ * when debugging is enabled.
+ */
+ DPRINTF("\n");
+}
+
+static void
+udbc_write_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ uint32_t buflen;
+
+ DPRINTFN(3, "\n");
+
+ switch (USB_GET_STATE(xfer)) {
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ }
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ case USB_ST_TRANSFERRED:
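+		/*
+		 * Fetch pending TX data from the tty layer; if there is
+		 * none, stay idle until ucom restarts the write transfer.
+		 */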
+ pc = usbd_xfer_get_frame(xfer, 0);
+ if (ucom_get_data(&sc->sc_ucom, pc, 0, UDBC_OBUFSIZE,
+ &buflen) == 0)
+ break;
+ if (buflen != 0) {
+ usbd_xfer_set_frame_len(xfer, 0, buflen);
+ usbd_transfer_submit(xfer);
+ }
+ break;
+ }
+}
+
+static void
+udbc_read_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct udbc_softc *sc = usbd_xfer_softc(xfer);
+ struct usb_page_cache *pc;
+ int buflen;
+
+ DPRINTFN(3, "\n");
+
+ usbd_xfer_status(xfer, &buflen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
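+		/* Hand received bytes to the tty layer; fall through to rearm. */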
+ pc = usbd_xfer_get_frame(xfer, 0);
+ ucom_put_data(&sc->sc_ucom, pc, 0, buflen);
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+tr_setup:
+ usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
+ usbd_transfer_submit(xfer);
+ return;
+
+ default: /* Error */
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ return;
+ }
+}
+
+static int
+udbc_pre_param(struct ucom_softc *ucom, struct termios *t)
+{
+ DPRINTF("\n");
+
+ return (0);
+}
+
+static int
+udbc_ioctl(struct ucom_softc *ucom, uint32_t cmd, caddr_t data, int flag,
+ struct thread *td)
+{
+ return (ENOIOCTL);
+}
+
+static void
+udbc_start_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_stop_read(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_RD]);
+}
+
+static void
+udbc_start_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_start(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_stop_write(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_stop(sc->sc_xfer[UDBC_BULK_DT_WR]);
+}
+
+static void
+udbc_poll(struct ucom_softc *ucom)
+{
+ struct udbc_softc *sc = ucom->sc_parent;
+
+ usbd_transfer_poll(sc->sc_xfer, UDBC_N_TRANSFER);
+}
+
+static driver_t udbc_driver = {
+ .name = "udbc",
+ .methods = udbc_methods,
+ .size = sizeof(struct udbc_softc),
+};
+
+DRIVER_MODULE(udbc, uhub, udbc_driver, NULL, NULL);
+MODULE_DEPEND(udbc, ucom, 1, 1, 1);
+MODULE_DEPEND(udbc, usb, 1, 1, 1);
+MODULE_VERSION(udbc, 1);
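Once attached, the DbC port behaves like any other ucom(4) device, so it should show up under the usual /dev/ttyU* and /dev/cuaU* nodes; for example, "cu -l /dev/cuaU0" (unit number assumed) would open a session against the debug target.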
diff --git a/sys/dev/usb/serial/ufoma.c b/sys/dev/usb/serial/ufoma.c
index 66002f57e3b9..3fc6a7a609ba 100644
--- a/sys/dev/usb/serial/ufoma.c
+++ b/sys/dev/usb/serial/ufoma.c
@@ -1,6 +1,5 @@
/* $NetBSD: umodem.c,v 1.45 2002/09/23 05:51:23 simonb Exp $ */
-#include <sys/cdefs.h>
#define UFOMA_HANDSFREE
/*-
* SPDX-License-Identifier: BSD-2-Clause
diff --git a/sys/dev/usb/serial/uftdi.c b/sys/dev/usb/serial/uftdi.c
index 458c6a740f7c..b06dc38432be 100644
--- a/sys/dev/usb/serial/uftdi.c
+++ b/sys/dev/usb/serial/uftdi.c
@@ -31,7 +31,6 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*
* NOTE: all function names beginning like "uftdi_cfg_" can only
* be called from within the config thread function !
@@ -289,8 +288,26 @@ static const STRUCT_USB_HOST_ID uftdi_devs[] = {
UFTDI_DEV(BBELECTRONICS, USPTL4, 0),
UFTDI_DEV(BBELECTRONICS, USTL4, 0),
UFTDI_DEV(BBELECTRONICS, ZZ_PROG1_USB, 0),
+ UFTDI_DEV(BRAINBOXES, US101, 0),
+ UFTDI_DEV(BRAINBOXES, US159, 0),
+ UFTDI_DEV(BRAINBOXES, US235, 0),
UFTDI_DEV(BRAINBOXES, US257, 0),
UFTDI_DEV(BRAINBOXES, US25701, 0),
+ UFTDI_DEV(BRAINBOXES, US279_12, 0),
+ UFTDI_DEV(BRAINBOXES, US279_34, 0),
+ UFTDI_DEV(BRAINBOXES, US279_56, 0),
+ UFTDI_DEV(BRAINBOXES, US279_78, 0),
+ UFTDI_DEV(BRAINBOXES, US313, 0),
+ UFTDI_DEV(BRAINBOXES, US320, 0),
+ UFTDI_DEV(BRAINBOXES, US324, 0),
+ UFTDI_DEV(BRAINBOXES, US346_12, 0),
+ UFTDI_DEV(BRAINBOXES, US346_34, 0),
+ UFTDI_DEV(BRAINBOXES, US701_12, 0),
+ UFTDI_DEV(BRAINBOXES, US701_34, 0),
+ UFTDI_DEV(BRAINBOXES, US842_12, 0),
+ UFTDI_DEV(BRAINBOXES, US842_34, 0),
+ UFTDI_DEV(BRAINBOXES, US842_56, 0),
+ UFTDI_DEV(BRAINBOXES, US842_78, 0),
UFTDI_DEV(CONTEC, COM1USBH, 0),
UFTDI_DEV(DRESDENELEKTRONIK, SENSORTERMINALBOARD, 0),
UFTDI_DEV(DRESDENELEKTRONIK, WIRELESSHANDHELDTERMINAL, 0),
diff --git a/sys/dev/usb/serial/uipaq.c b/sys/dev/usb/serial/uipaq.c
index 2b282009a8bb..f24f1e215767 100644
--- a/sys/dev/usb/serial/uipaq.c
+++ b/sys/dev/usb/serial/uipaq.c
@@ -43,7 +43,6 @@
* Contact isis@cs.umd.edu if you have any questions/comments about this driver
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/serial/ulpt.c b/sys/dev/usb/serial/ulpt.c
index b1e4232ff2a3..ec25ad737596 100644
--- a/sys/dev/usb/serial/ulpt.c
+++ b/sys/dev/usb/serial/ulpt.c
@@ -1,4 +1,3 @@
-#include <sys/cdefs.h>
/* $NetBSD: ulpt.c,v 1.60 2003/10/04 21:19:50 augustss Exp $ */
/*-
diff --git a/sys/dev/usb/serial/umcs.c b/sys/dev/usb/serial/umcs.c
index 18135e3bf67d..8b9b7807ac61 100644
--- a/sys/dev/usb/serial/umcs.c
+++ b/sys/dev/usb/serial/umcs.c
@@ -39,7 +39,6 @@
* quad-port mos7840.
*
*/
-#include <sys/cdefs.h>
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
diff --git a/sys/dev/usb/serial/umct.c b/sys/dev/usb/serial/umct.c
index 4329cf293098..bf6c672907e0 100644
--- a/sys/dev/usb/serial/umct.c
+++ b/sys/dev/usb/serial/umct.c
@@ -1,4 +1,3 @@
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
diff --git a/sys/dev/usb/serial/umodem.c b/sys/dev/usb/serial/umodem.c
index 08d3be554915..59aa5b21e85f 100644
--- a/sys/dev/usb/serial/umodem.c
+++ b/sys/dev/usb/serial/umodem.c
@@ -1,6 +1,5 @@
/* $NetBSD: umodem.c,v 1.45 2002/09/23 05:51:23 simonb Exp $ */
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
diff --git a/sys/dev/usb/serial/uplcom.c b/sys/dev/usb/serial/uplcom.c
index 88278ef5790e..1fd73f1f7665 100644
--- a/sys/dev/usb/serial/uplcom.c
+++ b/sys/dev/usb/serial/uplcom.c
@@ -1,6 +1,5 @@
/* $NetBSD: uplcom.c,v 1.21 2001/11/13 06:24:56 lukem Exp $ */
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
diff --git a/sys/dev/usb/serial/usb_serial.c b/sys/dev/usb/serial/usb_serial.c
index 300438010c05..e62bfdb8ff1d 100644
--- a/sys/dev/usb/serial/usb_serial.c
+++ b/sys/dev/usb/serial/usb_serial.c
@@ -29,7 +29,6 @@
* SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
/*-
* Copyright (c) 1998, 2000 The NetBSD Foundation, Inc.
* All rights reserved.
@@ -151,9 +150,9 @@ static int ucom_unit_alloc(void);
static void ucom_unit_free(int);
static int ucom_attach_tty(struct ucom_super_softc *, struct ucom_softc *);
static void ucom_detach_tty(struct ucom_super_softc *, struct ucom_softc *);
-static void ucom_queue_command(struct ucom_softc *,
+static int ucom_queue_command(struct ucom_softc *,
usb_proc_callback_t *, struct termios *pt,
- struct usb_proc_msg *t0, struct usb_proc_msg *t1);
+ struct usb_proc_msg *t0, struct usb_proc_msg *t1, bool wait);
static void ucom_shutdown(struct ucom_softc *);
static void ucom_ring(struct ucom_softc *, uint8_t);
static void ucom_break(struct ucom_softc *, uint8_t);
@@ -593,18 +592,52 @@ ucom_set_usb_mode(struct ucom_super_softc *ssc, enum usb_hc_mode usb_mode)
}
static void
+ucom_command_barrier_cb(struct usb_proc_msg *msg __unused)
+{
+ /* NOP */
+}
+
+/*
+ * ucom_command_barrier inserts a dummy task and waits for it so that we can
+ * be certain that previously enqueued tasks have finished before returning
+ * to the tty layer.
+ */
+static int
+ucom_command_barrier(struct ucom_softc *sc)
+{
+ struct ucom_super_softc *ssc = sc->sc_super;
+ struct usb_proc_msg dummy = { .pm_callback = ucom_command_barrier_cb };
+ struct usb_proc_msg *task;
+ int error;
+
+ UCOM_MTX_ASSERT(sc, MA_OWNED);
+
+ if (usb_proc_is_gone(&ssc->sc_tq)) {
+ DPRINTF("proc is gone\n");
+ return (ENXIO); /* nothing to do */
+ }
+
+ task = usb_proc_msignal(&ssc->sc_tq, &dummy, &dummy);
+ error = usb_proc_mwait_sig(&ssc->sc_tq, task, task);
+ if (error == 0 && sc->sc_tty != NULL && tty_gone(sc->sc_tty))
+ error = ENXIO;
+ return (error);
+}
+
+static int
ucom_queue_command(struct ucom_softc *sc,
usb_proc_callback_t *fn, struct termios *pt,
- struct usb_proc_msg *t0, struct usb_proc_msg *t1)
+ struct usb_proc_msg *t0, struct usb_proc_msg *t1, bool wait)
{
struct ucom_super_softc *ssc = sc->sc_super;
struct ucom_param_task *task;
+ int error;
UCOM_MTX_ASSERT(sc, MA_OWNED);
if (usb_proc_is_gone(&ssc->sc_tq)) {
DPRINTF("proc is gone\n");
- return; /* nothing to do */
+ return (ENXIO); /* nothing to do */
}
/*
* NOTE: The task cannot get executed before we drop the
@@ -628,8 +661,15 @@ ucom_queue_command(struct ucom_softc *sc,
/*
* Closing or opening the device should be synchronous.
*/
- if (fn == ucom_cfg_close || fn == ucom_cfg_open)
- usb_proc_mwait(&ssc->sc_tq, t0, t1);
+ if (wait) {
+ error = usb_proc_mwait_sig(&ssc->sc_tq, t0, t1);
+
+ /* usb_proc_mwait_sig may have dropped the tty lock. */
+ if (error == 0 && sc->sc_tty != NULL && tty_gone(sc->sc_tty))
+ error = ENXIO;
+ } else {
+ error = 0;
+ }
/*
* In case of multiple configure requests,
@@ -637,6 +677,8 @@ ucom_queue_command(struct ucom_softc *sc,
*/
if (fn == ucom_cfg_start_transfers)
sc->sc_last_start_xfer = &task->hdr;
+
+ return (error);
}
static void
@@ -760,9 +802,8 @@ ucom_open(struct tty *tp)
* example if the device is not present:
*/
error = (sc->sc_callback->ucom_pre_open) (sc);
- if (error) {
- return (error);
- }
+ if (error != 0)
+ goto out;
}
sc->sc_flag |= UCOM_FLAG_HL_READY;
@@ -782,14 +823,21 @@ ucom_open(struct tty *tp)
sc->sc_jitterbuf_in = 0;
sc->sc_jitterbuf_out = 0;
- ucom_queue_command(sc, ucom_cfg_open, NULL,
+ error = ucom_queue_command(sc, ucom_cfg_open, NULL,
&sc->sc_open_task[0].hdr,
- &sc->sc_open_task[1].hdr);
+ &sc->sc_open_task[1].hdr, true);
+ if (error != 0)
+ goto out;
- /* Queue transfer enable command last */
- ucom_queue_command(sc, ucom_cfg_start_transfers, NULL,
- &sc->sc_start_task[0].hdr,
- &sc->sc_start_task[1].hdr);
+ /*
+	 * Queue the transfer enable command last; a barrier follows, so we
+	 * don't need to wait for this one to complete specifically.
+ */
+ error = ucom_queue_command(sc, ucom_cfg_start_transfers, NULL,
+ &sc->sc_start_task[0].hdr,
+ &sc->sc_start_task[1].hdr, true);
+ if (error != 0)
+ goto out;
if (sc->sc_tty == NULL || (sc->sc_tty->t_termios.c_cflag & CNO_RTSDTR) == 0)
ucom_modem(tp, SER_DTR | SER_RTS, 0);
@@ -800,7 +848,9 @@ ucom_open(struct tty *tp)
ucom_status_change(sc);
- return (0);
+ error = ucom_command_barrier(sc);
+out:
+ return (error);
}
static void
@@ -836,9 +886,9 @@ ucom_close(struct tty *tp)
}
ucom_shutdown(sc);
- ucom_queue_command(sc, ucom_cfg_close, NULL,
+ (void)ucom_queue_command(sc, ucom_cfg_close, NULL,
&sc->sc_close_task[0].hdr,
- &sc->sc_close_task[1].hdr);
+ &sc->sc_close_task[1].hdr, true);
sc->sc_flag &= ~(UCOM_FLAG_HL_READY | UCOM_FLAG_RTS_IFLOW);
@@ -919,11 +969,15 @@ ucom_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
#endif
case TIOCSBRK:
ucom_break(sc, 1);
- error = 0;
+ error = ucom_command_barrier(sc);
+ if (error == ENXIO)
+ error = ENODEV;
break;
case TIOCCBRK:
ucom_break(sc, 0);
- error = 0;
+ error = ucom_command_barrier(sc);
+ if (error == ENXIO)
+ error = ENODEV;
break;
default:
if (sc->sc_callback->ucom_ioctl) {
@@ -1077,10 +1131,13 @@ ucom_line_state(struct ucom_softc *sc,
sc->sc_pls_set |= set_bits;
sc->sc_pls_clr |= clear_bits;
- /* defer driver programming */
- ucom_queue_command(sc, ucom_cfg_line_state, NULL,
- &sc->sc_line_state_task[0].hdr,
- &sc->sc_line_state_task[1].hdr);
+ /*
+	 * Defer driver programming.  We don't propagate any error from
+	 * this call because such errors are caught further up the call stack.
+ */
+ (void)ucom_queue_command(sc, ucom_cfg_line_state, NULL,
+ &sc->sc_line_state_task[0].hdr,
+ &sc->sc_line_state_task[1].hdr, false);
}
static void
@@ -1236,9 +1293,9 @@ ucom_status_change(struct ucom_softc *sc)
}
DPRINTF("\n");
- ucom_queue_command(sc, ucom_cfg_status_change, NULL,
+ (void)ucom_queue_command(sc, ucom_cfg_status_change, NULL,
&sc->sc_status_task[0].hdr,
- &sc->sc_status_task[1].hdr);
+ &sc->sc_status_task[1].hdr, true);
}
static void
@@ -1310,14 +1367,18 @@ ucom_param(struct tty *tp, struct termios *t)
sc->sc_flag &= ~UCOM_FLAG_GP_DATA;
/* Queue baud rate programming command first */
- ucom_queue_command(sc, ucom_cfg_param, t,
+ error = ucom_queue_command(sc, ucom_cfg_param, t,
&sc->sc_param_task[0].hdr,
- &sc->sc_param_task[1].hdr);
+ &sc->sc_param_task[1].hdr, true);
+ if (error != 0)
+ goto done;
/* Queue transfer enable command last */
- ucom_queue_command(sc, ucom_cfg_start_transfers, NULL,
- &sc->sc_start_task[0].hdr,
- &sc->sc_start_task[1].hdr);
+ error = ucom_queue_command(sc, ucom_cfg_start_transfers, NULL,
+ &sc->sc_start_task[0].hdr,
+ &sc->sc_start_task[1].hdr, true);
+ if (error != 0)
+ goto done;
if (t->c_cflag & CRTS_IFLOW) {
sc->sc_flag |= UCOM_FLAG_RTS_IFLOW;
@@ -1325,6 +1386,8 @@ ucom_param(struct tty *tp, struct termios *t)
sc->sc_flag &= ~UCOM_FLAG_RTS_IFLOW;
ucom_modem(tp, SER_RTS, 0);
}
+
+ error = ucom_command_barrier(sc);
done:
if (error) {
if (opened) {
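The ucom_command_barrier() addition above is the classic task-queue barrier idiom: enqueue a message that does nothing and sleep until the worker has consumed it, which proves that everything queued before it has already run. A minimal stand-alone sketch of the idiom, in user-space pthreads with illustrative names (this is not the kernel API, and the worker loop is omitted):

#include <pthread.h>
#include <stdbool.h>

struct task {
	void (*fn)(void *);
	void *arg;
	bool done;			/* set by the worker thread */
	struct task *next;
};

struct taskq {
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	struct task *head, **tailp;
};

static void
barrier_nop(void *arg)
{
	(void)arg;			/* NOP: exists only to be waited on */
}

/* Enqueue a no-op task, then sleep until the worker marks it done. */
static void
taskq_barrier(struct taskq *tq)
{
	struct task t = { .fn = barrier_nop, .done = false, .next = NULL };

	pthread_mutex_lock(&tq->mtx);
	*tq->tailp = &t;		/* lands behind all earlier tasks */
	tq->tailp = &t.next;
	pthread_cond_broadcast(&tq->cv);
	while (!t.done)			/* worker sets t.done, then signals */
		pthread_cond_wait(&tq->cv, &tq->mtx);
	pthread_mutex_unlock(&tq->mtx);
}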
diff --git a/sys/dev/usb/serial/uslcom.c b/sys/dev/usb/serial/uslcom.c
index 1c27788f77ce..26b937d0b200 100644
--- a/sys/dev/usb/serial/uslcom.c
+++ b/sys/dev/usb/serial/uslcom.c
@@ -1,6 +1,5 @@
/* $OpenBSD: uslcom.c,v 1.17 2007/11/24 10:52:12 jsg Exp $ */
-#include <sys/cdefs.h>
/*
* Copyright (c) 2006 Jonathan Gray <jsg@openbsd.org>
*
diff --git a/sys/dev/usb/serial/uvscom.c b/sys/dev/usb/serial/uvscom.c
index ee34f0ad3f3d..b9add5c1b37b 100644
--- a/sys/dev/usb/serial/uvscom.c
+++ b/sys/dev/usb/serial/uvscom.c
@@ -1,6 +1,5 @@
/* $NetBSD: usb/uvscom.c,v 1.1 2002/03/19 15:08:42 augustss Exp $ */
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
diff --git a/sys/dev/usb/storage/umass.c b/sys/dev/usb/storage/umass.c
index e0daada72bdb..cacf4ddf8f16 100644
--- a/sys/dev/usb/storage/umass.c
+++ b/sys/dev/usb/storage/umass.c
@@ -1,4 +1,3 @@
-#include <sys/cdefs.h>
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
@@ -300,7 +299,7 @@ typedef void (umass_callback_t)(struct umass_softc *sc, union ccb *ccb,
#define STATUS_CMD_FAILED 2 /* transfer was ok, command failed */
#define STATUS_WIRE_FAILED 3 /* couldn't even get command across */
-typedef uint8_t (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr,
+typedef bool (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr,
uint8_t cmd_len);
/* Wire and command protocol */
@@ -365,6 +364,26 @@ typedef uint8_t (umass_transform_t)(struct umass_softc *sc, uint8_t *cmd_ptr,
/* Device does not support 'PREVENT/ALLOW MEDIUM REMOVAL'. */
#define NO_PREVENT_ALLOW 0x8000
+#define UMASS_QUIRKS_STRING \
+ "\020" \
+ "\001NO_TEST_UNIT_READY" \
+ "\002RS_NO_CLEAR_UA" \
+ "\003NO_START_STOP" \
+ "\004FORCE_SHORT_INQUIRY" \
+ "\005SHUTTLE_INIT" \
+ "\006ALT_IFACE_1" \
+ "\007FLOPPY_SPEED" \
+ "\010IGNORE_RESIDUE" \
+ "\011NO_GETMAXLUN" \
+ "\012WRONG_CSWSIG" \
+ "\013NO_INQUIRY" \
+ "\014NO_INQUIRY_EVPD" \
+ "\015RBC_PAD_TO_12" \
+ "\016READ_CAPACITY_OFFBY1" \
+ "\017NO_SYNCHRONIZE_CACHE" \
+ "\020NO_PREVENT_ALLOW" \
+
+
struct umass_softc {
struct scsi_sense cam_scsi_sense;
struct scsi_test_unit_ready cam_scsi_test_unit_ready;
@@ -412,6 +431,7 @@ struct umass_softc {
uint8_t sc_maxlun; /* maximum LUN number, inclusive */
uint8_t sc_last_xfer_index;
uint8_t sc_status_try;
+ bool sc_sending_sense;
};
struct umass_probe_proto {
@@ -470,13 +490,13 @@ static void umass_cam_sense_cb(struct umass_softc *, union ccb *, uint32_t,
uint8_t);
static void umass_cam_quirk_cb(struct umass_softc *, union ccb *, uint32_t,
uint8_t);
-static uint8_t umass_scsi_transform(struct umass_softc *, uint8_t *, uint8_t);
-static uint8_t umass_rbc_transform(struct umass_softc *, uint8_t *, uint8_t);
-static uint8_t umass_ufi_transform(struct umass_softc *, uint8_t *, uint8_t);
-static uint8_t umass_atapi_transform(struct umass_softc *, uint8_t *,
- uint8_t);
-static uint8_t umass_no_transform(struct umass_softc *, uint8_t *, uint8_t);
-static uint8_t umass_std_transform(struct umass_softc *, union ccb *, uint8_t
+static void umass_cam_illegal_request(union ccb *ccb);
+static bool umass_scsi_transform(struct umass_softc *, uint8_t *, uint8_t);
+static bool umass_rbc_transform(struct umass_softc *, uint8_t *, uint8_t);
+static bool umass_ufi_transform(struct umass_softc *, uint8_t *, uint8_t);
+static bool umass_atapi_transform(struct umass_softc *, uint8_t *, uint8_t);
+static bool umass_no_transform(struct umass_softc *, uint8_t *, uint8_t);
+static bool umass_std_transform(struct umass_softc *, union ccb *, uint8_t
*, uint8_t);
#ifdef USB_DEBUG
@@ -956,7 +976,7 @@ umass_attach(device_t dev)
sc->sc_proto & UMASS_PROTO_WIRE);
}
- printf("; quirks = 0x%04x\n", sc->sc_quirks);
+ printf("; quirks = 0x%b\n", sc->sc_quirks, UMASS_QUIRKS_STRING);
#endif
if (sc->sc_quirks & ALT_IFACE_1) {
@@ -1994,16 +2014,20 @@ umass_t_cbi_status_callback(struct usb_xfer *xfer, usb_error_t error)
/*
* Section 3.4.3.1.3 specifies that the UFI command
* protocol returns an ASC and ASCQ in the interrupt
- * data block.
+ * data block. However, we might also be fetching the
+ * sense explicitly, where they are likely to be
+ * non-zero, in which case we should succeed.
*/
DPRINTF(sc, UDMASS_CBI, "UFI CCI, ASC = 0x%02x, "
"ASCQ = 0x%02x\n", sc->sbl.ufi.asc,
sc->sbl.ufi.ascq);
- status = (((sc->sbl.ufi.asc == 0) &&
- (sc->sbl.ufi.ascq == 0)) ?
- STATUS_CMD_OK : STATUS_CMD_FAILED);
+ if ((sc->sbl.ufi.asc == 0 && sc->sbl.ufi.ascq == 0) ||
+ sc->sc_transfer.cmd_data[0] == REQUEST_SENSE)
+ status = STATUS_CMD_OK;
+ else
+ status = STATUS_CMD_FAILED;
sc->sc_transfer.ccb = NULL;
@@ -2210,6 +2234,13 @@ umass_cam_action(struct cam_sim *sim, union ccb *ccb)
* command format needed by the specific command set
* and return the converted command in
* "sc->sc_transfer.cmd_data"
+ *
+ * For commands we know the device doesn't support, we
+ * either complete them with an illegal request, or fake
+ * the completion, based on what upper layers tolerate.
+ * Ideally, we'd let the periph drivers know and not
+ * fake things up, but some periphs fall short of the
+ * ideal.
*/
if (umass_std_transform(sc, ccb, cmd, ccb->csio.cdb_len)) {
if (sc->sc_transfer.cmd_data[0] == INQUIRY) {
@@ -2243,20 +2274,7 @@ umass_cam_action(struct cam_sim *sim, union ccb *ccb)
*/
if ((sc->sc_quirks & (NO_INQUIRY_EVPD | NO_INQUIRY)) &&
(sc->sc_transfer.cmd_data[1] & SI_EVPD)) {
- scsi_set_sense_data(&ccb->csio.sense_data,
- /*sense_format*/ SSD_TYPE_NONE,
- /*current_error*/ 1,
- /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
- /*asc*/ 0x24,
- /*ascq*/ 0x00,
- /*extra args*/ SSD_ELEM_NONE);
- ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
- ccb->ccb_h.status =
- CAM_SCSI_STATUS_ERROR |
- CAM_AUTOSNS_VALID |
- CAM_DEV_QFRZN;
- xpt_freeze_devq(ccb->ccb_h.path, 1);
- xpt_done(ccb);
+ umass_cam_illegal_request(ccb);
goto done;
}
/*
@@ -2283,9 +2301,7 @@ umass_cam_action(struct cam_sim *sim, union ccb *ccb)
}
} else if (sc->sc_transfer.cmd_data[0] == SYNCHRONIZE_CACHE) {
if (sc->sc_quirks & NO_SYNCHRONIZE_CACHE) {
- ccb->csio.scsi_status = SCSI_STATUS_OK;
- ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
+ umass_cam_illegal_request(ccb);
goto done;
}
} else if (sc->sc_transfer.cmd_data[0] == START_STOP_UNIT) {
@@ -2444,6 +2460,29 @@ umass_cam_poll(struct cam_sim *sim)
usbd_transfer_poll(sc->sc_xfer, UMASS_T_MAX);
}
+/* umass_cam_illegal_request
+ * Complete the command as an illegal request with an invalid field in the CDB
+ */
+
+static void
+umass_cam_illegal_request(union ccb *ccb)
+{
+ scsi_set_sense_data(&ccb->csio.sense_data,
+ /*sense_format*/ SSD_TYPE_NONE,
+ /*current_error*/ 1,
+ /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST,
+ /*asc*/ 0x24, /* 24h/00h INVALID FIELD IN CDB */
+ /*ascq*/ 0x00,
+ /*extra args*/ SSD_ELEM_NONE);
+ ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
+ ccb->ccb_h.status =
+ CAM_SCSI_STATUS_ERROR |
+ CAM_AUTOSNS_VALID |
+ CAM_DEV_QFRZN;
+ xpt_freeze_devq(ccb->ccb_h.path, 1);
+ xpt_done(ccb);
+}
+
/* umass_cam_cb
* finalise a completed CAM command
*/
@@ -2504,10 +2543,6 @@ umass_cam_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
if (umass_std_transform(sc, ccb, &sc->cam_scsi_sense.opcode,
sizeof(sc->cam_scsi_sense))) {
- if ((sc->sc_quirks & FORCE_SHORT_INQUIRY) &&
- (sc->sc_transfer.cmd_data[0] == INQUIRY)) {
- ccb->csio.sense_len = SHORT_INQUIRY_LENGTH;
- }
umass_command_start(sc, DIR_IN, &ccb->csio.sense_data.error_code,
ccb->csio.sense_len, ccb->ccb_h.timeout,
&umass_cam_sense_cb, ccb);
@@ -2595,11 +2630,14 @@ umass_cam_sense_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
DPRINTF(sc, UDMASS_SCSI, "Doing a sneaky"
"TEST_UNIT_READY\n");
- /* the rest of the command was filled in at attach */
-
- if ((sc->sc_transform)(sc,
+ /*
+			 * Transform the TUR and, if successful, send it.  Pass
+ * NULL for the ccb so we don't override the above
+ * status.
+ */
+ if (umass_std_transform(sc, NULL,
&sc->cam_scsi_test_unit_ready.opcode,
- sizeof(sc->cam_scsi_test_unit_ready)) == 1) {
+ sizeof(sc->cam_scsi_test_unit_ready))) {
umass_command_start(sc, DIR_NONE, NULL, 0,
ccb->ccb_h.timeout,
&umass_cam_quirk_cb, ccb);
@@ -2646,56 +2684,18 @@ umass_cam_quirk_cb(struct umass_softc *sc, union ccb *ccb, uint32_t residue,
* SCSI specific functions
*/
-static uint8_t
+static bool
umass_scsi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
uint8_t cmd_len)
{
- if ((cmd_len == 0) ||
- (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
- DPRINTF(sc, UDMASS_SCSI, "Invalid command "
- "length: %d bytes\n", cmd_len);
- return (0); /* failure */
- }
sc->sc_transfer.cmd_len = cmd_len;
- switch (cmd_ptr[0]) {
- case TEST_UNIT_READY:
- if (sc->sc_quirks & NO_TEST_UNIT_READY) {
- DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
- "to START_UNIT\n");
- memset(sc->sc_transfer.cmd_data, 0, cmd_len);
- sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
- sc->sc_transfer.cmd_data[4] = SSS_START;
- return (1);
- }
- break;
-
- case INQUIRY:
- /*
- * some drives wedge when asked for full inquiry
- * information.
- */
- if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
- sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
- return (1);
- }
- break;
- }
-
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
- return (1);
+ return (true);
}
-static uint8_t
+static bool
umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
{
- if ((cmd_len == 0) ||
- (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
- DPRINTF(sc, UDMASS_SCSI, "Invalid command "
- "length: %d bytes\n", cmd_len);
- return (0); /* failure */
- }
switch (cmd_ptr[0]) {
/* these commands are defined in RBC: */
case READ_10:
@@ -2716,66 +2716,41 @@ umass_rbc_transform(struct umass_softc *sc, uint8_t *cmd_ptr, uint8_t cmd_len)
*/
case REQUEST_SENSE:
case PREVENT_ALLOW:
-
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
-
if ((sc->sc_quirks & RBC_PAD_TO_12) && (cmd_len < 12)) {
memset(sc->sc_transfer.cmd_data + cmd_len,
0, 12 - cmd_len);
cmd_len = 12;
}
sc->sc_transfer.cmd_len = cmd_len;
- return (1); /* success */
+ return (true); /* success */
/* All other commands are not legal in RBC */
default:
DPRINTF(sc, UDMASS_SCSI, "Unsupported RBC "
"command 0x%02x\n", cmd_ptr[0]);
- return (0); /* failure */
+ return (false); /* failure */
}
}
-static uint8_t
+static bool
umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
uint8_t cmd_len)
{
- if ((cmd_len == 0) ||
- (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
- DPRINTF(sc, UDMASS_SCSI, "Invalid command "
- "length: %d bytes\n", cmd_len);
- return (0); /* failure */
- }
/* An UFI command is always 12 bytes in length */
sc->sc_transfer.cmd_len = UFI_COMMAND_LENGTH;
- /* Zero the command data */
- memset(sc->sc_transfer.cmd_data, 0, UFI_COMMAND_LENGTH);
-
switch (cmd_ptr[0]) {
/*
* Commands of which the format has been verified. They
* should work. Copy the command into the (zeroed out)
* destination buffer.
*/
- case TEST_UNIT_READY:
- if (sc->sc_quirks & NO_TEST_UNIT_READY) {
- /*
- * Some devices do not support this command. Start
- * Stop Unit should give the same results
- */
- DPRINTF(sc, UDMASS_UFI, "Converted TEST_UNIT_READY "
- "to START_UNIT\n");
-
- sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
- sc->sc_transfer.cmd_data[4] = SSS_START;
- return (1);
- }
- break;
case REZERO_UNIT:
case REQUEST_SENSE:
case FORMAT_UNIT:
case INQUIRY:
+ case TEST_UNIT_READY:
case START_STOP_UNIT:
case SEND_DIAGNOSTIC:
case PREVENT_ALLOW:
@@ -2794,73 +2769,41 @@ umass_ufi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
/*
* SYNCHRONIZE_CACHE isn't supported by UFI, nor should it be
- * required for UFI devices, so it is appropriate to fake
- * success.
+	 * required for UFI devices.  Just fail it; the upper layers
+	 * know what to do.
*/
case SYNCHRONIZE_CACHE:
- return (2);
-
+ return (false);
default:
DPRINTF(sc, UDMASS_SCSI, "Unsupported UFI "
"command 0x%02x\n", cmd_ptr[0]);
- return (0); /* failure */
+ return (false); /* failure */
}
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
- return (1); /* success */
+ return (true); /* success */
}
/*
* 8070i (ATAPI) specific functions
*/
-static uint8_t
+static bool
umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
uint8_t cmd_len)
{
- if ((cmd_len == 0) ||
- (cmd_len > sizeof(sc->sc_transfer.cmd_data))) {
- DPRINTF(sc, UDMASS_SCSI, "Invalid command "
- "length: %d bytes\n", cmd_len);
- return (0); /* failure */
- }
/* An ATAPI command is always 12 bytes in length. */
sc->sc_transfer.cmd_len = ATAPI_COMMAND_LENGTH;
- /* Zero the command data */
- memset(sc->sc_transfer.cmd_data, 0, ATAPI_COMMAND_LENGTH);
-
switch (cmd_ptr[0]) {
/*
* Commands of which the format has been verified. They
* should work. Copy the command into the destination
* buffer.
*/
- case INQUIRY:
- /*
- * some drives wedge when asked for full inquiry
- * information.
- */
- if (sc->sc_quirks & FORCE_SHORT_INQUIRY) {
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
-
- sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
- return (1);
- }
- break;
-
- case TEST_UNIT_READY:
- if (sc->sc_quirks & NO_TEST_UNIT_READY) {
- DPRINTF(sc, UDMASS_SCSI, "Converted TEST_UNIT_READY "
- "to START_UNIT\n");
- sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
- sc->sc_transfer.cmd_data[4] = SSS_START;
- return (1);
- }
- break;
-
case REZERO_UNIT:
case REQUEST_SENSE:
+ case INQUIRY:
case START_STOP_UNIT:
+ case TEST_UNIT_READY:
case SEND_DIAGNOSTIC:
case PREVENT_ALLOW:
case READ_CAPACITY:
@@ -2902,37 +2845,62 @@ umass_atapi_transform(struct umass_softc *sc, uint8_t *cmd_ptr,
break;
}
- memcpy(sc->sc_transfer.cmd_data, cmd_ptr, cmd_len);
- return (1); /* success */
+ return (true); /* success */
}
-static uint8_t
+static bool
umass_no_transform(struct umass_softc *sc, uint8_t *cmd,
uint8_t cmdlen)
{
- return (0); /* failure */
+ return (false); /* failure */
}
-static uint8_t
+static bool
umass_std_transform(struct umass_softc *sc, union ccb *ccb,
- uint8_t *cmd, uint8_t cmdlen)
+ uint8_t *cmd, uint8_t cmd_len)
{
- uint8_t retval;
+ if (cmd_len == 0 || cmd_len > sizeof(sc->sc_transfer.cmd_data)) {
+ DPRINTF(sc, UDMASS_SCSI, "Invalid command length: %d bytes\n",
+ cmd_len);
+ return (false); /* failure */
+ }
- retval = (sc->sc_transform) (sc, cmd, cmdlen);
+ /*
+	 * Copy the CDB into the cmd_data buffer and then apply the common
+	 * quirks to the command.  We then pass the transformed command down
+	 * to allow further, protocol-specific transforms.
+ */
+ memset(sc->sc_transfer.cmd_data, 0, sizeof(sc->sc_transfer.cmd_data));
+ memcpy(sc->sc_transfer.cmd_data, cmd, cmd_len);
+ switch (cmd[0]) {
+ case TEST_UNIT_READY:
+ /*
+		 * Some drives choke on TEST UNIT READY.  Convert it to START STOP
+ * UNIT to get similar status.
+ */
+ if ((sc->sc_quirks & NO_TEST_UNIT_READY) != 0) {
+ DPRINTF(sc, UDMASS_SCSI,
+ "Converted TEST_UNIT_READY to START_UNIT\n");
+ memset(sc->sc_transfer.cmd_data, 0, cmd_len);
+ sc->sc_transfer.cmd_data[0] = START_STOP_UNIT;
+ sc->sc_transfer.cmd_data[4] = SSS_START;
+ }
+ break;
- if (retval == 2) {
- ccb->ccb_h.status = CAM_REQ_CMP;
- xpt_done(ccb);
- return (0);
- } else if (retval == 0) {
- xpt_freeze_devq(ccb->ccb_h.path, 1);
- ccb->ccb_h.status = CAM_REQ_INVALID | CAM_DEV_QFRZN;
- xpt_done(ccb);
- return (0);
+ case INQUIRY:
+ /*
+ * some drives wedge when asked for full inquiry
+ * information.
+ */
+ if ((sc->sc_quirks & FORCE_SHORT_INQUIRY) != 0)
+ sc->sc_transfer.cmd_data[4] = SHORT_INQUIRY_LENGTH;
+ break;
}
- /* Command should be executed */
- return (1);
+ if (sc->sc_transform(sc, cmd, cmd_len))
+ return (true); /* Execute command */
+ if (ccb)
+ umass_cam_illegal_request(ccb);
+ return (false); /* Already failed -- don't submit */
}
#ifdef USB_DEBUG
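A note on the quirks printf above: 0x%b is the kernel printf(9) bit-field conversion. The first character of the pattern string is the base to print the value in (\020, octal for 16, means hexadecimal here), and each following \NNN octal escape is a 1-based bit number followed by that bit's name. For instance:

	printf("flags = %b\n", 3, "\020\001READ\002WRITE");
	/* prints: flags = 3<READ,WRITE> */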
diff --git a/sys/dev/usb/storage/urio.c b/sys/dev/usb/storage/urio.c
index e733ae4dfcba..c13aab44e435 100644
--- a/sys/dev/usb/storage/urio.c
+++ b/sys/dev/usb/storage/urio.c
@@ -30,7 +30,6 @@
* its contributors.
*/
-#include <sys/cdefs.h>
/*
* 2000/3/24 added NetBSD/OpenBSD support (from Alex Nemirovsky)
* 2000/3/07 use two bulk-pipe handles for read and write (Dirk)
diff --git a/sys/dev/usb/template/usb_template_multi.c b/sys/dev/usb/template/usb_template_multi.c
index 2533459be799..be36e5ea70df 100644
--- a/sys/dev/usb/template/usb_template_multi.c
+++ b/sys/dev/usb/template/usb_template_multi.c
@@ -37,7 +37,6 @@
* USB template for CDC ACM (serial), CDC ECM (network), and CDC MSC (storage).
*/
-#include <sys/cdefs.h>
#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
diff --git a/sys/dev/usb/template/usb_template_serialnet.c b/sys/dev/usb/template/usb_template_serialnet.c
index adcc561ccd21..6ee43f7f1f28 100644
--- a/sys/dev/usb/template/usb_template_serialnet.c
+++ b/sys/dev/usb/template/usb_template_serialnet.c
@@ -37,7 +37,6 @@
* This file contains the USB template for USB Networking and Serial
*/
-#include <sys/cdefs.h>
#ifdef USB_GLOBAL_INCLUDE_FILE
#include USB_GLOBAL_INCLUDE_FILE
#else
diff --git a/sys/dev/usb/usb.h b/sys/dev/usb/usb.h
index 3d00cda27d18..a6c3c8030c73 100644
--- a/sys/dev/usb/usb.h
+++ b/sys/dev/usb/usb.h
@@ -114,7 +114,7 @@ MALLOC_DECLARE(M_USBDEV);
/* Allow for marginal and non-conforming devices. */
#define USB_PORT_RESET_DELAY 50 /* ms */
#define USB_PORT_ROOT_RESET_DELAY 200 /* ms */
-#define USB_PORT_RESET_RECOVERY 10 /* ms */
+#define USB_PORT_RESET_RECOVERY 20 /* ms */
#define USB_PORT_POWERUP_DELAY 300 /* ms */
#define USB_PORT_RESUME_DELAY (20*2) /* ms */
#define USB_SET_ADDRESS_SETTLE 10 /* ms */
diff --git a/sys/dev/usb/usb_bus.h b/sys/dev/usb/usb_bus.h
index b9197d3cd84a..ad7b661c2ac0 100644
--- a/sys/dev/usb/usb_bus.h
+++ b/sys/dev/usb/usb_bus.h
@@ -86,7 +86,7 @@ struct usb_bus {
struct usb_bus_msg shutdown_msg[2];
#if USB_HAVE_UGEN
struct usb_bus_msg cleanup_msg[2];
- LIST_HEAD(,usb_fs_privdata) pd_cleanup_list;
+ SLIST_HEAD(,usb_fs_privdata) pd_cleanup_list;
#endif
/*
* This mutex protects the USB hardware:
diff --git a/sys/dev/usb/usb_dev.c b/sys/dev/usb/usb_dev.c
index c58c3b5f64d5..293b0c72587f 100644
--- a/sys/dev/usb/usb_dev.c
+++ b/sys/dev/usb/usb_dev.c
@@ -80,8 +80,7 @@
#include <sys/filio.h>
#include <sys/ttycom.h>
#include <sys/syscallsubr.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#endif /* USB_GLOBAL_INCLUDE_FILE */
#if USB_HAVE_UGEN
@@ -1228,13 +1227,13 @@ usb_filter_read(struct knote *kn, long hint)
return (m ? 1 : 0);
}
-static struct filterops usb_filtops_write = {
+static const struct filterops usb_filtops_write = {
.f_isfd = 1,
.f_detach = usb_filter_detach,
.f_event = usb_filter_write,
};
-static struct filterops usb_filtops_read = {
+static const struct filterops usb_filtops_read = {
.f_isfd = 1,
.f_detach = usb_filter_detach,
.f_event = usb_filter_read,
diff --git a/sys/dev/usb/usb_dev.h b/sys/dev/usb/usb_dev.h
index 08ceb2555942..381fa1654c95 100644
--- a/sys/dev/usb/usb_dev.h
+++ b/sys/dev/usb/usb_dev.h
@@ -96,7 +96,7 @@ struct usb_fs_privdata {
int fifo_index;
struct cdev *cdev;
- LIST_ENTRY(usb_fs_privdata) pd_next;
+ SLIST_ENTRY(usb_fs_privdata) pd_next;
};
/*
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index c4fe3b4a6ab6..f0989972f49f 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -855,6 +855,7 @@ usb_config_parse(struct usb_device *udev, uint8_t iface_index, uint8_t cmd)
if (ep->refcount_alloc != 0)
return (USB_ERR_IN_USE);
}
+ ep++;
}
}
@@ -1359,7 +1360,7 @@ usb_probe_and_attach_sub(struct usb_device *udev,
}
if (uaa->temp_dev == NULL) {
/* create a new child */
- uaa->temp_dev = device_add_child(udev->parent_dev, NULL, -1);
+ uaa->temp_dev = device_add_child(udev->parent_dev, NULL, DEVICE_UNIT_ANY);
if (uaa->temp_dev == NULL) {
device_printf(udev->parent_dev,
"Device creation failed\n");
@@ -1880,7 +1881,7 @@ usb_alloc_device(device_t parent_dev, struct usb_bus *bus,
snprintf(udev->ugen_name, sizeof(udev->ugen_name),
USB_GENERIC_NAME "%u.%u", device_get_unit(bus->bdev),
device_index);
- LIST_INIT(&udev->pd_list);
+ SLIST_INIT(&udev->pd_list);
/* Create the control endpoint device */
udev->ctrl_dev = usb_make_dev(udev, NULL, 0, 0,
@@ -2060,14 +2061,15 @@ repeat_set_config:
}
#endif
}
-#if USB_HAVE_MSCTEST
+#if USB_HAVE_MSCTEST_AUTOQUIRK
if (set_config_failed == 0 && config_index == 0 &&
usb_test_quirk(&uaa, UQ_MSC_NO_START_STOP) == 0 &&
usb_test_quirk(&uaa, UQ_MSC_NO_PREVENT_ALLOW) == 0 &&
usb_test_quirk(&uaa, UQ_MSC_NO_SYNC_CACHE) == 0 &&
usb_test_quirk(&uaa, UQ_MSC_NO_TEST_UNIT_READY) == 0 &&
usb_test_quirk(&uaa, UQ_MSC_NO_GETMAXLUN) == 0 &&
- usb_test_quirk(&uaa, UQ_MSC_NO_INQUIRY) == 0) {
+ usb_test_quirk(&uaa, UQ_MSC_NO_INQUIRY) == 0 &&
+ usb_test_quirk(&uaa, UQ_MSC_IGNORE) == 0) {
/*
* Try to figure out if there are any MSC quirks we
* should apply automatically:
@@ -2190,7 +2192,7 @@ usb_destroy_dev(struct usb_fs_privdata *pd)
delist_dev(pd->cdev);
USB_BUS_LOCK(bus);
- LIST_INSERT_HEAD(&bus->pd_cleanup_list, pd, pd_next);
+ SLIST_INSERT_HEAD(&bus->pd_cleanup_list, pd, pd_next);
/* get cleanup going */
usb_proc_msignal(USB_BUS_EXPLORE_PROC(bus),
&bus->cleanup_msg[0], &bus->cleanup_msg[1]);
@@ -2207,7 +2209,7 @@ usb_cdev_create(struct usb_device *udev)
int inmode, outmode, inmask, outmask, mode;
uint8_t ep;
- KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("stale cdev entries"));
+ KASSERT(SLIST_FIRST(&udev->pd_list) == NULL, ("stale cdev entries"));
DPRINTFN(2, "Creating device nodes\n");
@@ -2254,7 +2256,7 @@ usb_cdev_create(struct usb_device *udev)
mode, UID_ROOT, GID_OPERATOR, 0600);
if (pd != NULL)
- LIST_INSERT_HEAD(&udev->pd_list, pd, pd_next);
+ SLIST_INSERT_HEAD(&udev->pd_list, pd, pd_next);
}
}
@@ -2265,10 +2267,10 @@ usb_cdev_free(struct usb_device *udev)
DPRINTFN(2, "Freeing device nodes\n");
- while ((pd = LIST_FIRST(&udev->pd_list)) != NULL) {
+ while ((pd = SLIST_FIRST(&udev->pd_list)) != NULL) {
KASSERT(pd->cdev->si_drv1 == pd, ("privdata corrupt"));
- LIST_REMOVE(pd, pd_next);
+ SLIST_REMOVE(&udev->pd_list, pd, usb_fs_privdata, pd_next);
usb_destroy_dev(pd);
}
@@ -2358,7 +2360,7 @@ usb_free_device(struct usb_device *udev, uint8_t flag)
mtx_destroy(&udev->device_mtx);
#if USB_HAVE_UGEN
- KASSERT(LIST_FIRST(&udev->pd_list) == NULL, ("leaked cdev entries"));
+ KASSERT(SLIST_FIRST(&udev->pd_list) == NULL, ("leaked cdev entries"));
#endif
/* Uninitialise device */
@@ -3109,3 +3111,51 @@ usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep)
{
return (ep->ep_mode);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_fill_deviceinfo
+ *
+ * This function dumps information about a USB device to the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
+{
+ struct usb_device *hub;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
diff --git a/sys/dev/usb/usb_device.h b/sys/dev/usb/usb_device.h
index 3fc5efb049fc..87596cc1d2bd 100644
--- a/sys/dev/usb/usb_device.h
+++ b/sys/dev/usb/usb_device.h
@@ -225,7 +225,7 @@ struct usb_device {
struct usb_fifo *fifo[USB_FIFO_MAX];
struct usb_symlink *ugen_symlink; /* our generic symlink */
struct usb_fs_privdata *ctrl_dev; /* Control Endpoint 0 device node */
- LIST_HEAD(,usb_fs_privdata) pd_list;
+ SLIST_HEAD(,usb_fs_privdata) pd_list;
char ugen_name[20]; /* name of ugenX.X device */
#endif
usb_ticks_t plugtime; /* copy of "ticks" */
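The pd_list/pd_cleanup_list conversion from LIST to SLIST in the surrounding files drops the back pointer from each entry, halving the per-entry linkage; the cost is that SLIST_REMOVE() walks the list, which is fine for short per-device cdev lists. A minimal user-space sketch of the sys/queue.h macros involved:

#include <sys/queue.h>
#include <stdio.h>

struct pd {
	int unit;
	SLIST_ENTRY(pd) pd_next;	/* one forward pointer per entry */
};
SLIST_HEAD(pd_list, pd);

int
main(void)
{
	struct pd_list head = SLIST_HEAD_INITIALIZER(head);
	struct pd a = { .unit = 0 }, b = { .unit = 1 };

	SLIST_INSERT_HEAD(&head, &a, pd_next);
	SLIST_INSERT_HEAD(&head, &b, pd_next);

	/* O(n) removal: the price of dropping the back pointer. */
	SLIST_REMOVE(&head, &a, pd, pd_next);

	while (!SLIST_EMPTY(&head)) {
		struct pd *p = SLIST_FIRST(&head);

		SLIST_REMOVE_HEAD(&head, pd_next);
		printf("unit %d\n", p->unit);
	}
	return (0);
}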
diff --git a/sys/dev/usb/usb_freebsd.h b/sys/dev/usb/usb_freebsd.h
index d67e230e72dd..02ae6b245134 100644
--- a/sys/dev/usb/usb_freebsd.h
+++ b/sys/dev/usb/usb_freebsd.h
@@ -42,6 +42,7 @@
#define USB_HAVE_TT_SUPPORT 1
#define USB_HAVE_POWERD 1
#define USB_HAVE_MSCTEST 1
+#define USB_HAVE_MSCTEST_AUTOQUIRK 0
#define USB_HAVE_MSCTEST_DETACH 1
#define USB_HAVE_PF 1
#define USB_HAVE_ROOT_MOUNT_HOLD 1
diff --git a/sys/dev/usb/usb_freebsd_loader.h b/sys/dev/usb/usb_freebsd_loader.h
index 404456f76152..edc6bcc9720c 100644
--- a/sys/dev/usb/usb_freebsd_loader.h
+++ b/sys/dev/usb/usb_freebsd_loader.h
@@ -42,6 +42,7 @@
#define USB_HAVE_TT_SUPPORT 1
#define USB_HAVE_POWERD 1
#define USB_HAVE_MSCTEST 1
+#define USB_HAVE_MSCTEST_AUTOQUIRK 0
#define USB_HAVE_MSCTEST_DETACH 0
#define USB_HAVE_PF 0
#define USB_HAVE_ROOT_MOUNT_HOLD 0
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index c0af27d77e5d..ccb0b2184ec4 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -831,42 +831,7 @@ ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
int
ugen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
{
- struct usb_device *udev;
- struct usb_device *hub;
-
- udev = f->udev;
-
- bzero(di, sizeof(di[0]));
-
- di->udi_bus = device_get_unit(udev->bus->bdev);
- di->udi_addr = udev->address;
- di->udi_index = udev->device_index;
- strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
- strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
- strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
- usb_printbcd(di->udi_release, sizeof(di->udi_release),
- UGETW(udev->ddesc.bcdDevice));
- di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
- di->udi_productNo = UGETW(udev->ddesc.idProduct);
- di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
- di->udi_class = udev->ddesc.bDeviceClass;
- di->udi_subclass = udev->ddesc.bDeviceSubClass;
- di->udi_protocol = udev->ddesc.bDeviceProtocol;
- di->udi_config_no = udev->curr_config_no;
- di->udi_config_index = udev->curr_config_index;
- di->udi_power = udev->flags.self_powered ? 0 : udev->power;
- di->udi_speed = udev->speed;
- di->udi_mode = udev->flags.usb_mode;
- di->udi_power_mode = udev->power_mode;
- di->udi_suspended = udev->flags.peer_suspended;
-
- hub = udev->parent_hub;
- if (hub) {
- di->udi_hubaddr = hub->address;
- di->udi_hubindex = hub->device_index;
- di->udi_hubport = udev->port_no;
- }
- return (0);
+ return (usbd_fill_deviceinfo(f->udev, di));
}
int
diff --git a/sys/dev/usb/usb_hub.c b/sys/dev/usb/usb_hub.c
index e3509862ef54..ee9d8ab0c9bb 100644
--- a/sys/dev/usb/usb_hub.c
+++ b/sys/dev/usb/usb_hub.c
@@ -954,7 +954,8 @@ done:
* packet. This function is called having the "bus_mtx" locked.
*------------------------------------------------------------------------*/
void
-uhub_root_intr(struct usb_bus *bus, const uint8_t *ptr, uint8_t len)
+uhub_root_intr(struct usb_bus *bus,
+ const uint8_t *ptr __unused, uint8_t len __unused)
{
USB_BUS_LOCK_ASSERT(bus, MA_OWNED);
diff --git a/sys/dev/usb/usb_ioctl.h b/sys/dev/usb/usb_ioctl.h
index 6d9184723816..85979b9cf778 100644
--- a/sys/dev/usb/usb_ioctl.h
+++ b/sys/dev/usb/usb_ioctl.h
@@ -239,7 +239,7 @@ struct usb_gen_quirk {
#define USB_DEVICESTATS _IOR ('U', 5, struct usb_device_stats)
#define USB_DEVICEENUMERATE _IOW ('U', 6, int)
-/* Generic HID device. Numbers 26 and 30-39 are occupied by hidraw. */
+/* Generic HID device. Numbers 26 and 30-49 are occupied by hidraw. */
#define USB_GET_REPORT_DESC _IOWR('U', 21, struct usb_gen_descriptor)
#define USB_SET_IMMED _IOW ('U', 22, int)
#define USB_GET_REPORT _IOWR('U', 23, struct usb_gen_descriptor)
diff --git a/sys/dev/usb/usb_msctest.c b/sys/dev/usb/usb_msctest.c
index d31baabcf875..7b31d9dadfab 100644
--- a/sys/dev/usb/usb_msctest.c
+++ b/sys/dev/usb/usb_msctest.c
@@ -741,11 +741,10 @@ usb_iface_is_cdrom(struct usb_device *udev, uint8_t iface_index)
return (is_cdrom);
}
-static uint8_t
+static int
usb_msc_get_max_lun(struct usb_device *udev, uint8_t iface_index)
{
struct usb_device_request req;
- usb_error_t err;
uint8_t buf = 0;
/* The Get Max Lun command is a class-specific request. */
@@ -756,11 +755,7 @@ usb_msc_get_max_lun(struct usb_device *udev, uint8_t iface_index)
req.wIndex[1] = 0;
USETW(req.wLength, 1);
- err = usbd_do_request(udev, NULL, &req, &buf);
- if (err)
- buf = 0;
-
- return (buf);
+	return (usbd_do_request(udev, NULL, &req, &buf));
}
#define USB_ADD_QUIRK(udev, any, which) do { \
@@ -803,8 +798,8 @@ usb_msc_auto_quirk(struct usb_device *udev, uint8_t iface_index,
usb_pause_mtx(NULL, hz);
if (usb_test_quirk(uaa, UQ_MSC_NO_GETMAXLUN) == 0 &&
- usb_msc_get_max_lun(udev, iface_index) == 0) {
- DPRINTF("Device has only got one LUN.\n");
+ usb_msc_get_max_lun(udev, iface_index) != 0) {
+ DPRINTF("Device can't handle GETMAXLUN\n");
USB_ADD_QUIRK(udev, any_quirk, UQ_MSC_NO_GETMAXLUN);
}
diff --git a/sys/dev/usb/usb_pf.c b/sys/dev/usb/usb_pf.c
index 4da59419a7c6..0e7a75d04d6a 100644
--- a/sys/dev/usb/usb_pf.c
+++ b/sys/dev/usb/usb_pf.c
@@ -195,12 +195,6 @@ usbpf_clone_create(struct if_clone *ifc, char *name, size_t len,
return (error);
}
ifp = ubus->ifp = if_alloc(IFT_USB);
- if (ifp == NULL) {
- ifc_free_unit(ifc, unit);
- device_printf(ubus->parent, "usbpf: Could not allocate "
- "instance\n");
- return (ENOSPC);
- }
if_setsoftc(ifp, ubus);
if_initname(ifp, usbusname, unit);
if_setname(ifp, name);
diff --git a/sys/dev/usb/usb_process.c b/sys/dev/usb/usb_process.c
index d88de92336f2..4507c999f50a 100644
--- a/sys/dev/usb/usb_process.c
+++ b/sys/dev/usb/usb_process.c
@@ -361,25 +361,21 @@ usb_proc_is_gone(struct usb_process *up)
return (0);
}
-/*------------------------------------------------------------------------*
- * usb_proc_mwait
- *
- * This function will return when the USB process message pointed to
- * by "pm" is no longer on a queue. This function must be called
- * having "up->up_mtx" locked.
- *------------------------------------------------------------------------*/
-void
-usb_proc_mwait(struct usb_process *up, void *_pm0, void *_pm1)
+static int
+usb_proc_mwait_impl(struct usb_process *up, void *_pm0, void *_pm1,
+ bool interruptible)
{
struct usb_proc_msg *pm0 = _pm0;
struct usb_proc_msg *pm1 = _pm1;
+ int error;
/* check if gone */
if (up->up_gone)
- return;
+ return (ENXIO);
USB_MTX_ASSERT(up->up_mtx, MA_OWNED);
+ error = 0;
if (up->up_curtd == curthread) {
/* Just remove the messages from the queue. */
if (pm0->pm_qentry.tqe_prev) {
@@ -391,14 +387,59 @@ usb_proc_mwait(struct usb_process *up, void *_pm0, void *_pm1)
pm1->pm_qentry.tqe_prev = NULL;
}
} else
- while (pm0->pm_qentry.tqe_prev ||
- pm1->pm_qentry.tqe_prev) {
+ while (error == 0 && (pm0->pm_qentry.tqe_prev ||
+ pm1->pm_qentry.tqe_prev)) {
/* check if config thread is gone */
if (up->up_gone)
- break;
+ return (ENXIO);
up->up_dsleep = 1;
- cv_wait(&up->up_drain, up->up_mtx);
+ if (interruptible) {
+ error = cv_wait_sig(&up->up_drain, up->up_mtx);
+
+ /*
+ * The fact that we were interrupted doesn't
+			 * matter if our goal was accomplished anyway.
+ */
+ if (error != 0 && !USB_PROC_MSG_ENQUEUED(pm0) &&
+ !USB_PROC_MSG_ENQUEUED(pm1))
+ error = 0;
+ } else {
+ cv_wait(&up->up_drain, up->up_mtx);
+ }
}
+
+ if (error == ERESTART)
+ error = EINTR;
+ return (error);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_mwait
+ *
+ * This function will return when the USB process message pointed to
+ * by "pm" is no longer on a queue. This function must be called
+ * having "up->up_mtx" locked.
+ *------------------------------------------------------------------------*/
+void
+usb_proc_mwait(struct usb_process *up, void *_pm0, void *_pm1)
+{
+
+ (void)usb_proc_mwait_impl(up, _pm0, _pm1, false);
+}
+
+/*------------------------------------------------------------------------*
+ * usb_proc_mwait_sig
+ *
+ * This function will return when the USB process message pointed to
+ * by "pm" is no longer on a queue. This function must be called
+ * having "up->up_mtx" locked. This version of usb_proc_mwait is
+ * interruptible.
+ *------------------------------------------------------------------------*/
+int
+usb_proc_mwait_sig(struct usb_process *up, void *_pm0, void *_pm1)
+{
+
+ return (usb_proc_mwait_impl(up, _pm0, _pm1, true));
}
/*------------------------------------------------------------------------*
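The usb_proc_mwait_impl() refactoring above captures a general pattern for making a blocking drain signal-interruptible without spurious failures: wait with the _sig variant, discard the interruption if the awaited condition became true anyway, and map ERESTART to EINTR before the error escapes. Schematically, with a hypothetical enqueued() predicate standing in for the real queue-membership test:

	static int
	drain_interruptible(struct cv *cvp, struct mtx *mtxp, struct msg *m)
	{
		int error = 0;

		while (error == 0 && enqueued(m)) {
			error = cv_wait_sig(cvp, mtxp);
			/* An interrupted wait is fine if the goal was reached. */
			if (error != 0 && !enqueued(m))
				error = 0;
		}
		if (error == ERESTART)
			error = EINTR;	/* don't leak restart status */
		return (error);
	}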
diff --git a/sys/dev/usb/usb_process.h b/sys/dev/usb/usb_process.h
index 6a8ac0acda33..1962bdb8b607 100644
--- a/sys/dev/usb/usb_process.h
+++ b/sys/dev/usb/usb_process.h
@@ -31,7 +31,6 @@
#ifndef USB_GLOBAL_INCLUDE_FILE
#include <sys/interrupt.h>
#include <sys/priority.h>
-#include <sys/runq.h>
#endif
/* defines */
@@ -76,6 +75,7 @@ int usb_proc_create(struct usb_process *up, struct mtx *p_mtx,
const char *pmesg, uint8_t prio);
void usb_proc_drain(struct usb_process *up);
void usb_proc_mwait(struct usb_process *up, void *pm0, void *pm1);
+int usb_proc_mwait_sig(struct usb_process *up, void *pm0, void *pm1);
void usb_proc_free(struct usb_process *up);
void *usb_proc_msignal(struct usb_process *up, void *pm0, void *pm1);
void usb_proc_rewakeup(struct usb_process *up);
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 65dcb06c2f3f..2318e6bd0017 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -779,6 +779,7 @@ vendor TRIPPLITE 0x2478 Tripp-Lite
vendor NORELSYS 0x2537 NOREL Systems Ltd.
vendor TENDA2 0x2604 Tenda
vendor HIROSE 0x2631 Hirose Electric
+vendor XIAOMI 0x2717 Xiaomi
vendor NHJ 0x2770 NHJ
vendor THINGM 0x27b8 ThingM
vendor PERASO 0x2932 Peraso Technologies, Inc.
@@ -1047,7 +1048,6 @@ product ALCATEL OT535 0x02df One Touch 535/735
/* Alcor Micro, Inc. products */
product ALCOR2 KBD_HUB 0x2802 Kbd Hub
-product ALCOR DUMMY 0x0000 Dummy product
product ALCOR SDCR_6335 0x6335 SD/MMC Card Reader
product ALCOR SDCR_6362 0x6362 SD/MMC Card Reader
product ALCOR SDCR_6366 0x6366 SD/MMC Card Reader
@@ -1064,6 +1064,7 @@ product ALCOR AU6390 0x6390 AU6390 USB-IDE converter
product ALINK DWM652U5 0xce16 DWM-652
product ALINK 3G 0x9000 3G modem
product ALINK SIM7600E 0x9001 LTE modem
+product ALINK SIM7600G 0x9011 LTE modem
product ALINK 3GU 0x9200 3G modem
/* Altec Lansing products */
@@ -1137,14 +1138,41 @@ product ANYDATA ADU_500A 0x6502 CDMA 2000 EV-DO USB Modem
product AOX USB101 0x0008 Ethernet
/* Apple Computer products */
-product APPLE DUMMY 0x0000 Dummy product
product APPLE IMAC_KBD 0x0201 USB iMac Keyboard
product APPLE KBD 0x0202 USB Keyboard M2452
product APPLE EXT_KBD 0x020c Apple Extended USB Keyboard
+/* PowerBooks Feb 2005, iBooks G4 */
+product APPLE FOUNTAIN_ANSI 0x020e Apple Internal Keyboard/Trackpad
+product APPLE FOUNTAIN_ISO 0x020f Apple Internal Keyboard/Trackpad
+/* 17 inch PowerBook */
+product APPLE GEYSER_17 0x020d Apple Internal Keyboard/Trackpad
+/* PowerBooks Oct 2005 */
+product APPLE GEYSER_ANSI 0x0214 Apple Internal Keyboard/Trackpad
+product APPLE GEYSER_ISO 0x0215 Apple Internal Keyboard/Trackpad
+product APPLE GEYSER_JIS 0x0216 Apple Internal Keyboard/Trackpad
+/* Core Duo MacBook & MacBook Pro */
+product APPLE GEYSER3_ANSI 0x0217 Apple Internal Keyboard/Trackpad
+product APPLE GEYSER3_ISO 0x0218 Apple Internal Keyboard/Trackpad
+product APPLE GEYSER3_JIS 0x0219 Apple Internal Keyboard/Trackpad
+/* Core2 Duo MacBook & MacBook Pro */
+product APPLE GEYSER4_ANSI 0x021a Apple Internal Keyboard/Trackpad
+product APPLE GEYSER4_ISO 0x021b Apple Internal Keyboard/Trackpad
+product APPLE GEYSER4_JIS 0x021c Apple Internal Keyboard/Trackpad
+/* External */
+product APPLE ALU_MINI_ANSI 0x021d Apple Keyboard/Trackpad
+product APPLE ALU_MINI_ISO 0x021e Apple Keyboard/Trackpad
+product APPLE ALU_MINI_JIS 0x021f Apple Keyboard/Trackpad
+product APPLE ALU_ANSI 0x0220 Apple Keyboard/Trackpad
+product APPLE ALU_ISO 0x0221 Apple Keyboard/Trackpad
+product APPLE ALU_JIS 0x0222 Apple Keyboard/Trackpad
/* MacbookAir, aka wellspring */
product APPLE WELLSPRING_ANSI 0x0223 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING_ISO 0x0224 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING_JIS 0x0225 Apple Internal Keyboard/Trackpad
+/* Core2 Duo MacBook3,1 */
+product APPLE GEYSER4_HF_ANSI 0x0229 Apple Internal Keyboard/Trackpad
+product APPLE GEYSER4_HF_ISO 0x022a Apple Internal Keyboard/Trackpad
+product APPLE GEYSER4_HF_JIS 0x022b Apple Internal Keyboard/Trackpad
/* MacbookProPenryn, aka wellspring2 */
product APPLE WELLSPRING2_ANSI 0x0230 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING2_ISO 0x0231 Apple Internal Keyboard/Trackpad
@@ -1173,6 +1201,10 @@ product APPLE WELLSPRING6A_JIS 0x024b Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING6_ANSI 0x024c Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING6_ISO 0x024d Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING6_JIS 0x024e Apple Internal Keyboard/Trackpad
+/* External */
+product APPLE ALU_REVB_ANSI 0x024f Apple Keyboard/Trackpad
+product APPLE ALU_REVB_ISO 0x0250 Apple Keyboard/Trackpad
+product APPLE ALU_REVB_JIS 0x0251 Apple Keyboard/Trackpad
/* Macbook8,2 (unibody) */
product APPLE WELLSPRING5A_ANSI 0x0252 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING5A_ISO 0x0253 Apple Internal Keyboard/Trackpad
@@ -1193,6 +1225,16 @@ product APPLE WELLSPRING8_JIS 0x0292 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING9_ANSI 0x0272 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING9_ISO 0x0273 Apple Internal Keyboard/Trackpad
product APPLE WELLSPRING9_JIS 0x0274 Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J140K 0x027a Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J132 0x027b Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J680 0x027c Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J213 0x027d Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J214K 0x027e Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J223 0x027f Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J230K 0x0280 Apple Internal Keyboard/Trackpad
+product APPLE WELLSPRINGT2_J152F 0x0340 Apple Internal Keyboard/Trackpad
+product APPLE MAGIC_KEYBOARD_2021 0x029c Apple Internal Keyboard/Trackpad
+product APPLE MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a Apple Keyboard/Trackpad
product APPLE MAGIC_TRACKPAD2 0x0265 Apple Magic Trackpad 2
product APPLE MOUSE 0x0301 Mouse M4848
product APPLE OPTMOUSE 0x0302 Optical mouse
@@ -1419,8 +1461,26 @@ product BILLIONTON USBE100 0x8511 USBE100
product BILLIONTON USB2AR 0x90ff USB2AR Ethernet
/* Brainboxes Limited products */
+product BRAINBOXES US101 0x1011 US-101 USB2Serial 1xRS232
+product BRAINBOXES US159 0x1021 US-159 USB2Serial 1xRS232
+product BRAINBOXES US235 0x1017 US-235 USB2Serial 1xRS232
product BRAINBOXES US257 0x5001 US-257 USB2Serial 2xRS232
product BRAINBOXES US25701 0x5002 US-25701 USB2Serial 2xRS232
+product BRAINBOXES US279_12 0x2021 US-279 USB2Serial 8xRS232 (Port 1 and 2)
+product BRAINBOXES US279_34 0x2022 US-279 USB2Serial 8xRS232 (Port 3 and 4)
+product BRAINBOXES US279_56 0x2023 US-279 USB2Serial 8xRS232 (Port 5 and 6)
+product BRAINBOXES US279_78 0x2024 US-279 USB2Serial 8xRS232 (Port 7 and 8)
+product BRAINBOXES US313 0x6001 US-313 USB2Serial 2xRS422/485
+product BRAINBOXES US320 0x1019 US-320 USB2Serial 1xRS422/485
+product BRAINBOXES US324 0x1013 US-324 USB2Serial 1xRS422/485
+product BRAINBOXES US346_12 0x3011 US-346 USB2Serial 4xRS422/485 (Port 1 and 2)
+product BRAINBOXES US346_34 0x3012 US-346 USB2Serial 4xRS422/485 (Port 3 and 4)
+product BRAINBOXES US701_12 0x2011 US-701 USB2Serial 4xRS232 (Port 1 and 2)
+product BRAINBOXES US701_34 0x2012 US-701 USB2Serial 4xRS232 (Port 3 and 4)
+product BRAINBOXES US842_12 0x8001 US-842 USB2Serial 8xRS-422/485 (Port 1 and 2)
+product BRAINBOXES US842_34 0x8002 US-842 USB2Serial 8xRS-422/485 (Port 3 and 4)
+product BRAINBOXES US842_56 0x8003 US-842 USB2Serial 8xRS-422/485 (Port 5 and 6)
+product BRAINBOXES US842_78 0x8004 US-842 USB2Serial 8xRS-422/485 (Port 7 and 8)
/* Broadcom products */
product BROADCOM BCM2033 0x2033 BCM2033 Bluetooth USB dongle
@@ -1601,6 +1661,7 @@ product CORSAIR STRAFE2 0x1b44 Corsair STRAFE Gaming keyboard
product CREATIVE NOMAD_II 0x1002 Nomad II MP3 player
product CREATIVE NOMAD_IIMG 0x4004 Nomad II MG
product CREATIVE NOMAD 0x4106 Nomad
+product CREATIVE STAGE_SE_MINI 0x3295 Stage SE mini
product CREATIVE2 VOIP_BLASTER 0x0258 Voip Blaster
product CREATIVE3 OPTICAL_MOUSE 0x0001 Notebook Optical Mouse
@@ -1729,6 +1790,7 @@ product DLINK DWA180A1 0x3316 DWA-180 rev A1
product DLINK DWA172A1 0x3318 DWA-172 rev A1
product DLINK DWA131E1 0x3319 DWA-131 rev E1
product DLINK DWA182D1 0x331c DWA-182 rev D1
+product DLINK DWA181A1 0x331e DWA-181 rev A1
product DLINK DWL122 0x3700 DWL-122
product DLINK DWLG120 0x3701 DWL-G120
product DLINK DWL120F 0x3702 DWL-120 rev F
@@ -1845,6 +1907,7 @@ product EDIMAX EW7318USG 0x7318 USB Wireless dongle
product EDIMAX RTL8192SU_1 0x7611 RTL8192SU
product EDIMAX RTL8192SU_2 0x7612 RTL8192SU
product EDIMAX EW7622UMN 0x7622 EW-7622UMn
+product EDIMAX MT7601U 0x7710 MT7601U
product EDIMAX RT2870_1 0x7711 RT2870
product EDIMAX EW7717 0x7717 EW-7717
product EDIMAX EW7718 0x7718 EW-7718
@@ -1862,7 +1925,6 @@ product EGALAX TPANEL2 0x0002 Touch Panel
product EGALAX2 TPANEL 0x0001 Touch Panel
/* EGO Products */
-product EGO DUMMY 0x0000 Dummy Product
product EGO M4U 0x1020 ESI M4U
/* Eicon Networks */
@@ -1884,6 +1946,7 @@ product ELECOM WDC150SU2M 0x4008 WDC-150SU2M
product ELECOM LDUSBTX2 0x400b LD-USB/TX
product ELECOM WDB433SU2M2 0x400f WDB-433SU2M2
product ELECOM LDUSB20 0x4010 LD-USB20
+product ELECOM EDCQUA3C 0x4017 EDC-QUA3C
product ELECOM UCSGT 0x5003 UC-SGT
product ELECOM UCSGT0 0x5004 UC-SGT
product ELECOM LDUSBTX3 0xabc1 LD-USB/TX
@@ -1986,7 +2049,6 @@ product FALCOM TWIST 0x0001 USB GSM/GPRS Modem
product FALCOM SAMBA 0x0005 FTDI compatible adapter
/* FEIYA products */
-product FEIYA DUMMY 0x0000 Dummy product
product FEIYA 5IN1 0x1132 5-in-1 Card Reader
product FEIYA ELANGO 0x6200 MicroSDHC Card Reader
product FEIYA AC110 0x6300 AC-110 Card Reader
@@ -2601,7 +2663,6 @@ product IDTECH IDT1221U 0x0300 FTDI compatible adapter
product IMAGINATION DBX1 0x2107 DBX1 DSP core
/* Initio Corporation products */
-product INITIO DUMMY 0x0000 Dummy product
product INITIO INIC_1610P 0x1e40 USB to SATA Bridge
/* Inside Out Networks products */
@@ -2830,6 +2891,7 @@ product LENOVO ETHERNET 0x7203 USB 2.0 Ethernet
product LENOVO RTL8153 0x7205 USB 3.0 Ethernet
product LENOVO ONELINK 0x720a USB 3.0 Ethernet
product LENOVO RTL8153_04 0x720c USB 3.0 Ethernet
+product LENOVO ONELINKPLUS 0x3054 LAN port in Thinkpad OneLink+ dock
product LENOVO TBT3LAN 0x3069 LAN port in Thinkpad TB3 dock
product LENOVO USBCLAN 0x3062 LAN port in Thinkpad USB-C dock
product LENOVO TBT3LANGEN2 0x3082 LAN port in Thinkpad TB3 dock gen2
@@ -2874,7 +2936,6 @@ product LINKSYS4 RT3070 0x0078 RT3070
product LINKSYS4 WUSB600NV2 0x0079 WUSB600N v2
/* Logilink products */
-product LOGILINK DUMMY 0x0000 Dummy product
product LOGILINK U2M 0x0101 LogiLink USB MIDI Cable
/* Logitech products */
@@ -3990,21 +4051,21 @@ product QUANTA GLE 0xea06 HSDPA modem
product QUANTA RW6815R 0xf003 HP iPAQ rw6815 RNDIS
/* Quectel products */
+product QUECTEL EC21 0x0121 Quectel EC21
product QUECTEL EC25 0x0125 Quectel EC20(MDM9x07)/EC25/EG25
product QUECTEL EM05 0x0127 Quectel EM05
-product QUECTEL EC21 0x0121 Quectel EC21
product QUECTEL EG91 0x0191 Quectel EG91
product QUECTEL EG95 0x0195 Quectel EG95
+product QUECTEL BG96 0x0296 Quectel BG96
product QUECTEL EP06 0x0306 Quectel EG06/EP06/EM06
product QUECTEL EG065K 0x030B Quectel EG065K/EG060K
-product QUECTEL EM12 0x0512 Quectel EG12/EP12/EM12/EG16/EG18
-product QUECTEL BG96 0x0296 Quectel BG96
-product QUECTEL BG95 0x0700 Quectel BG95/BG77/BG600L-M3/BC69
-product QUECTEL AG35 0x0435 Quectel AG35
product QUECTEL AG15 0x0415 Quectel AG15
+product QUECTEL AG35 0x0435 Quectel AG35
product QUECTEL AG520 0x0452 Quectel AG520
product QUECTEL AG550 0x0455 Quectel AG550
+product QUECTEL EM12 0x0512 Quectel EG12/EP12/EM12/EG16/EG18
product QUECTEL EM160R 0x0620 Quectel EM160R/EG20
+product QUECTEL BG95 0x0700 Quectel BG95/BG77/BG600L-M3/BC69
product QUECTEL RG500 0x0800 Quectel RG500/RM500/RG510/RM510
product QUECTEL RG520 0x0801 Quectel RG520/RM520/SG520
product QUECTEL EC200 0x6000 Quectel EC200/UC200
@@ -4042,7 +4103,7 @@ product RALINK RT3573 0x3573 RT3573
product RALINK RT5370 0x5370 RT5370
product RALINK RT5372 0x5372 RT5372
product RALINK RT5572 0x5572 RT5572
-product RALINK RT7601 0x7601 RT7601
+product RALINK MT7601U	0x7601	MT7601 MediaTek Wireless Adapter
product RALINK RT8070 0x8070 RT8070
product RALINK RT2570_3 0x9020 RT2500USB Wireless Adapter
product RALINK RT2573_2 0x9021 RT2501USB Wireless Adapter
@@ -4053,7 +4114,6 @@ product RATOC REXUSB60F 0xb020 USB serial adapter REX-USB60F
/* Realtek products */
/* Green House and CompUSA OEM this part */
-product REALTEK DUMMY 0x0000 Dummy product
product REALTEK USB20CRW 0x0158 USB20CRW Card Reader
product REALTEK RTL8188ETV 0x0179 RTL8188ETV
product REALTEK RTL8188CTV 0x018a RTL8188CTV
@@ -4098,7 +4158,6 @@ product REALTEK RTL8192SU 0xc512 RTL8192SU
product REALTEK RTW8821CU 0xc811 RTW8821CU
/* RedOctane products */
-product REDOCTANE DUMMY 0x0000 Dummy product
product REDOCTANE GHMIDI 0x474b GH MIDI INTERFACE
/* Renesas products */
@@ -4362,6 +4421,7 @@ product SIERRA E6893 0x6893 E6893
product SIERRA MC8700 0x68A3 MC8700
product SIERRA MC7354 0x68C0 MC7354
product SIERRA MC7355 0x9041 MC7355
+product SIERRA AC340U 0x9051 Sierra Wireless AirCard 340U
product SIERRA MC7430 0x9071 Sierra Wireless MC7430 Qualcomm Snapdragon X7 LTE-A
product SIERRA AC313U 0x68aa Sierra Wireless AirCard 313U
product SIERRA TRUINSTALL 0x0fff Aircard Tru Installer
@@ -4498,9 +4558,11 @@ product SITECOMEU RT3072_5 0x004a RT3072
product SITECOMEU WL349V1 0x004b WL-349 v1
product SITECOMEU RT3072_6 0x004d RT3072
product SITECOMEU WLA1000 0x005b WLA-1000
+product SITECOMEU RT3070_1 0x0051 RT3070
product SITECOMEU RTL8188CU_1 0x0052 RTL8188CU
product SITECOMEU RTL8188CU_2 0x005c RTL8188CU
product SITECOMEU RTL8192CU 0x0061 RTL8192CU
+product SITECOMEU RTL8188S 0x006b RTL8188S
product SITECOMEU LN032 0x0072 LN-032
product SITECOMEU WLA7100 0x0074 WLA-7100
product SITECOMEU LN031 0x0056 LN-031
@@ -4668,7 +4730,6 @@ product TECLAST TLC300 0x3203 USB Media Player
product TESTO USB_INTERFACE 0x0001 FTDI compatible adapter
/* TexTech products */
-product TEXTECH DUMMY 0x0000 Dummy product
product TEXTECH U2M_1 0x0101 Textech USB MIDI cable
product TEXTECH U2M_2 0x1806 Textech USB MIDI cable
@@ -4768,6 +4829,7 @@ product TOSHIBA HSDPA 0x1302 G450 modem
product TOSHIBA TRANSMEMORY 0x6545 USB ThumbDrive
/* TP-Link products */
+product TPLINK RTL8192CU 0x0100 RTL8192CU
product TPLINK T4U 0x0101 Archer T4U
product TPLINK WN821NV5 0x0107 TL-WN821N v5
product TPLINK WN822NV4 0x0108 TL-WN822N v4
@@ -4909,11 +4971,12 @@ product WAVESENSE JAZZ 0xaaaa Jazz blood glucose meter
/* WCH products */
product WCH CH341SER 0x5523 CH341/CH340 USB-Serial Bridge
-product WCH2 DUMMY 0x0000 Dummy product
product WCH2 CH341SER_2 0x5523 CH341/CH340 USB-Serial Bridge
+product WCH2 CH343SER 0x55d3 CH343 USB Serial
+product WCH2 CH9102SER 0x55d4 CH9102 USB Serial
product WCH2 CH341SER_3 0x7522 CH341/CH340 USB-Serial Bridge
product WCH2 CH341SER 0x7523 CH341/CH340 USB-Serial Bridge
-product WCH2 U2M 0X752d CH345 USB2.0-MIDI
+product WCH2 U2M 0x752d CH345 USB2.0-MIDI
/* West Mountain Radio products */
product WESTMOUNTAIN RIGBLASTER_ADVANTAGE 0x0003 RIGblaster Advantage
@@ -4970,12 +5033,16 @@ product WISTRONNEWEB UR055G 0x0711 UR055G
product WISTRONNEWEB O8494 0x0804 ORiNOCO 802.11n
product WISTRONNEWEB AR5523_1 0x0826 AR5523
product WISTRONNEWEB AR5523_1_NF 0x0827 AR5523 (no firmware)
-product WISTRONNEWEB AR5523_2 0x082a AR5523
+product WISTRONNEWEB AR5523_2 0x0828 AR5523
+product WISTRONNEWEB AR5523_2_ALT 0x082a AR5523
product WISTRONNEWEB AR5523_2_NF 0x0829 AR5523 (no firmware)
/* Xerox products */
product XEROX WCM15 0xffef WorkCenter M15
+/* Xiaomi products */
+product XIAOMI MT7601U 0x4106 MT7601U
+
/* Xirlink products */
product XIRLINK PCCAM 0x8080 IBM PC Camera
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 5192591281f4..0826d9f078c4 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -38,6 +38,7 @@ struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
+struct usb_device_info;
struct mbuf;
typedef enum { /* keep in sync with usb_errstr_table */
@@ -525,6 +526,8 @@ struct usb_proc_msg {
usb_size_t pm_num;
};
+#define USB_PROC_MSG_ENQUEUED(msg) ((msg)->pm_qentry.tqe_prev != NULL)
+
#define USB_FIFO_TX 0
#define USB_FIFO_RX 1
@@ -585,6 +588,8 @@ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep, uint8_t ep_mode);
uint8_t usbd_get_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep);
+int usbd_fill_deviceinfo(struct usb_device *udev,
+ struct usb_device_info *di);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/usb/video/udl.c b/sys/dev/usb/video/udl.c
index 354aa3f876a5..213f1f5bb957 100644
--- a/sys/dev/usb/video/udl.c
+++ b/sys/dev/usb/video/udl.c
@@ -423,7 +423,7 @@ udl_attach(device_t dev)
sc->sc_fb_info.fb_priv = sc;
sc->sc_fb_info.setblankmode = &udl_fb_setblankmode;
- sc->sc_fbdev = device_add_child(dev, "fbd", -1);
+ sc->sc_fbdev = device_add_child(dev, "fbd", DEVICE_UNIT_ANY);
if (sc->sc_fbdev == NULL)
goto detach;
if (device_probe_and_attach(sc->sc_fbdev) != 0)
@@ -441,9 +441,12 @@ static int
udl_detach(device_t dev)
{
struct udl_softc *sc = device_get_softc(dev);
+ int error;
/* delete all child devices */
- device_delete_children(dev);
+ error = bus_generic_detach(dev);
+ if (error != 0)
+ return (error);
UDL_LOCK(sc);
sc->sc_gone = 1;
diff --git a/sys/dev/usb/wlan/if_mtw.c b/sys/dev/usb/wlan/if_mtw.c
new file mode 100644
index 000000000000..6967e5081542
--- /dev/null
+++ b/sys/dev/usb/wlan/if_mtw.c
@@ -0,0 +1,4690 @@
+/*-
+ * Copyright (c) 2008-2010 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2013-2014 Kevin Lo
+ * Copyright (c) 2021 James Hastings
+ * Ported to FreeBSD by Jesper Schmitz Mouridsen jsm@FreeBSD.org
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * MediaTek MT7601U 802.11b/g/n WLAN.
+ */
+
+#include "opt_wlan.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/endian.h>
+#include <sys/eventhandler.h>
+#include <sys/firmware.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/linker.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sysctl.h>
+
+#include <net/bpf.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_arp.h>
+#include <net/if_dl.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net80211/ieee80211_var.h>
+#include <net80211/ieee80211_radiotap.h>
+#include <net80211/ieee80211_ratectl.h>
+#include <net80211/ieee80211_regdomain.h>
+#ifdef IEEE80211_SUPPORT_SUPERG
+#include <net80211/ieee80211_superg.h>
+#endif
+#include <netinet/if_ether.h>
+#include <netinet/in.h>
+#include <netinet/in_systm.h>
+#include <netinet/in_var.h>
+#include <netinet/ip.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usbdi.h>
+
+#include "usbdevs.h"
+
+#define USB_DEBUG_VAR mtw_debug
+#include <dev/usb/usb_debug.h>
+#include <dev/usb/usb_msctest.h>
+
+#include "if_mtwreg.h"
+#include "if_mtwvar.h"
+
+#define MTW_DEBUG
+
+#ifdef MTW_DEBUG
+int mtw_debug;
+static SYSCTL_NODE(_hw_usb, OID_AUTO, mtw, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+ "USB mtw");
+SYSCTL_INT(_hw_usb_mtw, OID_AUTO, debug, CTLFLAG_RWTUN, &mtw_debug, 0,
+ "mtw debug level");
+
+enum {
+ MTW_DEBUG_XMIT = 0x00000001, /* basic xmit operation */
+ MTW_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */
+ MTW_DEBUG_RECV = 0x00000004, /* basic recv operation */
+ MTW_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */
+ MTW_DEBUG_STATE = 0x00000010, /* 802.11 state transitions */
+ MTW_DEBUG_RATE = 0x00000020, /* rate adaptation */
+ MTW_DEBUG_USB = 0x00000040, /* usb requests */
+ MTW_DEBUG_FIRMWARE = 0x00000080, /* firmware(9) loading debug */
+ MTW_DEBUG_BEACON = 0x00000100, /* beacon handling */
+ MTW_DEBUG_INTR = 0x00000200, /* ISR */
+ MTW_DEBUG_TEMP = 0x00000400, /* temperature calibration */
+ MTW_DEBUG_ROM = 0x00000800, /* various ROM info */
+ MTW_DEBUG_KEY = 0x00001000, /* crypto keys management */
+ MTW_DEBUG_TXPWR = 0x00002000, /* dump Tx power values */
+ MTW_DEBUG_RSSI = 0x00004000, /* dump RSSI lookups */
+ MTW_DEBUG_RESET = 0x00008000, /* initialization progress */
+ MTW_DEBUG_CALIB = 0x00010000, /* calibration progress */
+ MTW_DEBUG_CMD = 0x00020000, /* command queue */
+ MTW_DEBUG_ANY = 0xffffffff
+};
+
+#define MTW_DPRINTF(_sc, _m, ...) \
+ do { \
+ if (mtw_debug & (_m)) \
+ device_printf((_sc)->sc_dev, __VA_ARGS__); \
+ } while (0)
+
+#else
+#define MTW_DPRINTF(_sc, _m, ...) \
+ do { \
+ (void)_sc; \
+ } while (0)
+#endif
+
+#define IEEE80211_HAS_ADDR4(wh) IEEE80211_IS_DSTODS(wh)
+
+/* NB: "11" is the maximum number of padding bytes needed for Tx */
+#define MTW_MAX_TXSZ \
+ (sizeof(struct mtw_txd) + sizeof(struct mtw_txwi) + MCLBYTES + 11)
+
+/*
+ * Because of LOR in mtw_key_delete(), use atomic instead.
+ * '& MTW_CMDQ_MASQ' is to loop cmdq[].
+ */
+#define MTW_CMDQ_GET(c) (atomic_fetchadd_32((c), 1) & MTW_CMDQ_MASQ)
+
+static const STRUCT_USB_HOST_ID mtw_devs[] = {
+#define MTW_DEV(v, p) \
+ { \
+ USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) \
+ }
+ MTW_DEV(EDIMAX, MT7601U),
+ MTW_DEV(RALINK, MT7601U),
+ MTW_DEV(XIAOMI, MT7601U)
+};
+#undef MTW_DEV
+
+static device_probe_t mtw_match;
+static device_attach_t mtw_attach;
+static device_detach_t mtw_detach;
+
+static usb_callback_t mtw_bulk_rx_callback;
+static usb_callback_t mtw_bulk_tx_callback0;
+static usb_callback_t mtw_bulk_tx_callback1;
+static usb_callback_t mtw_bulk_tx_callback2;
+static usb_callback_t mtw_bulk_tx_callback3;
+static usb_callback_t mtw_bulk_tx_callback4;
+static usb_callback_t mtw_bulk_tx_callback5;
+static usb_callback_t mtw_fw_callback;
+
+static void mtw_autoinst(void *, struct usb_device *, struct usb_attach_arg *);
+static int mtw_driver_loaded(struct module *, int, void *);
+static void mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error,
+ u_int index);
+static struct ieee80211vap *mtw_vap_create(struct ieee80211com *,
+ const char[IFNAMSIZ], int, enum ieee80211_opmode, int,
+ const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN]);
+static void mtw_vap_delete(struct ieee80211vap *);
+static void mtw_cmdq_cb(void *, int);
+static void mtw_setup_tx_list(struct mtw_softc *, struct mtw_endpoint_queue *);
+static void mtw_unsetup_tx_list(struct mtw_softc *,
+ struct mtw_endpoint_queue *);
+static void mtw_load_microcode(void *arg);
+
+static usb_error_t mtw_do_request(struct mtw_softc *,
+ struct usb_device_request *, void *);
+static int mtw_read(struct mtw_softc *, uint16_t, uint32_t *);
+static int mtw_read_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
+static int mtw_write_2(struct mtw_softc *, uint16_t, uint16_t);
+static int mtw_write(struct mtw_softc *, uint16_t, uint32_t);
+static int mtw_write_region_1(struct mtw_softc *, uint16_t, uint8_t *, int);
+static int mtw_set_region_4(struct mtw_softc *, uint16_t, uint32_t, int);
+static int mtw_efuse_read_2(struct mtw_softc *, uint16_t, uint16_t *);
+static int mtw_bbp_read(struct mtw_softc *, uint8_t, uint8_t *);
+static int mtw_bbp_write(struct mtw_softc *, uint8_t, uint8_t);
+static int mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len);
+static void mtw_get_txpower(struct mtw_softc *);
+static int mtw_read_eeprom(struct mtw_softc *);
+static struct ieee80211_node *mtw_node_alloc(struct ieee80211vap *,
+ const uint8_t mac[IEEE80211_ADDR_LEN]);
+static int mtw_media_change(if_t);
+static int mtw_newstate(struct ieee80211vap *, enum ieee80211_state, int);
+static int mtw_wme_update(struct ieee80211com *);
+static void mtw_key_set_cb(void *);
+static int mtw_key_set(struct ieee80211vap *, struct ieee80211_key *);
+static void mtw_key_delete_cb(void *);
+static int mtw_key_delete(struct ieee80211vap *, struct ieee80211_key *);
+static void mtw_ratectl_to(void *);
+static void mtw_ratectl_cb(void *, int);
+static void mtw_drain_fifo(void *);
+static void mtw_iter_func(void *, struct ieee80211_node *);
+static void mtw_newassoc_cb(void *);
+static void mtw_newassoc(struct ieee80211_node *, int);
+static int mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val);
+static void mtw_recv_mgmt(struct ieee80211_node *, struct mbuf *, int,
+ const struct ieee80211_rx_stats *, int, int);
+static void mtw_rx_frame(struct mtw_softc *, struct mbuf *, uint32_t);
+static void mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *,
+ int);
+static void mtw_set_tx_desc(struct mtw_softc *, struct mtw_tx_data *);
+static int mtw_tx(struct mtw_softc *, struct mbuf *, struct ieee80211_node *);
+static int mtw_tx_mgt(struct mtw_softc *, struct mbuf *,
+ struct ieee80211_node *);
+static int mtw_sendprot(struct mtw_softc *, const struct mbuf *,
+ struct ieee80211_node *, int, int);
+static int mtw_tx_param(struct mtw_softc *, struct mbuf *,
+ struct ieee80211_node *, const struct ieee80211_bpf_params *);
+static int mtw_raw_xmit(struct ieee80211_node *, struct mbuf *,
+ const struct ieee80211_bpf_params *);
+static int mtw_transmit(struct ieee80211com *, struct mbuf *);
+static void mtw_start(struct mtw_softc *);
+static void mtw_parent(struct ieee80211com *);
+static void mtw_select_chan_group(struct mtw_softc *, int);
+
+static int mtw_set_chan(struct mtw_softc *, struct ieee80211_channel *);
+static void mtw_set_channel(struct ieee80211com *);
+static void mtw_getradiocaps(struct ieee80211com *, int, int *,
+ struct ieee80211_channel[]);
+static void mtw_scan_start(struct ieee80211com *);
+static void mtw_scan_end(struct ieee80211com *);
+static void mtw_update_beacon(struct ieee80211vap *, int);
+static void mtw_update_beacon_cb(void *);
+static void mtw_updateprot(struct ieee80211com *);
+static void mtw_updateprot_cb(void *);
+static void mtw_usb_timeout_cb(void *);
+static int mtw_reset(struct mtw_softc *sc);
+static void mtw_enable_tsf_sync(struct mtw_softc *);
+
+static void mtw_enable_mrr(struct mtw_softc *);
+static void mtw_set_txpreamble(struct mtw_softc *);
+static void mtw_set_basicrates(struct mtw_softc *);
+static void mtw_set_leds(struct mtw_softc *, uint16_t);
+static void mtw_set_bssid(struct mtw_softc *, const uint8_t *);
+static void mtw_set_macaddr(struct mtw_softc *, const uint8_t *);
+static void mtw_updateslot(struct ieee80211com *);
+static void mtw_updateslot_cb(void *);
+static void mtw_update_mcast(struct ieee80211com *);
+static int8_t mtw_rssi2dbm(struct mtw_softc *, uint8_t, uint8_t);
+static void mtw_update_promisc_locked(struct mtw_softc *);
+static void mtw_update_promisc(struct ieee80211com *);
+static int mtw_txrx_enable(struct mtw_softc *);
+static void mtw_init_locked(struct mtw_softc *);
+static void mtw_stop(void *);
+static void mtw_delay(struct mtw_softc *, u_int);
+static void mtw_update_chw(struct ieee80211com *ic);
+static int mtw_ampdu_enable(struct ieee80211_node *ni,
+ struct ieee80211_tx_ampdu *tap);
+
+static eventhandler_tag mtw_etag;
+
+static const struct {
+ uint8_t reg;
+ uint8_t val;
+} mt7601_rf_bank0[] = { MT7601_BANK0_RF },
+ mt7601_rf_bank4[] = { MT7601_BANK4_RF },
+ mt7601_rf_bank5[] = { MT7601_BANK5_RF };
+static const struct {
+ uint32_t reg;
+ uint32_t val;
+} mt7601_def_mac[] = { MT7601_DEF_MAC };
+static const struct {
+ uint8_t reg;
+ uint8_t val;
+} mt7601_def_bbp[] = { MT7601_DEF_BBP };
+
+static const struct {
+ u_int chan;
+ uint8_t r17, r18, r19, r20;
+} mt7601_rf_chan[] = { MT7601_RF_CHAN };
+
+static const struct usb_config mtw_config[MTW_N_XFER] = {
+ [MTW_BULK_RX] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_IN,
+ .bufsize = MTW_MAX_RXSZ,
+ .flags = {.pipe_bof = 1,
+ .short_xfer_ok = 1,},
+ .callback = mtw_bulk_rx_callback,
+ },
+ [MTW_BULK_TX_BE] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 0,},
+ .callback = mtw_bulk_tx_callback0,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_TX_BK] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1,},
+ .callback = mtw_bulk_tx_callback1,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_TX_VI] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1,},
+ .callback = mtw_bulk_tx_callback2,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_TX_VO] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1,},
+ .callback = mtw_bulk_tx_callback3,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_TX_HCCA] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1, .no_pipe_ok = 1,},
+ .callback = mtw_bulk_tx_callback4,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_TX_PRIO] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1, .no_pipe_ok = 1,},
+ .callback = mtw_bulk_tx_callback5,
+ .timeout = 5000, /* ms */
+ },
+ [MTW_BULK_FW_CMD] = {
+ .type = UE_BULK,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = 0x2c44,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1, .no_pipe_ok = 1,},
+ .callback = mtw_fw_callback,
+	},
+ [MTW_BULK_RAW_TX] = {
+ .type = UE_BULK,
+ .ep_index = 0,
+ .endpoint = UE_ADDR_ANY,
+ .direction = UE_DIR_OUT,
+ .bufsize = MTW_MAX_TXSZ,
+ .flags = {.pipe_bof = 1,
+ .force_short_xfer = 1, .no_pipe_ok = 1,},
+ .callback = mtw_bulk_tx_callback0,
+ .timeout = 5000, /* ms */
+ },
+};
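+
+/* Map WME access categories to their bulk TX transfer indices. */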
+static uint8_t mtw_wme_ac_xfer_map[4] = {
+ [WME_AC_BE] = MTW_BULK_TX_BE,
+ [WME_AC_BK] = MTW_BULK_TX_BK,
+ [WME_AC_VI] = MTW_BULK_TX_VI,
+ [WME_AC_VO] = MTW_BULK_TX_VO,
+};
+static void
+mtw_autoinst(void *arg, struct usb_device *udev, struct usb_attach_arg *uaa)
+{
+ struct usb_interface *iface;
+ struct usb_interface_descriptor *id;
+
+ if (uaa->dev_state != UAA_DEV_READY)
+ return;
+
+ iface = usbd_get_iface(udev, 0);
+ if (iface == NULL)
+ return;
+ id = iface->idesc;
+ if (id == NULL || id->bInterfaceClass != UICLASS_MASS)
+ return;
+ if (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa))
+ return;
+
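+	/* Eject the virtual "driver CD" so the device re-attaches as a WLAN adapter. */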
+ if (usb_msc_eject(udev, 0, MSC_EJECT_STOPUNIT) == 0)
+ uaa->dev_state = UAA_DEV_EJECTING;
+}
+
+static int
+mtw_driver_loaded(struct module *mod, int what, void *arg)
+{
+ switch (what) {
+ case MOD_LOAD:
+ mtw_etag = EVENTHANDLER_REGISTER(usb_dev_configured,
+ mtw_autoinst, NULL, EVENTHANDLER_PRI_ANY);
+ break;
+ case MOD_UNLOAD:
+ EVENTHANDLER_DEREGISTER(usb_dev_configured, mtw_etag);
+ break;
+ default:
+ return (EOPNOTSUPP);
+ }
+ return (0);
+}
+
+static const char *
+mtw_get_rf(int rev)
+{
+ switch (rev) {
+ case MT7601_RF_7601:
+ return ("MT7601");
+ case MT7610_RF_7610:
+ return ("MT7610");
+ case MT7612_RF_7612:
+ return ("MT7612");
+ }
+ return ("unknown");
+}
+
+static int
+mtw_wlan_enable(struct mtw_softc *sc, int enable)
+{
+ uint32_t tmp;
+ int error = 0;
+
+ if (enable) {
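+		/* Bring the core up with its clock gated, then ungate the clock and start oscillator calibration. */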
+ mtw_read(sc, MTW_WLAN_CTRL, &tmp);
+ if (sc->asic_ver == 0x7612)
+ tmp &= ~0xfffff000;
+
+ tmp &= ~MTW_WLAN_CLK_EN;
+ tmp |= MTW_WLAN_EN;
+ mtw_write(sc, MTW_WLAN_CTRL, tmp);
+ mtw_delay(sc, 2);
+
+ tmp |= MTW_WLAN_CLK_EN;
+ if (sc->asic_ver == 0x7612) {
+ tmp |= (MTW_WLAN_RESET | MTW_WLAN_RESET_RF);
+ }
+ mtw_write(sc, MTW_WLAN_CTRL, tmp);
+ mtw_delay(sc, 2);
+
+ mtw_read(sc, MTW_OSC_CTRL, &tmp);
+ tmp |= MTW_OSC_EN;
+ mtw_write(sc, MTW_OSC_CTRL, tmp);
+ tmp |= MTW_OSC_CAL_REQ;
+ mtw_write(sc, MTW_OSC_CTRL, tmp);
+ } else {
+ mtw_read(sc, MTW_WLAN_CTRL, &tmp);
+ tmp &= ~(MTW_WLAN_CLK_EN | MTW_WLAN_EN);
+ mtw_write(sc, MTW_WLAN_CTRL, tmp);
+
+ mtw_read(sc, MTW_OSC_CTRL, &tmp);
+ tmp &= ~MTW_OSC_EN;
+ mtw_write(sc, MTW_OSC_CTRL, tmp);
+ }
+ return (error);
+}
+
+static int
+mtw_read_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
+{
+ usb_device_request_t req;
+ uint32_t tmp;
+ uint16_t actlen;
+ int error;
+
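+	/* MTW_READ_CFG vendor request; the register value returns little-endian. */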
+ req.bmRequestType = UT_READ_VENDOR_DEVICE;
+ req.bRequest = MTW_READ_CFG;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, 4);
+ error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, &tmp, 0,
+ &actlen, 1000);
+
+ if (error == 0)
+ *val = le32toh(tmp);
+ else
+ *val = 0xffffffff;
+ return (error);
+}
+
+static int
+mtw_match(device_t self)
+{
+ struct usb_attach_arg *uaa = device_get_ivars(self);
+
+ if (uaa->usb_mode != USB_MODE_HOST)
+ return (ENXIO);
+ if (uaa->info.bConfigIndex != 0)
+ return (ENXIO);
+ if (uaa->info.bIfaceIndex != 0)
+ return (ENXIO);
+
+ return (usbd_lookup_id_by_uaa(mtw_devs, sizeof(mtw_devs), uaa));
+}
+
+static int
+mtw_attach(device_t self)
+{
+ struct mtw_softc *sc = device_get_softc(self);
+ struct usb_attach_arg *uaa = device_get_ivars(self);
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint32_t ver;
+ int i, ret;
+ uint32_t tmp;
+ uint8_t iface_index;
+ int ntries, error;
+
+ device_set_usb_desc(self);
+ sc->sc_udev = uaa->device;
+ sc->sc_dev = self;
+ sc->sc_sent = 0;
+
+ mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev),
+ MTX_NETWORK_LOCK, MTX_DEF);
+
+ iface_index = 0;
+
+ error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer,
+ mtw_config, MTW_N_XFER, sc, &sc->sc_mtx);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "could not allocate USB transfers, "
+ "err=%s\n",
+ usbd_errstr(error));
+ goto detach;
+ }
+	for (i = 0; i < 4; i++) {
+		sc->txd_fw[i] = malloc(sizeof(struct mtw_txd_fw),
+		    M_USBDEV, M_WAITOK | M_ZERO);
+	}
+
+ MTW_LOCK(sc);
+ sc->sc_idx = 0;
+ mbufq_init(&sc->sc_snd, ifqmaxlen);
+
+	/* enable WLAN core */
+	if ((error = mtw_wlan_enable(sc, 1)) != 0) {
+		device_printf(sc->sc_dev, "could not enable WLAN core\n");
+		goto detach;
+	}
+
+ /* wait for the chip to settle */
+ DELAY(100);
+ for (ntries = 0; ntries < 100; ntries++) {
+ if (mtw_read(sc, MTW_ASIC_VER, &ver) != 0) {
+ goto detach;
+ }
+ if (ver != 0 && ver != 0xffffffff)
+ break;
+ DELAY(10);
+ }
+ if (ntries == 100) {
+ device_printf(sc->sc_dev,
+ "timeout waiting for NIC to initialize\n");
+ goto detach;
+ }
+ sc->asic_ver = ver >> 16;
+ sc->asic_rev = ver & 0xffff;
+ DELAY(100);
+ if (sc->asic_ver != 0x7601) {
+ device_printf(sc->sc_dev,
+		    "Your revision 0x%04x is not supported yet\n",
+ sc->asic_rev);
+ goto detach;
+ }
+
+ if (mtw_read(sc, MTW_MAC_VER_ID, &tmp) != 0)
+ goto detach;
+ sc->mac_rev = tmp & 0xffff;
+
+ mtw_load_microcode(sc);
+ ret = msleep(&sc->fwloading, &sc->sc_mtx, 0, "fwload", 3 * hz);
+ if (ret == EWOULDBLOCK || sc->fwloading != 1) {
+ device_printf(sc->sc_dev,
+ "timeout waiting for MCU to initialize\n");
+ goto detach;
+ }
+
+ sc->sc_srom_read = mtw_efuse_read_2;
+ /* retrieve RF rev. no and various other things from EEPROM */
+ mtw_read_eeprom(sc);
+
+ device_printf(sc->sc_dev,
+ "MAC/BBP RT%04X (rev 0x%04X), RF %s (MIMO %dT%dR), address %s\n",
+ sc->asic_ver, sc->mac_rev, mtw_get_rf(sc->rf_rev), sc->ntxchains,
+ sc->nrxchains, ether_sprintf(ic->ic_macaddr));
+ DELAY(100);
+
+	// mtw_set_leds(sc, 5);
+	// mtw_mcu_radio(sc, 0x31, 0);
+	MTW_UNLOCK(sc);
+
+ ic->ic_softc = sc;
+ ic->ic_name = device_get_nameunit(self);
+ ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
+ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
+
+ ic->ic_caps = IEEE80211_C_STA | /* station mode supported */
+ IEEE80211_C_MONITOR | /* monitor mode supported */
+ IEEE80211_C_IBSS |
+ IEEE80211_C_HOSTAP |
+ IEEE80211_C_WDS | /* 4-address traffic works */
+ IEEE80211_C_MBSS |
+ IEEE80211_C_SHPREAMBLE | /* short preamble supported */
+ IEEE80211_C_SHSLOT | /* short slot time supported */
+ IEEE80211_C_WME | /* WME */
+ IEEE80211_C_WPA; /* WPA1|WPA2(RSN) */
+ device_printf(sc->sc_dev, "[HT] Enabling 802.11n\n");
+ ic->ic_htcaps = IEEE80211_HTC_HT
+ | IEEE80211_HTC_AMPDU
+ | IEEE80211_HTC_AMSDU
+ | IEEE80211_HTCAP_MAXAMSDU_3839
+ | IEEE80211_HTCAP_SMPS_OFF;
+
+ ic->ic_rxstream = sc->nrxchains;
+ ic->ic_txstream = sc->ntxchains;
+
+ ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM |
+ IEEE80211_CRYPTO_AES_OCB | IEEE80211_CRYPTO_TKIP |
+ IEEE80211_CRYPTO_TKIPMIC;
+
+ ic->ic_flags |= IEEE80211_F_DATAPAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
+ mtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
+ ic->ic_channels);
+
+ ieee80211_ifattach(ic);
+
+ ic->ic_scan_start = mtw_scan_start;
+ ic->ic_scan_end = mtw_scan_end;
+ ic->ic_set_channel = mtw_set_channel;
+ ic->ic_getradiocaps = mtw_getradiocaps;
+ ic->ic_node_alloc = mtw_node_alloc;
+ ic->ic_newassoc = mtw_newassoc;
+ ic->ic_update_mcast = mtw_update_mcast;
+ ic->ic_updateslot = mtw_updateslot;
+ ic->ic_wme.wme_update = mtw_wme_update;
+ ic->ic_raw_xmit = mtw_raw_xmit;
+ ic->ic_update_promisc = mtw_update_promisc;
+ ic->ic_vap_create = mtw_vap_create;
+ ic->ic_vap_delete = mtw_vap_delete;
+ ic->ic_transmit = mtw_transmit;
+ ic->ic_parent = mtw_parent;
+
+ ic->ic_update_chw = mtw_update_chw;
+ ic->ic_ampdu_enable = mtw_ampdu_enable;
+
+ ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr,
+ sizeof(sc->sc_txtap), MTW_TX_RADIOTAP_PRESENT,
+ &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
+ MTW_RX_RADIOTAP_PRESENT);
+ TASK_INIT(&sc->cmdq_task, 0, mtw_cmdq_cb, sc);
+ TASK_INIT(&sc->ratectl_task, 0, mtw_ratectl_cb, sc);
+ usb_callout_init_mtx(&sc->ratectl_ch, &sc->sc_mtx, 0);
+
+ if (bootverbose)
+ ieee80211_announce(ic);
+
+ return (0);
+
+detach:
+ MTW_UNLOCK(sc);
+ mtw_detach(self);
+ return (ENXIO);
+}
+
+static void
+mtw_drain_mbufq(struct mtw_softc *sc)
+{
+ struct mbuf *m;
+ struct ieee80211_node *ni;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ m->m_pkthdr.rcvif = NULL;
+ ieee80211_free_node(ni);
+ m_freem(m);
+ }
+}
+
+static int
+mtw_detach(device_t self)
+{
+ struct mtw_softc *sc = device_get_softc(self);
+ struct ieee80211com *ic = &sc->sc_ic;
+ int i;
+ MTW_LOCK(sc);
+ mtw_reset(sc);
+ DELAY(10000);
+ sc->sc_detached = 1;
+ MTW_UNLOCK(sc);
+
+ /* stop all USB transfers */
+ for (i = 0; i < MTW_N_XFER; i++)
+ usbd_transfer_drain(sc->sc_xfer[i]);
+
+ MTW_LOCK(sc);
+ sc->ratectl_run = MTW_RATECTL_OFF;
+ sc->cmdq_run = sc->cmdq_key_set = MTW_CMDQ_ABORT;
+
+ /* free TX list, if any */
+ if (ic->ic_nrunning > 0)
+ for (i = 0; i < MTW_EP_QUEUES; i++)
+ mtw_unsetup_tx_list(sc, &sc->sc_epq[i]);
+
+ /* Free TX queue */
+ mtw_drain_mbufq(sc);
+ MTW_UNLOCK(sc);
+ if (sc->sc_ic.ic_softc == sc) {
+ /* drain tasks */
+ usb_callout_drain(&sc->ratectl_ch);
+ ieee80211_draintask(ic, &sc->cmdq_task);
+ ieee80211_draintask(ic, &sc->ratectl_task);
+ ieee80211_ifdetach(ic);
+ }
+ for (i = 0; i < 4; i++) {
+ free(sc->txd_fw[i], M_USBDEV);
+ }
+ firmware_unregister("/mediatek/mt7601u");
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static struct ieee80211vap *
+mtw_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
+ enum ieee80211_opmode opmode, int flags,
+ const uint8_t bssid[IEEE80211_ADDR_LEN],
+ const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ struct mtw_softc *sc = ic->ic_softc;
+ struct mtw_vap *rvp;
+ struct ieee80211vap *vap;
+ int i;
+
+ if (sc->rvp_cnt >= MTW_VAP_MAX) {
+ device_printf(sc->sc_dev, "number of VAPs maxed out\n");
+ return (NULL);
+ }
+
+ switch (opmode) {
+ case IEEE80211_M_STA:
+ /* enable s/w bmiss handling for sta mode */
+ flags |= IEEE80211_CLONE_NOBEACONS;
+		/* fall through */
+ case IEEE80211_M_IBSS:
+ case IEEE80211_M_MONITOR:
+ case IEEE80211_M_HOSTAP:
+ case IEEE80211_M_MBSS:
+ /* other than WDS vaps, only one at a time */
+ if (!TAILQ_EMPTY(&ic->ic_vaps))
+ return (NULL);
+ break;
+ case IEEE80211_M_WDS:
+ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
+ if (vap->iv_opmode != IEEE80211_M_HOSTAP)
+ continue;
+ /* WDS vap's always share the local mac address. */
+ flags &= ~IEEE80211_CLONE_BSSID;
+ break;
+ }
+ if (vap == NULL) {
+ device_printf(sc->sc_dev,
+ "wds only supported in ap mode\n");
+ return (NULL);
+ }
+ break;
+ default:
+ device_printf(sc->sc_dev, "unknown opmode %d\n", opmode);
+ return (NULL);
+ }
+
+ rvp = malloc(sizeof(struct mtw_vap), M_80211_VAP, M_WAITOK | M_ZERO);
+ vap = &rvp->vap;
+
+ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) !=
+ 0) {
+ /* out of memory */
+ free(rvp, M_80211_VAP);
+ return (NULL);
+ }
+
+ vap->iv_update_beacon = mtw_update_beacon;
+ vap->iv_max_aid = MTW_WCID_MAX;
+
+ /*
+	 * The Linux rt2800 driver limits 1-stream devices to a 32KB
+ * RX AMPDU.
+ */
+ if (ic->ic_rxstream > 1)
+ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K;
+	else
+		vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K;
+ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_2; /* 2uS */
+
+ /*
+ * To delete the right key from h/w, we need wcid.
+ * Luckily, there is unused space in ieee80211_key{}, wk_pad,
+ * and matching wcid will be written into there. So, cast
+ * some spells to remove 'const' from ieee80211_key{}
+ */
+ vap->iv_key_delete = (void *)mtw_key_delete;
+ vap->iv_key_set = (void *)mtw_key_set;
+
+ // override state transition machine
+ rvp->newstate = vap->iv_newstate;
+ vap->iv_newstate = mtw_newstate;
+ if (opmode == IEEE80211_M_IBSS) {
+ rvp->recv_mgmt = vap->iv_recv_mgmt;
+ vap->iv_recv_mgmt = mtw_recv_mgmt;
+ }
+
+ ieee80211_ratectl_init(vap);
+ ieee80211_ratectl_setinterval(vap, 1000); // 1 second
+
+ /* complete setup */
+ ieee80211_vap_attach(vap, mtw_media_change, ieee80211_media_status,
+ mac);
+
+ /* make sure id is always unique */
+ for (i = 0; i < MTW_VAP_MAX; i++) {
+ if ((sc->rvp_bmap & 1 << i) == 0) {
+ sc->rvp_bmap |= 1 << i;
+ rvp->rvp_id = i;
+ break;
+ }
+ }
+ if (sc->rvp_cnt++ == 0)
+ ic->ic_opmode = opmode;
+
+ if (opmode == IEEE80211_M_HOSTAP)
+ sc->cmdq_run = MTW_CMDQ_GO;
+
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE, "rvp_id=%d bmap=%x rvp_cnt=%d\n",
+ rvp->rvp_id, sc->rvp_bmap, sc->rvp_cnt);
+
+ return (vap);
+}
+
+static void
+mtw_vap_delete(struct ieee80211vap *vap)
+{
+ struct mtw_vap *rvp = MTW_VAP(vap);
+ struct ieee80211com *ic;
+ struct mtw_softc *sc;
+ uint8_t rvp_id;
+
+ if (vap == NULL)
+ return;
+
+ ic = vap->iv_ic;
+ sc = ic->ic_softc;
+
+ MTW_LOCK(sc);
+ m_freem(rvp->beacon_mbuf);
+ rvp->beacon_mbuf = NULL;
+
+ rvp_id = rvp->rvp_id;
+ sc->ratectl_run &= ~(1 << rvp_id);
+ sc->rvp_bmap &= ~(1 << rvp_id);
+ mtw_set_region_4(sc, MTW_SKEY(rvp_id, 0), 0, 256);
+ mtw_set_region_4(sc, (0x7800 + (rvp_id) * 512), 0, 512);
+ --sc->rvp_cnt;
+
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE,
+ "vap=%p rvp_id=%d bmap=%x rvp_cnt=%d\n", vap, rvp_id, sc->rvp_bmap,
+ sc->rvp_cnt);
+
+ MTW_UNLOCK(sc);
+
+ ieee80211_ratectl_deinit(vap);
+ ieee80211_vap_detach(vap);
+ free(rvp, M_80211_VAP);
+}
+
+/*
+ * A number of functions must be called in thread context.  Rather
+ * than creating a taskqueue event for each of them, this all-for-one
+ * taskqueue callback guarantees that deferred functions are executed
+ * in the same order they were enqueued.
+ * '& MTW_CMDQ_MASQ' is to loop cmdq[].
+ */
+static void
+mtw_cmdq_cb(void *arg, int pending)
+{
+ struct mtw_softc *sc = arg;
+ uint8_t i;
+ /* call cmdq[].func locked */
+ MTW_LOCK(sc);
+ for (i = sc->cmdq_exec; sc->cmdq[i].func && pending;
+ i = sc->cmdq_exec, pending--) {
+ MTW_DPRINTF(sc, MTW_DEBUG_CMD, "cmdq_exec=%d pending=%d\n", i,
+ pending);
+ if (sc->cmdq_run == MTW_CMDQ_GO) {
+ /*
+ * If arg0 is NULL, callback func needs more
+ * than one arg. So, pass ptr to cmdq struct.
+ */
+ if (sc->cmdq[i].arg0)
+ sc->cmdq[i].func(sc->cmdq[i].arg0);
+ else
+ sc->cmdq[i].func(&sc->cmdq[i]);
+ }
+ sc->cmdq[i].arg0 = NULL;
+ sc->cmdq[i].func = NULL;
+ sc->cmdq_exec++;
+ sc->cmdq_exec &= MTW_CMDQ_MASQ;
+ }
+ MTW_UNLOCK(sc);
+}
+
+static void
+mtw_setup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
+{
+ struct mtw_tx_data *data;
+
+ memset(pq, 0, sizeof(*pq));
+
+ STAILQ_INIT(&pq->tx_qh);
+ STAILQ_INIT(&pq->tx_fh);
+
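+	/* Put every TX buffer on the free queue. */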
+ for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
+ data++) {
+ data->sc = sc;
+ STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
+ }
+ pq->tx_nfree = MTW_TX_RING_COUNT;
+}
+
+static void
+mtw_unsetup_tx_list(struct mtw_softc *sc, struct mtw_endpoint_queue *pq)
+{
+ struct mtw_tx_data *data;
+ /* make sure any subsequent use of the queues will fail */
+ pq->tx_nfree = 0;
+
+ STAILQ_INIT(&pq->tx_fh);
+ STAILQ_INIT(&pq->tx_qh);
+
+ /* free up all node references and mbufs */
+ for (data = &pq->tx_data[0]; data < &pq->tx_data[MTW_TX_RING_COUNT];
+ data++) {
+ if (data->m != NULL) {
+ m_freem(data->m);
+ data->m = NULL;
+ }
+ if (data->ni != NULL) {
+ ieee80211_free_node(data->ni);
+ data->ni = NULL;
+ }
+ }
+}
+
+static int
+mtw_write_ivb(struct mtw_softc *sc, void *buf, uint16_t len)
+{
+ usb_device_request_t req;
+ uint16_t actlen;
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MTW_RESET;
+ USETW(req.wValue, 0x12);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, len);
+
+ int error = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, &req, buf,
+ 0, &actlen, 1000);
+
+ return (error);
+}
+
+static int
+mtw_write_cfg(struct mtw_softc *sc, uint16_t reg, uint32_t val)
+{
+ usb_device_request_t req;
+ int error;
+
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MTW_WRITE_CFG;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, 4);
+ val = htole32(val);
+ error = usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, &val);
+ return (error);
+}
+
+static int
+mtw_usb_dma_write(struct mtw_softc *sc, uint32_t val)
+{
+ // if (sc->asic_ver == 0x7612)
+ // return mtw_write_cfg(sc, MTW_USB_U3DMA_CFG, val);
+ // else
+ return (mtw_write(sc, MTW_USB_DMA_CFG, val));
+}
+
+static void
+mtw_ucode_setup(struct mtw_softc *sc)
+{
+
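+	/* Enable USB DMA and program the FCE registers for MCU firmware download. */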
+ mtw_usb_dma_write(sc, (MTW_USB_TX_EN | MTW_USB_RX_EN));
+ mtw_write(sc, MTW_FCE_PSE_CTRL, 1);
+ mtw_write(sc, MTW_TX_CPU_FCE_BASE, 0x400230);
+ mtw_write(sc, MTW_TX_CPU_FCE_MAX_COUNT, 1);
+ mtw_write(sc, MTW_MCU_FW_IDX, 1);
+ mtw_write(sc, MTW_FCE_PDMA, 0x44);
+ mtw_write(sc, MTW_FCE_SKIP_FS, 3);
+}
+
+static int
+mtw_ucode_write(struct mtw_softc *sc, const uint8_t *fw, const uint8_t *ivb,
+ int32_t len, uint32_t offset)
+{
+
+ // struct usb_attach_arg *uaa = device_get_ivars(sc->sc_dev);
+#if 0 // firmware not tested
+
+ if (sc->asic_ver == 0x7612 && offset >= 0x90000)
+ blksz = 0x800; /* MT7612 ROM Patch */
+
+ xfer = usbd_alloc_xfer(sc->sc_udev);
+ if (xfer == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+ buf = usbd_alloc_buffer(xfer, blksz + 12);
+ if (buf == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+#endif
+
+ int mlen;
+ int idx = 0;
+
+ mlen = 0x2c44;
+
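+	/* Push the image in 0x2c44-byte chunks, matching the MTW_BULK_FW_CMD buffer size. */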
+	while (len > 0) {
+		if (len < 0x2c44)
+			mlen = len;
+
+		sc->txd_fw[idx]->len = htole16(mlen);
+		sc->txd_fw[idx]->flags = htole16(MTW_TXD_DATA | MTW_TXD_MCU);
+		memcpy(&sc->txd_fw[idx]->fw, fw, mlen);
+
+		fw += mlen;
+		len -= mlen;
+		idx++;
+	}
+ sc->sc_sent = 0;
+ memcpy(sc->sc_ivb_1, ivb, MTW_MCU_IVB_LEN);
+
+ usbd_transfer_start(sc->sc_xfer[7]);
+
+ return (0);
+}
+
+static void
+mtw_load_microcode(void *arg)
+{
+	struct mtw_softc *sc = (struct mtw_softc *)arg;
+	const struct mtw_ucode_hdr *hdr;
+	const char *fwname;
+	size_t size;
+	int error = 0;
+	uint32_t tmp, iofs = 0x40;
+	int dlen, ilen;
+
+	device_printf(sc->sc_dev, "version: 0x%hx\n", sc->asic_ver);
+ /* is firmware already running? */
+ mtw_read_cfg(sc, MTW_MCU_DMA_ADDR, &tmp);
+ if (tmp == MTW_MCU_READY) {
+ return;
+ }
+ if (sc->asic_ver == 0x7612) {
+ fwname = "mtw-mt7662u_rom_patch";
+
+		const struct firmware *firmware =
+		    firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
+		if (firmware == NULL) {
+			device_printf(sc->sc_dev,
+			    "failed to load firmware file %s\n", fwname);
+			return;
+		}
+ size = firmware->datasize;
+
+ const struct mtw_ucode *fw = (const struct mtw_ucode *)
+ firmware->data;
+ hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
+ ilen = size - 0x1e;
+
+ mtw_ucode_setup(sc);
+
+ if ((error = mtw_ucode_write(sc, firmware->data, fw->ivb, ilen,
+ 0x90000)) != 0) {
+ goto fail;
+ }
+ mtw_usb_dma_write(sc, 0x00e41814);
+ }
+
+ fwname = "/mediatek/mt7601u.bin";
+ iofs = 0x40;
+ // dofs = 0;
+ if (sc->asic_ver == 0x7612) {
+ fwname = "mtw-mt7662u";
+ iofs = 0x80040;
+ // dofs = 0x110800;
+ } else if (sc->asic_ver == 0x7610) {
+ fwname = "mt7610u";
+ // dofs = 0x80000;
+ }
+ MTW_UNLOCK(sc);
+	const struct firmware *firmware =
+	    firmware_get_flags(fwname, FIRMWARE_GET_NOWARN);
+
+	if (firmware == NULL) {
+		device_printf(sc->sc_dev,
+		    "failed to load firmware file %s\n", fwname);
+		MTW_LOCK(sc);
+		return;
+	}
+ MTW_LOCK(sc);
+ size = firmware->datasize;
+ MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE, "firmware size:%zu\n", size);
+ const struct mtw_ucode *fw = (const struct mtw_ucode *)firmware->data;
+
+ if (size < sizeof(struct mtw_ucode_hdr)) {
+ device_printf(sc->sc_dev, "firmware header too short\n");
+ goto fail;
+ }
+
+ hdr = (const struct mtw_ucode_hdr *)&fw->hdr;
+
+ if (size < sizeof(struct mtw_ucode_hdr) + le32toh(hdr->ilm_len) +
+ le32toh(hdr->dlm_len)) {
+ device_printf(sc->sc_dev, "firmware payload too short\n");
+ goto fail;
+ }
+
+ ilen = le32toh(hdr->ilm_len) - MTW_MCU_IVB_LEN;
+ dlen = le32toh(hdr->dlm_len);
+
+ if (ilen > size || dlen > size) {
+ device_printf(sc->sc_dev, "firmware payload too large\n");
+ goto fail;
+ }
+
+ mtw_write(sc, MTW_FCE_PDMA, 0);
+ mtw_write(sc, MTW_FCE_PSE_CTRL, 0);
+ mtw_ucode_setup(sc);
+
+ if ((error = mtw_ucode_write(sc, fw->data, fw->ivb, ilen, iofs)) != 0)
+		device_printf(sc->sc_dev, "could not write ucode, error=%d\n",
+ error);
+
+ device_printf(sc->sc_dev, "loaded firmware ver %.8x %.8x %s\n",
+ le32toh(hdr->fw_ver), le32toh(hdr->build_ver), hdr->build_time);
+
+ return;
+fail:
+ return;
+}
+
+static usb_error_t
+mtw_do_request(struct mtw_softc *sc, struct usb_device_request *req, void *data)
+{
+ usb_error_t err;
+ int ntries = 5;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
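+	/* Retry a few times with a short delay; control requests may fail transiently. */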
+ while (ntries--) {
+		err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data,
+		    0, NULL, 2000); /* timeout in ms */
+ if (err == 0)
+ break;
+ MTW_DPRINTF(sc, MTW_DEBUG_USB,
+ "Control request failed, %s (retrying)\n",
+ usbd_errstr(err));
+ mtw_delay(sc, 10);
+ }
+ return (err);
+}
+
+static int
+mtw_read(struct mtw_softc *sc, uint16_t reg, uint32_t *val)
+{
+ uint32_t tmp;
+ int error;
+
+ error = mtw_read_region_1(sc, reg, (uint8_t *)&tmp, sizeof tmp);
+ if (error == 0)
+ *val = le32toh(tmp);
+ else
+ *val = 0xffffffff;
+ return (error);
+}
+
+static int
+mtw_read_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
+{
+ usb_device_request_t req;
+
+ req.bmRequestType = UT_READ_VENDOR_DEVICE;
+ req.bRequest = MTW_READ_REGION_1;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, len);
+
+ return (mtw_do_request(sc, &req, buf));
+}
+
+static int
+mtw_write_2(struct mtw_softc *sc, uint16_t reg, uint16_t val)
+{
+
+ usb_device_request_t req;
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MTW_WRITE_2;
+ USETW(req.wValue, val);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, 0);
+ return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, NULL));
+}
+
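+/* The vendor interface takes 16-bit register writes; 32-bit values go out as two requests. */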
+static int
+mtw_write(struct mtw_softc *sc, uint16_t reg, uint32_t val)
+{
+	int error;
+
+	if ((error = mtw_write_2(sc, reg, val & 0xffff)) == 0)
+		error = mtw_write_2(sc, reg + 2, val >> 16);
+	return (error);
+}
+
+static int
+mtw_write_region_1(struct mtw_softc *sc, uint16_t reg, uint8_t *buf, int len)
+{
+
+ usb_device_request_t req;
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MTW_WRITE_REGION_1;
+ USETW(req.wValue, 0);
+ USETW(req.wIndex, reg);
+ USETW(req.wLength, len);
+ return (usbd_do_request(sc->sc_udev, &sc->sc_mtx, &req, buf));
+}
+
+static int
+mtw_set_region_4(struct mtw_softc *sc, uint16_t reg, uint32_t val, int count)
+{
+ int i, error = 0;
+
+	KASSERT((count & 3) == 0, ("mtw_set_region_4: Invalid data length.\n"));
+ for (i = 0; i < count && error == 0; i += 4)
+ error = mtw_write(sc, reg + i, val);
+ return (error);
+}
+
+static int
+mtw_efuse_read_2(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
+{
+
+ uint32_t tmp;
+ uint16_t reg;
+ int error, ntries;
+
+ if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
+ return (error);
+
+ addr *= 2;
+ /*
+ * Read one 16-byte block into registers EFUSE_DATA[0-3]:
+ * DATA0: 3 2 1 0
+ * DATA1: 7 6 5 4
+ * DATA2: B A 9 8
+ * DATA3: F E D C
+ */
+ tmp &= ~(MTW_EFSROM_MODE_MASK | MTW_EFSROM_AIN_MASK);
+ tmp |= (addr & ~0xf) << MTW_EFSROM_AIN_SHIFT | MTW_EFSROM_KICK;
+ mtw_write(sc, MTW_EFUSE_CTRL, tmp);
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_EFUSE_CTRL, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_EFSROM_KICK))
+ break;
+ DELAY(2);
+ }
+ if (ntries == 100)
+ return (ETIMEDOUT);
+
+ if ((tmp & MTW_EFUSE_AOUT_MASK) == MTW_EFUSE_AOUT_MASK) {
+ *val = 0xffff; // address not found
+ return (0);
+ }
+	/* determine to which 32-bit register our 16-bit word belongs */
+	reg = MTW_EFUSE_DATA0 + (addr & 0xc);
+ if ((error = mtw_read(sc, reg, &tmp)) != 0)
+ return (error);
+
+ *val = (addr & 2) ? tmp >> 16 : tmp & 0xffff;
+ return (0);
+}
+
+static __inline int
+mtw_srom_read(struct mtw_softc *sc, uint16_t addr, uint16_t *val)
+{
+ /* either eFUSE ROM or EEPROM */
+ return (sc->sc_srom_read(sc, addr, val));
+}
+
+static int
+mtw_bbp_read(struct mtw_softc *sc, uint8_t reg, uint8_t *val)
+{
+ uint32_t tmp;
+ int ntries, error;
+
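+	/* Wait until the BBP CSR is idle before kicking the access. */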
+ for (ntries = 0; ntries < 10; ntries++) {
+ if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_BBP_CSR_KICK))
+ break;
+ }
+ if (ntries == 10)
+ return (ETIMEDOUT);
+
+ tmp = MTW_BBP_CSR_READ | MTW_BBP_CSR_KICK | reg << 8;
+ if ((error = mtw_write(sc, MTW_BBP_CSR, tmp)) != 0)
+ return (error);
+
+ for (ntries = 0; ntries < 10; ntries++) {
+ if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_BBP_CSR_KICK))
+ break;
+ }
+ if (ntries == 10)
+ return (ETIMEDOUT);
+
+ *val = tmp & 0xff;
+ return (0);
+}
+
+static int
+mtw_bbp_write(struct mtw_softc *sc, uint8_t reg, uint8_t val)
+{
+ uint32_t tmp;
+ int ntries, error;
+
+ for (ntries = 0; ntries < 10; ntries++) {
+ if ((error = mtw_read(sc, MTW_BBP_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_BBP_CSR_KICK))
+ break;
+ }
+ if (ntries == 10)
+ return (ETIMEDOUT);
+
+ tmp = MTW_BBP_CSR_KICK | reg << 8 | val;
+ return (mtw_write(sc, MTW_BBP_CSR, tmp));
+}
+
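+/* Pack an MCU command into the first firmware TX descriptor and start the firmware/command bulk transfer. */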
+static int
+mtw_mcu_cmd(struct mtw_softc *sc, uint8_t cmd, void *buf, int len)
+{
+ sc->sc_idx = 0;
+	sc->txd_fw[sc->sc_idx]->len = htole16(len + 8);
+ sc->txd_fw[sc->sc_idx]->flags = htole16(MTW_TXD_CMD | MTW_TXD_MCU |
+ (cmd & 0x1f) << MTW_TXD_CMD_SHIFT | (0 & 0xf));
+
+ memset(&sc->txd_fw[sc->sc_idx]->fw, 0, 2004);
+ memcpy(&sc->txd_fw[sc->sc_idx]->fw, buf, len);
+ usbd_transfer_start(sc->sc_xfer[7]);
+ return (0);
+}
+
+/*
+ * Add `delta' (signed) to each 4-bit sub-word of a 32-bit word.
+ * Used to adjust per-rate Tx power registers.
+ */
+static __inline uint32_t
+b4inc(uint32_t b32, int8_t delta)
+{
+ int8_t i, b4;
+
+ for (i = 0; i < 8; i++) {
+ b4 = b32 & 0xf;
+ b4 += delta;
+ if (b4 < 0)
+ b4 = 0;
+ else if (b4 > 0xf)
+ b4 = 0xf;
+ b32 = b32 >> 4 | b4 << 28;
+ }
+ return (b32);
+}
+
+static void
+mtw_get_txpower(struct mtw_softc *sc)
+{
+ uint16_t val;
+ int i;
+
+ /* Read power settings for 2GHz channels. */
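+	/* Each 16-bit EEPROM word holds the power for two consecutive channels. */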
+ for (i = 0; i < 14; i += 2) {
+ mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE1 + i / 2, &val);
+ sc->txpow1[i + 0] = (int8_t)(val & 0xff);
+ sc->txpow1[i + 1] = (int8_t)(val >> 8);
+ mtw_srom_read(sc, MTW_EEPROM_PWR2GHZ_BASE2 + i / 2, &val);
+ sc->txpow2[i + 0] = (int8_t)(val & 0xff);
+ sc->txpow2[i + 1] = (int8_t)(val >> 8);
+ }
+ /* Fix broken Tx power entries. */
+ for (i = 0; i < 14; i++) {
+ if (sc->txpow1[i] < 0 || sc->txpow1[i] > 27)
+ sc->txpow1[i] = 5;
+ if (sc->txpow2[i] < 0 || sc->txpow2[i] > 27)
+ sc->txpow2[i] = 5;
+ MTW_DPRINTF(sc, MTW_DEBUG_TXPWR,
+ "chan %d: power1=%d, power2=%d\n", mt7601_rf_chan[i].chan,
+ sc->txpow1[i], sc->txpow2[i]);
+ }
+}
+
+static struct ieee80211_node *
+mtw_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
+{
+ return (malloc(sizeof(struct mtw_node), M_80211_NODE,
+ M_NOWAIT | M_ZERO));
+}
+
+static int
+mtw_read_eeprom(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ int8_t delta_2ghz, delta_5ghz;
+ uint16_t val;
+ int ridx, ant;
+
+ sc->sc_srom_read = mtw_efuse_read_2;
+
+ /* read RF information */
+ mtw_srom_read(sc, MTW_EEPROM_CHIPID, &val);
+ sc->rf_rev = val;
+ mtw_srom_read(sc, MTW_EEPROM_ANTENNA, &val);
+ sc->ntxchains = (val >> 4) & 0xf;
+ sc->nrxchains = val & 0xf;
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM RF rev=0x%02x chains=%dT%dR\n",
+ sc->rf_rev, sc->ntxchains, sc->nrxchains);
+
+ /* read ROM version */
+ mtw_srom_read(sc, MTW_EEPROM_VERSION, &val);
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM, "EEPROM rev=%d, FAE=%d\n", val & 0xff,
+ val >> 8);
+
+ /* read MAC address */
+ mtw_srom_read(sc, MTW_EEPROM_MAC01, &val);
+ ic->ic_macaddr[0] = val & 0xff;
+ ic->ic_macaddr[1] = val >> 8;
+ mtw_srom_read(sc, MTW_EEPROM_MAC23, &val);
+ ic->ic_macaddr[2] = val & 0xff;
+ ic->ic_macaddr[3] = val >> 8;
+ mtw_srom_read(sc, MTW_EEPROM_MAC45, &val);
+ ic->ic_macaddr[4] = val & 0xff;
+ ic->ic_macaddr[5] = val >> 8;
+#if 0
+ printf("eFUSE ROM\n00: ");
+ for (int i = 0; i < 256; i++) {
+ if (((i % 8) == 0) && i > 0)
+ printf("\n%02x: ", i);
+ mtw_srom_read(sc, i, &val);
+ printf(" %04x", val);
+ }
+ printf("\n");
+#endif
+ /* check if RF supports automatic Tx access gain control */
+ mtw_srom_read(sc, MTW_EEPROM_CONFIG, &val);
+ device_printf(sc->sc_dev, "EEPROM CFG 0x%04x\n", val);
+ if ((val & 0xff) != 0xff) {
+ sc->ext_5ghz_lna = (val >> 3) & 1;
+ sc->ext_2ghz_lna = (val >> 2) & 1;
+ /* check if RF supports automatic Tx access gain control */
+ sc->calib_2ghz = sc->calib_5ghz = (val >> 1) & 1;
+ /* check if we have a hardware radio switch */
+ sc->rfswitch = val & 1;
+ }
+
+ /* read RF frequency offset from EEPROM */
+ mtw_srom_read(sc, MTW_EEPROM_FREQ_OFFSET, &val);
+ if ((val & 0xff) != 0xff)
+ sc->rf_freq_offset = val;
+ else
+ sc->rf_freq_offset = 0;
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM, "frequency offset 0x%x\n",
+ sc->rf_freq_offset);
+
+ /* Read Tx power settings. */
+ mtw_get_txpower(sc);
+
+ /* read Tx power compensation for each Tx rate */
+ mtw_srom_read(sc, MTW_EEPROM_DELTAPWR, &val);
+ delta_2ghz = delta_5ghz = 0;
+ if ((val & 0xff) != 0xff && (val & 0x80)) {
+ delta_2ghz = val & 0xf;
+ if (!(val & 0x40)) /* negative number */
+ delta_2ghz = -delta_2ghz;
+ }
+ val >>= 8;
+ if ((val & 0xff) != 0xff && (val & 0x80)) {
+ delta_5ghz = val & 0xf;
+ if (!(val & 0x40)) /* negative number */
+ delta_5ghz = -delta_5ghz;
+ }
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
+ "power compensation=%d (2GHz), %d (5GHz)\n", delta_2ghz,
+ delta_5ghz);
+
+ for (ridx = 0; ridx < 5; ridx++) {
+ uint32_t reg;
+
+ mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2, &val);
+ reg = val;
+ mtw_srom_read(sc, MTW_EEPROM_RPWR + ridx * 2 + 1, &val);
+ reg |= (uint32_t)val << 16;
+
+ sc->txpow20mhz[ridx] = reg;
+ sc->txpow40mhz_2ghz[ridx] = b4inc(reg, delta_2ghz);
+ sc->txpow40mhz_5ghz[ridx] = b4inc(reg, delta_5ghz);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM | MTW_DEBUG_TXPWR,
+ "ridx %d: power 20MHz=0x%08x, 40MHz/2GHz=0x%08x, "
+ "40MHz/5GHz=0x%08x\n",
+ ridx, sc->txpow20mhz[ridx], sc->txpow40mhz_2ghz[ridx],
+ sc->txpow40mhz_5ghz[ridx]);
+ }
+
+ /* read RSSI offsets and LNA gains from EEPROM */
+ val = 0;
+ mtw_srom_read(sc, MTW_EEPROM_RSSI1_2GHZ, &val);
+ sc->rssi_2ghz[0] = val & 0xff; /* Ant A */
+ sc->rssi_2ghz[1] = val >> 8; /* Ant B */
+ mtw_srom_read(sc, MTW_EEPROM_RSSI2_2GHZ, &val);
+ /*
+ * On RT3070 chips (limited to 2 Rx chains), this ROM
+ * field contains the Tx mixer gain for the 2GHz band.
+ */
+ if ((val & 0xff) != 0xff)
+ sc->txmixgain_2ghz = val & 0x7;
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM, "tx mixer gain=%u (2GHz)\n",
+ sc->txmixgain_2ghz);
+ sc->lna[2] = val >> 8; /* channel group 2 */
+ mtw_srom_read(sc, MTW_EEPROM_RSSI1_5GHZ, &val);
+ sc->rssi_5ghz[0] = val & 0xff; /* Ant A */
+ sc->rssi_5ghz[1] = val >> 8; /* Ant B */
+ mtw_srom_read(sc, MTW_EEPROM_RSSI2_5GHZ, &val);
+ sc->rssi_5ghz[2] = val & 0xff; /* Ant C */
+
+ sc->lna[3] = val >> 8; /* channel group 3 */
+
+ mtw_srom_read(sc, MTW_EEPROM_LNA, &val);
+ sc->lna[0] = val & 0xff; /* channel group 0 */
+ sc->lna[1] = val >> 8; /* channel group 1 */
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM, "LNA0 0x%x\n", sc->lna[0]);
+
+ /* fix broken 5GHz LNA entries */
+ if (sc->lna[2] == 0 || sc->lna[2] == 0xff) {
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM,
+ "invalid LNA for channel group %d\n", 2);
+ sc->lna[2] = sc->lna[1];
+ }
+ if (sc->lna[3] == 0 || sc->lna[3] == 0xff) {
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM,
+ "invalid LNA for channel group %d\n", 3);
+ sc->lna[3] = sc->lna[1];
+ }
+
+ /* fix broken RSSI offset entries */
+ for (ant = 0; ant < 3; ant++) {
+ if (sc->rssi_2ghz[ant] < -10 || sc->rssi_2ghz[ant] > 10) {
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM,
+ "invalid RSSI%d offset: %d (2GHz)\n", ant + 1,
+ sc->rssi_2ghz[ant]);
+ sc->rssi_2ghz[ant] = 0;
+ }
+ if (sc->rssi_5ghz[ant] < -10 || sc->rssi_5ghz[ant] > 10) {
+ MTW_DPRINTF(sc, MTW_DEBUG_ROM,
+ "invalid RSSI%d offset: %d (5GHz)\n", ant + 1,
+ sc->rssi_5ghz[ant]);
+ sc->rssi_5ghz[ant] = 0;
+ }
+ }
+ return (0);
+}
+
+static int
+mtw_media_change(if_t ifp)
+{
+ struct ieee80211vap *vap = if_getsoftc(ifp);
+ struct ieee80211com *ic = vap->iv_ic;
+ const struct ieee80211_txparam *tp;
+ struct mtw_softc *sc = ic->ic_softc;
+ uint8_t rate, ridx;
+
+ MTW_LOCK(sc);
+ ieee80211_media_change(ifp);
+ tp = &vap->iv_txparms[ic->ic_curmode];
+ if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
+ struct ieee80211_node *ni;
+ struct mtw_node *rn;
+ /* XXX TODO: methodize with MCS rates */
+ rate =
+ ic->ic_sup_rates[ic->ic_curmode].rs_rates[tp->ucastrate] &
+ IEEE80211_RATE_VAL;
+ for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
+ if (rt2860_rates[ridx].rate == rate)
+ break;
+ }
+ ni = ieee80211_ref_node(vap->iv_bss);
+ rn = MTW_NODE(ni);
+ rn->fix_ridx = ridx;
+
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, fix_ridx=%d\n", rate,
+ rn->fix_ridx);
+ ieee80211_free_node(ni);
+ }
+ MTW_UNLOCK(sc);
+
+ return (0);
+}
+
+void
+mtw_set_leds(struct mtw_softc *sc, uint16_t which)
+{
+ struct mtw_mcu_cmd_8 cmd;
+
+ cmd.func = htole32(0x1);
+ cmd.val = htole32(which);
+ mtw_mcu_cmd(sc, CMD_LED_MODE, &cmd, sizeof(struct mtw_mcu_cmd_8));
+}
+
+static void
+mtw_abort_tsf_sync(struct mtw_softc *sc)
+{
+ uint32_t tmp;
+
+ mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
+ tmp &= ~(MTW_BCN_TX_EN | MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN);
+ mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
+}
+
+static int
+mtw_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
+{
+ const struct ieee80211_txparam *tp;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ struct mtw_vap *rvp = MTW_VAP(vap);
+ enum ieee80211_state ostate;
+ uint32_t sta[3];
+ uint8_t ratectl = 0;
+ uint8_t restart_ratectl = 0;
+ uint8_t bid = 1 << rvp->rvp_id;
+
+ ostate = vap->iv_state;
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE, "%s -> %s\n",
+ ieee80211_state_name[ostate], ieee80211_state_name[nstate]);
+ IEEE80211_UNLOCK(ic);
+ MTW_LOCK(sc);
+ ratectl = sc->ratectl_run; /* remember current state */
+ usb_callout_stop(&sc->ratectl_ch);
+ sc->ratectl_run = MTW_RATECTL_OFF;
+ if (ostate == IEEE80211_S_RUN) {
+ /* turn link LED off */
+ }
+
+ switch (nstate) {
+ case IEEE80211_S_INIT:
+ restart_ratectl = 1;
+ if (ostate != IEEE80211_S_RUN)
+ break;
+
+ ratectl &= ~bid;
+ sc->runbmap &= ~bid;
+
+ /* abort TSF synchronization if there is no vap running */
+ if (--sc->running == 0)
+ mtw_abort_tsf_sync(sc);
+ break;
+
+ case IEEE80211_S_RUN:
+ if (!(sc->runbmap & bid)) {
+ if (sc->running++)
+ restart_ratectl = 1;
+ sc->runbmap |= bid;
+ }
+
+ m_freem(rvp->beacon_mbuf);
+ rvp->beacon_mbuf = NULL;
+
+ switch (vap->iv_opmode) {
+ case IEEE80211_M_HOSTAP:
+ case IEEE80211_M_MBSS:
+ sc->ap_running |= bid;
+ ic->ic_opmode = vap->iv_opmode;
+ mtw_update_beacon_cb(vap);
+ break;
+ case IEEE80211_M_IBSS:
+ sc->adhoc_running |= bid;
+ if (!sc->ap_running)
+ ic->ic_opmode = vap->iv_opmode;
+ mtw_update_beacon_cb(vap);
+ break;
+ case IEEE80211_M_STA:
+ sc->sta_running |= bid;
+ if (!sc->ap_running && !sc->adhoc_running)
+ ic->ic_opmode = vap->iv_opmode;
+
+ /* read statistic counters (clear on read) */
+ mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
+ sizeof sta);
+
+ break;
+ default:
+ ic->ic_opmode = vap->iv_opmode;
+ break;
+ }
+
+ if (vap->iv_opmode != IEEE80211_M_MONITOR) {
+ struct ieee80211_node *ni;
+
+ if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) {
+ MTW_UNLOCK(sc);
+ IEEE80211_LOCK(ic);
+ return (-1);
+ }
+ mtw_updateslot(ic);
+ mtw_enable_mrr(sc);
+ mtw_set_txpreamble(sc);
+ mtw_set_basicrates(sc);
+ ni = ieee80211_ref_node(vap->iv_bss);
+ IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid);
+ mtw_set_bssid(sc, sc->sc_bssid);
+ ieee80211_free_node(ni);
+ mtw_enable_tsf_sync(sc);
+
+ /* enable automatic rate adaptation */
+ tp = &vap->iv_txparms[ieee80211_chan2mode(
+ ic->ic_curchan)];
+ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE)
+ ratectl |= bid;
+ } else {
+ mtw_enable_tsf_sync(sc);
+ }
+
+ break;
+ default:
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE, "undefined state\n");
+ break;
+ }
+
+ /* restart amrr for running VAPs */
+ if ((sc->ratectl_run = ratectl) && restart_ratectl) {
+ usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
+ }
+ MTW_UNLOCK(sc);
+ IEEE80211_LOCK(ic);
+ return (rvp->newstate(vap, nstate, arg));
+}
+
+static int
+mtw_wme_update(struct ieee80211com *ic)
+{
+ struct chanAccParams chp;
+ struct mtw_softc *sc = ic->ic_softc;
+ const struct wmeParams *ac;
+ int aci, error = 0;
+
+ ieee80211_wme_ic_getparams(ic, &chp);
+ ac = chp.cap_wmeParams;
+
+ MTW_LOCK(sc);
+ /* update MAC TX configuration registers */
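+ /*
+ * Judging by the shifts below, each EDCA_AC_CFG register packs
+ * CWmax in bits 19:16, CWmin in 15:12, AIFSN in 11:8 and the
+ * TXOP limit in the low byte.
+ */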
+ for (aci = 0; aci < WME_NUM_AC; aci++) {
+ error = mtw_write(sc, MTW_EDCA_AC_CFG(aci),
+ ac[aci].wmep_logcwmax << 16 | ac[aci].wmep_logcwmin << 12 |
+ ac[aci].wmep_aifsn << 8 | ac[aci].wmep_txopLimit);
+ if (error)
+ goto err;
+ }
+
+ /* update SCH/DMA registers too */
+ error = mtw_write(sc, MTW_WMM_AIFSN_CFG,
+ ac[WME_AC_VO].wmep_aifsn << 12 | ac[WME_AC_VI].wmep_aifsn << 8 |
+ ac[WME_AC_BK].wmep_aifsn << 4 | ac[WME_AC_BE].wmep_aifsn);
+ if (error)
+ goto err;
+ error = mtw_write(sc, MTW_WMM_CWMIN_CFG,
+ ac[WME_AC_VO].wmep_logcwmin << 12 |
+ ac[WME_AC_VI].wmep_logcwmin << 8 |
+ ac[WME_AC_BK].wmep_logcwmin << 4 | ac[WME_AC_BE].wmep_logcwmin);
+ if (error)
+ goto err;
+ error = mtw_write(sc, MTW_WMM_CWMAX_CFG,
+ ac[WME_AC_VO].wmep_logcwmax << 12 |
+ ac[WME_AC_VI].wmep_logcwmax << 8 |
+ ac[WME_AC_BK].wmep_logcwmax << 4 | ac[WME_AC_BE].wmep_logcwmax);
+ if (error)
+ goto err;
+ error = mtw_write(sc, MTW_WMM_TXOP0_CFG,
+ ac[WME_AC_BK].wmep_txopLimit << 16 | ac[WME_AC_BE].wmep_txopLimit);
+ if (error)
+ goto err;
+ error = mtw_write(sc, MTW_WMM_TXOP1_CFG,
+ ac[WME_AC_VO].wmep_txopLimit << 16 | ac[WME_AC_VI].wmep_txopLimit);
+
+err:
+ MTW_UNLOCK(sc);
+ if (error)
+ MTW_DPRINTF(sc, MTW_DEBUG_USB, "WME update failed\n");
+
+ return (error);
+}
+
+static int
+mtw_key_set(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ uint32_t i;
+
+ i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_key_set_cb;
+ sc->cmdq[i].arg0 = NULL;
+ sc->cmdq[i].arg1 = vap;
+ sc->cmdq[i].k = k;
+ IEEE80211_ADDR_COPY(sc->cmdq[i].mac, k->wk_macaddr);
+ ieee80211_runtask(ic, &sc->cmdq_task);
+
+ /*
+ * Make sure the key gets set even when hostapd calls
+ * iv_key_set() before if_init() has run.
+ */
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP) {
+ MTW_LOCK(sc);
+ sc->cmdq_key_set = MTW_CMDQ_GO;
+ MTW_UNLOCK(sc);
+ }
+
+ return (1);
+}
+
+static void
+mtw_key_set_cb(void *arg)
+{
+ struct mtw_cmdq *cmdq = arg;
+ struct ieee80211vap *vap = cmdq->arg1;
+ struct ieee80211_key *k = cmdq->k;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ struct ieee80211_node *ni;
+ u_int cipher = k->wk_cipher->ic_cipher;
+ uint32_t attr;
+ uint16_t base;
+ uint8_t mode, wcid, iv[8];
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (vap->iv_opmode == IEEE80211_M_HOSTAP)
+ ni = ieee80211_find_vap_node(&ic->ic_sta, vap, cmdq->mac);
+ else
+ ni = vap->iv_bss;
+
+ /* map net80211 cipher to RT2860 security mode */
+ switch (cipher) {
+ case IEEE80211_CIPHER_WEP:
+ if (k->wk_keylen < 8)
+ mode = MTW_MODE_WEP40;
+ else
+ mode = MTW_MODE_WEP104;
+ break;
+ case IEEE80211_CIPHER_TKIP:
+ mode = MTW_MODE_TKIP;
+ break;
+ case IEEE80211_CIPHER_AES_CCM:
+ mode = MTW_MODE_AES_CCMP;
+ break;
+ default:
+ MTW_DPRINTF(sc, MTW_DEBUG_KEY, "undefined case\n");
+ return;
+ }
+
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ wcid = 0; /* NB: update WCID0 for group keys */
+ base = MTW_SKEY(0, k->wk_keyix);
+ } else {
+ wcid = (ni != NULL) ? MTW_AID2WCID(ni->ni_associd) : 0;
+ base = MTW_PKEY(wcid);
+ }
+
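+ /*
+ * net80211 stores a TKIP key as 16 key bytes followed by the Tx
+ * and Rx MIC keys; the hardware expects the two MIC halves in the
+ * opposite order, hence the swapped writes below.
+ */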
+ if (cipher == IEEE80211_CIPHER_TKIP) {
+ mtw_write_region_1(sc, base, k->wk_key, 16);
+ mtw_write_region_1(sc, base + 16, &k->wk_key[24], 8);
+ mtw_write_region_1(sc, base + 24, &k->wk_key[16], 8);
+ } else {
+ /* roundup len to 16-bit: XXX fix write_region_1() instead */
+ mtw_write_region_1(sc, base, k->wk_key,
+ (k->wk_keylen + 1) & ~1);
+ }
+
+ if (!(k->wk_flags & IEEE80211_KEY_GROUP) ||
+ (k->wk_flags & (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV))) {
+ /* set initial packet number in IV+EIV */
+ if (cipher == IEEE80211_CIPHER_WEP) {
+ memset(iv, 0, sizeof iv);
+ iv[3] = vap->iv_def_txkey << 6;
+ } else {
+ if (cipher == IEEE80211_CIPHER_TKIP) {
+ iv[0] = k->wk_keytsc >> 8;
+ iv[1] = (iv[0] | 0x20) & 0x7f;
+ iv[2] = k->wk_keytsc;
+ } else { /* CCMP */
+ iv[0] = k->wk_keytsc;
+ iv[1] = k->wk_keytsc >> 8;
+ iv[2] = 0;
+ }
+ iv[3] = k->wk_keyix << 6 | IEEE80211_WEP_EXTIV;
+ iv[4] = k->wk_keytsc >> 16;
+ iv[5] = k->wk_keytsc >> 24;
+ iv[6] = k->wk_keytsc >> 32;
+ iv[7] = k->wk_keytsc >> 40;
+ }
+ mtw_write_region_1(sc, MTW_IVEIV(wcid), iv, 8);
+ }
+
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ /* install group key */
+ mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
+ attr &= ~(0xf << (k->wk_keyix * 4));
+ attr |= mode << (k->wk_keyix * 4);
+ mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
+
+ if (cipher & (IEEE80211_CIPHER_WEP)) {
+ mtw_read(sc, MTW_WCID_ATTR(wcid + 1), &attr);
+ attr = (attr & ~0xf) | (mode << 1);
+ mtw_write(sc, MTW_WCID_ATTR(wcid + 1), attr);
+
+ mtw_set_region_4(sc, MTW_IVEIV(0), 0, 4);
+
+ mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
+ attr = (attr & ~0xf) | (mode << 1);
+ mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
+ }
+ } else {
+ /* install pairwise key */
+ mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
+ attr = (attr & ~0xf) | (mode << 1) | MTW_RX_PKEY_EN;
+ mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
+ }
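+
+ /* remember the WCID in wk_pad so mtw_key_delete_cb() can find it */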
+ k->wk_pad = wcid;
+}
+
+/*
+ * If the wlan interface is destroyed without being brought down first,
+ * i.e. without a wlan down or a wpa_cli terminate, this callback runs
+ * after the vap is gone, so do not dereference it.
+ */
+static void
+mtw_key_delete_cb(void *arg)
+{
+ struct mtw_cmdq *cmdq = arg;
+ struct mtw_softc *sc = cmdq->arg1;
+ struct ieee80211_key *k = &cmdq->key;
+ uint32_t attr;
+ uint8_t wcid;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (k->wk_flags & IEEE80211_KEY_GROUP) {
+ /* remove group key */
+ MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing group key\n");
+ mtw_read(sc, MTW_SKEY_MODE_0_7, &attr);
+ attr &= ~(0xf << (k->wk_keyix * 4));
+ mtw_write(sc, MTW_SKEY_MODE_0_7, attr);
+ } else {
+ /* remove pairwise key */
+ MTW_DPRINTF(sc, MTW_DEBUG_KEY, "removing key for wcid %x\n",
+ k->wk_pad);
+ /* matching wcid was written to wk_pad in mtw_key_set() */
+ wcid = k->wk_pad;
+ mtw_read(sc, MTW_WCID_ATTR(wcid), &attr);
+ attr &= ~0xf;
+ mtw_write(sc, MTW_WCID_ATTR(wcid), attr);
+ }
+
+ k->wk_pad = 0;
+}
+
+/*
+ * return 0 on error
+ */
+static int
+mtw_key_delete(struct ieee80211vap *vap, struct ieee80211_key *k)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ struct ieee80211_key *k0;
+ uint32_t i;
+
+ if (sc->sc_flags & MTW_RUNNING)
+ return (1);
+
+ /*
+ * By the time the callback runs, the key may already be gone, so
+ * copy the values needed to delete it before deferring. We cannot
+ * take a lock here because of a LOR with the node lock, so rely on
+ * the atomic command-queue index instead.
+ */
+ i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_KEY, "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_key_delete_cb;
+ sc->cmdq[i].arg0 = NULL;
+ sc->cmdq[i].arg1 = sc;
+ k0 = &sc->cmdq[i].key;
+ k0->wk_flags = k->wk_flags;
+ k0->wk_keyix = k->wk_keyix;
+ /* matching wcid was written to wk_pad in mtw_key_set() */
+ k0->wk_pad = k->wk_pad;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+ return (1); /* return fake success */
+}
+
+static void
+mtw_ratectl_to(void *arg)
+{
+ struct mtw_softc *sc = arg;
+ /* do it in a process context, so it can go sleep */
+ ieee80211_runtask(&sc->sc_ic, &sc->ratectl_task);
+ /* next timeout will be rescheduled in the callback task */
+}
+
+/* ARGSUSED */
+static void
+mtw_ratectl_cb(void *arg, int pending)
+{
+ struct mtw_softc *sc = arg;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (vap == NULL)
+ return;
+
+ ieee80211_iterate_nodes(&ic->ic_sta, mtw_iter_func, sc);
+
+ usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
+}
+
+static void
+mtw_drain_fifo(void *arg)
+{
+ struct mtw_softc *sc = arg;
+ uint32_t stat;
+ uint16_t(*wstat)[3];
+ uint8_t wcid, mcs, pid;
+ int8_t retry;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ for (;;) {
+ /* drain Tx status FIFO (maxsize = 16) */
+ mtw_read(sc, MTW_TX_STAT_FIFO, &stat);
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx stat 0x%08x\n", stat);
+ if (!(stat & MTW_TXQ_VLD))
+ break;
+
+ wcid = (stat >> MTW_TXQ_WCID_SHIFT) & 0xff;
+
+ /* if no ACK was requested, no feedback is available */
+ if (!(stat & MTW_TXQ_ACKREQ) || wcid > MTW_WCID_MAX ||
+ wcid == 0)
+ continue;
+
+ /*
+ * Each FIFO entry has the format of a Tx-complete status, but
+ * there is no guarantee that the node it refers to still exists
+ * by the time we read it. Feeding the entries straight into
+ * ieee80211_ratectl_tx_update() could therefore touch an
+ * already-freed node.
+ *
+ * To avoid that, accumulate the stats in the softc here and
+ * update the rates later via ieee80211_ratectl_tx_update().
+ */
+ wstat = &(sc->wcid_stats[wcid]);
+ (*wstat)[MTW_TXCNT]++;
+ if (stat & MTW_TXQ_OK)
+ (*wstat)[MTW_SUCCESS]++;
+ else
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
+ /*
+ * Check if there were retries, ie if the Tx success rate is
+ * different from the requested rate. Note that it works only
+ * because we do not allow rate fallback from OFDM to CCK.
+ */
+ mcs = (stat >> MTW_TXQ_MCS_SHIFT) & 0x7f;
+ pid = (stat >> MTW_TXQ_PID_SHIFT) & 0xf;
+ if ((retry = pid - 1 - mcs) > 0) {
+ (*wstat)[MTW_TXCNT] += retry;
+ (*wstat)[MTW_RETRY] += retry;
+ }
+ }
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "count=%d\n", sc->fifo_cnt);
+
+ sc->fifo_cnt = 0;
+}
+
+static void
+mtw_iter_func(void *arg, struct ieee80211_node *ni)
+{
+ struct mtw_softc *sc = arg;
+ struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct mtw_node *rn = MTW_NODE(ni);
+ uint32_t sta[3];
+ uint16_t(*wstat)[3];
+ int error, ridx;
+ uint8_t txrate = 0;
+
+ MTW_LOCK(sc);
+
+ /* Check for special case */
+ if (sc->rvp_cnt <= 1 && vap->iv_opmode == IEEE80211_M_STA &&
+ ni != vap->iv_bss)
+ goto fail;
+
+ txs->flags = IEEE80211_RATECTL_TX_STATS_NODE |
+ IEEE80211_RATECTL_TX_STATS_RETRIES;
+ txs->ni = ni;
+ if (sc->rvp_cnt <= 1 &&
+ (vap->iv_opmode == IEEE80211_M_IBSS ||
+ vap->iv_opmode == IEEE80211_M_STA)) {
+ /*
+ * read statistic counters (clear on read) and update AMRR state
+ */
+ error = mtw_read_region_1(sc, MTW_TX_STA_CNT0, (uint8_t *)sta,
+ sizeof sta);
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE, "error:%d\n", error);
+ if (error != 0)
+ goto fail;
+
+ /* count failed TX as errors */
+ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS,
+ le32toh(sta[0]) & 0xffff);
+
+ txs->nretries = (le32toh(sta[1]) >> 16);
+ txs->nsuccess = (le32toh(sta[1]) & 0xffff);
+ /* nretries??? */
+ txs->nframes = txs->nsuccess + (le32toh(sta[0]) & 0xffff);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE,
+ "retrycnt=%d success=%d failcnt=%d\n", txs->nretries,
+ txs->nsuccess, le32toh(sta[0]) & 0xffff);
+ } else {
+ wstat = &(sc->wcid_stats[MTW_AID2WCID(ni->ni_associd)]);
+
+ if (wstat == &(sc->wcid_stats[0]) ||
+ wstat > &(sc->wcid_stats[MTW_WCID_MAX]))
+ goto fail;
+
+ txs->nretries = (*wstat)[MTW_RETRY];
+ txs->nsuccess = (*wstat)[MTW_SUCCESS];
+ txs->nframes = (*wstat)[MTW_TXCNT];
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE,
+ "wstat retrycnt=%d txcnt=%d success=%d\n", txs->nretries,
+ txs->nframes, txs->nsuccess);
+
+ memset(wstat, 0, sizeof(*wstat));
+ }
+
+ ieee80211_ratectl_tx_update(vap, txs);
+ ieee80211_ratectl_rate(ni, NULL, 0);
+ txrate = ieee80211_node_get_txrate_dot11rate(ni);
+
+ /* XXX TODO: methodize with MCS rates */
+ for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE, "ni_txrate=0x%x\n",
+ txrate);
+ if (rt2860_rates[ridx].rate == txrate) {
+ break;
+ }
+ }
+ rn->amrr_ridx = ridx;
+fail:
+ MTW_UNLOCK(sc);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_RATE, "rate=%d, ridx=%d\n",
+ txrate, rn->amrr_ridx);
+}
+
+static void
+mtw_newassoc_cb(void *arg)
+{
+ struct mtw_cmdq *cmdq = arg;
+ struct ieee80211_node *ni = cmdq->arg1;
+ struct mtw_softc *sc = ni->ni_vap->iv_ic->ic_softc;
+
+ uint8_t wcid = cmdq->wcid;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ mtw_write_region_1(sc, MTW_WCID_ENTRY(wcid), ni->ni_macaddr,
+ IEEE80211_ADDR_LEN);
+
+ memset(&(sc->wcid_stats[wcid]), 0, sizeof(sc->wcid_stats[wcid]));
+}
+
+static void
+mtw_newassoc(struct ieee80211_node *ni, int isnew)
+{
+ struct mtw_node *mn = MTW_NODE(ni);
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ uint8_t rate;
+ uint8_t ridx;
+ uint8_t wcid;
+
+ wcid = MTW_AID2WCID(ni->ni_associd);
+
+ if (wcid > MTW_WCID_MAX) {
+ device_printf(sc->sc_dev, "wcid=%d out of range\n", wcid);
+ return;
+ }
+
+ /* only interested in true associations */
+ if (isnew && ni->ni_associd != 0) {
+ /*
+ * This function can be called from a timeout (non-sleepable)
+ * context, so the hardware update must be deferred.
+ */
+
+ uint32_t cnt = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE, "cmdq_store=%d\n", cnt);
+ sc->cmdq[cnt].func = mtw_newassoc_cb;
+ sc->cmdq[cnt].arg0 = NULL;
+ sc->cmdq[cnt].arg1 = ni;
+ sc->cmdq[cnt].wcid = wcid;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+ }
+
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE,
+ "new assoc isnew=%d associd=%x addr=%s\n", isnew, ni->ni_associd,
+ ether_sprintf(ni->ni_macaddr));
+ rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate;
+ /* XXX TODO: methodize with MCS rates */
+ for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
+ if (rt2860_rates[ridx].rate == rate)
+ break;
+ mn->mgt_ridx = ridx;
+ MTW_DPRINTF(sc, MTW_DEBUG_STATE | MTW_DEBUG_RATE,
+ "rate=%d, ctl_ridx=%d\n", rate, ridx);
+ MTW_LOCK(sc);
+ if (sc->ratectl_run != MTW_RATECTL_OFF) {
+ usb_callout_reset(&sc->ratectl_ch, hz, mtw_ratectl_to, sc);
+ }
+ MTW_UNLOCK(sc);
+}
+
+/*
+ * Return the Rx chain with the highest RSSI for a given frame.
+ */
+static __inline uint8_t
+mtw_maxrssi_chain(struct mtw_softc *sc, const struct mtw_rxwi *rxwi)
+{
+ uint8_t rxchain = 0;
+
+ if (sc->nrxchains > 1) {
+ if (rxwi->rssi[1] > rxwi->rssi[rxchain])
+ rxchain = 1;
+ if (sc->nrxchains > 2)
+ if (rxwi->rssi[2] > rxwi->rssi[rxchain])
+ rxchain = 2;
+ }
+ return (rxchain);
+}
+
+static void
+mtw_get_tsf(struct mtw_softc *sc, uint64_t *buf)
+{
+ mtw_read_region_1(sc, MTW_TSF_TIMER_DW0, (uint8_t *)buf, sizeof(*buf));
+}
+
+static void
+mtw_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
+ const struct ieee80211_rx_stats *rxs, int rssi, int nf)
+{
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct mtw_softc *sc = vap->iv_ic->ic_softc;
+ struct mtw_vap *rvp = MTW_VAP(vap);
+ uint64_t ni_tstamp, rx_tstamp;
+
+ rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf);
+
+ if (vap->iv_state == IEEE80211_S_RUN &&
+ (subtype == IEEE80211_FC0_SUBTYPE_BEACON ||
+ subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)) {
+ ni_tstamp = le64toh(ni->ni_tstamp.tsf);
+ MTW_LOCK(sc);
+ mtw_get_tsf(sc, &rx_tstamp);
+ MTW_UNLOCK(sc);
+ rx_tstamp = le64toh(rx_tstamp);
+
+ if (ni_tstamp >= rx_tstamp) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV | MTW_DEBUG_BEACON,
+ "ibss merge, tsf %ju tstamp %ju\n",
+ (uintmax_t)rx_tstamp, (uintmax_t)ni_tstamp);
+ (void)ieee80211_ibss_merge(ni);
+ }
+ }
+}
+
+static void
+mtw_rx_frame(struct mtw_softc *sc, struct mbuf *m, uint32_t dmalen)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_frame *wh;
+ struct ieee80211_node *ni;
+ struct epoch_tracker et;
+ struct mtw_rxwi *rxwi;
+ uint32_t flags;
+ uint16_t len, rxwisize;
+ uint8_t ant, rssi;
+ int8_t nf;
+
+ rxwisize = sizeof(struct mtw_rxwi);
+
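+ /* the buffer here is an RXWI header followed by the 802.11 frame */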
+ if (__predict_false(
+ dmalen < rxwisize + sizeof(struct ieee80211_frame_ack))) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV,
+ "payload is too short: dma length %u < %zu\n", dmalen,
+ rxwisize + sizeof(struct ieee80211_frame_ack));
+ goto fail;
+ }
+
+ rxwi = mtod(m, struct mtw_rxwi *);
+ len = le16toh(rxwi->len) & 0xfff;
+ flags = le32toh(rxwi->flags);
+ if (__predict_false(len > dmalen - rxwisize)) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV, "bad RXWI length %u > %u\n",
+ len, dmalen);
+ goto fail;
+ }
+
+ if (__predict_false(flags & (MTW_RX_CRCERR | MTW_RX_ICVERR))) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s error.\n",
+ (flags & MTW_RX_CRCERR) ? "CRC" : "ICV");
+ goto fail;
+ }
+
+ if (flags & MTW_RX_L2PAD) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV,
+ "received RT2860_RX_L2PAD frame\n");
+ len += 2;
+ }
+
+ m->m_data += rxwisize;
+ m->m_pkthdr.len = m->m_len = len;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
+ wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED;
+ m->m_flags |= M_WEP;
+ }
+
+ if (len >= sizeof(struct ieee80211_frame_min)) {
+ ni = ieee80211_find_rxnode(ic,
+ mtod(m, struct ieee80211_frame_min *));
+ } else
+ ni = NULL;
+
+ if (ni != NULL && (ni->ni_flags & IEEE80211_NODE_HT))
+ m->m_flags |= M_AMPDU;
+
+ if (__predict_false(flags & MTW_RX_MICERR)) {
+ /* report MIC failures to net80211 for TKIP */
+ if (ni != NULL)
+ ieee80211_notify_michael_failure(ni->ni_vap, wh,
+ rxwi->keyidx);
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV,
+ "MIC error. Someone is lying.\n");
+ goto fail;
+ }
+
+ ant = mtw_maxrssi_chain(sc, rxwi);
+ rssi = rxwi->rssi[ant];
+ nf = mtw_rssi2dbm(sc, rssi, ant);
+
+ if (__predict_false(ieee80211_radiotap_active(ic))) {
+ struct mtw_rx_radiotap_header *tap = &sc->sc_rxtap;
+ uint16_t phy;
+
+ tap->wr_flags = 0;
+ if (flags & MTW_RX_L2PAD)
+ tap->wr_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
+ tap->wr_antsignal = rssi;
+ tap->wr_antenna = ant;
+ tap->wr_dbm_antsignal = mtw_rssi2dbm(sc, rssi, ant);
+ tap->wr_rate = 2; /* in case it can't be found below */
+ phy = le16toh(rxwi->phy);
+ switch (phy >> MT7601_PHY_SHIFT) {
+ case MTW_PHY_CCK:
+ switch ((phy & MTW_PHY_MCS) & ~MTW_PHY_SHPRE) {
+ case 0:
+ tap->wr_rate = 2;
+ break;
+ case 1:
+ tap->wr_rate = 4;
+ break;
+ case 2:
+ tap->wr_rate = 11;
+ break;
+ case 3:
+ tap->wr_rate = 22;
+ break;
+ }
+ if (phy & MTW_PHY_SHPRE)
+ tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+ break;
+ case MTW_PHY_OFDM:
+ switch (phy & MTW_PHY_MCS) {
+ case 0:
+ tap->wr_rate = 12;
+ break;
+ case 1:
+ tap->wr_rate = 18;
+ break;
+ case 2:
+ tap->wr_rate = 24;
+ break;
+ case 3:
+ tap->wr_rate = 36;
+ break;
+ case 4:
+ tap->wr_rate = 48;
+ break;
+ case 5:
+ tap->wr_rate = 72;
+ break;
+ case 6:
+ tap->wr_rate = 96;
+ break;
+ case 7:
+ tap->wr_rate = 108;
+ break;
+ }
+ break;
+ }
+ }
+
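+ /* net80211 input paths must run inside the network epoch */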
+ NET_EPOCH_ENTER(et);
+ if (ni != NULL) {
+ (void)ieee80211_input(ni, m, rssi, nf);
+ ieee80211_free_node(ni);
+ } else {
+ (void)ieee80211_input_all(ic, m, rssi, nf);
+ }
+ NET_EPOCH_EXIT(et);
+
+ return;
+
+fail:
+ m_freem(m);
+ counter_u64_add(ic->ic_ierrors, 1);
+}
+
+static void
+mtw_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct mtw_softc *sc = usbd_xfer_softc(xfer);
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct mbuf *m = NULL;
+ struct mbuf *m0;
+ uint32_t dmalen, mbuf_len;
+ uint16_t rxwisize;
+ int xferlen;
+
+ rxwisize = sizeof(struct mtw_rxwi);
+
+ usbd_xfer_status(xfer, &xferlen, NULL, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV, "rx done, actlen=%d\n",
+ xferlen);
+ if (xferlen < (int)(sizeof(uint32_t) + rxwisize +
+ sizeof(struct mtw_rxd))) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
+ "xfer too short %d %d\n", xferlen,
+ (int)(sizeof(uint32_t) + rxwisize +
+ sizeof(struct mtw_rxd)));
+ goto tr_setup;
+ }
+
+ m = sc->rx_m;
+ sc->rx_m = NULL;
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ tr_setup:
+
+ if (sc->rx_m == NULL) {
+ sc->rx_m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR,
+ MTW_MAX_RXSZ);
+ }
+ if (sc->rx_m == NULL) {
+ MTW_DPRINTF(sc,
+ MTW_DEBUG_RECV | MTW_DEBUG_RECV_DESC |
+ MTW_DEBUG_USB,
+ "could not allocate mbuf - idle with stall\n");
+ counter_u64_add(ic->ic_ierrors, 1);
+ usbd_xfer_set_stall(xfer);
+ usbd_xfer_set_frames(xfer, 0);
+ } else {
+ /*
+ * Directly loading a mbuf cluster into DMA to
+ * save some data copying. This works because
+ * there is only one cluster.
+ */
+ usbd_xfer_set_frame_data(xfer, 0,
+ mtod(sc->rx_m, caddr_t), MTW_MAX_RXSZ);
+ usbd_xfer_set_frames(xfer, 1);
+ }
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
+ "USB transfer error, %s\n", usbd_errstr(error));
+
+ if (error != USB_ERR_CANCELLED) {
+ /* try to clear stall first */
+ usbd_xfer_set_stall(xfer);
+ if (error == USB_ERR_TIMEOUT)
+ device_printf(sc->sc_dev, "device timeout %s\n",
+ __func__);
+ counter_u64_add(ic->ic_ierrors, 1);
+ goto tr_setup;
+ }
+ if (sc->rx_m != NULL) {
+ m_freem(sc->rx_m);
+ sc->rx_m = NULL;
+ }
+ break;
+ }
+
+ if (m == NULL)
+ return;
+
+ /* handing frames to net80211 must be done last, unlocked */
+
+ MTW_UNLOCK(sc);
+
+ m->m_pkthdr.len = m->m_len = xferlen;
+
+ /* HW can aggregate multiple 802.11 frames in a single USB xfer */
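+ /*
+ * Each aggregated frame carries 8 bytes of overhead: a 32-bit
+ * DMA-length word up front and a 4-byte trailer after the payload.
+ */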
+ for (;;) {
+ dmalen = le32toh(*mtod(m, uint32_t *)) & 0xffff;
+
+ if ((dmalen >= (uint32_t)-8) || (dmalen == 0) ||
+ ((dmalen & 3) != 0)) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
+ "bad DMA length %u\n", dmalen);
+ break;
+ }
+ if ((dmalen + 8) > (uint32_t)xferlen) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
+ "bad DMA length %u > %d\n", dmalen + 8, xferlen);
+ break;
+ }
+
+ /* If it is the last one or a single frame, we won't copy. */
+ if ((xferlen -= dmalen + 8) <= 8) {
+ /* trim 32-bit DMA-len header */
+ m->m_data += 4;
+ m->m_pkthdr.len = m->m_len -= 4;
+ mtw_rx_frame(sc, m, dmalen);
+ m = NULL; /* don't free source buffer */
+ break;
+ }
+
+ mbuf_len = dmalen + sizeof(struct mtw_rxd);
+ if (__predict_false(mbuf_len > MCLBYTES)) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC | MTW_DEBUG_USB,
+ "payload is too big: mbuf_len %u\n", mbuf_len);
+ counter_u64_add(ic->ic_ierrors, 1);
+ break;
+ }
+
+ /* copy aggregated frames to another mbuf */
+ m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
+ if (__predict_false(m0 == NULL)) {
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV_DESC,
+ "could not allocate mbuf\n");
+ counter_u64_add(ic->ic_ierrors, 1);
+ break;
+ }
+ m_copydata(m, 4 /* skip 32-bit DMA-len header */, mbuf_len,
+ mtod(m0, caddr_t));
+ m0->m_pkthdr.len = m0->m_len = mbuf_len;
+ mtw_rx_frame(sc, m0, dmalen);
+
+ /* update data ptr */
+ m->m_data += mbuf_len + 4;
+ m->m_pkthdr.len = m->m_len -= mbuf_len + 4;
+ }
+
+ /* make sure we free the source buffer, if any */
+ m_freem(m);
+
+#ifdef IEEE80211_SUPPORT_SUPERG
+ ieee80211_ff_age_all(ic, 100);
+#endif
+ MTW_LOCK(sc);
+}
+
+static void
+mtw_tx_free(struct mtw_endpoint_queue *pq, struct mtw_tx_data *data, int txerr)
+{
+ ieee80211_tx_complete(data->ni, data->m, txerr);
+ data->m = NULL;
+ data->ni = NULL;
+
+ STAILQ_INSERT_TAIL(&pq->tx_fh, data, next);
+ pq->tx_nfree++;
+}
+
+static void
+mtw_bulk_tx_callbackN(struct usb_xfer *xfer, usb_error_t error, u_int index)
+{
+ struct mtw_softc *sc = usbd_xfer_softc(xfer);
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct mtw_tx_data *data;
+ struct ieee80211vap *vap = NULL;
+ struct usb_page_cache *pc;
+ struct mtw_endpoint_queue *pq = &sc->sc_epq[index];
+ struct mbuf *m;
+ usb_frlength_t size;
+ int actlen;
+ int sumlen;
+
+ usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
+ "transfer complete: %d bytes @ index %d\n", actlen, index);
+
+ data = usbd_xfer_get_priv(xfer);
+ mtw_tx_free(pq, data, 0);
+ usbd_xfer_set_priv(xfer, NULL);
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP:
+ tr_setup:
+ data = STAILQ_FIRST(&pq->tx_qh);
+ if (data == NULL)
+ break;
+
+ STAILQ_REMOVE_HEAD(&pq->tx_qh, next);
+
+ m = data->m;
+
+ size = sizeof(data->desc);
+ if ((m->m_pkthdr.len + size + 3 + 8) > MTW_MAX_TXSZ) {
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT_DESC | MTW_DEBUG_USB,
+ "data overflow, %u bytes\n", m->m_pkthdr.len);
+ mtw_tx_free(pq, data, 1);
+ goto tr_setup;
+ }
+
+ pc = usbd_xfer_get_frame(xfer, 0);
+ usbd_copy_in(pc, 0, &data->desc, size);
+ usbd_m_copy_in(pc, size, m, 0, m->m_pkthdr.len);
+ size += m->m_pkthdr.len;
+ /*
+ * Align end on a 4-byte boundary, pad 8 bytes (CRC +
+ * 4-byte padding), and be sure to zero those trailing
+ * bytes:
+ */
+ usbd_frame_zero(pc, size, ((-size) & 3) + MTW_DMA_PAD);
+ size += ((-size) & 3) + MTW_DMA_PAD;
+
+ vap = data->ni->ni_vap;
+ if (ieee80211_radiotap_active_vap(vap)) {
+ const struct ieee80211_frame *wh;
+ struct mtw_tx_radiotap_header *tap = &sc->sc_txtap;
+ struct mtw_txwi *txwi =
+ (struct mtw_txwi *)(&data->desc +
+ sizeof(struct mtw_txd));
+ int has_l2pad;
+
+ wh = mtod(m, struct ieee80211_frame *);
+ has_l2pad = IEEE80211_HAS_ADDR4(wh) !=
+ IEEE80211_QOS_HAS_SEQ(wh);
+
+ tap->wt_flags = 0;
+ tap->wt_rate = rt2860_rates[data->ridx].rate;
+ tap->wt_hwqueue = index;
+ if (le16toh(txwi->phy) & MTW_PHY_SHPRE)
+ tap->wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
+ if (has_l2pad)
+ tap->wt_flags |= IEEE80211_RADIOTAP_F_DATAPAD;
+
+ ieee80211_radiotap_tx(vap, m);
+ }
+
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
+ "sending frame len=%u/%u @ index %d\n", m->m_pkthdr.len,
+ size, index);
+
+ usbd_xfer_set_frame_len(xfer, 0, size);
+ usbd_xfer_set_priv(xfer, data);
+ usbd_transfer_submit(xfer);
+ mtw_start(sc);
+
+ break;
+
+ default:
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
+ "USB transfer error, %s\n", usbd_errstr(error));
+
+ data = usbd_xfer_get_priv(xfer);
+
+ if (data != NULL) {
+ if (data->ni != NULL)
+ vap = data->ni->ni_vap;
+ mtw_tx_free(pq, data, error);
+ usbd_xfer_set_priv(xfer, NULL);
+ }
+
+ if (vap == NULL)
+ vap = TAILQ_FIRST(&ic->ic_vaps);
+
+ if (error != USB_ERR_CANCELLED) {
+ if (error == USB_ERR_TIMEOUT) {
+ device_printf(sc->sc_dev, "device timeout %s\n",
+ __func__);
+ uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT | MTW_DEBUG_USB,
+ "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_usb_timeout_cb;
+ sc->cmdq[i].arg0 = vap;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+ }
+
+ /*
+ * Try to clear the stall first; do this for other errors
+ * as well. Note that clearing a stall introduces an
+ * ~50 ms delay:
+ */
+ usbd_xfer_set_stall(xfer);
+ goto tr_setup;
+ }
+ break;
+ }
+#ifdef IEEE80211_SUPPORT_SUPERG
+ /* XXX TODO: make this deferred rather than unlock/relock */
+ /* XXX TODO: should only do the QoS AC this belongs to */
+ if (pq->tx_nfree >= MTW_TX_RING_COUNT) {
+ MTW_UNLOCK(sc);
+ ieee80211_ff_flush_all(ic);
+ MTW_LOCK(sc);
+ }
+#endif
+}
+
+static void
+mtw_fw_callback(struct usb_xfer *xfer, usb_error_t error)
+{
+ struct mtw_softc *sc = usbd_xfer_softc(xfer);
+ int actlen;
+ int ntries, tmp;
+
+ usbd_xfer_status(xfer, &actlen, NULL, NULL, NULL);
+ usbd_xfer_set_priv(xfer, NULL);
+
+ switch (USB_GET_STATE(xfer)) {
+ case USB_ST_TRANSFERRED:
+ sc->sc_sent += actlen;
+ memset(sc->txd_fw[sc->sc_idx], 0, actlen);
+
+ if (actlen < 0x2c44 && sc->sc_idx == 0) {
+ return;
+ }
+ if (sc->sc_idx == 3) {
+ if ((error = mtw_write_ivb(sc, sc->sc_ivb_1,
+ MTW_MCU_IVB_LEN)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not write ivb error: %d\n", error);
+ }
+
+ mtw_delay(sc, 10);
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read_cfg(sc, MTW_MCU_DMA_ADDR,
+ &tmp)) != 0) {
+ device_printf(sc->sc_dev,
+ "Could not read cfg error: %d\n", error);
+ }
+ if (tmp == MTW_MCU_READY) {
+ MTW_DPRINTF(sc, MTW_DEBUG_FIRMWARE,
+ "mcu reaady %d\n", tmp);
+ sc->fwloading = 1;
+ break;
+ }
+
+ mtw_delay(sc, 10);
+ }
+ if (ntries == 100)
+ sc->fwloading = 0;
+ wakeup(&sc->fwloading);
+ return;
+ }
+
+ if (actlen == 0x2c44) {
+ sc->sc_idx++;
+ DELAY(1000);
+ }
+
+ /* FALLTHROUGH */
+ case USB_ST_SETUP: {
+ int dlen = sc->txd_fw[sc->sc_idx]->len;
+
+ mtw_write_cfg(sc, MTW_MCU_DMA_ADDR, 0x40 + sc->sc_sent);
+ mtw_write_cfg(sc, MTW_MCU_DMA_LEN, (dlen << 16));
+
+ usbd_xfer_set_frame_len(xfer, 0, dlen);
+ usbd_xfer_set_frame_data(xfer, 0, sc->txd_fw[sc->sc_idx], dlen);
+
+ usbd_transfer_submit(xfer);
+ break;
+
+ default: /* Error */
+ device_printf(sc->sc_dev, "%s:%d %s\n", __FILE__, __LINE__,
+ usbd_errstr(error));
+ sc->fwloading = 0;
+ wakeup(&sc->fwloading);
+ break;
+ }
+ }
+}
+
+static void
+mtw_bulk_tx_callback0(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 0);
+}
+
+static void
+mtw_bulk_tx_callback1(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 1);
+}
+
+static void
+mtw_bulk_tx_callback2(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 2);
+}
+
+static void
+mtw_bulk_tx_callback3(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 3);
+}
+
+static void
+mtw_bulk_tx_callback4(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 4);
+}
+
+static void
+mtw_bulk_tx_callback5(struct usb_xfer *xfer, usb_error_t error)
+{
+ mtw_bulk_tx_callbackN(xfer, error, 5);
+}
+
+static void
+mtw_set_tx_desc(struct mtw_softc *sc, struct mtw_tx_data *data)
+{
+ struct mbuf *m = data->m;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = data->ni->ni_vap;
+ struct ieee80211_frame *wh;
+ struct mtw_txd *txd;
+ struct mtw_txwi *txwi;
+ uint16_t xferlen, txwisize;
+ uint16_t mcs;
+ uint8_t ridx = data->ridx;
+ uint8_t pad;
+
+ /* get MCS code from rate index */
+ mcs = rt2860_rates[ridx].mcs;
+
+ txwisize = sizeof(*txwi);
+ xferlen = txwisize + m->m_pkthdr.len;
+
+ /* roundup to 32-bit alignment */
+ xferlen = (xferlen + 3) & ~3;
+
+ txd = (struct mtw_txd *)&data->desc;
+ txd->len = htole16(xferlen);
+
+ wh = mtod(m, struct ieee80211_frame *);
+
+ /*
+ * If both are true or both are false, the header is already
+ * 32-bit aligned and no L2 padding is needed.
+ */
+ if (IEEE80211_HAS_ADDR4(wh) == IEEE80211_QOS_HAS_SEQ(wh))
+ pad = 0;
+ else
+ pad = 2;
+
+ /* setup TX Wireless Information */
+ txwi = (struct mtw_txwi *)(txd + 1);
+ txwi->len = htole16(m->m_pkthdr.len - pad);
+ if (rt2860_rates[ridx].phy == IEEE80211_T_DS) {
+ mcs |= MTW_PHY_CCK;
+ if (ridx != MTW_RIDX_CCK1 &&
+ (ic->ic_flags & IEEE80211_F_SHPREAMBLE))
+ mcs |= MTW_PHY_SHPRE;
+ } else if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM) {
+ mcs |= MTW_PHY_OFDM;
+ } else if (rt2860_rates[ridx].phy == IEEE80211_T_HT) {
+ /* XXX TODO: [adrian] set short preamble for MCS? */
+ mcs |= MTW_PHY_HT; /* Mixed, not greenfield */
+ }
+ txwi->phy = htole16(mcs);
+
+ /* check if RTS/CTS or CTS-to-self protection is required */
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ ((m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) ||
+ ((ic->ic_flags & IEEE80211_F_USEPROT) &&
+ rt2860_rates[ridx].phy == IEEE80211_T_OFDM) ||
+ ((ic->ic_htprotmode == IEEE80211_PROT_RTSCTS) &&
+ rt2860_rates[ridx].phy == IEEE80211_T_HT)))
+ txwi->txop |= MTW_TX_TXOP_HT;
+ else
+ txwi->txop |= MTW_TX_TXOP_BACKOFF;
+}
+
+/* This function must be called locked */
+static int
+mtw_tx(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = ni->ni_vap;
+ struct ieee80211_frame *wh;
+ const struct ieee80211_txparam *tp = ni->ni_txparms;
+ struct mtw_node *rn = MTW_NODE(ni);
+ struct mtw_tx_data *data;
+ struct mtw_txd *txd;
+ struct mtw_txwi *txwi;
+ uint16_t qos;
+ uint16_t dur;
+ uint16_t qid;
+ uint8_t type;
+ uint8_t tid;
+ uint16_t ridx;
+ uint8_t ctl_ridx;
+ uint16_t qflags;
+ uint8_t xflags = 0;
+ int hasqos;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ wh = mtod(m, struct ieee80211_frame *);
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ qflags = htole16(MTW_TXD_DATA | MTW_TXD_80211 |
+ MTW_TXD_WLAN | MTW_TXD_QSEL_HCCA);
+
+ if ((hasqos = IEEE80211_QOS_HAS_SEQ(wh))) {
+ uint8_t *frm;
+
+ frm = ieee80211_getqos(wh);
+ qos = le16toh(*(const uint16_t *)frm);
+ tid = ieee80211_gettid(wh);
+ qid = TID_TO_WME_AC(tid);
+ qflags |= MTW_TXD_QSEL_EDCA;
+ } else {
+ qos = 0;
+ tid = 0;
+ qid = WME_AC_BE;
+ }
+ if (type == IEEE80211_FC0_TYPE_MGT)
+ qid = 0;
+
+ if (type != IEEE80211_FC0_TYPE_DATA)
+ qflags |= htole16(MTW_TXD_WIV);
+
+ if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
+ type != IEEE80211_FC0_TYPE_DATA || m->m_flags & M_EAPOL) {
+ /* XXX TODO: methodize for 11n; use MCS0 for 11NA/11NG */
+ ridx = (ic->ic_curmode == IEEE80211_MODE_11A
+ || ic->ic_curmode == IEEE80211_MODE_11NA) ?
+ MTW_RIDX_OFDM6 : MTW_RIDX_CCK1;
+ if (ic->ic_curmode == IEEE80211_MODE_11NG)
+ ridx = 12;
+ ctl_ridx = rt2860_rates[ridx].ctl_ridx;
+ } else {
+ if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
+ ridx = rn->fix_ridx;
+ } else {
+ ridx = rn->amrr_ridx;
+ ctl_ridx = rt2860_rates[ridx].ctl_ridx;
+ }
+ }
+
+ if (hasqos)
+ xflags = 0;
+ else
+ xflags = MTW_TX_NSEQ;
+
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1) &&
+ (!hasqos ||
+ (qos & IEEE80211_QOS_ACKPOLICY) !=
+ IEEE80211_QOS_ACKPOLICY_NOACK)) {
+ xflags |= MTW_TX_ACK;
+ if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ dur = rt2860_rates[ctl_ridx].sp_ack_dur;
+ else
+ dur = rt2860_rates[ctl_ridx].lp_ack_dur;
+ USETW(wh->i_dur, dur);
+ }
+ /* reserve slots for mgmt packets, just in case */
+ if (sc->sc_epq[qid].tx_nfree < 3) {
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "tx ring %d is full\n", qid);
+ return (-1);
+ }
+
+ data = STAILQ_FIRST(&sc->sc_epq[qid].tx_fh);
+ STAILQ_REMOVE_HEAD(&sc->sc_epq[qid].tx_fh, next);
+ sc->sc_epq[qid].tx_nfree--;
+
+ txd = (struct mtw_txd *)&data->desc;
+ txd->flags = qflags;
+
+ txwi = (struct mtw_txwi *)(txd + 1);
+ txwi->xflags = xflags;
+ txwi->wcid = (type == IEEE80211_FC0_TYPE_DATA) ?
+ MTW_AID2WCID(ni->ni_associd) : 0xff;
+
+ /* clear leftover garbage bits */
+ txwi->flags = 0;
+ txwi->txop = 0;
+
+ data->m = m;
+ data->ni = ni;
+ data->ridx = ridx;
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ mtw_set_tx_desc(sc, data);
+
+ /*
+ * The chip keeps two kinds of Tx stats:
+ * * TX_STAT_FIFO, for per-WCID stats, and
+ * * TX_STA_CNT0, for aggregate all-TX stats.
+ *
+ * To use the FIFO stats we store the MCS in the driver-private
+ * PacketID field so we can tell whose stats we are reading when
+ * we drain the FIFO. We add 1 to the MCS because a PacketID of 0
+ * means no feedback is wanted in TX_STAT_FIFO -- which is what we
+ * want in STA mode, where TX_STA_CNT0 already does the job.
+ *
+ * FIFO stats don't count Tx with WCID 0xff; that case is handled
+ * in mtw_tx().
+ */
+
+ if (sc->rvp_cnt > 1 || vap->iv_opmode == IEEE80211_M_HOSTAP ||
+ vap->iv_opmode == IEEE80211_M_MBSS) {
+ /*
+ * Unlike PCI devices we get no interrupts from USB hardware, so
+ * simulate a FIFO-is-full interrupt here. Ralink recommends
+ * draining the FIFO stats every 100 ms, and the 16 slots fill up
+ * quickly; count the FIFO stat requests so we know how many slots
+ * are left. This is done only in HOSTAP and multi-vap modes, where
+ * FIFO stats are actually used. AMRR itself is updated once per
+ * second by mtw_ratectl_cb() via callout; drain early, otherwise
+ * the FIFO overflows.
+ */
+ if (sc->fifo_cnt++ == 10) {
+ /*
+ * With multiple vaps or if_bridge, if_start() is called
+ * with a non-sleepable lock, tcpinp. So, need to defer.
+ */
+ uint32_t i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_drain_fifo;
+ sc->cmdq[i].arg0 = sc;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+ }
+ }
+
+ STAILQ_INSERT_TAIL(&sc->sc_epq[qid].tx_qh, data, next);
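+ /* kick the bulk-out endpoint mapped to this WME access category */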
+ usbd_transfer_start(sc->sc_xfer[mtw_wme_ac_xfer_map[qid]]);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
+ "sending data frame len=%d rate=%d qid=%d\n",
+ m->m_pkthdr.len +
+ (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
+ rt2860_rates[ridx].rate, qid);
+
+ return (0);
+}
+
+static int
+mtw_tx_mgt(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct mtw_node *rn = MTW_NODE(ni);
+ struct mtw_tx_data *data;
+ struct ieee80211_frame *wh;
+ struct mtw_txd *txd;
+ struct mtw_txwi *txwi;
+ uint8_t type;
+ uint16_t dur;
+ uint8_t ridx = rn->mgt_ridx;
+ uint8_t xflags = 0;
+ uint8_t wflags = 0;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ wh = mtod(m, struct ieee80211_frame *);
+
+ /* tell hardware to add timestamp for probe responses */
+ if ((wh->i_fc[0] &
+ (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
+ (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+ wflags |= MTW_TX_TS;
+ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
+ xflags |= MTW_TX_ACK;
+
+ dur = ieee80211_ack_duration(ic->ic_rt, rt2860_rates[ridx].rate,
+ ic->ic_flags & IEEE80211_F_SHPREAMBLE);
+ USETW(wh->i_dur, dur);
+ }
+ type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ if (sc->sc_epq[0].tx_nfree == 0)
+ /* let caller free mbuf */
+ return (EIO);
+ data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
+ STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
+ sc->sc_epq[0].tx_nfree--;
+
+ txd = (struct mtw_txd *)&data->desc;
+ txd->flags = htole16(
+ MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
+ if (type != IEEE80211_FC0_TYPE_DATA)
+ txd->flags |= htole16(MTW_TXD_WIV);
+
+ txwi = (struct mtw_txwi *)(txd + 1);
+ txwi->wcid = 0xff;
+ txwi->xflags = xflags;
+ txwi->flags = wflags;
+
+ txwi->txop = 0; /* clear leftover garbage bits */
+
+ data->m = m;
+ data->ni = ni;
+ data->ridx = ridx;
+
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
+ m->m_pkthdr.len +
+ (int)(sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)),
+ rt2860_rates[ridx].rate);
+
+ STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
+
+ usbd_transfer_start(sc->sc_xfer[MTW_BULK_TX_BE]);
+
+ return (0);
+}
+
+static int
+mtw_sendprot(struct mtw_softc *sc, const struct mbuf *m,
+ struct ieee80211_node *ni, int prot, int rate)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mtw_tx_data *data;
+ struct mtw_txd *txd;
+ struct mtw_txwi *txwi;
+ struct mbuf *mprot;
+ int ridx;
+ int protrate;
+ uint8_t wflags = 0;
+ uint8_t xflags = 0;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ /* check that there are free slots before allocating the mbuf */
+ if (sc->sc_epq[0].tx_nfree == 0)
+ /* let caller free mbuf */
+ return (ENOBUFS);
+
+ mprot = ieee80211_alloc_prot(ni, m, rate, prot);
+ if (mprot == NULL) {
+ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1);
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "could not allocate mbuf\n");
+ return (ENOBUFS);
+ }
+
+ protrate = ieee80211_ctl_rate(ic->ic_rt, rate);
+ wflags = MTW_TX_FRAG;
+ xflags = 0;
+ if (prot == IEEE80211_PROT_RTSCTS)
+ xflags |= MTW_TX_ACK;
+
+ data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
+ STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
+ sc->sc_epq[0].tx_nfree--;
+
+ txd = (struct mtw_txd *)&data->desc;
+ txd->flags = RT2860_TX_QSEL_EDCA;
+ txwi = (struct mtw_txwi *)(txd + 1);
+ txwi->wcid = 0xff;
+ txwi->flags = wflags;
+ txwi->xflags = xflags;
+ txwi->txop = 0; /* clear leftover garbage bits */
+
+ data->m = mprot;
+ data->ni = ieee80211_ref_node(ni);
+
+ /* XXX TODO: methodize with MCS rates */
+ for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
+ if (rt2860_rates[ridx].rate == protrate)
+ break;
+ data->ridx = ridx;
+
+ mtw_set_tx_desc(sc, data);
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending prot len=%u rate=%u\n",
+ m->m_pkthdr.len, rate);
+
+ STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
+
+ usbd_transfer_start(sc->sc_xfer[0]);
+
+ return (0);
+}
+
+static int
+mtw_tx_param(struct mtw_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
+ const struct ieee80211_bpf_params *params)
+{
+ struct ieee80211com *ic = ni->ni_ic;
+ struct mtw_tx_data *data;
+ struct mtw_txd *txd;
+ struct mtw_txwi *txwi;
+ uint8_t ridx;
+ uint8_t rate;
+ uint8_t opflags = 0;
+ uint8_t xflags = 0;
+ int error;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ KASSERT(params != NULL, ("no raw xmit params"));
+
+ rate = params->ibp_rate0;
+ if (!ieee80211_isratevalid(ic->ic_rt, rate)) {
+ /* let caller free mbuf */
+ return (EINVAL);
+ }
+
+ if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0)
+ xflags |= MTW_TX_ACK;
+ if (params->ibp_flags & (IEEE80211_BPF_RTS | IEEE80211_BPF_CTS)) {
+ error = mtw_sendprot(sc, m, ni,
+ params->ibp_flags & IEEE80211_BPF_RTS ?
+ IEEE80211_PROT_RTSCTS :
+ IEEE80211_PROT_CTSONLY,
+ rate);
+ if (error) {
+ device_printf(sc->sc_dev, "%s:%d %d\n", __FILE__,
+ __LINE__, error);
+ return (error);
+ }
+ opflags |= MTW_TX_TXOP_SIFS;
+ }
+
+ if (sc->sc_epq[0].tx_nfree == 0) {
+ /* let caller free mbuf */
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
+ "sending raw frame, but tx ring is full\n");
+ return (EIO);
+ }
+ data = STAILQ_FIRST(&sc->sc_epq[0].tx_fh);
+ STAILQ_REMOVE_HEAD(&sc->sc_epq[0].tx_fh, next);
+ sc->sc_epq[0].tx_nfree--;
+
+ txd = (struct mtw_txd *)&data->desc;
+ txd->flags = htole16(
+ MTW_TXD_DATA | MTW_TXD_80211 | MTW_TXD_WLAN | MTW_TXD_QSEL_EDCA);
+ txwi = (struct mtw_txwi *)(txd + 1);
+ txwi->wcid = 0xff;
+ txwi->xflags = xflags;
+ txwi->txop = opflags;
+ txwi->flags = 0; /* clear leftover garbage bits */
+
+ data->m = m;
+ data->ni = ni;
+ /* XXX TODO: methodize with MCS rates */
+ for (ridx = 0; ridx < MTW_RIDX_MAX; ridx++)
+ if (rt2860_rates[ridx].rate == rate)
+ break;
+ data->ridx = ridx;
+
+ ieee80211_output_seqno_assign(ni, -1, m);
+
+ mtw_set_tx_desc(sc, data);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
+ m->m_pkthdr.len, rate);
+
+ STAILQ_INSERT_TAIL(&sc->sc_epq[0].tx_qh, data, next);
+
+ usbd_transfer_start(sc->sc_xfer[MTW_BULK_RAW_TX]);
+
+ return (0);
+}
+
+static int
+mtw_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
+ const struct ieee80211_bpf_params *params)
+{
+ struct mtw_softc *sc = ni->ni_ic->ic_softc;
+ int error = 0;
+
+ MTW_LOCK(sc);
+ /* prevent management frames from being sent if we're not ready */
+ if (!(sc->sc_flags & MTW_RUNNING)) {
+ error = ENETDOWN;
+ goto done;
+ }
+
+ if (params == NULL) {
+ /* tx mgt packet */
+ if ((error = mtw_tx_mgt(sc, m, ni)) != 0) {
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT, "mgt tx failed\n");
+ goto done;
+ }
+ } else {
+ /* tx raw packet with param */
+ if ((error = mtw_tx_param(sc, m, ni, params)) != 0) {
+ MTW_DPRINTF(sc, MTW_DEBUG_XMIT,
+ "tx with param failed\n");
+ goto done;
+ }
+ }
+
+done:
+ MTW_UNLOCK(sc);
+
+ if (error != 0)
+ m_freem(m);
+
+ return (error);
+}
+
+static int
+mtw_transmit(struct ieee80211com *ic, struct mbuf *m)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+ int error;
+
+ MTW_LOCK(sc);
+ if ((sc->sc_flags & MTW_RUNNING) == 0) {
+ MTW_UNLOCK(sc);
+ return (ENXIO);
+ }
+ error = mbufq_enqueue(&sc->sc_snd, m);
+ if (error) {
+ MTW_UNLOCK(sc);
+ return (error);
+ }
+ mtw_start(sc);
+ MTW_UNLOCK(sc);
+
+ return (0);
+}
+
+static void
+mtw_start(struct mtw_softc *sc)
+{
+ struct ieee80211_node *ni;
+ struct mbuf *m;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ if ((sc->sc_flags & MTW_RUNNING) == 0)
+ return;
+ while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
+ ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
+ if (mtw_tx(sc, m, ni) != 0) {
+ mbufq_prepend(&sc->sc_snd, m);
+ break;
+ }
+ }
+}
+
+static void
+mtw_parent(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+
+ MTW_LOCK(sc);
+ if (sc->sc_detached) {
+ MTW_UNLOCK(sc);
+ return;
+ }
+
+ if (!(sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
+ mtw_init_locked(sc);
+ MTW_UNLOCK(sc);
+ ieee80211_start_all(ic);
+ return;
+ }
+ if ((sc->sc_flags & MTW_RUNNING) && ic->ic_nrunning > 0) {
+ mtw_update_promisc_locked(sc);
+ MTW_UNLOCK(sc);
+ return;
+ }
+ if ((sc->sc_flags & MTW_RUNNING) && sc->rvp_cnt <= 1 &&
+ ic->ic_nrunning == 0) {
+ mtw_stop(sc);
+ MTW_UNLOCK(sc);
+ return;
+ }
+ MTW_UNLOCK(sc);
+}
+
+static void
+mt7601_set_agc(struct mtw_softc *sc, uint8_t agc)
+{
+ uint8_t bbp;
+
+ mtw_bbp_write(sc, 66, agc);
+ mtw_bbp_write(sc, 195, 0x87);
+ bbp = (agc & 0xf0) | 0x08;
+ mtw_bbp_write(sc, 196, bbp);
+}
+
+static int
+mtw_mcu_calibrate(struct mtw_softc *sc, int func, uint32_t val)
+{
+ struct mtw_mcu_cmd_8 cmd;
+
+ cmd.func = htole32(func);
+ cmd.val = htole32(val);
+ return (mtw_mcu_cmd(sc, 31, &cmd, sizeof(struct mtw_mcu_cmd_8)));
+}
+
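+/*
+ * Indirect RF register access: wait for a previous RF_CSR kick to
+ * clear, then post bank/register/value. The bank field sits at a
+ * different bit position on MT7601 and MT7610 parts.
+ */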
+static int
+mtw_rf_write(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t val)
+{
+ uint32_t tmp;
+ int error, ntries, shift;
+
+ for (ntries = 0; ntries < 10; ntries++) {
+ if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_RF_CSR_KICK))
+ break;
+ }
+ if (ntries == 10)
+ return (ETIMEDOUT);
+
+ if (sc->asic_ver == 0x7601)
+ shift = MT7601_BANK_SHIFT;
+ else
+ shift = MT7610_BANK_SHIFT;
+
+ tmp = MTW_RF_CSR_WRITE | MTW_RF_CSR_KICK | (bank & 0xf) << shift |
+ reg << 8 | val;
+ return (mtw_write(sc, MTW_RF_CSR, tmp));
+}
+
+void
+mtw_select_chan_group(struct mtw_softc *sc, int group)
+{
+ uint32_t tmp;
+ uint8_t bbp;
+
+ /* Tx band 20MHz 2G */
+ mtw_read(sc, MTW_TX_BAND_CFG, &tmp);
+ tmp &= ~(
+ MTW_TX_BAND_SEL_2G | MTW_TX_BAND_SEL_5G | MTW_TX_BAND_UPPER_40M);
+ tmp |= (group == 0) ? MTW_TX_BAND_SEL_2G : MTW_TX_BAND_SEL_5G;
+ mtw_write(sc, MTW_TX_BAND_CFG, tmp);
+
+ /* select 20 MHz bandwidth */
+ mtw_bbp_read(sc, 4, &bbp);
+ bbp &= ~0x18;
+ bbp |= 0x40;
+ mtw_bbp_write(sc, 4, bbp);
+
+ /* calibrate BBP */
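+ /*
+ * The 195/196 pairs below look like indirect BBP accesses:
+ * register 195 selects an internal index, 196 carries the value.
+ */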
+ mtw_bbp_write(sc, 69, 0x12);
+ mtw_bbp_write(sc, 91, 0x07);
+ mtw_bbp_write(sc, 195, 0x23);
+ mtw_bbp_write(sc, 196, 0x17);
+ mtw_bbp_write(sc, 195, 0x24);
+ mtw_bbp_write(sc, 196, 0x06);
+ mtw_bbp_write(sc, 195, 0x81);
+ mtw_bbp_write(sc, 196, 0x12);
+ mtw_bbp_write(sc, 195, 0x83);
+ mtw_bbp_write(sc, 196, 0x17);
+ mtw_rf_write(sc, 5, 8, 0x00);
+ // mtw_mcu_calibrate(sc, 0x6, 0x10001);
+
+ /* set initial AGC value */
+ mt7601_set_agc(sc, 0x14);
+}
+
+static int
+mtw_rf_read(struct mtw_softc *sc, uint8_t bank, uint8_t reg, uint8_t *val)
+{
+ uint32_t tmp;
+ int error, ntries, shift;
+
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_RF_CSR_KICK))
+ break;
+ }
+ if (ntries == 100)
+ return (ETIMEDOUT);
+
+ if (sc->asic_ver == 0x7601)
+ shift = MT7601_BANK_SHIFT;
+ else
+ shift = MT7610_BANK_SHIFT;
+
+ tmp = MTW_RF_CSR_KICK | (bank & 0xf) << shift | reg << 8;
+ if ((error = mtw_write(sc, MTW_RF_CSR, tmp)) != 0)
+ return (error);
+
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_RF_CSR, &tmp)) != 0)
+ return (error);
+ if (!(tmp & MTW_RF_CSR_KICK))
+ break;
+ }
+ if (ntries == 100)
+ return (ETIMEDOUT);
+
+ *val = tmp & 0xff;
+ return (0);
+}
+
+static void
+mt7601_set_chan(struct mtw_softc *sc, u_int chan)
+{
+ uint32_t tmp;
+ uint8_t bbp, rf, txpow1;
+ int i;
+
+ /* find the settings for this channel */
+ for (i = 0; mt7601_rf_chan[i].chan != chan; i++)
+ ;
+
+ mtw_rf_write(sc, 0, 17, mt7601_rf_chan[i].r17);
+ mtw_rf_write(sc, 0, 18, mt7601_rf_chan[i].r18);
+ mtw_rf_write(sc, 0, 19, mt7601_rf_chan[i].r19);
+ mtw_rf_write(sc, 0, 20, mt7601_rf_chan[i].r20);
+
+ /* use Tx power values from EEPROM */
+ txpow1 = sc->txpow1[i];
+
+ /* Tx automatic level control */
+ mtw_read(sc, MTW_TX_ALC_CFG0, &tmp);
+ tmp &= ~0x3f3f;
+ tmp |= (txpow1 & 0x3f);
+ mtw_write(sc, MTW_TX_ALC_CFG0, tmp);
+
+ /* LNA */
+ mtw_bbp_write(sc, 62, 0x37 - sc->lna[0]);
+ mtw_bbp_write(sc, 63, 0x37 - sc->lna[0]);
+ mtw_bbp_write(sc, 64, 0x37 - sc->lna[0]);
+
+ /* VCO calibration */
+ mtw_rf_write(sc, 0, 4, 0x0a);
+ mtw_rf_write(sc, 0, 5, 0x20);
+ mtw_rf_read(sc, 0, 4, &rf);
+ mtw_rf_write(sc, 0, 4, rf | 0x80);
+
+ /* select 20 MHz bandwidth */
+ mtw_bbp_read(sc, 4, &bbp);
+ bbp &= ~0x18;
+ bbp |= 0x40;
+ mtw_bbp_write(sc, 4, bbp);
+ mtw_bbp_write(sc, 178, 0xff);
+}
+
+static int
+mtw_set_chan(struct mtw_softc *sc, struct ieee80211_channel *c)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ u_int chan, group;
+
+ chan = ieee80211_chan2ieee(ic, c);
+ if (chan == 0 || chan == IEEE80211_CHAN_ANY)
+ return (EINVAL);
+
+ /* determine channel group */
+ if (chan <= 14)
+ group = 0;
+ else if (chan <= 64)
+ group = 1;
+ else if (chan <= 128)
+ group = 2;
+ else
+ group = 3;
+
+ if (group != sc->sc_chan_group || !sc->sc_bw_calibrated)
+ mtw_select_chan_group(sc, group);
+
+ sc->sc_chan_group = group;
+
+ /* chipset specific */
+ if (sc->asic_ver == 0x7601)
+ mt7601_set_chan(sc, chan);
+
+ DELAY(1000);
+ return (0);
+}
+
+static void
+mtw_set_channel(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+
+ MTW_LOCK(sc);
+ mtw_set_chan(sc, ic->ic_curchan);
+ MTW_UNLOCK(sc);
+}
+
+static void
+mtw_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans,
+ struct ieee80211_channel chans[])
+{
+ // struct mtw_softc *sc = ic->ic_softc;
+ uint8_t bands[IEEE80211_MODE_BYTES];
+
+ memset(bands, 0, sizeof(bands));
+ setbit(bands, IEEE80211_MODE_11B);
+ setbit(bands, IEEE80211_MODE_11G);
+ setbit(bands, IEEE80211_MODE_11NG);
+
+ /* Note: for now, only support HT20 channels */
+ ieee80211_add_channels_default_2ghz(chans, maxchans, nchans, bands, 0);
+}
+
+static void
+mtw_scan_start(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+
+ MTW_LOCK(sc);
+ /* abort TSF synchronization */
+ mtw_abort_tsf_sync(sc);
+ mtw_set_bssid(sc, ieee80211broadcastaddr);
+
+ MTW_UNLOCK(sc);
+}
+
+static void
+mtw_scan_end(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+
+ MTW_LOCK(sc);
+
+ mtw_enable_tsf_sync(sc);
+ mtw_set_bssid(sc, sc->sc_bssid);
+
+ MTW_UNLOCK(sc);
+}
+
+/*
+ * Could be called from ieee80211_node_timeout()
+ * (non-sleepable thread)
+ */
+static void
+mtw_update_beacon(struct ieee80211vap *vap, int item)
+{
+ struct ieee80211com *ic = vap->iv_ic;
+ struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off;
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct mtw_softc *sc = ic->ic_softc;
+ struct mtw_vap *rvp = MTW_VAP(vap);
+ int mcast = 0;
+ uint32_t i;
+
+ switch (item) {
+ case IEEE80211_BEACON_ERP:
+ mtw_updateslot(ic);
+ break;
+ case IEEE80211_BEACON_HTINFO:
+ mtw_updateprot(ic);
+ break;
+ case IEEE80211_BEACON_TIM:
+ mcast = 1; /*TODO*/
+ break;
+ default:
+ break;
+ }
+
+ setbit(bo->bo_flags, item);
+ if (rvp->beacon_mbuf == NULL) {
+ rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
+ if (rvp->beacon_mbuf == NULL)
+ return;
+ }
+ ieee80211_beacon_update(ni, rvp->beacon_mbuf, mcast);
+
+ i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_update_beacon_cb;
+ sc->cmdq[i].arg0 = vap;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+}
+
+static void
+mtw_update_beacon_cb(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+ struct ieee80211_node *ni = vap->iv_bss;
+ struct mtw_vap *rvp = MTW_VAP(vap);
+ struct ieee80211com *ic = vap->iv_ic;
+ struct mtw_softc *sc = ic->ic_softc;
+ struct mtw_txwi txwi;
+ struct mbuf *m;
+ uint16_t txwisize;
+ uint8_t ridx;
+
+ if (ni->ni_chan == IEEE80211_CHAN_ANYC)
+ return;
+ if (ic->ic_bsschan == IEEE80211_CHAN_ANYC)
+ return;
+
+ /*
+ * No need to call ieee80211_beacon_update(), mtw_update_beacon()
+ * is taking care of appropriate calls.
+ */
+ if (rvp->beacon_mbuf == NULL) {
+ rvp->beacon_mbuf = ieee80211_beacon_alloc(ni);
+ if (rvp->beacon_mbuf == NULL)
+ return;
+ }
+ m = rvp->beacon_mbuf;
+
+ memset(&txwi, 0, sizeof(txwi));
+ txwi.wcid = 0xff;
+ txwi.len = htole16(m->m_pkthdr.len);
+
+ /* send beacons at the lowest available rate */
+ ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ? MTW_RIDX_OFDM6 :
+ MTW_RIDX_CCK1;
+ txwi.phy = htole16(rt2860_rates[ridx].mcs);
+ if (rt2860_rates[ridx].phy == IEEE80211_T_OFDM)
+ txwi.phy |= htole16(MTW_PHY_OFDM);
+ txwi.txop = MTW_TX_TXOP_HT;
+ txwi.flags = MTW_TX_TS;
+ txwi.xflags = MTW_TX_NSEQ;
+
+ txwisize = sizeof(txwi);
+ mtw_write_region_1(sc, MTW_BCN_BASE, (uint8_t *)&txwi, txwisize);
+ mtw_write_region_1(sc, MTW_BCN_BASE + txwisize, mtod(m, uint8_t *),
+ (m->m_pkthdr.len + 1) & ~1);
+}
+
+static void
+mtw_updateprot(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+ uint32_t i;
+
+ i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "test cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_updateprot_cb;
+ sc->cmdq[i].arg0 = ic;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+}
+
+static void
+mtw_updateprot_cb(void *arg)
+{
+ struct ieee80211com *ic = arg;
+ struct mtw_softc *sc = ic->ic_softc;
+ uint32_t tmp;
+
+ tmp = RT2860_RTSTH_EN | RT2860_PROT_NAV_SHORT | RT2860_TXOP_ALLOW_ALL;
+ /* setup protection frame rate (MCS code) */
+ tmp |= (ic->ic_curmode == IEEE80211_MODE_11A) ?
+ rt2860_rates[MTW_RIDX_OFDM6].mcs | MTW_PHY_OFDM :
+ rt2860_rates[MTW_RIDX_CCK11].mcs;
+
+ /* CCK frames don't require protection */
+ mtw_write(sc, MTW_CCK_PROT_CFG, tmp);
+ if (ic->ic_flags & IEEE80211_F_USEPROT) {
+ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
+ tmp |= RT2860_PROT_CTRL_RTS_CTS;
+ else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
+ tmp |= RT2860_PROT_CTRL_CTS;
+ }
+ mtw_write(sc, MTW_OFDM_PROT_CFG, tmp);
+}
+
+static void
+mtw_usb_timeout_cb(void *arg)
+{
+ struct ieee80211vap *vap = arg;
+ struct mtw_softc *sc = vap->iv_ic->ic_softc;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ if (vap->iv_state == IEEE80211_S_SCAN) {
+ MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
+ "timeout caused by scan\n");
+ /* cancel bgscan */
+ ieee80211_cancel_scan(vap);
+ } else {
+ MTW_DPRINTF(sc, MTW_DEBUG_USB | MTW_DEBUG_STATE,
+ "timeout by unknown cause\n");
+ }
+}
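+
+/*
+ * Reset the chip with the MTW_RESET vendor-specific control request.
+ */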
+static int
+mtw_reset(struct mtw_softc *sc)
+{
+ usb_device_request_t req;
+ uint16_t tmp;
+ uint16_t actlen;
+
+ req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
+ req.bRequest = MTW_RESET;
+ USETW(req.wValue, 1);
+ USETW(req.wIndex, 0);
+ USETW(req.wLength, 0);
+ return (usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx,
+ &req, &tmp, 0, &actlen, 1000));
+}
+
+static void
+mtw_update_promisc_locked(struct mtw_softc *sc)
+{
+ uint32_t tmp;
+
+ mtw_read(sc, MTW_RX_FILTR_CFG, &tmp);
+
+ tmp |= MTW_DROP_UC_NOME;
+ if (sc->sc_ic.ic_promisc > 0)
+ tmp &= ~MTW_DROP_UC_NOME;
+
+ mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
+
+ MTW_DPRINTF(sc, MTW_DEBUG_RECV, "%s promiscuous mode\n",
+ (sc->sc_ic.ic_promisc > 0) ? "entering" : "leaving");
+}
+
+static void
+mtw_update_promisc(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+
+ if ((sc->sc_flags & MTW_RUNNING) == 0)
+ return;
+
+ MTW_LOCK(sc);
+ mtw_update_promisc_locked(sc);
+ MTW_UNLOCK(sc);
+}
+
+static void
+mtw_enable_tsf_sync(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint32_t tmp;
+ int error;
+
+ mtw_read(sc, MTW_BCN_TIME_CFG, &tmp);
+ tmp &= ~0x1fffff;
+ tmp |= vap->iv_bss->ni_intval * 16;
+ tmp |= MTW_TSF_TIMER_EN | MTW_TBTT_TIMER_EN;
+
+ /* local TSF is always updated with remote TSF on beacon reception */
+ tmp |= 1 << MTW_TSF_SYNC_MODE_SHIFT;
+ error = mtw_write(sc, MTW_BCN_TIME_CFG, tmp);
+ if (error != 0) {
+ device_printf(sc->sc_dev, "enable_tsf_sync failed error:%d\n",
+ error);
+ }
+}
+
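+/*
+ * Program the legacy multi-rate retry fallback chains: each nibble in
+ * LG_FBK_CFG0/1 names the next lower rate to retry with, as noted in
+ * the per-field comments below.
+ */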
+static void
+mtw_enable_mrr(struct mtw_softc *sc)
+{
+#define CCK(mcs) (mcs)
+#define OFDM(mcs) (1 << 3 | (mcs))
+
+ mtw_write(sc, MTW_LG_FBK_CFG0,
+ OFDM(6) << 28 | /* 54->48 */
+ OFDM(5) << 24 | /* 48->36 */
+ OFDM(4) << 20 | /* 36->24 */
+ OFDM(3) << 16 | /* 24->18 */
+ OFDM(2) << 12 | /* 18->12 */
+ OFDM(1) << 8 | /* 12-> 9 */
+ OFDM(0) << 4 | /* 9-> 6 */
+ OFDM(0)); /* 6-> 6 */
+
+ mtw_write(sc, MTW_LG_FBK_CFG1,
+ CCK(2) << 12 | /* 11->5.5 */
+ CCK(1) << 8 | /* 5.5-> 2 */
+ CCK(0) << 4 | /* 2-> 1 */
+ CCK(0)); /* 1-> 1 */
+#undef OFDM
+#undef CCK
+}
+
+static void
+mtw_set_txpreamble(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint32_t tmp;
+
+ mtw_read(sc, MTW_AUTO_RSP_CFG, &tmp);
+ if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
+ tmp |= MTW_CCK_SHORT_EN;
+ else
+ tmp &= ~MTW_CCK_SHORT_EN;
+ mtw_write(sc, MTW_AUTO_RSP_CFG, tmp);
+}
+
+static void
+mtw_set_basicrates(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+
+ /* set basic rates mask */
+ if (ic->ic_curmode == IEEE80211_MODE_11B)
+ mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x003);
+ else if (ic->ic_curmode == IEEE80211_MODE_11A)
+ mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x150);
+ else /* 11g */
+ mtw_write(sc, MTW_LEGACY_BASIC_RATE, 0x17f);
+}
+
+static void
+mtw_set_bssid(struct mtw_softc *sc, const uint8_t *bssid)
+{
+ mtw_write(sc, MTW_MAC_BSSID_DW0,
+ bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24);
+ mtw_write(sc, MTW_MAC_BSSID_DW1, bssid[4] | bssid[5] << 8);
+}
+
+static void
+mtw_set_macaddr(struct mtw_softc *sc, const uint8_t *addr)
+{
+ mtw_write(sc, MTW_MAC_ADDR_DW0,
+ addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ mtw_write(sc, MTW_MAC_ADDR_DW1, addr[4] | addr[5] << 8 | 0xff << 16);
+}
+
+static void
+mtw_updateslot(struct ieee80211com *ic)
+{
+ struct mtw_softc *sc = ic->ic_softc;
+ uint32_t i;
+
+ i = MTW_CMDQ_GET(&sc->cmdq_store);
+ MTW_DPRINTF(sc, MTW_DEBUG_BEACON, "cmdq_store=%d\n", i);
+ sc->cmdq[i].func = mtw_updateslot_cb;
+ sc->cmdq[i].arg0 = ic;
+ ieee80211_runtask(ic, &sc->cmdq_task);
+}
+
+/* ARGSUSED */
+static void
+mtw_updateslot_cb(void *arg)
+{
+ struct ieee80211com *ic = arg;
+ struct mtw_softc *sc = ic->ic_softc;
+ uint32_t tmp;
+
+ mtw_read(sc, MTW_BKOFF_SLOT_CFG, &tmp);
+ tmp &= ~0xff;
+ tmp |= IEEE80211_GET_SLOTTIME(ic);
+ mtw_write(sc, MTW_BKOFF_SLOT_CFG, tmp);
+}
+
+static void
+mtw_update_mcast(struct ieee80211com *ic)
+{
+}
+
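+/*
+ * Convert a raw RSSI sample to dBm using the per-chain calibration
+ * offsets from the EEPROM and the LNA gain of the current channel
+ * group.
+ */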
+static int8_t
+mtw_rssi2dbm(struct mtw_softc *sc, uint8_t rssi, uint8_t rxchain)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_channel *c = ic->ic_curchan;
+ int delta;
+
+ if (IEEE80211_IS_CHAN_5GHZ(c)) {
+ u_int chan = ieee80211_chan2ieee(ic, c);
+ delta = sc->rssi_5ghz[rxchain];
+
+ /* determine channel group */
+ if (chan <= 64)
+ delta -= sc->lna[1];
+ else if (chan <= 128)
+ delta -= sc->lna[2];
+ else
+ delta -= sc->lna[3];
+ } else
+ delta = sc->rssi_2ghz[rxchain] - sc->lna[0];
+
+ return (-12 - delta - rssi);
+}
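+
+/*
+ * Wait for the baseband to wake up, then load the MT7601 default BBP
+ * register table.
+ */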
+static int
+mt7601_bbp_init(struct mtw_softc *sc)
+{
+ uint8_t bbp;
+ int i, error, ntries;
+
+ /* wait for BBP to wake up */
+ for (ntries = 0; ntries < 20; ntries++) {
+ if ((error = mtw_bbp_read(sc, 0, &bbp)) != 0)
+ return (error);
+ if (bbp != 0 && bbp != 0xff)
+ break;
+ }
+
+ if (ntries == 20)
+ return (ETIMEDOUT);
+
+ mtw_bbp_read(sc, 3, &bbp);
+ mtw_bbp_write(sc, 3, 0);
+ mtw_bbp_read(sc, 105, &bbp);
+ mtw_bbp_write(sc, 105, 0);
+
+ /* initialize BBP registers to default values */
+ for (i = 0; i < nitems(mt7601_def_bbp); i++) {
+ if ((error = mtw_bbp_write(sc, mt7601_def_bbp[i].reg,
+ mt7601_def_bbp[i].val)) != 0)
+ return (error);
+ }
+
+ sc->sc_bw_calibrated = 0;
+
+ return (0);
+}
+
+static int
+mt7601_rf_init(struct mtw_softc *sc)
+{
+ int i, error;
+
+ /* RF bank 0 */
+ for (i = 0; i < nitems(mt7601_rf_bank0); i++) {
+ error = mtw_rf_write(sc, 0, mt7601_rf_bank0[i].reg,
+ mt7601_rf_bank0[i].val);
+ if (error != 0)
+ return (error);
+ }
+ /* RF bank 4 */
+ for (i = 0; i < nitems(mt7601_rf_bank4); i++) {
+ error = mtw_rf_write(sc, 4, mt7601_rf_bank4[i].reg,
+ mt7601_rf_bank4[i].val);
+ if (error != 0)
+ return (error);
+ }
+ /* RF bank 5 */
+ for (i = 0; i < nitems(mt7601_rf_bank5); i++) {
+ error = mtw_rf_write(sc, 5, mt7601_rf_bank5[i].reg,
+ mt7601_rf_bank5[i].val);
+ if (error != 0)
+ return (error);
+ }
+ return (0);
+}
+
+static int
+mtw_txrx_enable(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ uint32_t tmp;
+ int error, ntries;
+
+ mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_TX_EN);
+ for (ntries = 0; ntries < 200; ntries++) {
+ if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
+ return (error);
+ if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
+ break;
+ mtw_delay(sc, 50);
+ }
+ if (ntries == 200)
+ return (ETIMEDOUT);
+
+ DELAY(50);
+
+ tmp |= MTW_RX_DMA_EN | MTW_TX_DMA_EN | MTW_TX_WB_DDONE;
+ mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
+
+ /* enable Rx bulk aggregation (set timeout and limit) */
+ tmp = MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
+ MTW_USB_RX_AGG_TO(128) | MTW_USB_RX_AGG_LMT(2);
+ mtw_write(sc, MTW_USB_DMA_CFG, tmp);
+
+ /* set Rx filter */
+ tmp = MTW_DROP_CRC_ERR | MTW_DROP_PHY_ERR;
+ if (ic->ic_opmode != IEEE80211_M_MONITOR) {
+ tmp |= MTW_DROP_UC_NOME | MTW_DROP_DUPL | MTW_DROP_CTS |
+ MTW_DROP_BA | MTW_DROP_ACK | MTW_DROP_VER_ERR |
+ MTW_DROP_CTRL_RSV | MTW_DROP_CFACK | MTW_DROP_CFEND;
+ if (ic->ic_opmode == IEEE80211_M_STA)
+ tmp |= MTW_DROP_RTS | MTW_DROP_PSPOLL;
+ }
+ mtw_write(sc, MTW_RX_FILTR_CFG, tmp);
+
+ mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN | MTW_MAC_TX_EN);
+ return (0);
+}
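+
+/*
+ * Receiver DC offset calibration; BBP registers 158/159 appear to act
+ * as an index/data pair into the calibration engine.
+ */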
+static int
+mt7601_rxdc_cal(struct mtw_softc *sc)
+{
+ uint32_t tmp;
+ uint8_t bbp;
+ int ntries;
+
+ mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
+ mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_MAC_RX_EN);
+ mtw_bbp_write(sc, 158, 0x8d);
+ mtw_bbp_write(sc, 159, 0xfc);
+ mtw_bbp_write(sc, 158, 0x8c);
+ mtw_bbp_write(sc, 159, 0x4c);
+
+ for (ntries = 0; ntries < 20; ntries++) {
+ DELAY(300);
+ mtw_bbp_write(sc, 158, 0x8c);
+ mtw_bbp_read(sc, 159, &bbp);
+ if (bbp == 0x0c)
+ break;
+ }
+
+ if (ntries == 20)
+ return (ETIMEDOUT);
+
+ mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
+ mtw_bbp_write(sc, 158, 0x8d);
+ mtw_bbp_write(sc, 159, 0xe0);
+ mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
+ return (0);
+}
+
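+/*
+ * Read a measurement from BBP register 49 after selecting its source
+ * (TSSI, packet, Tx rate or temperature) with the flag bits in BBP
+ * register 47.
+ */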
+static int
+mt7601_r49_read(struct mtw_softc *sc, uint8_t flag, int8_t *val)
+{
+ uint8_t bbp;
+
+ mtw_bbp_read(sc, 47, &bbp);
+ bbp = 0x90;
+ mtw_bbp_write(sc, 47, bbp);
+ bbp &= ~0x0f;
+ bbp |= flag;
+ mtw_bbp_write(sc, 47, bbp);
+ return (mtw_bbp_read(sc, 49, val));
+}
+
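+/*
+ * Sample the on-die temperature sensor: bypass the RF front-end,
+ * trigger a measurement via BBP registers 47/22, read the result with
+ * mt7601_r49_read() and restore the RF bypass/setting registers.
+ */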
+static int
+mt7601_rf_temperature(struct mtw_softc *sc, int8_t *val)
+{
+ uint32_t rfb, rfs;
+ uint8_t bbp;
+ int ntries;
+
+ mtw_read(sc, MTW_RF_BYPASS0, &rfb);
+ mtw_read(sc, MTW_RF_SETTING0, &rfs);
+ mtw_write(sc, MTW_RF_BYPASS0, 0);
+ mtw_write(sc, MTW_RF_SETTING0, 0x10);
+ mtw_write(sc, MTW_RF_BYPASS0, 0x10);
+
+ mtw_bbp_read(sc, 47, &bbp);
+ bbp &= ~0x7f;
+ bbp |= 0x10;
+ mtw_bbp_write(sc, 47, bbp);
+
+ mtw_bbp_write(sc, 22, 0x40);
+
+ for (ntries = 0; ntries < 10; ntries++) {
+ mtw_bbp_read(sc, 47, &bbp);
+ if ((bbp & 0x10) == 0)
+ break;
+ }
+ if (ntries == 10)
+ return (ETIMEDOUT);
+
+ mt7601_r49_read(sc, MT7601_R47_TEMP, val);
+
+ mtw_bbp_write(sc, 22, 0);
+
+ mtw_bbp_read(sc, 21, &bbp);
+ bbp |= 0x02;
+ mtw_bbp_write(sc, 21, bbp);
+ bbp &= ~0x02;
+ mtw_bbp_write(sc, 21, bbp);
+
+ mtw_write(sc, MTW_RF_BYPASS0, 0);
+ mtw_write(sc, MTW_RF_SETTING0, rfs);
+ mtw_write(sc, MTW_RF_BYPASS0, rfb);
+ return (0);
+}
+
+static int
+mt7601_rf_setup(struct mtw_softc *sc)
+{
+ uint32_t tmp;
+ uint8_t rf;
+ int error;
+
+ if (sc->sc_rf_calibrated)
+ return (0);
+
+ /* init RF registers */
+ if ((error = mt7601_rf_init(sc)) != 0)
+ return (error);
+
+ /* init frequency offset */
+ mtw_rf_write(sc, 0, 12, sc->rf_freq_offset);
+ mtw_rf_read(sc, 0, 12, &rf);
+
+ /* read temperature */
+ mt7601_rf_temperature(sc, &rf);
+ sc->bbp_temp = rf;
+ device_printf(sc->sc_dev, "BBP temp 0x%x\n", rf);
+
+ mtw_rf_read(sc, 0, 7, &rf);
+ if ((error = mtw_mcu_calibrate(sc, 0x1, 0)) != 0)
+ return (error);
+ mtw_delay(sc, 100);
+ mtw_rf_read(sc, 0, 7, &rf);
+
+ /* Calibrate VCO RF 0/4 */
+ mtw_rf_write(sc, 0, 4, 0x0a);
+ mtw_rf_write(sc, 0, 4, 0x20);
+ mtw_rf_read(sc, 0, 4, &rf);
+ mtw_rf_write(sc, 0, 4, rf | 0x80);
+
+ if ((error = mtw_mcu_calibrate(sc, 0x9, 0)) != 0)
+ return (error);
+ if ((error = mt7601_rxdc_cal(sc)) != 0)
+ return (error);
+ if ((error = mtw_mcu_calibrate(sc, 0x6, 1)) != 0)
+ return (error);
+ if ((error = mtw_mcu_calibrate(sc, 0x6, 0)) != 0)
+ return (error);
+ if ((error = mtw_mcu_calibrate(sc, 0x4, 0)) != 0)
+ return (error);
+ if ((error = mtw_mcu_calibrate(sc, 0x5, 0)) != 0)
+ return (error);
+
+ mtw_read(sc, MTW_LDO_CFG0, &tmp);
+ tmp &= ~(1 << 4);
+ tmp |= (1 << 2);
+ mtw_write(sc, MTW_LDO_CFG0, tmp);
+
+ if ((error = mtw_mcu_calibrate(sc, 0x8, 0)) != 0)
+ return (error);
+ if ((error = mt7601_rxdc_cal(sc)) != 0)
+ return (error);
+
+ sc->sc_rf_calibrated = 1;
+ return (0);
+}
+
+static void
+mtw_set_txrts(struct mtw_softc *sc)
+{
+ uint32_t tmp;
+
+ /* set RTS threshold */
+ mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
+ tmp &= ~0xffff00;
+ tmp |= 0x1000 << MTW_RTS_THRES_SHIFT;
+ mtw_write(sc, MTW_TX_RTS_CFG, tmp);
+}
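+
+/*
+ * MCU command 20 switches the radio on or off; the function code and
+ * value go in the first two words of a 16-byte command.
+ */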
+static int
+mtw_mcu_radio(struct mtw_softc *sc, int func, uint32_t val)
+{
+ struct mtw_mcu_cmd_16 cmd;
+
+ cmd.r1 = htole32(func);
+ cmd.r2 = htole32(val);
+ cmd.r3 = 0;
+ cmd.r4 = 0;
+ return (mtw_mcu_cmd(sc, 20, &cmd, sizeof(struct mtw_mcu_cmd_16)));
+}
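+
+/*
+ * Bring the hardware up: reset the DMA engine and MAC, load the
+ * default register tables, calibrate the RF, tune to the current
+ * channel and start the bulk Rx transfer.
+ */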
+static void
+mtw_init_locked(struct mtw_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ uint32_t tmp;
+ int i, error, ridx, ntries;
+
+ if (ic->ic_nrunning > 1)
+ return;
+
+ mtw_stop(sc);
+
+ for (i = 0; i != MTW_EP_QUEUES; i++)
+ mtw_setup_tx_list(sc, &sc->sc_epq[i]);
+
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp)) != 0)
+ goto fail;
+ if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
+ break;
+ DELAY(1000);
+ }
+ if (ntries == 100) {
+ device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
+ error = ETIMEDOUT;
+ goto fail;
+ }
+ tmp &= 0xff0;
+ tmp |= MTW_TX_WB_DDONE;
+ mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
+
+ mtw_set_leds(sc, MTW_LED_MODE_ON);
+ /* reset MAC and baseband */
+ mtw_write(sc, MTW_MAC_SYS_CTRL, MTW_BBP_HRST | MTW_MAC_SRST);
+ mtw_write(sc, MTW_USB_DMA_CFG, 0);
+ mtw_write(sc, MTW_MAC_SYS_CTRL, 0);
+
+ /* init MAC values */
+ if (sc->asic_ver == 0x7601) {
+ for (i = 0; i < nitems(mt7601_def_mac); i++)
+ mtw_write(sc, mt7601_def_mac[i].reg,
+ mt7601_def_mac[i].val);
+ }
+
+ /* wait while MAC is busy */
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_MAC_STATUS_REG, &tmp)) != 0)
+ goto fail;
+ if (!(tmp & (MTW_RX_STATUS_BUSY | MTW_TX_STATUS_BUSY)))
+ break;
+ DELAY(1000);
+ }
+ if (ntries == 100) {
+ error = ETIMEDOUT;
+ goto fail;
+ }
+
+ /* set MAC address */
+ mtw_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr);
+
+ /* clear WCID attribute table */
+ mtw_set_region_4(sc, MTW_WCID_ATTR(0), 1, 8 * 32);
+
+ mtw_write(sc, MTW_PROT_AUTO_TX_CFG, 0x00830083);
+ mtw_read(sc, MTW_FCE_L2_STUFF, &tmp);
+ tmp &= ~MTW_L2S_WR_MPDU_LEN_EN;
+ mtw_write(sc, MTW_FCE_L2_STUFF, tmp);
+
+ /* RTS config */
+ mtw_set_txrts(sc);
+
+ /* clear Host to MCU mailbox */
+ mtw_write(sc, MTW_BBP_CSR, 0);
+ mtw_write(sc, MTW_H2M_MAILBOX, 0);
+
+ /* clear RX WCID search table */
+ mtw_set_region_4(sc, MTW_WCID_ENTRY(0), 0xffffffff, 512);
+
+ /* abort TSF synchronization */
+ mtw_abort_tsf_sync(sc);
+
+ mtw_read(sc, MTW_US_CYC_CNT, &tmp);
+ tmp = (tmp & ~0xff);
+ if (sc->asic_ver == 0x7601)
+ tmp |= 0x1e;
+ mtw_write(sc, MTW_US_CYC_CNT, tmp);
+
+ /* clear shared key table */
+ mtw_set_region_4(sc, MTW_SKEY(0, 0), 0, 8 * 32);
+
+ /* clear IV/EIV table */
+ mtw_set_region_4(sc, MTW_IVEIV(0), 0, 8 * 32);
+
+ /* clear shared key mode */
+ mtw_write(sc, MTW_SKEY_MODE_0_7, 0);
+ mtw_write(sc, MTW_SKEY_MODE_8_15, 0);
+
+ /* txop truncation */
+ mtw_write(sc, MTW_TXOP_CTRL_CFG, 0x0000583f);
+
+ /* init Tx power for all Tx rates */
+ for (ridx = 0; ridx < 5; ridx++) {
+ if (sc->txpow20mhz[ridx] == 0xffffffff)
+ continue;
+ mtw_write(sc, MTW_TX_PWR_CFG(ridx), sc->txpow20mhz[ridx]);
+ }
+ mtw_write(sc, MTW_TX_PWR_CFG7, 0);
+ mtw_write(sc, MTW_TX_PWR_CFG9, 0);
+
+ mtw_read(sc, MTW_CMB_CTRL, &tmp);
+ tmp &= ~(1 << 18 | 1 << 14);
+ mtw_write(sc, MTW_CMB_CTRL, tmp);
+
+ /* clear USB DMA */
+ mtw_write(sc, MTW_USB_DMA_CFG,
+ MTW_USB_TX_EN | MTW_USB_RX_EN | MTW_USB_RX_AGG_EN |
+ MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
+ mtw_delay(sc, 50);
+ mtw_read(sc, MTW_USB_DMA_CFG, &tmp);
+ tmp &= ~(MTW_USB_TX_CLEAR | MTW_USB_TXOP_HALT | MTW_USB_RX_WL_DROP);
+ mtw_write(sc, MTW_USB_DMA_CFG, tmp);
+
+ /* enable radio */
+ mtw_mcu_radio(sc, 0x31, 0);
+
+ /* init RF registers */
+ if (sc->asic_ver == 0x7601)
+ mt7601_rf_init(sc);
+
+ /* init baseband registers */
+ if (sc->asic_ver == 0x7601)
+ error = mt7601_bbp_init(sc);
+
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not initialize BBP\n");
+ goto fail;
+ }
+
+ /* setup and calibrate RF */
+ error = mt7601_rf_setup(sc);
+
+ if (error != 0) {
+ device_printf(sc->sc_dev, "could not initialize RF\n");
+ goto fail;
+ }
+
+ /* select default channel */
+ mtw_set_chan(sc, ic->ic_curchan);
+
+ /* setup initial protection mode */
+ mtw_updateprot_cb(ic);
+
+ sc->sc_flags |= MTW_RUNNING;
+ sc->cmdq_run = MTW_CMDQ_GO;
+ for (i = 0; i != MTW_N_XFER; i++)
+ usbd_xfer_set_stall(sc->sc_xfer[i]);
+
+ usbd_transfer_start(sc->sc_xfer[MTW_BULK_RX]);
+
+ error = mtw_txrx_enable(sc);
+ if (error != 0)
+ goto fail;
+
+ return;
+
+fail:
+ mtw_stop(sc);
+}
+
+static void
+mtw_stop(void *arg)
+{
+ struct mtw_softc *sc = (struct mtw_softc *)arg;
+ uint32_t tmp;
+ int i, ntries, error;
+
+ MTW_LOCK_ASSERT(sc, MA_OWNED);
+
+ sc->sc_flags &= ~MTW_RUNNING;
+
+ sc->ratectl_run = MTW_RATECTL_OFF;
+ sc->cmdq_run = sc->cmdq_key_set;
+
+ MTW_UNLOCK(sc);
+
+ for (i = 0; i < MTW_N_XFER; i++)
+ usbd_transfer_drain(sc->sc_xfer[i]);
+
+ MTW_LOCK(sc);
+
+ mtw_drain_mbufq(sc);
+
+ if (sc->rx_m != NULL) {
+ m_free(sc->rx_m);
+ sc->rx_m = NULL;
+ }
+
+ /* Disable Tx/Rx DMA. */
+ mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp);
+ tmp &= ~(MTW_RX_DMA_EN | MTW_TX_DMA_EN);
+ mtw_write(sc, MTW_WPDMA_GLO_CFG, tmp);
+
+ for (ntries = 0; ntries < 100; ntries++) {
+ if (mtw_read(sc, MTW_WPDMA_GLO_CFG, &tmp) != 0)
+ break;
+ if ((tmp & (MTW_TX_DMA_BUSY | MTW_RX_DMA_BUSY)) == 0)
+ break;
+ DELAY(10);
+ }
+ if (ntries == 100) {
+ device_printf(sc->sc_dev, "timeout waiting for DMA engine\n");
+ }
+
+ /* stop MAC Tx/Rx */
+ mtw_read(sc, MTW_MAC_SYS_CTRL, &tmp);
+ tmp &= ~(MTW_MAC_RX_EN | MTW_MAC_TX_EN);
+ mtw_write(sc, MTW_MAC_SYS_CTRL, tmp);
+
+ /* disable RTS retry */
+ mtw_read(sc, MTW_TX_RTS_CFG, &tmp);
+ tmp &= ~0xff;
+ mtw_write(sc, MTW_TX_RTS_CFG, tmp);
+
+ /* US_CYC_CFG */
+ mtw_read(sc, MTW_US_CYC_CNT, &tmp);
+ tmp = (tmp & ~0xff);
+ mtw_write(sc, MTW_US_CYC_CNT, tmp);
+
+ /* stop PBF */
+ mtw_read(sc, MTW_PBF_CFG, &tmp);
+ tmp &= ~0x3;
+ mtw_write(sc, MTW_PBF_CFG, tmp);
+
+ /* wait for pending Tx to complete */
+ for (ntries = 0; ntries < 100; ntries++) {
+ if ((error = mtw_read(sc, MTW_TXRXQ_PCNT, &tmp)) != 0)
+ break;
+ if ((tmp & MTW_TX2Q_PCNT_MASK) == 0)
+ break;
+ }
+}
+
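+/*
+ * Sleep for 'ms' milliseconds; usb_pause_mtx() releases the softc
+ * mutex for the duration of the sleep when it is held.
+ */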
+static void
+mtw_delay(struct mtw_softc *sc, u_int ms)
+{
+ usb_pause_mtx(mtx_owned(&sc->sc_mtx) ? &sc->sc_mtx : NULL,
+ USB_MS_TO_TICKS(ms));
+}
+
+static void
+mtw_update_chw(struct ieee80211com *ic)
+{
+ printf("%s: TODO\n", __func__);
+}
+
+static int
+mtw_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
+{
+ /* For now, no A-MPDU TX support in the driver */
+ return (0);
+}
+
+static device_method_t mtw_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, mtw_match),
+ DEVMETHOD(device_attach, mtw_attach),
+ DEVMETHOD(device_detach, mtw_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t mtw_driver = {
+ .name = "mtw",
+ .methods = mtw_methods,
+ .size = sizeof(struct mtw_softc),
+};
+
+DRIVER_MODULE(mtw, uhub, mtw_driver, mtw_driver_loaded, NULL);
+MODULE_DEPEND(mtw, wlan, 1, 1, 1);
+MODULE_DEPEND(mtw, usb, 1, 1, 1);
+MODULE_DEPEND(mtw, firmware, 1, 1, 1);
+MODULE_VERSION(mtw, 1);
+USB_PNP_HOST_INFO(mtw_devs);
diff --git a/sys/dev/usb/wlan/if_mtwreg.h b/sys/dev/usb/wlan/if_mtwreg.h
new file mode 100644
index 000000000000..05af4f4f6cf3
--- /dev/null
+++ b/sys/dev/usb/wlan/if_mtwreg.h
@@ -0,0 +1,1439 @@
+/* $OpenBSD: mtwreg.h,v 1.2 2022/07/27 06:41:04 hastings Exp $ */
+/*
+ * Copyright (c) 2007 Damien Bergamini <damien.bergamini@free.fr>
+ * Copyright (c) 2021 James Hastings
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define MTW_ASIC_VER 0x0000
+#define MTW_CMB_CTRL 0x0020
+#define MTW_EFUSE_CTRL 0x0024
+#define MTW_EFUSE_DATA0 0x0028
+#define MTW_EFUSE_DATA1 0x002c
+#define MTW_EFUSE_DATA2 0x0030
+#define MTW_EFUSE_DATA3 0x0034
+#define MTW_OSC_CTRL 0x0038
+#define MTW_COEX_CFG0 0x0040
+#define MTW_PLL_CTRL 0x0050
+#define MTW_LDO_CFG0 0x006c
+#define MTW_LDO_CFG1 0x0070
+#define MTW_WLAN_CTRL 0x0080
+
+/* SCH/DMA registers */
+#define MTW_INT_STATUS 0x0200
+#define RT2860_INT_MASK 0x0204
+#define MTW_WPDMA_GLO_CFG 0x0208
+#define RT2860_WPDMA_RST_IDX 0x020c
+#define RT2860_DELAY_INT_CFG 0x0210
+#define MTW_WMM_AIFSN_CFG 0x0214
+#define MTW_WMM_CWMIN_CFG 0x0218
+#define MTW_WMM_CWMAX_CFG 0x021c
+#define MTW_WMM_TXOP0_CFG 0x0220
+#define MTW_WMM_TXOP1_CFG 0x0224
+#define RT2860_GPIO_CTRL 0x0228
+#define RT2860_MCU_CMD_REG 0x022c
+#define MTW_MCU_DMA_ADDR 0x0230
+#define MTW_MCU_DMA_LEN 0x0234
+#define MTW_USB_DMA_CFG 0x0238
+#define RT2860_TX_BASE_PTR(qid) (0x0230 + (qid) * 16)
+#define RT2860_TX_MAX_CNT(qid) (0x0234 + (qid) * 16)
+#define RT2860_TX_CTX_IDX(qid) (0x0238 + (qid) * 16)
+#define RT2860_TX_DTX_IDX(qid) (0x023c + (qid) * 16)
+#define MTW_TSO_CTRL 0x0250
+#define MTW_HDR_TRANS_CTRL 0x0260
+#define RT2860_RX_BASE_PTR 0x0290
+#define RT2860_RX_MAX_CNT 0x0294
+#define RT2860_RX_CALC_IDX 0x0298
+#define RT2860_FS_DRX_IDX 0x029c
+#define MTW_US_CYC_CNT 0x02a4
+
+#define MTW_TX_RING_BASE 0x0300
+#define MTW_RX_RING_BASE 0x03c0
+
+/* Packet Buffer registers */
+#define MTW_SYS_CTRL 0x0400
+#define MTW_PBF_CFG 0x0404
+#define MTW_TX_MAX_PCNT 0x0408
+#define MTW_RX_MAX_PCNT 0x040c
+#define MTW_PBF_CTRL 0x0410
+#define RT2860_BUF_CTRL 0x0410
+#define RT2860_MCU_INT_STA 0x0414
+#define RT2860_MCU_INT_ENA 0x0418
+#define RT2860_TXQ_IO(qid) (0x041c + (qid) * 4)
+#define MTW_BCN_OFFSET0 0x041c
+#define MTW_BCN_OFFSET1 0x0420
+#define MTW_BCN_OFFSET2 0x0424
+#define MTW_BCN_OFFSET3 0x0428
+#define RT2860_RX0Q_IO 0x0424
+#define MTW_RXQ_STA 0x0430
+#define MTW_TXQ_STA 0x0434
+#define MTW_TXRXQ_PCNT 0x0438
+
+/* RF registers */
+#define MTW_RF_CSR 0x0500
+#define MTW_RF_BYPASS0 0x0504
+#define MTW_RF_BYPASS1 0x0508
+#define MTW_RF_SETTING0 0x050C
+#define MTW_RF_MISC 0x0518
+#define MTW_RF_DATA_WR 0x0524
+#define MTW_RF_CTRL 0x0528
+#define MTW_RF_DATA_RD 0x052c
+
+/* MCU registers */
+#define MTW_MCU_RESET_CTL 0x070c
+#define MTW_MCU_INT_LEVEL 0x0718
+#define MTW_MCU_COM_REG0 0x0730
+#define MTW_MCU_COM_REG1 0x0734
+#define MTW_MCU_COM_REG2 0x0738
+#define MTW_MCU_COM_REG3 0x073c
+#define MTW_FCE_PSE_CTRL 0x0800
+#define MTW_FCE_PARAMETERS 0x0804
+#define MTW_FCE_CSO 0x0808
+#define MTW_FCE_L2_STUFF 0x080c
+#define MTW_FCE_WLAN_FLOW_CTRL 0x0824
+#define MTW_TX_CPU_FCE_BASE 0x09a0
+#define MTW_TX_CPU_FCE_MAX_COUNT 0x09a4
+#define MTW_MCU_FW_IDX 0x09a8
+#define MTW_FCE_PDMA 0x09c4
+#define MTW_FCE_SKIP_FS 0x0a6c
+
+/* MAC registers */
+#define MTW_MAC_VER_ID 0x1000
+#define MTW_MAC_SYS_CTRL 0x1004
+#define MTW_MAC_ADDR_DW0 0x1008
+#define MTW_MAC_ADDR_DW1 0x100c
+#define MTW_MAC_BSSID_DW0 0x1010
+#define MTW_MAC_BSSID_DW1 0x1014
+#define MTW_MAX_LEN_CFG 0x1018
+#define MTW_BBP_CSR 0x101c
+#define MTW_LED_CFG 0x102c
+#define MTW_AMPDU_MAX_LEN_20M1S 0x1030
+#define MTW_AMPDU_MAX_LEN_20M2S 0x1034
+#define MTW_AMPDU_MAX_LEN_40M1S 0x1038
+#define MTW_AMPDU_MAX_LEN_40M2S 0x103c
+#define MTW_AMPDU_MAX_LEN 0x1040
+
+/* MAC Timing control registers */
+#define MTW_XIFS_TIME_CFG 0x1100
+#define MTW_BKOFF_SLOT_CFG 0x1104
+#define RT2860_NAV_TIME_CFG 0x1108
+#define RT2860_CH_TIME_CFG 0x110c
+#define RT2860_PBF_LIFE_TIMER 0x1110
+#define MTW_BCN_TIME_CFG 0x1114
+#define MTW_TBTT_SYNC_CFG 0x1118
+#define MTW_TSF_TIMER_DW0 0x111c
+#define MTW_TSF_TIMER_DW1 0x1120
+#define RT2860_TBTT_TIMER 0x1124
+#define MTW_INT_TIMER_CFG 0x1128
+#define RT2860_INT_TIMER_EN 0x112c
+#define RT2860_CH_IDLE_TIME 0x1130
+
+/* MAC Power Save configuration registers */
+#define MTW_MAC_STATUS_REG 0x1200
+#define MTW_PWR_PIN_CFG 0x1204
+#define MTW_AUTO_WAKEUP_CFG 0x1208
+#define MTW_AUX_CLK_CFG 0x120c
+#define MTW_BBP_PA_MODE_CFG0 0x1214
+#define MTW_BBP_PA_MODE_CFG1 0x1218
+#define MTW_RF_PA_MODE_CFG0 0x121c
+#define MTW_RF_PA_MODE_CFG1 0x1220
+#define MTW_RF_PA_MODE_ADJ0 0x1228
+#define MTW_RF_PA_MODE_ADJ1 0x122c
+#define MTW_DACCLK_EN_DLY_CFG 0x1264 /* MT7612 */
+
+/* MAC TX configuration registers */
+#define MTW_EDCA_AC_CFG(aci) (0x1300 + (aci) * 4)
+#define MTW_EDCA_TID_AC_MAP 0x1310
+#define MTW_TX_PWR_CFG(ridx) (0x1314 + (ridx) * 4)
+#define MTW_TX_PIN_CFG 0x1328
+#define MTW_TX_BAND_CFG 0x132c
+#define MTW_TX_SW_CFG0 0x1330
+#define MTW_TX_SW_CFG1 0x1334
+#define MTW_TX_SW_CFG2 0x1338
+#define RT2860_TXOP_THRES_CFG 0x133c
+#define MTW_TXOP_CTRL_CFG 0x1340
+#define MTW_TX_RTS_CFG 0x1344
+#define MTW_TX_TIMEOUT_CFG 0x1348
+#define MTW_TX_RETRY_CFG 0x134c
+#define MTW_TX_LINK_CFG 0x1350
+#define MTW_HT_FBK_CFG0 0x1354
+#define MTW_HT_FBK_CFG1 0x1358
+#define MTW_LG_FBK_CFG0 0x135c
+#define MTW_LG_FBK_CFG1 0x1360
+#define MTW_CCK_PROT_CFG 0x1364
+#define MTW_OFDM_PROT_CFG 0x1368
+#define MTW_MM20_PROT_CFG 0x136c
+#define MTW_MM40_PROT_CFG 0x1370
+#define MTW_GF20_PROT_CFG 0x1374
+#define MTW_GF40_PROT_CFG 0x1378
+#define RT2860_EXP_CTS_TIME 0x137c
+#define MTW_EXP_ACK_TIME 0x1380
+#define MTW_TX_PWR_CFG5 0x1384
+#define MTW_TX_PWR_CFG6 0x1388
+#define MTW_TX_PWR_EXT_CFG(ridx) (0x1390 + (ridx) * 4)
+#define MTW_TX0_RF_GAIN_CORR 0x13a0
+#define MTW_TX1_RF_GAIN_CORR 0x13a4
+#define MTW_TX0_RF_GAIN_ATTEN 0x13a8
+#define MTW_TX_ALC_CFG3 0x13ac
+#define MTW_TX_ALC_CFG0 0x13b0
+#define MTW_TX_ALC_CFG1 0x13b4
+#define MTW_TX_ALC_CFG4 0x13c0
+#define MTW_TX_ALC_VGA3 0x13c8
+#define MTW_TX_PWR_CFG7 0x13d4
+#define MTW_TX_PWR_CFG8 0x13d8
+#define MTW_TX_PWR_CFG9 0x13dc
+#define MTW_VHT20_PROT_CFG 0x13e0
+#define MTW_VHT40_PROT_CFG 0x13e4
+#define MTW_VHT80_PROT_CFG 0x13e8
+#define MTW_TX_PIFS_CFG 0x13ec /* MT761X */
+
+/* MAC RX configuration registers */
+#define MTW_RX_FILTR_CFG 0x1400
+#define MTW_AUTO_RSP_CFG 0x1404
+#define MTW_LEGACY_BASIC_RATE 0x1408
+#define MTW_HT_BASIC_RATE 0x140c
+#define MTW_HT_CTRL_CFG 0x1410
+#define RT2860_SIFS_COST_CFG 0x1414
+#define RT2860_RX_PARSER_CFG 0x1418
+
+/* MAC Security configuration registers */
+#define RT2860_TX_SEC_CNT0 0x1500
+#define RT2860_RX_SEC_CNT0 0x1504
+#define RT2860_CCMP_FC_MUTE 0x1508
+#define MTW_PN_PAD_MODE 0x150c /* MT761X */
+
+/* MAC HCCA/PSMP configuration registers */
+#define MTW_TXOP_HLDR_ADDR0 0x1600
+#define MTW_TXOP_HLDR_ADDR1 0x1604
+#define MTW_TXOP_HLDR_ET 0x1608
+#define RT2860_QOS_CFPOLL_RA_DW0 0x160c
+#define RT2860_QOS_CFPOLL_A1_DW1 0x1610
+#define RT2860_QOS_CFPOLL_QC 0x1614
+#define MTW_PROT_AUTO_TX_CFG 0x1648
+
+/* MAC Statistics Counters */
+#define MTW_RX_STA_CNT0 0x1700
+#define MTW_RX_STA_CNT1 0x1704
+#define MTW_RX_STA_CNT2 0x1708
+#define MTW_TX_STA_CNT0 0x170c
+#define MTW_TX_STA_CNT1 0x1710
+#define MTW_TX_STA_CNT2 0x1714
+#define MTW_TX_STAT_FIFO 0x1718
+
+/* RX WCID search table */
+#define MTW_WCID_ENTRY(wcid) (0x1800 + (wcid) * 8)
+
+/* MT761x Baseband */
+#define MTW_BBP_CORE(x) (0x2000 + (x) * 4)
+#define MTW_BBP_IBI(x) (0x2100 + (x) * 4)
+#define MTW_BBP_AGC(x) (0x2300 + (x) * 4)
+#define MTW_BBP_TXC(x) (0x2400 + (x) * 4)
+#define MTW_BBP_RXC(x) (0x2500 + (x) * 4)
+#define MTW_BBP_TXQ(x) (0x2600 + (x) * 4)
+#define MTW_BBP_TXBE(x) (0x2700 + (x) * 4)
+#define MTW_BBP_RXFE(x) (0x2800 + (x) * 4)
+#define MTW_BBP_RXO(x) (0x2900 + (x) * 4)
+#define MTW_BBP_DFS(x) (0x2a00 + (x) * 4)
+#define MTW_BBP_TR(x) (0x2b00 + (x) * 4)
+#define MTW_BBP_CAL(x) (0x2c00 + (x) * 4)
+#define MTW_BBP_DSC(x) (0x2e00 + (x) * 4)
+#define MTW_BBP_PFMU(x) (0x2f00 + (x) * 4)
+
+#define MTW_SKEY_MODE_16_23 0x7008
+#define MTW_SKEY_MODE_24_31 0x700c
+#define MTW_H2M_MAILBOX 0x7010
+
+/* Pair-wise key table */
+#define MTW_PKEY(wcid) (0x8000 + (wcid) * 32)
+
+/* USB 3.0 DMA */
+#define MTW_USB_U3DMA_CFG 0x9018
+
+/* IV/EIV table */
+#define MTW_IVEIV(wcid) (0xa000 + (wcid) * 8)
+
+/* WCID attribute table */
+#define MTW_WCID_ATTR(wcid) (0xa800 + (wcid) * 4)
+
+/* Shared Key Table */
+#define MTW_SKEY(vap, kidx) (((vap) & 8) ? MTW_SKEY_1(vap, kidx) : \
+ MTW_SKEY_0(vap, kidx))
+#define MTW_SKEY_0(vap, kidx) (0xac00 + (4 * (vap) + (kidx)) * 32)
+#define MTW_SKEY_1(vap, kidx) (0xb400 + (4 * ((vap) & 7) + (kidx)) * 32)
+
+/* Shared Key Mode */
+#define MTW_SKEY_MODE_0_7 0xb000
+#define MTW_SKEY_MODE_8_15 0xb004
+
+#define MTW_SKEY_MODE_BASE 0xb000
+
+/* Beacon */
+#define MTW_BCN_BASE 0xc000
+
+/* possible flags for register CMB_CTRL 0x0020 */
+#define MTW_PLL_LD (1U << 23)
+#define MTW_XTAL_RDY (1U << 22)
+
+/* possible flags for register EFUSE_CTRL 0x0024 */
+#define MTW_SEL_EFUSE (1U << 31)
+#define MTW_EFSROM_KICK (1U << 30)
+#define MTW_EFSROM_AIN_MASK 0x03ff0000
+#define MTW_EFSROM_AIN_SHIFT 16
+#define MTW_EFSROM_MODE_MASK 0x000000c0
+#define MTW_EFUSE_AOUT_MASK 0x0000003f
+
+/* possible flags for register OSC_CTRL 0x0038 */
+#define MTW_OSC_EN (1U << 31)
+#define MTW_OSC_CAL_REQ (1U << 30)
+#define MTW_OSC_CLK_32K_VLD (1U << 29)
+#define MTW_OSC_CAL_ACK (1U << 28)
+#define MTW_OSC_CAL_CNT (0xfff << 16)
+#define MTW_OSC_REF_CYCLE 0x1fff
+
+/* possible flags for register WLAN_CTRL 0x0080 */
+#define MTW_GPIO_OUT_OE_ALL (0xff << 24)
+#define MTW_GPIO_OUT_ALL (0xff << 16)
+#define MTW_GPIO_IN_ALL (0xff << 8)
+#define MTW_THERM_CKEN (1U << 9)
+#define MTW_THERM_RST (1U << 8)
+#define MTW_INV_TR_SW0 (1U << 6)
+#define MTW_FRC_WL_ANT_SET (1U << 5)
+#define MTW_PCIE_APP0_CLK_REQ (1U << 4)
+#define MTW_WLAN_RESET (1U << 3)
+#define MTW_WLAN_RESET_RF (1U << 2)
+#define MTW_WLAN_CLK_EN (1U << 1)
+#define MTW_WLAN_EN (1U << 0)
+
+/* possible flags for registers INT_STATUS/INT_MASK 0x0200 */
+#define RT2860_TX_COHERENT (1 << 17)
+#define RT2860_RX_COHERENT (1 << 16)
+#define RT2860_MAC_INT_4 (1 << 15)
+#define RT2860_MAC_INT_3 (1 << 14)
+#define RT2860_MAC_INT_2 (1 << 13)
+#define RT2860_MAC_INT_1 (1 << 12)
+#define RT2860_MAC_INT_0 (1 << 11)
+#define RT2860_TX_RX_COHERENT (1 << 10)
+#define RT2860_MCU_CMD_INT (1 << 9)
+#define RT2860_TX_DONE_INT5 (1 << 8)
+#define RT2860_TX_DONE_INT4 (1 << 7)
+#define RT2860_TX_DONE_INT3 (1 << 6)
+#define RT2860_TX_DONE_INT2 (1 << 5)
+#define RT2860_TX_DONE_INT1 (1 << 4)
+#define RT2860_TX_DONE_INT0 (1 << 3)
+#define RT2860_RX_DONE_INT (1 << 2)
+#define RT2860_TX_DLY_INT (1 << 1)
+#define RT2860_RX_DLY_INT (1 << 0)
+
+/* possible flags for register WPDMA_GLO_CFG 0x0208 */
+#define MTW_HDR_SEG_LEN_SHIFT 8
+#define MTW_BIG_ENDIAN (1 << 7)
+#define MTW_TX_WB_DDONE (1 << 6)
+#define MTW_WPDMA_BT_SIZE_SHIFT 4
+#define MTW_WPDMA_BT_SIZE16 0
+#define MTW_WPDMA_BT_SIZE32 1
+#define MTW_WPDMA_BT_SIZE64 2
+#define MTW_WPDMA_BT_SIZE128 3
+#define MTW_RX_DMA_BUSY (1 << 3)
+#define MTW_RX_DMA_EN (1 << 2)
+#define MTW_TX_DMA_BUSY (1 << 1)
+#define MTW_TX_DMA_EN (1 << 0)
+
+/* possible flags for register DELAY_INT_CFG */
+#define RT2860_TXDLY_INT_EN (1U << 31)
+#define RT2860_TXMAX_PINT_SHIFT 24
+#define RT2860_TXMAX_PTIME_SHIFT 16
+#define RT2860_RXDLY_INT_EN (1U << 15)
+#define RT2860_RXMAX_PINT_SHIFT 8
+#define RT2860_RXMAX_PTIME_SHIFT 0
+
+/* possible flags for register GPIO_CTRL */
+#define RT2860_GPIO_D_SHIFT 8
+#define RT2860_GPIO_O_SHIFT 0
+
+/* possible flags for register MCU_DMA_ADDR 0x0230 */
+#define MTW_MCU_READY (1U << 0)
+
+/* possible flags for register USB_DMA_CFG 0x0238 */
+#define MTW_USB_TX_BUSY (1U << 31)
+#define MTW_USB_RX_BUSY (1U << 30)
+#define MTW_USB_EPOUT_VLD_SHIFT 24
+#define MTW_USB_RX_WL_DROP (1U << 25)
+#define MTW_USB_TX_EN (1U << 23)
+#define MTW_USB_RX_EN (1U << 22)
+#define MTW_USB_RX_AGG_EN (1U << 21)
+#define MTW_USB_TXOP_HALT (1U << 20)
+#define MTW_USB_TX_CLEAR (1U << 19)
+#define MTW_USB_PHY_WD_EN (1U << 16)
+#define MTW_USB_PHY_MAN_RST (1U << 15)
+#define MTW_USB_RX_AGG_LMT(x) ((x) << 8) /* in unit of 1KB */
+#define MTW_USB_RX_AGG_TO(x) ((x) & 0xff) /* in unit of 33ns */
+
+/* possible flags for register US_CYC_CNT 0x02a4 */
+#define RT2860_TEST_EN (1 << 24)
+#define RT2860_TEST_SEL_SHIFT 16
+#define RT2860_BT_MODE_EN (1 << 8)
+#define RT2860_US_CYC_CNT_SHIFT 0
+
+/* possible flags for register PBF_CFG 0x0404 */
+#define MTW_PBF_CFG_RX_DROP (1 << 8)
+#define MTW_PBF_CFG_RX0Q_EN (1 << 4)
+#define MTW_PBF_CFG_TX3Q_EN (1 << 3)
+#define MTW_PBF_CFG_TX2Q_EN (1 << 2)
+#define MTW_PBF_CFG_TX1Q_EN (1 << 1)
+#define MTW_PBF_CFG_TX0Q_EN (1 << 0)
+
+/* possible flags for register BUF_CTRL 0x0410 */
+#define RT2860_WRITE_TXQ(qid) (1 << (11 - (qid)))
+#define RT2860_NULL0_KICK (1 << 7)
+#define RT2860_NULL1_KICK (1 << 6)
+#define RT2860_BUF_RESET (1 << 5)
+#define RT2860_READ_TXQ(qid) (1 << (3 - (qid)))
+#define RT2860_READ_RX0Q (1 << 0)
+
+/* possible flags for registers MCU_INT_STA/MCU_INT_ENA */
+#define RT2860_MCU_MAC_INT_8 (1 << 24)
+#define RT2860_MCU_MAC_INT_7 (1 << 23)
+#define RT2860_MCU_MAC_INT_6 (1 << 22)
+#define RT2860_MCU_MAC_INT_4 (1 << 20)
+#define RT2860_MCU_MAC_INT_3 (1 << 19)
+#define RT2860_MCU_MAC_INT_2 (1 << 18)
+#define RT2860_MCU_MAC_INT_1 (1 << 17)
+#define RT2860_MCU_MAC_INT_0 (1 << 16)
+#define RT2860_DTX0_INT (1 << 11)
+#define RT2860_DTX1_INT (1 << 10)
+#define RT2860_DTX2_INT (1 << 9)
+#define RT2860_DRX0_INT (1 << 8)
+#define RT2860_HCMD_INT (1 << 7)
+#define RT2860_N0TX_INT (1 << 6)
+#define RT2860_N1TX_INT (1 << 5)
+#define RT2860_BCNTX_INT (1 << 4)
+#define RT2860_MTX0_INT (1 << 3)
+#define RT2860_MTX1_INT (1 << 2)
+#define RT2860_MTX2_INT (1 << 1)
+#define RT2860_MRX0_INT (1 << 0)
+
+/* possible flags for register TXRXQ_PCNT 0x0438 */
+#define MTW_RX0Q_PCNT_MASK 0xff000000
+#define MTW_TX2Q_PCNT_MASK 0x00ff0000
+#define MTW_TX1Q_PCNT_MASK 0x0000ff00
+#define MTW_TX0Q_PCNT_MASK 0x000000ff
+
+/* possible flags for register RF_CSR_CFG 0x0500 */
+#define MTW_RF_CSR_KICK (1U << 31)
+#define MTW_RF_CSR_WRITE (1U << 30)
+#define MT7610_BANK_SHIFT 15
+#define MT7601_BANK_SHIFT 14
+
+/* possible flags for register FCE_L2_STUFF 0x080c */
+#define MTW_L2S_WR_MPDU_LEN_EN (1 << 4)
+
+/* possible flag for register DEBUG_INDEX */
+#define RT5592_SEL_XTAL (1U << 31)
+
+/* possible flags for register MAC_SYS_CTRL 0x1004 */
+#define MTW_RX_TS_EN (1 << 7)
+#define MTW_WLAN_HALT_EN (1 << 6)
+#define MTW_PBF_LOOP_EN (1 << 5)
+#define MTW_CONT_TX_TEST (1 << 4)
+#define MTW_MAC_RX_EN (1 << 3)
+#define MTW_MAC_TX_EN (1 << 2)
+#define MTW_BBP_HRST (1 << 1)
+#define MTW_MAC_SRST (1 << 0)
+
+/* possible flags for register MAC_BSSID_DW1 0x100c */
+#define RT2860_MULTI_BCN_NUM_SHIFT 18
+#define RT2860_MULTI_BSSID_MODE_SHIFT 16
+
+/* possible flags for register MAX_LEN_CFG 0x1018 */
+#define RT2860_MIN_MPDU_LEN_SHIFT 16
+#define RT2860_MAX_PSDU_LEN_SHIFT 12
+#define RT2860_MAX_PSDU_LEN8K 0
+#define RT2860_MAX_PSDU_LEN16K 1
+#define RT2860_MAX_PSDU_LEN32K 2
+#define RT2860_MAX_PSDU_LEN64K 3
+#define RT2860_MAX_MPDU_LEN_SHIFT 0
+
+/* possible flags for registers BBP_CSR_CFG 0x101c */
+#define MTW_BBP_CSR_KICK (1 << 17)
+#define MTW_BBP_CSR_READ (1 << 16)
+#define MTW_BBP_ADDR_SHIFT 8
+#define MTW_BBP_DATA_SHIFT 0
+
+/* possible flags for register LED_CFG */
+#define MTW_LED_MODE_ON 0
+#define MTW_LED_MODE_DIM 1
+#define MTW_LED_MODE_BLINK_TX 2
+#define MTW_LED_MODE_SLOW_BLINK 3
+
+/* possible flags for register XIFS_TIME_CFG 0x1100 */
+#define MTW_BB_RXEND_EN (1 << 29)
+#define MTW_EIFS_TIME_SHIFT 20
+#define MTW_OFDM_XIFS_TIME_SHIFT 16
+#define MTW_OFDM_SIFS_TIME_SHIFT 8
+#define MTW_CCK_SIFS_TIME_SHIFT 0
+
+/* possible flags for register BKOFF_SLOT_CFG 0x1104 */
+#define MTW_CC_DELAY_TIME_SHIFT 8
+#define MTW_SLOT_TIME 0
+
+/* possible flags for register NAV_TIME_CFG */
+#define RT2860_NAV_UPD (1U << 31)
+#define RT2860_NAV_UPD_VAL_SHIFT 16
+#define RT2860_NAV_CLR_EN (1U << 15)
+#define RT2860_NAV_TIMER_SHIFT 0
+
+/* possible flags for register CH_TIME_CFG */
+#define RT2860_EIFS_AS_CH_BUSY (1 << 4)
+#define RT2860_NAV_AS_CH_BUSY (1 << 3)
+#define RT2860_RX_AS_CH_BUSY (1 << 2)
+#define RT2860_TX_AS_CH_BUSY (1 << 1)
+#define RT2860_CH_STA_TIMER_EN (1 << 0)
+
+/* possible values for register BCN_TIME_CFG 0x1114 */
+#define MTW_TSF_INS_COMP_SHIFT 24
+#define MTW_BCN_TX_EN (1 << 20)
+#define MTW_TBTT_TIMER_EN (1 << 19)
+#define MTW_TSF_SYNC_MODE_SHIFT 17
+#define MTW_TSF_SYNC_MODE_DIS 0
+#define MTW_TSF_SYNC_MODE_STA 1
+#define MTW_TSF_SYNC_MODE_IBSS 2
+#define MTW_TSF_SYNC_MODE_HOSTAP 3
+#define MTW_TSF_TIMER_EN (1 << 16)
+#define MTW_BCN_INTVAL_SHIFT 0
+
+/* possible flags for register TBTT_SYNC_CFG 0x1118 */
+#define RT2860_BCN_CWMIN_SHIFT 20
+#define RT2860_BCN_AIFSN_SHIFT 16
+#define RT2860_BCN_EXP_WIN_SHIFT 8
+#define RT2860_TBTT_ADJUST_SHIFT 0
+
+/* possible flags for register INT_TIMER_CFG 0x1128 */
+#define RT2860_GP_TIMER_SHIFT 16
+#define RT2860_PRE_TBTT_TIMER_SHIFT 0
+
+/* possible flags for register INT_TIMER_EN */
+#define RT2860_GP_TIMER_EN (1 << 1)
+#define RT2860_PRE_TBTT_INT_EN (1 << 0)
+
+/* possible flags for register MAC_STATUS_REG 0x1200 */
+#define MTW_RX_STATUS_BUSY (1 << 1)
+#define MTW_TX_STATUS_BUSY (1 << 0)
+
+/* possible flags for register PWR_PIN_CFG 0x1204 */
+#define RT2860_IO_ADDA_PD (1 << 3)
+#define RT2860_IO_PLL_PD (1 << 2)
+#define RT2860_IO_RA_PE (1 << 1)
+#define RT2860_IO_RF_PE (1 << 0)
+
+/* possible flags for register AUTO_WAKEUP_CFG 0x1208 */
+#define MTW_AUTO_WAKEUP_EN (1 << 15)
+#define MTW_SLEEP_TBTT_NUM_SHIFT 8
+#define MTW_WAKEUP_LEAD_TIME_SHIFT 0
+
+/* possible flags for register TX_PIN_CFG 0x1328 */
+#define RT2860_TRSW_POL (1U << 19)
+#define RT2860_TRSW_EN (1U << 18)
+#define RT2860_RFTR_POL (1U << 17)
+#define RT2860_RFTR_EN (1U << 16)
+#define RT2860_LNA_PE_G1_POL (1U << 15)
+#define RT2860_LNA_PE_A1_POL (1U << 14)
+#define RT2860_LNA_PE_G0_POL (1U << 13)
+#define RT2860_LNA_PE_A0_POL (1U << 12)
+#define RT2860_LNA_PE_G1_EN (1U << 11)
+#define RT2860_LNA_PE_A1_EN (1U << 10)
+#define RT2860_LNA_PE1_EN (RT2860_LNA_PE_A1_EN | RT2860_LNA_PE_G1_EN)
+#define RT2860_LNA_PE_G0_EN (1U << 9)
+#define RT2860_LNA_PE_A0_EN (1U << 8)
+#define RT2860_LNA_PE0_EN (RT2860_LNA_PE_A0_EN | RT2860_LNA_PE_G0_EN)
+#define RT2860_PA_PE_G1_POL (1U << 7)
+#define RT2860_PA_PE_A1_POL (1U << 6)
+#define RT2860_PA_PE_G0_POL (1U << 5)
+#define RT2860_PA_PE_A0_POL (1U << 4)
+#define RT2860_PA_PE_G1_EN (1U << 3)
+#define RT2860_PA_PE_A1_EN (1U << 2)
+#define RT2860_PA_PE_G0_EN (1U << 1)
+#define RT2860_PA_PE_A0_EN (1U << 0)
+
+/* possible flags for register TX_BAND_CFG 0x132c */
+#define MTW_TX_BAND_SEL_2G (1 << 2)
+#define MTW_TX_BAND_SEL_5G (1 << 1)
+#define MTW_TX_BAND_UPPER_40M (1 << 0)
+
+/* possible flags for register TX_SW_CFG0 0x1330 */
+#define RT2860_DLY_RFTR_EN_SHIFT 24
+#define RT2860_DLY_TRSW_EN_SHIFT 16
+#define RT2860_DLY_PAPE_EN_SHIFT 8
+#define RT2860_DLY_TXPE_EN_SHIFT 0
+
+/* possible flags for register TX_SW_CFG1 0x1334 */
+#define RT2860_DLY_RFTR_DIS_SHIFT 16
+#define RT2860_DLY_TRSW_DIS_SHIFT 8
+#define RT2860_DLY_PAPE_DIS_SHIFT 0
+
+/* possible flags for register TX_SW_CFG2 0x1338 */
+#define RT2860_DLY_LNA_EN_SHIFT 24
+#define RT2860_DLY_LNA_DIS_SHIFT 16
+#define RT2860_DLY_DAC_EN_SHIFT 8
+#define RT2860_DLY_DAC_DIS_SHIFT 0
+
+/* possible flags for register TXOP_THRES_CFG 0x133c */
+#define RT2860_TXOP_REM_THRES_SHIFT 24
+#define RT2860_CF_END_THRES_SHIFT 16
+#define RT2860_RDG_IN_THRES 8
+#define RT2860_RDG_OUT_THRES 0
+
+/* possible flags for register TXOP_CTRL_CFG 0x1340 */
+#define MTW_TXOP_ED_CCA_EN (1 << 20)
+#define MTW_EXT_CW_MIN_SHIFT 16
+#define MTW_EXT_CCA_DLY_SHIFT 8
+#define MTW_EXT_CCA_EN (1 << 7)
+#define MTW_LSIG_TXOP_EN (1 << 6)
+#define MTW_TXOP_TRUN_EN_MIMOPS (1 << 4)
+#define MTW_TXOP_TRUN_EN_TXOP (1 << 3)
+#define MTW_TXOP_TRUN_EN_RATE (1 << 2)
+#define MTW_TXOP_TRUN_EN_AC (1 << 1)
+#define MTW_TXOP_TRUN_EN_TIMEOUT (1 << 0)
+
+/* possible flags for register TX_RTS_CFG 0x1344 */
+#define MTW_RTS_FBK_EN (1 << 24)
+#define MTW_RTS_THRES_SHIFT 8
+#define MTW_RTS_RTY_LIMIT_SHIFT 0
+
+/* possible flags for register TX_TIMEOUT_CFG 0x1348 */
+#define MTW_TXOP_TIMEOUT_SHIFT 16
+#define MTW_RX_ACK_TIMEOUT_SHIFT 8
+#define MTW_MPDU_LIFE_TIME_SHIFT 4
+
+/* possible flags for register TX_RETRY_CFG 0x134c */
+#define MTW_TX_AUTOFB_EN (1 << 30)
+#define MTW_AGG_RTY_MODE_TIMER (1 << 29)
+#define MTW_NAG_RTY_MODE_TIMER (1 << 28)
+#define MTW_LONG_RTY_THRES_SHIFT 16
+#define MTW_LONG_RTY_LIMIT_SHIFT 8
+#define MTW_SHORT_RTY_LIMIT_SHIFT 0
+
+/* possible flags for register TX_LINK_CFG 0x1350 */
+#define MTW_REMOTE_MFS_SHIFT 24
+#define MTW_REMOTE_MFB_SHIFT 16
+#define MTW_TX_CFACK_EN (1 << 12)
+#define MTW_TX_RDG_EN (1 << 11)
+#define MTW_TX_MRQ_EN (1 << 10)
+#define MTW_REMOTE_UMFS_EN (1 << 9)
+#define MTW_TX_MFB_EN (1 << 8)
+#define MTW_REMOTE_MFB_LT_SHIFT 0
+
+/* possible flags for registers *_PROT_CFG */
+#define RT2860_RTSTH_EN (1 << 26)
+#define RT2860_TXOP_ALLOW_GF40 (1 << 25)
+#define RT2860_TXOP_ALLOW_GF20 (1 << 24)
+#define RT2860_TXOP_ALLOW_MM40 (1 << 23)
+#define RT2860_TXOP_ALLOW_MM20 (1 << 22)
+#define RT2860_TXOP_ALLOW_OFDM (1 << 21)
+#define RT2860_TXOP_ALLOW_CCK (1 << 20)
+#define RT2860_TXOP_ALLOW_ALL (0x3f << 20)
+#define RT2860_PROT_NAV_SHORT (1 << 18)
+#define RT2860_PROT_NAV_LONG (2 << 18)
+#define RT2860_PROT_CTRL_RTS_CTS (1 << 16)
+#define RT2860_PROT_CTRL_CTS (2 << 16)
+
+/* possible flags for registers EXP_{CTS,ACK}_TIME */
+#define RT2860_EXP_OFDM_TIME_SHIFT 16
+#define RT2860_EXP_CCK_TIME_SHIFT 0
+
+/* possible flags for register RX_FILTR_CFG 0x1400 */
+#define MTW_DROP_CTRL_RSV (1 << 16)
+#define MTW_DROP_BAR (1 << 15)
+#define MTW_DROP_BA (1 << 14)
+#define MTW_DROP_PSPOLL (1 << 13)
+#define MTW_DROP_RTS (1 << 12)
+#define MTW_DROP_CTS (1 << 11)
+#define MTW_DROP_ACK (1 << 10)
+#define MTW_DROP_CFEND (1 << 9)
+#define MTW_DROP_CFACK (1 << 8)
+#define MTW_DROP_DUPL (1 << 7)
+#define MTW_DROP_BC (1 << 6)
+#define MTW_DROP_MC (1 << 5)
+#define MTW_DROP_VER_ERR (1 << 4)
+#define MTW_DROP_NOT_MYBSS (1 << 3)
+#define MTW_DROP_UC_NOME (1 << 2)
+#define MTW_DROP_PHY_ERR (1 << 1)
+#define MTW_DROP_CRC_ERR (1 << 0)
+
+/* possible flags for register AUTO_RSP_CFG 0x1404 */
+#define MTW_CTRL_PWR_BIT (1 << 7)
+#define MTW_BAC_ACK_POLICY (1 << 6)
+#define MTW_CCK_SHORT_EN (1 << 4)
+#define MTW_CTS_40M_REF_EN (1 << 3)
+#define MTW_CTS_40M_MODE_EN (1 << 2)
+#define MTW_BAC_ACKPOLICY_EN (1 << 1)
+#define MTW_AUTO_RSP_EN (1 << 0)
+
+/* possible flags for register SIFS_COST_CFG */
+#define RT2860_OFDM_SIFS_COST_SHIFT 8
+#define RT2860_CCK_SIFS_COST_SHIFT 0
+
+/* possible flags for register TXOP_HLDR_ET 0x1608 */
+#define MTW_TXOP_ETM1_EN (1 << 25)
+#define MTW_TXOP_ETM0_EN (1 << 24)
+#define MTW_TXOP_ETM_THRES_SHIFT 16
+#define MTW_TXOP_ETO_EN (1 << 8)
+#define MTW_TXOP_ETO_THRES_SHIFT 1
+#define MTW_PER_RX_RST_EN (1 << 0)
+
+/* possible flags for register TX_STAT_FIFO 0x1718 */
+#define MTW_TXQ_MCS_SHIFT 16
+#define MTW_TXQ_WCID_SHIFT 8
+#define MTW_TXQ_ACKREQ (1 << 7)
+#define MTW_TXQ_AGG (1 << 6)
+#define MTW_TXQ_OK (1 << 5)
+#define MTW_TXQ_PID_SHIFT 1
+#define MTW_TXQ_VLD (1 << 0)
+
+/* possible flags for register TX_STAT_FIFO_EXT 0x1798 */
+#define MTW_TXQ_PKTID_SHIFT 8
+#define MTW_TXQ_RETRY_SHIFT 0
+
+/* possible flags for register WCID_ATTR 0xa800 */
+#define MTW_MODE_NOSEC 0
+#define MTW_MODE_WEP40 1
+#define MTW_MODE_WEP104 2
+#define MTW_MODE_TKIP 3
+#define MTW_MODE_AES_CCMP 4
+#define MTW_MODE_CKIP40 5
+#define MTW_MODE_CKIP104 6
+#define MTW_MODE_CKIP128 7
+#define MTW_RX_PKEY_EN (1 << 0)
+
+/* possible flags for MT7601 BBP register 47 */
+#define MT7601_R47_MASK 0x07
+#define MT7601_R47_TSSI (0 << 0)
+#define MT7601_R47_PKT (1 << 0)
+#define MT7601_R47_TXRATE (1 << 1)
+#define MT7601_R47_TEMP (1 << 2)
+
+#define MTW_RXQ_WLAN 0
+#define MTW_RXQ_MCU 1
+#define MTW_TXQ_MCU 5
+
+enum mtw_phy_mode {
+ MTW_PHY_CCK,
+ MTW_PHY_OFDM,
+ MTW_PHY_HT,
+ MTW_PHY_HT_GF,
+ MTW_PHY_VHT,
+};
+
+/* RT2860 TX descriptor */
+struct rt2860_txd {
+ uint32_t sdp0; /* Segment Data Pointer 0 */
+ uint16_t sdl1; /* Segment Data Length 1 */
+#define RT2860_TX_BURST (1 << 15)
+#define RT2860_TX_LS1 (1 << 14) /* SDP1 is the last segment */
+
+ uint16_t sdl0; /* Segment Data Length 0 */
+#define RT2860_TX_DDONE (1 << 15)
+#define RT2860_TX_LS0 (1 << 14) /* SDP0 is the last segment */
+
+ uint32_t sdp1; /* Segment Data Pointer 1 */
+ uint8_t reserved[3];
+ uint8_t flags;
+#define RT2860_TX_QSEL_SHIFT 1
+#define RT2860_TX_QSEL_MGMT (0 << 1)
+#define RT2860_TX_QSEL_HCCA (1 << 1)
+#define RT2860_TX_QSEL_EDCA (2 << 1)
+#define RT2860_TX_WIV (1 << 0)
+} __packed;
+
+/* TX descriptor */
+struct mtw_txd {
+ uint16_t len;
+ uint16_t flags;
+#define MTW_TXD_CMD (1 << 14)
+#define MTW_TXD_DATA (0 << 14)
+#define MTW_TXD_MCU (2 << 11)
+#define MTW_TXD_WLAN (0 << 11)
+#define MTW_TXD_QSEL_EDCA (2 << 9)
+#define MTW_TXD_QSEL_HCCA (1 << 9)
+#define MTW_TXD_QSEL_MGMT (0 << 9)
+#define MTW_TXD_WIV (1 << 8)
+#define MTW_TXD_CMD_SHIFT 4
+#define MTW_TXD_80211 (1 << 3)
+} __packed;
+
+struct mtw_txd_fw {
+ uint16_t len;
+ uint16_t flags;
+ uint8_t fw[0x2c44];
+} __packed;
+
+/* TX Wireless Information */
+struct mtw_txwi {
+ uint8_t flags;
+#define MTW_TX_MPDU_DSITY_SHIFT 5
+#define MTW_TX_AMPDU (1 << 4)
+#define MTW_TX_TS (1 << 3)
+#define MTW_TX_CFACK (1 << 2)
+#define MTW_TX_MMPS (1 << 1)
+#define MTW_TX_FRAG (1 << 0)
+
+ uint8_t txop;
+#define MTW_TX_TXOP_HT 0
+#define MTW_TX_TXOP_PIFS 1
+#define MTW_TX_TXOP_SIFS 2
+#define MTW_TX_TXOP_BACKOFF 3
+
+ uint16_t phy;
+#define MT7650_PHY_MODE 0xe000
+#define MT7601_PHY_MODE 0xc000
+#define MT7601_PHY_SHIFT 14
+#define MT7650_PHY_SHIFT 13
+#define MT7650_PHY_SGI (1 << 9)
+#define MT7601_PHY_SGI (1 << 8)
+#define MTW_PHY_BW20 (0 << 7)
+#define MTW_PHY_BW40 (1 << 7)
+#define MTW_PHY_BW80 (2 << 7)
+#define MTW_PHY_BW160 (3 << 7)
+#define MTW_PHY_LDPC (1 << 6)
+#define MTW_PHY_MCS 0x3f
+#define MTW_PHY_SHPRE (1 << 3)
+
+ uint8_t xflags;
+#define MTW_TX_BAWINSIZE_SHIFT 2
+#define MTW_TX_NSEQ (1 << 1)
+#define MTW_TX_ACK (1 << 0)
+
+ uint8_t wcid; /* Wireless Client ID */
+ uint16_t len;
+#define MTW_TX_PID_SHIFT 12
+
+ uint32_t iv;
+ uint32_t eiv;
+ uint32_t reserved1;
+} __packed;
+
+/* RT2860 RX descriptor */
+struct rt2860_rxd {
+ uint32_t sdp0;
+ uint16_t sdl1; /* unused */
+ uint16_t sdl0;
+#define MTW_RX_DDONE (1 << 15)
+#define MTW_RX_LS0 (1 << 14)
+
+ uint32_t sdp1; /* unused */
+ uint32_t flags;
+#define MTW_RX_DEC (1 << 16)
+#define MTW_RX_AMPDU (1 << 15)
+#define MTW_RX_L2PAD (1 << 14)
+#define MTW_RX_RSSI (1 << 13)
+#define MTW_RX_HTC (1 << 12)
+#define MTW_RX_AMSDU (1 << 11)
+#define MTW_RX_MICERR (1 << 10)
+#define MTW_RX_ICVERR (1 << 9)
+#define MTW_RX_CRCERR (1 << 8)
+#define MTW_RX_MYBSS (1 << 7)
+#define MTW_RX_BC (1 << 6)
+#define MTW_RX_MC (1 << 5)
+#define MTW_RX_UC2ME (1 << 4)
+#define MTW_RX_FRAG (1 << 3)
+#define MTW_RX_NULL (1 << 2)
+#define MTW_RX_DATA (1 << 1)
+#define MTW_RX_BA (1 << 0)
+} __packed;
+
+/* RX descriptor */
+struct mtw_rxd {
+ uint16_t len;
+#define MTW_RXD_SELF_GEN (1 << 15)
+#define MTW_RXD_LEN 0x3fff
+
+ uint16_t flags;
+} __packed;
+
+/* RX Wireless Information */
+struct mtw_rxwi {
+ uint32_t flags;
+ uint8_t wcid;
+ uint8_t keyidx;
+#define MTW_RX_UDF_SHIFT 5
+#define MTW_RX_BSS_IDX_SHIFT 2
+
+ uint16_t len;
+#define MTW_RX_TID_SHIFT 12
+
+ uint16_t seq;
+ uint16_t phy;
+ uint8_t rssi[4];
+ uint32_t reserved1;
+ uint32_t reserved2;
+ uint32_t reserved3;
+} __packed __aligned(4);
+
+/* MCU Command */
+struct mtw_mcu_cmd_8 {
+ uint32_t func;
+ uint32_t val;
+} __packed __aligned(4);
+
+struct mtw_mcu_cmd_16 {
+ uint32_t r1;
+ uint32_t r2;
+ uint32_t r3;
+ uint32_t r4;
+} __packed __aligned(4);
+
+#define MTW_DMA_PAD 4
+
+/* first DMA segment contains TXWI + 802.11 header + 32-bit padding */
+#define MTW_TXWI_DMASZ \
+ (sizeof (struct mtw_txwi) + \
+ sizeof (struct ieee80211_htframe) + \
+ sizeof (uint16_t))
+
+#define MT7601_RF_7601 0x7601 /* 1T1R */
+#define MT7610_RF_7610 0x7610 /* 1T1R */
+#define MT7612_RF_7612 0x7612 /* 2T2R */
+
+#define MTW_CONFIG_NO 1
+
+/* USB vendor request */
+#define MTW_RESET 0x1
+#define MTW_WRITE_2 0x2
+#define MTW_WRITE_REGION_1 0x6
+#define MTW_READ_REGION_1 0x7
+#define MTW_EEPROM_READ 0x9
+#define MTW_WRITE_CFG 0x46
+#define MTW_READ_CFG 0x47
+
+/* eFUSE ROM */
+#define MTW_EEPROM_CHIPID 0x00
+#define MTW_EEPROM_VERSION 0x01
+#define MTW_EEPROM_MAC01 0x02
+#define MTW_EEPROM_MAC23 0x03
+#define MTW_EEPROM_MAC45 0x04
+#define MTW_EEPROM_ANTENNA 0x1a
+#define MTW_EEPROM_CONFIG 0x1b
+#define MTW_EEPROM_COUNTRY 0x1c
+#define MTW_EEPROM_FREQ_OFFSET 0x1d
+#define MTW_EEPROM_LED1 0x1e
+#define MTW_EEPROM_LED2 0x1f
+#define MTW_EEPROM_LED3 0x20
+#define MTW_EEPROM_LNA 0x22
+#define MTW_EEPROM_RSSI1_2GHZ 0x23
+#define MTW_EEPROM_RSSI2_2GHZ 0x24
+#define MTW_EEPROM_RSSI1_5GHZ 0x25
+#define MTW_EEPROM_RSSI2_5GHZ 0x26
+#define MTW_EEPROM_DELTAPWR 0x28
+#define MTW_EEPROM_PWR2GHZ_BASE1 0x29
+#define MTW_EEPROM_PWR2GHZ_BASE2 0x30
+#define MTW_EEPROM_TSSI1_2GHZ 0x37
+#define MTW_EEPROM_TSSI2_2GHZ 0x38
+#define MTW_EEPROM_TSSI3_2GHZ 0x39
+#define MTW_EEPROM_TSSI4_2GHZ 0x3a
+#define MTW_EEPROM_TSSI5_2GHZ 0x3b
+#define MTW_EEPROM_PWR5GHZ_BASE1 0x3c
+#define MTW_NIC_CONF2 0x42
+#define MTW_EEPROM_PWR5GHZ_BASE2 0x53
+#define MTW_TXPWR_EXT_PA_5G 0x54
+#define MTW_TXPWR_START_2G_0 0x56
+#define MTW_TXPWR_START_2G_1 0x5c
+#define MTW_TXPWR_START_5G_0 0x62
+#define RT2860_EEPROM_TSSI1_5GHZ 0x6a
+#define RT2860_EEPROM_TSSI2_5GHZ 0x6b
+#define RT2860_EEPROM_TSSI3_5GHZ 0x6c
+#define RT2860_EEPROM_TSSI4_5GHZ 0x6d
+#define RT2860_EEPROM_TSSI5_5GHZ 0x6e
+#define MTW_TX_TSSI_SLOPE 0x6e
+#define MTW_EEPROM_RPWR 0x6f
+
+/* LED commands */
+#define CMD_LED_MODE 0x10
+#define CMD_MODE_ON 0x0
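+
+/*
+ * Rate table inherited from rt2860: each entry carries the 802.11 rate
+ * in 500 kb/s units, the hardware MCS code, the PHY type, the control
+ * rate index used for protection frames, and the ACK duration with
+ * short/long preamble.
+ */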
+static const struct rt2860_rate {
+ uint8_t rate;
+ uint8_t mcs;
+ enum ieee80211_phytype phy;
+ uint8_t ctl_ridx;
+ uint16_t sp_ack_dur;
+ uint16_t lp_ack_dur;
+} rt2860_rates[] = {
+ { 2, 0, IEEE80211_T_DS, 0, 314, 314 },
+ { 4, 1, IEEE80211_T_DS, 1, 258, 162 },
+ { 11, 2, IEEE80211_T_DS, 2, 223, 127 },
+ { 22, 3, IEEE80211_T_DS, 3, 213, 117 },
+ { 12, 0, IEEE80211_T_OFDM, 4, 60, 60 },
+ { 18, 1, IEEE80211_T_OFDM, 4, 52, 52 },
+ { 24, 2, IEEE80211_T_OFDM, 6, 48, 48 },
+ { 36, 3, IEEE80211_T_OFDM, 6, 44, 44 },
+ { 48, 4, IEEE80211_T_OFDM, 8, 44, 44 },
+ { 72, 5, IEEE80211_T_OFDM, 8, 40, 40 },
+ { 96, 6, IEEE80211_T_OFDM, 8, 40, 40 },
+ { 108, 7, IEEE80211_T_OFDM, 8, 40, 40 },
+ { 0x80, 0, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x81, 1, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x82, 2, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x83, 3, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x84, 4, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x85, 5, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x86, 6, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x87, 7, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x88, 8, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x89, 9, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8a, 10, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8b, 11, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8c, 12, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8d, 13, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8e, 14, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x8f, 15, IEEE80211_T_HT, 4, 60, 60 },
+
+ /* MCS - 3 streams */
+ { 0x90, 16, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x91, 17, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x92, 18, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x93, 19, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x94, 20, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x95, 21, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x96, 22, IEEE80211_T_HT, 4, 60, 60 },
+ { 0x97, 23, IEEE80211_T_HT, 4, 60, 60 }
+};
+
+/* These are indexes into the rt2860_rates[] array above */
+#define MTW_RIDX_CCK1 0
+#define MTW_RIDX_CCK11 3
+#define MTW_RIDX_OFDM6 4
+#define MTW_RIDX_MCS0 12
+#define MTW_RIDX_MAX 36
+
+#define MT7601_RF_CHAN \
+ { 1, 0x99, 0x99, 0x09, 0x50 }, \
+ { 2, 0x46, 0x44, 0x0a, 0x50 }, \
+ { 3, 0xec, 0xee, 0x0a, 0x50 }, \
+ { 4, 0x99, 0x99, 0x0b, 0x50 }, \
+ { 5, 0x46, 0x44, 0x08, 0x51 }, \
+ { 6, 0xec, 0xee, 0x08, 0x51 }, \
+ { 7, 0x99, 0x99, 0x09, 0x51 }, \
+ { 8, 0x46, 0x44, 0x0a, 0x51 }, \
+ { 9, 0xec, 0xee, 0x0a, 0x51 }, \
+ { 10, 0x99, 0x99, 0x0b, 0x51 }, \
+ { 11, 0x46, 0x44, 0x08, 0x52 }, \
+ { 12, 0xec, 0xee, 0x08, 0x52 }, \
+ { 13, 0x99, 0x99, 0x09, 0x52 }, \
+ { 14, 0x33, 0x33, 0x0b, 0x52 }
+
+/*
+ * Default values for MAC registers.
+ */
+#define MT7601_DEF_MAC \
+ { MTW_BCN_OFFSET0, 0x18100800 }, \
+ { MTW_BCN_OFFSET1, 0x38302820 }, \
+ { MTW_BCN_OFFSET2, 0x58504840 }, \
+ { MTW_BCN_OFFSET3, 0x78706860 }, \
+ { MTW_MAC_SYS_CTRL, 0x0000000c }, \
+ { MTW_MAX_LEN_CFG, 0x000a3fff }, \
+ { MTW_AMPDU_MAX_LEN_20M1S, 0x77777777 }, \
+ { MTW_AMPDU_MAX_LEN_20M2S, 0x77777777 }, \
+ { MTW_AMPDU_MAX_LEN_40M1S, 0x77777777 }, \
+ { MTW_AMPDU_MAX_LEN_40M2S, 0x77777777 }, \
+ { MTW_XIFS_TIME_CFG, 0x33a41010 }, \
+ { MTW_BKOFF_SLOT_CFG, 0x00000209 }, \
+ { MTW_TBTT_SYNC_CFG, 0x00422010 }, \
+ { MTW_INT_TIMER_CFG, 0x00000000 }, \
+ { MTW_PWR_PIN_CFG, 0x00000000 }, \
+ { MTW_AUTO_WAKEUP_CFG, 0x00000014 }, \
+ { MTW_EDCA_AC_CFG(0), 0x000a4360 }, \
+ { MTW_EDCA_AC_CFG(1), 0x000a4700 }, \
+ { MTW_EDCA_AC_CFG(2), 0x00043338 }, \
+ { MTW_EDCA_AC_CFG(3), 0x0003222f }, \
+ { MTW_TX_PIN_CFG, 0x33150f0f }, \
+ { MTW_TX_BAND_CFG, 0x00000005 }, \
+ { MTW_TX_SW_CFG0, 0x00000402 }, \
+ { MTW_TX_SW_CFG1, 0x00000000 }, \
+ { MTW_TX_SW_CFG2, 0x00000000 }, \
+ { MTW_TXOP_CTRL_CFG, 0x0000583f }, \
+ { MTW_TX_RTS_CFG, 0x01100020 }, \
+ { MTW_TX_TIMEOUT_CFG, 0x000a2090 }, \
+ { MTW_TX_RETRY_CFG, 0x47d01f0f }, \
+ { MTW_TX_LINK_CFG, 0x007f1820 }, \
+ { MTW_HT_FBK_CFG1, 0xedcba980 }, \
+ { MTW_CCK_PROT_CFG, 0x07f40000 }, \
+ { MTW_OFDM_PROT_CFG, 0x07f60000 }, \
+ { MTW_MM20_PROT_CFG, 0x01750003 }, \
+ { MTW_MM40_PROT_CFG, 0x03f50003 }, \
+ { MTW_GF20_PROT_CFG, 0x01750003 }, \
+ { MTW_GF40_PROT_CFG, 0x03f50003 }, \
+ { MTW_EXP_ACK_TIME, 0x002400ca }, \
+ { MTW_TX_PWR_CFG5, 0x00000000 }, \
+ { MTW_TX_PWR_CFG6, 0x01010101 }, \
+ { MTW_TX0_RF_GAIN_CORR, 0x003b0005 }, \
+ { MTW_TX1_RF_GAIN_CORR, 0x00000000 }, \
+ { MTW_TX0_RF_GAIN_ATTEN, 0x00006969 }, \
+ { MTW_TX_ALC_CFG3, 0x6c6c6c6c }, \
+ { MTW_TX_ALC_CFG0, 0x2f2f0005 }, \
+ { MTW_TX_ALC_CFG4, 0x00000400 }, \
+ { MTW_TX_ALC_VGA3, 0x00060006 }, \
+ { MTW_RX_FILTR_CFG, 0x00015f97 }, \
+ { MTW_AUTO_RSP_CFG, 0x00000003 }, \
+ { MTW_LEGACY_BASIC_RATE, 0x0000015f }, \
+ { MTW_HT_BASIC_RATE, 0x00008003 }, \
+ { MTW_RX_MAX_PCNT, 0x0000009f }, \
+ { MTW_WPDMA_GLO_CFG, 0x00000030 }, \
+ { MTW_WMM_AIFSN_CFG, 0x00002273 }, \
+ { MTW_WMM_CWMIN_CFG, 0x00002344 }, \
+ { MTW_WMM_CWMAX_CFG, 0x000034aa }, \
+ { MTW_TSO_CTRL, 0x00000000 }, \
+ { MTW_SYS_CTRL, 0x00080c00 }, \
+ { MTW_FCE_PSE_CTRL, 0x00000001 }, \
+ { MTW_AUX_CLK_CFG, 0x00000000 }, \
+ { MTW_BBP_PA_MODE_CFG0, 0x010055ff }, \
+ { MTW_BBP_PA_MODE_CFG1, 0x00550055 }, \
+ { MTW_RF_PA_MODE_CFG0, 0x010055ff }, \
+ { MTW_RF_PA_MODE_CFG1, 0x00550055 }, \
+ { 0x0a38, 0x00000000 }, \
+ { MTW_BBP_CSR, 0x00000000 }, \
+ { MTW_PBF_CFG, 0x7f723c1f }
+
+/*
+ * Default values for Baseband registers
+ */
+#define MT7601_DEF_BBP \
+ { 1, 0x04 }, \
+ { 4, 0x40 }, \
+ { 20, 0x06 }, \
+ { 31, 0x08 }, \
+ { 178, 0xff }, \
+ { 66, 0x14 }, \
+ { 68, 0x8b }, \
+ { 69, 0x12 }, \
+ { 70, 0x09 }, \
+ { 73, 0x11 }, \
+ { 75, 0x60 }, \
+ { 76, 0x44 }, \
+ { 84, 0x9a }, \
+ { 86, 0x38 }, \
+ { 91, 0x07 }, \
+ { 92, 0x02 }, \
+ { 99, 0x50 }, \
+ { 101, 0x00 }, \
+ { 103, 0xc0 }, \
+ { 104, 0x92 }, \
+ { 105, 0x3c }, \
+ { 106, 0x03 }, \
+ { 128, 0x12 }, \
+ { 142, 0x04 }, \
+ { 143, 0x37 }, \
+ { 142, 0x03 }, \
+ { 143, 0x99 }, \
+ { 160, 0xeb }, \
+ { 161, 0xc4 }, \
+ { 162, 0x77 }, \
+ { 163, 0xf9 }, \
+ { 164, 0x88 }, \
+ { 165, 0x80 }, \
+ { 166, 0xff }, \
+ { 167, 0xe4 }, \
+ { 195, 0x00 }, \
+ { 196, 0x00 }, \
+ { 195, 0x01 }, \
+ { 196, 0x04 }, \
+ { 195, 0x02 }, \
+ { 196, 0x20 }, \
+ { 195, 0x03 }, \
+ { 196, 0x0a }, \
+ { 195, 0x06 }, \
+ { 196, 0x16 }, \
+ { 195, 0x07 }, \
+ { 196, 0x05 }, \
+ { 195, 0x08 }, \
+ { 196, 0x37 }, \
+ { 195, 0x0a }, \
+ { 196, 0x15 }, \
+ { 195, 0x0b }, \
+ { 196, 0x17 }, \
+ { 195, 0x0c }, \
+ { 196, 0x06 }, \
+ { 195, 0x0d }, \
+ { 196, 0x09 }, \
+ { 195, 0x0e }, \
+ { 196, 0x05 }, \
+ { 195, 0x0f }, \
+ { 196, 0x09 }, \
+ { 195, 0x10 }, \
+ { 196, 0x20 }, \
+ { 195, 0x20 }, \
+ { 196, 0x17 }, \
+ { 195, 0x21 }, \
+ { 196, 0x06 }, \
+ { 195, 0x22 }, \
+ { 196, 0x09 }, \
+ { 195, 0x23 }, \
+ { 196, 0x17 }, \
+ { 195, 0x24 }, \
+ { 196, 0x06 }, \
+ { 195, 0x25 }, \
+ { 196, 0x09 }, \
+ { 195, 0x26 }, \
+ { 196, 0x17 }, \
+ { 195, 0x27 }, \
+ { 196, 0x06 }, \
+ { 195, 0x28 }, \
+ { 196, 0x09 }, \
+ { 195, 0x29 }, \
+ { 196, 0x05 }, \
+ { 195, 0x2a }, \
+ { 196, 0x09 }, \
+ { 195, 0x80 }, \
+ { 196, 0x8b }, \
+ { 195, 0x81 }, \
+ { 196, 0x12 }, \
+ { 195, 0x82 }, \
+ { 196, 0x09 }, \
+ { 195, 0x83 }, \
+ { 196, 0x17 }, \
+ { 195, 0x84 }, \
+ { 196, 0x11 }, \
+ { 195, 0x85 }, \
+ { 196, 0x00 }, \
+ { 195, 0x86 }, \
+ { 196, 0x00 }, \
+ { 195, 0x87 }, \
+ { 196, 0x18 }, \
+ { 195, 0x88 }, \
+ { 196, 0x60 }, \
+ { 195, 0x89 }, \
+ { 196, 0x44 }, \
+ { 195, 0x8a }, \
+ { 196, 0x8b }, \
+ { 195, 0x8b }, \
+ { 196, 0x8b }, \
+ { 195, 0x8c }, \
+ { 196, 0x8b }, \
+ { 195, 0x8d }, \
+ { 196, 0x8b }, \
+ { 195, 0x8e }, \
+ { 196, 0x09 }, \
+ { 195, 0x8f }, \
+ { 196, 0x09 }, \
+ { 195, 0x90 }, \
+ { 196, 0x09 }, \
+ { 195, 0x91 }, \
+ { 196, 0x09 }, \
+ { 195, 0x92 }, \
+ { 196, 0x11 }, \
+ { 195, 0x93 }, \
+ { 196, 0x11 }, \
+ { 195, 0x94 }, \
+ { 196, 0x11 }, \
+ { 195, 0x95 }, \
+ { 196, 0x11 }, \
+ { 47, 0x80 }, \
+ { 60, 0x80 }, \
+ { 150, 0xd2 }, \
+ { 151, 0x32 }, \
+ { 152, 0x23 }, \
+ { 153, 0x41 }, \
+ { 154, 0x00 }, \
+ { 155, 0x4f }, \
+ { 253, 0x7e }, \
+ { 195, 0x30 }, \
+ { 196, 0x32 }, \
+ { 195, 0x31 }, \
+ { 196, 0x23 }, \
+ { 195, 0x32 }, \
+ { 196, 0x45 }, \
+ { 195, 0x35 }, \
+ { 196, 0x4a }, \
+ { 195, 0x36 }, \
+ { 196, 0x5a }, \
+ { 195, 0x37 }, \
+ { 196, 0x5a }
+
+/*
+ * Default values for RF registers
+ */
+#define MT7601_BANK0_RF \
+ { 0, 0x02 }, \
+ { 1, 0x01 }, \
+ { 2, 0x11 }, \
+ { 3, 0xff }, \
+ { 4, 0x0a }, \
+ { 5, 0x20 }, \
+ { 6, 0x00 }, \
+ { 7, 0x00 }, \
+ { 8, 0x00 }, \
+ { 9, 0x00 }, \
+ { 10, 0x00 }, \
+ { 11, 0x21 }, \
+ { 13, 0x00 }, \
+ { 14, 0x7c }, \
+ { 15, 0x22 }, \
+ { 16, 0x80 }, \
+ { 17, 0x99 }, \
+ { 18, 0x99 }, \
+ { 19, 0x09 }, \
+ { 20, 0x50 }, \
+ { 21, 0xb0 }, \
+ { 22, 0x00 }, \
+ { 23, 0xc5 }, \
+ { 24, 0xfc }, \
+ { 25, 0x40 }, \
+ { 26, 0x4d }, \
+ { 27, 0x02 }, \
+ { 28, 0x72 }, \
+ { 29, 0x01 }, \
+ { 30, 0x00 }, \
+ { 31, 0x00 }, \
+ { 32, 0x00 }, \
+ { 33, 0x00 }, \
+ { 34, 0x23 }, \
+ { 35, 0x01 }, \
+ { 36, 0x00 }, \
+ { 37, 0x00 }, \
+ { 38, 0x00 }, \
+ { 39, 0x20 }, \
+ { 40, 0x00 }, \
+ { 41, 0xd0 }, \
+ { 42, 0x1b }, \
+ { 43, 0x02 }, \
+ { 44, 0x00 }
+
+#define MT7601_BANK4_RF \
+ { 0, 0x01 }, \
+ { 1, 0x00 }, \
+ { 2, 0x00 }, \
+ { 3, 0x00 }, \
+ { 4, 0x00 }, \
+ { 5, 0x08 }, \
+ { 6, 0x00 }, \
+ { 7, 0x5b }, \
+ { 8, 0x52 }, \
+ { 9, 0xb6 }, \
+ { 10, 0x57 }, \
+ { 11, 0x33 }, \
+ { 12, 0x22 }, \
+ { 13, 0x3d }, \
+ { 14, 0x3e }, \
+ { 15, 0x13 }, \
+ { 16, 0x22 }, \
+ { 17, 0x23 }, \
+ { 18, 0x02 }, \
+ { 19, 0xa4 }, \
+ { 20, 0x01 }, \
+ { 21, 0x12 }, \
+ { 22, 0x80 }, \
+ { 23, 0xb3 }, \
+ { 24, 0x00 }, \
+ { 25, 0x00 }, \
+ { 26, 0x00 }, \
+ { 27, 0x00 }, \
+ { 28, 0x18 }, \
+ { 29, 0xee }, \
+ { 30, 0x6b }, \
+ { 31, 0x31 }, \
+ { 32, 0x5d }, \
+ { 33, 0x00 }, \
+ { 34, 0x96 }, \
+ { 35, 0x55 }, \
+ { 36, 0x08 }, \
+ { 37, 0xbb }, \
+ { 38, 0xb3 }, \
+ { 39, 0xb3 }, \
+ { 40, 0x03 }, \
+ { 41, 0x00 }, \
+ { 42, 0x00 }, \
+ { 43, 0xc5 }, \
+ { 44, 0xc5 }, \
+ { 45, 0xc5 }, \
+ { 46, 0x07 }, \
+ { 47, 0xa8 }, \
+ { 48, 0xef }, \
+ { 49, 0x1a }, \
+ { 54, 0x07 }, \
+ { 55, 0xa7 }, \
+ { 56, 0xcc }, \
+ { 57, 0x14 }, \
+ { 58, 0x07 }, \
+ { 59, 0xa8 }, \
+ { 60, 0xd7 }, \
+ { 61, 0x10 }, \
+ { 62, 0x1c }, \
+ { 63, 0x00 }
+
+#define MT7601_BANK5_RF \
+ { 0, 0x47 }, \
+ { 1, 0x00 }, \
+ { 2, 0x00 }, \
+ { 3, 0x08 }, \
+ { 4, 0x04 }, \
+ { 5, 0x20 }, \
+ { 6, 0x3a }, \
+ { 7, 0x3a }, \
+ { 8, 0x00 }, \
+ { 9, 0x00 }, \
+ { 10, 0x10 }, \
+ { 11, 0x10 }, \
+ { 12, 0x10 }, \
+ { 13, 0x10 }, \
+ { 14, 0x10 }, \
+ { 15, 0x20 }, \
+ { 16, 0x22 }, \
+ { 17, 0x7c }, \
+ { 18, 0x00 }, \
+ { 19, 0x00 }, \
+ { 20, 0x00 }, \
+ { 21, 0xf1 }, \
+ { 22, 0x11 }, \
+ { 23, 0x02 }, \
+ { 24, 0x41 }, \
+ { 25, 0x20 }, \
+ { 26, 0x00 }, \
+ { 27, 0xd7 }, \
+ { 28, 0xa2 }, \
+ { 29, 0x20 }, \
+ { 30, 0x49 }, \
+ { 31, 0x20 }, \
+ { 32, 0x04 }, \
+ { 33, 0xf1 }, \
+ { 34, 0xa1 }, \
+ { 35, 0x01 }, \
+ { 41, 0x00 }, \
+ { 42, 0x00 }, \
+ { 43, 0x00 }, \
+ { 44, 0x00 }, \
+ { 45, 0x00 }, \
+ { 46, 0x00 }, \
+ { 47, 0x00 }, \
+ { 48, 0x00 }, \
+ { 49, 0x00 }, \
+ { 50, 0x00 }, \
+ { 51, 0x00 }, \
+ { 52, 0x00 }, \
+ { 53, 0x00 }, \
+ { 54, 0x00 }, \
+ { 55, 0x00 }, \
+ { 56, 0x00 }, \
+ { 57, 0x00 }, \
+ { 58, 0x31 }, \
+ { 59, 0x31 }, \
+ { 60, 0x0a }, \
+ { 61, 0x02 }, \
+ { 62, 0x00 }, \
+ { 63, 0x00 }
+
+union mtw_stats {
+	uint32_t raw;
+	struct {
+		uint16_t fail;
+		uint16_t pad;
+	} error;
+	struct {
+		uint16_t success;
+		uint16_t retry;
+	} tx;
+} __aligned(4);
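
A minimal decode sketch (editorial, not part of the commit): the union above overlays one 32-bit TX status word, so the per-field view falls out of an assignment to .raw. Which MAC register supplies the word is an assumption here.

static inline void
mtw_decode_tx_stats(uint32_t raw, uint16_t *success, uint16_t *retry)
{
	union mtw_stats st;

	st.raw = raw;		/* e.g. a TX status FIFO word (assumed) */
	*success = st.tx.success;
	*retry = st.tx.retry;
}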
diff --git a/sys/dev/usb/wlan/if_mtwvar.h b/sys/dev/usb/wlan/if_mtwvar.h
new file mode 100644
index 000000000000..3cf4c4f9c94e
--- /dev/null
+++ b/sys/dev/usb/wlan/if_mtwvar.h
@@ -0,0 +1,387 @@
+/* $OpenBSD: if_mtwvar.h,v 1.1 2021/12/20 13:59:02 hastings Exp $ */
+/*
+ * Copyright (c) 2008,2009 Damien Bergamini <damien.bergamini@free.fr>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#define MTW_MAX_RXSZ \
+ 4096
+#if 0
+ (sizeof (uint32_t) + \
+ sizeof (struct mtw_rxwi) + \
+ sizeof (uint16_t) + \
+ MCLBYTES + \
+ sizeof (struct mtw_rxd))
+#endif
+
+#define MTW_TX_TIMEOUT 5000 /* ms */
+#define MTW_VAP_MAX 8
+#define MTW_RX_RING_COUNT 1
+#define MTW_TX_RING_COUNT 32
+
+#define MTW_RXQ_COUNT 2
+#define MTW_TXQ_COUNT 6
+
+#define MTW_WCID_MAX 64
+#define MTW_AID2WCID(aid) (1 + ((aid) & 0x7))
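
Hedged illustration of the macro above: association IDs wrap into eight hardware WCID slots, so slots are reused once more than eight stations associate.

	uint8_t wcid = MTW_AID2WCID(aid);	/* aid 1 -> 2, aid 8 -> 1, aid 9 -> 2 */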
+
+struct mtw_rx_radiotap_header {
+ struct ieee80211_radiotap_header wr_ihdr;
+ uint64_t wr_tsf;
+ uint8_t wr_flags;
+ uint8_t wr_rate;
+ uint16_t wr_chan_freq;
+ uint16_t wr_chan_flags;
+ uint8_t wr_dbm_antsignal;
+ uint8_t wr_antenna;
+ uint8_t wr_antsignal;
+} __packed;
+#define MTW_RATECTL_OFF 0
+#define MTW_RX_RADIOTAP_PRESENT \
+ (1 << IEEE80211_RADIOTAP_FLAGS | \
+ 1 << IEEE80211_RADIOTAP_RATE | \
+ 1 << IEEE80211_RADIOTAP_CHANNEL | \
+ 1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL | \
+ 1 << IEEE80211_RADIOTAP_ANTENNA | \
+ 1 << IEEE80211_RADIOTAP_DB_ANTSIGNAL)
+struct mtw_tx_radiotap_header {
+ struct ieee80211_radiotap_header wt_ihdr;
+ uint8_t wt_flags;
+ uint8_t wt_rate;
+ uint16_t wt_chan_freq;
+ uint16_t wt_chan_flags;
+ uint8_t wt_hwqueue;
+} __packed;
+
+#define MTW_TX_RADIOTAP_PRESENT \
+ (1 << IEEE80211_RADIOTAP_FLAGS | \
+ 1 << IEEE80211_RADIOTAP_RATE | \
+ 1 << IEEE80211_RADIOTAP_CHANNEL)
+
+struct mtw_softc;
+
+struct mtw_fw_data {
+ uint16_t len;
+ uint16_t flags;
+
+ uint8_t *buf;
+ uint32_t buflen;
+};
+
+struct mtw_tx_desc {
+ uint32_t flags;
+#define RT2573_TX_BURST (1 << 0)
+#define RT2573_TX_VALID (1 << 1)
+#define RT2573_TX_MORE_FRAG (1 << 2)
+#define RT2573_TX_NEED_ACK (1 << 3)
+#define RT2573_TX_TIMESTAMP (1 << 4)
+#define RT2573_TX_OFDM (1 << 5)
+#define RT2573_TX_IFS_SIFS (1 << 6)
+#define RT2573_TX_LONG_RETRY (1 << 7)
+#define RT2573_TX_TKIPMIC (1 << 8)
+#define RT2573_TX_KEY_PAIR (1 << 9)
+#define RT2573_TX_KEY_ID(id) (((id) & 0x3f) << 10)
+#define RT2573_TX_CIP_MODE(m) ((m) << 29)
+
+ uint16_t wme;
+#define RT2573_QID(v) (v)
+#define RT2573_AIFSN(v) ((v) << 4)
+#define RT2573_LOGCWMIN(v) ((v) << 8)
+#define RT2573_LOGCWMAX(v) ((v) << 12)
+
+ uint8_t hdrlen;
+ uint8_t xflags;
+#define RT2573_TX_HWSEQ (1 << 4)
+
+ uint8_t plcp_signal;
+ uint8_t plcp_service;
+#define RT2573_PLCP_LENGEXT 0x80
+
+ uint8_t plcp_length_lo;
+ uint8_t plcp_length_hi;
+
+ uint32_t iv;
+ uint32_t eiv;
+
+ uint8_t offset;
+ uint8_t qid;
+ uint8_t txpower;
+#define RT2573_DEFAULT_TXPOWER 0
+
+ uint8_t reserved;
+} __packed;
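
An illustrative composition of the RT2573_* descriptor bits defined above; the particular combination is editorial, not taken from the driver.

	uint32_t flags = RT2573_TX_VALID | RT2573_TX_NEED_ACK |
	    RT2573_TX_KEY_ID(0) | RT2573_TX_CIP_MODE(1);	/* cipher mode 1: assumed */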
+
+struct mtw_tx_data {
+ STAILQ_ENTRY(mtw_tx_data) next;
+ struct mbuf *m;
+ struct mtw_softc *sc;
+ struct usbd_xfer *xfer;
+ uint8_t qid;
+ uint8_t ridx;
+ uint32_t buflen;
+	struct ieee80211_node	*ni;
+	uint8_t	desc[sizeof(struct mtw_txd) + sizeof(struct mtw_txwi)];
+};
+
+struct mtw_rx_data {
+ STAILQ_ENTRY(mtw_rx_data) next;
+ struct mtw_softc *sc;
+ struct usbd_xfer *xfer;
+
+ uint8_t *buf;
+};
+
+struct mtw_tx_ring {
+ struct mtw_tx_data data[MTW_TX_RING_COUNT];
+ struct usbd_pipe *pipeh;
+ int cur;
+ int queued;
+ uint8_t pipe_no;
+};
+
+struct mtw_rx_ring {
+ struct mtw_rx_data data[MTW_RX_RING_COUNT];
+ struct usbd_pipe *pipeh;
+ uint8_t pipe_no;
+};
+
+struct mtw_vap {
+ struct ieee80211vap vap;
+ struct mbuf *beacon_mbuf;
+
+ int (*newstate)(struct ieee80211vap *,
+ enum ieee80211_state, int);
+ void (*recv_mgmt)(struct ieee80211_node *,
+ struct mbuf *, int,
+ const struct ieee80211_rx_stats *,
+ int, int);
+
+ uint8_t rvp_id;
+};
+#define MTW_VAP(vap) ((struct mtw_vap *)(vap))
+struct mtw_host_cmd {
+ void (*cb)(struct mtw_softc *, void *);
+ uint8_t data[256];
+};
+
+struct mtw_cmd_newstate {
+ enum ieee80211_state state;
+ int arg;
+};
+
+struct mtw_cmd_key {
+ struct ieee80211_key key;
+ struct ieee80211_node *ni;
+};
+
+#define MTW_HOST_CMD_RING_COUNT 32
+struct mtw_host_cmd_ring {
+ struct mtw_host_cmd cmd[MTW_HOST_CMD_RING_COUNT];
+ int cur;
+ int next;
+ int queued;
+};
+
+struct mtw_node {
+ struct ieee80211_node ni;
+ uint8_t mgt_ridx;
+ uint8_t amrr_ridx;
+ uint8_t fix_ridx;
+};
+#define MTW_NODE(ni) ((struct mtw_node *)(ni))
+
+struct mtw_mcu_tx {
+ struct mtw_softc *sc;
+ struct usbd_xfer *xfer;
+ struct usbd_pipe *pipeh;
+ uint8_t pipe_no;
+ uint8_t *buf;
+ int8_t seq;
+};
+
+#define MTW_MCU_IVB_LEN 0x40
+struct mtw_ucode_hdr {
+ uint32_t ilm_len;
+ uint32_t dlm_len;
+ uint16_t build_ver;
+ uint16_t fw_ver;
+ uint8_t pad[4];
+ char build_time[16];
+} __packed;
+
+struct mtw_ucode {
+ struct mtw_ucode_hdr hdr;
+ uint8_t ivb[MTW_MCU_IVB_LEN];
+ uint8_t data[];
+} __packed;
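
A sketch of carving a loaded firmware image per the layout above (header, 0x40-byte IVB, then payload). The le32toh() byte-order handling and whether ilm_len counts the IVB are assumptions; fwdata is a hypothetical pointer to the loaded image.

	const struct mtw_ucode *fw = (const void *)fwdata;
	uint32_t ilm_len = le32toh(fw->hdr.ilm_len);
	const uint8_t *ilm = fw->data;
	const uint8_t *dlm = fw->data + ilm_len;	/* DLM assumed to follow the ILM payload */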
+
+STAILQ_HEAD(mtw_tx_data_head, mtw_tx_data);
+struct mtw_endpoint_queue {
+ struct mtw_tx_data tx_data[MTW_TX_RING_COUNT];
+ struct mtw_tx_data_head tx_qh;
+ struct mtw_tx_data_head tx_fh;
+ uint32_t tx_nfree;
+};
+
+struct mtw_cmdq {
+ void *arg0;
+ void *arg1;
+ void (*func)(void *);
+ struct ieee80211_key *k;
+ struct ieee80211_key key;
+ uint8_t mac[IEEE80211_ADDR_LEN];
+ uint8_t wcid;
+};
+enum {
+ MTW_BULK_RX, /* = WME_AC_BK */
+ //MTW_BULK_RX1,
+ MTW_BULK_TX_BE, /* = WME_AC_BE */
+ MTW_BULK_TX_VI, /* = WME_AC_VI */
+ MTW_BULK_TX_VO, /* = WME_AC_VO */
+ MTW_BULK_TX_HCCA,
+ MTW_BULK_TX_PRIO,
+ MTW_BULK_TX_BK,
+ MTW_BULK_FW_CMD,
+ MTW_BULK_RAW_TX,
+ MTW_N_XFER,
+};
+#define MTW_TXCNT 0
+#define MTW_SUCCESS 1
+#define MTW_RETRY 2
+#define MTW_EP_QUEUES 6
+#define MTW_FLAG_FWLOAD_NEEDED 0x01
+#define MTW_RUNNING 0x02
+struct mtw_softc {
+ device_t sc_dev;
+ int sc_idx;
+ struct ieee80211com sc_ic;
+ struct ieee80211_ratectl_tx_stats sc_txs;
+ int (*sc_newstate)(struct ieee80211com *,
+ enum ieee80211_state, int);
+ int (*sc_srom_read)(struct mtw_softc *,
+ uint16_t, uint16_t *);
+#define MTW_CMDQ_MAX 16
+#define MTW_CMDQ_MASQ (MTW_CMDQ_MAX - 1)
+#define MTW_CMDQ_ABORT 0
+#define MTW_CMDQ_GO 1
+ struct mbuf *rx_m;
+ uint8_t runbmap;
+ uint8_t running;
+ uint8_t ap_running;
+ uint8_t adhoc_running;
+ uint8_t sta_running;
+ uint8_t fwloading;
+ uint16_t wcid_stats[MTW_WCID_MAX + 1][3];
+ struct mbufq sc_snd;
+ uint8_t cmdq_exec;
+ uint8_t fifo_cnt;
+ uint32_t sc_flags;
+ uint8_t rvp_cnt;
+ uint8_t cmdq_run;
+ uint8_t rvp_bmap;
+ struct mtw_cmdq cmdq[MTW_CMDQ_MAX];
+ struct task cmdq_task;
+ uint8_t cmdq_mtw;
+ uint8_t cmdq_key_set;
+ struct usb_device *sc_udev;
+ struct usb_interface *sc_iface;
+ uint32_t cmdq_store;
+ struct mtx sc_mtx;
+ uint32_t sc_mcu_xferlen;
+ struct usb_xfer *sc_xfer[MTW_N_XFER];
+ uint16_t asic_ver;
+ uint16_t asic_rev;
+ uint16_t mac_ver;
+ uint16_t mac_rev;
+ uint16_t rf_rev;
+ int ridx;
+ int amrr_ridx;
+ uint8_t freq;
+ uint8_t ntxchains;
+ uint8_t nrxchains;
+
+ struct mtw_txd_fw *txd_fw[4];
+ int sc_sent;
+ uint8_t sc_ivb_1[MTW_MCU_IVB_LEN];
+ struct mtw_endpoint_queue sc_epq[MTW_BULK_RX];
+ uint8_t rfswitch;
+ uint8_t ext_2ghz_lna;
+ uint8_t ext_5ghz_lna;
+ uint8_t calib_2ghz;
+ uint8_t calib_5ghz;
+ uint8_t txmixgain_2ghz;
+ uint8_t txmixgain_5ghz;
+ int8_t txpow1[54];
+ int8_t txpow2[54];
+ int8_t txpow3[54];
+ int8_t rssi_2ghz[3];
+ int8_t rssi_5ghz[3];
+ uint8_t lna[4];
+
+ uint8_t leds;
+ uint16_t led[3];
+ uint32_t txpow20mhz[5];
+ uint32_t txpow40mhz_2ghz[5];
+ uint32_t txpow40mhz_5ghz[5];
+
+ int8_t bbp_temp;
+ uint8_t rf_freq_offset;
+ uint32_t rf_pa_mode[2];
+ int sc_rf_calibrated;
+ int sc_bw_calibrated;
+ int sc_chan_group;
+
+ uint8_t cmd_seq;
+ uint8_t sc_detached;
+ struct mtw_tx_ring sc_mcu;
+ struct mtw_rx_ring rxq[MTW_RXQ_COUNT];
+ struct mtw_tx_ring txq[MTW_TXQ_COUNT];
+ struct task ratectl_task;
+ struct usb_callout ratectl_ch;
+ uint8_t ratectl_run;
+ //struct mtw_host_cmd_ring cmdq;
+ uint8_t qfullmsk;
+ int sc_tx_timer;
+
+ uint8_t sc_bssid[IEEE80211_ADDR_LEN];
+
+ union {
+ struct mtw_rx_radiotap_header th;
+ uint8_t pad[64];
+ } sc_rxtapu;
+#define sc_rxtap sc_rxtapu.th
+ int sc_rxtap_len;
+
+ union {
+ struct mtw_tx_radiotap_header th;
+ uint8_t pad[64];
+ } sc_txtapu;
+#define sc_txtap sc_txtapu.th
+ int sc_txtap_len;
+ int sc_key_tasks;
+};
+#define MTW_LOCK(sc) mtx_lock(&(sc)->sc_mtx)
+#define MTW_UNLOCK(sc) mtx_unlock(&(sc)->sc_mtx)
+#define MTW_LOCK_ASSERT(sc, t) mtx_assert(&(sc)->sc_mtx, t)
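
Hedged usage sketch for the macros above: driver paths serialize queue state on the softc mutex.

	MTW_LOCK(sc);
	MTW_LOCK_ASSERT(sc, MA_OWNED);
	/* ... manipulate sc->txq / sc->rxq state ... */
	MTW_UNLOCK(sc);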
diff --git a/sys/dev/usb/wlan/if_rsu.c b/sys/dev/usb/wlan/if_rsu.c
index e000d1fb5992..e976948f6849 100644
--- a/sys/dev/usb/wlan/if_rsu.c
+++ b/sys/dev/usb/wlan/if_rsu.c
@@ -15,7 +15,6 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*
* Driver for Realtek RTL8188SU/RTL8191SU/RTL8192SU.
*
@@ -145,6 +144,7 @@ static const STRUCT_USB_HOST_ID rsu_devs[] = {
RSU_DEV_HT(SENAO, RTL8192SU_2),
RSU_DEV_HT(SITECOMEU, WL349V1),
RSU_DEV_HT(SITECOMEU, WL353),
+ RSU_DEV_HT(SITECOMEU, RTL8188S),
RSU_DEV_HT(SWEEX2, LW154),
RSU_DEV_HT(TRENDNET, TEW646UBH),
#undef RSU_DEV_HT
@@ -371,18 +371,16 @@ rsu_update_chw(struct ieee80211com *ic)
/*
* notification from net80211 that it'd like to do A-MPDU on the given TID.
- *
- * Note: this actually hangs traffic at the present moment, so don't use it.
- * The firmware debug does indiciate it's sending and establishing a TX AMPDU
- * session, but then no traffic flows.
*/
static int
rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
-#if 0
struct rsu_softc *sc = ni->ni_ic->ic_softc;
struct r92s_add_ba_req req;
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: called, tid=%d\n",
+ __func__, tap->txa_tid);
+
/* Don't enable if it's requested or running */
if (IEEE80211_AMPDU_REQUESTED(tap))
return (0);
@@ -397,23 +395,30 @@ rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
return (0);
/* Send the firmware command */
- RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: establishing AMPDU TX for TID %d\n",
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU,
+ "%s: establishing AMPDU TX for TID %d\n",
__func__,
tap->txa_tid);
RSU_LOCK(sc);
- if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 1) {
+ if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 0) {
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd failure\n",
+ __func__);
/* Mark failure */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0);
+		/* Return 0, we're driving this ourselves */
return (0);
}
RSU_UNLOCK(sc);
+ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: AMPDU TX cmd success\n",
+ __func__);
+
/* Mark success; we don't get any further notifications */
- (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
-#endif
- /* Return 0, we're driving this ourselves */
+ ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1);
+
+	/* Return 0, we're driving this ourselves */
return (0);
}
@@ -563,9 +568,7 @@ rsu_attach(device_t self)
/* Enable basic HT */
ic->ic_htcaps = IEEE80211_HTC_HT |
-#if 0
IEEE80211_HTC_AMPDU |
-#endif
IEEE80211_HTC_AMSDU |
IEEE80211_HTCAP_MAXAMSDU_3839 |
IEEE80211_HTCAP_SMPS_OFF;
@@ -576,6 +579,7 @@ rsu_attach(device_t self)
ic->ic_rxstream = sc->sc_nrxstream;
}
ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
rsu_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1500,7 +1504,8 @@ rsu_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
ni = ieee80211_ref_node(vap->iv_bss);
rs = &ni->ni_rates;
/* Indicate highest supported rate. */
- ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1];
+ ieee80211_node_set_txrate_dot11rate(ni,
+ rs->rs_rates[rs->rs_nrates - 1]);
(void) rsu_set_fw_power_state(sc, RSU_PWR_SLEEP);
ieee80211_free_node(ni);
startcal = 1;
@@ -1526,16 +1531,20 @@ rsu_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
struct rsu_softc *sc = vap->iv_ic->ic_softc;
int is_checked = 0;
- if (&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (ieee80211_is_key_global(vap, k)) {
*keyix = ieee80211_crypto_get_key_wepidx(vap, k);
} else {
+ /* Note: assumes this is a pairwise key */
if (vap->iv_opmode != IEEE80211_M_STA) {
*keyix = 0;
/* TODO: obtain keyix from node id */
is_checked = 1;
k->wk_flags |= IEEE80211_KEY_SWCRYPT;
} else
+ /*
+ * TODO: should allocate these from the CAM space;
+ * skipping over the fixed slots and _BC / _BSS.
+ */
*keyix = R92S_MACID_BSS;
}
@@ -1570,8 +1579,7 @@ rsu_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
}
/* Handle group keys. */
- if (&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (ieee80211_is_key_global(vap, k)) {
KASSERT(k->wk_keyix < nitems(sc->group_keys),
("keyix %u > %zu\n", k->wk_keyix, nitems(sc->group_keys)));
@@ -2166,7 +2174,7 @@ rsu_event_addba_req_report(struct rsu_softc *sc, uint8_t *buf, int len)
__func__,
ether_sprintf(ba->mac_addr),
(int) ba->tid,
- (int) le16toh(ba->ssn));
+ (int) le16toh(ba->ssn) >> 4);
/* XXX do node lookup; this is STA specific */
@@ -2212,6 +2220,11 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
if (vap->iv_state == IEEE80211_S_AUTH)
rsu_event_join_bss(sc, buf, len);
break;
+
+ /* TODO: what about R92S_EVT_ADD_STA? and decoding macid? */
+ /* It likely is required for IBSS/AP mode */
+
+ /* TODO: should I be doing this transition in AP mode? */
case R92S_EVT_DEL_STA:
RSU_DPRINTF(sc, RSU_DEBUG_FWCMD | RSU_DEBUG_STATE,
"%s: disassociated from %s\n", __func__,
@@ -2229,6 +2242,7 @@ rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len)
break;
case R92S_EVT_FWDBG:
buf[60] = '\0';
+ /* TODO: some are \n terminated, some aren't, sigh */
RSU_DPRINTF(sc, RSU_DEBUG_FWDBG, "FWDBG: %s\n", (char *)buf);
break;
case R92S_EVT_ADDBA_REQ_REPORT:
@@ -2782,6 +2796,9 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
if (rate != 0)
ridx = rate2ridx(rate);
+ /* Assign sequence number, A-MPDU or otherwise */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
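
This ordering recurs in every driver touched by the change: the sequence number is assigned before ieee80211_crypto_encap(), so the 802.11 header is final before encryption. A minimal sketch of the shared pattern (treating -1 as "derive the TID from the frame" is an assumption, as is the exact error path):

	ieee80211_output_seqno_assign(ni, -1, m0);
	if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
		k = ieee80211_crypto_encap(ni, m0);
		if (k == NULL) {
			m_freem(m0);
			return (ENOBUFS);
		}
	}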
@@ -2838,8 +2855,10 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW0_OFFSET, sizeof(*txd)) |
R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG);
+ /* TODO: correct macid here? It should be in the node */
txd->txdw1 |= htole32(
SM(R92S_TXDW1_MACID, R92S_MACID_BSS) | SM(R92S_TXDW1_QSEL, qid));
+
if (!hasqos)
txd->txdw1 |= htole32(R92S_TXDW1_NONQOS);
if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWENCRYPT)) {
@@ -2860,8 +2879,13 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
SM(R92S_TXDW1_CIPHER, cipher) |
SM(R92S_TXDW1_KEYIDX, k->wk_keyix));
}
- /* XXX todo: set AGGEN bit if appropriate? */
- txd->txdw2 |= htole32(R92S_TXDW2_BK);
+
+ /*
+ * Note: no need to set TXDW2_AGGEN/TXDW2_BK to mark
+ * A-MPDU and non-AMPDU candidates; the firmware will
+ * handle this for us.
+ */
+
if (ismcast)
txd->txdw2 |= htole32(R92S_TXDW2_BMCAST);
@@ -2880,8 +2904,11 @@ rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni,
}
/*
- * Firmware will use and increment the sequence number for the
- * specified priority.
+ * Pass in prio here, NOT the sequence number.
+ *
+ * The hardware is in theory incrementing sequence numbers
+ * for us, but I haven't yet figured out exactly when/how
+ * it's supposed to work.
*/
txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, prio));
@@ -3481,7 +3508,8 @@ rsu_load_firmware(struct rsu_softc *sc)
dmem.vcs_mode = R92S_VCS_MODE_RTS_CTS;
dmem.turbo_mode = 0;
dmem.bw40_en = !! (ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40);
- dmem.amsdu2ampdu_en = !! (sc->sc_ht);
+ /* net80211 handles AMSDUs just fine */
+ dmem.amsdu2ampdu_en = 0;
dmem.ampdu_en = !! (sc->sc_ht);
dmem.agg_offload = !! (sc->sc_ht);
dmem.qos_en = 1;
diff --git a/sys/dev/usb/wlan/if_rsureg.h b/sys/dev/usb/wlan/if_rsureg.h
index fb706a4d9b1a..e2074e1dd2ad 100644
--- a/sys/dev/usb/wlan/if_rsureg.h
+++ b/sys/dev/usb/wlan/if_rsureg.h
@@ -593,7 +593,14 @@ struct r92s_event_join_bss {
struct ndis_wlan_bssid_ex bss;
} __packed;
-#define R92S_MACID_BSS 5 /* XXX hardcoded somewhere */
+/*
+ * This is hard-coded in the firmware for a STA mode
+ * BSS join. If you turn on FWDEBUG, you'll see this
+ * in the logs:
+ *
+ * rsu0: FWDBG: mac id #5: 0000005b, 000fffff, 00000000
+ */
+#define R92S_MACID_BSS 5
/* Rx MAC descriptor. */
struct r92s_rx_stat {
diff --git a/sys/dev/usb/wlan/if_rum.c b/sys/dev/usb/wlan/if_rum.c
index 2720f2ffedcb..b822766f0ba5 100644
--- a/sys/dev/usb/wlan/if_rum.c
+++ b/sys/dev/usb/wlan/if_rum.c
@@ -18,7 +18,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*-
* Ralink Technology RT2501USB/RT2601USB chipset driver
* http://www.ralinktech.com.tw/
@@ -1468,8 +1467,7 @@ rum_tx_crypto_flags(struct rum_softc *sc, struct ieee80211_node *ni,
flags |= RT2573_TX_CIP_MODE(mode);
/* Do not trust GROUP flag */
- if (!(k >= &vap->iv_nw_keys[0] &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]))
+ if (ieee80211_is_key_unicast(vap, k))
flags |= RT2573_TX_KEY_PAIR;
else
pos += 0 * RT2573_SKEY_MAX; /* vap id */
@@ -1527,9 +1525,7 @@ rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
USETW(wh->i_dur, dur);
/* tell hardware to add timestamp for probe responses */
- if (type == IEEE80211_FC0_TYPE_MGT &&
- (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
- IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
flags |= RT2573_TX_TIMESTAMP;
}
@@ -1649,7 +1645,7 @@ rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = tp->ucastrate;
else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
@@ -3006,8 +3002,7 @@ rum_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
struct rum_softc *sc = vap->iv_ic->ic_softc;
uint8_t i;
- if (!(&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) {
+ if (ieee80211_is_key_unicast(vap, k)) {
if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) {
RUM_LOCK(sc);
for (i = 0; i < RT2573_ADDR_MAX; i++) {
@@ -3044,7 +3039,7 @@ rum_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k)
return 1;
}
- group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID];
+ group = ieee80211_is_key_global(vap, k);
return !rum_cmd_sleepable(sc, k, sizeof(*k), 0,
group ? rum_group_key_set_cb : rum_pair_key_set_cb);
@@ -3061,7 +3056,7 @@ rum_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k)
return 1;
}
- group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID];
+ group = ieee80211_is_key_global(vap, k);
return !rum_cmd_sleepable(sc, k, sizeof(*k), 0,
group ? rum_group_key_del_cb : rum_pair_key_del_cb);
diff --git a/sys/dev/usb/wlan/if_run.c b/sys/dev/usb/wlan/if_run.c
index e2ea78f78b13..147aa4044057 100644
--- a/sys/dev/usb/wlan/if_run.c
+++ b/sys/dev/usb/wlan/if_run.c
@@ -17,7 +17,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*-
* Ralink Technology RT2700U/RT2800U/RT3000U/RT3900E chipset driver.
* http://www.ralinktech.com/
@@ -325,6 +324,7 @@ static const STRUCT_USB_HOST_ID run_devs[] = {
RUN_DEV(SITECOMEU, RT2870_3),
RUN_DEV(SITECOMEU, RT2870_4),
RUN_DEV(SITECOMEU, RT3070),
+ RUN_DEV(SITECOMEU, RT3070_1),
RUN_DEV(SITECOMEU, RT3070_2),
RUN_DEV(SITECOMEU, RT3070_3),
RUN_DEV(SITECOMEU, RT3070_4),
@@ -882,6 +882,7 @@ run_attach(device_t self)
ic->ic_flags |= IEEE80211_F_DATAPAD;
ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
run_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2685,6 +2686,7 @@ run_iter_func(void *arg, struct ieee80211_node *ni)
union run_stats sta[2];
uint16_t (*wstat)[3];
int error, ridx;
+ uint8_t dot11rate;
RUN_LOCK(sc);
@@ -2737,15 +2739,17 @@ run_iter_func(void *arg, struct ieee80211_node *ni)
ieee80211_ratectl_tx_update(vap, txs);
ieee80211_ratectl_rate(ni, NULL, 0);
/* XXX TODO: methodize with MCS rates */
+ dot11rate = ieee80211_node_get_txrate_dot11rate(ni);
for (ridx = 0; ridx < RT2860_RIDX_MAX; ridx++)
- if (rt2860_rates[ridx].rate == ni->ni_txrate)
+ if (rt2860_rates[ridx].rate == dot11rate)
break;
rn->amrr_ridx = ridx;
fail:
RUN_UNLOCK(sc);
- RUN_DPRINTF(sc, RUN_DEBUG_RATE, "rate=%d, ridx=%d\n", ni->ni_txrate, rn->amrr_ridx);
+ RUN_DPRINTF(sc, RUN_DEBUG_RATE, "rate=0x%02x, ridx=%d\n",
+ ieee80211_node_get_txrate_dot11rate(ni), rn->amrr_ridx);
}
static void
@@ -3519,6 +3523,9 @@ run_tx(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
/*
@@ -3595,9 +3602,7 @@ run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
wh = mtod(m, struct ieee80211_frame *);
/* tell hardware to add timestamp for probe responses */
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP))
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
wflags |= RT2860_TX_TS;
else if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
xflags |= RT2860_TX_ACK;
@@ -3626,6 +3631,9 @@ run_tx_mgt(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
data->ni = ni;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending mgt frame len=%d rate=%d\n",
@@ -3770,6 +3778,9 @@ run_tx_param(struct run_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
break;
data->ridx = ridx;
+ /* Assign sequence number now, regardless of A-MPDU TX or otherwise (for now) */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
run_set_tx_desc(sc, data);
RUN_DPRINTF(sc, RUN_DEBUG_XMIT, "sending raw frame len=%u rate=%u\n",
@@ -6415,6 +6426,10 @@ run_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap)
{
/* For now, no A-MPDU TX support in the driver */
+ /*
+	 * TODO: maybe we need to enable seqno generation too?
+ * What other TX desc bits are missing/needed?
+ */
return (0);
}
diff --git a/sys/dev/usb/wlan/if_uath.c b/sys/dev/usb/wlan/if_uath.c
index e78003bc250a..cc303e565bca 100644
--- a/sys/dev/usb/wlan/if_uath.c
+++ b/sys/dev/usb/wlan/if_uath.c
@@ -50,7 +50,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*-
* Driver for Atheros AR5523 USB parts.
*
@@ -183,6 +182,7 @@ static const STRUCT_USB_HOST_ID uath_devs[] = {
UATH_DEV(UMEDIA, AR5523_2),
UATH_DEV(WISTRONNEWEB, AR5523_1),
UATH_DEV(WISTRONNEWEB, AR5523_2),
+ UATH_DEV(WISTRONNEWEB, AR5523_2_ALT),
UATH_DEV(ZCOM, AR5523)
#undef UATH_DEV
};
@@ -432,6 +432,8 @@ uath_attach(device_t dev)
/* put a regulatory domain to reveal informations. */
uath_regdomain = sc->sc_devcap.regDomain;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11B);
setbit(bands, IEEE80211_MODE_11G);
@@ -441,6 +443,12 @@ uath_attach(device_t dev)
ieee80211_init_channels(ic, NULL, bands);
ieee80211_ifattach(ic);
+
+ /* Note: this has to happen AFTER ieee80211_ifattach() */
+ ieee80211_set_software_ciphers(ic,
+ IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP |
+ IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_AES_GCM_128);
+
ic->ic_raw_xmit = uath_raw_xmit;
ic->ic_scan_start = uath_scan_start;
ic->ic_scan_end = uath_scan_end;
@@ -1542,6 +1550,8 @@ uath_tx_start(struct uath_softc *sc, struct mbuf *m0, struct ieee80211_node *ni,
ieee80211_radiotap_tx(vap, m0);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
@@ -1830,6 +1840,8 @@ uath_set_channel(struct ieee80211com *ic)
UATH_UNLOCK(sc);
return;
}
+ /* flush data & control requests into the target */
+ (void)uath_flush(sc);
(void)uath_switch_channel(sc, ic->ic_curchan);
UATH_UNLOCK(sc);
}
@@ -2014,6 +2026,8 @@ uath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
break;
case IEEE80211_S_AUTH:
+ /* flush data & control requests into the target */
+ (void)uath_flush(sc);
/* XXX good place? set RTS threshold */
uath_config(sc, CFG_USER_RTS_THRESHOLD, vap->iv_rtsthreshold);
/* XXX bad place */
@@ -2053,7 +2067,8 @@ uath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
* Tx rate is controlled by firmware, report the maximum
* negotiated rate in ifconfig output.
*/
- ni->ni_txrate = ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1];
+ ieee80211_node_set_txrate_dot11rate(ni,
+ ni->ni_rates.rs_rates[ni->ni_rates.rs_nrates-1]);
if (uath_write_associd(sc) != 0) {
device_printf(sc->sc_dev,
@@ -2303,10 +2318,12 @@ uath_cmdeof(struct uath_softc *sc, struct uath_cmd *cmd)
__func__, dlen, sizeof(uint32_t));
return;
}
- /* XXX have submitter do this */
- /* copy answer into caller's supplied buffer */
- bcopy(hdr+1, cmd->odata, sizeof(uint32_t));
- cmd->olen = sizeof(uint32_t);
+ if (cmd->odata != NULL) {
+ /* XXX have submitter do this */
+ /* copy answer into caller's supplied buffer */
+ bcopy(hdr+1, cmd->odata, sizeof(uint32_t));
+ cmd->olen = sizeof(uint32_t);
+ }
wakeup_one(cmd); /* wake up caller */
break;
diff --git a/sys/dev/usb/wlan/if_upgt.c b/sys/dev/usb/wlan/if_upgt.c
index 55d231e2c655..1ab833301b3c 100644
--- a/sys/dev/usb/wlan/if_upgt.c
+++ b/sys/dev/usb/wlan/if_upgt.c
@@ -354,6 +354,8 @@ upgt_attach(device_t dev)
ic->ic_transmit = upgt_transmit;
ic->ic_parent = upgt_parent;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ieee80211_radiotap_attach(ic,
&sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
UPGT_TX_RADIOTAP_PRESENT,
@@ -2116,6 +2118,9 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
upgt_set_led(sc, UPGT_LED_BLINK);
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m);
+
/*
* Software crypto.
*/
@@ -2139,8 +2144,7 @@ upgt_tx_start(struct upgt_softc *sc, struct mbuf *m, struct ieee80211_node *ni,
mem->addr = htole32(data->addr);
txdesc = (struct upgt_lmac_tx_desc *)(mem + 1);
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
- IEEE80211_FC0_TYPE_MGT) {
+ if (IEEE80211_IS_MGMT(wh)) {
/* mgmt frames */
txdesc->header1.flags = UPGT_H1_FLAGS_TX_MGMT;
/* always send mgmt frames at lowest rate (DS1) */
diff --git a/sys/dev/usb/wlan/if_ural.c b/sys/dev/usb/wlan/if_ural.c
index 1acae6d84b53..adef924a085c 100644
--- a/sys/dev/usb/wlan/if_ural.c
+++ b/sys/dev/usb/wlan/if_ural.c
@@ -19,7 +19,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*-
* Ralink Technology RT2500USB chipset driver
* http://www.ralinktech.com/
@@ -474,6 +473,8 @@ ural_attach(device_t self)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
ural_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1074,6 +1075,8 @@ ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
STAILQ_REMOVE_HEAD(&sc->tx_free, next);
sc->tx_nfree--;
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
wh = mtod(m0, struct ieee80211_frame *);
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
@@ -1097,10 +1100,7 @@ ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
USETW(wh->i_dur, dur);
/* tell hardware to add timestamp for probe responses */
- if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
- IEEE80211_FC0_TYPE_MGT &&
- (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
- IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
flags |= RAL_TX_TIMESTAMP;
}
@@ -1230,9 +1230,11 @@ ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = tp->ucastrate;
else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
diff --git a/sys/dev/usb/wlan/if_urtw.c b/sys/dev/usb/wlan/if_urtw.c
index d08a3237e9b2..86cf4c653ae7 100644
--- a/sys/dev/usb/wlan/if_urtw.c
+++ b/sys/dev/usb/wlan/if_urtw.c
@@ -14,7 +14,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
#include "opt_wlan.h"
#include <sys/param.h>
@@ -885,6 +884,8 @@ urtw_attach(device_t dev)
/* XXX TODO: setup regdomain if URTW_EPROM_CHANPLAN_BY_HW bit is set.*/
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
urtw_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -1700,6 +1701,10 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1);
type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+
+ /* Assign sequence number */
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
/*
* Software crypto.
*/
@@ -1724,8 +1729,7 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
ieee80211_radiotap_tx(vap, m0);
}
- if (type == IEEE80211_FC0_TYPE_MGT ||
- type == IEEE80211_FC0_TYPE_CTL ||
+ if (IEEE80211_IS_MGMT(wh) || IEEE80211_IS_CTL(wh) ||
(m0->m_flags & M_EAPOL) != 0) {
rate = tp->mgmtrate;
} else {
@@ -1803,9 +1807,7 @@ urtw_tx_start(struct urtw_softc *sc, struct ieee80211_node *ni, struct mbuf *m0,
}
tx->flag = htole32(flags);
tx->txdur = txdur;
- if (type == IEEE80211_FC0_TYPE_MGT &&
- (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
- IEEE80211_FC0_SUBTYPE_PROBE_RESP)
+ if (IEEE80211_IS_MGMT_PROBE_RESP(wh))
tx->retry = 1;
else
tx->retry = URTW_TX_MAXRETRY;
diff --git a/sys/dev/usb/wlan/if_zyd.c b/sys/dev/usb/wlan/if_zyd.c
index a4dc6b972c96..7affdcdce089 100644
--- a/sys/dev/usb/wlan/if_zyd.c
+++ b/sys/dev/usb/wlan/if_zyd.c
@@ -18,7 +18,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include <sys/cdefs.h>
/*
* ZyDAS ZD1211/ZD1211B USB WLAN driver.
*/
@@ -385,6 +384,8 @@ zyd_attach(device_t dev)
| IEEE80211_C_WPA /* 802.11i */
;
+ ic->ic_flags_ext |= IEEE80211_FEXT_SEQNO_OFFLOAD;
+
zyd_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
ic->ic_channels);
@@ -2460,10 +2461,12 @@ zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
rate = tp->ucastrate;
else {
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
}
+ ieee80211_output_seqno_assign(ni, -1, m0);
+
if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
k = ieee80211_crypto_encap(ni, m0);
if (k == NULL) {
@@ -2505,9 +2508,7 @@ zyd_tx_start(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni)
}
} else
desc->flags |= ZYD_TX_FLAG_MULTICAST;
- if ((wh->i_fc[0] &
- (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
- (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_PS_POLL))
+ if (IEEE80211_IS_CTL_PS_POLL(wh))
desc->flags |= ZYD_TX_FLAG_TYPE(ZYD_TX_TYPE_PS_POLL);
/* actual transmit length (XXX why +10?) */
diff --git a/sys/dev/vge/if_vge.c b/sys/dev/vge/if_vge.c
index 37606f28ba17..227ce30d9a2c 100644
--- a/sys/dev/vge/if_vge.c
+++ b/sys/dev/vge/if_vge.c
@@ -1024,10 +1024,8 @@ vge_attach(device_t dev)
sc->vge_expcap = cap;
} else
sc->vge_flags |= VGE_FLAG_JUMBO;
- if (pci_find_cap(dev, PCIY_PMG, &cap) == 0) {
+ if (pci_has_pm(dev))
sc->vge_flags |= VGE_FLAG_PMCAP;
- sc->vge_pmcap = cap;
- }
rid = 0;
msic = pci_msi_count(dev);
if (msi_disable == 0 && msic > 0) {
@@ -1093,12 +1091,6 @@ vge_attach(device_t dev)
goto fail;
ifp = sc->vge_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
-
vge_miipoll_start(sc);
/* Do MII setup */
error = mii_attach(dev, &sc->vge_miibus, ifp, vge_ifmedia_upd,
@@ -1183,8 +1175,6 @@ vge_detach(device_t dev)
VGE_UNLOCK(sc);
callout_drain(&sc->vge_watchdog);
}
- if (sc->vge_miibus)
- device_delete_child(dev, sc->vge_miibus);
bus_generic_detach(dev);
if (sc->vge_intrhand)
@@ -2452,20 +2442,9 @@ vge_resume(device_t dev)
{
struct vge_softc *sc;
if_t ifp;
- uint16_t pmstat;
sc = device_get_softc(dev);
VGE_LOCK(sc);
- if ((sc->vge_flags & VGE_FLAG_PMCAP) != 0) {
- /* Disable PME and clear PME status. */
- pmstat = pci_read_config(sc->vge_dev,
- sc->vge_pmcap + PCIR_POWER_STATUS, 2);
- if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) {
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->vge_dev,
- sc->vge_pmcap + PCIR_POWER_STATUS, pmstat, 2);
- }
- }
vge_clrwol(sc);
/* Restart MII auto-polling. */
vge_miipoll_start(sc);
@@ -2844,7 +2823,6 @@ static void
vge_setwol(struct vge_softc *sc)
{
if_t ifp;
- uint16_t pmstat;
uint8_t val;
VGE_LOCK_ASSERT(sc);
@@ -2896,13 +2874,8 @@ vge_setwol(struct vge_softc *sc)
val |= VGE_STICKHW_DS0 | VGE_STICKHW_DS1;
CSR_WRITE_1(sc, VGE_PWRSTAT, val);
/* Request PME if WOL is requested. */
- pmstat = pci_read_config(sc->vge_dev, sc->vge_pmcap +
- PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->vge_dev, sc->vge_pmcap + PCIR_POWER_STATUS,
- pmstat, 2);
+ pci_enable_pme(sc->vge_dev);
}
static void
diff --git a/sys/dev/vge/if_vgevar.h b/sys/dev/vge/if_vgevar.h
index 84bd7bcb0fc5..d2b1cf8e4b2a 100644
--- a/sys/dev/vge/if_vgevar.h
+++ b/sys/dev/vge/if_vgevar.h
@@ -191,7 +191,6 @@ struct vge_softc {
#define VGE_FLAG_SUSPENDED 0x4000
#define VGE_FLAG_LINK 0x8000
int vge_expcap;
- int vge_pmcap;
int vge_camidx;
int vge_int_holdoff;
int vge_rx_coal_pkt;
diff --git a/sys/dev/viapm/viapm.c b/sys/dev/viapm/viapm.c
index 4381b5323222..1aaaf25dcc34 100644
--- a/sys/dev/viapm/viapm.c
+++ b/sys/dev/viapm/viapm.c
@@ -371,10 +371,10 @@ viapm_pro_attach(device_t dev)
device_printf(dev, "SMBus revision code 0x%x\n", l);
}
- viapm->smbus = device_add_child(dev, "smbus", -1);
+ viapm->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY);
/* probe and attach the smbus */
- bus_generic_attach(dev);
+ bus_attach_children(dev);
/* disable slave function */
VIAPM_OUTB(SMBSCTRL, VIAPM_INB(SMBSCTRL) & ~SMBSCTRL_ENABLE);
@@ -423,10 +423,10 @@ viapm_586b_attach(device_t dev)
VIAPM_OUTB(GPIO_DIR, VIAPM_INB(GPIO_DIR) | VIAPM_SCL | VIAPM_SDA);
/* add generic bit-banging code */
- if (!(viapm->iicbb = device_add_child(dev, "iicbb", -1)))
+ if (!(viapm->iicbb = device_add_child(dev, "iicbb", DEVICE_UNIT_ANY)))
goto error;
- bus_generic_attach(dev);
+ bus_attach_children(dev);
return 0;
@@ -444,9 +444,6 @@ viapm_586b_detach(device_t dev)
struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev);
bus_generic_detach(dev);
- if (viapm->iicbb) {
- device_delete_child(dev, viapm->iicbb);
- }
if (viapm->iores)
bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid,
@@ -462,9 +459,6 @@ viapm_pro_detach(device_t dev)
struct viapm_softc *viapm = (struct viapm_softc *)device_get_softc(dev);
bus_generic_detach(dev);
- if (viapm->smbus) {
- device_delete_child(dev, viapm->smbus);
- }
bus_release_resource(dev, SYS_RES_IOPORT, viapm->iorid, viapm->iores);
diff --git a/sys/dev/viawd/viawd.c b/sys/dev/viawd/viawd.c
index d088284762cb..9e815b8171d1 100644
--- a/sys/dev/viawd/viawd.c
+++ b/sys/dev/viawd/viawd.c
@@ -125,7 +125,7 @@ viawd_identify(driver_t *driver, device_t parent)
if (viawd_find(parent) == NULL)
return;
- if (device_find_child(parent, driver->name, -1) == NULL)
+ if (device_find_child(parent, driver->name, DEVICE_UNIT_ANY) == NULL)
BUS_ADD_CHILD(parent, 0, driver->name, 0);
}
diff --git a/sys/dev/virtio/block/virtio_blk.c b/sys/dev/virtio/block/virtio_blk.c
index d7fa903936a1..5eb681128e9c 100644
--- a/sys/dev/virtio/block/virtio_blk.c
+++ b/sys/dev/virtio/block/virtio_blk.c
@@ -699,10 +699,14 @@ vtblk_alloc_virtqueue(struct vtblk_softc *sc)
{
device_t dev;
struct vq_alloc_info vq_info;
+ int indir_segs;
dev = sc->vtblk_dev;
- VQ_ALLOC_INFO_INIT(&vq_info, sc->vtblk_max_nsegs,
+ indir_segs = 0;
+ if (sc->vtblk_flags & VTBLK_FLAG_INDIRECT)
+ indir_segs = sc->vtblk_max_nsegs;
+ VQ_ALLOC_INFO_INIT(&vq_info, indir_segs,
vtblk_vq_intr, sc, &sc->vtblk_vq,
"%s request", device_get_nameunit(dev));
@@ -755,6 +759,8 @@ vtblk_alloc_disk(struct vtblk_softc *sc, struct virtio_blk_config *blkcfg)
dp->d_hba_device = virtio_get_device(dev);
dp->d_hba_subvendor = virtio_get_subvendor(dev);
dp->d_hba_subdevice = virtio_get_subdevice(dev);
+ strlcpy(dp->d_attachment, device_get_nameunit(dev),
+ sizeof(dp->d_attachment));
if (virtio_with_feature(dev, VIRTIO_BLK_F_RO))
dp->d_flags |= DISKFLAG_WRITE_PROTECT;
@@ -1177,6 +1183,35 @@ vtblk_request_error(struct vtblk_request *req)
return (error);
}
+static struct bio *
+vtblk_queue_complete_one(struct vtblk_softc *sc, struct vtblk_request *req)
+{
+ struct bio *bp;
+
+ if (sc->vtblk_req_ordered != NULL) {
+ MPASS(sc->vtblk_req_ordered == req);
+ sc->vtblk_req_ordered = NULL;
+ }
+
+ bp = req->vbr_bp;
+ if (req->vbr_mapp != NULL) {
+ switch (bp->bio_cmd) {
+ case BIO_READ:
+ bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
+ BUS_DMASYNC_POSTREAD);
+ bus_dmamap_unload(sc->vtblk_dmat, req->vbr_mapp);
+ break;
+ case BIO_WRITE:
+ bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
+ BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(sc->vtblk_dmat, req->vbr_mapp);
+ break;
+ }
+ }
+ bp->bio_error = vtblk_request_error(req);
+ return (bp);
+}
+
static void
vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
{
@@ -1184,31 +1219,9 @@ vtblk_queue_completed(struct vtblk_softc *sc, struct bio_queue *queue)
struct bio *bp;
while ((req = virtqueue_dequeue(sc->vtblk_vq, NULL)) != NULL) {
- if (sc->vtblk_req_ordered != NULL) {
- MPASS(sc->vtblk_req_ordered == req);
- sc->vtblk_req_ordered = NULL;
- }
+ bp = vtblk_queue_complete_one(sc, req);
- bp = req->vbr_bp;
- if (req->vbr_mapp != NULL) {
- switch (bp->bio_cmd) {
- case BIO_READ:
- bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
- BUS_DMASYNC_POSTREAD);
- bus_dmamap_unload(sc->vtblk_dmat,
- req->vbr_mapp);
- break;
- case BIO_WRITE:
- bus_dmamap_sync(sc->vtblk_dmat, req->vbr_mapp,
- BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(sc->vtblk_dmat,
- req->vbr_mapp);
- break;
- }
- }
- bp->bio_error = vtblk_request_error(req);
TAILQ_INSERT_TAIL(queue, bp, bio_queue);
-
vtblk_request_enqueue(sc, req);
}
}
@@ -1412,8 +1425,6 @@ vtblk_ident(struct vtblk_softc *sc)
error = vtblk_poll_request(sc, req);
VTBLK_UNLOCK(sc);
- vtblk_request_enqueue(sc, req);
-
if (error) {
device_printf(sc->vtblk_dev,
"error getting device identifier: %d\n", error);
@@ -1423,7 +1434,9 @@ vtblk_ident(struct vtblk_softc *sc)
static int
vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
{
+ struct vtblk_request *req1 __diagused;
struct virtqueue *vq;
+ struct bio *bp;
int error;
vq = sc->vtblk_vq;
@@ -1436,13 +1449,18 @@ vtblk_poll_request(struct vtblk_softc *sc, struct vtblk_request *req)
return (error);
virtqueue_notify(vq);
- virtqueue_poll(vq, NULL);
+ req1 = virtqueue_poll(vq, NULL);
+ KASSERT(req == req1,
+ ("%s: polling completed %p not %p", __func__, req1, req));
- error = vtblk_request_error(req);
+ bp = vtblk_queue_complete_one(sc, req);
+ error = bp->bio_error;
if (error && bootverbose) {
device_printf(sc->vtblk_dev,
"%s: IO error: %d\n", __func__, error);
}
+ if (req != &sc->vtblk_dump_request)
+ vtblk_request_enqueue(sc, req);
return (error);
}
diff --git a/sys/dev/virtio/console/virtio_console.c b/sys/dev/virtio/console/virtio_console.c
index 4a3fb1e97e57..66433565ce25 100644
--- a/sys/dev/virtio/console/virtio_console.c
+++ b/sys/dev/virtio/console/virtio_console.c
@@ -978,7 +978,7 @@ vtcon_ctrl_poll(struct vtcon_softc *sc,
*/
VTCON_CTRL_TX_LOCK(sc);
KASSERT(virtqueue_empty(vq),
- ("%s: virtqueue is not emtpy", __func__));
+ ("%s: virtqueue is not empty", __func__));
error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0);
if (error == 0) {
virtqueue_notify(vq);
@@ -1366,7 +1366,7 @@ vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize)
vq = port->vtcport_outvq;
KASSERT(virtqueue_empty(vq),
- ("%s: port %p out virtqueue not emtpy", __func__, port));
+ ("%s: port %p out virtqueue not empty", __func__, port));
sglist_init(&sg, 2, segs);
error = sglist_append(&sg, buf, bufsize);
diff --git a/sys/dev/virtio/gpu/virtio_gpu.c b/sys/dev/virtio/gpu/virtio_gpu.c
index f18eef985cc6..6f786a450900 100644
--- a/sys/dev/virtio/gpu/virtio_gpu.c
+++ b/sys/dev/virtio/gpu/virtio_gpu.c
@@ -102,6 +102,7 @@ static vd_bitblt_text_t vtgpu_fb_bitblt_text;
static vd_bitblt_bmp_t vtgpu_fb_bitblt_bitmap;
static vd_drawrect_t vtgpu_fb_drawrect;
static vd_setpixel_t vtgpu_fb_setpixel;
+static vd_bitblt_argb_t vtgpu_fb_bitblt_argb;
static struct vt_driver vtgpu_fb_driver = {
.vd_name = "virtio_gpu",
@@ -111,6 +112,7 @@ static struct vt_driver vtgpu_fb_driver = {
.vd_bitblt_text = vtgpu_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vtgpu_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vtgpu_fb_bitblt_argb,
.vd_drawrect = vtgpu_fb_drawrect,
.vd_setpixel = vtgpu_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
@@ -180,6 +182,16 @@ vtgpu_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
vtgpu_resource_flush(sc, x, y, width, height);
}
+static int
+vtgpu_fb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+
+ return (EOPNOTSUPP);
+}
+
static void
vtgpu_fb_drawrect(struct vt_device *vd, int x1, int y1, int x2, int y2,
int fill, term_color_t color)
@@ -359,8 +371,8 @@ vtgpu_detach(device_t dev)
vt_deallocate(&vtgpu_fb_driver, &sc->vtgpu_fb_info);
if (sc->vtgpu_fb_info.fb_vbase != 0) {
MPASS(sc->vtgpu_fb_info.fb_size != 0);
- contigfree((void *)sc->vtgpu_fb_info.fb_vbase,
- sc->vtgpu_fb_info.fb_size, M_DEVBUF);
+ free((void *)sc->vtgpu_fb_info.fb_vbase,
+ M_DEVBUF);
}
/* TODO: Tell the host we are detaching */
diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index b1a4230f7b46..fe531fced998 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -53,7 +53,6 @@
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>
-#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
@@ -79,7 +78,6 @@ static int vtmmio_alloc_virtqueues(device_t, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
-static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
@@ -104,29 +102,11 @@ static void vtmmio_vq_intr(void *);
* I/O port read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_1((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_1((sc)->res[0], (o), (v))
#define vtmmio_write_config_2(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_2((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_2((sc)->res[0], (o), (v))
#define vtmmio_write_config_4(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_4((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_4((sc)->res[0], (o), (v))
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
@@ -157,7 +137,6 @@ static device_method_t vtmmio_methods[] = {
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
- DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
@@ -220,19 +199,9 @@ vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
- int err;
sc = device_get_softc(dev);
- if (sc->platform != NULL) {
- err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
- vtmmio_vq_intr, sc);
- if (err == 0) {
- /* Okay we have backend-specific interrupts */
- return (0);
- }
- }
-
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
@@ -275,7 +244,7 @@ vtmmio_attach(device_t dev)
/* Tell the host we've noticed this device. */
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK);
- if ((child = device_add_child(dev, NULL, -1)) == NULL) {
+ if ((child = device_add_child(dev, NULL, DEVICE_UNIT_ANY)) == NULL) {
device_printf(dev, "Cannot create child device.\n");
vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED);
vtmmio_detach(dev);
@@ -292,17 +261,13 @@ static int
vtmmio_detach(device_t dev)
{
struct vtmmio_softc *sc;
- device_t child;
int error;
sc = device_get_softc(dev);
- if ((child = sc->vtmmio_child_dev) != NULL) {
- error = device_delete_child(dev, child);
- if (error)
- return (error);
- sc->vtmmio_child_dev = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
vtmmio_reset(sc);
@@ -601,17 +566,6 @@ vtmmio_stop(device_t dev)
vtmmio_reset(device_get_softc(dev));
}
-static void
-vtmmio_poll(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (sc->platform != NULL)
- VIRTIO_MMIO_POLL(sc->platform);
-}
-
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
diff --git a/sys/dev/virtio/mmio/virtio_mmio.h b/sys/dev/virtio/mmio/virtio_mmio.h
index ac6a96c1c7fe..edcbf0519acc 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.h
+++ b/sys/dev/virtio/mmio/virtio_mmio.h
@@ -37,7 +37,6 @@ struct vtmmio_virtqueue;
struct vtmmio_softc {
device_t dev;
- device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
diff --git a/sys/dev/virtio/mmio/virtio_mmio_fdt.c b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
index 7fba8aad8db8..bb9ea8efbaeb 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_fdt.c
+++ b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
@@ -63,12 +63,10 @@
#include <dev/virtio/mmio/virtio_mmio.h>
static int vtmmio_fdt_probe(device_t);
-static int vtmmio_fdt_attach(device_t);
static device_method_t vtmmio_fdt_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_fdt_probe),
- DEVMETHOD(device_attach, vtmmio_fdt_attach),
DEVMETHOD_END
};
@@ -93,48 +91,3 @@ vtmmio_fdt_probe(device_t dev)
return (vtmmio_probe(dev));
}
-
-static int
-vtmmio_setup_platform(device_t dev, struct vtmmio_softc *sc)
-{
- phandle_t platform_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- sc->platform = NULL;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, "platform", &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- platform_node = OF_node_from_xref(xref);
-
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == platform_node) {
- sc->platform = ic->dev;
- break;
- }
- }
-
- if (sc->platform == NULL) {
- /* No platform-specific device. Ignore it. */
- }
-
- return (0);
-}
-
-static int
-vtmmio_fdt_attach(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
- vtmmio_setup_platform(dev, sc);
-
- return (vtmmio_attach(dev));
-}
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 9c14e688f364..471c6b3714b2 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -28,6 +28,9 @@
/* Driver for VirtIO network devices. */
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
@@ -82,9 +85,6 @@
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
@@ -115,7 +115,7 @@ static void vtnet_free_rxtx_queues(struct vtnet_softc *);
static int vtnet_alloc_rx_filters(struct vtnet_softc *);
static void vtnet_free_rx_filters(struct vtnet_softc *);
static int vtnet_alloc_virtqueues(struct vtnet_softc *);
-static int vtnet_alloc_interface(struct vtnet_softc *);
+static void vtnet_alloc_interface(struct vtnet_softc *);
static int vtnet_setup_interface(struct vtnet_softc *);
static int vtnet_ioctl_mtu(struct vtnet_softc *, u_int);
static int vtnet_ioctl_ifflags(struct vtnet_softc *);
@@ -133,12 +133,14 @@ static int vtnet_rxq_replace_lro_nomrg_buf(struct vtnet_rxq *,
static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int);
static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_new_buf(struct vtnet_rxq *);
+#if defined(INET) || defined(INET6)
static int vtnet_rxq_csum_needs_csum(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
-static int vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
- uint16_t, int, struct virtio_net_hdr *);
+ bool, int, struct virtio_net_hdr *);
+static void vtnet_rxq_csum_data_valid(struct vtnet_rxq *, struct mbuf *,
+ int);
static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *,
struct virtio_net_hdr *);
+#endif
static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int);
static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *);
static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int);
@@ -163,24 +165,24 @@ static struct mbuf *
static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **,
struct vtnet_tx_header *);
static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **, int);
-#ifdef VTNET_LEGACY_TX
+
+/* Required for ALTQ */
static void vtnet_start_locked(struct vtnet_txq *, if_t);
static void vtnet_start(if_t);
-#else
+
+/* Required for MQ */
static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *);
static int vtnet_txq_mq_start(if_t, struct mbuf *);
static void vtnet_txq_tq_deferred(void *, int);
-#endif
+static void vtnet_qflush(if_t);
+
static void vtnet_txq_start(struct vtnet_txq *);
static void vtnet_txq_tq_intr(void *, int);
static int vtnet_txq_eof(struct vtnet_txq *);
static void vtnet_tx_vq_intr(void *);
static void vtnet_tx_start_all(struct vtnet_softc *);
-#ifndef VTNET_LEGACY_TX
-static void vtnet_qflush(if_t);
-#endif
-
static int vtnet_watchdog(struct vtnet_txq *);
static void vtnet_accum_stats(struct vtnet_softc *,
struct vtnet_rxq_stats *, struct vtnet_txq_stats *);
@@ -279,7 +281,7 @@ static int vtnet_tso_disable = 0;
SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN,
&vtnet_tso_disable, 0, "Disables TSO");
-static int vtnet_lro_disable = 0;
+static int vtnet_lro_disable = 1;
SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN,
&vtnet_lro_disable, 0, "Disables hardware LRO");
@@ -309,6 +311,19 @@ static int vtnet_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_vtnet, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
&vtnet_lro_mbufq_depth, 0, "Depth of software LRO mbuf queue");
+/* Disable ALTQ support. */
+static int vtnet_altq_disable = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, altq_disable, CTLFLAG_RDTUN,
+ &vtnet_altq_disable, 0, "Disables ALTQ Support");
+
+/*
+ * For ALTQ to be considered enabled, the driver must be compiled
+ * with an ALTQ-capable kernel and the tunable hw.vtnet.altq_disable
+ * must be zero.
+ */
+#define VTNET_ALTQ_ENABLED (VTNET_ALTQ_CAPABLE && (!vtnet_altq_disable))
+
static uma_zone_t vtnet_tx_header_zone;
static struct virtio_feature_desc vtnet_feature_desc[] = {
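
A sketch of how the gate composes: the compile-time ALTQ capability and the runtime tunable together select the TX path. Where the methods get installed is an assumption about vtnet_setup_interface(), not shown in this excerpt:

	if (VTNET_ALTQ_ENABLED) {
		if_setstartfn(ifp, vtnet_start);		/* ALTQ/if_start path */
	} else {
		if_settransmitfn(ifp, vtnet_txq_mq_start);	/* multiqueue path */
		if_setqflushfn(ifp, vtnet_qflush);
	}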
@@ -437,12 +452,7 @@ vtnet_attach(device_t dev)
callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0);
vtnet_load_tunables(sc);
- error = vtnet_alloc_interface(sc);
- if (error) {
- device_printf(dev, "cannot allocate interface\n");
- goto fail;
- }
-
+ vtnet_alloc_interface(sc);
vtnet_setup_sysctl(sc);
error = vtnet_setup_features(sc);
@@ -648,12 +658,9 @@ vtnet_negotiate_features(struct vtnet_softc *sc)
if (no_csum || vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable))
features &= ~VTNET_LRO_FEATURES;
-#ifndef VTNET_LEGACY_TX
- if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
+ /*
+ * Clear the MQ feature flag if the driver has ALTQ enabled or MQ is
+ * explicitly disabled.
+ */
+ if (VTNET_ALTQ_ENABLED || vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable))
features &= ~VIRTIO_NET_F_MQ;
-#else
- features &= ~VIRTIO_NET_F_MQ;
-#endif
negotiated_features = virtio_negotiate_features(dev, features);
@@ -662,7 +669,7 @@ vtnet_negotiate_features(struct vtnet_softc *sc)
mtu = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, mtu));
- if (mtu < VTNET_MIN_MTU /* || mtu > VTNET_MAX_MTU */) {
+ if (mtu < VTNET_MIN_MTU) {
device_printf(dev, "Invalid MTU value: %d. "
"MTU feature disabled.\n", mtu);
features &= ~VIRTIO_NET_F_MTU;
@@ -871,14 +878,14 @@ vtnet_init_txq(struct vtnet_softc *sc, int id)
if (txq->vtntx_sg == NULL)
return (ENOMEM);
-#ifndef VTNET_LEGACY_TX
- txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
- M_NOWAIT, &txq->vtntx_mtx);
- if (txq->vtntx_br == NULL)
- return (ENOMEM);
+ if (!VTNET_ALTQ_ENABLED) {
+ txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF,
+ M_NOWAIT, &txq->vtntx_mtx);
+ if (txq->vtntx_br == NULL)
+ return (ENOMEM);
- TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
-#endif
+ TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq);
+ }
TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq);
txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT,
taskqueue_thread_enqueue, &txq->vtntx_tq);
@@ -949,12 +956,12 @@ vtnet_destroy_txq(struct vtnet_txq *txq)
txq->vtntx_sg = NULL;
}
-#ifndef VTNET_LEGACY_TX
- if (txq->vtntx_br != NULL) {
- buf_ring_free(txq->vtntx_br, M_DEVBUF);
- txq->vtntx_br = NULL;
+ if (!VTNET_ALTQ_ENABLED) {
+ if (txq->vtntx_br != NULL) {
+ buf_ring_free(txq->vtntx_br, M_DEVBUF);
+ txq->vtntx_br = NULL;
+ }
}
-#endif
if (mtx_initialized(&txq->vtntx_mtx) != 0)
mtx_destroy(&txq->vtntx_mtx);
@@ -1042,19 +1049,19 @@ vtnet_alloc_virtqueues(struct vtnet_softc *sc)
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
- VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs,
+ VQ_ALLOC_INFO_INIT(&info[idx + 1], sc->vtnet_tx_nsegs,
vtnet_tx_vq_intr, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
/* These queues will not be used so allocate the minimum resources. */
- for (/**/; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
+ for (; i < sc->vtnet_max_vq_pairs; i++, idx += 2) {
rxq = &sc->vtnet_rxqs[i];
VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, rxq, &rxq->vtnrx_vq,
"%s-rx%d", device_get_nameunit(dev), rxq->vtnrx_id);
txq = &sc->vtnet_txqs[i];
- VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, txq, &txq->vtntx_vq,
+ VQ_ALLOC_INFO_INIT(&info[idx + 1], 0, NULL, txq, &txq->vtntx_vq,
"%s-tx%d", device_get_nameunit(dev), txq->vtntx_id);
}
@@ -1069,7 +1076,7 @@ vtnet_alloc_virtqueues(struct vtnet_softc *sc)
return (error);
}
-static int
+static void
vtnet_alloc_interface(struct vtnet_softc *sc)
{
device_t dev;
@@ -1078,14 +1085,9 @@ vtnet_alloc_interface(struct vtnet_softc *sc)
dev = sc->vtnet_dev;
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL)
- return (ENOMEM);
-
sc->vtnet_ifp = ifp;
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
-
- return (0);
}
static int
@@ -1103,15 +1105,16 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if_setinitfn(ifp, vtnet_init);
if_setioctlfn(ifp, vtnet_ioctl);
if_setgetcounterfn(ifp, vtnet_get_counter);
-#ifndef VTNET_LEGACY_TX
- if_settransmitfn(ifp, vtnet_txq_mq_start);
- if_setqflushfn(ifp, vtnet_qflush);
-#else
- struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
- if_setstartfn(ifp, vtnet_start);
- if_setsendqlen(ifp, virtqueue_size(vq) - 1);
- if_setsendqready(ifp);
-#endif
+
+ if (!VTNET_ALTQ_ENABLED) {
+ if_settransmitfn(ifp, vtnet_txq_mq_start);
+ if_setqflushfn(ifp, vtnet_qflush);
+ } else {
+ struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq;
+ if_setstartfn(ifp, vtnet_start);
+ if_setsendqlen(ifp, virtqueue_size(vq) - 1);
+ if_setsendqready(ifp);
+ }
vtnet_get_macaddr(sc);
@@ -1150,11 +1153,9 @@ vtnet_setup_interface(struct vtnet_softc *sc)
}
if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
- if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
/* BMV: Rx checksums not distinguished between IPv4 and IPv6. */
+ if_setcapabilitiesbit(ifp, IFCAP_RXCSUM, 0);
if_setcapabilitiesbit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
if (vtnet_tunable_int(sc, "fixup_needs_csum",
vtnet_fixup_needs_csum) != 0)
@@ -1177,6 +1178,7 @@ vtnet_setup_interface(struct vtnet_softc *sc)
if (sc->vtnet_max_mtu >= ETHERMTU_JUMBO)
if_setcapabilitiesbit(ifp, IFCAP_JUMBO_MTU, 0);
if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU, 0);
+ if_setcapabilitiesbit(ifp, IFCAP_HWSTATS, 0);
/*
* Capabilities after here are not enabled by default.
@@ -1343,14 +1345,22 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
VTNET_CORE_LOCK_ASSERT(sc);
- if (mask & IFCAP_TXCSUM)
+ if (mask & IFCAP_TXCSUM) {
if_togglecapenable(ifp, IFCAP_TXCSUM);
- if (mask & IFCAP_TXCSUM_IPV6)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD);
+ }
+ if (mask & IFCAP_TXCSUM_IPV6) {
if_togglecapenable(ifp, IFCAP_TXCSUM_IPV6);
- if (mask & IFCAP_TSO4)
+ if_togglehwassist(ifp, VTNET_CSUM_OFFLOAD_IPV6);
+ }
+ if (mask & IFCAP_TSO4) {
if_togglecapenable(ifp, IFCAP_TSO4);
- if (mask & IFCAP_TSO6)
+ if_togglehwassist(ifp, CSUM_IP_TSO);
+ }
+ if (mask & IFCAP_TSO6) {
if_togglecapenable(ifp, IFCAP_TSO6);
+ if_togglehwassist(ifp, CSUM_IP6_TSO);
+ }
if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) {
/*
@@ -1366,27 +1376,20 @@ vtnet_ioctl_ifcap(struct vtnet_softc *sc, struct ifreq *ifr)
if ((mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO)) ==
IFCAP_LRO && vtnet_software_lro(sc))
reinit = update = 0;
-
- if (mask & IFCAP_RXCSUM)
+ /*
+ * VirtIO does not distinguish between receive checksum offload
+ * for IPv4 and IPv6 packets, so treat them as a pair.
+ */
+ if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
if_togglecapenable(ifp, IFCAP_RXCSUM);
- if (mask & IFCAP_RXCSUM_IPV6)
if_togglecapenable(ifp, IFCAP_RXCSUM_IPV6);
+ }
if (mask & IFCAP_LRO)
if_togglecapenable(ifp, IFCAP_LRO);
-
- /*
- * VirtIO does not distinguish between IPv4 and IPv6 checksums
- * so treat them as a pair. Guest TSO (LRO) requires receive
- * checksums.
- */
- if (if_getcapenable(ifp) & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) {
- if_setcapenablebit(ifp, IFCAP_RXCSUM, 0);
-#ifdef notyet
- if_setcapenablebit(ifp, IFCAP_RXCSUM_IPV6, 0);
-#endif
- } else
- if_setcapenablebit(ifp, 0,
- (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO));
+ /* Both SW and HW TCP LRO require receive checksum offload. */
+ if ((if_getcapenable(ifp) &
+ (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) == 0)
+ if_setcapenablebit(ifp, 0, IFCAP_LRO);
}
if (mask & IFCAP_VLAN_HWFILTER) {
@@ -1759,164 +1762,165 @@ vtnet_rxq_new_buf(struct vtnet_rxq *rxq)
return (error);
}
+#if defined(INET) || defined(INET6)
static int
-vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t etype,
- int hoff, struct virtio_net_hdr *hdr)
+vtnet_rxq_csum_needs_csum(struct vtnet_rxq *rxq, struct mbuf *m, bool isipv6,
+ int protocol, struct virtio_net_hdr *hdr)
{
struct vtnet_softc *sc;
- int error;
- sc = rxq->vtnrx_sc;
+ /*
+ * The packet is likely from another VM on the same host, or from the
+ * host itself, which already performed the checksum offloading, so
+ * Tx/Rx amounts to a memcpy and the checksum computed so far has
+ * little value.
+ */
+
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
/*
- * NEEDS_CSUM corresponds to Linux's CHECKSUM_PARTIAL, but FreeBSD does
- * not have an analogous CSUM flag. The checksum has been validated,
- * but is incomplete (TCP/UDP pseudo header).
- *
- * The packet is likely from another VM on the same host that itself
- * performed checksum offloading so Tx/Rx is basically a memcpy and
- * the checksum has little value.
- *
- * Default to receiving the packet as-is for performance reasons, but
- * this can cause issues if the packet is to be forwarded because it
- * does not contain a valid checksum. This patch may be helpful:
- * https://reviews.freebsd.org/D6611. In the meantime, have the driver
- * compute the checksum if requested.
- *
- * BMV: Need to add an CSUM_PARTIAL flag?
+ * If the user does not want us to fix it up here by computing the
+ * checksum, hand the job to the stack by setting the corresponding
+ * mbuf flag (e.g., CSUM_TCP).
*/
+ sc = rxq->vtnrx_sc;
if ((sc->vtnet_flags & VTNET_FLAG_FIXUP_NEEDS_CSUM) == 0) {
- error = vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr);
- return (error);
+ switch (protocol) {
+ case IPPROTO_TCP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_TCP_IPV6 : CSUM_TCP);
+ break;
+ case IPPROTO_UDP:
+ m->m_pkthdr.csum_flags |=
+ (isipv6 ? CSUM_UDP_IPV6 : CSUM_UDP);
+ break;
+ }
+ m->m_pkthdr.csum_data = hdr->csum_offset;
+ return (0);
}
/*
* Compute the checksum in the driver so the packet will contain a
* valid checksum. The checksum is at csum_offset from csum_start.
*/
- switch (etype) {
-#if defined(INET) || defined(INET6)
- case ETHERTYPE_IP:
- case ETHERTYPE_IPV6: {
- int csum_off, csum_end;
- uint16_t csum;
+ int csum_off, csum_end;
+ uint16_t csum;
- csum_off = hdr->csum_start + hdr->csum_offset;
- csum_end = csum_off + sizeof(uint16_t);
+ csum_off = hdr->csum_start + hdr->csum_offset;
+ csum_end = csum_off + sizeof(uint16_t);
- /* Assume checksum will be in the first mbuf. */
- if (m->m_len < csum_end || m->m_pkthdr.len < csum_end)
- return (1);
-
- /*
- * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
- * checksum and write it at the specified offset. We could
- * try to verify the packet: csum_start should probably
- * correspond to the start of the TCP/UDP header.
- *
- * BMV: Need to properly handle UDP with zero checksum. Is
- * the IPv4 header checksum implicitly validated?
- */
- csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
- *(uint16_t *)(mtodo(m, csum_off)) = csum;
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
- break;
- }
-#endif
- default:
- sc->vtnet_stats.rx_csum_bad_ethtype++;
+ /* Assume checksum will be in the first mbuf. */
+ if (m->m_len < csum_end || m->m_pkthdr.len < csum_end) {
+ sc->vtnet_stats.rx_csum_bad_offset++;
return (1);
}
+ /*
+ * Like in_delayed_cksum()/in6_delayed_cksum(), compute the
+ * checksum and write it at the specified offset. We could
+ * try to verify the packet: csum_start should probably
+ * correspond to the start of the TCP/UDP header.
+ *
+ * BMV: Need to properly handle UDP with zero checksum. Is
+ * the IPv4 header checksum implicitly validated?
+ */
+ csum = in_cksum_skip(m, m->m_pkthdr.len, hdr->csum_start);
+ *(uint16_t *)(mtodo(m, csum_off)) = csum;
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+
return (0);
}
+static void
+vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m, int protocol)
+{
+ KASSERT(protocol == IPPROTO_TCP || protocol == IPPROTO_UDP,
+ ("%s: unsupported IP protocol %d", __func__, protocol));
+
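+ /*
+ * Note (added for clarity): CSUM_DATA_VALID together with
+ * CSUM_PSEUDO_HDR and a csum_data of 0xFFFF marks the checksum,
+ * pseudo header included, as verified, so the stack does not
+ * recompute it.
+ */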
+ m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
+ m->m_pkthdr.csum_data = 0xFFFF;
+}
+
static int
-vtnet_rxq_csum_data_valid(struct vtnet_rxq *rxq, struct mbuf *m,
- uint16_t etype, int hoff, struct virtio_net_hdr *hdr __unused)
+vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
+ struct virtio_net_hdr *hdr)
{
-#if 0
+ const struct ether_header *eh;
struct vtnet_softc *sc;
-#endif
- int protocol;
+ int hoff, protocol;
+ uint16_t etype;
+ bool isipv6;
+
+ KASSERT(hdr->flags &
+ (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID),
+ ("%s: missing checksum offloading flag %x", __func__, hdr->flags));
+
+ eh = mtod(m, const struct ether_header *);
+ etype = ntohs(eh->ether_type);
+ if (etype == ETHERTYPE_VLAN) {
+ /* TODO BMV: Handle QinQ. */
+ const struct ether_vlan_header *evh =
+ mtod(m, const struct ether_vlan_header *);
+ etype = ntohs(evh->evl_proto);
+ hoff = sizeof(struct ether_vlan_header);
+ } else
+ hoff = sizeof(struct ether_header);
-#if 0
sc = rxq->vtnrx_sc;
-#endif
+ /* Check whether the Ethernet type is IPv4 or IPv6, and get the protocol. */
switch (etype) {
#if defined(INET)
case ETHERTYPE_IP:
- if (__predict_false(m->m_len < hoff + sizeof(struct ip)))
- protocol = IPPROTO_DONE;
- else {
+ if (__predict_false(m->m_len < hoff + sizeof(struct ip))) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ } else {
struct ip *ip = (struct ip *)(m->m_data + hoff);
protocol = ip->ip_p;
}
+ isipv6 = false;
break;
#endif
#if defined(INET6)
case ETHERTYPE_IPV6:
if (__predict_false(m->m_len < hoff + sizeof(struct ip6_hdr))
- || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0)
- protocol = IPPROTO_DONE;
+ || ip6_lasthdr(m, hoff, IPPROTO_IPV6, &protocol) < 0) {
+ sc->vtnet_stats.rx_csum_inaccessible_ipproto++;
+ return (1);
+ }
+ isipv6 = true;
break;
#endif
default:
- protocol = IPPROTO_DONE;
- break;
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ return (1);
}
+ /* Check whether protocol is TCP or UDP. */
switch (protocol) {
case IPPROTO_TCP:
case IPPROTO_UDP:
- m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
- m->m_pkthdr.csum_data = 0xFFFF;
break;
default:
/*
* FreeBSD does not support checksum offloading of this
- * protocol. Let the stack re-verify the checksum later
- * if the protocol is supported.
+ * protocol here.
*/
-#if 0
- if_printf(sc->vtnet_ifp,
- "%s: checksum offload of unsupported protocol "
- "etype=%#x protocol=%d csum_start=%d csum_offset=%d\n",
- __func__, etype, protocol, hdr->csum_start,
- hdr->csum_offset);
-#endif
- break;
+ sc->vtnet_stats.rx_csum_bad_ipproto++;
+ return (1);
}
- return (0);
-}
-
-static int
-vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m,
- struct virtio_net_hdr *hdr)
-{
- const struct ether_header *eh;
- int hoff;
- uint16_t etype;
-
- eh = mtod(m, const struct ether_header *);
- etype = ntohs(eh->ether_type);
- if (etype == ETHERTYPE_VLAN) {
- /* TODO BMV: Handle QinQ. */
- const struct ether_vlan_header *evh =
- mtod(m, const struct ether_vlan_header *);
- etype = ntohs(evh->evl_proto);
- hoff = sizeof(struct ether_vlan_header);
- } else
- hoff = sizeof(struct ether_header);
-
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
- return (vtnet_rxq_csum_needs_csum(rxq, m, etype, hoff, hdr));
+ return (vtnet_rxq_csum_needs_csum(rxq, m, isipv6, protocol,
+ hdr));
else /* VIRTIO_NET_HDR_F_DATA_VALID */
- return (vtnet_rxq_csum_data_valid(rxq, m, etype, hoff, hdr));
+ vtnet_rxq_csum_data_valid(rxq, m, protocol);
+
+ return (0);
}
+#endif
static void
vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs)
@@ -2039,10 +2043,15 @@ vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m,
if (hdr->flags &
(VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) {
+#if defined(INET) || defined(INET6)
if (vtnet_rxq_csum(rxq, m, hdr) == 0)
rxq->vtnrx_stats.vrxs_csum++;
else
rxq->vtnrx_stats.vrxs_csum_failed++;
+#else
+ sc->vtnet_stats.rx_csum_bad_ethtype++;
+ rxq->vtnrx_stats.vrxs_csum_failed++;
+#endif
}
if (hdr->gso_size != 0) {
@@ -2448,7 +2457,7 @@ vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type,
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
- if (__predict_false(tcp->th_flags & TH_CWR)) {
+ if (__predict_false(tcp_get_flags(tcp) & TH_CWR)) {
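+ /*
+ * Note (added for clarity): tcp_get_flags() returns the full
+ * flags field of the TCP header, unlike the legacy th_flags
+ * byte it replaces here.
+ */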
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In
* FreeBSD, ECN support is not on a per-interface basis,
@@ -2496,6 +2505,10 @@ vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m,
hdr->csum_start = vtnet_gtoh16(sc, csum_start);
hdr->csum_offset = vtnet_gtoh16(sc, m->m_pkthdr.csum_data);
txq->vtntx_stats.vtxs_csum++;
+ } else if ((flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) &&
+ (proto == IPPROTO_TCP || proto == IPPROTO_UDP) &&
+ (m->m_pkthdr.csum_data == 0xFFFF)) {
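+ /*
+ * Note (added for clarity): the mbuf carries a fully verified
+ * checksum, so relay that to the host as DATA_VALID instead of
+ * asking for it to be computed again.
+ */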
+ hdr->flags |= VIRTIO_NET_HDR_F_DATA_VALID;
}
if (flags & (CSUM_IP_TSO | CSUM_IP6_TSO)) {
@@ -2550,8 +2563,10 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
error = sglist_append_mbuf(sg, m);
if (error) {
m = m_defrag(m, M_NOWAIT);
- if (m == NULL)
+ if (m == NULL) {
+ sc->vtnet_stats.tx_defrag_failed++;
goto fail;
+ }
*m_head = m;
sc->vtnet_stats.tx_defragged++;
@@ -2567,7 +2582,6 @@ vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head,
return (error);
fail:
- sc->vtnet_stats.tx_defrag_failed++;
m_freem(*m_head);
*m_head = NULL;
@@ -2608,7 +2622,8 @@ vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head, int flags)
m->m_flags &= ~M_VLANTAG;
}
- if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) {
+ if (m->m_pkthdr.csum_flags &
+ (VTNET_CSUM_ALL_OFFLOAD | CSUM_DATA_VALID)) {
m = vtnet_txq_offload(txq, m, hdr);
if ((*m_head = m) == NULL) {
error = ENOBUFS;
@@ -2624,7 +2639,6 @@ fail:
return (error);
}
-#ifdef VTNET_LEGACY_TX
static void
vtnet_start_locked(struct vtnet_txq *txq, if_t ifp)
@@ -2690,7 +2704,6 @@ vtnet_start(if_t ifp)
VTNET_TXQ_UNLOCK(txq);
}
-#else /* !VTNET_LEGACY_TX */
static int
vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m)
@@ -2801,7 +2814,6 @@ vtnet_txq_tq_deferred(void *xtxq, int pending __unused)
VTNET_TXQ_UNLOCK(txq);
}
-#endif /* VTNET_LEGACY_TX */
static void
vtnet_txq_start(struct vtnet_txq *txq)
@@ -2812,13 +2824,14 @@ vtnet_txq_start(struct vtnet_txq *txq)
sc = txq->vtntx_sc;
ifp = sc->vtnet_ifp;
-#ifdef VTNET_LEGACY_TX
- if (!if_sendq_empty(ifp))
- vtnet_start_locked(txq, ifp);
-#else
- if (!drbr_empty(ifp, txq->vtntx_br))
- vtnet_txq_mq_start_locked(txq, NULL);
-#endif
+ if (!VTNET_ALTQ_ENABLED) {
+ if (!drbr_empty(ifp, txq->vtntx_br))
+ vtnet_txq_mq_start_locked(txq, NULL);
+ } else {
+ if (!if_sendq_empty(ifp))
+ vtnet_start_locked(txq, ifp);
+ }
}
static void
@@ -2933,7 +2946,6 @@ vtnet_tx_start_all(struct vtnet_softc *sc)
}
}
-#ifndef VTNET_LEGACY_TX
static void
vtnet_qflush(if_t ifp)
{
@@ -2955,7 +2967,6 @@ vtnet_qflush(if_t ifp)
if_qflush(ifp);
}
-#endif
static int
vtnet_watchdog(struct vtnet_txq *txq)
@@ -3034,14 +3045,14 @@ vtnet_get_counter(if_t ifp, ift_counter cnt)
return (rxaccum.vrxs_iqdrops);
case IFCOUNTER_IERRORS:
return (rxaccum.vrxs_ierrors);
+ case IFCOUNTER_IBYTES:
+ return (rxaccum.vrxs_ibytes);
case IFCOUNTER_OPACKETS:
return (txaccum.vtxs_opackets);
-#ifndef VTNET_LEGACY_TX
case IFCOUNTER_OBYTES:
return (txaccum.vtxs_obytes);
case IFCOUNTER_OMCASTS:
return (txaccum.vtxs_omcasts);
-#endif
default:
return (if_get_counter_default(ifp, cnt));
}
@@ -3145,9 +3156,8 @@ vtnet_drain_taskqueues(struct vtnet_softc *sc)
txq = &sc->vtnet_txqs[i];
if (txq->vtntx_tq != NULL) {
taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask);
-#ifndef VTNET_LEGACY_TX
- taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
-#endif
+ if (!VTNET_ALTQ_ENABLED)
+ taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask);
}
}
}
@@ -3815,9 +3825,9 @@ vtnet_rx_filter_mac(struct vtnet_softc *sc)
if_printf(ifp, "error setting host MAC filter table\n");
out:
- if (promisc != 0 && vtnet_set_promisc(sc, true) != 0)
+ if (promisc && vtnet_set_promisc(sc, true) != 0)
if_printf(ifp, "cannot enable promiscuous mode\n");
- if (allmulti != 0 && vtnet_set_allmulti(sc, true) != 0)
+ if (allmulti && vtnet_set_allmulti(sc, true) != 0)
if_printf(ifp, "cannot enable all-multicast mode\n");
}
@@ -4102,21 +4112,29 @@ vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx,
stats = &rxq->vtnrx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ipackets, "Receive packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ibytes, "Receive bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_iqdrops, "Receive drops");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_ierrors, "Receive errors");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum, "Receive checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_csum_failed, "Receive checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "host_lro",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_host_lro, "Receive host segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vrxs_rescheduled,
"Receive interrupt handler rescheduled");
}
@@ -4137,17 +4155,23 @@ vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx,
stats = &txq->vtntx_stats;
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_opackets, "Transmit packets");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_obytes, "Transmit bytes");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_omcasts, "Transmit multicasts");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_csum, "Transmit checksum offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_tso, "Transmit TCP segmentation offloaded");
- SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD,
+ SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled",
+ CTLFLAG_RD | CTLFLAG_STATS,
&stats->vtxs_rescheduled,
"Transmit interrupt handler rescheduled");
}
@@ -4172,6 +4196,102 @@ vtnet_setup_queue_sysctl(struct vtnet_softc *sc)
}
}
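+
+/*
+ * Note (added for clarity): each handler below recomputes the aggregate
+ * from the per-queue counters on every sysctl read, so the totals need
+ * no per-packet accounting.
+ */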
+static int
+vtnet_sysctl_rx_csum_failed(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_failed = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_failed += rxst->vrxs_csum_failed;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_failed, req));
+}
+
+static int
+vtnet_sysctl_rx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_csum_offloaded += rxst->vrxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_rx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_rxq_stats *rxst;
+ int i;
+
+ stats->rx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ rxst = &sc->vtnet_rxqs[i].vtnrx_stats;
+ stats->rx_task_rescheduled += rxst->vrxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->rx_task_rescheduled, req));
+}
+
+static int
+vtnet_sysctl_tx_csum_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_csum_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_csum_offloaded += txst->vtxs_csum;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_csum_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_tso_offloaded(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_tso_offloaded = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_tso_offloaded += txst->vtxs_tso;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_tso_offloaded, req));
+}
+
+static int
+vtnet_sysctl_tx_task_rescheduled(SYSCTL_HANDLER_ARGS)
+{
+ struct vtnet_softc *sc = (struct vtnet_softc *)arg1;
+ struct vtnet_statistics *stats = &sc->vtnet_stats;
+ struct vtnet_txq_stats *txst;
+ int i;
+
+ stats->tx_task_rescheduled = 0;
+ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) {
+ txst = &sc->vtnet_txqs[i].vtntx_stats;
+ stats->tx_task_rescheduled += txst->vtxs_rescheduled;
+ }
+ return (sysctl_handle_64(oidp, NULL, stats->tx_task_rescheduled, req));
+}
+
static void
vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child, struct vtnet_softc *sc)
@@ -4191,69 +4311,75 @@ vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx,
stats->tx_task_rescheduled = txaccum.vtxs_rescheduled;
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
- CTLFLAG_RD, &stats->mbuf_alloc_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->mbuf_alloc_failed,
"Mbuf cluster allocation failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
- CTLFLAG_RD, &stats->rx_frame_too_large,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_frame_too_large,
"Received frame larger than the mbuf chain");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
- CTLFLAG_RD, &stats->rx_enq_replacement_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_enq_replacement_failed,
"Enqueuing the replacement receive mbuf failed");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
- CTLFLAG_RD, &stats->rx_mergeable_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_mergeable_failed,
"Mergeable buffers receive failures");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
- CTLFLAG_RD, &stats->rx_csum_bad_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ethtype,
"Received checksum offloaded buffer with unsupported "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
- CTLFLAG_RD, &stats->rx_csum_bad_ipproto,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_ipproto,
"Received checksum offloaded buffer with incorrect IP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
- CTLFLAG_RD, &stats->rx_csum_bad_offset,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_bad_offset,
"Received checksum offloaded buffer with incorrect offset");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto",
- CTLFLAG_RD, &stats->rx_csum_bad_proto,
- "Received checksum offloaded buffer with incorrect protocol");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
- CTLFLAG_RD, &stats->rx_csum_failed,
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_inaccessible_ipproto",
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->rx_csum_inaccessible_ipproto,
+ "Received checksum offloaded buffer with inaccessible IP protocol");
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_failed",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_failed, "QU",
"Received buffer checksum offload failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
- CTLFLAG_RD, &stats->rx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_csum_offloaded, "QU",
"Received buffer checksum offload succeeded");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
- CTLFLAG_RD, &stats->rx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "rx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_rx_task_rescheduled, "QU",
"Times the receive interrupt task rescheduled itself");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_unknown_ethtype",
- CTLFLAG_RD, &stats->tx_csum_unknown_ethtype,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_unknown_ethtype,
"Aborted transmit of checksum offloaded buffer with unknown "
"Ethernet type");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_proto_mismatch",
- CTLFLAG_RD, &stats->tx_csum_proto_mismatch,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_csum_proto_mismatch,
"Aborted transmit of checksum offloaded buffer because mismatched "
"protocols");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp",
- CTLFLAG_RD, &stats->tx_tso_not_tcp,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_not_tcp,
"Aborted transmit of TSO buffer with non TCP protocol");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_without_csum",
- CTLFLAG_RD, &stats->tx_tso_without_csum,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_tso_without_csum,
"Aborted transmit of TSO buffer without TCP checksum offload");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
- CTLFLAG_RD, &stats->tx_defragged,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defragged,
"Transmit mbufs defragged");
SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
- CTLFLAG_RD, &stats->tx_defrag_failed,
+ CTLFLAG_RD | CTLFLAG_STATS, &stats->tx_defrag_failed,
"Aborted transmit of buffer because defrag failed");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
- CTLFLAG_RD, &stats->tx_csum_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_csum_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_csum_offloaded, "QU",
"Offloaded checksum of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
- CTLFLAG_RD, &stats->tx_tso_offloaded,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_tso_offloaded",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_tso_offloaded, "QU",
"Segmentation offload of transmitted buffer");
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
- CTLFLAG_RD, &stats->tx_task_rescheduled,
+ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_task_rescheduled",
+ CTLTYPE_U64 | CTLFLAG_RD | CTLFLAG_STATS,
+ sc, 0, vtnet_sysctl_tx_task_rescheduled, "QU",
"Times the transmit interrupt task rescheduled itself");
}
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index d690ad3bf63c..cab7ced639a7 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -29,8 +29,10 @@
#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H
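+
+/*
+ * Note (added for clarity, not in the original change): this #define /
+ * #undef dance yields a compile-time constant, so VTNET_ALTQ_CAPABLE can
+ * be used in ordinary C conditionals and the compiler drops the dead
+ * branch.
+ */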
+#define VTNET_ALTQ_CAPABLE (0)
#ifdef ALTQ
-#define VTNET_LEGACY_TX
+#undef VTNET_ALTQ_CAPABLE
+#define VTNET_ALTQ_CAPABLE (1)
#endif
struct vtnet_softc;
@@ -44,7 +46,7 @@ struct vtnet_statistics {
uint64_t rx_csum_bad_ethtype;
uint64_t rx_csum_bad_ipproto;
uint64_t rx_csum_bad_offset;
- uint64_t rx_csum_bad_proto;
+ uint64_t rx_csum_inaccessible_ipproto;
uint64_t tx_csum_unknown_ethtype;
uint64_t tx_csum_proto_mismatch;
uint64_t tx_tso_not_tcp;
@@ -112,18 +114,14 @@ struct vtnet_txq {
struct vtnet_softc *vtntx_sc;
struct virtqueue *vtntx_vq;
struct sglist *vtntx_sg;
-#ifndef VTNET_LEGACY_TX
struct buf_ring *vtntx_br;
-#endif
int vtntx_id;
int vtntx_watchdog;
int vtntx_intr_threshold;
struct vtnet_txq_stats vtntx_stats;
struct taskqueue *vtntx_tq;
struct task vtntx_intrtask;
-#ifndef VTNET_LEGACY_TX
struct task vtntx_defrtask;
-#endif
#ifdef DEV_NETMAP
struct virtio_net_hdr_mrg_rxbuf vtntx_shrhdr;
#endif /* DEV_NETMAP */
@@ -374,7 +372,7 @@ CTASSERT(((VTNET_TX_SEGS_MAX - 1) * MCLBYTES) >= VTNET_MAX_MTU);
*/
#define VTNET_DEFAULT_BUFRING_SIZE 4096
-#define VTNET_CORE_MTX(_sc) &(_sc)->vtnet_mtx
+#define VTNET_CORE_MTX(_sc) (&(_sc)->vtnet_mtx)
#define VTNET_CORE_LOCK(_sc) mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc) mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc) mtx_destroy(VTNET_CORE_MTX((_sc)))
diff --git a/sys/dev/virtio/network/virtio_net.h b/sys/dev/virtio/network/virtio_net.h
index 4b728f7af21a..9ea53cbe2376 100644
--- a/sys/dev/virtio/network/virtio_net.h
+++ b/sys/dev/virtio/network/virtio_net.h
@@ -481,7 +481,7 @@ virtio_net_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
VIRTIO_NET_HDR_GSO_TCPV6;
- if (tcp->th_flags & TH_CWR) {
+ if (tcp_get_flags(tcp) & TH_CWR) {
/*
* Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
* ECN support is not on a per-interface basis, but globally via
diff --git a/sys/dev/virtio/p9fs/virtio_p9fs.c b/sys/dev/virtio/p9fs/virtio_p9fs.c
new file mode 100644
index 000000000000..aa84d3970698
--- /dev/null
+++ b/sys/dev/virtio/p9fs/virtio_p9fs.c
@@ -0,0 +1,494 @@
+/*-
+ * Copyright (c) 2017 Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+/*
+ * The Virtio 9P transport driver. This file contains all functions related to
+ * the virtqueue infrastructure which include creating the virtqueue, host
+ * interactions, interrupts etc.
+ */
+
+#include <sys/param.h>
+#include <sys/errno.h>
+#include <sys/module.h>
+#include <sys/sglist.h>
+#include <sys/queue.h>
+#include <sys/bus.h>
+#include <sys/kthread.h>
+#include <sys/condvar.h>
+#include <sys/sysctl.h>
+
+#include <machine/bus.h>
+
+#include <fs/p9fs/p9_client.h>
+#include <fs/p9fs/p9_debug.h>
+#include <fs/p9fs/p9_protocol.h>
+#include <fs/p9fs/p9_transport.h>
+
+#include <dev/virtio/virtio.h>
+#include <dev/virtio/virtqueue.h>
+#include <dev/virtio/virtio_ring.h>
+#include <dev/virtio/p9fs/virtio_p9fs.h>
+
+#define VT9P_MTX(_sc) (&(_sc)->vt9p_mtx)
+#define VT9P_LOCK(_sc) mtx_lock(VT9P_MTX(_sc))
+#define VT9P_UNLOCK(_sc) mtx_unlock(VT9P_MTX(_sc))
+#define VT9P_LOCK_INIT(_sc) mtx_init(VT9P_MTX(_sc), \
+ "VIRTIO 9P CHAN lock", NULL, MTX_DEF)
+#define VT9P_LOCK_DESTROY(_sc) mtx_destroy(VT9P_MTX(_sc))
+#define MAX_SUPPORTED_SGS 20
+static MALLOC_DEFINE(M_P9FS_MNTTAG, "p9fs_mount_tag", "P9fs Mounttag");
+
+struct vt9p_softc {
+ device_t vt9p_dev;
+ struct mtx vt9p_mtx;
+ struct sglist *vt9p_sglist;
+ struct cv submit_cv;
+ bool busy;
+ struct virtqueue *vt9p_vq;
+ int max_nsegs;
+ uint16_t mount_tag_len;
+ char *mount_tag;
+ STAILQ_ENTRY(vt9p_softc) chan_next;
+};
+
+/* Global channel list. Each channel corresponds to a mount point. */
+static STAILQ_HEAD(, vt9p_softc) global_chan_list =
+ STAILQ_HEAD_INITIALIZER(global_chan_list);
+struct mtx global_chan_list_mtx;
+MTX_SYSINIT(global_chan_list_mtx, &global_chan_list_mtx, "9pglobal", MTX_DEF);
+
+static struct virtio_feature_desc virtio_9p_feature_desc[] = {
+ { VIRTIO_9PNET_F_MOUNT_TAG, "9PMountTag" },
+ { 0, NULL }
+};
+
+/* We don't currently allow canceling of virtio requests */
+static int
+vt9p_cancel(void *handle, struct p9_req_t *req)
+{
+ return (1);
+}
+
+SYSCTL_NODE(_vfs, OID_AUTO, 9p, CTLFLAG_RW, 0, "9P File System Protocol");
+
+/*
+ * Maximum number of seconds the vt9p_request thread sleeps waiting for
+ * an ack from the host before giving up.
+ */
+static unsigned int vt9p_ackmaxidle = 120;
+SYSCTL_UINT(_vfs_9p, OID_AUTO, ackmaxidle, CTLFLAG_RW, &vt9p_ackmaxidle, 0,
+ "Maximum time request thread waits for ack from host");
+
+/*
+ * Wait for completion of a p9 request.
+ *
+ * This routine will sleep and release the chan mtx during the period.
+ * chan mtx will be acquired again upon return.
+ */
+static int
+vt9p_req_wait(struct vt9p_softc *chan, struct p9_req_t *req)
+{
+ KASSERT(req->tc->tag != req->rc->tag,
+ ("%s: request %p already completed", __func__, req));
+
+ if (msleep(req, VT9P_MTX(chan), 0, "chan lock", vt9p_ackmaxidle * hz)) {
+ /*
+ * No response from the host within vt9p_ackmaxidle seconds
+ * (120 by default); we cannot wait forever.
+ */
+ P9_DEBUG(ERROR, "Timeout after waiting %u seconds "
+ "for an ack from the host\n", vt9p_ackmaxidle);
+ return (EIO);
+ }
+ KASSERT(req->tc->tag == req->rc->tag,
+ ("%s spurious event on request %p", __func__, req));
+ return (0);
+}
+
+/*
+ * Request handler. This is called for every request submitted to the
+ * host. It maps the tc/rc buffers to sg lists and submits the request
+ * to the virtqueue. Since we have implemented a synchronous version,
+ * the submission thread sleeps until the ack from the interrupt wakes
+ * it up. Once awake, it returns to the p9fs layer. The rc buffer is
+ * then processed and completed to its upper layers.
+ */
+static int
+vt9p_request(void *handle, struct p9_req_t *req)
+{
+ int error;
+ struct vt9p_softc *chan;
+ int readable, writable;
+ struct sglist *sg;
+ struct virtqueue *vq;
+
+ chan = handle;
+ sg = chan->vt9p_sglist;
+ vq = chan->vt9p_vq;
+
+ P9_DEBUG(TRANS, "%s: req=%p\n", __func__, req);
+
+ /* Grab the channel lock. */
+ VT9P_LOCK(chan);
+req_retry:
+ sglist_reset(sg);
+ /* Handle out VirtIO ring buffers */
+ error = sglist_append(sg, req->tc->sdata, req->tc->size);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+ readable = sg->sg_nseg;
+
+ error = sglist_append(sg, req->rc->sdata, req->rc->capacity);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: sglist append failed\n", __func__);
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+ writable = sg->sg_nseg - readable;
+
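+ /*
+ * Note (added for clarity): "readable" counts the segments the host
+ * may read (the request), "writable" those it may write (the reply);
+ * virtqueue_enqueue() expects this split.
+ */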
+ error = virtqueue_enqueue(vq, req, sg, readable, writable);
+ if (error != 0) {
+ if (error == ENOSPC) {
+ /*
+ * The ring is full; wait on the submit condvar. cv_wait()
+ * drops the channel lock while sleeping and reacquires it
+ * before returning.
+ */
+ cv_wait(&chan->submit_cv, VT9P_MTX(chan));
+ P9_DEBUG(TRANS, "%s: retry virtio request\n", __func__);
+ goto req_retry;
+ } else {
+ P9_DEBUG(ERROR, "%s: virtio enuqueue failed \n", __func__);
+ VT9P_UNLOCK(chan);
+ return (EIO);
+ }
+ }
+
+ /* Notify the host that a new request is available. */
+ virtqueue_notify(vq);
+
+ error = vt9p_req_wait(chan, req);
+ if (error != 0) {
+ VT9P_UNLOCK(chan);
+ return (error);
+ }
+
+ VT9P_UNLOCK(chan);
+
+ P9_DEBUG(TRANS, "%s: virtio request kicked\n", __func__);
+
+ return (0);
+}
+
+/*
+ * Completion of a request from the virtqueue. This interrupt handler
+ * is set up at initialization and is called for every completed
+ * request. It wakes up the sleeping submission threads.
+ */
+static void
+vt9p_intr_complete(void *xsc)
+{
+ struct vt9p_softc *chan;
+ struct virtqueue *vq;
+ struct p9_req_t *curreq;
+
+ chan = (struct vt9p_softc *)xsc;
+ vq = chan->vt9p_vq;
+
+ P9_DEBUG(TRANS, "%s: completing\n", __func__);
+
+ VT9P_LOCK(chan);
+again:
+ while ((curreq = virtqueue_dequeue(vq, NULL)) != NULL) {
+ curreq->rc->tag = curreq->tc->tag;
+ wakeup_one(curreq);
+ }
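+ /*
+ * Note (added for clarity): virtqueue_enable_intr() reports whether
+ * completions arrived while interrupts were off; if so, drain them
+ * before leaving the handler.
+ */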
+ if (virtqueue_enable_intr(vq) != 0) {
+ virtqueue_disable_intr(vq);
+ goto again;
+ }
+ cv_signal(&chan->submit_cv);
+ VT9P_UNLOCK(chan);
+}
+
+/*
+ * Allocate the virtqueue and hook up its interrupt completion routine.
+ */
+static int
+vt9p_alloc_virtqueue(struct vt9p_softc *sc)
+{
+ struct vq_alloc_info vq_info;
+ device_t dev;
+
+ dev = sc->vt9p_dev;
+
+ VQ_ALLOC_INFO_INIT(&vq_info, sc->max_nsegs,
+ vt9p_intr_complete, sc, &sc->vt9p_vq,
+ "%s request", device_get_nameunit(dev));
+
+ return (virtio_alloc_virtqueues(dev, 1, &vq_info));
+}
+
+/* Probe for existence of 9P virtio channels */
+static int
+vt9p_probe(device_t dev)
+{
+
+ /* If the virtio device type is a 9P device, then we claim and attach it */
+ if (virtio_get_device_type(dev) != VIRTIO_ID_9P)
+ return (ENXIO);
+ device_set_desc(dev, "VirtIO 9P Transport");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static void
+vt9p_stop(struct vt9p_softc *sc)
+{
+
+ /* Device-specific stop. */
+ virtqueue_disable_intr(sc->vt9p_vq);
+ virtio_stop(sc->vt9p_dev);
+}
+
+/* Detach the 9P virtio PCI device */
+static int
+vt9p_detach(device_t dev)
+{
+ struct vt9p_softc *sc;
+
+ sc = device_get_softc(dev);
+ VT9P_LOCK(sc);
+ vt9p_stop(sc);
+ VT9P_UNLOCK(sc);
+
+ if (sc->vt9p_sglist) {
+ sglist_free(sc->vt9p_sglist);
+ sc->vt9p_sglist = NULL;
+ }
+ if (sc->mount_tag) {
+ free(sc->mount_tag, M_P9FS_MNTTAG);
+ sc->mount_tag = NULL;
+ }
+ mtx_lock(&global_chan_list_mtx);
+ STAILQ_REMOVE(&global_chan_list, sc, vt9p_softc, chan_next);
+ mtx_unlock(&global_chan_list_mtx);
+
+ VT9P_LOCK_DESTROY(sc);
+ cv_destroy(&sc->submit_cv);
+
+ return (0);
+}
+
+/* Attach the 9P virtio PCI device */
+static int
+vt9p_attach(device_t dev)
+{
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
+ struct vt9p_softc *chan;
+ char *mount_tag;
+ int error;
+ uint16_t mount_tag_len;
+
+ chan = device_get_softc(dev);
+ chan->vt9p_dev = dev;
+
+ /* Init the channel lock. */
+ VT9P_LOCK_INIT(chan);
+ /* Initialize the condition variable */
+ cv_init(&chan->submit_cv, "Condition variable for submit queue");
+ chan->max_nsegs = MAX_SUPPORTED_SGS;
+ chan->vt9p_sglist = sglist_alloc(chan->max_nsegs, M_WAITOK);
+
+ /* Negotiate the features from the host */
+ virtio_set_feature_desc(dev, virtio_9p_feature_desc);
+ virtio_negotiate_features(dev, VIRTIO_9PNET_F_MOUNT_TAG);
+
+ /*
+ * If the mount tag feature is supported, read the mount tag
+ * from the device config.
+ */
+ if (virtio_with_feature(dev, VIRTIO_9PNET_F_MOUNT_TAG))
+ mount_tag_len = virtio_read_dev_config_2(dev,
+ offsetof(struct virtio_9pnet_config, mount_tag_len));
+ else {
+ error = EINVAL;
+ P9_DEBUG(ERROR, "%s: Mount tag feature not supported by host\n", __func__);
+ goto out;
+ }
+ mount_tag = malloc(mount_tag_len + 1, M_P9FS_MNTTAG,
+ M_WAITOK | M_ZERO);
+
+ virtio_read_device_config_array(dev,
+ offsetof(struct virtio_9pnet_config, mount_tag),
+ mount_tag, 1, mount_tag_len);
+
+ device_printf(dev, "Mount tag: %s\n", mount_tag);
+
+ mount_tag_len++;
+ chan->mount_tag_len = mount_tag_len;
+ chan->mount_tag = mount_tag;
+
+ ctx = device_get_sysctl_ctx(dev);
+ tree = device_get_sysctl_tree(dev);
+ SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "p9fs_mount_tag",
+ CTLFLAG_RD, chan->mount_tag, 0, "Mount tag");
+
+ /* We expect one virtqueue, for requests. */
+ error = vt9p_alloc_virtqueue(chan);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Allocating the virtqueue failed \n", __func__);
+ goto out;
+ }
+ error = virtio_setup_intr(dev, INTR_TYPE_MISC|INTR_MPSAFE);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Cannot setup virtqueue interrupt\n", __func__);
+ goto out;
+ }
+ error = virtqueue_enable_intr(chan->vt9p_vq);
+ if (error != 0) {
+ P9_DEBUG(ERROR, "%s: Cannot enable virtqueue interrupt\n", __func__);
+ goto out;
+ }
+
+ mtx_lock(&global_chan_list_mtx);
+ /* Insert the channel in global channel list */
+ STAILQ_INSERT_HEAD(&global_chan_list, chan, chan_next);
+ mtx_unlock(&global_chan_list_mtx);
+
+ return (0);
+out:
+ /* Something went wrong, detach the device */
+ vt9p_detach(dev);
+ return (error);
+}
+
+/*
+ * Create a transport handle. This looks up the virtio channel that
+ * provides 9P communication for the given mount tag.
+ */
+static int
+vt9p_create(const char *mount_tag, void **handlep)
+{
+ struct vt9p_softc *sc, *chan;
+
+ chan = NULL;
+
+ /*
+ * Find the channel corresponding to the client's mount tag in the
+ * global list of channels and attach it to the client.
+ */
+ mtx_lock(&global_chan_list_mtx);
+ STAILQ_FOREACH(sc, &global_chan_list, chan_next) {
+ if (!strcmp(sc->mount_tag, mount_tag)) {
+ chan = sc;
+ break;
+ }
+ }
+ mtx_unlock(&global_chan_list_mtx);
+
+ /*
+ * If chan is already attached to a client then it cannot be used for
+ * another client.
+ */
+ if (chan && chan->busy) {
+ return (EBUSY);
+ }
+
+ /* If we don't have one, bail out for now. */
+ if (chan) {
+ *handlep = (void *)chan;
+ chan->busy = true;
+ } else {
+ P9_DEBUG(TRANS, "%s: No Global channel with mount_tag=%s\n",
+ __func__, mount_tag);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
+static void
+vt9p_close(void *handle)
+{
+ struct vt9p_softc *chan = handle;
+
+ chan->busy = false;
+}
+
+static struct p9_trans_module vt9p_trans = {
+ .name = "virtio",
+ .create = vt9p_create,
+ .close = vt9p_close,
+ .request = vt9p_request,
+ .cancel = vt9p_cancel,
+};
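+
+/*
+ * Assumption (added for illustration): the p9fs client is expected to
+ * select this transport by its .name ("virtio") when a mount requests
+ * the virtio transport.
+ */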
+
+static device_method_t vt9p_mthds[] = {
+ /* Device methods. */
+ DEVMETHOD(device_probe, vt9p_probe),
+ DEVMETHOD(device_attach, vt9p_attach),
+ DEVMETHOD(device_detach, vt9p_detach),
+ DEVMETHOD_END
+};
+
+static driver_t vt9p_drv = {
+ "virtio_p9fs",
+ vt9p_mthds,
+ sizeof(struct vt9p_softc)
+};
+
+static int
+vt9p_modevent(module_t mod, int type, void *unused)
+{
+ int error;
+
+ error = 0;
+
+ switch (type) {
+ case MOD_LOAD:
+ p9_init_zones();
+ p9_register_trans(&vt9p_trans);
+ break;
+ case MOD_UNLOAD:
+ p9_destroy_zones();
+ break;
+ case MOD_SHUTDOWN:
+ break;
+ default:
+ error = EOPNOTSUPP;
+ break;
+ }
+ return (error);
+}
+
+VIRTIO_DRIVER_MODULE(virtio_p9fs, vt9p_drv, vt9p_modevent, NULL);
+MODULE_VERSION(virtio_p9fs, 1);
+MODULE_DEPEND(virtio_p9fs, virtio, 1, 1, 1);
+MODULE_DEPEND(virtio_p9fs, p9fs, 1, 1, 1);
diff --git a/sys/dev/virtio/p9fs/virtio_p9fs.h b/sys/dev/virtio/p9fs/virtio_p9fs.h
new file mode 100644
index 000000000000..924b413d29a5
--- /dev/null
+++ b/sys/dev/virtio/p9fs/virtio_p9fs.h
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2017 Juniper Networks, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __VIRTIO_9P_CONFIG__
+#define __VIRTIO_9P_CONFIG__
+
+/* Mount tag feature: the device config space carries a mount tag. */
+#define VIRTIO_9PNET_F_MOUNT_TAG 1
+
+struct virtio_9pnet_config {
+ /* Mount tag length */
+ uint16_t mount_tag_len;
+ /* Tag name (not NUL-terminated) */
+ uint8_t mount_tag[0];
+};
+#endif /* __VIRTIO_9P_CONFIG__ */
diff --git a/sys/dev/virtio/pci/virtio_pci.c b/sys/dev/virtio/pci/virtio_pci.c
index 4d93e94e59f2..b7b34b448f6e 100644
--- a/sys/dev/virtio/pci/virtio_pci.c
+++ b/sys/dev/virtio/pci/virtio_pci.c
@@ -168,7 +168,7 @@ vtpci_add_child(struct vtpci_common *cn)
dev = cn->vtpci_dev;
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
device_printf(dev, "cannot create child device\n");
return (ENOMEM);
@@ -182,18 +182,14 @@ vtpci_add_child(struct vtpci_common *cn)
int
vtpci_delete_child(struct vtpci_common *cn)
{
- device_t dev, child;
+ device_t dev;
int error;
dev = cn->vtpci_dev;
- child = cn->vtpci_child_dev;
- if (child != NULL) {
- error = device_delete_child(dev, child);
- if (error)
- return (error);
- cn->vtpci_child_dev = NULL;
- }
+ error = bus_generic_detach(dev);
+ if (error)
+ return (error);
return (0);
}
diff --git a/sys/dev/virtio/pci/virtio_pci_legacy.c b/sys/dev/virtio/pci/virtio_pci_legacy.c
index 238434c350fb..18dbb00e0d8c 100644
--- a/sys/dev/virtio/pci/virtio_pci_legacy.c
+++ b/sys/dev/virtio/pci/virtio_pci_legacy.c
@@ -190,7 +190,6 @@ DRIVER_MODULE(virtio_pci_legacy, pci, vtpci_legacy_driver, 0, 0);
static int
vtpci_legacy_probe(device_t dev)
{
- char desc[64];
const char *name;
if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
@@ -207,8 +206,7 @@ vtpci_legacy_probe(device_t dev)
if (name == NULL)
name = "Unknown";
- snprintf(desc, sizeof(desc), "VirtIO PCI (legacy) %s adapter", name);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "VirtIO PCI (legacy) %s adapter", name);
/* Prefer transitional modern VirtIO PCI. */
return (BUS_PROBE_LOW_PRIORITY);
diff --git a/sys/dev/virtio/pci/virtio_pci_modern.c b/sys/dev/virtio/pci/virtio_pci_modern.c
index 8f9b1f21aeab..eb1d5a1e6989 100644
--- a/sys/dev/virtio/pci/virtio_pci_modern.c
+++ b/sys/dev/virtio/pci/virtio_pci_modern.c
@@ -244,7 +244,6 @@ DRIVER_MODULE(virtio_pci_modern, pci, vtpci_modern_driver, 0, 0);
static int
vtpci_modern_probe(device_t dev)
{
- char desc[64];
const char *name;
uint16_t devid;
@@ -269,8 +268,7 @@ vtpci_modern_probe(device_t dev)
if (name == NULL)
name = "Unknown";
- snprintf(desc, sizeof(desc), "VirtIO PCI (modern) %s adapter", name);
- device_set_desc_copy(dev, desc);
+ device_set_descf(dev, "VirtIO PCI (modern) %s adapter", name);
return (BUS_PROBE_DEFAULT);
}
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index f938ba99ae53..3f30c8b68f4c 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -77,7 +77,7 @@ static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
diff --git a/sys/dev/virtio/scsi/virtio_scsi.c b/sys/dev/virtio/scsi/virtio_scsi.c
index 68da81a97855..857da56ba426 100644
--- a/sys/dev/virtio/scsi/virtio_scsi.c
+++ b/sys/dev/virtio/scsi/virtio_scsi.c
@@ -41,8 +41,7 @@
#include <sys/callout.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
index 57ae90bdc917..4181b641faad 100644
--- a/sys/dev/virtio/virtio_bus_if.m
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -109,7 +109,3 @@ METHOD void write_device_config {
int len;
};
-METHOD void poll {
- device_t dev;
-};
-
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index c92f635832f2..cc7a233d60ee 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -360,7 +360,7 @@ virtqueue_free(struct virtqueue *vq)
virtqueue_free_indirect(vq);
if (vq->vq_ring_mem != NULL) {
- contigfree(vq->vq_ring_mem, vq->vq_ring_size, M_DEVBUF);
+ free(vq->vq_ring_mem, M_DEVBUF);
vq->vq_ring_size = 0;
vq->vq_ring_mem = NULL;
}
@@ -605,10 +605,8 @@ virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
- VIRTIO_BUS_POLL(vq->vq_dev);
while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
- VIRTIO_BUS_POLL(vq->vq_dev);
}
return (cookie);
diff --git a/sys/dev/vkbd/vkbd.c b/sys/dev/vkbd/vkbd.c
index 3dbf64063e33..27a7e626fa64 100644
--- a/sys/dev/vkbd/vkbd.c
+++ b/sys/dev/vkbd/vkbd.c
@@ -82,7 +82,7 @@ MALLOC_DEFINE(M_VKBD, KEYBOARD_NAME, "Virtual AT keyboard");
#define VKBD_UNLOCK(s) mtx_unlock(&(s)->ks_lock)
#define VKBD_LOCK_ASSERT(s, w) mtx_assert(&(s)->ks_lock, w)
#define VKBD_SLEEP(s, f, d, t) \
- msleep(&(s)->f, &(s)->ks_lock, PCATCH | (PZERO + 1), d, t)
+ msleep(&(s)->f, &(s)->ks_lock, PCATCH | PZERO, d, t)
#else
#define VKBD_LOCK_DECL
#define VKBD_LOCK_INIT(s)
@@ -90,7 +90,7 @@ MALLOC_DEFINE(M_VKBD, KEYBOARD_NAME, "Virtual AT keyboard");
#define VKBD_LOCK(s)
#define VKBD_UNLOCK(s)
#define VKBD_LOCK_ASSERT(s, w)
-#define VKBD_SLEEP(s, f, d, t) tsleep(&(s)->f, PCATCH | (PZERO + 1), d, t)
+#define VKBD_SLEEP(s, f, d, t) tsleep(&(s)->f, PCATCH | PZERO, d, t)
#endif
#define VKBD_KEYBOARD(d) \
@@ -268,8 +268,8 @@ vkbd_dev_close(struct cdev *dev, int foo, int bar, struct thread *td)
VKBD_SLEEP(state, ks_task, "vkbdc", 0);
/* wakeup poll()ers */
- selwakeuppri(&state->ks_rsel, PZERO + 1);
- selwakeuppri(&state->ks_wsel, PZERO + 1);
+ selwakeuppri(&state->ks_rsel, PZERO);
+ selwakeuppri(&state->ks_wsel, PZERO);
state->ks_flags &= ~OPEN;
state->ks_dev = NULL;
@@ -498,7 +498,7 @@ vkbd_status_changed(vkbd_state_t *state)
if (!(state->ks_flags & STATUS)) {
state->ks_flags |= STATUS;
- selwakeuppri(&state->ks_rsel, PZERO + 1);
+ selwakeuppri(&state->ks_rsel, PZERO);
wakeup(&state->ks_flags);
}
}
@@ -531,7 +531,7 @@ vkbd_data_read(vkbd_state_t *state, int wait)
q->head = 0;
/* wakeup ks_inq writers/poll()ers */
- selwakeuppri(&state->ks_wsel, PZERO + 1);
+ selwakeuppri(&state->ks_wsel, PZERO);
wakeup(q);
return (c);
@@ -1246,7 +1246,7 @@ vkbd_clear_state_locked(vkbd_state_t *state)
/* flush ks_inq and wakeup writers/poll()ers */
state->ks_inq.head = state->ks_inq.tail = state->ks_inq.cc = 0;
- selwakeuppri(&state->ks_wsel, PZERO + 1);
+ selwakeuppri(&state->ks_wsel, PZERO);
wakeup(&state->ks_inq);
}
diff --git a/sys/dev/vmd/vmd.c b/sys/dev/vmd/vmd.c
index b52787fc45d3..0595a6c5be16 100644
--- a/sys/dev/vmd/vmd.c
+++ b/sys/dev/vmd/vmd.c
@@ -383,9 +383,9 @@ vmd_attach(device_t dev)
}
sc->vmd_dma_tag = bus_get_dma_tag(dev);
-
- sc->psc.child = device_add_child(dev, "pci", -1);
- return (bus_generic_attach(dev));
+ sc->psc.child = device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
fail:
vmd_free(sc);
@@ -401,9 +401,6 @@ vmd_detach(device_t dev)
error = bus_generic_detach(dev);
if (error)
return (error);
- error = device_delete_children(dev);
- if (error)
- return (error);
if (sc->vmd_msix_count == 0)
vmd_set_msi_bypass(dev, false);
vmd_free(sc);
@@ -543,7 +540,7 @@ vmd_map_resource(device_t dev, device_t child, struct resource *r,
args.offset = start - rman_get_start(pres);
args.length = length;
- return (bus_generic_map_resource(dev, child, pres, &args, map));
+ return (bus_map_resource(dev, pres, &args, map));
}
static int
@@ -551,11 +548,12 @@ vmd_unmap_resource(device_t dev, device_t child, struct resource *r,
struct resource_map *map)
{
struct vmd_softc *sc = device_get_softc(dev);
+ struct resource *pres;
- r = vmd_find_parent_resource(sc, r);
- if (r == NULL)
+ pres = vmd_find_parent_resource(sc, r);
+ if (pres == NULL)
return (ENOENT);
- return (bus_generic_unmap_resource(dev, child, r, map));
+ return (bus_unmap_resource(dev, pres, map));
}
static int
diff --git a/sys/dev/vmgenc/vmgenc_acpi.c b/sys/dev/vmgenc/vmgenc_acpi.c
index 2ad8929dfd34..18519a8e4f22 100644
--- a/sys/dev/vmgenc/vmgenc_acpi.c
+++ b/sys/dev/vmgenc/vmgenc_acpi.c
@@ -56,6 +56,7 @@
#include <contrib/dev/acpica/include/acpi.h>
#include <dev/acpica/acpivar.h>
+#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>
#include <dev/vmgenc/vmgenc_acpi.h>
@@ -210,6 +211,11 @@ acpi_GetPackedUINT64(device_t dev, ACPI_HANDLE handle, char *path,
}
+static const struct random_source random_vmgenid = {
+ .rs_ident = "VM Generation ID",
+ .rs_source = RANDOM_PURE_VMGENID,
+};
+
static int
vmgenc_attach(device_t dev)
{
@@ -234,7 +240,7 @@ vmgenc_attach(device_t dev)
memcpy(sc->vmg_cache_guid, __DEVOLATILE(void *, sc->vmg_pguid),
sizeof(sc->vmg_cache_guid));
- random_harvest_register_source(RANDOM_PURE_VMGENID);
+ random_source_register(&random_vmgenid);
vmgenc_harvest_all(sc->vmg_cache_guid, sizeof(sc->vmg_cache_guid));
AcpiInstallNotifyHandler(h, ACPI_DEVICE_NOTIFY, vmgenc_notify, dev);
diff --git a/sys/dev/vmm/vmm_dev.c b/sys/dev/vmm/vmm_dev.c
new file mode 100644
index 000000000000..460a508a60dc
--- /dev/null
+++ b/sys/dev/vmm/vmm_dev.c
@@ -0,0 +1,1209 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ */
+
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/ioccom.h>
+#include <sys/jail.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/proc.h>
+#include <sys/queue.h>
+#include <sys/sx.h>
+#include <sys/sysctl.h>
+#include <sys/ucred.h>
+#include <sys/uio.h>
+
+#include <machine/vmm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
+#include <dev/vmm/vmm_stat.h>
+
+#ifdef __amd64__
+#ifdef COMPAT_FREEBSD12
+struct vm_memseg_12 {
+ int segid;
+ size_t len;
+ char name[64];
+};
+_Static_assert(sizeof(struct vm_memseg_12) == 80, "COMPAT_FREEBSD12 ABI");
+
+#define VM_ALLOC_MEMSEG_12 \
+ _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_12)
+#define VM_GET_MEMSEG_12 \
+ _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_12)
+#endif /* COMPAT_FREEBSD12 */
+#ifdef COMPAT_FREEBSD14
+struct vm_memseg_14 {
+ int segid;
+ size_t len;
+ char name[VM_MAX_SUFFIXLEN + 1];
+};
+_Static_assert(sizeof(struct vm_memseg_14) == (VM_MAX_SUFFIXLEN + 1 + 16),
+ "COMPAT_FREEBSD14 ABI");
+
+#define VM_ALLOC_MEMSEG_14 \
+ _IOW('v', IOCNUM_ALLOC_MEMSEG, struct vm_memseg_14)
+#define VM_GET_MEMSEG_14 \
+ _IOWR('v', IOCNUM_GET_MEMSEG, struct vm_memseg_14)
+#endif /* COMPAT_FREEBSD14 */
+#endif /* __amd64__ */
+
+struct devmem_softc {
+ int segid;
+ char *name;
+ struct cdev *cdev;
+ struct vmmdev_softc *sc;
+ SLIST_ENTRY(devmem_softc) link;
+};
+
+struct vmmdev_softc {
+ struct vm *vm; /* vm instance cookie */
+ struct cdev *cdev;
+ struct ucred *ucred;
+ SLIST_ENTRY(vmmdev_softc) link;
+ SLIST_HEAD(, devmem_softc) devmem;
+ int flags;
+};
+
+static SLIST_HEAD(, vmmdev_softc) head;
+
+static unsigned pr_allow_flag;
+static struct sx vmmdev_mtx;
+SX_SYSINIT(vmmdev_mtx, &vmmdev_mtx, "vmm device mutex");
+
+static MALLOC_DEFINE(M_VMMDEV, "vmmdev", "vmmdev");
+
+SYSCTL_DECL(_hw_vmm);
+
+static void devmem_destroy(void *arg);
+static int devmem_create_cdev(struct vmmdev_softc *sc, int id, char *devmem);
+
+static int
+vmm_priv_check(struct ucred *ucred)
+{
+ if (jailed(ucred) &&
+ !(ucred->cr_prison->pr_allow & pr_allow_flag))
+ return (EPERM);
+
+ return (0);
+}
+
+static int
+vcpu_lock_one(struct vcpu *vcpu)
+{
+ return (vcpu_set_state(vcpu, VCPU_FROZEN, true));
+}
+
+static void
+vcpu_unlock_one(struct vcpu *vcpu)
+{
+ enum vcpu_state state;
+
+ state = vcpu_get_state(vcpu, NULL);
+ if (state != VCPU_FROZEN) {
+ panic("vcpu %s(%d) has invalid state %d",
+ vm_name(vcpu_vm(vcpu)), vcpu_vcpuid(vcpu), state);
+ }
+
+ vcpu_set_state(vcpu, VCPU_IDLE, false);
+}
+
+static int
+vcpu_lock_all(struct vmmdev_softc *sc)
+{
+ struct vcpu *vcpu;
+ int error;
+ uint16_t i, j, maxcpus;
+
+ error = 0;
+ vm_slock_vcpus(sc->vm);
+ maxcpus = vm_get_maxcpus(sc->vm);
+ for (i = 0; i < maxcpus; i++) {
+ vcpu = vm_vcpu(sc->vm, i);
+ if (vcpu == NULL)
+ continue;
+ error = vcpu_lock_one(vcpu);
+ if (error)
+ break;
+ }
+
+ if (error) {
+ for (j = 0; j < i; j++) {
+ vcpu = vm_vcpu(sc->vm, j);
+ if (vcpu == NULL)
+ continue;
+ vcpu_unlock_one(vcpu);
+ }
+ vm_unlock_vcpus(sc->vm);
+ }
+
+ return (error);
+}
+
+static void
+vcpu_unlock_all(struct vmmdev_softc *sc)
+{
+ struct vcpu *vcpu;
+ uint16_t i, maxcpus;
+
+ maxcpus = vm_get_maxcpus(sc->vm);
+ for (i = 0; i < maxcpus; i++) {
+ vcpu = vm_vcpu(sc->vm, i);
+ if (vcpu == NULL)
+ continue;
+ vcpu_unlock_one(vcpu);
+ }
+ vm_unlock_vcpus(sc->vm);
+}
+
+static struct vmmdev_softc *
+vmmdev_lookup(const char *name, struct ucred *cred)
+{
+ struct vmmdev_softc *sc;
+
+ sx_assert(&vmmdev_mtx, SA_XLOCKED);
+
+ SLIST_FOREACH(sc, &head, link) {
+ if (strcmp(name, vm_name(sc->vm)) == 0)
+ break;
+ }
+
+ if (sc == NULL)
+ return (NULL);
+
+ if (cr_cansee(cred, sc->ucred))
+ return (NULL);
+
+ return (sc);
+}
+
+static struct vmmdev_softc *
+vmmdev_lookup2(struct cdev *cdev)
+{
+ return (cdev->si_drv1);
+}
+
+static int
+vmmdev_rw(struct cdev *cdev, struct uio *uio, int flags)
+{
+ int error, off, c, prot;
+ vm_paddr_t gpa, maxaddr;
+ void *hpa, *cookie;
+ struct vmmdev_softc *sc;
+
+ sc = vmmdev_lookup2(cdev);
+ if (sc == NULL)
+ return (ENXIO);
+
+ /*
+ * Get a read lock on the guest memory map.
+ */
+ vm_slock_memsegs(sc->vm);
+
+ error = 0;
+ prot = (uio->uio_rw == UIO_WRITE ? VM_PROT_WRITE : VM_PROT_READ);
+ maxaddr = vmm_sysmem_maxaddr(sc->vm);
+ while (uio->uio_resid > 0 && error == 0) {
+ gpa = uio->uio_offset;
+ off = gpa & PAGE_MASK;
+ c = min(uio->uio_resid, PAGE_SIZE - off);
+
+ /*
+ * The VM has a hole in its physical memory map. If we want to
+ * use 'dd' to inspect memory beyond the hole we need to
+ * provide bogus data for memory that lies in the hole.
+ *
+ * Since this device does not support lseek(2), dd(1) will
+ * read(2) blocks of data to simulate lseek(2).
+ */
+ hpa = vm_gpa_hold_global(sc->vm, gpa, c, prot, &cookie);
+ if (hpa == NULL) {
+ if (uio->uio_rw == UIO_READ && gpa < maxaddr)
+ error = uiomove(__DECONST(void *, zero_region),
+ c, uio);
+ else
+ error = EFAULT;
+ } else {
+ error = uiomove(hpa, c, uio);
+ vm_gpa_release(cookie);
+ }
+ }
+ vm_unlock_memsegs(sc->vm);
+ return (error);
+}
+
+CTASSERT(sizeof(((struct vm_memseg *)0)->name) >= VM_MAX_SUFFIXLEN + 1);
+
+static int
+get_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len)
+{
+ struct devmem_softc *dsc;
+ int error;
+ bool sysmem;
+
+ error = vm_get_memseg(sc->vm, mseg->segid, &mseg->len, &sysmem, NULL);
+ if (error || mseg->len == 0)
+ return (error);
+
+ if (!sysmem) {
+ SLIST_FOREACH(dsc, &sc->devmem, link) {
+ if (dsc->segid == mseg->segid)
+ break;
+ }
+ KASSERT(dsc != NULL, ("%s: devmem segment %d not found",
+ __func__, mseg->segid));
+ error = copystr(dsc->name, mseg->name, len, NULL);
+ } else {
+ bzero(mseg->name, len);
+ }
+
+ return (error);
+}
+
+static int
+alloc_memseg(struct vmmdev_softc *sc, struct vm_memseg *mseg, size_t len,
+ struct domainset *domainset)
+{
+ char *name;
+ int error;
+ bool sysmem;
+
+ error = 0;
+ name = NULL;
+ sysmem = true;
+
+ /*
+ * The allocation is lengthened by 1 to hold a terminating NUL. It'll
+ * be stripped off when devfs processes the full string.
+ */
+ if (VM_MEMSEG_NAME(mseg)) {
+ sysmem = false;
+ name = malloc(len, M_VMMDEV, M_WAITOK);
+ error = copystr(mseg->name, name, len, NULL);
+ if (error)
+ goto done;
+ }
+ error = vm_alloc_memseg(sc->vm, mseg->segid, mseg->len, sysmem, domainset);
+ if (error)
+ goto done;
+
+ if (VM_MEMSEG_NAME(mseg)) {
+ error = devmem_create_cdev(sc, mseg->segid, name);
+ if (error)
+ vm_free_memseg(sc->vm, mseg->segid);
+ else
+ name = NULL; /* freed when 'cdev' is destroyed */
+ }
+done:
+ free(name, M_VMMDEV);
+ return (error);
+}
+
+#if defined(__amd64__) && \
+ (defined(COMPAT_FREEBSD14) || defined(COMPAT_FREEBSD12))
+/*
+ * Translate pre-15.0 memory segment identifiers into their 15.0 counterparts.
+ */
+static void
+adjust_segid(struct vm_memseg *mseg)
+{
+ if (mseg->segid != VM_SYSMEM) {
+ mseg->segid += (VM_BOOTROM - 1);
+ }
+}
+#endif
+
+static int
+vm_get_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
+ uint64_t *regval)
+{
+ int error, i;
+
+ error = 0;
+ for (i = 0; i < count; i++) {
+ error = vm_get_register(vcpu, regnum[i], &regval[i]);
+ if (error)
+ break;
+ }
+ return (error);
+}
+
+static int
+vm_set_register_set(struct vcpu *vcpu, unsigned int count, int *regnum,
+ uint64_t *regval)
+{
+ int error, i;
+
+ error = 0;
+ for (i = 0; i < count; i++) {
+ error = vm_set_register(vcpu, regnum[i], regval[i]);
+ if (error)
+ break;
+ }
+ return (error);
+}
+
+static int
+vmmdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
+{
+ int error;
+
+ /*
+ * A jail without vmm access shouldn't be able to access vmm device
+ * files at all, but check here just to be thorough.
+ */
+ error = vmm_priv_check(td->td_ucred);
+ if (error != 0)
+ return (error);
+
+ return (0);
+}
+
+static const struct vmmdev_ioctl vmmdev_ioctls[] = {
+ VMMDEV_IOCTL(VM_GET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_SET_REGISTER, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_GET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_SET_REGISTER_SET, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_GET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_SET_CAPABILITY, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_ACTIVATE_CPU, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_INJECT_EXCEPTION, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_STATS, VMMDEV_IOCTL_LOCK_ONE_VCPU),
+ VMMDEV_IOCTL(VM_STAT_DESC, 0),
+
+#ifdef __amd64__
+#ifdef COMPAT_FREEBSD12
+ VMMDEV_IOCTL(VM_ALLOC_MEMSEG_12,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+#endif
+#ifdef COMPAT_FREEBSD14
+ VMMDEV_IOCTL(VM_ALLOC_MEMSEG_14,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+#endif
+#endif /* __amd64__ */
+ VMMDEV_IOCTL(VM_ALLOC_MEMSEG,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL(VM_MMAP_MEMSEG,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL(VM_MUNMAP_MEMSEG,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+ VMMDEV_IOCTL(VM_REINIT,
+ VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_LOCK_ALL_VCPUS),
+
+#ifdef __amd64__
+#if defined(COMPAT_FREEBSD12)
+ VMMDEV_IOCTL(VM_GET_MEMSEG_12, VMMDEV_IOCTL_SLOCK_MEMSEGS),
+#endif
+#ifdef COMPAT_FREEBSD14
+ VMMDEV_IOCTL(VM_GET_MEMSEG_14, VMMDEV_IOCTL_SLOCK_MEMSEGS),
+#endif
+#endif /* __amd64__ */
+ VMMDEV_IOCTL(VM_GET_MEMSEG, VMMDEV_IOCTL_SLOCK_MEMSEGS),
+ VMMDEV_IOCTL(VM_MMAP_GETNEXT, VMMDEV_IOCTL_SLOCK_MEMSEGS),
+
+ VMMDEV_IOCTL(VM_SUSPEND_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),
+ VMMDEV_IOCTL(VM_RESUME_CPU, VMMDEV_IOCTL_MAYBE_ALLOC_VCPU),
+
+ VMMDEV_IOCTL(VM_SUSPEND, 0),
+ VMMDEV_IOCTL(VM_GET_CPUS, 0),
+ VMMDEV_IOCTL(VM_GET_TOPOLOGY, 0),
+ VMMDEV_IOCTL(VM_SET_TOPOLOGY, 0),
+};
+
+static int
+vmmdev_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ struct vmmdev_softc *sc;
+ struct vcpu *vcpu;
+ const struct vmmdev_ioctl *ioctl;
+ struct vm_memseg *mseg;
+ int error, vcpuid;
+
+ sc = vmmdev_lookup2(cdev);
+ if (sc == NULL)
+ return (ENXIO);
+
+ ioctl = NULL;
+ for (size_t i = 0; i < nitems(vmmdev_ioctls); i++) {
+ if (vmmdev_ioctls[i].cmd == cmd) {
+ ioctl = &vmmdev_ioctls[i];
+ break;
+ }
+ }
+ if (ioctl == NULL) {
+ for (size_t i = 0; i < vmmdev_machdep_ioctl_count; i++) {
+ if (vmmdev_machdep_ioctls[i].cmd == cmd) {
+ ioctl = &vmmdev_machdep_ioctls[i];
+ break;
+ }
+ }
+ }
+ if (ioctl == NULL)
+ return (ENOTTY);
+
+ if ((ioctl->flags & VMMDEV_IOCTL_XLOCK_MEMSEGS) != 0)
+ vm_xlock_memsegs(sc->vm);
+ else if ((ioctl->flags & VMMDEV_IOCTL_SLOCK_MEMSEGS) != 0)
+ vm_slock_memsegs(sc->vm);
+
+ vcpu = NULL;
+ vcpuid = -1;
+ if ((ioctl->flags & (VMMDEV_IOCTL_LOCK_ONE_VCPU |
+ VMMDEV_IOCTL_ALLOC_VCPU | VMMDEV_IOCTL_MAYBE_ALLOC_VCPU)) != 0) {
+ vcpuid = *(int *)data;
+ if (vcpuid == -1) {
+ if ((ioctl->flags &
+ VMMDEV_IOCTL_MAYBE_ALLOC_VCPU) == 0) {
+ error = EINVAL;
+ goto lockfail;
+ }
+ } else {
+ vcpu = vm_alloc_vcpu(sc->vm, vcpuid);
+ if (vcpu == NULL) {
+ error = EINVAL;
+ goto lockfail;
+ }
+ if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0) {
+ error = vcpu_lock_one(vcpu);
+ if (error)
+ goto lockfail;
+ }
+ }
+ }
+ if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0) {
+ error = vcpu_lock_all(sc);
+ if (error)
+ goto lockfail;
+ }
+
+ switch (cmd) {
+ case VM_SUSPEND: {
+ struct vm_suspend *vmsuspend;
+
+ vmsuspend = (struct vm_suspend *)data;
+ error = vm_suspend(sc->vm, vmsuspend->how);
+ break;
+ }
+ case VM_REINIT:
+ error = vm_reinit(sc->vm);
+ break;
+ case VM_STAT_DESC: {
+ struct vm_stat_desc *statdesc;
+
+ statdesc = (struct vm_stat_desc *)data;
+ error = vmm_stat_desc_copy(statdesc->index, statdesc->desc,
+ sizeof(statdesc->desc));
+ break;
+ }
+ case VM_STATS: {
+ struct vm_stats *vmstats;
+
+ vmstats = (struct vm_stats *)data;
+ getmicrotime(&vmstats->tv);
+ error = vmm_stat_copy(vcpu, vmstats->index,
+ nitems(vmstats->statbuf), &vmstats->num_entries,
+ vmstats->statbuf);
+ break;
+ }
+ case VM_MMAP_GETNEXT: {
+ struct vm_memmap *mm;
+
+ mm = (struct vm_memmap *)data;
+ error = vm_mmap_getnext(sc->vm, &mm->gpa, &mm->segid,
+ &mm->segoff, &mm->len, &mm->prot, &mm->flags);
+ break;
+ }
+ case VM_MMAP_MEMSEG: {
+ struct vm_memmap *mm;
+
+ mm = (struct vm_memmap *)data;
+ error = vm_mmap_memseg(sc->vm, mm->gpa, mm->segid, mm->segoff,
+ mm->len, mm->prot, mm->flags);
+ break;
+ }
+ case VM_MUNMAP_MEMSEG: {
+ struct vm_munmap *mu;
+
+ mu = (struct vm_munmap *)data;
+ error = vm_munmap_memseg(sc->vm, mu->gpa, mu->len);
+ break;
+ }
+#ifdef __amd64__
+#ifdef COMPAT_FREEBSD12
+ case VM_ALLOC_MEMSEG_12:
+ mseg = (struct vm_memseg *)data;
+
+ adjust_segid(mseg);
+ error = alloc_memseg(sc, mseg,
+ sizeof(((struct vm_memseg_12 *)0)->name), NULL);
+ break;
+ case VM_GET_MEMSEG_12:
+ mseg = (struct vm_memseg *)data;
+
+ adjust_segid(mseg);
+ error = get_memseg(sc, mseg,
+ sizeof(((struct vm_memseg_12 *)0)->name));
+ break;
+#endif /* COMPAT_FREEBSD12 */
+#ifdef COMPAT_FREEBSD14
+ case VM_ALLOC_MEMSEG_14:
+ mseg = (struct vm_memseg *)data;
+
+ adjust_segid(mseg);
+ error = alloc_memseg(sc, mseg,
+ sizeof(((struct vm_memseg_14 *)0)->name), NULL);
+ break;
+ case VM_GET_MEMSEG_14:
+ mseg = (struct vm_memseg *)data;
+
+ adjust_segid(mseg);
+ error = get_memseg(sc, mseg,
+ sizeof(((struct vm_memseg_14 *)0)->name));
+ break;
+#endif /* COMPAT_FREEBSD14 */
+#endif /* __amd64__ */
+ case VM_ALLOC_MEMSEG: {
+ domainset_t *mask;
+ struct domainset *domainset, domain;
+
+ domainset = NULL;
+ mseg = (struct vm_memseg *)data;
+ if (mseg->ds_policy != DOMAINSET_POLICY_INVALID && mseg->ds_mask != NULL) {
+ if (mseg->ds_mask_size < sizeof(domainset_t) ||
+ mseg->ds_mask_size > DOMAINSET_MAXSIZE / NBBY) {
+ error = ERANGE;
+ break;
+ }
+ memset(&domain, 0, sizeof(domain));
+ mask = malloc(mseg->ds_mask_size, M_VMMDEV, M_WAITOK);
+ error = copyin(mseg->ds_mask, mask, mseg->ds_mask_size);
+ if (error) {
+ free(mask, M_VMMDEV);
+ break;
+ }
+ error = domainset_populate(&domain, mask, mseg->ds_policy,
+ mseg->ds_mask_size);
+ if (error) {
+ free(mask, M_VMMDEV);
+ break;
+ }
+ domainset = domainset_create(&domain);
+ if (domainset == NULL) {
+ error = EINVAL;
+ free(mask, M_VMMDEV);
+ break;
+ }
+ free(mask, M_VMMDEV);
+ }
+ error = alloc_memseg(sc, mseg, sizeof(mseg->name), domainset);
+
+ break;
+ }
+ case VM_GET_MEMSEG:
+ error = get_memseg(sc, (struct vm_memseg *)data,
+ sizeof(((struct vm_memseg *)0)->name));
+ break;
+ case VM_GET_REGISTER: {
+ struct vm_register *vmreg;
+
+ vmreg = (struct vm_register *)data;
+ error = vm_get_register(vcpu, vmreg->regnum, &vmreg->regval);
+ break;
+ }
+ case VM_SET_REGISTER: {
+ struct vm_register *vmreg;
+
+ vmreg = (struct vm_register *)data;
+ error = vm_set_register(vcpu, vmreg->regnum, vmreg->regval);
+ break;
+ }
+ case VM_GET_REGISTER_SET: {
+ struct vm_register_set *vmregset;
+ uint64_t *regvals;
+ int *regnums;
+
+ vmregset = (struct vm_register_set *)data;
+ if (vmregset->count > VM_REG_LAST) {
+ error = EINVAL;
+ break;
+ }
+ regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
+ M_WAITOK);
+ regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
+ M_WAITOK);
+ error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
+ vmregset->count);
+ if (error == 0)
+ error = vm_get_register_set(vcpu,
+ vmregset->count, regnums, regvals);
+ if (error == 0)
+ error = copyout(regvals, vmregset->regvals,
+ sizeof(regvals[0]) * vmregset->count);
+ free(regvals, M_VMMDEV);
+ free(regnums, M_VMMDEV);
+ break;
+ }
+ case VM_SET_REGISTER_SET: {
+ struct vm_register_set *vmregset;
+ uint64_t *regvals;
+ int *regnums;
+
+ vmregset = (struct vm_register_set *)data;
+ if (vmregset->count > VM_REG_LAST) {
+ error = EINVAL;
+ break;
+ }
+ regvals = malloc(sizeof(regvals[0]) * vmregset->count, M_VMMDEV,
+ M_WAITOK);
+ regnums = malloc(sizeof(regnums[0]) * vmregset->count, M_VMMDEV,
+ M_WAITOK);
+ error = copyin(vmregset->regnums, regnums, sizeof(regnums[0]) *
+ vmregset->count);
+ if (error == 0)
+ error = copyin(vmregset->regvals, regvals,
+ sizeof(regvals[0]) * vmregset->count);
+ if (error == 0)
+ error = vm_set_register_set(vcpu,
+ vmregset->count, regnums, regvals);
+ free(regvals, M_VMMDEV);
+ free(regnums, M_VMMDEV);
+ break;
+ }
+ case VM_GET_CAPABILITY: {
+ struct vm_capability *vmcap;
+
+ vmcap = (struct vm_capability *)data;
+ error = vm_get_capability(vcpu, vmcap->captype, &vmcap->capval);
+ break;
+ }
+ case VM_SET_CAPABILITY: {
+ struct vm_capability *vmcap;
+
+ vmcap = (struct vm_capability *)data;
+ error = vm_set_capability(vcpu, vmcap->captype, vmcap->capval);
+ break;
+ }
+ case VM_ACTIVATE_CPU:
+ error = vm_activate_cpu(vcpu);
+ break;
+ case VM_GET_CPUS: {
+ struct vm_cpuset *vm_cpuset;
+ cpuset_t *cpuset;
+ int size;
+
+ error = 0;
+ vm_cpuset = (struct vm_cpuset *)data;
+ size = vm_cpuset->cpusetsize;
+ if (size < 1 || size > CPU_MAXSIZE / NBBY) {
+ error = ERANGE;
+ break;
+ }
+ cpuset = malloc(max(size, sizeof(cpuset_t)), M_TEMP,
+ M_WAITOK | M_ZERO);
+ if (vm_cpuset->which == VM_ACTIVE_CPUS)
+ *cpuset = vm_active_cpus(sc->vm);
+ else if (vm_cpuset->which == VM_SUSPENDED_CPUS)
+ *cpuset = vm_suspended_cpus(sc->vm);
+ else if (vm_cpuset->which == VM_DEBUG_CPUS)
+ *cpuset = vm_debug_cpus(sc->vm);
+ else
+ error = EINVAL;
+ if (error == 0 && size < howmany(CPU_FLS(cpuset), NBBY))
+ error = ERANGE;
+ if (error == 0)
+ error = copyout(cpuset, vm_cpuset->cpus, size);
+ free(cpuset, M_TEMP);
+ break;
+ }
+ case VM_SUSPEND_CPU:
+ error = vm_suspend_cpu(sc->vm, vcpu);
+ break;
+ case VM_RESUME_CPU:
+ error = vm_resume_cpu(sc->vm, vcpu);
+ break;
+ case VM_SET_TOPOLOGY: {
+ struct vm_cpu_topology *topology;
+
+ topology = (struct vm_cpu_topology *)data;
+ error = vm_set_topology(sc->vm, topology->sockets,
+ topology->cores, topology->threads, topology->maxcpus);
+ break;
+ }
+ case VM_GET_TOPOLOGY: {
+ struct vm_cpu_topology *topology;
+
+ topology = (struct vm_cpu_topology *)data;
+ vm_get_topology(sc->vm, &topology->sockets, &topology->cores,
+ &topology->threads, &topology->maxcpus);
+ error = 0;
+ break;
+ }
+ default:
+ error = vmmdev_machdep_ioctl(sc->vm, vcpu, cmd, data, fflag,
+ td);
+ break;
+ }
+
+ if ((ioctl->flags &
+ (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
+ vm_unlock_memsegs(sc->vm);
+ if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ALL_VCPUS) != 0)
+ vcpu_unlock_all(sc);
+ else if ((ioctl->flags & VMMDEV_IOCTL_LOCK_ONE_VCPU) != 0)
+ vcpu_unlock_one(vcpu);
+
+ /*
+ * Make sure that no handler returns a kernel-internal
+ * error value to userspace.
+ */
+ KASSERT(error == ERESTART || error >= 0,
+ ("vmmdev_ioctl: invalid error return %d", error));
+ return (error);
+
+lockfail:
+ if ((ioctl->flags &
+ (VMMDEV_IOCTL_XLOCK_MEMSEGS | VMMDEV_IOCTL_SLOCK_MEMSEGS)) != 0)
+ vm_unlock_memsegs(sc->vm);
+ return (error);
+}
+
+static int
+vmmdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t mapsize,
+ struct vm_object **objp, int nprot)
+{
+ struct vmmdev_softc *sc;
+ vm_paddr_t gpa;
+ size_t len;
+ vm_ooffset_t segoff, first, last;
+ int error, found, segid;
+ bool sysmem;
+
+ first = *offset;
+ last = first + mapsize;
+ if ((nprot & PROT_EXEC) || first < 0 || first >= last)
+ return (EINVAL);
+
+ sc = vmmdev_lookup2(cdev);
+ if (sc == NULL) {
+ /* virtual machine is in the process of being created */
+ return (EINVAL);
+ }
+
+ /*
+ * Get a read lock on the guest memory map.
+ */
+ vm_slock_memsegs(sc->vm);
+
+ gpa = 0;
+ found = 0;
+ while (!found) {
+ error = vm_mmap_getnext(sc->vm, &gpa, &segid, &segoff, &len,
+ NULL, NULL);
+ if (error)
+ break;
+
+ if (first >= gpa && last <= gpa + len)
+ found = 1;
+ else
+ gpa += len;
+ }
+
+ if (found) {
+ error = vm_get_memseg(sc->vm, segid, &len, &sysmem, objp);
+ KASSERT(error == 0 && *objp != NULL,
+ ("%s: invalid memory segment %d", __func__, segid));
+ if (sysmem) {
+ vm_object_reference(*objp);
+ *offset = segoff + (first - gpa);
+ } else {
+ error = EINVAL;
+ }
+ }
+ vm_unlock_memsegs(sc->vm);
+ return (error);
+}
+
+static void
+vmmdev_destroy(struct vmmdev_softc *sc)
+{
+ struct devmem_softc *dsc;
+ int error __diagused;
+
+ KASSERT(sc->cdev == NULL, ("%s: cdev not free", __func__));
+
+ /*
+ * Destroy all cdevs:
+ *
+ * - any new operations on the 'cdev' will return an error (ENXIO).
+ *
+ * - the 'devmem' cdevs are destroyed before the virtual machine 'cdev'
+ */
+ SLIST_FOREACH(dsc, &sc->devmem, link) {
+ KASSERT(dsc->cdev != NULL, ("devmem cdev already destroyed"));
+ devmem_destroy(dsc);
+ }
+
+ vm_disable_vcpu_creation(sc->vm);
+ error = vcpu_lock_all(sc);
+ KASSERT(error == 0, ("%s: error %d freezing vcpus", __func__, error));
+ vm_unlock_vcpus(sc->vm);
+
+ while ((dsc = SLIST_FIRST(&sc->devmem)) != NULL) {
+ KASSERT(dsc->cdev == NULL, ("%s: devmem not free", __func__));
+ SLIST_REMOVE_HEAD(&sc->devmem, link);
+ free(dsc->name, M_VMMDEV);
+ free(dsc, M_VMMDEV);
+ }
+
+ if (sc->vm != NULL)
+ vm_destroy(sc->vm);
+
+ if (sc->ucred != NULL)
+ crfree(sc->ucred);
+
+ sx_xlock(&vmmdev_mtx);
+ SLIST_REMOVE(&head, sc, vmmdev_softc, link);
+ sx_xunlock(&vmmdev_mtx);
+ free(sc, M_VMMDEV);
+}
+
+static int
+vmmdev_lookup_and_destroy(const char *name, struct ucred *cred)
+{
+ struct cdev *cdev;
+ struct vmmdev_softc *sc;
+
+ sx_xlock(&vmmdev_mtx);
+ sc = vmmdev_lookup(name, cred);
+ if (sc == NULL || sc->cdev == NULL) {
+ sx_xunlock(&vmmdev_mtx);
+ return (EINVAL);
+ }
+
+ /*
+ * Setting 'sc->cdev' to NULL is used to indicate that the VM
+ * is scheduled for destruction.
+ */
+ cdev = sc->cdev;
+ sc->cdev = NULL;
+ sx_xunlock(&vmmdev_mtx);
+
+ vm_suspend(sc->vm, VM_SUSPEND_DESTROY);
+ destroy_dev(cdev);
+ vmmdev_destroy(sc);
+
+ return (0);
+}
+
+static int
+sysctl_vmm_destroy(SYSCTL_HANDLER_ARGS)
+{
+ char *buf;
+ int error, buflen;
+
+ error = vmm_priv_check(req->td->td_ucred);
+ if (error)
+ return (error);
+
+ buflen = VM_MAX_NAMELEN + 1;
+ buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
+ error = sysctl_handle_string(oidp, buf, buflen, req);
+ if (error == 0 && req->newptr != NULL)
+ error = vmmdev_lookup_and_destroy(buf, req->td->td_ucred);
+ free(buf, M_VMMDEV);
+ return (error);
+}
+SYSCTL_PROC(_hw_vmm, OID_AUTO, destroy,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
+ NULL, 0, sysctl_vmm_destroy, "A",
+ "Destroy a vmm(4) instance (legacy interface)");
+
+static struct cdevsw vmmdevsw = {
+ .d_name = "vmmdev",
+ .d_version = D_VERSION,
+ .d_open = vmmdev_open,
+ .d_ioctl = vmmdev_ioctl,
+ .d_mmap_single = vmmdev_mmap_single,
+ .d_read = vmmdev_rw,
+ .d_write = vmmdev_rw,
+};
+
+static struct vmmdev_softc *
+vmmdev_alloc(struct vm *vm, struct ucred *cred)
+{
+ struct vmmdev_softc *sc;
+
+ sc = malloc(sizeof(*sc), M_VMMDEV, M_WAITOK | M_ZERO);
+ SLIST_INIT(&sc->devmem);
+ sc->vm = vm;
+ sc->ucred = crhold(cred);
+ return (sc);
+}
+
+static int
+vmmdev_create(const char *name, struct ucred *cred)
+{
+ struct make_dev_args mda;
+ struct cdev *cdev;
+ struct vmmdev_softc *sc;
+ struct vm *vm;
+ int error;
+
+ sx_xlock(&vmmdev_mtx);
+ sc = vmmdev_lookup(name, cred);
+ if (sc != NULL) {
+ sx_xunlock(&vmmdev_mtx);
+ return (EEXIST);
+ }
+
+ error = vm_create(name, &vm);
+ if (error != 0) {
+ sx_xunlock(&vmmdev_mtx);
+ return (error);
+ }
+ sc = vmmdev_alloc(vm, cred);
+ SLIST_INSERT_HEAD(&head, sc, link);
+
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &vmmdevsw;
+ mda.mda_cr = sc->ucred;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0600;
+ mda.mda_si_drv1 = sc;
+ mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ error = make_dev_s(&mda, &cdev, "vmm/%s", name);
+ if (error != 0) {
+ sx_xunlock(&vmmdev_mtx);
+ vmmdev_destroy(sc);
+ return (error);
+ }
+ sc->cdev = cdev;
+ sx_xunlock(&vmmdev_mtx);
+ return (0);
+}
+
+static int
+sysctl_vmm_create(SYSCTL_HANDLER_ARGS)
+{
+ char *buf;
+ int error, buflen;
+
+ error = vmm_priv_check(req->td->td_ucred);
+ if (error != 0)
+ return (error);
+
+ buflen = VM_MAX_NAMELEN + 1;
+ buf = malloc(buflen, M_VMMDEV, M_WAITOK | M_ZERO);
+ error = sysctl_handle_string(oidp, buf, buflen, req);
+ if (error == 0 && req->newptr != NULL)
+ error = vmmdev_create(buf, req->td->td_ucred);
+ free(buf, M_VMMDEV);
+ return (error);
+}
+SYSCTL_PROC(_hw_vmm, OID_AUTO, create,
+ CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_PRISON | CTLFLAG_MPSAFE,
+ NULL, 0, sysctl_vmm_create, "A",
+ "Create a vmm(4) instance (legacy interface)");
+
+static int
+vmmctl_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
+{
+ int error;
+
+ error = vmm_priv_check(td->td_ucred);
+ if (error != 0)
+ return (error);
+
+ if ((flags & FWRITE) == 0)
+ return (EPERM);
+
+ return (0);
+}
+
+static int
+vmmctl_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
+ struct thread *td)
+{
+ int error;
+
+ switch (cmd) {
+ case VMMCTL_VM_CREATE: {
+ struct vmmctl_vm_create *vmc;
+
+ vmc = (struct vmmctl_vm_create *)data;
+ vmc->name[VM_MAX_NAMELEN] = '\0';
+ for (size_t i = 0; i < nitems(vmc->reserved); i++) {
+ if (vmc->reserved[i] != 0) {
+ error = EINVAL;
+ return (error);
+ }
+ }
+
+ error = vmmdev_create(vmc->name, td->td_ucred);
+ break;
+ }
+ case VMMCTL_VM_DESTROY: {
+ struct vmmctl_vm_destroy *vmd;
+
+ vmd = (struct vmmctl_vm_destroy *)data;
+ vmd->name[VM_MAX_NAMELEN] = '\0';
+ for (size_t i = 0; i < nitems(vmd->reserved); i++) {
+ if (vmd->reserved[i] != 0) {
+ error = EINVAL;
+ return (error);
+ }
+ }
+
+ error = vmmdev_lookup_and_destroy(vmd->name, td->td_ucred);
+ break;
+ }
+ default:
+ error = ENOTTY;
+ break;
+ }
+
+ return (error);
+}
+
+static struct cdev *vmmctl_cdev;
+static struct cdevsw vmmctlsw = {
+ .d_name = "vmmctl",
+ .d_version = D_VERSION,
+ .d_open = vmmctl_open,
+ .d_ioctl = vmmctl_ioctl,
+};
+
+int
+vmmdev_init(void)
+{
+ int error;
+
+ sx_xlock(&vmmdev_mtx);
+ error = make_dev_p(MAKEDEV_CHECKNAME, &vmmctl_cdev, &vmmctlsw, NULL,
+ UID_ROOT, GID_WHEEL, 0600, "vmmctl");
+ if (error == 0)
+ pr_allow_flag = prison_add_allow(NULL, "vmm", NULL,
+ "Allow use of vmm in a jail.");
+ sx_xunlock(&vmmdev_mtx);
+
+ return (error);
+}
+
+int
+vmmdev_cleanup(void)
+{
+ sx_xlock(&vmmdev_mtx);
+ if (!SLIST_EMPTY(&head)) {
+ sx_xunlock(&vmmdev_mtx);
+ return (EBUSY);
+ }
+ if (vmmctl_cdev != NULL) {
+ destroy_dev(vmmctl_cdev);
+ vmmctl_cdev = NULL;
+ }
+ sx_xunlock(&vmmdev_mtx);
+
+ return (0);
+}
+
+static int
+devmem_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t len,
+ struct vm_object **objp, int nprot)
+{
+ struct devmem_softc *dsc;
+ vm_ooffset_t first, last;
+ size_t seglen;
+ int error;
+ bool sysmem;
+
+ dsc = cdev->si_drv1;
+ if (dsc == NULL) {
+ /* 'cdev' has been created but is not ready for use */
+ return (ENXIO);
+ }
+
+ first = *offset;
+ last = *offset + len;
+ if ((nprot & PROT_EXEC) || first < 0 || first >= last)
+ return (EINVAL);
+
+ vm_slock_memsegs(dsc->sc->vm);
+
+ error = vm_get_memseg(dsc->sc->vm, dsc->segid, &seglen, &sysmem, objp);
+ KASSERT(error == 0 && !sysmem && *objp != NULL,
+ ("%s: invalid devmem segment %d", __func__, dsc->segid));
+
+ if (seglen >= last)
+ vm_object_reference(*objp);
+ else
+ error = EINVAL;
+
+ vm_unlock_memsegs(dsc->sc->vm);
+ return (error);
+}
+
+static struct cdevsw devmemsw = {
+ .d_name = "devmem",
+ .d_version = D_VERSION,
+ .d_mmap_single = devmem_mmap_single,
+};
+
+static int
+devmem_create_cdev(struct vmmdev_softc *sc, int segid, char *devname)
+{
+ struct make_dev_args mda;
+ struct devmem_softc *dsc;
+ int error;
+
+ sx_xlock(&vmmdev_mtx);
+
+ dsc = malloc(sizeof(struct devmem_softc), M_VMMDEV, M_WAITOK | M_ZERO);
+ dsc->segid = segid;
+ dsc->name = devname;
+ dsc->sc = sc;
+ SLIST_INSERT_HEAD(&sc->devmem, dsc, link);
+
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &devmemsw;
+ mda.mda_cr = sc->ucred;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0600;
+ mda.mda_si_drv1 = dsc;
+ mda.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;
+ error = make_dev_s(&mda, &dsc->cdev, "vmm.io/%s.%s", vm_name(sc->vm),
+ devname);
+ if (error != 0) {
+ SLIST_REMOVE(&sc->devmem, dsc, devmem_softc, link);
+ free(dsc->name, M_VMMDEV);
+ free(dsc, M_VMMDEV);
+ }
+
+ sx_xunlock(&vmmdev_mtx);
+
+ return (error);
+}
+
+static void
+devmem_destroy(void *arg)
+{
+ struct devmem_softc *dsc = arg;
+
+ destroy_dev(dsc->cdev);
+ dsc->cdev = NULL;
+ dsc->sc = NULL;
+}
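A hedged userspace sketch of consuming a devmem cdev created by devmem_create_cdev() above; the VM name "testvm" and segment name "bootrom" are examples, and PROT_EXEC mappings would be rejected by devmem_mmap_single():

	#include <sys/mman.h>
	#include <fcntl.h>

	/* Sketch: map the first 1 MiB of a devmem segment read-only. */
	int fd = open("/dev/vmm.io/testvm.bootrom", O_RDONLY);
	void *p = (fd >= 0) ?
	    mmap(NULL, 1024 * 1024, PROT_READ, MAP_SHARED, fd, 0) : MAP_FAILED;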
diff --git a/sys/dev/vmm/vmm_dev.h b/sys/dev/vmm/vmm_dev.h
new file mode 100644
index 000000000000..410066c49cf2
--- /dev/null
+++ b/sys/dev/vmm/vmm_dev.h
@@ -0,0 +1,70 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * Copyright (C) 2015 Mihai Carabas <mihai.carabas@gmail.com>
+ * All rights reserved.
+ */
+
+#ifndef _DEV_VMM_DEV_H_
+#define _DEV_VMM_DEV_H_
+
+#include <sys/types.h>
+#include <sys/ioccom.h>
+#include <machine/vmm_dev.h>
+
+#ifdef _KERNEL
+struct thread;
+struct vm;
+struct vcpu;
+
+int vmmdev_init(void);
+int vmmdev_cleanup(void);
+int vmmdev_machdep_ioctl(struct vm *vm, struct vcpu *vcpu, u_long cmd,
+ caddr_t data, int fflag, struct thread *td);
+
+/*
+ * Entry in an ioctl handler table. A number of generic ioctls are defined,
+ * plus a table of machine-dependent ioctls. The flags indicate the
+ * required preconditions for a given ioctl.
+ *
+ * Some ioctls encode a vcpuid as the first member of their ioctl structure.
+ * These ioctls must specify one of the following flags:
+ * - ALLOC_VCPU: create the vCPU if it does not already exist
+ * - LOCK_ONE_VCPU: create the vCPU if it does not already exist
+ * and lock the vCPU for the duration of the ioctl
+ * - MAYBE_ALLOC_VCPU: if the vcpuid is -1, do nothing, otherwise
+ * create the vCPU if it does not already exist
+ */
+struct vmmdev_ioctl {
+ unsigned long cmd;
+#define VMMDEV_IOCTL_SLOCK_MEMSEGS 0x01
+#define VMMDEV_IOCTL_XLOCK_MEMSEGS 0x02
+#define VMMDEV_IOCTL_LOCK_ONE_VCPU 0x04
+#define VMMDEV_IOCTL_LOCK_ALL_VCPUS 0x08
+#define VMMDEV_IOCTL_ALLOC_VCPU 0x10
+#define VMMDEV_IOCTL_MAYBE_ALLOC_VCPU 0x20
+ int flags;
+};
+
+#define VMMDEV_IOCTL(_cmd, _flags) { .cmd = (_cmd), .flags = (_flags) }
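As a sketch of how a machine-dependent port might populate the table declared just below; VM_RUN is only a plausible example of a machdep ioctl and the flag choice is an assumption:

	/* Hypothetical machine-dependent table built with the macro above. */
	const struct vmmdev_ioctl vmmdev_machdep_ioctls[] = {
		VMMDEV_IOCTL(VM_RUN, VMMDEV_IOCTL_LOCK_ONE_VCPU),
	};
	const size_t vmmdev_machdep_ioctl_count = nitems(vmmdev_machdep_ioctls);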
+
+extern const struct vmmdev_ioctl vmmdev_machdep_ioctls[];
+extern const size_t vmmdev_machdep_ioctl_count;
+
+#endif /* _KERNEL */
+
+struct vmmctl_vm_create {
+ char name[VM_MAX_NAMELEN + 1];
+ int reserved[16];
+};
+
+struct vmmctl_vm_destroy {
+ char name[VM_MAX_NAMELEN + 1];
+ int reserved[16];
+};
+
+#define VMMCTL_VM_CREATE _IOWR('V', 0, struct vmmctl_vm_create)
+#define VMMCTL_VM_DESTROY _IOWR('V', 1, struct vmmctl_vm_destroy)
+
+#endif /* _DEV_VMM_DEV_H_ */
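A minimal userspace sketch of the control-device interface defined above; vmmctl_open() requires the device be opened for writing, and the VM name is an example:

	#include <sys/ioctl.h>
	#include <fcntl.h>
	#include <string.h>

	/* Sketch: create a VM through /dev/vmmctl; reserved[] must stay zero. */
	struct vmmctl_vm_create vmc = { 0 };
	strlcpy(vmc.name, "testvm", sizeof(vmc.name));
	int fd = open("/dev/vmmctl", O_RDWR);
	if (fd >= 0)
		(void)ioctl(fd, VMMCTL_VM_CREATE, &vmc);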
diff --git a/sys/dev/vmm/vmm_ktr.h b/sys/dev/vmm/vmm_ktr.h
new file mode 100644
index 000000000000..20370a229530
--- /dev/null
+++ b/sys/dev/vmm/vmm_ktr.h
@@ -0,0 +1,69 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _VMM_KTR_H_
+#define _VMM_KTR_H_
+
+#include <sys/ktr.h>
+#include <sys/pcpu.h>
+
+#ifndef KTR_VMM
+#define KTR_VMM KTR_GEN
+#endif
+
+#define VCPU_CTR0(vm, vcpuid, format) \
+CTR2(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid))
+
+#define VCPU_CTR1(vm, vcpuid, format, p1) \
+CTR3(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1))
+
+#define VCPU_CTR2(vm, vcpuid, format, p1, p2) \
+CTR4(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2))
+
+#define VCPU_CTR3(vm, vcpuid, format, p1, p2, p3) \
+CTR5(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), (p1), (p2), (p3))
+
+#define VCPU_CTR4(vm, vcpuid, format, p1, p2, p3, p4) \
+CTR6(KTR_VMM, "vm %s[%d]: " format, vm_name((vm)), (vcpuid), \
+ (p1), (p2), (p3), (p4))
+
+#define VM_CTR0(vm, format) \
+CTR1(KTR_VMM, "vm %s: " format, vm_name((vm)))
+
+#define VM_CTR1(vm, format, p1) \
+CTR2(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1))
+
+#define VM_CTR2(vm, format, p1, p2) \
+CTR3(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2))
+
+#define VM_CTR3(vm, format, p1, p2, p3) \
+CTR4(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3))
+
+#define VM_CTR4(vm, format, p1, p2, p3, p4) \
+CTR5(KTR_VMM, "vm %s: " format, vm_name((vm)), (p1), (p2), (p3), (p4))
+#endif
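Usage of these tracing macros is a one-liner; a hedged example with invented event text and locals:

	/* Sketch: trace per-vcpu and per-VM events (requires KTR_VMM tracing). */
	VCPU_CTR1(vm, vcpuid, "vm exit at rip %#lx", rip);
	VM_CTR0(vm, "vm reset requested");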
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
new file mode 100644
index 000000000000..9df31c9ba133
--- /dev/null
+++ b/sys/dev/vmm/vmm_mem.c
@@ -0,0 +1,485 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ */
+
+#include <sys/types.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/sx.h>
+#include <sys/systm.h>
+
+#include <machine/vmm.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+
+#include <dev/vmm/vmm_dev.h>
+#include <dev/vmm/vmm_mem.h>
+
+static void vm_free_memmap(struct vm *vm, int ident);
+
+int
+vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi)
+{
+ mem->mem_vmspace = vmmops_vmspace_alloc(lo, hi);
+ if (mem->mem_vmspace == NULL)
+ return (ENOMEM);
+ sx_init(&mem->mem_segs_lock, "vm_mem_segs");
+ return (0);
+}
+
+static bool
+sysmem_mapping(struct vm_mem *mem, int idx)
+{
+ if (mem->mem_maps[idx].len != 0 &&
+ mem->mem_segs[mem->mem_maps[idx].segid].sysmem)
+ return (true);
+ else
+ return (false);
+}
+
+bool
+vm_memseg_sysmem(struct vm *vm, int ident)
+{
+ struct vm_mem *mem;
+
+ mem = vm_mem(vm);
+ vm_assert_memseg_locked(vm);
+
+ if (ident < 0 || ident >= VM_MAX_MEMSEGS)
+ return (false);
+
+ return (mem->mem_segs[ident].sysmem);
+}
+
+void
+vm_mem_cleanup(struct vm *vm)
+{
+ struct vm_mem *mem;
+
+ mem = vm_mem(vm);
+
+ /*
+ * System memory is removed from the guest address space only when
+ * the VM is destroyed. This is because the mapping remains the same
+ * across VM reset.
+ *
+ * Device memory can be relocated by the guest (e.g. using PCI BARs)
+ * so those mappings are removed on a VM reset.
+ */
+ for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
+ if (!sysmem_mapping(mem, i))
+ vm_free_memmap(vm, i);
+ }
+}
+
+void
+vm_mem_destroy(struct vm *vm)
+{
+ struct vm_mem *mem;
+
+ mem = vm_mem(vm);
+ vm_assert_memseg_xlocked(vm);
+
+ for (int i = 0; i < VM_MAX_MEMMAPS; i++) {
+ if (sysmem_mapping(mem, i))
+ vm_free_memmap(vm, i);
+ }
+
+ for (int i = 0; i < VM_MAX_MEMSEGS; i++)
+ vm_free_memseg(vm, i);
+
+ vmmops_vmspace_free(mem->mem_vmspace);
+
+ sx_xunlock(&mem->mem_segs_lock);
+ sx_destroy(&mem->mem_segs_lock);
+}
+
+struct vmspace *
+vm_vmspace(struct vm *vm)
+{
+ struct vm_mem *mem;
+
+ mem = vm_mem(vm);
+ return (mem->mem_vmspace);
+}
+
+void
+vm_slock_memsegs(struct vm *vm)
+{
+ sx_slock(&vm_mem(vm)->mem_segs_lock);
+}
+
+void
+vm_xlock_memsegs(struct vm *vm)
+{
+ sx_xlock(&vm_mem(vm)->mem_segs_lock);
+}
+
+void
+vm_unlock_memsegs(struct vm *vm)
+{
+ sx_unlock(&vm_mem(vm)->mem_segs_lock);
+}
+
+void
+vm_assert_memseg_locked(struct vm *vm)
+{
+ sx_assert(&vm_mem(vm)->mem_segs_lock, SX_LOCKED);
+}
+
+void
+vm_assert_memseg_xlocked(struct vm *vm)
+{
+ sx_assert(&vm_mem(vm)->mem_segs_lock, SX_XLOCKED);
+}
+
+/*
+ * Return 'true' if 'gpa' is allocated in the guest address space.
+ *
+ * This function is called in the context of a running vcpu which acts as
+ * an implicit lock on 'vm->mem_maps[]'.
+ */
+bool
+vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa)
+{
+ struct vm *vm = vcpu_vm(vcpu);
+ struct vm_mem_map *mm;
+ int i;
+
+#ifdef INVARIANTS
+ int hostcpu, state;
+ state = vcpu_get_state(vcpu, &hostcpu);
+ KASSERT(state == VCPU_RUNNING && hostcpu == curcpu,
+ ("%s: invalid vcpu state %d/%d", __func__, state, hostcpu));
+#endif
+
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ mm = &vm_mem(vm)->mem_maps[i];
+ if (mm->len != 0 && gpa >= mm->gpa && gpa < mm->gpa + mm->len)
+ return (true); /* 'gpa' is sysmem or devmem */
+ }
+
+ return (false);
+}
+
+int
+vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
+ struct domainset *obj_domainset)
+{
+ struct vm_mem_seg *seg;
+ struct vm_mem *mem;
+ vm_object_t obj;
+
+ mem = vm_mem(vm);
+ vm_assert_memseg_xlocked(vm);
+
+ if (ident < 0 || ident >= VM_MAX_MEMSEGS)
+ return (EINVAL);
+
+ if (len == 0 || (len & PAGE_MASK))
+ return (EINVAL);
+
+ seg = &mem->mem_segs[ident];
+ if (seg->object != NULL) {
+ if (seg->len == len && seg->sysmem == sysmem)
+ return (EEXIST);
+ else
+ return (EINVAL);
+ }
+
+ /*
+ * When given an impossible policy, signal an
+ * error to the user.
+ */
+ if (obj_domainset != NULL && domainset_empty_vm(obj_domainset))
+ return (EINVAL);
+ obj = vm_object_allocate(OBJT_SWAP, len >> PAGE_SHIFT);
+ if (obj == NULL)
+ return (ENOMEM);
+
+ seg->len = len;
+ seg->object = obj;
+ if (obj_domainset != NULL)
+ seg->object->domain.dr_policy = obj_domainset;
+ seg->sysmem = sysmem;
+
+ return (0);
+}
+
+int
+vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
+ vm_object_t *objptr)
+{
+ struct vm_mem *mem;
+ struct vm_mem_seg *seg;
+
+ mem = vm_mem(vm);
+
+ vm_assert_memseg_locked(vm);
+
+ if (ident < 0 || ident >= VM_MAX_MEMSEGS)
+ return (EINVAL);
+
+ seg = &mem->mem_segs[ident];
+ if (len)
+ *len = seg->len;
+ if (sysmem)
+ *sysmem = seg->sysmem;
+ if (objptr)
+ *objptr = seg->object;
+ return (0);
+}
+
+void
+vm_free_memseg(struct vm *vm, int ident)
+{
+ struct vm_mem_seg *seg;
+
+ KASSERT(ident >= 0 && ident < VM_MAX_MEMSEGS,
+ ("%s: invalid memseg ident %d", __func__, ident));
+
+ seg = &vm_mem(vm)->mem_segs[ident];
+ if (seg->object != NULL) {
+ vm_object_deallocate(seg->object);
+ bzero(seg, sizeof(struct vm_mem_seg));
+ }
+}
+
+int
+vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
+ size_t len, int prot, int flags)
+{
+ struct vm_mem *mem;
+ struct vm_mem_seg *seg;
+ struct vm_mem_map *m, *map;
+ struct vm_map *vmmap;
+ vm_ooffset_t last;
+ int i, error;
+
+ if (prot == 0 || (prot & ~(VM_PROT_ALL)) != 0)
+ return (EINVAL);
+
+ if (flags & ~VM_MEMMAP_F_WIRED)
+ return (EINVAL);
+
+ if (segid < 0 || segid >= VM_MAX_MEMSEGS)
+ return (EINVAL);
+
+ mem = vm_mem(vm);
+ seg = &mem->mem_segs[segid];
+ if (seg->object == NULL)
+ return (EINVAL);
+
+ last = first + len;
+ if (first < 0 || first >= last || last > seg->len)
+ return (EINVAL);
+
+ if ((gpa | first | last) & PAGE_MASK)
+ return (EINVAL);
+
+ map = NULL;
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ m = &mem->mem_maps[i];
+ if (m->len == 0) {
+ map = m;
+ break;
+ }
+ }
+ if (map == NULL)
+ return (ENOSPC);
+
+ vmmap = &mem->mem_vmspace->vm_map;
+ error = vm_map_find(vmmap, seg->object, first, &gpa, len, 0,
+ VMFS_NO_SPACE, prot, prot, 0);
+ if (error != KERN_SUCCESS)
+ return (EFAULT);
+
+ vm_object_reference(seg->object);
+
+ if (flags & VM_MEMMAP_F_WIRED) {
+ error = vm_map_wire(vmmap, gpa, gpa + len,
+ VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+ if (error != KERN_SUCCESS) {
+ vm_map_remove(vmmap, gpa, gpa + len);
+ return (error == KERN_RESOURCE_SHORTAGE ? ENOMEM :
+ EFAULT);
+ }
+ }
+
+ map->gpa = gpa;
+ map->len = len;
+ map->segoff = first;
+ map->segid = segid;
+ map->prot = prot;
+ map->flags = flags;
+ return (0);
+}
+
+int
+vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len)
+{
+ struct vm_mem *mem;
+ struct vm_mem_map *m;
+ int i;
+
+ mem = vm_mem(vm);
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ m = &mem->mem_maps[i];
+#ifdef VM_MEMMAP_F_IOMMU
+ if ((m->flags & VM_MEMMAP_F_IOMMU) != 0)
+ continue;
+#endif
+ if (m->gpa == gpa && m->len == len) {
+ vm_free_memmap(vm, i);
+ return (0);
+ }
+ }
+
+ return (EINVAL);
+}
+
+int
+vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
+ vm_ooffset_t *segoff, size_t *len, int *prot, int *flags)
+{
+ struct vm_mem *mem;
+ struct vm_mem_map *mm, *mmnext;
+ int i;
+
+ mem = vm_mem(vm);
+
+ mmnext = NULL;
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ mm = &mem->mem_maps[i];
+ if (mm->len == 0 || mm->gpa < *gpa)
+ continue;
+ if (mmnext == NULL || mm->gpa < mmnext->gpa)
+ mmnext = mm;
+ }
+
+ if (mmnext != NULL) {
+ *gpa = mmnext->gpa;
+ if (segid)
+ *segid = mmnext->segid;
+ if (segoff)
+ *segoff = mmnext->segoff;
+ if (len)
+ *len = mmnext->len;
+ if (prot)
+ *prot = mmnext->prot;
+ if (flags)
+ *flags = mmnext->flags;
+ return (0);
+ } else {
+ return (ENOENT);
+ }
+}
+
+static void
+vm_free_memmap(struct vm *vm, int ident)
+{
+ struct vm_mem_map *mm;
+ int error __diagused;
+
+ mm = &vm_mem(vm)->mem_maps[ident];
+ if (mm->len) {
+ error = vm_map_remove(&vm_vmspace(vm)->vm_map, mm->gpa,
+ mm->gpa + mm->len);
+ KASSERT(error == KERN_SUCCESS, ("%s: vm_map_remove error %d",
+ __func__, error));
+ bzero(mm, sizeof(struct vm_mem_map));
+ }
+}
+
+vm_paddr_t
+vmm_sysmem_maxaddr(struct vm *vm)
+{
+ struct vm_mem *mem;
+ struct vm_mem_map *mm;
+ vm_paddr_t maxaddr;
+ int i;
+
+ mem = vm_mem(vm);
+ maxaddr = 0;
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ mm = &mem->mem_maps[i];
+ if (sysmem_mapping(mem, i)) {
+ if (maxaddr < mm->gpa + mm->len)
+ maxaddr = mm->gpa + mm->len;
+ }
+ }
+ return (maxaddr);
+}
+
+static void *
+_vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
+ void **cookie)
+{
+ struct vm_mem_map *mm;
+ vm_page_t m;
+ int i, count, pageoff;
+
+ pageoff = gpa & PAGE_MASK;
+ if (len > PAGE_SIZE - pageoff)
+ panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);
+
+ count = 0;
+ for (i = 0; i < VM_MAX_MEMMAPS; i++) {
+ mm = &vm_mem(vm)->mem_maps[i];
+ if (gpa >= mm->gpa && gpa < mm->gpa + mm->len) {
+ count = vm_fault_quick_hold_pages(
+ &vm_vmspace(vm)->vm_map, trunc_page(gpa),
+ PAGE_SIZE, reqprot, &m, 1);
+ break;
+ }
+ }
+
+ if (count == 1) {
+ *cookie = m;
+ return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
+ } else {
+ *cookie = NULL;
+ return (NULL);
+ }
+}
+
+void *
+vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len, int reqprot,
+ void **cookie)
+{
+#ifdef INVARIANTS
+ /*
+	 * The current vcpu should be frozen to ensure 'mem_maps[]'
+ * stability.
+ */
+ int state = vcpu_get_state(vcpu, NULL);
+ KASSERT(state == VCPU_FROZEN, ("%s: invalid vcpu state %d",
+ __func__, state));
+#endif
+ return (_vm_gpa_hold(vcpu_vm(vcpu), gpa, len, reqprot, cookie));
+}
+
+void *
+vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
+ void **cookie)
+{
+ vm_assert_memseg_locked(vm);
+ return (_vm_gpa_hold(vm, gpa, len, reqprot, cookie));
+}
+
+void
+vm_gpa_release(void *cookie)
+{
+ vm_page_t m = cookie;
+
+ vm_page_unwire(m, PQ_ACTIVE);
+}
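The hold/release pair above brackets short host-side touches of guest memory; a kernel-side sketch, assuming the memseg lock is already held as vm_gpa_hold_global() asserts:

	/* Sketch: read one guest word while its backing page is held. */
	void *cookie, *hva;
	uint32_t val = 0;

	hva = vm_gpa_hold_global(vm, gpa, sizeof(val), VM_PROT_READ, &cookie);
	if (hva != NULL) {
		val = *(uint32_t *)hva;
		vm_gpa_release(cookie);
	}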
diff --git a/sys/dev/vmm/vmm_mem.h b/sys/dev/vmm/vmm_mem.h
new file mode 100644
index 000000000000..f3d22058c7b8
--- /dev/null
+++ b/sys/dev/vmm/vmm_mem.h
@@ -0,0 +1,107 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _DEV_VMM_MEM_H_
+#define _DEV_VMM_MEM_H_
+
+/* Maximum number of NUMA domains in a guest. */
+#define VM_MAXMEMDOM 8
+#define VM_MAXSYSMEM VM_MAXMEMDOM
+
+/*
+ * Identifiers for memory segments.
+ * Each guest NUMA domain is represented by a single system
+ * memory segment from [VM_SYSMEM, VM_MAXSYSMEM).
+ * The remaining identifiers can be used to create devmem segments.
+ */
+enum {
+ VM_SYSMEM = 0,
+ VM_BOOTROM = VM_MAXSYSMEM,
+ VM_FRAMEBUFFER,
+ VM_PCIROM,
+ VM_MEMSEG_END
+};
+
+#define VM_MAX_MEMSEGS VM_MEMSEG_END
+#define VM_MAX_MEMMAPS (VM_MAX_MEMSEGS * 2)
+
+#ifdef _KERNEL
+
+#include <sys/types.h>
+#include <sys/_sx.h>
+
+struct vm;
+struct vm_object;
+struct vmspace;
+
+struct vm_mem_seg {
+ size_t len;
+ bool sysmem;
+ struct vm_object *object;
+};
+
+struct vm_mem_map {
+ vm_paddr_t gpa;
+ size_t len;
+ vm_ooffset_t segoff;
+ int segid;
+ int prot;
+ int flags;
+};
+
+struct vm_mem {
+ struct vm_mem_map mem_maps[VM_MAX_MEMMAPS];
+ struct vm_mem_seg mem_segs[VM_MAX_MEMSEGS];
+ struct sx mem_segs_lock;
+ struct vmspace *mem_vmspace;
+};
+
+int vm_mem_init(struct vm_mem *mem, vm_offset_t lo, vm_offset_t hi);
+void vm_mem_cleanup(struct vm *vm);
+void vm_mem_destroy(struct vm *vm);
+
+struct vmspace *vm_vmspace(struct vm *vm);
+
+/*
+ * APIs that modify the guest memory map require all vcpus to be frozen.
+ */
+void vm_slock_memsegs(struct vm *vm);
+void vm_xlock_memsegs(struct vm *vm);
+void vm_unlock_memsegs(struct vm *vm);
+void vm_assert_memseg_locked(struct vm *vm);
+void vm_assert_memseg_xlocked(struct vm *vm);
+int vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t off,
+ size_t len, int prot, int flags);
+int vm_munmap_memseg(struct vm *vm, vm_paddr_t gpa, size_t len);
+int vm_alloc_memseg(struct vm *vm, int ident, size_t len, bool sysmem,
+ struct domainset *obj_domainset);
+void vm_free_memseg(struct vm *vm, int ident);
+
+/*
+ * APIs that inspect the guest memory map require only a *single* vcpu to
+ * be frozen. This acts like a read lock on the guest memory map since any
+ * modification requires *all* vcpus to be frozen.
+ */
+int vm_mmap_getnext(struct vm *vm, vm_paddr_t *gpa, int *segid,
+ vm_ooffset_t *segoff, size_t *len, int *prot, int *flags);
+bool vm_memseg_sysmem(struct vm *vm, int ident);
+int vm_get_memseg(struct vm *vm, int ident, size_t *len, bool *sysmem,
+ struct vm_object **objptr);
+vm_paddr_t vmm_sysmem_maxaddr(struct vm *vm);
+void *vm_gpa_hold(struct vcpu *vcpu, vm_paddr_t gpa, size_t len,
+ int prot, void **cookie);
+void *vm_gpa_hold_global(struct vm *vm, vm_paddr_t gpa, size_t len,
+ int prot, void **cookie);
+void vm_gpa_release(void *cookie);
+bool vm_mem_allocated(struct vcpu *vcpu, vm_paddr_t gpa);
+
+int vm_gla2gpa_nofault(struct vcpu *vcpu, struct vm_guest_paging *paging,
+ uint64_t gla, int prot, uint64_t *gpa, int *is_fault);
+
+#endif /* _KERNEL */
+
+#endif /* !_DEV_VMM_MEM_H_ */
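A sketch of the read-side discipline the comments above describe, as a consumer would follow it:

	/* Sketch: inspect one segment under the shared memseg lock. */
	size_t len;
	bool sysmem;

	vm_slock_memsegs(vm);
	if (vm_get_memseg(vm, VM_BOOTROM, &len, &sysmem, NULL) == 0 && len != 0) {
		/* Segment is allocated; 'len' and 'sysmem' are now valid. */
	}
	vm_unlock_memsegs(vm);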
diff --git a/sys/dev/vmm/vmm_stat.c b/sys/dev/vmm/vmm_stat.c
new file mode 100644
index 000000000000..44edd6af85dd
--- /dev/null
+++ b/sys/dev/vmm/vmm_stat.c
@@ -0,0 +1,151 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/malloc.h>
+
+#include <machine/vmm.h>
+
+#include <dev/vmm/vmm_stat.h>
+
+/*
+ * 'vst_num_elems' is the total number of addressable statistic elements
+ * 'vst_num_types' is the number of unique statistic types
+ *
+ * It is always true that 'vst_num_elems' is greater than or equal to
+ * 'vst_num_types'. This is because a stat type may represent more than
+ * one element (e.g., VMM_STAT_ARRAY).
+ */
+static int vst_num_elems, vst_num_types;
+static struct vmm_stat_type *vsttab[MAX_VMM_STAT_ELEMS];
+
+static MALLOC_DEFINE(M_VMM_STAT, "vmm stat", "vmm stat");
+
+#define vst_size ((size_t)vst_num_elems * sizeof(uint64_t))
+
+void
+vmm_stat_register(void *arg)
+{
+ struct vmm_stat_type *vst = arg;
+
+ /* We require all stats to identify themselves with a description */
+ if (vst->desc == NULL)
+ return;
+
+ if (vst->pred != NULL && !vst->pred())
+ return;
+
+ if (vst_num_elems + vst->nelems >= MAX_VMM_STAT_ELEMS) {
+ printf("Cannot accommodate vmm stat type \"%s\"!\n", vst->desc);
+ return;
+ }
+
+ vst->index = vst_num_elems;
+ vst_num_elems += vst->nelems;
+
+ vsttab[vst_num_types++] = vst;
+}
+
+int
+vmm_stat_copy(struct vcpu *vcpu, int index, int count, int *num_stats,
+ uint64_t *buf)
+{
+ struct vmm_stat_type *vst;
+ uint64_t *stats;
+ int i, tocopy;
+
+ if (index < 0 || count < 0)
+ return (EINVAL);
+
+ if (index > vst_num_elems)
+ return (ENOENT);
+
+ if (index == vst_num_elems) {
+ *num_stats = 0;
+ return (0);
+ }
+
+ tocopy = min(vst_num_elems - index, count);
+
+ /* Let stats functions update their counters */
+ for (i = 0; i < vst_num_types; i++) {
+ vst = vsttab[i];
+ if (vst->func != NULL)
+ (*vst->func)(vcpu, vst);
+ }
+
+ /* Copy over the stats */
+ stats = vcpu_stats(vcpu);
+ memcpy(buf, stats + index, tocopy * sizeof(stats[0]));
+ *num_stats = tocopy;
+ return (0);
+}
+
+void *
+vmm_stat_alloc(void)
+{
+
+ return (malloc(vst_size, M_VMM_STAT, M_WAITOK));
+}
+
+void
+vmm_stat_init(void *vp)
+{
+
+ bzero(vp, vst_size);
+}
+
+void
+vmm_stat_free(void *vp)
+{
+ free(vp, M_VMM_STAT);
+}
+
+int
+vmm_stat_desc_copy(int index, char *buf, int bufsize)
+{
+ int i;
+ struct vmm_stat_type *vst;
+
+ for (i = 0; i < vst_num_types; i++) {
+ vst = vsttab[i];
+ if (index >= vst->index && index < vst->index + vst->nelems) {
+ if (vst->nelems > 1) {
+ snprintf(buf, bufsize, "%s[%d]",
+ vst->desc, index - vst->index);
+ } else {
+ strlcpy(buf, vst->desc, bufsize);
+ }
+ return (0); /* found it */
+ }
+ }
+
+ return (EINVAL);
+}
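The index/count protocol implemented by vmm_stat_copy() lets a caller page through the buffer until it returns zero entries; a kernel-side sketch with the vcpu already locked:

	/* Sketch: walk all statistics in chunks of up to 8 values. */
	uint64_t buf[8];
	int index = 0, n;

	while (vmm_stat_copy(vcpu, index, nitems(buf), &n, buf) == 0 && n > 0) {
		/* Consume buf[0..n-1]; their ids are index..index+n-1. */
		index += n;
	}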
diff --git a/sys/dev/vmm/vmm_stat.h b/sys/dev/vmm/vmm_stat.h
new file mode 100644
index 000000000000..471afd0dd827
--- /dev/null
+++ b/sys/dev/vmm/vmm_stat.h
@@ -0,0 +1,135 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2011 NetApp, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _DEV_VMM_STAT_H_
+#define _DEV_VMM_STAT_H_
+
+struct vm;
+
+#define MAX_VMM_STAT_ELEMS 64 /* arbitrary */
+
+struct vmm_stat_type;
+typedef void (*vmm_stat_func_t)(struct vcpu *vcpu,
+ struct vmm_stat_type *stat);
+typedef bool (*vmm_stat_func_pred_t)(void);
+
+struct vmm_stat_type {
+ int index; /* position in the stats buffer */
+ int nelems; /* standalone or array */
+ const char *desc; /* description of statistic */
+ vmm_stat_func_t func;
+ vmm_stat_func_pred_t pred; /* predicate to check during registration */
+};
+
+void vmm_stat_register(void *arg);
+
+#define VMM_STAT_FDEFINE(type, _nelems, _desc, _func, _pred) \
+ struct vmm_stat_type type[1] = { \
+ { \
+ .index = -1, \
+ .nelems = _nelems, \
+ .desc = _desc, \
+ .func = _func, \
+ .pred = _pred, \
+ } \
+ }; \
+ SYSINIT(type##_stat, SI_SUB_KLD, SI_ORDER_ANY, vmm_stat_register, type)
+
+#define VMM_STAT_DEFINE(type, nelems, desc, pred) \
+ VMM_STAT_FDEFINE(type, nelems, desc, NULL, pred)
+
+#define VMM_STAT_DECLARE(type) \
+ extern struct vmm_stat_type type[1]
+
+#define VMM_STAT(type, desc) \
+ VMM_STAT_DEFINE(type, 1, desc, NULL)
+
+#define VMM_STAT_FUNC(type, desc, func) \
+ VMM_STAT_FDEFINE(type, 1, desc, func, NULL)
+
+#define VMM_STAT_ARRAY(type, nelems, desc) \
+ VMM_STAT_DEFINE(type, nelems, desc, NULL)
+
+void *vmm_stat_alloc(void);
+void vmm_stat_init(void *vp);
+void vmm_stat_free(void *vp);
+
+int vmm_stat_copy(struct vcpu *vcpu, int index, int count,
+ int *num_stats, uint64_t *buf);
+int vmm_stat_desc_copy(int index, char *buf, int buflen);
+
+static void __inline
+vmm_stat_array_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+ uint64_t x)
+{
+#ifdef VMM_KEEP_STATS
+ uint64_t *stats;
+
+ stats = vcpu_stats(vcpu);
+
+ if (vst->index >= 0 && statidx < vst->nelems)
+ stats[vst->index + statidx] += x;
+#endif
+}
+
+static void __inline
+vmm_stat_array_set(struct vcpu *vcpu, struct vmm_stat_type *vst, int statidx,
+ uint64_t val)
+{
+#ifdef VMM_KEEP_STATS
+ uint64_t *stats;
+
+ stats = vcpu_stats(vcpu);
+
+ if (vst->index >= 0 && statidx < vst->nelems)
+ stats[vst->index + statidx] = val;
+#endif
+}
+
+static void __inline
+vmm_stat_incr(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t x)
+{
+
+#ifdef VMM_KEEP_STATS
+ vmm_stat_array_incr(vcpu, vst, 0, x);
+#endif
+}
+
+static void __inline
+vmm_stat_set(struct vcpu *vcpu, struct vmm_stat_type *vst, uint64_t val)
+{
+
+#ifdef VMM_KEEP_STATS
+ vmm_stat_array_set(vcpu, vst, 0, val);
+#endif
+}
+
+#endif /* !_DEV_VMM_STAT_H_ */
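Defining and updating a counter with these macros is a two-step sketch; the stat name and callsite are invented:

	/* Sketch: a hypothetical per-vcpu counter, registered at SYSINIT time. */
	VMM_STAT(VMEXIT_EXAMPLE, "number of example exits");

	static void
	handle_example_exit(struct vcpu *vcpu)
	{
		/* Counts only when VMM_KEEP_STATS is defined. */
		vmm_stat_incr(vcpu, VMEXIT_EXAMPLE, 1);
	}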
diff --git a/sys/dev/vmware/pvscsi/pvscsi.c b/sys/dev/vmware/pvscsi/pvscsi.c
index 111330e4f17f..08bdb8c3f108 100644
--- a/sys/dev/vmware/pvscsi/pvscsi.c
+++ b/sys/dev/vmware/pvscsi/pvscsi.c
@@ -1444,6 +1444,10 @@ finish_ccb:
cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
+ /* Prefer connection speed over sas port speed */
+ cts->xport_specific.sas.valid &= ~CTS_SAS_VALID_SPEED;
+ cts->xport_specific.sas.bitrate = 0;
+
ccb_h->status = CAM_REQ_CMP;
xpt_done(ccb);
} break;
diff --git a/sys/dev/vmware/vmci/vmci_kernel_if.c b/sys/dev/vmware/vmci/vmci_kernel_if.c
index c7cdf5e3e3d8..e5d5c5160803 100644
--- a/sys/dev/vmware/vmci/vmci_kernel_if.c
+++ b/sys/dev/vmware/vmci/vmci_kernel_if.c
@@ -236,10 +236,10 @@ vmci_alloc_kernel_mem(size_t size, int flags)
*/
void
-vmci_free_kernel_mem(void *ptr, size_t size)
+vmci_free_kernel_mem(void *ptr, size_t size __unused)
{
- contigfree(ptr, size, M_DEVBUF);
+ free(ptr, M_DEVBUF);
}
/*
diff --git a/sys/dev/vmware/vmxnet3/if_vmx.c b/sys/dev/vmware/vmxnet3/if_vmx.c
index fdcad0dd4bba..1a314ca6660e 100644
--- a/sys/dev/vmware/vmxnet3/if_vmx.c
+++ b/sys/dev/vmware/vmxnet3/if_vmx.c
@@ -1429,7 +1429,8 @@ vmxnet3_isc_txd_credits_update(void *vsc, uint16_t txqid, bool clear)
return (1);
vmxnet3_barrier(sc, VMXNET3_BARRIER_RD);
- if (++txc->vxcr_next == txc->vxcr_ndesc) {
+ MPASS(txc->vxcr_next < txc->vxcr_ndesc);
+ if (++txc->vxcr_next >= txc->vxcr_ndesc) {
txc->vxcr_next = 0;
txc->vxcr_gen ^= 1;
}
@@ -2055,7 +2056,12 @@ vmxnet3_update_admin_status(if_ctx_t ctx)
struct vmxnet3_softc *sc;
sc = iflib_get_softc(ctx);
- if (sc->vmx_ds->event != 0)
+ /*
+ * iflib may invoke this routine before vmxnet3_attach_post() has
+ * run, which is before the top level shared data area is
+ * initialized and the device made aware of it.
+ */
+ if (sc->vmx_ds != NULL && sc->vmx_ds->event != 0)
vmxnet3_evintr(sc);
vmxnet3_refresh_host_stats(sc);
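The credits-update fix above asserts the index invariant with MPASS and tightens the wrap test from == to >=. Sketched standalone (assuming ndesc > 0), the generation-bit ring idiom it protects:

/* Sketch: advance a descriptor ring index that carries a generation
 * (ownership) bit; the bit flips on every wrap so producer and
 * consumer can detect new descriptors without a separate count. */
static inline void
ring_advance(uint32_t *next, uint32_t ndesc, uint8_t *gen)
{
	if (++(*next) >= ndesc) {
		*next = 0;
		*gen ^= 1;
	}
}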
diff --git a/sys/dev/vnic/mrml_bridge.c b/sys/dev/vnic/mrml_bridge.c
index d89d49d5031f..b77ef03ad072 100644
--- a/sys/dev/vnic/mrml_bridge.c
+++ b/sys/dev/vnic/mrml_bridge.c
@@ -108,7 +108,8 @@ mrmlb_fdt_attach(device_t dev)
if (err != 0)
return (err);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
/* OFW bus interface */
@@ -261,7 +262,7 @@ mrmlb_ofw_bus_attach(device_t dev)
ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
resource_list_free(&di->di_rl);
ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
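Here, and again in thunder_mdio_fdt.c below, the bus_generic_attach() tail call becomes bus_attach_children(), which returns void, so attach reports success explicitly. A sketch with a hypothetical foo_attach():

static int
foo_attach(device_t dev)
{
	/* ...device-specific setup that may still fail... */
	bus_attach_children(dev);	/* void: child failures are not ours */
	return (0);
}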
diff --git a/sys/dev/vnic/nicvf_main.c b/sys/dev/vnic/nicvf_main.c
index f5cf99ef0ce4..dd44e420c78f 100644
--- a/sys/dev/vnic/nicvf_main.c
+++ b/sys/dev/vnic/nicvf_main.c
@@ -139,7 +139,7 @@ static void nicvf_config_cpi(struct nicvf *);
static int nicvf_rss_init(struct nicvf *);
static int nicvf_init_resources(struct nicvf *);
-static int nicvf_setup_ifnet(struct nicvf *);
+static void nicvf_setup_ifnet(struct nicvf *);
static int nicvf_setup_ifmedia(struct nicvf *);
static void nicvf_hw_addr_random(uint8_t *);
@@ -247,11 +247,7 @@ nicvf_attach(device_t dev)
nicvf_rss_init(nic);
NICVF_CORE_UNLOCK(nic);
- err = nicvf_setup_ifnet(nic);
- if (err != 0) {
- device_printf(dev, "Could not set-up ifnet\n");
- goto err_release_intr;
- }
+ nicvf_setup_ifnet(nic);
err = nicvf_setup_ifmedia(nic);
if (err != 0) {
@@ -329,17 +325,12 @@ nicvf_hw_addr_random(uint8_t *hwaddr)
memcpy(hwaddr, addr, ETHER_ADDR_LEN);
}
-static int
+static void
nicvf_setup_ifnet(struct nicvf *nic)
{
if_t ifp;
ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(nic->dev, "Could not allocate ifnet structure\n");
- return (ENOMEM);
- }
-
nic->ifp = ifp;
if_setsoftc(ifp, nic);
@@ -379,8 +370,6 @@ nicvf_setup_ifnet(struct nicvf *nic)
if (nic->hw_tso)
if_sethwassistbits(ifp, (CSUM_TSO), 0);
if_setcapenable(ifp, if_getcapabilities(ifp));
-
- return (0);
}
static int
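nicvf_setup_ifnet() can become void because if_alloc() allocates with M_WAITOK and can no longer return NULL; the same check removal recurs in thunder_mdio.c, if_vr.c, and if_vte.c below. The resulting idiom:

	ifp = if_alloc(IFT_ETHER);	/* cannot fail; may sleep */
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));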
diff --git a/sys/dev/vnic/thunder_mdio.c b/sys/dev/vnic/thunder_mdio.c
index 7017d208bc80..a98e6aff05fd 100644
--- a/sys/dev/vnic/thunder_mdio.c
+++ b/sys/dev/vnic/thunder_mdio.c
@@ -453,10 +453,6 @@ thunder_mdio_phy_connect(device_t dev, int lmacid, int phy)
if (pd == NULL)
return (ENOMEM);
pd->ifp = if_alloc(IFT_ETHER);
- if (pd->ifp == NULL) {
- free(pd, M_THUNDER_MDIO);
- return (ENOMEM);
- }
pd->lmacid = lmacid;
}
@@ -498,7 +494,6 @@ thunder_mdio_phy_disconnect(device_t dev, int lmacid, int phy)
/* Detach miibus */
bus_generic_detach(dev);
- device_delete_child(dev, pd->miibus);
/* Free fake ifnet */
if_free(pd->ifp);
/* Free memory under phy descriptor */
diff --git a/sys/dev/vnic/thunder_mdio_fdt.c b/sys/dev/vnic/thunder_mdio_fdt.c
index eb0cc3c75c64..e98471535bf8 100644
--- a/sys/dev/vnic/thunder_mdio_fdt.c
+++ b/sys/dev/vnic/thunder_mdio_fdt.c
@@ -167,7 +167,8 @@ mdionexus_fdt_attach(device_t dev)
if (err != 0)
return (err);
- return (bus_generic_attach(dev));
+ bus_attach_children(dev);
+ return (0);
}
/* OFW bus interface */
@@ -269,7 +270,7 @@ mdionexus_ofw_bus_attach(device_t dev)
ofw_bus_intr_to_rl(dev, node, &di->di_rl, NULL);
/* Add newbus device for this FDT node */
- child = device_add_child(dev, NULL, -1);
+ child = device_add_child(dev, NULL, DEVICE_UNIT_ANY);
if (child == NULL) {
resource_list_free(&di->di_rl);
ofw_bus_gen_destroy_devinfo(&di->di_dinfo);
diff --git a/sys/dev/vr/if_vr.c b/sys/dev/vr/if_vr.c
index 2afe16e3c6d2..284628365c83 100644
--- a/sys/dev/vr/if_vr.c
+++ b/sys/dev/vr/if_vr.c
@@ -607,7 +607,7 @@ vr_attach(device_t dev)
const struct vr_type *t;
uint8_t eaddr[ETHER_ADDR_LEN];
int error, rid;
- int i, phy, pmc;
+ int i, phy;
sc = device_get_softc(dev);
sc->vr_dev = dev;
@@ -656,11 +656,6 @@ vr_attach(device_t dev)
/* Allocate ifnet structure. */
ifp = sc->vr_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "couldn't allocate ifnet structure\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -693,8 +688,7 @@ vr_attach(device_t dev)
sc->vr_txthresh = VR_TXTHRESH_MAX;
}
- if (sc->vr_revid >= REV_ID_VT6102_A &&
- pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
+ if (sc->vr_revid >= REV_ID_VT6102_A && pci_has_pm(dev))
if_setcapabilitiesbit(ifp, IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC, 0);
/* Rhine supports oversized VLAN frame. */
@@ -709,7 +703,7 @@ vr_attach(device_t dev)
* shuts down. Be sure to kick it in the head to wake it
* up again.
*/
- if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0)
+ if (pci_has_pm(dev))
VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1));
/*
@@ -843,8 +837,6 @@ vr_detach(device_t dev)
taskqueue_drain(taskqueue_fast, &sc->vr_inttask);
ether_ifdetach(ifp);
}
- if (sc->vr_miibus)
- device_delete_child(dev, sc->vr_miibus);
bus_generic_detach(dev);
if (sc->vr_intrhand)
@@ -2533,14 +2525,12 @@ static void
vr_setwol(struct vr_softc *sc)
{
if_t ifp;
- int pmc;
- uint16_t pmstat;
uint8_t v;
VR_LOCK_ASSERT(sc);
if (sc->vr_revid < REV_ID_VT6102_A ||
- pci_find_cap(sc->vr_dev, PCIY_PMG, &pmc) != 0)
+ !pci_has_pm(sc->vr_dev))
return;
ifp = sc->vr_ifp;
@@ -2579,11 +2569,8 @@ vr_setwol(struct vr_softc *sc)
CSR_WRITE_1(sc, VR_STICKHW, v);
/* Request PME if WOL is requested. */
- pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2);
- pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE);
if ((if_getcapenable(ifp) & IFCAP_WOL) != 0)
- pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->vr_dev);
}
static void
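The pci_has_pm()/pci_enable_pme() helpers bundle the capability lookup and the PMCSR read-modify-write that vr_setwol() used to open-code; a before/after sketch using the register names from the removed lines:

	/* Before: locate the PM capability, then poke PCIR_POWER_STATUS. */
	if (pci_find_cap(dev, PCIY_PMG, &pmc) == 0) {
		pmstat = pci_read_config(dev, pmc + PCIR_POWER_STATUS, 2);
		pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
		pci_write_config(dev, pmc + PCIR_POWER_STATUS, pmstat, 2);
	}

	/* After: the helpers hide the capability offset entirely. */
	if (pci_has_pm(dev))
		pci_enable_pme(dev);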
diff --git a/sys/dev/vt/hw/efifb/efifb.c b/sys/dev/vt/hw/efifb/efifb.c
index 805b36d499ae..ad49b6735998 100644
--- a/sys/dev/vt/hw/efifb/efifb.c
+++ b/sys/dev/vt/hw/efifb/efifb.c
@@ -58,6 +58,7 @@ static struct vt_driver vt_efifb_driver = {
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vt_fb_bitblt_argb,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_fb_ioctl = vt_fb_ioctl,
@@ -76,17 +77,13 @@ vt_efifb_probe(struct vt_device *vd)
{
int disabled;
struct efi_fb *efifb;
- caddr_t kmdp;
disabled = 0;
TUNABLE_INT_FETCH("hw.syscons.disable", &disabled);
if (disabled != 0)
return (CN_DEAD);
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- efifb = (struct efi_fb *)preload_search_info(kmdp,
+ efifb = (struct efi_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_FB);
if (efifb == NULL)
return (CN_DEAD);
@@ -99,10 +96,11 @@ vt_efifb_init(struct vt_device *vd)
{
struct fb_info *info;
struct efi_fb *efifb;
- caddr_t kmdp;
- int memattr;
+ vm_memattr_t memattr;
int roff, goff, boff;
- char attr[16];
+
+#ifdef VM_MEMATTR_WRITE_COMBINING
+ char attr[16];
/*
* XXX TODO: I think there's more nuance here than we're acknowledging,
@@ -126,15 +124,15 @@ vt_efifb_init(struct vt_device *vd)
memattr = VM_MEMATTR_UNCACHEABLE;
}
}
+#else
+ memattr = VM_MEMATTR_UNCACHEABLE;
+#endif
info = vd->vd_softc;
if (info == NULL)
info = vd->vd_softc = (void *)&local_info;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- efifb = (struct efi_fb *)preload_search_info(kmdp,
+ efifb = (struct efi_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_EFI_FB);
if (efifb == NULL)
return (CN_DEAD);
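The repeated metadata lookup becomes a read of preload_kmdp, which the startup code caches once; the conversion here and in vbefb.c and vt_core.c below is mechanical:

	/* Before: every consumer re-ran the type search. */
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	/* After: use the pointer cached at boot. */
	efifb = (struct efi_fb *)preload_search_info(preload_kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);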
diff --git a/sys/dev/vt/hw/fb/vt_early_fb.c b/sys/dev/vt/hw/fb/vt_early_fb.c
index 52241038ce35..9d66f5cd1398 100644
--- a/sys/dev/vt/hw/fb/vt_early_fb.c
+++ b/sys/dev/vt/hw/fb/vt_early_fb.c
@@ -58,6 +58,7 @@ static struct vt_driver vt_fb_early_driver = {
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vt_fb_bitblt_argb,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_priority = VD_PRIORITY_GENERIC,
diff --git a/sys/dev/vt/hw/fb/vt_fb.c b/sys/dev/vt/hw/fb/vt_fb.c
index c1eeb5e69e08..6130dc4061b4 100644
--- a/sys/dev/vt/hw/fb/vt_fb.c
+++ b/sys/dev/vt/hw/fb/vt_fb.c
@@ -34,6 +34,7 @@
#include <sys/queue.h>
#include <sys/fbio.h>
#include <sys/kernel.h>
+#include <sys/endian.h>
#include <dev/vt/vt.h>
#include <dev/vt/hw/fb/vt_fb.h>
#include <dev/vt/colors/vt_termcolors.h>
@@ -49,6 +50,7 @@ static struct vt_driver vt_fb_driver = {
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vt_fb_bitblt_argb,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_postswitch = vt_fb_postswitch,
@@ -73,15 +75,31 @@ static void
vt_fb_mem_wr2(struct fb_info *sc, uint32_t o, uint16_t v)
{
- KASSERT((o < sc->fb_size), ("Offset %#08x out of fb size", o));
+ KASSERT((o + 1 < sc->fb_size), ("Offset %#08x out of fb size", o + 1));
*(uint16_t *)(sc->fb_vbase + o) = v;
}
static void
+vt_fb_mem_wr3(struct fb_info *sc, uint32_t o, uint32_t v)
+{
+ uint8_t *b = (uint8_t *)sc->fb_vbase + o;
+
+ KASSERT((o + 2 < sc->fb_size), ("Offset %#08x out of fb size", o + 2));
+ /*
+ * We want to write three bytes, independent
+ * of endianness. Multiply _QUAD_LOWWORD and
+ * _QUAD_HIGHWORD by 2 to skip the middle byte.
+ */
+ b[_QUAD_LOWWORD * 2] = v & 0xff;
+ b[1] = (v >> 8) & 0xff;
+ b[_QUAD_HIGHWORD * 2] = (v >> 16) & 0xff;
+}
+
+static void
vt_fb_mem_wr4(struct fb_info *sc, uint32_t o, uint32_t v)
{
- KASSERT((o < sc->fb_size), ("Offset %#08x out of fb size", o));
+ KASSERT((o + 3 < sc->fb_size), ("Offset %#08x out of fb size", o + 3));
*(uint32_t *)(sc->fb_vbase + o) = v;
}
@@ -186,9 +204,7 @@ vt_fb_setpixel(struct vt_device *vd, int x, int y, term_color_t color)
vt_fb_mem_wr2(info, o, c);
break;
case 3:
- vt_fb_mem_wr1(info, o, (c >> 16) & 0xff);
- vt_fb_mem_wr1(info, o + 1, (c >> 8) & 0xff);
- vt_fb_mem_wr1(info, o + 2, c & 0xff);
+ vt_fb_mem_wr3(info, o, c);
break;
case 4:
vt_fb_mem_wr4(info, o, c);
@@ -245,12 +261,7 @@ vt_fb_blank(struct vt_device *vd, term_color_t color)
case 3:
for (h = 0; h < info->fb_height; h++)
for (o = 0; o < info->fb_stride - 2; o += 3) {
- vt_fb_mem_wr1(info, h*info->fb_stride + o,
- (c >> 16) & 0xff);
- vt_fb_mem_wr1(info, h*info->fb_stride + o + 1,
- (c >> 8) & 0xff);
- vt_fb_mem_wr1(info, h*info->fb_stride + o + 2,
- c & 0xff);
+ vt_fb_mem_wr3(info, h*info->fb_stride + o, c);
}
break;
case 4:
@@ -316,10 +327,7 @@ vt_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
vt_fb_mem_wr2(info, o, cc);
break;
case 3:
- /* Packed mode, so unaligned. Byte access. */
- vt_fb_mem_wr1(info, o, (cc >> 16) & 0xff);
- vt_fb_mem_wr1(info, o + 1, (cc >> 8) & 0xff);
- vt_fb_mem_wr1(info, o + 2, cc & 0xff);
+ vt_fb_mem_wr3(info, o, cc);
break;
case 4:
vt_fb_mem_wr4(info, o, cc);
@@ -332,6 +340,52 @@ vt_fb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
}
}
+int
+vt_fb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+ struct fb_info *info;
+ uint32_t o, cc;
+ int bpp, xi, yi;
+
+ info = vd->vd_softc;
+ bpp = FBTYPE_GET_BYTESPP(info);
+ if (bpp != 4)
+ return (EOPNOTSUPP);
+
+ if (info->fb_flags & FB_FLAG_NOWRITE)
+ return (0);
+
+ KASSERT((info->fb_vbase != 0), ("Unmapped framebuffer"));
+
+ /* Bound by right and bottom edges. */
+ if (y + height > vw->vw_draw_area.tr_end.tp_row) {
+ if (y >= vw->vw_draw_area.tr_end.tp_row)
+ return (EINVAL);
+ height = vw->vw_draw_area.tr_end.tp_row - y;
+ }
+ if (x + width > vw->vw_draw_area.tr_end.tp_col) {
+ if (x >= vw->vw_draw_area.tr_end.tp_col)
+ return (EINVAL);
+ width = vw->vw_draw_area.tr_end.tp_col - x;
+ }
+ for (yi = 0; yi < height; yi++) {
+ for (xi = 0; xi < (width * 4); xi += 4) {
+ o = (y + yi) * info->fb_stride + (x + (xi / 4)) * bpp;
+ o += vd->vd_transpose;
+ cc = (argb[yi * width * 4 + xi] << 16) |
+ (argb[yi * width * 4 + xi + 1] << 8) |
+ (argb[yi * width * 4 + xi + 2]) |
+ (argb[yi * width * 4 + xi + 3] << 24);
+ vt_fb_mem_wr4(info, o, cc);
+ }
+ }
+
+ return (0);
+}
+
void
vt_fb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
const term_rect_t *area)
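vt_fb_bitblt_argb() accepts bytes in R,G,B,A order and only supports 4-byte-per-pixel framebuffers, packing each quadruple into a native 32-bit word with alpha on top; everything else gets EOPNOTSUPP. The per-pixel packing, isolated as a sketch:

/* Sketch: pack one R,G,B,A byte quadruple into a 32-bit ARGB word
 * (alpha in bits 31-24, red 23-16, green 15-8, blue 7-0). */
static inline uint32_t
argb_pack(const uint8_t p[4])
{
	return ((uint32_t)p[0] << 16 | (uint32_t)p[1] << 8 |
	    (uint32_t)p[2] | (uint32_t)p[3] << 24);
}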
diff --git a/sys/dev/vt/hw/fb/vt_fb.h b/sys/dev/vt/hw/fb/vt_fb.h
index 54f7ba667eb1..fc3db42b2a15 100644
--- a/sys/dev/vt/hw/fb/vt_fb.h
+++ b/sys/dev/vt/hw/fb/vt_fb.h
@@ -42,6 +42,7 @@ vd_blank_t vt_fb_blank;
vd_bitblt_text_t vt_fb_bitblt_text;
vd_invalidate_text_t vt_fb_invalidate_text;
vd_bitblt_bmp_t vt_fb_bitblt_bitmap;
+vd_bitblt_argb_t vt_fb_bitblt_argb;
vd_drawrect_t vt_fb_drawrect;
vd_setpixel_t vt_fb_setpixel;
vd_postswitch_t vt_fb_postswitch;
diff --git a/sys/dev/vt/hw/ofwfb/ofwfb.c b/sys/dev/vt/hw/ofwfb/ofwfb.c
index 6469240ed303..f5653b5f3ff2 100644
--- a/sys/dev/vt/hw/ofwfb/ofwfb.c
+++ b/sys/dev/vt/hw/ofwfb/ofwfb.c
@@ -66,6 +66,7 @@ static vd_probe_t ofwfb_probe;
static vd_init_t ofwfb_init;
static vd_bitblt_text_t ofwfb_bitblt_text;
static vd_bitblt_bmp_t ofwfb_bitblt_bitmap;
+static vd_bitblt_argb_t ofwfb_bitblt_argb;
static const struct vt_driver vt_ofwfb_driver = {
.vd_name = "ofwfb",
@@ -74,6 +75,7 @@ static const struct vt_driver vt_ofwfb_driver = {
.vd_blank = vt_fb_blank,
.vd_bitblt_text = ofwfb_bitblt_text,
.vd_bitblt_bmp = ofwfb_bitblt_bitmap,
+ .vd_bitblt_argb = ofwfb_bitblt_argb,
.vd_fb_ioctl = vt_fb_ioctl,
.vd_fb_mmap = vt_fb_mmap,
.vd_priority = VD_PRIORITY_GENERIC+1,
@@ -242,6 +244,16 @@ ofwfb_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
}
}
+static int
+ofwfb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+
+ return (EOPNOTSUPP);
+}
+
void
ofwfb_bitblt_text(struct vt_device *vd, const struct vt_window *vw,
const term_rect_t *area)
diff --git a/sys/dev/vt/hw/simplefb/simplefb.c b/sys/dev/vt/hw/simplefb/simplefb.c
index c7381b102d3e..6983b41b3888 100644
--- a/sys/dev/vt/hw/simplefb/simplefb.c
+++ b/sys/dev/vt/hw/simplefb/simplefb.c
@@ -49,6 +49,7 @@
static vd_init_t vt_simplefb_init;
static vd_fini_t vt_simplefb_fini;
static vd_probe_t vt_simplefb_probe;
+static vd_bitblt_argb_t vt_simplefb_bitblt_argb;
static struct vt_driver vt_simplefb_driver = {
.vd_name = "simplefb",
@@ -59,6 +60,7 @@ static struct vt_driver vt_simplefb_driver = {
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vt_simplefb_bitblt_argb,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_fb_ioctl = vt_fb_ioctl,
@@ -221,3 +223,13 @@ vt_simplefb_fini(struct vt_device *vd, void *softc)
vt_fb_fini(vd, softc);
pmap_unmapdev((void *)sc->fb_vbase, sc->fb_size);
}
+
+static int
+vt_simplefb_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+
+ return (EOPNOTSUPP);
+}
diff --git a/sys/dev/vt/hw/vbefb/vbefb.c b/sys/dev/vt/hw/vbefb/vbefb.c
index bb4d88dd2ecf..0f9d9257948c 100644
--- a/sys/dev/vt/hw/vbefb/vbefb.c
+++ b/sys/dev/vt/hw/vbefb/vbefb.c
@@ -58,6 +58,7 @@ static struct vt_driver vt_vbefb_driver = {
.vd_bitblt_text = vt_fb_bitblt_text,
.vd_invalidate_text = vt_fb_invalidate_text,
.vd_bitblt_bmp = vt_fb_bitblt_bitmap,
+ .vd_bitblt_argb = vt_fb_bitblt_argb,
.vd_drawrect = vt_fb_drawrect,
.vd_setpixel = vt_fb_setpixel,
.vd_fb_ioctl = vt_fb_ioctl,
@@ -76,17 +77,13 @@ vt_vbefb_probe(struct vt_device *vd)
{
int disabled;
struct vbe_fb *vbefb;
- caddr_t kmdp;
disabled = 0;
TUNABLE_INT_FETCH("hw.syscons.disable", &disabled);
if (disabled != 0)
return (CN_DEAD);
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- vbefb = (struct vbe_fb *)preload_search_info(kmdp,
+ vbefb = (struct vbe_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_VBE_FB);
if (vbefb == NULL)
return (CN_DEAD);
@@ -99,17 +96,13 @@ vt_vbefb_init(struct vt_device *vd)
{
struct fb_info *info;
struct vbe_fb *vbefb;
- caddr_t kmdp;
int format, roff, goff, boff;
info = vd->vd_softc;
if (info == NULL)
info = vd->vd_softc = (void *)&local_vbe_info;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- vbefb = (struct vbe_fb *)preload_search_info(kmdp,
+ vbefb = (struct vbe_fb *)preload_search_info(preload_kmdp,
MODINFO_METADATA | MODINFOMD_VBE_FB);
if (vbefb == NULL)
return (CN_DEAD);
diff --git a/sys/dev/vt/hw/vga/vt_vga.c b/sys/dev/vt/hw/vga/vt_vga.c
index e2ae20894fc5..675c0573bd7e 100644
--- a/sys/dev/vt/hw/vga/vt_vga.c
+++ b/sys/dev/vt/hw/vga/vt_vga.c
@@ -96,6 +96,7 @@ static vd_blank_t vga_blank;
static vd_bitblt_text_t vga_bitblt_text;
static vd_invalidate_text_t vga_invalidate_text;
static vd_bitblt_bmp_t vga_bitblt_bitmap;
+static vd_bitblt_argb_t vga_bitblt_argb;
static vd_drawrect_t vga_drawrect;
static vd_setpixel_t vga_setpixel;
static vd_postswitch_t vga_postswitch;
@@ -108,6 +109,7 @@ static const struct vt_driver vt_vga_driver = {
.vd_bitblt_text = vga_bitblt_text,
.vd_invalidate_text = vga_invalidate_text,
.vd_bitblt_bmp = vga_bitblt_bitmap,
+ .vd_bitblt_argb = vga_bitblt_argb,
.vd_drawrect = vga_drawrect,
.vd_setpixel = vga_setpixel,
.vd_postswitch = vga_postswitch,
@@ -998,6 +1000,16 @@ vga_bitblt_bitmap(struct vt_device *vd, const struct vt_window *vw,
}
}
+static int
+vga_bitblt_argb(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y)
+{
+
+ return (EOPNOTSUPP);
+}
+
static void
vga_initialize_graphics(struct vt_device *vd)
{
@@ -1335,7 +1347,7 @@ vga_postswitch(struct vt_device *vd)
/* Reinit VGA mode, to restore view after app which change mode. */
vga_initialize(vd, (vd->vd_flags & VDF_TEXTMODE));
- /* Ask vt(9) to update chars on visible area. */
+ /* Ask vt(4) to update chars on visible area. */
vd->vd_flags |= VDF_INVALID;
}
diff --git a/sys/dev/vt/vt.h b/sys/dev/vt/vt.h
index 56a28c0420c7..8e35a81bc101 100644
--- a/sys/dev/vt/vt.h
+++ b/sys/dev/vt/vt.h
@@ -345,6 +345,10 @@ typedef void vd_bitblt_bmp_t(struct vt_device *vd, const struct vt_window *vw,
const uint8_t *pattern, const uint8_t *mask,
unsigned int width, unsigned int height,
unsigned int x, unsigned int y, term_color_t fg, term_color_t bg);
+typedef int vd_bitblt_argb_t(struct vt_device *vd, const struct vt_window *vw,
+ const uint8_t *argb,
+ unsigned int width, unsigned int height,
+ unsigned int x, unsigned int y);
typedef int vd_fb_ioctl_t(struct vt_device *, u_long, caddr_t, struct thread *);
typedef int vd_fb_mmap_t(struct vt_device *, vm_ooffset_t, vm_paddr_t *, int,
vm_memattr_t *);
@@ -368,6 +372,7 @@ struct vt_driver {
vd_bitblt_text_t *vd_bitblt_text;
vd_invalidate_text_t *vd_invalidate_text;
vd_bitblt_bmp_t *vd_bitblt_bmp;
+ vd_bitblt_argb_t *vd_bitblt_argb;
/* Framebuffer ioctls, if present. */
vd_fb_ioctl_t *vd_fb_ioctl;
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index 92c83aee5967..bcf67ddc9689 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -44,6 +44,7 @@
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
+#include <sys/splash.h>
#include <sys/power.h>
#include <sys/priv.h>
#include <sys/proc.h>
@@ -118,19 +119,22 @@ static const struct terminal_class vt_termclass = {
/* Bell pitch/duration. */
#define VT_BELLDURATION (SBT_1S / 20)
-#define VT_BELLPITCH (1193182 / 800) /* Approx 1491Hz */
+#define VT_BELLPITCH 800
#define VT_UNIT(vw) ((vw)->vw_device->vd_unit * VT_MAXWINDOWS + \
(vw)->vw_number)
static SYSCTL_NODE(_kern, OID_AUTO, vt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
- "vt(9) parameters");
+ "vt(4) parameters");
static VT_SYSCTL_INT(enable_altgr, 1, "Enable AltGr key (Do not assume R.Alt as Alt)");
static VT_SYSCTL_INT(enable_bell, 0, "Enable bell");
-static VT_SYSCTL_INT(debug, 0, "vt(9) debug level");
+static VT_SYSCTL_INT(debug, 0, "vt(4) debug level");
static VT_SYSCTL_INT(deadtimer, 15, "Time to wait busy process in VT_PROCESS mode");
static VT_SYSCTL_INT(suspendswitch, 1, "Switch to VT0 before suspend");
+/* Slow down and don't rely on timers and interrupts. */
+static VT_SYSCTL_INT(slow_down, 0, "Non-zero makes the console slower and synchronous.");
+
/* Allow to disable some keyboard combinations. */
static VT_SYSCTL_INT(kbd_halt, 1, "Enable halt keyboard combination. "
"See kbdmap(5) to configure.");
@@ -191,8 +195,8 @@ static void vt_update_static(void *);
#ifndef SC_NO_CUTPASTE
static void vt_mouse_paste(void);
#endif
-static void vt_suspend_handler(void *priv);
-static void vt_resume_handler(void *priv);
+static void vt_suspend_handler(void *priv, enum power_stype stype);
+static void vt_resume_handler(void *priv, enum power_stype stype);
SET_DECLARE(vt_drv_set, struct vt_driver);
@@ -1134,6 +1138,13 @@ vtterm_bell(struct terminal *tm)
sysbeep(vw->vw_bell_pitch, vw->vw_bell_duration);
}
+/*
+ * Beep with user-provided frequency and duration as specified by a KDMKTONE
+ * ioctl (compatible with Linux). The frequency is specified as an 8254 PIT
+ * divisor for a 1.19 MHz clock.
+ *
+ * See https://tldp.org/LDP/lpg/node83.html.
+ */
static void
vtterm_beep(struct terminal *tm, u_int param)
{
@@ -1147,6 +1158,7 @@ vtterm_beep(struct terminal *tm, u_int param)
return;
}
+ /* XXX the period unit is supposed to be "timer ticks". */
period = ((param >> 16) & 0xffff) * SBT_1MS;
freq = 1193182 / (param & 0xffff);
@@ -1648,6 +1660,12 @@ vtterm_done(struct terminal *tm)
}
vd->vd_flags &= ~VDF_SPLASH;
vt_flush(vd);
+ } else if (vt_slow_down > 0) {
+ int i, j;
+ for (i = 0; i < vt_slow_down; i++) {
+ for (j = 0; j < 1000; j++)
+ vt_flush(vd);
+ }
} else if (!(vd->vd_flags & VDF_ASYNC)) {
vt_flush(vd);
}
@@ -1657,18 +1675,28 @@ vtterm_done(struct terminal *tm)
static void
vtterm_splash(struct vt_device *vd)
{
+ struct splash_info *si;
+ uintptr_t image;
vt_axis_t top, left;
- /* Display a nice boot splash. */
+ si = MD_FETCH(preload_kmdp, MODINFOMD_SPLASH, struct splash_info *);
if (!(vd->vd_flags & VDF_TEXTMODE) && (boothowto & RB_MUTE)) {
- top = (vd->vd_height - vt_logo_height) / 2;
- left = (vd->vd_width - vt_logo_width) / 2;
- switch (vt_logo_depth) {
- case 1:
- /* XXX: Unhardcode colors! */
+ if (si == NULL) {
+ top = (vd->vd_height - vt_logo_height) / 2;
+ left = (vd->vd_width - vt_logo_width) / 2;
vd->vd_driver->vd_bitblt_bmp(vd, vd->vd_curwindow,
vt_logo_image, NULL, vt_logo_width, vt_logo_height,
left, top, TC_WHITE, TC_BLACK);
+ } else {
+ if (si->si_depth != 4)
+ return;
+ image = (uintptr_t)si + sizeof(struct splash_info);
+ image = roundup2(image, 8);
+ top = (vd->vd_height - si->si_height) / 2;
+ left = (vd->vd_width - si->si_width) / 2;
+ vd->vd_driver->vd_bitblt_argb(vd, vd->vd_curwindow,
+ (unsigned char *)image, si->si_width, si->si_height,
+ left, top);
}
vd->vd_flags |= VDF_SPLASH;
}
@@ -1774,14 +1802,10 @@ parse_font_info(struct font_info *fi)
static void
vt_init_font(void *arg)
{
- caddr_t kmdp;
struct font_info *fi;
struct vt_font *font;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- fi = MD_FETCH(kmdp, MODINFOMD_FONT, struct font_info *);
+ fi = MD_FETCH(preload_kmdp, MODINFOMD_FONT, struct font_info *);
font = parse_font_info(fi);
if (font != NULL)
@@ -1793,14 +1817,10 @@ SYSINIT(vt_init_font, SI_SUB_KMEM, SI_ORDER_ANY, vt_init_font, &vt_consdev);
static void
vt_init_font_static(void)
{
- caddr_t kmdp;
struct font_info *fi;
struct vt_font *font;
- kmdp = preload_search_by_type("elf kernel");
- if (kmdp == NULL)
- kmdp = preload_search_by_type("elf64 kernel");
- fi = MD_FETCH(kmdp, MODINFOMD_FONT, struct font_info *);
+ fi = MD_FETCH(preload_kmdp, MODINFOMD_FONT, struct font_info *);
font = parse_font_info_static(fi);
if (font != NULL)
@@ -3310,7 +3330,7 @@ vt_replace_backend(const struct vt_driver *drv, void *softc)
}
static void
-vt_suspend_handler(void *priv)
+vt_suspend_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
@@ -3321,7 +3341,7 @@ vt_suspend_handler(void *priv)
}
static void
-vt_resume_handler(void *priv)
+vt_resume_handler(void *priv, enum power_stype stype)
{
struct vt_device *vd;
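A hypothetical userland use of the KDMKTONE encoding documented above, assuming the definition from <sys/kbio.h>: the low 16 bits carry the 8254 divisor and the high 16 bits the duration, which vt currently interprets as milliseconds.

#include <sys/ioctl.h>
#include <sys/kbio.h>
#include <fcntl.h>
#include <unistd.h>

/* Beep at ~1 kHz (divisor 1193182 / 1000) for 250 ms. */
int
beep(void)
{
	int error, fd, param;

	fd = open("/dev/ttyv0", O_RDWR);
	if (fd < 0)
		return (-1);
	param = (250 << 16) | (1193182 / 1000);
	error = ioctl(fd, KDMKTONE, param);
	close(fd);
	return (error);
}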
diff --git a/sys/dev/vt/vt_sysmouse.c b/sys/dev/vt/vt_sysmouse.c
index 5147865fc20f..f2f5a0fa5c3a 100644
--- a/sys/dev/vt/vt_sysmouse.c
+++ b/sys/dev/vt/vt_sysmouse.c
@@ -128,7 +128,15 @@ sysmouse_evdev_store(int x, int y, int z, int buttons)
evdev_push_event(sysmouse_evdev, EV_REL, REL_X, x);
evdev_push_event(sysmouse_evdev, EV_REL, REL_Y, y);
switch (evdev_sysmouse_t_axis) {
- case EVDEV_SYSMOUSE_T_AXIS_PSM:
+ case EVDEV_SYSMOUSE_T_AXIS_WSP: /* 3 */
+ if (buttons & (1 << 5)) {
+ evdev_push_rel(sysmouse_evdev, REL_HWHEEL, z);
+ buttons &= ~(1 << 5);
+ } else {
+ evdev_push_rel(sysmouse_evdev, REL_WHEEL, -z);
+ }
+ break;
+ case EVDEV_SYSMOUSE_T_AXIS_PSM: /* 2 */
switch (z) {
case 1:
case -1:
@@ -140,14 +148,14 @@ sysmouse_evdev_store(int x, int y, int z, int buttons)
break;
}
break;
- case EVDEV_SYSMOUSE_T_AXIS_UMS:
+ case EVDEV_SYSMOUSE_T_AXIS_UMS: /* 1 */
if (buttons & (1 << 6))
evdev_push_rel(sysmouse_evdev, REL_HWHEEL, 1);
else if (buttons & (1 << 5))
evdev_push_rel(sysmouse_evdev, REL_HWHEEL, -1);
buttons &= ~((1 << 5)|(1 << 6));
/* PASSTHROUGH */
- case EVDEV_SYSMOUSE_T_AXIS_NONE:
+ case EVDEV_SYSMOUSE_T_AXIS_NONE: /* 0 */
default:
evdev_push_rel(sysmouse_evdev, REL_WHEEL, -z);
}
diff --git a/sys/dev/vte/if_vte.c b/sys/dev/vte/if_vte.c
index d4e1553c432d..f32053c36cee 100644
--- a/sys/dev/vte/if_vte.c
+++ b/sys/dev/vte/if_vte.c
@@ -435,12 +435,6 @@ vte_attach(device_t dev)
vte_get_macaddr(sc);
ifp = sc->vte_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "cannot allocate ifnet structure.\n");
- error = ENXIO;
- goto fail;
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -512,10 +506,6 @@ vte_detach(device_t dev)
ether_ifdetach(ifp);
}
- if (sc->vte_miibus != NULL) {
- device_delete_child(dev, sc->vte_miibus);
- sc->vte_miibus = NULL;
- }
bus_generic_detach(dev);
if (sc->vte_intrhand != NULL) {
diff --git a/sys/dev/watchdog/watchdog.c b/sys/dev/watchdog/watchdog.c
index e6b6dc1eac70..c599db56bf95 100644
--- a/sys/dev/watchdog/watchdog.c
+++ b/sys/dev/watchdog/watchdog.c
@@ -50,11 +50,20 @@
#include <sys/syscallsubr.h> /* kern_clock_gettime() */
-static int wd_set_pretimeout(int newtimeout, int disableiftoolong);
+#ifdef COMPAT_FREEBSD14
+#define WDIOCPATPAT_14 _IOW('W', 42, u_int) /* pat the watchdog */
+#define WDIOC_SETTIMEOUT_14 _IOW('W', 43, int) /* set/reset the timer */
+#define WDIOC_GETTIMEOUT_14 _IOR('W', 44, int) /* get total timeout */
+#define WDIOC_GETTIMELEFT_14 _IOR('W', 45, int) /* get time left */
+#define WDIOC_GETPRETIMEOUT_14 _IOR('W', 46, int) /* get the pre-timeout */
+#define WDIOC_SETPRETIMEOUT_14 _IOW('W', 47, int) /* set the pre-timeout */
+#endif
+
+static int wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong);
static void wd_timeout_cb(void *arg);
static struct callout wd_pretimeo_handle;
-static int wd_pretimeout;
+static sbintime_t wd_pretimeout;
static int wd_pretimeout_act = WD_SOFT_LOG;
static struct callout wd_softtimeo_handle;
@@ -63,6 +72,8 @@ static int wd_softtimer; /* true = use softtimer instead of hardware
static int wd_softtimeout_act = WD_SOFT_LOG; /* action for the software timeout */
static struct cdev *wd_dev;
+static volatile sbintime_t wd_last_sbt; /* last timeout value (sbt) */
+static sbintime_t wd_last_sbt_sysctl; /* last timeout value (sbt) */
static volatile u_int wd_last_u; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl; /* last timeout value set by kern_do_pat */
static u_int wd_last_u_sysctl_secs; /* wd_last_u in seconds */
@@ -73,6 +84,8 @@ SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u, CTLFLAG_RD,
&wd_last_u_sysctl, 0, "Watchdog last update time");
SYSCTL_UINT(_hw_watchdog, OID_AUTO, wd_last_u_secs, CTLFLAG_RD,
&wd_last_u_sysctl_secs, 0, "Watchdog last update time");
+SYSCTL_SBINTIME_MSEC(_hw_watchdog, OID_AUTO, wd_last_msecs, CTLFLAG_RD,
+ &wd_last_sbt_sysctl, "Watchdog last update time (milliseconds)");
static int wd_lastpat_valid = 0;
static time_t wd_lastpat = 0; /* when the watchdog was last patted */
@@ -80,105 +93,94 @@ static time_t wd_lastpat = 0; /* when the watchdog was last patted */
/* Hook for external software watchdog to register for use if needed */
void (*wdog_software_attach)(void);
-static void
-pow2ns_to_ts(int pow2ns, struct timespec *ts)
+/* Legacy interface to watchdog. */
+int
+wdog_kern_pat(u_int utim)
{
- uint64_t ns;
+ sbintime_t sbt;
- ns = 1ULL << pow2ns;
- ts->tv_sec = ns / 1000000000ULL;
- ts->tv_nsec = ns % 1000000000ULL;
-}
+ if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
+ return (EINVAL);
-static int
-pow2ns_to_ticks(int pow2ns)
-{
- struct timeval tv;
- struct timespec ts;
+ if ((utim & WD_LASTVAL) != 0) {
+ return (wdog_control(WD_CTRL_RESET));
+ }
- pow2ns_to_ts(pow2ns, &ts);
- TIMESPEC_TO_TIMEVAL(&tv, &ts);
- return (tvtohz(&tv));
+ utim &= WD_INTERVAL;
+ if (utim == WD_TO_NEVER)
+ sbt = 0;
+ else
+ sbt = nstosbt(1 << utim);
+
+ return (wdog_kern_pat_sbt(sbt));
}
-static int
-seconds_to_pow2ns(int seconds)
+int
+wdog_control(int ctrl)
{
- uint64_t power;
- uint64_t ns;
- uint64_t shifted;
-
- ns = ((uint64_t)seconds) * 1000000000ULL;
- power = flsll(ns);
- shifted = 1ULL << power;
- if (shifted <= ns) {
- power++;
+ /* Disable takes precedence */
+ if (ctrl == WD_CTRL_DISABLE) {
+ wdog_kern_pat(0);
}
- return (power);
+
+ if ((ctrl & WD_CTRL_RESET) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ } else if ((ctrl & WD_CTRL_ENABLE) != 0) {
+ wdog_kern_pat_sbt(wd_last_sbt);
+ }
+
+ return (0);
}
int
-wdog_kern_pat(u_int utim)
+wdog_kern_pat_sbt(sbintime_t sbt)
{
- int error;
- static int first = 1;
-
- if ((utim & WD_LASTVAL) != 0 && (utim & WD_INTERVAL) > 0)
- return (EINVAL);
-
- if ((utim & WD_LASTVAL) != 0) {
- /*
- * if WD_LASTVAL is set, fill in the bits for timeout
- * from the saved value in wd_last_u.
- */
- MPASS((wd_last_u & ~WD_INTERVAL) == 0);
- utim &= ~WD_LASTVAL;
- utim |= wd_last_u;
- } else {
- /*
- * Otherwise save the new interval.
- * This can be zero (to disable the watchdog)
- */
- wd_last_u = (utim & WD_INTERVAL);
+ sbintime_t error_sbt = 0;
+ int pow2ns = 0;
+ int error = 0;
+ static bool first = true;
+
+ /* legacy uses power-of-2-nanoseconds time. */
+ if (sbt != 0) {
+ pow2ns = flsl(sbttons(sbt));
+ }
+ if (wd_last_sbt != sbt) {
+ wd_last_u = pow2ns;
wd_last_u_sysctl = wd_last_u;
- wd_last_u_sysctl_secs = pow2ns_to_ticks(wd_last_u) / hz;
+ wd_last_u_sysctl_secs = sbt / SBT_1S;
+
+ wd_last_sbt = sbt;
}
- if ((utim & WD_INTERVAL) == WD_TO_NEVER) {
- utim = 0;
- /* Assume all is well; watchdog signals failure. */
- error = 0;
- } else {
- /* Assume no watchdog available; watchdog flags success */
+ if (sbt != 0)
error = EOPNOTSUPP;
- }
+
if (wd_softtimer) {
- if (utim == 0) {
+ if (sbt == 0) {
callout_stop(&wd_softtimeo_handle);
} else {
- (void) callout_reset(&wd_softtimeo_handle,
- pow2ns_to_ticks(utim), wd_timeout_cb, "soft");
+ (void) callout_reset_sbt(&wd_softtimeo_handle,
+ sbt, 0, wd_timeout_cb, "soft", 0);
}
error = 0;
} else {
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
/*
- * If we no hardware watchdog responded, we have not tried to
+ * If no hardware watchdog responded, we have not tried to
* attach an external software watchdog, and one is available,
* attach it now and retry.
*/
- if (error == EOPNOTSUPP && first && *wdog_software_attach != NULL) {
+ if (error == EOPNOTSUPP && first && wdog_software_attach != NULL) {
(*wdog_software_attach)();
- EVENTHANDLER_INVOKE(watchdog_list, utim, &error);
+ EVENTHANDLER_INVOKE(watchdog_sbt_list, sbt, &error_sbt, &error);
+ EVENTHANDLER_INVOKE(watchdog_list, pow2ns, &error);
}
- first = 0;
+ first = false;
+ /* TODO: Print a (rate-limited?) warning if error_sbt is too far from the requested timeout. */
wd_set_pretimeout(wd_pretimeout, true);
- /*
- * If we were able to arm/strobe the watchdog, then
- * update the last time it was strobed for WDIOC_GETTIMELEFT
- */
if (!error) {
struct timespec ts;
@@ -189,6 +191,7 @@ wdog_kern_pat(u_int utim)
wd_lastpat_valid = 1;
}
}
+
return (error);
}
@@ -201,6 +204,7 @@ wd_valid_act(int act)
return true;
}
+#ifdef COMPAT_FREEBSD14
static int
wd_ioctl_patpat(caddr_t data)
{
@@ -220,6 +224,7 @@ wd_ioctl_patpat(caddr_t data)
return (wdog_kern_pat(u));
}
+#endif
static int
wd_get_time_left(struct thread *td, time_t *remainp)
@@ -265,16 +270,14 @@ wd_timeout_cb(void *arg)
* current actual watchdog timeout.
*/
static int
-wd_set_pretimeout(int newtimeout, int disableiftoolong)
+wd_set_pretimeout(sbintime_t newtimeout, int disableiftoolong)
{
- u_int utime;
- struct timespec utime_ts;
- int timeout_ticks;
+ sbintime_t utime;
+ sbintime_t timeout_left;
- utime = wdog_kern_last_timeout();
- pow2ns_to_ts(utime, &utime_ts);
+ utime = wdog_kern_last_timeout_sbt();
/* do not permit a pre-timeout >= than the timeout. */
- if (newtimeout >= utime_ts.tv_sec) {
+ if (newtimeout >= utime) {
/*
* If 'disableiftoolong' then just fall through
* so as to disable the pre-watchdog
@@ -292,7 +295,7 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
return 0;
}
- timeout_ticks = pow2ns_to_ticks(utime) - (hz*newtimeout);
+ timeout_left = utime - newtimeout;
#if 0
printf("wd_set_pretimeout: "
"newtimeout: %d, "
@@ -306,8 +309,8 @@ wd_set_pretimeout(int newtimeout, int disableiftoolong)
#endif
/* We determined the value is sane, so reset the callout */
- (void) callout_reset(&wd_pretimeo_handle,
- timeout_ticks, wd_timeout_cb, "pre");
+ (void) callout_reset_sbt(&wd_pretimeo_handle,
+ timeout_left, 0, wd_timeout_cb, "pre", 0);
wd_pretimeout = newtimeout;
return 0;
}
@@ -316,6 +319,7 @@ static int
wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
int flags __unused, struct thread *td)
{
+ sbintime_t sb;
u_int u;
time_t timeleft;
int error;
@@ -351,29 +355,55 @@ wd_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
error = EINVAL;
}
break;
- case WDIOC_GETPRETIMEOUT:
- *(int *)data = (int)wd_pretimeout;
+#ifdef COMPAT_FREEBSD14
+ case WDIOC_GETPRETIMEOUT_14:
+ *(int *)data = (int)(wd_pretimeout / SBT_1S);
break;
- case WDIOC_SETPRETIMEOUT:
- error = wd_set_pretimeout(*(int *)data, false);
+ case WDIOC_SETPRETIMEOUT_14:
+ error = wd_set_pretimeout(*(int *)data * SBT_1S, false);
break;
- case WDIOC_GETTIMELEFT:
+ case WDIOC_GETTIMELEFT_14:
error = wd_get_time_left(td, &timeleft);
if (error)
break;
*(int *)data = (int)timeleft;
break;
- case WDIOC_SETTIMEOUT:
+ case WDIOC_SETTIMEOUT_14:
u = *(u_int *)data;
- error = wdog_kern_pat(seconds_to_pow2ns(u));
+ error = wdog_kern_pat_sbt(mstosbt(u * 1000ULL));
break;
- case WDIOC_GETTIMEOUT:
+ case WDIOC_GETTIMEOUT_14:
u = wdog_kern_last_timeout();
*(u_int *)data = u;
break;
- case WDIOCPATPAT:
+ case WDIOCPATPAT_14:
error = wd_ioctl_patpat(data);
break;
+#endif
+
+ /* New API */
+ case WDIOC_CONTROL:
+ wdog_control(*(int *)data);
+ break;
+ case WDIOC_SETTIMEOUT:
+ sb = *(sbintime_t *)data;
+ error = wdog_kern_pat_sbt(sb);
+ break;
+ case WDIOC_GETTIMEOUT:
+ *(sbintime_t *)data = wdog_kern_last_timeout_sbt();
+ break;
+ case WDIOC_GETTIMELEFT:
+ error = wd_get_time_left(td, &timeleft);
+ if (error)
+ break;
+ *(sbintime_t *)data = (sbintime_t)timeleft * SBT_1S;
+ break;
+ case WDIOC_GETPRETIMEOUT:
+ *(sbintime_t *)data = wd_pretimeout;
+ break;
+ case WDIOC_SETPRETIMEOUT:
+ error = wd_set_pretimeout(*(sbintime_t *)data, false);
+ break;
default:
error = ENOIOCTL;
break;
@@ -392,6 +422,12 @@ wdog_kern_last_timeout(void)
return (wd_last_u);
}
+sbintime_t
+wdog_kern_last_timeout_sbt(void)
+{
+ return (wd_last_sbt);
+}
+
static struct cdevsw wd_cdevsw = {
.d_version = D_VERSION,
.d_ioctl = wd_ioctl,
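With the new ABI, userland hands the driver an sbintime_t (signed 32.32 fixed-point seconds) rather than the legacy power-of-two-nanoseconds code. A hypothetical consumer (device path per _PATH_WATCHDOG; the shift hand-rolls the kernel's SBT_1S rather than assuming it is visible to userland):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/watchdog.h>
#include <fcntl.h>

int
arm_watchdog_10s(void)
{
	sbintime_t sbt;
	int fd;

	fd = open("/dev/" _PATH_WATCHDOG, O_RDWR);
	if (fd < 0)
		return (-1);
	sbt = (sbintime_t)10 << 32;	/* 10 s in 32.32 fixed point */
	return (ioctl(fd, WDIOC_SETTIMEOUT, &sbt));
}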
diff --git a/sys/dev/wbwd/wbwd.c b/sys/dev/wbwd/wbwd.c
index 56b3f8adcd90..e19a60de14a2 100644
--- a/sys/dev/wbwd/wbwd.c
+++ b/sys/dev/wbwd/wbwd.c
@@ -461,7 +461,6 @@ wb_watchdog_fn(void *private, u_int cmd, int *error)
static int
wb_probe(device_t dev)
{
- char buf[128];
struct wb_softc *sc;
int j;
uint8_t devid;
@@ -478,10 +477,9 @@ wb_probe(device_t dev)
for (j = 0; j < nitems(wb_devs); j++) {
if (wb_devs[j].device_id == devid) {
sc->chip = wb_devs[j].chip;
- snprintf(buf, sizeof(buf),
+ device_set_descf(dev,
"%s (0x%02x/0x%02x) Watchdog Timer",
wb_devs[j].descr, devid, revid);
- device_set_desc_copy(dev, buf);
return (BUS_PROBE_SPECIFIC);
}
}
diff --git a/sys/dev/wg/if_wg.c b/sys/dev/wg/if_wg.c
index d0b75d5cbf4b..17aedee0e6b0 100644
--- a/sys/dev/wg/if_wg.c
+++ b/sys/dev/wg/if_wg.c
@@ -312,10 +312,14 @@ static void wg_timers_run_send_keepalive(void *);
static void wg_timers_run_new_handshake(void *);
static void wg_timers_run_zero_key_material(void *);
static void wg_timers_run_persistent_keepalive(void *);
-static int wg_aip_add(struct wg_softc *, struct wg_peer *, sa_family_t, const void *, uint8_t);
+static int wg_aip_add(struct wg_softc *, struct wg_peer *, sa_family_t,
+ const void *, uint8_t);
+static int wg_aip_del(struct wg_softc *, struct wg_peer *, sa_family_t,
+ const void *, uint8_t);
static struct wg_peer *wg_aip_lookup(struct wg_softc *, sa_family_t, void *);
static void wg_aip_remove_all(struct wg_softc *, struct wg_peer *);
-static struct wg_peer *wg_peer_alloc(struct wg_softc *, const uint8_t [WG_KEY_SIZE]);
+static struct wg_peer *wg_peer_create(struct wg_softc *,
+ const uint8_t [WG_KEY_SIZE], int *);
static void wg_peer_free_deferred(struct noise_remote *);
static void wg_peer_destroy(struct wg_peer *);
static void wg_peer_destroy_all(struct wg_softc *);
@@ -378,18 +382,26 @@ static void wg_module_deinit(void);
/* TODO Peer */
static struct wg_peer *
-wg_peer_alloc(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE])
+wg_peer_create(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE],
+ int *errp)
{
struct wg_peer *peer;
sx_assert(&sc->sc_lock, SX_XLOCKED);
peer = malloc(sizeof(*peer), M_WG, M_WAITOK | M_ZERO);
+
peer->p_remote = noise_remote_alloc(sc->sc_local, peer, pub_key);
- peer->p_tx_bytes = counter_u64_alloc(M_WAITOK);
- peer->p_rx_bytes = counter_u64_alloc(M_WAITOK);
+ if ((*errp = noise_remote_enable(peer->p_remote)) != 0) {
+ noise_remote_free(peer->p_remote, NULL);
+ free(peer, M_WG);
+ return (NULL);
+ }
+
peer->p_id = peer_counter++;
peer->p_sc = sc;
+ peer->p_tx_bytes = counter_u64_alloc(M_WAITOK);
+ peer->p_rx_bytes = counter_u64_alloc(M_WAITOK);
cookie_maker_init(&peer->p_cookie, pub_key);
@@ -420,6 +432,13 @@ wg_peer_alloc(struct wg_softc *sc, const uint8_t pub_key[WG_KEY_SIZE])
LIST_INIT(&peer->p_aips);
peer->p_aips_num = 0;
+ TAILQ_INSERT_TAIL(&sc->sc_peers, peer, p_entry);
+ sc->sc_peers_num++;
+
+ if (if_getlinkstate(sc->sc_ifp) == LINK_STATE_UP)
+ wg_timers_enable(peer);
+
+ DPRINTF(sc, "Peer %" PRIu64 " created\n", peer->p_id);
return (peer);
}
@@ -510,46 +529,64 @@ wg_peer_get_endpoint(struct wg_peer *peer, struct wg_endpoint *e)
rw_runlock(&peer->p_endpoint_lock);
}
-/* Allowed IP */
static int
-wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af, const void *addr, uint8_t cidr)
+wg_aip_addrinfo(struct wg_aip *aip, const void *baddr, uint8_t cidr)
{
- struct radix_node_head *root;
- struct radix_node *node;
- struct wg_aip *aip;
- int ret = 0;
+#if defined(INET) || defined(INET6)
+ struct aip_addr *addr, *mask;
- aip = malloc(sizeof(*aip), M_WG, M_WAITOK | M_ZERO);
- aip->a_peer = peer;
- aip->a_af = af;
-
- switch (af) {
+ addr = &aip->a_addr;
+ mask = &aip->a_mask;
+#endif
+ switch (aip->a_af) {
#ifdef INET
case AF_INET:
if (cidr > 32) cidr = 32;
- root = sc->sc_aip4;
- aip->a_addr.in = *(const struct in_addr *)addr;
- aip->a_mask.ip = htonl(~((1LL << (32 - cidr)) - 1) & 0xffffffff);
- aip->a_addr.ip &= aip->a_mask.ip;
- aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in) + sizeof(struct in_addr);
+ addr->in = *(const struct in_addr *)baddr;
+ mask->ip = htonl(~((1LL << (32 - cidr)) - 1) & 0xffffffff);
+ addr->ip &= mask->ip;
+ addr->length = mask->length = offsetof(struct aip_addr, in) + sizeof(struct in_addr);
break;
#endif
#ifdef INET6
case AF_INET6:
if (cidr > 128) cidr = 128;
- root = sc->sc_aip6;
- aip->a_addr.in6 = *(const struct in6_addr *)addr;
- in6_prefixlen2mask(&aip->a_mask.in6, cidr);
+ addr->in6 = *(const struct in6_addr *)baddr;
+ in6_prefixlen2mask(&mask->in6, cidr);
for (int i = 0; i < 4; i++)
- aip->a_addr.ip6[i] &= aip->a_mask.ip6[i];
- aip->a_addr.length = aip->a_mask.length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr);
+ addr->ip6[i] &= mask->ip6[i];
+ addr->length = mask->length = offsetof(struct aip_addr, in6) + sizeof(struct in6_addr);
break;
#endif
default:
- free(aip, M_WG);
return (EAFNOSUPPORT);
}
+ return (0);
+}
+
+/* Allowed IP */
+static int
+wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af,
+ const void *baddr, uint8_t cidr)
+{
+ struct radix_node_head *root = NULL;
+ struct radix_node *node;
+ struct wg_aip *aip;
+ int ret = 0;
+
+ aip = malloc(sizeof(*aip), M_WG, M_WAITOK | M_ZERO);
+ aip->a_peer = peer;
+ aip->a_af = af;
+
+ ret = wg_aip_addrinfo(aip, baddr, cidr);
+ if (ret != 0) {
+ free(aip, M_WG);
+ return (ret);
+ }
+
+ root = af == AF_INET ? sc->sc_aip4 : sc->sc_aip6;
+ MPASS(root != NULL);
RADIX_NODE_HEAD_LOCK(root);
node = root->rnh_addaddr(&aip->a_addr, &aip->a_mask, &root->rh, aip->a_nodes);
if (node == aip->a_nodes) {
@@ -575,6 +612,58 @@ wg_aip_add(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af, const void
return (ret);
}
+static int
+wg_aip_del(struct wg_softc *sc, struct wg_peer *peer, sa_family_t af,
+ const void *baddr, uint8_t cidr)
+{
+ struct radix_node_head *root = NULL;
+ struct radix_node *dnode __diagused, *node;
+ struct wg_aip *aip, addr;
+ int ret = 0;
+
+ /*
+ * All padding must be cleared here, just as it is when new
+ * AllowedIPs are added above, because the radix lookup does a
+ * direct comparison.
+ */
+ memset(&addr, 0, sizeof(addr));
+ addr.a_af = af;
+
+ ret = wg_aip_addrinfo(&addr, baddr, cidr);
+ if (ret != 0)
+ return (ret);
+
+ root = af == AF_INET ? sc->sc_aip4 : sc->sc_aip6;
+
+ MPASS(root != NULL);
+ RADIX_NODE_HEAD_LOCK(root);
+
+ node = root->rnh_lookup(&addr.a_addr, &addr.a_mask, &root->rh);
+ if (node == NULL) {
+ RADIX_NODE_HEAD_UNLOCK(root);
+ return (0);
+ }
+
+ aip = (struct wg_aip *)node;
+ if (aip->a_peer != peer) {
+ /*
+ * The caller may have specified an allowed-ip that belongs to a
+ * different peer; in that case there is nothing to remove for this
+ * peer, so treat the request as already satisfied.
+ */
+ RADIX_NODE_HEAD_UNLOCK(root);
+ return (0);
+ }
+
+ dnode = root->rnh_deladdr(&aip->a_addr, &aip->a_mask, &root->rh);
+ MPASS(dnode == node);
+ RADIX_NODE_HEAD_UNLOCK(root);
+
+ LIST_REMOVE(aip, a_entry);
+ peer->p_aips_num--;
+ free(aip, M_WG);
+ return (0);
+}
+
static struct wg_peer *
wg_aip_lookup(struct wg_softc *sc, sa_family_t af, void *a)
{
@@ -2196,7 +2285,6 @@ determine_af_and_pullup(struct mbuf **m, sa_family_t *af)
return (0);
}
-#ifdef DEV_NETMAP
static int
determine_ethertype_and_pullup(struct mbuf **m, int *etp)
{
@@ -2258,6 +2346,7 @@ wg_transmit(if_t ifp, struct mbuf *m)
return (0);
}
+#ifdef DEV_NETMAP
/*
* This should only be invoked by netmap, via nm_os_send_up(), to process
* packets from the host TX ring.
@@ -2335,7 +2424,7 @@ wg_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro
if (dst->sa_family == AF_UNSPEC || dst->sa_family == pseudo_AF_HDRCMPLT)
memcpy(&af, dst->sa_data, sizeof(af));
else
- af = dst->sa_family;
+ af = RO_GET_FAMILY(ro, dst);
if (af == AF_UNSPEC) {
xmit_err(ifp, m, NULL, af);
return (EAFNOSUPPORT);
@@ -2360,10 +2449,8 @@ wg_output(if_t ifp, struct mbuf *m, const struct sockaddr *dst, struct route *ro
xmit_err(ifp, m, NULL, AF_UNSPEC);
return (ret);
}
- if (parsed_af != af) {
- xmit_err(ifp, m, NULL, AF_UNSPEC);
- return (EAFNOSUPPORT);
- }
+
+ MPASS(parsed_af == af);
mtu = (ro != NULL && ro->ro_mtu > 0) ? ro->ro_mtu : if_getmtu(ifp);
return (wg_xmit(ifp, m, parsed_af, mtu));
}
@@ -2378,7 +2465,7 @@ wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl)
size_t size;
struct noise_remote *remote;
struct wg_peer *peer = NULL;
- bool need_insert = false;
+ bool need_cleanup = false;
sx_assert(&sc->sc_lock, SX_XLOCKED);
@@ -2410,8 +2497,10 @@ wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl)
wg_aip_remove_all(sc, peer);
}
if (peer == NULL) {
- peer = wg_peer_alloc(sc, pub_key);
- need_insert = true;
+ peer = wg_peer_create(sc, pub_key, &err);
+ if (peer == NULL)
+ goto out;
+ need_cleanup = true;
}
if (nvlist_exists_binary(nvl, "endpoint")) {
endpoint = nvlist_get_binary(nvl, "endpoint", &size);
@@ -2445,8 +2534,20 @@ wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl)
aipl = nvlist_get_nvlist_array(nvl, "allowed-ips", &allowedip_count);
for (size_t idx = 0; idx < allowedip_count; idx++) {
+ sa_family_t ipaf;
+ int ipflags;
+
if (!nvlist_exists_number(aipl[idx], "cidr"))
continue;
+
+ ipaf = AF_UNSPEC;
+ ipflags = 0;
+ if (nvlist_exists_number(aipl[idx], "flags"))
+ ipflags = nvlist_get_number(aipl[idx], "flags");
+ if ((ipflags & ~WGALLOWEDIP_VALID_FLAGS) != 0) {
+ err = EOPNOTSUPP;
+ goto out;
+ }
cidr = nvlist_get_number(aipl[idx], "cidr");
if (nvlist_exists_binary(aipl[idx], "ipv4")) {
addr = nvlist_get_binary(aipl[idx], "ipv4", &size);
@@ -2454,34 +2555,36 @@ wg_peer_add(struct wg_softc *sc, const nvlist_t *nvl)
err = EINVAL;
goto out;
}
- if ((err = wg_aip_add(sc, peer, AF_INET, addr, cidr)) != 0)
- goto out;
+
+ ipaf = AF_INET;
} else if (nvlist_exists_binary(aipl[idx], "ipv6")) {
addr = nvlist_get_binary(aipl[idx], "ipv6", &size);
if (addr == NULL || cidr > 128 || size != sizeof(struct in6_addr)) {
err = EINVAL;
goto out;
}
- if ((err = wg_aip_add(sc, peer, AF_INET6, addr, cidr)) != 0)
- goto out;
+
+ ipaf = AF_INET6;
} else {
continue;
}
+
+ MPASS(ipaf != AF_UNSPEC);
+ if ((ipflags & WGALLOWEDIP_REMOVE_ME) != 0) {
+ err = wg_aip_del(sc, peer, ipaf, addr, cidr);
+ } else {
+ err = wg_aip_add(sc, peer, ipaf, addr, cidr);
+ }
+
+ if (err != 0)
+ goto out;
}
}
- if (need_insert) {
- if ((err = noise_remote_enable(peer->p_remote)) != 0)
- goto out;
- TAILQ_INSERT_TAIL(&sc->sc_peers, peer, p_entry);
- sc->sc_peers_num++;
- if (if_getlinkstate(sc->sc_ifp) == LINK_STATE_UP)
- wg_timers_enable(peer);
- }
if (remote != NULL)
noise_remote_put(remote);
return (0);
out:
- if (need_insert) /* If we fail, only destroy if it was new. */
+ if (need_cleanup) /* If we fail, only destroy if it was new. */
wg_peer_destroy(peer);
if (remote != NULL)
noise_remote_put(remote);
@@ -2921,8 +3024,8 @@ wg_clone_create(struct if_clone *ifc, char *name, size_t len,
if_setinitfn(ifp, wg_init);
if_setreassignfn(ifp, wg_reassign);
if_setqflushfn(ifp, wg_qflush);
-#ifdef DEV_NETMAP
if_settransmitfn(ifp, wg_transmit);
+#ifdef DEV_NETMAP
if_setinputfn(ifp, wg_if_input);
#endif
if_setoutputfn(ifp, wg_output);
diff --git a/sys/dev/wg/if_wg.h b/sys/dev/wg/if_wg.h
index f00b7f676319..801eaf38141d 100644
--- a/sys/dev/wg/if_wg.h
+++ b/sys/dev/wg/if_wg.h
@@ -32,4 +32,10 @@ struct wg_data_io {
#define SIOCSWG _IOWR('i', 210, struct wg_data_io)
#define SIOCGWG _IOWR('i', 211, struct wg_data_io)
+
+/* Keep these in sync with wireguard-tools:containers.h */
+#define WGALLOWEDIP_REMOVE_ME 0x0001
+
+#define WGALLOWEDIP_VALID_FLAGS WGALLOWEDIP_REMOVE_ME
+
#endif /* __IF_WG_H__ */
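A hedged sketch of how a control utility might request removal, with the nvlist key names ("ipv4", "cidr", "flags") taken from wg_peer_add() above; embedding the entry in the full SIOCSWG request is omitted:

#include <sys/nv.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Build one allowed-ips entry asking the kernel to drop 10.0.0.0/24. */
static nvlist_t *
make_remove_aip(void)
{
	struct in_addr addr;
	nvlist_t *aip;

	if (inet_pton(AF_INET, "10.0.0.0", &addr) != 1)
		return (NULL);
	aip = nvlist_create(0);
	nvlist_add_binary(aip, "ipv4", &addr, sizeof(addr));
	nvlist_add_number(aip, "cidr", 24);
	nvlist_add_number(aip, "flags", WGALLOWEDIP_REMOVE_ME);
	return (aip);
}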
diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c
index fc5cf02f3a25..471700ca9f5c 100644
--- a/sys/dev/wpi/if_wpi.c
+++ b/sys/dev/wpi/if_wpi.c
@@ -2821,7 +2821,7 @@ wpi_tx_data(struct wpi_softc *sc, struct mbuf *m, struct ieee80211_node *ni)
else {
/* XXX pass pktlen */
(void) ieee80211_ratectl_rate(ni, NULL, 0);
- rate = ni->ni_txrate;
+ rate = ieee80211_node_get_txrate_dot11rate(ni);
}
/* Encrypt the frame if need be. */
@@ -4641,8 +4641,8 @@ again:
return !error;
}
- if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (!(kflags & WPI_KFLAG_MULTICAST) &&
+ ieee80211_is_key_global(vap, k)) {
kflags |= WPI_KFLAG_MULTICAST;
node.kflags = htole16(kflags);
@@ -4726,8 +4726,8 @@ again:
return !error;
}
- if (!(kflags & WPI_KFLAG_MULTICAST) && &vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (!(kflags & WPI_KFLAG_MULTICAST) &&
+ ieee80211_is_key_global(vap, k)) {
kflags |= WPI_KFLAG_MULTICAST;
node.kflags = htole16(kflags);
@@ -4782,8 +4782,7 @@ wpi_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k,
}
/* Handle group keys. */
- if (&vap->iv_nw_keys[0] <= k &&
- k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) {
+ if (ieee80211_is_key_global(vap, k)) {
WPI_NT_LOCK(sc);
if (set)
wvp->wv_gtk |= WPI_VAP_KEY(k->wk_keyix);
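The removed range check spells out what the new net80211 helper tests; assuming it is a thin wrapper, it reduces to:

/* Sketch: true when k points into the vap's global (group) key slots. */
static __inline bool
is_key_global(const struct ieee80211vap *vap,
    const struct ieee80211_key *k)
{
	return (&vap->iv_nw_keys[0] <= k &&
	    k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]);
}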
diff --git a/sys/dev/wtap/if_wtap.c b/sys/dev/wtap/if_wtap.c
index f9e0c2a7e5c4..dd332c538c8f 100644
--- a/sys/dev/wtap/if_wtap.c
+++ b/sys/dev/wtap/if_wtap.c
@@ -33,11 +33,8 @@
*/
#include "if_wtapvar.h"
#include <sys/uio.h> /* uio struct */
-#include <sys/jail.h>
#include <net/if_var.h>
-#include <net/vnet.h>
-#include <net80211/ieee80211_ratectl.h>
#include "if_medium.h"
#include "wtap_hal/hal.h"
@@ -395,13 +392,13 @@ wtap_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ],
ieee80211_vap_attach(vap, ieee80211_media_change,
ieee80211_media_status, mac);
avp->av_dev = make_dev(&wtap_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
- "%s", (const char *)vap->iv_ifp->if_xname);
+ "%s", if_name(vap->iv_ifp));
avp->av_dev->si_drv1 = sc;
callout_init(&avp->av_swba, 0);
/* TODO this is a hack to force it to choose the rate we want */
ni = ieee80211_ref_node(vap->iv_bss);
- ni->ni_txrate = 130;
+ ieee80211_node_set_txrate_ht_mcsrate(ni, 2);
ieee80211_free_node(ni);
return vap;
}
@@ -617,8 +614,7 @@ wtap_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
M_NOWAIT|M_ZERO);
if (ni == NULL)
return (NULL);
-
- ni->ni_txrate = 130;
+ ieee80211_node_set_txrate_ht_mcsrate(ni, 2);
return ni;
}
diff --git a/sys/dev/wtap/if_wtap_module.c b/sys/dev/wtap/if_wtap_module.c
index 52a371fd4b8f..6b5f8502bd08 100644
--- a/sys/dev/wtap/if_wtap_module.c
+++ b/sys/dev/wtap/if_wtap_module.c
@@ -28,52 +28,19 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
+
#include <sys/param.h>
#include <sys/module.h>
-#include <sys/kernel.h>
-#include <sys/systm.h>
-#include <sys/sysctl.h>
-#include <sys/mbuf.h>
#include <sys/malloc.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/proc.h>
-#include <sys/ucred.h>
#include <sys/jail.h>
-#include <sys/sockio.h>
#include <sys/socket.h>
-#include <sys/socketvar.h>
-#include <sys/errno.h>
-#include <sys/callout.h>
-#include <sys/endian.h>
-#include <sys/kthread.h>
-#include <sys/taskqueue.h>
-#include <sys/priv.h>
-#include <sys/sysctl.h>
-
-#include <machine/bus.h>
#include <net/if.h>
-#include <net/if_dl.h>
#include <net/if_media.h>
-#include <net/if_types.h>
-#include <net/if_arp.h>
#include <net/ethernet.h>
-#include <net/if_llc.h>
-#include <net/vnet.h>
-
-#include <net80211/ieee80211_var.h>
-#include <net80211/ieee80211_regdomain.h>
-
-#include <net/bpf.h>
-
-#include <sys/errno.h>
-#include <sys/conf.h> /* cdevsw struct */
-#include <sys/uio.h> /* uio struct */
#include <netinet/in.h>
-#include <netinet/if_ether.h>
#include "if_wtapvar.h"
#include "if_wtapioctl.h"
diff --git a/sys/dev/wtap/if_wtapioctl.h b/sys/dev/wtap/if_wtapioctl.h
index 6cde392ab7c1..05f88a5dcb81 100644
--- a/sys/dev/wtap/if_wtapioctl.h
+++ b/sys/dev/wtap/if_wtapioctl.h
@@ -40,7 +40,7 @@
#ifndef _DEV_WTAP_WTAPIOCTL_H
#define _DEV_WTAP_WTAPIOCTL_H
-#include <sys/param.h>
+#include <sys/types.h>
#include <net80211/ieee80211_radiotap.h>
#define SIOCGATHSTATS _IOWR('i', 137, struct ifreq)
diff --git a/sys/dev/wtap/if_wtapvar.h b/sys/dev/wtap/if_wtapvar.h
index 4a7336368843..0cc0fd487a1c 100644
--- a/sys/dev/wtap/if_wtapvar.h
+++ b/sys/dev/wtap/if_wtapvar.h
@@ -34,46 +34,18 @@
#include <sys/param.h>
#include <sys/conf.h>
-#include <sys/module.h>
-#include <sys/kernel.h>
-#include <sys/systm.h>
-#include <sys/sysctl.h>
-#include <sys/mbuf.h>
#include <sys/malloc.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
#include <sys/sockio.h>
#include <sys/socket.h>
-#include <sys/socketvar.h>
-#include <sys/errno.h>
-#include <sys/callout.h>
-#include <sys/endian.h>
-#include <sys/kthread.h>
-#include <sys/taskqueue.h>
-#include <sys/priv.h>
-#include <sys/sysctl.h>
-
-#include <machine/bus.h>
#include <net/if.h>
-#include <net/if_dl.h>
#include <net/if_media.h>
-#include <net/if_types.h>
-#include <net/if_arp.h>
#include <net/ethernet.h>
-#include <net/if_llc.h>
#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_regdomain.h>
-#include <net/bpf.h>
-
-#include <net/vnet.h>
-
-#include <netinet/in.h>
-#include <netinet/if_ether.h>
-
#if 0
#define DWTAP_PRINTF(...) printf(__VA_ARGS__)
#else
diff --git a/sys/dev/wtap/plugins/visibility.c b/sys/dev/wtap/plugins/visibility.c
index 2ff695fba5f1..1e6a9057728f 100644
--- a/sys/dev/wtap/plugins/visibility.c
+++ b/sys/dev/wtap/plugins/visibility.c
@@ -28,52 +28,14 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
+
#include <sys/param.h>
-#include <sys/module.h>
-#include <sys/kernel.h>
-#include <sys/systm.h>
-#include <sys/sysctl.h>
-#include <sys/mbuf.h>
-#include <sys/malloc.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/proc.h>
-#include <sys/ucred.h>
#include <sys/jail.h>
-#include <sys/sockio.h>
#include <sys/socket.h>
-#include <sys/socketvar.h>
-#include <sys/errno.h>
-#include <sys/callout.h>
-#include <sys/endian.h>
-#include <sys/kthread.h>
-#include <sys/taskqueue.h>
-#include <sys/priv.h>
-#include <sys/sysctl.h>
-
-#include <machine/bus.h>
#include <net/if.h>
-#include <net/if_dl.h>
-#include <net/if_media.h>
-#include <net/if_types.h>
-#include <net/if_arp.h>
-#include <net/ethernet.h>
-#include <net/if_llc.h>
-#include <net/vnet.h>
-
-#include <net80211/ieee80211_var.h>
-#include <net80211/ieee80211_regdomain.h>
-
-#include <net/bpf.h>
-
-#include <sys/errno.h>
-#include <sys/conf.h> /* cdevsw struct */
-#include <sys/uio.h> /* uio struct */
-
#include <netinet/in.h>
-#include <netinet/if_ether.h>
#include "visibility.h"
diff --git a/sys/dev/wtap/plugins/visibility_ioctl.h b/sys/dev/wtap/plugins/visibility_ioctl.h
index b9c43984b4f5..a5c09e6af22a 100644
--- a/sys/dev/wtap/plugins/visibility_ioctl.h
+++ b/sys/dev/wtap/plugins/visibility_ioctl.h
@@ -36,8 +36,6 @@
#ifndef _VISIBILITY_IOCTL_H
#define _VISIBILITY_IOCTL_H
-#include <sys/param.h>
-
struct link {
int op; //0 remove, 1 link
int id1;
diff --git a/sys/dev/xdma/xdma.c b/sys/dev/xdma/xdma.c
index 62b781159d03..cdd9ad0b8f39 100644
--- a/sys/dev/xdma/xdma.c
+++ b/sys/dev/xdma/xdma.c
@@ -555,7 +555,7 @@ xdma_put(xdma_controller_t *xdma)
}
static void
-xdma_init(void)
+xdma_init(void *dummy __unused)
{
mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c
index 3717264256f3..c6cba729b991 100644
--- a/sys/dev/xen/blkback/blkback.c
+++ b/sys/dev/xen/blkback/blkback.c
@@ -145,6 +145,8 @@ static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data");
*/
#define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST
+#define XBD_SECTOR_SHFT 9
+
/*--------------------------- Forward Declarations ---------------------------*/
struct xbb_softc;
struct xbb_xen_req;
@@ -1150,7 +1152,9 @@ xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist,
if (*reqlist == NULL) {
*reqlist = nreqlist;
nreqlist->operation = ring_req->operation;
- nreqlist->starting_sector_number = ring_req->sector_number;
+ nreqlist->starting_sector_number =
+ (ring_req->sector_number << XBD_SECTOR_SHFT) >>
+ xbb->sector_size_shift;
STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist,
links);
}
@@ -2476,13 +2480,13 @@ xbb_open_file(struct xbb_softc *xbb)
xbb->sector_size = 512;
/*
- * Sanity check. The media size has to be at least one
- * sector long.
+ * Sanity check. The media size must be a multiple of the sector
+ * size.
*/
- if (xbb->media_size < xbb->sector_size) {
+ if ((xbb->media_size % xbb->sector_size) != 0) {
error = EINVAL;
xenbus_dev_fatal(xbb->dev, error,
- "file %s size %ju < block size %u",
+ "file %s size %ju not multiple of block size %u",
xbb->dev_name,
(uintmax_t)xbb->media_size,
xbb->sector_size);
@@ -3086,9 +3090,13 @@ xbb_publish_backend_info(struct xbb_softc *xbb)
return (error);
}
+ /*
+ * The 'sectors' node is special and always contains the size
+ * in units of 512b, regardless of the value in 'sector-size'.
+ */
leaf = "sectors";
- error = xs_printf(xst, our_path, leaf,
- "%"PRIu64, xbb->media_num_sectors);
+ error = xs_printf(xst, our_path, leaf, "%ju",
+ (uintmax_t)(xbb->media_size >> XBD_SECTOR_SHFT));
if (error != 0)
break;
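
The blkif protocol fixes sector numbers at 512-byte granularity, which is why the hunks above rescale ring sector numbers by XBD_SECTOR_SHFT before applying the backend's native sector size. A minimal userspace sketch of that conversion (the function name is illustrative, not the driver's own):

#include <assert.h>
#include <stdint.h>

#define XBD_SECTOR_SHFT	9	/* blkif sectors are always 512 bytes */

/* Convert a blkif (512-byte unit) sector number to a native sector number. */
static uint64_t
blkif_to_native_sector(uint64_t blkif_sector, unsigned int sector_size_shift)
{
	/* Byte offset first, then divide by the native sector size. */
	return ((blkif_sector << XBD_SECTOR_SHFT) >> sector_size_shift);
}

int
main(void)
{
	/* 4096-byte backend sectors: blkif sector 4096 is native sector 512. */
	assert(blkif_to_native_sector(4096, 12) == 512);
	return (0);
}
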
diff --git a/sys/dev/xen/blkfront/blkfront.c b/sys/dev/xen/blkfront/blkfront.c
index 2c5862232b41..6bb4e32f1328 100644
--- a/sys/dev/xen/blkfront/blkfront.c
+++ b/sys/dev/xen/blkfront/blkfront.c
@@ -158,7 +158,8 @@ xbd_free_command(struct xbd_command *cm)
static void
xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
grant_ref_t * gref_head, int otherend_id, int readonly,
- grant_ref_t * sg_ref, struct blkif_request_segment *sg)
+ grant_ref_t * sg_ref, struct blkif_request_segment *sg,
+ unsigned int sector_size)
{
struct blkif_request_segment *last_block_sg = sg + nsegs;
vm_paddr_t buffer_ma;
@@ -166,9 +167,9 @@ xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
int ref;
while (sg < last_block_sg) {
- KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
+ KASSERT((segs->ds_addr & (sector_size - 1)) == 0,
("XEN disk driver I/O must be sector aligned"));
- KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
+ KASSERT((segs->ds_len & (sector_size - 1)) == 0,
("XEN disk driver I/Os must be a multiple of "
"the sector length"));
buffer_ma = segs->ds_addr;
@@ -241,7 +242,8 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
xenbus_get_otherend_id(sc->xbd_dev),
cm->cm_operation == BLKIF_OP_WRITE,
- cm->cm_sg_refs, ring_req->seg);
+ cm->cm_sg_refs, ring_req->seg,
+ sc->xbd_disk->d_sectorsize);
} else {
blkif_request_indirect_t *ring_req;
@@ -259,7 +261,8 @@ xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
xenbus_get_otherend_id(sc->xbd_dev),
cm->cm_operation == BLKIF_OP_WRITE,
- cm->cm_sg_refs, cm->cm_indirectionpages);
+ cm->cm_sg_refs, cm->cm_indirectionpages,
+ sc->xbd_disk->d_sectorsize);
memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
}
@@ -359,7 +362,9 @@ xbd_bio_command(struct xbd_softc *sc)
}
cm->cm_bp = bp;
- cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
+ cm->cm_sector_number =
+ ((blkif_sector_t)bp->bio_pblkno * sc->xbd_disk->d_sectorsize) >>
+ XBD_SECTOR_SHFT;
switch (bp->bio_cmd) {
case BIO_READ:
@@ -631,7 +636,7 @@ xbd_dump(void *arg, void *virtual, off_t offset, size_t length)
cm->cm_data = virtual;
cm->cm_datalen = chunk;
cm->cm_operation = BLKIF_OP_WRITE;
- cm->cm_sector_number = offset / dp->d_sectorsize;
+ cm->cm_sector_number = offset >> XBD_SECTOR_SHFT;
cm->cm_complete = xbd_dump_complete;
xbd_enqueue_cm(cm, XBD_Q_READY);
@@ -1025,7 +1030,19 @@ xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors,
sc->xbd_disk->d_stripesize = phys_sector_size;
sc->xbd_disk->d_stripeoffset = 0;
- sc->xbd_disk->d_mediasize = sectors * sector_size;
+ /*
+ * The 'sectors' xenbus node is always in units of 512b, regardless of
+ * the 'sector-size' xenbus node value.
+ */
+ sc->xbd_disk->d_mediasize = sectors << XBD_SECTOR_SHFT;
+ if ((sc->xbd_disk->d_mediasize % sc->xbd_disk->d_sectorsize) != 0) {
+ error = EINVAL;
+ xenbus_dev_fatal(sc->xbd_dev, error,
+ "Disk size (%ju) not a multiple of sector size (%ju)",
+ (uintmax_t)sc->xbd_disk->d_mediasize,
+ (uintmax_t)sc->xbd_disk->d_sectorsize);
+ return (error);
+ }
sc->xbd_disk->d_maxsize = sc->xbd_max_request_size;
sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO;
if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) {
@@ -1064,9 +1081,7 @@ xbd_free(struct xbd_softc *sc)
gnttab_end_foreign_access_references(
sc->xbd_max_request_indirectpages,
&cm->cm_indirectionrefs[0]);
- contigfree(cm->cm_indirectionpages, PAGE_SIZE *
- sc->xbd_max_request_indirectpages,
- M_XENBLOCKFRONT);
+ free(cm->cm_indirectionpages, M_XENBLOCKFRONT);
cm->cm_indirectionpages = NULL;
}
@@ -1312,7 +1327,7 @@ xbd_connect(struct xbd_softc *sc)
/* Allocate datastructures based on negotiated values. */
err = bus_dma_tag_create(
bus_get_dma_tag(sc->xbd_dev), /* parent */
- 512, PAGE_SIZE, /* algnmnt, boundary */
+ sector_size, PAGE_SIZE, /* algnmnt, boundary */
BUS_SPACE_MAXADDR, /* lowaddr */
BUS_SPACE_MAXADDR, /* highaddr */
NULL, NULL, /* filter, filterarg */
@@ -1373,9 +1388,7 @@ xbd_connect(struct xbd_softc *sc)
break;
}
if (j < sc->xbd_max_request_indirectpages) {
- contigfree(indirectpages,
- PAGE_SIZE * sc->xbd_max_request_indirectpages,
- M_XENBLOCKFRONT);
+ free(indirectpages, M_XENBLOCKFRONT);
break;
}
cm->cm_indirectionpages = indirectpages;
@@ -1384,13 +1397,17 @@ xbd_connect(struct xbd_softc *sc)
if (sc->xbd_disk == NULL) {
device_printf(dev, "%juMB <%s> at %s",
- (uintmax_t) sectors / (1048576 / sector_size),
+ (uintmax_t)((sectors << XBD_SECTOR_SHFT) / 1048576),
device_get_desc(dev),
xenbus_get_node(dev));
bus_print_child_footer(device_get_parent(dev), dev);
- xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
+ err = xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo,
sector_size, phys_sector_size);
+ if (err != 0) {
+ xenbus_dev_fatal(dev, err, "Unable to create instance");
+ return;
+ }
}
(void)xenbus_set_state(dev, XenbusStateConnected);
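
The reworked KASSERTs above test alignment with a mask rather than a modulo; the two are only equivalent when the negotiated sector size is a power of two, as disk sector sizes are. A small sketch of the check under that assumption:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
is_sector_aligned(uint64_t addr, unsigned int sector_size)
{
	/* The mask trick requires a nonzero power-of-two sector size. */
	assert(sector_size != 0 && (sector_size & (sector_size - 1)) == 0);
	return ((addr & (sector_size - 1)) == 0);
}

int
main(void)
{
	assert(is_sector_aligned(8192, 4096));
	assert(!is_sector_aligned(512, 4096));
	return (0);
}
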
diff --git a/sys/dev/xen/bus/xen_intr.c b/sys/dev/xen/bus/xen_intr.c
index bfe080b16f03..2b5fa8fb7cd1 100644
--- a/sys/dev/xen/bus/xen_intr.c
+++ b/sys/dev/xen/bus/xen_intr.c
@@ -45,12 +45,12 @@
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/refcount.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/smp.h>
-#include <machine/stdarg.h>
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
@@ -460,7 +460,7 @@ xen_intr_handle_upcall(void *unused __unused)
return (FILTER_HANDLED);
}
-static int
+static void
xen_intr_init(void *dummy __unused)
{
shared_info_t *s = HYPERVISOR_shared_info;
@@ -468,7 +468,7 @@ xen_intr_init(void *dummy __unused)
int i;
if (!xen_domain())
- return (0);
+ return;
_Static_assert(is_valid_evtchn(0),
"is_valid_evtchn(0) fails (unused by Xen, but valid by interface");
@@ -502,8 +502,6 @@ xen_intr_init(void *dummy __unused)
if (bootverbose)
printf("Xen interrupt system initialized\n");
-
- return (0);
}
SYSINIT(xen_intr_init, SI_SUB_INTR, SI_ORDER_SECOND, xen_intr_init, NULL);
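
xen_intr_init() changes from int to void above because SYSINIT handlers take a single argument and return nothing; any status they used to return was simply discarded. The expected shape, as a minimal sketch:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>

static void
example_init(void *dummy __unused)
{
	/* One-time setup; there is no caller to return a status to. */
	printf("example subsystem initialized\n");
}
SYSINIT(example_init, SI_SUB_INTR, SI_ORDER_SECOND, example_init, NULL);
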
diff --git a/sys/dev/xen/bus/xenpv.c b/sys/dev/xen/bus/xenpv.c
index 169e52f3a879..f710b0f91140 100644
--- a/sys/dev/xen/bus/xenpv.c
+++ b/sys/dev/xen/bus/xenpv.c
@@ -65,6 +65,16 @@
#define LOW_MEM_LIMIT 0
#endif
+/*
+ * Memory ranges available for creating external mappings (foreign or grant
+ * pages for example).
+ */
+static struct rman unpopulated_mem = {
+ .rm_end = ~0,
+ .rm_type = RMAN_ARRAY,
+ .rm_descr = "Xen scratch memory",
+};
+
static void
xenpv_identify(driver_t *driver, device_t parent)
{
@@ -91,23 +101,43 @@ xenpv_probe(device_t dev)
return (BUS_PROBE_NOWILDCARD);
}
+/* Dummy init for arches that don't have a specific implementation. */
+int __weak_symbol
+xen_arch_init_physmem(device_t dev, struct rman *mem)
+{
+
+ return (0);
+}
+
static int
xenpv_attach(device_t dev)
{
- int error;
+ int error = rman_init(&unpopulated_mem);
+
+ if (error != 0)
+ return (error);
+
+ error = xen_arch_init_physmem(dev, &unpopulated_mem);
+ if (error != 0)
+ return (error);
/*
* Let our child drivers identify any child devices that they
* can find. Once that is done attach any devices that we
* found.
*/
- error = bus_generic_probe(dev);
- if (error)
- return (error);
+ bus_identify_children(dev);
+ bus_attach_children(dev);
- error = bus_generic_attach(dev);
+ return (0);
+}
- return (error);
+static int
+release_unpopulated_mem(device_t dev, struct resource *res)
+{
+
+ return (rman_is_region_manager(res, &unpopulated_mem) ?
+ rman_release_resource(res) : bus_release_resource(dev, res));
}
static struct resource *
@@ -117,17 +147,48 @@ xenpv_alloc_physmem(device_t dev, device_t child, int *res_id, size_t size)
vm_paddr_t phys_addr;
void *virt_addr;
int error;
+ const unsigned int flags = RF_ACTIVE | RF_UNMAPPED |
+ RF_ALIGNMENT_LOG2(PAGE_SHIFT);
+
+ KASSERT((size & PAGE_MASK) == 0, ("unaligned size requested"));
+ size = round_page(size);
+
+ /* Attempt to allocate from arch resource manager. */
+ res = rman_reserve_resource(&unpopulated_mem, 0, ~0, size, flags,
+ child);
+ if (res != NULL) {
+ rman_set_rid(res, *res_id);
+ rman_set_type(res, SYS_RES_MEMORY);
+ } else {
+ static bool warned = false;
- res = bus_alloc_resource(child, SYS_RES_MEMORY, res_id, LOW_MEM_LIMIT,
- ~0, size, RF_ACTIVE | RF_UNMAPPED);
- if (res == NULL)
+ /* Fallback to generic MMIO allocator. */
+ if (__predict_false(!warned)) {
+ warned = true;
+ device_printf(dev,
+ "unable to allocate from arch specific routine, "
+ "fall back to unused memory areas\n");
+ }
+ res = bus_alloc_resource(child, SYS_RES_MEMORY, res_id,
+ LOW_MEM_LIMIT, ~0, size, flags);
+ }
+
+ if (res == NULL) {
+ device_printf(dev,
+ "failed to allocate Xen unpopulated memory\n");
return (NULL);
+ }
phys_addr = rman_get_start(res);
error = vm_phys_fictitious_reg_range(phys_addr, phys_addr + size,
VM_MEMATTR_XEN);
if (error) {
- bus_release_resource(child, SYS_RES_MEMORY, *res_id, res);
+ int error = release_unpopulated_mem(child, res);
+
+ if (error != 0)
+ device_printf(dev, "failed to release resource: %d\n",
+ error);
+
return (NULL);
}
virt_addr = pmap_mapdev_attr(phys_addr, size, VM_MEMATTR_XEN);
@@ -150,7 +211,8 @@ xenpv_free_physmem(device_t dev, device_t child, int res_id, struct resource *re
pmap_unmapdev(virt_addr, size);
vm_phys_fictitious_unreg_range(phys_addr, phys_addr + size);
- return (bus_release_resource(child, SYS_RES_MEMORY, res_id, res));
+
+ return (release_unpopulated_mem(child, res));
}
static device_method_t xenpv_methods[] = {
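
xen_arch_init_physmem() above relies on weak symbols: the generic stub is linked in unless an architecture supplies a strong definition of the same name. A sketch of the pattern, with an illustrative symbol name:

#include <sys/param.h>
#include <sys/systm.h>

/* Generic fallback; an MD file may provide a strong foo_arch_init(). */
int __weak_symbol
foo_arch_init(void)
{
	/* Nothing to do on architectures without a specific routine. */
	return (0);
}
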
diff --git a/sys/dev/xen/console/xen_console.c b/sys/dev/xen/console/xen_console.c
index f1a298a2aefa..1d12d6caa257 100644
--- a/sys/dev/xen/console/xen_console.c
+++ b/sys/dev/xen/console/xen_console.c
@@ -42,8 +42,7 @@
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/reboot.h>
-
-#include <machine/stdarg.h>
+#include <sys/stdarg.h>
#include <vm/vm.h>
#include <vm/pmap.h>
diff --git a/sys/dev/xen/control/control.c b/sys/dev/xen/control/control.c
index 1dc1df935b84..2c61b48c0451 100644
--- a/sys/dev/xen/control/control.c
+++ b/sys/dev/xen/control/control.c
@@ -1,5 +1,5 @@
/*-
- * SPDX-License-Identifier: BSD-2-Clause AND BSD-4-Clause
+ * SPDX-License-Identifier: BSD-2-Clause
*
* Copyright (c) 2010 Justin T. Gibbs, Spectra Logic Corporation
* All rights reserved.
@@ -31,39 +31,6 @@
*/
/*-
- * PV suspend/resume support:
- *
- * Copyright (c) 2004 Christian Limpach.
- * Copyright (c) 2004-2006,2008 Kip Macy
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by Christian Limpach.
- * 4. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*-
* HVM suspend/resume support:
*
* Copyright (c) 2008 Citrix Systems, Inc.
@@ -124,6 +91,7 @@
#include <sys/smp.h>
#include <sys/eventhandler.h>
#include <sys/timetc.h>
+#include <sys/power.h>
#include <geom/geom.h>
@@ -208,12 +176,12 @@ xctrl_suspend(void)
cpuset_t cpu_suspend_map;
#endif
- EVENTHANDLER_INVOKE(power_suspend_early);
+ EVENTHANDLER_INVOKE(power_suspend_early, POWER_STYPE_SUSPEND_TO_MEM);
xs_lock();
stop_all_proc();
xs_unlock();
suspend_all_fs();
- EVENTHANDLER_INVOKE(power_suspend);
+ EVENTHANDLER_INVOKE(power_suspend, POWER_STYPE_SUSPEND_TO_MEM);
#ifdef EARLY_AP_STARTUP
MPASS(mp_ncpus == 1 || smp_started);
@@ -330,7 +298,7 @@ xctrl_suspend(void)
resume_all_fs();
resume_all_proc();
- EVENTHANDLER_INVOKE(power_resume);
+ EVENTHANDLER_INVOKE(power_resume, POWER_STYPE_SUSPEND_TO_MEM);
if (bootverbose)
printf("System resumed after suspension\n");
@@ -394,6 +362,20 @@ xctrl_on_watch_event(struct xs_watch *watch, const char **vec, unsigned int len)
}
/*------------------ Private Device Attachment Functions --------------------*/
+
+static void
+notify_support(void)
+{
+ /*
+ * Notify that the kernel is ready to handle "control/shutdown" events.
+ * Ignore errors in case the nodes haven't been created by the toolstack,
+ * as the parent "control" directory can be read-only for the guest.
+ */
+ xs_write(XST_NIL, "control", "feature-poweroff", "1");
+ xs_write(XST_NIL, "control", "feature-reboot", "1");
+ xs_write(XST_NIL, "control", "feature-suspend", "1");
+}
+
/**
* \brief Identify instances of this device type in the system.
*
@@ -455,6 +437,8 @@ xctrl_attach(device_t dev)
EVENTHANDLER_REGISTER(shutdown_final, xctrl_shutdown_final, NULL,
SHUTDOWN_PRI_LAST);
+ notify_support();
+
return (0);
}
@@ -479,6 +463,14 @@ xctrl_detach(device_t dev)
return (0);
}
+static int
+xctrl_resume(device_t dev)
+{
+ notify_support();
+
+ return (0);
+}
+
/*-------------------- Private Device Attachment Data -----------------------*/
static device_method_t xctrl_methods[] = {
/* Device interface */
@@ -486,6 +478,7 @@ static device_method_t xctrl_methods[] = {
DEVMETHOD(device_probe, xctrl_probe),
DEVMETHOD(device_attach, xctrl_attach),
DEVMETHOD(device_detach, xctrl_detach),
+ DEVMETHOD(device_resume, xctrl_resume),
DEVMETHOD_END
};
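
The suspend and resume eventhandlers above now carry a POWER_STYPE_* argument. Assuming the handler prototype gained the same parameter (the enum type name here is a guess, not taken from the tree), a consumer would look roughly like:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/eventhandler.h>
#include <sys/power.h>

static void
example_suspend(void *arg __unused, enum power_stype stype)
{
	if (stype == POWER_STYPE_SUSPEND_TO_MEM)
		printf("suspending to RAM\n");
}
EVENTHANDLER_DEFINE(power_suspend, example_suspend, NULL,
    EVENTHANDLER_PRI_ANY);
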
diff --git a/sys/dev/xen/debug/debug.c b/sys/dev/xen/debug/debug.c
index f17d0c612262..2889b5efdba7 100644
--- a/sys/dev/xen/debug/debug.c
+++ b/sys/dev/xen/debug/debug.c
@@ -58,8 +58,11 @@ static struct sbuf *buf;
static int
xendebug_drain(void *arg, const char *str, int len)
{
-
- HYPERVISOR_console_write(__DECONST(char *, str), len);
+ /*
+ * Use xen_emergency_print() instead of xc_printf() to avoid the
+ * overhead of parsing a format string when it's not needed.
+ */
+ xen_emergency_print(str, len);
return (len);
}
@@ -75,10 +78,9 @@ xendebug_filter(void *arg __unused)
stack_save(&st);
mtx_lock_spin(&lock);
- sbuf_clear(buf);
xc_printf("Printing stack trace vCPU%u\n", XEN_VCPUID());
stack_sbuf_print_ddb(buf, &st);
- sbuf_finish(buf);
+ sbuf_drain(buf);
mtx_unlock_spin(&lock);
#endif
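
The switch from sbuf_finish() to sbuf_drain() above works because the sbuf carries a drain callback (xendebug_drain); draining pushes buffered bytes through that callback instead of terminating the buffer. A minimal sketch of the drain-sbuf pattern, mirroring the signatures used above:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sbuf.h>

static int
example_drain(void *arg __unused, const char *data, int len)
{
	printf("%.*s", len, data);
	return (len);		/* bytes consumed */
}

static void
example_emit(void)
{
	struct sbuf *sb;

	sb = sbuf_new(NULL, NULL, 128, SBUF_FIXEDLEN);
	sbuf_set_drain(sb, example_drain, NULL);
	sbuf_printf(sb, "hello from vCPU%u\n", 0);
	sbuf_drain(sb);		/* flush through the callback */
	sbuf_delete(sb);
}
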
diff --git a/sys/dev/xen/efi/pvefi.c b/sys/dev/xen/efi/pvefi.c
index f400060c1aac..b69fcd5b80ac 100644
--- a/sys/dev/xen/efi/pvefi.c
+++ b/sys/dev/xen/efi/pvefi.c
@@ -122,7 +122,7 @@ set_time(struct efi_tm *tm)
}
static int
-var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib,
+var_get(efi_char *name, efi_guid_t *vendor, uint32_t *attrib,
size_t *datasize, void *data)
{
struct xen_platform_op op = {
@@ -151,7 +151,7 @@ var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib,
}
static int
-var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor)
+var_nextname(size_t *namesize, efi_char *name, efi_guid_t *vendor)
{
struct xen_platform_op op = {
.cmd = XENPF_efi_runtime_call,
@@ -177,7 +177,7 @@ var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor)
}
static int
-var_set(efi_char *name, struct uuid *vendor, uint32_t attrib,
+var_set(efi_char *name, efi_guid_t *vendor, uint32_t attrib,
size_t datasize, void *data)
{
struct xen_platform_op op = {
diff --git a/sys/dev/xen/gntdev/gntdev.c b/sys/dev/xen/gntdev/gntdev.c
index 4530feb1c76d..e3bc1ecf35ab 100644
--- a/sys/dev/xen/gntdev/gntdev.c
+++ b/sys/dev/xen/gntdev/gntdev.c
@@ -563,7 +563,6 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
{
uint32_t i;
int error, count;
- vm_page_t m;
struct gnttab_unmap_grant_ref *unmap_ops;
unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
@@ -592,17 +591,7 @@ notify_unmap_cleanup(struct gntdev_gmap *gmap)
}
/* Free the pages. */
- VM_OBJECT_WLOCK(gmap->map->mem);
-retry:
- for (i = 0; i < gmap->count; i++) {
- m = vm_page_lookup(gmap->map->mem, i);
- if (m == NULL)
- continue;
- if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
- goto retry;
- cdev_pager_free_page(gmap->map->mem, m);
- }
- VM_OBJECT_WUNLOCK(gmap->map->mem);
+ cdev_mgtdev_pager_free_pages(gmap->map->mem);
/* Perform unmap hypercall. */
error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
diff --git a/sys/dev/xen/netfront/netfront.c b/sys/dev/xen/netfront/netfront.c
index 6ac6ecc3bdb7..3bc3679eb0db 100644
--- a/sys/dev/xen/netfront/netfront.c
+++ b/sys/dev/xen/netfront/netfront.c
@@ -335,8 +335,16 @@ static void mbuf_release(struct mbuf *m)
KASSERT(ref != NULL, ("Cannot find refcount"));
KASSERT(ref->count > 0, ("Invalid reference count"));
- if (--ref->count == 0)
+ if (--ref->count == 0) {
+ /*
+ * Explicitly free the tag while we hold the tx queue lock.
+ * This ensures that the tag is deleted promptly in case
+ * something else is holding extra references to the mbuf chain,
+ * such as netmap.
+ */
+ m_tag_delete(m, &ref->tag);
m_freem(m);
+ }
}
static void tag_free(struct m_tag *t)
@@ -1021,27 +1029,6 @@ out:
return (error);
}
-#ifdef INET
-static u_int
-netfront_addr_cb(void *arg, struct ifaddr *a, u_int count)
-{
- arp_ifinit((if_t)arg, a);
- return (1);
-}
-/**
- * If this interface has an ipv4 address, send an arp for it. This
- * helps to get the network going again after migrating hosts.
- */
-static void
-netfront_send_fake_arp(device_t dev, struct netfront_info *info)
-{
- if_t ifp;
-
- ifp = info->xn_ifp;
- if_foreach_addr_type(ifp, AF_INET, netfront_addr_cb, ifp);
-}
-#endif
-
/**
* Callback received when the backend's state changes.
*/
@@ -1082,7 +1069,12 @@ netfront_backend_changed(device_t dev, XenbusState newstate)
break;
case XenbusStateConnected:
#ifdef INET
- netfront_send_fake_arp(dev, sc);
+ /*
+ * If this interface has an ipv4 address, send an arp for it.
+ * This helps to get the network going again after migrating
+ * hosts.
+ */
+ EVENTHANDLER_INVOKE(iflladdr_event, sc->xn_ifp);
#endif
break;
}
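
The hunk above frees the mbuf tag explicitly instead of relying on m_freem(), whose tag cleanup only runs once the last reference to the chain goes away. For context, a sketch of how such a tag with a free callback is attached in the first place (cookie, type, and names are illustrative):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/mbuf.h>

#define EXAMPLE_COOKIE	0x58454e46	/* arbitrary module cookie */
#define EXAMPLE_TYPE	1

static void
example_tag_free(struct m_tag *t)
{
	/* Release whatever the tag tracked; runs when the tag is deleted. */
}

static int
example_attach_tag(struct mbuf *m)
{
	struct m_tag *t;

	t = m_tag_alloc(EXAMPLE_COOKIE, EXAMPLE_TYPE, 0, M_NOWAIT);
	if (t == NULL)
		return (ENOMEM);
	t->m_tag_free = example_tag_free;
	m_tag_prepend(m, t);
	return (0);
}
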
diff --git a/sys/dev/xen/pcifront/pcifront.c b/sys/dev/xen/pcifront/pcifront.c
new file mode 100644
index 000000000000..76339cd3d361
--- /dev/null
+++ b/sys/dev/xen/pcifront/pcifront.c
@@ -0,0 +1,690 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2006, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Cisco Systems, Inc. nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/module.h>
+#include <sys/systm.h>
+#include <sys/mbuf.h>
+#include <sys/malloc.h>
+#include <sys/kernel.h>
+#include <sys/socket.h>
+#include <sys/queue.h>
+
+#include <machine/vmparam.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <machine/frame.h>
+
+#include <sys/bus.h>
+#include <sys/rman.h>
+
+#include <machine/intr_machdep.h>
+
+#include <machine/xen-os.h>
+#include <machine/hypervisor.h>
+#include <machine/hypervisor-ifs.h>
+#include <machine/xen_intr.h>
+#include <machine/evtchn.h>
+#include <machine/xenbus.h>
+#include <machine/gnttab.h>
+#include <machine/xen-public/memory.h>
+#include <machine/xen-public/io/pciif.h>
+
+#include <sys/pciio.h>
+#include <dev/pci/pcivar.h>
+#include "pcib_if.h"
+
+#ifdef XEN_PCIDEV_FE_DEBUG
+#define DPRINTF(fmt, args...) \
+ printf("pcifront (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
+#else
+#define DPRINTF(fmt, args...) ((void)0)
+#endif
+#define WPRINTF(fmt, args...) \
+ printf("pcifront (%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
+
+#define INVALID_GRANT_REF (0)
+#define INVALID_EVTCHN (-1)
+#define virt_to_mfn(x) (vtophys(x) >> PAGE_SHIFT)
+
+struct pcifront_device {
+ STAILQ_ENTRY(pcifront_device) next;
+
+ struct xenbus_device *xdev;
+
+ int unit;
+ int evtchn;
+ int gnt_ref;
+
+ /* Lock this when doing any operations in sh_info */
+ struct mtx sh_info_lock;
+ struct xen_pci_sharedinfo *sh_info;
+
+ device_t ndev;
+
+ int ref_cnt;
+};
+
+static STAILQ_HEAD(pcifront_dlist, pcifront_device) pdev_list = STAILQ_HEAD_INITIALIZER(pdev_list);
+
+struct xpcib_softc {
+ int domain;
+ int bus;
+ struct pcifront_device *pdev;
+};
+
+/* Allocate a PCI device structure */
+static struct pcifront_device *
+alloc_pdev(struct xenbus_device *xdev)
+{
+ struct pcifront_device *pdev = NULL;
+ int err, unit;
+
+ err = sscanf(xdev->nodename, "device/pci/%d", &unit);
+ if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(xdev, err, "Error scanning pci device instance number");
+ goto out;
+ }
+
+ pdev = (struct pcifront_device *)malloc(sizeof(struct pcifront_device), M_DEVBUF, M_NOWAIT);
+ if (pdev == NULL) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(xdev, err, "Error allocating pcifront_device struct");
+ goto out;
+ }
+ pdev->unit = unit;
+ pdev->xdev = xdev;
+ pdev->ref_cnt = 1;
+
+ pdev->sh_info = (struct xen_pci_sharedinfo *)malloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT);
+ if (pdev->sh_info == NULL) {
+ free(pdev, M_DEVBUF);
+ pdev = NULL;
+ err = -ENOMEM;
+ xenbus_dev_fatal(xdev, err, "Error allocating sh_info struct");
+ goto out;
+ }
+ pdev->sh_info->flags = 0;
+
+ xdev->data = pdev;
+
+ mtx_init(&pdev->sh_info_lock, "info_lock", "pci shared dev info lock", MTX_DEF);
+
+ pdev->evtchn = INVALID_EVTCHN;
+ pdev->gnt_ref = INVALID_GRANT_REF;
+
+ STAILQ_INSERT_TAIL(&pdev_list, pdev, next);
+
+ DPRINTF("Allocated pdev @ 0x%p (unit=%d)\n", pdev, unit);
+
+ out:
+ return pdev;
+}
+
+/* Hold a reference to a pcifront device */
+static void
+get_pdev(struct pcifront_device *pdev)
+{
+ pdev->ref_cnt++;
+}
+
+/* Release a reference to a pcifront device */
+static void
+put_pdev(struct pcifront_device *pdev)
+{
+ if (--pdev->ref_cnt > 0)
+ return;
+
+ DPRINTF("freeing pdev @ 0x%p (ref_cnt=%d)\n", pdev, pdev->ref_cnt);
+
+ if (pdev->evtchn != INVALID_EVTCHN)
+ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
+
+ if (pdev->gnt_ref != INVALID_GRANT_REF)
+ gnttab_end_foreign_access(pdev->gnt_ref, 0, (void *)pdev->sh_info);
+
+ pdev->xdev->data = NULL;
+
+ free(pdev, M_DEVBUF);
+}
+
+/* Write to the xenbus info needed by backend */
+static int
+pcifront_publish_info(struct pcifront_device *pdev)
+{
+ int err = 0;
+ struct xenbus_transaction *trans;
+
+ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
+ if (err < 0) {
+ WPRINTF("error granting access to ring page\n");
+ goto out;
+ }
+
+ pdev->gnt_ref = err;
+
+ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
+ if (err)
+ goto out;
+
+ do_publish:
+ trans = xenbus_transaction_start();
+ if (IS_ERR(trans)) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend "
+ "(start transaction)");
+ goto out;
+ }
+
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "pci-op-ref", "%u", pdev->gnt_ref);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "event-channel", "%u", pdev->evtchn);
+ if (!err)
+ err = xenbus_printf(trans, pdev->xdev->nodename,
+ "magic", XEN_PCI_MAGIC);
+ if (!err)
+ err = xenbus_switch_state(pdev->xdev, trans,
+ XenbusStateInitialised);
+
+ if (err) {
+ xenbus_transaction_end(trans, 1);
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error writing configuration for backend");
+ goto out;
+ } else {
+ err = xenbus_transaction_end(trans, 0);
+ if (err == -EAGAIN)
+ goto do_publish;
+ else if (err) {
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error completing transaction for backend");
+ goto out;
+ }
+ }
+
+ out:
+ return err;
+}
+
+/* The backend is now connected, so complete the connection process on our side */
+static int
+pcifront_connect(struct pcifront_device *pdev)
+{
+ device_t nexus;
+ devclass_t nexus_devclass;
+
+ /* We will add our device as a child of the nexus0 device */
+ if (!(nexus_devclass = devclass_find("nexus")) ||
+ !(nexus = devclass_get_device(nexus_devclass, 0))) {
+ WPRINTF("could not find nexus0!\n");
+ return -1;
+ }
+
+ /* Create a newbus device representing this frontend instance */
+ pdev->ndev = BUS_ADD_CHILD(nexus, 0, "xpcife", pdev->unit);
+ if (!pdev->ndev) {
+ WPRINTF("could not create xpcife%d!\n", pdev->unit);
+ return -EFAULT;
+ }
+ get_pdev(pdev);
+ device_set_ivars(pdev->ndev, pdev);
+
+ /* Good to go connected now */
+ xenbus_switch_state(pdev->xdev, NULL, XenbusStateConnected);
+
+ printf("pcifront: connected to %s\n", pdev->xdev->nodename);
+
+ mtx_lock(&Giant);
+ device_probe_and_attach(pdev->ndev);
+ mtx_unlock(&Giant);
+
+ return 0;
+}
+
+/* The backend is closing so process a disconnect */
+static int
+pcifront_disconnect(struct pcifront_device *pdev)
+{
+ int err = 0;
+ XenbusState prev_state;
+
+ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
+
+ if (prev_state < XenbusStateClosing) {
+ err = xenbus_switch_state(pdev->xdev, NULL, XenbusStateClosing);
+ if (!err && prev_state == XenbusStateConnected) {
+ /* TODO - need to detach the newbus devices */
+ }
+ }
+
+ return err;
+}
+
+/* Process a probe from the xenbus */
+static int
+pcifront_probe(struct xenbus_device *xdev,
+ const struct xenbus_device_id *id)
+{
+ int err = 0;
+ struct pcifront_device *pdev;
+
+ DPRINTF("xenbus probing\n");
+
+ if ((pdev = alloc_pdev(xdev)) == NULL)
+ goto out;
+
+ err = pcifront_publish_info(pdev);
+
+ out:
+ if (err)
+ put_pdev(pdev);
+ return err;
+}
+
+/* Remove the xenbus PCI device */
+static int
+pcifront_remove(struct xenbus_device *xdev)
+{
+ DPRINTF("removing xenbus device node (%s)\n", xdev->nodename);
+ if (xdev->data)
+ put_pdev(xdev->data);
+ return 0;
+}
+
+/* Called by xenbus when our backend node changes state */
+static void
+pcifront_backend_changed(struct xenbus_device *xdev,
+ XenbusState be_state)
+{
+ struct pcifront_device *pdev = xdev->data;
+
+ switch (be_state) {
+ case XenbusStateClosing:
+ DPRINTF("backend closing (%s)\n", xdev->nodename);
+ pcifront_disconnect(pdev);
+ break;
+
+ case XenbusStateClosed:
+ DPRINTF("backend closed (%s)\n", xdev->nodename);
+ pcifront_disconnect(pdev);
+ break;
+
+ case XenbusStateConnected:
+ DPRINTF("backend connected (%s)\n", xdev->nodename);
+ pcifront_connect(pdev);
+ break;
+
+ default:
+ break;
+ }
+}
+
+/* Process PCI operation */
+static int
+do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
+{
+ int err = 0;
+ struct xen_pci_op *active_op = &pdev->sh_info->op;
+ evtchn_port_t port = pdev->evtchn;
+ time_t timeout;
+
+ mtx_lock(&pdev->sh_info_lock);
+
+ memcpy(active_op, op, sizeof(struct xen_pci_op));
+
+ /* Go */
+ wmb();
+ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
+ notify_remote_via_evtchn(port);
+
+ timeout = time_uptime + 2;
+
+ clear_evtchn(port);
+
+ /* Spin while waiting for the answer */
+ while (test_bit
+ (_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)) {
+ /* A distinct name: the outer err must survive for the caller. */
+ int poll_err = HYPERVISOR_poll(&port, 1, 3 * hz);
+ if (poll_err)
+ panic("Failed HYPERVISOR_poll: err=%d", poll_err);
+ clear_evtchn(port);
+ if (time_uptime > timeout) {
+ WPRINTF("pciback not responding!!!\n");
+ clear_bit(_XEN_PCIF_active,
+ (unsigned long *)&pdev->sh_info->flags);
+ err = XEN_PCI_ERR_dev_not_found;
+ goto out;
+ }
+ }
+
+ memcpy(op, active_op, sizeof(struct xen_pci_op));
+
+ err = op->err;
+ out:
+ mtx_unlock(&pdev->sh_info_lock);
+ return err;
+}
+
+/* ** XenBus Driver registration ** */
+
+static struct xenbus_device_id pcifront_ids[] = {
+ { "pci" },
+ { "" }
+};
+
+static struct xenbus_driver pcifront = {
+ .name = "pcifront",
+ .ids = pcifront_ids,
+ .probe = pcifront_probe,
+ .remove = pcifront_remove,
+ .otherend_changed = pcifront_backend_changed,
+};
+
+/* Register the driver with xenbus during sys init */
+static void
+pcifront_init(void *unused)
+{
+ if ((xen_start_info->flags & SIF_INITDOMAIN))
+ return;
+
+ DPRINTF("xenbus registering\n");
+
+ xenbus_register_frontend(&pcifront);
+}
+
+SYSINIT(pciif, SI_SUB_PSEUDO, SI_ORDER_ANY, pcifront_init, NULL)
+
+/* Newbus xpcife device driver probe */
+static int
+xpcife_probe(device_t dev)
+{
+#ifdef XEN_PCIDEV_FE_DEBUG
+ struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(dev);
+ DPRINTF("xpcife probe (unit=%d)\n", pdev->unit);
+#endif
+ return (BUS_PROBE_NOWILDCARD);
+}
+
+/* Newbus xpcife device driver attach */
+static int
+xpcife_attach(device_t dev)
+{
+ struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(dev);
+ int i, num_roots, len, err;
+ char str[64];
+ unsigned int domain, bus;
+
+ DPRINTF("xpcife attach (unit=%d)\n", pdev->unit);
+
+ err = xenbus_scanf(NULL, pdev->xdev->otherend,
+ "root_num", "%d", &num_roots);
+ if (err != 1) {
+ if (err == 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading number of PCI roots");
+ goto out;
+ }
+
+ /* Add a pcib device for each root */
+ for (i = 0; i < num_roots; i++) {
+ device_t child;
+
+ len = snprintf(str, sizeof(str), "root-%d", i);
+ if (__predict_false(len >= (sizeof(str) - 1))) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = xenbus_scanf(NULL, pdev->xdev->otherend, str,
+ "%x:%x", &domain, &bus);
+ if (err != 2) {
+ if (err >= 0)
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Error reading PCI root %d", i);
+ goto out;
+ }
+ err = 0;
+ if (domain != pdev->xdev->otherend_id) {
+ err = -EINVAL;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Domain mismatch %d != %d", domain, pdev->xdev->otherend_id);
+ goto out;
+ }
+
+ child = device_add_child(dev, "pcib", bus);
+ if (!child) {
+ err = -ENOMEM;
+ xenbus_dev_fatal(pdev->xdev, err,
+ "Unable to create pcib%d", bus);
+ goto out;
+ }
+ }
+
+ out:
+ bus_attach_children(dev);
+ return (0);
+}
+
+static device_method_t xpcife_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, xpcife_probe),
+ DEVMETHOD(device_attach, xpcife_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ /* Bus interface */
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ DEVMETHOD_END
+};
+
+static driver_t xpcife_driver = {
+ "xpcife",
+ xpcife_methods,
+ 0,
+};
+
+DRIVER_MODULE(xpcife, nexus, xpcife_driver, 0, 0);
+
+/* Newbus xen pcib device driver probe */
+static int
+xpcib_probe(device_t dev)
+{
+ struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev);
+ struct pcifront_device *pdev = (struct pcifront_device *)device_get_ivars(device_get_parent(dev));
+
+ DPRINTF("xpcib probe (bus=%d)\n", device_get_unit(dev));
+
+ sc->domain = pdev->xdev->otherend_id;
+ sc->bus = device_get_unit(dev);
+ sc->pdev = pdev;
+
+ return 0;
+}
+
+/* Newbus xen pcib device driver attach */
+static int
+xpcib_attach(device_t dev)
+{
+ struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev);
+
+ DPRINTF("xpcib attach (bus=%d)\n", sc->bus);
+
+ device_add_child(dev, "pci", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
+}
+
+static int
+xpcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
+{
+ struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev);
+ switch (which) {
+ case PCIB_IVAR_BUS:
+ *result = sc->bus;
+ return 0;
+ }
+ return ENOENT;
+}
+
+/* Return the number of slots supported */
+static int
+xpcib_maxslots(device_t dev)
+{
+ return 31;
+}
+
+#define PCI_DEVFN(slot,func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
+
+/* Read configuration space register */
+static u_int32_t
+xpcib_read_config(device_t dev, int bus, int slot, int func,
+ int reg, int bytes)
+{
+ struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev);
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_read,
+ .domain = sc->domain,
+ .bus = sc->bus,
+ .devfn = PCI_DEVFN(slot, func),
+ .offset = reg,
+ .size = bytes,
+ };
+ int err;
+
+ err = do_pci_op(sc->pdev, &op);
+
+ DPRINTF("read config (b=%d, s=%d, f=%d, reg=%d, len=%d, val=%x, err=%d)\n",
+ bus, slot, func, reg, bytes, op.value, err);
+
+ if (err)
+ op.value = ~0;
+
+ return op.value;
+}
+
+/* Write configuration space register */
+static void
+xpcib_write_config(device_t dev, int bus, int slot, int func,
+ int reg, u_int32_t data, int bytes)
+{
+ struct xpcib_softc *sc = (struct xpcib_softc *)device_get_softc(dev);
+ struct xen_pci_op op = {
+ .cmd = XEN_PCI_OP_conf_write,
+ .domain = sc->domain,
+ .bus = sc->bus,
+ .devfn = PCI_DEVFN(slot, func),
+ .offset = reg,
+ .size = bytes,
+ .value = data,
+ };
+ int err;
+
+ err = do_pci_op(sc->pdev, &op);
+
+ DPRINTF("write config (b=%d, s=%d, f=%d, reg=%d, len=%d, val=%x, err=%d)\n",
+ bus, slot, func, reg, bytes, data, err);
+}
+
+static int
+xpcib_route_interrupt(device_t pcib, device_t dev, int pin)
+{
+ struct pci_devinfo *dinfo = device_get_ivars(dev);
+ pcicfgregs *cfg = &dinfo->cfg;
+
+ DPRINTF("route intr (pin=%d, line=%d)\n", pin, cfg->intline);
+
+ return cfg->intline;
+}
+
+static device_method_t xpcib_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, xpcib_probe),
+ DEVMETHOD(device_attach, xpcib_attach),
+ DEVMETHOD(device_detach, bus_generic_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+
+ /* Bus interface */
+ DEVMETHOD(bus_read_ivar, xpcib_read_ivar),
+ DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
+ DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
+ DEVMETHOD(bus_release_resource, bus_generic_release_resource),
+ DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
+ DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),
+
+ /* pcib interface */
+ DEVMETHOD(pcib_maxslots, xpcib_maxslots),
+ DEVMETHOD(pcib_read_config, xpcib_read_config),
+ DEVMETHOD(pcib_write_config, xpcib_write_config),
+ DEVMETHOD(pcib_route_interrupt, xpcib_route_interrupt),
+ DEVMETHOD(pcib_request_feature, pcib_request_feature_allow),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_0(pcib, xpcib_driver, xpcib_methods, sizeof(struct xpcib_softc));
+DRIVER_MODULE(pcib, xpcife, xpcib_driver, 0, 0);
+
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/sys/dev/xen/privcmd/privcmd.c b/sys/dev/xen/privcmd/privcmd.c
index 02e268b23d42..922d24b39432 100644
--- a/sys/dev/xen/privcmd/privcmd.c
+++ b/sys/dev/xen/privcmd/privcmd.c
@@ -120,25 +120,13 @@ privcmd_pg_dtor(void *handle)
struct privcmd_map *map = handle;
int error __diagused;
vm_size_t i;
- vm_page_t m;
/*
* Remove the mappings from the used pages. This will remove the
* underlying p2m bindings in Xen second stage translation.
*/
if (map->mapped == true) {
- VM_OBJECT_WLOCK(map->mem);
-retry:
- for (i = 0; i < map->size; i++) {
- m = vm_page_lookup(map->mem, i);
- if (m == NULL)
- continue;
- if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
- goto retry;
- cdev_pager_free_page(map->mem, m);
- }
- VM_OBJECT_WUNLOCK(map->mem);
-
+ cdev_mgtdev_pager_free_pages(map->mem);
for (i = 0; i < map->size; i++) {
rm.gpfn = atop(map->phys_base_addr) + i;
HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &rm);
diff --git a/sys/dev/xen/xenpci/xenpci.c b/sys/dev/xen/xenpci/xenpci.c
index b7a810bf9e15..f4cbd927bd63 100644
--- a/sys/dev/xen/xenpci/xenpci.c
+++ b/sys/dev/xen/xenpci/xenpci.c
@@ -31,13 +31,12 @@
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
+#include <sys/stdarg.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
-#include <machine/stdarg.h>
-
#include <xen/xen-os.h>
#include <xen/features.h>
#include <xen/hypervisor.h>
@@ -127,8 +126,9 @@ errexit:
static int
xenpci_probe(device_t dev)
{
+ uint32_t device_id = pci_get_devid(dev);
- if (pci_get_devid(dev) != 0x00015853)
+ if (device_id != 0x00015853 && device_id != 0x00025853)
return (ENXIO);
device_set_desc(dev, "Xen Platform Device");
diff --git a/sys/dev/xen/xenstore/xenstore.c b/sys/dev/xen/xenstore/xenstore.c
index 811aa1859d41..e5187466f1fe 100644
--- a/sys/dev/xen/xenstore/xenstore.c
+++ b/sys/dev/xen/xenstore/xenstore.c
@@ -45,10 +45,9 @@
#include <sys/uio.h>
#include <sys/unistd.h>
#include <sys/queue.h>
+#include <sys/stdarg.h>
#include <sys/taskqueue.h>
-#include <machine/stdarg.h>
-
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/xen_intr.h>
@@ -1068,8 +1067,8 @@ static void
xs_attach_deferred(void *arg)
{
- bus_generic_probe(xs.xs_dev);
- bus_generic_attach(xs.xs_dev);
+ bus_identify_children(xs.xs_dev);
+ bus_attach_children(xs.xs_dev);
config_intrhook_disestablish(&xs.xs_attachcb);
}
@@ -1079,8 +1078,8 @@ xs_attach_late(void *arg, int pending)
{
KASSERT((pending == 1), ("xs late attach queued several times"));
- bus_generic_probe(xs.xs_dev);
- bus_generic_attach(xs.xs_dev);
+ bus_identify_children(xs.xs_dev);
+ bus_attach_children(xs.xs_dev);
}
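
bus_generic_probe()/bus_generic_attach() give way here to the newer bus_identify_children()/bus_attach_children() pair, which run child identify methods and then probe and attach the results without returning a status to propagate. A sketch of the idiom in a bus driver's attach routine:

#include <sys/param.h>
#include <sys/bus.h>

static int
example_bus_attach(device_t dev)
{
	/* Let child drivers add devices, then probe and attach them. */
	bus_identify_children(dev);
	bus_attach_children(dev);
	return (0);
}
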
/**
diff --git a/sys/dev/xilinx/axi_quad_spi.c b/sys/dev/xilinx/axi_quad_spi.c
index 54f4c6aa8810..86b00473fc96 100644
--- a/sys/dev/xilinx/axi_quad_spi.c
+++ b/sys/dev/xilinx/axi_quad_spi.c
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2016-2025 Ruslan Bukin <br@bsdpad.com>
* All rights reserved.
*
* Portions of this software were developed by SRI International and the
@@ -100,6 +100,12 @@ struct spi_softc {
void *ih;
};
+static struct ofw_compat_data compat_data[] = {
+ { "xlnx,xps-spi-3.2", 1 },
+ { "xlnx,xps-spi-2.00.a", 1 },
+ { NULL, 0 }
+};
+
static struct resource_spec spi_spec[] = {
{ SYS_RES_MEMORY, 0, RF_ACTIVE },
{ -1, 0 }
@@ -112,7 +118,7 @@ spi_probe(device_t dev)
if (!ofw_bus_status_okay(dev))
return (ENXIO);
- if (!ofw_bus_is_compatible(dev, "xlnx,xps-spi-3.2"))
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
return (ENXIO);
device_set_desc(dev, "Xilinx Quad SPI");
@@ -148,8 +154,9 @@ spi_attach(device_t dev)
reg = (CR_MASTER | CR_MSS | CR_SPE);
WRITE4(sc, SPI_CR, reg);
- device_add_child(dev, "spibus", 0);
- return (bus_generic_attach(dev));
+ device_add_child(dev, "spibus", DEVICE_UNIT_ANY);
+ bus_attach_children(dev);
+ return (0);
}
static int
@@ -211,20 +218,33 @@ spi_transfer(device_t dev, device_t child, struct spi_command *cmd)
return (0);
}
+static phandle_t
+axispi_get_node(device_t bus, device_t dev)
+{
+
+ return (ofw_bus_get_node(bus));
+}
+
static device_method_t spi_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, spi_probe),
DEVMETHOD(device_attach, spi_attach),
+ /* ofw_bus_if */
+ DEVMETHOD(ofw_bus_get_node, axispi_get_node),
+
/* SPI interface */
DEVMETHOD(spibus_transfer, spi_transfer),
DEVMETHOD_END
};
-static driver_t spi_driver = {
- "spi",
+static driver_t axispi_driver = {
+ "axispi",
spi_methods,
sizeof(struct spi_softc),
};
-DRIVER_MODULE(spi, simplebus, spi_driver, 0, 0);
+DRIVER_MODULE(axispi, simplebus, axispi_driver, 0, 0);
+DRIVER_MODULE(ofw_spibus, axispi, ofw_spibus_driver, 0, 0);
+MODULE_DEPEND(axispi, ofw_spibus, 1, 1, 1);
+SIMPLEBUS_PNP_INFO(compat_data);
diff --git a/sys/dev/xilinx/axidma.c b/sys/dev/xilinx/axidma.c
index 5b92f90df683..77a46c712980 100644
--- a/sys/dev/xilinx/axidma.c
+++ b/sys/dev/xilinx/axidma.c
@@ -169,6 +169,9 @@ axidma_intr(struct axidma_softc *sc,
while (chan->idx_tail != chan->idx_head) {
desc = chan->descs[chan->idx_tail];
+ cpu_dcache_wbinv_range((vm_offset_t)desc,
+ sizeof(struct axidma_desc));
+
if ((desc->status & BD_STATUS_CMPLT) == 0)
break;
@@ -357,7 +360,8 @@ axidma_desc_alloc(struct axidma_softc *sc, struct xdma_channel *xchan,
return (-1);
}
chan->mem_vaddr = kva_alloc(chan->mem_size);
- pmap_kenter_device(chan->mem_vaddr, chan->mem_size, chan->mem_paddr);
+ pmap_kenter(chan->mem_vaddr, chan->mem_size, chan->mem_paddr,
+ VM_MEMATTR_DEFAULT);
device_printf(sc->dev, "Allocated chunk %lx %lu\n",
chan->mem_paddr, chan->mem_size);
@@ -493,6 +497,9 @@ axidma_channel_submit_sg(device_t dev, struct xdma_channel *xchan,
if (sg[i].last == 1)
desc->control |= BD_CONTROL_TXEOF;
+ cpu_dcache_wbinv_range((vm_offset_t)desc,
+ sizeof(struct axidma_desc));
+
tmp = chan->idx_head;
atomic_add_int(&chan->descs_used_count, 1);
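
The cpu_dcache_wbinv_range() calls added above keep the CPU cache and DMA descriptor memory coherent when the descriptors are mapped cacheable: CPU stores are written back before the engine reads a descriptor, and stale lines are invalidated before the CPU inspects completion status. A sketch mirroring those calls (the wrapper and struct names are illustrative):

#include <sys/param.h>
#include <machine/cpufunc.h>

struct example_desc {
	uint32_t status;
	uint32_t control;
};

/* Flush CPU writes to a descriptor so the DMA engine sees them. */
static void
desc_sync_for_device(struct example_desc *desc)
{
	cpu_dcache_wbinv_range((vm_offset_t)desc, sizeof(*desc));
}

/* Discard stale cache lines before reading device updates. */
static void
desc_sync_for_cpu(struct example_desc *desc)
{
	cpu_dcache_wbinv_range((vm_offset_t)desc, sizeof(*desc));
}
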
diff --git a/sys/dev/xilinx/if_xae.c b/sys/dev/xilinx/if_xae.c
index 722be6a21cbd..97e7aa16dda4 100644
--- a/sys/dev/xilinx/if_xae.c
+++ b/sys/dev/xilinx/if_xae.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: BSD-2-Clause
*
- * Copyright (c) 2019 Ruslan Bukin <br@bsdpad.com>
+ * Copyright (c) 2019-2025 Ruslan Bukin <br@bsdpad.com>
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory (Department of Computer Science and
@@ -43,6 +43,9 @@
#include <sys/socket.h>
#include <sys/sockio.h>
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
#include <net/bpf.h>
#include <net/if.h>
#include <net/ethernet.h>
@@ -94,6 +97,7 @@
#define NUM_RX_MBUF 16
#define BUFRING_SIZE 8192
#define MDIO_CLK_DIV_DEFAULT 29
+#define BUF_NPAGES 512
#define PHY1_RD(sc, _r) \
xae_miibus_read_reg(sc->dev, 1, _r)
@@ -834,6 +838,8 @@ setup_xdma(struct xae_softc *sc)
{
device_t dev;
vmem_t *vmem;
+ vm_paddr_t phys;
+ vm_page_t m;
int error;
dev = sc->dev;
@@ -886,11 +892,19 @@ setup_xdma(struct xae_softc *sc)
/* Setup bounce buffer */
vmem = xdma_get_memory(dev);
- if (vmem) {
- xchan_set_memory(sc->xchan_tx, vmem);
- xchan_set_memory(sc->xchan_rx, vmem);
+ if (!vmem) {
+ m = vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
+ BUF_NPAGES, 0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, 0,
+ VM_MEMATTR_DEFAULT);
+ phys = VM_PAGE_TO_PHYS(m);
+ vmem = vmem_create("xdma vmem", 0, 0, PAGE_SIZE, PAGE_SIZE,
+ M_BESTFIT | M_WAITOK);
+ vmem_add(vmem, phys, BUF_NPAGES * PAGE_SIZE, 0);
}
+ xchan_set_memory(sc->xchan_tx, vmem);
+ xchan_set_memory(sc->xchan_rx, vmem);
+
xdma_prep_sg(sc->xchan_tx,
TX_QUEUE_SIZE, /* xchan requests queue size */
MCLBYTES, /* maxsegsize */
@@ -990,11 +1004,6 @@ xae_attach(device_t dev)
/* Set up the ethernet interface. */
sc->ifp = ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "could not allocate ifp.\n");
- return (ENXIO);
- }
-
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
@@ -1057,8 +1066,7 @@ xae_detach(device_t dev)
ether_ifdetach(ifp);
}
- if (sc->miibus != NULL)
- device_delete_child(dev, sc->miibus);
+ bus_generic_detach(dev);
if (ifp != NULL)
if_free(ifp);
diff --git a/sys/dev/xl/if_xl.c b/sys/dev/xl/if_xl.c
index 573bc6581577..0d1e05833f1e 100644
--- a/sys/dev/xl/if_xl.c
+++ b/sys/dev/xl/if_xl.c
@@ -1063,7 +1063,7 @@ xl_attach(device_t dev)
u_int16_t sinfo2, xcvr[2];
struct xl_softc *sc;
if_t ifp;
- int media, pmcap;
+ int media;
int error = 0, phy, rid, res;
uint16_t did;
@@ -1188,11 +1188,6 @@ xl_attach(device_t dev)
/* Initialize interface name. */
ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not if_alloc()\n");
- error = ENOSPC;
- goto fail;
- }
if_setsoftc(ifp, sc);
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
@@ -1319,9 +1314,7 @@ xl_attach(device_t dev)
sc->xl_type = XL_TYPE_90X;
/* Check availability of WOL. */
- if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 &&
- pci_find_cap(dev, PCIY_PMG, &pmcap) == 0) {
- sc->xl_pmcap = pmcap;
+ if ((sc->xl_caps & XL_CAPS_PWRMGMT) != 0 && pci_has_pm(dev)) {
sc->xl_flags |= XL_FLAG_WOL;
sinfo2 = 0;
xl_read_eeprom(sc, (caddr_t)&sinfo2, XL_EE_SOFTINFO2, 1, 0);
@@ -1577,8 +1570,6 @@ xl_detach(device_t dev)
callout_drain(&sc->xl_tick_callout);
ether_ifdetach(ifp);
}
- if (sc->xl_miibus)
- device_delete_child(dev, sc->xl_miibus);
bus_generic_detach(dev);
ifmedia_removeall(&sc->ifmedia);
@@ -3263,7 +3254,7 @@ static void
xl_setwol(struct xl_softc *sc)
{
if_t ifp;
- u_int16_t cfg, pmstat;
+ u_int16_t cfg;
if ((sc->xl_flags & XL_FLAG_WOL) == 0)
return;
@@ -3280,12 +3271,6 @@ xl_setwol(struct xl_softc *sc)
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE);
/* Request PME. */
- pmstat = pci_read_config(sc->xl_dev,
- sc->xl_pmcap + PCIR_POWER_STATUS, 2);
if ((if_getcapenable(ifp) & IFCAP_WOL_MAGIC) != 0)
- pmstat |= PCIM_PSTAT_PMEENABLE;
- else
- pmstat &= ~PCIM_PSTAT_PMEENABLE;
- pci_write_config(sc->xl_dev,
- sc->xl_pmcap + PCIR_POWER_STATUS, pmstat, 2);
+ pci_enable_pme(sc->xl_dev);
}
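
The WOL hunks above replace open-coded PCI power-management register pokes with pci_has_pm() and pci_enable_pme(), which locate the PM capability and set PCIM_PSTAT_PMEENABLE internally. A minimal sketch of the new calls in a driver's WOL path, assuming the helpers behave as used above:

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

static void
example_setwol(device_t dev, bool magic_enabled)
{
	/* No PCI PM capability means no PME to arm. */
	if (!pci_has_pm(dev))
		return;
	if (magic_enabled)
		pci_enable_pme(dev);
}
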
diff --git a/sys/dev/xl/if_xlreg.h b/sys/dev/xl/if_xlreg.h
index 7cdd7a889e0d..8249622a0e79 100644
--- a/sys/dev/xl/if_xlreg.h
+++ b/sys/dev/xl/if_xlreg.h
@@ -603,7 +603,6 @@ struct xl_softc {
u_int16_t xl_media;
u_int16_t xl_caps;
u_int16_t xl_tx_thresh;
- int xl_pmcap;
int xl_if_flags;
struct xl_list_data xl_ldata;
struct xl_chain_data xl_cdata;